page                9 arch/alpha/include/asm/agp.h #define map_page_into_agp(page) 
page               10 arch/alpha/include/asm/agp.h #define unmap_page_from_agp(page) 
page               14 arch/alpha/include/asm/cacheflush.h #define flush_dcache_page(page)			do { } while (0)
page               51 arch/alpha/include/asm/cacheflush.h flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
page               64 arch/alpha/include/asm/cacheflush.h 		struct page *page, unsigned long addr, int len);
page               68 arch/alpha/include/asm/cacheflush.h #define flush_icache_page(vma, page) \
page               69 arch/alpha/include/asm/cacheflush.h   flush_icache_user_range((vma), (page), 0, 0)
page               71 arch/alpha/include/asm/cacheflush.h #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
page               73 arch/alpha/include/asm/cacheflush.h      flush_icache_user_range(vma, page, vaddr, len); \
page               75 arch/alpha/include/asm/cacheflush.h #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
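
Note: the two copy_*_user_page macros from alpha's cacheflush.h appear above only as their first and third lines; the index elides the continuation lines. A hedged reconstruction of the usual shape (the memcpy and the do/while wrapper are assumptions, not shown in the listing):

        /* Assumed shape of the alpha macro whose first and third lines are
         * listed above: copy into the kernel alias of the user page, then
         * flush the I-cache for the affected user range. */
        #define copy_to_user_page(vma, page, vaddr, dst, src, len)      \
        do {                                                            \
                memcpy(dst, src, len);                                  \
                flush_icache_user_range(vma, page, vaddr, len);         \
        } while (0)
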
page               94 arch/alpha/include/asm/io.h #define page_to_phys(page)	page_to_pa(page)
page               79 arch/alpha/include/asm/mmzone.h #define mk_pte(page, pgprot)						     \
page               84 arch/alpha/include/asm/mmzone.h 	pfn = page_to_pfn(page) << 32; \
page               93 arch/alpha/include/asm/mmzone.h 	struct page * __xx;						\
page              101 arch/alpha/include/asm/mmzone.h #define page_to_pa(page)						\
page              102 arch/alpha/include/asm/mmzone.h 	(page_to_pfn(page) << PAGE_SHIFT)
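
Note: both alpha spellings of page_to_pa in this listing (mmzone.h line 101 here, pgtable.h line 207 below) compute the same value, since with a flat mem_map the pfn of a page is its offset from mem_map. As a one-function illustration (example_page_to_pa is a made-up name):

        /* Illustration only: a pfn indexes physical memory in page-size
         * units, so shifting by PAGE_SHIFT gives the physical base. */
        static inline phys_addr_t example_page_to_pa(struct page *page)
        {
                return (phys_addr_t)page_to_pfn(page) << PAGE_SHIFT;
        }
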
page               17 arch/alpha/include/asm/page.h extern void clear_page(void *page);
page               18 arch/alpha/include/asm/page.h #define clear_user_page(page, vaddr, pg)	clear_page(page)
page               66 arch/alpha/include/asm/page.h typedef struct page *pgtable_t;
page               17 arch/alpha/include/asm/pci.h struct page;
page              207 arch/alpha/include/asm/pgtable.h #define page_to_pa(page)	(((page) - mem_map) << PAGE_SHIFT)
page              211 arch/alpha/include/asm/pgtable.h #define mk_pte(page, pgprot)						\
page              215 arch/alpha/include/asm/pgtable.h 	pte_val(pte) = (page_to_pfn(page) << 32) | pgprot_val(pgprot);	\
page              352 arch/alpha/kernel/core_cia.c 	static int page[PAGE_SIZE/4]
page              377 arch/alpha/kernel/core_cia.c 	pte0 = (virt_to_phys(page) >> (PAGE_SHIFT - 1)) | 1;
page              427 arch/alpha/kernel/core_cia.c 	page[0] = data0;
page              463 arch/alpha/kernel/core_cia.c 	page[0] = data0;
page              487 arch/alpha/kernel/core_cia.c 	page[0] = data0;
page              192 arch/alpha/kernel/pci_impl.h extern int iommu_bind(struct pci_iommu_arena *, long, long, struct page **);
page              362 arch/alpha/kernel/pci_iommu.c static dma_addr_t alpha_pci_map_page(struct device *dev, struct page *page,
page              373 arch/alpha/kernel/pci_iommu.c 	return pci_map_single_1(pdev, (char *)page_address(page) + offset, 
page              908 arch/alpha/kernel/pci_iommu.c 	   struct page **pages)
page              743 arch/alpha/kernel/smp.c flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
page               67 arch/alpha/kernel/srm_env.c 	char		*page;
page               69 arch/alpha/kernel/srm_env.c 	page = (char *)__get_free_page(GFP_USER);
page               70 arch/alpha/kernel/srm_env.c 	if (!page)
page               73 arch/alpha/kernel/srm_env.c 	ret = callback_getenv(id, page, PAGE_SIZE);
page               76 arch/alpha/kernel/srm_env.c 		seq_write(m, page, ret);
page               80 arch/alpha/kernel/srm_env.c 	free_page((unsigned long)page);
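
Note: the srm_env.c fragment above is the common borrow-one-page-as-scratch-buffer pattern: a free page bounces data from a firmware call into seq_write(). A sketch under assumptions (the function name is invented and the SRM status check is simplified):

        /* Hedged sketch of the read path listed above: one free page as a
         * bounce buffer for the SRM call, handed to seq_write(), then
         * released.  Error handling is simplified. */
        static int srm_env_show_sketch(struct seq_file *m, unsigned long id)
        {
                char *page = (char *)__get_free_page(GFP_USER);
                unsigned long ret;

                if (!page)
                        return -ENOMEM;
                ret = callback_getenv(id, page, PAGE_SIZE);
                if ((ret >> 61) == 0)   /* assumed: SRM flags errors in the top bits */
                        seq_write(m, page, ret);
                free_page((unsigned long)page);
                return 0;
        }
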
page               27 arch/arc/include/asm/cacheflush.h #define flush_icache_page(vma, page)
page               38 arch/arc/include/asm/cacheflush.h void flush_dcache_page(struct page *page);
page               66 arch/arc/include/asm/cacheflush.h 	unsigned long user_addr, unsigned long page);
page               74 arch/arc/include/asm/cacheflush.h 	struct page *page, unsigned long u_vaddr);
page              107 arch/arc/include/asm/cacheflush.h #define copy_to_user_page(vma, page, vaddr, dst, src, len)		\
page              114 arch/arc/include/asm/cacheflush.h #define copy_from_user_page(vma, page, vaddr, dst, src, len)		\
page               33 arch/arc/include/asm/highmem.h extern void *kmap(struct page *page);
page               34 arch/arc/include/asm/highmem.h extern void *kmap_high(struct page *page);
page               35 arch/arc/include/asm/highmem.h extern void *kmap_atomic(struct page *page);
page               37 arch/arc/include/asm/highmem.h extern void kunmap_high(struct page *page);
page               46 arch/arc/include/asm/highmem.h static inline void kunmap(struct page *page)
page               49 arch/arc/include/asm/highmem.h 	if (!PageHighMem(page))
page               51 arch/arc/include/asm/highmem.h 	kunmap_high(page);
page               38 arch/arc/include/asm/hugepage.h #define mk_pmd(page, prot)	pte_pmd(mk_pte(page, prot))
page               51 arch/arc/include/asm/io.h #define page_to_phys(page)		(page_to_pfn(page) << PAGE_SHIFT)
page               16 arch/arc/include/asm/page.h struct page;
page               20 arch/arc/include/asm/page.h void copy_user_highpage(struct page *to, struct page *from,
page               22 arch/arc/include/asm/page.h void clear_user_page(void *to, unsigned long u_vaddr, struct page *page);
page              104 arch/arc/include/asm/pgalloc.h 	struct page *page;
page              110 arch/arc/include/asm/pgalloc.h 	page = virt_to_page(pte_pg);
page              111 arch/arc/include/asm/pgalloc.h 	if (!pgtable_pte_page_ctor(page)) {
page              112 arch/arc/include/asm/pgalloc.h 		__free_page(page);
page              280 arch/arc/include/asm/pgtable.h #define mk_pte(page, prot)	pfn_pte(page_to_pfn(page), prot)
page               13 arch/arc/include/asm/tlbflush.h void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
page               24 arch/arc/include/asm/tlbflush.h #define flush_tlb_page(vma, page)	local_flush_tlb_page(vma, page)
page               34 arch/arc/include/asm/tlbflush.h extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
page              486 arch/arc/kernel/perf_event.c 					 char *page)
page              491 arch/arc/kernel/perf_event.c 	return sprintf(page, "event=0x%04llx\n", pmu_attr->id);
page              837 arch/arc/mm/cache.c void flush_dcache_page(struct page *page)
page              842 arch/arc/mm/cache.c 		clear_bit(PG_dc_clean, &page->flags);
page              847 arch/arc/mm/cache.c 	mapping = page_mapping_file(page);
page              856 arch/arc/mm/cache.c 		clear_bit(PG_dc_clean, &page->flags);
page              857 arch/arc/mm/cache.c 	} else if (page_mapcount(page)) {
page              860 arch/arc/mm/cache.c 		phys_addr_t paddr = (unsigned long)page_address(page);
page              861 arch/arc/mm/cache.c 		unsigned long vaddr = page->index << PAGE_SHIFT;
page             1059 arch/arc/mm/cache.c void flush_anon_page(struct vm_area_struct *vma, struct page *page,
page             1063 arch/arc/mm/cache.c 	__flush_dcache_page((phys_addr_t)page_address(page), u_vaddr);
page             1064 arch/arc/mm/cache.c 	__flush_dcache_page((phys_addr_t)page_address(page),
page             1065 arch/arc/mm/cache.c 			    (phys_addr_t)page_address(page));
page             1071 arch/arc/mm/cache.c void copy_user_highpage(struct page *to, struct page *from,
page             1121 arch/arc/mm/cache.c void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
page             1124 arch/arc/mm/cache.c 	clear_bit(PG_dc_clean, &page->flags);
page               18 arch/arc/mm/dma.c void arch_dma_prep_coherent(struct page *page, size_t size)
page               30 arch/arc/mm/dma.c 	dma_cache_wback_inv(page_to_phys(page), size);
page               52 arch/arc/mm/highmem.c void *kmap(struct page *page)
page               55 arch/arc/mm/highmem.c 	if (!PageHighMem(page))
page               56 arch/arc/mm/highmem.c 		return page_address(page);
page               58 arch/arc/mm/highmem.c 	return kmap_high(page);
page               62 arch/arc/mm/highmem.c void *kmap_atomic(struct page *page)
page               69 arch/arc/mm/highmem.c 	if (!PageHighMem(page))
page               70 arch/arc/mm/highmem.c 		return page_address(page);
page               77 arch/arc/mm/highmem.c 		   mk_pte(page, kmap_prot));
page              398 arch/arc/mm/tlb.c void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
page              409 arch/arc/mm/tlb.c 		tlb_entry_erase((page & PAGE_MASK) | hw_pid(vma->vm_mm, cpu));
page              601 arch/arc/mm/tlb.c 	struct page *page = pfn_to_page(pte_pfn(*ptep));
page              605 arch/arc/mm/tlb.c 	if (page == ZERO_PAGE(0)) {
page              621 arch/arc/mm/tlb.c 		int dirty = !test_and_set_bit(PG_dc_clean, &page->flags);
page              313 arch/arm/common/dmabounce.c static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page,
page              321 arch/arm/common/dmabounce.c 		__func__, page, offset, size, dir);
page              323 arch/arm/common/dmabounce.c 	dma_addr = pfn_to_dma(dev, page_to_pfn(page)) + offset;
page              334 arch/arm/common/dmabounce.c 	if (PageHighMem(page)) {
page              339 arch/arm/common/dmabounce.c 	return map_single(dev, page_address(page) + offset, size, dir, attrs);
page              168 arch/arm/include/asm/cacheflush.h extern void copy_to_user_page(struct vm_area_struct *, struct page *,
page              170 arch/arm/include/asm/cacheflush.h #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
page              292 arch/arm/include/asm/cacheflush.h extern void flush_dcache_page(struct page *);
page              307 arch/arm/include/asm/cacheflush.h 			 struct page *page, unsigned long vmaddr)
page              310 arch/arm/include/asm/cacheflush.h 				struct page *, unsigned long);
page              311 arch/arm/include/asm/cacheflush.h 	if (PageAnon(page))
page              312 arch/arm/include/asm/cacheflush.h 		__flush_anon_page(vma, page, vmaddr);
page              316 arch/arm/include/asm/cacheflush.h extern void flush_kernel_dcache_page(struct page *);
page              321 arch/arm/include/asm/cacheflush.h #define flush_icache_user_range(vma,page,addr,len) \
page              322 arch/arm/include/asm/cacheflush.h 	flush_dcache_page(page)
page              328 arch/arm/include/asm/cacheflush.h #define flush_icache_page(vma,page)	do { } while (0)
page              476 arch/arm/include/asm/cacheflush.h void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
page               23 arch/arm/include/asm/highmem.h extern void *kmap_high(struct page *page);
page               24 arch/arm/include/asm/highmem.h extern void kunmap_high(struct page *page);
page               53 arch/arm/include/asm/highmem.h extern void *kmap_high_get(struct page *page);
page               55 arch/arm/include/asm/highmem.h static inline void *kmap_high_get(struct page *page)
page               66 arch/arm/include/asm/highmem.h extern void *kmap(struct page *page);
page               67 arch/arm/include/asm/highmem.h extern void kunmap(struct page *page);
page               68 arch/arm/include/asm/highmem.h extern void *kmap_atomic(struct page *page);
page               23 arch/arm/include/asm/hugetlb.h static inline void arch_clear_hugepage_flags(struct page *page)
page               25 arch/arm/include/asm/hugetlb.h 	clear_bit(PG_dcache_clean, &page->flags);
page               74 arch/arm/include/asm/kexec.h static inline unsigned long page_to_boot_pfn(struct page *page)
page               76 arch/arm/include/asm/kexec.h 	return page_to_pfn(page) + (arch_phys_to_idmap_offset >> PAGE_SHIFT);
page               80 arch/arm/include/asm/kexec.h static inline struct page *boot_pfn_to_page(unsigned long boot_pfn)
page              190 arch/arm/include/asm/kvm_mmu.h 	struct page *ptr_page = virt_to_page(ptr);
page              132 arch/arm/include/asm/memory.h #define page_to_phys(page)	(__pfn_to_phys(page_to_pfn(page)))
page               11 arch/arm/include/asm/page-nommu.h #define clear_page(page)	memset((page), 0, PAGE_SIZE)
page               14 arch/arm/include/asm/page-nommu.h #define clear_user_page(page, vaddr, pg)	clear_page(page)
page              107 arch/arm/include/asm/page.h struct page;
page              111 arch/arm/include/asm/page.h 	void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
page              112 arch/arm/include/asm/page.h 	void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
page              127 arch/arm/include/asm/page.h extern void __cpu_clear_user_highpage(struct page *page, unsigned long vaddr);
page              128 arch/arm/include/asm/page.h extern void __cpu_copy_user_highpage(struct page *to, struct page *from,
page              132 arch/arm/include/asm/page.h #define clear_user_highpage(page,vaddr)		\
page              133 arch/arm/include/asm/page.h 	 __cpu_clear_user_highpage(page, vaddr)
page              139 arch/arm/include/asm/page.h #define clear_page(page)	memset((void *)(page), 0, PAGE_SIZE)
page              154 arch/arm/include/asm/page.h typedef struct page *pgtable_t;
page              101 arch/arm/include/asm/pgalloc.h 	struct page *pte;
page              237 arch/arm/include/asm/pgtable-3level.h #define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)
page              173 arch/arm/include/asm/pgtable.h extern struct page *empty_zero_page;
page              215 arch/arm/include/asm/pgtable.h #define mk_pte(page,prot)	pfn_pte(page_to_pfn(page), prot)
page               35 arch/arm/include/asm/tlb.h 	free_page_and_swap_cache((struct page *)_table);
page               65 arch/arm/include/asm/tlb.h 	struct page *page = virt_to_page(pmdp);
page               67 arch/arm/include/asm/tlb.h 	tlb_remove_table(tlb, page);
page               42 arch/arm/include/asm/vdso_datapage.h 	u8 page[PAGE_SIZE];
page               26 arch/arm/kernel/patch.c 	struct page *page;
page               29 arch/arm/kernel/patch.c 		page = vmalloc_to_page(addr);
page               31 arch/arm/kernel/patch.c 		page = virt_to_page(addr);
page               40 arch/arm/kernel/patch.c 	set_fixmap(fixmap, page_to_phys(page));
page              399 arch/arm/kernel/process.c static struct page *signal_page;
page              400 arch/arm/kernel/process.c extern struct page *get_signal_page(void);
page              683 arch/arm/kernel/signal.c struct page *get_signal_page(void)
page              687 arch/arm/kernel/signal.c 	struct page *page;
page              690 arch/arm/kernel/signal.c 	page = alloc_pages(GFP_KERNEL, 0);
page              692 arch/arm/kernel/signal.c 	if (!page)
page              695 arch/arm/kernel/signal.c 	addr = page_address(page);
page              710 arch/arm/kernel/signal.c 	return page;
page               29 arch/arm/kernel/vdso.c static struct page **vdso_text_pagelist;
page               42 arch/arm/kernel/vdso.c static struct page *vdso_data_page __ro_after_init;
page              201 arch/arm/kernel/vdso.c 	vdso_text_pagelist = kcalloc(text_pages, sizeof(struct page *),
page              211 arch/arm/kernel/vdso.c 		struct page *page;
page              213 arch/arm/kernel/vdso.c 		page = virt_to_page(vdso_start + i * PAGE_SIZE);
page              214 arch/arm/kernel/vdso.c 		vdso_text_pagelist[i] = page;
page              232 arch/arm/lib/uaccess_with_memcpy.c 	struct page *src_page, *dst_page;
page              150 arch/arm/mach-rpc/ecard.c 		unsigned int page;
page              152 arch/arm/mach-rpc/ecard.c 		page = (off >> 12) * 4;
page              153 arch/arm/mach-rpc/ecard.c 		if (page > 256 * 4)
page              172 arch/arm/mach-rpc/ecard.c 			readb(base + page);
page              177 arch/arm/mach-rpc/ecard.c 			*buf++ = readb(base + page);
page               38 arch/arm/mm/copypage-fa.c void fa_copy_user_highpage(struct page *to, struct page *from,
page               55 arch/arm/mm/copypage-fa.c void fa_clear_user_highpage(struct page *page, unsigned long vaddr)
page               57 arch/arm/mm/copypage-fa.c 	void *ptr, *kaddr = kmap_atomic(page);
page               65 arch/arm/mm/copypage-feroceon.c void feroceon_copy_user_highpage(struct page *to, struct page *from,
page               78 arch/arm/mm/copypage-feroceon.c void feroceon_clear_user_highpage(struct page *page, unsigned long vaddr)
page               80 arch/arm/mm/copypage-feroceon.c 	void *ptr, *kaddr = kmap_atomic(page);
page               64 arch/arm/mm/copypage-v4mc.c void v4_mc_copy_user_highpage(struct page *to, struct page *from,
page               86 arch/arm/mm/copypage-v4mc.c void v4_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
page               88 arch/arm/mm/copypage-v4mc.c 	void *ptr, *kaddr = kmap_atomic(page);
page               47 arch/arm/mm/copypage-v4wb.c void v4wb_copy_user_highpage(struct page *to, struct page *from,
page               65 arch/arm/mm/copypage-v4wb.c void v4wb_clear_user_highpage(struct page *page, unsigned long vaddr)
page               67 arch/arm/mm/copypage-v4wb.c 	void *ptr, *kaddr = kmap_atomic(page);
page               43 arch/arm/mm/copypage-v4wt.c void v4wt_copy_user_highpage(struct page *to, struct page *from,
page               60 arch/arm/mm/copypage-v4wt.c void v4wt_clear_user_highpage(struct page *page, unsigned long vaddr)
page               62 arch/arm/mm/copypage-v4wt.c 	void *ptr, *kaddr = kmap_atomic(page);
page               30 arch/arm/mm/copypage-v6.c static void v6_copy_user_highpage_nonaliasing(struct page *to,
page               31 arch/arm/mm/copypage-v6.c 	struct page *from, unsigned long vaddr, struct vm_area_struct *vma)
page               46 arch/arm/mm/copypage-v6.c static void v6_clear_user_highpage_nonaliasing(struct page *page, unsigned long vaddr)
page               48 arch/arm/mm/copypage-v6.c 	void *kaddr = kmap_atomic(page);
page               69 arch/arm/mm/copypage-v6.c static void v6_copy_user_highpage_aliasing(struct page *to,
page               70 arch/arm/mm/copypage-v6.c 	struct page *from, unsigned long vaddr, struct vm_area_struct *vma)
page              103 arch/arm/mm/copypage-v6.c static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vaddr)
page              108 arch/arm/mm/copypage-v6.c 	discard_old_kernel_data(page_address(page));
page              116 arch/arm/mm/copypage-v6.c 	set_top_pte(to, mk_pte(page, PAGE_KERNEL));
page               63 arch/arm/mm/copypage-xsc3.c void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
page               79 arch/arm/mm/copypage-xsc3.c void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
page               81 arch/arm/mm/copypage-xsc3.c 	void *ptr, *kaddr = kmap_atomic(page);
page               84 arch/arm/mm/copypage-xscale.c void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
page              107 arch/arm/mm/copypage-xscale.c xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
page              109 arch/arm/mm/copypage-xscale.c 	void *ptr, *kaddr = kmap_atomic(page);
page               97 arch/arm/mm/dma-mapping-nommu.c static dma_addr_t arm_nommu_dma_map_page(struct device *dev, struct page *page,
page              102 arch/arm/mm/dma-mapping-nommu.c 	dma_addr_t handle = page_to_phys(page) + offset;
page               58 arch/arm/mm/dma-mapping.c 	struct page *page;
page               67 arch/arm/mm/dma-mapping.c 		       struct page **ret_page);
page              109 arch/arm/mm/dma-mapping.c static void __dma_page_cpu_to_dev(struct page *, unsigned long,
page              111 arch/arm/mm/dma-mapping.c static void __dma_page_dev_to_cpu(struct page *, unsigned long,
page              128 arch/arm/mm/dma-mapping.c static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
page              133 arch/arm/mm/dma-mapping.c 		__dma_page_cpu_to_dev(page, offset, size, dir);
page              134 arch/arm/mm/dma-mapping.c 	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
page              137 arch/arm/mm/dma-mapping.c static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page,
page              141 arch/arm/mm/dma-mapping.c 	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
page              170 arch/arm/mm/dma-mapping.c 	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
page              171 arch/arm/mm/dma-mapping.c 	__dma_page_dev_to_cpu(page, offset, size, dir);
page              178 arch/arm/mm/dma-mapping.c 	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
page              179 arch/arm/mm/dma-mapping.c 	__dma_page_cpu_to_dev(page, offset, size, dir);
page              265 arch/arm/mm/dma-mapping.c static void __dma_clear_buffer(struct page *page, size_t size, int coherent_flag)
page              271 arch/arm/mm/dma-mapping.c 	if (PageHighMem(page)) {
page              272 arch/arm/mm/dma-mapping.c 		phys_addr_t base = __pfn_to_phys(page_to_pfn(page));
page              275 arch/arm/mm/dma-mapping.c 			void *ptr = kmap_atomic(page);
page              280 arch/arm/mm/dma-mapping.c 			page++;
page              286 arch/arm/mm/dma-mapping.c 		void *ptr = page_address(page);
page              299 arch/arm/mm/dma-mapping.c static struct page *__dma_alloc_buffer(struct device *dev, size_t size,
page              303 arch/arm/mm/dma-mapping.c 	struct page *page, *p, *e;
page              305 arch/arm/mm/dma-mapping.c 	page = alloc_pages(gfp, order);
page              306 arch/arm/mm/dma-mapping.c 	if (!page)
page              312 arch/arm/mm/dma-mapping.c 	split_page(page, order);
page              313 arch/arm/mm/dma-mapping.c 	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
page              316 arch/arm/mm/dma-mapping.c 	__dma_clear_buffer(page, size, coherent_flag);
page              318 arch/arm/mm/dma-mapping.c 	return page;
page              324 arch/arm/mm/dma-mapping.c static void __dma_free_buffer(struct page *page, size_t size)
page              326 arch/arm/mm/dma-mapping.c 	struct page *e = page + (size >> PAGE_SHIFT);
page              328 arch/arm/mm/dma-mapping.c 	while (page < e) {
page              329 arch/arm/mm/dma-mapping.c 		__free_page(page);
page              330 arch/arm/mm/dma-mapping.c 		page++;
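
Note: __dma_alloc_buffer()/__dma_free_buffer() above show the trim-to-size idiom: over-allocate to a power-of-two order, split_page() the block so every subpage is independently freeable, then free the tail beyond the requested size. Condensed into one hypothetical helper (the mask check and buffer clearing in the real function are omitted):

        /* Condensed from the fragments above: allocate the next power-of-two
         * order, split it into order-0 pages, return the excess at once. */
        static struct page *alloc_exact_pages(gfp_t gfp, size_t size)
        {
                unsigned int order = get_order(size);
                struct page *page = alloc_pages(gfp, order);
                struct page *p, *e;

                if (!page)
                        return NULL;
                split_page(page, order);  /* each subpage now freeable on its own */
                for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
                        __free_page(p);
                return page;
        }
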
page              335 arch/arm/mm/dma-mapping.c 				     pgprot_t prot, struct page **ret_page,
page              340 arch/arm/mm/dma-mapping.c 				 pgprot_t prot, struct page **ret_page,
page              362 arch/arm/mm/dma-mapping.c 	struct page *page;
page              374 arch/arm/mm/dma-mapping.c 				      &page, atomic_pool_init, true, NORMAL,
page              378 arch/arm/mm/dma-mapping.c 					   &page, atomic_pool_init, true);
page              383 arch/arm/mm/dma-mapping.c 					page_to_phys(page),
page              466 arch/arm/mm/dma-mapping.c 	struct page *page = virt_to_page(addr);
page              469 arch/arm/mm/dma-mapping.c 	set_pte_ext(pte, mk_pte(page, prot), 0);
page              473 arch/arm/mm/dma-mapping.c static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
page              475 arch/arm/mm/dma-mapping.c 	unsigned long start = (unsigned long) page_address(page);
page              483 arch/arm/mm/dma-mapping.c 				 pgprot_t prot, struct page **ret_page,
page              486 arch/arm/mm/dma-mapping.c 	struct page *page;
page              492 arch/arm/mm/dma-mapping.c 	page = __dma_alloc_buffer(dev, size, gfp, NORMAL);
page              493 arch/arm/mm/dma-mapping.c 	if (!page)
page              498 arch/arm/mm/dma-mapping.c 	ptr = dma_common_contiguous_remap(page, size, prot, caller);
page              500 arch/arm/mm/dma-mapping.c 		__dma_free_buffer(page, size);
page              505 arch/arm/mm/dma-mapping.c 	*ret_page = page;
page              509 arch/arm/mm/dma-mapping.c static void *__alloc_from_pool(size_t size, struct page **ret_page)
page              546 arch/arm/mm/dma-mapping.c 				     pgprot_t prot, struct page **ret_page,
page              552 arch/arm/mm/dma-mapping.c 	struct page *page;
page              555 arch/arm/mm/dma-mapping.c 	page = dma_alloc_from_contiguous(dev, count, order, gfp & __GFP_NOWARN);
page              556 arch/arm/mm/dma-mapping.c 	if (!page)
page              559 arch/arm/mm/dma-mapping.c 	__dma_clear_buffer(page, size, coherent_flag);
page              564 arch/arm/mm/dma-mapping.c 	if (PageHighMem(page)) {
page              565 arch/arm/mm/dma-mapping.c 		ptr = dma_common_contiguous_remap(page, size, prot, caller);
page              567 arch/arm/mm/dma-mapping.c 			dma_release_from_contiguous(dev, page, count);
page              571 arch/arm/mm/dma-mapping.c 		__dma_remap(page, size, prot);
page              572 arch/arm/mm/dma-mapping.c 		ptr = page_address(page);
page              576 arch/arm/mm/dma-mapping.c 	*ret_page = page;
page              580 arch/arm/mm/dma-mapping.c static void __free_from_contiguous(struct device *dev, struct page *page,
page              584 arch/arm/mm/dma-mapping.c 		if (PageHighMem(page))
page              587 arch/arm/mm/dma-mapping.c 			__dma_remap(page, size, PAGE_KERNEL);
page              589 arch/arm/mm/dma-mapping.c 	dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
page              601 arch/arm/mm/dma-mapping.c 				   struct page **ret_page)
page              603 arch/arm/mm/dma-mapping.c 	struct page *page;
page              605 arch/arm/mm/dma-mapping.c 	page = __dma_alloc_buffer(dev, size, gfp, COHERENT);
page              606 arch/arm/mm/dma-mapping.c 	if (!page)
page              609 arch/arm/mm/dma-mapping.c 	*ret_page = page;
page              610 arch/arm/mm/dma-mapping.c 	return page_address(page);
page              614 arch/arm/mm/dma-mapping.c 				    struct page **ret_page)
page              622 arch/arm/mm/dma-mapping.c 	__dma_free_buffer(args->page, args->size);
page              631 arch/arm/mm/dma-mapping.c 				 struct page **ret_page)
page              641 arch/arm/mm/dma-mapping.c 	__free_from_contiguous(args->dev, args->page, args->cpu_addr,
page              651 arch/arm/mm/dma-mapping.c 				  struct page **ret_page)
page              667 arch/arm/mm/dma-mapping.c 				   struct page **ret_page)
page              679 arch/arm/mm/dma-mapping.c 	__dma_free_buffer(args->page, args->size);
page              692 arch/arm/mm/dma-mapping.c 	struct page *page = NULL;
page              749 arch/arm/mm/dma-mapping.c 	addr = buf->allocator->alloc(&args, &page);
page              751 arch/arm/mm/dma-mapping.c 	if (page) {
page              754 arch/arm/mm/dma-mapping.c 		*handle = pfn_to_dma(dev, page_to_pfn(page));
page              755 arch/arm/mm/dma-mapping.c 		buf->virt = args.want_vaddr ? addr : page;
page              764 arch/arm/mm/dma-mapping.c 	return args.want_vaddr ? addr : page;
page              835 arch/arm/mm/dma-mapping.c 	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
page              841 arch/arm/mm/dma-mapping.c 		.page = page,
page              870 arch/arm/mm/dma-mapping.c 	struct page *page;
page              877 arch/arm/mm/dma-mapping.c 	page = pfn_to_page(pfn);
page              883 arch/arm/mm/dma-mapping.c 	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
page              887 arch/arm/mm/dma-mapping.c static void dma_cache_maint_page(struct page *page, unsigned long offset,
page              894 arch/arm/mm/dma-mapping.c 	pfn = page_to_pfn(page) + offset / PAGE_SIZE;
page              907 arch/arm/mm/dma-mapping.c 		page = pfn_to_page(pfn);
page              909 arch/arm/mm/dma-mapping.c 		if (PageHighMem(page)) {
page              914 arch/arm/mm/dma-mapping.c 				vaddr = kmap_atomic(page);
page              918 arch/arm/mm/dma-mapping.c 				vaddr = kmap_high_get(page);
page              921 arch/arm/mm/dma-mapping.c 					kunmap_high(page);
page              925 arch/arm/mm/dma-mapping.c 			vaddr = page_address(page) + offset;
page              940 arch/arm/mm/dma-mapping.c static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
page              945 arch/arm/mm/dma-mapping.c 	dma_cache_maint_page(page, off, size, dir, dmac_map_area);
page              947 arch/arm/mm/dma-mapping.c 	paddr = page_to_phys(page) + off;
page              956 arch/arm/mm/dma-mapping.c static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
page              959 arch/arm/mm/dma-mapping.c 	phys_addr_t paddr = page_to_phys(page) + off;
page              966 arch/arm/mm/dma-mapping.c 		dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
page              976 arch/arm/mm/dma-mapping.c 		pfn = page_to_pfn(page) + off / PAGE_SIZE;
page              983 arch/arm/mm/dma-mapping.c 			page = pfn_to_page(pfn++);
page              984 arch/arm/mm/dma-mapping.c 			set_bit(PG_dcache_clean, &page->flags);
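
Note: __dma_page_dev_to_cpu() above finishes by marking every fully-transferred page PG_dcache_clean, so the lazy flush machinery (see the fault-armv.c lines further down) can skip them. A condensed sketch that ignores the partial-first-page adjustment in the real code (the helper name is invented):

        /* After a device-to-CPU unmap covering whole pages, mark them
         * D-cache clean so later mapping faults skip the flush. */
        static void mark_dma_pages_clean(struct page *page, unsigned long off,
                                         size_t size)
        {
                unsigned long pfn = page_to_pfn(page) + off / PAGE_SIZE;
                size_t left = size;

                while (left >= PAGE_SIZE) {
                        page = pfn_to_page(pfn++);
                        set_bit(PG_dcache_clean, &page->flags);
                        left -= PAGE_SIZE;
                }
        }
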
page             1236 arch/arm/mm/dma-mapping.c static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
page             1240 arch/arm/mm/dma-mapping.c 	struct page **pages;
page             1242 arch/arm/mm/dma-mapping.c 	int array_size = count * sizeof(struct page *);
page             1256 arch/arm/mm/dma-mapping.c 		struct page *page;
page             1258 arch/arm/mm/dma-mapping.c 		page = dma_alloc_from_contiguous(dev, count, order,
page             1260 arch/arm/mm/dma-mapping.c 		if (!page)
page             1263 arch/arm/mm/dma-mapping.c 		__dma_clear_buffer(page, size, coherent_flag);
page             1266 arch/arm/mm/dma-mapping.c 			pages[i] = page + i;
page             1327 arch/arm/mm/dma-mapping.c static int __iommu_free_buffer(struct device *dev, struct page **pages,
page             1349 arch/arm/mm/dma-mapping.c __iommu_create_mapping(struct device *dev, struct page **pages, size_t size,
page             1404 arch/arm/mm/dma-mapping.c static struct page **__atomic_get_pages(void *addr)
page             1406 arch/arm/mm/dma-mapping.c 	struct page *page;
page             1410 arch/arm/mm/dma-mapping.c 	page = phys_to_page(phys);
page             1412 arch/arm/mm/dma-mapping.c 	return (struct page **)page;
page             1415 arch/arm/mm/dma-mapping.c static struct page **__iommu_get_pages(void *cpu_addr, unsigned long attrs)
page             1430 arch/arm/mm/dma-mapping.c 	struct page *page;
page             1434 arch/arm/mm/dma-mapping.c 		addr = __alloc_simple_buffer(dev, size, gfp, &page);
page             1436 arch/arm/mm/dma-mapping.c 		addr = __alloc_from_pool(size, &page);
page             1440 arch/arm/mm/dma-mapping.c 	*handle = __iommu_create_mapping(dev, &page, size, attrs);
page             1466 arch/arm/mm/dma-mapping.c 	struct page **pages;
page             1526 arch/arm/mm/dma-mapping.c 	struct page **pages = __iommu_get_pages(cpu_addr, attrs);
page             1565 arch/arm/mm/dma-mapping.c 	struct page **pages;
page             1603 arch/arm/mm/dma-mapping.c 	struct page **pages = __iommu_get_pages(cpu_addr, attrs);
page             1837 arch/arm/mm/dma-mapping.c static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *page,
page             1851 arch/arm/mm/dma-mapping.c 	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
page             1871 arch/arm/mm/dma-mapping.c static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
page             1876 arch/arm/mm/dma-mapping.c 		__dma_page_cpu_to_dev(page, offset, size, dir);
page             1878 arch/arm/mm/dma-mapping.c 	return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs);
page             1919 arch/arm/mm/dma-mapping.c 	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
page             1927 arch/arm/mm/dma-mapping.c 		__dma_page_dev_to_cpu(page, offset, size, dir);
page             1995 arch/arm/mm/dma-mapping.c 	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
page             2001 arch/arm/mm/dma-mapping.c 	__dma_page_dev_to_cpu(page, offset, size, dir);
page             2009 arch/arm/mm/dma-mapping.c 	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
page             2015 arch/arm/mm/dma-mapping.c 	__dma_page_cpu_to_dev(page, offset, size, dir);
page              182 arch/arm/mm/fault-armv.c 	struct page *page;
page              191 arch/arm/mm/fault-armv.c 	page = pfn_to_page(pfn);
page              192 arch/arm/mm/fault-armv.c 	if (page == ZERO_PAGE(0))
page              195 arch/arm/mm/fault-armv.c 	mapping = page_mapping_file(page);
page              196 arch/arm/mm/fault-armv.c 	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
page              197 arch/arm/mm/fault-armv.c 		__flush_dcache_page(mapping, page);
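
Note: the fault-armv.c lines above are the lazy D-cache idiom that the PG_dcache_clean bit exists for, and the same shape recurs in arm/mm/flush.c and csky/abiv1/cacheflush.c below. Its core is a single atomic test-and-set:

        /* The idiom in two lines: flush only when the clean bit was not
         * already set, and set it atomically in the same operation, so the
         * flush runs at most once per dirtying of the page. */
        if (!test_and_set_bit(PG_dcache_clean, &page->flags))
                __flush_dcache_page(page_mapping_file(page), page);
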
page              231 arch/arm/mm/fault-armv.c 	struct page *page;
page              237 arch/arm/mm/fault-armv.c 	page = alloc_page(GFP_KERNEL);
page              238 arch/arm/mm/fault-armv.c 	if (page) {
page              243 arch/arm/mm/fault-armv.c 		p1 = vmap(&page, 1, VM_IOREMAP, prot);
page              244 arch/arm/mm/fault-armv.c 		p2 = vmap(&page, 1, VM_IOREMAP, prot);
page              255 arch/arm/mm/fault-armv.c 		put_page(page);
page              128 arch/arm/mm/flush.c void __flush_ptrace_access(struct page *page, unsigned long uaddr, void *kaddr,
page              140 arch/arm/mm/flush.c 		flush_pfn_alias(page_to_pfn(page), uaddr);
page              149 arch/arm/mm/flush.c 			flush_icache_alias(page_to_pfn(page), uaddr, len);
page              159 arch/arm/mm/flush.c void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
page              167 arch/arm/mm/flush.c 	__flush_ptrace_access(page, uaddr, kaddr, len, flags);
page              170 arch/arm/mm/flush.c void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
page              175 arch/arm/mm/flush.c 	__flush_ptrace_access(page, uaddr, kaddr, len, flags);
page              185 arch/arm/mm/flush.c void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
page              193 arch/arm/mm/flush.c 	flush_ptrace_access(vma, page, uaddr, dst, len);
page              199 arch/arm/mm/flush.c void __flush_dcache_page(struct address_space *mapping, struct page *page)
page              206 arch/arm/mm/flush.c 	if (!PageHighMem(page)) {
page              207 arch/arm/mm/flush.c 		__cpuc_flush_dcache_area(page_address(page), page_size(page));
page              211 arch/arm/mm/flush.c 			for (i = 0; i < compound_nr(page); i++) {
page              212 arch/arm/mm/flush.c 				void *addr = kmap_atomic(page + i);
page              217 arch/arm/mm/flush.c 			for (i = 0; i < compound_nr(page); i++) {
page              218 arch/arm/mm/flush.c 				void *addr = kmap_high_get(page + i);
page              221 arch/arm/mm/flush.c 					kunmap_high(page + i);
page              233 arch/arm/mm/flush.c 		flush_pfn_alias(page_to_pfn(page),
page              234 arch/arm/mm/flush.c 				page->index << PAGE_SHIFT);
page              237 arch/arm/mm/flush.c static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)
page              249 arch/arm/mm/flush.c 	pgoff = page->index;
page              263 arch/arm/mm/flush.c 		flush_cache_page(mpnt, mpnt->vm_start + offset, page_to_pfn(page));
page              272 arch/arm/mm/flush.c 	struct page *page;
page              282 arch/arm/mm/flush.c 	page = pfn_to_page(pfn);
page              284 arch/arm/mm/flush.c 		mapping = page_mapping_file(page);
page              288 arch/arm/mm/flush.c 	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
page              289 arch/arm/mm/flush.c 		__flush_dcache_page(mapping, page);
page              315 arch/arm/mm/flush.c void flush_dcache_page(struct page *page)
page              323 arch/arm/mm/flush.c 	if (page == ZERO_PAGE(0))
page              327 arch/arm/mm/flush.c 		if (test_bit(PG_dcache_clean, &page->flags))
page              328 arch/arm/mm/flush.c 			clear_bit(PG_dcache_clean, &page->flags);
page              332 arch/arm/mm/flush.c 	mapping = page_mapping_file(page);
page              335 arch/arm/mm/flush.c 	    mapping && !page_mapcount(page))
page              336 arch/arm/mm/flush.c 		clear_bit(PG_dcache_clean, &page->flags);
page              338 arch/arm/mm/flush.c 		__flush_dcache_page(mapping, page);
page              340 arch/arm/mm/flush.c 			__flush_dcache_aliases(mapping, page);
page              343 arch/arm/mm/flush.c 		set_bit(PG_dcache_clean, &page->flags);
page              357 arch/arm/mm/flush.c void flush_kernel_dcache_page(struct page *page)
page              362 arch/arm/mm/flush.c 		mapping = page_mapping_file(page);
page              367 arch/arm/mm/flush.c 			addr = page_address(page);
page              390 arch/arm/mm/flush.c void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
page              401 arch/arm/mm/flush.c 	pfn = page_to_pfn(page);
page              418 arch/arm/mm/flush.c 	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
page               34 arch/arm/mm/highmem.c void *kmap(struct page *page)
page               37 arch/arm/mm/highmem.c 	if (!PageHighMem(page))
page               38 arch/arm/mm/highmem.c 		return page_address(page);
page               39 arch/arm/mm/highmem.c 	return kmap_high(page);
page               43 arch/arm/mm/highmem.c void kunmap(struct page *page)
page               46 arch/arm/mm/highmem.c 	if (!PageHighMem(page))
page               48 arch/arm/mm/highmem.c 	kunmap_high(page);
page               52 arch/arm/mm/highmem.c void *kmap_atomic(struct page *page)
page               61 arch/arm/mm/highmem.c 	if (!PageHighMem(page))
page               62 arch/arm/mm/highmem.c 		return page_address(page);
page               73 arch/arm/mm/highmem.c 		kmap = kmap_high_get(page);
page               93 arch/arm/mm/highmem.c 	set_fixmap_pte(idx, mk_pte(page, kmap_prot));
page              130 arch/arm/mm/highmem.c 	struct page *page = pfn_to_page(pfn);
page              134 arch/arm/mm/highmem.c 	if (!PageHighMem(page))
page              135 arch/arm/mm/highmem.c 		return page_address(page);
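
Note: the arm highmem.c fragments above give both mapping flavors: kmap() may sleep and installs a persistent mapping, while kmap_atomic() borrows a per-CPU fixmap slot and forbids sleeping until kunmap_atomic(). A minimal usage sketch (zero_highmem_page is a made-up helper):

        /* Hypothetical helper using the API listed above: short,
         * non-sleeping access to a possibly-highmem page. */
        static void zero_highmem_page(struct page *page)
        {
                void *addr = kmap_atomic(page);

                memset(addr, 0, PAGE_SIZE);
                kunmap_atomic(addr);
        }
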
page              329 arch/arm/mm/init.c 	struct page *start_pg, *end_pg;
page               54 arch/arm/mm/mm.h extern void __flush_dcache_page(struct address_space *mapping, struct page *page);
page               46 arch/arm/mm/mmu.c struct page *empty_zero_page;
page              163 arch/arm/mm/nommu.c void flush_dcache_page(struct page *page)
page              165 arch/arm/mm/nommu.c 	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
page              169 arch/arm/mm/nommu.c void flush_kernel_dcache_page(struct page *page)
page              171 arch/arm/mm/nommu.c 	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
page              175 arch/arm/mm/nommu.c void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
page              113 arch/arm/probes/uprobes/core.c void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
page              116 arch/arm/probes/uprobes/core.c 	void *xol_page_kaddr = kmap_atomic(page);
page              125 arch/arm/probes/uprobes/core.c 	flush_uprobe_xol_access(page, vaddr, dst, len);
page               66 arch/arm/xen/enlighten.c 			       int nr, struct page **pages)
page               91 arch/arm/xen/p2m.c 			    struct page **pages, unsigned int count)
page              108 arch/arm/xen/p2m.c 			      struct page **pages, unsigned int count)
page              124 arch/arm64/include/asm/cacheflush.h extern void copy_to_user_page(struct vm_area_struct *, struct page *,
page              126 arch/arm64/include/asm/cacheflush.h #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
page              146 arch/arm64/include/asm/cacheflush.h extern void flush_dcache_page(struct page *);
page              164 arch/arm64/include/asm/cacheflush.h #define flush_icache_page(vma,page)	do { } while (0)
page              179 arch/arm64/include/asm/cacheflush.h int set_direct_map_invalid_noflush(struct page *page);
page              180 arch/arm64/include/asm/cacheflush.h int set_direct_map_default_noflush(struct page *page);
page               32 arch/arm64/include/asm/hugetlb.h static inline void arch_clear_hugepage_flags(struct page *page)
page               34 arch/arm64/include/asm/hugetlb.h 	clear_bit(PG_dcache_clean, &page->flags);
page               38 arch/arm64/include/asm/hugetlb.h 				struct page *page, int writable);
page              141 arch/arm64/include/asm/kvm_mmu.h 	struct page *ptr_page = virt_to_page(ptr);
page              344 arch/arm64/include/asm/kvm_mmu.h 		struct page *page = pte_page(pte);
page              345 arch/arm64/include/asm/kvm_mmu.h 		kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
page              352 arch/arm64/include/asm/kvm_mmu.h 		struct page *page = pmd_page(pmd);
page              353 arch/arm64/include/asm/kvm_mmu.h 		kvm_flush_dcache_to_poc(page_address(page), PMD_SIZE);
page              360 arch/arm64/include/asm/kvm_mmu.h 		struct page *page = pud_page(pud);
page              361 arch/arm64/include/asm/kvm_mmu.h 		kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE);
page              281 arch/arm64/include/asm/memory.h #define page_to_phys(page)	(__pfn_to_phys(page_to_pfn(page)))
page              323 arch/arm64/include/asm/memory.h 	u64 __idx = ((u64)__page - VMEMMAP_START) / sizeof(struct page);\
page              330 arch/arm64/include/asm/memory.h 	u64 __addr = VMEMMAP_START + (__idx * sizeof(struct page));	\
page              331 arch/arm64/include/asm/memory.h 	(struct page *)__addr;						\
page               27 arch/arm64/include/asm/page.h typedef struct page *pgtable_t;
page               25 arch/arm64/include/asm/pgalloc.h 	struct page *page;
page               30 arch/arm64/include/asm/pgalloc.h 	page = alloc_page(gfp);
page               31 arch/arm64/include/asm/pgalloc.h 	if (!page)
page               33 arch/arm64/include/asm/pgalloc.h 	if (!pgtable_pmd_page_ctor(page)) {
page               34 arch/arm64/include/asm/pgalloc.h 		__free_page(page);
page               37 arch/arm64/include/asm/pgalloc.h 	return page_address(page);
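
Note: the arm64 pgalloc.h fragment above is nearly a whole function. Read as one unit, under the assumption that it completes the standard pmd_alloc_one() contract (the GFP choice and failure returns are inferred, not shown in the listing):

        /* Assumed completion of the fragment above: allocate one page for a
         * PMD table, register it with the page-table accounting ctor, and
         * return its kernel virtual address; back out cleanly on failure. */
        static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
        {
                gfp_t gfp = GFP_PGTABLE_USER;
                struct page *page;

                page = alloc_page(gfp);
                if (!page)
                        return NULL;
                if (!pgtable_pmd_page_ctor(page)) {
                        __free_page(page);
                        return NULL;
                }
                return page_address(page);
        }
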
page               36 arch/arm64/include/asm/pgtable.h extern struct page *vmemmap;
page              389 arch/arm64/include/asm/pgtable.h #define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)
page              519 arch/arm64/include/asm/pgtable.h #define mk_pte(page,prot)	pfn_pte(page_to_pfn(page),prot)
page               16 arch/arm64/include/asm/tlb.h 	free_page_and_swap_cache((struct page *)_table);
page               55 arch/arm64/include/asm/tlb.h 	struct page *page = virt_to_page(pmdp);
page               57 arch/arm64/include/asm/tlb.h 	pgtable_pmd_page_dtor(page);
page               58 arch/arm64/include/asm/tlb.h 	tlb_remove_table(tlb, page);
page              436 arch/arm64/include/asm/uaccess.h struct page;
page              437 arch/arm64/include/asm/uaccess.h void memcpy_page_flushcache(char *to, struct page *page, size_t offset, size_t len);
page               85 arch/arm64/kernel/insn.c 	struct page *page;
page               88 arch/arm64/kernel/insn.c 		page = vmalloc_to_page(addr);
page               90 arch/arm64/kernel/insn.c 		page = phys_to_page(__pa_symbol(addr));
page               94 arch/arm64/kernel/insn.c 	BUG_ON(!page);
page               95 arch/arm64/kernel/insn.c 	return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
page              358 arch/arm64/kernel/machine_kexec.c 	struct page *page;
page              361 arch/arm64/kernel/machine_kexec.c 		page = phys_to_page(addr);
page              362 arch/arm64/kernel/machine_kexec.c 		free_reserved_page(page);
page              152 arch/arm64/kernel/perf_event.c 			   struct device_attribute *attr, char *page)
page              158 arch/arm64/kernel/perf_event.c 	return sprintf(page, "event=0x%03llx\n", pmu_attr->id);
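
Note: both perf fragments (arc/kernel/perf_event.c earlier in this listing and arm64/kernel/perf_event.c here) are the sysfs events-attribute pattern: page is the PAGE_SIZE sysfs buffer and the show callback prints one event encoding into it. A sketch assuming the stock perf_pmu_events_attr layout:

        /* Sysfs show callback as implied by both fragments: recover the
         * events attribute via container_of() and format its encoding. */
        static ssize_t event_show(struct device *dev,
                                  struct device_attribute *attr, char *page)
        {
                struct perf_pmu_events_attr *pmu_attr =
                        container_of(attr, struct perf_pmu_events_attr, attr);

                return sprintf(page, "event=0x%03llx\n", pmu_attr->id);
        }
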
page              123 arch/arm64/kernel/probes/kprobes.c 	void *page;
page              125 arch/arm64/kernel/probes/kprobes.c 	page = vmalloc_exec(PAGE_SIZE);
page              126 arch/arm64/kernel/probes/kprobes.c 	if (page) {
page              127 arch/arm64/kernel/probes/kprobes.c 		set_memory_ro((unsigned long)page, 1);
page              128 arch/arm64/kernel/probes/kprobes.c 		set_vm_flush_reset_perms(page);
page              131 arch/arm64/kernel/probes/kprobes.c 	return page;
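
Note: the arm64 kprobes fragment above also reads as one unit: executable vmalloc memory for the out-of-line single-step slot, made read-only, with permissions reset when the area is freed. Assembled under the assumption that it is the standard alloc_insn_page() hook:

        /* The listed fragment as one function: RO+X page for out-of-line
         * single-stepping; set_vm_flush_reset_perms() restores default
         * permissions when the vmalloc area is torn down. */
        void *alloc_insn_page(void)
        {
                void *page;

                page = vmalloc_exec(PAGE_SIZE);
                if (page) {
                        set_memory_ro((unsigned long)page, 1);
                        set_vm_flush_reset_perms(page);
                }
                return page;
        }
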
page               14 arch/arm64/kernel/probes/uprobes.c void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
page               17 arch/arm64/kernel/probes/uprobes.c 	void *xol_page_kaddr = kmap_atomic(page);
page               80 arch/arm64/kernel/vdso.c 	u8			page[PAGE_SIZE];
page              103 arch/arm64/kernel/vdso.c 	struct page **vdso_pagelist;
page              118 arch/arm64/kernel/vdso.c 				sizeof(struct page *),
page              209 arch/arm64/kernel/vdso.c static struct page *aarch32_vdso_pages[C_PAGES] __ro_after_init;
page               22 arch/arm64/lib/uaccess_flushcache.c void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
page               25 arch/arm64/lib/uaccess_flushcache.c 	memcpy_flushcache(to, page_address(page) + offset, len);
page               16 arch/arm64/mm/copypage.c 	struct page *page = virt_to_page(kto);
page               18 arch/arm64/mm/copypage.c 	flush_dcache_page(page);
page               28 arch/arm64/mm/dma-mapping.c void arch_dma_prep_coherent(struct page *page, size_t size)
page               30 arch/arm64/mm/dma-mapping.c 	__dma_flush_area(page_address(page), size);
page               33 arch/arm64/mm/flush.c static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
page               46 arch/arm64/mm/flush.c void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
page               51 arch/arm64/mm/flush.c 	flush_ptrace_access(vma, page, uaddr, dst, len);
page               56 arch/arm64/mm/flush.c 	struct page *page = pte_page(pte);
page               58 arch/arm64/mm/flush.c 	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
page               59 arch/arm64/mm/flush.c 		sync_icache_aliases(page_address(page), page_size(page));
page               68 arch/arm64/mm/flush.c void flush_dcache_page(struct page *page)
page               70 arch/arm64/mm/flush.c 	if (test_bit(PG_dcache_clean, &page->flags))
page               71 arch/arm64/mm/flush.c 		clear_bit(PG_dcache_clean, &page->flags);
page              298 arch/arm64/mm/hugetlbpage.c 			 struct page *page, int writable)
page               56 arch/arm64/mm/init.c struct page *vmemmap __ro_after_init;
page              326 arch/arm64/mm/init.c 	vmemmap = ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT));
page              464 arch/arm64/mm/init.c 	struct page *start_pg, *end_pg;
page              151 arch/arm64/mm/pageattr.c int set_direct_map_invalid_noflush(struct page *page)
page              162 arch/arm64/mm/pageattr.c 				   (unsigned long)page_address(page),
page              166 arch/arm64/mm/pageattr.c int set_direct_map_default_noflush(struct page *page)
page              177 arch/arm64/mm/pageattr.c 				   (unsigned long)page_address(page),
page              181 arch/arm64/mm/pageattr.c void __kernel_map_pages(struct page *page, int numpages, int enable)
page              186 arch/arm64/mm/pageattr.c 	set_memory_valid((unsigned long)page_address(page), numpages, enable);
page              198 arch/arm64/mm/pageattr.c bool kernel_page_present(struct page *page)
page              204 arch/arm64/mm/pageattr.c 	unsigned long addr = (unsigned long)page_address(page);
page               30 arch/c6x/include/asm/cacheflush.h #define flush_dcache_page(page)			do {} while (0)
page               43 arch/c6x/include/asm/cacheflush.h #define flush_icache_page(vma, page)					  \
page               46 arch/c6x/include/asm/cacheflush.h 		L1D_cache_block_writeback_invalidate(page_address(page),  \
page               47 arch/c6x/include/asm/cacheflush.h 			(unsigned long) page_address(page) + PAGE_SIZE)); \
page               48 arch/c6x/include/asm/cacheflush.h 		L1P_cache_block_invalidate(page_address(page),		  \
page               49 arch/c6x/include/asm/cacheflush.h 			(unsigned long) page_address(page) + PAGE_SIZE)); \
page               53 arch/c6x/include/asm/cacheflush.h #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
page               59 arch/c6x/include/asm/cacheflush.h #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
page               67 arch/c6x/include/asm/processor.h #define free_kernel_stack(page) free_page((page))
page               16 arch/csky/abiv1/cacheflush.c void flush_dcache_page(struct page *page)
page               20 arch/csky/abiv1/cacheflush.c 	if (page == ZERO_PAGE(0))
page               23 arch/csky/abiv1/cacheflush.c 	mapping = page_mapping_file(page);
page               25 arch/csky/abiv1/cacheflush.c 	if (mapping && !page_mapcount(page))
page               26 arch/csky/abiv1/cacheflush.c 		clear_bit(PG_dcache_clean, &page->flags);
page               31 arch/csky/abiv1/cacheflush.c 		set_bit(PG_dcache_clean, &page->flags);
page               40 arch/csky/abiv1/cacheflush.c 	struct page *page;
page               45 arch/csky/abiv1/cacheflush.c 	page = pfn_to_page(pfn);
page               46 arch/csky/abiv1/cacheflush.c 	if (page == ZERO_PAGE(0))
page               49 arch/csky/abiv1/cacheflush.c 	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
page               52 arch/csky/abiv1/cacheflush.c 	if (page_mapping_file(page)) {
page               58 arch/csky/abiv1/cacheflush.c void flush_kernel_dcache_page(struct page *page)
page               62 arch/csky/abiv1/cacheflush.c 	mapping = page_mapping_file(page);
page               12 arch/csky/abiv1/inc/abi/cacheflush.h extern void flush_dcache_page(struct page *);
page               15 arch/csky/abiv1/inc/abi/cacheflush.h #define flush_cache_page(vma, page, pfn)	cache_wbinv_all()
page               19 arch/csky/abiv1/inc/abi/cacheflush.h extern void flush_kernel_dcache_page(struct page *);
page               35 arch/csky/abiv1/inc/abi/cacheflush.h 			 struct page *page, unsigned long vmaddr)
page               37 arch/csky/abiv1/inc/abi/cacheflush.h 	if (PageAnon(page))
page               49 arch/csky/abiv1/inc/abi/cacheflush.h #define flush_icache_page(vma, page)		do {} while (0);
page               52 arch/csky/abiv1/inc/abi/cacheflush.h #define flush_icache_user_range(vma,page,addr,len) \
page               53 arch/csky/abiv1/inc/abi/cacheflush.h 	flush_dcache_page(page)
page               55 arch/csky/abiv1/inc/abi/cacheflush.h #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
page               60 arch/csky/abiv1/inc/abi/cacheflush.h #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
page                6 arch/csky/abiv1/inc/abi/page.h extern void flush_dcache_page(struct page *page);
page               15 arch/csky/abiv1/inc/abi/page.h 				   struct page *page)
page               19 arch/csky/abiv1/inc/abi/page.h 		flush_dcache_page(page);
page               23 arch/csky/abiv1/inc/abi/page.h 				  struct page *page)
page               27 arch/csky/abiv1/inc/abi/page.h 		flush_dcache_page(page);
page                9 arch/csky/abiv2/cacheflush.c void flush_icache_page(struct vm_area_struct *vma, struct page *page)
page               13 arch/csky/abiv2/cacheflush.c 	start = (unsigned long) kmap_atomic(page);
page               20 arch/csky/abiv2/cacheflush.c void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
page               25 arch/csky/abiv2/cacheflush.c 	kaddr = (unsigned long) kmap_atomic(page) + (vaddr & ~PAGE_MASK);
page               36 arch/csky/abiv2/cacheflush.c 	struct page *page;
page               42 arch/csky/abiv2/cacheflush.c 	page = pfn_to_page(pfn);
page               43 arch/csky/abiv2/cacheflush.c 	if (page == ZERO_PAGE(0))
page               46 arch/csky/abiv2/cacheflush.c 	addr = (unsigned long) kmap_atomic(page);
page               25 arch/csky/abiv2/inc/abi/cacheflush.h #define flush_dcache_page(page)			do { } while (0)
page               31 arch/csky/abiv2/inc/abi/cacheflush.h void flush_icache_page(struct vm_area_struct *vma, struct page *page);
page               32 arch/csky/abiv2/inc/abi/cacheflush.h void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
page               38 arch/csky/abiv2/inc/abi/cacheflush.h #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
page               43 arch/csky/abiv2/inc/abi/cacheflush.h #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
page                5 arch/csky/abiv2/inc/abi/page.h 				   struct page *page)
page               11 arch/csky/abiv2/inc/abi/page.h 				  struct page *page)
page               33 arch/csky/include/asm/highmem.h extern void *kmap_high(struct page *page);
page               34 arch/csky/include/asm/highmem.h extern void kunmap_high(struct page *page);
page               36 arch/csky/include/asm/highmem.h extern void *kmap(struct page *page);
page               37 arch/csky/include/asm/highmem.h extern void kunmap(struct page *page);
page               38 arch/csky/include/asm/highmem.h extern void *kmap_atomic(struct page *page);
page               41 arch/csky/include/asm/highmem.h extern struct page *kmap_atomic_to_page(void *ptr);
page               47 arch/csky/include/asm/page.h #define clear_page(page)	memset((page), 0, PAGE_SIZE)
page               50 arch/csky/include/asm/page.h #define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
page               53 arch/csky/include/asm/page.h struct page;
page               64 arch/csky/include/asm/page.h typedef struct page *pgtable_t;
page              280 arch/csky/include/asm/pgtable.h #define mk_pte(page, pgprot)    pfn_pte(page_to_pfn(page), (pgprot))
page               18 arch/csky/include/asm/tlbflush.h extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
page               18 arch/csky/kernel/vdso.c static struct page *vdso_page;
page               20 arch/csky/mm/dma-mapping.c 	struct page *page    = phys_to_page(paddr);
page               21 arch/csky/mm/dma-mapping.c 	void *start          = __va(page_to_phys(page));
page               31 arch/csky/mm/dma-mapping.c 		if (PageHighMem(page)) {
page               32 arch/csky/mm/dma-mapping.c 			start = kmap_atomic(page);
page               44 arch/csky/mm/dma-mapping.c 		page++;
page               56 arch/csky/mm/dma-mapping.c void arch_dma_prep_coherent(struct page *page, size_t size)
page               58 arch/csky/mm/dma-mapping.c 	cache_op(page_to_phys(page), size, dma_wbinv_set_zero_range);
page               16 arch/csky/mm/highmem.c void *kmap(struct page *page)
page               21 arch/csky/mm/highmem.c 	if (!PageHighMem(page))
page               22 arch/csky/mm/highmem.c 		return page_address(page);
page               23 arch/csky/mm/highmem.c 	addr = kmap_high(page);
page               30 arch/csky/mm/highmem.c void kunmap(struct page *page)
page               33 arch/csky/mm/highmem.c 	if (!PageHighMem(page))
page               35 arch/csky/mm/highmem.c 	kunmap_high(page);
page               39 arch/csky/mm/highmem.c void *kmap_atomic(struct page *page)
page               46 arch/csky/mm/highmem.c 	if (!PageHighMem(page))
page               47 arch/csky/mm/highmem.c 		return page_address(page);
page               55 arch/csky/mm/highmem.c 	set_pte(kmap_pte-idx, mk_pte(page, PAGE_KERNEL));
page              107 arch/csky/mm/highmem.c struct page *kmap_atomic_to_page(void *ptr)
page               54 arch/csky/mm/init.c 		struct page *page = pfn_to_page(tmp);
page               58 arch/csky/mm/init.c 			free_highmem_page(page);
page               34 arch/hexagon/include/asm/cacheflush.h #define flush_dcache_page(page)			do { } while (0)
page               79 arch/hexagon/include/asm/cacheflush.h void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
page               82 arch/hexagon/include/asm/cacheflush.h #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
page               72 arch/hexagon/include/asm/page.h typedef struct page *pgtable_t;
page               90 arch/hexagon/include/asm/page.h struct page;
page              103 arch/hexagon/include/asm/page.h static inline void clear_page(void *page)
page              110 arch/hexagon/include/asm/page.h 		: "+r" (page)
page              121 arch/hexagon/include/asm/page.h #define clear_user_page(page, vaddr, pg)	clear_page(page)
page              128 arch/hexagon/include/asm/page.h #define page_to_phys(page)      (page_to_pfn(page) << PAGE_SHIFT)
page              133 arch/hexagon/include/asm/page.h #define page_to_virt(page)	__va(page_to_phys(page))
page              295 arch/hexagon/include/asm/pgtable.h #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
page               15 arch/hexagon/kernel/vdso.c static struct page *vdso_page;
page              118 arch/hexagon/mm/cache.c void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
page               77 arch/hexagon/mm/init.c 	struct page *page;
page               79 arch/hexagon/mm/init.c 	page = pte_page(pte);
page               80 arch/hexagon/mm/init.c 	addr = (unsigned long) page_address(page);
page              815 arch/ia64/hp/common/sba_iommu.c 		struct page *page = virt_to_page((void *)pg_addr);
page              816 arch/ia64/hp/common/sba_iommu.c 		set_bit(PG_arch_1, &page->flags);
page              912 arch/ia64/hp/common/sba_iommu.c static dma_addr_t sba_map_page(struct device *dev, struct page *page,
page              918 arch/ia64/hp/common/sba_iommu.c 	void *addr = page_address(page) + poff;
page             1114 arch/ia64/hp/common/sba_iommu.c 	struct page *page;
page             1125 arch/ia64/hp/common/sba_iommu.c 	page = alloc_pages_node(node, flags, get_order(size));
page             1126 arch/ia64/hp/common/sba_iommu.c 	if (unlikely(!page))
page             1129 arch/ia64/hp/common/sba_iommu.c 	addr = page_address(page);
page             1131 arch/ia64/hp/common/sba_iommu.c 	*dma_handle = page_to_phys(page);
page             1150 arch/ia64/hp/common/sba_iommu.c 	*dma_handle = sba_map_page(&ioc->sac_only_dev->dev, page, 0, size,
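
The sba_iommu.c hits sketch the coherent-allocation pattern: grab node-local pages, then derive both the CPU address and the bus address from the same struct page, falling back to an IOMMU mapping (sba_map_page) only when the device cannot address the memory directly. A trimmed sketch of the direct case, with the dev/ioc plumbing omitted:

	static void *sac_alloc_sketch(int node, size_t size, dma_addr_t *dma_handle, gfp_t flags)
	{
		struct page *page = alloc_pages_node(node, flags, get_order(size));

		if (!page)
			return NULL;

		*dma_handle = page_to_phys(page);	/* identity-mapped (single-address-cycle) case */
		return page_address(page);
	}
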
page               17 arch/ia64/include/asm/agp.h #define map_page_into_agp(page)		/* nothing */
page               18 arch/ia64/include/asm/agp.h #define unmap_page_from_agp(page)	/* nothing */
page               25 arch/ia64/include/asm/cacheflush.h #define flush_icache_page(vma,page)		do { } while (0)
page               30 arch/ia64/include/asm/cacheflush.h #define flush_dcache_page(page)			\
page               32 arch/ia64/include/asm/cacheflush.h 	clear_bit(PG_arch_1, &(page)->flags);	\
page               42 arch/ia64/include/asm/cacheflush.h #define flush_icache_user_range(vma, page, user_addr, len)					\
page               44 arch/ia64/include/asm/cacheflush.h 	unsigned long _addr = (unsigned long) page_address(page) + ((user_addr) & ~PAGE_MASK);	\
page               48 arch/ia64/include/asm/cacheflush.h #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
page               50 arch/ia64/include/asm/cacheflush.h      flush_icache_user_range(vma, page, vaddr, len); \
page               52 arch/ia64/include/asm/cacheflush.h #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
page               30 arch/ia64/include/asm/hugetlb.h static inline void arch_clear_hugepage_flags(struct page *page)
page               19 arch/ia64/include/asm/kexec.h #define kexec_flush_icache_page(page) do { \
page               20 arch/ia64/include/asm/kexec.h                 unsigned long page_addr = (unsigned long)page_address(page); \
page               64 arch/ia64/include/asm/meminit.h   extern struct page *vmem_map;
page               65 arch/ia64/include/asm/page.h extern void clear_page (void *page);
page               72 arch/ia64/include/asm/page.h #define clear_user_page(addr, vaddr, page)	\
page               75 arch/ia64/include/asm/page.h 	flush_dcache_page(page);		\
page               78 arch/ia64/include/asm/page.h #define copy_user_page(to, from, vaddr, page)	\
page               81 arch/ia64/include/asm/page.h 	flush_dcache_page(page);		\
page               87 arch/ia64/include/asm/page.h 	struct page *page = alloc_page_vma(				\
page               89 arch/ia64/include/asm/page.h 	if (page)							\
page               90 arch/ia64/include/asm/page.h  		flush_dcache_page(page);				\
page               91 arch/ia64/include/asm/page.h 	page;								\
page              105 arch/ia64/include/asm/page.h extern struct page *vmem_map;
page              107 arch/ia64/include/asm/page.h # define page_to_pfn(page)	((unsigned long) (page - vmem_map))
page              125 arch/ia64/include/asm/page.h #define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
page              183 arch/ia64/include/asm/page.h   typedef struct page *pgtable_t;
page              206 arch/ia64/include/asm/page.h     typedef struct page *pgtable_t;
page              233 arch/ia64/include/asm/pgtable.h # define vmemmap		((struct page *)VMALLOC_END)
page              256 arch/ia64/include/asm/pgtable.h #define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
page              487 arch/ia64/include/asm/pgtable.h extern struct page *zero_page_memmap_ptr;
page               85 arch/ia64/include/asm/thread_info.h 	struct page *page = alloc_pages_node(node, GFP_KERNEL | __GFP_COMP,	\
page               87 arch/ia64/include/asm/thread_info.h 	struct task_struct *ret = page ? page_address(page) : NULL;		\
page              264 arch/ia64/include/asm/uaccess.h 	struct page *page;
page              267 arch/ia64/include/asm/uaccess.h 	page = pfn_to_page(p >> PAGE_SHIFT);
page              268 arch/ia64/include/asm/uaccess.h 	if (PageUncached(page))
page              282 arch/ia64/include/asm/uaccess.h 	struct page *page;
page              285 arch/ia64/include/asm/uaccess.h 	page = virt_to_page((unsigned long)p);
page              286 arch/ia64/include/asm/uaccess.h 	if (PageUncached(page))
page               54 arch/ia64/kernel/mca_drv.c static struct page *page_isolate[MAX_PAGE_ISOLATE];
page              117 arch/ia64/kernel/mca_drv.c 	struct page *p;
page               77 arch/ia64/kernel/uncached.c 	struct page *page;
page               97 arch/ia64/kernel/uncached.c 	page = __alloc_pages_node(nid,
page              100 arch/ia64/kernel/uncached.c 	if (!page) {
page              107 arch/ia64/kernel/uncached.c 	c_addr = (unsigned long)page_address(page);
page              116 arch/ia64/kernel/uncached.c 		SetPageUncached(&page[i]);
page              161 arch/ia64/kernel/uncached.c 		ClearPageUncached(&page[i]);
page              191 arch/ia64/mm/contig.c 		vmem_map = (struct page *) 0;
page              198 arch/ia64/mm/contig.c 			sizeof(struct page));
page              200 arch/ia64/mm/contig.c 		vmem_map = (struct page *) VMALLOC_END;
page              609 arch/ia64/mm/discontig.c 		sizeof(struct page));
page              610 arch/ia64/mm/discontig.c 	vmem_map = (struct page *) VMALLOC_END;
page               88 arch/ia64/mm/hugetlbpage.c struct page *follow_huge_addr(struct mm_struct *mm, unsigned long addr, int write)
page               90 arch/ia64/mm/hugetlbpage.c 	struct page *page;
page               99 arch/ia64/mm/hugetlbpage.c 	page = pte_page(*ptep);
page              100 arch/ia64/mm/hugetlbpage.c 	page += ((addr & ~HPAGE_MASK) >> PAGE_SHIFT);
page              101 arch/ia64/mm/hugetlbpage.c 	return page;
page               48 arch/ia64/mm/init.c struct page *vmem_map;
page               52 arch/ia64/mm/init.c struct page *zero_page_memmap_ptr;	/* map entry for zero page */
page               59 arch/ia64/mm/init.c 	struct page *page;
page               61 arch/ia64/mm/init.c 	page = pte_page(pte);
page               62 arch/ia64/mm/init.c 	addr = (unsigned long) page_address(page);
page               64 arch/ia64/mm/init.c 	if (test_bit(PG_arch_1, &page->flags))
page               67 arch/ia64/mm/init.c 	flush_icache_range(addr, addr + page_size(page));
page               68 arch/ia64/mm/init.c 	set_bit(PG_arch_1, &page->flags);	/* mark page as clean */
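
The ia64/mm/init.c hits above implement lazy I-cache synchronization: PG_arch_1 means "this page is already coherent for execution", so the flush runs at most once per dirtying (flush_dcache_page, quoted earlier from cacheflush.h, clears the bit again). A sketch of the idea, using PAGE_SIZE where the original handles compound pages via page_size():

	static void lazy_icache_sync(struct page *page)
	{
		unsigned long addr = (unsigned long)page_address(page);

		if (test_bit(PG_arch_1, &page->flags))
			return;				/* already flushed once */

		flush_icache_range(addr, addr + PAGE_SIZE);
		set_bit(PG_arch_1, &page->flags);	/* clean until flush_dcache_page() clears it */
	}
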
page              207 arch/ia64/mm/init.c static struct page * __init
page              208 arch/ia64/mm/init.c put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
page              229 arch/ia64/mm/init.c 		set_pte(pte, mk_pte(page, pgprot));
page              233 arch/ia64/mm/init.c 	return page;
page              239 arch/ia64/mm/init.c 	struct page *page;
page              246 arch/ia64/mm/init.c 	page = virt_to_page(ia64_imva(__start_gate_section));
page              247 arch/ia64/mm/init.c 	put_kernel_page(page, GATE_ADDR, PAGE_READONLY);
page              249 arch/ia64/mm/init.c 	page = virt_to_page(ia64_imva(__start_gate_section + PAGE_SIZE));
page              250 arch/ia64/mm/init.c 	put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE);
page              252 arch/ia64/mm/init.c 	put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE);
page              422 arch/ia64/mm/init.c 	end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1;
page              423 arch/ia64/mm/init.c 	hole_next_pfn = end_address / sizeof(struct page);
page              430 arch/ia64/mm/init.c 	struct page *map_start, *map_end;
page              471 arch/ia64/mm/init.c 			void *page = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE,
page              473 arch/ia64/mm/init.c 			if (!page)
page              475 arch/ia64/mm/init.c 			set_pte(pte, pfn_pte(__pa(page) >> PAGE_SHIFT,
page              488 arch/ia64/mm/init.c 	struct page *start;
page              489 arch/ia64/mm/init.c 	struct page *end;
page              498 arch/ia64/mm/init.c 	struct page *map_start, *map_end;
page              514 arch/ia64/mm/init.c 	map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1)) / sizeof(struct page);
page              516 arch/ia64/mm/init.c 		    / sizeof(struct page));
page              533 arch/ia64/mm/init.c 		struct page *start;
page              550 arch/ia64/mm/init.c 	struct page *pg = pfn_to_page(pfn);
page              252 arch/m68k/include/asm/cacheflush_mm.h #define flush_dcache_page(page)		__flush_page_to_ram(page_address(page))
page              255 arch/m68k/include/asm/cacheflush_mm.h #define flush_icache_page(vma, page)	__flush_page_to_ram(page_address(page))
page              257 arch/m68k/include/asm/cacheflush_mm.h extern void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
page              262 arch/m68k/include/asm/cacheflush_mm.h 				     struct page *page, unsigned long vaddr,
page              265 arch/m68k/include/asm/cacheflush_mm.h 	flush_cache_page(vma, vaddr, page_to_pfn(page));
page              267 arch/m68k/include/asm/cacheflush_mm.h 	flush_icache_user_range(vma, page, vaddr, len);
page              270 arch/m68k/include/asm/cacheflush_mm.h 				       struct page *page, unsigned long vaddr,
page              273 arch/m68k/include/asm/cacheflush_mm.h 	flush_cache_page(vma, vaddr, page_to_pfn(page));
page               18 arch/m68k/include/asm/cacheflush_no.h #define flush_dcache_page(page)			do { } while (0)
page               27 arch/m68k/include/asm/cacheflush_no.h #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
page               29 arch/m68k/include/asm/cacheflush_no.h #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
page               17 arch/m68k/include/asm/mcf_pgalloc.h 	unsigned long page = __get_free_page(GFP_DMA);
page               19 arch/m68k/include/asm/mcf_pgalloc.h 	if (!page)
page               22 arch/m68k/include/asm/mcf_pgalloc.h 	memset((void *)page, 0, PAGE_SIZE);
page               23 arch/m68k/include/asm/mcf_pgalloc.h 	return (pte_t *) (page);
page               34 arch/m68k/include/asm/mcf_pgalloc.h #define pmd_populate(mm, pmd, page) (pmd_val(*pmd) = \
page               35 arch/m68k/include/asm/mcf_pgalloc.h 	(unsigned long)(page_address(page)))
page               41 arch/m68k/include/asm/mcf_pgalloc.h static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page,
page               44 arch/m68k/include/asm/mcf_pgalloc.h 	pgtable_pte_page_dtor(page);
page               45 arch/m68k/include/asm/mcf_pgalloc.h 	__free_page(page);
page               50 arch/m68k/include/asm/mcf_pgalloc.h static inline struct page *pte_alloc_one(struct mm_struct *mm)
page               52 arch/m68k/include/asm/mcf_pgalloc.h 	struct page *page = alloc_pages(GFP_DMA, 0);
page               55 arch/m68k/include/asm/mcf_pgalloc.h 	if (!page)
page               57 arch/m68k/include/asm/mcf_pgalloc.h 	if (!pgtable_pte_page_ctor(page)) {
page               58 arch/m68k/include/asm/mcf_pgalloc.h 		__free_page(page);
page               62 arch/m68k/include/asm/mcf_pgalloc.h 	pte = kmap(page);
page               69 arch/m68k/include/asm/mcf_pgalloc.h 	kunmap(page);
page               71 arch/m68k/include/asm/mcf_pgalloc.h 	return page;
page               74 arch/m68k/include/asm/mcf_pgalloc.h static inline void pte_free(struct mm_struct *mm, struct page *page)
page               76 arch/m68k/include/asm/mcf_pgalloc.h 	pgtable_pte_page_dtor(page);
page               77 arch/m68k/include/asm/mcf_pgalloc.h 	__free_page(page);
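
The mcf_pgalloc.h hits pair every pte-table page with the generic constructor/destructor: pgtable_pte_page_ctor() must succeed before the page is usable as a page table (it sets up the split ptlock and accounting), and pgtable_pte_page_dtor() must run before the page is freed. A condensed sketch of that lifecycle:

	static struct page *pte_page_get_sketch(void)
	{
		struct page *page = alloc_pages(GFP_DMA, 0);	/* order-0, DMA zone as above */

		if (!page)
			return NULL;
		if (!pgtable_pte_page_ctor(page)) {
			__free_page(page);			/* ctor failed: still a plain page */
			return NULL;
		}
		return page;
	}

	static void pte_page_put_sketch(struct page *page)
	{
		pgtable_pte_page_dtor(page);
		__free_page(page);
	}
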
page              157 arch/m68k/include/asm/mcf_pgtable.h #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
page               33 arch/m68k/include/asm/motorola_pgalloc.h 	struct page *page;
page               36 arch/m68k/include/asm/motorola_pgalloc.h 	page = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
page               37 arch/m68k/include/asm/motorola_pgalloc.h 	if(!page)
page               39 arch/m68k/include/asm/motorola_pgalloc.h 	if (!pgtable_pte_page_ctor(page)) {
page               40 arch/m68k/include/asm/motorola_pgalloc.h 		__free_page(page);
page               44 arch/m68k/include/asm/motorola_pgalloc.h 	pte = kmap(page);
page               48 arch/m68k/include/asm/motorola_pgalloc.h 	kunmap(page);
page               49 arch/m68k/include/asm/motorola_pgalloc.h 	return page;
page               52 arch/m68k/include/asm/motorola_pgalloc.h static inline void pte_free(struct mm_struct *mm, pgtable_t page)
page               54 arch/m68k/include/asm/motorola_pgalloc.h 	pgtable_pte_page_dtor(page);
page               55 arch/m68k/include/asm/motorola_pgalloc.h 	cache_page(kmap(page));
page               56 arch/m68k/include/asm/motorola_pgalloc.h 	kunmap(page);
page               57 arch/m68k/include/asm/motorola_pgalloc.h 	__free_page(page);
page               60 arch/m68k/include/asm/motorola_pgalloc.h static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page,
page               63 arch/m68k/include/asm/motorola_pgalloc.h 	pgtable_pte_page_dtor(page);
page               64 arch/m68k/include/asm/motorola_pgalloc.h 	cache_page(kmap(page));
page               65 arch/m68k/include/asm/motorola_pgalloc.h 	kunmap(page);
page               66 arch/m68k/include/asm/motorola_pgalloc.h 	__free_page(page);
page              103 arch/m68k/include/asm/motorola_pgalloc.h static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t page)
page              105 arch/m68k/include/asm/motorola_pgalloc.h 	pmd_set(pmd, page_address(page));
page              101 arch/m68k/include/asm/motorola_pgtable.h #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
page               28 arch/m68k/include/asm/page.h typedef struct page *pgtable_t;
page               29 arch/m68k/include/asm/page_mm.h static inline void clear_page(void *page)
page               32 arch/m68k/include/asm/page_mm.h 	unsigned long *sp = page;
page               47 arch/m68k/include/asm/page_mm.h 			     : "a" (page), "0" (sp),
page               52 arch/m68k/include/asm/page_mm.h #define clear_page(page)	memset((page), 0, PAGE_SIZE)
page               56 arch/m68k/include/asm/page_mm.h #define clear_user_page(addr, vaddr, page)	\
page               58 arch/m68k/include/asm/page_mm.h 		flush_dcache_page(page);	\
page               60 arch/m68k/include/asm/page_mm.h #define copy_user_page(to, from, vaddr, page)	\
page               62 arch/m68k/include/asm/page_mm.h 		flush_dcache_page(page);	\
page              152 arch/m68k/include/asm/page_mm.h #define page_to_virt(page) ({						\
page              153 arch/m68k/include/asm/page_mm.h 	pfn_to_virt(page_to_pfn(page));					\
page              163 arch/m68k/include/asm/page_mm.h 	const struct page *__p = (_page);				\
page               10 arch/m68k/include/asm/page_no.h #define clear_page(page)	memset((page), 0, PAGE_SIZE)
page               13 arch/m68k/include/asm/page_no.h #define clear_user_page(page, vaddr, pg)	clear_page(page)
page               27 arch/m68k/include/asm/page_no.h #define page_to_virt(page)	__va(((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET))
page               30 arch/m68k/include/asm/page_no.h #define page_to_pfn(page)	virt_to_pfn(page_to_virt(page))
page               33 arch/m68k/include/asm/sun3_pgalloc.h static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t page)
page               35 arch/m68k/include/asm/sun3_pgalloc.h 	pmd_val(*pmd) = __pa((unsigned long)page_address(page));
page              103 arch/m68k/include/asm/sun3_pgtable.h #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
page               33 arch/m68k/include/asm/virtconvert.h #define page_to_phys(page) \
page               34 arch/m68k/include/asm/virtconvert.h 	__pa(PAGE_OFFSET + (((page) - pg_data_map[0].node_mem_map) << PAGE_SHIFT))
page               36 arch/m68k/include/asm/virtconvert.h #define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
page               21 arch/m68k/kernel/dma.c void arch_dma_prep_coherent(struct page *page, size_t size)
page               23 arch/m68k/kernel/dma.c 	cache_push(page_to_phys(page), size);
page              109 arch/m68k/mm/cache.c void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
page              127 arch/m68k/mm/cache.c 			      : : "a" (page_to_phys(page)));
page               32 arch/m68k/mm/memory.c #define PD_PTABLE(page) ((ptable_desc *)&(virt_to_page(page)->lru))
page               33 arch/m68k/mm/memory.c #define PD_PAGE(ptable) (list_entry(ptable, struct page, lru))
page               41 arch/m68k/mm/memory.c 	unsigned long page = ptable & PAGE_MASK;
page               42 arch/m68k/mm/memory.c 	unsigned char mask = 1 << ((ptable - page)/PTABLE_SIZE);
page               44 arch/m68k/mm/memory.c 	dp = PD_PTABLE(page);
page               74 arch/m68k/mm/memory.c 		void *page;
page               77 arch/m68k/mm/memory.c 		if (!(page = (void *)get_zeroed_page(GFP_KERNEL)))
page               80 arch/m68k/mm/memory.c 		flush_tlb_kernel_page(page);
page               81 arch/m68k/mm/memory.c 		nocache_page(page);
page               83 arch/m68k/mm/memory.c 		new = PD_PTABLE(page);
page               87 arch/m68k/mm/memory.c 		return (pmd_t *)page;
page              103 arch/m68k/mm/memory.c 	unsigned long page = (unsigned long)ptable & PAGE_MASK;
page              104 arch/m68k/mm/memory.c 	unsigned char mask = 1 << (((unsigned long)ptable - page)/PTABLE_SIZE);
page              106 arch/m68k/mm/memory.c 	dp = PD_PTABLE(page);
page              115 arch/m68k/mm/memory.c 		cache_page((void *)page);
page              116 arch/m68k/mm/memory.c 		free_page (page);
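
m68k/mm/memory.c packs several pointer tables of PTABLE_SIZE bytes into one page and tracks live slots with a small bitmask hung off the page (PD_PTABLE). A sketch of the slot arithmetic the hits above repeat, assuming PTABLE_SIZE divides PAGE_SIZE and at most eight tables share a page:

	static inline unsigned char ptable_mask_sketch(unsigned long ptable)
	{
		unsigned long page = ptable & PAGE_MASK;	/* page holding this table */
		unsigned int slot = (ptable - page) / PTABLE_SIZE;

		return 1 << slot;				/* one bit per table slot */
	}
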
page               77 arch/microblaze/include/asm/cacheflush.h #define flush_dcache_page(page) \
page               79 arch/microblaze/include/asm/cacheflush.h 	unsigned long addr = (unsigned long) page_address(page); /* virtual */ \
page              106 arch/microblaze/include/asm/cacheflush.h 				     struct page *page, unsigned long vaddr,
page              118 arch/microblaze/include/asm/cacheflush.h 				       struct page *page, unsigned long vaddr,
page               54 arch/microblaze/include/asm/highmem.h extern void *kmap_high(struct page *page);
page               55 arch/microblaze/include/asm/highmem.h extern void kunmap_high(struct page *page);
page               56 arch/microblaze/include/asm/highmem.h extern void *kmap_atomic_prot(struct page *page, pgprot_t prot);
page               59 arch/microblaze/include/asm/highmem.h static inline void *kmap(struct page *page)
page               62 arch/microblaze/include/asm/highmem.h 	if (!PageHighMem(page))
page               63 arch/microblaze/include/asm/highmem.h 		return page_address(page);
page               64 arch/microblaze/include/asm/highmem.h 	return kmap_high(page);
page               67 arch/microblaze/include/asm/highmem.h static inline void kunmap(struct page *page)
page               70 arch/microblaze/include/asm/highmem.h 	if (!PageHighMem(page))
page               72 arch/microblaze/include/asm/highmem.h 	kunmap_high(page);
page               75 arch/microblaze/include/asm/highmem.h static inline void *kmap_atomic(struct page *page)
page               77 arch/microblaze/include/asm/highmem.h 	return kmap_atomic_prot(page, kmap_prot);
page               37 arch/microblaze/include/asm/io.h #define page_to_bus(page)	(page_to_phys(page))
page               81 arch/microblaze/include/asm/page.h # define clear_user_page(pgaddr, vaddr, page)	memset((pgaddr), 0, PAGE_SIZE)
page               88 arch/microblaze/include/asm/page.h typedef struct page *pgtable_t;
page              154 arch/microblaze/include/asm/page.h #  define page_to_virt(page)   __va(page_to_pfn(page) << PAGE_SHIFT)
page              155 arch/microblaze/include/asm/page.h #  define page_to_phys(page)     (page_to_pfn(page) << PAGE_SHIFT)
page              159 arch/microblaze/include/asm/page.h #  define page_to_virt(page)	(pfn_to_virt(page_to_pfn(page)))
page              160 arch/microblaze/include/asm/page.h #  define page_to_phys(page)	(pfn_to_phys(page_to_pfn(page)))
page              161 arch/microblaze/include/asm/page.h #  define page_to_bus(page)	(page_to_phys(page))
page              375 arch/microblaze/include/asm/pgtable.h #define mk_pte(page, pgprot) \
page              378 arch/microblaze/include/asm/pgtable.h 	pte_val(pte) = (((page - mem_map) << PAGE_SHIFT) + memory_start) |  \
page               18 arch/microblaze/mm/consistent.c void arch_dma_prep_coherent(struct page *page, size_t size)
page               20 arch/microblaze/mm/consistent.c 	phys_addr_t paddr = page_to_phys(page);
page               35 arch/microblaze/mm/highmem.c void *kmap_atomic_prot(struct page *page, pgprot_t prot)
page               43 arch/microblaze/mm/highmem.c 	if (!PageHighMem(page))
page               44 arch/microblaze/mm/highmem.c 		return page_address(page);
page               53 arch/microblaze/mm/highmem.c 	set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot));
page               75 arch/microblaze/mm/init.c 		struct page *page = pfn_to_page(pfn);
page               79 arch/microblaze/mm/init.c 			free_highmem_page(page);
page               20 arch/mips/include/asm/bcache.h 	void (*bc_wback_inv)(unsigned long page, unsigned long size);
page               21 arch/mips/include/asm/bcache.h 	void (*bc_inv)(unsigned long page, unsigned long size);
page               43 arch/mips/include/asm/bcache.h static inline void bc_wback_inv(unsigned long page, unsigned long size)
page               45 arch/mips/include/asm/bcache.h 	bcops->bc_wback_inv(page, size);
page               48 arch/mips/include/asm/bcache.h static inline void bc_inv(unsigned long page, unsigned long size)
page               50 arch/mips/include/asm/bcache.h 	bcops->bc_inv(page, size);
page               79 arch/mips/include/asm/bcache.h #define bc_wback_inv(page, size) do { } while (0)
page               80 arch/mips/include/asm/bcache.h #define bc_inv(page, size) do { } while (0)
page               39 arch/mips/include/asm/cacheflush.h #define Page_dcache_dirty(page)		\
page               40 arch/mips/include/asm/cacheflush.h 	test_bit(PG_dcache_dirty, &(page)->flags)
page               41 arch/mips/include/asm/cacheflush.h #define SetPageDcacheDirty(page)	\
page               42 arch/mips/include/asm/cacheflush.h 	set_bit(PG_dcache_dirty, &(page)->flags)
page               43 arch/mips/include/asm/cacheflush.h #define ClearPageDcacheDirty(page)	\
page               44 arch/mips/include/asm/cacheflush.h 	clear_bit(PG_dcache_dirty, &(page)->flags)
page               52 arch/mips/include/asm/cacheflush.h extern void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page, unsigned long pfn);
page               53 arch/mips/include/asm/cacheflush.h extern void __flush_dcache_page(struct page *page);
page               56 arch/mips/include/asm/cacheflush.h static inline void flush_dcache_page(struct page *page)
page               59 arch/mips/include/asm/cacheflush.h 		__flush_dcache_page(page);
page               61 arch/mips/include/asm/cacheflush.h 		SetPageDcacheDirty(page);
page               68 arch/mips/include/asm/cacheflush.h extern void __flush_anon_page(struct page *, unsigned long);
page               70 arch/mips/include/asm/cacheflush.h 	struct page *page, unsigned long vmaddr)
page               72 arch/mips/include/asm/cacheflush.h 	if (cpu_has_dc_aliases && PageAnon(page))
page               73 arch/mips/include/asm/cacheflush.h 		__flush_anon_page(page, vmaddr);
page               77 arch/mips/include/asm/cacheflush.h 	struct page *page)
page              105 arch/mips/include/asm/cacheflush.h 	struct page *page, unsigned long vaddr, void *dst, const void *src,
page              109 arch/mips/include/asm/cacheflush.h 	struct page *page, unsigned long vaddr, void *dst, const void *src,
page              119 arch/mips/include/asm/cacheflush.h extern void *kmap_coherent(struct page *page, unsigned long addr);
page              121 arch/mips/include/asm/cacheflush.h extern void *kmap_noncoherent(struct page *page, unsigned long addr);
page              129 arch/mips/include/asm/cacheflush.h static inline void flush_kernel_dcache_page(struct page *page)
page              131 arch/mips/include/asm/cacheflush.h 	BUG_ON(cpu_has_dc_aliases && PageHighMem(page));
page              132 arch/mips/include/asm/cacheflush.h 	flush_dcache_page(page);
page               49 arch/mips/include/asm/highmem.h extern void * kmap_high(struct page *page);
page               50 arch/mips/include/asm/highmem.h extern void kunmap_high(struct page *page);
page               52 arch/mips/include/asm/highmem.h extern void *kmap(struct page *page);
page               53 arch/mips/include/asm/highmem.h extern void kunmap(struct page *page);
page               54 arch/mips/include/asm/highmem.h extern void *kmap_atomic(struct page *page);
page               85 arch/mips/include/asm/hugetlb.h static inline void arch_clear_hugepage_flags(struct page *page)
page              154 arch/mips/include/asm/io.h #define page_to_phys(page)	((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
page              702 arch/mips/include/asm/octeon/cvmx-mio-defs.h 		uint64_t page:6;
page              720 arch/mips/include/asm/octeon/cvmx-mio-defs.h 		uint64_t page:6;
page              733 arch/mips/include/asm/octeon/cvmx-mio-defs.h 		uint64_t page:6;
page              751 arch/mips/include/asm/octeon/cvmx-mio-defs.h 		uint64_t page:6;
page               90 arch/mips/include/asm/page.h extern void clear_page(void * page);
page              101 arch/mips/include/asm/page.h struct page;
page              104 arch/mips/include/asm/page.h 	struct page *page)
page              114 arch/mips/include/asm/page.h extern void copy_user_highpage(struct page *to, struct page *from,
page              137 arch/mips/include/asm/page.h typedef struct page *pgtable_t;
page               34 arch/mips/include/asm/pgalloc.h extern void pmd_init(unsigned long page, unsigned long pagetable);
page               47 arch/mips/include/asm/pgalloc.h extern void pgd_init(unsigned long page);
page              364 arch/mips/include/asm/pgtable-64.h extern void pgd_init(unsigned long page);
page              365 arch/mips/include/asm/pgtable-64.h extern void pud_init(unsigned long page, unsigned long pagetable);
page              366 arch/mips/include/asm/pgtable-64.h extern void pmd_init(unsigned long page, unsigned long pagetable);
page              445 arch/mips/include/asm/pgtable.h #define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
page              596 arch/mips/include/asm/pgtable.h extern pmd_t mk_pmd(struct page *page, pgprot_t prot);
page              603 arch/mips/include/asm/pgtable.h static inline struct page *pmd_page(pmd_t pmd)
page              545 arch/mips/include/asm/r4kcache.h static inline void extra##blast_##pfx##cache##lsize##_page(unsigned long page) \
page              547 arch/mips/include/asm/r4kcache.h 	unsigned long start = page;					\
page              548 arch/mips/include/asm/r4kcache.h 	unsigned long end = page + PAGE_SIZE;				\
page              556 arch/mips/include/asm/r4kcache.h static inline void extra##blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \
page              559 arch/mips/include/asm/r4kcache.h 	unsigned long start = INDEX_BASE + (page & indexmask);		\
page              593 arch/mips/include/asm/r4kcache.h static inline void blast_##pfx##cache##lsize##_user_page(unsigned long page) \
page              595 arch/mips/include/asm/r4kcache.h 	unsigned long start = page;					\
page              596 arch/mips/include/asm/r4kcache.h 	unsigned long end = page + PAGE_SIZE;				\
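
The r4kcache.h hits are a macro that stamps out one page-sized cache "blast" loop per cache/level/line-size combination (the real expansion also unrolls the loop). Spelled out by hand for a 32-byte-line D-cache, one expansion looks roughly like this; cache_op() and Hit_Writeback_Inv_D are the CACHE-instruction wrappers from the real mips headers:

	static inline void blast_dcache32_page_sketch(unsigned long page)
	{
		unsigned long start = page;
		unsigned long end = page + PAGE_SIZE;

		for (; start < end; start += 32)
			cache_op(Hit_Writeback_Inv_D, start);	/* write back + invalidate one line */
	}
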
page               22 arch/mips/include/asm/tlbflush.h 	unsigned long page);
page               44 arch/mips/include/asm/tlbflush.h #define flush_tlb_page(vma, page)	local_flush_tlb_page(vma, page)
page               55 arch/mips/include/asm/vdso.h 	u8 page[PAGE_SIZE];
page              588 arch/mips/jazz/jazzdma.c static dma_addr_t jazz_dma_map_page(struct device *dev, struct page *page,
page              592 arch/mips/jazz/jazzdma.c 	phys_addr_t phys = page_to_phys(page) + offset;
page              184 arch/mips/kernel/asm-offsets.c 	DEFINE(STRUCT_PAGE_SIZE, sizeof(struct page));
page              331 arch/mips/kernel/setup.c 			(unsigned long)((PFN_UP(ramstart) - ARCH_PFN_OFFSET) * sizeof(struct page)),
page              640 arch/mips/kernel/smp.c void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
page              650 arch/mips/kernel/smp.c 		ginvt_va_mmid(page);
page              659 arch/mips/kernel/smp.c 			.addr1 = page,
page              663 arch/mips/kernel/smp.c 		local_flush_tlb_page(vma, page);
page              677 arch/mips/kernel/smp.c 		local_flush_tlb_page(vma, page);
page              227 arch/mips/kernel/uprobes.c void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
page              233 arch/mips/kernel/uprobes.c 	kaddr = (unsigned long)kmap_atomic(page);
page               35 arch/mips/kernel/vdso.c static struct page *no_pages[] = { NULL };
page               28 arch/mips/kvm/commpage.c 	struct kvm_mips_commpage *page = vcpu->arch.kseg0_commpage;
page               31 arch/mips/kvm/commpage.c 	vcpu->arch.cop0 = &page->cop0;
page               31 arch/mips/kvm/mmu.c 	void *page;
page               37 arch/mips/kvm/mmu.c 		page = (void *)__get_free_page(GFP_KERNEL);
page               38 arch/mips/kvm/mmu.c 		if (!page)
page               40 arch/mips/kvm/mmu.c 		cache->objects[cache->nobjs++] = page;
page               75 arch/mips/kvm/mmu.c static void kvm_pgd_init(void *page)
page               86 arch/mips/kvm/mmu.c 	p = (unsigned long *)page;
page              153 arch/mips/mm/c-octeon.c 				    unsigned long page, unsigned long pfn)
page              282 arch/mips/mm/c-r4k.c static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page)
page              287 arch/mips/mm/c-r4k.c 	blast_icache32_page_indexed(page);
page              291 arch/mips/mm/c-r4k.c static inline void tx49_blast_icache32_page_indexed(unsigned long page)
page              294 arch/mips/mm/c-r4k.c 	unsigned long start = INDEX_BASE + (page & indexmask);
page              648 arch/mips/mm/c-r4k.c 	struct page *page = pfn_to_page(fcp_args->pfn);
page              686 arch/mips/mm/c-r4k.c 				page_mapcount(page) &&
page              687 arch/mips/mm/c-r4k.c 				!Page_dcache_dirty(page));
page              689 arch/mips/mm/c-r4k.c 			vaddr = kmap_coherent(page, addr);
page              691 arch/mips/mm/c-r4k.c 			vaddr = kmap_atomic(page);
page              168 arch/mips/mm/c-tx39.c static void tx39_flush_cache_page(struct vm_area_struct *vma, unsigned long page, unsigned long pfn)
page              184 arch/mips/mm/c-tx39.c 	page &= PAGE_MASK;
page              185 arch/mips/mm/c-tx39.c 	pgdp = pgd_offset(mm, page);
page              186 arch/mips/mm/c-tx39.c 	pudp = pud_offset(pgdp, page);
page              187 arch/mips/mm/c-tx39.c 	pmdp = pmd_offset(pudp, page);
page              188 arch/mips/mm/c-tx39.c 	ptep = pte_offset(pmdp, page);
page              205 arch/mips/mm/c-tx39.c 			tx39_blast_dcache_page(page);
page              207 arch/mips/mm/c-tx39.c 			tx39_blast_icache_page(page);
page              217 arch/mips/mm/c-tx39.c 		tx39_blast_dcache_page_indexed(page);
page              219 arch/mips/mm/c-tx39.c 		tx39_blast_icache_page_indexed(page);
page               32 arch/mips/mm/cache.c void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page,
page               84 arch/mips/mm/cache.c void __flush_dcache_page(struct page *page)
page               86 arch/mips/mm/cache.c 	struct address_space *mapping = page_mapping_file(page);
page               90 arch/mips/mm/cache.c 		SetPageDcacheDirty(page);
page               99 arch/mips/mm/cache.c 	if (PageHighMem(page))
page              100 arch/mips/mm/cache.c 		addr = (unsigned long)kmap_atomic(page);
page              102 arch/mips/mm/cache.c 		addr = (unsigned long)page_address(page);
page              106 arch/mips/mm/cache.c 	if (PageHighMem(page))
page              112 arch/mips/mm/cache.c void __flush_anon_page(struct page *page, unsigned long vmaddr)
page              114 arch/mips/mm/cache.c 	unsigned long addr = (unsigned long) page_address(page);
page              117 arch/mips/mm/cache.c 		if (page_mapcount(page) && !Page_dcache_dirty(page)) {
page              120 arch/mips/mm/cache.c 			kaddr = kmap_coherent(page, vmaddr);
page              132 arch/mips/mm/cache.c 	struct page *page;
page              139 arch/mips/mm/cache.c 	page = pfn_to_page(pfn);
page              140 arch/mips/mm/cache.c 	if (Page_dcache_dirty(page)) {
page              141 arch/mips/mm/cache.c 		if (PageHighMem(page))
page              142 arch/mips/mm/cache.c 			addr = (unsigned long)kmap_atomic(page);
page              144 arch/mips/mm/cache.c 			addr = (unsigned long)page_address(page);
page              149 arch/mips/mm/cache.c 		if (PageHighMem(page))
page              152 arch/mips/mm/cache.c 		ClearPageDcacheDirty(page);
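
Taken together, the cache.c hits implement the MIPS deferred D-cache flush: file-backed pages nobody has mapped yet just get PG_dcache_dirty set, and the real flush happens later (on kmap_coherent, fault, or the path ending in the ClearPageDcacheDirty above). A sketch, with the mapping_mapped() test filled in from the real function body and flush_data_cache_page() as the arch-indirected flush hook:

	void flush_dcache_page_sketch(struct page *page)
	{
		struct address_space *mapping = page_mapping_file(page);
		unsigned long addr;

		if (mapping && !mapping_mapped(mapping)) {
			SetPageDcacheDirty(page);	/* defer until someone maps it */
			return;
		}

		/* highmem pages need a temporary kernel mapping to be flushable */
		if (PageHighMem(page))
			addr = (unsigned long)kmap_atomic(page);
		else
			addr = (unsigned long)page_address(page);

		flush_data_cache_page(addr);
		if (PageHighMem(page))
			kunmap_atomic((void *)addr);
	}
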
page               47 arch/mips/mm/dma-noncoherent.c void arch_dma_prep_coherent(struct page *page, size_t size)
page               49 arch/mips/mm/dma-noncoherent.c 	dma_cache_wback_inv((unsigned long)page_address(page), size);
page               97 arch/mips/mm/dma-noncoherent.c 	struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
page              104 arch/mips/mm/dma-noncoherent.c 		if (PageHighMem(page)) {
page              110 arch/mips/mm/dma-noncoherent.c 			addr = kmap_atomic(page);
page              114 arch/mips/mm/dma-noncoherent.c 			dma_sync_virt(page_address(page) + offset, size, dir);
page              116 arch/mips/mm/dma-noncoherent.c 		page++;
page               15 arch/mips/mm/highmem.c void *kmap(struct page *page)
page               20 arch/mips/mm/highmem.c 	if (!PageHighMem(page))
page               21 arch/mips/mm/highmem.c 		return page_address(page);
page               22 arch/mips/mm/highmem.c 	addr = kmap_high(page);
page               29 arch/mips/mm/highmem.c void kunmap(struct page *page)
page               32 arch/mips/mm/highmem.c 	if (!PageHighMem(page))
page               34 arch/mips/mm/highmem.c 	kunmap_high(page);
page               47 arch/mips/mm/highmem.c void *kmap_atomic(struct page *page)
page               54 arch/mips/mm/highmem.c 	if (!PageHighMem(page))
page               55 arch/mips/mm/highmem.c 		return page_address(page);
page               63 arch/mips/mm/highmem.c 	set_pte(kmap_pte-idx, mk_pte(page, PAGE_KERNEL));
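
mips/mm/highmem.c maps highmem pages through per-CPU fixmap slots: kmap_atomic() claims the next slot, points its pre-allocated pte (kmap_pte) at the page, and flushes the one stale TLB entry. A sketch with the vaddr arithmetic made explicit; the idx/type bookkeeping follows the generic kmap_atomic_idx scheme of this kernel era:

	void *kmap_atomic_sketch(struct page *page)
	{
		unsigned long vaddr;
		int idx, type;

		preempt_disable();
		pagefault_disable();
		if (!PageHighMem(page))
			return page_address(page);	/* lowmem: permanent mapping exists */

		type = kmap_atomic_idx_push();
		idx = type + KM_TYPE_NR * smp_processor_id();
		vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
		set_pte(kmap_pte - idx, mk_pte(page, PAGE_KERNEL));
		local_flush_tlb_one(vaddr);		/* drop any stale entry for this slot */

		return (void *)vaddr;
	}
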
page               65 arch/mips/mm/init.c 	struct page *page;
page               76 arch/mips/mm/init.c 	page = virt_to_page((void *)empty_zero_page);
page               77 arch/mips/mm/init.c 	split_page(page, order);
page               78 arch/mips/mm/init.c 	for (i = 0; i < (1 << order); i++, page++)
page               79 arch/mips/mm/init.c 		mark_page_reserved(page);
page               84 arch/mips/mm/init.c static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
page               93 arch/mips/mm/init.c 	BUG_ON(Page_dcache_dirty(page));
page              100 arch/mips/mm/init.c 	pte = mk_pte(page, prot);
page              139 arch/mips/mm/init.c void *kmap_coherent(struct page *page, unsigned long addr)
page              141 arch/mips/mm/init.c 	return __kmap_pgprot(page, addr, PAGE_KERNEL);
page              144 arch/mips/mm/init.c void *kmap_noncoherent(struct page *page, unsigned long addr)
page              146 arch/mips/mm/init.c 	return __kmap_pgprot(page, addr, PAGE_KERNEL_NC);
page              171 arch/mips/mm/init.c void copy_user_highpage(struct page *to, struct page *from,
page              196 arch/mips/mm/init.c 	struct page *page, unsigned long vaddr, void *dst, const void *src,
page              200 arch/mips/mm/init.c 	    page_mapcount(page) && !Page_dcache_dirty(page)) {
page              201 arch/mips/mm/init.c 		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
page              207 arch/mips/mm/init.c 			SetPageDcacheDirty(page);
page              210 arch/mips/mm/init.c 		flush_cache_page(vma, vaddr, page_to_pfn(page));
page              214 arch/mips/mm/init.c 	struct page *page, unsigned long vaddr, void *dst, const void *src,
page              218 arch/mips/mm/init.c 	    page_mapcount(page) && !Page_dcache_dirty(page)) {
page              219 arch/mips/mm/init.c 		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
page              225 arch/mips/mm/init.c 			SetPageDcacheDirty(page);
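
The init.c hits above are the alias-aware user copy: on CPUs where the D-cache is virtually indexed (cpu_has_dc_aliases), writing through an arbitrary kernel alias would leave stale lines at the user's cache color, so the kernel copies through a congruent mapping from kmap_coherent() whenever the page is mapped and clean. Reassembled from the hits as one function:

	void copy_to_user_page_sketch(struct vm_area_struct *vma, struct page *page,
				      unsigned long vaddr, void *dst, const void *src,
				      unsigned long len)
	{
		if (cpu_has_dc_aliases &&
		    page_mapcount(page) && !Page_dcache_dirty(page)) {
			void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);

			memcpy(vto, src, len);		/* same cache color as the user view */
			kunmap_coherent();
		} else {
			memcpy(dst, src, len);
			if (cpu_has_dc_aliases)
				SetPageDcacheDirty(page);	/* flush lazily later */
		}
		if (vma->vm_flags & VM_EXEC)		/* keep I-cache coherent for exec pages */
			flush_cache_page(vma, vaddr, page_to_pfn(page));
	}
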
page              437 arch/mips/mm/init.c 		struct page *page = pfn_to_page(tmp);
page              440 arch/mips/mm/init.c 			SetPageReserved(page);
page              442 arch/mips/mm/init.c 			free_highmem_page(page);
page              486 arch/mips/mm/init.c 		struct page *page = pfn_to_page(pfn);
page              490 arch/mips/mm/init.c 		free_reserved_page(page);
page              612 arch/mips/mm/page.c extern void clear_page_cpu(void *page);
page              626 arch/mips/mm/page.c void clear_page(void *page)
page              628 arch/mips/mm/page.c 	u64 to_phys = CPHYSADDR((unsigned long)page);
page              632 arch/mips/mm/page.c 	if ((long)KSEGX((unsigned long)page) != (long)CKSEG0)
page              633 arch/mips/mm/page.c 		return clear_page_cpu(page);
page               17 arch/mips/mm/pgtable-32.c void pgd_init(unsigned long page)
page               19 arch/mips/mm/pgtable-32.c 	unsigned long *p = (unsigned long *) page;
page               35 arch/mips/mm/pgtable-32.c pmd_t mk_pmd(struct page *page, pgprot_t prot)
page               39 arch/mips/mm/pgtable-32.c 	pmd_val(pmd) = (page_to_pfn(page) << _PFN_SHIFT) | pgprot_val(prot);
page               17 arch/mips/mm/pgtable-64.c void pgd_init(unsigned long page)
page               30 arch/mips/mm/pgtable-64.c 	p = (unsigned long *) page;
page               91 arch/mips/mm/pgtable-64.c pmd_t mk_pmd(struct page *page, pgprot_t prot)
page               95 arch/mips/mm/pgtable-64.c 	pmd_val(pmd) = (page_to_pfn(page) << _PFN_SHIFT) | pgprot_val(prot);
page              150 arch/mips/mm/tlb-r3k.c void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
page              160 arch/mips/mm/tlb-r3k.c 		printk("[tlbpage<%lu,0x%08lx>]", cpu_context(cpu, vma->vm_mm), page);
page              163 arch/mips/mm/tlb-r3k.c 		page &= PAGE_MASK;
page              166 arch/mips/mm/tlb-r3k.c 		write_c0_entryhi(page | newpid);
page              212 arch/mips/mm/tlb-r4k.c void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
page              221 arch/mips/mm/tlb-r4k.c 		page &= (PAGE_MASK << 1);
page              227 arch/mips/mm/tlb-r4k.c 			write_c0_entryhi(page);
page              230 arch/mips/mm/tlb-r4k.c 			write_c0_entryhi(page | cpu_asid(cpu, vma->vm_mm));
page              260 arch/mips/mm/tlb-r4k.c void local_flush_tlb_one(unsigned long page)
page              268 arch/mips/mm/tlb-r4k.c 	page &= (PAGE_MASK << 1);
page              269 arch/mips/mm/tlb-r4k.c 	write_c0_entryhi(page);
page              370 arch/mips/sgi-ip27/ip27-memory.c 			if ((nodebytes >> PAGE_SHIFT) * (sizeof(struct page)) >
page               14 arch/nds32/include/asm/cacheflush.h void flush_icache_page(struct vm_area_struct *vma, struct page *page);
page               29 arch/nds32/include/asm/cacheflush.h void flush_dcache_page(struct page *page);
page               30 arch/nds32/include/asm/cacheflush.h void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
page               32 arch/nds32/include/asm/cacheflush.h void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
page               37 arch/nds32/include/asm/cacheflush.h 		     struct page *page, unsigned long vaddr);
page               40 arch/nds32/include/asm/cacheflush.h void flush_kernel_dcache_page(struct page *page);
page               47 arch/nds32/include/asm/cacheflush.h void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
page               47 arch/nds32/include/asm/highmem.h extern void *kmap_high(struct page *page);
page               48 arch/nds32/include/asm/highmem.h extern void kunmap_high(struct page *page);
page               57 arch/nds32/include/asm/highmem.h extern void *kmap(struct page *page);
page               58 arch/nds32/include/asm/highmem.h extern void kunmap(struct page *page);
page               59 arch/nds32/include/asm/highmem.h extern void *kmap_atomic(struct page *page);
page               62 arch/nds32/include/asm/highmem.h extern struct page *kmap_atomic_to_page(void *ptr);
page               91 arch/nds32/include/asm/memory.h #define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
page               23 arch/nds32/include/asm/page.h struct page;
page               26 arch/nds32/include/asm/page.h extern void copy_user_highpage(struct page *to, struct page *from,
page               28 arch/nds32/include/asm/page.h extern void clear_user_highpage(struct page *page, unsigned long vaddr);
page               31 arch/nds32/include/asm/page.h 		    struct page *to);
page               32 arch/nds32/include/asm/page.h void clear_user_page(void *addr, unsigned long vaddr, struct page *page);
page               36 arch/nds32/include/asm/page.h #define clear_user_page(page, vaddr, pg)        clear_page(page)
page               40 arch/nds32/include/asm/page.h void clear_page(void *page);
page               58 arch/nds32/include/asm/page.h typedef struct page *pgtable_t;
page              183 arch/nds32/include/asm/pgtable.h extern struct page *empty_zero_page;
page              342 arch/nds32/include/asm/pgtable.h #define mk_pte(page,prot)	pfn_pte(page_to_pfn(page),prot)
page              354 arch/nds32/include/asm/pgtable.h #define page_pte_prot(page,prot)     	mk_pte(page, prot)
page              355 arch/nds32/include/asm/pgtable.h #define page_pte(page)		        mk_pte(page, __pgprot(0))
page               20 arch/nds32/include/asm/proc-fns.h extern void cpu_dcache_inval_page(unsigned long page);
page               21 arch/nds32/include/asm/proc-fns.h extern void cpu_dcache_wb_page(unsigned long page);
page               22 arch/nds32/include/asm/proc-fns.h extern void cpu_dcache_wbinval_page(unsigned long page);
page               28 arch/nds32/include/asm/proc-fns.h extern void cpu_icache_inval_page(unsigned long page);
page               31 arch/nds32/include/asm/proc-fns.h extern void cpu_cache_wbinval_page(unsigned long page, int flushi);
page               16 arch/nds32/kernel/dma.c 	struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
page               24 arch/nds32/kernel/dma.c 		if (PageHighMem(page)) {
page               29 arch/nds32/kernel/dma.c 					page += offset >> PAGE_SHIFT;
page               35 arch/nds32/kernel/dma.c 			addr = kmap_atomic(page);
page               44 arch/nds32/kernel/dma.c 		page++;
page               79 arch/nds32/kernel/dma.c void arch_dma_prep_coherent(struct page *page, size_t size)
page               81 arch/nds32/kernel/dma.c 	cache_op(page_to_phys(page), size, cpu_dma_wbinval_range);
page               38 arch/nds32/kernel/vdso.c static struct page *no_pages[] = { NULL };
page               42 arch/nds32/kernel/vdso.c 	u8 page[PAGE_SIZE];
page               67 arch/nds32/kernel/vdso.c 	struct page **vdso_pagelist;
page               81 arch/nds32/kernel/vdso.c 	vdso_pagelist = kcalloc(vdso_pages, sizeof(struct page *), GFP_KERNEL);
page               28 arch/nds32/mm/cacheflush.c void flush_icache_page(struct vm_area_struct *vma, struct page *page)
page               33 arch/nds32/mm/cacheflush.c 	kaddr = (unsigned long)kmap_atomic(page);
page               40 arch/nds32/mm/cacheflush.c void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
page               44 arch/nds32/mm/cacheflush.c 	kaddr = (unsigned long)kmap_atomic(page) + (addr & ~PAGE_MASK);
page               52 arch/nds32/mm/cacheflush.c 	struct page *page;
page               66 arch/nds32/mm/cacheflush.c 	page = pfn_to_page(pfn);
page               68 arch/nds32/mm/cacheflush.c 	if ((test_and_clear_bit(PG_dcache_dirty, &page->flags)) ||
page               72 arch/nds32/mm/cacheflush.c 		kaddr = (unsigned long)kmap_atomic(page);
page               81 arch/nds32/mm/cacheflush.c static inline unsigned long aliasing(unsigned long addr, unsigned long page)
page               83 arch/nds32/mm/cacheflush.c 	return ((addr & PAGE_MASK) ^ page) & (SHMLBA - 1);
page              179 arch/nds32/mm/cacheflush.c 		    struct page *to)
page              188 arch/nds32/mm/cacheflush.c void clear_user_page(void *addr, unsigned long vaddr, struct page *page)
page              197 arch/nds32/mm/cacheflush.c void copy_user_highpage(struct page *to, struct page *from,
page              219 arch/nds32/mm/cacheflush.c void clear_user_highpage(struct page *page, unsigned long vaddr)
page              223 arch/nds32/mm/cacheflush.c 	kto = ((unsigned long)page_address(page) & PAGE_MASK);
page              230 arch/nds32/mm/cacheflush.c 	vto = kremap0(vaddr, page_to_phys(page));
page              238 arch/nds32/mm/cacheflush.c void flush_dcache_page(struct page *page)
page              242 arch/nds32/mm/cacheflush.c 	mapping = page_mapping(page);
page              244 arch/nds32/mm/cacheflush.c 		set_bit(PG_dcache_dirty, &page->flags);
page              248 arch/nds32/mm/cacheflush.c 		kaddr = (unsigned long)page_address(page);
page              254 arch/nds32/mm/cacheflush.c 			vaddr = page->index << PAGE_SHIFT;
page              256 arch/nds32/mm/cacheflush.c 				kto = kremap0(vaddr, page_to_phys(page));
page              266 arch/nds32/mm/cacheflush.c void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
page              272 arch/nds32/mm/cacheflush.c 	vto = kremap0(vaddr, page_to_phys(page));
page              287 arch/nds32/mm/cacheflush.c void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
page              293 arch/nds32/mm/cacheflush.c 	vto = kremap0(vaddr, page_to_phys(page));
page              301 arch/nds32/mm/cacheflush.c 		     struct page *page, unsigned long vaddr)
page              304 arch/nds32/mm/cacheflush.c 	if (!PageAnon(page))
page              313 arch/nds32/mm/cacheflush.c 	kaddr = (unsigned long)page_address(page);
page              315 arch/nds32/mm/cacheflush.c 		ktmp = kremap0(vaddr, page_to_phys(page));
page              322 arch/nds32/mm/cacheflush.c void flush_kernel_dcache_page(struct page *page)
page              326 arch/nds32/mm/cacheflush.c 	cpu_dcache_wbinval_page((unsigned long)page_address(page));
page               13 arch/nds32/mm/highmem.c void *kmap(struct page *page)
page               17 arch/nds32/mm/highmem.c 	if (!PageHighMem(page))
page               18 arch/nds32/mm/highmem.c 		return page_address(page);
page               19 arch/nds32/mm/highmem.c 	vaddr = (unsigned long)kmap_high(page);
page               25 arch/nds32/mm/highmem.c void kunmap(struct page *page)
page               28 arch/nds32/mm/highmem.c 	if (!PageHighMem(page))
page               30 arch/nds32/mm/highmem.c 	kunmap_high(page);
page               35 arch/nds32/mm/highmem.c void *kmap_atomic(struct page *page)
page               44 arch/nds32/mm/highmem.c 	if (!PageHighMem(page))
page               45 arch/nds32/mm/highmem.c 		return page_address(page);
page               51 arch/nds32/mm/highmem.c 	pte = (page_to_pfn(page) << PAGE_SHIFT) | (PAGE_KERNEL);
page               29 arch/nds32/mm/init.c struct page *empty_zero_page;
page               44 arch/nds32/mm/mm-nds32.c 	struct page *pte;
page              265 arch/nds32/mm/proc.c void cpu_cache_wbinval_page(unsigned long page, int flushi)
page              267 arch/nds32/mm/proc.c 	cpu_dcache_wbinval_page(page);
page              269 arch/nds32/mm/proc.c 		cpu_icache_inval_page(page);
page               31 arch/nios2/include/asm/cacheflush.h extern void flush_dcache_page(struct page *page);
page               34 arch/nios2/include/asm/cacheflush.h extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
page               39 arch/nios2/include/asm/cacheflush.h extern void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
page               42 arch/nios2/include/asm/cacheflush.h extern void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
page               53 arch/nios2/include/asm/io.h #define page_to_phys(page)	virt_to_phys(page_to_virt(page))
page               48 arch/nios2/include/asm/page.h #define clear_page(page)	memset((page), 0, PAGE_SIZE)
page               51 arch/nios2/include/asm/page.h struct page;
page               53 arch/nios2/include/asm/page.h extern void clear_user_page(void *addr, unsigned long vaddr, struct page *page);
page               55 arch/nios2/include/asm/page.h 				struct page *to);
page               60 arch/nios2/include/asm/page.h typedef struct page *pgtable_t;
page               77 arch/nios2/include/asm/page.h extern struct page *mem_map;
page               84 arch/nios2/include/asm/page.h #define page_to_virt(page)	\
page               85 arch/nios2/include/asm/page.h 	((void *)(((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET)
page               33 arch/nios2/include/asm/pgalloc.h extern void pmd_init(unsigned long page, unsigned long pagetable);
page              241 arch/nios2/include/asm/pgtable.h #define mk_pte(page, prot)	(pfn_pte(page_to_pfn(page), prot))
page               73 arch/nios2/mm/cacheflush.c static void flush_aliases(struct address_space *mapping, struct page *page)
page               79 arch/nios2/mm/cacheflush.c 	pgoff = page->index;
page               92 arch/nios2/mm/cacheflush.c 			page_to_pfn(page));
page              140 arch/nios2/mm/cacheflush.c void flush_icache_page(struct vm_area_struct *vma, struct page *page)
page              142 arch/nios2/mm/cacheflush.c 	unsigned long start = (unsigned long) page_address(page);
page              160 arch/nios2/mm/cacheflush.c void __flush_dcache_page(struct address_space *mapping, struct page *page)
page              167 arch/nios2/mm/cacheflush.c 	unsigned long start = (unsigned long)page_address(page);
page              172 arch/nios2/mm/cacheflush.c void flush_dcache_page(struct page *page)
page              180 arch/nios2/mm/cacheflush.c 	if (page == ZERO_PAGE(0))
page              183 arch/nios2/mm/cacheflush.c 	mapping = page_mapping_file(page);
page              187 arch/nios2/mm/cacheflush.c 		clear_bit(PG_dcache_clean, &page->flags);
page              189 arch/nios2/mm/cacheflush.c 		__flush_dcache_page(mapping, page);
page              191 arch/nios2/mm/cacheflush.c 			unsigned long start = (unsigned long)page_address(page);
page              192 arch/nios2/mm/cacheflush.c 			flush_aliases(mapping,  page);
page              195 arch/nios2/mm/cacheflush.c 		set_bit(PG_dcache_clean, &page->flags);
page              205 arch/nios2/mm/cacheflush.c 	struct page *page;
page              217 arch/nios2/mm/cacheflush.c 	page = pfn_to_page(pfn);
page              218 arch/nios2/mm/cacheflush.c 	if (page == ZERO_PAGE(0))
page              221 arch/nios2/mm/cacheflush.c 	mapping = page_mapping_file(page);
page              222 arch/nios2/mm/cacheflush.c 	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
page              223 arch/nios2/mm/cacheflush.c 		__flush_dcache_page(mapping, page);
page              227 arch/nios2/mm/cacheflush.c 		flush_aliases(mapping, page);
page              229 arch/nios2/mm/cacheflush.c 			flush_icache_page(vma, page);
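
nios2 runs the same deferral protocol with the bit inverted: PG_dcache_clean is cleared by flush_dcache_page() when the flush can be postponed, and the pfn-based path above test-and-sets it back, flushing aliases at other cache colors on the way. A sketch of the flush side, using the local helpers named in the hits and with the mapping_mapped() test filled in from the real body:

	void flush_dcache_page_sketch(struct page *page)
	{
		struct address_space *mapping = page_mapping_file(page);

		if (page == ZERO_PAGE(0))
			return;				/* never written through user mappings */

		if (mapping && !mapping_mapped(mapping)) {
			clear_bit(PG_dcache_clean, &page->flags);	/* defer */
		} else {
			__flush_dcache_page(mapping, page);
			if (mapping)
				flush_aliases(mapping, page);	/* other colors of the same page */
			set_bit(PG_dcache_clean, &page->flags);
		}
	}
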
page              234 arch/nios2/mm/cacheflush.c 		    struct page *to)
page              243 arch/nios2/mm/cacheflush.c void clear_user_page(void *addr, unsigned long vaddr, struct page *page)
page              252 arch/nios2/mm/cacheflush.c void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
page              256 arch/nios2/mm/cacheflush.c 	flush_cache_page(vma, user_vaddr, page_to_pfn(page));
page              263 arch/nios2/mm/cacheflush.c void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
page              267 arch/nios2/mm/cacheflush.c 	flush_cache_page(vma, user_vaddr, page_to_pfn(page));
page               63 arch/nios2/mm/dma-mapping.c void arch_dma_prep_coherent(struct page *page, size_t size)
page               65 arch/nios2/mm/dma-mapping.c 	unsigned long start = (unsigned long)page_address(page);
page               88 arch/nios2/mm/init.c static struct page *kuser_page[1];
page              132 arch/nios2/mm/ioremap.c 		struct page *page;
page              136 arch/nios2/mm/ioremap.c 		for (page = virt_to_page(t_addr);
page              137 arch/nios2/mm/ioremap.c 			page <= virt_to_page(t_end); page++)
page              138 arch/nios2/mm/ioremap.c 			if (!PageReserved(page))
page               24 arch/openrisc/include/asm/cacheflush.h extern void local_dcache_page_flush(struct page *page);
page               25 arch/openrisc/include/asm/cacheflush.h extern void local_icache_page_inv(struct page *page);
page               33 arch/openrisc/include/asm/cacheflush.h #define dcache_page_flush(page)      local_dcache_page_flush(page)
page               34 arch/openrisc/include/asm/cacheflush.h #define icache_page_inv(page)        local_icache_page_inv(page)
page               36 arch/openrisc/include/asm/cacheflush.h #define dcache_page_flush(page)      local_dcache_page_flush(page)
page               37 arch/openrisc/include/asm/cacheflush.h #define icache_page_inv(page)        smp_icache_page_inv(page)
page               38 arch/openrisc/include/asm/cacheflush.h extern void smp_icache_page_inv(struct page *page);
page               45 arch/openrisc/include/asm/cacheflush.h static inline void sync_icache_dcache(struct page *page)
page               48 arch/openrisc/include/asm/cacheflush.h 		dcache_page_flush(page);
page               49 arch/openrisc/include/asm/cacheflush.h 	icache_page_inv(page);
page               60 arch/openrisc/include/asm/cacheflush.h static inline void flush_dcache_page(struct page *page)
page               62 arch/openrisc/include/asm/cacheflush.h 	clear_bit(PG_dc_clean, &page->flags);
page               82 arch/openrisc/include/asm/cacheflush.h #define copy_to_user_page(vma, page, vaddr, dst, src, len)           \
page               86 arch/openrisc/include/asm/cacheflush.h 			sync_icache_dcache(page);                    \
page               89 arch/openrisc/include/asm/cacheflush.h #define copy_from_user_page(vma, page, vaddr, dst, src, len)         \
page               39 arch/openrisc/include/asm/page.h #define clear_page(page)	memset((page), 0, PAGE_SIZE)
page               42 arch/openrisc/include/asm/page.h #define clear_user_page(page, vaddr, pg)        clear_page(page)
page               57 arch/openrisc/include/asm/page.h typedef struct page *pgtable_t;
page               81 arch/openrisc/include/asm/page.h #define page_to_phys(page)      ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
page               29 arch/openrisc/include/asm/pgalloc.h 				struct page *pte)
page               71 arch/openrisc/include/asm/pgalloc.h static inline struct page *pte_alloc_one(struct mm_struct *mm)
page               73 arch/openrisc/include/asm/pgalloc.h 	struct page *pte;
page               90 arch/openrisc/include/asm/pgalloc.h static inline void pte_free(struct mm_struct *mm, struct page *pte)
page              313 arch/openrisc/include/asm/pgtable.h static inline pte_t __mk_pte(void *page, pgprot_t pgprot)
page              317 arch/openrisc/include/asm/pgtable.h 	pte_val(pte) = __pa(page) | pgprot_val(pgprot);
page              321 arch/openrisc/include/asm/pgtable.h #define mk_pte(page, pgprot) __mk_pte(page_address(page), (pgprot))
page              355 arch/openrisc/include/asm/pgtable.h #define __page_address(page) (PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT))
page               91 arch/openrisc/kernel/dma.c 	void *page;
page               93 arch/openrisc/kernel/dma.c 	page = alloc_pages_exact(size, gfp | __GFP_ZERO);
page               94 arch/openrisc/kernel/dma.c 	if (!page)
page               98 arch/openrisc/kernel/dma.c 	*dma_handle = __pa(page);
page              100 arch/openrisc/kernel/dma.c 	va = (unsigned long)page;
page              108 arch/openrisc/kernel/dma.c 		free_pages_exact(page, size);
page              250 arch/openrisc/kernel/smp.c 	struct page *page = arg;
page              252 arch/openrisc/kernel/smp.c 	local_icache_page_inv(page);
page              255 arch/openrisc/kernel/smp.c void smp_icache_page_inv(struct page *page)
page              257 arch/openrisc/kernel/smp.c 	on_each_cpu(ipi_icache_page_inv, page, 1);
page               19 arch/openrisc/mm/cache.c static void cache_loop(struct page *page, const unsigned int reg)
page               21 arch/openrisc/mm/cache.c 	unsigned long paddr = page_to_pfn(page) << PAGE_SHIFT;
page               30 arch/openrisc/mm/cache.c void local_dcache_page_flush(struct page *page)
page               32 arch/openrisc/mm/cache.c 	cache_loop(page, SPR_DCBFR);
page               36 arch/openrisc/mm/cache.c void local_icache_page_inv(struct page *page)
page               38 arch/openrisc/mm/cache.c 	cache_loop(page, SPR_ICBIR);
page               46 arch/openrisc/mm/cache.c 	struct page *page = pfn_to_page(pfn);
page               47 arch/openrisc/mm/cache.c 	int dirty = !test_and_set_bit(PG_dc_clean, &page->flags);
page               55 arch/openrisc/mm/cache.c 		sync_icache_dcache(page);
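
The openrisc lines above (flush_dcache_page() clearing PG_dc_clean, the test_and_set_bit() check in mm/cache.c, smp_icache_page_inv() broadcasting via on_each_cpu) show the kernel's deferred cache-synchronization pattern: dirtying a page is recorded cheaply in a per-page flag, and the costly flush/invalidate runs only when a dirty page is about to be executed. A minimal stand-alone sketch of that pattern, not kernel code; struct fake_page, hw_dcache_flush and hw_icache_inval are invented stand-ins:

	/* Deferred D/I-cache sync, modelled on the PG_dc_clean handling above. */
	#include <stdatomic.h>
	#include <stdio.h>

	#define DC_CLEAN 0x1UL                     /* stand-in for PG_dc_clean */

	struct fake_page { atomic_ulong flags; };

	static void hw_dcache_flush(struct fake_page *p) { (void)p; puts("dcache flush"); }
	static void hw_icache_inval(struct fake_page *p) { (void)p; puts("icache inval"); }

	/* flush_dcache_page() analogue: drop the "clean" bit, no hardware work. */
	static void mark_dirty(struct fake_page *p)
	{
		atomic_fetch_and(&p->flags, ~DC_CLEAN);
	}

	/* sync_icache_dcache()/update_cache() analogue: flush only if dirty. */
	static void sync_before_exec(struct fake_page *p)
	{
		unsigned long old = atomic_fetch_or(&p->flags, DC_CLEAN);

		if (!(old & DC_CLEAN)) {           /* test_and_set_bit() saw 0 */
			hw_dcache_flush(p);        /* push data so ifetch sees it */
			hw_icache_inval(p);        /* drop stale instructions */
		}
	}

	int main(void)
	{
		struct fake_page pg = { DC_CLEAN };

		mark_dirty(&pg);                   /* page written via the dcache */
		sync_before_exec(&pg);             /* first exec mapping: flushes */
		sync_before_exec(&pg);             /* still clean: no hardware op */
		return 0;
	}
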
page               11 arch/parisc/include/asm/agp.h #define map_page_into_agp(page)		/* nothing */
page               12 arch/parisc/include/asm/agp.h #define unmap_page_from_agp(page)	/* nothing */
page               41 arch/parisc/include/asm/cacheflush.h static inline void flush_kernel_dcache_page(struct page *page)
page               43 arch/parisc/include/asm/cacheflush.h 	flush_kernel_dcache_page_addr(page_address(page));
page               56 arch/parisc/include/asm/cacheflush.h extern void flush_dcache_page(struct page *page);
page               61 arch/parisc/include/asm/cacheflush.h #define flush_icache_page(vma,page)	do { 		\
page               62 arch/parisc/include/asm/cacheflush.h 	flush_kernel_dcache_page(page);			\
page               63 arch/parisc/include/asm/cacheflush.h 	flush_kernel_icache_page(page_address(page)); 	\
page               71 arch/parisc/include/asm/cacheflush.h #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
page               73 arch/parisc/include/asm/cacheflush.h 	flush_cache_page(vma, vaddr, page_to_pfn(page)); \
page               78 arch/parisc/include/asm/cacheflush.h #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
page               80 arch/parisc/include/asm/cacheflush.h 	flush_cache_page(vma, vaddr, page_to_pfn(page)); \
page               93 arch/parisc/include/asm/cacheflush.h flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
page               95 arch/parisc/include/asm/cacheflush.h 	if (PageAnon(page)) {
page               98 arch/parisc/include/asm/cacheflush.h 		flush_dcache_page_asm(page_to_phys(page), vmaddr);
page              107 arch/parisc/include/asm/cacheflush.h static inline void *kmap(struct page *page)
page              110 arch/parisc/include/asm/cacheflush.h 	return page_address(page);
page              113 arch/parisc/include/asm/cacheflush.h static inline void kunmap(struct page *page)
page              115 arch/parisc/include/asm/cacheflush.h 	flush_kernel_dcache_page_addr(page_address(page));
page              118 arch/parisc/include/asm/cacheflush.h static inline void *kmap_atomic(struct page *page)
page              122 arch/parisc/include/asm/cacheflush.h 	return page_address(page);
page              132 arch/parisc/include/asm/cacheflush.h #define kmap_atomic_prot(page, prot)	kmap_atomic(page)
page               51 arch/parisc/include/asm/hugetlb.h static inline void arch_clear_hugepage_flags(struct page *page)
page               25 arch/parisc/include/asm/page.h #define clear_page(page)	clear_page_asm((void *)(page))
page               28 arch/parisc/include/asm/page.h struct page;
page               30 arch/parisc/include/asm/page.h void clear_page_asm(void *page);
page               32 arch/parisc/include/asm/page.h #define clear_user_page(vto, vaddr, page) clear_page_asm(vto)
page               34 arch/parisc/include/asm/page.h 			struct page *pg);
page               87 arch/parisc/include/asm/page.h typedef struct page *pgtable_t;
page              174 arch/parisc/include/asm/page.h #define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
page              418 arch/parisc/include/asm/pgtable.h #define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
page               90 arch/parisc/kernel/cache.c 	struct page *page;
page               98 arch/parisc/kernel/cache.c 	page = pfn_to_page(pfn);
page               99 arch/parisc/kernel/cache.c 	if (page_mapping_file(page) &&
page              100 arch/parisc/kernel/cache.c 	    test_bit(PG_dcache_dirty, &page->flags)) {
page              102 arch/parisc/kernel/cache.c 		clear_bit(PG_dcache_dirty, &page->flags);
page              325 arch/parisc/kernel/cache.c void flush_dcache_page(struct page *page)
page              327 arch/parisc/kernel/cache.c 	struct address_space *mapping = page_mapping_file(page);
page              334 arch/parisc/kernel/cache.c 		set_bit(PG_dcache_dirty, &page->flags);
page              338 arch/parisc/kernel/cache.c 	flush_kernel_dcache_page(page);
page              343 arch/parisc/kernel/cache.c 	pgoff = page->index;
page              367 arch/parisc/kernel/cache.c 			__flush_cache_page(mpnt, addr, page_to_phys(page));
page              470 arch/parisc/kernel/cache.c 	struct page *pg)
page               31 arch/parisc/kernel/patch.c 	struct page *page;
page               35 arch/parisc/kernel/patch.c 		page = vmalloc_to_page(addr);
page               37 arch/parisc/kernel/patch.c 		page = virt_to_page(addr);
page               42 arch/parisc/kernel/patch.c 	set_fixmap(fixmap, page_to_phys(page));
page              170 arch/parisc/kernel/pci-dma.c 		pte_t page = *pte;
page              179 arch/parisc/kernel/pci-dma.c 		if (pte_none(page) || pte_present(page))
page               55 arch/parisc/mm/ioremap.c 		struct page *page;
page               60 arch/parisc/mm/ioremap.c 		for (page = virt_to_page(t_addr); 
page               61 arch/parisc/mm/ioremap.c 		     page <= virt_to_page(t_end); page++) {
page               62 arch/parisc/mm/ioremap.c 			if(!PageReserved(page))
page                8 arch/powerpc/include/asm/agp.h #define map_page_into_agp(page)
page                9 arch/powerpc/include/asm/agp.h #define unmap_page_from_agp(page)
page               13 arch/powerpc/include/asm/async_tx.h 	struct page **dst_lst, int dst_cnt, struct page **src_lst,
page               30 arch/powerpc/include/asm/book3s/64/pgalloc.h 	struct page *page;
page               31 arch/powerpc/include/asm/book3s/64/pgalloc.h 	page = alloc_pages(pgtable_gfp_flags(mm, PGALLOC_GFP | __GFP_RETRY_MAYFAIL),
page               33 arch/powerpc/include/asm/book3s/64/pgalloc.h 	if (!page)
page               35 arch/powerpc/include/asm/book3s/64/pgalloc.h 	return (pgd_t *) page_address(page);
page              291 arch/powerpc/include/asm/book3s/64/pgtable.h extern struct page *vmemmap;
page              926 arch/powerpc/include/asm/book3s/64/pgtable.h extern struct page *pud_page(pud_t pud);
page              927 arch/powerpc/include/asm/book3s/64/pgtable.h extern struct page *pmd_page(pmd_t pmd);
page              992 arch/powerpc/include/asm/book3s/64/pgtable.h extern struct page *pgd_page(pgd_t pgd);
page             1138 arch/powerpc/include/asm/book3s/64/pgtable.h extern pmd_t mk_pmd(struct page *page, pgprot_t pgprot);
page               21 arch/powerpc/include/asm/cacheflush.h #define flush_icache_page(vma, page)		do { } while (0)
page               41 arch/powerpc/include/asm/cacheflush.h extern void flush_dcache_page(struct page *page);
page               47 arch/powerpc/include/asm/cacheflush.h 				    struct page *page, unsigned long addr,
page               49 arch/powerpc/include/asm/cacheflush.h extern void flush_dcache_icache_page(struct page *page);
page               50 arch/powerpc/include/asm/cacheflush.h void __flush_dcache_icache(void *page);
page              118 arch/powerpc/include/asm/cacheflush.h #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
page              121 arch/powerpc/include/asm/cacheflush.h 		flush_icache_user_range(vma, page, vaddr, len); \
page              123 arch/powerpc/include/asm/cacheflush.h #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
page               62 arch/powerpc/include/asm/highmem.h extern void *kmap_high(struct page *page);
page               63 arch/powerpc/include/asm/highmem.h extern void kunmap_high(struct page *page);
page               64 arch/powerpc/include/asm/highmem.h extern void *kmap_atomic_prot(struct page *page, pgprot_t prot);
page               67 arch/powerpc/include/asm/highmem.h static inline void *kmap(struct page *page)
page               70 arch/powerpc/include/asm/highmem.h 	if (!PageHighMem(page))
page               71 arch/powerpc/include/asm/highmem.h 		return page_address(page);
page               72 arch/powerpc/include/asm/highmem.h 	return kmap_high(page);
page               75 arch/powerpc/include/asm/highmem.h static inline void kunmap(struct page *page)
page               78 arch/powerpc/include/asm/highmem.h 	if (!PageHighMem(page))
page               80 arch/powerpc/include/asm/highmem.h 	kunmap_high(page);
page               83 arch/powerpc/include/asm/highmem.h static inline void *kmap_atomic(struct page *page)
page               85 arch/powerpc/include/asm/highmem.h 	return kmap_atomic_prot(page, kmap_prot);
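
The powerpc highmem.h excerpts show the usual kmap() shape: pages below the highmem boundary already have a permanent kernel virtual address, so only PageHighMem pages pay for a temporary mapping. A compilable sketch of that fast path; every name here is invented for illustration, and the "slot" stands in for a fixmap/pkmap entry:

	/* kmap()-style fast path: permanent mapping when present, slot otherwise. */
	#include <stdio.h>

	struct vpage {
		void *direct_map;          /* non-NULL for "lowmem" pages */
	};

	static char slot[4096];            /* one pretend fixmap slot */

	static void *take_slot(struct vpage *p)
	{
		(void)p;                   /* a real kernel programs a PTE here */
		return slot;
	}

	static void *map_for_kernel(struct vpage *p)
	{
		if (p->direct_map)         /* mirrors: if (!PageHighMem(page)) */
			return p->direct_map;
		return take_slot(p);       /* mirrors: kmap_high()/kmap_atomic_prot() */
	}

	int main(void)
	{
		char lowmem_backing[16];
		struct vpage low = { lowmem_backing }, high = { 0 };

		printf("low:  %p (no mapping work)\n", map_for_kernel(&low));
		printf("high: %p (borrowed slot)\n", map_for_kernel(&high));
		return 0;
	}
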
page               20 arch/powerpc/include/asm/hugetlb.h void flush_dcache_icache_hugepage(struct page *page);
page               63 arch/powerpc/include/asm/hugetlb.h static inline void arch_clear_hugepage_flags(struct page *page)
page              800 arch/powerpc/include/asm/io.h static inline phys_addr_t page_to_phys(struct page *page)
page              802 arch/powerpc/include/asm/io.h 	unsigned long pfn = page_to_pfn(page);
page              831 arch/powerpc/include/asm/io.h #define page_to_bus(page)	(page_to_phys(page) + PCI_DRAM_OFFSET)
page              269 arch/powerpc/include/asm/iommu.h 				 struct page *page, unsigned long offset,
page              205 arch/powerpc/include/asm/kvm_host.h 	struct page *pages[0];
page              199 arch/powerpc/include/asm/kvm_ppc.h extern struct page *kvm_alloc_hpt_cma(unsigned long nr_pages);
page              200 arch/powerpc/include/asm/kvm_ppc.h extern void kvm_free_hpt_cma(struct page *page, unsigned long nr_pages);
page              881 arch/powerpc/include/asm/kvm_ppc.h 	struct page *page;
page              890 arch/powerpc/include/asm/kvm_ppc.h 	page = pfn_to_page(pfn);
page              891 arch/powerpc/include/asm/kvm_ppc.h 	if (!test_bit(PG_arch_1, &page->flags)) {
page              892 arch/powerpc/include/asm/kvm_ppc.h 		flush_dcache_icache_page(page);
page              893 arch/powerpc/include/asm/kvm_ppc.h 		set_bit(PG_arch_1, &page->flags);
page               22 arch/powerpc/include/asm/mmu_context.h extern int isolate_lru_page(struct page *page);	/* from internal.h */
page               77 arch/powerpc/include/asm/nohash/64/pgtable-4k.h extern struct page *pgd_page(pgd_t pgd);
page               77 arch/powerpc/include/asm/nohash/64/pgtable.h #define vmemmap			((struct page *)VMEMMAP_BASE)
page              148 arch/powerpc/include/asm/nohash/64/pgtable.h extern struct page *pmd_page(pmd_t pmd);
page              166 arch/powerpc/include/asm/nohash/64/pgtable.h extern struct page *pud_page(pud_t pud);
page              320 arch/powerpc/include/asm/page.h struct page;
page              321 arch/powerpc/include/asm/page.h extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
page              323 arch/powerpc/include/asm/page.h 		struct page *p);
page              327 arch/powerpc/include/asm/page.h void arch_free_page(struct page *page, int order);
page              134 arch/powerpc/include/asm/perf_event_server.h 				struct device_attribute *attr, char *page);
page               48 arch/powerpc/include/asm/pgtable.h #define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
page              240 arch/powerpc/include/asm/plpar_wrappers.h 		unsigned long ioba, unsigned long page, unsigned long count)
page              242 arch/powerpc/include/asm/plpar_wrappers.h 	return plpar_hcall_norets(H_PUT_TCE_INDIRECT, liobn, ioba, page, count);
page              428 arch/powerpc/include/asm/uaccess.h extern void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
page               65 arch/powerpc/kernel/dma-iommu.c static dma_addr_t dma_iommu_map_page(struct device *dev, struct page *page,
page               71 arch/powerpc/kernel/dma-iommu.c 		return dma_direct_map_page(dev, page, offset, size, direction,
page               73 arch/powerpc/kernel/dma-iommu.c 	return iommu_map_page(dev, get_iommu_table_base(dev), page, offset,
page              678 arch/powerpc/kernel/fadump.c 	struct page *page;
page              686 arch/powerpc/kernel/fadump.c 	page = virt_to_page(vaddr);
page              688 arch/powerpc/kernel/fadump.c 		mark_page_reserved(page + i);
page              294 arch/powerpc/kernel/iommu.c 			      void *page, unsigned int npages,
page              313 arch/powerpc/kernel/iommu.c 				      (unsigned long)page &
page              687 arch/powerpc/kernel/iommu.c 	struct page *page;
page              696 arch/powerpc/kernel/iommu.c 	page = alloc_pages_node(nid, GFP_KERNEL, get_order(sz));
page              697 arch/powerpc/kernel/iommu.c 	if (!page)
page              699 arch/powerpc/kernel/iommu.c 	tbl->it_map = page_address(page);
page              795 arch/powerpc/kernel/iommu.c 			  struct page *page, unsigned long offset, size_t size,
page              806 arch/powerpc/kernel/iommu.c 	vaddr = page_address(page) + offset;
page              860 arch/powerpc/kernel/iommu.c 	struct page *page;
page              880 arch/powerpc/kernel/iommu.c 	page = alloc_pages_node(node, flag, order);
page              881 arch/powerpc/kernel/iommu.c 	if (!page)
page              883 arch/powerpc/kernel/iommu.c 	ret = page_address(page);
page               45 arch/powerpc/kernel/optprobes.c static void __ppc_free_insn_page(void *page __maybe_unused)
page               50 arch/powerpc/kernel/vdso.c static struct page **vdso32_pagelist;
page               62 arch/powerpc/kernel/vdso.c static struct page **vdso64_pagelist;
page               75 arch/powerpc/kernel/vdso.c 	u8			page[PAGE_SIZE];
page              129 arch/powerpc/kernel/vdso.c 	struct page **vdso_pagelist;
page              770 arch/powerpc/kernel/vdso.c 	vdso32_pagelist = kcalloc(vdso32_pages + 2, sizeof(struct page *),
page              774 arch/powerpc/kernel/vdso.c 		struct page *pg = virt_to_page(vdso32_kbase + i*PAGE_SIZE);
page              783 arch/powerpc/kernel/vdso.c 	vdso64_pagelist = kcalloc(vdso64_pages + 2, sizeof(struct page *),
page              787 arch/powerpc/kernel/vdso.c 		struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE);
page              103 arch/powerpc/kvm/book3s_32_mmu.c 	u32 page, hash, pteg, htabmask;
page              106 arch/powerpc/kvm/book3s_32_mmu.c 	page = (eaddr & 0x0FFFFFFF) >> 12;
page              109 arch/powerpc/kvm/book3s_32_mmu.c 	hash = ((sr_vsid(sre) ^ page) << 6);
page              109 arch/powerpc/kvm/book3s_32_mmu_host.c 	u32 page, hash;
page              112 arch/powerpc/kvm/book3s_32_mmu_host.c 	page = (eaddr & ~ESID_MASK) >> 12;
page              114 arch/powerpc/kvm/book3s_32_mmu_host.c 	hash = ((vsid ^ page) << 6);
page              155 arch/powerpc/kvm/book3s_64_mmu.c 		page, vcpu_book3s->sdr1, pteg, slbe->vsid);
page               74 arch/powerpc/kvm/book3s_64_mmu_hv.c 	struct page *page = NULL;
page               81 arch/powerpc/kvm/book3s_64_mmu_hv.c 	page = kvm_alloc_hpt_cma(1ul << (order - PAGE_SHIFT));
page               82 arch/powerpc/kvm/book3s_64_mmu_hv.c 	if (page) {
page               83 arch/powerpc/kvm/book3s_64_mmu_hv.c 		hpt = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
page              102 arch/powerpc/kvm/book3s_64_mmu_hv.c 			kvm_free_hpt_cma(page, 1 << (order - PAGE_SHIFT));
page              504 arch/powerpc/kvm/book3s_64_mmu_hv.c 	struct page *page, *pages[1];
page              586 arch/powerpc/kvm/book3s_64_mmu_hv.c 	page = NULL;
page              609 arch/powerpc/kvm/book3s_64_mmu_hv.c 		page = pages[0];
page              610 arch/powerpc/kvm/book3s_64_mmu_hv.c 		pfn = page_to_pfn(page);
page              611 arch/powerpc/kvm/book3s_64_mmu_hv.c 		if (PageHuge(page)) {
page              612 arch/powerpc/kvm/book3s_64_mmu_hv.c 			page = compound_head(page);
page              613 arch/powerpc/kvm/book3s_64_mmu_hv.c 			pte_size <<= compound_order(page);
page              721 arch/powerpc/kvm/book3s_64_mmu_hv.c 	if (page && hpte_is_writable(r))
page              722 arch/powerpc/kvm/book3s_64_mmu_hv.c 		SetPageDirty(page);
page              727 arch/powerpc/kvm/book3s_64_mmu_hv.c 	if (page) {
page             1175 arch/powerpc/kvm/book3s_64_mmu_hv.c 	struct page *page, *pages[1];
page             1188 arch/powerpc/kvm/book3s_64_mmu_hv.c 	page = pages[0];
page             1194 arch/powerpc/kvm/book3s_64_mmu_hv.c 	return page_address(page) + offset;
page             1204 arch/powerpc/kvm/book3s_64_mmu_hv.c 	struct page *page = virt_to_page(va);
page             1209 arch/powerpc/kvm/book3s_64_mmu_hv.c 	put_page(page);
page              773 arch/powerpc/kvm/book3s_64_mmu_radix.c 	struct page *page = NULL;
page              794 arch/powerpc/kvm/book3s_64_mmu_radix.c 	if (!kvm_ro && __get_user_pages_fast(hva, 1, 1, &page) == 1) {
page              804 arch/powerpc/kvm/book3s_64_mmu_radix.c 		page = NULL;
page              806 arch/powerpc/kvm/book3s_64_mmu_radix.c 			page = pfn_to_page(pfn);
page              807 arch/powerpc/kvm/book3s_64_mmu_radix.c 			if (PageReserved(page))
page              808 arch/powerpc/kvm/book3s_64_mmu_radix.c 				page = NULL;
page              824 arch/powerpc/kvm/book3s_64_mmu_radix.c 		if (page)
page              825 arch/powerpc/kvm/book3s_64_mmu_radix.c 			put_page(page);
page              872 arch/powerpc/kvm/book3s_64_mmu_radix.c 	if (page) {
page              874 arch/powerpc/kvm/book3s_64_mmu_radix.c 			set_page_dirty_lock(page);
page              875 arch/powerpc/kvm/book3s_64_mmu_radix.c 		put_page(page);
page               44 arch/powerpc/kvm/book3s_64_vio.c 			(tce_pages * sizeof(struct page *));
page              190 arch/powerpc/kvm/book3s_64_vio.c static struct page *kvm_spapr_get_tce_page(struct kvmppc_spapr_tce_table *stt,
page              193 arch/powerpc/kvm/book3s_64_vio.c 	struct page *page = stt->pages[sttpage];
page              195 arch/powerpc/kvm/book3s_64_vio.c 	if (page)
page              196 arch/powerpc/kvm/book3s_64_vio.c 		return page;
page              199 arch/powerpc/kvm/book3s_64_vio.c 	page = stt->pages[sttpage];
page              200 arch/powerpc/kvm/book3s_64_vio.c 	if (!page) {
page              201 arch/powerpc/kvm/book3s_64_vio.c 		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
page              202 arch/powerpc/kvm/book3s_64_vio.c 		WARN_ON_ONCE(!page);
page              203 arch/powerpc/kvm/book3s_64_vio.c 		if (page)
page              204 arch/powerpc/kvm/book3s_64_vio.c 			stt->pages[sttpage] = page;
page              208 arch/powerpc/kvm/book3s_64_vio.c 	return page;
page              214 arch/powerpc/kvm/book3s_64_vio.c 	struct page *page;
page              219 arch/powerpc/kvm/book3s_64_vio.c 	page = kvm_spapr_get_tce_page(stt, vmf->pgoff);
page              220 arch/powerpc/kvm/book3s_64_vio.c 	if (!page)
page              223 arch/powerpc/kvm/book3s_64_vio.c 	get_page(page);
page              224 arch/powerpc/kvm/book3s_64_vio.c 	vmf->page = page;
page              288 arch/powerpc/kvm/book3s_64_vio.c 	stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *),
page              391 arch/powerpc/kvm/book3s_64_vio.c 	struct page *page;
page              397 arch/powerpc/kvm/book3s_64_vio.c 	page = stt->pages[sttpage];
page              399 arch/powerpc/kvm/book3s_64_vio.c 	if (!page) {
page              404 arch/powerpc/kvm/book3s_64_vio.c 		page = kvm_spapr_get_tce_page(stt, sttpage);
page              405 arch/powerpc/kvm/book3s_64_vio.c 		if (!page)
page              408 arch/powerpc/kvm/book3s_64_vio.c 	tbl = page_to_virt(page);
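
kvm_spapr_get_tce_page() above is the double-checked lazy-allocation idiom: an unlocked read satisfies the common case, and only a miss takes a lock, re-checks the slot, and populates it. A stand-alone sketch of the idiom under assumed names (a pthread mutex stands in for whatever lock the kernel takes in the lines elided between these excerpts):

	/* Double-checked lazy table population, after kvm_spapr_get_tce_page(). */
	#include <pthread.h>
	#include <stdlib.h>

	#define NSLOTS 64

	static void *slots[NSLOTS];
	static pthread_mutex_t alloc_lock = PTHREAD_MUTEX_INITIALIZER;

	static void *get_slot_page(unsigned int idx)
	{
		/* Unlocked fast path; a real implementation wants an
		 * atomic/acquire load here rather than a plain read. */
		void *p = slots[idx];
		if (p)
			return p;

		pthread_mutex_lock(&alloc_lock);
		p = slots[idx];                    /* re-check under the lock */
		if (!p) {
			p = calloc(1, 4096);       /* alloc_page(GFP_KERNEL | __GFP_ZERO) */
			if (p)
				slots[idx] = p;
		}
		pthread_mutex_unlock(&alloc_lock);
		return p;                          /* NULL only on allocation failure */
	}
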
page              158 arch/powerpc/kvm/book3s_64_vio_hv.c static u64 *kvmppc_page_address(struct page *page)
page              163 arch/powerpc/kvm/book3s_64_vio_hv.c 	return (u64 *) page_address(page);
page              174 arch/powerpc/kvm/book3s_64_vio_hv.c 	struct page *page;
page              178 arch/powerpc/kvm/book3s_64_vio_hv.c 	page = stt->pages[idx / TCES_PER_PAGE];
page              183 arch/powerpc/kvm/book3s_64_vio_hv.c 	WARN_ON_ONCE_RM(!page);
page              184 arch/powerpc/kvm/book3s_64_vio_hv.c 	tbl = kvmppc_page_address(page);
page              659 arch/powerpc/kvm/book3s_64_vio_hv.c 	struct page *page;
page              671 arch/powerpc/kvm/book3s_64_vio_hv.c 	page = stt->pages[idx / TCES_PER_PAGE];
page              672 arch/powerpc/kvm/book3s_64_vio_hv.c 	if (!page) {
page              676 arch/powerpc/kvm/book3s_64_vio_hv.c 	tbl = (u64 *)page_address(page);
page               72 arch/powerpc/kvm/book3s_hv_builtin.c struct page *kvm_alloc_hpt_cma(unsigned long nr_pages)
page               81 arch/powerpc/kvm/book3s_hv_builtin.c void kvm_free_hpt_cma(struct page *page, unsigned long nr_pages)
page               83 arch/powerpc/kvm/book3s_hv_builtin.c 	cma_release(kvm_cma, page, nr_pages);
page              901 arch/powerpc/kvm/book3s_hv_nested.c 	unsigned long page;
page              903 arch/powerpc/kvm/book3s_hv_nested.c 	for (page = 0; page < free->npages; page++) {
page              904 arch/powerpc/kvm/book3s_hv_nested.c 		unsigned long rmap, *rmapp = &free->arch.rmap[page];
page              627 arch/powerpc/kvm/book3s_pr.c 	struct page *hpage;
page              629 arch/powerpc/kvm/book3s_pr.c 	u32 *page;
page              641 arch/powerpc/kvm/book3s_pr.c 	page = kmap_atomic(hpage);
page              645 arch/powerpc/kvm/book3s_pr.c 		if ((be32_to_cpu(page[i]) & 0xff0007ff) == INS_DCBZ)
page              646 arch/powerpc/kvm/book3s_pr.c 			page[i] &= cpu_to_be32(0xfffffff7);
page              648 arch/powerpc/kvm/book3s_pr.c 	kunmap_atomic(page);
page              237 arch/powerpc/kvm/book3s_xive_native.c 	u64 page;
page              263 arch/powerpc/kvm/book3s_xive_native.c 	page = page_offset % 2 ? xd->eoi_page : xd->trig_page;
page              266 arch/powerpc/kvm/book3s_xive_native.c 	if (WARN_ON(!page)) {
page              272 arch/powerpc/kvm/book3s_xive_native.c 	vmf_insert_pfn(vma, vmf->address, page >> PAGE_SHIFT);
page              563 arch/powerpc/kvm/book3s_xive_native.c 	struct page *page;
page              648 arch/powerpc/kvm/book3s_xive_native.c 	page = gfn_to_page(kvm, gfn);
page              649 arch/powerpc/kvm/book3s_xive_native.c 	if (is_error_page(page)) {
page              655 arch/powerpc/kvm/book3s_xive_native.c 	qaddr = page_to_virt(page) + (kvm_eq.qaddr & ~PAGE_MASK);
page              675 arch/powerpc/kvm/book3s_xive_native.c 		put_page(page);
page               82 arch/powerpc/kvm/e500.h 	struct page **shared_tlb_pages;
page              741 arch/powerpc/kvm/e500_mmu.c 	struct page **pages;
page              634 arch/powerpc/kvm/e500_mmu_host.c 	struct page *page;
page              707 arch/powerpc/kvm/e500_mmu_host.c 	page = pfn_to_page(pfn);
page              708 arch/powerpc/kvm/e500_mmu_host.c 	eaddr = (unsigned long)kmap_atomic(page);
page               54 arch/powerpc/lib/pmem.c void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
page               57 arch/powerpc/lib/pmem.c 	memcpy_flushcache(to, page_to_virt(page) + offset, len);
page             1032 arch/powerpc/mm/book3s64/hash_utils.c 	vmemmap = (struct page *)H_VMEMMAP_START;
page             1095 arch/powerpc/mm/book3s64/hash_utils.c 	struct page *page;
page             1100 arch/powerpc/mm/book3s64/hash_utils.c 	page = pte_page(pte);
page             1103 arch/powerpc/mm/book3s64/hash_utils.c 	if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
page             1105 arch/powerpc/mm/book3s64/hash_utils.c 			flush_dcache_icache_page(page);
page             1106 arch/powerpc/mm/book3s64/hash_utils.c 			set_bit(PG_arch_1, &page->flags);
page             1917 arch/powerpc/mm/book3s64/hash_utils.c void __kernel_map_pages(struct page *page, int numpages, int enable)
page             1923 arch/powerpc/mm/book3s64/hash_utils.c 	for (i = 0; i < numpages; i++, page++) {
page             1924 arch/powerpc/mm/book3s64/hash_utils.c 		vaddr = (unsigned long)page_address(page);
page               43 arch/powerpc/mm/book3s64/iommu_api.c 		struct page **hpages;	/* vmalloc'ed */
page              126 arch/powerpc/mm/book3s64/iommu_api.c 		struct page *page = mem->hpages[i];
page              132 arch/powerpc/mm/book3s64/iommu_api.c 		if ((mem->pageshift > PAGE_SHIFT) && PageHuge(page))
page              133 arch/powerpc/mm/book3s64/iommu_api.c 			pageshift = page_shift(compound_head(page));
page              139 arch/powerpc/mm/book3s64/iommu_api.c 		mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT;
page              202 arch/powerpc/mm/book3s64/iommu_api.c 	struct page *page = NULL;
page              211 arch/powerpc/mm/book3s64/iommu_api.c 		page = pfn_to_page(mem->hpas[i] >> PAGE_SHIFT);
page              212 arch/powerpc/mm/book3s64/iommu_api.c 		if (!page)
page              216 arch/powerpc/mm/book3s64/iommu_api.c 			SetPageDirty(page);
page              218 arch/powerpc/mm/book3s64/iommu_api.c 		put_page(page);
page              228 arch/powerpc/mm/book3s64/mmu_context.c 	struct page *page;
page              230 arch/powerpc/mm/book3s64/mmu_context.c 	page = virt_to_page(pmd_frag);
page              234 arch/powerpc/mm/book3s64/mmu_context.c 	if (atomic_sub_and_test(PMD_FRAG_NR - count, &page->pt_frag_refcount)) {
page              235 arch/powerpc/mm/book3s64/mmu_context.c 		pgtable_pmd_page_dtor(page);
page              236 arch/powerpc/mm/book3s64/mmu_context.c 		__free_page(page);
page              136 arch/powerpc/mm/book3s64/pgtable.c pmd_t mk_pmd(struct page *page, pgprot_t pgprot)
page              138 arch/powerpc/mm/book3s64/pgtable.c 	return pfn_pmd(page_to_pfn(page), pgprot);
page              291 arch/powerpc/mm/book3s64/pgtable.c 	struct page *page;
page              296 arch/powerpc/mm/book3s64/pgtable.c 	page = alloc_page(gfp);
page              297 arch/powerpc/mm/book3s64/pgtable.c 	if (!page)
page              299 arch/powerpc/mm/book3s64/pgtable.c 	if (!pgtable_pmd_page_ctor(page)) {
page              300 arch/powerpc/mm/book3s64/pgtable.c 		__free_pages(page, 0);
page              304 arch/powerpc/mm/book3s64/pgtable.c 	atomic_set(&page->pt_frag_refcount, 1);
page              306 arch/powerpc/mm/book3s64/pgtable.c 	ret = page_address(page);
page              321 arch/powerpc/mm/book3s64/pgtable.c 		atomic_set(&page->pt_frag_refcount, PMD_FRAG_NR);
page              342 arch/powerpc/mm/book3s64/pgtable.c 	struct page *page = virt_to_page(pmd);
page              344 arch/powerpc/mm/book3s64/pgtable.c 	BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0);
page              345 arch/powerpc/mm/book3s64/pgtable.c 	if (atomic_dec_and_test(&page->pt_frag_refcount)) {
page              346 arch/powerpc/mm/book3s64/pgtable.c 		pgtable_pmd_page_dtor(page);
page              347 arch/powerpc/mm/book3s64/pgtable.c 		__free_page(page);
page              584 arch/powerpc/mm/book3s64/radix_pgtable.c 	vmemmap = (struct page *)RADIX_VMEMMAP_START;
page               59 arch/powerpc/mm/dma-noncoherent.c static inline void __dma_sync_page_highmem(struct page *page,
page               71 arch/powerpc/mm/dma-noncoherent.c 		start = (unsigned long)kmap_atomic(page + seg_nr) + seg_offset;
page               96 arch/powerpc/mm/dma-noncoherent.c 	struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
page              100 arch/powerpc/mm/dma-noncoherent.c 	__dma_sync_page_highmem(page, offset, size, dir);
page              102 arch/powerpc/mm/dma-noncoherent.c 	unsigned long start = (unsigned long)page_address(page) + offset;
page              119 arch/powerpc/mm/dma-noncoherent.c void arch_dma_prep_coherent(struct page *page, size_t size)
page              121 arch/powerpc/mm/dma-noncoherent.c 	unsigned long kaddr = (unsigned long)page_address(page);
page               33 arch/powerpc/mm/highmem.c void *kmap_atomic_prot(struct page *page, pgprot_t prot)
page               40 arch/powerpc/mm/highmem.c 	if (!PageHighMem(page))
page               41 arch/powerpc/mm/highmem.c 		return page_address(page);
page               47 arch/powerpc/mm/highmem.c 	__set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot), 1);
page              496 arch/powerpc/mm/hugetlbpage.c struct page *follow_huge_pd(struct vm_area_struct *vma,
page              502 arch/powerpc/mm/hugetlbpage.c 	struct page *page = NULL;
page              518 arch/powerpc/mm/hugetlbpage.c 		page = pte_page(*ptep);
page              519 arch/powerpc/mm/hugetlbpage.c 		page += ((address & mask) >> PAGE_SHIFT);
page              521 arch/powerpc/mm/hugetlbpage.c 			get_page(page);
page              530 arch/powerpc/mm/hugetlbpage.c 	return page;
page              670 arch/powerpc/mm/hugetlbpage.c void flush_dcache_icache_hugepage(struct page *page)
page              675 arch/powerpc/mm/hugetlbpage.c 	BUG_ON(!PageCompound(page));
page              677 arch/powerpc/mm/hugetlbpage.c 	for (i = 0; i < compound_nr(page); i++) {
page              678 arch/powerpc/mm/hugetlbpage.c 		if (!PageHighMem(page)) {
page              679 arch/powerpc/mm/hugetlbpage.c 			__flush_dcache_icache(page_address(page+i));
page              681 arch/powerpc/mm/hugetlbpage.c 			start = kmap_atomic(page+i);
page               78 arch/powerpc/mm/init_64.c static unsigned long __meminit vmemmap_section_start(unsigned long page)
page               80 arch/powerpc/mm/init_64.c 	unsigned long offset = page - ((unsigned long)(vmemmap));
page               83 arch/powerpc/mm/init_64.c 	return (offset / sizeof(struct page)) & PAGE_SECTION_MASK;
page               96 arch/powerpc/mm/init_64.c 	for (; start < end; start += (PAGES_PER_SECTION * sizeof(struct page)))
page               97 arch/powerpc/mm/init_64.c 		if (pfn_valid(page_to_pfn((struct page *)start)))
page              178 arch/powerpc/mm/init_64.c 	unsigned long nr_pfn = page_size / sizeof(struct page);
page              179 arch/powerpc/mm/init_64.c 	unsigned long start_pfn = page_to_pfn((struct page *)start);
page              290 arch/powerpc/mm/init_64.c 		struct page *page;
page              304 arch/powerpc/mm/init_64.c 		page = pfn_to_page(addr >> PAGE_SHIFT);
page              310 arch/powerpc/mm/init_64.c 		} else if (PageReserved(page)) {
page              320 arch/powerpc/mm/init_64.c 					free_reserved_page(page++);
page              331 arch/powerpc/mm/init_64.c 				  struct page *start_page, unsigned long size)
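
Several excerpts in this listing (page_to_pa, __page_address, page_to_phys, and the vmemmap_section_start arithmetic just above) are all the same computation: struct page pointers index a virtually contiguous array, so a pointer difference is a pfn and shifting it by PAGE_SHIFT yields a physical address. A small runnable illustration with an ordinary array standing in for vmemmap/mem_map:

	/* pfn <-> struct page arithmetic, as in page_to_pfn()/page_to_phys(). */
	#include <stdio.h>

	#define PAGE_SHIFT 12

	struct fpage { unsigned long flags; };     /* stand-in for struct page */

	static struct fpage fake_vmemmap[32];      /* pretend map of 32 frames */

	static unsigned long page_to_pfn(const struct fpage *p)
	{
		return (unsigned long)(p - fake_vmemmap); /* array index is the pfn */
	}

	static struct fpage *pfn_to_page(unsigned long pfn)
	{
		return &fake_vmemmap[pfn];
	}

	int main(void)
	{
		struct fpage *p = pfn_to_page(7);

		/* page_to_phys()/page_to_pa() analogue: pfn << PAGE_SHIFT */
		printf("pfn=%lu phys=%#lx\n", page_to_pfn(p),
		       page_to_pfn(p) << PAGE_SHIFT);
		return 0;
	}
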
page              307 arch/powerpc/mm/mem.c 			struct page *page = pfn_to_page(pfn);
page              309 arch/powerpc/mm/mem.c 				free_highmem_page(page);
page              472 arch/powerpc/mm/mem.c void flush_dcache_page(struct page *page)
page              477 arch/powerpc/mm/mem.c 	if (test_bit(PG_arch_1, &page->flags))
page              478 arch/powerpc/mm/mem.c 		clear_bit(PG_arch_1, &page->flags);
page              482 arch/powerpc/mm/mem.c void flush_dcache_icache_page(struct page *page)
page              485 arch/powerpc/mm/mem.c 	if (PageCompound(page)) {
page              486 arch/powerpc/mm/mem.c 		flush_dcache_icache_hugepage(page);
page              492 arch/powerpc/mm/mem.c 	__flush_dcache_icache(page_address(page));
page              495 arch/powerpc/mm/mem.c 		void *start = kmap_atomic(page);
page              499 arch/powerpc/mm/mem.c 		unsigned long addr = page_to_pfn(page) << PAGE_SHIFT;
page              539 arch/powerpc/mm/mem.c void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
page              541 arch/powerpc/mm/mem.c 	clear_page(page);
page              553 arch/powerpc/mm/mem.c 		    struct page *pg)
page              575 arch/powerpc/mm/mem.c void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
page              580 arch/powerpc/mm/mem.c 	maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
page              582 arch/powerpc/mm/mem.c 	kunmap(page);
page               21 arch/powerpc/mm/pgtable-frag.c 	struct page *page;
page               23 arch/powerpc/mm/pgtable-frag.c 	page = virt_to_page(pte_frag);
page               27 arch/powerpc/mm/pgtable-frag.c 	if (atomic_sub_and_test(PTE_FRAG_NR - count, &page->pt_frag_refcount)) {
page               28 arch/powerpc/mm/pgtable-frag.c 		pgtable_pte_page_dtor(page);
page               29 arch/powerpc/mm/pgtable-frag.c 		__free_page(page);
page               58 arch/powerpc/mm/pgtable-frag.c 	struct page *page;
page               61 arch/powerpc/mm/pgtable-frag.c 		page = alloc_page(PGALLOC_GFP | __GFP_ACCOUNT);
page               62 arch/powerpc/mm/pgtable-frag.c 		if (!page)
page               64 arch/powerpc/mm/pgtable-frag.c 		if (!pgtable_pte_page_ctor(page)) {
page               65 arch/powerpc/mm/pgtable-frag.c 			__free_page(page);
page               69 arch/powerpc/mm/pgtable-frag.c 		page = alloc_page(PGALLOC_GFP);
page               70 arch/powerpc/mm/pgtable-frag.c 		if (!page)
page               74 arch/powerpc/mm/pgtable-frag.c 	atomic_set(&page->pt_frag_refcount, 1);
page               76 arch/powerpc/mm/pgtable-frag.c 	ret = page_address(page);
page               90 arch/powerpc/mm/pgtable-frag.c 		atomic_set(&page->pt_frag_refcount, PTE_FRAG_NR);
page              111 arch/powerpc/mm/pgtable-frag.c 	struct page *page = virt_to_page(table);
page              113 arch/powerpc/mm/pgtable-frag.c 	BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0);
page              114 arch/powerpc/mm/pgtable-frag.c 	if (atomic_dec_and_test(&page->pt_frag_refcount)) {
page              116 arch/powerpc/mm/pgtable-frag.c 			pgtable_pte_page_dtor(page);
page              117 arch/powerpc/mm/pgtable-frag.c 		__free_page(page);
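
The pgtable-frag.c lines sketch powerpc's page-table fragment allocator: one full page is carved into PTE_FRAG_NR small tables, page->pt_frag_refcount counts the live fragments, and the backing page is returned only when the last fragment goes (with the unhanded remainder settled at teardown via atomic_sub_and_test, as in pte_frag_destroy above). A simplified, single-threaded miniature; fragment size and count are made up, and the kernel derives the owning page with virt_to_page() rather than passing it:

	/* Page-fragment allocator in miniature, after powerpc pgtable-frag.c. */
	#include <stdatomic.h>
	#include <stdlib.h>

	#define FRAG_NR   4                /* stand-in for PTE_FRAG_NR */
	#define FRAG_SIZE 1024             /* 4096 / FRAG_NR */

	struct frag_page {
		atomic_int refcount;       /* plays page->pt_frag_refcount */
		char mem[FRAG_NR * FRAG_SIZE];
	};

	static struct frag_page *cur;      /* page currently being carved up */
	static int next_frag;

	static void *frag_alloc(void)
	{
		if (!cur || next_frag == FRAG_NR) {
			cur = malloc(sizeof(*cur));        /* alloc_page() analogue */
			if (!cur)
				return NULL;
			/* Count every fragment up front, handed out or not. */
			atomic_init(&cur->refcount, FRAG_NR);
			next_frag = 0;
		}
		return cur->mem + FRAG_SIZE * next_frag++;
	}

	static void frag_free(struct frag_page *owner)
	{
		/* atomic_dec_and_test(&page->pt_frag_refcount) analogue */
		if (atomic_fetch_sub(&owner->refcount, 1) == 1)
			free(owner);       /* last fragment gone: release the page */
	}
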
page               53 arch/powerpc/mm/pgtable.c static struct page *maybe_pte_to_page(pte_t pte)
page               56 arch/powerpc/mm/pgtable.c 	struct page *page;
page               60 arch/powerpc/mm/pgtable.c 	page = pfn_to_page(pfn);
page               61 arch/powerpc/mm/pgtable.c 	if (PageReserved(page))
page               63 arch/powerpc/mm/pgtable.c 	return page;
page               82 arch/powerpc/mm/pgtable.c 		struct page *pg = maybe_pte_to_page(pte);
page              105 arch/powerpc/mm/pgtable.c 	struct page *pg;
page              137 arch/powerpc/mm/pgtable.c 	struct page *pg;
page              161 arch/powerpc/mm/pgtable_32.c static int __change_page_attr_noflush(struct page *page, pgprot_t prot)
page              167 arch/powerpc/mm/pgtable_32.c 	BUG_ON(PageHighMem(page));
page              168 arch/powerpc/mm/pgtable_32.c 	address = (unsigned long)page_address(page);
page              174 arch/powerpc/mm/pgtable_32.c 	__set_pte_at(&init_mm, address, kpte, mk_pte(page, prot), 0);
page              185 arch/powerpc/mm/pgtable_32.c static int change_page_attr(struct page *page, int numpages, pgprot_t prot)
page              189 arch/powerpc/mm/pgtable_32.c 	struct page *start = page;
page              192 arch/powerpc/mm/pgtable_32.c 	for (i = 0; i < numpages; i++, page++) {
page              193 arch/powerpc/mm/pgtable_32.c 		err = __change_page_attr_noflush(page, prot);
page              200 arch/powerpc/mm/pgtable_32.c 			       (unsigned long)page_address(page));
page              206 arch/powerpc/mm/pgtable_32.c 	struct page *page = virt_to_page(_sinittext);
page              213 arch/powerpc/mm/pgtable_32.c 		change_page_attr(page, numpages, PAGE_KERNEL);
page              219 arch/powerpc/mm/pgtable_32.c 	struct page *page;
page              228 arch/powerpc/mm/pgtable_32.c 	page = virt_to_page(_stext);
page              232 arch/powerpc/mm/pgtable_32.c 	change_page_attr(page, numpages, PAGE_KERNEL_ROX);
page              237 arch/powerpc/mm/pgtable_32.c 	page = virt_to_page(__start_rodata);
page              241 arch/powerpc/mm/pgtable_32.c 	change_page_attr(page, numpages, PAGE_KERNEL_RO);
page              249 arch/powerpc/mm/pgtable_32.c void __kernel_map_pages(struct page *page, int numpages, int enable)
page              251 arch/powerpc/mm/pgtable_32.c 	if (PageHighMem(page))
page              254 arch/powerpc/mm/pgtable_32.c 	change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));
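
change_page_attr() above shows the usual shape of a kernel protection change: rewrite each page's PTE without flushing, then issue one TLB flush over the whole affected virtual range (which is also how __kernel_map_pages toggles mappings for debugging). A sketch of that loop-then-flush structure; set_pte_prot and tlb_flush_range are invented stubs, not kernel interfaces:

	/* Batch attribute change: per-page PTE rewrite, one range flush after. */
	#include <stdio.h>

	#define PAGE_SIZE 4096UL

	static int set_pte_prot(unsigned long vaddr, unsigned int prot)
	{
		printf("pte @%#lx -> prot %#x\n", vaddr, prot);   /* stub */
		return 0;
	}

	static void tlb_flush_range(unsigned long start, unsigned long end)
	{
		printf("tlb flush %#lx-%#lx\n", start, end);      /* stub */
	}

	static int change_prot(unsigned long start, int numpages, unsigned int prot)
	{
		unsigned long addr = start;
		int i, err = 0;

		for (i = 0; i < numpages; i++, addr += PAGE_SIZE) {
			err = set_pte_prot(addr, prot); /* __change_page_attr_noflush */
			if (err)
				break;
		}
		tlb_flush_range(start, addr);           /* flush what we changed */
		return err;
	}
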
page               94 arch/powerpc/mm/pgtable_64.c struct page *vmemmap;
page              104 arch/powerpc/mm/pgtable_64.c struct page *pgd_page(pgd_t pgd)
page              114 arch/powerpc/mm/pgtable_64.c struct page *pud_page(pud_t pud)
page              127 arch/powerpc/mm/pgtable_64.c struct page *pmd_page(pmd_t pmd)
page             2008 arch/powerpc/perf/core-book3s.c 				struct device_attribute *attr, char *page)
page             2014 arch/powerpc/perf/core-book3s.c 	return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
page              297 arch/powerpc/perf/hv-24x7.c static long h_get_24x7_catalog_page(char page[], u64 version, u32 index)
page              299 arch/powerpc/perf/hv-24x7.c 	return h_get_24x7_catalog_page_(virt_to_phys(page),
page              697 arch/powerpc/perf/hv-24x7.c 	void *page = page_0;
page              703 arch/powerpc/perf/hv-24x7.c 	if (!page) {
page              708 arch/powerpc/perf/hv-24x7.c 	hret = h_get_24x7_catalog_page(page, 0, 0);
page              900 arch/powerpc/perf/hv-24x7.c 	kmem_cache_free(hv_page_cache, page);
page              914 arch/powerpc/perf/hv-24x7.c 	kmem_cache_free(hv_page_cache, page);
page              933 arch/powerpc/perf/hv-24x7.c 	void *page = kmem_cache_alloc(hv_page_cache, GFP_USER);
page              934 arch/powerpc/perf/hv-24x7.c 	struct hv_24x7_catalog_page_0 *page_0 = page;
page              936 arch/powerpc/perf/hv-24x7.c 	if (!page)
page              939 arch/powerpc/perf/hv-24x7.c 	hret = h_get_24x7_catalog_page(page, 0, 0);
page              956 arch/powerpc/perf/hv-24x7.c 		hret = h_get_24x7_catalog_page(page, catalog_version_num,
page              968 arch/powerpc/perf/hv-24x7.c 	memcpy(buf, page+offset_in_page, copy_len);
page              976 arch/powerpc/perf/hv-24x7.c 	kmem_cache_free(hv_page_cache, page);
page              986 arch/powerpc/perf/hv-24x7.c 			    char *page)
page              996 arch/powerpc/perf/hv-24x7.c 		n = sprintf(page, "%d: %s\n", d, str);
page             1001 arch/powerpc/perf/hv-24x7.c 		page += n;
page             1013 arch/powerpc/perf/hv-24x7.c 	void *page = kmem_cache_alloc(hv_page_cache, GFP_USER);	\
page             1014 arch/powerpc/perf/hv-24x7.c 	struct hv_24x7_catalog_page_0 *page_0 = page;		\
page             1015 arch/powerpc/perf/hv-24x7.c 	if (!page)						\
page             1017 arch/powerpc/perf/hv-24x7.c 	hret = h_get_24x7_catalog_page(page, 0, 0);		\
page             1024 arch/powerpc/perf/hv-24x7.c 	kmem_cache_free(hv_page_cache, page);			\
page               79 arch/powerpc/perf/hv-gpci.c 			    char *page)				\
page               86 arch/powerpc/perf/hv-gpci.c 	return sprintf(page, _format, caps._name);		\
page               92 arch/powerpc/perf/hv-gpci.c 				   char *page)
page               94 arch/powerpc/perf/hv-gpci.c 	return sprintf(page, "0x%x\n", COUNTER_INFO_VERSION_CURRENT);
page              580 arch/powerpc/perf/imc-pmu.c 	struct page *page;
page              591 arch/powerpc/perf/imc-pmu.c 	page = alloc_pages_node(nid,
page              594 arch/powerpc/perf/imc-pmu.c 	if (!page)
page              596 arch/powerpc/perf/imc-pmu.c 	mem_info->vbase = page_address(page);
page              854 arch/powerpc/perf/imc-pmu.c 		struct page *page;
page              859 arch/powerpc/perf/imc-pmu.c 		page = alloc_pages_node(nid,
page              862 arch/powerpc/perf/imc-pmu.c 		if (!page)
page              864 arch/powerpc/perf/imc-pmu.c 		local_mem = page_address(page);
page             1102 arch/powerpc/perf/imc-pmu.c 		struct page *page;
page             1104 arch/powerpc/perf/imc-pmu.c 		page = alloc_pages_node(phys_id,
page             1107 arch/powerpc/perf/imc-pmu.c 		if (!page)
page             1109 arch/powerpc/perf/imc-pmu.c 		local_mem = page_address(page);
page              184 arch/powerpc/platforms/512x/mpc512x_shared.c static inline void mpc512x_free_bootmem(struct page *page)
page              186 arch/powerpc/platforms/512x/mpc512x_shared.c 	BUG_ON(PageTail(page));
page              187 arch/powerpc/platforms/512x/mpc512x_shared.c 	BUG_ON(page_ref_count(page) > 1);
page              188 arch/powerpc/platforms/512x/mpc512x_shared.c 	free_reserved_page(page);
page              300 arch/powerpc/platforms/cell/iommu.c 	struct page *page;
page              310 arch/powerpc/platforms/cell/iommu.c 	page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(stab_size));
page              311 arch/powerpc/platforms/cell/iommu.c 	BUG_ON(!page);
page              312 arch/powerpc/platforms/cell/iommu.c 	iommu->stab = page_address(page);
page              320 arch/powerpc/platforms/cell/iommu.c 	struct page *page;
page              335 arch/powerpc/platforms/cell/iommu.c 	page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(ptab_size));
page              336 arch/powerpc/platforms/cell/iommu.c 	BUG_ON(!page);
page              338 arch/powerpc/platforms/cell/iommu.c 	ptab = page_address(page);
page              467 arch/powerpc/platforms/cell/iommu.c 	struct page *page;
page              509 arch/powerpc/platforms/cell/iommu.c 	page = alloc_pages_node(iommu->nid, GFP_KERNEL, 0);
page              510 arch/powerpc/platforms/cell/iommu.c 	BUG_ON(!page);
page              511 arch/powerpc/platforms/cell/iommu.c 	iommu->pad_page = page_address(page);
page              100 arch/powerpc/platforms/cell/ras.c 	struct page *pages;
page              305 arch/powerpc/platforms/powernv/opal-core.c 	struct page *page;
page              331 arch/powerpc/platforms/powernv/opal-core.c 	page = virt_to_page(oc_conf->opalcorebuf);
page              333 arch/powerpc/platforms/powernv/opal-core.c 		mark_page_reserved(page + i);
page               36 arch/powerpc/platforms/powernv/pci-ioda-tce.c 	struct page *tce_mem = NULL;
page             1791 arch/powerpc/platforms/powernv/pci-ioda.c 	struct page *table_pages;
page             2175 arch/powerpc/platforms/powernv/pci-ioda.c 	struct page *tce_mem = NULL;
page              560 arch/powerpc/platforms/ps3/system-bus.c static dma_addr_t ps3_sb_map_page(struct device *_dev, struct page *page,
page              567 arch/powerpc/platforms/ps3/system-bus.c 	void *ptr = page_address(page) + offset;
page              582 arch/powerpc/platforms/ps3/system-bus.c static dma_addr_t ps3_ioc0_map_page(struct device *_dev, struct page *page,
page              591 arch/powerpc/platforms/ps3/system-bus.c 	void *ptr = page_address(page) + offset;
page               85 arch/powerpc/platforms/pseries/cmm.c 	unsigned long page[CMM_NR_PAGES];
page              195 arch/powerpc/platforms/pseries/cmm.c 		pa->page[pa->index++] = addr;
page              224 arch/powerpc/platforms/pseries/cmm.c 		addr = pa->page[--pa->index];
page              510 arch/powerpc/platforms/pseries/cmm.c 			if (pa->page[idx] >= start && pa->page[idx] < end)
page              567 arch/powerpc/platforms/pseries/cmm.c 			if ((pa_curr->page[idx] < start_page) ||
page              568 arch/powerpc/platforms/pseries/cmm.c 			    (pa_curr->page[idx] >= end_page))
page              571 arch/powerpc/platforms/pseries/cmm.c 			plpar_page_set_active(__pa(pa_curr->page[idx]));
page              572 arch/powerpc/platforms/pseries/cmm.c 			free_page(pa_curr->page[idx]);
page              576 arch/powerpc/platforms/pseries/cmm.c 			pa_curr->page[idx] = pa_last->page[--pa_last->index];
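
The pseries CMM excerpts keep loaned page addresses in a fixed array plus index used as a stack, and delete arbitrary entries by overwriting them with the last element (pa_curr->page[idx] = pa_last->page[--pa_last->index]). That swap-with-last trick gives O(1) removal from an unordered set and is worth a tiny sketch of its own; all names are invented:

	/* Unordered address set: O(1) push, pop, and remove-by-index. */
	#include <assert.h>

	#define NR 1024

	struct addr_stack {
		unsigned long page[NR];
		long index;                /* number of live entries */
	};

	static void push(struct addr_stack *s, unsigned long addr)
	{
		assert(s->index < NR);
		s->page[s->index++] = addr;
	}

	static unsigned long pop(struct addr_stack *s)
	{
		assert(s->index > 0);
		return s->page[--s->index];
	}

	/* Remove entry idx without preserving order: move the last one down. */
	static void remove_at(struct addr_stack *s, long idx)
	{
		assert(idx >= 0 && idx < s->index);
		s->page[idx] = s->page[--s->index];
	}
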
page               87 arch/powerpc/platforms/pseries/ibmebus.c 				   struct page *page,
page               93 arch/powerpc/platforms/pseries/ibmebus.c 	return (dma_addr_t)(page_address(page) + offset);
page             1742 arch/powerpc/platforms/pseries/lpar.c static void pSeries_set_page_state(struct page *page, int order,
page             1749 arch/powerpc/platforms/pseries/lpar.c 	addr = __pa((unsigned long)page_address(page));
page             1757 arch/powerpc/platforms/pseries/lpar.c void arch_free_page(struct page *page, int order)
page             1764 arch/powerpc/platforms/pseries/lpar.c 	pSeries_set_page_state(page, order, H_PAGE_SET_UNUSED);
page              191 arch/powerpc/platforms/pseries/pseries_energy.c static ssize_t get_best_energy_list(char *page, int activate)
page              197 arch/powerpc/platforms/pseries/pseries_energy.c 	char *s = page;
page              221 arch/powerpc/platforms/pseries/pseries_energy.c 	if (s > page) { /* Something to show */
page              227 arch/powerpc/platforms/pseries/pseries_energy.c 	return s-page;
page              231 arch/powerpc/platforms/pseries/pseries_energy.c 					char *page, int activate)
page              248 arch/powerpc/platforms/pseries/pseries_energy.c 	return sprintf(page, "%lu\n", retbuf[1] >> 32);
page              254 arch/powerpc/platforms/pseries/pseries_energy.c 			struct device_attribute *attr, char *page)
page              256 arch/powerpc/platforms/pseries/pseries_energy.c 	return get_best_energy_list(page, 1);
page              260 arch/powerpc/platforms/pseries/pseries_energy.c 			struct device_attribute *attr, char *page)
page              262 arch/powerpc/platforms/pseries/pseries_energy.c 	return get_best_energy_list(page, 0);
page              266 arch/powerpc/platforms/pseries/pseries_energy.c 			struct device_attribute *attr, char *page)
page              268 arch/powerpc/platforms/pseries/pseries_energy.c 	return get_best_energy_data(dev, page, 1);
page              272 arch/powerpc/platforms/pseries/pseries_energy.c 			struct device_attribute *attr, char *page)
page              274 arch/powerpc/platforms/pseries/pseries_energy.c 	return get_best_energy_data(dev, page, 0);
page               60 arch/powerpc/platforms/pseries/svm.c static struct page *dtl_page_store[NR_DTL_PAGE];
page               63 arch/powerpc/platforms/pseries/svm.c static bool is_dtl_page_shared(struct page *page)
page               68 arch/powerpc/platforms/pseries/svm.c 		if (dtl_page_store[i] == page)
page               77 arch/powerpc/platforms/pseries/svm.c 	struct page *page = pfn_to_page(pfn);
page               79 arch/powerpc/platforms/pseries/svm.c 	if (!is_dtl_page_shared(page)) {
page               80 arch/powerpc/platforms/pseries/svm.c 		dtl_page_store[dtl_nr_pages] = page;
page              512 arch/powerpc/platforms/pseries/vio.c static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page,
page              523 arch/powerpc/platforms/pseries/vio.c 	ret = iommu_map_page(dev, tbl, page, offset, size, dma_get_mask(dev),
page             1541 arch/powerpc/sysdev/xive/common.c 	struct page *pages;
page               50 arch/riscv/include/asm/cacheflush.h 				     struct page *page)
page               62 arch/riscv/include/asm/cacheflush.h #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
page               65 arch/riscv/include/asm/cacheflush.h 		flush_icache_user_range(vma, page, vaddr, len); \
page               67 arch/riscv/include/asm/cacheflush.h #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
page               77 arch/riscv/include/asm/cacheflush.h static inline void flush_dcache_page(struct page *page)
page               79 arch/riscv/include/asm/cacheflush.h 	if (test_bit(PG_dcache_clean, &page->flags))
page               80 arch/riscv/include/asm/cacheflush.h 		clear_bit(PG_dcache_clean, &page->flags);
page               14 arch/riscv/include/asm/hugetlb.h static inline void arch_clear_hugepage_flags(struct page *page)
page               53 arch/riscv/include/asm/page.h #define clear_user_page(pgaddr, vaddr, page)	memset((pgaddr), 0, PAGE_SIZE)
page               75 arch/riscv/include/asm/page.h typedef struct page *pgtable_t;
page              107 arch/riscv/include/asm/page.h #define page_to_virt(page)	(pfn_to_virt(page_to_pfn(page)))
page              109 arch/riscv/include/asm/page.h #define page_to_phys(page)	(pfn_to_phys(page_to_pfn(page)))
page              110 arch/riscv/include/asm/page.h #define page_to_bus(page)	(page_to_phys(page))
page              103 arch/riscv/include/asm/pgtable.h #define vmemmap		((struct page *)VMEMMAP_START)
page              168 arch/riscv/include/asm/pgtable.h static inline struct page *pmd_page(pmd_t pmd)
page              192 arch/riscv/include/asm/pgtable.h #define mk_pte(page, prot)       pfn_pte(page_to_pfn(page), prot)
page               20 arch/riscv/kernel/vdso.c static struct page **vdso_pagelist;
page               27 arch/riscv/kernel/vdso.c 	u8			page[PAGE_SIZE];
page               37 arch/riscv/kernel/vdso.c 		kcalloc(vdso_pages + 1, sizeof(struct page *), GFP_KERNEL);
page               44 arch/riscv/kernel/vdso.c 		struct page *pg;
page               72 arch/riscv/mm/cacheflush.c 	struct page *page = pte_page(pte);
page               74 arch/riscv/mm/cacheflush.c 	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
page              373 arch/s390/include/asm/cio.h int chsc_sstpc(void *page, unsigned int op, u16 ctrl, u64 *clock_delta);
page              374 arch/s390/include/asm/cio.h int chsc_sstpi(void *page, void *result, size_t size);
page               45 arch/s390/include/asm/hugetlb.h static inline void arch_clear_hugepage_flags(struct page *page)
page               47 arch/s390/include/asm/hugetlb.h 	clear_bit(PG_arch_1, &page->flags);
page               84 arch/s390/include/asm/hugetlb.h static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot)
page               86 arch/s390/include/asm/hugetlb.h 	return mk_pte(page, pgprot);
page              688 arch/s390/include/asm/kvm_host.h 	struct page *page;
page              826 arch/s390/include/asm/kvm_host.h 	struct page *pages[KVM_MAX_VCPUS];
page               49 arch/s390/include/asm/page.h #define clear_page(page)	memset((page), 0, PAGE_SIZE)
page               68 arch/s390/include/asm/page.h #define clear_user_page(page, vaddr, pg)	clear_page(page)
page              140 arch/s390/include/asm/page.h struct page;
page              141 arch/s390/include/asm/page.h void arch_free_page(struct page *page, int order);
page              142 arch/s390/include/asm/page.h void arch_alloc_page(struct page *page, int order);
page              143 arch/s390/include/asm/page.h void arch_set_page_dat(struct page *page, int order);
page              144 arch/s390/include/asm/page.h void arch_set_page_nodat(struct page *page, int order);
page              145 arch/s390/include/asm/page.h int arch_test_page_nodat(struct page *page);
page              169 arch/s390/include/asm/page.h #define page_to_virt(page)	pfn_to_virt(page_to_pfn(page))
page              175 arch/s390/include/asm/page.h #define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
page               28 arch/s390/include/asm/perf_event.h 				       char *page);
page               26 arch/s390/include/asm/pgalloc.h struct page *page_table_alloc_pgste(struct mm_struct *mm);
page               29 arch/s390/include/asm/pgalloc.h void page_table_free_pgste(struct page *page);
page               90 arch/s390/include/asm/pgtable.h extern struct page *vmemmap;
page             1203 arch/s390/include/asm/pgtable.h static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
page             1205 arch/s390/include/asm/pgtable.h 	unsigned long physpage = page_to_phys(page);
page             1208 arch/s390/include/asm/pgtable.h 	if (pte_write(__pte) && PageDirty(page))
page             1616 arch/s390/include/asm/pgtable.h #define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))
page               28 arch/s390/include/asm/tlb.h 					  struct page *page, int page_size);
page               49 arch/s390/include/asm/tlb.h 					  struct page *page, int page_size)
page               51 arch/s390/include/asm/tlb.h 	free_page_and_swap_cache(page);
page              175 arch/s390/kernel/ipl.c 		char *page)						\
page              177 arch/s390/kernel/ipl.c 	return snprintf(page, PAGE_SIZE, _format, ##args);		\
page              269 arch/s390/kernel/ipl.c 			     char *page)
page              271 arch/s390/kernel/ipl.c 	return sprintf(page, "%s\n", ipl_type_str(ipl_info.type));
page              277 arch/s390/kernel/ipl.c 			       struct kobj_attribute *attr, char *page)
page              279 arch/s390/kernel/ipl.c 	return sprintf(page, "%i\n", !!ipl_secure_flag);
page              286 arch/s390/kernel/ipl.c 				   struct kobj_attribute *attr, char *page)
page              288 arch/s390/kernel/ipl.c 	return sprintf(page, "%i\n", !!sclp.has_sipl);
page              295 arch/s390/kernel/ipl.c 				struct kobj_attribute *attr, char *page)
page              301 arch/s390/kernel/ipl.c 	return sprintf(page, "%s\n", parm);
page              308 arch/s390/kernel/ipl.c 				   struct kobj_attribute *attr, char *page)
page              312 arch/s390/kernel/ipl.c 		return sprintf(page, "0.%x.%04x\n", ipl_block.ccw.ssid,
page              316 arch/s390/kernel/ipl.c 		return sprintf(page, "0.0.%04x\n", ipl_block.fcp.devno);
page              366 arch/s390/kernel/ipl.c 				     struct kobj_attribute *attr, char *page)
page              371 arch/s390/kernel/ipl.c 		return sprintf(page, "#unknown#\n");
page              375 arch/s390/kernel/ipl.c 	return sprintf(page, "%s\n", loadparm);
page              498 arch/s390/kernel/ipl.c 					  char *page)
page              503 arch/s390/kernel/ipl.c 	return sprintf(page, "%s\n", vmparm);
page              540 arch/s390/kernel/ipl.c 				     struct kobj_attribute *attr, char *page)
page              542 arch/s390/kernel/ipl.c 	return reipl_generic_vmparm_show(reipl_block_nss, page);
page              554 arch/s390/kernel/ipl.c 				     struct kobj_attribute *attr, char *page)
page              556 arch/s390/kernel/ipl.c 	return reipl_generic_vmparm_show(reipl_block_ccw, page);
page              640 arch/s390/kernel/ipl.c 					   char *page)
page              645 arch/s390/kernel/ipl.c 	return sprintf(page, "%s\n", buf);
page              678 arch/s390/kernel/ipl.c 				       struct kobj_attribute *attr, char *page)
page              680 arch/s390/kernel/ipl.c 	return reipl_generic_loadparm_show(reipl_block_fcp, page);
page              714 arch/s390/kernel/ipl.c 				       struct kobj_attribute *attr, char *page)
page              716 arch/s390/kernel/ipl.c 	return reipl_generic_loadparm_show(reipl_block_nss, page);
page              728 arch/s390/kernel/ipl.c 				       struct kobj_attribute *attr, char *page)
page              730 arch/s390/kernel/ipl.c 	return reipl_generic_loadparm_show(reipl_block_ccw, page);
page              778 arch/s390/kernel/ipl.c 				   struct kobj_attribute *attr, char *page)
page              783 arch/s390/kernel/ipl.c 	return sprintf(page, "%s\n", nss_name);
page              864 arch/s390/kernel/ipl.c 			       struct kobj_attribute *attr, char *page)
page              866 arch/s390/kernel/ipl.c 	return sprintf(page, "%s\n", ipl_type_str(reipl_type));
page             1145 arch/s390/kernel/ipl.c 			      struct kobj_attribute *attr, char *page)
page             1147 arch/s390/kernel/ipl.c 	return sprintf(page, "%s\n", dump_type_str(dump_type));
page             1408 arch/s390/kernel/ipl.c 			      struct kobj_attribute *attr, char *page)
page             1410 arch/s390/kernel/ipl.c 	return sprintf(page, "%s\n", on_reboot_trigger.action->name);
page             1434 arch/s390/kernel/ipl.c 			     struct kobj_attribute *attr, char *page)
page             1436 arch/s390/kernel/ipl.c 	return sprintf(page, "%s\n", on_panic_trigger.action->name);
page             1460 arch/s390/kernel/ipl.c 			       struct kobj_attribute *attr, char *page)
page             1462 arch/s390/kernel/ipl.c 	return sprintf(page, "%s\n", on_restart_trigger.action->name);
page             1497 arch/s390/kernel/ipl.c 			    struct kobj_attribute *attr, char *page)
page             1499 arch/s390/kernel/ipl.c 	return sprintf(page, "%s\n", on_halt_trigger.action->name);
page             1523 arch/s390/kernel/ipl.c 			    struct kobj_attribute *attr, char *page)
page             1525 arch/s390/kernel/ipl.c 	return sprintf(page, "%s\n", on_poff_trigger.action->name);
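The ipl.c entries above are sysfs show callbacks; "page" is the single PAGE_SIZE buffer that sysfs hands to each attribute read. A minimal sketch in the same style (the names example_show/example_attr are illustrative, not from ipl.c):

	static ssize_t example_show(struct kobject *kobj,
				    struct kobj_attribute *attr, char *page)
	{
		/* sysfs guarantees "page" is PAGE_SIZE bytes; one value per file */
		return sprintf(page, "%s\n", "example");
	}
	static struct kobj_attribute example_attr = __ATTR_RO(example);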
page               43 arch/s390/kernel/kprobes.c static void free_s390_insn_page(void *page)
page               45 arch/s390/kernel/kprobes.c 	set_memory_nx((unsigned long) page, 1);
page              238 arch/s390/kernel/perf_event.c 				struct device_attribute *attr, char *page)
page              243 arch/s390/kernel/perf_event.c 	return sprintf(page, "event=0x%04llx\n", pmu_attr->id);
page              122 arch/s390/kernel/setup.c struct page *vmemmap;
page              570 arch/s390/kernel/setup.c 		tmp = tmp * (sizeof(struct page) + PAGE_SIZE);
page              584 arch/s390/kernel/setup.c 	tmp = VMALLOC_START / (PAGE_SIZE + sizeof(struct page));
page              587 arch/s390/kernel/setup.c 	tmp = VMALLOC_START - tmp * sizeof(struct page);
page              590 arch/s390/kernel/setup.c 	vmemmap = (struct page *) tmp;
page              597 arch/s390/kernel/setup.c 	vmemmap = max(vmemmap, (struct page *)KASAN_SHADOW_END);
page              628 arch/s390/kernel/smp.c 				     bool is_boot_cpu, unsigned long page)
page              630 arch/s390/kernel/smp.c 	__vector128 *vxrs = (__vector128 *) page;
page              635 arch/s390/kernel/smp.c 		__pcpu_sigp_relax(addr, SIGP_STORE_ADDITIONAL_STATUS, page);
page              640 arch/s390/kernel/smp.c 				     bool is_boot_cpu, unsigned long page)
page              642 arch/s390/kernel/smp.c 	void *regs = (void *) page;
page              647 arch/s390/kernel/smp.c 		__pcpu_sigp_relax(addr, SIGP_STORE_STATUS_AT_ADDRESS, page);
page              655 arch/s390/kernel/smp.c 	unsigned long page;
page              662 arch/s390/kernel/smp.c 	page = memblock_phys_alloc_range(PAGE_SIZE, PAGE_SIZE, 0, 1UL << 31);
page              663 arch/s390/kernel/smp.c 	if (!page)
page              682 arch/s390/kernel/smp.c 			smp_save_cpu_vxrs(sa, addr, is_boot_cpu, page);
page              691 arch/s390/kernel/smp.c 			smp_save_cpu_regs(sa, addr, is_boot_cpu, page);
page              693 arch/s390/kernel/smp.c 	memblock_free(page, PAGE_SIZE);
page              102 arch/s390/kernel/suspend.c 	struct page *page;
page              106 arch/s390/kernel/suspend.c 	page = pfn_to_page(*pfn);
page              107 arch/s390/kernel/suspend.c 	addr = (unsigned long) page_address(page);
page              109 arch/s390/kernel/suspend.c 	if (arch_test_page_nodat(page))
page              136 arch/s390/kernel/suspend.c 	struct page *page;
page              141 arch/s390/kernel/suspend.c 	page = virt_to_page(address);
page              143 arch/s390/kernel/suspend.c 		arch_set_page_nodat(page, 0);
page              145 arch/s390/kernel/suspend.c 		arch_set_page_dat(page, 0);
page               36 arch/s390/kernel/vdso.c static struct page **vdso32_pagelist;
page               42 arch/s390/kernel/vdso.c static struct page **vdso64_pagelist;
page               53 arch/s390/kernel/vdso.c 	struct page **vdso_pagelist;
page               68 arch/s390/kernel/vdso.c 	vmf->page = vdso_pagelist[vmf->pgoff];
page               69 arch/s390/kernel/vdso.c 	get_page(vmf->page);
page              115 arch/s390/kernel/vdso.c 	u8			page[PAGE_SIZE];
page              276 arch/s390/kernel/vdso.c 	vdso32_pagelist = kcalloc(vdso32_pages + 1, sizeof(struct page *),
page              280 arch/s390/kernel/vdso.c 		struct page *pg = virt_to_page(vdso32_kbase + i*PAGE_SIZE);
page              293 arch/s390/kernel/vdso.c 	vdso64_pagelist = kcalloc(vdso64_pages + 1, sizeof(struct page *),
page              297 arch/s390/kernel/vdso.c 		struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE);
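A hedged sketch of the vdso pagelist setup above: each kernel-mapped vdso page is translated to its struct page, and the list is NULL-terminated for the fault path that hands out vdso_pagelist[vmf->pgoff]. Identifiers build_pagelist/kbase/npages are illustrative:

	static struct page **build_pagelist(void *kbase, unsigned long npages)
	{
		struct page **pagelist;
		unsigned long i;

		pagelist = kcalloc(npages + 1, sizeof(struct page *), GFP_KERNEL);
		if (!pagelist)
			return NULL;
		for (i = 0; i < npages; i++)
			pagelist[i] = virt_to_page(kbase + i * PAGE_SIZE);
		pagelist[npages] = NULL;	/* terminator for the fault path */
		return pagelist;
	}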
page             2379 arch/s390/kvm/interrupt.c 	ret = get_user_pages_fast(map->addr, 1, FOLL_WRITE, &map->page);
page             2388 arch/s390/kvm/interrupt.c 		put_page(map->page);
page             2413 arch/s390/kvm/interrupt.c 			put_page(map->page);
page             2434 arch/s390/kvm/interrupt.c 			put_page(map->page);
page             2730 arch/s390/kvm/interrupt.c 	map = page_address(info->page);
page             2735 arch/s390/kvm/interrupt.c 	set_page_dirty_lock(info->page);
page             2741 arch/s390/kvm/interrupt.c 	map = page_address(info->page);
page             2746 arch/s390/kvm/interrupt.c 	set_page_dirty_lock(info->page);
page             4490 arch/s390/kvm/kvm-s390.c 		vmf->page = virt_to_page(vcpu->arch.sie_block);
page             4491 arch/s390/kvm/kvm-s390.c 		get_page(vmf->page);
page              566 arch/s390/kvm/vsie.c 	struct page *page;
page              580 arch/s390/kvm/vsie.c 		page = READ_ONCE(kvm->arch.vsie.pages[i]);
page              581 arch/s390/kvm/vsie.c 		if (!page)
page              583 arch/s390/kvm/vsie.c 		cur = page_to_virt(page);
page              645 arch/s390/kvm/vsie.c 	struct page *page;
page              647 arch/s390/kvm/vsie.c 	page = gfn_to_page(kvm, gpa_to_gfn(gpa));
page              648 arch/s390/kvm/vsie.c 	if (is_error_page(page))
page              650 arch/s390/kvm/vsie.c 	*hpa = (hpa_t) page_to_virt(page) + (gpa & ~PAGE_MASK);
page             1220 arch/s390/kvm/vsie.c 	struct page *page;
page             1224 arch/s390/kvm/vsie.c 	page = radix_tree_lookup(&kvm->arch.vsie.addr_to_page, addr >> 9);
page             1226 arch/s390/kvm/vsie.c 	if (page) {
page             1227 arch/s390/kvm/vsie.c 		if (page_ref_inc_return(page) == 2)
page             1228 arch/s390/kvm/vsie.c 			return page_to_virt(page);
page             1229 arch/s390/kvm/vsie.c 		page_ref_dec(page);
page             1240 arch/s390/kvm/vsie.c 		page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA);
page             1241 arch/s390/kvm/vsie.c 		if (!page) {
page             1245 arch/s390/kvm/vsie.c 		page_ref_inc(page);
page             1246 arch/s390/kvm/vsie.c 		kvm->arch.vsie.pages[kvm->arch.vsie.page_count] = page;
page             1251 arch/s390/kvm/vsie.c 			page = kvm->arch.vsie.pages[kvm->arch.vsie.next];
page             1252 arch/s390/kvm/vsie.c 			if (page_ref_inc_return(page) == 2)
page             1254 arch/s390/kvm/vsie.c 			page_ref_dec(page);
page             1258 arch/s390/kvm/vsie.c 		radix_tree_delete(&kvm->arch.vsie.addr_to_page, page->index >> 9);
page             1260 arch/s390/kvm/vsie.c 	page->index = addr;
page             1262 arch/s390/kvm/vsie.c 	if (radix_tree_insert(&kvm->arch.vsie.addr_to_page, addr >> 9, page)) {
page             1263 arch/s390/kvm/vsie.c 		page_ref_dec(page);
page             1269 arch/s390/kvm/vsie.c 	vsie_page = page_to_virt(page);
page             1280 arch/s390/kvm/vsie.c 	struct page *page = pfn_to_page(__pa(vsie_page) >> PAGE_SHIFT);
page             1282 arch/s390/kvm/vsie.c 	page_ref_dec(page);
page             1349 arch/s390/kvm/vsie.c 	struct page *page;
page             1354 arch/s390/kvm/vsie.c 		page = kvm->arch.vsie.pages[i];
page             1356 arch/s390/kvm/vsie.c 		vsie_page = page_to_virt(page);
page             1359 arch/s390/kvm/vsie.c 		radix_tree_delete(&kvm->arch.vsie.addr_to_page, page->index >> 9);
page             1360 arch/s390/kvm/vsie.c 		__free_page(page);
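The vsie.c lookups above lean on a claim-by-refcount idiom: a cached page idles at refcount 1, so whoever sees 2 come back from the increment owns it. A minimal sketch (the helper name is illustrative):

	static struct page *try_get_vsie_page(struct page *page)
	{
		if (page_ref_inc_return(page) == 2)
			return page;		/* was free, now ours */
		page_ref_dec(page);		/* in use elsewhere, back off */
		return NULL;
	}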
page               38 arch/s390/mm/gmap.c 	struct page *page;
page               71 arch/s390/mm/gmap.c 	page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
page               72 arch/s390/mm/gmap.c 	if (!page)
page               74 arch/s390/mm/gmap.c 	page->index = 0;
page               75 arch/s390/mm/gmap.c 	list_add(&page->lru, &gmap->crst_list);
page               76 arch/s390/mm/gmap.c 	table = (unsigned long *) page_to_phys(page);
page              185 arch/s390/mm/gmap.c 	struct page *page, *next;
page              191 arch/s390/mm/gmap.c 	list_for_each_entry_safe(page, next, &gmap->crst_list, lru)
page              192 arch/s390/mm/gmap.c 		__free_pages(page, CRST_ALLOC_ORDER);
page              199 arch/s390/mm/gmap.c 		list_for_each_entry_safe(page, next, &gmap->pt_list, lru)
page              200 arch/s390/mm/gmap.c 			page_table_free_pgste(page);
page              308 arch/s390/mm/gmap.c 	struct page *page;
page              312 arch/s390/mm/gmap.c 	page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
page              313 arch/s390/mm/gmap.c 	if (!page)
page              315 arch/s390/mm/gmap.c 	new = (unsigned long *) page_to_phys(page);
page              319 arch/s390/mm/gmap.c 		list_add(&page->lru, &gmap->crst_list);
page              322 arch/s390/mm/gmap.c 		page->index = gaddr;
page              323 arch/s390/mm/gmap.c 		page = NULL;
page              326 arch/s390/mm/gmap.c 	if (page)
page              327 arch/s390/mm/gmap.c 		__free_pages(page, CRST_ALLOC_ORDER);
page              339 arch/s390/mm/gmap.c 	struct page *page;
page              345 arch/s390/mm/gmap.c 	page = virt_to_page((void *)((unsigned long) entry & mask));
page              346 arch/s390/mm/gmap.c 	return page->index + offset;
page             1326 arch/s390/mm/gmap.c 	struct page *page;
page             1339 arch/s390/mm/gmap.c 	page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT);
page             1340 arch/s390/mm/gmap.c 	list_del(&page->lru);
page             1341 arch/s390/mm/gmap.c 	page_table_free_pgste(page);
page             1356 arch/s390/mm/gmap.c 	struct page *page;
page             1367 arch/s390/mm/gmap.c 		page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT);
page             1368 arch/s390/mm/gmap.c 		list_del(&page->lru);
page             1369 arch/s390/mm/gmap.c 		page_table_free_pgste(page);
page             1383 arch/s390/mm/gmap.c 	struct page *page;
page             1396 arch/s390/mm/gmap.c 	page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT);
page             1397 arch/s390/mm/gmap.c 	list_del(&page->lru);
page             1398 arch/s390/mm/gmap.c 	__free_pages(page, CRST_ALLOC_ORDER);
page             1413 arch/s390/mm/gmap.c 	struct page *page;
page             1424 arch/s390/mm/gmap.c 		page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT);
page             1425 arch/s390/mm/gmap.c 		list_del(&page->lru);
page             1426 arch/s390/mm/gmap.c 		__free_pages(page, CRST_ALLOC_ORDER);
page             1440 arch/s390/mm/gmap.c 	struct page *page;
page             1453 arch/s390/mm/gmap.c 	page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT);
page             1454 arch/s390/mm/gmap.c 	list_del(&page->lru);
page             1455 arch/s390/mm/gmap.c 	__free_pages(page, CRST_ALLOC_ORDER);
page             1470 arch/s390/mm/gmap.c 	struct page *page;
page             1481 arch/s390/mm/gmap.c 		page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT);
page             1482 arch/s390/mm/gmap.c 		list_del(&page->lru);
page             1483 arch/s390/mm/gmap.c 		__free_pages(page, CRST_ALLOC_ORDER);
page             1497 arch/s390/mm/gmap.c 	struct page *page;
page             1510 arch/s390/mm/gmap.c 	page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT);
page             1511 arch/s390/mm/gmap.c 	list_del(&page->lru);
page             1512 arch/s390/mm/gmap.c 	__free_pages(page, CRST_ALLOC_ORDER);
page             1527 arch/s390/mm/gmap.c 	struct page *page;
page             1541 arch/s390/mm/gmap.c 		page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT);
page             1542 arch/s390/mm/gmap.c 		list_del(&page->lru);
page             1543 arch/s390/mm/gmap.c 		__free_pages(page, CRST_ALLOC_ORDER);
page             1739 arch/s390/mm/gmap.c 	struct page *page;
page             1744 arch/s390/mm/gmap.c 	page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
page             1745 arch/s390/mm/gmap.c 	if (!page)
page             1747 arch/s390/mm/gmap.c 	page->index = r2t & _REGION_ENTRY_ORIGIN;
page             1749 arch/s390/mm/gmap.c 		page->index |= GMAP_SHADOW_FAKE_TABLE;
page             1750 arch/s390/mm/gmap.c 	s_r2t = (unsigned long *) page_to_phys(page);
page             1771 arch/s390/mm/gmap.c 	list_add(&page->lru, &sg->crst_list);
page             1800 arch/s390/mm/gmap.c 	__free_pages(page, CRST_ALLOC_ORDER);
page             1823 arch/s390/mm/gmap.c 	struct page *page;
page             1828 arch/s390/mm/gmap.c 	page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
page             1829 arch/s390/mm/gmap.c 	if (!page)
page             1831 arch/s390/mm/gmap.c 	page->index = r3t & _REGION_ENTRY_ORIGIN;
page             1833 arch/s390/mm/gmap.c 		page->index |= GMAP_SHADOW_FAKE_TABLE;
page             1834 arch/s390/mm/gmap.c 	s_r3t = (unsigned long *) page_to_phys(page);
page             1855 arch/s390/mm/gmap.c 	list_add(&page->lru, &sg->crst_list);
page             1884 arch/s390/mm/gmap.c 	__free_pages(page, CRST_ALLOC_ORDER);
page             1907 arch/s390/mm/gmap.c 	struct page *page;
page             1912 arch/s390/mm/gmap.c 	page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
page             1913 arch/s390/mm/gmap.c 	if (!page)
page             1915 arch/s390/mm/gmap.c 	page->index = sgt & _REGION_ENTRY_ORIGIN;
page             1917 arch/s390/mm/gmap.c 		page->index |= GMAP_SHADOW_FAKE_TABLE;
page             1918 arch/s390/mm/gmap.c 	s_sgt = (unsigned long *) page_to_phys(page);
page             1939 arch/s390/mm/gmap.c 	list_add(&page->lru, &sg->crst_list);
page             1968 arch/s390/mm/gmap.c 	__free_pages(page, CRST_ALLOC_ORDER);
page             1991 arch/s390/mm/gmap.c 	struct page *page;
page             1999 arch/s390/mm/gmap.c 		page = pfn_to_page(*table >> PAGE_SHIFT);
page             2000 arch/s390/mm/gmap.c 		*pgt = page->index & ~GMAP_SHADOW_FAKE_TABLE;
page             2002 arch/s390/mm/gmap.c 		*fake = !!(page->index & GMAP_SHADOW_FAKE_TABLE);
page             2031 arch/s390/mm/gmap.c 	struct page *page;
page             2036 arch/s390/mm/gmap.c 	page = page_table_alloc_pgste(sg->mm);
page             2037 arch/s390/mm/gmap.c 	if (!page)
page             2039 arch/s390/mm/gmap.c 	page->index = pgt & _SEGMENT_ENTRY_ORIGIN;
page             2041 arch/s390/mm/gmap.c 		page->index |= GMAP_SHADOW_FAKE_TABLE;
page             2042 arch/s390/mm/gmap.c 	s_pgt = (unsigned long *) page_to_phys(page);
page             2060 arch/s390/mm/gmap.c 	list_add(&page->lru, &sg->pt_list);
page             2087 arch/s390/mm/gmap.c 	page_table_free_pgste(page);
page             2574 arch/s390/mm/gmap.c 	struct page *page = pmd_page(*pmd);
page             2589 arch/s390/mm/gmap.c 	set_bit(PG_arch_1, &page->flags);
page              131 arch/s390/mm/hugetlbpage.c 	struct page *page;
page              139 arch/s390/mm/hugetlbpage.c 		page = pud_page(__pud(rste));
page              143 arch/s390/mm/hugetlbpage.c 		page = pmd_page(__pmd(rste));
page              148 arch/s390/mm/hugetlbpage.c 	if (!test_and_set_bit(PG_arch_1, &page->flags))
page              247 arch/s390/mm/hugetlbpage.c struct page *
page               61 arch/s390/mm/init.c 	struct page *page;
page               75 arch/s390/mm/init.c 	page = virt_to_page((void *) empty_zero_page);
page               76 arch/s390/mm/init.c 	split_page(page, order);
page               78 arch/s390/mm/init.c 		mark_page_reserved(page);
page               79 arch/s390/mm/init.c 		page++;
page              160 arch/s390/mm/kasan_init.c 				void *page;
page              163 arch/s390/mm/kasan_init.c 					page = (void *)address;
page              165 arch/s390/mm/kasan_init.c 					page = kasan_early_alloc_segment();
page              166 arch/s390/mm/kasan_init.c 					memset(page, 0, _SEGMENT_SIZE);
page              168 arch/s390/mm/kasan_init.c 				pmd_val(*pm_dir) = __pa(page) | sgt_prot;
page              182 arch/s390/mm/kasan_init.c 			void *page;
page              186 arch/s390/mm/kasan_init.c 				page = (void *)address;
page              187 arch/s390/mm/kasan_init.c 				pte_val(*pt_dir) = __pa(page) | pgt_prot;
page              190 arch/s390/mm/kasan_init.c 				page = kasan_early_alloc_pages(0);
page              191 arch/s390/mm/kasan_init.c 				memset(page, 0, PAGE_SIZE);
page              192 arch/s390/mm/kasan_init.c 				pte_val(*pt_dir) = __pa(page) | pgt_prot;
page              195 arch/s390/mm/kasan_init.c 				page = kasan_early_shadow_page;
page              196 arch/s390/mm/kasan_init.c 				pte_val(*pt_dir) = __pa(page) | pgt_prot_zero;
page               60 arch/s390/mm/page-states.c static inline unsigned char get_page_state(struct page *page)
page               66 arch/s390/mm/page-states.c 		     : "a" (page_to_phys(page)),
page               71 arch/s390/mm/page-states.c static inline void set_page_unused(struct page *page, int order)
page               78 arch/s390/mm/page-states.c 			     : "a" (page_to_phys(page + i)),
page               82 arch/s390/mm/page-states.c static inline void set_page_stable_dat(struct page *page, int order)
page               89 arch/s390/mm/page-states.c 			     : "a" (page_to_phys(page + i)),
page               93 arch/s390/mm/page-states.c static inline void set_page_stable_nodat(struct page *page, int order)
page              100 arch/s390/mm/page-states.c 			     : "a" (page_to_phys(page + i)),
page              107 arch/s390/mm/page-states.c 	struct page *page;
page              115 arch/s390/mm/page-states.c 		page = virt_to_page(pmd_val(*pmd));
page              116 arch/s390/mm/page-states.c 		set_bit(PG_arch_1, &page->flags);
page              123 arch/s390/mm/page-states.c 	struct page *page;
page              133 arch/s390/mm/page-states.c 			page = virt_to_page(pud_val(*pud));
page              135 arch/s390/mm/page-states.c 				set_bit(PG_arch_1, &page[i].flags);
page              144 arch/s390/mm/page-states.c 	struct page *page;
page              154 arch/s390/mm/page-states.c 			page = virt_to_page(p4d_val(*p4d));
page              156 arch/s390/mm/page-states.c 				set_bit(PG_arch_1, &page[i].flags);
page              165 arch/s390/mm/page-states.c 	struct page *page;
page              176 arch/s390/mm/page-states.c 			page = virt_to_page(pgd_val(*pgd));
page              178 arch/s390/mm/page-states.c 				set_bit(PG_arch_1, &page[i].flags);
page              187 arch/s390/mm/page-states.c 	struct page *page;
page              199 arch/s390/mm/page-states.c 		page = pfn_to_page(start);
page              200 arch/s390/mm/page-states.c 		for (ix = start; ix < end; ix++, page++) {
page              201 arch/s390/mm/page-states.c 			if (__test_and_clear_bit(PG_arch_1, &page->flags))
page              203 arch/s390/mm/page-states.c 			if (!list_empty(&page->lru))
page              205 arch/s390/mm/page-states.c 			set_page_stable_nodat(page, 0);
page              210 arch/s390/mm/page-states.c void arch_free_page(struct page *page, int order)
page              214 arch/s390/mm/page-states.c 	set_page_unused(page, order);
page              217 arch/s390/mm/page-states.c void arch_alloc_page(struct page *page, int order)
page              222 arch/s390/mm/page-states.c 		set_page_stable_dat(page, order);
page              224 arch/s390/mm/page-states.c 		set_page_stable_nodat(page, order);
page              227 arch/s390/mm/page-states.c void arch_set_page_dat(struct page *page, int order)
page              231 arch/s390/mm/page-states.c 	set_page_stable_dat(page, order);
page              234 arch/s390/mm/page-states.c void arch_set_page_nodat(struct page *page, int order)
page              238 arch/s390/mm/page-states.c 	set_page_stable_nodat(page, order);
page              241 arch/s390/mm/page-states.c int arch_test_page_nodat(struct page *page)
page              247 arch/s390/mm/page-states.c 	state = get_page_state(page);
page              255 arch/s390/mm/page-states.c 	struct page *page;
page              266 arch/s390/mm/page-states.c 				page = list_entry(l, struct page, lru);
page              268 arch/s390/mm/page-states.c 					set_page_stable_dat(page, order);
page              270 arch/s390/mm/page-states.c 					set_page_unused(page, order);
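Hedged reading of page-states.c above: the set_page_* helpers wrap the ESSA instruction, which marks each frame stable, unused, or stable-no-DAT so a hypervisor may discard unused frames; the arch hooks flip frames around the buddy allocator, roughly:

	void arch_free_page(struct page *page, int order)
	{
		if (!cmma_flag)			/* assumption: the gate used in this file */
			return;
		set_page_unused(page, order);	/* frame content may now be discarded */
	}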
page              337 arch/s390/mm/pageattr.c void __kernel_map_pages(struct page *page, int numpages, int enable)
page              348 arch/s390/mm/pageattr.c 		address = page_to_phys(page + i);
page              371 arch/s390/mm/pageattr.c bool kernel_page_present(struct page *page)
page              376 arch/s390/mm/pageattr.c 	addr = page_to_phys(page);
page               56 arch/s390/mm/pgalloc.c 	struct page *page = alloc_pages(GFP_KERNEL, 2);
page               58 arch/s390/mm/pgalloc.c 	if (!page)
page               60 arch/s390/mm/pgalloc.c 	arch_set_page_dat(page, 2);
page               61 arch/s390/mm/pgalloc.c 	return (unsigned long *) page_to_phys(page);
page              168 arch/s390/mm/pgalloc.c struct page *page_table_alloc_pgste(struct mm_struct *mm)
page              170 arch/s390/mm/pgalloc.c 	struct page *page;
page              173 arch/s390/mm/pgalloc.c 	page = alloc_page(GFP_KERNEL);
page              174 arch/s390/mm/pgalloc.c 	if (page) {
page              175 arch/s390/mm/pgalloc.c 		table = (u64 *)page_to_phys(page);
page              179 arch/s390/mm/pgalloc.c 	return page;
page              182 arch/s390/mm/pgalloc.c void page_table_free_pgste(struct page *page)
page              184 arch/s390/mm/pgalloc.c 	__free_page(page);
page              195 arch/s390/mm/pgalloc.c 	struct page *page;
page              203 arch/s390/mm/pgalloc.c 			page = list_first_entry(&mm->context.pgtable_list,
page              204 arch/s390/mm/pgalloc.c 						struct page, lru);
page              205 arch/s390/mm/pgalloc.c 			mask = atomic_read(&page->_refcount) >> 24;
page              208 arch/s390/mm/pgalloc.c 				table = (unsigned long *) page_to_phys(page);
page              212 arch/s390/mm/pgalloc.c 				atomic_xor_bits(&page->_refcount,
page              214 arch/s390/mm/pgalloc.c 				list_del(&page->lru);
page              222 arch/s390/mm/pgalloc.c 	page = alloc_page(GFP_KERNEL);
page              223 arch/s390/mm/pgalloc.c 	if (!page)
page              225 arch/s390/mm/pgalloc.c 	if (!pgtable_pte_page_ctor(page)) {
page              226 arch/s390/mm/pgalloc.c 		__free_page(page);
page              229 arch/s390/mm/pgalloc.c 	arch_set_page_dat(page, 0);
page              231 arch/s390/mm/pgalloc.c 	table = (unsigned long *) page_to_phys(page);
page              234 arch/s390/mm/pgalloc.c 		atomic_xor_bits(&page->_refcount, 3 << 24);
page              239 arch/s390/mm/pgalloc.c 		atomic_xor_bits(&page->_refcount, 1 << 24);
page              242 arch/s390/mm/pgalloc.c 		list_add(&page->lru, &mm->context.pgtable_list);
page              250 arch/s390/mm/pgalloc.c 	struct page *page;
page              253 arch/s390/mm/pgalloc.c 	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
page              258 arch/s390/mm/pgalloc.c 		mask = atomic_xor_bits(&page->_refcount, 1U << (bit + 24));
page              261 arch/s390/mm/pgalloc.c 			list_add(&page->lru, &mm->context.pgtable_list);
page              263 arch/s390/mm/pgalloc.c 			list_del(&page->lru);
page              268 arch/s390/mm/pgalloc.c 		atomic_xor_bits(&page->_refcount, 3U << 24);
page              271 arch/s390/mm/pgalloc.c 	pgtable_pte_page_dtor(page);
page              272 arch/s390/mm/pgalloc.c 	__free_page(page);
page              279 arch/s390/mm/pgalloc.c 	struct page *page;
page              283 arch/s390/mm/pgalloc.c 	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
page              292 arch/s390/mm/pgalloc.c 	mask = atomic_xor_bits(&page->_refcount, 0x11U << (bit + 24));
page              295 arch/s390/mm/pgalloc.c 		list_add_tail(&page->lru, &mm->context.pgtable_list);
page              297 arch/s390/mm/pgalloc.c 		list_del(&page->lru);
page              307 arch/s390/mm/pgalloc.c 	struct page *page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
page              315 arch/s390/mm/pgalloc.c 		mask = atomic_xor_bits(&page->_refcount, mask << (4 + 24));
page              322 arch/s390/mm/pgalloc.c 			atomic_xor_bits(&page->_refcount, 3 << 24);
page              323 arch/s390/mm/pgalloc.c 		pgtable_pte_page_dtor(page);
page              324 arch/s390/mm/pgalloc.c 		__free_page(page);
page              375 arch/s390/mm/pgalloc.c BASE_ADDR_END_FUNC(page,    _PAGE_SIZE)
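Hedged reading of the pgalloc.c refcount games above: an s390 page table is 2K, so each 4K page carries two; bits 24-25 of page->_refcount track which halves are handed out (page_table_free_rcu additionally parks halves via bits 28-29 until the RCU grace period). An illustrative allocator for one half, assuming the caller holds the pgtable_list lock and has already seen a free half; atomic_xor_bits is the file-local helper used throughout pgalloc.c:

	static unsigned long *alloc_half(struct page *page)
	{
		unsigned int mask = atomic_read(&page->_refcount) >> 24;
		unsigned long *table = (unsigned long *) page_to_phys(page);

		if (mask & 1)				/* lower 2K already taken */
			table += PTRS_PER_PTE;
		atomic_xor_bits(&page->_refcount, (mask & 1 ? 2U : 1U) << 24);
		return table;
	}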
page              676 arch/s390/mm/pgtable.c 		struct page *page = migration_entry_to_page(entry);
page              678 arch/s390/mm/pgtable.c 		dec_mm_counter(mm, mm_counter(page));
page              335 arch/s390/pci/pci_dma.c static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
page              341 arch/s390/pci/pci_dma.c 	unsigned long pa = page_to_phys(page) + offset;
page              402 arch/s390/pci/pci_dma.c 	struct page *page;
page              407 arch/s390/pci/pci_dma.c 	page = alloc_pages(flag | __GFP_ZERO, get_order(size));
page              408 arch/s390/pci/pci_dma.c 	if (!page)
page              411 arch/s390/pci/pci_dma.c 	pa = page_to_phys(page);
page              412 arch/s390/pci/pci_dma.c 	map = s390_dma_map_pages(dev, page, 0, size, DMA_BIDIRECTIONAL, 0);
page               47 arch/sh/include/asm/cacheflush.h extern void flush_dcache_page(struct page *page);
page               50 arch/sh/include/asm/cacheflush.h 				 struct page *page);
page               59 arch/sh/include/asm/cacheflush.h extern void __flush_anon_page(struct page *page, unsigned long);
page               62 arch/sh/include/asm/cacheflush.h 				   struct page *page, unsigned long vmaddr)
page               64 arch/sh/include/asm/cacheflush.h 	if (boot_cpu_data.dcache.n_aliases && PageAnon(page))
page               65 arch/sh/include/asm/cacheflush.h 		__flush_anon_page(page, vmaddr);
page               77 arch/sh/include/asm/cacheflush.h static inline void flush_kernel_dcache_page(struct page *page)
page               79 arch/sh/include/asm/cacheflush.h 	flush_dcache_page(page);
page               83 arch/sh/include/asm/cacheflush.h 	struct page *page, unsigned long vaddr, void *dst, const void *src,
page               87 arch/sh/include/asm/cacheflush.h 	struct page *page, unsigned long vaddr, void *dst, const void *src,
page               97 arch/sh/include/asm/cacheflush.h void *kmap_coherent(struct page *page, unsigned long addr);
page               35 arch/sh/include/asm/hugetlb.h static inline void arch_clear_hugepage_flags(struct page *page)
page               37 arch/sh/include/asm/hugetlb.h 	clear_bit(PG_dcache_clean, &page->flags);
page               61 arch/sh/include/asm/page.h #define clear_page(page)	memset((void *)(page), 0, PAGE_SIZE)
page               65 arch/sh/include/asm/page.h struct page;
page               68 arch/sh/include/asm/page.h extern void copy_user_highpage(struct page *to, struct page *from,
page               71 arch/sh/include/asm/page.h extern void clear_user_highpage(struct page *page, unsigned long vaddr);
page              105 arch/sh/include/asm/page.h typedef struct page *pgtable_t;
page              169 arch/sh/include/asm/page.h #define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
page               39 arch/sh/include/asm/pgalloc.h 	struct page *page = virt_to_page(pmdp);		\
page               40 arch/sh/include/asm/pgalloc.h 	pgtable_pmd_page_dtor(page);			\
page               41 arch/sh/include/asm/pgalloc.h 	tlb_remove_page((tlb), page);			\
page              390 arch/sh/include/asm/pgtable_32.h #define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
page              276 arch/sh/include/asm/pgtable_64.h #define mk_pte(page,pgprot)							\
page              280 arch/sh/include/asm/pgtable_64.h 	set_pte(&__pte, __pte((((page)-mem_map) << PAGE_SHIFT) | 		\
page               20 arch/sh/include/asm/tlbflush.h 				 unsigned long page);
page               23 arch/sh/include/asm/tlbflush.h extern void local_flush_tlb_one(unsigned long asid, unsigned long page);
page               33 arch/sh/include/asm/tlbflush.h extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
page               35 arch/sh/include/asm/tlbflush.h extern void flush_tlb_one(unsigned long asid, unsigned long page);
page               41 arch/sh/include/asm/tlbflush.h #define flush_tlb_page(vma, page)	local_flush_tlb_page(vma, page)
page               42 arch/sh/include/asm/tlbflush.h #define flush_tlb_one(asid, page)	local_flush_tlb_one(asid, page)
page              148 arch/sh/kernel/cpu/sh4/sq.c 	int ret, page;
page              169 arch/sh/kernel/cpu/sh4/sq.c 	page = bitmap_find_free_region(sq_bitmap, 0x04000000 >> PAGE_SHIFT,
page              171 arch/sh/kernel/cpu/sh4/sq.c 	if (unlikely(page < 0)) {
page              176 arch/sh/kernel/cpu/sh4/sq.c 	map->sq_addr = P4SEG_STORE_QUE + (page << PAGE_SHIFT);
page              209 arch/sh/kernel/cpu/sh4/sq.c 	int page;
page              221 arch/sh/kernel/cpu/sh4/sq.c 	page = (map->sq_addr - P4SEG_STORE_QUE) >> PAGE_SHIFT;
page              222 arch/sh/kernel/cpu/sh4/sq.c 	bitmap_release_region(sq_bitmap, page, get_order(map->size));
page               45 arch/sh/kernel/io_trapped.c 	struct page *pages[TRAPPED_PAGES_MAX];
page              436 arch/sh/kernel/smp.c void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
page              444 arch/sh/kernel/smp.c 		fd.addr1 = page;
page              452 arch/sh/kernel/smp.c 	local_flush_tlb_page(vma, page);
page               38 arch/sh/kernel/vsyscall/vsyscall.c static struct page *syscall_pages[1];
page              112 arch/sh/mm/cache-sh4.c 	struct page *page = arg;
page              113 arch/sh/mm/cache-sh4.c 	unsigned long addr = (unsigned long)page_address(page);
page              115 arch/sh/mm/cache-sh4.c 	struct address_space *mapping = page_mapping_file(page);
page              118 arch/sh/mm/cache-sh4.c 		clear_bit(PG_dcache_clean, &page->flags);
page              122 arch/sh/mm/cache-sh4.c 				(addr & shm_align_mask), page_to_phys(page));
page              208 arch/sh/mm/cache-sh4.c 	struct page *page;
page              221 arch/sh/mm/cache-sh4.c 	page = pfn_to_page(pfn);
page              243 arch/sh/mm/cache-sh4.c 			test_bit(PG_dcache_clean, &page->flags) &&
page              244 arch/sh/mm/cache-sh4.c 			page_mapcount(page));
page              246 arch/sh/mm/cache-sh4.c 			vaddr = kmap_coherent(page, address);
page              248 arch/sh/mm/cache-sh4.c 			vaddr = kmap_atomic(page);
page              564 arch/sh/mm/cache-sh5.c static void sh5_flush_dcache_page(void *page)
page              566 arch/sh/mm/cache-sh5.c 	sh64_dcache_purge_phy_page(page_to_phys((struct page *)page));
page              138 arch/sh/mm/cache-sh7705.c 	struct page *page = arg;
page              139 arch/sh/mm/cache-sh7705.c 	struct address_space *mapping = page_mapping_file(page);
page              142 arch/sh/mm/cache-sh7705.c 		clear_bit(PG_dcache_clean, &page->flags);
page              144 arch/sh/mm/cache-sh7705.c 		__flush_dcache_page(__pa(page_address(page)));
page              180 arch/sh/mm/cache-sh7705.c static void sh7705_flush_icache_page(void *page)
page              182 arch/sh/mm/cache-sh7705.c 	__flush_purge_region(page_address(page), PAGE_SIZE);
page               60 arch/sh/mm/cache.c void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
page               64 arch/sh/mm/cache.c 	if (boot_cpu_data.dcache.n_aliases && page_mapcount(page) &&
page               65 arch/sh/mm/cache.c 	    test_bit(PG_dcache_clean, &page->flags)) {
page               66 arch/sh/mm/cache.c 		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
page               72 arch/sh/mm/cache.c 			clear_bit(PG_dcache_clean, &page->flags);
page               76 arch/sh/mm/cache.c 		flush_cache_page(vma, vaddr, page_to_pfn(page));
page               79 arch/sh/mm/cache.c void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
page               83 arch/sh/mm/cache.c 	if (boot_cpu_data.dcache.n_aliases && page_mapcount(page) &&
page               84 arch/sh/mm/cache.c 	    test_bit(PG_dcache_clean, &page->flags)) {
page               85 arch/sh/mm/cache.c 		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
page               91 arch/sh/mm/cache.c 			clear_bit(PG_dcache_clean, &page->flags);
page               95 arch/sh/mm/cache.c void copy_user_highpage(struct page *to, struct page *from,
page              123 arch/sh/mm/cache.c void clear_user_highpage(struct page *page, unsigned long vaddr)
page              125 arch/sh/mm/cache.c 	void *kaddr = kmap_atomic(page);
page              139 arch/sh/mm/cache.c 	struct page *page;
page              145 arch/sh/mm/cache.c 	page = pfn_to_page(pfn);
page              147 arch/sh/mm/cache.c 		int dirty = !test_and_set_bit(PG_dcache_clean, &page->flags);
page              149 arch/sh/mm/cache.c 			__flush_purge_region(page_address(page), PAGE_SIZE);
page              153 arch/sh/mm/cache.c void __flush_anon_page(struct page *page, unsigned long vmaddr)
page              155 arch/sh/mm/cache.c 	unsigned long addr = (unsigned long) page_address(page);
page              158 arch/sh/mm/cache.c 		if (boot_cpu_data.dcache.n_aliases && page_mapcount(page) &&
page              159 arch/sh/mm/cache.c 		    test_bit(PG_dcache_clean, &page->flags)) {
page              162 arch/sh/mm/cache.c 			kaddr = kmap_coherent(page, vmaddr);
page              218 arch/sh/mm/cache.c void flush_dcache_page(struct page *page)
page              220 arch/sh/mm/cache.c 	cacheop_on_each_cpu(local_flush_dcache_page, page, 1);
page              236 arch/sh/mm/cache.c void flush_icache_page(struct vm_area_struct *vma, struct page *page)
page              239 arch/sh/mm/cache.c 	cacheop_on_each_cpu(local_flush_icache_page, page, 1);
page               31 arch/sh/mm/kmap.c void *kmap_coherent(struct page *page, unsigned long addr)
page               36 arch/sh/mm/kmap.c 	BUG_ON(!test_bit(PG_dcache_clean, &page->flags));
page               48 arch/sh/mm/kmap.c 	set_pte(kmap_coherent_pte - idx, mk_pte(page, PAGE_KERNEL));
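Hedged note on kmap_coherent() above: on aliasing VIPT sh caches it maps the page at a kernel address whose cache colour matches the user address, so kernel stores land in the same lines the user mapping will read. Usage mirrors cache.c; kunmap_coherent() is the matching unmap in arch/sh/mm/kmap.c:

	static void patch_user_page(struct page *page, unsigned long vaddr,
				    const void *src, size_t len)
	{
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);

		memcpy(vto, src, len);
		kunmap_coherent(vto);
	}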
page               54 arch/sh/mm/nommu.c void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
page               59 arch/sh/mm/nommu.c void local_flush_tlb_one(unsigned long asid, unsigned long page)
page               81 arch/sh/mm/nommu.c void *kmap_coherent(struct page *page, unsigned long addr)
page               70 arch/sh/mm/tlb-pteaex.c void local_flush_tlb_one(unsigned long asid, unsigned long page)
page               73 arch/sh/mm/tlb-pteaex.c 	__raw_writel(page, MMU_UTLB_ADDRESS_ARRAY | MMU_PAGE_ASSOC_BIT);
page               75 arch/sh/mm/tlb-pteaex.c 	__raw_writel(page, MMU_ITLB_ADDRESS_ARRAY | MMU_PAGE_ASSOC_BIT);
page               56 arch/sh/mm/tlb-sh3.c void local_flush_tlb_one(unsigned long asid, unsigned long page)
page               67 arch/sh/mm/tlb-sh3.c 	addr = MMU_TLB_ADDRESS_ARRAY | (page & 0x1F000);
page               68 arch/sh/mm/tlb-sh3.c 	data = (page & 0xfffe0000) | asid; /* VALID bit is off */
page               65 arch/sh/mm/tlb-sh4.c void local_flush_tlb_one(unsigned long asid, unsigned long page)
page               76 arch/sh/mm/tlb-sh4.c 	data = page | asid; /* VALID bit is off */
page               15 arch/sh/mm/tlbflush_32.c void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
page               25 arch/sh/mm/tlbflush_32.c 		page &= PAGE_MASK;
page               32 arch/sh/mm/tlbflush_32.c 		local_flush_tlb_one(asid, page);
page               31 arch/sh/mm/tlbflush_64.c void local_flush_tlb_one(unsigned long asid, unsigned long page)
page               39 arch/sh/mm/tlbflush_64.c 	lpage = neff_sign_extend(page);
page               67 arch/sh/mm/tlbflush_64.c void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
page               72 arch/sh/mm/tlbflush_64.c 		page &= PAGE_MASK;
page               74 arch/sh/mm/tlbflush_64.c 		local_flush_tlb_one(get_asid(), page);
page                7 arch/sparc/include/asm/agp.h #define map_page_into_agp(page)
page                8 arch/sparc/include/asm/agp.h #define unmap_page_from_agp(page)
page               22 arch/sparc/include/asm/cacheflush_32.h #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
page               24 arch/sparc/include/asm/cacheflush_32.h 		flush_cache_page(vma, vaddr, page_to_pfn(page));\
page               27 arch/sparc/include/asm/cacheflush_32.h #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
page               29 arch/sparc/include/asm/cacheflush_32.h 		flush_cache_page(vma, vaddr, page_to_pfn(page));\
page               40 arch/sparc/include/asm/cacheflush_32.h void sparc_flush_page_to_ram(struct page *page);
page               43 arch/sparc/include/asm/cacheflush_32.h #define flush_dcache_page(page)			sparc_flush_page_to_ram(page)
page               26 arch/sparc/include/asm/cacheflush_64.h #define flush_cache_page(vma, page, pfn) \
page               38 arch/sparc/include/asm/cacheflush_64.h void flush_dcache_page_impl(struct page *page);
page               40 arch/sparc/include/asm/cacheflush_64.h void smp_flush_dcache_page_impl(struct page *page, int cpu);
page               41 arch/sparc/include/asm/cacheflush_64.h void flush_dcache_page_all(struct mm_struct *mm, struct page *page);
page               43 arch/sparc/include/asm/cacheflush_64.h #define smp_flush_dcache_page_impl(page,cpu) flush_dcache_page_impl(page)
page               44 arch/sparc/include/asm/cacheflush_64.h #define flush_dcache_page_all(mm,page) flush_dcache_page_impl(page)
page               49 arch/sparc/include/asm/cacheflush_64.h void flush_dcache_page(struct page *page);
page               54 arch/sparc/include/asm/cacheflush_64.h void flush_ptrace_access(struct vm_area_struct *, struct page *,
page               58 arch/sparc/include/asm/cacheflush_64.h #define copy_to_user_page(vma, page, vaddr, dst, src, len)		\
page               60 arch/sparc/include/asm/cacheflush_64.h 		flush_cache_page(vma, vaddr, page_to_pfn(page));	\
page               62 arch/sparc/include/asm/cacheflush_64.h 		flush_ptrace_access(vma, page, vaddr, src, len, 0);	\
page               65 arch/sparc/include/asm/cacheflush_64.h #define copy_from_user_page(vma, page, vaddr, dst, src, len) 		\
page               67 arch/sparc/include/asm/cacheflush_64.h 		flush_cache_page(vma, vaddr, page_to_pfn(page));	\
page               69 arch/sparc/include/asm/cacheflush_64.h 		flush_ptrace_access(vma, page, vaddr, dst, len, 1);	\
page               53 arch/sparc/include/asm/highmem.h void *kmap_high(struct page *page);
page               54 arch/sparc/include/asm/highmem.h void kunmap_high(struct page *page);
page               56 arch/sparc/include/asm/highmem.h static inline void *kmap(struct page *page)
page               59 arch/sparc/include/asm/highmem.h 	if (!PageHighMem(page))
page               60 arch/sparc/include/asm/highmem.h 		return page_address(page);
page               61 arch/sparc/include/asm/highmem.h 	return kmap_high(page);
page               64 arch/sparc/include/asm/highmem.h static inline void kunmap(struct page *page)
page               67 arch/sparc/include/asm/highmem.h 	if (!PageHighMem(page))
page               69 arch/sparc/include/asm/highmem.h 	kunmap_high(page);
page               72 arch/sparc/include/asm/highmem.h void *kmap_atomic(struct page *page);
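Hedged usage sketch of the kmap()/kunmap() pair declared above: lowmem pages come straight from page_address(), highmem pages get a temporary mapping that may sleep (unlike kmap_atomic):

	static void copy_from_page(struct page *page, void *dst, size_t len)
	{
		void *src = kmap(page);		/* may sleep if page is highmem */

		memcpy(dst, src, len);
		kunmap(page);
	}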
page               56 arch/sparc/include/asm/hugetlb.h static inline void arch_clear_hugepage_flags(struct page *page)
page              207 arch/sparc/include/asm/leon.h void leon_flush_pcache_all(struct vm_area_struct *vma, unsigned long page);
page                5 arch/sparc/include/asm/page.h #define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
page               20 arch/sparc/include/asm/page_32.h #define clear_page(page)	 memset((void *)(page), 0, PAGE_SIZE)
page               22 arch/sparc/include/asm/page_32.h #define clear_user_page(addr, vaddr, page)	\
page               24 arch/sparc/include/asm/page_32.h 		sparc_flush_page_to_ram(page);	\
page               26 arch/sparc/include/asm/page_32.h #define copy_user_page(to, from, vaddr, page)	\
page               28 arch/sparc/include/asm/page_32.h 		sparc_flush_page_to_ram(page);	\
page              109 arch/sparc/include/asm/page_32.h typedef struct page *pgtable_t;
page               45 arch/sparc/include/asm/page_64.h void _clear_page(void *page);
page               47 arch/sparc/include/asm/page_64.h struct page;
page               48 arch/sparc/include/asm/page_64.h void clear_user_page(void *addr, unsigned long vaddr, struct page *page);
page               50 arch/sparc/include/asm/page_64.h void copy_user_page(void *to, void *from, unsigned long vaddr, struct page *topage);
page               53 arch/sparc/include/asm/page_64.h void copy_user_highpage(struct page *to, struct page *from,
page               56 arch/sparc/include/asm/page_64.h void copy_highpage(struct page *to, struct page *from);
page               13 arch/sparc/include/asm/pgalloc_32.h struct page;
page               53 arch/sparc/include/asm/pgalloc_32.h void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep);
page               27 arch/sparc/include/asm/pgtable_32.h struct page;
page              128 arch/sparc/include/asm/pgtable_32.h static inline struct page *pmd_page(pmd_t pmd)
page              284 arch/sparc/include/asm/pgtable_32.h static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
page              286 arch/sparc/include/asm/pgtable_32.h 	return __pte((page_to_pfn(page) << (PAGE_SHIFT-4)) | pgprot_val(pgprot));
page              289 arch/sparc/include/asm/pgtable_32.h static inline pte_t mk_pte_phys(unsigned long page, pgprot_t pgprot)
page              291 arch/sparc/include/asm/pgtable_32.h 	return __pte(((page) >> 4) | pgprot_val(pgprot));
page              294 arch/sparc/include/asm/pgtable_32.h static inline pte_t mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
page              296 arch/sparc/include/asm/pgtable_32.h 	return __pte(((page) >> 4) | (space << 28) | pgprot_val(pgprot));
page               86 arch/sparc/include/asm/pgtable_64.h #define vmemmap			((struct page *)VMEMMAP_BASE)
page              231 arch/sparc/include/asm/pgtable_64.h extern struct page *mem_map_zero;
page              246 arch/sparc/include/asm/pgtable_64.h #define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
page              255 arch/sparc/include/asm/pgtable_64.h #define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))
page              381 arch/sparc/include/asm/pgtable_64.h 				struct page *page, int writable);
page              175 arch/sparc/include/asm/ross.h static inline void hyper_flush_cache_page(unsigned long page)
page              179 arch/sparc/include/asm/ross.h 	page &= PAGE_MASK;
page              180 arch/sparc/include/asm/ross.h 	end = page + PAGE_SIZE;
page              181 arch/sparc/include/asm/ross.h 	while (page < end) {
page              184 arch/sparc/include/asm/ross.h 				     : "r" (page), "i" (ASI_M_FLUSH_PAGE)
page              186 arch/sparc/include/asm/ross.h 		page += vac_line_size;
page              185 arch/sparc/include/asm/spitfire.h static inline void spitfire_flush_dtlb_nucleus_page(unsigned long page)
page              190 arch/sparc/include/asm/spitfire.h 			     : "r" (page | 0x20), "i" (ASI_DMMU_DEMAP));
page              193 arch/sparc/include/asm/spitfire.h static inline void spitfire_flush_itlb_nucleus_page(unsigned long page)
page              198 arch/sparc/include/asm/spitfire.h 			     : "r" (page | 0x20), "i" (ASI_IMMU_DEMAP));
page               75 arch/sparc/include/asm/swift.h static inline void swift_flush_page(unsigned long page)
page               79 arch/sparc/include/asm/swift.h 			     : "r" (page), "i" (ASI_M_FLUSH_PAGE)
page              169 arch/sparc/include/asm/viking.h 	unsigned long info, page;
page              174 arch/sparc/include/asm/viking.h 			      : "=r" (info), "=r" (page)
page              178 arch/sparc/include/asm/viking.h 	data[1] = page;
page               98 arch/sparc/kernel/iommu.c 	struct page *page;
page              122 arch/sparc/kernel/iommu.c 	page = alloc_pages_node(numa_node, GFP_KERNEL, 0);
page              123 arch/sparc/kernel/iommu.c 	if (!page) {
page              127 arch/sparc/kernel/iommu.c 	iommu->dummy_page = (unsigned long) page_address(page);
page              133 arch/sparc/kernel/iommu.c 	page = alloc_pages_node(numa_node, GFP_KERNEL, order);
page              134 arch/sparc/kernel/iommu.c 	if (!page) {
page              138 arch/sparc/kernel/iommu.c 	iommu->page_table = (iopte_t *)page_address(page);
page              203 arch/sparc/kernel/iommu.c 	struct page *page;
page              214 arch/sparc/kernel/iommu.c 	page = alloc_pages_node(nid, gfp, order);
page              215 arch/sparc/kernel/iommu.c 	if (unlikely(!page))
page              218 arch/sparc/kernel/iommu.c 	first_page = (unsigned long) page_address(page);
page              263 arch/sparc/kernel/iommu.c static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
page              282 arch/sparc/kernel/iommu.c 	oaddr = (unsigned long)(page_address(page) + offset);
page             1036 arch/sparc/kernel/irq_64.c 	unsigned long page;
page             1050 arch/sparc/kernel/irq_64.c 	page = get_zeroed_page(GFP_KERNEL);
page             1051 arch/sparc/kernel/irq_64.c 	if (!page) {
page             1056 arch/sparc/kernel/irq_64.c 	tb->cpu_list_pa = __pa(page);
page              188 arch/sparc/kernel/pci_sun4v.c 	struct page *page;
page              204 arch/sparc/kernel/pci_sun4v.c 	page = alloc_pages_node(nid, gfp, order);
page              205 arch/sparc/kernel/pci_sun4v.c 	if (unlikely(!page))
page              208 arch/sparc/kernel/pci_sun4v.c 	first_page = (unsigned long) page_address(page);
page              353 arch/sparc/kernel/pci_sun4v.c static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
page              374 arch/sparc/kernel/pci_sun4v.c 	oaddr = (unsigned long)(page_address(page) + offset);
page             1279 arch/sparc/kernel/pci_sun4v.c 			unsigned long page = get_zeroed_page(GFP_KERNEL);
page             1281 arch/sparc/kernel/pci_sun4v.c 			if (!page)
page             1284 arch/sparc/kernel/pci_sun4v.c 			per_cpu(iommu_batch, i).pglist = (u64 *) page;
page              108 arch/sparc/kernel/ptrace_64.c void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
page              927 arch/sparc/kernel/smp_64.c static inline void __local_flush_dcache_page(struct page *page)
page              930 arch/sparc/kernel/smp_64.c 	__flush_dcache_page(page_address(page),
page              932 arch/sparc/kernel/smp_64.c 			     page_mapping_file(page) != NULL));
page              934 arch/sparc/kernel/smp_64.c 	if (page_mapping_file(page) != NULL &&
page              936 arch/sparc/kernel/smp_64.c 		__flush_icache_page(__pa(page_address(page)));
page              940 arch/sparc/kernel/smp_64.c void smp_flush_dcache_page_impl(struct page *page, int cpu)
page              954 arch/sparc/kernel/smp_64.c 		__local_flush_dcache_page(page);
page              956 arch/sparc/kernel/smp_64.c 		void *pg_addr = page_address(page);
page              961 arch/sparc/kernel/smp_64.c 			if (page_mapping_file(page) != NULL)
page              980 arch/sparc/kernel/smp_64.c void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
page              994 arch/sparc/kernel/smp_64.c 	pg_addr = page_address(page);
page              997 arch/sparc/kernel/smp_64.c 		if (page_mapping_file(page) != NULL)
page             1011 arch/sparc/kernel/smp_64.c 	__local_flush_dcache_page(page);
page               31 arch/sparc/kernel/uprobes.c static void copy_to_page(struct page *page, unsigned long vaddr,
page               34 arch/sparc/kernel/uprobes.c 	void *kaddr = kmap_atomic(page);
page               47 arch/sparc/kernel/uprobes.c void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
page               67 arch/sparc/kernel/uprobes.c 	copy_to_page(page, vaddr, &insn, len);
page               68 arch/sparc/kernel/uprobes.c 	copy_to_page(page, vaddr+len, &stp_insn, 4);
page               52 arch/sparc/mm/highmem.c void *kmap_atomic(struct page *page)
page               59 arch/sparc/mm/highmem.c 	if (!PageHighMem(page))
page               60 arch/sparc/mm/highmem.c 		return page_address(page);
page               76 arch/sparc/mm/highmem.c 	set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
page              182 arch/sparc/mm/hugetlbpage.c 			 struct page *page, int writeable)
page              296 arch/sparc/mm/init_32.c void sparc_flush_page_to_ram(struct page *page)
page              298 arch/sparc/mm/init_32.c 	unsigned long vaddr = (unsigned long)page_address(page);
page              180 arch/sparc/mm/init_64.c struct page *mem_map_zero __read_mostly;
page              198 arch/sparc/mm/init_64.c inline void flush_dcache_page_impl(struct page *page)
page              206 arch/sparc/mm/init_64.c 	__flush_dcache_page(page_address(page),
page              208 arch/sparc/mm/init_64.c 			     page_mapping_file(page) != NULL));
page              210 arch/sparc/mm/init_64.c 	if (page_mapping_file(page) != NULL &&
page              212 arch/sparc/mm/init_64.c 		__flush_icache_page(__pa(page_address(page)));
page              221 arch/sparc/mm/init_64.c #define dcache_dirty_cpu(page) \
page              222 arch/sparc/mm/init_64.c 	(((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)
page              224 arch/sparc/mm/init_64.c static inline void set_dcache_dirty(struct page *page, int this_cpu)
page              241 arch/sparc/mm/init_64.c 			     : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags)
page              245 arch/sparc/mm/init_64.c static inline void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
page              263 arch/sparc/mm/init_64.c 			     : "r" (cpu), "r" (mask), "r" (&page->flags),
page              283 arch/sparc/mm/init_64.c 	struct page *page;
page              285 arch/sparc/mm/init_64.c 	page = pfn_to_page(pfn);
page              286 arch/sparc/mm/init_64.c 	if (page) {
page              289 arch/sparc/mm/init_64.c 		pg_flags = page->flags;
page              299 arch/sparc/mm/init_64.c 				flush_dcache_page_impl(page);
page              301 arch/sparc/mm/init_64.c 				smp_flush_dcache_page_impl(page, cpu);
page              303 arch/sparc/mm/init_64.c 			clear_dcache_dirty_cpu(page, cpu);
page              475 arch/sparc/mm/init_64.c void flush_dcache_page(struct page *page)
page              487 arch/sparc/mm/init_64.c 	if (page == ZERO_PAGE(0))
page              492 arch/sparc/mm/init_64.c 	mapping = page_mapping_file(page);
page              494 arch/sparc/mm/init_64.c 		int dirty = test_bit(PG_dcache_dirty, &page->flags);
page              496 arch/sparc/mm/init_64.c 			int dirty_cpu = dcache_dirty_cpu(page);
page              500 arch/sparc/mm/init_64.c 			smp_flush_dcache_page_impl(page, dirty_cpu);
page              502 arch/sparc/mm/init_64.c 		set_dcache_dirty(page, this_cpu);
page              509 arch/sparc/mm/init_64.c 		flush_dcache_page_impl(page);
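Hedged summary of the sparc64 scheme above: flush_dcache_page() defers the flush for file-backed pages by stashing "dirty + owning cpu" in page->flags, and the flush is replayed when the pte is installed. Schematically (the real code also handles pages already marked dirty by another cpu):

	void flush_dcache_page(struct page *page)
	{
		int this_cpu = get_cpu();

		if (page_mapping_file(page) != NULL)
			set_dcache_dirty(page, this_cpu);	/* defer */
		else
			flush_dcache_page_impl(page);		/* flush now */
		put_cpu();
	}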
page             1921 arch/sparc/mm/init_64.c void __kernel_map_pages(struct page *page, int numpages, int enable)
page             1923 arch/sparc/mm/init_64.c 	unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT;
page             2563 arch/sparc/mm/init_64.c 		unsigned long page;
page             2565 arch/sparc/mm/init_64.c 		page = (addr +
page             2571 arch/sparc/mm/init_64.c 			free_reserved_page(virt_to_page(page));
page             2805 arch/sparc/mm/init_64.c pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space, unsigned long page_size)
page             2809 arch/sparc/mm/init_64.c 	pte_val(pte)  = page | pgprot_val(pgprot_noncached(prot));
page             2892 arch/sparc/mm/init_64.c 	struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
page             2895 arch/sparc/mm/init_64.c 	if (page)
page             2896 arch/sparc/mm/init_64.c 		pte = (pte_t *) page_address(page);
page             2903 arch/sparc/mm/init_64.c 	struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
page             2904 arch/sparc/mm/init_64.c 	if (!page)
page             2906 arch/sparc/mm/init_64.c 	if (!pgtable_pte_page_ctor(page)) {
page             2907 arch/sparc/mm/init_64.c 		free_unref_page(page);
page             2910 arch/sparc/mm/init_64.c 	return (pte_t *) page_address(page);
page             2920 arch/sparc/mm/init_64.c 	struct page *page = virt_to_page(pte);
page             2922 arch/sparc/mm/init_64.c 	pgtable_pte_page_dtor(page);
page             2923 arch/sparc/mm/init_64.c 	__free_page(page);
page             3128 arch/sparc/mm/init_64.c void copy_user_highpage(struct page *to, struct page *from,
page             3163 arch/sparc/mm/init_64.c void copy_highpage(struct page *to, struct page *from)
page              143 arch/sparc/mm/io-unit.c static dma_addr_t iounit_map_page(struct device *dev, struct page *page,
page              147 arch/sparc/mm/io-unit.c 	void *vaddr = page_address(page) + offset;
page              218 arch/sparc/mm/io-unit.c 	unsigned long va, addr, page, end, ret;
page              239 arch/sparc/mm/io-unit.c 		page = va;
page              250 arch/sparc/mm/io-unit.c 			set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
page              255 arch/sparc/mm/io-unit.c 			sbus_writel(iopte_val(MKIOPTE(__pa(page))), iopte);
page               45 arch/sparc/mm/iommu.c extern void viking_flush_page(unsigned long page);
page               46 arch/sparc/mm/iommu.c extern void viking_mxcc_flush_page(unsigned long page);
page              178 arch/sparc/mm/iommu.c static dma_addr_t __sbus_iommu_map_page(struct device *dev, struct page *page,
page              182 arch/sparc/mm/iommu.c 	phys_addr_t paddr = page_to_phys(page) + offset;
page              199 arch/sparc/mm/iommu.c 	if (per_page_flush && !PageHighMem(page)) {
page              202 arch/sparc/mm/iommu.c 		vaddr = (unsigned long)page_address(page) + offset;
page              229 arch/sparc/mm/iommu.c 		struct page *page, unsigned long offset, size_t len,
page              233 arch/sparc/mm/iommu.c 	return __sbus_iommu_map_page(dev, page, offset, len, false);
page              237 arch/sparc/mm/iommu.c 		struct page *page, unsigned long offset, size_t len,
page              240 arch/sparc/mm/iommu.c 	return __sbus_iommu_map_page(dev, page, offset, len, true);
page              311 arch/sparc/mm/iommu.c 	unsigned long va, addr, page, end, ret;
page              343 arch/sparc/mm/iommu.c 		page = va;
page              350 arch/sparc/mm/iommu.c 				viking_mxcc_flush_page(page);
page              352 arch/sparc/mm/iommu.c 				viking_flush_page(page);
page              354 arch/sparc/mm/iommu.c 				__flush_page_to_ram(page);
page              360 arch/sparc/mm/iommu.c 			set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
page              363 arch/sparc/mm/iommu.c 		    MKIOPTE(page_to_pfn(virt_to_page(page)), ioperm_noc);
page              396 arch/sparc/mm/iommu.c 	struct page *page = virt_to_page(cpu_addr);
page              416 arch/sparc/mm/iommu.c 	__free_pages(page, get_order(len));
page              194 arch/sparc/mm/leon_mm.c void leon_flush_pcache_all(struct vm_area_struct *vma, unsigned long page)
page              282 arch/sparc/mm/leon_mm.c static void leon_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
page              284 arch/sparc/mm/leon_mm.c 	leon_flush_pcache_all(vma, page);
page              300 arch/sparc/mm/leon_mm.c 				unsigned long page)
page              312 arch/sparc/mm/leon_mm.c static void leon_flush_page_to_ram(unsigned long page)
page              317 arch/sparc/mm/leon_mm.c static void leon_flush_sig_insns(struct mm_struct *mm, unsigned long page)
page              322 arch/sparc/mm/leon_mm.c static void leon_flush_page_for_dma(unsigned long page)
page              149 arch/sparc/mm/srmmu.c void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep)
page              376 arch/sparc/mm/srmmu.c 	struct page *page;
page              380 arch/sparc/mm/srmmu.c 	page = pfn_to_page(__nocache_pa(pte) >> PAGE_SHIFT);
page              381 arch/sparc/mm/srmmu.c 	if (!pgtable_pte_page_ctor(page)) {
page              382 arch/sparc/mm/srmmu.c 		__free_page(page);
page              385 arch/sparc/mm/srmmu.c 	return page;
page              579 arch/sparc/mm/srmmu.c extern void tsunami_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
page              580 arch/sparc/mm/srmmu.c extern void tsunami_flush_page_to_ram(unsigned long page);
page              581 arch/sparc/mm/srmmu.c extern void tsunami_flush_page_for_dma(unsigned long page);
page              586 arch/sparc/mm/srmmu.c extern void tsunami_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
page              594 arch/sparc/mm/srmmu.c extern void swift_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
page              595 arch/sparc/mm/srmmu.c extern void swift_flush_page_to_ram(unsigned long page);
page              596 arch/sparc/mm/srmmu.c extern void swift_flush_page_for_dma(unsigned long page);
page              602 arch/sparc/mm/srmmu.c extern void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
page              605 arch/sparc/mm/srmmu.c void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
page              609 arch/sparc/mm/srmmu.c 	page &= PAGE_MASK;
page              616 arch/sparc/mm/srmmu.c 			swift_flush_page(page);
page              618 arch/sparc/mm/srmmu.c 					"r" (page), "i" (ASI_M_FLUSH_PROBE));
page              624 arch/sparc/mm/srmmu.c 			swift_flush_page(page);
page              627 arch/sparc/mm/srmmu.c 				"r" (page), "i" (ASI_M_FLUSH_PROBE));
page              646 arch/sparc/mm/srmmu.c extern void viking_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
page              647 arch/sparc/mm/srmmu.c extern void viking_flush_page_to_ram(unsigned long page);
page              648 arch/sparc/mm/srmmu.c extern void viking_flush_page_for_dma(unsigned long page);
page              650 arch/sparc/mm/srmmu.c extern void viking_flush_page(unsigned long page);
page              651 arch/sparc/mm/srmmu.c extern void viking_mxcc_flush_page(unsigned long page);
page              657 arch/sparc/mm/srmmu.c 				  unsigned long page);
page              663 arch/sparc/mm/srmmu.c 				  unsigned long page);
page              669 arch/sparc/mm/srmmu.c extern void hypersparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
page              670 arch/sparc/mm/srmmu.c extern void hypersparc_flush_page_to_ram(unsigned long page);
page              671 arch/sparc/mm/srmmu.c extern void hypersparc_flush_page_for_dma(unsigned long page);
page              676 arch/sparc/mm/srmmu.c extern void hypersparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
page             1271 arch/sparc/mm/srmmu.c static void turbosparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
page             1282 arch/sparc/mm/srmmu.c static void turbosparc_flush_page_to_ram(unsigned long page)
page             1287 arch/sparc/mm/srmmu.c 	if (srmmu_probe(page))
page             1288 arch/sparc/mm/srmmu.c 		turbosparc_flush_page_cache(page);
page             1297 arch/sparc/mm/srmmu.c static void turbosparc_flush_page_for_dma(unsigned long page)
page             1321 arch/sparc/mm/srmmu.c static void turbosparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
page             1655 arch/sparc/mm/srmmu.c static void smp_flush_page_for_dma(unsigned long page)
page             1657 arch/sparc/mm/srmmu.c 	xc1((smpfunc_t) local_ops->page_for_dma, page);
page             1658 arch/sparc/mm/srmmu.c 	local_ops->page_for_dma(page);
page             1735 arch/sparc/mm/srmmu.c static void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
page             1745 arch/sparc/mm/srmmu.c 			    (unsigned long) vma, page);
page             1746 arch/sparc/mm/srmmu.c 		local_ops->cache_page(vma, page);
page             1750 arch/sparc/mm/srmmu.c static void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
page             1760 arch/sparc/mm/srmmu.c 			    (unsigned long) vma, page);
page             1761 arch/sparc/mm/srmmu.c 		local_ops->tlb_page(vma, page);
page             1765 arch/sparc/mm/srmmu.c static void smp_flush_page_to_ram(unsigned long page)
page             1774 arch/sparc/mm/srmmu.c 	xc1((smpfunc_t) local_ops->page_to_ram, page);
page             1776 arch/sparc/mm/srmmu.c 	local_ops->page_to_ram(page);
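
The smp_flush_* wrappers above all follow one shape: cross-call the per-CPU-model local op to the other CPUs via xc1(), then run it locally. A standalone sketch of that dispatch, with the ops structure trimmed to two fields and every name illustrative:

#include <stdio.h>

/* Mirrors the local_ops->page_to_ram / page_for_dma fields used above;
 * the real sparc32 ops table has many more entries. */
struct cachetlb_ops {
	void (*page_to_ram)(unsigned long page);
	void (*page_for_dma)(unsigned long page);
};

static void model_page_to_ram(unsigned long page)
{
	printf("local flush of %#lx to RAM\n", page);
}

static void model_page_for_dma(unsigned long page)
{
	printf("local flush of %#lx for DMA\n", page);
}

static const struct cachetlb_ops local = {
	.page_to_ram  = model_page_to_ram,
	.page_for_dma = model_page_for_dma,
};

/* Stands in for xc1(): run a one-argument function on the other CPUs. */
static void xc1(void (*func)(unsigned long), unsigned long arg)
{
	printf("broadcast %p(%#lx) to other CPUs\n", (void *)func, arg);
}

static void smp_flush_page_to_ram(unsigned long page)
{
	xc1(local.page_to_ram, page);	/* remote CPUs first */
	local.page_to_ram(page);	/* then this CPU */
}

int main(void)
{
	smp_flush_page_to_ram(0x2000);
	return 0;
}
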
page              121 arch/sparc/mm/tlb.c 		struct page *page;
page              126 arch/sparc/mm/tlb.c 		page = pfn_to_page(pfn);
page              127 arch/sparc/mm/tlb.c 		if (PageReserved(page))
page              131 arch/sparc/mm/tlb.c 		mapping = page_mapping_file(page);
page              135 arch/sparc/mm/tlb.c 		paddr = (unsigned long) page_address(page);
page              137 arch/sparc/mm/tlb.c 			flush_dcache_page_all(mm, page);
page              250 arch/sparc/vdso/vma.c 	struct page *dp, **dpp = NULL;
page              251 arch/sparc/vdso/vma.c 	struct page *cp, **cpp = NULL;
page              268 arch/sparc/vdso/vma.c 	cpp = kcalloc(cnpages, sizeof(struct page *), GFP_KERNEL);
page              290 arch/sparc/vdso/vma.c 		dpp = kcalloc(dnpages, sizeof(struct page *), GFP_KERNEL);
page              336 arch/um/drivers/mconsole_kern.c 			struct page *page;
page              338 arch/um/drivers/mconsole_kern.c 			page = alloc_page(GFP_ATOMIC);
page              339 arch/um/drivers/mconsole_kern.c 			if (page == NULL)
page              342 arch/um/drivers/mconsole_kern.c 			unplugged = page_address(page);
page               15 arch/um/include/asm/mmu.h 	struct page *stub_pages[2];
page               19 arch/um/include/asm/page.h struct page;
page               29 arch/um/include/asm/page.h #define clear_page(page)	memset((void *)(page), 0, PAGE_SIZE)
page               32 arch/um/include/asm/page.h #define clear_user_page(page, vaddr, pg)	clear_page(page)
page               82 arch/um/include/asm/page.h typedef struct page *pgtable_t;
page              284 arch/um/include/asm/pgtable.h #define page_to_phys(page) pfn_to_phys(page_to_pfn(page))
page              287 arch/um/include/asm/pgtable.h #define mk_pte(page, pgprot) \
page              290 arch/um/include/asm/pgtable.h 	pte_set_val(pte, page_to_phys(page), (pgprot));	\
page               68 arch/um/kernel/process.c 	unsigned long page;
page               73 arch/um/kernel/process.c 	page = __get_free_pages(flags, order);
page               75 arch/um/kernel/process.c 	return page;
page               62 arch/um/kernel/skas/uaccess.c 	struct page *page;
page               70 arch/um/kernel/skas/uaccess.c 	page = pte_page(*pte);
page               73 arch/um/kernel/skas/uaccess.c 	addr = (unsigned long) page_address(page) +
page               76 arch/um/kernel/skas/uaccess.c 	addr = (unsigned long) kmap_atomic(page) +
page              106 arch/unicore32/include/asm/cacheflush.h extern void copy_to_user_page(struct vm_area_struct *, struct page *,
page              108 arch/unicore32/include/asm/cacheflush.h #define copy_from_user_page(vma, page, vaddr, dst, src, len)	\
page              168 arch/unicore32/include/asm/cacheflush.h extern void flush_dcache_page(struct page *);
page              173 arch/unicore32/include/asm/cacheflush.h #define flush_icache_user_range(vma, page, addr, len)	\
page              174 arch/unicore32/include/asm/cacheflush.h 	flush_dcache_page(page)
page              180 arch/unicore32/include/asm/cacheflush.h #define flush_icache_page(vma, page)	do { } while (0)
page               57 arch/unicore32/include/asm/memory.h #define page_to_phys(page)	(__pfn_to_phys(page_to_pfn(page)))
page               19 arch/unicore32/include/asm/page.h struct page;
page               22 arch/unicore32/include/asm/page.h #define clear_page(page)	memset((void *)(page), 0, PAGE_SIZE)
page               25 arch/unicore32/include/asm/page.h #define clear_user_page(page, vaddr, pg)	clear_page(page)
page               64 arch/unicore32/include/asm/page.h typedef struct page *pgtable_t;
page               47 arch/unicore32/include/asm/pgalloc.h 	struct page *pte;
page              147 arch/unicore32/include/asm/pgtable.h extern struct page *empty_zero_page;
page              226 arch/unicore32/include/asm/pgtable.h #define mk_pte(page, prot)	pfn_pte(page_to_pfn(page), prot)
page               32 arch/unicore32/mm/flush.c static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
page               50 arch/unicore32/mm/flush.c void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
page               55 arch/unicore32/mm/flush.c 	flush_ptrace_access(vma, page, uaddr, dst, len);
page               58 arch/unicore32/mm/flush.c void __flush_dcache_page(struct address_space *mapping, struct page *page)
page               65 arch/unicore32/mm/flush.c 	__cpuc_flush_kern_dcache_area(page_address(page), PAGE_SIZE);
page               72 arch/unicore32/mm/flush.c void flush_dcache_page(struct page *page)
page               80 arch/unicore32/mm/flush.c 	if (page == ZERO_PAGE(0))
page               83 arch/unicore32/mm/flush.c 	mapping = page_mapping_file(page);
page               86 arch/unicore32/mm/flush.c 		clear_bit(PG_dcache_clean, &page->flags);
page               88 arch/unicore32/mm/flush.c 		__flush_dcache_page(mapping, page);
page               91 arch/unicore32/mm/flush.c 		set_bit(PG_dcache_clean, &page->flags);
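
Only lines containing "page" appear in the listing, so the branch structure of unicore32's flush_dcache_page() is elided above. A hedged reconstruction of the control flow connecting those fragments (icache handling omitted):

void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	/* The zero page is never written, so never needs flushing. */
	if (page == ZERO_PAGE(0))
		return;

	mapping = page_mapping_file(page);

	if (mapping && !mapping_mapped(mapping))
		/* No user mappings yet: defer, mark the dcache dirty. */
		clear_bit(PG_dcache_clean, &page->flags);
	else {
		/* Flush now and remember that the page is clean. */
		__flush_dcache_page(mapping, page);
		set_bit(PG_dcache_clean, &page->flags);
	}
}
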
page              202 arch/unicore32/mm/init.c 	struct page *start_pg, *end_pg;
page               35 arch/unicore32/mm/mm.h extern void __flush_dcache_page(struct address_space *, struct page *);
page               34 arch/unicore32/mm/mmu.c struct page *empty_zero_page;
page              494 arch/unicore32/mm/mmu.c 	struct page *page;
page              503 arch/unicore32/mm/mmu.c 	page = pfn_to_page(pfn);
page              504 arch/unicore32/mm/mmu.c 	if (page == ZERO_PAGE(0))
page              507 arch/unicore32/mm/mmu.c 	mapping = page_mapping_file(page);
page              508 arch/unicore32/mm/mmu.c 	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
page              509 arch/unicore32/mm/mmu.c 		__flush_dcache_page(mapping, page);
page               50 arch/x86/entry/vdso/vma.c 	vmf->page = virt_to_page(image->data + (vmf->pgoff << PAGE_SHIFT));
page               51 arch/x86/entry/vdso/vma.c 	get_page(vmf->page);
page              893 arch/x86/events/amd/core.c static ssize_t amd_event_sysfs_show(char *page, u64 config)
page              898 arch/x86/events/amd/core.c 	return x86_event_sysfs_show(page, config, event);
page              267 arch/x86/events/amd/uncore.c 		char *page)						     \
page              270 arch/x86/events/amd/uncore.c 	return sprintf(page, _format "\n");				     \
page             1647 arch/x86/events/core.c ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr, char *page)
page             1658 arch/x86/events/core.c 		return sprintf(page, "%s", pmu_attr->event_str);
page             1660 arch/x86/events/core.c 	return x86_pmu.events_sysfs_show(page, config);
page             1665 arch/x86/events/core.c 			  char *page)
page             1681 arch/x86/events/core.c 	return sprintf(page, "%s",
page             1737 arch/x86/events/core.c ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event)
page             1751 arch/x86/events/core.c 	ret = sprintf(page, "event=0x%02llx", event);
page             1754 arch/x86/events/core.c 		ret += sprintf(page + ret, ",umask=0x%02llx", umask);
page             1757 arch/x86/events/core.c 		ret += sprintf(page + ret, ",edge");
page             1760 arch/x86/events/core.c 		ret += sprintf(page + ret, ",pc");
page             1763 arch/x86/events/core.c 		ret += sprintf(page + ret, ",any");
page             1766 arch/x86/events/core.c 		ret += sprintf(page + ret, ",inv");
page             1769 arch/x86/events/core.c 		ret += sprintf(page + ret, ",cmask=0x%02llx", cmask);
page             1771 arch/x86/events/core.c 	ret += sprintf(page + ret, "\n");
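
The sprintf chain above builds the sysfs event string piecewise. A standalone sketch showing the output for one hypothetical config value, assuming the usual PERFEVTSEL layout (event select in bits 0-7, umask in bits 8-15; the real function takes the extended event bits separately):

#include <stdio.h>

int main(void)
{
	unsigned long long config = 0x53c0;	/* hypothetical encoding */
	unsigned long long event = config & 0xff;
	unsigned long long umask = (config >> 8) & 0xff;
	char page[64];
	int ret = sprintf(page, "event=0x%02llx", event);

	if (umask)
		ret += sprintf(page + ret, ",umask=0x%02llx", umask);
	ret += sprintf(page + ret, "\n");
	fputs(page, stdout);	/* prints: event=0xc0,umask=0x53 */
	return 0;
}
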
page               45 arch/x86/events/intel/bts.c 	struct page	*page;
page               66 arch/x86/events/intel/bts.c static int buf_nr_pages(struct page *page)
page               68 arch/x86/events/intel/bts.c 	if (!PagePrivate(page))
page               71 arch/x86/events/intel/bts.c 	return 1 << page_private(page);
page               74 arch/x86/events/intel/bts.c static size_t buf_size(struct page *page)
page               76 arch/x86/events/intel/bts.c 	return buf_nr_pages(page) * PAGE_SIZE;
page               84 arch/x86/events/intel/bts.c 	struct page *page;
page               93 arch/x86/events/intel/bts.c 		page = virt_to_page(pages[pg]);
page               94 arch/x86/events/intel/bts.c 		pg += buf_nr_pages(page);
page              117 arch/x86/events/intel/bts.c 		page = virt_to_page(pages[pg]);
page              118 arch/x86/events/intel/bts.c 		__nr_pages = buf_nr_pages(page);
page              119 arch/x86/events/intel/bts.c 		buf->buf[nbuf].page = page;
page              122 arch/x86/events/intel/bts.c 		buf->buf[nbuf].size = buf_size(page) - buf->buf[nbuf].displacement;
page              150 arch/x86/events/intel/bts.c 	struct page *page = phys->page;
page              155 arch/x86/events/intel/bts.c 		if (buf->end < phys->offset + buf_size(page))
page              168 arch/x86/events/intel/bts.c 	ds->bts_buffer_base = (u64)(long)page_address(page) + phys->displacement;
page              180 arch/x86/events/intel/bts.c 	memset(page_address(phys->page) + index, 0, phys->size - index);
page              394 arch/x86/events/intel/bts.c 		gap = buf_size(phys->page) - phys->displacement - phys->size +
page             3608 arch/x86/events/intel/core.c ssize_t intel_event_sysfs_show(char *page, u64 config)
page             3612 arch/x86/events/intel/core.c 	return x86_event_sysfs_show(page, config, event);
page              112 arch/x86/events/intel/cstate.c 				char *page)			\
page              115 arch/x86/events/intel/cstate.c 	return sprintf(page, _format "\n");			\
page              322 arch/x86/events/intel/ds.c 	struct page *page;
page              324 arch/x86/events/intel/ds.c 	page = __alloc_pages_node(node, flags | __GFP_ZERO, order);
page              325 arch/x86/events/intel/ds.c 	return page ? page_address(page) : NULL;
page              138 arch/x86/events/intel/pt.c 		    char *page)
page              145 arch/x86/events/intel/pt.c 		return sprintf(page, "%lu\n", pt_pmu.max_nonturbo_ratio);
page              147 arch/x86/events/intel/pt.c 		return sprintf(page, "%u:%u\n",
page              616 arch/x86/events/intel/pt.c 	struct page *p;
page              705 arch/x86/events/intel/pt.c 	struct page *p;
page               97 arch/x86/events/intel/rapl.c 				char *page)			\
page              100 arch/x86/events/intel/rapl.c 	return sprintf(page, _format "\n");			\
page              180 arch/x86/events/intel/uncore.h 				char *page)				\
page              183 arch/x86/events/intel/uncore.h 	return sprintf(page, _format "\n");				\
page              628 arch/x86/events/perf_event.h 	ssize_t		(*events_sysfs_show)(char *page, u64 config);
page              897 arch/x86/events/perf_event.h ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event);
page              898 arch/x86/events/perf_event.h ssize_t intel_event_sysfs_show(char *page, u64 config);
page              901 arch/x86/events/perf_event.h 			  char *page);
page              903 arch/x86/events/perf_event.h 			  char *page);
page               60 arch/x86/hyperv/hv_init.c 	struct page *pg;
page               77 arch/x86/hyperv/nested.c 		flush->gpa_list[gpa_n].page.additional_pages = additional_pages;
page               78 arch/x86/hyperv/nested.c 		flush->gpa_list[gpa_n].page.largepage = false;
page               79 arch/x86/hyperv/nested.c 		flush->gpa_list[gpa_n].page.basepfn = cur;
page               16 arch/x86/include/asm/agp.h #define map_page_into_agp(page) set_pages_uc(page, 1)
page               17 arch/x86/include/asm/agp.h #define unmap_page_from_agp(page) set_pages_wb(page, 1)
page               61 arch/x86/include/asm/highmem.h extern void *kmap_high(struct page *page);
page               62 arch/x86/include/asm/highmem.h extern void kunmap_high(struct page *page);
page               64 arch/x86/include/asm/highmem.h void *kmap(struct page *page);
page               65 arch/x86/include/asm/highmem.h void kunmap(struct page *page);
page               67 arch/x86/include/asm/highmem.h void *kmap_atomic_prot(struct page *page, pgprot_t prot);
page               68 arch/x86/include/asm/highmem.h void *kmap_atomic(struct page *page);
page               16 arch/x86/include/asm/hugetlb.h static inline void arch_clear_hugepage_flags(struct page *page)
page              870 arch/x86/include/asm/hyperv-tlfs.h 	} page;
page              157 arch/x86/include/asm/io.h #define page_to_phys(page)    ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
page             1472 arch/x86/include/asm/kvm_host.h 	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);
page             1474 arch/x86/include/asm/kvm_host.h 	return (struct kvm_mmu_page *)page_private(page);
page               12 arch/x86/include/asm/mmx.h extern void mmx_clear_page(void *page);
page               19 arch/x86/include/asm/page.h struct page;
page               25 arch/x86/include/asm/page.h static inline void clear_user_page(void *page, unsigned long vaddr,
page               26 arch/x86/include/asm/page.h 				   struct page *pg)
page               28 arch/x86/include/asm/page.h 	clear_page(page);
page               32 arch/x86/include/asm/page.h 				  struct page *topage)
page               25 arch/x86/include/asm/page_32.h static inline void clear_page(void *page)
page               27 arch/x86/include/asm/page_32.h 	mmx_clear_page(page);
page               37 arch/x86/include/asm/page_32.h static inline void clear_page(void *page)
page               39 arch/x86/include/asm/page_32.h 	memset(page, 0, PAGE_SIZE);
page               43 arch/x86/include/asm/page_64.h void clear_page_orig(void *page);
page               44 arch/x86/include/asm/page_64.h void clear_page_rep(void *page);
page               45 arch/x86/include/asm/page_64.h void clear_page_erms(void *page);
page               47 arch/x86/include/asm/page_64.h static inline void clear_page(void *page)
page               52 arch/x86/include/asm/page_64.h 			   "=D" (page),
page               53 arch/x86/include/asm/page_64.h 			   "0" (page)
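
The "=D"/"0" fragments above are the asm constraints of an alternatives-based dispatch among the clear_page_orig/rep/erms variants declared just before. A standalone userspace sketch of the same idea, selecting a variant once from CPUID (ERMS is CPUID.(EAX=7,ECX=0):EBX bit 9); the kernel patches the call sites at boot instead of using a function pointer, and the memset bodies here merely stand in for rep stosq/stosb:

#include <cpuid.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

static void clear_page_rep(void *page)
{
	memset(page, 0, PAGE_SIZE);	/* stands in for rep stosq */
}

static void clear_page_erms(void *page)
{
	memset(page, 0, PAGE_SIZE);	/* stands in for rep stosb */
}

static void (*clear_page)(void *) = clear_page_rep;

int main(void)
{
	unsigned int a, b, c, d;
	static uint8_t buf[PAGE_SIZE];

	if (__get_cpuid_count(7, 0, &a, &b, &c, &d) && (b & (1u << 9)))
		clear_page = clear_page_erms;	/* CPU has ERMS */
	clear_page(buf);
	printf("page cleared with %s variant\n",
	       clear_page == clear_page_erms ? "erms" : "rep");
	return 0;
}
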
page               48 arch/x86/include/asm/paravirt_types.h struct page;
page               55 arch/x86/include/asm/pgalloc.h extern void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte);
page               57 arch/x86/include/asm/pgalloc.h static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
page               78 arch/x86/include/asm/pgalloc.h 				struct page *pte)
page               91 arch/x86/include/asm/pgalloc.h 	struct page *page;
page               96 arch/x86/include/asm/pgalloc.h 	page = alloc_pages(gfp, 0);
page               97 arch/x86/include/asm/pgalloc.h 	if (!page)
page               99 arch/x86/include/asm/pgalloc.h 	if (!pgtable_pmd_page_ctor(page)) {
page              100 arch/x86/include/asm/pgalloc.h 		__free_pages(page, 0);
page              103 arch/x86/include/asm/pgalloc.h 	return (pmd_t *)page_address(page);
page               56 arch/x86/include/asm/pgtable.h extern struct mm_struct *pgd_page_get_mm(struct page *page);
page              825 arch/x86/include/asm/pgtable.h #define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))
page             1158 arch/x86/include/asm/pgtable.h #define mk_pmd(page, pgprot)   pfn_pmd(page_to_pfn(page), (pgprot))
page              253 arch/x86/include/asm/pgtable_64.h #define vmemmap ((struct page *)VMEMMAP_START)
page              507 arch/x86/include/asm/pgtable_types.h typedef struct page *pgtable_t;
page               50 arch/x86/include/asm/set_memory.h int set_pages_array_uc(struct page **pages, int addrinarray);
page               51 arch/x86/include/asm/set_memory.h int set_pages_array_wc(struct page **pages, int addrinarray);
page               52 arch/x86/include/asm/set_memory.h int set_pages_array_wt(struct page **pages, int addrinarray);
page               53 arch/x86/include/asm/set_memory.h int set_pages_array_wb(struct page **pages, int addrinarray);
page               75 arch/x86/include/asm/set_memory.h int set_pages_uc(struct page *page, int numpages);
page               76 arch/x86/include/asm/set_memory.h int set_pages_wb(struct page *page, int numpages);
page               77 arch/x86/include/asm/set_memory.h int set_pages_ro(struct page *page, int numpages);
page               78 arch/x86/include/asm/set_memory.h int set_pages_rw(struct page *page, int numpages);
page               80 arch/x86/include/asm/set_memory.h int set_direct_map_invalid_noflush(struct page *page);
page               81 arch/x86/include/asm/set_memory.h int set_direct_map_default_noflush(struct page *page);
page              615 arch/x86/include/asm/tlbflush.h #define paravirt_tlb_remove_table(tlb, page) \
page              616 arch/x86/include/asm/tlbflush.h 	tlb_remove_page(tlb, (void *)(page))
page              192 arch/x86/include/asm/uaccess_64.h extern void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
page               68 arch/x86/include/asm/xen/page.h 				   struct page **pages, unsigned int count);
page               71 arch/x86/include/asm/xen/page.h 				     struct page **pages, unsigned int count);
page               76 arch/x86/include/asm/xen/page.h 			struct page **pages, unsigned int count)
page               84 arch/x86/include/asm/xen/page.h 			  struct page **pages, unsigned int count)
page              791 arch/x86/kernel/alternative.c 	struct page *pages[2] = {NULL};
page              228 arch/x86/kernel/amd_gart_64.c static dma_addr_t gart_map_page(struct device *dev, struct page *page,
page              234 arch/x86/kernel/amd_gart_64.c 	phys_addr_t paddr = page_to_phys(page) + offset;
page               39 arch/x86/kernel/cpu/mtrr/if.c 	      unsigned int type, bool increment, struct file *file, int page)
page               51 arch/x86/kernel/cpu/mtrr/if.c 	if (!page) {
page               65 arch/x86/kernel/cpu/mtrr/if.c 	      struct file *file, int page)
page               70 arch/x86/kernel/cpu/mtrr/if.c 	if (!page) {
page               77 arch/x86/kernel/espfix_64.c 	unsigned long page, slot;
page               80 arch/x86/kernel/espfix_64.c 	page = (cpu / ESPFIX_STACKS_PER_PAGE) ^ page_random;
page               82 arch/x86/kernel/espfix_64.c 	addr = (page << PAGE_SHIFT) + (slot * ESPFIX_STACK_SIZE);
page              133 arch/x86/kernel/espfix_64.c 	unsigned int page;
page              147 arch/x86/kernel/espfix_64.c 	page = cpu/ESPFIX_STACKS_PER_PAGE;
page              150 arch/x86/kernel/espfix_64.c 	stack_page = READ_ONCE(espfix_pages[page]);
page              157 arch/x86/kernel/espfix_64.c 	stack_page = READ_ONCE(espfix_pages[page]);
page              167 arch/x86/kernel/espfix_64.c 		struct page *page = alloc_pages_node(node, PGALLOC_GFP, 0);
page              169 arch/x86/kernel/espfix_64.c 		pmd_p = (pmd_t *)page_address(page);
page              179 arch/x86/kernel/espfix_64.c 		struct page *page = alloc_pages_node(node, PGALLOC_GFP, 0);
page              181 arch/x86/kernel/espfix_64.c 		pte_p = (pte_t *)page_address(page);
page              199 arch/x86/kernel/espfix_64.c 	WRITE_ONCE(espfix_pages[page], stack_page);
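
A standalone sketch of the espfix slot arithmetic shown above. The constants assume the 64-byte-stack layout (64 slots per 4 KiB page), and the randomization offsets plus the final base-address shuffle of the real code are fixed to zero here:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE (1UL << PAGE_SHIFT)
#define ESPFIX_STACK_SIZE (8 * 8UL)	/* assumed: 64 bytes per stack */
#define ESPFIX_STACKS_PER_PAGE (PAGE_SIZE / ESPFIX_STACK_SIZE)

int main(void)
{
	unsigned long cpu = 130, page_random = 0;	/* randomization elided */
	unsigned long page = (cpu / ESPFIX_STACKS_PER_PAGE) ^ page_random;
	unsigned long slot = cpu % ESPFIX_STACKS_PER_PAGE;
	unsigned long addr = (page << PAGE_SHIFT) + slot * ESPFIX_STACK_SIZE;

	/* cpu 130 -> page 2, slot 2, offset 0x2080 within the espfix area */
	printf("page=%lu slot=%lu addr=%#lx\n", page, slot, addr);
	return 0;
}
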
page              115 arch/x86/kernel/irq_32.c 	struct page *ph, *ps;
page               36 arch/x86/kernel/irq_64.c 	struct page *pages[IRQ_STACK_SIZE / PAGE_SIZE];
page              418 arch/x86/kernel/kprobes/core.c 	void *page;
page              420 arch/x86/kernel/kprobes/core.c 	page = module_alloc(PAGE_SIZE);
page              421 arch/x86/kernel/kprobes/core.c 	if (!page)
page              424 arch/x86/kernel/kprobes/core.c 	set_vm_flush_reset_perms(page);
page              429 arch/x86/kernel/kprobes/core.c 	set_memory_ro((unsigned long)page, 1);
page              435 arch/x86/kernel/kprobes/core.c 	set_memory_x((unsigned long)page, 1);
page              437 arch/x86/kernel/kprobes/core.c 	return page;
page              441 arch/x86/kernel/kprobes/core.c void free_insn_page(void *page)
page              443 arch/x86/kernel/kprobes/core.c 	module_memfree(page);
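
Stitching the kprobes fragments above together, a hedged reconstruction of the v5.4-era allocator for out-of-line instruction slots (header choices per my reading of that tree):

#include <linux/moduleloader.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>

void *alloc_insn_page(void)
{
	void *page;

	page = module_alloc(PAGE_SIZE);
	if (!page)
		return NULL;

	/* Reset permissions automatically when the mapping is freed. */
	set_vm_flush_reset_perms(page);

	/* Make it read-only before executable so it is never W+X. */
	set_memory_ro((unsigned long)page, 1);
	set_memory_x((unsigned long)page, 1);

	return page;
}
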
page              228 arch/x86/kernel/kvmclock.c 	struct page *p;
page              182 arch/x86/kernel/machine_kexec_64.c 	struct page *page;
page              185 arch/x86/kernel/machine_kexec_64.c 	page = kimage_alloc_control_pages(image, 0);
page              186 arch/x86/kernel/machine_kexec_64.c 	if (page) {
page              187 arch/x86/kernel/machine_kexec_64.c 		p = page_address(page);
page              615 arch/x86/kernel/machine_kexec_64.c 	struct page *page;
page              625 arch/x86/kernel/machine_kexec_64.c 	page = pfn_to_page(start >> PAGE_SHIFT);
page              628 arch/x86/kernel/machine_kexec_64.c 		return set_pages_ro(page, nr_pages);
page              630 arch/x86/kernel/machine_kexec_64.c 		return set_pages_rw(page, nr_pages);
page              391 arch/x86/kernel/pci-calgary_64.c static dma_addr_t calgary_map_page(struct device *dev, struct page *page,
page              396 arch/x86/kernel/pci-calgary_64.c 	void *vaddr = page_address(page) + offset;
page             1091 arch/x86/kvm/mmu.c 	void *page;
page             1096 arch/x86/kvm/mmu.c 		page = (void *)__get_free_page(GFP_KERNEL_ACCOUNT);
page             1097 arch/x86/kvm/mmu.c 		if (!page)
page             1099 arch/x86/kvm/mmu.c 		cache->objects[cache->nobjs++] = page;
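
The fragment above tops up a small object cache so later allocations in non-sleeping paths just pop an entry. A standalone sketch of the same pattern (names illustrative; calloc stands in for __get_free_page):

#include <stdio.h>
#include <stdlib.h>

#define CACHE_MAX 4

struct obj_cache {
	int nobjs;
	void *objects[CACHE_MAX];
};

/* Fill the cache up to min entries while sleeping is still allowed. */
static int cache_topup(struct obj_cache *cache, int min)
{
	while (cache->nobjs < min) {
		void *page = calloc(1, 4096);
		if (!page)
			return -1;	/* -ENOMEM in the kernel version */
		cache->objects[cache->nobjs++] = page;
	}
	return 0;
}

/* Later, in a context that must not sleep: just pop. */
static void *cache_alloc(struct obj_cache *cache)
{
	return cache->nobjs ? cache->objects[--cache->nobjs] : NULL;
}

int main(void)
{
	struct obj_cache c = { 0 };

	if (cache_topup(&c, 2) == 0) {
		void *obj = cache_alloc(&c);
		printf("popped %p, %d left\n", obj, c.nobjs);
		free(obj);
	}
	while (c.nobjs)
		free(c.objects[--c.nobjs]);
	return 0;
}
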
page             2247 arch/x86/kvm/mmu.c 	} page[KVM_PAGE_ARRAY_NR];
page             2258 arch/x86/kvm/mmu.c 			if (pvec->page[i].sp == sp)
page             2261 arch/x86/kvm/mmu.c 	pvec->page[pvec->nr].sp = sp;
page             2262 arch/x86/kvm/mmu.c 	pvec->page[pvec->nr].idx = idx;
page             2438 arch/x86/kvm/mmu.c 			i < pvec.nr && ({ sp = pvec.page[i].sp; 1;});	\
page             2448 arch/x86/kvm/mmu.c 		struct kvm_mmu_page *sp = pvec->page[n].sp;
page             2449 arch/x86/kvm/mmu.c 		unsigned idx = pvec->page[n].idx;
page             2471 arch/x86/kvm/mmu.c 	WARN_ON(pvec->page[0].idx != INVALID_INDEX);
page             2473 arch/x86/kvm/mmu.c 	sp = pvec->page[0].sp;
page             3235 arch/x86/kvm/mmu.c 	struct page *pages[PTE_PREFETCH_NUM];
page             5766 arch/x86/kvm/mmu.c 	struct page *page;
page             5781 arch/x86/kvm/mmu.c 	page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_DMA32);
page             5782 arch/x86/kvm/mmu.c 	if (!page)
page             5785 arch/x86/kvm/mmu.c 	mmu->pae_root = page_address(page);
page              138 arch/x86/kvm/paging_tmpl.h 	struct page *page;
page              140 arch/x86/kvm/paging_tmpl.h 	npages = get_user_pages_fast((unsigned long)ptep_user, 1, FOLL_WRITE, &page);
page              142 arch/x86/kvm/paging_tmpl.h 		table = kmap_atomic(page);
page              146 arch/x86/kvm/paging_tmpl.h 		kvm_release_page_dirty(page);
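
The paging_tmpl fragments above pin the user page backing a guest PTE, map it, update it, and release it dirty. A hedged kernel-context sketch of that pin/map/poke/release shape (the function name and trimmed error handling are illustrative):

#include <linux/mm.h>
#include <linux/highmem.h>

static int poke_user_word(unsigned long uaddr, unsigned long val)
{
	struct page *page;
	unsigned long *table;

	if (get_user_pages_fast(uaddr & PAGE_MASK, 1, FOLL_WRITE, &page) != 1)
		return -EFAULT;

	table = kmap_atomic(page);
	table[(uaddr & ~PAGE_MASK) / sizeof(*table)] = val;
	kunmap_atomic(table);

	/* kvm_release_page_dirty() is roughly dirty + put_page. */
	set_page_dirty_lock(page);
	put_page(page);
	return 0;
}
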
page              143 arch/x86/kvm/svm.c 	struct page *avic_logical_id_table_page;
page              144 arch/x86/kvm/svm.c 	struct page *avic_physical_id_table_page;
page              234 arch/x86/kvm/svm.c 	struct page *avic_backing_page;
page              429 arch/x86/kvm/svm.c 	struct page **pages;
page              672 arch/x86/kvm/svm.c 	struct page *save_area;
page             1344 arch/x86/kvm/svm.c 	struct page *iopm_pages;
page             1826 arch/x86/kvm/svm.c static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
page             1833 arch/x86/kvm/svm.c 	struct page **pages;
page             1852 arch/x86/kvm/svm.c 	size = npages * sizeof(struct page *);
page             1882 arch/x86/kvm/svm.c static void sev_unpin_memory(struct kvm *kvm, struct page **pages,
page             1892 arch/x86/kvm/svm.c static void sev_clflush_pages(struct page *pages[], unsigned long npages)
page             1998 arch/x86/kvm/svm.c 	struct page *p_page;
page             1999 arch/x86/kvm/svm.c 	struct page *l_page;
page             2184 arch/x86/kvm/svm.c 	struct page *page;
page             2185 arch/x86/kvm/svm.c 	struct page *msrpm_pages;
page             2186 arch/x86/kvm/svm.c 	struct page *hsave_page;
page             2187 arch/x86/kvm/svm.c 	struct page *nested_msrpm_pages;
page             2220 arch/x86/kvm/svm.c 	page = alloc_page(GFP_KERNEL_ACCOUNT);
page             2221 arch/x86/kvm/svm.c 	if (!page)
page             2253 arch/x86/kvm/svm.c 	svm->vmcb = page_address(page);
page             2255 arch/x86/kvm/svm.c 	svm->vmcb_pa = __sme_set(page_to_pfn(page) << PAGE_SHIFT);
page             2270 arch/x86/kvm/svm.c 	__free_page(page);
page             6496 arch/x86/kvm/svm.c 				struct page **inpages, unsigned long npages)
page             6522 arch/x86/kvm/svm.c 	struct page **inpages;
page             6756 arch/x86/kvm/svm.c 	struct page *tpage = NULL;
page             6794 arch/x86/kvm/svm.c 	struct page *src_tpage = NULL;
page             6795 arch/x86/kvm/svm.c 	struct page *dst_tpage = NULL;
page             6869 arch/x86/kvm/svm.c 	struct page **src_p, **dst_p;
page             6955 arch/x86/kvm/svm.c 	struct page **pages;
page             2936 arch/x86/kvm/vmx/nested.c 	struct page *page;
page             2950 arch/x86/kvm/vmx/nested.c 		page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->apic_access_addr);
page             2951 arch/x86/kvm/vmx/nested.c 		if (!is_error_page(page)) {
page             2952 arch/x86/kvm/vmx/nested.c 			vmx->nested.apic_access_page = page;
page              209 arch/x86/kvm/vmx/vmx.c 	struct page *page;
page              258 arch/x86/kvm/vmx/vmx.c 		page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
page              259 arch/x86/kvm/vmx/vmx.c 		if (!page)
page              261 arch/x86/kvm/vmx/vmx.c 		vmx_l1d_flush_pages = page_address(page);
page             2505 arch/x86/kvm/vmx/vmx.c 	struct page *pages;
page             3519 arch/x86/kvm/vmx/vmx.c 	struct page *page;
page             3530 arch/x86/kvm/vmx/vmx.c 	page = gfn_to_page(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
page             3531 arch/x86/kvm/vmx/vmx.c 	if (is_error_page(page)) {
page             3540 arch/x86/kvm/vmx/vmx.c 	put_page(page);
page              153 arch/x86/kvm/vmx/vmx.h 	struct page *apic_access_page;
page              266 arch/x86/kvm/vmx/vmx.h 	struct page *pml_pg;
page             2588 arch/x86/kvm/x86.c 	u8 *page;
page             2595 arch/x86/kvm/x86.c 	page = memdup_user(blob_addr + (page_num * PAGE_SIZE), PAGE_SIZE);
page             2596 arch/x86/kvm/x86.c 	if (IS_ERR(page)) {
page             2597 arch/x86/kvm/x86.c 		r = PTR_ERR(page);
page             2600 arch/x86/kvm/x86.c 	if (kvm_vcpu_write_guest(vcpu, page_addr, page, PAGE_SIZE))
page             2604 arch/x86/kvm/x86.c 	kfree(page);
page             7997 arch/x86/kvm/x86.c 	struct page *page = NULL;
page             8005 arch/x86/kvm/x86.c 	page = gfn_to_page(vcpu->kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
page             8006 arch/x86/kvm/x86.c 	if (is_error_page(page))
page             8008 arch/x86/kvm/x86.c 	kvm_x86_ops->set_apic_access_page_addr(vcpu, page_to_phys(page));
page             8014 arch/x86/kvm/x86.c 	put_page(page);
page             9447 arch/x86/kvm/x86.c 	struct page *page;
page             9456 arch/x86/kvm/x86.c 	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
page             9457 arch/x86/kvm/x86.c 	if (!page) {
page             9461 arch/x86/kvm/x86.c 	vcpu->arch.pio_data = page_address(page);
page              126 arch/x86/lib/mmx_32.c static void fast_clear_page(void *page)
page              146 arch/x86/lib/mmx_32.c 		: : "r" (page) : "memory");
page              147 arch/x86/lib/mmx_32.c 		page += 64;
page              246 arch/x86/lib/mmx_32.c static void fast_clear_page(void *page)
page              274 arch/x86/lib/mmx_32.c 			: : "r" (page) : "memory");
page              275 arch/x86/lib/mmx_32.c 		page += 128;
page              337 arch/x86/lib/mmx_32.c static void slow_zero_page(void *page)
page              346 arch/x86/lib/mmx_32.c 			:"a" (0), "1" (page), "0" (1024)
page              350 arch/x86/lib/mmx_32.c void mmx_clear_page(void *page)
page              353 arch/x86/lib/mmx_32.c 		slow_zero_page(page);
page              355 arch/x86/lib/mmx_32.c 		fast_clear_page(page);
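
mmx_clear_page() falls back to the slow path when MMX cannot be used (in interrupt context, in the real code). The fallback's constraints are visible above: "a" (0) seeds eax with zero and the count "0" (1024) drives 1024 rep stosl stores of 4 bytes, i.e. exactly one 4 KiB page. A hedged reconstruction of that fallback:

static void slow_zero_page(void *page)
{
	int d0, d1;

	__asm__ __volatile__(
		"cld\n\t"
		"rep ; stosl"		/* 1024 x 4-byte zero stores */
		: "=&c" (d0), "=&D" (d1)
		: "a" (0), "1" (page), "0" (1024)
		: "memory");
}
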
page              202 arch/x86/lib/usercopy_64.c void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
page              205 arch/x86/lib/usercopy_64.c 	char *from = kmap_atomic(page);
page              202 arch/x86/mm/fault.c 		struct page *page;
page              205 arch/x86/mm/fault.c 		list_for_each_entry(page, &pgd_list, lru) {
page              209 arch/x86/mm/fault.c 			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
page              212 arch/x86/mm/fault.c 			vmalloc_sync_one(page_address(page), address);
page                7 arch/x86/mm/highmem_32.c void *kmap(struct page *page)
page               10 arch/x86/mm/highmem_32.c 	if (!PageHighMem(page))
page               11 arch/x86/mm/highmem_32.c 		return page_address(page);
page               12 arch/x86/mm/highmem_32.c 	return kmap_high(page);
page               16 arch/x86/mm/highmem_32.c void kunmap(struct page *page)
page               20 arch/x86/mm/highmem_32.c 	if (!PageHighMem(page))
page               22 arch/x86/mm/highmem_32.c 	kunmap_high(page);
page               34 arch/x86/mm/highmem_32.c void *kmap_atomic_prot(struct page *page, pgprot_t prot)
page               42 arch/x86/mm/highmem_32.c 	if (!PageHighMem(page))
page               43 arch/x86/mm/highmem_32.c 		return page_address(page);
page               49 arch/x86/mm/highmem_32.c 	set_pte(kmap_pte-idx, mk_pte(page, prot));
page               56 arch/x86/mm/highmem_32.c void *kmap_atomic(struct page *page)
page               58 arch/x86/mm/highmem_32.c 	return kmap_atomic_prot(page, kmap_prot);
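
A hedged usage sketch (kernel context assumed) of the kmap_atomic()/kunmap_atomic() pair implemented above; for lowmem pages it degenerates to page_address(), as the !PageHighMem() early return shows:

#include <linux/highmem.h>
#include <linux/string.h>

static void zero_one_page(struct page *page)
{
	void *vaddr = kmap_atomic(page);	/* disables preemption */

	memset(vaddr, 0, PAGE_SIZE);
	kunmap_atomic(vaddr);			/* must pair, LIFO order */
}
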
page               25 arch/x86/mm/hugetlbpage.c struct page *
page               31 arch/x86/mm/hugetlbpage.c 	struct page *page;
page               43 arch/x86/mm/hugetlbpage.c 	page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];
page               45 arch/x86/mm/hugetlbpage.c 	WARN_ON(!PageHead(page));
page               47 arch/x86/mm/hugetlbpage.c 	return page;
page              133 arch/x86/mm/init_64.c 		struct page *page;
page              143 arch/x86/mm/init_64.c 		list_for_each_entry(page, &pgd_list, lru) {
page              147 arch/x86/mm/init_64.c 			pgd = (pgd_t *)page_address(page) + pgd_index(addr);
page              149 arch/x86/mm/init_64.c 			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
page              171 arch/x86/mm/init_64.c 		struct page *page;
page              184 arch/x86/mm/init_64.c 		list_for_each_entry(page, &pgd_list, lru) {
page              189 arch/x86/mm/init_64.c 			pgd = (pgd_t *)page_address(page) + pgd_index(addr);
page              192 arch/x86/mm/init_64.c 			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
page              874 arch/x86/mm/init_64.c static void __meminit free_pagetable(struct page *page, int order)
page              880 arch/x86/mm/init_64.c 	if (PageReserved(page)) {
page              881 arch/x86/mm/init_64.c 		__ClearPageReserved(page);
page              883 arch/x86/mm/init_64.c 		magic = (unsigned long)page->freelist;
page              886 arch/x86/mm/init_64.c 				put_page_bootmem(page++);
page              889 arch/x86/mm/init_64.c 				free_reserved_page(page++);
page              891 arch/x86/mm/init_64.c 		free_pages((unsigned long)page_address(page), order);
page              894 arch/x86/mm/init_64.c static void __meminit free_hugepage_table(struct page *page,
page              900 arch/x86/mm/init_64.c 		free_pagetable(page, get_order(PMD_SIZE));
page             1519 arch/x86/mm/init_64.c 	if (end - start < PAGES_PER_SECTION * sizeof(struct page))
page             1536 arch/x86/mm/init_64.c 				  struct page *start_page, unsigned long nr_pages)
page             1546 arch/x86/mm/init_64.c 	struct page *page;
page             1593 arch/x86/mm/init_64.c 			page = pmd_page(*pmd);
page             1595 arch/x86/mm/init_64.c 				get_page_bootmem(section_nr, page++,
page              118 arch/x86/mm/kaslr.c 			sizeof(struct page);
page              114 arch/x86/mm/pageattr-test.c static struct page *pages[NPAGES];
page               47 arch/x86/mm/pageattr.c 	struct page	**pages;
page              261 arch/x86/mm/pageattr.c 		struct page *page = cpa->pages[idx];
page              263 arch/x86/mm/pageattr.c 		if (unlikely(PageHighMem(page)))
page              266 arch/x86/mm/pageattr.c 		return (unsigned long)page_address(page);
page              711 arch/x86/mm/pageattr.c 		struct page *page;
page              713 arch/x86/mm/pageattr.c 		list_for_each_entry(page, &pgd_list, lru) {
page              719 arch/x86/mm/pageattr.c 			pgd = (pgd_t *)page_address(page) + pgd_index(address);
page              942 arch/x86/mm/pageattr.c 		   struct page *base)
page             1048 arch/x86/mm/pageattr.c 	struct page *base;
page             1671 arch/x86/mm/pageattr.c 				    struct page **pages)
page             1774 arch/x86/mm/pageattr.c static inline int cpa_set_pages_array(struct page **pages, int numpages,
page             1781 arch/x86/mm/pageattr.c static inline int cpa_clear_pages_array(struct page **pages, int numpages,
page             1998 arch/x86/mm/pageattr.c int set_pages_uc(struct page *page, int numpages)
page             2000 arch/x86/mm/pageattr.c 	unsigned long addr = (unsigned long)page_address(page);
page             2006 arch/x86/mm/pageattr.c static int _set_pages_array(struct page **pages, int numpages,
page             2052 arch/x86/mm/pageattr.c int set_pages_array_uc(struct page **pages, int numpages)
page             2058 arch/x86/mm/pageattr.c int set_pages_array_wc(struct page **pages, int numpages)
page             2064 arch/x86/mm/pageattr.c int set_pages_array_wt(struct page **pages, int numpages)
page             2070 arch/x86/mm/pageattr.c int set_pages_wb(struct page *page, int numpages)
page             2072 arch/x86/mm/pageattr.c 	unsigned long addr = (unsigned long)page_address(page);
page             2078 arch/x86/mm/pageattr.c int set_pages_array_wb(struct page **pages, int numpages)
page             2103 arch/x86/mm/pageattr.c int set_pages_ro(struct page *page, int numpages)
page             2105 arch/x86/mm/pageattr.c 	unsigned long addr = (unsigned long)page_address(page);
page             2110 arch/x86/mm/pageattr.c int set_pages_rw(struct page *page, int numpages)
page             2112 arch/x86/mm/pageattr.c 	unsigned long addr = (unsigned long)page_address(page);
page             2117 arch/x86/mm/pageattr.c static int __set_pages_p(struct page *page, int numpages)
page             2119 arch/x86/mm/pageattr.c 	unsigned long tempaddr = (unsigned long) page_address(page);
page             2136 arch/x86/mm/pageattr.c static int __set_pages_np(struct page *page, int numpages)
page             2138 arch/x86/mm/pageattr.c 	unsigned long tempaddr = (unsigned long) page_address(page);
page             2155 arch/x86/mm/pageattr.c int set_direct_map_invalid_noflush(struct page *page)
page             2157 arch/x86/mm/pageattr.c 	return __set_pages_np(page, 1);
page             2160 arch/x86/mm/pageattr.c int set_direct_map_default_noflush(struct page *page)
page             2162 arch/x86/mm/pageattr.c 	return __set_pages_p(page, 1);
page             2165 arch/x86/mm/pageattr.c void __kernel_map_pages(struct page *page, int numpages, int enable)
page             2167 arch/x86/mm/pageattr.c 	if (PageHighMem(page))
page             2170 arch/x86/mm/pageattr.c 		debug_check_no_locks_freed(page_address(page),
page             2180 arch/x86/mm/pageattr.c 		__set_pages_p(page, numpages);
page             2182 arch/x86/mm/pageattr.c 		__set_pages_np(page, numpages);
page             2198 arch/x86/mm/pageattr.c bool kernel_page_present(struct page *page)
page             2203 arch/x86/mm/pageattr.c 	if (PageHighMem(page))
page             2206 arch/x86/mm/pageattr.c 	pte = lookup_address((unsigned long)page_address(page), &level);
page              103 arch/x86/mm/pat.c static inline enum page_cache_mode get_page_memtype(struct page *pg)
page              117 arch/x86/mm/pat.c static inline void set_page_memtype(struct page *pg,
page              146 arch/x86/mm/pat.c static inline enum page_cache_mode get_page_memtype(struct page *pg)
page              150 arch/x86/mm/pat.c static inline void set_page_memtype(struct page *pg,
page              464 arch/x86/mm/pat.c 	struct page *page;
page              482 arch/x86/mm/pat.c 		page = pfn_to_page(pfn);
page              483 arch/x86/mm/pat.c 		type = get_page_memtype(page);
page              498 arch/x86/mm/pat.c 		page = pfn_to_page(pfn);
page              499 arch/x86/mm/pat.c 		set_page_memtype(page, req_type);
page              506 arch/x86/mm/pat.c 	struct page *page;
page              510 arch/x86/mm/pat.c 		page = pfn_to_page(pfn);
page              511 arch/x86/mm/pat.c 		set_page_memtype(page, _PAGE_CACHE_MODE_WB);
page              688 arch/x86/mm/pat.c 		struct page *page;
page              690 arch/x86/mm/pat.c 		page = pfn_to_page(paddr >> PAGE_SHIFT);
page              691 arch/x86/mm/pat.c 		return get_page_memtype(page);
page               46 arch/x86/mm/pgtable.c void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
page               56 arch/x86/mm/pgtable.c 	struct page *page = virt_to_page(pmd);
page               65 arch/x86/mm/pgtable.c 	pgtable_pmd_page_dtor(page);
page               66 arch/x86/mm/pgtable.c 	paravirt_tlb_remove_table(tlb, page);
page               88 arch/x86/mm/pgtable.c 	struct page *page = virt_to_page(pgd);
page               90 arch/x86/mm/pgtable.c 	list_add(&page->lru, &pgd_list);
page               95 arch/x86/mm/pgtable.c 	struct page *page = virt_to_page(pgd);
page               97 arch/x86/mm/pgtable.c 	list_del(&page->lru);
page              111 arch/x86/mm/pgtable.c struct mm_struct *pgd_page_get_mm(struct page *page)
page              113 arch/x86/mm/pgtable.c 	return page->pt_mm;
page              358 arch/x86/pci/pcbios.c 	unsigned long page;
page              362 arch/x86/pci/pcbios.c 	page = __get_free_page(GFP_KERNEL);
page              363 arch/x86/pci/pcbios.c 	if (!page)
page              365 arch/x86/pci/pcbios.c 	opt.table = (struct irq_info *) page;
page              396 arch/x86/pci/pcbios.c 			memcpy(rt->slots, (void *) page, opt.size);
page              400 arch/x86/pci/pcbios.c 	free_page(page);
page              342 arch/x86/platform/efi/efi_64.c 	struct page *page;
page              391 arch/x86/platform/efi/efi_64.c 	page = alloc_page(GFP_KERNEL|__GFP_DMA32);
page              392 arch/x86/platform/efi/efi_64.c 	if (!page) {
page              397 arch/x86/platform/efi/efi_64.c 	efi_scratch.phys_stack = page_to_phys(page + 1); /* stack grows down */
page              303 arch/x86/um/ldt.c 	long page, err=0;
page              342 arch/x86/um/ldt.c 			page = __get_free_page(GFP_KERNEL|__GFP_ZERO);
page              343 arch/x86/um/ldt.c 			if (!page) {
page              348 arch/x86/um/ldt.c 				(struct ldt_entry *) page;
page               17 arch/x86/um/os-Linux/task_size.c static int page_ok(unsigned long page)
page               19 arch/x86/um/os-Linux/task_size.c 	unsigned long *address = (unsigned long *) (page << UM_KERN_PAGE_SHIFT);
page               19 arch/x86/um/vdso/vma.c static struct page **vdsop;
page               23 arch/x86/um/vdso/vma.c 	struct page *um_vdso;
page               29 arch/x86/um/vdso/vma.c 	vdsop = kmalloc(sizeof(struct page *), GFP_KERNEL);
page              354 arch/x86/xen/enlighten_pv.c 	struct page *page;
page              361 arch/x86/xen/enlighten_pv.c 	page = pfn_to_page(pfn);
page              392 arch/x86/xen/enlighten_pv.c 	if (!PageHighMem(page)) {
page               43 arch/x86/xen/mmu.c 			       int nr, struct page **pages)
page              176 arch/x86/xen/mmu_pv.c 		struct page *page = virt_to_page(ptr);
page              178 arch/x86/xen/mmu_pv.c 		return PagePinned(page);
page              491 arch/x86/xen/mmu_pv.c 		struct page *page = virt_to_page(pgd_page);
page              492 arch/x86/xen/mmu_pv.c 		user_ptr = (pgd_t *)page->private;
page              577 arch/x86/xen/mmu_pv.c 		int (*func)(struct mm_struct *mm, struct page *, enum pt_level),
page              591 arch/x86/xen/mmu_pv.c 		int (*func)(struct mm_struct *mm, struct page *, enum pt_level),
page              613 arch/x86/xen/mmu_pv.c 		int (*func)(struct mm_struct *mm, struct page *, enum pt_level),
page              646 arch/x86/xen/mmu_pv.c 			  int (*func)(struct mm_struct *mm, struct page *,
page              688 arch/x86/xen/mmu_pv.c 			int (*func)(struct mm_struct *mm, struct page *,
page              697 arch/x86/xen/mmu_pv.c static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
page              702 arch/x86/xen/mmu_pv.c 	ptl = ptlock_ptr(page);
page              725 arch/x86/xen/mmu_pv.c static int xen_pin_page(struct mm_struct *mm, struct page *page,
page              728 arch/x86/xen/mmu_pv.c 	unsigned pgfl = TestSetPagePinned(page);
page              733 arch/x86/xen/mmu_pv.c 	else if (PageHighMem(page))
page              738 arch/x86/xen/mmu_pv.c 		void *pt = lowmem_page_address(page);
page              739 arch/x86/xen/mmu_pv.c 		unsigned long pfn = page_to_pfn(page);
page              767 arch/x86/xen/mmu_pv.c 			ptl = xen_pte_lock(page, mm);
page              843 arch/x86/xen/mmu_pv.c 	struct page *page;
page              847 arch/x86/xen/mmu_pv.c 	list_for_each_entry(page, &pgd_list, lru) {
page              848 arch/x86/xen/mmu_pv.c 		if (!PagePinned(page)) {
page              849 arch/x86/xen/mmu_pv.c 			__xen_pgd_pin(&init_mm, (pgd_t *)page_address(page));
page              850 arch/x86/xen/mmu_pv.c 			SetPageSavePinned(page);
page              857 arch/x86/xen/mmu_pv.c static int __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
page              860 arch/x86/xen/mmu_pv.c 	SetPagePinned(page);
page              879 arch/x86/xen/mmu_pv.c static int xen_unpin_page(struct mm_struct *mm, struct page *page,
page              882 arch/x86/xen/mmu_pv.c 	unsigned pgfl = TestClearPagePinned(page);
page              884 arch/x86/xen/mmu_pv.c 	if (pgfl && !PageHighMem(page)) {
page              885 arch/x86/xen/mmu_pv.c 		void *pt = lowmem_page_address(page);
page              886 arch/x86/xen/mmu_pv.c 		unsigned long pfn = page_to_pfn(page);
page              898 arch/x86/xen/mmu_pv.c 			ptl = xen_pte_lock(page, mm);
page              962 arch/x86/xen/mmu_pv.c 	struct page *page;
page              966 arch/x86/xen/mmu_pv.c 	list_for_each_entry(page, &pgd_list, lru) {
page              967 arch/x86/xen/mmu_pv.c 		if (PageSavePinned(page)) {
page              968 arch/x86/xen/mmu_pv.c 			BUG_ON(!PagePinned(page));
page              969 arch/x86/xen/mmu_pv.c 			__xen_pgd_unpin(&init_mm, (pgd_t *)page_address(page));
page              970 arch/x86/xen/mmu_pv.c 			ClearPageSavePinned(page);
page             1492 arch/x86/xen/mmu_pv.c 		struct page *page = virt_to_page(pgd);
page             1495 arch/x86/xen/mmu_pv.c 		BUG_ON(page->private != 0);
page             1500 arch/x86/xen/mmu_pv.c 		page->private = (unsigned long)user_pgd;
page             1640 arch/x86/xen/mmu_pv.c 		struct page *page = pfn_to_page(pfn);
page             1643 arch/x86/xen/mmu_pv.c 			SetPagePinned(page);
page             1645 arch/x86/xen/mmu_pv.c 		if (!PageHighMem(page)) {
page             1675 arch/x86/xen/mmu_pv.c 	struct page *page = pfn_to_page(pfn);
page             1676 arch/x86/xen/mmu_pv.c 	bool pinned = PagePinned(page);
page             1681 arch/x86/xen/mmu_pv.c 		if (!PageHighMem(page)) {
page             1691 arch/x86/xen/mmu_pv.c 		ClearPagePinned(page);
page             2717 arch/x86/xen/mmu_pv.c 		  unsigned int domid, bool no_translate, struct page **pages)
page              700 arch/x86/xen/p2m.c 			    struct page **pages, unsigned int count)
page              746 arch/x86/xen/p2m.c 			      struct page **pages, unsigned int count)
page              123 arch/xtensa/include/asm/cacheflush.h extern void flush_dcache_page(struct page*);
page              140 arch/xtensa/include/asm/cacheflush.h #define flush_dcache_page(page)				do { } while (0)
page              156 arch/xtensa/include/asm/cacheflush.h #define	flush_icache_page(vma,page)			do { } while (0)
page              163 arch/xtensa/include/asm/cacheflush.h extern void copy_to_user_page(struct vm_area_struct*, struct page*,
page              165 arch/xtensa/include/asm/cacheflush.h extern void copy_from_user_page(struct vm_area_struct*, struct page*,
page              170 arch/xtensa/include/asm/cacheflush.h #define copy_to_user_page(vma, page, vaddr, dst, src, len)		\
page              177 arch/xtensa/include/asm/cacheflush.h #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
page               32 arch/xtensa/include/asm/highmem.h static inline int get_pkmap_color(struct page *page)
page               34 arch/xtensa/include/asm/highmem.h 	return DCACHE_ALIAS(page_to_phys(page));
page               66 arch/xtensa/include/asm/highmem.h void *kmap_high(struct page *page);
page               67 arch/xtensa/include/asm/highmem.h void kunmap_high(struct page *page);
page               69 arch/xtensa/include/asm/highmem.h static inline void *kmap(struct page *page)
page               77 arch/xtensa/include/asm/highmem.h 	if (!PageHighMem(page))
page               78 arch/xtensa/include/asm/highmem.h 		return page_address(page);
page               79 arch/xtensa/include/asm/highmem.h 	return kmap_high(page);
page               82 arch/xtensa/include/asm/highmem.h static inline void kunmap(struct page *page)
page               85 arch/xtensa/include/asm/highmem.h 	if (!PageHighMem(page))
page               87 arch/xtensa/include/asm/highmem.h 	kunmap_high(page);
page               95 arch/xtensa/include/asm/highmem.h void *kmap_atomic(struct page *page);
page              100 arch/xtensa/include/asm/page.h typedef struct page *pgtable_t;
page              130 arch/xtensa/include/asm/page.h struct page;
page              132 arch/xtensa/include/asm/page.h extern void clear_page(void *page);
page              146 arch/xtensa/include/asm/page.h void clear_user_highpage(struct page *page, unsigned long vaddr);
page              148 arch/xtensa/include/asm/page.h void copy_user_highpage(struct page *to, struct page *from,
page              151 arch/xtensa/include/asm/page.h # define clear_user_page(page, vaddr, pg)	clear_page(page)
page              189 arch/xtensa/include/asm/page.h #define page_to_virt(page)	__va(page_to_pfn(page) << PAGE_SHIFT)
page              191 arch/xtensa/include/asm/page.h #define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
page               21 arch/xtensa/include/asm/pgalloc.h #define pmd_populate(mm, pmdp, page)					     \
page               22 arch/xtensa/include/asm/pgalloc.h 	(pmd_val(*(pmdp)) = ((unsigned long)page_to_virt(page)))
page               52 arch/xtensa/include/asm/pgalloc.h 	struct page *page;
page               57 arch/xtensa/include/asm/pgalloc.h 	page = virt_to_page(pte);
page               58 arch/xtensa/include/asm/pgalloc.h 	if (!pgtable_pte_page_ctor(page)) {
page               59 arch/xtensa/include/asm/pgalloc.h 		__free_page(page);
page               62 arch/xtensa/include/asm/pgalloc.h 	return page;
page              298 arch/xtensa/include/asm/pgtable.h #define mk_pte(page, prot)	pfn_pte(page_to_pfn(page), prot)
page               36 arch/xtensa/include/asm/tlbflush.h 		unsigned long page);
page               54 arch/xtensa/include/asm/tlbflush.h #define flush_tlb_page(vma, page)	   local_flush_tlb_page(vma, page)
page              117 arch/xtensa/kernel/asm-offsets.c 	DEFINE(PAGE_FLAGS, offsetof(struct page, flags));
page               30 arch/xtensa/kernel/pci-dma.c 	struct page *page = pfn_to_page(pfn);
page               32 arch/xtensa/kernel/pci-dma.c 	if (!PageHighMem(page))
page               37 arch/xtensa/kernel/pci-dma.c 			void *vaddr = kmap_atomic(page);
page               42 arch/xtensa/kernel/pci-dma.c 			++page;
page              145 arch/xtensa/kernel/pci-dma.c 	struct page *page = NULL;
page              155 arch/xtensa/kernel/pci-dma.c 		page = dma_alloc_from_contiguous(dev, count, get_order(size),
page              158 arch/xtensa/kernel/pci-dma.c 	if (!page)
page              159 arch/xtensa/kernel/pci-dma.c 		page = alloc_pages(flag | __GFP_ZERO, get_order(size));
page              161 arch/xtensa/kernel/pci-dma.c 	if (!page)
page              164 arch/xtensa/kernel/pci-dma.c 	*handle = phys_to_dma(dev, page_to_phys(page));
page              167 arch/xtensa/kernel/pci-dma.c 	if (PageHighMem(page)) {
page              170 arch/xtensa/kernel/pci-dma.c 		p = dma_common_contiguous_remap(page, size,
page              174 arch/xtensa/kernel/pci-dma.c 			if (!dma_release_from_contiguous(dev, page, count))
page              175 arch/xtensa/kernel/pci-dma.c 				__free_pages(page, get_order(size));
page              180 arch/xtensa/kernel/pci-dma.c 	BUG_ON(!platform_vaddr_cached(page_address(page)));
page              181 arch/xtensa/kernel/pci-dma.c 	__invalidate_dcache_range((unsigned long)page_address(page), size);
page              182 arch/xtensa/kernel/pci-dma.c 	return platform_vaddr_to_uncached(page_address(page));
page              189 arch/xtensa/kernel/pci-dma.c 	struct page *page;
page              192 arch/xtensa/kernel/pci-dma.c 		page = virt_to_page(platform_vaddr_to_cached(vaddr));
page              197 arch/xtensa/kernel/pci-dma.c 		page = pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_handle)));
page              200 arch/xtensa/kernel/pci-dma.c 	if (!dma_release_from_contiguous(dev, page, count))
page              201 arch/xtensa/kernel/pci-dma.c 		__free_pages(page, get_order(size));
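
A hedged usage sketch (kernel context assumed) of how a driver reaches the xtensa allocator above; dev, size, and the helper name are illustrative:

#include <linux/dma-mapping.h>

static void *get_dma_buffer(struct device *dev, size_t size,
			    dma_addr_t *handle)
{
	/* Lands in the arch path above: CMA or alloc_pages, then either a
	 * highmem remap or the uncached alias of the cached address. */
	return dma_alloc_coherent(dev, size, handle, GFP_KERNEL);
}
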
page               60 arch/xtensa/mm/cache.c static inline void kmap_invalidate_coherent(struct page *page,
page               63 arch/xtensa/mm/cache.c 	if (!DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) {
page               66 arch/xtensa/mm/cache.c 		if (!PageHighMem(page)) {
page               67 arch/xtensa/mm/cache.c 			kvaddr = (unsigned long)page_to_virt(page);
page               72 arch/xtensa/mm/cache.c 				(page_to_phys(page) & DCACHE_ALIAS_MASK);
page               75 arch/xtensa/mm/cache.c 						       page_to_phys(page));
page               80 arch/xtensa/mm/cache.c static inline void *coherent_kvaddr(struct page *page, unsigned long base,
page               83 arch/xtensa/mm/cache.c 	if (PageHighMem(page) || !DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) {
page               84 arch/xtensa/mm/cache.c 		*paddr = page_to_phys(page);
page               88 arch/xtensa/mm/cache.c 		return page_to_virt(page);
page               92 arch/xtensa/mm/cache.c void clear_user_highpage(struct page *page, unsigned long vaddr)
page               95 arch/xtensa/mm/cache.c 	void *kvaddr = coherent_kvaddr(page, TLBTEMP_BASE_1, vaddr, &paddr);
page               98 arch/xtensa/mm/cache.c 	kmap_invalidate_coherent(page, vaddr);
page               99 arch/xtensa/mm/cache.c 	set_bit(PG_arch_1, &page->flags);
page              105 arch/xtensa/mm/cache.c void copy_user_highpage(struct page *dst, struct page *src,
page              128 arch/xtensa/mm/cache.c void flush_dcache_page(struct page *page)
page              130 arch/xtensa/mm/cache.c 	struct address_space *mapping = page_mapping_file(page);
page              139 arch/xtensa/mm/cache.c 		if (!test_bit(PG_arch_1, &page->flags))
page              140 arch/xtensa/mm/cache.c 			set_bit(PG_arch_1, &page->flags);
page              145 arch/xtensa/mm/cache.c 		unsigned long phys = page_to_phys(page);
page              146 arch/xtensa/mm/cache.c 		unsigned long temp = page->index << PAGE_SHIFT;
page              214 arch/xtensa/mm/cache.c 	struct page *page;
page              219 arch/xtensa/mm/cache.c 	page = pfn_to_page(pfn);
page              227 arch/xtensa/mm/cache.c 	if (!PageReserved(page) && test_bit(PG_arch_1, &page->flags)) {
page              228 arch/xtensa/mm/cache.c 		unsigned long phys = page_to_phys(page);
page              237 arch/xtensa/mm/cache.c 		clear_bit(PG_arch_1, &page->flags);
page              240 arch/xtensa/mm/cache.c 	if (!PageReserved(page) && !test_bit(PG_arch_1, &page->flags)
page              242 arch/xtensa/mm/cache.c 		unsigned long paddr = (unsigned long)kmap_atomic(page);
page              245 arch/xtensa/mm/cache.c 		set_bit(PG_arch_1, &page->flags);
page              258 arch/xtensa/mm/cache.c void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
page              262 arch/xtensa/mm/cache.c 	unsigned long phys = page_to_phys(page);
page              294 arch/xtensa/mm/cache.c extern void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
page              298 arch/xtensa/mm/cache.c 	unsigned long phys = page_to_phys(page);
page               40 arch/xtensa/mm/highmem.c void *kmap_atomic(struct page *page)
page               47 arch/xtensa/mm/highmem.c 	if (!PageHighMem(page))
page               48 arch/xtensa/mm/highmem.c 		return page_address(page);
page               51 arch/xtensa/mm/highmem.c 		       DCACHE_ALIAS(page_to_phys(page)));
page               56 arch/xtensa/mm/highmem.c 	set_pte(kmap_pte + idx, mk_pte(page, PAGE_KERNEL_EXEC));
page              126 arch/xtensa/mm/tlb.c void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
page              142 arch/xtensa/mm/tlb.c 		invalidate_itlb_mapping(page);
page              143 arch/xtensa/mm/tlb.c 	invalidate_dtlb_mapping(page);
page              240 arch/xtensa/mm/tlb.c 				struct page *p = pfn_to_page(r1 >> PAGE_SHIFT);
page               58 block/badblocks.c 	u64 *p = bb->page;
page              130 block/badblocks.c 	u64 *p = bb->page;
page              187 block/badblocks.c 	p = bb->page;
page              353 block/badblocks.c 	p = bb->page;
page              435 block/badblocks.c 	if (bb->page == NULL || bb->changed)
page              441 block/badblocks.c 		u64 *p = bb->page;
page              467 block/badblocks.c ssize_t badblocks_show(struct badblocks *bb, char *page, int unack)
page              471 block/badblocks.c 	u64 *p = bb->page;
page              493 block/badblocks.c 		len += snprintf(page+len, PAGE_SIZE-len, "%llu %u\n",
page              517 block/badblocks.c ssize_t badblocks_store(struct badblocks *bb, const char *page, size_t len,
page              524 block/badblocks.c 	switch (sscanf(page, "%llu %d%c", &sector, &length, &newline)) {
page              554 block/badblocks.c 		bb->page = devm_kzalloc(dev, PAGE_SIZE, GFP_KERNEL);
page              556 block/badblocks.c 		bb->page = kzalloc(PAGE_SIZE, GFP_KERNEL);
page              557 block/badblocks.c 	if (!bb->page) {
page              598 block/badblocks.c 		devm_kfree(bb->dev, bb->page);
page              600 block/badblocks.c 		kfree(bb->page);
page              601 block/badblocks.c 	bb->page = NULL;
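
In block/badblocks.c, bb->page is a single page treated as an array of u64 entries, each packing a start sector, a length, and an "acknowledged" bit; badblocks_show() and badblocks_store() merely format and parse that table. A sketch of the packing, mirroring the BB_* macros in include/linux/badblocks.h (the MY_ names are illustrative stand-ins):

    #include <linux/types.h>

    /* One 64-bit entry per bad range:
     *   bit  63    : acknowledged flag
     *   bits 62..9 : start sector
     *   bits  8..0 : length - 1  (so a range spans 1..512 sectors)
     */
    #define MY_BB_MAKE(sector, len, ack) \
            (((u64)(sector) << 9) | ((u64)(len) - 1) | ((u64)(!!(ack)) << 63))
    #define MY_BB_OFFSET(x)  (((x) >> 9) & ((1ULL << 54) - 1))
    #define MY_BB_LEN(x)     ((unsigned int)(((x) & 0x1ff) + 1))
    #define MY_BB_ACK(x)     (!!((x) >> 63))
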
page             6568 block/bfq-iosched.c static ssize_t bfq_var_show(unsigned int var, char *page)
page             6570 block/bfq-iosched.c 	return sprintf(page, "%u\n", var);
page             6573 block/bfq-iosched.c static int bfq_var_store(unsigned long *var, const char *page)
page             6576 block/bfq-iosched.c 	int ret = kstrtoul(page, 10, &new_val);
page             6585 block/bfq-iosched.c static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
page             6593 block/bfq-iosched.c 	return bfq_var_show(__data, (page));				\
page             6607 block/bfq-iosched.c static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
page             6612 block/bfq-iosched.c 	return bfq_var_show(__data, (page));				\
page             6619 block/bfq-iosched.c __FUNC(struct elevator_queue *e, const char *page, size_t count)	\
page             6625 block/bfq-iosched.c 	ret = bfq_var_store(&__data, (page));				\
page             6651 block/bfq-iosched.c static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)\
page             6657 block/bfq-iosched.c 	ret = bfq_var_store(&__data, (page));				\
page             6672 block/bfq-iosched.c 				    const char *page, size_t count)
page             6678 block/bfq-iosched.c 	ret = bfq_var_store(&__data, (page));
page             6700 block/bfq-iosched.c 				      const char *page, size_t count)
page             6706 block/bfq-iosched.c 	ret = bfq_var_store(&__data, (page));
page             6723 block/bfq-iosched.c 				     const char *page, size_t count)
page             6729 block/bfq-iosched.c 	ret = bfq_var_store(&__data, (page));
page             6745 block/bfq-iosched.c 				     const char *page, size_t count)
page             6751 block/bfq-iosched.c 	ret = bfq_var_store(&__data, (page));
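
The SHOW/STORE macros in bfq-iosched.c above (and the near-identical ones in mq-deadline.c and blk-sysfs.c further down) stamp out sysfs attribute handlers; `page` is the one-page sysfs buffer. A hedged sketch of what one expanded pair looks like, with a hypothetical private struct standing in for bfq_data:

    #include <linux/elevator.h>
    #include <linux/jiffies.h>
    #include <linux/kernel.h>

    struct example_data {                   /* hypothetical elevator_data */
            unsigned long timeout;          /* in jiffies */
    };

    static ssize_t example_timeout_show(struct elevator_queue *e, char *page)
    {
            struct example_data *d = e->elevator_data;

            return sprintf(page, "%u\n", jiffies_to_msecs(d->timeout));
    }

    static ssize_t example_timeout_store(struct elevator_queue *e,
                                         const char *page, size_t count)
    {
            struct example_data *d = e->elevator_data;
            unsigned long val;
            int ret = kstrtoul(page, 10, &val);

            if (ret)
                    return ret;
            d->timeout = msecs_to_jiffies(val);
            return count;
    }
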
page              120 block/bio-integrity.c int bio_integrity_add_page(struct bio *bio, struct page *page,
page              138 block/bio-integrity.c 	iv->bv_page = page;
page              683 block/bio.c    		struct page *page, unsigned int len, unsigned int off,
page              688 block/bio.c    	phys_addr_t page_addr = page_to_phys(page);
page              692 block/bio.c    	if (xen_domain() && !xen_biovec_phys_mergeable(bv, page))
page              696 block/bio.c    	if (!*same_page && pfn_to_page(PFN_DOWN(vec_end_addr)) + 1 != page)
page              702 block/bio.c    		struct page *page, unsigned len, unsigned offset,
page              708 block/bio.c    	phys_addr_t addr2 = page_to_phys(page) + offset + len - 1;
page              714 block/bio.c    	return __bio_try_merge_page(bio, page, len, offset, same_page);
page              734 block/bio.c    		struct page *page, unsigned int len, unsigned int offset,
page              749 block/bio.c    		if (bio_try_merge_pc_page(q, bio, page, len, offset, same_page))
page              768 block/bio.c    	bvec->bv_page = page;
page              777 block/bio.c    		struct page *page, unsigned int len, unsigned int offset)
page              780 block/bio.c    	return __bio_add_pc_page(q, bio, page, len, offset, &same_page);
page              800 block/bio.c    bool __bio_try_merge_page(struct bio *bio, struct page *page,
page              809 block/bio.c    		if (page_is_mergeable(bv, page, len, off, same_page)) {
page              831 block/bio.c    void __bio_add_page(struct bio *bio, struct page *page,
page              839 block/bio.c    	bv->bv_page = page;
page              846 block/bio.c    	if (!bio_flagged(bio, BIO_WORKINGSET) && unlikely(PageWorkingset(page)))
page              861 block/bio.c    int bio_add_page(struct bio *bio, struct page *page,
page              866 block/bio.c    	if (!__bio_try_merge_page(bio, page, len, offset, &same_page)) {
page              869 block/bio.c    		__bio_add_page(bio, page, len, offset);
page              908 block/bio.c    #define PAGE_PTRS_PER_BVEC     (sizeof(struct bio_vec) / sizeof(struct page *))
page              925 block/bio.c    	struct page **pages = (struct page **)bv;
page              944 block/bio.c    		struct page *page = pages[i];
page              948 block/bio.c    		if (__bio_try_merge_page(bio, page, len, offset, &same_page)) {
page              950 block/bio.c    				put_page(page);
page              954 block/bio.c    			__bio_add_page(bio, page, len, offset);
page             1279 block/bio.c    	struct page *page;
page             1326 block/bio.c    			page = map_data->pages[i / nr_pages];
page             1327 block/bio.c    			page += (i % nr_pages);
page             1331 block/bio.c    			page = alloc_page(q->bounce_gfp | gfp_mask);
page             1332 block/bio.c    			if (!page) {
page             1338 block/bio.c    		if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes) {
page             1340 block/bio.c    				__free_page(page);
page             1406 block/bio.c    		struct page **pages;
page             1424 block/bio.c    				struct page *page = pages[j];
page             1431 block/bio.c    				if (!__bio_add_pc_page(q, bio, page, n, offs,
page             1434 block/bio.c    						put_page(page);
page             1525 block/bio.c    	struct page *page;
page             1549 block/bio.c    			page = virt_to_page(data);
page             1551 block/bio.c    			page = vmalloc_to_page(data);
page             1552 block/bio.c    		if (bio_add_pc_page(q, bio, page, bytes,
page             1621 block/bio.c    		struct page *page;
page             1627 block/bio.c    		page = alloc_page(q->bounce_gfp | gfp_mask);
page             1628 block/bio.c    		if (!page)
page             1632 block/bio.c    			memcpy(page_address(page), p, bytes);
page             1634 block/bio.c    		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
page             2127 block/bio.c    void bio_associate_blkg_from_page(struct bio *bio, struct page *page)
page             2131 block/bio.c    	if (!page->mem_cgroup)
page             2136 block/bio.c    	css = cgroup_e_css(page->mem_cgroup->css.cgroup, &io_cgrp_subsys);
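
bio_add_page() in block/bio.c first tries __bio_try_merge_page() against the current last bio_vec and only consumes a fresh vec when merging fails. A minimal caller-side sketch (assumes a bio with at least one free vec; on success the page is released by the completion path, omitted here):

    #include <linux/bio.h>
    #include <linux/gfp.h>

    static int add_one_page(struct bio *bio)
    {
            struct page *page = alloc_page(GFP_KERNEL);

            if (!page)
                    return -ENOMEM;
            /* returns bytes added; a short count means the bio was full */
            if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE) {
                    __free_page(page);
                    return -EIO;
            }
            return 0;
    }
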
page              224 block/blk-integrity.c 				   char *page)
page              231 block/blk-integrity.c 	return entry->show(bi, page);
page              235 block/blk-integrity.c 				    struct attribute *attr, const char *page,
page              245 block/blk-integrity.c 		ret = entry->store(bi, page, count);
page              250 block/blk-integrity.c static ssize_t integrity_format_show(struct blk_integrity *bi, char *page)
page              253 block/blk-integrity.c 		return sprintf(page, "%s\n", bi->profile->name);
page              255 block/blk-integrity.c 		return sprintf(page, "none\n");
page              258 block/blk-integrity.c static ssize_t integrity_tag_size_show(struct blk_integrity *bi, char *page)
page              260 block/blk-integrity.c 	return sprintf(page, "%u\n", bi->tag_size);
page              263 block/blk-integrity.c static ssize_t integrity_interval_show(struct blk_integrity *bi, char *page)
page              265 block/blk-integrity.c 	return sprintf(page, "%u\n",
page              270 block/blk-integrity.c 				      const char *page, size_t count)
page              272 block/blk-integrity.c 	char *p = (char *) page;
page              283 block/blk-integrity.c static ssize_t integrity_verify_show(struct blk_integrity *bi, char *page)
page              285 block/blk-integrity.c 	return sprintf(page, "%d\n", (bi->flags & BLK_INTEGRITY_VERIFY) != 0);
page              289 block/blk-integrity.c 					const char *page, size_t count)
page              291 block/blk-integrity.c 	char *p = (char *) page;
page              302 block/blk-integrity.c static ssize_t integrity_generate_show(struct blk_integrity *bi, char *page)
page              304 block/blk-integrity.c 	return sprintf(page, "%d\n", (bi->flags & BLK_INTEGRITY_GENERATE) != 0);
page              307 block/blk-integrity.c static ssize_t integrity_device_show(struct blk_integrity *bi, char *page)
page              309 block/blk-integrity.c 	return sprintf(page, "%u\n",
page              782 block/blk-iocost.c 			u64 *page, u64 *seqio, u64 *randio)
page              786 block/blk-iocost.c 	*page = *seqio = *randio = 0;
page              789 block/blk-iocost.c 		*page = DIV64_U64_ROUND_UP(VTIME_PER_SEC,
page              794 block/blk-iocost.c 		if (v > *page)
page              795 block/blk-iocost.c 			*seqio = v - *page;
page              800 block/blk-iocost.c 		if (v > *page)
page              801 block/blk-iocost.c 			*randio = v - *page;
page              132 block/blk-lib.c 		sector_t nr_sects, gfp_t gfp_mask, struct page *page,
page              161 block/blk-lib.c 		bio->bi_io_vec->bv_page = page;
page              194 block/blk-lib.c 				struct page *page)
page              201 block/blk-lib.c 	ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, page,
page              408 block/blk-merge.c 		struct page *page = bvec->bv_page;
page              418 block/blk-merge.c 		page += (offset >> PAGE_SHIFT);
page              422 block/blk-merge.c 		sg_set_page(*sg, page, len, offset);
page               63 block/blk-mq-sysfs.c 				 char *page)
page               80 block/blk-mq-sysfs.c 		res = entry->show(ctx, page);
page               86 block/blk-mq-sysfs.c 				  const char *page, size_t length)
page              103 block/blk-mq-sysfs.c 		res = entry->store(ctx, page, length);
page              109 block/blk-mq-sysfs.c 				    struct attribute *attr, char *page)
page              126 block/blk-mq-sysfs.c 		res = entry->show(hctx, page);
page              132 block/blk-mq-sysfs.c 				     struct attribute *attr, const char *page,
page              150 block/blk-mq-sysfs.c 		res = entry->store(hctx, page, length);
page              156 block/blk-mq-sysfs.c 					    char *page)
page              158 block/blk-mq-sysfs.c 	return sprintf(page, "%u\n", hctx->tags->nr_tags);
page              162 block/blk-mq-sysfs.c 						     char *page)
page              164 block/blk-mq-sysfs.c 	return sprintf(page, "%u\n", hctx->tags->nr_reserved_tags);
page              167 block/blk-mq-sysfs.c static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
page              175 block/blk-mq-sysfs.c 			ret = snprintf(pos + page, size - pos, "%u", i);
page              177 block/blk-mq-sysfs.c 			ret = snprintf(pos + page, size - pos, ", %u", i);
page              186 block/blk-mq-sysfs.c 	ret = snprintf(pos + page, size + 1 - pos, "\n");
page             2063 block/blk-mq.c 	struct page *page;
page             2079 block/blk-mq.c 		page = list_first_entry(&tags->page_list, struct page, lru);
page             2080 block/blk-mq.c 		list_del_init(&page->lru);
page             2085 block/blk-mq.c 		kmemleak_free(page_address(page));
page             2086 block/blk-mq.c 		__free_pages(page, page->private);
page             2180 block/blk-mq.c 		struct page *page;
page             2188 block/blk-mq.c 			page = alloc_pages_node(node,
page             2191 block/blk-mq.c 			if (page)
page             2199 block/blk-mq.c 		if (!page)
page             2202 block/blk-mq.c 		page->private = this_order;
page             2203 block/blk-mq.c 		list_add_tail(&page->lru, &tags->page_list);
page             2205 block/blk-mq.c 		p = page_address(page);
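
The blk-mq.c entries above allocate request storage in high-order chunks and record each chunk's order in page->private, so the free path can hand the same order straight back to __free_pages() without separate bookkeeping. A sketch of that order-tracking idiom (list head and helper names illustrative):

    #include <linux/gfp.h>
    #include <linux/list.h>
    #include <linux/mm_types.h>

    static struct page *grab_chunk(struct list_head *chunks, int order)
    {
            struct page *page = alloc_pages(GFP_KERNEL, order);

            if (!page)
                    return NULL;
            page->private = order;          /* remembered for the free path */
            list_add_tail(&page->lru, chunks);
            return page;
    }

    static void drop_chunks(struct list_head *chunks)
    {
            while (!list_empty(chunks)) {
                    struct page *page =
                            list_first_entry(chunks, struct page, lru);

                    list_del_init(&page->lru);
                    __free_pages(page, page->private);
            }
    }
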
page               27 block/blk-sysfs.c queue_var_show(unsigned long var, char *page)
page               29 block/blk-sysfs.c 	return sprintf(page, "%lu\n", var);
page               33 block/blk-sysfs.c queue_var_store(unsigned long *var, const char *page, size_t count)
page               38 block/blk-sysfs.c 	err = kstrtoul(page, 10, &v);
page               47 block/blk-sysfs.c static ssize_t queue_var_store64(s64 *var, const char *page)
page               52 block/blk-sysfs.c 	err = kstrtos64(page, 10, &v);
page               60 block/blk-sysfs.c static ssize_t queue_requests_show(struct request_queue *q, char *page)
page               62 block/blk-sysfs.c 	return queue_var_show(q->nr_requests, (page));
page               66 block/blk-sysfs.c queue_requests_store(struct request_queue *q, const char *page, size_t count)
page               74 block/blk-sysfs.c 	ret = queue_var_store(&nr, page, count);
page               88 block/blk-sysfs.c static ssize_t queue_ra_show(struct request_queue *q, char *page)
page               93 block/blk-sysfs.c 	return queue_var_show(ra_kb, (page));
page               97 block/blk-sysfs.c queue_ra_store(struct request_queue *q, const char *page, size_t count)
page              100 block/blk-sysfs.c 	ssize_t ret = queue_var_store(&ra_kb, page, count);
page              110 block/blk-sysfs.c static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
page              114 block/blk-sysfs.c 	return queue_var_show(max_sectors_kb, (page));
page              117 block/blk-sysfs.c static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
page              119 block/blk-sysfs.c 	return queue_var_show(queue_max_segments(q), (page));
page              123 block/blk-sysfs.c 		char *page)
page              125 block/blk-sysfs.c 	return queue_var_show(queue_max_discard_segments(q), (page));
page              128 block/blk-sysfs.c static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
page              130 block/blk-sysfs.c 	return queue_var_show(q->limits.max_integrity_segments, (page));
page              133 block/blk-sysfs.c static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
page              135 block/blk-sysfs.c 	return queue_var_show(queue_max_segment_size(q), (page));
page              138 block/blk-sysfs.c static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
page              140 block/blk-sysfs.c 	return queue_var_show(queue_logical_block_size(q), page);
page              143 block/blk-sysfs.c static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
page              145 block/blk-sysfs.c 	return queue_var_show(queue_physical_block_size(q), page);
page              148 block/blk-sysfs.c static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
page              150 block/blk-sysfs.c 	return queue_var_show(q->limits.chunk_sectors, page);
page              153 block/blk-sysfs.c static ssize_t queue_io_min_show(struct request_queue *q, char *page)
page              155 block/blk-sysfs.c 	return queue_var_show(queue_io_min(q), page);
page              158 block/blk-sysfs.c static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
page              160 block/blk-sysfs.c 	return queue_var_show(queue_io_opt(q), page);
page              163 block/blk-sysfs.c static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
page              165 block/blk-sysfs.c 	return queue_var_show(q->limits.discard_granularity, page);
page              168 block/blk-sysfs.c static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
page              171 block/blk-sysfs.c 	return sprintf(page, "%llu\n",
page              175 block/blk-sysfs.c static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
page              177 block/blk-sysfs.c 	return sprintf(page, "%llu\n",
page              182 block/blk-sysfs.c 				       const char *page, size_t count)
page              185 block/blk-sysfs.c 	ssize_t ret = queue_var_store(&max_discard, page, count);
page              204 block/blk-sysfs.c static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
page              206 block/blk-sysfs.c 	return queue_var_show(0, page);
page              209 block/blk-sysfs.c static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
page              211 block/blk-sysfs.c 	return sprintf(page, "%llu\n",
page              215 block/blk-sysfs.c static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
page              217 block/blk-sysfs.c 	return sprintf(page, "%llu\n",
page              222 block/blk-sysfs.c queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
page              227 block/blk-sysfs.c 	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);
page              246 block/blk-sysfs.c static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
page              250 block/blk-sysfs.c 	return queue_var_show(max_hw_sectors_kb, (page));
page              255 block/blk-sysfs.c queue_show_##name(struct request_queue *q, char *page)			\
page              259 block/blk-sysfs.c 	return queue_var_show(neg ? !bit : bit, page);			\
page              262 block/blk-sysfs.c queue_store_##name(struct request_queue *q, const char *page, size_t count) \
page              266 block/blk-sysfs.c 	ret = queue_var_store(&val, page, count);			\
page              284 block/blk-sysfs.c static ssize_t queue_zoned_show(struct request_queue *q, char *page)
page              288 block/blk-sysfs.c 		return sprintf(page, "host-aware\n");
page              290 block/blk-sysfs.c 		return sprintf(page, "host-managed\n");
page              292 block/blk-sysfs.c 		return sprintf(page, "none\n");
page              296 block/blk-sysfs.c static ssize_t queue_nr_zones_show(struct request_queue *q, char *page)
page              298 block/blk-sysfs.c 	return queue_var_show(blk_queue_nr_zones(q), page);
page              301 block/blk-sysfs.c static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
page              304 block/blk-sysfs.c 			       blk_queue_noxmerges(q), page);
page              307 block/blk-sysfs.c static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
page              311 block/blk-sysfs.c 	ssize_t ret = queue_var_store(&nm, page, count);
page              326 block/blk-sysfs.c static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
page              331 block/blk-sysfs.c 	return queue_var_show(set << force, page);
page              335 block/blk-sysfs.c queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
page              341 block/blk-sysfs.c 	ret = queue_var_store(&val, page, count);
page              359 block/blk-sysfs.c static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
page              368 block/blk-sysfs.c 	return sprintf(page, "%d\n", val);
page              371 block/blk-sysfs.c static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
page              379 block/blk-sysfs.c 	err = kstrtoint(page, 10, &val);
page              393 block/blk-sysfs.c static ssize_t queue_poll_show(struct request_queue *q, char *page)
page              395 block/blk-sysfs.c 	return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
page              398 block/blk-sysfs.c static ssize_t queue_poll_store(struct request_queue *q, const char *page,
page              408 block/blk-sysfs.c 	ret = queue_var_store(&poll_on, page, count);
page              420 block/blk-sysfs.c static ssize_t queue_io_timeout_show(struct request_queue *q, char *page)
page              422 block/blk-sysfs.c 	return sprintf(page, "%u\n", jiffies_to_msecs(q->rq_timeout));
page              425 block/blk-sysfs.c static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page,
page              431 block/blk-sysfs.c 	err = kstrtou32(page, 10, &val);
page              440 block/blk-sysfs.c static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
page              445 block/blk-sysfs.c 	return sprintf(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000));
page              448 block/blk-sysfs.c static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
page              455 block/blk-sysfs.c 	ret = queue_var_store64(&val, page);
page              492 block/blk-sysfs.c static ssize_t queue_wc_show(struct request_queue *q, char *page)
page              495 block/blk-sysfs.c 		return sprintf(page, "write back\n");
page              497 block/blk-sysfs.c 	return sprintf(page, "write through\n");
page              500 block/blk-sysfs.c static ssize_t queue_wc_store(struct request_queue *q, const char *page,
page              505 block/blk-sysfs.c 	if (!strncmp(page, "write back", 10))
page              507 block/blk-sysfs.c 	else if (!strncmp(page, "write through", 13) ||
page              508 block/blk-sysfs.c 		 !strncmp(page, "none", 4))
page              522 block/blk-sysfs.c static ssize_t queue_fua_show(struct request_queue *q, char *page)
page              524 block/blk-sysfs.c 	return sprintf(page, "%u\n", test_bit(QUEUE_FLAG_FUA, &q->queue_flags));
page              527 block/blk-sysfs.c static ssize_t queue_dax_show(struct request_queue *q, char *page)
page              529 block/blk-sysfs.c 	return queue_var_show(blk_queue_dax(q), page);
page              794 block/blk-sysfs.c queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
page              808 block/blk-sysfs.c 	res = entry->show(q, page);
page              815 block/blk-sysfs.c 		    const char *page, size_t length)
page              830 block/blk-sysfs.c 	res = entry->store(q, page, length);
page             2454 block/blk-throttle.c ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page)
page             2458 block/blk-throttle.c 	return sprintf(page, "%u\n", jiffies_to_msecs(q->td->throtl_slice));
page             2462 block/blk-throttle.c 	const char *page, size_t count)
page             2469 block/blk-throttle.c 	if (kstrtoul(page, 10, &v))
page              323 block/blk.h    extern ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page);
page              325 block/blk.h    	const char *page, size_t count);
page              322 block/bounce.c 		struct page *page = to->bv_page;
page              324 block/bounce.c 		if (page_to_pfn(page) <= q->limits.bounce_pfn)
page              333 block/bounce.c 			flush_dcache_page(page);
page              336 block/bounce.c 			vfrom = kmap_atomic(page) + to->bv_offset;
page              439 block/elevator.c elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
page              450 block/elevator.c 	error = e->type ? entry->show(e, page) : -ENOENT;
page              457 block/elevator.c 	       const char *page, size_t length)
page              468 block/elevator.c 	error = e->type ? entry->store(e, page, length) : -ENOENT;
page              841 block/genhd.c  					char *page)
page              846 block/genhd.c  		return sprintf(page, "\n");
page              848 block/genhd.c  	return badblocks_show(disk->bb, page, 0);
page              853 block/genhd.c  					const char *page, size_t len)
page              860 block/genhd.c  	return badblocks_store(disk->bb, page, len, 0);
page              861 block/kyber-iosched.c 				       char *page)			\
page              865 block/kyber-iosched.c 	return sprintf(page, "%llu\n", kqd->latency_targets[domain]);	\
page              869 block/kyber-iosched.c 					const char *page, size_t count)	\
page              875 block/kyber-iosched.c 	ret = kstrtoull(page, 10, &nsec);				\
page              591 block/mq-deadline.c deadline_var_show(int var, char *page)
page              593 block/mq-deadline.c 	return sprintf(page, "%d\n", var);
page              597 block/mq-deadline.c deadline_var_store(int *var, const char *page)
page              599 block/mq-deadline.c 	char *p = (char *) page;
page              605 block/mq-deadline.c static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
page              611 block/mq-deadline.c 	return deadline_var_show(__data, (page));			\
page              621 block/mq-deadline.c static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)	\
page              625 block/mq-deadline.c 	deadline_var_store(&__data, (page));				\
page              665 block/partition-generic.c 	struct page *page;
page              667 block/partition-generic.c 	page = read_mapping_page(mapping, (pgoff_t)(n >> (PAGE_SHIFT-9)), NULL);
page              668 block/partition-generic.c 	if (!IS_ERR(page)) {
page              669 block/partition-generic.c 		if (PageError(page))
page              671 block/partition-generic.c 		p->v = page;
page              672 block/partition-generic.c 		return (unsigned char *)page_address(page) +  ((n & ((1 << (PAGE_SHIFT - 9)) - 1)) << 9);
page              674 block/partition-generic.c 		put_page(page);
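
The partition-generic.c entries above read partition metadata through the block device's page cache rather than issuing a bio directly: read_mapping_page() returns a referenced, up-to-date page (or an ERR_PTR), and the caller drops it with put_page(). A reduced sketch of that pattern (the PageError check from the source is omitted; kmap_atomic is used here for highmem safety):

    #include <linux/pagemap.h>
    #include <linux/highmem.h>

    static int peek_byte(struct address_space *mapping, pgoff_t index,
                         unsigned int offset, unsigned char *out)
    {
            struct page *page = read_mapping_page(mapping, index, NULL);
            unsigned char *vaddr;

            if (IS_ERR(page))
                    return PTR_ERR(page);
            vaddr = kmap_atomic(page);
            *out = vaddr[offset];
            kunmap_atomic(vaddr);
            put_page(page);
            return 0;
    }
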
page              205 crypto/ablkcipher.c 	walk->src.page = scatterwalk_page(&walk->in);
page              207 crypto/ablkcipher.c 	walk->dst.page = scatterwalk_page(&walk->out);
page              249 crypto/ablkcipher.c 		walk->src.page = virt_to_page(src);
page              250 crypto/ablkcipher.c 		walk->dst.page = virt_to_page(dst);
page              599 crypto/af_alg.c 			struct page *page = sg_page(sg + i);
page              601 crypto/af_alg.c 			if (!page)
page              614 crypto/af_alg.c 					get_page(page);
page              615 crypto/af_alg.c 					sg_set_page(dst + j, page,
page              632 crypto/af_alg.c 			put_page(page);
page              961 crypto/af_alg.c ssize_t af_alg_sendpage(struct socket *sock, struct page *page,
page              998 crypto/af_alg.c 	get_page(page);
page              999 crypto/af_alg.c 	sg_set_page(sgl->sg + sgl->cur, page, size, offset);
page              427 crypto/algif_aead.c static ssize_t aead_sendpage_nokey(struct socket *sock, struct page *page,
page              436 crypto/algif_aead.c 	return af_alg_sendpage(sock, page, offset, size, flags);
page              131 crypto/algif_hash.c static ssize_t hash_sendpage(struct socket *sock, struct page *page,
page              144 crypto/algif_hash.c 	sg_set_page(ctx->sgl.sg, page, size, offset);
page              344 crypto/algif_hash.c static ssize_t hash_sendpage_nokey(struct socket *sock, struct page *page,
page              353 crypto/algif_hash.c 	return hash_sendpage(sock, page, offset, size, flags);
page              258 crypto/algif_skcipher.c static ssize_t skcipher_sendpage_nokey(struct socket *sock, struct page *page,
page              267 crypto/algif_skcipher.c 	return af_alg_sendpage(sock, page, offset, size, flags);
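
af_alg_sendpage() above takes its own reference with get_page() before planting the caller's page in a scatterlist, because the socket keeps using the page after the call returns; the reference is dropped when the entry is torn down. A minimal sketch of that pairing (helper names illustrative):

    #include <linux/scatterlist.h>
    #include <linux/mm.h>

    static void sg_grab_page(struct scatterlist *sg, struct page *page,
                             unsigned int len, unsigned int offset)
    {
            get_page(page);                 /* reference held by the sg entry */
            sg_set_page(sg, page, len, offset);
    }

    static void sg_release_page(struct scatterlist *sg)
    {
            put_page(sg_page(sg));
    }
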
page               32 crypto/async_tx/async_memcpy.c async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
page               18 crypto/async_tx/async_pq.c static struct page *pq_scribble_page;
page              107 crypto/async_tx/async_pq.c do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
page              163 crypto/async_tx/async_pq.c async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
page              256 crypto/async_tx/async_pq.c pq_val_chan(struct async_submit_ctl *submit, struct page **blocks, int disks, size_t len)
page              281 crypto/async_tx/async_pq.c async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
page              282 crypto/async_tx/async_pq.c 		   size_t len, enum sum_check_flags *pqres, struct page *spare,
page              357 crypto/async_tx/async_pq.c 		struct page *p_src = P(blocks, disks);
page              358 crypto/async_tx/async_pq.c 		struct page *q_src = Q(blocks, disks);
page               18 crypto/async_tx/async_raid6_recov.c async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef,
page               83 crypto/async_tx/async_raid6_recov.c async_mult(struct page *dest, struct page *src, u8 coef, size_t len,
page              147 crypto/async_tx/async_raid6_recov.c 		struct page **blocks, struct async_submit_ctl *submit)
page              150 crypto/async_tx/async_raid6_recov.c 	struct page *p, *q, *a, *b;
page              151 crypto/async_tx/async_raid6_recov.c 	struct page *srcs[2];
page              186 crypto/async_tx/async_raid6_recov.c 		struct page **blocks, struct async_submit_ctl *submit)
page              189 crypto/async_tx/async_raid6_recov.c 	struct page *p, *q, *g, *dp, *dq;
page              190 crypto/async_tx/async_raid6_recov.c 	struct page *srcs[2];
page              260 crypto/async_tx/async_raid6_recov.c 	      struct page **blocks, struct async_submit_ctl *submit)
page              263 crypto/async_tx/async_raid6_recov.c 	struct page *p, *q, *dp, *dq;
page              264 crypto/async_tx/async_raid6_recov.c 	struct page *srcs[2];
page              337 crypto/async_tx/async_raid6_recov.c 			struct page **blocks, struct async_submit_ctl *submit)
page              410 crypto/async_tx/async_raid6_recov.c 			struct page **blocks, struct async_submit_ctl *submit)
page              413 crypto/async_tx/async_raid6_recov.c 	struct page *p, *q, *dq;
page              420 crypto/async_tx/async_raid6_recov.c 	struct page *srcs[2];
page              474 crypto/async_tx/async_raid6_recov.c 		struct page *g = blocks[good];
page              100 crypto/async_tx/async_xor.c do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
page              160 crypto/async_tx/async_xor.c async_xor(struct page *dest, struct page **src_list, unsigned int offset,
page              223 crypto/async_tx/async_xor.c static int page_is_zero(struct page *p, unsigned int offset, size_t len)
page              229 crypto/async_tx/async_xor.c xor_val_chan(struct async_submit_ctl *submit, struct page *dest,
page              230 crypto/async_tx/async_xor.c 		 struct page **src_list, int src_cnt, size_t len)
page              256 crypto/async_tx/async_xor.c async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
page               20 crypto/async_tx/raid6test.c static struct page *dataptrs[NDISKS];
page               22 crypto/async_tx/raid6test.c static struct page *data[NDISKS+3];
page               23 crypto/async_tx/raid6test.c static struct page *spare;
page               24 crypto/async_tx/raid6test.c static struct page *recovi;
page               25 crypto/async_tx/raid6test.c static struct page *recovj;
page               55 crypto/async_tx/raid6test.c static void raid6_dual_recov(int disks, size_t bytes, int faila, int failb, struct page **ptrs)
page               71 crypto/async_tx/raid6test.c 			struct page *blocks[NDISKS];
page               72 crypto/async_tx/raid6test.c 			struct page *dest;
page               83 crypto/blkcipher.c 		memcpy(walk->dst.virt.addr, walk->page, n);
page              131 crypto/blkcipher.c 	if (walk->buffer != walk->page)
page              133 crypto/blkcipher.c 	if (walk->page)
page              134 crypto/blkcipher.c 		free_page((unsigned long)walk->page);
page              150 crypto/blkcipher.c 	walk->buffer = walk->page;
page              177 crypto/blkcipher.c 	u8 *tmp = walk->page;
page              194 crypto/blkcipher.c 	walk->src.phys.page = scatterwalk_page(&walk->in);
page              196 crypto/blkcipher.c 	walk->dst.phys.page = scatterwalk_page(&walk->out);
page              203 crypto/blkcipher.c 	diff |= walk->src.virt.page - walk->dst.virt.page;
page              236 crypto/blkcipher.c 		if (!walk->page) {
page              237 crypto/blkcipher.c 			walk->page = (void *)__get_free_page(GFP_ATOMIC);
page              238 crypto/blkcipher.c 			if (!walk->page)
page              261 crypto/blkcipher.c 		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
page              262 crypto/blkcipher.c 		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
page              336 crypto/blkcipher.c 	walk->page = NULL;
page               52 crypto/skcipher.c 	struct page *page = scatterwalk_page(walk);
page               54 crypto/skcipher.c 	return (PageHighMem(page) ? kmap_atomic(page) : page_address(page)) +
page              128 crypto/skcipher.c 		memcpy(walk->dst.virt.addr, walk->page, n);
page              163 crypto/skcipher.c 	if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
page              171 crypto/skcipher.c 	if (walk->buffer != walk->page)
page              173 crypto/skcipher.c 	if (walk->page)
page              174 crypto/skcipher.c 		free_page((unsigned long)walk->page);
page              210 crypto/skcipher.c 	if (walk->buffer != walk->page)
page              212 crypto/skcipher.c 	if (walk->page)
page              213 crypto/skcipher.c 		free_page((unsigned long)walk->page);
page              236 crypto/skcipher.c 			walk->buffer = walk->page;
page              288 crypto/skcipher.c 	u8 *tmp = walk->page;
page              304 crypto/skcipher.c 	p->data = walk->page;
page              308 crypto/skcipher.c 	if (offset_in_page(walk->page) + walk->nbytes + walk->stride >
page              310 crypto/skcipher.c 		walk->page = NULL;
page              312 crypto/skcipher.c 		walk->page += walk->nbytes;
page              321 crypto/skcipher.c 	walk->src.phys.page = scatterwalk_page(&walk->in);
page              323 crypto/skcipher.c 	walk->dst.phys.page = scatterwalk_page(&walk->out);
page              330 crypto/skcipher.c 	diff |= walk->src.virt.page - walk->dst.virt.page;
page              367 crypto/skcipher.c 		if (!walk->page) {
page              370 crypto/skcipher.c 			walk->page = (void *)__get_free_page(gfp);
page              371 crypto/skcipher.c 			if (!walk->page)
page              376 crypto/skcipher.c 				     PAGE_SIZE - offset_in_page(walk->page));
page              388 crypto/skcipher.c 		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
page              389 crypto/skcipher.c 		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
page              443 crypto/skcipher.c 	walk->page = NULL;
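
The crypto/skcipher.c walk entries above implement the segmented-buffer iteration that cipher implementations consume: skcipher_walk_virt() maps successive scatterlist chunks (bouncing through walk->page when a chunk is misaligned or split), and skcipher_walk_done() advances the walk. A toy consumer sketching the standard loop; the XOR transform is purely illustrative:

    #include <crypto/internal/skcipher.h>

    static int toy_xor_crypt(struct skcipher_request *req)
    {
            struct skcipher_walk walk;
            int err = skcipher_walk_virt(&walk, req, false);

            while (walk.nbytes) {
                    u8 *src = walk.src.virt.addr;
                    u8 *dst = walk.dst.virt.addr;
                    unsigned int i;

                    for (i = 0; i < walk.nbytes; i++)
                            dst[i] = src[i] ^ 0xff; /* illustrative transform */
                    /* second argument: bytes left unprocessed this round */
                    err = skcipher_walk_done(&walk, 0);
            }
            return err;
    }
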
page              186 drivers/android/binder_alloc.c 	struct binder_lru_page *page;
page              204 drivers/android/binder_alloc.c 		page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
page              205 drivers/android/binder_alloc.c 		if (!page->page_ptr) {
page              232 drivers/android/binder_alloc.c 		page = &alloc->pages[index];
page              234 drivers/android/binder_alloc.c 		if (page->page_ptr) {
page              237 drivers/android/binder_alloc.c 			on_lru = list_lru_del(&binder_alloc_lru, &page->lru);
page              248 drivers/android/binder_alloc.c 		page->page_ptr = alloc_page(GFP_KERNEL |
page              251 drivers/android/binder_alloc.c 		if (!page->page_ptr) {
page              256 drivers/android/binder_alloc.c 		page->alloc = alloc;
page              257 drivers/android/binder_alloc.c 		INIT_LIST_HEAD(&page->lru);
page              260 drivers/android/binder_alloc.c 		ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
page              285 drivers/android/binder_alloc.c 		page = &alloc->pages[index];
page              289 drivers/android/binder_alloc.c 		ret = list_lru_add(&binder_alloc_lru, &page->lru);
page              298 drivers/android/binder_alloc.c 		__free_page(page->page_ptr);
page              299 drivers/android/binder_alloc.c 		page->page_ptr = NULL;
page              840 drivers/android/binder_alloc.c 	struct binder_lru_page *page;
page              853 drivers/android/binder_alloc.c 			page = &alloc->pages[i];
page              854 drivers/android/binder_alloc.c 			if (!page->page_ptr)
page              856 drivers/android/binder_alloc.c 			else if (list_empty(&page->lru))
page              915 drivers/android/binder_alloc.c 	struct binder_lru_page *page = container_of(item,
page              923 drivers/android/binder_alloc.c 	alloc = page->alloc;
page              927 drivers/android/binder_alloc.c 	if (!page->page_ptr)
page              930 drivers/android/binder_alloc.c 	index = page - alloc->pages;
page              955 drivers/android/binder_alloc.c 	__free_page(page->page_ptr);
page              956 drivers/android/binder_alloc.c 	page->page_ptr = NULL;
page             1073 drivers/android/binder_alloc.c static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
page             1114 drivers/android/binder_alloc.c 		struct page *page;
page             1118 drivers/android/binder_alloc.c 		page = binder_alloc_get_page(alloc, buffer,
page             1121 drivers/android/binder_alloc.c 		kptr = kmap(page) + pgoff;
page             1123 drivers/android/binder_alloc.c 		kunmap(page);
page             1146 drivers/android/binder_alloc.c 		struct page *page;
page             1151 drivers/android/binder_alloc.c 		page = binder_alloc_get_page(alloc, buffer,
page             1154 drivers/android/binder_alloc.c 		base_ptr = kmap_atomic(page);
page               64 drivers/android/binder_alloc.h 	struct page *page_ptr;
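
The binder_alloc.c entries above populate the mmap'ed binder buffer lazily: each binder_lru_page gets a real page only on first use, mapped into the user VMA with vm_insert_page(). A reduced sketch of the per-address step (LRU bookkeeping and the kernel-side mapping omitted; names illustrative):

    #include <linux/mm.h>
    #include <linux/gfp.h>

    static int back_user_addr(struct vm_area_struct *vma, unsigned long uaddr,
                              struct page **out)
    {
            struct page *page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM |
                                           __GFP_ZERO);
            int ret;

            if (!page)
                    return -ENOMEM;
            /* on success the VMA mapping holds its own page reference */
            ret = vm_insert_page(vma, uaddr, page);
            if (ret) {
                    __free_page(page);
                    return ret;
            }
            *out = page;
            return 0;
    }
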
page             2043 drivers/ata/libata-core.c 			       u8 page, void *buf, unsigned int sectors)
page             2050 drivers/ata/libata-core.c 	DPRINTK("read log page - log 0x%x, page 0x%x\n", log, page);
page             2072 drivers/ata/libata-core.c 	tf.lbam = page;
page             2099 drivers/ata/libata-core.c static bool ata_identify_page_supported(struct ata_device *dev, u8 page)
page             2123 drivers/ata/libata-core.c 		if (ap->sector_buf[9 + i] == page)
page              657 drivers/ata/libata-sff.c 	struct page *page;
page              668 drivers/ata/libata-sff.c 	page = sg_page(qc->cursg);
page              672 drivers/ata/libata-sff.c 	page = nth_page(page, (offset >> PAGE_SHIFT));
page              678 drivers/ata/libata-sff.c 	buf = kmap_atomic(page);
page              682 drivers/ata/libata-sff.c 	if (!do_write && !PageSlab(page))
page              683 drivers/ata/libata-sff.c 		flush_dcache_page(page);
page              782 drivers/ata/libata-sff.c 	struct page *page;
page              795 drivers/ata/libata-sff.c 	page = sg_page(sg);
page              799 drivers/ata/libata-sff.c 	page = nth_page(page, (offset >> PAGE_SHIFT));
page              811 drivers/ata/libata-sff.c 	buf = kmap_atomic(page);
page               86 drivers/ata/libata.h 				      u8 page, void *buf, unsigned int sectors);
page              188 drivers/ata/pata_octeon_cf.c 	reg_tim.s.page = 0;
page              124 drivers/atm/adummy.c adummy_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
page              129 drivers/atm/adummy.c 		return sprintf(page, "version %s\n", DRV_VERSION);
page             1409 drivers/atm/ambassador.c static int amb_proc_read (struct atm_dev * atm_dev, loff_t * pos, char * page) {
page             1420 drivers/atm/ambassador.c     return sprintf (page,
page             1430 drivers/atm/ambassador.c     return sprintf (page, "cmd queue [cur/hi/max]: %u/%u/%u. ",
page             1436 drivers/atm/ambassador.c     return sprintf (page, "TX queue [cur/max high full]: %u/%u %u %u.\n",
page             1441 drivers/atm/ambassador.c     unsigned int count = sprintf (page, "RX queues [cur/max/req low empty]:");
page             1444 drivers/atm/ambassador.c       count += sprintf (page+count, " %u/%u/%u %u %u",
page             1447 drivers/atm/ambassador.c     count += sprintf (page+count, ".\n");
page             1452 drivers/atm/ambassador.c     unsigned int count = sprintf (page, "RX buffer sizes:");
page             1455 drivers/atm/ambassador.c       count += sprintf (page+count, " %u", r->buffer_size);
page             1457 drivers/atm/ambassador.c     count += sprintf (page+count, ".\n");
page              235 drivers/atm/atmtcp.c static int atmtcp_v_proc(struct atm_dev *dev,loff_t *pos,char *page)
page              240 drivers/atm/atmtcp.c 	if (!dev_data->persist) return sprintf(page,"ephemeral\n");
page              241 drivers/atm/atmtcp.c 	return sprintf(page,"persistent, %sconnected\n",
page             2100 drivers/atm/eni.c static int eni_proc_read(struct atm_dev *dev,loff_t *pos,char *page)
page             2110 drivers/atm/eni.c 		return sprintf(page,DEV_LABEL "(itf %d) signal %s, %dkB, "
page             2114 drivers/atm/eni.c 		return sprintf(page,"%4sBursts: TX"
page             2157 drivers/atm/eni.c 		return sprintf(page,"%4sBuffer multipliers: tx %d%%, rx %d%%\n",
page             2164 drivers/atm/eni.c 			return sprintf(page, "tx[%d]:    0x%lx-0x%lx "
page             2172 drivers/atm/eni.c 		return sprintf(page,"%10sbacklog %u packets\n","",
page             2188 drivers/atm/eni.c 			length = sprintf(page,"vcc %4d: ",vcc->vci);
page             2190 drivers/atm/eni.c 				length += sprintf(page+length, "0x%lx-0x%lx "
page             2195 drivers/atm/eni.c 				if (eni_vcc->tx) length += sprintf(page+length,", ");
page             2198 drivers/atm/eni.c 				length += sprintf(page+length,"tx[%d], txing %d bytes",
page             2200 drivers/atm/eni.c 			page[length] = '\n';
page             2212 drivers/atm/eni.c 		return sprintf(page,"free      %p-%p (%6d bytes)\n",
page              605 drivers/atm/fore200e.c fore200e_pca_proc_read(struct fore200e* fore200e, char *page)
page              609 drivers/atm/fore200e.c     return sprintf(page, "   PCI bus/slot/function:\t%d/%d/%d\n",
page              738 drivers/atm/fore200e.c static int fore200e_sba_proc_read(struct fore200e *fore200e, char *page)
page              745 drivers/atm/fore200e.c 	return sprintf(page, "   SBUS slot/device:\t\t%d/'%pOFn'\n",
page             2734 drivers/atm/fore200e.c fore200e_proc_read(struct atm_dev *dev, loff_t* pos, char* page)
page             2747 drivers/atm/fore200e.c 	len = sprintf(page,"\n"
page             2753 drivers/atm/fore200e.c 	    len += fore200e->bus->proc_read(fore200e, page + len);
page             2755 drivers/atm/fore200e.c 	len += sprintf(page + len,
page             2771 drivers/atm/fore200e.c 	return sprintf(page,
page             2784 drivers/atm/fore200e.c 	len = sprintf(page,"\n\n"
page             2789 drivers/atm/fore200e.c 	    len += sprintf(page + len, "0x%08x\n", hb);
page             2791 drivers/atm/fore200e.c 	    len += sprintf(page + len, "*** FATAL ERROR %04x ***\n", hb & 0xFFFF);
page             2832 drivers/atm/fore200e.c 	return sprintf(page,
page             2848 drivers/atm/fore200e.c 	return sprintf(page,
page             2858 drivers/atm/fore200e.c 	return sprintf(page,
page             2868 drivers/atm/fore200e.c 	return sprintf(page, "\n"
page             2886 drivers/atm/fore200e.c 	return sprintf(page,"\n"
page             2902 drivers/atm/fore200e.c 	return sprintf(page,"\n"
page             2912 drivers/atm/fore200e.c 	return sprintf(page,"\n"
page             2936 drivers/atm/fore200e.c 	return sprintf(page,"\n"
page             2960 drivers/atm/fore200e.c 	return sprintf(page,"\n"
page             2976 drivers/atm/fore200e.c 	return sprintf(page,"\n"
page             2981 drivers/atm/fore200e.c         return sprintf(page,"\n"
page             3000 drivers/atm/fore200e.c 	    len = sprintf(page,
page              104 drivers/atm/he.c static int he_proc_read(struct atm_dev *dev,loff_t *pos,char *page);
page             2703 drivers/atm/he.c he_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
page             2718 drivers/atm/he.c 		return sprintf(page, "ATM he driver\n");
page             2721 drivers/atm/he.c 		return sprintf(page, "%s%s\n\n",
page             2725 drivers/atm/he.c 		return sprintf(page, "Mismatched Cells  VPI/VCI Not Open  Dropped Cells  RCM Dropped Cells\n");
page             2735 drivers/atm/he.c 		return sprintf(page, "%16ld  %16ld  %13ld  %17ld\n\n", 
page             2739 drivers/atm/he.c 		return sprintf(page, "irq_size = %d  inuse = ?  peak = %d\n",
page             2743 drivers/atm/he.c 		return sprintf(page, "tpdrq_size = %d  inuse = ?\n",
page             2747 drivers/atm/he.c 		return sprintf(page, "rbrq_size = %d  inuse = ?  peak = %d\n",
page             2751 drivers/atm/he.c 		return sprintf(page, "tbrq_size = %d  peak = %d\n",
page             2765 drivers/atm/he.c 		return sprintf(page, "rbpl_size = %d  inuse = %d\n\n",
page             2770 drivers/atm/he.c 		return sprintf(page, "rate controller periods (cbr)\n                 pcr  #vc\n");
page             2774 drivers/atm/he.c 			return sprintf(page, "cs_stper%-2d  %8ld  %3d\n", i,
page             2779 drivers/atm/he.c 		return sprintf(page, "total bw (cbr): %d  (limit %d)\n",
page             2598 drivers/atm/horizon.c static int hrz_proc_read (struct atm_dev * atm_dev, loff_t * pos, char * page) {
page             2607 drivers/atm/horizon.c     unsigned int count = sprintf (page, "vbr buckets:");
page             2610 drivers/atm/horizon.c       count += sprintf (page+count, " %u/%u",
page             2613 drivers/atm/horizon.c     count += sprintf (page+count, ".\n");
page             2619 drivers/atm/horizon.c     return sprintf (page,
page             2625 drivers/atm/horizon.c     return sprintf (page,
page             2632 drivers/atm/horizon.c     return sprintf (page,
page              133 drivers/atm/idt77252.c 			      char *page);
page             2629 drivers/atm/idt77252.c idt77252_proc_read(struct atm_dev *dev, loff_t * pos, char *page)
page             2636 drivers/atm/idt77252.c 		return sprintf(page, "IDT77252 Interrupts:\n");
page             2638 drivers/atm/idt77252.c 		return sprintf(page, "TSIF:  %lu\n", card->irqstat[15]);
page             2640 drivers/atm/idt77252.c 		return sprintf(page, "TXICP: %lu\n", card->irqstat[14]);
page             2642 drivers/atm/idt77252.c 		return sprintf(page, "TSQF:  %lu\n", card->irqstat[12]);
page             2644 drivers/atm/idt77252.c 		return sprintf(page, "TMROF: %lu\n", card->irqstat[11]);
page             2646 drivers/atm/idt77252.c 		return sprintf(page, "PHYI:  %lu\n", card->irqstat[10]);
page             2648 drivers/atm/idt77252.c 		return sprintf(page, "FBQ3A: %lu\n", card->irqstat[8]);
page             2650 drivers/atm/idt77252.c 		return sprintf(page, "FBQ2A: %lu\n", card->irqstat[7]);
page             2652 drivers/atm/idt77252.c 		return sprintf(page, "RSQF:  %lu\n", card->irqstat[6]);
page             2654 drivers/atm/idt77252.c 		return sprintf(page, "EPDU:  %lu\n", card->irqstat[5]);
page             2656 drivers/atm/idt77252.c 		return sprintf(page, "RAWCF: %lu\n", card->irqstat[4]);
page             2658 drivers/atm/idt77252.c 		return sprintf(page, "FBQ1A: %lu\n", card->irqstat[3]);
page             2660 drivers/atm/idt77252.c 		return sprintf(page, "FBQ0A: %lu\n", card->irqstat[2]);
page             2662 drivers/atm/idt77252.c 		return sprintf(page, "RSQAF: %lu\n", card->irqstat[1]);
page             2664 drivers/atm/idt77252.c 		return sprintf(page, "IDT77252 Transmit Connection Table:\n");
page             2684 drivers/atm/idt77252.c 		p = page;
page             2691 drivers/atm/idt77252.c 		return p - page;
page             3111 drivers/atm/iphase.c static int ia_proc_read(struct atm_dev *dev,loff_t *pos,char *page)
page             3118 drivers/atm/iphase.c        n = sprintf(page, "  Board Type         :  Iphase5525-1KVC-128K\n");
page             3122 drivers/atm/iphase.c         n = sprintf(page, "  Board Type         :  Iphase-ATM-DS3");
page             3124 drivers/atm/iphase.c         n = sprintf(page, "  Board Type         :  Iphase-ATM-E3");
page             3126 drivers/atm/iphase.c          n = sprintf(page, "  Board Type         :  Iphase-ATM-UTP155"); 
page             3128 drivers/atm/iphase.c         n = sprintf(page, "  Board Type         :  Iphase-ATM-OC3");
page             3129 drivers/atm/iphase.c      tmpPtr = page + n;
page             3134 drivers/atm/iphase.c      tmpPtr = page + n; 
page             3144 drivers/atm/iphase.c      return  sprintf(page, "  Number of Tx Buffer:  %u\n"
page             2440 drivers/atm/lanai.c static int lanai_proc_read(struct atm_dev *atmdev, loff_t *pos, char *page)
page             2446 drivers/atm/lanai.c 		return sprintf(page, DEV_LABEL "(itf %d): chip=LANAI%s, "
page             2452 drivers/atm/lanai.c 		return sprintf(page, "revision: board=%d, pci_if=%d\n",
page             2455 drivers/atm/lanai.c 		return sprintf(page, "EEPROM ESI: %pM\n",
page             2458 drivers/atm/lanai.c 		return sprintf(page, "status: SOOL=%d, LOCD=%d, LED=%d, "
page             2464 drivers/atm/lanai.c 		return sprintf(page, "global buffer sizes: service=%zu, "
page             2469 drivers/atm/lanai.c 		return sprintf(page, "cells in error: overflow=%u, "
page             2475 drivers/atm/lanai.c 		return sprintf(page, "PCI errors: parity_detect=%u, "
page             2481 drivers/atm/lanai.c 		return sprintf(page, "            slave_target_abort=%u, "
page             2485 drivers/atm/lanai.c 		return sprintf(page, "                     no_tx=%u, "
page             2490 drivers/atm/lanai.c 		return sprintf(page, "resets: dma=%u, card=%u\n",
page             2504 drivers/atm/lanai.c 	left = sprintf(page, "VCI %4d: nref=%d, rx_nomem=%u",  (vci_t) left,
page             2507 drivers/atm/lanai.c 		left += sprintf(&page[left], ",\n          rx_AAL=%d",
page             2510 drivers/atm/lanai.c 			left += sprintf(&page[left], ", rx_buf_size=%zu, "
page             2520 drivers/atm/lanai.c 		left += sprintf(&page[left], ",\n          tx_AAL=%d, "
page             2526 drivers/atm/lanai.c 	page[left++] = '\n';
page             2527 drivers/atm/lanai.c 	page[left] = '\0';
page              144 drivers/atm/nicstar.c static int ns_proc_read(struct atm_dev *dev, loff_t * pos, char *page);
page             2395 drivers/atm/nicstar.c static int ns_proc_read(struct atm_dev *dev, loff_t * pos, char *page)
page             2405 drivers/atm/nicstar.c 		return sprintf(page, "Pool   count    min   init    max \n");
page             2407 drivers/atm/nicstar.c 		return sprintf(page, "Small  %5d  %5d  %5d  %5d \n",
page             2411 drivers/atm/nicstar.c 		return sprintf(page, "Large  %5d  %5d  %5d  %5d \n",
page             2415 drivers/atm/nicstar.c 		return sprintf(page, "Huge   %5d  %5d  %5d  %5d \n",
page             2419 drivers/atm/nicstar.c 		return sprintf(page, "Iovec  %5d  %5d  %5d  %5d \n",
page             2425 drivers/atm/nicstar.c 		    sprintf(page, "Interrupt counter: %u \n", card->intcnt);
page             2445 drivers/atm/nicstar.c 		return sprintf(page, "PHY regs: 0x%02X 0x%02X 0x%02X 0x%02X \n",
page             2454 drivers/atm/nicstar.c 			return sprintf(page, "%5d - VBR/UBR \n", left + 1);
page             2456 drivers/atm/nicstar.c 			return sprintf(page, "%5d - %d %d \n", left + 1,
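
The ATM drivers above all implement the atm_dev proc_read hook the same way: the proc layer calls it repeatedly with an advancing *pos, and the driver formats one line per position into the page buffer, returning 0 when it runs out of lines. A sketch of the convention (the private struct and its fields are hypothetical):

    #include <linux/atmdev.h>

    struct example_priv {                   /* hypothetical */
            unsigned long intr_count;
    };

    static int example_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
    {
            struct example_priv *priv = dev->dev_data;

            if (*pos == 0)
                    return sprintf(page, "example ATM driver\n");
            if (*pos == 1)
                    return sprintf(page, "interrupts: %lu\n", priv->intr_count);
            return 0;                       /* end of output */
    }
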
page              145 drivers/auxdisplay/cfag12864b.c static void cfag12864b_page(unsigned char page)
page              149 drivers/auxdisplay/cfag12864b.c 	ks0108_page(page);
page               55 drivers/auxdisplay/cfag12864bfb.c 	struct page *pages = virt_to_page(cfag12864b_buffer);
page              226 drivers/auxdisplay/ht16k33.c 	struct page *pages = virt_to_page(priv->fbdev.buffer);
page               90 drivers/auxdisplay/ks0108.c void ks0108_page(unsigned char page)
page               92 drivers/auxdisplay/ks0108.c 	ks0108_writedata(min_t(unsigned char, page, 7) | bit(3) | bit(4) |
page               69 drivers/base/firmware_loader/firmware.h 	struct page **pages;
page              292 drivers/base/firmware_loader/main.c 		struct page **new_pages;
page              386 drivers/base/firmware_loader/main.c 	struct page *page;
page              406 drivers/base/firmware_loader/main.c 		page = fw_priv->pages[fw_priv->nr_pages - 1];
page              407 drivers/base/firmware_loader/main.c 		xz_buf.out = kmap(page);
page              411 drivers/base/firmware_loader/main.c 		kunmap(page);
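
The firmware loader keeps a growing image as an array of discontiguous pages (fw_priv->pages) and, as in the XZ path above, maps individual pages with kmap() while filling them. When a contiguous view of such an array is needed it can be mapped in one go with vmap(); a sketch under that assumption, with an illustrative helper name:

    #include <linux/gfp.h>
    #include <linux/mm.h>
    #include <linux/vmalloc.h>

    /* Allocate n pages and return one contiguous kernel mapping over
     * them; tear down with vunmap() and then __free_page() each page.
     */
    static void *alloc_paged_buf(struct page **pages, int n)
    {
            void *buf;
            int i;

            for (i = 0; i < n; i++) {
                    pages[i] = alloc_page(GFP_KERNEL);
                    if (!pages[i])
                            goto err;
            }
            buf = vmap(pages, n, VM_MAP, PAGE_KERNEL);
            if (buf)
                    return buf;
    err:
            while (i--)
                    __free_page(pages[i]);
            return NULL;
    }
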
page               35 drivers/block/aoe/aoeblk.c 				  struct device_attribute *attr, char *page)
page               40 drivers/block/aoe/aoeblk.c 	return snprintf(page, PAGE_SIZE,
page               48 drivers/block/aoe/aoeblk.c 				struct device_attribute *attr, char *page)
page               55 drivers/block/aoe/aoeblk.c 		return snprintf(page, PAGE_SIZE, "none\n");
page               56 drivers/block/aoe/aoeblk.c 	return snprintf(page, PAGE_SIZE, "%pm\n", t->addr);
page               59 drivers/block/aoe/aoeblk.c 				  struct device_attribute *attr, char *page)
page               88 drivers/block/aoe/aoeblk.c 		return snprintf(page, PAGE_SIZE, "none\n");
page               89 drivers/block/aoe/aoeblk.c 	for (p = page; nd < ne; nd++)
page               90 drivers/block/aoe/aoeblk.c 		p += snprintf(p, PAGE_SIZE - (p-page), "%s%s",
page               91 drivers/block/aoe/aoeblk.c 			p == page ? "" : ",", (*nd)->name);
page               92 drivers/block/aoe/aoeblk.c 	p += snprintf(p, PAGE_SIZE - (p-page), "\n");
page               93 drivers/block/aoe/aoeblk.c 	return p-page;
page               97 drivers/block/aoe/aoeblk.c 				  struct device_attribute *attr, char *page)
page              102 drivers/block/aoe/aoeblk.c 	return snprintf(page, PAGE_SIZE, "0x%04x\n", (unsigned int) d->fw_ver);
page              105 drivers/block/aoe/aoeblk.c 				    struct device_attribute *attr, char *page)
page              110 drivers/block/aoe/aoeblk.c 	return snprintf(page, PAGE_SIZE, "%lu\n", d->maxbcnt);
page               60 drivers/block/aoe/aoecmd.c static struct page *empty_page;
page               56 drivers/block/brd.c static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector)
page               59 drivers/block/brd.c 	struct page *page;
page               74 drivers/block/brd.c 	page = radix_tree_lookup(&brd->brd_pages, idx);
page               77 drivers/block/brd.c 	BUG_ON(page && page->index != idx);
page               79 drivers/block/brd.c 	return page;
page               87 drivers/block/brd.c static struct page *brd_insert_page(struct brd_device *brd, sector_t sector)
page               90 drivers/block/brd.c 	struct page *page;
page               93 drivers/block/brd.c 	page = brd_lookup_page(brd, sector);
page               94 drivers/block/brd.c 	if (page)
page               95 drivers/block/brd.c 		return page;
page              102 drivers/block/brd.c 	page = alloc_page(gfp_flags);
page              103 drivers/block/brd.c 	if (!page)
page              107 drivers/block/brd.c 		__free_page(page);
page              113 drivers/block/brd.c 	page->index = idx;
page              114 drivers/block/brd.c 	if (radix_tree_insert(&brd->brd_pages, idx, page)) {
page              115 drivers/block/brd.c 		__free_page(page);
page              116 drivers/block/brd.c 		page = radix_tree_lookup(&brd->brd_pages, idx);
page              117 drivers/block/brd.c 		BUG_ON(!page);
page              118 drivers/block/brd.c 		BUG_ON(page->index != idx);
page              124 drivers/block/brd.c 	return page;
page              135 drivers/block/brd.c 	struct page *pages[FREE_BATCH];
page              195 drivers/block/brd.c 	struct page *page;
page              201 drivers/block/brd.c 	page = brd_lookup_page(brd, sector);
page              202 drivers/block/brd.c 	BUG_ON(!page);
page              204 drivers/block/brd.c 	dst = kmap_atomic(page);
page              212 drivers/block/brd.c 		page = brd_lookup_page(brd, sector);
page              213 drivers/block/brd.c 		BUG_ON(!page);
page              215 drivers/block/brd.c 		dst = kmap_atomic(page);
page              227 drivers/block/brd.c 	struct page *page;
page              233 drivers/block/brd.c 	page = brd_lookup_page(brd, sector);
page              234 drivers/block/brd.c 	if (page) {
page              235 drivers/block/brd.c 		src = kmap_atomic(page);
page              245 drivers/block/brd.c 		page = brd_lookup_page(brd, sector);
page              246 drivers/block/brd.c 		if (page) {
page              247 drivers/block/brd.c 			src = kmap_atomic(page);
page              258 drivers/block/brd.c static int brd_do_bvec(struct brd_device *brd, struct page *page,
page              271 drivers/block/brd.c 	mem = kmap_atomic(page);
page              274 drivers/block/brd.c 		flush_dcache_page(page);
page              276 drivers/block/brd.c 		flush_dcache_page(page);
page              315 drivers/block/brd.c 		       struct page *page, unsigned int op)
page              320 drivers/block/brd.c 	if (PageTransHuge(page))
page              322 drivers/block/brd.c 	err = brd_do_bvec(brd, page, PAGE_SIZE, 0, op, sector);
page              323 drivers/block/brd.c 	page_endio(page, op_is_write(op), err);
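The brd copy helpers bracket every memcpy() with kmap_atomic()/kunmap_atomic() so highmem pages are mapped only for the instant they are touched, and flush_dcache_page() keeps virtually indexed caches coherent afterwards. A sketch of the write side of that pattern (copy_to_page is an illustrative name):

#include <linux/highmem.h>
#include <linux/string.h>

static void copy_to_page(struct page *page, unsigned int off,
			 const void *src, size_t len)
{
	void *dst = kmap_atomic(page);	/* short-lived highmem mapping */

	memcpy(dst + off, src, len);
	kunmap_atomic(dst);
	flush_dcache_page(page);	/* no-op on coherent caches */
}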
page               96 drivers/block/cryptoloop.c 		    struct page *raw_page, unsigned raw_off,
page               97 drivers/block/cryptoloop.c 		    struct page *loop_page, unsigned loop_off,
page              106 drivers/block/cryptoloop.c 	struct page *in_page, *out_page;
page               96 drivers/block/drbd/drbd_actlog.c 	return page_address(device->md_io.page);
page              145 drivers/block/drbd/drbd_actlog.c 	if (bio_add_page(bio, device->md_io.page, size, 0) != size)
page               84 drivers/block/drbd/drbd_bitmap.c 	struct page **bm_pages;
page              197 drivers/block/drbd/drbd_bitmap.c static void bm_store_page_idx(struct page *page, unsigned long idx)
page              200 drivers/block/drbd/drbd_bitmap.c 	set_page_private(page, idx);
page              203 drivers/block/drbd/drbd_bitmap.c static unsigned long bm_page_to_idx(struct page *page)
page              205 drivers/block/drbd/drbd_bitmap.c 	return page_private(page) & BM_PAGE_IDX_MASK;
page              228 drivers/block/drbd/drbd_bitmap.c static void bm_set_page_unchanged(struct page *page)
page              231 drivers/block/drbd/drbd_bitmap.c 	clear_bit(BM_PAGE_NEED_WRITEOUT, &page_private(page));
page              232 drivers/block/drbd/drbd_bitmap.c 	clear_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page));
page              235 drivers/block/drbd/drbd_bitmap.c static void bm_set_page_need_writeout(struct page *page)
page              237 drivers/block/drbd/drbd_bitmap.c 	set_bit(BM_PAGE_NEED_WRITEOUT, &page_private(page));
page              257 drivers/block/drbd/drbd_bitmap.c 	struct page *page;
page              263 drivers/block/drbd/drbd_bitmap.c 	page = device->bitmap->bm_pages[page_nr];
page              265 drivers/block/drbd/drbd_bitmap.c 	if (!test_and_set_bit(BM_PAGE_HINT_WRITEOUT, &page_private(page)))
page              269 drivers/block/drbd/drbd_bitmap.c static int bm_test_page_unchanged(struct page *page)
page              271 drivers/block/drbd/drbd_bitmap.c 	volatile const unsigned long *addr = &page_private(page);
page              275 drivers/block/drbd/drbd_bitmap.c static void bm_set_page_io_err(struct page *page)
page              277 drivers/block/drbd/drbd_bitmap.c 	set_bit(BM_PAGE_IO_ERROR, &page_private(page));
page              280 drivers/block/drbd/drbd_bitmap.c static void bm_clear_page_io_err(struct page *page)
page              282 drivers/block/drbd/drbd_bitmap.c 	clear_bit(BM_PAGE_IO_ERROR, &page_private(page));
page              285 drivers/block/drbd/drbd_bitmap.c static void bm_set_page_lazy_writeout(struct page *page)
page              287 drivers/block/drbd/drbd_bitmap.c 	set_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page));
page              290 drivers/block/drbd/drbd_bitmap.c static int bm_test_page_lazy_writeout(struct page *page)
page              292 drivers/block/drbd/drbd_bitmap.c 	return test_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page));
page              314 drivers/block/drbd/drbd_bitmap.c 	struct page *page = b->bm_pages[idx];
page              315 drivers/block/drbd/drbd_bitmap.c 	return (unsigned long *) kmap_atomic(page);
page              353 drivers/block/drbd/drbd_bitmap.c static void bm_free_pages(struct page **pages, unsigned long number)
page              378 drivers/block/drbd/drbd_bitmap.c static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)
page              380 drivers/block/drbd/drbd_bitmap.c 	struct page **old_pages = b->bm_pages;
page              381 drivers/block/drbd/drbd_bitmap.c 	struct page **new_pages, *page;
page              396 drivers/block/drbd/drbd_bitmap.c 	bytes = sizeof(struct page *)*want;
page              410 drivers/block/drbd/drbd_bitmap.c 			page = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
page              411 drivers/block/drbd/drbd_bitmap.c 			if (!page) {
page              418 drivers/block/drbd/drbd_bitmap.c 			bm_store_page_idx(page, i);
page              419 drivers/block/drbd/drbd_bitmap.c 			new_pages[i] = page;
page              637 drivers/block/drbd/drbd_bitmap.c 	struct page **npages, **opages = NULL;
page              984 drivers/block/drbd/drbd_bitmap.c 	struct page *page;
page             1005 drivers/block/drbd/drbd_bitmap.c 		page = mempool_alloc(&drbd_md_io_page_pool,
page             1007 drivers/block/drbd/drbd_bitmap.c 		copy_highpage(page, b->bm_pages[page_nr]);
page             1008 drivers/block/drbd/drbd_bitmap.c 		bm_store_page_idx(page, page_nr);
page             1010 drivers/block/drbd/drbd_bitmap.c 		page = b->bm_pages[page_nr];
page             1015 drivers/block/drbd/drbd_bitmap.c 	bio_add_page(bio, page, len, 0);
page              398 drivers/block/drbd/drbd_int.h 	struct page *pages;
page              602 drivers/block/drbd/drbd_int.h 	struct page *page;
page             1410 drivers/block/drbd/drbd_int.h extern struct page *drbd_pp_pool;
page             1568 drivers/block/drbd/drbd_int.h extern struct page *drbd_alloc_pages(struct drbd_peer_device *, unsigned int, bool);
page             1704 drivers/block/drbd/drbd_int.h static inline struct page *page_chain_next(struct page *page)
page             1706 drivers/block/drbd/drbd_int.h 	return (struct page *)page_private(page);
page             1708 drivers/block/drbd/drbd_int.h #define page_chain_for_each(page) \
page             1709 drivers/block/drbd/drbd_int.h 	for (; page && ({ prefetch(page_chain_next(page)); 1; }); \
page             1710 drivers/block/drbd/drbd_int.h 			page = page_chain_next(page))
page             1711 drivers/block/drbd/drbd_int.h #define page_chain_for_each_safe(page, n) \
page             1712 drivers/block/drbd/drbd_int.h 	for (; page && ({ n = page_chain_next(page); 1; }); page = n)
page             1717 drivers/block/drbd/drbd_int.h 	struct page *page = peer_req->pages;
page             1718 drivers/block/drbd/drbd_int.h 	page_chain_for_each(page) {
page             1719 drivers/block/drbd/drbd_int.h 		if (page_count(page) > 1)
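drbd threads spare pages into a singly linked chain by storing the next pointer in page_private(), which is exactly what the page_chain_next()/page_chain_for_each() entries above iterate over; drbd_pp_pool below is the global head of such a chain. A sketch of push/pop on a chain built this way (chain_push/chain_pop are illustrative names):

#include <linux/mm.h>

static void chain_push(struct page **head, struct page *page)
{
	set_page_private(page, (unsigned long)*head);
	*head = page;
}

static struct page *chain_pop(struct page **head)
{
	struct page *page = *head;

	if (page) {
		*head = (struct page *)page_private(page);
		set_page_private(page, 0);
	}
	return page;
}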
page              127 drivers/block/drbd/drbd_main.c struct page *drbd_pp_pool;
page             1526 drivers/block/drbd/drbd_main.c static int _drbd_no_send_page(struct drbd_peer_device *peer_device, struct page *page,
page             1534 drivers/block/drbd/drbd_main.c 	addr = kmap(page) + offset;
page             1536 drivers/block/drbd/drbd_main.c 	kunmap(page);
page             1542 drivers/block/drbd/drbd_main.c static int _drbd_send_page(struct drbd_peer_device *peer_device, struct page *page,
page             1555 drivers/block/drbd/drbd_main.c 	if (drbd_disable_sendpage || (page_count(page) < 1) || PageSlab(page))
page             1556 drivers/block/drbd/drbd_main.c 		return _drbd_no_send_page(peer_device, page, offset, size, msg_flags);
page             1563 drivers/block/drbd/drbd_main.c 		sent = socket->ops->sendpage(socket, page, offset, len, msg_flags);
page             1634 drivers/block/drbd/drbd_main.c 	struct page *page = peer_req->pages;
page             1639 drivers/block/drbd/drbd_main.c 	page_chain_for_each(page) {
page             1642 drivers/block/drbd/drbd_main.c 		err = _drbd_send_page(peer_device, page, 0, l,
page             1643 drivers/block/drbd/drbd_main.c 				      page_chain_next(page) ? MSG_MORE : 0);
page             2099 drivers/block/drbd/drbd_main.c 	struct page *page;
page             2102 drivers/block/drbd/drbd_main.c 		page = drbd_pp_pool;
page             2103 drivers/block/drbd/drbd_main.c 		drbd_pp_pool = (struct page *)page_private(page);
page             2104 drivers/block/drbd/drbd_main.c 		__free_page(page);
page             2130 drivers/block/drbd/drbd_main.c 	struct page *page;
page             2182 drivers/block/drbd/drbd_main.c 		page = alloc_page(GFP_HIGHUSER);
page             2183 drivers/block/drbd/drbd_main.c 		if (!page)
page             2185 drivers/block/drbd/drbd_main.c 		set_page_private(page, (unsigned long)drbd_pp_pool);
page             2186 drivers/block/drbd/drbd_main.c 		drbd_pp_pool = page;
page             2254 drivers/block/drbd/drbd_main.c 	__free_page(device->md_io.page);
page             2837 drivers/block/drbd/drbd_main.c 	device->md_io.page = alloc_page(GFP_KERNEL);
page             2838 drivers/block/drbd/drbd_main.c 	if (!device->md_io.page)
page             2922 drivers/block/drbd/drbd_main.c 	__free_page(device->md_io.page);
page               75 drivers/block/drbd/drbd_receiver.c static struct page *page_chain_del(struct page **head, int n)
page               77 drivers/block/drbd/drbd_receiver.c 	struct page *page;
page               78 drivers/block/drbd/drbd_receiver.c 	struct page *tmp;
page               83 drivers/block/drbd/drbd_receiver.c 	page = *head;
page               85 drivers/block/drbd/drbd_receiver.c 	if (!page)
page               88 drivers/block/drbd/drbd_receiver.c 	while (page) {
page               89 drivers/block/drbd/drbd_receiver.c 		tmp = page_chain_next(page);
page               95 drivers/block/drbd/drbd_receiver.c 		page = tmp;
page               99 drivers/block/drbd/drbd_receiver.c 	set_page_private(page, 0);
page              101 drivers/block/drbd/drbd_receiver.c 	page = *head;
page              103 drivers/block/drbd/drbd_receiver.c 	return page;
page              109 drivers/block/drbd/drbd_receiver.c static struct page *page_chain_tail(struct page *page, int *len)
page              111 drivers/block/drbd/drbd_receiver.c 	struct page *tmp;
page              113 drivers/block/drbd/drbd_receiver.c 	while ((tmp = page_chain_next(page)))
page              114 drivers/block/drbd/drbd_receiver.c 		++i, page = tmp;
page              117 drivers/block/drbd/drbd_receiver.c 	return page;
page              120 drivers/block/drbd/drbd_receiver.c static int page_chain_free(struct page *page)
page              122 drivers/block/drbd/drbd_receiver.c 	struct page *tmp;
page              124 drivers/block/drbd/drbd_receiver.c 	page_chain_for_each_safe(page, tmp) {
page              125 drivers/block/drbd/drbd_receiver.c 		put_page(page);
page              131 drivers/block/drbd/drbd_receiver.c static void page_chain_add(struct page **head,
page              132 drivers/block/drbd/drbd_receiver.c 		struct page *chain_first, struct page *chain_last)
page              135 drivers/block/drbd/drbd_receiver.c 	struct page *tmp;
page              145 drivers/block/drbd/drbd_receiver.c static struct page *__drbd_alloc_pages(struct drbd_device *device,
page              148 drivers/block/drbd/drbd_receiver.c 	struct page *page = NULL;
page              149 drivers/block/drbd/drbd_receiver.c 	struct page *tmp = NULL;
page              156 drivers/block/drbd/drbd_receiver.c 		page = page_chain_del(&drbd_pp_pool, number);
page              157 drivers/block/drbd/drbd_receiver.c 		if (page)
page              160 drivers/block/drbd/drbd_receiver.c 		if (page)
page              161 drivers/block/drbd/drbd_receiver.c 			return page;
page              171 drivers/block/drbd/drbd_receiver.c 		set_page_private(tmp, (unsigned long)page);
page              172 drivers/block/drbd/drbd_receiver.c 		page = tmp;
page              176 drivers/block/drbd/drbd_receiver.c 		return page;
page              181 drivers/block/drbd/drbd_receiver.c 	if (page) {
page              182 drivers/block/drbd/drbd_receiver.c 		tmp = page_chain_tail(page, NULL);
page              184 drivers/block/drbd/drbd_receiver.c 		page_chain_add(&drbd_pp_pool, page, tmp);
page              260 drivers/block/drbd/drbd_receiver.c struct page *drbd_alloc_pages(struct drbd_peer_device *peer_device, unsigned int number,
page              264 drivers/block/drbd/drbd_receiver.c 	struct page *page = NULL;
page              275 drivers/block/drbd/drbd_receiver.c 		page = __drbd_alloc_pages(device, number);
page              279 drivers/block/drbd/drbd_receiver.c 	if (page && atomic_read(&device->pp_in_use_by_net) > 512)
page              282 drivers/block/drbd/drbd_receiver.c 	while (page == NULL) {
page              288 drivers/block/drbd/drbd_receiver.c 			page = __drbd_alloc_pages(device, number);
page              289 drivers/block/drbd/drbd_receiver.c 			if (page)
page              306 drivers/block/drbd/drbd_receiver.c 	if (page)
page              308 drivers/block/drbd/drbd_receiver.c 	return page;
page              315 drivers/block/drbd/drbd_receiver.c static void drbd_free_pages(struct drbd_device *device, struct page *page, int is_net)
page              320 drivers/block/drbd/drbd_receiver.c 	if (page == NULL)
page              324 drivers/block/drbd/drbd_receiver.c 		i = page_chain_free(page);
page              326 drivers/block/drbd/drbd_receiver.c 		struct page *tmp;
page              327 drivers/block/drbd/drbd_receiver.c 		tmp = page_chain_tail(page, &i);
page              329 drivers/block/drbd/drbd_receiver.c 		page_chain_add(&drbd_pp_pool, page, tmp);
page              363 drivers/block/drbd/drbd_receiver.c 	struct page *page = NULL;
page              377 drivers/block/drbd/drbd_receiver.c 		page = drbd_alloc_pages(peer_device, nr_pages,
page              379 drivers/block/drbd/drbd_receiver.c 		if (!page)
page              390 drivers/block/drbd/drbd_receiver.c 	peer_req->pages = page;
page             1643 drivers/block/drbd/drbd_receiver.c 	struct page *page = peer_req->pages;
page             1705 drivers/block/drbd/drbd_receiver.c 	page_chain_for_each(page) {
page             1707 drivers/block/drbd/drbd_receiver.c 		if (!bio_add_page(bio, page, len, 0))
page             1714 drivers/block/drbd/drbd_receiver.c 	D_ASSERT(device, page == NULL);
page             1864 drivers/block/drbd/drbd_receiver.c 	struct page *page;
page             1950 drivers/block/drbd/drbd_receiver.c 	page = peer_req->pages;
page             1951 drivers/block/drbd/drbd_receiver.c 	page_chain_for_each(page) {
page             1953 drivers/block/drbd/drbd_receiver.c 		data = kmap(page);
page             1959 drivers/block/drbd/drbd_receiver.c 		kunmap(page);
page             1985 drivers/block/drbd/drbd_receiver.c 	struct page *page;
page             1992 drivers/block/drbd/drbd_receiver.c 	page = drbd_alloc_pages(peer_device, 1, 1);
page             1994 drivers/block/drbd/drbd_receiver.c 	data = kmap(page);
page             2003 drivers/block/drbd/drbd_receiver.c 	kunmap(page);
page             2004 drivers/block/drbd/drbd_receiver.c 	drbd_free_pages(peer_device->device, page, 0);
page              289 drivers/block/drbd/drbd_worker.c 	struct page *page = peer_req->pages;
page              290 drivers/block/drbd/drbd_worker.c 	struct page *tmp;
page              298 drivers/block/drbd/drbd_worker.c 	src = kmap_atomic(page);
page              299 drivers/block/drbd/drbd_worker.c 	while ((tmp = page_chain_next(page))) {
page              303 drivers/block/drbd/drbd_worker.c 		page = tmp;
page              304 drivers/block/drbd/drbd_worker.c 		src = kmap_atomic(page);
page             1087 drivers/block/drbd/drbd_worker.c 	struct page *page = peer_req->pages;
page             1090 drivers/block/drbd/drbd_worker.c 	page_chain_for_each(page) {
page             1095 drivers/block/drbd/drbd_worker.c 		d = kmap_atomic(page);
page             4150 drivers/block/floppy.c 	struct page *page;
page             4154 drivers/block/floppy.c 	page = alloc_page(GFP_NOIO);
page             4155 drivers/block/floppy.c 	if (!page) {
page             4168 drivers/block/floppy.c 	bio_add_page(&bio, page, size, 0);
page             4183 drivers/block/floppy.c 	__free_page(page);
page               93 drivers/block/loop.c 			struct page *raw_page, unsigned raw_off,
page               94 drivers/block/loop.c 			struct page *loop_page, unsigned loop_off,
page              251 drivers/block/loop.c 	       struct page *rpage, unsigned roffs,
page              252 drivers/block/loop.c 	       struct page *lpage, unsigned loffs,
page              316 drivers/block/loop.c 	struct page *page;
page              319 drivers/block/loop.c 	page = alloc_page(GFP_NOIO);
page              320 drivers/block/loop.c 	if (unlikely(!page))
page              324 drivers/block/loop.c 		ret = lo_do_transfer(lo, WRITE, page, 0, bvec.bv_page,
page              329 drivers/block/loop.c 		b.bv_page = page;
page              337 drivers/block/loop.c 	__free_page(page);
page              376 drivers/block/loop.c 	struct page *page;
page              380 drivers/block/loop.c 	page = alloc_page(GFP_NOIO);
page              381 drivers/block/loop.c 	if (unlikely(!page))
page              387 drivers/block/loop.c 		b.bv_page = page;
page              398 drivers/block/loop.c 		ret = lo_do_transfer(lo, READ, page, 0, bvec.bv_page,
page              416 drivers/block/loop.c 	__free_page(page);
page              754 drivers/block/loop.c static ssize_t loop_attr_show(struct device *dev, char *page,
page              760 drivers/block/loop.c 	return callback(lo, page);
page               36 drivers/block/loop.h 				    struct page *raw_page, unsigned raw_off,
page               37 drivers/block/loop.h 				    struct page *loop_page, unsigned loop_off,
page               81 drivers/block/loop.h 			struct page *raw_page, unsigned raw_off,
page               82 drivers/block/loop.h 			struct page *loop_page, unsigned loop_off,
page              485 drivers/block/mtip32xx/mtip32xx.c static int mtip_read_log_page(struct mtip_port *port, u8 page, u16 *buffer,
page             1255 drivers/block/mtip32xx/mtip32xx.c static int mtip_read_log_page(struct mtip_port *port, u8 page, u16 *buffer,
page             1266 drivers/block/mtip32xx/mtip32xx.c 	fis.lba_low	= page;
page               62 drivers/block/null_blk_main.c 	struct page *page;
page              207 drivers/block/null_blk_main.c static inline ssize_t nullb_device_uint_attr_show(unsigned int val, char *page)
page              209 drivers/block/null_blk_main.c 	return snprintf(page, PAGE_SIZE, "%u\n", val);
page              213 drivers/block/null_blk_main.c 	char *page)
page              215 drivers/block/null_blk_main.c 	return snprintf(page, PAGE_SIZE, "%lu\n", val);
page              218 drivers/block/null_blk_main.c static inline ssize_t nullb_device_bool_attr_show(bool val, char *page)
page              220 drivers/block/null_blk_main.c 	return snprintf(page, PAGE_SIZE, "%u\n", val);
page              224 drivers/block/null_blk_main.c 	const char *page, size_t count)
page              229 drivers/block/null_blk_main.c 	result = kstrtouint(page, 0, &tmp);
page              238 drivers/block/null_blk_main.c 	const char *page, size_t count)
page              243 drivers/block/null_blk_main.c 	result = kstrtoul(page, 0, &tmp);
page              251 drivers/block/null_blk_main.c static ssize_t nullb_device_bool_attr_store(bool *val, const char *page,
page              257 drivers/block/null_blk_main.c 	result = kstrtobool(page,  &tmp);
page              268 drivers/block/null_blk_main.c nullb_device_##NAME##_show(struct config_item *item, char *page)		\
page              271 drivers/block/null_blk_main.c 				to_nullb_device(item)->NAME, page);		\
page              274 drivers/block/null_blk_main.c nullb_device_##NAME##_store(struct config_item *item, const char *page,		\
page              280 drivers/block/null_blk_main.c 			&to_nullb_device(item)->NAME, page, count);		\
page              303 drivers/block/null_blk_main.c static ssize_t nullb_device_power_show(struct config_item *item, char *page)
page              305 drivers/block/null_blk_main.c 	return nullb_device_bool_attr_show(to_nullb_device(item)->power, page);
page              309 drivers/block/null_blk_main.c 				     const char *page, size_t count)
page              315 drivers/block/null_blk_main.c 	ret = nullb_device_bool_attr_store(&newp, page, count);
page              344 drivers/block/null_blk_main.c static ssize_t nullb_device_badblocks_show(struct config_item *item, char *page)
page              348 drivers/block/null_blk_main.c 	return badblocks_show(&t_dev->badblocks, page, 0);
page              352 drivers/block/null_blk_main.c 				     const char *page, size_t count)
page              359 drivers/block/null_blk_main.c 	orig = kstrndup(page, count, GFP_KERNEL);
page              468 drivers/block/null_blk_main.c static ssize_t memb_group_features_show(struct config_item *item, char *page)
page              470 drivers/block/null_blk_main.c 	return snprintf(page, PAGE_SIZE, "memory_backed,discard,bandwidth,cache,badblocks,zoned,zone_size\n");
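The null_blk configfs attributes reduce to one typed show/store helper pair per scalar type: show is a single snprintf() of the value into the page buffer, store parses with kstrto*() into a temporary and only commits on success, and the NAME##_show/NAME##_store macros stamp these out per attribute. A sketch of the unsigned-int pair under those assumptions (the CONFIGFS_ATTR wiring is omitted):

#include <linux/kernel.h>

static ssize_t uint_attr_show(unsigned int val, char *page)
{
	return snprintf(page, PAGE_SIZE, "%u\n", val);
}

static ssize_t uint_attr_store(unsigned int *val, const char *page,
			       size_t count)
{
	unsigned int tmp;
	int result;

	result = kstrtouint(page, 0, &tmp);
	if (result < 0)
		return result;

	*val = tmp;
	return count;	/* configfs stores return bytes consumed */
}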
page              661 drivers/block/null_blk_main.c 	t_page->page = alloc_pages(gfp_flags, 0);
page              662 drivers/block/null_blk_main.c 	if (!t_page->page)
page              678 drivers/block/null_blk_main.c 	__free_page(t_page->page);
page              682 drivers/block/null_blk_main.c static bool null_page_empty(struct nullb_page *page)
page              686 drivers/block/null_blk_main.c 	return find_first_bit(page->bitmap, size) == size;
page              725 drivers/block/null_blk_main.c 		WARN_ON(!t_page || t_page->page->index != idx);
page              748 drivers/block/null_blk_main.c 			pos = t_pages[i]->page->index;
page              774 drivers/block/null_blk_main.c 	WARN_ON(t_page && t_page->page->index != idx);
page              785 drivers/block/null_blk_main.c 	struct nullb_page *page = NULL;
page              788 drivers/block/null_blk_main.c 		page = __null_lookup_page(nullb, sector, for_write, true);
page              789 drivers/block/null_blk_main.c 	if (page)
page              790 drivers/block/null_blk_main.c 		return page;
page              817 drivers/block/null_blk_main.c 	t_page->page->index = idx;
page              837 drivers/block/null_blk_main.c 	idx = c_page->page->index;
page              855 drivers/block/null_blk_main.c 	src = kmap_atomic(c_page->page);
page              856 drivers/block/null_blk_main.c 	dst = kmap_atomic(t_page->page);
page              896 drivers/block/null_blk_main.c 		nullb->cache_flush_pos = c_pages[i]->page->index;
page              931 drivers/block/null_blk_main.c static int copy_to_nullb(struct nullb *nullb, struct page *source,
page              952 drivers/block/null_blk_main.c 		dst = kmap_atomic(t_page->page);
page              968 drivers/block/null_blk_main.c static int copy_from_nullb(struct nullb *nullb, struct page *dest,
page              988 drivers/block/null_blk_main.c 		src = kmap_atomic(t_page->page);
page             1036 drivers/block/null_blk_main.c static int null_transfer(struct nullb *nullb, struct page *page,
page             1043 drivers/block/null_blk_main.c 		err = copy_from_nullb(nullb, page, off, sector, len);
page             1044 drivers/block/null_blk_main.c 		flush_dcache_page(page);
page             1046 drivers/block/null_blk_main.c 		flush_dcache_page(page);
page             1047 drivers/block/null_blk_main.c 		err = copy_to_nullb(nullb, page, off, sector, len, is_fua);
page             1277 drivers/block/pktcdvd.c 		struct page *page = pkt->pages[(f * CD_FRAMESIZE) / PAGE_SIZE];
page             1280 drivers/block/pktcdvd.c 		if (!bio_add_page(pkt->w_bio, page, CD_FRAMESIZE, offset))
page             1963 drivers/block/rbd.c 	struct page **pages;
page             2157 drivers/block/rbd.c 	struct page **pages;
page             2317 drivers/block/rbd.c 	struct page **pages;
page             3841 drivers/block/rbd.c 				struct page ***preply_pages,
page             3866 drivers/block/rbd.c 	struct page **reply_pages;
page             3891 drivers/block/rbd.c 	struct page **reply_pages;
page             4751 drivers/block/rbd.c 	struct page *req_page = NULL;
page             4752 drivers/block/rbd.c 	struct page *reply_page;
page             4936 drivers/block/rbd.c 	struct page **pages;
page             5831 drivers/block/rbd.c 			     struct page *req_page,
page             5832 drivers/block/rbd.c 			     struct page *reply_page,
page             5874 drivers/block/rbd.c 				    struct page *req_page,
page             5875 drivers/block/rbd.c 				    struct page *reply_page,
page             5911 drivers/block/rbd.c 	struct page *req_page, *reply_page;
page               23 drivers/block/rsxx/dma.c 	struct page		 *page;
page              441 drivers/block/rsxx/dma.c 			dma->dma_addr = dma_map_page(&ctrl->card->dev->dev, dma->page,
page              617 drivers/block/rsxx/dma.c 	dma->page         = NULL;
page              635 drivers/block/rsxx/dma.c 			      struct page *page,
page              650 drivers/block/rsxx/dma.c 	dma->page         = page;
page              658 drivers/block/rsxx/dma.c 		dma->sub_page.cnt, dma->page, dma->pg_off);
page              256 drivers/block/umem.c 	struct mm_page *page;
page              260 drivers/block/umem.c 	page = &card->mm_pages[card->Active];
page              262 drivers/block/umem.c 		card->Active, page->headcnt, page->cnt - 1);
page              263 drivers/block/umem.c 	desc = &page->desc[page->cnt-1];
page              273 drivers/block/umem.c 	desc = &page->desc[page->headcnt];
page              286 drivers/block/umem.c 	offset = ((char *)desc) - ((char *)page->desc);
page              287 drivers/block/umem.c 	writel(cpu_to_le32((page->page_dma+offset) & 0xffffffff),
page              291 drivers/block/umem.c 	writel(cpu_to_le32(((u64)page->page_dma)>>32),
page              323 drivers/block/umem.c static inline void reset_page(struct mm_page *page)
page              325 drivers/block/umem.c 	page->cnt = 0;
page              326 drivers/block/umem.c 	page->headcnt = 0;
page              327 drivers/block/umem.c 	page->bio = NULL;
page              328 drivers/block/umem.c 	page->biotail = &page->bio;
page              416 drivers/block/umem.c 	struct mm_page *page;
page              424 drivers/block/umem.c 	page = &card->mm_pages[card->Active];
page              426 drivers/block/umem.c 	while (page->headcnt < page->cnt) {
page              427 drivers/block/umem.c 		struct bio *bio = page->bio;
page              428 drivers/block/umem.c 		struct mm_dma_desc *desc = &page->desc[page->headcnt];
page              438 drivers/block/umem.c 		page->headcnt++;
page              439 drivers/block/umem.c 		vec = bio_iter_iovec(bio, page->iter);
page              440 drivers/block/umem.c 		bio_advance_iter(bio, &page->iter, vec.bv_len);
page              442 drivers/block/umem.c 		if (!page->iter.bi_size) {
page              443 drivers/block/umem.c 			page->bio = bio->bi_next;
page              444 drivers/block/umem.c 			if (page->bio)
page              445 drivers/block/umem.c 				page->iter = page->bio->bi_iter;
page              470 drivers/block/umem.c 		if (bio != page->bio) {
page              486 drivers/block/umem.c 	if (page->headcnt >= page->cnt) {
page              487 drivers/block/umem.c 		reset_page(page);
page              145 drivers/block/xen-blkback/blkback.c static inline int get_free_page(struct xen_blkif_ring *ring, struct page **page)
page              153 drivers/block/xen-blkback/blkback.c 		return gnttab_alloc_pages(1, page);
page              156 drivers/block/xen-blkback/blkback.c 	page[0] = list_first_entry(&ring->free_pages, struct page, lru);
page              157 drivers/block/xen-blkback/blkback.c 	list_del(&page[0]->lru);
page              164 drivers/block/xen-blkback/blkback.c static inline void put_free_pages(struct xen_blkif_ring *ring, struct page **page,
page              172 drivers/block/xen-blkback/blkback.c 		list_add(&page[i]->lru, &ring->free_pages);
page              180 drivers/block/xen-blkback/blkback.c 	struct page *page[NUM_BATCH_FREE_PAGES];
page              187 drivers/block/xen-blkback/blkback.c 		page[num_pages] = list_first_entry(&ring->free_pages,
page              188 drivers/block/xen-blkback/blkback.c 		                                   struct page, lru);
page              189 drivers/block/xen-blkback/blkback.c 		list_del(&page[num_pages]->lru);
page              193 drivers/block/xen-blkback/blkback.c 			gnttab_free_pages(num_pages, page);
page              200 drivers/block/xen-blkback/blkback.c 		gnttab_free_pages(num_pages, page);
page              203 drivers/block/xen-blkback/blkback.c #define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))
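xen-blkback recycles grant pages through a free list threaded through page->lru: get_free_page() pops one (falling back to gnttab_alloc_pages() when the list is empty) and put_free_pages() pushes them back, so steady-state I/O needs no fresh allocations. A sketch of the list half only (locking and the gnttab fallback are elided; pop_free_page/push_free_pages are illustrative names):

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/mm.h>

static int pop_free_page(struct list_head *free_pages, struct page **page)
{
	if (list_empty(free_pages))
		return -ENOMEM;	/* caller falls back to a real allocation */
	page[0] = list_first_entry(free_pages, struct page, lru);
	list_del(&page[0]->lru);
	return 0;
}

static void push_free_pages(struct list_head *free_pages,
			    struct page **page, int num)
{
	int i;

	for (i = 0; i < num; i++)
		list_add(&page[i]->lru, free_pages);
}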
page              308 drivers/block/xen-blkback/blkback.c 	struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
page              323 drivers/block/xen-blkback/blkback.c 				persistent_gnt->page)),
page              327 drivers/block/xen-blkback/blkback.c 		pages[segs_to_unmap] = persistent_gnt->page;
page              349 drivers/block/xen-blkback/blkback.c 	struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
page              366 drivers/block/xen-blkback/blkback.c 			vaddr(persistent_gnt->page),
page              370 drivers/block/xen-blkback/blkback.c 		pages[segs_to_unmap] = persistent_gnt->page;
page              699 drivers/block/xen-blkback/blkback.c 	struct page **unmap_pages)
page              710 drivers/block/xen-blkback/blkback.c 		unmap_pages[invcount] = pages[i]->page;
page              711 drivers/block/xen-blkback/blkback.c 		gnttab_set_unmap_op(&unmap_ops[invcount], vaddr(pages[i]->page),
page              785 drivers/block/xen-blkback/blkback.c 	struct page *unmap_pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
page              809 drivers/block/xen-blkback/blkback.c 	struct page *pages_to_gnt[BLKIF_MAX_SEGMENTS_PER_REQUEST];
page              841 drivers/block/xen-blkback/blkback.c 			pages[i]->page = persistent_gnt->page;
page              844 drivers/block/xen-blkback/blkback.c 			if (get_free_page(ring, &pages[i]->page))
page              846 drivers/block/xen-blkback/blkback.c 			addr = vaddr(pages[i]->page);
page              847 drivers/block/xen-blkback/blkback.c 			pages_to_gnt[segs_to_map] = pages[i]->page;
page              877 drivers/block/xen-blkback/blkback.c 				put_free_pages(ring, &pages[seg_idx]->page, 1);
page              904 drivers/block/xen-blkback/blkback.c 			persistent_gnt->page = pages[seg_idx]->page;
page              983 drivers/block/xen-blkback/blkback.c 			segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]->page);
page             1365 drivers/block/xen-blkback/blkback.c 				     pages[i]->page,
page              240 drivers/block/xen-blkback/common.h 	struct page *page;
page              330 drivers/block/xen-blkback/common.h 	struct page 		*page;
page              356 drivers/block/xen-blkback/common.h 	struct page                   *unmap_pages[MAX_INDIRECT_SEGMENTS];
page               87 drivers/block/xen-blkfront.c 	struct page *page;
page              292 drivers/block/xen-blkfront.c 	struct page *granted_page;
page              307 drivers/block/xen-blkfront.c 			gnt_list_entry->page = granted_page;
page              322 drivers/block/xen-blkfront.c 			__free_page(gnt_list_entry->page);
page              350 drivers/block/xen-blkfront.c 						 gnt_list_entry->page,
page              392 drivers/block/xen-blkfront.c 		struct page *indirect_page;
page              397 drivers/block/xen-blkfront.c 						 struct page, lru);
page              399 drivers/block/xen-blkfront.c 		gnt_list_entry->page = indirect_page;
page              621 drivers/block/xen-blkfront.c 		setup->segments = kmap_atomic(gnt_list_entry->page);
page              636 drivers/block/xen-blkfront.c 		shared_data = kmap_atomic(gnt_list_entry->page);
page             1254 drivers/block/xen-blkfront.c 		struct page *indirect_page, *n;
page             1274 drivers/block/xen-blkfront.c 				__free_page(persistent_gnt->page);
page             1295 drivers/block/xen-blkfront.c 				__free_page(persistent_gnt->page);
page             1309 drivers/block/xen-blkfront.c 			__free_page(persistent_gnt->page);
page             1377 drivers/block/xen-blkfront.c 	shared_data = kmap_atomic(s->grants_used[info->grant_idx]->page);
page             1528 drivers/block/xen-blkfront.c 				struct page *indirect_page;
page             1536 drivers/block/xen-blkfront.c 					indirect_page = s->indirect_grants[i]->page;
page             2229 drivers/block/xen-blkfront.c 			struct page *indirect_page = alloc_page(GFP_KERNEL);
page             2271 drivers/block/xen-blkfront.c 		struct page *indirect_page, *n;
page              211 drivers/block/zram/zram_drv.c 	unsigned long *page;
page              214 drivers/block/zram/zram_drv.c 	page = (unsigned long *)ptr;
page              215 drivers/block/zram/zram_drv.c 	val = page[0];
page              217 drivers/block/zram/zram_drv.c 	for (pos = 1; pos < PAGE_SIZE / sizeof(*page); pos++) {
page              218 drivers/block/zram/zram_drv.c 		if (val != page[pos])
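zram's same-value check above scans the page as an array of unsigned long and bails at the first mismatch; pages that pass are stored as a single element rather than compressed. A userspace rendering of the same loop (page_same_filled mirrors the zram entries; PAGE_SIZE is assumed 4096 here):

#include <stdbool.h>
#include <stddef.h>

#define PAGE_SIZE 4096

static bool page_same_filled(const void *ptr, unsigned long *element)
{
	const unsigned long *page = ptr;
	unsigned long val = page[0];
	size_t pos;

	for (pos = 1; pos < PAGE_SIZE / sizeof(*page); pos++) {
		if (val != page[pos])
			return false;
	}
	*element = val;	/* store this one word instead of the page */
	return true;
}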
page              579 drivers/block/zram/zram_drv.c 	struct page *page = bio_first_page_all(bio);
page              581 drivers/block/zram/zram_drv.c 	page_endio(page, op_is_write(bio_op(bio)),
page              628 drivers/block/zram/zram_drv.c 	struct page *page;
page              651 drivers/block/zram/zram_drv.c 	page = alloc_page(GFP_KERNEL);
page              652 drivers/block/zram/zram_drv.c 	if (!page) {
page              660 drivers/block/zram/zram_drv.c 		bvec.bv_page = page;
page              765 drivers/block/zram/zram_drv.c 	__free_page(page);
page             1212 drivers/block/zram/zram_drv.c static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
page             1226 drivers/block/zram/zram_drv.c 		bvec.bv_page = page;
page             1240 drivers/block/zram/zram_drv.c 		mem = kmap_atomic(page);
page             1251 drivers/block/zram/zram_drv.c 		dst = kmap_atomic(page);
page             1258 drivers/block/zram/zram_drv.c 		dst = kmap_atomic(page);
page             1277 drivers/block/zram/zram_drv.c 	struct page *page;
page             1279 drivers/block/zram/zram_drv.c 	page = bvec->bv_page;
page             1282 drivers/block/zram/zram_drv.c 		page = alloc_page(GFP_NOIO|__GFP_HIGHMEM);
page             1283 drivers/block/zram/zram_drv.c 		if (!page)
page             1287 drivers/block/zram/zram_drv.c 	ret = __zram_bvec_read(zram, page, index, bio, is_partial_io(bvec));
page             1293 drivers/block/zram/zram_drv.c 		void *src = kmap_atomic(page);
page             1301 drivers/block/zram/zram_drv.c 		__free_page(page);
page             1315 drivers/block/zram/zram_drv.c 	struct page *page = bvec->bv_page;
page             1319 drivers/block/zram/zram_drv.c 	mem = kmap_atomic(page);
page             1331 drivers/block/zram/zram_drv.c 	src = kmap_atomic(page);
page             1387 drivers/block/zram/zram_drv.c 		src = kmap_atomic(page);
page             1426 drivers/block/zram/zram_drv.c 	struct page *page = NULL;
page             1437 drivers/block/zram/zram_drv.c 		page = alloc_page(GFP_NOIO|__GFP_HIGHMEM);
page             1438 drivers/block/zram/zram_drv.c 		if (!page)
page             1441 drivers/block/zram/zram_drv.c 		ret = __zram_bvec_read(zram, page, index, bio, true);
page             1446 drivers/block/zram/zram_drv.c 		dst = kmap_atomic(page);
page             1451 drivers/block/zram/zram_drv.c 		vec.bv_page = page;
page             1459 drivers/block/zram/zram_drv.c 		__free_page(page);
page             1626 drivers/block/zram/zram_drv.c 		       struct page *page, unsigned int op)
page             1633 drivers/block/zram/zram_drv.c 	if (PageTransHuge(page))
page             1646 drivers/block/zram/zram_drv.c 	bv.bv_page = page;
page             1665 drivers/block/zram/zram_drv.c 		page_endio(page, op_is_write(op), 0);
page              118 drivers/char/agp/agp.h 	struct page *(*agp_alloc_page)(struct agp_bridge_data *);
page              120 drivers/char/agp/agp.h 	void (*agp_destroy_page)(struct page *, int flags);
page              136 drivers/char/agp/agp.h 	struct page *scratch_page_page;
page              201 drivers/char/agp/agp.h struct page *agp_generic_alloc_page(struct agp_bridge_data *bridge);
page              204 drivers/char/agp/agp.h void agp_generic_destroy_page(struct page *page, int flags);
page              144 drivers/char/agp/ali-agp.c static struct page *m1541_alloc_page(struct agp_bridge_data *bridge)
page              146 drivers/char/agp/ali-agp.c 	struct page *page = agp_generic_alloc_page(agp_bridge);
page              149 drivers/char/agp/ali-agp.c 	if (!page)
page              155 drivers/char/agp/ali-agp.c 			  page_to_phys(page)) | ALI_CACHE_FLUSH_EN ));
page              156 drivers/char/agp/ali-agp.c 	return page;
page              159 drivers/char/agp/ali-agp.c static void ali_destroy_page(struct page *page, int flags)
page              161 drivers/char/agp/ali-agp.c 	if (page) {
page              164 drivers/char/agp/ali-agp.c 			agp_generic_destroy_page(page, flags);
page              166 drivers/char/agp/ali-agp.c 			agp_generic_destroy_page(page, flags);
page              170 drivers/char/agp/ali-agp.c static void m1541_destroy_page(struct page *page, int flags)
page              174 drivers/char/agp/ali-agp.c 	if (page == NULL)
page              183 drivers/char/agp/ali-agp.c 					 page_to_phys(page)) | ALI_CACHE_FLUSH_EN));
page              185 drivers/char/agp/ali-agp.c 	agp_generic_destroy_page(page, flags);
page               19 drivers/char/agp/alpha-agp.c 	struct page *page;
page               30 drivers/char/agp/alpha-agp.c 	page = virt_to_page(__va(pa));
page               31 drivers/char/agp/alpha-agp.c 	get_page(page);
page               32 drivers/char/agp/alpha-agp.c 	vmf->page = page;
page              145 drivers/char/agp/backend.c 		struct page *page = bridge->driver->agp_alloc_page(bridge);
page              147 drivers/char/agp/backend.c 		if (!page) {
page              153 drivers/char/agp/backend.c 		bridge->scratch_page_page = page;
page              154 drivers/char/agp/backend.c 		bridge->scratch_page_dma = page_to_phys(page);
page              197 drivers/char/agp/backend.c 		struct page *page = bridge->scratch_page_page;
page              199 drivers/char/agp/backend.c 		bridge->driver->agp_destroy_page(page, AGP_PAGE_DESTROY_UNMAP);
page              200 drivers/char/agp/backend.c 		bridge->driver->agp_destroy_page(page, AGP_PAGE_DESTROY_FREE);
page              224 drivers/char/agp/backend.c 		struct page *page = bridge->scratch_page_page;
page              226 drivers/char/agp/backend.c 		bridge->driver->agp_destroy_page(page, AGP_PAGE_DESTROY_UNMAP);
page              227 drivers/char/agp/backend.c 		bridge->driver->agp_destroy_page(page, AGP_PAGE_DESTROY_FREE);
page               69 drivers/char/agp/efficeon-agp.c static inline unsigned long efficeon_mask_memory(struct page *page)
page               71 drivers/char/agp/efficeon-agp.c 	unsigned long addr = page_to_phys(page);
page              163 drivers/char/agp/efficeon-agp.c 		unsigned long page = efficeon_private.l1_table[index];
page              164 drivers/char/agp/efficeon-agp.c 		if (page) {
page              166 drivers/char/agp/efficeon-agp.c 			free_page(page);
page              210 drivers/char/agp/efficeon-agp.c 		unsigned long page;
page              213 drivers/char/agp/efficeon-agp.c 		page = efficeon_private.l1_table[index];
page              214 drivers/char/agp/efficeon-agp.c 		BUG_ON(page);
page              216 drivers/char/agp/efficeon-agp.c 		page = get_zeroed_page(GFP_KERNEL);
page              217 drivers/char/agp/efficeon-agp.c 		if (!page) {
page              223 drivers/char/agp/efficeon-agp.c 			clflush((char *)page+offset);
page              225 drivers/char/agp/efficeon-agp.c 		efficeon_private.l1_table[index] = page;
page              227 drivers/char/agp/efficeon-agp.c 		value = virt_to_phys((unsigned long *)page) | pati | present | index;
page              239 drivers/char/agp/efficeon-agp.c 	unsigned int *page, *last_page;
page              261 drivers/char/agp/efficeon-agp.c 		page = (unsigned int *) efficeon_private.l1_table[index >> 10];
page              263 drivers/char/agp/efficeon-agp.c 		if (!page)
page              266 drivers/char/agp/efficeon-agp.c 		page += (index & 0x3ff);
page              267 drivers/char/agp/efficeon-agp.c 		*page = insert;
page              271 drivers/char/agp/efficeon-agp.c 		    (((unsigned long)page^(unsigned long)last_page) &
page              275 drivers/char/agp/efficeon-agp.c 		last_page = page;
page              300 drivers/char/agp/efficeon-agp.c 		unsigned int *page = (unsigned int *) efficeon_private.l1_table[index >> 10];
page              302 drivers/char/agp/efficeon-agp.c 		if (!page)
page              304 drivers/char/agp/efficeon-agp.c 		page += (index & 0x3ff);
page              305 drivers/char/agp/efficeon-agp.c 		*page = 0;
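The efficeon driver keeps a two-level GATT: l1_table[] holds page-sized leaf tables of 1024 32-bit entries each, so a GART index splits into a top index (index >> 10) and a leaf slot (index & 0x3ff), matching the insert/remove entries above. A userspace sketch of one write through such a table (gatt_write is an illustrative name):

#include <stdint.h>
#include <stddef.h>

static int gatt_write(unsigned long *l1_table, size_t index, uint32_t value)
{
	uint32_t *leaf = (uint32_t *)l1_table[index >> 10];

	if (!leaf)
		return -1;	/* leaf page not yet allocated */
	leaf[index & 0x3ff] = value;
	return 0;
}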
page              100 drivers/char/agp/generic.c 	unsigned long alloc_size = num_agp_pages*sizeof(struct page *);
page              102 drivers/char/agp/generic.c 	if (INT_MAX/sizeof(struct page *) < num_agp_pages)
page              265 drivers/char/agp/generic.c 		struct page *page = bridge->driver->agp_alloc_page(bridge);
page              267 drivers/char/agp/generic.c 		if (page == NULL) {
page              271 drivers/char/agp/generic.c 		new->pages[i] = page;
page              858 drivers/char/agp/generic.c 	struct page *page;
page              934 drivers/char/agp/generic.c 	for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
page              935 drivers/char/agp/generic.c 		SetPageReserved(page);
page              953 drivers/char/agp/generic.c 		for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
page              954 drivers/char/agp/generic.c 			ClearPageReserved(page);
page              977 drivers/char/agp/generic.c 	struct page *page;
page             1014 drivers/char/agp/generic.c 	for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
page             1015 drivers/char/agp/generic.c 		ClearPageReserved(page);
page             1193 drivers/char/agp/generic.c 	struct page * page;
page             1197 drivers/char/agp/generic.c 		page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
page             1199 drivers/char/agp/generic.c 		if (page == NULL)
page             1203 drivers/char/agp/generic.c 		map_page_into_agp(page);
page             1205 drivers/char/agp/generic.c 		get_page(page);
page             1208 drivers/char/agp/generic.c 		mem->pages[i] = page;
page             1221 drivers/char/agp/generic.c struct page *agp_generic_alloc_page(struct agp_bridge_data *bridge)
page             1223 drivers/char/agp/generic.c 	struct page * page;
page             1225 drivers/char/agp/generic.c 	page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
page             1226 drivers/char/agp/generic.c 	if (page == NULL)
page             1229 drivers/char/agp/generic.c 	map_page_into_agp(page);
page             1231 drivers/char/agp/generic.c 	get_page(page);
page             1233 drivers/char/agp/generic.c 	return page;
page             1240 drivers/char/agp/generic.c 	struct page *page;
page             1250 drivers/char/agp/generic.c 		page = mem->pages[i];
page             1253 drivers/char/agp/generic.c 		unmap_page_from_agp(page);
page             1255 drivers/char/agp/generic.c 		put_page(page);
page             1256 drivers/char/agp/generic.c 		__free_page(page);
page             1263 drivers/char/agp/generic.c void agp_generic_destroy_page(struct page *page, int flags)
page             1265 drivers/char/agp/generic.c 	if (page == NULL)
page             1269 drivers/char/agp/generic.c 		unmap_page_from_agp(page);
page             1272 drivers/char/agp/generic.c 		put_page(page);
page             1273 drivers/char/agp/generic.c 		__free_page(page);
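agp_generic_alloc_page()/agp_generic_destroy_page() above bracket an AGP page's life: allocate zeroed low memory, remap it for the GART with map_page_into_agp(), and pin it with an extra reference; destruction reverses each step. A condensed sketch of that pairing (agp_page_get/agp_page_put are illustrative names; map_page_into_agp()/unmap_page_from_agp() come from asm/agp.h):

#include <linux/mm.h>
#include <asm/agp.h>

static struct page *agp_page_get(void)
{
	struct page *page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);

	if (!page)
		return NULL;
	map_page_into_agp(page);	/* change caching attributes */
	get_page(page);			/* extra ref held for the GART */
	return page;
}

static void agp_page_put(struct page *page)
{
	if (!page)
		return;
	unmap_page_from_agp(page);
	put_page(page);
	__free_page(page);
}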
page               80 drivers/char/agp/i460-agp.c 		struct page *page; 		/* page pointer */
page              371 drivers/char/agp/i460-agp.c 	lp->page = alloc_pages(GFP_KERNEL, order);
page              372 drivers/char/agp/i460-agp.c 	if (!lp->page) {
page              380 drivers/char/agp/i460-agp.c 		__free_pages(lp->page, order);
page              385 drivers/char/agp/i460-agp.c 	lp->paddr = page_to_phys(lp->page);
page              396 drivers/char/agp/i460-agp.c 	__free_pages(lp->page, I460_IO_PAGE_SHIFT - PAGE_SHIFT);
page              453 drivers/char/agp/i460-agp.c 			mem->pages[i] = lp->page;
page              526 drivers/char/agp/i460-agp.c static struct page *i460_alloc_page (struct agp_bridge_data *bridge)
page              528 drivers/char/agp/i460-agp.c 	void *page;
page              531 drivers/char/agp/i460-agp.c 		page = agp_generic_alloc_page(agp_bridge);
page              535 drivers/char/agp/i460-agp.c 		page = (void *)~0UL;
page              536 drivers/char/agp/i460-agp.c 	return page;
page              539 drivers/char/agp/i460-agp.c static void i460_destroy_page (struct page *page, int flags)
page              542 drivers/char/agp/i460-agp.c 		agp_generic_destroy_page(page, flags);
page               76 drivers/char/agp/intel-gtt.c 	struct page *scratch_page;
page               98 drivers/char/agp/intel-gtt.c static int intel_gtt_map_memory(struct page **pages,
page              144 drivers/char/agp/intel-gtt.c static struct page *i8xx_alloc_pages(void)
page              146 drivers/char/agp/intel-gtt.c 	struct page *page;
page              148 drivers/char/agp/intel-gtt.c 	page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2);
page              149 drivers/char/agp/intel-gtt.c 	if (page == NULL)
page              152 drivers/char/agp/intel-gtt.c 	if (set_pages_uc(page, 4) < 0) {
page              153 drivers/char/agp/intel-gtt.c 		set_pages_wb(page, 4);
page              154 drivers/char/agp/intel-gtt.c 		__free_pages(page, 2);
page              158 drivers/char/agp/intel-gtt.c 	return page;
page              161 drivers/char/agp/intel-gtt.c static void i8xx_destroy_pages(struct page *page)
page              163 drivers/char/agp/intel-gtt.c 	if (page == NULL)
page              166 drivers/char/agp/intel-gtt.c 	set_pages_wb(page, 4);
page              167 drivers/char/agp/intel-gtt.c 	__free_pages(page, 2);
page              242 drivers/char/agp/intel-gtt.c 	struct page *page;
page              245 drivers/char/agp/intel-gtt.c 	case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge);
page              249 drivers/char/agp/intel-gtt.c 		page = i8xx_alloc_pages();
page              255 drivers/char/agp/intel-gtt.c 	if (page == NULL)
page              262 drivers/char/agp/intel-gtt.c 	new->pages[0] = page;
page              296 drivers/char/agp/intel-gtt.c 	struct page *page;
page              299 drivers/char/agp/intel-gtt.c 	page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
page              300 drivers/char/agp/intel-gtt.c 	if (page == NULL)
page              302 drivers/char/agp/intel-gtt.c 	set_pages_uc(page, 1);
page              305 drivers/char/agp/intel-gtt.c 		dma_addr = pci_map_page(intel_private.pcidev, page, 0,
page              312 drivers/char/agp/intel-gtt.c 		intel_private.scratch_page_dma = page_to_phys(page);
page              314 drivers/char/agp/intel-gtt.c 	intel_private.scratch_page = page;
page              884 drivers/char/agp/intel-gtt.c 				   struct page **pages,
page              365 drivers/char/agp/uninorth-agp.c 	struct page **pages_arr;
page              377 drivers/char/agp/uninorth-agp.c 	struct page *page;
page              407 drivers/char/agp/uninorth-agp.c 						sizeof(struct page *),
page              414 drivers/char/agp/uninorth-agp.c 	for (page = virt_to_page(table), i = 0; page <= virt_to_page(table_end);
page              415 drivers/char/agp/uninorth-agp.c 	     page++, i++) {
page              416 drivers/char/agp/uninorth-agp.c 		SetPageReserved(page);
page              417 drivers/char/agp/uninorth-agp.c 		uninorth_priv.pages_arr[i] = page;
page              453 drivers/char/agp/uninorth-agp.c 	struct page *page;
page              468 drivers/char/agp/uninorth-agp.c 	for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
page              469 drivers/char/agp/uninorth-agp.c 		ClearPageReserved(page);
page              287 drivers/char/tpm/tpm.h 	struct page *data_page;
page              362 drivers/char/virtio_console.c 		struct page *page = sg_page(&buf->sg[i]);
page              363 drivers/char/virtio_console.c 		if (!page)
page              365 drivers/char/virtio_console.c 		put_page(page);
page              876 drivers/char/virtio_console.c 		get_page(buf->page);
page              877 drivers/char/virtio_console.c 		unlock_page(buf->page);
page              880 drivers/char/virtio_console.c 		sg_set_page(&(sgl->sg[sgl->n]), buf->page, len, buf->offset);
page              883 drivers/char/virtio_console.c 		struct page *page = alloc_page(GFP_KERNEL);
page              886 drivers/char/virtio_console.c 		if (!page)
page              895 drivers/char/virtio_console.c 		src = kmap_atomic(buf->page);
page              896 drivers/char/virtio_console.c 		memcpy(page_address(page) + offset, src + buf->offset, len);
page              899 drivers/char/virtio_console.c 		sg_set_page(&(sgl->sg[sgl->n]), page, len, offset);
page              601 drivers/crypto/axis/artpec6_crypto.c 				      struct page *page, size_t offset,
page              616 drivers/crypto/axis/artpec6_crypto.c 	dma_addr = dma_map_page(dev, page, offset, size, dir);
page              636 drivers/crypto/axis/artpec6_crypto.c 	struct page *page = virt_to_page(ptr);
page              639 drivers/crypto/axis/artpec6_crypto.c 	return artpec6_crypto_dma_map_page(common, page, offset, size, dir,
page              488 drivers/crypto/ccp/psp-dev.c 	struct page *p;
page              362 drivers/crypto/chelsio/chtls/chtls.h #define TCP_PAGE(sk)   (sk->sk_frag.page)
page              477 drivers/crypto/chelsio/chtls/chtls.h int chtls_sendpage(struct sock *sk, struct page *page,
page              227 drivers/crypto/chelsio/chtls/chtls_io.c 	struct page *page;
page              257 drivers/crypto/chelsio/chtls/chtls_io.c 		page = alloc_pages(sk->sk_allocation | __GFP_COMP, 0);
page              258 drivers/crypto/chelsio/chtls/chtls_io.c 		if (!page) {
page              264 drivers/crypto/chelsio/chtls/chtls_io.c 		memcpy(page_address(page), ivs, number_of_ivs *
page              266 drivers/crypto/chelsio/chtls/chtls_io.c 		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, 0,
page              888 drivers/crypto/chelsio/chtls/chtls_io.c 					  struct page *page,
page              893 drivers/crypto/chelsio/chtls/chtls_io.c 	err = skb_do_copy_data_nocache(sk, skb, from, page_address(page) +
page             1075 drivers/crypto/chelsio/chtls/chtls_io.c 			struct page *page = TCP_PAGE(sk);
page             1080 drivers/crypto/chelsio/chtls/chtls_io.c 			if (page)
page             1081 drivers/crypto/chelsio/chtls/chtls_io.c 				pg_size = page_size(page);
page             1083 drivers/crypto/chelsio/chtls/chtls_io.c 			    skb_can_coalesce(skb, i, page, off)) {
page             1092 drivers/crypto/chelsio/chtls/chtls_io.c 			if (page && off == pg_size) {
page             1093 drivers/crypto/chelsio/chtls/chtls_io.c 				put_page(page);
page             1094 drivers/crypto/chelsio/chtls/chtls_io.c 				TCP_PAGE(sk) = page = NULL;
page             1098 drivers/crypto/chelsio/chtls/chtls_io.c 			if (!page) {
page             1103 drivers/crypto/chelsio/chtls/chtls_io.c 					page = alloc_pages(gfp | __GFP_COMP |
page             1107 drivers/crypto/chelsio/chtls/chtls_io.c 					if (page)
page             1110 drivers/crypto/chelsio/chtls/chtls_io.c 				if (!page) {
page             1111 drivers/crypto/chelsio/chtls/chtls_io.c 					page = alloc_page(gfp);
page             1114 drivers/crypto/chelsio/chtls/chtls_io.c 				if (!page)
page             1125 drivers/crypto/chelsio/chtls/chtls_io.c 							     skb, page,
page             1129 drivers/crypto/chelsio/chtls/chtls_io.c 					TCP_PAGE(sk) = page;
page             1140 drivers/crypto/chelsio/chtls/chtls_io.c 				skb_fill_page_desc(skb, i, page, off, copy);
page             1143 drivers/crypto/chelsio/chtls/chtls_io.c 					get_page(page);
page             1144 drivers/crypto/chelsio/chtls/chtls_io.c 					TCP_PAGE(sk) = page;
page             1200 drivers/crypto/chelsio/chtls/chtls_io.c int chtls_sendpage(struct sock *sk, struct page *page,
page             1250 drivers/crypto/chelsio/chtls/chtls_io.c 		if (skb_can_coalesce(skb, i, page, offset)) {
page             1253 drivers/crypto/chelsio/chtls/chtls_io.c 			get_page(page);
page             1254 drivers/crypto/chelsio/chtls/chtls_io.c 			skb_fill_page_desc(skb, i, page, offset, copy);
page             1230 drivers/crypto/hifn_795x.c static int hifn_setup_src_desc(struct hifn_device *dev, struct page *page,
page             1237 drivers/crypto/hifn_795x.c 	addr = pci_map_page(dev->pdev, page, offset, size, PCI_DMA_TODEVICE);
page             1288 drivers/crypto/hifn_795x.c static void hifn_setup_dst_desc(struct hifn_device *dev, struct page *page,
page             1295 drivers/crypto/hifn_795x.c 	addr = pci_map_page(dev->pdev, page, offset, size, PCI_DMA_FROMDEVICE);
page             1323 drivers/crypto/hifn_795x.c 	struct page *spage, *dpage;
page             1377 drivers/crypto/hifn_795x.c 		struct page *page = alloc_page(gfp_flags);
page             1380 drivers/crypto/hifn_795x.c 		if (!page)
page             1385 drivers/crypto/hifn_795x.c 		sg_set_page(s, page, PAGE_SIZE, 0);
page              913 drivers/crypto/n2_core.c 		src_paddr = (page_to_phys(walk->src.page) +
page              915 drivers/crypto/n2_core.c 		dest_paddr = (page_to_phys(walk->dst.page) +
page              151 drivers/crypto/qat/qat_common/icp_qat_uclo.h 	struct icp_qat_uclo_page *page;
page              156 drivers/crypto/qat/qat_common/icp_qat_uclo.h 	struct icp_qat_uclo_page *page;
page              194 drivers/crypto/qat/qat_common/icp_qat_uclo.h 	struct icp_qat_uclo_encap_page *page;
page               67 drivers/crypto/qat/qat_common/qat_uclo.c 	struct icp_qat_uclo_page *page = NULL;
page               85 drivers/crypto/qat/qat_common/qat_uclo.c 	ae_slice->page = kzalloc(sizeof(*ae_slice->page), GFP_KERNEL);
page               86 drivers/crypto/qat/qat_common/qat_uclo.c 	if (!ae_slice->page)
page               88 drivers/crypto/qat/qat_common/qat_uclo.c 	page = ae_slice->page;
page               89 drivers/crypto/qat/qat_common/qat_uclo.c 	page->encap_page = encap_image->page;
page               90 drivers/crypto/qat/qat_common/qat_uclo.c 	ae_slice->page->region = ae_slice->region;
page              111 drivers/crypto/qat/qat_common/qat_uclo.c 		kfree(ae_data->ae_slices[i].page);
page              112 drivers/crypto/qat/qat_common/qat_uclo.c 		ae_data->ae_slices[i].page = NULL;
page              408 drivers/crypto/qat/qat_common/qat_uclo.c 	struct icp_qat_uclo_encap_page *page;
page              424 drivers/crypto/qat/qat_common/qat_uclo.c 	page = image->page;
page              430 drivers/crypto/qat/qat_common/qat_uclo.c 		patt_pos = page->beg_addr_p + page->micro_words_num;
page              433 drivers/crypto/qat/qat_common/qat_uclo.c 				  page->beg_addr_p, &fill_data[0]);
page              436 drivers/crypto/qat/qat_common/qat_uclo.c 				  &fill_data[page->beg_addr_p]);
page              598 drivers/crypto/qat/qat_common/qat_uclo.c 				     struct icp_qat_uclo_encap_page *page)
page              608 drivers/crypto/qat/qat_common/qat_uclo.c 	page->def_page = code_page->def_page;
page              609 drivers/crypto/qat/qat_common/qat_uclo.c 	page->page_region = code_page->page_region;
page              610 drivers/crypto/qat/qat_common/qat_uclo.c 	page->beg_addr_v = code_page->beg_addr_v;
page              611 drivers/crypto/qat/qat_common/qat_uclo.c 	page->beg_addr_p = code_page->beg_addr_p;
page              614 drivers/crypto/qat/qat_common/qat_uclo.c 	page->micro_words_num = code_area->micro_words_num;
page              618 drivers/crypto/qat/qat_common/qat_uclo.c 	page->uwblock_num = uword_block_tab->entry_num;
page              621 drivers/crypto/qat/qat_common/qat_uclo.c 	page->uwblock = (struct icp_qat_uclo_encap_uwblock *)uwblock;
page              623 drivers/crypto/qat/qat_common/qat_uclo.c 		page->uwblock[i].micro_words =
page              670 drivers/crypto/qat/qat_common/qat_uclo.c 		ae_uimage[j].page =
page              673 drivers/crypto/qat/qat_common/qat_uclo.c 		if (!ae_uimage[j].page)
page              676 drivers/crypto/qat/qat_common/qat_uclo.c 					ae_uimage[j].page);
page              681 drivers/crypto/qat/qat_common/qat_uclo.c 		kfree(ae_uimage[i].page);
page              963 drivers/crypto/qat/qat_common/qat_uclo.c 		image->uwords_num = image->page->beg_addr_p +
page              964 drivers/crypto/qat/qat_common/qat_uclo.c 					image->page->micro_words_num;
page             1012 drivers/crypto/qat/qat_common/qat_uclo.c 		kfree(obj_handle->ae_uimage[ae].page);
page             1504 drivers/crypto/qat/qat_common/qat_uclo.c 		kfree(obj_handle->ae_uimage[a].page);
page             1588 drivers/crypto/qat/qat_common/qat_uclo.c 	struct icp_qat_uclo_page *page;
page             1609 drivers/crypto/qat/qat_common/qat_uclo.c 		page = obj_handle->ae_data[ae].ae_slices[s].page;
page             1610 drivers/crypto/qat/qat_common/qat_uclo.c 		if (!page->encap_page->def_page)
page             1612 drivers/crypto/qat/qat_common/qat_uclo.c 		qat_uclo_wr_uimage_raw_page(handle, page->encap_page, ae);
page             1614 drivers/crypto/qat/qat_common/qat_uclo.c 		page = obj_handle->ae_data[ae].ae_slices[s].page;
page             1617 drivers/crypto/qat/qat_common/qat_uclo.c 					(ctx_mask & (1 << ctx)) ? page : NULL;
page              916 drivers/crypto/ux500/cryp/cryp_core.c 		src_paddr = (page_to_phys(walk.src.page) + walk.src.offset);
page              919 drivers/crypto/ux500/cryp/cryp_core.c 		dst_paddr = (page_to_phys(walk.dst.page) + walk.dst.offset);
page              260 drivers/dax/device.c 			struct page *page;
page              262 drivers/dax/device.c 			page = pfn_to_page(pfn_t_to_pfn(pfn) + i);
page              263 drivers/dax/device.c 			if (page->mapping)
page              265 drivers/dax/device.c 			page->mapping = filp->f_mapping;
page              266 drivers/dax/device.c 			page->index = pgoff + i;
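A hedged sketch of the device-dax pattern in the device.c hits above: converting raw pfns to struct page and tagging each page with the owning mapping and index. The skip-on-claimed policy and the parameter set are assumptions; the real fault path interleaves this with other bookkeeping.

    #include <linux/pfn_t.h>
    #include <linux/fs.h>

    /* Associate 'nr' pages starting at 'pfn' with a file mapping. */
    static void dax_associate_pages_sketch(pfn_t pfn, unsigned long nr,
                                           struct address_space *mapping,
                                           pgoff_t pgoff)
    {
            unsigned long i;

            for (i = 0; i < nr; i++) {
                    struct page *page = pfn_to_page(pfn_t_to_pfn(pfn) + i);

                    if (page->mapping)
                            continue; /* already claimed; policy assumed */
                    page->mapping = mapping;
                    page->index = pgoff + i;
            }
    }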
page               20 drivers/dma-buf/udmabuf.c 	struct page **pages;
page               28 drivers/dma-buf/udmabuf.c 	vmf->page = ubuf->pages[vmf->pgoff];
page               29 drivers/dma-buf/udmabuf.c 	get_page(vmf->page);
page               99 drivers/dma-buf/udmabuf.c 	struct page *page = ubuf->pages[page_num];
page              101 drivers/dma-buf/udmabuf.c 	return kmap(page);
page              130 drivers/dma-buf/udmabuf.c 	struct page *page;
page              173 drivers/dma-buf/udmabuf.c 			page = shmem_read_mapping_page(
page              175 drivers/dma-buf/udmabuf.c 			if (IS_ERR(page)) {
page              176 drivers/dma-buf/udmabuf.c 				ret = PTR_ERR(page);
page              179 drivers/dma-buf/udmabuf.c 			ubuf->pages[pgbuf++] = page;
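The udmabuf hits above show the classic "page array" fault handler: return a refcounted page from a preallocated array. A minimal sketch, assuming a 'pagecount' bound field (the pages field matches the hits; the bounds check is an assumption):

    static vm_fault_t udmabuf_vm_fault_sketch(struct vm_fault *vmf)
    {
            struct udmabuf *ubuf = vmf->vma->vm_private_data;

            if (vmf->pgoff >= ubuf->pagecount)  /* 'pagecount' assumed */
                    return VM_FAULT_SIGBUS;
            vmf->page = ubuf->pages[vmf->pgoff];
            get_page(vmf->page);                /* released on unmap */
            return 0;
    }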
page              729 drivers/dma/dmatest.c 			struct page *pg = virt_to_page(buf);
page              747 drivers/dma/dmatest.c 			struct page *pg = virt_to_page(buf);
page              788 drivers/dma/ioat/init.c 	struct page *dest;
page              789 drivers/dma/ioat/init.c 	struct page *xor_srcs[IOAT_NUM_SRC_TEST];
page              790 drivers/dma/ioat/init.c 	struct page *xor_val_srcs[IOAT_NUM_SRC_TEST + 1];
page               17 drivers/dma/ioat/sysfs.c static ssize_t cap_show(struct dma_chan *c, char *page)
page               21 drivers/dma/ioat/sysfs.c 	return sprintf(page, "copy%s%s%s%s%s\n",
page               31 drivers/dma/ioat/sysfs.c static ssize_t version_show(struct dma_chan *c, char *page)
page               36 drivers/dma/ioat/sysfs.c 	return sprintf(page, "%d.%d\n",
page               42 drivers/dma/ioat/sysfs.c ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
page               52 drivers/dma/ioat/sysfs.c 	return entry->show(&ioat_chan->dma_chan, page);
page               57 drivers/dma/ioat/sysfs.c const char *page, size_t count)
page               67 drivers/dma/ioat/sysfs.c 	return entry->store(&ioat_chan->dma_chan, page, count);
page              111 drivers/dma/ioat/sysfs.c static ssize_t ring_size_show(struct dma_chan *c, char *page)
page              115 drivers/dma/ioat/sysfs.c 	return sprintf(page, "%d\n", (1 << ioat_chan->alloc_order) & ~1);
page              119 drivers/dma/ioat/sysfs.c static ssize_t ring_active_show(struct dma_chan *c, char *page)
page              124 drivers/dma/ioat/sysfs.c 	return sprintf(page, "%d\n", ioat_ring_active(ioat_chan));
page              128 drivers/dma/ioat/sysfs.c static ssize_t intr_coalesce_show(struct dma_chan *c, char *page)
page              132 drivers/dma/ioat/sysfs.c 	return sprintf(page, "%d\n", ioat_chan->intr_coalesce);
page              135 drivers/dma/ioat/sysfs.c static ssize_t intr_coalesce_store(struct dma_chan *c, const char *page,
page              141 drivers/dma/ioat/sysfs.c 	if (sscanf(page, "%du", &intr_coalesce) != -1) {
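In the ioat/sysfs.c hits above, 'page' is not a struct page at all but the PAGE_SIZE output buffer that sysfs hands to show() callbacks. A sketch of one such callback, using scnprintf() rather than the driver's sprintf() purely as a defensive choice:

    static ssize_t ring_size_show_sketch(struct dma_chan *c, char *page)
    {
            struct ioatdma_chan *ioat_chan = to_ioat_chan(c);

            /* return value is the number of bytes written into 'page' */
            return scnprintf(page, PAGE_SIZE, "%d\n",
                             (1 << ioat_chan->alloc_order) & ~1);
    }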
page              909 drivers/dma/iop-adma.c 	struct page *dest;
page              910 drivers/dma/iop-adma.c 	struct page *xor_srcs[IOP_ADMA_NUM_SRC_TEST];
page              911 drivers/dma/iop-adma.c 	struct page *zero_sum_srcs[IOP_ADMA_NUM_SRC_TEST + 1];
page             1083 drivers/dma/iop-adma.c 	struct page *pq[IOP_ADMA_NUM_SRC_TEST+2+2];
page             1085 drivers/dma/iop-adma.c 	struct page **pq_hw = &pq[IOP_ADMA_NUM_SRC_TEST+2];
page              875 drivers/dma/mv_xor.c 	struct page *dest;
page              876 drivers/dma/mv_xor.c 	struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
page             1248 drivers/dma/ppc4xx/adma.c static struct page *ppc440spe_rxor_srcs[32];
page             1253 drivers/dma/ppc4xx/adma.c static int ppc440spe_can_rxor(struct page **srcs, int src_cnt, size_t len)
page             1329 drivers/dma/ppc4xx/adma.c 	enum dma_transaction_type cap, struct page **dst_lst, int dst_cnt,
page             1330 drivers/dma/ppc4xx/adma.c 	struct page **src_lst, int src_cnt, size_t src_sz)
page             1371 drivers/dma/ppc4xx/adma.c 	struct page **dst_lst, int dst_cnt, struct page **src_lst,
page             3705 drivers/dma/ppc4xx/adma.c 	struct page *pg;
page              105 drivers/dma/ppc4xx/adma.h 	struct page *pdest_page;
page              106 drivers/dma/ppc4xx/adma.h 	struct page *qdest_page;
page              494 drivers/dma/sh/rcar-dmac.c 	struct rcar_dmac_desc_page *page;
page              499 drivers/dma/sh/rcar-dmac.c 	page = (void *)get_zeroed_page(gfp);
page              500 drivers/dma/sh/rcar-dmac.c 	if (!page)
page              504 drivers/dma/sh/rcar-dmac.c 		struct rcar_dmac_desc *desc = &page->descs[i];
page              515 drivers/dma/sh/rcar-dmac.c 	list_add_tail(&page->node, &chan->desc.pages);
page              625 drivers/dma/sh/rcar-dmac.c 	struct rcar_dmac_desc_page *page;
page              630 drivers/dma/sh/rcar-dmac.c 	page = (void *)get_zeroed_page(gfp);
page              631 drivers/dma/sh/rcar-dmac.c 	if (!page)
page              635 drivers/dma/sh/rcar-dmac.c 		struct rcar_dmac_xfer_chunk *chunk = &page->chunks[i];
page              642 drivers/dma/sh/rcar-dmac.c 	list_add_tail(&page->node, &chan->desc.pages);
page             1045 drivers/dma/sh/rcar-dmac.c 	struct rcar_dmac_desc_page *page, *_page;
page             1077 drivers/dma/sh/rcar-dmac.c 	list_for_each_entry_safe(page, _page, &rchan->desc.pages, node) {
page             1078 drivers/dma/sh/rcar-dmac.c 		list_del(&page->node);
page             1079 drivers/dma/sh/rcar-dmac.c 		free_page((unsigned long)page);
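The rcar-dmac hits above carve descriptor arrays out of single zeroed pages and track the pages on a list for teardown. A self-contained sketch of that allocation shape; the struct layout and array size are assumptions, not copied from the driver:

    #include <linux/gfp.h>
    #include <linux/list.h>

    struct desc_page_sketch {
            struct list_head node;                     /* on chan->desc.pages */
            struct { struct list_head node; } descs[8]; /* size assumed */
    };

    static int desc_page_alloc_sketch(struct list_head *pages,
                                      struct list_head *free_descs)
    {
            struct desc_page_sketch *page = (void *)get_zeroed_page(GFP_KERNEL);
            unsigned int i;

            if (!page)
                    return -ENOMEM;
            for (i = 0; i < ARRAY_SIZE(page->descs); i++)
                    list_add_tail(&page->descs[i].node, free_descs);
            list_add_tail(&page->node, pages);  /* freed later via free_page() */
            return 0;
    }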
page              694 drivers/edac/amd64_edac.c 	err->page = (u32) (error_address >> PAGE_SHIFT);
page             2537 drivers/edac/amd64_edac.c 			     err->page, err->offset, err->syndrome,
page              403 drivers/edac/amd64_edac.h 	u32 page;
page               95 drivers/edac/aspeed_edac.c 	u32 page, offset, syndrome;
page              104 drivers/edac/aspeed_edac.c 		page = 0;
page              108 drivers/edac/aspeed_edac.c 				     page, offset, syndrome, 0, 0, -1,
page              114 drivers/edac/aspeed_edac.c 	page = rec_addr >> PAGE_SHIFT;
page              119 drivers/edac/aspeed_edac.c 			     csrow->first_page + page, offset, syndrome,
page              128 drivers/edac/aspeed_edac.c 	u32 page, offset, syndrome;
page              135 drivers/edac/aspeed_edac.c 	page = un_rec_addr >> PAGE_SHIFT;
page              140 drivers/edac/aspeed_edac.c 			     csrow->first_page + page, offset, syndrome,
page              147 drivers/edac/aspeed_edac.c 		page = 0;
page              151 drivers/edac/aspeed_edac.c 				     page, offset, syndrome, 0, 0, -1,
page              305 drivers/edac/e752x_edac.c 				unsigned long page)
page              312 drivers/edac/e752x_edac.c 	if (page < pvt->tolm)
page              313 drivers/edac/e752x_edac.c 		return page;
page              315 drivers/edac/e752x_edac.c 	if ((page >= 0x100000) && (page < pvt->remapbase))
page              316 drivers/edac/e752x_edac.c 		return page;
page              318 drivers/edac/e752x_edac.c 	remap = (page - pvt->tolm) + pvt->remapbase;
page              323 drivers/edac/e752x_edac.c 	e752x_printk(KERN_ERR, "Invalid page %lx - out of range\n", page);
page              330 drivers/edac/e752x_edac.c 	u32 page;
page              339 drivers/edac/e752x_edac.c 	page = sec1_add >> (PAGE_SHIFT - 4);
page              344 drivers/edac/e752x_edac.c 		row = ((page >> 1) & 3);
page              366 drivers/edac/e752x_edac.c 		row = edac_mc_find_csrow_by_page(mci, page);
page              373 drivers/edac/e752x_edac.c 			     page, offset_in_page(sec1_add << 4), sec1_syndrome,
page              463 drivers/edac/e752x_edac.c 	u32 error_1b, page;
page              468 drivers/edac/e752x_edac.c 	page = error_1b >> (PAGE_SHIFT - 4);  /* convert the addr to 4k page */
page              471 drivers/edac/e752x_edac.c 	row = pvt->mc_symmetric ? ((page >> 1) & 3) :
page              472 drivers/edac/e752x_edac.c 		edac_mc_find_csrow_by_page(mci, page);
page              476 drivers/edac/e752x_edac.c 			(long unsigned int)page, row);
page              183 drivers/edac/e7xxx_edac.c 				unsigned long page)
page              190 drivers/edac/e7xxx_edac.c 	if ((page < pvt->tolm) ||
page              191 drivers/edac/e7xxx_edac.c 		((page >= 0x100000) && (page < pvt->remapbase)))
page              192 drivers/edac/e7xxx_edac.c 		return page;
page              194 drivers/edac/e7xxx_edac.c 	remap = (page - pvt->tolm) + pvt->remapbase;
page              199 drivers/edac/e7xxx_edac.c 	e7xxx_printk(KERN_ERR, "Invalid page %lx - out of range\n", page);
page              205 drivers/edac/e7xxx_edac.c 	u32 error_1b, page;
page              214 drivers/edac/e7xxx_edac.c 	page = error_1b >> 6;	/* convert the address to 4k page */
page              218 drivers/edac/e7xxx_edac.c 	row = edac_mc_find_csrow_by_page(mci, page);
page              221 drivers/edac/e7xxx_edac.c 	edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, page, 0, syndrome,
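The e752x/e7xxx hits above both apply the same address fix-up before csrow lookup: pages below top-of-low-memory map 1:1, pages between 4 GiB and remapbase map 1:1, and the remainder shift by remapbase. A sketch under those assumptions (the out-of-range validity check that prints "Invalid page" is elided):

    static unsigned long remap_page_sketch(struct mem_ctl_info *mci,
                                           unsigned long page)
    {
            struct e752x_pvt *pvt = mci->pvt_info; /* field names per the hits */

            if (page < pvt->tolm)
                    return page;
            if (page >= 0x100000 && page < pvt->remapbase)
                    return page;

            /* remap window; bounds validation elided in this sketch */
            return (page - pvt->tolm) + pvt->remapbase;
    }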
page              820 drivers/edac/edac_mc.c static void edac_mc_scrub_block(unsigned long page, unsigned long offset,
page              823 drivers/edac/edac_mc.c 	struct page *pg;
page              830 drivers/edac/edac_mc.c 	if (!pfn_valid(page))
page              834 drivers/edac/edac_mc.c 	pg = pfn_to_page(page);
page              852 drivers/edac/edac_mc.c int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page)
page              857 drivers/edac/edac_mc.c 	edac_dbg(1, "MC%d: 0x%lx\n", mci->mc_idx, page);
page              872 drivers/edac/edac_mc.c 			 csrow->first_page, page, csrow->last_page,
page              875 drivers/edac/edac_mc.c 		if ((page >= csrow->first_page) &&
page              876 drivers/edac/edac_mc.c 		    (page <= csrow->last_page) &&
page              877 drivers/edac/edac_mc.c 		    ((page & csrow->page_mask) ==
page              887 drivers/edac/edac_mc.c 			(unsigned long)page);
page              209 drivers/edac/edac_mc.h 				      unsigned long page);
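The edac_mc.c hits above spell out the csrow match rule: a page belongs to a csrow when it falls in [first_page, last_page] and agrees with first_page under page_mask. A compact sketch of edac_mc_find_csrow_by_page() built from exactly those three tests, returning -1 on a miss as the callers below expect:

    static int find_csrow_sketch(struct mem_ctl_info *mci, unsigned long page)
    {
            int i;

            for (i = 0; i < mci->nr_csrows; i++) {
                    struct csrow_info *csrow = mci->csrows[i];

                    if (page >= csrow->first_page &&
                        page <= csrow->last_page &&
                        (page & csrow->page_mask) ==
                        (csrow->first_page & csrow->page_mask))
                            return i;
            }
            return -1; /* no csrow covers this page */
    }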
page              218 drivers/edac/i7core_edac.c 	int channel, dimm, rank, bank, page, col;
page              856 drivers/edac/i7core_edac.c DECLARE_ADDR_MATCH(page, 0x10000);
page              863 drivers/edac/i7core_edac.c ATTR_ADDR_MATCH(page);
page              963 drivers/edac/i7core_edac.c 	if (pvt->inject.page < 0)
page              966 drivers/edac/i7core_edac.c 		mask |= (pvt->inject.page & 0xffff) << 14;
page             2214 drivers/edac/i7core_edac.c 	pvt->inject.page = -1;
page              146 drivers/edac/i82443bxgx_edac.c 	u32 eapaddr, page, pageoffset;
page              151 drivers/edac/i82443bxgx_edac.c 	page = eapaddr >> PAGE_SHIFT;
page              152 drivers/edac/i82443bxgx_edac.c 	pageoffset = eapaddr - (page << PAGE_SHIFT);
page              158 drivers/edac/i82443bxgx_edac.c 					     page, pageoffset, 0,
page              159 drivers/edac/i82443bxgx_edac.c 					     edac_mc_find_csrow_by_page(mci, page),
page              167 drivers/edac/i82443bxgx_edac.c 					     page, pageoffset, 0,
page              168 drivers/edac/i82443bxgx_edac.c 					     edac_mc_find_csrow_by_page(mci, page),
page              281 drivers/edac/i82975x_edac.c 	unsigned long offst, page;
page              295 drivers/edac/i82975x_edac.c 	page = (unsigned long) info->eap;
page              296 drivers/edac/i82975x_edac.c 	page >>= 1;
page              298 drivers/edac/i82975x_edac.c 		page |= 0x80000000;
page              299 drivers/edac/i82975x_edac.c 	page >>= (PAGE_SHIFT - 1);
page              300 drivers/edac/i82975x_edac.c 	row = edac_mc_find_csrow_by_page(mci, page);
page              307 drivers/edac/i82975x_edac.c 			(info->xeap & 1) ? 1 : 0, info->eap, (unsigned int) page);
page              317 drivers/edac/i82975x_edac.c 				     page, offst, 0,
page              322 drivers/edac/i82975x_edac.c 				     page, offst, info->derrsyn,
page              747 drivers/edac/ppc4xx_edac.c 	const unsigned long page = bear >> PAGE_SHIFT;
page              757 drivers/edac/ppc4xx_edac.c 					     page, offset, 0,
page              163 drivers/edac/r82600_edac.c 	u32 eapaddr, page;
page              175 drivers/edac/r82600_edac.c 	page = eapaddr >> PAGE_SHIFT;
page              182 drivers/edac/r82600_edac.c 					     page, 0, syndrome,
page              183 drivers/edac/r82600_edac.c 					     edac_mc_find_csrow_by_page(mci, page),
page              194 drivers/edac/r82600_edac.c 					     page, 0, 0,
page              195 drivers/edac/r82600_edac.c 					     edac_mc_find_csrow_by_page(mci, page),
page              207 drivers/edac/thunderx_edac.c 	struct page *mem;
page               89 drivers/firewire/ohci.c 	struct page *pages[AR_BUFFERS];
page              616 drivers/firewire/ohci.c static int read_paged_phy_reg(struct fw_ohci *ohci, int page, int addr)
page              620 drivers/firewire/ohci.c 	ret = update_phy_reg(ohci, 7, PHY_PAGE_SELECT, page << 5);
page              975 drivers/firewire/ohci.c 	struct page *pages[AR_BUFFERS + AR_WRAPAROUND_PAGES];
page             3203 drivers/firewire/ohci.c 	int page, end_page, i, length, offset;
page             3263 drivers/firewire/ohci.c 		page               = payload_index >> PAGE_SHIFT;
page             3265 drivers/firewire/ohci.c 		next_page_index    = (page + 1) << PAGE_SHIFT;
page             3270 drivers/firewire/ohci.c 		page_bus = page_private(buffer->pages[page]);
page             3306 drivers/firewire/ohci.c 	int page, offset, packet_count, header_size, payload_per_buffer;
page             3317 drivers/firewire/ohci.c 	page     = payload >> PAGE_SHIFT;
page             3353 drivers/firewire/ohci.c 			page_bus = page_private(buffer->pages[page]);
page             3363 drivers/firewire/ohci.c 				page++;
page             3384 drivers/firewire/ohci.c 	int page, offset, rest, z, i, length;
page             3386 drivers/firewire/ohci.c 	page   = payload >> PAGE_SHIFT;
page             3393 drivers/firewire/ohci.c 	if (WARN_ON(offset & 3 || rest & 3 || page + z > buffer->page_count))
page             3416 drivers/firewire/ohci.c 		page_bus = page_private(buffer->pages[page]);
page             3425 drivers/firewire/ohci.c 		page++;
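The firewire ohci.c hits above all do the same arithmetic: split a byte offset in an isochronous buffer into a page index plus an in-page offset, then recover the per-page bus address stashed via page_private(). A sketch of that translation:

    static dma_addr_t payload_bus_sketch(struct fw_iso_buffer *buffer,
                                         unsigned long payload)
    {
            int page = payload >> PAGE_SHIFT;      /* which backing page */
            int offset = payload & ~PAGE_MASK;     /* offset within it   */

            /* bus address of the page was stored with set_page_private() */
            return page_private(buffer->pages[page]) + offset;
    }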
page               24 drivers/firmware/dmi-id.c 				  char *page)
page               28 drivers/firmware/dmi-id.c 	len = scnprintf(page, PAGE_SIZE, "%s\n", dmi_get_system_info(field));
page               29 drivers/firmware/dmi-id.c 	page[len-1] = '\n';
page              126 drivers/firmware/dmi-id.c 				     struct device_attribute *attr, char *page)
page              129 drivers/firmware/dmi-id.c 	r = get_modalias(page, PAGE_SIZE-1);
page              130 drivers/firmware/dmi-id.c 	page[r] = '\n';
page              131 drivers/firmware/dmi-id.c 	page[r+1] = 0;
page              172 drivers/firmware/efi/capsule-loader.c 	struct page *page;
page              185 drivers/firmware/efi/capsule-loader.c 		page = alloc_page(GFP_KERNEL);
page              186 drivers/firmware/efi/capsule-loader.c 		if (!page) {
page              191 drivers/firmware/efi/capsule-loader.c 		cap_info->pages[cap_info->index] = page;
page              192 drivers/firmware/efi/capsule-loader.c 		cap_info->phys[cap_info->index] = page_to_phys(page);
page              196 drivers/firmware/efi/capsule-loader.c 		page = cap_info->pages[cap_info->index - 1];
page              199 drivers/firmware/efi/capsule-loader.c 	kbuff = kmap(page);
page              219 drivers/firmware/efi/capsule-loader.c 	kunmap(page);
page              238 drivers/firmware/efi/capsule-loader.c 	kunmap(page);
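The capsule-loader hits above allocate a page, record both its struct page and physical address for the firmware-facing table, then kmap() it for the CPU-side copy. A condensed sketch; the index bookkeeping of the real write path is compressed into two output parameters:

    static void *capsule_page_sketch(struct page **slot, phys_addr_t *phys)
    {
            struct page *page = alloc_page(GFP_KERNEL);

            if (!page)
                    return NULL;
            *slot = page;                 /* kept for later free     */
            *phys = page_to_phys(page);   /* handed to the firmware  */
            return kmap(page);            /* caller pairs with kunmap(page) */
    }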
page              143 drivers/firmware/efi/capsule.c 			  struct page **sg_pages, int reset)
page              221 drivers/firmware/efi/capsule.c 	struct page **sg_pages;
page               24 drivers/firmware/efi/memmap.c 	struct page *p = alloc_pages(GFP_KERNEL, order);
page               19 drivers/fpga/dfl-afu-dma-region.c static void put_all_pages(struct page **pages, int npages)
page               54 drivers/fpga/dfl-afu-dma-region.c 	region->pages = kcalloc(npages, sizeof(struct page *), GFP_KERNEL);
page               57 drivers/fpga/dfl-afu.h 	struct page **pages;
page              251 drivers/fpga/fpga-mgr.c 	struct page **pages;
page              272 drivers/fpga/fpga-mgr.c 	pages = kmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
page             1275 drivers/gpu/drm/amd/amdgpu/amdgpu.h 			       char *page)				\
page             1278 drivers/gpu/drm/amd/amdgpu/amdgpu.h 	return sprintf(page, _object "\n");				\
page               38 drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h 	struct page			**user_pages;
page              625 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 					sizeof(struct page *),
page               74 drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c 	struct page *dummy_page = adev->mman.bdev.glob->dummy_read_page;
page              311 drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c 		     int pages, struct page **pagelist, dma_addr_t *dma_addr,
page               50 drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h 	struct page			**pages;
page               70 drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h 		     int pages, struct page **pagelist,
page              252 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	else if (ring == &adev->sdma.instance[0].page)
page               42 drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c 		    ring == &adev->sdma.instance[i].page)
page               55 drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c 			ring == &adev->sdma.instance[i].page) {
page               49 drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h 	struct amdgpu_ring	page;
page              787 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
page              925 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
page             2267 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 		struct page *page;
page             2273 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 		page = adev->gart.pages[p];
page             2274 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 		if (page) {
page             2275 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 			ptr = kmap(page);
page             2326 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 		struct page *p;
page             2381 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 		struct page *p;
page              107 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages);
page              111 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h 					       struct page **pages)
page              121 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages);
page              858 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 		sdma[i] = &adev->sdma.instance[i].page;
page             1075 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	struct amdgpu_ring *ring = &adev->sdma.instance[i].page;
page             1346 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 			struct amdgpu_ring *page = &adev->sdma.instance[i].page;
page             1348 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 			r = amdgpu_ring_test_helper(page);
page             1352 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 			if (adev->mman.buffer_funcs_ring == page)
page             1834 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 			ring = &adev->sdma.instance[i].page;
page             1880 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 			amdgpu_ring_fini(&adev->sdma.instance[i].page);
page             2014 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 			amdgpu_fence_process(&adev->sdma.instance[instance].page);
page             2021 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 			amdgpu_fence_process(&adev->sdma.instance[instance].page);
page             2400 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 				adev->sdma.instance[i].page.funcs =
page             2403 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 				adev->sdma.instance[i].page.funcs =
page             2405 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 			adev->sdma.instance[i].page.me = i;
page             2511 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 		adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].page;
page             2532 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 			sched = &adev->sdma.instance[i].page.sched;
page               59 drivers/gpu/drm/amd/amdkfd/kfd_events.c static uint64_t *page_slots(struct kfd_signal_page *page)
page               61 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	return page->kernel_address;
page               67 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	struct kfd_signal_page *page;
page               69 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	page = kzalloc(sizeof(*page), GFP_KERNEL);
page               70 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	if (!page)
page               82 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	page->kernel_address = backing_store;
page               83 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	page->need_to_free_pages = true;
page               85 drivers/gpu/drm/amd/amdkfd/kfd_events.c 			page, p);
page               87 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	return page;
page               90 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	kfree(page);
page              271 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	struct kfd_signal_page *page = p->signal_page;
page              273 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	if (page) {
page              274 drivers/gpu/drm/amd/amdkfd/kfd_events.c 		if (page->need_to_free_pages)
page              275 drivers/gpu/drm/amd/amdkfd/kfd_events.c 			free_pages((unsigned long)page->kernel_address,
page              277 drivers/gpu/drm/amd/amdkfd/kfd_events.c 		kfree(page);
page              301 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	struct kfd_signal_page *page;
page              306 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	page = kzalloc(sizeof(*page), GFP_KERNEL);
page              307 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	if (!page)
page              314 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	page->kernel_address = kernel_address;
page              316 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	p->signal_page = page;
page              785 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	struct kfd_signal_page *page;
page              795 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	page = p->signal_page;
page              796 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	if (!page) {
page              802 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	pfn = __pa(page->kernel_address);
page              816 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	page->user_address = (uint64_t __user *)vma->vm_start;
page               50 drivers/gpu/drm/armada/armada_gem.c 	if (dobj->page) {
page               53 drivers/gpu/drm/armada/armada_gem.c 		__free_pages(dobj->page, order);
page               83 drivers/gpu/drm/armada/armada_gem.c 	if (obj->page || obj->linear)
page               96 drivers/gpu/drm/armada/armada_gem.c 		struct page *p = alloc_pages(GFP_KERNEL, order);
page              101 drivers/gpu/drm/armada/armada_gem.c 			obj->page = p;
page              131 drivers/gpu/drm/armada/armada_gem.c 	if (!obj->page) {
page              399 drivers/gpu/drm/armada/armada_gem.c 			struct page *page;
page              401 drivers/gpu/drm/armada/armada_gem.c 			page = shmem_read_mapping_page(mapping, i);
page              402 drivers/gpu/drm/armada/armada_gem.c 			if (IS_ERR(page)) {
page              407 drivers/gpu/drm/armada/armada_gem.c 			sg_set_page(sg, page, PAGE_SIZE, 0);
page              414 drivers/gpu/drm/armada/armada_gem.c 	} else if (dobj->page) {
page              419 drivers/gpu/drm/armada/armada_gem.c 		sg_set_page(sgt->sgl, dobj->page, dobj->obj.size, 0);
page               18 drivers/gpu/drm/armada/armada_gem.h 	struct page		*page;		/* for page backed */
page               72 drivers/gpu/drm/bridge/parade-ps8622.c static int ps8622_set(struct i2c_client *client, u8 page, u8 reg, u8 val)
page               79 drivers/gpu/drm/bridge/parade-ps8622.c 	msg.addr = client->addr + page;
page               87 drivers/gpu/drm/bridge/parade-ps8622.c 			client->addr + page, reg, val, ret);
page              514 drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c 	.page = snd_pcm_lib_get_vmalloc_page,
page              321 drivers/gpu/drm/drm_agpsupport.c 	int page;
page              328 drivers/gpu/drm/drm_agpsupport.c 	page = (request->offset + PAGE_SIZE - 1) / PAGE_SIZE;
page              329 drivers/gpu/drm/drm_agpsupport.c 	retcode = drm_bind_agp(entry->memory, page);
page              332 drivers/gpu/drm/drm_agpsupport.c 	entry->bound = dev->agp->base + (page << PAGE_SHIFT);
page               45 drivers/gpu/drm/drm_cache.c drm_clflush_page(struct page *page)
page               51 drivers/gpu/drm/drm_cache.c 	if (unlikely(page == NULL))
page               54 drivers/gpu/drm/drm_cache.c 	page_virtual = kmap_atomic(page);
page               60 drivers/gpu/drm/drm_cache.c static void drm_cache_flush_clflush(struct page *pages[],
page               81 drivers/gpu/drm/drm_cache.c drm_clflush_pages(struct page *pages[], unsigned long num_pages)
page               96 drivers/gpu/drm/drm_cache.c 		struct page *page = pages[i];
page               99 drivers/gpu/drm/drm_cache.c 		if (unlikely(page == NULL))
page              102 drivers/gpu/drm/drm_cache.c 		page_virtual = kmap_atomic(page);
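The drm_cache.c hits above show the per-page cache-flush idiom: map the page to a temporary kernel address, flush through that alias, unmap. A sketch using drm_clflush_virt_range() as the arch-specific flush, matching the hits:

    static void clflush_page_sketch(struct page *page)
    {
            void *page_virtual;

            if (unlikely(page == NULL))
                    return;
            page_virtual = kmap_atomic(page);
            drm_clflush_virt_range(page_virtual, PAGE_SIZE);
            kunmap_atomic(page_virtual);
    }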
page              660 drivers/gpu/drm/drm_fb_helper.c 	struct page *page;
page              665 drivers/gpu/drm/drm_fb_helper.c 	list_for_each_entry(page, pagelist, lru) {
page              666 drivers/gpu/drm/drm_fb_helper.c 		start = page->index << PAGE_SHIFT;
page              553 drivers/gpu/drm/drm_gem.c struct page **drm_gem_get_pages(struct drm_gem_object *obj)
page              556 drivers/gpu/drm/drm_gem.c 	struct page *p, **pages;
page              571 drivers/gpu/drm/drm_gem.c 	pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
page              616 drivers/gpu/drm/drm_gem.c void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
page              141 drivers/gpu/drm/drm_gem_shmem_helper.c 	struct page **pages;
page              477 drivers/gpu/drm/drm_gem_shmem_helper.c 	struct page *page;
page              482 drivers/gpu/drm/drm_gem_shmem_helper.c 	page = shmem->pages[vmf->pgoff];
page              484 drivers/gpu/drm/drm_gem_shmem_helper.c 	return vmf_insert_page(vma, vmf->address, page);
page              669 drivers/gpu/drm/drm_gem_shmem_helper.c 	shmem->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
page               65 drivers/gpu/drm/drm_memory.c 	struct page **page_map;
page               66 drivers/gpu/drm/drm_memory.c 	struct page **phys_page_map;
page               89 drivers/gpu/drm/drm_memory.c 	page_map = vmalloc(array_size(num_pages, sizeof(struct page *)));
page              793 drivers/gpu/drm/drm_prime.c struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages)
page              947 drivers/gpu/drm/drm_prime.c int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
page              952 drivers/gpu/drm/drm_prime.c 	struct page *page;
page              959 drivers/gpu/drm/drm_prime.c 		page = sg_page(sg);
page              966 drivers/gpu/drm/drm_prime.c 				pages[index] = page;
page              970 drivers/gpu/drm/drm_prime.c 			page++;
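The drm_prime.c hits above expand an sg_table back into a flat struct page array, emitting one page per PAGE_SIZE chunk of each entry and stepping the page pointer for contiguous runs. A sketch that assumes page-aligned entry lengths; the real helper also fills a parallel dma_addr_t array:

    static int sg_to_pages_sketch(struct sg_table *sgt, struct page **pages,
                                  int max_entries)
    {
            struct scatterlist *sg;
            int i, index = 0;

            for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                    struct page *page = sg_page(sg);
                    unsigned int n = sg->length >> PAGE_SHIFT;
                    unsigned int j;

                    for (j = 0; j < n; j++) {
                            if (index >= max_entries)
                                    return -ENOSPC;
                            pages[index++] = page++; /* contiguous within entry */
                    }
            }
            return index;
    }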
page               57 drivers/gpu/drm/drm_scatter.c 	struct page *page;
page               61 drivers/gpu/drm/drm_scatter.c 		page = entry->pagelist[i];
page               62 drivers/gpu/drm/drm_scatter.c 		if (page)
page               63 drivers/gpu/drm/drm_scatter.c 			ClearPageReserved(page);
page              148 drivers/gpu/drm/drm_vm.c 		struct page *page;
page              173 drivers/gpu/drm/drm_vm.c 		page = agpmem->memory->pages[offset];
page              174 drivers/gpu/drm/drm_vm.c 		get_page(page);
page              175 drivers/gpu/drm/drm_vm.c 		vmf->page = page;
page              182 drivers/gpu/drm/drm_vm.c 		     page_count(page));
page              211 drivers/gpu/drm/drm_vm.c 	struct page *page;
page              218 drivers/gpu/drm/drm_vm.c 	page = vmalloc_to_page((void *)i);
page              219 drivers/gpu/drm/drm_vm.c 	if (!page)
page              221 drivers/gpu/drm/drm_vm.c 	get_page(page);
page              222 drivers/gpu/drm/drm_vm.c 	vmf->page = page;
page              315 drivers/gpu/drm/drm_vm.c 	struct page *page;
page              325 drivers/gpu/drm/drm_vm.c 	page = virt_to_page((void *)dma->pagelist[page_nr]);
page              327 drivers/gpu/drm/drm_vm.c 	get_page(page);
page              328 drivers/gpu/drm/drm_vm.c 	vmf->page = page;
page              352 drivers/gpu/drm/drm_vm.c 	struct page *page;
page              362 drivers/gpu/drm/drm_vm.c 	page = entry->pagelist[page_offset];
page              363 drivers/gpu/drm/drm_vm.c 	get_page(page);
page              364 drivers/gpu/drm/drm_vm.c 	vmf->page = page;
page              198 drivers/gpu/drm/etnaviv/etnaviv_dump.c 		struct page **pages;
page               61 drivers/gpu/drm/etnaviv/etnaviv_gem.c 	struct page **p = drm_gem_get_pages(&etnaviv_obj->base);
page               89 drivers/gpu/drm/etnaviv/etnaviv_gem.c struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
page              178 drivers/gpu/drm/etnaviv/etnaviv_gem.c 	struct page **pages, *page;
page              202 drivers/gpu/drm/etnaviv/etnaviv_gem.c 	page = pages[pgoff];
page              205 drivers/gpu/drm/etnaviv/etnaviv_gem.c 	     page_to_pfn(page), page_to_pfn(page) << PAGE_SHIFT);
page              207 drivers/gpu/drm/etnaviv/etnaviv_gem.c 	return vmf_insert_page(vma, vmf->address, page);
page              256 drivers/gpu/drm/etnaviv/etnaviv_gem.c 	struct page **pages;
page              353 drivers/gpu/drm/etnaviv/etnaviv_gem.c 	struct page **pages;
page              660 drivers/gpu/drm/etnaviv/etnaviv_gem.c 	struct page **pvec = NULL;
page              669 drivers/gpu/drm/etnaviv/etnaviv_gem.c 	pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
page              676 drivers/gpu/drm/etnaviv/etnaviv_gem.c 		struct page **pages = pvec + pinned;
page               46 drivers/gpu/drm/etnaviv/etnaviv_gem.h 	struct page **pages;
page              119 drivers/gpu/drm/etnaviv/etnaviv_gem.h struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *obj);
page              123 drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c 	etnaviv_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
page              381 drivers/gpu/drm/exynos/exynos_drm_g2d.c 	struct page **pages;
page               58 drivers/gpu/drm/exynos/exynos_drm_gem.c 	exynos_gem->pages = kvmalloc_array(nr_pages, sizeof(struct page *),
page              490 drivers/gpu/drm/exynos/exynos_drm_gem.c 	exynos_gem->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
page               51 drivers/gpu/drm/exynos/exynos_drm_gem.h 	struct page		**pages;
page               80 drivers/gpu/drm/gma500/gtt.c 	struct page **pages;
page              196 drivers/gpu/drm/gma500/gtt.c 	struct page **pages;
page               38 drivers/gpu/drm/gma500/gtt.h 	struct page **pages;		/* Backing pages if present */
page              696 drivers/gpu/drm/gma500/mmu.c int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
page               39 drivers/gpu/drm/gma500/mmu.h 	struct page *p;
page               47 drivers/gpu/drm/gma500/mmu.h 	struct page *p;
page               48 drivers/gpu/drm/gma500/mmu.h 	struct page *dummy_pt;
page               49 drivers/gpu/drm/gma500/mmu.h 	struct page *dummy_page;
page               77 drivers/gpu/drm/gma500/mmu.h extern int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
page              443 drivers/gpu/drm/gma500/psb_drv.h 	struct page *scratch_page;
page              101 drivers/gpu/drm/i2c/tda998x_drv.c #define REG(page, addr) (((page) << 8) | (addr))
page               99 drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c 	struct page *page;
page              111 drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c 	page = i915_gem_object_get_page(obj, page_num);
page              112 drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c 	if (IS_ERR(page))
page              115 drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c 	return kmap(page);
page              248 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		unsigned long page; /** Currently mapped page index */
page              900 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	cache->page = -1;
page              982 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	cache->page = -1;
page              987 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 			unsigned long page)
page             1010 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj, page));
page             1012 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	cache->page = page;
page             1019 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 			 unsigned long page)
page             1066 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 				     i915_gem_object_get_dma_address(obj, page),
page             1069 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		offset += page << PAGE_SHIFT;
page             1074 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	cache->page = page;
page             1082 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 			 unsigned long page)
page             1086 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	if (cache->page == page) {
page             1091 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 			vaddr = reloc_iomap(obj, cache, page);
page             1093 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 			vaddr = reloc_kmap(obj, cache, page);
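The execbuffer hits above implement a one-entry relocation cache: cache->page records which page index is currently mapped, and a lookup only remaps on a miss. A sketch of the kmap path (the iomap variant and teardown are elided; the cache layout is simplified, with cache->page initialised to -1 as in the hits):

    struct reloc_cache_sketch {
            void *vaddr;          /* currently mapped kernel address, or NULL */
            unsigned long page;   /* currently mapped page index, or -1       */
    };

    static void *reloc_vaddr_sketch(struct drm_i915_gem_object *obj,
                                    struct reloc_cache_sketch *cache,
                                    unsigned long page)
    {
            if (cache->page == page)
                    return cache->vaddr;  /* hit: reuse the mapping */

            if (cache->vaddr)
                    kunmap_atomic(cache->vaddr);
            cache->vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj, page));
            cache->page = page;
            return cache->vaddr;
    }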
page               83 drivers/gpu/drm/i915/gem/i915_gem_internal.c 		struct page *page;
page               86 drivers/gpu/drm/i915/gem/i915_gem_internal.c 			page = alloc_pages(gfp | (order ? QUIET : MAYFAIL),
page               88 drivers/gpu/drm/i915/gem/i915_gem_internal.c 			if (page)
page               97 drivers/gpu/drm/i915/gem/i915_gem_internal.c 		sg_set_page(sg, page, PAGE_SIZE << order, 0);
page              218 drivers/gpu/drm/i915/gem/i915_gem_object.h struct page *
page              222 drivers/gpu/drm/i915/gem/i915_gem_object.h struct page *
page              233 drivers/gpu/drm/i915/gem/i915_gem_pages.c 	struct page *page;
page              234 drivers/gpu/drm/i915/gem/i915_gem_pages.c 	struct page *stack_pages[32];
page              235 drivers/gpu/drm/i915/gem/i915_gem_pages.c 	struct page **pages = stack_pages;
page              251 drivers/gpu/drm/i915/gem/i915_gem_pages.c 	for_each_sgt_page(page, sgt_iter, sgt)
page              252 drivers/gpu/drm/i915/gem/i915_gem_pages.c 		pages[i++] = page;
page              486 drivers/gpu/drm/i915/gem/i915_gem_pages.c struct page *
page              499 drivers/gpu/drm/i915/gem/i915_gem_pages.c struct page *
page              503 drivers/gpu/drm/i915/gem/i915_gem_pages.c 	struct page *page;
page              505 drivers/gpu/drm/i915/gem/i915_gem_pages.c 	page = i915_gem_object_get_page(obj, n);
page              507 drivers/gpu/drm/i915/gem/i915_gem_pages.c 		set_page_dirty(page);
page              509 drivers/gpu/drm/i915/gem/i915_gem_pages.c 	return page;
page               56 drivers/gpu/drm/i915/gem/i915_gem_phys.c 	sg_assign_page(sg, (struct page *)vaddr);
page               62 drivers/gpu/drm/i915/gem/i915_gem_phys.c 		struct page *page;
page               65 drivers/gpu/drm/i915/gem/i915_gem_phys.c 		page = shmem_read_mapping_page(mapping, i);
page               66 drivers/gpu/drm/i915/gem/i915_gem_phys.c 		if (IS_ERR(page))
page               69 drivers/gpu/drm/i915/gem/i915_gem_phys.c 		src = kmap_atomic(page);
page               74 drivers/gpu/drm/i915/gem/i915_gem_phys.c 		put_page(page);
page              108 drivers/gpu/drm/i915/gem/i915_gem_phys.c 			struct page *page;
page              111 drivers/gpu/drm/i915/gem/i915_gem_phys.c 			page = shmem_read_mapping_page(mapping, i);
page              112 drivers/gpu/drm/i915/gem/i915_gem_phys.c 			if (IS_ERR(page))
page              115 drivers/gpu/drm/i915/gem/i915_gem_phys.c 			dst = kmap_atomic(page);
page              120 drivers/gpu/drm/i915/gem/i915_gem_phys.c 			set_page_dirty(page);
page              122 drivers/gpu/drm/i915/gem/i915_gem_phys.c 				mark_page_accessed(page);
page              123 drivers/gpu/drm/i915/gem/i915_gem_phys.c 			put_page(page);
page               35 drivers/gpu/drm/i915/gem/i915_gem_shmem.c 	struct page *page;
page               91 drivers/gpu/drm/i915/gem/i915_gem_shmem.c 			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
page               92 drivers/gpu/drm/i915/gem/i915_gem_shmem.c 			if (!IS_ERR(page))
page               96 drivers/gpu/drm/i915/gem/i915_gem_shmem.c 				ret = PTR_ERR(page);
page              135 drivers/gpu/drm/i915/gem/i915_gem_shmem.c 		    page_to_pfn(page) != last_pfn + 1) {
page              141 drivers/gpu/drm/i915/gem/i915_gem_shmem.c 			sg_set_page(sg, page, PAGE_SIZE, 0);
page              145 drivers/gpu/drm/i915/gem/i915_gem_shmem.c 		last_pfn = page_to_pfn(page);
page              166 drivers/gpu/drm/i915/gem/i915_gem_shmem.c 			for_each_sgt_page(page, sgt_iter, st)
page              167 drivers/gpu/drm/i915/gem/i915_gem_shmem.c 				put_page(page);
page              192 drivers/gpu/drm/i915/gem/i915_gem_shmem.c 	for_each_sgt_page(page, sgt_iter, st) {
page              193 drivers/gpu/drm/i915/gem/i915_gem_shmem.c 		if (!pagevec_add(&pvec, page))
page              253 drivers/gpu/drm/i915/gem/i915_gem_shmem.c 		struct page *page;
page              255 drivers/gpu/drm/i915/gem/i915_gem_shmem.c 		page = find_lock_entry(mapping, i);
page              256 drivers/gpu/drm/i915/gem/i915_gem_shmem.c 		if (!page || xa_is_value(page))
page              259 drivers/gpu/drm/i915/gem/i915_gem_shmem.c 		if (!page_mapped(page) && clear_page_dirty_for_io(page)) {
page              262 drivers/gpu/drm/i915/gem/i915_gem_shmem.c 			SetPageReclaim(page);
page              263 drivers/gpu/drm/i915/gem/i915_gem_shmem.c 			ret = mapping->a_ops->writepage(page, &wbc);
page              264 drivers/gpu/drm/i915/gem/i915_gem_shmem.c 			if (!PageWriteback(page))
page              265 drivers/gpu/drm/i915/gem/i915_gem_shmem.c 				ClearPageReclaim(page);
page              269 drivers/gpu/drm/i915/gem/i915_gem_shmem.c 		unlock_page(page);
page              271 drivers/gpu/drm/i915/gem/i915_gem_shmem.c 		put_page(page);
page              298 drivers/gpu/drm/i915/gem/i915_gem_shmem.c 	struct page *page;
page              310 drivers/gpu/drm/i915/gem/i915_gem_shmem.c 	for_each_sgt_page(page, sgt_iter, pages) {
page              312 drivers/gpu/drm/i915/gem/i915_gem_shmem.c 			set_page_dirty(page);
page              315 drivers/gpu/drm/i915/gem/i915_gem_shmem.c 			mark_page_accessed(page);
page              317 drivers/gpu/drm/i915/gem/i915_gem_shmem.c 		if (!pagevec_add(&pvec, page))
page              369 drivers/gpu/drm/i915/gem/i915_gem_shmem.c 		struct page *page;
page              389 drivers/gpu/drm/i915/gem/i915_gem_shmem.c 					    &page, &data);
page              393 drivers/gpu/drm/i915/gem/i915_gem_shmem.c 		vaddr = kmap_atomic(page);
page              401 drivers/gpu/drm/i915/gem/i915_gem_shmem.c 					  page, data);
page              550 drivers/gpu/drm/i915/gem/i915_gem_shmem.c 		struct page *page;
page              555 drivers/gpu/drm/i915/gem/i915_gem_shmem.c 					    &page, &pgdata);
page              559 drivers/gpu/drm/i915/gem/i915_gem_shmem.c 		vaddr = kmap(page);
page              561 drivers/gpu/drm/i915/gem/i915_gem_shmem.c 		kunmap(page);
page              565 drivers/gpu/drm/i915/gem/i915_gem_shmem.c 					  page, pgdata);
page              430 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 			       struct page **pvec, unsigned long num_pages)
page              478 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 	struct page **pvec;
page              484 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 	pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
page              584 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 	struct page **pvec;
page              618 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 		pvec = kvmalloc_array(num_pages, sizeof(struct page *),
page              663 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 	struct page *page;
page              682 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 	for_each_sgt_page(page, sgt_iter, pages) {
page              683 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 		if (obj->mm.dirty && trylock_page(page)) {
page              702 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 			set_page_dirty(page);
page              703 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 			unlock_page(page);
page              706 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 		mark_page_accessed(page);
page              707 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 		put_page(page);
page               44 drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c 		struct page *page;
page               46 drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c 		page = alloc_page(GFP | __GFP_HIGHMEM);
page               47 drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c 		if (!page) {
page               52 drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c 		sg_set_page(sg, page, PAGE_SIZE, 0);
page               90 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 			struct page *page;
page               93 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 			page = alloc_pages(GFP | __GFP_ZERO, order);
page               94 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 			if (!page)
page               97 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 			sg_set_page(sg, page, page_size, 0);
page               19 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c 	struct page *page;
page               28 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c 	page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
page               29 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c 	map = kmap_atomic(page);
page               51 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c 	struct page *page;
page               60 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c 	page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
page               61 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c 	map = kmap_atomic(page);
page               84 drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c 	unsigned long page;
page              110 drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c 	for_each_prime_number_from(page, 1, npages) {
page              112 drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c 			compute_partial_view(obj, page, MIN_CHUNK_PAGES);
page              114 drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c 		struct page *p;
page              125 drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c 			       page, (int)PTR_ERR(vma));
page              129 drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c 		n = page - view.partial.offset;
page              136 drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c 			       page, (int)PTR_ERR(io));
page              140 drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c 		iowrite32(page, io + n * PAGE_SIZE / sizeof(*io));
page              143 drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c 		offset = tiled_offset(tile, page << PAGE_SHIFT);
page              152 drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c 		if (*cpu != (u32)page) {
page              154 drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c 			       page, n,
page              163 drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c 			       (u32)page, *cpu);
page              116 drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c 	mock = kmalloc(sizeof(*mock) + npages * sizeof(struct page *),
page               14 drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.h 	struct page *pages[];
page             2255 drivers/gpu/drm/i915/gt/intel_lrc.c 	struct page *page;
page             2290 drivers/gpu/drm/i915/gt/intel_lrc.c 	page = i915_gem_object_get_dirty_page(wa_ctx->vma->obj, 0);
page             2291 drivers/gpu/drm/i915/gt/intel_lrc.c 	batch = batch_ptr = kmap_atomic(page);
page              517 drivers/gpu/drm/i915/gt/intel_ringbuffer.c static struct page *status_page(struct intel_engine_cs *engine)
page               19 drivers/gpu/drm/i915/gt/selftest_timeline.c static struct page *hwsp_page(struct intel_timeline *tl)
page              718 drivers/gpu/drm/i915/gvt/gtt.c 	spt->shadow_page.page = alloc_page(gfp_mask);
page              719 drivers/gpu/drm/i915/gvt/gtt.c 	if (!spt->shadow_page.page) {
page              728 drivers/gpu/drm/i915/gvt/gtt.c 	__free_page(spt->shadow_page.page);
page              845 drivers/gpu/drm/i915/gvt/gtt.c 	daddr = dma_map_page(kdev, spt->shadow_page.page,
page              852 drivers/gpu/drm/i915/gvt/gtt.c 	spt->shadow_page.vaddr = page_address(spt->shadow_page.page);
page             2377 drivers/gpu/drm/i915/gvt/gtt.c 	gtt->scratch_pt[type].page = virt_to_page(scratch_pt);
page             2417 drivers/gpu/drm/i915/gvt/gtt.c 		if (vgpu->gtt.scratch_pt[i].page != NULL) {
page             2421 drivers/gpu/drm/i915/gvt/gtt.c 			__free_page(vgpu->gtt.scratch_pt[i].page);
page             2422 drivers/gpu/drm/i915/gvt/gtt.c 			vgpu->gtt.scratch_pt[i].page = NULL;
page             2684 drivers/gpu/drm/i915/gvt/gtt.c 	void *page;
page             2693 drivers/gpu/drm/i915/gvt/gtt.c 	page = (void *)get_zeroed_page(GFP_KERNEL);
page             2694 drivers/gpu/drm/i915/gvt/gtt.c 	if (!page) {
page             2699 drivers/gpu/drm/i915/gvt/gtt.c 	daddr = dma_map_page(dev, virt_to_page(page), 0,
page             2703 drivers/gpu/drm/i915/gvt/gtt.c 		__free_page(virt_to_page(page));
page             2707 drivers/gpu/drm/i915/gvt/gtt.c 	gvt->gtt.scratch_page = virt_to_page(page);
page               94 drivers/gpu/drm/i915/gvt/gtt.h 	struct page *scratch_page;
page              194 drivers/gpu/drm/i915/gvt/gtt.h 	struct page *page;
page              240 drivers/gpu/drm/i915/gvt/gtt.h 		struct page *page;
page              139 drivers/gpu/drm/i915/gvt/kvmgt.c 		unsigned long size, struct page **page)
page              180 drivers/gpu/drm/i915/gvt/kvmgt.c 	*page = pfn_to_page(base_pfn);
page              191 drivers/gpu/drm/i915/gvt/kvmgt.c 	struct page *page = NULL;
page              194 drivers/gpu/drm/i915/gvt/kvmgt.c 	ret = gvt_pin_guest_page(vgpu, gfn, size, &page);
page              199 drivers/gpu/drm/i915/gvt/kvmgt.c 	*dma_addr = dma_map_page(dev, page, 0, size, PCI_DMA_BIDIRECTIONAL);
page              202 drivers/gpu/drm/i915/gvt/kvmgt.c 			     page_to_pfn(page), ret);
page               63 drivers/gpu/drm/i915/gvt/scheduler.c 	struct page *page;
page               71 drivers/gpu/drm/i915/gvt/scheduler.c 	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
page               72 drivers/gpu/drm/i915/gvt/scheduler.c 	shadow_ring_context = kmap(page);
page               75 drivers/gpu/drm/i915/gvt/scheduler.c 	kunmap(page);
page              134 drivers/gpu/drm/i915/gvt/scheduler.c 	struct page *page;
page              139 drivers/gpu/drm/i915/gvt/scheduler.c 	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
page              140 drivers/gpu/drm/i915/gvt/scheduler.c 	shadow_ring_context = kmap(page);
page              172 drivers/gpu/drm/i915/gvt/scheduler.c 	kunmap(page);
page              197 drivers/gpu/drm/i915/gvt/scheduler.c 		page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
page              198 drivers/gpu/drm/i915/gvt/scheduler.c 		dst = kmap(page);
page              201 drivers/gpu/drm/i915/gvt/scheduler.c 		kunmap(page);
page              805 drivers/gpu/drm/i915/gvt/scheduler.c 	struct page *page;
page              851 drivers/gpu/drm/i915/gvt/scheduler.c 		page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
page              852 drivers/gpu/drm/i915/gvt/scheduler.c 		src = kmap(page);
page              855 drivers/gpu/drm/i915/gvt/scheduler.c 		kunmap(page);
page              862 drivers/gpu/drm/i915/gvt/scheduler.c 	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
page              863 drivers/gpu/drm/i915/gvt/scheduler.c 	shadow_ring_context = kmap(page);
page              881 drivers/gpu/drm/i915/gvt/scheduler.c 	kunmap(page);
page              244 drivers/gpu/drm/i915/i915_gem.c shmem_pread(struct page *page, int offset, int len, char __user *user_data,
page              250 drivers/gpu/drm/i915/i915_gem.c 	vaddr = kmap(page);
page              257 drivers/gpu/drm/i915/i915_gem.c 	kunmap(page);
page              286 drivers/gpu/drm/i915/i915_gem.c 		struct page *page = i915_gem_object_get_page(obj, idx);
page              289 drivers/gpu/drm/i915/i915_gem.c 		ret = shmem_pread(page, offset, length, user_data,
page              658 drivers/gpu/drm/i915/i915_gem.c shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
page              665 drivers/gpu/drm/i915/i915_gem.c 	vaddr = kmap(page);
page              674 drivers/gpu/drm/i915/i915_gem.c 	kunmap(page);
page              712 drivers/gpu/drm/i915/i915_gem.c 		struct page *page = i915_gem_object_get_page(obj, idx);
page              715 drivers/gpu/drm/i915/i915_gem.c 		ret = shmem_pwrite(page, offset, length, user_data,
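The i915_gem.c hits above show the shmem pread/pwrite inner loop: kmap the page, copy a sub-range to or from user space, kunmap. A sketch of the read side; the needs_clflush handling of the real code is dropped here:

    static int shmem_pread_sketch(struct page *page, int offset, int len,
                                  char __user *user_data)
    {
            char *vaddr = kmap(page);
            unsigned long ret;

            /* returns the number of bytes that could not be copied */
            ret = __copy_to_user(user_data, vaddr + offset, len);
            kunmap(page);

            return ret ? -EFAULT : 0;
    }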
page              729 drivers/gpu/drm/i915/i915_gem_fence_reg.c static void i915_gem_swizzle_page(struct page *page)
page              735 drivers/gpu/drm/i915/i915_gem_fence_reg.c 	vaddr = kmap(page);
page              743 drivers/gpu/drm/i915/i915_gem_fence_reg.c 	kunmap(page);
page              764 drivers/gpu/drm/i915/i915_gem_fence_reg.c 	struct page *page;
page              771 drivers/gpu/drm/i915/i915_gem_fence_reg.c 	for_each_sgt_page(page, sgt_iter, pages) {
page              772 drivers/gpu/drm/i915/i915_gem_fence_reg.c 		char new_bit_17 = page_to_phys(page) >> 17;
page              774 drivers/gpu/drm/i915/i915_gem_fence_reg.c 			i915_gem_swizzle_page(page);
page              775 drivers/gpu/drm/i915/i915_gem_fence_reg.c 			set_page_dirty(page);
page              796 drivers/gpu/drm/i915/i915_gem_fence_reg.c 	struct page *page;
page              810 drivers/gpu/drm/i915/i915_gem_fence_reg.c 	for_each_sgt_page(page, sgt_iter, pages) {
page              811 drivers/gpu/drm/i915/i915_gem_fence_reg.c 		if (page_to_phys(page) & (1 << 17))
page              336 drivers/gpu/drm/i915/i915_gem_gtt.c static struct page *stash_pop_page(struct pagestash *stash)
page              338 drivers/gpu/drm/i915/i915_gem_gtt.c 	struct page *page = NULL;
page              342 drivers/gpu/drm/i915/i915_gem_gtt.c 		page = stash->pvec.pages[--stash->pvec.nr];
page              345 drivers/gpu/drm/i915/i915_gem_gtt.c 	return page;
page              365 drivers/gpu/drm/i915/i915_gem_gtt.c static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
page              368 drivers/gpu/drm/i915/i915_gem_gtt.c 	struct page *page;
page              373 drivers/gpu/drm/i915/i915_gem_gtt.c 	page = stash_pop_page(&vm->free_pages);
page              374 drivers/gpu/drm/i915/i915_gem_gtt.c 	if (page)
page              375 drivers/gpu/drm/i915/i915_gem_gtt.c 		return page;
page              381 drivers/gpu/drm/i915/i915_gem_gtt.c 	page = stash_pop_page(&vm->i915->mm.wc_stash);
page              382 drivers/gpu/drm/i915/i915_gem_gtt.c 	if (page)
page              383 drivers/gpu/drm/i915/i915_gem_gtt.c 		return page;
page              395 drivers/gpu/drm/i915/i915_gem_gtt.c 		struct page *page;
page              397 drivers/gpu/drm/i915/i915_gem_gtt.c 		page = alloc_page(gfp);
page              398 drivers/gpu/drm/i915/i915_gem_gtt.c 		if (unlikely(!page))
page              401 drivers/gpu/drm/i915/i915_gem_gtt.c 		stack.pages[stack.nr++] = page;
page              405 drivers/gpu/drm/i915/i915_gem_gtt.c 		page = stack.pages[--stack.nr];
page              422 drivers/gpu/drm/i915/i915_gem_gtt.c 	return page;
page              468 drivers/gpu/drm/i915/i915_gem_gtt.c static void vm_free_page(struct i915_address_space *vm, struct page *page)
page              482 drivers/gpu/drm/i915/i915_gem_gtt.c 	pagevec_add(&vm->free_pages.pvec, page);
page              573 drivers/gpu/drm/i915/i915_gem_gtt.c 	p->page = vm_alloc_page(vm, gfp | I915_GFP_ALLOW_FAIL);
page              574 drivers/gpu/drm/i915/i915_gem_gtt.c 	if (unlikely(!p->page))
page              578 drivers/gpu/drm/i915/i915_gem_gtt.c 				      p->page, 0, PAGE_SIZE,
page              583 drivers/gpu/drm/i915/i915_gem_gtt.c 		vm_free_page(vm, p->page);
page              600 drivers/gpu/drm/i915/i915_gem_gtt.c 	vm_free_page(vm, p->page);
page              603 drivers/gpu/drm/i915/i915_gem_gtt.c #define kmap_atomic_px(px) kmap_atomic(px_base(px)->page)
page              608 drivers/gpu/drm/i915/i915_gem_gtt.c 	kunmap_atomic(memset64(kmap_atomic(p->page), val, count));
page              643 drivers/gpu/drm/i915/i915_gem_gtt.c 		struct page *page;
page              646 drivers/gpu/drm/i915/i915_gem_gtt.c 		page = alloc_pages(gfp, order);
page              647 drivers/gpu/drm/i915/i915_gem_gtt.c 		if (unlikely(!page))
page              651 drivers/gpu/drm/i915/i915_gem_gtt.c 					  page, 0, size,
page              661 drivers/gpu/drm/i915/i915_gem_gtt.c 		vm->scratch[0].base.page = page;
page              669 drivers/gpu/drm/i915/i915_gem_gtt.c 		__free_pages(page, order);
page              686 drivers/gpu/drm/i915/i915_gem_gtt.c 	__free_pages(p->page, order);
page              763 drivers/gpu/drm/i915/i915_gem_gtt.c 	u64 * const vaddr = kmap_atomic(pdma->page);
page              220 drivers/gpu/drm/i915/i915_gem_gtt.h 	struct page *page;
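The i915_gem_gtt.c hits above keep a small stash of WC pages in a pagevec and pop from the tail under a lock before falling back to the allocator. A sketch of the pop, assuming the pagestash layout implied by the hits (a lock plus a struct pagevec):

    static struct page *stash_pop_sketch(struct pagestash *stash)
    {
            struct page *page = NULL;

            spin_lock(&stash->lock);      /* lock field name assumed */
            if (stash->pvec.nr)
                    page = stash->pvec.pages[--stash->pvec.nr];
            spin_unlock(&stash->lock);

            return page;                  /* NULL means stash empty */
    }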
page              186 drivers/gpu/drm/i915/i915_gpu_error.c 		struct page *p;
page              213 drivers/gpu/drm/i915/i915_gpu_error.c 	struct page *p;
page              224 drivers/gpu/drm/i915/i915_gpu_error.c 	struct page *p = virt_to_page(addr);
page              276 drivers/gpu/drm/i915/i915_gpu_error.c 	void *page;
page              281 drivers/gpu/drm/i915/i915_gpu_error.c 	page = pool_alloc(&c->pool, ALLOW_FAIL);
page              282 drivers/gpu/drm/i915/i915_gpu_error.c 	if (!page)
page              285 drivers/gpu/drm/i915/i915_gpu_error.c 	return dst->pages[dst->page_count++] = page;
page              565 drivers/gpu/drm/i915/i915_gpu_error.c 	int page;
page              578 drivers/gpu/drm/i915/i915_gpu_error.c 	for (page = 0; page < obj->page_count; page++) {
page              582 drivers/gpu/drm/i915/i915_gpu_error.c 		if (page == obj->page_count - 1)
page              587 drivers/gpu/drm/i915/i915_gpu_error.c 			err_puts(m, ascii85_encode(obj->pages[page][i], out));
page              894 drivers/gpu/drm/i915/i915_gpu_error.c 	int page;
page              899 drivers/gpu/drm/i915/i915_gpu_error.c 	for (page = 0; page < obj->page_count; page++)
page              900 drivers/gpu/drm/i915/i915_gpu_error.c 		free_page((unsigned long)obj->pages[page]);
page              403 drivers/gpu/drm/i915/i915_vma.h static inline struct page *i915_vma_first_page(struct i915_vma *vma)
page               42 drivers/gpu/drm/i915/selftests/i915_gem.c 	unsigned long page;
page               45 drivers/gpu/drm/i915/selftests/i915_gem.c 	for (page = 0; page < size; page += PAGE_SIZE) {
page               46 drivers/gpu/drm/i915/selftests/i915_gem.c 		const dma_addr_t dma = i915->dsm.start + page;
page               52 drivers/gpu/drm/i915/selftests/scatterlist.c 		struct page *page = sg_page(sg);
page               55 drivers/gpu/drm/i915/selftests/scatterlist.c 		if (page_to_pfn(page) != pfn) {
page               57 drivers/gpu/drm/i915/selftests/scatterlist.c 			       __func__, who, pfn, page_to_pfn(page));
page               90 drivers/gpu/drm/i915/selftests/scatterlist.c 		struct page *page = sg_page_iter_page(&sgiter);
page               92 drivers/gpu/drm/i915/selftests/scatterlist.c 		if (page != pfn_to_page(pfn)) {
page               94 drivers/gpu/drm/i915/selftests/scatterlist.c 			       __func__, who, pfn, page_to_pfn(page));
page              117 drivers/gpu/drm/i915/selftests/scatterlist.c 	struct page *page;
page              121 drivers/gpu/drm/i915/selftests/scatterlist.c 	for_each_sgt_page(page, sgt, &pt->st) {
page              122 drivers/gpu/drm/i915/selftests/scatterlist.c 		if (page != pfn_to_page(pfn)) {
page              124 drivers/gpu/drm/i915/selftests/scatterlist.c 			       __func__, who, pfn, page_to_pfn(page));
page              207 drivers/gpu/drm/i915/selftests/scatterlist.c static inline bool page_contiguous(struct page *first,
page              208 drivers/gpu/drm/i915/selftests/scatterlist.c 				   struct page *last,
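The scatterlist selftest entries above verify that each page behind an sg_table has the pfn the test expects. A self-contained sketch of that walk using the generic sg_page_iter API (the selftest itself uses i915 helpers):

#include <linux/scatterlist.h>

/* Sketch: check that an sg_table's pages have consecutive pfns. */
static bool sgt_pfns_consecutive(struct sg_table *st, unsigned long pfn)
{
	struct sg_page_iter iter;

	for_each_sg_page(st->sgl, &iter, st->nents, 0) {
		if (page_to_pfn(sg_page_iter_page(&iter)) != pfn++)
			return false;	/* hole or misordered page */
	}
	return true;
}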
page               14 drivers/gpu/drm/lima/lima_object.h 	struct page **pages;
page               32 drivers/gpu/drm/mediatek/mtk_drm_gem.h 	struct page		**pages;
page              912 drivers/gpu/drm/msm/adreno/a6xx_gmu.c 	bo->pages = kcalloc(count, sizeof(struct page *), GFP_KERNEL);
page               16 drivers/gpu/drm/msm/adreno/a6xx_gmu.h 	struct page **pages;
page              282 drivers/gpu/drm/msm/msm_drv.h struct page **msm_gem_get_pages(struct drm_gem_object *obj);
page               78 drivers/gpu/drm/msm/msm_gem.c static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
page               83 drivers/gpu/drm/msm/msm_gem.c 	struct page **p;
page               86 drivers/gpu/drm/msm/msm_gem.c 	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
page              107 drivers/gpu/drm/msm/msm_gem.c static struct page **get_pages(struct drm_gem_object *obj)
page              113 drivers/gpu/drm/msm/msm_gem.c 		struct page **p;
page              186 drivers/gpu/drm/msm/msm_gem.c struct page **msm_gem_get_pages(struct drm_gem_object *obj)
page              189 drivers/gpu/drm/msm/msm_gem.c 	struct page **p;
page              255 drivers/gpu/drm/msm/msm_gem.c 	struct page **pages;
page              423 drivers/gpu/drm/msm/msm_gem.c 	struct page **pages;
page              575 drivers/gpu/drm/msm/msm_gem.c 		struct page **pages = get_pages(obj);
page             1053 drivers/gpu/drm/msm/msm_gem.c 		struct page **pages;
page             1135 drivers/gpu/drm/msm/msm_gem.c 	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
page               70 drivers/gpu/drm/msm/msm_gem.h 	struct page **pages;
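Several msm_gem.c entries above allocate a page-pointer array with kvmalloc_array(), which falls back to vmalloc for large objects. A minimal sketch of the pattern; pair the allocation with kvfree() on release:

#include <linux/mm.h>

/* Sketch: allocate the struct page * array backing a GEM object. */
static struct page **alloc_page_array_sketch(unsigned int npages)
{
	return kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
}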
page               86 drivers/gpu/drm/nouveau/dispnv50/wndw.c 		args.gf119.page = GF119_DMA_V0_PAGE_LP;
page               61 drivers/gpu/drm/nouveau/include/nvif/cl0002.h 	__u8  page;
page                6 drivers/gpu/drm/nouveau/include/nvif/if000a.h 	__u8  page;
page               40 drivers/gpu/drm/nouveau/include/nvif/if000c.h 	__u8  page;
page               71 drivers/gpu/drm/nouveau/include/nvif/if000c.h 	__u8  page;
page                8 drivers/gpu/drm/nouveau/include/nvif/mem.h 	u8  page;
page               13 drivers/gpu/drm/nouveau/include/nvif/mem.h int nvif_mem_init_type(struct nvif_mmu *mmu, s32 oclass, int type, u8 page,
page               15 drivers/gpu/drm/nouveau/include/nvif/mem.h int nvif_mem_init(struct nvif_mmu *mmu, s32 oclass, u8 type, u8 page,
page               29 drivers/gpu/drm/nouveau/include/nvif/vmm.h 	} *page;
page               37 drivers/gpu/drm/nouveau/include/nvif/vmm.h 		 u8 page, u8 align, u64 size, struct nvif_vma *);
page               31 drivers/gpu/drm/nouveau/include/nvkm/core/memory.h 	u8 (*page)(struct nvkm_memory *);
page               59 drivers/gpu/drm/nouveau/include/nvkm/core/memory.h #define nvkm_memory_page(p) (p)->func->page(p)
page               44 drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h 	u8 page;
page              143 drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h nvkm_ram_get(struct nvkm_device *, u8 heap, u8 type, u8 page, u64 size,
page               14 drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h 	u8   page:3; /* Requested page type (index, or NONE for automatic). */
page               59 drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h int nvkm_vmm_get(struct nvkm_vmm *, u8 page, u64 size, struct nvkm_vma **);
page               72 drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h 	const struct nvkm_vmm_page *page;
page              187 drivers/gpu/drm/nouveau/nouveau_bo.c 		*size = roundup_64(*size, (1 << nvbo->page));
page              188 drivers/gpu/drm/nouveau/nouveau_bo.c 		*align = max((1 <<  nvbo->page), *align);
page              261 drivers/gpu/drm/nouveau/nouveau_bo.c 		    (flags & TTM_PL_FLAG_VRAM) && !vmm->page[i].vram)
page              264 drivers/gpu/drm/nouveau/nouveau_bo.c 		    (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
page              271 drivers/gpu/drm/nouveau/nouveau_bo.c 		if (pi < 0 || !nvbo->comp || vmm->page[i].comp)
page              275 drivers/gpu/drm/nouveau/nouveau_bo.c 		if (*size >= 1ULL << vmm->page[i].shift)
page              283 drivers/gpu/drm/nouveau/nouveau_bo.c 	if (nvbo->comp && !vmm->page[pi].comp) {
page              288 drivers/gpu/drm/nouveau/nouveau_bo.c 	nvbo->page = vmm->page[pi].shift;
page             1100 drivers/gpu/drm/nouveau/nouveau_bo.c 	ret = nvif_vmm_get(vmm, LAZY, false, old_mem->mem.page, 0,
page             1105 drivers/gpu/drm/nouveau/nouveau_bo.c 	ret = nvif_vmm_get(vmm, LAZY, false, new_mem->mem.page, 0,
page             1316 drivers/gpu/drm/nouveau/nouveau_bo.c 	    mem->mem.page == nvbo->page) {
page               30 drivers/gpu/drm/nouveau/nouveau_bo.h 	unsigned page:5;
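The nouveau_bo.c entries above pick a GPU page shift from vmm->page[] and then round both the buffer size and its placement alignment up to that page size. A sketch of the rounding step under assumed names (the driver's own selection loop also weighs VRAM/host capability and compression):

#include <linux/kernel.h>

/* Sketch: round size and alignment up to the chosen GPU page size. */
static void nvbo_align_sketch(u64 *size, u32 *align, u8 page_shift)
{
	*size  = ALIGN(*size, 1ULL << page_shift);
	*align = max_t(u32, 1U << page_shift, *align);
}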
page               82 drivers/gpu/drm/nouveau/nouveau_dmem.c static inline struct nouveau_dmem *page_to_dmem(struct page *page)
page               84 drivers/gpu/drm/nouveau/nouveau_dmem.c 	return container_of(page->pgmap, struct nouveau_dmem, pagemap);
page               87 drivers/gpu/drm/nouveau/nouveau_dmem.c static unsigned long nouveau_dmem_page_addr(struct page *page)
page               89 drivers/gpu/drm/nouveau/nouveau_dmem.c 	struct nouveau_dmem_chunk *chunk = page->zone_device_data;
page               90 drivers/gpu/drm/nouveau/nouveau_dmem.c 	unsigned long idx = page_to_pfn(page) - chunk->pfn_first;
page               95 drivers/gpu/drm/nouveau/nouveau_dmem.c static void nouveau_dmem_page_free(struct page *page)
page               97 drivers/gpu/drm/nouveau/nouveau_dmem.c 	struct nouveau_dmem_chunk *chunk = page->zone_device_data;
page               98 drivers/gpu/drm/nouveau/nouveau_dmem.c 	unsigned long idx = page_to_pfn(page) - chunk->pfn_first;
page              136 drivers/gpu/drm/nouveau/nouveau_dmem.c 	struct page *dpage, *spage;
page              167 drivers/gpu/drm/nouveau/nouveau_dmem.c 	struct nouveau_dmem *dmem = page_to_dmem(vmf->page);
page              322 drivers/gpu/drm/nouveau/nouveau_dmem.c static struct page *
page              326 drivers/gpu/drm/nouveau/nouveau_dmem.c 	struct page *page;
page              334 drivers/gpu/drm/nouveau/nouveau_dmem.c 	page = pfn_to_page(pfns[0]);
page              335 drivers/gpu/drm/nouveau/nouveau_dmem.c 	get_page(page);
page              336 drivers/gpu/drm/nouveau/nouveau_dmem.c 	lock_page(page);
page              337 drivers/gpu/drm/nouveau/nouveau_dmem.c 	return page;
page              341 drivers/gpu/drm/nouveau/nouveau_dmem.c nouveau_dmem_page_free_locked(struct nouveau_drm *drm, struct page *page)
page              343 drivers/gpu/drm/nouveau/nouveau_dmem.c 	unlock_page(page);
page              344 drivers/gpu/drm/nouveau/nouveau_dmem.c 	put_page(page);
page              535 drivers/gpu/drm/nouveau/nouveau_dmem.c 		struct page *page;
page              548 drivers/gpu/drm/nouveau/nouveau_dmem.c 		page = pfn_to_page(chunk->pfn_first);
page              549 drivers/gpu/drm/nouveau/nouveau_dmem.c 		for (j = 0; j < DMEM_CHUNK_NPAGES; ++j, ++page)
page              550 drivers/gpu/drm/nouveau/nouveau_dmem.c 			page->zone_device_data = chunk;
page              564 drivers/gpu/drm/nouveau/nouveau_dmem.c 	struct page *dpage, *spage;
page              673 drivers/gpu/drm/nouveau/nouveau_dmem.c nouveau_dmem_page(struct nouveau_drm *drm, struct page *page)
page              675 drivers/gpu/drm/nouveau/nouveau_dmem.c 	return is_device_private_page(page) && drm->dmem == page_to_dmem(page);
page              686 drivers/gpu/drm/nouveau/nouveau_dmem.c 		struct page *page;
page              689 drivers/gpu/drm/nouveau/nouveau_dmem.c 		page = hmm_device_entry_to_page(range, range->pfns[i]);
page              690 drivers/gpu/drm/nouveau/nouveau_dmem.c 		if (page == NULL)
page              697 drivers/gpu/drm/nouveau/nouveau_dmem.c 		if (!nouveau_dmem_page(drm, page)) {
page              703 drivers/gpu/drm/nouveau/nouveau_dmem.c 		addr = nouveau_dmem_page_addr(page);
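The nouveau_dmem.c entries above compute a ZONE_DEVICE page's VRAM address from its pfn offset within the owning chunk. A sketch of that calculation with the chunk fields passed explicitly (field names assumed):

#include <linux/mm.h>

/* Sketch: device address = chunk base + page's byte offset in the chunk. */
static unsigned long dmem_page_addr_sketch(struct page *page,
					   unsigned long chunk_first_pfn,
					   unsigned long chunk_base)
{
	unsigned long idx = page_to_pfn(page) - chunk_first_pfn;

	return chunk_base + (idx << PAGE_SHIFT);
}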
page              133 drivers/gpu/drm/nouveau/nouveau_mem.c nouveau_mem_vram(struct ttm_mem_reg *reg, bool contig, u8 page)
page              140 drivers/gpu/drm/nouveau/nouveau_mem.c 	u64 size = ALIGN(reg->num_pages << PAGE_SHIFT, 1 << page);
page              148 drivers/gpu/drm/nouveau/nouveau_mem.c 					 drm->ttm.type_vram, page, size,
page              156 drivers/gpu/drm/nouveau/nouveau_mem.c 					 drm->ttm.type_vram, page, size,
page               26 drivers/gpu/drm/nouveau/nouveau_mem.h int nouveau_mem_vram(struct ttm_mem_reg *, bool contig, u8 page);
page              641 drivers/gpu/drm/nouveau/nouveau_svm.c 		args.i.p.page = PAGE_SHIFT;
page               75 drivers/gpu/drm/nouveau/nouveau_ttm.c 	ret = nouveau_mem_vram(reg, nvbo->contig, nvbo->page);
page              100 drivers/gpu/drm/nouveau/nouveau_vmm.c 	    mem->mem.page == nvbo->page) {
page              101 drivers/gpu/drm/nouveau/nouveau_vmm.c 		ret = nvif_vmm_get(&vmm->vmm, LAZY, false, mem->mem.page, 0,
page              109 drivers/gpu/drm/nouveau/nouveau_vmm.c 		ret = nvif_vmm_get(&vmm->vmm, PTES, false, mem->mem.page, 0,
page               47 drivers/gpu/drm/nouveau/nvif/mem.c nvif_mem_init_type(struct nvif_mmu *mmu, s32 oclass, int type, u8 page,
page               66 drivers/gpu/drm/nouveau/nvif/mem.c 	args->page = page;
page               74 drivers/gpu/drm/nouveau/nvif/mem.c 		mem->page = args->page;
page               86 drivers/gpu/drm/nouveau/nvif/mem.c nvif_mem_init(struct nvif_mmu *mmu, s32 oclass, u8 type, u8 page,
page               95 drivers/gpu/drm/nouveau/nvif/mem.c 			ret = nvif_mem_init_type(mmu, oclass, i, page, size,
page               78 drivers/gpu/drm/nouveau/nvif/vmm.c 	     u8 page, u8 align, u64 size, struct nvif_vma *vma)
page               85 drivers/gpu/drm/nouveau/nvif/vmm.c 	args.page = page;
page              110 drivers/gpu/drm/nouveau/nvif/vmm.c 	kfree(vmm->page);
page              123 drivers/gpu/drm/nouveau/nvif/vmm.c 	vmm->page = NULL;
page              142 drivers/gpu/drm/nouveau/nvif/vmm.c 	vmm->page = kmalloc_array(vmm->page_nr, sizeof(*vmm->page),
page              144 drivers/gpu/drm/nouveau/nvif/vmm.c 	if (!vmm->page) {
page              157 drivers/gpu/drm/nouveau/nvif/vmm.c 		vmm->page[i].shift = args.shift;
page              158 drivers/gpu/drm/nouveau/nvif/vmm.c 		vmm->page[i].sparse = args.sparse;
page              159 drivers/gpu/drm/nouveau/nvif/vmm.c 		vmm->page[i].vram = args.vram;
page              160 drivers/gpu/drm/nouveau/nvif/vmm.c 		vmm->page[i].host = args.host;
page              161 drivers/gpu/drm/nouveau/nvif/vmm.c 		vmm->page[i].comp = args.comp;
page               76 drivers/gpu/drm/nouveau/nvkm/engine/dma/usergf119.c 	u32 kind, page;
page               95 drivers/gpu/drm/nouveau/nvkm/engine/dma/usergf119.c 			   args->v0.version, args->v0.page, args->v0.kind);
page               97 drivers/gpu/drm/nouveau/nvkm/engine/dma/usergf119.c 		page = args->v0.page;
page              102 drivers/gpu/drm/nouveau/nvkm/engine/dma/usergf119.c 			page = GF119_DMA_V0_PAGE_SP;
page              105 drivers/gpu/drm/nouveau/nvkm/engine/dma/usergf119.c 			page = GF119_DMA_V0_PAGE_LP;
page              110 drivers/gpu/drm/nouveau/nvkm/engine/dma/usergf119.c 	if (page > 1)
page              112 drivers/gpu/drm/nouveau/nvkm/engine/dma/usergf119.c 	dmaobj->flags0 = (kind << 20) | (page << 6);
page               75 drivers/gpu/drm/nouveau/nvkm/engine/dma/usergv100.c 	u32 kind, page;
page               94 drivers/gpu/drm/nouveau/nvkm/engine/dma/usergv100.c 			   args->v0.version, args->v0.page, args->v0.kind);
page               96 drivers/gpu/drm/nouveau/nvkm/engine/dma/usergv100.c 		page = args->v0.page != 0;
page              100 drivers/gpu/drm/nouveau/nvkm/engine/dma/usergv100.c 		page = GF119_DMA_V0_PAGE_SP;
page              106 drivers/gpu/drm/nouveau/nvkm/engine/dma/usergv100.c 	if (page)
page              195 drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c 	fb->page = nvkm_longopt(device->cfgopt, "NvFbBigPage",
page               49 drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c 	int ret, size = 1 << (fb->base.page ? fb->base.page : 17);
page               79 drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c 	switch (fb->page) {
page                9 drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h 	struct page *r100c10_page;
page               33 drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c 	switch (fb->page) {
page               28 drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c 	return (fb->page == 16) ? 0 : -EINVAL;
page               10 drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h 	struct page *r100c08_page;
page               33 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c 	u8 page;
page               69 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c 	return nvkm_vram(memory)->page;
page               97 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c 	.page = nvkm_vram_page,
page              111 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c 	u8   page = max(rpage, (u8)NVKM_RAM_MM_SHIFT);
page              112 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c 	u32 align = (1 << page) >> NVKM_RAM_MM_SHIFT;
page              113 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c 	u32   max = ALIGN(size, 1 << page) >> NVKM_RAM_MM_SHIFT;
page              126 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c 	vram->page = page;
page               87 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c 	struct page *pages[];
page              356 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c 	.page = gk20a_instobj_page,
page              368 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c 	.page = gk20a_instobj_page,
page              446 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c 		struct page *p = alloc_page(GFP_KERNEL);
page              357 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c 	u8 page = max(order_base_2(align), 12);
page              368 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c 	return nvkm_ram_get(device, 0, 1, page, size, true, true, &iobj->ram);
page               94 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c 	if (device->fb->page)
page               52 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm20b.c 	if (device->fb->page)
page               35 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c 	struct page **mem;
page              103 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c 	.page = nvkm_mem_page,
page              126 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c 	.page = nvkm_mem_page,
page              144 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c nvkm_mem_new_host(struct nvkm_mmu *mmu, int type, u8 page, u64 size,
page              163 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c 	if (page != PAGE_SHIFT)
page              205 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c 		struct page *p = alloc_page(gfp);
page              224 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c nvkm_mem_new_type(struct nvkm_mmu *mmu, int type, u8 page, u64 size,
page              231 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c 		ret = mmu->func->mem.vram(mmu, type, page, size,
page              234 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c 		ret = nvkm_mem_new_host(mmu, type, page, size,
page                5 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.h int nvkm_mem_new_type(struct nvkm_mmu *, int type, u8 page, u64 size,
page               69 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memgf100.c gf100_mem_new(struct nvkm_mmu *mmu, int type, u8 page, u64 size,
page               92 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memgf100.c 	return nvkm_ram_get(mmu->subdev.device, type, 0x01, page,
page               51 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv04.c nv04_mem_new(struct nvkm_mmu *mmu, int type, u8 page, u64 size,
page               67 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv04.c 	return nvkm_ram_get(mmu->subdev.device, type, 0x01, page,
page               66 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv50.c nv50_mem_new(struct nvkm_mmu *mmu, int type, u8 page, u64 size,
page               87 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv50.c 			    page, size, contig, false, pmemory);
page               23 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h 		int (*vram)(struct nvkm_mmu *, int type, u8 page, u64 size,
page              152 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c 	u8  page;
page              157 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c 		page = args->v0.page;
page              175 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c 		page = max_t(u8, page, PAGE_SHIFT);
page              179 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c 	ret = nvkm_mem_new_type(mmu, type, page, size, argv, argc,
page              188 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c 	args->v0.page = nvkm_memory_page(umem->memory);
page               84 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c 	u8  page;
page               87 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c 		page = args->v0.page;
page               91 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c 		if (argc != (size >> page) * sizeof(args->v0.phys[0]))
page              101 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c 		ret = nvkm_vmm_pfn_map(vmm, page, addr, size, phys);
page              279 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c 	u8 page, align;
page              286 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c 		page = args->v0.page;
page              294 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c 				  page, align, size, &vma);
page              310 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c 	const struct nvkm_vmm_page *page;
page              314 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c 	page = uvmm->vmm->func->page;
page              315 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c 	for (nr = 0; page[nr].shift; nr++);
page              320 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c 		type = page[index].type;
page              321 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c 		args->v0.shift = page[index].shift;
page              380 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c 	const struct nvkm_vmm_page *page;
page              412 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c 	page = uvmm->vmm->func->page;
page              414 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c 	while (page && (page++)->shift)
page               41 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		const struct nvkm_vmm_page *page)
page               49 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 			const struct nvkm_vmm_desc *pair = page[-1].desc;
page               58 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	pgt->page = page ? page->shift : 0;
page               73 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	const struct nvkm_vmm_page *page;
page              200 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	const struct nvkm_vmm_desc *pair = it->page[-1].desc;
page              299 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	const struct nvkm_vmm_desc *pair = it->page[-1].desc;
page              489 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	pgt = nvkm_vmm_pt_new(desc, NVKM_VMM_PDE_SPARSED(pgt), it->page);
page              501 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c nvkm_vmm_iter(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
page              507 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	const struct nvkm_vmm_desc *desc = page->desc;
page              509 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	u64 bits = addr >> page->shift;
page              511 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	it.page = page;
page              514 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	it.cnt = size >> page->shift;
page              527 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	         addr, size, page->shift, it.cnt);
page              598 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	return addr << page->shift;
page              602 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c nvkm_vmm_ptes_sparse_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
page              605 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	nvkm_vmm_iter(vmm, page, addr, size, "sparse unref", false, false,
page              607 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		      page->desc->func->invalid ?
page              608 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		      page->desc->func->invalid : page->desc->func->unmap);
page              612 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c nvkm_vmm_ptes_sparse_get(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
page              615 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	if ((page->type & NVKM_VMM_PAGE_SPARSE)) {
page              616 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "sparse ref",
page              618 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 					 NULL, NULL, page->desc->func->sparse);
page              621 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 				nvkm_vmm_ptes_sparse_put(vmm, page, addr, size);
page              632 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	const struct nvkm_vmm_page *page = vmm->func->page;
page              639 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		while (size < (1ULL << page[m].shift))
page              644 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		while (!IS_ALIGNED(addr, 1ULL << page[i].shift))
page              650 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 			u64 next = 1ULL << page[i - 1].shift;
page              653 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 				block = (part >> page[i].shift) << page[i].shift;
page              655 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 				block = (size >> page[i].shift) << page[i].shift;
page              657 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 			block = (size >> page[i].shift) << page[i].shift;
page              662 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 			int ret = nvkm_vmm_ptes_sparse_get(vmm, &page[i], addr, block);
page              669 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 			nvkm_vmm_ptes_sparse_put(vmm, &page[i], addr, block);
page              680 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c nvkm_vmm_ptes_unmap_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
page              683 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	const struct nvkm_vmm_desc_func *func = page->desc->func;
page              684 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	nvkm_vmm_iter(vmm, page, addr, size, "unmap + unref",
page              691 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c nvkm_vmm_ptes_get_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
page              695 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "ref + map", true,
page              699 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 			nvkm_vmm_ptes_unmap_put(vmm, page, addr, size, false, false);
page              706 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c nvkm_vmm_ptes_unmap(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
page              709 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	const struct nvkm_vmm_desc_func *func = page->desc->func;
page              710 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	nvkm_vmm_iter(vmm, page, addr, size, "unmap", false, pfn,
page              717 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c nvkm_vmm_ptes_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
page              721 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	nvkm_vmm_iter(vmm, page, addr, size, "map", false, false,
page              726 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c nvkm_vmm_ptes_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
page              729 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	nvkm_vmm_iter(vmm, page, addr, size, "unref", false, false,
page              734 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c nvkm_vmm_ptes_get(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
page              737 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "ref", true, false,
page              741 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 			nvkm_vmm_ptes_put(vmm, page, addr, fail - addr);
page              754 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		vma->page = NVKM_VMA_PAGE_NONE;
page              773 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	new->page = vma->page;
page              959 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	       vma->page != NVKM_VMA_PAGE_NONE ? '0' + vma->page : '-',
page              992 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		const struct nvkm_vmm_page *page = vmm->func->page;
page              995 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		while (page[1].shift)
page              996 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 			page++;
page              999 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		nvkm_vmm_ptes_put(vmm, page, vmm->start, limit);
page             1040 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	const struct nvkm_vmm_page *page = func->page;
page             1056 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	while (page[1].shift)
page             1057 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		page++;
page             1063 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	for (levels = 0, desc = page->desc; desc->bits; desc++, levels++)
page             1065 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	bits += page->shift;
page             1152 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 			 u64 addr, u64 size, u8 page, bool map)
page             1190 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		nvkm_vmm_ptes_unmap_put(vmm, &vmm->func->page[vma->refd],
page             1213 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	const struct nvkm_vmm_page *page = vmm->func->page;
page             1223 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	while (page->shift && page->shift != shift &&
page             1224 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	       page->desc->func->pfn == NULL)
page             1225 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		page++;
page             1227 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	if (!page->shift || !IS_ALIGNED(addr, 1ULL << shift) ||
page             1231 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 			  shift, page->shift, addr, size);
page             1252 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		size = min_t(u64, size, pn << page->shift);
page             1276 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 						       page -
page             1277 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 						       vmm->func->page, map);
page             1284 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 				tmp->refd = page - vmm->func->page;
page             1293 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 			args.page = page;
page             1297 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 				ret = nvkm_vmm_ptes_get_map(vmm, page, addr,
page             1298 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 							    size, &args, page->
page             1301 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 				nvkm_vmm_ptes_map(vmm, page, addr, size, &args,
page             1302 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 						  page->desc->func->pfn);
page             1306 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 				nvkm_vmm_ptes_unmap_put(vmm, page, addr, size,
page             1323 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 				size -= 1 << page->shift;
page             1326 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 			pi += size >> page->shift;
page             1353 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	const struct nvkm_vmm_page *page = &vmm->func->page[vma->refd];
page             1356 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		nvkm_vmm_ptes_unmap_put(vmm, page, vma->addr, vma->size, vma->sparse, pfn);
page             1359 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		nvkm_vmm_ptes_unmap(vmm, page, vma->addr, vma->size, vma->sparse, pfn);
page             1381 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		if (!(map->page->type & NVKM_VMM_PAGE_VRAM)) {
page             1382 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 			VMM_DEBUG(vmm, "%d !VRAM", map->page->shift);
page             1388 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		if (!(map->page->type & NVKM_VMM_PAGE_HOST)) {
page             1389 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 			VMM_DEBUG(vmm, "%d !HOST", map->page->shift);
page             1398 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	if (!IS_ALIGNED(     vma->addr, 1ULL << map->page->shift) ||
page             1399 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	    !IS_ALIGNED((u64)vma->size, 1ULL << map->page->shift) ||
page             1400 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	    !IS_ALIGNED(   map->offset, 1ULL << map->page->shift) ||
page             1401 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	    nvkm_memory_page(map->memory) < map->page->shift) {
page             1403 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		    vma->addr, (u64)vma->size, map->offset, map->page->shift,
page             1415 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	for (map->page = vmm->func->page; map->page->shift; map->page++) {
page             1416 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		VMM_DEBUG(vmm, "trying %d", map->page->shift);
page             1439 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	if (vma->page == NVKM_VMA_PAGE_NONE &&
page             1454 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 			map->page = &vmm->func->page[vma->refd];
page             1456 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 			map->page = &vmm->func->page[vma->page];
page             1474 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		func = map->page->desc->func->mem;
page             1483 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		func = map->page->desc->func->sgl;
page             1487 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		func = map->page->desc->func->dma;
page             1492 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		ret = nvkm_vmm_ptes_get_map(vmm, map->page, vma->addr, vma->size, map, func);
page             1496 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		vma->refd = map->page - vmm->func->page;
page             1498 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		nvkm_vmm_ptes_map(vmm, map->page, vma->addr, vma->size, map, func);
page             1543 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	const struct nvkm_vmm_page *page = vmm->func->page;
page             1568 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 				nvkm_vmm_ptes_unmap_put(vmm, &page[refd], addr,
page             1574 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 				nvkm_vmm_ptes_put(vmm, &page[refd], addr, size);
page             1598 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		nvkm_vmm_ptes_sparse_put(vmm, &page[vma->refd], vma->addr, vma->size);
page             1616 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	vma->page = NVKM_VMA_PAGE_NONE;
page             1639 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	const struct nvkm_vmm_page *page = &vmm->func->page[NVKM_VMA_PAGE_NONE];
page             1672 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		for (page = vmm->func->page; page->shift; page++) {
page             1673 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 			if (shift == page->shift)
page             1677 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		if (!page->shift || !IS_ALIGNED(size, 1ULL << page->shift)) {
page             1708 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		const int p = page - vmm->func->page;
page             1711 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		if (vmm->func->page_block && prev && prev->page != p)
page             1716 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		if (vmm->func->page_block && next && next->page != p)
page             1751 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		ret = nvkm_vmm_ptes_sparse_get(vmm, page, vma->addr, vma->size);
page             1755 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		ret = nvkm_vmm_ptes_get(vmm, page, vma->addr, vma->size);
page             1765 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	vma->page = page - vmm->func->page;
page             1766 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	vma->refd = getref ? vma->page : NVKM_VMA_PAGE_NONE;
page             1774 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c nvkm_vmm_get(struct nvkm_vmm *vmm, u8 page, u64 size, struct nvkm_vma **pvma)
page             1778 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	ret = nvkm_vmm_get_locked(vmm, false, true, false, page, 0, size, pvma);
page             1817 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	const struct nvkm_vmm_page *page = vmm->func->page;
page             1821 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	while (page[1].shift)
page             1822 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		page++;
page             1824 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	ret = nvkm_vmm_ptes_get(vmm, page, vmm->start, limit);
page             1828 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	nvkm_vmm_iter(vmm, page, vmm->start, limit, "bootstrap", false, false,
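The nvkm vmm.c entries above repeatedly scan the per-VMM page-size array, which is terminated by a zero shift (loops such as "while (page[1].shift) page++" find the smallest supported size). A sketch of the lookup idiom, with the struct reduced to the one field the loop needs:

#include <linux/types.h>

/* Sketch: a zero ->shift terminates the supported-page-size array. */
struct vmm_page_sketch {
	u8 shift;
};

static const struct vmm_page_sketch *
vmm_find_page_sketch(const struct vmm_page_sketch *page, u8 shift)
{
	for (; page->shift; page++) {
		if (page->shift == shift)
			return page;
	}
	return NULL;	/* shift not supported by this VMM */
}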
page               22 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h 	u8 page;
page              154 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h 	const struct nvkm_vmm_page page[];
page              173 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h 			bool sparse, u8 page, u8 align, u64 size,
page              188 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h int nvkm_vmm_pfn_map(struct nvkm_vmm *, u8 page, u64 addr, u64 size, u64 *pfn);
page              291 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h 		u64 _ptes = ((SIZE) - MAP->off) >> MAP->page->shift;           \
page              295 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h 			MAP->off += PTEN << MAP->page->shift;                  \
page               68 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c 	if (map->page->shift == PAGE_SHIFT) {
page              242 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c 	const struct nvkm_vmm_page *page = map->page;
page              243 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c 	const bool gm20x = page->desc->func->sparse != NULL;
page              254 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c 	map->next = (1 << page->shift) >> 8;
page              284 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c 		u32 comp = (page->shift == 16 && !gm20x) ? 16 : 17;
page              286 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c 		if (aper != 0 || !(page->type & NVKM_VMM_PAGE_COMP)) {
page              287 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c 			VMM_DEBUG(vmm, "comp %d %02x", aper, page->type);
page              301 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c 			if (page->shift == 17 || !gm20x) {
page              378 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c 	.page = {
page              393 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c 	.page = {
page              407 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c 	switch (mmu->subdev.device->fb->page) {
page               75 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgk104.c 	.page = {
page               90 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgk104.c 	.page = {
page               44 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgk20a.c 	.page = {
page               59 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgk20a.c 	.page = {
page               98 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm200.c 	if (vmm->func->page[1].shift == 16)
page              117 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm200.c 	.page = {
page              133 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm200.c 	.page = {
page               32 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm20b.c 	.page = {
page               48 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm20b.c 	.page = {
page              133 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c 	if (map->page->shift == PAGE_SHIFT) {
page              316 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c 	const struct nvkm_vmm_page *page = map->page;
page              327 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c 	map->next = (1ULL << page->shift) >> 4;
page              358 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c 		if (aper != 0 || !(page->type & NVKM_VMM_PAGE_COMP)) {
page              359 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c 			VMM_DEBUG(vmm, "comp %d %02x", aper, page->type);
page              373 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c 			map->ctag |= ((1ULL << page->shift) >> 16) << 36;
page              496 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c 	.page = {
page               33 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp10b.c 	.page = {
page               71 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgv100.c 	.page = {
page               31 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmmcp77.c 	.page = {
page               95 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv04.c 	.page = {
page              100 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv41.c 	.page = {
page              201 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv44.c 	.page = {
page               68 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c 	if (map->page->shift == PAGE_SHIFT) {
page              111 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c 		switch (pgt->page) {
page              230 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c 	const struct nvkm_vmm_page *page = map->page;
page              243 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c 	map->next = 1 << page->shift;
page              295 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c 		if (aper != 0 || !(page->type & NVKM_VMM_PAGE_COMP)) {
page              296 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c 			VMM_DEBUG(vmm, "comp %d %02x", aper, page->type);
page              371 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c 	.page = {
page               60 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c 	.page = {
page              166 drivers/gpu/drm/omapdrm/omap_dmm_priv.h 	struct page *dummy_page;
page              354 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 		struct page **pages, u32 npages, u32 roll)
page              468 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c static int fill(struct tcm_area *area, struct page **pages,
page              516 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c int tiler_pin(struct tiler_block *block, struct page **pages,
page               90 drivers/gpu/drm/omapdrm/omap_dmm_tiler.h int tiler_pin(struct tiler_block *block, struct page **pages,
page               87 drivers/gpu/drm/omapdrm/omap_gem.c 	struct page **pages;
page              225 drivers/gpu/drm/omapdrm/omap_gem.c 	struct page **pages;
page              379 drivers/gpu/drm/omapdrm/omap_gem.c 	struct page *pages[64];  /* XXX is this too much to have on stack? */
page              446 drivers/gpu/drm/omapdrm/omap_gem.c 			sizeof(struct page *) * slots);
page              448 drivers/gpu/drm/omapdrm/omap_gem.c 			sizeof(struct page *) * (n - slots));
page              724 drivers/gpu/drm/omapdrm/omap_gem.c 	struct page **pages = omap_obj->pages;
page              913 drivers/gpu/drm/omapdrm/omap_gem.c int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
page             1256 drivers/gpu/drm/omapdrm/omap_gem.c 		struct page **pages;
page               24 drivers/gpu/drm/omapdrm/omap_gem.h struct page;
page               79 drivers/gpu/drm/omapdrm/omap_gem.h int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
page               69 drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c 	struct page **pages;
page               92 drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c 	struct page **pages;
page              102 drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c 	struct page **pages;
page               46 drivers/gpu/drm/panel/panel-ilitek-ili9881c.c 		u8	page;
page               54 drivers/gpu/drm/panel/panel-ilitek-ili9881c.c 			.page = (_page),	\
page              273 drivers/gpu/drm/panel/panel-ilitek-ili9881c.c static int ili9881c_switch_page(struct ili9881c *ctx, u8 page)
page              275 drivers/gpu/drm/panel/panel-ilitek-ili9881c.c 	u8 buf[4] = { 0xff, 0x98, 0x81, page };
page              320 drivers/gpu/drm/panel/panel-ilitek-ili9881c.c 			ret = ili9881c_switch_page(ctx, instr->arg.page);
page              455 drivers/gpu/drm/panfrost/panfrost_mmu.c 	struct page **pages;
page              487 drivers/gpu/drm/panfrost/panfrost_mmu.c 				       sizeof(struct page *), GFP_KERNEL | __GFP_ZERO);
page              137 drivers/gpu/drm/qxl/qxl_image.c 		int page;
page              142 drivers/gpu/drm/qxl/qxl_image.c 			page = 0;
page              146 drivers/gpu/drm/qxl/qxl_image.c 				ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, page << PAGE_SHIFT);
page              148 drivers/gpu/drm/qxl/qxl_image.c 				if (page == 0) {
page              163 drivers/gpu/drm/qxl/qxl_image.c 				page++;
page              253 drivers/gpu/drm/radeon/radeon.h 	struct page	*page;
page              653 drivers/gpu/drm/radeon/radeon.h 	struct page			**pages;
page              669 drivers/gpu/drm/radeon/radeon.h 		     int pages, struct page **pagelist,
page              728 drivers/gpu/drm/radeon/radeon.h int radeon_doorbell_get(struct radeon_device *rdev, u32 *page);
page              782 drivers/gpu/drm/radeon/radeon_device.c 	if (rdev->dummy_page.page)
page              784 drivers/gpu/drm/radeon/radeon_device.c 	rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
page              785 drivers/gpu/drm/radeon/radeon_device.c 	if (rdev->dummy_page.page == NULL)
page              787 drivers/gpu/drm/radeon/radeon_device.c 	rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
page              791 drivers/gpu/drm/radeon/radeon_device.c 		__free_page(rdev->dummy_page.page);
page              792 drivers/gpu/drm/radeon/radeon_device.c 		rdev->dummy_page.page = NULL;
page              809 drivers/gpu/drm/radeon/radeon_device.c 	if (rdev->dummy_page.page == NULL)
page              813 drivers/gpu/drm/radeon/radeon_device.c 	__free_page(rdev->dummy_page.page);
page              814 drivers/gpu/drm/radeon/radeon_device.c 	rdev->dummy_page.page = NULL;
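The radeon_device.c entries above set up a "dummy page": one zeroed DMA32 page mapped for device access and reused for unbound GART slots. A sketch of that setup, including the unwind on mapping failure (helper name assumed):

#include <linux/gfp.h>
#include <linux/pci.h>

/* Sketch: allocate and DMA-map a zeroed dummy page. */
static struct page *alloc_dummy_page_sketch(struct pci_dev *pdev,
					    dma_addr_t *addr)
{
	struct page *page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);

	if (!page)
		return NULL;
	*addr = pci_map_page(pdev, page, 0, PAGE_SIZE,
			     PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(pdev, *addr)) {
		__free_page(page);	/* mapping failed, undo allocation */
		return NULL;
	}
	return page;
}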
page              290 drivers/gpu/drm/radeon/radeon_gart.c 		     int pages, struct page **pagelist, dma_addr_t *dma_addr,
page              512 drivers/gpu/drm/radeon/radeon_ttm.c 		struct page **pages = ttm->pages + pinned;
page              565 drivers/gpu/drm/radeon/radeon_ttm.c 		struct page *page = sg_page_iter_page(&sg_iter);
page              567 drivers/gpu/drm/radeon/radeon_ttm.c 			set_page_dirty(page);
page              569 drivers/gpu/drm/radeon/radeon_ttm.c 		mark_page_accessed(page);
page              570 drivers/gpu/drm/radeon/radeon_ttm.c 		put_page(page);
page             1024 drivers/gpu/drm/radeon/radeon_ttm.c 		struct page *page;
page             1030 drivers/gpu/drm/radeon/radeon_ttm.c 		page = rdev->gart.pages[p];
page             1031 drivers/gpu/drm/radeon/radeon_ttm.c 		if (page) {
page             1032 drivers/gpu/drm/radeon/radeon_ttm.c 			ptr = kmap(page);
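The radeon_ttm.c entries above include the classic userptr teardown: each pinned page is marked dirty if the device may have written it, marked accessed, and then released. A sketch of that loop:

#include <linux/mm.h>
#include <linux/swap.h>

/* Sketch: release pages pinned from user space after device use. */
static void release_user_pages_sketch(struct page **pages, unsigned long n,
				      bool dirty)
{
	unsigned long i;

	for (i = 0; i < n; i++) {
		if (dirty)
			set_page_dirty(pages[i]);
		mark_page_accessed(pages[i]);
		put_page(pages[i]);	/* drop the pin's reference */
	}
}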
page               24 drivers/gpu/drm/rockchip/rockchip_drm_gem.h 	struct page **pages;
page              344 drivers/gpu/drm/savage/savage_bci.c void savage_dma_wait(drm_savage_private_t * dev_priv, unsigned int page)
page              362 drivers/gpu/drm/savage/savage_bci.c 	if (dev_priv->dma_pages[page].age.wrap > wrap ||
page              363 drivers/gpu/drm/savage/savage_bci.c 	    (dev_priv->dma_pages[page].age.wrap == wrap &&
page              364 drivers/gpu/drm/savage/savage_bci.c 	     dev_priv->dma_pages[page].age.event > event)) {
page              366 drivers/gpu/drm/savage/savage_bci.c 					dev_priv->dma_pages[page].age.event)
page              211 drivers/gpu/drm/savage/savage_drv.h extern void savage_dma_wait(drm_savage_private_t * dev_priv, unsigned int page);
page               68 drivers/gpu/drm/tegra/gem.c static void *tegra_bo_kmap(struct host1x_bo *bo, unsigned int page)
page               73 drivers/gpu/drm/tegra/gem.c 		return obj->vaddr + page * PAGE_SIZE;
page               75 drivers/gpu/drm/tegra/gem.c 		return dma_buf_kmap(obj->gem.import_attach->dmabuf, page);
page               77 drivers/gpu/drm/tegra/gem.c 		return vmap(obj->pages + page, 1, VM_MAP,
page               81 drivers/gpu/drm/tegra/gem.c static void tegra_bo_kunmap(struct host1x_bo *bo, unsigned int page,
page               89 drivers/gpu/drm/tegra/gem.c 		dma_buf_kunmap(obj->gem.import_attach->dmabuf, page, addr);
page              430 drivers/gpu/drm/tegra/gem.c 	struct page *page;
page              437 drivers/gpu/drm/tegra/gem.c 	page = bo->pages[offset];
page              439 drivers/gpu/drm/tegra/gem.c 	return vmf_insert_page(vma, vmf->address, page);
page              585 drivers/gpu/drm/tegra/gem.c static void *tegra_gem_prime_kmap(struct dma_buf *buf, unsigned long page)
page              590 drivers/gpu/drm/tegra/gem.c static void tegra_gem_prime_kunmap(struct dma_buf *buf, unsigned long page,
page               39 drivers/gpu/drm/tegra/gem.h 	struct page **pages;
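The tegra gem.c entries above implement a per-page kmap by vmap()-ing exactly one backing page. A sketch of that single-page mapping; the real driver also applies a write-combined pgprot, which is simplified here. Release the mapping with vunmap():

#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Sketch: map one backing page of a BO into kernel address space. */
static void *bo_kmap_page_sketch(struct page **pages, unsigned int page)
{
	return vmap(pages + page, 1, VM_MAP, PAGE_KERNEL);
}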
page               54 drivers/gpu/drm/ttm/ttm_agp_backend.c 	struct page *dummy_read_page = ttm->bdev->glob->dummy_read_page;
page               66 drivers/gpu/drm/ttm/ttm_agp_backend.c 		struct page *page = ttm->pages[i];
page               68 drivers/gpu/drm/ttm/ttm_agp_backend.c 		if (!page)
page               69 drivers/gpu/drm/ttm/ttm_agp_backend.c 			page = dummy_read_page;
page               71 drivers/gpu/drm/ttm/ttm_agp_backend.c 		mem->pages[mem->page_count++] = page;
page              251 drivers/gpu/drm/ttm/ttm_bo_util.c static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
page              254 drivers/gpu/drm/ttm/ttm_bo_util.c 	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
page              256 drivers/gpu/drm/ttm/ttm_bo_util.c 	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
page              287 drivers/gpu/drm/ttm/ttm_bo_util.c void *ttm_kmap_atomic_prot(struct page *page, pgprot_t prot)
page              290 drivers/gpu/drm/ttm/ttm_bo_util.c 		return kmap_atomic(page);
page              292 drivers/gpu/drm/ttm/ttm_bo_util.c 		return __ttm_kmap_atomic_prot(page, prot);
page              313 drivers/gpu/drm/ttm/ttm_bo_util.c 				unsigned long page,
page              316 drivers/gpu/drm/ttm/ttm_bo_util.c 	struct page *d = ttm->pages[page];
page              322 drivers/gpu/drm/ttm/ttm_bo_util.c 	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
page              335 drivers/gpu/drm/ttm/ttm_bo_util.c 				unsigned long page,
page              338 drivers/gpu/drm/ttm/ttm_bo_util.c 	struct page *s = ttm->pages[page];
page              344 drivers/gpu/drm/ttm/ttm_bo_util.c 	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
page              369 drivers/gpu/drm/ttm/ttm_bo_util.c 	unsigned long page;
page              419 drivers/gpu/drm/ttm/ttm_bo_util.c 		page = i * dir + add;
page              423 drivers/gpu/drm/ttm/ttm_bo_util.c 			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
page              428 drivers/gpu/drm/ttm/ttm_bo_util.c 			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
page              431 drivers/gpu/drm/ttm/ttm_bo_util.c 			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
page              604 drivers/gpu/drm/ttm/ttm_bo_util.c 		map->page = ttm->pages[start_page];
page              605 drivers/gpu/drm/ttm/ttm_bo_util.c 		map->virtual = kmap(map->page);
page              666 drivers/gpu/drm/ttm/ttm_bo_util.c 		kunmap(map->page);
page              677 drivers/gpu/drm/ttm/ttm_bo_util.c 	map->page = NULL;
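The ttm_bo_util.c entries above copy buffer contents page by page during a move, e.g. from an ioremapped aperture into a ttm backing page. A sketch of the io-to-page direction, minus the caching-attribute handling the real ttm_copy_io_ttm_page() performs:

#include <linux/highmem.h>
#include <linux/io.h>

/* Sketch: copy one page of data from an ioremapped source into a page. */
static void copy_io_to_page_sketch(struct page *dst_page,
				   const void __iomem *src,
				   unsigned long page)
{
	void *dst = kmap_atomic(dst_page);

	memcpy_fromio(dst, src + (page << PAGE_SHIFT), PAGE_SIZE);
	kunmap_atomic(dst);
}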
page              119 drivers/gpu/drm/ttm/ttm_bo_vm.c 	struct page *page;
page              263 drivers/gpu/drm/ttm/ttm_bo_vm.c 			page = ttm->pages[page_offset];
page              264 drivers/gpu/drm/ttm/ttm_bo_vm.c 			if (unlikely(!page && i == 0)) {
page              267 drivers/gpu/drm/ttm/ttm_bo_vm.c 			} else if (unlikely(!page)) {
page              270 drivers/gpu/drm/ttm/ttm_bo_vm.c 			page->index = drm_vma_node_start(&bo->base.vma_node) +
page              272 drivers/gpu/drm/ttm/ttm_bo_vm.c 			pfn = page_to_pfn(page);
page              323 drivers/gpu/drm/ttm/ttm_bo_vm.c 	unsigned long page = offset >> PAGE_SHIFT;
page              330 drivers/gpu/drm/ttm/ttm_bo_vm.c 	offset -= page << PAGE_SHIFT;
page              337 drivers/gpu/drm/ttm/ttm_bo_vm.c 		ret = ttm_bo_kmap(bo, page, 1, &map);
page              349 drivers/gpu/drm/ttm/ttm_bo_vm.c 		page++;
page              633 drivers/gpu/drm/ttm/ttm_memory.c 			      struct page *page, uint64_t size,
page              644 drivers/gpu/drm/ttm/ttm_memory.c 	if (PageHighMem(page) && glob->zone_highmem != NULL)
page              647 drivers/gpu/drm/ttm/ttm_memory.c 	if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
page              653 drivers/gpu/drm/ttm/ttm_memory.c void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page,
page              659 drivers/gpu/drm/ttm/ttm_memory.c 	if (PageHighMem(page) && glob->zone_highmem != NULL)
page              662 drivers/gpu/drm/ttm/ttm_memory.c 	if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
page               52 drivers/gpu/drm/ttm/ttm_page_alloc.c #define NUM_PAGES_TO_ALLOC		(PAGE_SIZE/sizeof(struct page *))
page              247 drivers/gpu/drm/ttm/ttm_page_alloc.c static void ttm_pages_put(struct page *pages[], unsigned npages,
page              286 drivers/gpu/drm/ttm/ttm_page_alloc.c 	static struct page *static_buf[NUM_PAGES_TO_ALLOC];
page              288 drivers/gpu/drm/ttm/ttm_page_alloc.c 	struct page *p;
page              289 drivers/gpu/drm/ttm/ttm_page_alloc.c 	struct page **pages_to_free;
page              300 drivers/gpu/drm/ttm/ttm_page_alloc.c 					      sizeof(struct page *),
page              442 drivers/gpu/drm/ttm/ttm_page_alloc.c static int ttm_set_pages_caching(struct page **pages,
page              471 drivers/gpu/drm/ttm/ttm_page_alloc.c 		struct page **failed_pages, unsigned cpages)
page              491 drivers/gpu/drm/ttm/ttm_page_alloc.c 	struct page **caching_array;
page              492 drivers/gpu/drm/ttm/ttm_page_alloc.c 	struct page *p;
page              499 drivers/gpu/drm/ttm/ttm_page_alloc.c 	caching_array = kmalloc_array(max_cpages, sizeof(struct page *),
page              575 drivers/gpu/drm/ttm/ttm_page_alloc.c 	struct page *p;
page              676 drivers/gpu/drm/ttm/ttm_page_alloc.c 		struct page *page;
page              678 drivers/gpu/drm/ttm/ttm_page_alloc.c 		list_for_each_entry(page, pages, lru) {
page              679 drivers/gpu/drm/ttm/ttm_page_alloc.c 			if (PageHighMem(page))
page              680 drivers/gpu/drm/ttm/ttm_page_alloc.c 				clear_highpage(page);
page              682 drivers/gpu/drm/ttm/ttm_page_alloc.c 				clear_page(page_address(page));
page              708 drivers/gpu/drm/ttm/ttm_page_alloc.c static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
page              723 drivers/gpu/drm/ttm/ttm_page_alloc.c 			struct page *p = pages[i];
page              764 drivers/gpu/drm/ttm/ttm_page_alloc.c 			struct page *p = pages[i];
page              826 drivers/gpu/drm/ttm/ttm_page_alloc.c static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
page              834 drivers/gpu/drm/ttm/ttm_page_alloc.c 	struct page *p = NULL;
page              922 drivers/gpu/drm/ttm/ttm_page_alloc.c 		struct page *tmp = p;
page             1108 drivers/gpu/drm/ttm/ttm_page_alloc.c 		struct page *p = tt->ttm.pages[i];
page             1145 drivers/gpu/drm/ttm/ttm_page_alloc.c 		struct page *p = tt->ttm.pages[i];
page               55 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c #define NUM_PAGES_TO_ALLOC		(PAGE_SIZE/sizeof(struct page *))
page              125 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	struct page *p;
page              267 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 				 struct page **pages, unsigned cpages)
page              355 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	struct page *page = d_page->p;
page              361 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		if (ttm_set_pages_wb(page, num_pages))
page              371 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 			      struct page *pages[], unsigned npages)
page              407 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	static struct page *static_buf[NUM_PAGES_TO_ALLOC];
page              410 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	struct page **pages_to_free;
page              422 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 					      sizeof(struct page *),
page              668 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 						 struct page **failed_pages,
page              672 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	struct page *p;
page              703 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	struct page **caching_array;
page              705 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	struct page *p;
page              709 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 			(unsigned)(PAGE_SIZE/sizeof(struct page *)));
page              712 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	caching_array = kmalloc_array(max_cpages, sizeof(struct page *),
page              116 drivers/gpu/drm/ttm/ttm_tt.c static int ttm_tt_set_page_caching(struct page *p,
page              151 drivers/gpu/drm/ttm/ttm_tt.c 	struct page *cur_page;
page              351 drivers/gpu/drm/ttm/ttm_tt.c 	struct page *from_page;
page              352 drivers/gpu/drm/ttm/ttm_tt.c 	struct page *to_page;
page              393 drivers/gpu/drm/ttm/ttm_tt.c 	struct page *from_page;
page              394 drivers/gpu/drm/ttm/ttm_tt.c 	struct page *to_page;
page              479 drivers/gpu/drm/ttm/ttm_tt.c 	struct page **page = ttm->pages;
page              485 drivers/gpu/drm/ttm/ttm_tt.c 		(*page)->mapping = NULL;
page              486 drivers/gpu/drm/ttm/ttm_tt.c 		(*page++)->index = 0;
page              203 drivers/gpu/drm/udl/udl_dmabuf.c 	obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
page               81 drivers/gpu/drm/udl/udl_drv.h 	struct page **pages;
page              165 drivers/gpu/drm/udl/udl_fb.c 	unsigned long page, pos;
page              184 drivers/gpu/drm/udl/udl_fb.c 		page = vmalloc_to_pfn((void *)pos);
page              185 drivers/gpu/drm/udl/udl_fb.c 		if (remap_pfn_range(vma, start, page, PAGE_SIZE, PAGE_SHARED))
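The udl_fb.c entries above mmap a vmalloc'd framebuffer by remapping it one page at a time, since vmalloc memory is not physically contiguous. A sketch of that loop under assumed parameter names:

#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Sketch: map a vmalloc buffer into user space, one pfn per page. */
static int mmap_vmalloc_sketch(struct vm_area_struct *vma, void *vaddr,
			       unsigned long start, unsigned long size)
{
	unsigned long pos = (unsigned long)vaddr;
	unsigned long end = start + size;

	while (start < end) {
		unsigned long pfn = vmalloc_to_pfn((void *)pos);

		if (remap_pfn_range(vma, start, pfn, PAGE_SIZE, PAGE_SHARED))
			return -EAGAIN;
		start += PAGE_SIZE;
		pos += PAGE_SIZE;
	}
	return 0;
}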
page              107 drivers/gpu/drm/udl/udl_gem.c 	struct page *page;
page              115 drivers/gpu/drm/udl/udl_gem.c 	page = obj->pages[page_offset];
page              116 drivers/gpu/drm/udl/udl_gem.c 	return vmf_insert_page(vma, vmf->address, page);
page              121 drivers/gpu/drm/udl/udl_gem.c 	struct page **pages;
page               91 drivers/gpu/drm/v3d/v3d_mmu.c 	u32 page = bo->node.start;
page              105 drivers/gpu/drm/v3d/v3d_mmu.c 			v3d->pt[page++] = pte + i;
page              108 drivers/gpu/drm/v3d/v3d_mmu.c 	WARN_ON_ONCE(page - bo->node.start !=
page              119 drivers/gpu/drm/v3d/v3d_mmu.c 	u32 page;
page              121 drivers/gpu/drm/v3d/v3d_mmu.c 	for (page = bo->node.start; page < bo->node.start + npages; page++)
page              122 drivers/gpu/drm/v3d/v3d_mmu.c 		v3d->pt[page] = 0;
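The v3d_mmu.c entries above treat the MMU as a flat table of u32 entries, so unmapping a BO is just zeroing its slots. A sketch of that clear loop with the table passed in directly:

#include <linux/types.h>

/* Sketch: zero every page-table slot a BO occupies in a flat table. */
static void pt_clear_sketch(u32 *pt, u32 start, u32 npages)
{
	u32 page;

	for (page = start; page < start + npages; page++)
		pt[page] = 0;
}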
page               90 drivers/gpu/drm/vgem/vgem_drv.c 		vmf->page = obj->pages[page_offset];
page               95 drivers/gpu/drm/vgem/vgem_drv.c 		struct page *page;
page               97 drivers/gpu/drm/vgem/vgem_drv.c 		page = shmem_read_mapping_page(
page              100 drivers/gpu/drm/vgem/vgem_drv.c 		if (!IS_ERR(page)) {
page              101 drivers/gpu/drm/vgem/vgem_drv.c 			vmf->page = page;
page              103 drivers/gpu/drm/vgem/vgem_drv.c 		} else switch (PTR_ERR(page)) {
page              116 drivers/gpu/drm/vgem/vgem_drv.c 				WARN_ON(PTR_ERR(page));
page              290 drivers/gpu/drm/vgem/vgem_drv.c static struct page **vgem_pin_pages(struct drm_vgem_gem_object *bo)
page              294 drivers/gpu/drm/vgem/vgem_drv.c 		struct page **pages;
page              324 drivers/gpu/drm/vgem/vgem_drv.c 	struct page **pages;
page              373 drivers/gpu/drm/vgem/vgem_drv.c 	obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
page              389 drivers/gpu/drm/vgem/vgem_drv.c 	struct page **pages;
page               46 drivers/gpu/drm/vgem/vgem_drv.h 	struct page **pages;
page              239 drivers/gpu/drm/via/via_dmablit.c 	vsg->pages = vzalloc(array_size(sizeof(struct page *), vsg->num_pages));
page               41 drivers/gpu/drm/via/via_dmablit.h 	struct page **pages;
page              207 drivers/gpu/drm/virtio/virtgpu_object.c 	struct page **pages = bo->tbo.ttm->pages;
page               90 drivers/gpu/drm/vkms/vkms_drv.h 	struct page **pages;
page               61 drivers/gpu/drm/vkms/vkms_gem.c 		vmf->page = obj->pages[page_offset];
page               66 drivers/gpu/drm/vkms/vkms_gem.c 		struct page *page;
page               70 drivers/gpu/drm/vkms/vkms_gem.c 		page = shmem_read_mapping_page(mapping, page_offset);
page               72 drivers/gpu/drm/vkms/vkms_gem.c 		if (!IS_ERR(page)) {
page               73 drivers/gpu/drm/vkms/vkms_gem.c 			vmf->page = page;
page               76 drivers/gpu/drm/vkms/vkms_gem.c 			switch (PTR_ERR(page)) {
page               89 drivers/gpu/drm/vkms/vkms_gem.c 				WARN_ON(PTR_ERR(page));
page              149 drivers/gpu/drm/vkms/vkms_gem.c static struct page **_get_pages(struct vkms_gem_object *vkms_obj)
page              154 drivers/gpu/drm/vkms/vkms_gem.c 		struct page **pages = drm_gem_get_pages(gem_obj);
page              199 drivers/gpu/drm/vkms/vkms_gem.c 		struct page **pages = _get_pages(vkms_obj);
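The vgem_drv.c and vkms_gem.c entries above share one fault-handler pattern: return a pinned page directly if the object's page array exists, otherwise page the data in through shmem and translate the error into a VM_FAULT_* code. A minimal sketch, assuming a drm_gem_object-backed object whose layout (demo_gem and its pages field) is hypothetical:

struct demo_gem {
	struct drm_gem_object base;
	struct page **pages;		/* non-NULL once pinned */
};

static vm_fault_t demo_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct demo_gem *obj = vma->vm_private_data;
	pgoff_t off = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
	struct page *page;

	if (obj->pages) {		/* fast path: backing store pinned */
		vmf->page = obj->pages[off];
		return 0;
	}

	page = shmem_read_mapping_page(file_inode(obj->base.filp)->i_mapping,
				       off);
	if (IS_ERR(page)) {
		switch (PTR_ERR(page)) {
		case -ENOSPC:
		case -ENOMEM:
			return VM_FAULT_OOM;
		case -EBUSY:
			return VM_FAULT_RETRY;
		default:
			return VM_FAULT_SIGBUS;
		}
	}

	vmf->page = page;		/* core MM maps and references it */
	return 0;
}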
page              337 drivers/gpu/drm/vmwgfx/vmwgfx_blit.c 	struct page **dst_pages;
page              342 drivers/gpu/drm/vmwgfx/vmwgfx_blit.c 	struct page **src_pages;
page              297 drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 	struct page **pages;
page              319 drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 	struct page **pages;
page              326 drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 	struct page *(*page)(struct vmw_piter *);
page              975 drivers/gpu/drm/vmwgfx/vmwgfx_drv.h static inline struct page *vmw_piter_page(struct vmw_piter *viter)
page              977 drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 	return viter->page(viter);
page              327 drivers/gpu/drm/vmwgfx/vmwgfx_fb.c 	struct page *page;
page              332 drivers/gpu/drm/vmwgfx/vmwgfx_fb.c 	list_for_each_entry(page, pagelist, lru) {
page              333 drivers/gpu/drm/vmwgfx/vmwgfx_fb.c 		start = page->index << PAGE_SHIFT;
page              514 drivers/gpu/drm/vmwgfx/vmwgfx_mob.c 	struct page *page;
page              517 drivers/gpu/drm/vmwgfx/vmwgfx_mob.c 		page = vmw_piter_page(pt_iter);
page              519 drivers/gpu/drm/vmwgfx/vmwgfx_mob.c 		save_addr = addr = kmap_atomic(page);
page              284 drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c static struct page *__vmw_piter_non_sg_page(struct vmw_piter *viter)
page              329 drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c 	viter->page = &__vmw_piter_non_sg_page;
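The vmwgfx entries above implement a page iterator that dispatches through a function pointer, so callers fetch the current struct page the same way whether the buffer is backed by a plain page array or an sg table. A trimmed-down sketch (demo_* names hypothetical):

struct demo_piter {
	struct page **pages;			/* non-sg backend */
	unsigned long i;
	struct page *(*page)(struct demo_piter *);
};

static struct page *demo_non_sg_page(struct demo_piter *viter)
{
	return viter->pages[viter->i];
}

static inline struct page *demo_piter_page(struct demo_piter *viter)
{
	return viter->page(viter);	/* backend-specific lookup */
}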
page              109 drivers/gpu/drm/vmwgfx/vmwgfx_validation.c 		struct page *page;
page              121 drivers/gpu/drm/vmwgfx/vmwgfx_validation.c 		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
page              122 drivers/gpu/drm/vmwgfx/vmwgfx_validation.c 		if (!page)
page              128 drivers/gpu/drm/vmwgfx/vmwgfx_validation.c 		list_add_tail(&page->lru, &ctx->page_list);
page              129 drivers/gpu/drm/vmwgfx/vmwgfx_validation.c 		ctx->page_address = page_address(page);
page              149 drivers/gpu/drm/vmwgfx/vmwgfx_validation.c 	struct page *entry, *next;
page              160 drivers/gpu/drm/xen/xen_drm_front.c 			      u32 bpp, u64 size, struct page **pages)
page              148 drivers/gpu/drm/xen/xen_drm_front.h 			      u32 bpp, u64 size, struct page **pages);
page               85 drivers/gpu/drm/xen/xen_drm_front_evtchnl.c 	struct xendispl_event_page *page = evtchnl->u.evt.page;
page               94 drivers/gpu/drm/xen/xen_drm_front_evtchnl.c 	prod = page->in_prod;
page               97 drivers/gpu/drm/xen/xen_drm_front_evtchnl.c 	if (prod == page->in_cons)
page              100 drivers/gpu/drm/xen/xen_drm_front_evtchnl.c 	for (cons = page->in_cons; cons != prod; cons++) {
page              103 drivers/gpu/drm/xen/xen_drm_front_evtchnl.c 		event = &XENDISPL_IN_RING_REF(page, cons);
page              114 drivers/gpu/drm/xen/xen_drm_front_evtchnl.c 	page->in_cons = cons;
page              126 drivers/gpu/drm/xen/xen_drm_front_evtchnl.c 	unsigned long page = 0;
page              129 drivers/gpu/drm/xen/xen_drm_front_evtchnl.c 		page = (unsigned long)evtchnl->u.req.ring.sring;
page              131 drivers/gpu/drm/xen/xen_drm_front_evtchnl.c 		page = (unsigned long)evtchnl->u.evt.page;
page              132 drivers/gpu/drm/xen/xen_drm_front_evtchnl.c 	if (!page)
page              151 drivers/gpu/drm/xen/xen_drm_front_evtchnl.c 		gnttab_end_foreign_access(evtchnl->gref, 0, page);
page              161 drivers/gpu/drm/xen/xen_drm_front_evtchnl.c 	unsigned long page;
page              173 drivers/gpu/drm/xen/xen_drm_front_evtchnl.c 	page = get_zeroed_page(GFP_NOIO | __GFP_HIGH);
page              174 drivers/gpu/drm/xen/xen_drm_front_evtchnl.c 	if (!page) {
page              184 drivers/gpu/drm/xen/xen_drm_front_evtchnl.c 		sring = (struct xen_displif_sring *)page;
page              191 drivers/gpu/drm/xen/xen_drm_front_evtchnl.c 			free_page(page);
page              198 drivers/gpu/drm/xen/xen_drm_front_evtchnl.c 						  virt_to_gfn((void *)page), 0);
page              200 drivers/gpu/drm/xen/xen_drm_front_evtchnl.c 			free_page(page);
page              204 drivers/gpu/drm/xen/xen_drm_front_evtchnl.c 		evtchnl->u.evt.page = (struct xendispl_event_page *)page;
page               60 drivers/gpu/drm/xen/xen_drm_front_evtchnl.h 			struct xendispl_event_page *page;
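The xen_drm_front_evtchnl.c entries above show the shared-page lifecycle: allocate a zeroed page, grant it to the other domain, and on teardown revoke the grant, letting the grant layer free the page. A sketch using the gnttab API as it existed in the kernel version indexed here (the signature of gnttab_end_foreign_access has since changed):

static int demo_share_page(domid_t otherend, grant_ref_t *gref,
			   unsigned long *out_page)
{
	unsigned long page = get_zeroed_page(GFP_NOIO | __GFP_HIGH);
	int ret;

	if (!page)
		return -ENOMEM;

	ret = gnttab_grant_foreign_access(otherend,
					  virt_to_gfn((void *)page), 0);
	if (ret < 0) {
		free_page(page);
		return ret;
	}

	*gref = ret;			/* grant reference on success */
	*out_page = page;
	return 0;
}

static void demo_unshare_page(grant_ref_t gref, unsigned long page)
{
	/* passing the page lets the grant layer free it once revoked */
	gnttab_end_foreign_access(gref, 0, page);
}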
page               29 drivers/gpu/drm/xen/xen_drm_front_gem.c 	struct page **pages;
page               49 drivers/gpu/drm/xen/xen_drm_front_gem.c 					sizeof(struct page *), GFP_KERNEL);
page              168 drivers/gpu/drm/xen/xen_drm_front_gem.c struct page **xen_drm_front_gem_get_pages(struct drm_gem_object *gem_obj)
page               31 drivers/gpu/drm/xen/xen_drm_front_gem.h struct page **xen_drm_front_gem_get_pages(struct drm_gem_object *obj);
page               34 drivers/hid/hid-debug.c 	unsigned  page;
page              444 drivers/hid/hid-debug.c static char *resolv_usage_page(unsigned page, struct seq_file *f) {
page              455 drivers/hid/hid-debug.c 		if (p->page == page) {
page              467 drivers/hid/hid-debug.c 		snprintf(buf, HID_DEBUG_BUFSIZE, "%04x", page);
page              469 drivers/hid/hid-debug.c 		seq_printf(f, "%04x", page);
page              494 drivers/hid/hid-debug.c 		if (p->page == (usage >> 16)) {
page               81 drivers/hid/hid-rmi.c 	int page;
page              121 drivers/hid/hid-rmi.c static int rmi_set_page(struct hid_device *hdev, u8 page)
page              129 drivers/hid/hid-rmi.c 	data->writeReport[4] = page;
page              139 drivers/hid/hid-rmi.c 	data->page = page;
page              201 drivers/hid/hid-rmi.c 	if (RMI_PAGE(addr) != data->page) {
page              270 drivers/hid/hid-rmi.c 	if (RMI_PAGE(addr) != data->page) {
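The hid-rmi.c entries above (like bme680_spi.c and adis.c further down) cache the currently selected register page and only issue a page-select command when an access targets a different page, saving a bus transaction on every same-page access. A minimal sketch of that cache; DEMO_PAGE and the transport write are stand-ins:

#define DEMO_PAGE(addr)	(((addr) >> 8) & 0xff)

struct demo_dev {
	int page;			/* last selected page, -1 if unknown */
};

static int demo_set_page(struct demo_dev *d, u8 page)
{
	/* ... bus-specific "write page-select register" goes here ... */
	d->page = page;
	return 0;
}

static int demo_read(struct demo_dev *d, u16 addr)
{
	int ret;

	if (DEMO_PAGE(addr) != d->page) {	/* switch only when needed */
		ret = demo_set_page(d, DEMO_PAGE(addr));
		if (ret)
			return ret;
	}
	/* ... read (addr & 0xff) within the selected page ... */
	return 0;
}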
page              802 drivers/hid/usbhid/hid-core.c static int hid_find_field_early(struct hid_device *hid, unsigned int page,
page              815 drivers/hid/usbhid/hid-core.c 				if ((usage->hid & HID_USAGE_PAGE) == page &&
page             1091 drivers/hsi/clients/cmt_speech.c 	struct page *page;
page             1093 drivers/hsi/clients/cmt_speech.c 	page = virt_to_page(csdata->mmap_base);
page             1094 drivers/hsi/clients/cmt_speech.c 	get_page(page);
page             1095 drivers/hsi/clients/cmt_speech.c 	vmf->page = page;
page               89 drivers/hv/channel.c 	struct page *page;
page               97 drivers/hv/channel.c 	page = alloc_pages_node(cpu_to_node(newchannel->target_cpu),
page              100 drivers/hv/channel.c 	if (!page)
page              101 drivers/hv/channel.c 		page = alloc_pages(GFP_KERNEL|__GFP_ZERO, order);
page              103 drivers/hv/channel.c 	if (!page)
page              106 drivers/hv/channel.c 	newchannel->ringbuffer_page = page;
page              120 drivers/hv/channel.c 	struct page *page = newchannel->ringbuffer_page;
page              142 drivers/hv/channel.c 	err = hv_ringbuffer_init(&newchannel->outbound, page, send_pages);
page              147 drivers/hv/channel.c 				 &page[send_pages], recv_pages);
page              672 drivers/hv/hv_balloon.c static void hv_page_online_one(struct hv_hotadd_state *has, struct page *pg)
page              769 drivers/hv/hv_balloon.c static void hv_online_page(struct page *pg, unsigned int order)
page             1195 drivers/hv/hv_balloon.c 	struct page *pg;
page             1214 drivers/hv/hv_balloon.c 	struct page *pg;
page              183 drivers/hv/hyperv_vmbus.h 		       struct page *pages, u32 pagecnt);
page              193 drivers/hv/ring_buffer.c 		       struct page *pages, u32 page_cnt)
page              196 drivers/hv/ring_buffer.c 	struct page **pages_wraparound;
page              204 drivers/hv/ring_buffer.c 	pages_wraparound = kcalloc(page_cnt * 2 - 1, sizeof(struct page *),
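The hv ring_buffer.c entries above build a "wraparound" mapping: after the header page, every data page is listed twice in a temporary array before vmap(), so reads and writes that cross the end of the ring stay linear in virtual address space. A sketch, assuming pages points at one header page followed by page_cnt - 1 data pages:

static void *demo_map_ring(struct page *pages, u32 page_cnt)
{
	struct page **wrap;
	void *ring;
	int i;

	wrap = kcalloc(page_cnt * 2 - 1, sizeof(struct page *), GFP_KERNEL);
	if (!wrap)
		return NULL;

	wrap[0] = pages;				/* header page, once */
	for (i = 0; i < 2 * (page_cnt - 1); i++)	/* data pages, twice */
		wrap[i + 1] = &pages[i % (page_cnt - 1) + 1];

	ring = vmap(wrap, page_cnt * 2 - 1, VM_MAP, PAGE_KERNEL);
	kfree(wrap);	/* safe: vmap copied the mappings into page tables */
	return ring;
}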
page              101 drivers/hwmon/ftsteutates.c 	unsigned char page = reg >> 8;
page              106 drivers/hwmon/ftsteutates.c 	dev_dbg(&client->dev, "page select - page: 0x%.02x\n", page);
page              107 drivers/hwmon/ftsteutates.c 	ret = i2c_smbus_write_byte_data(client, FTS_PAGE_SELECT_REG, page);
page              124 drivers/hwmon/ftsteutates.c 	unsigned char page = reg >> 8;
page              129 drivers/hwmon/ftsteutates.c 	dev_dbg(&client->dev, "page select - page: 0x%.02x\n", page);
page              130 drivers/hwmon/ftsteutates.c 	ret = i2c_smbus_write_byte_data(client, FTS_PAGE_SELECT_REG, page);
page              229 drivers/hwmon/pmbus/adm1275.c static int adm1275_read_word_data(struct i2c_client *client, int page, int reg)
page              235 drivers/hwmon/pmbus/adm1275.c 	if (page > 0)
page              329 drivers/hwmon/pmbus/adm1275.c static int adm1275_write_word_data(struct i2c_client *client, int page, int reg,
page              336 drivers/hwmon/pmbus/adm1275.c 	if (page > 0)
page              386 drivers/hwmon/pmbus/adm1275.c static int adm1275_read_byte_data(struct i2c_client *client, int page, int reg)
page              392 drivers/hwmon/pmbus/adm1275.c 	if (page > 0)
page              397 drivers/hwmon/pmbus/adm1275.c 		ret = pmbus_read_byte_data(client, page, PMBUS_STATUS_IOUT);
page              402 drivers/hwmon/pmbus/adm1275.c 		mfr_status = pmbus_read_byte_data(client, page,
page              425 drivers/hwmon/pmbus/adm1275.c 			mfr_status = pmbus_read_byte_data(client, page,
page              218 drivers/hwmon/pmbus/ibm-cffps.c static int ibm_cffps_read_byte_data(struct i2c_client *client, int page,
page              228 drivers/hwmon/pmbus/ibm-cffps.c 		rc = pmbus_read_byte_data(client, page, reg);
page              232 drivers/hwmon/pmbus/ibm-cffps.c 		mfr = pmbus_read_byte_data(client, page,
page              268 drivers/hwmon/pmbus/ibm-cffps.c static int ibm_cffps_read_word_data(struct i2c_client *client, int page,
page              275 drivers/hwmon/pmbus/ibm-cffps.c 		rc = pmbus_read_word_data(client, page, reg);
page              279 drivers/hwmon/pmbus/ibm-cffps.c 		mfr = pmbus_read_byte_data(client, page,
page               24 drivers/hwmon/pmbus/ir35221.c static int ir35221_read_word_data(struct i2c_client *client, int page, int reg)
page               30 drivers/hwmon/pmbus/ir35221.c 		ret = pmbus_read_word_data(client, page, IR35221_MFR_VIN_PEAK);
page               33 drivers/hwmon/pmbus/ir35221.c 		ret = pmbus_read_word_data(client, page, IR35221_MFR_VOUT_PEAK);
page               36 drivers/hwmon/pmbus/ir35221.c 		ret = pmbus_read_word_data(client, page, IR35221_MFR_IOUT_PEAK);
page               39 drivers/hwmon/pmbus/ir35221.c 		ret = pmbus_read_word_data(client, page, IR35221_MFR_TEMP_PEAK);
page               42 drivers/hwmon/pmbus/ir35221.c 		ret = pmbus_read_word_data(client, page,
page               46 drivers/hwmon/pmbus/ir35221.c 		ret = pmbus_read_word_data(client, page,
page               50 drivers/hwmon/pmbus/ir35221.c 		ret = pmbus_read_word_data(client, page,
page               54 drivers/hwmon/pmbus/ir35221.c 		ret = pmbus_read_word_data(client, page,
page               22 drivers/hwmon/pmbus/isl68137.c 					     int page,
page               25 drivers/hwmon/pmbus/isl68137.c 	int val = pmbus_read_byte_data(client, page, PMBUS_OPERATION);
page               32 drivers/hwmon/pmbus/isl68137.c 					      int page,
page               52 drivers/hwmon/pmbus/isl68137.c 		rc = pmbus_read_word_data(client, page, PMBUS_VOUT_COMMAND);
page               56 drivers/hwmon/pmbus/isl68137.c 		rc = pmbus_write_word_data(client, page, PMBUS_VOUT_COMMAND,
page               62 drivers/hwmon/pmbus/isl68137.c 	rc = pmbus_update_byte_data(client, page, PMBUS_OPERATION,
page              214 drivers/hwmon/pmbus/lm25066.c static int lm25066_read_word_data(struct i2c_client *client, int page, int reg)
page              291 drivers/hwmon/pmbus/lm25066.c static int lm25056_read_word_data(struct i2c_client *client, int page, int reg)
page              313 drivers/hwmon/pmbus/lm25066.c 		ret = lm25066_read_word_data(client, page, reg);
page              319 drivers/hwmon/pmbus/lm25066.c static int lm25056_read_byte_data(struct i2c_client *client, int page, int reg)
page              343 drivers/hwmon/pmbus/lm25066.c static int lm25066_write_word_data(struct i2c_client *client, int page, int reg,
page              154 drivers/hwmon/pmbus/ltc2978.c static int ltc_read_word_data(struct i2c_client *client, int page, int reg)
page              162 drivers/hwmon/pmbus/ltc2978.c 	return pmbus_read_word_data(client, page, reg);
page              165 drivers/hwmon/pmbus/ltc2978.c static int ltc_read_byte_data(struct i2c_client *client, int page, int reg)
page              173 drivers/hwmon/pmbus/ltc2978.c 	return pmbus_read_byte_data(client, page, reg);
page              176 drivers/hwmon/pmbus/ltc2978.c static int ltc_write_byte(struct i2c_client *client, int page, u8 byte)
page              184 drivers/hwmon/pmbus/ltc2978.c 	return pmbus_write_byte(client, page, byte);
page              201 drivers/hwmon/pmbus/ltc2978.c 		       int page, int reg, u16 *pmax)
page              205 drivers/hwmon/pmbus/ltc2978.c 	ret = ltc_read_word_data(client, page, reg);
page              215 drivers/hwmon/pmbus/ltc2978.c 		       int page, int reg, u16 *pmin)
page              219 drivers/hwmon/pmbus/ltc2978.c 	ret = ltc_read_word_data(client, page, reg);
page              228 drivers/hwmon/pmbus/ltc2978.c static int ltc2978_read_word_data_common(struct i2c_client *client, int page,
page              237 drivers/hwmon/pmbus/ltc2978.c 		ret = ltc_get_max(data, client, page, LTC2978_MFR_VIN_PEAK,
page              241 drivers/hwmon/pmbus/ltc2978.c 		ret = ltc_read_word_data(client, page, LTC2978_MFR_VOUT_PEAK);
page              247 drivers/hwmon/pmbus/ltc2978.c 			if (ret > data->vout_max[page])
page              248 drivers/hwmon/pmbus/ltc2978.c 				data->vout_max[page] = ret;
page              249 drivers/hwmon/pmbus/ltc2978.c 			ret = data->vout_max[page];
page              253 drivers/hwmon/pmbus/ltc2978.c 		ret = ltc_get_max(data, client, page,
page              255 drivers/hwmon/pmbus/ltc2978.c 				  &data->temp_max[page]);
page              272 drivers/hwmon/pmbus/ltc2978.c static int ltc2978_read_word_data(struct i2c_client *client, int page, int reg)
page              280 drivers/hwmon/pmbus/ltc2978.c 		ret = ltc_get_min(data, client, page, LTC2978_MFR_VIN_MIN,
page              284 drivers/hwmon/pmbus/ltc2978.c 		ret = ltc_read_word_data(client, page, LTC2978_MFR_VOUT_MIN);
page              292 drivers/hwmon/pmbus/ltc2978.c 			if (data->vout_max[page] && ret > data->vout_max[page])
page              293 drivers/hwmon/pmbus/ltc2978.c 				ret = data->vout_max[page];
page              294 drivers/hwmon/pmbus/ltc2978.c 			if (ret < data->vout_min[page])
page              295 drivers/hwmon/pmbus/ltc2978.c 				data->vout_min[page] = ret;
page              296 drivers/hwmon/pmbus/ltc2978.c 			ret = data->vout_min[page];
page              300 drivers/hwmon/pmbus/ltc2978.c 		ret = ltc_get_min(data, client, page,
page              302 drivers/hwmon/pmbus/ltc2978.c 				  &data->temp_min[page]);
page              311 drivers/hwmon/pmbus/ltc2978.c 		ret = ltc2978_read_word_data_common(client, page, reg);
page              317 drivers/hwmon/pmbus/ltc2978.c static int ltc2974_read_word_data(struct i2c_client *client, int page, int reg)
page              325 drivers/hwmon/pmbus/ltc2978.c 		ret = ltc_get_max(data, client, page, LTC2974_MFR_IOUT_PEAK,
page              326 drivers/hwmon/pmbus/ltc2978.c 				  &data->iout_max[page]);
page              329 drivers/hwmon/pmbus/ltc2978.c 		ret = ltc_get_min(data, client, page, LTC2974_MFR_IOUT_MIN,
page              330 drivers/hwmon/pmbus/ltc2978.c 				  &data->iout_min[page]);
page              336 drivers/hwmon/pmbus/ltc2978.c 		ret = ltc2978_read_word_data(client, page, reg);
page              342 drivers/hwmon/pmbus/ltc2978.c static int ltc2975_read_word_data(struct i2c_client *client, int page, int reg)
page              350 drivers/hwmon/pmbus/ltc2978.c 		ret = ltc_get_max(data, client, page, LTC2975_MFR_IIN_PEAK,
page              354 drivers/hwmon/pmbus/ltc2978.c 		ret = ltc_get_min(data, client, page, LTC2975_MFR_IIN_MIN,
page              358 drivers/hwmon/pmbus/ltc2978.c 		ret = ltc_get_max(data, client, page, LTC2975_MFR_PIN_PEAK,
page              362 drivers/hwmon/pmbus/ltc2978.c 		ret = ltc_get_min(data, client, page, LTC2975_MFR_PIN_MIN,
page              370 drivers/hwmon/pmbus/ltc2978.c 		ret = ltc2978_read_word_data(client, page, reg);
page              376 drivers/hwmon/pmbus/ltc2978.c static int ltc3880_read_word_data(struct i2c_client *client, int page, int reg)
page              384 drivers/hwmon/pmbus/ltc2978.c 		ret = ltc_get_max(data, client, page, LTC3880_MFR_IOUT_PEAK,
page              385 drivers/hwmon/pmbus/ltc2978.c 				  &data->iout_max[page]);
page              388 drivers/hwmon/pmbus/ltc2978.c 		ret = ltc_get_max(data, client, page,
page              402 drivers/hwmon/pmbus/ltc2978.c 		ret = ltc2978_read_word_data_common(client, page, reg);
page              408 drivers/hwmon/pmbus/ltc2978.c static int ltc3883_read_word_data(struct i2c_client *client, int page, int reg)
page              416 drivers/hwmon/pmbus/ltc2978.c 		ret = ltc_get_max(data, client, page, LTC3883_MFR_IIN_PEAK,
page              423 drivers/hwmon/pmbus/ltc2978.c 		ret = ltc3880_read_word_data(client, page, reg);
page              430 drivers/hwmon/pmbus/ltc2978.c 			       struct i2c_client *client, int page)
page              437 drivers/hwmon/pmbus/ltc2978.c 		ret = ltc_write_byte(client, page, PMBUS_CLEAR_FAULTS);
page              442 drivers/hwmon/pmbus/ltc2978.c static int ltc2978_write_word_data(struct i2c_client *client, int page,
page              461 drivers/hwmon/pmbus/ltc2978.c 		data->iout_max[page] = 0x7c00;
page              462 drivers/hwmon/pmbus/ltc2978.c 		data->iout_min[page] = 0xfbff;
page              463 drivers/hwmon/pmbus/ltc2978.c 		ret = ltc2978_clear_peaks(data, client, page);
page              467 drivers/hwmon/pmbus/ltc2978.c 		ret = ltc2978_clear_peaks(data, client, page);
page              470 drivers/hwmon/pmbus/ltc2978.c 		data->vout_min[page] = 0xffff;
page              471 drivers/hwmon/pmbus/ltc2978.c 		data->vout_max[page] = 0;
page              472 drivers/hwmon/pmbus/ltc2978.c 		ret = ltc2978_clear_peaks(data, client, page);
page              477 drivers/hwmon/pmbus/ltc2978.c 		ret = ltc2978_clear_peaks(data, client, page);
page              480 drivers/hwmon/pmbus/ltc2978.c 		data->temp_min[page] = 0x7bff;
page              481 drivers/hwmon/pmbus/ltc2978.c 		data->temp_max[page] = 0x7c00;
page              482 drivers/hwmon/pmbus/ltc2978.c 		ret = ltc2978_clear_peaks(data, client, page);
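The ltc2978.c entries above keep software high/low-water marks: each read of a *_PEAK register folds the hardware value into a cached max or min, and a history-clearing write resets the cache to format-specific sentinels (0x7c00 / 0xfbff in the index above). A simplified sketch using plain integers; the real driver compares decoded LinearFloat11 values, not raw register words:

struct demo_data {
	int iout_max;			/* cached high-water mark */
	int iout_min;			/* cached low-water mark */
};

static int demo_track_peak(struct demo_data *d, int reading)
{
	if (reading > d->iout_max)	/* fold reading into the cache */
		d->iout_max = reading;
	return d->iout_max;
}

static void demo_clear_peaks(struct demo_data *d)
{
	d->iout_max = INT_MIN;		/* next reading becomes the peak */
	d->iout_min = INT_MAX;
}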
page               27 drivers/hwmon/pmbus/ltc3815.c static int ltc3815_read_byte_data(struct i2c_client *client, int page, int reg)
page               48 drivers/hwmon/pmbus/ltc3815.c static int ltc3815_write_byte(struct i2c_client *client, int page, u8 reg)
page               72 drivers/hwmon/pmbus/ltc3815.c static int ltc3815_read_word_data(struct i2c_client *client, int page, int reg)
page               78 drivers/hwmon/pmbus/ltc3815.c 		ret = pmbus_read_word_data(client, page, LTC3815_MFR_VIN_PEAK);
page               81 drivers/hwmon/pmbus/ltc3815.c 		ret = pmbus_read_word_data(client, page, LTC3815_MFR_VOUT_PEAK);
page               84 drivers/hwmon/pmbus/ltc3815.c 		ret = pmbus_read_word_data(client, page, LTC3815_MFR_TEMP_PEAK);
page               87 drivers/hwmon/pmbus/ltc3815.c 		ret = pmbus_read_word_data(client, page, LTC3815_MFR_IOUT_PEAK);
page               90 drivers/hwmon/pmbus/ltc3815.c 		ret = pmbus_read_word_data(client, page, LTC3815_MFR_IIN_PEAK);
page              106 drivers/hwmon/pmbus/ltc3815.c static int ltc3815_write_word_data(struct i2c_client *client, int page,
page              113 drivers/hwmon/pmbus/ltc3815.c 		ret = pmbus_write_word_data(client, page,
page              117 drivers/hwmon/pmbus/ltc3815.c 		ret = pmbus_write_word_data(client, page,
page              121 drivers/hwmon/pmbus/ltc3815.c 		ret = pmbus_write_word_data(client, page,
page              125 drivers/hwmon/pmbus/ltc3815.c 		ret = pmbus_write_word_data(client, page,
page              129 drivers/hwmon/pmbus/ltc3815.c 		ret = pmbus_write_word_data(client, page,
page               18 drivers/hwmon/pmbus/max16064.c static int max16064_read_word_data(struct i2c_client *client, int page, int reg)
page               24 drivers/hwmon/pmbus/max16064.c 		ret = pmbus_read_word_data(client, page,
page               28 drivers/hwmon/pmbus/max16064.c 		ret = pmbus_read_word_data(client, page,
page               42 drivers/hwmon/pmbus/max16064.c static int max16064_write_word_data(struct i2c_client *client, int page,
page               49 drivers/hwmon/pmbus/max16064.c 		ret = pmbus_write_word_data(client, page,
page               53 drivers/hwmon/pmbus/max16064.c 		ret = pmbus_write_word_data(client, page,
page               26 drivers/hwmon/pmbus/max31785.c static int max31785_read_byte_data(struct i2c_client *client, int page,
page               29 drivers/hwmon/pmbus/max31785.c 	if (page < MAX31785_NR_PAGES)
page               36 drivers/hwmon/pmbus/max31785.c 		return pmbus_read_byte_data(client, page - MAX31785_NR_PAGES,
page               43 drivers/hwmon/pmbus/max31785.c static int max31785_write_byte(struct i2c_client *client, int page, u8 value)
page               45 drivers/hwmon/pmbus/max31785.c 	if (page < MAX31785_NR_PAGES)
page               51 drivers/hwmon/pmbus/max31785.c static int max31785_read_long_data(struct i2c_client *client, int page,
page               75 drivers/hwmon/pmbus/max31785.c 	rc = pmbus_set_page(client, page);
page               89 drivers/hwmon/pmbus/max31785.c static int max31785_get_pwm(struct i2c_client *client, int page)
page               93 drivers/hwmon/pmbus/max31785.c 	rv = pmbus_get_fan_rate_device(client, page, 0, percent);
page              104 drivers/hwmon/pmbus/max31785.c static int max31785_get_pwm_mode(struct i2c_client *client, int page)
page              109 drivers/hwmon/pmbus/max31785.c 	config = pmbus_read_byte_data(client, page, PMBUS_FAN_CONFIG_12);
page              113 drivers/hwmon/pmbus/max31785.c 	command = pmbus_read_word_data(client, page, PMBUS_FAN_COMMAND_1);
page              128 drivers/hwmon/pmbus/max31785.c static int max31785_read_word_data(struct i2c_client *client, int page,
page              136 drivers/hwmon/pmbus/max31785.c 		if (page < MAX31785_NR_PAGES)
page              139 drivers/hwmon/pmbus/max31785.c 		rv = max31785_read_long_data(client, page - MAX31785_NR_PAGES,
page              153 drivers/hwmon/pmbus/max31785.c 		rv = (page >= MAX31785_NR_PAGES) ? -ENOTSUPP : -ENODATA;
page              156 drivers/hwmon/pmbus/max31785.c 		rv = max31785_get_pwm(client, page);
page              159 drivers/hwmon/pmbus/max31785.c 		rv = max31785_get_pwm_mode(client, page);
page              190 drivers/hwmon/pmbus/max31785.c static int max31785_pwm_enable(struct i2c_client *client, int page,
page              201 drivers/hwmon/pmbus/max31785.c 		rate = pmbus_get_fan_rate_cached(client, page, 0, percent);
page              208 drivers/hwmon/pmbus/max31785.c 		rate = pmbus_get_fan_rate_cached(client, page, 0, rpm);
page              219 drivers/hwmon/pmbus/max31785.c 	return pmbus_update_fan(client, page, 0, config, PB_FAN_1_RPM, rate);
page              222 drivers/hwmon/pmbus/max31785.c static int max31785_write_word_data(struct i2c_client *client, int page,
page              227 drivers/hwmon/pmbus/max31785.c 		return pmbus_update_fan(client, page, 0, 0, PB_FAN_1_RPM,
page              230 drivers/hwmon/pmbus/max31785.c 		return max31785_pwm_enable(client, page, word);
page               44 drivers/hwmon/pmbus/max34440.c static int max34440_read_word_data(struct i2c_client *client, int page, int reg)
page               52 drivers/hwmon/pmbus/max34440.c 		ret = pmbus_read_word_data(client, page,
page               56 drivers/hwmon/pmbus/max34440.c 		ret = pmbus_read_word_data(client, page,
page               62 drivers/hwmon/pmbus/max34440.c 		ret = pmbus_read_word_data(client, page,
page               66 drivers/hwmon/pmbus/max34440.c 		ret = pmbus_read_word_data(client, page,
page               72 drivers/hwmon/pmbus/max34440.c 		ret = pmbus_read_word_data(client, page,
page               78 drivers/hwmon/pmbus/max34440.c 		ret = pmbus_read_word_data(client, page,
page               85 drivers/hwmon/pmbus/max34440.c 		ret = pmbus_read_word_data(client, page,
page               89 drivers/hwmon/pmbus/max34440.c 		ret = pmbus_read_word_data(client, page,
page              109 drivers/hwmon/pmbus/max34440.c static int max34440_write_word_data(struct i2c_client *client, int page,
page              118 drivers/hwmon/pmbus/max34440.c 		ret = pmbus_write_word_data(client, page,
page              122 drivers/hwmon/pmbus/max34440.c 		ret = pmbus_write_word_data(client, page,
page              126 drivers/hwmon/pmbus/max34440.c 		ret = pmbus_write_word_data(client, page,
page              130 drivers/hwmon/pmbus/max34440.c 		ret = pmbus_write_word_data(client, page,
page              134 drivers/hwmon/pmbus/max34440.c 		ret = pmbus_write_word_data(client, page,
page              137 drivers/hwmon/pmbus/max34440.c 			ret = pmbus_write_word_data(client, page,
page              142 drivers/hwmon/pmbus/max34440.c 		ret = pmbus_write_word_data(client, page,
page              146 drivers/hwmon/pmbus/max34440.c 			ret = pmbus_write_word_data(client, page,
page              156 drivers/hwmon/pmbus/max34440.c static int max34440_read_byte_data(struct i2c_client *client, int page, int reg)
page              161 drivers/hwmon/pmbus/max34440.c 	if (page >= 0) {
page              162 drivers/hwmon/pmbus/max34440.c 		ret = pmbus_set_page(client, page);
page              211 drivers/hwmon/pmbus/max34440.c 	int page, rv;
page              213 drivers/hwmon/pmbus/max34440.c 	for (page = 0; page < 16; page++) {
page              214 drivers/hwmon/pmbus/max34440.c 		rv = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page);
page              226 drivers/hwmon/pmbus/max34440.c 			data->info.func[page] = PMBUS_HAVE_VOUT |
page              230 drivers/hwmon/pmbus/max34440.c 			data->info.func[page] = PMBUS_HAVE_VOUT;
page              233 drivers/hwmon/pmbus/max34440.c 			data->info.func[page] = PMBUS_HAVE_IOUT |
page              237 drivers/hwmon/pmbus/max34440.c 			data->info.func[page] = PMBUS_HAVE_IOUT;
page               31 drivers/hwmon/pmbus/max8688.c static int max8688_read_word_data(struct i2c_client *client, int page, int reg)
page               35 drivers/hwmon/pmbus/max8688.c 	if (page > 0)
page               61 drivers/hwmon/pmbus/max8688.c static int max8688_write_word_data(struct i2c_client *client, int page, int reg,
page               87 drivers/hwmon/pmbus/max8688.c static int max8688_read_byte_data(struct i2c_client *client, int page, int reg)
page               92 drivers/hwmon/pmbus/max8688.c 	if (page > 0)
page               29 drivers/hwmon/pmbus/pmbus.c 	int page;
page               68 drivers/hwmon/pmbus/pmbus.c 	for (page = 0; page < info->pages; page++) {
page               69 drivers/hwmon/pmbus/pmbus.c 		if (pmbus_check_word_register(client, page, PMBUS_READ_VOUT)) {
page               70 drivers/hwmon/pmbus/pmbus.c 			info->func[page] |= PMBUS_HAVE_VOUT;
page               71 drivers/hwmon/pmbus/pmbus.c 			if (pmbus_check_byte_register(client, page,
page               73 drivers/hwmon/pmbus/pmbus.c 				info->func[page] |= PMBUS_HAVE_STATUS_VOUT;
page               75 drivers/hwmon/pmbus/pmbus.c 		if (pmbus_check_word_register(client, page, PMBUS_READ_IOUT)) {
page               76 drivers/hwmon/pmbus/pmbus.c 			info->func[page] |= PMBUS_HAVE_IOUT;
page               79 drivers/hwmon/pmbus/pmbus.c 				info->func[page] |= PMBUS_HAVE_STATUS_IOUT;
page               81 drivers/hwmon/pmbus/pmbus.c 		if (pmbus_check_word_register(client, page, PMBUS_READ_POUT))
page               82 drivers/hwmon/pmbus/pmbus.c 			info->func[page] |= PMBUS_HAVE_POUT;
page              102 drivers/hwmon/pmbus/pmbus.c 			int page;
page              104 drivers/hwmon/pmbus/pmbus.c 			for (page = 1; page < PMBUS_PAGES; page++) {
page              105 drivers/hwmon/pmbus/pmbus.c 				if (pmbus_set_page(client, page) < 0)
page              109 drivers/hwmon/pmbus/pmbus.c 			info->pages = page;
page              406 drivers/hwmon/pmbus/pmbus.h 	int (*read_byte_data)(struct i2c_client *client, int page, int reg);
page              407 drivers/hwmon/pmbus/pmbus.h 	int (*read_word_data)(struct i2c_client *client, int page, int reg);
page              408 drivers/hwmon/pmbus/pmbus.h 	int (*write_word_data)(struct i2c_client *client, int page, int reg,
page              410 drivers/hwmon/pmbus/pmbus.h 	int (*write_byte)(struct i2c_client *client, int page, u8 value);
page              446 drivers/hwmon/pmbus/pmbus.h int pmbus_set_page(struct i2c_client *client, int page);
page              447 drivers/hwmon/pmbus/pmbus.h int pmbus_read_word_data(struct i2c_client *client, int page, u8 reg);
page              448 drivers/hwmon/pmbus/pmbus.h int pmbus_write_word_data(struct i2c_client *client, int page, u8 reg, u16 word);
page              449 drivers/hwmon/pmbus/pmbus.h int pmbus_read_byte_data(struct i2c_client *client, int page, u8 reg);
page              450 drivers/hwmon/pmbus/pmbus.h int pmbus_write_byte(struct i2c_client *client, int page, u8 value);
page              451 drivers/hwmon/pmbus/pmbus.h int pmbus_write_byte_data(struct i2c_client *client, int page, u8 reg,
page              453 drivers/hwmon/pmbus/pmbus.h int pmbus_update_byte_data(struct i2c_client *client, int page, u8 reg,
page              456 drivers/hwmon/pmbus/pmbus.h bool pmbus_check_byte_register(struct i2c_client *client, int page, int reg);
page              457 drivers/hwmon/pmbus/pmbus.h bool pmbus_check_word_register(struct i2c_client *client, int page, int reg);
page              463 drivers/hwmon/pmbus/pmbus.h int pmbus_get_fan_rate_device(struct i2c_client *client, int page, int id,
page              465 drivers/hwmon/pmbus/pmbus.h int pmbus_get_fan_rate_cached(struct i2c_client *client, int page, int id,
page              467 drivers/hwmon/pmbus/pmbus.h int pmbus_update_fan(struct i2c_client *client, int page, int id,
page               51 drivers/hwmon/pmbus/pmbus_core.c 	u8 page;		/* page number */
page              109 drivers/hwmon/pmbus/pmbus_core.c 	int (*read_status)(struct i2c_client *client, int page);
page              116 drivers/hwmon/pmbus/pmbus_core.c 	u8 page;
page              149 drivers/hwmon/pmbus/pmbus_core.c int pmbus_set_page(struct i2c_client *client, int page)
page              154 drivers/hwmon/pmbus/pmbus_core.c 	if (page < 0 || page == data->currpage)
page              157 drivers/hwmon/pmbus/pmbus_core.c 	if (!(data->info->func[page] & PMBUS_PAGE_VIRTUAL)) {
page              158 drivers/hwmon/pmbus/pmbus_core.c 		rv = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page);
page              166 drivers/hwmon/pmbus/pmbus_core.c 		if (rv != page)
page              170 drivers/hwmon/pmbus/pmbus_core.c 	data->currpage = page;
page              176 drivers/hwmon/pmbus/pmbus_core.c int pmbus_write_byte(struct i2c_client *client, int page, u8 value)
page              180 drivers/hwmon/pmbus/pmbus_core.c 	rv = pmbus_set_page(client, page);
page              192 drivers/hwmon/pmbus/pmbus_core.c static int _pmbus_write_byte(struct i2c_client *client, int page, u8 value)
page              199 drivers/hwmon/pmbus/pmbus_core.c 		status = info->write_byte(client, page, value);
page              203 drivers/hwmon/pmbus/pmbus_core.c 	return pmbus_write_byte(client, page, value);
page              206 drivers/hwmon/pmbus/pmbus_core.c int pmbus_write_word_data(struct i2c_client *client, int page, u8 reg,
page              211 drivers/hwmon/pmbus/pmbus_core.c 	rv = pmbus_set_page(client, page);
page              220 drivers/hwmon/pmbus/pmbus_core.c static int pmbus_write_virt_reg(struct i2c_client *client, int page, int reg,
page              231 drivers/hwmon/pmbus/pmbus_core.c 		rv = pmbus_update_fan(client, page, id, bit, bit, word);
page              245 drivers/hwmon/pmbus/pmbus_core.c static int _pmbus_write_word_data(struct i2c_client *client, int page, int reg,
page              253 drivers/hwmon/pmbus/pmbus_core.c 		status = info->write_word_data(client, page, reg, word);
page              259 drivers/hwmon/pmbus/pmbus_core.c 		return pmbus_write_virt_reg(client, page, reg, word);
page              261 drivers/hwmon/pmbus/pmbus_core.c 	return pmbus_write_word_data(client, page, reg, word);
page              264 drivers/hwmon/pmbus/pmbus_core.c int pmbus_update_fan(struct i2c_client *client, int page, int id,
page              271 drivers/hwmon/pmbus/pmbus_core.c 	from = pmbus_read_byte_data(client, page,
page              278 drivers/hwmon/pmbus/pmbus_core.c 		rv = pmbus_write_byte_data(client, page,
page              284 drivers/hwmon/pmbus/pmbus_core.c 	return _pmbus_write_word_data(client, page,
page              289 drivers/hwmon/pmbus/pmbus_core.c int pmbus_read_word_data(struct i2c_client *client, int page, u8 reg)
page              293 drivers/hwmon/pmbus/pmbus_core.c 	rv = pmbus_set_page(client, page);
page              301 drivers/hwmon/pmbus/pmbus_core.c static int pmbus_read_virt_reg(struct i2c_client *client, int page, int reg)
page              309 drivers/hwmon/pmbus/pmbus_core.c 		rv = pmbus_get_fan_rate_device(client, page, id, rpm);
page              323 drivers/hwmon/pmbus/pmbus_core.c static int _pmbus_read_word_data(struct i2c_client *client, int page, int reg)
page              330 drivers/hwmon/pmbus/pmbus_core.c 		status = info->read_word_data(client, page, reg);
page              336 drivers/hwmon/pmbus/pmbus_core.c 		return pmbus_read_virt_reg(client, page, reg);
page              338 drivers/hwmon/pmbus/pmbus_core.c 	return pmbus_read_word_data(client, page, reg);
page              341 drivers/hwmon/pmbus/pmbus_core.c int pmbus_read_byte_data(struct i2c_client *client, int page, u8 reg)
page              345 drivers/hwmon/pmbus/pmbus_core.c 	rv = pmbus_set_page(client, page);
page              353 drivers/hwmon/pmbus/pmbus_core.c int pmbus_write_byte_data(struct i2c_client *client, int page, u8 reg, u8 value)
page              357 drivers/hwmon/pmbus/pmbus_core.c 	rv = pmbus_set_page(client, page);
page              365 drivers/hwmon/pmbus/pmbus_core.c int pmbus_update_byte_data(struct i2c_client *client, int page, u8 reg,
page              371 drivers/hwmon/pmbus/pmbus_core.c 	rv = pmbus_read_byte_data(client, page, reg);
page              378 drivers/hwmon/pmbus/pmbus_core.c 		rv = pmbus_write_byte_data(client, page, reg, tmp);
page              388 drivers/hwmon/pmbus/pmbus_core.c static int _pmbus_read_byte_data(struct i2c_client *client, int page, int reg)
page              395 drivers/hwmon/pmbus/pmbus_core.c 		status = info->read_byte_data(client, page, reg);
page              399 drivers/hwmon/pmbus/pmbus_core.c 	return pmbus_read_byte_data(client, page, reg);
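The pmbus_core.c entries above show the dispatch convention used throughout the PMBus core: an optional per-chip callback gets first refusal, and a return of -ENODATA means "not handled here, fall back to the generic SMBus path". A trimmed sketch of that chain (demo_info stands in for struct pmbus_driver_info):

struct demo_info {
	int (*read_byte_data)(struct i2c_client *client, int page, int reg);
};

static int _demo_read_byte_data(struct i2c_client *client,
				const struct demo_info *info,
				int page, int reg)
{
	if (info->read_byte_data) {
		int status = info->read_byte_data(client, page, reg);

		if (status != -ENODATA)	/* handled (or really failed) */
			return status;
	}
	return pmbus_read_byte_data(client, page, reg);	/* generic path */
}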
page              402 drivers/hwmon/pmbus/pmbus_core.c static struct pmbus_sensor *pmbus_find_sensor(struct pmbus_data *data, int page,
page              408 drivers/hwmon/pmbus/pmbus_core.c 		if (sensor->page == page && sensor->reg == reg)
page              415 drivers/hwmon/pmbus/pmbus_core.c static int pmbus_get_fan_rate(struct i2c_client *client, int page, int id,
page              429 drivers/hwmon/pmbus/pmbus_core.c 		s = pmbus_find_sensor(data, page, reg + id);
page              436 drivers/hwmon/pmbus/pmbus_core.c 	config = pmbus_read_byte_data(client, page,
page              443 drivers/hwmon/pmbus/pmbus_core.c 		return pmbus_read_word_data(client, page,
page              450 drivers/hwmon/pmbus/pmbus_core.c int pmbus_get_fan_rate_device(struct i2c_client *client, int page, int id,
page              453 drivers/hwmon/pmbus/pmbus_core.c 	return pmbus_get_fan_rate(client, page, id, mode, false);
page              457 drivers/hwmon/pmbus/pmbus_core.c int pmbus_get_fan_rate_cached(struct i2c_client *client, int page, int id,
page              460 drivers/hwmon/pmbus/pmbus_core.c 	return pmbus_get_fan_rate(client, page, id, mode, true);
page              464 drivers/hwmon/pmbus/pmbus_core.c static void pmbus_clear_fault_page(struct i2c_client *client, int page)
page              466 drivers/hwmon/pmbus/pmbus_core.c 	_pmbus_write_byte(client, page, PMBUS_CLEAR_FAULTS);
page              495 drivers/hwmon/pmbus/pmbus_core.c 					     int page, int reg),
page              496 drivers/hwmon/pmbus/pmbus_core.c 				 int page, int reg)
page              501 drivers/hwmon/pmbus/pmbus_core.c 	rv = func(client, page, reg);
page              508 drivers/hwmon/pmbus/pmbus_core.c static bool pmbus_check_status_register(struct i2c_client *client, int page)
page              513 drivers/hwmon/pmbus/pmbus_core.c 	status = data->read_status(client, page);
page              525 drivers/hwmon/pmbus/pmbus_core.c bool pmbus_check_byte_register(struct i2c_client *client, int page, int reg)
page              527 drivers/hwmon/pmbus/pmbus_core.c 	return pmbus_check_register(client, _pmbus_read_byte_data, page, reg);
page              531 drivers/hwmon/pmbus/pmbus_core.c bool pmbus_check_word_register(struct i2c_client *client, int page, int reg)
page              533 drivers/hwmon/pmbus/pmbus_core.c 	return pmbus_check_register(client, _pmbus_read_word_data, page, reg);
page              597 drivers/hwmon/pmbus/pmbus_core.c 							    sensor->page,
page              620 drivers/hwmon/pmbus/pmbus_core.c 		exponent = data->exponent[sensor->page];
page              760 drivers/hwmon/pmbus/pmbus_core.c 		if (data->exponent[sensor->page] < 0)
page              761 drivers/hwmon/pmbus/pmbus_core.c 			val <<= -data->exponent[sensor->page];
page              763 drivers/hwmon/pmbus/pmbus_core.c 			val >>= data->exponent[sensor->page];
page              977 drivers/hwmon/pmbus/pmbus_core.c 	ret = _pmbus_write_word_data(client, sensor->page, sensor->reg, regval);
page             1071 drivers/hwmon/pmbus/pmbus_core.c 					     int seq, int page, int reg,
page             1091 drivers/hwmon/pmbus/pmbus_core.c 	sensor->page = page;
page             1179 drivers/hwmon/pmbus/pmbus_core.c 				 const char *name, int index, int page,
page             1190 drivers/hwmon/pmbus/pmbus_core.c 		if (pmbus_check_word_register(client, page, l->reg)) {
page             1192 drivers/hwmon/pmbus/pmbus_core.c 						page, l->reg, attr->class,
page             1197 drivers/hwmon/pmbus/pmbus_core.c 			if (l->sbit && (info->func[page] & attr->sfunc)) {
page             1204 drivers/hwmon/pmbus/pmbus_core.c 					attr->sbase + page, l->sbit);
page             1219 drivers/hwmon/pmbus/pmbus_core.c 				      int index, int page,
page             1229 drivers/hwmon/pmbus/pmbus_core.c 				      paged ? page + 1 : 0);
page             1233 drivers/hwmon/pmbus/pmbus_core.c 	base = pmbus_add_sensor(data, name, "input", index, page, attr->reg,
page             1239 drivers/hwmon/pmbus/pmbus_core.c 					    index, page, base, attr);
page             1250 drivers/hwmon/pmbus/pmbus_core.c 		    pmbus_check_status_register(client, page)) {
page             1253 drivers/hwmon/pmbus/pmbus_core.c 						PB_STATUS_BASE + page,
page             1298 drivers/hwmon/pmbus/pmbus_core.c 		int page, pages;
page             1302 drivers/hwmon/pmbus/pmbus_core.c 		for (page = 0; page < pages; page++) {
page             1303 drivers/hwmon/pmbus/pmbus_core.c 			if (!(info->func[page] & attrs->func))
page             1306 drivers/hwmon/pmbus/pmbus_core.c 							 name, index, page,
page             1808 drivers/hwmon/pmbus/pmbus_core.c 		struct pmbus_data *data, int index, int page, int id,
page             1813 drivers/hwmon/pmbus/pmbus_core.c 	sensor = pmbus_add_sensor(data, "fan", "target", index, page,
page             1820 drivers/hwmon/pmbus/pmbus_core.c 	if (!((data->info->func[page] & PMBUS_HAVE_PWM12) ||
page             1821 drivers/hwmon/pmbus/pmbus_core.c 			(data->info->func[page] & PMBUS_HAVE_PWM34)))
page             1824 drivers/hwmon/pmbus/pmbus_core.c 	sensor = pmbus_add_sensor(data, "pwm", NULL, index, page,
page             1831 drivers/hwmon/pmbus/pmbus_core.c 	sensor = pmbus_add_sensor(data, "pwm", "enable", index, page,
page             1846 drivers/hwmon/pmbus/pmbus_core.c 	int page;
page             1849 drivers/hwmon/pmbus/pmbus_core.c 	for (page = 0; page < info->pages; page++) {
page             1855 drivers/hwmon/pmbus/pmbus_core.c 			if (!(info->func[page] & pmbus_fan_flags[f]))
page             1858 drivers/hwmon/pmbus/pmbus_core.c 			if (!pmbus_check_word_register(client, page,
page             1867 drivers/hwmon/pmbus/pmbus_core.c 			regval = _pmbus_read_byte_data(client, page,
page             1874 drivers/hwmon/pmbus/pmbus_core.c 					     page, pmbus_fan_registers[f],
page             1879 drivers/hwmon/pmbus/pmbus_core.c 			if (pmbus_check_word_register(client, page,
page             1882 drivers/hwmon/pmbus/pmbus_core.c 							 page, f, regval);
page             1891 drivers/hwmon/pmbus/pmbus_core.c 			if ((info->func[page] & pmbus_fan_status_flags[f]) &&
page             1893 drivers/hwmon/pmbus/pmbus_core.c 					page, pmbus_fan_status_registers[f])) {
page             1897 drivers/hwmon/pmbus/pmbus_core.c 					base = PB_STATUS_FAN34_BASE + page;
page             1899 drivers/hwmon/pmbus/pmbus_core.c 					base = PB_STATUS_FAN_BASE + page;
page             1923 drivers/hwmon/pmbus/pmbus_core.c 	int page;
page             1956 drivers/hwmon/pmbus/pmbus_core.c 	val = _pmbus_read_word_data(client, reg->page, reg->attr->reg);
page             1977 drivers/hwmon/pmbus/pmbus_core.c 	ret = _pmbus_write_word_data(client, reg->page, reg->attr->reg, val);
page             1983 drivers/hwmon/pmbus/pmbus_core.c static int pmbus_add_samples_attr(struct pmbus_data *data, int page,
page             1993 drivers/hwmon/pmbus/pmbus_core.c 	reg->page = page;
page             2069 drivers/hwmon/pmbus/pmbus_core.c 				 struct pmbus_data *data, int page)
page             2073 drivers/hwmon/pmbus/pmbus_core.c 	if (pmbus_check_byte_register(client, page, PMBUS_VOUT_MODE))
page             2074 drivers/hwmon/pmbus/pmbus_core.c 		vout_mode = _pmbus_read_byte_data(client, page,
page             2086 drivers/hwmon/pmbus/pmbus_core.c 			data->exponent[page] = ((s8)(vout_mode << 3)) >> 3;
page             2101 drivers/hwmon/pmbus/pmbus_core.c 	pmbus_clear_fault_page(client, page);
page             2105 drivers/hwmon/pmbus/pmbus_core.c static int pmbus_read_status_byte(struct i2c_client *client, int page)
page             2107 drivers/hwmon/pmbus/pmbus_core.c 	return _pmbus_read_byte_data(client, page, PMBUS_STATUS_BYTE);
page             2110 drivers/hwmon/pmbus/pmbus_core.c static int pmbus_read_status_word(struct i2c_client *client, int page)
page             2112 drivers/hwmon/pmbus/pmbus_core.c 	return _pmbus_read_word_data(client, page, PMBUS_STATUS_WORD);
page             2119 drivers/hwmon/pmbus/pmbus_core.c 	int page, ret;
page             2162 drivers/hwmon/pmbus/pmbus_core.c 	for (page = 0; page < info->pages; page++) {
page             2163 drivers/hwmon/pmbus/pmbus_core.c 		ret = pmbus_identify_common(client, data, page);
page             2177 drivers/hwmon/pmbus/pmbus_core.c 	u8 page = rdev_get_id(rdev);
page             2180 drivers/hwmon/pmbus/pmbus_core.c 	ret = pmbus_read_byte_data(client, page, PMBUS_OPERATION);
page             2191 drivers/hwmon/pmbus/pmbus_core.c 	u8 page = rdev_get_id(rdev);
page             2193 drivers/hwmon/pmbus/pmbus_core.c 	return pmbus_update_byte_data(client, page, PMBUS_OPERATION,
page             2258 drivers/hwmon/pmbus/pmbus_core.c 	rc = _pmbus_read_byte_data(entry->client, entry->page, entry->reg);
page             2275 drivers/hwmon/pmbus/pmbus_core.c 	rc = pdata->read_status(entry->client, entry->page);
page             2319 drivers/hwmon/pmbus/pmbus_core.c 			entries[idx].page = i;
page             2328 drivers/hwmon/pmbus/pmbus_core.c 			entries[idx].page = i;
page             2338 drivers/hwmon/pmbus/pmbus_core.c 			entries[idx].page = i;
page             2348 drivers/hwmon/pmbus/pmbus_core.c 			entries[idx].page = i;
page             2358 drivers/hwmon/pmbus/pmbus_core.c 			entries[idx].page = i;
page             2368 drivers/hwmon/pmbus/pmbus_core.c 			entries[idx].page = i;
page             2378 drivers/hwmon/pmbus/pmbus_core.c 			entries[idx].page = i;
page             2389 drivers/hwmon/pmbus/pmbus_core.c 			entries[idx].page = i;
page             2399 drivers/hwmon/pmbus/pmbus_core.c 			entries[idx].page = i;
page             2409 drivers/hwmon/pmbus/pmbus_core.c 			entries[idx].page = i;
page               88 drivers/hwmon/pmbus/ucd9000.c static int ucd9000_read_byte_data(struct i2c_client *client, int page, int reg)
page               95 drivers/hwmon/pmbus/ucd9000.c 		if (page > 0)
page              109 drivers/hwmon/pmbus/ucd9000.c 		if (page > 0)
page              547 drivers/hwmon/pmbus/ucd9000.c 		int page = UCD9000_MON_PAGE(block_buffer[i]);
page              549 drivers/hwmon/pmbus/ucd9000.c 		if (page >= info->pages)
page              555 drivers/hwmon/pmbus/ucd9000.c 			info->func[page] |= PMBUS_HAVE_VOUT
page              559 drivers/hwmon/pmbus/ucd9000.c 			info->func[page] |= PMBUS_HAVE_TEMP2
page              563 drivers/hwmon/pmbus/ucd9000.c 			info->func[page] |= PMBUS_HAVE_IOUT
page              128 drivers/hwmon/pmbus/zl6100.c static int zl6100_read_word_data(struct i2c_client *client, int page, int reg)
page              134 drivers/hwmon/pmbus/zl6100.c 	if (page > 0)
page              170 drivers/hwmon/pmbus/zl6100.c 	ret = pmbus_read_word_data(client, page, vreg);
page              187 drivers/hwmon/pmbus/zl6100.c static int zl6100_read_byte_data(struct i2c_client *client, int page, int reg)
page              193 drivers/hwmon/pmbus/zl6100.c 	if (page > 0)
page              217 drivers/hwmon/pmbus/zl6100.c 		ret = pmbus_read_byte_data(client, page, reg);
page              225 drivers/hwmon/pmbus/zl6100.c static int zl6100_write_word_data(struct i2c_client *client, int page, int reg,
page              232 drivers/hwmon/pmbus/zl6100.c 	if (page > 0)
page              261 drivers/hwmon/pmbus/zl6100.c 	ret = pmbus_write_word_data(client, page, vreg, word);
page              267 drivers/hwmon/pmbus/zl6100.c static int zl6100_write_byte(struct i2c_client *client, int page, u8 value)
page              273 drivers/hwmon/pmbus/zl6100.c 	if (page > 0)
page              277 drivers/hwmon/pmbus/zl6100.c 	ret = pmbus_write_byte(client, page, value);
page              196 drivers/hwtracing/coresight/coresight-tmc-etr.c 	struct page *page;
page              214 drivers/hwtracing/coresight/coresight-tmc-etr.c 			page = virt_to_page(pages[i]);
page              216 drivers/hwtracing/coresight/coresight-tmc-etr.c 			get_page(page);
page              218 drivers/hwtracing/coresight/coresight-tmc-etr.c 			page = alloc_pages_node(node,
page              221 drivers/hwtracing/coresight/coresight-tmc-etr.c 		paddr = dma_map_page(real_dev, page, 0, PAGE_SIZE, dir);
page              225 drivers/hwtracing/coresight/coresight-tmc-etr.c 		tmc_pages->pages[i] = page;
page              230 drivers/hwtracing/coresight/coresight-tmc.h 	struct page	**pages;
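The coresight-tmc-etr.c entries above prepare per-page DMA buffers: either take a reference on a caller-supplied page or allocate a fresh zeroed one, then DMA-map it. A sketch of one iteration, with error unwinding reduced to the essentials:

static int demo_map_one(struct device *dev, void *vaddr, int node,
			struct page **out_page, dma_addr_t *out_daddr)
{
	struct page *page;
	dma_addr_t paddr;

	if (vaddr) {
		page = virt_to_page(vaddr);
		get_page(page);		/* hold it across the mapping */
	} else {
		page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
		if (!page)
			return -ENOMEM;
	}

	paddr = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, paddr)) {
		put_page(page);		/* drop the ref or free the new page */
		return -ENOMEM;
	}

	*out_page = page;
	*out_daddr = paddr;
	return 0;
}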
page              912 drivers/hwtracing/intel_th/msu.c 	struct page *page;
page              923 drivers/hwtracing/intel_th/msu.c 	page = alloc_pages(GFP_KERNEL | __GFP_ZERO | GFP_DMA32, order);
page              924 drivers/hwtracing/intel_th/msu.c 	if (!page)
page              927 drivers/hwtracing/intel_th/msu.c 	split_page(page, order);
page              928 drivers/hwtracing/intel_th/msu.c 	sg_set_buf(msc->single_sgt.sgl, page_address(page), size);
page              936 drivers/hwtracing/intel_th/msu.c 	msc->base = page_address(page);
page              942 drivers/hwtracing/intel_th/msu.c 	__free_pages(page, order);
page              964 drivers/hwtracing/intel_th/msu.c 		struct page *page = virt_to_page(msc->base + off);
page              966 drivers/hwtracing/intel_th/msu.c 		page->mapping = NULL;
page              967 drivers/hwtracing/intel_th/msu.c 		__free_page(page);
page              980 drivers/hwtracing/intel_th/msu.c static struct page *msc_buffer_contig_get_page(struct msc *msc,
page             1124 drivers/hwtracing/intel_th/msu.c 		struct page *page = sg_page(sg);
page             1126 drivers/hwtracing/intel_th/msu.c 		page->mapping = NULL;
page             1367 drivers/hwtracing/intel_th/msu.c static struct page *msc_buffer_get_page(struct msc *msc, unsigned long pgoff)
page             1386 drivers/hwtracing/intel_th/msu.c 		struct page *page = sg_page(sg);
page             1390 drivers/hwtracing/intel_th/msu.c 			return page + pgoff;
page             1566 drivers/hwtracing/intel_th/msu.c 		struct page *page = msc_buffer_get_page(msc, pg);
page             1568 drivers/hwtracing/intel_th/msu.c 		if (WARN_ON_ONCE(!page))
page             1571 drivers/hwtracing/intel_th/msu.c 		if (page->mapping)
page             1572 drivers/hwtracing/intel_th/msu.c 			page->mapping = NULL;
page             1585 drivers/hwtracing/intel_th/msu.c 	vmf->page = msc_buffer_get_page(msc, vmf->pgoff);
page             1586 drivers/hwtracing/intel_th/msu.c 	if (!vmf->page)
page             1589 drivers/hwtracing/intel_th/msu.c 	get_page(vmf->page);
page             1590 drivers/hwtracing/intel_th/msu.c 	vmf->page->mapping = vmf->vma->vm_file->f_mapping;
page             1591 drivers/hwtracing/intel_th/msu.c 	vmf->page->index = vmf->pgoff;
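The intel_th msu.c entries above allocate a physically contiguous buffer with a high-order alloc_pages(), then split_page() it into order-0 pages so each page can be mmapped and later freed individually. A sketch of the allocate/free pair, assuming size is a power-of-two number of pages; the real driver also clears page->mapping on teardown because the pages may have been mapped to userspace:

static void *demo_alloc_contig(size_t size)
{
	int order = get_order(size);
	struct page *page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);

	if (!page)
		return NULL;

	split_page(page, order);	/* now (1 << order) order-0 pages */
	return page_address(page);
}

static void demo_free_contig(void *base, size_t size)
{
	unsigned long off;

	for (off = 0; off < size; off += PAGE_SIZE)
		__free_page(virt_to_page(base + off));
}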
page              119 drivers/hwtracing/stm/p_sys-t.c 				      char *page)
page              123 drivers/hwtracing/stm/p_sys-t.c 	return sprintf(page, "%pU\n", &pn->uuid);
page              127 drivers/hwtracing/stm/p_sys-t.c sys_t_policy_uuid_store(struct config_item *item, const char *page,
page              135 drivers/hwtracing/stm/p_sys-t.c 	ret = uuid_parse(page, &pn->uuid);
page              144 drivers/hwtracing/stm/p_sys-t.c 				      char *page)
page              148 drivers/hwtracing/stm/p_sys-t.c 	return sprintf(page, "%d\n", pn->do_len);
page              152 drivers/hwtracing/stm/p_sys-t.c sys_t_policy_do_len_store(struct config_item *item, const char *page,
page              160 drivers/hwtracing/stm/p_sys-t.c 	ret = kstrtobool(page, &pn->do_len);
page              169 drivers/hwtracing/stm/p_sys-t.c 					     char *page)
page              173 drivers/hwtracing/stm/p_sys-t.c 	return sprintf(page, "%u\n", jiffies_to_msecs(pn->ts_interval));
page              177 drivers/hwtracing/stm/p_sys-t.c sys_t_policy_ts_interval_store(struct config_item *item, const char *page,
page              186 drivers/hwtracing/stm/p_sys-t.c 	ret = kstrtouint(page, 10, &ms);
page              200 drivers/hwtracing/stm/p_sys-t.c 						    char *page)
page              204 drivers/hwtracing/stm/p_sys-t.c 	return sprintf(page, "%u\n", jiffies_to_msecs(pn->clocksync_interval));
page              209 drivers/hwtracing/stm/p_sys-t.c 				      const char *page, size_t count)
page              217 drivers/hwtracing/stm/p_sys-t.c 	ret = kstrtouint(page, 10, &ms);
page               90 drivers/hwtracing/stm/policy.c stp_policy_node_masters_show(struct config_item *item, char *page)
page               95 drivers/hwtracing/stm/policy.c 	count = sprintf(page, "%u %u\n", policy_node->first_master,
page              102 drivers/hwtracing/stm/policy.c stp_policy_node_masters_store(struct config_item *item, const char *page,
page              108 drivers/hwtracing/stm/policy.c 	char *p = (char *)page;
page              137 drivers/hwtracing/stm/policy.c stp_policy_node_channels_show(struct config_item *item, char *page)
page              142 drivers/hwtracing/stm/policy.c 	count = sprintf(page, "%u %u\n", policy_node->first_channel,
page              149 drivers/hwtracing/stm/policy.c stp_policy_node_channels_store(struct config_item *item, const char *page,
page              155 drivers/hwtracing/stm/policy.c 	char *p = (char *)page;
page              294 drivers/hwtracing/stm/policy.c 				      char *page)
page              299 drivers/hwtracing/stm/policy.c 	count = sprintf(page, "%s\n",
page              310 drivers/hwtracing/stm/policy.c 					char *page)
page              315 drivers/hwtracing/stm/policy.c 	count = sprintf(page, "%s\n",
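The p_sys-t.c and policy.c entries above are configfs attribute pairs: show() formats into the PAGE_SIZE buffer configfs passes in as "page", and store() parses the same buffer with kstrto*(). A minimal sketch of one such pair; demo_node and its interval field are assumptions:

struct demo_node {
	struct config_item item;
	unsigned long interval;		/* stored in jiffies */
};

static struct demo_node *to_demo_node(struct config_item *item)
{
	return container_of(item, struct demo_node, item);
}

static ssize_t demo_interval_show(struct config_item *item, char *page)
{
	return sprintf(page, "%u\n",
		       jiffies_to_msecs(to_demo_node(item)->interval));
}

static ssize_t demo_interval_store(struct config_item *item,
				   const char *page, size_t count)
{
	unsigned int ms;
	int ret;

	ret = kstrtouint(page, 10, &ms);
	if (ret)
		return ret;

	to_demo_node(item)->interval = msecs_to_jiffies(ms);
	return count;			/* consumed the whole write */
}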
page              320 drivers/ide/ide-floppy.c 	u8 *page, buf[40];
page              339 drivers/ide/ide-floppy.c 	page = &buf[8];
page              350 drivers/ide/ide-floppy.c 	if (memcmp(page, &floppy->flexible_disk_page, 32))
page              356 drivers/ide/ide-floppy.c 	memcpy(&floppy->flexible_disk_page, page, 32);
page              233 drivers/ide/ide-taskfile.c 	struct page *page;
page              243 drivers/ide/ide-taskfile.c 		page = sg_page(cursg);
page              247 drivers/ide/ide-taskfile.c 		page = nth_page(page, (offset >> PAGE_SHIFT));
page              252 drivers/ide/ide-taskfile.c 		buf = kmap_atomic(page) + offset;
page               31 drivers/iio/chemical/bme680_spi.c 	u8 page = (reg & 0x80) ? 0 : 1; /* Page "1" is low range */
page               33 drivers/iio/chemical/bme680_spi.c 	if (page == ctx->current_page)
page               43 drivers/iio/chemical/bme680_spi.c 		dev_err(&spi->dev, "failed to set page %u\n", page);
page               48 drivers/iio/chemical/bme680_spi.c 	if (page)
page               55 drivers/iio/chemical/bme680_spi.c 		dev_err(&spi->dev, "failed to set page %u\n", page);
page               59 drivers/iio/chemical/bme680_spi.c 	ctx->current_page = page;
page               32 drivers/iio/imu/adis.c 	unsigned int page = reg / ADIS_PAGE_SIZE;
page               77 drivers/iio/imu/adis.c 	if (adis->current_page != page) {
page               79 drivers/iio/imu/adis.c 		adis->tx[1] = page;
page              113 drivers/iio/imu/adis.c 		adis->current_page = page;
page              132 drivers/iio/imu/adis.c 	unsigned int page = reg / ADIS_PAGE_SIZE;
page              172 drivers/iio/imu/adis.c 	if (adis->current_page != page) {
page              174 drivers/iio/imu/adis.c 		adis->tx[1] = page;
page              201 drivers/iio/imu/adis.c 		adis->current_page = page;
page               30 drivers/iio/imu/adis16480.c #define ADIS16480_REG(page, reg) ((page) * ADIS16480_PAGE_SIZE + (reg))
page              108 drivers/iio/imu/adis16480.c #define ADIS16480_FIR_COEF(page) (x < 60 ? ADIS16480_REG(page, (x) + 8) : \
page              109 drivers/iio/imu/adis16480.c 		ADIS16480_REG((page) + 1, (x) - 60 + 8))
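The adis16480.c entries above collapse a (page, register) pair into one flat address, so the rest of the driver can pass a single integer around and recover the page by division. A tiny illustration; the 0x80 page size mirrors ADIS_PAGE_SIZE but is an assumption here:

#define DEMO_PAGE_SIZE		0x80
#define DEMO_REG(page, reg)	((page) * DEMO_PAGE_SIZE + (reg))

/* DEMO_REG(3, 0x12) == 0x192, and 0x192 / DEMO_PAGE_SIZE == 3 again */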
page              244 drivers/iio/multiplexer/iio-mux.c 	char *page = NULL;
page              277 drivers/iio/multiplexer/iio-mux.c 		page = devm_kzalloc(dev, PAGE_SIZE, GFP_KERNEL);
page              278 drivers/iio/multiplexer/iio-mux.c 		if (!page)
page              298 drivers/iio/multiplexer/iio-mux.c 						page);
page              310 drivers/iio/multiplexer/iio-mux.c 		child->ext_info_cache[i].data = devm_kmemdup(dev, page, ret + 1,
page              319 drivers/iio/multiplexer/iio-mux.c 	if (page)
page              320 drivers/iio/multiplexer/iio-mux.c 		devm_kfree(dev, page);
page               49 drivers/infiniband/core/umem.c 	struct page *page;
page               56 drivers/infiniband/core/umem.c 		page = sg_page_iter_page(&sg_iter);
page               57 drivers/infiniband/core/umem.c 		put_user_pages_dirty_lock(&page, 1, umem->writable && dirty);
page               74 drivers/infiniband/core/umem.c 						struct page **page_list,
page               93 drivers/infiniband/core/umem.c 		struct page *first_page = page_list[i];
page              198 drivers/infiniband/core/umem.c 	struct page **page_list;
page              244 drivers/infiniband/core/umem.c 	page_list = (struct page **) __get_free_page(GFP_KERNEL);
page              280 drivers/infiniband/core/umem.c 					   PAGE_SIZE / sizeof (struct page *)),
page              505 drivers/infiniband/core/umem_odp.c 		struct page *page,
page              525 drivers/infiniband/core/umem_odp.c 			ib_dma_map_page(dev, page, 0, BIT(umem_odp->page_shift),
page              532 drivers/infiniband/core/umem_odp.c 		umem_odp->page_list[page_index] = page;
page              534 drivers/infiniband/core/umem_odp.c 	} else if (umem_odp->page_list[page_index] == page) {
page              538 drivers/infiniband/core/umem_odp.c 		       umem_odp->page_list[page_index], page);
page              545 drivers/infiniband/core/umem_odp.c 	put_user_page(page);
page              593 drivers/infiniband/core/umem_odp.c 	struct page       **local_page_list = NULL;
page              606 drivers/infiniband/core/umem_odp.c 	local_page_list = (struct page **)__get_free_page(GFP_KERNEL);
page              636 drivers/infiniband/core/umem_odp.c 				PAGE_SIZE / sizeof(struct page *));
page              736 drivers/infiniband/core/umem_odp.c 			struct page *page = umem_odp->page_list[idx];
page              746 drivers/infiniband/core/umem_odp.c 				struct page *head_page = compound_head(page);
page              163 drivers/infiniband/core/uverbs.h 	struct page *disassociate_page;
page              901 drivers/infiniband/core/uverbs_main.c 		vmf->page = ZERO_PAGE(vmf->address);
page              902 drivers/infiniband/core/uverbs_main.c 		get_page(vmf->page);
page              916 drivers/infiniband/core/uverbs_main.c 		vmf->page = ufile->disassociate_page;
page              917 drivers/infiniband/core/uverbs_main.c 		get_page(vmf->page);
page             1068 drivers/infiniband/hw/efa/efa_verbs.c 	struct page *pg;
page              607 drivers/infiniband/hw/hfi1/file_ops.c 	struct page *page;
page              609 drivers/infiniband/hw/hfi1/file_ops.c 	page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT));
page              610 drivers/infiniband/hw/hfi1/file_ops.c 	if (!page)
page              613 drivers/infiniband/hw/hfi1/file_ops.c 	get_page(page);
page              614 drivers/infiniband/hw/hfi1/file_ops.c 	vmf->page = page;
page              732 drivers/infiniband/hw/hfi1/file_ops.c 	struct page *page;
page              735 drivers/infiniband/hw/hfi1/file_ops.c 	page = vmalloc_to_page(addr);
page              736 drivers/infiniband/hw/hfi1/file_ops.c 	if (page)
page              737 drivers/infiniband/hw/hfi1/file_ops.c 		paddr = page_to_pfn(page) << PAGE_SHIFT;
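hfi1's mmap fault handler above (and qib's identical one further down) services faults on vmalloc()ed buffers: these drivers stash the page-shifted kernel virtual address in the vma's page offset, so the handler reconstructs the address, translates it to its struct page, takes a reference, and hands the page to the VM. Condensed from the excerpts:

        static vm_fault_t my_vma_fault(struct vm_fault *vmf)
        {
                struct page *page;

                page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT));
                if (!page)
                        return VM_FAULT_SIGBUS;

                get_page(page);         /* ref dropped when the PTE goes away */
                vmf->page = page;
                return 0;
        }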
page             2019 drivers/infiniband/hw/hfi1/hfi.h 			    size_t npages, bool writable, struct page **pages);
page             2020 drivers/infiniband/hw/hfi1/hfi.h void hfi1_release_user_pages(struct mm_struct *mm, struct page **p,
page              404 drivers/infiniband/hw/hfi1/qsfp.c 	u8 page;
page              414 drivers/infiniband/hw/hfi1/qsfp.c 		page = (u8)(addr / QSFP_PAGESIZE);
page              417 drivers/infiniband/hw/hfi1/qsfp.c 				  QSFP_PAGE_SELECT_BYTE_OFFS, &page, 1);
page              484 drivers/infiniband/hw/hfi1/qsfp.c 	u8 page;
page              494 drivers/infiniband/hw/hfi1/qsfp.c 		page = (u8)(addr / QSFP_PAGESIZE);
page              496 drivers/infiniband/hw/hfi1/qsfp.c 				  QSFP_PAGE_SELECT_BYTE_OFFS, &page, 1);
page             3114 drivers/infiniband/hw/hfi1/sdma.c 			   int type, void *kvaddr, struct page *page,
page             3134 drivers/infiniband/hw/hfi1/sdma.c 			kvaddr = kmap(page);
page             3144 drivers/infiniband/hw/hfi1/sdma.c 			kunmap(page);
page              660 drivers/infiniband/hw/hfi1/sdma.h 			   int type, void *kvaddr, struct page *page,
page              733 drivers/infiniband/hw/hfi1/sdma.h 	struct page *page,
page              742 drivers/infiniband/hw/hfi1/sdma.h 					      NULL, page, offset, len);
page              749 drivers/infiniband/hw/hfi1/sdma.h 		       page,
page              876 drivers/infiniband/hw/hfi1/tid_rdma.c 					struct page **pages,
page             1014 drivers/infiniband/hw/hfi1/tid_rdma.c 					struct page **pages,
page             1081 drivers/infiniband/hw/hfi1/tid_rdma.c 			   struct page **pages,
page             1134 drivers/infiniband/hw/hfi1/tid_rdma.c static int dma_map_flow(struct tid_rdma_flow *flow, struct page **pages)
page             1169 drivers/infiniband/hw/hfi1/tid_rdma.c 				struct page **pages,
page              425 drivers/infiniband/hw/hfi1/trace_tid.h 		__field(u64, page)
page              434 drivers/infiniband/hw/hfi1/trace_tid.h 		__entry->page = vaddr ? (u64)virt_to_page(vaddr) : 0ULL;
page              442 drivers/infiniband/hw/hfi1/trace_tid.h 		__entry->page,
page              198 drivers/infiniband/hw/hfi1/user_exp_rcv.c 	struct page **pages;
page              220 drivers/infiniband/hw/hfi1/user_exp_rcv.c 	struct page **pages = NULL;
page              592 drivers/infiniband/hw/hfi1/user_exp_rcv.c 	struct page **pages = tidbuf->pages;
page              758 drivers/infiniband/hw/hfi1/user_exp_rcv.c 	struct page **pages = tbuf->pages + pageidx;
page              764 drivers/infiniband/hw/hfi1/user_exp_rcv.c 	node = kzalloc(sizeof(*node) + (sizeof(struct page *) * npages),
page              787 drivers/infiniband/hw/hfi1/user_exp_rcv.c 	memcpy(node->pages, pages, sizeof(struct page *) * npages);
page               62 drivers/infiniband/hw/hfi1/user_exp_rcv.h 	struct page **pages;
page               75 drivers/infiniband/hw/hfi1/user_exp_rcv.h 	struct page *pages[0];
page              104 drivers/infiniband/hw/hfi1/user_pages.c 			    bool writable, struct page **pages)
page              118 drivers/infiniband/hw/hfi1/user_pages.c void hfi1_release_user_pages(struct mm_struct *mm, struct page **p,
page               85 drivers/infiniband/hw/hfi1/user_sdma.c static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
page              973 drivers/infiniband/hw/hfi1/user_sdma.c 	struct page **pages;
page             1079 drivers/infiniband/hw/hfi1/user_sdma.c static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
page              148 drivers/infiniband/hw/hfi1/user_sdma.h 	struct page **pages;
page              158 drivers/infiniband/hw/hfi1/user_sdma.h 	struct page **pages;
page              162 drivers/infiniband/hw/hfi1/verbs.h 	struct page **pages;                      /* for TID page scan */
page               16 drivers/infiniband/hw/hns/hns_roce_db.c 	struct hns_roce_user_db_page *page;
page               22 drivers/infiniband/hw/hns/hns_roce_db.c 	list_for_each_entry(page, &context->page_list, list)
page               23 drivers/infiniband/hw/hns/hns_roce_db.c 		if (page->user_virt == page_addr)
page               26 drivers/infiniband/hw/hns/hns_roce_db.c 	page = kmalloc(sizeof(*page), GFP_KERNEL);
page               27 drivers/infiniband/hw/hns/hns_roce_db.c 	if (!page) {
page               32 drivers/infiniband/hw/hns/hns_roce_db.c 	refcount_set(&page->refcount, 1);
page               33 drivers/infiniband/hw/hns/hns_roce_db.c 	page->user_virt = page_addr;
page               34 drivers/infiniband/hw/hns/hns_roce_db.c 	page->umem = ib_umem_get(udata, page_addr, PAGE_SIZE, 0, 0);
page               35 drivers/infiniband/hw/hns/hns_roce_db.c 	if (IS_ERR(page->umem)) {
page               36 drivers/infiniband/hw/hns/hns_roce_db.c 		ret = PTR_ERR(page->umem);
page               37 drivers/infiniband/hw/hns/hns_roce_db.c 		kfree(page);
page               41 drivers/infiniband/hw/hns/hns_roce_db.c 	list_add(&page->list, &context->page_list);
page               45 drivers/infiniband/hw/hns/hns_roce_db.c 	db->dma = sg_dma_address(page->umem->sg_head.sgl) + offset;
page               46 drivers/infiniband/hw/hns/hns_roce_db.c 	db->virt_addr = sg_virt(page->umem->sg_head.sgl) + offset;
page               47 drivers/infiniband/hw/hns/hns_roce_db.c 	db->u.user_page = page;
page               48 drivers/infiniband/hw/hns/hns_roce_db.c 	refcount_inc(&page->refcount);
page               84 drivers/infiniband/hw/hns/hns_roce_db.c 	pgdir->page = dma_alloc_coherent(dma_device, PAGE_SIZE,
page               86 drivers/infiniband/hw/hns/hns_roce_db.c 	if (!pgdir->page) {
page              118 drivers/infiniband/hw/hns/hns_roce_db.c 	db->db_record	= pgdir->page + db->index;
page              174 drivers/infiniband/hw/hns/hns_roce_db.c 		dma_free_coherent(hr_dev->dev, PAGE_SIZE, db->u.pgdir->page,
page              462 drivers/infiniband/hw/hns/hns_roce_device.h 	u32			*page;
page             3723 drivers/infiniband/hw/i40iw/i40iw_cm.c 		if (iwqp->page)
page             3724 drivers/infiniband/hw/i40iw/i40iw_cm.c 			iwqp->sc_qp.qp_uk.sq_base = kmap(iwqp->page);
page             3731 drivers/infiniband/hw/i40iw/i40iw_cm.c 		if (iwqp->page)
page             3732 drivers/infiniband/hw/i40iw/i40iw_cm.c 			iwqp->sc_qp.qp_uk.sq_base = kmap(iwqp->page);
page             3736 drivers/infiniband/hw/i40iw/i40iw_cm.c 	if (iwqp->page)
page             3737 drivers/infiniband/hw/i40iw/i40iw_cm.c 		kunmap(iwqp->page);
page             4104 drivers/infiniband/hw/i40iw/i40iw_cm.c 	if (iwqp->page)
page             4105 drivers/infiniband/hw/i40iw/i40iw_cm.c 		iwqp->sc_qp.qp_uk.sq_base = kmap(iwqp->page);
page             4107 drivers/infiniband/hw/i40iw/i40iw_cm.c 	if (iwqp->page)
page             4108 drivers/infiniband/hw/i40iw/i40iw_cm.c 		kunmap(iwqp->page);
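i40iw above maps the SQ backing page with kmap() only for as long as the hardware setup needs a kernel address for it, then unmaps. The shape of that pattern, condensed from the excerpts (assuming the iwqp container type from i40iw_verbs.h; error paths elided):

        static void my_setup_sq(struct i40iw_qp *iwqp)
        {
                if (!iwqp->page)
                        return;

                iwqp->sc_qp.qp_uk.sq_base = kmap(iwqp->page);
                /* ... program the hardware while the mapping is live ... */
                kunmap(iwqp->page);     /* every kmap() needs a kunmap() */
        }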
page              624 drivers/infiniband/hw/i40iw/i40iw_hmc.c 	struct i40iw_dma_mem *page = &mem;
page              642 drivers/infiniband/hw/i40iw/i40iw_hmc.c 			page = rsrc_pg;
page              644 drivers/infiniband/hw/i40iw/i40iw_hmc.c 			ret_code = i40iw_allocate_dma_mem(hw, page,
page              652 drivers/infiniband/hw/i40iw/i40iw_hmc.c 		memcpy(&pd_entry->bp.addr, page, sizeof(struct i40iw_dma_mem));
page              655 drivers/infiniband/hw/i40iw/i40iw_hmc.c 		page_desc = page->pa | 0x1;
page              197 drivers/infiniband/hw/i40iw/i40iw_pble.c 	struct page *page;
page              215 drivers/infiniband/hw/i40iw/i40iw_pble.c 		page = vmalloc_to_page((void *)addr);
page              216 drivers/infiniband/hw/i40iw/i40iw_pble.c 		if (!page)
page              218 drivers/infiniband/hw/i40iw/i40iw_pble.c 		chunk->dmaaddrs[i] = dma_map_page(&pcidev->dev, page, 0,
page              438 drivers/infiniband/hw/i40iw/i40iw_verbs.c 	iwqp->page = qpmr->sq_page;
page               70 drivers/infiniband/hw/i40iw/i40iw_verbs.h 	struct page *sq_page;
page              157 drivers/infiniband/hw/i40iw/i40iw_verbs.h 	struct page *page;
page               48 drivers/infiniband/hw/mlx4/doorbell.c 	struct mlx4_ib_user_db_page *page;
page               55 drivers/infiniband/hw/mlx4/doorbell.c 	list_for_each_entry(page, &context->db_page_list, list)
page               56 drivers/infiniband/hw/mlx4/doorbell.c 		if (page->user_virt == (virt & PAGE_MASK))
page               59 drivers/infiniband/hw/mlx4/doorbell.c 	page = kmalloc(sizeof *page, GFP_KERNEL);
page               60 drivers/infiniband/hw/mlx4/doorbell.c 	if (!page) {
page               65 drivers/infiniband/hw/mlx4/doorbell.c 	page->user_virt = (virt & PAGE_MASK);
page               66 drivers/infiniband/hw/mlx4/doorbell.c 	page->refcnt    = 0;
page               67 drivers/infiniband/hw/mlx4/doorbell.c 	page->umem = ib_umem_get(udata, virt & PAGE_MASK, PAGE_SIZE, 0, 0);
page               68 drivers/infiniband/hw/mlx4/doorbell.c 	if (IS_ERR(page->umem)) {
page               69 drivers/infiniband/hw/mlx4/doorbell.c 		err = PTR_ERR(page->umem);
page               70 drivers/infiniband/hw/mlx4/doorbell.c 		kfree(page);
page               74 drivers/infiniband/hw/mlx4/doorbell.c 	list_add(&page->list, &context->db_page_list);
page               77 drivers/infiniband/hw/mlx4/doorbell.c 	db->dma = sg_dma_address(page->umem->sg_head.sgl) + (virt & ~PAGE_MASK);
page               78 drivers/infiniband/hw/mlx4/doorbell.c 	db->u.user_page = page;
page               79 drivers/infiniband/hw/mlx4/doorbell.c 	++page->refcnt;
page               50 drivers/infiniband/hw/mlx5/doorbell.c 	struct mlx5_ib_user_db_page *page;
page               55 drivers/infiniband/hw/mlx5/doorbell.c 	list_for_each_entry(page, &context->db_page_list, list)
page               56 drivers/infiniband/hw/mlx5/doorbell.c 		if (page->user_virt == (virt & PAGE_MASK))
page               59 drivers/infiniband/hw/mlx5/doorbell.c 	page = kmalloc(sizeof(*page), GFP_KERNEL);
page               60 drivers/infiniband/hw/mlx5/doorbell.c 	if (!page) {
page               65 drivers/infiniband/hw/mlx5/doorbell.c 	page->user_virt = (virt & PAGE_MASK);
page               66 drivers/infiniband/hw/mlx5/doorbell.c 	page->refcnt    = 0;
page               67 drivers/infiniband/hw/mlx5/doorbell.c 	page->umem = ib_umem_get(udata, virt & PAGE_MASK, PAGE_SIZE, 0, 0);
page               68 drivers/infiniband/hw/mlx5/doorbell.c 	if (IS_ERR(page->umem)) {
page               69 drivers/infiniband/hw/mlx5/doorbell.c 		err = PTR_ERR(page->umem);
page               70 drivers/infiniband/hw/mlx5/doorbell.c 		kfree(page);
page               74 drivers/infiniband/hw/mlx5/doorbell.c 	list_add(&page->list, &context->db_page_list);
page               77 drivers/infiniband/hw/mlx5/doorbell.c 	db->dma = sg_dma_address(page->umem->sg_head.sgl) + (virt & ~PAGE_MASK);
page               78 drivers/infiniband/hw/mlx5/doorbell.c 	db->u.user_page = page;
page               79 drivers/infiniband/hw/mlx5/doorbell.c 	++page->refcnt;
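mlx4 and mlx5 share the user doorbell idiom shown in the two doorbell.c excerpts: look the page-aligned user address up in the context's list of already-pinned doorbell pages, and only pin a new page via ib_umem_get() on a miss. A condensed sketch using the mlx5 names (list locking elided, as in the excerpts):

        static struct mlx5_ib_user_db_page *
        my_get_db_page(struct mlx5_ib_ucontext *context, struct ib_udata *udata,
                       unsigned long virt)
        {
                struct mlx5_ib_user_db_page *page;
                int err;

                list_for_each_entry(page, &context->db_page_list, list)
                        if (page->user_virt == (virt & PAGE_MASK))
                                goto found;     /* reuse the existing pinning */

                page = kmalloc(sizeof(*page), GFP_KERNEL);
                if (!page)
                        return ERR_PTR(-ENOMEM);

                page->user_virt = virt & PAGE_MASK;
                page->refcnt    = 0;
                page->umem = ib_umem_get(udata, virt & PAGE_MASK, PAGE_SIZE, 0, 0);
                if (IS_ERR(page->umem)) {
                        err = PTR_ERR(page->umem);
                        kfree(page);
                        return ERR_PTR(err);
                }
                list_add(&page->list, &context->db_page_list);

        found:
                ++page->refcnt;
                return page;
        }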
page              677 drivers/infiniband/hw/mlx5/mlx5_ib.h 	u32			page;
page              175 drivers/infiniband/hw/mlx5/mr.c 		MLX5_SET(mkc, mkc, log_page_size, ent->page);
page              642 drivers/infiniband/hw/mlx5/mr.c 		ent->page = PAGE_SHIFT;
page             1563 drivers/infiniband/hw/mlx5/odp.c 		ent->page = PAGE_SHIFT;
page             1572 drivers/infiniband/hw/mlx5/odp.c 		ent->page = MLX5_KSM_PAGE_SHIFT;
page              122 drivers/infiniband/hw/mthca/mthca_allocator.c 	if (array->page_list[p].page)
page              123 drivers/infiniband/hw/mthca/mthca_allocator.c 		return array->page_list[p].page[index & MTHCA_ARRAY_MASK];
page              133 drivers/infiniband/hw/mthca/mthca_allocator.c 	if (!array->page_list[p].page)
page              134 drivers/infiniband/hw/mthca/mthca_allocator.c 		array->page_list[p].page = (void **) get_zeroed_page(GFP_ATOMIC);
page              136 drivers/infiniband/hw/mthca/mthca_allocator.c 	if (!array->page_list[p].page)
page              139 drivers/infiniband/hw/mthca/mthca_allocator.c 	array->page_list[p].page[index & MTHCA_ARRAY_MASK] = value;
page              150 drivers/infiniband/hw/mthca/mthca_allocator.c 		free_page((unsigned long) array->page_list[p].page);
page              151 drivers/infiniband/hw/mthca/mthca_allocator.c 		array->page_list[p].page = NULL;
page              153 drivers/infiniband/hw/mthca/mthca_allocator.c 		array->page_list[p].page[index & MTHCA_ARRAY_MASK] = NULL;
page              171 drivers/infiniband/hw/mthca/mthca_allocator.c 		array->page_list[i].page = NULL;
page              183 drivers/infiniband/hw/mthca/mthca_allocator.c 		free_page((unsigned long) array->page_list[i].page);
page              189 drivers/infiniband/hw/mthca/mthca_dev.h 		void    **page;
page              233 drivers/infiniband/hw/mthca/mthca_dev.h 	struct page       *icm_page;
page               61 drivers/infiniband/hw/mthca/mthca_memfree.c 	}                page[0];
page              109 drivers/infiniband/hw/mthca/mthca_memfree.c 	struct page *page;
page              115 drivers/infiniband/hw/mthca/mthca_memfree.c 	page = alloc_pages(gfp_mask | __GFP_ZERO, order);
page              116 drivers/infiniband/hw/mthca/mthca_memfree.c 	if (!page)
page              119 drivers/infiniband/hw/mthca/mthca_memfree.c 	sg_set_page(mem, page, PAGE_SIZE << order, 0);
page              283 drivers/infiniband/hw/mthca/mthca_memfree.c 	struct page *page = NULL;
page              309 drivers/infiniband/hw/mthca/mthca_memfree.c 				page = sg_page(&chunk->mem[i]);
page              318 drivers/infiniband/hw/mthca/mthca_memfree.c 	return page ? lowmem_page_address(page) + offset : NULL;
page              439 drivers/infiniband/hw/mthca/mthca_memfree.c static u64 mthca_uarc_virt(struct mthca_dev *dev, struct mthca_uar *uar, int page)
page              443 drivers/infiniband/hw/mthca/mthca_memfree.c 		page * MTHCA_ICM_PAGE_SIZE;
page              449 drivers/infiniband/hw/mthca/mthca_memfree.c 	struct page *pages[1];
page              463 drivers/infiniband/hw/mthca/mthca_memfree.c 	if ((db_tab->page[i].refcount >= MTHCA_DB_REC_PER_PAGE)       ||
page              464 drivers/infiniband/hw/mthca/mthca_memfree.c 	    (db_tab->page[i].uvirt && db_tab->page[i].uvirt != uaddr) ||
page              470 drivers/infiniband/hw/mthca/mthca_memfree.c 	if (db_tab->page[i].refcount) {
page              471 drivers/infiniband/hw/mthca/mthca_memfree.c 		++db_tab->page[i].refcount;
page              480 drivers/infiniband/hw/mthca/mthca_memfree.c 	sg_set_page(&db_tab->page[i].mem, pages[0], MTHCA_ICM_PAGE_SIZE,
page              483 drivers/infiniband/hw/mthca/mthca_memfree.c 	ret = pci_map_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
page              489 drivers/infiniband/hw/mthca/mthca_memfree.c 	ret = mthca_MAP_ICM_page(dev, sg_dma_address(&db_tab->page[i].mem),
page              492 drivers/infiniband/hw/mthca/mthca_memfree.c 		pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
page              493 drivers/infiniband/hw/mthca/mthca_memfree.c 		put_user_page(sg_page(&db_tab->page[i].mem));
page              497 drivers/infiniband/hw/mthca/mthca_memfree.c 	db_tab->page[i].uvirt    = uaddr;
page              498 drivers/infiniband/hw/mthca/mthca_memfree.c 	db_tab->page[i].refcount = 1;
page              518 drivers/infiniband/hw/mthca/mthca_memfree.c 	--db_tab->page[index / MTHCA_DB_REC_PER_PAGE].refcount;
page              533 drivers/infiniband/hw/mthca/mthca_memfree.c 	db_tab = kmalloc(struct_size(db_tab, page, npages), GFP_KERNEL);
page              539 drivers/infiniband/hw/mthca/mthca_memfree.c 		db_tab->page[i].refcount = 0;
page              540 drivers/infiniband/hw/mthca/mthca_memfree.c 		db_tab->page[i].uvirt    = 0;
page              541 drivers/infiniband/hw/mthca/mthca_memfree.c 		sg_init_table(&db_tab->page[i].mem, 1);
page              556 drivers/infiniband/hw/mthca/mthca_memfree.c 		if (db_tab->page[i].uvirt) {
page              558 drivers/infiniband/hw/mthca/mthca_memfree.c 			pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
page              559 drivers/infiniband/hw/mthca/mthca_memfree.c 			put_user_page(sg_page(&db_tab->page[i].mem));
page              572 drivers/infiniband/hw/mthca/mthca_memfree.c 	struct mthca_db_page *page;
page              601 drivers/infiniband/hw/mthca/mthca_memfree.c 		if (dev->db_tab->page[i].db_rec &&
page              602 drivers/infiniband/hw/mthca/mthca_memfree.c 		    !bitmap_full(dev->db_tab->page[i].used,
page              604 drivers/infiniband/hw/mthca/mthca_memfree.c 			page = dev->db_tab->page + i;
page              609 drivers/infiniband/hw/mthca/mthca_memfree.c 		if (!dev->db_tab->page[i].db_rec) {
page              610 drivers/infiniband/hw/mthca/mthca_memfree.c 			page = dev->db_tab->page + i;
page              624 drivers/infiniband/hw/mthca/mthca_memfree.c 	page = dev->db_tab->page + end;
page              627 drivers/infiniband/hw/mthca/mthca_memfree.c 	page->db_rec = dma_alloc_coherent(&dev->pdev->dev,
page              628 drivers/infiniband/hw/mthca/mthca_memfree.c 					  MTHCA_ICM_PAGE_SIZE, &page->mapping,
page              630 drivers/infiniband/hw/mthca/mthca_memfree.c 	if (!page->db_rec) {
page              635 drivers/infiniband/hw/mthca/mthca_memfree.c 	ret = mthca_MAP_ICM_page(dev, page->mapping,
page              639 drivers/infiniband/hw/mthca/mthca_memfree.c 				  page->db_rec, page->mapping);
page              643 drivers/infiniband/hw/mthca/mthca_memfree.c 	bitmap_zero(page->used, MTHCA_DB_REC_PER_PAGE);
page              646 drivers/infiniband/hw/mthca/mthca_memfree.c 	j = find_first_zero_bit(page->used, MTHCA_DB_REC_PER_PAGE);
page              647 drivers/infiniband/hw/mthca/mthca_memfree.c 	set_bit(j, page->used);
page              654 drivers/infiniband/hw/mthca/mthca_memfree.c 	page->db_rec[j] = cpu_to_be64((qn << 8) | (type << 5));
page              656 drivers/infiniband/hw/mthca/mthca_memfree.c 	*db = (__be32 *) &page->db_rec[j];
page              667 drivers/infiniband/hw/mthca/mthca_memfree.c 	struct mthca_db_page *page;
page              672 drivers/infiniband/hw/mthca/mthca_memfree.c 	page = dev->db_tab->page + i;
page              676 drivers/infiniband/hw/mthca/mthca_memfree.c 	page->db_rec[j] = 0;
page              679 drivers/infiniband/hw/mthca/mthca_memfree.c 	clear_bit(j, page->used);
page              681 drivers/infiniband/hw/mthca/mthca_memfree.c 	if (bitmap_empty(page->used, MTHCA_DB_REC_PER_PAGE) &&
page              686 drivers/infiniband/hw/mthca/mthca_memfree.c 				  page->db_rec, page->mapping);
page              687 drivers/infiniband/hw/mthca/mthca_memfree.c 		page->db_rec = NULL;
page              717 drivers/infiniband/hw/mthca/mthca_memfree.c 	dev->db_tab->page = kmalloc_array(dev->db_tab->npages,
page              718 drivers/infiniband/hw/mthca/mthca_memfree.c 					  sizeof(*dev->db_tab->page),
page              720 drivers/infiniband/hw/mthca/mthca_memfree.c 	if (!dev->db_tab->page) {
page              726 drivers/infiniband/hw/mthca/mthca_memfree.c 		dev->db_tab->page[i].db_rec = NULL;
page              745 drivers/infiniband/hw/mthca/mthca_memfree.c 		if (!dev->db_tab->page[i].db_rec)
page              748 drivers/infiniband/hw/mthca/mthca_memfree.c 		if (!bitmap_empty(dev->db_tab->page[i].used, MTHCA_DB_REC_PER_PAGE))
page              754 drivers/infiniband/hw/mthca/mthca_memfree.c 				  dev->db_tab->page[i].db_rec,
page              755 drivers/infiniband/hw/mthca/mthca_memfree.c 				  dev->db_tab->page[i].mapping);
page              758 drivers/infiniband/hw/mthca/mthca_memfree.c 	kfree(dev->db_tab->page);
page              148 drivers/infiniband/hw/mthca/mthca_memfree.h 	struct mthca_db_page *page;
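mthca sizes its doorbell table with the struct_size() call visible in mthca_memfree.c above, the overflow-safe helper for a header plus trailing flexible array. The general shape, with my_table/my_entry as illustrative types:

        #include <linux/overflow.h>
        #include <linux/slab.h>

        struct my_table {
                int npages;
                struct my_entry {
                        int refcount;
                } page[];       /* flexible array member */
        };

        static struct my_table *my_table_alloc(int npages)
        {
                struct my_table *t;

                /* struct_size(t, page, npages) ==
                 * sizeof(*t) + npages * sizeof(t->page[0]),
                 * saturating rather than wrapping on overflow */
                t = kmalloc(struct_size(t, page, npages), GFP_KERNEL);
                if (t)
                        t->npages = npages;
                return t;
        }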
page              894 drivers/infiniband/hw/qib/qib.h 	struct page **pageshadow;
page             1345 drivers/infiniband/hw/qib/qib.h int qib_get_user_pages(unsigned long, size_t, struct page **);
page             1346 drivers/infiniband/hw/qib/qib.h void qib_release_user_pages(struct page **, size_t);
page             1427 drivers/infiniband/hw/qib/qib.h int qib_map_page(struct pci_dev *d, struct page *p, dma_addr_t *daddr);
page               88 drivers/infiniband/hw/qib/qib_file_ops.c 	struct page *page;
page               91 drivers/infiniband/hw/qib/qib_file_ops.c 	page = vmalloc_to_page(p);
page               92 drivers/infiniband/hw/qib/qib_file_ops.c 	if (page)
page               93 drivers/infiniband/hw/qib/qib_file_ops.c 		paddr = page_to_pfn(page) << PAGE_SHIFT;
page              295 drivers/infiniband/hw/qib/qib_file_ops.c 	struct page **pagep = NULL;
page              332 drivers/infiniband/hw/qib/qib_file_ops.c 	pagep = (struct page **) rcd->tid_pg_list;
page              535 drivers/infiniband/hw/qib/qib_file_ops.c 			struct page *p;
page              877 drivers/infiniband/hw/qib/qib_file_ops.c 	struct page *page;
page              879 drivers/infiniband/hw/qib/qib_file_ops.c 	page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT));
page              880 drivers/infiniband/hw/qib/qib_file_ops.c 	if (!page)
page              883 drivers/infiniband/hw/qib/qib_file_ops.c 	get_page(page);
page              884 drivers/infiniband/hw/qib/qib_file_ops.c 	vmf->page = page;
page             1308 drivers/infiniband/hw/qib/qib_file_ops.c 			       dd->rcvtidcnt * sizeof(struct page **),
page             1774 drivers/infiniband/hw/qib/qib_file_ops.c 		struct page *p = dd->pageshadow[i];
page              367 drivers/infiniband/hw/qib/qib_init.c 	struct page **pages;
page              370 drivers/infiniband/hw/qib/qib_init.c 	pages = vzalloc(array_size(sizeof(struct page *),
page             1327 drivers/infiniband/hw/qib/qib_init.c 		struct page **tmpp = dd->pageshadow;
page               59 drivers/infiniband/hw/qib/qib_qp.c 		off = find_next_zero_bit(map->page, RVT_BITS_PER_PAGE, off);
page              109 drivers/infiniband/hw/qib/qib_qp.c 	unsigned long page = get_zeroed_page(GFP_KERNEL);
page              116 drivers/infiniband/hw/qib/qib_qp.c 	if (map->page)
page              117 drivers/infiniband/hw/qib/qib_qp.c 		free_page(page);
page              119 drivers/infiniband/hw/qib/qib_qp.c 		map->page = (void *)page;
page              161 drivers/infiniband/hw/qib/qib_qp.c 		if (unlikely(!map->page)) {
page              163 drivers/infiniband/hw/qib/qib_qp.c 			if (unlikely(!map->page))
page              167 drivers/infiniband/hw/qib/qib_qp.c 			if (!test_and_set_bit(offset, map->page)) {
page               40 drivers/infiniband/hw/qib/qib_user_pages.c static void __qib_release_user_pages(struct page **p, size_t num_pages,
page               59 drivers/infiniband/hw/qib/qib_user_pages.c int qib_map_page(struct pci_dev *hwdev, struct page *page, dma_addr_t *daddr)
page               63 drivers/infiniband/hw/qib/qib_user_pages.c 	phys = pci_map_page(hwdev, page, 0, PAGE_SIZE, PCI_DMA_FROMDEVICE);
page               69 drivers/infiniband/hw/qib/qib_user_pages.c 		phys = pci_map_page(hwdev, page, 0, PAGE_SIZE,
page               95 drivers/infiniband/hw/qib/qib_user_pages.c 		       struct page **p)
page              130 drivers/infiniband/hw/qib/qib_user_pages.c void qib_release_user_pages(struct page **p, size_t num_pages)
page               93 drivers/infiniband/hw/qib/qib_user_sdma.c 		struct page *page;              /* may be NULL (coherent mem) */
page              258 drivers/infiniband/hw/qib/qib_user_sdma.c 				    struct page *page, void *kvaddr,
page              267 drivers/infiniband/hw/qib/qib_user_sdma.c 	pkt->addr[i].page = page;
page              298 drivers/infiniband/hw/qib/qib_user_sdma.c 				       struct page *page, u16 put,
page              310 drivers/infiniband/hw/qib/qib_user_sdma.c 			page, offset, len, DMA_TO_DEVICE);
page              320 drivers/infiniband/hw/qib/qib_user_sdma.c 			put_user_page(page);
page              323 drivers/infiniband/hw/qib/qib_user_sdma.c 			kunmap(page);
page              324 drivers/infiniband/hw/qib/qib_user_sdma.c 			__free_page(page);
page              368 drivers/infiniband/hw/qib/qib_user_sdma.c 		page, kvaddr,		/* struct page, virt addr */
page              543 drivers/infiniband/hw/qib/qib_user_sdma.c 			page = NULL;
page              564 drivers/infiniband/hw/qib/qib_user_sdma.c 	struct page *page = alloc_page(GFP_KERNEL);
page              570 drivers/infiniband/hw/qib/qib_user_sdma.c 	if (!page) {
page              575 drivers/infiniband/hw/qib/qib_user_sdma.c 	mpage = kmap(page);
page              592 drivers/infiniband/hw/qib/qib_user_sdma.c 			page, 0, 0, len, mpage_save);
page              596 drivers/infiniband/hw/qib/qib_user_sdma.c 	kunmap(page);
page              597 drivers/infiniband/hw/qib/qib_user_sdma.c 	__free_page(page);
page              622 drivers/infiniband/hw/qib/qib_user_sdma.c 	if (pkt->addr[i].page) {
page              631 drivers/infiniband/hw/qib/qib_user_sdma.c 			kunmap(pkt->addr[i].page);
page              634 drivers/infiniband/hw/qib/qib_user_sdma.c 			put_user_page(pkt->addr[i].page);
page              636 drivers/infiniband/hw/qib/qib_user_sdma.c 			__free_page(pkt->addr[i].page);
page              663 drivers/infiniband/hw/qib/qib_user_sdma.c 	struct page *pages[8];
page               69 drivers/infiniband/hw/usnic/usnic_uiom.c 	struct page *page;
page               76 drivers/infiniband/hw/usnic/usnic_uiom.c 			page = sg_page(sg);
page               78 drivers/infiniband/hw/usnic/usnic_uiom.c 			put_user_pages_dirty_lock(&page, 1, dirty);
page               89 drivers/infiniband/hw/usnic/usnic_uiom.c 	struct page **page_list;
page              119 drivers/infiniband/hw/usnic/usnic_uiom.c 	page_list = (struct page **) __get_free_page(GFP_KERNEL);
page              146 drivers/infiniband/hw/usnic/usnic_uiom.c 				     PAGE_SIZE / sizeof(struct page *)),
page              289 drivers/infiniband/sw/rdmavt/qp.c 	u32 page = ((unsigned long)address >> PAGE_SHIFT) & wss->pages_mask;
page              290 drivers/infiniband/sw/rdmavt/qp.c 	u32 entry = page / BITS_PER_LONG; /* assumes this ends up a shift */
page              291 drivers/infiniband/sw/rdmavt/qp.c 	u32 nr = page & (BITS_PER_LONG - 1);
page              310 drivers/infiniband/sw/rdmavt/qp.c 	unsigned long page = get_zeroed_page(GFP_KERNEL);
page              317 drivers/infiniband/sw/rdmavt/qp.c 	if (map->page)
page              318 drivers/infiniband/sw/rdmavt/qp.c 		free_page(page);
page              320 drivers/infiniband/sw/rdmavt/qp.c 		map->page = (void *)page;
page              361 drivers/infiniband/sw/rdmavt/qp.c 		if (!map->page) {
page              363 drivers/infiniband/sw/rdmavt/qp.c 			if (!map->page) {
page              368 drivers/infiniband/sw/rdmavt/qp.c 		set_bit(offset, map->page);
page              389 drivers/infiniband/sw/rdmavt/qp.c 		free_page((unsigned long)qpt->map[i].page);
page              563 drivers/infiniband/sw/rdmavt/qp.c 		if (unlikely(!map->page)) {
page              565 drivers/infiniband/sw/rdmavt/qp.c 			if (unlikely(!map->page))
page              569 drivers/infiniband/sw/rdmavt/qp.c 			if (!test_and_set_bit(offset, map->page)) {
page              991 drivers/infiniband/sw/rdmavt/qp.c 	if (map->page)
page              992 drivers/infiniband/sw/rdmavt/qp.c 		clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
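qib_qp.c and rdmavt/qp.c above allocate QPN bitmap pages lazily with the same double-checked pattern: allocate a zeroed page, then re-check map->page and free the new page if another thread won the race. Condensed sketch (the surrounding lock is assumed, as in the excerpts; free_page() on a failed allocation is a harmless no-op):

        static void my_get_map_page(struct rvt_qpn_map *map)
        {
                unsigned long page = get_zeroed_page(GFP_KERNEL);

                if (map->page)
                        free_page(page);        /* lost the race; drop ours */
                else
                        map->page = (void *)page;
        }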
page               68 drivers/infiniband/sw/rdmavt/trace_mr.h 		__field(struct page *, page)
page               81 drivers/infiniband/sw/rdmavt/trace_mr.h 		__entry->page = virt_to_page(v);
page               99 drivers/infiniband/sw/rdmavt/trace_mr.h 		__entry->page,
page              136 drivers/infiniband/sw/rxe/rxe_loc.h 		      u64 *page, int num_pages, u64 iova);
page              592 drivers/infiniband/sw/rxe/rxe_mr.c 		      u64 *page, int num_pages, u64 iova)
page              612 drivers/infiniband/sw/rxe/rxe_mr.c 		buf->addr = *page++;
page              129 drivers/infiniband/sw/siw/siw.h 	struct page **plist;
page              420 drivers/infiniband/sw/siw/siw_mem.c 			kcalloc(nents, sizeof(struct page *), GFP_KERNEL);
page              427 drivers/infiniband/sw/siw/siw_mem.c 			struct page **plist = &umem->page_chunk[i].plist[got];
page               53 drivers/infiniband/sw/siw/siw_mem.h #define PAGE_CHUNK_SIZE (PAGES_PER_CHUNK * sizeof(struct page *))
page               63 drivers/infiniband/sw/siw/siw_mem.h static inline struct page *siw_get_upage(struct siw_umem *umem, u64 addr)
page               35 drivers/infiniband/sw/siw/siw_qp_rx.c 		struct page *p;
page               25 drivers/infiniband/sw/siw/siw_qp_tx.c static struct page *siw_get_pblpage(struct siw_mem *mem, u64 addr, int *idx)
page               67 drivers/infiniband/sw/siw/siw_qp_tx.c 			struct page *p;
page              324 drivers/infiniband/sw/siw/siw_qp_tx.c static int siw_tcp_sendpages(struct socket *s, struct page **page, int offset,
page              340 drivers/infiniband/sw/siw/siw_qp_tx.c 		rv = do_tcp_sendpages(sk, page[i], offset, bytes, flags);
page              369 drivers/infiniband/sw/siw/siw_qp_tx.c static int siw_0copy_tx(struct socket *s, struct page **page,
page              379 drivers/infiniband/sw/siw/siw_qp_tx.c 		rv = siw_tcp_sendpages(s, &page[i], offset, sge_bytes);
page              399 drivers/infiniband/sw/siw/siw_qp_tx.c static void siw_unmap_pages(struct page **pp, unsigned long kmap_mask)
page              429 drivers/infiniband/sw/siw/siw_qp_tx.c 	struct page *page_array[MAX_ARRAY];
page              490 drivers/infiniband/sw/siw/siw_qp_tx.c 				struct page *p;
page              169 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		struct page *page = alloc_page(gfp);
page              171 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		if (!page)
page              173 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE);
page              175 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		mapping[i + 1] = ib_dma_map_page(priv->ca, page,
page             3442 drivers/infiniband/ulp/srpt/ib_srpt.c 		char *page)
page             3447 drivers/infiniband/ulp/srpt/ib_srpt.c 	return sprintf(page, "%u\n", sport->port_attrib.srp_max_rdma_size);
page             3451 drivers/infiniband/ulp/srpt/ib_srpt.c 		const char *page, size_t count)
page             3458 drivers/infiniband/ulp/srpt/ib_srpt.c 	ret = kstrtoul(page, 0, &val);
page             3479 drivers/infiniband/ulp/srpt/ib_srpt.c 		char *page)
page             3484 drivers/infiniband/ulp/srpt/ib_srpt.c 	return sprintf(page, "%u\n", sport->port_attrib.srp_max_rsp_size);
page             3488 drivers/infiniband/ulp/srpt/ib_srpt.c 		const char *page, size_t count)
page             3495 drivers/infiniband/ulp/srpt/ib_srpt.c 	ret = kstrtoul(page, 0, &val);
page             3516 drivers/infiniband/ulp/srpt/ib_srpt.c 		char *page)
page             3521 drivers/infiniband/ulp/srpt/ib_srpt.c 	return sprintf(page, "%u\n", sport->port_attrib.srp_sq_size);
page             3525 drivers/infiniband/ulp/srpt/ib_srpt.c 		const char *page, size_t count)
page             3532 drivers/infiniband/ulp/srpt/ib_srpt.c 	ret = kstrtoul(page, 0, &val);
page             3553 drivers/infiniband/ulp/srpt/ib_srpt.c 					    char *page)
page             3558 drivers/infiniband/ulp/srpt/ib_srpt.c 	return sprintf(page, "%d\n", sport->port_attrib.use_srq);
page             3562 drivers/infiniband/ulp/srpt/ib_srpt.c 					     const char *page, size_t count)
page             3571 drivers/infiniband/ulp/srpt/ib_srpt.c 	ret = kstrtoul(page, 0, &val);
page             3646 drivers/infiniband/ulp/srpt/ib_srpt.c static ssize_t srpt_rdma_cm_port_show(struct config_item *item, char *page)
page             3648 drivers/infiniband/ulp/srpt/ib_srpt.c 	return sprintf(page, "%d\n", rdma_cm_port);
page             3652 drivers/infiniband/ulp/srpt/ib_srpt.c 				       const char *page, size_t count)
page             3660 drivers/infiniband/ulp/srpt/ib_srpt.c 	ret = kstrtou16(page, 0, &val);
page             3699 drivers/infiniband/ulp/srpt/ib_srpt.c static ssize_t srpt_tpg_enable_show(struct config_item *item, char *page)
page             3704 drivers/infiniband/ulp/srpt/ib_srpt.c 	return snprintf(page, PAGE_SIZE, "%d\n", sport->enabled);
page             3708 drivers/infiniband/ulp/srpt/ib_srpt.c 		const char *page, size_t count)
page             3715 drivers/infiniband/ulp/srpt/ib_srpt.c 	ret = kstrtoul(page, 0, &tmp);
page              106 drivers/input/misc/drv2667.c 	u32 page;
page              151 drivers/input/misc/drv2667.c 		haptics->page != read_buf) {
page              153 drivers/input/misc/drv2667.c 				DRV2667_PAGE, haptics->page);
page              168 drivers/input/misc/drv2667.c 		haptics->page != read_buf) {
page              187 drivers/input/misc/drv2667.c 				DRV2667_PAGE, haptics->page);
page              282 drivers/input/misc/drv2667.c 	haptics->page = DRV2667_PAGE_1;
page              294 drivers/input/misc/drv2667.c 	error = regmap_write(haptics->regmap, DRV2667_PAGE, haptics->page);
page               39 drivers/input/misc/xen-kbdfront.c 	struct xenkbd_page *page;
page              184 drivers/input/misc/xen-kbdfront.c 	struct xenkbd_page *page = info->page;
page              187 drivers/input/misc/xen-kbdfront.c 	prod = page->in_prod;
page              188 drivers/input/misc/xen-kbdfront.c 	if (prod == page->in_cons)
page              191 drivers/input/misc/xen-kbdfront.c 	for (cons = page->in_cons; cons != prod; cons++)
page              192 drivers/input/misc/xen-kbdfront.c 		xenkbd_handle_event(info, &XENKBD_IN_RING_REF(page, cons));
page              194 drivers/input/misc/xen-kbdfront.c 	page->in_cons = cons;
page              219 drivers/input/misc/xen-kbdfront.c 	info->page = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
page              220 drivers/input/misc/xen-kbdfront.c 	if (!info->page)
page              403 drivers/input/misc/xen-kbdfront.c 	memset(info->page, 0, PAGE_SIZE);
page              418 drivers/input/misc/xen-kbdfront.c 	free_page((unsigned long)info->page);
page              430 drivers/input/misc/xen-kbdfront.c 	                                  virt_to_gfn(info->page), 0);
page              453 drivers/input/misc/xen-kbdfront.c 			    virt_to_gfn(info->page));
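xen-kbdfront drains its shared-page event ring with the classic producer/consumer protocol visible above: read in_prod once, consume entries up to it, then publish the new in_cons. A hedged sketch with explicit barriers (the driver uses equivalent ones; my_handle_event and my_info stand in for the real dispatcher and its state):

        static void my_drain_ring(struct xenkbd_page *page, struct my_info *info)
        {
                u32 prod, cons;

                prod = page->in_prod;
                if (prod == page->in_cons)
                        return;                 /* ring empty */
                rmb();                          /* read entries only after prod */

                for (cons = page->in_cons; cons != prod; cons++)
                        my_handle_event(info, &XENKBD_IN_RING_REF(page, cons));

                mb();                           /* finish reads before freeing slots */
                page->in_cons = cons;
        }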
page               77 drivers/input/mouse/elan_i2c.h 			      const u8 *page, u16 checksum, int idx);
page              434 drivers/input/mouse/elan_i2c_core.c 			       const u8 *page, u16 checksum, int idx)
page              441 drivers/input/mouse/elan_i2c_core.c 						  page, checksum, idx);
page              472 drivers/input/mouse/elan_i2c_core.c 		const u8 *page = &fw->data[i * ETP_FW_PAGE_SIZE];
page              475 drivers/input/mouse/elan_i2c_core.c 			checksum += ((page[j + 1] << 8) | page[j]);
page              477 drivers/input/mouse/elan_i2c_core.c 		error = elan_write_fw_block(data, page, checksum, i);
page              576 drivers/input/mouse/elan_i2c_i2c.c 				   const u8 *page, u16 checksum, int idx)
page              586 drivers/input/mouse/elan_i2c_i2c.c 	memcpy(&page_store[2], page, ETP_FW_PAGE_SIZE);
page              418 drivers/input/mouse/elan_i2c_smbus.c 				     const u8 *page, u16 checksum, int idx)
page              433 drivers/input/mouse/elan_i2c_smbus.c 					   page);
page              443 drivers/input/mouse/elan_i2c_smbus.c 					   page + ETP_FW_PAGE_SIZE / 2);
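The Elan firmware updater checksums each page as a sum of little-endian 16-bit words before handing it to elan_write_fw_block(), as the elan_i2c_core.c loop above shows. A stand-alone version of that checksum (len is the page size in bytes, assumed even):

        static u16 my_fw_page_checksum(const u8 *page, size_t len)
        {
                u16 checksum = 0;
                size_t j;

                for (j = 0; j < len; j += 2)
                        checksum += (page[j + 1] << 8) | page[j];

                return checksum;        /* wraps mod 65536 by design */
        }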
page              518 drivers/input/mouse/sentelic.c PSMOUSE_DEFINE_ATTR(page, S_IWUSR | S_IRUGO, NULL,
page              500 drivers/input/rmi4/rmi_driver.c 			     int page,
page              509 drivers/input/rmi4/rmi_driver.c 	u16 page_start = RMI4_PAGE_SIZE * page;
page              546 drivers/input/rmi4/rmi_driver.c 	int page;
page              550 drivers/input/rmi4/rmi_driver.c 	for (page = 0; page <= RMI4_MAX_PAGE; page++) {
page              551 drivers/input/rmi4/rmi_driver.c 		retval = rmi_scan_pdt_page(rmi_dev, page, &empty_pages,
page               32 drivers/input/rmi4/rmi_i2c.c 	int page;
page               58 drivers/input/rmi4/rmi_i2c.c static int rmi_set_page(struct rmi_i2c_xport *rmi_i2c, u8 page)
page               61 drivers/input/rmi4/rmi_i2c.c 	u8 txbuf[2] = {RMI_PAGE_SELECT_REGISTER, page};
page               71 drivers/input/rmi4/rmi_i2c.c 	rmi_i2c->page = page;
page              103 drivers/input/rmi4/rmi_i2c.c 	if (RMI_I2C_PAGE(addr) != rmi_i2c->page) {
page              148 drivers/input/rmi4/rmi_i2c.c 	if (RMI_I2C_PAGE(addr) != rmi_i2c->page) {
page               36 drivers/input/rmi4/rmi_smbus.c 	int page;
page               41 drivers/input/rmi4/rmi_spi.c 	int page;
page              254 drivers/input/rmi4/rmi_spi.c static int rmi_set_page(struct rmi_spi_xport *rmi_spi, u8 page)
page              262 drivers/input/rmi4/rmi_spi.c 	ret = rmi_spi_xfer(rmi_spi, &cmd, &page, 1, NULL, 0);
page              265 drivers/input/rmi4/rmi_spi.c 		rmi_spi->page = page;
page              280 drivers/input/rmi4/rmi_spi.c 	if (RMI_SPI_PAGE(addr) != rmi_spi->page) {
page              306 drivers/input/rmi4/rmi_spi.c 	if (RMI_SPI_PAGE(addr) != rmi_spi->page) {
page              146 drivers/input/touchscreen/atmel_mxt_ts.c 	u8 page;
page             2227 drivers/input/touchscreen/atmel_mxt_ts.c 	unsigned int ofs, page;
page             2240 drivers/input/touchscreen/atmel_mxt_ts.c 	page = ofs / MXT_DIAGNOSTIC_SIZE;
page             2244 drivers/input/touchscreen/atmel_mxt_ts.c 		page += col * MXT1386_PAGES_PER_COLUMN;
page             2246 drivers/input/touchscreen/atmel_mxt_ts.c 	return get_unaligned_le16(&dbg->t37_buf[page].data[ofs]);
page             2280 drivers/input/touchscreen/atmel_mxt_ts.c 	int page;
page             2286 drivers/input/touchscreen/atmel_mxt_ts.c 	for (page = 0; page < dbg->t37_pages; page++) {
page             2287 drivers/input/touchscreen/atmel_mxt_ts.c 		p = dbg->t37_buf + page;
page             2318 drivers/input/touchscreen/atmel_mxt_ts.c 		if (p->mode != mode || p->page != page) {
page             2324 drivers/input/touchscreen/atmel_mxt_ts.c 			__func__, page, retries);
page              582 drivers/input/touchscreen/elants_i2c.c 				    const void *page)
page              590 drivers/input/touchscreen/elants_i2c.c 		error = elants_i2c_send(client, page, ELAN_FW_PAGESIZE);
page              626 drivers/input/touchscreen/elants_i2c.c 	int page, n_fw_pages;
page              698 drivers/input/touchscreen/elants_i2c.c 	for (page = 0; page < n_fw_pages; page++) {
page              700 drivers/input/touchscreen/elants_i2c.c 					fw->data + page * ELAN_FW_PAGESIZE);
page              704 drivers/input/touchscreen/elants_i2c.c 				page, error);
page             1364 drivers/iommu/amd_iommu.c static void free_page_list(struct page *freelist)
page             1373 drivers/iommu/amd_iommu.c static struct page *free_pt_page(unsigned long pt, struct page *freelist)
page             1375 drivers/iommu/amd_iommu.c 	struct page *p = virt_to_page((void *)pt);
page             1383 drivers/iommu/amd_iommu.c static struct page *free_pt_##LVL (unsigned long __pt, struct page *freelist)	\
page             1414 drivers/iommu/amd_iommu.c static struct page *free_sub_pt(unsigned long root, int mode,
page             1415 drivers/iommu/amd_iommu.c 				struct page *freelist)
page             1449 drivers/iommu/amd_iommu.c 	struct page *freelist = NULL;
page             1503 drivers/iommu/amd_iommu.c 	u64 *pte, *page;
page             1546 drivers/iommu/amd_iommu.c 			page = (u64 *)get_zeroed_page(gfp);
page             1548 drivers/iommu/amd_iommu.c 			if (!page)
page             1551 drivers/iommu/amd_iommu.c 			__npte = PM_LEVEL_PDE(level, iommu_virt_to_phys(page));
page             1555 drivers/iommu/amd_iommu.c 				free_page((unsigned long)page);
page             1632 drivers/iommu/amd_iommu.c static struct page *free_clear_pte(u64 *pte, u64 pteval, struct page *freelist)
page             1665 drivers/iommu/amd_iommu.c 	struct page *freelist = NULL;
page             2505 drivers/iommu/amd_iommu.c static dma_addr_t map_page(struct device *dev, struct page *page,
page             2510 drivers/iommu/amd_iommu.c 	phys_addr_t paddr = page_to_phys(page) + offset;
page             2696 drivers/iommu/amd_iommu.c 	struct page *page;
page             2700 drivers/iommu/amd_iommu.c 		page = alloc_pages(flag, get_order(size));
page             2701 drivers/iommu/amd_iommu.c 		*dma_addr = page_to_phys(page);
page             2702 drivers/iommu/amd_iommu.c 		return page_address(page);
page             2712 drivers/iommu/amd_iommu.c 	page = alloc_pages(flag | __GFP_NOWARN,  get_order(size));
page             2713 drivers/iommu/amd_iommu.c 	if (!page) {
page             2717 drivers/iommu/amd_iommu.c 		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
page             2719 drivers/iommu/amd_iommu.c 		if (!page)
page             2726 drivers/iommu/amd_iommu.c 	*dma_addr = __map_single(dev, dma_dom, page_to_phys(page),
page             2732 drivers/iommu/amd_iommu.c 	return page_address(page);
page             2736 drivers/iommu/amd_iommu.c 	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
page             2737 drivers/iommu/amd_iommu.c 		__free_pages(page, get_order(size));
page             2751 drivers/iommu/amd_iommu.c 	struct page *page;
page             2753 drivers/iommu/amd_iommu.c 	page = virt_to_page(virt_addr);
page             2765 drivers/iommu/amd_iommu.c 	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
page             2766 drivers/iommu/amd_iommu.c 		__free_pages(page, get_order(size));
page               28 drivers/iommu/arm-smmu-impl.c static u32 arm_smmu_read_ns(struct arm_smmu_device *smmu, int page,
page               31 drivers/iommu/arm-smmu-impl.c 	if (page == ARM_SMMU_GR0)
page               33 drivers/iommu/arm-smmu-impl.c 	return readl_relaxed(arm_smmu_page(smmu, page) + offset);
page               36 drivers/iommu/arm-smmu-impl.c static void arm_smmu_write_ns(struct arm_smmu_device *smmu, int page,
page               39 drivers/iommu/arm-smmu-impl.c 	if (page == ARM_SMMU_GR0)
page               41 drivers/iommu/arm-smmu-impl.c 	writel_relaxed(val, arm_smmu_page(smmu, page) + offset);
page              241 drivers/iommu/arm-smmu.c static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
page              247 drivers/iommu/arm-smmu.c 	arm_smmu_writel(smmu, page, sync, QCOM_DUMMY_VAL);
page              250 drivers/iommu/arm-smmu.c 			reg = arm_smmu_readl(smmu, page, status);
page              329 drivers/iommu/arm-smmu.h 	u32 (*read_reg)(struct arm_smmu_device *smmu, int page, int offset);
page              330 drivers/iommu/arm-smmu.h 	void (*write_reg)(struct arm_smmu_device *smmu, int page, int offset,
page              332 drivers/iommu/arm-smmu.h 	u64 (*read_reg64)(struct arm_smmu_device *smmu, int page, int offset);
page              333 drivers/iommu/arm-smmu.h 	void (*write_reg64)(struct arm_smmu_device *smmu, int page, int offset,
page              345 drivers/iommu/arm-smmu.h static inline u32 arm_smmu_readl(struct arm_smmu_device *smmu, int page, int offset)
page              348 drivers/iommu/arm-smmu.h 		return smmu->impl->read_reg(smmu, page, offset);
page              349 drivers/iommu/arm-smmu.h 	return readl_relaxed(arm_smmu_page(smmu, page) + offset);
page              352 drivers/iommu/arm-smmu.h static inline void arm_smmu_writel(struct arm_smmu_device *smmu, int page,
page              356 drivers/iommu/arm-smmu.h 		smmu->impl->write_reg(smmu, page, offset, val);
page              358 drivers/iommu/arm-smmu.h 		writel_relaxed(val, arm_smmu_page(smmu, page) + offset);
page              361 drivers/iommu/arm-smmu.h static inline u64 arm_smmu_readq(struct arm_smmu_device *smmu, int page, int offset)
page              364 drivers/iommu/arm-smmu.h 		return smmu->impl->read_reg64(smmu, page, offset);
page              365 drivers/iommu/arm-smmu.h 	return readq_relaxed(arm_smmu_page(smmu, page) + offset);
page              368 drivers/iommu/arm-smmu.h static inline void arm_smmu_writeq(struct arm_smmu_device *smmu, int page,
page              372 drivers/iommu/arm-smmu.h 		smmu->impl->write_reg64(smmu, page, offset, val);
page              374 drivers/iommu/arm-smmu.h 		writeq_relaxed(val, arm_smmu_page(smmu, page) + offset);
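The arm-smmu accessors above funnel every MMIO access through a wrapper that defers to an implementation hook when one is registered and otherwise falls back to a plain relaxed access; arm_smmu_read_ns() in arm-smmu-impl.c plugs into exactly this hook to special-case the non-secure GR0 page. The wrapper shape, restated:

        static inline u32 my_smmu_readl(struct arm_smmu_device *smmu, int page,
                                        int offset)
        {
                if (smmu->impl && smmu->impl->read_reg)
                        return smmu->impl->read_reg(smmu, page, offset);
                return readl_relaxed(arm_smmu_page(smmu, page) + offset);
        }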
page              485 drivers/iommu/dma-iommu.c static void __iommu_dma_free_pages(struct page **pages, int count)
page              492 drivers/iommu/dma-iommu.c static struct page **__iommu_dma_alloc_pages(struct device *dev,
page              495 drivers/iommu/dma-iommu.c 	struct page **pages;
page              510 drivers/iommu/dma-iommu.c 		struct page *page = NULL;
page              526 drivers/iommu/dma-iommu.c 			page = alloc_pages_node(nid, alloc_flags, order);
page              527 drivers/iommu/dma-iommu.c 			if (!page)
page              531 drivers/iommu/dma-iommu.c 			if (!PageCompound(page)) {
page              532 drivers/iommu/dma-iommu.c 				split_page(page, order);
page              534 drivers/iommu/dma-iommu.c 			} else if (!split_huge_page(page)) {
page              537 drivers/iommu/dma-iommu.c 			__free_pages(page, order);
page              539 drivers/iommu/dma-iommu.c 		if (!page) {
page              545 drivers/iommu/dma-iommu.c 			pages[i++] = page++;
page              574 drivers/iommu/dma-iommu.c 	struct page **pages;
page              646 drivers/iommu/dma-iommu.c static int __iommu_dma_mmap(struct page **pages, size_t size,
page              704 drivers/iommu/dma-iommu.c static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
page              708 drivers/iommu/dma-iommu.c 	phys_addr_t phys = page_to_phys(page) + offset;
page              926 drivers/iommu/dma-iommu.c 	struct page *page = NULL, **pages = NULL;
page              940 drivers/iommu/dma-iommu.c 			page = vmalloc_to_page(cpu_addr);
page              944 drivers/iommu/dma-iommu.c 		page = virt_to_page(cpu_addr);
page              949 drivers/iommu/dma-iommu.c 	if (page)
page              950 drivers/iommu/dma-iommu.c 		dma_free_contiguous(dev, page, alloc_size);
page              961 drivers/iommu/dma-iommu.c 		struct page **pagep, gfp_t gfp, unsigned long attrs)
page              966 drivers/iommu/dma-iommu.c 	struct page *page = NULL;
page              969 drivers/iommu/dma-iommu.c 	page = dma_alloc_contiguous(dev, alloc_size, gfp);
page              970 drivers/iommu/dma-iommu.c 	if (!page)
page              971 drivers/iommu/dma-iommu.c 		page = alloc_pages_node(node, gfp, get_order(alloc_size));
page              972 drivers/iommu/dma-iommu.c 	if (!page)
page              975 drivers/iommu/dma-iommu.c 	if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) {
page              978 drivers/iommu/dma-iommu.c 		cpu_addr = dma_common_contiguous_remap(page, alloc_size,
page              984 drivers/iommu/dma-iommu.c 			arch_dma_prep_coherent(page, size);
page              986 drivers/iommu/dma-iommu.c 		cpu_addr = page_address(page);
page              989 drivers/iommu/dma-iommu.c 	*pagep = page;
page              993 drivers/iommu/dma-iommu.c 	dma_free_contiguous(dev, page, alloc_size);
page             1002 drivers/iommu/dma-iommu.c 	struct page *page = NULL;
page             1013 drivers/iommu/dma-iommu.c 		cpu_addr = dma_alloc_from_pool(PAGE_ALIGN(size), &page, gfp);
page             1015 drivers/iommu/dma-iommu.c 		cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
page             1019 drivers/iommu/dma-iommu.c 	*handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot);
page             1045 drivers/iommu/dma-iommu.c 		struct page **pages = dma_common_find_pages(cpu_addr);
page             1063 drivers/iommu/dma-iommu.c 	struct page *page;
page             1067 drivers/iommu/dma-iommu.c 		struct page **pages = dma_common_find_pages(cpu_addr);
page             1075 drivers/iommu/dma-iommu.c 		page = vmalloc_to_page(cpu_addr);
page             1077 drivers/iommu/dma-iommu.c 		page = virt_to_page(cpu_addr);
page             1082 drivers/iommu/dma-iommu.c 		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
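__iommu_dma_alloc_pages() above allocates the largest order it can and then calls split_page() so every struct page in the run can be freed individually later. A simplified sketch of that split step (assumes count is a power of two so one order-sized allocation covers it exactly; the real function retries smaller orders and handles compound pages):

        static struct page **my_alloc_split(unsigned int count, gfp_t gfp)
        {
                unsigned int order = get_order(count << PAGE_SHIFT);
                struct page **pages, *page;
                unsigned int i;

                pages = kvcalloc(count, sizeof(*pages), GFP_KERNEL);
                if (!pages)
                        return NULL;

                page = alloc_pages(gfp, order);
                if (!page) {
                        kvfree(pages);
                        return NULL;
                }
                split_page(page, order);        /* subpages now freeable one by one */

                for (i = 0; i < count; i++)
                        pages[i] = page + i;
                return pages;
        }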
page             1459 drivers/iommu/dmar.c 	struct page *desc_page;
page             1008 drivers/iommu/fsl_pamu.c 	struct page *p;
page              162 drivers/iommu/intel-iommu.c static inline unsigned long page_to_dma_pfn(struct page *pg)
page              516 drivers/iommu/intel-iommu.c 	struct page *page;
page              519 drivers/iommu/intel-iommu.c 	page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
page              520 drivers/iommu/intel-iommu.c 	if (page)
page              521 drivers/iommu/intel-iommu.c 		vaddr = page_address(page);
page             1070 drivers/iommu/intel-iommu.c static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
page             1072 drivers/iommu/intel-iommu.c 					    struct page *freelist)
page             1074 drivers/iommu/intel-iommu.c 	struct page *pg;
page             1094 drivers/iommu/intel-iommu.c static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
page             1098 drivers/iommu/intel-iommu.c 					struct page *freelist)
page             1146 drivers/iommu/intel-iommu.c static struct page *domain_unmap(struct dmar_domain *domain,
page             1150 drivers/iommu/intel-iommu.c 	struct page *freelist;
page             1162 drivers/iommu/intel-iommu.c 		struct page *pgd_page = virt_to_page(domain->pgd);
page             1172 drivers/iommu/intel-iommu.c static void dma_free_pagelist(struct page *freelist)
page             1174 drivers/iommu/intel-iommu.c 	struct page *pg;
page             1184 drivers/iommu/intel-iommu.c 	struct page *freelist = (struct page *)data;
page             1914 drivers/iommu/intel-iommu.c 		struct page *freelist;
page             3551 drivers/iommu/intel-iommu.c static dma_addr_t intel_map_page(struct device *dev, struct page *page,
page             3557 drivers/iommu/intel-iommu.c 		return __intel_map_single(dev, page_to_phys(page) + offset,
page             3559 drivers/iommu/intel-iommu.c 	return dma_direct_map_page(dev, page, offset, size, dir, attrs);
page             3579 drivers/iommu/intel-iommu.c 	struct page *freelist;
page             3637 drivers/iommu/intel-iommu.c 	struct page *page = NULL;
page             3649 drivers/iommu/intel-iommu.c 		page = dma_alloc_from_contiguous(dev, count, order,
page             3653 drivers/iommu/intel-iommu.c 	if (!page)
page             3654 drivers/iommu/intel-iommu.c 		page = alloc_pages(flags, order);
page             3655 drivers/iommu/intel-iommu.c 	if (!page)
page             3657 drivers/iommu/intel-iommu.c 	memset(page_address(page), 0, size);
page             3659 drivers/iommu/intel-iommu.c 	*dma_handle = __intel_map_single(dev, page_to_phys(page), size,
page             3663 drivers/iommu/intel-iommu.c 		return page_address(page);
page             3664 drivers/iommu/intel-iommu.c 	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
page             3665 drivers/iommu/intel-iommu.c 		__free_pages(page, order);
page             3674 drivers/iommu/intel-iommu.c 	struct page *page = virt_to_page(vaddr);
page             3683 drivers/iommu/intel-iommu.c 	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
page             3684 drivers/iommu/intel-iommu.c 		__free_pages(page, order);
page             3918 drivers/iommu/intel-iommu.c bounce_map_page(struct device *dev, struct page *page, unsigned long offset,
page             3921 drivers/iommu/intel-iommu.c 	return bounce_map_single(dev, page_to_phys(page) + offset,
page             4675 drivers/iommu/intel-iommu.c 			struct page *freelist;
page             5482 drivers/iommu/intel-iommu.c 	struct page *freelist = NULL;
page              127 drivers/iommu/intel-pasid.c 	struct page *pages;
page               43 drivers/iommu/intel-svm.c 	struct page *pages;
page              534 drivers/iommu/intel_irq_remapping.c 	struct page *pages;
page              235 drivers/iommu/io-pgtable-arm.c 	struct page *p;
page              229 drivers/iommu/rockchip-iommu.c static u32 rk_mk_pte(phys_addr_t page, int prot)
page              234 drivers/iommu/rockchip-iommu.c 	page &= RK_PTE_PAGE_ADDRESS_MASK;
page              235 drivers/iommu/rockchip-iommu.c 	return page | flags | RK_PTE_PAGE_VALID;
page               53 drivers/iommu/tegra-smmu.c 	struct page **pts;
page               54 drivers/iommu/tegra-smmu.c 	struct page *pd;
page              541 drivers/iommu/tegra-smmu.c static u32 *tegra_smmu_pte_offset(struct page *pt_page, unsigned long iova)
page              553 drivers/iommu/tegra-smmu.c 	struct page *pt_page;
page              573 drivers/iommu/tegra-smmu.c 		struct page *page;
page              576 drivers/iommu/tegra-smmu.c 		page = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
page              577 drivers/iommu/tegra-smmu.c 		if (!page)
page              580 drivers/iommu/tegra-smmu.c 		dma = dma_map_page(smmu->dev, page, 0, SMMU_SIZE_PT,
page              583 drivers/iommu/tegra-smmu.c 			__free_page(page);
page              590 drivers/iommu/tegra-smmu.c 			__free_page(page);
page              594 drivers/iommu/tegra-smmu.c 		as->pts[pde] = page;
page              619 drivers/iommu/tegra-smmu.c 	struct page *page = as->pts[pde];
page              633 drivers/iommu/tegra-smmu.c 		__free_page(page);
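tegra-smmu's page-table allocation above shows the canonical dma_map_page() error path: check dma_mapping_error() rather than the raw handle, and unwind the page allocation on failure. Condensed:

        static int my_map_pt(struct device *dev, struct page *page, size_t size,
                             dma_addr_t *dmap)
        {
                dma_addr_t dma = dma_map_page(dev, page, 0, size, DMA_TO_DEVICE);

                if (dma_mapping_error(dev, dma)) {
                        __free_page(page);      /* unwind the allocation */
                        return -ENOMEM;
                }

                *dmap = dma;
                return 0;
        }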
page             1637 drivers/irqchip/irq-gic-v3-its.c static struct page *its_allocate_prop_table(gfp_t gfp_flags)
page             1639 drivers/irqchip/irq-gic-v3-its.c 	struct page *prop_page;
page             1650 drivers/irqchip/irq-gic-v3-its.c static void its_free_prop_table(struct page *prop_page)
page             1705 drivers/irqchip/irq-gic-v3-its.c 		struct page *page;
page             1710 drivers/irqchip/irq-gic-v3-its.c 		page = its_allocate_prop_table(GFP_NOWAIT);
page             1711 drivers/irqchip/irq-gic-v3-its.c 		if (!page) {
page             1716 drivers/irqchip/irq-gic-v3-its.c 		gic_rdists->prop_table_pa = page_to_phys(page);
page             1717 drivers/irqchip/irq-gic-v3-its.c 		gic_rdists->prop_table_va = page_address(page);
page             1763 drivers/irqchip/irq-gic-v3-its.c 	struct page *page;
page             1776 drivers/irqchip/irq-gic-v3-its.c 	page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order);
page             1777 drivers/irqchip/irq-gic-v3-its.c 	if (!page)
page             1780 drivers/irqchip/irq-gic-v3-its.c 	base = (void *)page_address(page);
page             2013 drivers/irqchip/irq-gic-v3-its.c static struct page *its_allocate_pending_table(gfp_t gfp_flags)
page             2015 drivers/irqchip/irq-gic-v3-its.c 	struct page *pend_page;
page             2028 drivers/irqchip/irq-gic-v3-its.c static void its_free_pending_table(struct page *pt)
page             2075 drivers/irqchip/irq-gic-v3-its.c 		struct page *pend_page;
page             2115 drivers/irqchip/irq-gic-v3-its.c 	struct page *pend_page;
page             2316 drivers/irqchip/irq-gic-v3-its.c 	struct page *page;
page             2334 drivers/irqchip/irq-gic-v3-its.c 		page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
page             2336 drivers/irqchip/irq-gic-v3-its.c 		if (!page)
page             2341 drivers/irqchip/irq-gic-v3-its.c 			gic_flush_dcache_to_poc(page_address(page), baser->psz);
page             2343 drivers/irqchip/irq-gic-v3-its.c 		table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);
page             3016 drivers/irqchip/irq-gic-v3-its.c 	struct page *vpt_page;
page             3083 drivers/irqchip/irq-gic-v3-its.c 	struct page *vprop_page;
page             3576 drivers/irqchip/irq-gic-v3-its.c 	struct page *page;
page             3633 drivers/irqchip/irq-gic-v3-its.c 	page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
page             3635 drivers/irqchip/irq-gic-v3-its.c 	if (!page) {
page             3639 drivers/irqchip/irq-gic-v3-its.c 	its->cmd_base = (void *)page_address(page);
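The GICv3 ITS entries show the NUMA-aware variant of the same idea: ITS tables are allocated zeroed and node-local with alloc_pages_node() and then addressed through page_address(). A hedged sketch (its_alloc_table() is a hypothetical name):

	#include <linux/gfp.h>

	static void *its_alloc_table(int node, unsigned int order)
	{
		struct page *page;

		/* Zeroed, node-local allocation, as in the ITS probe path. */
		page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, order);
		if (!page)
			return NULL;
		return page_address(page);
	}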
page              834 drivers/lightnvm/core.c 	struct page *page;
page              837 drivers/lightnvm/core.c 	page = alloc_page(GFP_KERNEL);
page              838 drivers/lightnvm/core.c 	if (!page)
page              842 drivers/lightnvm/core.c 	bio_add_page(&bio, page, PAGE_SIZE, 0);
page              855 drivers/lightnvm/core.c 	__free_page(page);
page              327 drivers/lightnvm/pblk-core.c 	struct page *page;
page              332 drivers/lightnvm/pblk-core.c 		page = bv->bv_page;
page              335 drivers/lightnvm/pblk-core.c 				mempool_free(page++, &pblk->page_bio_pool);
page              343 drivers/lightnvm/pblk-core.c 	struct page *page;
page              347 drivers/lightnvm/pblk-core.c 		page = mempool_alloc(&pblk->page_bio_pool, flags);
page              349 drivers/lightnvm/pblk-core.c 		ret = bio_add_pc_page(q, bio, page, PBLK_EXPOSED_PAGE_SIZE, 0);
page              352 drivers/lightnvm/pblk-core.c 			mempool_free(page, &pblk->page_bio_pool);
page             2136 drivers/lightnvm/pblk-core.c 	void *page;
page             2142 drivers/lightnvm/pblk-core.c 	page = page_to_virt(rqd->bio->bi_io_vec[rqd->bio->bi_vcnt - 1].bv_page);
page             2146 drivers/lightnvm/pblk-core.c 			page + (i * sizeof(struct pblk_sec_meta)),
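pblk (pblk-core.c 343-352 above) fills a bio from a mempool so that page allocation degrades gracefully under memory pressure, and returns a page to the pool when bio_add_pc_page() rejects it. A sketch of that loop, assuming a pool of order-0 pages and a hypothetical wrapper name:

	#include <linux/bio.h>
	#include <linux/blkdev.h>
	#include <linux/mempool.h>

	static int example_bio_add_pages(struct request_queue *q, struct bio *bio,
					 mempool_t *pool, int nr_pages, gfp_t flags)
	{
		struct page *page;
		int i;

		for (i = 0; i < nr_pages; i++) {
			/* With a blocking gfp mask mempool_alloc() cannot fail. */
			page = mempool_alloc(pool, flags);
			if (!page)
				return -ENOMEM;
			if (bio_add_pc_page(q, bio, page, PAGE_SIZE, 0)
							!= PAGE_SIZE) {
				mempool_free(page, pool);  /* bio is full */
				return -ENOMEM;
			}
		}
		return 0;
	}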
page              559 drivers/lightnvm/pblk-rb.c 	struct page *page;
page              589 drivers/lightnvm/pblk-rb.c 		page = virt_to_page(entry->data);
page              590 drivers/lightnvm/pblk-rb.c 		if (!page) {
page              599 drivers/lightnvm/pblk-rb.c 		if (bio_add_pc_page(q, bio, page, rb->seg_size, 0) !=
page               24 drivers/lightnvm/pblk-sysfs.c static ssize_t pblk_sysfs_luns_show(struct pblk *pblk, char *page)
page               40 drivers/lightnvm/pblk-sysfs.c 		sz += snprintf(page + sz, PAGE_SIZE - sz,
page               51 drivers/lightnvm/pblk-sysfs.c static ssize_t pblk_sysfs_rate_limiter(struct pblk *pblk, char *page)
page               68 drivers/lightnvm/pblk-sysfs.c 	return snprintf(page, PAGE_SIZE,
page               83 drivers/lightnvm/pblk-sysfs.c static ssize_t pblk_sysfs_gc_state_show(struct pblk *pblk, char *page)
page               88 drivers/lightnvm/pblk-sysfs.c 	return snprintf(page, PAGE_SIZE, "gc_enabled=%d, gc_active=%d\n",
page               92 drivers/lightnvm/pblk-sysfs.c static ssize_t pblk_sysfs_stats(struct pblk *pblk, char *page)
page               96 drivers/lightnvm/pblk-sysfs.c 	sz = snprintf(page, PAGE_SIZE,
page              108 drivers/lightnvm/pblk-sysfs.c static ssize_t pblk_sysfs_write_buffer(struct pblk *pblk, char *page)
page              110 drivers/lightnvm/pblk-sysfs.c 	return pblk_rb_sysfs(&pblk->rwb, page);
page              113 drivers/lightnvm/pblk-sysfs.c static ssize_t pblk_sysfs_ppaf(struct pblk *pblk, char *page)
page              123 drivers/lightnvm/pblk-sysfs.c 		sz = snprintf(page, PAGE_SIZE,
page              133 drivers/lightnvm/pblk-sysfs.c 		sz += snprintf(page + sz, PAGE_SIZE - sz,
page              145 drivers/lightnvm/pblk-sysfs.c 		sz = snprintf(page, PAGE_SIZE,
page              153 drivers/lightnvm/pblk-sysfs.c 		sz += snprintf(page + sz, PAGE_SIZE - sz,
page              164 drivers/lightnvm/pblk-sysfs.c static ssize_t pblk_sysfs_lines(struct pblk *pblk, char *page)
page              281 drivers/lightnvm/pblk-sysfs.c 	sz = snprintf(page, PAGE_SIZE - sz,
page              285 drivers/lightnvm/pblk-sysfs.c 	sz += snprintf(page + sz, PAGE_SIZE - sz,
page              295 drivers/lightnvm/pblk-sysfs.c 	sz += snprintf(page + sz, PAGE_SIZE - sz,
page              300 drivers/lightnvm/pblk-sysfs.c 	sz += snprintf(page + sz, PAGE_SIZE - sz,
page              309 drivers/lightnvm/pblk-sysfs.c static ssize_t pblk_sysfs_lines_info(struct pblk *pblk, char *page)
page              316 drivers/lightnvm/pblk-sysfs.c 	sz = snprintf(page, PAGE_SIZE - sz,
page              319 drivers/lightnvm/pblk-sysfs.c 	sz += snprintf(page + sz, PAGE_SIZE - sz,
page              323 drivers/lightnvm/pblk-sysfs.c 	sz += snprintf(page + sz, PAGE_SIZE - sz,
page              328 drivers/lightnvm/pblk-sysfs.c 	sz += snprintf(page + sz, PAGE_SIZE - sz,
page              337 drivers/lightnvm/pblk-sysfs.c static ssize_t pblk_sysfs_get_sec_per_write(struct pblk *pblk, char *page)
page              339 drivers/lightnvm/pblk-sysfs.c 	return snprintf(page, PAGE_SIZE, "%d\n", pblk->sec_per_write);
page              343 drivers/lightnvm/pblk-sysfs.c 				  char *page)
page              347 drivers/lightnvm/pblk-sysfs.c 	sz = snprintf(page, PAGE_SIZE,
page              352 drivers/lightnvm/pblk-sysfs.c 		sz += snprintf(page + sz, PAGE_SIZE - sz, "NaN\n");
page              361 drivers/lightnvm/pblk-sysfs.c 		sz += snprintf(page + sz, PAGE_SIZE - sz, "%llu.%05u\n",
page              368 drivers/lightnvm/pblk-sysfs.c static ssize_t pblk_sysfs_get_write_amp_mileage(struct pblk *pblk, char *page)
page              372 drivers/lightnvm/pblk-sysfs.c 		page);
page              375 drivers/lightnvm/pblk-sysfs.c static ssize_t pblk_sysfs_get_write_amp_trip(struct pblk *pblk, char *page)
page              380 drivers/lightnvm/pblk-sysfs.c 		atomic64_read(&pblk->pad_wa) - pblk->pad_rst_wa, page);
page              393 drivers/lightnvm/pblk-sysfs.c static ssize_t pblk_sysfs_get_padding_dist(struct pblk *pblk, char *page)
page              404 drivers/lightnvm/pblk-sysfs.c 			sz += snprintf(page + sz, PAGE_SIZE - sz,
page              406 drivers/lightnvm/pblk-sysfs.c 		sz += snprintf(page + sz, PAGE_SIZE - sz, "\n");
page              414 drivers/lightnvm/pblk-sysfs.c 	sz += snprintf(page + sz, PAGE_SIZE - sz, "0:%lld%% ",
page              422 drivers/lightnvm/pblk-sysfs.c 		sz += snprintf(page + sz, PAGE_SIZE - sz, "%d:%lld%% ",
page              425 drivers/lightnvm/pblk-sysfs.c 	sz += snprintf(page + sz, PAGE_SIZE - sz, "\n");
page              431 drivers/lightnvm/pblk-sysfs.c static ssize_t pblk_sysfs_stats_debug(struct pblk *pblk, char *page)
page              433 drivers/lightnvm/pblk-sysfs.c 	return snprintf(page, PAGE_SIZE,
page              451 drivers/lightnvm/pblk-sysfs.c static ssize_t pblk_sysfs_gc_force(struct pblk *pblk, const char *page,
page              457 drivers/lightnvm/pblk-sysfs.c 	c_len = strcspn(page, "\n");
page              461 drivers/lightnvm/pblk-sysfs.c 	if (kstrtouint(page, 0, &force))
page              470 drivers/lightnvm/pblk-sysfs.c 					     const char *page, size_t len)
page              475 drivers/lightnvm/pblk-sysfs.c 	c_len = strcspn(page, "\n");
page              479 drivers/lightnvm/pblk-sysfs.c 	if (kstrtouint(page, 0, &sec_per_write))
page              500 drivers/lightnvm/pblk-sysfs.c 			const char *page, size_t len)
page              505 drivers/lightnvm/pblk-sysfs.c 	c_len = strcspn(page, "\n");
page              509 drivers/lightnvm/pblk-sysfs.c 	if (kstrtouint(page, 0, &reset_value))
page              524 drivers/lightnvm/pblk-sysfs.c 			const char *page, size_t len)
page              531 drivers/lightnvm/pblk-sysfs.c 	c_len = strcspn(page, "\n");
page              535 drivers/lightnvm/pblk-sysfs.c 	if (kstrtouint(page, 0, &reset_value))
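All of the pblk sysfs handlers above follow the two standard idioms: show() formats into the caller-supplied one-page buffer with snprintf(page, PAGE_SIZE, ...), and store() trims the trailing newline with strcspn() before parsing with kstrtouint(). A hedged pair mirroring that shape (handler names hypothetical; struct pblk and sec_per_write are from pblk.h):

	#include <linux/kernel.h>
	#include <linux/string.h>

	static ssize_t example_show(struct pblk *pblk, char *page)
	{
		/* sysfs hands us exactly one page; never write past PAGE_SIZE. */
		return snprintf(page, PAGE_SIZE, "%d\n", pblk->sec_per_write);
	}

	static ssize_t example_store(struct pblk *pblk, const char *page, size_t len)
	{
		size_t c_len;
		unsigned int val;

		c_len = strcspn(page, "\n");	/* ignore the trailing newline */
		if (c_len >= len)
			return -EINVAL;
		if (kstrtouint(page, 0, &val))
			return -EINVAL;

		/* ... validate and apply val ... */
		return len;			/* consume the whole write */
	}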
page              158 drivers/lightnvm/pblk.h 	struct page *pages;
page             1251 drivers/md/bcache/bset.c 		struct page *outp;
page               63 drivers/md/bcache/super.c 			      struct page **res)
page             1355 drivers/md/bcache/super.c static int register_bdev(struct cache_sb *sb, struct page *sb_page,
page             2262 drivers/md/bcache/super.c static int register_cache(struct cache_sb *sb, struct page *sb_page,
page             2379 drivers/md/bcache/super.c 	struct page *sb_page;
page             1303 drivers/md/dm-crypt.c 	struct page *page;
page             1318 drivers/md/dm-crypt.c 		page = mempool_alloc(&cc->page_pool, gfp_mask);
page             1319 drivers/md/dm-crypt.c 		if (!page) {
page             1328 drivers/md/dm-crypt.c 		bio_add_page(clone, page, len, 0);
page             2093 drivers/md/dm-crypt.c 	struct page *page;
page             2099 drivers/md/dm-crypt.c 	page = alloc_page(gfp_mask);
page             2100 drivers/md/dm-crypt.c 	if (likely(page != NULL))
page             2103 drivers/md/dm-crypt.c 	return page;
page             2106 drivers/md/dm-crypt.c static void crypt_page_free(void *page, void *pool_data)
page             2110 drivers/md/dm-crypt.c 	__free_page(page);
page              503 drivers/md/dm-integrity.c 	unsigned long bit, end_bit, this_end_bit, page, end_page;
page              523 drivers/md/dm-integrity.c 	page = bit / (PAGE_SIZE * 8);
page              530 drivers/md/dm-integrity.c 	if (page < end_page) {
page              536 drivers/md/dm-integrity.c 	data = lowmem_page_address(bitmap[page].page);
page              596 drivers/md/dm-integrity.c 	if (unlikely(page < end_page)) {
page              598 drivers/md/dm-integrity.c 		page++;
page              611 drivers/md/dm-integrity.c 		unsigned long *dst_data = lowmem_page_address(dst[i].page);
page              612 drivers/md/dm-integrity.c 		unsigned long *src_data = lowmem_page_address(src[i].page);
page              665 drivers/md/dm-integrity.c 	va = lowmem_page_address(pl[pl_index].page);
page              808 drivers/md/dm-integrity.c 		struct page *src_pages[2];
page              809 drivers/md/dm-integrity.c 		struct page *dst_page;
page              823 drivers/md/dm-integrity.c 		dst_page = target_pl[pl_index].page;
page              824 drivers/md/dm-integrity.c 		src_pages[0] = source_pl[pl_index].page;
page              825 drivers/md/dm-integrity.c 		src_pages[1] = ic->journal_xor[pl_index].page;
page             2619 drivers/md/dm-integrity.c 			DEBUG_bytes(lowmem_page_address(ic->journal_io[0].page), 64, "read journal");
page             2628 drivers/md/dm-integrity.c 		DEBUG_bytes(lowmem_page_address(ic->journal[0].page), 64, "decrypted journal");
page             3151 drivers/md/dm-integrity.c 	for (i = 0; pl[i].page; i++)
page             3152 drivers/md/dm-integrity.c 		__free_page(pl[i].page);
page             3166 drivers/md/dm-integrity.c 		pl[i].page = alloc_page(GFP_KERNEL);
page             3167 drivers/md/dm-integrity.c 		if (!pl[i].page) {
page             3174 drivers/md/dm-integrity.c 	pl[i].page = NULL;
page             3222 drivers/md/dm-integrity.c 			char *va = lowmem_page_address(pl[idx].page);
page             3403 drivers/md/dm-integrity.c 				char *va = lowmem_page_address(ic->journal_xor[i].page);
page             3421 drivers/md/dm-integrity.c 			DEBUG_bytes(lowmem_page_address(ic->journal_xor[0].page), 64, "xor data");
page             4063 drivers/md/dm-integrity.c 			bbs->bitmap = lowmem_page_address(ic->journal[pl_index].page) + pl_offset;
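dm-integrity (dm-integrity.c 3151-3174 above) keeps its journal as an array of struct page_list entries (from <linux/dm-io.h>), terminated by a NULL .page, with each page freed individually. A minimal sketch of that allocate/free pair under a hypothetical name:

	#include <linux/mm.h>
	#include <linux/gfp.h>
	#include <linux/dm-io.h>	/* struct page_list */

	static struct page_list *example_alloc_journal(unsigned int n)
	{
		struct page_list *pl;
		unsigned int i;

		pl = kvmalloc_array(n + 1, sizeof(struct page_list), GFP_KERNEL);
		if (!pl)
			return NULL;
		for (i = 0; i < n; i++) {
			pl[i].page = alloc_page(GFP_KERNEL);
			if (!pl[i].page)
				goto err;
		}
		pl[i].page = NULL;	/* NULL terminator, as in dm-integrity */
		return pl;
	err:
		while (i--)
			__free_page(pl[i].page);
		kvfree(pl);
		return NULL;
	}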
page              163 drivers/md/dm-io.c 			 struct page **p, unsigned long *len, unsigned *offset);
page              180 drivers/md/dm-io.c 		  struct page **p, unsigned long *len, unsigned *offset)
page              185 drivers/md/dm-io.c 	*p = pl->page;
page              208 drivers/md/dm-io.c static void bio_get_page(struct dpages *dp, struct page **p,
page              247 drivers/md/dm-io.c 		 struct page **p, unsigned long *len, unsigned *offset)
page              271 drivers/md/dm-io.c static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
page              301 drivers/md/dm-io.c 	struct page *page;
page              363 drivers/md/dm-io.c 			dp->get_page(dp, &page, &len, &offset);
page              364 drivers/md/dm-io.c 			bio_add_page(bio, page, logical_block_size, offset);
page              375 drivers/md/dm-io.c 			dp->get_page(dp, &page, &len, &offset);
page              377 drivers/md/dm-io.c 			if (!bio_add_page(bio, page, len, offset))
page              222 drivers/md/dm-kcopyd.c 	pl->page = alloc_page(gfp);
page              223 drivers/md/dm-kcopyd.c 	if (!pl->page) {
page              233 drivers/md/dm-kcopyd.c 	__free_page(pl->page);
page              396 drivers/md/dm-kcopyd.c 	zero_page_list.page = ZERO_PAGE(0);
page              216 drivers/md/dm-log-writes.c 	struct page *page;
page              233 drivers/md/dm-log-writes.c 	page = alloc_page(GFP_KERNEL);
page              234 drivers/md/dm-log-writes.c 	if (!page) {
page              240 drivers/md/dm-log-writes.c 	ptr = kmap_atomic(page);
page              248 drivers/md/dm-log-writes.c 	ret = bio_add_page(bio, page, lc->sectorsize, 0);
page              257 drivers/md/dm-log-writes.c 	__free_page(page);
page              268 drivers/md/dm-log-writes.c 	struct page *page;
page              296 drivers/md/dm-log-writes.c 			page = alloc_page(GFP_KERNEL);
page              297 drivers/md/dm-log-writes.c 			if (!page) {
page              302 drivers/md/dm-log-writes.c 			ptr = kmap_atomic(page);
page              308 drivers/md/dm-log-writes.c 			ret = bio_add_page(bio, page, pg_sectorlen, 0);
page              311 drivers/md/dm-log-writes.c 				__free_page(page);
page              755 drivers/md/dm-log-writes.c 		struct page *page;
page              758 drivers/md/dm-log-writes.c 		page = alloc_page(GFP_NOIO);
page              759 drivers/md/dm-log-writes.c 		if (!page) {
page              769 drivers/md/dm-log-writes.c 		dst = kmap_atomic(page);
page              773 drivers/md/dm-log-writes.c 		block->vecs[i].bv_page = page;
page               23 drivers/md/dm-sysfs.c 			    char *page)
page               37 drivers/md/dm-sysfs.c 	ret = dm_attr->show(md, page);
page               48 drivers/md/dm-sysfs.c 			     const char *page, size_t count)
page               62 drivers/md/dm-sysfs.c 	ret = dm_attr->store(md, page, count);
page              422 drivers/md/dm-verity-target.c 		u8 *page;
page              426 drivers/md/dm-verity-target.c 		page = kmap_atomic(bv.bv_page);
page              432 drivers/md/dm-verity-target.c 		r = process(v, io, page + bv.bv_offset, len);
page              433 drivers/md/dm-verity-target.c 		kunmap_atomic(page);
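dm-verity (dm-verity-target.c 422-433) walks bio data with kmap_atomic()/kunmap_atomic(), processing at most one page per iteration. The core of one iteration, sketched with a hypothetical callback:

	#include <linux/bvec.h>
	#include <linux/highmem.h>

	static int example_process_bvec(struct bio_vec *bv,
					int (*process)(u8 *data, size_t len))
	{
		u8 *page;
		int r;

		page = kmap_atomic(bv->bv_page);  /* short, non-sleeping mapping */
		r = process(page + bv->bv_offset, bv->bv_len);
		kunmap_atomic(page);
		return r;
	}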
page              226 drivers/md/dm-writecache.c 	struct page **pages;
page              261 drivers/md/dm-writecache.c 		pages = kvmalloc_array(p, sizeof(struct page *), GFP_KERNEL);
page              319 drivers/md/dm-writecache.c static struct page *persistent_memory_page(void *addr)
page              106 drivers/md/dm-zoned-metadata.c 	struct page		*page;
page              294 drivers/md/dm-zoned-metadata.c 	mblk->page = alloc_page(GFP_NOIO);
page              295 drivers/md/dm-zoned-metadata.c 	if (!mblk->page) {
page              305 drivers/md/dm-zoned-metadata.c 	mblk->data = page_address(mblk->page);
page              317 drivers/md/dm-zoned-metadata.c 	__free_pages(mblk->page, 0);
page              447 drivers/md/dm-zoned-metadata.c 	bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
page              601 drivers/md/dm-zoned-metadata.c 	bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
page              611 drivers/md/dm-zoned-metadata.c 			  struct page *page)
page              626 drivers/md/dm-zoned-metadata.c 	bio_add_page(bio, page, DMZ_BLOCK_SIZE, 0);
page              662 drivers/md/dm-zoned-metadata.c 	ret = dmz_rdwr_block(zmd, REQ_OP_WRITE, block, mblk->page);
page              901 drivers/md/dm-zoned-metadata.c 			      zmd->sb[set].mblk->page);
page              972 drivers/md/dm-zoned-metadata.c 	struct page *page;
page              984 drivers/md/dm-zoned-metadata.c 	page = alloc_page(GFP_NOIO);
page              985 drivers/md/dm-zoned-metadata.c 	if (!page)
page              991 drivers/md/dm-zoned-metadata.c 				     zmd->sb[src_set].block + i, page);
page              995 drivers/md/dm-zoned-metadata.c 				     zmd->sb[dst_set].block + i, page);
page             1012 drivers/md/dm-zoned-metadata.c 	__free_pages(page, 0);
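dm-zoned reads and writes metadata blocks synchronously: one page per bio, submitted with submit_bio_wait(). dmz_rdwr_block() itself is indexed at dm-zoned-metadata.c 611 above; this sketch inlines the usual shape with hypothetical naming:

	#include <linux/bio.h>
	#include <linux/blkdev.h>

	static int example_rdwr_block(struct block_device *bdev, unsigned int op,
				      sector_t sector, struct page *page,
				      unsigned int size)
	{
		struct bio *bio;
		int ret;

		bio = bio_alloc(GFP_NOIO, 1);
		if (!bio)
			return -ENOMEM;

		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio->bi_opf = op | REQ_SYNC;
		bio_add_page(bio, page, size, 0);
		ret = submit_bio_wait(bio);	/* block until completion */
		bio_put(bio);

		return ret;
	}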
page               51 drivers/md/md-bitmap.c 			       unsigned long page, int create, int no_hijack)
page               57 drivers/md/md-bitmap.c 	if (page >= bitmap->pages) {
page               65 drivers/md/md-bitmap.c 	if (bitmap->bp[page].hijacked) /* it's hijacked, don't try to alloc */
page               68 drivers/md/md-bitmap.c 	if (bitmap->bp[page].map) /* page is already allocated, just return */
page              100 drivers/md/md-bitmap.c 		if (!bitmap->bp[page].map)
page              101 drivers/md/md-bitmap.c 			bitmap->bp[page].hijacked = 1;
page              102 drivers/md/md-bitmap.c 	} else if (bitmap->bp[page].map ||
page              103 drivers/md/md-bitmap.c 		   bitmap->bp[page].hijacked) {
page              110 drivers/md/md-bitmap.c 		bitmap->bp[page].map = mappage;
page              119 drivers/md/md-bitmap.c static void md_bitmap_checkfree(struct bitmap_counts *bitmap, unsigned long page)
page              123 drivers/md/md-bitmap.c 	if (bitmap->bp[page].count) /* page is still busy */
page              128 drivers/md/md-bitmap.c 	if (bitmap->bp[page].hijacked) { /* page was hijacked, undo this now */
page              129 drivers/md/md-bitmap.c 		bitmap->bp[page].hijacked = 0;
page              130 drivers/md/md-bitmap.c 		bitmap->bp[page].map = NULL;
page              133 drivers/md/md-bitmap.c 		ptr = bitmap->bp[page].map;
page              134 drivers/md/md-bitmap.c 		bitmap->bp[page].map = NULL;
page              150 drivers/md/md-bitmap.c 			struct page *page,
page              168 drivers/md/md-bitmap.c 				 page, REQ_OP_READ, 0, true)) {
page              169 drivers/md/md-bitmap.c 			page->index = index;
page              212 drivers/md/md-bitmap.c static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
page              227 drivers/md/md-bitmap.c 		if (page->index == store->file_pages-1) {
page              239 drivers/md/md-bitmap.c 			if (rdev->sb_start + offset + (page->index
page              250 drivers/md/md-bitmap.c 			    + (long)(page->index * (PAGE_SIZE/512))
page              262 drivers/md/md-bitmap.c 			    + page->index*(PAGE_SIZE/512) + size/512
page              271 drivers/md/md-bitmap.c 			       + page->index * (PAGE_SIZE/512),
page              273 drivers/md/md-bitmap.c 			       page);
page              288 drivers/md/md-bitmap.c static void write_page(struct bitmap *bitmap, struct page *page, int wait)
page              293 drivers/md/md-bitmap.c 		switch (write_sb_page(bitmap, page, wait)) {
page              299 drivers/md/md-bitmap.c 		bh = page_buffers(page);
page              329 drivers/md/md-bitmap.c __clear_page_buffers(struct page *page)
page              331 drivers/md/md-bitmap.c 	ClearPagePrivate(page);
page              332 drivers/md/md-bitmap.c 	set_page_private(page, 0);
page              333 drivers/md/md-bitmap.c 	put_page(page);
page              335 drivers/md/md-bitmap.c static void free_buffers(struct page *page)
page              339 drivers/md/md-bitmap.c 	if (!PagePrivate(page))
page              342 drivers/md/md-bitmap.c 	bh = page_buffers(page);
page              348 drivers/md/md-bitmap.c 	__clear_page_buffers(page);
page              349 drivers/md/md-bitmap.c 	put_page(page);
page              362 drivers/md/md-bitmap.c 		     struct page *page)
page              372 drivers/md/md-bitmap.c 	bh = alloc_page_buffers(page, 1<<inode->i_blkbits, false);
page              377 drivers/md/md-bitmap.c 	attach_page_buffers(page, bh);
page              405 drivers/md/md-bitmap.c 	page->index = index;
page              590 drivers/md/md-bitmap.c 	struct page *sb_page;
page              770 drivers/md/md-bitmap.c static inline struct page *filemap_get_page(struct bitmap_storage *store,
page              793 drivers/md/md-bitmap.c 	store->filemap = kmalloc_array(num_pages, sizeof(struct page *),
page              836 drivers/md/md-bitmap.c 	struct page **map, *sb_page;
page              930 drivers/md/md-bitmap.c 	struct page *page;
page              939 drivers/md/md-bitmap.c 	page = filemap_get_page(&bitmap->storage, chunk);
page              940 drivers/md/md-bitmap.c 	if (!page)
page              945 drivers/md/md-bitmap.c 	kaddr = kmap_atomic(page);
page              951 drivers/md/md-bitmap.c 	pr_debug("set file bit %lu page %lu\n", bit, page->index);
page              953 drivers/md/md-bitmap.c 	set_page_attr(bitmap, page->index - node_offset, BITMAP_PAGE_DIRTY);
page              959 drivers/md/md-bitmap.c 	struct page *page;
page              968 drivers/md/md-bitmap.c 	page = filemap_get_page(&bitmap->storage, chunk);
page              969 drivers/md/md-bitmap.c 	if (!page)
page              972 drivers/md/md-bitmap.c 	paddr = kmap_atomic(page);
page              978 drivers/md/md-bitmap.c 	if (!test_page_attr(bitmap, page->index - node_offset, BITMAP_PAGE_NEEDWRITE)) {
page              979 drivers/md/md-bitmap.c 		set_page_attr(bitmap, page->index - node_offset, BITMAP_PAGE_PENDING);
page              987 drivers/md/md-bitmap.c 	struct page *page;
page              992 drivers/md/md-bitmap.c 	page = filemap_get_page(&bitmap->storage, chunk);
page              993 drivers/md/md-bitmap.c 	if (!page)
page              996 drivers/md/md-bitmap.c 	paddr = kmap_atomic(page);
page             1062 drivers/md/md-bitmap.c 	struct page *page = NULL;
page             1120 drivers/md/md-bitmap.c 			page = store->filemap[index];
page             1123 drivers/md/md-bitmap.c 						count, page);
page             1128 drivers/md/md-bitmap.c 					page,
page             1141 drivers/md/md-bitmap.c 				paddr = kmap_atomic(page);
page             1145 drivers/md/md-bitmap.c 				write_page(bitmap, page, 1);
page             1153 drivers/md/md-bitmap.c 		paddr = kmap_atomic(page);
page             1206 drivers/md/md-bitmap.c 	unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
page             1207 drivers/md/md-bitmap.c 	bitmap->bp[page].count += inc;
page             1208 drivers/md/md-bitmap.c 	md_bitmap_checkfree(bitmap, page);
page             1214 drivers/md/md-bitmap.c 	unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
page             1215 drivers/md/md-bitmap.c 	struct bitmap_page *bp = &bitmap->bp[page];
page             1365 drivers/md/md-bitmap.c 	unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
page             1370 drivers/md/md-bitmap.c 	err = md_bitmap_checkpage(bitmap, page, create, 0);
page             1372 drivers/md/md-bitmap.c 	if (bitmap->bp[page].hijacked ||
page             1373 drivers/md/md-bitmap.c 	    bitmap->bp[page].map == NULL)
page             1385 drivers/md/md-bitmap.c 	if (bitmap->bp[page].hijacked) { /* hijacked pointer */
page             1390 drivers/md/md-bitmap.c 			  &bitmap->bp[page].map)[hi];
page             1393 drivers/md/md-bitmap.c 			&(bitmap->bp[page].map[pageoff]);
page             2160 drivers/md/md-bitmap.c 		unsigned long page;
page             2161 drivers/md/md-bitmap.c 		for (page = 0; page < pages; page++) {
page             2162 drivers/md/md-bitmap.c 			ret = md_bitmap_checkpage(&bitmap->counts, page, 1, 1);
page             2167 drivers/md/md-bitmap.c 				for (k = 0; k < page; k++) {
page             2184 drivers/md/md-bitmap.c 				bitmap->counts.bp[page].count += 1;
page             2258 drivers/md/md-bitmap.c location_show(struct mddev *mddev, char *page)
page             2262 drivers/md/md-bitmap.c 		len = sprintf(page, "file");
page             2264 drivers/md/md-bitmap.c 		len = sprintf(page, "%+lld", (long long)mddev->bitmap_info.offset);
page             2266 drivers/md/md-bitmap.c 		len = sprintf(page, "none");
page             2267 drivers/md/md-bitmap.c 	len += sprintf(page+len, "\n");
page             2379 drivers/md/md-bitmap.c space_show(struct mddev *mddev, char *page)
page             2381 drivers/md/md-bitmap.c 	return sprintf(page, "%lu\n", mddev->bitmap_info.space);
page             2412 drivers/md/md-bitmap.c timeout_show(struct mddev *mddev, char *page)
page             2418 drivers/md/md-bitmap.c 	len = sprintf(page, "%lu", secs);
page             2420 drivers/md/md-bitmap.c 		len += sprintf(page+len, ".%03u", jiffies_to_msecs(jifs));
page             2421 drivers/md/md-bitmap.c 	len += sprintf(page+len, "\n");
page             2462 drivers/md/md-bitmap.c backlog_show(struct mddev *mddev, char *page)
page             2464 drivers/md/md-bitmap.c 	return sprintf(page, "%lu\n", mddev->bitmap_info.max_write_behind);
page             2498 drivers/md/md-bitmap.c chunksize_show(struct mddev *mddev, char *page)
page             2500 drivers/md/md-bitmap.c 	return sprintf(page, "%lu\n", mddev->bitmap_info.chunksize);
page             2524 drivers/md/md-bitmap.c static ssize_t metadata_show(struct mddev *mddev, char *page)
page             2527 drivers/md/md-bitmap.c 		return sprintf(page, "clustered\n");
page             2528 drivers/md/md-bitmap.c 	return sprintf(page, "%s\n", (mddev->bitmap_info.external
page             2551 drivers/md/md-bitmap.c static ssize_t can_clear_show(struct mddev *mddev, char *page)
page             2556 drivers/md/md-bitmap.c 		len = sprintf(page, "%s\n", (mddev->bitmap->need_sync ?
page             2559 drivers/md/md-bitmap.c 		len = sprintf(page, "\n");
page             2583 drivers/md/md-bitmap.c behind_writes_used_show(struct mddev *mddev, char *page)
page             2588 drivers/md/md-bitmap.c 		ret = sprintf(page, "0\n");
page             2590 drivers/md/md-bitmap.c 		ret = sprintf(page, "%lu\n",
page              202 drivers/md/md-bitmap.h 		struct page *sb_page;		/* cached copy of the bitmap
page              204 drivers/md/md-bitmap.h 		struct page **filemap;		/* list of cache pages for
page              870 drivers/md/md.c 		   sector_t sector, int size, struct page *page)
page              881 drivers/md/md.c 	if (!page)
page              893 drivers/md/md.c 	bio_add_page(bio, page, size, 0);
page              917 drivers/md/md.c 		 struct page *page, int op, int op_flags, bool metadata_op)
page              935 drivers/md/md.c 	bio_add_page(bio, page, size, 0);
page             1994 drivers/md/md.c 		u64 *p = bb->page;
page             2802 drivers/md/md.c state_show(struct md_rdev *rdev, char *page)
page             2811 drivers/md/md.c 		len += sprintf(page+len, "faulty%s", sep);
page             2813 drivers/md/md.c 		len += sprintf(page+len, "in_sync%s", sep);
page             2815 drivers/md/md.c 		len += sprintf(page+len, "journal%s", sep);
page             2817 drivers/md/md.c 		len += sprintf(page+len, "write_mostly%s", sep);
page             2821 drivers/md/md.c 		len += sprintf(page+len, "blocked%s", sep);
page             2825 drivers/md/md.c 		len += sprintf(page+len, "spare%s", sep);
page             2827 drivers/md/md.c 		len += sprintf(page+len, "write_error%s", sep);
page             2829 drivers/md/md.c 		len += sprintf(page+len, "want_replacement%s", sep);
page             2831 drivers/md/md.c 		len += sprintf(page+len, "replacement%s", sep);
page             2833 drivers/md/md.c 		len += sprintf(page+len, "external_bbl%s", sep);
page             2835 drivers/md/md.c 		len += sprintf(page+len, "failfast%s", sep);
page             2840 drivers/md/md.c 	return len+sprintf(page+len, "\n");
page             3010 drivers/md/md.c errors_show(struct md_rdev *rdev, char *page)
page             3012 drivers/md/md.c 	return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
page             3031 drivers/md/md.c slot_show(struct md_rdev *rdev, char *page)
page             3034 drivers/md/md.c 		return sprintf(page, "journal\n");
page             3036 drivers/md/md.c 		return sprintf(page, "none\n");
page             3038 drivers/md/md.c 		return sprintf(page, "%d\n", rdev->raid_disk);
page             3129 drivers/md/md.c offset_show(struct md_rdev *rdev, char *page)
page             3131 drivers/md/md.c 	return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
page             3154 drivers/md/md.c static ssize_t new_offset_show(struct md_rdev *rdev, char *page)
page             3156 drivers/md/md.c 	return sprintf(page, "%llu\n",
page             3213 drivers/md/md.c rdev_size_show(struct md_rdev *rdev, char *page)
page             3215 drivers/md/md.c 	return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2);
page             3324 drivers/md/md.c static ssize_t recovery_start_show(struct md_rdev *rdev, char *page)
page             3330 drivers/md/md.c 		return sprintf(page, "none\n");
page             3332 drivers/md/md.c 	return sprintf(page, "%llu\n", recovery_start);
page             3370 drivers/md/md.c static ssize_t bb_show(struct md_rdev *rdev, char *page)
page             3372 drivers/md/md.c 	return badblocks_show(&rdev->badblocks, page, 0);
page             3374 drivers/md/md.c static ssize_t bb_store(struct md_rdev *rdev, const char *page, size_t len)
page             3376 drivers/md/md.c 	int rv = badblocks_store(&rdev->badblocks, page, len, 0);
page             3385 drivers/md/md.c static ssize_t ubb_show(struct md_rdev *rdev, char *page)
page             3387 drivers/md/md.c 	return badblocks_show(&rdev->badblocks, page, 1);
page             3389 drivers/md/md.c static ssize_t ubb_store(struct md_rdev *rdev, const char *page, size_t len)
page             3391 drivers/md/md.c 	return badblocks_store(&rdev->badblocks, page, len, 1);
page             3397 drivers/md/md.c ppl_sector_show(struct md_rdev *rdev, char *page)
page             3399 drivers/md/md.c 	return sprintf(page, "%llu\n", (unsigned long long)rdev->ppl.sector);
page             3436 drivers/md/md.c ppl_size_show(struct md_rdev *rdev, char *page)
page             3438 drivers/md/md.c 	return sprintf(page, "%u\n", rdev->ppl.size);
page             3483 drivers/md/md.c rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
page             3492 drivers/md/md.c 	return entry->show(rdev, page);
page             3497 drivers/md/md.c 	      const char *page, size_t length)
page             3513 drivers/md/md.c 			rv = entry->store(rdev, page, length);
page             3734 drivers/md/md.c safe_delay_show(struct mddev *mddev, char *page)
page             3737 drivers/md/md.c 	return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
page             3769 drivers/md/md.c level_show(struct mddev *mddev, char *page)
page             3776 drivers/md/md.c 		ret = sprintf(page, "%s\n", p->name);
page             3778 drivers/md/md.c 		ret = sprintf(page, "%s\n", mddev->clevel);
page             3780 drivers/md/md.c 		ret = sprintf(page, "%d\n", mddev->level);
page             3993 drivers/md/md.c layout_show(struct mddev *mddev, char *page)
page             3998 drivers/md/md.c 		return sprintf(page, "%d (%d)\n",
page             4000 drivers/md/md.c 	return sprintf(page, "%d\n", mddev->layout);
page             4039 drivers/md/md.c raid_disks_show(struct mddev *mddev, char *page)
page             4045 drivers/md/md.c 		return sprintf(page, "%d (%d)\n", mddev->raid_disks,
page             4047 drivers/md/md.c 	return sprintf(page, "%d\n", mddev->raid_disks);
page             4094 drivers/md/md.c chunk_size_show(struct mddev *mddev, char *page)
page             4098 drivers/md/md.c 		return sprintf(page, "%d (%d)\n",
page             4101 drivers/md/md.c 	return sprintf(page, "%d\n", mddev->chunk_sectors << 9);
page             4140 drivers/md/md.c resync_start_show(struct mddev *mddev, char *page)
page             4143 drivers/md/md.c 		return sprintf(page, "none\n");
page             4144 drivers/md/md.c 	return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
page             4238 drivers/md/md.c array_state_show(struct mddev *mddev, char *page)
page             4273 drivers/md/md.c 	return sprintf(page, "%s\n", array_states[st]);
page             4396 drivers/md/md.c max_corrected_read_errors_show(struct mddev *mddev, char *page) {
page             4397 drivers/md/md.c 	return sprintf(page, "%d\n",
page             4419 drivers/md/md.c null_show(struct mddev *mddev, char *page)
page             4525 drivers/md/md.c size_show(struct mddev *mddev, char *page)
page             4527 drivers/md/md.c 	return sprintf(page, "%llu\n",
page             4573 drivers/md/md.c metadata_show(struct mddev *mddev, char *page)
page             4576 drivers/md/md.c 		return sprintf(page, "%d.%d\n",
page             4579 drivers/md/md.c 		return sprintf(page, "external:%s\n", mddev->metadata_type);
page             4581 drivers/md/md.c 		return sprintf(page, "none\n");
page             4651 drivers/md/md.c action_show(struct mddev *mddev, char *page)
page             4673 drivers/md/md.c 	return sprintf(page, "%s\n", type);
page             4677 drivers/md/md.c action_store(struct mddev *mddev, const char *page, size_t len)
page             4683 drivers/md/md.c 	if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
page             4684 drivers/md/md.c 		if (cmd_match(page, "frozen"))
page             4699 drivers/md/md.c 	else if (cmd_match(page, "resync"))
page             4701 drivers/md/md.c 	else if (cmd_match(page, "recover")) {
page             4704 drivers/md/md.c 	} else if (cmd_match(page, "reshape")) {
page             4722 drivers/md/md.c 		if (cmd_match(page, "check"))
page             4724 drivers/md/md.c 		else if (!cmd_match(page, "repair"))
page             4747 drivers/md/md.c last_sync_action_show(struct mddev *mddev, char *page)
page             4749 drivers/md/md.c 	return sprintf(page, "%s\n", mddev->last_sync_action);
page             4755 drivers/md/md.c mismatch_cnt_show(struct mddev *mddev, char *page)
page             4757 drivers/md/md.c 	return sprintf(page, "%llu\n",
page             4765 drivers/md/md.c sync_min_show(struct mddev *mddev, char *page)
page             4767 drivers/md/md.c 	return sprintf(page, "%d (%s)\n", speed_min(mddev),
page             4794 drivers/md/md.c sync_max_show(struct mddev *mddev, char *page)
page             4796 drivers/md/md.c 	return sprintf(page, "%d (%s)\n", speed_max(mddev),
page             4823 drivers/md/md.c degraded_show(struct mddev *mddev, char *page)
page             4825 drivers/md/md.c 	return sprintf(page, "%d\n", mddev->degraded);
page             4830 drivers/md/md.c sync_force_parallel_show(struct mddev *mddev, char *page)
page             4832 drivers/md/md.c 	return sprintf(page, "%d\n", mddev->parallel_resync);
page             4860 drivers/md/md.c sync_speed_show(struct mddev *mddev, char *page)
page             4864 drivers/md/md.c 		return sprintf(page, "none\n");
page             4869 drivers/md/md.c 	return sprintf(page, "%lu\n", db/dt/2); /* K/sec */
page             4875 drivers/md/md.c sync_completed_show(struct mddev *mddev, char *page)
page             4880 drivers/md/md.c 		return sprintf(page, "none\n");
page             4884 drivers/md/md.c 		return sprintf(page, "delayed\n");
page             4893 drivers/md/md.c 	return sprintf(page, "%llu / %llu\n", resync, max_sectors);
page             4900 drivers/md/md.c min_sync_show(struct mddev *mddev, char *page)
page             4902 drivers/md/md.c 	return sprintf(page, "%llu\n",
page             4936 drivers/md/md.c max_sync_show(struct mddev *mddev, char *page)
page             4939 drivers/md/md.c 		return sprintf(page, "max\n");
page             4941 drivers/md/md.c 		return sprintf(page, "%llu\n",
page             4989 drivers/md/md.c suspend_lo_show(struct mddev *mddev, char *page)
page             4991 drivers/md/md.c 	return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
page             5026 drivers/md/md.c suspend_hi_show(struct mddev *mddev, char *page)
page             5028 drivers/md/md.c 	return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
page             5063 drivers/md/md.c reshape_position_show(struct mddev *mddev, char *page)
page             5066 drivers/md/md.c 		return sprintf(page, "%llu\n",
page             5068 drivers/md/md.c 	strcpy(page, "none\n");
page             5109 drivers/md/md.c reshape_direction_show(struct mddev *mddev, char *page)
page             5111 drivers/md/md.c 	return sprintf(page, "%s\n",
page             5150 drivers/md/md.c array_size_show(struct mddev *mddev, char *page)
page             5153 drivers/md/md.c 		return sprintf(page, "%llu\n",
page             5156 drivers/md/md.c 		return sprintf(page, "default\n");
page             5207 drivers/md/md.c consistency_policy_show(struct mddev *mddev, char *page)
page             5212 drivers/md/md.c 		ret = sprintf(page, "journal\n");
page             5214 drivers/md/md.c 		ret = sprintf(page, "ppl\n");
page             5216 drivers/md/md.c 		ret = sprintf(page, "bitmap\n");
page             5219 drivers/md/md.c 			ret = sprintf(page, "resync\n");
page             5221 drivers/md/md.c 			ret = sprintf(page, "none\n");
page             5223 drivers/md/md.c 		ret = sprintf(page, "unknown\n");
page             5252 drivers/md/md.c static ssize_t fail_last_dev_show(struct mddev *mddev, char *page)
page             5254 drivers/md/md.c 	return sprintf(page, "%d\n", mddev->fail_last_dev);
page             5323 drivers/md/md.c md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
page             5339 drivers/md/md.c 	rv = entry->show(mddev, page);
page             5346 drivers/md/md.c 	      const char *page, size_t length)
page             5363 drivers/md/md.c 	rv = entry->store(mddev, page, length);
page             9455 drivers/md/md.c 	struct page *swapout = rdev->sb_page;
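md.c's md_attr_show()/md_attr_store() (md.c 5323-5363 above) are the usual sysfs_ops trampolines: recover the containing object with container_of(), cast the attribute to the driver's entry type, and forward the page buffer. A sketch with a reduced, hypothetical entry type (struct mddev and its embedded kobj come from md.h):

	#include <linux/kernel.h>
	#include <linux/kobject.h>
	#include "md.h"

	/* Reduced stand-in for md's sysfs entry type. */
	struct example_entry {
		struct attribute attr;
		ssize_t (*show)(struct mddev *mddev, char *page);
	};

	static ssize_t example_attr_show(struct kobject *kobj,
					 struct attribute *attr, char *page)
	{
		struct example_entry *entry =
			container_of(attr, struct example_entry, attr);
		struct mddev *mddev = container_of(kobj, struct mddev, kobj);

		if (!entry->show)
			return -EIO;		/* attribute is write-only */
		return entry->show(mddev, page);
	}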
page               53 drivers/md/md.h 	struct page	*sb_page, *bb_page;
page              677 drivers/md/md.h static inline void safe_put_page(struct page *p)
page              708 drivers/md/md.h 			   sector_t sector, int size, struct page *page);
page              711 drivers/md/md.h 			struct page *page, int op, int op_flags,
page               34 drivers/md/raid1-10.c 	struct page	*pages[RESYNC_PAGES];
page               77 drivers/md/raid1-10.c static inline struct page *resync_fetch_page(struct resync_pages *rp,
page              102 drivers/md/raid1-10.c 		struct page *page = resync_fetch_page(rp, idx);
page              109 drivers/md/raid1-10.c 		bio_add_page(bio, page, len, 0);
page             1132 drivers/md/raid1.c 		struct page *page;
page             1135 drivers/md/raid1.c 		page = alloc_page(GFP_NOIO);
page             1136 drivers/md/raid1.c 		if (unlikely(!page))
page             1139 drivers/md/raid1.c 		bio_add_page(behind_bio, page, len, 0);
page             1954 drivers/md/raid1.c 			    int sectors, struct page *page, int rw)
page             1956 drivers/md/raid1.c 	if (sync_page_io(rdev, sector, sectors << 9, page, rw, 0, false))
page             1988 drivers/md/raid1.c 	struct page **pages = get_resync_pages(bio)->pages;
page             2154 drivers/md/raid1.c 		struct page **ppages = get_resync_pages(pbio)->pages;
page             2155 drivers/md/raid1.c 		struct page **spages = get_resync_pages(sbio)->pages;
page             2856 drivers/md/raid1.c 		struct page *page;
page             2878 drivers/md/raid1.c 				page = resync_fetch_page(rp, page_idx);
page             2884 drivers/md/raid1.c 				bio_add_page(bio, page, len, 0);
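raid1's r1_sync_page_io() (raid1.c 1954-1956 above) wraps sync_page_io(), whose prototype is indexed at md.h 711: a synchronous single-page read or write against one member device. A hedged sketch of the call (helper name hypothetical; on failure the real code also records a bad block on the rdev):

	#include "md.h"

	static int example_sync_rw(struct md_rdev *rdev, sector_t sector,
				   int sectors, struct page *page, int rw)
	{
		/* sectors << 9 converts 512-byte sectors to bytes. */
		if (sync_page_io(rdev, sector, sectors << 9, page, rw, 0, false))
			return 1;	/* success */
		return 0;		/* caller marks the range bad */
	}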
page              129 drivers/md/raid1.h 	struct page		*tmppage;
page             2014 drivers/md/raid10.c 	struct page **tpages, **fpages;
page             2158 drivers/md/raid10.c 	struct page **pages = get_resync_pages(bio)->pages;
page             2299 drivers/md/raid10.c 			    int sectors, struct page *page, int rw)
page             2307 drivers/md/raid10.c 	if (sync_page_io(rdev, sector, sectors << 9, page, rw, 0, false))
page             3422 drivers/md/raid10.c 		struct page *page;
page             3430 drivers/md/raid10.c 			page = resync_fetch_page(rp, page_idx);
page             3435 drivers/md/raid10.c 			bio_add_page(bio, page, len, 0);
page             4444 drivers/md/raid10.c 	struct page **pages;
page             4636 drivers/md/raid10.c 		struct page *page = pages[s / (PAGE_SIZE >> 9)];
page             4645 drivers/md/raid10.c 			bio_add_page(bio, page, len, 0);
page             4773 drivers/md/raid10.c 	struct page **pages;
page               98 drivers/md/raid10.h 	struct page		*tmppage;
page              214 drivers/md/raid5-cache.c 	struct page *meta_page;	/* store meta block */
page              846 drivers/md/raid5-cache.c static void r5l_append_payload_page(struct r5l_log *log, struct page *page)
page              858 drivers/md/raid5-cache.c 	if (!bio_add_page(io->current_bio, page, PAGE_SIZE, 0))
page              945 drivers/md/raid5-cache.c 		r5l_append_payload_page(log, sh->dev[i].page);
page              952 drivers/md/raid5-cache.c 		r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
page              953 drivers/md/raid5-cache.c 		r5l_append_payload_page(log, sh->dev[sh->qd_idx].page);
page              958 drivers/md/raid5-cache.c 		r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
page             1027 drivers/md/raid5-cache.c 		addr = kmap_atomic(sh->dev[i].page);
page             1612 drivers/md/raid5-cache.c 	struct page *meta_page;		/* current meta */
page             1627 drivers/md/raid5-cache.c 	struct page *ra_pool[R5L_RECOVERY_PAGE_POOL_SIZE];
page             1637 drivers/md/raid5-cache.c 	struct page *page;
page             1646 drivers/md/raid5-cache.c 		page = alloc_page(GFP_KERNEL);
page             1648 drivers/md/raid5-cache.c 		if (!page)
page             1650 drivers/md/raid5-cache.c 		ctx->ra_pool[ctx->total_pages] = page;
page             1711 drivers/md/raid5-cache.c 				  struct page *page,
page             1726 drivers/md/raid5-cache.c 	memcpy(page_address(page),
page             1736 drivers/md/raid5-cache.c 	struct page *page = ctx->meta_page;
page             1741 drivers/md/raid5-cache.c 	ret = r5l_recovery_read_page(log, ctx, page, ctx->pos);
page             1745 drivers/md/raid5-cache.c 	mb = page_address(page);
page             1769 drivers/md/raid5-cache.c 				     struct page *page,
page             1774 drivers/md/raid5-cache.c 	mb = page_address(page);
page             1786 drivers/md/raid5-cache.c 	struct page *page;
page             1789 drivers/md/raid5-cache.c 	page = alloc_page(GFP_KERNEL);
page             1790 drivers/md/raid5-cache.c 	if (!page)
page             1792 drivers/md/raid5-cache.c 	r5l_recovery_create_empty_meta_block(log, page, pos, seq);
page             1793 drivers/md/raid5-cache.c 	mb = page_address(page);
page             1796 drivers/md/raid5-cache.c 	if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE,
page             1798 drivers/md/raid5-cache.c 		__free_page(page);
page             1801 drivers/md/raid5-cache.c 	__free_page(page);
page             1825 drivers/md/raid5-cache.c 	r5l_recovery_read_page(log, ctx, sh->dev[dd_idx].page, log_offset);
page             1844 drivers/md/raid5-cache.c 	r5l_recovery_read_page(log, ctx, sh->dev[sh->pd_idx].page, log_offset);
page             1851 drivers/md/raid5-cache.c 			log, ctx, sh->dev[sh->qd_idx].page,
page             1906 drivers/md/raid5-cache.c 				     sh->dev[disk_index].page, REQ_OP_WRITE, 0,
page             1916 drivers/md/raid5-cache.c 				     sh->dev[disk_index].page, REQ_OP_WRITE, 0,
page             1987 drivers/md/raid5-cache.c 				  struct page *page,
page             1993 drivers/md/raid5-cache.c 	r5l_recovery_read_page(log, ctx, page, log_offset);
page             1994 drivers/md/raid5-cache.c 	addr = kmap_atomic(page);
page             2013 drivers/md/raid5-cache.c 	struct page *page;
page             2017 drivers/md/raid5-cache.c 	page = alloc_page(GFP_KERNEL);
page             2018 drivers/md/raid5-cache.c 	if (!page)
page             2027 drivers/md/raid5-cache.c 				    log, ctx, page, log_offset,
page             2032 drivers/md/raid5-cache.c 				    log, ctx, page, log_offset,
page             2037 drivers/md/raid5-cache.c 				    log, ctx, page,
page             2061 drivers/md/raid5-cache.c 	put_page(page);
page             2065 drivers/md/raid5-cache.c 	put_page(page);
page             2359 drivers/md/raid5-cache.c 	struct page *page;
page             2362 drivers/md/raid5-cache.c 	page = alloc_page(GFP_KERNEL);
page             2363 drivers/md/raid5-cache.c 	if (!page) {
page             2378 drivers/md/raid5-cache.c 		r5l_recovery_create_empty_meta_block(log, page,
page             2380 drivers/md/raid5-cache.c 		mb = page_address(page);
page             2396 drivers/md/raid5-cache.c 				addr = kmap_atomic(dev->page);
page             2402 drivers/md/raid5-cache.c 					     dev->page, REQ_OP_WRITE, 0, false);
page             2413 drivers/md/raid5-cache.c 		sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page,
page             2423 drivers/md/raid5-cache.c 	__free_page(page);
page             2530 drivers/md/raid5-cache.c static ssize_t r5c_journal_mode_show(struct mddev *mddev, char *page)
page             2548 drivers/md/raid5-cache.c 			page, PAGE_SIZE, "[%s] %s\n",
page             2554 drivers/md/raid5-cache.c 			page, PAGE_SIZE, "%s [%s]\n",
page             2598 drivers/md/raid5-cache.c 				      const char *page, size_t length)
page             2607 drivers/md/raid5-cache.c 	if (page[len - 1] == '\n')
page             2612 drivers/md/raid5-cache.c 		    !strncmp(page, r5c_journal_mode_str[mode], len))
page             2768 drivers/md/raid5-cache.c 		if (sh->dev[i].page != sh->dev[i].orig_page) {
page             2769 drivers/md/raid5-cache.c 			struct page *p = sh->dev[i].orig_page;
page             2771 drivers/md/raid5-cache.c 			sh->dev[i].orig_page = sh->dev[i].page;
page             2792 drivers/md/raid5-cache.c 		if (dev->orig_page != dev->page)
page             2902 drivers/md/raid5-cache.c 		addr = kmap_atomic(sh->dev[i].page);
page             2961 drivers/md/raid5-cache.c 	struct page *page;
page             2971 drivers/md/raid5-cache.c 	page = alloc_page(GFP_KERNEL);
page             2972 drivers/md/raid5-cache.c 	if (!page)
page             2975 drivers/md/raid5-cache.c 	if (!sync_page_io(rdev, cp, PAGE_SIZE, page, REQ_OP_READ, 0, false)) {
page             2979 drivers/md/raid5-cache.c 	mb = page_address(page);
page             3017 drivers/md/raid5-cache.c 	__free_page(page);
page             3029 drivers/md/raid5-cache.c 	__free_page(page);
page              138 drivers/md/raid5-ppl.c 	struct page *header_page;	/* for ppl_header */
page              162 drivers/md/raid5-ppl.c 	struct page **srcs = percpu->scribble;
page              180 drivers/md/raid5-ppl.c 		srcs[count++] = sh->dev[pd_idx].page;
page              186 drivers/md/raid5-ppl.c 				srcs[count++] = dev->page;
page              238 drivers/md/raid5-ppl.c 	struct page *header_page;
page              716 drivers/md/raid5-ppl.c static void ppl_xor(int size, struct page *page1, struct page *page2)
page              720 drivers/md/raid5-ppl.c 	struct page *xor_srcs[] = { page1, page2 };
page              802 drivers/md/raid5-ppl.c 	struct page *page1;
page              803 drivers/md/raid5-ppl.c 	struct page *page2;
page              978 drivers/md/raid5-ppl.c 	struct page *page;
page              982 drivers/md/raid5-ppl.c 	page = alloc_page(GFP_KERNEL);
page              983 drivers/md/raid5-ppl.c 	if (!page)
page             1006 drivers/md/raid5-ppl.c 					s, page, REQ_OP_READ, 0, false)) {
page             1012 drivers/md/raid5-ppl.c 			crc = crc32c_le(crc, page_address(page), s);
page             1042 drivers/md/raid5-ppl.c 	__free_page(page);
page             1048 drivers/md/raid5-ppl.c 	struct page *page;
page             1056 drivers/md/raid5-ppl.c 	page = alloc_page(GFP_NOIO | __GFP_ZERO);
page             1057 drivers/md/raid5-ppl.c 	if (!page)
page             1060 drivers/md/raid5-ppl.c 	pplhdr = page_address(page);
page             1069 drivers/md/raid5-ppl.c 			  PPL_HEADER_SIZE, page, REQ_OP_WRITE | REQ_SYNC |
page             1075 drivers/md/raid5-ppl.c 	__free_page(page);
page             1084 drivers/md/raid5-ppl.c 	struct page *page, *page2, *tmp;
page             1093 drivers/md/raid5-ppl.c 	page = alloc_page(GFP_KERNEL);
page             1094 drivers/md/raid5-ppl.c 	if (!page)
page             1099 drivers/md/raid5-ppl.c 		__free_page(page);
page             1107 drivers/md/raid5-ppl.c 				  pplhdr_offset, PAGE_SIZE, page, REQ_OP_READ,
page             1115 drivers/md/raid5-ppl.c 		pplhdr = page_address(page);
page             1159 drivers/md/raid5-ppl.c 		tmp = page;
page             1160 drivers/md/raid5-ppl.c 		page = page2;
page             1186 drivers/md/raid5-ppl.c 	__free_page(page);
page             1521 drivers/md/raid5-ppl.c ppl_write_hint_store(struct mddev *mddev, const char *page, size_t len)
page             1530 drivers/md/raid5-ppl.c 	if (kstrtou16(page, 10, &new))
page              453 drivers/md/raid5.c 	struct page *p;
page              458 drivers/md/raid5.c 		WARN_ON(sh->dev[i].page != sh->dev[i].orig_page);
page              459 drivers/md/raid5.c 		p = sh->dev[i].page;
page              462 drivers/md/raid5.c 		sh->dev[i].page = NULL;
page              473 drivers/md/raid5.c 		struct page *page;
page              475 drivers/md/raid5.c 		if (!(page = alloc_page(gfp))) {
page              478 drivers/md/raid5.c 		sh->dev[i].page = page;
page              479 drivers/md/raid5.c 		sh->dev[i].orig_page = page;
page             1130 drivers/md/raid5.c 				sh->dev[i].vec.bv_page = sh->dev[i].page;
page             1184 drivers/md/raid5.c 			sh->dev[i].rvec.bv_page = sh->dev[i].page;
page             1228 drivers/md/raid5.c async_copy_data(int frombio, struct bio *bio, struct page **page,
page             1234 drivers/md/raid5.c 	struct page *bio_page;
page             1272 drivers/md/raid5.c 					*page = bio_page;
page             1274 drivers/md/raid5.c 					tx = async_memcpy(*page, bio_page, page_offset,
page             1277 drivers/md/raid5.c 				tx = async_memcpy(bio_page, *page, b_offset,
page             1348 drivers/md/raid5.c 				tx = async_copy_data(0, rbi, &dev->page,
page             1392 drivers/md/raid5.c static struct page **to_addr_page(struct raid5_percpu *percpu, int i)
page             1408 drivers/md/raid5.c 	struct page **xor_srcs = to_addr_page(percpu, 0);
page             1411 drivers/md/raid5.c 	struct page *xor_dest = tgt->page;
page             1425 drivers/md/raid5.c 			xor_srcs[count++] = sh->dev[i].page;
page             1448 drivers/md/raid5.c static int set_syndrome_sources(struct page **srcs,
page             1478 drivers/md/raid5.c 				srcs[slot] = sh->dev[i].page;
page             1490 drivers/md/raid5.c 	struct page **blocks = to_addr_page(percpu, 0);
page             1496 drivers/md/raid5.c 	struct page *dest;
page             1514 drivers/md/raid5.c 	dest = tgt->page;
page             1532 drivers/md/raid5.c 			blocks[count++] = sh->dev[i].page;
page             1556 drivers/md/raid5.c 	struct page **blocks = to_addr_page(percpu, 0);
page             1576 drivers/md/raid5.c 		blocks[slot] = sh->dev[i].page;
page             1603 drivers/md/raid5.c 			struct page *dest;
page             1617 drivers/md/raid5.c 				blocks[count++] = sh->dev[i].page;
page             1619 drivers/md/raid5.c 			dest = sh->dev[data_target].page;
page             1672 drivers/md/raid5.c 	struct page **xor_srcs = to_addr_page(percpu, 0);
page             1677 drivers/md/raid5.c 	struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
page             1689 drivers/md/raid5.c 			xor_srcs[count++] = dev->page;
page             1703 drivers/md/raid5.c 	struct page **blocks = to_addr_page(percpu, 0);
page             1752 drivers/md/raid5.c 			WARN_ON(dev->page != dev->orig_page);
page             1763 drivers/md/raid5.c 					tx = async_copy_data(1, wbi, &dev->page,
page             1766 drivers/md/raid5.c 					if (dev->page != dev->orig_page &&
page             1842 drivers/md/raid5.c 	struct page **xor_srcs;
page             1845 drivers/md/raid5.c 	struct page *xor_dest;
page             1875 drivers/md/raid5.c 		xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
page             1880 drivers/md/raid5.c 				xor_srcs[count++] = dev->page;
page             1883 drivers/md/raid5.c 		xor_dest = sh->dev[pd_idx].page;
page             1887 drivers/md/raid5.c 				xor_srcs[count++] = dev->page;
page             1929 drivers/md/raid5.c 	struct page **blocks;
page             2001 drivers/md/raid5.c 	struct page *xor_dest;
page             2002 drivers/md/raid5.c 	struct page **xor_srcs = to_addr_page(percpu, 0);
page             2013 drivers/md/raid5.c 	xor_dest = sh->dev[pd_idx].page;
page             2018 drivers/md/raid5.c 		xor_srcs[count++] = sh->dev[i].page;
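Throughout raid5.c, parity work gathers device pages into the percpu scribble array (to_addr_page(), line 1392 above) and hands them to the async_tx engine. A hedged sketch of the XOR step, assuming the async_xor() prototype from <linux/async_tx.h> and raid5.h's STRIPE_SIZE:

	#include <linux/async_tx.h>
	#include "raid5.h"

	static struct dma_async_tx_descriptor *
	example_compute_parity(struct page *dest, struct page **srcs, int count,
			       struct async_submit_ctl *submit)
	{
		/* XOR count source pages into dest, STRIPE_SIZE bytes each. */
		return async_xor(dest, srcs, 0, count, STRIPE_SIZE, submit);
	}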
page             2033 drivers/md/raid5.c 	struct page **srcs = to_addr_page(percpu, 0);
page             2234 drivers/md/raid5.c 		sizeof(struct page *) * (num+2) +
page             2364 drivers/md/raid5.c 			nsh->dev[i].page = osh->dev[i].page;
page             2365 drivers/md/raid5.c 			nsh->dev[i].orig_page = osh->dev[i].page;
page             2417 drivers/md/raid5.c 			if (nsh->dev[i].page == NULL) {
page             2418 drivers/md/raid5.c 				struct page *p = alloc_page(GFP_NOIO);
page             2419 drivers/md/raid5.c 				nsh->dev[i].page = p;
page             3408 drivers/md/raid5.c 			sh->dev[i].page = sh->dev[i].orig_page;
page             3777 drivers/md/raid5.c 				dev->page = dev->orig_page;
page             3929 drivers/md/raid5.c 			    dev->page == dev->orig_page &&
page             3932 drivers/md/raid5.c 				struct page *p = alloc_page(GFP_NOIO);
page             4338 drivers/md/raid5.c 			tx = async_memcpy(sh2->dev[dd_idx].page,
page             4339 drivers/md/raid5.c 					  sh->dev[i].page, 0, 0, STRIPE_SIZE,
page             5934 drivers/md/raid5.c 			memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE);
page             6361 drivers/md/raid5.c raid5_show_stripe_cache_size(struct mddev *mddev, char *page)
page             6368 drivers/md/raid5.c 		ret = sprintf(page, "%d\n", conf->min_nr_stripes);
page             6405 drivers/md/raid5.c raid5_store_stripe_cache_size(struct mddev *mddev, const char *page, size_t len)
page             6413 drivers/md/raid5.c 	if (kstrtoul(page, 10, &new))
page             6434 drivers/md/raid5.c raid5_show_rmw_level(struct mddev  *mddev, char *page)
page             6438 drivers/md/raid5.c 		return sprintf(page, "%d\n", conf->rmw_level);
page             6444 drivers/md/raid5.c raid5_store_rmw_level(struct mddev  *mddev, const char *page, size_t len)
page             6455 drivers/md/raid5.c 	if (kstrtoul(page, 10, &new))
page             6477 drivers/md/raid5.c raid5_show_preread_threshold(struct mddev *mddev, char *page)
page             6484 drivers/md/raid5.c 		ret = sprintf(page, "%d\n", conf->bypass_threshold);
page             6490 drivers/md/raid5.c raid5_store_preread_threshold(struct mddev *mddev, const char *page, size_t len)
page             6498 drivers/md/raid5.c 	if (kstrtoul(page, 10, &new))
page             6522 drivers/md/raid5.c raid5_show_skip_copy(struct mddev *mddev, char *page)
page             6529 drivers/md/raid5.c 		ret = sprintf(page, "%d\n", conf->skip_copy);
page             6535 drivers/md/raid5.c raid5_store_skip_copy(struct mddev *mddev, const char *page, size_t len)
page             6543 drivers/md/raid5.c 	if (kstrtoul(page, 10, &new))
page             6574 drivers/md/raid5.c stripe_cache_active_show(struct mddev *mddev, char *page)
page             6578 drivers/md/raid5.c 		return sprintf(page, "%d\n", atomic_read(&conf->active_stripes));
page             6587 drivers/md/raid5.c raid5_show_group_thread_cnt(struct mddev *mddev, char *page)
page             6594 drivers/md/raid5.c 		ret = sprintf(page, "%d\n", conf->worker_cnt_per_group);
page             6604 drivers/md/raid5.c raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len)
page             6614 drivers/md/raid5.c 	if (kstrtouint(page, 10, &new))
page              237 drivers/md/raid5.h 	struct page		*ppl_page; /* partial parity of this stripe */
page              255 drivers/md/raid5.h 		struct page	*page, *orig_page;
page              467 drivers/md/raid5.h 	struct page	*extra_page; /* extra page to use in prexor */
page              637 drivers/md/raid5.h 		struct page	*spare_page; /* Used when checking P/Q in raid6 */
page              140 drivers/media/common/saa7146/saa7146_core.c 	struct page *pg;
page              427 drivers/media/common/videobuf2/videobuf2-dma-contig.c 	struct page **pages;
page               37 drivers/media/common/videobuf2/videobuf2-dma-sg.c 	struct page			**pages;
page               65 drivers/media/common/videobuf2/videobuf2-dma-sg.c 		struct page *pages;
page              123 drivers/media/common/videobuf2/videobuf2-dma-sg.c 	buf->pages = kvmalloc_array(buf->num_pages, sizeof(struct page *),
page              132 drivers/media/common/videobuf2/videobuf2-vmalloc.c 	struct page **pages;
page              234 drivers/media/common/videobuf2/videobuf2-vmalloc.c 		struct page *page = vmalloc_to_page(vaddr);
page              236 drivers/media/common/videobuf2/videobuf2-vmalloc.c 		if (!page) {
page              241 drivers/media/common/videobuf2/videobuf2-vmalloc.c 		sg_set_page(sg, page, PAGE_SIZE, 0);
page              224 drivers/media/i2c/adv7180.c static int adv7180_select_page(struct adv7180_state *state, unsigned int page)
page              226 drivers/media/i2c/adv7180.c 	if (state->register_page != page) {
page              228 drivers/media/i2c/adv7180.c 			page);
page              229 drivers/media/i2c/adv7180.c 		state->register_page = page;
page              118 drivers/media/i2c/adv748x/adv748x-core.c int adv748x_read(struct adv748x_state *state, u8 page, u8 reg)
page              120 drivers/media/i2c/adv748x/adv748x-core.c 	return adv748x_read_check(state, page, reg);
page              123 drivers/media/i2c/adv748x/adv748x-core.c int adv748x_write(struct adv748x_state *state, u8 page, u8 reg, u8 value)
page              125 drivers/media/i2c/adv748x/adv748x-core.c 	return regmap_write(state->regmap[page], reg, value);
page              128 drivers/media/i2c/adv748x/adv748x-core.c static int adv748x_write_check(struct adv748x_state *state, u8 page, u8 reg,
page              134 drivers/media/i2c/adv748x/adv748x-core.c 	*error = adv748x_write(state, page, reg, value);
page              211 drivers/media/i2c/adv748x/adv748x-core.c 	u8 page;
page              221 drivers/media/i2c/adv748x/adv748x-core.c 	for (; regs->page != ADV748X_PAGE_EOR; regs++) {
page              222 drivers/media/i2c/adv748x/adv748x-core.c 		ret = adv748x_write(state, regs->page, regs->reg, regs->value);
page              225 drivers/media/i2c/adv748x/adv748x-core.c 				regs->page, regs->reg);
page              240 drivers/media/i2c/adv748x/adv748x-core.c 	u8 page = is_txa(tx) ? ADV748X_PAGE_TXA : ADV748X_PAGE_TXB;
page              244 drivers/media/i2c/adv748x/adv748x-core.c 	adv748x_write_check(state, page, 0x00, 0x80 | tx->num_lanes, &ret);
page              247 drivers/media/i2c/adv748x/adv748x-core.c 	adv748x_write_check(state, page, 0x00, 0xa0 | tx->num_lanes, &ret);
page              251 drivers/media/i2c/adv748x/adv748x-core.c 		adv748x_write_check(state, page, 0xdb, 0x10, &ret);
page              252 drivers/media/i2c/adv748x/adv748x-core.c 		adv748x_write_check(state, page, 0xd6, 0x07, &ret);
page              254 drivers/media/i2c/adv748x/adv748x-core.c 		adv748x_write_check(state, page, 0xd2, 0x40, &ret);
page              257 drivers/media/i2c/adv748x/adv748x-core.c 	adv748x_write_check(state, page, 0xc4, 0x0a, &ret);
page              258 drivers/media/i2c/adv748x/adv748x-core.c 	adv748x_write_check(state, page, 0x71, 0x33, &ret);
page              259 drivers/media/i2c/adv748x/adv748x-core.c 	adv748x_write_check(state, page, 0x72, 0x11, &ret);
page              262 drivers/media/i2c/adv748x/adv748x-core.c 	adv748x_write_check(state, page, 0xf0, 0x00, &ret);
page              265 drivers/media/i2c/adv748x/adv748x-core.c 	adv748x_write_check(state, page, 0x31, 0x82, &ret);
page              266 drivers/media/i2c/adv748x/adv748x-core.c 	adv748x_write_check(state, page, 0x1e, 0x40, &ret);
page              269 drivers/media/i2c/adv748x/adv748x-core.c 	adv748x_write_check(state, page, 0xda, 0x01, &ret);
page              273 drivers/media/i2c/adv748x/adv748x-core.c 	adv748x_write_check(state, page, 0x00, 0x20 | tx->num_lanes, &ret);
page              277 drivers/media/i2c/adv748x/adv748x-core.c 	adv748x_write_check(state, page, 0xc1, 0x2b, &ret);
page              279 drivers/media/i2c/adv748x/adv748x-core.c 	adv748x_write_check(state, page, 0x31, 0x80, &ret);
page              287 drivers/media/i2c/adv748x/adv748x-core.c 	u8 page = is_txa(tx) ? ADV748X_PAGE_TXA : ADV748X_PAGE_TXB;
page              291 drivers/media/i2c/adv748x/adv748x-core.c 	adv748x_write_check(state, page, 0x31, 0x82, &ret);
page              292 drivers/media/i2c/adv748x/adv748x-core.c 	adv748x_write_check(state, page, 0x1e, 0x00, &ret);
page              295 drivers/media/i2c/adv748x/adv748x-core.c 	adv748x_write_check(state, page, 0x00, 0x80 | tx->num_lanes, &ret);
page              298 drivers/media/i2c/adv748x/adv748x-core.c 	adv748x_write_check(state, page, 0xda, 0x01, &ret);
page              301 drivers/media/i2c/adv748x/adv748x-core.c 	adv748x_write_check(state, page, 0xc1, 0x3b, &ret);
page              697 drivers/media/i2c/adv748x/adv748x-core.c 	state->txa.page = ADV748X_PAGE_TXA;
page              698 drivers/media/i2c/adv748x/adv748x-core.c 	state->txb.page = ADV748X_PAGE_TXB;
page               79 drivers/media/i2c/adv748x/adv748x.h 	unsigned int page;
page              390 drivers/media/i2c/adv748x/adv748x.h int adv748x_write(struct adv748x_state *state, u8 page, u8 reg, u8 value);
page              414 drivers/media/i2c/adv748x/adv748x.h #define tx_read(t, r) adv748x_read(t->state, t->page, r)
page              415 drivers/media/i2c/adv748x/adv748x.h #define tx_write(t, r, v) adv748x_write(t->state, t->page, r, v)
page              604 drivers/media/i2c/adv7604.c #define ADV76XX_REG(page, offset)	(((page) << 8) | (offset))
page              611 drivers/media/i2c/adv7604.c 	unsigned int page = reg >> 8;
page              615 drivers/media/i2c/adv7604.c 	if (page >= ADV76XX_PAGE_MAX || !(BIT(page) & state->info->page_mask))
page              619 drivers/media/i2c/adv7604.c 	err = regmap_read(state->regmap[page], reg, &val);
page              628 drivers/media/i2c/adv7604.c 	unsigned int page = reg >> 8;
page              630 drivers/media/i2c/adv7604.c 	if (page >= ADV76XX_PAGE_MAX || !(BIT(page) & state->info->page_mask))
page              635 drivers/media/i2c/adv7604.c 	return regmap_write(state->regmap[page], reg, val);
page             2870 drivers/media/i2c/adv7604.c 					       unsigned int page)
page             2875 drivers/media/i2c/adv7604.c 	unsigned int io_reg = 0xf2 + page;
page             2878 drivers/media/i2c/adv7604.c 	if (pdata && pdata->i2c_addresses[page])
page             2880 drivers/media/i2c/adv7604.c 					   pdata->i2c_addresses[page]);
page             2883 drivers/media/i2c/adv7604.c 				adv76xx_default_addresses[page].name,
page             2884 drivers/media/i2c/adv7604.c 				adv76xx_default_addresses[page].default_addr);
page              304 drivers/media/i2c/mt9m111.c 	u16 page;
page              307 drivers/media/i2c/mt9m111.c 	page = (reg >> 8);
page              308 drivers/media/i2c/mt9m111.c 	if (page == mt9m111->lastpage)
page              310 drivers/media/i2c/mt9m111.c 	if (page > 2)
page              313 drivers/media/i2c/mt9m111.c 	ret = i2c_smbus_write_word_swapped(client, MT9M111_PAGE_MAP, page);
page              315 drivers/media/i2c/mt9m111.c 		mt9m111->lastpage = page;
page              244 drivers/media/i2c/noon010pc30.c 	u32 page = reg >> 8 & 0xFF;
page              247 drivers/media/i2c/noon010pc30.c 	if (info->i2c_reg_page != page && (reg & 0xFF) != 0x03) {
page              248 drivers/media/i2c/noon010pc30.c 		ret = i2c_smbus_write_byte_data(client, PAGEMODE_REG, page);
page              250 drivers/media/i2c/noon010pc30.c 			info->i2c_reg_page = page;
page              289 drivers/media/i2c/sr030pc30.c 	u32 page = reg >> 8 & 0xFF;
page              291 drivers/media/i2c/sr030pc30.c 	if (info->i2c_reg_page != page && (reg & 0xFF) != 0x03) {
page              292 drivers/media/i2c/sr030pc30.c 		ret = i2c_smbus_write_byte_data(client, PAGEMODE_REG, page);
page              294 drivers/media/i2c/sr030pc30.c 			info->i2c_reg_page = page;
page              231 drivers/media/i2c/tda1997x.c 	char page;
page              339 drivers/media/i2c/tda1997x.c static int tda1997x_setpage(struct v4l2_subdev *sd, u8 page)
page              344 drivers/media/i2c/tda1997x.c 	if (state->page != page) {
page              346 drivers/media/i2c/tda1997x.c 			REG_CURPAGE_00H, page);
page              350 drivers/media/i2c/tda1997x.c 				REG_CURPAGE_00H, page);
page              353 drivers/media/i2c/tda1997x.c 		state->page = page;
page             2560 drivers/media/i2c/tda1997x.c 	state->page = 0xff;
page              493 drivers/media/pci/cobalt/cobalt-alsa-pcm.c static struct page *snd_pcm_get_vmalloc_page(struct snd_pcm_substream *subs,
page              510 drivers/media/pci/cobalt/cobalt-alsa-pcm.c 	.page		= snd_pcm_get_vmalloc_page,
page              522 drivers/media/pci/cobalt/cobalt-alsa-pcm.c 	.page		= snd_pcm_get_vmalloc_page,
page              294 drivers/media/pci/cx18/cx18-alsa-pcm.c static struct page *snd_pcm_get_vmalloc_page(struct snd_pcm_substream *subs,
page              311 drivers/media/pci/cx18/cx18-alsa-pcm.c 	.page		= snd_pcm_get_vmalloc_page,
page               74 drivers/media/pci/cx23885/cx23885-alsa.c 	struct page *pg;
page              485 drivers/media/pci/cx23885/cx23885-alsa.c static struct page *snd_cx23885_page(struct snd_pcm_substream *substream,
page              504 drivers/media/pci/cx23885/cx23885-alsa.c 	.page = snd_cx23885_page,
page              137 drivers/media/pci/cx25821/cx25821-alsa.c 	struct page *pg;
page              628 drivers/media/pci/cx25821/cx25821-alsa.c static struct page *snd_cx25821_page(struct snd_pcm_substream *substream,
page              648 drivers/media/pci/cx25821/cx25821-alsa.c 	.page = snd_cx25821_page,
page              277 drivers/media/pci/cx88/cx88-alsa.c 	struct page *pg;
page              574 drivers/media/pci/cx88/cx88-alsa.c static struct page *snd_cx88_page(struct snd_pcm_substream *substream,
page              594 drivers/media/pci/cx88/cx88-alsa.c 	.page = snd_cx88_page,
page              299 drivers/media/pci/ivtv/ivtv-alsa-pcm.c static struct page *snd_pcm_get_vmalloc_page(struct snd_pcm_substream *subs,
page              316 drivers/media/pci/ivtv/ivtv-alsa-pcm.c 	.page		= snd_pcm_get_vmalloc_page,
page              288 drivers/media/pci/ivtv/ivtv-driver.h 	struct page *map[IVTV_DMA_SG_OSD_ENT];
page              290 drivers/media/pci/ivtv/ivtv-driver.h 	struct page *bouncemap[IVTV_DMA_SG_OSD_ENT];
page             1443 drivers/media/pci/meye/meye.c 	unsigned long page, pos;
page             1466 drivers/media/pci/meye/meye.c 		page = vmalloc_to_pfn((void *)pos);
page             1467 drivers/media/pci/meye/meye.c 		if (remap_pfn_range(vma, start, page, PAGE_SIZE, PAGE_SHARED)) {
page               53 drivers/media/pci/pt1/pt1.c 	struct pt1_buffer_page *page;
page               58 drivers/media/pci/pt1/pt1.c 	struct pt1_table_page *page;
page              435 drivers/media/pci/pt1/pt1.c static int pt1_filter(struct pt1 *pt1, struct pt1_buffer_page *page)
page              445 drivers/media/pci/pt1/pt1.c 	if (!page->upackets[PT1_NR_UPACKETS - 1])
page              449 drivers/media/pci/pt1/pt1.c 		upacket = le32_to_cpu(page->upackets[i]);
page              485 drivers/media/pci/pt1/pt1.c 	page->upackets[PT1_NR_UPACKETS - 1] = 0;
page              492 drivers/media/pci/pt1/pt1.c 	struct pt1_buffer_page *page;
page              509 drivers/media/pci/pt1/pt1.c 		page = pt1->tables[pt1->table_index].bufs[pt1->buf_index].page;
page              510 drivers/media/pci/pt1/pt1.c 		if (!pt1_filter(pt1, page)) {
page              532 drivers/media/pci/pt1/pt1.c static void pt1_free_page(struct pt1 *pt1, void *page, dma_addr_t addr)
page              534 drivers/media/pci/pt1/pt1.c 	dma_free_coherent(&pt1->pdev->dev, PT1_PAGE_SIZE, page, addr);
page              539 drivers/media/pci/pt1/pt1.c 	void *page;
page              542 drivers/media/pci/pt1/pt1.c 	page = dma_alloc_coherent(&pt1->pdev->dev, PT1_PAGE_SIZE, &addr,
page              544 drivers/media/pci/pt1/pt1.c 	if (page == NULL)
page              552 drivers/media/pci/pt1/pt1.c 	return page;
page              557 drivers/media/pci/pt1/pt1.c 	pt1_free_page(pt1, buf->page, buf->addr);
page              563 drivers/media/pci/pt1/pt1.c 	struct pt1_buffer_page *page;
page              566 drivers/media/pci/pt1/pt1.c 	page = pt1_alloc_page(pt1, &addr, pfnp);
page              567 drivers/media/pci/pt1/pt1.c 	if (page == NULL)
page              570 drivers/media/pci/pt1/pt1.c 	page->upackets[PT1_NR_UPACKETS - 1] = 0;
page              572 drivers/media/pci/pt1/pt1.c 	buf->page = page;
page              584 drivers/media/pci/pt1/pt1.c 	pt1_free_page(pt1, table->page, table->addr);
page              590 drivers/media/pci/pt1/pt1.c 	struct pt1_table_page *page;
page              595 drivers/media/pci/pt1/pt1.c 	page = pt1_alloc_page(pt1, &addr, pfnp);
page              596 drivers/media/pci/pt1/pt1.c 	if (page == NULL)
page              604 drivers/media/pci/pt1/pt1.c 		page->buf_pfns[i] = cpu_to_le32(buf_pfn);
page              608 drivers/media/pci/pt1/pt1.c 	table->page = page;
page              616 drivers/media/pci/pt1/pt1.c 	pt1_free_page(pt1, page, addr);
page              659 drivers/media/pci/pt1/pt1.c 		tables[i - 1].page->next_pfn = cpu_to_le32(pfn);
page              663 drivers/media/pci/pt1/pt1.c 	tables[pt1_nr_tables - 1].page->next_pfn = cpu_to_le32(first_pfn);
page             1287 drivers/media/pci/pt1/pt1.c 			pt1->tables[i].bufs[j].page->upackets[PT1_NR_UPACKETS-1]
page              258 drivers/media/pci/saa7134/saa7134-alsa.c 	struct page *pg;
page              854 drivers/media/pci/saa7134/saa7134-alsa.c static struct page *snd_card_saa7134_page(struct snd_pcm_substream *substream,
page              874 drivers/media/pci/saa7134/saa7134-alsa.c 	.page =			snd_card_saa7134_page,
page              226 drivers/media/pci/solo6x10/solo6x10-g723.c 		int page = (pos / G723_FRAMES_PER_PAGE) + i;
page              230 drivers/media/pci/solo6x10/solo6x10-g723.c 				     (page * G723_PERIOD_BLOCK) +
page              253 drivers/media/pci/solo6x10/solo6x10-g723.c 		int page = (pos / G723_FRAMES_PER_PAGE) + i;
page              257 drivers/media/pci/solo6x10/solo6x10-g723.c 				     (page * G723_PERIOD_BLOCK) +
page              138 drivers/media/pci/ttpci/av7110_ca.c 	u8 *page = (u8 *)__get_free_page(GFP_USER);
page              141 drivers/media/pci/ttpci/av7110_ca.c 	if (!page)
page              149 drivers/media/pci/ttpci/av7110_ca.c 	if (copy_from_user(page, buf, count))
page              166 drivers/media/pci/ttpci/av7110_ca.c 	res = dvb_ringbuffer_write(cibuf, page, count);
page              168 drivers/media/pci/ttpci/av7110_ca.c 	free_page((unsigned long)page);
page              109 drivers/media/usb/b2c2/flexcop-usb.c 		flexcop_usb_request_t req, u8 page, u16 wAddress,
page              115 drivers/media/usb/b2c2/flexcop-usb.c 	wIndex = page << 8;
page             2395 drivers/media/usb/cpia2/cpia2_core.c 	unsigned long page, pos;
page             2409 drivers/media/usb/cpia2/cpia2_core.c 		page = kvirt_to_pa(pos);
page             2410 drivers/media/usb/cpia2/cpia2_core.c 		if (remap_pfn_range(vma, start, page >> PAGE_SHIFT, PAGE_SIZE, PAGE_SHARED))
page              618 drivers/media/usb/cx231xx/cx231xx-audio.c static struct page *snd_pcm_get_vmalloc_page(struct snd_pcm_substream *subs,
page              635 drivers/media/usb/cx231xx/cx231xx-audio.c 	.page = snd_pcm_get_vmalloc_page,
page              173 drivers/media/usb/dvb-usb-v2/rtl28xxu.c 				msg[1].buf[0] = dev->page;
page              177 drivers/media/usb/dvb-usb-v2/rtl28xxu.c 				req.index = CMD_DEMOD_RD | dev->page;
page              214 drivers/media/usb/dvb-usb-v2/rtl28xxu.c 				dev->page = msg[0].buf[1];
page              219 drivers/media/usb/dvb-usb-v2/rtl28xxu.c 				req.index = CMD_DEMOD_WR | dev->page;
page               66 drivers/media/usb/dvb-usb-v2/rtl28xxu.h 	u8 page; /* integrated demod active register page */
page              475 drivers/media/usb/em28xx/em28xx-audio.c static struct page *snd_pcm_get_vmalloc_page(struct snd_pcm_substream *subs,
page              718 drivers/media/usb/em28xx/em28xx-audio.c 	.page      = snd_pcm_get_vmalloc_page,
page              188 drivers/media/usb/go7007/snd-go7007.c static struct page *go7007_snd_pcm_page(struct snd_pcm_substream *substream,
page              203 drivers/media/usb/go7007/snd-go7007.c 	.page		= go7007_snd_pcm_page,
page              720 drivers/media/usb/gspca/m5602/m5602_s5k4aa.c 	u8 page, old_page;
page              722 drivers/media/usb/gspca/m5602/m5602_s5k4aa.c 	for (page = 0; page < 16; page++) {
page              723 drivers/media/usb/gspca/m5602/m5602_s5k4aa.c 		m5602_write_sensor(sd, S5K4AA_PAGE_MAP, &page, 1);
page              725 drivers/media/usb/gspca/m5602/m5602_s5k4aa.c 			page);
page              735 drivers/media/usb/gspca/m5602/m5602_s5k4aa.c 	for (page = 0; page < 16; page++) {
page              736 drivers/media/usb/gspca/m5602/m5602_s5k4aa.c 		m5602_write_sensor(sd, S5K4AA_PAGE_MAP, &page, 1);
page              738 drivers/media/usb/gspca/m5602/m5602_s5k4aa.c 			page);
page              542 drivers/media/usb/gspca/m5602/m5602_s5k83a.c 	u8 page, old_page;
page              545 drivers/media/usb/gspca/m5602/m5602_s5k83a.c 	for (page = 0; page < 16; page++) {
page              546 drivers/media/usb/gspca/m5602/m5602_s5k83a.c 		m5602_write_sensor(sd, S5K83A_PAGE_MAP, &page, 1);
page              548 drivers/media/usb/gspca/m5602/m5602_s5k83a.c 			page);
page              557 drivers/media/usb/gspca/m5602/m5602_s5k83a.c 	for (page = 0; page < 16; page++) {
page              558 drivers/media/usb/gspca/m5602/m5602_s5k83a.c 		m5602_write_sensor(sd, S5K83A_PAGE_MAP, &page, 1);
page              560 drivers/media/usb/gspca/m5602/m5602_s5k83a.c 			page);
page              289 drivers/media/usb/gspca/pac7302.c 			const u8 *page, int len)
page              297 drivers/media/usb/gspca/pac7302.c 		if (page[index] == SKIP)		/* skip this index */
page              299 drivers/media/usb/gspca/pac7302.c 		gspca_dev->usb_buf[0] = page[index];
page              308 drivers/media/usb/gspca/pac7302.c 			       index, page[index], ret);
page              208 drivers/media/usb/gspca/pac7311.c 			const __u8 *page, int len)
page              216 drivers/media/usb/gspca/pac7311.c 		if (page[index] == SKIP)		/* skip this index */
page              218 drivers/media/usb/gspca/pac7311.c 		gspca_dev->usb_buf[0] = page[index];
page              227 drivers/media/usb/gspca/pac7311.c 			       index, page[index], ret);
page              167 drivers/media/usb/gspca/stk1135.c static void sensor_set_page(struct gspca_dev *gspca_dev, u8 page)
page              171 drivers/media/usb/gspca/stk1135.c 	if (page != sd->sensor_page) {
page              172 drivers/media/usb/gspca/stk1135.c 		sensor_write_16(gspca_dev, 0xf0, page);
page              173 drivers/media/usb/gspca/stk1135.c 		sd->sensor_page = page;
page              372 drivers/media/usb/tm6000/tm6000-alsa.c static struct page *snd_pcm_get_vmalloc_page(struct snd_pcm_substream *subs,
page              392 drivers/media/usb/tm6000/tm6000-alsa.c 	.page = snd_pcm_get_vmalloc_page,
page               66 drivers/media/v4l2-core/videobuf-dma-sg.c 	struct page *pg;
page               92 drivers/media/v4l2-core/videobuf-dma-sg.c static struct scatterlist *videobuf_pages_to_sg(struct page **pages,
page              175 drivers/media/v4l2-core/videobuf-dma-sg.c 	dma->pages = kmalloc_array(dma->nr_pages, sizeof(struct page *),
page              441 drivers/media/v4l2-core/videobuf-dma-sg.c 	struct page *page;
page              446 drivers/media/v4l2-core/videobuf-dma-sg.c 	page = alloc_page(GFP_USER | __GFP_DMA32);
page              447 drivers/media/v4l2-core/videobuf-dma-sg.c 	if (!page)
page              449 drivers/media/v4l2-core/videobuf-dma-sg.c 	clear_user_highpage(page, vmf->address);
page              450 drivers/media/v4l2-core/videobuf-dma-sg.c 	vmf->page = page;
page              787 drivers/memstick/core/ms_block.c 						u16 pba, u8 page, u8 flag)
page              793 drivers/memstick/core/ms_block.c 	msb->regs.param.page_address = page;
page              800 drivers/memstick/core/ms_block.c 							flag, pba, page);
page              812 drivers/memstick/core/ms_block.c static int msb_mark_page_bad(struct msb_data *msb, int pba, int page)
page              814 drivers/memstick/core/ms_block.c 	dbg("marking page %d of pba %d as bad", page, pba);
page              817 drivers/memstick/core/ms_block.c 		pba, page, ~MEMSTICK_OVERWRITE_PGST0);
page              855 drivers/memstick/core/ms_block.c 	u16 pba, u8 page, struct ms_extra_data_register *extra,
page              904 drivers/memstick/core/ms_block.c 		msb->regs.param.page_address = page;
page              914 drivers/memstick/core/ms_block.c 				pba, page);
page              929 drivers/memstick/core/ms_block.c 			pba, page);
page              933 drivers/memstick/core/ms_block.c 			msb_mark_page_bad(msb, pba, page);
page              939 drivers/memstick/core/ms_block.c 			pba, page, error);
page              944 drivers/memstick/core/ms_block.c static int msb_read_oob(struct msb_data *msb, u16 pba, u16 page,
page              951 drivers/memstick/core/ms_block.c 	msb->regs.param.page_address = page;
page              964 drivers/memstick/core/ms_block.c 			pba, page);
page              976 drivers/memstick/core/ms_block.c 	int page = 0, error;
page              980 drivers/memstick/core/ms_block.c 	while (page < msb->pages_in_block) {
page              982 drivers/memstick/core/ms_block.c 		error = msb_read_page(msb, pba, page,
page              983 drivers/memstick/core/ms_block.c 				NULL, &sg, page * msb->page_size);
page              986 drivers/memstick/core/ms_block.c 		page++;
page             1191 drivers/memstick/core/ms_block.c 	struct ms_boot_page *page;
page             1200 drivers/memstick/core/ms_block.c 		page = kmalloc_array(2, sizeof(struct ms_boot_page),
page             1202 drivers/memstick/core/ms_block.c 		if (!page)
page             1205 drivers/memstick/core/ms_block.c 		msb->boot_page = page;
page             1207 drivers/memstick/core/ms_block.c 		page = msb->boot_page;
page             1213 drivers/memstick/core/ms_block.c 		sg_init_one(&sg, page, sizeof(*page));
page             1225 drivers/memstick/core/ms_block.c 		if (be16_to_cpu(page->header.block_id) != MS_BLOCK_BOOT_ID) {
page             1230 drivers/memstick/core/ms_block.c 		msb_fix_boot_page_endianness(page);
page             1233 drivers/memstick/core/ms_block.c 		page++;
page             1256 drivers/memstick/core/ms_block.c 	int data_size, data_offset, page, page_offset, size_to_read;
page             1272 drivers/memstick/core/ms_block.c 	page = data_offset / msb->page_size;
page             1289 drivers/memstick/core/ms_block.c 		error = msb_read_page(msb, pba, page, NULL, &sg, offset);
page             1293 drivers/memstick/core/ms_block.c 		page++;
page             1296 drivers/memstick/core/ms_block.c 		if (page == msb->pages_in_block) {
page             1530 drivers/memstick/core/ms_block.c 	int page, offset, error;
page             1548 drivers/memstick/core/ms_block.c 	for (page = 0; page < msb->pages_in_block; page++) {
page             1550 drivers/memstick/core/ms_block.c 		if (test_bit(page, &msb->valid_cache_bitmap))
page             1553 drivers/memstick/core/ms_block.c 		offset = page * msb->page_size;
page             1556 drivers/memstick/core/ms_block.c 			page, lba);
page             1557 drivers/memstick/core/ms_block.c 		error = msb_read_page(msb, pba, page, &extra, &sg, offset);
page             1561 drivers/memstick/core/ms_block.c 			pr_err("read error on sector %d, contents probably damaged", page);
page             1570 drivers/memstick/core/ms_block.c 			dbg("page %d is marked as bad", page);
page             1574 drivers/memstick/core/ms_block.c 		set_bit(page, &msb->valid_cache_bitmap);
page             1583 drivers/memstick/core/ms_block.c 		for (page = 0; page < msb->pages_in_block; page++) {
page             1585 drivers/memstick/core/ms_block.c 			if (test_bit(page, &msb->valid_cache_bitmap))
page             1589 drivers/memstick/core/ms_block.c 				page);
page             1591 drivers/memstick/core/ms_block.c 				pba, page, 0xFF & ~MEMSTICK_OV_PG_NORMAL);
page             1600 drivers/memstick/core/ms_block.c 	int page, bool add_to_cache_only, struct scatterlist *sg, int offset)
page             1628 drivers/memstick/core/ms_block.c 	dbg_verbose("Write of LBA %d page %d to cache ", lba, page);
page             1634 drivers/memstick/core/ms_block.c 		msb->cache + page * msb->page_size, msb->page_size);
page             1636 drivers/memstick/core/ms_block.c 	set_bit(page, &msb->valid_cache_bitmap);
page             1641 drivers/memstick/core/ms_block.c 				int page, struct scatterlist *sg, int offset)
page             1648 drivers/memstick/core/ms_block.c 			test_bit(page, &msb->valid_cache_bitmap)) {
page             1651 drivers/memstick/core/ms_block.c 							lba, pba, page);
page             1657 drivers/memstick/core/ms_block.c 			msb->cache + msb->page_size * page,
page             1661 drivers/memstick/core/ms_block.c 							lba, pba, page);
page             1663 drivers/memstick/core/ms_block.c 		error = msb_read_page(msb, pba, page, NULL, sg, offset);
page             1667 drivers/memstick/core/ms_block.c 		msb_cache_write(msb, lba, page, true, sg, offset);
page             1803 drivers/memstick/core/ms_block.c 	int page, struct scatterlist *sg, size_t len, int *successfully_written)
page             1810 drivers/memstick/core/ms_block.c 		if (page == 0 && len - offset >= msb->block_size) {
page             1826 drivers/memstick/core/ms_block.c 		error = msb_cache_write(msb, lba, page, false, sg, offset);
page             1833 drivers/memstick/core/ms_block.c 		page++;
page             1834 drivers/memstick/core/ms_block.c 		if (page == msb->pages_in_block) {
page             1835 drivers/memstick/core/ms_block.c 			page = 0;
page             1843 drivers/memstick/core/ms_block.c 		int page, struct scatterlist *sg, int len, int *successfully_read)
page             1851 drivers/memstick/core/ms_block.c 		error = msb_cache_read(msb, lba, page, sg, offset);
page             1858 drivers/memstick/core/ms_block.c 		page++;
page             1859 drivers/memstick/core/ms_block.c 		if (page == msb->pages_in_block) {
page             1860 drivers/memstick/core/ms_block.c 			page = 0;
page             1870 drivers/memstick/core/ms_block.c 	int page, error, len;
page             1903 drivers/memstick/core/ms_block.c 		page = sector_div(lba, msb->pages_in_block);
page             1906 drivers/memstick/core/ms_block.c 			error = msb_do_read_request(msb, lba, page, sg,
page             1909 drivers/memstick/core/ms_block.c 			error = msb_do_write_request(msb, lba, page, sg,
page              305 drivers/memstick/host/jmb38x_ms.c 	struct page *pg;
page              187 drivers/memstick/host/tifm_ms.c 	struct page *pg;
page              161 drivers/misc/cxl/context.c 			vmf->page = ctx->ff_page;
page              548 drivers/misc/cxl/cxl.h 	struct page *ff_page;
page               99 drivers/misc/eeprom/ee1004.c 	int page;
page              104 drivers/misc/eeprom/ee1004.c 	page = off >> EE1004_PAGE_SHIFT;
page              105 drivers/misc/eeprom/ee1004.c 	if (unlikely(page > 1))
page              119 drivers/misc/eeprom/ee1004.c 		if (page != ee1004_current_page) {
page              121 drivers/misc/eeprom/ee1004.c 			status = i2c_smbus_write_byte(ee1004_set_page[page],
page              130 drivers/misc/eeprom/ee1004.c 				if (ee1004_get_current_page() == page)
page              135 drivers/misc/eeprom/ee1004.c 					page, status);
page              139 drivers/misc/eeprom/ee1004.c 			dev_dbg(dev, "Selected page %d\n", page);
page              140 drivers/misc/eeprom/ee1004.c 			ee1004_current_page = page;
page              153 drivers/misc/eeprom/ee1004.c 			page++;
page              170 drivers/misc/genwqe/card_base.h 	struct page **page_list;	/* list of pages used by user buff */
page              240 drivers/misc/genwqe/card_utils.c 			   struct page **page_list, int num_pages,
page              526 drivers/misc/genwqe/card_utils.c static int genwqe_free_user_pages(struct page **page_list,
page              588 drivers/misc/genwqe/card_utils.c 			       sizeof(struct page *) + sizeof(dma_addr_t),
page             1331 drivers/misc/habanalabs/memory.c 	struct page **pages;
page               27 drivers/misc/mic/host/mic_boot.c _mic_dma_map_page(struct device *dev, struct page *page,
page               31 drivers/misc/mic/host/mic_boot.c 	void *va = phys_to_virt(page_to_phys(page)) + offset;
page              165 drivers/misc/mic/host/mic_boot.c __mic_dma_map_page(struct device *dev, struct page *page, unsigned long offset,
page              169 drivers/misc/mic/host/mic_boot.c 	void *va = phys_to_virt(page_to_phys(page)) + offset;
page              316 drivers/misc/mic/host/mic_boot.c mic_dma_map_page(struct device *dev, struct page *page,
page              320 drivers/misc/mic/host/mic_boot.c 	void *va = phys_to_virt(page_to_phys(page)) + offset;
page              653 drivers/misc/mic/scif/scif_dma.c 		struct page **pages = window->pinned_pages->pages;
page              180 drivers/misc/mic/scif/scif_fence.c 	struct page **pages = window->pinned_pages->pages;
page              106 drivers/misc/mic/scif/scif_map.h scif_map_page(dma_addr_t *dma_handle, struct page *page,
page              112 drivers/misc/mic/scif/scif_map.h 		*dma_handle = page_to_phys(page);
page              116 drivers/misc/mic/scif/scif_map.h 					   page, 0x0, PAGE_SIZE,
page              363 drivers/misc/mic/scif/scif_nodeqp.c 	struct page *page;
page              371 drivers/misc/mic/scif/scif_nodeqp.c 		page = pfn_to_page(pa >> PAGE_SHIFT);
page              372 drivers/misc/mic/scif/scif_nodeqp.c 		sg_set_page(&sg[i], page, page_size, 0);
page              192 drivers/misc/mic/scif/scif_rma.h 	struct page **pages;
page              181 drivers/misc/sgi-gru/grufault.c 	struct page *page;
page              188 drivers/misc/sgi-gru/grufault.c 	if (get_user_pages(vaddr, 1, write ? FOLL_WRITE : 0, &page, NULL) <= 0)
page              190 drivers/misc/sgi-gru/grufault.c 	*paddr = page_to_phys(page);
page              191 drivers/misc/sgi-gru/grufault.c 	put_page(page);
page              256 drivers/misc/sgi-gru/grufile.c 	struct page *page;
page              266 drivers/misc/sgi-gru/grufile.c 		page = alloc_pages_node(nid, GFP_KERNEL, order);
page              267 drivers/misc/sgi-gru/grufile.c 		if (!page)
page              269 drivers/misc/sgi-gru/grufile.c 		gru_base[bid] = page_address(page);
page              217 drivers/misc/sgi-xp/xpc_uv.c 	struct page *page;
page              245 drivers/misc/sgi-xp/xpc_uv.c 	page = __alloc_pages_node(nid,
page              248 drivers/misc/sgi-xp/xpc_uv.c 	if (page == NULL) {
page              254 drivers/misc/sgi-xp/xpc_uv.c 	mq->address = page_address(page);
page              334 drivers/misc/vmw_balloon.c 	struct page *page;
page              603 drivers/misc/vmw_balloon.c vmballoon_mark_page_offline(struct page *page,
page              609 drivers/misc/vmw_balloon.c 		__SetPageOffline(page + i);
page              618 drivers/misc/vmw_balloon.c vmballoon_mark_page_online(struct page *page,
page              624 drivers/misc/vmw_balloon.c 		__ClearPageOffline(page + i);
page              669 drivers/misc/vmw_balloon.c 	struct page *page;
page              679 drivers/misc/vmw_balloon.c 			page = list_first_entry(&ctl->prealloc_pages,
page              680 drivers/misc/vmw_balloon.c 						struct page, lru);
page              681 drivers/misc/vmw_balloon.c 			list_del(&page->lru);
page              684 drivers/misc/vmw_balloon.c 				page = alloc_pages(__GFP_HIGHMEM|__GFP_NOWARN|
page              687 drivers/misc/vmw_balloon.c 				page = balloon_page_alloc();
page              693 drivers/misc/vmw_balloon.c 		if (page) {
page              695 drivers/misc/vmw_balloon.c 			list_add(&page->lru, &ctl->pages);
page              718 drivers/misc/vmw_balloon.c static int vmballoon_handle_one_result(struct vmballoon *b, struct page *page,
page              727 drivers/misc/vmw_balloon.c 		 page_to_pfn(page), status,
page              751 drivers/misc/vmw_balloon.c 					   struct page **p)
page              760 drivers/misc/vmw_balloon.c 	*p = b->page;
page              809 drivers/misc/vmw_balloon.c 		pfn = page_to_pfn(b->page);
page              829 drivers/misc/vmw_balloon.c 			       struct page *p)
page              837 drivers/misc/vmw_balloon.c 		b->page = p;
page              862 drivers/misc/vmw_balloon.c 	struct page *page;
page              873 drivers/misc/vmw_balloon.c 	list_for_each_entry(page, &ctl->pages, lru)
page              874 drivers/misc/vmw_balloon.c 		vmballoon_add_page(b, i++, page);
page              887 drivers/misc/vmw_balloon.c 		status = vmballoon_status_page(b, i, &page);
page              897 drivers/misc/vmw_balloon.c 		if (!vmballoon_handle_one_result(b, page, ctl->page_size,
page              905 drivers/misc/vmw_balloon.c 		list_move(&page->lru, &ctl->refused_pages);
page              928 drivers/misc/vmw_balloon.c 	struct page *page, *tmp;
page              930 drivers/misc/vmw_balloon.c 	list_for_each_entry_safe(page, tmp, page_list, lru) {
page              931 drivers/misc/vmw_balloon.c 		list_del(&page->lru);
page              932 drivers/misc/vmw_balloon.c 		__free_pages(page, vmballoon_page_order(page_size));
page             1006 drivers/misc/vmw_balloon.c 	struct page *page;
page             1017 drivers/misc/vmw_balloon.c 		list_for_each_entry(page, pages, lru) {
page             1018 drivers/misc/vmw_balloon.c 			vmballoon_mark_page_offline(page, VMW_BALLOON_2M_PAGE);
page             1049 drivers/misc/vmw_balloon.c 	struct page *page, *tmp;
page             1062 drivers/misc/vmw_balloon.c 	list_for_each_entry_safe(page, tmp, &b->huge_pages, lru) {
page             1063 drivers/misc/vmw_balloon.c 		vmballoon_mark_page_online(page, VMW_BALLOON_2M_PAGE);
page             1065 drivers/misc/vmw_balloon.c 		list_move(&page->lru, pages);
page             1088 drivers/misc/vmw_balloon.c 	struct page *page, *tmp;
page             1093 drivers/misc/vmw_balloon.c 	list_for_each_entry_safe(page, tmp, &ctl->refused_pages, lru) {
page             1094 drivers/misc/vmw_balloon.c 		list_del(&page->lru);
page             1095 drivers/misc/vmw_balloon.c 		split_page(page, order);
page             1097 drivers/misc/vmw_balloon.c 			list_add(&page[i].lru, &ctl->prealloc_pages);
page             1326 drivers/misc/vmw_balloon.c 	struct page *page;
page             1328 drivers/misc/vmw_balloon.c 	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
page             1329 drivers/misc/vmw_balloon.c 	if (!page)
page             1332 drivers/misc/vmw_balloon.c 	b->batch_page = page_address(page);
page             1766 drivers/misc/vmw_balloon.c 				 struct page *newpage, struct page *page,
page             1790 drivers/misc/vmw_balloon.c 	vmballoon_add_page(b, 0, page);
page             1795 drivers/misc/vmw_balloon.c 		status = vmballoon_status_page(b, 0, &page);
page             1812 drivers/misc/vmw_balloon.c 	balloon_page_delete(page);
page             1814 drivers/misc/vmw_balloon.c 	put_page(page);
page             1955 drivers/misc/vmw_balloon.c 	balloon.page = NULL;
page              578 drivers/misc/vmw_vmci/vmci_context.c 	struct page *notify_page;
page               82 drivers/misc/vmw_vmci/vmci_context.h 	struct page *notify_page;	/* Page backing the notify UVA. */
page              136 drivers/misc/vmw_vmci/vmci_queue_pair.c 			struct page **page;
page              137 drivers/misc/vmw_vmci/vmci_queue_pair.c 			struct page **header_page;
page              346 drivers/misc/vmw_vmci/vmci_queue_pair.c 			va = kmap(kernel_if->u.h.page[page_index]);
page              360 drivers/misc/vmw_vmci/vmci_queue_pair.c 				kunmap(kernel_if->u.h.page[page_index]);
page              365 drivers/misc/vmw_vmci/vmci_queue_pair.c 			kunmap(kernel_if->u.h.page[page_index]);
page              394 drivers/misc/vmw_vmci/vmci_queue_pair.c 			va = kmap(kernel_if->u.h.page[page_index]);
page              408 drivers/misc/vmw_vmci/vmci_queue_pair.c 				kunmap(kernel_if->u.h.page[page_index]);
page              413 drivers/misc/vmw_vmci/vmci_queue_pair.c 			kunmap(kernel_if->u.h.page[page_index]);
page              535 drivers/misc/vmw_vmci/vmci_queue_pair.c 		 sizeof(*queue->kernel_if->u.h.page))
page              538 drivers/misc/vmw_vmci/vmci_queue_pair.c 	queue_page_size = num_pages * sizeof(*queue->kernel_if->u.h.page);
page              549 drivers/misc/vmw_vmci/vmci_queue_pair.c 		    (struct page **)((u8 *)queue + queue_size);
page              550 drivers/misc/vmw_vmci/vmci_queue_pair.c 		queue->kernel_if->u.h.page =
page              626 drivers/misc/vmw_vmci/vmci_queue_pair.c static void qp_release_pages(struct page **pages,
page              742 drivers/misc/vmw_vmci/vmci_queue_pair.c 		struct page *headers[2];
page              608 drivers/misc/xilinx_sdfec.c 	struct page *page[MAX_NUM_PAGES];
page              625 drivers/misc/xilinx_sdfec.c 	res = get_user_pages_fast((unsigned long)src_ptr, n, 0, page);
page              628 drivers/misc/xilinx_sdfec.c 			put_page(page[i]);
page              633 drivers/misc/xilinx_sdfec.c 		addr = kmap(page[i]);
page              642 drivers/misc/xilinx_sdfec.c 		put_page(page[i]);
page               49 drivers/mmc/core/mmc_test.c 	struct page *page;
page              148 drivers/mmc/core/mmc_test.c 	struct page	*highmem;
page              321 drivers/mmc/core/mmc_test.c 		__free_pages(mem->arr[mem->cnt].page,
page              365 drivers/mmc/core/mmc_test.c 		struct page *page;
page              372 drivers/mmc/core/mmc_test.c 			page = alloc_pages(flags, order);
page              373 drivers/mmc/core/mmc_test.c 			if (page || !order)
page              377 drivers/mmc/core/mmc_test.c 		if (!page) {
page              382 drivers/mmc/core/mmc_test.c 		mem->arr[mem->cnt].page = page;
page              437 drivers/mmc/core/mmc_test.c 			sg_set_page(sg, mem->arr[i].page, len, 0);
page              474 drivers/mmc/core/mmc_test.c 		base = page_address(mem->arr[--i].page);
page              187 drivers/mmc/host/bcm2835.c 	struct page		*drain_page;
page             1072 drivers/mmc/host/bcm2835.c 		void *page;
page             1080 drivers/mmc/host/bcm2835.c 		page = kmap_atomic(host->drain_page);
page             1081 drivers/mmc/host/bcm2835.c 		buf = page + host->drain_offset;
page             1091 drivers/mmc/host/bcm2835.c 		kunmap_atomic(page);
page              572 drivers/mmc/host/jz4740_mmc.c 		flush_dcache_page(miter->page);
page              305 drivers/mmc/host/sdricoh_cs.c 			struct page *page;
page              307 drivers/mmc/host/sdricoh_cs.c 			page = sg_page(data->sg);
page              309 drivers/mmc/host/sdricoh_cs.c 			buf = kmap(page) + data->sg->offset + (len * i);
page              313 drivers/mmc/host/sdricoh_cs.c 			kunmap(page);
page              314 drivers/mmc/host/sdricoh_cs.c 			flush_dcache_page(page);
page              110 drivers/mmc/host/tifm_sd.c static void tifm_sd_read_fifo(struct tifm_sd *host, struct page *pg,
page              136 drivers/mmc/host/tifm_sd.c static void tifm_sd_write_fifo(struct tifm_sd *host, struct page *pg,
page              169 drivers/mmc/host/tifm_sd.c 	struct page *pg;
page              207 drivers/mmc/host/tifm_sd.c static void tifm_sd_copy_page(struct page *dst, unsigned int dst_off,
page              208 drivers/mmc/host/tifm_sd.c 			      struct page *src, unsigned int src_off,
page              226 drivers/mmc/host/tifm_sd.c 	struct page *pg;
page              152 drivers/mmc/host/usdhi6rol0.c 	struct page *page;
page              323 drivers/mmc/host/usdhi6rol0.c 	host->head_pg.page	= host->pg.page;
page              325 drivers/mmc/host/usdhi6rol0.c 	host->pg.page		= nth_page(host->pg.page, 1);
page              326 drivers/mmc/host/usdhi6rol0.c 	host->pg.mapped		= kmap(host->pg.page);
page              361 drivers/mmc/host/usdhi6rol0.c 	WARN(host->pg.page, "%p not properly unmapped!\n", host->pg.page);
page              367 drivers/mmc/host/usdhi6rol0.c 	host->pg.page = sg_page(sg);
page              368 drivers/mmc/host/usdhi6rol0.c 	host->pg.mapped = kmap(host->pg.page);
page              387 drivers/mmc/host/usdhi6rol0.c 		host->pg.page, page_to_pfn(host->pg.page), host->pg.mapped,
page              397 drivers/mmc/host/usdhi6rol0.c 	struct page *page = host->head_pg.page;
page              399 drivers/mmc/host/usdhi6rol0.c 	if (page) {
page              412 drivers/mmc/host/usdhi6rol0.c 		flush_dcache_page(page);
page              413 drivers/mmc/host/usdhi6rol0.c 		kunmap(page);
page              415 drivers/mmc/host/usdhi6rol0.c 		host->head_pg.page = NULL;
page              423 drivers/mmc/host/usdhi6rol0.c 	page = host->pg.page;
page              424 drivers/mmc/host/usdhi6rol0.c 	if (!page)
page              427 drivers/mmc/host/usdhi6rol0.c 	flush_dcache_page(page);
page              428 drivers/mmc/host/usdhi6rol0.c 	kunmap(page);
page              430 drivers/mmc/host/usdhi6rol0.c 	host->pg.page = NULL;
page              440 drivers/mmc/host/usdhi6rol0.c 	if (host->head_pg.page) {
page              505 drivers/mmc/host/usdhi6rol0.c 	host->pg.page = nth_page(sg_page(host->sg), host->page_idx);
page              506 drivers/mmc/host/usdhi6rol0.c 	host->pg.mapped = kmap(host->pg.page);
page              510 drivers/mmc/host/usdhi6rol0.c 		host->pg.page, page_to_pfn(host->pg.page), host->pg.mapped,
page              901 drivers/mmc/host/usdhi6rol0.c 	if (WARN(host->pg.page || host->head_pg.page,
page              903 drivers/mmc/host/usdhi6rol0.c 		 host->pg.page, host->head_pg.page, host->wait, mrq->cmd->opcode,
page             1259 drivers/mmc/host/usdhi6rol0.c 	if (host->pg.page) {
page             1299 drivers/mmc/host/usdhi6rol0.c 	if (host->pg.page) {
page              178 drivers/mtd/devices/bcm47xxsflash.c 	u32 page = (offset & ~mask) << 1;
page              186 drivers/mtd/devices/bcm47xxsflash.c 		b47s->cc_write(b47s, BCMA_CC_FLASHADDR, page);
page              191 drivers/mtd/devices/bcm47xxsflash.c 			pr_err("Timeout reading page 0x%X info buffer\n", page);
page              210 drivers/mtd/devices/bcm47xxsflash.c 	b47s->cc_write(b47s, BCMA_CC_FLASHADDR, page);
page               47 drivers/mtd/devices/block2mtd.c static struct page *page_read(struct address_space *mapping, int index)
page               56 drivers/mtd/devices/block2mtd.c 	struct page *page;
page               63 drivers/mtd/devices/block2mtd.c 		page = page_read(mapping, index);
page               64 drivers/mtd/devices/block2mtd.c 		if (IS_ERR(page))
page               65 drivers/mtd/devices/block2mtd.c 			return PTR_ERR(page);
page               67 drivers/mtd/devices/block2mtd.c 		max = page_address(page) + PAGE_SIZE;
page               68 drivers/mtd/devices/block2mtd.c 		for (p=page_address(page); p<max; p++)
page               70 drivers/mtd/devices/block2mtd.c 				lock_page(page);
page               71 drivers/mtd/devices/block2mtd.c 				memset(page_address(page), 0xff, PAGE_SIZE);
page               72 drivers/mtd/devices/block2mtd.c 				set_page_dirty(page);
page               73 drivers/mtd/devices/block2mtd.c 				unlock_page(page);
page               78 drivers/mtd/devices/block2mtd.c 		put_page(page);
page              105 drivers/mtd/devices/block2mtd.c 	struct page *page;
page              117 drivers/mtd/devices/block2mtd.c 		page = page_read(dev->blkdev->bd_inode->i_mapping, index);
page              118 drivers/mtd/devices/block2mtd.c 		if (IS_ERR(page))
page              119 drivers/mtd/devices/block2mtd.c 			return PTR_ERR(page);
page              121 drivers/mtd/devices/block2mtd.c 		memcpy(buf, page_address(page) + offset, cpylen);
page              122 drivers/mtd/devices/block2mtd.c 		put_page(page);
page              138 drivers/mtd/devices/block2mtd.c 	struct page *page;
page              151 drivers/mtd/devices/block2mtd.c 		page = page_read(mapping, index);
page              152 drivers/mtd/devices/block2mtd.c 		if (IS_ERR(page))
page              153 drivers/mtd/devices/block2mtd.c 			return PTR_ERR(page);
page              155 drivers/mtd/devices/block2mtd.c 		if (memcmp(page_address(page)+offset, buf, cpylen)) {
page              156 drivers/mtd/devices/block2mtd.c 			lock_page(page);
page              157 drivers/mtd/devices/block2mtd.c 			memcpy(page_address(page) + offset, buf, cpylen);
page              158 drivers/mtd/devices/block2mtd.c 			set_page_dirty(page);
page              159 drivers/mtd/devices/block2mtd.c 			unlock_page(page);
page              162 drivers/mtd/devices/block2mtd.c 		put_page(page);
page              456 drivers/mtd/devices/docg3.c static int doc_read_seek(struct docg3 *docg3, int block0, int block1, int page,
page              462 drivers/mtd/devices/docg3.c 		block0, block1, page, ofs, wear);
page              481 drivers/mtd/devices/docg3.c 	sector = (block0 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK);
page              485 drivers/mtd/devices/docg3.c 	sector = (block1 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK);
page              505 drivers/mtd/devices/docg3.c static int doc_write_seek(struct docg3 *docg3, int block0, int block1, int page,
page              511 drivers/mtd/devices/docg3.c 		block0, block1, page, ofs);
page              528 drivers/mtd/devices/docg3.c 	sector = (block0 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK);
page              538 drivers/mtd/devices/docg3.c 	sector = (block1 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK);
page              696 drivers/mtd/devices/docg3.c 				 int page, int offset)
page              701 drivers/mtd/devices/docg3.c 		block0, block1, page, offset);
page              713 drivers/mtd/devices/docg3.c 	ret = doc_read_seek(docg3, block0, block1, page, wear_area, offset);
page              832 drivers/mtd/devices/docg3.c static void calc_block_sector(loff_t from, int *block0, int *block1, int *page,
page              844 drivers/mtd/devices/docg3.c 	*page = sector % pages_biblock;
page              845 drivers/mtd/devices/docg3.c 	*page /= DOC_LAYOUT_NBPLANES;
page              847 drivers/mtd/devices/docg3.c 		*page *= 2;
page              869 drivers/mtd/devices/docg3.c 	int block0, block1, page, ret, skip, ofs = 0;
page              899 drivers/mtd/devices/docg3.c 		calc_block_sector(from - skip, &block0, &block1, &page, &ofs,
page              903 drivers/mtd/devices/docg3.c 		ret = doc_read_page_prepare(docg3, block0, block1, page, ofs);
page              979 drivers/mtd/devices/docg3.c 	int ret = 0, nbpages, page;
page              983 drivers/mtd/devices/docg3.c 	for (page = 0; !ret && (page < nbpages); page++) {
page              985 drivers/mtd/devices/docg3.c 					    page + DOC_LAYOUT_PAGE_BBT, 0);
page             1008 drivers/mtd/devices/docg3.c 	int block0, block1, page, ofs, is_good;
page             1010 drivers/mtd/devices/docg3.c 	calc_block_sector(from, &block0, &block1, &page, &ofs,
page             1013 drivers/mtd/devices/docg3.c 		from, block0, block1, page, ofs);
page             1039 drivers/mtd/devices/docg3.c 	int block0, block1, page, ofs;
page             1044 drivers/mtd/devices/docg3.c 	calc_block_sector(from, &block0, &block1, &page, &ofs, docg3->reliable);
page             1050 drivers/mtd/devices/docg3.c 		ret = doc_read_page_prepare(docg3, block0, block1, page,
page             1180 drivers/mtd/devices/docg3.c 	int block0, block1, page, ret = 0, ofs = 0;
page             1184 drivers/mtd/devices/docg3.c 	calc_block_sector(info->addr + info->len, &block0, &block1, &page,
page             1186 drivers/mtd/devices/docg3.c 	if (info->addr + info->len > mtd->size || page || ofs)
page             1189 drivers/mtd/devices/docg3.c 	calc_block_sector(info->addr, &block0, &block1, &page, &ofs,
page             1226 drivers/mtd/devices/docg3.c 	int block0, block1, page, ret, ofs = 0;
page             1230 drivers/mtd/devices/docg3.c 	calc_block_sector(to, &block0, &block1, &page, &ofs, docg3->reliable);
page             1238 drivers/mtd/devices/docg3.c 	ret = doc_write_seek(docg3, block0, block1, page, ofs);
page               93 drivers/mtd/maps/sbc_gxx.c 	unsigned long page = ofs >> WINDOW_SHIFT;
page               95 drivers/mtd/maps/sbc_gxx.c 	if( page!=page_in_window ) {
page               96 drivers/mtd/maps/sbc_gxx.c 		outw( page | DEVICE_ENABLE, PAGE_IO );
page               97 drivers/mtd/maps/sbc_gxx.c 		page_in_window = page;
page               58 drivers/mtd/mtdoops.c static void mark_page_used(struct mtdoops_context *cxt, int page)
page               60 drivers/mtd/mtdoops.c 	set_bit(page, cxt->oops_page_used);
page               63 drivers/mtd/mtdoops.c static void mark_page_unused(struct mtdoops_context *cxt, int page)
page               65 drivers/mtd/mtdoops.c 	clear_bit(page, cxt->oops_page_used);
page               68 drivers/mtd/mtdoops.c static int page_is_used(struct mtdoops_context *cxt, int page)
page               70 drivers/mtd/mtdoops.c 	return test_bit(page, cxt->oops_page_used);
page               81 drivers/mtd/mtdoops.c 	int page;
page               95 drivers/mtd/mtdoops.c 	for (page = start_page; page < start_page + erase_pages; page++)
page               96 drivers/mtd/mtdoops.c 		mark_page_unused(cxt, page);
page              219 drivers/mtd/mtdoops.c 	int ret, page, maxpos = 0;
page              223 drivers/mtd/mtdoops.c 	for (page = 0; page < cxt->oops_pages; page++) {
page              224 drivers/mtd/mtdoops.c 		if (mtd_block_isbad(mtd, page * record_size))
page              227 drivers/mtd/mtdoops.c 		mark_page_used(cxt, page);
page              228 drivers/mtd/mtdoops.c 		ret = mtd_read(mtd, page * record_size, MTDOOPS_HEADER_SIZE,
page              233 drivers/mtd/mtdoops.c 			       page * record_size, retlen,
page              239 drivers/mtd/mtdoops.c 			mark_page_unused(cxt, page);
page              244 drivers/mtd/mtdoops.c 			maxpos = page;
page              247 drivers/mtd/mtdoops.c 			maxpos = page;
page              250 drivers/mtd/mtdoops.c 			maxpos = page;
page              254 drivers/mtd/mtdoops.c 			maxpos = page;
page              562 drivers/mtd/mtdswap.c static int mtdswap_map_free_block(struct mtdswap_dev *d, unsigned int page,
page              597 drivers/mtd/mtdswap.c 	d->revmap[*block] = page;
page              615 drivers/mtd/mtdswap.c 			unsigned int page, unsigned int *bp, int gc_context)
page              629 drivers/mtd/mtdswap.c 	ret = mtdswap_map_free_block(d, page, bp);
page              682 drivers/mtd/mtdswap.c 	unsigned int page, retries;
page              685 drivers/mtd/mtdswap.c 	page = d->revmap[oldblock];
page              712 drivers/mtd/mtdswap.c 	ret = mtdswap_write_block(d, d->page_buf, page, newblock, 1);
page              714 drivers/mtd/mtdswap.c 		d->page_data[page] = BLOCK_ERROR;
page              720 drivers/mtd/mtdswap.c 	d->page_data[page] = *newblock;
page              728 drivers/mtd/mtdswap.c 	d->page_data[page] = BLOCK_ERROR;
page             1021 drivers/mtd/mtdswap.c 			unsigned long page, char *buf)
page             1035 drivers/mtd/mtdswap.c 		if (unlikely(page == 0))
page             1038 drivers/mtd/mtdswap.c 		page--;
page             1041 drivers/mtd/mtdswap.c 	mapped = d->page_data[page];
page             1046 drivers/mtd/mtdswap.c 		d->page_data[page] = BLOCK_UNDEF;
page             1050 drivers/mtd/mtdswap.c 	ret = mtdswap_write_block(d, buf, page, &newblock, 0);
page             1057 drivers/mtd/mtdswap.c 	d->page_data[page] = newblock;
page             1079 drivers/mtd/mtdswap.c 			unsigned long page, char *buf)
page             1092 drivers/mtd/mtdswap.c 		if (unlikely(page == 0))
page             1095 drivers/mtd/mtdswap.c 		page--;
page             1098 drivers/mtd/mtdswap.c 	realblock = d->page_data[page];
page             1146 drivers/mtd/mtdswap.c 	unsigned long page;
page             1152 drivers/mtd/mtdswap.c 	for (page = first; page < first + nr_pages; page++) {
page             1153 drivers/mtd/mtdswap.c 		mapped = d->page_data[page];
page             1158 drivers/mtd/mtdswap.c 			d->page_data[page] = BLOCK_UNDEF;
page             1162 drivers/mtd/mtdswap.c 			d->page_data[page] = BLOCK_UNDEF;
page              258 drivers/mtd/nand/onenand/onenand_base.c static int onenand_page_address(int page, int sector)
page              263 drivers/mtd/nand/onenand/onenand_base.c 	fpa = page & ONENAND_FPA_MASK;
page              401 drivers/mtd/nand/onenand/onenand_base.c 	int value, block, page;
page              410 drivers/mtd/nand/onenand/onenand_base.c 		page = -1;
page              416 drivers/mtd/nand/onenand/onenand_base.c 		page = -1;
page              425 drivers/mtd/nand/onenand/onenand_base.c 		page = -1;
page              431 drivers/mtd/nand/onenand/onenand_base.c 		page = 0;
page              437 drivers/mtd/nand/onenand/onenand_base.c 			page = (int) (addr - onenand_addr(this, block))>>\
page              440 drivers/mtd/nand/onenand/onenand_base.c 			page = (int) (addr >> this->page_shift);
page              447 drivers/mtd/nand/onenand/onenand_base.c 			page >>= 1;
page              449 drivers/mtd/nand/onenand/onenand_base.c 		page &= this->page_mask;
page              479 drivers/mtd/nand/onenand/onenand_base.c 	if (page != -1) {
page              503 drivers/mtd/nand/onenand/onenand_base.c 		value = onenand_page_address(page, sectors);
page              876 drivers/mtd/nand/onenand/onenand_base.c 	int blockpage, block, page;
page              883 drivers/mtd/nand/onenand/onenand_base.c 	page = (int) (addr >> (this->page_shift + 1)) & this->page_mask;
page              884 drivers/mtd/nand/onenand/onenand_base.c 	blockpage = (block << 7) | page;
page             2700 drivers/mtd/nand/onenand/onenand_base.c 	int value, block, page;
page             2706 drivers/mtd/nand/onenand/onenand_base.c 		page = -1;
page             2711 drivers/mtd/nand/onenand/onenand_base.c 		page = (int) (addr >> this->page_shift);
page             2719 drivers/mtd/nand/onenand/onenand_base.c 			page >>= 1;
page             2721 drivers/mtd/nand/onenand/onenand_base.c 		page &= this->page_mask;
page             2732 drivers/mtd/nand/onenand/onenand_base.c 	if (page != -1) {
page             2746 drivers/mtd/nand/onenand/onenand_base.c 		value = onenand_page_address(page, sectors);
page              639 drivers/mtd/nand/onenand/samsung.c 		struct page *page;
page              644 drivers/mtd/nand/onenand/samsung.c 		page = vmalloc_to_page(buf);
page              645 drivers/mtd/nand/onenand/samsung.c 		if (!page)
page              654 drivers/mtd/nand/onenand/samsung.c 		dma_dst = dma_map_page(dev, page, ofs, count, DMA_FROM_DEVICE);
page              691 drivers/mtd/nand/raw/atmel/nand-controller.c static void atmel_nfc_set_op_addr(struct nand_chip *chip, int page, int column)
page              708 drivers/mtd/nand/raw/atmel/nand-controller.c 	if (page >= 0) {
page              709 drivers/mtd/nand/raw/atmel/nand-controller.c 		nc->op.addrs[nc->op.naddrs++] = page;
page              710 drivers/mtd/nand/raw/atmel/nand-controller.c 		nc->op.addrs[nc->op.naddrs++] = page >> 8;
page              713 drivers/mtd/nand/raw/atmel/nand-controller.c 			nc->op.addrs[nc->op.naddrs++] = page >> 16;
page              829 drivers/mtd/nand/raw/atmel/nand-controller.c 				     bool oob_required, int page, bool raw)
page              835 drivers/mtd/nand/raw/atmel/nand-controller.c 	nand_prog_page_begin_op(chip, page, 0, NULL, 0);
page              857 drivers/mtd/nand/raw/atmel/nand-controller.c 				       int oob_required, int page)
page              859 drivers/mtd/nand/raw/atmel/nand-controller.c 	return atmel_nand_pmecc_write_pg(chip, buf, oob_required, page, false);
page              864 drivers/mtd/nand/raw/atmel/nand-controller.c 					   int page)
page              866 drivers/mtd/nand/raw/atmel/nand-controller.c 	return atmel_nand_pmecc_write_pg(chip, buf, oob_required, page, true);
page              870 drivers/mtd/nand/raw/atmel/nand-controller.c 				    bool oob_required, int page, bool raw)
page              875 drivers/mtd/nand/raw/atmel/nand-controller.c 	nand_read_page_op(chip, page, 0, NULL, 0);
page              892 drivers/mtd/nand/raw/atmel/nand-controller.c 				      int oob_required, int page)
page              894 drivers/mtd/nand/raw/atmel/nand-controller.c 	return atmel_nand_pmecc_read_pg(chip, buf, oob_required, page, false);
page              898 drivers/mtd/nand/raw/atmel/nand-controller.c 					  int oob_required, int page)
page              900 drivers/mtd/nand/raw/atmel/nand-controller.c 	return atmel_nand_pmecc_read_pg(chip, buf, oob_required, page, true);
page              905 drivers/mtd/nand/raw/atmel/nand-controller.c 					  int page, bool raw)
page              918 drivers/mtd/nand/raw/atmel/nand-controller.c 	atmel_nfc_set_op_addr(chip, page, 0x0);
page              961 drivers/mtd/nand/raw/atmel/nand-controller.c 					    int page)
page              963 drivers/mtd/nand/raw/atmel/nand-controller.c 	return atmel_hsmc_nand_pmecc_write_pg(chip, buf, oob_required, page,
page              969 drivers/mtd/nand/raw/atmel/nand-controller.c 						int oob_required, int page)
page              971 drivers/mtd/nand/raw/atmel/nand-controller.c 	return atmel_hsmc_nand_pmecc_write_pg(chip, buf, oob_required, page,
page              976 drivers/mtd/nand/raw/atmel/nand-controller.c 					 bool oob_required, int page,
page              992 drivers/mtd/nand/raw/atmel/nand-controller.c 		nand_read_page_op(chip, page, 0, NULL, 0);
page              994 drivers/mtd/nand/raw/atmel/nand-controller.c 		return atmel_nand_pmecc_read_pg(chip, buf, oob_required, page,
page             1003 drivers/mtd/nand/raw/atmel/nand-controller.c 	atmel_nfc_set_op_addr(chip, page, 0x0);
page             1030 drivers/mtd/nand/raw/atmel/nand-controller.c 					   int oob_required, int page)
page             1032 drivers/mtd/nand/raw/atmel/nand-controller.c 	return atmel_hsmc_nand_pmecc_read_pg(chip, buf, oob_required, page,
page             1038 drivers/mtd/nand/raw/atmel/nand-controller.c 					       int page)
page             1040 drivers/mtd/nand/raw/atmel/nand-controller.c 	return atmel_hsmc_nand_pmecc_read_pg(chip, buf, oob_required, page,
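
atmel_nfc_set_op_addr() (lines 691-720 above) splits the page number into successive 8-bit address cycles, low byte first, adding a third cycle only on larger devices. The same packing, sketched stand-alone; the three-cycle cutoff is an assumption here, since the driver derives it from the chip size.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int page = 0x21f3a;		/* arbitrary example page */
	uint8_t addrs[3];
	int naddrs = 0;

	/* split the page into 8-bit NAND address cycles, low byte first */
	addrs[naddrs++] = page;
	addrs[naddrs++] = page >> 8;
	addrs[naddrs++] = page >> 16;	/* assumed: large-device third cycle */

	for (int i = 0; i < naddrs; i++)
		printf("address cycle %d: 0x%02x\n", i, addrs[i]);
	return 0;
}
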
page             1793 drivers/mtd/nand/raw/brcmnand/brcmnand.c 	int page = addr >> chip->page_shift;
page             1803 drivers/mtd/nand/raw/brcmnand/brcmnand.c 	ret = chip->ecc.read_page_raw(chip, buf, true, page);
page             1903 drivers/mtd/nand/raw/brcmnand/brcmnand.c 			      int oob_required, int page)
page             1909 drivers/mtd/nand/raw/brcmnand/brcmnand.c 	nand_read_page_op(chip, page, 0, NULL, 0);
page             1916 drivers/mtd/nand/raw/brcmnand/brcmnand.c 				  int oob_required, int page)
page             1923 drivers/mtd/nand/raw/brcmnand/brcmnand.c 	nand_read_page_op(chip, page, 0, NULL, 0);
page             1932 drivers/mtd/nand/raw/brcmnand/brcmnand.c static int brcmnand_read_oob(struct nand_chip *chip, int page)
page             1936 drivers/mtd/nand/raw/brcmnand/brcmnand.c 	return brcmnand_read(mtd, chip, (u64)page << chip->page_shift,
page             1941 drivers/mtd/nand/raw/brcmnand/brcmnand.c static int brcmnand_read_oob_raw(struct nand_chip *chip, int page)
page             1947 drivers/mtd/nand/raw/brcmnand/brcmnand.c 	brcmnand_read(mtd, chip, (u64)page << chip->page_shift,
page             2020 drivers/mtd/nand/raw/brcmnand/brcmnand.c 			       int oob_required, int page)
page             2026 drivers/mtd/nand/raw/brcmnand/brcmnand.c 	nand_prog_page_begin_op(chip, page, 0, NULL, 0);
page             2033 drivers/mtd/nand/raw/brcmnand/brcmnand.c 				   int oob_required, int page)
page             2039 drivers/mtd/nand/raw/brcmnand/brcmnand.c 	nand_prog_page_begin_op(chip, page, 0, NULL, 0);
page             2047 drivers/mtd/nand/raw/brcmnand/brcmnand.c static int brcmnand_write_oob(struct nand_chip *chip, int page)
page             2050 drivers/mtd/nand/raw/brcmnand/brcmnand.c 			      (u64)page << chip->page_shift, NULL,
page             2054 drivers/mtd/nand/raw/brcmnand/brcmnand.c static int brcmnand_write_oob_raw(struct nand_chip *chip, int page)
page             2061 drivers/mtd/nand/raw/brcmnand/brcmnand.c 	ret = brcmnand_write(mtd, chip, (u64)page << chip->page_shift, NULL,
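
Several brcmnand entries convert a page number to a byte offset with (u64)page << chip->page_shift. The cast matters: shifting a plain int overflows once the result passes 2^31. A sketch of the overflow-safe form, with an assumed 4 KiB page size.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const int page_shift = 12;	/* assumed: 4 KiB pages */
	int page = 2000000;		/* large page index on a big chip */

	/* widen before shifting so the offset doesn't overflow an int,
	 * which is exactly why brcmnand writes (u64)page << page_shift */
	uint64_t offs = (uint64_t)page << page_shift;

	printf("page %d -> byte offset 0x%llx\n", page,
	       (unsigned long long)offs);
	return 0;
}
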
page              345 drivers/mtd/nand/raw/cafe_nand.c static int cafe_nand_write_oob(struct nand_chip *chip, int page)
page              349 drivers/mtd/nand/raw/cafe_nand.c 	return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
page              354 drivers/mtd/nand/raw/cafe_nand.c static int cafe_nand_read_oob(struct nand_chip *chip, int page)
page              358 drivers/mtd/nand/raw/cafe_nand.c 	return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
page              371 drivers/mtd/nand/raw/cafe_nand.c 			       int oob_required, int page)
page              381 drivers/mtd/nand/raw/cafe_nand.c 	nand_read_page_op(chip, page, 0, buf, mtd->writesize);
page              535 drivers/mtd/nand/raw/cafe_nand.c 					 int page)
page              540 drivers/mtd/nand/raw/cafe_nand.c 	nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
page              331 drivers/mtd/nand/raw/denali.c 			   int page)
page              338 drivers/mtd/nand/raw/denali.c 	ret = nand_read_page_op(chip, page, 0, NULL, 0);
page              358 drivers/mtd/nand/raw/denali.c 			    const void *oob_buf, int page)
page              365 drivers/mtd/nand/raw/denali.c 	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
page              385 drivers/mtd/nand/raw/denali.c 				int oob_required, int page)
page              388 drivers/mtd/nand/raw/denali.c 			       page);
page              392 drivers/mtd/nand/raw/denali.c 				 int oob_required, int page)
page              395 drivers/mtd/nand/raw/denali.c 				page);
page              398 drivers/mtd/nand/raw/denali.c static int denali_read_oob(struct nand_chip *chip, int page)
page              400 drivers/mtd/nand/raw/denali.c 	return denali_read_raw(chip, NULL, chip->oob_poi, page);
page              403 drivers/mtd/nand/raw/denali.c static int denali_write_oob(struct nand_chip *chip, int page)
page              405 drivers/mtd/nand/raw/denali.c 	return denali_write_raw(chip, NULL, chip->oob_poi, page);
page              549 drivers/mtd/nand/raw/denali.c 			       dma_addr_t dma_addr, int page, bool write)
page              554 drivers/mtd/nand/raw/denali.c 	mode = DENALI_MAP10 | DENALI_BANK(denali) | page;
page              574 drivers/mtd/nand/raw/denali.c 			       dma_addr_t dma_addr, int page, bool write)
page              584 drivers/mtd/nand/raw/denali.c 	denali->host_write(denali, mode | page,
page              598 drivers/mtd/nand/raw/denali.c 			   size_t size, int page)
page              600 drivers/mtd/nand/raw/denali.c 	u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page;
page              625 drivers/mtd/nand/raw/denali.c 			    size_t size, int page)
page              627 drivers/mtd/nand/raw/denali.c 	u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page;
page              646 drivers/mtd/nand/raw/denali.c 			   size_t size, int page, bool write)
page              649 drivers/mtd/nand/raw/denali.c 		return denali_pio_write(denali, buf, size, page);
page              651 drivers/mtd/nand/raw/denali.c 		return denali_pio_read(denali, buf, size, page);
page              655 drivers/mtd/nand/raw/denali.c 			   size_t size, int page, bool write)
page              665 drivers/mtd/nand/raw/denali.c 		return denali_pio_xfer(denali, buf, size, page, write);
page              693 drivers/mtd/nand/raw/denali.c 	denali->setup_dma(denali, dma_addr, page, write);
page              712 drivers/mtd/nand/raw/denali.c 			    int page, bool write)
page              719 drivers/mtd/nand/raw/denali.c 		return denali_dma_xfer(denali, buf, size, page, write);
page              721 drivers/mtd/nand/raw/denali.c 		return denali_pio_xfer(denali, buf, size, page, write);
page              725 drivers/mtd/nand/raw/denali.c 			    int oob_required, int page)
page              733 drivers/mtd/nand/raw/denali.c 	ret = denali_page_xfer(chip, buf, mtd->writesize, page, false);
page              746 drivers/mtd/nand/raw/denali.c 		ret = denali_read_oob(chip, page);
page              758 drivers/mtd/nand/raw/denali.c 			     int oob_required, int page)
page              762 drivers/mtd/nand/raw/denali.c 	return denali_page_xfer(chip, (void *)buf, mtd->writesize, page, true);
page              386 drivers/mtd/nand/raw/denali.h 			  int page, bool write);
page               55 drivers/mtd/nand/raw/fsl_elbc_nand.c 	unsigned int page;       /* Last page written to / read from      */
page              161 drivers/mtd/nand/raw/fsl_elbc_nand.c 	elbc_fcm_ctrl->page = page_addr;
page              635 drivers/mtd/nand/raw/fsl_elbc_nand.c 			      int oob_required, int page)
page              642 drivers/mtd/nand/raw/fsl_elbc_nand.c 	nand_read_page_op(chip, page, 0, buf, mtd->writesize);
page              656 drivers/mtd/nand/raw/fsl_elbc_nand.c 			       int oob_required, int page)
page              660 drivers/mtd/nand/raw/fsl_elbc_nand.c 	nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
page              671 drivers/mtd/nand/raw/fsl_elbc_nand.c 				  int oob_required, int page)
page              675 drivers/mtd/nand/raw/fsl_elbc_nand.c 	nand_prog_page_begin_op(chip, page, 0, NULL, 0);
page               46 drivers/mtd/nand/raw/fsl_ifc_nand.c 	unsigned int page;	/* Last page written to / read from	*/
page              147 drivers/mtd/nand/raw/fsl_ifc_nand.c 	ifc_nand_ctrl->page = page_addr;
page              213 drivers/mtd/nand/raw/fsl_ifc_nand.c 		int bufnum = nctrl->page & priv->bufnum_mask;
page              668 drivers/mtd/nand/raw/fsl_ifc_nand.c 			     int oob_required, int page)
page              675 drivers/mtd/nand/raw/fsl_ifc_nand.c 	nand_read_page_op(chip, page, 0, buf, mtd->writesize);
page              696 drivers/mtd/nand/raw/fsl_ifc_nand.c 			      int oob_required, int page)
page              700 drivers/mtd/nand/raw/fsl_ifc_nand.c 	nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
page              672 drivers/mtd/nand/raw/fsmc_nand.c 				int oob_required, int page)
page              692 drivers/mtd/nand/raw/fsmc_nand.c 		nand_read_page_op(chip, page, s * eccsize, NULL, 0);
page              716 drivers/mtd/nand/raw/fsmc_nand.c 			nand_read_oob_op(chip, page, off, oob + j, len);
page             1453 drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c 			      int oob_required, int page)
page             1464 drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c 	ret = nand_read_page_op(chip, page, 0, buf, geo->page_size);
page             1495 drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c 				 uint32_t len, uint8_t *buf, int page)
page             1527 drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c 				page, first, last, marker_pos);
page             1528 drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c 			return gpmi_ecc_read_page(chip, buf, 0, page);
page             1558 drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c 	ret = nand_read_page_op(chip, page, col, buf, page_size);
page             1563 drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c 		page, offs, len, col, first, n, page_size);
page             1571 drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c 			       int oob_required, int page)
page             1596 drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c 	ret = nand_prog_page_op(chip, page, 0, buf, nfc_geo->page_size);
page             1661 drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c static int gpmi_ecc_read_oob(struct nand_chip *chip, int page)
page             1671 drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c 	ret = nand_read_page_op(chip, page, mtd->writesize, chip->oob_poi,
page             1683 drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c 		ret = nand_read_page_op(chip, page, 0, chip->oob_poi, 1);
page             1691 drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c static int gpmi_ecc_write_oob(struct nand_chip *chip, int page)
page             1704 drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c 	return nand_prog_page_op(chip, page, mtd->writesize + of.offset,
page             1721 drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c 				  int oob_required, int page)
page             1736 drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c 	ret = nand_read_page_op(chip, page, 0, tmp_buf,
page             1808 drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c 				   int oob_required, int page)
page             1874 drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c 	return nand_prog_page_op(chip, page, 0, tmp_buf,
page             1878 drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c static int gpmi_ecc_read_oob_raw(struct nand_chip *chip, int page)
page             1880 drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c 	return gpmi_ecc_read_page_raw(chip, NULL, 1, page);
page             1883 drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c static int gpmi_ecc_write_oob_raw(struct nand_chip *chip, int page)
page             1885 drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c 	return gpmi_ecc_write_page_raw(chip, NULL, 1, page);
page             1894 drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c 	int column, page, chipnr;
page             1906 drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c 	page = (int)(ofs >> chip->page_shift);
page             1908 drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c 	ret = nand_prog_page_op(chip, page, column, block_mark, 1);
page             1949 drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c 	unsigned int page;
page             1966 drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c 		page = stride * rom_geo->stride_size_in_pages;
page             1968 drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c 		dev_dbg(dev, "Looking for a fingerprint in page 0x%x\n", page);
page             1974 drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c 		ret = nand_read_page_op(chip, page, 12, buffer,
page             2009 drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c 	unsigned int page;
page             2048 drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c 		page = stride * rom_geo->stride_size_in_pages;
page             2051 drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c 		dev_dbg(dev, "Writing an NCB fingerprint in page 0x%x\n", page);
page             2053 drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c 		status = chip->ecc.write_page_raw(chip, buffer, 0, page);
page             2071 drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c 	int     page;
page             2104 drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c 		page = block << (chip->phys_erase_shift - chip->page_shift);
page             2109 drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c 		ret = nand_read_page_op(chip, page, mtd->writesize, &block_mark,
page              519 drivers/mtd/nand/raw/hisi504_nand.c 				     int oob_required, int page)
page              526 drivers/mtd/nand/raw/hisi504_nand.c 	nand_read_page_op(chip, page, 0, buf, mtd->writesize);
page              551 drivers/mtd/nand/raw/hisi504_nand.c static int hisi_nand_read_oob(struct nand_chip *chip, int page)
page              556 drivers/mtd/nand/raw/hisi504_nand.c 	nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
page              569 drivers/mtd/nand/raw/hisi504_nand.c 				      int page)
page              573 drivers/mtd/nand/raw/hisi504_nand.c 	nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
page               79 drivers/mtd/nand/raw/internals.h int nand_bbm_get_next_page(struct nand_chip *chip, int page);
page               89 drivers/mtd/nand/raw/internals.h 			       int oob_required, int page);
page               91 drivers/mtd/nand/raw/internals.h 				int oob_required, int page);
page               93 drivers/mtd/nand/raw/internals.h int nand_read_param_page_op(struct nand_chip *chip, u8 page, void *buf,
page              435 drivers/mtd/nand/raw/lpc32xx_mlc.c 			     int oob_required, int page)
page              455 drivers/mtd/nand/raw/lpc32xx_mlc.c 	nand_read_page_op(chip, page, 0, NULL, 0);
page              502 drivers/mtd/nand/raw/lpc32xx_mlc.c 				       int page)
page              516 drivers/mtd/nand/raw/lpc32xx_mlc.c 	nand_prog_page_begin_op(chip, page, 0, NULL, 0);
page              550 drivers/mtd/nand/raw/lpc32xx_mlc.c static int lpc32xx_read_oob(struct nand_chip *chip, int page)
page              555 drivers/mtd/nand/raw/lpc32xx_mlc.c 	lpc32xx_read_page(chip, host->dummy_buf, 1, page);
page              560 drivers/mtd/nand/raw/lpc32xx_mlc.c static int lpc32xx_write_oob(struct nand_chip *chip, int page)
page              386 drivers/mtd/nand/raw/lpc32xx_slc.c static int lpc32xx_nand_read_oob_syndrome(struct nand_chip *chip, int page)
page              390 drivers/mtd/nand/raw/lpc32xx_slc.c 	return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
page              396 drivers/mtd/nand/raw/lpc32xx_slc.c static int lpc32xx_nand_write_oob_syndrome(struct nand_chip *chip, int page)
page              400 drivers/mtd/nand/raw/lpc32xx_slc.c 	return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
page              603 drivers/mtd/nand/raw/lpc32xx_slc.c 					   int oob_required, int page)
page              612 drivers/mtd/nand/raw/lpc32xx_slc.c 	nand_read_page_op(chip, page, 0, NULL, 0);
page              651 drivers/mtd/nand/raw/lpc32xx_slc.c 					       int page)
page              656 drivers/mtd/nand/raw/lpc32xx_slc.c 	nand_read_page_op(chip, page, 0, NULL, 0);
page              671 drivers/mtd/nand/raw/lpc32xx_slc.c 					    int oob_required, int page)
page              679 drivers/mtd/nand/raw/lpc32xx_slc.c 	nand_prog_page_begin_op(chip, page, 0, NULL, 0);
page              709 drivers/mtd/nand/raw/lpc32xx_slc.c 						int oob_required, int page)
page              714 drivers/mtd/nand/raw/lpc32xx_slc.c 	nand_prog_page_begin_op(chip, page, 0, buf,
page              985 drivers/mtd/nand/raw/marvell_nand.c 					       bool raw, int page)
page              996 drivers/mtd/nand/raw/marvell_nand.c 		.ndcb[1] = NDCB1_ADDRS_PAGE(page),
page              997 drivers/mtd/nand/raw/marvell_nand.c 		.ndcb[2] = NDCB2_ADDR5_PAGE(page),
page             1037 drivers/mtd/nand/raw/marvell_nand.c 						int oob_required, int page)
page             1041 drivers/mtd/nand/raw/marvell_nand.c 						   true, page);
page             1045 drivers/mtd/nand/raw/marvell_nand.c 					    int oob_required, int page)
page             1055 drivers/mtd/nand/raw/marvell_nand.c 					    page);
page             1071 drivers/mtd/nand/raw/marvell_nand.c 					    lt->data_bytes, true, page);
page             1084 drivers/mtd/nand/raw/marvell_nand.c static int marvell_nfc_hw_ecc_hmg_read_oob_raw(struct nand_chip *chip, int page)
page             1090 drivers/mtd/nand/raw/marvell_nand.c 						   true, page);
page             1097 drivers/mtd/nand/raw/marvell_nand.c 						int page)
page             1108 drivers/mtd/nand/raw/marvell_nand.c 		.ndcb[1] = NDCB1_ADDRS_PAGE(page),
page             1109 drivers/mtd/nand/raw/marvell_nand.c 		.ndcb[2] = NDCB2_ADDR5_PAGE(page),
page             1150 drivers/mtd/nand/raw/marvell_nand.c 						 int oob_required, int page)
page             1154 drivers/mtd/nand/raw/marvell_nand.c 						    true, page);
page             1159 drivers/mtd/nand/raw/marvell_nand.c 					     int oob_required, int page)
page             1166 drivers/mtd/nand/raw/marvell_nand.c 						   false, page);
page             1178 drivers/mtd/nand/raw/marvell_nand.c 						int page)
page             1187 drivers/mtd/nand/raw/marvell_nand.c 						    true, page);
page             1192 drivers/mtd/nand/raw/marvell_nand.c 						int oob_required, int page)
page             1210 drivers/mtd/nand/raw/marvell_nand.c 	nand_read_page_op(chip, page, 0, NULL, 0);
page             1241 drivers/mtd/nand/raw/marvell_nand.c 					      int page)
page             1251 drivers/mtd/nand/raw/marvell_nand.c 		.ndcb[1] = NDCB1_ADDRS_PAGE(page),
page             1252 drivers/mtd/nand/raw/marvell_nand.c 		.ndcb[2] = NDCB2_ADDR5_PAGE(page),
page             1307 drivers/mtd/nand/raw/marvell_nand.c 					    int page)
page             1338 drivers/mtd/nand/raw/marvell_nand.c 						  spare, spare_len, page);
page             1432 drivers/mtd/nand/raw/marvell_nand.c static int marvell_nfc_hw_ecc_bch_read_oob_raw(struct nand_chip *chip, int page)
page             1436 drivers/mtd/nand/raw/marvell_nand.c 	return chip->ecc.read_page_raw(chip, buf, true, page);
page             1439 drivers/mtd/nand/raw/marvell_nand.c static int marvell_nfc_hw_ecc_bch_read_oob(struct nand_chip *chip, int page)
page             1443 drivers/mtd/nand/raw/marvell_nand.c 	return chip->ecc.read_page(chip, buf, true, page);
page             1449 drivers/mtd/nand/raw/marvell_nand.c 						 int oob_required, int page)
page             1463 drivers/mtd/nand/raw/marvell_nand.c 	nand_prog_page_begin_op(chip, page, 0, NULL, 0);
page             1504 drivers/mtd/nand/raw/marvell_nand.c 				   int page)
page             1533 drivers/mtd/nand/raw/marvell_nand.c 		nfc_op.ndcb[1] |= NDCB1_ADDRS_PAGE(page);
page             1534 drivers/mtd/nand/raw/marvell_nand.c 		nfc_op.ndcb[2] |= NDCB2_ADDR5_PAGE(page);
page             1564 drivers/mtd/nand/raw/marvell_nand.c 					     int oob_required, int page)
page             1589 drivers/mtd/nand/raw/marvell_nand.c 						   spare, spare_len, page);
page             1614 drivers/mtd/nand/raw/marvell_nand.c 						int page)
page             1621 drivers/mtd/nand/raw/marvell_nand.c 	return chip->ecc.write_page_raw(chip, buf, true, page);
page             1624 drivers/mtd/nand/raw/marvell_nand.c static int marvell_nfc_hw_ecc_bch_write_oob(struct nand_chip *chip, int page)
page             1631 drivers/mtd/nand/raw/marvell_nand.c 	return chip->ecc.write_page(chip, buf, true, page);
page               94 drivers/mtd/nand/raw/meson_nand.c #define ROW_ADDER(page, index)	(((page) >> (8 * (index))) & 0xff)
page              574 drivers/mtd/nand/raw/meson_nand.c 						int page, bool in)
page              599 drivers/mtd/nand/raw/meson_nand.c 	addrs[row_start] = cs | NFC_CMD_ALE | ROW_ADDER(page, 0);
page              600 drivers/mtd/nand/raw/meson_nand.c 	addrs[row_start + 1] = cs | NFC_CMD_ALE | ROW_ADDER(page, 1);
page              604 drivers/mtd/nand/raw/meson_nand.c 			cs | NFC_CMD_ALE | ROW_ADDER(page, 2);
page              627 drivers/mtd/nand/raw/meson_nand.c 				    int page, int raw)
page              643 drivers/mtd/nand/raw/meson_nand.c 	ret = meson_nfc_rw_cmd_prepare_and_execute(nand, page, DIRWRITE);
page              654 drivers/mtd/nand/raw/meson_nand.c 		meson_nfc_cmd_seed(nfc, page);
page              672 drivers/mtd/nand/raw/meson_nand.c 				    int oob_required, int page)
page              678 drivers/mtd/nand/raw/meson_nand.c 	return meson_nfc_write_page_sub(nand, page, 1);
page              682 drivers/mtd/nand/raw/meson_nand.c 				      const u8 *buf, int oob_required, int page)
page              692 drivers/mtd/nand/raw/meson_nand.c 	return meson_nfc_write_page_sub(nand, page, 0);
page              714 drivers/mtd/nand/raw/meson_nand.c 				   int page, int raw)
page              727 drivers/mtd/nand/raw/meson_nand.c 	ret = meson_nfc_rw_cmd_prepare_and_execute(nand, page, DIRREAD);
page              738 drivers/mtd/nand/raw/meson_nand.c 		meson_nfc_cmd_seed(nfc, page);
page              755 drivers/mtd/nand/raw/meson_nand.c 				   int oob_required, int page)
page              760 drivers/mtd/nand/raw/meson_nand.c 	ret = meson_nfc_read_page_sub(nand, page, 1);
page              770 drivers/mtd/nand/raw/meson_nand.c 				     int oob_required, int page)
page              780 drivers/mtd/nand/raw/meson_nand.c 	ret = meson_nfc_read_page_sub(nand, page, 0);
page              795 drivers/mtd/nand/raw/meson_nand.c 		ret  = meson_nfc_read_page_raw(nand, buf, 0, page);
page              823 drivers/mtd/nand/raw/meson_nand.c static int meson_nfc_read_oob_raw(struct nand_chip *nand, int page)
page              825 drivers/mtd/nand/raw/meson_nand.c 	return meson_nfc_read_page_raw(nand, NULL, 1, page);
page              828 drivers/mtd/nand/raw/meson_nand.c static int meson_nfc_read_oob(struct nand_chip *nand, int page)
page              830 drivers/mtd/nand/raw/meson_nand.c 	return meson_nfc_read_page_hwecc(nand, NULL, 1, page);
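
meson_nand.c's ROW_ADDER(page, index) macro (line 94 above) extracts byte `index` of the row address; the driver then ORs each byte into an address-cycle command word. The macro below is copied verbatim from the entry; everything around it is illustrative.

#include <stdio.h>

/* copied from meson_nand.c line 94 */
#define ROW_ADDER(page, index)	(((page) >> (8 * (index))) & 0xff)

int main(void)
{
	int page = 0x01a2b3;

	for (int i = 0; i < 3; i++)
		printf("row address byte %d: 0x%02x\n", i, ROW_ADDER(page, i));
	return 0;
}
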
page              232 drivers/mtd/nand/raw/mpc5121_nfc.c static void mpc5121_nfc_addr_cycle(struct mtd_info *mtd, int column, int page)
page              243 drivers/mtd/nand/raw/mpc5121_nfc.c 	if (page != -1) {
page              245 drivers/mtd/nand/raw/mpc5121_nfc.c 			mpc5121_nfc_send_addr(mtd, page & 0xFF);
page              246 drivers/mtd/nand/raw/mpc5121_nfc.c 			page >>= 8;
page              321 drivers/mtd/nand/raw/mpc5121_nfc.c 				int column, int page)
page              354 drivers/mtd/nand/raw/mpc5121_nfc.c 		mpc5121_nfc_command(chip, NAND_CMD_READ0, column, page);
page              369 drivers/mtd/nand/raw/mpc5121_nfc.c 	mpc5121_nfc_addr_cycle(mtd, column, page);
page              746 drivers/mtd/nand/raw/mtk_nand.c 				 const u8 *buf, int page, int len)
page              797 drivers/mtd/nand/raw/mtk_nand.c 			      const u8 *buf, int page, int raw)
page              806 drivers/mtd/nand/raw/mtk_nand.c 	nand_prog_page_begin_op(chip, page, 0, NULL, 0);
page              836 drivers/mtd/nand/raw/mtk_nand.c 	ret = mtk_nfc_do_write_page(mtd, chip, bufpoi, page, len);
page              848 drivers/mtd/nand/raw/mtk_nand.c 				    int oob_on, int page)
page              850 drivers/mtd/nand/raw/mtk_nand.c 	return mtk_nfc_write_page(nand_to_mtd(chip), chip, buf, page, 0);
page              865 drivers/mtd/nand/raw/mtk_nand.c 				       int oob_on, int page)
page              876 drivers/mtd/nand/raw/mtk_nand.c 	return mtk_nfc_write_page(mtd, chip, nfc->buffer, page, 1);
page              879 drivers/mtd/nand/raw/mtk_nand.c static int mtk_nfc_write_oob_std(struct nand_chip *chip, int page)
page              881 drivers/mtd/nand/raw/mtk_nand.c 	return mtk_nfc_write_page_raw(chip, NULL, 1, page);
page              911 drivers/mtd/nand/raw/mtk_nand.c 				u8 *bufpoi, int page, int raw)
page              932 drivers/mtd/nand/raw/mtk_nand.c 	nand_read_page_op(chip, page, column, NULL, 0);
page             1023 drivers/mtd/nand/raw/mtk_nand.c 				 int page)
page             1033 drivers/mtd/nand/raw/mtk_nand.c 				   page, 1);
page             1051 drivers/mtd/nand/raw/mtk_nand.c static int mtk_nfc_read_oob_std(struct nand_chip *chip, int page)
page             1053 drivers/mtd/nand/raw/mtk_nand.c 	return mtk_nfc_read_page_raw(chip, NULL, 1, page);
page              129 drivers/mtd/nand/raw/mxc_nand.c 			 int page);
page              714 drivers/mtd/nand/raw/mxc_nand.c 				 bool ecc, int page)
page              725 drivers/mtd/nand/raw/mxc_nand.c 	mxc_do_addr_cycle(mtd, 0, page);
page              772 drivers/mtd/nand/raw/mxc_nand.c 				    void *oob, bool ecc, int page)
page              784 drivers/mtd/nand/raw/mxc_nand.c 	mxc_do_addr_cycle(mtd, 0, page);
page              820 drivers/mtd/nand/raw/mxc_nand.c 			      int oob_required, int page)
page              830 drivers/mtd/nand/raw/mxc_nand.c 	return host->devtype_data->read_page(chip, buf, oob_buf, 1, page);
page              834 drivers/mtd/nand/raw/mxc_nand.c 				  int oob_required, int page)
page              844 drivers/mtd/nand/raw/mxc_nand.c 	return host->devtype_data->read_page(chip, buf, oob_buf, 0, page);
page              847 drivers/mtd/nand/raw/mxc_nand.c static int mxc_nand_read_oob(struct nand_chip *chip, int page)
page              852 drivers/mtd/nand/raw/mxc_nand.c 					     page);
page              856 drivers/mtd/nand/raw/mxc_nand.c 			       bool ecc, int page)
page              864 drivers/mtd/nand/raw/mxc_nand.c 	mxc_do_addr_cycle(mtd, 0, page);
page              871 drivers/mtd/nand/raw/mxc_nand.c 	mxc_do_addr_cycle(mtd, 0, page);
page              877 drivers/mtd/nand/raw/mxc_nand.c 				   int oob_required, int page)
page              879 drivers/mtd/nand/raw/mxc_nand.c 	return mxc_nand_write_page(chip, buf, true, page);
page              883 drivers/mtd/nand/raw/mxc_nand.c 				   int oob_required, int page)
page              885 drivers/mtd/nand/raw/mxc_nand.c 	return mxc_nand_write_page(chip, buf, false, page);
page              888 drivers/mtd/nand/raw/mxc_nand.c static int mxc_nand_write_oob(struct nand_chip *chip, int page)
page              895 drivers/mtd/nand/raw/mxc_nand.c 	return mxc_nand_write_page(chip, host->data_buf, false, page);
page              290 drivers/mtd/nand/raw/nand_base.c int nand_bbm_get_next_page(struct nand_chip *chip, int page)
page              298 drivers/mtd/nand/raw/nand_base.c 	if (page == 0 && !(chip->options & bbm_flags))
page              300 drivers/mtd/nand/raw/nand_base.c 	if (page == 0 && chip->options & NAND_BBM_FIRSTPAGE)
page              302 drivers/mtd/nand/raw/nand_base.c 	if (page <= 1 && chip->options & NAND_BBM_SECONDPAGE)
page              304 drivers/mtd/nand/raw/nand_base.c 	if (page <= last_page && chip->options & NAND_BBM_LASTPAGE)
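
The nand_bbm_get_next_page() entries sketch how the core walks the candidate pages that may carry a bad-block marker: the first, second, or last page of the block, depending on chip->options flags. A stand-alone rendering of that walk; the flag values here are hypothetical placeholders, the real ones live in the rawnand headers.

#include <stdio.h>

/* hypothetical flag values for illustration only */
#define NAND_BBM_FIRSTPAGE	(1 << 0)
#define NAND_BBM_SECONDPAGE	(1 << 1)
#define NAND_BBM_LASTPAGE	(1 << 2)

static int bbm_get_next_page(unsigned int options, int page, int last_page)
{
	/* mirrors the checks visible in nand_bbm_get_next_page() above */
	if (page == 0 && options & NAND_BBM_FIRSTPAGE)
		return 0;
	if (page <= 1 && options & NAND_BBM_SECONDPAGE)
		return 1;
	if (page <= last_page && options & NAND_BBM_LASTPAGE)
		return last_page;
	return -1;	/* no further BBM page to check */
}

int main(void)
{
	unsigned int opts = NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE;
	int page = 0;

	while ((page = bbm_get_next_page(opts, page, 63)) >= 0) {
		printf("check BBM in page %d\n", page);
		page++;	/* resume after the page just returned */
	}
	return 0;
}
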
page              448 drivers/mtd/nand/raw/nand_base.c 	int chipnr, page, status, len, ret;
page              477 drivers/mtd/nand/raw/nand_base.c 	page = (int)(to >> chip->page_shift);
page              486 drivers/mtd/nand/raw/nand_base.c 	if (page == chip->pagecache.page)
page              487 drivers/mtd/nand/raw/nand_base.c 		chip->pagecache.page = -1;
page              492 drivers/mtd/nand/raw/nand_base.c 		status = chip->ecc.write_oob_raw(chip, page & chip->pagemask);
page              494 drivers/mtd/nand/raw/nand_base.c 		status = chip->ecc.write_oob(chip, page & chip->pagemask);
page             1012 drivers/mtd/nand/raw/nand_base.c static int nand_sp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
page             1044 drivers/mtd/nand/raw/nand_base.c 	addrs[1] = page;
page             1045 drivers/mtd/nand/raw/nand_base.c 	addrs[2] = page >> 8;
page             1048 drivers/mtd/nand/raw/nand_base.c 		addrs[3] = page >> 16;
page             1055 drivers/mtd/nand/raw/nand_base.c static int nand_lp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
page             1081 drivers/mtd/nand/raw/nand_base.c 	addrs[2] = page;
page             1082 drivers/mtd/nand/raw/nand_base.c 	addrs[3] = page >> 8;
page             1085 drivers/mtd/nand/raw/nand_base.c 		addrs[4] = page >> 16;
page             1105 drivers/mtd/nand/raw/nand_base.c int nand_read_page_op(struct nand_chip *chip, unsigned int page,
page             1118 drivers/mtd/nand/raw/nand_base.c 			return nand_lp_exec_read_page_op(chip, page,
page             1122 drivers/mtd/nand/raw/nand_base.c 		return nand_sp_exec_read_page_op(chip, page, offset_in_page,
page             1126 drivers/mtd/nand/raw/nand_base.c 	chip->legacy.cmdfunc(chip, NAND_CMD_READ0, offset_in_page, page);
page             1146 drivers/mtd/nand/raw/nand_base.c int nand_read_param_page_op(struct nand_chip *chip, u8 page, void *buf,
page             1160 drivers/mtd/nand/raw/nand_base.c 			NAND_OP_ADDR(1, &page, PSEC_TO_NSEC(sdr->tWB_max)),
page             1174 drivers/mtd/nand/raw/nand_base.c 	chip->legacy.cmdfunc(chip, NAND_CMD_PARAM, page, -1);
page             1258 drivers/mtd/nand/raw/nand_base.c int nand_read_oob_op(struct nand_chip *chip, unsigned int page,
page             1270 drivers/mtd/nand/raw/nand_base.c 		return nand_read_page_op(chip, page,
page             1274 drivers/mtd/nand/raw/nand_base.c 	chip->legacy.cmdfunc(chip, NAND_CMD_READOOB, offset_in_oob, page);
page             1282 drivers/mtd/nand/raw/nand_base.c static int nand_exec_prog_page_op(struct nand_chip *chip, unsigned int page,
page             1311 drivers/mtd/nand/raw/nand_base.c 	addrs[naddrs++] = page;
page             1312 drivers/mtd/nand/raw/nand_base.c 	addrs[naddrs++] = page >> 8;
page             1314 drivers/mtd/nand/raw/nand_base.c 		addrs[naddrs++] = page >> 16;
page             1370 drivers/mtd/nand/raw/nand_base.c int nand_prog_page_begin_op(struct nand_chip *chip, unsigned int page,
page             1383 drivers/mtd/nand/raw/nand_base.c 		return nand_exec_prog_page_op(chip, page, offset_in_page, buf,
page             1386 drivers/mtd/nand/raw/nand_base.c 	chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, offset_in_page, page);
page             1455 drivers/mtd/nand/raw/nand_base.c int nand_prog_page_op(struct nand_chip *chip, unsigned int page,
page             1469 drivers/mtd/nand/raw/nand_base.c 		status = nand_exec_prog_page_op(chip, page, offset_in_page, buf,
page             1473 drivers/mtd/nand/raw/nand_base.c 				     page);
page             1674 drivers/mtd/nand/raw/nand_base.c 	unsigned int page = eraseblock <<
page             1682 drivers/mtd/nand/raw/nand_base.c 		u8 addrs[3] = {	page, page >> 8, page >> 16 };
page             1703 drivers/mtd/nand/raw/nand_base.c 		chip->legacy.cmdfunc(chip, NAND_CMD_ERASE1, -1, page);
page             2592 drivers/mtd/nand/raw/nand_base.c 			       int oob_required, int page)
page             2607 drivers/mtd/nand/raw/nand_base.c 		       int page)
page             2612 drivers/mtd/nand/raw/nand_base.c 	ret = nand_read_page_op(chip, page, 0, buf, mtd->writesize);
page             2637 drivers/mtd/nand/raw/nand_base.c 				       int oob_required, int page)
page             2645 drivers/mtd/nand/raw/nand_base.c 	ret = nand_read_page_op(chip, page, 0, NULL, 0);
page             2699 drivers/mtd/nand/raw/nand_base.c 				int oob_required, int page)
page             2710 drivers/mtd/nand/raw/nand_base.c 	chip->ecc.read_page_raw(chip, buf, 1, page);
page             2746 drivers/mtd/nand/raw/nand_base.c 			     uint32_t readlen, uint8_t *bufpoi, int page)
page             2771 drivers/mtd/nand/raw/nand_base.c 	ret = nand_read_page_op(chip, page, data_col_addr, p, datafrag_len);
page             2858 drivers/mtd/nand/raw/nand_base.c 				int oob_required, int page)
page             2869 drivers/mtd/nand/raw/nand_base.c 	ret = nand_read_page_op(chip, page, 0, NULL, 0);
page             2932 drivers/mtd/nand/raw/nand_base.c 					  int oob_required, int page)
page             2944 drivers/mtd/nand/raw/nand_base.c 	ret = nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
page             2948 drivers/mtd/nand/raw/nand_base.c 	ret = nand_read_page_op(chip, page, 0, NULL, 0);
page             2999 drivers/mtd/nand/raw/nand_base.c 				   int oob_required, int page)
page             3010 drivers/mtd/nand/raw/nand_base.c 	ret = nand_read_page_op(chip, page, 0, NULL, 0);
page             3156 drivers/mtd/nand/raw/nand_base.c 	int chipnr, page, realpage, col, bytes, aligned, oob_required;
page             3173 drivers/mtd/nand/raw/nand_base.c 	page = realpage & chip->pagemask;
page             3197 drivers/mtd/nand/raw/nand_base.c 		if (realpage != chip->pagecache.page || oob) {
page             3212 drivers/mtd/nand/raw/nand_base.c 							      page);
page             3216 drivers/mtd/nand/raw/nand_base.c 							     bufpoi, page);
page             3219 drivers/mtd/nand/raw/nand_base.c 							  oob_required, page);
page             3223 drivers/mtd/nand/raw/nand_base.c 					chip->pagecache.page = -1;
page             3232 drivers/mtd/nand/raw/nand_base.c 					chip->pagecache.page = realpage;
page             3236 drivers/mtd/nand/raw/nand_base.c 					chip->pagecache.page = -1;
page             3297 drivers/mtd/nand/raw/nand_base.c 		page = realpage & chip->pagemask;
page             3299 drivers/mtd/nand/raw/nand_base.c 		if (!page) {
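
In the read path above, nand_base.c keeps two page numbers: realpage, the absolute page derived from the byte offset, and page = realpage & chip->pagemask, the page within the currently selected die; when page wraps to 0 the core selects the next die, as the "if (!page)" checks suggest. A sketch of that split, assuming 2 KiB pages and 65536-page dies.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const int page_shift = 11;		/* assumed: 2 KiB pages */
	const int pagemask = 65536 - 1;		/* assumed: 65536 pages/die */
	uint64_t from = (uint64_t)70000 << page_shift;	/* lands on die 1 */

	int realpage = (int)(from >> page_shift);
	int page = realpage & pagemask;		/* page within the die */
	int chipnr = realpage >> 16;		/* die index (assumed log2) */

	printf("realpage %d -> chip %d, page %d\n", realpage, chipnr, page);
	return 0;
}
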
page             3325 drivers/mtd/nand/raw/nand_base.c int nand_read_oob_std(struct nand_chip *chip, int page)
page             3329 drivers/mtd/nand/raw/nand_base.c 	return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
page             3339 drivers/mtd/nand/raw/nand_base.c static int nand_read_oob_syndrome(struct nand_chip *chip, int page)
page             3348 drivers/mtd/nand/raw/nand_base.c 	ret = nand_read_page_op(chip, page, chip->ecc.size, NULL, 0);
page             3362 drivers/mtd/nand/raw/nand_base.c 				ret = nand_read_page_op(chip, page, pos, NULL,
page             3392 drivers/mtd/nand/raw/nand_base.c int nand_write_oob_std(struct nand_chip *chip, int page)
page             3396 drivers/mtd/nand/raw/nand_base.c 	return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
page             3407 drivers/mtd/nand/raw/nand_base.c static int nand_write_oob_syndrome(struct nand_chip *chip, int page)
page             3426 drivers/mtd/nand/raw/nand_base.c 	ret = nand_prog_page_begin_op(chip, page, pos, NULL, 0);
page             3487 drivers/mtd/nand/raw/nand_base.c 	int page, realpage, chipnr;
page             3506 drivers/mtd/nand/raw/nand_base.c 	page = realpage & chip->pagemask;
page             3510 drivers/mtd/nand/raw/nand_base.c 			ret = chip->ecc.read_oob_raw(chip, page);
page             3512 drivers/mtd/nand/raw/nand_base.c 			ret = chip->ecc.read_oob(chip, page);
page             3531 drivers/mtd/nand/raw/nand_base.c 		page = realpage & chip->pagemask;
page             3533 drivers/mtd/nand/raw/nand_base.c 		if (!page) {
page             3596 drivers/mtd/nand/raw/nand_base.c 				int oob_required, int page)
page             3611 drivers/mtd/nand/raw/nand_base.c 			int oob_required, int page)
page             3616 drivers/mtd/nand/raw/nand_base.c 	ret = nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
page             3642 drivers/mtd/nand/raw/nand_base.c 					int page)
page             3650 drivers/mtd/nand/raw/nand_base.c 	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
page             3703 drivers/mtd/nand/raw/nand_base.c 				 int oob_required, int page)
page             3721 drivers/mtd/nand/raw/nand_base.c 	return chip->ecc.write_page_raw(chip, buf, 1, page);
page             3732 drivers/mtd/nand/raw/nand_base.c 				 int oob_required, int page)
page             3741 drivers/mtd/nand/raw/nand_base.c 	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
page             3779 drivers/mtd/nand/raw/nand_base.c 				    int oob_required, int page)
page             3792 drivers/mtd/nand/raw/nand_base.c 	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
page             3849 drivers/mtd/nand/raw/nand_base.c 				    int oob_required, int page)
page             3859 drivers/mtd/nand/raw/nand_base.c 	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
page             3920 drivers/mtd/nand/raw/nand_base.c 			   int page, int raw)
page             3933 drivers/mtd/nand/raw/nand_base.c 						  page);
page             3936 drivers/mtd/nand/raw/nand_base.c 						 oob_required, page);
page             3938 drivers/mtd/nand/raw/nand_base.c 		status = chip->ecc.write_page(chip, buf, oob_required, page);
page             3960 drivers/mtd/nand/raw/nand_base.c 	int chipnr, realpage, page, column;
page             3994 drivers/mtd/nand/raw/nand_base.c 	page = realpage & chip->pagemask;
page             3997 drivers/mtd/nand/raw/nand_base.c 	if (to <= ((loff_t)chip->pagecache.page << chip->page_shift) &&
page             3998 drivers/mtd/nand/raw/nand_base.c 	    ((loff_t)chip->pagecache.page << chip->page_shift) < (to + ops->len))
page             3999 drivers/mtd/nand/raw/nand_base.c 		chip->pagecache.page = -1;
page             4043 drivers/mtd/nand/raw/nand_base.c 				      oob_required, page,
page             4056 drivers/mtd/nand/raw/nand_base.c 		page = realpage & chip->pagemask;
page             4058 drivers/mtd/nand/raw/nand_base.c 		if (!page) {
page             4170 drivers/mtd/nand/raw/nand_base.c 	int page, pages_per_block, ret, chipnr;
page             4186 drivers/mtd/nand/raw/nand_base.c 	page = (int)(instr->addr >> chip->page_shift);
page             4208 drivers/mtd/nand/raw/nand_base.c 		if (nand_block_checkbad(chip, ((loff_t) page) <<
page             4211 drivers/mtd/nand/raw/nand_base.c 				    __func__, page);
page             4220 drivers/mtd/nand/raw/nand_base.c 		if (page <= chip->pagecache.page && chip->pagecache.page <
page             4221 drivers/mtd/nand/raw/nand_base.c 		    (page + pages_per_block))
page             4222 drivers/mtd/nand/raw/nand_base.c 			chip->pagecache.page = -1;
page             4224 drivers/mtd/nand/raw/nand_base.c 		ret = nand_erase_op(chip, (page & chip->pagemask) >>
page             4228 drivers/mtd/nand/raw/nand_base.c 					__func__, page);
page             4230 drivers/mtd/nand/raw/nand_base.c 				((loff_t)page << chip->page_shift);
page             4236 drivers/mtd/nand/raw/nand_base.c 		page += pages_per_block;
page             4239 drivers/mtd/nand/raw/nand_base.c 		if (len && !(page & chip->pagemask)) {
page             5760 drivers/mtd/nand/raw/nand_base.c 	chip->pagecache.page = -1;
page              166 drivers/mtd/nand/raw/nand_bbt.c static int read_bbt(struct nand_chip *this, uint8_t *buf, int page, int num,
page              180 drivers/mtd/nand/raw/nand_bbt.c 	from = ((loff_t)page) << this->page_shift;
page              625 drivers/mtd/nand/raw/nand_bbt.c 	int startblock, dir, page, numblocks, i;
page              662 drivers/mtd/nand/raw/nand_bbt.c 		page = block << (this->bbt_erase_shift - this->page_shift);
page              665 drivers/mtd/nand/raw/nand_bbt.c 		if (!md || md->pages[chip] != page)
page              720 drivers/mtd/nand/raw/nand_bbt.c 	int bits, page, offs, numblocks, sft, sftmsk;
page              765 drivers/mtd/nand/raw/nand_bbt.c 		page = block << (this->bbt_erase_shift - this->page_shift);
page              786 drivers/mtd/nand/raw/nand_bbt.c 		to = ((loff_t)page) << this->page_shift;
page              809 drivers/mtd/nand/raw/nand_bbt.c 			pageoffs = page - (int)(to >> this->page_shift);
page              881 drivers/mtd/nand/raw/nand_bbt.c 		td->pages[chip++] = page;
page               56 drivers/mtd/nand/raw/nand_hynix.c 	int page;
page              230 drivers/mtd/nand/raw/nand_hynix.c 	ret = nand_read_page_op(chip, info->page, 0, buf, info->size);
page              360 drivers/mtd/nand/raw/nand_hynix.c 		.page = 0x21f,
page              367 drivers/mtd/nand/raw/nand_hynix.c 		.page = 0x200,
page              189 drivers/mtd/nand/raw/nand_micron.c 					   void *buf, int page,
page              222 drivers/mtd/nand/raw/nand_micron.c 	ret = nand_read_page_op(chip, page, 0, micron->ecc.rawbuf,
page              285 drivers/mtd/nand/raw/nand_micron.c 				 int oob_required, int page)
page              295 drivers/mtd/nand/raw/nand_micron.c 	ret = nand_read_page_op(chip, page, 0, NULL, 0);
page              314 drivers/mtd/nand/raw/nand_micron.c 							       buf, page,
page              327 drivers/mtd/nand/raw/nand_micron.c 				  int oob_required, int page)
page              335 drivers/mtd/nand/raw/nand_micron.c 	ret = nand_write_page_raw(chip, buf, oob_required, page);
page               43 drivers/mtd/nand/raw/nand_toshiba.c 			      int oob_required, int page)
page               47 drivers/mtd/nand/raw/nand_toshiba.c 	ret = nand_read_page_raw(chip, buf, oob_required, page);
page               56 drivers/mtd/nand/raw/nand_toshiba.c 				 uint32_t readlen, uint8_t *bufpoi, int page)
page               60 drivers/mtd/nand/raw/nand_toshiba.c 	ret = nand_read_page_op(chip, page, data_offs,
page              354 drivers/mtd/nand/raw/nandsim.c 	struct page *held_pages[NS_MAX_HELD_PAGES];
page             1302 drivers/mtd/nand/raw/nandsim.c 	struct page *page;
page             1311 drivers/mtd/nand/raw/nandsim.c 		page = find_get_page(mapping, index);
page             1312 drivers/mtd/nand/raw/nandsim.c 		if (page == NULL) {
page             1313 drivers/mtd/nand/raw/nandsim.c 			page = find_or_create_page(mapping, index, GFP_NOFS);
page             1314 drivers/mtd/nand/raw/nandsim.c 			if (page == NULL) {
page             1316 drivers/mtd/nand/raw/nandsim.c 				page = find_or_create_page(mapping, index, GFP_NOFS);
page             1318 drivers/mtd/nand/raw/nandsim.c 			if (page == NULL) {
page             1322 drivers/mtd/nand/raw/nandsim.c 			unlock_page(page);
page             1324 drivers/mtd/nand/raw/nandsim.c 		ns->held_pages[ns->held_cnt++] = page;
page             1524 drivers/mtd/nand/raw/omap2.c 			       int oob_required, int page)
page             1530 drivers/mtd/nand/raw/omap2.c 	nand_prog_page_begin_op(chip, page, 0, NULL, 0);
page             1565 drivers/mtd/nand/raw/omap2.c 				  int oob_required, int page)
page             1582 drivers/mtd/nand/raw/omap2.c 	nand_prog_page_begin_op(chip, page, 0, NULL, 0);
page             1633 drivers/mtd/nand/raw/omap2.c 			      int oob_required, int page)
page             1641 drivers/mtd/nand/raw/omap2.c 	nand_read_page_op(chip, page, 0, NULL, 0);
page              660 drivers/mtd/nand/raw/qcom_nandc.c static void set_address(struct qcom_nand_host *host, u16 column, int page)
page              668 drivers/mtd/nand/raw/qcom_nandc.c 	nandc_set_reg(nandc, NAND_ADDR0, page << 16 | column);
page              669 drivers/mtd/nand/raw/qcom_nandc.c 	nandc_set_reg(nandc, NAND_ADDR1, page >> 16 & 0xff);
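
qcom_nandc.c's set_address() (lines 660-669 above) packs the column and page into two address registers: the low 16 bits of the page share a register with the column, and the remaining page bits go into the second register. The same packing in isolation; the register names here are just labels.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int page = 0x1a2b3c, column = 0x40;

	/* pack page and column the way the set_address() entries suggest */
	uint32_t addr0 = (uint32_t)page << 16 | column;
	uint32_t addr1 = (page >> 16) & 0xff;

	printf("NAND_ADDR0 = 0x%08x\n", addr0);
	printf("NAND_ADDR1 = 0x%08x\n", addr1);
	return 0;
}
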
page             1584 drivers/mtd/nand/raw/qcom_nandc.c 		       u8 *data_buf, u8 *oob_buf, int page, int cw)
page             1592 drivers/mtd/nand/raw/qcom_nandc.c 	nand_read_page_op(chip, page, 0, NULL, 0);
page             1596 drivers/mtd/nand/raw/qcom_nandc.c 	set_address(host, host->cw_size * cw, page);
page             1667 drivers/mtd/nand/raw/qcom_nandc.c 		      int page, unsigned int max_bitflips)
page             1697 drivers/mtd/nand/raw/qcom_nandc.c 					     cw_oob_buf, page, cw);
page             1725 drivers/mtd/nand/raw/qcom_nandc.c 			     u8 *oob_buf, int page)
page             1820 drivers/mtd/nand/raw/qcom_nandc.c 				     uncorrectable_cws, page,
page             1829 drivers/mtd/nand/raw/qcom_nandc.c 			 u8 *oob_buf, int page)
page             1902 drivers/mtd/nand/raw/qcom_nandc.c 	return parse_read_errors(host, data_buf_start, oob_buf_start, page);
page             1909 drivers/mtd/nand/raw/qcom_nandc.c static int copy_last_cw(struct qcom_nand_host *host, int page)
page             1924 drivers/mtd/nand/raw/qcom_nandc.c 	set_address(host, host->cw_size * (ecc->steps - 1), page);
page             1942 drivers/mtd/nand/raw/qcom_nandc.c 				int oob_required, int page)
page             1948 drivers/mtd/nand/raw/qcom_nandc.c 	nand_read_page_op(chip, page, 0, NULL, 0);
page             1954 drivers/mtd/nand/raw/qcom_nandc.c 	return read_page_ecc(host, data_buf, oob_buf, page);
page             1959 drivers/mtd/nand/raw/qcom_nandc.c 				    int oob_required, int page)
page             1969 drivers/mtd/nand/raw/qcom_nandc.c 					     page, cw);
page             1981 drivers/mtd/nand/raw/qcom_nandc.c static int qcom_nandc_read_oob(struct nand_chip *chip, int page)
page             1991 drivers/mtd/nand/raw/qcom_nandc.c 	set_address(host, 0, page);
page             1994 drivers/mtd/nand/raw/qcom_nandc.c 	return read_page_ecc(host, NULL, chip->oob_poi, page);
page             1999 drivers/mtd/nand/raw/qcom_nandc.c 				 int oob_required, int page)
page             2007 drivers/mtd/nand/raw/qcom_nandc.c 	nand_prog_page_begin_op(chip, page, 0, NULL, 0);
page             2070 drivers/mtd/nand/raw/qcom_nandc.c 				     int page)
page             2079 drivers/mtd/nand/raw/qcom_nandc.c 	nand_prog_page_begin_op(chip, page, 0, NULL, 0);
page             2147 drivers/mtd/nand/raw/qcom_nandc.c static int qcom_nandc_write_oob(struct nand_chip *chip, int page)
page             2169 drivers/mtd/nand/raw/qcom_nandc.c 	set_address(host, host->cw_size * (ecc->steps - 1), page);
page             2195 drivers/mtd/nand/raw/qcom_nandc.c 	int page, ret, bbpos, bad = 0;
page             2197 drivers/mtd/nand/raw/qcom_nandc.c 	page = (int)(ofs >> chip->page_shift) & chip->pagemask;
page             2208 drivers/mtd/nand/raw/qcom_nandc.c 	ret = copy_last_cw(host, page);
page             2232 drivers/mtd/nand/raw/qcom_nandc.c 	int page, ret;
page             2244 drivers/mtd/nand/raw/qcom_nandc.c 	page = (int)(ofs >> chip->page_shift) & chip->pagemask;
page             2248 drivers/mtd/nand/raw/qcom_nandc.c 	set_address(host, host->cw_size * (ecc->steps - 1), page);
page              518 drivers/mtd/nand/raw/r852.c static int r852_read_oob(struct nand_chip *chip, int page)
page              522 drivers/mtd/nand/raw/r852.c 	return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
page              600 drivers/mtd/nand/raw/sh_flctl.c 				 int oob_required, int page)
page              604 drivers/mtd/nand/raw/sh_flctl.c 	nand_read_page_op(chip, page, 0, buf, mtd->writesize);
page              611 drivers/mtd/nand/raw/sh_flctl.c 				  int oob_required, int page)
page              615 drivers/mtd/nand/raw/sh_flctl.c 	nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
page              730 drivers/mtd/nand/raw/stm32_fmc2_nand.c 				int oob_required, int page)
page              742 drivers/mtd/nand/raw/stm32_fmc2_nand.c 	ret = nand_read_page_op(chip, page, 0, NULL, 0);
page              792 drivers/mtd/nand/raw/stm32_fmc2_nand.c static void stm32_fmc2_rw_page_init(struct nand_chip *chip, int page,
page              859 drivers/mtd/nand/raw/stm32_fmc2_nand.c 	csqar1 = FMC2_CSQCAR1_ADDC3(page);
page              860 drivers/mtd/nand/raw/stm32_fmc2_nand.c 	csqar1 |= FMC2_CSQCAR1_ADDC4(page >> 8);
page              875 drivers/mtd/nand/raw/stm32_fmc2_nand.c 		csqar2 |= FMC2_CSQCAR2_ADDC5(page >> 16);
page             1025 drivers/mtd/nand/raw/stm32_fmc2_nand.c 				      int page, int raw)
page             1031 drivers/mtd/nand/raw/stm32_fmc2_nand.c 	stm32_fmc2_rw_page_init(chip, page, raw, true);
page             1053 drivers/mtd/nand/raw/stm32_fmc2_nand.c 					   int page)
page             1062 drivers/mtd/nand/raw/stm32_fmc2_nand.c 	return stm32_fmc2_sequencer_write(chip, buf, oob_required, page, false);
page             1068 drivers/mtd/nand/raw/stm32_fmc2_nand.c 					       int page)
page             1077 drivers/mtd/nand/raw/stm32_fmc2_nand.c 	return stm32_fmc2_sequencer_write(chip, buf, oob_required, page, true);
page             1147 drivers/mtd/nand/raw/stm32_fmc2_nand.c 					  int oob_required, int page)
page             1162 drivers/mtd/nand/raw/stm32_fmc2_nand.c 	stm32_fmc2_rw_page_init(chip, page, 0, false);
page             1197 drivers/mtd/nand/raw/stm32_fmc2_nand.c 					      int oob_required, int page)
page             1208 drivers/mtd/nand/raw/stm32_fmc2_nand.c 	stm32_fmc2_rw_page_init(chip, page, 1, false);
page              592 drivers/mtd/nand/raw/sunxi_nand.c static u16 sunxi_nfc_randomizer_state(struct nand_chip *nand, int page,
page              609 drivers/mtd/nand/raw/sunxi_nand.c 	return seeds[page % mod];
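
sunxi_nand.c picks a per-page randomizer seed with seeds[page % mod] (line 609 above), so consecutive pages cycle through a fixed seed table. A toy version of that selection; the seed values below are made up, the real table is in the driver.

#include <stdio.h>

int main(void)
{
	/* hypothetical seed table; the real one lives in sunxi_nand.c */
	const unsigned short seeds[] = { 0x2b75, 0x0bd0, 0x5ca3, 0x62d1 };
	const int mod = sizeof(seeds) / sizeof(seeds[0]);

	/* per-page seed selection, as in sunxi_nfc_randomizer_state() */
	for (int page = 0; page < 6; page++)
		printf("page %d -> seed 0x%04x\n", page, seeds[page % mod]);
	return 0;
}
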
page              612 drivers/mtd/nand/raw/sunxi_nand.c static void sunxi_nfc_randomizer_config(struct nand_chip *nand, int page,
page              623 drivers/mtd/nand/raw/sunxi_nand.c 	state = sunxi_nfc_randomizer_state(nand, page, ecc);
page              650 drivers/mtd/nand/raw/sunxi_nand.c static void sunxi_nfc_randomize_bbm(struct nand_chip *nand, int page, u8 *bbm)
page              652 drivers/mtd/nand/raw/sunxi_nand.c 	u16 state = sunxi_nfc_randomizer_state(nand, page, true);
page              660 drivers/mtd/nand/raw/sunxi_nand.c 					   bool ecc, int page)
page              662 drivers/mtd/nand/raw/sunxi_nand.c 	sunxi_nfc_randomizer_config(nand, page, ecc);
page              669 drivers/mtd/nand/raw/sunxi_nand.c 					  int len, bool ecc, int page)
page              671 drivers/mtd/nand/raw/sunxi_nand.c 	sunxi_nfc_randomizer_config(nand, page, ecc);
page              717 drivers/mtd/nand/raw/sunxi_nand.c 						int step, bool bbm, int page)
page              726 drivers/mtd/nand/raw/sunxi_nand.c 		sunxi_nfc_randomize_bbm(nand, page, oob);
page              731 drivers/mtd/nand/raw/sunxi_nand.c 						bool bbm, int page)
page              739 drivers/mtd/nand/raw/sunxi_nand.c 		sunxi_nfc_randomize_bbm(nand, page, user_data);
page              801 drivers/mtd/nand/raw/sunxi_nand.c 				       bool bbm, bool oob_required, int page)
page              812 drivers/mtd/nand/raw/sunxi_nand.c 	sunxi_nfc_randomizer_read_buf(nand, NULL, ecc->size, false, page);
page              865 drivers/mtd/nand/raw/sunxi_nand.c 						      true, page);
page              868 drivers/mtd/nand/raw/sunxi_nand.c 							    bbm, page);
page              879 drivers/mtd/nand/raw/sunxi_nand.c 					    bool randomize, int page)
page              897 drivers/mtd/nand/raw/sunxi_nand.c 					      false, page);
page              904 drivers/mtd/nand/raw/sunxi_nand.c 					    int oob_required, int page,
page              926 drivers/mtd/nand/raw/sunxi_nand.c 	sunxi_nfc_randomizer_config(nand, page, false);
page              973 drivers/mtd/nand/raw/sunxi_nand.c 							    !i, page);
page             1021 drivers/mtd/nand/raw/sunxi_nand.c 						page);
page             1030 drivers/mtd/nand/raw/sunxi_nand.c 					int page)
page             1039 drivers/mtd/nand/raw/sunxi_nand.c 	sunxi_nfc_randomizer_write_buf(nand, data, ecc->size, false, page);
page             1049 drivers/mtd/nand/raw/sunxi_nand.c 	sunxi_nfc_hw_ecc_set_prot_oob_bytes(nand, oob, 0, bbm, page);
page             1067 drivers/mtd/nand/raw/sunxi_nand.c 					     int page)
page             1081 drivers/mtd/nand/raw/sunxi_nand.c 	sunxi_nfc_randomizer_write_buf(nand, oob + offset, len, false, page);
page             1088 drivers/mtd/nand/raw/sunxi_nand.c 				      int oob_required, int page)
page             1098 drivers/mtd/nand/raw/sunxi_nand.c 	nand_read_page_op(nand, page, 0, NULL, 0);
page             1111 drivers/mtd/nand/raw/sunxi_nand.c 						  !i, oob_required, page);
page             1120 drivers/mtd/nand/raw/sunxi_nand.c 						!raw_mode, page);
page             1128 drivers/mtd/nand/raw/sunxi_nand.c 					  int oob_required, int page)
page             1134 drivers/mtd/nand/raw/sunxi_nand.c 	nand_read_page_op(nand, page, 0, NULL, 0);
page             1136 drivers/mtd/nand/raw/sunxi_nand.c 	ret = sunxi_nfc_hw_ecc_read_chunks_dma(nand, buf, oob_required, page,
page             1142 drivers/mtd/nand/raw/sunxi_nand.c 	return sunxi_nfc_hw_ecc_read_page(nand, buf, oob_required, page);
page             1147 drivers/mtd/nand/raw/sunxi_nand.c 					 u8 *bufpoi, int page)
page             1156 drivers/mtd/nand/raw/sunxi_nand.c 	nand_read_page_op(nand, page, 0, NULL, 0);
page             1171 drivers/mtd/nand/raw/sunxi_nand.c 						  false, page);
page             1183 drivers/mtd/nand/raw/sunxi_nand.c 					     u8 *buf, int page)
page             1190 drivers/mtd/nand/raw/sunxi_nand.c 	nand_read_page_op(nand, page, 0, NULL, 0);
page             1192 drivers/mtd/nand/raw/sunxi_nand.c 	ret = sunxi_nfc_hw_ecc_read_chunks_dma(nand, buf, false, page, nchunks);
page             1198 drivers/mtd/nand/raw/sunxi_nand.c 					     buf, page);
page             1203 drivers/mtd/nand/raw/sunxi_nand.c 				       int page)
page             1211 drivers/mtd/nand/raw/sunxi_nand.c 	nand_prog_page_begin_op(nand, page, 0, NULL, 0);
page             1223 drivers/mtd/nand/raw/sunxi_nand.c 						   &cur_off, !i, page);
page             1230 drivers/mtd/nand/raw/sunxi_nand.c 						 &cur_off, page);
page             1240 drivers/mtd/nand/raw/sunxi_nand.c 					  int page)
page             1248 drivers/mtd/nand/raw/sunxi_nand.c 	nand_prog_page_begin_op(nand, page, 0, NULL, 0);
page             1261 drivers/mtd/nand/raw/sunxi_nand.c 						   &cur_off, !i, page);
page             1274 drivers/mtd/nand/raw/sunxi_nand.c 					   int page)
page             1295 drivers/mtd/nand/raw/sunxi_nand.c 		sunxi_nfc_hw_ecc_set_prot_oob_bytes(nand, oob, i, !i, page);
page             1298 drivers/mtd/nand/raw/sunxi_nand.c 	nand_prog_page_begin_op(nand, page, 0, NULL, 0);
page             1301 drivers/mtd/nand/raw/sunxi_nand.c 	sunxi_nfc_randomizer_config(nand, page, false);
page             1328 drivers/mtd/nand/raw/sunxi_nand.c 						 NULL, page);
page             1333 drivers/mtd/nand/raw/sunxi_nand.c 	return sunxi_nfc_hw_ecc_write_page(nand, buf, oob_required, page);
page             1336 drivers/mtd/nand/raw/sunxi_nand.c static int sunxi_nfc_hw_ecc_read_oob(struct nand_chip *nand, int page)
page             1340 drivers/mtd/nand/raw/sunxi_nand.c 	return nand->ecc.read_page(nand, buf, 1, page);
page             1343 drivers/mtd/nand/raw/sunxi_nand.c static int sunxi_nfc_hw_ecc_write_oob(struct nand_chip *nand, int page)
page             1350 drivers/mtd/nand/raw/sunxi_nand.c 	ret = nand->ecc.write_page(nand, buf, 1, page);
page              230 drivers/mtd/nand/raw/tango_nand.c 		  const void *buf, int len, int page)
page              256 drivers/mtd/nand/raw/tango_nand.c 	writel_relaxed(page, nfc->reg_base + NFC_ADDR_PAGE);
page              276 drivers/mtd/nand/raw/tango_nand.c 			   int oob_required, int page)
page              283 drivers/mtd/nand/raw/tango_nand.c 		chip->ecc.read_oob(chip, page);
page              285 drivers/mtd/nand/raw/tango_nand.c 	err = do_dma(nfc, DMA_FROM_DEVICE, NFC_READ, buf, len, page);
page              291 drivers/mtd/nand/raw/tango_nand.c 		chip->ecc.read_oob_raw(chip, page);
page              299 drivers/mtd/nand/raw/tango_nand.c 			    int oob_required, int page)
page              310 drivers/mtd/nand/raw/tango_nand.c 	err = do_dma(nfc, DMA_TO_DEVICE, NFC_WRITE, buf, len, page);
page              421 drivers/mtd/nand/raw/tango_nand.c 			       int oob_required, int page)
page              423 drivers/mtd/nand/raw/tango_nand.c 	nand_read_page_op(chip, page, 0, NULL, 0);
page              429 drivers/mtd/nand/raw/tango_nand.c 				int oob_required, int page)
page              431 drivers/mtd/nand/raw/tango_nand.c 	nand_prog_page_begin_op(chip, page, 0, NULL, 0);
page              436 drivers/mtd/nand/raw/tango_nand.c static int tango_read_oob(struct nand_chip *chip, int page)
page              438 drivers/mtd/nand/raw/tango_nand.c 	nand_read_page_op(chip, page, 0, NULL, 0);
page              443 drivers/mtd/nand/raw/tango_nand.c static int tango_write_oob(struct nand_chip *chip, int page)
page              445 drivers/mtd/nand/raw/tango_nand.c 	nand_prog_page_begin_op(chip, page, 0, NULL, 0);
page              492 drivers/mtd/nand/raw/tegra_nand.c 				void *buf, void *oob_buf, int oob_len, int page,
page              513 drivers/mtd/nand/raw/tegra_nand.c 	addr1 = page << 16;
page              520 drivers/mtd/nand/raw/tegra_nand.c 		writel_relaxed(page >> 16, ctrl->regs + ADDR_REG2);
page              615 drivers/mtd/nand/raw/tegra_nand.c 				    int oob_required, int page)
page              621 drivers/mtd/nand/raw/tegra_nand.c 				    mtd->oobsize, page, true);
page              625 drivers/mtd/nand/raw/tegra_nand.c 				     int oob_required, int page)
page              631 drivers/mtd/nand/raw/tegra_nand.c 				     mtd->oobsize, page, false);
page              634 drivers/mtd/nand/raw/tegra_nand.c static int tegra_nand_read_oob(struct nand_chip *chip, int page)
page              639 drivers/mtd/nand/raw/tegra_nand.c 				    mtd->oobsize, page, true);
page              642 drivers/mtd/nand/raw/tegra_nand.c static int tegra_nand_write_oob(struct nand_chip *chip, int page)
page              647 drivers/mtd/nand/raw/tegra_nand.c 				    mtd->oobsize, page, false);
page              651 drivers/mtd/nand/raw/tegra_nand.c 				      int oob_required, int page)
page              662 drivers/mtd/nand/raw/tegra_nand.c 	ret = tegra_nand_page_xfer(mtd, chip, buf, oob_buf, 0, page, true);
page              717 drivers/mtd/nand/raw/tegra_nand.c 		ret = tegra_nand_read_oob(chip, page);
page              761 drivers/mtd/nand/raw/tegra_nand.c 				       int oob_required, int page)
page              770 drivers/mtd/nand/raw/tegra_nand.c 				   0, page, false);
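Aside: the tegra_nand.c lines above split the NAND page number across two address registers ("addr1 = page << 16", "page >> 16" into ADDR_REG2). A minimal standalone sketch of that split; the column OR-ed into the low 16 bits is an assumption based on the usual NAND cycle layout, only the page shifts are visible in the listing:

#include <stdint.h>

/* ADDR_REG1 carries the column in its low 16 bits and the low page bits
 * above them; whatever does not fit spills into the second register,
 * which is only written for 5-cycle (large-chip) addressing. */
static void tegra_nand_addr_regs(uint32_t page, uint16_t column,
				 uint32_t *addr1, uint32_t *addr2)
{
	*addr1 = (page << 16) | column;
	*addr2 = page >> 16;
}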
page              511 drivers/mtd/nand/raw/vf610_nfc.c 					 uint8_t *oob, int page)
page              527 drivers/mtd/nand/raw/vf610_nfc.c 	nand_read_oob_op(&nfc->chip, page, 0, oob, mtd->oobsize);
page              539 drivers/mtd/nand/raw/vf610_nfc.c static void vf610_nfc_fill_row(struct nand_chip *chip, int page, u32 *code,
page              542 drivers/mtd/nand/raw/vf610_nfc.c 	*row = ROW_ADDR(0, page & 0xff) | ROW_ADDR(1, page >> 8);
page              546 drivers/mtd/nand/raw/vf610_nfc.c 		*row |= ROW_ADDR(2, page >> 16);
page              552 drivers/mtd/nand/raw/vf610_nfc.c 			       int oob_required, int page)
page              565 drivers/mtd/nand/raw/vf610_nfc.c 	vf610_nfc_fill_row(chip, page, &code, &row);
page              588 drivers/mtd/nand/raw/vf610_nfc.c 	stat = vf610_nfc_correct_data(chip, buf, chip->oob_poi, page);
page              600 drivers/mtd/nand/raw/vf610_nfc.c 				int oob_required, int page)
page              614 drivers/mtd/nand/raw/vf610_nfc.c 	vf610_nfc_fill_row(chip, page, &code, &row);
page              644 drivers/mtd/nand/raw/vf610_nfc.c 				   int oob_required, int page)
page              650 drivers/mtd/nand/raw/vf610_nfc.c 	ret = nand_read_page_raw(chip, buf, oob_required, page);
page              657 drivers/mtd/nand/raw/vf610_nfc.c 				    int oob_required, int page)
page              664 drivers/mtd/nand/raw/vf610_nfc.c 	ret = nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
page              676 drivers/mtd/nand/raw/vf610_nfc.c static int vf610_nfc_read_oob(struct nand_chip *chip, int page)
page              682 drivers/mtd/nand/raw/vf610_nfc.c 	ret = nand_read_oob_std(chip, page);
page              688 drivers/mtd/nand/raw/vf610_nfc.c static int vf610_nfc_write_oob(struct nand_chip *chip, int page)
page              695 drivers/mtd/nand/raw/vf610_nfc.c 	ret = nand_prog_page_begin_op(chip, page, mtd->writesize,
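Aside: vf610_nfc_fill_row() above packs the page number into byte lanes of a row-address word, adding a third cycle only for chips whose page count overflows 16 bits. A sketch under the assumption that ROW_ADDR(n, v) places v in byte lane n, which is what the usage suggests:

#include <stdint.h>

#define ROW_ADDR(n, v)	(((uint32_t)(v) & 0xff) << (8 * (n)))

static uint32_t vf610_fill_row(uint32_t page, int third_cycle)
{
	uint32_t row = ROW_ADDR(0, page & 0xff) | ROW_ADDR(1, page >> 8);

	if (third_cycle)	/* page number needs more than 16 bits */
		row |= ROW_ADDR(2, page >> 16);
	return row;
}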
page              912 drivers/mtd/spi-nor/spi-nor.c 	u32 offset, page;
page              915 drivers/mtd/spi-nor/spi-nor.c 	page = addr / nor->page_size;
page              916 drivers/mtd/spi-nor/spi-nor.c 	page <<= (nor->page_size > 512) ? 10 : 9;
page              918 drivers/mtd/spi-nor/spi-nor.c 	return page | offset;
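Aside: the spi-nor.c lines above convert a linear flash address into a page|offset form: flashes with pages larger than 512 bytes reserve a 10-bit in-page offset field, smaller ones 9 bits. A standalone reconstruction; the offset computation itself is not visible in the listing, and addr % page_size is the natural reading:

#include <stdint.h>

static uint32_t linear_to_page_offset(uint32_t addr, uint32_t page_size)
{
	uint32_t offset = addr % page_size;
	uint32_t page = addr / page_size;

	/* shift the page number above the offset field */
	page <<= (page_size > 512) ? 10 : 9;
	return page | offset;
}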
page              291 drivers/net/can/janz-ican3.c static inline void ican3_set_page(struct ican3_dev *mod, unsigned int page)
page              293 drivers/net/can/janz-ican3.c 	BUG_ON(page >= DPM_NUM_PAGES);
page              294 drivers/net/can/janz-ican3.c 	iowrite8(page, &mod->dpmctrl->window_address);
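Aside: janz-ican3.c reaches its dual-port memory through a paged window; ican3_set_page() writes the bank index into a window-address register, after which the fixed window aliases that bank. A hedged sketch of an access helper built on top of it; DPM_PAGE_SIZE and the mod->dpm mapping are illustrative stand-ins, only the page select appears in the listing:

static u8 ican3_read_byte(struct ican3_dev *mod, unsigned int addr)
{
	ican3_set_page(mod, addr / DPM_PAGE_SIZE);	/* select the bank */
	return ioread8(mod->dpm + (addr % DPM_PAGE_SIZE));
}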
page              508 drivers/net/can/peak_canfd/peak_pciefd_main.c 	struct pciefd_page *page = priv->tx_pages + priv->tx_page_index;
page              514 drivers/net/can/peak_canfd/peak_pciefd_main.c 	if (page->offset + msg_size > page->size) {
page              528 drivers/net/can/peak_canfd/peak_pciefd_main.c 		lk = page->vbase + page->offset;
page              533 drivers/net/can/peak_canfd/peak_pciefd_main.c 		page = priv->tx_pages + priv->tx_page_index;
page              538 drivers/net/can/peak_canfd/peak_pciefd_main.c 		lk->laddr_lo = cpu_to_le32(page->lbase);
page              541 drivers/net/can/peak_canfd/peak_pciefd_main.c 		lk->laddr_hi = cpu_to_le32(page->lbase >> 32);
page              546 drivers/net/can/peak_canfd/peak_pciefd_main.c 		page->offset = 0;
page              549 drivers/net/can/peak_canfd/peak_pciefd_main.c 	*room_left = priv->tx_pages_free * page->size;
page              553 drivers/net/can/peak_canfd/peak_pciefd_main.c 	msg = page->vbase + page->offset;
page              556 drivers/net/can/peak_canfd/peak_pciefd_main.c 	*room_left += page->size - (page->offset + msg_size);
page              565 drivers/net/can/peak_canfd/peak_pciefd_main.c 	struct pciefd_page *page = priv->tx_pages + priv->tx_page_index;
page              568 drivers/net/can/peak_canfd/peak_pciefd_main.c 	page->offset += le16_to_cpu(msg->size);
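Aside: peak_pciefd_main.c carves its TX area into pages and bump-allocates message slots inside the current one; when a message no longer fits, a link record written at the old tail chains to the next page's bus address (the lbase lines above). A condensed sketch of the bookkeeping, with illustrative struct and function names:

#include <stdint.h>
#include <stddef.h>

struct tx_page {
	uint8_t *vbase;		/* CPU address of the page */
	uint32_t offset;	/* next free byte */
	uint32_t size;		/* usable bytes, minus room for a link record */
};

/* Returns the slot for a message of msg_size bytes, or NULL when the
 * caller must chain to the next page via a link record. */
static void *tx_page_reserve(struct tx_page *page, uint32_t msg_size)
{
	void *slot;

	if (page->offset + msg_size > page->size)
		return NULL;

	slot = page->vbase + page->offset;
	page->offset += msg_size;
	return slot;
}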
page              717 drivers/net/can/rcar/rcar_canfd.c 	int offset, start, page, num_rules = RCANFD_CHANNEL_NUMRULES;
page              729 drivers/net/can/rcar/rcar_canfd.c 	page = RCANFD_GAFL_PAGENUM(start);
page              731 drivers/net/can/rcar/rcar_canfd.c 			   (RCANFD_GAFLECTR_AFLPN(page) |
page               41 drivers/net/dsa/b53/b53_mdio.c static int b53_mdio_op(struct b53_device *dev, u8 page, u8 reg, u16 op)
page               48 drivers/net/dsa/b53/b53_mdio.c 	if (dev->current_page != page) {
page               50 drivers/net/dsa/b53/b53_mdio.c 		v = (page << 8) | REG_MII_PAGE_ENABLE;
page               55 drivers/net/dsa/b53/b53_mdio.c 		dev->current_page = page;
page               79 drivers/net/dsa/b53/b53_mdio.c static int b53_mdio_read8(struct b53_device *dev, u8 page, u8 reg, u8 *val)
page               84 drivers/net/dsa/b53/b53_mdio.c 	ret = b53_mdio_op(dev, page, reg, REG_MII_ADDR_READ);
page               94 drivers/net/dsa/b53/b53_mdio.c static int b53_mdio_read16(struct b53_device *dev, u8 page, u8 reg, u16 *val)
page               99 drivers/net/dsa/b53/b53_mdio.c 	ret = b53_mdio_op(dev, page, reg, REG_MII_ADDR_READ);
page              108 drivers/net/dsa/b53/b53_mdio.c static int b53_mdio_read32(struct b53_device *dev, u8 page, u8 reg, u32 *val)
page              113 drivers/net/dsa/b53/b53_mdio.c 	ret = b53_mdio_op(dev, page, reg, REG_MII_ADDR_READ);
page              124 drivers/net/dsa/b53/b53_mdio.c static int b53_mdio_read48(struct b53_device *dev, u8 page, u8 reg, u64 *val)
page              131 drivers/net/dsa/b53/b53_mdio.c 	ret = b53_mdio_op(dev, page, reg, REG_MII_ADDR_READ);
page              146 drivers/net/dsa/b53/b53_mdio.c static int b53_mdio_read64(struct b53_device *dev, u8 page, u8 reg, u64 *val)
page              153 drivers/net/dsa/b53/b53_mdio.c 	ret = b53_mdio_op(dev, page, reg, REG_MII_ADDR_READ);
page              168 drivers/net/dsa/b53/b53_mdio.c static int b53_mdio_write8(struct b53_device *dev, u8 page, u8 reg, u8 value)
page              178 drivers/net/dsa/b53/b53_mdio.c 	return b53_mdio_op(dev, page, reg, REG_MII_ADDR_WRITE);
page              181 drivers/net/dsa/b53/b53_mdio.c static int b53_mdio_write16(struct b53_device *dev, u8 page, u8 reg,
page              192 drivers/net/dsa/b53/b53_mdio.c 	return b53_mdio_op(dev, page, reg, REG_MII_ADDR_WRITE);
page              195 drivers/net/dsa/b53/b53_mdio.c static int b53_mdio_write32(struct b53_device *dev, u8 page, u8 reg,
page              211 drivers/net/dsa/b53/b53_mdio.c 	return b53_mdio_op(dev, page, reg, REG_MII_ADDR_WRITE);
page              214 drivers/net/dsa/b53/b53_mdio.c static int b53_mdio_write48(struct b53_device *dev, u8 page, u8 reg,
page              230 drivers/net/dsa/b53/b53_mdio.c 	return b53_mdio_op(dev, page, reg, REG_MII_ADDR_WRITE);
page              233 drivers/net/dsa/b53/b53_mdio.c static int b53_mdio_write64(struct b53_device *dev, u8 page, u8 reg,
page              249 drivers/net/dsa/b53/b53_mdio.c 	return b53_mdio_op(dev, page, reg, REG_MII_ADDR_WRITE);
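Aside: b53_mdio.c funnels every register access through one page-select write and caches the last page in dev->current_page to skip redundant bus cycles. A sketch of that step; REG_MII_PAGE, BRCM_PSEUDO_PHY_ADDR, the dev->bus field and the use of plain mdiobus_write() are assumptions, while the (page << 8) | REG_MII_PAGE_ENABLE encoding and the cache come straight from the listing:

static int b53_mdio_select_page(struct b53_device *dev, u8 page)
{
	u16 v;
	int ret;

	if (dev->current_page == page)
		return 0;	/* avoid the extra MDIO cycle */

	v = (page << 8) | REG_MII_PAGE_ENABLE;
	ret = mdiobus_write(dev->bus, BRCM_PSEUDO_PHY_ADDR, REG_MII_PAGE, v);
	if (ret)
		return ret;

	dev->current_page = page;
	return 0;
}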
page               31 drivers/net/dsa/b53/b53_mmap.c static int b53_mmap_read8(struct b53_device *dev, u8 page, u8 reg, u8 *val)
page               36 drivers/net/dsa/b53/b53_mmap.c 	*val = readb(regs + (page << 8) + reg);
page               41 drivers/net/dsa/b53/b53_mmap.c static int b53_mmap_read16(struct b53_device *dev, u8 page, u8 reg, u16 *val)
page               50 drivers/net/dsa/b53/b53_mmap.c 		*val = ioread16be(regs + (page << 8) + reg);
page               52 drivers/net/dsa/b53/b53_mmap.c 		*val = readw(regs + (page << 8) + reg);
page               57 drivers/net/dsa/b53/b53_mmap.c static int b53_mmap_read32(struct b53_device *dev, u8 page, u8 reg, u32 *val)
page               66 drivers/net/dsa/b53/b53_mmap.c 		*val = ioread32be(regs + (page << 8) + reg);
page               68 drivers/net/dsa/b53/b53_mmap.c 		*val = readl(regs + (page << 8) + reg);
page               73 drivers/net/dsa/b53/b53_mmap.c static int b53_mmap_read48(struct b53_device *dev, u8 page, u8 reg, u64 *val)
page               86 drivers/net/dsa/b53/b53_mmap.c 			lo = ioread16be(regs + (page << 8) + reg);
page               87 drivers/net/dsa/b53/b53_mmap.c 			hi = ioread32be(regs + (page << 8) + reg + 2);
page               89 drivers/net/dsa/b53/b53_mmap.c 			lo = readw(regs + (page << 8) + reg);
page               90 drivers/net/dsa/b53/b53_mmap.c 			hi = readl(regs + (page << 8) + reg + 2);
page               99 drivers/net/dsa/b53/b53_mmap.c 			lo = ioread32be(regs + (page << 8) + reg);
page              100 drivers/net/dsa/b53/b53_mmap.c 			hi = ioread16be(regs + (page << 8) + reg + 4);
page              102 drivers/net/dsa/b53/b53_mmap.c 			lo = readl(regs + (page << 8) + reg);
page              103 drivers/net/dsa/b53/b53_mmap.c 			hi = readw(regs + (page << 8) + reg + 4);
page              112 drivers/net/dsa/b53/b53_mmap.c static int b53_mmap_read64(struct b53_device *dev, u8 page, u8 reg, u64 *val)
page              122 drivers/net/dsa/b53/b53_mmap.c 		lo = ioread32be(regs + (page << 8) + reg);
page              123 drivers/net/dsa/b53/b53_mmap.c 		hi = ioread32be(regs + (page << 8) + reg + 4);
page              125 drivers/net/dsa/b53/b53_mmap.c 		lo = readl(regs + (page << 8) + reg);
page              126 drivers/net/dsa/b53/b53_mmap.c 		hi = readl(regs + (page << 8) + reg + 4);
page              134 drivers/net/dsa/b53/b53_mmap.c static int b53_mmap_write8(struct b53_device *dev, u8 page, u8 reg, u8 value)
page              139 drivers/net/dsa/b53/b53_mmap.c 	writeb(value, regs + (page << 8) + reg);
page              144 drivers/net/dsa/b53/b53_mmap.c static int b53_mmap_write16(struct b53_device *dev, u8 page, u8 reg,
page              154 drivers/net/dsa/b53/b53_mmap.c 		iowrite16be(value, regs + (page << 8) + reg);
page              156 drivers/net/dsa/b53/b53_mmap.c 		writew(value, regs + (page << 8) + reg);
page              161 drivers/net/dsa/b53/b53_mmap.c static int b53_mmap_write32(struct b53_device *dev, u8 page, u8 reg,
page              171 drivers/net/dsa/b53/b53_mmap.c 		iowrite32be(value, regs + (page << 8) + reg);
page              173 drivers/net/dsa/b53/b53_mmap.c 		writel(value, regs + (page << 8) + reg);
page              178 drivers/net/dsa/b53/b53_mmap.c static int b53_mmap_write48(struct b53_device *dev, u8 page, u8 reg,
page              188 drivers/net/dsa/b53/b53_mmap.c 		b53_mmap_write16(dev, page, reg, lo);
page              189 drivers/net/dsa/b53/b53_mmap.c 		b53_mmap_write32(dev, page, reg + 2, hi);
page              194 drivers/net/dsa/b53/b53_mmap.c 		b53_mmap_write32(dev, page, reg, lo);
page              195 drivers/net/dsa/b53/b53_mmap.c 		b53_mmap_write16(dev, page, reg + 4, hi);
page              201 drivers/net/dsa/b53/b53_mmap.c static int b53_mmap_write64(struct b53_device *dev, u8 page, u8 reg,
page              212 drivers/net/dsa/b53/b53_mmap.c 	b53_mmap_write32(dev, page, reg, lo);
page              213 drivers/net/dsa/b53/b53_mmap.c 	b53_mmap_write32(dev, page, reg + 4, hi);
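Aside: b53_mmap.c needs no page-select write at all; the page is simply bits 8..15 of the MMIO offset, as every (page << 8) + reg above shows, and odd widths such as 48 bits are stitched from a 16-bit and a 32-bit read whose order depends on the register's alignment and the controller's endianness. The address math in isolation:

#include <stdint.h>

static inline uint32_t b53_mmap_offset(uint8_t page, uint8_t reg)
{
	/* 256-byte pages laid out back to back in the register window */
	return ((uint32_t)page << 8) + reg;
}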
page               35 drivers/net/dsa/b53/b53_priv.h 	int (*read8)(struct b53_device *dev, u8 page, u8 reg, u8 *value);
page               36 drivers/net/dsa/b53/b53_priv.h 	int (*read16)(struct b53_device *dev, u8 page, u8 reg, u16 *value);
page               37 drivers/net/dsa/b53/b53_priv.h 	int (*read32)(struct b53_device *dev, u8 page, u8 reg, u32 *value);
page               38 drivers/net/dsa/b53/b53_priv.h 	int (*read48)(struct b53_device *dev, u8 page, u8 reg, u64 *value);
page               39 drivers/net/dsa/b53/b53_priv.h 	int (*read64)(struct b53_device *dev, u8 page, u8 reg, u64 *value);
page               40 drivers/net/dsa/b53/b53_priv.h 	int (*write8)(struct b53_device *dev, u8 page, u8 reg, u8 value);
page               41 drivers/net/dsa/b53/b53_priv.h 	int (*write16)(struct b53_device *dev, u8 page, u8 reg, u16 value);
page               42 drivers/net/dsa/b53/b53_priv.h 	int (*write32)(struct b53_device *dev, u8 page, u8 reg, u32 value);
page               43 drivers/net/dsa/b53/b53_priv.h 	int (*write48)(struct b53_device *dev, u8 page, u8 reg, u64 value);
page               44 drivers/net/dsa/b53/b53_priv.h 	int (*write64)(struct b53_device *dev, u8 page, u8 reg, u64 value);
page              228 drivers/net/dsa/b53/b53_priv.h static inline int b53_##type_op_size(struct b53_device *dev, u8 page,	\
page              234 drivers/net/dsa/b53/b53_priv.h 	ret = dev->ops->type_op_size(dev, page, reg, val);		\
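Aside: the macro at b53_priv.h:228 stamps out one locked wrapper per access width, dispatching through the read8..write64 ops table declared just above it. Hand-expanded for read8, assuming the lock is a reg_mutex field on the device (the lock's name is not visible in the listing):

static inline int b53_read8(struct b53_device *dev, u8 page, u8 reg, u8 *val)
{
	int ret;

	mutex_lock(&dev->reg_mutex);
	ret = dev->ops->read8(dev, page, reg, val);	/* mdio/mmap/spi/srab */
	mutex_unlock(&dev->reg_mutex);

	return ret;
}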
page               76 drivers/net/dsa/b53/b53_spi.c static inline int b53_spi_set_page(struct spi_device *spi, u8 page)
page               82 drivers/net/dsa/b53/b53_spi.c 	txbuf[2] = page;
page               87 drivers/net/dsa/b53/b53_spi.c static inline int b53_prepare_reg_access(struct spi_device *spi, u8 page)
page               94 drivers/net/dsa/b53/b53_spi.c 	return b53_spi_set_page(spi, page);
page              124 drivers/net/dsa/b53/b53_spi.c static int b53_spi_read(struct b53_device *dev, u8 page, u8 reg, u8 *data,
page              130 drivers/net/dsa/b53/b53_spi.c 	ret = b53_prepare_reg_access(spi, page);
page              141 drivers/net/dsa/b53/b53_spi.c static int b53_spi_read8(struct b53_device *dev, u8 page, u8 reg, u8 *val)
page              143 drivers/net/dsa/b53/b53_spi.c 	return b53_spi_read(dev, page, reg, val, 1);
page              146 drivers/net/dsa/b53/b53_spi.c static int b53_spi_read16(struct b53_device *dev, u8 page, u8 reg, u16 *val)
page              148 drivers/net/dsa/b53/b53_spi.c 	int ret = b53_spi_read(dev, page, reg, (u8 *)val, 2);
page              156 drivers/net/dsa/b53/b53_spi.c static int b53_spi_read32(struct b53_device *dev, u8 page, u8 reg, u32 *val)
page              158 drivers/net/dsa/b53/b53_spi.c 	int ret = b53_spi_read(dev, page, reg, (u8 *)val, 4);
page              166 drivers/net/dsa/b53/b53_spi.c static int b53_spi_read48(struct b53_device *dev, u8 page, u8 reg, u64 *val)
page              171 drivers/net/dsa/b53/b53_spi.c 	ret = b53_spi_read(dev, page, reg, (u8 *)val, 6);
page              178 drivers/net/dsa/b53/b53_spi.c static int b53_spi_read64(struct b53_device *dev, u8 page, u8 reg, u64 *val)
page              180 drivers/net/dsa/b53/b53_spi.c 	int ret = b53_spi_read(dev, page, reg, (u8 *)val, 8);
page              188 drivers/net/dsa/b53/b53_spi.c static int b53_spi_write8(struct b53_device *dev, u8 page, u8 reg, u8 value)
page              194 drivers/net/dsa/b53/b53_spi.c 	ret = b53_prepare_reg_access(spi, page);
page              205 drivers/net/dsa/b53/b53_spi.c static int b53_spi_write16(struct b53_device *dev, u8 page, u8 reg, u16 value)
page              211 drivers/net/dsa/b53/b53_spi.c 	ret = b53_prepare_reg_access(spi, page);
page              222 drivers/net/dsa/b53/b53_spi.c static int b53_spi_write32(struct b53_device *dev, u8 page, u8 reg, u32 value)
page              228 drivers/net/dsa/b53/b53_spi.c 	ret = b53_prepare_reg_access(spi, page);
page              239 drivers/net/dsa/b53/b53_spi.c static int b53_spi_write48(struct b53_device *dev, u8 page, u8 reg, u64 value)
page              245 drivers/net/dsa/b53/b53_spi.c 	ret = b53_prepare_reg_access(spi, page);
page              256 drivers/net/dsa/b53/b53_spi.c static int b53_spi_write64(struct b53_device *dev, u8 page, u8 reg, u64 value)
page              262 drivers/net/dsa/b53/b53_spi.c 	ret = b53_prepare_reg_access(spi, page);
page              125 drivers/net/dsa/b53/b53_srab.c static int b53_srab_op(struct b53_device *dev, u8 page, u8 reg, u32 op)
page              133 drivers/net/dsa/b53/b53_srab.c 	cmdstat = (page << B53_SRAB_CMDSTAT_PAGE) |
page              153 drivers/net/dsa/b53/b53_srab.c static int b53_srab_read8(struct b53_device *dev, u8 page, u8 reg, u8 *val)
page              163 drivers/net/dsa/b53/b53_srab.c 	ret = b53_srab_op(dev, page, reg, 0);
page              175 drivers/net/dsa/b53/b53_srab.c static int b53_srab_read16(struct b53_device *dev, u8 page, u8 reg, u16 *val)
page              185 drivers/net/dsa/b53/b53_srab.c 	ret = b53_srab_op(dev, page, reg, 0);
page              197 drivers/net/dsa/b53/b53_srab.c static int b53_srab_read32(struct b53_device *dev, u8 page, u8 reg, u32 *val)
page              207 drivers/net/dsa/b53/b53_srab.c 	ret = b53_srab_op(dev, page, reg, 0);
page              219 drivers/net/dsa/b53/b53_srab.c static int b53_srab_read48(struct b53_device *dev, u8 page, u8 reg, u64 *val)
page              229 drivers/net/dsa/b53/b53_srab.c 	ret = b53_srab_op(dev, page, reg, 0);
page              242 drivers/net/dsa/b53/b53_srab.c static int b53_srab_read64(struct b53_device *dev, u8 page, u8 reg, u64 *val)
page              252 drivers/net/dsa/b53/b53_srab.c 	ret = b53_srab_op(dev, page, reg, 0);
page              265 drivers/net/dsa/b53/b53_srab.c static int b53_srab_write8(struct b53_device *dev, u8 page, u8 reg, u8 value)
page              277 drivers/net/dsa/b53/b53_srab.c 	ret = b53_srab_op(dev, page, reg, B53_SRAB_CMDSTAT_WRITE);
page              285 drivers/net/dsa/b53/b53_srab.c static int b53_srab_write16(struct b53_device *dev, u8 page, u8 reg,
page              298 drivers/net/dsa/b53/b53_srab.c 	ret = b53_srab_op(dev, page, reg, B53_SRAB_CMDSTAT_WRITE);
page              306 drivers/net/dsa/b53/b53_srab.c static int b53_srab_write32(struct b53_device *dev, u8 page, u8 reg,
page              319 drivers/net/dsa/b53/b53_srab.c 	ret = b53_srab_op(dev, page, reg, B53_SRAB_CMDSTAT_WRITE);
page              327 drivers/net/dsa/b53/b53_srab.c static int b53_srab_write48(struct b53_device *dev, u8 page, u8 reg,
page              341 drivers/net/dsa/b53/b53_srab.c 	ret = b53_srab_op(dev, page, reg, B53_SRAB_CMDSTAT_WRITE);
page              349 drivers/net/dsa/b53/b53_srab.c static int b53_srab_write64(struct b53_device *dev, u8 page, u8 reg,
page              363 drivers/net/dsa/b53/b53_srab.c 	ret = b53_srab_op(dev, page, reg, B53_SRAB_CMDSTAT_WRITE);
page              815 drivers/net/dsa/bcm_sf2.c #define SF2_PAGE_REG_MKADDR(page, reg)	((page) << 10 | (reg) << 2)
page              817 drivers/net/dsa/bcm_sf2.c static int bcm_sf2_core_read8(struct b53_device *dev, u8 page, u8 reg,
page              822 drivers/net/dsa/bcm_sf2.c 	*val = core_readl(priv, SF2_PAGE_REG_MKADDR(page, reg));
page              827 drivers/net/dsa/bcm_sf2.c static int bcm_sf2_core_read16(struct b53_device *dev, u8 page, u8 reg,
page              832 drivers/net/dsa/bcm_sf2.c 	*val = core_readl(priv, SF2_PAGE_REG_MKADDR(page, reg));
page              837 drivers/net/dsa/bcm_sf2.c static int bcm_sf2_core_read32(struct b53_device *dev, u8 page, u8 reg,
page              842 drivers/net/dsa/bcm_sf2.c 	*val = core_readl(priv, SF2_PAGE_REG_MKADDR(page, reg));
page              847 drivers/net/dsa/bcm_sf2.c static int bcm_sf2_core_read64(struct b53_device *dev, u8 page, u8 reg,
page              852 drivers/net/dsa/bcm_sf2.c 	*val = core_readq(priv, SF2_PAGE_REG_MKADDR(page, reg));
page              857 drivers/net/dsa/bcm_sf2.c static int bcm_sf2_core_write8(struct b53_device *dev, u8 page, u8 reg,
page              862 drivers/net/dsa/bcm_sf2.c 	core_writel(priv, value, SF2_PAGE_REG_MKADDR(page, reg));
page              867 drivers/net/dsa/bcm_sf2.c static int bcm_sf2_core_write16(struct b53_device *dev, u8 page, u8 reg,
page              872 drivers/net/dsa/bcm_sf2.c 	core_writel(priv, value, SF2_PAGE_REG_MKADDR(page, reg));
page              877 drivers/net/dsa/bcm_sf2.c static int bcm_sf2_core_write32(struct b53_device *dev, u8 page, u8 reg,
page              882 drivers/net/dsa/bcm_sf2.c 	core_writel(priv, value, SF2_PAGE_REG_MKADDR(page, reg));
page              887 drivers/net/dsa/bcm_sf2.c static int bcm_sf2_core_write64(struct b53_device *dev, u8 page, u8 reg,
page              892 drivers/net/dsa/bcm_sf2.c 	core_writeq(priv, value, SF2_PAGE_REG_MKADDR(page, reg));
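Aside: bcm_sf2.c maps the same (page, reg) namespace onto a flat core window; SF2_PAGE_REG_MKADDR above spaces pages 1 KiB apart and registers 4 bytes apart. The same math as a standalone helper:

#include <stdint.h>

static inline uint32_t sf2_mkaddr(uint8_t page, uint8_t reg)
{
	return ((uint32_t)page << 10) | ((uint32_t)reg << 2);
}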
page              175 drivers/net/dsa/mt7530.c 	u16 page, r, lo, hi;
page              178 drivers/net/dsa/mt7530.c 	page = (reg >> 6) & 0x3ff;
page              184 drivers/net/dsa/mt7530.c 	ret = bus->write(bus, 0x1f, 0x1f, page);
page              204 drivers/net/dsa/mt7530.c 	u16 page, r, lo, hi;
page              207 drivers/net/dsa/mt7530.c 	page = (reg >> 6) & 0x3ff;
page              211 drivers/net/dsa/mt7530.c 	ret = bus->write(bus, 0x1f, 0x1f, page);
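Aside: mt7530.c tunnels 32-bit switch registers over MDIO; bits 6..15 of the register address select a page written to PHY 0x1f, register 0x1f, after which the value moves as two 16-bit halves (the lo/hi locals above). The page-select step in isolation, with bus->write() used raw as in the listing:

static int mt7530_select_page(struct mii_bus *bus, u32 reg)
{
	u16 page = (reg >> 6) & 0x3ff;

	return bus->write(bus, 0x1f, 0x1f, page);
}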
page               58 drivers/net/dsa/mv88e6xxx/phy.c static int mv88e6xxx_phy_page_get(struct mv88e6xxx_chip *chip, int phy, u8 page)
page               60 drivers/net/dsa/mv88e6xxx/phy.c 	return mv88e6xxx_phy_write(chip, phy, MV88E6XXX_PHY_PAGE, page);
page               80 drivers/net/dsa/mv88e6xxx/phy.c 			    u8 page, int reg, u16 *val)
page               88 drivers/net/dsa/mv88e6xxx/phy.c 	err = mv88e6xxx_phy_page_get(chip, phy, page);
page               98 drivers/net/dsa/mv88e6xxx/phy.c 			     u8 page, int reg, u16 val)
page              106 drivers/net/dsa/mv88e6xxx/phy.c 	err = mv88e6xxx_phy_page_get(chip, phy, page);
page              108 drivers/net/dsa/mv88e6xxx/phy.c 		err = mv88e6xxx_phy_write(chip, phy, MV88E6XXX_PHY_PAGE, page);
page               32 drivers/net/dsa/mv88e6xxx/phy.h 			    u8 page, int reg, u16 *val);
page               34 drivers/net/dsa/mv88e6xxx/phy.h 			     u8 page, int reg, u16 val);
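Aside: the mv88e6xxx PHY code brackets paged accesses: point MV88E6XXX_PHY_PAGE at the wanted bank, perform the access, then put the page back. A hedged sketch of a paged read; the restore-to-0 value and the mv88e6xxx_phy_read() signature are assumptions, only the page writes appear in the listing:

static int phy_read_paged(struct mv88e6xxx_chip *chip, int phy, u8 page,
			  int reg, u16 *val)
{
	int err;

	err = mv88e6xxx_phy_write(chip, phy, MV88E6XXX_PHY_PAGE, page);
	if (!err)
		err = mv88e6xxx_phy_read(chip, phy, reg, val);

	/* best effort: drop back to the default page even on failure */
	mv88e6xxx_phy_write(chip, phy, MV88E6XXX_PHY_PAGE, 0);
	return err;
}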
page               78 drivers/net/dsa/qca8k.c qca8k_split_addr(u32 regaddr, u16 *r1, u16 *r2, u16 *page)
page               87 drivers/net/dsa/qca8k.c 	*page = regaddr & 0x3ff;
page              130 drivers/net/dsa/qca8k.c qca8k_set_page(struct mii_bus *bus, u16 page)
page              132 drivers/net/dsa/qca8k.c 	if (page == qca8k_current_page)
page              135 drivers/net/dsa/qca8k.c 	if (bus->write(bus, 0x18, 0, page) < 0)
page              138 drivers/net/dsa/qca8k.c 	qca8k_current_page = page;
page              144 drivers/net/dsa/qca8k.c 	u16 r1, r2, page;
page              147 drivers/net/dsa/qca8k.c 	qca8k_split_addr(reg, &r1, &r2, &page);
page              151 drivers/net/dsa/qca8k.c 	qca8k_set_page(priv->bus, page);
page              162 drivers/net/dsa/qca8k.c 	u16 r1, r2, page;
page              164 drivers/net/dsa/qca8k.c 	qca8k_split_addr(reg, &r1, &r2, &page);
page              168 drivers/net/dsa/qca8k.c 	qca8k_set_page(priv->bus, page);
page              177 drivers/net/dsa/qca8k.c 	u16 r1, r2, page;
page              180 drivers/net/dsa/qca8k.c 	qca8k_split_addr(reg, &r1, &r2, &page);
page              184 drivers/net/dsa/qca8k.c 	qca8k_set_page(priv->bus, page);
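Aside: qca8k.c combines both tricks: qca8k_split_addr() derives a 10-bit page (plus in-page register fields not shown here) from the 32-bit address, and qca8k_set_page() skips the window write when the cached page already matches. The caching, reconstructed to match qca8k.c:130-138; the error value is assumed:

static u16 qca8k_cached_page = 0xffff;	/* forces the first select */

static int qca8k_select_page(struct mii_bus *bus, u16 page)
{
	if (page == qca8k_cached_page)
		return 0;	/* window already points here */

	if (bus->write(bus, 0x18, 0, page) < 0)	/* window register */
		return -EBUSY;

	qca8k_cached_page = page;
	return 0;
}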
page              467 drivers/net/ethernet/amazon/ena/ena_netdev.c 	struct page *page;
page              471 drivers/net/ethernet/amazon/ena/ena_netdev.c 	if (unlikely(rx_info->page))
page              474 drivers/net/ethernet/amazon/ena/ena_netdev.c 	page = alloc_page(gfp);
page              475 drivers/net/ethernet/amazon/ena/ena_netdev.c 	if (unlikely(!page)) {
page              482 drivers/net/ethernet/amazon/ena/ena_netdev.c 	dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE,
page              489 drivers/net/ethernet/amazon/ena/ena_netdev.c 		__free_page(page);
page              493 drivers/net/ethernet/amazon/ena/ena_netdev.c 		  "alloc page %p, rx_info %p\n", page, rx_info);
page              495 drivers/net/ethernet/amazon/ena/ena_netdev.c 	rx_info->page = page;
page              507 drivers/net/ethernet/amazon/ena/ena_netdev.c 	struct page *page = rx_info->page;
page              510 drivers/net/ethernet/amazon/ena/ena_netdev.c 	if (unlikely(!page)) {
page              519 drivers/net/ethernet/amazon/ena/ena_netdev.c 	__free_page(page);
page              520 drivers/net/ethernet/amazon/ena/ena_netdev.c 	rx_info->page = NULL;
page              590 drivers/net/ethernet/amazon/ena/ena_netdev.c 		if (rx_info->page)
page              876 drivers/net/ethernet/amazon/ena/ena_netdev.c 	if (unlikely(!rx_info->page)) {
page              884 drivers/net/ethernet/amazon/ena/ena_netdev.c 		  rx_info, rx_info->page);
page              887 drivers/net/ethernet/amazon/ena/ena_netdev.c 	va = page_address(rx_info->page) + rx_info->page_offset;
page              927 drivers/net/ethernet/amazon/ena/ena_netdev.c 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
page              934 drivers/net/ethernet/amazon/ena/ena_netdev.c 		rx_info->page = NULL;
page              203 drivers/net/ethernet/amazon/ena/ena_netdev.h 	struct page *page;
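Aside: the ena refill path above is the canonical RX page pattern: allocate, DMA-map, and unwind in reverse order if the mapping fails. Condensed into one helper; the rx_info container, GFP flags and DMA direction are taken on trust from the surrounding lines:

static int rx_page_alloc_map(struct device *dev,
			     struct ena_rx_buffer *rx_info)
{
	struct page *page = alloc_page(GFP_ATOMIC);
	dma_addr_t dma;

	if (unlikely(!page))
		return -ENOMEM;

	dma = dma_map_page(dev, page, 0, ENA_PAGE_SIZE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, dma))) {
		__free_page(page);	/* unwind: mapping failed */
		return -EIO;
	}

	rx_info->page = page;
	return 0;
}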
page              289 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 	struct page *pages = NULL;
page              386 drivers/net/ethernet/amd/xgbe/xgbe.h 	struct page *pages;
page               75 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	struct page *page;
page               92 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		page = dev_alloc_page();
page               93 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		if (unlikely(!page))
page               96 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		dma_addr = dma_map_page(dev, page, 0,
page               99 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 			put_page(page);
page              108 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		buf_pool->frag_page[tail] = page;
page              198 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	struct page *page;
page              203 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		page = buf_pool->frag_page[i];
page              204 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		if (page) {
page              208 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 			put_page(page);
page              615 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	struct page *page;
page              636 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		page = buf_pool->frag_page[head];
page              637 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		put_page(page);
page              679 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	struct page *page;
page              739 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		page = page_pool->frag_page[head];
page              740 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, 0,
page             1376 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 						    sizeof(struct page *),
page              116 drivers/net/ethernet/apm/xgene/xgene_enet_main.h 	struct page *(*frag_page);
page               24 drivers/net/ethernet/aquantia/atlantic/aq_ring.c 	__free_pages(rxpage->page, rxpage->order);
page               25 drivers/net/ethernet/aquantia/atlantic/aq_ring.c 	rxpage->page = NULL;
page               31 drivers/net/ethernet/aquantia/atlantic/aq_ring.c 	struct page *page;
page               35 drivers/net/ethernet/aquantia/atlantic/aq_ring.c 	page = dev_alloc_pages(order);
page               36 drivers/net/ethernet/aquantia/atlantic/aq_ring.c 	if (unlikely(!page))
page               39 drivers/net/ethernet/aquantia/atlantic/aq_ring.c 	daddr = dma_map_page(dev, page, 0, PAGE_SIZE << order,
page               45 drivers/net/ethernet/aquantia/atlantic/aq_ring.c 	rxpage->page = page;
page               53 drivers/net/ethernet/aquantia/atlantic/aq_ring.c 	__free_pages(page, order);
page               64 drivers/net/ethernet/aquantia/atlantic/aq_ring.c 	if (rxbuf->rxdata.page) {
page               66 drivers/net/ethernet/aquantia/atlantic/aq_ring.c 		if (page_ref_count(rxbuf->rxdata.page) > 1) {
page               86 drivers/net/ethernet/aquantia/atlantic/aq_ring.c 	if (!rxbuf->rxdata.page) {
page              361 drivers/net/ethernet/aquantia/atlantic/aq_ring.c 			page_ref_inc(buff->rxdata.page);
page              379 drivers/net/ethernet/aquantia/atlantic/aq_ring.c 				skb_add_rx_frag(skb, 0, buff->rxdata.page,
page              383 drivers/net/ethernet/aquantia/atlantic/aq_ring.c 				page_ref_inc(buff->rxdata.page);
page              400 drivers/net/ethernet/aquantia/atlantic/aq_ring.c 							buff_->rxdata.page,
page              404 drivers/net/ethernet/aquantia/atlantic/aq_ring.c 					page_ref_inc(buff_->rxdata.page);
page               14 drivers/net/ethernet/aquantia/atlantic/aq_ring.h struct page;
page               18 drivers/net/ethernet/aquantia/atlantic/aq_ring.h 	struct page *page;
page              135 drivers/net/ethernet/aquantia/atlantic/aq_ring.h 	return page_to_virt(rxpage->page) + rxpage->pg_off;
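Aside: aq_ring.c recycles RX pages by refcount: once page_ref_count() falls back to 1 the driver is the sole owner and can rewind pg_off and reuse the page in place; otherwise it drops only its own reference and allocates afresh. A sketch over the aq_rxpage fields visible above (function name illustrative):

static bool aq_rxpage_reuse(struct aq_rxpage *rxpage)
{
	if (page_ref_count(rxpage->page) == 1) {
		rxpage->pg_off = 0;	/* sole owner: rewind and reuse */
		return true;
	}

	/* the stack still holds references: release ours only */
	__free_pages(rxpage->page, rxpage->order);
	rxpage->page = NULL;
	return false;
}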
page              509 drivers/net/ethernet/atheros/atl1c/atl1c.h 	struct page         *rx_page;
page             1659 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 	struct page *page;
page             1665 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 	page = adapter->rx_page;
page             1666 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 	if (!page) {
page             1667 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 		adapter->rx_page = page = alloc_page(GFP_ATOMIC);
page             1668 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 		if (unlikely(!page))
page             1673 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 	skb = build_skb(page_address(page) + adapter->rx_page_offset,
page             1681 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 			get_page(page);
page             1836 drivers/net/ethernet/atheros/atlx/atl1.c 	struct page *page;
page             1869 drivers/net/ethernet/atheros/atlx/atl1.c 		page = virt_to_page(skb->data);
page             1871 drivers/net/ethernet/atheros/atlx/atl1.c 		buffer_info->dma = pci_map_page(pdev, page, offset,
page             2191 drivers/net/ethernet/atheros/atlx/atl1.c 	struct page *page;
page             2213 drivers/net/ethernet/atheros/atlx/atl1.c 		page = virt_to_page(skb->data);
page             2215 drivers/net/ethernet/atheros/atlx/atl1.c 		buffer_info->dma = pci_map_page(adapter->pdev, page,
page             2236 drivers/net/ethernet/atheros/atlx/atl1.c 				page = virt_to_page(skb->data +
page             2241 drivers/net/ethernet/atheros/atlx/atl1.c 					page, offset, buffer_info->length,
page             2250 drivers/net/ethernet/atheros/atlx/atl1.c 		page = virt_to_page(skb->data);
page             2252 drivers/net/ethernet/atheros/atlx/atl1.c 		buffer_info->dma = pci_map_page(adapter->pdev, page,
page              194 drivers/net/ethernet/aurora/nb8800.c 	struct page *page;
page              202 drivers/net/ethernet/aurora/nb8800.c 	page = virt_to_head_page(data);
page              203 drivers/net/ethernet/aurora/nb8800.c 	offset = data - page_address(page);
page              205 drivers/net/ethernet/aurora/nb8800.c 	dma_addr = dma_map_page(&dev->dev, page, offset, RX_BUF_SIZE,
page              213 drivers/net/ethernet/aurora/nb8800.c 	rxb->page = page;
page              225 drivers/net/ethernet/aurora/nb8800.c 	struct page *page = priv->rx_bufs[i].page;
page              227 drivers/net/ethernet/aurora/nb8800.c 	void *data = page_address(page) + offset;
page              258 drivers/net/ethernet/aurora/nb8800.c 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
page              753 drivers/net/ethernet/aurora/nb8800.c 			if (priv->rx_bufs[i].page)
page              754 drivers/net/ethernet/aurora/nb8800.c 				put_page(priv->rx_bufs[i].page);
page              196 drivers/net/ethernet/aurora/nb8800.h 	struct page			*page;
page             2734 drivers/net/ethernet/broadcom/bnx2.c 	struct page *page = alloc_page(gfp);
page             2736 drivers/net/ethernet/broadcom/bnx2.c 	if (!page)
page             2738 drivers/net/ethernet/broadcom/bnx2.c 	mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
page             2741 drivers/net/ethernet/broadcom/bnx2.c 		__free_page(page);
page             2745 drivers/net/ethernet/broadcom/bnx2.c 	rx_pg->page = page;
page             2756 drivers/net/ethernet/broadcom/bnx2.c 	struct page *page = rx_pg->page;
page             2758 drivers/net/ethernet/broadcom/bnx2.c 	if (!page)
page             2764 drivers/net/ethernet/broadcom/bnx2.c 	__free_page(page);
page             2765 drivers/net/ethernet/broadcom/bnx2.c 	rx_pg->page = NULL;
page             2959 drivers/net/ethernet/broadcom/bnx2.c 		struct page *page;
page             2964 drivers/net/ethernet/broadcom/bnx2.c 		page = skb_frag_page(&shinfo->frags[shinfo->nr_frags]);
page             2967 drivers/net/ethernet/broadcom/bnx2.c 		cons_rx_pg->page = page;
page             2984 drivers/net/ethernet/broadcom/bnx2.c 			prod_rx_pg->page = cons_rx_pg->page;
page             2985 drivers/net/ethernet/broadcom/bnx2.c 			cons_rx_pg->page = NULL;
page             3104 drivers/net/ethernet/broadcom/bnx2.c 			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
page             3105 drivers/net/ethernet/broadcom/bnx2.c 			rx_pg->page = NULL;
page             6638 drivers/net/ethernet/broadcom/bnx2.h 	struct page		*page;
page              366 drivers/net/ethernet/broadcom/bnx2x/bnx2x.h 	struct page	*page;
page              540 drivers/net/ethernet/broadcom/bnx2x/bnx2x.h 	struct page	*page;
page              554 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	if (!pool->page) {
page              555 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		pool->page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
page              556 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		if (unlikely(!pool->page))
page              562 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	mapping = dma_map_page(&bp->pdev->dev, pool->page,
page              569 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	sw_buf->page = pool->page;
page              579 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		get_page(pool->page);
page              581 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		pool->page = NULL;
page              648 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 			skb_fill_page_desc(skb, j, old_rx_pg.page,
page              656 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 						   old_rx_pg.page,
page              660 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 					get_page(old_rx_pg.page);
page              805 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h 	struct page *page = sw_buf->page;
page              809 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h 	if (!page)
page              818 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h 	put_page(page);
page              820 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h 	sw_buf->page = NULL;
page             1007 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h 	if (!pool->page)
page             1010 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h 	put_page(pool->page);
page             1012 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h 	pool->page = NULL;
page              505 drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h 	void *page;
page              622 drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h 		BNX2X_ILT_FREE(line->page, line->page_mapping, line->size);
page              625 drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h 	BNX2X_ILT_ZALLOC(line->page, &line->page_mapping, size);
page              626 drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h 	if (!line->page)
page             1124 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c 				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
page             7975 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c 		ilt->lines[cdu_ilt_start + i].page = bp->context[i].vcxt;
page             1664 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c 		ilt->lines[line+i].page = hw_cxt->addr;
page              685 drivers/net/ethernet/broadcom/bnxt/bnxt.c static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
page              690 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct page *page;
page              692 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	page = page_pool_dev_alloc_pages(rxr->page_pool);
page              693 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	if (!page)
page              696 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	*mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
page              699 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		page_pool_recycle_direct(rxr->page_pool, page);
page              703 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	return page;
page              735 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		struct page *page =
page              738 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		if (!page)
page              741 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		rx_buf->data = page;
page              742 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		rx_buf->data_ptr = page_address(page) + bp->rx_offset;
page              796 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct page *page;
page              802 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		page = rxr->rx_page;
page              803 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		if (!page) {
page              804 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			page = alloc_page(gfp);
page              805 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			if (!page)
page              807 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			rxr->rx_page = page;
page              815 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			get_page(page);
page              817 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		page = alloc_page(gfp);
page              818 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		if (!page)
page              822 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	mapping = dma_map_page_attrs(&pdev->dev, page, offset,
page              826 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		__free_page(page);
page              837 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	rx_agg_buf->page = page;
page              885 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		struct page *page;
page              904 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		page = cons_rx_buf->page;
page              905 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		cons_rx_buf->page = NULL;
page              906 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		prod_rx_buf->page = page;
page              932 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct page *page = data;
page              945 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	page_pool_release_page(rxr->page_pool, page);
page              952 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		__free_page(page);
page              956 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	off = (void *)data_ptr - page_address(page);
page              957 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
page             1018 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		struct page *page;
page             1030 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		skb_fill_page_desc(skb, i, cons_rx_buf->page,
page             1039 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		page = cons_rx_buf->page;
page             1040 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		cons_rx_buf->page = NULL;
page             1052 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			cons_rx_buf->page = page;
page             2605 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			struct page *page = rx_agg_buf->page;
page             2607 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			if (!page)
page             2615 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			rx_agg_buf->page = NULL;
page             2618 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			__free_page(page);
page              705 drivers/net/ethernet/broadcom/bnxt/bnxt.h 	struct page		*page;
page              887 drivers/net/ethernet/broadcom/bnxt/bnxt.h 	struct page		*rx_page;
page              112 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c 		 struct page *page, u8 **data_ptr, unsigned int *len, u8 *event)
page              166 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c 			bnxt_reuse_rx_data(rxr, cons, page);
page              175 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c 		bnxt_reuse_rx_data(rxr, cons, page);
page              189 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c 			bnxt_reuse_rx_data(rxr, cons, page);
page              195 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c 			page_pool_recycle_direct(rxr->page_pool, page);
page              208 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c 		bnxt_reuse_rx_data(rxr, cons, page);
page               18 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h 		 struct page *page, u8 **data_ptr, unsigned int *len,
page              294 drivers/net/ethernet/brocade/bna/bnad.c 	if (!unmap->page)
page              300 drivers/net/ethernet/brocade/bna/bnad.c 	put_page(unmap->page);
page              301 drivers/net/ethernet/brocade/bna/bnad.c 	unmap->page = NULL;
page              345 drivers/net/ethernet/brocade/bna/bnad.c 	struct page *page;
page              359 drivers/net/ethernet/brocade/bna/bnad.c 			page = alloc_pages(GFP_ATOMIC | __GFP_COMP,
page              364 drivers/net/ethernet/brocade/bna/bnad.c 			page = prev->page;
page              366 drivers/net/ethernet/brocade/bna/bnad.c 			get_page(page);
page              369 drivers/net/ethernet/brocade/bna/bnad.c 		if (unlikely(!page)) {
page              375 drivers/net/ethernet/brocade/bna/bnad.c 		dma_addr = dma_map_page(&bnad->pcidev->dev, page, page_offset,
page              378 drivers/net/ethernet/brocade/bna/bnad.c 			put_page(page);
page              384 drivers/net/ethernet/brocade/bna/bnad.c 		unmap->page = page;
page              537 drivers/net/ethernet/brocade/bna/bnad.c 	prefetch(page_address(unmap_q->unmap[ci].page) +
page              556 drivers/net/ethernet/brocade/bna/bnad.c 				   unmap->page, unmap->page_offset, len);
page              558 drivers/net/ethernet/brocade/bna/bnad.c 		unmap->page = NULL;
page              235 drivers/net/ethernet/brocade/bna/bnad.h 	struct page		*page;
page              603 drivers/net/ethernet/cavium/liquidio/lio_core.c 			if (pg_info->page) {
page              605 drivers/net/ethernet/cavium/liquidio/lio_core.c 				va = page_address(pg_info->page) +
page              610 drivers/net/ethernet/cavium/liquidio/lio_core.c 						pg_info->page,
page              619 drivers/net/ethernet/cavium/liquidio/lio_core.c 			skb_copy_to_linear_data(skb, page_address(pg_info->page)
page              622 drivers/net/ethernet/cavium/liquidio/lio_core.c 			put_page(pg_info->page);
page              270 drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c 		if (pg_info->page) {
page              271 drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c 			va = page_address(pg_info->page) +
page              278 drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c 				pg_info->page,
page              286 drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c 		skb_copy_to_linear_data(skb, page_address(pg_info->page) +
page              289 drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c 		put_page(pg_info->page);
page              156 drivers/net/ethernet/cavium/liquidio/octeon_droq.c 		if (pg_info->page)
page              368 drivers/net/ethernet/cavium/liquidio/octeon_droq.c 			pg_info->page = NULL;
page              454 drivers/net/ethernet/cavium/liquidio/octeon_droq.c 			if (pg_info->page)
page              647 drivers/net/ethernet/cavium/liquidio/octeon_droq.c 					pg_info->page = NULL;
page               68 drivers/net/ethernet/cavium/liquidio/octeon_droq.h 	struct page *page;
page              263 drivers/net/ethernet/cavium/liquidio/octeon_network.h 	struct page *page;
page              267 drivers/net/ethernet/cavium/liquidio/octeon_network.h 	page = alloc_page(GFP_ATOMIC);
page              268 drivers/net/ethernet/cavium/liquidio/octeon_network.h 	if (unlikely(!page))
page              273 drivers/net/ethernet/cavium/liquidio/octeon_network.h 		__free_page(page);
page              274 drivers/net/ethernet/cavium/liquidio/octeon_network.h 		pg_info->page = NULL;
page              286 drivers/net/ethernet/cavium/liquidio/octeon_network.h 	pg_info->dma = dma_map_page(&oct->pci_dev->dev, page, 0,
page              291 drivers/net/ethernet/cavium/liquidio/octeon_network.h 		__free_page(page);
page              293 drivers/net/ethernet/cavium/liquidio/octeon_network.h 		pg_info->page = NULL;
page              297 drivers/net/ethernet/cavium/liquidio/octeon_network.h 	pg_info->page = page;
page              299 drivers/net/ethernet/cavium/liquidio/octeon_network.h 	skb_pg_info->page = page;
page              323 drivers/net/ethernet/cavium/liquidio/octeon_network.h 	skb_pg_info->page = NULL;
page              335 drivers/net/ethernet/cavium/liquidio/octeon_network.h 	if (!pg_info->page) {
page              341 drivers/net/ethernet/cavium/liquidio/octeon_network.h 	if (unlikely(page_count(pg_info->page) != 1) ||
page              342 drivers/net/ethernet/cavium/liquidio/octeon_network.h 	    unlikely(page_to_nid(pg_info->page)	!= numa_node_id())) {
page              347 drivers/net/ethernet/cavium/liquidio/octeon_network.h 		pg_info->page = NULL;
page              357 drivers/net/ethernet/cavium/liquidio/octeon_network.h 	page_ref_inc(pg_info->page);
page              383 drivers/net/ethernet/cavium/liquidio/octeon_network.h 	skb_pg_info->page = pg_info->page;
page              395 drivers/net/ethernet/cavium/liquidio/octeon_network.h 	put_page(pg_info->page);
page              397 drivers/net/ethernet/cavium/liquidio/octeon_network.h 	pg_info->page = NULL;
page              411 drivers/net/ethernet/cavium/liquidio/octeon_network.h 	if (pg_info->page) {
page              412 drivers/net/ethernet/cavium/liquidio/octeon_network.h 		put_page(pg_info->page);
page              414 drivers/net/ethernet/cavium/liquidio/octeon_network.h 		pg_info->page = NULL;
page              444 drivers/net/ethernet/cavium/liquidio/octeon_network.h 	va = page_address(pg_info->page) + pg_info->page_offset;
page              458 drivers/net/ethernet/cavium/liquidio/octeon_network.h 	if (!pg_info->page) {
page              296 drivers/net/ethernet/cavium/thunder/nic.h 	struct page		*rb_page;
page              535 drivers/net/ethernet/cavium/thunder/nicvf_main.c 	struct page *page;
page              549 drivers/net/ethernet/cavium/thunder/nicvf_main.c 	page = virt_to_page((void *)cpu_addr);
page              551 drivers/net/ethernet/cavium/thunder/nicvf_main.c 	xdp.data_hard_start = page_address(page);
page              576 drivers/net/ethernet/cavium/thunder/nicvf_main.c 		if (page_ref_count(page) == 1) {
page              588 drivers/net/ethernet/cavium/thunder/nicvf_main.c 			put_page(page);
page              607 drivers/net/ethernet/cavium/thunder/nicvf_main.c 		if (page_ref_count(page) == 1) {
page              614 drivers/net/ethernet/cavium/thunder/nicvf_main.c 		put_page(page);
page              664 drivers/net/ethernet/cavium/thunder/nicvf_main.c 	struct page *page;
page              682 drivers/net/ethernet/cavium/thunder/nicvf_main.c 		page = (struct page *)sq->xdp_page[cqe_tx->sqe_ptr];
page              684 drivers/net/ethernet/cavium/thunder/nicvf_main.c 		if (page && (page_ref_count(page) == 1))
page              689 drivers/net/ethernet/cavium/thunder/nicvf_main.c 		if (page)
page              690 drivers/net/ethernet/cavium/thunder/nicvf_main.c 			put_page(page);
page               96 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	struct page *page = NULL;
page              101 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	page = pgcache->page;
page              103 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	if (page) {
page              104 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		ref_count = page_ref_count(page);
page              114 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 				page = NULL;
page              116 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 			page = NULL;
page              120 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	if (!page) {
page              121 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN, 0);
page              122 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		if (!page)
page              130 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 			nic->rb_page = page;
page              135 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		pgcache->page = page;
page              152 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 			page_ref_add(page, XDP_PAGE_REFCNT_REFILL);
page              159 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		page_ref_add(page, 1);
page              167 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	page = next->page;
page              168 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	if (page)
page              169 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		prefetch(&page->_refcount);
page              207 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		nic->rb_page = pgcache->page;
page              363 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		if (pgcache->page && page_ref_count(pgcache->page) != 0) {
page              365 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 				page_ref_sub(pgcache->page,
page              368 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 			put_page(pgcache->page);
page              574 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	struct page *page;
page              597 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		page = (struct page *)sq->xdp_page[sq->head];
page              598 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		if (!page)
page              601 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 			put_page(page);
page             1640 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	struct page *page = NULL;
page             1644 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		page = virt_to_page(phys_to_virt(buf_addr));
page             1650 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		if (page_ref_count(page) != 1)
page             1668 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	struct page *page;
page             1711 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 			page = virt_to_page(phys_to_virt(phys_addr));
page             1712 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 			offset = phys_to_virt(phys_addr) - page_address(page);
page             1713 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
page              216 drivers/net/ethernet/cavium/thunder/nicvf_queues.h 	struct page	*page;
page               92 drivers/net/ethernet/chelsio/cxgb3/adapter.h 	struct page *page;
page              355 drivers/net/ethernet/chelsio/cxgb3/sge.c 	if (q->use_pages && d->pg_chunk.page) {
page              362 drivers/net/ethernet/chelsio/cxgb3/sge.c 		put_page(d->pg_chunk.page);
page              363 drivers/net/ethernet/chelsio/cxgb3/sge.c 		d->pg_chunk.page = NULL;
page              393 drivers/net/ethernet/chelsio/cxgb3/sge.c 	if (q->pg_chunk.page) {
page              394 drivers/net/ethernet/chelsio/cxgb3/sge.c 		__free_pages(q->pg_chunk.page, q->order);
page              395 drivers/net/ethernet/chelsio/cxgb3/sge.c 		q->pg_chunk.page = NULL;
page              446 drivers/net/ethernet/chelsio/cxgb3/sge.c 	if (!q->pg_chunk.page) {
page              449 drivers/net/ethernet/chelsio/cxgb3/sge.c 		q->pg_chunk.page = alloc_pages(gfp, order);
page              450 drivers/net/ethernet/chelsio/cxgb3/sge.c 		if (unlikely(!q->pg_chunk.page))
page              452 drivers/net/ethernet/chelsio/cxgb3/sge.c 		q->pg_chunk.va = page_address(q->pg_chunk.page);
page              456 drivers/net/ethernet/chelsio/cxgb3/sge.c 		mapping = pci_map_page(adapter->pdev, q->pg_chunk.page,
page              459 drivers/net/ethernet/chelsio/cxgb3/sge.c 			__free_pages(q->pg_chunk.page, order);
page              460 drivers/net/ethernet/chelsio/cxgb3/sge.c 			q->pg_chunk.page = NULL;
page              471 drivers/net/ethernet/chelsio/cxgb3/sge.c 		q->pg_chunk.page = NULL;
page              474 drivers/net/ethernet/chelsio/cxgb3/sge.c 		get_page(q->pg_chunk.page);
page              888 drivers/net/ethernet/chelsio/cxgb3/sge.c 	if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page)
page              896 drivers/net/ethernet/chelsio/cxgb3/sge.c 		skb_fill_page_desc(newskb, 0, sd->pg_chunk.page,
page              904 drivers/net/ethernet/chelsio/cxgb3/sge.c 				   sd->pg_chunk.page,
page             2152 drivers/net/ethernet/chelsio/cxgb3/sge.c 	if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page)
page             2159 drivers/net/ethernet/chelsio/cxgb3/sge.c 		put_page(sd->pg_chunk.page);
page             2184 drivers/net/ethernet/chelsio/cxgb3/sge.c 	__skb_frag_set_page(rx_frag, sd->pg_chunk.page);
page             3529 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	struct page *page;
page             3543 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		page = sg_page(iter);
page             3544 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		if (page)
page             3545 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 			__free_pages(page, HMA_PAGE_ORDER);
page             3558 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	struct page *newpage;
page              133 drivers/net/ethernet/chelsio/cxgb4/sge.c 	struct page *page;
page              503 drivers/net/ethernet/chelsio/cxgb4/sge.c 		put_page(d->page);
page              504 drivers/net/ethernet/chelsio/cxgb4/sge.c 		d->page = NULL;
page              529 drivers/net/ethernet/chelsio/cxgb4/sge.c 	d->page = NULL;
page              570 drivers/net/ethernet/chelsio/cxgb4/sge.c static inline void set_rx_sw_desc(struct rx_sw_desc *sd, struct page *pg,
page              573 drivers/net/ethernet/chelsio/cxgb4/sge.c 	sd->page = pg;
page              595 drivers/net/ethernet/chelsio/cxgb4/sge.c 	struct page *pg;
page             2620 drivers/net/ethernet/chelsio/cxgb4/sge.c 	__skb_fill_page_desc(skb, 0, gl->frags[0].page,
page             2625 drivers/net/ethernet/chelsio/cxgb4/sge.c 		__skb_fill_page_desc(skb, i, gl->frags[i].page,
page             2630 drivers/net/ethernet/chelsio/cxgb4/sge.c 	get_page(gl->frags[gl->nfrags - 1].page);
page             2687 drivers/net/ethernet/chelsio/cxgb4/sge.c 		put_page(p->page);
page             3085 drivers/net/ethernet/chelsio/cxgb4/sge.c 		d->page = si->frags[frags].page;
page             3172 drivers/net/ethernet/chelsio/cxgb4/sge.c 				fp->page = rsd->page;
page             3191 drivers/net/ethernet/chelsio/cxgb4/sge.c 			si.va = page_address(si.frags[0].page) +
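
The cxgb4 entries at sge.c lines 2620-2630 map a hardware gather list onto skb page fragments and then bump the refcount of the last page, since the free list may still hand the remainder of that page to the next packet. A hedged sketch of the same idea, with an invented gather-list type (struct my_gl); the caller is assumed to update skb->len, data_len and truesize separately, as the real code does:

	#include <linux/skbuff.h>

	struct my_frag {
		struct page *page;
		unsigned int offset;
		unsigned int size;
	};

	struct my_gl {
		struct my_frag frags[MAX_SKB_FRAGS];
		unsigned int nfrags;
	};

	/* Attach every fragment of the gather list to skb, trimming
	 * the stated offset from the first fragment. */
	static void my_copy_frags(struct sk_buff *skb, const struct my_gl *gl,
				  unsigned int offset)
	{
		unsigned int i;

		__skb_fill_page_desc(skb, 0, gl->frags[0].page,
				     gl->frags[0].offset + offset,
				     gl->frags[0].size - offset);
		skb_shinfo(skb)->nr_frags = gl->nfrags;
		for (i = 1; i < gl->nfrags; i++)
			__skb_fill_page_desc(skb, i, gl->frags[i].page,
					     gl->frags[i].offset,
					     gl->frags[i].size);

		/* The free list keeps using the last page, so take an
		 * extra reference for the fragment we just attached. */
		get_page(gl->frags[gl->nfrags - 1].page);
	}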
page              169 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	struct page *page;		/* Free List page buffer */
page              482 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		put_page(sdesc->page);
page              483 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		sdesc->page = NULL;
page              511 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	sdesc->page = NULL;
page              571 drivers/net/ethernet/chelsio/cxgb4vf/sge.c static inline void set_rx_sw_desc(struct rx_sw_desc *sdesc, struct page *page,
page              574 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	sdesc->page = page;
page              583 drivers/net/ethernet/chelsio/cxgb4vf/sge.c static inline void poison_buf(struct page *page, size_t sz)
page              586 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	memset(page_address(page), POISON_BUF_VAL, sz);
page              608 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	struct page *page;
page              633 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		page = __dev_alloc_pages(gfp, s->fl_pg_order);
page              634 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		if (unlikely(!page)) {
page              643 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		poison_buf(page, PAGE_SIZE << s->fl_pg_order);
page              645 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		dma_addr = dma_map_page(adapter->pdev_dev, page, 0,
page              657 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 			__free_pages(page, s->fl_pg_order);
page              663 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		set_rx_sw_desc(sdesc, page, dma_addr);
page              677 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		page = __dev_alloc_page(gfp);
page              678 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		if (unlikely(!page)) {
page              682 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		poison_buf(page, PAGE_SIZE);
page              684 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		dma_addr = dma_map_page(adapter->pdev_dev, page, 0, PAGE_SIZE,
page              687 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 			put_page(page);
page              692 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		set_rx_sw_desc(sdesc, page, dma_addr);
page             1479 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	__skb_fill_page_desc(skb, 0, gl->frags[0].page,
page             1484 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		__skb_fill_page_desc(skb, i, gl->frags[i].page,
page             1489 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	get_page(gl->frags[gl->nfrags - 1].page);
page             1555 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		put_page(gl->frags[frag].page);
page             1722 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		sdesc->page = gl->frags[frags].page;
page             1812 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 				fp->page = sdesc->page;
page             1830 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 			gl.va = (page_address(gl.frags[0].page) +
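
The cxgb4vf refill path above (sge.c lines 608-692) follows the canonical RX free-list pattern: allocate a page, DMA-map it, unwind on mapping failure, and record page plus bus address in the software descriptor. A minimal stand-alone version of that pattern, assuming a made-up descriptor type (struct my_rx_desc):

	#include <linux/dma-mapping.h>
	#include <linux/skbuff.h>

	struct my_rx_desc {
		struct page *page;
		dma_addr_t dma;
	};

	/* Fill one RX software descriptor; returns 0 or a negative errno. */
	static int my_refill_one(struct device *dev, struct my_rx_desc *d,
				 gfp_t gfp)
	{
		struct page *page;
		dma_addr_t dma;

		page = __dev_alloc_page(gfp);	/* networking order-0 helper */
		if (unlikely(!page))
			return -ENOMEM;

		dma = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(dev, dma))) {
			put_page(page);		/* undo the allocation */
			return -EIO;
		}

		d->page = page;
		d->dma = dma;
		return 0;
	}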
page               91 drivers/net/ethernet/cortina/gemini.c 	struct page *page;
page              800 drivers/net/ethernet/cortina/gemini.c 		put_page(gpage->page);
page              807 drivers/net/ethernet/cortina/gemini.c static struct page *geth_freeq_alloc_map_page(struct gemini_ethernet *geth,
page              815 drivers/net/ethernet/cortina/gemini.c 	struct page *page;
page              819 drivers/net/ethernet/cortina/gemini.c 	page = alloc_page(GFP_ATOMIC);
page              820 drivers/net/ethernet/cortina/gemini.c 	if (!page)
page              823 drivers/net/ethernet/cortina/gemini.c 	mapping = dma_map_single(geth->dev, page_address(page),
page              826 drivers/net/ethernet/cortina/gemini.c 		put_page(page);
page              849 drivers/net/ethernet/cortina/gemini.c 	if (gpage->page) {
page              855 drivers/net/ethernet/cortina/gemini.c 		put_page(gpage->page);
page              860 drivers/net/ethernet/cortina/gemini.c 		pn, (unsigned int)mapping, page);
page              862 drivers/net/ethernet/cortina/gemini.c 	gpage->page = page;
page              864 drivers/net/ethernet/cortina/gemini.c 	return page;
page              896 drivers/net/ethernet/cortina/gemini.c 		struct page *page;
page              899 drivers/net/ethernet/cortina/gemini.c 		page = gpage->page;
page              902 drivers/net/ethernet/cortina/gemini.c 			pn, page_ref_count(page), 1 << fpp_order);
page              904 drivers/net/ethernet/cortina/gemini.c 		if (page_ref_count(page) > 1) {
page              910 drivers/net/ethernet/cortina/gemini.c 			page = geth_freeq_alloc_map_page(geth, pn);
page              911 drivers/net/ethernet/cortina/gemini.c 			if (!page)
page              916 drivers/net/ethernet/cortina/gemini.c 		page_ref_add(page, 1 << fpp_order);
page              986 drivers/net/ethernet/cortina/gemini.c 		put_page(gpage->page);
page             1022 drivers/net/ethernet/cortina/gemini.c 		while (page_ref_count(gpage->page) > 0)
page             1023 drivers/net/ethernet/cortina/gemini.c 			put_page(gpage->page);
page             1408 drivers/net/ethernet/cortina/gemini.c 	struct page *page = NULL;
page             1448 drivers/net/ethernet/cortina/gemini.c 		page = gpage->page;
page             1465 drivers/net/ethernet/cortina/gemini.c 			put_page(page);
page             1479 drivers/net/ethernet/cortina/gemini.c 		skb_fill_page_desc(skb, frag_nr, page, page_offs, frag_len);
page             1499 drivers/net/ethernet/cortina/gemini.c 			put_page(page);
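
The cortina/gemini entries above suggest a refcount-based recycling scheme: each free-queue page backs 1 << fpp_order buffers, the driver adds that many references up front (gemini.c line 916), and on teardown it keeps calling put_page() until the count drains (lines 1022-1023). A sketch of the re-arm decision as the quoted lines imply it, with hypothetical helper names:

	#include <linux/gfp.h>
	#include <linux/mm.h>

	/* Stand-in for the driver's allocate+DMA-map helper. */
	static struct page *my_alloc_map_page(void)
	{
		return alloc_page(GFP_ATOMIC);	/* real code also DMA-maps */
	}

	/* Re-arm one free-queue slot covering (1 << fpp_order) buffers.
	 * Returns the page now backing the slot, or NULL on OOM. */
	static struct page *my_rearm_slot(struct page *page,
					  unsigned int fpp_order)
	{
		/* Received skbs may still hold fragments of this page;
		 * if so, leave it to them and use a fresh page. */
		if (page_ref_count(page) > 1) {
			page = my_alloc_map_page();
			if (!page)
				return NULL;
		}

		/* One reference per buffer the hardware can hand out. */
		page_ref_add(page, 1 << fpp_order);
		return page;
	}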
page              251 drivers/net/ethernet/emulex/benet/be.h 	struct page *page;
page             2286 drivers/net/ethernet/emulex/benet/be_main.c 	BUG_ON(!rx_page_info->page);
page             2313 drivers/net/ethernet/emulex/benet/be_main.c 		put_page(page_info->page);
page             2331 drivers/net/ethernet/emulex/benet/be_main.c 	start = page_address(page_info->page) + page_info->page_offset;
page             2341 drivers/net/ethernet/emulex/benet/be_main.c 		put_page(page_info->page);
page             2348 drivers/net/ethernet/emulex/benet/be_main.c 		skb_frag_set_page(skb, 0, page_info->page);
page             2357 drivers/net/ethernet/emulex/benet/be_main.c 	page_info->page = NULL;
page             2374 drivers/net/ethernet/emulex/benet/be_main.c 			skb_frag_set_page(skb, j, page_info->page);
page             2380 drivers/net/ethernet/emulex/benet/be_main.c 			put_page(page_info->page);
page             2388 drivers/net/ethernet/emulex/benet/be_main.c 		page_info->page = NULL;
page             2456 drivers/net/ethernet/emulex/benet/be_main.c 			skb_frag_set_page(skb, j, page_info->page);
page             2461 drivers/net/ethernet/emulex/benet/be_main.c 			put_page(page_info->page);
page             2576 drivers/net/ethernet/emulex/benet/be_main.c static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
page             2594 drivers/net/ethernet/emulex/benet/be_main.c 	struct page *pagep = NULL;
page             2601 drivers/net/ethernet/emulex/benet/be_main.c 	for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
page             2623 drivers/net/ethernet/emulex/benet/be_main.c 		page_info->page = pagep;
page             2819 drivers/net/ethernet/emulex/benet/be_main.c 		put_page(page_info->page);
page              274 drivers/net/ethernet/faraday/ftmac100.c static void ftmac100_rxdes_set_page(struct ftmac100_rxdes *rxdes, struct page *page)
page              276 drivers/net/ethernet/faraday/ftmac100.c 	rxdes->rxdes3 = (unsigned int)page;
page              279 drivers/net/ethernet/faraday/ftmac100.c static struct page *ftmac100_rxdes_get_page(struct ftmac100_rxdes *rxdes)
page              281 drivers/net/ethernet/faraday/ftmac100.c 	return (struct page *)rxdes->rxdes3;
page              390 drivers/net/ethernet/faraday/ftmac100.c 	struct page *page;
page              428 drivers/net/ethernet/faraday/ftmac100.c 	page = ftmac100_rxdes_get_page(rxdes);
page              429 drivers/net/ethernet/faraday/ftmac100.c 	skb_fill_page_desc(skb, 0, page, 0, length);
page              663 drivers/net/ethernet/faraday/ftmac100.c 	struct page *page;
page              666 drivers/net/ethernet/faraday/ftmac100.c 	page = alloc_page(gfp);
page              667 drivers/net/ethernet/faraday/ftmac100.c 	if (!page) {
page              673 drivers/net/ethernet/faraday/ftmac100.c 	map = dma_map_page(priv->dev, page, 0, RX_BUF_SIZE, DMA_FROM_DEVICE);
page              677 drivers/net/ethernet/faraday/ftmac100.c 		__free_page(page);
page              681 drivers/net/ethernet/faraday/ftmac100.c 	ftmac100_rxdes_set_page(rxdes, page);
page              694 drivers/net/ethernet/faraday/ftmac100.c 		struct page *page = ftmac100_rxdes_get_page(rxdes);
page              697 drivers/net/ethernet/faraday/ftmac100.c 		if (!page)
page              701 drivers/net/ethernet/faraday/ftmac100.c 		__free_page(page);
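
ftmac100 stows the struct page pointer directly in a spare RX descriptor word (rxdes3, lines 274-281 above); the cast to unsigned int is safe only because the hardware and driver are 32-bit. A sketch of the alloc/map/stash step mirroring lines 663-681, with hypothetical types; a portable variant would keep a parallel software ring instead of the cast:

	#include <linux/dma-mapping.h>
	#include <linux/gfp.h>
	#include <linux/types.h>

	struct my_rxdes {
		u32 rxdes0;	/* bus address for the DMA engine */
		u32 rxdes3;	/* spare word: holds the page pointer */
	};

	static int my_rx_alloc(struct device *dev, struct my_rxdes *rxdes,
			       unsigned int buf_size, gfp_t gfp)
	{
		struct page *page = alloc_page(gfp);
		dma_addr_t map;

		if (!page)
			return -ENOMEM;

		map = dma_map_page(dev, page, 0, buf_size, DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(dev, map))) {
			__free_page(page);
			return -ENOMEM;
		}

		rxdes->rxdes3 = (u32)(unsigned long)page; /* 32-bit only */
		rxdes->rxdes0 = (u32)map;
		return 0;
	}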
page             1738 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 	struct page *page, *head_page;
page             1795 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 			page = virt_to_page(sg_vaddr);
page             1801 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 				(page_address(page) - page_address(head_page));
page              133 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 	struct page *page, *head_page;
page              175 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 			page = virt_to_page(sg_vaddr);
page              185 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 				(page_address(page) - page_address(head_page));
page              950 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 	struct page *page;
page              962 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 		page = dev_alloc_pages(0);
page              963 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 		if (!page)
page              966 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 		addr = dma_map_page(dev, page, 0, priv->rx_buf_size,
page              975 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 					 page, DPAA2_ETH_RX_BUF_RAW_SIZE,
page              997 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 	__free_pages(page, 0);
page             1481 drivers/net/ethernet/freescale/dpaa2/dpni.c 			u8 page,
page             1494 drivers/net/ethernet/freescale/dpaa2/dpni.c 	cmd_params->page_number = page;
page              496 drivers/net/ethernet/freescale/dpaa2/dpni.h 			u8			page,
page              413 drivers/net/ethernet/freescale/enetc/enetc.c 	struct page *page;
page              416 drivers/net/ethernet/freescale/enetc/enetc.c 	page = dev_alloc_page();
page              417 drivers/net/ethernet/freescale/enetc/enetc.c 	if (unlikely(!page))
page              420 drivers/net/ethernet/freescale/enetc/enetc.c 	addr = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
page              422 drivers/net/ethernet/freescale/enetc/enetc.c 		__free_page(page);
page              428 drivers/net/ethernet/freescale/enetc/enetc.c 	rx_swbd->page = page;
page              446 drivers/net/ethernet/freescale/enetc/enetc.c 		if (unlikely(!rx_swbd->page)) {
page              537 drivers/net/ethernet/freescale/enetc/enetc.c static bool enetc_page_reusable(struct page *page)
page              539 drivers/net/ethernet/freescale/enetc/enetc.c 	return (!page_is_pfmemalloc(page) && page_ref_count(page) == 1);
page              570 drivers/net/ethernet/freescale/enetc/enetc.c 	if (likely(enetc_page_reusable(rx_swbd->page))) {
page              572 drivers/net/ethernet/freescale/enetc/enetc.c 		page_ref_inc(rx_swbd->page);
page              586 drivers/net/ethernet/freescale/enetc/enetc.c 	rx_swbd->page = NULL;
page              596 drivers/net/ethernet/freescale/enetc/enetc.c 	ba = page_address(rx_swbd->page) + rx_swbd->page_offset;
page              616 drivers/net/ethernet/freescale/enetc/enetc.c 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_swbd->page,
page              920 drivers/net/ethernet/freescale/enetc/enetc.c 		if (!rx_swbd->page)
page              925 drivers/net/ethernet/freescale/enetc/enetc.c 		__free_page(rx_swbd->page);
page              926 drivers/net/ethernet/freescale/enetc/enetc.c 		rx_swbd->page = NULL;
page               37 drivers/net/ethernet/freescale/enetc/enetc.h 	struct page *page;
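
enetc's reuse test (enetc.c lines 537-539 above) is the common half-page flip: a buffer is recycled only when the driver holds the sole reference and the page is not pfmemalloc, and the offset then toggles between the two halves of the page. A sketch under those assumptions (MY_RXB_TRUESIZE and the struct name are illustrative):

	#include <linux/mm.h>
	#include <linux/types.h>

	#define MY_RXB_TRUESIZE	2048	/* half of a 4K page per buffer */

	struct my_rx_swbd {
		struct page *page;
		u16 page_offset;
	};

	/* Flip to the other half of the page if we can prove no one
	 * else is using it; otherwise the caller allocates afresh. */
	static bool my_try_reuse(struct my_rx_swbd *rxb)
	{
		if (page_is_pfmemalloc(rxb->page) ||
		    page_ref_count(rxb->page) != 1)
			return false;

		rxb->page_offset ^= MY_RXB_TRUESIZE;	/* toggle halves */
		page_ref_inc(rxb->page);	/* ref for the stack's half */
		return true;
	}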
page             1131 drivers/net/ethernet/freescale/gianfar.c 		if (!rxb->page)
page             1136 drivers/net/ethernet/freescale/gianfar.c 		__free_page(rxb->page);
page             1138 drivers/net/ethernet/freescale/gianfar.c 		rxb->page = NULL;
page             1237 drivers/net/ethernet/freescale/gianfar.c 	struct page *page;
page             1240 drivers/net/ethernet/freescale/gianfar.c 	page = dev_alloc_page();
page             1241 drivers/net/ethernet/freescale/gianfar.c 	if (unlikely(!page))
page             1244 drivers/net/ethernet/freescale/gianfar.c 	addr = dma_map_page(rxq->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
page             1246 drivers/net/ethernet/freescale/gianfar.c 		__free_page(page);
page             1252 drivers/net/ethernet/freescale/gianfar.c 	rxb->page = page;
page             1280 drivers/net/ethernet/freescale/gianfar.c 		if (unlikely(!rxb->page)) {
page             2388 drivers/net/ethernet/freescale/gianfar.c 	struct page *page = rxb->page;
page             2397 drivers/net/ethernet/freescale/gianfar.c 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
page             2403 drivers/net/ethernet/freescale/gianfar.c 	if (unlikely(page_count(page) != 1 || page_is_pfmemalloc(page)))
page             2409 drivers/net/ethernet/freescale/gianfar.c 	page_ref_inc(page);
page             2439 drivers/net/ethernet/freescale/gianfar.c 	struct page *page = rxb->page;
page             2443 drivers/net/ethernet/freescale/gianfar.c 		void *buff_addr = page_address(page) + rxb->page_offset;
page             2467 drivers/net/ethernet/freescale/gianfar.c 	rxb->page = NULL;
page              990 drivers/net/ethernet/freescale/gianfar.h 	struct page *page;
page               39 drivers/net/ethernet/google/gve/gve.h 	struct page *page;
page               50 drivers/net/ethernet/google/gve/gve.h 	struct page **pages; /* list of num_entries pages */
page              429 drivers/net/ethernet/google/gve/gve.h int gve_alloc_page(struct device *dev, struct page **page, dma_addr_t *dma,
page              431 drivers/net/ethernet/google/gve/gve.h void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
page              517 drivers/net/ethernet/google/gve/gve_main.c int gve_alloc_page(struct device *dev, struct page **page, dma_addr_t *dma,
page              520 drivers/net/ethernet/google/gve/gve_main.c 	*page = alloc_page(GFP_KERNEL);
page              521 drivers/net/ethernet/google/gve/gve_main.c 	if (!*page)
page              523 drivers/net/ethernet/google/gve/gve_main.c 	*dma = dma_map_page(dev, *page, 0, PAGE_SIZE, dir);
page              525 drivers/net/ethernet/google/gve/gve_main.c 		put_page(*page);
page              572 drivers/net/ethernet/google/gve/gve_main.c void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
page              577 drivers/net/ethernet/google/gve/gve_main.c 	if (page)
page              578 drivers/net/ethernet/google/gve/gve_main.c 		put_page(page);
page               50 drivers/net/ethernet/google/gve/gve_rx.c 				dma_addr_t addr, struct page *page)
page               52 drivers/net/ethernet/google/gve/gve_rx.c 	page_info->page = page;
page               54 drivers/net/ethernet/google/gve/gve_rx.c 	page_info->page_address = page_address(page);
page               77 drivers/net/ethernet/google/gve/gve_rx.c 		struct page *page = rx->data.qpl->pages[i];
page               81 drivers/net/ethernet/google/gve/gve_rx.c 				    &rx->data.data_ring[i], addr, page);
page              258 drivers/net/ethernet/google/gve/gve_rx.c 	skb_add_rx_frag(skb, 0, page_info->page,
page              310 drivers/net/ethernet/google/gve/gve_rx.c 		pagecount = page_count(page_info->page);
page              320 drivers/net/ethernet/google/gve/gve_rx.c 			get_page(page_info->page);
page              399 drivers/net/ethernet/google/gve/gve_tx.c 	u64 page;
page              401 drivers/net/ethernet/google/gve/gve_tx.c 	for (page = first_page; page <= last_page; page++) {
page              402 drivers/net/ethernet/google/gve/gve_tx.c 		dma = page_buses[page];
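
The gve entries (gve_main.c lines 517-525 and 572-578 above) pair a page allocation with its DMA mapping and mirror both on free. A sketch of such a symmetric helper pair, with hypothetical names:

	#include <linux/dma-mapping.h>
	#include <linux/gfp.h>

	static int my_alloc_page(struct device *dev, struct page **page,
				 dma_addr_t *dma, enum dma_data_direction dir)
	{
		*page = alloc_page(GFP_KERNEL);
		if (!*page)
			return -ENOMEM;
		*dma = dma_map_page(dev, *page, 0, PAGE_SIZE, dir);
		if (dma_mapping_error(dev, *dma)) {
			put_page(*page);	/* undo the allocation */
			*page = NULL;
			return -ENOMEM;
		}
		return 0;
	}

	static void my_free_page(struct device *dev, struct page *page,
				 dma_addr_t dma, enum dma_data_direction dir)
	{
		dma_unmap_page(dev, dma, PAGE_SIZE, dir);
		if (page)
			put_page(page);
	}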
page               39 drivers/net/ethernet/hisilicon/hns/hnae.c 	struct page *p = dev_alloc_pages(order);
page               62 drivers/net/ethernet/hisilicon/hns/hnae.c 		put_page((struct page *)cb->priv);
page             2124 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	struct page *p;
page             2146 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		put_page((struct page *)cb->priv);
page              602 drivers/net/ethernet/hp/hp100.h #define hp100_page( page ) \
page              603 drivers/net/ethernet/hp/hp100.h 	outw( HP100_PAGE_##page, ioaddr + HP100_REG_PAGING )
page              128 drivers/net/ethernet/i825xx/ether1.c 	unsigned int page, thislen, offset;
page              132 drivers/net/ethernet/i825xx/ether1.c 	page = start >> 12;
page              143 drivers/net/ethernet/i825xx/ether1.c 		writeb(page, REG_PAGE);
page              184 drivers/net/ethernet/i825xx/ether1.c 		page++;
page              191 drivers/net/ethernet/i825xx/ether1.c 	unsigned int page, thislen, offset;
page              195 drivers/net/ethernet/i825xx/ether1.c 	page = start >> 12;
page              206 drivers/net/ethernet/i825xx/ether1.c 		writeb(page, REG_PAGE);
page              247 drivers/net/ethernet/i825xx/ether1.c 		page++;
page              138 drivers/net/ethernet/intel/e1000/e1000.h 		struct page *page; /* jumbo: alloc_page */
page             2915 drivers/net/ethernet/intel/e1000/e1000_hw.h #define PHY_REG(page, reg)    \
page             2916 drivers/net/ethernet/intel/e1000/e1000_hw.h         (((page) << PHY_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS))
page             2103 drivers/net/ethernet/intel/e1000/e1000_main.c 			if (buffer_info->rxbuf.page) {
page             2104 drivers/net/ethernet/intel/e1000/e1000_main.c 				put_page(buffer_info->rxbuf.page);
page             2105 drivers/net/ethernet/intel/e1000/e1000_main.c 				buffer_info->rxbuf.page = NULL;
page             3975 drivers/net/ethernet/intel/e1000/e1000_main.c 	bi->rxbuf.page = NULL;
page             4166 drivers/net/ethernet/intel/e1000/e1000_main.c 			u8 *mapped = page_address(buffer_info->rxbuf.page);
page             4195 drivers/net/ethernet/intel/e1000/e1000_main.c 						   buffer_info->rxbuf.page,
page             4201 drivers/net/ethernet/intel/e1000/e1000_main.c 				    buffer_info->rxbuf.page, 0, length);
page             4210 drivers/net/ethernet/intel/e1000/e1000_main.c 				    buffer_info->rxbuf.page, 0, length);
page             4215 drivers/net/ethernet/intel/e1000/e1000_main.c 				struct page *p;
page             4219 drivers/net/ethernet/intel/e1000/e1000_main.c 				p = buffer_info->rxbuf.page;
page             4501 drivers/net/ethernet/intel/e1000/e1000_main.c 		if (!buffer_info->rxbuf.page) {
page             4502 drivers/net/ethernet/intel/e1000/e1000_main.c 			buffer_info->rxbuf.page = alloc_page(GFP_ATOMIC);
page             4503 drivers/net/ethernet/intel/e1000/e1000_main.c 			if (unlikely(!buffer_info->rxbuf.page)) {
page             4511 drivers/net/ethernet/intel/e1000/e1000_main.c 							buffer_info->rxbuf.page, 0,
page             4515 drivers/net/ethernet/intel/e1000/e1000_main.c 				put_page(buffer_info->rxbuf.page);
page             4516 drivers/net/ethernet/intel/e1000/e1000_main.c 				buffer_info->rxbuf.page = NULL;
page              769 drivers/net/ethernet/intel/e1000e/defines.h #define GG82563_REG(page, reg)    \
page              770 drivers/net/ethernet/intel/e1000e/defines.h 	(((page) << GG82563_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS))
page              120 drivers/net/ethernet/intel/e1000e/e1000.h 	struct page *page;
page              144 drivers/net/ethernet/intel/e1000e/e1000.h 			struct page *page;
page              107 drivers/net/ethernet/intel/e1000e/ich8lan.h #define PHY_REG(page, reg)	(((page) << PHY_PAGE_SHIFT) | \
page              183 drivers/net/ethernet/intel/e1000e/netdev.c 		if (ps_page->page) {
page              186 drivers/net/ethernet/intel/e1000e/netdev.c 				       16, 1, page_address(ps_page->page),
page              738 drivers/net/ethernet/intel/e1000e/netdev.c 			if (!ps_page->page) {
page              739 drivers/net/ethernet/intel/e1000e/netdev.c 				ps_page->page = alloc_page(gfp);
page              740 drivers/net/ethernet/intel/e1000e/netdev.c 				if (!ps_page->page) {
page              745 drivers/net/ethernet/intel/e1000e/netdev.c 							    ps_page->page,
page              848 drivers/net/ethernet/intel/e1000e/netdev.c 		if (!buffer_info->page) {
page              849 drivers/net/ethernet/intel/e1000e/netdev.c 			buffer_info->page = alloc_page(gfp);
page              850 drivers/net/ethernet/intel/e1000e/netdev.c 			if (unlikely(!buffer_info->page)) {
page              858 drivers/net/ethernet/intel/e1000e/netdev.c 							buffer_info->page, 0,
page             1401 drivers/net/ethernet/intel/e1000e/netdev.c 				vaddr = kmap_atomic(ps_page->page);
page             1429 drivers/net/ethernet/intel/e1000e/netdev.c 			skb_fill_page_desc(skb, j, ps_page->page, 0, length);
page             1430 drivers/net/ethernet/intel/e1000e/netdev.c 			ps_page->page = NULL;
page             1493 drivers/net/ethernet/intel/e1000e/netdev.c 	bi->page = NULL;
page             1571 drivers/net/ethernet/intel/e1000e/netdev.c 				skb_fill_page_desc(rxtop, 0, buffer_info->page,
page             1577 drivers/net/ethernet/intel/e1000e/netdev.c 						   buffer_info->page, 0,
page             1589 drivers/net/ethernet/intel/e1000e/netdev.c 						   buffer_info->page, 0,
page             1605 drivers/net/ethernet/intel/e1000e/netdev.c 					vaddr = kmap_atomic(buffer_info->page);
page             1615 drivers/net/ethernet/intel/e1000e/netdev.c 							   buffer_info->page, 0,
page             1699 drivers/net/ethernet/intel/e1000e/netdev.c 		if (buffer_info->page) {
page             1700 drivers/net/ethernet/intel/e1000e/netdev.c 			put_page(buffer_info->page);
page             1701 drivers/net/ethernet/intel/e1000e/netdev.c 			buffer_info->page = NULL;
page             1711 drivers/net/ethernet/intel/e1000e/netdev.c 			if (!ps_page->page)
page             1716 drivers/net/ethernet/intel/e1000e/netdev.c 			put_page(ps_page->page);
page             1717 drivers/net/ethernet/intel/e1000e/netdev.c 			ps_page->page = NULL;
page                9 drivers/net/ethernet/intel/e1000e/phy.c static u32 e1000_get_phy_addr_for_hv_page(u32 page);
page              296 drivers/net/ethernet/intel/e1000e/phy.c s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page)
page              298 drivers/net/ethernet/intel/e1000e/phy.c 	e_dbg("Setting page 0x%x\n", page);
page              302 drivers/net/ethernet/intel/e1000e/phy.c 	return e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, page);
page             2317 drivers/net/ethernet/intel/e1000e/phy.c static u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg)
page             2321 drivers/net/ethernet/intel/e1000e/phy.c 	if ((page >= 768) || (page == 0 && reg == 25) || (reg == 31))
page             2339 drivers/net/ethernet/intel/e1000e/phy.c 	u32 page = offset >> IGP_PAGE_SHIFT;
page             2346 drivers/net/ethernet/intel/e1000e/phy.c 	if (page == BM_WUC_PAGE) {
page             2352 drivers/net/ethernet/intel/e1000e/phy.c 	hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);
page             2371 drivers/net/ethernet/intel/e1000e/phy.c 						    (page << page_shift));
page             2397 drivers/net/ethernet/intel/e1000e/phy.c 	u32 page = offset >> IGP_PAGE_SHIFT;
page             2404 drivers/net/ethernet/intel/e1000e/phy.c 	if (page == BM_WUC_PAGE) {
page             2410 drivers/net/ethernet/intel/e1000e/phy.c 	hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);
page             2429 drivers/net/ethernet/intel/e1000e/phy.c 						    (page << page_shift));
page             2454 drivers/net/ethernet/intel/e1000e/phy.c 	u16 page = (u16)(offset >> IGP_PAGE_SHIFT);
page             2461 drivers/net/ethernet/intel/e1000e/phy.c 	if (page == BM_WUC_PAGE) {
page             2472 drivers/net/ethernet/intel/e1000e/phy.c 						    page);
page             2497 drivers/net/ethernet/intel/e1000e/phy.c 	u16 page = (u16)(offset >> IGP_PAGE_SHIFT);
page             2504 drivers/net/ethernet/intel/e1000e/phy.c 	if (page == BM_WUC_PAGE) {
page             2515 drivers/net/ethernet/intel/e1000e/phy.c 						    page);
page             2640 drivers/net/ethernet/intel/e1000e/phy.c 	u16 page = BM_PHY_REG_PAGE(offset);
page             2647 drivers/net/ethernet/intel/e1000e/phy.c 		      page);
page             2658 drivers/net/ethernet/intel/e1000e/phy.c 	e_dbg("Accessing PHY page %d reg 0x%x\n", page, reg);
page             2663 drivers/net/ethernet/intel/e1000e/phy.c 		e_dbg("Could not write address opcode to page %d\n", page);
page             2678 drivers/net/ethernet/intel/e1000e/phy.c 		e_dbg("Could not access PHY reg %d.%d\n", page, reg);
page             2740 drivers/net/ethernet/intel/e1000e/phy.c 	u16 page = BM_PHY_REG_PAGE(offset);
page             2742 drivers/net/ethernet/intel/e1000e/phy.c 	u32 phy_addr = hw->phy.addr = e1000_get_phy_addr_for_hv_page(page);
page             2751 drivers/net/ethernet/intel/e1000e/phy.c 	if (page == BM_WUC_PAGE) {
page             2757 drivers/net/ethernet/intel/e1000e/phy.c 	if (page > 0 && page < HV_INTC_FC_PAGE_START) {
page             2764 drivers/net/ethernet/intel/e1000e/phy.c 		if (page == HV_INTC_FC_PAGE_START)
page             2765 drivers/net/ethernet/intel/e1000e/phy.c 			page = 0;
page             2770 drivers/net/ethernet/intel/e1000e/phy.c 						     (page << IGP_PAGE_SHIFT));
page             2779 drivers/net/ethernet/intel/e1000e/phy.c 	e_dbg("reading PHY page %d (or 0x%x shifted) reg 0x%x\n", page,
page             2780 drivers/net/ethernet/intel/e1000e/phy.c 	      page << IGP_PAGE_SHIFT, reg);
page             2847 drivers/net/ethernet/intel/e1000e/phy.c 	u16 page = BM_PHY_REG_PAGE(offset);
page             2849 drivers/net/ethernet/intel/e1000e/phy.c 	u32 phy_addr = hw->phy.addr = e1000_get_phy_addr_for_hv_page(page);
page             2858 drivers/net/ethernet/intel/e1000e/phy.c 	if (page == BM_WUC_PAGE) {
page             2864 drivers/net/ethernet/intel/e1000e/phy.c 	if (page > 0 && page < HV_INTC_FC_PAGE_START) {
page             2871 drivers/net/ethernet/intel/e1000e/phy.c 		if (page == HV_INTC_FC_PAGE_START)
page             2872 drivers/net/ethernet/intel/e1000e/phy.c 			page = 0;
page             2893 drivers/net/ethernet/intel/e1000e/phy.c 						     (page << IGP_PAGE_SHIFT));
page             2902 drivers/net/ethernet/intel/e1000e/phy.c 	e_dbg("writing PHY page %d (or 0x%x shifted) reg 0x%x\n", page,
page             2903 drivers/net/ethernet/intel/e1000e/phy.c 	      page << IGP_PAGE_SHIFT, reg);
page             2961 drivers/net/ethernet/intel/e1000e/phy.c static u32 e1000_get_phy_addr_for_hv_page(u32 page)
page             2965 drivers/net/ethernet/intel/e1000e/phy.c 	if (page >= HV_INTC_FC_PAGE_START)
page               30 drivers/net/ethernet/intel/e1000e/phy.h s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page);
page               94 drivers/net/ethernet/intel/e1000e/phy.h #define BM_PHY_REG(page, reg) \
page               96 drivers/net/ethernet/intel/e1000e/phy.h 	 (((page) & 0xFFFF) << PHY_PAGE_SHIFT) |\
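
In the e1000/e1000e entries above, "page" is a PHY register page, not a memory page: a register is addressed as (page << PHY_PAGE_SHIFT) | reg, and the access routines first write the page number to a page-select register, then issue the MDIC access with the low register bits. A stand-alone worked example of the encoding, assuming the 32-register pages implied by MAX_PHY_REG_ADDRESS = 0x1F (shift of 5); page 769 and register 17 are arbitrary demo values:

	#include <stdint.h>
	#include <stdio.h>

	#define MAX_PHY_REG_ADDRESS	0x1Fu	/* 32 registers per page */
	#define PHY_PAGE_SHIFT		5u

	#define PHY_REG(page, reg) \
		(((page) << PHY_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS))

	int main(void)
	{
		uint32_t offset = PHY_REG(769u, 17u);

		/* Decompose the combined offset the way the access
		 * routines above do before touching the hardware. */
		printf("offset=0x%x page=%u reg=%u\n", offset,
		       offset >> PHY_PAGE_SHIFT,
		       offset & MAX_PHY_REG_ADDRESS);
		return 0;
	}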
page               74 drivers/net/ethernet/intel/fm10k/fm10k.h 	struct page *page;
page               73 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	struct page *page = bi->page;
page               77 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	if (likely(page))
page               81 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	page = dev_alloc_page();
page               82 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	if (unlikely(!page)) {
page               88 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
page               94 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		__free_page(page);
page              101 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	bi->page = page;
page              200 drivers/net/ethernet/intel/fm10k/fm10k_main.c static inline bool fm10k_page_is_reserved(struct page *page)
page              202 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
page              206 drivers/net/ethernet/intel/fm10k/fm10k_main.c 				    struct page *page,
page              210 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	if (unlikely(fm10k_page_is_reserved(page)))
page              215 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	if (unlikely(page_count(page) != 1))
page              231 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	page_ref_inc(page);
page              256 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	struct page *page = rx_buffer->page;
page              257 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	unsigned char *va = page_address(page) + rx_buffer->page_offset;
page              272 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		if (likely(!fm10k_page_is_reserved(page)))
page              276 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		__free_page(page);
page              293 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
page              296 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	return fm10k_can_reuse_rx_page(rx_buffer, page, truesize);
page              305 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	struct page *page;
page              308 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	page = rx_buffer->page;
page              309 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	prefetchw(page);
page              312 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		void *page_addr = page_address(page) +
page              354 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	rx_buffer->page = NULL;
page              263 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 		if (!buffer->page)
page              268 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 		__free_page(buffer->page);
page              270 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 		buffer->page = NULL;
page             4743 drivers/net/ethernet/intel/i40e/i40e_common.c 				u8 page, u16 reg, u8 phy_addr, u16 *value)
page             4751 drivers/net/ethernet/intel/i40e/i40e_common.c 		  (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
page             4774 drivers/net/ethernet/intel/i40e/i40e_common.c 	command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
page             4817 drivers/net/ethernet/intel/i40e/i40e_common.c 				u8 page, u16 reg, u8 phy_addr, u16 value)
page             4825 drivers/net/ethernet/intel/i40e/i40e_common.c 		  (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
page             4850 drivers/net/ethernet/intel/i40e/i40e_common.c 	command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
page             4884 drivers/net/ethernet/intel/i40e/i40e_common.c 				    u8 page, u16 reg, u8 phy_addr, u16 value)
page             4898 drivers/net/ethernet/intel/i40e/i40e_common.c 		status = i40e_write_phy_register_clause45(hw, page, reg,
page             4920 drivers/net/ethernet/intel/i40e/i40e_common.c 				   u8 page, u16 reg, u8 phy_addr, u16 *value)
page             4935 drivers/net/ethernet/intel/i40e/i40e_common.c 		status = i40e_read_phy_register_clause45(hw, page, reg,
page              118 drivers/net/ethernet/intel/i40e/i40e_hmc.c 	struct i40e_dma_mem *page = &mem;
page              141 drivers/net/ethernet/intel/i40e/i40e_hmc.c 			page = rsrc_pg;
page              144 drivers/net/ethernet/intel/i40e/i40e_hmc.c 			ret_code = i40e_allocate_dma_mem(hw, page, i40e_mem_bp,
page              152 drivers/net/ethernet/intel/i40e/i40e_hmc.c 		pd_entry->bp.addr = *page;
page              156 drivers/net/ethernet/intel/i40e/i40e_hmc.c 		page_desc = page->pa | 0x1;
page              426 drivers/net/ethernet/intel/i40e/i40e_prototype.h 				u8 page, u16 reg, u8 phy_addr, u16 *value);
page              428 drivers/net/ethernet/intel/i40e/i40e_prototype.h 				u8 page, u16 reg, u8 phy_addr, u16 value);
page              429 drivers/net/ethernet/intel/i40e/i40e_prototype.h i40e_status i40e_read_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
page              431 drivers/net/ethernet/intel/i40e/i40e_prototype.h i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
page             1219 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	new_buff->page		= old_buff->page;
page             1226 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	old_buff->page = NULL;
page             1366 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		if (!rx_bi->page)
page             1384 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		__page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);
page             1386 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		rx_bi->page = NULL;
page             1521 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	struct page *page = bi->page;
page             1525 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	if (likely(page)) {
page             1531 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	page = dev_alloc_pages(i40e_rx_pg_order(rx_ring));
page             1532 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	if (unlikely(!page)) {
page             1538 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
page             1547 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		__free_pages(page, i40e_rx_pg_order(rx_ring));
page             1553 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	bi->page = page;
page             1555 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	page_ref_add(page, USHRT_MAX - 1);
page             1856 drivers/net/ethernet/intel/i40e/i40e_txrx.c static inline bool i40e_page_is_reusable(struct page *page)
page             1858 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	return (page_to_nid(page) == numa_mem_id()) &&
page             1859 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		!page_is_pfmemalloc(page);
page             1892 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	struct page *page = rx_buffer->page;
page             1895 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	if (unlikely(!i40e_page_is_reusable(page)))
page             1900 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	if (unlikely((page_count(page) - pagecnt_bias) > 1))
page             1914 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		page_ref_add(page, USHRT_MAX - 1);
page             1944 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
page             1969 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	prefetchw(rx_buffer->page);
page             2048 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		skb_add_rx_frag(skb, 0, rx_buffer->page,
page             2138 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		__page_frag_cache_drain(rx_buffer->page,
page             2141 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		rx_buffer->page = NULL;
page             2386 drivers/net/ethernet/intel/i40e/i40e_txrx.c 			xdp.data = page_address(rx_buffer->page) +
page              301 drivers/net/ethernet/intel/i40e/i40e_txrx.h 			struct page *page;
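
i40e (txrx.c lines 1521-1914 above) avoids a per-packet atomic with a pagecnt bias: page_ref_add(page, USHRT_MAX - 1) once at allocation, a local bias counter decremented for each fragment handed to the stack, and __page_frag_cache_drain(page, bias) to settle the balance when the page is retired. A sketch of the bookkeeping, with illustrative names:

	#include <linux/gfp.h>
	#include <linux/limits.h>
	#include <linux/mm.h>

	struct my_rx_buffer {
		struct page *page;
		unsigned int pagecnt_bias;
	};

	/* Called once after allocating the page (refcount is then 1). */
	static void my_buffer_init(struct my_rx_buffer *buf, struct page *page)
	{
		buf->page = page;
		/* Take a big block of references up front; hand them out
		 * by decrementing the local bias instead of touching the
		 * atomic refcount on every packet. */
		page_ref_add(page, USHRT_MAX - 1);
		buf->pagecnt_bias = USHRT_MAX;
	}

	/* One fragment given to the stack consumes one biased reference. */
	static void my_buffer_take_frag(struct my_rx_buffer *buf)
	{
		buf->pagecnt_bias--;
	}

	/* The page is reusable only if we own every remaining reference. */
	static bool my_buffer_reusable(const struct my_rx_buffer *buf)
	{
		return !page_is_pfmemalloc(buf->page) &&
		       page_count(buf->page) - buf->pagecnt_bias <= 1;
	}

	/* Retire the page: return the unused biased references in one go. */
	static void my_buffer_free(struct my_rx_buffer *buf)
	{
		__page_frag_cache_drain(buf->page, buf->pagecnt_bias);
		buf->page = NULL;
	}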
page              673 drivers/net/ethernet/intel/iavf/iavf_txrx.c 		if (!rx_bi->page)
page              691 drivers/net/ethernet/intel/iavf/iavf_txrx.c 		__page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);
page              693 drivers/net/ethernet/intel/iavf/iavf_txrx.c 		rx_bi->page = NULL;
page              813 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	struct page *page = bi->page;
page              817 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	if (likely(page)) {
page              823 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	page = dev_alloc_pages(iavf_rx_pg_order(rx_ring));
page              824 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	if (unlikely(!page)) {
page              830 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
page              839 drivers/net/ethernet/intel/iavf/iavf_txrx.c 		__free_pages(page, iavf_rx_pg_order(rx_ring));
page              845 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	bi->page = page;
page             1139 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	new_buff->page		= old_buff->page;
page             1151 drivers/net/ethernet/intel/iavf/iavf_txrx.c static inline bool iavf_page_is_reusable(struct page *page)
page             1153 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	return (page_to_nid(page) == numa_mem_id()) &&
page             1154 drivers/net/ethernet/intel/iavf/iavf_txrx.c 		!page_is_pfmemalloc(page);
page             1187 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	struct page *page = rx_buffer->page;
page             1190 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	if (unlikely(!iavf_page_is_reusable(page)))
page             1195 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	if (unlikely((page_count(page) - pagecnt_bias) > 1))
page             1209 drivers/net/ethernet/intel/iavf/iavf_txrx.c 		page_ref_add(page, USHRT_MAX);
page             1242 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
page             1270 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	prefetchw(rx_buffer->page);
page             1311 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	va = page_address(rx_buffer->page) + rx_buffer->page_offset;
page             1335 drivers/net/ethernet/intel/iavf/iavf_txrx.c 		skb_add_rx_frag(skb, 0, rx_buffer->page,
page             1378 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	va = page_address(rx_buffer->page) + rx_buffer->page_offset;
page             1425 drivers/net/ethernet/intel/iavf/iavf_txrx.c 		__page_frag_cache_drain(rx_buffer->page,
page             1430 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	rx_buffer->page = NULL;
page              276 drivers/net/ethernet/intel/iavf/iavf_txrx.h 	struct page *page;
page              626 drivers/net/ethernet/intel/ice/ice_ethtool.c 		received_buf = page_address(rx_buf->page);
page              284 drivers/net/ethernet/intel/ice/ice_txrx.c 		if (!rx_buf->page)
page              297 drivers/net/ethernet/intel/ice/ice_txrx.c 		__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
page              299 drivers/net/ethernet/intel/ice/ice_txrx.c 		rx_buf->page = NULL;
page              416 drivers/net/ethernet/intel/ice/ice_txrx.c 	struct page *page = bi->page;
page              420 drivers/net/ethernet/intel/ice/ice_txrx.c 	if (likely(page)) {
page              426 drivers/net/ethernet/intel/ice/ice_txrx.c 	page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
page              427 drivers/net/ethernet/intel/ice/ice_txrx.c 	if (unlikely(!page)) {
page              433 drivers/net/ethernet/intel/ice/ice_txrx.c 	dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE,
page              440 drivers/net/ethernet/intel/ice/ice_txrx.c 		__free_pages(page, 0);
page              446 drivers/net/ethernet/intel/ice/ice_txrx.c 	bi->page = page;
page              448 drivers/net/ethernet/intel/ice/ice_txrx.c 	page_ref_add(page, USHRT_MAX - 1);
page              522 drivers/net/ethernet/intel/ice/ice_txrx.c static bool ice_page_is_reserved(struct page *page)
page              524 drivers/net/ethernet/intel/ice/ice_txrx.c 	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
page              564 drivers/net/ethernet/intel/ice/ice_txrx.c 	struct page *page = rx_buf->page;
page              567 drivers/net/ethernet/intel/ice/ice_txrx.c 	if (unlikely(ice_page_is_reserved(page)))
page              572 drivers/net/ethernet/intel/ice/ice_txrx.c 	if (unlikely((page_count(page) - pagecnt_bias) > 1))
page              584 drivers/net/ethernet/intel/ice/ice_txrx.c 		page_ref_add(page, USHRT_MAX - 1);
page              613 drivers/net/ethernet/intel/ice/ice_txrx.c 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
page              644 drivers/net/ethernet/intel/ice/ice_txrx.c 	new_buf->page = old_buf->page;
page              665 drivers/net/ethernet/intel/ice/ice_txrx.c 	prefetchw(rx_buf->page);
page              695 drivers/net/ethernet/intel/ice/ice_txrx.c 	void *va = page_address(rx_buf->page) + rx_buf->page_offset;
page              728 drivers/net/ethernet/intel/ice/ice_txrx.c 		skb_add_rx_frag(skb, 0, rx_buf->page,
page              764 drivers/net/ethernet/intel/ice/ice_txrx.c 		__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
page              768 drivers/net/ethernet/intel/ice/ice_txrx.c 	rx_buf->page = NULL;
page               79 drivers/net/ethernet/intel/ice/ice_txrx.h 	struct page *page;
page              215 drivers/net/ethernet/intel/igb/igb.h 	struct page *page;
page             1789 drivers/net/ethernet/intel/igb/igb_ethtool.c 	data = kmap(rx_buffer->page);
page             1796 drivers/net/ethernet/intel/igb/igb_ethtool.c 	kunmap(rx_buffer->page);
page              542 drivers/net/ethernet/intel/igb/igb_main.c 				    buffer_info->dma && buffer_info->page) {
page              546 drivers/net/ethernet/intel/igb/igb_main.c 					  page_address(buffer_info->page) +
page             4758 drivers/net/ethernet/intel/igb/igb_main.c 		__page_frag_cache_drain(buffer_info->page,
page             7933 drivers/net/ethernet/intel/igb/igb_main.c 	new_buff->page		= old_buff->page;
page             7938 drivers/net/ethernet/intel/igb/igb_main.c static inline bool igb_page_is_reserved(struct page *page)
page             7940 drivers/net/ethernet/intel/igb/igb_main.c 	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
page             7946 drivers/net/ethernet/intel/igb/igb_main.c 	struct page *page = rx_buffer->page;
page             7949 drivers/net/ethernet/intel/igb/igb_main.c 	if (unlikely(igb_page_is_reserved(page)))
page             7954 drivers/net/ethernet/intel/igb/igb_main.c 	if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
page             7969 drivers/net/ethernet/intel/igb/igb_main.c 		page_ref_add(page, USHRT_MAX);
page             7997 drivers/net/ethernet/intel/igb/igb_main.c 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
page             8011 drivers/net/ethernet/intel/igb/igb_main.c 	void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
page             8048 drivers/net/ethernet/intel/igb/igb_main.c 		skb_add_rx_frag(skb, 0, rx_buffer->page,
page             8049 drivers/net/ethernet/intel/igb/igb_main.c 				(va + headlen) - page_address(rx_buffer->page),
page             8068 drivers/net/ethernet/intel/igb/igb_main.c 	void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
page             8268 drivers/net/ethernet/intel/igb/igb_main.c 	prefetchw(rx_buffer->page);
page             8295 drivers/net/ethernet/intel/igb/igb_main.c 		__page_frag_cache_drain(rx_buffer->page,
page             8300 drivers/net/ethernet/intel/igb/igb_main.c 	rx_buffer->page = NULL;
page             8402 drivers/net/ethernet/intel/igb/igb_main.c 	struct page *page = bi->page;
page             8406 drivers/net/ethernet/intel/igb/igb_main.c 	if (likely(page))
page             8410 drivers/net/ethernet/intel/igb/igb_main.c 	page = dev_alloc_pages(igb_rx_pg_order(rx_ring));
page             8411 drivers/net/ethernet/intel/igb/igb_main.c 	if (unlikely(!page)) {
page             8417 drivers/net/ethernet/intel/igb/igb_main.c 	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
page             8426 drivers/net/ethernet/intel/igb/igb_main.c 		__free_pages(page, igb_rx_pg_order(rx_ring));
page             8433 drivers/net/ethernet/intel/igb/igb_main.c 	bi->page = page;
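
igb_ethtool.c lines 1789-1796 above kmap() the RX page to inspect looped-back frame contents during the ethtool self-test; the mapping is needed only because the page may live in highmem. A minimal sketch of that check using the modern kmap_local_page() form, with illustrative names:

	#include <linux/highmem.h>
	#include <linux/string.h>
	#include <linux/types.h>

	/* Compare the first len bytes of a received page against the
	 * pattern we transmitted; returns true on match. */
	static bool my_check_lbtest_frame(struct page *page,
					  const u8 *pattern, unsigned int len)
	{
		u8 *data = kmap_local_page(page);	/* temporary mapping */
		bool ok = memcmp(data, pattern, len) == 0;

		kunmap_local(data);
		return ok;
	}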
page              112 drivers/net/ethernet/intel/igbvf/igbvf.h 			struct page *page;
page              157 drivers/net/ethernet/intel/igbvf/netdev.c 			if (!buffer_info->page) {
page              158 drivers/net/ethernet/intel/igbvf/netdev.c 				buffer_info->page = alloc_page(GFP_ATOMIC);
page              159 drivers/net/ethernet/intel/igbvf/netdev.c 				if (!buffer_info->page) {
page              168 drivers/net/ethernet/intel/igbvf/netdev.c 				dma_map_page(&pdev->dev, buffer_info->page,
page              174 drivers/net/ethernet/intel/igbvf/netdev.c 				__free_page(buffer_info->page);
page              175 drivers/net/ethernet/intel/igbvf/netdev.c 				buffer_info->page = NULL;
page              311 drivers/net/ethernet/intel/igbvf/netdev.c 					   buffer_info->page,
page              316 drivers/net/ethernet/intel/igbvf/netdev.c 			    (page_count(buffer_info->page) != 1))
page              317 drivers/net/ethernet/intel/igbvf/netdev.c 				buffer_info->page = NULL;
page              319 drivers/net/ethernet/intel/igbvf/netdev.c 				get_page(buffer_info->page);
page              579 drivers/net/ethernet/intel/igbvf/netdev.c 		if (buffer_info->page) {
page              585 drivers/net/ethernet/intel/igbvf/netdev.c 			put_page(buffer_info->page);
page              586 drivers/net/ethernet/intel/igbvf/netdev.c 			buffer_info->page = NULL;
page              198 drivers/net/ethernet/intel/igc/igc.h 	struct page *page;
page              382 drivers/net/ethernet/intel/igc/igc_main.c 		__page_frag_cache_drain(buffer_info->page,
page             1203 drivers/net/ethernet/intel/igc/igc_main.c 	prefetchw(rx_buffer->page);
page             1234 drivers/net/ethernet/intel/igc/igc_main.c 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
page             1241 drivers/net/ethernet/intel/igc/igc_main.c 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
page             1252 drivers/net/ethernet/intel/igc/igc_main.c 	void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
page             1291 drivers/net/ethernet/intel/igc/igc_main.c 	void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
page             1322 drivers/net/ethernet/intel/igc/igc_main.c 		skb_add_rx_frag(skb, 0, rx_buffer->page,
page             1323 drivers/net/ethernet/intel/igc/igc_main.c 				(va + headlen) - page_address(rx_buffer->page),
page             1361 drivers/net/ethernet/intel/igc/igc_main.c 	new_buff->page		= old_buff->page;
page             1366 drivers/net/ethernet/intel/igc/igc_main.c static inline bool igc_page_is_reserved(struct page *page)
page             1368 drivers/net/ethernet/intel/igc/igc_main.c 	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
page             1374 drivers/net/ethernet/intel/igc/igc_main.c 	struct page *page = rx_buffer->page;
page             1377 drivers/net/ethernet/intel/igc/igc_main.c 	if (unlikely(igc_page_is_reserved(page)))
page             1382 drivers/net/ethernet/intel/igc/igc_main.c 	if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
page             1397 drivers/net/ethernet/intel/igc/igc_main.c 		page_ref_add(page, USHRT_MAX);
page             1480 drivers/net/ethernet/intel/igc/igc_main.c 		__page_frag_cache_drain(rx_buffer->page,
page             1485 drivers/net/ethernet/intel/igc/igc_main.c 	rx_buffer->page = NULL;
page             1656 drivers/net/ethernet/intel/igc/igc_main.c 	struct page *page = bi->page;
page             1660 drivers/net/ethernet/intel/igc/igc_main.c 	if (likely(page))
page             1664 drivers/net/ethernet/intel/igc/igc_main.c 	page = dev_alloc_pages(igc_rx_pg_order(rx_ring));
page             1665 drivers/net/ethernet/intel/igc/igc_main.c 	if (unlikely(!page)) {
page             1671 drivers/net/ethernet/intel/igc/igc_main.c 	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
page             1680 drivers/net/ethernet/intel/igc/igc_main.c 		__free_page(page);
page             1687 drivers/net/ethernet/intel/igc/igc_main.c 	bi->page = page;
page              231 drivers/net/ethernet/intel/ixgbe/ixgbe.h 			struct page *page;
page             1874 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 	data = kmap(rx_buffer->page) + rx_buffer->page_offset;
page             1881 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 	kunmap(rx_buffer->page);
page              819 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 					   page_address(rx_buffer_info->page) +
page             1533 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	struct page *page = bi->page;
page             1537 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	if (likely(page))
page             1541 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	page = dev_alloc_pages(ixgbe_rx_pg_order(rx_ring));
page             1542 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	if (unlikely(!page)) {
page             1548 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
page             1558 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		__free_pages(page, ixgbe_rx_pg_order(rx_ring));
page             1565 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	bi->page = page;
page             1567 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	page_ref_add(page, USHRT_MAX - 1);
page             1940 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	new_buff->page		= old_buff->page;
page             1945 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c static inline bool ixgbe_page_is_reserved(struct page *page)
page             1947 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
page             1953 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	struct page *page = rx_buffer->page;
page             1956 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	if (unlikely(ixgbe_page_is_reserved(page)))
page             1961 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
page             1980 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		page_ref_add(page, USHRT_MAX - 1);
page             2014 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
page             2031 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	prefetchw(rx_buffer->page);
page             2076 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		__page_frag_cache_drain(rx_buffer->page,
page             2081 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	rx_buffer->page = NULL;
page             2129 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		skb_add_rx_frag(skb, 0, rx_buffer->page,
page             2130 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 				xdp->data - page_address(rx_buffer->page),
page             2320 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			xdp.data = page_address(rx_buffer->page) +
page             5325 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		__page_frag_cache_drain(rx_buffer->page,
page               47 drivers/net/ethernet/intel/ixgbevf/ixgbevf.h 	struct page *page;
page              536 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	prefetchw(rx_buffer->page);
page              566 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		__page_frag_cache_drain(rx_buffer->page,
page              571 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	rx_buffer->page = NULL;
page              609 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	struct page *page = bi->page;
page              613 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	if (likely(page))
page              617 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	page = dev_alloc_pages(ixgbevf_rx_pg_order(rx_ring));
page              618 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	if (unlikely(!page)) {
page              624 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
page              632 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		__free_pages(page, ixgbevf_rx_pg_order(rx_ring));
page              639 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	bi->page = page;
page              780 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	new_buff->page = old_buff->page;
page              786 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c static inline bool ixgbevf_page_is_reserved(struct page *page)
page              788 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
page              794 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	struct page *page = rx_buffer->page;
page              797 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	if (unlikely(ixgbevf_page_is_reserved(page)))
page              802 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
page              818 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		page_ref_add(page, USHRT_MAX);
page              846 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
page              909 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		skb_add_rx_frag(skb, 0, rx_buffer->page,
page              911 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 					page_address(rx_buffer->page),
page             1154 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 			xdp.data = page_address(rx_buffer->page) +
page             2359 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		__page_frag_cache_drain(rx_buffer->page,
page             1969 drivers/net/ethernet/jme.c 		struct page *page,
page             1977 drivers/net/ethernet/jme.c 				page,
page             1827 drivers/net/ethernet/marvell/mvneta.c 	struct page *page;
page             1829 drivers/net/ethernet/marvell/mvneta.c 	page = __dev_alloc_page(gfp_mask);
page             1830 drivers/net/ethernet/marvell/mvneta.c 	if (!page)
page             1834 drivers/net/ethernet/marvell/mvneta.c 	phys_addr = dma_map_page(pp->dev->dev.parent, page, 0, PAGE_SIZE,
page             1837 drivers/net/ethernet/marvell/mvneta.c 		__free_page(page);
page             1842 drivers/net/ethernet/marvell/mvneta.c 	mvneta_rx_desc_fill(rx_desc, phys_addr, page, rxq);
page             1958 drivers/net/ethernet/marvell/mvneta.c 		struct page *page;
page             1965 drivers/net/ethernet/marvell/mvneta.c 		page = (struct page *)rxq->buf_virt_addr[index];
page             1966 drivers/net/ethernet/marvell/mvneta.c 		data = page_address(page);
page             2028 drivers/net/ethernet/marvell/mvneta.c 				skb_add_rx_frag(rxq->skb, frag_num, page,
page             2058 drivers/net/ethernet/marvell/mvneta.c 				skb_add_rx_frag(rxq->skb, frag_num, page,
page             1464 drivers/net/ethernet/marvell/sky2.c 		struct page *page = alloc_page(gfp);
page             1466 drivers/net/ethernet/marvell/sky2.c 		if (!page)
page             1468 drivers/net/ethernet/marvell/sky2.c 		skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE);
page               57 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	struct page *page;
page               60 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	page = alloc_page(gfp);
page               61 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	if (unlikely(!page))
page               63 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	dma = dma_map_page(priv->ddev, page, 0, PAGE_SIZE, priv->dma_dir);
page               65 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		__free_page(page);
page               68 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	frag->page = page;
page               83 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		if (!frags->page) {
page               97 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	if (frag->page) {
page              100 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		__free_page(frag->page);
page              143 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		if (!frags->page) {
page              145 drivers/net/ethernet/mellanox/mlx4/en_rx.c 			frags->page = ring->page_cache.buf[ring->page_cache.index].page;
page              423 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	cache->buf[cache->index].page = frame->page;
page              458 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		put_page(ring->page_cache.buf[i].page);
page              476 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	struct page *page;
page              483 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		page = frags->page;
page              484 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		if (unlikely(!page))
page              491 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		__skb_fill_page_desc(skb, nr, page, frags->page_offset,
page              497 drivers/net/ethernet/mellanox/mlx4/en_rx.c 			release = page_count(page) != 1 ||
page              498 drivers/net/ethernet/mellanox/mlx4/en_rx.c 				  page_is_pfmemalloc(page) ||
page              499 drivers/net/ethernet/mellanox/mlx4/en_rx.c 				  page_to_nid(page) != numa_mem_id();
page              512 drivers/net/ethernet/mellanox/mlx4/en_rx.c 			frags->page = NULL;
page              514 drivers/net/ethernet/mellanox/mlx4/en_rx.c 			page_ref_inc(page);
page              706 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		va = page_address(frags[0].page) + frags[0].page_offset;
page              801 drivers/net/ethernet/mellanox/mlx4/en_rx.c 					frags[0].page = NULL;
page              349 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		.page = tx_info->page,
page              356 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		put_page(tx_info->page);
page             1154 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	tx_info->page = frame->page;
page             1155 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	frame->page = NULL;
page              101 drivers/net/ethernet/mellanox/mlx4/icm.c 	struct page *page;
page              103 drivers/net/ethernet/mellanox/mlx4/icm.c 	page = alloc_pages_node(node, gfp_mask, order);
page              104 drivers/net/ethernet/mellanox/mlx4/icm.c 	if (!page) {
page              105 drivers/net/ethernet/mellanox/mlx4/icm.c 		page = alloc_pages(gfp_mask, order);
page              106 drivers/net/ethernet/mellanox/mlx4/icm.c 		if (!page)
page              110 drivers/net/ethernet/mellanox/mlx4/icm.c 	sg_set_page(mem, page, PAGE_SIZE << order, 0);
page              345 drivers/net/ethernet/mellanox/mlx4/icm.c 				struct page *page;
page              355 drivers/net/ethernet/mellanox/mlx4/icm.c 				page = sg_page(&chunk->sg[i]);
page              356 drivers/net/ethernet/mellanox/mlx4/icm.c 				addr = lowmem_page_address(page);
page              224 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h 		struct page *page;
page              259 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h 	struct page	*page;
page              269 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h 		struct page	*page;
page              348 drivers/net/ethernet/mellanox/mlx5/core/en.h 	struct page *resync_dump_frag_page;
page              431 drivers/net/ethernet/mellanox/mlx5/core/en.h 		struct page *page;
page              470 drivers/net/ethernet/mellanox/mlx5/core/en.h 		} page;
page              113 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c 		xdpi.page.rq    = rq;
page              114 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c 		xdpi.page.di    = *di;
page              375 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c 			mlx5e_page_release_dynamic(xdpi.page.rq, &xdpi.page.di, recycle);
page              107 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c 		       struct page *page)
page              114 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c 	wi->resync_dump_frag_page = page;
page              187 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c static inline bool mlx5e_page_is_reserved(struct page *page)
page              189 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	return page_is_pfmemalloc(page) || page_to_nid(page) != numa_mem_id();
page              204 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	if (unlikely(mlx5e_page_is_reserved(dma_info->page))) {
page              225 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	if (page_ref_count(cache->page_cache[cache->head].page) != 1) {
page              246 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	dma_info->page = page_pool_dev_alloc_pages(rq->page_pool);
page              247 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	if (unlikely(!dma_info->page))
page              250 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	dma_info->addr = dma_map_page(rq->pdev, dma_info->page, 0,
page              253 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		page_pool_recycle_direct(rq->page_pool, dma_info->page);
page              254 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		dma_info->page = NULL;
page              284 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		page_pool_recycle_direct(rq->page_pool, dma_info->page);
page              287 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		page_pool_release_page(rq->page_pool, dma_info->page);
page              288 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		put_page(dma_info->page);
page              415 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	page_ref_inc(di->page);
page              417 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 			di->page, frag_offset, len, truesize);
page              425 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	const void *from = page_address(dma_info->page) + offset_from;
page             1068 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	va             = page_address(di->page) + wi->offset;
page             1088 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	page_ref_inc(di->page);
page             1300 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	va             = page_address(di->page) + head_offset;
page             1323 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	page_ref_inc(di->page);
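
Note: the mlx5 en_rx.c lines above show the page_pool recipe as used in this tree: pages come from page_pool_dev_alloc_pages(), the driver still DMA-maps them itself, a failed map is handed back with page_pool_recycle_direct(), and on release the driver either recycles into the pool or detaches with page_pool_release_page() + put_page() when the page leaves driver custody. A compressed sketch under illustrative names (the page_pool helpers have since evolved; this matches the era of the sources indexed here):

	#include <linux/dma-mapping.h>
	#include <linux/mm.h>
	#include <net/page_pool.h>

	struct rx_dma_info {
		struct page *page;
		dma_addr_t addr;
	};

	static int rx_pool_page_alloc(struct page_pool *pool, struct device *dev,
				      struct rx_dma_info *di)
	{
		di->page = page_pool_dev_alloc_pages(pool);
		if (unlikely(!di->page))
			return -ENOMEM;

		di->addr = dma_map_page(dev, di->page, 0, PAGE_SIZE,
					DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, di->addr)) {
			page_pool_recycle_direct(pool, di->page);
			di->page = NULL;
			return -ENOMEM;
		}
		return 0;
	}

	static void rx_pool_page_release(struct page_pool *pool, struct device *dev,
					 struct rx_dma_info *di, bool recycle)
	{
		dma_unmap_page(dev, di->addr, PAGE_SIZE, DMA_FROM_DEVICE);
		if (recycle) {
			page_pool_recycle_direct(pool, di->page);
		} else {
			/* page escapes to the stack: drop pool accounting */
			page_pool_release_page(pool, di->page);
			put_page(di->page);
		}
	}
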
page               59 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c 	struct page	       *page;
page               76 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id)
page              101 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c 	nfp->page = page;
page              205 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c 		__free_page(fwp->page);
page              216 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c 	struct page *page;
page              221 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c 	page = alloc_pages_node(nid, GFP_HIGHUSER, 0);
page              222 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c 	if (!page) {
page              227 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c 	addr = dma_map_page(device, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
page              240 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c 	err = insert_page(dev, addr, page, func_id);
page              248 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c 		__free_page(page);
page              105 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	struct page *page;
page              128 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	struct page *page;
page              846 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	struct page *dmatest_page;
page             1193 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	struct page *page;
page             1208 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 			get_page(rx->page);
page             1211 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 			page =
page             1214 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 			if (unlikely(page == NULL)) {
page             1220 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 			bus = pci_map_page(mgp->pdev, page, 0,
page             1224 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 				__free_pages(page, MYRI10GE_ALLOC_ORDER);
page             1230 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 			rx->page = page;
page             1235 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 		rx->info[idx].page = rx->page;
page             1338 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	va = page_address(rx->info[idx].page) + rx->info[idx].page_offset;
page             1346 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 			put_page(rx->info[idx].page);
page             1357 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 		skb_fill_page_desc(skb, i, rx->info[idx].page,
page             2043 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 		put_page(ss->rx_big.info[idx].page);
page             2053 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 		put_page(ss->rx_small.info[idx].page);
page             2096 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 		put_page(ss->rx_big.info[idx].page);
page             2108 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 		put_page(ss->rx_small.info[idx].page);
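
Note: myri10ge fills its rx rings by carving one high-order page into fixed-size chunks, taking a page reference per chunk handed out so each chunk can be released with put_page() independently. Roughly, under illustrative names (myri10ge folds the carver's own reference into the last chunk instead; this variant keeps the carver holding one reference throughout, which makes the accounting simpler to see):

	#include <linux/gfp.h>
	#include <linux/mm.h>

	struct rx_chunker {
		struct page *page;	/* current source page */
		unsigned int offset;	/* next unused byte */
	};

	static void *rx_next_chunk(struct rx_chunker *c, unsigned int chunk_bytes,
				   unsigned int order, gfp_t gfp)
	{
		void *va;

		if (!c->page || c->offset + chunk_bytes > (PAGE_SIZE << order)) {
			if (c->page)
				put_page(c->page);	/* drop the carver's ref */
			c->page = alloc_pages(gfp | __GFP_COMP, order);
			if (!c->page)
				return NULL;
			c->offset = 0;
		}

		get_page(c->page);	/* one reference per chunk in flight */
		va = page_address(c->page) + c->offset;
		c->offset += chunk_bytes;
		return va;
	}
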
page             1150 drivers/net/ethernet/natsemi/ns83820.c 			(long long)buf, (long) page_to_pfn(frag->page),
page             1382 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		struct page *page;
page             1384 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		page = alloc_page(GFP_KERNEL);
page             1385 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		frag = page ? page_address(page) : NULL;
page             1411 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		struct page *page;
page             1413 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		page = dev_alloc_page();
page             1414 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		if (unlikely(!page))
page             1416 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		frag = page_address(page);
page               14 drivers/net/ethernet/pensando/ionic/ionic_bus.h void ionic_bus_unmap_dbpage(struct ionic *ionic, void __iomem *page);
page              102 drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c void ionic_bus_unmap_dbpage(struct ionic *ionic, void __iomem *page)
page              104 drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c 	iounmap(page);
page              300 drivers/net/ethernet/qlogic/qede/qede.h 	struct page *data;
page              392 drivers/net/ethernet/qlogic/qede/qede.h 	struct page *page;
page               56 drivers/net/ethernet/qlogic/qede/qede_fp.c 	struct page *data;
page              365 drivers/net/ethernet/qlogic/qede/qede_fp.c 	txq->sw_tx_ring.xdp[idx].page = metadata->data;
page              402 drivers/net/ethernet/qlogic/qede/qede_fp.c 		__free_page(txq->sw_tx_ring.xdp[idx].page);
page             1347 drivers/net/ethernet/qlogic/qede/qede_main.c 		struct page *data;
page              165 drivers/net/ethernet/qlogic/qla3xxx.c static void ql_set_register_page(struct ql3_adapter *qdev, u32 page)
page              170 drivers/net/ethernet/qlogic/qla3xxx.c 	writel(((ISP_CONTROL_NP_MASK << 16) | page),
page              173 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->current_page = page;
page              664 drivers/net/ethernet/realtek/r8169_main.c 	struct page *Rx_databuff[NUM_RX_DESC];	/* Rx data buffers */
page             5536 drivers/net/ethernet/realtek/r8169_main.c static struct page *rtl8169_alloc_rx_data(struct rtl8169_private *tp,
page             5542 drivers/net/ethernet/realtek/r8169_main.c 	struct page *data;
page             5586 drivers/net/ethernet/realtek/r8169_main.c 		struct page *data;
page              212 drivers/net/ethernet/sfc/falcon/io.h #define EF4_PAGED_REG(page, reg) \
page              213 drivers/net/ethernet/sfc/falcon/io.h 	((page) * EF4_VI_PAGE_SIZE + (reg))
page              217 drivers/net/ethernet/sfc/falcon/io.h 				    unsigned int reg, unsigned int page)
page              219 drivers/net/ethernet/sfc/falcon/io.h 	reg = EF4_PAGED_REG(page, reg);
page              235 drivers/net/ethernet/sfc/falcon/io.h #define ef4_writeo_page(efx, value, reg, page)				\
page              239 drivers/net/ethernet/sfc/falcon/io.h 			 page)
page              246 drivers/net/ethernet/sfc/falcon/io.h 		 unsigned int reg, unsigned int page)
page              248 drivers/net/ethernet/sfc/falcon/io.h 	ef4_writed(efx, value, EF4_PAGED_REG(page, reg));
page              250 drivers/net/ethernet/sfc/falcon/io.h #define ef4_writed_page(efx, value, reg, page)				\
page              259 drivers/net/ethernet/sfc/falcon/io.h 			 page)
page              268 drivers/net/ethernet/sfc/falcon/io.h 					   unsigned int page)
page              272 drivers/net/ethernet/sfc/falcon/io.h 	if (page == 0) {
page              274 drivers/net/ethernet/sfc/falcon/io.h 		ef4_writed(efx, value, EF4_PAGED_REG(page, reg));
page              277 drivers/net/ethernet/sfc/falcon/io.h 		ef4_writed(efx, value, EF4_PAGED_REG(page, reg));
page              280 drivers/net/ethernet/sfc/falcon/io.h #define ef4_writed_page_locked(efx, value, reg, page)			\
page              283 drivers/net/ethernet/sfc/falcon/io.h 				page)
page              268 drivers/net/ethernet/sfc/falcon/net_driver.h 	struct page *page;
page              342 drivers/net/ethernet/sfc/falcon/net_driver.h 	struct page **page_ring;
page               59 drivers/net/ethernet/sfc/falcon/rx.c 	return page_address(buf->page) + buf->page_offset;
page              106 drivers/net/ethernet/sfc/falcon/rx.c static struct page *ef4_reuse_page(struct ef4_rx_queue *rx_queue)
page              109 drivers/net/ethernet/sfc/falcon/rx.c 	struct page *page;
page              114 drivers/net/ethernet/sfc/falcon/rx.c 	page = rx_queue->page_ring[index];
page              115 drivers/net/ethernet/sfc/falcon/rx.c 	if (page == NULL)
page              124 drivers/net/ethernet/sfc/falcon/rx.c 	if (page_count(page) == 1) {
page              126 drivers/net/ethernet/sfc/falcon/rx.c 		return page;
page              128 drivers/net/ethernet/sfc/falcon/rx.c 		state = page_address(page);
page              132 drivers/net/ethernet/sfc/falcon/rx.c 		put_page(page);
page              153 drivers/net/ethernet/sfc/falcon/rx.c 	struct page *page;
page              161 drivers/net/ethernet/sfc/falcon/rx.c 		page = ef4_reuse_page(rx_queue);
page              162 drivers/net/ethernet/sfc/falcon/rx.c 		if (page == NULL) {
page              163 drivers/net/ethernet/sfc/falcon/rx.c 			page = alloc_pages(__GFP_COMP |
page              166 drivers/net/ethernet/sfc/falcon/rx.c 			if (unlikely(page == NULL))
page              169 drivers/net/ethernet/sfc/falcon/rx.c 				dma_map_page(&efx->pci_dev->dev, page, 0,
page              174 drivers/net/ethernet/sfc/falcon/rx.c 				__free_pages(page, efx->rx_buffer_order);
page              177 drivers/net/ethernet/sfc/falcon/rx.c 			state = page_address(page);
page              180 drivers/net/ethernet/sfc/falcon/rx.c 			state = page_address(page);
page              191 drivers/net/ethernet/sfc/falcon/rx.c 			rx_buf->page = page;
page              196 drivers/net/ethernet/sfc/falcon/rx.c 			get_page(page);
page              213 drivers/net/ethernet/sfc/falcon/rx.c 	struct page *page = rx_buf->page;
page              215 drivers/net/ethernet/sfc/falcon/rx.c 	if (page) {
page              216 drivers/net/ethernet/sfc/falcon/rx.c 		struct ef4_rx_page_state *state = page_address(page);
page              229 drivers/net/ethernet/sfc/falcon/rx.c 		if (rx_buf->page) {
page              230 drivers/net/ethernet/sfc/falcon/rx.c 			put_page(rx_buf->page);
page              231 drivers/net/ethernet/sfc/falcon/rx.c 			rx_buf->page = NULL;
page              244 drivers/net/ethernet/sfc/falcon/rx.c 	struct page *page = rx_buf->page;
page              264 drivers/net/ethernet/sfc/falcon/rx.c 		rx_queue->page_ring[index] = page;
page              270 drivers/net/ethernet/sfc/falcon/rx.c 	put_page(rx_buf->page);
page              277 drivers/net/ethernet/sfc/falcon/rx.c 	if (rx_buf->page)
page              278 drivers/net/ethernet/sfc/falcon/rx.c 		put_page(rx_buf->page);
page              285 drivers/net/ethernet/sfc/falcon/rx.c 	rx_buf->page = NULL;
page              447 drivers/net/ethernet/sfc/falcon/rx.c 				   rx_buf->page, rx_buf->page_offset,
page              449 drivers/net/ethernet/sfc/falcon/rx.c 		rx_buf->page = NULL;
page              497 drivers/net/ethernet/sfc/falcon/rx.c 					   rx_buf->page, rx_buf->page_offset,
page              499 drivers/net/ethernet/sfc/falcon/rx.c 			rx_buf->page = NULL;
page              508 drivers/net/ethernet/sfc/falcon/rx.c 		__free_pages(rx_buf->page, efx->rx_buffer_order);
page              509 drivers/net/ethernet/sfc/falcon/rx.c 		rx_buf->page = NULL;
page              796 drivers/net/ethernet/sfc/falcon/rx.c 		struct page *page = rx_queue->page_ring[i];
page              799 drivers/net/ethernet/sfc/falcon/rx.c 		if (page == NULL)
page              802 drivers/net/ethernet/sfc/falcon/rx.c 		state = page_address(page);
page              806 drivers/net/ethernet/sfc/falcon/rx.c 		put_page(page);
page              224 drivers/net/ethernet/sfc/io.h static inline unsigned int efx_paged_reg(struct efx_nic *efx, unsigned int page,
page              227 drivers/net/ethernet/sfc/io.h 	return page * efx->vi_stride + reg;
page              232 drivers/net/ethernet/sfc/io.h 				    unsigned int reg, unsigned int page)
page              234 drivers/net/ethernet/sfc/io.h 	reg = efx_paged_reg(efx, page, reg);
page              250 drivers/net/ethernet/sfc/io.h #define efx_writeo_page(efx, value, reg, page)				\
page              254 drivers/net/ethernet/sfc/io.h 			 page)
page              261 drivers/net/ethernet/sfc/io.h 		 unsigned int reg, unsigned int page)
page              263 drivers/net/ethernet/sfc/io.h 	efx_writed(efx, value, efx_paged_reg(efx, page, reg));
page              265 drivers/net/ethernet/sfc/io.h #define efx_writed_page(efx, value, reg, page)				\
page              274 drivers/net/ethernet/sfc/io.h 			 page)
page              283 drivers/net/ethernet/sfc/io.h 					   unsigned int page)
page              287 drivers/net/ethernet/sfc/io.h 	if (page == 0) {
page              289 drivers/net/ethernet/sfc/io.h 		efx_writed(efx, value, efx_paged_reg(efx, page, reg));
page              292 drivers/net/ethernet/sfc/io.h 		efx_writed(efx, value, efx_paged_reg(efx, page, reg));
page              295 drivers/net/ethernet/sfc/io.h #define efx_writed_page_locked(efx, value, reg, page)			\
page              298 drivers/net/ethernet/sfc/io.h 				page)
page              309 drivers/net/ethernet/sfc/mcdi_mon.c 	unsigned int n_pages, n_sensors, n_attrs, page;
page              317 drivers/net/ethernet/sfc/mcdi_mon.c 	page = 0;
page              319 drivers/net/ethernet/sfc/mcdi_mon.c 		MCDI_SET_DWORD(inbuf, SENSOR_INFO_EXT_IN_PAGE, page);
page              330 drivers/net/ethernet/sfc/mcdi_mon.c 		++page;
page              332 drivers/net/ethernet/sfc/mcdi_mon.c 	n_pages = page;
page              376 drivers/net/ethernet/sfc/mcdi_mon.c 				page = type / 32;
page              378 drivers/net/ethernet/sfc/mcdi_mon.c 				if (page == n_pages)
page              382 drivers/net/ethernet/sfc/mcdi_mon.c 					       page);
page              926 drivers/net/ethernet/sfc/mcdi_port.c 					       unsigned int page,
page              942 drivers/net/ethernet/sfc/mcdi_port.c 	MCDI_SET_DWORD(inbuf, GET_PHY_MEDIA_INFO_IN_PAGE, page);
page              966 drivers/net/ethernet/sfc/mcdi_port.c 					       unsigned int page,
page              972 drivers/net/ethernet/sfc/mcdi_port.c 	rc = efx_mcdi_phy_get_module_eeprom_page(efx, page, &data, byte, 1);
page             1022 drivers/net/ethernet/sfc/mcdi_port.c 	int page;
page             1028 drivers/net/ethernet/sfc/mcdi_port.c 		page = 0;
page             1033 drivers/net/ethernet/sfc/mcdi_port.c 		page = -1; /* We obtain the lower page by asking for -1. */
page             1041 drivers/net/ethernet/sfc/mcdi_port.c 	page += ee->offset / SFP_PAGE_SIZE;
page             1043 drivers/net/ethernet/sfc/mcdi_port.c 	while (space_remaining && (page < num_pages)) {
page             1044 drivers/net/ethernet/sfc/mcdi_port.c 		rc = efx_mcdi_phy_get_module_eeprom_page(efx, page,
page             1052 drivers/net/ethernet/sfc/mcdi_port.c 			page++;
page             1055 drivers/net/ethernet/sfc/mcdi_port.c 		} else if (ignore_missing && (page > 0)) {
page             1065 drivers/net/ethernet/sfc/mcdi_port.c 				page++;
page              306 drivers/net/ethernet/sfc/net_driver.h 	struct page *page;
page              381 drivers/net/ethernet/sfc/net_driver.h 	struct page **page_ring;
page               59 drivers/net/ethernet/sfc/rx.c 	return page_address(buf->page) + buf->page_offset;
page              106 drivers/net/ethernet/sfc/rx.c static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
page              109 drivers/net/ethernet/sfc/rx.c 	struct page *page;
page              114 drivers/net/ethernet/sfc/rx.c 	page = rx_queue->page_ring[index];
page              115 drivers/net/ethernet/sfc/rx.c 	if (page == NULL)
page              124 drivers/net/ethernet/sfc/rx.c 	if (page_count(page) == 1) {
page              126 drivers/net/ethernet/sfc/rx.c 		return page;
page              128 drivers/net/ethernet/sfc/rx.c 		state = page_address(page);
page              132 drivers/net/ethernet/sfc/rx.c 		put_page(page);
page              153 drivers/net/ethernet/sfc/rx.c 	struct page *page;
page              161 drivers/net/ethernet/sfc/rx.c 		page = efx_reuse_page(rx_queue);
page              162 drivers/net/ethernet/sfc/rx.c 		if (page == NULL) {
page              163 drivers/net/ethernet/sfc/rx.c 			page = alloc_pages(__GFP_COMP |
page              166 drivers/net/ethernet/sfc/rx.c 			if (unlikely(page == NULL))
page              169 drivers/net/ethernet/sfc/rx.c 				dma_map_page(&efx->pci_dev->dev, page, 0,
page              174 drivers/net/ethernet/sfc/rx.c 				__free_pages(page, efx->rx_buffer_order);
page              177 drivers/net/ethernet/sfc/rx.c 			state = page_address(page);
page              180 drivers/net/ethernet/sfc/rx.c 			state = page_address(page);
page              191 drivers/net/ethernet/sfc/rx.c 			rx_buf->page = page;
page              196 drivers/net/ethernet/sfc/rx.c 			get_page(page);
page              213 drivers/net/ethernet/sfc/rx.c 	struct page *page = rx_buf->page;
page              215 drivers/net/ethernet/sfc/rx.c 	if (page) {
page              216 drivers/net/ethernet/sfc/rx.c 		struct efx_rx_page_state *state = page_address(page);
page              229 drivers/net/ethernet/sfc/rx.c 		if (rx_buf->page) {
page              230 drivers/net/ethernet/sfc/rx.c 			put_page(rx_buf->page);
page              231 drivers/net/ethernet/sfc/rx.c 			rx_buf->page = NULL;
page              244 drivers/net/ethernet/sfc/rx.c 	struct page *page = rx_buf->page;
page              264 drivers/net/ethernet/sfc/rx.c 		rx_queue->page_ring[index] = page;
page              270 drivers/net/ethernet/sfc/rx.c 	put_page(rx_buf->page);
page              277 drivers/net/ethernet/sfc/rx.c 	if (rx_buf->page)
page              278 drivers/net/ethernet/sfc/rx.c 		put_page(rx_buf->page);
page              285 drivers/net/ethernet/sfc/rx.c 	rx_buf->page = NULL;
page              436 drivers/net/ethernet/sfc/rx.c 				   rx_buf->page, rx_buf->page_offset,
page              438 drivers/net/ethernet/sfc/rx.c 		rx_buf->page = NULL;
page              486 drivers/net/ethernet/sfc/rx.c 					   rx_buf->page, rx_buf->page_offset,
page              488 drivers/net/ethernet/sfc/rx.c 			rx_buf->page = NULL;
page              497 drivers/net/ethernet/sfc/rx.c 		__free_pages(rx_buf->page, efx->rx_buffer_order);
page              498 drivers/net/ethernet/sfc/rx.c 		rx_buf->page = NULL;
page              794 drivers/net/ethernet/sfc/rx.c 		struct page *page = rx_queue->page_ring[i];
page              797 drivers/net/ethernet/sfc/rx.c 		if (page == NULL)
page              800 drivers/net/ethernet/sfc/rx.c 		state = page_address(page);
page              804 drivers/net/ethernet/sfc/rx.c 		put_page(page);
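
Note: both rx.c variants above (sfc and its falcon fork) keep a small ring of previously used pages and take one back only when page_count() == 1, i.e. the stack has dropped every reference; otherwise the driver's reference is dropped and the slot abandoned. A sketch of that efx_reuse_page() shape, with illustrative names (the real driver additionally keeps DMA state at page_address(page) and unmaps it on the reject path):

	#include <linux/mm.h>

	struct page_ring {
		struct page **ring;
		unsigned int remove;	/* consumer index */
		unsigned int mask;	/* ring size - 1, power of two */
	};

	static struct page *reuse_page(struct page_ring *pr)
	{
		unsigned int index = pr->remove & pr->mask;
		struct page *page = pr->ring[index];

		if (!page)
			return NULL;

		pr->ring[index] = NULL;
		pr->remove++;

		if (page_count(page) == 1)
			return page;	/* sole owner again: recycle as-is */

		put_page(page);		/* still shared: let it go */
		return NULL;
	}
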
page              723 drivers/net/ethernet/socionext/netsec.c 	struct page *page;
page              725 drivers/net/ethernet/socionext/netsec.c 	page = page_pool_dev_alloc_pages(dring->page_pool);
page              726 drivers/net/ethernet/socionext/netsec.c 	if (!page)
page              733 drivers/net/ethernet/socionext/netsec.c 	*dma_handle = page_pool_get_dma_addr(page) + NETSEC_RXBUF_HEADROOM;
page              741 drivers/net/ethernet/socionext/netsec.c 	return page_address(page);
page              818 drivers/net/ethernet/socionext/netsec.c 	struct page *page = virt_to_page(xdpf->data);
page              850 drivers/net/ethernet/socionext/netsec.c 		dma_handle = page_pool_get_dma_addr(page) + xdpf->headroom +
page              943 drivers/net/ethernet/socionext/netsec.c 		struct page *page = virt_to_page(desc->addr);
page             1016 drivers/net/ethernet/socionext/netsec.c 			page_pool_recycle_direct(dring->page_pool, page);
page             1021 drivers/net/ethernet/socionext/netsec.c 		page_pool_release_page(dring->page_pool, page);
page             1192 drivers/net/ethernet/socionext/netsec.c 			struct page *page = virt_to_page(desc->addr);
page             1194 drivers/net/ethernet/socionext/netsec.c 			page_pool_put_page(dring->page_pool, page, false);
page               60 drivers/net/ethernet/stmicro/stmmac/stmmac.h 	struct page *page;
page               61 drivers/net/ethernet/stmicro/stmmac/stmmac.h 	struct page *sec_page;
page             1212 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
page             1213 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	if (!buf->page)
page             1227 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	buf->addr = page_pool_get_dma_addr(buf->page);
page             1246 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	if (buf->page)
page             1247 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		page_pool_put_page(rx_q->page_pool, buf->page, false);
page             1248 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	buf->page = NULL;
page             3402 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		if (!buf->page) {
page             3403 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 			buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
page             3404 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 			if (!buf->page)
page             3419 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		buf->addr = page_pool_get_dma_addr(buf->page);
page             3525 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		prefetch(page_address(buf->page));
page             3531 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
page             3532 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 			buf->page = NULL;
page             3588 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 			skb_copy_to_linear_data(skb, page_address(buf->page),
page             3593 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
page             3594 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 			buf->page = NULL;
page             3604 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 					buf->page, 0, buf_len,
page             3608 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 			page_pool_release_page(rx_q->page_pool, buf->page);
page             3609 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 			buf->page = NULL;
page              456 drivers/net/ethernet/sun/cassini.c static int cas_page_free(struct cas *cp, cas_page_t *page)
page              458 drivers/net/ethernet/sun/cassini.c 	pci_unmap_page(cp->pdev, page->dma_addr, cp->page_size,
page              460 drivers/net/ethernet/sun/cassini.c 	__free_pages(page->buffer, cp->page_order);
page              461 drivers/net/ethernet/sun/cassini.c 	kfree(page);
page              478 drivers/net/ethernet/sun/cassini.c 	cas_page_t *page;
page              480 drivers/net/ethernet/sun/cassini.c 	page = kmalloc(sizeof(cas_page_t), flags);
page              481 drivers/net/ethernet/sun/cassini.c 	if (!page)
page              484 drivers/net/ethernet/sun/cassini.c 	INIT_LIST_HEAD(&page->list);
page              485 drivers/net/ethernet/sun/cassini.c 	RX_USED_SET(page, 0);
page              486 drivers/net/ethernet/sun/cassini.c 	page->buffer = alloc_pages(flags, cp->page_order);
page              487 drivers/net/ethernet/sun/cassini.c 	if (!page->buffer)
page              489 drivers/net/ethernet/sun/cassini.c 	page->dma_addr = pci_map_page(cp->pdev, page->buffer, 0,
page              491 drivers/net/ethernet/sun/cassini.c 	return page;
page              494 drivers/net/ethernet/sun/cassini.c 	kfree(page);
page              561 drivers/net/ethernet/sun/cassini.c 		cas_page_t *page = list_entry(elem, cas_page_t, list);
page              575 drivers/net/ethernet/sun/cassini.c 		if (page_count(page->buffer) > 1)
page              586 drivers/net/ethernet/sun/cassini.c 			cas_page_free(cp, page);
page             1361 drivers/net/ethernet/sun/cassini.c 	cas_page_t *page = cp->rx_pages[1][index];
page             1364 drivers/net/ethernet/sun/cassini.c 	if (page_count(page->buffer) == 1)
page             1365 drivers/net/ethernet/sun/cassini.c 		return page;
page             1370 drivers/net/ethernet/sun/cassini.c 		list_add(&page->list, &cp->rx_inuse_list);
page             1412 drivers/net/ethernet/sun/cassini.c 		cas_page_t *page = cas_page_swap(cp, 0, i);
page             1413 drivers/net/ethernet/sun/cassini.c 		rxd[i].buffer = cpu_to_le64(page->dma_addr);
page             1947 drivers/net/ethernet/sun/cassini.c 	struct cas_page *page;
page             1973 drivers/net/ethernet/sun/cassini.c 		page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
page             1980 drivers/net/ethernet/sun/cassini.c 		pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
page             1982 drivers/net/ethernet/sun/cassini.c 		addr = cas_page_map(page->buffer);
page             1984 drivers/net/ethernet/sun/cassini.c 		pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
page             1987 drivers/net/ethernet/sun/cassini.c 		RX_USED_ADD(page, 0x100);
page             1998 drivers/net/ethernet/sun/cassini.c 		page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
page             2011 drivers/net/ethernet/sun/cassini.c 		pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
page             2017 drivers/net/ethernet/sun/cassini.c 			addr = cas_page_map(page->buffer);
page             2019 drivers/net/ethernet/sun/cassini.c 			pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
page             2024 drivers/net/ethernet/sun/cassini.c 			RX_USED_ADD(page, cp->mtu_stride);
page             2026 drivers/net/ethernet/sun/cassini.c 			RX_USED_ADD(page, hlen);
page             2035 drivers/net/ethernet/sun/cassini.c 		__skb_frag_set_page(frag, page->buffer);
page             2046 drivers/net/ethernet/sun/cassini.c 			page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
page             2047 drivers/net/ethernet/sun/cassini.c 			pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
page             2050 drivers/net/ethernet/sun/cassini.c 			pci_dma_sync_single_for_device(cp->pdev, page->dma_addr,
page             2059 drivers/net/ethernet/sun/cassini.c 			__skb_frag_set_page(frag, page->buffer);
page             2063 drivers/net/ethernet/sun/cassini.c 			RX_USED_ADD(page, hlen + cp->crc_size);
page             2067 drivers/net/ethernet/sun/cassini.c 			addr = cas_page_map(page->buffer);
page             2077 drivers/net/ethernet/sun/cassini.c 		page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
page             2089 drivers/net/ethernet/sun/cassini.c 		pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
page             2091 drivers/net/ethernet/sun/cassini.c 		addr = cas_page_map(page->buffer);
page             2093 drivers/net/ethernet/sun/cassini.c 		pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
page             2097 drivers/net/ethernet/sun/cassini.c 			RX_USED_ADD(page, cp->mtu_stride);
page             2099 drivers/net/ethernet/sun/cassini.c 			RX_USED_ADD(page, i);
page             2105 drivers/net/ethernet/sun/cassini.c 			page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
page             2106 drivers/net/ethernet/sun/cassini.c 			pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
page             2109 drivers/net/ethernet/sun/cassini.c 			addr = cas_page_map(page->buffer);
page             2111 drivers/net/ethernet/sun/cassini.c 			pci_dma_sync_single_for_device(cp->pdev, page->dma_addr,
page             2115 drivers/net/ethernet/sun/cassini.c 			RX_USED_ADD(page, dlen + cp->crc_size);
page             2210 drivers/net/ethernet/sun/cassini.c 	cas_page_t **page = cp->rx_pages[ring];
page             2223 drivers/net/ethernet/sun/cassini.c 		if (page_count(page[entry]->buffer) > 1) {
page             2238 drivers/net/ethernet/sun/cassini.c 			list_add(&page[entry]->list, &cp->rx_inuse_list);
page             2242 drivers/net/ethernet/sun/cassini.c 			page[entry] = new;
page             3403 drivers/net/ethernet/sun/cassini.c 		struct page *page = alloc_pages(GFP_ATOMIC,
page             3406 drivers/net/ethernet/sun/cassini.c 		if (page) {
page             3407 drivers/net/ethernet/sun/cassini.c 			__free_pages(page, CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT);
page             3929 drivers/net/ethernet/sun/cassini.c 	cas_page_t **page = cp->rx_pages[ring];
page             3934 drivers/net/ethernet/sun/cassini.c 		if (page[i]) {
page             3935 drivers/net/ethernet/sun/cassini.c 			cas_page_free(cp, page[i]);
page             3936 drivers/net/ethernet/sun/cassini.c 			page[i] = NULL;
page             3969 drivers/net/ethernet/sun/cassini.c 	cas_page_t **page = cp->rx_pages[ring];
page             3974 drivers/net/ethernet/sun/cassini.c 		if ((page[i] = cas_page_alloc(cp, GFP_KERNEL)) == NULL)
page             2699 drivers/net/ethernet/sun/cassini.h 	struct page *buffer;
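
Note: cassini wraps each rx page in a small kmalloc'd descriptor (cas_page_t) carrying the backing page, its DMA address, a use counter and a list head, so pages can move between spare and in-use lists independent of struct page. A sketch of the alloc/free pair, using the modern dma_map_page() in place of the pci_* wrappers the driver predates, and adding a mapping-error check the old API lacked:

	#include <linux/dma-mapping.h>
	#include <linux/list.h>
	#include <linux/mm.h>
	#include <linux/slab.h>

	struct rx_page {
		struct list_head list;
		struct page *buffer;
		dma_addr_t dma_addr;
		int used;
	};

	static struct rx_page *rx_page_create(struct device *dev, gfp_t flags,
					      unsigned int order)
	{
		struct rx_page *p = kmalloc(sizeof(*p), flags);

		if (!p)
			return NULL;

		INIT_LIST_HEAD(&p->list);
		p->used = 0;
		p->buffer = alloc_pages(flags, order);
		if (!p->buffer)
			goto err_free;

		p->dma_addr = dma_map_page(dev, p->buffer, 0, PAGE_SIZE << order,
					   DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, p->dma_addr))
			goto err_pages;
		return p;

	err_pages:
		__free_pages(p->buffer, order);
	err_free:
		kfree(p);
		return NULL;
	}

	static void rx_page_destroy(struct device *dev, struct rx_page *p,
				    unsigned int order)
	{
		dma_unmap_page(dev, p->dma_addr, PAGE_SIZE << order,
			       DMA_FROM_DEVICE);
		__free_pages(p->buffer, order);
		kfree(p);
	}
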
page             3260 drivers/net/ethernet/sun/niu.c static void niu_rx_skb_append(struct sk_buff *skb, struct page *page,
page             3263 drivers/net/ethernet/sun/niu.c 	skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, offset, size);
page             3278 drivers/net/ethernet/sun/niu.c static struct page *niu_find_rxpage(struct rx_ring_info *rp, u64 addr,
page             3279 drivers/net/ethernet/sun/niu.c 				    struct page ***link)
page             3282 drivers/net/ethernet/sun/niu.c 	struct page *p, **pp;
page             3286 drivers/net/ethernet/sun/niu.c 	for (; (p = *pp) != NULL; pp = (struct page **) &p->mapping) {
page             3298 drivers/net/ethernet/sun/niu.c static void niu_hash_page(struct rx_ring_info *rp, struct page *page, u64 base)
page             3302 drivers/net/ethernet/sun/niu.c 	page->index = base;
page             3303 drivers/net/ethernet/sun/niu.c 	page->mapping = (struct address_space *) rp->rxhash[h];
page             3304 drivers/net/ethernet/sun/niu.c 	rp->rxhash[h] = page;
page             3310 drivers/net/ethernet/sun/niu.c 	struct page *page;
page             3314 drivers/net/ethernet/sun/niu.c 	page = alloc_page(mask);
page             3315 drivers/net/ethernet/sun/niu.c 	if (!page)
page             3318 drivers/net/ethernet/sun/niu.c 	addr = np->ops->map_page(np->device, page, 0,
page             3321 drivers/net/ethernet/sun/niu.c 		__free_page(page);
page             3325 drivers/net/ethernet/sun/niu.c 	niu_hash_page(rp, page, addr);
page             3327 drivers/net/ethernet/sun/niu.c 		page_ref_add(page, rp->rbr_blocks_per_page - 1);
page             3371 drivers/net/ethernet/sun/niu.c 		struct page *page, **link;
page             3380 drivers/net/ethernet/sun/niu.c 		page = niu_find_rxpage(rp, addr, &link);
page             3384 drivers/net/ethernet/sun/niu.c 		if ((page->index + PAGE_SIZE) - rcr_size == addr) {
page             3385 drivers/net/ethernet/sun/niu.c 			*link = (struct page *) page->mapping;
page             3386 drivers/net/ethernet/sun/niu.c 			np->ops->unmap_page(np->device, page->index,
page             3388 drivers/net/ethernet/sun/niu.c 			page->index = 0;
page             3389 drivers/net/ethernet/sun/niu.c 			page->mapping = NULL;
page             3390 drivers/net/ethernet/sun/niu.c 			__free_page(page);
page             3418 drivers/net/ethernet/sun/niu.c 		struct page *page, **link;
page             3432 drivers/net/ethernet/sun/niu.c 		page = niu_find_rxpage(rp, addr, &link);
page             3452 drivers/net/ethernet/sun/niu.c 		niu_rx_skb_append(skb, page, off, append_size, rcr_size);
page             3453 drivers/net/ethernet/sun/niu.c 		if ((page->index + rp->rbr_block_size) - rcr_size == addr) {
page             3454 drivers/net/ethernet/sun/niu.c 			*link = (struct page *) page->mapping;
page             3455 drivers/net/ethernet/sun/niu.c 			np->ops->unmap_page(np->device, page->index,
page             3457 drivers/net/ethernet/sun/niu.c 			page->index = 0;
page             3458 drivers/net/ethernet/sun/niu.c 			page->mapping = NULL;
page             3461 drivers/net/ethernet/sun/niu.c 			get_page(page);
page             3517 drivers/net/ethernet/sun/niu.c 		struct page *page;
page             3519 drivers/net/ethernet/sun/niu.c 		page = rp->rxhash[i];
page             3520 drivers/net/ethernet/sun/niu.c 		while (page) {
page             3521 drivers/net/ethernet/sun/niu.c 			struct page *next = (struct page *) page->mapping;
page             3522 drivers/net/ethernet/sun/niu.c 			u64 base = page->index;
page             3526 drivers/net/ethernet/sun/niu.c 			page->index = 0;
page             3527 drivers/net/ethernet/sun/niu.c 			page->mapping = NULL;
page             3529 drivers/net/ethernet/sun/niu.c 			__free_page(page);
page             3531 drivers/net/ethernet/sun/niu.c 			page = next;
page             4327 drivers/net/ethernet/sun/niu.c 	rp->rxhash = kcalloc(MAX_RBR_RING_SIZE, sizeof(struct page *),
page             6441 drivers/net/ethernet/sun/niu.c 				struct page *page;
page             6443 drivers/net/ethernet/sun/niu.c 				page = rp->rxhash[j];
page             6444 drivers/net/ethernet/sun/niu.c 				while (page) {
page             6445 drivers/net/ethernet/sun/niu.c 					struct page *next =
page             6446 drivers/net/ethernet/sun/niu.c 						(struct page *) page->mapping;
page             6447 drivers/net/ethernet/sun/niu.c 					u64 base = page->index;
page             6450 drivers/net/ethernet/sun/niu.c 					page = next;
page             9581 drivers/net/ethernet/sun/niu.c static u64 niu_pci_map_page(struct device *dev, struct page *page,
page             9585 drivers/net/ethernet/sun/niu.c 	return dma_map_page(dev, page, offset, size, direction);
page             9947 drivers/net/ethernet/sun/niu.c 	unsigned long page = __get_free_pages(flag, order);
page             9949 drivers/net/ethernet/sun/niu.c 	if (page == 0UL)
page             9951 drivers/net/ethernet/sun/niu.c 	memset((char *)page, 0, PAGE_SIZE << order);
page             9952 drivers/net/ethernet/sun/niu.c 	*dma_addr = __pa(page);
page             9954 drivers/net/ethernet/sun/niu.c 	return (void *) page;
page             9965 drivers/net/ethernet/sun/niu.c static u64 niu_phys_map_page(struct device *dev, struct page *page,
page             9969 drivers/net/ethernet/sun/niu.c 	return page_to_phys(page) + offset;
page             2920 drivers/net/ethernet/sun/niu.h 	struct page		**rxhash;
page             3133 drivers/net/ethernet/sun/niu.h 	u64 (*map_page)(struct device *dev, struct page *page,
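
Note: niu tracks rx pages by DMA address in a hash table, threading each bucket's list through fields it repurposes: page->mapping becomes the next pointer and page->index the DMA base. That is safe only because these pages never enter the page cache. The reconstructed shape, with an illustrative bucket count and hash:

	#include <linux/mm.h>

	#define RXHASH_BITS	5
	#define RXHASH_SIZE	(1 << RXHASH_BITS)

	static unsigned int rxhash(u64 base)
	{
		return (base >> PAGE_SHIFT) & (RXHASH_SIZE - 1);
	}

	static void hash_rx_page(struct page **tbl, struct page *page, u64 base)
	{
		unsigned int h = rxhash(base);

		page->index = base;				 /* DMA base */
		page->mapping = (struct address_space *)tbl[h]; /* next link */
		tbl[h] = page;
	}

	static struct page *find_rx_page(struct page **tbl, u64 addr,
					 struct page ***link)
	{
		struct page *p, **pp;

		for (pp = &tbl[rxhash(addr)]; (p = *pp) != NULL;
		     pp = (struct page **)&p->mapping) {
			if (p->index <= addr && addr < p->index + PAGE_SIZE) {
				*link = pp;	/* lets the caller unlink */
				return p;
			}
		}
		return NULL;
	}
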
page              334 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 	struct page *pages = NULL;
page              222 drivers/net/ethernet/synopsys/dwc-xlgmac.h 	struct page *pages;
page              451 drivers/net/ethernet/ti/cpsw.c 			     struct page *page)
page              465 drivers/net/ethernet/ti/cpsw.c 	if (page) {
page              466 drivers/net/ethernet/ti/cpsw.c 		dma = page_pool_get_dma_addr(page);
page              489 drivers/net/ethernet/ti/cpsw.c 			struct page *page)
page              516 drivers/net/ethernet/ti/cpsw.c 		cpsw_xdp_tx_frame(priv, xdpf, page);
page              543 drivers/net/ethernet/ti/cpsw.c 	page_pool_recycle_direct(cpsw->page_pool[ch], page);
page              674 drivers/net/ethernet/ti/cpsw.c 	struct page		*new_page, *page = token;
page              675 drivers/net/ethernet/ti/cpsw.c 	void			*pa = page_address(page);
page              706 drivers/net/ethernet/ti/cpsw.c 			new_page = page;
page              711 drivers/net/ethernet/ti/cpsw.c 		page_pool_recycle_direct(pool, page);
page              717 drivers/net/ethernet/ti/cpsw.c 		new_page = page;
page              738 drivers/net/ethernet/ti/cpsw.c 		ret = cpsw_run_xdp(priv, ch, &xdp, page);
page              754 drivers/net/ethernet/ti/cpsw.c 		page_pool_recycle_direct(pool, page);
page              768 drivers/net/ethernet/ti/cpsw.c 	page_pool_release_page(pool, page);
page             1356 drivers/net/ethernet/ti/cpsw.c 	struct page *page;
page             1365 drivers/net/ethernet/ti/cpsw.c 			page = page_pool_dev_alloc_pages(pool);
page             1366 drivers/net/ethernet/ti/cpsw.c 			if (!page) {
page             1371 drivers/net/ethernet/ti/cpsw.c 			xmeta = page_address(page) + CPSW_XMETA_OFFSET;
page             1375 drivers/net/ethernet/ti/cpsw.c 			dma = page_pool_get_dma_addr(page) + CPSW_HEADROOM;
page             1377 drivers/net/ethernet/ti/cpsw.c 							    page, dma,
page             1384 drivers/net/ethernet/ti/cpsw.c 				page_pool_recycle_direct(pool, page);
page              692 drivers/net/ethernet/ti/netcp_core.c 		struct page *page;
page              704 drivers/net/ethernet/ti/netcp_core.c 		page = (struct page *)GET_SW_DATA0(ndesc);
page              706 drivers/net/ethernet/ti/netcp_core.c 		if (likely(dma_buff && buf_len && page)) {
page              711 drivers/net/ethernet/ti/netcp_core.c 				&dma_buff, buf_len, page);
page              715 drivers/net/ethernet/ti/netcp_core.c 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
page              857 drivers/net/ethernet/ti/netcp_core.c 	struct page *page;
page              895 drivers/net/ethernet/ti/netcp_core.c 		page = alloc_page(GFP_ATOMIC | GFP_DMA);
page              896 drivers/net/ethernet/ti/netcp_core.c 		if (unlikely(!page)) {
page              901 drivers/net/ethernet/ti/netcp_core.c 		dma = dma_map_page(netcp->dev, page, 0, buf_len, DMA_TO_DEVICE);
page              905 drivers/net/ethernet/ti/netcp_core.c 		sw_data[0] = (u32)page;
page             1118 drivers/net/ethernet/ti/netcp_core.c 		struct page *page = skb_frag_page(frag);
page             1124 drivers/net/ethernet/ti/netcp_core.c 		dma_addr = dma_map_page(dev, page, page_offset, buf_len,
page              321 drivers/net/ethernet/xircom/xirc2ps_cs.c 	int i, page;
page              327 drivers/net/ethernet/xircom/xirc2ps_cs.c 	for (page = 0; page <= 8; page++) {
page              328 drivers/net/ethernet/xircom/xirc2ps_cs.c 	    printk(KERN_DEBUG pr_fmt("Register page %2x: "), page);
page              329 drivers/net/ethernet/xircom/xirc2ps_cs.c 	    SelectPage(page);
page              334 drivers/net/ethernet/xircom/xirc2ps_cs.c 	for (page=0x40 ; page <= 0x5f; page++) {
page              335 drivers/net/ethernet/xircom/xirc2ps_cs.c 		if (page == 0x43 || (page >= 0x46 && page <= 0x4f) ||
page              336 drivers/net/ethernet/xircom/xirc2ps_cs.c 		    (page >= 0x51 && page <=0x5e))
page              338 drivers/net/ethernet/xircom/xirc2ps_cs.c 	    printk(KERN_DEBUG pr_fmt("Register page %2x: "), page);
page              339 drivers/net/ethernet/xircom/xirc2ps_cs.c 	    SelectPage(page);
page              375 drivers/net/hyperv/netvsc_drv.c static u32 fill_pg_buf(struct page *page, u32 offset, u32 len,
page              383 drivers/net/hyperv/netvsc_drv.c 	page += (offset >> PAGE_SHIFT);
page              392 drivers/net/hyperv/netvsc_drv.c 		pb[j].pfn = page_to_pfn(page);
page              400 drivers/net/hyperv/netvsc_drv.c 			page++;
page              508 drivers/net/ieee802154/adf7242.c 	int status, i, page = 0;
page              524 drivers/net/ieee802154/adf7242.c 		adf7242_write_reg(lp, REG_PRAMPG, page);
page              527 drivers/net/ieee802154/adf7242.c 		xfer_buf.tx_buf = &data[page * PRAM_PAGESIZE];
page              532 drivers/net/ieee802154/adf7242.c 		page++;
page              543 drivers/net/ieee802154/adf7242.c 	unsigned int page;
page              549 drivers/net/ieee802154/adf7242.c 	for (page = 0, i = len; i >= 0; i -= PRAM_PAGESIZE, page++) {
page              552 drivers/net/ieee802154/adf7242.c 		adf7242_write_reg(lp, REG_PRAMPG, page);
page              556 drivers/net/ieee802154/adf7242.c 			if (buf[j] != data[page * PRAM_PAGESIZE + j]) {
page              720 drivers/net/ieee802154/adf7242.c static int adf7242_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
page              729 drivers/net/ieee802154/adf7242.c 	WARN_ON(page != 0);
page              978 drivers/net/ieee802154/at86rf230.c at86rf23x_set_channel(struct at86rf230_local *lp, u8 page, u8 channel)
page             1033 drivers/net/ieee802154/at86rf230.c at86rf212_set_channel(struct at86rf230_local *lp, u8 page, u8 channel)
page             1044 drivers/net/ieee802154/at86rf230.c 	if (page == 0) {
page             1067 drivers/net/ieee802154/at86rf230.c 		if (page == 0) {
page             1075 drivers/net/ieee802154/at86rf230.c 		if (page == 0)
page             1092 drivers/net/ieee802154/at86rf230.c at86rf230_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
page             1097 drivers/net/ieee802154/at86rf230.c 	rc = lp->data->set_channel(lp, page, channel);
page              616 drivers/net/ieee802154/atusb.c static int atusb_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
page              622 drivers/net/ieee802154/atusb.c 		ret = atusb->data->set_channel(hw, page, channel);
page              630 drivers/net/ieee802154/atusb.c static int atusb_set_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
page              641 drivers/net/ieee802154/atusb.c static int hulusb_set_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
page              655 drivers/net/ieee802154/atusb.c 	if (page == 0) {
page              678 drivers/net/ieee802154/atusb.c 		if (page == 0) {
page              686 drivers/net/ieee802154/atusb.c 		if (page == 0)
page             2091 drivers/net/ieee802154/ca8210.c 	u8                     page,
page              634 drivers/net/ieee802154/cc2520.c cc2520_set_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
page              641 drivers/net/ieee802154/cc2520.c 	WARN_ON(page != 0);
page               33 drivers/net/ieee802154/fakelb.c 	u8 page;
page               50 drivers/net/ieee802154/fakelb.c static int fakelb_hw_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
page               55 drivers/net/ieee802154/fakelb.c 	phy->page = page;
page               71 drivers/net/ieee802154/fakelb.c 		if (current_phy->page == phy->page &&
page               48 drivers/net/ieee802154/mac802154_hwsim.c 	u8 page;
page               91 drivers/net/ieee802154/mac802154_hwsim.c static int hwsim_hw_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
page              100 drivers/net/ieee802154/mac802154_hwsim.c 	pib->page = page;
page              130 drivers/net/ieee802154/mac802154_hwsim.c 		if (current_pib->page == endpoint_pib->page &&
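
Note: fakelb and mac802154_hwsim model RF reachability purely as matching radio parameters; a frame is looped to another phy only when both sit on the same IEEE 802.15.4 (page, channel) pair, as the comparisons above show. The whole check, reduced to a sketch:

	struct radio_pib {
		u8 page;	/* IEEE 802.15.4 channel page */
		u8 channel;	/* channel within that page */
	};

	static bool radios_can_hear(const struct radio_pib *tx,
				    const struct radio_pib *rx)
	{
		return tx->page == rx->page && tx->channel == rx->channel;
	}
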
page              493 drivers/net/ieee802154/mcr20a.c mcr20a_set_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
page              632 drivers/net/ieee802154/mrf24j40.c static int mrf24j40_set_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
page              640 drivers/net/ieee802154/mrf24j40.c 	WARN_ON(page != 0);
page              129 drivers/net/phy/dp83640.c 	int page;
page              217 drivers/net/phy/dp83640.c static int ext_read(struct phy_device *phydev, int page, u32 regnum)
page              222 drivers/net/phy/dp83640.c 	if (dp83640->clock->page != page) {
page              223 drivers/net/phy/dp83640.c 		broadcast_write(phydev, PAGESEL, page);
page              224 drivers/net/phy/dp83640.c 		dp83640->clock->page = page;
page              233 drivers/net/phy/dp83640.c 		      int page, u32 regnum, u16 val)
page              237 drivers/net/phy/dp83640.c 	if (dp83640->clock->page != page) {
page              238 drivers/net/phy/dp83640.c 		broadcast_write(phydev, PAGESEL, page);
page              239 drivers/net/phy/dp83640.c 		dp83640->clock->page = page;
page              643 drivers/net/phy/dp83640.c 		enable_broadcast(tmp->phydev, clock->page, 1);
page              648 drivers/net/phy/dp83640.c 	enable_broadcast(master, clock->page, 1);
page             1250 drivers/net/phy/dp83640.c 		enable_broadcast(phydev, clock->page, 1);
page              180 drivers/net/phy/marvell.c 	u8 page;
page              202 drivers/net/phy/marvell.c static int marvell_write_page(struct phy_device *phydev, int page)
page              204 drivers/net/phy/marvell.c 	return __phy_write(phydev, MII_MARVELL_PHY_PAGE, page);
page              207 drivers/net/phy/marvell.c static int marvell_set_page(struct phy_device *phydev, int page)
page              209 drivers/net/phy/marvell.c 	return phy_write(phydev, MII_MARVELL_PHY_PAGE, page);
page              395 drivers/net/phy/marvell.c 		u16 page = be32_to_cpup(paddr + i);
page              401 drivers/net/phy/marvell.c 		if (page != current_page) {
page              402 drivers/net/phy/marvell.c 			current_page = page;
page              403 drivers/net/phy/marvell.c 			ret = marvell_write_page(phydev, page);
page             1321 drivers/net/phy/marvell.c static int marvell_read_status_page(struct phy_device *phydev, int page)
page             1329 drivers/net/phy/marvell.c 	if (page == MII_MARVELL_FIBER_PAGE)
page             1616 drivers/net/phy/marvell.c 	val = phy_read_paged(phydev, stat.page, stat.reg);
page               29 drivers/net/phy/microchip.c static int lan88xx_write_page(struct phy_device *phydev, int page)
page               31 drivers/net/phy/microchip.c 	return __phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, page);
page              324 drivers/net/phy/mscc.c 	u16 page;
page              332 drivers/net/phy/mscc.c 		.page	= MSCC_PHY_PAGE_STANDARD,
page              337 drivers/net/phy/mscc.c 		.page	= MSCC_PHY_PAGE_STANDARD,
page              342 drivers/net/phy/mscc.c 		.page	= MSCC_PHY_PAGE_STANDARD,
page              347 drivers/net/phy/mscc.c 		.page	= MSCC_PHY_PAGE_EXTENDED,
page              352 drivers/net/phy/mscc.c 		.page	= MSCC_PHY_PAGE_EXTENDED,
page              361 drivers/net/phy/mscc.c 		.page	= MSCC_PHY_PAGE_STANDARD,
page              366 drivers/net/phy/mscc.c 		.page	= MSCC_PHY_PAGE_STANDARD,
page              371 drivers/net/phy/mscc.c 		.page	= MSCC_PHY_PAGE_STANDARD,
page              376 drivers/net/phy/mscc.c 		.page	= MSCC_PHY_PAGE_EXTENDED,
page              381 drivers/net/phy/mscc.c 		.page	= MSCC_PHY_PAGE_EXTENDED,
page              386 drivers/net/phy/mscc.c 		.page	= MSCC_PHY_PAGE_EXTENDED_3,
page              391 drivers/net/phy/mscc.c 		.page	= MSCC_PHY_PAGE_EXTENDED_3,
page              396 drivers/net/phy/mscc.c 		.page	= MSCC_PHY_PAGE_EXTENDED_3,
page              401 drivers/net/phy/mscc.c 		.page	= MSCC_PHY_PAGE_EXTENDED_3,
page              440 drivers/net/phy/mscc.c static int vsc85xx_phy_write_page(struct phy_device *phydev, int page)
page              442 drivers/net/phy/mscc.c 	return __phy_write(phydev, MSCC_EXT_PAGE_ACCESS, page);
page              473 drivers/net/phy/mscc.c 	val = phy_read_paged(phydev, priv->hw_stats[i].page,
page              695 drivers/net/phy/phy-core.c static int __phy_write_page(struct phy_device *phydev, int page)
page              697 drivers/net/phy/phy-core.c 	return phydev->drv->write_page(phydev, page);
page              726 drivers/net/phy/phy-core.c int phy_select_page(struct phy_device *phydev, int page)
page              734 drivers/net/phy/phy-core.c 	if (oldpage != page) {
page              735 drivers/net/phy/phy-core.c 		ret = __phy_write_page(phydev, page);
page              791 drivers/net/phy/phy-core.c int phy_read_paged(struct phy_device *phydev, int page, u32 regnum)
page              795 drivers/net/phy/phy-core.c 	oldpage = phy_select_page(phydev, page);
page              812 drivers/net/phy/phy-core.c int phy_write_paged(struct phy_device *phydev, int page, u32 regnum, u16 val)
page              816 drivers/net/phy/phy-core.c 	oldpage = phy_select_page(phydev, page);
page              834 drivers/net/phy/phy-core.c int phy_modify_paged_changed(struct phy_device *phydev, int page, u32 regnum,
page              839 drivers/net/phy/phy-core.c 	oldpage = phy_select_page(phydev, page);
page              857 drivers/net/phy/phy-core.c int phy_modify_paged(struct phy_device *phydev, int page, u32 regnum,
page              860 drivers/net/phy/phy-core.c 	int ret = phy_modify_paged_changed(phydev, page, regnum, mask, set);
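
Note: the phy-core.c entries above are phylib's paged-access helpers. phy_select_page() takes the MDIO bus lock and switches to the target page, returning the previous page (or a negative errno); phy_restore_page() writes the old page back, releases the lock, and folds any earlier error into its return value. Every *_paged accessor is the same bracket; phy_read_paged(), reconstructed from the lines above:

	#include <linux/phy.h>

	int phy_read_paged(struct phy_device *phydev, int page, u32 regnum)
	{
		int ret = 0, oldpage;

		oldpage = phy_select_page(phydev, page);	/* locks the bus */
		if (oldpage >= 0)
			ret = __phy_read(phydev, regnum);

		/* restores the old page, unlocks, and propagates errors */
		return phy_restore_page(phydev, oldpage, ret);
	}
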
page               61 drivers/net/phy/realtek.c static int rtl821x_write_page(struct phy_device *phydev, int page)
page               63 drivers/net/phy/realtek.c 	return __phy_write(phydev, RTL821x_PAGE_SELECT, page);
page              121 drivers/net/phy/vitesse.c static int vsc73xx_write_page(struct phy_device *phydev, int page)
page              123 drivers/net/phy/vitesse.c 	return __phy_write(phydev, VSC73XX_EXT_PAGE_ACCESS, page);
page              133 drivers/net/thunderbolt.c 	struct page *page;
page              332 drivers/net/thunderbolt.c 		if (!tf->page)
page              349 drivers/net/thunderbolt.c 		__free_pages(tf->page, order);
page              350 drivers/net/thunderbolt.c 		tf->page = NULL;
page              477 drivers/net/thunderbolt.c 		if (tf->page)
page              484 drivers/net/thunderbolt.c 		tf->page = dev_alloc_pages(TBNET_RX_PAGE_ORDER);
page              485 drivers/net/thunderbolt.c 		if (!tf->page) {
page              490 drivers/net/thunderbolt.c 		dma_addr = dma_map_page(dma_dev, tf->page, 0,
page              556 drivers/net/thunderbolt.c 		tf->page = alloc_page(GFP_KERNEL);
page              557 drivers/net/thunderbolt.c 		if (!tf->page) {
page              562 drivers/net/thunderbolt.c 		dma_addr = dma_map_page(dma_dev, tf->page, 0, TBNET_FRAME_SIZE,
page              565 drivers/net/thunderbolt.c 			__free_page(tf->page);
page              566 drivers/net/thunderbolt.c 			tf->page = NULL;
page              753 drivers/net/thunderbolt.c 		struct page *page;
page              775 drivers/net/thunderbolt.c 		page = tf->page;
page              776 drivers/net/thunderbolt.c 		tf->page = NULL;
page              780 drivers/net/thunderbolt.c 		hdr = page_address(page);
page              782 drivers/net/thunderbolt.c 			__free_pages(page, TBNET_RX_PAGE_ORDER);
page              792 drivers/net/thunderbolt.c 			skb = build_skb(page_address(page),
page              795 drivers/net/thunderbolt.c 				__free_pages(page, TBNET_RX_PAGE_ORDER);
page              806 drivers/net/thunderbolt.c 					page, hdr_size, frame_size,
page              905 drivers/net/thunderbolt.c 	struct thunderbolt_ip_frame_header *hdr = page_address(frames[0]->page);
page              919 drivers/net/thunderbolt.c 			hdr = page_address(frames[i]->page);
page              980 drivers/net/thunderbolt.c 		hdr = page_address(frames[i]->page);
page             1037 drivers/net/thunderbolt.c 	hdr = page_address(frames[frame_index]->page);
page             1087 drivers/net/thunderbolt.c 		hdr = page_address(frames[frame_index]->page);
page              182 drivers/net/tun.c 	struct page *page;
page             1491 drivers/net/tun.c 		struct page *page;
page             1503 drivers/net/tun.c 		page = virt_to_head_page(frag);
page             1504 drivers/net/tun.c 		skb_fill_page_desc(skb, i - 1, page,
page             1505 drivers/net/tun.c 				   frag - page_address(page), fragsz);
page             1617 drivers/net/tun.c 	get_page(alloc_frag->page);
page             1680 drivers/net/tun.c 	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
page             1681 drivers/net/tun.c 	copied = copy_page_from_iter(alloc_frag->page,
page             1714 drivers/net/tun.c 			get_page(alloc_frag->page);
page             1720 drivers/net/tun.c 				put_page(alloc_frag->page);
page             2424 drivers/net/tun.c 	if (tpage->page)
page             2425 drivers/net/tun.c 		__page_frag_cache_drain(tpage->page, tpage->count);
page             2443 drivers/net/tun.c 	struct page *page;
page             2470 drivers/net/tun.c 			page = virt_to_head_page(xdp->data);
page             2471 drivers/net/tun.c 			if (tpage->page == page) {
page             2475 drivers/net/tun.c 				tpage->page = page;
page              119 drivers/net/usb/cdc-phonet.c 	struct page *page;
page              122 drivers/net/usb/cdc-phonet.c 	page = __dev_alloc_page(gfp_flags | __GFP_NOMEMALLOC);
page              123 drivers/net/usb/cdc-phonet.c 	if (!page)
page              126 drivers/net/usb/cdc-phonet.c 	usb_fill_bulk_urb(req, pnd->usb, pnd->rx_pipe, page_address(page),
page              132 drivers/net/usb/cdc-phonet.c 		put_page(page);
page              141 drivers/net/usb/cdc-phonet.c 	struct page *page = virt_to_page(req->transfer_buffer);
page              154 drivers/net/usb/cdc-phonet.c 				skb_put_data(skb, page_address(page), 1);
page              156 drivers/net/usb/cdc-phonet.c 						page, 1, req->actual_length,
page              158 drivers/net/usb/cdc-phonet.c 				page = NULL;
page              162 drivers/net/usb/cdc-phonet.c 					page, 0, req->actual_length,
page              164 drivers/net/usb/cdc-phonet.c 			page = NULL;
page              201 drivers/net/usb/cdc-phonet.c 	if (page)
page              202 drivers/net/usb/cdc-phonet.c 		put_page(page);
page              718 drivers/net/usb/r8152.c 	struct page *page;
page             1530 drivers/net/usb/r8152.c 	put_page(agg->page);
page             1548 drivers/net/usb/r8152.c 	rx_agg->page = alloc_pages(mflags | __GFP_COMP, order);
page             1549 drivers/net/usb/r8152.c 	if (!rx_agg->page)
page             1552 drivers/net/usb/r8152.c 	rx_agg->buffer = page_address(rx_agg->page);
page             1571 drivers/net/usb/r8152.c 	__free_pages(rx_agg->page, order);
page             2019 drivers/net/usb/r8152.c 		if (page_count(agg->page) == 1) {
page             2132 drivers/net/usb/r8152.c 				skb_add_rx_frag(skb, 0, agg->page,
page             2136 drivers/net/usb/r8152.c 				get_page(agg->page);
page             2157 drivers/net/usb/r8152.c 		WARN_ON(!agg_free && page_count(agg->page) > 1);
page             2161 drivers/net/usb/r8152.c 			if (page_count(agg->page) == 1) {
page             2558 drivers/net/usb/r8152.c 		if (page_count(agg->page) > 1)
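
The r8152.c entries repeatedly test page_count(agg->page) == 1 before reusing an rx aggregation buffer: the driver holds one reference, and every skb_add_rx_frag()/get_page() pair hands an extra reference to the network stack. Once the count returns to 1 the page is exclusively owned again and can be resubmitted rather than freed and reallocated. A one-line sketch of that test, with a hypothetical helper name:

	/* Sketch: true when no skb still references the aggregation page. */
	static bool rtl_rx_buffer_reusable(struct rx_agg *agg)
	{
		return page_count(agg->page) == 1;
	}
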
page              621 drivers/net/veth.c 		struct page *page;
page              628 drivers/net/veth.c 		page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
page              629 drivers/net/veth.c 		if (!page)
page              632 drivers/net/veth.c 		head = page_address(page);
page              145 drivers/net/virtio_net.c 	struct page *pages;
page              291 drivers/net/virtio_net.c static void give_pages(struct receive_queue *rq, struct page *page)
page              293 drivers/net/virtio_net.c 	struct page *end;
page              296 drivers/net/virtio_net.c 	for (end = page; end->private; end = (struct page *)end->private);
page              298 drivers/net/virtio_net.c 	rq->pages = page;
page              301 drivers/net/virtio_net.c static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
page              303 drivers/net/virtio_net.c 	struct page *p = rq->pages;
page              306 drivers/net/virtio_net.c 		rq->pages = (struct page *)p->private;
page              372 drivers/net/virtio_net.c 				   struct page *page, unsigned int offset,
page              381 drivers/net/virtio_net.c 	p = page_address(page) + offset;
page              413 drivers/net/virtio_net.c 			skb_add_rx_frag(skb, 0, page, offset, len, truesize);
page              415 drivers/net/virtio_net.c 			put_page(page);
page              433 drivers/net/virtio_net.c 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
page              436 drivers/net/virtio_net.c 		page = (struct page *)page->private;
page              440 drivers/net/virtio_net.c 	if (page)
page              441 drivers/net/virtio_net.c 		give_pages(rq, page);
page              578 drivers/net/virtio_net.c static struct page *xdp_linearize_page(struct receive_queue *rq,
page              580 drivers/net/virtio_net.c 				       struct page *p,
page              585 drivers/net/virtio_net.c 	struct page *page = alloc_page(GFP_ATOMIC);
page              587 drivers/net/virtio_net.c 	if (!page)
page              590 drivers/net/virtio_net.c 	memcpy(page_address(page) + page_off, page_address(p) + offset, *len);
page              614 drivers/net/virtio_net.c 		memcpy(page_address(page) + page_off,
page              622 drivers/net/virtio_net.c 	return page;
page              624 drivers/net/virtio_net.c 	__free_pages(page, 0);
page              643 drivers/net/virtio_net.c 	struct page *page = virt_to_head_page(buf);
page              645 drivers/net/virtio_net.c 	struct page *xdp_page;
page              664 drivers/net/virtio_net.c 			int offset = buf - page_address(page) + header_offset;
page              673 drivers/net/virtio_net.c 			xdp_page = xdp_linearize_page(rq, &num_buf, page,
page              680 drivers/net/virtio_net.c 			put_page(page);
page              681 drivers/net/virtio_net.c 			page = xdp_page;
page              733 drivers/net/virtio_net.c 		put_page(page);
page              750 drivers/net/virtio_net.c 	put_page(page);
page              762 drivers/net/virtio_net.c 	struct page *page = buf;
page              763 drivers/net/virtio_net.c 	struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len,
page              774 drivers/net/virtio_net.c 	give_pages(rq, page);
page              789 drivers/net/virtio_net.c 	struct page *page = virt_to_head_page(buf);
page              790 drivers/net/virtio_net.c 	int offset = buf - page_address(page);
page              804 drivers/net/virtio_net.c 		struct page *xdp_page;
page              826 drivers/net/virtio_net.c 						      page, offset,
page              833 drivers/net/virtio_net.c 			xdp_page = page;
page              863 drivers/net/virtio_net.c 			if (unlikely(xdp_page != page)) {
page              865 drivers/net/virtio_net.c 				put_page(page);
page              880 drivers/net/virtio_net.c 				if (unlikely(xdp_page != page))
page              885 drivers/net/virtio_net.c 			if (unlikely(xdp_page != page))
page              886 drivers/net/virtio_net.c 				put_page(page);
page              893 drivers/net/virtio_net.c 				if (unlikely(xdp_page != page))
page              898 drivers/net/virtio_net.c 			if (unlikely(xdp_page != page))
page              899 drivers/net/virtio_net.c 				put_page(page);
page              909 drivers/net/virtio_net.c 			if (unlikely(xdp_page != page))
page              924 drivers/net/virtio_net.c 	head_skb = page_to_skb(vi, rq, page, offset, len, truesize, !xdp_prog);
page              943 drivers/net/virtio_net.c 		page = virt_to_head_page(buf);
page              972 drivers/net/virtio_net.c 		offset = buf - page_address(page);
page              973 drivers/net/virtio_net.c 		if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
page              974 drivers/net/virtio_net.c 			put_page(page);
page              978 drivers/net/virtio_net.c 			skb_add_rx_frag(curr_skb, num_skb_frags, page,
page              990 drivers/net/virtio_net.c 	put_page(page);
page             1000 drivers/net/virtio_net.c 		page = virt_to_head_page(buf);
page             1001 drivers/net/virtio_net.c 		put_page(page);
page             1089 drivers/net/virtio_net.c 	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
page             1090 drivers/net/virtio_net.c 	get_page(alloc_frag->page);
page             1103 drivers/net/virtio_net.c 	struct page *first, *list = NULL;
page             1185 drivers/net/virtio_net.c 	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
page             1187 drivers/net/virtio_net.c 	get_page(alloc_frag->page);
page             2690 drivers/net/virtio_net.c 		if (vi->rq[i].alloc_frag.page)
page             2691 drivers/net/virtio_net.c 			put_page(vi->rq[i].alloc_frag.page);
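
The virtio_net entries at lines 291-306 show a free-page pool kept as a singly linked list threaded through page->private. A self-contained restatement of that pattern, based on the visible excerpts with the splice step filled in (treat the splice line as a reading of the excerpts, not a verbatim quote):

	static void give_pages(struct receive_queue *rq, struct page *page)
	{
		struct page *end;

		/* Find the tail of the chain being returned... */
		for (end = page; end->private; end = (struct page *)end->private)
			;
		/* ...and splice the existing free list onto it. */
		end->private = (unsigned long)rq->pages;
		rq->pages = page;
	}

	static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
	{
		struct page *p = rq->pages;

		if (p) {
			rq->pages = (struct page *)p->private;
			p->private = 0;		/* clear the link before reuse */
		} else {
			p = alloc_page(gfp_mask);
		}
		return p;
	}
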
page              609 drivers/net/vmxnet3/vmxnet3_drv.c 			if (rbi->page == NULL) {
page              610 drivers/net/vmxnet3/vmxnet3_drv.c 				rbi->page = alloc_page(GFP_ATOMIC);
page              611 drivers/net/vmxnet3/vmxnet3_drv.c 				if (unlikely(rbi->page == NULL)) {
page              617 drivers/net/vmxnet3/vmxnet3_drv.c 						rbi->page, 0, PAGE_SIZE,
page              621 drivers/net/vmxnet3/vmxnet3_drv.c 					put_page(rbi->page);
page              664 drivers/net/vmxnet3/vmxnet3_drv.c 	__skb_frag_set_page(frag, rbi->page);
page             1298 drivers/net/vmxnet3/vmxnet3_drv.c 		struct page *new_page = NULL;
page             1484 drivers/net/vmxnet3/vmxnet3_drv.c 				rbi->page = new_page;
page             1601 drivers/net/vmxnet3/vmxnet3_drv.c 					rq->buf_info[ring_idx][i].page) {
page             1604 drivers/net/vmxnet3/vmxnet3_drv.c 				put_page(rq->buf_info[ring_idx][i].page);
page             1605 drivers/net/vmxnet3/vmxnet3_drv.c 				rq->buf_info[ring_idx][i].page = NULL;
page             1639 drivers/net/vmxnet3/vmxnet3_drv.c 				BUG_ON(rq->buf_info[i][j].page != NULL);
page              259 drivers/net/vmxnet3/vmxnet3_int.h 		struct page    *page;
page               72 drivers/net/wan/c101.c 	u8 page;
page              105 drivers/net/wan/c101.c 	return card->page;
page              108 drivers/net/wan/c101.c static inline void openwin(card_t *card, u8 page)
page              110 drivers/net/wan/c101.c 	card->page = page;
page              111 drivers/net/wan/c101.c 	writeb(page, card->win0base + C101_PAGE);
page              248 drivers/net/wan/hd64570.c 	u8 page;
page              258 drivers/net/wan/hd64570.c 	page = buff / winsize(card);
page              262 drivers/net/wan/hd64570.c 	openwin(card, page);
page              266 drivers/net/wan/hd64570.c 		openwin(card, page + 1);
page              380 drivers/net/wan/hd64570.c 	u8 page = sca_get_page(card);
page              399 drivers/net/wan/hd64570.c 	openwin(card, page);		/* Restore original page */
page              569 drivers/net/wan/hd64570.c 	u8 page = sca_get_page(card);
page              611 drivers/net/wan/hd64570.c 	openwin(card, page); /* Restore original page */
page              623 drivers/net/wan/hd64570.c 	u8 page;
page              639 drivers/net/wan/hd64570.c 	page = buff / winsize(card);
page              643 drivers/net/wan/hd64570.c 	openwin(card, page);
page              646 drivers/net/wan/hd64570.c 		openwin(card, page + 1);
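
The hd64570.c entries show windowed card-memory access: a buffer offset is split into a page number (buff / winsize(card)) and an in-window offset, the window is opened on that page, and a transfer that crosses the window boundary reopens the next page; callers save and restore the original page around the operation. A hedged sketch of that addressing scheme; sca_copy_from_card() is hypothetical and winbase() is assumed to be the per-card window accessor macro these drivers define:

	static void sca_copy_from_card(card_t *card, u32 buff, u8 *dest, u16 len)
	{
		u16 winsz = winsize(card);
		u8 page = buff / winsz;
		u16 offset = buff % winsz;
		u16 chunk = min_t(u16, len, winsz - offset);
		u8 saved = sca_get_page(card);	/* remember the caller's page */

		openwin(card, page);
		memcpy_fromio(dest, winbase(card) + offset, chunk);
		if (len > chunk) {
			openwin(card, page + 1);   /* data wraps into the next window */
			memcpy_fromio(dest + chunk, winbase(card), len - chunk);
		}
		openwin(card, saved);		/* restore the original page */
	}
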
page              154 drivers/net/wan/n2.c static __inline__ void openwin(card_t *card, u8 page)
page              157 drivers/net/wan/n2.c 	outb((psr & ~PSR_PAGEBITS) | page, card->io + N2_PSR);
page             4036 drivers/net/wireless/cisco/airo.c static u16 aux_setup(struct airo_info *ai, u16 page,
page             4041 drivers/net/wireless/cisco/airo.c 	OUT4500(ai, AUXPAGE, page);
page             4054 drivers/net/wireless/cisco/airo.c 	u16 page;
page             4062 drivers/net/wireless/cisco/airo.c 	page = IN4500(ai, SWS0+whichbap);
page             4064 drivers/net/wireless/cisco/airo.c 	next = aux_setup(ai, page, offset, &len);
page              985 drivers/net/wireless/intel/iwlegacy/3945-mac.c 	struct page *page;
page             1005 drivers/net/wireless/intel/iwlegacy/3945-mac.c 		page = alloc_pages(gfp_mask, il->hw_params.rx_page_order);
page             1006 drivers/net/wireless/intel/iwlegacy/3945-mac.c 		if (!page) {
page             1022 drivers/net/wireless/intel/iwlegacy/3945-mac.c 		    pci_map_page(il->pci_dev, page, 0,
page             1027 drivers/net/wireless/intel/iwlegacy/3945-mac.c 			__free_pages(page, il->hw_params.rx_page_order);
page             1038 drivers/net/wireless/intel/iwlegacy/3945-mac.c 			__free_pages(page, il->hw_params.rx_page_order);
page             1046 drivers/net/wireless/intel/iwlegacy/3945-mac.c 		rxb->page = page;
page             1068 drivers/net/wireless/intel/iwlegacy/3945-mac.c 		if (rxq->pool[i].page != NULL) {
page             1072 drivers/net/wireless/intel/iwlegacy/3945-mac.c 			__il_free_pages(il, rxq->pool[i].page);
page             1073 drivers/net/wireless/intel/iwlegacy/3945-mac.c 			rxq->pool[i].page = NULL;
page             1117 drivers/net/wireless/intel/iwlegacy/3945-mac.c 		if (rxq->pool[i].page != NULL) {
page             1121 drivers/net/wireless/intel/iwlegacy/3945-mac.c 			__il_free_pages(il, rxq->pool[i].page);
page             1122 drivers/net/wireless/intel/iwlegacy/3945-mac.c 			rxq->pool[i].page = NULL;
page             1255 drivers/net/wireless/intel/iwlegacy/3945-mac.c 			if (rxb->page)
page             1265 drivers/net/wireless/intel/iwlegacy/3945-mac.c 		if (rxb->page != NULL) {
page             1267 drivers/net/wireless/intel/iwlegacy/3945-mac.c 			    pci_map_page(il->pci_dev, rxb->page, 0,
page             1272 drivers/net/wireless/intel/iwlegacy/3945-mac.c 				__il_free_pages(il, rxb->page);
page             1273 drivers/net/wireless/intel/iwlegacy/3945-mac.c 				rxb->page = NULL;
page              510 drivers/net/wireless/intel/iwlegacy/3945.c 		skb_add_rx_frag(skb, 0, rxb->page,
page              514 drivers/net/wireless/intel/iwlegacy/3945.c 		rxb->page = NULL;
page               95 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		if (rxq->pool[i].page != NULL) {
page               99 drivers/net/wireless/intel/iwlegacy/4965-mac.c 			__il_free_pages(il, rxq->pool[i].page);
page              100 drivers/net/wireless/intel/iwlegacy/4965-mac.c 			rxq->pool[i].page = NULL;
page              260 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		BUG_ON(rxb && rxb->page);
page              304 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	struct page *page;
page              324 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		page = alloc_pages(gfp_mask, il->hw_params.rx_page_order);
page              325 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		if (!page) {
page              345 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		    pci_map_page(il->pci_dev, page, 0,
page              349 drivers/net/wireless/intel/iwlegacy/4965-mac.c 			__free_pages(page, il->hw_params.rx_page_order);
page              360 drivers/net/wireless/intel/iwlegacy/4965-mac.c 			__free_pages(page, il->hw_params.rx_page_order);
page              368 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		BUG_ON(rxb->page);
page              370 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		rxb->page = page;
page              410 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		if (rxq->pool[i].page != NULL) {
page              414 drivers/net/wireless/intel/iwlegacy/4965-mac.c 			__il_free_pages(il, rxq->pool[i].page);
page              415 drivers/net/wireless/intel/iwlegacy/4965-mac.c 			rxq->pool[i].page = NULL;
page              595 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb),
page              598 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		rxb->page = NULL;
page             4286 drivers/net/wireless/intel/iwlegacy/4965-mac.c 			if (rxb->page)
page             4296 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		if (rxb->page != NULL) {
page             4298 drivers/net/wireless/intel/iwlegacy/4965-mac.c 			    pci_map_page(il->pci_dev, rxb->page, 0,
page             4304 drivers/net/wireless/intel/iwlegacy/4965-mac.c 				__il_free_pages(il, rxb->page);
page             4305 drivers/net/wireless/intel/iwlegacy/4965-mac.c 				rxb->page = NULL;
page             3315 drivers/net/wireless/intel/iwlegacy/common.c 		rxb->page = NULL;
page               83 drivers/net/wireless/intel/iwlegacy/common.h 	struct page *page;
page               87 drivers/net/wireless/intel/iwlegacy/common.h #define rxb_addr(r) page_address(r->page)
page             1484 drivers/net/wireless/intel/iwlegacy/common.h __il_free_pages(struct il_priv *il, struct page *page)
page             1486 drivers/net/wireless/intel/iwlegacy/common.h 	__free_pages(page, il->hw_params.rx_page_order);
page             1491 drivers/net/wireless/intel/iwlegacy/common.h il_free_pages(struct il_priv *il, unsigned long page)
page             1493 drivers/net/wireless/intel/iwlegacy/common.h 	free_pages(page, il->hw_params.rx_page_order);
page              629 drivers/net/wireless/intel/iwlwifi/fw/dbg.c 	struct page *new_page;
page              798 drivers/net/wireless/intel/iwlwifi/fw/dbg.c 		struct page *pages =
page             1119 drivers/net/wireless/intel/iwlwifi/fw/dbg.c 	struct page *page = fwrt->fw_paging_db[++idx].fw_paging_block;
page             1128 drivers/net/wireless/intel/iwlwifi/fw/dbg.c 	memcpy(range->data, page_address(page), page_size);
page              183 drivers/net/wireless/intel/iwlwifi/fw/img.h 	struct page *fw_paging_block;
page              100 drivers/net/wireless/intel/iwlwifi/fw/paging.c 	struct page *block;
page              273 drivers/net/wireless/intel/iwlwifi/iwl-trans.h 	struct page *_page;
page              290 drivers/net/wireless/intel/iwlwifi/iwl-trans.h static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
page              112 drivers/net/wireless/intel/iwlwifi/pcie/internal.h 	struct page *page;
page              397 drivers/net/wireless/intel/iwlwifi/pcie/internal.h 	struct page *page;
page              369 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		BUG_ON(rxb && rxb->page);
page              418 drivers/net/wireless/intel/iwlwifi/pcie/rx.c static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
page              422 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	struct page *page;
page              429 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
page              430 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	if (!page) {
page              443 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	return page;
page              460 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	struct page *page;
page              471 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		page = iwl_pcie_rx_alloc_page(trans, priority);
page              472 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		if (!page)
page              479 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 			__free_pages(page, trans_pcie->rx_page_order);
page              487 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		BUG_ON(rxb->page);
page              488 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		rxb->page = page;
page              491 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 			dma_map_page(trans->dev, page, 0,
page              495 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 			rxb->page = NULL;
page              499 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 			__free_pages(page, trans_pcie->rx_page_order);
page              518 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		if (!trans_pcie->rx_pool[i].page)
page              523 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		__free_pages(trans_pcie->rx_pool[i].page,
page              525 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		trans_pcie->rx_pool[i].page = NULL;
page              561 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 			struct page *page;
page              572 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 			BUG_ON(rxb->page);
page              575 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 			page = iwl_pcie_rx_alloc_page(trans, gfp_mask);
page              576 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 			if (!page)
page              578 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 			rxb->page = page;
page              581 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 			rxb->page_dma = dma_map_page(trans->dev, page, 0,
page              585 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 				rxb->page = NULL;
page              586 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 				__free_pages(page, trans_pcie->rx_page_order);
page             1262 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 			._page = rxb->page,
page             1357 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		__free_pages(rxb->page, trans_pcie->rx_page_order);
page             1358 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		rxb->page = NULL;
page             1364 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	if (rxb->page != NULL) {
page             1366 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 			dma_map_page(trans->dev, rxb->page, 0,
page             1375 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 			__free_pages(rxb->page, trans_pcie->rx_page_order);
page             1376 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 			rxb->page = NULL;
page             2005 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 		if (p->page)
page             2006 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 			__free_page(p->page);
page             2991 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 		memcpy(rb->data, page_address(rxb->page), max_len);
page             2993 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 		rxb->page_dma = dma_map_page(trans->dev, rxb->page, 0,
page              258 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 	struct page **page_ptr;
page              278 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 	get_page(hdr_page->page);
page              281 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 	*page_ptr = hdr_page->page;
page              626 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 	struct page **page_ptr;
page             1838 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 		struct page *p = rxb_steal_page(rxb);
page             2062 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 	if (!p->page)
page             2066 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 	if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE)
page             2070 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 	__free_page(p->page);
page             2073 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 	p->page = alloc_page(GFP_ATOMIC);
page             2074 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 	if (!p->page)
page             2076 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 	p->pos = page_address(p->page);
page             2113 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 	struct page **page_ptr;
page             2140 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 	get_page(hdr_page->page);
page             2143 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 	*page_ptr = hdr_page->page;
page              208 drivers/net/wireless/intersil/hostap/hostap_common.h 	__le16 page;
page               90 drivers/net/wireless/intersil/hostap/hostap_download.c 	u16 page, offset;
page               94 drivers/net/wireless/intersil/hostap/hostap_download.c 	page = addr >> 7;
page               97 drivers/net/wireless/intersil/hostap/hostap_download.c 	HFA384X_OUTW(page, HFA384X_AUXPAGE_OFF);
page              121 drivers/net/wireless/intersil/hostap/hostap_download.c 	u16 page, offset;
page              125 drivers/net/wireless/intersil/hostap/hostap_download.c 	page = addr >> 7;
page              128 drivers/net/wireless/intersil/hostap/hostap_download.c 	HFA384X_OUTW(page, HFA384X_AUXPAGE_OFF);
page              182 drivers/net/wireless/intersil/hostap/hostap_download.c 	u16 page[0x80];
page              189 drivers/net/wireless/intersil/hostap/hostap_download.c 	hfa384x_from_aux(ctx->local->dev, (unsigned long)v - 1, 0x80, ctx->page);
page              190 drivers/net/wireless/intersil/hostap/hostap_download.c 	seq_write(m, ctx->page, 0x80);
page              596 drivers/net/wireless/intersil/hostap/hostap_download.c 		__le16 page;
page              619 drivers/net/wireless/intersil/hostap/hostap_download.c 	       le16_to_cpu(dlbuffer.page),
page              622 drivers/net/wireless/intersil/hostap/hostap_download.c 	bufaddr = (le16_to_cpu(dlbuffer.page) << 7) + le16_to_cpu(dlbuffer.offset);
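
The hostap_download.c entries decompose a linear AUX-port address into a 128-word page and an offset (page = addr >> 7), and the dlbuffer math at line 622 recombines them as (page << 7) + offset. A small sketch of that split; the helper name is invented:

	/* Sketch: split a linear Prism2 AUX address into page/offset. */
	static inline void prism2_aux_split(unsigned long addr, u16 *page, u16 *offset)
	{
		*page = addr >> 7;
		*offset = addr & 0x7f;	/* low 7 bits stay within the page */
	}
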
page              232 drivers/net/wireless/intersil/hostap/hostap_proc.c static int prism2_io_debug_proc_read(char *page, char **start, off_t off,
page              253 drivers/net/wireless/intersil/hostap/hostap_proc.c 		memcpy(page, ((u8 *) &local->io_debug[head]) + off, copy);
page              256 drivers/net/wireless/intersil/hostap/hostap_proc.c 			memcpy(&page[copy], local->io_debug, left);
page              258 drivers/net/wireless/intersil/hostap/hostap_proc.c 		memcpy(page, ((u8 *) local->io_debug) + (off - start_bytes),
page              262 drivers/net/wireless/intersil/hostap/hostap_proc.c 	*start = page;
page             1318 drivers/net/wireless/mac80211_hwsim.c 			struct page *page = alloc_page(GFP_ATOMIC);
page             1320 drivers/net/wireless/mac80211_hwsim.c 			if (!page)
page             1325 drivers/net/wireless/mac80211_hwsim.c 				__free_page(page);
page             1329 drivers/net/wireless/mac80211_hwsim.c 			memcpy(page_address(page), skb->data, skb->len);
page             1330 drivers/net/wireless/mac80211_hwsim.c 			skb_add_rx_frag(nskb, 0, page, 0, skb->len, skb->len);
page               82 drivers/net/wireless/marvell/mwifiex/debugfs.c 	unsigned long page = get_zeroed_page(GFP_KERNEL);
page               83 drivers/net/wireless/marvell/mwifiex/debugfs.c 	char *p = (char *) page, fmt[64];
page              148 drivers/net/wireless/marvell/mwifiex/debugfs.c 	ret = simple_read_from_buffer(ubuf, count, ppos, (char *) page,
page              149 drivers/net/wireless/marvell/mwifiex/debugfs.c 				      (unsigned long) p - page);
page              152 drivers/net/wireless/marvell/mwifiex/debugfs.c 	free_page(page);
page              183 drivers/net/wireless/marvell/mwifiex/debugfs.c 	unsigned long page = get_zeroed_page(GFP_KERNEL);
page              184 drivers/net/wireless/marvell/mwifiex/debugfs.c 	char *p = (char *) page;
page              235 drivers/net/wireless/marvell/mwifiex/debugfs.c 	ret = simple_read_from_buffer(ubuf, count, ppos, (char *) page,
page              236 drivers/net/wireless/marvell/mwifiex/debugfs.c 				      (unsigned long) p - page);
page              239 drivers/net/wireless/marvell/mwifiex/debugfs.c 	free_page(page);
page              262 drivers/net/wireless/marvell/mwifiex/debugfs.c 	unsigned long page = get_zeroed_page(GFP_KERNEL);
page              263 drivers/net/wireless/marvell/mwifiex/debugfs.c 	char *p = (char *)page;
page              321 drivers/net/wireless/marvell/mwifiex/debugfs.c 	ret = simple_read_from_buffer(ubuf, count, ppos, (char *)page,
page              322 drivers/net/wireless/marvell/mwifiex/debugfs.c 				      (unsigned long)p - page);
page              393 drivers/net/wireless/marvell/mwifiex/debugfs.c 	unsigned long page = get_zeroed_page(GFP_KERNEL);
page              394 drivers/net/wireless/marvell/mwifiex/debugfs.c 	char *p = (char *) page;
page              406 drivers/net/wireless/marvell/mwifiex/debugfs.c 	ret = simple_read_from_buffer(ubuf, count, ppos, (char *) page,
page              407 drivers/net/wireless/marvell/mwifiex/debugfs.c 				      (unsigned long) p - page);
page              410 drivers/net/wireless/marvell/mwifiex/debugfs.c 	free_page(page);
page              518 drivers/net/wireless/marvell/mwifiex/debugfs.c 	unsigned long page = get_zeroed_page(GFP_KERNEL);
page              519 drivers/net/wireless/marvell/mwifiex/debugfs.c 	char *buf = (char *)page;
page              530 drivers/net/wireless/marvell/mwifiex/debugfs.c 	free_page(page);
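
The mwifiex debugfs reads above all follow one pattern: borrow a zeroed page as a scratch text buffer, format into it, copy the result to userspace with simple_read_from_buffer(), and return the page. A minimal hedged version of that shape; the attribute content is invented:

	static ssize_t example_debugfs_read(struct file *file, char __user *ubuf,
					    size_t count, loff_t *ppos)
	{
		unsigned long page = get_zeroed_page(GFP_KERNEL);
		char *p = (char *)page;
		ssize_t ret;

		if (!page)
			return -ENOMEM;

		p += sprintf(p, "example value = %d\n", 42);

		ret = simple_read_from_buffer(ubuf, count, ppos, (char *)page,
					      (unsigned long)p - page);
		free_page(page);
		return ret;
	}
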
page              402 drivers/net/wireless/mediatek/mt76/dma.c 	struct page *page;
page              419 drivers/net/wireless/mediatek/mt76/dma.c 	page = virt_to_page(q->rx_page.va);
page              420 drivers/net/wireless/mediatek/mt76/dma.c 	__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
page              448 drivers/net/wireless/mediatek/mt76/dma.c 	struct page *page = virt_to_head_page(data);
page              449 drivers/net/wireless/mediatek/mt76/dma.c 	int offset = data - page_address(page);
page              455 drivers/net/wireless/mediatek/mt76/dma.c 		skb_add_rx_frag(skb, shinfo->nr_frags, page, offset, len,
page              281 drivers/net/wireless/mediatek/mt76/usb.c 		struct page *page;
page              289 drivers/net/wireless/mediatek/mt76/usb.c 		page = virt_to_head_page(data);
page              290 drivers/net/wireless/mediatek/mt76/usb.c 		offset = data - page_address(page);
page              291 drivers/net/wireless/mediatek/mt76/usb.c 		sg_set_page(&urb->sg[i], page, q->buf_size, offset);
page              429 drivers/net/wireless/mediatek/mt76/usb.c 		struct page *page;
page              440 drivers/net/wireless/mediatek/mt76/usb.c 		page = virt_to_head_page(data);
page              442 drivers/net/wireless/mediatek/mt76/usb.c 				page, data - page_address(page),
page              614 drivers/net/wireless/mediatek/mt76/usb.c 	struct page *page;
page              623 drivers/net/wireless/mediatek/mt76/usb.c 	page = virt_to_page(q->rx_page.va);
page              624 drivers/net/wireless/mediatek/mt76/usb.c 	__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
page               29 drivers/net/wireless/mediatek/mt7601u/dma.c 			void *data, u32 seg_len, u32 truesize, struct page *p)
page               77 drivers/net/wireless/mediatek/mt7601u/dma.c 				   u32 seg_len, struct page *p)
page              132 drivers/net/wireless/mediatek/mt7601u/dma.c 	struct page *new_p = NULL;
page               72 drivers/net/wireless/mediatek/mt7601u/mt7601u.h 		struct page *p;
page               98 drivers/net/wireless/realtek/rtlwifi/debug.c 	u32 page = debugfs_priv->cb_data;
page              103 drivers/net/wireless/realtek/rtlwifi/debug.c 		seq_printf(m, "\n%8.8x  ", n + page);
page              106 drivers/net/wireless/realtek/rtlwifi/debug.c 				   rtl_read_dword(rtlpriv, (page | n)));
page              112 drivers/net/wireless/realtek/rtlwifi/debug.c #define RTL_DEBUG_IMPL_MAC_SERIES(page, addr)			\
page              113 drivers/net/wireless/realtek/rtlwifi/debug.c static struct rtl_debugfs_priv rtl_debug_priv_mac_ ##page = {	\
page              140 drivers/net/wireless/realtek/rtlwifi/debug.c 	u32 page = debugfs_priv->cb_data;
page              145 drivers/net/wireless/realtek/rtlwifi/debug.c 		seq_printf(m, "\n%8.8x  ", n + page);
page              148 drivers/net/wireless/realtek/rtlwifi/debug.c 				   rtl_get_bbreg(hw, (page | n), 0xffffffff));
page              154 drivers/net/wireless/realtek/rtlwifi/debug.c #define RTL_DEBUG_IMPL_BB_SERIES(page, addr)			\
page              155 drivers/net/wireless/realtek/rtlwifi/debug.c static struct rtl_debugfs_priv rtl_debug_priv_bb_ ##page = {	\
page              201 drivers/net/wireless/realtek/rtlwifi/debug.c #define RTL_DEBUG_IMPL_RF_SERIES(page, addr)			\
page              202 drivers/net/wireless/realtek/rtlwifi/debug.c static struct rtl_debugfs_priv rtl_debug_priv_rf_ ##page = {	\
page              256 drivers/net/wireless/realtek/rtlwifi/debug.c #define RTL_DEBUG_IMPL_CAM_SERIES(page, addr)			\
page              257 drivers/net/wireless/realtek/rtlwifi/debug.c static struct rtl_debugfs_priv rtl_debug_priv_cam_ ##page = {	\
page             1303 drivers/net/wireless/realtek/rtlwifi/efuse.c void rtl_fw_page_write(struct ieee80211_hw *hw, u32 page, const u8 *buffer,
page             1308 drivers/net/wireless/realtek/rtlwifi/efuse.c 	u8 u8page = (u8)(page & 0x07);
page               94 drivers/net/wireless/realtek/rtlwifi/efuse.h void rtl_fw_page_write(struct ieee80211_hw *hw, u32 page, const u8 *buffer,
page               41 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c 	u32 page, offset;
page               53 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c 	for (page = 0; page < pagenums; page++) {
page               54 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c 		offset = page * FW_8192C_PAGE_SIZE;
page               55 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c 		rtl_fw_page_write(hw, page, (bufferptr + offset),
page               61 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c 		page = pagenums;
page               62 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c 		rtl_fw_page_write(hw, page, (bufferptr + offset), remainsize);
page               61 drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c 		u32 page, offset;
page               72 drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c 		for (page = 0; page < pagenums; page++) {
page               73 drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c 			offset = page * FW_8192C_PAGE_SIZE;
page               74 drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c 			rtl_fw_page_write(hw, page, (bufferptr + offset),
page               80 drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c 			page = pagenums;
page               81 drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c 			rtl_fw_page_write(hw, page, (bufferptr + offset),
page               48 drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c 	u32 page, offset;
page               57 drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c 	for (page = 0; page < pagenums; page++) {
page               58 drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c 		offset = page * FW_8192D_PAGE_SIZE;
page               59 drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c 		rtl_fw_page_write(hw, page, (bufferptr + offset),
page               64 drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c 		page = pagenums;
page               65 drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c 		rtl_fw_page_write(hw, page, (bufferptr + offset), remainsize);
page               37 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c 	u32 page, offset;
page               49 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c 	for (page = 0; page < pagenums; page++) {
page               50 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c 		offset = page * FW_8192C_PAGE_SIZE;
page               51 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c 		rtl_fw_page_write(hw, page, (bufferptr + offset),
page               58 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c 		page = pagenums;
page               59 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c 		rtl_fw_page_write(hw, page, (bufferptr + offset), remainsize);
page               42 drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c 	u32 page, offset;
page               55 drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c 	for (page = 0; page < page_nums; page++) {
page               56 drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c 		offset = page * FW_8192C_PAGE_SIZE;
page               57 drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c 		rtl_fw_page_write(hw, page, (bufferptr + offset),
page               63 drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c 		page = page_nums;
page               64 drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c 		rtl_fw_page_write(hw, page, (bufferptr + offset), remain_size);
page               40 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c 	u32 page, offset;
page               52 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c 	for (page = 0; page < pagenums; page++) {
page               53 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c 		offset = page * FW_8821AE_PAGE_SIZE;
page               54 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c 		rtl_fw_page_write(hw, page, (bufferptr + offset),
page               60 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c 		page = pagenums;
page               61 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c 		rtl_fw_page_write(hw, page, (bufferptr + offset), remainsize);
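
The same firmware-download loop recurs in every rtlwifi chip file above (rtl8188ee, rtl8192c, rtl8192de, rtl8192ee, rtl8723com, rtl8821ae): the image is pushed in fixed-size pages via rtl_fw_page_write(), then the tail that does not fill a page is written to the page after the last full one. A hedged sketch of that shared shape; the function name is assumed:

	static void _rtl_fw_block_write(struct ieee80211_hw *hw,
					const u8 *buffer, u32 size, u32 page_size)
	{
		u32 pagenums = size / page_size;
		u32 remainsize = size % page_size;
		u32 page, offset;

		for (page = 0; page < pagenums; page++) {
			offset = page * page_size;
			rtl_fw_page_write(hw, page, buffer + offset, page_size);
		}

		if (remainsize) {
			/* Leftover bytes go to the page after the last full one. */
			offset = pagenums * page_size;
			rtl_fw_page_write(hw, pagenums, buffer + offset, remainsize);
		}
	}
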
page              405 drivers/net/wireless/realtek/rtw88/debug.c 	u32 page = debugfs_priv->cb_data;
page              411 drivers/net/wireless/realtek/rtw88/debug.c 		seq_printf(m, "\n%8.8x  ", n + page);
page              414 drivers/net/wireless/realtek/rtw88/debug.c 				   rtw_read32(rtwdev, (page | n)));
page              425 drivers/net/wireless/realtek/rtw88/debug.c 	u32 page = debugfs_priv->cb_data;
page              431 drivers/net/wireless/realtek/rtw88/debug.c 		seq_printf(m, "\n%8.8x  ", n + page);
page              434 drivers/net/wireless/realtek/rtw88/debug.c 				   rtw_read32(rtwdev, (page | n)));
page              570 drivers/net/wireless/realtek/rtw88/debug.c #define rtw_debug_impl_mac(page, addr)				\
page              571 drivers/net/wireless/realtek/rtw88/debug.c static struct rtw_debugfs_priv rtw_debug_priv_mac_ ##page = {	\
page              593 drivers/net/wireless/realtek/rtw88/debug.c #define rtw_debug_impl_bb(page, addr)			\
page              594 drivers/net/wireless/realtek/rtw88/debug.c static struct rtw_debugfs_priv rtw_debug_priv_bb_ ##page = {	\
page              394 drivers/net/wireless/realtek/rtw88/fw.c 			location = rsvd_pkt->page;
page              496 drivers/net/wireless/realtek/rtw88/fw.c 				      u8 page_margin, u32 page, u8 *buf,
page              501 drivers/net/wireless/realtek/rtw88/fw.c 	if (page >= 1)
page              502 drivers/net/wireless/realtek/rtw88/fw.c 		memcpy(buf + page_margin + page_size * (page - 1),
page              613 drivers/net/wireless/realtek/rtw88/fw.c 	u32 page = 0;
page              636 drivers/net/wireless/realtek/rtw88/fw.c 		rsvd_pkt->page = total_page;
page              677 drivers/net/wireless/realtek/rtw88/fw.c 					  page, buf, rsvd_pkt);
page              678 drivers/net/wireless/realtek/rtw88/fw.c 		if (page == 0)
page              679 drivers/net/wireless/realtek/rtw88/fw.c 			page += rtw_len_to_page(rsvd_pkt->skb->len +
page              682 drivers/net/wireless/realtek/rtw88/fw.c 			page += rtw_len_to_page(rsvd_pkt->skb->len, page_size);
page               93 drivers/net/wireless/realtek/rtw88/fw.h 	u8 page;
page             1019 drivers/net/wireless/realtek/rtw88/pci.c 	u8 page;
page             1025 drivers/net/wireless/realtek/rtw88/pci.c 	page = addr < 0x20 ? 0 : 1;
page             1026 drivers/net/wireless/realtek/rtw88/pci.c 	page += g1 ? 0 : 2;
page             1028 drivers/net/wireless/realtek/rtw88/pci.c 	rtw_write8(rtwdev, REG_PCIE_MIX_CFG + 3, page);
page              809 drivers/net/wireless/realtek/rtw88/rtw8822b.c 	u8 page;
page              811 drivers/net/wireless/realtek/rtw88/rtw8822b.c 	page = *phy_status & 0xf;
page              813 drivers/net/wireless/realtek/rtw88/rtw8822b.c 	switch (page) {
page              821 drivers/net/wireless/realtek/rtw88/rtw8822b.c 		rtw_warn(rtwdev, "unused phy status page (%d)\n", page);
page             1677 drivers/net/wireless/realtek/rtw88/rtw8822c.c 	u8 page;
page             1679 drivers/net/wireless/realtek/rtw88/rtw8822c.c 	page = *phy_status & 0xf;
page             1681 drivers/net/wireless/realtek/rtw88/rtw8822c.c 	switch (page) {
page             1689 drivers/net/wireless/realtek/rtw88/rtw8822c.c 		rtw_warn(rtwdev, "unused phy status page (%d)\n", page);
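
Both rtw88 chips above tag each phy status report with a page id in the low nibble of its first byte and dispatch on it, warning on unused pages. A hedged restatement of that dispatch; the per-page parsing is only indicated by comments:

	static void query_phy_status(struct rtw_dev *rtwdev, u8 *phy_status)
	{
		u8 page = *phy_status & 0xf;

		switch (page) {
		case 0:
			/* parse the page-0 report layout here */
			break;
		case 1:
			/* parse the page-1 report layout here */
			break;
		default:
			rtw_warn(rtwdev, "unused phy status page (%d)\n", page);
			return;
		}
	}
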
page              188 drivers/net/wireless/wl3501_cs.c static inline void wl3501_switch_page(struct wl3501_card *this, u8 page)
page              190 drivers/net/wireless/wl3501_cs.c 	wl3501_outb(page, this->base_addr + WL3501_NIC_BSS);
page              151 drivers/net/xen-netback/common.h 	struct page *mmap_pages[MAX_PENDING_REQS];
page              162 drivers/net/xen-netback/common.h 	struct page *pages_to_map[MAX_PENDING_REQS];
page              163 drivers/net/xen-netback/common.h 	struct page *pages_to_unmap[MAX_PENDING_REQS];
page              570 drivers/net/xen-netback/netback.c 		struct page *page;
page              587 drivers/net/xen-netback/netback.c 		page = virt_to_page(idx_to_kaddr(queue, pending_idx));
page              588 drivers/net/xen-netback/netback.c 		__skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
page             1051 drivers/net/xen-netback/netback.c 		struct page *page;
page             1055 drivers/net/xen-netback/netback.c 		page = alloc_page(GFP_ATOMIC);
page             1056 drivers/net/xen-netback/netback.c 		if (!page) {
page             1068 drivers/net/xen-netback/netback.c 		if (skb_copy_bits(skb, offset, page_address(page), len))
page             1072 drivers/net/xen-netback/netback.c 		__skb_frag_set_page(&frags[i], page);
page              172 drivers/net/xen-netback/rx.c 	struct page *page;
page              180 drivers/net/xen-netback/rx.c 	page = virt_to_page(data);
page              184 drivers/net/xen-netback/rx.c 	foreign = xen_page_foreign(page);
page              135 drivers/net/xen-netfront.c 	struct page *grant_tx_page[NET_TX_RING_SIZE];
page              260 drivers/net/xen-netfront.c 	struct page *page;
page              268 drivers/net/xen-netfront.c 	page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
page              269 drivers/net/xen-netfront.c 	if (!page) {
page              273 drivers/net/xen-netfront.c 	skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);
page              298 drivers/net/xen-netfront.c 		struct page *page;
page              316 drivers/net/xen-netfront.c 		page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
page              321 drivers/net/xen-netfront.c 							 page,
page              423 drivers/net/xen-netfront.c 	struct page *page;
page              436 drivers/net/xen-netfront.c 	struct page *page = info->page;
page              449 drivers/net/xen-netfront.c 	queue->grant_tx_page[id] = page;
page              464 drivers/net/xen-netfront.c 	struct page *page, unsigned int offset, unsigned int len)
page              469 drivers/net/xen-netfront.c 		.page = page,
page              473 drivers/net/xen-netfront.c 	gnttab_for_one_grant(page, offset, len, xennet_tx_setup_grant, &info);
page              490 drivers/net/xen-netfront.c 	struct sk_buff *skb, struct page *page,
page              500 drivers/net/xen-netfront.c 	page += offset >> PAGE_SHIFT;
page              504 drivers/net/xen-netfront.c 		info.page = page;
page              507 drivers/net/xen-netfront.c 		gnttab_foreach_grant_in_range(page, offset, len,
page              511 drivers/net/xen-netfront.c 		page++;
page              573 drivers/net/xen-netfront.c 	struct page *page;
page              607 drivers/net/xen-netfront.c 	page = virt_to_page(skb->data);
page              619 drivers/net/xen-netfront.c 		page = virt_to_page(skb->data);
page              636 drivers/net/xen-netfront.c 						page, offset, len);
page              639 drivers/net/xen-netfront.c 		page++;
page              672 drivers/net/xen-netfront.c 	tx = xennet_make_txreqs(queue, tx, skb, page, offset, len);
page             1159 drivers/net/xen-netfront.c 		struct page *page;
page             1169 drivers/net/xen-netfront.c 		page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
page             1174 drivers/net/xen-netfront.c 		get_page(page);
page             1176 drivers/net/xen-netfront.c 					  (unsigned long)page_address(page));
page             1378 drivers/net/xen-netfront.c static void xennet_end_access(int ref, void *page)
page             1382 drivers/net/xen-netfront.c 		gnttab_end_foreign_access(ref, 0, (unsigned long)page);
page              117 drivers/nvdimm/blk.c 		struct bio_integrity_payload *bip, struct page *page,
page              146 drivers/nvdimm/blk.c 		iobuf = kmap_atomic(page);
page             1106 drivers/nvdimm/btt.c static int btt_data_read(struct arena_info *arena, struct page *page,
page             1111 drivers/nvdimm/btt.c 	void *mem = kmap_atomic(page);
page             1120 drivers/nvdimm/btt.c 			struct page *page, unsigned int off, u32 len)
page             1124 drivers/nvdimm/btt.c 	void *mem = kmap_atomic(page);
page             1132 drivers/nvdimm/btt.c static void zero_fill_data(struct page *page, unsigned int off, u32 len)
page             1134 drivers/nvdimm/btt.c 	void *mem = kmap_atomic(page);
page             1198 drivers/nvdimm/btt.c 			struct page *page, unsigned int off, sector_t sector,
page             1232 drivers/nvdimm/btt.c 				zero_fill_data(page, off, cur_len);
page             1262 drivers/nvdimm/btt.c 		ret = btt_data_read(arena, page, off, postmap, cur_len);
page             1311 drivers/nvdimm/btt.c 			sector_t sector, struct page *page, unsigned int off,
page             1365 drivers/nvdimm/btt.c 		ret = btt_data_write(arena, new_postmap, page, off, cur_len);
page             1426 drivers/nvdimm/btt.c 			struct page *page, unsigned int len, unsigned int off,
page             1432 drivers/nvdimm/btt.c 		ret = btt_read_pg(btt, bip, page, off, sector, len);
page             1433 drivers/nvdimm/btt.c 		flush_dcache_page(page);
page             1435 drivers/nvdimm/btt.c 		flush_dcache_page(page);
page             1436 drivers/nvdimm/btt.c 		ret = btt_write_pg(btt, bip, sector, page, off, len);
page             1487 drivers/nvdimm/btt.c 		struct page *page, unsigned int op)
page             1493 drivers/nvdimm/btt.c 	len = hpage_nr_pages(page) * PAGE_SIZE;
page             1494 drivers/nvdimm/btt.c 	rc = btt_do_bvec(btt, NULL, page, len, 0, op, sector);
page             1496 drivers/nvdimm/btt.c 		page_endio(page, op_is_write(op), 0);
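
The btt.c data movers above bounce sector payloads through a short-lived kernel mapping of the bio page. A hedged sketch of the read side, reconstructed from the excerpts; arena_read_bytes(), to_namespace_offset() and NVDIMM_IO_ATOMIC are taken to be the btt helpers these lines use, and error handling is trimmed:

	static int btt_data_read(struct arena_info *arena, struct page *page,
				 unsigned int off, u32 lba, u32 len)
	{
		u64 nsoff = to_namespace_offset(arena, lba);
		void *mem = kmap_atomic(page);	/* short-lived, no sleeping */
		int ret;

		ret = arena_read_bytes(arena, nsoff, mem + off, len,
				       NVDIMM_IO_ATOMIC);
		kunmap_atomic(mem);

		return ret;
	}
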
page              512 drivers/nvdimm/pfn_devs.c 	if ((le16_to_cpu(pfn_sb->page_struct_size) < sizeof(struct page)) &&
page              769 drivers/nvdimm/pfn_devs.c 		BUILD_BUG_ON(sizeof(struct page) > MAX_STRUCT_PAGE_SIZE);
page               59 drivers/nvdimm/pmem.c 		struct page *page = pfn_to_page(pfn);
page               66 drivers/nvdimm/pmem.c 		if (test_and_clear_pmem_poison(page))
page              100 drivers/nvdimm/pmem.c static void write_pmem(void *pmem_addr, struct page *page,
page              107 drivers/nvdimm/pmem.c 		mem = kmap_atomic(page);
page              113 drivers/nvdimm/pmem.c 		page++;
page              118 drivers/nvdimm/pmem.c static blk_status_t read_pmem(struct page *page, unsigned int off,
page              126 drivers/nvdimm/pmem.c 		mem = kmap_atomic(page);
page              134 drivers/nvdimm/pmem.c 		page++;
page              140 drivers/nvdimm/pmem.c static blk_status_t pmem_do_bvec(struct pmem_device *pmem, struct page *page,
page              156 drivers/nvdimm/pmem.c 			rc = read_pmem(page, off, pmem_addr, len);
page              157 drivers/nvdimm/pmem.c 			flush_dcache_page(page);
page              174 drivers/nvdimm/pmem.c 		flush_dcache_page(page);
page              175 drivers/nvdimm/pmem.c 		write_pmem(pmem_addr, page, off, len);
page              178 drivers/nvdimm/pmem.c 			write_pmem(pmem_addr, page, off, len);
page              222 drivers/nvdimm/pmem.c 		       struct page *page, unsigned int op)
page              227 drivers/nvdimm/pmem.c 	rc = pmem_do_bvec(pmem, page, hpage_nr_pages(page) * PAGE_SIZE,
page              237 drivers/nvdimm/pmem.c 		page_endio(page, op_is_write(op), 0);
page              341 drivers/nvdimm/pmem.c static void pmem_pagemap_page_free(struct page *page)
page              343 drivers/nvdimm/pmem.c 	wake_up_var(&page->_refcount);
page               33 drivers/nvdimm/pmem.h static inline bool test_and_clear_pmem_poison(struct page *page)
page               35 drivers/nvdimm/pmem.h 	return TestClearPageHWPoison(page);
page               38 drivers/nvdimm/pmem.h static inline bool test_and_clear_pmem_poison(struct page *page)
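
pmem.h above carries two bodies for test_and_clear_pmem_poison(): one wrapping TestClearPageHWPoison() and a stub for kernels built without memory-failure support. A sketch of that arrangement; the guarding symbol is taken to be CONFIG_MEMORY_FAILURE:

	#ifdef CONFIG_MEMORY_FAILURE
	static inline bool test_and_clear_pmem_poison(struct page *page)
	{
		return TestClearPageHWPoison(page);
	}
	#else
	static inline bool test_and_clear_pmem_poison(struct page *page)
	{
		return false;	/* no poison tracking without memory-failure */
	}
	#endif
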
page              742 drivers/nvme/host/core.c 		struct page *page = req->special_vec.bv_page;
page              744 drivers/nvme/host/core.c 		if (page == ns->ctrl->discard_page)
page              747 drivers/nvme/host/core.c 			kfree(page_address(page) + req->special_vec.bv_offset);
page              982 drivers/nvme/host/lightnvm.c 		struct device_attribute *dattr, char *page)
page              996 drivers/nvme/host/lightnvm.c 			return scnprintf(page, PAGE_SIZE, "%u\n",
page              999 drivers/nvme/host/lightnvm.c 			return scnprintf(page, PAGE_SIZE, "%u.%u\n",
page             1003 drivers/nvme/host/lightnvm.c 		return scnprintf(page, PAGE_SIZE, "%u\n", geo->cap);
page             1005 drivers/nvme/host/lightnvm.c 		return scnprintf(page, PAGE_SIZE, "%u\n", geo->trdt);
page             1007 drivers/nvme/host/lightnvm.c 		return scnprintf(page, PAGE_SIZE, "%u\n", geo->trdm);
page             1009 drivers/nvme/host/lightnvm.c 		return scnprintf(page,
page             1016 drivers/nvme/host/lightnvm.c static ssize_t nvm_dev_attr_show_ppaf(struct nvm_addrf_12 *ppaf, char *page)
page             1018 drivers/nvme/host/lightnvm.c 	return scnprintf(page, PAGE_SIZE,
page             1029 drivers/nvme/host/lightnvm.c 		struct device_attribute *dattr, char *page)
page             1042 drivers/nvme/host/lightnvm.c 		return scnprintf(page, PAGE_SIZE, "%u\n", geo->vmnt);
page             1044 drivers/nvme/host/lightnvm.c 		return scnprintf(page, PAGE_SIZE, "%u\n", geo->dom);
page             1047 drivers/nvme/host/lightnvm.c 		return scnprintf(page, PAGE_SIZE, "%s\n", "gennvm");
page             1049 drivers/nvme/host/lightnvm.c 		return nvm_dev_attr_show_ppaf((void *)&geo->addrf, page);
page             1051 drivers/nvme/host/lightnvm.c 		return scnprintf(page, PAGE_SIZE, "%u\n", geo->mtype);
page             1053 drivers/nvme/host/lightnvm.c 		return scnprintf(page, PAGE_SIZE, "%u\n", geo->fmtype);
page             1055 drivers/nvme/host/lightnvm.c 		return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_ch);
page             1057 drivers/nvme/host/lightnvm.c 		return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_lun);
page             1059 drivers/nvme/host/lightnvm.c 		return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_pln);
page             1061 drivers/nvme/host/lightnvm.c 		return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_chk);
page             1063 drivers/nvme/host/lightnvm.c 		return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_pg);
page             1065 drivers/nvme/host/lightnvm.c 		return scnprintf(page, PAGE_SIZE, "%u\n", geo->fpg_sz);
page             1067 drivers/nvme/host/lightnvm.c 		return scnprintf(page, PAGE_SIZE, "%u\n", geo->csecs);
page             1069 drivers/nvme/host/lightnvm.c 		return scnprintf(page, PAGE_SIZE, "%u\n", geo->sos);
page             1071 drivers/nvme/host/lightnvm.c 		return scnprintf(page, PAGE_SIZE, "%u\n", geo->tprt);
page             1073 drivers/nvme/host/lightnvm.c 		return scnprintf(page, PAGE_SIZE, "%u\n", geo->tprm);
page             1075 drivers/nvme/host/lightnvm.c 		return scnprintf(page, PAGE_SIZE, "%u\n", geo->tbet);
page             1077 drivers/nvme/host/lightnvm.c 		return scnprintf(page, PAGE_SIZE, "%u\n", geo->tbem);
page             1079 drivers/nvme/host/lightnvm.c 		return scnprintf(page, PAGE_SIZE, "0x%08x\n", geo->mpos);
page             1081 drivers/nvme/host/lightnvm.c 		return scnprintf(page, PAGE_SIZE, "0x%08x\n", geo->mccap);
page             1083 drivers/nvme/host/lightnvm.c 		return scnprintf(page, PAGE_SIZE, "%u\n", NVM_MAX_VLBA);
page             1085 drivers/nvme/host/lightnvm.c 		return scnprintf(page, PAGE_SIZE,
page             1092 drivers/nvme/host/lightnvm.c 		struct device_attribute *dattr, char *page)
page             1105 drivers/nvme/host/lightnvm.c 		return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_ch);
page             1107 drivers/nvme/host/lightnvm.c 		return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_lun);
page             1109 drivers/nvme/host/lightnvm.c 		return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_chk);
page             1111 drivers/nvme/host/lightnvm.c 		return scnprintf(page, PAGE_SIZE, "%u\n", geo->clba);
page             1113 drivers/nvme/host/lightnvm.c 		return scnprintf(page, PAGE_SIZE, "%u\n", geo->ws_min);
page             1115 drivers/nvme/host/lightnvm.c 		return scnprintf(page, PAGE_SIZE, "%u\n", geo->ws_opt);
page             1117 drivers/nvme/host/lightnvm.c 		return scnprintf(page, PAGE_SIZE, "%u\n", geo->maxoc);
page             1119 drivers/nvme/host/lightnvm.c 		return scnprintf(page, PAGE_SIZE, "%u\n", geo->maxocpu);
page             1121 drivers/nvme/host/lightnvm.c 		return scnprintf(page, PAGE_SIZE, "%u\n", geo->mw_cunits);
page             1123 drivers/nvme/host/lightnvm.c 		return scnprintf(page, PAGE_SIZE, "%u\n", geo->tprt);
page             1125 drivers/nvme/host/lightnvm.c 		return scnprintf(page, PAGE_SIZE, "%u\n", geo->tprm);
page             1127 drivers/nvme/host/lightnvm.c 		return scnprintf(page, PAGE_SIZE, "%u\n", geo->tbet);
page             1129 drivers/nvme/host/lightnvm.c 		return scnprintf(page, PAGE_SIZE, "%u\n", geo->tbem);
page             1131 drivers/nvme/host/lightnvm.c 		return scnprintf(page, PAGE_SIZE,
page              281 drivers/nvme/host/nvme.h 	struct page *discard_page;
page              177 drivers/nvme/host/tcp.c static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req)
page              284 drivers/nvme/host/tcp.c 		struct page *page, off_t off, size_t len)
page              289 drivers/nvme/host/tcp.c 	sg_set_page(&sg, page, len, off);
page              851 drivers/nvme/host/tcp.c 		struct page *page = nvme_tcp_req_cur_page(req);
page              863 drivers/nvme/host/tcp.c 		if (unlikely(PageSlab(page))) {
page              864 drivers/nvme/host/tcp.c 			ret = sock_no_sendpage(queue->sock, page, offset, len,
page              867 drivers/nvme/host/tcp.c 			ret = kernel_sendpage(queue->sock, page, offset, len,
page              875 drivers/nvme/host/tcp.c 			nvme_tcp_ddgst_update(queue->snd_hash, page,
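
The nvme-tcp send path above (lines 863-867) avoids zero-copy sendpage for slab memory: pages backing kmalloc caches must not have their reference counts taken by the network stack, so they are copied via sock_no_sendpage() instead. A hedged restatement of that choice with an invented wrapper name:

	static int nvme_tcp_send_one(struct socket *sock, struct page *page,
				     int offset, size_t len, int flags)
	{
		if (unlikely(PageSlab(page)))
			return sock_no_sendpage(sock, page, offset, len, flags);

		return kernel_sendpage(sock, page, offset, len, flags);
	}
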
page               38 drivers/nvme/target/configfs.c 		char *page)
page               42 drivers/nvme/target/configfs.c 		return sprintf(page, "ipv4\n");
page               44 drivers/nvme/target/configfs.c 		return sprintf(page, "ipv6\n");
page               46 drivers/nvme/target/configfs.c 		return sprintf(page, "ib\n");
page               48 drivers/nvme/target/configfs.c 		return sprintf(page, "fc\n");
page               50 drivers/nvme/target/configfs.c 		return sprintf(page, "\n");
page               55 drivers/nvme/target/configfs.c 		const char *page, size_t count)
page               65 drivers/nvme/target/configfs.c 	if (sysfs_streq(page, "ipv4")) {
page               67 drivers/nvme/target/configfs.c 	} else if (sysfs_streq(page, "ipv6")) {
page               69 drivers/nvme/target/configfs.c 	} else if (sysfs_streq(page, "ib")) {
page               71 drivers/nvme/target/configfs.c 	} else if (sysfs_streq(page, "fc")) {
page               74 drivers/nvme/target/configfs.c 		pr_err("Invalid value '%s' for adrfam\n", page);
page               84 drivers/nvme/target/configfs.c 		char *page)
page               88 drivers/nvme/target/configfs.c 	return snprintf(page, PAGE_SIZE, "%d\n",
page               93 drivers/nvme/target/configfs.c 		const char *page, size_t count)
page               98 drivers/nvme/target/configfs.c 	if (kstrtou16(page, 0, &portid)) {
page               99 drivers/nvme/target/configfs.c 		pr_err("Invalid value '%s' for portid\n", page);
page              115 drivers/nvme/target/configfs.c 		char *page)
page              119 drivers/nvme/target/configfs.c 	return snprintf(page, PAGE_SIZE, "%s\n",
page              124 drivers/nvme/target/configfs.c 		const char *page, size_t count)
page              129 drivers/nvme/target/configfs.c 		pr_err("Invalid value '%s' for traddr\n", page);
page              139 drivers/nvme/target/configfs.c 	if (sscanf(page, "%s\n", port->disc_addr.traddr) != 1)
page              147 drivers/nvme/target/configfs.c 		char *page)
page              152 drivers/nvme/target/configfs.c 		return sprintf(page, "not specified\n");
page              154 drivers/nvme/target/configfs.c 		return sprintf(page, "required\n");
page              156 drivers/nvme/target/configfs.c 		return sprintf(page, "not required\n");
page              158 drivers/nvme/target/configfs.c 		return sprintf(page, "\n");
page              163 drivers/nvme/target/configfs.c 		const char *page, size_t count)
page              174 drivers/nvme/target/configfs.c 	if (sysfs_streq(page, "not specified")) {
page              176 drivers/nvme/target/configfs.c 	} else if (sysfs_streq(page, "required")) {
page              178 drivers/nvme/target/configfs.c 	} else if (sysfs_streq(page, "not required")) {
page              181 drivers/nvme/target/configfs.c 		pr_err("Invalid value '%s' for treq\n", page);
page              192 drivers/nvme/target/configfs.c 		char *page)
page              196 drivers/nvme/target/configfs.c 	return snprintf(page, PAGE_SIZE, "%s\n",
page              201 drivers/nvme/target/configfs.c 		const char *page, size_t count)
page              206 drivers/nvme/target/configfs.c 		pr_err("Invalid value '%s' for trsvcid\n", page);
page              215 drivers/nvme/target/configfs.c 	if (sscanf(page, "%s\n", port->disc_addr.trsvcid) != 1)
page              223 drivers/nvme/target/configfs.c 		char *page)
page              227 drivers/nvme/target/configfs.c 	return snprintf(page, PAGE_SIZE, "%d\n", port->inline_data_size);
page              231 drivers/nvme/target/configfs.c 		const char *page, size_t count)
page              241 drivers/nvme/target/configfs.c 	ret = kstrtoint(page, 0, &port->inline_data_size);
page              243 drivers/nvme/target/configfs.c 		pr_err("Invalid value '%s' for inline_data_size\n", page);
page              252 drivers/nvme/target/configfs.c 		char *page)
page              260 drivers/nvme/target/configfs.c 		return sprintf(page, "%s\n", nvmet_transport_names[i].name);
page              263 drivers/nvme/target/configfs.c 	return sprintf(page, "\n");
page              274 drivers/nvme/target/configfs.c 		const char *page, size_t count)
page              286 drivers/nvme/target/configfs.c 		if (sysfs_streq(page, nvmet_transport_names[i].name))
page              290 drivers/nvme/target/configfs.c 	pr_err("Invalid value '%s' for trtype\n", page);
page              305 drivers/nvme/target/configfs.c static ssize_t nvmet_ns_device_path_show(struct config_item *item, char *page)
page              307 drivers/nvme/target/configfs.c 	return sprintf(page, "%s\n", to_nvmet_ns(item)->device_path);
page              311 drivers/nvme/target/configfs.c 		const char *page, size_t count)
page              324 drivers/nvme/target/configfs.c 	len = strcspn(page, "\n");
page              330 drivers/nvme/target/configfs.c 	ns->device_path = kstrndup(page, len, GFP_KERNEL);
page              345 drivers/nvme/target/configfs.c static ssize_t nvmet_ns_p2pmem_show(struct config_item *item, char *page)
page              349 drivers/nvme/target/configfs.c 	return pci_p2pdma_enable_show(page, ns->p2p_dev, ns->use_p2pmem);
page              353 drivers/nvme/target/configfs.c 		const char *page, size_t count)
page              367 drivers/nvme/target/configfs.c 	error = pci_p2pdma_enable_store(page, &p2p_dev, &use_p2pmem);
page              386 drivers/nvme/target/configfs.c static ssize_t nvmet_ns_device_uuid_show(struct config_item *item, char *page)
page              388 drivers/nvme/target/configfs.c 	return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->uuid);
page              392 drivers/nvme/target/configfs.c 					  const char *page, size_t count)
page              406 drivers/nvme/target/configfs.c 	if (uuid_parse(page, &ns->uuid))
page              416 drivers/nvme/target/configfs.c static ssize_t nvmet_ns_device_nguid_show(struct config_item *item, char *page)
page              418 drivers/nvme/target/configfs.c 	return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->nguid);
page              422 drivers/nvme/target/configfs.c 		const char *page, size_t count)
page              427 drivers/nvme/target/configfs.c 	const char *p = page;
page              438 drivers/nvme/target/configfs.c 		if (p + 2 > page + count) {
page              462 drivers/nvme/target/configfs.c static ssize_t nvmet_ns_ana_grpid_show(struct config_item *item, char *page)
page              464 drivers/nvme/target/configfs.c 	return sprintf(page, "%u\n", to_nvmet_ns(item)->anagrpid);
page              468 drivers/nvme/target/configfs.c 		const char *page, size_t count)
page              474 drivers/nvme/target/configfs.c 	ret = kstrtou32(page, 0, &newgrpid);
page              495 drivers/nvme/target/configfs.c static ssize_t nvmet_ns_enable_show(struct config_item *item, char *page)
page              497 drivers/nvme/target/configfs.c 	return sprintf(page, "%d\n", to_nvmet_ns(item)->enabled);
page              501 drivers/nvme/target/configfs.c 		const char *page, size_t count)
page              507 drivers/nvme/target/configfs.c 	if (strtobool(page, &enable))
page              520 drivers/nvme/target/configfs.c static ssize_t nvmet_ns_buffered_io_show(struct config_item *item, char *page)
page              522 drivers/nvme/target/configfs.c 	return sprintf(page, "%d\n", to_nvmet_ns(item)->buffered_io);
page              526 drivers/nvme/target/configfs.c 		const char *page, size_t count)
page              531 drivers/nvme/target/configfs.c 	if (strtobool(page, &val))
page              773 drivers/nvme/target/configfs.c 		char *page)
page              775 drivers/nvme/target/configfs.c 	return snprintf(page, PAGE_SIZE, "%d\n",
page              780 drivers/nvme/target/configfs.c 		const char *page, size_t count)
page              786 drivers/nvme/target/configfs.c 	if (strtobool(page, &allow_any_host))
page              809 drivers/nvme/target/configfs.c 					      char *page)
page              814 drivers/nvme/target/configfs.c 		return snprintf(page, PAGE_SIZE, "%d.%d.%d\n",
page              819 drivers/nvme/target/configfs.c 		return snprintf(page, PAGE_SIZE, "%d.%d\n",
page              825 drivers/nvme/target/configfs.c 					       const char *page, size_t count)
page              832 drivers/nvme/target/configfs.c 	ret = sscanf(page, "%d.%d.%d\n", &major, &minor, &tertiary);
page              845 drivers/nvme/target/configfs.c 					     char *page)
page              849 drivers/nvme/target/configfs.c 	return snprintf(page, PAGE_SIZE, "%llx\n", subsys->serial);
page              853 drivers/nvme/target/configfs.c 					      const char *page, size_t count)
page              858 drivers/nvme/target/configfs.c 	sscanf(page, "%llx\n", &subsys->serial);
page              931 drivers/nvme/target/configfs.c 		char *page)
page              933 drivers/nvme/target/configfs.c 	return snprintf(page, PAGE_SIZE, "%d\n", to_nvmet_port(item)->enabled);
page              937 drivers/nvme/target/configfs.c 		const char *page, size_t count)
page              943 drivers/nvme/target/configfs.c 	if (strtobool(page, &enable))
page              953 drivers/nvme/target/configfs.c 	pr_err("Invalid value '%s' for enable\n", page);
page             1028 drivers/nvme/target/configfs.c 		char *page)
page             1037 drivers/nvme/target/configfs.c 		return sprintf(page, "%s\n", nvmet_ana_state_names[i].name);
page             1040 drivers/nvme/target/configfs.c 	return sprintf(page, "\n");
page             1044 drivers/nvme/target/configfs.c 		const char *page, size_t count)
page             1050 drivers/nvme/target/configfs.c 		if (sysfs_streq(page, nvmet_ana_state_names[i].name))
page             1054 drivers/nvme/target/configfs.c 	pr_err("Invalid value '%s' for ana_state\n", page);
page              241 drivers/nvme/target/rdma.c 	struct page *pg;
page              524 drivers/nvme/target/tcp.c 		struct page *page = sg_page(cmd->cur_sg);
page              533 drivers/nvme/target/tcp.c 		ret = kernel_sendpage(cmd->queue->sock, page, cmd->offset,
page               93 drivers/nvmem/rave-sp-eeprom.c 			     struct rave_sp_eeprom_page *page)
page               96 drivers/nvmem/rave-sp-eeprom.c 	const unsigned int data_size = is_write ? sizeof(page->data) : 0;
page               99 drivers/nvmem/rave-sp-eeprom.c 		is_write ? sizeof(*page) - sizeof(page->data) : sizeof(*page);
page              101 drivers/nvmem/rave-sp-eeprom.c 	u8 cmd[RAVE_SP_EEPROM_HEADER_MAX + sizeof(page->data)];
page              124 drivers/nvmem/rave-sp-eeprom.c 	memcpy(&cmd[offset], page->data, data_size);
page              126 drivers/nvmem/rave-sp-eeprom.c 	ret = rave_sp_exec(eeprom->sp, cmd, cmd_size, page, rsp_size);
page              130 drivers/nvmem/rave-sp-eeprom.c 	if (page->type != type)
page              133 drivers/nvmem/rave-sp-eeprom.c 	if (!page->success)
page              163 drivers/nvmem/rave-sp-eeprom.c 	struct rave_sp_eeprom_page page;
page              172 drivers/nvmem/rave-sp-eeprom.c 	if (WARN_ON(data_len > sizeof(page.data) - page_offset))
page              182 drivers/nvmem/rave-sp-eeprom.c 						page_nr, &page);
page              187 drivers/nvmem/rave-sp-eeprom.c 		memcpy(&page.data[page_offset], data, data_len);
page              190 drivers/nvmem/rave-sp-eeprom.c 	ret = rave_sp_eeprom_io(eeprom, type, page_nr, &page);
page              199 drivers/nvmem/rave-sp-eeprom.c 		memcpy(data, &page.data[page_offset], data_len);
page              786 drivers/parisc/ccio-dma.c ccio_map_page(struct device *dev, struct page *page, unsigned long offset,
page              790 drivers/parisc/ccio-dma.c 	return ccio_map_single(dev, page_address(page) + offset, size,
page              775 drivers/parisc/sba_iommu.c sba_map_page(struct device *dev, struct page *page, unsigned long offset,
page              779 drivers/parisc/sba_iommu.c 	return sba_map_single(dev, page_address(page) + offset, size,
page              189 drivers/pci/controller/dwc/pcie-designware.h 	struct page		*msi_page;
page              339 drivers/pci/controller/vmd.c static dma_addr_t vmd_map_page(struct device *dev, struct page *page,
page              344 drivers/pci/controller/vmd.c 	return dma_map_page_attrs(to_vmd_dev(dev), page, offset, size, dir,
page               45 drivers/pci/endpoint/pci-ep-cfs.c static ssize_t pci_epc_start_store(struct config_item *item, const char *page,
page               55 drivers/pci/endpoint/pci-ep-cfs.c 	ret = kstrtobool(page, &start);
page               75 drivers/pci/endpoint/pci-ep-cfs.c static ssize_t pci_epc_start_show(struct config_item *item, char *page)
page               77 drivers/pci/endpoint/pci-ep-cfs.c 	return sprintf(page, "%d\n",
page              211 drivers/pci/endpoint/pci-ep-cfs.c static ssize_t pci_epf_##_name##_show(struct config_item *item,	char *page)    \
page              216 drivers/pci/endpoint/pci-ep-cfs.c 	return sprintf(page, "0x%04x\n", epf->header->_name);		       \
page              221 drivers/pci/endpoint/pci-ep-cfs.c 				       const char *page, size_t len)	       \
page              228 drivers/pci/endpoint/pci-ep-cfs.c 	ret = kstrtou32(page, 0, &val);					       \
page              237 drivers/pci/endpoint/pci-ep-cfs.c 				       const char *page, size_t len)	       \
page              244 drivers/pci/endpoint/pci-ep-cfs.c 	ret = kstrtou16(page, 0, &val);					       \
page              253 drivers/pci/endpoint/pci-ep-cfs.c 				       const char *page, size_t len)	       \
page              260 drivers/pci/endpoint/pci-ep-cfs.c 	ret = kstrtou8(page, 0, &val);					       \
page              268 drivers/pci/endpoint/pci-ep-cfs.c 					    const char *page, size_t len)
page              273 drivers/pci/endpoint/pci-ep-cfs.c 	ret = kstrtou8(page, 0, &val);
page              283 drivers/pci/endpoint/pci-ep-cfs.c 					   char *page)
page              285 drivers/pci/endpoint/pci-ep-cfs.c 	return sprintf(page, "%d\n",
page              290 drivers/pci/endpoint/pci-ep-cfs.c 					     const char *page, size_t len)
page              295 drivers/pci/endpoint/pci-ep-cfs.c 	ret = kstrtou16(page, 0, &val);
page              305 drivers/pci/endpoint/pci-ep-cfs.c 					    char *page)
page              307 drivers/pci/endpoint/pci-ep-cfs.c 	return sprintf(page, "%d\n",
page              928 drivers/pci/p2pdma.c int pci_p2pdma_enable_store(const char *page, struct pci_dev **p2p_dev,
page              933 drivers/pci/p2pdma.c 	dev = bus_find_device_by_name(&pci_bus_type, NULL, page);
page              941 drivers/pci/p2pdma.c 				page);
page              947 drivers/pci/p2pdma.c 	} else if ((page[0] == '0' || page[0] == '1') && !iscntrl(page[1])) {
page              954 drivers/pci/p2pdma.c 	} else if (!strtobool(page, use_p2pdma)) {
page              958 drivers/pci/p2pdma.c 	pr_err("No such PCI device: %.*s\n", (int)strcspn(page, "\n"), page);
page              975 drivers/pci/p2pdma.c ssize_t pci_p2pdma_enable_show(char *page, struct pci_dev *p2p_dev,
page              979 drivers/pci/p2pdma.c 		return sprintf(page, "0\n");
page              982 drivers/pci/p2pdma.c 		return sprintf(page, "1\n");
page              984 drivers/pci/p2pdma.c 	return sprintf(page, "%s\n", pci_name(p2p_dev));
page              308 drivers/pcmcia/vrc4173_cardu.c 	uint32_t start, stop, offset, page;
page              332 drivers/pcmcia/vrc4173_cardu.c 	page = exca_readb(socket, MEM_WIN_SAU(map)) << 24;
page              333 drivers/pcmcia/vrc4173_cardu.c 	mem->sys_start = start + page;
page              334 drivers/pcmcia/vrc4173_cardu.c 	mem->sys_stop = start + page;
page              501 drivers/perf/arm_smmuv3_pmu.c 				   struct device_attribute *attr, char *page)
page              507 drivers/perf/arm_smmuv3_pmu.c 	return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
page              821 drivers/perf/arm_spe_pmu.c 	struct page **pglist;
page              104 drivers/perf/fsl_imx8_ddr_perf.c 		   char *page)
page              109 drivers/perf/fsl_imx8_ddr_perf.c 	return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
page               42 drivers/perf/hisilicon/hisi_uncore_pmu.c 			      struct device_attribute *attr, char *page)
page               48 drivers/perf/hisilicon/hisi_uncore_pmu.c 	return sprintf(page, "config=0x%lx\n", (unsigned long)eattr->var);
page              716 drivers/perf/qcom_l2_pmu.c 				      struct device_attribute *attr, char *page)
page              721 drivers/perf/qcom_l2_pmu.c 	return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
page              641 drivers/perf/qcom_l3_pmu.c 				     struct device_attribute *attr, char *page)
page              646 drivers/perf/qcom_l3_pmu.c 	return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
page              164 drivers/platform/goldfish/goldfish_pipe.c 	struct page *pages[MAX_BUFFERS_PER_COMMAND];
page              264 drivers/platform/goldfish/goldfish_pipe.c 			  struct page *pages[MAX_BUFFERS_PER_COMMAND],
page              288 drivers/platform/goldfish/goldfish_pipe.c static void release_user_pages(struct page **pages, int pages_count,
page              301 drivers/platform/goldfish/goldfish_pipe.c static void populate_rw_params(struct page **pages,
page              849 drivers/platform/x86/asus-laptop.c 			  char *page)
page              862 drivers/platform/x86/asus-laptop.c 	len += sprintf(page, ASUS_LAPTOP_NAME " " ASUS_LAPTOP_VERSION "\n");
page              863 drivers/platform/x86/asus-laptop.c 	len += sprintf(page + len, "Model reference    : %s\n", asus->name);
page              872 drivers/platform/x86/asus-laptop.c 		len += sprintf(page + len, "SFUN value         : %#x\n",
page              884 drivers/platform/x86/asus-laptop.c 		len += sprintf(page + len, "HWRS value         : %#x\n",
page              895 drivers/platform/x86/asus-laptop.c 		len += sprintf(page + len, "ASYM value         : %#x\n",
page              899 drivers/platform/x86/asus-laptop.c 		len += sprintf(page + len, "DSDT length        : %s\n", buf);
page              901 drivers/platform/x86/asus-laptop.c 		len += sprintf(page + len, "DSDT checksum      : %s\n", buf);
page              903 drivers/platform/x86/asus-laptop.c 		len += sprintf(page + len, "DSDT revision      : %s\n", buf);
page              905 drivers/platform/x86/asus-laptop.c 		len += sprintf(page + len, "OEM id             : %s\n", buf);
page              907 drivers/platform/x86/asus-laptop.c 		len += sprintf(page + len, "OEM table id       : %s\n", buf);
page              909 drivers/platform/x86/asus-laptop.c 		len += sprintf(page + len, "OEM revision       : 0x%s\n", buf);
page              911 drivers/platform/x86/asus-laptop.c 		len += sprintf(page + len, "ASL comp vendor id : %s\n", buf);
page              913 drivers/platform/x86/asus-laptop.c 		len += sprintf(page + len, "ASL comp revision  : 0x%s\n", buf);
page               13 drivers/ptp/ptp_sysfs.c 			       struct device_attribute *attr, char *page)
page               16 drivers/ptp/ptp_sysfs.c 	return snprintf(page, PAGE_SIZE-1, "%s\n", ptp->info->name);
page               22 drivers/ptp/ptp_sysfs.c 			   struct device_attribute *attr, char *page)	\
page               25 drivers/ptp/ptp_sysfs.c 	return snprintf(page, PAGE_SIZE-1, "%d\n", ptp->info->var);	\
page               63 drivers/ptp/ptp_sysfs.c 			       struct device_attribute *attr, char *page)
page               88 drivers/ptp/ptp_sysfs.c 	cnt = snprintf(page, PAGE_SIZE, "%u %lld %u\n",
page              212 drivers/ptp/ptp_sysfs.c 			    char *page)
page              230 drivers/ptp/ptp_sysfs.c 	return snprintf(page, PAGE_SIZE, "%u %u\n", func, chan);
page              541 drivers/rapidio/devices/rio_mport_cdev.c 	struct page **page_list;
page              814 drivers/rapidio/devices/rio_mport_cdev.c 	struct page **page_list = NULL;
page             5395 drivers/s390/block/dasd_eckd.c dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page)
page             5402 drivers/s390/block/dasd_eckd.c 		len += sprintf(page + len, PRINTK_HEADER
page             5414 drivers/s390/block/dasd_eckd.c 			if (count % 8 == 0) len += sprintf(page + len, " ");
page             5415 drivers/s390/block/dasd_eckd.c 			if (count % 4 == 0) len += sprintf(page + len, " ");
page             5416 drivers/s390/block/dasd_eckd.c 			len += sprintf(page + len, "%02x", datap[count]);
page             5418 drivers/s390/block/dasd_eckd.c 		len += sprintf(page + len, "\n");
page             5452 drivers/s390/block/dasd_eckd.c 	char *page;
page             5456 drivers/s390/block/dasd_eckd.c 	page = (char *) get_zeroed_page(GFP_ATOMIC);
page             5457 drivers/s390/block/dasd_eckd.c 	if (page == NULL) {
page             5463 drivers/s390/block/dasd_eckd.c 	len = sprintf(page, PRINTK_HEADER
page             5466 drivers/s390/block/dasd_eckd.c 	len += sprintf(page + len, PRINTK_HEADER
page             5473 drivers/s390/block/dasd_eckd.c 	len += sprintf(page + len, PRINTK_HEADER
page             5479 drivers/s390/block/dasd_eckd.c 			len += sprintf(page + len, PRINTK_HEADER
page             5484 drivers/s390/block/dasd_eckd.c 				len += sprintf(page + len, " %02x",
page             5487 drivers/s390/block/dasd_eckd.c 			len += sprintf(page + len, "\n");
page             5492 drivers/s390/block/dasd_eckd.c 			sprintf(page + len, PRINTK_HEADER
page             5499 drivers/s390/block/dasd_eckd.c 			sprintf(page + len, PRINTK_HEADER
page             5505 drivers/s390/block/dasd_eckd.c 		sprintf(page + len, PRINTK_HEADER
page             5508 drivers/s390/block/dasd_eckd.c 	printk(KERN_ERR "%s", page);
page             5517 drivers/s390/block/dasd_eckd.c 		len = sprintf(page, PRINTK_HEADER
page             5519 drivers/s390/block/dasd_eckd.c 		dasd_eckd_dump_ccw_range(first, to, page + len);
page             5520 drivers/s390/block/dasd_eckd.c 		printk(KERN_ERR "%s", page);
page             5530 drivers/s390/block/dasd_eckd.c 			len += sprintf(page, PRINTK_HEADER "......\n");
page             5533 drivers/s390/block/dasd_eckd.c 		len += dasd_eckd_dump_ccw_range(from, to, page + len);
page             5539 drivers/s390/block/dasd_eckd.c 			len += sprintf(page + len, PRINTK_HEADER "......\n");
page             5541 drivers/s390/block/dasd_eckd.c 		len += dasd_eckd_dump_ccw_range(from, last, page + len);
page             5543 drivers/s390/block/dasd_eckd.c 			printk(KERN_ERR "%s", page);
page             5545 drivers/s390/block/dasd_eckd.c 	free_page((unsigned long) page);
page             5555 drivers/s390/block/dasd_eckd.c 	char *page;
page             5560 drivers/s390/block/dasd_eckd.c 	page = (char *) get_zeroed_page(GFP_ATOMIC);
page             5561 drivers/s390/block/dasd_eckd.c 	if (page == NULL) {
page             5567 drivers/s390/block/dasd_eckd.c 	len = sprintf(page, PRINTK_HEADER
page             5570 drivers/s390/block/dasd_eckd.c 	len += sprintf(page + len, PRINTK_HEADER
page             5579 drivers/s390/block/dasd_eckd.c 	len += sprintf(page + len, PRINTK_HEADER
page             5591 drivers/s390/block/dasd_eckd.c 		len += sprintf(page + len, PRINTK_HEADER
page             5593 drivers/s390/block/dasd_eckd.c 		len += sprintf(page + len, PRINTK_HEADER
page             5595 drivers/s390/block/dasd_eckd.c 		len += sprintf(page + len, PRINTK_HEADER
page             5597 drivers/s390/block/dasd_eckd.c 		len += sprintf(page + len, PRINTK_HEADER
page             5600 drivers/s390/block/dasd_eckd.c 		len += sprintf(page + len, PRINTK_HEADER
page             5605 drivers/s390/block/dasd_eckd.c 			len += sprintf(page + len, PRINTK_HEADER
page             5608 drivers/s390/block/dasd_eckd.c 			len += sprintf(page + len, PRINTK_HEADER
page             5611 drivers/s390/block/dasd_eckd.c 			len += sprintf(page + len, PRINTK_HEADER
page             5614 drivers/s390/block/dasd_eckd.c 			len += sprintf(page + len, PRINTK_HEADER
page             5617 drivers/s390/block/dasd_eckd.c 			len += sprintf(page + len, PRINTK_HEADER
page             5623 drivers/s390/block/dasd_eckd.c 			len += sprintf(page + len, PRINTK_HEADER
page             5626 drivers/s390/block/dasd_eckd.c 				len += sprintf(page + len, PRINTK_HEADER
page             5631 drivers/s390/block/dasd_eckd.c 					len += sprintf(page + len, " %02x",
page             5634 drivers/s390/block/dasd_eckd.c 				len += sprintf(page + len, "\n");
page             5639 drivers/s390/block/dasd_eckd.c 			len += sprintf(page + len, PRINTK_HEADER
page             5646 drivers/s390/block/dasd_eckd.c 				len += sprintf(page + len, PRINTK_HEADER
page             5650 drivers/s390/block/dasd_eckd.c 					len += sprintf(page + len, " %02x",
page             5653 drivers/s390/block/dasd_eckd.c 				len += sprintf(page + len, "\n");
page             5658 drivers/s390/block/dasd_eckd.c 				sprintf(page + len, PRINTK_HEADER
page             5665 drivers/s390/block/dasd_eckd.c 				sprintf(page + len, PRINTK_HEADER
page             5671 drivers/s390/block/dasd_eckd.c 			sprintf(page + len, PRINTK_HEADER
page             5675 drivers/s390/block/dasd_eckd.c 		sprintf(page + len, PRINTK_HEADER
page             5678 drivers/s390/block/dasd_eckd.c 	printk(KERN_ERR "%s", page);
page             5679 drivers/s390/block/dasd_eckd.c 	free_page((unsigned long) page);
page              671 drivers/s390/block/dasd_fba.c 	char *page;
page              675 drivers/s390/block/dasd_fba.c 	page = (char *) get_zeroed_page(GFP_ATOMIC);
page              676 drivers/s390/block/dasd_fba.c 	if (page == NULL) {
page              681 drivers/s390/block/dasd_fba.c 	len = sprintf(page, PRINTK_HEADER
page              684 drivers/s390/block/dasd_fba.c 	len += sprintf(page + len, PRINTK_HEADER
page              687 drivers/s390/block/dasd_fba.c 	len += sprintf(page + len, PRINTK_HEADER
page              693 drivers/s390/block/dasd_fba.c 			len += sprintf(page + len, PRINTK_HEADER
page              698 drivers/s390/block/dasd_fba.c 				len += sprintf(page + len, " %02x",
page              701 drivers/s390/block/dasd_fba.c 			len += sprintf(page + len, "\n");
page              704 drivers/s390/block/dasd_fba.c 		len += sprintf(page + len, PRINTK_HEADER
page              707 drivers/s390/block/dasd_fba.c 	printk(KERN_ERR "%s", page);
page              714 drivers/s390/block/dasd_fba.c 	len = sprintf(page, PRINTK_HEADER " Related CP in req: %p\n", req);
page              716 drivers/s390/block/dasd_fba.c 		len += sprintf(page + len, PRINTK_HEADER
page              721 drivers/s390/block/dasd_fba.c 			len += sprintf(page + len, " %08X",
page              724 drivers/s390/block/dasd_fba.c 		len += sprintf(page + len, "\n");
page              727 drivers/s390/block/dasd_fba.c 	printk(KERN_ERR "%s", page);
page              734 drivers/s390/block/dasd_fba.c 		len += sprintf(page + len, PRINTK_HEADER "......\n");
page              738 drivers/s390/block/dasd_fba.c 		len += sprintf(page + len, PRINTK_HEADER
page              743 drivers/s390/block/dasd_fba.c 			len += sprintf(page + len, " %08X",
page              746 drivers/s390/block/dasd_fba.c 		len += sprintf(page + len, "\n");
page              753 drivers/s390/block/dasd_fba.c 		len += sprintf(page + len, PRINTK_HEADER "......\n");
page              756 drivers/s390/block/dasd_fba.c 		len += sprintf(page + len, PRINTK_HEADER
page              761 drivers/s390/block/dasd_fba.c 			len += sprintf(page + len, " %08X",
page              764 drivers/s390/block/dasd_fba.c 		len += sprintf(page + len, "\n");
page              768 drivers/s390/block/dasd_fba.c 		printk(KERN_ERR "%s", page);
page              769 drivers/s390/block/dasd_fba.c 	free_page((unsigned long) page);
page              153 drivers/s390/block/scm_blk.c 	struct page *page = mempool_alloc(aidaw_pool, GFP_ATOMIC);
page              155 drivers/s390/block/scm_blk.c 	return page ? page_address(page) : NULL;
page              217 drivers/s390/char/sclp.h 	char *page = (char *) sccb;
page              223 drivers/s390/char/sclp.h 	memcpy(&info->core, page + sccb->offset_configured,
page               51 drivers/s390/char/sclp_con.c 	void *page;
page               54 drivers/s390/char/sclp_con.c 		page = sclp_unmake_buffer(buffer);
page               59 drivers/s390/char/sclp_con.c 		list_add_tail((struct list_head *) page, &sclp_con_pages);
page              141 drivers/s390/char/sclp_con.c 	void *page;
page              153 drivers/s390/char/sclp_con.c 	page = sclp_unmake_buffer(buffer);
page              154 drivers/s390/char/sclp_con.c 	list_add_tail((struct list_head *) page, &sclp_con_pages);
page              166 drivers/s390/char/sclp_con.c 	void *page;
page              190 drivers/s390/char/sclp_con.c 			page = sclp_con_pages.next;
page              191 drivers/s390/char/sclp_con.c 			list_del((struct list_head *) page);
page              192 drivers/s390/char/sclp_con.c 			sclp_conbuf = sclp_make_buffer(page, sclp_con_columns,
page              315 drivers/s390/char/sclp_con.c 	void *page;
page              328 drivers/s390/char/sclp_con.c 		page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
page              329 drivers/s390/char/sclp_con.c 		list_add_tail(page, &sclp_con_pages);
page              221 drivers/s390/char/sclp_cpi_sys.c 				struct kobj_attribute *attr, char *page)
page              226 drivers/s390/char/sclp_cpi_sys.c 	rc = snprintf(page, PAGE_SIZE, "%s\n", system_name);
page              253 drivers/s390/char/sclp_cpi_sys.c 				 struct kobj_attribute *attr, char *page)
page              258 drivers/s390/char/sclp_cpi_sys.c 	rc = snprintf(page, PAGE_SIZE, "%s\n", sysplex_name);
page              285 drivers/s390/char/sclp_cpi_sys.c 				struct kobj_attribute *attr, char *page)
page              290 drivers/s390/char/sclp_cpi_sys.c 	rc = snprintf(page, PAGE_SIZE, "%s\n", system_type);
page              317 drivers/s390/char/sclp_cpi_sys.c 				 struct kobj_attribute *attr, char *page)
page              324 drivers/s390/char/sclp_cpi_sys.c 	return snprintf(page, PAGE_SIZE, "%#018llx\n", level);
page               97 drivers/s390/char/sclp_ocf.c 			     struct kobj_attribute *attr, char *page)
page              104 drivers/s390/char/sclp_ocf.c 	return snprintf(page, PAGE_SIZE, "%s\n", name);
page              111 drivers/s390/char/sclp_ocf.c 				struct kobj_attribute *attr, char *page)
page              116 drivers/s390/char/sclp_ocf.c 	rc = snprintf(page, PAGE_SIZE, "%s\n", hmc_network);
page               48 drivers/s390/char/sclp_rw.c sclp_make_buffer(void *page, unsigned short columns, unsigned short htab)
page               53 drivers/s390/char/sclp_rw.c 	sccb = (struct sccb_header *) page;
page              188 drivers/s390/char/sclp_sd.c static int sclp_sd_sync(unsigned long page, u8 eq, u8 di, u64 sat, u64 sa,
page              191 drivers/s390/char/sclp_sd.c 	struct sclp_sd_sccb *sccb = (void *) page;
page              287 drivers/s390/char/sclp_sd.c 	unsigned long page, asce = 0;
page              291 drivers/s390/char/sclp_sd.c 	page = __get_free_page(GFP_KERNEL | GFP_DMA);
page              292 drivers/s390/char/sclp_sd.c 	if (!page)
page              296 drivers/s390/char/sclp_sd.c 	rc = sclp_sd_sync(page, SD_EQ_SIZE, di, 0, 0, &dsize, &esize);
page              318 drivers/s390/char/sclp_sd.c 	rc = sclp_sd_sync(page, SD_EQ_STORE_DATA, di, asce, (u64) data, &dsize,
page              323 drivers/s390/char/sclp_sd.c 			sclp_sd_sync(page, SD_EQ_HALT, di, 0, 0, NULL, NULL);
page              335 drivers/s390/char/sclp_sd.c 	free_page(page);
page              111 drivers/s390/char/sclp_tty.c 	void *page;
page              114 drivers/s390/char/sclp_tty.c 		page = sclp_unmake_buffer(buffer);
page              119 drivers/s390/char/sclp_tty.c 		list_add_tail((struct list_head *) page, &sclp_tty_pages);
page              175 drivers/s390/char/sclp_tty.c 	void *page;
page              195 drivers/s390/char/sclp_tty.c 			page = sclp_tty_pages.next;
page              196 drivers/s390/char/sclp_tty.c 			list_del((struct list_head *) page);
page              197 drivers/s390/char/sclp_tty.c 			sclp_ttybuf = sclp_make_buffer(page, sclp_tty_columns,
page              501 drivers/s390/char/sclp_tty.c 	void *page;
page              522 drivers/s390/char/sclp_tty.c 		page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
page              523 drivers/s390/char/sclp_tty.c 		if (page == NULL) {
page              527 drivers/s390/char/sclp_tty.c 		list_add_tail((struct list_head *) page, &sclp_tty_pages);
page              124 drivers/s390/char/sclp_vt220.c 	void *page;
page              128 drivers/s390/char/sclp_vt220.c 		page = request->sclp_req.sccb;
page              132 drivers/s390/char/sclp_vt220.c 		list_add_tail((struct list_head *) page, &sclp_vt220_empty);
page              266 drivers/s390/char/sclp_vt220.c sclp_vt220_initialize_page(void *page)
page              273 drivers/s390/char/sclp_vt220.c 			((addr_t) page + PAGE_SIZE)) - 1;
page              275 drivers/s390/char/sclp_vt220.c 	request->sclp_req.sccb = page;
page              277 drivers/s390/char/sclp_vt220.c 	sccb = (struct sclp_vt220_sccb *) page;
page              375 drivers/s390/char/sclp_vt220.c 	void *page;
page              387 drivers/s390/char/sclp_vt220.c 	page = request->sclp_req.sccb;
page              388 drivers/s390/char/sclp_vt220.c 	list_add_tail((struct list_head *) page, &sclp_vt220_empty);
page              409 drivers/s390/char/sclp_vt220.c 	void *page;
page              432 drivers/s390/char/sclp_vt220.c 			page = (void *) sclp_vt220_empty.next;
page              433 drivers/s390/char/sclp_vt220.c 			list_del((struct list_head *) page);
page              435 drivers/s390/char/sclp_vt220.c 				sclp_vt220_initialize_page(page);
page              666 drivers/s390/char/sclp_vt220.c 	struct list_head *page, *p;
page              668 drivers/s390/char/sclp_vt220.c 	list_for_each_safe(page, p, &sclp_vt220_empty) {
page              669 drivers/s390/char/sclp_vt220.c 		list_del(page);
page              670 drivers/s390/char/sclp_vt220.c 		free_page((unsigned long) page);
page              690 drivers/s390/char/sclp_vt220.c 	void *page;
page              709 drivers/s390/char/sclp_vt220.c 		page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
page              710 drivers/s390/char/sclp_vt220.c 		if (!page)
page              712 drivers/s390/char/sclp_vt220.c 		list_add_tail(page, &sclp_vt220_empty);
page               62 drivers/s390/char/vmcp.c 	struct page *page = NULL;
page               73 drivers/s390/char/vmcp.c 		page = cma_alloc(vmcp_cma, nr_pages, 0, false);
page               74 drivers/s390/char/vmcp.c 	if (page) {
page               75 drivers/s390/char/vmcp.c 		session->response = (char *)page_to_phys(page);
page               85 drivers/s390/char/vmcp.c 	struct page *page;
page               92 drivers/s390/char/vmcp.c 		page = phys_to_page((unsigned long)session->response);
page               93 drivers/s390/char/vmcp.c 		cma_release(vmcp_cma, page, nr_pages);
page              899 drivers/s390/cio/chsc.c 				     int c, int m, void *page)
page              912 drivers/s390/cio/chsc.c 	memset(page, 0, PAGE_SIZE);
page              913 drivers/s390/cio/chsc.c 	scpd_area = page;
page             1209 drivers/s390/cio/chsc.c int chsc_sstpc(void *page, unsigned int op, u16 ctrl, u64 *clock_delta)
page             1225 drivers/s390/cio/chsc.c 	memset(page, 0, PAGE_SIZE);
page             1226 drivers/s390/cio/chsc.c 	rr = page;
page             1240 drivers/s390/cio/chsc.c int chsc_sstpi(void *page, void *result, size_t size)
page             1250 drivers/s390/cio/chsc.c 	memset(page, 0, PAGE_SIZE);
page             1251 drivers/s390/cio/chsc.c 	rr = page;
page              154 drivers/s390/cio/chsc.h 				     int c, int m, void *page);
page             2587 drivers/s390/net/qeth_core_main.c 	struct page *page;
page             2612 drivers/s390/net/qeth_core_main.c 			page = alloc_page(GFP_ATOMIC);
page             2613 drivers/s390/net/qeth_core_main.c 			if (!page) {
page             2617 drivers/s390/net/qeth_core_main.c 				entry->elements[i] = page_address(page);
page             5016 drivers/s390/net/qeth_core_main.c 	struct page *page = virt_to_page(element->addr);
page             5032 drivers/s390/net/qeth_core_main.c 	get_page(page);
page             5033 drivers/s390/net/qeth_core_main.c 	skb_add_rx_frag(skb, next_frag, page, offset, data_len, data_len);
page              232 drivers/s390/scsi/zfcp_fc.c 				   struct fc_els_rscn_page *page)
page              240 drivers/s390/scsi/zfcp_fc.c 		if ((port->d_id & range) == (ntoh24(page->rscn_fid) & range))
page              251 drivers/s390/scsi/zfcp_fc.c 	struct fc_els_rscn_page *page;
page              257 drivers/s390/scsi/zfcp_fc.c 	page = (struct fc_els_rscn_page *) head;
page              281 drivers/s390/scsi/zfcp_fc.c 		page++;
page              282 drivers/s390/scsi/zfcp_fc.c 		afmt = page->rscn_page_flags & ELS_RSCN_ADDR_FMT_MASK;
page              284 drivers/s390/scsi/zfcp_fc.c 				       page);
page              286 drivers/s390/scsi/zfcp_fc.c 				      *(u32 *)page);
page              802 drivers/s390/scsi/zfcp_fsf.c 	struct page *page;
page              817 drivers/s390/scsi/zfcp_fsf.c 	page = mempool_alloc(adapter->pool.sr_data, GFP_ATOMIC);
page              818 drivers/s390/scsi/zfcp_fsf.c 	if (!page) {
page              822 drivers/s390/scsi/zfcp_fsf.c 	sr_buf = page_address(page);
page              196 drivers/sbus/char/oradax.c 	struct page		*pages[DAX_CA_ELEMS][NUM_STREAM_TYPES];
page              409 drivers/sbus/char/oradax.c 			struct page *p = ctx->pages[i][j];
page              422 drivers/sbus/char/oradax.c static int dax_lock_page(void *va, struct page **p)
page              141 drivers/scsi/aic7xxx/aicasm/aicasm_insformat.h 	uint32_t	page		: 3,
page              153 drivers/scsi/aic7xxx/aicasm/aicasm_insformat.h 			page		: 3;
page              213 drivers/scsi/aic94xx/aic94xx_seq.c 	u32 page;
page              216 drivers/scsi/aic94xx/aic94xx_seq.c 	for (page = 0; page < pages; page++) {
page              220 drivers/scsi/aic94xx/aic94xx_seq.c 				    page << LmRAMPAGE_LSHIFT);
page              230 drivers/scsi/aic94xx/aic94xx_seq.c 					   lseq, page, i);
page              275 drivers/scsi/aic94xx/aic94xx_seq.c 	int page;
page              304 drivers/scsi/aic94xx/aic94xx_seq.c 	for (page = 0; page < pages; page++) {
page              306 drivers/scsi/aic94xx/aic94xx_seq.c 		u32 left = min(size-page*MAX_DMA_OVLY_COUNT,
page              309 drivers/scsi/aic94xx/aic94xx_seq.c 		memcpy(token->vaddr, prog + page*MAX_DMA_OVLY_COUNT, left);
page              312 drivers/scsi/aic94xx/aic94xx_seq.c 		reg = !page ? RESETOVLYDMA : 0;
page              907 drivers/scsi/arm/acornscsi.c     unsigned int page, offset, len = length;
page              909 drivers/scsi/arm/acornscsi.c     page = (start_addr >> 12);
page              912 drivers/scsi/arm/acornscsi.c     writeb((page & 0x3f) | host->card.page_reg, host->fast + PAGE_REG);
page              930 drivers/scsi/arm/acornscsi.c 	    page ++;
page              931 drivers/scsi/arm/acornscsi.c 	    writeb((page & 0x3f) | host->card.page_reg, host->fast + PAGE_REG);
page              952 drivers/scsi/arm/acornscsi.c     unsigned int page, offset, len = length;
page              954 drivers/scsi/arm/acornscsi.c     page = (start_addr >> 12);
page              957 drivers/scsi/arm/acornscsi.c     writeb((page & 0x3f) | host->card.page_reg, host->fast + PAGE_REG);
page              975 drivers/scsi/arm/acornscsi.c 	    page ++;
page              976 drivers/scsi/arm/acornscsi.c 	    writeb((page & 0x3f) | host->card.page_reg, host->fast + PAGE_REG);
page              665 drivers/scsi/bfa/bfa_fcbuild.c 	int             page;
page              675 drivers/scsi/bfa/bfa_fcbuild.c 	for (page = 0; page < num_pages; page++) {
page              676 drivers/scsi/bfa/bfa_fcbuild.c 		tprlo_acc->tprlo_acc_params[page].opa_valid = 0;
page              677 drivers/scsi/bfa/bfa_fcbuild.c 		tprlo_acc->tprlo_acc_params[page].rpa_valid = 0;
page              678 drivers/scsi/bfa/bfa_fcbuild.c 		tprlo_acc->tprlo_acc_params[page].fc4type_csp = FC_TYPE_FCP;
page              679 drivers/scsi/bfa/bfa_fcbuild.c 		tprlo_acc->tprlo_acc_params[page].orig_process_assc = 0;
page              680 drivers/scsi/bfa/bfa_fcbuild.c 		tprlo_acc->tprlo_acc_params[page].resp_process_assc = 0;
page              689 drivers/scsi/bfa/bfa_fcbuild.c 	int             page;
page              698 drivers/scsi/bfa/bfa_fcbuild.c 	for (page = 0; page < num_pages; page++) {
page              699 drivers/scsi/bfa/bfa_fcbuild.c 		prlo_acc->prlo_acc_params[page].opa_valid = 0;
page              700 drivers/scsi/bfa/bfa_fcbuild.c 		prlo_acc->prlo_acc_params[page].rpa_valid = 0;
page              701 drivers/scsi/bfa/bfa_fcbuild.c 		prlo_acc->prlo_acc_params[page].fc4type_csp = FC_TYPE_FCP;
page              702 drivers/scsi/bfa/bfa_fcbuild.c 		prlo_acc->prlo_acc_params[page].orig_process_assc = 0;
page              703 drivers/scsi/bfa/bfa_fcbuild.c 		prlo_acc->prlo_acc_params[page].resp_process_assc = 0;
page              850 drivers/scsi/bfa/bfa_fcbuild.c 	int             page;
page              858 drivers/scsi/bfa/bfa_fcbuild.c 	for (page = 0; page < num_pages; page++) {
page              859 drivers/scsi/bfa/bfa_fcbuild.c 		prlo->prlo_params[page].type = FC_TYPE_FCP;
page              860 drivers/scsi/bfa/bfa_fcbuild.c 		prlo->prlo_params[page].opa_valid = 0;
page              861 drivers/scsi/bfa/bfa_fcbuild.c 		prlo->prlo_params[page].rpa_valid = 0;
page              862 drivers/scsi/bfa/bfa_fcbuild.c 		prlo->prlo_params[page].orig_process_assc = 0;
page              863 drivers/scsi/bfa/bfa_fcbuild.c 		prlo->prlo_params[page].resp_process_assc = 0;
page              874 drivers/scsi/bfa/bfa_fcbuild.c 	int             page;
page              882 drivers/scsi/bfa/bfa_fcbuild.c 	for (page = 0; page < num_pages; page++) {
page              883 drivers/scsi/bfa/bfa_fcbuild.c 		tprlo->tprlo_params[page].type = FC_TYPE_FCP;
page              884 drivers/scsi/bfa/bfa_fcbuild.c 		tprlo->tprlo_params[page].opa_valid = 0;
page              885 drivers/scsi/bfa/bfa_fcbuild.c 		tprlo->tprlo_params[page].rpa_valid = 0;
page              886 drivers/scsi/bfa/bfa_fcbuild.c 		tprlo->tprlo_params[page].orig_process_assc = 0;
page              887 drivers/scsi/bfa/bfa_fcbuild.c 		tprlo->tprlo_params[page].resp_process_assc = 0;
page              889 drivers/scsi/bfa/bfa_fcbuild.c 			tprlo->tprlo_params[page].global_process_logout = 1;
page              891 drivers/scsi/bfa/bfa_fcbuild.c 			tprlo->tprlo_params[page].tpo_nport_valid = 1;
page              892 drivers/scsi/bfa/bfa_fcbuild.c 			tprlo->tprlo_params[page].tpo_nport_id = (tpr_id);
page              666 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	dma_addr_t page;
page              722 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	page = tgt->rq_dma;
page              726 drivers/scsi/bnx2fc/bnx2fc_tgt.c 		*pbl = (u32)page;
page              728 drivers/scsi/bnx2fc/bnx2fc_tgt.c 		*pbl = (u32)((u64)page >> 32);
page              730 drivers/scsi/bnx2fc/bnx2fc_tgt.c 		page += CNIC_PAGE_SIZE;
page              776 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	page = tgt->confq_dma;
page              780 drivers/scsi/bnx2fc/bnx2fc_tgt.c 		*pbl = (u32)page;
page              782 drivers/scsi/bnx2fc/bnx2fc_tgt.c 		*pbl = (u32)((u64)page >> 32);
page              784 drivers/scsi/bnx2fc/bnx2fc_tgt.c 		page += CNIC_PAGE_SIZE;
page              226 drivers/scsi/bnx2i/bnx2i.h 	void *page[1];
page              938 drivers/scsi/bnx2i/bnx2i_hwi.c 	dma_addr_t page;
page              949 drivers/scsi/bnx2i/bnx2i_hwi.c 	page = ep->qp.sq_phys;
page              958 drivers/scsi/bnx2i/bnx2i_hwi.c 			*ptbl = (u32) page;
page              960 drivers/scsi/bnx2i/bnx2i_hwi.c 			*ptbl = (u32) ((u64) page >> 32);
page              962 drivers/scsi/bnx2i/bnx2i_hwi.c 			page += CNIC_PAGE_SIZE;
page              966 drivers/scsi/bnx2i/bnx2i_hwi.c 			*ptbl = (u32) ((u64) page >> 32);
page              968 drivers/scsi/bnx2i/bnx2i_hwi.c 			*ptbl = (u32) page;
page              970 drivers/scsi/bnx2i/bnx2i_hwi.c 			page += CNIC_PAGE_SIZE;
page              977 drivers/scsi/bnx2i/bnx2i_hwi.c 	page = ep->qp.rq_phys;
page              986 drivers/scsi/bnx2i/bnx2i_hwi.c 			*ptbl = (u32) page;
page              988 drivers/scsi/bnx2i/bnx2i_hwi.c 			*ptbl = (u32) ((u64) page >> 32);
page              990 drivers/scsi/bnx2i/bnx2i_hwi.c 			page += CNIC_PAGE_SIZE;
page              994 drivers/scsi/bnx2i/bnx2i_hwi.c 			*ptbl = (u32) ((u64) page >> 32);
page              996 drivers/scsi/bnx2i/bnx2i_hwi.c 			*ptbl = (u32) page;
page              998 drivers/scsi/bnx2i/bnx2i_hwi.c 			page += CNIC_PAGE_SIZE;
page             1005 drivers/scsi/bnx2i/bnx2i_hwi.c 	page = ep->qp.cq_phys;
page             1014 drivers/scsi/bnx2i/bnx2i_hwi.c 			*ptbl = (u32) page;
page             1016 drivers/scsi/bnx2i/bnx2i_hwi.c 			*ptbl = (u32) ((u64) page >> 32);
page             1018 drivers/scsi/bnx2i/bnx2i_hwi.c 			page += CNIC_PAGE_SIZE;
page             1022 drivers/scsi/bnx2i/bnx2i_hwi.c 			*ptbl = (u32) ((u64) page >> 32);
page             1024 drivers/scsi/bnx2i/bnx2i_hwi.c 			*ptbl = (u32) page;
page             1026 drivers/scsi/bnx2i/bnx2i_hwi.c 			page += CNIC_PAGE_SIZE;
page             1842 drivers/scsi/cxgbi/libcxgbi.c 	struct page *page = sg_page(sg);
page             1858 drivers/scsi/cxgbi/libcxgbi.c 			page = sg_page(sg);
page             1862 drivers/scsi/cxgbi/libcxgbi.c 		if (i && page == frags[i - 1].page &&
page             1873 drivers/scsi/cxgbi/libcxgbi.c 			frags[i].page = page;
page             1967 drivers/scsi/cxgbi/libcxgbi.c 	struct page *pg;
page             2010 drivers/scsi/cxgbi/libcxgbi.c 				char *src = kmap_atomic(frag->page);
page             2025 drivers/scsi/cxgbi/libcxgbi.c 						tdata->frags[i].page,
page             1057 drivers/scsi/cxlflash/superpipe.c static struct page *get_err_page(struct cxlflash_cfg *cfg)
page             1059 drivers/scsi/cxlflash/superpipe.c 	struct page *err_page = global.err_page;
page             1112 drivers/scsi/cxlflash/superpipe.c 	struct page *err_page = NULL;
page             1147 drivers/scsi/cxlflash/superpipe.c 		vmf->page = err_page;
page              114 drivers/scsi/cxlflash/superpipe.h 	struct page *err_page; /* One page of all 0xF for error notification */
page             1288 drivers/scsi/fcoe/fcoe.c 	struct page *crc_eof;
page              471 drivers/scsi/fcoe/fcoe_transport.c 	struct page *page;
page              473 drivers/scsi/fcoe/fcoe_transport.c 	page = fps->crc_eof_page;
page              474 drivers/scsi/fcoe/fcoe_transport.c 	if (!page) {
page              475 drivers/scsi/fcoe/fcoe_transport.c 		page = alloc_page(GFP_ATOMIC);
page              476 drivers/scsi/fcoe/fcoe_transport.c 		if (!page)
page              479 drivers/scsi/fcoe/fcoe_transport.c 		fps->crc_eof_page = page;
page              483 drivers/scsi/fcoe/fcoe_transport.c 	get_page(page);
page              484 drivers/scsi/fcoe/fcoe_transport.c 	skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page,
page              494 drivers/scsi/fcoe/fcoe_transport.c 		put_page(page);
page              325 drivers/scsi/hpsa.c 	unsigned char scsi3addr[], u8 page);
page             2986 drivers/scsi/hpsa.c 					u8 page, u8 *buf, size_t bufsize)
page             2994 drivers/scsi/hpsa.c 			page, scsi3addr, TYPE_CMD)) {
page             3037 drivers/scsi/hpsa.c 			u16 page, unsigned char *buf,
page             3047 drivers/scsi/hpsa.c 			page, scsi3addr, TYPE_CMD)) {
page             3596 drivers/scsi/hpsa.c 	unsigned char scsi3addr[], u8 page)
page             3628 drivers/scsi/hpsa.c 		if (buf[3 + i] == page)
page             3956 drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c 					  char *page)
page             3958 drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c 	return scnprintf(page, PAGE_SIZE, "%s\n", IBMVSCSIS_VERSION);
page             3968 drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c 					 char *page)
page             3975 drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c 	return snprintf(page, PAGE_SIZE, "%d\n", (tport->enabled) ? 1 : 0);
page             3979 drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c 					  const char *page, size_t count)
page             3990 drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c 	rc = kstrtoul(page, 0, &tmp);
page             2971 drivers/scsi/ipr.c 	__be32 *page;
page             2984 drivers/scsi/ipr.c 			page = (__be32 *)__get_free_page(GFP_ATOMIC);
page             2986 drivers/scsi/ipr.c 			if (!page) {
page             2992 drivers/scsi/ipr.c 			ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
page             2995 drivers/scsi/ipr.c 			page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
page             3007 drivers/scsi/ipr.c 							&page[ioa_dump->page_offset / 4],
page             3914 drivers/scsi/ipr.c 		struct page *page = sg_page(sg);
page             3916 drivers/scsi/ipr.c 		kaddr = kmap(page);
page             3918 drivers/scsi/ipr.c 		kunmap(page);
page             3929 drivers/scsi/ipr.c 		struct page *page = sg_page(sg);
page             3931 drivers/scsi/ipr.c 		kaddr = kmap(page);
page             3933 drivers/scsi/ipr.c 		kunmap(page);
page             7980 drivers/scsi/ipr.c static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
page             7991 drivers/scsi/ipr.c 	ioarcb->cmd_pkt.cdb[2] = page;
page             8010 drivers/scsi/ipr.c static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
page             8015 drivers/scsi/ipr.c 		if (page0->page[i] == page)
page              845 drivers/scsi/ipr.h 	u8 page[IPR_INQUIRY_PAGE0_ENTRIES];
page             5817 drivers/scsi/ips.c 	scb->cmd.nvram.page = 5;
page              526 drivers/scsi/ips.h    uint8_t  page;
page             1427 drivers/scsi/isci/request.c 			struct page *page = sg_page(sg);
page             1430 drivers/scsi/isci/request.c 			kaddr = kmap_atomic(page);
page               46 drivers/scsi/iscsi_tcp.h 	ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int);
page              601 drivers/scsi/libfc/fc_fcp.c 	struct page *page;
page              681 drivers/scsi/libfc/fc_fcp.c 		page = sg_page(sg) + (off >> PAGE_SHIFT);
page              683 drivers/scsi/libfc/fc_fcp.c 			get_page(page);
page              686 drivers/scsi/libfc/fc_fcp.c 					   page, off & ~PAGE_MASK, sg_bytes);
page              695 drivers/scsi/libfc/fc_fcp.c 			page_addr = kmap_atomic(page);
page             1137 drivers/scsi/lpfc/lpfc_hw4.h 			struct dma_address page[LPFC_MAX_EQ_PAGE];
page             1261 drivers/scsi/lpfc/lpfc_hw4.h 			struct dma_address page[LPFC_MAX_CQ_PAGE];
page             1371 drivers/scsi/lpfc/lpfc_hw4.h 			struct dma_address page[1];
page             1421 drivers/scsi/lpfc/lpfc_hw4.h 			struct dma_address page[LPFC_MAX_WQ_PAGE_V0];
page             1455 drivers/scsi/lpfc/lpfc_hw4.h 			struct dma_address page[LPFC_MAX_WQ_PAGE-1];
page             1579 drivers/scsi/lpfc/lpfc_hw4.h 			struct dma_address page[LPFC_MAX_RQ_PAGE];
page             1631 drivers/scsi/lpfc/lpfc_hw4.h 			struct dma_address page[1];
page             1697 drivers/scsi/lpfc/lpfc_hw4.h 			struct dma_address page[LPFC_MAX_MQ_PAGE];
page             1748 drivers/scsi/lpfc/lpfc_hw4.h 			struct dma_address page[LPFC_MAX_MQ_PAGE];
page             14827 drivers/scsi/lpfc/lpfc_sli.c 		eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
page             14829 drivers/scsi/lpfc/lpfc_sli.c 		eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
page             14961 drivers/scsi/lpfc/lpfc_sli.c 		cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
page             14963 drivers/scsi/lpfc/lpfc_sli.c 		cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
page             15214 drivers/scsi/lpfc/lpfc_sli.c 			cq_set->u.request.page[cnt].addr_lo =
page             15216 drivers/scsi/lpfc/lpfc_sli.c 			cq_set->u.request.page[cnt].addr_hi =
page             15306 drivers/scsi/lpfc/lpfc_sli.c 		mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
page             15308 drivers/scsi/lpfc/lpfc_sli.c 		mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
page             15418 drivers/scsi/lpfc/lpfc_sli.c 		mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
page             15420 drivers/scsi/lpfc/lpfc_sli.c 		mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
page             15500 drivers/scsi/lpfc/lpfc_sli.c 	struct dma_address *page;
page             15571 drivers/scsi/lpfc/lpfc_sli.c 		page = wq_create->u.request_1.page;
page             15574 drivers/scsi/lpfc/lpfc_sli.c 		page = wq_create->u.request.page;
page             15580 drivers/scsi/lpfc/lpfc_sli.c 		page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
page             15581 drivers/scsi/lpfc/lpfc_sli.c 		page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
page             15843 drivers/scsi/lpfc/lpfc_sli.c 		rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
page             15845 drivers/scsi/lpfc/lpfc_sli.c 		rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
page             15985 drivers/scsi/lpfc/lpfc_sli.c 		rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
page             15987 drivers/scsi/lpfc/lpfc_sli.c 		rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
page             16142 drivers/scsi/lpfc/lpfc_sli.c 			rq_create->u.request.page[cnt].addr_lo =
page             16144 drivers/scsi/lpfc/lpfc_sli.c 			rq_create->u.request.page[cnt].addr_hi =
page             16154 drivers/scsi/lpfc/lpfc_sli.c 			rq_create->u.request.page[cnt].addr_lo =
page             16156 drivers/scsi/lpfc/lpfc_sli.c 			rq_create->u.request.page[cnt].addr_hi =
page               82 drivers/scsi/mpt3sas/mpt3sas_config.c 	void			*page;
page              208 drivers/scsi/mpt3sas/mpt3sas_config.c 		mem->page = dma_alloc_coherent(&ioc->pdev->dev, mem->sz,
page              210 drivers/scsi/mpt3sas/mpt3sas_config.c 		if (!mem->page) {
page              216 drivers/scsi/mpt3sas/mpt3sas_config.c 		mem->page = ioc->config_page;
page              219 drivers/scsi/mpt3sas/mpt3sas_config.c 	ioc->config_vaddr = mem->page;
page              237 drivers/scsi/mpt3sas/mpt3sas_config.c 		dma_free_coherent(&ioc->pdev->dev, mem->sz, mem->page,
page              343 drivers/scsi/mpt3sas/mpt3sas_config.c 			memcpy(mem.page, config_page, min_t(u16, mem.sz,
page              349 drivers/scsi/mpt3sas/mpt3sas_config.c 			memset(mem.page, 0, min_t(u16, mem.sz, config_page_sz));
page              436 drivers/scsi/mpt3sas/mpt3sas_config.c 		u8 *p = (u8 *)mem.page;
page              464 drivers/scsi/mpt3sas/mpt3sas_config.c 		memcpy(config_page, mem.page, min_t(u16, mem.sz,
page             3270 drivers/scsi/pmcraid.c 		struct page *page = sg_page(sg);
page             3272 drivers/scsi/pmcraid.c 		kaddr = kmap(page);
page             3278 drivers/scsi/pmcraid.c 		kunmap(page);
page             3289 drivers/scsi/pmcraid.c 		struct page *page = sg_page(sg);
page             3291 drivers/scsi/pmcraid.c 		kaddr = kmap(page);
page             3298 drivers/scsi/pmcraid.c 		kunmap(page);
page             1181 drivers/scsi/qedf/qedf_main.c 	dma_addr_t page;
page             1210 drivers/scsi/qedf/qedf_main.c 	page = fcport->sq_dma;
page             1214 drivers/scsi/qedf/qedf_main.c 		*pbl = U64_LO(page);
page             1216 drivers/scsi/qedf/qedf_main.c 		*pbl = U64_HI(page);
page             1218 drivers/scsi/qedf/qedf_main.c 		page += QEDF_PAGE_SIZE;
page             2824 drivers/scsi/qedf/qedf_main.c 	dma_addr_t page;
page             2885 drivers/scsi/qedf/qedf_main.c 	page = qedf->bdq_pbl_list_dma;
page             2889 drivers/scsi/qedf/qedf_main.c 		page += QEDF_PAGE_SIZE;
page             2901 drivers/scsi/qedf/qedf_main.c 	dma_addr_t page;
page             2990 drivers/scsi/qedf/qedf_main.c 		page = qedf->global_queues[i]->cq_dma;
page             2994 drivers/scsi/qedf/qedf_main.c 			*pbl = U64_LO(page);
page             2996 drivers/scsi/qedf/qedf_main.c 			*pbl = U64_HI(page);
page             2998 drivers/scsi/qedf/qedf_main.c 			page += QEDF_PAGE_SIZE;
page             1472 drivers/scsi/qedi/qedi_main.c 	dma_addr_t page;
page             1542 drivers/scsi/qedi/qedi_main.c 	page = qedi->bdq_pbl_list_dma;
page             1546 drivers/scsi/qedi/qedi_main.c 		page += QEDI_PAGE_SIZE;
page             1558 drivers/scsi/qedi/qedi_main.c 	dma_addr_t page;
page             1650 drivers/scsi/qedi/qedi_main.c 		page = qedi->global_queues[i]->cq_dma;
page             1654 drivers/scsi/qedi/qedi_main.c 			*pbl = (u32)page;
page             1656 drivers/scsi/qedi/qedi_main.c 			*pbl = (u32)((u64)page >> 32);
page             1658 drivers/scsi/qedi/qedi_main.c 			page += QEDI_PAGE_SIZE;
page             1693 drivers/scsi/qedi/qedi_main.c 	dma_addr_t page;
page             1725 drivers/scsi/qedi/qedi_main.c 	page = ep->sq_dma;
page             1729 drivers/scsi/qedi/qedi_main.c 		*pbl = (u32)page;
page             1731 drivers/scsi/qedi/qedi_main.c 		*pbl = (u32)((u64)page >> 32);
page             1733 drivers/scsi/qedi/qedi_main.c 		page += QEDI_PAGE_SIZE;
page              824 drivers/scsi/qla2xxx/tcm_qla2xxx.c 		struct config_item *item, char *page)			\
page              830 drivers/scsi/qla2xxx/tcm_qla2xxx.c 	return sprintf(page, "%u\n", tpg->tpg_attrib.name);	\
page              834 drivers/scsi/qla2xxx/tcm_qla2xxx.c 		struct config_item *item, const char *page, size_t count) \
page              843 drivers/scsi/qla2xxx/tcm_qla2xxx.c 	ret = kstrtoul(page, 0, &val);					\
page              885 drivers/scsi/qla2xxx/tcm_qla2xxx.c 		char *page)
page              891 drivers/scsi/qla2xxx/tcm_qla2xxx.c 	return snprintf(page, PAGE_SIZE, "%d\n",
page              896 drivers/scsi/qla2xxx/tcm_qla2xxx.c 		const char *page, size_t count)
page              908 drivers/scsi/qla2xxx/tcm_qla2xxx.c 	rc = kstrtoul(page, 0, &op);
page              935 drivers/scsi/qla2xxx/tcm_qla2xxx.c 		char *page)
page              937 drivers/scsi/qla2xxx/tcm_qla2xxx.c 	return target_show_dynamic_sessions(to_tpg(item), page);
page              941 drivers/scsi/qla2xxx/tcm_qla2xxx.c 		const char *page, size_t count)
page              947 drivers/scsi/qla2xxx/tcm_qla2xxx.c 	int ret = kstrtoul(page, 0, &val);
page              963 drivers/scsi/qla2xxx/tcm_qla2xxx.c 		char *page)
page              969 drivers/scsi/qla2xxx/tcm_qla2xxx.c 	return sprintf(page, "%d\n", tpg->tpg_attrib.fabric_prot_type);
page             1052 drivers/scsi/qla2xxx/tcm_qla2xxx.c 		char *page)
page             1054 drivers/scsi/qla2xxx/tcm_qla2xxx.c 	return tcm_qla2xxx_tpg_enable_show(item, page);
page             1058 drivers/scsi/qla2xxx/tcm_qla2xxx.c 		const char *page, size_t count)
page             1070 drivers/scsi/qla2xxx/tcm_qla2xxx.c 	rc = kstrtoul(page, 0, &op);
page             1791 drivers/scsi/qla2xxx/tcm_qla2xxx.c 		char *page)
page             1793 drivers/scsi/qla2xxx/tcm_qla2xxx.c 	return sprintf(page,
page              310 drivers/scsi/scsi.c 							u8 page, unsigned len)
page              320 drivers/scsi/scsi.c 	cmd[2] = page;
page              335 drivers/scsi/scsi.c 	if (buffer[1] != page)
page              355 drivers/scsi/scsi.c int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf,
page              369 drivers/scsi/scsi.c 	if (page == 0)
page              373 drivers/scsi/scsi.c 		if (buf[i] == page)
page              383 drivers/scsi/scsi.c 	result = scsi_vpd_inquiry(sdev, buf, page, buf_len);
page              401 drivers/scsi/scsi.c static struct scsi_vpd *scsi_get_vpd_buf(struct scsi_device *sdev, u8 page)
page              411 drivers/scsi/scsi.c 	result = scsi_vpd_inquiry(sdev, vpd_buf->data, page, vpd_len);
page              427 drivers/scsi/scsi.c static void scsi_update_vpd_page(struct scsi_device *sdev, u8 page,
page              432 drivers/scsi/scsi.c 	vpd_buf = scsi_get_vpd_buf(sdev, page);
page             2862 drivers/scsi/scsi_lib.c 	struct page *page;
page             2885 drivers/scsi/scsi_lib.c 	page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT));
page             2893 drivers/scsi/scsi_lib.c 	return kmap_atomic(page);
page               54 drivers/scsi/scsi_proc.c 	char *page;
page               62 drivers/scsi/scsi_proc.c 	page = (char *)__get_free_page(GFP_KERNEL);
page               63 drivers/scsi/scsi_proc.c 	if (page) {
page               65 drivers/scsi/scsi_proc.c 		if (copy_from_user(page, buf, count))
page               67 drivers/scsi/scsi_proc.c 		ret = shost->hostt->write_info(shost, page, count);
page               70 drivers/scsi/scsi_proc.c 	free_page((unsigned long)page);
page              628 drivers/scsi/ses.c 	unsigned char *buf = NULL, *hdr_buf, *type_ptr, page;
page              657 drivers/scsi/ses.c 	page = 1;
page              658 drivers/scsi/ses.c 	result = ses_recv_diag(sdev, page, hdr_buf, INIT_ALLOC_SIZE);
page              667 drivers/scsi/ses.c 	result = ses_recv_diag(sdev, page, buf, len);
page              697 drivers/scsi/ses.c 	page = 2;
page              698 drivers/scsi/ses.c 	result = ses_recv_diag(sdev, page, hdr_buf, INIT_ALLOC_SIZE);
page              717 drivers/scsi/ses.c 	page = 10;
page              718 drivers/scsi/ses.c 	result = ses_recv_diag(sdev, page, hdr_buf, INIT_ALLOC_SIZE);
page              726 drivers/scsi/ses.c 		result = ses_recv_diag(sdev, page, buf, len);
page              765 drivers/scsi/ses.c 		    page);
page              110 drivers/scsi/sg.c 	struct page **pages;
page             1246 drivers/scsi/sg.c 			struct page *page = nth_page(rsv_schp->pages[k],
page             1248 drivers/scsi/sg.c 			get_page(page);	/* increment page count */
page             1249 drivers/scsi/sg.c 			vmf->page = page;
page             1861 drivers/scsi/sg.c 	int sg_bufflen = tablesize * sizeof(struct page *);
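The sg.c fault handler resolves the faulting offset to one of its reserved pages, takes a reference, and hands it to the core via vmf->page. The shape of such a handler, sketched with a hypothetical lookup:

	static vm_fault_t demo_vm_fault(struct vm_fault *vmf)
	{
		struct page *page = demo_lookup_page(vmf);	/* hypothetical */

		if (!page)
			return VM_FAULT_SIGBUS;
		get_page(page);		/* core drops this ref on unmap */
		vmf->page = page;
		return 0;
	}
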
page             2476 drivers/scsi/st.c static int read_mode_page(struct scsi_tape *STp, int page, int omit_block_descs)
page             2485 drivers/scsi/st.c 	cmd[2] = page;
page             2501 drivers/scsi/st.c static int write_mode_page(struct scsi_tape *STp, int page, int slow)
page             3892 drivers/scsi/st.c 	tb->reserved_pages = kcalloc(max_sg, sizeof(struct page *),
page             3946 drivers/scsi/st.c 		struct page *page;
page             3948 drivers/scsi/st.c 		page = alloc_pages(priority, order);
page             3949 drivers/scsi/st.c 		if (!page) {
page             3958 drivers/scsi/st.c 		STbuffer->reserved_pages[segs] = page;
page             4010 drivers/scsi/st.c 		struct page *page = st_bp->reserved_pages[i];
page             4012 drivers/scsi/st.c 		res = copy_from_user(page_address(page) + offset, ubp, cnt);
page             4042 drivers/scsi/st.c 		struct page *page = st_bp->reserved_pages[i];
page             4044 drivers/scsi/st.c 		res = copy_to_user(ubp, page_address(page) + offset, cnt);
page             4080 drivers/scsi/st.c 		struct page *dpage = st_bp->reserved_pages[dst_seg];
page             4081 drivers/scsi/st.c 		struct page *spage = st_bp->reserved_pages[src_seg];
page             4905 drivers/scsi/st.c 	struct page **pages;
page             4962 drivers/scsi/st.c 		struct page *page = STbp->mapped_pages[i];
page             4965 drivers/scsi/st.c 			SetPageDirty(page);
page             4969 drivers/scsi/st.c 		put_page(page);
page               49 drivers/scsi/st.h 	struct page **reserved_pages;
page               51 drivers/scsi/st.h 	struct page **mapped_pages;
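read_mode_page()/write_mode_page() in st.c place the page code in byte 2 of the CDB. A sketch of the MODE SENSE(6) setup they build on (constants from scsi/scsi_proto.h; the exact flag choices are the tape driver's):

	u8 cmd[6] = { };

	cmd[0] = MODE_SENSE;			/* 0x1a */
	cmd[1] = omit_block_descs ? 0x08 : 0;	/* DBD bit */
	cmd[2] = page & 0x3f;			/* PC=0: current values */
	cmd[4] = 255;				/* allocation length */
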
page              622 drivers/scsi/stex.c 		unsigned char page;
page              624 drivers/scsi/stex.c 		page = cmd->cmnd[2] & 0x3f;
page              625 drivers/scsi/stex.c 		if (page == 0x8 || page == 0x3f) {
page              397 drivers/scsi/xen-scsifront.c 	struct page *page;
page              436 drivers/scsi/xen-scsifront.c 		page = virt_to_page(seg);
page              447 drivers/scsi/xen-scsifront.c 				xen_page_to_gfn(page), 1);
page              453 drivers/scsi/xen-scsifront.c 			page++;
page              463 drivers/scsi/xen-scsifront.c 		page = sg_page(sg);
page              481 drivers/scsi/xen-scsifront.c 				xen_page_to_gfn(page),
page              489 drivers/scsi/xen-scsifront.c 			page++;
page               78 drivers/sfi/sfi_core.c #define TABLE_ON_PAGE(page, table, size) (ON_SAME_PAGE(page, table) && \
page               79 drivers/sfi/sfi_core.c 				ON_SAME_PAGE(page, table + size))
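TABLE_ON_PAGE() checks that both ends of a table land on one page. The helper it relies on is conventionally written as below (a sketch; the real macro may differ):

	#define ON_SAME_PAGE(addr1, addr2) \
		(((unsigned long)(addr1) & PAGE_MASK) == \
		 ((unsigned long)(addr2) & PAGE_MASK))
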
page              996 drivers/soc/ti/knav_qmss_queue.c 	struct page *page;
page             1025 drivers/soc/ti/knav_qmss_queue.c 	page = virt_to_page(region->virt_start);
page             1027 drivers/soc/ti/knav_qmss_queue.c 	region->dma_start = dma_map_page(kdev->dev, page, 0, size,
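The knav code maps a region's first page for device access; the general pattern, including the mandatory error check:

	dma_addr_t dma = dma_map_page(dev, page, 0, size, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, dma))
		return -ENOMEM;
	/* ... device may now read from 'dma' ... */
	dma_unmap_page(dev, dma, size, DMA_TO_DEVICE);
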
page              239 drivers/soundwire/bus.c 	if (msg->page)
page              268 drivers/soundwire/bus.c 	if (msg->page)
page              315 drivers/soundwire/bus.c 	msg->page = true;
page               64 drivers/soundwire/bus.h 	bool page;
page              490 drivers/soundwire/cadence_master.c 	if (msg->page) {
page              176 drivers/spi/spi-mxs.c 	struct page *vm_page;
page              821 drivers/spi/spi.c 	struct page *vm_page;
page              120 drivers/staging/android/ashmem.c static inline bool page_in_range(struct ashmem_range *range, size_t page)
page              122 drivers/staging/android/ashmem.c 	return (range->pgstart <= page) && (range->pgend >= page);
page              133 drivers/staging/android/ashmem.c 				     size_t page)
page              135 drivers/staging/android/ashmem.c 	return range->pgend < page;
page              180 drivers/staging/android/ion/ion.h int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot);
page              290 drivers/staging/android/ion/ion.h struct page *ion_page_pool_alloc(struct ion_page_pool *pool);
page              291 drivers/staging/android/ion/ion.h void ion_page_pool_free(struct ion_page_pool *pool, struct page *page);
page               33 drivers/staging/android/ion/ion_cma_heap.c 	struct page *pages;
page               48 drivers/staging/android/ion/ion_cma_heap.c 		struct page *page = pages;
page               51 drivers/staging/android/ion/ion_cma_heap.c 			void *vaddr = kmap_atomic(page);
page               55 drivers/staging/android/ion/ion_cma_heap.c 			page++;
page               86 drivers/staging/android/ion/ion_cma_heap.c 	struct page *pages = buffer->priv_virt;
page               29 drivers/staging/android/ion/ion_heap.c 	struct page **pages = vmalloc(array_size(npages,
page               30 drivers/staging/android/ion/ion_heap.c 						 sizeof(struct page *)));
page               31 drivers/staging/android/ion/ion_heap.c 	struct page **tmp = pages;
page               43 drivers/staging/android/ion/ion_heap.c 		struct page *page = sg_page(sg);
page               47 drivers/staging/android/ion/ion_heap.c 			*(tmp++) = page++;
page               75 drivers/staging/android/ion/ion_heap.c 		struct page *page = sg_page(sg);
page               83 drivers/staging/android/ion/ion_heap.c 			page += offset / PAGE_SIZE;
page               88 drivers/staging/android/ion/ion_heap.c 		ret = remap_pfn_range(vma, addr, page_to_pfn(page), len,
page              100 drivers/staging/android/ion/ion_heap.c static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot)
page              118 drivers/staging/android/ion/ion_heap.c 	struct page *pages[32];
page              148 drivers/staging/android/ion/ion_heap.c int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot)
page              153 drivers/staging/android/ion/ion_heap.c 	sg_set_page(&sg, page, size, 0);
page               15 drivers/staging/android/ion/ion_page_pool.c static inline struct page *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
page               23 drivers/staging/android/ion/ion_page_pool.c 				     struct page *page)
page               25 drivers/staging/android/ion/ion_page_pool.c 	__free_pages(page, pool->order);
page               28 drivers/staging/android/ion/ion_page_pool.c static void ion_page_pool_add(struct ion_page_pool *pool, struct page *page)
page               31 drivers/staging/android/ion/ion_page_pool.c 	if (PageHighMem(page)) {
page               32 drivers/staging/android/ion/ion_page_pool.c 		list_add_tail(&page->lru, &pool->high_items);
page               35 drivers/staging/android/ion/ion_page_pool.c 		list_add_tail(&page->lru, &pool->low_items);
page               39 drivers/staging/android/ion/ion_page_pool.c 	mod_node_page_state(page_pgdat(page), NR_KERNEL_MISC_RECLAIMABLE,
page               44 drivers/staging/android/ion/ion_page_pool.c static struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high)
page               46 drivers/staging/android/ion/ion_page_pool.c 	struct page *page;
page               50 drivers/staging/android/ion/ion_page_pool.c 		page = list_first_entry(&pool->high_items, struct page, lru);
page               54 drivers/staging/android/ion/ion_page_pool.c 		page = list_first_entry(&pool->low_items, struct page, lru);
page               58 drivers/staging/android/ion/ion_page_pool.c 	list_del(&page->lru);
page               59 drivers/staging/android/ion/ion_page_pool.c 	mod_node_page_state(page_pgdat(page), NR_KERNEL_MISC_RECLAIMABLE,
page               61 drivers/staging/android/ion/ion_page_pool.c 	return page;
page               64 drivers/staging/android/ion/ion_page_pool.c struct page *ion_page_pool_alloc(struct ion_page_pool *pool)
page               66 drivers/staging/android/ion/ion_page_pool.c 	struct page *page = NULL;
page               72 drivers/staging/android/ion/ion_page_pool.c 		page = ion_page_pool_remove(pool, true);
page               74 drivers/staging/android/ion/ion_page_pool.c 		page = ion_page_pool_remove(pool, false);
page               77 drivers/staging/android/ion/ion_page_pool.c 	if (!page)
page               78 drivers/staging/android/ion/ion_page_pool.c 		page = ion_page_pool_alloc_pages(pool);
page               80 drivers/staging/android/ion/ion_page_pool.c 	return page;
page               83 drivers/staging/android/ion/ion_page_pool.c void ion_page_pool_free(struct ion_page_pool *pool, struct page *page)
page               85 drivers/staging/android/ion/ion_page_pool.c 	BUG_ON(pool->order != compound_order(page));
page               87 drivers/staging/android/ion/ion_page_pool.c 	ion_page_pool_add(pool, page);
page              115 drivers/staging/android/ion/ion_page_pool.c 		struct page *page;
page              119 drivers/staging/android/ion/ion_page_pool.c 			page = ion_page_pool_remove(pool, false);
page              121 drivers/staging/android/ion/ion_page_pool.c 			page = ion_page_pool_remove(pool, true);
page              127 drivers/staging/android/ion/ion_page_pool.c 		ion_page_pool_free_pages(pool, page);
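ion_page_pool caches freed pages on LRU lists (split by highmem) and falls back to the buddy allocator on a miss. A minimal single-list sketch of the same idea, locking and highmem split omitted:

	struct demo_pool {
		unsigned int order;
		struct list_head items;		/* free pages, via page->lru */
	};

	static struct page *demo_pool_alloc(struct demo_pool *pool)
	{
		struct page *page;

		if (!list_empty(&pool->items)) {
			page = list_first_entry(&pool->items, struct page, lru);
			list_del(&page->lru);
			return page;
		}
		/* __GFP_COMP keeps the order readable via compound_order() */
		return alloc_pages(GFP_KERNEL | __GFP_COMP, pool->order);
	}

	static void demo_pool_free(struct demo_pool *pool, struct page *page)
	{
		list_add_tail(&page->lru, &pool->items);
	}
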
page               47 drivers/staging/android/ion/ion_system_heap.c static struct page *alloc_buffer_page(struct ion_system_heap *heap,
page               57 drivers/staging/android/ion/ion_system_heap.c 			     struct ion_buffer *buffer, struct page *page)
page               60 drivers/staging/android/ion/ion_system_heap.c 	unsigned int order = compound_order(page);
page               64 drivers/staging/android/ion/ion_system_heap.c 		__free_pages(page, order);
page               70 drivers/staging/android/ion/ion_system_heap.c 	ion_page_pool_free(pool, page);
page               73 drivers/staging/android/ion/ion_system_heap.c static struct page *alloc_largest_available(struct ion_system_heap *heap,
page               78 drivers/staging/android/ion/ion_system_heap.c 	struct page *page;
page               87 drivers/staging/android/ion/ion_system_heap.c 		page = alloc_buffer_page(heap, buffer, orders[i]);
page               88 drivers/staging/android/ion/ion_system_heap.c 		if (!page)
page               91 drivers/staging/android/ion/ion_system_heap.c 		return page;
page              108 drivers/staging/android/ion/ion_system_heap.c 	struct page *page, *tmp_page;
page              118 drivers/staging/android/ion/ion_system_heap.c 		page = alloc_largest_available(sys_heap, buffer, size_remaining,
page              120 drivers/staging/android/ion/ion_system_heap.c 		if (!page)
page              122 drivers/staging/android/ion/ion_system_heap.c 		list_add_tail(&page->lru, &pages);
page              123 drivers/staging/android/ion/ion_system_heap.c 		size_remaining -= page_size(page);
page              124 drivers/staging/android/ion/ion_system_heap.c 		max_order = compound_order(page);
page              135 drivers/staging/android/ion/ion_system_heap.c 	list_for_each_entry_safe(page, tmp_page, &pages, lru) {
page              136 drivers/staging/android/ion/ion_system_heap.c 		sg_set_page(sg, page, page_size(page), 0);
page              138 drivers/staging/android/ion/ion_system_heap.c 		list_del(&page->lru);
page              147 drivers/staging/android/ion/ion_system_heap.c 	list_for_each_entry_safe(page, tmp_page, &pages, lru)
page              148 drivers/staging/android/ion/ion_system_heap.c 		free_buffer_page(sys_heap, buffer, page);
page              290 drivers/staging/android/ion/ion_system_heap.c 	struct page *page;
page              295 drivers/staging/android/ion/ion_system_heap.c 	page = alloc_pages(low_order_gfp_flags | __GFP_NOWARN, order);
page              296 drivers/staging/android/ion/ion_system_heap.c 	if (!page)
page              299 drivers/staging/android/ion/ion_system_heap.c 	split_page(page, order);
page              303 drivers/staging/android/ion/ion_system_heap.c 		__free_page(page + i);
page              315 drivers/staging/android/ion/ion_system_heap.c 	sg_set_page(table->sgl, page, len, 0);
page              325 drivers/staging/android/ion/ion_system_heap.c 		__free_page(page + i);
page              333 drivers/staging/android/ion/ion_system_heap.c 	struct page *page = sg_page(table->sgl);
page              338 drivers/staging/android/ion/ion_system_heap.c 		__free_page(page + i);
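The heap path above over-allocates a high-order block, split_page()s it into independent order-0 pages, and frees the tail past the needed length. Sketched, with 'needed' standing for the page count actually required:

	unsigned int order = get_order(needed * PAGE_SIZE);
	struct page *page = alloc_pages(GFP_KERNEL | __GFP_NOWARN, order);
	int i;

	if (!page)
		return -ENOMEM;
	split_page(page, order);	/* requires a non-__GFP_COMP page */
	for (i = needed; i < (1 << order); i++)
		__free_page(page + i);	/* return the surplus */
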
page              145 drivers/staging/comedi/comedi_buf.c 	struct page **pages = NULL;
page              173 drivers/staging/comedi/comedi_buf.c 		pages = vmalloc(sizeof(struct page *) * n_pages);
page              186 drivers/staging/comedi/drivers/pcmmio.c 			     int page, int port)
page              193 drivers/staging/comedi/drivers/pcmmio.c 	if (page == 0) {
page              199 drivers/staging/comedi/drivers/pcmmio.c 		outb(PCMMIO_PAGE(page), iobase + PCMMIO_PAGE_LOCK_REG);
page              208 drivers/staging/comedi/drivers/pcmmio.c 				    int page, int port)
page              216 drivers/staging/comedi/drivers/pcmmio.c 	if (page == 0) {
page              222 drivers/staging/comedi/drivers/pcmmio.c 		outb(PCMMIO_PAGE(page), iobase + PCMMIO_PAGE_LOCK_REG);
page              154 drivers/staging/comedi/drivers/pcmuio.c 			 int asic, int page, int port)
page              162 drivers/staging/comedi/drivers/pcmuio.c 	if (page == 0) {
page              168 drivers/staging/comedi/drivers/pcmuio.c 		outb(PCMUIO_PAGE(page), iobase + PCMUIO_PAGE_LOCK_REG);
page              177 drivers/staging/comedi/drivers/pcmuio.c 				int asic, int page, int port)
page              186 drivers/staging/comedi/drivers/pcmuio.c 	if (page == 0) {
page              192 drivers/staging/comedi/drivers/pcmuio.c 		outb(PCMUIO_PAGE(page), iobase + PCMUIO_PAGE_LOCK_REG);
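pcmmio/pcmuio multiplex more registers than their I/O window holds: writing PCMMIO_PAGE(page) to the lock register banks the window before the port access. The access pattern, sketched (PCMMIO_PORT_REG is hypothetical):

	outb(PCMMIO_PAGE(page), iobase + PCMMIO_PAGE_LOCK_REG);	/* select */
	outb(val, iobase + PCMMIO_PORT_REG(port));		/* hypothetical */
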
page             3163 drivers/staging/exfat/exfat_super.c static int exfat_readpage(struct file *file, struct page *page)
page             3165 drivers/staging/exfat/exfat_super.c 	return  mpage_readpage(page, exfat_get_block);
page             3174 drivers/staging/exfat/exfat_super.c static int exfat_writepage(struct page *page, struct writeback_control *wbc)
page             3176 drivers/staging/exfat/exfat_super.c 	return block_write_full_page(page, exfat_get_block, wbc);
page             3198 drivers/staging/exfat/exfat_super.c 			     struct page **pagep, void **fsdata)
page             3214 drivers/staging/exfat/exfat_super.c 			   struct page *pagep, void *fsdata)
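The exfat handlers are thin wrappers over the generic mpage/buffer helpers; they slot into address_space_operations in the usual pre-folio way, roughly as below (the write_begin/write_end names are inferred from the signatures above):

	static const struct address_space_operations demo_aops = {
		.readpage	= exfat_readpage,
		.writepage	= exfat_writepage,
		.write_begin	= exfat_write_begin,
		.write_end	= exfat_write_end,
	};
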
page              116 drivers/staging/fbtft/fb_sh1106.c 	int page, page_start, page_end, x, i, ret;
page              123 drivers/staging/fbtft/fb_sh1106.c 	for (page = page_start; page < page_end; page++) {
page              125 drivers/staging/fbtft/fb_sh1106.c 		write_reg(par, 0xb0 | page, 0x00 | 2, 0x10 | 0);
page              130 drivers/staging/fbtft/fb_sh1106.c 				if (vmem16[(page * 8 + i) * xres + x])
page              339 drivers/staging/fbtft/fbtft-core.c 	struct page *page;
page              353 drivers/staging/fbtft/fbtft-core.c 	list_for_each_entry(page, pagelist, lru) {
page              355 drivers/staging/fbtft/fbtft-core.c 		index = page->index << PAGE_SHIFT;
page              360 drivers/staging/fbtft/fbtft-core.c 			page->index, y_low, y_high);
page              128 drivers/staging/gasket/gasket_page_table.c 	struct page *page;
page              335 drivers/staging/gasket/gasket_page_table.c 	if (pte->page)
page              336 drivers/staging/gasket/gasket_page_table.c 		free_page((ulong)page_address(pte->page));
page              445 drivers/staging/gasket/gasket_page_table.c static bool gasket_release_page(struct page *page)
page              447 drivers/staging/gasket/gasket_page_table.c 	if (!page)
page              450 drivers/staging/gasket/gasket_page_table.c 	if (!PageReserved(page))
page              451 drivers/staging/gasket/gasket_page_table.c 		SetPageDirty(page);
page              452 drivers/staging/gasket/gasket_page_table.c 	put_page(page);
page              472 drivers/staging/gasket/gasket_page_table.c 	struct page *page;
page              484 drivers/staging/gasket/gasket_page_table.c 			ptes[i].page = NULL;
page              490 drivers/staging/gasket/gasket_page_table.c 						  FOLL_WRITE, &page);
page              500 drivers/staging/gasket/gasket_page_table.c 			ptes[i].page = page;
page              505 drivers/staging/gasket/gasket_page_table.c 				dma_map_page(pg_tbl->device, page, 0, PAGE_SIZE,
page              510 drivers/staging/gasket/gasket_page_table.c 				if (gasket_release_page(ptes[i].page))
page              612 drivers/staging/gasket/gasket_page_table.c 			if (ptes[i].page && ptes[i].dma_addr) {
page              616 drivers/staging/gasket/gasket_page_table.c 			if (gasket_release_page(ptes[i].page))
page              659 drivers/staging/gasket/gasket_page_table.c 			slot_base = (u64 __iomem *)(page_address(pte->page) +
page              863 drivers/staging/gasket/gasket_page_table.c 	pte->page = virt_to_page((void *)page_addr);
page              876 drivers/staging/gasket/gasket_page_table.c 	pte->dma_addr = dma_map_page(pg_tbl->device, pte->page, 0, PAGE_SIZE,
page              985 drivers/staging/gasket/gasket_page_table.c 			(u64 __iomem *)(page_address(pte->page) + pte->offset);
page             1089 drivers/staging/gasket/gasket_page_table.c 				  ulong dev_addr, struct page **ppage,
page             1121 drivers/staging/gasket/gasket_page_table.c 	*ppage = pte->page;
page              149 drivers/staging/gasket/gasket_page_table.h 				  ulong dev_addr, struct page **page,
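gasket_page_table pins each user page with get_user_pages_fast(..., FOLL_WRITE, ...) and then DMA-maps it. The pin-then-map step in isolation, as a sketch:

	struct page *page;
	dma_addr_t dma;
	int got = get_user_pages_fast(uaddr, 1, FOLL_WRITE, &page);

	if (got != 1)
		return got < 0 ? got : -EFAULT;
	dma = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, dma)) {
		put_page(page);		/* undo the pin */
		return -ENOMEM;
	}
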
page               71 drivers/staging/kpc2000/kpc_dma/fileops.c 	acd->user_pages = kzalloc(sizeof(struct page *) * acd->page_count, GFP_KERNEL);
page               92 drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.h 	struct page       **user_pages;
page               20 drivers/staging/media/ipu3/ipu3-dmamap.c static void imgu_dmamap_free_buffer(struct page **pages,
page               34 drivers/staging/media/ipu3/ipu3-dmamap.c static struct page **imgu_dmamap_alloc_buffer(size_t size, gfp_t gfp)
page               36 drivers/staging/media/ipu3/ipu3-dmamap.c 	struct page **pages;
page               50 drivers/staging/media/ipu3/ipu3-dmamap.c 		struct page *page = NULL;
page               58 drivers/staging/media/ipu3/ipu3-dmamap.c 			page = alloc_pages((order_mask - order_size) ?
page               60 drivers/staging/media/ipu3/ipu3-dmamap.c 			if (!page)
page               64 drivers/staging/media/ipu3/ipu3-dmamap.c 			if (!PageCompound(page)) {
page               65 drivers/staging/media/ipu3/ipu3-dmamap.c 				split_page(page, order);
page               69 drivers/staging/media/ipu3/ipu3-dmamap.c 			__free_pages(page, order);
page               71 drivers/staging/media/ipu3/ipu3-dmamap.c 		if (!page) {
page               77 drivers/staging/media/ipu3/ipu3-dmamap.c 			pages[i++] = page++;
page               99 drivers/staging/media/ipu3/ipu3-dmamap.c 	struct page **pages;
page              112 drivers/staging/most/configfs.c 					   const char *page, size_t count)
page              118 drivers/staging/most/configfs.c 	ret = kstrtobool(page, &tmp);
page              132 drivers/staging/most/configfs.c 					    const char *page, size_t count)
page              138 drivers/staging/most/configfs.c 	ret = kstrtobool(page, &tmp);
page              153 drivers/staging/most/configfs.c static ssize_t mdev_link_direction_show(struct config_item *item, char *page)
page              155 drivers/staging/most/configfs.c 	return snprintf(page, PAGE_SIZE, "%s\n", to_mdev_link(item)->direction);
page              159 drivers/staging/most/configfs.c 					 const char *page, size_t count)
page              163 drivers/staging/most/configfs.c 	if (!sysfs_streq(page, "dir_rx") && !sysfs_streq(page, "rx") &&
page              164 drivers/staging/most/configfs.c 	    !sysfs_streq(page, "dir_tx") && !sysfs_streq(page, "tx"))
page              166 drivers/staging/most/configfs.c 	strcpy(mdev_link->direction, page);
page              170 drivers/staging/most/configfs.c static ssize_t mdev_link_datatype_show(struct config_item *item, char *page)
page              172 drivers/staging/most/configfs.c 	return snprintf(page, PAGE_SIZE, "%s\n", to_mdev_link(item)->datatype);
page              176 drivers/staging/most/configfs.c 					const char *page, size_t count)
page              180 drivers/staging/most/configfs.c 	if (!sysfs_streq(page, "control") && !sysfs_streq(page, "async") &&
page              181 drivers/staging/most/configfs.c 	    !sysfs_streq(page, "sync") && !sysfs_streq(page, "isoc") &&
page              182 drivers/staging/most/configfs.c 	    !sysfs_streq(page, "isoc_avp"))
page              184 drivers/staging/most/configfs.c 	strcpy(mdev_link->datatype, page);
page              188 drivers/staging/most/configfs.c static ssize_t mdev_link_device_show(struct config_item *item, char *page)
page              190 drivers/staging/most/configfs.c 	return snprintf(page, PAGE_SIZE, "%s\n", to_mdev_link(item)->device);
page              194 drivers/staging/most/configfs.c 				      const char *page, size_t count)
page              198 drivers/staging/most/configfs.c 	strcpy(mdev_link->device, page);
page              202 drivers/staging/most/configfs.c static ssize_t mdev_link_channel_show(struct config_item *item, char *page)
page              204 drivers/staging/most/configfs.c 	return snprintf(page, PAGE_SIZE, "%s\n", to_mdev_link(item)->channel);
page              208 drivers/staging/most/configfs.c 				       const char *page, size_t count)
page              212 drivers/staging/most/configfs.c 	strcpy(mdev_link->channel, page);
page              216 drivers/staging/most/configfs.c static ssize_t mdev_link_comp_show(struct config_item *item, char *page)
page              218 drivers/staging/most/configfs.c 	return snprintf(page, PAGE_SIZE, "%s\n", to_mdev_link(item)->comp);
page              222 drivers/staging/most/configfs.c 				    const char *page, size_t count)
page              226 drivers/staging/most/configfs.c 	strcpy(mdev_link->comp, page);
page              230 drivers/staging/most/configfs.c static ssize_t mdev_link_comp_params_show(struct config_item *item, char *page)
page              232 drivers/staging/most/configfs.c 	return snprintf(page, PAGE_SIZE, "%s\n",
page              237 drivers/staging/most/configfs.c 					   const char *page, size_t count)
page              241 drivers/staging/most/configfs.c 	strcpy(mdev_link->comp_params, page);
page              245 drivers/staging/most/configfs.c static ssize_t mdev_link_num_buffers_show(struct config_item *item, char *page)
page              247 drivers/staging/most/configfs.c 	return snprintf(page, PAGE_SIZE, "%d\n",
page              252 drivers/staging/most/configfs.c 					   const char *page, size_t count)
page              257 drivers/staging/most/configfs.c 	ret = kstrtou16(page, 0, &mdev_link->num_buffers);
page              263 drivers/staging/most/configfs.c static ssize_t mdev_link_buffer_size_show(struct config_item *item, char *page)
page              265 drivers/staging/most/configfs.c 	return snprintf(page, PAGE_SIZE, "%d\n",
page              270 drivers/staging/most/configfs.c 					   const char *page, size_t count)
page              275 drivers/staging/most/configfs.c 	ret = kstrtou16(page, 0, &mdev_link->buffer_size);
page              282 drivers/staging/most/configfs.c 					     char *page)
page              284 drivers/staging/most/configfs.c 	return snprintf(page, PAGE_SIZE, "%d\n",
page              289 drivers/staging/most/configfs.c 					      const char *page, size_t count)
page              294 drivers/staging/most/configfs.c 	ret = kstrtou16(page, 0, &mdev_link->subbuffer_size);
page              301 drivers/staging/most/configfs.c 					       char *page)
page              303 drivers/staging/most/configfs.c 	return snprintf(page, PAGE_SIZE, "%d\n",
page              308 drivers/staging/most/configfs.c 						const char *page, size_t count)
page              313 drivers/staging/most/configfs.c 	ret = kstrtou16(page, 0, &mdev_link->packets_per_xact);
page              319 drivers/staging/most/configfs.c static ssize_t mdev_link_dbr_size_show(struct config_item *item, char *page)
page              321 drivers/staging/most/configfs.c 	return snprintf(page, PAGE_SIZE, "%d\n", to_mdev_link(item)->dbr_size);
page              325 drivers/staging/most/configfs.c 					const char *page, size_t count)
page              330 drivers/staging/most/configfs.c 	ret = kstrtou16(page, 0, &mdev_link->dbr_size);
page              497 drivers/staging/most/configfs.c 					      const char *page, size_t count)
page              503 drivers/staging/most/configfs.c 	ret = kstrtobool(page, &tmp);
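The MOST link attributes validate enumerated strings with sysfs_streq(), which tolerates the trailing newline echo appends, before copying the value. The store half, reduced to two accepted values as a sketch:

	if (!sysfs_streq(page, "rx") && !sysfs_streq(page, "tx"))
		return -EINVAL;
	strcpy(mdev_link->direction, page);
	return count;
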
page              472 drivers/staging/most/sound/sound.c 	.page       = snd_pcm_lib_get_vmalloc_page,
page             1362 drivers/staging/qlge/qlge.h 	struct page *page;	/* master page */
page             1093 drivers/staging/qlge/qlge_main.c 	if (!rx_ring->pg_chunk.page) {
page             1095 drivers/staging/qlge/qlge_main.c 		rx_ring->pg_chunk.page = alloc_pages(__GFP_COMP | GFP_ATOMIC,
page             1097 drivers/staging/qlge/qlge_main.c 		if (unlikely(!rx_ring->pg_chunk.page)) {
page             1103 drivers/staging/qlge/qlge_main.c 		map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
page             1107 drivers/staging/qlge/qlge_main.c 			__free_pages(rx_ring->pg_chunk.page,
page             1109 drivers/staging/qlge/qlge_main.c 			rx_ring->pg_chunk.page = NULL;
page             1115 drivers/staging/qlge/qlge_main.c 		rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
page             1128 drivers/staging/qlge/qlge_main.c 		rx_ring->pg_chunk.page = NULL;
page             1132 drivers/staging/qlge/qlge_main.c 		get_page(rx_ring->pg_chunk.page);
page             1473 drivers/staging/qlge/qlge_main.c 				  void *page, size_t *len)
page             1480 drivers/staging/qlge/qlge_main.c 		tags = (u16 *)page;
page             1504 drivers/staging/qlge/qlge_main.c 		put_page(lbq_desc->p.pg_chunk.page);
page             1514 drivers/staging/qlge/qlge_main.c 		put_page(lbq_desc->p.pg_chunk.page);
page             1519 drivers/staging/qlge/qlge_main.c 			     lbq_desc->p.pg_chunk.page,
page             1554 drivers/staging/qlge/qlge_main.c 		put_page(lbq_desc->p.pg_chunk.page);
page             1583 drivers/staging/qlge/qlge_main.c 	skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
page             1627 drivers/staging/qlge/qlge_main.c 	put_page(lbq_desc->p.pg_chunk.page);
page             1857 drivers/staging/qlge/qlge_main.c 			skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
page             1886 drivers/staging/qlge/qlge_main.c 						lbq_desc->p.pg_chunk.page,
page             1941 drivers/staging/qlge/qlge_main.c 						lbq_desc->p.pg_chunk.page,
page             2840 drivers/staging/qlge/qlge_main.c 		put_page(lbq_desc->p.pg_chunk.page);
page             2841 drivers/staging/qlge/qlge_main.c 		lbq_desc->p.pg_chunk.page = NULL;
page             2847 drivers/staging/qlge/qlge_main.c 	if (rx_ring->pg_chunk.page) {
page             2850 drivers/staging/qlge/qlge_main.c 		put_page(rx_ring->pg_chunk.page);
page             2851 drivers/staging/qlge/qlge_main.c 		rx_ring->pg_chunk.page = NULL;
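qlge carves one large master page into receive chunks; get_page() gives each outstanding chunk its own reference so SKBs can release them independently of the ring. Roughly (sizes and bookkeeping hypothetical):

	get_page(rx_ring->pg_chunk.page);	/* this chunk's own ref */
	skb_fill_page_desc(skb, 0, rx_ring->pg_chunk.page,
			   rx_ring->pg_chunk.offset, length);
	rx_ring->pg_chunk.offset += chunk_size;
	if (rx_ring->pg_chunk.offset == page_bytes)	/* hypothetical */
		rx_ring->pg_chunk.page = NULL;	/* next fill allocates anew */
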
page               12 drivers/staging/rtl8188eu/core/rtw_debug.c int proc_get_drv_version(char *page, char **start,
page               18 drivers/staging/rtl8188eu/core/rtw_debug.c 	len += snprintf(page + len, count - len, "%s\n", DRIVERVERSION);
page               24 drivers/staging/rtl8188eu/core/rtw_debug.c int proc_get_write_reg(char *page, char **start,
page               73 drivers/staging/rtl8188eu/core/rtw_debug.c int proc_get_read_reg(char *page, char **start,
page               89 drivers/staging/rtl8188eu/core/rtw_debug.c 		len += snprintf(page + len, count - len, "usb_read8(0x%x)=0x%x\n", proc_get_read_addr, usb_read8(padapter, proc_get_read_addr));
page               92 drivers/staging/rtl8188eu/core/rtw_debug.c 		len += snprintf(page + len, count - len, "usb_read16(0x%x)=0x%x\n", proc_get_read_addr, usb_read16(padapter, proc_get_read_addr));
page               95 drivers/staging/rtl8188eu/core/rtw_debug.c 		len += snprintf(page + len, count - len, "usb_read32(0x%x)=0x%x\n", proc_get_read_addr, usb_read32(padapter, proc_get_read_addr));
page               98 drivers/staging/rtl8188eu/core/rtw_debug.c 		len += snprintf(page + len, count - len, "error read length=%d\n", proc_get_read_len);
page              133 drivers/staging/rtl8188eu/core/rtw_debug.c int proc_get_adapter_state(char *page, char **start,
page              141 drivers/staging/rtl8188eu/core/rtw_debug.c 	len += snprintf(page + len, count - len, "bSurpriseRemoved=%d, bDriverStopped=%d\n",
page              148 drivers/staging/rtl8188eu/core/rtw_debug.c int proc_get_best_channel(char *page, char **start,
page              173 drivers/staging/rtl8188eu/core/rtw_debug.c 		len += snprintf(page + len, count - len, "The rx cnt of channel %3d = %d\n",
page              177 drivers/staging/rtl8188eu/core/rtw_debug.c 	len += snprintf(page + len, count - len, "best_channel_24G = %d\n", best_channel_24G);
page               64 drivers/staging/rtl8188eu/hal/fw.c 				  u32 page, const u8 *buffer, u32 size)
page               67 drivers/staging/rtl8188eu/hal/fw.c 	u8 u8page = (u8)(page & 0x07);
page               79 drivers/staging/rtl8188eu/hal/fw.c 	u32 page, offset;
page               84 drivers/staging/rtl8188eu/hal/fw.c 	for (page = 0; page < page_no; page++) {
page               85 drivers/staging/rtl8188eu/hal/fw.c 		offset = page * FW_8192C_PAGE_SIZE;
page               86 drivers/staging/rtl8188eu/hal/fw.c 		_rtl88e_fw_page_write(adapt, page, (buf_ptr + offset),
page               92 drivers/staging/rtl8188eu/hal/fw.c 		page = page_no;
page               93 drivers/staging/rtl8188eu/hal/fw.c 		_rtl88e_fw_page_write(adapt, page, (buf_ptr + offset), remain);
page              107 drivers/staging/rtl8188eu/include/rtw_debug.h int proc_get_drv_version(char *page, char **start,
page              111 drivers/staging/rtl8188eu/include/rtw_debug.h int proc_get_write_reg(char *page, char **start,
page              117 drivers/staging/rtl8188eu/include/rtw_debug.h int proc_get_read_reg(char *page, char **start,
page              124 drivers/staging/rtl8188eu/include/rtw_debug.h int proc_get_adapter_state(char *page, char **start,
page              128 drivers/staging/rtl8188eu/include/rtw_debug.h int proc_get_best_channel(char *page, char **start,
page              469 drivers/staging/rtl8192u/r8192U.h 		unsigned char page;
page             2892 drivers/staging/rtl8192u/r8192U_dm.c 	u8	page;
page             2895 drivers/staging/rtl8192u/r8192U_dm.c 	for (page = 0; page < 5; page++)
page             2897 drivers/staging/rtl8192u/r8192U_dm.c 			read_nic_byte(dev, offset + page * 256, &dm_shadow[page][offset]);
page             2901 drivers/staging/rtl8192u/r8192U_dm.c 	for (page = 8; page < 11; page++)
page             2903 drivers/staging/rtl8192u/r8192U_dm.c 			read_nic_byte(dev, offset + page * 256, &dm_shadow[page][offset]);
page             2905 drivers/staging/rtl8192u/r8192U_dm.c 	for (page = 12; page < 15; page++)
page             2907 drivers/staging/rtl8192u/r8192U_dm.c 			read_nic_byte(dev, offset + page * 256, &dm_shadow[page][offset]);
page              137 drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c 	u32 page,
page              143 drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c 	u8 u8Page = (u8) (page & 0x07);
page              157 drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c 	u32 page, offset;
page              164 drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c 	for (page = 0; page < pageNums; page++) {
page              165 drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c 		offset = page * MAX_DLFW_PAGE_SIZE;
page              166 drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c 		ret = _PageWrite(padapter, page, bufferPtr+offset, MAX_DLFW_PAGE_SIZE);
page              176 drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c 		page = pageNums;
page              177 drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c 		ret = _PageWrite(padapter, page, bufferPtr+offset, remainSize);
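Both Realtek loaders stream firmware through a small page window: whole pages first, then the remainder, with the page index masked to 3 bits inside the write helper. The shared loop, sketched with hypothetical names:

	u32 page, offset;
	u32 pages = size / FW_PAGE_SIZE;	/* hypothetical constant */
	u32 remain = size % FW_PAGE_SIZE;

	for (page = 0; page < pages; page++) {
		offset = page * FW_PAGE_SIZE;
		fw_page_write(adapt, page, buf + offset, FW_PAGE_SIZE);
	}
	if (remain)	/* trailing partial page */
		fw_page_write(adapt, pages, buf + pages * FW_PAGE_SIZE, remain);
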
page               75 drivers/staging/rts5208/rtsx_transport.c 			struct page *page = sg_page(sg) +
page               95 drivers/staging/rts5208/rtsx_transport.c 				unsigned char *ptr = kmap(page);
page              101 drivers/staging/rts5208/rtsx_transport.c 				kunmap(page);
page              105 drivers/staging/rts5208/rtsx_transport.c 				++page;
page               62 drivers/staging/speakup/speakup.h int spk_set_string_var(const char *page, struct st_var_header *var, int len);
page              247 drivers/staging/speakup/varhandlers.c int spk_set_string_var(const char *page, struct st_var_header *var, int len)
page              264 drivers/staging/speakup/varhandlers.c 		strcpy((char *)var->p_val, page);
page              228 drivers/staging/uwb/i1480/dfu/i1480-dfu.h 		u8 page, offset;
page              241 drivers/staging/uwb/i1480/dfu/i1480-dfu.h 		u8 page, offset, value;
page              102 drivers/staging/uwb/i1480/dfu/phy.c 		cmd->data[cnt].page = (srcaddr + cnt) >> 8;
page              118 drivers/staging/uwb/i1480/dfu/phy.c 		if (reply->data[cnt].page != (srcaddr + cnt) >> 8)
page              121 drivers/staging/uwb/i1480/dfu/phy.c 				(srcaddr + cnt) >> 8, reply->data[cnt].page);
page               43 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c 	struct page **pages;
page              360 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c 	struct page **pages;
page              399 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c 	pages		= (struct page **)(addrs + num_pages);
page              426 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c 			struct page *pg = vmalloc_to_page(buf + (actual_pages *
page              548 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c 	struct page **pages = pagelistinfo->pages;
page              435 drivers/staging/wlan-ng/hfa384x.h 	u16 page;
page              831 drivers/staging/wlan-ng/hfa384x.h 	__le16 page;
page              839 drivers/staging/wlan-ng/hfa384x.h 	__le16 page;
page              251 drivers/staging/wlan-ng/hfa384x_usb.c 	       u16 page,
page              258 drivers/staging/wlan-ng/hfa384x_usb.c 	       u16 page,
page             1457 drivers/staging/wlan-ng/hfa384x_usb.c 	       u16 page,
page             1475 drivers/staging/wlan-ng/hfa384x_usb.c 			sizeof(ctlx->outbuf.rmemreq.page) + len);
page             1477 drivers/staging/wlan-ng/hfa384x_usb.c 	ctlx->outbuf.rmemreq.page = cpu_to_le16(page);
page             1484 drivers/staging/wlan-ng/hfa384x_usb.c 		 ctlx->outbuf.rmemreq.offset, ctlx->outbuf.rmemreq.page);
page             1543 drivers/staging/wlan-ng/hfa384x_usb.c 	       u16 page,
page             1551 drivers/staging/wlan-ng/hfa384x_usb.c 	pr_debug("page=0x%04x offset=0x%04x len=%d\n", page, offset, len);
page             1563 drivers/staging/wlan-ng/hfa384x_usb.c 			sizeof(ctlx->outbuf.wmemreq.page) + len);
page             1565 drivers/staging/wlan-ng/hfa384x_usb.c 	ctlx->outbuf.wmemreq.page = cpu_to_le16(page);
page             1571 drivers/staging/wlan-ng/hfa384x_usb.c 	    sizeof(ctlx->outbuf.wmemreq.page) + len;
page             1720 drivers/staging/wlan-ng/hfa384x_usb.c 	le16_to_cpus(&hw->bufinfo.page);
page             1833 drivers/staging/wlan-ng/hfa384x_usb.c 	    HFA384x_ADDR_AUX_MKFLAT(hw->bufinfo.page, hw->bufinfo.offset);
page             1835 drivers/staging/wlan-ng/hfa384x_usb.c 		 hw->bufinfo.page, hw->bufinfo.offset, dlbufaddr);
page              219 drivers/target/iscsi/cxgbit/cxgbit_main.c 	__skb_fill_page_desc(skb, skb_frag_idx, gl->frags[0].page,
page              224 drivers/target/iscsi/cxgbit/cxgbit_main.c 				     gl->frags[i].page,
page              231 drivers/target/iscsi/cxgbit/cxgbit_main.c 	get_page(gl->frags[gl->nfrags - 1].page);
page              363 drivers/target/iscsi/cxgbit/cxgbit_target.c 	struct page *page;
page              378 drivers/target/iscsi/cxgbit/cxgbit_target.c 		page = sg_page(sg);
page              380 drivers/target/iscsi/cxgbit/cxgbit_target.c 		get_page(page);
page              381 drivers/target/iscsi/cxgbit/cxgbit_target.c 		skb_fill_page_desc(skb, i, page, sg->offset + page_off,
page              397 drivers/target/iscsi/cxgbit/cxgbit_target.c 		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
page              398 drivers/target/iscsi/cxgbit/cxgbit_target.c 		if (!page)
page              400 drivers/target/iscsi/cxgbit/cxgbit_target.c 		skb_fill_page_desc(skb, i, page, 0, padding);
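cxgbit attaches data-out pages straight to SKB frags, taking a page reference per frag since the network stack will put_page() each one on free. The core of that loop, sketched:

	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		struct page *page = sg_page(sg);

		get_page(page);		/* the frag owns this reference */
		skb_fill_page_desc(skb, i, page, sg->offset, sg->length);
	}
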
page               39 drivers/target/iscsi/iscsi_target_configfs.c static ssize_t lio_target_np_driver_show(struct config_item *item, char *page,
page               48 drivers/target/iscsi/iscsi_target_configfs.c 		rb = sprintf(page, "1\n");
page               50 drivers/target/iscsi/iscsi_target_configfs.c 		rb = sprintf(page, "0\n");
page               56 drivers/target/iscsi/iscsi_target_configfs.c 		const char *page, size_t count, enum iscsit_transport_type type,
page               66 drivers/target/iscsi/iscsi_target_configfs.c 	rc = kstrtou32(page, 0, &op);
page              116 drivers/target/iscsi/iscsi_target_configfs.c static ssize_t lio_target_np_iser_show(struct config_item *item, char *page)
page              118 drivers/target/iscsi/iscsi_target_configfs.c 	return lio_target_np_driver_show(item, page, ISCSI_INFINIBAND);
page              122 drivers/target/iscsi/iscsi_target_configfs.c 					const char *page, size_t count)
page              124 drivers/target/iscsi/iscsi_target_configfs.c 	return lio_target_np_driver_store(item, page, count,
page              129 drivers/target/iscsi/iscsi_target_configfs.c static ssize_t lio_target_np_cxgbit_show(struct config_item *item, char *page)
page              131 drivers/target/iscsi/iscsi_target_configfs.c 	return lio_target_np_driver_show(item, page, ISCSI_CXGBIT);
page              135 drivers/target/iscsi/iscsi_target_configfs.c 					  const char *page, size_t count)
page              137 drivers/target/iscsi/iscsi_target_configfs.c 	return lio_target_np_driver_store(item, page, count,
page              282 drivers/target/iscsi/iscsi_target_configfs.c 		char *page)						\
page              288 drivers/target/iscsi/iscsi_target_configfs.c 	return sprintf(page, "%u\n", nacl->node_attrib.name);		\
page              292 drivers/target/iscsi/iscsi_target_configfs.c 		const char *page, size_t count)				\
page              300 drivers/target/iscsi/iscsi_target_configfs.c 	ret = kstrtou32(page, 0, &val);					\
page              340 drivers/target/iscsi/iscsi_target_configfs.c 	char *page)							\
page              346 drivers/target/iscsi/iscsi_target_configfs.c 	return snprintf(page, PAGE_SIZE, "%s\n", auth->name);		\
page              351 drivers/target/iscsi/iscsi_target_configfs.c 	const char *page,						\
page              360 drivers/target/iscsi/iscsi_target_configfs.c 	snprintf(auth->name, sizeof(auth->name), "%s", page);		\
page              378 drivers/target/iscsi/iscsi_target_configfs.c 		char *page)						\
page              382 drivers/target/iscsi/iscsi_target_configfs.c 			struct iscsi_node_acl, se_node_acl), page);	\
page              385 drivers/target/iscsi/iscsi_target_configfs.c 		const char *page, size_t count)				\
page              389 drivers/target/iscsi/iscsi_target_configfs.c 			struct iscsi_node_acl, se_node_acl), page, count); \
page              405 drivers/target/iscsi/iscsi_target_configfs.c 	char *page)							\
page              412 drivers/target/iscsi/iscsi_target_configfs.c 	return snprintf(page, PAGE_SIZE, "%d\n", auth->name);		\
page              418 drivers/target/iscsi/iscsi_target_configfs.c 		char *page)						\
page              422 drivers/target/iscsi/iscsi_target_configfs.c 			struct iscsi_node_acl, se_node_acl), page);	\
page              444 drivers/target/iscsi/iscsi_target_configfs.c 		char *page)						\
page              454 drivers/target/iscsi/iscsi_target_configfs.c 		rb = snprintf(page, PAGE_SIZE,				\
page              458 drivers/target/iscsi/iscsi_target_configfs.c 		rb = snprintf(page, PAGE_SIZE, "%u\n",			\
page              499 drivers/target/iscsi/iscsi_target_configfs.c static ssize_t lio_target_nacl_info_show(struct config_item *item, char *page)
page              511 drivers/target/iscsi/iscsi_target_configfs.c 		rb += sprintf(page+rb, "No active iSCSI Session for Initiator"
page              516 drivers/target/iscsi/iscsi_target_configfs.c 		rb += sprintf(page+rb, "InitiatorName: %s\n",
page              518 drivers/target/iscsi/iscsi_target_configfs.c 		rb += sprintf(page+rb, "InitiatorAlias: %s\n",
page              521 drivers/target/iscsi/iscsi_target_configfs.c 		rb += sprintf(page+rb,
page              524 drivers/target/iscsi/iscsi_target_configfs.c 		rb += sprintf(page+rb, "SessionType: %s\n",
page              527 drivers/target/iscsi/iscsi_target_configfs.c 		rb += sprintf(page+rb, "Session State: ");
page              530 drivers/target/iscsi/iscsi_target_configfs.c 			rb += sprintf(page+rb, "TARG_SESS_FREE\n");
page              533 drivers/target/iscsi/iscsi_target_configfs.c 			rb += sprintf(page+rb, "TARG_SESS_STATE_ACTIVE\n");
page              536 drivers/target/iscsi/iscsi_target_configfs.c 			rb += sprintf(page+rb, "TARG_SESS_STATE_LOGGED_IN\n");
page              539 drivers/target/iscsi/iscsi_target_configfs.c 			rb += sprintf(page+rb, "TARG_SESS_STATE_FAILED\n");
page              542 drivers/target/iscsi/iscsi_target_configfs.c 			rb += sprintf(page+rb, "TARG_SESS_STATE_IN_CONTINUE\n");
page              545 drivers/target/iscsi/iscsi_target_configfs.c 			rb += sprintf(page+rb, "ERROR: Unknown Session"
page              550 drivers/target/iscsi/iscsi_target_configfs.c 		rb += sprintf(page+rb, "---------------------[iSCSI Session"
page              552 drivers/target/iscsi/iscsi_target_configfs.c 		rb += sprintf(page+rb, "  CmdSN/WR  :  CmdSN/WC  :  ExpCmdSN"
page              555 drivers/target/iscsi/iscsi_target_configfs.c 		rb += sprintf(page+rb, " 0x%08x   0x%08x   0x%08x   0x%08x"
page              561 drivers/target/iscsi/iscsi_target_configfs.c 		rb += sprintf(page+rb, "----------------------[iSCSI"
page              566 drivers/target/iscsi/iscsi_target_configfs.c 			rb += sprintf(page+rb, "CID: %hu  Connection"
page              570 drivers/target/iscsi/iscsi_target_configfs.c 				rb += sprintf(page+rb,
page              574 drivers/target/iscsi/iscsi_target_configfs.c 				rb += sprintf(page+rb,
page              578 drivers/target/iscsi/iscsi_target_configfs.c 				rb += sprintf(page+rb,
page              582 drivers/target/iscsi/iscsi_target_configfs.c 				rb += sprintf(page+rb,
page              586 drivers/target/iscsi/iscsi_target_configfs.c 				rb += sprintf(page+rb,
page              590 drivers/target/iscsi/iscsi_target_configfs.c 				rb += sprintf(page+rb,
page              594 drivers/target/iscsi/iscsi_target_configfs.c 				rb += sprintf(page+rb,
page              598 drivers/target/iscsi/iscsi_target_configfs.c 				rb += sprintf(page+rb,
page              603 drivers/target/iscsi/iscsi_target_configfs.c 			rb += sprintf(page+rb, "   Address %pISc %s", &conn->login_sockaddr,
page              606 drivers/target/iscsi/iscsi_target_configfs.c 			rb += sprintf(page+rb, "  StatSN: 0x%08x\n",
page              617 drivers/target/iscsi/iscsi_target_configfs.c 		char *page)
page              619 drivers/target/iscsi/iscsi_target_configfs.c 	return sprintf(page, "%u\n", acl_to_nacl(item)->queue_depth);
page              623 drivers/target/iscsi/iscsi_target_configfs.c 		const char *page, size_t count)
page              633 drivers/target/iscsi/iscsi_target_configfs.c 	ret = kstrtou32(page, 0, &cmdsn_depth);
page              672 drivers/target/iscsi/iscsi_target_configfs.c static ssize_t lio_target_nacl_tag_show(struct config_item *item, char *page)
page              674 drivers/target/iscsi/iscsi_target_configfs.c 	return snprintf(page, PAGE_SIZE, "%s", acl_to_nacl(item)->acl_tag);
page              678 drivers/target/iscsi/iscsi_target_configfs.c 		const char *page, size_t count)
page              683 drivers/target/iscsi/iscsi_target_configfs.c 	ret = core_tpg_set_initiator_node_tag(se_nacl->se_tpg, se_nacl, page);
page              721 drivers/target/iscsi/iscsi_target_configfs.c 		char *page)						\
page              731 drivers/target/iscsi/iscsi_target_configfs.c 	rb = sprintf(page, "%u\n", tpg->tpg_attrib.name);		\
page              737 drivers/target/iscsi/iscsi_target_configfs.c 		const char *page, size_t count)				\
page              748 drivers/target/iscsi/iscsi_target_configfs.c 	ret = kstrtou32(page, 0, &val);					\
page              802 drivers/target/iscsi/iscsi_target_configfs.c 		char *page)							\
page              811 drivers/target/iscsi/iscsi_target_configfs.c 	return snprintf(page, PAGE_SIZE, "%s\n", auth->name);			\
page              815 drivers/target/iscsi/iscsi_target_configfs.c 		const char *page, size_t count)					\
page              824 drivers/target/iscsi/iscsi_target_configfs.c 	snprintf(auth->name, sizeof(auth->name), "%s", page);			\
page              842 drivers/target/iscsi/iscsi_target_configfs.c 		char *page)							\
page              844 drivers/target/iscsi/iscsi_target_configfs.c 	return __iscsi_tpg_auth_##name##_show(auth_to_tpg(item), page);		\
page              848 drivers/target/iscsi/iscsi_target_configfs.c 		const char *page, size_t count)					\
page              850 drivers/target/iscsi/iscsi_target_configfs.c 	return __iscsi_tpg_auth_##name##_store(auth_to_tpg(item), page, count);	\
page              863 drivers/target/iscsi/iscsi_target_configfs.c 		char *page)								\
page              872 drivers/target/iscsi/iscsi_target_configfs.c 	return snprintf(page, PAGE_SIZE, "%d\n", auth->name);			\
page              878 drivers/target/iscsi/iscsi_target_configfs.c 		char *page) \
page              880 drivers/target/iscsi/iscsi_target_configfs.c 	return __iscsi_tpg_auth_##name##_show(auth_to_tpg(item), page);		\
page              901 drivers/target/iscsi/iscsi_target_configfs.c 		char *page)						\
page              918 drivers/target/iscsi/iscsi_target_configfs.c 	rb = snprintf(page, PAGE_SIZE, "%s\n", param->value);		\
page              924 drivers/target/iscsi/iscsi_target_configfs.c 		const char *page, size_t count)				\
page              935 drivers/target/iscsi/iscsi_target_configfs.c 	len = snprintf(buf, PAGE_SIZE, "%s=%s", __stringify(name), page);	\
page             1009 drivers/target/iscsi/iscsi_target_configfs.c static ssize_t lio_target_tpg_enable_show(struct config_item *item, char *page)
page             1017 drivers/target/iscsi/iscsi_target_configfs.c 	len = sprintf(page, "%d\n",
page             1025 drivers/target/iscsi/iscsi_target_configfs.c 		const char *page, size_t count)
page             1033 drivers/target/iscsi/iscsi_target_configfs.c 	ret = kstrtou32(page, 0, &op);
page             1067 drivers/target/iscsi/iscsi_target_configfs.c 		char *page)
page             1069 drivers/target/iscsi/iscsi_target_configfs.c 	return target_show_dynamic_sessions(to_tpg(item), page);
page             1152 drivers/target/iscsi/iscsi_target_configfs.c 		char *page)
page             1154 drivers/target/iscsi/iscsi_target_configfs.c 	return sprintf(page, "Datera Inc. iSCSI Target "ISCSIT_VERSION"\n");
page             1227 drivers/target/iscsi/iscsi_target_configfs.c static ssize_t iscsi_disc_##name##_show(struct config_item *item, char *page) \
page             1230 drivers/target/iscsi/iscsi_target_configfs.c 		page);							\
page             1233 drivers/target/iscsi/iscsi_target_configfs.c 		const char *page, size_t count)				\
page             1236 drivers/target/iscsi/iscsi_target_configfs.c 		page, count);						\
page             1248 drivers/target/iscsi/iscsi_target_configfs.c static ssize_t iscsi_disc_##name##_show(struct config_item *item, char *page) \
page             1251 drivers/target/iscsi/iscsi_target_configfs.c 			page);						\
page             1259 drivers/target/iscsi/iscsi_target_configfs.c 		char *page)
page             1263 drivers/target/iscsi/iscsi_target_configfs.c 	return sprintf(page, "%d\n", discovery_auth->enforce_discovery_auth);
page             1267 drivers/target/iscsi/iscsi_target_configfs.c 		const char *page, size_t count)
page             1274 drivers/target/iscsi/iscsi_target_configfs.c 	err = kstrtou32(page, 0, &op);
page               52 drivers/target/iscsi/iscsi_target_stat.c 		char *page)
page               54 drivers/target/iscsi/iscsi_target_stat.c 	return snprintf(page, PAGE_SIZE, "%u\n",
page               59 drivers/target/iscsi/iscsi_target_stat.c 		char *page)
page               61 drivers/target/iscsi/iscsi_target_stat.c 	return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_DRAFT20_VERSION);
page               65 drivers/target/iscsi/iscsi_target_stat.c 		char *page)
page               67 drivers/target/iscsi/iscsi_target_stat.c 	return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_DRAFT20_VERSION);
page               71 drivers/target/iscsi/iscsi_target_stat.c 		char *page)
page               73 drivers/target/iscsi/iscsi_target_stat.c 	return snprintf(page, PAGE_SIZE, "%u\n",
page               78 drivers/target/iscsi/iscsi_target_stat.c 		char *page)
page               80 drivers/target/iscsi/iscsi_target_stat.c 	return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_INST_NUM_NODES);
page               84 drivers/target/iscsi/iscsi_target_stat.c 		char *page)
page               86 drivers/target/iscsi/iscsi_target_stat.c 	return snprintf(page, PAGE_SIZE, "%u\n",
page               91 drivers/target/iscsi/iscsi_target_stat.c 		char *page)
page              103 drivers/target/iscsi/iscsi_target_stat.c 	return snprintf(page, PAGE_SIZE, "%u\n", sess_err_count);
page              107 drivers/target/iscsi/iscsi_target_stat.c 		char *page)
page              112 drivers/target/iscsi/iscsi_target_stat.c 	return snprintf(page, PAGE_SIZE, "%u\n",
page              117 drivers/target/iscsi/iscsi_target_stat.c 		char *page)
page              122 drivers/target/iscsi/iscsi_target_stat.c 	return snprintf(page, PAGE_SIZE, "%s\n",
page              128 drivers/target/iscsi/iscsi_target_stat.c 		char *page)
page              130 drivers/target/iscsi/iscsi_target_stat.c 	return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_DISCONTINUITY_TIME);
page              134 drivers/target/iscsi/iscsi_target_stat.c 		char *page)
page              136 drivers/target/iscsi/iscsi_target_stat.c 	return snprintf(page, PAGE_SIZE, "%s\n", ISCSI_INST_DESCR);
page              140 drivers/target/iscsi/iscsi_target_stat.c 		char *page)
page              142 drivers/target/iscsi/iscsi_target_stat.c 	return snprintf(page, PAGE_SIZE, "Datera, Inc. iSCSI-Target\n");
page              146 drivers/target/iscsi/iscsi_target_stat.c 		char *page)
page              148 drivers/target/iscsi/iscsi_target_stat.c 	return snprintf(page, PAGE_SIZE, "%s\n", ISCSIT_VERSION);
page              198 drivers/target/iscsi/iscsi_target_stat.c 		char *page)
page              200 drivers/target/iscsi/iscsi_target_stat.c 	return snprintf(page, PAGE_SIZE, "%u\n",
page              205 drivers/target/iscsi/iscsi_target_stat.c 		char *page)
page              210 drivers/target/iscsi/iscsi_target_stat.c 	return snprintf(page, PAGE_SIZE, "%u\n", sess_err->digest_errors);
page              214 drivers/target/iscsi/iscsi_target_stat.c 		char *page)
page              219 drivers/target/iscsi/iscsi_target_stat.c 	return snprintf(page, PAGE_SIZE, "%u\n", sess_err->cxn_timeout_errors);
page              223 drivers/target/iscsi/iscsi_target_stat.c 		char *page)
page              228 drivers/target/iscsi/iscsi_target_stat.c 	return snprintf(page, PAGE_SIZE, "%u\n", sess_err->pdu_format_errors);
page              260 drivers/target/iscsi/iscsi_target_stat.c 		char *page)
page              262 drivers/target/iscsi/iscsi_target_stat.c 	return snprintf(page, PAGE_SIZE, "%u\n",
page              267 drivers/target/iscsi/iscsi_target_stat.c 		char *page)
page              269 drivers/target/iscsi/iscsi_target_stat.c 	return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_NODE_INDEX);
page              273 drivers/target/iscsi/iscsi_target_stat.c 		char *page)
page              285 drivers/target/iscsi/iscsi_target_stat.c 	return snprintf(page, PAGE_SIZE, "%u\n", fail_count);
page              289 drivers/target/iscsi/iscsi_target_stat.c 		char *page)
page              301 drivers/target/iscsi/iscsi_target_stat.c 	return snprintf(page, PAGE_SIZE, "%u\n", last_fail_time);
page              305 drivers/target/iscsi/iscsi_target_stat.c 		char *page)
page              315 drivers/target/iscsi/iscsi_target_stat.c 	return snprintf(page, PAGE_SIZE, "%u\n", last_fail_type);
page              319 drivers/target/iscsi/iscsi_target_stat.c 		char *page)
page              330 drivers/target/iscsi/iscsi_target_stat.c 	return snprintf(page, PAGE_SIZE, "%s\n", buf);
page              334 drivers/target/iscsi/iscsi_target_stat.c 		char *page)
page              342 drivers/target/iscsi/iscsi_target_stat.c 		ret = snprintf(page, PAGE_SIZE, "ipv6\n");
page              344 drivers/target/iscsi/iscsi_target_stat.c 		ret = snprintf(page, PAGE_SIZE, "ipv4\n");
page              351 drivers/target/iscsi/iscsi_target_stat.c 		char *page)
page              358 drivers/target/iscsi/iscsi_target_stat.c 	ret = snprintf(page, PAGE_SIZE, "%pISc\n", &lstat->last_intr_fail_sockaddr);
page              400 drivers/target/iscsi/iscsi_target_stat.c static ssize_t iscsi_stat_login_inst_show(struct config_item *item, char *page)
page              402 drivers/target/iscsi/iscsi_target_stat.c 	return snprintf(page, PAGE_SIZE, "%u\n",
page              407 drivers/target/iscsi/iscsi_target_stat.c 		char *page)
page              409 drivers/target/iscsi/iscsi_target_stat.c 	return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_NODE_INDEX);
page              413 drivers/target/iscsi/iscsi_target_stat.c 		char *page)
page              420 drivers/target/iscsi/iscsi_target_stat.c 	ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->accepts);
page              427 drivers/target/iscsi/iscsi_target_stat.c 		char *page)
page              434 drivers/target/iscsi/iscsi_target_stat.c 	ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->other_fails);
page              441 drivers/target/iscsi/iscsi_target_stat.c 		char *page)
page              448 drivers/target/iscsi/iscsi_target_stat.c 	ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->redirects);
page              455 drivers/target/iscsi/iscsi_target_stat.c 		char *page)
page              462 drivers/target/iscsi/iscsi_target_stat.c 	ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->authorize_fails);
page              469 drivers/target/iscsi/iscsi_target_stat.c 		struct config_item *item, char *page)
page              476 drivers/target/iscsi/iscsi_target_stat.c 	ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->authenticate_fails);
page              483 drivers/target/iscsi/iscsi_target_stat.c 		char *page)
page              490 drivers/target/iscsi/iscsi_target_stat.c 	ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->negotiate_fails);
page              532 drivers/target/iscsi/iscsi_target_stat.c static ssize_t iscsi_stat_logout_inst_show(struct config_item *item, char *page)
page              534 drivers/target/iscsi/iscsi_target_stat.c 	return snprintf(page, PAGE_SIZE, "%u\n",
page              538 drivers/target/iscsi/iscsi_target_stat.c static ssize_t iscsi_stat_logout_indx_show(struct config_item *item, char *page)
page              540 drivers/target/iscsi/iscsi_target_stat.c 	return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_NODE_INDEX);
page              544 drivers/target/iscsi/iscsi_target_stat.c 		char *page)
page              549 drivers/target/iscsi/iscsi_target_stat.c 	return snprintf(page, PAGE_SIZE, "%u\n", lstats->normal_logouts);
page              553 drivers/target/iscsi/iscsi_target_stat.c 		char *page)
page              558 drivers/target/iscsi/iscsi_target_stat.c 	return snprintf(page, PAGE_SIZE, "%u\n", lstats->abnormal_logouts);
page              589 drivers/target/iscsi/iscsi_target_stat.c static ssize_t iscsi_stat_sess_inst_show(struct config_item *item, char *page)
page              596 drivers/target/iscsi/iscsi_target_stat.c 	return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index);
page              599 drivers/target/iscsi/iscsi_target_stat.c static ssize_t iscsi_stat_sess_node_show(struct config_item *item, char *page)
page              612 drivers/target/iscsi/iscsi_target_stat.c 			ret = snprintf(page, PAGE_SIZE, "%u\n",
page              620 drivers/target/iscsi/iscsi_target_stat.c static ssize_t iscsi_stat_sess_indx_show(struct config_item *item, char *page)
page              633 drivers/target/iscsi/iscsi_target_stat.c 			ret = snprintf(page, PAGE_SIZE, "%u\n",
page              642 drivers/target/iscsi/iscsi_target_stat.c 		char *page)
page              655 drivers/target/iscsi/iscsi_target_stat.c 			ret = snprintf(page, PAGE_SIZE, "%lu\n",
page              664 drivers/target/iscsi/iscsi_target_stat.c 		char *page)
page              677 drivers/target/iscsi/iscsi_target_stat.c 			ret = snprintf(page, PAGE_SIZE, "%lu\n",
page              686 drivers/target/iscsi/iscsi_target_stat.c 		char *page)
page              699 drivers/target/iscsi/iscsi_target_stat.c 			ret = snprintf(page, PAGE_SIZE, "%lu\n",
page              708 drivers/target/iscsi/iscsi_target_stat.c 		char *page)
page              721 drivers/target/iscsi/iscsi_target_stat.c 			ret = snprintf(page, PAGE_SIZE, "%lu\n",
page              730 drivers/target/iscsi/iscsi_target_stat.c 		char *page)
page              743 drivers/target/iscsi/iscsi_target_stat.c 			ret = snprintf(page, PAGE_SIZE, "%lu\n",
page              752 drivers/target/iscsi/iscsi_target_stat.c 		struct config_item *item, char *page)
page              765 drivers/target/iscsi/iscsi_target_stat.c 			ret = snprintf(page, PAGE_SIZE, "%lu\n",
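Note: the iscsi_target_stat.c hits above all share one configfs show convention: the handler receives a single page-sized buffer and formats one value into it with snprintf(page, PAGE_SIZE, ...). A minimal hedged sketch of that shape (the my_stats container and accepts counter are illustrative names, not from the file):

    #include <linux/configfs.h>
    #include <linux/kernel.h>

    /* Sketch only: backing storage is a hypothetical static for brevity. */
    static struct my_stats { u32 accepts; } my_stats;

    static ssize_t my_accepts_show(struct config_item *item, char *page)
    {
            /* configfs hands the handler exactly one page; stay within PAGE_SIZE */
            return snprintf(page, PAGE_SIZE, "%u\n", my_stats.accepts);
    }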
page              676 drivers/target/loopback/tcm_loop.c 		struct config_item *item, char *page)
page              682 drivers/target/loopback/tcm_loop.c 	return sprintf(page, "%d\n", tl_tpg->tl_fabric_prot_type);
page              686 drivers/target/loopback/tcm_loop.c 		struct config_item *item, const char *page, size_t count)
page              692 drivers/target/loopback/tcm_loop.c 	int ret = kstrtoul(page, 0, &val);
page              791 drivers/target/loopback/tcm_loop.c static ssize_t tcm_loop_tpg_nexus_show(struct config_item *item, char *page)
page              803 drivers/target/loopback/tcm_loop.c 	ret = snprintf(page, PAGE_SIZE, "%s\n",
page              810 drivers/target/loopback/tcm_loop.c 		const char *page, size_t count)
page              821 drivers/target/loopback/tcm_loop.c 	if (!strncmp(page, "NULL", 4)) {
page              830 drivers/target/loopback/tcm_loop.c 	if (strlen(page) >= TL_WWN_ADDR_LEN) {
page              832 drivers/target/loopback/tcm_loop.c 		       page, TL_WWN_ADDR_LEN);
page              835 drivers/target/loopback/tcm_loop.c 	snprintf(&i_port[0], TL_WWN_ADDR_LEN, "%s", page);
page              885 drivers/target/loopback/tcm_loop.c 		char *page)
page              905 drivers/target/loopback/tcm_loop.c 		ret = snprintf(page, PAGE_SIZE, "%s\n", status);
page              911 drivers/target/loopback/tcm_loop.c 		const char *page, size_t count)
page              917 drivers/target/loopback/tcm_loop.c 	if (!strncmp(page, "online", 6)) {
page              921 drivers/target/loopback/tcm_loop.c 	if (!strncmp(page, "offline", 7)) {
page              934 drivers/target/loopback/tcm_loop.c 					 char *page)
page              941 drivers/target/loopback/tcm_loop.c 	return snprintf(page, PAGE_SIZE, "%d:0:%d\n",
page             1111 drivers/target/loopback/tcm_loop.c static ssize_t tcm_loop_wwn_version_show(struct config_item *item, char *page)
page             1113 drivers/target/loopback/tcm_loop.c 	return sprintf(page, "TCM Loopback Fabric module %s\n", TCM_LOOP_VERSION);
page             2080 drivers/target/sbp/sbp_target.c static ssize_t sbp_wwn_version_show(struct config_item *item, char *page)
page             2082 drivers/target/sbp/sbp_target.c 	return sprintf(page, "FireWire SBP fabric module %s\n", SBP_VERSION);
page             2092 drivers/target/sbp/sbp_target.c static ssize_t sbp_tpg_directory_id_show(struct config_item *item, char *page)
page             2099 drivers/target/sbp/sbp_target.c 		return sprintf(page, "implicit\n");
page             2101 drivers/target/sbp/sbp_target.c 		return sprintf(page, "%06x\n", tport->directory_id);
page             2105 drivers/target/sbp/sbp_target.c 		const char *page, size_t count)
page             2117 drivers/target/sbp/sbp_target.c 	if (strstr(page, "implicit") == page) {
page             2120 drivers/target/sbp/sbp_target.c 		if (kstrtoul(page, 16, &val) < 0)
page             2131 drivers/target/sbp/sbp_target.c static ssize_t sbp_tpg_enable_show(struct config_item *item, char *page)
page             2136 drivers/target/sbp/sbp_target.c 	return sprintf(page, "%d\n", tport->enable);
page             2140 drivers/target/sbp/sbp_target.c 		const char *page, size_t count)
page             2148 drivers/target/sbp/sbp_target.c 	if (kstrtoul(page, 0, &val) < 0)
page             2192 drivers/target/sbp/sbp_target.c 		char *page)
page             2197 drivers/target/sbp/sbp_target.c 	return sprintf(page, "%d\n", tport->mgt_orb_timeout);
page             2201 drivers/target/sbp/sbp_target.c 		const char *page, size_t count)
page             2209 drivers/target/sbp/sbp_target.c 	if (kstrtoul(page, 0, &val) < 0)
page             2227 drivers/target/sbp/sbp_target.c 		char *page)
page             2232 drivers/target/sbp/sbp_target.c 	return sprintf(page, "%d\n", tport->max_reconnect_timeout);
page             2236 drivers/target/sbp/sbp_target.c 		const char *page, size_t count)
page             2244 drivers/target/sbp/sbp_target.c 	if (kstrtoul(page, 0, &val) < 0)
page             2262 drivers/target/sbp/sbp_target.c 		char *page)
page             2267 drivers/target/sbp/sbp_target.c 	return sprintf(page, "%d\n", tport->max_logins_per_lun);
page             2271 drivers/target/sbp/sbp_target.c 		const char *page, size_t count)
page             2278 drivers/target/sbp/sbp_target.c 	if (kstrtoul(page, 0, &val) < 0)
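Note: the sbp_target.c store handlers paired with the shows above follow the mirror pattern: parse the page buffer with kstrtoul(), validate, commit, and return count. A hedged sketch (the my_tport struct, timeout field, and bounds are illustrative):

    #include <linux/configfs.h>
    #include <linux/kernel.h>       /* kstrtoul() */

    static struct my_tport { int mgt_orb_timeout; } my_tport;

    static ssize_t my_timeout_store(struct config_item *item,
                    const char *page, size_t count)
    {
            unsigned long val;

            /* configfs NUL-terminates the input, so kstrtoul() is safe here */
            if (kstrtoul(page, 0, &val) < 0)
                    return -EINVAL;
            if (val < 1 || val > 127)       /* illustrative bounds check */
                    return -EINVAL;

            my_tport.mgt_orb_timeout = val;
            return count;                   /* consume the whole write */
    }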
page             1880 drivers/target/target_core_alua.c ssize_t core_alua_show_tg_pt_gp_info(struct se_lun *lun, char *page)
page             1890 drivers/target/target_core_alua.c 		len += sprintf(page, "TG Port Alias: %s\nTG Port Group ID:"
page             1910 drivers/target/target_core_alua.c 	const char *page,
page             1932 drivers/target/target_core_alua.c 	memcpy(buf, page, count);
page             1995 drivers/target/target_core_alua.c 	char *page)
page             1999 drivers/target/target_core_alua.c 		return sprintf(page, "Implicit and Explicit\n");
page             2001 drivers/target/target_core_alua.c 		return sprintf(page, "Implicit\n");
page             2003 drivers/target/target_core_alua.c 		return sprintf(page, "Explicit\n");
page             2005 drivers/target/target_core_alua.c 		return sprintf(page, "None\n");
page             2010 drivers/target/target_core_alua.c 	const char *page,
page             2016 drivers/target/target_core_alua.c 	ret = kstrtoul(page, 0, &tmp);
page             2041 drivers/target/target_core_alua.c 	char *page)
page             2043 drivers/target/target_core_alua.c 	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_nonop_delay_msecs);
page             2048 drivers/target/target_core_alua.c 	const char *page,
page             2054 drivers/target/target_core_alua.c 	ret = kstrtoul(page, 0, &tmp);
page             2072 drivers/target/target_core_alua.c 	char *page)
page             2074 drivers/target/target_core_alua.c 	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_trans_delay_msecs);
page             2079 drivers/target/target_core_alua.c 	const char *page,
page             2085 drivers/target/target_core_alua.c 	ret = kstrtoul(page, 0, &tmp);
page             2103 drivers/target/target_core_alua.c 	char *page)
page             2105 drivers/target/target_core_alua.c 	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_implicit_trans_secs);
page             2110 drivers/target/target_core_alua.c 	const char *page,
page             2116 drivers/target/target_core_alua.c 	ret = kstrtoul(page, 0, &tmp);
page             2134 drivers/target/target_core_alua.c 	char *page)
page             2136 drivers/target/target_core_alua.c 	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_pref);
page             2141 drivers/target/target_core_alua.c 	const char *page,
page             2147 drivers/target/target_core_alua.c 	ret = kstrtoul(page, 0, &tmp);
page             2161 drivers/target/target_core_alua.c ssize_t core_alua_show_offline_bit(struct se_lun *lun, char *page)
page             2163 drivers/target/target_core_alua.c 	return sprintf(page, "%d\n",
page             2169 drivers/target/target_core_alua.c 	const char *page,
page             2184 drivers/target/target_core_alua.c 	ret = kstrtoul(page, 0, &tmp);
page             2204 drivers/target/target_core_alua.c 	char *page)
page             2206 drivers/target/target_core_alua.c 	return sprintf(page, "%d\n", lun->lun_tg_pt_secondary_stat);
page             2211 drivers/target/target_core_alua.c 	const char *page,
page             2217 drivers/target/target_core_alua.c 	ret = kstrtoul(page, 0, &tmp);
page             2236 drivers/target/target_core_alua.c 	char *page)
page             2238 drivers/target/target_core_alua.c 	return sprintf(page, "%d\n", lun->lun_tg_pt_secondary_write_md);
page             2243 drivers/target/target_core_alua.c 	const char *page,
page             2249 drivers/target/target_core_alua.c 	ret = kstrtoul(page, 0, &tmp);
page               85 drivers/target/target_core_configfs.c 		char *page)
page               87 drivers/target/target_core_configfs.c 	return sprintf(page, "Target Engine Core ConfigFS Infrastructure %s"
page               98 drivers/target/target_core_configfs.c 					    char *page)
page              100 drivers/target/target_core_configfs.c 	return sprintf(page, "%s\n", db_root);
page              104 drivers/target/target_core_configfs.c 					const char *page, size_t count)
page              123 drivers/target/target_core_configfs.c 	read_bytes = snprintf(db_root_stage, DB_ROOT_LEN, "%s", page);
page              513 drivers/target/target_core_configfs.c static ssize_t _name##_show(struct config_item *item, char *page)	\
page              515 drivers/target/target_core_configfs.c 	return snprintf(page, PAGE_SIZE, "%u\n", to_attrib(item)->_name); \
page              551 drivers/target/target_core_configfs.c static ssize_t _name##_store(struct config_item *item, const char *page,\
page              558 drivers/target/target_core_configfs.c 	ret = kstrtou32(page, 0, &val);					\
page              572 drivers/target/target_core_configfs.c static ssize_t _name##_store(struct config_item *item, const char *page,	\
page              579 drivers/target/target_core_configfs.c 	ret = strtobool(page, &flag);					\
page              594 drivers/target/target_core_configfs.c static ssize_t _name##_store(struct config_item *item, const char *page,\
page              625 drivers/target/target_core_configfs.c 		const char *page, size_t count)
page              639 drivers/target/target_core_configfs.c 	ret = strtobool(page, &flag);
page              655 drivers/target/target_core_configfs.c 		const char *page, size_t count)
page              661 drivers/target/target_core_configfs.c 	ret = strtobool(page, &flag);
page              677 drivers/target/target_core_configfs.c 		const char *page, size_t count)
page              683 drivers/target/target_core_configfs.c 	ret = kstrtou32(page, 0, &val);
page              705 drivers/target/target_core_configfs.c 		const char *page, size_t count)
page              711 drivers/target/target_core_configfs.c 	ret = strtobool(page, &flag);
page              729 drivers/target/target_core_configfs.c 		const char *page, size_t count)
page              735 drivers/target/target_core_configfs.c 	ret = strtobool(page, &flag);
page              755 drivers/target/target_core_configfs.c 		const char *page, size_t count)
page              761 drivers/target/target_core_configfs.c 	ret = strtobool(page, &flag);
page              781 drivers/target/target_core_configfs.c 		const char *page, size_t count)
page              788 drivers/target/target_core_configfs.c 	ret = kstrtou32(page, 0, &flag);
page              844 drivers/target/target_core_configfs.c static ssize_t pi_prot_format_show(struct config_item *item, char *page)
page              846 drivers/target/target_core_configfs.c 	return snprintf(page, PAGE_SIZE, "0\n");
page              850 drivers/target/target_core_configfs.c 		const char *page, size_t count)
page              857 drivers/target/target_core_configfs.c 	ret = strtobool(page, &flag);
page              888 drivers/target/target_core_configfs.c 		const char *page, size_t count)
page              894 drivers/target/target_core_configfs.c 	ret = strtobool(page, &flag);
page              917 drivers/target/target_core_configfs.c 		const char *page, size_t count)
page              923 drivers/target/target_core_configfs.c 	ret = strtobool(page, &flag);
page              939 drivers/target/target_core_configfs.c 		const char *page, size_t count)
page              945 drivers/target/target_core_configfs.c 	ret = strtobool(page, &flag);
page              961 drivers/target/target_core_configfs.c 		const char *page, size_t count)
page              967 drivers/target/target_core_configfs.c 	ret = strtobool(page, &flag);
page              997 drivers/target/target_core_configfs.c 		const char *page, size_t count)
page             1004 drivers/target/target_core_configfs.c 	ret = kstrtou32(page, 0, &val);
page             1034 drivers/target/target_core_configfs.c 		const char *page, size_t count)
page             1040 drivers/target/target_core_configfs.c 	ret = kstrtou32(page, 0, &val);
page             1064 drivers/target/target_core_configfs.c 		const char *page, size_t count)
page             1070 drivers/target/target_core_configfs.c 	ret = kstrtou32(page, 0, &val);
page             1097 drivers/target/target_core_configfs.c static ssize_t alua_support_show(struct config_item *item, char *page)
page             1102 drivers/target/target_core_configfs.c 	return snprintf(page, PAGE_SIZE, "%d\n",
page             1106 drivers/target/target_core_configfs.c static ssize_t pgr_support_show(struct config_item *item, char *page)
page             1111 drivers/target/target_core_configfs.c 	return snprintf(page, PAGE_SIZE, "%d\n",
page             1249 drivers/target/target_core_configfs.c 		char *page)
page             1251 drivers/target/target_core_configfs.c 	return sprintf(page, "%s\n", &to_t10_wwn(item)->vendor[0]);
page             1255 drivers/target/target_core_configfs.c 		const char *page, size_t count)
page             1265 drivers/target/target_core_configfs.c 	len = strlcpy(buf, page, sizeof(buf));
page             1305 drivers/target/target_core_configfs.c 		char *page)
page             1307 drivers/target/target_core_configfs.c 	return sprintf(page, "%s\n", &to_t10_wwn(item)->model[0]);
page             1311 drivers/target/target_core_configfs.c 		const char *page, size_t count)
page             1321 drivers/target/target_core_configfs.c 	len = strlcpy(buf, page, sizeof(buf));
page             1361 drivers/target/target_core_configfs.c 		char *page)
page             1363 drivers/target/target_core_configfs.c 	return sprintf(page, "%s\n", &to_t10_wwn(item)->revision[0]);
page             1367 drivers/target/target_core_configfs.c 		const char *page, size_t count)
page             1377 drivers/target/target_core_configfs.c 	len = strlcpy(buf, page, sizeof(buf));
page             1420 drivers/target/target_core_configfs.c 		char *page)
page             1422 drivers/target/target_core_configfs.c 	return sprintf(page, "T10 VPD Unit Serial Number: %s\n",
page             1427 drivers/target/target_core_configfs.c 		const char *page, size_t count)
page             1449 drivers/target/target_core_configfs.c 	if (strlen(page) >= INQUIRY_VPD_SERIAL_LEN) {
page             1474 drivers/target/target_core_configfs.c 	snprintf(buf, INQUIRY_VPD_SERIAL_LEN, "%s", page);
page             1489 drivers/target/target_core_configfs.c 		char *page)
page             1508 drivers/target/target_core_configfs.c 		len += sprintf(page+len, "%s", buf);
page             1520 drivers/target/target_core_configfs.c 		char *page)						\
page             1536 drivers/target/target_core_configfs.c 		len += sprintf(page+len, "%s", buf);			\
page             1542 drivers/target/target_core_configfs.c 		len += sprintf(page+len, "%s", buf);			\
page             1548 drivers/target/target_core_configfs.c 		len += sprintf(page+len, "%s", buf);			\
page             1596 drivers/target/target_core_configfs.c 		char *page)
page             1606 drivers/target/target_core_configfs.c 		return sprintf(page, "No SPC-3 Reservation holder\n");
page             1611 drivers/target/target_core_configfs.c 	return sprintf(page, "SPC-3 Reservation: %s Initiator: %s%s\n",
page             1617 drivers/target/target_core_configfs.c 		char *page)
page             1625 drivers/target/target_core_configfs.c 		len = sprintf(page,
page             1630 drivers/target/target_core_configfs.c 		len = sprintf(page, "No SPC-2 Reservation holder\n");
page             1635 drivers/target/target_core_configfs.c static ssize_t target_pr_res_holder_show(struct config_item *item, char *page)
page             1641 drivers/target/target_core_configfs.c 		return sprintf(page, "SPC_RESERVATIONS_DISABLED\n");
page             1644 drivers/target/target_core_configfs.c 		return sprintf(page, "Passthrough\n");
page             1648 drivers/target/target_core_configfs.c 		ret = target_core_dev_pr_show_spc2_res(dev, page);
page             1650 drivers/target/target_core_configfs.c 		ret = target_core_dev_pr_show_spc3_res(dev, page);
page             1656 drivers/target/target_core_configfs.c 		char *page)
page             1663 drivers/target/target_core_configfs.c 		len = sprintf(page, "No SPC-3 Reservation holder\n");
page             1665 drivers/target/target_core_configfs.c 		len = sprintf(page, "SPC-3 Reservation: All Target"
page             1668 drivers/target/target_core_configfs.c 		len = sprintf(page, "SPC-3 Reservation: Single"
page             1677 drivers/target/target_core_configfs.c 		char *page)
page             1679 drivers/target/target_core_configfs.c 	return sprintf(page, "0x%08x\n", pr_to_dev(item)->t10_pr.pr_generation);
page             1684 drivers/target/target_core_configfs.c 		char *page)
page             1696 drivers/target/target_core_configfs.c 		len = sprintf(page, "No SPC-3 Reservation holder\n");
page             1704 drivers/target/target_core_configfs.c 	len += sprintf(page+len, "SPC-3 Reservation: %s"
page             1707 drivers/target/target_core_configfs.c 	len += sprintf(page+len, "SPC-3 Reservation: Relative Port"
page             1720 drivers/target/target_core_configfs.c 		char *page)
page             1730 drivers/target/target_core_configfs.c 	len += sprintf(page+len, "SPC-3 PR Registrations:\n");
page             1749 drivers/target/target_core_configfs.c 		len += sprintf(page+len, "%s", buf);
page             1755 drivers/target/target_core_configfs.c 		len += sprintf(page+len, "None\n");
page             1760 drivers/target/target_core_configfs.c static ssize_t target_pr_res_pr_type_show(struct config_item *item, char *page)
page             1769 drivers/target/target_core_configfs.c 		len = sprintf(page, "SPC-3 Reservation Type: %s\n",
page             1772 drivers/target/target_core_configfs.c 		len = sprintf(page, "No SPC-3 Reservation holder\n");
page             1779 drivers/target/target_core_configfs.c static ssize_t target_pr_res_type_show(struct config_item *item, char *page)
page             1784 drivers/target/target_core_configfs.c 		return sprintf(page, "SPC_RESERVATIONS_DISABLED\n");
page             1786 drivers/target/target_core_configfs.c 		return sprintf(page, "SPC_PASSTHROUGH\n");
page             1788 drivers/target/target_core_configfs.c 		return sprintf(page, "SPC2_RESERVATIONS\n");
page             1790 drivers/target/target_core_configfs.c 	return sprintf(page, "SPC3_PERSISTENT_RESERVATIONS\n");
page             1794 drivers/target/target_core_configfs.c 		char *page)
page             1802 drivers/target/target_core_configfs.c 	return sprintf(page, "APTPL Bit Status: %s\n",
page             1807 drivers/target/target_core_configfs.c 		char *page)
page             1815 drivers/target/target_core_configfs.c 	return sprintf(page, "Ready to process PR APTPL metadata..\n");
page             1844 drivers/target/target_core_configfs.c 		const char *page, size_t count)
page             1870 drivers/target/target_core_configfs.c 	opts = kstrdup(page, GFP_KERNEL);
page             2063 drivers/target/target_core_configfs.c static ssize_t target_dev_info_show(struct config_item *item, char *page)
page             2069 drivers/target/target_core_configfs.c 	transport_dump_dev_state(dev, page, &bl);
page             2072 drivers/target/target_core_configfs.c 			page+read_bytes);
page             2077 drivers/target/target_core_configfs.c 		const char *page, size_t count)
page             2081 drivers/target/target_core_configfs.c 	return dev->transport->set_configfs_dev_params(dev, page, count);
page             2084 drivers/target/target_core_configfs.c static ssize_t target_dev_alias_show(struct config_item *item, char *page)
page             2091 drivers/target/target_core_configfs.c 	return snprintf(page, PAGE_SIZE, "%s\n", dev->dev_alias);
page             2095 drivers/target/target_core_configfs.c 		const char *page, size_t count)
page             2108 drivers/target/target_core_configfs.c 	read_bytes = snprintf(&dev->dev_alias[0], SE_DEV_ALIAS_LEN, "%s", page);
page             2124 drivers/target/target_core_configfs.c static ssize_t target_dev_udev_path_show(struct config_item *item, char *page)
page             2131 drivers/target/target_core_configfs.c 	return snprintf(page, PAGE_SIZE, "%s\n", dev->udev_path);
page             2135 drivers/target/target_core_configfs.c 		const char *page, size_t count)
page             2149 drivers/target/target_core_configfs.c 			"%s", page);
page             2165 drivers/target/target_core_configfs.c static ssize_t target_dev_enable_show(struct config_item *item, char *page)
page             2169 drivers/target/target_core_configfs.c 	return snprintf(page, PAGE_SIZE, "%d\n", target_dev_configured(dev));
page             2173 drivers/target/target_core_configfs.c 		const char *page, size_t count)
page             2179 drivers/target/target_core_configfs.c 	ptr = strstr(page, "1");
page             2192 drivers/target/target_core_configfs.c static ssize_t target_dev_alua_lu_gp_show(struct config_item *item, char *page)
page             2208 drivers/target/target_core_configfs.c 		len += sprintf(page, "LU Group Alias: %s\nLU Group ID: %hu\n",
page             2217 drivers/target/target_core_configfs.c 		const char *page, size_t count)
page             2235 drivers/target/target_core_configfs.c 	memcpy(buf, page, count);
page             2296 drivers/target/target_core_configfs.c static ssize_t target_dev_lba_map_show(struct config_item *item, char *page)
page             2301 drivers/target/target_core_configfs.c 	char *b = page;
page             2342 drivers/target/target_core_configfs.c 		const char *page, size_t count)
page             2354 drivers/target/target_core_configfs.c 	orig = map_entries = kstrdup(page, GFP_KERNEL);
page             2504 drivers/target/target_core_configfs.c static ssize_t target_lu_gp_lu_gp_id_show(struct config_item *item, char *page)
page             2510 drivers/target/target_core_configfs.c 	return sprintf(page, "%hu\n", lu_gp->lu_gp_id);
page             2514 drivers/target/target_core_configfs.c 		const char *page, size_t count)
page             2521 drivers/target/target_core_configfs.c 	ret = kstrtoul(page, 0, &lu_gp_id);
page             2545 drivers/target/target_core_configfs.c static ssize_t target_lu_gp_members_show(struct config_item *item, char *page)
page             2571 drivers/target/target_core_configfs.c 		memcpy(page+len, buf, cur_len);
page             2675 drivers/target/target_core_configfs.c 		char *page)
page             2677 drivers/target/target_core_configfs.c 	return sprintf(page, "%d\n",
page             2682 drivers/target/target_core_configfs.c 		const char *page, size_t count)
page             2700 drivers/target/target_core_configfs.c 	ret = kstrtoul(page, 0, &tmp);
page             2703 drivers/target/target_core_configfs.c 				" %s\n", page);
page             2727 drivers/target/target_core_configfs.c 		char *page)
page             2730 drivers/target/target_core_configfs.c 	return sprintf(page, "%s\n",
page             2735 drivers/target/target_core_configfs.c 		struct config_item *item, const char *page, size_t count)
page             2748 drivers/target/target_core_configfs.c 	ret = kstrtoul(page, 0, &tmp);
page             2751 drivers/target/target_core_configfs.c 				" from %s\n", page);
page             2769 drivers/target/target_core_configfs.c 		char *page)
page             2771 drivers/target/target_core_configfs.c 	return core_alua_show_access_type(to_tg_pt_gp(item), page);
page             2775 drivers/target/target_core_configfs.c 		const char *page, size_t count)
page             2777 drivers/target/target_core_configfs.c 	return core_alua_store_access_type(to_tg_pt_gp(item), page, count);
page             2829 drivers/target/target_core_configfs.c 		struct config_item *item, char *page)
page             2831 drivers/target/target_core_configfs.c 	return sprintf(page, "%d\n",
page             2836 drivers/target/target_core_configfs.c 		struct config_item *item, const char *page, size_t count)
page             2842 drivers/target/target_core_configfs.c 	ret = kstrtoul(page, 0, &tmp);
page             2859 drivers/target/target_core_configfs.c 		char *page)
page             2861 drivers/target/target_core_configfs.c 	return core_alua_show_nonop_delay_msecs(to_tg_pt_gp(item), page);
page             2865 drivers/target/target_core_configfs.c 		const char *page, size_t count)
page             2867 drivers/target/target_core_configfs.c 	return core_alua_store_nonop_delay_msecs(to_tg_pt_gp(item), page,
page             2872 drivers/target/target_core_configfs.c 		char *page)
page             2874 drivers/target/target_core_configfs.c 	return core_alua_show_trans_delay_msecs(to_tg_pt_gp(item), page);
page             2878 drivers/target/target_core_configfs.c 		const char *page, size_t count)
page             2880 drivers/target/target_core_configfs.c 	return core_alua_store_trans_delay_msecs(to_tg_pt_gp(item), page,
page             2885 drivers/target/target_core_configfs.c 		struct config_item *item, char *page)
page             2887 drivers/target/target_core_configfs.c 	return core_alua_show_implicit_trans_secs(to_tg_pt_gp(item), page);
page             2891 drivers/target/target_core_configfs.c 		struct config_item *item, const char *page, size_t count)
page             2893 drivers/target/target_core_configfs.c 	return core_alua_store_implicit_trans_secs(to_tg_pt_gp(item), page,
page             2898 drivers/target/target_core_configfs.c 		char *page)
page             2900 drivers/target/target_core_configfs.c 	return core_alua_show_preferred_bit(to_tg_pt_gp(item), page);
page             2904 drivers/target/target_core_configfs.c 		const char *page, size_t count)
page             2906 drivers/target/target_core_configfs.c 	return core_alua_store_preferred_bit(to_tg_pt_gp(item), page, count);
page             2910 drivers/target/target_core_configfs.c 		char *page)
page             2916 drivers/target/target_core_configfs.c 	return sprintf(page, "%hu\n", tg_pt_gp->tg_pt_gp_id);
page             2920 drivers/target/target_core_configfs.c 		const char *page, size_t count)
page             2927 drivers/target/target_core_configfs.c 	ret = kstrtoul(page, 0, &tg_pt_gp_id);
page             2930 drivers/target/target_core_configfs.c 		       page);
page             2952 drivers/target/target_core_configfs.c 		char *page)
page             2978 drivers/target/target_core_configfs.c 		memcpy(page+len, buf, cur_len);
page             3261 drivers/target/target_core_configfs.c static ssize_t target_hba_info_show(struct config_item *item, char *page)
page             3265 drivers/target/target_core_configfs.c 	return sprintf(page, "HBA Index: %d plugin: %s version: %s\n",
page             3270 drivers/target/target_core_configfs.c static ssize_t target_hba_mode_show(struct config_item *item, char *page)
page             3278 drivers/target/target_core_configfs.c 	return sprintf(page, "%d\n", hba_mode);
page             3282 drivers/target/target_core_configfs.c 		const char *page, size_t count)
page             3291 drivers/target/target_core_configfs.c 	ret = kstrtoul(page, 0, &mode_flag);
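Note: the _name##_show / _name##_store fragments in the target_core_configfs.c block above come from macros that stamp out one show/store pair per attribute field. A reduced sketch of the technique (DEF_MY_ATTRIB and to_attrib() are assumed names patterned on the fragments shown):

    #define DEF_MY_ATTRIB(_name)                                              \
    static ssize_t _name##_show(struct config_item *item, char *page)        \
    {                                                                          \
            return snprintf(page, PAGE_SIZE, "%u\n", to_attrib(item)->_name); \
    }                                                                          \
                                                                               \
    static ssize_t _name##_store(struct config_item *item,                   \
                    const char *page, size_t count)                            \
    {                                                                          \
            u32 val;                                                           \
            int ret = kstrtou32(page, 0, &val);                                \
                                                                               \
            if (ret < 0)                                                       \
                    return ret;                                                \
            to_attrib(item)->_name = val;                                      \
            return count;                                                      \
    }

    /* one invocation then emits both handlers for a field: */
    DEF_MY_ATTRIB(block_size);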
page              160 drivers/target/target_core_fabric_configfs.c 		struct config_item *item, char *page)
page              170 drivers/target/target_core_fabric_configfs.c 		len = sprintf(page, "%d\n", deve->lun_access_ro);
page              178 drivers/target/target_core_fabric_configfs.c 		struct config_item *item, const char *page, size_t count)
page              186 drivers/target/target_core_fabric_configfs.c 	ret = kstrtoul(page, 0, &wp);
page              519 drivers/target/target_core_fabric_configfs.c 		char *page)
page              526 drivers/target/target_core_fabric_configfs.c 	return core_alua_show_tg_pt_gp_info(lun, page);
page              530 drivers/target/target_core_fabric_configfs.c 		const char *page, size_t count)
page              537 drivers/target/target_core_fabric_configfs.c 	return core_alua_store_tg_pt_gp_info(lun, page, count);
page              541 drivers/target/target_core_fabric_configfs.c 		struct config_item *item, char *page)
page              548 drivers/target/target_core_fabric_configfs.c 	return core_alua_show_offline_bit(lun, page);
page              552 drivers/target/target_core_fabric_configfs.c 		struct config_item *item, const char *page, size_t count)
page              559 drivers/target/target_core_fabric_configfs.c 	return core_alua_store_offline_bit(lun, page, count);
page              563 drivers/target/target_core_fabric_configfs.c 		struct config_item *item, char *page)
page              570 drivers/target/target_core_fabric_configfs.c 	return core_alua_show_secondary_status(lun, page);
page              574 drivers/target/target_core_fabric_configfs.c 		struct config_item *item, const char *page, size_t count)
page              581 drivers/target/target_core_fabric_configfs.c 	return core_alua_store_secondary_status(lun, page, count);
page              585 drivers/target/target_core_fabric_configfs.c 		struct config_item *item, char *page)
page              592 drivers/target/target_core_fabric_configfs.c 	return core_alua_show_secondary_write_metadata(lun, page);
page              596 drivers/target/target_core_fabric_configfs.c 		struct config_item *item, const char *page, size_t count)
page              603 drivers/target/target_core_fabric_configfs.c 	return core_alua_store_secondary_write_metadata(lun, page, count);
page              715 drivers/target/target_core_file.c 		const char *page, ssize_t count)
page              722 drivers/target/target_core_file.c 	opts = kstrdup(page, GFP_KERNEL);
page              532 drivers/target/target_core_iblock.c 		const char *page, ssize_t count)
page              540 drivers/target/target_core_iblock.c 	opts = kstrdup(page, GFP_KERNEL);
page              660 drivers/target/target_core_iblock.c 		rc = bio_integrity_add_page(bio, miter->page, len,
page              669 drivers/target/target_core_iblock.c 			  miter->page, len, offset_in_page(miter->addr));
page              718 drivers/target/target_core_pscsi.c 		const char *page, ssize_t count)
page              726 drivers/target/target_core_pscsi.c 	opts = kstrdup(page, GFP_KERNEL);
page              851 drivers/target/target_core_pscsi.c 	struct page *page;
page              864 drivers/target/target_core_pscsi.c 		page = sg_page(sg);
page              869 drivers/target/target_core_pscsi.c 			page, len, off);
page              903 drivers/target/target_core_pscsi.c 				page, len, off);
page              906 drivers/target/target_core_pscsi.c 					bio, page, bytes, off);
page               66 drivers/target/target_core_rd.c 	struct page *pg;
page              118 drivers/target/target_core_rd.c 	struct page *pg;
page              339 drivers/target/target_core_rd.c static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
page              345 drivers/target/target_core_rd.c 	i = page / sg_per_table;
page              348 drivers/target/target_core_rd.c 		if ((sg_table->page_start_offset <= page) &&
page              349 drivers/target/target_core_rd.c 		    (sg_table->page_end_offset >= page))
page              354 drivers/target/target_core_rd.c 			page);
page              359 drivers/target/target_core_rd.c static struct rd_dev_sg_table *rd_get_prot_table(struct rd_dev *rd_dev, u32 page)
page              365 drivers/target/target_core_rd.c 	i = page / sg_per_table;
page              368 drivers/target/target_core_rd.c 		if ((sg_table->page_start_offset <= page) &&
page              369 drivers/target/target_core_rd.c 		     (sg_table->page_end_offset >= page))
page              374 drivers/target/target_core_rd.c 			page);
page              543 drivers/target/target_core_rd.c 		const char *page, ssize_t count)
page              550 drivers/target/target_core_rd.c 	opts = kstrdup(page, GFP_KERNEL);
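Note: rd_get_sg_table() and rd_get_prot_table() above both locate the scatterlist table covering a page index the same way: divide by sg_per_table to pick a starting table, then scan until the start/end offsets bracket the page. A simplified sketch under assumed struct names:

    struct rd_sg_table {
            u32 page_start_offset;
            u32 page_end_offset;
    };

    /* tables[] partitions the page index space, sg_per_table pages each */
    static struct rd_sg_table *rd_find_table(struct rd_sg_table *tables,
                    u32 table_count, u32 sg_per_table, u32 page)
    {
            u32 i;

            for (i = page / sg_per_table; i < table_count; i++) {
                    if (tables[i].page_start_offset <= page &&
                        tables[i].page_end_offset >= page)
                            return &tables[i];
            }
            return NULL;    /* caller reports the unlocatable page index */
    }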
page              535 drivers/target/target_core_sbc.c 			sg_set_page(&write_sg[i], m.page, block_size,
page              539 drivers/target/target_core_sbc.c 			sg_set_page(&write_sg[i], m.page, block_size,
page              667 drivers/target/target_core_spc.c 	uint8_t		page;
page              670 drivers/target/target_core_spc.c 	{ .page = 0x00, .emulate = spc_emulate_evpd_00 },
page              671 drivers/target/target_core_spc.c 	{ .page = 0x80, .emulate = spc_emulate_evpd_80 },
page              672 drivers/target/target_core_spc.c 	{ .page = 0x83, .emulate = spc_emulate_evpd_83 },
page              673 drivers/target/target_core_spc.c 	{ .page = 0x86, .emulate = spc_emulate_evpd_86 },
page              674 drivers/target/target_core_spc.c 	{ .page = 0xb0, .emulate = spc_emulate_evpd_b0 },
page              675 drivers/target/target_core_spc.c 	{ .page = 0xb1, .emulate = spc_emulate_evpd_b1 },
page              676 drivers/target/target_core_spc.c 	{ .page = 0xb2, .emulate = spc_emulate_evpd_b2 },
page              677 drivers/target/target_core_spc.c 	{ .page = 0xb3, .emulate = spc_emulate_evpd_b3 },
page              694 drivers/target/target_core_spc.c 			buf[p + 4] = evpd_handlers[p].page;
page              737 drivers/target/target_core_spc.c 		if (cdb[2] == evpd_handlers[p].page) {
page              921 drivers/target/target_core_spc.c 	uint8_t		page;
page              925 drivers/target/target_core_spc.c 	{ .page = 0x01, .subpage = 0x00, .emulate = spc_modesense_rwrecovery },
page              926 drivers/target/target_core_spc.c 	{ .page = 0x08, .subpage = 0x00, .emulate = spc_modesense_caching },
page              927 drivers/target/target_core_spc.c 	{ .page = 0x0a, .subpage = 0x00, .emulate = spc_modesense_control },
page              928 drivers/target/target_core_spc.c 	{ .page = 0x1c, .subpage = 0x00, .emulate = spc_modesense_informational_exceptions },
page              991 drivers/target/target_core_spc.c 	u8 page = cdb[2] & 0x3f;
page             1049 drivers/target/target_core_spc.c 	if (page == 0x3f) {
page             1074 drivers/target/target_core_spc.c 		if (modesense_handlers[i].page == page &&
page             1084 drivers/target/target_core_spc.c 	if (page != 0x03)
page             1086 drivers/target/target_core_spc.c 		       page, subpage);
page             1112 drivers/target/target_core_spc.c 	u8 page, subpage;
page             1136 drivers/target/target_core_spc.c 	page = buf[off] & 0x3f;
page             1140 drivers/target/target_core_spc.c 		if (modesense_handlers[i].page == page &&
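Note: target_core_spc.c dispatches INQUIRY EVPD and MODE SENSE requests through small static tables keyed on the page code, as the .page initializers above show. A hedged sketch of that dispatch shape (the handler names are placeholders):

    #include <linux/kernel.h>       /* ARRAY_SIZE() */

    struct se_cmd;                  /* opaque in this sketch */

    struct evpd_handler {
            u8 page;
            int (*emulate)(struct se_cmd *cmd, unsigned char *buf);
    };

    static int emulate_evpd_00(struct se_cmd *c, unsigned char *b);
    static int emulate_evpd_80(struct se_cmd *c, unsigned char *b);
    static int emulate_evpd_83(struct se_cmd *c, unsigned char *b);

    /* illustrative subset; the real table runs 0x00 through 0xb3 */
    static const struct evpd_handler handlers[] = {
            { .page = 0x00, .emulate = emulate_evpd_00 },   /* supported pages */
            { .page = 0x80, .emulate = emulate_evpd_80 },   /* unit serial */
            { .page = 0x83, .emulate = emulate_evpd_83 },   /* device ident */
    };

    static int dispatch_evpd(struct se_cmd *cmd, const unsigned char *cdb,
                    unsigned char *buf)
    {
            size_t i;

            /* cdb[2] carries the requested VPD page code */
            for (i = 0; i < ARRAY_SIZE(handlers); i++)
                    if (cdb[2] == handlers[i].page)
                            return handlers[i].emulate(cmd, buf);

            return -EINVAL;         /* unsupported page code */
    }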
page               51 drivers/target/target_core_stat.c static ssize_t target_stat_inst_show(struct config_item *item, char *page)
page               55 drivers/target/target_core_stat.c 	return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
page               58 drivers/target/target_core_stat.c static ssize_t target_stat_indx_show(struct config_item *item, char *page)
page               60 drivers/target/target_core_stat.c 	return snprintf(page, PAGE_SIZE, "%u\n", to_stat_dev(item)->dev_index);
page               63 drivers/target/target_core_stat.c static ssize_t target_stat_role_show(struct config_item *item, char *page)
page               65 drivers/target/target_core_stat.c 	return snprintf(page, PAGE_SIZE, "Target\n");
page               68 drivers/target/target_core_stat.c static ssize_t target_stat_ports_show(struct config_item *item, char *page)
page               70 drivers/target/target_core_stat.c 	return snprintf(page, PAGE_SIZE, "%u\n", to_stat_dev(item)->export_count);
page              101 drivers/target/target_core_stat.c static ssize_t target_stat_tgt_inst_show(struct config_item *item, char *page)
page              105 drivers/target/target_core_stat.c 	return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
page              108 drivers/target/target_core_stat.c static ssize_t target_stat_tgt_indx_show(struct config_item *item, char *page)
page              110 drivers/target/target_core_stat.c 	return snprintf(page, PAGE_SIZE, "%u\n", to_stat_tgt_dev(item)->dev_index);
page              114 drivers/target/target_core_stat.c 		char *page)
page              116 drivers/target/target_core_stat.c 	return snprintf(page, PAGE_SIZE, "%u\n", LU_COUNT);
page              120 drivers/target/target_core_stat.c 		char *page)
page              123 drivers/target/target_core_stat.c 		return snprintf(page, PAGE_SIZE, "activated");
page              125 drivers/target/target_core_stat.c 		return snprintf(page, PAGE_SIZE, "deactivated");
page              129 drivers/target/target_core_stat.c 		char *page)
page              138 drivers/target/target_core_stat.c 	return snprintf(page, PAGE_SIZE, "%u\n", non_accessible_lus);
page              142 drivers/target/target_core_stat.c 		char *page)
page              144 drivers/target/target_core_stat.c 	return snprintf(page, PAGE_SIZE, "%lu\n",
page              149 drivers/target/target_core_stat.c 		char *page)
page              151 drivers/target/target_core_stat.c 	return snprintf(page, PAGE_SIZE, "%lu\n",
page              156 drivers/target/target_core_stat.c 		char *page)
page              158 drivers/target/target_core_stat.c 	return snprintf(page, PAGE_SIZE, "%lu\n",
page              199 drivers/target/target_core_stat.c static ssize_t target_stat_lu_inst_show(struct config_item *item, char *page)
page              203 drivers/target/target_core_stat.c 	return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
page              206 drivers/target/target_core_stat.c static ssize_t target_stat_lu_dev_show(struct config_item *item, char *page)
page              208 drivers/target/target_core_stat.c 	return snprintf(page, PAGE_SIZE, "%u\n",
page              212 drivers/target/target_core_stat.c static ssize_t target_stat_lu_indx_show(struct config_item *item, char *page)
page              214 drivers/target/target_core_stat.c 	return snprintf(page, PAGE_SIZE, "%u\n", SCSI_LU_INDEX);
page              217 drivers/target/target_core_stat.c static ssize_t target_stat_lu_lun_show(struct config_item *item, char *page)
page              220 drivers/target/target_core_stat.c 	return snprintf(page, PAGE_SIZE, "%llu\n", (unsigned long long)0);
page              223 drivers/target/target_core_stat.c static ssize_t target_stat_lu_lu_name_show(struct config_item *item, char *page)
page              228 drivers/target/target_core_stat.c 	return snprintf(page, PAGE_SIZE, "%s\n",
page              233 drivers/target/target_core_stat.c static ssize_t target_stat_lu_vend_show(struct config_item *item, char *page)
page              237 drivers/target/target_core_stat.c 	return snprintf(page, PAGE_SIZE, "%-" __stringify(INQUIRY_VENDOR_LEN)
page              241 drivers/target/target_core_stat.c static ssize_t target_stat_lu_prod_show(struct config_item *item, char *page)
page              245 drivers/target/target_core_stat.c 	return snprintf(page, PAGE_SIZE, "%-" __stringify(INQUIRY_MODEL_LEN)
page              249 drivers/target/target_core_stat.c static ssize_t target_stat_lu_rev_show(struct config_item *item, char *page)
page              253 drivers/target/target_core_stat.c 	return snprintf(page, PAGE_SIZE, "%-" __stringify(INQUIRY_REVISION_LEN)
page              257 drivers/target/target_core_stat.c static ssize_t target_stat_lu_dev_type_show(struct config_item *item, char *page)
page              262 drivers/target/target_core_stat.c 	return snprintf(page, PAGE_SIZE, "%u\n",
page              266 drivers/target/target_core_stat.c static ssize_t target_stat_lu_status_show(struct config_item *item, char *page)
page              271 drivers/target/target_core_stat.c 	return snprintf(page, PAGE_SIZE, "%s\n",
page              276 drivers/target/target_core_stat.c 		char *page)
page              279 drivers/target/target_core_stat.c 	return snprintf(page, PAGE_SIZE, "exposed\n");
page              283 drivers/target/target_core_stat.c 		char *page)
page              288 drivers/target/target_core_stat.c 	return snprintf(page, PAGE_SIZE, "%lu\n",
page              293 drivers/target/target_core_stat.c 		char *page)
page              298 drivers/target/target_core_stat.c 	return snprintf(page, PAGE_SIZE, "%lu\n",
page              303 drivers/target/target_core_stat.c 		char *page)
page              308 drivers/target/target_core_stat.c 	return snprintf(page, PAGE_SIZE, "%lu\n",
page              312 drivers/target/target_core_stat.c static ssize_t target_stat_lu_resets_show(struct config_item *item, char *page)
page              317 drivers/target/target_core_stat.c 	return snprintf(page, PAGE_SIZE, "%lu\n",
page              322 drivers/target/target_core_stat.c 		char *page)
page              325 drivers/target/target_core_stat.c 	return snprintf(page, PAGE_SIZE, "%u\n", 0);
page              329 drivers/target/target_core_stat.c 		char *page)
page              332 drivers/target/target_core_stat.c 	return snprintf(page, PAGE_SIZE, "%u\n", 0);
page              336 drivers/target/target_core_stat.c 		char *page)
page              341 drivers/target/target_core_stat.c 	return snprintf(page, PAGE_SIZE, "%u\n", (u32)(((u32)dev->creation_time -
page              424 drivers/target/target_core_stat.c static ssize_t target_stat_port_inst_show(struct config_item *item, char *page)
page              433 drivers/target/target_core_stat.c 		ret = snprintf(page, PAGE_SIZE, "%u\n", dev->hba_index);
page              438 drivers/target/target_core_stat.c static ssize_t target_stat_port_dev_show(struct config_item *item, char *page)
page              447 drivers/target/target_core_stat.c 		ret = snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index);
page              452 drivers/target/target_core_stat.c static ssize_t target_stat_port_indx_show(struct config_item *item, char *page)
page              461 drivers/target/target_core_stat.c 		ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_rtpi);
page              466 drivers/target/target_core_stat.c static ssize_t target_stat_port_role_show(struct config_item *item, char *page)
page              475 drivers/target/target_core_stat.c 		ret = snprintf(page, PAGE_SIZE, "%s%u\n", "Device", dev->dev_index);
page              481 drivers/target/target_core_stat.c 		char *page)
page              491 drivers/target/target_core_stat.c 		ret = snprintf(page, PAGE_SIZE, "%u\n", 0);
page              528 drivers/target/target_core_stat.c 		char *page)
page              537 drivers/target/target_core_stat.c 		ret = snprintf(page, PAGE_SIZE, "%u\n", dev->hba_index);
page              543 drivers/target/target_core_stat.c 		char *page)
page              552 drivers/target/target_core_stat.c 		ret = snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index);
page              558 drivers/target/target_core_stat.c 		char *page)
page              567 drivers/target/target_core_stat.c 		ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_rtpi);
page              573 drivers/target/target_core_stat.c 		char *page)
page              583 drivers/target/target_core_stat.c 		ret = snprintf(page, PAGE_SIZE, "%sPort#%u\n",
page              591 drivers/target/target_core_stat.c 		char *page)
page              601 drivers/target/target_core_stat.c 		ret = snprintf(page, PAGE_SIZE, "%s%s%d\n",
page              609 drivers/target/target_core_stat.c 		char *page)
page              618 drivers/target/target_core_stat.c 		ret = snprintf(page, PAGE_SIZE, "%lu\n",
page              625 drivers/target/target_core_stat.c 		char *page)
page              634 drivers/target/target_core_stat.c 		ret = snprintf(page, PAGE_SIZE, "%u\n",
page              641 drivers/target/target_core_stat.c 		char *page)
page              650 drivers/target/target_core_stat.c 		ret = snprintf(page, PAGE_SIZE, "%u\n",
page              657 drivers/target/target_core_stat.c 		char *page)
page              667 drivers/target/target_core_stat.c 		ret = snprintf(page, PAGE_SIZE, "%u\n", 0);
page              712 drivers/target/target_core_stat.c 		char *page)
page              721 drivers/target/target_core_stat.c 		ret = snprintf(page, PAGE_SIZE, "%u\n", dev->hba_index);
page              727 drivers/target/target_core_stat.c 		char *page)
page              738 drivers/target/target_core_stat.c 		ret = snprintf(page, PAGE_SIZE, "scsiTransport%s\n",
page              746 drivers/target/target_core_stat.c 		char *page)
page              756 drivers/target/target_core_stat.c 		ret = snprintf(page, PAGE_SIZE, "%u\n",
page              763 drivers/target/target_core_stat.c 		char *page)
page              776 drivers/target/target_core_stat.c 		ret = snprintf(page, PAGE_SIZE, "%s+%s\n",
page              786 drivers/target/target_core_stat.c 		char *page)
page              796 drivers/target/target_core_stat.c 		ret = snprintf(page, PAGE_SIZE, "%u\n", tpg->proto_id);
page              855 drivers/target/target_core_stat.c 		char *page)
page              871 drivers/target/target_core_stat.c 	ret = snprintf(page, PAGE_SIZE, "%u\n",
page              878 drivers/target/target_core_stat.c 		char *page)
page              894 drivers/target/target_core_stat.c 	ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_index);
page              900 drivers/target/target_core_stat.c 		char *page)
page              916 drivers/target/target_core_stat.c 	ret = snprintf(page, PAGE_SIZE, "%u\n", tpg->se_tpg_tfo->tpg_get_tag(tpg));
page              922 drivers/target/target_core_stat.c 		char *page)
page              936 drivers/target/target_core_stat.c 	ret = snprintf(page, PAGE_SIZE, "%u\n", nacl->acl_index);
page              942 drivers/target/target_core_stat.c 		char *page)
page              956 drivers/target/target_core_stat.c 	ret = snprintf(page, PAGE_SIZE, "%u\n", 1);
page              962 drivers/target/target_core_stat.c 		char *page)
page              976 drivers/target/target_core_stat.c 	ret = snprintf(page, PAGE_SIZE, "%s\n", nacl->initiatorname);
page              982 drivers/target/target_core_stat.c 		char *page)
page              996 drivers/target/target_core_stat.c 	ret = snprintf(page, PAGE_SIZE, "%u\n", 0);
page             1002 drivers/target/target_core_stat.c 		char *page)
page             1016 drivers/target/target_core_stat.c 	ret = snprintf(page, PAGE_SIZE, "%u\n", deve->attach_count);
page             1022 drivers/target/target_core_stat.c 		char *page)
page             1036 drivers/target/target_core_stat.c 	ret = snprintf(page, PAGE_SIZE, "%lu\n",
page             1043 drivers/target/target_core_stat.c 		char *page)
page             1057 drivers/target/target_core_stat.c 	ret = snprintf(page, PAGE_SIZE, "%u\n",
page             1064 drivers/target/target_core_stat.c 		char *page)
page             1078 drivers/target/target_core_stat.c 	ret = snprintf(page, PAGE_SIZE, "%u\n",
page             1085 drivers/target/target_core_stat.c 		char *page)
page             1099 drivers/target/target_core_stat.c 	ret = snprintf(page, PAGE_SIZE, "%u\n", 0);
page             1105 drivers/target/target_core_stat.c 		char *page)
page             1119 drivers/target/target_core_stat.c 	ret = snprintf(page, PAGE_SIZE, "%u\n", (u32)(((u32)deve->creation_time -
page             1126 drivers/target/target_core_stat.c 		char *page)
page             1140 drivers/target/target_core_stat.c 	ret = snprintf(page, PAGE_SIZE, "Ready\n");
page             1195 drivers/target/target_core_stat.c 		char *page)
page             1211 drivers/target/target_core_stat.c 	ret = snprintf(page, PAGE_SIZE, "%u\n",
page             1218 drivers/target/target_core_stat.c 		char *page)
page             1234 drivers/target/target_core_stat.c 	ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_index);
page             1240 drivers/target/target_core_stat.c 		char *page)
page             1256 drivers/target/target_core_stat.c 	ret = snprintf(page, PAGE_SIZE, "%u\n", tpg->se_tpg_tfo->tpg_get_tag(tpg));
page             1262 drivers/target/target_core_stat.c 		char *page)
page             1279 drivers/target/target_core_stat.c 	ret = snprintf(page, PAGE_SIZE, "%u\n",
page             1286 drivers/target/target_core_stat.c 		char *page)
page             1300 drivers/target/target_core_stat.c 	ret = snprintf(page, PAGE_SIZE, "%u\n", nacl->acl_index);
page             1306 drivers/target/target_core_stat.c 		char *page)
page             1328 drivers/target/target_core_stat.c 	ret = snprintf(page, PAGE_SIZE, "%s+i+%s\n", nacl->initiatorname, buf);
page              461 drivers/target/target_core_transport.c ssize_t target_show_dynamic_sessions(struct se_portal_group *se_tpg, char *page)
page              475 drivers/target/target_core_transport.c 		len += snprintf(page + len, PAGE_SIZE - len, "%s\n",
page             2467 drivers/target/target_core_transport.c 	struct page **pages;
page              488 drivers/target/target_core_user.c 	struct page *page;
page              495 drivers/target/target_core_user.c 	page = radix_tree_lookup(&udev->data_blocks, dbi);
page              496 drivers/target/target_core_user.c 	if (!page) {
page              502 drivers/target/target_core_user.c 		page = alloc_page(GFP_KERNEL);
page              503 drivers/target/target_core_user.c 		if (!page)
page              506 drivers/target/target_core_user.c 		ret = radix_tree_insert(&udev->data_blocks, dbi, page);
page              519 drivers/target/target_core_user.c 	__free_page(page);
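Note: target_core_user.c keeps its data-area backing pages in a radix tree indexed by block number, as the lines above show: look up first, allocate and insert on a miss, and free the page if the insert fails. A condensed sketch of that lookup-or-allocate step (locking elided):

    #include <linux/radix-tree.h>
    #include <linux/gfp.h>
    #include <linux/mm.h>

    static struct page *get_or_alloc_block(struct radix_tree_root *blocks,
                    unsigned long dbi)
    {
            struct page *page = radix_tree_lookup(blocks, dbi);

            if (page)
                    return page;

            page = alloc_page(GFP_KERNEL);
            if (!page)
                    return NULL;

            if (radix_tree_insert(blocks, dbi, page)) {
                    __free_page(page);      /* insert failed; drop the orphan */
                    return NULL;
            }
            return page;
    }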
page              537 drivers/target/target_core_user.c static inline struct page *
page              672 drivers/target/target_core_user.c 	struct page *page;
page              684 drivers/target/target_core_user.c 				page = tcmu_get_block_page(udev, dbi);
page              685 drivers/target/target_core_user.c 				to = kmap_atomic(page);
page              747 drivers/target/target_core_user.c 	struct page *page;
page              779 drivers/target/target_core_user.c 				page = tcmu_get_block_page(udev, dbi);
page              780 drivers/target/target_core_user.c 				from = kmap_atomic(page);
page             1503 drivers/target/target_core_user.c static struct page *tcmu_try_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
page             1505 drivers/target/target_core_user.c 	struct page *page;
page             1508 drivers/target/target_core_user.c 	page = tcmu_get_block_page(udev, dbi);
page             1509 drivers/target/target_core_user.c 	if (likely(page)) {
page             1511 drivers/target/target_core_user.c 		return page;
page             1520 drivers/target/target_core_user.c 	page = NULL;
page             1523 drivers/target/target_core_user.c 	return page;
page             1530 drivers/target/target_core_user.c 	struct page *page;
page             1547 drivers/target/target_core_user.c 		page = vmalloc_to_page(addr);
page             1553 drivers/target/target_core_user.c 		page = tcmu_try_get_block_page(udev, dbi);
page             1554 drivers/target/target_core_user.c 		if (!page)
page             1558 drivers/target/target_core_user.c 	get_page(page);
page             1559 drivers/target/target_core_user.c 	vmf->page = page;
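Note: the fault handler lines above resolve an mmap offset either to a vmalloc'd ring page or to a data block, then publish it through vmf->page after taking a reference. A hedged sketch of that final step (my_udev, data_off, mb_addr, and try_get_block_page() are assumed names; the offset math is a reconstruction, not copied from the file):

    static vm_fault_t my_fault(struct vm_fault *vmf)
    {
            struct my_udev *udev = vmf->vma->vm_private_data;  /* hypothetical */
            unsigned long offset = vmf->pgoff << PAGE_SHIFT;
            struct page *page;

            if (offset < udev->data_off)
                    /* mailbox and command ring live in vmalloc space */
                    page = vmalloc_to_page(udev->mb_addr + offset);
            else
                    page = try_get_block_page(udev,
                                    (offset - udev->data_off) / PAGE_SIZE);
            if (!page)
                    return VM_FAULT_SIGBUS;

            get_page(page);         /* the fault core expects a held reference */
            vmf->page = page;
            return 0;
    }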
page             1622 drivers/target/target_core_user.c 	struct page *page;
page             1625 drivers/target/target_core_user.c 		page = radix_tree_delete(blocks, i);
page             1626 drivers/target/target_core_user.c 		if (page) {
page             1627 drivers/target/target_core_user.c 			__free_page(page);
page             2154 drivers/target/target_core_user.c 		const char *page, ssize_t count)
page             2161 drivers/target/target_core_user.c 	opts = kstrdup(page, GFP_KERNEL);
page             2244 drivers/target/target_core_user.c static ssize_t tcmu_cmd_time_out_show(struct config_item *item, char *page)
page             2250 drivers/target/target_core_user.c 	return snprintf(page, PAGE_SIZE, "%lu\n", udev->cmd_time_out / MSEC_PER_SEC);
page             2253 drivers/target/target_core_user.c static ssize_t tcmu_cmd_time_out_store(struct config_item *item, const char *page,
page             2268 drivers/target/target_core_user.c 	ret = kstrtou32(page, 0, &val);
page             2277 drivers/target/target_core_user.c static ssize_t tcmu_qfull_time_out_show(struct config_item *item, char *page)
page             2283 drivers/target/target_core_user.c 	return snprintf(page, PAGE_SIZE, "%ld\n", udev->qfull_time_out <= 0 ?
page             2289 drivers/target/target_core_user.c 					 const char *page, size_t count)
page             2297 drivers/target/target_core_user.c 	ret = kstrtos32(page, 0, &val);
page             2313 drivers/target/target_core_user.c static ssize_t tcmu_max_data_area_mb_show(struct config_item *item, char *page)
page             2319 drivers/target/target_core_user.c 	return snprintf(page, PAGE_SIZE, "%u\n",
page             2324 drivers/target/target_core_user.c static ssize_t tcmu_dev_config_show(struct config_item *item, char *page)
page             2330 drivers/target/target_core_user.c 	return snprintf(page, PAGE_SIZE, "%s\n", udev->dev_config);
page             2354 drivers/target/target_core_user.c static ssize_t tcmu_dev_config_store(struct config_item *item, const char *page,
page             2362 drivers/target/target_core_user.c 	len = strlen(page);
page             2368 drivers/target/target_core_user.c 		ret = tcmu_send_dev_config_event(udev, page);
page             2373 drivers/target/target_core_user.c 		strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN);
page             2380 drivers/target/target_core_user.c 	strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN);
page             2386 drivers/target/target_core_user.c static ssize_t tcmu_dev_size_show(struct config_item *item, char *page)
page             2392 drivers/target/target_core_user.c 	return snprintf(page, PAGE_SIZE, "%llu\n", udev->dev_size);
page             2415 drivers/target/target_core_user.c static ssize_t tcmu_dev_size_store(struct config_item *item, const char *page,
page             2424 drivers/target/target_core_user.c 	ret = kstrtou64(page, 0, &val);
page             2442 drivers/target/target_core_user.c 		char *page)
page             2448 drivers/target/target_core_user.c 	return snprintf(page, PAGE_SIZE, "%d\n", udev->nl_reply_supported);
page             2452 drivers/target/target_core_user.c 		const char *page, size_t count)
page             2460 drivers/target/target_core_user.c 	ret = kstrtos8(page, 0, &val);
page             2470 drivers/target/target_core_user.c 					     char *page)
page             2475 drivers/target/target_core_user.c 	return snprintf(page, PAGE_SIZE, "%i\n", da->emulate_write_cache);
page             2498 drivers/target/target_core_user.c 					      const char *page, size_t count)
page             2506 drivers/target/target_core_user.c 	ret = kstrtou8(page, 0, &val);
page             2524 drivers/target/target_core_user.c static ssize_t tcmu_block_dev_show(struct config_item *item, char *page)
page             2532 drivers/target/target_core_user.c 		return snprintf(page, PAGE_SIZE, "%s\n", "blocked");
page             2534 drivers/target/target_core_user.c 		return snprintf(page, PAGE_SIZE, "%s\n", "unblocked");
page             2537 drivers/target/target_core_user.c static ssize_t tcmu_block_dev_store(struct config_item *item, const char *page,
page             2552 drivers/target/target_core_user.c 	ret = kstrtou8(page, 0, &val);
page             2569 drivers/target/target_core_user.c static ssize_t tcmu_reset_ring_store(struct config_item *item, const char *page,
page             2584 drivers/target/target_core_user.c 	ret = kstrtou8(page, 0, &val);
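The target_core_user.c handlers above all follow the standard configfs attribute convention: "page" is a kernel buffer one PAGE_SIZE long, so show handlers format into it with snprintf(page, PAGE_SIZE, ...) and store handlers parse it with a kstrtoX() helper and return the consumed count. A minimal hedged sketch of that pairing; the example_* names are illustrative stand-ins, not symbols from the driver:

        /* Hedged sketch of the configfs show/store pattern seen above; the
         * "example_" names are made up and stand in for driver state.
         */
        #include <linux/configfs.h>
        #include <linux/kernel.h>

        struct example_opts {
                struct config_item item;
                u32 qlen;
        };

        static inline struct example_opts *to_example_opts(struct config_item *item)
        {
                return container_of(item, struct example_opts, item);
        }

        static ssize_t example_qlen_show(struct config_item *item, char *page)
        {
                /* "page" is one kernel page; never format more than PAGE_SIZE. */
                return snprintf(page, PAGE_SIZE, "%u\n", to_example_opts(item)->qlen);
        }

        static ssize_t example_qlen_store(struct config_item *item,
                                          const char *page, size_t count)
        {
                u32 val;
                int ret;

                ret = kstrtou32(page, 0, &val); /* base 0 accepts decimal, octal, hex */
                if (ret)
                        return ret;

                to_example_opts(item)->qlen = val;
                return count;                   /* report the whole write consumed */
        }

        CONFIGFS_ATTR(example_, qlen);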
page              124 drivers/target/tcm_fc/tfc_conf.c static ssize_t ft_nacl_port_name_show(struct config_item *item, char *page)
page              130 drivers/target/tcm_fc/tfc_conf.c 	return ft_wwn_show(&acl->node_auth.port_name, page);
page              134 drivers/target/tcm_fc/tfc_conf.c 		const char *page, size_t count)
page              140 drivers/target/tcm_fc/tfc_conf.c 	return ft_wwn_store(&acl->node_auth.port_name, page, count);
page              144 drivers/target/tcm_fc/tfc_conf.c 		char *page)
page              150 drivers/target/tcm_fc/tfc_conf.c 	return ft_wwn_show(&acl->node_auth.node_name, page);
page              154 drivers/target/tcm_fc/tfc_conf.c 		const char *page, size_t count)
page              160 drivers/target/tcm_fc/tfc_conf.c 	return ft_wwn_store(&acl->node_auth.node_name, page, count);
page              167 drivers/target/tcm_fc/tfc_conf.c 		char *page)
page              169 drivers/target/tcm_fc/tfc_conf.c 	return snprintf(page, PAGE_SIZE, "%s", acl_to_nacl(item)->acl_tag);
page              173 drivers/target/tcm_fc/tfc_conf.c 		const char *page, size_t count)
page              178 drivers/target/tcm_fc/tfc_conf.c 	ret = core_tpg_set_initiator_node_tag(se_nacl->se_tpg, se_nacl, page);
page              369 drivers/target/tcm_fc/tfc_conf.c static ssize_t ft_wwn_version_show(struct config_item *item, char *page)
page              371 drivers/target/tcm_fc/tfc_conf.c 	return sprintf(page, "TCM FC " FT_VERSION " on %s/%s on "
page               58 drivers/target/tcm_fc/tfc_io.c 	struct page *page = NULL;
page               85 drivers/target/tcm_fc/tfc_io.c 		page = sg_page(sg);
page              103 drivers/target/tcm_fc/tfc_io.c 			page = sg_page(sg);
page              132 drivers/target/tcm_fc/tfc_io.c 			BUG_ON(!page);
page              133 drivers/target/tcm_fc/tfc_io.c 			get_page(page);
page              136 drivers/target/tcm_fc/tfc_io.c 					   page, off_in_page, tlen);
page              139 drivers/target/tcm_fc/tfc_io.c 			fp_skb(fp)->truesize += page_size(page);
page              141 drivers/target/tcm_fc/tfc_io.c 			BUG_ON(!page);
page              142 drivers/target/tcm_fc/tfc_io.c 			from = kmap_atomic(page + (mem_off >> PAGE_SHIFT));
page              209 drivers/target/tcm_fc/tfc_io.c 	struct page *page = NULL;
page              281 drivers/target/tcm_fc/tfc_io.c 		page = sg_page(sg);
page              289 drivers/target/tcm_fc/tfc_io.c 			page = sg_page(sg);
page              302 drivers/target/tcm_fc/tfc_io.c 		to = kmap_atomic(page + (mem_off >> PAGE_SHIFT));
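The tfc_io.c lines above show the scatterlist idiom for entries that span more than one page: sg_page() returns the first struct page of the entry, and an offset beyond PAGE_SIZE is resolved by indexing forward from that page before kmap_atomic(). A hedged sketch with a made-up helper name:

        /* Illustrative helper only; mirrors the page-indexing arithmetic in
         * tfc_io.c but is not a function from that file.
         */
        #include <linux/highmem.h>
        #include <linux/kernel.h>
        #include <linux/scatterlist.h>
        #include <linux/string.h>

        static void example_copy_from_sg(struct scatterlist *sg, size_t mem_off,
                                         void *dst, size_t len)
        {
                struct page *page = sg_page(sg);        /* first page of the entry */
                size_t off_in_page = mem_off & ~PAGE_MASK;
                void *from;

                /* Step to the struct page that actually holds mem_off, then map
                 * just that page; the copy must not cross the page boundary.
                 */
                from = kmap_atomic(page + (mem_off >> PAGE_SHIFT));
                memcpy(dst, from + off_in_page,
                       min_t(size_t, len, PAGE_SIZE - off_in_page));
                kunmap_atomic(from);
        }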
page              456 drivers/tee/optee/call.c void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages,
page              573 drivers/tee/optee/call.c 		       struct page **pages, size_t num_pages,
page              648 drivers/tee/optee/call.c 			    struct page **pages, size_t num_pages,
page              157 drivers/tee/optee/optee_private.h 		       struct page **pages, size_t num_pages,
page              162 drivers/tee/optee/optee_private.h 			    struct page **pages, size_t num_pages,
page              173 drivers/tee/optee/optee_private.h void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages,
page              243 drivers/tee/optee/rpc.c 		struct page **pages;
page               19 drivers/tee/optee/shm_pool.c 	struct page *page;
page               22 drivers/tee/optee/shm_pool.c 	page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
page               23 drivers/tee/optee/shm_pool.c 	if (!page)
page               26 drivers/tee/optee/shm_pool.c 	shm->kaddr = page_address(page);
page               27 drivers/tee/optee/shm_pool.c 	shm->paddr = page_to_phys(page);
page               32 drivers/tee/optee/shm_pool.c 		struct page **pages;
page               39 drivers/tee/optee/shm_pool.c 			pages[i] = page;
page               40 drivers/tee/optee/shm_pool.c 			page++;
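The shm_pool.c excerpt allocates one physically contiguous run of 2^order pages and then records every constituent page by pointer arithmetic on the first struct page. A hedged sketch of that bookkeeping (example_ names are invented):

        /* Sketch only: a contiguous alloc_pages() result fanned out into a
         * struct page * array, as in optee's shm_pool.c.
         */
        #include <linux/gfp.h>
        #include <linux/mm.h>
        #include <linux/slab.h>

        static struct page **example_alloc_shm(unsigned int order)
        {
                size_t i, nr_pages = 1UL << order;
                struct page *page, **pages;

                page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
                if (!page)
                        return NULL;

                pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
                if (!pages) {
                        __free_pages(page, order);
                        return NULL;
                }

                /* The run is contiguous, so successive pages are just page + i. */
                for (i = 0; i < nr_pages; i++)
                        pages[i] = page + i;

                return pages;
        }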
page              521 drivers/tty/amiserial.c 	unsigned long page;
page              523 drivers/tty/amiserial.c 	page = get_zeroed_page(GFP_KERNEL);
page              524 drivers/tty/amiserial.c 	if (!page)
page              530 drivers/tty/amiserial.c 		free_page(page);
page              535 drivers/tty/amiserial.c 		free_page(page);
page              537 drivers/tty/amiserial.c 		info->xmit.buf = (unsigned char *) page;
page             1267 drivers/tty/cyclades.c 	unsigned long page;
page             1272 drivers/tty/cyclades.c 	page = get_zeroed_page(GFP_KERNEL);
page             1273 drivers/tty/cyclades.c 	if (!page)
page             1287 drivers/tty/cyclades.c 		free_page(page);
page             1289 drivers/tty/cyclades.c 		info->port.xmit_buf = (unsigned char *)page;
page             1382 drivers/tty/cyclades.c 	free_page(page);
page              862 drivers/tty/mxser.c 	unsigned long page;
page              865 drivers/tty/mxser.c 	page = __get_free_page(GFP_KERNEL);
page              866 drivers/tty/mxser.c 	if (!page)
page              873 drivers/tty/mxser.c 		free_page(page);
page              877 drivers/tty/mxser.c 	info->port.xmit_buf = (unsigned char *) page;
page              877 drivers/tty/rocket.c 	unsigned long page;
page              884 drivers/tty/rocket.c 	page = __get_free_page(GFP_KERNEL);
page              885 drivers/tty/rocket.c 	if (!page)
page              892 drivers/tty/rocket.c 		free_page(page);
page              894 drivers/tty/rocket.c 		info->xmit_buf = (unsigned char *) page;
page              185 drivers/tty/serial/serial_core.c 	unsigned long page;
page              201 drivers/tty/serial/serial_core.c 	page = get_zeroed_page(GFP_KERNEL);
page              202 drivers/tty/serial/serial_core.c 	if (!page)
page              207 drivers/tty/serial/serial_core.c 		state->xmit.buf = (unsigned char *) page;
page              216 drivers/tty/serial/serial_core.c 		free_page(page);
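amiserial.c, cyclades.c, mxser.c, rocket.c and serial_core.c above all allocate their transmit buffer the same way: one (zeroed) page, claimed under the port lock so that a racing open frees its duplicate instead of leaking it. A condensed hedged sketch; the locking primitive varies per driver:

        /* Sketch of the shared tty xmit-buffer idiom; "example_port" and its
         * lock are stand-ins for the per-driver port structure.
         */
        #include <linux/errno.h>
        #include <linux/gfp.h>
        #include <linux/mm.h>
        #include <linux/spinlock.h>

        struct example_port {
                spinlock_t lock;
                unsigned char *xmit_buf;
        };

        static int example_alloc_xmit_buf(struct example_port *info)
        {
                unsigned long page = get_zeroed_page(GFP_KERNEL);

                if (!page)
                        return -ENOMEM;

                spin_lock_irq(&info->lock);
                if (info->xmit_buf)
                        free_page(page);        /* lost the race; keep the old buffer */
                else
                        info->xmit_buf = (unsigned char *)page;
                spin_unlock_irq(&info->lock);

                return 0;
        }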
page              670 drivers/uio/uio.c 	struct page *page;
page              696 drivers/uio/uio.c 		page = virt_to_page(addr);
page              698 drivers/uio/uio.c 		page = vmalloc_to_page(addr);
page              699 drivers/uio/uio.c 	get_page(page);
page              700 drivers/uio/uio.c 	vmf->page = page;
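The uio.c fault handler above picks the translation primitive by memory type: virt_to_page() for directly mapped (kmalloc-style) memory, vmalloc_to_page() for vmalloc ranges, then takes a reference before handing the page to the fault core. A hedged sketch (example_mem is invented):

        /* Sketch of a uio.c-style .fault handler; not the driver's actual code. */
        #include <linux/mm.h>
        #include <linux/vmalloc.h>

        struct example_mem {
                void *addr;             /* base of the region backing the vma */
                bool is_vmalloc;
        };

        static vm_fault_t example_vma_fault(struct vm_fault *vmf)
        {
                struct example_mem *mem = vmf->vma->vm_private_data;
                void *addr = mem->addr + (vmf->pgoff << PAGE_SHIFT);
                struct page *page;

                if (mem->is_vmalloc)
                        page = vmalloc_to_page(addr);   /* walk the vmalloc mapping */
                else
                        page = virt_to_page(addr);      /* linear-mapped memory */

                get_page(page);         /* the core drops this ref on unmap */
                vmf->page = page;
                return 0;
        }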
page              160 drivers/usb/atm/usbatm.c static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t *pos, char *page);
page              728 drivers/usb/atm/usbatm.c static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t *pos, char *page)
page              737 drivers/usb/atm/usbatm.c 		return sprintf(page, "%s\n", instance->description);
page              740 drivers/usb/atm/usbatm.c 		return sprintf(page, "MAC: %pM\n", atm_dev->esi);
page              743 drivers/usb/atm/usbatm.c 		return sprintf(page,
page              753 drivers/usb/atm/usbatm.c 			return sprintf(page, "Disconnected\n");
page              757 drivers/usb/atm/usbatm.c 				return sprintf(page, "Line up\n");
page              759 drivers/usb/atm/usbatm.c 				return sprintf(page, "Line down\n");
page              761 drivers/usb/atm/usbatm.c 				return sprintf(page, "Line state unknown\n");
page              371 drivers/usb/chipidea/udc.c 		node->ptr->page[0] = cpu_to_le32(temp);
page              373 drivers/usb/chipidea/udc.c 			u32 page = temp + i * CI_HDRC_PAGE_SIZE;
page              374 drivers/usb/chipidea/udc.c 			page &= ~TD_RESERVED_MASK;
page              375 drivers/usb/chipidea/udc.c 			node->ptr->page[i] = cpu_to_le32(page);
page               36 drivers/usb/chipidea/udc.h 	__le32 page[5];
page              133 drivers/usb/gadget/configfs.c 			char *page)	\
page              135 drivers/usb/gadget/configfs.c 	return sprintf(page, "0x%02x\n", \
page              141 drivers/usb/gadget/configfs.c 			char *page)	\
page              143 drivers/usb/gadget/configfs.c 	return sprintf(page, "0x%04x\n", \
page              150 drivers/usb/gadget/configfs.c 		const char *page, size_t len)		\
page              154 drivers/usb/gadget/configfs.c 	ret = kstrtou8(page, 0, &val);			\
page              163 drivers/usb/gadget/configfs.c 		const char *page, size_t len)		\
page              167 drivers/usb/gadget/configfs.c 	ret = kstrtou16(page, 0, &val);			\
page              201 drivers/usb/gadget/configfs.c 		const char *page, size_t len)
page              206 drivers/usb/gadget/configfs.c 	ret = kstrtou16(page, 0, &bcdDevice);
page              218 drivers/usb/gadget/configfs.c 		const char *page, size_t len)
page              223 drivers/usb/gadget/configfs.c 	ret = kstrtou16(page, 0, &bcdUSB);
page              234 drivers/usb/gadget/configfs.c static ssize_t gadget_dev_desc_UDC_show(struct config_item *item, char *page)
page              238 drivers/usb/gadget/configfs.c 	return sprintf(page, "%s\n", udc_name ?: "");
page              257 drivers/usb/gadget/configfs.c 		const char *page, size_t len)
page              263 drivers/usb/gadget/configfs.c 	if (strlen(page) < len)
page              266 drivers/usb/gadget/configfs.c 	name = kstrdup(page, GFP_KERNEL);
page              462 drivers/usb/gadget/configfs.c 		char *page)
page              464 drivers/usb/gadget/configfs.c 	return sprintf(page, "%u\n", to_config_usb_cfg(item)->c.MaxPower);
page              468 drivers/usb/gadget/configfs.c 		const char *page, size_t len)
page              472 drivers/usb/gadget/configfs.c 	ret = kstrtou16(page, 0, &val);
page              482 drivers/usb/gadget/configfs.c 		char *page)
page              484 drivers/usb/gadget/configfs.c 	return sprintf(page, "0x%02x\n",
page              489 drivers/usb/gadget/configfs.c 		const char *page, size_t len)
page              493 drivers/usb/gadget/configfs.c 	ret = kstrtou8(page, 0, &val);
page              745 drivers/usb/gadget/configfs.c static ssize_t os_desc_use_show(struct config_item *item, char *page)
page              747 drivers/usb/gadget/configfs.c 	return sprintf(page, "%d\n",
page              751 drivers/usb/gadget/configfs.c static ssize_t os_desc_use_store(struct config_item *item, const char *page,
page              759 drivers/usb/gadget/configfs.c 	ret = strtobool(page, &use);
page              769 drivers/usb/gadget/configfs.c static ssize_t os_desc_b_vendor_code_show(struct config_item *item, char *page)
page              771 drivers/usb/gadget/configfs.c 	return sprintf(page, "0x%02x\n",
page              776 drivers/usb/gadget/configfs.c 					   const char *page, size_t len)
page              783 drivers/usb/gadget/configfs.c 	ret = kstrtou8(page, 0, &b_vendor_code);
page              793 drivers/usb/gadget/configfs.c static ssize_t os_desc_qw_sign_show(struct config_item *item, char *page)
page              799 drivers/usb/gadget/configfs.c 			      UTF16_LITTLE_ENDIAN, page, PAGE_SIZE - 1);
page              800 drivers/usb/gadget/configfs.c 	page[res++] = '\n';
page              805 drivers/usb/gadget/configfs.c static ssize_t os_desc_qw_sign_store(struct config_item *item, const char *page,
page              812 drivers/usb/gadget/configfs.c 	if (page[l - 1] == '\n')
page              816 drivers/usb/gadget/configfs.c 	res = utf8s_to_utf16s(page, l,
page              911 drivers/usb/gadget/configfs.c static ssize_t ext_prop_type_show(struct config_item *item, char *page)
page              913 drivers/usb/gadget/configfs.c 	return sprintf(page, "%d\n", to_usb_os_desc_ext_prop(item)->type);
page              917 drivers/usb/gadget/configfs.c 				   const char *page, size_t len)
page              926 drivers/usb/gadget/configfs.c 	ret = kstrtou8(page, 0, &type);
page              957 drivers/usb/gadget/configfs.c static ssize_t ext_prop_data_show(struct config_item *item, char *page)
page              966 drivers/usb/gadget/configfs.c 	memcpy(page, ext_prop->data, len);
page              972 drivers/usb/gadget/configfs.c 				   const char *page, size_t len)
page              979 drivers/usb/gadget/configfs.c 	if (page[len - 1] == '\n' || page[len - 1] == '\0')
page              981 drivers/usb/gadget/configfs.c 	new_data = kmemdup(page, len, GFP_KERNEL);
page             1092 drivers/usb/gadget/configfs.c 					     char *page)
page             1094 drivers/usb/gadget/configfs.c 	memcpy(page, to_usb_os_desc(item)->ext_compat_id, 8);
page             1099 drivers/usb/gadget/configfs.c 					      const char *page, size_t len)
page             1105 drivers/usb/gadget/configfs.c 	if (page[l - 1] == '\n')
page             1109 drivers/usb/gadget/configfs.c 	memcpy(desc->ext_compat_id, page, l);
page             1118 drivers/usb/gadget/configfs.c 						 char *page)
page             1120 drivers/usb/gadget/configfs.c 	memcpy(page, to_usb_os_desc(item)->ext_compat_id + 8, 8);
page             1125 drivers/usb/gadget/configfs.c 						  const char *page, size_t len)
page             1131 drivers/usb/gadget/configfs.c 	if (page[l - 1] == '\n')
page             1135 drivers/usb/gadget/configfs.c 	memcpy(desc->ext_compat_id + 8, page, l);
page              774 drivers/usb/gadget/function/f_acm.c static ssize_t f_acm_port_num_show(struct config_item *item, char *page)
page              776 drivers/usb/gadget/function/f_acm.c 	return sprintf(page, "%u\n", to_f_serial_opts(item)->port_num);
page              766 drivers/usb/gadget/function/f_fs.c 	struct page **pages;
page              776 drivers/usb/gadget/function/f_fs.c 	pages = kvmalloc_array(n_pages, sizeof(struct page *), GFP_KERNEL);
page              881 drivers/usb/gadget/function/f_hid.c static ssize_t f_hid_opts_##name##_show(struct config_item *item, char *page)\
page              887 drivers/usb/gadget/function/f_hid.c 	result = sprintf(page, "%d\n", opts->name);			\
page              894 drivers/usb/gadget/function/f_hid.c 					 const char *page, size_t len)	\
page              906 drivers/usb/gadget/function/f_hid.c 	ret = kstrtou##prec(page, 0, &num);				\
page              928 drivers/usb/gadget/function/f_hid.c static ssize_t f_hid_opts_report_desc_show(struct config_item *item, char *page)
page              935 drivers/usb/gadget/function/f_hid.c 	memcpy(page, opts->report_desc, opts->report_desc_length);
page              942 drivers/usb/gadget/function/f_hid.c 					    const char *page, size_t len)
page              956 drivers/usb/gadget/function/f_hid.c 	d = kmemdup(page, len, GFP_KERNEL);
page              973 drivers/usb/gadget/function/f_hid.c static ssize_t f_hid_opts_dev_show(struct config_item *item, char *page)
page              977 drivers/usb/gadget/function/f_hid.c 	return sprintf(page, "%d:%d\n", major, opts->minor);
page              473 drivers/usb/gadget/function/f_loopback.c static ssize_t f_lb_opts_qlen_show(struct config_item *item, char *page)
page              479 drivers/usb/gadget/function/f_loopback.c 	result = sprintf(page, "%d\n", opts->qlen);
page              486 drivers/usb/gadget/function/f_loopback.c 				    const char *page, size_t len)
page              498 drivers/usb/gadget/function/f_loopback.c 	ret = kstrtou32(page, 0, &num);
page              511 drivers/usb/gadget/function/f_loopback.c static ssize_t f_lb_opts_bulk_buflen_show(struct config_item *item, char *page)
page              517 drivers/usb/gadget/function/f_loopback.c 	result = sprintf(page, "%d\n", opts->bulk_buflen);
page              524 drivers/usb/gadget/function/f_loopback.c 				    const char *page, size_t len)
page              536 drivers/usb/gadget/function/f_loopback.c 	ret = kstrtou32(page, 0, &num);
page             3032 drivers/usb/gadget/function/f_mass_storage.c static ssize_t fsg_lun_opts_file_show(struct config_item *item, char *page)
page             3037 drivers/usb/gadget/function/f_mass_storage.c 	return fsg_show_file(opts->lun, &fsg_opts->common->filesem, page);
page             3041 drivers/usb/gadget/function/f_mass_storage.c 				       const char *page, size_t len)
page             3046 drivers/usb/gadget/function/f_mass_storage.c 	return fsg_store_file(opts->lun, &fsg_opts->common->filesem, page, len);
page             3051 drivers/usb/gadget/function/f_mass_storage.c static ssize_t fsg_lun_opts_ro_show(struct config_item *item, char *page)
page             3053 drivers/usb/gadget/function/f_mass_storage.c 	return fsg_show_ro(to_fsg_lun_opts(item)->lun, page);
page             3057 drivers/usb/gadget/function/f_mass_storage.c 				       const char *page, size_t len)
page             3062 drivers/usb/gadget/function/f_mass_storage.c 	return fsg_store_ro(opts->lun, &fsg_opts->common->filesem, page, len);
page             3068 drivers/usb/gadget/function/f_mass_storage.c 					   char *page)
page             3070 drivers/usb/gadget/function/f_mass_storage.c 	return fsg_show_removable(to_fsg_lun_opts(item)->lun, page);
page             3074 drivers/usb/gadget/function/f_mass_storage.c 				       const char *page, size_t len)
page             3076 drivers/usb/gadget/function/f_mass_storage.c 	return fsg_store_removable(to_fsg_lun_opts(item)->lun, page, len);
page             3081 drivers/usb/gadget/function/f_mass_storage.c static ssize_t fsg_lun_opts_cdrom_show(struct config_item *item, char *page)
page             3083 drivers/usb/gadget/function/f_mass_storage.c 	return fsg_show_cdrom(to_fsg_lun_opts(item)->lun, page);
page             3087 drivers/usb/gadget/function/f_mass_storage.c 				       const char *page, size_t len)
page             3092 drivers/usb/gadget/function/f_mass_storage.c 	return fsg_store_cdrom(opts->lun, &fsg_opts->common->filesem, page,
page             3098 drivers/usb/gadget/function/f_mass_storage.c static ssize_t fsg_lun_opts_nofua_show(struct config_item *item, char *page)
page             3100 drivers/usb/gadget/function/f_mass_storage.c 	return fsg_show_nofua(to_fsg_lun_opts(item)->lun, page);
page             3104 drivers/usb/gadget/function/f_mass_storage.c 				       const char *page, size_t len)
page             3106 drivers/usb/gadget/function/f_mass_storage.c 	return fsg_store_nofua(to_fsg_lun_opts(item)->lun, page, len);
page             3112 drivers/usb/gadget/function/f_mass_storage.c 						char *page)
page             3114 drivers/usb/gadget/function/f_mass_storage.c 	return fsg_show_inquiry_string(to_fsg_lun_opts(item)->lun, page);
page             3118 drivers/usb/gadget/function/f_mass_storage.c 						 const char *page, size_t len)
page             3120 drivers/usb/gadget/function/f_mass_storage.c 	return fsg_store_inquiry_string(to_fsg_lun_opts(item)->lun, page, len);
page             3235 drivers/usb/gadget/function/f_mass_storage.c static ssize_t fsg_opts_stall_show(struct config_item *item, char *page)
page             3241 drivers/usb/gadget/function/f_mass_storage.c 	result = sprintf(page, "%d", opts->common->can_stall);
page             3247 drivers/usb/gadget/function/f_mass_storage.c static ssize_t fsg_opts_stall_store(struct config_item *item, const char *page,
page             3261 drivers/usb/gadget/function/f_mass_storage.c 	ret = strtobool(page, &stall);
page             3275 drivers/usb/gadget/function/f_mass_storage.c static ssize_t fsg_opts_num_buffers_show(struct config_item *item, char *page)
page             3281 drivers/usb/gadget/function/f_mass_storage.c 	result = sprintf(page, "%d", opts->common->fsg_num_buffers);
page             3288 drivers/usb/gadget/function/f_mass_storage.c 					  const char *page, size_t len)
page             3299 drivers/usb/gadget/function/f_mass_storage.c 	ret = kstrtou8(page, 0, &num);
page             1086 drivers/usb/gadget/function/f_midi.c static ssize_t f_midi_opts_##name##_show(struct config_item *item, char *page) \
page             1092 drivers/usb/gadget/function/f_midi.c 	result = sprintf(page, "%d\n", opts->name);			\
page             1099 drivers/usb/gadget/function/f_midi.c 					 const char *page, size_t len)	\
page             1111 drivers/usb/gadget/function/f_midi.c 	ret = kstrtou32(page, 0, &num);					\
page             1135 drivers/usb/gadget/function/f_midi.c static ssize_t f_midi_opts_id_show(struct config_item *item, char *page)
page             1142 drivers/usb/gadget/function/f_midi.c 		result = strlcpy(page, opts->id, PAGE_SIZE);
page             1144 drivers/usb/gadget/function/f_midi.c 		page[0] = 0;
page             1154 drivers/usb/gadget/function/f_midi.c 				    const char *page, size_t len)
page             1166 drivers/usb/gadget/function/f_midi.c 	c = kstrndup(page, len, GFP_KERNEL);
page              398 drivers/usb/gadget/function/f_obex.c static ssize_t f_obex_port_num_show(struct config_item *item, char *page)
page              400 drivers/usb/gadget/function/f_obex.c 	return sprintf(page, "%u\n", to_f_serial_opts(item)->port_num);
page              294 drivers/usb/gadget/function/f_phonet.c 	struct page *page;
page              297 drivers/usb/gadget/function/f_phonet.c 	page = __dev_alloc_page(gfp_flags | __GFP_NOMEMALLOC);
page              298 drivers/usb/gadget/function/f_phonet.c 	if (!page)
page              301 drivers/usb/gadget/function/f_phonet.c 	req->buf = page_address(page);
page              303 drivers/usb/gadget/function/f_phonet.c 	req->context = page;
page              307 drivers/usb/gadget/function/f_phonet.c 		put_page(page);
page              315 drivers/usb/gadget/function/f_phonet.c 	struct page *page = req->context;
page              337 drivers/usb/gadget/function/f_phonet.c 			skb_put_data(skb, page_address(page), 1);
page              340 drivers/usb/gadget/function/f_phonet.c 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
page              342 drivers/usb/gadget/function/f_phonet.c 		page = NULL;
page              369 drivers/usb/gadget/function/f_phonet.c 	if (page)
page              370 drivers/usb/gadget/function/f_phonet.c 		put_page(page);
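The f_phonet.c receive path above backs each USB request with a freshly allocated page and later donates that page to the skb as a fragment, avoiding a copy; clearing the local pointer marks the ownership transfer so the final put_page() is skipped. A hedged sketch of the submit side:

        /* Sketch of page-backed rx submission as in f_phonet.c; error handling
         * and the completion side (the skb_add_rx_frag() transfer) are elided.
         */
        #include <linux/errno.h>
        #include <linux/gfp.h>
        #include <linux/mm.h>
        #include <linux/skbuff.h>
        #include <linux/usb/gadget.h>

        static int example_rx_submit(struct usb_request *req, gfp_t gfp_flags)
        {
                struct page *page;

                page = __dev_alloc_page(gfp_flags | __GFP_NOMEMALLOC);
                if (!page)
                        return -ENOMEM;

                req->buf = page_address(page);  /* the controller DMAs into the page */
                req->length = PAGE_SIZE;
                req->context = page;            /* recovered in the completion handler */
                return 0;
        }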
page              589 drivers/usb/gadget/function/f_phonet.c static ssize_t f_phonet_ifname_show(struct config_item *item, char *page)
page              591 drivers/usb/gadget/function/f_phonet.c 	return gether_get_ifname(to_f_phonet_opts(item)->net, page, PAGE_SIZE);
page             1163 drivers/usb/gadget/function/f_printer.c 					      char *page)
page             1172 drivers/usb/gadget/function/f_printer.c 	result = strlcpy(page, opts->pnp_string, PAGE_SIZE);
page             1175 drivers/usb/gadget/function/f_printer.c 	} else if (page[result - 1] != '\n' && result + 1 < PAGE_SIZE) {
page             1176 drivers/usb/gadget/function/f_printer.c 		page[result++] = '\n';
page             1177 drivers/usb/gadget/function/f_printer.c 		page[result] = '\0';
page             1187 drivers/usb/gadget/function/f_printer.c 					       const char *page, size_t len)
page             1195 drivers/usb/gadget/function/f_printer.c 	new_pnp = kstrndup(page, len, GFP_KERNEL);
page             1216 drivers/usb/gadget/function/f_printer.c 					 char *page)
page             1222 drivers/usb/gadget/function/f_printer.c 	result = sprintf(page, "%d\n", opts->q_len);
page             1229 drivers/usb/gadget/function/f_printer.c 					  const char *page, size_t len)
page             1241 drivers/usb/gadget/function/f_printer.c 	ret = kstrtou16(page, 0, &num);
page              269 drivers/usb/gadget/function/f_serial.c static ssize_t f_serial_port_num_show(struct config_item *item, char *page)
page              271 drivers/usb/gadget/function/f_serial.c 	return sprintf(page, "%u\n", to_f_serial_opts(item)->port_num);
page              888 drivers/usb/gadget/function/f_sourcesink.c static ssize_t f_ss_opts_pattern_show(struct config_item *item, char *page)
page              894 drivers/usb/gadget/function/f_sourcesink.c 	result = sprintf(page, "%u\n", opts->pattern);
page              901 drivers/usb/gadget/function/f_sourcesink.c 				       const char *page, size_t len)
page              913 drivers/usb/gadget/function/f_sourcesink.c 	ret = kstrtou8(page, 0, &num);
page              931 drivers/usb/gadget/function/f_sourcesink.c static ssize_t f_ss_opts_isoc_interval_show(struct config_item *item, char *page)
page              937 drivers/usb/gadget/function/f_sourcesink.c 	result = sprintf(page, "%u\n", opts->isoc_interval);
page              944 drivers/usb/gadget/function/f_sourcesink.c 				       const char *page, size_t len)
page              956 drivers/usb/gadget/function/f_sourcesink.c 	ret = kstrtou8(page, 0, &num);
page              974 drivers/usb/gadget/function/f_sourcesink.c static ssize_t f_ss_opts_isoc_maxpacket_show(struct config_item *item, char *page)
page              980 drivers/usb/gadget/function/f_sourcesink.c 	result = sprintf(page, "%u\n", opts->isoc_maxpacket);
page              987 drivers/usb/gadget/function/f_sourcesink.c 				       const char *page, size_t len)
page              999 drivers/usb/gadget/function/f_sourcesink.c 	ret = kstrtou16(page, 0, &num);
page             1017 drivers/usb/gadget/function/f_sourcesink.c static ssize_t f_ss_opts_isoc_mult_show(struct config_item *item, char *page)
page             1023 drivers/usb/gadget/function/f_sourcesink.c 	result = sprintf(page, "%u\n", opts->isoc_mult);
page             1030 drivers/usb/gadget/function/f_sourcesink.c 				       const char *page, size_t len)
page             1042 drivers/usb/gadget/function/f_sourcesink.c 	ret = kstrtou8(page, 0, &num);
page             1060 drivers/usb/gadget/function/f_sourcesink.c static ssize_t f_ss_opts_isoc_maxburst_show(struct config_item *item, char *page)
page             1066 drivers/usb/gadget/function/f_sourcesink.c 	result = sprintf(page, "%u\n", opts->isoc_maxburst);
page             1073 drivers/usb/gadget/function/f_sourcesink.c 				       const char *page, size_t len)
page             1085 drivers/usb/gadget/function/f_sourcesink.c 	ret = kstrtou8(page, 0, &num);
page             1103 drivers/usb/gadget/function/f_sourcesink.c static ssize_t f_ss_opts_bulk_buflen_show(struct config_item *item, char *page)
page             1109 drivers/usb/gadget/function/f_sourcesink.c 	result = sprintf(page, "%u\n", opts->bulk_buflen);
page             1116 drivers/usb/gadget/function/f_sourcesink.c 					   const char *page, size_t len)
page             1128 drivers/usb/gadget/function/f_sourcesink.c 	ret = kstrtou32(page, 0, &num);
page             1141 drivers/usb/gadget/function/f_sourcesink.c static ssize_t f_ss_opts_bulk_qlen_show(struct config_item *item, char *page)
page             1147 drivers/usb/gadget/function/f_sourcesink.c 	result = sprintf(page, "%u\n", opts->bulk_qlen);
page             1154 drivers/usb/gadget/function/f_sourcesink.c 					   const char *page, size_t len)
page             1166 drivers/usb/gadget/function/f_sourcesink.c 	ret = kstrtou32(page, 0, &num);
page             1179 drivers/usb/gadget/function/f_sourcesink.c static ssize_t f_ss_opts_iso_qlen_show(struct config_item *item, char *page)
page             1185 drivers/usb/gadget/function/f_sourcesink.c 	result = sprintf(page, "%u\n", opts->iso_qlen);
page             1192 drivers/usb/gadget/function/f_sourcesink.c 					   const char *page, size_t len)
page             1204 drivers/usb/gadget/function/f_sourcesink.c 	ret = kstrtou32(page, 0, &num);
page             1484 drivers/usb/gadget/function/f_tcm.c static ssize_t usbg_wwn_version_show(struct config_item *item,  char *page)
page             1486 drivers/usb/gadget/function/f_tcm.c 	return sprintf(page, "usb-gadget fabric module\n");
page             1496 drivers/usb/gadget/function/f_tcm.c static ssize_t tcm_usbg_tpg_enable_show(struct config_item *item, char *page)
page             1501 drivers/usb/gadget/function/f_tcm.c 	return snprintf(page, PAGE_SIZE, "%u\n", tpg->gadget_connect);
page             1508 drivers/usb/gadget/function/f_tcm.c 		const char *page, size_t count)
page             1515 drivers/usb/gadget/function/f_tcm.c 	ret = strtobool(page, &op);
page             1534 drivers/usb/gadget/function/f_tcm.c static ssize_t tcm_usbg_tpg_nexus_show(struct config_item *item, char *page)
page             1547 drivers/usb/gadget/function/f_tcm.c 	ret = snprintf(page, PAGE_SIZE, "%s\n",
page             1639 drivers/usb/gadget/function/f_tcm.c 		const char *page, size_t count)
page             1646 drivers/usb/gadget/function/f_tcm.c 	if (!strncmp(page, "NULL", 4)) {
page             1650 drivers/usb/gadget/function/f_tcm.c 	if (strlen(page) >= USBG_NAMELEN) {
page             1653 drivers/usb/gadget/function/f_tcm.c 		pr_err(NEXUS_STORE_MSG, page, USBG_NAMELEN);
page             1657 drivers/usb/gadget/function/f_tcm.c 	snprintf(i_port, USBG_NAMELEN, "%s", page);
page              651 drivers/usb/gadget/function/f_uac1.c 					  char *page)			\
page              657 drivers/usb/gadget/function/f_uac1.c 	result = sprintf(page, "%u\n", opts->name);			\
page              665 drivers/usb/gadget/function/f_uac1.c 					  const char *page, size_t len)	\
page              677 drivers/usb/gadget/function/f_uac1.c 	ret = kstrtou32(page, 0, &num);					\
page              821 drivers/usb/gadget/function/f_uac1_legacy.c 					 char *page)			\
page              827 drivers/usb/gadget/function/f_uac1_legacy.c 	result = sprintf(page, "%u\n", opts->name);			\
page              834 drivers/usb/gadget/function/f_uac1_legacy.c 					  const char *page, size_t len)	\
page              846 drivers/usb/gadget/function/f_uac1_legacy.c 	ret = kstrtou32(page, 0, &num);					\
page              866 drivers/usb/gadget/function/f_uac1_legacy.c 					 char *page)			\
page              872 drivers/usb/gadget/function/f_uac1_legacy.c 	result = sprintf(page, "%s\n", opts->name);			\
page              879 drivers/usb/gadget/function/f_uac1_legacy.c 					  const char *page, size_t len)	\
page              889 drivers/usb/gadget/function/f_uac1_legacy.c 	tmp = kstrndup(page, len, GFP_KERNEL);				\
page              992 drivers/usb/gadget/function/f_uac2.c 					 char *page)			\
page              998 drivers/usb/gadget/function/f_uac2.c 	result = sprintf(page, "%u\n", opts->name);			\
page             1005 drivers/usb/gadget/function/f_uac2.c 					  const char *page, size_t len)	\
page             1017 drivers/usb/gadget/function/f_uac2.c 	ret = kstrtou32(page, 0, &num);					\
page               30 drivers/usb/gadget/function/u_ether_configfs.h 						char *page)		\
page               36 drivers/usb/gadget/function/u_ether_configfs.h 		result = gether_get_dev_addr(opts->net, page, PAGE_SIZE); \
page               43 drivers/usb/gadget/function/u_ether_configfs.h 						 const char *page, size_t len)\
page               54 drivers/usb/gadget/function/u_ether_configfs.h 		ret = gether_set_dev_addr(opts->net, page);		\
page               65 drivers/usb/gadget/function/u_ether_configfs.h 						 char *page)		\
page               71 drivers/usb/gadget/function/u_ether_configfs.h 		result = gether_get_host_addr(opts->net, page, PAGE_SIZE); \
page               78 drivers/usb/gadget/function/u_ether_configfs.h 						  const char *page, size_t len)\
page               89 drivers/usb/gadget/function/u_ether_configfs.h 		ret = gether_set_host_addr(opts->net, page);		\
page              100 drivers/usb/gadget/function/u_ether_configfs.h 					     char *page)		\
page              108 drivers/usb/gadget/function/u_ether_configfs.h 		return sprintf(page, "%d\n", qmult);			\
page              112 drivers/usb/gadget/function/u_ether_configfs.h 					      const char *page, size_t len)\
page              124 drivers/usb/gadget/function/u_ether_configfs.h 		ret = kstrtou8(page, 0, &val);				\
page              139 drivers/usb/gadget/function/u_ether_configfs.h 					      char *page)		\
page              145 drivers/usb/gadget/function/u_ether_configfs.h 		ret = gether_get_ifname(opts->net, page, PAGE_SIZE);	\
page              155 drivers/usb/gadget/function/u_ether_configfs.h 					       char *page)		\
page              161 drivers/usb/gadget/function/u_ether_configfs.h 		ret = sprintf(page, "%02x\n", opts->_n_);		\
page              168 drivers/usb/gadget/function/u_ether_configfs.h 						const char *page,	\
page              176 drivers/usb/gadget/function/u_ether_configfs.h 		ret = sscanf(page, "%02hhx", &val);			\
page              143 drivers/usb/gadget/function/uvc_configfs.c 	struct config_item *item, char *page)				\
page              157 drivers/usb/gadget/function/uvc_configfs.c 	result = sprintf(page, "%u\n", le##bits##_to_cpu(ch->desc.aname));\
page              166 drivers/usb/gadget/function/uvc_configfs.c 			   const char *page, size_t len)		\
page              186 drivers/usb/gadget/function/uvc_configfs.c 	ret = kstrtou##bits(page, 0, &num);				\
page              261 drivers/usb/gadget/function/uvc_configfs.c 	struct config_item *item, char *page)				\
page              277 drivers/usb/gadget/function/uvc_configfs.c 	result = sprintf(page, "%u\n", le##bits##_to_cpu(pd->aname));	\
page              294 drivers/usb/gadget/function/uvc_configfs.c 	struct config_item *item, char *page)
page              302 drivers/usb/gadget/function/uvc_configfs.c 	char *pg = page;
page              313 drivers/usb/gadget/function/uvc_configfs.c 		pg = page + result;
page              364 drivers/usb/gadget/function/uvc_configfs.c 	struct config_item *item, char *page)				\
page              381 drivers/usb/gadget/function/uvc_configfs.c 	result = sprintf(page, "%u\n", le##bits##_to_cpu(cd->aname));	\
page              405 drivers/usb/gadget/function/uvc_configfs.c 	struct config_item *item, char *page)
page              413 drivers/usb/gadget/function/uvc_configfs.c 	char *pg = page;
page              425 drivers/usb/gadget/function/uvc_configfs.c 		pg = page + result;
page              478 drivers/usb/gadget/function/uvc_configfs.c 	struct config_item *item, char *page)				\
page              495 drivers/usb/gadget/function/uvc_configfs.c 	result = sprintf(page, "%u\n", le##bits##_to_cpu(cd->aname));	\
page              717 drivers/usb/gadget/function/uvc_configfs.c 	struct config_item *item, char *page)
page              731 drivers/usb/gadget/function/uvc_configfs.c 	result += sprintf(page, "%u\n", opts->control_interface);
page              790 drivers/usb/gadget/function/uvc_configfs.c static ssize_t uvcg_format_bma_controls_show(struct uvcg_format *f, char *page)
page              796 drivers/usb/gadget/function/uvc_configfs.c 	char *pg = page;
page              808 drivers/usb/gadget/function/uvc_configfs.c 		pg = page + result;
page              817 drivers/usb/gadget/function/uvc_configfs.c 					      const char *page, size_t len)
page              835 drivers/usb/gadget/function/uvc_configfs.c 	if (len < 4 || *page != '0' ||
page              836 drivers/usb/gadget/function/uvc_configfs.c 	    (*(page + 1) != 'x' && *(page + 1) != 'X'))
page              838 drivers/usb/gadget/function/uvc_configfs.c 	ret = hex2bin(ch->bmaControls, page + 2, 1);
page              984 drivers/usb/gadget/function/uvc_configfs.c 	struct config_item *item, char *page)				\
page              998 drivers/usb/gadget/function/uvc_configfs.c 	result = sprintf(page, "%u\n", le##bits##_to_cpu(sh->desc.aname));\
page             1093 drivers/usb/gadget/function/uvc_configfs.c static ssize_t uvcg_frame_##cname##_show(struct config_item *item, char *page)\
page             1107 drivers/usb/gadget/function/uvc_configfs.c 	result = sprintf(page, "%u\n", f->frame.cname);			\
page             1115 drivers/usb/gadget/function/uvc_configfs.c 					   const char *page, size_t len)\
page             1125 drivers/usb/gadget/function/uvc_configfs.c 	ret = kstrtou##bits(page, 0, &num);				\
page             1152 drivers/usb/gadget/function/uvc_configfs.c 					     char *page)
page             1176 drivers/usb/gadget/function/uvc_configfs.c 	result = sprintf(page, "%u\n", f->frame.b_frame_index);
page             1197 drivers/usb/gadget/function/uvc_configfs.c 						 char *page)
page             1204 drivers/usb/gadget/function/uvc_configfs.c 	char *pg = page;
page             1214 drivers/usb/gadget/function/uvc_configfs.c 		pg = page + result;
page             1244 drivers/usb/gadget/function/uvc_configfs.c static int __uvcg_iter_frm_intrv(const char *page, size_t len,
page             1249 drivers/usb/gadget/function/uvc_configfs.c 	const char *pg = page;
page             1255 drivers/usb/gadget/function/uvc_configfs.c 	while (pg - page < len) {
page             1257 drivers/usb/gadget/function/uvc_configfs.c 		while (i < sizeof(buf) && (pg - page < len) &&
page             1262 drivers/usb/gadget/function/uvc_configfs.c 		while ((pg - page < len) && (*pg == '\0' || *pg == '\n'))
page             1274 drivers/usb/gadget/function/uvc_configfs.c 						  const char *page, size_t len)
page             1296 drivers/usb/gadget/function/uvc_configfs.c 	ret = __uvcg_iter_frm_intrv(page, len, __uvcg_count_frm_intrv, &n);
page             1306 drivers/usb/gadget/function/uvc_configfs.c 	ret = __uvcg_iter_frm_intrv(page, len, __uvcg_fill_frm_intrv, &tmp);
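__uvcg_iter_frm_intrv() above tokenizes the written buffer twice through a callback: once to count the NUL- or newline-separated intervals, once to fill the array. A condensed single-pass hedged sketch of the same tokenizing, not the driver's two-pass implementation:

        /* Illustrative one-pass variant of the interval parsing. */
        #include <linux/errno.h>
        #include <linux/kernel.h>

        static int example_parse_intervals(const char *page, size_t len,
                                           u32 *out, size_t max)
        {
                const char *pg = page;
                size_t n = 0;

                while ((size_t)(pg - page) < len && n < max) {
                        char buf[32];
                        size_t i = 0;

                        while (i < sizeof(buf) - 1 && (size_t)(pg - page) < len &&
                               *pg != '\0' && *pg != '\n')
                                buf[i++] = *pg++;
                        buf[i] = '\0';

                        while ((size_t)(pg - page) < len &&
                               (*pg == '\0' || *pg == '\n'))
                                ++pg;           /* swallow separators */

                        if (i) {
                                if (kstrtou32(buf, 10, &out[n]))
                                        return -EINVAL;
                                n++;
                        }
                }
                return n;
        }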
page             1446 drivers/usb/gadget/function/uvc_configfs.c 							char *page)
page             1459 drivers/usb/gadget/function/uvc_configfs.c 	memcpy(page, ch->desc.guidFormat, sizeof(ch->desc.guidFormat));
page             1468 drivers/usb/gadget/function/uvc_configfs.c 						   const char *page, size_t len)
page             1487 drivers/usb/gadget/function/uvc_configfs.c 	memcpy(ch->desc.guidFormat, page,
page             1501 drivers/usb/gadget/function/uvc_configfs.c 	struct config_item *item, char *page)				\
page             1515 drivers/usb/gadget/function/uvc_configfs.c 	result = sprintf(page, "%u\n", le##bits##_to_cpu(u->desc.aname));\
page             1526 drivers/usb/gadget/function/uvc_configfs.c 	struct config_item *item, char *page)				\
page             1540 drivers/usb/gadget/function/uvc_configfs.c 	result = sprintf(page, "%u\n", le##bits##_to_cpu(u->desc.aname));\
page             1549 drivers/usb/gadget/function/uvc_configfs.c 				    const char *page, size_t len)	\
page             1569 drivers/usb/gadget/function/uvc_configfs.c 	ret = kstrtou8(page, 0, &num);					\
page             1594 drivers/usb/gadget/function/uvc_configfs.c uvcg_uncompressed_bma_controls_show(struct config_item *item, char *page)
page             1597 drivers/usb/gadget/function/uvc_configfs.c 	return uvcg_format_bma_controls_show(&unc->fmt, page);
page             1602 drivers/usb/gadget/function/uvc_configfs.c 				     const char *page, size_t len)
page             1605 drivers/usb/gadget/function/uvc_configfs.c 	return uvcg_format_bma_controls_store(&unc->fmt, page, len);
page             1695 drivers/usb/gadget/function/uvc_configfs.c static ssize_t uvcg_mjpeg_##cname##_show(struct config_item *item, char *page)\
page             1709 drivers/usb/gadget/function/uvc_configfs.c 	result = sprintf(page, "%u\n", le##bits##_to_cpu(u->desc.aname));\
page             1719 drivers/usb/gadget/function/uvc_configfs.c static ssize_t uvcg_mjpeg_##cname##_show(struct config_item *item, char *page)\
page             1733 drivers/usb/gadget/function/uvc_configfs.c 	result = sprintf(page, "%u\n", le##bits##_to_cpu(u->desc.aname));\
page             1742 drivers/usb/gadget/function/uvc_configfs.c 			   const char *page, size_t len)		\
page             1762 drivers/usb/gadget/function/uvc_configfs.c 	ret = kstrtou8(page, 0, &num);					\
page             1787 drivers/usb/gadget/function/uvc_configfs.c uvcg_mjpeg_bma_controls_show(struct config_item *item, char *page)
page             1790 drivers/usb/gadget/function/uvc_configfs.c 	return uvcg_format_bma_controls_show(&u->fmt, page);
page             1795 drivers/usb/gadget/function/uvc_configfs.c 				     const char *page, size_t len)
page             1798 drivers/usb/gadget/function/uvc_configfs.c 	return uvcg_format_bma_controls_store(&u->fmt, page, len);
page             1865 drivers/usb/gadget/function/uvc_configfs.c 	struct config_item *item, char *page)				\
page             1881 drivers/usb/gadget/function/uvc_configfs.c 	result = sprintf(page, "%u\n", le##bits##_to_cpu(cd->aname));	\
page             2320 drivers/usb/gadget/function/uvc_configfs.c 	struct config_item *item, char *page)
page             2334 drivers/usb/gadget/function/uvc_configfs.c 	result += sprintf(page, "%u\n", opts->streaming_interface);
page             2384 drivers/usb/gadget/function/uvc_configfs.c 	struct config_item *item, char *page)				\
page             2390 drivers/usb/gadget/function/uvc_configfs.c 	result = sprintf(page, "%u\n", opts->cname);			\
page             2398 drivers/usb/gadget/function/uvc_configfs.c 			   const char *page, size_t len)		\
page             2410 drivers/usb/gadget/function/uvc_configfs.c 	ret = kstrtouint(page, 0, &num);				\
page              396 drivers/usb/host/ohci-dbg.c 	char *page;
page              476 drivers/usb/host/ohci-dbg.c 	temp = show_list(ohci, buf->page, size, ohci->ed_controltail);
page              477 drivers/usb/host/ohci-dbg.c 	temp += show_list(ohci, buf->page + temp, size - temp,
page              501 drivers/usb/host/ohci-dbg.c 	next = buf->page;
page              594 drivers/usb/host/ohci-dbg.c 	next = buf->page;
page              685 drivers/usb/host/ohci-dbg.c 	if (!buf->page)
page              686 drivers/usb/host/ohci-dbg.c 		buf->page = (char *)get_zeroed_page(GFP_KERNEL);
page              688 drivers/usb/host/ohci-dbg.c 	if (!buf->page) {
page              721 drivers/usb/host/ohci-dbg.c 				      buf->page, buf->count);
page              733 drivers/usb/host/ohci-dbg.c 		if (buf->page)
page              734 drivers/usb/host/ohci-dbg.c 			free_page((unsigned long)buf->page);
page              241 drivers/usb/host/xhci-tegra.c 	u32 page = CSB_PAGE_SELECT(offset);
page              244 drivers/usb/host/xhci-tegra.c 	fpci_writel(tegra, page, XUSB_CFG_ARU_C11_CSBRANGE);
page              252 drivers/usb/host/xhci-tegra.c 	u32 page = CSB_PAGE_SELECT(offset);
page              255 drivers/usb/host/xhci-tegra.c 	fpci_writel(tegra, page, XUSB_CFG_ARU_C11_CSBRANGE);
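xhci-tegra.c accesses the CSB space through a window: the upper offset bits are programmed into XUSB_CFG_ARU_C11_CSBRANGE as a page select before the access itself. A hedged sketch; CSB_PAGE_OFFSET, fpci_readl and XUSB_CFG_CSB_BASE_ADDR are assumed counterparts to the macros and helpers visible in the excerpt, not verified symbols:

        /* Sketch of banked ("paged") register access. Assumes the page-offset
         * macro and read helper exist alongside the symbols shown above.
         */
        static u32 example_csb_readl(struct tegra_xusb *tegra, unsigned int offset)
        {
                u32 page = CSB_PAGE_SELECT(offset);     /* upper bits -> bank select */
                u32 ofs = CSB_PAGE_OFFSET(offset);      /* assumed: bits within bank */

                fpci_writel(tegra, page, XUSB_CFG_ARU_C11_CSBRANGE);
                return fpci_readl(tegra, XUSB_CFG_CSB_BASE_ADDR + ofs); /* assumed base */
        }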
page              176 drivers/usb/mon/mon_bin.c 	struct page *pg;
page             1249 drivers/usb/mon/mon_bin.c 	struct page *pageptr;
page             1257 drivers/usb/mon/mon_bin.c 	vmf->page = pageptr;
page              722 drivers/usb/storage/alauda.c 		unsigned int page, unsigned int pages, unsigned char *data)
page              727 drivers/usb/storage/alauda.c 		PBA_ZONE(pba), 0, PBA_LO(pba) + page, pages, 0, MEDIA_PORT(us)
page              730 drivers/usb/storage/alauda.c 	usb_stor_dbg(us, "pba %d page %d count %d\n", pba, page, pages);
page              748 drivers/usb/storage/alauda.c 		unsigned int page, unsigned int pages, unsigned char *data)
page              753 drivers/usb/storage/alauda.c 	rc = alauda_read_block_raw(us, pba, page, pages, data);
page              801 drivers/usb/storage/alauda.c 		 unsigned int page, unsigned int pages,
page              872 drivers/usb/storage/alauda.c 	for (i = page; i < page+pages; i++) {
page              911 drivers/usb/storage/alauda.c 	unsigned int page, len, offset;
page              935 drivers/usb/storage/alauda.c 	page = (address & MEDIA_INFO(us).blockmask);
page              958 drivers/usb/storage/alauda.c 		pages = min(sectors, blocksize - page);
page              966 drivers/usb/storage/alauda.c 				     pages, lba, page);
page              978 drivers/usb/storage/alauda.c 				     pages, pba, lba, page);
page              980 drivers/usb/storage/alauda.c 			result = alauda_read_block(us, pba, page, pages, buffer);
page              989 drivers/usb/storage/alauda.c 		page = 0;
page             1005 drivers/usb/storage/alauda.c 	unsigned int page, len, offset;
page             1037 drivers/usb/storage/alauda.c 	page = (address & MEDIA_INFO(us).blockmask);
page             1046 drivers/usb/storage/alauda.c 		unsigned int pages = min(sectors, blocksize - page);
page             1061 drivers/usb/storage/alauda.c 		result = alauda_write_lba(us, lba, page, pages, buffer,
page             1066 drivers/usb/storage/alauda.c 		page = 0;
page             1164 drivers/usb/storage/alauda.c 		unsigned int page, pages;
page             1170 drivers/usb/storage/alauda.c 		page = short_pack(srb->cmnd[3], srb->cmnd[2]);
page             1171 drivers/usb/storage/alauda.c 		page <<= 16;
page             1172 drivers/usb/storage/alauda.c 		page |= short_pack(srb->cmnd[5], srb->cmnd[4]);
page             1175 drivers/usb/storage/alauda.c 		usb_stor_dbg(us, "READ_10: page %d pagect %d\n", page, pages);
page             1177 drivers/usb/storage/alauda.c 		return alauda_read_data(us, page, pages);
page             1181 drivers/usb/storage/alauda.c 		unsigned int page, pages;
page             1187 drivers/usb/storage/alauda.c 		page = short_pack(srb->cmnd[3], srb->cmnd[2]);
page             1188 drivers/usb/storage/alauda.c 		page <<= 16;
page             1189 drivers/usb/storage/alauda.c 		page |= short_pack(srb->cmnd[5], srb->cmnd[4]);
page             1192 drivers/usb/storage/alauda.c 		usb_stor_dbg(us, "WRITE_10: page %d pagect %d\n", page, pages);
page             1194 drivers/usb/storage/alauda.c 		return alauda_write_data(us, page, pages);
page              739 drivers/usb/storage/sddr09.c 	unsigned int page, pages;
page              746 drivers/usb/storage/sddr09.c 	page = (address & info->blockmask);
page              770 drivers/usb/storage/sddr09.c 		pages = min(sectors, info->blocksize - page);
page              787 drivers/usb/storage/sddr09.c 				     pages, lba, page);
page              800 drivers/usb/storage/sddr09.c 				     pages, pba, lba, page);
page              802 drivers/usb/storage/sddr09.c 			address = ((pba << info->blockshift) + page) << 
page              815 drivers/usb/storage/sddr09.c 		page = 0;
page              852 drivers/usb/storage/sddr09.c 		 unsigned int page, unsigned int pages,
page              919 drivers/usb/storage/sddr09.c 	for (i = page; i < page+pages; i++) {
page              963 drivers/usb/storage/sddr09.c 	unsigned int lba, maxlba, page, pages;
page              973 drivers/usb/storage/sddr09.c 	page = (address & info->blockmask);
page             1015 drivers/usb/storage/sddr09.c 		pages = min(sectors, info->blocksize - page);
page             1030 drivers/usb/storage/sddr09.c 		result = sddr09_write_lba(us, lba, page, pages,
page             1035 drivers/usb/storage/sddr09.c 		page = 0;
page             1544 drivers/usb/storage/sddr09.c 	unsigned int page, pages;
page             1658 drivers/usb/storage/sddr09.c 		page = short_pack(srb->cmnd[3], srb->cmnd[2]);
page             1659 drivers/usb/storage/sddr09.c 		page <<= 16;
page             1660 drivers/usb/storage/sddr09.c 		page |= short_pack(srb->cmnd[5], srb->cmnd[4]);
page             1664 drivers/usb/storage/sddr09.c 			     page, pages);
page             1666 drivers/usb/storage/sddr09.c 		result = sddr09_read_data(us, page, pages);
page             1673 drivers/usb/storage/sddr09.c 		page = short_pack(srb->cmnd[3], srb->cmnd[2]);
page             1674 drivers/usb/storage/sddr09.c 		page <<= 16;
page             1675 drivers/usb/storage/sddr09.c 		page |= short_pack(srb->cmnd[5], srb->cmnd[4]);
page             1679 drivers/usb/storage/sddr09.c 			     page, pages);
page             1681 drivers/usb/storage/sddr09.c 		result = sddr09_write_data(us, page, pages);
page              189 drivers/usb/storage/sddr55.c 		unsigned int page,
page              228 drivers/usb/storage/sddr55.c 				info->blocksize - page);
page              232 drivers/usb/storage/sddr55.c 			     pages, pba, lba, page);
page              239 drivers/usb/storage/sddr55.c 			address = (pba << info->blockshift) + page;
page              293 drivers/usb/storage/sddr55.c 		page = 0;
page              308 drivers/usb/storage/sddr55.c 		unsigned int page,
page              355 drivers/usb/storage/sddr55.c 				info->blocksize - page);
page              363 drivers/usb/storage/sddr55.c 			     pages, pba, lba, page);
page              414 drivers/usb/storage/sddr55.c 		address = (pba << info->blockshift) + page;
page              501 drivers/usb/storage/sddr55.c 		page = 0;
page              791 drivers/usb/storage/sddr55.c 	unsigned int page;
page              925 drivers/usb/storage/sddr55.c 		page = short_pack(srb->cmnd[3], srb->cmnd[2]);
page              926 drivers/usb/storage/sddr55.c 		page <<= 16;
page              927 drivers/usb/storage/sddr55.c 		page |= short_pack(srb->cmnd[5], srb->cmnd[4]);
page              930 drivers/usb/storage/sddr55.c 		page <<= info->smallpageshift;
page              934 drivers/usb/storage/sddr55.c 		lba = page >> info->blockshift;
page              935 drivers/usb/storage/sddr55.c 		page = page & info->blockmask;
page              953 drivers/usb/storage/sddr55.c 				     pba, lba, page, pages);
page              955 drivers/usb/storage/sddr55.c 			return sddr55_write_data(us, lba, page, pages);
page              958 drivers/usb/storage/sddr55.c 				     pba, lba, page, pages);
page              960 drivers/usb/storage/sddr55.c 			return sddr55_read_data(us, lba, page, pages);
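alauda.c, sddr09.c and sddr55.c above all decode READ_10/WRITE_10 the same way: bytes 2-5 of the CDB carry a big-endian 32-bit logical block address, assembled 16 bits at a time with short_pack() and then split into a flash block number and a page within that block. A hedged sketch (short_pack() is redefined here for self-containment):

        /* Illustrative CDB decode; mirrors the arithmetic above. */
        #include <linux/types.h>

        #define short_pack(lsb, msb) ((u16)(lsb) | ((u16)(msb) << 8))

        static void example_decode_rw10(const u8 *cmnd, unsigned int blockshift,
                                        unsigned int blockmask,
                                        unsigned int *lba, unsigned int *page)
        {
                unsigned int addr;

                addr  = short_pack(cmnd[3], cmnd[2]);   /* cmnd[2] is the MSB */
                addr <<= 16;
                addr |= short_pack(cmnd[5], cmnd[4]);

                *lba  = addr >> blockshift;             /* which flash block */
                *page = addr & blockmask;               /* page within the block */
        }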
page              181 drivers/vfio/vfio_iommu_spapr_tce.c 	struct page *page;
page              187 drivers/vfio/vfio_iommu_spapr_tce.c 	page = pfn_to_page(hpa >> PAGE_SHIFT);
page              193 drivers/vfio/vfio_iommu_spapr_tce.c 	return page_shift(compound_head(page)) >= it_page_shift;
page              383 drivers/vfio/vfio_iommu_spapr_tce.c 	struct page *page;
page              385 drivers/vfio/vfio_iommu_spapr_tce.c 	page = pfn_to_page(hpa >> PAGE_SHIFT);
page              386 drivers/vfio/vfio_iommu_spapr_tce.c 	put_page(page);
page              486 drivers/vfio/vfio_iommu_spapr_tce.c 	struct page *page = NULL;
page              491 drivers/vfio/vfio_iommu_spapr_tce.c 			&page) != 1)
page              494 drivers/vfio/vfio_iommu_spapr_tce.c 	*hpa = __pa((unsigned long) page_address(page));
page              302 drivers/vfio/vfio_iommu_type1.c 		struct page *tail = pfn_to_page(pfn);
page              303 drivers/vfio/vfio_iommu_type1.c 		struct page *head = compound_head(tail);
page              329 drivers/vfio/vfio_iommu_type1.c 		struct page *page = pfn_to_page(pfn);
page              331 drivers/vfio/vfio_iommu_type1.c 			SetPageDirty(page);
page              332 drivers/vfio/vfio_iommu_type1.c 		put_page(page);
page              341 drivers/vfio/vfio_iommu_type1.c 	struct page *page[1];
page              352 drivers/vfio/vfio_iommu_type1.c 		ret = get_user_pages(vaddr, 1, flags | FOLL_LONGTERM, page,
page              355 drivers/vfio/vfio_iommu_type1.c 		ret = get_user_pages_remote(NULL, mm, vaddr, 1, flags, page,
page              366 drivers/vfio/vfio_iommu_type1.c 			put_page(page[0]);
page              372 drivers/vfio/vfio_iommu_type1.c 		*pfn = page_to_pfn(page[0]);
page             1274 drivers/vfio/vfio_iommu_type1.c 	struct page *pages;
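The vfio_iommu_type1.c lines above pin a single user page for DMA; FOLL_LONGTERM tells the mm the pin is indefinite, and the pfn handed to the IOMMU path comes straight from the pinned struct page. A hedged sketch of the current-mm branch (the five-argument get_user_pages() signature matches this kernel era):

        /* Sketch of a single-page long-term pin; not vfio's full fallback logic. */
        #include <linux/errno.h>
        #include <linux/mm.h>

        static int example_pin_user_pfn(unsigned long vaddr, bool writable,
                                        unsigned long *pfn)
        {
                unsigned int flags = writable ? FOLL_WRITE : 0;
                struct page *page[1];
                long ret;

                ret = get_user_pages(vaddr, 1, flags | FOLL_LONGTERM, page, NULL);
                if (ret != 1)
                        return ret < 0 ? ret : -EFAULT;

                *pfn = page_to_pfn(page[0]);    /* caller put_page()s to unpin */
                return 0;
        }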
page              648 drivers/vhost/net.c 	if (pfrag->page) {
page              651 drivers/vhost/net.c 		__page_frag_cache_drain(pfrag->page, net->refcnt_bias);
page              658 drivers/vhost/net.c 		pfrag->page = alloc_pages((gfp & ~__GFP_DIRECT_RECLAIM) |
page              662 drivers/vhost/net.c 		if (likely(pfrag->page)) {
page              667 drivers/vhost/net.c 	pfrag->page = alloc_page(gfp);
page              668 drivers/vhost/net.c 	if (likely(pfrag->page)) {
page              676 drivers/vhost/net.c 	page_ref_add(pfrag->page, USHRT_MAX - 1);
page              714 drivers/vhost/net.c 	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
page              715 drivers/vhost/net.c 	copied = copy_page_from_iter(alloc_frag->page,
page              738 drivers/vhost/net.c 	copied = copy_page_from_iter(alloc_frag->page,
page             1333 drivers/vhost/net.c 	n->page_frag.page = NULL;
page             1409 drivers/vhost/net.c 	if (n->page_frag.page)
page             1410 drivers/vhost/net.c 		__page_frag_cache_drain(n->page_frag.page, n->refcnt_bias);
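vhost/net.c refills its page_frag by trying a high-order compound page first (without direct reclaim, so failure is cheap) and falling back to a single page; the excerpt's page_ref_add(..., USHRT_MAX - 1) then pre-charges the refcount so each consumed fragment only pays a bias decrement. A hedged sketch of the allocation half only:

        /* Sketch of the two-tier page_frag refill; the refcount-bias charging
         * shown in the excerpt is left out. EXAMPLE_FRAG_ORDER is illustrative.
         */
        #include <linux/gfp.h>
        #include <linux/mm.h>

        #define EXAMPLE_FRAG_ORDER 3

        static bool example_refill_frag(struct page_frag *pfrag, gfp_t gfp)
        {
                pfrag->page = alloc_pages((gfp & ~__GFP_DIRECT_RECLAIM) |
                                          __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY,
                                          EXAMPLE_FRAG_ORDER);
                if (pfrag->page) {
                        pfrag->size = PAGE_SIZE << EXAMPLE_FRAG_ORDER;
                        pfrag->offset = 0;
                        return true;
                }

                pfrag->page = alloc_page(gfp);  /* fallback: a plain single page */
                if (!pfrag->page)
                        return false;

                pfrag->size = PAGE_SIZE;
                pfrag->offset = 0;
                return true;
        }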
page               94 drivers/vhost/scsi.c 	struct page **tvc_upages;
page              577 drivers/vhost/scsi.c 	struct page **pages;
page              627 drivers/vhost/scsi.c 	struct page **pages = cmd->tvc_upages;
page              682 drivers/vhost/scsi.c 				struct page *page = sg_page(p++);
page              683 drivers/vhost/scsi.c 				if (page)
page              684 drivers/vhost/scsi.c 					put_page(page);
page             1873 drivers/vhost/scsi.c 		struct config_item *item, const char *page, size_t count)
page             1879 drivers/vhost/scsi.c 	int ret = kstrtoul(page, 0, &val);
page             1895 drivers/vhost/scsi.c 		struct config_item *item, char *page)
page             1901 drivers/vhost/scsi.c 	return sprintf(page, "%d\n", tpg->tv_fabric_prot_type);
page             1929 drivers/vhost/scsi.c 					     sizeof(struct page *),
page             2040 drivers/vhost/scsi.c static ssize_t vhost_scsi_tpg_nexus_show(struct config_item *item, char *page)
page             2054 drivers/vhost/scsi.c 	ret = snprintf(page, PAGE_SIZE, "%s\n",
page             2062 drivers/vhost/scsi.c 		const char *page, size_t count)
page             2073 drivers/vhost/scsi.c 	if (!strncmp(page, "NULL", 4)) {
page             2082 drivers/vhost/scsi.c 	if (strlen(page) >= VHOST_SCSI_NAMELEN) {
page             2084 drivers/vhost/scsi.c 				" max: %d\n", page, VHOST_SCSI_NAMELEN);
page             2087 drivers/vhost/scsi.c 	snprintf(&i_port[0], VHOST_SCSI_NAMELEN, "%s", page);
page             2276 drivers/vhost/scsi.c vhost_scsi_wwn_version_show(struct config_item *item, char *page)
page             2278 drivers/vhost/scsi.c 	return sprintf(page, "TCM_VHOST fabric module %s on %s/%s"
page             1827 drivers/vhost/vhost.c 	struct page *page;
page             1832 drivers/vhost/vhost.c 	r = get_user_pages_fast(log, 1, FOLL_WRITE, &page);
page             1836 drivers/vhost/vhost.c 	base = kmap_atomic(page);
page             1839 drivers/vhost/vhost.c 	set_page_dirty_lock(page);
page             1840 drivers/vhost/vhost.c 	put_page(page);
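The vhost.c dirty-log sequence above is self-contained: pin the user page holding the log word with get_user_pages_fast(), map it, flip the bit, then set_page_dirty_lock() so writeback knows the page changed behind its mapping before the pin is dropped. Condensed into one hedged helper:

        /* Sketch of the pin/map/dirty/unpin sequence for one log bit. */
        #include <linux/bitops.h>
        #include <linux/errno.h>
        #include <linux/highmem.h>
        #include <linux/mm.h>

        static int example_set_log_bit(int nr, void __user *addr)
        {
                unsigned long log = (unsigned long)addr;
                int bit = nr + (log % PAGE_SIZE) * 8;   /* bit offset within the page */
                struct page *page;
                void *base;
                int r;

                r = get_user_pages_fast(log, 1, FOLL_WRITE, &page);
                if (r != 1)
                        return r < 0 ? r : -EFAULT;

                base = kmap_atomic(page);
                set_bit(bit, base);
                kunmap_atomic(base);

                set_page_dirty_lock(page);      /* page modified via kernel mapping */
                put_page(page);
                return 0;
        }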
page              942 drivers/video/fbdev/acornfb.c 		struct page *page;
page              949 drivers/video/fbdev/acornfb.c 		page = virt_to_page(virtual_start);
page              950 drivers/video/fbdev/acornfb.c 		__free_reserved_page(page);
page             1920 drivers/video/fbdev/aty/atyfb_base.c 	unsigned int size, page, map_size = 0;
page             1943 drivers/video/fbdev/aty/atyfb_base.c 	for (page = 0; page < size;) {
page             1948 drivers/video/fbdev/aty/atyfb_base.c 			unsigned long offset = off + page;
page             1960 drivers/video/fbdev/aty/atyfb_base.c 			page += PAGE_SIZE;
page             1963 drivers/video/fbdev/aty/atyfb_base.c 		if (page + map_size > size)
page             1964 drivers/video/fbdev/aty/atyfb_base.c 			map_size = size - page;
page             1969 drivers/video/fbdev/aty/atyfb_base.c 		if (remap_pfn_range(vma, vma->vm_start + page,
page             1973 drivers/video/fbdev/aty/atyfb_base.c 		page += map_size;
page              937 drivers/video/fbdev/broadsheetfb.c 	struct page *cur;
page               26 drivers/video/fbdev/core/fb_defio.c static struct page *fb_deferred_io_page(struct fb_info *info, unsigned long offs)
page               29 drivers/video/fbdev/core/fb_defio.c 	struct page *page;
page               32 drivers/video/fbdev/core/fb_defio.c 		page = vmalloc_to_page(screen_base + offs);
page               34 drivers/video/fbdev/core/fb_defio.c 		page = pfn_to_page((info->fix.smem_start + offs) >> PAGE_SHIFT);
page               36 drivers/video/fbdev/core/fb_defio.c 	return page;
page               43 drivers/video/fbdev/core/fb_defio.c 	struct page *page;
page               50 drivers/video/fbdev/core/fb_defio.c 	page = fb_deferred_io_page(info, offset);
page               51 drivers/video/fbdev/core/fb_defio.c 	if (!page)
page               54 drivers/video/fbdev/core/fb_defio.c 	get_page(page);
page               57 drivers/video/fbdev/core/fb_defio.c 		page->mapping = vmf->vma->vm_file->f_mapping;
page               61 drivers/video/fbdev/core/fb_defio.c 	BUG_ON(!page->mapping);
page               62 drivers/video/fbdev/core/fb_defio.c 	page->index = vmf->pgoff;
page               64 drivers/video/fbdev/core/fb_defio.c 	vmf->page = page;
page               95 drivers/video/fbdev/core/fb_defio.c 	struct page *page = vmf->page;
page               98 drivers/video/fbdev/core/fb_defio.c 	struct page *cur;
page              123 drivers/video/fbdev/core/fb_defio.c 	lock_page(page);
page              133 drivers/video/fbdev/core/fb_defio.c 		if (unlikely(cur == page))
page              135 drivers/video/fbdev/core/fb_defio.c 		else if (cur->index > page->index)
page              139 drivers/video/fbdev/core/fb_defio.c 	list_add_tail(&page->lru, &cur->lru);
page              154 drivers/video/fbdev/core/fb_defio.c static int fb_deferred_io_set_page_dirty(struct page *page)
page              156 drivers/video/fbdev/core/fb_defio.c 	if (!PageDirty(page))
page              157 drivers/video/fbdev/core/fb_defio.c 		SetPageDirty(page);
page              182 drivers/video/fbdev/core/fb_defio.c 	struct page *cur;
page              228 drivers/video/fbdev/core/fb_defio.c 	struct page *page;
page              236 drivers/video/fbdev/core/fb_defio.c 		page = fb_deferred_io_page(info, i);
page              237 drivers/video/fbdev/core/fb_defio.c 		page->mapping = NULL;
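
[Note] The fb_defio entries above implement deferred framebuffer I/O: ->page_mkwrite() queues each faulting page on a list kept sorted by page->index, and delayed work later walks that list. A simplified reconstruction of the sorted insert (locking and the deferred work omitted):

    static void defio_queue_dirty(struct list_head *pagelist, struct page *page)
    {
        struct page *cur;

        /* Walk the index-sorted list up to the first larger index. */
        list_for_each_entry(cur, pagelist, lru) {
            if (cur == page)
                return;  /* already queued */
            if (cur->index > page->index)
                break;
        }
        /* If the walk ran off the end, 'cur' aliases the list head and
         * this degenerates to a tail insert, as in the driver itself. */
        list_add_tail(&page->lru, &cur->lru);
    }
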
page              440 drivers/video/fbdev/grvga.c 		unsigned long page;
page              457 drivers/video/fbdev/grvga.c 		for (page = virtual_start;
page              458 drivers/video/fbdev/grvga.c 		     page < PAGE_ALIGN(virtual_start + grvga_mem_size);
page              459 drivers/video/fbdev/grvga.c 		     page += PAGE_SIZE) {
page              460 drivers/video/fbdev/grvga.c 			SetPageReserved(virt_to_page(page));
page              472 drivers/video/fbdev/metronomefb.c 	struct page *cur;
page              646 drivers/video/fbdev/pvr2fb.c 	struct page **pages;
page              651 drivers/video/fbdev/pvr2fb.c 	pages = kmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
page               47 drivers/video/fbdev/sbuslib.c 	unsigned int size, page, r, map_size;
page               66 drivers/video/fbdev/sbuslib.c 	for (page = 0; page < size; ){
page               69 drivers/video/fbdev/sbuslib.c 			if (map[i].voff == off+page) {
page               80 drivers/video/fbdev/sbuslib.c 			page += PAGE_SIZE;
page               83 drivers/video/fbdev/sbuslib.c 		if (page + map_size > size)
page               84 drivers/video/fbdev/sbuslib.c 			map_size = size - page;
page               86 drivers/video/fbdev/sbuslib.c 					vma->vm_start + page,
page               93 drivers/video/fbdev/sbuslib.c 		page += map_size;
page              446 drivers/video/fbdev/sh_mobile_lcdcfb.c 	struct page *page;
page              451 drivers/video/fbdev/sh_mobile_lcdcfb.c 	list_for_each_entry(page, pagelist, lru)
page              452 drivers/video/fbdev/sh_mobile_lcdcfb.c 		sg_set_page(&ch->sglist[nr_pages++], page, PAGE_SIZE, 0);
page              780 drivers/video/fbdev/smscufx.c 	unsigned long page, pos;
page              795 drivers/video/fbdev/smscufx.c 		page = vmalloc_to_pfn((void *)pos);
page              796 drivers/video/fbdev/smscufx.c 		if (remap_pfn_range(vma, start, page, PAGE_SIZE, PAGE_SHARED))
page              958 drivers/video/fbdev/smscufx.c 	struct page *cur;
page              327 drivers/video/fbdev/udlfb.c 	unsigned long page, pos;
page              342 drivers/video/fbdev/udlfb.c 		page = vmalloc_to_pfn((void *)pos);
page              343 drivers/video/fbdev/udlfb.c 		if (remap_pfn_range(vma, start, page, PAGE_SIZE, PAGE_SHARED))
page              784 drivers/video/fbdev/udlfb.c 	struct page *cur;
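
[Note] smscufx and udlfb share the mmap idiom indexed above: the framebuffer lives in vmalloc space, so there is no contiguous physical range to hand to remap_pfn_range() in one call; instead each 4K page is resolved with vmalloc_to_pfn() and inserted individually. A sketch, assuming vbuf points at the vmalloc'd buffer and size is already clamped to the vma:

    static int vmalloc_fb_mmap(struct vm_area_struct *vma, void *vbuf,
                               unsigned long size)
    {
        unsigned long start = vma->vm_start;
        unsigned long pos = (unsigned long)vbuf;

        while (size > 0) {
            unsigned long page = vmalloc_to_pfn((void *)pos);

            if (remap_pfn_range(vma, start, page, PAGE_SIZE, PAGE_SHARED))
                return -EAGAIN;
            start += PAGE_SIZE;
            pos += PAGE_SIZE;
            if (size > PAGE_SIZE)
                size -= PAGE_SIZE;
            else
                size = 0;
        }
        return 0;
    }
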
page               50 drivers/video/fbdev/xen-fbfront.c 	struct xenfb_page	*page;
page               80 drivers/video/fbdev/xen-fbfront.c 	prod = info->page->out_prod;
page               83 drivers/video/fbdev/xen-fbfront.c 	XENFB_OUT_RING_REF(info->page, prod) = *event;
page               85 drivers/video/fbdev/xen-fbfront.c 	info->page->out_prod = prod + 1;
page              121 drivers/video/fbdev/xen-fbfront.c 	prod = info->page->out_prod;
page              122 drivers/video/fbdev/xen-fbfront.c 	cons = info->page->out_cons;
page              188 drivers/video/fbdev/xen-fbfront.c 	struct page *page;
page              194 drivers/video/fbdev/xen-fbfront.c 	list_for_each_entry(page, pagelist, lru) {
page              195 drivers/video/fbdev/xen-fbfront.c 		beg = page->index << PAGE_SHIFT;
page              276 drivers/video/fbdev/xen-fbfront.c 	xenfb_refresh(info, 0, 0, info->page->width, info->page->height);
page              291 drivers/video/fbdev/xen-fbfront.c 		    var->bits_per_pixel == xenfb_info->page->depth) {
page              301 drivers/video/fbdev/xen-fbfront.c 	required_mem_len = var->xres * var->yres * xenfb_info->page->depth / 8;
page              302 drivers/video/fbdev/xen-fbfront.c 	if (var->bits_per_pixel == xenfb_info->page->depth &&
page              351 drivers/video/fbdev/xen-fbfront.c 	struct xenfb_page *page = info->page;
page              353 drivers/video/fbdev/xen-fbfront.c 	if (page->in_cons != page->in_prod) {
page              354 drivers/video/fbdev/xen-fbfront.c 		info->page->in_cons = info->page->in_prod;
page              420 drivers/video/fbdev/xen-fbfront.c 	info->page = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
page              421 drivers/video/fbdev/xen-fbfront.c 	if (!info->page)
page              542 drivers/video/fbdev/xen-fbfront.c 	free_page((unsigned long)info->page);
page              565 drivers/video/fbdev/xen-fbfront.c 		info->page->pd[i] = vmalloc_to_gfn(&info->gfns[i * epd]);
page              567 drivers/video/fbdev/xen-fbfront.c 	info->page->width = fb_info->var.xres;
page              568 drivers/video/fbdev/xen-fbfront.c 	info->page->height = fb_info->var.yres;
page              569 drivers/video/fbdev/xen-fbfront.c 	info->page->depth = fb_info->var.bits_per_pixel;
page              570 drivers/video/fbdev/xen-fbfront.c 	info->page->line_length = fb_info->fix.line_length;
page              571 drivers/video/fbdev/xen-fbfront.c 	info->page->mem_length = fb_info->fix.smem_len;
page              572 drivers/video/fbdev/xen-fbfront.c 	info->page->in_cons = info->page->in_prod = 0;
page              573 drivers/video/fbdev/xen-fbfront.c 	info->page->out_cons = info->page->out_prod = 0;
page              599 drivers/video/fbdev/xen-fbfront.c 			    virt_to_gfn(info->page));
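
[Note] The xen-fbfront entries above manipulate a single shared struct xenfb_page that carries both the framebuffer description (width, height, depth, line_length) and two event rings indexed by free-running prod/cons counters. The producer side must publish the record before advancing out_prod; a sketch following the fbif.h ring macros:

    static void xenfb_ring_send(struct xenfb_page *page,
                                const union xenfb_out_event *event)
    {
        u32 prod = page->out_prod;

        /* Caller has already checked that the ring is not full. */
        XENFB_OUT_RING_REF(page, prod) = *event;
        wmb();  /* record must be visible before the index */
        page->out_prod = prod + 1;
    }
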
page              151 drivers/virt/fsl_hypervisor.c 	struct page **pages = NULL;
page              229 drivers/virt/fsl_hypervisor.c 	pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
page               50 drivers/virt/vboxguest/vboxguest_core.c 	struct page **pages = NULL;
page              271 drivers/virt/vboxguest/vboxguest_core.c 	struct page **pages;
page              322 drivers/virt/vboxguest/vboxguest_core.c 	struct page **pages = gdev->mem_balloon.pages[chunk_idx];
page              376 drivers/virt/vboxguest/vboxguest_core.c 				     sizeof(struct page **), GFP_KERNEL);
page               36 drivers/virt/vboxguest/vboxguest_core.h 	struct page ***pages;
page               66 drivers/virt/vboxguest/vboxguest_core.h 	struct page *guest_mappings_dummy_page;
page              333 drivers/virt/vboxguest/vboxguest_utils.c 	struct page *page;
page              358 drivers/virt/vboxguest/vboxguest_utils.c 			page = vmalloc_to_page(buf);
page              360 drivers/virt/vboxguest/vboxguest_utils.c 			page = virt_to_page(buf);
page              362 drivers/virt/vboxguest/vboxguest_utils.c 		dst_pg_lst->pages[i] = page_to_phys(page);
page              123 drivers/virtio/virtio_balloon.c static u32 page_to_balloon_pfn(struct page *page)
page              125 drivers/virtio/virtio_balloon.c 	unsigned long pfn = page_to_pfn(page);
page              156 drivers/virtio/virtio_balloon.c 			  __virtio32 pfns[], struct page *page)
page              168 drivers/virtio/virtio_balloon.c 					  page_to_balloon_pfn(page) + i);
page              175 drivers/virtio/virtio_balloon.c 	struct page *page;
page              183 drivers/virtio/virtio_balloon.c 		struct page *page = balloon_page_alloc();
page              185 drivers/virtio/virtio_balloon.c 		if (!page) {
page              194 drivers/virtio/virtio_balloon.c 		balloon_page_push(&pages, page);
page              201 drivers/virtio/virtio_balloon.c 	while ((page = balloon_page_pop(&pages))) {
page              202 drivers/virtio/virtio_balloon.c 		balloon_page_enqueue(&vb->vb_dev_info, page);
page              204 drivers/virtio/virtio_balloon.c 		set_page_pfns(vb, vb->pfns + vb->num_pfns, page);
page              208 drivers/virtio/virtio_balloon.c 			adjust_managed_page_count(page, -1);
page              224 drivers/virtio/virtio_balloon.c 	struct page *page, *next;
page              226 drivers/virtio/virtio_balloon.c 	list_for_each_entry_safe(page, next, pages, lru) {
page              229 drivers/virtio/virtio_balloon.c 			adjust_managed_page_count(page, 1);
page              230 drivers/virtio/virtio_balloon.c 		list_del(&page->lru);
page              231 drivers/virtio/virtio_balloon.c 		put_page(page); /* balloon reference */
page              238 drivers/virtio/virtio_balloon.c 	struct page *page;
page              250 drivers/virtio/virtio_balloon.c 		page = balloon_page_dequeue(vb_dev_info);
page              251 drivers/virtio/virtio_balloon.c 		if (!page)
page              253 drivers/virtio/virtio_balloon.c 		set_page_pfns(vb, vb->pfns + vb->num_pfns, page);
page              254 drivers/virtio/virtio_balloon.c 		list_add(&page->lru, &pages);
page              376 drivers/virtio/virtio_balloon.c 	struct page *page;
page              381 drivers/virtio/virtio_balloon.c 		page = balloon_page_pop(&vb->free_page_list);
page              382 drivers/virtio/virtio_balloon.c 		if (!page)
page              384 drivers/virtio/virtio_balloon.c 		free_pages((unsigned long)page_address(page),
page              579 drivers/virtio/virtio_balloon.c 	struct page *page;
page              588 drivers/virtio/virtio_balloon.c 	page = alloc_pages(VIRTIO_BALLOON_FREE_PAGE_ALLOC_FLAG,
page              594 drivers/virtio/virtio_balloon.c 	if (!page)
page              597 drivers/virtio/virtio_balloon.c 	p = page_address(page);
page              609 drivers/virtio/virtio_balloon.c 		balloon_page_push(&vb->free_page_list, page);
page              709 drivers/virtio/virtio_balloon.c 		struct page *newpage, struct page *page, enum migrate_mode mode)
page              734 drivers/virtio/virtio_balloon.c 	    page_zone(page) != page_zone(newpage)) {
page              735 drivers/virtio/virtio_balloon.c 		adjust_managed_page_count(page, 1);
page              751 drivers/virtio/virtio_balloon.c 	balloon_page_delete(page);
page              754 drivers/virtio/virtio_balloon.c 	set_page_pfns(vb, vb->pfns, page);
page              759 drivers/virtio/virtio_balloon.c 	put_page(page); /* balloon reference */
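
[Note] Several virtio_balloon entries above funnel through set_page_pfns(): the balloon protocol speaks in 4K frames, so one kernel page contributes VIRTIO_BALLOON_PAGES_PER_PAGE consecutive pfn entries, each byte-swapped for the device. A reconstruction consistent with the indexed lines:

    static void set_page_pfns(struct virtio_balloon *vb,
                              __virtio32 pfns[], struct page *page)
    {
        unsigned int i;

        /* One kernel page covers several balloon-sized (4K) frames. */
        for (i = 0; i < VIRTIO_BALLOON_PAGES_PER_PAGE; i++)
            pfns[i] = cpu_to_virtio32(vb->vdev,
                                      page_to_balloon_pfn(page) + i);
    }
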
page              159 drivers/xen/balloon.c static void balloon_append(struct page *page)
page              161 drivers/xen/balloon.c 	__SetPageOffline(page);
page              164 drivers/xen/balloon.c 	if (PageHighMem(page)) {
page              165 drivers/xen/balloon.c 		list_add_tail(&page->lru, &ballooned_pages);
page              168 drivers/xen/balloon.c 		list_add(&page->lru, &ballooned_pages);
page              175 drivers/xen/balloon.c static struct page *balloon_retrieve(bool require_lowmem)
page              177 drivers/xen/balloon.c 	struct page *page;
page              182 drivers/xen/balloon.c 	page = list_entry(ballooned_pages.next, struct page, lru);
page              183 drivers/xen/balloon.c 	if (require_lowmem && PageHighMem(page))
page              185 drivers/xen/balloon.c 	list_del(&page->lru);
page              187 drivers/xen/balloon.c 	if (PageHighMem(page))
page              192 drivers/xen/balloon.c 	__ClearPageOffline(page);
page              193 drivers/xen/balloon.c 	return page;
page              196 drivers/xen/balloon.c static struct page *balloon_next_page(struct page *page)
page              198 drivers/xen/balloon.c 	struct list_head *next = page->lru.next;
page              201 drivers/xen/balloon.c 	return list_entry(next, struct page, lru);
page              367 drivers/xen/balloon.c static void xen_online_page(struct page *page, unsigned int order)
page              370 drivers/xen/balloon.c 	unsigned long start_pfn = page_to_pfn(page);
page              371 drivers/xen/balloon.c 	struct page *p;
page              418 drivers/xen/balloon.c 	struct page   *page;
page              423 drivers/xen/balloon.c 	page = list_first_entry_or_null(&ballooned_pages, struct page, lru);
page              425 drivers/xen/balloon.c 		if (!page) {
page              430 drivers/xen/balloon.c 		frame_list[i] = page_to_xen_pfn(page);
page              431 drivers/xen/balloon.c 		page = balloon_next_page(page);
page              439 drivers/xen/balloon.c 		page = balloon_retrieve(false);
page              440 drivers/xen/balloon.c 		BUG_ON(page == NULL);
page              442 drivers/xen/balloon.c 		xenmem_reservation_va_mapping_update(1, &page, &frame_list[i]);
page              445 drivers/xen/balloon.c 		free_reserved_page(page);
page              457 drivers/xen/balloon.c 	struct page *page, *tmp;
page              465 drivers/xen/balloon.c 		page = alloc_page(gfp);
page              466 drivers/xen/balloon.c 		if (page == NULL) {
page              471 drivers/xen/balloon.c 		adjust_managed_page_count(page, -1);
page              472 drivers/xen/balloon.c 		xenmem_reservation_scrub_page(page);
page              473 drivers/xen/balloon.c 		list_add(&page->lru, &pages);
page              490 drivers/xen/balloon.c 	list_for_each_entry_safe(page, tmp, &pages, lru) {
page              491 drivers/xen/balloon.c 		frame_list[i++] = xen_page_to_gfn(page);
page              493 drivers/xen/balloon.c 		xenmem_reservation_va_mapping_reset(1, &page);
page              495 drivers/xen/balloon.c 		list_del(&page->lru);
page              497 drivers/xen/balloon.c 		balloon_append(page);
page              597 drivers/xen/balloon.c int alloc_xenballooned_pages(int nr_pages, struct page **pages)
page              600 drivers/xen/balloon.c 	struct page *page;
page              608 drivers/xen/balloon.c 		page = balloon_retrieve(true);
page              609 drivers/xen/balloon.c 		if (page) {
page              610 drivers/xen/balloon.c 			pages[pgno++] = page;
page              619 drivers/xen/balloon.c 				ret = xen_alloc_p2m_entry(page_to_pfn(page));
page              644 drivers/xen/balloon.c void free_xenballooned_pages(int nr_pages, struct page **pages)
page                9 drivers/xen/biomerge.c 			       const struct page *page)
page               13 drivers/xen/biomerge.c 	unsigned long bfn2 = pfn_to_bfn(page_to_pfn(page));
page               92 drivers/xen/gntalloc.c 	struct page *page;	     /* The shared page */
page              140 drivers/xen/gntalloc.c 		gref->page = alloc_page(GFP_KERNEL|__GFP_ZERO);
page              141 drivers/xen/gntalloc.c 		if (!gref->page) {
page              148 drivers/xen/gntalloc.c 						 xen_page_to_gfn(gref->page),
page              187 drivers/xen/gntalloc.c 		uint8_t *tmp = kmap(gref->page);
page              189 drivers/xen/gntalloc.c 		kunmap(gref->page);
page              211 drivers/xen/gntalloc.c 	if (gref->page)
page              212 drivers/xen/gntalloc.c 		__free_page(gref->page);
page              549 drivers/xen/gntalloc.c 				gref->page);
page               64 drivers/xen/gntdev-common.h 	struct page **pages;
page               60 drivers/xen/gntdev-dmabuf.c 	struct page **pages;
page              201 drivers/xen/gntdev-dmabuf.c dmabuf_pages_to_sgt(struct page **pages, unsigned int nr_pages)
page              382 drivers/xen/gntdev-dmabuf.c 	struct page **pages;
page              522 drivers/xen/gntdev-dmabuf.c dmabuf_imp_grant_foreign_access(struct page **pages, u32 *refs,
page              657 drivers/xen/gntdev-dmabuf.c 		struct page *page = sg_page_iter_page(&sg_iter);
page              663 drivers/xen/gntdev-dmabuf.c 		if (!pfn_valid(page_to_pfn(page))) {
page              668 drivers/xen/gntdev-dmabuf.c 		gntdev_dmabuf->pages[i++] = page;
page              462 drivers/xen/gntdev.c static struct page *gntdev_vma_find_special_page(struct vm_area_struct *vma,
page              830 drivers/xen/gntdev.c 	struct page *pages[GNTDEV_COPY_BATCH];
page              840 drivers/xen/gntdev.c 	struct page *page;
page              844 drivers/xen/gntdev.c 	ret = get_user_pages_fast(addr, 1, writeable ? FOLL_WRITE : 0, &page);
page              848 drivers/xen/gntdev.c 	batch->pages[batch->nr_pages++] = page;
page              850 drivers/xen/gntdev.c 	xen_pfn = page_to_xen_pfn(page) + XEN_PFN_DOWN(addr & ~PAGE_MASK);
page              362 drivers/xen/grant-table.c 	struct page *page;
page              386 drivers/xen/grant-table.c 			if (entry->page) {
page              388 drivers/xen/grant-table.c 					 entry->ref, page_to_pfn(entry->page));
page              389 drivers/xen/grant-table.c 				put_page(entry->page);
page              414 drivers/xen/grant-table.c 				struct page *page)
page              424 drivers/xen/grant-table.c 		entry->page = page;
page              436 drivers/xen/grant-table.c 	       what, ref, page ? page_to_pfn(page) : -1);
page              440 drivers/xen/grant-table.c 			       unsigned long page)
page              444 drivers/xen/grant-table.c 		if (page != 0)
page              445 drivers/xen/grant-table.c 			put_page(virt_to_page(page));
page              448 drivers/xen/grant-table.c 				    page ? virt_to_page(page) : NULL);
page              776 drivers/xen/grant-table.c int gnttab_pages_set_private(int nr_pages, struct page **pages)
page              802 drivers/xen/grant-table.c int gnttab_alloc_pages(int nr_pages, struct page **pages)
page              818 drivers/xen/grant-table.c void gnttab_pages_clear_private(int nr_pages, struct page **pages)
page              838 drivers/xen/grant-table.c void gnttab_free_pages(int nr_pages, struct page **pages)
page              873 drivers/xen/grant-table.c 		struct page *page = pfn_to_page(pfn);
page              875 drivers/xen/grant-table.c 		args->pages[i] = page;
page              876 drivers/xen/grant-table.c 		args->frames[i] = xen_page_to_gfn(page);
page              877 drivers/xen/grant-table.c 		xenmem_reservation_scrub_page(page);
page              984 drivers/xen/grant-table.c void gnttab_foreach_grant_in_range(struct page *page,
page              997 drivers/xen/grant-table.c 	xen_pfn = page_to_xen_pfn(page) + XEN_PFN_DOWN(offset);
page             1010 drivers/xen/grant-table.c void gnttab_foreach_grant(struct page **pages,
page             1034 drivers/xen/grant-table.c 		    struct page **pages, unsigned int count)
page             1079 drivers/xen/grant-table.c 		      struct page **pages, unsigned int count)
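
[Note] gnttab_foreach_grant_in_range(), indexed above, exists because a CPU page can be larger than Xen's 4K grant granule (for example 64K pages on arm64). A hedged sketch of the walk it performs; the exact clamping in the real helper may differ:

    static void foreach_grant_in_range(struct page *page, unsigned int offset,
                                       unsigned int len, xen_grant_fn_t fn,
                                       void *data)
    {
        unsigned long xen_pfn = page_to_xen_pfn(page) + XEN_PFN_DOWN(offset);
        unsigned int goffset = xen_offset_in_page(offset);

        while (len) {
            unsigned int glen = min_t(unsigned int,
                                      XEN_PAGE_SIZE - goffset, len);

            fn(pfn_to_gfn(xen_pfn), goffset, glen, data);
            goffset = 0;  /* frames after the first start at offset 0 */
            xen_pfn++;
            len -= glen;
        }
    }
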
page               30 drivers/xen/mem-reservation.c 					    struct page **pages,
page               36 drivers/xen/mem-reservation.c 		struct page *page = pages[i];
page               37 drivers/xen/mem-reservation.c 		unsigned long pfn = page_to_pfn(page);
page               39 drivers/xen/mem-reservation.c 		BUG_ON(!page);
page               50 drivers/xen/mem-reservation.c 		if (!PageHighMem(page)) {
page               64 drivers/xen/mem-reservation.c 					   struct page **pages)
page               69 drivers/xen/mem-reservation.c 		struct page *page = pages[i];
page               70 drivers/xen/mem-reservation.c 		unsigned long pfn = page_to_pfn(page);
page               78 drivers/xen/mem-reservation.c 		if (!PageHighMem(page)) {
page               34 drivers/xen/privcmd-buf.c 	struct page *pages[];
page               95 drivers/xen/privcmd.c 	struct page *p, *n;
page              124 drivers/xen/privcmd.c 			struct page *page = alloc_page(GFP_KERNEL);
page              127 drivers/xen/privcmd.c 			if (page == NULL)
page              130 drivers/xen/privcmd.c 			pagedata = page_address(page);
page              132 drivers/xen/privcmd.c 			list_add_tail(&page->lru, pagelist);
page              170 drivers/xen/privcmd.c 			struct page *page;
page              172 drivers/xen/privcmd.c 			page = list_entry(pos, struct page, lru);
page              173 drivers/xen/privcmd.c 			pagedata = page_address(page);
page              202 drivers/xen/privcmd.c 		struct page *page;
page              206 drivers/xen/privcmd.c 		page = list_entry(pos, struct page, lru);
page              207 drivers/xen/privcmd.c 		pagedata = page_address(page);
page              284 drivers/xen/privcmd.c 		struct page *page = list_first_entry(&pagelist,
page              285 drivers/xen/privcmd.c 						     struct page, lru);
page              286 drivers/xen/privcmd.c 		struct privcmd_mmap_entry *msg = page_address(page);
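
[Note] The privcmd entries above gather a variable-length user array into a chain of whole pages strung on a list_head, later walked with page_address() to recover each chunk. A sketch of the gather side under those assumptions:

    static int gather_into_pagelist(struct list_head *pagelist,
                                    const void __user *udata, size_t len)
    {
        while (len) {
            struct page *page = alloc_page(GFP_KERNEL);
            size_t chunk = min_t(size_t, len, PAGE_SIZE);

            if (!page)
                return -ENOMEM;
            if (copy_from_user(page_address(page), udata, chunk)) {
                __free_page(page);
                return -EFAULT;
            }
            list_add_tail(&page->lru, pagelist);
            udata += chunk;
            len -= chunk;
        }
        return 0;
    }

On failure the caller unwinds the partially built list with list_for_each_entry_safe, which is what the 'struct page *p, *n' entry above is for.
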
page              342 drivers/xen/privcmd.c 	struct page **pages = vma->vm_private_data;
page              343 drivers/xen/privcmd.c 	struct page **cur_pages = NULL;
page              424 drivers/xen/privcmd.c 	struct page **pages;
page              585 drivers/xen/privcmd.c 	struct page *pages[], unsigned int nr_pages)
page              612 drivers/xen/privcmd.c static void unlock_pages(struct page *pages[], unsigned int nr_pages)
page              631 drivers/xen/privcmd.c 	struct page **pages = NULL;
page              761 drivers/xen/privcmd.c 		struct page **pages;
page              895 drivers/xen/privcmd.c 	struct page **pages = vma->vm_private_data;
page              308 drivers/xen/pvcalls-back.c 	void *page;
page              319 drivers/xen/pvcalls-back.c 	ret = xenbus_map_ring_valloc(fedata->dev, &ref, 1, &page);
page              322 drivers/xen/pvcalls-back.c 	map->ring = page;
page              332 drivers/xen/pvcalls-back.c 				     (1 << map->ring_order), &page);
page              335 drivers/xen/pvcalls-back.c 	map->bytes = page;
page              364 drivers/xen/swiotlb-xen.c static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
page              369 drivers/xen/swiotlb-xen.c 	phys_addr_t map, phys = page_to_phys(page) + offset;
page              199 drivers/xen/xen-front-pgdir-shbuf.c #define xen_page_to_vaddr(page) \
page              200 drivers/xen/xen-front-pgdir-shbuf.c 	((uintptr_t)pfn_to_kaddr(page_to_xen_pfn(page)))
page              134 drivers/xen/xen-scsiback.c 	struct page *pages[VSCSI_MAX_GRANTS];
page              211 drivers/xen/xen-scsiback.c static void put_free_pages(struct page **page, int num)
page              220 drivers/xen/xen-scsiback.c 		gnttab_free_pages(n, page + num - n);
page              225 drivers/xen/xen-scsiback.c 		list_add(&page[i]->lru, &scsiback_free_pages);
page              230 drivers/xen/xen-scsiback.c static int get_free_page(struct page **page)
page              237 drivers/xen/xen-scsiback.c 		return gnttab_alloc_pages(1, page);
page              239 drivers/xen/xen-scsiback.c 	page[0] = list_first_entry(&scsiback_free_pages, struct page, lru);
page              240 drivers/xen/xen-scsiback.c 	list_del(&page[0]->lru);
page              246 drivers/xen/xen-scsiback.c static unsigned long vaddr_page(struct page *page)
page              248 drivers/xen/xen-scsiback.c 	unsigned long pfn = page_to_pfn(page);
page              272 drivers/xen/xen-scsiback.c 	struct page *pages[VSCSI_GRANT_BATCH];
page              418 drivers/xen/xen-scsiback.c 	struct page **pg, grant_handle_t *grant, int cnt)
page              441 drivers/xen/xen-scsiback.c 			struct scsiif_request_segment *seg, struct page **pg,
page              477 drivers/xen/xen-scsiback.c 	struct page **pg;
page             1456 drivers/xen/xen-scsiback.c 					     char *page)
page             1464 drivers/xen/xen-scsiback.c 	rb = snprintf(page, PAGE_SIZE, "%s\n", tpg->param_alias);
page             1471 drivers/xen/xen-scsiback.c 					      const char *page, size_t count)
page             1478 drivers/xen/xen-scsiback.c 	if (strlen(page) >= VSCSI_NAMELEN) {
page             1479 drivers/xen/xen-scsiback.c 		pr_err("param alias: %s, exceeds max: %d\n", page,
page             1485 drivers/xen/xen-scsiback.c 	len = snprintf(tpg->param_alias, VSCSI_NAMELEN, "%s", page);
page             1592 drivers/xen/xen-scsiback.c static ssize_t scsiback_tpg_nexus_show(struct config_item *item, char *page)
page             1606 drivers/xen/xen-scsiback.c 	ret = snprintf(page, PAGE_SIZE, "%s\n",
page             1614 drivers/xen/xen-scsiback.c 		const char *page, size_t count)
page             1625 drivers/xen/xen-scsiback.c 	if (!strncmp(page, "NULL", 4)) {
page             1634 drivers/xen/xen-scsiback.c 	if (strlen(page) >= VSCSI_NAMELEN) {
page             1636 drivers/xen/xen-scsiback.c 			page, VSCSI_NAMELEN);
page             1639 drivers/xen/xen-scsiback.c 	snprintf(&i_port[0], VSCSI_NAMELEN, "%s", page);
page             1696 drivers/xen/xen-scsiback.c scsiback_wwn_version_show(struct config_item *item, char *page)
page             1698 drivers/xen/xen-scsiback.c 	return sprintf(page, "xen-pvscsi fabric module %s on %s/%s on "
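
[Note] The configfs show/store pairs indexed for drivers/vhost/scsi.c and xen-scsiback.c follow the subsystem convention that 'page' is a PAGE_SIZE kernel buffer: show() snprintfs into it, store() parses from it, and every copy is bounded by the target size. A hedged sketch of such a pair; demo_alias is a made-up backing field:

    static char demo_alias[256];

    static ssize_t demo_tpg_alias_show(struct config_item *item, char *page)
    {
        /* 'page' is a whole page; bound the output by PAGE_SIZE. */
        return snprintf(page, PAGE_SIZE, "%s\n", demo_alias);
    }

    static ssize_t demo_tpg_alias_store(struct config_item *item,
                                        const char *page, size_t count)
    {
        if (strlen(page) >= sizeof(demo_alias))
            return -EINVAL;
        snprintf(demo_alias, sizeof(demo_alias), "%s", page);
        return count;
    }
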
page             1878 drivers/xen/xen-scsiback.c 	struct page *page;
page             1881 drivers/xen/xen-scsiback.c 		if (get_free_page(&page))
page             1883 drivers/xen/xen-scsiback.c 		gnttab_free_pages(1, &page);
page               63 drivers/xen/xenbus/xenbus_client.c 			struct page *pages[XENBUS_MAX_RING_PAGES];
page              714 drivers/xen/xenbus/xenbus_probe.c 	unsigned long page = 0;
page              718 drivers/xen/xenbus/xenbus_probe.c 	page = get_zeroed_page(GFP_KERNEL);
page              719 drivers/xen/xenbus/xenbus_probe.c 	if (!page)
page              722 drivers/xen/xenbus/xenbus_probe.c 	xen_store_gfn = virt_to_gfn((void *)page);
page              739 drivers/xen/xenbus/xenbus_probe.c 	if (page != 0)
page              740 drivers/xen/xenbus/xenbus_probe.c 		free_page(page);
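
[Note] The xenbus_probe entries above show the get_zeroed_page() lifecycle: the page is held as an unsigned long kernel address, published to the hypervisor by frame number, and freed on the error path. A sketch; share_gfn() is a hypothetical stand-in for the real consumer of the frame:

    static int setup_store_page(void)
    {
        unsigned long page = get_zeroed_page(GFP_KERNEL);
        int err;

        if (!page)
            return -ENOMEM;
        /* virt_to_gfn() wants a pointer, hence the cast back. */
        err = share_gfn(virt_to_gfn((void *)page));  /* hypothetical */
        if (err)
            free_page(page);
        return err;
    }
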
page               48 drivers/xen/xlate_mmu.c static void xen_for_each_gfn(struct page **pages, unsigned nr_gfn,
page               52 drivers/xen/xlate_mmu.c 	struct page *page;
page               57 drivers/xen/xlate_mmu.c 			page = pages[i / XEN_PFN_PER_PAGE];
page               58 drivers/xen/xlate_mmu.c 			xen_pfn = page_to_xen_pfn(page);
page               71 drivers/xen/xlate_mmu.c 	struct page **pages;
page               99 drivers/xen/xlate_mmu.c 	struct page *page = info->pages[info->index++];
page              100 drivers/xen/xlate_mmu.c 	pte_t pte = pte_mkspecial(pfn_pte(page_to_pfn(page), info->prot));
page              113 drivers/xen/xlate_mmu.c 	xen_for_each_gfn(&page, nr_gfn, setup_hparams, info);
page              148 drivers/xen/xlate_mmu.c 			      struct page **pages)
page              184 drivers/xen/xlate_mmu.c 			      int nr, struct page **pages)
page              217 drivers/xen/xlate_mmu.c 	struct page **pages;
page              268 drivers/xen/xlate_mmu.c 	struct page **pages;
page              276 drivers/xen/xlate_mmu.c 	struct page *page = r->pages[r->i];
page              277 drivers/xen/xlate_mmu.c 	pte_t pte = pte_mkspecial(pfn_pte(page_to_pfn(page), r->prot));
page              203 fs/9p/cache.c  int __v9fs_fscache_release_page(struct page *page, gfp_t gfp)
page              205 fs/9p/cache.c  	struct inode *inode = page->mapping->host;
page              210 fs/9p/cache.c  	return fscache_maybe_release_page(v9inode->fscache, page, gfp);
page              213 fs/9p/cache.c  void __v9fs_fscache_invalidate_page(struct page *page)
page              215 fs/9p/cache.c  	struct inode *inode = page->mapping->host;
page              220 fs/9p/cache.c  	if (PageFsCache(page)) {
page              221 fs/9p/cache.c  		fscache_wait_on_page_write(v9inode->fscache, page);
page              222 fs/9p/cache.c  		BUG_ON(!PageLocked(page));
page              223 fs/9p/cache.c  		fscache_uncache_page(v9inode->fscache, page);
page              227 fs/9p/cache.c  static void v9fs_vfs_readpage_complete(struct page *page, void *data,
page              231 fs/9p/cache.c  		SetPageUptodate(page);
page              233 fs/9p/cache.c  	unlock_page(page);
page              243 fs/9p/cache.c  int __v9fs_readpage_from_fscache(struct inode *inode, struct page *page)
page              248 fs/9p/cache.c  	p9_debug(P9_DEBUG_FSC, "inode %p page %p\n", inode, page);
page              253 fs/9p/cache.c  					 page,
page              316 fs/9p/cache.c  void __v9fs_readpage_to_fscache(struct inode *inode, struct page *page)
page              321 fs/9p/cache.c  	p9_debug(P9_DEBUG_FSC, "inode %p page %p\n", inode, page);
page              322 fs/9p/cache.c  	ret = fscache_write_page(v9inode->fscache, page,
page              326 fs/9p/cache.c  		v9fs_uncache_page(inode, page);
page              332 fs/9p/cache.c  void __v9fs_fscache_wait_on_page_write(struct inode *inode, struct page *page)
page              335 fs/9p/cache.c  	p9_debug(P9_DEBUG_FSC, "inode %p page %p\n", inode, page);
page              336 fs/9p/cache.c  	if (PageFsCache(page))
page              337 fs/9p/cache.c  		fscache_wait_on_page_write(v9inode->fscache, page);
page               30 fs/9p/cache.h  extern int __v9fs_fscache_release_page(struct page *page, gfp_t gfp);
page               31 fs/9p/cache.h  extern void __v9fs_fscache_invalidate_page(struct page *page);
page               33 fs/9p/cache.h  					struct page *page);
page               38 fs/9p/cache.h  extern void __v9fs_readpage_to_fscache(struct inode *inode, struct page *page);
page               40 fs/9p/cache.h  					      struct page *page);
page               42 fs/9p/cache.h  static inline int v9fs_fscache_release_page(struct page *page,
page               45 fs/9p/cache.h  	return __v9fs_fscache_release_page(page, gfp);
page               48 fs/9p/cache.h  static inline void v9fs_fscache_invalidate_page(struct page *page)
page               50 fs/9p/cache.h  	__v9fs_fscache_invalidate_page(page);
page               54 fs/9p/cache.h  					     struct page *page)
page               56 fs/9p/cache.h  	return __v9fs_readpage_from_fscache(inode, page);
page               69 fs/9p/cache.h  					    struct page *page)
page               71 fs/9p/cache.h  	if (PageFsCache(page))
page               72 fs/9p/cache.h  		__v9fs_readpage_to_fscache(inode, page);
page               75 fs/9p/cache.h  static inline void v9fs_uncache_page(struct inode *inode, struct page *page)
page               78 fs/9p/cache.h  	fscache_uncache_page(v9inode->fscache, page);
page               79 fs/9p/cache.h  	BUG_ON(PageFsCache(page));
page               83 fs/9p/cache.h  						   struct page *page)
page               85 fs/9p/cache.h  	return __v9fs_fscache_wait_on_page_write(inode, page);
page              102 fs/9p/cache.h  static inline int v9fs_fscache_release_page(struct page *page,
page              107 fs/9p/cache.h  static inline void v9fs_fscache_invalidate_page(struct page *page) {}
page              110 fs/9p/cache.h  					     struct page *page)
page              124 fs/9p/cache.h  					    struct page *page)
page              127 fs/9p/cache.h  static inline void v9fs_uncache_page(struct inode *inode, struct page *page)
page              131 fs/9p/cache.h  						   struct page *page)
page               38 fs/9p/vfs_addr.c static int v9fs_fid_readpage(void *data, struct page *page)
page               41 fs/9p/vfs_addr.c 	struct inode *inode = page->mapping->host;
page               42 fs/9p/vfs_addr.c 	struct bio_vec bvec = {.bv_page = page, .bv_len = PAGE_SIZE};
page               48 fs/9p/vfs_addr.c 	BUG_ON(!PageLocked(page));
page               50 fs/9p/vfs_addr.c 	retval = v9fs_readpage_from_fscache(inode, page);
page               56 fs/9p/vfs_addr.c 	retval = p9_client_read(fid, page_offset(page), &to, &err);
page               58 fs/9p/vfs_addr.c 		v9fs_uncache_page(inode, page);
page               63 fs/9p/vfs_addr.c 	zero_user(page, retval, PAGE_SIZE - retval);
page               64 fs/9p/vfs_addr.c 	flush_dcache_page(page);
page               65 fs/9p/vfs_addr.c 	SetPageUptodate(page);
page               67 fs/9p/vfs_addr.c 	v9fs_readpage_to_fscache(inode, page);
page               71 fs/9p/vfs_addr.c 	unlock_page(page);
page               83 fs/9p/vfs_addr.c static int v9fs_vfs_readpage(struct file *filp, struct page *page)
page               85 fs/9p/vfs_addr.c 	return v9fs_fid_readpage(filp->private_data, page);
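
[Note] The v9fs entries above trace the canonical ->readpage() shape: fill the page from the transport, zero the tail beyond what was read, flush, mark uptodate, and unlock on every path. A generic sketch; demo_read_into() is a made-up read primitive returning bytes read or a negative error:

    static int demo_readpage(struct file *file, struct page *page)
    {
        int len = demo_read_into(file, page);  /* hypothetical */

        if (len < 0) {
            unlock_page(page);
            return len;
        }
        zero_user(page, len, PAGE_SIZE - len);  /* clear past EOF */
        flush_dcache_page(page);
        SetPageUptodate(page);
        unlock_page(page);  /* readpage must always unlock */
        return 0;
    }
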
page              123 fs/9p/vfs_addr.c static int v9fs_release_page(struct page *page, gfp_t gfp)
page              125 fs/9p/vfs_addr.c 	if (PagePrivate(page))
page              127 fs/9p/vfs_addr.c 	return v9fs_fscache_release_page(page, gfp);
page              137 fs/9p/vfs_addr.c static void v9fs_invalidate_page(struct page *page, unsigned int offset,
page              145 fs/9p/vfs_addr.c 		v9fs_fscache_invalidate_page(page);
page              148 fs/9p/vfs_addr.c static int v9fs_vfs_writepage_locked(struct page *page)
page              150 fs/9p/vfs_addr.c 	struct inode *inode = page->mapping->host;
page              157 fs/9p/vfs_addr.c 	if (page->index == size >> PAGE_SHIFT)
page              162 fs/9p/vfs_addr.c 	bvec.bv_page = page;
page              170 fs/9p/vfs_addr.c 	set_page_writeback(page);
page              172 fs/9p/vfs_addr.c 	p9_client_write(v9inode->writeback_fid, page_offset(page), &from, &err);
page              174 fs/9p/vfs_addr.c 	end_page_writeback(page);
page              178 fs/9p/vfs_addr.c static int v9fs_vfs_writepage(struct page *page, struct writeback_control *wbc)
page              182 fs/9p/vfs_addr.c 	p9_debug(P9_DEBUG_VFS, "page %p\n", page);
page              184 fs/9p/vfs_addr.c 	retval = v9fs_vfs_writepage_locked(page);
page              187 fs/9p/vfs_addr.c 			redirty_page_for_writepage(wbc, page);
page              190 fs/9p/vfs_addr.c 			SetPageError(page);
page              191 fs/9p/vfs_addr.c 			mapping_set_error(page->mapping, retval);
page              196 fs/9p/vfs_addr.c 	unlock_page(page);
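
[Note] v9fs_vfs_writepage(), indexed above, wraps a locked-page writer and maps its errors onto writeback conventions: -EAGAIN means redirty and retry later, anything else marks the page and its mapping with the error. A sketch of that wrapper around a hypothetical demo_writepage_locked():

    static int demo_writepage(struct page *page, struct writeback_control *wbc)
    {
        int err = demo_writepage_locked(page);  /* hypothetical */

        if (err < 0) {
            if (err == -EAGAIN) {
                redirty_page_for_writepage(wbc, page);
                err = 0;
            } else {
                SetPageError(page);
                mapping_set_error(page->mapping, err);
            }
        }
        unlock_page(page);
        return err;
    }
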
page              205 fs/9p/vfs_addr.c static int v9fs_launder_page(struct page *page)
page              208 fs/9p/vfs_addr.c 	struct inode *inode = page->mapping->host;
page              210 fs/9p/vfs_addr.c 	v9fs_fscache_wait_on_page_write(inode, page);
page              211 fs/9p/vfs_addr.c 	if (clear_page_dirty_for_io(page)) {
page              212 fs/9p/vfs_addr.c 		retval = v9fs_vfs_writepage_locked(page);
page              257 fs/9p/vfs_addr.c 			    struct page **pagep, void **fsdata)
page              260 fs/9p/vfs_addr.c 	struct page *page;
page              270 fs/9p/vfs_addr.c 	page = grab_cache_page_write_begin(mapping, index, flags);
page              271 fs/9p/vfs_addr.c 	if (!page) {
page              276 fs/9p/vfs_addr.c 	if (PageUptodate(page))
page              282 fs/9p/vfs_addr.c 	retval = v9fs_fid_readpage(v9inode->writeback_fid, page);
page              283 fs/9p/vfs_addr.c 	put_page(page);
page              287 fs/9p/vfs_addr.c 	*pagep = page;
page              293 fs/9p/vfs_addr.c 			  struct page *page, void *fsdata)
page              296 fs/9p/vfs_addr.c 	struct inode *inode = page->mapping->host;
page              300 fs/9p/vfs_addr.c 	if (!PageUptodate(page)) {
page              305 fs/9p/vfs_addr.c 			SetPageUptodate(page);
page              316 fs/9p/vfs_addr.c 	set_page_dirty(page);
page              318 fs/9p/vfs_addr.c 	unlock_page(page);
page              319 fs/9p/vfs_addr.c 	put_page(page);
page              546 fs/9p/vfs_file.c 	struct page *page = vmf->page;
page              552 fs/9p/vfs_file.c 		 page, (unsigned long)filp->private_data);
page              559 fs/9p/vfs_file.c 	v9fs_fscache_wait_on_page_write(inode, page);
page              561 fs/9p/vfs_file.c 	lock_page(page);
page              562 fs/9p/vfs_file.c 	if (page->mapping != inode->i_mapping)
page              564 fs/9p/vfs_file.c 	wait_for_stable_page(page);
page              568 fs/9p/vfs_file.c 	unlock_page(page);
page               35 fs/adfs/inode.c static int adfs_writepage(struct page *page, struct writeback_control *wbc)
page               37 fs/adfs/inode.c 	return block_write_full_page(page, adfs_get_block, wbc);
page               40 fs/adfs/inode.c static int adfs_readpage(struct file *file, struct page *page)
page               42 fs/adfs/inode.c 	return block_read_full_page(page, adfs_get_block);
page               55 fs/adfs/inode.c 			struct page **pagep, void **fsdata)
page              372 fs/affs/file.c static int affs_writepage(struct page *page, struct writeback_control *wbc)
page              374 fs/affs/file.c 	return block_write_full_page(page, affs_get_block, wbc);
page              377 fs/affs/file.c static int affs_readpage(struct file *file, struct page *page)
page              379 fs/affs/file.c 	return block_read_full_page(page, affs_get_block);
page              417 fs/affs/file.c 			struct page **pagep, void **fsdata)
page              503 fs/affs/file.c affs_do_readpage_ofs(struct page *page, unsigned to, int create)
page              505 fs/affs/file.c 	struct inode *inode = page->mapping->host;
page              514 fs/affs/file.c 		 page->index, to);
page              517 fs/affs/file.c 	tmp = page->index << PAGE_SHIFT;
page              527 fs/affs/file.c 		data = kmap_atomic(page);
page              535 fs/affs/file.c 	flush_dcache_page(page);
page              611 fs/affs/file.c affs_readpage_ofs(struct file *file, struct page *page)
page              613 fs/affs/file.c 	struct inode *inode = page->mapping->host;
page              617 fs/affs/file.c 	pr_debug("%s(%lu, %ld)\n", __func__, inode->i_ino, page->index);
page              619 fs/affs/file.c 	if (((page->index + 1) << PAGE_SHIFT) > inode->i_size) {
page              621 fs/affs/file.c 		memset(page_address(page) + to, 0, PAGE_SIZE - to);
page              624 fs/affs/file.c 	err = affs_do_readpage_ofs(page, to, 0);
page              626 fs/affs/file.c 		SetPageUptodate(page);
page              627 fs/affs/file.c 	unlock_page(page);
page              633 fs/affs/file.c 				struct page **pagep, void **fsdata)
page              636 fs/affs/file.c 	struct page *page;
page              652 fs/affs/file.c 	page = grab_cache_page_write_begin(mapping, index, flags);
page              653 fs/affs/file.c 	if (!page)
page              655 fs/affs/file.c 	*pagep = page;
page              657 fs/affs/file.c 	if (PageUptodate(page))
page              661 fs/affs/file.c 	err = affs_do_readpage_ofs(page, PAGE_SIZE, 1);
page              663 fs/affs/file.c 		unlock_page(page);
page              664 fs/affs/file.c 		put_page(page);
page              671 fs/affs/file.c 				struct page *page, void *fsdata)
page              693 fs/affs/file.c 	data = page_address(page);
page              697 fs/affs/file.c 	tmp = (page->index << PAGE_SHIFT) + from;
page              789 fs/affs/file.c 	SetPageUptodate(page);
page              793 fs/affs/file.c 	tmp = (page->index << PAGE_SHIFT) + from;
page              798 fs/affs/file.c 	unlock_page(page);
page              799 fs/affs/file.c 	put_page(page);
page              856 fs/affs/file.c 		struct page *page;
page              861 fs/affs/file.c 		res = mapping->a_ops->write_begin(NULL, mapping, isize, 0, 0, &page, &fsdata);
page              863 fs/affs/file.c 			res = mapping->a_ops->write_end(NULL, mapping, isize, 0, 0, page, fsdata);
page               14 fs/affs/symlink.c static int affs_symlink_readpage(struct file *file, struct page *page)
page               17 fs/affs/symlink.c 	struct inode *inode = page->mapping->host;
page               18 fs/affs/symlink.c 	char *link = page_address(page);
page               60 fs/affs/symlink.c 	SetPageUptodate(page);
page               61 fs/affs/symlink.c 	unlock_page(page);
page               64 fs/affs/symlink.c 	SetPageError(page);
page               65 fs/affs/symlink.c 	unlock_page(page);
page               43 fs/afs/dir.c   static int afs_dir_releasepage(struct page *page, gfp_t gfp_flags);
page               44 fs/afs/dir.c   static void afs_dir_invalidatepage(struct page *page, unsigned int offset,
page               47 fs/afs/dir.c   static int afs_dir_set_page_dirty(struct page *page)
page              110 fs/afs/dir.c   static bool afs_dir_check_page(struct afs_vnode *dvnode, struct page *page,
page              120 fs/afs/dir.c   	off = page_offset(page);
page              132 fs/afs/dir.c   	dbuf = kmap(page);
page              139 fs/afs/dir.c   			kunmap(page);
page              151 fs/afs/dir.c   	kunmap(page);
page              238 fs/afs/dir.c   	if (nr_inline > (PAGE_SIZE - sizeof(*req)) / sizeof(struct page *))
page              253 fs/afs/dir.c   		req->pages = kcalloc(nr_pages, sizeof(struct page *),
page              449 fs/afs/dir.c   	struct page *page;
page              477 fs/afs/dir.c   		page = req->pages[blkoff / PAGE_SIZE];
page              478 fs/afs/dir.c   		if (!page) {
page              482 fs/afs/dir.c   		mark_page_accessed(page);
page              486 fs/afs/dir.c   		dbuf = kmap(page);
page              494 fs/afs/dir.c   				kunmap(page);
page              502 fs/afs/dir.c   		kunmap(page);
page             2017 fs/afs/dir.c   static int afs_dir_releasepage(struct page *page, gfp_t gfp_flags)
page             2019 fs/afs/dir.c   	struct afs_vnode *dvnode = AFS_FS_I(page->mapping->host);
page             2021 fs/afs/dir.c   	_enter("{{%llx:%llu}[%lu]}", dvnode->fid.vid, dvnode->fid.vnode, page->index);
page             2023 fs/afs/dir.c   	set_page_private(page, 0);
page             2024 fs/afs/dir.c   	ClearPagePrivate(page);
page             2037 fs/afs/dir.c   static void afs_dir_invalidatepage(struct page *page, unsigned int offset,
page             2040 fs/afs/dir.c   	struct afs_vnode *dvnode = AFS_FS_I(page->mapping->host);
page             2042 fs/afs/dir.c   	_enter("{%lu},%u,%u", page->index, offset, length);
page             2044 fs/afs/dir.c   	BUG_ON(!PageLocked(page));
page             2052 fs/afs/dir.c   		set_page_private(page, 0);
page             2053 fs/afs/dir.c   		ClearPagePrivate(page);
page              193 fs/afs/dir_edit.c 	struct page *page0, *page;
page              236 fs/afs/dir_edit.c 			page = page0;
page              242 fs/afs/dir_edit.c 			page = find_or_create_page(vnode->vfs_inode.i_mapping,
page              244 fs/afs/dir_edit.c 			if (!page)
page              246 fs/afs/dir_edit.c 			if (!PagePrivate(page)) {
page              247 fs/afs/dir_edit.c 				set_page_private(page, 1);
page              248 fs/afs/dir_edit.c 				SetPagePrivate(page);
page              250 fs/afs/dir_edit.c 			dir_page = kmap(page);
page              285 fs/afs/dir_edit.c 		if (page != page0) {
page              286 fs/afs/dir_edit.c 			unlock_page(page);
page              287 fs/afs/dir_edit.c 			kunmap(page);
page              288 fs/afs/dir_edit.c 			put_page(page);
page              304 fs/afs/dir_edit.c 	page = page0;
page              324 fs/afs/dir_edit.c 	if (page != page0) {
page              325 fs/afs/dir_edit.c 		unlock_page(page);
page              326 fs/afs/dir_edit.c 		kunmap(page);
page              327 fs/afs/dir_edit.c 		put_page(page);
page              348 fs/afs/dir_edit.c 	if (page != page0) {
page              349 fs/afs/dir_edit.c 		kunmap(page);
page              350 fs/afs/dir_edit.c 		put_page(page);
page              373 fs/afs/dir_edit.c 	struct page *page0, *page;
page              410 fs/afs/dir_edit.c 			page = find_lock_page(vnode->vfs_inode.i_mapping, index);
page              411 fs/afs/dir_edit.c 			if (!page)
page              413 fs/afs/dir_edit.c 			dir_page = kmap(page);
page              415 fs/afs/dir_edit.c 			page = page0;
page              432 fs/afs/dir_edit.c 		if (page != page0) {
page              433 fs/afs/dir_edit.c 			unlock_page(page);
page              434 fs/afs/dir_edit.c 			kunmap(page);
page              435 fs/afs/dir_edit.c 			put_page(page);
page              456 fs/afs/dir_edit.c 	if (page != page0) {
page              457 fs/afs/dir_edit.c 		unlock_page(page);
page              458 fs/afs/dir_edit.c 		kunmap(page);
page              459 fs/afs/dir_edit.c 		put_page(page);
page              481 fs/afs/dir_edit.c 	if (page != page0) {
page              482 fs/afs/dir_edit.c 		unlock_page(page);
page              483 fs/afs/dir_edit.c 		kunmap(page);
page              484 fs/afs/dir_edit.c 		put_page(page);
page               20 fs/afs/file.c  static int afs_readpage(struct file *file, struct page *page);
page               21 fs/afs/file.c  static void afs_invalidatepage(struct page *page, unsigned int offset,
page               23 fs/afs/file.c  static int afs_releasepage(struct page *page, gfp_t gfp_flags);
page              209 fs/afs/file.c  static void afs_file_readpage_read_complete(struct page *page,
page              213 fs/afs/file.c  	_enter("%p,%p,%d", page, data, error);
page              218 fs/afs/file.c  		SetPageUptodate(page);
page              219 fs/afs/file.c  	unlock_page(page);
page              272 fs/afs/file.c  int afs_page_filler(void *data, struct page *page)
page              274 fs/afs/file.c  	struct inode *inode = page->mapping->host;
page              280 fs/afs/file.c  	_enter("{%x},{%lu},{%lu}", key_serial(key), inode->i_ino, page->index);
page              282 fs/afs/file.c  	BUG_ON(!PageLocked(page));
page              291 fs/afs/file.c  					 page,
page              324 fs/afs/file.c  		req->pos = (loff_t)page->index << PAGE_SHIFT;
page              328 fs/afs/file.c  		req->pages[0] = page;
page              329 fs/afs/file.c  		get_page(page);
page              345 fs/afs/file.c  			fscache_uncache_page(vnode->cache, page);
page              347 fs/afs/file.c  			BUG_ON(PageFsCache(page));
page              357 fs/afs/file.c  		SetPageUptodate(page);
page              361 fs/afs/file.c  		if (PageFsCache(page) &&
page              362 fs/afs/file.c  		    fscache_write_page(vnode->cache, page, vnode->status.size,
page              364 fs/afs/file.c  			fscache_uncache_page(vnode->cache, page);
page              365 fs/afs/file.c  			BUG_ON(PageFsCache(page));
page              368 fs/afs/file.c  		unlock_page(page);
page              375 fs/afs/file.c  	SetPageError(page);
page              380 fs/afs/file.c  	unlock_page(page);
page              389 fs/afs/file.c  static int afs_readpage(struct file *file, struct page *page)
page              397 fs/afs/file.c  		ret = afs_page_filler(key, page);
page              399 fs/afs/file.c  		struct inode *inode = page->mapping->host;
page              404 fs/afs/file.c  			ret = afs_page_filler(key, page);
page              419 fs/afs/file.c  	struct page *page = req->pages[req->index];
page              422 fs/afs/file.c  	SetPageUptodate(page);
page              426 fs/afs/file.c  	if (PageFsCache(page) &&
page              427 fs/afs/file.c  	    fscache_write_page(vnode->cache, page, vnode->status.size,
page              429 fs/afs/file.c  		fscache_uncache_page(vnode->cache, page);
page              430 fs/afs/file.c  		BUG_ON(PageFsCache(page));
page              433 fs/afs/file.c  	unlock_page(page);
page              434 fs/afs/file.c  	put_page(page);
page              446 fs/afs/file.c  	struct page *first, *page;
page              458 fs/afs/file.c  		page = list_entry(p, struct page, lru);
page              459 fs/afs/file.c  		if (page->index != index)
page              486 fs/afs/file.c  		page = lru_to_page(pages);
page              487 fs/afs/file.c  		list_del(&page->lru);
page              488 fs/afs/file.c  		index = page->index;
page              489 fs/afs/file.c  		if (add_to_page_cache_lru(page, mapping, index,
page              492 fs/afs/file.c  			fscache_uncache_page(vnode->cache, page);
page              494 fs/afs/file.c  			put_page(page);
page              498 fs/afs/file.c  		req->pages[req->nr_pages++] = page;
page              524 fs/afs/file.c  		page = req->pages[i];
page              525 fs/afs/file.c  		if (page) {
page              527 fs/afs/file.c  			fscache_uncache_page(vnode->cache, page);
page              529 fs/afs/file.c  			SetPageError(page);
page              530 fs/afs/file.c  			unlock_page(page);
page              606 fs/afs/file.c  static void afs_invalidatepage(struct page *page, unsigned int offset,
page              609 fs/afs/file.c  	struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
page              612 fs/afs/file.c  	_enter("{%lu},%u,%u", page->index, offset, length);
page              614 fs/afs/file.c  	BUG_ON(!PageLocked(page));
page              619 fs/afs/file.c  		if (PageFsCache(page)) {
page              620 fs/afs/file.c  			struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
page              621 fs/afs/file.c  			fscache_wait_on_page_write(vnode->cache, page);
page              622 fs/afs/file.c  			fscache_uncache_page(vnode->cache, page);
page              626 fs/afs/file.c  		if (PagePrivate(page)) {
page              627 fs/afs/file.c  			priv = page_private(page);
page              629 fs/afs/file.c  					     page->index, priv);
page              630 fs/afs/file.c  			set_page_private(page, 0);
page              631 fs/afs/file.c  			ClearPagePrivate(page);
page              642 fs/afs/file.c  static int afs_releasepage(struct page *page, gfp_t gfp_flags)
page              644 fs/afs/file.c  	struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
page              648 fs/afs/file.c  	       vnode->fid.vid, vnode->fid.vnode, page->index, page->flags,
page              654 fs/afs/file.c  	if (!fscache_maybe_release_page(vnode->cache, page, gfp_flags)) {
page              660 fs/afs/file.c  	if (PagePrivate(page)) {
page              661 fs/afs/file.c  		priv = page_private(page);
page              663 fs/afs/file.c  				     page->index, priv);
page              664 fs/afs/file.c  		set_page_private(page, 0);
page              665 fs/afs/file.c  		ClearPagePrivate(page);
page              237 fs/afs/internal.h 	struct page		**pages;
page              238 fs/afs/internal.h 	struct page		*array[];
page              934 fs/afs/internal.h extern int afs_page_filler(void *, struct page *);
page             1337 fs/afs/internal.h extern int afs_set_page_dirty(struct page *);
page             1340 fs/afs/internal.h 			struct page **pagep, void **fsdata);
page             1343 fs/afs/internal.h 			struct page *page, void *fsdata);
page             1344 fs/afs/internal.h extern int afs_writepage(struct page *, struct writeback_control *);
page             1350 fs/afs/internal.h extern int afs_launder_page(struct page *);
page              122 fs/afs/mntpt.c 		struct page *page;
page              132 fs/afs/mntpt.c 		page = read_mapping_page(d_inode(mntpt)->i_mapping, 0, NULL);
page              133 fs/afs/mntpt.c 		if (IS_ERR(page))
page              134 fs/afs/mntpt.c 			return PTR_ERR(page);
page              136 fs/afs/mntpt.c 		if (PageError(page)) {
page              138 fs/afs/mntpt.c 			put_page(page);
page              142 fs/afs/mntpt.c 		buf = kmap(page);
page              146 fs/afs/mntpt.c 		kunmap(page);
page              147 fs/afs/mntpt.c 		put_page(page);
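
[Note] The afs mntpt entries above read the mount-point content synchronously through the page cache: read_mapping_page() returns an uptodate page or an ERR_PTR, and the caller still checks PageError before mapping it. The pattern in isolation:

    static int read_first_page(struct inode *inode)
    {
        struct page *page = read_mapping_page(inode->i_mapping, 0, NULL);
        char *buf;

        if (IS_ERR(page))
            return PTR_ERR(page);
        if (PageError(page)) {
            put_page(page);
            return -EIO;
        }
        buf = kmap(page);
        /* ... consume buf, at most PAGE_SIZE bytes ... */
        kunmap(page);
        put_page(page);
        return 0;
    }
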
page              286 fs/afs/rxrpc.c 	struct page *pages[AFS_BVEC_MAX];
page               19 fs/afs/write.c int afs_set_page_dirty(struct page *page)
page               22 fs/afs/write.c 	return __set_page_dirty_nobuffers(page);
page               29 fs/afs/write.c 			 loff_t pos, unsigned int len, struct page *page)
page               41 fs/afs/write.c 		data = kmap(page);
page               43 fs/afs/write.c 		kunmap(page);
page               56 fs/afs/write.c 	req->pages[0] = page;
page               57 fs/afs/write.c 	get_page(page);
page               79 fs/afs/write.c 		    struct page **pagep, void **fsdata)
page               82 fs/afs/write.c 	struct page *page;
page               96 fs/afs/write.c 	BUILD_BUG_ON(PAGE_SIZE > 32768 && sizeof(page->private) < 8);
page               98 fs/afs/write.c 	page = grab_cache_page_write_begin(mapping, index, flags);
page               99 fs/afs/write.c 	if (!page)
page              102 fs/afs/write.c 	if (!PageUptodate(page) && len != PAGE_SIZE) {
page              103 fs/afs/write.c 		ret = afs_fill_page(vnode, key, pos & PAGE_MASK, PAGE_SIZE, page);
page              105 fs/afs/write.c 			unlock_page(page);
page              106 fs/afs/write.c 			put_page(page);
page              110 fs/afs/write.c 		SetPageUptodate(page);
page              114 fs/afs/write.c 	*pagep = page;
page              121 fs/afs/write.c 	if (PagePrivate(page)) {
page              122 fs/afs/write.c 		priv = page_private(page);
page              129 fs/afs/write.c 		if (PageWriteback(page)) {
page              131 fs/afs/write.c 					     page->index, priv);
page              153 fs/afs/write.c 			     page->index, priv);
page              154 fs/afs/write.c 	SetPagePrivate(page);
page              155 fs/afs/write.c 	set_page_private(page, priv);
page              164 fs/afs/write.c 	ret = write_one_page(page);
page              170 fs/afs/write.c 	ret = lock_page_killable(page);
page              183 fs/afs/write.c 		  struct page *page, void *fsdata)
page              191 fs/afs/write.c 	       vnode->fid.vid, vnode->fid.vnode, page->index);
page              204 fs/afs/write.c 	if (!PageUptodate(page)) {
page              211 fs/afs/write.c 					    len - copied, page);
page              215 fs/afs/write.c 		SetPageUptodate(page);
page              218 fs/afs/write.c 	set_page_dirty(page);
page              219 fs/afs/write.c 	if (PageDirty(page))
page              224 fs/afs/write.c 	unlock_page(page);
page              225 fs/afs/write.c 	put_page(page);
page              254 fs/afs/write.c 			struct page *page = pv.pages[loop];
page              255 fs/afs/write.c 			ClearPageUptodate(page);
page              256 fs/afs/write.c 			SetPageError(page);
page              257 fs/afs/write.c 			end_page_writeback(page);
page              258 fs/afs/write.c 			if (page->index >= first)
page              259 fs/afs/write.c 				first = page->index + 1;
page              260 fs/afs/write.c 			lock_page(page);
page              261 fs/afs/write.c 			generic_error_remove_page(mapping, page);
page              262 fs/afs/write.c 			unlock_page(page);
page              297 fs/afs/write.c 			struct page *page = pv.pages[loop];
page              299 fs/afs/write.c 			redirty_page_for_writepage(wbc, page);
page              300 fs/afs/write.c 			end_page_writeback(page);
page              301 fs/afs/write.c 			if (page->index >= first)
page              302 fs/afs/write.c 				first = page->index + 1;
page              453 fs/afs/write.c 					   struct page *primary_page,
page              457 fs/afs/write.c 	struct page *pages[8], *page;
page              508 fs/afs/write.c 			page = pages[loop];
page              512 fs/afs/write.c 			if (page->index > final_page)
page              514 fs/afs/write.c 			if (!trylock_page(page))
page              516 fs/afs/write.c 			if (!PageDirty(page) || PageWriteback(page)) {
page              517 fs/afs/write.c 				unlock_page(page);
page              521 fs/afs/write.c 			priv = page_private(page);
page              526 fs/afs/write.c 				unlock_page(page);
page              532 fs/afs/write.c 					     page->index, priv);
page              534 fs/afs/write.c 			if (!clear_page_dirty_for_io(page))
page              536 fs/afs/write.c 			if (test_set_page_writeback(page))
page              538 fs/afs/write.c 			unlock_page(page);
page              539 fs/afs/write.c 			put_page(page);
page              609 fs/afs/write.c int afs_writepage(struct page *page, struct writeback_control *wbc)
page              613 fs/afs/write.c 	_enter("{%lx},", page->index);
page              615 fs/afs/write.c 	ret = afs_write_back_from_locked_page(page->mapping, wbc, page,
page              635 fs/afs/write.c 	struct page *page;
page              642 fs/afs/write.c 					PAGECACHE_TAG_DIRTY, 1, &page);
page              646 fs/afs/write.c 		_debug("wback %lx", page->index);
page              654 fs/afs/write.c 		ret = lock_page_killable(page);
page              656 fs/afs/write.c 			put_page(page);
page              661 fs/afs/write.c 		if (page->mapping != mapping || !PageDirty(page)) {
page              662 fs/afs/write.c 			unlock_page(page);
page              663 fs/afs/write.c 			put_page(page);
page              667 fs/afs/write.c 		if (PageWriteback(page)) {
page              668 fs/afs/write.c 			unlock_page(page);
page              670 fs/afs/write.c 				wait_on_page_writeback(page);
page              671 fs/afs/write.c 			put_page(page);
page              675 fs/afs/write.c 		if (!clear_page_dirty_for_io(page))
page              677 fs/afs/write.c 		ret = afs_write_back_from_locked_page(mapping, wbc, page, end);
page              678 fs/afs/write.c 		put_page(page);
page              784 fs/afs/write.c 	       vnode->fid.vid, vnode->fid.vnode, vmf->page->index);
page              792 fs/afs/write.c 	fscache_wait_on_page_write(vnode->cache, vmf->page);
page              795 fs/afs/write.c 	if (PageWriteback(vmf->page) &&
page              796 fs/afs/write.c 	    wait_on_page_bit_killable(vmf->page, PG_writeback) < 0)
page              799 fs/afs/write.c 	if (lock_page_killable(vmf->page) < 0)
page              806 fs/afs/write.c 	wait_on_page_writeback(vmf->page);
page              811 fs/afs/write.c 			     vmf->page->index, priv);
page              812 fs/afs/write.c 	SetPagePrivate(vmf->page);
page              813 fs/afs/write.c 	set_page_private(vmf->page, priv);
page              850 fs/afs/write.c int afs_launder_page(struct page *page)
page              852 fs/afs/write.c 	struct address_space *mapping = page->mapping;
page              858 fs/afs/write.c 	_enter("{%lx}", page->index);
page              860 fs/afs/write.c 	priv = page_private(page);
page              861 fs/afs/write.c 	if (clear_page_dirty_for_io(page)) {
page              864 fs/afs/write.c 		if (PagePrivate(page)) {
page              870 fs/afs/write.c 				     page->index, priv);
page              871 fs/afs/write.c 		ret = afs_store_data(mapping, page->index, page->index, t, f);
page              875 fs/afs/write.c 			     page->index, priv);
page              876 fs/afs/write.c 	set_page_private(page, 0);
page              877 fs/afs/write.c 	ClearPagePrivate(page);
page              880 fs/afs/write.c 	if (PageFsCache(page)) {
page              881 fs/afs/write.c 		fscache_wait_on_page_write(vnode->cache, page);
page              882 fs/afs/write.c 		fscache_uncache_page(vnode->cache, page);
page              127 fs/aio.c       	struct page		**ring_pages;
page              165 fs/aio.c       	struct page		*internal_pages[AIO_RING_PAGES];
page              312 fs/aio.c       		struct page *page;
page              315 fs/aio.c       		page = ctx->ring_pages[i];
page              316 fs/aio.c       		if (!page)
page              319 fs/aio.c       		put_page(page);
page              377 fs/aio.c       static int aio_migratepage(struct address_space *mapping, struct page *new,
page              378 fs/aio.c       			struct page *old, enum migrate_mode mode)
page              492 fs/aio.c       		ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *),
page              501 fs/aio.c       		struct page *page;
page              502 fs/aio.c       		page = find_or_create_page(file->f_mapping,
page              504 fs/aio.c       		if (!page)
page              507 fs/aio.c       			 current->pid, i, page_count(page));
page              508 fs/aio.c       		SetPageUptodate(page);
page              509 fs/aio.c       		unlock_page(page);
page              511 fs/aio.c       		ctx->ring_pages[i] = page;
page             1214 fs/aio.c       		struct page *page;
page             1221 fs/aio.c       		page = ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE];
page             1227 fs/aio.c       		ev = kmap(page);
page             1230 fs/aio.c       		kunmap(page);
page               42 fs/befs/linuxvfs.c static int befs_readpage(struct file *file, struct page *page);
page               50 fs/befs/linuxvfs.c static int befs_symlink_readpage(struct file *, struct page *);
page              111 fs/befs/linuxvfs.c befs_readpage(struct file *file, struct page *page)
page              113 fs/befs/linuxvfs.c 	return block_read_full_page(page, befs_get_block);
page              470 fs/befs/linuxvfs.c static int befs_symlink_readpage(struct file *unused, struct page *page)
page              472 fs/befs/linuxvfs.c 	struct inode *inode = page->mapping->host;
page              477 fs/befs/linuxvfs.c 	char *link = page_address(page);
page              490 fs/befs/linuxvfs.c 	SetPageUptodate(page);
page              491 fs/befs/linuxvfs.c 	unlock_page(page);
page              494 fs/befs/linuxvfs.c 	SetPageError(page);
page              495 fs/befs/linuxvfs.c 	unlock_page(page);
page              153 fs/bfs/file.c  static int bfs_writepage(struct page *page, struct writeback_control *wbc)
page              155 fs/bfs/file.c  	return block_write_full_page(page, bfs_get_block, wbc);
page              158 fs/bfs/file.c  static int bfs_readpage(struct file *file, struct page *page)
page              160 fs/bfs/file.c  	return block_read_full_page(page, bfs_get_block);
page              173 fs/bfs/file.c  			struct page **pagep, void **fsdata)
page             2346 fs/binfmt_elf.c 			struct page *page;
page             2349 fs/binfmt_elf.c 			page = get_dump_page(addr);
page             2350 fs/binfmt_elf.c 			if (page) {
page             2351 fs/binfmt_elf.c 				void *kaddr = kmap(page);
page             2353 fs/binfmt_elf.c 				kunmap(page);
page             2354 fs/binfmt_elf.c 				put_page(page);
page             1509 fs/binfmt_elf_fdpic.c 			struct page *page = get_dump_page(addr);
page             1510 fs/binfmt_elf_fdpic.c 			if (page) {
page             1511 fs/binfmt_elf_fdpic.c 				void *kaddr = kmap(page);
page             1513 fs/binfmt_elf_fdpic.c 				kunmap(page);
page             1514 fs/binfmt_elf_fdpic.c 				put_page(page);
page              549 fs/binfmt_misc.c static void entry_status(Node *e, char *page)
page              551 fs/binfmt_misc.c 	char *dp = page;
page              558 fs/binfmt_misc.c 		sprintf(page, "%s\n", status);
page              636 fs/binfmt_misc.c 	char *page;
page              638 fs/binfmt_misc.c 	page = (char *) __get_free_page(GFP_KERNEL);
page              639 fs/binfmt_misc.c 	if (!page)
page              642 fs/binfmt_misc.c 	entry_status(e, page);
page              644 fs/binfmt_misc.c 	res = simple_read_from_buffer(buf, nbytes, ppos, page, strlen(page));
page              646 fs/binfmt_misc.c 	free_page((unsigned long) page);
page              608 fs/block_dev.c static int blkdev_writepage(struct page *page, struct writeback_control *wbc)
page              610 fs/block_dev.c 	return block_write_full_page(page, blkdev_get_block, wbc);
page              613 fs/block_dev.c static int blkdev_readpage(struct file * file, struct page * page)
page              615 fs/block_dev.c 	return block_read_full_page(page, blkdev_get_block);
page              626 fs/block_dev.c 			struct page **pagep, void **fsdata)
page              634 fs/block_dev.c 			struct page *page, void *fsdata)
page              637 fs/block_dev.c 	ret = block_write_end(file, mapping, pos, len, copied, page, fsdata);
page              639 fs/block_dev.c 	unlock_page(page);
page              640 fs/block_dev.c 	put_page(page);
page              701 fs/block_dev.c 			struct page *page)
page              712 fs/block_dev.c 	result = ops->rw_page(bdev, sector + get_start_sect(bdev), page,
page              739 fs/block_dev.c 			struct page *page, struct writeback_control *wbc)
page              750 fs/block_dev.c 	set_page_writeback(page);
page              751 fs/block_dev.c 	result = ops->rw_page(bdev, sector + get_start_sect(bdev), page,
page              754 fs/block_dev.c 		end_page_writeback(page);
page              756 fs/block_dev.c 		clean_page_buffers(page);
page              757 fs/block_dev.c 		unlock_page(page);
page             2023 fs/block_dev.c static int blkdev_releasepage(struct page *page, gfp_t wait)
page             2025 fs/block_dev.c 	struct super_block *super = BDEV_I(page->mapping->host)->bdev.bd_super;
page             2028 fs/block_dev.c 		return super->s_op->bdev_try_to_free_page(super, page, wait);
page             2030 fs/block_dev.c 	return try_to_free_buffers(page);
page              214 fs/btrfs/check-integrity.c 	struct page **pagev;
page             1616 fs/btrfs/check-integrity.c 	block_ctx->pagev = (struct page **)(block_ctx->datav + num_pages);
page               82 fs/btrfs/compression.c 	struct page *page;
page               94 fs/btrfs/compression.c 		page = cb->compressed_pages[i];
page               97 fs/btrfs/compression.c 		kaddr = kmap_atomic(page);
page              130 fs/btrfs/compression.c 	struct page *page;
page              177 fs/btrfs/compression.c 		page = cb->compressed_pages[index];
page              178 fs/btrfs/compression.c 		page->mapping = NULL;
page              179 fs/btrfs/compression.c 		put_page(page);
page              216 fs/btrfs/compression.c 	struct page *pages[16];
page              257 fs/btrfs/compression.c 	struct page *page;
page              288 fs/btrfs/compression.c 		page = cb->compressed_pages[index];
page              289 fs/btrfs/compression.c 		page->mapping = NULL;
page              290 fs/btrfs/compression.c 		put_page(page);
page              312 fs/btrfs/compression.c 				 struct page **compressed_pages,
page              321 fs/btrfs/compression.c 	struct page *page;
page              356 fs/btrfs/compression.c 		page = compressed_pages[pg_index];
page              357 fs/btrfs/compression.c 		page->mapping = inode->i_mapping;
page              359 fs/btrfs/compression.c 			submit = btrfs_bio_fits_in_stripe(page, PAGE_SIZE, bio,
page              362 fs/btrfs/compression.c 		page->mapping = NULL;
page              363 fs/btrfs/compression.c 		if (submit || bio_add_page(bio, page, PAGE_SIZE, 0) <
page              392 fs/btrfs/compression.c 			bio_add_page(bio, page, PAGE_SIZE, 0);
page              437 fs/btrfs/compression.c 	struct page *page;
page              461 fs/btrfs/compression.c 		page = xa_load(&mapping->i_pages, pg_index);
page              462 fs/btrfs/compression.c 		if (page && !xa_is_value(page)) {
page              469 fs/btrfs/compression.c 		page = __page_cache_alloc(mapping_gfp_constraint(mapping,
page              471 fs/btrfs/compression.c 		if (!page)
page              474 fs/btrfs/compression.c 		if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
page              475 fs/btrfs/compression.c 			put_page(page);
page              485 fs/btrfs/compression.c 		set_page_extent_mapped(page);
page              497 fs/btrfs/compression.c 			unlock_page(page);
page              498 fs/btrfs/compression.c 			put_page(page);
page              503 fs/btrfs/compression.c 		if (page->index == end_index) {
page              510 fs/btrfs/compression.c 				userpage = kmap_atomic(page);
page              512 fs/btrfs/compression.c 				flush_dcache_page(page);
page              517 fs/btrfs/compression.c 		ret = bio_add_page(cb->orig_bio, page,
page              522 fs/btrfs/compression.c 			put_page(page);
page              525 fs/btrfs/compression.c 			unlock_page(page);
page              526 fs/btrfs/compression.c 			put_page(page);
page              555 fs/btrfs/compression.c 	struct page *page;
page              602 fs/btrfs/compression.c 	cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *),
page              636 fs/btrfs/compression.c 		page = cb->compressed_pages[pg_index];
page              637 fs/btrfs/compression.c 		page->mapping = inode->i_mapping;
page              638 fs/btrfs/compression.c 		page->index = em_start >> PAGE_SHIFT;
page              641 fs/btrfs/compression.c 			submit = btrfs_bio_fits_in_stripe(page, PAGE_SIZE,
page              644 fs/btrfs/compression.c 		page->mapping = NULL;
page              645 fs/btrfs/compression.c 		if (submit || bio_add_page(comp_bio, page, PAGE_SIZE, 0) <
page              683 fs/btrfs/compression.c 			bio_add_page(comp_bio, page, PAGE_SIZE, 0);
page             1033 fs/btrfs/compression.c 			 u64 start, struct page **pages,
page             1085 fs/btrfs/compression.c int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
page             1458 fs/btrfs/compression.c 	struct page *page;
page             1484 fs/btrfs/compression.c 		page = find_get_page(inode->i_mapping, index);
page             1485 fs/btrfs/compression.c 		in_data = kmap(page);
page             1498 fs/btrfs/compression.c 		kunmap(page);
page             1499 fs/btrfs/compression.c 		put_page(page);
page               33 fs/btrfs/compression.h 	struct page **compressed_pages;
page               81 fs/btrfs/compression.h 			 u64 start, struct page **pages,
page               85 fs/btrfs/compression.h int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
page               94 fs/btrfs/compression.h 				  struct page **compressed_pages,
page              145 fs/btrfs/compression.h 			      struct page **pages,
page              155 fs/btrfs/compression.h 			  struct page *dest_page,
page             2857 fs/btrfs/ctree.h int btrfs_bio_fits_in_stripe(struct page *page, size_t size, struct bio *bio,
page             2861 fs/btrfs/ctree.h int btrfs_readpage(struct file *file, struct page *page);
page             2876 fs/btrfs/ctree.h 				    struct page *page, size_t pg_offset,
page             2897 fs/btrfs/ctree.h int btrfs_run_delalloc_range(struct inode *inode, struct page *locked_page,
page             2900 fs/btrfs/ctree.h int btrfs_writepage_cow_fixup(struct page *page, u64 start, u64 end);
page             2901 fs/btrfs/ctree.h void btrfs_writepage_endio_finish_ordered(struct page *page, u64 start,
page             2947 fs/btrfs/ctree.h int btrfs_dirty_pages(struct inode *inode, struct page **pages,
page              205 fs/btrfs/disk-io.c 		struct page *page, size_t pg_offset, u64 start, u64 len,
page              513 fs/btrfs/disk-io.c static int csum_dirty_buffer(struct btrfs_fs_info *fs_info, struct page *page)
page              515 fs/btrfs/disk-io.c 	u64 start = page_offset(page);
page              522 fs/btrfs/disk-io.c 	eb = (struct extent_buffer *)page->private;
page              523 fs/btrfs/disk-io.c 	if (page != eb->pages[0])
page              533 fs/btrfs/disk-io.c 	if (WARN_ON(!PageUptodate(page)))
page              590 fs/btrfs/disk-io.c 				      u64 phy_offset, struct page *page,
page              596 fs/btrfs/disk-io.c 	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
page              603 fs/btrfs/disk-io.c 	if (!page->private)
page              606 fs/btrfs/disk-io.c 	eb = (struct extent_buffer *)page->private;
page              922 fs/btrfs/disk-io.c 			struct page *newpage, struct page *page,
page              929 fs/btrfs/disk-io.c 	if (PageDirty(page))
page              935 fs/btrfs/disk-io.c 	if (page_has_private(page) &&
page              936 fs/btrfs/disk-io.c 	    !try_to_release_page(page, GFP_KERNEL))
page              938 fs/btrfs/disk-io.c 	return migrate_page(mapping, newpage, page, mode);
page              965 fs/btrfs/disk-io.c static int btree_readpage(struct file *file, struct page *page)
page              968 fs/btrfs/disk-io.c 	tree = &BTRFS_I(page->mapping->host)->io_tree;
page              969 fs/btrfs/disk-io.c 	return extent_read_full_page(tree, page, btree_get_extent, 0);
page              972 fs/btrfs/disk-io.c static int btree_releasepage(struct page *page, gfp_t gfp_flags)
page              974 fs/btrfs/disk-io.c 	if (PageWriteback(page) || PageDirty(page))
page              977 fs/btrfs/disk-io.c 	return try_release_extent_buffer(page);
page              980 fs/btrfs/disk-io.c static void btree_invalidatepage(struct page *page, unsigned int offset,
page              984 fs/btrfs/disk-io.c 	tree = &BTRFS_I(page->mapping->host)->io_tree;
page              985 fs/btrfs/disk-io.c 	extent_invalidatepage(tree, page, offset);
page              986 fs/btrfs/disk-io.c 	btree_releasepage(page, GFP_NOFS);
page              987 fs/btrfs/disk-io.c 	if (PagePrivate(page)) {
page              988 fs/btrfs/disk-io.c 		btrfs_warn(BTRFS_I(page->mapping->host)->root->fs_info,
page              990 fs/btrfs/disk-io.c 			   (unsigned long long)page_offset(page));
page              991 fs/btrfs/disk-io.c 		ClearPagePrivate(page);
page              992 fs/btrfs/disk-io.c 		set_page_private(page, 0);
page              993 fs/btrfs/disk-io.c 		put_page(page);
page              997 fs/btrfs/disk-io.c static int btree_set_page_dirty(struct page *page)
page             1002 fs/btrfs/disk-io.c 	BUG_ON(!PagePrivate(page));
page             1003 fs/btrfs/disk-io.c 	eb = (struct extent_buffer *)page->private;
page             1009 fs/btrfs/disk-io.c 	return __set_page_dirty_nobuffers(page);
page              134 fs/btrfs/disk-io.h int btree_lock_page_hook(struct page *page, void *data,
page              137 fs/btrfs/disk-io.h 		struct page *page, size_t pg_offset, u64 start, u64 len,
page             1458 fs/btrfs/extent_io.c 	struct page *page;
page             1461 fs/btrfs/extent_io.c 		page = find_get_page(inode->i_mapping, index);
page             1462 fs/btrfs/extent_io.c 		BUG_ON(!page); /* Pages should be in the extent_io_tree */
page             1463 fs/btrfs/extent_io.c 		clear_page_dirty_for_io(page);
page             1464 fs/btrfs/extent_io.c 		put_page(page);
page             1473 fs/btrfs/extent_io.c 	struct page *page;
page             1476 fs/btrfs/extent_io.c 		page = find_get_page(inode->i_mapping, index);
page             1477 fs/btrfs/extent_io.c 		BUG_ON(!page); /* Pages should be in the extent_io_tree */
page             1478 fs/btrfs/extent_io.c 		__set_page_dirty_nobuffers(page);
page             1479 fs/btrfs/extent_io.c 		account_page_redirty(page);
page             1480 fs/btrfs/extent_io.c 		put_page(page);
page             1737 fs/btrfs/extent_io.c 				  struct page *locked_page,
page             1742 fs/btrfs/extent_io.c 					   struct page *locked_page,
page             1757 fs/btrfs/extent_io.c 					struct page *locked_page,
page             1787 fs/btrfs/extent_io.c 				    struct page *locked_page, u64 *start,
page             1868 fs/btrfs/extent_io.c 				  struct page *locked_page,
page             1875 fs/btrfs/extent_io.c 	struct page *pages[16];
page             1945 fs/btrfs/extent_io.c 				  struct page *locked_page,
page             2137 fs/btrfs/extent_io.c static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
page             2139 fs/btrfs/extent_io.c 	u64 start = page_offset(page);
page             2142 fs/btrfs/extent_io.c 		SetPageUptodate(page);
page             2180 fs/btrfs/extent_io.c 		      u64 length, u64 logical, struct page *page,
page             2241 fs/btrfs/extent_io.c 	bio_add_page(bio, page, length, pg_offset);
page             2271 fs/btrfs/extent_io.c 		struct page *p = eb->pages[i];
page             2290 fs/btrfs/extent_io.c 		     struct page *page, u64 ino, unsigned int pg_offset)
page             2332 fs/btrfs/extent_io.c 					  failrec->logical, page, pg_offset,
page             2530 fs/btrfs/extent_io.c 				    struct page *page, int pg_offset, int icsum,
page             2556 fs/btrfs/extent_io.c 	bio_add_page(bio, page, failrec->len, pg_offset);
page             2568 fs/btrfs/extent_io.c 			      struct page *page, u64 start, u64 end,
page             2572 fs/btrfs/extent_io.c 	struct inode *inode = page->mapping->host;
page             2597 fs/btrfs/extent_io.c 	bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page,
page             2598 fs/btrfs/extent_io.c 				      start - page_offset(page),
page             2620 fs/btrfs/extent_io.c void end_extent_writepage(struct page *page, int err, u64 start, u64 end)
page             2625 fs/btrfs/extent_io.c 	btrfs_writepage_endio_finish_ordered(page, start, end, uptodate);
page             2628 fs/btrfs/extent_io.c 		ClearPageUptodate(page);
page             2629 fs/btrfs/extent_io.c 		SetPageError(page);
page             2631 fs/btrfs/extent_io.c 		mapping_set_error(page->mapping, ret);
page             2654 fs/btrfs/extent_io.c 		struct page *page = bvec->bv_page;
page             2655 fs/btrfs/extent_io.c 		struct inode *inode = page->mapping->host;
page             2674 fs/btrfs/extent_io.c 		start = page_offset(page);
page             2677 fs/btrfs/extent_io.c 		end_extent_writepage(page, error, start, end);
page             2678 fs/btrfs/extent_io.c 		end_page_writeback(page);
page             2725 fs/btrfs/extent_io.c 		struct page *page = bvec->bv_page;
page             2726 fs/btrfs/extent_io.c 		struct inode *inode = page->mapping->host;
page             2754 fs/btrfs/extent_io.c 		start = page_offset(page);
page             2761 fs/btrfs/extent_io.c 							      page, start, end,
page             2768 fs/btrfs/extent_io.c 						 page,
page             2787 fs/btrfs/extent_io.c 			ret = bio_readpage_error(bio, offset, page, start, end,
page             2797 fs/btrfs/extent_io.c 			eb = (struct extent_buffer *)page->private;
page             2813 fs/btrfs/extent_io.c 			if (page->index == end_index && off)
page             2814 fs/btrfs/extent_io.c 				zero_user_segment(page, off, PAGE_SIZE);
page             2815 fs/btrfs/extent_io.c 			SetPageUptodate(page);
page             2817 fs/btrfs/extent_io.c 			ClearPageUptodate(page);
page             2818 fs/btrfs/extent_io.c 			SetPageError(page);
page             2820 fs/btrfs/extent_io.c 		unlock_page(page);
page             2936 fs/btrfs/extent_io.c 			      struct page *page, u64 offset,
page             2964 fs/btrfs/extent_io.c 		if (btrfs_bio_fits_in_stripe(page, page_size, bio, bio_flags))
page             2969 fs/btrfs/extent_io.c 		    bio_add_page(bio, page, page_size, pg_offset) < page_size) {
page             2978 fs/btrfs/extent_io.c 				wbc_account_cgroup_owner(wbc, page, page_size);
page             2985 fs/btrfs/extent_io.c 	bio_add_page(bio, page, page_size, pg_offset);
page             2988 fs/btrfs/extent_io.c 	bio->bi_write_hint = page->mapping->host->i_write_hint;
page             2992 fs/btrfs/extent_io.c 		wbc_account_cgroup_owner(wbc, page, page_size);
page             3001 fs/btrfs/extent_io.c 				      struct page *page)
page             3003 fs/btrfs/extent_io.c 	if (!PagePrivate(page)) {
page             3004 fs/btrfs/extent_io.c 		SetPagePrivate(page);
page             3005 fs/btrfs/extent_io.c 		get_page(page);
page             3006 fs/btrfs/extent_io.c 		set_page_private(page, (unsigned long)eb);
page             3008 fs/btrfs/extent_io.c 		WARN_ON(page->private != (unsigned long)eb);
page             3012 fs/btrfs/extent_io.c void set_page_extent_mapped(struct page *page)
page             3014 fs/btrfs/extent_io.c 	if (!PagePrivate(page)) {
page             3015 fs/btrfs/extent_io.c 		SetPagePrivate(page);
page             3016 fs/btrfs/extent_io.c 		get_page(page);
page             3017 fs/btrfs/extent_io.c 		set_page_private(page, EXTENT_PAGE_PRIVATE);
page             3022 fs/btrfs/extent_io.c __get_extent_map(struct inode *inode, struct page *page, size_t pg_offset,
page             3040 fs/btrfs/extent_io.c 	em = get_extent(BTRFS_I(inode), page, pg_offset, start, len, 0);
page             3056 fs/btrfs/extent_io.c 			 struct page *page,
page             3063 fs/btrfs/extent_io.c 	struct inode *inode = page->mapping->host;
page             3064 fs/btrfs/extent_io.c 	u64 start = page_offset(page);
page             3081 fs/btrfs/extent_io.c 	set_page_extent_mapped(page);
page             3083 fs/btrfs/extent_io.c 	if (!PageUptodate(page)) {
page             3084 fs/btrfs/extent_io.c 		if (cleancache_get_page(page) == 0) {
page             3091 fs/btrfs/extent_io.c 	if (page->index == last_byte >> PAGE_SHIFT) {
page             3097 fs/btrfs/extent_io.c 			userpage = kmap_atomic(page);
page             3099 fs/btrfs/extent_io.c 			flush_dcache_page(page);
page             3112 fs/btrfs/extent_io.c 			userpage = kmap_atomic(page);
page             3114 fs/btrfs/extent_io.c 			flush_dcache_page(page);
page             3122 fs/btrfs/extent_io.c 		em = __get_extent_map(inode, page, pg_offset, cur,
page             3125 fs/btrfs/extent_io.c 			SetPageError(page);
page             3204 fs/btrfs/extent_io.c 			userpage = kmap_atomic(page);
page             3206 fs/btrfs/extent_io.c 			flush_dcache_page(page);
page             3220 fs/btrfs/extent_io.c 			check_page_uptodate(tree, page);
page             3230 fs/btrfs/extent_io.c 			SetPageError(page);
page             3238 fs/btrfs/extent_io.c 					 page, offset, disk_io_size,
page             3248 fs/btrfs/extent_io.c 			SetPageError(page);
page             3257 fs/btrfs/extent_io.c 		if (!PageError(page))
page             3258 fs/btrfs/extent_io.c 			SetPageUptodate(page);
page             3259 fs/btrfs/extent_io.c 		unlock_page(page);
page             3265 fs/btrfs/extent_io.c 					     struct page *pages[], int nr_pages,
page             3285 fs/btrfs/extent_io.c 				   struct page *page,
page             3291 fs/btrfs/extent_io.c 	struct btrfs_inode *inode = BTRFS_I(page->mapping->host);
page             3292 fs/btrfs/extent_io.c 	u64 start = page_offset(page);
page             3298 fs/btrfs/extent_io.c 	ret = __do_readpage(tree, page, get_extent, NULL, bio, mirror_num,
page             3303 fs/btrfs/extent_io.c int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
page             3310 fs/btrfs/extent_io.c 	ret = __extent_read_full_page(tree, page, get_extent, &bio, mirror_num,
page             3334 fs/btrfs/extent_io.c 		struct page *page, struct writeback_control *wbc,
page             3346 fs/btrfs/extent_io.c 		found = find_lock_delalloc_range(inode, page,
page             3353 fs/btrfs/extent_io.c 		ret = btrfs_run_delalloc_range(inode, page, delalloc_start,
page             3356 fs/btrfs/extent_io.c 			SetPageError(page);
page             3411 fs/btrfs/extent_io.c 				 struct page *page,
page             3419 fs/btrfs/extent_io.c 	u64 start = page_offset(page);
page             3434 fs/btrfs/extent_io.c 	ret = btrfs_writepage_cow_fixup(page, start, page_end);
page             3440 fs/btrfs/extent_io.c 			redirty_page_for_writepage(wbc, page);
page             3443 fs/btrfs/extent_io.c 		unlock_page(page);
page             3455 fs/btrfs/extent_io.c 		btrfs_writepage_endio_finish_ordered(page, start, page_end, 1);
page             3466 fs/btrfs/extent_io.c 			btrfs_writepage_endio_finish_ordered(page, cur,
page             3470 fs/btrfs/extent_io.c 		em = btrfs_get_extent(BTRFS_I(inode), page, pg_offset, cur,
page             3473 fs/btrfs/extent_io.c 			SetPageError(page);
page             3502 fs/btrfs/extent_io.c 				btrfs_writepage_endio_finish_ordered(page, cur,
page             3519 fs/btrfs/extent_io.c 		if (!PageWriteback(page)) {
page             3522 fs/btrfs/extent_io.c 			       page->index, cur, end);
page             3526 fs/btrfs/extent_io.c 					 page, offset, iosize, pg_offset,
page             3531 fs/btrfs/extent_io.c 			SetPageError(page);
page             3532 fs/btrfs/extent_io.c 			if (PageWriteback(page))
page             3533 fs/btrfs/extent_io.c 				end_page_writeback(page);
page             3554 fs/btrfs/extent_io.c static int __extent_writepage(struct page *page, struct writeback_control *wbc,
page             3557 fs/btrfs/extent_io.c 	struct inode *inode = page->mapping->host;
page             3558 fs/btrfs/extent_io.c 	u64 start = page_offset(page);
page             3570 fs/btrfs/extent_io.c 	trace___extent_writepage(page, inode, wbc);
page             3572 fs/btrfs/extent_io.c 	WARN_ON(!PageLocked(page));
page             3574 fs/btrfs/extent_io.c 	ClearPageError(page);
page             3577 fs/btrfs/extent_io.c 	if (page->index > end_index ||
page             3578 fs/btrfs/extent_io.c 	   (page->index == end_index && !pg_offset)) {
page             3579 fs/btrfs/extent_io.c 		page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
page             3580 fs/btrfs/extent_io.c 		unlock_page(page);
page             3584 fs/btrfs/extent_io.c 	if (page->index == end_index) {
page             3587 fs/btrfs/extent_io.c 		userpage = kmap_atomic(page);
page             3591 fs/btrfs/extent_io.c 		flush_dcache_page(page);
page             3596 fs/btrfs/extent_io.c 	set_page_extent_mapped(page);
page             3599 fs/btrfs/extent_io.c 		ret = writepage_delalloc(inode, page, wbc, start, &nr_written);
page             3606 fs/btrfs/extent_io.c 	ret = __extent_writepage_io(inode, page, wbc, epd,
page             3614 fs/btrfs/extent_io.c 		set_page_writeback(page);
page             3615 fs/btrfs/extent_io.c 		end_page_writeback(page);
page             3617 fs/btrfs/extent_io.c 	if (PageError(page)) {
page             3619 fs/btrfs/extent_io.c 		end_extent_writepage(page, ret, start, page_end);
page             3621 fs/btrfs/extent_io.c 	unlock_page(page);
page             3709 fs/btrfs/extent_io.c 		struct page *p = eb->pages[i];
page             3749 fs/btrfs/extent_io.c static void set_btree_ioerr(struct page *page)
page             3751 fs/btrfs/extent_io.c 	struct extent_buffer *eb = (struct extent_buffer *)page->private;
page             3754 fs/btrfs/extent_io.c 	SetPageError(page);
page             3828 fs/btrfs/extent_io.c 		struct page *page = bvec->bv_page;
page             3830 fs/btrfs/extent_io.c 		eb = (struct extent_buffer *)page->private;
page             3836 fs/btrfs/extent_io.c 			ClearPageUptodate(page);
page             3837 fs/btrfs/extent_io.c 			set_btree_ioerr(page);
page             3840 fs/btrfs/extent_io.c 		end_page_writeback(page);
page             3886 fs/btrfs/extent_io.c 		struct page *p = eb->pages[i];
page             3911 fs/btrfs/extent_io.c 			struct page *p = eb->pages[i];
page             3969 fs/btrfs/extent_io.c 			struct page *page = pvec.pages[i];
page             3971 fs/btrfs/extent_io.c 			if (!PagePrivate(page))
page             3975 fs/btrfs/extent_io.c 			if (!PagePrivate(page)) {
page             3980 fs/btrfs/extent_io.c 			eb = (struct extent_buffer *)page->private;
page             4167 fs/btrfs/extent_io.c 			struct page *page = pvec.pages[i];
page             4169 fs/btrfs/extent_io.c 			done_index = page->index + 1;
page             4177 fs/btrfs/extent_io.c 			if (!trylock_page(page)) {
page             4180 fs/btrfs/extent_io.c 				lock_page(page);
page             4183 fs/btrfs/extent_io.c 			if (unlikely(page->mapping != mapping)) {
page             4184 fs/btrfs/extent_io.c 				unlock_page(page);
page             4189 fs/btrfs/extent_io.c 				if (PageWriteback(page)) {
page             4193 fs/btrfs/extent_io.c 				wait_on_page_writeback(page);
page             4196 fs/btrfs/extent_io.c 			if (PageWriteback(page) ||
page             4197 fs/btrfs/extent_io.c 			    !clear_page_dirty_for_io(page)) {
page             4198 fs/btrfs/extent_io.c 				unlock_page(page);
page             4202 fs/btrfs/extent_io.c 			ret = __extent_writepage(page, wbc, epd);
page             4244 fs/btrfs/extent_io.c int extent_write_full_page(struct page *page, struct writeback_control *wbc)
page             4249 fs/btrfs/extent_io.c 		.tree = &BTRFS_I(page->mapping->host)->io_tree,
page             4254 fs/btrfs/extent_io.c 	ret = __extent_writepage(page, wbc, &epd);
page             4272 fs/btrfs/extent_io.c 	struct page *page;
page             4290 fs/btrfs/extent_io.c 		page = find_get_page(mapping, start >> PAGE_SHIFT);
page             4291 fs/btrfs/extent_io.c 		if (clear_page_dirty_for_io(page))
page             4292 fs/btrfs/extent_io.c 			ret = __extent_writepage(page, &wbc_writepages, &epd);
page             4294 fs/btrfs/extent_io.c 			btrfs_writepage_endio_finish_ordered(page, start,
page             4296 fs/btrfs/extent_io.c 			unlock_page(page);
page             4298 fs/btrfs/extent_io.c 		put_page(page);
page             4337 fs/btrfs/extent_io.c 	struct page *pagepool[16];
page             4347 fs/btrfs/extent_io.c 			struct page *page = lru_to_page(pages);
page             4349 fs/btrfs/extent_io.c 			prefetchw(&page->flags);
page             4350 fs/btrfs/extent_io.c 			list_del(&page->lru);
page             4351 fs/btrfs/extent_io.c 			if (add_to_page_cache_lru(page, mapping, page->index,
page             4353 fs/btrfs/extent_io.c 				put_page(page);
page             4357 fs/btrfs/extent_io.c 			pagepool[nr++] = page;
page             4358 fs/btrfs/extent_io.c 			contig_end = page_offset(page) + PAGE_SIZE - 1;
page             4386 fs/btrfs/extent_io.c 			  struct page *page, unsigned long offset)
page             4389 fs/btrfs/extent_io.c 	u64 start = page_offset(page);
page             4391 fs/btrfs/extent_io.c 	size_t blocksize = page->mapping->host->i_sb->s_blocksize;
page             4398 fs/btrfs/extent_io.c 	wait_on_page_writeback(page);
page             4410 fs/btrfs/extent_io.c 				    struct page *page, gfp_t mask)
page             4412 fs/btrfs/extent_io.c 	u64 start = page_offset(page);
page             4443 fs/btrfs/extent_io.c int try_release_extent_mapping(struct page *page, gfp_t mask)
page             4446 fs/btrfs/extent_io.c 	u64 start = page_offset(page);
page             4448 fs/btrfs/extent_io.c 	struct btrfs_inode *btrfs_inode = BTRFS_I(page->mapping->host);
page             4453 fs/btrfs/extent_io.c 	    page->mapping->host->i_size > SZ_16M) {
page             4485 fs/btrfs/extent_io.c 	return try_release_extent_state(tree, page, mask);
page             4874 fs/btrfs/extent_io.c 		struct page *page = eb->pages[i];
page             4876 fs/btrfs/extent_io.c 		if (!page)
page             4879 fs/btrfs/extent_io.c 			spin_lock(&page->mapping->private_lock);
page             4887 fs/btrfs/extent_io.c 		if (PagePrivate(page) &&
page             4888 fs/btrfs/extent_io.c 		    page->private == (unsigned long)eb) {
page             4890 fs/btrfs/extent_io.c 			BUG_ON(PageDirty(page));
page             4891 fs/btrfs/extent_io.c 			BUG_ON(PageWriteback(page));
page             4896 fs/btrfs/extent_io.c 			ClearPagePrivate(page);
page             4897 fs/btrfs/extent_io.c 			set_page_private(page, 0);
page             4899 fs/btrfs/extent_io.c 			put_page(page);
page             4903 fs/btrfs/extent_io.c 			spin_unlock(&page->mapping->private_lock);
page             4906 fs/btrfs/extent_io.c 		put_page(page);
page             4963 fs/btrfs/extent_io.c 	struct page *p;
page             5059 fs/btrfs/extent_io.c 		struct page *accessed)
page             5067 fs/btrfs/extent_io.c 		struct page *p = eb->pages[i];
page             5162 fs/btrfs/extent_io.c 	struct page *p;
page             5368 fs/btrfs/extent_io.c 	struct page *page;
page             5373 fs/btrfs/extent_io.c 		page = eb->pages[i];
page             5374 fs/btrfs/extent_io.c 		if (!PageDirty(page))
page             5377 fs/btrfs/extent_io.c 		lock_page(page);
page             5378 fs/btrfs/extent_io.c 		WARN_ON(!PagePrivate(page));
page             5380 fs/btrfs/extent_io.c 		clear_page_dirty_for_io(page);
page             5381 fs/btrfs/extent_io.c 		xa_lock_irq(&page->mapping->i_pages);
page             5382 fs/btrfs/extent_io.c 		if (!PageDirty(page))
page             5383 fs/btrfs/extent_io.c 			__xa_clear_mark(&page->mapping->i_pages,
page             5384 fs/btrfs/extent_io.c 					page_index(page), PAGECACHE_TAG_DIRTY);
page             5385 fs/btrfs/extent_io.c 		xa_unlock_irq(&page->mapping->i_pages);
page             5386 fs/btrfs/extent_io.c 		ClearPageError(page);
page             5387 fs/btrfs/extent_io.c 		unlock_page(page);
page             5421 fs/btrfs/extent_io.c 	struct page *page;
page             5427 fs/btrfs/extent_io.c 		page = eb->pages[i];
page             5428 fs/btrfs/extent_io.c 		if (page)
page             5429 fs/btrfs/extent_io.c 			ClearPageUptodate(page);
page             5436 fs/btrfs/extent_io.c 	struct page *page;
page             5442 fs/btrfs/extent_io.c 		page = eb->pages[i];
page             5443 fs/btrfs/extent_io.c 		SetPageUptodate(page);
page             5450 fs/btrfs/extent_io.c 	struct page *page;
page             5466 fs/btrfs/extent_io.c 		page = eb->pages[i];
page             5468 fs/btrfs/extent_io.c 			if (!trylock_page(page))
page             5471 fs/btrfs/extent_io.c 			lock_page(page);
page             5481 fs/btrfs/extent_io.c 		page = eb->pages[i];
page             5482 fs/btrfs/extent_io.c 		if (!PageUptodate(page)) {
page             5497 fs/btrfs/extent_io.c 		page = eb->pages[i];
page             5499 fs/btrfs/extent_io.c 		if (!PageUptodate(page)) {
page             5502 fs/btrfs/extent_io.c 				unlock_page(page);
page             5506 fs/btrfs/extent_io.c 			ClearPageError(page);
page             5507 fs/btrfs/extent_io.c 			err = __extent_read_full_page(tree, page,
page             5524 fs/btrfs/extent_io.c 			unlock_page(page);
page             5538 fs/btrfs/extent_io.c 		page = eb->pages[i];
page             5539 fs/btrfs/extent_io.c 		wait_on_page_locked(page);
page             5540 fs/btrfs/extent_io.c 		if (!PageUptodate(page))
page             5549 fs/btrfs/extent_io.c 		page = eb->pages[locked_pages];
page             5550 fs/btrfs/extent_io.c 		unlock_page(page);
page             5560 fs/btrfs/extent_io.c 	struct page *page;
page             5576 fs/btrfs/extent_io.c 		page = eb->pages[i];
page             5579 fs/btrfs/extent_io.c 		kaddr = page_address(page);
page             5595 fs/btrfs/extent_io.c 	struct page *page;
page             5608 fs/btrfs/extent_io.c 		page = eb->pages[i];
page             5611 fs/btrfs/extent_io.c 		kaddr = page_address(page);
page             5638 fs/btrfs/extent_io.c 	struct page *p;
page             5673 fs/btrfs/extent_io.c 	struct page *page;
page             5686 fs/btrfs/extent_io.c 		page = eb->pages[i];
page             5690 fs/btrfs/extent_io.c 		kaddr = page_address(page);
page             5729 fs/btrfs/extent_io.c 	struct page *page;
page             5741 fs/btrfs/extent_io.c 		page = eb->pages[i];
page             5742 fs/btrfs/extent_io.c 		WARN_ON(!PageUptodate(page));
page             5745 fs/btrfs/extent_io.c 		kaddr = page_address(page);
page             5760 fs/btrfs/extent_io.c 	struct page *page;
page             5771 fs/btrfs/extent_io.c 		page = eb->pages[i];
page             5772 fs/btrfs/extent_io.c 		WARN_ON(!PageUptodate(page));
page             5775 fs/btrfs/extent_io.c 		kaddr = page_address(page);
page             5805 fs/btrfs/extent_io.c 	struct page *page;
page             5815 fs/btrfs/extent_io.c 		page = dst->pages[i];
page             5816 fs/btrfs/extent_io.c 		WARN_ON(!PageUptodate(page));
page             5820 fs/btrfs/extent_io.c 		kaddr = page_address(page);
page             5873 fs/btrfs/extent_io.c 	struct page *page;
page             5878 fs/btrfs/extent_io.c 	page = eb->pages[i];
page             5879 fs/btrfs/extent_io.c 	WARN_ON(!PageUptodate(page));
page             5880 fs/btrfs/extent_io.c 	kaddr = page_address(page);
page             5895 fs/btrfs/extent_io.c 	struct page *page;
page             5903 fs/btrfs/extent_io.c 	page = eb->pages[i];
page             5904 fs/btrfs/extent_io.c 	WARN_ON(!PageUptodate(page));
page             5905 fs/btrfs/extent_io.c 	kaddr = page_address(page);
page             5914 fs/btrfs/extent_io.c 			page = eb->pages[++i];
page             5915 fs/btrfs/extent_io.c 			WARN_ON(!PageUptodate(page));
page             5916 fs/btrfs/extent_io.c 			kaddr = page_address(page);
page             5937 fs/btrfs/extent_io.c 	struct page *page;
page             5945 fs/btrfs/extent_io.c 	page = eb->pages[i];
page             5946 fs/btrfs/extent_io.c 	WARN_ON(!PageUptodate(page));
page             5947 fs/btrfs/extent_io.c 	kaddr = page_address(page);
page             5956 fs/btrfs/extent_io.c 			page = eb->pages[++i];
page             5957 fs/btrfs/extent_io.c 			WARN_ON(!PageUptodate(page));
page             5958 fs/btrfs/extent_io.c 			kaddr = page_address(page);
page             5973 fs/btrfs/extent_io.c static void copy_pages(struct page *dst_page, struct page *src_page,
page             6088 fs/btrfs/extent_io.c int try_release_extent_buffer(struct page *page)
page             6096 fs/btrfs/extent_io.c 	spin_lock(&page->mapping->private_lock);
page             6097 fs/btrfs/extent_io.c 	if (!PagePrivate(page)) {
page             6098 fs/btrfs/extent_io.c 		spin_unlock(&page->mapping->private_lock);
page             6102 fs/btrfs/extent_io.c 	eb = (struct extent_buffer *)page->private;
page             6113 fs/btrfs/extent_io.c 		spin_unlock(&page->mapping->private_lock);
page             6116 fs/btrfs/extent_io.c 	spin_unlock(&page->mapping->private_lock);
page              110 fs/btrfs/extent_io.h 				    struct page *page, u64 start, u64 end,
page              188 fs/btrfs/extent_io.h 	struct page *pages[INLINE_EXTENT_BUFFER_PAGES];
page              266 fs/btrfs/extent_io.h int try_release_extent_mapping(struct page *page, gfp_t mask);
page              267 fs/btrfs/extent_io.h int try_release_extent_buffer(struct page *page);
page              277 fs/btrfs/extent_io.h int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
page              407 fs/btrfs/extent_io.h 			  struct page *page, unsigned long offset);
page              408 fs/btrfs/extent_io.h int extent_write_full_page(struct page *page, struct writeback_control *wbc);
page              419 fs/btrfs/extent_io.h void set_page_extent_mapped(struct page *page);
page              497 fs/btrfs/extent_io.h 				  struct page *locked_page,
page              509 fs/btrfs/extent_io.h 		      u64 length, u64 logical, struct page *page,
page              514 fs/btrfs/extent_io.h 		     struct page *page, u64 ino, unsigned int pg_offset);
page              515 fs/btrfs/extent_io.h void end_extent_writepage(struct page *page, int err, u64 start, u64 end);
page              527 fs/btrfs/extent_io.h 	struct page *page;
page              546 fs/btrfs/extent_io.h 				    struct page *page, int pg_offset, int icsum,
page              553 fs/btrfs/extent_io.h 			     struct page *locked_page, u64 *start,
page              397 fs/btrfs/file.c 					 struct page **prepared_pages,
page              408 fs/btrfs/file.c 		struct page *page = prepared_pages[pg];
page              412 fs/btrfs/file.c 		copied = iov_iter_copy_from_user_atomic(page, i, offset, count);
page              415 fs/btrfs/file.c 		flush_dcache_page(page);
page              426 fs/btrfs/file.c 		if (!PageUptodate(page) && copied < count)
page              450 fs/btrfs/file.c static void btrfs_drop_pages(struct page **pages, size_t num_pages)
page              515 fs/btrfs/file.c int btrfs_dirty_pages(struct inode *inode, struct page **pages,
page              567 fs/btrfs/file.c 		struct page *p = pages[i];
page             1395 fs/btrfs/file.c 				 struct page *page, u64 pos,
page             1401 fs/btrfs/file.c 	    !PageUptodate(page)) {
page             1402 fs/btrfs/file.c 		ret = btrfs_readpage(NULL, page);
page             1405 fs/btrfs/file.c 		lock_page(page);
page             1406 fs/btrfs/file.c 		if (!PageUptodate(page)) {
page             1407 fs/btrfs/file.c 			unlock_page(page);
page             1410 fs/btrfs/file.c 		if (page->mapping != inode->i_mapping) {
page             1411 fs/btrfs/file.c 			unlock_page(page);
page             1421 fs/btrfs/file.c static noinline int prepare_pages(struct inode *inode, struct page **pages,
page             1481 fs/btrfs/file.c lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
page             1593 fs/btrfs/file.c 	struct page **pages = NULL;
page             1605 fs/btrfs/file.c 			PAGE_SIZE / (sizeof(struct page *)));
page             1608 fs/btrfs/file.c 	pages = kmalloc_array(nrptrs, sizeof(struct page *), GFP_KERNEL);
page              318 fs/btrfs/free-space-cache.c 	io_ctl->pages = kcalloc(num_pages, sizeof(struct page *), GFP_NOFS);
page              348 fs/btrfs/free-space-cache.c 	io_ctl->page = io_ctl->pages[io_ctl->index++];
page              349 fs/btrfs/free-space-cache.c 	io_ctl->cur = page_address(io_ctl->page);
page              374 fs/btrfs/free-space-cache.c 	struct page *page;
page              379 fs/btrfs/free-space-cache.c 		page = find_or_create_page(inode->i_mapping, i, mask);
page              380 fs/btrfs/free-space-cache.c 		if (!page) {
page              384 fs/btrfs/free-space-cache.c 		io_ctl->pages[i] = page;
page              385 fs/btrfs/free-space-cache.c 		if (uptodate && !PageUptodate(page)) {
page              386 fs/btrfs/free-space-cache.c 			btrfs_readpage(NULL, page);
page              387 fs/btrfs/free-space-cache.c 			lock_page(page);
page              388 fs/btrfs/free-space-cache.c 			if (page->mapping != inode->i_mapping) {
page              394 fs/btrfs/free-space-cache.c 			if (!PageUptodate(page)) {
page               41 fs/btrfs/free-space-cache.h 	struct page *page;
page               42 fs/btrfs/free-space-cache.h 	struct page **pages;
page               84 fs/btrfs/inode.c 				   struct page *locked_page,
page              108 fs/btrfs/inode.c 						 struct page *locked_page,
page              116 fs/btrfs/inode.c 	struct page *page;
page              119 fs/btrfs/inode.c 		page = find_get_page(inode->i_mapping, index);
page              121 fs/btrfs/inode.c 		if (!page)
page              123 fs/btrfs/inode.c 		ClearPagePrivate2(page);
page              124 fs/btrfs/inode.c 		put_page(page);
page              171 fs/btrfs/inode.c 				struct page **compressed_pages)
page              174 fs/btrfs/inode.c 	struct page *page = NULL;
page              216 fs/btrfs/inode.c 		struct page *cpage;
page              234 fs/btrfs/inode.c 		page = find_get_page(inode->i_mapping,
page              237 fs/btrfs/inode.c 		kaddr = kmap_atomic(page);
page              241 fs/btrfs/inode.c 		put_page(page);
page              271 fs/btrfs/inode.c 					  struct page **compressed_pages)
page              358 fs/btrfs/inode.c 	struct page **pages;
page              366 fs/btrfs/inode.c 	struct page *locked_page;
page              384 fs/btrfs/inode.c 				     struct page **pages,
page              479 fs/btrfs/inode.c 	struct page **pages = NULL;
page              547 fs/btrfs/inode.c 		pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
page              587 fs/btrfs/inode.c 			struct page *page = pages[nr_pages - 1];
page              594 fs/btrfs/inode.c 				kaddr = kmap_atomic(page);
page              884 fs/btrfs/inode.c 			struct page *p = async_extent->pages[0];
page              967 fs/btrfs/inode.c 				   struct page *locked_page,
page             1209 fs/btrfs/inode.c static int cow_file_range_async(struct inode *inode, struct page *locked_page,
page             1333 fs/btrfs/inode.c 				       struct page *locked_page,
page             1709 fs/btrfs/inode.c int btrfs_run_delalloc_range(struct inode *inode, struct page *locked_page,
page             2017 fs/btrfs/inode.c int btrfs_bio_fits_in_stripe(struct page *page, size_t size, struct bio *bio,
page             2020 fs/btrfs/inode.c 	struct inode *inode = page->mapping->host;
page             2170 fs/btrfs/inode.c 	struct page *page;
page             2181 fs/btrfs/inode.c 	struct page *page;
page             2189 fs/btrfs/inode.c 	page = fixup->page;
page             2191 fs/btrfs/inode.c 	page_start = page_offset(page);
page             2192 fs/btrfs/inode.c 	page_end = page_offset(page) + PAGE_SIZE - 1;
page             2201 fs/btrfs/inode.c 	lock_page(page);
page             2208 fs/btrfs/inode.c 	if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
page             2248 fs/btrfs/inode.c 	if (PagePrivate2(page))
page             2256 fs/btrfs/inode.c 		unlock_page(page);
page             2274 fs/btrfs/inode.c 	BUG_ON(!PageDirty(page));
page             2289 fs/btrfs/inode.c 		mapping_set_error(page->mapping, ret);
page             2290 fs/btrfs/inode.c 		end_extent_writepage(page, ret, page_start, page_end);
page             2291 fs/btrfs/inode.c 		clear_page_dirty_for_io(page);
page             2292 fs/btrfs/inode.c 		SetPageError(page);
page             2294 fs/btrfs/inode.c 	ClearPageChecked(page);
page             2295 fs/btrfs/inode.c 	unlock_page(page);
page             2296 fs/btrfs/inode.c 	put_page(page);
page             2318 fs/btrfs/inode.c int btrfs_writepage_cow_fixup(struct page *page, u64 start, u64 end)
page             2320 fs/btrfs/inode.c 	struct inode *inode = page->mapping->host;
page             2325 fs/btrfs/inode.c 	if (TestClearPagePrivate2(page))
page             2335 fs/btrfs/inode.c 	if (PageChecked(page))
page             2349 fs/btrfs/inode.c 	SetPageChecked(page);
page             2350 fs/btrfs/inode.c 	get_page(page);
page             2352 fs/btrfs/inode.c 	fixup->page = page;
page             3339 fs/btrfs/inode.c void btrfs_writepage_endio_finish_ordered(struct page *page, u64 start,
page             3342 fs/btrfs/inode.c 	struct inode *inode = page->mapping->host;
page             3347 fs/btrfs/inode.c 	trace_btrfs_writepage_end_io_hook(page, start, end, uptodate);
page             3349 fs/btrfs/inode.c 	ClearPagePrivate2(page);
page             3365 fs/btrfs/inode.c 				  int icsum, struct page *page,
page             3377 fs/btrfs/inode.c 	kaddr = kmap_atomic(page);
page             3393 fs/btrfs/inode.c 	flush_dcache_page(page);
page             3404 fs/btrfs/inode.c 				      u64 phy_offset, struct page *page,
page             3407 fs/btrfs/inode.c 	size_t offset = start - page_offset(page);
page             3408 fs/btrfs/inode.c 	struct inode *inode = page->mapping->host;
page             3412 fs/btrfs/inode.c 	if (PageChecked(page)) {
page             3413 fs/btrfs/inode.c 		ClearPageChecked(page);
page             3427 fs/btrfs/inode.c 	return __readpage_endio_check(inode, io_bio, phy_offset, page, offset,
page             5053 fs/btrfs/inode.c 	struct page *page;
page             5072 fs/btrfs/inode.c 	page = find_or_create_page(mapping, index, mask);
page             5073 fs/btrfs/inode.c 	if (!page) {
page             5081 fs/btrfs/inode.c 	if (!PageUptodate(page)) {
page             5082 fs/btrfs/inode.c 		ret = btrfs_readpage(NULL, page);
page             5083 fs/btrfs/inode.c 		lock_page(page);
page             5084 fs/btrfs/inode.c 		if (page->mapping != mapping) {
page             5085 fs/btrfs/inode.c 			unlock_page(page);
page             5086 fs/btrfs/inode.c 			put_page(page);
page             5089 fs/btrfs/inode.c 		if (!PageUptodate(page)) {
page             5094 fs/btrfs/inode.c 	wait_on_page_writeback(page);
page             5097 fs/btrfs/inode.c 	set_page_extent_mapped(page);
page             5103 fs/btrfs/inode.c 		unlock_page(page);
page             5104 fs/btrfs/inode.c 		put_page(page);
page             5125 fs/btrfs/inode.c 		kaddr = kmap(page);
page             5127 fs/btrfs/inode.c 			memset(kaddr + (block_start - page_offset(page)),
page             5130 fs/btrfs/inode.c 			memset(kaddr + (block_start - page_offset(page)) +  offset,
page             5132 fs/btrfs/inode.c 		flush_dcache_page(page);
page             5133 fs/btrfs/inode.c 		kunmap(page);
page             5135 fs/btrfs/inode.c 	ClearPageChecked(page);
page             5136 fs/btrfs/inode.c 	set_page_dirty(page);
page             5144 fs/btrfs/inode.c 	unlock_page(page);
page             5145 fs/btrfs/inode.c 	put_page(page);
page             6964 fs/btrfs/inode.c 				      struct page *page,
page             6989 fs/btrfs/inode.c 	ret = btrfs_decompress(compress_type, tmp, page,
page             7001 fs/btrfs/inode.c 		char *map = kmap(page);
page             7003 fs/btrfs/inode.c 		kunmap(page);
page             7018 fs/btrfs/inode.c 				    struct page *page,
page             7037 fs/btrfs/inode.c 	const bool new_inline = !page || create;
page             7048 fs/btrfs/inode.c 		else if (em->block_start == EXTENT_MAP_INLINE && page)
page             7180 fs/btrfs/inode.c 		extent_offset = page_offset(page) + pg_offset - extent_start;
page             7190 fs/btrfs/inode.c 		if (!PageUptodate(page)) {
page             7193 fs/btrfs/inode.c 				ret = uncompress_inline(path, page, pg_offset,
page             7200 fs/btrfs/inode.c 				map = kmap(page);
page             7208 fs/btrfs/inode.c 				kunmap(page);
page             7210 fs/btrfs/inode.c 			flush_dcache_page(page);
page             8010 fs/btrfs/inode.c 				   struct page *page, unsigned int pgoff,
page             8046 fs/btrfs/inode.c 	bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page,
page             8855 fs/btrfs/inode.c int btrfs_readpage(struct file *file, struct page *page)
page             8858 fs/btrfs/inode.c 	tree = &BTRFS_I(page->mapping->host)->io_tree;
page             8859 fs/btrfs/inode.c 	return extent_read_full_page(tree, page, btrfs_get_extent, 0);
page             8862 fs/btrfs/inode.c static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
page             8864 fs/btrfs/inode.c 	struct inode *inode = page->mapping->host;
page             8868 fs/btrfs/inode.c 		redirty_page_for_writepage(wbc, page);
page             8869 fs/btrfs/inode.c 		unlock_page(page);
page             8879 fs/btrfs/inode.c 		redirty_page_for_writepage(wbc, page);
page             8882 fs/btrfs/inode.c 	ret = extent_write_full_page(page, wbc);
page             8900 fs/btrfs/inode.c static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
page             8902 fs/btrfs/inode.c 	int ret = try_release_extent_mapping(page, gfp_flags);
page             8904 fs/btrfs/inode.c 		ClearPagePrivate(page);
page             8905 fs/btrfs/inode.c 		set_page_private(page, 0);
page             8906 fs/btrfs/inode.c 		put_page(page);
page             8911 fs/btrfs/inode.c static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
page             8913 fs/btrfs/inode.c 	if (PageWriteback(page) || PageDirty(page))
page             8915 fs/btrfs/inode.c 	return __btrfs_releasepage(page, gfp_flags);
page             8918 fs/btrfs/inode.c static void btrfs_invalidatepage(struct page *page, unsigned int offset,
page             8921 fs/btrfs/inode.c 	struct inode *inode = page->mapping->host;
page             8925 fs/btrfs/inode.c 	u64 page_start = page_offset(page);
page             8938 fs/btrfs/inode.c 	wait_on_page_writeback(page);
page             8942 fs/btrfs/inode.c 		btrfs_releasepage(page, GFP_NOFS);
page             8967 fs/btrfs/inode.c 		if (TestClearPagePrivate2(page)) {
page             9012 fs/btrfs/inode.c 	if (PageDirty(page))
page             9020 fs/btrfs/inode.c 		__btrfs_releasepage(page, GFP_NOFS);
page             9023 fs/btrfs/inode.c 	ClearPageChecked(page);
page             9024 fs/btrfs/inode.c 	if (PagePrivate(page)) {
page             9025 fs/btrfs/inode.c 		ClearPagePrivate(page);
page             9026 fs/btrfs/inode.c 		set_page_private(page, 0);
page             9027 fs/btrfs/inode.c 		put_page(page);
page             9048 fs/btrfs/inode.c 	struct page *page = vmf->page;
page             9069 fs/btrfs/inode.c 	page_start = page_offset(page);
page             9096 fs/btrfs/inode.c 	lock_page(page);
page             9099 fs/btrfs/inode.c 	if ((page->mapping != inode->i_mapping) ||
page             9104 fs/btrfs/inode.c 	wait_on_page_writeback(page);
page             9107 fs/btrfs/inode.c 	set_page_extent_mapped(page);
page             9118 fs/btrfs/inode.c 		unlock_page(page);
page             9124 fs/btrfs/inode.c 	if (page->index == ((size - 1) >> PAGE_SHIFT)) {
page             9163 fs/btrfs/inode.c 		kaddr = kmap(page);
page             9165 fs/btrfs/inode.c 		flush_dcache_page(page);
page             9166 fs/btrfs/inode.c 		kunmap(page);
page             9168 fs/btrfs/inode.c 	ClearPageChecked(page);
page             9169 fs/btrfs/inode.c 	set_page_dirty(page);
page             9170 fs/btrfs/inode.c 	SetPageUptodate(page);
page             9186 fs/btrfs/inode.c 	unlock_page(page);
page             10627 fs/btrfs/inode.c static int btrfs_set_page_dirty(struct page *page)
page             10629 fs/btrfs/inode.c 	return __set_page_dirty_nobuffers(page);
page             10717 fs/btrfs/inode.c 	struct page *page;
page             10720 fs/btrfs/inode.c 		page = find_get_page(inode->i_mapping, index);
page             10721 fs/btrfs/inode.c 		ASSERT(page); /* Pages should be in the extent_io_tree */
page             10722 fs/btrfs/inode.c 		set_page_writeback(page);
page             10723 fs/btrfs/inode.c 		put_page(page);
page             1235 fs/btrfs/ioctl.c 				    struct page **pages,
page             1269 fs/btrfs/ioctl.c 		struct page *page;
page             1271 fs/btrfs/ioctl.c 		page = find_or_create_page(inode->i_mapping,
page             1273 fs/btrfs/ioctl.c 		if (!page)
page             1276 fs/btrfs/ioctl.c 		page_start = page_offset(page);
page             1288 fs/btrfs/ioctl.c 			unlock_page(page);
page             1291 fs/btrfs/ioctl.c 			lock_page(page);
page             1296 fs/btrfs/ioctl.c 			if (page->mapping != inode->i_mapping) {
page             1297 fs/btrfs/ioctl.c 				unlock_page(page);
page             1298 fs/btrfs/ioctl.c 				put_page(page);
page             1303 fs/btrfs/ioctl.c 		if (!PageUptodate(page)) {
page             1304 fs/btrfs/ioctl.c 			btrfs_readpage(NULL, page);
page             1305 fs/btrfs/ioctl.c 			lock_page(page);
page             1306 fs/btrfs/ioctl.c 			if (!PageUptodate(page)) {
page             1307 fs/btrfs/ioctl.c 				unlock_page(page);
page             1308 fs/btrfs/ioctl.c 				put_page(page);
page             1314 fs/btrfs/ioctl.c 		if (page->mapping != inode->i_mapping) {
page             1315 fs/btrfs/ioctl.c 			unlock_page(page);
page             1316 fs/btrfs/ioctl.c 			put_page(page);
page             1320 fs/btrfs/ioctl.c 		pages[i] = page;
page             1408 fs/btrfs/ioctl.c 	struct page **pages = NULL;
page             1440 fs/btrfs/ioctl.c 	pages = kmalloc_array(max_cluster, sizeof(struct page *), GFP_KERNEL);
page              137 fs/btrfs/lzo.c 			      struct page **pages,
page              147 fs/btrfs/lzo.c 	struct page *in_page = NULL;
page              148 fs/btrfs/lzo.c 	struct page *out_page = NULL;
page              328 fs/btrfs/lzo.c 	struct page **pages_in = cb->compressed_pages;
page              448 fs/btrfs/lzo.c 			  struct page *dest_page,
page              157 fs/btrfs/raid56.c 	struct page **stripe_pages;
page              163 fs/btrfs/raid56.c 	struct page **bio_pages;
page              304 fs/btrfs/raid56.c 	struct page *s;
page              305 fs/btrfs/raid56.c 	struct page *d;
page              625 fs/btrfs/raid56.c static struct page *rbio_stripe_page(struct btrfs_raid_bio *rbio, int stripe,
page              634 fs/btrfs/raid56.c static struct page *rbio_pstripe_page(struct btrfs_raid_bio *rbio, int index)
page              643 fs/btrfs/raid56.c static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index)
page              944 fs/btrfs/raid56.c static struct page *page_in_rbio(struct btrfs_raid_bio *rbio,
page              948 fs/btrfs/raid56.c 	struct page *p = NULL;
page             1045 fs/btrfs/raid56.c 	struct page *page;
page             1050 fs/btrfs/raid56.c 		page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
page             1051 fs/btrfs/raid56.c 		if (!page)
page             1053 fs/btrfs/raid56.c 		rbio->stripe_pages[i] = page;
page             1062 fs/btrfs/raid56.c 	struct page *page;
page             1069 fs/btrfs/raid56.c 		page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
page             1070 fs/btrfs/raid56.c 		if (!page)
page             1072 fs/btrfs/raid56.c 		rbio->stripe_pages[i] = page;
page             1084 fs/btrfs/raid56.c 			    struct page *page,
page             1116 fs/btrfs/raid56.c 			ret = bio_add_page(last, page, PAGE_SIZE, 0);
page             1128 fs/btrfs/raid56.c 	bio_add_page(bio, page, PAGE_SIZE, 0);
page             1248 fs/btrfs/raid56.c 		struct page *p;
page             1290 fs/btrfs/raid56.c 			struct page *page;
page             1292 fs/btrfs/raid56.c 				page = page_in_rbio(rbio, stripe, pagenr, 1);
page             1293 fs/btrfs/raid56.c 				if (!page)
page             1296 fs/btrfs/raid56.c 			       page = rbio_stripe_page(rbio, stripe, pagenr);
page             1300 fs/btrfs/raid56.c 				       page, stripe, pagenr, rbio->stripe_len);
page             1314 fs/btrfs/raid56.c 			struct page *page;
page             1316 fs/btrfs/raid56.c 				page = page_in_rbio(rbio, stripe, pagenr, 1);
page             1317 fs/btrfs/raid56.c 				if (!page)
page             1320 fs/btrfs/raid56.c 			       page = rbio_stripe_page(rbio, stripe, pagenr);
page             1323 fs/btrfs/raid56.c 			ret = rbio_add_io_page(rbio, &bio_list, page,
page             1535 fs/btrfs/raid56.c 			struct page *page;
page             1542 fs/btrfs/raid56.c 			page = page_in_rbio(rbio, stripe, pagenr, 1);
page             1543 fs/btrfs/raid56.c 			if (page)
page             1546 fs/btrfs/raid56.c 			page = rbio_stripe_page(rbio, stripe, pagenr);
page             1551 fs/btrfs/raid56.c 			if (PageUptodate(page))
page             1554 fs/btrfs/raid56.c 			ret = rbio_add_io_page(rbio, &bio_list, page,
page             1815 fs/btrfs/raid56.c 	struct page *page;
page             1857 fs/btrfs/raid56.c 				page = page_in_rbio(rbio, stripe, pagenr, 0);
page             1859 fs/btrfs/raid56.c 				page = rbio_stripe_page(rbio, stripe, pagenr);
page             1861 fs/btrfs/raid56.c 			pointers[stripe] = kmap(page);
page             1948 fs/btrfs/raid56.c 					page = rbio_stripe_page(rbio, faila, i);
page             1949 fs/btrfs/raid56.c 					SetPageUptodate(page);
page             1952 fs/btrfs/raid56.c 					page = rbio_stripe_page(rbio, failb, i);
page             1953 fs/btrfs/raid56.c 					SetPageUptodate(page);
page             1965 fs/btrfs/raid56.c 				page = page_in_rbio(rbio, stripe, pagenr, 0);
page             1967 fs/btrfs/raid56.c 				page = rbio_stripe_page(rbio, stripe, pagenr);
page             1969 fs/btrfs/raid56.c 			kunmap(page);
page             2085 fs/btrfs/raid56.c 			struct page *p;
page             2309 fs/btrfs/raid56.c void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
page             2320 fs/btrfs/raid56.c 	rbio->bio_pages[index] = page;
page             2332 fs/btrfs/raid56.c 	struct page *page;
page             2340 fs/btrfs/raid56.c 			page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
page             2341 fs/btrfs/raid56.c 			if (!page)
page             2343 fs/btrfs/raid56.c 			rbio->stripe_pages[index] = page;
page             2360 fs/btrfs/raid56.c 	struct page *p_page = NULL;
page             2361 fs/btrfs/raid56.c 	struct page *q_page = NULL;
page             2410 fs/btrfs/raid56.c 		struct page *p;
page             2463 fs/btrfs/raid56.c 		struct page *page;
page             2465 fs/btrfs/raid56.c 		page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
page             2467 fs/btrfs/raid56.c 			       page, rbio->scrubp, pagenr, rbio->stripe_len);
page             2476 fs/btrfs/raid56.c 		struct page *page;
page             2478 fs/btrfs/raid56.c 		page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
page             2479 fs/btrfs/raid56.c 		ret = rbio_add_io_page(rbio, &bio_list, page,
page             2636 fs/btrfs/raid56.c 			struct page *page;
page             2643 fs/btrfs/raid56.c 			page = page_in_rbio(rbio, stripe, pagenr, 1);
page             2644 fs/btrfs/raid56.c 			if (page)
page             2647 fs/btrfs/raid56.c 			page = rbio_stripe_page(rbio, stripe, pagenr);
page             2652 fs/btrfs/raid56.c 			if (PageUptodate(page))
page             2655 fs/btrfs/raid56.c 			ret = rbio_add_io_page(rbio, &bio_list, page,
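The raid56.c entries at lines 1045-1072 and 2332-2343 above repeat a single allocation idiom: walk the stripe_pages array, skip slots that already hold a page (they may have been populated from an incoming bio), and fill the rest with GFP_NOFS highmem pages. A stripped-down sketch, with the array and count passed in rather than taken from struct btrfs_raid_bio:

	#include <linux/gfp.h>
	#include <linux/mm.h>

	/* Sketch of the stripe-page allocation loop shown above; on failure
	 * the caller's teardown path is expected to free whatever was
	 * allocated so far. */
	static int alloc_stripe_pages(struct page **stripe_pages, int nr_pages)
	{
		int i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page;

			if (stripe_pages[i])
				continue;

			page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
			if (!page)
				return -ENOMEM;
			stripe_pages[i] = page;
		}
		return 0;
	}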
page               39 fs/btrfs/raid56.h void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
page             3294 fs/btrfs/relocation.c 	struct page *page;
page             3326 fs/btrfs/relocation.c 		page = find_lock_page(inode->i_mapping, index);
page             3327 fs/btrfs/relocation.c 		if (!page) {
page             3331 fs/btrfs/relocation.c 			page = find_or_create_page(inode->i_mapping, index,
page             3333 fs/btrfs/relocation.c 			if (!page) {
page             3343 fs/btrfs/relocation.c 		if (PageReadahead(page)) {
page             3345 fs/btrfs/relocation.c 						   ra, NULL, page, index,
page             3349 fs/btrfs/relocation.c 		if (!PageUptodate(page)) {
page             3350 fs/btrfs/relocation.c 			btrfs_readpage(NULL, page);
page             3351 fs/btrfs/relocation.c 			lock_page(page);
page             3352 fs/btrfs/relocation.c 			if (!PageUptodate(page)) {
page             3353 fs/btrfs/relocation.c 				unlock_page(page);
page             3354 fs/btrfs/relocation.c 				put_page(page);
page             3364 fs/btrfs/relocation.c 		page_start = page_offset(page);
page             3369 fs/btrfs/relocation.c 		set_page_extent_mapped(page);
page             3382 fs/btrfs/relocation.c 			unlock_page(page);
page             3383 fs/btrfs/relocation.c 			put_page(page);
page             3395 fs/btrfs/relocation.c 		set_page_dirty(page);
page             3399 fs/btrfs/relocation.c 		unlock_page(page);
page             3400 fs/btrfs/relocation.c 		put_page(page);
page               64 fs/btrfs/scrub.c 	struct page		*page;
page              258 fs/btrfs/scrub.c static inline int scrub_is_page_on_raid56(struct scrub_page *page)
page              260 fs/btrfs/scrub.c 	return page->recover &&
page              261 fs/btrfs/scrub.c 	       (page->recover->bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK);
page              550 fs/btrfs/scrub.c 			WARN_ON(!sbio->pagev[i]->page);
page             1329 fs/btrfs/scrub.c 			struct scrub_page *page;
page             1334 fs/btrfs/scrub.c 			page = kzalloc(sizeof(*page), GFP_NOFS);
page             1335 fs/btrfs/scrub.c 			if (!page) {
page             1343 fs/btrfs/scrub.c 			scrub_page_get(page);
page             1344 fs/btrfs/scrub.c 			sblock->pagev[page_index] = page;
page             1345 fs/btrfs/scrub.c 			page->sblock = sblock;
page             1346 fs/btrfs/scrub.c 			page->flags = flags;
page             1347 fs/btrfs/scrub.c 			page->generation = generation;
page             1348 fs/btrfs/scrub.c 			page->logical = logical;
page             1349 fs/btrfs/scrub.c 			page->have_csum = have_csum;
page             1351 fs/btrfs/scrub.c 				memcpy(page->csum,
page             1364 fs/btrfs/scrub.c 			page->physical = bbio->stripes[stripe_index].physical +
page             1366 fs/btrfs/scrub.c 			page->dev = bbio->stripes[stripe_index].dev;
page             1369 fs/btrfs/scrub.c 			page->physical_for_dev_replace =
page             1373 fs/btrfs/scrub.c 			page->mirror_num = mirror_index + 1;
page             1375 fs/btrfs/scrub.c 			page->page = alloc_page(GFP_NOFS);
page             1376 fs/btrfs/scrub.c 			if (!page->page)
page             1380 fs/btrfs/scrub.c 			page->recover = recover;
page             1398 fs/btrfs/scrub.c 					struct scrub_page *page)
page             1404 fs/btrfs/scrub.c 	bio->bi_iter.bi_sector = page->logical >> 9;
page             1408 fs/btrfs/scrub.c 	mirror_num = page->sblock->pagev[0]->mirror_num;
page             1409 fs/btrfs/scrub.c 	ret = raid56_parity_recover(fs_info, bio, page->recover->bbio,
page             1410 fs/btrfs/scrub.c 				    page->recover->map_length,
page             1435 fs/btrfs/scrub.c 		struct scrub_page *page = sblock->pagev[page_num];
page             1437 fs/btrfs/scrub.c 		WARN_ON(!page->page);
page             1438 fs/btrfs/scrub.c 		bio_add_page(bio, page->page, PAGE_SIZE, 0);
page             1479 fs/btrfs/scrub.c 		struct scrub_page *page = sblock->pagev[page_num];
page             1481 fs/btrfs/scrub.c 		if (page->dev->bdev == NULL) {
page             1482 fs/btrfs/scrub.c 			page->io_error = 1;
page             1487 fs/btrfs/scrub.c 		WARN_ON(!page->page);
page             1489 fs/btrfs/scrub.c 		bio_set_dev(bio, page->dev->bdev);
page             1491 fs/btrfs/scrub.c 		bio_add_page(bio, page->page, PAGE_SIZE, 0);
page             1492 fs/btrfs/scrub.c 		bio->bi_iter.bi_sector = page->physical >> 9;
page             1496 fs/btrfs/scrub.c 			page->io_error = 1;
page             1556 fs/btrfs/scrub.c 	BUG_ON(page_bad->page == NULL);
page             1557 fs/btrfs/scrub.c 	BUG_ON(page_good->page == NULL);
page             1574 fs/btrfs/scrub.c 		ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
page             1619 fs/btrfs/scrub.c 	BUG_ON(spage->page == NULL);
page             1621 fs/btrfs/scrub.c 		void *mapped_buffer = kmap_atomic(spage->page);
page             1624 fs/btrfs/scrub.c 		flush_dcache_page(spage->page);
page             1675 fs/btrfs/scrub.c 	ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
page             1795 fs/btrfs/scrub.c 	struct page *page;
page             1808 fs/btrfs/scrub.c 	page = sblock->pagev[0]->page;
page             1809 fs/btrfs/scrub.c 	buffer = kmap_atomic(page);
page             1823 fs/btrfs/scrub.c 		BUG_ON(!sblock->pagev[index]->page);
page             1824 fs/btrfs/scrub.c 		page = sblock->pagev[index]->page;
page             1825 fs/btrfs/scrub.c 		buffer = kmap_atomic(page);
page             1843 fs/btrfs/scrub.c 	struct page *page;
page             1854 fs/btrfs/scrub.c 	page = sblock->pagev[0]->page;
page             1855 fs/btrfs/scrub.c 	mapped_buffer = kmap_atomic(page);
page             1893 fs/btrfs/scrub.c 		BUG_ON(!sblock->pagev[index]->page);
page             1894 fs/btrfs/scrub.c 		page = sblock->pagev[index]->page;
page             1895 fs/btrfs/scrub.c 		mapped_buffer = kmap_atomic(page);
page             1915 fs/btrfs/scrub.c 	struct page *page;
page             1928 fs/btrfs/scrub.c 	page = sblock->pagev[0]->page;
page             1929 fs/btrfs/scrub.c 	mapped_buffer = kmap_atomic(page);
page             1956 fs/btrfs/scrub.c 		BUG_ON(!sblock->pagev[index]->page);
page             1957 fs/btrfs/scrub.c 		page = sblock->pagev[index]->page;
page             1958 fs/btrfs/scrub.c 		mapped_buffer = kmap_atomic(page);
page             2014 fs/btrfs/scrub.c 		if (spage->page)
page             2015 fs/btrfs/scrub.c 			__free_page(spage->page);
page             2086 fs/btrfs/scrub.c 	ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
page             2202 fs/btrfs/scrub.c 		raid56_add_scrub_pages(rbio, spage->page, spage->logical);
page             2274 fs/btrfs/scrub.c 		spage->page = alloc_page(GFP_KERNEL);
page             2275 fs/btrfs/scrub.c 		if (!spage->page)
page             2584 fs/btrfs/scrub.c 		spage->page = alloc_page(GFP_KERNEL);
page             2585 fs/btrfs/scrub.c 		if (!spage->page)
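scrub.c wraps every payload page in a refcounted struct scrub_page: the metadata is kzalloc'd (line 1334 above), the data page is attached with a separate alloc_page() (lines 1375, 2274, 2584), and teardown frees the payload only if it was actually allocated (lines 2014-2015). A minimal model of that alloc/free pairing, keeping only the one field the entries above exercise:

	#include <linux/slab.h>
	#include <linux/gfp.h>
	#include <linux/mm.h>

	/* Reduced model of the scrub.c wrapper; the real struct also carries
	 * logical/physical addresses, csum, mirror and recovery state. */
	struct scrub_page_sketch {
		struct page *page;	/* payload, NULL until attached */
	};

	static struct scrub_page_sketch *scrub_page_alloc(gfp_t gfp)
	{
		struct scrub_page_sketch *spage;

		spage = kzalloc(sizeof(*spage), gfp);
		if (!spage)
			return NULL;

		spage->page = alloc_page(gfp);
		if (!spage->page) {
			kfree(spage);
			return NULL;
		}
		return spage;
	}

	static void scrub_page_free(struct scrub_page_sketch *spage)
	{
		if (spage->page)
			__free_page(spage->page);
		kfree(spage);
	}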
page             4793 fs/btrfs/send.c 	struct page *page;
page             4828 fs/btrfs/send.c 		page = find_lock_page(inode->i_mapping, index);
page             4829 fs/btrfs/send.c 		if (!page) {
page             4833 fs/btrfs/send.c 			page = find_or_create_page(inode->i_mapping, index,
page             4835 fs/btrfs/send.c 			if (!page) {
page             4841 fs/btrfs/send.c 		if (PageReadahead(page)) {
page             4843 fs/btrfs/send.c 				NULL, page, index, last_index + 1 - index);
page             4846 fs/btrfs/send.c 		if (!PageUptodate(page)) {
page             4847 fs/btrfs/send.c 			btrfs_readpage(NULL, page);
page             4848 fs/btrfs/send.c 			lock_page(page);
page             4849 fs/btrfs/send.c 			if (!PageUptodate(page)) {
page             4850 fs/btrfs/send.c 				unlock_page(page);
page             4851 fs/btrfs/send.c 				put_page(page);
page             4857 fs/btrfs/send.c 		addr = kmap(page);
page             4859 fs/btrfs/send.c 		kunmap(page);
page             4860 fs/btrfs/send.c 		unlock_page(page);
page             4861 fs/btrfs/send.c 		put_page(page);
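send.c (lines 4857-4861 above) reads file data out of a locked pagecache page through a temporary kernel mapping, then drops both the lock and the reference. The same kmap/kunmap bracket appears in btrfs_page_mkwrite (inode.c 9163-9166) and cachefiles_write_page (rdwr.c 938-940). The shape of that access, with an illustrative destination buffer standing in for the send stream:

	#include <linux/highmem.h>
	#include <linux/pagemap.h>
	#include <linux/string.h>

	/* Sketch of the kmap/kunmap copy-out above; assumes the caller holds
	 * the page lock and a reference, and that offset + len <= PAGE_SIZE. */
	static void copy_from_locked_page(struct page *page, void *buf,
					  unsigned int offset, unsigned int len)
	{
		char *addr = kmap(page);

		memcpy(buf, addr + offset, len);
		kunmap(page);

		unlock_page(page);
		put_page(page);
	}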
page               23 fs/btrfs/tests/extent-io-tests.c 	struct page *pages[16];
page               63 fs/btrfs/tests/extent-io-tests.c 	struct page *page;
page               64 fs/btrfs/tests/extent-io-tests.c 	struct page *locked_page = NULL;
page               94 fs/btrfs/tests/extent-io-tests.c 		page = find_or_create_page(inode->i_mapping, index, GFP_KERNEL);
page               95 fs/btrfs/tests/extent-io-tests.c 		if (!page) {
page              100 fs/btrfs/tests/extent-io-tests.c 		SetPageDirty(page);
page              102 fs/btrfs/tests/extent-io-tests.c 			unlock_page(page);
page              104 fs/btrfs/tests/extent-io-tests.c 			get_page(page);
page              105 fs/btrfs/tests/extent-io-tests.c 			locked_page = page;
page              224 fs/btrfs/tests/extent-io-tests.c 	page = find_get_page(inode->i_mapping,
page              226 fs/btrfs/tests/extent-io-tests.c 	if (!page) {
page              230 fs/btrfs/tests/extent-io-tests.c 	ClearPageDirty(page);
page              231 fs/btrfs/tests/extent-io-tests.c 	put_page(page);
page             1433 fs/btrfs/volumes.c static void btrfs_release_disk_super(struct page *page)
page             1435 fs/btrfs/volumes.c 	kunmap(page);
page             1436 fs/btrfs/volumes.c 	put_page(page);
page             1440 fs/btrfs/volumes.c 				 struct page **page,
page             1460 fs/btrfs/volumes.c 	*page = read_cache_page_gfp(bdev->bd_inode->i_mapping,
page             1463 fs/btrfs/volumes.c 	if (IS_ERR_OR_NULL(*page))
page             1466 fs/btrfs/volumes.c 	p = kmap(*page);
page             1473 fs/btrfs/volumes.c 		btrfs_release_disk_super(*page);
page             1507 fs/btrfs/volumes.c 	struct page *page;
page             1525 fs/btrfs/volumes.c 	if (btrfs_read_disk_super(bdev, bytenr, &page, &disk_super)) {
page             1536 fs/btrfs/volumes.c 	btrfs_release_disk_super(page);
page               94 fs/btrfs/zlib.c 			       struct page **pages,
page              104 fs/btrfs/zlib.c 	struct page *in_page = NULL;
page              105 fs/btrfs/zlib.c 	struct page *out_page = NULL;
page              242 fs/btrfs/zlib.c 	struct page **pages_in = cb->compressed_pages;
page              323 fs/btrfs/zlib.c 			   struct page *dest_page,
page              373 fs/btrfs/zstd.c 		struct page **pages,
page              382 fs/btrfs/zstd.c 	struct page *in_page = NULL;  /* The current page to read */
page              383 fs/btrfs/zstd.c 	struct page *out_page = NULL; /* The current page to write to */
page              554 fs/btrfs/zstd.c 	struct page **pages_in = cb->compressed_pages;
page              630 fs/btrfs/zstd.c 		struct page *dest_page,
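The lzo.c, zlib.c and zstd.c entries above share one calling convention: compressed data travels as a bare array of struct page * (cb->compressed_pages), with in_page/out_page cursors tracking the current position. A generic sketch of consuming such an array one mapped page at a time; the consume callback is illustrative and stands in for the decompression step:

	#include <linux/highmem.h>
	#include <linux/mm.h>

	/* Illustrative walk over a compressed_pages-style array; each page is
	 * mapped only for the duration of its use. */
	static void for_each_compressed_page(struct page **pages_in,
					     unsigned long nr_pages,
					     void (*consume)(void *data, size_t len))
	{
		unsigned long i;

		for (i = 0; i < nr_pages; i++) {
			void *data = kmap(pages_in[i]);

			consume(data, PAGE_SIZE);
			kunmap(pages_in[i]);
		}
	}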
page               83 fs/buffer.c    void buffer_check_dirty_writeback(struct page *page,
page               90 fs/buffer.c    	BUG_ON(!PageLocked(page));
page               92 fs/buffer.c    	if (!page_has_buffers(page))
page               95 fs/buffer.c    	if (PageWriteback(page))
page               98 fs/buffer.c    	head = page_buffers(page);
page              124 fs/buffer.c    __clear_page_buffers(struct page *page)
page              126 fs/buffer.c    	ClearPagePrivate(page);
page              127 fs/buffer.c    	set_page_private(page, 0);
page              128 fs/buffer.c    	put_page(page);
page              202 fs/buffer.c    	struct page *page;
page              207 fs/buffer.c    	page = find_get_page_flags(bd_mapping, index, FGP_ACCESSED);
page              208 fs/buffer.c    	if (!page)
page              212 fs/buffer.c    	if (!page_has_buffers(page))
page              214 fs/buffer.c    	head = page_buffers(page);
page              244 fs/buffer.c    	put_page(page);
page              258 fs/buffer.c    	struct page *page;
page              263 fs/buffer.c    	page = bh->b_page;
page              269 fs/buffer.c    		SetPageError(page);
page              277 fs/buffer.c    	first = page_buffers(page);
page              299 fs/buffer.c    	if (page_uptodate && !PageError(page))
page              300 fs/buffer.c    		SetPageUptodate(page);
page              301 fs/buffer.c    	unlock_page(page);
page              319 fs/buffer.c    	struct page *page;
page              323 fs/buffer.c    	page = bh->b_page;
page              330 fs/buffer.c    		SetPageError(page);
page              333 fs/buffer.c    	first = page_buffers(page);
page              349 fs/buffer.c    	end_page_writeback(page);
page              575 fs/buffer.c    void __set_page_dirty(struct page *page, struct address_space *mapping,
page              581 fs/buffer.c    	if (page->mapping) {	/* Race with truncate? */
page              582 fs/buffer.c    		WARN_ON_ONCE(warn && !PageUptodate(page));
page              583 fs/buffer.c    		account_page_dirtied(page, mapping);
page              584 fs/buffer.c    		__xa_set_mark(&mapping->i_pages, page_index(page),
page              616 fs/buffer.c    int __set_page_dirty_buffers(struct page *page)
page              619 fs/buffer.c    	struct address_space *mapping = page_mapping(page);
page              622 fs/buffer.c    		return !TestSetPageDirty(page);
page              625 fs/buffer.c    	if (page_has_buffers(page)) {
page              626 fs/buffer.c    		struct buffer_head *head = page_buffers(page);
page              638 fs/buffer.c    	lock_page_memcg(page);
page              639 fs/buffer.c    	newly_dirty = !TestSetPageDirty(page);
page              643 fs/buffer.c    		__set_page_dirty(page, mapping, 1);
page              645 fs/buffer.c    	unlock_page_memcg(page);
page              814 fs/buffer.c    struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
page              825 fs/buffer.c    	memcg = get_mem_cgroup_from_page(page);
page              842 fs/buffer.c    		set_bh_page(bh, page, offset);
page              865 fs/buffer.c    link_dev_buffers(struct page *page, struct buffer_head *head)
page              875 fs/buffer.c    	attach_page_buffers(page, head);
page              894 fs/buffer.c    init_page_buffers(struct page *page, struct block_device *bdev,
page              897 fs/buffer.c    	struct buffer_head *head = page_buffers(page);
page              899 fs/buffer.c    	int uptodate = PageUptodate(page);
page              933 fs/buffer.c    	struct page *page;
page              949 fs/buffer.c    	page = find_or_create_page(inode->i_mapping, index, gfp_mask);
page              951 fs/buffer.c    	BUG_ON(!PageLocked(page));
page              953 fs/buffer.c    	if (page_has_buffers(page)) {
page              954 fs/buffer.c    		bh = page_buffers(page);
page              956 fs/buffer.c    			end_block = init_page_buffers(page, bdev,
page              961 fs/buffer.c    		if (!try_to_free_buffers(page))
page              968 fs/buffer.c    	bh = alloc_page_buffers(page, size, true);
page              976 fs/buffer.c    	link_dev_buffers(page, bh);
page              977 fs/buffer.c    	end_block = init_page_buffers(page, bdev, (sector_t)index << sizebits,
page              983 fs/buffer.c    	unlock_page(page);
page              984 fs/buffer.c    	put_page(page);
page             1105 fs/buffer.c    		struct page *page = bh->b_page;
page             1108 fs/buffer.c    		lock_page_memcg(page);
page             1109 fs/buffer.c    		if (!TestSetPageDirty(page)) {
page             1110 fs/buffer.c    			mapping = page_mapping(page);
page             1112 fs/buffer.c    				__set_page_dirty(page, mapping, 0);
page             1114 fs/buffer.c    		unlock_page_memcg(page);
page             1412 fs/buffer.c    		struct page *page, unsigned long offset)
page             1414 fs/buffer.c    	bh->b_page = page;
page             1416 fs/buffer.c    	if (PageHighMem(page))
page             1422 fs/buffer.c    		bh->b_data = page_address(page) + offset;
page             1469 fs/buffer.c    void block_invalidatepage(struct page *page, unsigned int offset,
page             1476 fs/buffer.c    	BUG_ON(!PageLocked(page));
page             1477 fs/buffer.c    	if (!page_has_buffers(page))
page             1485 fs/buffer.c    	head = page_buffers(page);
page             1512 fs/buffer.c    		try_to_release_page(page, 0);
page             1524 fs/buffer.c    void create_empty_buffers(struct page *page,
page             1529 fs/buffer.c    	head = alloc_page_buffers(page, blocksize, true);
page             1538 fs/buffer.c    	spin_lock(&page->mapping->private_lock);
page             1539 fs/buffer.c    	if (PageUptodate(page) || PageDirty(page)) {
page             1542 fs/buffer.c    			if (PageDirty(page))
page             1544 fs/buffer.c    			if (PageUptodate(page))
page             1549 fs/buffer.c    	attach_page_buffers(page, head);
page             1550 fs/buffer.c    	spin_unlock(&page->mapping->private_lock);
page             1590 fs/buffer.c    			struct page *page = pvec.pages[i];
page             1592 fs/buffer.c    			if (!page_has_buffers(page))
page             1599 fs/buffer.c    			lock_page(page);
page             1601 fs/buffer.c    			if (!page_has_buffers(page))
page             1603 fs/buffer.c    			head = page_buffers(page);
page             1617 fs/buffer.c    			unlock_page(page);
page             1641 fs/buffer.c    static struct buffer_head *create_page_buffers(struct page *page, struct inode *inode, unsigned int b_state)
page             1643 fs/buffer.c    	BUG_ON(!PageLocked(page));
page             1645 fs/buffer.c    	if (!page_has_buffers(page))
page             1646 fs/buffer.c    		create_empty_buffers(page, 1 << READ_ONCE(inode->i_blkbits),
page             1648 fs/buffer.c    	return page_buffers(page);
page             1680 fs/buffer.c    int __block_write_full_page(struct inode *inode, struct page *page,
page             1692 fs/buffer.c    	head = create_page_buffers(page, inode,
page             1709 fs/buffer.c    	block = (sector_t)page->index << (PAGE_SHIFT - bbits);
page             1758 fs/buffer.c    			redirty_page_for_writepage(wbc, page);
page             1772 fs/buffer.c    	BUG_ON(PageWriteback(page));
page             1773 fs/buffer.c    	set_page_writeback(page);
page             1784 fs/buffer.c    	unlock_page(page);
page             1794 fs/buffer.c    		end_page_writeback(page);
page             1825 fs/buffer.c    	SetPageError(page);
page             1826 fs/buffer.c    	BUG_ON(PageWriteback(page));
page             1827 fs/buffer.c    	mapping_set_error(page->mapping, err);
page             1828 fs/buffer.c    	set_page_writeback(page);
page             1839 fs/buffer.c    	unlock_page(page);
page             1849 fs/buffer.c    void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
page             1854 fs/buffer.c    	BUG_ON(!PageLocked(page));
page             1855 fs/buffer.c    	if (!page_has_buffers(page))
page             1858 fs/buffer.c    	bh = head = page_buffers(page);
page             1865 fs/buffer.c    				if (!PageUptodate(page)) {
page             1871 fs/buffer.c    					zero_user(page, start, size);
page             1941 fs/buffer.c    int __block_write_begin_int(struct page *page, loff_t pos, unsigned len,
page             1946 fs/buffer.c    	struct inode *inode = page->mapping->host;
page             1953 fs/buffer.c    	BUG_ON(!PageLocked(page));
page             1958 fs/buffer.c    	head = create_page_buffers(page, inode, 0);
page             1962 fs/buffer.c    	block = (sector_t)page->index << (PAGE_SHIFT - bbits);
page             1968 fs/buffer.c    			if (PageUptodate(page)) {
page             1988 fs/buffer.c    				if (PageUptodate(page)) {
page             1995 fs/buffer.c    					zero_user_segments(page,
page             2001 fs/buffer.c    		if (PageUptodate(page)) {
page             2022 fs/buffer.c    		page_zero_new_buffers(page, from, to);
page             2026 fs/buffer.c    int __block_write_begin(struct page *page, loff_t pos, unsigned len,
page             2029 fs/buffer.c    	return __block_write_begin_int(page, pos, len, get_block, NULL);
page             2033 fs/buffer.c    static int __block_commit_write(struct inode *inode, struct page *page,
page             2041 fs/buffer.c    	bh = head = page_buffers(page);
page             2067 fs/buffer.c    		SetPageUptodate(page);
page             2078 fs/buffer.c    		unsigned flags, struct page **pagep, get_block_t *get_block)
page             2081 fs/buffer.c    	struct page *page;
page             2084 fs/buffer.c    	page = grab_cache_page_write_begin(mapping, index, flags);
page             2085 fs/buffer.c    	if (!page)
page             2088 fs/buffer.c    	status = __block_write_begin(page, pos, len, get_block);
page             2090 fs/buffer.c    		unlock_page(page);
page             2091 fs/buffer.c    		put_page(page);
page             2092 fs/buffer.c    		page = NULL;
page             2095 fs/buffer.c    	*pagep = page;
page             2102 fs/buffer.c    			struct page *page, void *fsdata)
page             2122 fs/buffer.c    		if (!PageUptodate(page))
page             2125 fs/buffer.c    		page_zero_new_buffers(page, start+copied, start+len);
page             2127 fs/buffer.c    	flush_dcache_page(page);
page             2130 fs/buffer.c    	__block_commit_write(inode, page, start, start+copied);
page             2138 fs/buffer.c    			struct page *page, void *fsdata)
page             2144 fs/buffer.c    	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
page             2158 fs/buffer.c    	unlock_page(page);
page             2159 fs/buffer.c    	put_page(page);
page             2182 fs/buffer.c    int block_is_partially_uptodate(struct page *page, unsigned long from,
page             2190 fs/buffer.c    	if (!page_has_buffers(page))
page             2193 fs/buffer.c    	head = page_buffers(page);
page             2227 fs/buffer.c    int block_read_full_page(struct page *page, get_block_t *get_block)
page             2229 fs/buffer.c    	struct inode *inode = page->mapping->host;
page             2236 fs/buffer.c    	head = create_page_buffers(page, inode, 0);
page             2240 fs/buffer.c    	iblock = (sector_t)page->index << (PAGE_SHIFT - bbits);
page             2258 fs/buffer.c    					SetPageError(page);
page             2261 fs/buffer.c    				zero_user(page, i * blocksize, blocksize);
page             2277 fs/buffer.c    		SetPageMappedToDisk(page);
page             2284 fs/buffer.c    		if (!PageError(page))
page             2285 fs/buffer.c    			SetPageUptodate(page);
page             2286 fs/buffer.c    		unlock_page(page);
page             2320 fs/buffer.c    	struct page *page;
page             2329 fs/buffer.c    				    AOP_FLAG_CONT_EXPAND, &page, &fsdata);
page             2333 fs/buffer.c    	err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
page             2346 fs/buffer.c    	struct page *page;
page             2365 fs/buffer.c    					    &page, &fsdata);
page             2368 fs/buffer.c    		zero_user(page, zerofrom, len);
page             2370 fs/buffer.c    						page, fsdata);
page             2398 fs/buffer.c    					    &page, &fsdata);
page             2401 fs/buffer.c    		zero_user(page, zerofrom, len);
page             2403 fs/buffer.c    						page, fsdata);
page             2419 fs/buffer.c    			struct page **pagep, void **fsdata,
page             2441 fs/buffer.c    int block_commit_write(struct page *page, unsigned from, unsigned to)
page             2443 fs/buffer.c    	struct inode *inode = page->mapping->host;
page             2444 fs/buffer.c    	__block_commit_write(inode,page,from,to);
page             2470 fs/buffer.c    	struct page *page = vmf->page;
page             2476 fs/buffer.c    	lock_page(page);
page             2478 fs/buffer.c    	if ((page->mapping != inode->i_mapping) ||
page             2479 fs/buffer.c    	    (page_offset(page) > size)) {
page             2486 fs/buffer.c    	if (((page->index + 1) << PAGE_SHIFT) > size)
page             2491 fs/buffer.c    	ret = __block_write_begin(page, 0, end, get_block);
page             2493 fs/buffer.c    		ret = block_commit_write(page, 0, end);
page             2497 fs/buffer.c    	set_page_dirty(page);
page             2498 fs/buffer.c    	wait_for_stable_page(page);
page             2501 fs/buffer.c    	unlock_page(page);
page             2521 fs/buffer.c    static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
page             2525 fs/buffer.c    	BUG_ON(!PageLocked(page));
page             2527 fs/buffer.c    	spin_lock(&page->mapping->private_lock);
page             2530 fs/buffer.c    		if (PageDirty(page))
page             2536 fs/buffer.c    	attach_page_buffers(page, head);
page             2537 fs/buffer.c    	spin_unlock(&page->mapping->private_lock);
page             2547 fs/buffer.c    			struct page **pagep, void **fsdata,
page             2554 fs/buffer.c    	struct page *page;
page             2568 fs/buffer.c    	page = grab_cache_page_write_begin(mapping, index, flags);
page             2569 fs/buffer.c    	if (!page)
page             2571 fs/buffer.c    	*pagep = page;
page             2574 fs/buffer.c    	if (page_has_buffers(page)) {
page             2575 fs/buffer.c    		ret = __block_write_begin(page, pos, len, get_block);
page             2581 fs/buffer.c    	if (PageMappedToDisk(page))
page             2593 fs/buffer.c    	head = alloc_page_buffers(page, blocksize, false);
page             2599 fs/buffer.c    	block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
page             2624 fs/buffer.c    		if (PageUptodate(page)) {
page             2629 fs/buffer.c    			zero_user_segments(page, block_start, from,
page             2659 fs/buffer.c    		SetPageMappedToDisk(page);
page             2674 fs/buffer.c    	attach_nobh_buffers(page, head);
page             2675 fs/buffer.c    	page_zero_new_buffers(page, from, to);
page             2678 fs/buffer.c    	unlock_page(page);
page             2679 fs/buffer.c    	put_page(page);
page             2688 fs/buffer.c    			struct page *page, void *fsdata)
page             2690 fs/buffer.c    	struct inode *inode = page->mapping->host;
page             2693 fs/buffer.c    	BUG_ON(fsdata != NULL && page_has_buffers(page));
page             2696 fs/buffer.c    		attach_nobh_buffers(page, head);
page             2697 fs/buffer.c    	if (page_has_buffers(page))
page             2699 fs/buffer.c    					copied, page, fsdata);
page             2701 fs/buffer.c    	SetPageUptodate(page);
page             2702 fs/buffer.c    	set_page_dirty(page);
page             2708 fs/buffer.c    	unlock_page(page);
page             2709 fs/buffer.c    	put_page(page);
page             2726 fs/buffer.c    int nobh_writepage(struct page *page, get_block_t *get_block,
page             2729 fs/buffer.c    	struct inode * const inode = page->mapping->host;
page             2736 fs/buffer.c    	if (page->index < end_index)
page             2741 fs/buffer.c    	if (page->index >= end_index+1 || !offset) {
page             2749 fs/buffer.c    		if (page->mapping->a_ops->invalidatepage)
page             2750 fs/buffer.c    			page->mapping->a_ops->invalidatepage(page, offset);
page             2752 fs/buffer.c    		unlock_page(page);
page             2763 fs/buffer.c    	zero_user_segment(page, offset, PAGE_SIZE);
page             2765 fs/buffer.c    	ret = mpage_writepage(page, get_block, wbc);
page             2767 fs/buffer.c    		ret = __block_write_full_page(inode, page, get_block, wbc,
page             2782 fs/buffer.c    	struct page *page;
page             2796 fs/buffer.c    	page = grab_cache_page(mapping, index);
page             2798 fs/buffer.c    	if (!page)
page             2801 fs/buffer.c    	if (page_has_buffers(page)) {
page             2803 fs/buffer.c    		unlock_page(page);
page             2804 fs/buffer.c    		put_page(page);
page             2825 fs/buffer.c    	if (!PageUptodate(page)) {
page             2826 fs/buffer.c    		err = mapping->a_ops->readpage(NULL, page);
page             2828 fs/buffer.c    			put_page(page);
page             2831 fs/buffer.c    		lock_page(page);
page             2832 fs/buffer.c    		if (!PageUptodate(page)) {
page             2836 fs/buffer.c    		if (page_has_buffers(page))
page             2839 fs/buffer.c    	zero_user(page, offset, length);
page             2840 fs/buffer.c    	set_page_dirty(page);
page             2844 fs/buffer.c    	unlock_page(page);
page             2845 fs/buffer.c    	put_page(page);
page             2860 fs/buffer.c    	struct page *page;
page             2874 fs/buffer.c    	page = grab_cache_page(mapping, index);
page             2876 fs/buffer.c    	if (!page)
page             2879 fs/buffer.c    	if (!page_has_buffers(page))
page             2880 fs/buffer.c    		create_empty_buffers(page, blocksize, 0);
page             2883 fs/buffer.c    	bh = page_buffers(page);
page             2903 fs/buffer.c    	if (PageUptodate(page))
page             2915 fs/buffer.c    	zero_user(page, offset, length);
page             2920 fs/buffer.c    	unlock_page(page);
page             2921 fs/buffer.c    	put_page(page);
page             2930 fs/buffer.c    int block_write_full_page(struct page *page, get_block_t *get_block,
page             2933 fs/buffer.c    	struct inode * const inode = page->mapping->host;
page             2939 fs/buffer.c    	if (page->index < end_index)
page             2940 fs/buffer.c    		return __block_write_full_page(inode, page, get_block, wbc,
page             2945 fs/buffer.c    	if (page->index >= end_index+1 || !offset) {
page             2951 fs/buffer.c    		do_invalidatepage(page, 0, PAGE_SIZE);
page             2952 fs/buffer.c    		unlock_page(page);
page             2963 fs/buffer.c    	zero_user_segment(page, offset, PAGE_SIZE);
page             2964 fs/buffer.c    	return __block_write_full_page(inode, page, get_block, wbc,
page             3219 fs/buffer.c    drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
page             3221 fs/buffer.c    	struct buffer_head *head = page_buffers(page);
page             3239 fs/buffer.c    	__clear_page_buffers(page);
page             3245 fs/buffer.c    int try_to_free_buffers(struct page *page)
page             3247 fs/buffer.c    	struct address_space * const mapping = page->mapping;
page             3251 fs/buffer.c    	BUG_ON(!PageLocked(page));
page             3252 fs/buffer.c    	if (PageWriteback(page))
page             3256 fs/buffer.c    		ret = drop_buffers(page, &buffers_to_free);
page             3261 fs/buffer.c    	ret = drop_buffers(page, &buffers_to_free);
page             3278 fs/buffer.c    		cancel_dirty_page(page);
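block_page_mkwrite() (buffer.c lines 2470-2501 above) is the template for the ->page_mkwrite handlers elsewhere in this listing, including btrfs_page_mkwrite (inode.c 9048 onward) and ceph_page_mkwrite (ceph/addr.c 1522 onward): lock the faulting page, re-check that it still belongs to the mapping and lies within i_size, dirty it, and wait for it to become write-stable. A condensed sketch that elides the __block_write_begin()/block_commit_write() plumbing:

	#include <linux/fs.h>
	#include <linux/mm.h>
	#include <linux/pagemap.h>

	/* Condensed mkwrite sequence; the real function converts the error
	 * into a VM_FAULT_* code for the fault handler. */
	static int page_mkwrite_sketch(struct vm_fault *vmf, struct inode *inode)
	{
		struct page *page = vmf->page;
		loff_t size;

		lock_page(page);
		size = i_size_read(inode);
		if (page->mapping != inode->i_mapping ||
		    page_offset(page) > size) {
			/* raced with truncate: the page is no longer ours */
			unlock_page(page);
			return -EFAULT;
		}

		set_page_dirty(page);
		wait_for_stable_page(page);	/* honour stable-write rules */
		return 0;
	}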
page               97 fs/cachefiles/internal.h 	struct page			*back_page;	/* backing file page we're waiting for */
page               98 fs/cachefiles/internal.h 	struct page			*netfs_page;	/* netfs page we're going to fill */
page              107 fs/cachefiles/internal.h 	struct page			*netfs_page;	/* netfs page to copy */
page              209 fs/cachefiles/internal.h 					 struct page *, gfp_t);
page              213 fs/cachefiles/internal.h extern int cachefiles_allocate_page(struct fscache_retrieval *, struct page *,
page              217 fs/cachefiles/internal.h extern int cachefiles_write_page(struct fscache_storage *, struct page *);
page              218 fs/cachefiles/internal.h extern void cachefiles_uncache_page(struct fscache_object *, struct page *);
page               28 fs/cachefiles/rdwr.c 	struct page *page = wait->private;
page               36 fs/cachefiles/rdwr.c 	if (key->flags != &page->flags ||
page               40 fs/cachefiles/rdwr.c 	_debug("--- monitor %p %lx ---", page, page->flags);
page               42 fs/cachefiles/rdwr.c 	if (!PageUptodate(page) && !PageError(page)) {
page               81 fs/cachefiles/rdwr.c 	struct page *backpage = monitor->back_page, *backpage2;
page              231 fs/cachefiles/rdwr.c 					    struct page *netpage)
page              235 fs/cachefiles/rdwr.c 	struct page *newpage, *backpage;
page              393 fs/cachefiles/rdwr.c 				  struct page *page,
page              408 fs/cachefiles/rdwr.c 	_enter("{%p},{%lx},,,", object, page->index);
page              431 fs/cachefiles/rdwr.c 	block0 = page->index;
page              442 fs/cachefiles/rdwr.c 		ret = cachefiles_read_backing_file_one(object, op, page);
page              445 fs/cachefiles/rdwr.c 		fscache_mark_page_cached(op, page);
page              471 fs/cachefiles/rdwr.c 	struct page *newpage = NULL, *netpage, *_n, *backpage = NULL;
page              692 fs/cachefiles/rdwr.c 	struct page *page, *_n;
page              730 fs/cachefiles/rdwr.c 	list_for_each_entry_safe(page, _n, pages, lru) {
page              739 fs/cachefiles/rdwr.c 		block0 = page->index;
page              751 fs/cachefiles/rdwr.c 			list_move(&page->lru, &backpages);
page              754 fs/cachefiles/rdwr.c 		} else if (space && pagevec_add(&pagevec, page) == 0) {
page              798 fs/cachefiles/rdwr.c 			     struct page *page,
page              810 fs/cachefiles/rdwr.c 	_enter("%p,{%lx},", object, page->index);
page              814 fs/cachefiles/rdwr.c 		fscache_mark_page_cached(op, page);
page              842 fs/cachefiles/rdwr.c 	struct page *page;
page              856 fs/cachefiles/rdwr.c 		list_for_each_entry(page, pages, lru) {
page              857 fs/cachefiles/rdwr.c 			if (pagevec_add(&pagevec, page) == 0)
page              880 fs/cachefiles/rdwr.c int cachefiles_write_page(struct fscache_storage *op, struct page *page)
page              892 fs/cachefiles/rdwr.c 	ASSERT(page != NULL);
page              897 fs/cachefiles/rdwr.c 	_enter("%p,%p{%lx},,,", object, page, page->index);
page              909 fs/cachefiles/rdwr.c 	pos = (loff_t)page->index << PAGE_SHIFT;
page              938 fs/cachefiles/rdwr.c 	data = kmap(page);
page              940 fs/cachefiles/rdwr.c 	kunmap(page);
page              963 fs/cachefiles/rdwr.c void cachefiles_uncache_page(struct fscache_object *_object, struct page *page)
page              970 fs/cachefiles/rdwr.c 	_enter("%p,{%lu}", object, page->index);
page               62 fs/ceph/addr.c static inline struct ceph_snap_context *page_snap_context(struct page *page)
page               64 fs/ceph/addr.c 	if (PagePrivate(page))
page               65 fs/ceph/addr.c 		return (void *)page->private;
page               73 fs/ceph/addr.c static int ceph_set_page_dirty(struct page *page)
page               75 fs/ceph/addr.c 	struct address_space *mapping = page->mapping;
page               82 fs/ceph/addr.c 		return !TestSetPageDirty(page);
page               84 fs/ceph/addr.c 	if (PageDirty(page)) {
page               86 fs/ceph/addr.c 		     mapping->host, page, page->index);
page               87 fs/ceph/addr.c 		BUG_ON(!PagePrivate(page));
page              114 fs/ceph/addr.c 	     mapping->host, page, page->index,
page              124 fs/ceph/addr.c 	BUG_ON(PagePrivate(page));
page              125 fs/ceph/addr.c 	page->private = (unsigned long)snapc;
page              126 fs/ceph/addr.c 	SetPagePrivate(page);
page              128 fs/ceph/addr.c 	ret = __set_page_dirty_nobuffers(page);
page              129 fs/ceph/addr.c 	WARN_ON(!PageLocked(page));
page              130 fs/ceph/addr.c 	WARN_ON(!page->mapping);
page              140 fs/ceph/addr.c static void ceph_invalidatepage(struct page *page, unsigned int offset,
page              145 fs/ceph/addr.c 	struct ceph_snap_context *snapc = page_snap_context(page);
page              147 fs/ceph/addr.c 	inode = page->mapping->host;
page              152 fs/ceph/addr.c 		     inode, page, page->index, offset, length);
page              156 fs/ceph/addr.c 	ceph_invalidate_fscache_page(inode, page);
page              158 fs/ceph/addr.c 	WARN_ON(!PageLocked(page));
page              159 fs/ceph/addr.c 	if (!PagePrivate(page))
page              162 fs/ceph/addr.c 	ClearPageChecked(page);
page              165 fs/ceph/addr.c 	     inode, page, page->index);
page              169 fs/ceph/addr.c 	page->private = 0;
page              170 fs/ceph/addr.c 	ClearPagePrivate(page);
page              173 fs/ceph/addr.c static int ceph_releasepage(struct page *page, gfp_t g)
page              175 fs/ceph/addr.c 	dout("%p releasepage %p idx %lu (%sdirty)\n", page->mapping->host,
page              176 fs/ceph/addr.c 	     page, page->index, PageDirty(page) ? "" : "not ");
page              179 fs/ceph/addr.c 	if (!ceph_release_fscache_page(page, g))
page              182 fs/ceph/addr.c 	return !PagePrivate(page);
page              188 fs/ceph/addr.c static int ceph_do_readpage(struct file *filp, struct page *page)
page              194 fs/ceph/addr.c 	u64 off = page_offset(page);
page              198 fs/ceph/addr.c 		zero_user_segment(page, 0, PAGE_SIZE);
page              199 fs/ceph/addr.c 		SetPageUptodate(page);
page              210 fs/ceph/addr.c 		zero_user_segment(page, 0, PAGE_SIZE);
page              211 fs/ceph/addr.c 		SetPageUptodate(page);
page              215 fs/ceph/addr.c 	err = ceph_readpage_from_fscache(inode, page);
page              220 fs/ceph/addr.c 	     inode, filp, page, page->index);
page              224 fs/ceph/addr.c 				  &page, 1, 0);
page              228 fs/ceph/addr.c 		SetPageError(page);
page              229 fs/ceph/addr.c 		ceph_fscache_readpage_cancel(inode, page);
page              236 fs/ceph/addr.c 		zero_user_segment(page, err, PAGE_SIZE);
page              238 fs/ceph/addr.c 		flush_dcache_page(page);
page              240 fs/ceph/addr.c 	SetPageUptodate(page);
page              241 fs/ceph/addr.c 	ceph_readpage_to_fscache(inode, page);
page              247 fs/ceph/addr.c static int ceph_readpage(struct file *filp, struct page *page)
page              249 fs/ceph/addr.c 	int r = ceph_do_readpage(filp, page);
page              251 fs/ceph/addr.c 		unlock_page(page);
page              279 fs/ceph/addr.c 		struct page *page = osd_data->pages[i];
page              282 fs/ceph/addr.c 			ceph_fscache_readpage_cancel(inode, page);
page              288 fs/ceph/addr.c 			zero_user_segment(page, s, PAGE_SIZE);
page              290 fs/ceph/addr.c  		dout("finish_read %p uptodate %p idx %lu\n", inode, page,
page              291 fs/ceph/addr.c 		     page->index);
page              292 fs/ceph/addr.c 		flush_dcache_page(page);
page              293 fs/ceph/addr.c 		SetPageUptodate(page);
page              294 fs/ceph/addr.c 		ceph_readpage_to_fscache(inode, page);
page              296 fs/ceph/addr.c 		unlock_page(page);
page              297 fs/ceph/addr.c 		put_page(page);
page              313 fs/ceph/addr.c 	struct page *page = lru_to_page(page_list);
page              319 fs/ceph/addr.c 	struct page **pages;
page              341 fs/ceph/addr.c 				page = lru_to_page(page_list);
page              342 fs/ceph/addr.c 				list_del(&page->lru);
page              343 fs/ceph/addr.c 				put_page(page);
page              349 fs/ceph/addr.c 	off = (u64) page_offset(page);
page              352 fs/ceph/addr.c 	next_index = page->index;
page              353 fs/ceph/addr.c 	list_for_each_entry_reverse(page, page_list, lru) {
page              354 fs/ceph/addr.c 		if (page->index != next_index)
page              383 fs/ceph/addr.c 		page = list_entry(page_list->prev, struct page, lru);
page              384 fs/ceph/addr.c 		BUG_ON(PageLocked(page));
page              385 fs/ceph/addr.c 		list_del(&page->lru);
page              387 fs/ceph/addr.c  		dout("start_read %p adding %p idx %lu\n", inode, page,
page              388 fs/ceph/addr.c 		     page->index);
page              389 fs/ceph/addr.c 		if (add_to_page_cache_lru(page, &inode->i_data, page->index,
page              391 fs/ceph/addr.c 			ceph_fscache_uncache_page(inode, page);
page              392 fs/ceph/addr.c 			put_page(page);
page              394 fs/ceph/addr.c 			     inode, page);
page              403 fs/ceph/addr.c 		pages[i] = page;
page              547 fs/ceph/addr.c 				      struct page *page, u64 start)
page              550 fs/ceph/addr.c 	struct ceph_snap_context *snapc = page_snap_context(page);
page              568 fs/ceph/addr.c 	if (end > page_offset(page) + PAGE_SIZE)
page              569 fs/ceph/addr.c 		end = page_offset(page) + PAGE_SIZE;
page              579 fs/ceph/addr.c static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
page              585 fs/ceph/addr.c 	loff_t page_off = page_offset(page);
page              589 fs/ceph/addr.c 	dout("writepage %p idx %lu\n", page, page->index);
page              591 fs/ceph/addr.c 	inode = page->mapping->host;
page              596 fs/ceph/addr.c 	snapc = page_snap_context(page);
page              598 fs/ceph/addr.c 		dout("writepage %p page %p not dirty?\n", inode, page);
page              604 fs/ceph/addr.c 		     inode, page, snapc);
page              608 fs/ceph/addr.c 		redirty_page_for_writepage(wbc, page);
page              615 fs/ceph/addr.c 		dout("%p page eof %llu\n", page, ceph_wbc.i_size);
page              616 fs/ceph/addr.c 		page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
page              624 fs/ceph/addr.c 	     inode, page, page->index, page_off, len, snapc, snapc->seq);
page              630 fs/ceph/addr.c 	set_page_writeback(page);
page              635 fs/ceph/addr.c 				   &inode->i_mtime, &page, 1);
page              642 fs/ceph/addr.c 			dout("writepage interrupted page %p\n", page);
page              643 fs/ceph/addr.c 			redirty_page_for_writepage(wbc, page);
page              644 fs/ceph/addr.c 			end_page_writeback(page);
page              650 fs/ceph/addr.c 		     err, page);
page              654 fs/ceph/addr.c 		dout("writepage cleaned page %p\n", page);
page              657 fs/ceph/addr.c 	page->private = 0;
page              658 fs/ceph/addr.c 	ClearPagePrivate(page);
page              659 fs/ceph/addr.c 	end_page_writeback(page);
page              670 fs/ceph/addr.c static int ceph_writepage(struct page *page, struct writeback_control *wbc)
page              673 fs/ceph/addr.c 	struct inode *inode = page->mapping->host;
page              676 fs/ceph/addr.c 	err = writepage_nounlock(page, wbc);
page              682 fs/ceph/addr.c 	unlock_page(page);
page              698 fs/ceph/addr.c 	struct page *page;
page              737 fs/ceph/addr.c 			page = osd_data->pages[j];
page              738 fs/ceph/addr.c 			BUG_ON(!page);
page              739 fs/ceph/addr.c 			WARN_ON(!PageUptodate(page));
page              747 fs/ceph/addr.c 			ceph_put_snap_context(page_snap_context(page));
page              748 fs/ceph/addr.c 			page->private = 0;
page              749 fs/ceph/addr.c 			ClearPagePrivate(page);
page              750 fs/ceph/addr.c 			dout("unlocking %p\n", page);
page              751 fs/ceph/addr.c 			end_page_writeback(page);
page              755 fs/ceph/addr.c 							  page);
page              757 fs/ceph/addr.c 			unlock_page(page);
page              861 fs/ceph/addr.c 		struct page **pages = NULL, **data_pages;
page              863 fs/ceph/addr.c 		struct page *page;
page              877 fs/ceph/addr.c 			page = pvec.pages[i];
page              878 fs/ceph/addr.c 			dout("? %p idx %lu\n", page, page->index);
page              880 fs/ceph/addr.c 				lock_page(page);  /* first page */
page              881 fs/ceph/addr.c 			else if (!trylock_page(page))
page              885 fs/ceph/addr.c 			if (unlikely(!PageDirty(page)) ||
page              886 fs/ceph/addr.c 			    unlikely(page->mapping != mapping)) {
page              887 fs/ceph/addr.c 				dout("!dirty or !mapping %p\n", page);
page              888 fs/ceph/addr.c 				unlock_page(page);
page              892 fs/ceph/addr.c 			pgsnapc = page_snap_context(page);
page              900 fs/ceph/addr.c 				unlock_page(page);
page              903 fs/ceph/addr.c 			if (page_offset(page) >= ceph_wbc.i_size) {
page              905 fs/ceph/addr.c 				     page, ceph_wbc.i_size);
page              907 fs/ceph/addr.c 				    page_offset(page) >= i_size_read(inode)) &&
page              908 fs/ceph/addr.c 				    clear_page_dirty_for_io(page))
page              909 fs/ceph/addr.c 					mapping->a_ops->invalidatepage(page,
page              911 fs/ceph/addr.c 				unlock_page(page);
page              914 fs/ceph/addr.c 			if (strip_unit_end && (page->index > strip_unit_end)) {
page              915 fs/ceph/addr.c 				dout("end of strip unit %p\n", page);
page              916 fs/ceph/addr.c 				unlock_page(page);
page              919 fs/ceph/addr.c 			if (PageWriteback(page)) {
page              921 fs/ceph/addr.c 					dout("%p under writeback\n", page);
page              922 fs/ceph/addr.c 					unlock_page(page);
page              925 fs/ceph/addr.c 				dout("waiting on writeback %p\n", page);
page              926 fs/ceph/addr.c 				wait_on_page_writeback(page);
page              929 fs/ceph/addr.c 			if (!clear_page_dirty_for_io(page)) {
page              930 fs/ceph/addr.c 				dout("%p !clear_page_dirty_for_io\n", page);
page              931 fs/ceph/addr.c 				unlock_page(page);
page              947 fs/ceph/addr.c 				offset = (u64)page_offset(page);
page              955 fs/ceph/addr.c 				strip_unit_end = page->index +
page              970 fs/ceph/addr.c 			} else if (page->index !=
page              974 fs/ceph/addr.c 					redirty_page_for_writepage(wbc, page);
page              975 fs/ceph/addr.c 					unlock_page(page);
page              980 fs/ceph/addr.c 				offset = (u64)page_offset(page);
page              986 fs/ceph/addr.c 			     inode, page, page->index);
page              996 fs/ceph/addr.c 			pages[locked_pages++] = page;
page             1160 fs/ceph/addr.c 			struct page *page;
page             1167 fs/ceph/addr.c 					page = pvec.pages[i];
page             1168 fs/ceph/addr.c 					if (page_snap_context(page) != snapc)
page             1170 fs/ceph/addr.c 					wait_on_page_writeback(page);
page             1217 fs/ceph/addr.c 			    struct page *page)
page             1230 fs/ceph/addr.c 		dout(" page %p forced umount\n", page);
page             1231 fs/ceph/addr.c 		unlock_page(page);
page             1237 fs/ceph/addr.c 	wait_on_page_writeback(page);
page             1239 fs/ceph/addr.c 	snapc = page_snap_context(page);
page             1249 fs/ceph/addr.c 			     page, snapc);
page             1255 fs/ceph/addr.c 			unlock_page(page);
page             1268 fs/ceph/addr.c 		     page, snapc);
page             1269 fs/ceph/addr.c 		if (!clear_page_dirty_for_io(page))
page             1271 fs/ceph/addr.c 		r = writepage_nounlock(page, NULL);
page             1277 fs/ceph/addr.c 	if (PageUptodate(page)) {
page             1278 fs/ceph/addr.c 		dout(" page %p already uptodate\n", page);
page             1293 fs/ceph/addr.c 		     page, pos_in_page, end_in_page, (int)PAGE_SIZE);
page             1294 fs/ceph/addr.c 		zero_user_segments(page,
page             1301 fs/ceph/addr.c 	r = ceph_do_readpage(file, page);
page             1309 fs/ceph/addr.c 	unlock_page(page);
page             1319 fs/ceph/addr.c 			    struct page **pagep, void **fsdata)
page             1322 fs/ceph/addr.c 	struct page *page;
page             1328 fs/ceph/addr.c 		page = grab_cache_page_write_begin(mapping, index, 0);
page             1329 fs/ceph/addr.c 		if (!page)
page             1333 fs/ceph/addr.c 		     inode, page, (int)pos, (int)len);
page             1335 fs/ceph/addr.c 		r = ceph_update_writeable_page(file, pos, len, page);
page             1337 fs/ceph/addr.c 			put_page(page);
page             1339 fs/ceph/addr.c 			*pagep = page;
page             1351 fs/ceph/addr.c 			  struct page *page, void *fsdata)
page             1357 fs/ceph/addr.c 	     inode, page, (int)pos, (int)copied, (int)len);
page             1360 fs/ceph/addr.c 	if (!PageUptodate(page)) {
page             1365 fs/ceph/addr.c 		SetPageUptodate(page);
page             1372 fs/ceph/addr.c 	set_page_dirty(page);
page             1375 fs/ceph/addr.c 	unlock_page(page);
page             1376 fs/ceph/addr.c 	put_page(page);
page             1429 fs/ceph/addr.c 	struct page *pinned_page = NULL;
page             1478 fs/ceph/addr.c 		struct page *page = find_or_create_page(mapping, 0,
page             1481 fs/ceph/addr.c 		if (!page) {
page             1485 fs/ceph/addr.c 		err = __ceph_do_getattr(inode, page,
page             1488 fs/ceph/addr.c 			unlock_page(page);
page             1489 fs/ceph/addr.c 			put_page(page);
page             1494 fs/ceph/addr.c 			zero_user_segment(page, err, PAGE_SIZE);
page             1496 fs/ceph/addr.c 			flush_dcache_page(page);
page             1497 fs/ceph/addr.c 		SetPageUptodate(page);
page             1498 fs/ceph/addr.c 		vmf->page = page;
page             1522 fs/ceph/addr.c 	struct page *page = vmf->page;
page             1523 fs/ceph/addr.c 	loff_t off = page_offset(page);
page             1538 fs/ceph/addr.c 		struct page *locked_page = NULL;
page             1540 fs/ceph/addr.c 			lock_page(page);
page             1541 fs/ceph/addr.c 			locked_page = page;
page             1576 fs/ceph/addr.c 		lock_page(page);
page             1578 fs/ceph/addr.c 		if ((off > size) || (page->mapping != inode->i_mapping)) {
page             1579 fs/ceph/addr.c 			unlock_page(page);
page             1584 fs/ceph/addr.c 		err = ceph_update_writeable_page(vma->vm_file, off, len, page);
page             1587 fs/ceph/addr.c 			set_page_dirty(page);
page             1616 fs/ceph/addr.c void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
page             1620 fs/ceph/addr.c 	struct page *page;
page             1623 fs/ceph/addr.c 		page = locked_page;
page             1627 fs/ceph/addr.c 		page = find_or_create_page(mapping, 0,
page             1630 fs/ceph/addr.c 		if (!page)
page             1632 fs/ceph/addr.c 		if (PageUptodate(page)) {
page             1633 fs/ceph/addr.c 			unlock_page(page);
page             1634 fs/ceph/addr.c 			put_page(page);
page             1643 fs/ceph/addr.c 		void *kaddr = kmap_atomic(page);
page             1648 fs/ceph/addr.c 	if (page != locked_page) {
page             1650 fs/ceph/addr.c 			zero_user_segment(page, len, PAGE_SIZE);
page             1652 fs/ceph/addr.c 			flush_dcache_page(page);
page             1654 fs/ceph/addr.c 		SetPageUptodate(page);
page             1655 fs/ceph/addr.c 		unlock_page(page);
page             1656 fs/ceph/addr.c 		put_page(page);
page             1660 fs/ceph/addr.c int ceph_uninline_data(struct file *filp, struct page *locked_page)
page             1666 fs/ceph/addr.c 	struct page *page = NULL;
page             1683 fs/ceph/addr.c 		page = locked_page;
page             1684 fs/ceph/addr.c 		WARN_ON(!PageUptodate(page));
page             1687 fs/ceph/addr.c 		page = find_get_page(inode->i_mapping, 0);
page             1688 fs/ceph/addr.c 		if (page) {
page             1689 fs/ceph/addr.c 			if (PageUptodate(page)) {
page             1691 fs/ceph/addr.c 				lock_page(page);
page             1693 fs/ceph/addr.c 				put_page(page);
page             1694 fs/ceph/addr.c 				page = NULL;
page             1699 fs/ceph/addr.c 	if (page) {
page             1704 fs/ceph/addr.c 		page = __page_cache_alloc(GFP_NOFS);
page             1705 fs/ceph/addr.c 		if (!page) {
page             1709 fs/ceph/addr.c 		err = __ceph_do_getattr(inode, page,
page             1747 fs/ceph/addr.c 	osd_req_op_extent_osd_data_pages(req, 1, &page, len, 0, false, false);
page             1780 fs/ceph/addr.c 	if (page && page != locked_page) {
page             1782 fs/ceph/addr.c 			unlock_page(page);
page             1783 fs/ceph/addr.c 			put_page(page);
page             1785 fs/ceph/addr.c 			__free_pages(page, 0);
page             1822 fs/ceph/addr.c 	struct page **pages;
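[Annotation] The fs/ceph/addr.c entries above (file lines 1429-1498) trace how a page fault on an inline-data file materializes the data in page 0 of the mapping: find-or-create the page, fetch the inline blob with __ceph_do_getattr(), zero the tail past the returned length, mark the page uptodate. A condensed sketch of that shape, using only calls visible in the entries plus mapping_gfp_constraint(); inline_fault_sketch is a hypothetical name and error handling is simplified:

static int inline_fault_sketch(struct inode *inode)
{
        struct address_space *mapping = inode->i_mapping;
        struct page *page;
        int err;

        page = find_or_create_page(mapping, 0,
                        mapping_gfp_constraint(mapping, ~__GFP_FS));
        if (!page)
                return -ENOMEM;

        /* fetch the inline blob; returns the number of bytes filled in */
        err = __ceph_do_getattr(inode, page, CEPH_STAT_CAP_INLINE_DATA, true);
        if (err < 0) {
                unlock_page(page);
                put_page(page);
                return err;
        }
        if (err < PAGE_SIZE)
                zero_user_segment(page, err, PAGE_SIZE); /* zero the tail */
        else
                flush_dcache_page(page);
        SetPageUptodate(page);
        /* the real fault path instead hands the still-locked page to
         * vmf->page; here we just release it */
        unlock_page(page);
        put_page(page);
        return 0;
}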
page              207 fs/ceph/cache.c static void ceph_readpage_from_fscache_complete(struct page *page, void *data, int error)
page              210 fs/ceph/cache.c 		SetPageUptodate(page);
page              212 fs/ceph/cache.c 	unlock_page(page);
page              226 fs/ceph/cache.c int ceph_readpage_from_fscache(struct inode *inode, struct page *page)
page              234 fs/ceph/cache.c 	ret = fscache_read_or_alloc_page(ci->fscache, page,
page              281 fs/ceph/cache.c void ceph_readpage_to_fscache(struct inode *inode, struct page *page)
page              286 fs/ceph/cache.c 	if (!PageFsCache(page))
page              292 fs/ceph/cache.c 	ret = fscache_write_page(ci->fscache, page, i_size_read(inode),
page              295 fs/ceph/cache.c 		 fscache_uncache_page(ci->fscache, page);
page              298 fs/ceph/cache.c void ceph_invalidate_fscache_page(struct inode* inode, struct page *page)
page              302 fs/ceph/cache.c 	if (!PageFsCache(page))
page              305 fs/ceph/cache.c 	fscache_wait_on_page_write(ci->fscache, page);
page              306 fs/ceph/cache.c 	fscache_uncache_page(ci->fscache, page);
page               27 fs/ceph/cache.h int ceph_readpage_from_fscache(struct inode *inode, struct page *page);
page               32 fs/ceph/cache.h void ceph_readpage_to_fscache(struct inode *inode, struct page *page);
page               33 fs/ceph/cache.h void ceph_invalidate_fscache_page(struct inode* inode, struct page *page);
page               47 fs/ceph/cache.h 					     struct page *page)
page               50 fs/ceph/cache.h 	return fscache_uncache_page(ci->fscache, page);
page               53 fs/ceph/cache.h static inline int ceph_release_fscache_page(struct page *page, gfp_t gfp)
page               55 fs/ceph/cache.h 	struct inode* inode = page->mapping->host;
page               57 fs/ceph/cache.h 	return fscache_maybe_release_page(ci->fscache, page, gfp);
page               61 fs/ceph/cache.h 						struct page *page)
page               64 fs/ceph/cache.h 	if (fscache_cookie_valid(ci->fscache) && PageFsCache(page))
page               65 fs/ceph/cache.h 		__fscache_uncache_page(ci->fscache, page);
page              122 fs/ceph/cache.h 					     struct page *pages)
page              127 fs/ceph/cache.h 					     struct page *page)
page              141 fs/ceph/cache.h 					    struct page *page)
page              150 fs/ceph/cache.h 						struct page *page)
page              154 fs/ceph/cache.h static inline int ceph_release_fscache_page(struct page *page, gfp_t gfp)
page              160 fs/ceph/cache.h 						struct page *page)
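[Annotation] fs/ceph/cache.h above shows the usual fscache wrapper idiom: a static inline tests fscache_cookie_valid() and PageFsCache() before calling the __fscache primitive, and an empty stub is compiled in when the cache is configured out. A minimal sketch; demo_uncache_page is a hypothetical name, and CONFIG_CEPH_FSCACHE is ceph's config symbol:

#ifdef CONFIG_CEPH_FSCACHE
static inline void demo_uncache_page(struct ceph_inode_info *ci,
                                     struct page *page)
{
        if (fscache_cookie_valid(ci->fscache) && PageFsCache(page))
                __fscache_uncache_page(ci->fscache, page);
}
#else
static inline void demo_uncache_page(struct ceph_inode_info *ci,
                                     struct page *page)
{
        /* no-op when fscache support is compiled out */
}
#endif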
page             2744 fs/ceph/caps.c 		  loff_t endoff, int *got, struct page **pinned_page)
page             2809 fs/ceph/caps.c 			struct page *page =
page             2811 fs/ceph/caps.c 			if (page) {
page             2812 fs/ceph/caps.c 				if (PageUptodate(page)) {
page             2813 fs/ceph/caps.c 					*pinned_page = page;
page             2816 fs/ceph/caps.c 				put_page(page);
page              136 fs/ceph/dir.c  	if (!cache_ctl->page || ptr_pgoff != page_index(cache_ctl->page)) {
page              138 fs/ceph/dir.c  		cache_ctl->page = find_lock_page(&dir->i_data, ptr_pgoff);
page              139 fs/ceph/dir.c  		if (!cache_ctl->page) {
page              145 fs/ceph/dir.c  		unlock_page(cache_ctl->page);
page              146 fs/ceph/dir.c  		cache_ctl->dentries = kmap(cache_ctl->page);
page               91 fs/ceph/file.c 		struct page *pages[ITER_GET_BVECS_PAGES];
page              613 fs/ceph/file.c 		struct page **pages;
page             1138 fs/ceph/file.c 	struct page **pages;
page             1262 fs/ceph/file.c 	struct page *pinned_page = NULL;
page             1335 fs/ceph/file.c 		struct page *page = NULL;
page             1338 fs/ceph/file.c 			page = __page_cache_alloc(GFP_KERNEL);
page             1339 fs/ceph/file.c 			if (!page)
page             1343 fs/ceph/file.c 		statret = __ceph_do_getattr(inode, page,
page             1344 fs/ceph/file.c 					    CEPH_STAT_CAP_INLINE_DATA, !!page);
page             1346 fs/ceph/file.c 			if (page)
page             1347 fs/ceph/file.c 				__free_page(page);
page             1364 fs/ceph/file.c 					zero_user_segment(page, statret, end);
page             1365 fs/ceph/file.c 				ret = copy_page_to_iter(page,
page             1378 fs/ceph/file.c 			__free_pages(page, 0);
page             1664 fs/ceph/file.c 	struct page *page;
page             1667 fs/ceph/file.c 	page = find_lock_page(inode->i_mapping, index);
page             1668 fs/ceph/file.c 	if (page) {
page             1669 fs/ceph/file.c 		wait_on_page_writeback(page);
page             1670 fs/ceph/file.c 		zero_user(page, offset & (PAGE_SIZE - 1), size);
page             1671 fs/ceph/file.c 		unlock_page(page);
page             1672 fs/ceph/file.c 		put_page(page);
page              728 fs/ceph/inode.c static int fill_inode(struct inode *inode, struct page *locked_page,
page             1511 fs/ceph/inode.c 	if (ctl->page) {
page             1512 fs/ceph/inode.c 		kunmap(ctl->page);
page             1513 fs/ceph/inode.c 		put_page(ctl->page);
page             1514 fs/ceph/inode.c 		ctl->page = NULL;
page             1527 fs/ceph/inode.c 	if (!ctl->page || pgoff != page_index(ctl->page)) {
page             1530 fs/ceph/inode.c 			ctl->page = grab_cache_page(&dir->i_data, pgoff);
page             1532 fs/ceph/inode.c 			ctl->page = find_lock_page(&dir->i_data, pgoff);
page             1533 fs/ceph/inode.c 		if (!ctl->page) {
page             1539 fs/ceph/inode.c 		unlock_page(ctl->page);
page             1540 fs/ceph/inode.c 		ctl->dentries = kmap(ctl->page);
page             2240 fs/ceph/inode.c int __ceph_do_getattr(struct inode *inode, struct page *locked_page,
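[Annotation] The cache_ctl/ctl entries from fs/ceph/dir.c and fs/ceph/inode.c above implement a one-page cursor over the readdir cache: at most one page is kmap()ed at a time, and moving to a different page offset first unmaps and releases the old one. A condensed sketch, assuming the fields live in struct ceph_readdir_cache_control (the fs/ceph/super.h page member above); demo_ctl_map and the -EAGAIN miss code are illustrative:

static int demo_ctl_map(struct ceph_readdir_cache_control *ctl,
                        struct inode *dir, pgoff_t pgoff)
{
        if (ctl->page && pgoff == page_index(ctl->page))
                return 0;                       /* right page already mapped */

        if (ctl->page) {                        /* drop the previous page */
                kunmap(ctl->page);
                put_page(ctl->page);
                ctl->page = NULL;
        }
        ctl->page = find_lock_page(&dir->i_data, pgoff);
        if (!ctl->page)
                return -EAGAIN;                 /* cache miss */
        /* the page is only read through the kmap, so drop the lock early */
        unlock_page(ctl->page);
        ctl->dentries = kmap(ctl->page);
        return 0;
}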
page             3256 fs/ceph/mds_client.c 	struct page *page;
page             3296 fs/ceph/mds_client.c 	page = list_first_entry(&recon_state->pagelist->head, struct page, lru);
page             3297 fs/ceph/mds_client.c 	addr = kmap_atomic(page);
page             3697 fs/ceph/mds_client.c 		struct page *page =
page             3699 fs/ceph/mds_client.c 					struct page, lru);
page             3700 fs/ceph/mds_client.c 		__le32 *addr = kmap_atomic(page);
page              275 fs/ceph/mds_client.h 	struct page *r_locked_page;
page              738 fs/ceph/super.c 	size = sizeof (struct page *) * (page_count ? page_count : 1);
page              783 fs/ceph/super.h 	struct page  *page;
page              926 fs/ceph/super.h extern int __ceph_do_getattr(struct inode *inode, struct page *locked_page,
page             1077 fs/ceph/super.h 			 loff_t endoff, int *got, struct page **pinned_page);
page             1088 fs/ceph/super.h extern int ceph_uninline_data(struct file *filp, struct page *locked_page);
page             1100 fs/ceph/super.h extern void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
page             1247 fs/ceph/xattr.c 			struct page *page = list_first_entry(&pagelist->head,
page             1248 fs/ceph/xattr.c 							     struct page, lru);
page             1249 fs/ceph/xattr.c 			void *addr = kmap_atomic(page);
page              191 fs/cifs/cifsglob.h 	struct page	**rq_pages;	/* pointer to array of page ptrs */
page             1335 fs/cifs/cifsglob.h 	struct page			**pages;
page             1361 fs/cifs/cifsglob.h 	struct page			**pages;
page              221 fs/cifs/cifsproto.h 					struct page *page,
page              561 fs/cifs/cifsproto.h struct cifs_writedata *cifs_writedata_direct_alloc(struct page **pages,
page              586 fs/cifs/cifsproto.h extern void rqst_page_get_length(struct smb_rqst *rqst, unsigned int page,
page             2180 fs/cifs/cifssmb.c 		struct page *page = wdata->pages[i];
page             2182 fs/cifs/cifssmb.c 			__set_page_dirty_nobuffers(page);
page             2184 fs/cifs/cifssmb.c 			SetPageError(page);
page             2185 fs/cifs/cifssmb.c 		end_page_writeback(page);
page             2186 fs/cifs/cifssmb.c 		put_page(page);
page             2196 fs/cifs/cifssmb.c 	struct page **pages =
page             2197 fs/cifs/cifssmb.c 		kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
page             2205 fs/cifs/cifssmb.c cifs_writedata_direct_alloc(struct page **pages, work_func_t complete)
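[Annotation] fs/cifs/cifssmb.c above splits write-data allocation in two: the page-pointer array is kcalloc()ed separately and handed to cifs_writedata_direct_alloc(), so direct-I/O callers can instead pass in an array they already own. Roughly:

struct cifs_writedata *demo_writedata_alloc(unsigned int nr_pages,
                                            work_func_t complete)
{
        struct page **pages = kcalloc(nr_pages, sizeof(struct page *),
                                      GFP_NOFS);
        struct cifs_writedata *wdata = NULL;

        if (pages) {
                wdata = cifs_writedata_direct_alloc(pages, complete);
                if (!wdata)
                        kfree(pages);   /* direct_alloc did not take it */
        }
        return wdata;
}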
page              855 fs/cifs/connect.c cifs_read_page_from_socket(struct TCP_Server_Info *server, struct page *page,
page              860 fs/cifs/connect.c 		.bv_page = page, .bv_len = to_read, .bv_offset = page_offset};
page             2094 fs/cifs/file.c static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
page             2096 fs/cifs/file.c 	struct address_space *mapping = page->mapping;
page             2097 fs/cifs/file.c 	loff_t offset = (loff_t)page->index << PAGE_SHIFT;
page             2107 fs/cifs/file.c 	inode = page->mapping->host;
page             2110 fs/cifs/file.c 	write_data = kmap(page);
page             2114 fs/cifs/file.c 		kunmap(page);
page             2120 fs/cifs/file.c 		kunmap(page);
page             2148 fs/cifs/file.c 	kunmap(page);
page             2176 fs/cifs/file.c 	struct page *page;
page             2179 fs/cifs/file.c 		page = wdata->pages[i];
page             2188 fs/cifs/file.c 			lock_page(page);
page             2189 fs/cifs/file.c 		else if (!trylock_page(page))
page             2192 fs/cifs/file.c 		if (unlikely(page->mapping != mapping)) {
page             2193 fs/cifs/file.c 			unlock_page(page);
page             2197 fs/cifs/file.c 		if (!wbc->range_cyclic && page->index > end) {
page             2199 fs/cifs/file.c 			unlock_page(page);
page             2203 fs/cifs/file.c 		if (*next && (page->index != *next)) {
page             2205 fs/cifs/file.c 			unlock_page(page);
page             2210 fs/cifs/file.c 			wait_on_page_writeback(page);
page             2212 fs/cifs/file.c 		if (PageWriteback(page) ||
page             2213 fs/cifs/file.c 				!clear_page_dirty_for_io(page)) {
page             2214 fs/cifs/file.c 			unlock_page(page);
page             2222 fs/cifs/file.c 		set_page_writeback(page);
page             2223 fs/cifs/file.c 		if (page_offset(page) >= i_size_read(mapping->host)) {
page             2225 fs/cifs/file.c 			unlock_page(page);
page             2226 fs/cifs/file.c 			end_page_writeback(page);
page             2230 fs/cifs/file.c 		wdata->pages[i] = page;
page             2231 fs/cifs/file.c 		*next = page->index + 1;
page             2440 fs/cifs/file.c cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
page             2447 fs/cifs/file.c 	get_page(page);
page             2448 fs/cifs/file.c 	if (!PageUptodate(page))
page             2461 fs/cifs/file.c 	set_page_writeback(page);
page             2463 fs/cifs/file.c 	rc = cifs_partialpagewrite(page, 0, PAGE_SIZE);
page             2467 fs/cifs/file.c 		redirty_page_for_writepage(wbc, page);
page             2469 fs/cifs/file.c 		SetPageError(page);
page             2470 fs/cifs/file.c 		mapping_set_error(page->mapping, rc);
page             2472 fs/cifs/file.c 		SetPageUptodate(page);
page             2474 fs/cifs/file.c 	end_page_writeback(page);
page             2475 fs/cifs/file.c 	put_page(page);
page             2480 fs/cifs/file.c static int cifs_writepage(struct page *page, struct writeback_control *wbc)
page             2482 fs/cifs/file.c 	int rc = cifs_writepage_locked(page, wbc);
page             2483 fs/cifs/file.c 	unlock_page(page);
page             2489 fs/cifs/file.c 			struct page *page, void *fsdata)
page             2503 fs/cifs/file.c 		 page, pos, copied);
page             2505 fs/cifs/file.c 	if (PageChecked(page)) {
page             2507 fs/cifs/file.c 			SetPageUptodate(page);
page             2508 fs/cifs/file.c 		ClearPageChecked(page);
page             2509 fs/cifs/file.c 	} else if (!PageUptodate(page) && copied == PAGE_SIZE)
page             2510 fs/cifs/file.c 		SetPageUptodate(page);
page             2512 fs/cifs/file.c 	if (!PageUptodate(page)) {
page             2523 fs/cifs/file.c 		page_data = kmap(page);
page             2526 fs/cifs/file.c 		kunmap(page);
page             2532 fs/cifs/file.c 		set_page_dirty(page);
page             2542 fs/cifs/file.c 	unlock_page(page);
page             2543 fs/cifs/file.c 	put_page(page);
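[Annotation] The fs/cifs/file.c entries for cifs_writepage_locked() (file lines 2440-2475) show the canonical single-page writeback lifecycle: pin the page, set writeback, issue the write, then map the result onto the page flags. A condensed sketch with the xid bookkeeping and the wait-for-uptodate step dropped:

static int demo_writepage_locked(struct page *page,
                                 struct writeback_control *wbc)
{
        int rc;

        get_page(page);                 /* hold the page across the I/O */
        set_page_writeback(page);
        rc = cifs_partialpagewrite(page, 0, PAGE_SIZE);
        if (rc == -EAGAIN) {
                redirty_page_for_writepage(wbc, page);  /* retry later */
                rc = 0;
        } else if (rc != 0) {
                SetPageError(page);
                mapping_set_error(page->mapping, rc);
        } else {
                SetPageUptodate(page);
        }
        end_page_writeback(page);
        put_page(page);
        return rc;
}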
page             2638 fs/cifs/file.c cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
page             2832 fs/cifs/file.c 	struct page **pagevec;
page             3268 fs/cifs/file.c cifs_readdata_direct_alloc(struct page **pages, work_func_t complete)
page             3287 fs/cifs/file.c 	struct page **pages =
page             3288 fs/cifs/file.c 		kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
page             3322 fs/cifs/file.c 	struct page *page;
page             3326 fs/cifs/file.c 		page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
page             3327 fs/cifs/file.c 		if (!page) {
page             3331 fs/cifs/file.c 		rdata->pages[i] = page;
page             3375 fs/cifs/file.c 		struct page *page = rdata->pages[i];
page             3380 fs/cifs/file.c 			void *addr = kmap_atomic(page);
page             3385 fs/cifs/file.c 			written = copy_page_to_iter(page, 0, copy, iter);
page             3420 fs/cifs/file.c 		struct page *page = rdata->pages[i];
page             3434 fs/cifs/file.c 			put_page(page);
page             3448 fs/cifs/file.c 					page, page_offset, n, iter);
page             3455 fs/cifs/file.c 					server, page, page_offset, n);
page             3556 fs/cifs/file.c 	struct page **pagevec;
page             4042 fs/cifs/file.c 	struct page *page = vmf->page;
page             4044 fs/cifs/file.c 	lock_page(page);
page             4100 fs/cifs/file.c 		struct page *page = rdata->pages[i];
page             4102 fs/cifs/file.c 		lru_cache_add_file(page);
page             4106 fs/cifs/file.c 			flush_dcache_page(page);
page             4107 fs/cifs/file.c 			SetPageUptodate(page);
page             4110 fs/cifs/file.c 		unlock_page(page);
page             4114 fs/cifs/file.c 			cifs_readpage_to_fscache(rdata->mapping->host, page);
page             4118 fs/cifs/file.c 		put_page(page);
page             4144 fs/cifs/file.c 		struct page *page = rdata->pages[i];
page             4159 fs/cifs/file.c 			zero_user(page, len + page_offset, to_read - len);
page             4162 fs/cifs/file.c 		} else if (page->index > eof_index) {
page             4171 fs/cifs/file.c 			zero_user(page, 0, PAGE_SIZE);
page             4172 fs/cifs/file.c 			lru_cache_add_file(page);
page             4173 fs/cifs/file.c 			flush_dcache_page(page);
page             4174 fs/cifs/file.c 			SetPageUptodate(page);
page             4175 fs/cifs/file.c 			unlock_page(page);
page             4176 fs/cifs/file.c 			put_page(page);
page             4182 fs/cifs/file.c 			lru_cache_add_file(page);
page             4183 fs/cifs/file.c 			unlock_page(page);
page             4184 fs/cifs/file.c 			put_page(page);
page             4192 fs/cifs/file.c 					page, page_offset, n, iter);
page             4199 fs/cifs/file.c 					server, page, page_offset, n);
page             4230 fs/cifs/file.c 	struct page *page, *tpage;
page             4237 fs/cifs/file.c 	page = lru_to_page(page_list);
page             4244 fs/cifs/file.c 	__SetPageLocked(page);
page             4245 fs/cifs/file.c 	rc = add_to_page_cache_locked(page, mapping,
page             4246 fs/cifs/file.c 				      page->index, gfp);
page             4250 fs/cifs/file.c 		__ClearPageLocked(page);
page             4255 fs/cifs/file.c 	*offset = (loff_t)page->index << PAGE_SHIFT;
page             4258 fs/cifs/file.c 	list_move_tail(&page->lru, tmplist);
page             4261 fs/cifs/file.c 	expected_index = page->index + 1;
page             4262 fs/cifs/file.c 	list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
page             4264 fs/cifs/file.c 		if (page->index != expected_index)
page             4271 fs/cifs/file.c 		__SetPageLocked(page);
page             4272 fs/cifs/file.c 		if (add_to_page_cache_locked(page, mapping, page->index, gfp)) {
page             4273 fs/cifs/file.c 			__ClearPageLocked(page);
page             4276 fs/cifs/file.c 		list_move_tail(&page->lru, tmplist);
page             4335 fs/cifs/file.c 		struct page *page, *tpage;
page             4375 fs/cifs/file.c 			list_for_each_entry_safe(page, tpage, &tmplist, lru) {
page             4376 fs/cifs/file.c 				list_del(&page->lru);
page             4377 fs/cifs/file.c 				lru_cache_add_file(page);
page             4378 fs/cifs/file.c 				unlock_page(page);
page             4379 fs/cifs/file.c 				put_page(page);
page             4397 fs/cifs/file.c 		list_for_each_entry_safe(page, tpage, &tmplist, lru) {
page             4398 fs/cifs/file.c 			list_del(&page->lru);
page             4399 fs/cifs/file.c 			rdata->pages[rdata->nr_pages++] = page;
page             4414 fs/cifs/file.c 				page = rdata->pages[i];
page             4415 fs/cifs/file.c 				lru_cache_add_file(page);
page             4416 fs/cifs/file.c 				unlock_page(page);
page             4417 fs/cifs/file.c 				put_page(page);
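[Annotation] The readpages path above (fs/cifs/file.c lines 4237-4276) batches a run of pages with contiguous indexes off the readahead list into the page cache before issuing one read. A condensed sketch; demo_grab_run is a hypothetical name, and the rsize/byte accounting of the real readpages_get_pages() is omitted:

static unsigned int demo_grab_run(struct address_space *mapping,
                                  struct list_head *page_list,
                                  struct list_head *tmplist, gfp_t gfp)
{
        struct page *page, *tpage;
        pgoff_t expected_index;
        unsigned int nr = 0;

        INIT_LIST_HEAD(tmplist);
        page = lru_to_page(page_list);
        /* nobody else can see this page yet, so set PG_locked directly */
        __SetPageLocked(page);
        if (add_to_page_cache_locked(page, mapping, page->index, gfp)) {
                __ClearPageLocked(page);
                return 0;
        }
        list_move_tail(&page->lru, tmplist);
        nr++;
        expected_index = page->index + 1;
        list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
                if (page->index != expected_index)      /* run ended */
                        break;
                __SetPageLocked(page);
                if (add_to_page_cache_locked(page, mapping, page->index,
                                             gfp)) {
                        __ClearPageLocked(page);
                        break;
                }
                list_move_tail(&page->lru, tmplist);
                nr++;
                expected_index++;
        }
        return nr;
}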
page             4439 fs/cifs/file.c static int cifs_readpage_worker(struct file *file, struct page *page,
page             4446 fs/cifs/file.c 	rc = cifs_readpage_from_fscache(file_inode(file), page);
page             4450 fs/cifs/file.c 	read_data = kmap(page);
page             4470 fs/cifs/file.c 	flush_dcache_page(page);
page             4471 fs/cifs/file.c 	SetPageUptodate(page);
page             4474 fs/cifs/file.c 	cifs_readpage_to_fscache(file_inode(file), page);
page             4479 fs/cifs/file.c 	kunmap(page);
page             4480 fs/cifs/file.c 	unlock_page(page);
page             4486 fs/cifs/file.c static int cifs_readpage(struct file *file, struct page *page)
page             4488 fs/cifs/file.c 	loff_t offset = (loff_t)page->index << PAGE_SHIFT;
page             4501 fs/cifs/file.c 		 page, (int)offset, (int)offset);
page             4503 fs/cifs/file.c 	rc = cifs_readpage_worker(file, page, &offset);
page             4556 fs/cifs/file.c 			struct page **pagep, void **fsdata)
page             4563 fs/cifs/file.c 	struct page *page;
page             4569 fs/cifs/file.c 	page = grab_cache_page_write_begin(mapping, index, flags);
page             4570 fs/cifs/file.c 	if (!page) {
page             4575 fs/cifs/file.c 	if (PageUptodate(page))
page             4596 fs/cifs/file.c 			zero_user_segments(page, 0, offset,
page             4605 fs/cifs/file.c 			SetPageChecked(page);
page             4616 fs/cifs/file.c 		cifs_readpage_worker(file, page, &page_start);
page             4617 fs/cifs/file.c 		put_page(page);
page             4627 fs/cifs/file.c 	*pagep = page;
page             4631 fs/cifs/file.c static int cifs_release_page(struct page *page, gfp_t gfp)
page             4633 fs/cifs/file.c 	if (PagePrivate(page))
page             4636 fs/cifs/file.c 	return cifs_fscache_release_page(page, gfp);
page             4639 fs/cifs/file.c static void cifs_invalidate_page(struct page *page, unsigned int offset,
page             4642 fs/cifs/file.c 	struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
page             4645 fs/cifs/file.c 		cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
page             4648 fs/cifs/file.c static int cifs_launder_page(struct page *page)
page             4651 fs/cifs/file.c 	loff_t range_start = page_offset(page);
page             4660 fs/cifs/file.c 	cifs_dbg(FYI, "Launder page: %p\n", page);
page             4662 fs/cifs/file.c 	if (clear_page_dirty_for_io(page))
page             4663 fs/cifs/file.c 		rc = cifs_writepage_locked(page, &wbc);
page             4665 fs/cifs/file.c 	cifs_fscache_invalidate_page(page, page->mapping->host);
page              219 fs/cifs/fscache.c int cifs_fscache_release_page(struct page *page, gfp_t gfp)
page              221 fs/cifs/fscache.c 	if (PageFsCache(page)) {
page              222 fs/cifs/fscache.c 		struct inode *inode = page->mapping->host;
page              226 fs/cifs/fscache.c 			 __func__, page, cifsi->fscache);
page              227 fs/cifs/fscache.c 		if (!fscache_maybe_release_page(cifsi->fscache, page, gfp))
page              234 fs/cifs/fscache.c static void cifs_readpage_from_fscache_complete(struct page *page, void *ctx,
page              237 fs/cifs/fscache.c 	cifs_dbg(FYI, "%s: (0x%p/%d)\n", __func__, page, error);
page              239 fs/cifs/fscache.c 		SetPageUptodate(page);
page              240 fs/cifs/fscache.c 	unlock_page(page);
page              246 fs/cifs/fscache.c int __cifs_readpage_from_fscache(struct inode *inode, struct page *page)
page              251 fs/cifs/fscache.c 		 __func__, CIFS_I(inode)->fscache, page, inode);
page              252 fs/cifs/fscache.c 	ret = fscache_read_or_alloc_page(CIFS_I(inode)->fscache, page,
page              306 fs/cifs/fscache.c void __cifs_readpage_to_fscache(struct inode *inode, struct page *page)
page              312 fs/cifs/fscache.c 		 __func__, cifsi->fscache, page, inode);
page              313 fs/cifs/fscache.c 	ret = fscache_write_page(cifsi->fscache, page,
page              316 fs/cifs/fscache.c 		fscache_uncache_page(cifsi->fscache, page);
page              326 fs/cifs/fscache.c void __cifs_fscache_invalidate_page(struct page *page, struct inode *inode)
page              331 fs/cifs/fscache.c 	cifs_dbg(FYI, "%s: (0x%p/0x%p)\n", __func__, page, cookie);
page              332 fs/cifs/fscache.c 	fscache_wait_on_page_write(cookie, page);
page              333 fs/cifs/fscache.c 	fscache_uncache_page(cookie, page);
page               65 fs/cifs/fscache.h extern void __cifs_fscache_invalidate_page(struct page *, struct inode *);
page               66 fs/cifs/fscache.h extern int cifs_fscache_release_page(struct page *page, gfp_t gfp);
page               67 fs/cifs/fscache.h extern int __cifs_readpage_from_fscache(struct inode *, struct page *);
page               74 fs/cifs/fscache.h extern void __cifs_readpage_to_fscache(struct inode *, struct page *);
page               76 fs/cifs/fscache.h static inline void cifs_fscache_invalidate_page(struct page *page,
page               79 fs/cifs/fscache.h 	if (PageFsCache(page))
page               80 fs/cifs/fscache.h 		__cifs_fscache_invalidate_page(page, inode);
page               84 fs/cifs/fscache.h 					     struct page *page)
page               87 fs/cifs/fscache.h 		return __cifs_readpage_from_fscache(inode, page);
page              104 fs/cifs/fscache.h 					    struct page *page)
page              106 fs/cifs/fscache.h 	if (PageFsCache(page))
page              107 fs/cifs/fscache.h 		__cifs_readpage_to_fscache(inode, page);
page              133 fs/cifs/fscache.h static inline int cifs_fscache_release_page(struct page *page, gfp_t gfp)
page              138 fs/cifs/fscache.h static inline void cifs_fscache_invalidate_page(struct page *page,
page              141 fs/cifs/fscache.h cifs_readpage_from_fscache(struct inode *inode, struct page *page)
page              155 fs/cifs/fscache.h 			struct page *page) {}
page             2176 fs/cifs/inode.c 	struct page *page;
page             2179 fs/cifs/inode.c 	page = grab_cache_page(mapping, index);
page             2180 fs/cifs/inode.c 	if (!page)
page             2183 fs/cifs/inode.c 	zero_user_segment(page, offset, PAGE_SIZE);
page             2184 fs/cifs/inode.c 	unlock_page(page);
page             2185 fs/cifs/inode.c 	put_page(page);
page              850 fs/cifs/misc.c 	struct page **pages = NULL;
page              870 fs/cifs/misc.c 	if (max_pages * sizeof(struct page *) <= CIFS_AIO_KMALLOC_LIMIT)
page              871 fs/cifs/misc.c 		pages = kmalloc_array(max_pages, sizeof(struct page *),
page              875 fs/cifs/misc.c 		pages = vmalloc(array_size(max_pages, sizeof(struct page *)));
page              986 fs/cifs/misc.c void rqst_page_get_length(struct smb_rqst *rqst, unsigned int page,
page              990 fs/cifs/misc.c 	*offset = (page == 0) ? rqst->rq_offset : 0;
page              992 fs/cifs/misc.c 	if (rqst->rq_npages == 1 || page == rqst->rq_npages-1)
page              994 fs/cifs/misc.c 	else if (page == 0)
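[Annotation] fs/cifs/misc.c above sizes the AIO page-pointer array with kmalloc_array() when it fits under CIFS_AIO_KMALLOC_LIMIT and falls back to vmalloc() otherwise. Extracted as a sketch:

static struct page **demo_alloc_page_array(unsigned int max_pages)
{
        struct page **pages;

        if (max_pages * sizeof(struct page *) <= CIFS_AIO_KMALLOC_LIMIT)
                pages = kmalloc_array(max_pages, sizeof(struct page *),
                                      GFP_KERNEL);
        else
                pages = vmalloc(array_size(max_pages,
                                           sizeof(struct page *)));

        return pages;   /* either way, release with kvfree() */
}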
page             3788 fs/cifs/smb2ops.c 	struct page **pages;
page             3797 fs/cifs/smb2ops.c 		pages = kmalloc_array(npages, sizeof(struct page *),
page             3859 fs/cifs/smb2ops.c 		 unsigned int buf_data_size, struct page **pages,
page             3892 fs/cifs/smb2ops.c read_data_into_pages(struct TCP_Server_Info *server, struct page **pages,
page             3899 fs/cifs/smb2ops.c 		struct page *page = pages[i];
page             3908 fs/cifs/smb2ops.c 			zero_user(page, len, PAGE_SIZE - len);
page             3911 fs/cifs/smb2ops.c 		length = cifs_read_page_from_socket(server, page, 0, n);
page             3921 fs/cifs/smb2ops.c init_read_bvec(struct page **pages, unsigned int npages, unsigned int data_size,
page             3950 fs/cifs/smb2ops.c 		 char *buf, unsigned int buf_len, struct page **pages,
page             4085 fs/cifs/smb2ops.c 	struct page **ppages;
page             4136 fs/cifs/smb2ops.c 	struct page **pages;
page             4156 fs/cifs/smb2ops.c 	pages = kmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
page               41 fs/cifs/smbdirect.c 		struct page *page, unsigned long offset,
page             1033 fs/cifs/smbdirect.c static int smbd_post_send_page(struct smbd_connection *info, struct page *page,
page             1039 fs/cifs/smbdirect.c 	sg_set_page(&sgl, page, size, offset);
page             1988 fs/cifs/smbdirect.c 		struct page *page, unsigned int page_offset,
page             2004 fs/cifs/smbdirect.c 	page_address = kmap_atomic(page);
page             2008 fs/cifs/smbdirect.c 		page, to_address, to_read);
page             2024 fs/cifs/smbdirect.c 	struct page *page;
page             2044 fs/cifs/smbdirect.c 		page = msg->msg_iter.bvec->bv_page;
page             2047 fs/cifs/smbdirect.c 		rc = smbd_recv_page(info, page, page_offset, to_read);
page             2447 fs/cifs/smbdirect.c 	struct smbd_connection *info, struct page *pages[], int num_pages,
page              307 fs/cifs/smbdirect.h 	struct smbd_connection *info, struct page *pages[], int num_pages,
page               23 fs/coda/symlink.c static int coda_symlink_filler(struct file *file, struct page *page)
page               25 fs/coda/symlink.c 	struct inode *inode = page->mapping->host;
page               29 fs/coda/symlink.c 	char *p = page_address(page);
page               36 fs/coda/symlink.c 	SetPageUptodate(page);
page               37 fs/coda/symlink.c 	unlock_page(page);
page               41 fs/coda/symlink.c 	SetPageError(page);
page               42 fs/coda/symlink.c 	unlock_page(page);
page               34 fs/configfs/file.c 	char			* page;
page               63 fs/configfs/file.c 	if (!buffer->page)
page               64 fs/configfs/file.c 		buffer->page = (char *) get_zeroed_page(GFP_KERNEL);
page               65 fs/configfs/file.c 	if (!buffer->page)
page               70 fs/configfs/file.c 		count = buffer->attr->show(buffer->item, buffer->page);
page              114 fs/configfs/file.c 		 __func__, count, *ppos, buffer->page);
page              115 fs/configfs/file.c 	retval = simple_read_from_buffer(buf, count, ppos, buffer->page,
page              228 fs/configfs/file.c 	if (!buffer->page)
page              229 fs/configfs/file.c 		buffer->page = (char *)__get_free_pages(GFP_KERNEL, 0);
page              230 fs/configfs/file.c 	if (!buffer->page)
page              235 fs/configfs/file.c 	error = copy_from_user(buffer->page,buf,count);
page              239 fs/configfs/file.c 	buffer->page[count] = 0;
page              251 fs/configfs/file.c 		res = buffer->attr->store(buffer->item, buffer->page, count);
page              452 fs/configfs/file.c 	if (buffer->page)
page              453 fs/configfs/file.c 		free_page((unsigned long)buffer->page);
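[Annotation] The fs/configfs/file.c entries show the one-page attribute buffer: lazily allocated, filled by ->show() for reads or copy_from_user() for writes, NUL-terminated so store handlers can parse it, and freed with the buffer. A condensed sketch of the write side; the clamp to PAGE_SIZE - 1 stands in for the real code's size limit:

static int demo_fill_write_buffer(struct configfs_buffer *buffer,
                                  const char __user *buf, size_t count)
{
        if (!buffer->page)
                buffer->page = (char *)__get_free_pages(GFP_KERNEL, 0);
        if (!buffer->page)
                return -ENOMEM;

        if (count >= PAGE_SIZE)
                count = PAGE_SIZE - 1;          /* keep room for the NUL */
        if (copy_from_user(buffer->page, buf, count))
                return -EFAULT;
        buffer->page[count] = 0;        /* let ->store() use sscanf & co. */
        return count;
}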
page              186 fs/cramfs/inode.c 	struct page *pages[BLKS_PER_BUF];
page              216 fs/cramfs/inode.c 		struct page *page = NULL;
page              219 fs/cramfs/inode.c 			page = read_mapping_page(mapping, blocknr + i, NULL);
page              221 fs/cramfs/inode.c 			if (IS_ERR(page))
page              222 fs/cramfs/inode.c 				page = NULL;
page              224 fs/cramfs/inode.c 		pages[i] = page;
page              228 fs/cramfs/inode.c 		struct page *page = pages[i];
page              230 fs/cramfs/inode.c 		if (page) {
page              231 fs/cramfs/inode.c 			wait_on_page_locked(page);
page              232 fs/cramfs/inode.c 			if (!PageUptodate(page)) {
page              234 fs/cramfs/inode.c 				put_page(page);
page              247 fs/cramfs/inode.c 		struct page *page = pages[i];
page              249 fs/cramfs/inode.c 		if (page) {
page              250 fs/cramfs/inode.c 			memcpy(data, kmap(page), PAGE_SIZE);
page              251 fs/cramfs/inode.c 			kunmap(page);
page              252 fs/cramfs/inode.c 			put_page(page);
page              820 fs/cramfs/inode.c static int cramfs_readpage(struct file *file, struct page *page)
page              822 fs/cramfs/inode.c 	struct inode *inode = page->mapping->host;
page              829 fs/cramfs/inode.c 	pgdata = kmap(page);
page              831 fs/cramfs/inode.c 	if (page->index < maxblock) {
page              833 fs/cramfs/inode.c 		u32 blkptr_offset = OFFSET(inode) + page->index * 4;
page              854 fs/cramfs/inode.c 				if (page->index == maxblock - 1)
page              871 fs/cramfs/inode.c 			if (page->index)
page              916 fs/cramfs/inode.c 	flush_dcache_page(page);
page              917 fs/cramfs/inode.c 	kunmap(page);
page              918 fs/cramfs/inode.c 	SetPageUptodate(page);
page              919 fs/cramfs/inode.c 	unlock_page(page);
page              923 fs/cramfs/inode.c 	kunmap(page);
page              924 fs/cramfs/inode.c 	ClearPageUptodate(page);
page              925 fs/cramfs/inode.c 	SetPageError(page);
page              926 fs/cramfs/inode.c 	unlock_page(page);
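[Annotation] fs/cramfs/inode.c above fills its decompression buffer by kicking off all BLKS_PER_BUF page reads first, then waiting and copying, so the block layer sees one batch of requests. Condensed, with the surrounding buffer bookkeeping omitted:

static void demo_fill_buffer(struct address_space *mapping, pgoff_t blocknr,
                             struct page **pages, unsigned int nr, char *data)
{
        unsigned int i;

        /* pass 1: kick off (or find) all the reads */
        for (i = 0; i < nr; i++) {
                struct page *page = read_mapping_page(mapping, blocknr + i,
                                                      NULL);
                pages[i] = IS_ERR(page) ? NULL : page;
        }
        /* pass 2: wait, and drop anything that failed asynchronously */
        for (i = 0; i < nr; i++) {
                struct page *page = pages[i];

                if (page) {
                        wait_on_page_locked(page);
                        if (!PageUptodate(page)) {
                                put_page(page);
                                pages[i] = NULL;
                        }
                }
        }
        /* pass 3: copy into the contiguous buffer, zero-filling holes */
        for (i = 0; i < nr; i++, data += PAGE_SIZE) {
                struct page *page = pages[i];

                if (page) {
                        memcpy(data, kmap(page), PAGE_SIZE);
                        kunmap(page);
                        put_page(page);
                } else {
                        memset(data, 0, PAGE_SIZE);
                }
        }
}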
page               35 fs/crypto/bio.c 		struct page *page = bv->bv_page;
page               36 fs/crypto/bio.c 		int ret = fscrypt_decrypt_pagecache_blocks(page, bv->bv_len,
page               39 fs/crypto/bio.c 			SetPageError(page);
page               41 fs/crypto/bio.c 			SetPageUptodate(page);
page               43 fs/crypto/bio.c 			unlock_page(page);
page               76 fs/crypto/bio.c 	struct page *ciphertext_page;
page              117 fs/crypto/crypto.c struct page *fscrypt_alloc_bounce_page(gfp_t gfp_flags)
page              128 fs/crypto/crypto.c void fscrypt_free_bounce_page(struct page *bounce_page)
page              153 fs/crypto/crypto.c 			u64 lblk_num, struct page *src_page,
page              154 fs/crypto/crypto.c 			struct page *dest_page, unsigned int len,
page              217 fs/crypto/crypto.c struct page *fscrypt_encrypt_pagecache_blocks(struct page *page,
page              223 fs/crypto/crypto.c 	const struct inode *inode = page->mapping->host;
page              226 fs/crypto/crypto.c 	struct page *ciphertext_page;
page              227 fs/crypto/crypto.c 	u64 lblk_num = ((u64)page->index << (PAGE_SHIFT - blockbits)) +
page              232 fs/crypto/crypto.c 	if (WARN_ON_ONCE(!PageLocked(page)))
page              244 fs/crypto/crypto.c 					  page, ciphertext_page,
page              252 fs/crypto/crypto.c 	set_page_private(ciphertext_page, (unsigned long)page);
page              274 fs/crypto/crypto.c int fscrypt_encrypt_block_inplace(const struct inode *inode, struct page *page,
page              278 fs/crypto/crypto.c 	return fscrypt_crypt_block(inode, FS_ENCRYPT, lblk_num, page, page,
page              299 fs/crypto/crypto.c int fscrypt_decrypt_pagecache_blocks(struct page *page, unsigned int len,
page              302 fs/crypto/crypto.c 	const struct inode *inode = page->mapping->host;
page              305 fs/crypto/crypto.c 	u64 lblk_num = ((u64)page->index << (PAGE_SHIFT - blockbits)) +
page              310 fs/crypto/crypto.c 	if (WARN_ON_ONCE(!PageLocked(page)))
page              317 fs/crypto/crypto.c 		err = fscrypt_crypt_block(inode, FS_DECRYPT, lblk_num, page,
page              318 fs/crypto/crypto.c 					  page, blocksize, i, GFP_NOFS);
page              342 fs/crypto/crypto.c int fscrypt_decrypt_block_inplace(const struct inode *inode, struct page *page,
page              346 fs/crypto/crypto.c 	return fscrypt_crypt_block(inode, FS_DECRYPT, lblk_num, page, page,
page              237 fs/crypto/fscrypt_private.h 			       struct page *src_page, struct page *dest_page,
page              240 fs/crypto/fscrypt_private.h extern struct page *fscrypt_alloc_bounce_page(gfp_t gfp_flags);
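[Annotation] fs/crypto/crypto.c above implements write-path encryption through a bounce page: fscrypt_encrypt_pagecache_blocks() returns a ciphertext page whose page_private points back at the plaintext pagecache page, while decryption (fscrypt_decrypt_pagecache_blocks) happens in place. A sketch of how a filesystem's writeback path would consume it; demo_encrypt_for_io and its error handling are illustrative only:

static int demo_encrypt_for_io(struct bio *bio, struct page *page)
{
        struct page *ciphertext_page;

        ciphertext_page = fscrypt_encrypt_pagecache_blocks(page, PAGE_SIZE,
                                                           0, GFP_NOFS);
        if (IS_ERR(ciphertext_page))
                return PTR_ERR(ciphertext_page);

        /* the pagecache page stays plaintext; only the bounce page
         * travels in the bio, and page_private(ciphertext_page) points
         * back at the plaintext page for the completion handler */
        if (!bio_add_page(bio, ciphertext_page, PAGE_SIZE, 0)) {
                fscrypt_free_bounce_page(ciphertext_page);
                return -EIO;
        }
        return 0;       /* free the bounce page once the write completes */
}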
page              429 fs/d_path.c    	char *page = __getname();
page              431 fs/d_path.c    	if (!page)
page              440 fs/d_path.c    		char *cwd = page + PATH_MAX;
page              458 fs/d_path.c    		len = PATH_MAX + page - cwd;
page              469 fs/d_path.c    	__putname(page);
page              342 fs/dax.c       		struct page *page = pfn_to_page(pfn);
page              344 fs/dax.c       		WARN_ON_ONCE(page->mapping);
page              345 fs/dax.c       		page->mapping = mapping;
page              346 fs/dax.c       		page->index = index + i++;
page              359 fs/dax.c       		struct page *page = pfn_to_page(pfn);
page              361 fs/dax.c       		WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
page              362 fs/dax.c       		WARN_ON_ONCE(page->mapping && page->mapping != mapping);
page              363 fs/dax.c       		page->mapping = NULL;
page              364 fs/dax.c       		page->index = 0;
page              368 fs/dax.c       static struct page *dax_busy_page(void *entry)
page              373 fs/dax.c       		struct page *page = pfn_to_page(pfn);
page              375 fs/dax.c       		if (page_ref_count(page) > 1)
page              376 fs/dax.c       			return page;
page              389 fs/dax.c       dax_entry_t dax_lock_page(struct page *page)
page              397 fs/dax.c       		struct address_space *mapping = READ_ONCE(page->mapping);
page              416 fs/dax.c       		if (mapping != page->mapping) {
page              420 fs/dax.c       		xas_set(&xas, page->index);
page              436 fs/dax.c       void dax_unlock_page(struct page *page, dax_entry_t cookie)
page              438 fs/dax.c       	struct address_space *mapping = page->mapping;
page              439 fs/dax.c       	XA_STATE(xas, &mapping->i_pages, page->index);
page              576 fs/dax.c       struct page *dax_layout_busy_page(struct address_space *mapping)
page              581 fs/dax.c       	struct page *page = NULL;
page              613 fs/dax.c       			page = dax_busy_page(entry);
page              615 fs/dax.c       		if (page)
page              626 fs/dax.c       	return page;
page              684 fs/dax.c       		sector_t sector, size_t size, struct page *to,
page             1422 fs/dax.c       	struct page *zero_page;
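[Annotation] The fs/dax.c entries show that DAX page busyness is tracked purely by refcount: dax_busy_page()/dax_layout_busy_page() report any page with page_ref_count() > 1 as a live DMA target, and filesystems must block truncate on it. The sketch below is modelled on how XFS waits in xfs_break_dax_layouts(); the wrapper name is hypothetical:

static int demo_break_dax_layouts(struct inode *inode)
{
        struct page *page;

        page = dax_layout_busy_page(inode->i_mapping);
        if (!page)
                return 0;       /* nothing is under DMA; safe to proceed */

        /* sleep until the transient references are dropped */
        return ___wait_var_event(&page->_refcount,
                                 atomic_read(&page->_refcount) == 1,
                                 TASK_INTERRUPTIBLE, 0, 0, schedule());
}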
page               99 fs/direct-io.c 	struct page *cur_page;		/* The page */
page              149 fs/direct-io.c 		struct page *pages[DIO_PAGES];	/* page buffer */
page              175 fs/direct-io.c 		struct page *page = ZERO_PAGE(0);
page              183 fs/direct-io.c 		get_page(page);
page              184 fs/direct-io.c 		dio->pages[0] = page;
page              209 fs/direct-io.c static inline struct page *dio_get_page(struct dio *dio,
page              846 fs/direct-io.c submit_page_section(struct dio *dio, struct dio_submit *sdio, struct page *page,
page              862 fs/direct-io.c 	if (sdio->cur_page == page &&
page              881 fs/direct-io.c 	get_page(page);		/* It is in dio */
page              882 fs/direct-io.c 	sdio->cur_page = page;
page              917 fs/direct-io.c 	struct page *page;
page              938 fs/direct-io.c 	page = ZERO_PAGE(0);
page              939 fs/direct-io.c 	if (submit_page_section(dio, sdio, page, 0, this_chunk_bytes,
page              970 fs/direct-io.c 		struct page *page;
page              973 fs/direct-io.c 		page = dio_get_page(dio, sdio);
page              974 fs/direct-io.c 		if (IS_ERR(page)) {
page              975 fs/direct-io.c 			ret = PTR_ERR(page);
page              996 fs/direct-io.c 					put_page(page);
page             1041 fs/direct-io.c 					put_page(page);
page             1054 fs/direct-io.c 					put_page(page);
page             1057 fs/direct-io.c 				zero_user(page, from, 1 << blkbits);
page             1088 fs/direct-io.c 			ret = submit_page_section(dio, sdio, page,
page             1094 fs/direct-io.c 				put_page(page);
page             1110 fs/direct-io.c 		put_page(page);
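[Annotation] fs/direct-io.c above coalesces adjacent byte ranges of the same page into sdio->cur_page before building a bio, taking a page reference for each deposit, and pads partial filesystem blocks with the shared ZERO_PAGE(0). A condensed form of the coalescing step; the block-contiguity and boundary checks of the real submit_page_section() are dropped, and map_bh is passed in to sidestep the surrounding bookkeeping:

static int demo_submit_section(struct dio *dio, struct dio_submit *sdio,
                               struct page *page, unsigned offset,
                               unsigned len, struct buffer_head *map_bh)
{
        int ret = 0;

        if (sdio->cur_page == page &&
            offset == sdio->cur_page_offset + sdio->cur_page_len) {
                sdio->cur_page_len += len;      /* extend the pending range */
                return 0;
        }
        if (sdio->cur_page) {                   /* flush the previous page */
                ret = dio_send_cur_page(dio, sdio, map_bh);
                put_page(sdio->cur_page);
                sdio->cur_page = NULL;
                if (ret)
                        return ret;
        }
        get_page(page);                         /* "It is in dio" */
        sdio->cur_page = page;
        sdio->cur_page_offset = offset;
        sdio->cur_page_len = len;
        return ret;
}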
page              117 fs/dlm/lowcomms.c 	struct page *rx_page;
page              131 fs/dlm/lowcomms.c 	struct page *page;
page              974 fs/dlm/lowcomms.c 	__free_page(e->page);
page             1408 fs/dlm/lowcomms.c 	entry->page = alloc_page(allocation);
page             1409 fs/dlm/lowcomms.c 	if (!entry->page) {
page             1447 fs/dlm/lowcomms.c 		*ppc = page_address(e->page) + offset;
page             1512 fs/dlm/lowcomms.c 			ret = kernel_sendpage(con->sock, e->page, offset, len,
page              251 fs/ecryptfs/crypto.c 	struct page *pg;
page              374 fs/ecryptfs/crypto.c 				    struct page *page)
page              377 fs/ecryptfs/crypto.c 	       ((loff_t)page->index << PAGE_SHIFT);
page              394 fs/ecryptfs/crypto.c 			struct page *dst_page,
page              395 fs/ecryptfs/crypto.c 			struct page *src_page,
page              452 fs/ecryptfs/crypto.c int ecryptfs_encrypt_page(struct page *page)
page              457 fs/ecryptfs/crypto.c 	struct page *enc_extent_page = NULL;
page              462 fs/ecryptfs/crypto.c 	ecryptfs_inode = page->mapping->host;
page              477 fs/ecryptfs/crypto.c 		rc = crypt_extent(crypt_stat, enc_extent_page, page,
page              486 fs/ecryptfs/crypto.c 	lower_offset = lower_offset_for_page(crypt_stat, page);
page              521 fs/ecryptfs/crypto.c int ecryptfs_decrypt_page(struct page *page)
page              530 fs/ecryptfs/crypto.c 	ecryptfs_inode = page->mapping->host;
page              535 fs/ecryptfs/crypto.c 	lower_offset = lower_offset_for_page(crypt_stat, page);
page              536 fs/ecryptfs/crypto.c 	page_virt = kmap(page);
page              539 fs/ecryptfs/crypto.c 	kunmap(page);
page              550 fs/ecryptfs/crypto.c 		rc = crypt_extent(crypt_stat, page, page,
page             1140 fs/ecryptfs/crypto.c 	struct page *page;
page             1142 fs/ecryptfs/crypto.c 	page = alloc_pages(gfp_mask | __GFP_ZERO, order);
page             1143 fs/ecryptfs/crypto.c 	if (page)
page             1144 fs/ecryptfs/crypto.c 		return (unsigned long) page_address(page);
page               66 fs/ecryptfs/ecryptfs_kernel.h 	struct page *page;
page              587 fs/ecryptfs/ecryptfs_kernel.h int ecryptfs_encrypt_page(struct page *page);
page              588 fs/ecryptfs/ecryptfs_kernel.h int ecryptfs_decrypt_page(struct page *page);
page              671 fs/ecryptfs/ecryptfs_kernel.h 				      struct page *page_for_lower,
page              676 fs/ecryptfs/ecryptfs_kernel.h int ecryptfs_read_lower_page_segment(struct page *page_for_ecryptfs,
page              680 fs/ecryptfs/ecryptfs_kernel.h struct page *ecryptfs_get_locked_page(struct inode *inode, loff_t index);
page               33 fs/ecryptfs/mmap.c struct page *ecryptfs_get_locked_page(struct inode *inode, loff_t index)
page               35 fs/ecryptfs/mmap.c 	struct page *page = read_mapping_page(inode->i_mapping, index, NULL);
page               36 fs/ecryptfs/mmap.c 	if (!IS_ERR(page))
page               37 fs/ecryptfs/mmap.c 		lock_page(page);
page               38 fs/ecryptfs/mmap.c 	return page;
page               51 fs/ecryptfs/mmap.c static int ecryptfs_writepage(struct page *page, struct writeback_control *wbc)
page               55 fs/ecryptfs/mmap.c 	rc = ecryptfs_encrypt_page(page);
page               58 fs/ecryptfs/mmap.c 				"page (upper index [0x%.16lx])\n", page->index);
page               59 fs/ecryptfs/mmap.c 		ClearPageUptodate(page);
page               62 fs/ecryptfs/mmap.c 	SetPageUptodate(page);
page               64 fs/ecryptfs/mmap.c 	unlock_page(page);
page              108 fs/ecryptfs/mmap.c ecryptfs_copy_up_encrypted_with_header(struct page *page,
page              117 fs/ecryptfs/mmap.c 		loff_t view_extent_num = ((((loff_t)page->index)
page              127 fs/ecryptfs/mmap.c 			page_virt = kmap_atomic(page);
page              134 fs/ecryptfs/mmap.c 					page_virt, page->mapping->host);
page              141 fs/ecryptfs/mmap.c 			flush_dcache_page(page);
page              154 fs/ecryptfs/mmap.c 				page, (lower_offset >> PAGE_SHIFT),
page              156 fs/ecryptfs/mmap.c 				crypt_stat->extent_size, page->mapping->host);
page              180 fs/ecryptfs/mmap.c static int ecryptfs_readpage(struct file *file, struct page *page)
page              183 fs/ecryptfs/mmap.c 		&ecryptfs_inode_to_private(page->mapping->host)->crypt_stat;
page              187 fs/ecryptfs/mmap.c 		rc = ecryptfs_read_lower_page_segment(page, page->index, 0,
page              189 fs/ecryptfs/mmap.c 						      page->mapping->host);
page              192 fs/ecryptfs/mmap.c 			rc = ecryptfs_copy_up_encrypted_with_header(page,
page              205 fs/ecryptfs/mmap.c 				page, page->index, 0, PAGE_SIZE,
page              206 fs/ecryptfs/mmap.c 				page->mapping->host);
page              214 fs/ecryptfs/mmap.c 		rc = ecryptfs_decrypt_page(page);
page              223 fs/ecryptfs/mmap.c 		ClearPageUptodate(page);
page              225 fs/ecryptfs/mmap.c 		SetPageUptodate(page);
page              227 fs/ecryptfs/mmap.c 			page->index);
page              228 fs/ecryptfs/mmap.c 	unlock_page(page);
page              235 fs/ecryptfs/mmap.c static int fill_zeros_to_end_of_page(struct page *page, unsigned int to)
page              237 fs/ecryptfs/mmap.c 	struct inode *inode = page->mapping->host;
page              240 fs/ecryptfs/mmap.c 	if ((i_size_read(inode) / PAGE_SIZE) != page->index)
page              245 fs/ecryptfs/mmap.c 	zero_user_segment(page, end_byte_in_page, PAGE_SIZE);
page              267 fs/ecryptfs/mmap.c 			struct page **pagep, void **fsdata)
page              270 fs/ecryptfs/mmap.c 	struct page *page;
page              274 fs/ecryptfs/mmap.c 	page = grab_cache_page_write_begin(mapping, index, flags);
page              275 fs/ecryptfs/mmap.c 	if (!page)
page              277 fs/ecryptfs/mmap.c 	*pagep = page;
page              280 fs/ecryptfs/mmap.c 	if (!PageUptodate(page)) {
page              286 fs/ecryptfs/mmap.c 				page, index, 0, PAGE_SIZE, mapping->host);
page              291 fs/ecryptfs/mmap.c 				ClearPageUptodate(page);
page              294 fs/ecryptfs/mmap.c 				SetPageUptodate(page);
page              298 fs/ecryptfs/mmap.c 					page, crypt_stat);
page              306 fs/ecryptfs/mmap.c 					ClearPageUptodate(page);
page              309 fs/ecryptfs/mmap.c 				SetPageUptodate(page);
page              312 fs/ecryptfs/mmap.c 					page, index, 0, PAGE_SIZE,
page              318 fs/ecryptfs/mmap.c 					ClearPageUptodate(page);
page              321 fs/ecryptfs/mmap.c 				SetPageUptodate(page);
page              325 fs/ecryptfs/mmap.c 			    >= i_size_read(page->mapping->host)) {
page              326 fs/ecryptfs/mmap.c 				zero_user(page, 0, PAGE_SIZE);
page              327 fs/ecryptfs/mmap.c 				SetPageUptodate(page);
page              329 fs/ecryptfs/mmap.c 				rc = ecryptfs_decrypt_page(page);
page              334 fs/ecryptfs/mmap.c 					       __func__, page->index, rc);
page              335 fs/ecryptfs/mmap.c 					ClearPageUptodate(page);
page              338 fs/ecryptfs/mmap.c 				SetPageUptodate(page);
page              345 fs/ecryptfs/mmap.c 		if (prev_page_end_size > i_size_read(page->mapping->host)) {
page              361 fs/ecryptfs/mmap.c 		zero_user(page, 0, PAGE_SIZE);
page              364 fs/ecryptfs/mmap.c 		unlock_page(page);
page              365 fs/ecryptfs/mmap.c 		put_page(page);
page              465 fs/ecryptfs/mmap.c 			struct page *page, void *fsdata)
page              478 fs/ecryptfs/mmap.c 		rc = ecryptfs_write_lower_page_segment(ecryptfs_inode, page, 0,
page              487 fs/ecryptfs/mmap.c 	if (!PageUptodate(page)) {
page              492 fs/ecryptfs/mmap.c 		SetPageUptodate(page);
page              495 fs/ecryptfs/mmap.c 	rc = fill_zeros_to_end_of_page(page, to);
page              501 fs/ecryptfs/mmap.c 	rc = ecryptfs_encrypt_page(page);
page              520 fs/ecryptfs/mmap.c 	unlock_page(page);
page              521 fs/ecryptfs/mmap.c 	put_page(page);
page               58 fs/ecryptfs/read_write.c 				      struct page *page_for_lower,
page               96 fs/ecryptfs/read_write.c 	struct page *ecryptfs_page;
page              244 fs/ecryptfs/read_write.c int ecryptfs_read_lower_page_segment(struct page *page_for_ecryptfs,
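[Annotation] The fs/ecryptfs entries show the per-extent page cipher: encryption crypts each extent of the upper page into a separate enc_extent_page and writes that at lower_offset_for_page(), while decryption reads the lower bytes into the upper page and crypts in place. A condensed sketch of the decrypt side; the DECRYPT op constant and crypt_extent()'s exact signature are internals of fs/ecryptfs/crypto.c and assumed here:

static int demo_decrypt_page(struct page *page)
{
        struct inode *inode = page->mapping->host;
        struct ecryptfs_crypt_stat *crypt_stat =
                &ecryptfs_inode_to_private(inode)->crypt_stat;
        loff_t lower_offset = lower_offset_for_page(crypt_stat, page);
        unsigned long extent;
        char *page_virt;
        int rc;

        /* pull the ciphertext from the lower file into the upper page */
        page_virt = kmap(page);
        rc = ecryptfs_read_lower(page_virt, lower_offset, PAGE_SIZE, inode);
        kunmap(page);
        if (rc < 0)
                return rc;

        /* then decrypt extent by extent, in place */
        for (extent = 0; extent < PAGE_SIZE / crypt_stat->extent_size;
             extent++) {
                rc = crypt_extent(crypt_stat, page, page, extent, DECRYPT);
                if (rc)
                        return rc;
        }
        return 0;
}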
page               17 fs/efs/inode.c static int efs_readpage(struct file *file, struct page *page)
page               19 fs/efs/inode.c 	return block_read_full_page(page,efs_get_block);
page               15 fs/efs/symlink.c static int efs_symlink_readpage(struct file *file, struct page *page)
page               17 fs/efs/symlink.c 	char *link = page_address(page);
page               19 fs/efs/symlink.c 	struct inode * inode = page->mapping->host;
page               42 fs/efs/symlink.c 	SetPageUptodate(page);
page               43 fs/efs/symlink.c 	unlock_page(page);
page               46 fs/efs/symlink.c 	SetPageError(page);
page               47 fs/efs/symlink.c 	unlock_page(page);
page               19 fs/erofs/compress.h 	struct page **in, **out;
page               37 fs/erofs/compress.h static inline bool z_erofs_page_is_staging(struct page *page)
page               39 fs/erofs/compress.h 	return page->mapping == Z_EROFS_MAPPING_STAGING;
page               43 fs/erofs/compress.h 					   struct page *page)
page               45 fs/erofs/compress.h 	if (!z_erofs_page_is_staging(page))
page               49 fs/erofs/compress.h 	if (page_ref_count(page) > 1)
page               50 fs/erofs/compress.h 		put_page(page);
page               52 fs/erofs/compress.h 		list_add(&page->lru, pagepool);
page               19 fs/erofs/data.c 		struct page *page = bvec->bv_page;
page               22 fs/erofs/data.c 		DBG_BUGON(PageUptodate(page));
page               25 fs/erofs/data.c 			SetPageError(page);
page               27 fs/erofs/data.c 			SetPageUptodate(page);
page               29 fs/erofs/data.c 		unlock_page(page);
page               35 fs/erofs/data.c struct page *erofs_get_meta_page(struct super_block *sb, erofs_blk_t blkaddr)
page               38 fs/erofs/data.c 	struct page *page;
page               40 fs/erofs/data.c 	page = read_cache_page_gfp(mapping, blkaddr,
page               43 fs/erofs/data.c 	if (!IS_ERR(page))
page               44 fs/erofs/data.c 		lock_page(page);
page               45 fs/erofs/data.c 	return page;
page              129 fs/erofs/data.c 					      struct page *page,
page              136 fs/erofs/data.c 	erofs_off_t current_block = (erofs_off_t)page->index;
page              141 fs/erofs/data.c 	if (PageUptodate(page)) {
page              168 fs/erofs/data.c 			zero_user_segment(page, 0, PAGE_SIZE);
page              169 fs/erofs/data.c 			SetPageUptodate(page);
page              184 fs/erofs/data.c 			struct page *ipage;
page              196 fs/erofs/data.c 			vto = kmap_atomic(page);
page              201 fs/erofs/data.c 			flush_dcache_page(page);
page              203 fs/erofs/data.c 			SetPageUptodate(page);
page              230 fs/erofs/data.c 	err = bio_add_page(bio, page, PAGE_SIZE, 0);
page              249 fs/erofs/data.c 		SetPageError(page);
page              250 fs/erofs/data.c 		ClearPageUptodate(page);
page              253 fs/erofs/data.c 	unlock_page(page);
page              266 fs/erofs/data.c static int erofs_raw_access_readpage(struct file *file, struct page *page)
page              271 fs/erofs/data.c 	trace_erofs_readpage(page, true);
page              273 fs/erofs/data.c 	bio = erofs_read_raw_page(NULL, page->mapping,
page              274 fs/erofs/data.c 				  page, &last_block, 1, false);
page              291 fs/erofs/data.c 	struct page *page = list_last_entry(pages, struct page, lru);
page              293 fs/erofs/data.c 	trace_erofs_readpages(mapping->host, page, nr_pages, true);
page              296 fs/erofs/data.c 		page = list_entry(pages->prev, struct page, lru);
page              298 fs/erofs/data.c 		prefetchw(&page->flags);
page              299 fs/erofs/data.c 		list_del(&page->lru);
page              301 fs/erofs/data.c 		if (!add_to_page_cache_lru(page, mapping, page->index, gfp)) {
page              302 fs/erofs/data.c 			bio = erofs_read_raw_page(bio, mapping, page,
page              308 fs/erofs/data.c 				       __func__, page->index,
page              316 fs/erofs/data.c 		put_page(page);
page               36 fs/erofs/decompressor.c 	struct page *availables[LZ4_MAX_DISTANCE_PAGES] = { NULL };
page               44 fs/erofs/decompressor.c 		struct page *const page = rq->out[i];
page               45 fs/erofs/decompressor.c 		struct page *victim;
page               57 fs/erofs/decompressor.c 		if (page) {
page               60 fs/erofs/decompressor.c 				if (kaddr + PAGE_SIZE == page_address(page))
page               65 fs/erofs/decompressor.c 				kaddr = page_address(page);
page               93 fs/erofs/decompressor.c 	struct page **in = rq->in;
page              192 fs/erofs/decompressor.c static void copy_from_pcpubuf(struct page **out, const char *dst,
page              201 fs/erofs/decompressor.c 		struct page *const page = *out++;
page              203 fs/erofs/decompressor.c 		if (page) {
page              204 fs/erofs/decompressor.c 			char *buf = kmap_atomic(page);
page               79 fs/erofs/dir.c 		struct page *dentry_page;
page              172 fs/erofs/inode.c 	struct page *page;
page              187 fs/erofs/inode.c 	page = erofs_get_meta_page(sb, blkaddr);
page              189 fs/erofs/inode.c 	if (IS_ERR(page)) {
page              191 fs/erofs/inode.c 			  vi->nid, PTR_ERR(page));
page              192 fs/erofs/inode.c 		return PTR_ERR(page);
page              195 fs/erofs/inode.c 	DBG_BUGON(!PageUptodate(page));
page              196 fs/erofs/inode.c 	data = page_address(page);
page              237 fs/erofs/inode.c 	unlock_page(page);
page              238 fs/erofs/inode.c 	put_page(page);
page              330 fs/erofs/internal.h 	struct page *mpage;
page              353 fs/erofs/internal.h struct page *erofs_get_meta_page(struct super_block *sb, erofs_blk_t blkaddr);
page              385 fs/erofs/internal.h struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp, bool nofail);
page              418 fs/erofs/internal.h 				  struct page *page);
page               90 fs/erofs/namei.c static struct page *find_target_block_classic(struct inode *dir,
page               97 fs/erofs/namei.c 	struct page *candidate = ERR_PTR(-ENOENT);
page              105 fs/erofs/namei.c 		struct page *page = read_mapping_page(mapping, mid, NULL);
page              107 fs/erofs/namei.c 		if (!IS_ERR(page)) {
page              108 fs/erofs/namei.c 			struct erofs_dirent *de = kmap_atomic(page);
page              118 fs/erofs/namei.c 				put_page(page);
page              123 fs/erofs/namei.c 				page = ERR_PTR(-EFSCORRUPTED);
page              150 fs/erofs/namei.c 				candidate = page;
page              153 fs/erofs/namei.c 				put_page(page);
page              163 fs/erofs/namei.c 		return page;
page              173 fs/erofs/namei.c 	struct page *page;
page              185 fs/erofs/namei.c 	page = find_target_block_classic(dir, &qn, &ndirents);
page              187 fs/erofs/namei.c 	if (IS_ERR(page))
page              188 fs/erofs/namei.c 		return PTR_ERR(page);
page              190 fs/erofs/namei.c 	data = kmap_atomic(page);
page              203 fs/erofs/namei.c 	put_page(page);
page              101 fs/erofs/super.c 	struct page *page;
page              107 fs/erofs/super.c 	page = read_mapping_page(sb->s_bdev->bd_inode->i_mapping, 0, NULL);
page              108 fs/erofs/super.c 	if (IS_ERR(page)) {
page              110 fs/erofs/super.c 		return PTR_ERR(page);
page              115 fs/erofs/super.c 	data = kmap_atomic(page);
page              159 fs/erofs/super.c 	put_page(page);
page              296 fs/erofs/super.c static int erofs_managed_cache_releasepage(struct page *page, gfp_t gfp_mask)
page              299 fs/erofs/super.c 	struct address_space *const mapping = page->mapping;
page              301 fs/erofs/super.c 	DBG_BUGON(!PageLocked(page));
page              304 fs/erofs/super.c 	if (PagePrivate(page))
page              305 fs/erofs/super.c 		ret = erofs_try_to_free_cached_page(mapping, page);
page              310 fs/erofs/super.c static void erofs_managed_cache_invalidatepage(struct page *page,
page              316 fs/erofs/super.c 	DBG_BUGON(!PageLocked(page));
page              322 fs/erofs/super.c 		while (!erofs_managed_cache_releasepage(page, GFP_NOFS))
page               10 fs/erofs/utils.c struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp, bool nofail)
page               12 fs/erofs/utils.c 	struct page *page;
page               15 fs/erofs/utils.c 		page = lru_to_page(pool);
page               16 fs/erofs/utils.c 		DBG_BUGON(page_ref_count(page) != 1);
page               17 fs/erofs/utils.c 		list_del(&page->lru);
page               19 fs/erofs/utils.c 		page = alloc_pages(gfp | (nofail ? __GFP_NOFAIL : 0), 0);
page               21 fs/erofs/utils.c 	return page;
page               12 fs/erofs/xattr.c 	struct page *page;
page               23 fs/erofs/xattr.c 		kunmap(it->page);
page               27 fs/erofs/xattr.c 	unlock_page(it->page);
page               28 fs/erofs/xattr.c 	put_page(it->page);
page               33 fs/erofs/xattr.c 	if (!it->page)
page               92 fs/erofs/xattr.c 	it.page = erofs_get_meta_page(sb, it.blkaddr);
page               93 fs/erofs/xattr.c 	if (IS_ERR(it.page)) {
page               94 fs/erofs/xattr.c 		ret = PTR_ERR(it.page);
page               99 fs/erofs/xattr.c 	it.kaddr = kmap(it.page);
page              122 fs/erofs/xattr.c 			it.page = erofs_get_meta_page(sb, ++it.blkaddr);
page              123 fs/erofs/xattr.c 			if (IS_ERR(it.page)) {
page              126 fs/erofs/xattr.c 				ret = PTR_ERR(it.page);
page              130 fs/erofs/xattr.c 			it.kaddr = kmap_atomic(it.page);
page              172 fs/erofs/xattr.c 	it->page = erofs_get_meta_page(it->sb, it->blkaddr);
page              173 fs/erofs/xattr.c 	if (IS_ERR(it->page)) {
page              174 fs/erofs/xattr.c 		int err = PTR_ERR(it->page);
page              176 fs/erofs/xattr.c 		it->page = NULL;
page              180 fs/erofs/xattr.c 	it->kaddr = kmap_atomic(it->page);
page              203 fs/erofs/xattr.c 	it->page = erofs_get_meta_page(inode->i_sb, it->blkaddr);
page              204 fs/erofs/xattr.c 	if (IS_ERR(it->page))
page              205 fs/erofs/xattr.c 		return PTR_ERR(it->page);
page              207 fs/erofs/xattr.c 	it->kaddr = kmap_atomic(it->page);
page              405 fs/erofs/xattr.c 			it->it.page = erofs_get_meta_page(sb, blkaddr);
page              406 fs/erofs/xattr.c 			if (IS_ERR(it->it.page))
page              407 fs/erofs/xattr.c 				return PTR_ERR(it->it.page);
page              409 fs/erofs/xattr.c 			it->it.kaddr = kmap_atomic(it->it.page);
page              627 fs/erofs/xattr.c 			it->it.page = erofs_get_meta_page(sb, blkaddr);
page              628 fs/erofs/xattr.c 			if (IS_ERR(it->it.page))
page              629 fs/erofs/xattr.c 				return PTR_ERR(it->it.page);
page              631 fs/erofs/xattr.c 			it->it.kaddr = kmap_atomic(it->it.page);
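[Annotation] fs/erofs/xattr.c above walks xattr metadata with a small iterator that pins at most one meta page: each step unmaps and releases the current page before erofs_get_meta_page() fetches the next block. Condensed, assuming the iterator type is the file's struct xattr_iter; the helper name is hypothetical:

static int demo_xattr_iter_next_block(struct xattr_iter *it)
{
        if (it->page) {
                /* release the current meta page before sleeping in
                 * erofs_get_meta_page() */
                kunmap_atomic(it->kaddr);
                unlock_page(it->page);
                put_page(it->page);
        }
        it->page = erofs_get_meta_page(it->sb, ++it->blkaddr);
        if (IS_ERR(it->page)) {
                int err = PTR_ERR(it->page);

                it->page = NULL;
                return err;
        }
        it->kaddr = kmap_atomic(it->page);
        it->ofs = 0;
        return 0;
}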
page               31 fs/erofs/zdata.c #define tag_compressed_page_justfound(page) \
page               32 fs/erofs/zdata.c 	tagptr_fold(compressed_page_t, page, 1)
page              136 fs/erofs/zdata.c 	struct page **compressedpages;
page              161 fs/erofs/zdata.c static struct page *z_pagemap_global[Z_EROFS_VMAP_GLOBAL_PAGES];
page              171 fs/erofs/zdata.c 	struct page **pages = clt->compressedpages;
page              179 fs/erofs/zdata.c 		struct page *page;
page              186 fs/erofs/zdata.c 		page = find_get_page(mc, index);
page              188 fs/erofs/zdata.c 		if (page) {
page              189 fs/erofs/zdata.c 			t = tag_compressed_page_justfound(page);
page              202 fs/erofs/zdata.c 		if (page)
page              203 fs/erofs/zdata.c 			put_page(page);
page              225 fs/erofs/zdata.c 		struct page *page = pcl->compressed_pages[i];
page              227 fs/erofs/zdata.c 		if (!page)
page              231 fs/erofs/zdata.c 		if (!trylock_page(page))
page              234 fs/erofs/zdata.c 		if (page->mapping != mapping)
page              239 fs/erofs/zdata.c 		set_page_private(page, 0);
page              240 fs/erofs/zdata.c 		ClearPagePrivate(page);
page              242 fs/erofs/zdata.c 		unlock_page(page);
page              243 fs/erofs/zdata.c 		put_page(page);
page              249 fs/erofs/zdata.c 				  struct page *page)
page              251 fs/erofs/zdata.c 	struct z_erofs_pcluster *const pcl = (void *)page_private(page);
page              259 fs/erofs/zdata.c 			if (pcl->compressed_pages[i] == page) {
page              268 fs/erofs/zdata.c 			ClearPagePrivate(page);
page              269 fs/erofs/zdata.c 			put_page(page);
page              277 fs/erofs/zdata.c 					  struct page *page)
page              283 fs/erofs/zdata.c 		if (!cmpxchg(clt->compressedpages++, NULL, page))
page              291 fs/erofs/zdata.c 			       struct page *page,
page              300 fs/erofs/zdata.c 	    z_erofs_try_inplace_io(clt, page))
page              304 fs/erofs/zdata.c 				      page, type, &occupied);
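
tag_compressed_page_justfound above folds a one-bit flag into a page pointer: struct page pointers are well aligned, so the low bits are free to carry state. A sketch of that tagged-pointer trick, with illustrative names patterned on the tagptr helpers:

#include <stdint.h>

typedef uintptr_t tagptr_t;

static inline tagptr_t tagptr_fold(void *p, unsigned int tag)
{
	return (uintptr_t)p | tag;               /* stash the flag in bit 0 */
}

static inline void *tagptr_unfold_ptr(tagptr_t t)
{
	return (void *)(t & ~(uintptr_t)1);      /* strip the flag back off */
}

static inline unsigned int tagptr_unfold_tags(tagptr_t t)
{
	return (unsigned int)(t & 1);            /* 1 == "just found in cache" */
}
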
page              546 fs/erofs/zdata.c static inline struct page *__stagingpage_alloc(struct list_head *pagepool,
page              549 fs/erofs/zdata.c 	struct page *page = erofs_allocpage(pagepool, gfp, true);
page              551 fs/erofs/zdata.c 	page->mapping = Z_EROFS_MAPPING_STAGING;
page              552 fs/erofs/zdata.c 	return page;
page              570 fs/erofs/zdata.c 				struct page *page,
page              577 fs/erofs/zdata.c 	const loff_t offset = page_offset(page);
page              586 fs/erofs/zdata.c 	z_erofs_onlinepage_init(page);
page              643 fs/erofs/zdata.c 		zero_user_segment(page, cur, end);
page              657 fs/erofs/zdata.c 	err = z_erofs_attach_page(clt, page, page_type);
page              660 fs/erofs/zdata.c 		struct page *const newpage =
page              672 fs/erofs/zdata.c 	index = page->index - (map->m_la >> PAGE_SHIFT);
page              674 fs/erofs/zdata.c 	z_erofs_onlinepage_fixup(page, index, true);
page              689 fs/erofs/zdata.c 	z_erofs_onlinepage_endio(page);
page              692 fs/erofs/zdata.c 		  __func__, page, spiltted, map->m_llen);
page              697 fs/erofs/zdata.c 	SetPageError(page);
page              729 fs/erofs/zdata.c 		struct page *page = bvec->bv_page;
page              732 fs/erofs/zdata.c 		DBG_BUGON(PageUptodate(page));
page              733 fs/erofs/zdata.c 		DBG_BUGON(!page->mapping);
page              735 fs/erofs/zdata.c 		if (!sbi && !z_erofs_page_is_staging(page))
page              736 fs/erofs/zdata.c 			sbi = EROFS_SB(page->mapping->host->i_sb);
page              740 fs/erofs/zdata.c 			cachemngd = erofs_page_is_managed(sbi, page);
page              743 fs/erofs/zdata.c 			SetPageError(page);
page              745 fs/erofs/zdata.c 			SetPageUptodate(page);
page              748 fs/erofs/zdata.c 			unlock_page(page);
page              763 fs/erofs/zdata.c 	struct page *pages_onstack[Z_EROFS_VMAP_ONSTACK_PAGES];
page              764 fs/erofs/zdata.c 	struct page **pages, **compressed_pages, *page;
page              789 fs/erofs/zdata.c 		pages = kvmalloc_array(nr_pages, sizeof(struct page *),
page              809 fs/erofs/zdata.c 		page = z_erofs_pagevec_dequeue(&ctor, &page_type);
page              812 fs/erofs/zdata.c 		DBG_BUGON(!page);
page              813 fs/erofs/zdata.c 		DBG_BUGON(!page->mapping);
page              815 fs/erofs/zdata.c 		if (z_erofs_put_stagingpage(pagepool, page))
page              821 fs/erofs/zdata.c 			pagenr = z_erofs_onlinepage_index(page);
page              835 fs/erofs/zdata.c 		pages[pagenr] = page;
page              845 fs/erofs/zdata.c 		page = compressed_pages[i];
page              848 fs/erofs/zdata.c 		DBG_BUGON(!page);
page              849 fs/erofs/zdata.c 		DBG_BUGON(!page->mapping);
page              851 fs/erofs/zdata.c 		if (!z_erofs_page_is_staging(page)) {
page              852 fs/erofs/zdata.c 			if (erofs_page_is_managed(sbi, page)) {
page              853 fs/erofs/zdata.c 				if (!PageUptodate(page))
page              862 fs/erofs/zdata.c 			pagenr = z_erofs_onlinepage_index(page);
page              871 fs/erofs/zdata.c 			pages[pagenr] = page;
page              877 fs/erofs/zdata.c 		if (PageError(page)) {
page              878 fs/erofs/zdata.c 			DBG_BUGON(PageUptodate(page));
page              910 fs/erofs/zdata.c 		page = compressed_pages[i];
page              912 fs/erofs/zdata.c 		if (erofs_page_is_managed(sbi, page))
page              916 fs/erofs/zdata.c 		(void)z_erofs_put_stagingpage(pagepool, page);
page              922 fs/erofs/zdata.c 		page = pages[i];
page              923 fs/erofs/zdata.c 		if (!page)
page              926 fs/erofs/zdata.c 		DBG_BUGON(!page->mapping);
page              929 fs/erofs/zdata.c 		if (z_erofs_put_stagingpage(pagepool, page))
page              933 fs/erofs/zdata.c 			SetPageError(page);
page              935 fs/erofs/zdata.c 		z_erofs_onlinepage_endio(page);
page              991 fs/erofs/zdata.c static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl,
page             1003 fs/erofs/zdata.c 	struct page *oldpage, *page;
page             1009 fs/erofs/zdata.c 	page = READ_ONCE(pcl->compressed_pages[nr]);
page             1010 fs/erofs/zdata.c 	oldpage = page;
page             1012 fs/erofs/zdata.c 	if (!page)
page             1019 fs/erofs/zdata.c 	if (!nocache && page == PAGE_UNALLOCATED) {
page             1025 fs/erofs/zdata.c 	t = tagptr_init(compressed_page_t, page);
page             1027 fs/erofs/zdata.c 	page = tagptr_unfold_ptr(t);
page             1029 fs/erofs/zdata.c 	mapping = READ_ONCE(page->mapping);
page             1040 fs/erofs/zdata.c 		DBG_BUGON(!PageLocked(page));
page             1041 fs/erofs/zdata.c 		DBG_BUGON(PageUptodate(page));
page             1054 fs/erofs/zdata.c 	lock_page(page);
page             1057 fs/erofs/zdata.c 	DBG_BUGON(justfound && PagePrivate(page));
page             1060 fs/erofs/zdata.c 	if (page->mapping == mc) {
page             1061 fs/erofs/zdata.c 		WRITE_ONCE(pcl->compressed_pages[nr], page);
page             1063 fs/erofs/zdata.c 		ClearPageError(page);
page             1064 fs/erofs/zdata.c 		if (!PagePrivate(page)) {
page             1073 fs/erofs/zdata.c 			set_page_private(page, (unsigned long)pcl);
page             1074 fs/erofs/zdata.c 			SetPagePrivate(page);
page             1078 fs/erofs/zdata.c 		if (PageUptodate(page)) {
page             1079 fs/erofs/zdata.c 			unlock_page(page);
page             1080 fs/erofs/zdata.c 			page = NULL;
page             1089 fs/erofs/zdata.c 	DBG_BUGON(page->mapping);
page             1093 fs/erofs/zdata.c 	unlock_page(page);
page             1094 fs/erofs/zdata.c 	put_page(page);
page             1096 fs/erofs/zdata.c 	page = __stagingpage_alloc(pagepool, gfp);
page             1097 fs/erofs/zdata.c 	if (oldpage != cmpxchg(&pcl->compressed_pages[nr], oldpage, page)) {
page             1098 fs/erofs/zdata.c 		list_add(&page->lru, pagepool);
page             1104 fs/erofs/zdata.c 	if (add_to_page_cache_lru(page, mc, index + nr, gfp)) {
page             1105 fs/erofs/zdata.c 		page->mapping = Z_EROFS_MAPPING_STAGING;
page             1109 fs/erofs/zdata.c 	set_page_private(page, (unsigned long)pcl);
page             1110 fs/erofs/zdata.c 	SetPagePrivate(page);
page             1112 fs/erofs/zdata.c 	return page;
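
pickup_page_for_submission above installs a freshly allocated page into pcl->compressed_pages[nr] with cmpxchg; if a concurrent path wins the slot first, the loser recycles its page back into the pool and retries, exactly as the list_add into pagepool shows. A userspace model of that install race (names illustrative):

#include <stdatomic.h>
#include <stdlib.h>

static void *install_page(_Atomic(void *) *slot, void *oldpage)
{
	void *page = malloc(4096);                /* candidate staging page */
	if (!atomic_compare_exchange_strong(slot, &oldpage, page)) {
		free(page);                       /* lost the race: recycle it */
		return NULL;                      /* caller re-reads the slot and retries */
	}
	return page;
}
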
page             1233 fs/erofs/zdata.c 		struct page *page;
page             1253 fs/erofs/zdata.c 		page = pickup_page_for_submission(pcl, i, pagepool,
page             1256 fs/erofs/zdata.c 		if (!page) {
page             1281 fs/erofs/zdata.c 		err = bio_add_page(bio, page, PAGE_SIZE, 0);
page             1333 fs/erofs/zdata.c 					     struct page *page)
page             1335 fs/erofs/zdata.c 	struct inode *const inode = page->mapping->host;
page             1340 fs/erofs/zdata.c 	trace_erofs_readpage(page, false);
page             1342 fs/erofs/zdata.c 	f.headoffset = (erofs_off_t)page->index << PAGE_SHIFT;
page             1344 fs/erofs/zdata.c 	err = z_erofs_do_read_page(&f, page, &pagepool);
page             1378 fs/erofs/zdata.c 	struct page *head = NULL;
page             1387 fs/erofs/zdata.c 		struct page *page = lru_to_page(pages);
page             1389 fs/erofs/zdata.c 		prefetchw(&page->flags);
page             1390 fs/erofs/zdata.c 		list_del(&page->lru);
page             1397 fs/erofs/zdata.c 		sync &= !(PageReadahead(page) && !head);
page             1399 fs/erofs/zdata.c 		if (add_to_page_cache_lru(page, mapping, page->index, gfp)) {
page             1400 fs/erofs/zdata.c 			list_add(&page->lru, &pagepool);
page             1404 fs/erofs/zdata.c 		set_page_private(page, (unsigned long)head);
page             1405 fs/erofs/zdata.c 		head = page;
page             1409 fs/erofs/zdata.c 		struct page *page = head;
page             1413 fs/erofs/zdata.c 		head = (void *)page_private(page);
page             1415 fs/erofs/zdata.c 		err = z_erofs_do_read_page(&f, page, &pagepool);
page             1419 fs/erofs/zdata.c 				  page->index, EROFS_I(inode)->nid);
page             1420 fs/erofs/zdata.c 		put_page(page);
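
The ->readpages path above threads pages into a LIFO chain through page_private instead of keeping a separate list, then pops them one by one for z_erofs_do_read_page. A minimal model of that linking trick (illustrative types):

struct rpage {
	unsigned long private;    /* reused as the "next" link, as in the listing */
};

static struct rpage *head;

static void push_page(struct rpage *p)
{
	p->private = (unsigned long)head;
	head = p;
}

static struct rpage *pop_page(void)
{
	struct rpage *p = head;
	if (p)
		head = (struct rpage *)p->private;
	return p;
}
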
page               63 fs/erofs/zdata.h 	struct page *compressed_pages[Z_EROFS_CLUSTER_MAX_PAGES];
page              104 fs/erofs/zdata.h 					 struct page *page)
page              106 fs/erofs/zdata.h 	return page->mapping == MNGD_MAPPING(sbi);
page              125 fs/erofs/zdata.h static inline unsigned int z_erofs_onlinepage_index(struct page *page)
page              129 fs/erofs/zdata.h 	DBG_BUGON(!PagePrivate(page));
page              130 fs/erofs/zdata.h 	u.v = &page_private(page);
page              135 fs/erofs/zdata.h static inline void z_erofs_onlinepage_init(struct page *page)
page              143 fs/erofs/zdata.h 	set_page_private(page, u.v);
page              145 fs/erofs/zdata.h 	SetPagePrivate(page);
page              148 fs/erofs/zdata.h static inline void z_erofs_onlinepage_fixup(struct page *page,
page              153 fs/erofs/zdata.h 	p = &page_private(page);
page              170 fs/erofs/zdata.h static inline void z_erofs_onlinepage_endio(struct page *page)
page              175 fs/erofs/zdata.h 	DBG_BUGON(!PagePrivate(page));
page              176 fs/erofs/zdata.h 	u.v = &page_private(page);
page              180 fs/erofs/zdata.h 		ClearPagePrivate(page);
page              181 fs/erofs/zdata.h 		if (!PageError(page))
page              182 fs/erofs/zdata.h 			SetPageUptodate(page);
page              183 fs/erofs/zdata.h 		unlock_page(page);
page              185 fs/erofs/zdata.h 	erofs_dbg("%s, page %p value %x", __func__, page, atomic_read(u.o));
page              189 fs/erofs/zdata.h 	min_t(unsigned int, THREAD_SIZE / 8 / sizeof(struct page *), 96U)
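
The z_erofs_onlinepage_* helpers above pack an atomic count of outstanding sub-requests into page_private; each completing I/O decrements it, and whichever completer reaches zero sets Uptodate and unlocks the page. A userspace model of that countdown (illustrative):

#include <stdatomic.h>
#include <stdio.h>

struct opage {
	atomic_int pending;       /* stands in for the packed page_private count */
};

static void onlinepage_endio(struct opage *p)
{
	if (atomic_fetch_sub(&p->pending, 1) == 1)
		printf("last sub-I/O done: mark uptodate, unlock\n");
}
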
page               35 fs/erofs/zmap.c 	struct page *page;
page               53 fs/erofs/zmap.c 	page = erofs_get_meta_page(sb, erofs_blknr(pos));
page               54 fs/erofs/zmap.c 	if (IS_ERR(page)) {
page               55 fs/erofs/zmap.c 		err = PTR_ERR(page);
page               59 fs/erofs/zmap.c 	kaddr = kmap_atomic(page);
page               89 fs/erofs/zmap.c 	unlock_page(page);
page               90 fs/erofs/zmap.c 	put_page(page);
page              114 fs/erofs/zmap.c 	struct page *mpage = map->mpage;
page               31 fs/erofs/zpvec.h 	struct page *curr, *next;
page               49 fs/erofs/zpvec.h static inline struct page *
page               74 fs/erofs/zpvec.h 	struct page *next = z_erofs_pagevec_ctor_next_page(ctor, ctor->nr);
page               83 fs/erofs/zpvec.h 	ctor->nr = PAGE_SIZE / sizeof(struct page *);
page              109 fs/erofs/zpvec.h 					   struct page *page,
page              127 fs/erofs/zpvec.h 		ctor->next = page;
page              130 fs/erofs/zpvec.h 	ctor->pages[ctor->index++] = tagptr_fold(erofs_vtptr_t, page, type);
page              134 fs/erofs/zpvec.h static inline struct page *
page              195 fs/exec.c      static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
page              198 fs/exec.c      	struct page *page;
page              218 fs/exec.c      			&page, NULL, NULL);
page              225 fs/exec.c      	return page;
page              228 fs/exec.c      static void put_arg_page(struct page *page)
page              230 fs/exec.c      	put_page(page);
page              238 fs/exec.c      		struct page *page)
page              240 fs/exec.c      	flush_cache_page(bprm->vma, pos, page_to_pfn(page));
page              299 fs/exec.c      static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
page              302 fs/exec.c      	struct page *page;
page              304 fs/exec.c      	page = bprm->page[pos / PAGE_SIZE];
page              305 fs/exec.c      	if (!page && write) {
page              306 fs/exec.c      		page = alloc_page(GFP_HIGHUSER|__GFP_ZERO);
page              307 fs/exec.c      		if (!page)
page              309 fs/exec.c      		bprm->page[pos / PAGE_SIZE] = page;
page              312 fs/exec.c      	return page;
page              315 fs/exec.c      static void put_arg_page(struct page *page)
page              321 fs/exec.c      	if (bprm->page[i]) {
page              322 fs/exec.c      		__free_page(bprm->page[i]);
page              323 fs/exec.c      		bprm->page[i] = NULL;
page              336 fs/exec.c      		struct page *page)
page              503 fs/exec.c      	struct page *kmapped_page = NULL;
page              558 fs/exec.c      				struct page *page;
page              560 fs/exec.c      				page = get_arg_page(bprm, pos, 1);
page              561 fs/exec.c      				if (!page) {
page              571 fs/exec.c      				kmapped_page = page;
page              821 fs/exec.c      		char *src = kmap(bprm->page[index]) + offset;
page              825 fs/exec.c      		kunmap(bprm->page[index]);
page             1603 fs/exec.c      	struct page *page;
page             1610 fs/exec.c      		page = get_arg_page(bprm, bprm->p, 0);
page             1611 fs/exec.c      		if (!page) {
page             1615 fs/exec.c      		kaddr = kmap_atomic(page);
page             1622 fs/exec.c      		put_arg_page(page);
page               69 fs/ext2/dir.c  static inline void ext2_put_page(struct page *page)
page               71 fs/ext2/dir.c  	kunmap(page);
page               72 fs/ext2/dir.c  	put_page(page);
page               90 fs/ext2/dir.c  static int ext2_commit_chunk(struct page *page, loff_t pos, unsigned len)
page               92 fs/ext2/dir.c  	struct address_space *mapping = page->mapping;
page               97 fs/ext2/dir.c  	block_write_end(NULL, mapping, pos, len, len, page, NULL);
page              105 fs/ext2/dir.c  		err = write_one_page(page);
page              109 fs/ext2/dir.c  		unlock_page(page);
page              115 fs/ext2/dir.c  static bool ext2_check_page(struct page *page, int quiet)
page              117 fs/ext2/dir.c  	struct inode *dir = page->mapping->host;
page              120 fs/ext2/dir.c  	char *kaddr = page_address(page);
page              127 fs/ext2/dir.c  	if ((dir->i_size >> PAGE_SHIFT) == page->index) {
page              152 fs/ext2/dir.c  	SetPageChecked(page);
page              181 fs/ext2/dir.c  			dir->i_ino, error, (page->index<<PAGE_SHIFT)+offs,
page              191 fs/ext2/dir.c  			dir->i_ino, (page->index<<PAGE_SHIFT)+offs,
page              195 fs/ext2/dir.c  	SetPageError(page);
page              199 fs/ext2/dir.c  static struct page * ext2_get_page(struct inode *dir, unsigned long n,
page              203 fs/ext2/dir.c  	struct page *page = read_mapping_page(mapping, n, NULL);
page              204 fs/ext2/dir.c  	if (!IS_ERR(page)) {
page              205 fs/ext2/dir.c  		kmap(page);
page              206 fs/ext2/dir.c  		if (unlikely(!PageChecked(page))) {
page              207 fs/ext2/dir.c  			if (PageError(page) || !ext2_check_page(page, quiet))
page              211 fs/ext2/dir.c  	return page;
page              214 fs/ext2/dir.c  	ext2_put_page(page);
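
ext2_get_page/ext2_put_page above pair a pagecache read with kmap, and every exit path undoes both the mapping and the reference; the directory-walk loops that follow lean entirely on that pairing. A reduced userspace sketch of the pairing (stand-in names throughout):

#include <stdlib.h>

struct dirpage {
	char *kaddr;              /* the "kmap" result */
};

static struct dirpage *dir_get_page(unsigned long n)
{
	struct dirpage *p = malloc(sizeof(*p));
	if (!p)
		return NULL;
	p->kaddr = calloc(1, 4096);       /* read + map the directory block */
	if (!p->kaddr) {
		free(p);
		return NULL;
	}
	return p;
}

static void dir_put_page(struct dirpage *p)
{
	free(p->kaddr);                   /* kunmap */
	free(p);                          /* put_page */
}
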
page              285 fs/ext2/dir.c  		struct page *page = ext2_get_page(inode, n, 0);
page              287 fs/ext2/dir.c  		if (IS_ERR(page)) {
page              292 fs/ext2/dir.c  			return PTR_ERR(page);
page              294 fs/ext2/dir.c  		kaddr = page_address(page);
page              309 fs/ext2/dir.c  				ext2_put_page(page);
page              321 fs/ext2/dir.c  					ext2_put_page(page);
page              327 fs/ext2/dir.c  		ext2_put_page(page);
page              341 fs/ext2/dir.c  			const struct qstr *child, struct page **res_page)
page              348 fs/ext2/dir.c  	struct page *page = NULL;
page              365 fs/ext2/dir.c  		page = ext2_get_page(dir, n, dir_has_error);
page              366 fs/ext2/dir.c  		if (!IS_ERR(page)) {
page              367 fs/ext2/dir.c  			kaddr = page_address(page);
page              374 fs/ext2/dir.c  					ext2_put_page(page);
page              381 fs/ext2/dir.c  			ext2_put_page(page);
page              400 fs/ext2/dir.c  	*res_page = page;
page              405 fs/ext2/dir.c  struct ext2_dir_entry_2 * ext2_dotdot (struct inode *dir, struct page **p)
page              407 fs/ext2/dir.c  	struct page *page = ext2_get_page(dir, 0, 0);
page              410 fs/ext2/dir.c  	if (!IS_ERR(page)) {
page              411 fs/ext2/dir.c  		de = ext2_next_entry((ext2_dirent *) page_address(page));
page              412 fs/ext2/dir.c  		*p = page;
page              421 fs/ext2/dir.c  	struct page *page;
page              423 fs/ext2/dir.c  	de = ext2_find_entry (dir, child, &page);
page              426 fs/ext2/dir.c  		ext2_put_page(page);
page              431 fs/ext2/dir.c  static int ext2_prepare_chunk(struct page *page, loff_t pos, unsigned len)
page              433 fs/ext2/dir.c  	return __block_write_begin(page, pos, len, ext2_get_block);
page              438 fs/ext2/dir.c  		   struct page *page, struct inode *inode, int update_times)
page              440 fs/ext2/dir.c  	loff_t pos = page_offset(page) +
page              441 fs/ext2/dir.c  			(char *) de - (char *) page_address(page);
page              445 fs/ext2/dir.c  	lock_page(page);
page              446 fs/ext2/dir.c  	err = ext2_prepare_chunk(page, pos, len);
page              450 fs/ext2/dir.c  	err = ext2_commit_chunk(page, pos, len);
page              451 fs/ext2/dir.c  	ext2_put_page(page);
page              469 fs/ext2/dir.c  	struct page *page = NULL;
page              485 fs/ext2/dir.c  		page = ext2_get_page(dir, n, 0);
page              486 fs/ext2/dir.c  		err = PTR_ERR(page);
page              487 fs/ext2/dir.c  		if (IS_ERR(page))
page              489 fs/ext2/dir.c  		lock_page(page);
page              490 fs/ext2/dir.c  		kaddr = page_address(page);
page              520 fs/ext2/dir.c  		unlock_page(page);
page              521 fs/ext2/dir.c  		ext2_put_page(page);
page              527 fs/ext2/dir.c  	pos = page_offset(page) +
page              528 fs/ext2/dir.c  		(char*)de - (char*)page_address(page);
page              529 fs/ext2/dir.c  	err = ext2_prepare_chunk(page, pos, rec_len);
page              542 fs/ext2/dir.c  	err = ext2_commit_chunk(page, pos, rec_len);
page              548 fs/ext2/dir.c  	ext2_put_page(page);
page              552 fs/ext2/dir.c  	unlock_page(page);
page              560 fs/ext2/dir.c  int ext2_delete_entry (struct ext2_dir_entry_2 * dir, struct page * page )
page              562 fs/ext2/dir.c  	struct inode *inode = page->mapping->host;
page              563 fs/ext2/dir.c  	char *kaddr = page_address(page);
page              583 fs/ext2/dir.c  		from = (char*)pde - (char*)page_address(page);
page              584 fs/ext2/dir.c  	pos = page_offset(page) + from;
page              585 fs/ext2/dir.c  	lock_page(page);
page              586 fs/ext2/dir.c  	err = ext2_prepare_chunk(page, pos, to - from);
page              591 fs/ext2/dir.c  	err = ext2_commit_chunk(page, pos, to - from);
page              596 fs/ext2/dir.c  	ext2_put_page(page);
page              605 fs/ext2/dir.c  	struct page *page = grab_cache_page(inode->i_mapping, 0);
page              611 fs/ext2/dir.c  	if (!page)
page              614 fs/ext2/dir.c  	err = ext2_prepare_chunk(page, 0, chunk_size);
page              616 fs/ext2/dir.c  		unlock_page(page);
page              619 fs/ext2/dir.c  	kaddr = kmap_atomic(page);
page              635 fs/ext2/dir.c  	err = ext2_commit_chunk(page, 0, chunk_size);
page              637 fs/ext2/dir.c  	put_page(page);
page              646 fs/ext2/dir.c  	struct page *page = NULL;
page              653 fs/ext2/dir.c  		page = ext2_get_page(inode, i, dir_has_error);
page              655 fs/ext2/dir.c  		if (IS_ERR(page)) {
page              660 fs/ext2/dir.c  		kaddr = page_address(page);
page              686 fs/ext2/dir.c  		ext2_put_page(page);
page              691 fs/ext2/dir.c  	ext2_put_page(page);
page              743 fs/ext2/ext2.h extern struct ext2_dir_entry_2 * ext2_find_entry (struct inode *,const struct qstr *, struct page **);
page              744 fs/ext2/ext2.h extern int ext2_delete_entry (struct ext2_dir_entry_2 *, struct page *);
page              746 fs/ext2/ext2.h extern struct ext2_dir_entry_2 * ext2_dotdot (struct inode *, struct page **);
page              747 fs/ext2/ext2.h extern void ext2_set_link(struct inode *, struct ext2_dir_entry_2 *, struct page *, struct inode *, int);
page              870 fs/ext2/inode.c static int ext2_writepage(struct page *page, struct writeback_control *wbc)
page              872 fs/ext2/inode.c 	return block_write_full_page(page, ext2_get_block, wbc);
page              875 fs/ext2/inode.c static int ext2_readpage(struct file *file, struct page *page)
page              877 fs/ext2/inode.c 	return mpage_readpage(page, ext2_get_block);
page              890 fs/ext2/inode.c 		struct page **pagep, void **fsdata)
page              903 fs/ext2/inode.c 			struct page *page, void *fsdata)
page              907 fs/ext2/inode.c 	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
page              916 fs/ext2/inode.c 		struct page **pagep, void **fsdata)
page              927 fs/ext2/inode.c static int ext2_nobh_writepage(struct page *page,
page              930 fs/ext2/inode.c 	return nobh_writepage(page, ext2_get_block, wbc);
page              272 fs/ext2/namei.c 	struct page * page;
page              279 fs/ext2/namei.c 	de = ext2_find_entry (dir, &dentry->d_name, &page);
page              285 fs/ext2/namei.c 	err = ext2_delete_entry (de, page);
page              318 fs/ext2/namei.c 	struct page * dir_page = NULL;
page              320 fs/ext2/namei.c 	struct page * old_page;
page              349 fs/ext2/namei.c 		struct page *new_page;
page             3137 fs/ext4/ext4.h extern int ext4_readpage_inline(struct inode *inode, struct page *page);
page             3142 fs/ext4/ext4.h 					 struct page **pagep);
page             3146 fs/ext4/ext4.h 				      struct page *page);
page             3150 fs/ext4/ext4.h 				  struct page *page);
page             3155 fs/ext4/ext4.h 					   struct page **pagep,
page             3159 fs/ext4/ext4.h 					 struct page *page);
page             3240 fs/ext4/ext4.h 				struct list_head *pages, struct page *page,
page             3345 fs/ext4/ext4.h 			       struct page *page,
page              464 fs/ext4/inline.c static int ext4_read_inline_page(struct inode *inode, struct page *page)
page              471 fs/ext4/inline.c 	BUG_ON(!PageLocked(page));
page              473 fs/ext4/inline.c 	BUG_ON(page->index);
page              486 fs/ext4/inline.c 	kaddr = kmap_atomic(page);
page              488 fs/ext4/inline.c 	flush_dcache_page(page);
page              490 fs/ext4/inline.c 	zero_user_segment(page, len, PAGE_SIZE);
page              491 fs/ext4/inline.c 	SetPageUptodate(page);
page              498 fs/ext4/inline.c int ext4_readpage_inline(struct inode *inode, struct page *page)
page              512 fs/ext4/inline.c 	if (!page->index)
page              513 fs/ext4/inline.c 		ret = ext4_read_inline_page(inode, page);
page              514 fs/ext4/inline.c 	else if (!PageUptodate(page)) {
page              515 fs/ext4/inline.c 		zero_user_segment(page, 0, PAGE_SIZE);
page              516 fs/ext4/inline.c 		SetPageUptodate(page);
page              521 fs/ext4/inline.c 	unlock_page(page);
page              532 fs/ext4/inline.c 	struct page *page = NULL;
page              563 fs/ext4/inline.c 	page = grab_cache_page_write_begin(mapping, 0, flags);
page              564 fs/ext4/inline.c 	if (!page) {
page              579 fs/ext4/inline.c 	if (!PageUptodate(page)) {
page              580 fs/ext4/inline.c 		ret = ext4_read_inline_page(inode, page);
page              590 fs/ext4/inline.c 		ret = __block_write_begin(page, from, to,
page              593 fs/ext4/inline.c 		ret = __block_write_begin(page, from, to, ext4_get_block);
page              596 fs/ext4/inline.c 		ret = ext4_walk_page_buffers(handle, page_buffers(page),
page              602 fs/ext4/inline.c 		unlock_page(page);
page              603 fs/ext4/inline.c 		put_page(page);
page              604 fs/ext4/inline.c 		page = NULL;
page              624 fs/ext4/inline.c 	if (page)
page              625 fs/ext4/inline.c 		block_commit_write(page, from, to);
page              627 fs/ext4/inline.c 	if (page) {
page              628 fs/ext4/inline.c 		unlock_page(page);
page              629 fs/ext4/inline.c 		put_page(page);
page              649 fs/ext4/inline.c 				  struct page **pagep)
page              653 fs/ext4/inline.c 	struct page *page;
page              691 fs/ext4/inline.c 	page = grab_cache_page_write_begin(mapping, 0, flags);
page              692 fs/ext4/inline.c 	if (!page) {
page              697 fs/ext4/inline.c 	*pagep = page;
page              701 fs/ext4/inline.c 		unlock_page(page);
page              702 fs/ext4/inline.c 		put_page(page);
page              706 fs/ext4/inline.c 	if (!PageUptodate(page)) {
page              707 fs/ext4/inline.c 		ret = ext4_read_inline_page(inode, page);
page              709 fs/ext4/inline.c 			unlock_page(page);
page              710 fs/ext4/inline.c 			put_page(page);
page              730 fs/ext4/inline.c 			       unsigned copied, struct page *page)
page              737 fs/ext4/inline.c 		if (!PageUptodate(page)) {
page              753 fs/ext4/inline.c 	kaddr = kmap_atomic(page);
page              756 fs/ext4/inline.c 	SetPageUptodate(page);
page              758 fs/ext4/inline.c 	ClearPageDirty(page);
page              770 fs/ext4/inline.c 				  struct page *page)
page              783 fs/ext4/inline.c 	kaddr = kmap_atomic(page);
page              806 fs/ext4/inline.c 	struct page *page;
page              808 fs/ext4/inline.c 	page = grab_cache_page_write_begin(mapping, 0, flags);
page              809 fs/ext4/inline.c 	if (!page)
page              820 fs/ext4/inline.c 	if (!PageUptodate(page)) {
page              821 fs/ext4/inline.c 		ret = ext4_read_inline_page(inode, page);
page              826 fs/ext4/inline.c 	ret = __block_write_begin(page, 0, inline_size,
page              830 fs/ext4/inline.c 		unlock_page(page);
page              831 fs/ext4/inline.c 		put_page(page);
page              836 fs/ext4/inline.c 	SetPageDirty(page);
page              837 fs/ext4/inline.c 	SetPageUptodate(page);
page              843 fs/ext4/inline.c 	if (page) {
page              844 fs/ext4/inline.c 		unlock_page(page);
page              845 fs/ext4/inline.c 		put_page(page);
page              862 fs/ext4/inline.c 				    struct page **pagep,
page              867 fs/ext4/inline.c 	struct page *page;
page              909 fs/ext4/inline.c 	page = grab_cache_page_write_begin(mapping, 0, flags);
page              910 fs/ext4/inline.c 	if (!page) {
page              921 fs/ext4/inline.c 	if (!PageUptodate(page)) {
page              922 fs/ext4/inline.c 		ret = ext4_read_inline_page(inode, page);
page              931 fs/ext4/inline.c 	*pagep = page;
page              936 fs/ext4/inline.c 	unlock_page(page);
page              937 fs/ext4/inline.c 	put_page(page);
page              947 fs/ext4/inline.c 				  struct page *page)
page              951 fs/ext4/inline.c 	ret = ext4_write_inline_data_end(inode, pos, len, copied, page);
page              953 fs/ext4/inline.c 		unlock_page(page);
page              954 fs/ext4/inline.c 		put_page(page);
page              968 fs/ext4/inline.c 	unlock_page(page);
page              969 fs/ext4/inline.c 	put_page(page);
page              140 fs/ext4/inode.c static void ext4_invalidatepage(struct page *page, unsigned int offset,
page              142 fs/ext4/inode.c static int __ext4_journalled_writepage(struct page *page, unsigned int len);
page             1171 fs/ext4/inode.c static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
page             1176 fs/ext4/inode.c 	struct inode *inode = page->mapping->host;
page             1186 fs/ext4/inode.c 	BUG_ON(!PageLocked(page));
page             1191 fs/ext4/inode.c 	if (!page_has_buffers(page))
page             1192 fs/ext4/inode.c 		create_empty_buffers(page, blocksize, 0);
page             1193 fs/ext4/inode.c 	head = page_buffers(page);
page             1195 fs/ext4/inode.c 	block = (sector_t)page->index << (PAGE_SHIFT - bbits);
page             1201 fs/ext4/inode.c 			if (PageUptodate(page)) {
page             1215 fs/ext4/inode.c 				if (PageUptodate(page)) {
page             1222 fs/ext4/inode.c 					zero_user_segments(page, to, block_end,
page             1227 fs/ext4/inode.c 		if (PageUptodate(page)) {
page             1248 fs/ext4/inode.c 		page_zero_new_buffers(page, from, to);
page             1253 fs/ext4/inode.c 			err2 = fscrypt_decrypt_pagecache_blocks(page, blocksize,
page             1268 fs/ext4/inode.c 			    struct page **pagep, void **fsdata)
page             1274 fs/ext4/inode.c 	struct page *page;
page             1308 fs/ext4/inode.c 	page = grab_cache_page_write_begin(mapping, index, flags);
page             1309 fs/ext4/inode.c 	if (!page)
page             1311 fs/ext4/inode.c 	unlock_page(page);
page             1316 fs/ext4/inode.c 		put_page(page);
page             1320 fs/ext4/inode.c 	lock_page(page);
page             1321 fs/ext4/inode.c 	if (page->mapping != mapping) {
page             1323 fs/ext4/inode.c 		unlock_page(page);
page             1324 fs/ext4/inode.c 		put_page(page);
page             1329 fs/ext4/inode.c 	wait_for_stable_page(page);
page             1333 fs/ext4/inode.c 		ret = ext4_block_write_begin(page, pos, len,
page             1336 fs/ext4/inode.c 		ret = ext4_block_write_begin(page, pos, len,
page             1340 fs/ext4/inode.c 		ret = __block_write_begin(page, pos, len,
page             1343 fs/ext4/inode.c 		ret = __block_write_begin(page, pos, len, ext4_get_block);
page             1346 fs/ext4/inode.c 		ret = ext4_walk_page_buffers(handle, page_buffers(page),
page             1355 fs/ext4/inode.c 		unlock_page(page);
page             1383 fs/ext4/inode.c 		put_page(page);
page             1386 fs/ext4/inode.c 	*pagep = page;
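
ext4_write_begin above deliberately drops the page lock before starting a journal handle (which may sleep), then re-locks and re-checks page->mapping, restarting the whole sequence if the page was truncated in the window. A compilable model of that revalidation dance, with stubbed primitives standing in for the kernel's:

#include <errno.h>

struct mapping;
struct upage { struct mapping *mapping; };

/* Stubs standing in for kernel primitives (illustrative only). */
static void lock_page(struct upage *p) { (void)p; }
static void unlock_page(struct upage *p) { (void)p; }
static void put_page(struct upage *p) { (void)p; }
static void start_journal_handle(void) { }

static int write_begin_revalidate(struct mapping *mapping, struct upage *page)
{
	unlock_page(page);
	start_journal_handle();          /* may sleep; must not hold the page lock */
	lock_page(page);
	if (page->mapping != mapping) {  /* truncated or migrated meanwhile */
		unlock_page(page);
		put_page(page);
		return -EAGAIN;          /* caller restarts from the top */
	}
	return 0;
}
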
page             1413 fs/ext4/inode.c 			  struct page *page, void *fsdata)
page             1426 fs/ext4/inode.c 						 copied, page);
page             1428 fs/ext4/inode.c 			unlock_page(page);
page             1429 fs/ext4/inode.c 			put_page(page);
page             1435 fs/ext4/inode.c 					 len, copied, page, fsdata);
page             1445 fs/ext4/inode.c 	unlock_page(page);
page             1446 fs/ext4/inode.c 	put_page(page);
page             1490 fs/ext4/inode.c 					    struct page *page,
page             1496 fs/ext4/inode.c 	bh = head = page_buffers(page);
page             1501 fs/ext4/inode.c 				if (!PageUptodate(page)) {
page             1507 fs/ext4/inode.c 					zero_user(page, start, size);
page             1521 fs/ext4/inode.c 				     struct page *page, void *fsdata)
page             1541 fs/ext4/inode.c 						 copied, page);
page             1543 fs/ext4/inode.c 			unlock_page(page);
page             1544 fs/ext4/inode.c 			put_page(page);
page             1548 fs/ext4/inode.c 	} else if (unlikely(copied < len) && !PageUptodate(page)) {
page             1550 fs/ext4/inode.c 		ext4_journalled_zero_new_buffers(handle, page, from, to);
page             1553 fs/ext4/inode.c 			ext4_journalled_zero_new_buffers(handle, page,
page             1555 fs/ext4/inode.c 		ret = ext4_walk_page_buffers(handle, page_buffers(page), from,
page             1559 fs/ext4/inode.c 			SetPageUptodate(page);
page             1565 fs/ext4/inode.c 	unlock_page(page);
page             1566 fs/ext4/inode.c 	put_page(page);
page             1717 fs/ext4/inode.c 			struct page *page = pvec.pages[i];
page             1719 fs/ext4/inode.c 			BUG_ON(!PageLocked(page));
page             1720 fs/ext4/inode.c 			BUG_ON(PageWriteback(page));
page             1722 fs/ext4/inode.c 				if (page_mapped(page))
page             1723 fs/ext4/inode.c 					clear_page_dirty_for_io(page);
page             1724 fs/ext4/inode.c 				block_invalidatepage(page, 0, PAGE_SIZE);
page             1725 fs/ext4/inode.c 				ClearPageUptodate(page);
page             1727 fs/ext4/inode.c 			unlock_page(page);
page             2000 fs/ext4/inode.c static int __ext4_journalled_writepage(struct page *page,
page             2003 fs/ext4/inode.c 	struct address_space *mapping = page->mapping;
page             2011 fs/ext4/inode.c 	ClearPageChecked(page);
page             2014 fs/ext4/inode.c 		BUG_ON(page->index != 0);
page             2016 fs/ext4/inode.c 		inode_bh = ext4_journalled_write_inline_data(inode, len, page);
page             2020 fs/ext4/inode.c 		page_bufs = page_buffers(page);
page             2033 fs/ext4/inode.c 	get_page(page);
page             2034 fs/ext4/inode.c 	unlock_page(page);
page             2040 fs/ext4/inode.c 		put_page(page);
page             2045 fs/ext4/inode.c 	lock_page(page);
page             2046 fs/ext4/inode.c 	put_page(page);
page             2047 fs/ext4/inode.c 	if (page->mapping != mapping) {
page             2075 fs/ext4/inode.c 	unlock_page(page);
page             2122 fs/ext4/inode.c static int ext4_writepage(struct page *page,
page             2129 fs/ext4/inode.c 	struct inode *inode = page->mapping->host;
page             2134 fs/ext4/inode.c 		inode->i_mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
page             2135 fs/ext4/inode.c 		unlock_page(page);
page             2139 fs/ext4/inode.c 	trace_ext4_writepage(page);
page             2141 fs/ext4/inode.c 	if (page->index == size >> PAGE_SHIFT &&
page             2147 fs/ext4/inode.c 	page_bufs = page_buffers(page);
page             2167 fs/ext4/inode.c 		redirty_page_for_writepage(wbc, page);
page             2177 fs/ext4/inode.c 			unlock_page(page);
page             2183 fs/ext4/inode.c 	if (PageChecked(page) && ext4_should_journal_data(inode))
page             2188 fs/ext4/inode.c 		return __ext4_journalled_writepage(page, len);
page             2193 fs/ext4/inode.c 		redirty_page_for_writepage(wbc, page);
page             2194 fs/ext4/inode.c 		unlock_page(page);
page             2197 fs/ext4/inode.c 	ret = ext4_bio_write_page(&io_submit, page, len, wbc, keep_towrite);
page             2204 fs/ext4/inode.c static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page)
page             2210 fs/ext4/inode.c 	BUG_ON(page->index != mpd->first_page);
page             2211 fs/ext4/inode.c 	clear_page_dirty_for_io(page);
page             2226 fs/ext4/inode.c 	if (page->index == size >> PAGE_SHIFT &&
page             2231 fs/ext4/inode.c 	err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc, false);
page             2390 fs/ext4/inode.c 			struct page *page = pvec.pages[i];
page             2392 fs/ext4/inode.c 			bh = head = page_buffers(page);
page             2431 fs/ext4/inode.c 			err = mpage_submit_page(mpd, page);
page             2660 fs/ext4/inode.c 			struct page *page = pvec.pages[i];
page             2674 fs/ext4/inode.c 			if (mpd->map.m_len > 0 && mpd->next_page != page->index)
page             2677 fs/ext4/inode.c 			lock_page(page);
page             2685 fs/ext4/inode.c 			if (!PageDirty(page) ||
page             2686 fs/ext4/inode.c 			    (PageWriteback(page) &&
page             2688 fs/ext4/inode.c 			    unlikely(page->mapping != mapping)) {
page             2689 fs/ext4/inode.c 				unlock_page(page);
page             2693 fs/ext4/inode.c 			wait_on_page_writeback(page);
page             2694 fs/ext4/inode.c 			BUG_ON(PageWriteback(page));
page             2697 fs/ext4/inode.c 				mpd->first_page = page->index;
page             2698 fs/ext4/inode.c 			mpd->next_page = page->index + 1;
page             2700 fs/ext4/inode.c 			lblk = ((ext4_lblk_t)page->index) <<
page             2702 fs/ext4/inode.c 			head = page_buffers(page);
page             3032 fs/ext4/inode.c 			       struct page **pagep, void **fsdata)
page             3035 fs/ext4/inode.c 	struct page *page;
page             3072 fs/ext4/inode.c 	page = grab_cache_page_write_begin(mapping, index, flags);
page             3073 fs/ext4/inode.c 	if (!page)
page             3075 fs/ext4/inode.c 	unlock_page(page);
page             3087 fs/ext4/inode.c 		put_page(page);
page             3091 fs/ext4/inode.c 	lock_page(page);
page             3092 fs/ext4/inode.c 	if (page->mapping != mapping) {
page             3094 fs/ext4/inode.c 		unlock_page(page);
page             3095 fs/ext4/inode.c 		put_page(page);
page             3100 fs/ext4/inode.c 	wait_for_stable_page(page);
page             3103 fs/ext4/inode.c 	ret = ext4_block_write_begin(page, pos, len,
page             3106 fs/ext4/inode.c 	ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep);
page             3109 fs/ext4/inode.c 		unlock_page(page);
page             3123 fs/ext4/inode.c 		put_page(page);
page             3127 fs/ext4/inode.c 	*pagep = page;
page             3135 fs/ext4/inode.c static int ext4_da_should_update_i_disksize(struct page *page,
page             3139 fs/ext4/inode.c 	struct inode *inode = page->mapping->host;
page             3143 fs/ext4/inode.c 	bh = page_buffers(page);
page             3157 fs/ext4/inode.c 			     struct page *page, void *fsdata)
page             3168 fs/ext4/inode.c 				      len, copied, page, fsdata);
page             3182 fs/ext4/inode.c 		    ext4_da_should_update_i_disksize(page, end)) {
page             3196 fs/ext4/inode.c 						     page);
page             3199 fs/ext4/inode.c 							page, fsdata);
page             3324 fs/ext4/inode.c static int ext4_readpage(struct file *file, struct page *page)
page             3327 fs/ext4/inode.c 	struct inode *inode = page->mapping->host;
page             3329 fs/ext4/inode.c 	trace_ext4_readpage(page);
page             3332 fs/ext4/inode.c 		ret = ext4_readpage_inline(inode, page);
page             3335 fs/ext4/inode.c 		return ext4_mpage_readpages(page->mapping, NULL, page, 1,
page             3354 fs/ext4/inode.c static void ext4_invalidatepage(struct page *page, unsigned int offset,
page             3357 fs/ext4/inode.c 	trace_ext4_invalidatepage(page, offset, length);
page             3360 fs/ext4/inode.c 	WARN_ON(page_has_buffers(page) && buffer_jbd(page_buffers(page)));
page             3362 fs/ext4/inode.c 	block_invalidatepage(page, offset, length);
page             3365 fs/ext4/inode.c static int __ext4_journalled_invalidatepage(struct page *page,
page             3369 fs/ext4/inode.c 	journal_t *journal = EXT4_JOURNAL(page->mapping->host);
page             3371 fs/ext4/inode.c 	trace_ext4_journalled_invalidatepage(page, offset, length);
page             3377 fs/ext4/inode.c 		ClearPageChecked(page);
page             3379 fs/ext4/inode.c 	return jbd2_journal_invalidatepage(journal, page, offset, length);
page             3383 fs/ext4/inode.c static void ext4_journalled_invalidatepage(struct page *page,
page             3387 fs/ext4/inode.c 	WARN_ON(__ext4_journalled_invalidatepage(page, offset, length) < 0);
page             3390 fs/ext4/inode.c static int ext4_releasepage(struct page *page, gfp_t wait)
page             3392 fs/ext4/inode.c 	journal_t *journal = EXT4_JOURNAL(page->mapping->host);
page             3394 fs/ext4/inode.c 	trace_ext4_releasepage(page);
page             3397 fs/ext4/inode.c 	if (PageChecked(page))
page             3400 fs/ext4/inode.c 		return jbd2_journal_try_to_free_buffers(journal, page, wait);
page             3402 fs/ext4/inode.c 		return try_to_free_buffers(page);
page             3910 fs/ext4/inode.c static int ext4_journalled_set_page_dirty(struct page *page)
page             3912 fs/ext4/inode.c 	SetPageChecked(page);
page             3913 fs/ext4/inode.c 	return __set_page_dirty_nobuffers(page);
page             3916 fs/ext4/inode.c static int ext4_set_page_dirty(struct page *page)
page             3918 fs/ext4/inode.c 	WARN_ON_ONCE(!PageLocked(page) && !PageDirty(page));
page             3919 fs/ext4/inode.c 	WARN_ON_ONCE(!page_has_buffers(page));
page             3920 fs/ext4/inode.c 	return __set_page_dirty_buffers(page);
page             4010 fs/ext4/inode.c 	struct page *page;
page             4013 fs/ext4/inode.c 	page = find_or_create_page(mapping, from >> PAGE_SHIFT,
page             4015 fs/ext4/inode.c 	if (!page)
page             4022 fs/ext4/inode.c 	if (!page_has_buffers(page))
page             4023 fs/ext4/inode.c 		create_empty_buffers(page, blocksize, 0);
page             4026 fs/ext4/inode.c 	bh = page_buffers(page);
page             4048 fs/ext4/inode.c 	if (PageUptodate(page))
page             4062 fs/ext4/inode.c 					page, blocksize, bh_offset(bh)));
page             4071 fs/ext4/inode.c 	zero_user(page, offset, length);
page             4085 fs/ext4/inode.c 	unlock_page(page);
page             4086 fs/ext4/inode.c 	put_page(page);
page             4231 fs/ext4/inode.c 	struct page *page;
page             4238 fs/ext4/inode.c 		page = dax_layout_busy_page(inode->i_mapping);
page             4239 fs/ext4/inode.c 		if (!page)
page             4242 fs/ext4/inode.c 		error = ___wait_var_event(&page->_refcount,
page             4243 fs/ext4/inode.c 				atomic_read(&page->_refcount) == 1,
page             5478 fs/ext4/inode.c 	struct page *page;
page             5497 fs/ext4/inode.c 		page = find_lock_page(inode->i_mapping,
page             5499 fs/ext4/inode.c 		if (!page)
page             5501 fs/ext4/inode.c 		ret = __ext4_journalled_invalidatepage(page, offset,
page             5503 fs/ext4/inode.c 		unlock_page(page);
page             5504 fs/ext4/inode.c 		put_page(page);
page             6240 fs/ext4/inode.c 	struct page *page = vmf->page;
page             6276 fs/ext4/inode.c 	lock_page(page);
page             6279 fs/ext4/inode.c 	if (page->mapping != mapping || page_offset(page) > size) {
page             6280 fs/ext4/inode.c 		unlock_page(page);
page             6285 fs/ext4/inode.c 	if (page->index == size >> PAGE_SHIFT)
page             6293 fs/ext4/inode.c 	if (page_has_buffers(page)) {
page             6294 fs/ext4/inode.c 		if (!ext4_walk_page_buffers(NULL, page_buffers(page),
page             6298 fs/ext4/inode.c 			wait_for_stable_page(page);
page             6303 fs/ext4/inode.c 	unlock_page(page);
page             6318 fs/ext4/inode.c 		if (ext4_walk_page_buffers(handle, page_buffers(page), 0,
page             6320 fs/ext4/inode.c 			unlock_page(page);
page              805 fs/ext4/mballoc.c static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
page              823 fs/ext4/mballoc.c 	mb_debug(1, "init page %lu\n", page->index);
page              825 fs/ext4/mballoc.c 	inode = page->mapping->host;
page              846 fs/ext4/mballoc.c 	first_group = page->index * blocks_per_page / 2;
page              860 fs/ext4/mballoc.c 		if (PageUptodate(page) && !EXT4_MB_GRP_NEED_INIT(grinfo)) {
page              884 fs/ext4/mballoc.c 	first_block = page->index * blocks_per_page;
page              905 fs/ext4/mballoc.c 		data = page_address(page) + (i * blocksize);
page              916 fs/ext4/mballoc.c 				group, page->index, i * blocksize);
page              936 fs/ext4/mballoc.c 				group, page->index, i * blocksize);
page              954 fs/ext4/mballoc.c 	SetPageUptodate(page);
page              978 fs/ext4/mballoc.c 	struct page *page;
page              992 fs/ext4/mballoc.c 	page = find_or_create_page(inode->i_mapping, pnum, gfp);
page              993 fs/ext4/mballoc.c 	if (!page)
page              995 fs/ext4/mballoc.c 	BUG_ON(page->mapping != inode->i_mapping);
page              996 fs/ext4/mballoc.c 	e4b->bd_bitmap_page = page;
page              997 fs/ext4/mballoc.c 	e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
page             1006 fs/ext4/mballoc.c 	page = find_or_create_page(inode->i_mapping, pnum, gfp);
page             1007 fs/ext4/mballoc.c 	if (!page)
page             1009 fs/ext4/mballoc.c 	BUG_ON(page->mapping != inode->i_mapping);
page             1010 fs/ext4/mballoc.c 	e4b->bd_buddy_page = page;
page             1037 fs/ext4/mballoc.c 	struct page *page;
page             1061 fs/ext4/mballoc.c 	page = e4b.bd_bitmap_page;
page             1062 fs/ext4/mballoc.c 	ret = ext4_mb_init_cache(page, NULL, gfp);
page             1065 fs/ext4/mballoc.c 	if (!PageUptodate(page)) {
page             1080 fs/ext4/mballoc.c 	page = e4b.bd_buddy_page;
page             1081 fs/ext4/mballoc.c 	ret = ext4_mb_init_cache(page, e4b.bd_bitmap, gfp);
page             1084 fs/ext4/mballoc.c 	if (!PageUptodate(page)) {
page             1106 fs/ext4/mballoc.c 	struct page *page;
page             1146 fs/ext4/mballoc.c 	page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
page             1147 fs/ext4/mballoc.c 	if (page == NULL || !PageUptodate(page)) {
page             1148 fs/ext4/mballoc.c 		if (page)
page             1157 fs/ext4/mballoc.c 			put_page(page);
page             1158 fs/ext4/mballoc.c 		page = find_or_create_page(inode->i_mapping, pnum, gfp);
page             1159 fs/ext4/mballoc.c 		if (page) {
page             1160 fs/ext4/mballoc.c 			BUG_ON(page->mapping != inode->i_mapping);
page             1161 fs/ext4/mballoc.c 			if (!PageUptodate(page)) {
page             1162 fs/ext4/mballoc.c 				ret = ext4_mb_init_cache(page, NULL, gfp);
page             1164 fs/ext4/mballoc.c 					unlock_page(page);
page             1167 fs/ext4/mballoc.c 				mb_cmp_bitmaps(e4b, page_address(page) +
page             1170 fs/ext4/mballoc.c 			unlock_page(page);
page             1173 fs/ext4/mballoc.c 	if (page == NULL) {
page             1177 fs/ext4/mballoc.c 	if (!PageUptodate(page)) {
page             1183 fs/ext4/mballoc.c 	e4b->bd_bitmap_page = page;
page             1184 fs/ext4/mballoc.c 	e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
page             1190 fs/ext4/mballoc.c 	page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
page             1191 fs/ext4/mballoc.c 	if (page == NULL || !PageUptodate(page)) {
page             1192 fs/ext4/mballoc.c 		if (page)
page             1193 fs/ext4/mballoc.c 			put_page(page);
page             1194 fs/ext4/mballoc.c 		page = find_or_create_page(inode->i_mapping, pnum, gfp);
page             1195 fs/ext4/mballoc.c 		if (page) {
page             1196 fs/ext4/mballoc.c 			BUG_ON(page->mapping != inode->i_mapping);
page             1197 fs/ext4/mballoc.c 			if (!PageUptodate(page)) {
page             1198 fs/ext4/mballoc.c 				ret = ext4_mb_init_cache(page, e4b->bd_bitmap,
page             1201 fs/ext4/mballoc.c 					unlock_page(page);
page             1205 fs/ext4/mballoc.c 			unlock_page(page);
page             1208 fs/ext4/mballoc.c 	if (page == NULL) {
page             1212 fs/ext4/mballoc.c 	if (!PageUptodate(page)) {
page             1218 fs/ext4/mballoc.c 	e4b->bd_buddy_page = page;
page             1219 fs/ext4/mballoc.c 	e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);
page             1227 fs/ext4/mballoc.c 	if (page)
page             1228 fs/ext4/mballoc.c 		put_page(page);
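
The mballoc listing above keeps each group's bitmap and buddy in pagecache pages: find_get_page serves the hot path, and a miss falls through to find_or_create_page plus ext4_mb_init_cache under the page lock. A lookup-or-build sketch in the same shape (all names illustrative, with trivial stubs so it compiles):

#include <stdlib.h>

struct cpage { int uptodate; };

/* Stubs: a real cache would hash pnum to an existing page. */
static struct cpage *cache_lookup(int pnum) { (void)pnum; return NULL; }
static struct cpage *cache_create_locked(int pnum)
{
	(void)pnum;
	return calloc(1, sizeof(struct cpage));
}
static void cpage_unlock(struct cpage *p) { (void)p; }
static int init_cache(struct cpage *p) { p->uptodate = 1; return 0; }

static struct cpage *get_group_page(int pnum)
{
	struct cpage *p = cache_lookup(pnum);
	if (p && p->uptodate)
		return p;                         /* fast path: cached hit */
	p = cache_create_locked(pnum);
	if (!p)
		return NULL;
	if (!p->uptodate && init_cache(p) < 0) {  /* build once, under lock */
		cpage_unlock(p);
		return NULL;
	}
	cpage_unlock(p);
	return p;
}
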
page              174 fs/ext4/mballoc.h 	struct page *ac_bitmap_page;
page              175 fs/ext4/mballoc.h 	struct page *ac_buddy_page;
page              185 fs/ext4/mballoc.h 	struct page *bd_buddy_page;
page              187 fs/ext4/mballoc.h 	struct page *bd_bitmap_page;
page              127 fs/ext4/move_extent.c 		      pgoff_t index1, pgoff_t index2, struct page *page[2])
page              142 fs/ext4/move_extent.c 	page[0] = grab_cache_page_write_begin(mapping[0], index1, fl);
page              143 fs/ext4/move_extent.c 	if (!page[0])
page              146 fs/ext4/move_extent.c 	page[1] = grab_cache_page_write_begin(mapping[1], index2, fl);
page              147 fs/ext4/move_extent.c 	if (!page[1]) {
page              148 fs/ext4/move_extent.c 		unlock_page(page[0]);
page              149 fs/ext4/move_extent.c 		put_page(page[0]);
page              157 fs/ext4/move_extent.c 	wait_on_page_writeback(page[0]);
page              158 fs/ext4/move_extent.c 	wait_on_page_writeback(page[1]);
page              160 fs/ext4/move_extent.c 		swap(page[0], page[1]);
page              167 fs/ext4/move_extent.c mext_page_mkuptodate(struct page *page, unsigned from, unsigned to)
page              169 fs/ext4/move_extent.c 	struct inode *inode = page->mapping->host;
page              174 fs/ext4/move_extent.c 	BUG_ON(!PageLocked(page));
page              175 fs/ext4/move_extent.c 	BUG_ON(PageWriteback(page));
page              177 fs/ext4/move_extent.c 	if (PageUptodate(page))
page              181 fs/ext4/move_extent.c 	if (!page_has_buffers(page))
page              182 fs/ext4/move_extent.c 		create_empty_buffers(page, blocksize, 0);
page              184 fs/ext4/move_extent.c 	head = page_buffers(page);
page              185 fs/ext4/move_extent.c 	block = (sector_t)page->index << (PAGE_SHIFT - inode->i_blkbits);
page              199 fs/ext4/move_extent.c 				SetPageError(page);
page              203 fs/ext4/move_extent.c 				zero_user(page, block_start, blocksize);
page              225 fs/ext4/move_extent.c 		SetPageUptodate(page);
page              253 fs/ext4/move_extent.c 	struct page *pagep[2] = {NULL, NULL};
page               68 fs/ext4/page-io.c 		struct page *page = bvec->bv_page;
page               69 fs/ext4/page-io.c 		struct page *bounce_page = NULL;
page               76 fs/ext4/page-io.c 		if (!page)
page               79 fs/ext4/page-io.c 		if (fscrypt_is_bounce_page(page)) {
page               80 fs/ext4/page-io.c 			bounce_page = page;
page               81 fs/ext4/page-io.c 			page = fscrypt_pagecache_page(bounce_page);
page               85 fs/ext4/page-io.c 			SetPageError(page);
page               86 fs/ext4/page-io.c 			mapping_set_error(page->mapping, -EIO);
page               88 fs/ext4/page-io.c 		bh = head = page_buffers(page);
page              110 fs/ext4/page-io.c 			end_page_writeback(page);
page              381 fs/ext4/page-io.c 			    struct page *page,
page              396 fs/ext4/page-io.c 	ret = bio_add_page(io->io_bio, page, bh->b_size, bh_offset(bh));
page              399 fs/ext4/page-io.c 	wbc_account_cgroup_owner(io->io_wbc, page, bh->b_size);
page              405 fs/ext4/page-io.c 			struct page *page,
page              410 fs/ext4/page-io.c 	struct page *bounce_page = NULL;
page              411 fs/ext4/page-io.c 	struct inode *inode = page->mapping->host;
page              418 fs/ext4/page-io.c 	BUG_ON(!PageLocked(page));
page              419 fs/ext4/page-io.c 	BUG_ON(PageWriteback(page));
page              422 fs/ext4/page-io.c 		set_page_writeback_keepwrite(page);
page              424 fs/ext4/page-io.c 		set_page_writeback(page);
page              425 fs/ext4/page-io.c 	ClearPageError(page);
page              437 fs/ext4/page-io.c 		zero_user_segment(page, len, PAGE_SIZE);
page              445 fs/ext4/page-io.c 	bh = head = page_buffers(page);
page              468 fs/ext4/page-io.c 	bh = head = page_buffers(page);
page              489 fs/ext4/page-io.c 		bounce_page = fscrypt_encrypt_pagecache_blocks(page, enc_bytes,
page              512 fs/ext4/page-io.c 		ret = io_submit_add_bh(io, inode, bounce_page ?: page, bh);
page              530 fs/ext4/page-io.c 		redirty_page_for_writepage(wbc, page);
page              536 fs/ext4/page-io.c 	unlock_page(page);
page              539 fs/ext4/page-io.c 		end_page_writeback(page);
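
The page-io listing above shows the fscrypt bounce-page indirection: for encrypted files, the bio carries an encrypted copy, and I/O completion walks from the bounce page back to the original pagecache page to clear writeback. A userspace model of that indirection (illustrative names):

#include <stdlib.h>
#include <string.h>

struct bpage {
	struct bpage *orig;       /* NULL for real pagecache pages */
	char data[4096];
};

static struct bpage *make_bounce(struct bpage *orig)
{
	struct bpage *b = malloc(sizeof(*b));
	if (!b)
		return NULL;
	b->orig = orig;
	memcpy(b->data, orig->data, sizeof(b->data)); /* real code encrypts here */
	return b;
}

static struct bpage *io_target_to_pagecache(struct bpage *p)
{
	return p->orig ? p->orig : p;     /* fscrypt_pagecache_page analogue */
}
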
page               72 fs/ext4/readpage.c 	struct page *page;
page               77 fs/ext4/readpage.c 		page = bv->bv_page;
page               80 fs/ext4/readpage.c 		if (bio->bi_status || PageError(page)) {
page               81 fs/ext4/readpage.c 			ClearPageUptodate(page);
page               83 fs/ext4/readpage.c 			ClearPageError(page);
page               85 fs/ext4/readpage.c 			SetPageUptodate(page);
page               87 fs/ext4/readpage.c 		unlock_page(page);
page              226 fs/ext4/readpage.c 			 struct list_head *pages, struct page *page,
page              256 fs/ext4/readpage.c 			page = lru_to_page(pages);
page              258 fs/ext4/readpage.c 			prefetchw(&page->flags);
page              259 fs/ext4/readpage.c 			list_del(&page->lru);
page              260 fs/ext4/readpage.c 			if (add_to_page_cache_lru(page, mapping, page->index,
page              265 fs/ext4/readpage.c 		if (page_has_buffers(page))
page              268 fs/ext4/readpage.c 		block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
page              311 fs/ext4/readpage.c 					SetPageError(page);
page              312 fs/ext4/readpage.c 					zero_user_segment(page, 0,
page              314 fs/ext4/readpage.c 					unlock_page(page);
page              345 fs/ext4/readpage.c 			zero_user_segment(page, first_hole << blkbits,
page              348 fs/ext4/readpage.c 				if (ext4_need_verity(inode, page->index) &&
page              349 fs/ext4/readpage.c 				    !fsverity_verify_page(page))
page              351 fs/ext4/readpage.c 				SetPageUptodate(page);
page              352 fs/ext4/readpage.c 				unlock_page(page);
page              356 fs/ext4/readpage.c 			SetPageMappedToDisk(page);
page              359 fs/ext4/readpage.c 		    !PageUptodate(page) && cleancache_get_page(page) == 0) {
page              360 fs/ext4/readpage.c 			SetPageUptodate(page);
page              380 fs/ext4/readpage.c 			ctx = get_bio_post_read_ctx(inode, bio, page->index);
page              395 fs/ext4/readpage.c 		if (bio_add_page(bio, page, length, 0) < length)
page              411 fs/ext4/readpage.c 		if (!PageUptodate(page))
page              412 fs/ext4/readpage.c 			block_read_full_page(page, ext4_get_block);
page              414 fs/ext4/readpage.c 			unlock_page(page);
page              417 fs/ext4/readpage.c 			put_page(page);
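
ext4_mpage_readpages above batches contiguous on-disk blocks into one bio and falls back to block_read_full_page when the mapping is irregular. A simplified model of that batching decision, over a made-up block map where negative entries mean unmapped:

#include <stdio.h>

static void submit_batched(long first, int n) { printf("bio: block %ld, count %d\n", first, n); }
static void slow_path(long blk)              { printf("slow path for file block %ld\n", blk); }

static void read_blocks(const long *map, int n)
{
	int i = 0;
	while (i < n) {
		if (map[i] < 0) {                 /* hole or unmapped: slow path */
			slow_path(i);
			i++;
			continue;
		}
		int start = i;
		while (i + 1 < n && map[i + 1] == map[i] + 1)
			i++;                      /* extend the contiguous run */
		submit_batched(map[start], i - start + 1);
		i++;
	}
}
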
page             1249 fs/ext4/super.c static int bdev_try_to_free_page(struct super_block *sb, struct page *page,
page             1254 fs/ext4/super.c 	WARN_ON(PageChecked(page));
page             1255 fs/ext4/super.c 	if (!page_has_buffers(page))
page             1258 fs/ext4/super.c 		return jbd2_journal_try_to_free_buffers(journal, page,
page             1260 fs/ext4/super.c 	return try_to_free_buffers(page);
page               30 fs/ext4/symlink.c 	struct page *cpage = NULL;
page               47 fs/ext4/verity.c 		struct page *page;
page               50 fs/ext4/verity.c 		page = read_mapping_page(inode->i_mapping, pos >> PAGE_SHIFT,
page               52 fs/ext4/verity.c 		if (IS_ERR(page))
page               53 fs/ext4/verity.c 			return PTR_ERR(page);
page               55 fs/ext4/verity.c 		addr = kmap_atomic(page);
page               59 fs/ext4/verity.c 		put_page(page);
page               81 fs/ext4/verity.c 		struct page *page;
page               87 fs/ext4/verity.c 					    &page, &fsdata);
page               91 fs/ext4/verity.c 		addr = kmap_atomic(page);
page               96 fs/ext4/verity.c 					  page, fsdata);
page              345 fs/ext4/verity.c static struct page *ext4_read_merkle_tree_page(struct inode *inode,
page              168 fs/f2fs/acl.c  						struct page *dpage)
page              204 fs/f2fs/acl.c  			struct posix_acl *acl, struct page *ipage)
page              334 fs/f2fs/acl.c  		struct page *dpage)
page              383 fs/f2fs/acl.c  int f2fs_init_acl(struct inode *inode, struct inode *dir, struct page *ipage,
page              384 fs/f2fs/acl.c  							struct page *dpage)
page               38 fs/f2fs/acl.h  extern int f2fs_init_acl(struct inode *, struct inode *, struct page *,
page               39 fs/f2fs/acl.h  							struct page *);
page               45 fs/f2fs/acl.h  				struct page *ipage, struct page *dpage)
page               37 fs/f2fs/checkpoint.c struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
page               40 fs/f2fs/checkpoint.c 	struct page *page = NULL;
page               42 fs/f2fs/checkpoint.c 	page = f2fs_grab_cache_page(mapping, index, false);
page               43 fs/f2fs/checkpoint.c 	if (!page) {
page               47 fs/f2fs/checkpoint.c 	f2fs_wait_on_page_writeback(page, META, true, true);
page               48 fs/f2fs/checkpoint.c 	if (!PageUptodate(page))
page               49 fs/f2fs/checkpoint.c 		SetPageUptodate(page);
page               50 fs/f2fs/checkpoint.c 	return page;
page               56 fs/f2fs/checkpoint.c static struct page *__get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index,
page               60 fs/f2fs/checkpoint.c 	struct page *page;
page               76 fs/f2fs/checkpoint.c 	page = f2fs_grab_cache_page(mapping, index, false);
page               77 fs/f2fs/checkpoint.c 	if (!page) {
page               81 fs/f2fs/checkpoint.c 	if (PageUptodate(page))
page               84 fs/f2fs/checkpoint.c 	fio.page = page;
page               88 fs/f2fs/checkpoint.c 		f2fs_put_page(page, 1);
page               92 fs/f2fs/checkpoint.c 	lock_page(page);
page               93 fs/f2fs/checkpoint.c 	if (unlikely(page->mapping != mapping)) {
page               94 fs/f2fs/checkpoint.c 		f2fs_put_page(page, 1);
page               98 fs/f2fs/checkpoint.c 	if (unlikely(!PageUptodate(page))) {
page               99 fs/f2fs/checkpoint.c 		f2fs_put_page(page, 1);
page              103 fs/f2fs/checkpoint.c 	return page;
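The checkpoint.c fragments at lines 92-99 above belong to one load-bearing idiom: once a page has been unlocked for read I/O it may be truncated or reclaimed, so it must be locked again and both its mapping and uptodate state re-checked before use. A consolidated, non-verbatim reading of that block (the -EIO return is an assumption consistent with __get_meta_page's ERR_PTR contract):

	lock_page(page);
	if (unlikely(page->mapping != mapping)) {
		/* truncated or migrated while the read was in flight: retry */
		f2fs_put_page(page, 1);
		goto repeat;
	}
	if (unlikely(!PageUptodate(page))) {
		/* the read itself failed */
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	return page;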
page              106 fs/f2fs/checkpoint.c struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
page              111 fs/f2fs/checkpoint.c struct page *f2fs_get_meta_page_nofail(struct f2fs_sb_info *sbi, pgoff_t index)
page              113 fs/f2fs/checkpoint.c 	struct page *page;
page              117 fs/f2fs/checkpoint.c 	page = __get_meta_page(sbi, index, true);
page              118 fs/f2fs/checkpoint.c 	if (IS_ERR(page)) {
page              119 fs/f2fs/checkpoint.c 		if (PTR_ERR(page) == -EIO &&
page              124 fs/f2fs/checkpoint.c 	return page;
page              128 fs/f2fs/checkpoint.c struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index)
page              214 fs/f2fs/checkpoint.c 	struct page *page;
page              259 fs/f2fs/checkpoint.c 		page = f2fs_grab_cache_page(META_MAPPING(sbi),
page              261 fs/f2fs/checkpoint.c 		if (!page)
page              263 fs/f2fs/checkpoint.c 		if (PageUptodate(page)) {
page              264 fs/f2fs/checkpoint.c 			f2fs_put_page(page, 1);
page              268 fs/f2fs/checkpoint.c 		fio.page = page;
page              270 fs/f2fs/checkpoint.c 		f2fs_put_page(page, 0);
page              279 fs/f2fs/checkpoint.c 	struct page *page;
page              282 fs/f2fs/checkpoint.c 	page = find_get_page(META_MAPPING(sbi), index);
page              283 fs/f2fs/checkpoint.c 	if (!page || !PageUptodate(page))
page              285 fs/f2fs/checkpoint.c 	f2fs_put_page(page, 0);
page              291 fs/f2fs/checkpoint.c static int __f2fs_write_meta_page(struct page *page,
page              295 fs/f2fs/checkpoint.c 	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
page              297 fs/f2fs/checkpoint.c 	trace_f2fs_writepage(page, META);
page              303 fs/f2fs/checkpoint.c 	if (wbc->for_reclaim && page->index < GET_SUM_BLOCK(sbi, 0))
page              306 fs/f2fs/checkpoint.c 	f2fs_do_write_meta_page(sbi, page, io_type);
page              310 fs/f2fs/checkpoint.c 		f2fs_submit_merged_write_cond(sbi, NULL, page, 0, META);
page              312 fs/f2fs/checkpoint.c 	unlock_page(page);
page              320 fs/f2fs/checkpoint.c 	redirty_page_for_writepage(wbc, page);
page              324 fs/f2fs/checkpoint.c static int f2fs_write_meta_page(struct page *page,
page              327 fs/f2fs/checkpoint.c 	return __f2fs_write_meta_page(page, wbc, FS_META_IO);
page              384 fs/f2fs/checkpoint.c 			struct page *page = pvec.pages[i];
page              387 fs/f2fs/checkpoint.c 				prev = page->index - 1;
page              388 fs/f2fs/checkpoint.c 			if (nr_to_write != LONG_MAX && page->index != prev + 1) {
page              393 fs/f2fs/checkpoint.c 			lock_page(page);
page              395 fs/f2fs/checkpoint.c 			if (unlikely(page->mapping != mapping)) {
page              397 fs/f2fs/checkpoint.c 				unlock_page(page);
page              400 fs/f2fs/checkpoint.c 			if (!PageDirty(page)) {
page              405 fs/f2fs/checkpoint.c 			f2fs_wait_on_page_writeback(page, META, true, true);
page              407 fs/f2fs/checkpoint.c 			if (!clear_page_dirty_for_io(page))
page              410 fs/f2fs/checkpoint.c 			if (__f2fs_write_meta_page(page, &wbc, io_type)) {
page              411 fs/f2fs/checkpoint.c 				unlock_page(page);
page              415 fs/f2fs/checkpoint.c 			prev = page->index;
page              431 fs/f2fs/checkpoint.c static int f2fs_set_meta_page_dirty(struct page *page)
page              433 fs/f2fs/checkpoint.c 	trace_f2fs_set_page_dirty(page, META);
page              435 fs/f2fs/checkpoint.c 	if (!PageUptodate(page))
page              436 fs/f2fs/checkpoint.c 		SetPageUptodate(page);
page              437 fs/f2fs/checkpoint.c 	if (!PageDirty(page)) {
page              438 fs/f2fs/checkpoint.c 		__set_page_dirty_nobuffers(page);
page              439 fs/f2fs/checkpoint.c 		inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_META);
page              440 fs/f2fs/checkpoint.c 		f2fs_set_page_private(page, 0);
page              441 fs/f2fs/checkpoint.c 		f2fs_trace_pid(page);
page              704 fs/f2fs/checkpoint.c 		struct page *page;
page              707 fs/f2fs/checkpoint.c 		page = f2fs_get_meta_page(sbi, start_blk + i);
page              708 fs/f2fs/checkpoint.c 		if (IS_ERR(page)) {
page              709 fs/f2fs/checkpoint.c 			err = PTR_ERR(page);
page              713 fs/f2fs/checkpoint.c 		orphan_blk = (struct f2fs_orphan_block *)page_address(page);
page              718 fs/f2fs/checkpoint.c 				f2fs_put_page(page, 1);
page              722 fs/f2fs/checkpoint.c 		f2fs_put_page(page, 1);
page              746 fs/f2fs/checkpoint.c 	struct page *page = NULL;
page              761 fs/f2fs/checkpoint.c 		if (!page) {
page              762 fs/f2fs/checkpoint.c 			page = f2fs_grab_meta_page(sbi, start_blk++);
page              764 fs/f2fs/checkpoint.c 				(struct f2fs_orphan_block *)page_address(page);
page              779 fs/f2fs/checkpoint.c 			set_page_dirty(page);
page              780 fs/f2fs/checkpoint.c 			f2fs_put_page(page, 1);
page              783 fs/f2fs/checkpoint.c 			page = NULL;
page              787 fs/f2fs/checkpoint.c 	if (page) {
page              791 fs/f2fs/checkpoint.c 		set_page_dirty(page);
page              792 fs/f2fs/checkpoint.c 		f2fs_put_page(page, 1);
page              812 fs/f2fs/checkpoint.c 		struct f2fs_checkpoint **cp_block, struct page **cp_page,
page              843 fs/f2fs/checkpoint.c static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
page              846 fs/f2fs/checkpoint.c 	struct page *cp_page_1 = NULL, *cp_page_2 = NULL;
page              886 fs/f2fs/checkpoint.c 	struct page *cp1, *cp2, *cur_page;
page              999 fs/f2fs/checkpoint.c void f2fs_update_dirty_page(struct inode *inode, struct page *page)
page             1014 fs/f2fs/checkpoint.c 	f2fs_set_page_private(page, 0);
page             1015 fs/f2fs/checkpoint.c 	f2fs_trace_pid(page);
page             1349 fs/f2fs/checkpoint.c 	struct page *page = f2fs_grab_meta_page(sbi, blk_addr);
page             1352 fs/f2fs/checkpoint.c 	f2fs_wait_on_page_writeback(page, META, true, true);
page             1354 fs/f2fs/checkpoint.c 	memcpy(page_address(page), src, PAGE_SIZE);
page             1356 fs/f2fs/checkpoint.c 	set_page_dirty(page);
page             1357 fs/f2fs/checkpoint.c 	if (unlikely(!clear_page_dirty_for_io(page)))
page             1361 fs/f2fs/checkpoint.c 	err = __f2fs_write_meta_page(page, &wbc, FS_CP_META_IO);
page             1363 fs/f2fs/checkpoint.c 		f2fs_put_page(page, 1);
page             1368 fs/f2fs/checkpoint.c 	f2fs_put_page(page, 0);
page               34 fs/f2fs/data.c static bool __is_cp_guaranteed(struct page *page)
page               36 fs/f2fs/data.c 	struct address_space *mapping = page->mapping;
page               51 fs/f2fs/data.c 			is_cold_data(page))
page               56 fs/f2fs/data.c static enum count_type __read_io_type(struct page *page)
page               58 fs/f2fs/data.c 	struct address_space *mapping = page_file_mapping(page);
page               89 fs/f2fs/data.c 	struct page *page;
page               94 fs/f2fs/data.c 		page = bv->bv_page;
page               97 fs/f2fs/data.c 		if (bio->bi_status || PageError(page)) {
page               98 fs/f2fs/data.c 			ClearPageUptodate(page);
page              100 fs/f2fs/data.c 			ClearPageError(page);
page              102 fs/f2fs/data.c 			SetPageUptodate(page);
page              104 fs/f2fs/data.c 		dec_page_count(F2FS_P_SB(page), __read_io_type(page));
page              105 fs/f2fs/data.c 		unlock_page(page);
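The data.c hits at lines 89-105 are the per-segment walk of a completed read bio. Reassembled from those fragments (a sketch, not the verbatim function):

	struct bio_vec *bv;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bv, bio, iter_all) {
		struct page *page = bv->bv_page;

		if (bio->bi_status || PageError(page)) {
			ClearPageUptodate(page);	/* force a later re-read */
			ClearPageError(page);
		} else {
			SetPageUptodate(page);
		}
		dec_page_count(F2FS_P_SB(page), __read_io_type(page));
		unlock_page(page);	/* wakes readers blocked in lock_page() */
	}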
page              199 fs/f2fs/data.c 		struct page *page = bvec->bv_page;
page              200 fs/f2fs/data.c 		enum count_type type = WB_DATA_TYPE(page);
page              202 fs/f2fs/data.c 		if (IS_DUMMY_WRITTEN_PAGE(page)) {
page              203 fs/f2fs/data.c 			set_page_private(page, (unsigned long)NULL);
page              204 fs/f2fs/data.c 			ClearPagePrivate(page);
page              205 fs/f2fs/data.c 			unlock_page(page);
page              206 fs/f2fs/data.c 			mempool_free(page, sbi->write_io_dummy);
page              213 fs/f2fs/data.c 		fscrypt_finalize_bounce_page(&page);
page              216 fs/f2fs/data.c 			mapping_set_error(page->mapping, -EIO);
page              221 fs/f2fs/data.c 		f2fs_bug_on(sbi, page->mapping == NODE_MAPPING(sbi) &&
page              222 fs/f2fs/data.c 					page->index != nid_of_node(page));
page              225 fs/f2fs/data.c 		if (f2fs_in_warm_node_list(sbi, page))
page              226 fs/f2fs/data.c 			f2fs_del_fsync_node_entry(sbi, page);
page              227 fs/f2fs/data.c 		clear_cold_data(page);
page              228 fs/f2fs/data.c 		end_page_writeback(page);
page              332 fs/f2fs/data.c 			struct page *page =
page              335 fs/f2fs/data.c 			f2fs_bug_on(sbi, !page);
page              337 fs/f2fs/data.c 			zero_user_segment(page, 0, PAGE_SIZE);
page              338 fs/f2fs/data.c 			SetPagePrivate(page);
page              339 fs/f2fs/data.c 			set_page_private(page, (unsigned long)DUMMY_WRITTEN_PAGE);
page              340 fs/f2fs/data.c 			lock_page(page);
page              341 fs/f2fs/data.c 			if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
page              378 fs/f2fs/data.c 						struct page *page, nid_t ino)
page              381 fs/f2fs/data.c 	struct page *target;
page              387 fs/f2fs/data.c 	if (!inode && !page && !ino)
page              398 fs/f2fs/data.c 		if (page && page == target)
page              428 fs/f2fs/data.c 				struct inode *inode, struct page *page,
page              440 fs/f2fs/data.c 			ret = __has_merged_page(io->bio, inode, page, ino);
page              458 fs/f2fs/data.c 				struct inode *inode, struct page *page,
page              461 fs/f2fs/data.c 	__submit_merged_write_cond(sbi, inode, page, ino, type, false);
page              478 fs/f2fs/data.c 	struct page *page = fio->encrypted_page ?
page              479 fs/f2fs/data.c 			fio->encrypted_page : fio->page;
page              486 fs/f2fs/data.c 	trace_f2fs_submit_page_bio(page, fio);
page              492 fs/f2fs/data.c 	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
page              498 fs/f2fs/data.c 		wbc_account_cgroup_owner(fio->io_wbc, page, PAGE_SIZE);
page              503 fs/f2fs/data.c 			__read_io_type(page): WB_DATA_TYPE(fio->page));
page              549 fs/f2fs/data.c 	struct page *page = fio->encrypted_page ?
page              550 fs/f2fs/data.c 			fio->encrypted_page : fio->page;
page              556 fs/f2fs/data.c 	trace_f2fs_submit_page_bio(page, fio);
page              570 fs/f2fs/data.c 	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
page              577 fs/f2fs/data.c 		wbc_account_cgroup_owner(fio->io_wbc, page, PAGE_SIZE);
page              579 fs/f2fs/data.c 	inc_page_count(fio->sbi, WB_DATA_TYPE(page));
page              588 fs/f2fs/data.c 							struct page *page)
page              593 fs/f2fs/data.c 	if (!__has_merged_page(*bio, NULL, page, 0))
page              605 fs/f2fs/data.c 	struct page *bio_page;
page              625 fs/f2fs/data.c 	bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;
page              659 fs/f2fs/data.c 	trace_f2fs_submit_page_write(fio->page, fio);
page              713 fs/f2fs/data.c static int f2fs_submit_page_read(struct inode *inode, struct page *page,
page              719 fs/f2fs/data.c 	bio = f2fs_grab_read_bio(inode, blkaddr, 1, 0, page->index);
page              726 fs/f2fs/data.c 	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
page              730 fs/f2fs/data.c 	ClearPageError(page);
page              845 fs/f2fs/data.c struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
page              850 fs/f2fs/data.c 	struct page *page;
page              854 fs/f2fs/data.c 	page = f2fs_grab_cache_page(mapping, index, for_write);
page              855 fs/f2fs/data.c 	if (!page)
page              886 fs/f2fs/data.c 	if (PageUptodate(page)) {
page              887 fs/f2fs/data.c 		unlock_page(page);
page              888 fs/f2fs/data.c 		return page;
page              899 fs/f2fs/data.c 		zero_user_segment(page, 0, PAGE_SIZE);
page              900 fs/f2fs/data.c 		if (!PageUptodate(page))
page              901 fs/f2fs/data.c 			SetPageUptodate(page);
page              902 fs/f2fs/data.c 		unlock_page(page);
page              903 fs/f2fs/data.c 		return page;
page              906 fs/f2fs/data.c 	err = f2fs_submit_page_read(inode, page, dn.data_blkaddr);
page              909 fs/f2fs/data.c 	return page;
page              912 fs/f2fs/data.c 	f2fs_put_page(page, 1);
page              916 fs/f2fs/data.c struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index)
page              919 fs/f2fs/data.c 	struct page *page;
page              921 fs/f2fs/data.c 	page = find_get_page(mapping, index);
page              922 fs/f2fs/data.c 	if (page && PageUptodate(page))
page              923 fs/f2fs/data.c 		return page;
page              924 fs/f2fs/data.c 	f2fs_put_page(page, 0);
page              926 fs/f2fs/data.c 	page = f2fs_get_read_data_page(inode, index, 0, false);
page              927 fs/f2fs/data.c 	if (IS_ERR(page))
page              928 fs/f2fs/data.c 		return page;
page              930 fs/f2fs/data.c 	if (PageUptodate(page))
page              931 fs/f2fs/data.c 		return page;
page              933 fs/f2fs/data.c 	wait_on_page_locked(page);
page              934 fs/f2fs/data.c 	if (unlikely(!PageUptodate(page))) {
page              935 fs/f2fs/data.c 		f2fs_put_page(page, 0);
page              938 fs/f2fs/data.c 	return page;
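f2fs_find_data_page() (lines 916-938 above) is the usual two-step lookup: probe the cache locklessly, and only fall back to a real read on a miss or a stale page. A sketch assembled from the listed lines, with the final error value assumed:

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;		/* fast path: cached and valid */
	f2fs_put_page(page, 0);		/* tolerates page == NULL */

	page = f2fs_get_read_data_page(inode, index, 0, false);
	if (IS_ERR(page))
		return page;
	if (PageUptodate(page))
		return page;

	wait_on_page_locked(page);	/* a read was submitted; wait for it */
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 0);
		return ERR_PTR(-EIO);	/* assumed; the listing elides the value */
	}
	return page;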
page              946 fs/f2fs/data.c struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
page              950 fs/f2fs/data.c 	struct page *page;
page              952 fs/f2fs/data.c 	page = f2fs_get_read_data_page(inode, index, 0, for_write);
page              953 fs/f2fs/data.c 	if (IS_ERR(page))
page              954 fs/f2fs/data.c 		return page;
page              957 fs/f2fs/data.c 	lock_page(page);
page              958 fs/f2fs/data.c 	if (unlikely(page->mapping != mapping)) {
page              959 fs/f2fs/data.c 		f2fs_put_page(page, 1);
page              962 fs/f2fs/data.c 	if (unlikely(!PageUptodate(page))) {
page              963 fs/f2fs/data.c 		f2fs_put_page(page, 1);
page              966 fs/f2fs/data.c 	return page;
page              978 fs/f2fs/data.c struct page *f2fs_get_new_data_page(struct inode *inode,
page              979 fs/f2fs/data.c 		struct page *ipage, pgoff_t index, bool new_i_size)
page              982 fs/f2fs/data.c 	struct page *page;
page              986 fs/f2fs/data.c 	page = f2fs_grab_cache_page(mapping, index, true);
page              987 fs/f2fs/data.c 	if (!page) {
page              999 fs/f2fs/data.c 		f2fs_put_page(page, 1);
page             1005 fs/f2fs/data.c 	if (PageUptodate(page))
page             1009 fs/f2fs/data.c 		zero_user_segment(page, 0, PAGE_SIZE);
page             1010 fs/f2fs/data.c 		if (!PageUptodate(page))
page             1011 fs/f2fs/data.c 			SetPageUptodate(page);
page             1013 fs/f2fs/data.c 		f2fs_put_page(page, 1);
page             1017 fs/f2fs/data.c 		page = f2fs_get_lock_data_page(inode, index, true);
page             1018 fs/f2fs/data.c 		if (IS_ERR(page))
page             1019 fs/f2fs/data.c 			return page;
page             1025 fs/f2fs/data.c 	return page;
page             1470 fs/f2fs/data.c 	struct page *page;
page             1480 fs/f2fs/data.c 		page = f2fs_grab_cache_page(NODE_MAPPING(sbi),
page             1482 fs/f2fs/data.c 		if (!page)
page             1487 fs/f2fs/data.c 			f2fs_put_page(page, 1);
page             1499 fs/f2fs/data.c 		f2fs_put_page(page, 1);
page             1512 fs/f2fs/data.c 		page = f2fs_grab_cache_page(NODE_MAPPING(sbi), xnid, false);
page             1513 fs/f2fs/data.c 		if (!page)
page             1518 fs/f2fs/data.c 			f2fs_put_page(page, 1);
page             1525 fs/f2fs/data.c 		f2fs_put_page(page, 1);
page             1638 fs/f2fs/data.c static int f2fs_read_single_page(struct inode *inode, struct page *page,
page             1654 fs/f2fs/data.c 	block_in_file = (sector_t)page_index(page);
page             1685 fs/f2fs/data.c 		SetPageMappedToDisk(page);
page             1687 fs/f2fs/data.c 		if (!PageUptodate(page) && (!PageSwapCache(page) &&
page             1688 fs/f2fs/data.c 					!cleancache_get_page(page))) {
page             1689 fs/f2fs/data.c 			SetPageUptodate(page);
page             1700 fs/f2fs/data.c 		zero_user_segment(page, 0, PAGE_SIZE);
page             1701 fs/f2fs/data.c 		if (f2fs_need_verity(inode, page->index) &&
page             1702 fs/f2fs/data.c 		    !fsverity_verify_page(page)) {
page             1706 fs/f2fs/data.c 		if (!PageUptodate(page))
page             1707 fs/f2fs/data.c 			SetPageUptodate(page);
page             1708 fs/f2fs/data.c 		unlock_page(page);
page             1724 fs/f2fs/data.c 				is_readahead ? REQ_RAHEAD : 0, page->index);
page             1738 fs/f2fs/data.c 	if (bio_add_page(bio, page, blocksize, 0) < blocksize)
page             1742 fs/f2fs/data.c 	ClearPageError(page);
page             1750 fs/f2fs/data.c 	unlock_page(page);
page             1766 fs/f2fs/data.c 			struct list_head *pages, struct page *page,
page             1786 fs/f2fs/data.c 			page = list_last_entry(pages, struct page, lru);
page             1788 fs/f2fs/data.c 			prefetchw(&page->flags);
page             1789 fs/f2fs/data.c 			list_del(&page->lru);
page             1790 fs/f2fs/data.c 			if (add_to_page_cache_lru(page, mapping,
page             1791 fs/f2fs/data.c 						  page_index(page),
page             1796 fs/f2fs/data.c 		ret = f2fs_read_single_page(inode, page, nr_pages, &map, &bio,
page             1799 fs/f2fs/data.c 			SetPageError(page);
page             1800 fs/f2fs/data.c 			zero_user_segment(page, 0, PAGE_SIZE);
page             1801 fs/f2fs/data.c 			unlock_page(page);
page             1805 fs/f2fs/data.c 			put_page(page);
page             1813 fs/f2fs/data.c static int f2fs_read_data_page(struct file *file, struct page *page)
page             1815 fs/f2fs/data.c 	struct inode *inode = page_file_mapping(page)->host;
page             1818 fs/f2fs/data.c 	trace_f2fs_readpage(page, DATA);
page             1822 fs/f2fs/data.c 		ret = f2fs_read_inline_data(inode, page);
page             1824 fs/f2fs/data.c 		ret = f2fs_mpage_readpages(page_file_mapping(page),
page             1825 fs/f2fs/data.c 						NULL, page, 1, false);
page             1834 fs/f2fs/data.c 	struct page *page = list_last_entry(pages, struct page, lru);
page             1836 fs/f2fs/data.c 	trace_f2fs_readpages(inode, page, nr_pages);
page             1847 fs/f2fs/data.c 	struct inode *inode = fio->page->mapping->host;
page             1848 fs/f2fs/data.c 	struct page *mpage;
page             1858 fs/f2fs/data.c 	fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(fio->page,
page             1945 fs/f2fs/data.c 		if (is_cold_data(fio->page))
page             1947 fs/f2fs/data.c 		if (IS_ATOMIC_WRITTEN_PAGE(fio->page))
page             1958 fs/f2fs/data.c 	struct inode *inode = fio->page->mapping->host;
page             1968 fs/f2fs/data.c 	struct page *page = fio->page;
page             1969 fs/f2fs/data.c 	struct inode *inode = page->mapping->host;
page             1978 fs/f2fs/data.c 			f2fs_lookup_extent_cache(inode, page->index, &ei)) {
page             1979 fs/f2fs/data.c 		fio->old_blkaddr = ei.blk + page->index - ei.fofs;
page             1994 fs/f2fs/data.c 	err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
page             2002 fs/f2fs/data.c 		ClearPageUptodate(page);
page             2003 fs/f2fs/data.c 		clear_cold_data(page);
page             2024 fs/f2fs/data.c 		set_page_writeback(page);
page             2025 fs/f2fs/data.c 		ClearPageError(page);
page             2033 fs/f2fs/data.c 			if (PageWriteback(page))
page             2034 fs/f2fs/data.c 				end_page_writeback(page);
page             2038 fs/f2fs/data.c 		trace_f2fs_do_write_data_page(fio->page, IPU);
page             2060 fs/f2fs/data.c 	set_page_writeback(page);
page             2061 fs/f2fs/data.c 	ClearPageError(page);
page             2065 fs/f2fs/data.c 	trace_f2fs_do_write_data_page(page, OPU);
page             2067 fs/f2fs/data.c 	if (page->index == 0)
page             2077 fs/f2fs/data.c static int __write_data_page(struct page *page, bool *submitted,
page             2083 fs/f2fs/data.c 	struct inode *inode = page->mapping->host;
page             2088 fs/f2fs/data.c 	loff_t psize = (loff_t)(page->index + 1) << PAGE_SHIFT;
page             2099 fs/f2fs/data.c 		.page = page,
page             2109 fs/f2fs/data.c 	trace_f2fs_writepage(page, DATA);
page             2113 fs/f2fs/data.c 		mapping_set_error(page->mapping, -EIO);
page             2126 fs/f2fs/data.c 	if (page->index < end_index || f2fs_verity_in_progress(inode))
page             2134 fs/f2fs/data.c 	if ((page->index >= end_index + 1) || !offset)
page             2137 fs/f2fs/data.c 	zero_user_segment(page, offset, PAGE_SIZE);
page             2142 fs/f2fs/data.c 	if (f2fs_is_volatile_file(inode) && (!page->index ||
page             2163 fs/f2fs/data.c 		err = f2fs_write_inline_data(inode, page);
page             2192 fs/f2fs/data.c 		ClearPageUptodate(page);
page             2193 fs/f2fs/data.c 		clear_cold_data(page);
page             2197 fs/f2fs/data.c 		f2fs_submit_merged_write_cond(sbi, NULL, page, 0, DATA);
page             2203 fs/f2fs/data.c 	unlock_page(page);
page             2206 fs/f2fs/data.c 		f2fs_submit_ipu_bio(sbi, bio, page);
page             2211 fs/f2fs/data.c 		f2fs_submit_ipu_bio(sbi, bio, page);
page             2222 fs/f2fs/data.c 	redirty_page_for_writepage(wbc, page);
page             2231 fs/f2fs/data.c 	unlock_page(page);
page             2235 fs/f2fs/data.c static int f2fs_write_data_page(struct page *page,
page             2238 fs/f2fs/data.c 	return __write_data_page(page, NULL, NULL, NULL, wbc, FS_DATA_IO);
page             2306 fs/f2fs/data.c 			struct page *page = pvec.pages[i];
page             2316 fs/f2fs/data.c 			done_index = page->index;
page             2318 fs/f2fs/data.c 			lock_page(page);
page             2320 fs/f2fs/data.c 			if (unlikely(page->mapping != mapping)) {
page             2322 fs/f2fs/data.c 				unlock_page(page);
page             2326 fs/f2fs/data.c 			if (!PageDirty(page)) {
page             2331 fs/f2fs/data.c 			if (PageWriteback(page)) {
page             2333 fs/f2fs/data.c 					f2fs_wait_on_page_writeback(page,
page             2335 fs/f2fs/data.c 					f2fs_submit_ipu_bio(sbi, &bio, page);
page             2341 fs/f2fs/data.c 			if (!clear_page_dirty_for_io(page))
page             2344 fs/f2fs/data.c 			ret = __write_data_page(page, &submitted, &bio,
page             2352 fs/f2fs/data.c 					unlock_page(page);
page             2365 fs/f2fs/data.c 				done_index = page->index + 1;
page             2516 fs/f2fs/data.c 			struct page *page, loff_t pos, unsigned len,
page             2519 fs/f2fs/data.c 	struct inode *inode = page->mapping->host;
page             2520 fs/f2fs/data.c 	pgoff_t index = page->index;
page             2522 fs/f2fs/data.c 	struct page *ipage;
page             2560 fs/f2fs/data.c 			f2fs_do_read_inline_data(page, ipage);
page             2565 fs/f2fs/data.c 			err = f2fs_convert_inline_page(&dn, page);
page             2603 fs/f2fs/data.c 		struct page **pagep, void **fsdata)
page             2607 fs/f2fs/data.c 	struct page *page = NULL;
page             2643 fs/f2fs/data.c 	page = f2fs_pagecache_get_page(mapping, index,
page             2645 fs/f2fs/data.c 	if (!page) {
page             2650 fs/f2fs/data.c 	*pagep = page;
page             2652 fs/f2fs/data.c 	err = prepare_write_begin(sbi, page, pos, len,
page             2659 fs/f2fs/data.c 		unlock_page(page);
page             2661 fs/f2fs/data.c 		lock_page(page);
page             2662 fs/f2fs/data.c 		if (page->mapping != mapping) {
page             2664 fs/f2fs/data.c 			f2fs_put_page(page, 1);
page             2669 fs/f2fs/data.c 	f2fs_wait_on_page_writeback(page, DATA, false, true);
page             2671 fs/f2fs/data.c 	if (len == PAGE_SIZE || PageUptodate(page))
page             2676 fs/f2fs/data.c 		zero_user_segment(page, len, PAGE_SIZE);
page             2681 fs/f2fs/data.c 		zero_user_segment(page, 0, PAGE_SIZE);
page             2682 fs/f2fs/data.c 		SetPageUptodate(page);
page             2689 fs/f2fs/data.c 		err = f2fs_submit_page_read(inode, page, blkaddr);
page             2693 fs/f2fs/data.c 		lock_page(page);
page             2694 fs/f2fs/data.c 		if (unlikely(page->mapping != mapping)) {
page             2695 fs/f2fs/data.c 			f2fs_put_page(page, 1);
page             2698 fs/f2fs/data.c 		if (unlikely(!PageUptodate(page))) {
page             2706 fs/f2fs/data.c 	f2fs_put_page(page, 1);
page             2716 fs/f2fs/data.c 			struct page *page, void *fsdata)
page             2718 fs/f2fs/data.c 	struct inode *inode = page->mapping->host;
page             2727 fs/f2fs/data.c 	if (!PageUptodate(page)) {
page             2731 fs/f2fs/data.c 			SetPageUptodate(page);
page             2736 fs/f2fs/data.c 	set_page_dirty(page);
page             2742 fs/f2fs/data.c 	f2fs_put_page(page, 1);
page             2886 fs/f2fs/data.c void f2fs_invalidate_page(struct page *page, unsigned int offset,
page             2889 fs/f2fs/data.c 	struct inode *inode = page->mapping->host;
page             2896 fs/f2fs/data.c 	if (PageDirty(page)) {
page             2907 fs/f2fs/data.c 	clear_cold_data(page);
page             2909 fs/f2fs/data.c 	if (IS_ATOMIC_WRITTEN_PAGE(page))
page             2910 fs/f2fs/data.c 		return f2fs_drop_inmem_page(inode, page);
page             2912 fs/f2fs/data.c 	f2fs_clear_page_private(page);
page             2915 fs/f2fs/data.c int f2fs_release_page(struct page *page, gfp_t wait)
page             2918 fs/f2fs/data.c 	if (PageDirty(page))
page             2922 fs/f2fs/data.c 	if (IS_ATOMIC_WRITTEN_PAGE(page))
page             2925 fs/f2fs/data.c 	clear_cold_data(page);
page             2926 fs/f2fs/data.c 	f2fs_clear_page_private(page);
page             2930 fs/f2fs/data.c static int f2fs_set_data_page_dirty(struct page *page)
page             2932 fs/f2fs/data.c 	struct inode *inode = page_file_mapping(page)->host;
page             2934 fs/f2fs/data.c 	trace_f2fs_set_page_dirty(page, DATA);
page             2936 fs/f2fs/data.c 	if (!PageUptodate(page))
page             2937 fs/f2fs/data.c 		SetPageUptodate(page);
page             2938 fs/f2fs/data.c 	if (PageSwapCache(page))
page             2939 fs/f2fs/data.c 		return __set_page_dirty_nobuffers(page);
page             2942 fs/f2fs/data.c 		if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
page             2943 fs/f2fs/data.c 			f2fs_register_inmem_page(inode, page);
page             2953 fs/f2fs/data.c 	if (!PageDirty(page)) {
page             2954 fs/f2fs/data.c 		__set_page_dirty_nobuffers(page);
page             2955 fs/f2fs/data.c 		f2fs_update_dirty_page(inode, page);
page             2979 fs/f2fs/data.c 		struct page *newpage, struct page *page, enum migrate_mode mode)
page             2983 fs/f2fs/data.c 	bool atomic_written = IS_ATOMIC_WRITTEN_PAGE(page);
page             2985 fs/f2fs/data.c 	BUG_ON(PageWriteback(page));
page             2998 fs/f2fs/data.c 				page, extra_count);
page             3008 fs/f2fs/data.c 			if (cur->page == page) {
page             3009 fs/f2fs/data.c 				cur->page = newpage;
page             3013 fs/f2fs/data.c 		put_page(page);
page             3017 fs/f2fs/data.c 	if (PagePrivate(page)) {
page             3018 fs/f2fs/data.c 		f2fs_set_page_private(newpage, page_private(page));
page             3019 fs/f2fs/data.c 		f2fs_clear_page_private(page);
page             3023 fs/f2fs/data.c 		migrate_page_copy(newpage, page);
page             3025 fs/f2fs/data.c 		migrate_page_states(newpage, page);
page             3188 fs/f2fs/data.c void f2fs_clear_page_cache_dirty_tag(struct page *page)
page             3190 fs/f2fs/data.c 	struct address_space *mapping = page_mapping(page);
page             3194 fs/f2fs/data.c 	__xa_clear_mark(&mapping->i_pages, page_index(page),
page               86 fs/f2fs/dir.c  				struct page *dentry_page,
page               90 fs/f2fs/dir.c  				struct page **res_page)
page              256 fs/f2fs/dir.c  					struct page **res_page)
page              262 fs/f2fs/dir.c  	struct page *dentry_page;
page              307 fs/f2fs/dir.c  			struct fscrypt_name *fname, struct page **res_page)
page              353 fs/f2fs/dir.c  			const struct qstr *child, struct page **res_page)
page              382 fs/f2fs/dir.c  struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p)
page              390 fs/f2fs/dir.c  							struct page **page)
page              395 fs/f2fs/dir.c  	de = f2fs_find_entry(dir, qstr, page);
page              398 fs/f2fs/dir.c  		f2fs_put_page(*page, 0);
page              405 fs/f2fs/dir.c  		struct page *page, struct inode *inode)
page              408 fs/f2fs/dir.c  	lock_page(page);
page              409 fs/f2fs/dir.c  	f2fs_wait_on_page_writeback(page, type, true, true);
page              412 fs/f2fs/dir.c  	set_page_dirty(page);
page              416 fs/f2fs/dir.c  	f2fs_put_page(page, 1);
page              419 fs/f2fs/dir.c  static void init_dent_inode(const struct qstr *name, struct page *ipage)
page              446 fs/f2fs/dir.c  		struct inode *parent, struct page *page)
page              448 fs/f2fs/dir.c  	struct page *dentry_page;
page              453 fs/f2fs/dir.c  		return f2fs_make_empty_inline_dir(inode, parent, page);
page              455 fs/f2fs/dir.c  	dentry_page = f2fs_get_new_data_page(inode, page, 0, true);
page              469 fs/f2fs/dir.c  struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir,
page              471 fs/f2fs/dir.c  			struct page *dpage)
page              473 fs/f2fs/dir.c  	struct page *page;
page              478 fs/f2fs/dir.c  		page = f2fs_new_inode_page(inode);
page              479 fs/f2fs/dir.c  		if (IS_ERR(page))
page              480 fs/f2fs/dir.c  			return page;
page              484 fs/f2fs/dir.c  			get_page(page);
page              485 fs/f2fs/dir.c  			err = make_empty_dir(inode, dir, page);
page              487 fs/f2fs/dir.c  				lock_page(page);
page              490 fs/f2fs/dir.c  			put_page(page);
page              493 fs/f2fs/dir.c  		err = f2fs_init_acl(inode, dir, page, dpage);
page              497 fs/f2fs/dir.c  		err = f2fs_init_security(inode, dir, orig_name, page);
page              503 fs/f2fs/dir.c  			err = fscrypt_inherit_context(dir, inode, page, false);
page              508 fs/f2fs/dir.c  		page = f2fs_get_node_page(F2FS_I_SB(dir), inode->i_ino);
page              509 fs/f2fs/dir.c  		if (IS_ERR(page))
page              510 fs/f2fs/dir.c  			return page;
page              514 fs/f2fs/dir.c  		init_dent_inode(new_name, page);
page              534 fs/f2fs/dir.c  	return page;
page              538 fs/f2fs/dir.c  	f2fs_update_inode(inode, page);
page              539 fs/f2fs/dir.c  	f2fs_put_page(page, 1);
page              613 fs/f2fs/dir.c  	struct page *dentry_page = NULL;
page              616 fs/f2fs/dir.c  	struct page *page = NULL;
page              670 fs/f2fs/dir.c  		page = f2fs_init_inode_metadata(inode, dir, new_name,
page              672 fs/f2fs/dir.c  		if (IS_ERR(page)) {
page              673 fs/f2fs/dir.c  			err = PTR_ERR(page);
page              688 fs/f2fs/dir.c  			f2fs_update_inode(inode, page);
page              690 fs/f2fs/dir.c  		f2fs_put_page(page, 1);
page              731 fs/f2fs/dir.c  	struct page *page = NULL;
page              747 fs/f2fs/dir.c  		de = __f2fs_find_entry(dir, &fname, &page);
page              751 fs/f2fs/dir.c  		f2fs_put_page(page, 0);
page              753 fs/f2fs/dir.c  	} else if (IS_ERR(page)) {
page              754 fs/f2fs/dir.c  		err = PTR_ERR(page);
page              764 fs/f2fs/dir.c  	struct page *page;
page              768 fs/f2fs/dir.c  	page = f2fs_init_inode_metadata(inode, dir, NULL, NULL, NULL);
page              769 fs/f2fs/dir.c  	if (IS_ERR(page)) {
page              770 fs/f2fs/dir.c  		err = PTR_ERR(page);
page              773 fs/f2fs/dir.c  	f2fs_put_page(page, 1);
page              809 fs/f2fs/dir.c  void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
page              823 fs/f2fs/dir.c  		return f2fs_delete_inline_entry(dentry, page, dir, inode);
page              825 fs/f2fs/dir.c  	lock_page(page);
page              826 fs/f2fs/dir.c  	f2fs_wait_on_page_writeback(page, DATA, true, true);
page              828 fs/f2fs/dir.c  	dentry_blk = page_address(page);
page              837 fs/f2fs/dir.c  	set_page_dirty(page);
page              846 fs/f2fs/dir.c  		!f2fs_truncate_hole(dir, page->index, page->index + 1)) {
page              847 fs/f2fs/dir.c  		f2fs_clear_page_cache_dirty_tag(page);
page              848 fs/f2fs/dir.c  		clear_page_dirty_for_io(page);
page              849 fs/f2fs/dir.c  		f2fs_clear_page_private(page);
page              850 fs/f2fs/dir.c  		ClearPageUptodate(page);
page              851 fs/f2fs/dir.c  		clear_cold_data(page);
page              855 fs/f2fs/dir.c  	f2fs_put_page(page, 1);
page              861 fs/f2fs/dir.c  	struct page *dentry_page;
page              979 fs/f2fs/dir.c  	struct page *dentry_page = NULL;
page              254 fs/f2fs/f2fs.h 	struct page *page;	/* warm node page pointer */
page              848 fs/f2fs/f2fs.h 	struct page *inode_page;	/* its inode page, NULL is possible */
page              849 fs/f2fs/f2fs.h 	struct page *node_page;		/* cached direct node page */
page              860 fs/f2fs/f2fs.h 		struct page *ipage, struct page *npage, nid_t nid)
page             1057 fs/f2fs/f2fs.h 	struct page *page;	/* page to be written */
page             1058 fs/f2fs/f2fs.h 	struct page *encrypted_page;	/* encrypted page */
page             1512 fs/f2fs/f2fs.h static inline struct f2fs_sb_info *F2FS_P_SB(struct page *page)
page             1514 fs/f2fs/f2fs.h 	return F2FS_M_SB(page_file_mapping(page));
page             1527 fs/f2fs/f2fs.h static inline struct f2fs_node *F2FS_NODE(struct page *page)
page             1529 fs/f2fs/f2fs.h 	return (struct f2fs_node *)page_address(page);
page             1532 fs/f2fs/f2fs.h static inline struct f2fs_inode *F2FS_INODE(struct page *page)
page             1534 fs/f2fs/f2fs.h 	return &((struct f2fs_node *)page_address(page))->i;
page             2129 fs/f2fs/f2fs.h static inline struct page *f2fs_grab_cache_page(struct address_space *mapping,
page             2132 fs/f2fs/f2fs.h 	struct page *page;
page             2136 fs/f2fs/f2fs.h 			page = find_get_page_flags(mapping, index,
page             2139 fs/f2fs/f2fs.h 			page = find_lock_page(mapping, index);
page             2140 fs/f2fs/f2fs.h 		if (page)
page             2141 fs/f2fs/f2fs.h 			return page;
page             2154 fs/f2fs/f2fs.h static inline struct page *f2fs_pagecache_get_page(
page             2166 fs/f2fs/f2fs.h static inline void f2fs_copy_page(struct page *src, struct page *dst)
page             2176 fs/f2fs/f2fs.h static inline void f2fs_put_page(struct page *page, int unlock)
page             2178 fs/f2fs/f2fs.h 	if (!page)
page             2182 fs/f2fs/f2fs.h 		f2fs_bug_on(F2FS_P_SB(page), !PageLocked(page));
page             2183 fs/f2fs/f2fs.h 		unlock_page(page);
page             2185 fs/f2fs/f2fs.h 	put_page(page);
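The f2fs_put_page() body above is the release half of nearly every page dance in this listing: the second argument records whether the caller still holds the page lock. A usage contrast, with contexts taken from helpers listed earlier:

	/* f2fs_grab_cache_page() hands back a *locked* page ... */
	page = f2fs_grab_cache_page(mapping, index, false);
	/* ... use the page ... */
	f2fs_put_page(page, 1);		/* unlock_page() + put_page() */

	/* ... while find_get_page() hands back an unlocked one. */
	page = find_get_page(mapping, index);
	/* ... use the page ... */
	f2fs_put_page(page, 0);		/* put_page() only */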
page             2267 fs/f2fs/f2fs.h static inline bool IS_INODE(struct page *page)
page             2269 fs/f2fs/f2fs.h 	struct f2fs_node *p = F2FS_NODE(page);
page             2287 fs/f2fs/f2fs.h 			struct page *node_page, unsigned int offset)
page             2619 fs/f2fs/f2fs.h static inline void *inline_xattr_addr(struct inode *inode, struct page *page)
page             2621 fs/f2fs/f2fs.h 	struct f2fs_inode *ri = F2FS_INODE(page);
page             2679 fs/f2fs/f2fs.h static inline void *inline_data_addr(struct inode *inode, struct page *page)
page             2681 fs/f2fs/f2fs.h 	struct f2fs_inode *ri = F2FS_INODE(page);
page             2901 fs/f2fs/f2fs.h static inline void f2fs_set_page_private(struct page *page,
page             2904 fs/f2fs/f2fs.h 	if (PagePrivate(page))
page             2907 fs/f2fs/f2fs.h 	get_page(page);
page             2908 fs/f2fs/f2fs.h 	SetPagePrivate(page);
page             2909 fs/f2fs/f2fs.h 	set_page_private(page, data);
page             2912 fs/f2fs/f2fs.h static inline void f2fs_clear_page_private(struct page *page)
page             2914 fs/f2fs/f2fs.h 	if (!PagePrivate(page))
page             2917 fs/f2fs/f2fs.h 	set_page_private(page, 0);
page             2918 fs/f2fs/f2fs.h 	ClearPagePrivate(page);
page             2919 fs/f2fs/f2fs.h 	f2fs_put_page(page, 0);
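f2fs_set_page_private() and f2fs_clear_page_private() above form a strict pair: the setter pins the page with get_page() while it carries private state, and the clearer drops that pin via f2fs_put_page(page, 0). Leaving them unbalanced leaks the page:

	f2fs_set_page_private(page, 0);		/* get_page() + SetPagePrivate() */
	/* ... page stays pinned while it holds private state ... */
	f2fs_clear_page_private(page);		/* ClearPagePrivate() + put_page() */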
page             2944 fs/f2fs/f2fs.h bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page);
page             2945 fs/f2fs/f2fs.h void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page);
page             2949 fs/f2fs/f2fs.h void f2fs_update_inode(struct inode *inode, struct page *node_page);
page             2978 fs/f2fs/f2fs.h struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir,
page             2980 fs/f2fs/f2fs.h 			const struct qstr *orig_name, struct page *dpage);
page             2986 fs/f2fs/f2fs.h 			struct fscrypt_name *fname, struct page **res_page);
page             2988 fs/f2fs/f2fs.h 			const struct qstr *child, struct page **res_page);
page             2989 fs/f2fs/f2fs.h struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p);
page             2991 fs/f2fs/f2fs.h 			struct page **page);
page             2993 fs/f2fs/f2fs.h 			struct page *page, struct inode *inode);
page             3004 fs/f2fs/f2fs.h void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
page             3041 fs/f2fs/f2fs.h bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page);
page             3043 fs/f2fs/f2fs.h void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page);
page             3057 fs/f2fs/f2fs.h struct page *f2fs_new_inode_page(struct inode *inode);
page             3058 fs/f2fs/f2fs.h struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs);
page             3060 fs/f2fs/f2fs.h struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid);
page             3061 fs/f2fs/f2fs.h struct page *f2fs_get_node_page_ra(struct page *parent, int start);
page             3062 fs/f2fs/f2fs.h int f2fs_move_node_page(struct page *node_page, int gc_type);
page             3074 fs/f2fs/f2fs.h void f2fs_recover_inline_xattr(struct inode *inode, struct page *page);
page             3075 fs/f2fs/f2fs.h int f2fs_recover_xattr_data(struct inode *inode, struct page *page);
page             3076 fs/f2fs/f2fs.h int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page);
page             3089 fs/f2fs/f2fs.h void f2fs_register_inmem_page(struct inode *inode, struct page *page);
page             3092 fs/f2fs/f2fs.h void f2fs_drop_inmem_page(struct inode *inode, struct page *page);
page             3118 fs/f2fs/f2fs.h struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno);
page             3121 fs/f2fs/f2fs.h void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
page             3134 fs/f2fs/f2fs.h void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
page             3138 fs/f2fs/f2fs.h void f2fs_wait_on_page_writeback(struct page *page,
page             3160 fs/f2fs/f2fs.h struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
page             3161 fs/f2fs/f2fs.h struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
page             3162 fs/f2fs/f2fs.h struct page *f2fs_get_meta_page_nofail(struct f2fs_sb_info *sbi, pgoff_t index);
page             3163 fs/f2fs/f2fs.h struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index);
page             3186 fs/f2fs/f2fs.h void f2fs_update_dirty_page(struct inode *inode, struct page *page);
page             3202 fs/f2fs/f2fs.h 				struct inode *inode, struct page *page,
page             3218 fs/f2fs/f2fs.h struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
page             3220 fs/f2fs/f2fs.h struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index);
page             3221 fs/f2fs/f2fs.h struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
page             3223 fs/f2fs/f2fs.h struct page *f2fs_get_new_data_page(struct inode *inode,
page             3224 fs/f2fs/f2fs.h 			struct page *ipage, pgoff_t index, bool new_i_size);
page             3233 fs/f2fs/f2fs.h void f2fs_invalidate_page(struct page *page, unsigned int offset,
page             3235 fs/f2fs/f2fs.h int f2fs_release_page(struct page *page, gfp_t wait);
page             3237 fs/f2fs/f2fs.h int f2fs_migrate_page(struct address_space *mapping, struct page *newpage,
page             3238 fs/f2fs/f2fs.h 			struct page *page, enum migrate_mode mode);
page             3241 fs/f2fs/f2fs.h void f2fs_clear_page_cache_dirty_tag(struct page *page);
page             3493 fs/f2fs/f2fs.h void f2fs_do_read_inline_data(struct page *page, struct page *ipage);
page             3495 fs/f2fs/f2fs.h 						struct page *ipage, u64 from);
page             3496 fs/f2fs/f2fs.h int f2fs_read_inline_data(struct inode *inode, struct page *page);
page             3497 fs/f2fs/f2fs.h int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page);
page             3499 fs/f2fs/f2fs.h int f2fs_write_inline_data(struct inode *inode, struct page *page);
page             3500 fs/f2fs/f2fs.h bool f2fs_recover_inline_data(struct inode *inode, struct page *npage);
page             3502 fs/f2fs/f2fs.h 			struct fscrypt_name *fname, struct page **res_page);
page             3504 fs/f2fs/f2fs.h 			struct page *ipage);
page             3509 fs/f2fs/f2fs.h 				struct page *page, struct inode *dir,
page               50 fs/f2fs/file.c 	struct page *page = vmf->page;
page               75 fs/f2fs/file.c 	lock_page(page);
page               76 fs/f2fs/file.c 	if (unlikely(page->mapping != inode->i_mapping ||
page               77 fs/f2fs/file.c 			page_offset(page) > i_size_read(inode) ||
page               78 fs/f2fs/file.c 			!PageUptodate(page))) {
page               79 fs/f2fs/file.c 		unlock_page(page);
page               87 fs/f2fs/file.c 	err = f2fs_get_block(&dn, page->index);
page               91 fs/f2fs/file.c 		unlock_page(page);
page               96 fs/f2fs/file.c 	f2fs_wait_on_page_writeback(page, DATA, false, true);
page              104 fs/f2fs/file.c 	if (PageMappedToDisk(page))
page              108 fs/f2fs/file.c 	if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
page              113 fs/f2fs/file.c 		zero_user_segment(page, offset, PAGE_SIZE);
page              115 fs/f2fs/file.c 	set_page_dirty(page);
page              116 fs/f2fs/file.c 	if (!PageUptodate(page))
page              117 fs/f2fs/file.c 		SetPageUptodate(page);
page              122 fs/f2fs/file.c 	trace_f2fs_vm_page_mkwrite(page, DATA);
page              184 fs/f2fs/file.c 	struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
page              346 fs/f2fs/file.c 	struct page *page;
page              354 fs/f2fs/file.c 				      1, &page);
page              357 fs/f2fs/file.c 	pgofs = page->index;
page              358 fs/f2fs/file.c 	put_page(page);
page              578 fs/f2fs/file.c 	struct page *page;
page              584 fs/f2fs/file.c 		page = find_lock_page(mapping, index);
page              585 fs/f2fs/file.c 		if (page && PageUptodate(page))
page              587 fs/f2fs/file.c 		f2fs_put_page(page, 1);
page              591 fs/f2fs/file.c 	page = f2fs_get_lock_data_page(inode, index, true);
page              592 fs/f2fs/file.c 	if (IS_ERR(page))
page              593 fs/f2fs/file.c 		return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
page              595 fs/f2fs/file.c 	f2fs_wait_on_page_writeback(page, DATA, true, true);
page              596 fs/f2fs/file.c 	zero_user(page, offset, PAGE_SIZE - offset);
page              601 fs/f2fs/file.c 		set_page_dirty(page);
page              602 fs/f2fs/file.c 	f2fs_put_page(page, 1);
page              612 fs/f2fs/file.c 	struct page *ipage;
page              891 fs/f2fs/file.c 	struct page *page;
page              899 fs/f2fs/file.c 	page = f2fs_get_new_data_page(inode, NULL, index, false);
page              902 fs/f2fs/file.c 	if (IS_ERR(page))
page              903 fs/f2fs/file.c 		return PTR_ERR(page);
page              905 fs/f2fs/file.c 	f2fs_wait_on_page_writeback(page, DATA, true, true);
page              906 fs/f2fs/file.c 	zero_user(page, start, len);
page              907 fs/f2fs/file.c 	set_page_dirty(page);
page              908 fs/f2fs/file.c 	f2fs_put_page(page, 1);
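Lines 891-908 of fs/f2fs/file.c above show the canonical way to zero a sub-block range through the pagecache instead of on disk: materialize the page, wait out any in-flight writeback, zero in place, and re-dirty. Reassembled as a sketch:

	page = f2fs_get_new_data_page(inode, NULL, index, false);
	if (IS_ERR(page))
		return PTR_ERR(page);

	f2fs_wait_on_page_writeback(page, DATA, true, true);
	zero_user(page, start, len);	/* kmap + memset + flush_dcache_page */
	set_page_dirty(page);		/* writeback will rewrite the block */
	f2fs_put_page(page, 1);
	return 0;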
page             1144 fs/f2fs/file.c 			struct page *psrc, *pdst;
page             2486 fs/f2fs/file.c 			struct page *page;
page             2488 fs/f2fs/file.c 			page = f2fs_get_lock_data_page(inode, idx, true);
page             2489 fs/f2fs/file.c 			if (IS_ERR(page)) {
page             2490 fs/f2fs/file.c 				err = PTR_ERR(page);
page             2494 fs/f2fs/file.c 			set_page_dirty(page);
page             2495 fs/f2fs/file.c 			f2fs_put_page(page, 1);
page             2819 fs/f2fs/file.c 	struct page *ipage;
page              526 fs/f2fs/gc.c   		struct page *node_page;
page              613 fs/f2fs/gc.c   	struct page *node_page;
page              663 fs/f2fs/gc.c   	struct page *page;
page              678 fs/f2fs/gc.c   	page = f2fs_grab_cache_page(mapping, index, true);
page              679 fs/f2fs/gc.c   	if (!page)
page              709 fs/f2fs/gc.c   	fio.page = page;
page              716 fs/f2fs/gc.c   	f2fs_wait_on_page_writeback(page, DATA, true, true);
page              732 fs/f2fs/gc.c   	f2fs_put_page(page, 1);
page              737 fs/f2fs/gc.c   	f2fs_put_page(page, 1);
page              762 fs/f2fs/gc.c   	struct page *page, *mpage;
page              768 fs/f2fs/gc.c   	page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
page              769 fs/f2fs/gc.c   	if (!page)
page              796 fs/f2fs/gc.c   		ClearPageUptodate(page);
page              805 fs/f2fs/gc.c   	f2fs_wait_on_page_writeback(page, DATA, true, true);
page              816 fs/f2fs/gc.c   	fio.page = page;
page              869 fs/f2fs/gc.c   	ClearPageError(page);
page              889 fs/f2fs/gc.c   	if (page->index == 0)
page              903 fs/f2fs/gc.c   	f2fs_put_page(page, 1);
page              910 fs/f2fs/gc.c   	struct page *page;
page              913 fs/f2fs/gc.c   	page = f2fs_get_lock_data_page(inode, bidx, true);
page              914 fs/f2fs/gc.c   	if (IS_ERR(page))
page              915 fs/f2fs/gc.c   		return PTR_ERR(page);
page              936 fs/f2fs/gc.c   		if (PageWriteback(page)) {
page              940 fs/f2fs/gc.c   		set_page_dirty(page);
page              941 fs/f2fs/gc.c   		set_cold_data(page);
page              951 fs/f2fs/gc.c   			.page = page,
page              956 fs/f2fs/gc.c   		bool is_dirty = PageDirty(page);
page              959 fs/f2fs/gc.c   		f2fs_wait_on_page_writeback(page, DATA, true, true);
page              961 fs/f2fs/gc.c   		set_page_dirty(page);
page              962 fs/f2fs/gc.c   		if (clear_page_dirty_for_io(page)) {
page              967 fs/f2fs/gc.c   		set_cold_data(page);
page              971 fs/f2fs/gc.c   			clear_cold_data(page);
page              977 fs/f2fs/gc.c   				set_page_dirty(page);
page              981 fs/f2fs/gc.c   	f2fs_put_page(page, 1);
page             1008 fs/f2fs/gc.c   		struct page *data_page;
page             1151 fs/f2fs/gc.c   	struct page *sum_page;
page               43 fs/f2fs/inline.c void f2fs_do_read_inline_data(struct page *page, struct page *ipage)
page               45 fs/f2fs/inline.c 	struct inode *inode = page->mapping->host;
page               48 fs/f2fs/inline.c 	if (PageUptodate(page))
page               51 fs/f2fs/inline.c 	f2fs_bug_on(F2FS_P_SB(page), page->index);
page               53 fs/f2fs/inline.c 	zero_user_segment(page, MAX_INLINE_DATA(inode), PAGE_SIZE);
page               57 fs/f2fs/inline.c 	dst_addr = kmap_atomic(page);
page               59 fs/f2fs/inline.c 	flush_dcache_page(page);
page               61 fs/f2fs/inline.c 	if (!PageUptodate(page))
page               62 fs/f2fs/inline.c 		SetPageUptodate(page);
page               66 fs/f2fs/inline.c 					struct page *ipage, u64 from)
page               83 fs/f2fs/inline.c int f2fs_read_inline_data(struct inode *inode, struct page *page)
page               85 fs/f2fs/inline.c 	struct page *ipage;
page               89 fs/f2fs/inline.c 		unlock_page(page);
page               98 fs/f2fs/inline.c 	if (page->index)
page               99 fs/f2fs/inline.c 		zero_user_segment(page, 0, PAGE_SIZE);
page              101 fs/f2fs/inline.c 		f2fs_do_read_inline_data(page, ipage);
page              103 fs/f2fs/inline.c 	if (!PageUptodate(page))
page              104 fs/f2fs/inline.c 		SetPageUptodate(page);
page              106 fs/f2fs/inline.c 	unlock_page(page);
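f2fs_do_read_inline_data() (inline.c lines 43-62 above) never touches the block layer: inline file data is copied straight out of the cached inode page. Reassembled from the fragments, with the memcpy line an assumption (the source address presumably comes from inline_data_addr(), listed under f2fs.h line 2679):

	zero_user_segment(page, MAX_INLINE_DATA(inode), PAGE_SIZE);

	src_addr = inline_data_addr(inode, ipage);
	dst_addr = kmap_atomic(page);
	memcpy(dst_addr, src_addr, MAX_INLINE_DATA(inode));
	kunmap_atomic(dst_addr);
	flush_dcache_page(page);

	if (!PageUptodate(page))
		SetPageUptodate(page);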
page              110 fs/f2fs/inline.c int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
page              118 fs/f2fs/inline.c 		.page = page,
page              149 fs/f2fs/inline.c 	f2fs_bug_on(F2FS_P_SB(page), PageWriteback(page));
page              151 fs/f2fs/inline.c 	f2fs_do_read_inline_data(page, dn->inode_page);
page              152 fs/f2fs/inline.c 	set_page_dirty(page);
page              155 fs/f2fs/inline.c 	dirty = clear_page_dirty_for_io(page);
page              158 fs/f2fs/inline.c 	set_page_writeback(page);
page              159 fs/f2fs/inline.c 	ClearPageError(page);
page              163 fs/f2fs/inline.c 	f2fs_wait_on_page_writeback(page, DATA, true, true);
page              186 fs/f2fs/inline.c 	struct page *ipage, *page;
page              192 fs/f2fs/inline.c 	page = f2fs_grab_cache_page(inode->i_mapping, 0, false);
page              193 fs/f2fs/inline.c 	if (!page)
page              207 fs/f2fs/inline.c 		err = f2fs_convert_inline_page(&dn, page);
page              213 fs/f2fs/inline.c 	f2fs_put_page(page, 1);
page              220 fs/f2fs/inline.c int f2fs_write_inline_data(struct inode *inode, struct page *page)
page              236 fs/f2fs/inline.c 	f2fs_bug_on(F2FS_I_SB(inode), page->index);
page              239 fs/f2fs/inline.c 	src_addr = kmap_atomic(page);
page              245 fs/f2fs/inline.c 	f2fs_clear_page_cache_dirty_tag(page);
page              255 fs/f2fs/inline.c bool f2fs_recover_inline_data(struct inode *inode, struct page *npage)
page              260 fs/f2fs/inline.c 	struct page *ipage;
page              308 fs/f2fs/inline.c 			struct fscrypt_name *fname, struct page **res_page)
page              314 fs/f2fs/inline.c 	struct page *ipage;
page              340 fs/f2fs/inline.c 							struct page *ipage)
page              362 fs/f2fs/inline.c static int f2fs_move_inline_dirents(struct inode *dir, struct page *ipage,
page              365 fs/f2fs/inline.c 	struct page *page;
page              371 fs/f2fs/inline.c 	page = f2fs_grab_cache_page(dir->i_mapping, 0, false);
page              372 fs/f2fs/inline.c 	if (!page) {
page              384 fs/f2fs/inline.c 		set_sbi_flag(F2FS_P_SB(page), SBI_NEED_FSCK);
page              385 fs/f2fs/inline.c 		f2fs_warn(F2FS_P_SB(page), "%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, run fsck to fix.",
page              391 fs/f2fs/inline.c 	f2fs_wait_on_page_writeback(page, DATA, true, true);
page              393 fs/f2fs/inline.c 	dentry_blk = page_address(page);
page              410 fs/f2fs/inline.c 	if (!PageUptodate(page))
page              411 fs/f2fs/inline.c 		SetPageUptodate(page);
page              412 fs/f2fs/inline.c 	set_page_dirty(page);
page              432 fs/f2fs/inline.c 	f2fs_put_page(page, 1);
page              483 fs/f2fs/inline.c static int f2fs_move_rehashed_dirents(struct inode *dir, struct page *ipage,
page              533 fs/f2fs/inline.c static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage,
page              547 fs/f2fs/inline.c 	struct page *ipage;
page              553 fs/f2fs/inline.c 	struct page *page = NULL;
page              574 fs/f2fs/inline.c 		page = f2fs_init_inode_metadata(inode, dir, new_name,
page              576 fs/f2fs/inline.c 		if (IS_ERR(page)) {
page              577 fs/f2fs/inline.c 			err = PTR_ERR(page);
page              595 fs/f2fs/inline.c 			f2fs_update_inode(inode, page);
page              597 fs/f2fs/inline.c 		f2fs_put_page(page, 1);
page              609 fs/f2fs/inline.c void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry, struct page *page,
page              618 fs/f2fs/inline.c 	lock_page(page);
page              619 fs/f2fs/inline.c 	f2fs_wait_on_page_writeback(page, NODE, true, true);
page              621 fs/f2fs/inline.c 	inline_dentry = inline_data_addr(dir, page);
page              628 fs/f2fs/inline.c 	set_page_dirty(page);
page              629 fs/f2fs/inline.c 	f2fs_put_page(page, 1);
page              641 fs/f2fs/inline.c 	struct page *ipage;
page              667 fs/f2fs/inline.c 	struct page *ipage = NULL;
page              706 fs/f2fs/inline.c 	struct page *ipage;
page              103 fs/f2fs/inode.c static void __recover_inline_status(struct inode *inode, struct page *ipage)
page              122 fs/f2fs/inode.c static bool f2fs_enable_inode_chksum(struct f2fs_sb_info *sbi, struct page *page)
page              124 fs/f2fs/inode.c 	struct f2fs_inode *ri = &F2FS_NODE(page)->i;
page              129 fs/f2fs/inode.c 	if (!IS_INODE(page) || !(ri->i_inline & F2FS_EXTRA_ATTR))
page              139 fs/f2fs/inode.c static __u32 f2fs_inode_chksum(struct f2fs_sb_info *sbi, struct page *page)
page              141 fs/f2fs/inode.c 	struct f2fs_node *node = F2FS_NODE(page);
page              162 fs/f2fs/inode.c bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page)
page              171 fs/f2fs/inode.c 	if (!f2fs_enable_inode_chksum(sbi, page))
page              173 fs/f2fs/inode.c 	if (!f2fs_enable_inode_chksum(sbi, page) ||
page              174 fs/f2fs/inode.c 			PageDirty(page) || PageWriteback(page))
page              178 fs/f2fs/inode.c 	ri = &F2FS_NODE(page)->i;
page              180 fs/f2fs/inode.c 	calculated = f2fs_inode_chksum(sbi, page);
page              184 fs/f2fs/inode.c 			  page->index, ino_of_node(page), provided, calculated);
page              189 fs/f2fs/inode.c void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page)
page              191 fs/f2fs/inode.c 	struct f2fs_inode *ri = &F2FS_NODE(page)->i;
page              193 fs/f2fs/inode.c 	if (!f2fs_enable_inode_chksum(sbi, page))
page              196 fs/f2fs/inode.c 	ri->i_inode_checksum = cpu_to_le32(f2fs_inode_chksum(sbi, page));
page              199 fs/f2fs/inode.c static bool sanity_check_inode(struct inode *inode, struct page *node_page)
page              296 fs/f2fs/inode.c 	struct page *node_page;
page              500 fs/f2fs/inode.c void f2fs_update_inode(struct inode *inode, struct page *node_page)
page              593 fs/f2fs/inode.c 	struct page *node_page;
page              366 fs/f2fs/namei.c 	struct page *page;
page              367 fs/f2fs/namei.c 	unsigned long ino = f2fs_inode_by_name(d_inode(child), &dotdot, &page);
page              369 fs/f2fs/namei.c 		if (IS_ERR(page))
page              370 fs/f2fs/namei.c 			return ERR_CAST(page);
page              382 fs/f2fs/namei.c 	struct page *page;
page              399 fs/f2fs/namei.c 	de = f2fs_find_entry(dir, &dot, &page);
page              401 fs/f2fs/namei.c 		f2fs_put_page(page, 0);
page              402 fs/f2fs/namei.c 	} else if (IS_ERR(page)) {
page              403 fs/f2fs/namei.c 		err = PTR_ERR(page);
page              411 fs/f2fs/namei.c 	de = f2fs_find_entry(dir, &dotdot, &page);
page              413 fs/f2fs/namei.c 		f2fs_put_page(page, 0);
page              414 fs/f2fs/namei.c 	else if (IS_ERR(page))
page              415 fs/f2fs/namei.c 		err = PTR_ERR(page);
page              431 fs/f2fs/namei.c 	struct page *page;
page              450 fs/f2fs/namei.c 	de = __f2fs_find_entry(dir, &fname, &page);
page              454 fs/f2fs/namei.c 		if (IS_ERR(page)) {
page              455 fs/f2fs/namei.c 			err = PTR_ERR(page);
page              462 fs/f2fs/namei.c 	f2fs_put_page(page, 0);
page              517 fs/f2fs/namei.c 	struct page *page;
page              532 fs/f2fs/namei.c 	de = f2fs_find_entry(dir, &dentry->d_name, &page);
page              534 fs/f2fs/namei.c 		if (IS_ERR(page))
page              535 fs/f2fs/namei.c 			err = PTR_ERR(page);
page              545 fs/f2fs/namei.c 		f2fs_put_page(page, 0);
page              548 fs/f2fs/namei.c 	f2fs_delete_entry(de, page, dir, inode);
page              853 fs/f2fs/namei.c 	struct page *old_dir_page;
page              854 fs/f2fs/namei.c 	struct page *old_page, *new_page = NULL;
page             1047 fs/f2fs/namei.c 	struct page *old_dir_page, *new_dir_page;
page             1048 fs/f2fs/namei.c 	struct page *old_page, *new_page;
page             1235 fs/f2fs/namei.c 	struct page *page;
page             1241 fs/f2fs/namei.c 	page = read_mapping_page(inode->i_mapping, 0, NULL);
page             1242 fs/f2fs/namei.c 	if (IS_ERR(page))
page             1243 fs/f2fs/namei.c 		return ERR_CAST(page);
page             1245 fs/f2fs/namei.c 	target = fscrypt_get_symlink(inode, page_address(page),
page             1247 fs/f2fs/namei.c 	put_page(page);
page              100 fs/f2fs/node.c static void clear_node_page_dirty(struct page *page)
page              102 fs/f2fs/node.c 	if (PageDirty(page)) {
page              103 fs/f2fs/node.c 		f2fs_clear_page_cache_dirty_tag(page);
page              104 fs/f2fs/node.c 		clear_page_dirty_for_io(page);
page              105 fs/f2fs/node.c 		dec_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
page              107 fs/f2fs/node.c 	ClearPageUptodate(page);
page              110 fs/f2fs/node.c static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
page              115 fs/f2fs/node.c static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
page              117 fs/f2fs/node.c 	struct page *src_page;
page              118 fs/f2fs/node.c 	struct page *dst_page;
page              286 fs/f2fs/node.c bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page)
page              288 fs/f2fs/node.c 	return NODE_MAPPING(sbi) == page->mapping &&
page              289 fs/f2fs/node.c 			IS_DNODE(page) && is_cold_node(page);
page              301 fs/f2fs/node.c 							struct page *page)
page              309 fs/f2fs/node.c 	get_page(page);
page              310 fs/f2fs/node.c 	fn->page = page;
page              323 fs/f2fs/node.c void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page)
page              330 fs/f2fs/node.c 		if (fn->page == page) {
page              335 fs/f2fs/node.c 			put_page(page);
page              524 fs/f2fs/node.c 	struct page *page = NULL;
page              563 fs/f2fs/node.c 	page = f2fs_get_meta_page(sbi, index);
page              564 fs/f2fs/node.c 	if (IS_ERR(page))
page              565 fs/f2fs/node.c 		return PTR_ERR(page);
page              567 fs/f2fs/node.c 	nat_blk = (struct f2fs_nat_block *)page_address(page);
page              570 fs/f2fs/node.c 	f2fs_put_page(page, 1);
page              585 fs/f2fs/node.c static void f2fs_ra_node_pages(struct page *parent, int start, int n)
page              725 fs/f2fs/node.c 	struct page *npage[4];
page              726 fs/f2fs/node.c 	struct page *parent = NULL;
page              870 fs/f2fs/node.c 	struct page *page;
page              877 fs/f2fs/node.c 	page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid);
page              878 fs/f2fs/node.c 	if (IS_ERR(page) && PTR_ERR(page) == -ENOENT)
page              880 fs/f2fs/node.c 	else if (IS_ERR(page))
page              881 fs/f2fs/node.c 		return PTR_ERR(page);
page              884 fs/f2fs/node.c 	dn->node_page = page;
page              898 fs/f2fs/node.c 	struct page *page;
page              910 fs/f2fs/node.c 	page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid);
page              911 fs/f2fs/node.c 	if (IS_ERR(page)) {
page              912 fs/f2fs/node.c 		trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
page              913 fs/f2fs/node.c 		return PTR_ERR(page);
page              916 fs/f2fs/node.c 	f2fs_ra_node_pages(page, ofs, NIDS_PER_BLOCK);
page              918 fs/f2fs/node.c 	rn = F2FS_NODE(page);
page              928 fs/f2fs/node.c 			if (set_nid(page, i, 0, false))
page              942 fs/f2fs/node.c 				if (set_nid(page, i, 0, false))
page              954 fs/f2fs/node.c 		dn->node_page = page;
page              960 fs/f2fs/node.c 		f2fs_put_page(page, 1);
page              966 fs/f2fs/node.c 	f2fs_put_page(page, 1);
page              974 fs/f2fs/node.c 	struct page *pages[2];
page             1044 fs/f2fs/node.c 	struct page *page;
page             1052 fs/f2fs/node.c 	page = f2fs_get_node_page(sbi, inode->i_ino);
page             1053 fs/f2fs/node.c 	if (IS_ERR(page)) {
page             1054 fs/f2fs/node.c 		trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
page             1055 fs/f2fs/node.c 		return PTR_ERR(page);
page             1058 fs/f2fs/node.c 	set_new_dnode(&dn, inode, page, NULL, 0);
page             1059 fs/f2fs/node.c 	unlock_page(page);
page             1061 fs/f2fs/node.c 	ri = F2FS_INODE(page);
page             1114 fs/f2fs/node.c 			lock_page(page);
page             1115 fs/f2fs/node.c 			BUG_ON(page->mapping != NODE_MAPPING(sbi));
page             1116 fs/f2fs/node.c 			f2fs_wait_on_page_writeback(page, NODE, true, true);
page             1118 fs/f2fs/node.c 			set_page_dirty(page);
page             1119 fs/f2fs/node.c 			unlock_page(page);
page             1126 fs/f2fs/node.c 	f2fs_put_page(page, 0);
page             1137 fs/f2fs/node.c 	struct page *npage;
page             1205 fs/f2fs/node.c struct page *f2fs_new_inode_page(struct inode *inode)
page             1216 fs/f2fs/node.c struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs)
page             1220 fs/f2fs/node.c 	struct page *page;
page             1226 fs/f2fs/node.c 	page = f2fs_grab_cache_page(NODE_MAPPING(sbi), dn->nid, false);
page             1227 fs/f2fs/node.c 	if (!page)
page             1248 fs/f2fs/node.c 	f2fs_wait_on_page_writeback(page, NODE, true, true);
page             1249 fs/f2fs/node.c 	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
page             1250 fs/f2fs/node.c 	set_cold_node(page, S_ISDIR(dn->inode->i_mode));
page             1251 fs/f2fs/node.c 	if (!PageUptodate(page))
page             1252 fs/f2fs/node.c 		SetPageUptodate(page);
page             1253 fs/f2fs/node.c 	if (set_page_dirty(page))
page             1261 fs/f2fs/node.c 	return page;
page             1264 fs/f2fs/node.c 	clear_node_page_dirty(page);
page             1265 fs/f2fs/node.c 	f2fs_put_page(page, 1);
page             1274 fs/f2fs/node.c static int read_node_page(struct page *page, int op_flags)
page             1276 fs/f2fs/node.c 	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
page             1283 fs/f2fs/node.c 		.page = page,
page             1288 fs/f2fs/node.c 	if (PageUptodate(page)) {
page             1289 fs/f2fs/node.c 		if (!f2fs_inode_chksum_verify(sbi, page)) {
page             1290 fs/f2fs/node.c 			ClearPageUptodate(page);
page             1296 fs/f2fs/node.c 	err = f2fs_get_node_info(sbi, page->index, &ni);
page             1302 fs/f2fs/node.c 		ClearPageUptodate(page);
page             1315 fs/f2fs/node.c 	struct page *apage;
page             1335 fs/f2fs/node.c static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid,
page             1336 fs/f2fs/node.c 					struct page *parent, int start)
page             1338 fs/f2fs/node.c 	struct page *page;
page             1346 fs/f2fs/node.c 	page = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
page             1347 fs/f2fs/node.c 	if (!page)
page             1350 fs/f2fs/node.c 	err = read_node_page(page, 0);
page             1352 fs/f2fs/node.c 		f2fs_put_page(page, 1);
page             1362 fs/f2fs/node.c 	lock_page(page);
page             1364 fs/f2fs/node.c 	if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
page             1365 fs/f2fs/node.c 		f2fs_put_page(page, 1);
page             1369 fs/f2fs/node.c 	if (unlikely(!PageUptodate(page))) {
page             1374 fs/f2fs/node.c 	if (!f2fs_inode_chksum_verify(sbi, page)) {
page             1379 fs/f2fs/node.c 	if (unlikely(nid != nid_of_node(page))) {
page             1381 fs/f2fs/node.c 			  nid, nid_of_node(page), ino_of_node(page),
page             1382 fs/f2fs/node.c 			  ofs_of_node(page), cpver_of_node(page),
page             1383 fs/f2fs/node.c 			  next_blkaddr_of_node(page));
page             1386 fs/f2fs/node.c 		ClearPageUptodate(page);
page             1387 fs/f2fs/node.c 		f2fs_put_page(page, 1);
page             1390 fs/f2fs/node.c 	return page;
page             1393 fs/f2fs/node.c struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
page             1398 fs/f2fs/node.c struct page *f2fs_get_node_page_ra(struct page *parent, int start)
page             1409 fs/f2fs/node.c 	struct page *page;
page             1417 fs/f2fs/node.c 	page = f2fs_pagecache_get_page(inode->i_mapping, 0,
page             1419 fs/f2fs/node.c 	if (!page)
page             1422 fs/f2fs/node.c 	if (!PageUptodate(page))
page             1425 fs/f2fs/node.c 	if (!PageDirty(page))
page             1428 fs/f2fs/node.c 	if (!clear_page_dirty_for_io(page))
page             1431 fs/f2fs/node.c 	ret = f2fs_write_inline_data(inode, page);
page             1435 fs/f2fs/node.c 		set_page_dirty(page);
page             1437 fs/f2fs/node.c 	f2fs_put_page(page, 1);
page             1442 fs/f2fs/node.c static struct page *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino)
page             1446 fs/f2fs/node.c 	struct page *last_page = NULL;
page             1457 fs/f2fs/node.c 			struct page *page = pvec.pages[i];
page             1465 fs/f2fs/node.c 			if (!IS_DNODE(page) || !is_cold_node(page))
page             1467 fs/f2fs/node.c 			if (ino_of_node(page) != ino)
page             1470 fs/f2fs/node.c 			lock_page(page);
page             1472 fs/f2fs/node.c 			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
page             1474 fs/f2fs/node.c 				unlock_page(page);
page             1477 fs/f2fs/node.c 			if (ino_of_node(page) != ino)
page             1480 fs/f2fs/node.c 			if (!PageDirty(page)) {
page             1488 fs/f2fs/node.c 			get_page(page);
page             1489 fs/f2fs/node.c 			last_page = page;
page             1490 fs/f2fs/node.c 			unlock_page(page);
page             1498 fs/f2fs/node.c static int __write_node_page(struct page *page, bool atomic, bool *submitted,
page             1502 fs/f2fs/node.c 	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
page             1507 fs/f2fs/node.c 		.ino = ino_of_node(page),
page             1511 fs/f2fs/node.c 		.page = page,
page             1519 fs/f2fs/node.c 	trace_f2fs_writepage(page, NODE);
page             1529 fs/f2fs/node.c 			IS_DNODE(page) && is_cold_node(page))
page             1533 fs/f2fs/node.c 	nid = nid_of_node(page);
page             1534 fs/f2fs/node.c 	f2fs_bug_on(sbi, page->index != nid);
page             1548 fs/f2fs/node.c 		ClearPageUptodate(page);
page             1551 fs/f2fs/node.c 		unlock_page(page);
page             1566 fs/f2fs/node.c 	if (f2fs_in_warm_node_list(sbi, page)) {
page             1567 fs/f2fs/node.c 		seq = f2fs_add_fsync_node_entry(sbi, page);
page             1572 fs/f2fs/node.c 	set_page_writeback(page);
page             1573 fs/f2fs/node.c 	ClearPageError(page);
page             1577 fs/f2fs/node.c 	set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page));
page             1582 fs/f2fs/node.c 		f2fs_submit_merged_write_cond(sbi, NULL, page, 0, NODE);
page             1586 fs/f2fs/node.c 	unlock_page(page);
page             1600 fs/f2fs/node.c 	redirty_page_for_writepage(wbc, page);
page             1604 fs/f2fs/node.c int f2fs_move_node_page(struct page *node_page, int gc_type)
page             1642 fs/f2fs/node.c static int f2fs_write_node_page(struct page *page,
page             1645 fs/f2fs/node.c 	return __write_node_page(page, false, NULL, wbc, false,
page             1656 fs/f2fs/node.c 	struct page *last_page = NULL;
page             1676 fs/f2fs/node.c 			struct page *page = pvec.pages[i];
page             1686 fs/f2fs/node.c 			if (!IS_DNODE(page) || !is_cold_node(page))
page             1688 fs/f2fs/node.c 			if (ino_of_node(page) != ino)
page             1691 fs/f2fs/node.c 			lock_page(page);
page             1693 fs/f2fs/node.c 			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
page             1695 fs/f2fs/node.c 				unlock_page(page);
page             1698 fs/f2fs/node.c 			if (ino_of_node(page) != ino)
page             1701 fs/f2fs/node.c 			if (!PageDirty(page) && page != last_page) {
page             1706 fs/f2fs/node.c 			f2fs_wait_on_page_writeback(page, NODE, true, true);
page             1708 fs/f2fs/node.c 			set_fsync_mark(page, 0);
page             1709 fs/f2fs/node.c 			set_dentry_mark(page, 0);
page             1711 fs/f2fs/node.c 			if (!atomic || page == last_page) {
page             1712 fs/f2fs/node.c 				set_fsync_mark(page, 1);
page             1713 fs/f2fs/node.c 				if (IS_INODE(page)) {
page             1716 fs/f2fs/node.c 						f2fs_update_inode(inode, page);
page             1717 fs/f2fs/node.c 					set_dentry_mark(page,
page             1721 fs/f2fs/node.c 				if (!PageDirty(page))
page             1722 fs/f2fs/node.c 					set_page_dirty(page);
page             1725 fs/f2fs/node.c 			if (!clear_page_dirty_for_io(page))
page             1728 fs/f2fs/node.c 			ret = __write_node_page(page, atomic &&
page             1729 fs/f2fs/node.c 						page == last_page,
page             1733 fs/f2fs/node.c 				unlock_page(page);
page             1740 fs/f2fs/node.c 			if (page == last_page) {
page             1741 fs/f2fs/node.c 				f2fs_put_page(page, 0);
page             1791 fs/f2fs/node.c static bool flush_dirty_inode(struct page *page)
page             1793 fs/f2fs/node.c 	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
page             1795 fs/f2fs/node.c 	nid_t ino = ino_of_node(page);
page             1801 fs/f2fs/node.c 	f2fs_update_inode(inode, page);
page             1802 fs/f2fs/node.c 	unlock_page(page);
page             1829 fs/f2fs/node.c 			struct page *page = pvec.pages[i];
page             1846 fs/f2fs/node.c 			if (step == 0 && IS_DNODE(page))
page             1848 fs/f2fs/node.c 			if (step == 1 && (!IS_DNODE(page) ||
page             1849 fs/f2fs/node.c 						is_cold_node(page)))
page             1851 fs/f2fs/node.c 			if (step == 2 && (!IS_DNODE(page) ||
page             1852 fs/f2fs/node.c 						!is_cold_node(page)))
page             1856 fs/f2fs/node.c 				lock_page(page);
page             1857 fs/f2fs/node.c 			else if (!trylock_page(page))
page             1860 fs/f2fs/node.c 			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
page             1862 fs/f2fs/node.c 				unlock_page(page);
page             1866 fs/f2fs/node.c 			if (!PageDirty(page)) {
page             1872 fs/f2fs/node.c 			if (is_inline_node(page)) {
page             1873 fs/f2fs/node.c 				clear_inline_node(page);
page             1874 fs/f2fs/node.c 				unlock_page(page);
page             1875 fs/f2fs/node.c 				flush_inline_data(sbi, ino_of_node(page));
page             1880 fs/f2fs/node.c 			if (IS_INODE(page) && may_dirty) {
page             1882 fs/f2fs/node.c 				if (flush_dirty_inode(page))
page             1886 fs/f2fs/node.c 			f2fs_wait_on_page_writeback(page, NODE, true, true);
page             1888 fs/f2fs/node.c 			if (!clear_page_dirty_for_io(page))
page             1891 fs/f2fs/node.c 			set_fsync_mark(page, 0);
page             1892 fs/f2fs/node.c 			set_dentry_mark(page, 0);
page             1894 fs/f2fs/node.c 			ret = __write_node_page(page, false, &submitted,
page             1897 fs/f2fs/node.c 				unlock_page(page);
page             1933 fs/f2fs/node.c 	struct page *page;
page             1951 fs/f2fs/node.c 		page = fn->page;
page             1952 fs/f2fs/node.c 		get_page(page);
page             1955 fs/f2fs/node.c 		f2fs_wait_on_page_writeback(page, NODE, true, false);
page             1956 fs/f2fs/node.c 		if (TestClearPageError(page))
page             1959 fs/f2fs/node.c 		put_page(page);
page             2014 fs/f2fs/node.c static int f2fs_set_node_page_dirty(struct page *page)
page             2016 fs/f2fs/node.c 	trace_f2fs_set_page_dirty(page, NODE);
page             2018 fs/f2fs/node.c 	if (!PageUptodate(page))
page             2019 fs/f2fs/node.c 		SetPageUptodate(page);
page             2021 fs/f2fs/node.c 	if (IS_INODE(page))
page             2022 fs/f2fs/node.c 		f2fs_inode_chksum_set(F2FS_P_SB(page), page);
page             2024 fs/f2fs/node.c 	if (!PageDirty(page)) {
page             2025 fs/f2fs/node.c 		__set_page_dirty_nobuffers(page);
page             2026 fs/f2fs/node.c 		inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
page             2027 fs/f2fs/node.c 		f2fs_set_page_private(page, 0);
page             2028 fs/f2fs/node.c 		f2fs_trace_pid(page);
page             2222 fs/f2fs/node.c 			struct page *nat_page, nid_t start_nid)
page             2342 fs/f2fs/node.c 			struct page *page = get_current_nat_page(sbi, nid);
page             2344 fs/f2fs/node.c 			if (IS_ERR(page)) {
page             2345 fs/f2fs/node.c 				ret = PTR_ERR(page);
page             2347 fs/f2fs/node.c 				ret = scan_nat_page(sbi, page, nid);
page             2348 fs/f2fs/node.c 				f2fs_put_page(page, 1);
page             2515 fs/f2fs/node.c void f2fs_recover_inline_xattr(struct inode *inode, struct page *page)
page             2519 fs/f2fs/node.c 	struct page *ipage;
page             2525 fs/f2fs/node.c 	ri = F2FS_INODE(page);
page             2534 fs/f2fs/node.c 	src_addr = inline_xattr_addr(inode, page);
page             2544 fs/f2fs/node.c int f2fs_recover_xattr_data(struct inode *inode, struct page *page)
page             2551 fs/f2fs/node.c 	struct page *xpage;
page             2582 fs/f2fs/node.c 	memcpy(F2FS_NODE(xpage), F2FS_NODE(page), VALID_XATTR_BLOCK_SIZE);
page             2590 fs/f2fs/node.c int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
page             2593 fs/f2fs/node.c 	nid_t ino = ino_of_node(page);
page             2595 fs/f2fs/node.c 	struct page *ipage;
page             2619 fs/f2fs/node.c 	src = F2FS_INODE(page);
page             2681 fs/f2fs/node.c 			struct page *page = f2fs_get_tmp_page(sbi, idx);
page             2683 fs/f2fs/node.c 			if (IS_ERR(page))
page             2684 fs/f2fs/node.c 				return PTR_ERR(page);
page             2686 fs/f2fs/node.c 			rn = F2FS_NODE(page);
page             2691 fs/f2fs/node.c 			f2fs_put_page(page, 1);
page             2758 fs/f2fs/node.c 						struct page *page)
page             2762 fs/f2fs/node.c 	struct f2fs_nat_block *nat_blk = page_address(page);
page             2799 fs/f2fs/node.c 	struct page *page = NULL;
page             2813 fs/f2fs/node.c 		page = get_next_nat_page(sbi, start_nid);
page             2814 fs/f2fs/node.c 		if (IS_ERR(page))
page             2815 fs/f2fs/node.c 			return PTR_ERR(page);
page             2817 fs/f2fs/node.c 		nat_blk = page_address(page);
page             2853 fs/f2fs/node.c 		__update_nat_bits(sbi, start_nid, page);
page             2854 fs/f2fs/node.c 		f2fs_put_page(page, 1);
page             2944 fs/f2fs/node.c 		struct page *page;
page             2946 fs/f2fs/node.c 		page = f2fs_get_meta_page(sbi, nat_bits_addr++);
page             2947 fs/f2fs/node.c 		if (IS_ERR(page))
page             2948 fs/f2fs/node.c 			return PTR_ERR(page);
page             2951 fs/f2fs/node.c 					page_address(page), F2FS_BLKSIZE);
page             2952 fs/f2fs/node.c 		f2fs_put_page(page, 1);
page              236 fs/f2fs/node.h static inline nid_t ino_of_node(struct page *node_page)
page              242 fs/f2fs/node.h static inline nid_t nid_of_node(struct page *node_page)
page              248 fs/f2fs/node.h static inline unsigned int ofs_of_node(struct page *node_page)
page              255 fs/f2fs/node.h static inline __u64 cpver_of_node(struct page *node_page)
page              261 fs/f2fs/node.h static inline block_t next_blkaddr_of_node(struct page *node_page)
page              267 fs/f2fs/node.h static inline void fill_node_footer(struct page *page, nid_t nid,
page              270 fs/f2fs/node.h 	struct f2fs_node *rn = F2FS_NODE(page);
page              286 fs/f2fs/node.h static inline void copy_node_footer(struct page *dst, struct page *src)
page              293 fs/f2fs/node.h static inline void fill_node_footer_blkaddr(struct page *page, block_t blkaddr)
page              295 fs/f2fs/node.h 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(F2FS_P_SB(page));
page              296 fs/f2fs/node.h 	struct f2fs_node *rn = F2FS_NODE(page);
page              306 fs/f2fs/node.h static inline bool is_recoverable_dnode(struct page *page)
page              308 fs/f2fs/node.h 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(F2FS_P_SB(page));
page              313 fs/f2fs/node.h 		return (cp_ver << 32) == (cpver_of_node(page) << 32);
page              318 fs/f2fs/node.h 	return cp_ver == cpver_of_node(page);
page              342 fs/f2fs/node.h static inline bool IS_DNODE(struct page *node_page)
page              360 fs/f2fs/node.h static inline int set_nid(struct page *p, int off, nid_t nid, bool i)
page              373 fs/f2fs/node.h static inline nid_t get_nid(struct page *p, int off, bool i)
page              388 fs/f2fs/node.h static inline int is_cold_data(struct page *page)
page              390 fs/f2fs/node.h 	return PageChecked(page);
page              393 fs/f2fs/node.h static inline void set_cold_data(struct page *page)
page              395 fs/f2fs/node.h 	SetPageChecked(page);
page              398 fs/f2fs/node.h static inline void clear_cold_data(struct page *page)
page              400 fs/f2fs/node.h 	ClearPageChecked(page);
page              403 fs/f2fs/node.h static inline int is_node(struct page *page, int type)
page              405 fs/f2fs/node.h 	struct f2fs_node *rn = F2FS_NODE(page);
page              409 fs/f2fs/node.h #define is_cold_node(page)	is_node(page, COLD_BIT_SHIFT)
page              410 fs/f2fs/node.h #define is_fsync_dnode(page)	is_node(page, FSYNC_BIT_SHIFT)
page              411 fs/f2fs/node.h #define is_dent_dnode(page)	is_node(page, DENT_BIT_SHIFT)
page              413 fs/f2fs/node.h static inline int is_inline_node(struct page *page)
page              415 fs/f2fs/node.h 	return PageChecked(page);
page              418 fs/f2fs/node.h static inline void set_inline_node(struct page *page)
page              420 fs/f2fs/node.h 	SetPageChecked(page);
page              423 fs/f2fs/node.h static inline void clear_inline_node(struct page *page)
page              425 fs/f2fs/node.h 	ClearPageChecked(page);
page              428 fs/f2fs/node.h static inline void set_cold_node(struct page *page, bool is_dir)
page              430 fs/f2fs/node.h 	struct f2fs_node *rn = F2FS_NODE(page);
page              440 fs/f2fs/node.h static inline void set_mark(struct page *page, int mark, int type)
page              442 fs/f2fs/node.h 	struct f2fs_node *rn = F2FS_NODE(page);
page              451 fs/f2fs/node.h 	f2fs_inode_chksum_set(F2FS_P_SB(page), page);
page              454 fs/f2fs/node.h #define set_dentry_mark(page, mark)	set_mark(page, mark, DENT_BIT_SHIFT)
page              455 fs/f2fs/node.h #define set_fsync_mark(page, mark)	set_mark(page, mark, FSYNC_BIT_SHIFT)
page              110 fs/f2fs/recovery.c static int recover_dentry(struct inode *inode, struct page *ipage,
page              117 fs/f2fs/recovery.c 	struct page *page;
page              146 fs/f2fs/recovery.c 	de = __f2fs_find_entry(dir, &fname, &page);
page              171 fs/f2fs/recovery.c 		f2fs_delete_entry(de, page, dir, einode);
page              174 fs/f2fs/recovery.c 	} else if (IS_ERR(page)) {
page              175 fs/f2fs/recovery.c 		err = PTR_ERR(page);
page              185 fs/f2fs/recovery.c 	f2fs_put_page(page, 0);
page              197 fs/f2fs/recovery.c static int recover_quota_data(struct inode *inode, struct page *page)
page              199 fs/f2fs/recovery.c 	struct f2fs_inode *raw = F2FS_INODE(page);
page              236 fs/f2fs/recovery.c static int recover_inode(struct inode *inode, struct page *page)
page              238 fs/f2fs/recovery.c 	struct f2fs_inode *raw = F2FS_INODE(page);
page              244 fs/f2fs/recovery.c 	err = recover_quota_data(inode, page);
page              292 fs/f2fs/recovery.c 		name = F2FS_INODE(page)->i_name;
page              295 fs/f2fs/recovery.c 		    ino_of_node(page), name, raw->i_inline);
page              303 fs/f2fs/recovery.c 	struct page *page = NULL;
page              320 fs/f2fs/recovery.c 		page = f2fs_get_tmp_page(sbi, blkaddr);
page              321 fs/f2fs/recovery.c 		if (IS_ERR(page)) {
page              322 fs/f2fs/recovery.c 			err = PTR_ERR(page);
page              326 fs/f2fs/recovery.c 		if (!is_recoverable_dnode(page)) {
page              327 fs/f2fs/recovery.c 			f2fs_put_page(page, 1);
page              331 fs/f2fs/recovery.c 		if (!is_fsync_dnode(page))
page              334 fs/f2fs/recovery.c 		entry = get_fsync_inode(head, ino_of_node(page));
page              339 fs/f2fs/recovery.c 					IS_INODE(page) && is_dent_dnode(page)) {
page              340 fs/f2fs/recovery.c 				err = f2fs_recover_inode_page(sbi, page);
page              342 fs/f2fs/recovery.c 					f2fs_put_page(page, 1);
page              352 fs/f2fs/recovery.c 			entry = add_fsync_inode(sbi, head, ino_of_node(page),
page              360 fs/f2fs/recovery.c 				f2fs_put_page(page, 1);
page              366 fs/f2fs/recovery.c 		if (IS_INODE(page) && is_dent_dnode(page))
page              371 fs/f2fs/recovery.c 			blkaddr == next_blkaddr_of_node(page)) {
page              374 fs/f2fs/recovery.c 				    next_blkaddr_of_node(page));
page              375 fs/f2fs/recovery.c 			f2fs_put_page(page, 1);
page              381 fs/f2fs/recovery.c 		blkaddr = next_blkaddr_of_node(page);
page              382 fs/f2fs/recovery.c 		f2fs_put_page(page, 1);
page              405 fs/f2fs/recovery.c 	struct page *sum_page, *node_page;
page              508 fs/f2fs/recovery.c 					struct page *page)
page              516 fs/f2fs/recovery.c 	if (IS_INODE(page)) {
page              517 fs/f2fs/recovery.c 		f2fs_recover_inline_xattr(inode, page);
page              518 fs/f2fs/recovery.c 	} else if (f2fs_has_xattr_block(ofs_of_node(page))) {
page              519 fs/f2fs/recovery.c 		err = f2fs_recover_xattr_data(inode, page);
page              526 fs/f2fs/recovery.c 	if (f2fs_recover_inline_data(inode, page))
page              530 fs/f2fs/recovery.c 	start = f2fs_start_bidx_of_node(ofs_of_node(page), inode);
page              531 fs/f2fs/recovery.c 	end = start + ADDRS_PER_PAGE(page, inode);
page              550 fs/f2fs/recovery.c 	f2fs_bug_on(sbi, ni.ino != ino_of_node(page));
page              552 fs/f2fs/recovery.c 	if (ofs_of_node(dn.node_page) != ofs_of_node(page)) {
page              555 fs/f2fs/recovery.c 			  ofs_of_node(page));
page              564 fs/f2fs/recovery.c 		dest = datablock_addr(dn.inode, page, dn.ofs_in_node);
page              634 fs/f2fs/recovery.c 	copy_node_footer(dn.node_page, page);
page              636 fs/f2fs/recovery.c 					ofs_of_node(page), false);
page              651 fs/f2fs/recovery.c 	struct page *page = NULL;
page              667 fs/f2fs/recovery.c 		page = f2fs_get_tmp_page(sbi, blkaddr);
page              668 fs/f2fs/recovery.c 		if (IS_ERR(page)) {
page              669 fs/f2fs/recovery.c 			err = PTR_ERR(page);
page              673 fs/f2fs/recovery.c 		if (!is_recoverable_dnode(page)) {
page              674 fs/f2fs/recovery.c 			f2fs_put_page(page, 1);
page              678 fs/f2fs/recovery.c 		entry = get_fsync_inode(inode_list, ino_of_node(page));
page              686 fs/f2fs/recovery.c 		if (IS_INODE(page)) {
page              687 fs/f2fs/recovery.c 			err = recover_inode(entry->inode, page);
page              689 fs/f2fs/recovery.c 				f2fs_put_page(page, 1);
page              694 fs/f2fs/recovery.c 			err = recover_dentry(entry->inode, page, dir_list);
page              696 fs/f2fs/recovery.c 				f2fs_put_page(page, 1);
page              700 fs/f2fs/recovery.c 		err = do_recover_data(sbi, entry->inode, page);
page              702 fs/f2fs/recovery.c 			f2fs_put_page(page, 1);
page              710 fs/f2fs/recovery.c 		blkaddr = next_blkaddr_of_node(page);
page              711 fs/f2fs/recovery.c 		f2fs_put_page(page, 1);
page              186 fs/f2fs/segment.c void f2fs_register_inmem_page(struct inode *inode, struct page *page)
page              190 fs/f2fs/segment.c 	f2fs_trace_pid(page);
page              192 fs/f2fs/segment.c 	f2fs_set_page_private(page, (unsigned long)ATOMIC_WRITTEN_PAGE);
page              197 fs/f2fs/segment.c 	new->page = page;
page              201 fs/f2fs/segment.c 	get_page(page);
page              207 fs/f2fs/segment.c 	trace_f2fs_register_inmem_page(page, INMEM);
page              219 fs/f2fs/segment.c 		struct page *page = cur->page;
page              222 fs/f2fs/segment.c 			trace_f2fs_commit_inmem_page(page, INMEM_DROP);
page              229 fs/f2fs/segment.c 			if (!trylock_page(page))
page              232 fs/f2fs/segment.c 			lock_page(page);
page              235 fs/f2fs/segment.c 		f2fs_wait_on_page_writeback(page, DATA, true, true);
page              241 fs/f2fs/segment.c 			trace_f2fs_commit_inmem_page(page, INMEM_REVOKE);
page              244 fs/f2fs/segment.c 			err = f2fs_get_dnode_of_data(&dn, page->index,
page              273 fs/f2fs/segment.c 			ClearPageUptodate(page);
page              274 fs/f2fs/segment.c 			clear_cold_data(page);
page              276 fs/f2fs/segment.c 		f2fs_clear_page_private(page);
page              277 fs/f2fs/segment.c 		f2fs_put_page(page, 1);
page              349 fs/f2fs/segment.c void f2fs_drop_inmem_page(struct inode *inode, struct page *page)
page              356 fs/f2fs/segment.c 	f2fs_bug_on(sbi, !IS_ATOMIC_WRITTEN_PAGE(page));
page              360 fs/f2fs/segment.c 		if (cur->page == page)
page              364 fs/f2fs/segment.c 	f2fs_bug_on(sbi, list_empty(head) || cur->page != page);
page              371 fs/f2fs/segment.c 	ClearPageUptodate(page);
page              372 fs/f2fs/segment.c 	f2fs_clear_page_private(page);
page              373 fs/f2fs/segment.c 	f2fs_put_page(page, 0);
page              375 fs/f2fs/segment.c 	trace_f2fs_commit_inmem_page(page, INMEM_INVALIDATE);
page              398 fs/f2fs/segment.c 		struct page *page = cur->page;
page              400 fs/f2fs/segment.c 		lock_page(page);
page              401 fs/f2fs/segment.c 		if (page->mapping == inode->i_mapping) {
page              402 fs/f2fs/segment.c 			trace_f2fs_commit_inmem_page(page, INMEM);
page              404 fs/f2fs/segment.c 			f2fs_wait_on_page_writeback(page, DATA, true, true);
page              406 fs/f2fs/segment.c 			set_page_dirty(page);
page              407 fs/f2fs/segment.c 			if (clear_page_dirty_for_io(page)) {
page              412 fs/f2fs/segment.c 			fio.page = page;
page              423 fs/f2fs/segment.c 				unlock_page(page);
page              430 fs/f2fs/segment.c 		unlock_page(page);
page             2311 fs/f2fs/segment.c struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
page             2319 fs/f2fs/segment.c 	struct page *page = f2fs_grab_meta_page(sbi, blk_addr);
page             2321 fs/f2fs/segment.c 	memcpy(page_address(page), src, PAGE_SIZE);
page             2322 fs/f2fs/segment.c 	set_page_dirty(page);
page             2323 fs/f2fs/segment.c 	f2fs_put_page(page, 1);
page             2336 fs/f2fs/segment.c 	struct page *page = f2fs_grab_meta_page(sbi, blk_addr);
page             2340 fs/f2fs/segment.c 	dst = (struct f2fs_summary_block *)page_address(page);
page             2354 fs/f2fs/segment.c 	set_page_dirty(page);
page             2355 fs/f2fs/segment.c 	f2fs_put_page(page, 1);
page             2569 fs/f2fs/segment.c 	struct page *sum_page;
page             3011 fs/f2fs/segment.c 		struct inode *inode = fio->page->mapping->host;
page             3018 fs/f2fs/segment.c 		if (IS_DNODE(fio->page) && is_cold_node(fio->page))
page             3028 fs/f2fs/segment.c 		struct inode *inode = fio->page->mapping->host;
page             3030 fs/f2fs/segment.c 		if (is_cold_data(fio->page) || file_is_cold(inode))
page             3039 fs/f2fs/segment.c 		if (IS_DNODE(fio->page))
page             3040 fs/f2fs/segment.c 			return is_cold_node(fio->page) ? CURSEG_WARM_NODE :
page             3073 fs/f2fs/segment.c void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
page             3122 fs/f2fs/segment.c 	if (page && IS_NODESEG(type)) {
page             3123 fs/f2fs/segment.c 		fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg));
page             3125 fs/f2fs/segment.c 		f2fs_inode_chksum_set(sbi, page);
page             3176 fs/f2fs/segment.c 	f2fs_allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
page             3195 fs/f2fs/segment.c void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
page             3204 fs/f2fs/segment.c 		.old_blkaddr = page->index,
page             3205 fs/f2fs/segment.c 		.new_blkaddr = page->index,
page             3206 fs/f2fs/segment.c 		.page = page,
page             3211 fs/f2fs/segment.c 	if (unlikely(page->index >= MAIN_BLKADDR(sbi)))
page             3214 fs/f2fs/segment.c 	set_page_writeback(page);
page             3215 fs/f2fs/segment.c 	ClearPageError(page);
page             3218 fs/f2fs/segment.c 	stat_inc_meta_count(sbi, page->index);
page             3385 fs/f2fs/segment.c void f2fs_wait_on_page_writeback(struct page *page,
page             3388 fs/f2fs/segment.c 	if (PageWriteback(page)) {
page             3389 fs/f2fs/segment.c 		struct f2fs_sb_info *sbi = F2FS_P_SB(page);
page             3391 fs/f2fs/segment.c 		f2fs_submit_merged_write_cond(sbi, NULL, page, 0, type);
page             3393 fs/f2fs/segment.c 			wait_on_page_writeback(page);
page             3394 fs/f2fs/segment.c 			f2fs_bug_on(sbi, locked && PageWriteback(page));
page             3396 fs/f2fs/segment.c 			wait_for_stable_page(page);
page             3404 fs/f2fs/segment.c 	struct page *cpage;
page             3433 fs/f2fs/segment.c 	struct page *page;
page             3439 fs/f2fs/segment.c 	page = f2fs_get_meta_page(sbi, start++);
page             3440 fs/f2fs/segment.c 	if (IS_ERR(page))
page             3441 fs/f2fs/segment.c 		return PTR_ERR(page);
page             3442 fs/f2fs/segment.c 	kaddr = (unsigned char *)page_address(page);
page             3478 fs/f2fs/segment.c 			f2fs_put_page(page, 1);
page             3479 fs/f2fs/segment.c 			page = NULL;
page             3481 fs/f2fs/segment.c 			page = f2fs_get_meta_page(sbi, start++);
page             3482 fs/f2fs/segment.c 			if (IS_ERR(page))
page             3483 fs/f2fs/segment.c 				return PTR_ERR(page);
page             3484 fs/f2fs/segment.c 			kaddr = (unsigned char *)page_address(page);
page             3488 fs/f2fs/segment.c 	f2fs_put_page(page, 1);
page             3497 fs/f2fs/segment.c 	struct page *new;
page             3609 fs/f2fs/segment.c 	struct page *page;
page             3616 fs/f2fs/segment.c 	page = f2fs_grab_meta_page(sbi, blkaddr++);
page             3617 fs/f2fs/segment.c 	kaddr = (unsigned char *)page_address(page);
page             3640 fs/f2fs/segment.c 			if (!page) {
page             3641 fs/f2fs/segment.c 				page = f2fs_grab_meta_page(sbi, blkaddr++);
page             3642 fs/f2fs/segment.c 				kaddr = (unsigned char *)page_address(page);
page             3654 fs/f2fs/segment.c 			set_page_dirty(page);
page             3655 fs/f2fs/segment.c 			f2fs_put_page(page, 1);
page             3656 fs/f2fs/segment.c 			page = NULL;
page             3659 fs/f2fs/segment.c 	if (page) {
page             3660 fs/f2fs/segment.c 		set_page_dirty(page);
page             3661 fs/f2fs/segment.c 		f2fs_put_page(page, 1);
page             3713 fs/f2fs/segment.c static struct page *get_current_sit_page(struct f2fs_sb_info *sbi,
page             3719 fs/f2fs/segment.c static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
page             3723 fs/f2fs/segment.c 	struct page *page;
page             3729 fs/f2fs/segment.c 	page = f2fs_grab_meta_page(sbi, dst_off);
page             3730 fs/f2fs/segment.c 	seg_info_to_sit_page(sbi, page, start);
page             3732 fs/f2fs/segment.c 	set_page_dirty(page);
page             3735 fs/f2fs/segment.c 	return page;
page             3862 fs/f2fs/segment.c 		struct page *page = NULL;
page             3876 fs/f2fs/segment.c 			page = get_next_sit_page(sbi, start_segno);
page             3877 fs/f2fs/segment.c 			raw_sit = page_address(page);
page             3923 fs/f2fs/segment.c 			f2fs_put_page(page, 1);
page             4135 fs/f2fs/segment.c 			struct page *page;
page             4138 fs/f2fs/segment.c 			page = get_current_sit_page(sbi, start);
page             4139 fs/f2fs/segment.c 			if (IS_ERR(page))
page             4140 fs/f2fs/segment.c 				return PTR_ERR(page);
page             4141 fs/f2fs/segment.c 			sit_blk = (struct f2fs_sit_block *)page_address(page);
page             4143 fs/f2fs/segment.c 			f2fs_put_page(page, 1);
page              210 fs/f2fs/segment.h #define IS_ATOMIC_WRITTEN_PAGE(page)			\
page              211 fs/f2fs/segment.h 		(page_private(page) == (unsigned long)ATOMIC_WRITTEN_PAGE)
page              212 fs/f2fs/segment.h #define IS_DUMMY_WRITTEN_PAGE(page)			\
page              213 fs/f2fs/segment.h 		(page_private(page) == (unsigned long)DUMMY_WRITTEN_PAGE)
page              219 fs/f2fs/segment.h 	struct page *page;
page              377 fs/f2fs/segment.h 				struct page *page, unsigned int start)
page              386 fs/f2fs/segment.h 	raw_sit = (struct f2fs_sit_block *)page_address(page);
page             1772 fs/f2fs/super.c 	struct page *page;
page             1784 fs/f2fs/super.c 		page = read_cache_page_gfp(mapping, blkidx, GFP_NOFS);
page             1785 fs/f2fs/super.c 		if (IS_ERR(page)) {
page             1786 fs/f2fs/super.c 			if (PTR_ERR(page) == -ENOMEM) {
page             1791 fs/f2fs/super.c 			return PTR_ERR(page);
page             1794 fs/f2fs/super.c 		lock_page(page);
page             1796 fs/f2fs/super.c 		if (unlikely(page->mapping != mapping)) {
page             1797 fs/f2fs/super.c 			f2fs_put_page(page, 1);
page             1800 fs/f2fs/super.c 		if (unlikely(!PageUptodate(page))) {
page             1801 fs/f2fs/super.c 			f2fs_put_page(page, 1);
page             1806 fs/f2fs/super.c 		kaddr = kmap_atomic(page);
page             1809 fs/f2fs/super.c 		f2fs_put_page(page, 1);
page             1828 fs/f2fs/super.c 	struct page *page;
page             1839 fs/f2fs/super.c 							&page, &fsdata);
page             1849 fs/f2fs/super.c 		kaddr = kmap_atomic(page);
page             1852 fs/f2fs/super.c 		flush_dcache_page(page);
page             1855 fs/f2fs/super.c 						page, fsdata);
page               53 fs/f2fs/trace.c void f2fs_trace_pid(struct page *page)
page               55 fs/f2fs/trace.c 	struct inode *inode = page->mapping->host;
page               59 fs/f2fs/trace.c 	set_page_private(page, (unsigned long)pid);
page               98 fs/f2fs/trace.c 	inode = fio->page->mapping->host;
page               99 fs/f2fs/trace.c 	pid = page_private(fio->page);
page               32 fs/f2fs/trace.h extern void f2fs_trace_pid(struct page *);
page               47 fs/f2fs/verity.c 		struct page *page;
page               50 fs/f2fs/verity.c 		page = read_mapping_page(inode->i_mapping, pos >> PAGE_SHIFT,
page               52 fs/f2fs/verity.c 		if (IS_ERR(page))
page               53 fs/f2fs/verity.c 			return PTR_ERR(page);
page               55 fs/f2fs/verity.c 		addr = kmap_atomic(page);
page               59 fs/f2fs/verity.c 		put_page(page);
page               81 fs/f2fs/verity.c 		struct page *page;
page               87 fs/f2fs/verity.c 					    &page, &fsdata);
page               91 fs/f2fs/verity.c 		addr = kmap_atomic(page);
page               96 fs/f2fs/verity.c 					  page, fsdata);
page              225 fs/f2fs/verity.c static struct page *f2fs_read_merkle_tree_page(struct inode *inode,
page              117 fs/f2fs/xattr.c 		void *page)
page              125 fs/f2fs/xattr.c 				xattr->value_len, (struct page *)page, 0);
page              133 fs/f2fs/xattr.c 				const struct qstr *qstr, struct page *ipage)
page              258 fs/f2fs/xattr.c static int read_inline_xattr(struct inode *inode, struct page *ipage,
page              263 fs/f2fs/xattr.c 	struct page *page = NULL;
page              269 fs/f2fs/xattr.c 		page = f2fs_get_node_page(sbi, inode->i_ino);
page              270 fs/f2fs/xattr.c 		if (IS_ERR(page))
page              271 fs/f2fs/xattr.c 			return PTR_ERR(page);
page              273 fs/f2fs/xattr.c 		inline_addr = inline_xattr_addr(inode, page);
page              276 fs/f2fs/xattr.c 	f2fs_put_page(page, 1);
page              286 fs/f2fs/xattr.c 	struct page *xpage;
page              301 fs/f2fs/xattr.c static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
page              369 fs/f2fs/xattr.c static int read_all_xattrs(struct inode *inode, struct page *ipage,
page              413 fs/f2fs/xattr.c 				void *txattr_addr, struct page *ipage)
page              417 fs/f2fs/xattr.c 	struct page *in_page = NULL;
page              420 fs/f2fs/xattr.c 	struct page *xpage;
page              495 fs/f2fs/xattr.c 		void *buffer, size_t buffer_size, struct page *ipage)
page              608 fs/f2fs/xattr.c 			struct page *ipage, int flags)
page              740 fs/f2fs/xattr.c 				struct page *ipage, int flags)
page              129 fs/f2fs/xattr.h 				const void *, size_t, struct page *, int);
page              131 fs/f2fs/xattr.h 						size_t, struct page *);
page              138 fs/f2fs/xattr.h 		struct page *page, int flags)
page              144 fs/f2fs/xattr.h 			size_t buffer_size, struct page *dpage)
page              157 fs/f2fs/xattr.h 				const struct qstr *, struct page *);
page              160 fs/f2fs/xattr.h 				const struct qstr *qstr, struct page *ipage)
page              196 fs/fat/inode.c static int fat_writepage(struct page *page, struct writeback_control *wbc)
page              198 fs/fat/inode.c 	return block_write_full_page(page, fat_get_block, wbc);
page              207 fs/fat/inode.c static int fat_readpage(struct file *file, struct page *page)
page              209 fs/fat/inode.c 	return mpage_readpage(page, fat_get_block);
page              230 fs/fat/inode.c 			struct page **pagep, void **fsdata)
page              245 fs/fat/inode.c 			struct page *pagep, void *fsdata)
page               70 fs/freevxfs/vxfs_extern.h extern struct page *		vxfs_get_page(struct address_space *, u_long);
page               71 fs/freevxfs/vxfs_extern.h extern void			vxfs_put_page(struct page *);
page               41 fs/freevxfs/vxfs_immed.c static int	vxfs_immed_readpage(struct file *, struct page *);
page               66 fs/freevxfs/vxfs_immed.c vxfs_immed_readpage(struct file *fp, struct page *pp)
page              213 fs/freevxfs/vxfs_inode.c 	struct page			*pp;
page               81 fs/freevxfs/vxfs_lookup.c vxfs_find_entry(struct inode *ip, struct dentry *dp, struct page **ppp)
page               92 fs/freevxfs/vxfs_lookup.c 		struct page *pp;
page              159 fs/freevxfs/vxfs_lookup.c 	struct page			*pp;
page              241 fs/freevxfs/vxfs_lookup.c 		struct page *pp;
page               41 fs/freevxfs/vxfs_subr.c static int		vxfs_readpage(struct file *, struct page *);
page               50 fs/freevxfs/vxfs_subr.c vxfs_put_page(struct page *pp)
page               67 fs/freevxfs/vxfs_subr.c struct page *
page               70 fs/freevxfs/vxfs_subr.c 	struct page *			pp;
page              159 fs/freevxfs/vxfs_subr.c vxfs_readpage(struct file *file, struct page *page)
page              161 fs/freevxfs/vxfs_subr.c 	return block_read_full_page(page, vxfs_getblk);
page              249 fs/fs-writeback.c void __inode_attach_wb(struct inode *inode, struct page *page)
page              257 fs/fs-writeback.c 		if (page) {
page              258 fs/fs-writeback.c 			memcg_css = mem_cgroup_css_from_page(page);
page              361 fs/fs-writeback.c 	struct page *page;
page              404 fs/fs-writeback.c 	xas_for_each_marked(&xas, page, ULONG_MAX, PAGECACHE_TAG_DIRTY) {
page              405 fs/fs-writeback.c 		if (PageDirty(page)) {
page              412 fs/fs-writeback.c 	xas_for_each_marked(&xas, page, ULONG_MAX, PAGECACHE_TAG_WRITEBACK) {
page              413 fs/fs-writeback.c 		WARN_ON_ONCE(!PageWriteback(page));
page              723 fs/fs-writeback.c void wbc_account_cgroup_owner(struct writeback_control *wbc, struct page *page,
page              738 fs/fs-writeback.c 	css = mem_cgroup_css_from_page(page);
page               19 fs/fscache/page.c bool __fscache_check_page_write(struct fscache_cookie *cookie, struct page *page)
page               24 fs/fscache/page.c 	val = radix_tree_lookup(&cookie->stores, page->index);
page               26 fs/fscache/page.c 	trace_fscache_check_page(cookie, page, val, 0);
page               35 fs/fscache/page.c void __fscache_wait_on_page_write(struct fscache_cookie *cookie, struct page *page)
page               39 fs/fscache/page.c 	trace_fscache_page(cookie, page, fscache_page_write_wait);
page               41 fs/fscache/page.c 	wait_event(*wq, !__fscache_check_page_write(cookie, page));
page               50 fs/fscache/page.c bool release_page_wait_timeout(struct fscache_cookie *cookie, struct page *page)
page               54 fs/fscache/page.c 	return wait_event_timeout(*wq, !__fscache_check_page_write(cookie, page),
page               63 fs/fscache/page.c 				  struct page *page,
page               66 fs/fscache/page.c 	struct page *xpage;
page               69 fs/fscache/page.c 	_enter("%p,%p,%x", cookie, page, gfp);
page               71 fs/fscache/page.c 	trace_fscache_page(cookie, page, fscache_page_maybe_release);
page               75 fs/fscache/page.c 	val = radix_tree_lookup(&cookie->stores, page->index);
page               79 fs/fscache/page.c 		__fscache_uncache_page(cookie, page);
page               85 fs/fscache/page.c 	if (radix_tree_tag_get(&cookie->stores, page->index,
page               96 fs/fscache/page.c 	if (radix_tree_tag_get(&cookie->stores, page->index,
page              104 fs/fscache/page.c 	xpage = radix_tree_delete(&cookie->stores, page->index);
page              105 fs/fscache/page.c 	trace_fscache_page(cookie, page, fscache_page_radix_delete);
page              111 fs/fscache/page.c 		ASSERTCMP(xpage, ==, page);
page              120 fs/fscache/page.c 	__fscache_uncache_page(cookie, page);
page              134 fs/fscache/page.c 	if (!release_page_wait_timeout(cookie, page))
page              136 fs/fscache/page.c 			page, page->index);
page              147 fs/fscache/page.c 				   struct page *page)
page              150 fs/fscache/page.c 	struct page *xpage = NULL, *val;
page              158 fs/fscache/page.c 		radix_tree_tag_clear(&cookie->stores, page->index,
page              160 fs/fscache/page.c 		trace_fscache_page(cookie, page, fscache_page_radix_clear_store);
page              161 fs/fscache/page.c 		if (!radix_tree_tag_get(&cookie->stores, page->index,
page              164 fs/fscache/page.c 			xpage = radix_tree_delete(&cookie->stores, page->index);
page              165 fs/fscache/page.c 			trace_fscache_page(cookie, page, fscache_page_radix_delete);
page              166 fs/fscache/page.c 			trace_fscache_page(cookie, page, fscache_page_write_end);
page              168 fs/fscache/page.c 			val = radix_tree_lookup(&cookie->stores, page->index);
page              169 fs/fscache/page.c 			trace_fscache_check_page(cookie, page, val, 1);
page              171 fs/fscache/page.c 			trace_fscache_page(cookie, page, fscache_page_write_end_pend);
page              177 fs/fscache/page.c 		trace_fscache_page(cookie, page, fscache_page_write_end_noc);
page              430 fs/fscache/page.c 				 struct page *page,
page              440 fs/fscache/page.c 	_enter("%p,%p,,,", cookie, page);
page              453 fs/fscache/page.c 	ASSERTCMP(page, !=, NULL);
page              458 fs/fscache/page.c 	op = fscache_alloc_retrieval(cookie, page->mapping,
page              465 fs/fscache/page.c 	trace_fscache_page_op(cookie, page, &op->op, fscache_page_op_retr_one);
page              499 fs/fscache/page.c 		ret = object->cache->ops->allocate_page(op, page, gfp);
page              505 fs/fscache/page.c 		ret = object->cache->ops->read_or_alloc_page(op, page, gfp);
page              676 fs/fscache/page.c 			 struct page *page,
page              684 fs/fscache/page.c 	_enter("%p,%p,,,", cookie, page);
page              692 fs/fscache/page.c 	ASSERTCMP(page, !=, NULL);
page              702 fs/fscache/page.c 	op = fscache_alloc_retrieval(cookie, page->mapping, NULL, NULL);
page              706 fs/fscache/page.c 	trace_fscache_page_op(cookie, page, &op->op, fscache_page_op_alloc_one);
page              732 fs/fscache/page.c 	ret = object->cache->ops->allocate_page(op, page, gfp);
page              768 fs/fscache/page.c 	struct page *page;
page              770 fs/fscache/page.c 	list_for_each_entry(page, pages, lru) {
page              771 fs/fscache/page.c 		if (PageFsCache(page))
page              772 fs/fscache/page.c 			__fscache_uncache_page(cookie, page);
page              794 fs/fscache/page.c 	struct page *page;
page              837 fs/fscache/page.c 	page = NULL;
page              843 fs/fscache/page.c 	page = results[0];
page              844 fs/fscache/page.c 	_debug("gang %d [%lx]", n, page->index);
page              846 fs/fscache/page.c 	radix_tree_tag_set(&cookie->stores, page->index,
page              848 fs/fscache/page.c 	radix_tree_tag_clear(&cookie->stores, page->index,
page              850 fs/fscache/page.c 	trace_fscache_page(cookie, page, fscache_page_radix_pend2store);
page              855 fs/fscache/page.c 	if (page->index >= op->store_limit)
page              860 fs/fscache/page.c 	ret = object->cache->ops->write_page(op, page);
page              862 fs/fscache/page.c 	trace_fscache_wrote_page(cookie, page, &op->op, ret);
page              863 fs/fscache/page.c 	fscache_end_page_write(object, page);
page              876 fs/fscache/page.c 	trace_fscache_wrote_page(cookie, page, &op->op, -ENOBUFS);
page              877 fs/fscache/page.c 	fscache_end_page_write(object, page);
page              896 fs/fscache/page.c 	struct page *page;
page              913 fs/fscache/page.c 			page = results[i];
page              914 fs/fscache/page.c 			radix_tree_delete(&cookie->stores, page->index);
page              915 fs/fscache/page.c 			trace_fscache_page(cookie, page, fscache_page_radix_delete);
page              916 fs/fscache/page.c 			trace_fscache_page(cookie, page, fscache_page_inval);
page              961 fs/fscache/page.c 			 struct page *page,
page              970 fs/fscache/page.c 	_enter("%p,%x,", cookie, (u32) page->flags);
page              973 fs/fscache/page.c 	ASSERT(PageFsCache(page));
page              996 fs/fscache/page.c 	trace_fscache_page_op(cookie, page, &op->op, fscache_page_op_write_one);
page             1009 fs/fscache/page.c 	trace_fscache_page(cookie, page, fscache_page_write);
page             1022 fs/fscache/page.c 	ret = radix_tree_insert(&cookie->stores, page->index, page);
page             1030 fs/fscache/page.c 	trace_fscache_page(cookie, page, fscache_page_radix_insert);
page             1031 fs/fscache/page.c 	radix_tree_tag_set(&cookie->stores, page->index,
page             1033 fs/fscache/page.c 	trace_fscache_page(cookie, page, fscache_page_radix_set_pend);
page             1034 fs/fscache/page.c 	get_page(page);
page             1075 fs/fscache/page.c 	radix_tree_delete(&cookie->stores, page->index);
page             1076 fs/fscache/page.c 	trace_fscache_page(cookie, page, fscache_page_radix_delete);
page             1079 fs/fscache/page.c 	put_page(page);
page             1108 fs/fscache/page.c void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
page             1112 fs/fscache/page.c 	_enter(",%p", page);
page             1115 fs/fscache/page.c 	ASSERTCMP(page, !=, NULL);
page             1120 fs/fscache/page.c 	if (!PageFsCache(page))
page             1123 fs/fscache/page.c 	trace_fscache_page(cookie, page, fscache_page_uncache);
page             1129 fs/fscache/page.c 		ClearPageFsCache(page);
page             1141 fs/fscache/page.c 	if (TestClearPageFsCache(page) &&
page             1145 fs/fscache/page.c 		object->cache->ops->uncache_page(object, page);
page             1165 fs/fscache/page.c void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
page             1173 fs/fscache/page.c 	trace_fscache_page(cookie, page, fscache_page_cached);
page             1175 fs/fscache/page.c 	_debug("- mark %p{%lx}", page, page->index);
page             1176 fs/fscache/page.c 	if (TestSetPageFsCache(page)) {
page             1181 fs/fscache/page.c 				cookie->def->name, page->index);
page             1187 fs/fscache/page.c 					      op->mapping, page);
page             1236 fs/fscache/page.c 			struct page *page = pvec.pages[i];
page             1237 fs/fscache/page.c 			if (PageFsCache(page)) {
page             1238 fs/fscache/page.c 				__fscache_wait_on_page_write(cookie, page);
page             1239 fs/fscache/page.c 				__fscache_uncache_page(cookie, page);
page              305 fs/fuse/cuse.c 	struct page *page;
page              323 fs/fuse/cuse.c 	struct page *page = ap->pages[0];
page              340 fs/fuse/cuse.c 	rc = cuse_parse_devinfo(page_address(page), ap->args.out_args[1].size,
page              408 fs/fuse/cuse.c 	__free_page(page);
page              426 fs/fuse/cuse.c 	struct page *page;
page              434 fs/fuse/cuse.c 	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
page              435 fs/fuse/cuse.c 	if (!page)
page              457 fs/fuse/cuse.c 	ap->pages = &ia->page;
page              459 fs/fuse/cuse.c 	ia->page = page;
page              467 fs/fuse/cuse.c 		__free_page(page);
page              645 fs/fuse/dev.c  	struct page *pg;
page              684 fs/fuse/dev.c  	struct page *page;
page              702 fs/fuse/dev.c  			cs->pg = buf->page;
page              711 fs/fuse/dev.c  			page = alloc_page(GFP_HIGHUSER);
page              712 fs/fuse/dev.c  			if (!page)
page              715 fs/fuse/dev.c  			buf->page = page;
page              720 fs/fuse/dev.c  			cs->pg = page;
page              728 fs/fuse/dev.c  		err = iov_iter_get_pages(cs->iter, &page, PAGE_SIZE, 1, &off);
page              734 fs/fuse/dev.c  		cs->pg = page;
page              763 fs/fuse/dev.c  static int fuse_check_page(struct page *page)
page              765 fs/fuse/dev.c  	if (page_mapcount(page) ||
page              766 fs/fuse/dev.c  	    page->mapping != NULL ||
page              767 fs/fuse/dev.c  	    page_count(page) != 1 ||
page              768 fs/fuse/dev.c  	    (page->flags & PAGE_FLAGS_CHECK_AT_PREP &
page              776 fs/fuse/dev.c  		pr_warn("  page=%p index=%li flags=%08lx, count=%i, mapcount=%i, mapping=%p\n", page, page->index, page->flags, page_count(page), page_mapcount(page), page->mapping);
page              782 fs/fuse/dev.c  static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
page              785 fs/fuse/dev.c  	struct page *oldpage = *pagep;
page              786 fs/fuse/dev.c  	struct page *newpage;
page              811 fs/fuse/dev.c  	newpage = buf->page;
page              868 fs/fuse/dev.c  	cs->pg = buf->page;
page              878 fs/fuse/dev.c  static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
page              894 fs/fuse/dev.c  	get_page(page);
page              895 fs/fuse/dev.c  	buf->page = page;
page              910 fs/fuse/dev.c  static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
page              914 fs/fuse/dev.c  	struct page *page = *pagep;
page              916 fs/fuse/dev.c  	if (page && zeroing && count < PAGE_SIZE)
page              917 fs/fuse/dev.c  		clear_highpage(page);
page              920 fs/fuse/dev.c  		if (cs->write && cs->pipebufs && page) {
page              921 fs/fuse/dev.c  			return fuse_ref_page(cs, page, offset, count);
page              923 fs/fuse/dev.c  			if (cs->move_pages && page &&
page              934 fs/fuse/dev.c  		if (page) {
page              935 fs/fuse/dev.c  			void *mapaddr = kmap_atomic(page);
page              942 fs/fuse/dev.c  	if (page && !cs->write)
page              943 fs/fuse/dev.c  		flush_dcache_page(page);
page             1378 fs/fuse/dev.c  		put_page(bufs[page_nr].page);
page             1584 fs/fuse/dev.c  		struct page *page;
page             1588 fs/fuse/dev.c  		page = find_or_create_page(mapping, index,
page             1590 fs/fuse/dev.c  		if (!page)
page             1594 fs/fuse/dev.c  		err = fuse_copy_page(cs, &page, offset, this_num, 0);
page             1597 fs/fuse/dev.c  			SetPageUptodate(page);
page             1598 fs/fuse/dev.c  		unlock_page(page);
page             1599 fs/fuse/dev.c  		put_page(page);
page             1683 fs/fuse/dev.c  		struct page *page;
page             1686 fs/fuse/dev.c  		page = find_get_page(mapping, index);
page             1687 fs/fuse/dev.c  		if (!page)
page             1691 fs/fuse/dev.c  		ap->pages[ap->num_pages] = page;
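
fuse_copy_page() above maps the destination page with kmap_atomic(), copies, and flushes the D-cache once the kernel has written into a page that user space may have mapped. The core of that pattern, sketched (not fuse's exact code):

#include <linux/highmem.h>
#include <linux/string.h>

static void demo_copy_into_page(struct page *page, unsigned int offset,
				const void *src, unsigned int count)
{
	void *mapaddr = kmap_atomic(page);

	memcpy(mapaddr + offset, src, count);
	kunmap_atomic(mapaddr);
	/* Keep the data visible to any user-space mapping of the page. */
	flush_dcache_page(page);
}
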
page             1210 fs/fuse/dir.c  static int fuse_readlink_page(struct inode *inode, struct page *page)
page             1216 fs/fuse/dir.c  		.pages = &page,
page             1239 fs/fuse/dir.c  	link = page_address(page);
page             1249 fs/fuse/dir.c  	struct page *page;
page             1263 fs/fuse/dir.c  	page = alloc_page(GFP_KERNEL);
page             1265 fs/fuse/dir.c  	if (!page)
page             1268 fs/fuse/dir.c  	err = fuse_readlink_page(inode, page);
page             1270 fs/fuse/dir.c  		__free_page(page);
page             1274 fs/fuse/dir.c  	set_delayed_call(callback, page_put_link, page);
page             1276 fs/fuse/dir.c  	return page_address(page);
page             1763 fs/fuse/dir.c  static int fuse_symlink_readpage(struct file *null, struct page *page)
page             1765 fs/fuse/dir.c  	int err = fuse_readlink_page(page->mapping->host, page);
page             1768 fs/fuse/dir.c  		SetPageUptodate(page);
page             1770 fs/fuse/dir.c  	unlock_page(page);
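
fuse_get_link() above demonstrates the VFS page-based ->get_link() idiom: read the target into a fresh page and defer its release via page_put_link(). Sketch (read_into() is a hypothetical stand-in for fuse_readlink_page()):

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/err.h>

static const char *demo_get_link(struct inode *inode,
				 struct delayed_call *callback,
				 int (*read_into)(struct inode *, struct page *))
{
	struct page *page = alloc_page(GFP_KERNEL);
	int err;

	if (!page)
		return ERR_PTR(-ENOMEM);
	err = read_into(inode, page);
	if (err) {
		__free_page(page);
		return ERR_PTR(err);
	}
	/* The VFS calls page_put_link(page) when it is done with the string. */
	set_delayed_call(callback, page_put_link, page);
	return page_address(page);
}
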
page               22 fs/fuse/file.c static struct page **fuse_pages_alloc(unsigned int npages, gfp_t flags,
page               25 fs/fuse/file.c 	struct page **pages;
page               27 fs/fuse/file.c 	pages = kzalloc(npages * (sizeof(struct page *) +
page              781 fs/fuse/file.c static int fuse_do_readpage(struct file *file, struct page *page)
page              783 fs/fuse/file.c 	struct inode *inode = page->mapping->host;
page              785 fs/fuse/file.c 	loff_t pos = page_offset(page);
page              791 fs/fuse/file.c 		.ap.pages = &page,
page              802 fs/fuse/file.c 	fuse_wait_on_page_writeback(inode, page->index);
page              820 fs/fuse/file.c 	SetPageUptodate(page);
page              825 fs/fuse/file.c static int fuse_readpage(struct file *file, struct page *page)
page              827 fs/fuse/file.c 	struct inode *inode = page->mapping->host;
page              834 fs/fuse/file.c 	err = fuse_do_readpage(file, page);
page              837 fs/fuse/file.c 	unlock_page(page);
page              867 fs/fuse/file.c 		struct page *page = ap->pages[i];
page              870 fs/fuse/file.c 			SetPageUptodate(page);
page              872 fs/fuse/file.c 			SetPageError(page);
page              873 fs/fuse/file.c 		unlock_page(page);
page              874 fs/fuse/file.c 		put_page(page);
page              926 fs/fuse/file.c static int fuse_readpages_fill(void *_data, struct page *page)
page              934 fs/fuse/file.c 	fuse_wait_on_page_writeback(inode, page->index);
page              939 fs/fuse/file.c 	     ap->pages[ap->num_pages - 1]->index + 1 != page->index)) {
page              945 fs/fuse/file.c 			unlock_page(page);
page              952 fs/fuse/file.c 		unlock_page(page);
page              957 fs/fuse/file.c 	get_page(page);
page              958 fs/fuse/file.c 	ap->pages[ap->num_pages] = page;
page             1121 fs/fuse/file.c 		struct page *page = ap->pages[i];
page             1124 fs/fuse/file.c 			SetPageUptodate(page);
page             1132 fs/fuse/file.c 		unlock_page(page);
page             1133 fs/fuse/file.c 		put_page(page);
page             1154 fs/fuse/file.c 		struct page *page;
page             1167 fs/fuse/file.c 		page = grab_cache_page_write_begin(mapping, index, 0);
page             1168 fs/fuse/file.c 		if (!page)
page             1172 fs/fuse/file.c 			flush_dcache_page(page);
page             1174 fs/fuse/file.c 		tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes);
page             1175 fs/fuse/file.c 		flush_dcache_page(page);
page             1179 fs/fuse/file.c 			unlock_page(page);
page             1180 fs/fuse/file.c 			put_page(page);
page             1186 fs/fuse/file.c 		ap->pages[ap->num_pages] = page;
page             1814 fs/fuse/file.c static int fuse_writepage_locked(struct page *page)
page             1816 fs/fuse/file.c 	struct address_space *mapping = page->mapping;
page             1822 fs/fuse/file.c 	struct page *tmp_page;
page             1825 fs/fuse/file.c 	set_page_writeback(page);
page             1841 fs/fuse/file.c 	fuse_write_args_fill(&wpa->ia, wpa->ia.ff, page_offset(page), 0);
page             1843 fs/fuse/file.c 	copy_highpage(tmp_page, page);
page             1863 fs/fuse/file.c 	end_page_writeback(page);
page             1872 fs/fuse/file.c 	mapping_set_error(page->mapping, error);
page             1873 fs/fuse/file.c 	end_page_writeback(page);
page             1877 fs/fuse/file.c static int fuse_writepage(struct page *page, struct writeback_control *wbc)
page             1881 fs/fuse/file.c 	if (fuse_page_is_writeback(page->mapping->host, page->index)) {
page             1889 fs/fuse/file.c 		redirty_page_for_writepage(wbc, page);
page             1890 fs/fuse/file.c 		unlock_page(page);
page             1894 fs/fuse/file.c 	err = fuse_writepage_locked(page);
page             1895 fs/fuse/file.c 	unlock_page(page);
page             1904 fs/fuse/file.c 	struct page **orig_pages;
page             1912 fs/fuse/file.c 	struct page **pages;
page             1924 fs/fuse/file.c 	memcpy(pages, ap->pages, sizeof(struct page *) * ap->num_pages);
page             1960 fs/fuse/file.c 				     struct page *page)
page             1971 fs/fuse/file.c 	old_wpa = fuse_find_writeback(fi, page->index, page->index);
page             1984 fs/fuse/file.c 		if (curr_index == page->index) {
page             2010 fs/fuse/file.c static int fuse_writepages_fill(struct page *page,
page             2019 fs/fuse/file.c 	struct page *tmp_page;
page             2036 fs/fuse/file.c 	is_writeback = fuse_page_is_writeback(inode, page->index);
page             2041 fs/fuse/file.c 	     data->orig_pages[ap->num_pages - 1]->index + 1 != page->index)) {
page             2079 fs/fuse/file.c 		fuse_write_args_fill(&wpa->ia, data->ff, page_offset(page), 0);
page             2093 fs/fuse/file.c 	set_page_writeback(page);
page             2095 fs/fuse/file.c 	copy_highpage(tmp_page, page);
page             2104 fs/fuse/file.c 	if (is_writeback && fuse_writepage_in_flight(wpa, page)) {
page             2105 fs/fuse/file.c 		end_page_writeback(page);
page             2109 fs/fuse/file.c 	data->orig_pages[ap->num_pages] = page;
page             2120 fs/fuse/file.c 	unlock_page(page);
page             2143 fs/fuse/file.c 				  sizeof(struct page *),
page             2169 fs/fuse/file.c 		struct page **pagep, void **fsdata)
page             2173 fs/fuse/file.c 	struct page *page;
page             2179 fs/fuse/file.c 	page = grab_cache_page_write_begin(mapping, index, flags);
page             2180 fs/fuse/file.c 	if (!page)
page             2183 fs/fuse/file.c 	fuse_wait_on_page_writeback(mapping->host, page->index);
page             2185 fs/fuse/file.c 	if (PageUptodate(page) || len == PAGE_SIZE)
page             2195 fs/fuse/file.c 			zero_user_segment(page, 0, off);
page             2198 fs/fuse/file.c 	err = fuse_do_readpage(file, page);
page             2202 fs/fuse/file.c 	*pagep = page;
page             2206 fs/fuse/file.c 	unlock_page(page);
page             2207 fs/fuse/file.c 	put_page(page);
page             2214 fs/fuse/file.c 		struct page *page, void *fsdata)
page             2216 fs/fuse/file.c 	struct inode *inode = page->mapping->host;
page             2222 fs/fuse/file.c 	if (!PageUptodate(page)) {
page             2226 fs/fuse/file.c 			zero_user_segment(page, endoff, PAGE_SIZE);
page             2227 fs/fuse/file.c 		SetPageUptodate(page);
page             2231 fs/fuse/file.c 	set_page_dirty(page);
page             2234 fs/fuse/file.c 	unlock_page(page);
page             2235 fs/fuse/file.c 	put_page(page);
page             2240 fs/fuse/file.c static int fuse_launder_page(struct page *page)
page             2243 fs/fuse/file.c 	if (clear_page_dirty_for_io(page)) {
page             2244 fs/fuse/file.c 		struct inode *inode = page->mapping->host;
page             2245 fs/fuse/file.c 		err = fuse_writepage_locked(page);
page             2247 fs/fuse/file.c 			fuse_wait_on_page_writeback(inode, page->index);
page             2278 fs/fuse/file.c 	struct page *page = vmf->page;
page             2282 fs/fuse/file.c 	lock_page(page);
page             2283 fs/fuse/file.c 	if (page->mapping != inode->i_mapping) {
page             2284 fs/fuse/file.c 		unlock_page(page);
page             2288 fs/fuse/file.c 	fuse_wait_on_page_writeback(inode, page->index);
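
fuse_writepage_locked() above snapshots the dirty page into a temporary highmem page so writeback on the original can be ended immediately while the request is still in flight (the usual rationale: memory reclaim must never wait on the user-space server). A condensed sketch of that bounce-page pattern (send_async() is hypothetical and takes ownership of tmp_page):

#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>

static int demo_writepage_via_bounce(struct page *page,
				     int (*send_async)(struct page *tmp))
{
	struct page *tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);

	if (!tmp_page)
		return -ENOMEM;

	set_page_writeback(page);
	copy_highpage(tmp_page, page);	/* snapshot the data */
	end_page_writeback(page);	/* the VM sees the page as written back */
	return send_async(tmp_page);	/* I/O proceeds from the snapshot */
}
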
page              259 fs/fuse/fuse_i.h 	struct page **pages;
page               38 fs/fuse/readdir.c 	struct page *page;
page               65 fs/fuse/readdir.c 		page = find_lock_page(file->f_mapping, index);
page               67 fs/fuse/readdir.c 		page = find_or_create_page(file->f_mapping, index,
page               70 fs/fuse/readdir.c 	if (!page)
page               79 fs/fuse/readdir.c 	addr = kmap_atomic(page);
page               88 fs/fuse/readdir.c 	unlock_page(page);
page               89 fs/fuse/readdir.c 	put_page(page);
page              321 fs/fuse/readdir.c 	struct page *page;
page              330 fs/fuse/readdir.c 	page = alloc_page(GFP_KERNEL);
page              331 fs/fuse/readdir.c 	if (!page)
page              337 fs/fuse/readdir.c 	ap->pages = &page;
page              357 fs/fuse/readdir.c 			res = parse_dirplusfile(page_address(page), res,
page              360 fs/fuse/readdir.c 			res = parse_dirfile(page_address(page), res, file,
page              365 fs/fuse/readdir.c 	__free_page(page);
page              440 fs/fuse/readdir.c 	struct page *page;
page              514 fs/fuse/readdir.c 	page = find_get_page_flags(file->f_mapping, index,
page              517 fs/fuse/readdir.c 	if (!page) {
page              529 fs/fuse/readdir.c 		unlock_page(page);
page              530 fs/fuse/readdir.c 		put_page(page);
page              539 fs/fuse/readdir.c 	addr = kmap(page);
page              541 fs/fuse/readdir.c 	kunmap(page);
page              542 fs/fuse/readdir.c 	unlock_page(page);
page              543 fs/fuse/readdir.c 	put_page(page);
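
The readdir cache lines follow the standard consume sequence for a page obtained from find_get_page_flags(): map, parse, unmap, unlock, drop the reference. Sketch (parse() is hypothetical; the page is assumed locked and uptodate):

#include <linux/highmem.h>
#include <linux/pagemap.h>

static int demo_consume_locked_page(struct page *page,
				    int (*parse)(void *addr, size_t len))
{
	void *addr = kmap(page);
	int err = parse(addr, PAGE_SIZE);

	kunmap(page);
	unlock_page(page);
	put_page(page);		/* drop the find_get_page() reference */
	return err;
}
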
page              458 fs/fuse/virtio_fs.c 	struct page *page;
page              490 fs/fuse/virtio_fs.c 					page = ap->pages[i];
page              491 fs/fuse/virtio_fs.c 					zero_user_segment(page, len, thislen);
page              827 fs/fuse/virtio_fs.c 				       struct page **pages,
page               40 fs/gfs2/aops.c void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
page               43 fs/gfs2/aops.c 	struct buffer_head *head = page_buffers(page);
page               89 fs/gfs2/aops.c static int gfs2_writepage(struct page *page, struct writeback_control *wbc)
page               91 fs/gfs2/aops.c 	struct inode *inode = page->mapping->host;
page              104 fs/gfs2/aops.c 	if (page->index > end_index || (page->index == end_index && !offset)) {
page              105 fs/gfs2/aops.c 		page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
page              109 fs/gfs2/aops.c 	return nobh_writepage(page, gfs2_get_block_noalloc, wbc);
page              112 fs/gfs2/aops.c 	redirty_page_for_writepage(wbc, page);
page              114 fs/gfs2/aops.c 	unlock_page(page);
page              121 fs/gfs2/aops.c static int gfs2_write_full_page(struct page *page, get_block_t *get_block,
page              124 fs/gfs2/aops.c 	struct inode * const inode = page->mapping->host;
page              137 fs/gfs2/aops.c 	if (page->index == end_index && offset)
page              138 fs/gfs2/aops.c 		zero_user_segment(page, offset, PAGE_SIZE);
page              140 fs/gfs2/aops.c 	return __block_write_full_page(inode, page, get_block, wbc,
page              155 fs/gfs2/aops.c static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
page              157 fs/gfs2/aops.c 	struct inode *inode = page->mapping->host;
page              161 fs/gfs2/aops.c 	if (PageChecked(page)) {
page              162 fs/gfs2/aops.c 		ClearPageChecked(page);
page              163 fs/gfs2/aops.c 		if (!page_has_buffers(page)) {
page              164 fs/gfs2/aops.c 			create_empty_buffers(page, inode->i_sb->s_blocksize,
page              167 fs/gfs2/aops.c 		gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize);
page              169 fs/gfs2/aops.c 	return gfs2_write_full_page(page, gfs2_get_block_noalloc, wbc);
page              181 fs/gfs2/aops.c static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
page              183 fs/gfs2/aops.c 	struct inode *inode = page->mapping->host;
page              190 fs/gfs2/aops.c 	if (PageChecked(page) || current->journal_info)
page              192 fs/gfs2/aops.c 	ret = __gfs2_jdata_writepage(page, wbc);
page              196 fs/gfs2/aops.c 	redirty_page_for_writepage(wbc, page);
page              198 fs/gfs2/aops.c 	unlock_page(page);
page              255 fs/gfs2/aops.c 		struct page *page = pvec->pages[i];
page              257 fs/gfs2/aops.c 		*done_index = page->index;
page              259 fs/gfs2/aops.c 		lock_page(page);
page              261 fs/gfs2/aops.c 		if (unlikely(page->mapping != mapping)) {
page              263 fs/gfs2/aops.c 			unlock_page(page);
page              267 fs/gfs2/aops.c 		if (!PageDirty(page)) {
page              272 fs/gfs2/aops.c 		if (PageWriteback(page)) {
page              274 fs/gfs2/aops.c 				wait_on_page_writeback(page);
page              279 fs/gfs2/aops.c 		BUG_ON(PageWriteback(page));
page              280 fs/gfs2/aops.c 		if (!clear_page_dirty_for_io(page))
page              285 fs/gfs2/aops.c 		ret = __gfs2_jdata_writepage(page, wbc);
page              288 fs/gfs2/aops.c 				unlock_page(page);
page              301 fs/gfs2/aops.c 				*done_index = page->index + 1;
page              437 fs/gfs2/aops.c static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
page              449 fs/gfs2/aops.c 	if (unlikely(page->index)) {
page              450 fs/gfs2/aops.c 		zero_user(page, 0, PAGE_SIZE);
page              451 fs/gfs2/aops.c 		SetPageUptodate(page);
page              459 fs/gfs2/aops.c 	kaddr = kmap_atomic(page);
page              465 fs/gfs2/aops.c 	flush_dcache_page(page);
page              467 fs/gfs2/aops.c 	SetPageUptodate(page);
page              483 fs/gfs2/aops.c static int __gfs2_readpage(void *file, struct page *page)
page              485 fs/gfs2/aops.c 	struct gfs2_inode *ip = GFS2_I(page->mapping->host);
page              486 fs/gfs2/aops.c 	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
page              490 fs/gfs2/aops.c 	if (i_blocksize(page->mapping->host) == PAGE_SIZE &&
page              491 fs/gfs2/aops.c 	    !page_has_buffers(page)) {
page              492 fs/gfs2/aops.c 		error = iomap_readpage(page, &gfs2_iomap_ops);
page              494 fs/gfs2/aops.c 		error = stuffed_readpage(ip, page);
page              495 fs/gfs2/aops.c 		unlock_page(page);
page              497 fs/gfs2/aops.c 		error = mpage_readpage(page, gfs2_block_map);
page              516 fs/gfs2/aops.c static int gfs2_readpage(struct file *file, struct page *page)
page              518 fs/gfs2/aops.c 	struct address_space *mapping = page->mapping;
page              523 fs/gfs2/aops.c 	unlock_page(page);
page              529 fs/gfs2/aops.c 	lock_page(page);
page              530 fs/gfs2/aops.c 	if (page->mapping == mapping && !PageUptodate(page))
page              531 fs/gfs2/aops.c 		error = __gfs2_readpage(file, page);
page              533 fs/gfs2/aops.c 		unlock_page(page);
page              538 fs/gfs2/aops.c 		lock_page(page);
page              559 fs/gfs2/aops.c 	struct page *page;
page              566 fs/gfs2/aops.c 		page = read_cache_page(mapping, index, __gfs2_readpage, NULL);
page              567 fs/gfs2/aops.c 		if (IS_ERR(page))
page              568 fs/gfs2/aops.c 			return PTR_ERR(page);
page              569 fs/gfs2/aops.c 		p = kmap_atomic(page);
page              572 fs/gfs2/aops.c 		put_page(page);
page              674 fs/gfs2/aops.c static int jdata_set_page_dirty(struct page *page)
page              676 fs/gfs2/aops.c 	SetPageChecked(page);
page              677 fs/gfs2/aops.c 	return __set_page_dirty_buffers(page);
page              729 fs/gfs2/aops.c static void gfs2_invalidatepage(struct page *page, unsigned int offset,
page              732 fs/gfs2/aops.c 	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
page              738 fs/gfs2/aops.c 	BUG_ON(!PageLocked(page));
page              740 fs/gfs2/aops.c 		ClearPageChecked(page);
page              741 fs/gfs2/aops.c 	if (!page_has_buffers(page))
page              744 fs/gfs2/aops.c 	bh = head = page_buffers(page);
page              756 fs/gfs2/aops.c 		try_to_release_page(page, 0);
page              770 fs/gfs2/aops.c int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
page              772 fs/gfs2/aops.c 	struct address_space *mapping = page->mapping;
page              777 fs/gfs2/aops.c 	if (!page_has_buffers(page))
page              792 fs/gfs2/aops.c 	head = bh = page_buffers(page);
page              805 fs/gfs2/aops.c 	head = bh = page_buffers(page);
page              821 fs/gfs2/aops.c 	return try_to_free_buffers(page);
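
stuffed_readpage() above copies "stuffed" (inline) dinode data into page 0 and zero-fills the tail before marking the page uptodate. The fill pattern, sketched with the dinode buffer abstracted to (data, dsize):

#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/string.h>

static void demo_fill_stuffed_page(struct page *page, const void *data,
				   size_t dsize)
{
	void *kaddr = kmap_atomic(page);

	memcpy(kaddr, data, dsize);
	memset(kaddr + dsize, 0, PAGE_SIZE - dsize);	/* zero past the inline data */
	kunmap_atomic(kaddr);
	flush_dcache_page(page);
	SetPageUptodate(page);
}
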
page               12 fs/gfs2/aops.h extern void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
page               56 fs/gfs2/bmap.c 			       u64 block, struct page *page)
page               62 fs/gfs2/bmap.c 	if (!page || page->index) {
page               63 fs/gfs2/bmap.c 		page = find_or_create_page(inode->i_mapping, 0, GFP_NOFS);
page               64 fs/gfs2/bmap.c 		if (!page)
page               69 fs/gfs2/bmap.c 	if (!PageUptodate(page)) {
page               70 fs/gfs2/bmap.c 		void *kaddr = kmap(page);
page               78 fs/gfs2/bmap.c 		kunmap(page);
page               80 fs/gfs2/bmap.c 		SetPageUptodate(page);
page               83 fs/gfs2/bmap.c 	if (!page_has_buffers(page))
page               84 fs/gfs2/bmap.c 		create_empty_buffers(page, BIT(inode->i_blkbits),
page               87 fs/gfs2/bmap.c 	bh = page_buffers(page);
page              101 fs/gfs2/bmap.c 		unlock_page(page);
page              102 fs/gfs2/bmap.c 		put_page(page);
page              119 fs/gfs2/bmap.c int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page)
page              150 fs/gfs2/bmap.c 			error = gfs2_unstuffer_page(ip, dibh, block, page);
page             1042 fs/gfs2/bmap.c 				 unsigned copied, struct page *page,
page             1049 fs/gfs2/bmap.c 	if (page && !gfs2_is_stuffed(ip))
page             1050 fs/gfs2/bmap.c 		gfs2_page_add_databufs(ip, page, offset_in_page(pos), copied);
page               16 fs/gfs2/bmap.h struct page;
page               48 fs/gfs2/bmap.h extern int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page);
page              417 fs/gfs2/file.c static int gfs2_allocate_page_backing(struct page *page, unsigned int length)
page              419 fs/gfs2/file.c 	u64 pos = page_offset(page);
page              424 fs/gfs2/file.c 		if (gfs2_iomap_get_alloc(page->mapping->host, pos, length, &iomap))
page              447 fs/gfs2/file.c 	struct page *page = vmf->page;
page              453 fs/gfs2/file.c 	u64 pos = page_offset(page);
page              479 fs/gfs2/file.c 		lock_page(page);
page              480 fs/gfs2/file.c 		if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
page              482 fs/gfs2/file.c 			unlock_page(page);
page              511 fs/gfs2/file.c 	lock_page(page);
page              516 fs/gfs2/file.c 	if (size == 0 || (page->index > last_index))
page              523 fs/gfs2/file.c 	if (!PageUptodate(page) || page->mapping != inode->i_mapping)
page              529 fs/gfs2/file.c 		ret = gfs2_unstuff_dinode(ip, page);
page              531 fs/gfs2/file.c 		ret = gfs2_allocate_page_backing(page, PAGE_SIZE);
page              535 fs/gfs2/file.c 		unlock_page(page);
page              546 fs/gfs2/file.c 		set_page_dirty(page);
page              547 fs/gfs2/file.c 		wait_for_stable_page(page);
page               15 fs/gfs2/inode.h extern int gfs2_releasepage(struct page *page, gfp_t gfp_mask);
page              693 fs/gfs2/log.c  	struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
page              699 fs/gfs2/log.c  	lh = page_address(page);
page              749 fs/gfs2/log.c  	gfs2_log_write(sdp, page, sb->s_blocksize, 0, dblock);
page              175 fs/gfs2/lops.c 	struct page *page = bvec->bv_page;
page              178 fs/gfs2/lops.c 	bh = page_buffers(page);
page              207 fs/gfs2/lops.c 	struct page *page;
page              217 fs/gfs2/lops.c 		page = bvec->bv_page;
page              218 fs/gfs2/lops.c 		if (page_has_buffers(page))
page              221 fs/gfs2/lops.c 			mempool_free(page, gfs2_page_pool);
page              325 fs/gfs2/lops.c void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page,
page              333 fs/gfs2/lops.c 	ret = bio_add_page(bio, page, size, offset);
page              337 fs/gfs2/lops.c 		ret = bio_add_page(bio, page, size, offset);
page              369 fs/gfs2/lops.c void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page)
page              372 fs/gfs2/lops.c 	gfs2_log_write(sdp, page, sb->s_blocksize, 0,
page              386 fs/gfs2/lops.c 	struct page *page;
page              391 fs/gfs2/lops.c 		page = bvec->bv_page;
page              395 fs/gfs2/lops.c 			SetPageError(page);
page              396 fs/gfs2/lops.c 			mapping_set_error(page->mapping, err);
page              398 fs/gfs2/lops.c 		unlock_page(page);
page              414 fs/gfs2/lops.c 			      struct page *page)
page              418 fs/gfs2/lops.c 	void *kaddr = kmap_atomic(page);
page              459 fs/gfs2/lops.c 	struct page *page;
page              461 fs/gfs2/lops.c 	page = find_get_page(jd->jd_inode->i_mapping, index);
page              462 fs/gfs2/lops.c 	wait_on_page_locked(page);
page              464 fs/gfs2/lops.c 	if (PageError(page))
page              468 fs/gfs2/lops.c 		*done = gfs2_jhead_pg_srch(jd, head, page);
page              470 fs/gfs2/lops.c 	put_page(page); /* Once for find_get_page */
page              471 fs/gfs2/lops.c 	put_page(page); /* Once more for find_or_create_page */
page              511 fs/gfs2/lops.c 	struct page *page = NULL;
page              524 fs/gfs2/lops.c 			if (!page) {
page              525 fs/gfs2/lops.c 				page = find_or_create_page(mapping,
page              527 fs/gfs2/lops.c 				if (!page) {
page              539 fs/gfs2/lops.c 					sz = bio_add_page(bio, page, bsize, off);
page              560 fs/gfs2/lops.c 			sz = bio_add_page(bio, page, bsize, off);
page              565 fs/gfs2/lops.c 				page = NULL;
page              595 fs/gfs2/lops.c static struct page *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type,
page              598 fs/gfs2/lops.c 	struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
page              599 fs/gfs2/lops.c 	struct gfs2_log_descriptor *ld = page_address(page);
page              608 fs/gfs2/lops.c 	return page;
page              644 fs/gfs2/lops.c 	struct page *page;
page              657 fs/gfs2/lops.c 		page = gfs2_get_log_desc(sdp,
page              660 fs/gfs2/lops.c 		ld = page_address(page);
page              676 fs/gfs2/lops.c 		gfs2_log_write_page(sdp, page);
page              687 fs/gfs2/lops.c 				page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
page              688 fs/gfs2/lops.c 				ptr = page_address(page);
page              697 fs/gfs2/lops.c 				gfs2_log_write_page(sdp, page);
page              861 fs/gfs2/lops.c 	struct page *page;
page              869 fs/gfs2/lops.c 	page = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_REVOKE, length, sdp->sd_log_num_revoke);
page              877 fs/gfs2/lops.c 			gfs2_log_write_page(sdp, page);
page              878 fs/gfs2/lops.c 			page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
page              879 fs/gfs2/lops.c 			mh = page_address(page);
page              887 fs/gfs2/lops.c 		*(__be64 *)(page_address(page) + offset) = cpu_to_be64(bd->bd_blkno);
page              892 fs/gfs2/lops.c 	gfs2_log_write_page(sdp, page);
page               22 fs/gfs2/lops.h extern void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page,
page               24 fs/gfs2/lops.h extern void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page);
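
gfs2_log_write() above tries bio_add_page() on the current log bio and, when the bio is full, submits it and retries on a fresh one. The add-or-chain idiom, sketched (new_bio() is a hypothetical stand-in for gfs2's submit-and-reallocate step):

#include <linux/bio.h>
#include <linux/bug.h>

static struct bio *demo_log_add_page(struct bio *bio, struct page *page,
				     unsigned int size, unsigned int offset,
				     struct bio *(*new_bio)(struct bio *old))
{
	if (bio_add_page(bio, page, size, offset) == size)
		return bio;		/* fitted into the current bio */

	bio = new_bio(bio);		/* submit the full bio, allocate the next */
	if (bio_add_page(bio, page, size, offset) != size)
		BUG();			/* an empty bio must accept one page */
	return bio;
}
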
page               33 fs/gfs2/meta_io.c static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wbc)
page               39 fs/gfs2/meta_io.c 	BUG_ON(!PageLocked(page));
page               40 fs/gfs2/meta_io.c 	BUG_ON(!page_has_buffers(page));
page               42 fs/gfs2/meta_io.c 	head = page_buffers(page);
page               58 fs/gfs2/meta_io.c 			redirty_page_for_writepage(wbc, page);
page               72 fs/gfs2/meta_io.c 	BUG_ON(PageWriteback(page));
page               73 fs/gfs2/meta_io.c 	set_page_writeback(page);
page               83 fs/gfs2/meta_io.c 	unlock_page(page);
page               86 fs/gfs2/meta_io.c 		end_page_writeback(page);
page              114 fs/gfs2/meta_io.c 	struct page *page;
page              129 fs/gfs2/meta_io.c 			page = grab_cache_page(mapping, index);
page              130 fs/gfs2/meta_io.c 			if (page)
page              135 fs/gfs2/meta_io.c 		page = find_get_page_flags(mapping, index,
page              137 fs/gfs2/meta_io.c 		if (!page)
page              141 fs/gfs2/meta_io.c 	if (!page_has_buffers(page))
page              142 fs/gfs2/meta_io.c 		create_empty_buffers(page, sdp->sd_sb.sb_bsize, 0);
page              145 fs/gfs2/meta_io.c 	for (bh = page_buffers(page); bufnum--; bh = bh->b_this_page)
page              152 fs/gfs2/meta_io.c 	unlock_page(page);
page              153 fs/gfs2/meta_io.c 	put_page(page);
page              192 fs/gfs2/meta_io.c 		struct page *page = bvec->bv_page;
page              193 fs/gfs2/meta_io.c 		struct buffer_head *bh = page_buffers(page);
page              185 fs/gfs2/ops_fstype.c 	struct page *page = bio->bi_private;
page              188 fs/gfs2/ops_fstype.c 		SetPageUptodate(page);
page              191 fs/gfs2/ops_fstype.c 	unlock_page(page);
page              240 fs/gfs2/ops_fstype.c 	struct page *page;
page              243 fs/gfs2/ops_fstype.c 	page = alloc_page(GFP_NOFS);
page              244 fs/gfs2/ops_fstype.c 	if (unlikely(!page))
page              247 fs/gfs2/ops_fstype.c 	ClearPageUptodate(page);
page              248 fs/gfs2/ops_fstype.c 	ClearPageDirty(page);
page              249 fs/gfs2/ops_fstype.c 	lock_page(page);
page              254 fs/gfs2/ops_fstype.c 	bio_add_page(bio, page, PAGE_SIZE, 0);
page              257 fs/gfs2/ops_fstype.c 	bio->bi_private = page;
page              260 fs/gfs2/ops_fstype.c 	wait_on_page_locked(page);
page              262 fs/gfs2/ops_fstype.c 	if (!PageUptodate(page)) {
page              263 fs/gfs2/ops_fstype.c 		__free_page(page);
page              266 fs/gfs2/ops_fstype.c 	p = kmap(page);
page              268 fs/gfs2/ops_fstype.c 	kunmap(page);
page              269 fs/gfs2/ops_fstype.c 	__free_page(page);
page              693 fs/gfs2/quota.c 	struct page *page;
page              704 fs/gfs2/quota.c 	page = find_or_create_page(mapping, index, GFP_NOFS);
page              705 fs/gfs2/quota.c 	if (!page)
page              707 fs/gfs2/quota.c 	if (!page_has_buffers(page))
page              708 fs/gfs2/quota.c 		create_empty_buffers(page, bsize, 0);
page              710 fs/gfs2/quota.c 	bh = page_buffers(page);
page              725 fs/gfs2/quota.c 				zero_user(page, bnum * bsize, bh->b_size);
page              727 fs/gfs2/quota.c 		if (PageUptodate(page))
page              751 fs/gfs2/quota.c 	kaddr = kmap_atomic(page);
page              753 fs/gfs2/quota.c 	flush_dcache_page(page);
page              755 fs/gfs2/quota.c 	unlock_page(page);
page              756 fs/gfs2/quota.c 	put_page(page);
page              761 fs/gfs2/quota.c 	unlock_page(page);
page              762 fs/gfs2/quota.c 	put_page(page);
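
The quota code above grabs a page-cache page with find_or_create_page() and lazily attaches buffer heads sized to the filesystem block. That setup step, sketched:

#include <linux/buffer_head.h>
#include <linux/pagemap.h>

static struct page *demo_grab_buffered_page(struct address_space *mapping,
					    pgoff_t index, unsigned int bsize)
{
	struct page *page = find_or_create_page(mapping, index, GFP_NOFS);

	if (!page)
		return NULL;
	if (!page_has_buffers(page))
		create_empty_buffers(page, bsize, 0);	/* one bh per fs block */
	return page;	/* returned locked, with an elevated refcount */
}
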
page               21 fs/hfs/bnode.c 	struct page *page;
page               24 fs/hfs/bnode.c 	page = node->page[0];
page               26 fs/hfs/bnode.c 	memcpy(buf, kmap(page) + off, len);
page               27 fs/hfs/bnode.c 	kunmap(page);
page               63 fs/hfs/bnode.c 	struct page *page;
page               66 fs/hfs/bnode.c 	page = node->page[0];
page               68 fs/hfs/bnode.c 	memcpy(kmap(page) + off, buf, len);
page               69 fs/hfs/bnode.c 	kunmap(page);
page               70 fs/hfs/bnode.c 	set_page_dirty(page);
page               88 fs/hfs/bnode.c 	struct page *page;
page               91 fs/hfs/bnode.c 	page = node->page[0];
page               93 fs/hfs/bnode.c 	memset(kmap(page) + off, 0, len);
page               94 fs/hfs/bnode.c 	kunmap(page);
page               95 fs/hfs/bnode.c 	set_page_dirty(page);
page              101 fs/hfs/bnode.c 	struct page *src_page, *dst_page;
page              108 fs/hfs/bnode.c 	src_page = src_node->page[0];
page              109 fs/hfs/bnode.c 	dst_page = dst_node->page[0];
page              119 fs/hfs/bnode.c 	struct page *page;
page              127 fs/hfs/bnode.c 	page = node->page[0];
page              128 fs/hfs/bnode.c 	ptr = kmap(page);
page              130 fs/hfs/bnode.c 	kunmap(page);
page              131 fs/hfs/bnode.c 	set_page_dirty(page);
page              240 fs/hfs/bnode.c 	struct page *page;
page              250 fs/hfs/bnode.c 		sizeof(struct page *);
page              281 fs/hfs/bnode.c 		page = read_mapping_page(mapping, block++, NULL);
page              282 fs/hfs/bnode.c 		if (IS_ERR(page))
page              284 fs/hfs/bnode.c 		if (PageError(page)) {
page              285 fs/hfs/bnode.c 			put_page(page);
page              288 fs/hfs/bnode.c 		node->page[i] = page;
page              338 fs/hfs/bnode.c 	desc = (struct hfs_bnode_desc *)(kmap(node->page[0]) + node->page_offset);
page              344 fs/hfs/bnode.c 	kunmap(node->page[0]);
page              400 fs/hfs/bnode.c 		if (node->page[i])
page              401 fs/hfs/bnode.c 			put_page(node->page[i]);
page              408 fs/hfs/bnode.c 	struct page **pagep;
page              427 fs/hfs/bnode.c 	pagep = node->page;
page              467 fs/hfs/bnode.c 			if (!node->page[i])
page              469 fs/hfs/bnode.c 			mark_page_accessed(node->page[i]);
page               24 fs/hfs/btree.c 	struct page *page;
page               78 fs/hfs/btree.c 	page = read_mapping_page(mapping, 0, NULL);
page               79 fs/hfs/btree.c 	if (IS_ERR(page))
page               83 fs/hfs/btree.c 	head = (struct hfs_btree_header_rec *)(kmap(page) + sizeof(struct hfs_bnode_desc));
page              122 fs/hfs/btree.c 	kunmap(page);
page              123 fs/hfs/btree.c 	put_page(page);
page              127 fs/hfs/btree.c 	put_page(page);
page              164 fs/hfs/btree.c 	struct page *page;
page              171 fs/hfs/btree.c 	page = node->page[0];
page              172 fs/hfs/btree.c 	head = (struct hfs_btree_header_rec *)(kmap(page) + sizeof(struct hfs_bnode_desc));
page              183 fs/hfs/btree.c 	kunmap(page);
page              184 fs/hfs/btree.c 	set_page_dirty(page);
page              250 fs/hfs/btree.c 	struct page **pagep;
page              270 fs/hfs/btree.c 	pagep = node->page + (off >> PAGE_SHIFT);
page              315 fs/hfs/btree.c 		pagep = node->page + (off >> PAGE_SHIFT);
page              324 fs/hfs/btree.c 	struct page *page;
page              362 fs/hfs/btree.c 	page = node->page[off >> PAGE_SHIFT];
page              363 fs/hfs/btree.c 	data = kmap(page);
page              370 fs/hfs/btree.c 		kunmap(page);
page              375 fs/hfs/btree.c 	set_page_dirty(page);
page              376 fs/hfs/btree.c 	kunmap(page);
page               63 fs/hfs/btree.h 	struct page *page[0];
page              490 fs/hfs/extent.c 		struct page *page;
page              495 fs/hfs/extent.c 					    &page, &fsdata);
page              498 fs/hfs/extent.c 					page, fsdata);
page               31 fs/hfs/inode.c static int hfs_writepage(struct page *page, struct writeback_control *wbc)
page               33 fs/hfs/inode.c 	return block_write_full_page(page, hfs_get_block, wbc);
page               36 fs/hfs/inode.c static int hfs_readpage(struct file *file, struct page *page)
page               38 fs/hfs/inode.c 	return block_read_full_page(page, hfs_get_block);
page               53 fs/hfs/inode.c 			struct page **pagep, void **fsdata)
page               72 fs/hfs/inode.c static int hfs_releasepage(struct page *page, gfp_t mask)
page               74 fs/hfs/inode.c 	struct inode *inode = page->mapping->host;
page               97 fs/hfs/inode.c 		nidx = page->index >> (tree->node_size_shift - PAGE_SHIFT);
page              110 fs/hfs/inode.c 		nidx = page->index << (PAGE_SHIFT - tree->node_size_shift);
page              126 fs/hfs/inode.c 	return res ? try_to_free_buffers(page) : 0;
page               23 fs/hfsplus/bitmap.c 	struct page *page;
page               37 fs/hfsplus/bitmap.c 	page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, NULL);
page               38 fs/hfsplus/bitmap.c 	if (IS_ERR(page)) {
page               42 fs/hfsplus/bitmap.c 	pptr = kmap(page);
page               77 fs/hfsplus/bitmap.c 		kunmap(page);
page               81 fs/hfsplus/bitmap.c 		page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS,
page               83 fs/hfsplus/bitmap.c 		if (IS_ERR(page)) {
page               87 fs/hfsplus/bitmap.c 		curr = pptr = kmap(page);
page              129 fs/hfsplus/bitmap.c 		set_page_dirty(page);
page              130 fs/hfsplus/bitmap.c 		kunmap(page);
page              132 fs/hfsplus/bitmap.c 		page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS,
page              134 fs/hfsplus/bitmap.c 		if (IS_ERR(page)) {
page              138 fs/hfsplus/bitmap.c 		pptr = kmap(page);
page              153 fs/hfsplus/bitmap.c 	set_page_dirty(page);
page              154 fs/hfsplus/bitmap.c 	kunmap(page);
page              167 fs/hfsplus/bitmap.c 	struct page *page;
page              185 fs/hfsplus/bitmap.c 	page = read_mapping_page(mapping, pnr, NULL);
page              186 fs/hfsplus/bitmap.c 	if (IS_ERR(page))
page              188 fs/hfsplus/bitmap.c 	pptr = kmap(page);
page              217 fs/hfsplus/bitmap.c 		set_page_dirty(page);
page              218 fs/hfsplus/bitmap.c 		kunmap(page);
page              219 fs/hfsplus/bitmap.c 		page = read_mapping_page(mapping, ++pnr, NULL);
page              220 fs/hfsplus/bitmap.c 		if (IS_ERR(page))
page              222 fs/hfsplus/bitmap.c 		pptr = kmap(page);
page              233 fs/hfsplus/bitmap.c 	set_page_dirty(page);
page              234 fs/hfsplus/bitmap.c 	kunmap(page);
page              242 fs/hfsplus/bitmap.c 	pr_crit("unable to mark blocks free: error %ld\n", PTR_ERR(page));
page               24 fs/hfsplus/bnode.c 	struct page **pagep;
page               28 fs/hfsplus/bnode.c 	pagep = node->page + (off >> PAGE_SHIFT);
page               77 fs/hfsplus/bnode.c 	struct page **pagep;
page               81 fs/hfsplus/bnode.c 	pagep = node->page + (off >> PAGE_SHIFT);
page              107 fs/hfsplus/bnode.c 	struct page **pagep;
page              111 fs/hfsplus/bnode.c 	pagep = node->page + (off >> PAGE_SHIFT);
page              130 fs/hfsplus/bnode.c 	struct page **src_page, **dst_page;
page              138 fs/hfsplus/bnode.c 	src_page = src_node->page + (src >> PAGE_SHIFT);
page              140 fs/hfsplus/bnode.c 	dst_page = dst_node->page + (dst >> PAGE_SHIFT);
page              187 fs/hfsplus/bnode.c 	struct page **src_page, **dst_page;
page              197 fs/hfsplus/bnode.c 		src_page = node->page + (src >> PAGE_SHIFT);
page              200 fs/hfsplus/bnode.c 		dst_page = node->page + (dst >> PAGE_SHIFT);
page              247 fs/hfsplus/bnode.c 		src_page = node->page + (src >> PAGE_SHIFT);
page              249 fs/hfsplus/bnode.c 		dst_page = node->page + (dst >> PAGE_SHIFT);
page              404 fs/hfsplus/bnode.c 	struct page *page;
page              415 fs/hfsplus/bnode.c 		sizeof(struct page *);
page              447 fs/hfsplus/bnode.c 		page = read_mapping_page(mapping, block, NULL);
page              448 fs/hfsplus/bnode.c 		if (IS_ERR(page))
page              450 fs/hfsplus/bnode.c 		if (PageError(page)) {
page              451 fs/hfsplus/bnode.c 			put_page(page);
page              454 fs/hfsplus/bnode.c 		node->page[i] = page;
page              505 fs/hfsplus/bnode.c 	desc = (struct hfs_bnode_desc *)(kmap(node->page[0]) +
page              512 fs/hfsplus/bnode.c 	kunmap(node->page[0]);
page              568 fs/hfsplus/bnode.c 		if (node->page[i])
page              569 fs/hfsplus/bnode.c 			put_page(node->page[i]);
page              576 fs/hfsplus/bnode.c 	struct page **pagep;
page              595 fs/hfsplus/bnode.c 	pagep = node->page;
page              635 fs/hfsplus/bnode.c 			if (!node->page[i])
page              637 fs/hfsplus/bnode.c 			mark_page_accessed(node->page[i]);
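
Unlike HFS, hfsplus b-tree nodes can straddle page boundaries, so bnode.c walks the node's struct page * array one page at a time. A sketch of the cross-page read loop those entries belong to (node layout fields abstracted into parameters):

#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/string.h>

static void demo_bnode_read(struct page **pages, unsigned int page_offset,
			    char *buf, int off, int len)
{
	struct page **pagep;
	int l;

	off += page_offset;			/* node start within its first page */
	pagep = pages + (off >> PAGE_SHIFT);
	off &= ~PAGE_MASK;

	l = min_t(int, len, PAGE_SIZE - off);
	memcpy(buf, kmap(*pagep) + off, l);	/* first, possibly partial, page */
	kunmap(*pagep);

	while ((len -= l) != 0) {		/* spill onto the following pages */
		buf += l;
		l = min_t(int, len, PAGE_SIZE);
		memcpy(buf, kmap(*++pagep), l);
		kunmap(*pagep);
	}
}
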
page              139 fs/hfsplus/btree.c 	struct page *page;
page              161 fs/hfsplus/btree.c 	page = read_mapping_page(mapping, 0, NULL);
page              162 fs/hfsplus/btree.c 	if (IS_ERR(page))
page              166 fs/hfsplus/btree.c 	head = (struct hfs_btree_header_rec *)(kmap(page) +
page              243 fs/hfsplus/btree.c 	kunmap(page);
page              244 fs/hfsplus/btree.c 	put_page(page);
page              248 fs/hfsplus/btree.c 	put_page(page);
page              286 fs/hfsplus/btree.c 	struct page *page;
page              293 fs/hfsplus/btree.c 	page = node->page[0];
page              294 fs/hfsplus/btree.c 	head = (struct hfs_btree_header_rec *)(kmap(page) +
page              306 fs/hfsplus/btree.c 	kunmap(page);
page              307 fs/hfsplus/btree.c 	set_page_dirty(page);
page              376 fs/hfsplus/btree.c 	struct page **pagep;
page              396 fs/hfsplus/btree.c 	pagep = node->page + (off >> PAGE_SHIFT);
page              442 fs/hfsplus/btree.c 		pagep = node->page + (off >> PAGE_SHIFT);
page              451 fs/hfsplus/btree.c 	struct page *page;
page              492 fs/hfsplus/btree.c 	page = node->page[off >> PAGE_SHIFT];
page              493 fs/hfsplus/btree.c 	data = kmap(page);
page              501 fs/hfsplus/btree.c 		kunmap(page);
page              506 fs/hfsplus/btree.c 	set_page_dirty(page);
page              507 fs/hfsplus/btree.c 	kunmap(page);
page              556 fs/hfsplus/extents.c 		struct page *page;
page              561 fs/hfsplus/extents.c 					    &page, &fsdata);
page              565 fs/hfsplus/extents.c 			0, 0, page, fsdata);
page              100 fs/hfsplus/hfsplus_fs.h struct page;
page              120 fs/hfsplus/hfsplus_fs.h 	struct page *page[0];
page               25 fs/hfsplus/inode.c static int hfsplus_readpage(struct file *file, struct page *page)
page               27 fs/hfsplus/inode.c 	return block_read_full_page(page, hfsplus_get_block);
page               30 fs/hfsplus/inode.c static int hfsplus_writepage(struct page *page, struct writeback_control *wbc)
page               32 fs/hfsplus/inode.c 	return block_write_full_page(page, hfsplus_get_block, wbc);
page               47 fs/hfsplus/inode.c 			struct page **pagep, void **fsdata)
page               66 fs/hfsplus/inode.c static int hfsplus_releasepage(struct page *page, gfp_t mask)
page               68 fs/hfsplus/inode.c 	struct inode *inode = page->mapping->host;
page               92 fs/hfsplus/inode.c 		nidx = page->index >>
page              106 fs/hfsplus/inode.c 		nidx = page->index <<
page              123 fs/hfsplus/inode.c 	return res ? try_to_free_buffers(page) : 0;
page              132 fs/hfsplus/xattr.c 	struct page *page;
page              221 fs/hfsplus/xattr.c 		page = read_mapping_page(mapping, index, NULL);
page              222 fs/hfsplus/xattr.c 		if (IS_ERR(page)) {
page              223 fs/hfsplus/xattr.c 			err = PTR_ERR(page);
page              227 fs/hfsplus/xattr.c 		kaddr = kmap_atomic(page);
page              232 fs/hfsplus/xattr.c 		set_page_dirty(page);
page              233 fs/hfsplus/xattr.c 		put_page(page);
page              401 fs/hostfs/hostfs_kern.c static int hostfs_writepage(struct page *page, struct writeback_control *wbc)
page              403 fs/hostfs/hostfs_kern.c 	struct address_space *mapping = page->mapping;
page              406 fs/hostfs/hostfs_kern.c 	loff_t base = page_offset(page);
page              411 fs/hostfs/hostfs_kern.c 	if (page->index >= end_index)
page              414 fs/hostfs/hostfs_kern.c 	buffer = kmap(page);
page              418 fs/hostfs/hostfs_kern.c 		ClearPageUptodate(page);
page              425 fs/hostfs/hostfs_kern.c 	if (PageError(page))
page              426 fs/hostfs/hostfs_kern.c 		ClearPageError(page);
page              430 fs/hostfs/hostfs_kern.c 	kunmap(page);
page              432 fs/hostfs/hostfs_kern.c 	unlock_page(page);
page              436 fs/hostfs/hostfs_kern.c static int hostfs_readpage(struct file *file, struct page *page)
page              439 fs/hostfs/hostfs_kern.c 	loff_t start = page_offset(page);
page              442 fs/hostfs/hostfs_kern.c 	buffer = kmap(page);
page              446 fs/hostfs/hostfs_kern.c 		ClearPageUptodate(page);
page              447 fs/hostfs/hostfs_kern.c 		SetPageError(page);
page              454 fs/hostfs/hostfs_kern.c 	ClearPageError(page);
page              455 fs/hostfs/hostfs_kern.c 	SetPageUptodate(page);
page              458 fs/hostfs/hostfs_kern.c 	flush_dcache_page(page);
page              459 fs/hostfs/hostfs_kern.c 	kunmap(page);
page              460 fs/hostfs/hostfs_kern.c 	unlock_page(page);
page              466 fs/hostfs/hostfs_kern.c 			      struct page **pagep, void **fsdata)
page              478 fs/hostfs/hostfs_kern.c 			    struct page *page, void *fsdata)
page              485 fs/hostfs/hostfs_kern.c 	buffer = kmap(page);
page              487 fs/hostfs/hostfs_kern.c 	kunmap(page);
page              489 fs/hostfs/hostfs_kern.c 	if (!PageUptodate(page) && err == PAGE_SIZE)
page              490 fs/hostfs/hostfs_kern.c 		SetPageUptodate(page);
page              498 fs/hostfs/hostfs_kern.c 	unlock_page(page);
page              499 fs/hostfs/hostfs_kern.c 	put_page(page);
page              118 fs/hpfs/file.c static int hpfs_readpage(struct file *file, struct page *page)
page              120 fs/hpfs/file.c 	return mpage_readpage(page, hpfs_get_block);
page              123 fs/hpfs/file.c static int hpfs_writepage(struct page *page, struct writeback_control *wbc)
page              125 fs/hpfs/file.c 	return block_write_full_page(page, hpfs_get_block, wbc);
page              156 fs/hpfs/file.c 			struct page **pagep, void **fsdata)
page              172 fs/hpfs/file.c 			struct page *pagep, void *fsdata)
page              478 fs/hpfs/namei.c static int hpfs_symlink_readpage(struct file *file, struct page *page)
page              480 fs/hpfs/namei.c 	char *link = page_address(page);
page              481 fs/hpfs/namei.c 	struct inode *i = page->mapping->host;
page              495 fs/hpfs/namei.c 	SetPageUptodate(page);
page              496 fs/hpfs/namei.c 	unlock_page(page);
page              501 fs/hpfs/namei.c 	SetPageError(page);
page              502 fs/hpfs/namei.c 	unlock_page(page);
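
hpfs_symlink_readpage() above shows the classic symlink ->readpage contract: fill page_address(page), then either SetPageUptodate or SetPageError, and unlock in both cases. Sketch (fetch() is a hypothetical stand-in for hpfs's extended-attribute read):

#include <linux/pagemap.h>

static int demo_symlink_readpage(struct page *page,
				 int (*fetch)(struct inode *, char *))
{
	char *link = page_address(page);
	int err = fetch(page->mapping->host, link);

	if (err) {
		SetPageError(page);
		unlock_page(page);
		return err;
	}
	SetPageUptodate(page);
	unlock_page(page);
	return 0;
}
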
page              237 fs/hugetlbfs/inode.c hugetlbfs_read_actor(struct page *page, unsigned long offset,
page              254 fs/hugetlbfs/inode.c 		n = copy_page_to_iter(&page[i], offset, chunksize, to);
page              283 fs/hugetlbfs/inode.c 		struct page *page;
page              302 fs/hugetlbfs/inode.c 		page = find_lock_page(mapping, index);
page              303 fs/hugetlbfs/inode.c 		if (unlikely(page == NULL)) {
page              310 fs/hugetlbfs/inode.c 			unlock_page(page);
page              315 fs/hugetlbfs/inode.c 			copied = hugetlbfs_read_actor(page, offset, to, nr);
page              316 fs/hugetlbfs/inode.c 			put_page(page);
page              335 fs/hugetlbfs/inode.c 			struct page **pagep, void **fsdata)
page              342 fs/hugetlbfs/inode.c 			struct page *page, void *fsdata)
page              348 fs/hugetlbfs/inode.c static void remove_huge_page(struct page *page)
page              350 fs/hugetlbfs/inode.c 	ClearPageDirty(page);
page              351 fs/hugetlbfs/inode.c 	ClearPageUptodate(page);
page              352 fs/hugetlbfs/inode.c 	delete_from_page_cache(page);
page              439 fs/hugetlbfs/inode.c 			struct page *page = pvec.pages[i];
page              442 fs/hugetlbfs/inode.c 			index = page->index;
page              455 fs/hugetlbfs/inode.c 			if (unlikely(page_mapped(page))) {
page              465 fs/hugetlbfs/inode.c 			lock_page(page);
page              475 fs/hugetlbfs/inode.c 			VM_BUG_ON(PagePrivate(page));
page              476 fs/hugetlbfs/inode.c 			remove_huge_page(page);
page              484 fs/hugetlbfs/inode.c 			unlock_page(page);
page              625 fs/hugetlbfs/inode.c 		struct page *page;
page              651 fs/hugetlbfs/inode.c 		page = find_get_page(mapping, index);
page              652 fs/hugetlbfs/inode.c 		if (page) {
page              653 fs/hugetlbfs/inode.c 			put_page(page);
page              660 fs/hugetlbfs/inode.c 		page = alloc_huge_page(&pseudo_vma, addr, avoid_reserve);
page              662 fs/hugetlbfs/inode.c 		if (IS_ERR(page)) {
page              664 fs/hugetlbfs/inode.c 			error = PTR_ERR(page);
page              667 fs/hugetlbfs/inode.c 		clear_huge_page(page, addr, pages_per_huge_page(h));
page              668 fs/hugetlbfs/inode.c 		__SetPageUptodate(page);
page              669 fs/hugetlbfs/inode.c 		error = huge_add_to_page_cache(page, mapping, index);
page              671 fs/hugetlbfs/inode.c 			put_page(page);
page              682 fs/hugetlbfs/inode.c 		unlock_page(page);
page              683 fs/hugetlbfs/inode.c 		put_page(page);
page              871 fs/hugetlbfs/inode.c static int hugetlbfs_set_page_dirty(struct page *page)
page              873 fs/hugetlbfs/inode.c 	struct page *head = compound_head(page);
page              880 fs/hugetlbfs/inode.c 				struct page *newpage, struct page *page,
page              885 fs/hugetlbfs/inode.c 	rc = migrate_huge_page_move_mapping(mapping, newpage, page);
page              895 fs/hugetlbfs/inode.c 	if (page_private(page)) {
page              896 fs/hugetlbfs/inode.c 		set_page_private(newpage, page_private(page));
page              897 fs/hugetlbfs/inode.c 		set_page_private(page, 0);
page              901 fs/hugetlbfs/inode.c 		migrate_page_copy(newpage, page);
page              903 fs/hugetlbfs/inode.c 		migrate_page_states(newpage, page);
page              909 fs/hugetlbfs/inode.c 				struct page *page)
page              912 fs/hugetlbfs/inode.c 	pgoff_t index = page->index;
page              914 fs/hugetlbfs/inode.c 	remove_huge_page(page);
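
remove_huge_page() above evicts a huge page from the page cache during truncation by clearing dirty and uptodate state first, so the data is discarded rather than written back. The same three steps, sketched (the caller must hold the page lock):

#include <linux/pagemap.h>

static void demo_remove_from_cache(struct page *page)
{
	ClearPageDirty(page);		/* truncation: discard, don't write back */
	ClearPageUptodate(page);
	delete_from_page_cache(page);	/* unaccounts and drops the cache ref */
}
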
page               42 fs/internal.h  extern int __block_write_begin_int(struct page *page, loff_t pos, unsigned len,
page             3337 fs/io_uring.c  	struct page *page;
page             3342 fs/io_uring.c  	page = virt_to_head_page(ptr);
page             3343 fs/io_uring.c  	if (put_page_testzero(page))
page             3344 fs/io_uring.c  		free_compound_page(page);
page             3450 fs/io_uring.c  	struct page **pages = NULL;
page             3503 fs/io_uring.c  			pages = kvmalloc_array(nr_pages, sizeof(struct page *),
page             3706 fs/io_uring.c  	struct page *page;
page             3721 fs/io_uring.c  	page = virt_to_head_page(ptr);
page             3722 fs/io_uring.c  	if (sz > page_size(page))
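
io_uring frees its ring allocations by dropping a reference on the compound head page, as the lines above show. Minimal sketch of that teardown (ptr is assumed to come from a compound-page allocation):

#include <linux/mm.h>

static void demo_free_ring(void *ptr)
{
	struct page *page = virt_to_head_page(ptr);

	if (put_page_testzero(page))	/* last reference gone? */
		free_compound_page(page);
}
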
page               23 fs/iomap/buffered-io.c iomap_page_create(struct inode *inode, struct page *page)
page               25 fs/iomap/buffered-io.c 	struct iomap_page *iop = to_iomap_page(page);
page               39 fs/iomap/buffered-io.c 	get_page(page);
page               40 fs/iomap/buffered-io.c 	set_page_private(page, (unsigned long)iop);
page               41 fs/iomap/buffered-io.c 	SetPagePrivate(page);
page               46 fs/iomap/buffered-io.c iomap_page_release(struct page *page)
page               48 fs/iomap/buffered-io.c 	struct iomap_page *iop = to_iomap_page(page);
page               54 fs/iomap/buffered-io.c 	ClearPagePrivate(page);
page               55 fs/iomap/buffered-io.c 	set_page_private(page, 0);
page               56 fs/iomap/buffered-io.c 	put_page(page);
page              121 fs/iomap/buffered-io.c iomap_set_range_uptodate(struct page *page, unsigned off, unsigned len)
page              123 fs/iomap/buffered-io.c 	struct iomap_page *iop = to_iomap_page(page);
page              124 fs/iomap/buffered-io.c 	struct inode *inode = page->mapping->host;
page              139 fs/iomap/buffered-io.c 	if (uptodate && !PageError(page))
page              140 fs/iomap/buffered-io.c 		SetPageUptodate(page);
page              144 fs/iomap/buffered-io.c iomap_read_finish(struct iomap_page *iop, struct page *page)
page              147 fs/iomap/buffered-io.c 		unlock_page(page);
page              153 fs/iomap/buffered-io.c 	struct page *page = bvec->bv_page;
page              154 fs/iomap/buffered-io.c 	struct iomap_page *iop = to_iomap_page(page);
page              157 fs/iomap/buffered-io.c 		ClearPageUptodate(page);
page              158 fs/iomap/buffered-io.c 		SetPageError(page);
page              160 fs/iomap/buffered-io.c 		iomap_set_range_uptodate(page, bvec->bv_offset, bvec->bv_len);
page              163 fs/iomap/buffered-io.c 	iomap_read_finish(iop, page);
page              179 fs/iomap/buffered-io.c 	struct page		*cur_page;
page              187 fs/iomap/buffered-io.c iomap_read_inline_data(struct inode *inode, struct page *page,
page              193 fs/iomap/buffered-io.c 	if (PageUptodate(page))
page              196 fs/iomap/buffered-io.c 	BUG_ON(page->index);
page              199 fs/iomap/buffered-io.c 	addr = kmap_atomic(page);
page              203 fs/iomap/buffered-io.c 	SetPageUptodate(page);
page              211 fs/iomap/buffered-io.c 	struct page *page = ctx->cur_page;
page              212 fs/iomap/buffered-io.c 	struct iomap_page *iop = iomap_page_create(inode, page);
page              220 fs/iomap/buffered-io.c 		iomap_read_inline_data(inode, page, iomap);
page              230 fs/iomap/buffered-io.c 		zero_user(page, poff, plen);
page              231 fs/iomap/buffered-io.c 		iomap_set_range_uptodate(page, poff, plen);
page              245 fs/iomap/buffered-io.c 	    __bio_try_merge_page(ctx->bio, page, plen, poff, &same_page)) {
page              260 fs/iomap/buffered-io.c 		gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
page              277 fs/iomap/buffered-io.c 	bio_add_page(ctx->bio, page, plen, poff);
page              289 fs/iomap/buffered-io.c iomap_readpage(struct page *page, const struct iomap_ops *ops)
page              291 fs/iomap/buffered-io.c 	struct iomap_readpage_ctx ctx = { .cur_page = page };
page              292 fs/iomap/buffered-io.c 	struct inode *inode = page->mapping->host;
page              297 fs/iomap/buffered-io.c 		ret = iomap_apply(inode, page_offset(page) + poff,
page              302 fs/iomap/buffered-io.c 			SetPageError(page);
page              312 fs/iomap/buffered-io.c 		unlock_page(page);
page              324 fs/iomap/buffered-io.c static struct page *
page              329 fs/iomap/buffered-io.c 		struct page *page = lru_to_page(pages);
page              331 fs/iomap/buffered-io.c 		if (page_offset(page) >= (u64)pos + length)
page              334 fs/iomap/buffered-io.c 		list_del(&page->lru);
page              335 fs/iomap/buffered-io.c 		if (!add_to_page_cache_lru(page, inode->i_mapping, page->index,
page              337 fs/iomap/buffered-io.c 			return page;
page              346 fs/iomap/buffered-io.c 		put_page(page);
page              388 fs/iomap/buffered-io.c 	loff_t pos = page_offset(list_entry(pages->prev, struct page, lru));
page              389 fs/iomap/buffered-io.c 	loff_t last = page_offset(list_entry(pages->next, struct page, lru));
page              429 fs/iomap/buffered-io.c iomap_is_partially_uptodate(struct page *page, unsigned long from,
page              432 fs/iomap/buffered-io.c 	struct iomap_page *iop = to_iomap_page(page);
page              433 fs/iomap/buffered-io.c 	struct inode *inode = page->mapping->host;
page              456 fs/iomap/buffered-io.c iomap_releasepage(struct page *page, gfp_t gfp_mask)
page              463 fs/iomap/buffered-io.c 	if (PageDirty(page) || PageWriteback(page))
page              465 fs/iomap/buffered-io.c 	iomap_page_release(page);
page              471 fs/iomap/buffered-io.c iomap_invalidatepage(struct page *page, unsigned int offset, unsigned int len)
page              478 fs/iomap/buffered-io.c 		WARN_ON_ONCE(PageWriteback(page));
page              479 fs/iomap/buffered-io.c 		cancel_dirty_page(page);
page              480 fs/iomap/buffered-io.c 		iomap_page_release(page);
page              487 fs/iomap/buffered-io.c iomap_migrate_page(struct address_space *mapping, struct page *newpage,
page              488 fs/iomap/buffered-io.c 		struct page *page, enum migrate_mode mode)
page              492 fs/iomap/buffered-io.c 	ret = migrate_page_move_mapping(mapping, newpage, page, 0);
page              496 fs/iomap/buffered-io.c 	if (page_has_private(page)) {
page              497 fs/iomap/buffered-io.c 		ClearPagePrivate(page);
page              499 fs/iomap/buffered-io.c 		set_page_private(newpage, page_private(page));
page              500 fs/iomap/buffered-io.c 		set_page_private(page, 0);
page              501 fs/iomap/buffered-io.c 		put_page(page);
page              506 fs/iomap/buffered-io.c 		migrate_page_copy(newpage, page);
page              508 fs/iomap/buffered-io.c 		migrate_page_states(newpage, page);
page              528 fs/iomap/buffered-io.c iomap_read_page_sync(struct inode *inode, loff_t block_start, struct page *page,
page              536 fs/iomap/buffered-io.c 		zero_user_segments(page, poff, from, to, poff + plen);
page              537 fs/iomap/buffered-io.c 		iomap_set_range_uptodate(page, poff, plen);
page              545 fs/iomap/buffered-io.c 	__bio_add_page(&bio, page, plen, poff);
page              551 fs/iomap/buffered-io.c 		struct page *page, struct iomap *iomap)
page              553 fs/iomap/buffered-io.c 	struct iomap_page *iop = iomap_page_create(inode, page);
page              560 fs/iomap/buffered-io.c 	if (PageUptodate(page))
page              571 fs/iomap/buffered-io.c 			status = iomap_read_page_sync(inode, block_start, page,
page              584 fs/iomap/buffered-io.c 		struct page **pagep, struct iomap *iomap)
page              588 fs/iomap/buffered-io.c 	struct page *page;
page              602 fs/iomap/buffered-io.c 	page = grab_cache_page_write_begin(inode->i_mapping, index, flags);
page              603 fs/iomap/buffered-io.c 	if (!page) {
page              609 fs/iomap/buffered-io.c 		iomap_read_inline_data(inode, page, iomap);
page              611 fs/iomap/buffered-io.c 		status = __block_write_begin_int(page, pos, len, NULL, iomap);
page              613 fs/iomap/buffered-io.c 		status = __iomap_write_begin(inode, pos, len, page, iomap);
page              618 fs/iomap/buffered-io.c 	*pagep = page;
page              622 fs/iomap/buffered-io.c 	unlock_page(page);
page              623 fs/iomap/buffered-io.c 	put_page(page);
page              633 fs/iomap/buffered-io.c iomap_set_page_dirty(struct page *page)
page              635 fs/iomap/buffered-io.c 	struct address_space *mapping = page_mapping(page);
page              639 fs/iomap/buffered-io.c 		return !TestSetPageDirty(page);
page              645 fs/iomap/buffered-io.c 	lock_page_memcg(page);
page              646 fs/iomap/buffered-io.c 	newly_dirty = !TestSetPageDirty(page);
page              648 fs/iomap/buffered-io.c 		__set_page_dirty(page, mapping, 0);
page              649 fs/iomap/buffered-io.c 	unlock_page_memcg(page);
page              659 fs/iomap/buffered-io.c 		unsigned copied, struct page *page, struct iomap *iomap)
page              661 fs/iomap/buffered-io.c 	flush_dcache_page(page);
page              674 fs/iomap/buffered-io.c 	if (unlikely(copied < len && !PageUptodate(page)))
page              676 fs/iomap/buffered-io.c 	iomap_set_range_uptodate(page, offset_in_page(pos), len);
page              677 fs/iomap/buffered-io.c 	iomap_set_page_dirty(page);
page              682 fs/iomap/buffered-io.c iomap_write_end_inline(struct inode *inode, struct page *page,
page              687 fs/iomap/buffered-io.c 	WARN_ON_ONCE(!PageUptodate(page));
page              690 fs/iomap/buffered-io.c 	addr = kmap_atomic(page);
page              700 fs/iomap/buffered-io.c 		unsigned copied, struct page *page, struct iomap *iomap)
page              707 fs/iomap/buffered-io.c 		ret = iomap_write_end_inline(inode, page, iomap, pos, copied);
page              710 fs/iomap/buffered-io.c 				page, NULL);
page              712 fs/iomap/buffered-io.c 		ret = __iomap_write_end(inode, pos, len, copied, page, iomap);
page              724 fs/iomap/buffered-io.c 	unlock_page(page);
page              729 fs/iomap/buffered-io.c 		page_ops->page_done(inode, pos, ret, page, iomap);
page              730 fs/iomap/buffered-io.c 	put_page(page);
page              747 fs/iomap/buffered-io.c 		struct page *page;
page              774 fs/iomap/buffered-io.c 		status = iomap_write_begin(inode, pos, bytes, flags, &page,
page              780 fs/iomap/buffered-io.c 			flush_dcache_page(page);
page              782 fs/iomap/buffered-io.c 		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
page              784 fs/iomap/buffered-io.c 		flush_dcache_page(page);
page              786 fs/iomap/buffered-io.c 		status = iomap_write_end(inode, pos, bytes, copied, page,
page              838 fs/iomap/buffered-io.c static struct page *
page              842 fs/iomap/buffered-io.c 	struct page *page;
page              844 fs/iomap/buffered-io.c 	page = read_mapping_page(mapping, offset >> PAGE_SHIFT, NULL);
page              845 fs/iomap/buffered-io.c 	if (IS_ERR(page))
page              846 fs/iomap/buffered-io.c 		return page;
page              847 fs/iomap/buffered-io.c 	if (!PageUptodate(page)) {
page              848 fs/iomap/buffered-io.c 		put_page(page);
page              851 fs/iomap/buffered-io.c 	return page;
page              862 fs/iomap/buffered-io.c 		struct page *page, *rpage;
page              874 fs/iomap/buffered-io.c 					   AOP_FLAG_NOFS, &page, iomap);
page              879 fs/iomap/buffered-io.c 		WARN_ON_ONCE(!PageUptodate(page));
page              881 fs/iomap/buffered-io.c 		status = iomap_write_end(inode, pos, bytes, bytes, page, iomap);
page              922 fs/iomap/buffered-io.c 	struct page *page;
page              925 fs/iomap/buffered-io.c 	status = iomap_write_begin(inode, pos, bytes, AOP_FLAG_NOFS, &page,
page              930 fs/iomap/buffered-io.c 	zero_user(page, offset, bytes);
page              931 fs/iomap/buffered-io.c 	mark_page_accessed(page);
page              933 fs/iomap/buffered-io.c 	return iomap_write_end(inode, pos, bytes, bytes, page, iomap);
page             1016 fs/iomap/buffered-io.c 	struct page *page = data;
page             1020 fs/iomap/buffered-io.c 		ret = __block_write_begin_int(page, pos, length, NULL, iomap);
page             1023 fs/iomap/buffered-io.c 		block_commit_write(page, 0, length);
page             1025 fs/iomap/buffered-io.c 		WARN_ON_ONCE(!PageUptodate(page));
page             1026 fs/iomap/buffered-io.c 		iomap_page_create(inode, page);
page             1027 fs/iomap/buffered-io.c 		set_page_dirty(page);
page             1035 fs/iomap/buffered-io.c 	struct page *page = vmf->page;
page             1041 fs/iomap/buffered-io.c 	lock_page(page);
page             1043 fs/iomap/buffered-io.c 	if ((page->mapping != inode->i_mapping) ||
page             1044 fs/iomap/buffered-io.c 	    (page_offset(page) > size)) {
page             1051 fs/iomap/buffered-io.c 	if (((page->index + 1) << PAGE_SHIFT) > size)
page             1056 fs/iomap/buffered-io.c 	offset = page_offset(page);
page             1059 fs/iomap/buffered-io.c 				IOMAP_WRITE | IOMAP_FAULT, ops, page,
page             1067 fs/iomap/buffered-io.c 	wait_for_stable_page(page);
page             1070 fs/iomap/buffered-io.c 	unlock_page(page);
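The fs/iomap/buffered-io.c entries above trace iomap's generic buffered write path: grab a page with iomap_write_begin(), copy user data in atomically, commit with iomap_write_end(). A condensed sketch of one iteration of that loop, under the caveat that both helpers are static to buffered-io.c and that fault handling, short-copy retry and balance_dirty_pages() are omitted here:

	/* Condensed sketch of the begin -> copy -> end sequence traced above.
	 * iomap_write_begin()/iomap_write_end() are internal to buffered-io.c;
	 * they appear here only to illustrate the shape of the loop body. */
	static loff_t write_one_chunk(struct inode *inode, loff_t pos, unsigned bytes,
				      struct iov_iter *i, struct iomap *iomap)
	{
		unsigned offset = offset_in_page(pos);
		struct page *page;
		size_t copied;
		int status;

		status = iomap_write_begin(inode, pos, bytes, 0, &page, iomap);
		if (status)
			return status;

		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
		flush_dcache_page(page);	/* data arrived via a kernel mapping */

		return iomap_write_end(inode, pos, bytes, copied, page, iomap);
	}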
page              181 fs/iomap/direct-io.c 	struct page *page = ZERO_PAGE(0);
page              191 fs/iomap/direct-io.c 	get_page(page);
page              192 fs/iomap/direct-io.c 	__bio_add_page(bio, page, len, 0);
page               18 fs/iomap/seek.c page_seek_hole_data(struct inode *inode, struct page *page, loff_t *lastoff,
page               24 fs/iomap/seek.c 	loff_t poff = page_offset(page);
page               43 fs/iomap/seek.c 		return PageUptodate(page) == seek_data;
page               45 fs/iomap/seek.c 	lock_page(page);
page               46 fs/iomap/seek.c 	if (unlikely(page->mapping != inode->i_mapping))
page               52 fs/iomap/seek.c 		if (ops->is_partially_uptodate(page, off, bsize) == seek_data) {
page               53 fs/iomap/seek.c 			unlock_page(page);
page               60 fs/iomap/seek.c 	unlock_page(page);
page               96 fs/iomap/seek.c 			struct page *page = pvec.pages[i];
page               98 fs/iomap/seek.c 			if (page_seek_hole_data(inode, page, &lastoff, whence))
page              100 fs/iomap/seek.c 			lastoff = page_offset(page) + PAGE_SIZE;
page               42 fs/isofs/compress.c 				      struct page **pages, unsigned poffset,
page              202 fs/isofs/compress.c 			     struct page **pages)
page              299 fs/isofs/compress.c static int zisofs_readpage(struct file *file, struct page *page)
page              309 fs/isofs/compress.c 	struct page **pages;
page              310 fs/isofs/compress.c 	pgoff_t index = page->index, end_index;
page              318 fs/isofs/compress.c 		SetPageUptodate(page);
page              319 fs/isofs/compress.c 		unlock_page(page);
page              337 fs/isofs/compress.c 		unlock_page(page);
page              340 fs/isofs/compress.c 	pages[full_page] = page;
page             1183 fs/isofs/inode.c static int isofs_readpage(struct file *file, struct page *page)
page             1185 fs/isofs/inode.c 	return mpage_readpage(page, isofs_get_block);
page              158 fs/isofs/namei.c 	struct page *page;
page              160 fs/isofs/namei.c 	page = alloc_page(GFP_USER);
page              161 fs/isofs/namei.c 	if (!page)
page              166 fs/isofs/namei.c 				page_address(page),
page              167 fs/isofs/namei.c 				1024 + page_address(page));
page              168 fs/isofs/namei.c 	__free_page(page);
page              693 fs/isofs/rock.c static int rock_ridge_symlink_readpage(struct file *file, struct page *page)
page              695 fs/isofs/rock.c 	struct inode *inode = page->mapping->host;
page              698 fs/isofs/rock.c 	char *link = page_address(page);
page              784 fs/isofs/rock.c 	SetPageUptodate(page);
page              785 fs/isofs/rock.c 	unlock_page(page);
page              800 fs/isofs/rock.c 	SetPageError(page);
page              801 fs/isofs/rock.c 	unlock_page(page);
page               65 fs/jbd2/commit.c 	struct page *page;
page               71 fs/jbd2/commit.c 	page = bh->b_page;
page               72 fs/jbd2/commit.c 	if (!page)
page               74 fs/jbd2/commit.c 	if (page->mapping)
page               78 fs/jbd2/commit.c 	if (!trylock_page(page))
page               81 fs/jbd2/commit.c 	get_page(page);
page               83 fs/jbd2/commit.c 	try_to_free_buffers(page);
page               84 fs/jbd2/commit.c 	unlock_page(page);
page               85 fs/jbd2/commit.c 	put_page(page);
page              305 fs/jbd2/commit.c 	struct page *page = bh->b_page;
page              309 fs/jbd2/commit.c 	addr = kmap_atomic(page);
page              329 fs/jbd2/commit.c 	struct page *page = bh->b_page;
page              338 fs/jbd2/commit.c 	addr = kmap_atomic(page);
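The fs/jbd2/commit.c entries above belong to the buffer-release dance in release_buffer_page(): only anonymous, uncontended pages are worth stripping, and a page reference must be held across try_to_free_buffers(). A sketch of that core, with the early-bail paths (which also drop the bh reference) trimmed:

	/* Sketch of jbd2's release_buffer_page() flow traced above. */
	static void release_page_sketch(struct buffer_head *bh)
	{
		struct page *page = bh->b_page;

		if (!page || page->mapping)	/* anonymous journal pages only */
			return;
		if (!trylock_page(page))	/* never block during commit */
			return;
		get_page(page);
		__brelse(bh);
		try_to_free_buffers(page);
		unlock_page(page);
		put_page(page);
	}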
page              345 fs/jbd2/journal.c 	struct page *new_page;
page              826 fs/jbd2/transaction.c 	struct page *page;
page              832 fs/jbd2/transaction.c 	page = bh->b_page;
page              834 fs/jbd2/transaction.c 	source = kmap_atomic(page);
page             2073 fs/jbd2/transaction.c 				struct page *page, gfp_t gfp_mask)
page             2079 fs/jbd2/transaction.c 	J_ASSERT(PageLocked(page));
page             2081 fs/jbd2/transaction.c 	head = page_buffers(page);
page             2103 fs/jbd2/transaction.c 	ret = try_to_free_buffers(page);
page             2370 fs/jbd2/transaction.c 				struct page *page,
page             2381 fs/jbd2/transaction.c 	if (!PageLocked(page))
page             2383 fs/jbd2/transaction.c 	if (!page_has_buffers(page))
page             2392 fs/jbd2/transaction.c 	head = bh = page_buffers(page);
page             2415 fs/jbd2/transaction.c 		if (may_free && try_to_free_buffers(page))
page             2416 fs/jbd2/transaction.c 			J_ASSERT(!page_has_buffers(page));
page               26 fs/jffs2/file.c 			struct page *pg, void *fsdata);
page               29 fs/jffs2/file.c 			struct page **pagep, void **fsdata);
page               30 fs/jffs2/file.c static int jffs2_readpage (struct file *filp, struct page *pg);
page               79 fs/jffs2/file.c static int jffs2_do_readpage_nolock (struct inode *inode, struct page *pg)
page              112 fs/jffs2/file.c int jffs2_do_readpage_unlock(void *data, struct page *pg)
page              120 fs/jffs2/file.c static int jffs2_readpage (struct file *filp, struct page *pg)
page              133 fs/jffs2/file.c 			struct page **pagep, void **fsdata)
page              135 fs/jffs2/file.c 	struct page *pg;
page              237 fs/jffs2/file.c 			struct page *pg, void *fsdata)
page             1174 fs/jffs2/gc.c  	struct page *page;
page             1329 fs/jffs2/gc.c  	page = read_cache_page(inode->i_mapping, start >> PAGE_SHIFT,
page             1331 fs/jffs2/gc.c  	if (IS_ERR(page)) {
page             1333 fs/jffs2/gc.c  			PTR_ERR(page));
page             1335 fs/jffs2/gc.c  		return PTR_ERR(page);
page             1338 fs/jffs2/gc.c  	pg_ptr = kmap(page);
page             1403 fs/jffs2/gc.c  	kunmap(page);
page             1404 fs/jffs2/gc.c  	put_page(page);
page              158 fs/jffs2/os-linux.h int jffs2_do_readpage_unlock(void *data, struct page *pg);
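The fs/jffs2/gc.c entries above show the garbage collector pulling file data through the page cache: read_cache_page() with jffs2_do_readpage_unlock() as the filler, then kmap() for the duration of the copy, then kunmap() and put_page(). A small sketch of that pattern (gc_map_page is a hypothetical helper, not a jffs2 function):

	/* Sketch: read a page for GC and keep it mapped; the caller must
	 * kunmap() and put_page() when done, as gc.c above does. */
	static char *gc_map_page(struct inode *inode, loff_t start, struct page **pagep)
	{
		struct page *page;

		page = read_cache_page(inode->i_mapping, start >> PAGE_SHIFT,
				       jffs2_do_readpage_unlock, inode);
		if (IS_ERR(page))
			return ERR_CAST(page);
		*pagep = page;
		return kmap(page);
	}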
page              283 fs/jfs/inode.c static int jfs_writepage(struct page *page, struct writeback_control *wbc)
page              285 fs/jfs/inode.c 	return block_write_full_page(page, jfs_get_block, wbc);
page              294 fs/jfs/inode.c static int jfs_readpage(struct file *file, struct page *page)
page              296 fs/jfs/inode.c 	return mpage_readpage(page, jfs_get_block);
page              317 fs/jfs/inode.c 				struct page **pagep, void **fsdata)
page              497 fs/jfs/jfs_logmgr.c 		lsn = (log->page << L2LOGPSIZE) + dstoffset;
page              519 fs/jfs/jfs_logmgr.c 			tblk->pn = log->page;
page              529 fs/jfs/jfs_logmgr.c 			le16_to_cpu(lrd->type), log->bp, log->page, dstoffset);
page              570 fs/jfs/jfs_logmgr.c 	pn = log->page;
page              573 fs/jfs/jfs_logmgr.c 	lspn = le32_to_cpu(lp->h.page);
page              634 fs/jfs/jfs_logmgr.c 	log->page = (pn == log->size - 1) ? 2 : pn + 1;
page              638 fs/jfs/jfs_logmgr.c 	nextbp = lbmAllocate(log, log->page);
page              644 fs/jfs/jfs_logmgr.c 	lp->h.page = lp->t.page = cpu_to_le32(lspn + 1);
page             1335 fs/jfs/jfs_logmgr.c 		log->page = le32_to_cpu(logsuper->end) / LOGPSIZE;
page             1336 fs/jfs/jfs_logmgr.c 		log->eor = le32_to_cpu(logsuper->end) - (LOGPSIZE * log->page);
page             1342 fs/jfs/jfs_logmgr.c 		if ((rc = lbmRead(log, log->page, &bp)))
page             1348 fs/jfs/jfs_logmgr.c 			 le32_to_cpu(logsuper->end), log->page, log->eor,
page             1352 fs/jfs/jfs_logmgr.c 		bp->l_pn = log->page;
page             1610 fs/jfs/jfs_logmgr.c 					       sizeof(long), mp->page,
page             1611 fs/jfs/jfs_logmgr.c 					       sizeof(struct page), 0);
page             1685 fs/jfs/jfs_logmgr.c 		 lsn, log->page, log->eor);
page             1823 fs/jfs/jfs_logmgr.c 		struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
page             1825 fs/jfs/jfs_logmgr.c 		if (!page)
page             1827 fs/jfs/jfs_logmgr.c 		buffer = page_address(page);
page             1832 fs/jfs/jfs_logmgr.c 					__free_page(page);
page             1836 fs/jfs/jfs_logmgr.c 				get_page(page);
page             1839 fs/jfs/jfs_logmgr.c 			lbuf->l_page = page;
page             2441 fs/jfs/jfs_logmgr.c 	lp->h.page = lp->t.page = cpu_to_le32(npages - 3);
page             2461 fs/jfs/jfs_logmgr.c 		lp->h.page = lp->t.page = cpu_to_le32(lspn);
page              111 fs/jfs/jfs_logmgr.h 		__le32 page;	/* 4: log sequence page number */
page              119 fs/jfs/jfs_logmgr.h 		__le32 page;	/* 4: normally the same as h.page */
page              373 fs/jfs/jfs_logmgr.h 	int page;		/* 4: page number of eol page */
page              451 fs/jfs/jfs_logmgr.h 	struct page *l_page;	/* The page itself */
page               48 fs/jfs/jfs_metapage.c 			unlock_page(mp->page);
page               50 fs/jfs/jfs_metapage.c 			lock_page(mp->page);
page               79 fs/jfs/jfs_metapage.c #define mp_anchor(page) ((struct meta_anchor *)page_private(page))
page               81 fs/jfs/jfs_metapage.c static inline struct metapage *page_to_mp(struct page *page, int offset)
page               83 fs/jfs/jfs_metapage.c 	if (!PagePrivate(page))
page               85 fs/jfs/jfs_metapage.c 	return mp_anchor(page)->mp[offset >> L2PSIZE];
page               88 fs/jfs/jfs_metapage.c static inline int insert_metapage(struct page *page, struct metapage *mp)
page               94 fs/jfs/jfs_metapage.c 	if (PagePrivate(page))
page               95 fs/jfs/jfs_metapage.c 		a = mp_anchor(page);
page              100 fs/jfs/jfs_metapage.c 		set_page_private(page, (unsigned long)a);
page              101 fs/jfs/jfs_metapage.c 		SetPagePrivate(page);
page              102 fs/jfs/jfs_metapage.c 		kmap(page);
page              106 fs/jfs/jfs_metapage.c 		l2mp_blocks = L2PSIZE - page->mapping->host->i_blkbits;
page              115 fs/jfs/jfs_metapage.c static inline void remove_metapage(struct page *page, struct metapage *mp)
page              117 fs/jfs/jfs_metapage.c 	struct meta_anchor *a = mp_anchor(page);
page              118 fs/jfs/jfs_metapage.c 	int l2mp_blocks = L2PSIZE - page->mapping->host->i_blkbits;
page              128 fs/jfs/jfs_metapage.c 		set_page_private(page, 0);
page              129 fs/jfs/jfs_metapage.c 		ClearPagePrivate(page);
page              130 fs/jfs/jfs_metapage.c 		kunmap(page);
page              134 fs/jfs/jfs_metapage.c static inline void inc_io(struct page *page)
page              136 fs/jfs/jfs_metapage.c 	atomic_inc(&mp_anchor(page)->io_count);
page              139 fs/jfs/jfs_metapage.c static inline void dec_io(struct page *page, void (*handler) (struct page *))
page              141 fs/jfs/jfs_metapage.c 	if (atomic_dec_and_test(&mp_anchor(page)->io_count))
page              142 fs/jfs/jfs_metapage.c 		handler(page);
page              146 fs/jfs/jfs_metapage.c static inline struct metapage *page_to_mp(struct page *page, int offset)
page              148 fs/jfs/jfs_metapage.c 	return PagePrivate(page) ? (struct metapage *)page_private(page) : NULL;
page              151 fs/jfs/jfs_metapage.c static inline int insert_metapage(struct page *page, struct metapage *mp)
page              154 fs/jfs/jfs_metapage.c 		set_page_private(page, (unsigned long)mp);
page              155 fs/jfs/jfs_metapage.c 		SetPagePrivate(page);
page              156 fs/jfs/jfs_metapage.c 		kmap(page);
page              161 fs/jfs/jfs_metapage.c static inline void remove_metapage(struct page *page, struct metapage *mp)
page              163 fs/jfs/jfs_metapage.c 	set_page_private(page, 0);
page              164 fs/jfs/jfs_metapage.c 	ClearPagePrivate(page);
page              165 fs/jfs/jfs_metapage.c 	kunmap(page);
page              168 fs/jfs/jfs_metapage.c #define inc_io(page) do {} while(0)
page              169 fs/jfs/jfs_metapage.c #define dec_io(page, handler) handler(page)
page              220 fs/jfs/jfs_metapage.c static inline void drop_metapage(struct page *page, struct metapage *mp)
page              225 fs/jfs/jfs_metapage.c 	remove_metapage(page, mp);
page              259 fs/jfs/jfs_metapage.c static void last_read_complete(struct page *page)
page              261 fs/jfs/jfs_metapage.c 	if (!PageError(page))
page              262 fs/jfs/jfs_metapage.c 		SetPageUptodate(page);
page              263 fs/jfs/jfs_metapage.c 	unlock_page(page);
page              268 fs/jfs/jfs_metapage.c 	struct page *page = bio->bi_private;
page              272 fs/jfs/jfs_metapage.c 		SetPageError(page);
page              275 fs/jfs/jfs_metapage.c 	dec_io(page, last_read_complete);
page              301 fs/jfs/jfs_metapage.c static void last_write_complete(struct page *page)
page              307 fs/jfs/jfs_metapage.c 		mp = page_to_mp(page, offset);
page              318 fs/jfs/jfs_metapage.c 	end_page_writeback(page);
page              323 fs/jfs/jfs_metapage.c 	struct page *page = bio->bi_private;
page              325 fs/jfs/jfs_metapage.c 	BUG_ON(!PagePrivate(page));
page              329 fs/jfs/jfs_metapage.c 		SetPageError(page);
page              331 fs/jfs/jfs_metapage.c 	dec_io(page, last_write_complete);
page              335 fs/jfs/jfs_metapage.c static int metapage_writepage(struct page *page, struct writeback_control *wbc)
page              339 fs/jfs/jfs_metapage.c 	struct inode *inode = page->mapping->host;
page              355 fs/jfs/jfs_metapage.c 	page_start = (sector_t)page->index <<
page              357 fs/jfs/jfs_metapage.c 	BUG_ON(!PageLocked(page));
page              358 fs/jfs/jfs_metapage.c 	BUG_ON(PageWriteback(page));
page              359 fs/jfs/jfs_metapage.c 	set_page_writeback(page);
page              362 fs/jfs/jfs_metapage.c 		mp = page_to_mp(page, offset);
page              391 fs/jfs/jfs_metapage.c 			if (bio_add_page(bio, page, bio_bytes, bio_offset) <
page              398 fs/jfs/jfs_metapage.c 			inc_io(page);
page              405 fs/jfs/jfs_metapage.c 			inc_io(page);
page              423 fs/jfs/jfs_metapage.c 		bio->bi_private = page;
page              434 fs/jfs/jfs_metapage.c 		if (bio_add_page(bio, page, bio_bytes, bio_offset) < bio_bytes)
page              443 fs/jfs/jfs_metapage.c 		redirty_page_for_writepage(wbc, page);
page              445 fs/jfs/jfs_metapage.c 	unlock_page(page);
page              451 fs/jfs/jfs_metapage.c 		end_page_writeback(page);
page              463 fs/jfs/jfs_metapage.c 	unlock_page(page);
page              464 fs/jfs/jfs_metapage.c 	dec_io(page, last_write_complete);
page              467 fs/jfs/jfs_metapage.c 		dec_io(page, last_write_complete);
page              471 fs/jfs/jfs_metapage.c static int metapage_readpage(struct file *fp, struct page *page)
page              473 fs/jfs/jfs_metapage.c 	struct inode *inode = page->mapping->host;
page              483 fs/jfs/jfs_metapage.c 	BUG_ON(!PageLocked(page));
page              484 fs/jfs/jfs_metapage.c 	page_start = (sector_t)page->index <<
page              493 fs/jfs/jfs_metapage.c 			if (!PagePrivate(page))
page              494 fs/jfs/jfs_metapage.c 				insert_metapage(page, NULL);
page              495 fs/jfs/jfs_metapage.c 			inc_io(page);
page              504 fs/jfs/jfs_metapage.c 			bio->bi_private = page;
page              508 fs/jfs/jfs_metapage.c 			if (bio_add_page(bio, page, len, offset) < len)
page              517 fs/jfs/jfs_metapage.c 		unlock_page(page);
page              524 fs/jfs/jfs_metapage.c 	dec_io(page, last_read_complete);
page              528 fs/jfs/jfs_metapage.c static int metapage_releasepage(struct page *page, gfp_t gfp_mask)
page              535 fs/jfs/jfs_metapage.c 		mp = page_to_mp(page, offset);
page              550 fs/jfs/jfs_metapage.c 		remove_metapage(page, mp);
page              557 fs/jfs/jfs_metapage.c static void metapage_invalidatepage(struct page *page, unsigned int offset,
page              562 fs/jfs/jfs_metapage.c 	BUG_ON(PageWriteback(page));
page              564 fs/jfs/jfs_metapage.c 	metapage_releasepage(page, 0);
page              583 fs/jfs/jfs_metapage.c 	struct page *page;
page              614 fs/jfs/jfs_metapage.c 		page = grab_cache_page(mapping, page_index);
page              615 fs/jfs/jfs_metapage.c 		if (!page) {
page              619 fs/jfs/jfs_metapage.c 		SetPageUptodate(page);
page              621 fs/jfs/jfs_metapage.c 		page = read_mapping_page(mapping, page_index, NULL);
page              622 fs/jfs/jfs_metapage.c 		if (IS_ERR(page) || !PageUptodate(page)) {
page              626 fs/jfs/jfs_metapage.c 		lock_page(page);
page              629 fs/jfs/jfs_metapage.c 	mp = page_to_mp(page, page_offset);
page              655 fs/jfs/jfs_metapage.c 		mp->page = page;
page              662 fs/jfs/jfs_metapage.c 		mp->data = page_address(page) + page_offset;
page              664 fs/jfs/jfs_metapage.c 		if (unlikely(insert_metapage(page, mp))) {
page              676 fs/jfs/jfs_metapage.c 	unlock_page(page);
page              681 fs/jfs/jfs_metapage.c 	unlock_page(page);
page              688 fs/jfs/jfs_metapage.c 	get_page(mp->page);
page              689 fs/jfs/jfs_metapage.c 	lock_page(mp->page);
page              692 fs/jfs/jfs_metapage.c 	unlock_page(mp->page);
page              697 fs/jfs/jfs_metapage.c 	struct page *page = mp->page;
page              701 fs/jfs/jfs_metapage.c 	get_page(page);
page              702 fs/jfs/jfs_metapage.c 	lock_page(page);
page              703 fs/jfs/jfs_metapage.c 	set_page_dirty(page);
page              704 fs/jfs/jfs_metapage.c 	if (write_one_page(page))
page              707 fs/jfs/jfs_metapage.c 	put_page(page);
page              712 fs/jfs/jfs_metapage.c 	lock_page(mp->page);
page              719 fs/jfs/jfs_metapage.c 		unlock_page(mp->page);
page              722 fs/jfs/jfs_metapage.c 	get_page(mp->page);
page              725 fs/jfs/jfs_metapage.c 	unlock_page(mp->page);
page              731 fs/jfs/jfs_metapage.c 	struct page *page = mp->page;
page              734 fs/jfs/jfs_metapage.c 	BUG_ON(!page);
page              736 fs/jfs/jfs_metapage.c 	lock_page(page);
page              741 fs/jfs/jfs_metapage.c 		unlock_page(page);
page              742 fs/jfs/jfs_metapage.c 		put_page(page);
page              747 fs/jfs/jfs_metapage.c 		set_page_dirty(page);
page              750 fs/jfs/jfs_metapage.c 			if (write_one_page(page))
page              752 fs/jfs/jfs_metapage.c 			lock_page(page); /* write_one_page unlocks the page */
page              758 fs/jfs/jfs_metapage.c 	drop_metapage(page, mp);
page              760 fs/jfs/jfs_metapage.c 	unlock_page(page);
page              761 fs/jfs/jfs_metapage.c 	put_page(page);
page              773 fs/jfs/jfs_metapage.c 	struct page *page;
page              782 fs/jfs/jfs_metapage.c 		page = find_lock_page(mapping, lblock >> l2BlocksPerPage);
page              783 fs/jfs/jfs_metapage.c 		if (!page)
page              786 fs/jfs/jfs_metapage.c 			mp = page_to_mp(page, offset);
page              799 fs/jfs/jfs_metapage.c 		unlock_page(page);
page              800 fs/jfs/jfs_metapage.c 		put_page(page);
page               27 fs/jfs/jfs_metapage.h 	struct page *page;
page               93 fs/jfs/jfs_metapage.h 	struct page *page = mp->page;
page               94 fs/jfs/jfs_metapage.h 	lock_page(page);
page               97 fs/jfs/jfs_metapage.h 		get_page(page);
page               98 fs/jfs/jfs_metapage.h 		wait_on_page_writeback(page);
page              100 fs/jfs/jfs_metapage.h 	unlock_page(page);
page              110 fs/jfs/jfs_metapage.h 		wait_on_page_writeback(mp->page);
page              119 fs/jfs/jfs_metapage.h 		put_page(mp->page);
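The fs/jfs/jfs_metapage.c entries above contain two variants of insert_metapage()/remove_metapage(): a meta_anchor array when several metapages share one page, and a direct page_private() stash when a single metapage covers the whole page. The second, simpler variant boils down to this pairing (hypothetical function names; the body mirrors the index lines above):

	/* Sketch of the one-metapage-per-page page_private() stash above. */
	static void attach_metapage(struct page *page, struct metapage *mp)
	{
		set_page_private(page, (unsigned long)mp);
		SetPagePrivate(page);	/* tell the VM this page carries private state */
		kmap(page);		/* keep a kernel mapping while attached */
	}

	static void detach_metapage(struct page *page)
	{
		set_page_private(page, 0);
		ClearPagePrivate(page);
		kunmap(page);
	}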
page              440 fs/libfs.c     int simple_readpage(struct file *file, struct page *page)
page              442 fs/libfs.c     	clear_highpage(page);
page              443 fs/libfs.c     	flush_dcache_page(page);
page              444 fs/libfs.c     	SetPageUptodate(page);
page              445 fs/libfs.c     	unlock_page(page);
page              452 fs/libfs.c     			struct page **pagep, void **fsdata)
page              454 fs/libfs.c     	struct page *page;
page              459 fs/libfs.c     	page = grab_cache_page_write_begin(mapping, index, flags);
page              460 fs/libfs.c     	if (!page)
page              463 fs/libfs.c     	*pagep = page;
page              465 fs/libfs.c     	if (!PageUptodate(page) && (len != PAGE_SIZE)) {
page              468 fs/libfs.c     		zero_user_segments(page, 0, from, from + len, PAGE_SIZE);
page              498 fs/libfs.c     			struct page *page, void *fsdata)
page              500 fs/libfs.c     	struct inode *inode = page->mapping->host;
page              504 fs/libfs.c     	if (!PageUptodate(page)) {
page              508 fs/libfs.c     			zero_user(page, from + copied, len - copied);
page              510 fs/libfs.c     		SetPageUptodate(page);
page              519 fs/libfs.c     	set_page_dirty(page);
page              520 fs/libfs.c     	unlock_page(page);
page              521 fs/libfs.c     	put_page(page);
page             1088 fs/libfs.c     int noop_set_page_dirty(struct page *page)
page             1104 fs/libfs.c     void noop_invalidatepage(struct page *page, unsigned int offset,
page             1138 fs/libfs.c     static int anon_set_page_dirty(struct page *page)
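The fs/libfs.c entries above are the stock helpers for RAM-backed files: simple_readpage() hands back a zeroed, uptodate page, and simple_write_begin()/simple_write_end() do the pagecache bookkeeping with no backing store. A minimal wiring in the style ramfs uses (foo_aops is illustrative):

	/* Illustrative: a RAM-backed file needs little beyond the libfs
	 * helpers traced above; dirty pages are never written back. */
	static const struct address_space_operations foo_aops = {
		.readpage	= simple_readpage,
		.write_begin	= simple_write_begin,
		.write_end	= simple_write_end,
		.set_page_dirty	= __set_page_dirty_no_writeback,
	};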
page               29 fs/minix/dir.c static inline void dir_put_page(struct page *page)
page               31 fs/minix/dir.c 	kunmap(page);
page               32 fs/minix/dir.c 	put_page(page);
page               49 fs/minix/dir.c static int dir_commit_chunk(struct page *page, loff_t pos, unsigned len)
page               51 fs/minix/dir.c 	struct address_space *mapping = page->mapping;
page               54 fs/minix/dir.c 	block_write_end(NULL, mapping, pos, len, len, page, NULL);
page               61 fs/minix/dir.c 		err = write_one_page(page);
page               63 fs/minix/dir.c 		unlock_page(page);
page               67 fs/minix/dir.c static struct page * dir_get_page(struct inode *dir, unsigned long n)
page               70 fs/minix/dir.c 	struct page *page = read_mapping_page(mapping, n, NULL);
page               71 fs/minix/dir.c 	if (!IS_ERR(page))
page               72 fs/minix/dir.c 		kmap(page);
page               73 fs/minix/dir.c 	return page;
page              101 fs/minix/dir.c 		struct page *page = dir_get_page(inode, n);
page              103 fs/minix/dir.c 		if (IS_ERR(page))
page              105 fs/minix/dir.c 		kaddr = (char *)page_address(page);
page              124 fs/minix/dir.c 					dir_put_page(page);
page              130 fs/minix/dir.c 		dir_put_page(page);
page              151 fs/minix/dir.c minix_dirent *minix_find_entry(struct dentry *dentry, struct page **res_page)
page              160 fs/minix/dir.c 	struct page *page = NULL;
page              170 fs/minix/dir.c 		page = dir_get_page(dir, n);
page              171 fs/minix/dir.c 		if (IS_ERR(page))
page              174 fs/minix/dir.c 		kaddr = (char*)page_address(page);
page              191 fs/minix/dir.c 		dir_put_page(page);
page              196 fs/minix/dir.c 	*res_page = page;
page              207 fs/minix/dir.c 	struct page *page = NULL;
page              226 fs/minix/dir.c 		page = dir_get_page(dir, n);
page              227 fs/minix/dir.c 		err = PTR_ERR(page);
page              228 fs/minix/dir.c 		if (IS_ERR(page))
page              230 fs/minix/dir.c 		lock_page(page);
page              231 fs/minix/dir.c 		kaddr = (char*)page_address(page);
page              258 fs/minix/dir.c 		unlock_page(page);
page              259 fs/minix/dir.c 		dir_put_page(page);
page              265 fs/minix/dir.c 	pos = page_offset(page) + p - (char *)page_address(page);
page              266 fs/minix/dir.c 	err = minix_prepare_chunk(page, pos, sbi->s_dirsize);
page              277 fs/minix/dir.c 	err = dir_commit_chunk(page, pos, sbi->s_dirsize);
page              281 fs/minix/dir.c 	dir_put_page(page);
page              285 fs/minix/dir.c 	unlock_page(page);
page              289 fs/minix/dir.c int minix_delete_entry(struct minix_dir_entry *de, struct page *page)
page              291 fs/minix/dir.c 	struct inode *inode = page->mapping->host;
page              292 fs/minix/dir.c 	char *kaddr = page_address(page);
page              293 fs/minix/dir.c 	loff_t pos = page_offset(page) + (char*)de - kaddr;
page              298 fs/minix/dir.c 	lock_page(page);
page              299 fs/minix/dir.c 	err = minix_prepare_chunk(page, pos, len);
page              305 fs/minix/dir.c 		err = dir_commit_chunk(page, pos, len);
page              307 fs/minix/dir.c 		unlock_page(page);
page              309 fs/minix/dir.c 	dir_put_page(page);
page              317 fs/minix/dir.c 	struct page *page = grab_cache_page(inode->i_mapping, 0);
page              322 fs/minix/dir.c 	if (!page)
page              324 fs/minix/dir.c 	err = minix_prepare_chunk(page, 0, 2 * sbi->s_dirsize);
page              326 fs/minix/dir.c 		unlock_page(page);
page              330 fs/minix/dir.c 	kaddr = kmap_atomic(page);
page              352 fs/minix/dir.c 	err = dir_commit_chunk(page, 0, 2 * sbi->s_dirsize);
page              354 fs/minix/dir.c 	put_page(page);
page              363 fs/minix/dir.c 	struct page *page = NULL;
page              372 fs/minix/dir.c 		page = dir_get_page(inode, i);
page              373 fs/minix/dir.c 		if (IS_ERR(page))
page              376 fs/minix/dir.c 		kaddr = (char *)page_address(page);
page              402 fs/minix/dir.c 		dir_put_page(page);
page              407 fs/minix/dir.c 	dir_put_page(page);
page              412 fs/minix/dir.c void minix_set_link(struct minix_dir_entry *de, struct page *page,
page              415 fs/minix/dir.c 	struct inode *dir = page->mapping->host;
page              417 fs/minix/dir.c 	loff_t pos = page_offset(page) +
page              418 fs/minix/dir.c 			(char *)de-(char*)page_address(page);
page              421 fs/minix/dir.c 	lock_page(page);
page              423 fs/minix/dir.c 	err = minix_prepare_chunk(page, pos, sbi->s_dirsize);
page              429 fs/minix/dir.c 		err = dir_commit_chunk(page, pos, sbi->s_dirsize);
page              431 fs/minix/dir.c 		unlock_page(page);
page              433 fs/minix/dir.c 	dir_put_page(page);
page              438 fs/minix/dir.c struct minix_dir_entry * minix_dotdot (struct inode *dir, struct page **p)
page              440 fs/minix/dir.c 	struct page *page = dir_get_page(dir, 0);
page              444 fs/minix/dir.c 	if (!IS_ERR(page)) {
page              445 fs/minix/dir.c 		de = minix_next_entry(page_address(page), sbi);
page              446 fs/minix/dir.c 		*p = page;
page              453 fs/minix/dir.c 	struct page *page;
page              454 fs/minix/dir.c 	struct minix_dir_entry *de = minix_find_entry(dentry, &page);
page              458 fs/minix/dir.c 		struct address_space *mapping = page->mapping;
page              466 fs/minix/dir.c 		dir_put_page(page);
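The fs/minix/dir.c entries above revolve around the dir_get_page()/dir_put_page() pair: each directory page is read via read_mapping_page(), held kmap()ed while its entries are walked, then unmapped and released. The consuming loop, condensed into a hypothetical helper:

	/* Condensed sketch of minix's page-at-a-time directory walk above. */
	static void scan_dir_pages(struct inode *dir, unsigned long npages)
	{
		unsigned long n;

		for (n = 0; n < npages; n++) {
			struct page *page = dir_get_page(dir, n);
			char *kaddr;

			if (IS_ERR(page))
				continue;
			kaddr = (char *)page_address(page);
			/* ... match or emit directory entries in kaddr ... */
			dir_put_page(page);	/* kunmap + put_page */
		}
	}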
page              381 fs/minix/inode.c static int minix_writepage(struct page *page, struct writeback_control *wbc)
page              383 fs/minix/inode.c 	return block_write_full_page(page, minix_get_block, wbc);
page              386 fs/minix/inode.c static int minix_readpage(struct file *file, struct page *page)
page              388 fs/minix/inode.c 	return block_read_full_page(page,minix_get_block);
page              391 fs/minix/inode.c int minix_prepare_chunk(struct page *page, loff_t pos, unsigned len)
page              393 fs/minix/inode.c 	return __block_write_begin(page, pos, len, minix_get_block);
page              408 fs/minix/inode.c 			struct page **pagep, void **fsdata)
page               56 fs/minix/minix.h extern int minix_prepare_chunk(struct page *page, loff_t pos, unsigned len);
page               67 fs/minix/minix.h extern struct minix_dir_entry *minix_find_entry(struct dentry*, struct page**);
page               69 fs/minix/minix.h extern int minix_delete_entry(struct minix_dir_entry*, struct page*);
page               72 fs/minix/minix.h extern void minix_set_link(struct minix_dir_entry*, struct page*, struct inode*);
page               73 fs/minix/minix.h extern struct minix_dir_entry *minix_dotdot(struct inode*, struct page**);
page              152 fs/minix/namei.c 	struct page * page;
page              155 fs/minix/namei.c 	de = minix_find_entry(dentry, &page);
page              159 fs/minix/namei.c 	err = minix_delete_entry(de, page);
page              190 fs/minix/namei.c 	struct page * dir_page = NULL;
page              192 fs/minix/namei.c 	struct page * old_page;
page              211 fs/minix/namei.c 		struct page * new_page;
page               53 fs/mpage.c     		struct page *page = bv->bv_page;
page               54 fs/mpage.c     		page_endio(page, bio_op(bio),
page              104 fs/mpage.c     map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block) 
page              106 fs/mpage.c     	struct inode *inode = page->mapping->host;
page              110 fs/mpage.c     	if (!page_has_buffers(page)) {
page              117 fs/mpage.c     			SetPageUptodate(page);    
page              120 fs/mpage.c     		create_empty_buffers(page, i_blocksize(inode), 0);
page              122 fs/mpage.c     	head = page_buffers(page);
page              138 fs/mpage.c     	struct page *page;
page              158 fs/mpage.c     	struct page *page = args->page;
page              159 fs/mpage.c     	struct inode *inode = page->mapping->host;
page              180 fs/mpage.c     		gfp = readahead_gfp_mask(page->mapping);
page              183 fs/mpage.c     		gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
page              186 fs/mpage.c     	if (page_has_buffers(page))
page              189 fs/mpage.c     	block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
page              224 fs/mpage.c     	map_bh->b_page = page;
page              252 fs/mpage.c     			map_buffer_to_page(page, map_bh, page_block);
page              277 fs/mpage.c     		zero_user_segment(page, first_hole << blkbits, PAGE_SIZE);
page              279 fs/mpage.c     			SetPageUptodate(page);
page              280 fs/mpage.c     			unlock_page(page);
page              284 fs/mpage.c     		SetPageMappedToDisk(page);
page              287 fs/mpage.c     	if (fully_mapped && blocks_per_page == 1 && !PageUptodate(page) &&
page              288 fs/mpage.c     	    cleancache_get_page(page) == 0) {
page              289 fs/mpage.c     		SetPageUptodate(page);
page              303 fs/mpage.c     								page))
page              315 fs/mpage.c     	if (bio_add_page(args->bio, page, length, 0) < length) {
page              333 fs/mpage.c     	if (!PageUptodate(page))
page              334 fs/mpage.c     		block_read_full_page(page, args->get_block);
page              336 fs/mpage.c     		unlock_page(page);
page              395 fs/mpage.c     		struct page *page = lru_to_page(pages);
page              397 fs/mpage.c     		prefetchw(&page->flags);
page              398 fs/mpage.c     		list_del(&page->lru);
page              399 fs/mpage.c     		if (!add_to_page_cache_lru(page, mapping,
page              400 fs/mpage.c     					page->index,
page              402 fs/mpage.c     			args.page = page;
page              406 fs/mpage.c     		put_page(page);
page              418 fs/mpage.c     int mpage_readpage(struct page *page, get_block_t get_block)
page              421 fs/mpage.c     		.page = page,
page              461 fs/mpage.c     static void clean_buffers(struct page *page, unsigned first_unmapped)
page              465 fs/mpage.c     	if (!page_has_buffers(page))
page              467 fs/mpage.c     	head = page_buffers(page);
page              482 fs/mpage.c     	if (buffer_heads_over_limit && PageUptodate(page))
page              483 fs/mpage.c     		try_to_free_buffers(page);
page              491 fs/mpage.c     void clean_page_buffers(struct page *page)
page              493 fs/mpage.c     	clean_buffers(page, ~0U);
page              496 fs/mpage.c     static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
page              501 fs/mpage.c     	struct address_space *mapping = page->mapping;
page              502 fs/mpage.c     	struct inode *inode = page->mapping->host;
page              521 fs/mpage.c     	if (page_has_buffers(page)) {
page              522 fs/mpage.c     		struct buffer_head *head = page_buffers(page);
page              574 fs/mpage.c     	BUG_ON(!PageUptodate(page));
page              575 fs/mpage.c     	block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
page              577 fs/mpage.c     	map_bh.b_page = page;
page              607 fs/mpage.c     	if (page->index >= end_index) {
page              618 fs/mpage.c     		if (page->index > end_index || !offset)
page              620 fs/mpage.c     		zero_user_segment(page, offset, PAGE_SIZE);
page              633 fs/mpage.c     								page, wbc))
page              650 fs/mpage.c     	wbc_account_cgroup_owner(wbc, page, PAGE_SIZE);
page              652 fs/mpage.c     	if (bio_add_page(bio, page, length, 0) < length) {
page              657 fs/mpage.c     	clean_buffers(page, first_unmapped);
page              659 fs/mpage.c     	BUG_ON(PageWriteback(page));
page              660 fs/mpage.c     	set_page_writeback(page);
page              661 fs/mpage.c     	unlock_page(page);
page              678 fs/mpage.c     		ret = mapping->a_ops->writepage(page, wbc);
page              742 fs/mpage.c     int mpage_writepage(struct page *page, get_block_t get_block,
page              751 fs/mpage.c     	int ret = __mpage_writepage(page, wbc, &mpd);
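Several filesystems indexed above (isofs, jfs, minix) hook ->readpage straight to mpage_readpage(), so the whole implementation is one line; foo_get_block stands in for the filesystem's get_block_t:

	/* Illustrative one-line ->readpage, as isofs_readpage(), jfs_readpage()
	 * and minix_readpage() above do; foo_get_block is hypothetical. */
	static int foo_readpage(struct file *file, struct page *page)
	{
		return mpage_readpage(page, foo_get_block);
	}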
page             4771 fs/namei.c     	struct page *page;
page             4775 fs/namei.c     		page = find_get_page(mapping, 0);
page             4776 fs/namei.c     		if (!page)
page             4778 fs/namei.c     		if (!PageUptodate(page)) {
page             4779 fs/namei.c     			put_page(page);
page             4783 fs/namei.c     		page = read_mapping_page(mapping, 0, NULL);
page             4784 fs/namei.c     		if (IS_ERR(page))
page             4785 fs/namei.c     			return (char*)page;
page             4787 fs/namei.c     	set_delayed_call(callback, page_put_link, page);
page             4789 fs/namei.c     	kaddr = page_address(page);
page             4819 fs/namei.c     	struct page *page;
page             4828 fs/namei.c     				flags, &page, &fsdata);
page             4832 fs/namei.c     	memcpy(page_address(page), symname, len-1);
page             4835 fs/namei.c     							page, fsdata);
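The fs/namei.c entries above are page_get_link() and the __page_symlink() write path: symlink bodies live in the page cache and the target string goes through the ordinary write_begin/write_end pair. A filesystem with page-cache-backed symlinks opts in simply by pointing its symlink inode_operations at the generic helper (foo_symlink_iops is illustrative):

	/* Illustrative: page-cache-backed symlinks need only the generic
	 * page_get_link() traced above. */
	static const struct inode_operations foo_symlink_iops = {
		.get_link	= page_get_link,
	};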
page              147 fs/nfs/blocklayout/blocklayout.c 		struct page *page, struct pnfs_block_dev_map *map,
page              185 fs/nfs/blocklayout/blocklayout.c 	if (bio_add_page(bio, page, *len, offset) < *len) {
page              264 fs/nfs/blocklayout/blocklayout.c 	struct page **pages = header->args.pages;
page              408 fs/nfs/blocklayout/blocklayout.c 	struct page **pages = header->args.pages;
page              681 fs/nfs/blocklayout/blocklayout.c 	struct page *scratch;
page              504 fs/nfs/blocklayout/dev.c 	struct page *scratch;
page              581 fs/nfs/blocklayout/extent_tree.c 				sizeof(struct page *), GFP_NOFS);
page              599 fs/nfs/blocklayout/extent_tree.c 		struct page *page = NULL;
page              604 fs/nfs/blocklayout/extent_tree.c 			page = vmalloc_to_page(p);
page              605 fs/nfs/blocklayout/extent_tree.c 			arg->layoutupdate_pages[i++] = page;
page              606 fs/nfs/blocklayout/extent_tree.c 			get_page(page);
page               56 fs/nfs/dir.c   static void nfs_readdir_clear_array(struct page*);
page              150 fs/nfs/dir.c   	struct page	*page;
page              166 fs/nfs/dir.c   void nfs_readdir_init_array(struct page *page)
page              170 fs/nfs/dir.c   	array = kmap_atomic(page);
page              180 fs/nfs/dir.c   void nfs_readdir_clear_array(struct page *page)
page              185 fs/nfs/dir.c   	array = kmap_atomic(page);
page              214 fs/nfs/dir.c   int nfs_readdir_add_to_array(struct nfs_entry *entry, struct page *page)
page              216 fs/nfs/dir.c   	struct nfs_cache_array *array = kmap(page);
page              224 fs/nfs/dir.c   	if ((char *)&cache_entry[1] - (char *)page_address(page) > PAGE_SIZE)
page              238 fs/nfs/dir.c   	kunmap(page);
page              327 fs/nfs/dir.c   	array = kmap(desc->page);
page              339 fs/nfs/dir.c   	kunmap(desc->page);
page              345 fs/nfs/dir.c   int nfs_readdir_xdr_filler(struct page **pages, nfs_readdir_descriptor_t *desc,
page              536 fs/nfs/dir.c   				struct page **xdr_pages, struct page *page, unsigned int buflen)
page              540 fs/nfs/dir.c   	struct page *scratch;
page              568 fs/nfs/dir.c   		status = nfs_readdir_add_to_array(entry, page);
page              575 fs/nfs/dir.c   		array = kmap(page);
page              578 fs/nfs/dir.c   		kunmap(page);
page              586 fs/nfs/dir.c   void nfs_readdir_free_pages(struct page **pages, unsigned int npages)
page              598 fs/nfs/dir.c   int nfs_readdir_alloc_pages(struct page **pages, unsigned int npages)
page              603 fs/nfs/dir.c   		struct page *page = alloc_page(GFP_KERNEL);
page              604 fs/nfs/dir.c   		if (page == NULL)
page              606 fs/nfs/dir.c   		pages[i] = page;
page              616 fs/nfs/dir.c   int nfs_readdir_xdr_to_array(nfs_readdir_descriptor_t *desc, struct page *page, struct inode *inode)
page              618 fs/nfs/dir.c   	struct page *pages[NFS_MAX_READDIR_PAGES];
page              625 fs/nfs/dir.c   	nfs_readdir_init_array(page);
page              642 fs/nfs/dir.c   	array = kmap(page);
page              654 fs/nfs/dir.c   		status = nfs_readdir_page_filler(desc, &entry, pages, page, pglen);
page              664 fs/nfs/dir.c   	kunmap(page);
page              679 fs/nfs/dir.c   int nfs_readdir_filler(void *data, struct page* page)
page              685 fs/nfs/dir.c   	ret = nfs_readdir_xdr_to_array(desc, page, inode);
page              688 fs/nfs/dir.c   	SetPageUptodate(page);
page              690 fs/nfs/dir.c   	if (invalidate_inode_pages2_range(inode->i_mapping, page->index + 1, -1) < 0) {
page              694 fs/nfs/dir.c   	unlock_page(page);
page              697 fs/nfs/dir.c   	nfs_readdir_clear_array(page);
page              698 fs/nfs/dir.c   	unlock_page(page);
page              705 fs/nfs/dir.c   	put_page(desc->page);
page              706 fs/nfs/dir.c   	desc->page = NULL;
page              710 fs/nfs/dir.c   struct page *get_cache_page(nfs_readdir_descriptor_t *desc)
page              725 fs/nfs/dir.c   	desc->page = get_cache_page(desc);
page              726 fs/nfs/dir.c   	if (IS_ERR(desc->page))
page              727 fs/nfs/dir.c   		return PTR_ERR(desc->page);
page              728 fs/nfs/dir.c   	res = lock_page_killable(desc->page);
page              732 fs/nfs/dir.c   	if (desc->page->mapping != NULL) {
page              737 fs/nfs/dir.c   	unlock_page(desc->page);
page              771 fs/nfs/dir.c   	array = kmap(desc->page);
page              792 fs/nfs/dir.c   	kunmap(desc->page);
page              813 fs/nfs/dir.c   	struct page	*page = NULL;
page              821 fs/nfs/dir.c   	page = alloc_page(GFP_HIGHUSER);
page              822 fs/nfs/dir.c   	if (!page) {
page              829 fs/nfs/dir.c   	desc->page = page;
page              832 fs/nfs/dir.c   	status = nfs_readdir_xdr_to_array(desc, page, inode);
page              839 fs/nfs/dir.c   	nfs_readdir_clear_array(desc->page);
page              909 fs/nfs/dir.c   		unlock_page(desc->page);
page             1958 fs/nfs/dir.c   	struct page *page;
page             1973 fs/nfs/dir.c   	page = alloc_page(GFP_USER);
page             1974 fs/nfs/dir.c   	if (!page)
page             1977 fs/nfs/dir.c   	kaddr = page_address(page);
page             1983 fs/nfs/dir.c   	error = NFS_PROTO(dir)->symlink(dir, dentry, page, pathlen, &attr);
page             1990 fs/nfs/dir.c   		__free_page(page);
page             1998 fs/nfs/dir.c   	if (!add_to_page_cache_lru(page, d_inode(dentry)->i_mapping, 0,
page             2000 fs/nfs/dir.c   		SetPageUptodate(page);
page             2001 fs/nfs/dir.c   		unlock_page(page);
page             2006 fs/nfs/dir.c   		put_page(page);
page             2008 fs/nfs/dir.c   		__free_page(page);
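The nfs_symlink() entries from fs/nfs/dir.c above end with an optimisation: once the server accepts the SYMLINK, the client primes the page cache with the body it already holds, so the first readlink needs no round trip. A condensed sketch with error paths trimmed (the GFP_KERNEL mask is an assumption, as the index elides it):

	/* Sketch of priming the new symlink's page cache, per nfs_symlink()
	 * above; add_to_page_cache_lru() takes its own page reference. */
	static void prime_symlink_cache(struct dentry *dentry, struct page *page)
	{
		struct address_space *mapping = d_inode(dentry)->i_mapping;

		if (!add_to_page_cache_lru(page, mapping, 0, GFP_KERNEL)) {
			SetPageUptodate(page);
			unlock_page(page);
			put_page(page);		/* drop our ref; the cache keeps its own */
		} else {
			__free_page(page);	/* insertion failed: just free our copy */
		}
	}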
page              279 fs/nfs/direct.c static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
page              402 fs/nfs/direct.c 		struct page *page = req->wb_page;
page              404 fs/nfs/direct.c 		if (!PageCompound(page) && bytes < hdr->good_bytes &&
page              406 fs/nfs/direct.c 			set_page_dirty(page);
page              464 fs/nfs/direct.c 		struct page **pagevec;
page              870 fs/nfs/direct.c 		struct page **pagevec;
page              285 fs/nfs/file.c  static bool nfs_full_page_write(struct page *page, loff_t pos, unsigned int len)
page              287 fs/nfs/file.c  	unsigned int pglen = nfs_page_length(page);
page              294 fs/nfs/file.c  static bool nfs_want_read_modify_write(struct file *file, struct page *page,
page              301 fs/nfs/file.c  	if (PageUptodate(page) || PagePrivate(page) ||
page              302 fs/nfs/file.c  	    nfs_full_page_write(page, pos, len))
page              323 fs/nfs/file.c  			struct page **pagep, void **fsdata)
page              327 fs/nfs/file.c  	struct page *page;
page              334 fs/nfs/file.c  	page = grab_cache_page_write_begin(mapping, index, flags);
page              335 fs/nfs/file.c  	if (!page)
page              337 fs/nfs/file.c  	*pagep = page;
page              339 fs/nfs/file.c  	ret = nfs_flush_incompatible(file, page);
page              341 fs/nfs/file.c  		unlock_page(page);
page              342 fs/nfs/file.c  		put_page(page);
page              344 fs/nfs/file.c  		   nfs_want_read_modify_write(file, page, pos, len)) {
page              346 fs/nfs/file.c  		ret = nfs_readpage(file, page);
page              347 fs/nfs/file.c  		put_page(page);
page              356 fs/nfs/file.c  			struct page *page, void *fsdata)
page              369 fs/nfs/file.c  	if (!PageUptodate(page)) {
page              370 fs/nfs/file.c  		unsigned pglen = nfs_page_length(page);
page              374 fs/nfs/file.c  			zero_user_segments(page, 0, offset,
page              376 fs/nfs/file.c  			SetPageUptodate(page);
page              378 fs/nfs/file.c  			zero_user_segment(page, end, PAGE_SIZE);
page              380 fs/nfs/file.c  				SetPageUptodate(page);
page              382 fs/nfs/file.c  			zero_user_segment(page, pglen, PAGE_SIZE);
page              385 fs/nfs/file.c  	status = nfs_updatepage(file, page, offset, copied);
page              387 fs/nfs/file.c  	unlock_page(page);
page              388 fs/nfs/file.c  	put_page(page);
page              410 fs/nfs/file.c  static void nfs_invalidate_page(struct page *page, unsigned int offset,
page              414 fs/nfs/file.c  		 page, offset, length);
page              419 fs/nfs/file.c  	nfs_wb_page_cancel(page_file_mapping(page)->host, page);
page              421 fs/nfs/file.c  	nfs_fscache_invalidate_page(page, page->mapping->host);
page              430 fs/nfs/file.c  static int nfs_release_page(struct page *page, gfp_t gfp)
page              432 fs/nfs/file.c  	dfprintk(PAGECACHE, "NFS: release_page(%p)\n", page);
page              435 fs/nfs/file.c  	if (PagePrivate(page))
page              437 fs/nfs/file.c  	return nfs_fscache_release_page(page, gfp);
page              440 fs/nfs/file.c  static void nfs_check_dirty_writeback(struct page *page,
page              444 fs/nfs/file.c  	struct address_space *mapping = page_file_mapping(page);
page              446 fs/nfs/file.c  	if (!mapping || PageSwapCache(page))
page              465 fs/nfs/file.c  	if (PagePrivate(page))
page              477 fs/nfs/file.c  static int nfs_launder_page(struct page *page)
page              479 fs/nfs/file.c  	struct inode *inode = page_file_mapping(page)->host;
page              483 fs/nfs/file.c  		inode->i_ino, (long long)page_offset(page));
page              485 fs/nfs/file.c  	nfs_fscache_wait_on_page_write(nfsi, page);
page              486 fs/nfs/file.c  	return nfs_wb_page(inode, page);
page              534 fs/nfs/file.c  	struct page *page = vmf->page;
page              543 fs/nfs/file.c  		(long long)page_offset(page));
page              548 fs/nfs/file.c  	nfs_fscache_wait_on_page_write(NFS_I(inode), page);
page              553 fs/nfs/file.c  	lock_page(page);
page              554 fs/nfs/file.c  	mapping = page_file_mapping(page);
page              558 fs/nfs/file.c  	wait_on_page_writeback(page);
page              560 fs/nfs/file.c  	pagelen = nfs_page_length(page);
page              565 fs/nfs/file.c  	if (nfs_flush_incompatible(filp, page) == 0 &&
page              566 fs/nfs/file.c  	    nfs_updatepage(filp, page, 0, pagelen) == 0)
page              571 fs/nfs/file.c  	unlock_page(page);
page              656 fs/nfs/filelayout/filelayout.c 	struct page *scratch;
page             1089 fs/nfs/filelayout/filelayout.c filelayout_search_commit_reqs(struct nfs_commit_info *cinfo, struct page *page)
page             1099 fs/nfs/filelayout/filelayout.c 			if (freq->wb_page == page)
page             1103 fs/nfs/filelayout/filelayout.c 			if (freq->wb_page == page)
page               75 fs/nfs/filelayout/filelayoutdev.c 	struct page *scratch;
page              372 fs/nfs/flexfilelayout/flexfilelayout.c 	struct page *scratch;
page              118 fs/nfs/flexfilelayout/flexfilelayout.h 	struct page *pages[1];
page               47 fs/nfs/flexfilelayout/flexfilelayoutdev.c 	struct page *scratch;
page              338 fs/nfs/fscache.c int nfs_fscache_release_page(struct page *page, gfp_t gfp)
page              340 fs/nfs/fscache.c 	if (PageFsCache(page)) {
page              341 fs/nfs/fscache.c 		struct fscache_cookie *cookie = nfs_i_fscache(page->mapping->host);
page              345 fs/nfs/fscache.c 			 cookie, page, NFS_I(page->mapping->host));
page              347 fs/nfs/fscache.c 		if (!fscache_maybe_release_page(cookie, page, gfp))
page              350 fs/nfs/fscache.c 		nfs_inc_fscache_stats(page->mapping->host,
page              361 fs/nfs/fscache.c void __nfs_fscache_invalidate_page(struct page *page, struct inode *inode)
page              368 fs/nfs/fscache.c 		 cookie, page, NFS_I(inode));
page              370 fs/nfs/fscache.c 	fscache_wait_on_page_write(cookie, page);
page              372 fs/nfs/fscache.c 	BUG_ON(!PageLocked(page));
page              373 fs/nfs/fscache.c 	fscache_uncache_page(cookie, page);
page              374 fs/nfs/fscache.c 	nfs_inc_fscache_stats(page->mapping->host,
page              382 fs/nfs/fscache.c static void nfs_readpage_from_fscache_complete(struct page *page,
page              388 fs/nfs/fscache.c 		 page, context, error);
page              393 fs/nfs/fscache.c 		SetPageUptodate(page);
page              394 fs/nfs/fscache.c 		unlock_page(page);
page              396 fs/nfs/fscache.c 		error = nfs_readpage_async(context, page->mapping->host, page);
page              398 fs/nfs/fscache.c 			unlock_page(page);
page              406 fs/nfs/fscache.c 				struct inode *inode, struct page *page)
page              412 fs/nfs/fscache.c 		 nfs_i_fscache(inode), page, page->index, page->flags, inode);
page              415 fs/nfs/fscache.c 					 page,
page              495 fs/nfs/fscache.c void __nfs_readpage_to_fscache(struct inode *inode, struct page *page, int sync)
page              501 fs/nfs/fscache.c 		 nfs_i_fscache(inode), page, page->index, page->flags, sync);
page              503 fs/nfs/fscache.c 	ret = fscache_write_page(nfs_i_fscache(inode), page,
page              507 fs/nfs/fscache.c 		 page, page->index, page->flags, ret);
page              510 fs/nfs/fscache.c 		fscache_uncache_page(nfs_i_fscache(inode), page);
page               96 fs/nfs/fscache.h extern void __nfs_fscache_invalidate_page(struct page *, struct inode *);
page               97 fs/nfs/fscache.h extern int nfs_fscache_release_page(struct page *, gfp_t);
page              100 fs/nfs/fscache.h 				       struct inode *, struct page *);
page              104 fs/nfs/fscache.h extern void __nfs_readpage_to_fscache(struct inode *, struct page *, int);
page              110 fs/nfs/fscache.h 						  struct page *page)
page              112 fs/nfs/fscache.h 	if (PageFsCache(page))
page              113 fs/nfs/fscache.h 		fscache_wait_on_page_write(nfsi->fscache, page);
page              120 fs/nfs/fscache.h static inline void nfs_fscache_invalidate_page(struct page *page,
page              123 fs/nfs/fscache.h 	if (PageFsCache(page))
page              124 fs/nfs/fscache.h 		__nfs_fscache_invalidate_page(page, inode);
page              132 fs/nfs/fscache.h 					    struct page *page)
page              135 fs/nfs/fscache.h 		return __nfs_readpage_from_fscache(ctx, inode, page);
page              159 fs/nfs/fscache.h 					   struct page *page,
page              162 fs/nfs/fscache.h 	if (PageFsCache(page))
page              163 fs/nfs/fscache.h 		__nfs_readpage_to_fscache(inode, page, sync);
page              206 fs/nfs/fscache.h static inline int nfs_fscache_release_page(struct page *page, gfp_t gfp)
page              210 fs/nfs/fscache.h static inline void nfs_fscache_invalidate_page(struct page *page,
page              213 fs/nfs/fscache.h 						  struct page *page) {}
page              217 fs/nfs/fscache.h 					    struct page *page)
page              230 fs/nfs/fscache.h 					   struct page *page, int sync) {}
page              535 fs/nfs/internal.h 		struct page *, struct page *, enum migrate_mode);
page              658 fs/nfs/internal.h void nfs_mark_page_unstable(struct page *page, struct nfs_commit_info *cinfo)
page              661 fs/nfs/internal.h 		struct inode *inode = page_file_mapping(page)->host;
page              663 fs/nfs/internal.h 		inc_node_page_state(page, NR_UNSTABLE_NFS);
page              673 fs/nfs/internal.h unsigned int nfs_page_length(struct page *page)
page              675 fs/nfs/internal.h 	loff_t i_size = i_size_read(page_file_mapping(page)->host);
page              678 fs/nfs/internal.h 		pgoff_t index = page_index(page);
page              242 fs/nfs/namespace.c 	char *page = (char *) __get_free_page(GFP_USER);
page              245 fs/nfs/namespace.c 	if (page == NULL)
page              248 fs/nfs/namespace.c 	devname = nfs_devname(dentry, page, PAGE_SIZE);
page              254 fs/nfs/namespace.c 	free_page((unsigned long)page);
page              425 fs/nfs/nfs2xdr.c static void encode_path(struct xdr_stream *xdr, struct page **pages, u32 length)
page               50 fs/nfs/nfs3acl.c 	struct page *pages[NFSACL_MAXPAGES] = { };
page              163 fs/nfs/nfs3acl.c 	struct page *pages[NFSACL_MAXPAGES];
page              221 fs/nfs/nfs3proc.c static int nfs3_proc_readlink(struct inode *inode, struct page *page,
page              229 fs/nfs/nfs3proc.c 		.pages		= &page
page              512 fs/nfs/nfs3proc.c nfs3_proc_symlink(struct inode *dir, struct dentry *dentry, struct page *page,
page              531 fs/nfs/nfs3proc.c 	data->arg.symlink.pages = &page;
page              632 fs/nfs/nfs3proc.c 		  u64 cookie, struct page **pages, unsigned int count, bool plus)
page              219 fs/nfs/nfs3xdr.c static void encode_nfspath3(struct xdr_stream *xdr, struct page **pages,
page              261 fs/nfs/nfs4_fs.h 		struct page *, const struct cred *);
page              302 fs/nfs/nfs4_fs.h 				  struct nfs4_fs_locations *, struct page *);
page              304 fs/nfs/nfs4_fs.h 		struct page *page, const struct cred *);
page              103 fs/nfs/nfs4namespace.c 				char *page, char *page2)
page              107 fs/nfs/nfs4namespace.c 	path = nfs4_path(dentry, page, PAGE_SIZE);
page              215 fs/nfs/nfs4namespace.c 	struct page *page;
page              220 fs/nfs/nfs4namespace.c 	page = alloc_page(GFP_KERNEL);
page              221 fs/nfs/nfs4namespace.c 	if (!page)
page              224 fs/nfs/nfs4namespace.c 	flavors = page_address(page);
page              235 fs/nfs/nfs4namespace.c 	put_page(page);
page              240 fs/nfs/nfs4namespace.c 				     char *page, char *page2,
page              278 fs/nfs/nfs4namespace.c 		snprintf(page, PAGE_SIZE, "%s:%s",
page              282 fs/nfs/nfs4namespace.c 		mnt = vfs_submount(mountdata->dentry, &nfs4_referral_fs_type, page, mountdata);
page              305 fs/nfs/nfs4namespace.c 	char *page = NULL, *page2 = NULL;
page              313 fs/nfs/nfs4namespace.c 	page = (char *) __get_free_page(GFP_USER);
page              314 fs/nfs/nfs4namespace.c 	if (!page)
page              322 fs/nfs/nfs4namespace.c 	error = nfs4_validate_fspath(dentry, locations, page, page2);
page              335 fs/nfs/nfs4namespace.c 		mnt = try_location(&mountdata, page, page2, location);
page              341 fs/nfs/nfs4namespace.c 	free_page((unsigned long) page);
page              356 fs/nfs/nfs4namespace.c 	struct page *page;
page              360 fs/nfs/nfs4namespace.c 	page = alloc_page(GFP_KERNEL);
page              361 fs/nfs/nfs4namespace.c 	if (page == NULL)
page              375 fs/nfs/nfs4namespace.c 	err = nfs4_proc_fs_locations(client, d_inode(parent), &dentry->d_name, fs_locations, page);
page              384 fs/nfs/nfs4namespace.c 	__free_page(page);
page              424 fs/nfs/nfs4namespace.c 		char *page, char *page2,
page              485 fs/nfs/nfs4namespace.c 	char *page = NULL, *page2 = NULL;
page              493 fs/nfs/nfs4namespace.c 	page = (char *) __get_free_page(GFP_USER);
page              494 fs/nfs/nfs4namespace.c 	if (!page)
page              508 fs/nfs/nfs4namespace.c 		error = nfs4_try_replacing_one_location(server, page,
page              515 fs/nfs/nfs4namespace.c 	free_page((unsigned long)page);
page             4029 fs/nfs/nfs4proc.c 	struct page *page = NULL;
page             4032 fs/nfs/nfs4proc.c 	page = alloc_page(GFP_KERNEL);
page             4033 fs/nfs/nfs4proc.c 	if (page == NULL)
page             4039 fs/nfs/nfs4proc.c 	status = nfs4_proc_fs_locations(client, dir, name, locations, page);
page             4061 fs/nfs/nfs4proc.c 	if (page)
page             4062 fs/nfs/nfs4proc.c 		__free_page(page);
page             4412 fs/nfs/nfs4proc.c static int _nfs4_proc_readlink(struct inode *inode, struct page *page,
page             4419 fs/nfs/nfs4proc.c 		.pages    = &page,
page             4431 fs/nfs/nfs4proc.c static int nfs4_proc_readlink(struct inode *inode, struct page *page,
page             4439 fs/nfs/nfs4proc.c 		err = _nfs4_proc_readlink(inode, page, pgbase, pglen);
page             4776 fs/nfs/nfs4proc.c 		struct page *page, unsigned int len, struct iattr *sattr,
page             4791 fs/nfs/nfs4proc.c 	data->arg.u.symlink.pages = &page;
page             4803 fs/nfs/nfs4proc.c 		struct page *page, unsigned int len, struct iattr *sattr)
page             4814 fs/nfs/nfs4proc.c 		err = _nfs4_proc_symlink(dir, dentry, page, len, sattr, label);
page             4868 fs/nfs/nfs4proc.c 		u64 cookie, struct page **pages, unsigned int count, bool plus)
page             4906 fs/nfs/nfs4proc.c 		u64 cookie, struct page **pages, unsigned int count, bool plus)
page             5493 fs/nfs/nfs4proc.c 		struct page **pages)
page             5495 fs/nfs/nfs4proc.c 	struct page *newpage, **spages;
page             5567 fs/nfs/nfs4proc.c static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size_t pgbase, size_t acl_len)
page             5601 fs/nfs/nfs4proc.c 	struct page *pages[NFS4ACL_MAXPAGES + 1] = {NULL, };
page             5707 fs/nfs/nfs4proc.c 	struct page *pages[NFS4ACL_MAXPAGES];
page             7398 fs/nfs/nfs4proc.c 				   struct page *page)
page             7405 fs/nfs/nfs4proc.c 		.page = page,
page             7441 fs/nfs/nfs4proc.c 			   struct page *page)
page             7449 fs/nfs/nfs4proc.c 				fs_locations, page);
page             7466 fs/nfs/nfs4proc.c 				     struct page *page, const struct cred *cred)
page             7476 fs/nfs/nfs4proc.c 		.page		= page,
page             7523 fs/nfs/nfs4proc.c 				     struct page *page, const struct cred *cred)
page             7532 fs/nfs/nfs4proc.c 		.page		= page,
page             7582 fs/nfs/nfs4proc.c 			    struct page *page, const struct cred *cred)
page             7600 fs/nfs/nfs4proc.c 		status = ops->get_locations(inode, locations, page, cred);
page             9561 fs/nfs/nfs4proc.c 	struct page *page;
page             9567 fs/nfs/nfs4proc.c 	page = alloc_page(GFP_KERNEL);
page             9568 fs/nfs/nfs4proc.c 	if (!page) {
page             9573 fs/nfs/nfs4proc.c 	flavors = page_address(page);
page             9617 fs/nfs/nfs4proc.c 	put_page(page);
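
In the secinfo entries just above, a single allocated page doubles as a typed scratch buffer through page_address(). A hedged sketch of that pattern; struct demo_flavors and demo_fill_flavors() are hypothetical:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/types.h>

struct demo_flavors {		/* hypothetical reply layout */
	u32 num_flavors;
	u32 flavors[64];
};

/* alloc_page() returns a referenced struct page; page_address() gives a
 * kernel virtual address (always valid for a GFP_KERNEL lowmem page);
 * put_page() drops the reference once the buffer is decoded.
 */
static int demo_fill_flavors(void)
{
	struct page *page = alloc_page(GFP_KERNEL);
	struct demo_flavors *res;

	if (!page)
		return -ENOMEM;
	res = page_address(page);
	res->num_flavors = 0;
	/* ... decode an RPC reply into *res ... */
	put_page(page);
	return 0;
}
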
page             2048 fs/nfs/nfs4state.c 	struct page *page;
page             2057 fs/nfs/nfs4state.c 	page = alloc_page(GFP_KERNEL);
page             2059 fs/nfs/nfs4state.c 	if (page == NULL || locations == NULL) {
page             2065 fs/nfs/nfs4state.c 	result = nfs4_proc_get_locations(inode, locations, page, cred);
page             2096 fs/nfs/nfs4state.c 	if (page != NULL)
page             2097 fs/nfs/nfs4state.c 		__free_page(page);
page             2815 fs/nfs/nfs4xdr.c 	rpc_prepare_reply_pages(req, (struct page **)&args->page, 0,
page              300 fs/nfs/pagelist.c __nfs_create_request(struct nfs_lock_context *l_ctx, struct page *page,
page              321 fs/nfs/pagelist.c 	req->wb_page    = page;
page              322 fs/nfs/pagelist.c 	if (page) {
page              323 fs/nfs/pagelist.c 		req->wb_index = page_index(page);
page              324 fs/nfs/pagelist.c 		get_page(page);
page              346 fs/nfs/pagelist.c nfs_create_request(struct nfs_open_context *ctx, struct page *page,
page              354 fs/nfs/pagelist.c 	ret = __nfs_create_request(l_ctx, page, offset, offset, count);
page              416 fs/nfs/pagelist.c 	struct page *page = req->wb_page;
page              420 fs/nfs/pagelist.c 	if (page != NULL) {
page              421 fs/nfs/pagelist.c 		put_page(page);
page              509 fs/nfs/pagelist.c 			sizeof(struct page *) > PAGE_SIZE)
page              764 fs/nfs/pagelist.c 	struct page		**pages,
page              778 fs/nfs/pagelist.c 		pg_array->pagevec = kcalloc(pagecount, sizeof(struct page *), gfp_flags);
page              971 fs/nfs/pnfs.c  static void nfs4_free_pages(struct page **pages, size_t size)
page              986 fs/nfs/pnfs.c  static struct page **nfs4_alloc_pages(size_t size, gfp_t gfp_flags)
page              988 fs/nfs/pnfs.c  	struct page **pages;
page              991 fs/nfs/pnfs.c  	pages = kmalloc_array(size, sizeof(struct page *), gfp_flags);
page              162 fs/nfs/pnfs.h  						struct page *page);
page              213 fs/nfs/pnfs.h  	struct page **pages;
page              499 fs/nfs/pnfs.h  			struct page *page)
page              505 fs/nfs/pnfs.h  	return ld->search_commit_reqs(cinfo, page);
page              775 fs/nfs/pnfs.h  			struct page *page)
page              101 fs/nfs/pnfs_dev.c 	struct page **pages = NULL;
page              122 fs/nfs/pnfs_dev.c 	pages = kcalloc(max_pages, sizeof(struct page *), gfp_flags);
page              177 fs/nfs/proc.c  static int nfs_proc_readlink(struct inode *inode, struct page *page,
page              184 fs/nfs/proc.c  		.pages		= &page
page              389 fs/nfs/proc.c  nfs_proc_symlink(struct inode *dir, struct dentry *dentry, struct page *page,
page              398 fs/nfs/proc.c  		.pages		= &page,
page              494 fs/nfs/proc.c  		 u64 cookie, struct page **pages, unsigned int count, bool plus)
page               53 fs/nfs/read.c  int nfs_return_empty_page(struct page *page)
page               55 fs/nfs/read.c  	zero_user(page, 0, PAGE_SIZE);
page               56 fs/nfs/read.c  	SetPageUptodate(page);
page               57 fs/nfs/read.c  	unlock_page(page);
page               97 fs/nfs/read.c  	struct page *page = req->wb_page;
page              104 fs/nfs/read.c  		SetPageError(page);
page              106 fs/nfs/read.c  		struct address_space *mapping = page_file_mapping(page);
page              108 fs/nfs/read.c  		if (PageUptodate(page))
page              109 fs/nfs/read.c  			nfs_readpage_to_fscache(inode, page, 0);
page              110 fs/nfs/read.c  		else if (!PageError(page) && !PagePrivate(page))
page              111 fs/nfs/read.c  			generic_error_remove_page(mapping, page);
page              112 fs/nfs/read.c  		unlock_page(page);
page              118 fs/nfs/read.c  		       struct page *page)
page              125 fs/nfs/read.c  	len = nfs_page_length(page);
page              127 fs/nfs/read.c  		return nfs_return_empty_page(page);
page              128 fs/nfs/read.c  	new = nfs_create_request(ctx, page, 0, len);
page              130 fs/nfs/read.c  		unlock_page(page);
page              134 fs/nfs/read.c  		zero_user_segment(page, len, PAGE_SIZE);
page              168 fs/nfs/read.c  		struct page *page = req->wb_page;
page              179 fs/nfs/read.c  				zero_user_segment(page, start, end);
page              186 fs/nfs/read.c  				zero_user_segment(page, start, end);
page              310 fs/nfs/read.c  int nfs_readpage(struct file *file, struct page *page)
page              313 fs/nfs/read.c  	struct inode *inode = page_file_mapping(page)->host;
page              317 fs/nfs/read.c  		page, PAGE_SIZE, page_index(page));
page              328 fs/nfs/read.c  	error = nfs_wb_page(inode, page);
page              331 fs/nfs/read.c  	if (PageUptodate(page))
page              347 fs/nfs/read.c  		error = nfs_readpage_from_fscache(ctx, inode, page);
page              353 fs/nfs/read.c  	error = nfs_readpage_async(ctx, inode, page);
page              355 fs/nfs/read.c  		error = wait_on_page_locked_killable(page);
page              356 fs/nfs/read.c  		if (!PageUptodate(page) && !error)
page              363 fs/nfs/read.c  	unlock_page(page);
page              373 fs/nfs/read.c  readpage_async_filler(void *data, struct page *page)
page              380 fs/nfs/read.c  	len = nfs_page_length(page);
page              382 fs/nfs/read.c  		return nfs_return_empty_page(page);
page              384 fs/nfs/read.c  	new = nfs_create_request(desc->ctx, page, 0, len);
page              389 fs/nfs/read.c  		zero_user_segment(page, len, PAGE_SIZE);
page              399 fs/nfs/read.c  	unlock_page(page);
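
The read.c entries above repeat one shape: a page lying wholly past EOF is zero-filled and marked uptodate on the spot, while a partial last page only has its tail zeroed before the real read is queued. A hedged sketch of that shape; demo_prepare_read() is hypothetical and len plays the role of nfs_page_length()'s result:

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

/* Hypothetical helper: returns 0 when the page was satisfied with
 * zeroes, 1 when the caller still has to read bytes [0, len) from the
 * server. The page arrives locked, as ->readpage() guarantees.
 */
static int demo_prepare_read(struct page *page, unsigned int len)
{
	if (len == 0) {
		zero_user(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		unlock_page(page);
		return 0;
	}
	if (len < PAGE_SIZE)
		zero_user_segment(page, len, PAGE_SIZE);
	return 1;
}
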
page              797 fs/nfs/super.c 	char *page = (char *) __get_free_page(GFP_KERNEL);
page              800 fs/nfs/super.c 	if (!page)
page              802 fs/nfs/super.c 	devname = nfs_path(&dummy, root, page, PAGE_SIZE, 0);
page              807 fs/nfs/super.c 	free_page((unsigned long)page);
page               29 fs/nfs/symlink.c static int nfs_symlink_filler(void *data, struct page *page)
page               34 fs/nfs/symlink.c 	error = NFS_PROTO(inode)->readlink(inode, page, 0, PAGE_SIZE);
page               37 fs/nfs/symlink.c 	SetPageUptodate(page);
page               38 fs/nfs/symlink.c 	unlock_page(page);
page               42 fs/nfs/symlink.c 	SetPageError(page);
page               43 fs/nfs/symlink.c 	unlock_page(page);
page               51 fs/nfs/symlink.c 	struct page *page;
page               58 fs/nfs/symlink.c 		page = find_get_page(inode->i_mapping, 0);
page               59 fs/nfs/symlink.c 		if (!page)
page               61 fs/nfs/symlink.c 		if (!PageUptodate(page)) {
page               62 fs/nfs/symlink.c 			put_page(page);
page               69 fs/nfs/symlink.c 		page = read_cache_page(&inode->i_data, 0, nfs_symlink_filler,
page               71 fs/nfs/symlink.c 		if (IS_ERR(page))
page               72 fs/nfs/symlink.c 			return ERR_CAST(page);
page               74 fs/nfs/symlink.c 	set_delayed_call(done, page_put_link, page);
page               75 fs/nfs/symlink.c 	return page_address(page);
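
The symlink.c entries trace the standard page-backed get_link() shape. A hedged sketch of it rather than the verbatim NFS code; demo_get_link() is hypothetical and the filler argument stands for whatever readlink-backed function the filesystem supplies:

#include <linux/err.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

/* Read page 0 of the symlink's mapping through the filler, keep the page
 * reference for the duration of the path walk, and register
 * page_put_link() so the VFS drops it when done with the target string.
 */
static const char *demo_get_link(struct inode *inode,
				 struct delayed_call *done,
				 int (*filler)(void *, struct page *))
{
	struct page *page;

	page = read_cache_page(&inode->i_data, 0, filler, inode);
	if (IS_ERR(page))
		return ERR_CAST(page);
	set_delayed_call(done, page_put_link, page);
	return page_address(page);
}
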
page               66 fs/nfs/write.c 						struct page *page);
page              153 fs/nfs/write.c nfs_page_private_request(struct page *page)
page              155 fs/nfs/write.c 	if (!PagePrivate(page))
page              157 fs/nfs/write.c 	return (struct nfs_page *)page_private(page);
page              168 fs/nfs/write.c nfs_page_find_private_request(struct page *page)
page              170 fs/nfs/write.c 	struct address_space *mapping = page_file_mapping(page);
page              173 fs/nfs/write.c 	if (!PagePrivate(page))
page              176 fs/nfs/write.c 	req = nfs_page_private_request(page);
page              186 fs/nfs/write.c nfs_page_find_swap_request(struct page *page)
page              188 fs/nfs/write.c 	struct inode *inode = page_file_mapping(page)->host;
page              191 fs/nfs/write.c 	if (!PageSwapCache(page))
page              194 fs/nfs/write.c 	if (PageSwapCache(page)) {
page              196 fs/nfs/write.c 			page);
page              211 fs/nfs/write.c static struct nfs_page *nfs_page_find_head_request(struct page *page)
page              215 fs/nfs/write.c 	req = nfs_page_find_private_request(page);
page              217 fs/nfs/write.c 		req = nfs_page_find_swap_request(page);
page              222 fs/nfs/write.c static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
page              224 fs/nfs/write.c 	struct inode *inode = page_file_mapping(page)->host;
page              231 fs/nfs/write.c 	if (i_size > 0 && page_index(page) < end_index)
page              233 fs/nfs/write.c 	end = page_file_offset(page) + ((loff_t)offset+count);
page              257 fs/nfs/write.c static void nfs_mapping_set_error(struct page *page, int error)
page              259 fs/nfs/write.c 	SetPageError(page);
page              260 fs/nfs/write.c 	mapping_set_error(page_file_mapping(page), error);
page              350 fs/nfs/write.c static void nfs_set_page_writeback(struct page *page)
page              352 fs/nfs/write.c 	struct inode *inode = page_file_mapping(page)->host;
page              354 fs/nfs/write.c 	int ret = test_set_page_writeback(page);
page              476 fs/nfs/write.c nfs_lock_and_join_requests(struct page *page)
page              478 fs/nfs/write.c 	struct inode *inode = page_file_mapping(page)->host;
page              490 fs/nfs/write.c 	head = nfs_page_find_head_request(page);
page              504 fs/nfs/write.c 	if (head != nfs_page_private_request(page) && !PageSwapCache(page)) {
page              588 fs/nfs/write.c 	if (!(PagePrivate(page) || PageSwapCache(page))) {
page              616 fs/nfs/write.c 				struct page *page)
page              621 fs/nfs/write.c 	req = nfs_lock_and_join_requests(page);
page              628 fs/nfs/write.c 	nfs_set_page_writeback(page);
page              650 fs/nfs/write.c 		nfs_add_stats(page_file_mapping(page)->host,
page              659 fs/nfs/write.c static int nfs_do_writepage(struct page *page, struct writeback_control *wbc,
page              664 fs/nfs/write.c 	nfs_pageio_cond_complete(pgio, page_index(page));
page              665 fs/nfs/write.c 	ret = nfs_page_async_flush(pgio, page);
page              667 fs/nfs/write.c 		redirty_page_for_writepage(wbc, page);
page              676 fs/nfs/write.c static int nfs_writepage_locked(struct page *page,
page              680 fs/nfs/write.c 	struct inode *inode = page_file_mapping(page)->host;
page              686 fs/nfs/write.c 	err = nfs_do_writepage(page, wbc, &pgio);
page              696 fs/nfs/write.c int nfs_writepage(struct page *page, struct writeback_control *wbc)
page              700 fs/nfs/write.c 	ret = nfs_writepage_locked(page, wbc);
page              702 fs/nfs/write.c 		unlock_page(page);
page              706 fs/nfs/write.c static int nfs_writepages_callback(struct page *page, struct writeback_control *wbc, void *data)
page              710 fs/nfs/write.c 	ret = nfs_do_writepage(page, wbc, data);
page              712 fs/nfs/write.c 		unlock_page(page);
page              833 fs/nfs/write.c 						struct page *page)
page              842 fs/nfs/write.c 	freq = pnfs_search_commit_reqs(inode, &cinfo, page);
page              848 fs/nfs/write.c 		if (freq->wb_page == page)
page              957 fs/nfs/write.c nfs_clear_page_commit(struct page *page)
page              959 fs/nfs/write.c 	dec_node_page_state(page, NR_UNSTABLE_NFS);
page              960 fs/nfs/write.c 	dec_wb_stat(&inode_to_bdi(page_file_mapping(page)->host)->wb,
page             1116 fs/nfs/write.c 		struct page *page,
page             1127 fs/nfs/write.c 	req = nfs_lock_and_join_requests(page);
page             1160 fs/nfs/write.c 	error = nfs_wb_page(inode, page);
page             1172 fs/nfs/write.c 		struct page *page, unsigned int offset, unsigned int bytes)
page             1174 fs/nfs/write.c 	struct inode *inode = page_file_mapping(page)->host;
page             1177 fs/nfs/write.c 	req = nfs_try_to_update_request(inode, page, offset, bytes);
page             1180 fs/nfs/write.c 	req = nfs_create_request(ctx, page, offset, bytes);
page             1188 fs/nfs/write.c static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
page             1193 fs/nfs/write.c 	req = nfs_setup_write_request(ctx, page, offset, count);
page             1197 fs/nfs/write.c 	nfs_grow_file(page, offset, count);
page             1204 fs/nfs/write.c int nfs_flush_incompatible(struct file *file, struct page *page)
page             1220 fs/nfs/write.c 		req = nfs_page_find_head_request(page);
page             1224 fs/nfs/write.c 		do_flush = req->wb_page != page ||
page             1234 fs/nfs/write.c 		status = nfs_wb_page(page_file_mapping(page)->host, page);
page             1291 fs/nfs/write.c static bool nfs_write_pageuptodate(struct page *page, struct inode *inode)
page             1305 fs/nfs/write.c 	return PageUptodate(page) != 0;
page             1323 fs/nfs/write.c static int nfs_can_extend_write(struct file *file, struct page *page, struct inode *inode)
page             1331 fs/nfs/write.c 	if (!nfs_write_pageuptodate(page, inode))
page             1363 fs/nfs/write.c int nfs_updatepage(struct file *file, struct page *page,
page             1367 fs/nfs/write.c 	struct address_space *mapping = page_file_mapping(page);
page             1374 fs/nfs/write.c 		file, count, (long long)(page_file_offset(page) + offset));
page             1379 fs/nfs/write.c 	if (nfs_can_extend_write(file, page, inode)) {
page             1380 fs/nfs/write.c 		count = max(count + offset, nfs_page_length(page));
page             1384 fs/nfs/write.c 	status = nfs_writepage_setup(ctx, page, offset, count);
page             1388 fs/nfs/write.c 		__set_page_dirty_nobuffers(page);
page             2043 fs/nfs/write.c int nfs_wb_page_cancel(struct inode *inode, struct page *page)
page             2048 fs/nfs/write.c 	wait_on_page_writeback(page);
page             2052 fs/nfs/write.c 	req = nfs_lock_and_join_requests(page);
page             2071 fs/nfs/write.c int nfs_wb_page(struct inode *inode, struct page *page)
page             2073 fs/nfs/write.c 	loff_t range_start = page_file_offset(page);
page             2086 fs/nfs/write.c 		wait_on_page_writeback(page);
page             2087 fs/nfs/write.c 		if (clear_page_dirty_for_io(page)) {
page             2088 fs/nfs/write.c 			ret = nfs_writepage_locked(page, &wbc);
page             2094 fs/nfs/write.c 		if (!PagePrivate(page))
page             2106 fs/nfs/write.c int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
page             2107 fs/nfs/write.c 		struct page *page, enum migrate_mode mode)
page             2117 fs/nfs/write.c 	if (PagePrivate(page))
page             2120 fs/nfs/write.c 	if (!nfs_fscache_release_page(page, GFP_KERNEL))
page             2123 fs/nfs/write.c 	return migrate_page(mapping, newpage, page, mode);
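
nfs_wb_page() above is the canonical "flush one page and wait" loop. A hedged sketch of its control flow with hypothetical demo_* callbacks standing in for the locked writepage path and the unstable-write commit; the caller is assumed to hold the page lock throughout, as nfs_wb_page()'s callers do:

#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/pagemap.h>

/* Loop until the page is neither dirty nor carrying an attached request:
 * each pass waits out in-flight writeback, rewrites the page if it was
 * (re)dirtied, and otherwise commits whatever request is still attached.
 */
static int demo_wb_page(struct page *page,
			int (*demo_writepage)(struct page *),
			int (*demo_commit)(struct page *))
{
	int ret;

	for (;;) {
		wait_on_page_writeback(page);
		if (clear_page_dirty_for_io(page)) {
			ret = demo_writepage(page);
			if (ret < 0)
				return ret;
			continue;	/* may have been redirtied */
		}
		if (!PagePrivate(page))
			return 0;	/* nothing left to flush */
		ret = demo_commit(page);
		if (ret < 0)
			return ret;
	}
}
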
page              441 fs/nfsd/nfs3proc.c 	struct page	**p;
page              503 fs/nfsd/nfs3proc.c 	struct page **p;
page              385 fs/nfsd/nfs3xdr.c 		struct page *p = *(rqstp->rq_next_page++);
page              591 fs/nfsd/nfs3xdr.c 		struct page *p = *(rqstp->rq_next_page++);
page              617 fs/nfsd/nfs3xdr.c 		struct page *p = *(rqstp->rq_next_page++);
page              921 fs/nfsd/nfs3xdr.c 	struct page **	page;
page              958 fs/nfsd/nfs3xdr.c 	for (page = cd->rqstp->rq_respages + 1;
page              959 fs/nfsd/nfs3xdr.c 				page < cd->rqstp->rq_next_page; page++) {
page              960 fs/nfsd/nfs3xdr.c 		curr_page_addr = page_address(*page);
page              975 fs/nfsd/nfs3xdr.c 	} else if (*(page+1) != NULL) {
page              982 fs/nfsd/nfs3xdr.c 		p1 = tmp = page_address(*(page+1));
page              258 fs/nfsd/nfsxdr.c 		struct page *p = *(rqstp->rq_next_page++);
page              822 fs/nfsd/vfs.c  	struct page **pp = rqstp->rq_next_page;
page              823 fs/nfsd/vfs.c  	struct page *page = buf->page;
page              829 fs/nfsd/vfs.c  		get_page(page);
page              831 fs/nfsd/vfs.c  		*(rqstp->rq_next_page++) = page;
page              834 fs/nfsd/vfs.c  	} else if (page != pp[-1]) {
page              835 fs/nfsd/vfs.c  		get_page(page);
page              838 fs/nfsd/vfs.c  		*(rqstp->rq_next_page++) = page;
page              365 fs/nfsd/xdr4.h 	struct page **pagelist;
page              396 fs/nfsd/xdr4.h 	struct page **	wr_pagelist;        /* request */
page              649 fs/nfsd/xdr4.h 	struct page **			pagelist;
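
The fs/nfsd entries above revolve around the reply-page cursor in struct svc_rqst: rq_respages points at the start of the response page array and rq_next_page is a moving cursor one past the last page handed out. A minimal sketch of a consumer claiming the next page; the field names come from the sunrpc header, the helper itself is hypothetical:

#include <linux/mm.h>
#include <linux/sunrpc/svc.h>

/* Hypothetical helper: post-increment the cursor, exactly as the
 * nfsxdr.c/nfs3xdr.c lines above do, and return a kernel address the
 * XDR encoder can write reply data into.
 */
static void *demo_next_reply_buf(struct svc_rqst *rqstp)
{
	struct page *p = *(rqstp->rq_next_page++);

	return page_address(p);
}
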
page               61 fs/nilfs2/btnode.c 	struct page *page;
page               69 fs/nilfs2/btnode.c 	page = bh->b_page;
page              116 fs/nilfs2/btnode.c 	unlock_page(page);
page              117 fs/nilfs2/btnode.c 	put_page(page);
page              131 fs/nilfs2/btnode.c 	struct page *page = bh->b_page;
page              132 fs/nilfs2/btnode.c 	pgoff_t index = page_index(page);
page              135 fs/nilfs2/btnode.c 	get_page(page);
page              136 fs/nilfs2/btnode.c 	lock_page(page);
page              137 fs/nilfs2/btnode.c 	wait_on_page_writeback(page);
page              140 fs/nilfs2/btnode.c 	still_dirty = PageDirty(page);
page              141 fs/nilfs2/btnode.c 	mapping = page->mapping;
page              142 fs/nilfs2/btnode.c 	unlock_page(page);
page              143 fs/nilfs2/btnode.c 	put_page(page);
page              171 fs/nilfs2/btnode.c 		struct page *opage = obh->b_page;
page              224 fs/nilfs2/btnode.c 	struct page *opage;
page               67 fs/nilfs2/dir.c static inline void nilfs_put_page(struct page *page)
page               69 fs/nilfs2/dir.c 	kunmap(page);
page               70 fs/nilfs2/dir.c 	put_page(page);
page               87 fs/nilfs2/dir.c static int nilfs_prepare_chunk(struct page *page, unsigned int from,
page               90 fs/nilfs2/dir.c 	loff_t pos = page_offset(page) + from;
page               92 fs/nilfs2/dir.c 	return __block_write_begin(page, pos, to - from, nilfs_get_block);
page               95 fs/nilfs2/dir.c static void nilfs_commit_chunk(struct page *page,
page              100 fs/nilfs2/dir.c 	loff_t pos = page_offset(page) + from;
page              105 fs/nilfs2/dir.c 	nr_dirty = nilfs_page_count_clean_buffers(page, from, to);
page              106 fs/nilfs2/dir.c 	copied = block_write_end(NULL, mapping, pos, len, len, page, NULL);
page              113 fs/nilfs2/dir.c 	unlock_page(page);
page              116 fs/nilfs2/dir.c static bool nilfs_check_page(struct page *page)
page              118 fs/nilfs2/dir.c 	struct inode *dir = page->mapping->host;
page              121 fs/nilfs2/dir.c 	char *kaddr = page_address(page);
page              127 fs/nilfs2/dir.c 	if ((dir->i_size >> PAGE_SHIFT) == page->index) {
page              150 fs/nilfs2/dir.c 	SetPageChecked(page);
page              174 fs/nilfs2/dir.c 		    dir->i_ino, error, (page->index << PAGE_SHIFT) + offs,
page              182 fs/nilfs2/dir.c 		    dir->i_ino, (page->index << PAGE_SHIFT) + offs,
page              185 fs/nilfs2/dir.c 	SetPageError(page);
page              189 fs/nilfs2/dir.c static struct page *nilfs_get_page(struct inode *dir, unsigned long n)
page              192 fs/nilfs2/dir.c 	struct page *page = read_mapping_page(mapping, n, NULL);
page              194 fs/nilfs2/dir.c 	if (!IS_ERR(page)) {
page              195 fs/nilfs2/dir.c 		kmap(page);
page              196 fs/nilfs2/dir.c 		if (unlikely(!PageChecked(page))) {
page              197 fs/nilfs2/dir.c 			if (PageError(page) || !nilfs_check_page(page))
page              201 fs/nilfs2/dir.c 	return page;
page              204 fs/nilfs2/dir.c 	nilfs_put_page(page);
page              278 fs/nilfs2/dir.c 		struct page *page = nilfs_get_page(inode, n);
page              280 fs/nilfs2/dir.c 		if (IS_ERR(page)) {
page              285 fs/nilfs2/dir.c 		kaddr = page_address(page);
page              292 fs/nilfs2/dir.c 				nilfs_put_page(page);
page              305 fs/nilfs2/dir.c 					nilfs_put_page(page);
page              311 fs/nilfs2/dir.c 		nilfs_put_page(page);
page              326 fs/nilfs2/dir.c 		 struct page **res_page)
page              333 fs/nilfs2/dir.c 	struct page *page = NULL;
page              350 fs/nilfs2/dir.c 		page = nilfs_get_page(dir, n);
page              351 fs/nilfs2/dir.c 		if (!IS_ERR(page)) {
page              352 fs/nilfs2/dir.c 			kaddr = page_address(page);
page              359 fs/nilfs2/dir.c 					nilfs_put_page(page);
page              366 fs/nilfs2/dir.c 			nilfs_put_page(page);
page              383 fs/nilfs2/dir.c 	*res_page = page;
page              388 fs/nilfs2/dir.c struct nilfs_dir_entry *nilfs_dotdot(struct inode *dir, struct page **p)
page              390 fs/nilfs2/dir.c 	struct page *page = nilfs_get_page(dir, 0);
page              393 fs/nilfs2/dir.c 	if (!IS_ERR(page)) {
page              395 fs/nilfs2/dir.c 			(struct nilfs_dir_entry *)page_address(page));
page              396 fs/nilfs2/dir.c 		*p = page;
page              405 fs/nilfs2/dir.c 	struct page *page;
page              407 fs/nilfs2/dir.c 	de = nilfs_find_entry(dir, qstr, &page);
page              410 fs/nilfs2/dir.c 		kunmap(page);
page              411 fs/nilfs2/dir.c 		put_page(page);
page              418 fs/nilfs2/dir.c 		    struct page *page, struct inode *inode)
page              420 fs/nilfs2/dir.c 	unsigned int from = (char *)de - (char *)page_address(page);
page              422 fs/nilfs2/dir.c 	struct address_space *mapping = page->mapping;
page              425 fs/nilfs2/dir.c 	lock_page(page);
page              426 fs/nilfs2/dir.c 	err = nilfs_prepare_chunk(page, from, to);
page              430 fs/nilfs2/dir.c 	nilfs_commit_chunk(page, mapping, from, to);
page              431 fs/nilfs2/dir.c 	nilfs_put_page(page);
page              446 fs/nilfs2/dir.c 	struct page *page = NULL;
page              462 fs/nilfs2/dir.c 		page = nilfs_get_page(dir, n);
page              463 fs/nilfs2/dir.c 		err = PTR_ERR(page);
page              464 fs/nilfs2/dir.c 		if (IS_ERR(page))
page              466 fs/nilfs2/dir.c 		lock_page(page);
page              467 fs/nilfs2/dir.c 		kaddr = page_address(page);
page              497 fs/nilfs2/dir.c 		unlock_page(page);
page              498 fs/nilfs2/dir.c 		nilfs_put_page(page);
page              504 fs/nilfs2/dir.c 	from = (char *)de - (char *)page_address(page);
page              506 fs/nilfs2/dir.c 	err = nilfs_prepare_chunk(page, from, to);
page              521 fs/nilfs2/dir.c 	nilfs_commit_chunk(page, page->mapping, from, to);
page              526 fs/nilfs2/dir.c 	nilfs_put_page(page);
page              530 fs/nilfs2/dir.c 	unlock_page(page);
page              538 fs/nilfs2/dir.c int nilfs_delete_entry(struct nilfs_dir_entry *dir, struct page *page)
page              540 fs/nilfs2/dir.c 	struct address_space *mapping = page->mapping;
page              542 fs/nilfs2/dir.c 	char *kaddr = page_address(page);
page              562 fs/nilfs2/dir.c 		from = (char *)pde - (char *)page_address(page);
page              563 fs/nilfs2/dir.c 	lock_page(page);
page              564 fs/nilfs2/dir.c 	err = nilfs_prepare_chunk(page, from, to);
page              569 fs/nilfs2/dir.c 	nilfs_commit_chunk(page, mapping, from, to);
page              572 fs/nilfs2/dir.c 	nilfs_put_page(page);
page              582 fs/nilfs2/dir.c 	struct page *page = grab_cache_page(mapping, 0);
page              588 fs/nilfs2/dir.c 	if (!page)
page              591 fs/nilfs2/dir.c 	err = nilfs_prepare_chunk(page, 0, chunk_size);
page              593 fs/nilfs2/dir.c 		unlock_page(page);
page              596 fs/nilfs2/dir.c 	kaddr = kmap_atomic(page);
page              612 fs/nilfs2/dir.c 	nilfs_commit_chunk(page, mapping, 0, chunk_size);
page              614 fs/nilfs2/dir.c 	put_page(page);
page              623 fs/nilfs2/dir.c 	struct page *page = NULL;
page              630 fs/nilfs2/dir.c 		page = nilfs_get_page(inode, i);
page              631 fs/nilfs2/dir.c 		if (IS_ERR(page))
page              634 fs/nilfs2/dir.c 		kaddr = page_address(page);
page              660 fs/nilfs2/dir.c 		nilfs_put_page(page);
page              665 fs/nilfs2/dir.c 	nilfs_put_page(page);
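
The dir.c entries above pair nilfs_get_page() with nilfs_put_page() around every directory walk. A hedged sketch of that map/validate/unmap pairing with hypothetical demo_* names; read_mapping_page() brings the page uptodate, kmap() pins a kernel address for scanning entries, and the kunmap()/put_page() pair undoes both:

#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

static struct page *demo_get_dir_page(struct address_space *mapping,
				      unsigned long n)
{
	struct page *page = read_mapping_page(mapping, n, NULL);

	if (!IS_ERR(page))
		kmap(page);	/* balanced by demo_put_dir_page() */
	return page;
}

static void demo_put_dir_page(struct page *page)
{
	kunmap(page);
	put_page(page);		/* drop read_mapping_page()'s reference */
}
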
page               48 fs/nilfs2/file.c 	struct page *page = vmf->page;
page               57 fs/nilfs2/file.c 	lock_page(page);
page               58 fs/nilfs2/file.c 	if (page->mapping != inode->i_mapping ||
page               59 fs/nilfs2/file.c 	    page_offset(page) >= i_size_read(inode) || !PageUptodate(page)) {
page               60 fs/nilfs2/file.c 		unlock_page(page);
page               68 fs/nilfs2/file.c 	if (PageMappedToDisk(page))
page               71 fs/nilfs2/file.c 	if (page_has_buffers(page)) {
page               75 fs/nilfs2/file.c 		bh = head = page_buffers(page);
page               84 fs/nilfs2/file.c 			SetPageMappedToDisk(page);
page               88 fs/nilfs2/file.c 	unlock_page(page);
page              108 fs/nilfs2/file.c 	wait_for_stable_page(page);
page              143 fs/nilfs2/inode.c static int nilfs_readpage(struct file *file, struct page *page)
page              145 fs/nilfs2/inode.c 	return mpage_readpage(page, nilfs_get_block);
page              180 fs/nilfs2/inode.c static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
page              182 fs/nilfs2/inode.c 	struct inode *inode = page->mapping->host;
page              192 fs/nilfs2/inode.c 		nilfs_clear_dirty_page(page, false);
page              193 fs/nilfs2/inode.c 		unlock_page(page);
page              197 fs/nilfs2/inode.c 	redirty_page_for_writepage(wbc, page);
page              198 fs/nilfs2/inode.c 	unlock_page(page);
page              210 fs/nilfs2/inode.c static int nilfs_set_page_dirty(struct page *page)
page              212 fs/nilfs2/inode.c 	struct inode *inode = page->mapping->host;
page              213 fs/nilfs2/inode.c 	int ret = __set_page_dirty_nobuffers(page);
page              215 fs/nilfs2/inode.c 	if (page_has_buffers(page)) {
page              226 fs/nilfs2/inode.c 		bh = head = page_buffers(page);
page              258 fs/nilfs2/inode.c 			     struct page **pagep, void **fsdata)
page              278 fs/nilfs2/inode.c 			   struct page *page, void *fsdata)
page              285 fs/nilfs2/inode.c 	nr_dirty = nilfs_page_count_clean_buffers(page, start,
page              287 fs/nilfs2/inode.c 	copied = generic_write_end(file, mapping, pos, len, copied, page,
page              360 fs/nilfs2/mdt.c 	struct page *page;
page              365 fs/nilfs2/mdt.c 	page = find_lock_page(inode->i_mapping, index);
page              366 fs/nilfs2/mdt.c 	if (!page)
page              369 fs/nilfs2/mdt.c 	wait_on_page_writeback(page);
page              373 fs/nilfs2/mdt.c 	if (page_has_buffers(page)) {
page              376 fs/nilfs2/mdt.c 		bh = nilfs_page_get_nth_block(page, block - first_block);
page              379 fs/nilfs2/mdt.c 	still_dirty = PageDirty(page);
page              380 fs/nilfs2/mdt.c 	unlock_page(page);
page              381 fs/nilfs2/mdt.c 	put_page(page);
page              401 fs/nilfs2/mdt.c nilfs_mdt_write_page(struct page *page, struct writeback_control *wbc)
page              403 fs/nilfs2/mdt.c 	struct inode *inode = page->mapping->host;
page              414 fs/nilfs2/mdt.c 		nilfs_clear_dirty_page(page, false);
page              415 fs/nilfs2/mdt.c 		unlock_page(page);
page              419 fs/nilfs2/mdt.c 	redirty_page_for_writepage(wbc, page);
page              420 fs/nilfs2/mdt.c 	unlock_page(page);
page              547 fs/nilfs2/mdt.c 	struct page *page;
page              550 fs/nilfs2/mdt.c 	page = grab_cache_page(&shadow->frozen_data, bh->b_page->index);
page              551 fs/nilfs2/mdt.c 	if (!page)
page              554 fs/nilfs2/mdt.c 	if (!page_has_buffers(page))
page              555 fs/nilfs2/mdt.c 		create_empty_buffers(page, 1 << blkbits, 0);
page              557 fs/nilfs2/mdt.c 	bh_frozen = nilfs_page_get_nth_block(page, bh_offset(bh) >> blkbits);
page              569 fs/nilfs2/mdt.c 	unlock_page(page);
page              570 fs/nilfs2/mdt.c 	put_page(page);
page              579 fs/nilfs2/mdt.c 	struct page *page;
page              582 fs/nilfs2/mdt.c 	page = find_lock_page(&shadow->frozen_data, bh->b_page->index);
page              583 fs/nilfs2/mdt.c 	if (page) {
page              584 fs/nilfs2/mdt.c 		if (page_has_buffers(page)) {
page              586 fs/nilfs2/mdt.c 			bh_frozen = nilfs_page_get_nth_block(page, n);
page              588 fs/nilfs2/mdt.c 		unlock_page(page);
page              589 fs/nilfs2/mdt.c 		put_page(page);
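
The mdt.c entries above pair grab_cache_page() with create_empty_buffers() before picking a block-sized buffer out of the page. A hedged sketch of that pairing; demo_grab_block() is hypothetical, and holding get_bh()'s reference is what keeps the buffer (and therefore the page) alive after the page lock and page reference are dropped, the same trick fs/buffer.c's __find_get_block() relies on:

#include <linux/buffer_head.h>
#include <linux/pagemap.h>

static struct buffer_head *demo_grab_block(struct address_space *mapping,
					   pgoff_t index,
					   unsigned int blkbits)
{
	struct page *page = grab_cache_page(mapping, index);
	struct buffer_head *bh;

	if (!page)
		return NULL;	/* grab_cache_page() returns it locked */
	if (!page_has_buffers(page))
		create_empty_buffers(page, 1 << blkbits, 0);
	bh = page_buffers(page);	/* first block in this page */
	get_bh(bh);
	unlock_page(page);
	put_page(page);
	return bh;		/* caller releases with brelse() */
}
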
page              261 fs/nilfs2/namei.c 	struct page *page;
page              265 fs/nilfs2/namei.c 	de = nilfs_find_entry(dir, &dentry->d_name, &page);
page              280 fs/nilfs2/namei.c 	err = nilfs_delete_entry(de, page);
page              347 fs/nilfs2/namei.c 	struct page *dir_page = NULL;
page              349 fs/nilfs2/namei.c 	struct page *old_page;
page              374 fs/nilfs2/namei.c 		struct page *new_page;
page              235 fs/nilfs2/nilfs.h nilfs_find_entry(struct inode *, const struct qstr *, struct page **);
page              236 fs/nilfs2/nilfs.h extern int nilfs_delete_entry(struct nilfs_dir_entry *, struct page *);
page              238 fs/nilfs2/nilfs.h extern struct nilfs_dir_entry *nilfs_dotdot(struct inode *, struct page **);
page              240 fs/nilfs2/nilfs.h 			   struct page *, struct inode *);
page               29 fs/nilfs2/page.c __nilfs_get_page_block(struct page *page, unsigned long block, pgoff_t index,
page               36 fs/nilfs2/page.c 	if (!page_has_buffers(page))
page               37 fs/nilfs2/page.c 		create_empty_buffers(page, 1 << blkbits, b_state);
page               40 fs/nilfs2/page.c 	bh = nilfs_page_get_nth_block(page, block - first_block);
page               54 fs/nilfs2/page.c 	struct page *page;
page               57 fs/nilfs2/page.c 	page = grab_cache_page(mapping, index);
page               58 fs/nilfs2/page.c 	if (unlikely(!page))
page               61 fs/nilfs2/page.c 	bh = __nilfs_get_page_block(page, blkoff, index, blkbits, b_state);
page               63 fs/nilfs2/page.c 		unlock_page(page);
page               64 fs/nilfs2/page.c 		put_page(page);
page               77 fs/nilfs2/page.c 	struct page *page = bh->b_page;
page               85 fs/nilfs2/page.c 	if (nilfs_page_buffers_clean(page))
page               86 fs/nilfs2/page.c 		__nilfs_clear_page_dirty(page);
page               89 fs/nilfs2/page.c 	ClearPageUptodate(page);
page               90 fs/nilfs2/page.c 	ClearPageMappedToDisk(page);
page              104 fs/nilfs2/page.c 	struct page *spage = sbh->b_page, *dpage = dbh->b_page;
page              141 fs/nilfs2/page.c int nilfs_page_buffers_clean(struct page *page)
page              145 fs/nilfs2/page.c 	bh = head = page_buffers(page);
page              154 fs/nilfs2/page.c void nilfs_page_bug(struct page *page)
page              159 fs/nilfs2/page.c 	if (unlikely(!page)) {
page              164 fs/nilfs2/page.c 	m = page->mapping;
page              169 fs/nilfs2/page.c 	       page, page_ref_count(page),
page              170 fs/nilfs2/page.c 	       (unsigned long long)page->index, page->flags, m, ino);
page              172 fs/nilfs2/page.c 	if (page_has_buffers(page)) {
page              176 fs/nilfs2/page.c 		bh = head = page_buffers(page);
page              197 fs/nilfs2/page.c static void nilfs_copy_page(struct page *dst, struct page *src, int copy_dirty)
page              255 fs/nilfs2/page.c 		struct page *page = pvec.pages[i], *dpage;
page              257 fs/nilfs2/page.c 		lock_page(page);
page              258 fs/nilfs2/page.c 		if (unlikely(!PageDirty(page)))
page              259 fs/nilfs2/page.c 			NILFS_PAGE_BUG(page, "inconsistent dirty state");
page              261 fs/nilfs2/page.c 		dpage = grab_cache_page(dmap, page->index);
page              265 fs/nilfs2/page.c 			unlock_page(page);
page              268 fs/nilfs2/page.c 		if (unlikely(!page_has_buffers(page)))
page              269 fs/nilfs2/page.c 			NILFS_PAGE_BUG(page,
page              272 fs/nilfs2/page.c 		nilfs_copy_page(dpage, page, 1);
page              277 fs/nilfs2/page.c 		unlock_page(page);
page              309 fs/nilfs2/page.c 		struct page *page = pvec.pages[i], *dpage;
page              310 fs/nilfs2/page.c 		pgoff_t offset = page->index;
page              312 fs/nilfs2/page.c 		lock_page(page);
page              317 fs/nilfs2/page.c 			nilfs_copy_page(dpage, page, 0);
page              322 fs/nilfs2/page.c 			struct page *p;
page              327 fs/nilfs2/page.c 			WARN_ON(page != p);
page              332 fs/nilfs2/page.c 			p = __xa_store(&dmap->i_pages, offset, page, GFP_NOFS);
page              335 fs/nilfs2/page.c 				page->mapping = NULL;
page              336 fs/nilfs2/page.c 				put_page(page);
page              338 fs/nilfs2/page.c 				page->mapping = dmap;
page              340 fs/nilfs2/page.c 				if (PageDirty(page))
page              346 fs/nilfs2/page.c 		unlock_page(page);
page              370 fs/nilfs2/page.c 			struct page *page = pvec.pages[i];
page              372 fs/nilfs2/page.c 			lock_page(page);
page              373 fs/nilfs2/page.c 			nilfs_clear_dirty_page(page, silent);
page              374 fs/nilfs2/page.c 			unlock_page(page);
page              386 fs/nilfs2/page.c void nilfs_clear_dirty_page(struct page *page, bool silent)
page              388 fs/nilfs2/page.c 	struct inode *inode = page->mapping->host;
page              391 fs/nilfs2/page.c 	BUG_ON(!PageLocked(page));
page              396 fs/nilfs2/page.c 			  page_offset(page), inode->i_ino);
page              398 fs/nilfs2/page.c 	ClearPageUptodate(page);
page              399 fs/nilfs2/page.c 	ClearPageMappedToDisk(page);
page              401 fs/nilfs2/page.c 	if (page_has_buffers(page)) {
page              408 fs/nilfs2/page.c 		bh = head = page_buffers(page);
page              421 fs/nilfs2/page.c 	__nilfs_clear_page_dirty(page);
page              424 fs/nilfs2/page.c unsigned int nilfs_page_count_clean_buffers(struct page *page,
page              431 fs/nilfs2/page.c 	for (bh = head = page_buffers(page), block_start = 0;
page              461 fs/nilfs2/page.c int __nilfs_clear_page_dirty(struct page *page)
page              463 fs/nilfs2/page.c 	struct address_space *mapping = page->mapping;
page              467 fs/nilfs2/page.c 		if (test_bit(PG_dirty, &page->flags)) {
page              468 fs/nilfs2/page.c 			__xa_clear_mark(&mapping->i_pages, page_index(page),
page              471 fs/nilfs2/page.c 			return clear_page_dirty_for_io(page);
page              476 fs/nilfs2/page.c 	return TestClearPageDirty(page);
page              501 fs/nilfs2/page.c 	struct page *page;
page              523 fs/nilfs2/page.c 		page = pvec.pages[i];
page              525 fs/nilfs2/page.c 		lock_page(page);
page              526 fs/nilfs2/page.c 		if (page_has_buffers(page)) {
page              529 fs/nilfs2/page.c 			bh = head = page_buffers(page);
page              547 fs/nilfs2/page.c 		unlock_page(page);
page              551 fs/nilfs2/page.c 	index = page->index + 1;
page              557 fs/nilfs2/page.c 	unlock_page(page);
page               33 fs/nilfs2/page.h int __nilfs_clear_page_dirty(struct page *);
page               39 fs/nilfs2/page.h int nilfs_page_buffers_clean(struct page *);
page               40 fs/nilfs2/page.h void nilfs_page_bug(struct page *);
page               44 fs/nilfs2/page.h void nilfs_clear_dirty_page(struct page *, bool);
page               47 fs/nilfs2/page.h unsigned int nilfs_page_count_clean_buffers(struct page *, unsigned int,
page               53 fs/nilfs2/page.h #define NILFS_PAGE_BUG(page, m, a...) \
page               54 fs/nilfs2/page.h 	do { nilfs_page_bug(page); BUG(); } while (0)
page               57 fs/nilfs2/page.h nilfs_page_get_nth_block(struct page *page, unsigned int count)
page               59 fs/nilfs2/page.h 	struct buffer_head *bh = page_buffers(page);
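
The page.h entries cut off inside nilfs_page_get_nth_block(); it completes to the standard circular buffer-ring walk, sketched here under a hypothetical name. The b_this_page links of a page's buffer heads form a ring, so stepping n times needs no NULL check:

#include <linux/buffer_head.h>

static struct buffer_head *demo_get_nth_block(struct page *page,
					      unsigned int count)
{
	struct buffer_head *bh = page_buffers(page);

	while (count-- > 0)
		bh = bh->b_this_page;	/* circular list, never NULL */
	get_bh(bh);			/* caller drops with brelse() */
	return bh;
}
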
page              475 fs/nilfs2/recovery.c 				     struct page *page)
page              484 fs/nilfs2/recovery.c 	kaddr = kmap_atomic(page);
page              500 fs/nilfs2/recovery.c 	struct page *page;
page              514 fs/nilfs2/recovery.c 					0, &page, nilfs_get_block);
page              524 fs/nilfs2/recovery.c 		err = nilfs_recovery_copy_block(nilfs, rb, page);
page              533 fs/nilfs2/recovery.c 				blocksize, page, NULL);
page              535 fs/nilfs2/recovery.c 		unlock_page(page);
page              536 fs/nilfs2/recovery.c 		put_page(page);
page              542 fs/nilfs2/recovery.c 		unlock_page(page);
page              543 fs/nilfs2/recovery.c 		put_page(page);
page              711 fs/nilfs2/segment.c 		struct page *page = pvec.pages[i];
page              713 fs/nilfs2/segment.c 		lock_page(page);
page              714 fs/nilfs2/segment.c 		if (!page_has_buffers(page))
page              715 fs/nilfs2/segment.c 			create_empty_buffers(page, i_blocksize(inode), 0);
page              716 fs/nilfs2/segment.c 		unlock_page(page);
page              718 fs/nilfs2/segment.c 		bh = head = page_buffers(page);
page             1628 fs/nilfs2/segment.c static void nilfs_begin_page_io(struct page *page)
page             1630 fs/nilfs2/segment.c 	if (!page || PageWriteback(page))
page             1637 fs/nilfs2/segment.c 	lock_page(page);
page             1638 fs/nilfs2/segment.c 	clear_page_dirty_for_io(page);
page             1639 fs/nilfs2/segment.c 	set_page_writeback(page);
page             1640 fs/nilfs2/segment.c 	unlock_page(page);
page             1646 fs/nilfs2/segment.c 	struct page *bd_page = NULL, *fs_page = NULL;
page             1702 fs/nilfs2/segment.c static void nilfs_end_page_io(struct page *page, int err)
page             1704 fs/nilfs2/segment.c 	if (!page)
page             1707 fs/nilfs2/segment.c 	if (buffer_nilfs_node(page_buffers(page)) && !PageWriteback(page)) {
page             1712 fs/nilfs2/segment.c 		if (PageDirty(page)) {
page             1720 fs/nilfs2/segment.c 			lock_page(page);
page             1721 fs/nilfs2/segment.c 			if (nilfs_page_buffers_clean(page))
page             1722 fs/nilfs2/segment.c 				__nilfs_clear_page_dirty(page);
page             1723 fs/nilfs2/segment.c 			unlock_page(page);
page             1729 fs/nilfs2/segment.c 		if (!nilfs_page_buffers_clean(page))
page             1730 fs/nilfs2/segment.c 			__set_page_dirty_nobuffers(page);
page             1731 fs/nilfs2/segment.c 		ClearPageError(page);
page             1733 fs/nilfs2/segment.c 		__set_page_dirty_nobuffers(page);
page             1734 fs/nilfs2/segment.c 		SetPageError(page);
page             1737 fs/nilfs2/segment.c 	end_page_writeback(page);
page             1743 fs/nilfs2/segment.c 	struct page *bd_page = NULL, *fs_page = NULL;
page             1820 fs/nilfs2/segment.c 	struct page *bd_page = NULL, *fs_page = NULL;
page               49 fs/ntfs/aops.c 	struct page *page;
page               54 fs/ntfs/aops.c 	page = bh->b_page;
page               55 fs/ntfs/aops.c 	vi = page->mapping->host;
page               64 fs/ntfs/aops.c 		file_ofs = ((s64)page->index << PAGE_SHIFT) +
page               82 fs/ntfs/aops.c 			kaddr = kmap_atomic(page);
page               85 fs/ntfs/aops.c 			flush_dcache_page(page);
page               90 fs/ntfs/aops.c 		SetPageError(page);
page               94 fs/ntfs/aops.c 	first = page_buffers(page);
page              122 fs/ntfs/aops.c 		if (likely(page_uptodate && !PageError(page)))
page              123 fs/ntfs/aops.c 			SetPageUptodate(page);
page              133 fs/ntfs/aops.c 		kaddr = kmap_atomic(page);
page              138 fs/ntfs/aops.c 		flush_dcache_page(page);
page              139 fs/ntfs/aops.c 		if (likely(page_uptodate && !PageError(page)))
page              140 fs/ntfs/aops.c 			SetPageUptodate(page);
page              142 fs/ntfs/aops.c 	unlock_page(page);
page              167 fs/ntfs/aops.c static int ntfs_read_block(struct page *page)
page              184 fs/ntfs/aops.c 	vi = page->mapping->host;
page              194 fs/ntfs/aops.c 	if (!page_has_buffers(page)) {
page              195 fs/ntfs/aops.c 		create_empty_buffers(page, blocksize, 0);
page              196 fs/ntfs/aops.c 		if (unlikely(!page_has_buffers(page))) {
page              197 fs/ntfs/aops.c 			unlock_page(page);
page              201 fs/ntfs/aops.c 	bh = head = page_buffers(page);
page              215 fs/ntfs/aops.c 	iblock = (s64)page->index << (PAGE_SHIFT - blocksize_bits);
page              305 fs/ntfs/aops.c 			SetPageError(page);
page              324 fs/ntfs/aops.c 		zero_user(page, i * blocksize, blocksize);
page              355 fs/ntfs/aops.c 	if (likely(!PageError(page)))
page              356 fs/ntfs/aops.c 		SetPageUptodate(page);
page              359 fs/ntfs/aops.c 	unlock_page(page);
page              381 fs/ntfs/aops.c static int ntfs_readpage(struct file *file, struct page *page)
page              394 fs/ntfs/aops.c 	BUG_ON(!PageLocked(page));
page              395 fs/ntfs/aops.c 	vi = page->mapping->host;
page              398 fs/ntfs/aops.c 	if (unlikely(page->index >= (i_size + PAGE_SIZE - 1) >>
page              400 fs/ntfs/aops.c 		zero_user(page, 0, PAGE_SIZE);
page              408 fs/ntfs/aops.c 	if (PageUptodate(page)) {
page              409 fs/ntfs/aops.c 		unlock_page(page);
page              432 fs/ntfs/aops.c 			return ntfs_read_compressed_block(page);
page              438 fs/ntfs/aops.c 		return ntfs_read_block(page);
page              448 fs/ntfs/aops.c 	if (unlikely(page->index > 0)) {
page              449 fs/ntfs/aops.c 		zero_user(page, 0, PAGE_SIZE);
page              489 fs/ntfs/aops.c 	addr = kmap_atomic(page);
page              496 fs/ntfs/aops.c 	flush_dcache_page(page);
page              503 fs/ntfs/aops.c 	SetPageUptodate(page);
page              505 fs/ntfs/aops.c 	unlock_page(page);
page              533 fs/ntfs/aops.c static int ntfs_write_block(struct page *page, struct writeback_control *wbc)
page              551 fs/ntfs/aops.c 	vi = page->mapping->host;
page              556 fs/ntfs/aops.c 			"0x%lx.", ni->mft_no, ni->type, page->index);
page              562 fs/ntfs/aops.c 	if (!page_has_buffers(page)) {
page              563 fs/ntfs/aops.c 		BUG_ON(!PageUptodate(page));
page              564 fs/ntfs/aops.c 		create_empty_buffers(page, blocksize,
page              566 fs/ntfs/aops.c 		if (unlikely(!page_has_buffers(page))) {
page              574 fs/ntfs/aops.c 			redirty_page_for_writepage(wbc, page);
page              575 fs/ntfs/aops.c 			unlock_page(page);
page              579 fs/ntfs/aops.c 	bh = head = page_buffers(page);
page              585 fs/ntfs/aops.c 	block = (s64)page->index << (PAGE_SHIFT - blocksize_bits);
page              673 fs/ntfs/aops.c 			if (!PageUptodate(page)) {
page              731 fs/ntfs/aops.c 			kaddr = kmap_atomic(page);
page              778 fs/ntfs/aops.c 			zero_user(page, bh_offset(bh), blocksize);
page              805 fs/ntfs/aops.c 	if (unlikely(!PageUptodate(page))) {
page              815 fs/ntfs/aops.c 			SetPageUptodate(page);
page              849 fs/ntfs/aops.c 			redirty_page_for_writepage(wbc, page);
page              852 fs/ntfs/aops.c 			SetPageError(page);
page              855 fs/ntfs/aops.c 	BUG_ON(PageWriteback(page));
page              856 fs/ntfs/aops.c 	set_page_writeback(page);	/* Keeps try_to_free_buffers() away. */
page              868 fs/ntfs/aops.c 	unlock_page(page);
page              872 fs/ntfs/aops.c 		end_page_writeback(page);
page              902 fs/ntfs/aops.c static int ntfs_write_mst_block(struct page *page,
page              906 fs/ntfs/aops.c 	struct inode *vi = page->mapping->host;
page              924 fs/ntfs/aops.c 			"0x%lx.", vi->i_ino, ni->type, page->index);
page              946 fs/ntfs/aops.c 	bh = head = page_buffers(page);
page              955 fs/ntfs/aops.c 	rec_block = block = (sector_t)page->index <<
page             1105 fs/ntfs/aops.c 	kaddr = kmap(page);
page             1107 fs/ntfs/aops.c 	BUG_ON(!PageUptodate(page));
page             1108 fs/ntfs/aops.c 	ClearPageUptodate(page);
page             1122 fs/ntfs/aops.c 			mft_no = (((s64)page->index << PAGE_SHIFT) + ofs)
page             1161 fs/ntfs/aops.c 					ni->type, page->index, ofs);
page             1177 fs/ntfs/aops.c 	flush_dcache_page(page);
page             1209 fs/ntfs/aops.c 					page->index, bh_offset(tbh));
page             1238 fs/ntfs/aops.c 			mft_no = (((s64)page->index << PAGE_SHIFT) + ofs)
page             1258 fs/ntfs/aops.c 	flush_dcache_page(page);
page             1281 fs/ntfs/aops.c 	SetPageUptodate(page);
page             1282 fs/ntfs/aops.c 	kunmap(page);
page             1290 fs/ntfs/aops.c 			SetPageError(page);
page             1296 fs/ntfs/aops.c 				"record 0x%lx.", page->index <<
page             1298 fs/ntfs/aops.c 		redirty_page_for_writepage(wbc, page);
page             1299 fs/ntfs/aops.c 		unlock_page(page);
page             1306 fs/ntfs/aops.c 		BUG_ON(PageWriteback(page));
page             1307 fs/ntfs/aops.c 		set_page_writeback(page);
page             1308 fs/ntfs/aops.c 		unlock_page(page);
page             1309 fs/ntfs/aops.c 		end_page_writeback(page);
page             1339 fs/ntfs/aops.c static int ntfs_writepage(struct page *page, struct writeback_control *wbc)
page             1342 fs/ntfs/aops.c 	struct inode *vi = page->mapping->host;
page             1351 fs/ntfs/aops.c 	BUG_ON(!PageLocked(page));
page             1354 fs/ntfs/aops.c 	if (unlikely(page->index >= (i_size + PAGE_SIZE - 1) >>
page             1360 fs/ntfs/aops.c 		block_invalidatepage(page, 0, PAGE_SIZE);
page             1361 fs/ntfs/aops.c 		unlock_page(page);
page             1376 fs/ntfs/aops.c 			unlock_page(page);
page             1387 fs/ntfs/aops.c 			unlock_page(page);
page             1394 fs/ntfs/aops.c 			unlock_page(page);
page             1403 fs/ntfs/aops.c 		if (page->index >= (i_size >> PAGE_SHIFT)) {
page             1406 fs/ntfs/aops.c 			zero_user_segment(page, ofs, PAGE_SIZE);
page             1410 fs/ntfs/aops.c 			return ntfs_write_mst_block(page, wbc);
page             1412 fs/ntfs/aops.c 		return ntfs_write_block(page, wbc);
page             1422 fs/ntfs/aops.c 	BUG_ON(page_has_buffers(page));
page             1423 fs/ntfs/aops.c 	BUG_ON(!PageUptodate(page));
page             1424 fs/ntfs/aops.c 	if (unlikely(page->index > 0)) {
page             1426 fs/ntfs/aops.c 				"Aborting write.", page->index);
page             1427 fs/ntfs/aops.c 		BUG_ON(PageWriteback(page));
page             1428 fs/ntfs/aops.c 		set_page_writeback(page);
page             1429 fs/ntfs/aops.c 		unlock_page(page);
page             1430 fs/ntfs/aops.c 		end_page_writeback(page);
page             1466 fs/ntfs/aops.c 	BUG_ON(PageWriteback(page));
page             1467 fs/ntfs/aops.c 	set_page_writeback(page);
page             1468 fs/ntfs/aops.c 	unlock_page(page);
page             1483 fs/ntfs/aops.c 	addr = kmap_atomic(page);
page             1491 fs/ntfs/aops.c 	flush_dcache_page(page);
page             1494 fs/ntfs/aops.c 	end_page_writeback(page);
page             1508 fs/ntfs/aops.c 		redirty_page_for_writepage(wbc, page);
page             1513 fs/ntfs/aops.c 		SetPageError(page);
page             1516 fs/ntfs/aops.c 	unlock_page(page);
page             1715 fs/ntfs/aops.c void mark_ntfs_record_dirty(struct page *page, const unsigned int ofs) {
page             1716 fs/ntfs/aops.c 	struct address_space *mapping = page->mapping;
page             1721 fs/ntfs/aops.c 	BUG_ON(!PageUptodate(page));
page             1725 fs/ntfs/aops.c 	if (unlikely(!page_has_buffers(page))) {
page             1727 fs/ntfs/aops.c 		bh = head = alloc_page_buffers(page, bh_size, true);
page             1729 fs/ntfs/aops.c 		if (likely(!page_has_buffers(page))) {
page             1738 fs/ntfs/aops.c 			attach_page_buffers(page, head);
page             1742 fs/ntfs/aops.c 	bh = head = page_buffers(page);
page             1753 fs/ntfs/aops.c 	__set_page_dirty_nobuffers(page);
page               26 fs/ntfs/aops.h static inline void ntfs_unmap_page(struct page *page)
page               28 fs/ntfs/aops.h 	kunmap(page);
page               29 fs/ntfs/aops.h 	put_page(page);
page               72 fs/ntfs/aops.h static inline struct page *ntfs_map_page(struct address_space *mapping,
page               75 fs/ntfs/aops.h 	struct page *page = read_mapping_page(mapping, index, NULL);
page               77 fs/ntfs/aops.h 	if (!IS_ERR(page)) {
page               78 fs/ntfs/aops.h 		kmap(page);
page               79 fs/ntfs/aops.h 		if (!PageError(page))
page               80 fs/ntfs/aops.h 			return page;
page               81 fs/ntfs/aops.h 		ntfs_unmap_page(page);
page               84 fs/ntfs/aops.h 	return page;
page               89 fs/ntfs/aops.h extern void mark_ntfs_record_dirty(struct page *page, const unsigned int ofs);
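
ntfs_map_page()/ntfs_unmap_page() above wrap read_mapping_page() in a kmap. A hedged sketch of the consuming pattern, modeled on the fs/ntfs/bitmap.c entries below; demo_set_first_bit() is hypothetical and compiling it assumes the fs/ntfs tree for aops.h:

#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include "aops.h"	/* ntfs_map_page(), ntfs_unmap_page() */

/* Map one page of a bitmap-like attribute, flip a bit through the mapped
 * address, and push the change back: flush_dcache_page() keeps cache
 * aliases coherent, set_page_dirty() schedules writeback, and
 * ntfs_unmap_page() balances the kmap and the page reference.
 */
static int demo_set_first_bit(struct address_space *mapping, pgoff_t index)
{
	struct page *page = ntfs_map_page(mapping, index);
	u8 *kaddr;

	if (IS_ERR(page))
		return PTR_ERR(page);
	kaddr = page_address(page);
	kaddr[0] |= 1;
	flush_dcache_page(page);
	set_page_dirty(page);
	ntfs_unmap_page(page);
	return 0;
}
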
page               78 fs/ntfs/attrib.c 	struct page *put_this_page = NULL;
page              140 fs/ntfs/attrib.c 				put_this_page = old_ctx.ntfs_ino->page;
page             1530 fs/ntfs/attrib.c 	struct page *page;
page             1566 fs/ntfs/attrib.c 		page = find_or_create_page(vi->i_mapping, 0,
page             1568 fs/ntfs/attrib.c 		if (unlikely(!page))
page             1583 fs/ntfs/attrib.c 		page = NULL;
page             1644 fs/ntfs/attrib.c 	if (page && !PageUptodate(page)) {
page             1645 fs/ntfs/attrib.c 		kaddr = kmap_atomic(page);
page             1651 fs/ntfs/attrib.c 		flush_dcache_page(page);
page             1652 fs/ntfs/attrib.c 		SetPageUptodate(page);
page             1734 fs/ntfs/attrib.c 	if (page) {
page             1735 fs/ntfs/attrib.c 		set_page_dirty(page);
page             1736 fs/ntfs/attrib.c 		unlock_page(page);
page             1737 fs/ntfs/attrib.c 		put_page(page);
page             1793 fs/ntfs/attrib.c 	if (page) {
page             1794 fs/ntfs/attrib.c 		kaddr = kmap_atomic(page);
page             1823 fs/ntfs/attrib.c 		unlock_page(page);
page             1824 fs/ntfs/attrib.c 		put_page(page);
page             2483 fs/ntfs/attrib.c 	struct page *page;
page             2515 fs/ntfs/attrib.c 		page = read_mapping_page(mapping, idx, NULL);
page             2516 fs/ntfs/attrib.c 		if (IS_ERR(page)) {
page             2519 fs/ntfs/attrib.c 			return PTR_ERR(page);
page             2528 fs/ntfs/attrib.c 		kaddr = kmap_atomic(page);
page             2530 fs/ntfs/attrib.c 		flush_dcache_page(page);
page             2532 fs/ntfs/attrib.c 		set_page_dirty(page);
page             2533 fs/ntfs/attrib.c 		put_page(page);
page             2543 fs/ntfs/attrib.c 		page = grab_cache_page(mapping, idx);
page             2544 fs/ntfs/attrib.c 		if (unlikely(!page)) {
page             2549 fs/ntfs/attrib.c 		kaddr = kmap_atomic(page);
page             2551 fs/ntfs/attrib.c 		flush_dcache_page(page);
page             2557 fs/ntfs/attrib.c 		if (page_has_buffers(page)) {
page             2560 fs/ntfs/attrib.c 			bh = head = page_buffers(page);
page             2566 fs/ntfs/attrib.c 		SetPageUptodate(page);
page             2571 fs/ntfs/attrib.c 		set_page_dirty(page);
page             2573 fs/ntfs/attrib.c 		unlock_page(page);
page             2574 fs/ntfs/attrib.c 		put_page(page);
page             2580 fs/ntfs/attrib.c 		page = read_mapping_page(mapping, idx, NULL);
page             2581 fs/ntfs/attrib.c 		if (IS_ERR(page)) {
page             2584 fs/ntfs/attrib.c 			return PTR_ERR(page);
page             2586 fs/ntfs/attrib.c 		kaddr = kmap_atomic(page);
page             2588 fs/ntfs/attrib.c 		flush_dcache_page(page);
page             2590 fs/ntfs/attrib.c 		set_page_dirty(page);
page             2591 fs/ntfs/attrib.c 		put_page(page);
page               39 fs/ntfs/bitmap.c 	struct page *page;
page               61 fs/ntfs/bitmap.c 	page = ntfs_map_page(mapping, index);
page               62 fs/ntfs/bitmap.c 	if (IS_ERR(page)) {
page               65 fs/ntfs/bitmap.c 					"%li), aborting.", PTR_ERR(page));
page               66 fs/ntfs/bitmap.c 		return PTR_ERR(page);
page               68 fs/ntfs/bitmap.c 	kaddr = page_address(page);
page              110 fs/ntfs/bitmap.c 		flush_dcache_page(page);
page              111 fs/ntfs/bitmap.c 		set_page_dirty(page);
page              112 fs/ntfs/bitmap.c 		ntfs_unmap_page(page);
page              113 fs/ntfs/bitmap.c 		page = ntfs_map_page(mapping, ++index);
page              114 fs/ntfs/bitmap.c 		if (IS_ERR(page))
page              116 fs/ntfs/bitmap.c 		kaddr = page_address(page);
page              146 fs/ntfs/bitmap.c 	flush_dcache_page(page);
page              147 fs/ntfs/bitmap.c 	set_page_dirty(page);
page              148 fs/ntfs/bitmap.c 	ntfs_unmap_page(page);
page              158 fs/ntfs/bitmap.c 		return PTR_ERR(page);
page              167 fs/ntfs/bitmap.c 				"%li), aborting.", PTR_ERR(page));
page              173 fs/ntfs/bitmap.c 				"Unmount and run chkdsk.", PTR_ERR(page), pos);
page              176 fs/ntfs/bitmap.c 	return PTR_ERR(page);
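
The bitmap.c entries trace the page loop of ntfs_bitmap_set_bits_in_run(): each mapped page is flushed, dirtied, and unmapped before the next index is mapped. Stripped of the partial-byte handling and error rollback the real function does, the loop shape is roughly (set_whole_pages is illustrative; end_index is inclusive):

static int set_whole_pages(struct address_space *mapping, pgoff_t index,
			   pgoff_t end_index)
{
	struct page *page;

	for (;;) {
		page = ntfs_map_page(mapping, index);
		if (IS_ERR(page))
			return PTR_ERR(page);
		memset(page_address(page), 0xff, PAGE_SIZE);
		flush_dcache_page(page);
		set_page_dirty(page);
		ntfs_unmap_page(page);
		if (++index > end_index)
			break;
	}
	return 0;
}
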
page               86 fs/ntfs/compress.c static void zero_partial_compressed_page(struct page *page,
page               89 fs/ntfs/compress.c 	u8 *kp = page_address(page);
page               93 fs/ntfs/compress.c 	if (((s64)page->index << PAGE_SHIFT) >= initialized_size) {
page              105 fs/ntfs/compress.c static inline void handle_bounds_compressed_page(struct page *page,
page              108 fs/ntfs/compress.c 	if ((page->index >= (initialized_size >> PAGE_SHIFT)) &&
page              110 fs/ntfs/compress.c 		zero_partial_compressed_page(page, initialized_size);
page              152 fs/ntfs/compress.c static int ntfs_decompress(struct page *dest_pages[], int completed_pages[],
page              168 fs/ntfs/compress.c 	struct page *dp;	/* Current destination page being worked on. */
page              462 fs/ntfs/compress.c int ntfs_read_compressed_block(struct page *page)
page              466 fs/ntfs/compress.c 	struct address_space *mapping = page->mapping;
page              475 fs/ntfs/compress.c 	unsigned long offset, index = page->index;
page              502 fs/ntfs/compress.c 	struct page **pages;
page              515 fs/ntfs/compress.c 	pages = kmalloc_array(nr_pages, sizeof(struct page *), GFP_NOFS);
page              526 fs/ntfs/compress.c 		unlock_page(page);
page              537 fs/ntfs/compress.c 	pages[xpage] = page;
page              553 fs/ntfs/compress.c 		zero_user(page, 0, PAGE_SIZE);
page              555 fs/ntfs/compress.c 		SetPageUptodate(page);
page              556 fs/ntfs/compress.c 		unlock_page(page);
page              564 fs/ntfs/compress.c 		page = pages[i];
page              565 fs/ntfs/compress.c 		if (page) {
page              571 fs/ntfs/compress.c 			if (!PageDirty(page) && (!PageUptodate(page) ||
page              572 fs/ntfs/compress.c 					PageError(page))) {
page              573 fs/ntfs/compress.c 				ClearPageError(page);
page              574 fs/ntfs/compress.c 				kmap(page);
page              577 fs/ntfs/compress.c 			unlock_page(page);
page              578 fs/ntfs/compress.c 			put_page(page);
page              738 fs/ntfs/compress.c 			page = pages[cur_page];
page              739 fs/ntfs/compress.c 			if (page) {
page              741 fs/ntfs/compress.c 					clear_page(page_address(page));
page              743 fs/ntfs/compress.c 					memset(page_address(page) + cur_ofs, 0,
page              746 fs/ntfs/compress.c 				flush_dcache_page(page);
page              747 fs/ntfs/compress.c 				kunmap(page);
page              748 fs/ntfs/compress.c 				SetPageUptodate(page);
page              749 fs/ntfs/compress.c 				unlock_page(page);
page              753 fs/ntfs/compress.c 					put_page(page);
page              763 fs/ntfs/compress.c 			page = pages[cur_page];
page              764 fs/ntfs/compress.c 			if (page)
page              765 fs/ntfs/compress.c 				memset(page_address(page) + cur_ofs, 0,
page              795 fs/ntfs/compress.c 			page = pages[cur_page];
page              796 fs/ntfs/compress.c 			if (page)
page              797 fs/ntfs/compress.c 				memcpy(page_address(page) + cur_ofs, cb_pos,
page              806 fs/ntfs/compress.c 			page = pages[cur_page];
page              807 fs/ntfs/compress.c 			if (page)
page              808 fs/ntfs/compress.c 				memcpy(page_address(page) + cur_ofs, cb_pos,
page              817 fs/ntfs/compress.c 			page = pages[cur2_page];
page              818 fs/ntfs/compress.c 			if (page) {
page              823 fs/ntfs/compress.c 				handle_bounds_compressed_page(page, i_size,
page              825 fs/ntfs/compress.c 				flush_dcache_page(page);
page              826 fs/ntfs/compress.c 				kunmap(page);
page              827 fs/ntfs/compress.c 				SetPageUptodate(page);
page              828 fs/ntfs/compress.c 				unlock_page(page);
page              832 fs/ntfs/compress.c 					put_page(page);
page              860 fs/ntfs/compress.c 				page = pages[prev_cur_page];
page              861 fs/ntfs/compress.c 				if (page) {
page              862 fs/ntfs/compress.c 					flush_dcache_page(page);
page              863 fs/ntfs/compress.c 					kunmap(page);
page              864 fs/ntfs/compress.c 					unlock_page(page);
page              866 fs/ntfs/compress.c 						put_page(page);
page              886 fs/ntfs/compress.c 		page = pages[cur_page];
page              887 fs/ntfs/compress.c 		if (page) {
page              891 fs/ntfs/compress.c 					"0x%lx.", ni->mft_no, page->index);
page              892 fs/ntfs/compress.c 			flush_dcache_page(page);
page              893 fs/ntfs/compress.c 			kunmap(page);
page              894 fs/ntfs/compress.c 			unlock_page(page);
page              896 fs/ntfs/compress.c 				put_page(page);
page              938 fs/ntfs/compress.c 		page = pages[i];
page              939 fs/ntfs/compress.c 		if (page) {
page              940 fs/ntfs/compress.c 			flush_dcache_page(page);
page              941 fs/ntfs/compress.c 			kunmap(page);
page              942 fs/ntfs/compress.c 			unlock_page(page);
page              944 fs/ntfs/compress.c 				put_page(page);
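
In the compress.c entries, destination pages stay kmap()ed (non-atomic, line 574) while the compression block is decoded, and the success paths all end the same way. That common epilogue, factored out purely for illustration (finish_dest_page is not a real ntfs function; error paths skip SetPageUptodate):

static void finish_dest_page(struct page *page, bool drop_ref)
{
	flush_dcache_page(page);	/* data landed via the kmap()ed address */
	kunmap(page);
	SetPageUptodate(page);
	unlock_page(page);		/* pages arrive locked, readpage-style */
	if (drop_ref)
		put_page(page);		/* every page except the caller's own */
}
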
page               81 fs/ntfs/dir.c  	struct page *page;
page              307 fs/ntfs/dir.c  	page = ntfs_map_page(ia_mapping, vcn <<
page              309 fs/ntfs/dir.c  	if (IS_ERR(page)) {
page              311 fs/ntfs/dir.c  				-PTR_ERR(page));
page              312 fs/ntfs/dir.c  		err = PTR_ERR(page);
page              315 fs/ntfs/dir.c  	lock_page(page);
page              316 fs/ntfs/dir.c  	kaddr = (u8*)page_address(page);
page              436 fs/ntfs/dir.c  			unlock_page(page);
page              437 fs/ntfs/dir.c  			ntfs_unmap_page(page);
page              469 fs/ntfs/dir.c  				unlock_page(page);
page              470 fs/ntfs/dir.c  				ntfs_unmap_page(page);
page              552 fs/ntfs/dir.c  			unlock_page(page);
page              553 fs/ntfs/dir.c  			ntfs_unmap_page(page);
page              566 fs/ntfs/dir.c  		unlock_page(page);
page              567 fs/ntfs/dir.c  		ntfs_unmap_page(page);
page              573 fs/ntfs/dir.c  	unlock_page(page);
page              574 fs/ntfs/dir.c  	ntfs_unmap_page(page);
page              635 fs/ntfs/dir.c  	struct page *page;
page              785 fs/ntfs/dir.c  	page = ntfs_map_page(ia_mapping, vcn <<
page              787 fs/ntfs/dir.c  	if (IS_ERR(page)) {
page              789 fs/ntfs/dir.c  				-PTR_ERR(page));
page              790 fs/ntfs/dir.c  		err = PTR_ERR(page);
page              793 fs/ntfs/dir.c  	lock_page(page);
page              794 fs/ntfs/dir.c  	kaddr = (u8*)page_address(page);
page              897 fs/ntfs/dir.c  			unlock_page(page);
page              898 fs/ntfs/dir.c  			ntfs_unmap_page(page);
page              961 fs/ntfs/dir.c  			unlock_page(page);
page              962 fs/ntfs/dir.c  			ntfs_unmap_page(page);
page              973 fs/ntfs/dir.c  	unlock_page(page);
page              974 fs/ntfs/dir.c  	ntfs_unmap_page(page);
page             1012 fs/ntfs/dir.c  		ntfs_inode *ndir, struct page *ia_page, INDEX_ENTRY *ie,
page             1100 fs/ntfs/dir.c  	struct page *bmp_page = NULL, *ia_page = NULL;
page              107 fs/ntfs/file.c 	struct page *page = NULL;
page              216 fs/ntfs/file.c 		page = read_mapping_page(mapping, index, NULL);
page              217 fs/ntfs/file.c 		if (IS_ERR(page)) {
page              218 fs/ntfs/file.c 			err = PTR_ERR(page);
page              221 fs/ntfs/file.c 		if (unlikely(PageError(page))) {
page              222 fs/ntfs/file.c 			put_page(page);
page              236 fs/ntfs/file.c 		set_page_dirty(page);
page              237 fs/ntfs/file.c 		put_page(page);
page              495 fs/ntfs/file.c 		pgoff_t index, const unsigned nr_pages, struct page **pages,
page              496 fs/ntfs/file.c 		struct page **cached_page)
page              570 fs/ntfs/file.c static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
page              577 fs/ntfs/file.c 	struct page *page;
page              613 fs/ntfs/file.c 		page = pages[u];
page              614 fs/ntfs/file.c 		BUG_ON(!page);
page              619 fs/ntfs/file.c 		if (!page_has_buffers(page)) {
page              620 fs/ntfs/file.c 			create_empty_buffers(page, blocksize, 0);
page              621 fs/ntfs/file.c 			if (unlikely(!page_has_buffers(page)))
page              641 fs/ntfs/file.c 	page = pages[u];
page              642 fs/ntfs/file.c 	bh_pos = (s64)page->index << PAGE_SHIFT;
page              643 fs/ntfs/file.c 	bh = head = page_buffers(page);
page              666 fs/ntfs/file.c 			if (PageUptodate(page)) {
page              689 fs/ntfs/file.c 					zero_user(page, bh_offset(bh),
page              724 fs/ntfs/file.c 			if (PageUptodate(page)) {
page              764 fs/ntfs/file.c 						zero_user(page, bh_offset(bh),
page              783 fs/ntfs/file.c 					zero_user(page, bh_offset(bh),
page              796 fs/ntfs/file.c 				kaddr = kmap_atomic(page);
page              806 fs/ntfs/file.c 				flush_dcache_page(page);
page              819 fs/ntfs/file.c 			if (PageUptodate(page)) {
page              823 fs/ntfs/file.c 				zero_user(page, bh_offset(bh), blocksize);
page              943 fs/ntfs/file.c 				if (PageUptodate(page)) {
page              947 fs/ntfs/file.c 					zero_user(page, bh_offset(bh),
page             1195 fs/ntfs/file.c 			page = bh->b_page;
page             1196 fs/ntfs/file.c 			bh_pos = ((s64)page->index << PAGE_SHIFT) +
page             1207 fs/ntfs/file.c 				zero_user_segment(page, bh_offset(bh) + ofs,
page             1334 fs/ntfs/file.c 		page = pages[u];
page             1335 fs/ntfs/file.c 		bh = head = page_buffers(page);
page             1338 fs/ntfs/file.c 					((s64)page->index << PAGE_SHIFT) +
page             1345 fs/ntfs/file.c 				if (PageUptodate(page))
page             1348 fs/ntfs/file.c 					zero_user(page, bh_offset(bh),
page             1360 fs/ntfs/file.c static inline void ntfs_flush_dcache_pages(struct page **pages,
page             1385 fs/ntfs/file.c 		struct page **pages, const unsigned nr_pages,
page             1406 fs/ntfs/file.c 		struct page *page;
page             1409 fs/ntfs/file.c 		page = pages[u];
page             1410 fs/ntfs/file.c 		bh_pos = (s64)page->index << PAGE_SHIFT;
page             1411 fs/ntfs/file.c 		bh = head = page_buffers(page);
page             1429 fs/ntfs/file.c 		if (!partial && !PageUptodate(page))
page             1430 fs/ntfs/file.c 			SetPageUptodate(page);
page             1539 fs/ntfs/file.c static int ntfs_commit_pages_after_write(struct page **pages,
page             1546 fs/ntfs/file.c 	struct page *page;
page             1557 fs/ntfs/file.c 	page = pages[0];
page             1558 fs/ntfs/file.c 	BUG_ON(!page);
page             1559 fs/ntfs/file.c 	vi = page->mapping->host;
page             1563 fs/ntfs/file.c 			vi->i_ino, ni->type, page->index, nr_pages,
page             1609 fs/ntfs/file.c 	kaddr = kmap_atomic(page);
page             1621 fs/ntfs/file.c 	if (!PageUptodate(page)) {
page             1628 fs/ntfs/file.c 		flush_dcache_page(page);
page             1629 fs/ntfs/file.c 		SetPageUptodate(page);
page             1655 fs/ntfs/file.c 		if (PageUptodate(page)) {
page             1663 fs/ntfs/file.c 			__set_page_dirty_nobuffers(page);
page             1685 fs/ntfs/file.c static size_t ntfs_copy_from_user_iter(struct page **pages, unsigned nr_pages,
page             1688 fs/ntfs/file.c 	struct page **last_page = pages + nr_pages;
page             1737 fs/ntfs/file.c 	struct page *pages[NTFS_MAX_PAGES_PER_CLUSTER];
page             1738 fs/ntfs/file.c 	struct page *cached_page = NULL;
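
ntfs_prepare_pages_for_non_resident_write() (file.c line 570 above) works block by block, so it first guarantees each page has buffer heads and then walks the circular b_this_page ring. The walk skeleton, with the per-block work elided:

#include <linux/buffer_head.h>

static void walk_page_blocks(struct page *page, unsigned int blocksize)
{
	struct buffer_head *bh, *head;

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);
	bh = head = page_buffers(page);
	do {
		/* per-block work: map bh to disk, zero it, dirty it, ... */
	} while ((bh = bh->b_this_page) != head);
}
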
page               52 fs/ntfs/index.c 			struct page *page = ictx->page;
page               53 fs/ntfs/index.c 			if (page) {
page               54 fs/ntfs/index.c 				BUG_ON(!PageLocked(page));
page               55 fs/ntfs/index.c 				unlock_page(page);
page               56 fs/ntfs/index.c 				ntfs_unmap_page(page);
page              120 fs/ntfs/index.c 	struct page *page;
page              200 fs/ntfs/index.c 			ictx->page = NULL;
page              264 fs/ntfs/index.c 	page = ntfs_map_page(ia_mapping, vcn <<
page              266 fs/ntfs/index.c 	if (IS_ERR(page)) {
page              268 fs/ntfs/index.c 				-PTR_ERR(page));
page              269 fs/ntfs/index.c 		err = PTR_ERR(page);
page              272 fs/ntfs/index.c 	lock_page(page);
page              273 fs/ntfs/index.c 	kaddr = (u8*)page_address(page);
page              369 fs/ntfs/index.c 			ictx->page = page;
page              420 fs/ntfs/index.c 		unlock_page(page);
page              421 fs/ntfs/index.c 		ntfs_unmap_page(page);
page              427 fs/ntfs/index.c 	unlock_page(page);
page              428 fs/ntfs/index.c 	ntfs_unmap_page(page);
page               71 fs/ntfs/index.h 	struct page *page;
page              102 fs/ntfs/index.h 		flush_dcache_page(ictx->page);
page              128 fs/ntfs/index.h 		mark_ntfs_record_dirty(ictx->page,
page              129 fs/ntfs/index.h 				(u8*)ictx->ia - (u8*)page_address(ictx->page));
page              343 fs/ntfs/inode.c 	BUG_ON(ni->page);
page              377 fs/ntfs/inode.c 	ni->page = NULL;
page             2264 fs/ntfs/inode.c 	BUG_ON(ni->page);
page             3020 fs/ntfs/inode.c 			mark_ntfs_record_dirty(ctx->ntfs_ino->page,
page               73 fs/ntfs/inode.h 	struct page *page;	/* The page containing the mft record of the
page              144 fs/ntfs/lcnalloc.c 	struct page *page = NULL;
page              262 fs/ntfs/lcnalloc.c 		if (likely(page)) {
page              265 fs/ntfs/lcnalloc.c 				flush_dcache_page(page);
page              266 fs/ntfs/lcnalloc.c 				set_page_dirty(page);
page              269 fs/ntfs/lcnalloc.c 			ntfs_unmap_page(page);
page              271 fs/ntfs/lcnalloc.c 		page = ntfs_map_page(mapping, last_read_pos >>
page              273 fs/ntfs/lcnalloc.c 		if (IS_ERR(page)) {
page              274 fs/ntfs/lcnalloc.c 			err = PTR_ERR(page);
page              279 fs/ntfs/lcnalloc.c 		buf = page_address(page) + buf_size;
page              730 fs/ntfs/lcnalloc.c 	if (likely(page && !IS_ERR(page))) {
page              733 fs/ntfs/lcnalloc.c 			flush_dcache_page(page);
page              734 fs/ntfs/lcnalloc.c 			set_page_dirty(page);
page              737 fs/ntfs/lcnalloc.c 		ntfs_unmap_page(page);
page              376 fs/ntfs/logfile.c 		struct page *page;
page              387 fs/ntfs/logfile.c 			page = ntfs_map_page(vi->i_mapping, idx);
page              388 fs/ntfs/logfile.c 			if (IS_ERR(page)) {
page              391 fs/ntfs/logfile.c 				err = PTR_ERR(page);
page              397 fs/ntfs/logfile.c 			memcpy((u8*)trp + have_read, page_address(page), size);
page              398 fs/ntfs/logfile.c 			ntfs_unmap_page(page);
page              477 fs/ntfs/logfile.c 	struct page *page = NULL;
page              530 fs/ntfs/logfile.c 		if (!page || page->index != idx) {
page              531 fs/ntfs/logfile.c 			if (page)
page              532 fs/ntfs/logfile.c 				ntfs_unmap_page(page);
page              533 fs/ntfs/logfile.c 			page = ntfs_map_page(mapping, idx);
page              534 fs/ntfs/logfile.c 			if (IS_ERR(page)) {
page              540 fs/ntfs/logfile.c 		kaddr = (u8*)page_address(page) + (pos & ~PAGE_MASK);
page              593 fs/ntfs/logfile.c 			ntfs_unmap_page(page);
page              600 fs/ntfs/logfile.c 	if (page)
page              601 fs/ntfs/logfile.c 		ntfs_unmap_page(page);
page               41 fs/ntfs/mft.c  	struct page *page;
page               45 fs/ntfs/mft.c  	BUG_ON(ni->page);
page               64 fs/ntfs/mft.c  			page = ERR_PTR(-ENOENT);
page               73 fs/ntfs/mft.c  	page = ntfs_map_page(mft_vi->i_mapping, index);
page               74 fs/ntfs/mft.c  	if (!IS_ERR(page)) {
page               76 fs/ntfs/mft.c  		if (likely(ntfs_is_mft_recordp((le32*)(page_address(page) +
page               78 fs/ntfs/mft.c  			ni->page = page;
page               80 fs/ntfs/mft.c  			return page_address(page) + ofs;
page               84 fs/ntfs/mft.c  		ntfs_unmap_page(page);
page               85 fs/ntfs/mft.c  		page = ERR_PTR(-EIO);
page               89 fs/ntfs/mft.c  	ni->page = NULL;
page               91 fs/ntfs/mft.c  	return (void*)page;
page              182 fs/ntfs/mft.c  	BUG_ON(!ni->page);
page              185 fs/ntfs/mft.c  	ntfs_unmap_page(ni->page);
page              186 fs/ntfs/mft.c  	ni->page = NULL;
page              204 fs/ntfs/mft.c  	struct page *page = ni->page;
page              206 fs/ntfs/mft.c  	BUG_ON(!page);
page              390 fs/ntfs/mft.c  	mark_ntfs_record_dirty(ni->page, ni->page_ofs);
page              457 fs/ntfs/mft.c  	struct page *page;
page              480 fs/ntfs/mft.c  	page = ntfs_map_page(vol->mftmirr_ino->i_mapping, mft_no >>
page              482 fs/ntfs/mft.c  	if (IS_ERR(page)) {
page              484 fs/ntfs/mft.c  		err = PTR_ERR(page);
page              487 fs/ntfs/mft.c  	lock_page(page);
page              488 fs/ntfs/mft.c  	BUG_ON(!PageUptodate(page));
page              489 fs/ntfs/mft.c  	ClearPageUptodate(page);
page              493 fs/ntfs/mft.c  	kmirr = page_address(page) + page_ofs;
page              497 fs/ntfs/mft.c  	if (unlikely(!page_has_buffers(page))) {
page              500 fs/ntfs/mft.c  		bh = head = alloc_page_buffers(page, blocksize, true);
page              507 fs/ntfs/mft.c  		attach_page_buffers(page, head);
page              509 fs/ntfs/mft.c  	bh = head = page_buffers(page);
page              514 fs/ntfs/mft.c  	m_start = kmirr - (u8*)page_address(page);
page              610 fs/ntfs/mft.c  	flush_dcache_page(page);
page              611 fs/ntfs/mft.c  	SetPageUptodate(page);
page              612 fs/ntfs/mft.c  	unlock_page(page);
page              613 fs/ntfs/mft.c  	ntfs_unmap_page(page);
page              663 fs/ntfs/mft.c  	struct page *page = ni->page;
page              676 fs/ntfs/mft.c  	BUG_ON(!PageLocked(page));
page              689 fs/ntfs/mft.c  	bh = head = page_buffers(page);
page              799 fs/ntfs/mft.c  			if (PageUptodate(page))
page             1133 fs/ntfs/mft.c  	struct page *page;
page             1187 fs/ntfs/mft.c  			page = ntfs_map_page(mftbmp_mapping,
page             1189 fs/ntfs/mft.c  			if (IS_ERR(page)) {
page             1192 fs/ntfs/mft.c  				return PTR_ERR(page);
page             1194 fs/ntfs/mft.c  			buf = (u8*)page_address(page) + page_ofs;
page             1209 fs/ntfs/mft.c  						ntfs_unmap_page(page);
page             1213 fs/ntfs/mft.c  					flush_dcache_page(page);
page             1214 fs/ntfs/mft.c  					set_page_dirty(page);
page             1215 fs/ntfs/mft.c  					ntfs_unmap_page(page);
page             1227 fs/ntfs/mft.c  			ntfs_unmap_page(page);
page             1278 fs/ntfs/mft.c  	struct page *page;
page             1325 fs/ntfs/mft.c  	page = ntfs_map_page(vol->lcnbmp_ino->i_mapping,
page             1327 fs/ntfs/mft.c  	if (IS_ERR(page)) {
page             1330 fs/ntfs/mft.c  		return PTR_ERR(page);
page             1332 fs/ntfs/mft.c  	b = (u8*)page_address(page) + (ll & ~PAGE_MASK);
page             1338 fs/ntfs/mft.c  		flush_dcache_page(page);
page             1339 fs/ntfs/mft.c  		set_page_dirty(page);
page             1341 fs/ntfs/mft.c  		ntfs_unmap_page(page);
page             1349 fs/ntfs/mft.c  		ntfs_unmap_page(page);
page             2090 fs/ntfs/mft.c  	struct page *page;
page             2115 fs/ntfs/mft.c  	page = ntfs_map_page(mft_vi->i_mapping, index);
page             2116 fs/ntfs/mft.c  	if (IS_ERR(page)) {
page             2119 fs/ntfs/mft.c  		return PTR_ERR(page);
page             2121 fs/ntfs/mft.c  	lock_page(page);
page             2122 fs/ntfs/mft.c  	BUG_ON(!PageUptodate(page));
page             2123 fs/ntfs/mft.c  	ClearPageUptodate(page);
page             2124 fs/ntfs/mft.c  	m = (MFT_RECORD*)((u8*)page_address(page) + ofs);
page             2129 fs/ntfs/mft.c  		SetPageUptodate(page);
page             2130 fs/ntfs/mft.c  		unlock_page(page);
page             2131 fs/ntfs/mft.c  		ntfs_unmap_page(page);
page             2134 fs/ntfs/mft.c  	flush_dcache_page(page);
page             2135 fs/ntfs/mft.c  	SetPageUptodate(page);
page             2136 fs/ntfs/mft.c  	unlock_page(page);
page             2142 fs/ntfs/mft.c  	mark_ntfs_record_dirty(page, ofs);
page             2143 fs/ntfs/mft.c  	ntfs_unmap_page(page);
page             2244 fs/ntfs/mft.c  	struct page *page;
page             2516 fs/ntfs/mft.c  	page = ntfs_map_page(vol->mft_ino->i_mapping, index);
page             2517 fs/ntfs/mft.c  	if (IS_ERR(page)) {
page             2520 fs/ntfs/mft.c  		err = PTR_ERR(page);
page             2523 fs/ntfs/mft.c  	lock_page(page);
page             2524 fs/ntfs/mft.c  	BUG_ON(!PageUptodate(page));
page             2525 fs/ntfs/mft.c  	ClearPageUptodate(page);
page             2526 fs/ntfs/mft.c  	m = (MFT_RECORD*)((u8*)page_address(page) + ofs);
page             2538 fs/ntfs/mft.c  			SetPageUptodate(page);
page             2539 fs/ntfs/mft.c  			unlock_page(page);
page             2540 fs/ntfs/mft.c  			ntfs_unmap_page(page);
page             2557 fs/ntfs/mft.c  			SetPageUptodate(page);
page             2558 fs/ntfs/mft.c  			unlock_page(page);
page             2559 fs/ntfs/mft.c  			ntfs_unmap_page(page);
page             2571 fs/ntfs/mft.c  	flush_dcache_page(page);
page             2572 fs/ntfs/mft.c  	SetPageUptodate(page);
page             2596 fs/ntfs/mft.c  			flush_dcache_page(page);
page             2598 fs/ntfs/mft.c  			mark_ntfs_record_dirty(page, ofs);
page             2599 fs/ntfs/mft.c  			unlock_page(page);
page             2600 fs/ntfs/mft.c  			ntfs_unmap_page(page);
page             2611 fs/ntfs/mft.c  		mark_ntfs_record_dirty(page, ofs);
page             2612 fs/ntfs/mft.c  		unlock_page(page);
page             2617 fs/ntfs/mft.c  		ntfs_unmap_page(page);
page             2630 fs/ntfs/mft.c  			flush_dcache_page(page);
page             2632 fs/ntfs/mft.c  			mark_ntfs_record_dirty(page, ofs);
page             2633 fs/ntfs/mft.c  			unlock_page(page);
page             2634 fs/ntfs/mft.c  			ntfs_unmap_page(page);
page             2700 fs/ntfs/mft.c  		ni->page = page;
page             2712 fs/ntfs/mft.c  		mark_ntfs_record_dirty(page, ofs);
page             2713 fs/ntfs/mft.c  		unlock_page(page);
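
Several mft.c paths above (e.g. lines 2121-2136) edit an MFT record inside its $MFT page under the page lock, clearing PG_uptodate for the duration so a concurrent reader cannot observe a torn record. In outline (update_record is illustrative; the page is assumed to be one returned by ntfs_map_page(), so page_address() is valid):

static void update_record(struct page *page, unsigned int ofs,
			  const void *rec, size_t recsz)
{
	lock_page(page);
	BUG_ON(!PageUptodate(page));
	ClearPageUptodate(page);	/* hide the record while it is torn */
	memcpy((u8 *)page_address(page) + ofs, rec, recsz);
	flush_dcache_page(page);
	SetPageUptodate(page);
	mark_ntfs_record_dirty(page, ofs);	/* dirty just this record */
	unlock_page(page);
}
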
page               43 fs/ntfs/mft.h  	flush_dcache_page(ni->page);
page               90 fs/ntfs/mft.h  	struct page *page = ni->page;
page               93 fs/ntfs/mft.h  	BUG_ON(!page);
page               94 fs/ntfs/mft.h  	lock_page(page);
page               96 fs/ntfs/mft.h  	unlock_page(page);
page               74 fs/ntfs/ntfs.h extern int ntfs_read_compressed_block(struct page *page);
page             1077 fs/ntfs/super.c 	struct page *mft_page, *mirr_page;
page             1256 fs/ntfs/super.c 	struct page *page;
page             1306 fs/ntfs/super.c 	page = ntfs_map_page(vi->i_mapping, 0);
page             1307 fs/ntfs/super.c 	if (IS_ERR(page)) {
page             1309 fs/ntfs/super.c 		ret = PTR_ERR(page);
page             1312 fs/ntfs/super.c 	kaddr = (u32*)page_address(page);
page             1336 fs/ntfs/super.c 	ntfs_unmap_page(page);
page             1430 fs/ntfs/super.c 	struct page *page;
page             1526 fs/ntfs/super.c 	page = ntfs_map_page(vol->usnjrnl_max_ino->i_mapping, 0);
page             1527 fs/ntfs/super.c 	if (IS_ERR(page)) {
page             1532 fs/ntfs/super.c 	uh = (USN_HEADER*)page_address(page);
page             1540 fs/ntfs/super.c 		ntfs_unmap_page(page);
page             1551 fs/ntfs/super.c 			ntfs_unmap_page(page);
page             1564 fs/ntfs/super.c 		ntfs_unmap_page(page);
page             1567 fs/ntfs/super.c 	ntfs_unmap_page(page);
page             1583 fs/ntfs/super.c 	struct page *page;
page             1609 fs/ntfs/super.c 		page = ntfs_map_page(ino->i_mapping, index);
page             1610 fs/ntfs/super.c 		if (IS_ERR(page))
page             1613 fs/ntfs/super.c 				page_address(page), size);
page             1614 fs/ntfs/super.c 		ntfs_unmap_page(page);
page             1648 fs/ntfs/super.c 	struct page *page;
page             1678 fs/ntfs/super.c 		page = ntfs_map_page(ino->i_mapping, index);
page             1679 fs/ntfs/super.c 		if (IS_ERR(page))
page             1682 fs/ntfs/super.c 				page_address(page), size);
page             1683 fs/ntfs/super.c 		ntfs_unmap_page(page);
page             2449 fs/ntfs/super.c 	struct page *page;
page             2472 fs/ntfs/super.c 		page = read_mapping_page(mapping, index, NULL);
page             2474 fs/ntfs/super.c 		if (IS_ERR(page)) {
page             2480 fs/ntfs/super.c 		kaddr = kmap_atomic(page);
page             2491 fs/ntfs/super.c 		put_page(page);
page             2529 fs/ntfs/super.c 	struct page *page;
page             2543 fs/ntfs/super.c 		page = read_mapping_page(mapping, index, NULL);
page             2545 fs/ntfs/super.c 		if (IS_ERR(page)) {
page             2551 fs/ntfs/super.c 		kaddr = kmap_atomic(page);
page             2562 fs/ntfs/super.c 		put_page(page);
page               38 fs/ntfs/usnjrnl.c 		struct page *page;
page               41 fs/ntfs/usnjrnl.c 		page = ntfs_map_page(vol->usnjrnl_max_ino->i_mapping, 0);
page               42 fs/ntfs/usnjrnl.c 		if (IS_ERR(page)) {
page               47 fs/ntfs/usnjrnl.c 		uh = (USN_HEADER*)page_address(page);
page               60 fs/ntfs/usnjrnl.c 		flush_dcache_page(page);
page               61 fs/ntfs/usnjrnl.c 		set_page_dirty(page);
page               62 fs/ntfs/usnjrnl.c 		ntfs_unmap_page(page);
page             6807 fs/ocfs2/alloc.c 			      struct page *page, int zero, u64 *phys)
page             6810 fs/ocfs2/alloc.c 	loff_t start_byte = ((loff_t)page->index << PAGE_SHIFT) + from;
page             6813 fs/ocfs2/alloc.c 	ret = ocfs2_map_page_blocks(page, phys, inode, from, to, 0);
page             6818 fs/ocfs2/alloc.c 		zero_user_segment(page, from, to);
page             6825 fs/ocfs2/alloc.c 	ret = walk_page_buffers(handle, page_buffers(page),
page             6838 fs/ocfs2/alloc.c 		SetPageUptodate(page);
page             6840 fs/ocfs2/alloc.c 	flush_dcache_page(page);
page             6844 fs/ocfs2/alloc.c 				     loff_t end, struct page **pages,
page             6848 fs/ocfs2/alloc.c 	struct page *page;
page             6859 fs/ocfs2/alloc.c 		page = pages[i];
page             6862 fs/ocfs2/alloc.c 		if ((end >> PAGE_SHIFT) == page->index)
page             6868 fs/ocfs2/alloc.c 		ocfs2_map_and_dirty_page(inode, handle, from, to, page, 1,
page             6871 fs/ocfs2/alloc.c 		start = (page->index + 1) << PAGE_SHIFT;
page             6879 fs/ocfs2/alloc.c 		     struct page **pages, int *num)
page             6916 fs/ocfs2/alloc.c 				struct page **pages, int *num)
page             6939 fs/ocfs2/alloc.c 	struct page **pages = NULL;
page             6952 fs/ocfs2/alloc.c 			sizeof(struct page *), GFP_NOFS);
page             7060 fs/ocfs2/alloc.c 	struct page **pages = NULL;
page             7069 fs/ocfs2/alloc.c 				sizeof(struct page *), GFP_NOFS);
page              260 fs/ocfs2/alloc.h 		     struct page **pages, int *num);
page              263 fs/ocfs2/alloc.h 			      struct page *page, int zero, u64 *phys);
page              222 fs/ocfs2/aops.c int ocfs2_read_inline_data(struct inode *inode, struct page *page,
page              246 fs/ocfs2/aops.c 	kaddr = kmap_atomic(page);
page              251 fs/ocfs2/aops.c 	flush_dcache_page(page);
page              254 fs/ocfs2/aops.c 	SetPageUptodate(page);
page              259 fs/ocfs2/aops.c static int ocfs2_readpage_inline(struct inode *inode, struct page *page)
page              264 fs/ocfs2/aops.c 	BUG_ON(!PageLocked(page));
page              273 fs/ocfs2/aops.c 	ret = ocfs2_read_inline_data(inode, page, di_bh);
page              275 fs/ocfs2/aops.c 	unlock_page(page);
page              281 fs/ocfs2/aops.c static int ocfs2_readpage(struct file *file, struct page *page)
page              283 fs/ocfs2/aops.c 	struct inode *inode = page->mapping->host;
page              285 fs/ocfs2/aops.c 	loff_t start = (loff_t)page->index << PAGE_SHIFT;
page              289 fs/ocfs2/aops.c 			     (page ? page->index : 0));
page              291 fs/ocfs2/aops.c 	ret = ocfs2_inode_lock_with_page(inode, NULL, 0, page);
page              305 fs/ocfs2/aops.c 		unlock_page(page);
page              323 fs/ocfs2/aops.c 		zero_user(page, 0, PAGE_SIZE);
page              324 fs/ocfs2/aops.c 		SetPageUptodate(page);
page              330 fs/ocfs2/aops.c 		ret = ocfs2_readpage_inline(inode, page);
page              332 fs/ocfs2/aops.c 		ret = block_read_full_page(page, ocfs2_get_block);
page              341 fs/ocfs2/aops.c 		unlock_page(page);
page              361 fs/ocfs2/aops.c 	struct page *last;
page              412 fs/ocfs2/aops.c static int ocfs2_writepage(struct page *page, struct writeback_control *wbc)
page              415 fs/ocfs2/aops.c 		(unsigned long long)OCFS2_I(page->mapping->host)->ip_blkno,
page              416 fs/ocfs2/aops.c 		page->index);
page              418 fs/ocfs2/aops.c 	return block_write_full_page(page, ocfs2_get_block, wbc);
page              511 fs/ocfs2/aops.c static int ocfs2_releasepage(struct page *page, gfp_t wait)
page              513 fs/ocfs2/aops.c 	if (!page_has_buffers(page))
page              515 fs/ocfs2/aops.c 	return try_to_free_buffers(page);
page              553 fs/ocfs2/aops.c static void ocfs2_clear_page_regions(struct page *page,
page              562 fs/ocfs2/aops.c 	kaddr = kmap_atomic(page);
page              583 fs/ocfs2/aops.c static int ocfs2_should_read_blk(struct inode *inode, struct page *page,
page              586 fs/ocfs2/aops.c 	u64 offset = page_offset(page) + block_start;
page              604 fs/ocfs2/aops.c int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno,
page              613 fs/ocfs2/aops.c 	if (!page_has_buffers(page))
page              614 fs/ocfs2/aops.c 		create_empty_buffers(page, bsize, 0);
page              616 fs/ocfs2/aops.c 	head = page_buffers(page);
page              628 fs/ocfs2/aops.c 			if (PageUptodate(page))
page              645 fs/ocfs2/aops.c 		if (PageUptodate(page)) {
page              650 fs/ocfs2/aops.c 			   ocfs2_should_read_blk(inode, page, block_start) &&
page              684 fs/ocfs2/aops.c 		zero_user(page, block_start, bh->b_size);
page              760 fs/ocfs2/aops.c 	struct page			*w_pages[OCFS2_MAX_CTXT_PAGES];
page              761 fs/ocfs2/aops.c 	struct page			*w_target_page;
page              790 fs/ocfs2/aops.c void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages)
page              888 fs/ocfs2/aops.c static void ocfs2_zero_new_buffers(struct page *page, unsigned from, unsigned to)
page              893 fs/ocfs2/aops.c 	BUG_ON(!PageLocked(page));
page              894 fs/ocfs2/aops.c 	if (!page_has_buffers(page))
page              897 fs/ocfs2/aops.c 	bh = head = page_buffers(page);
page              904 fs/ocfs2/aops.c 				if (!PageUptodate(page)) {
page              910 fs/ocfs2/aops.c 					zero_user_segment(page, start, end);
page              935 fs/ocfs2/aops.c 	struct page *tmppage;
page              955 fs/ocfs2/aops.c 					struct page *page, u32 cpos,
page              970 fs/ocfs2/aops.c 	new = new | ((i_size_read(inode) <= page_offset(page)) &&
page              971 fs/ocfs2/aops.c 			(page_offset(page) <= user_pos));
page              973 fs/ocfs2/aops.c 	if (page == wc->w_target_page) {
page              978 fs/ocfs2/aops.c 			ret = ocfs2_map_page_blocks(page, p_blkno, inode,
page              982 fs/ocfs2/aops.c 			ret = ocfs2_map_page_blocks(page, p_blkno, inode,
page             1006 fs/ocfs2/aops.c 		ret = ocfs2_map_page_blocks(page, p_blkno, inode,
page             1024 fs/ocfs2/aops.c 	if (new && !PageUptodate(page))
page             1025 fs/ocfs2/aops.c 		ocfs2_clear_page_regions(page, OCFS2_SB(inode->i_sb),
page             1028 fs/ocfs2/aops.c 	flush_dcache_page(page);
page             1041 fs/ocfs2/aops.c 				      struct page *mmap_page)
page             1491 fs/ocfs2/aops.c 	struct page *page;
page             1502 fs/ocfs2/aops.c 	page = find_or_create_page(mapping, 0, GFP_NOFS);
page             1503 fs/ocfs2/aops.c 	if (!page) {
page             1513 fs/ocfs2/aops.c 	wc->w_pages[0] = wc->w_target_page = page;
page             1528 fs/ocfs2/aops.c 	if (!PageUptodate(page)) {
page             1529 fs/ocfs2/aops.c 		ret = ocfs2_read_inline_data(inode, page, wc->w_di_bh);
page             1553 fs/ocfs2/aops.c 					  unsigned len, struct page *mmap_page,
page             1661 fs/ocfs2/aops.c 			     struct page **pagep, void **fsdata,
page             1662 fs/ocfs2/aops.c 			     struct buffer_head *di_bh, struct page *mmap_page)
page             1897 fs/ocfs2/aops.c 			     struct page **pagep, void **fsdata)
page             1973 fs/ocfs2/aops.c 	struct page *tmppage;
page             2079 fs/ocfs2/aops.c 			   struct page *page, void *fsdata)
page               14 fs/ocfs2/aops.h 							 struct page *page,
page               18 fs/ocfs2/aops.h int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno,
page               22 fs/ocfs2/aops.h void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages);
page               43 fs/ocfs2/aops.h 			     struct page **pagep, void **fsdata,
page               44 fs/ocfs2/aops.h 			     struct buffer_head *di_bh, struct page *mmap_page);
page               46 fs/ocfs2/aops.h int ocfs2_read_inline_data(struct inode *inode, struct page *page,
page              219 fs/ocfs2/cluster/heartbeat.c 	struct page             **hr_slot_data;
page              519 fs/ocfs2/cluster/heartbeat.c 	struct page *page;
page              542 fs/ocfs2/cluster/heartbeat.c 		page = reg->hr_slot_data[current_page];
page              550 fs/ocfs2/cluster/heartbeat.c 		len = bio_add_page(bio, page, vec_len, vec_start);
page             1502 fs/ocfs2/cluster/heartbeat.c 	struct page *page;
page             1511 fs/ocfs2/cluster/heartbeat.c 			page = reg->hr_slot_data[i];
page             1512 fs/ocfs2/cluster/heartbeat.c 			if (page)
page             1513 fs/ocfs2/cluster/heartbeat.c 				__free_page(page);
page             1538 fs/ocfs2/cluster/heartbeat.c 				 const char *page,
page             1543 fs/ocfs2/cluster/heartbeat.c 	char *p = (char *)page;
page             1564 fs/ocfs2/cluster/heartbeat.c 					    char *page)
page             1566 fs/ocfs2/cluster/heartbeat.c 	return sprintf(page, "%u\n", to_o2hb_region(item)->hr_block_bytes);
page             1570 fs/ocfs2/cluster/heartbeat.c 					     const char *page,
page             1581 fs/ocfs2/cluster/heartbeat.c 	status = o2hb_read_block_input(reg, page, &block_bytes,
page             1593 fs/ocfs2/cluster/heartbeat.c 					    char *page)
page             1595 fs/ocfs2/cluster/heartbeat.c 	return sprintf(page, "%llu\n", to_o2hb_region(item)->hr_start_block);
page             1599 fs/ocfs2/cluster/heartbeat.c 					     const char *page,
page             1604 fs/ocfs2/cluster/heartbeat.c 	char *p = (char *)page;
page             1618 fs/ocfs2/cluster/heartbeat.c static ssize_t o2hb_region_blocks_show(struct config_item *item, char *page)
page             1620 fs/ocfs2/cluster/heartbeat.c 	return sprintf(page, "%d\n", to_o2hb_region(item)->hr_blocks);
page             1624 fs/ocfs2/cluster/heartbeat.c 					const char *page,
page             1629 fs/ocfs2/cluster/heartbeat.c 	char *p = (char *)page;
page             1646 fs/ocfs2/cluster/heartbeat.c static ssize_t o2hb_region_dev_show(struct config_item *item, char *page)
page             1651 fs/ocfs2/cluster/heartbeat.c 		ret = sprintf(page, "%s\n", to_o2hb_region(item)->hr_dev_name);
page             1674 fs/ocfs2/cluster/heartbeat.c 	struct page *page;
page             1699 fs/ocfs2/cluster/heartbeat.c 	reg->hr_slot_data = kcalloc(reg->hr_num_pages, sizeof(struct page *),
page             1705 fs/ocfs2/cluster/heartbeat.c 		page = alloc_page(GFP_KERNEL);
page             1706 fs/ocfs2/cluster/heartbeat.c 		if (!page)
page             1709 fs/ocfs2/cluster/heartbeat.c 		reg->hr_slot_data[i] = page;
page             1712 fs/ocfs2/cluster/heartbeat.c 		raw = page_address(page);
page             1762 fs/ocfs2/cluster/heartbeat.c 				     const char *page,
page             1769 fs/ocfs2/cluster/heartbeat.c 	char *p = (char *)page;
page             1927 fs/ocfs2/cluster/heartbeat.c static ssize_t o2hb_region_pid_show(struct config_item *item, char *page)
page             1940 fs/ocfs2/cluster/heartbeat.c 	return sprintf(page, "%u\n", pid);
page             2139 fs/ocfs2/cluster/heartbeat.c 		char *page)
page             2141 fs/ocfs2/cluster/heartbeat.c 	return sprintf(page, "%u\n", o2hb_dead_threshold);
page             2145 fs/ocfs2/cluster/heartbeat.c 		const char *page, size_t count)
page             2148 fs/ocfs2/cluster/heartbeat.c 	char *p = (char *)page;
page             2161 fs/ocfs2/cluster/heartbeat.c 		char *page)
page             2163 fs/ocfs2/cluster/heartbeat.c 	return sprintf(page, "%s\n",
page             2168 fs/ocfs2/cluster/heartbeat.c 		const char *page, size_t count)
page             2174 fs/ocfs2/cluster/heartbeat.c 	len = (page[count - 1] == '\n') ? count - 1 : count;
page             2179 fs/ocfs2/cluster/heartbeat.c 		if (strncasecmp(page, o2hb_heartbeat_mode_desc[i], len))
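
Note the register switch in the heartbeat.c entries from line 1538 onward: there "page" is no longer a struct page but the PAGE_SIZE text buffer configfs passes to attribute show/store methods. A minimal attribute pair in that style (demo_threshold and both functions are illustrative; the real o2hb stores also validate against region state):

#include <linux/configfs.h>
#include <linux/kernel.h>

static unsigned int demo_threshold;	/* illustrative backing variable */

static ssize_t demo_threshold_show(struct config_item *item, char *page)
{
	return sprintf(page, "%u\n", demo_threshold);
}

static ssize_t demo_threshold_store(struct config_item *item,
				    const char *page, size_t count)
{
	unsigned int val;
	int ret = kstrtouint(page, 10, &val);	/* tolerates one trailing '\n' */

	if (ret < 0)
		return ret;
	demo_threshold = val;
	return count;	/* consumed the whole write */
}
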
page              164 fs/ocfs2/cluster/nodemanager.c static ssize_t o2nm_node_num_show(struct config_item *item, char *page)
page              166 fs/ocfs2/cluster/nodemanager.c 	return sprintf(page, "%d\n", to_o2nm_node(item)->nd_num);
page              185 fs/ocfs2/cluster/nodemanager.c static ssize_t o2nm_node_num_store(struct config_item *item, const char *page,
page              191 fs/ocfs2/cluster/nodemanager.c 	char *p = (char *)page;
page              235 fs/ocfs2/cluster/nodemanager.c static ssize_t o2nm_node_ipv4_port_show(struct config_item *item, char *page)
page              237 fs/ocfs2/cluster/nodemanager.c 	return sprintf(page, "%u\n", ntohs(to_o2nm_node(item)->nd_ipv4_port));
page              241 fs/ocfs2/cluster/nodemanager.c 					 const char *page, size_t count)
page              245 fs/ocfs2/cluster/nodemanager.c 	char *p = (char *)page;
page              263 fs/ocfs2/cluster/nodemanager.c static ssize_t o2nm_node_ipv4_address_show(struct config_item *item, char *page)
page              265 fs/ocfs2/cluster/nodemanager.c 	return sprintf(page, "%pI4\n", &to_o2nm_node(item)->nd_ipv4_address);
page              269 fs/ocfs2/cluster/nodemanager.c 					    const char *page,
page              279 fs/ocfs2/cluster/nodemanager.c 	ret = sscanf(page, "%3u.%3u.%3u.%3u", &octets[3], &octets[2],
page              319 fs/ocfs2/cluster/nodemanager.c static ssize_t o2nm_node_local_show(struct config_item *item, char *page)
page              321 fs/ocfs2/cluster/nodemanager.c 	return sprintf(page, "%d\n", to_o2nm_node(item)->nd_local);
page              324 fs/ocfs2/cluster/nodemanager.c static ssize_t o2nm_node_local_store(struct config_item *item, const char *page,
page              330 fs/ocfs2/cluster/nodemanager.c 	char *p = (char *)page;
page              426 fs/ocfs2/cluster/nodemanager.c static ssize_t o2nm_cluster_attr_write(const char *page, ssize_t count,
page              430 fs/ocfs2/cluster/nodemanager.c 	char *p = (char *)page;
page              447 fs/ocfs2/cluster/nodemanager.c 	char *page)
page              449 fs/ocfs2/cluster/nodemanager.c 	return sprintf(page, "%u\n", to_o2nm_cluster(item)->cl_idle_timeout_ms);
page              453 fs/ocfs2/cluster/nodemanager.c 	const char *page, size_t count)
page              459 fs/ocfs2/cluster/nodemanager.c 	ret =  o2nm_cluster_attr_write(page, count, &val);
page              483 fs/ocfs2/cluster/nodemanager.c 	struct config_item *item, char *page)
page              485 fs/ocfs2/cluster/nodemanager.c 	return sprintf(page, "%u\n",
page              490 fs/ocfs2/cluster/nodemanager.c 	struct config_item *item, const char *page, size_t count)
page              496 fs/ocfs2/cluster/nodemanager.c 	ret =  o2nm_cluster_attr_write(page, count, &val);
page              520 fs/ocfs2/cluster/nodemanager.c 	struct config_item *item, char *page)
page              522 fs/ocfs2/cluster/nodemanager.c 	return sprintf(page, "%u\n",
page              527 fs/ocfs2/cluster/nodemanager.c 	struct config_item *item, const char *page, size_t count)
page              529 fs/ocfs2/cluster/nodemanager.c 	return o2nm_cluster_attr_write(page, count,
page              534 fs/ocfs2/cluster/nodemanager.c 	struct config_item *item, char *page)
page              540 fs/ocfs2/cluster/nodemanager.c 		ret = sprintf(page, "%s\n",
page              546 fs/ocfs2/cluster/nodemanager.c 	struct config_item *item, const char *page, size_t count)
page              550 fs/ocfs2/cluster/nodemanager.c 	if (page[count - 1] != '\n')
page              556 fs/ocfs2/cluster/nodemanager.c 		if (strncasecmp(page, o2nm_fence_method_desc[i], count - 1))
page              415 fs/ocfs2/cluster/tcp.c 	struct page *page = NULL;
page              418 fs/ocfs2/cluster/tcp.c 	page = alloc_page(GFP_NOFS);
page              420 fs/ocfs2/cluster/tcp.c 	if (sc == NULL || page == NULL)
page              444 fs/ocfs2/cluster/tcp.c 	sc->sc_page = page;
page              447 fs/ocfs2/cluster/tcp.c 	page = NULL;
page              450 fs/ocfs2/cluster/tcp.c 	if (page)
page              451 fs/ocfs2/cluster/tcp.c 		__free_page(page);
page              149 fs/ocfs2/cluster/tcp_internal.h 	struct page 		*sc_page;
page             2534 fs/ocfs2/dlmglue.c 			      struct page *page)
page             2540 fs/ocfs2/dlmglue.c 		unlock_page(page);
page              145 fs/ocfs2/dlmglue.h 			      struct page *page);
page              755 fs/ocfs2/file.c 	struct page *page;
page              774 fs/ocfs2/file.c 	page = find_or_create_page(mapping, index, GFP_NOFS);
page              775 fs/ocfs2/file.c 	if (!page) {
page              803 fs/ocfs2/file.c 		ret = __block_write_begin(page, block_start + 1, 0,
page              812 fs/ocfs2/file.c 		ret = block_commit_write(page, block_start + 1,
page              838 fs/ocfs2/file.c 	unlock_page(page);
page              839 fs/ocfs2/file.c 	put_page(page);
page               44 fs/ocfs2/mmap.c 			  vma, vmf->page, vmf->pgoff);
page               49 fs/ocfs2/mmap.c 			struct buffer_head *di_bh, struct page *page)
page               55 fs/ocfs2/mmap.c 	loff_t pos = page_offset(page);
page               58 fs/ocfs2/mmap.c 	struct page *locked_page = NULL;
page               77 fs/ocfs2/mmap.c 	if ((page->mapping != inode->i_mapping) ||
page               78 fs/ocfs2/mmap.c 	    (!PageUptodate(page)) ||
page               79 fs/ocfs2/mmap.c 	    (page_offset(page) >= size))
page               92 fs/ocfs2/mmap.c 	if (page->index == last_index)
page               96 fs/ocfs2/mmap.c 				       &locked_page, &fsdata, di_bh, page);
page              117 fs/ocfs2/mmap.c 	struct page *page = vmf->page;
page              146 fs/ocfs2/mmap.c 	ret = __ocfs2_page_mkwrite(vmf->vma->vm_file, di_bh, page);
page             1188 fs/ocfs2/ocfs2_trace.h 		 unsigned int flags, void *page,
page             1191 fs/ocfs2/ocfs2_trace.h 		page, clusters, extents_to_split),
page             1199 fs/ocfs2/ocfs2_trace.h 		__field(void *, page)
page             1210 fs/ocfs2/ocfs2_trace.h 		__entry->page = page;
page             1217 fs/ocfs2/ocfs2_trace.h 		  __entry->flags, __entry->page, __entry->clusters,
page             1251 fs/ocfs2/ocfs2_trace.h 		 void *area, void *page, unsigned long pgoff),
page             1252 fs/ocfs2/ocfs2_trace.h 	TP_ARGS(ino, area, page, pgoff),
page             1256 fs/ocfs2/ocfs2_trace.h 		__field(void *, page)
page             1262 fs/ocfs2/ocfs2_trace.h 		__entry->page = page;
page             1266 fs/ocfs2/ocfs2_trace.h 		  __entry->ino, __entry->area, __entry->page, __entry->pgoff)
page             2911 fs/ocfs2/refcounttree.c 	struct page *page;
page             2942 fs/ocfs2/refcounttree.c 		page = find_or_create_page(mapping, page_index, GFP_NOFS);
page             2943 fs/ocfs2/refcounttree.c 		if (!page) {
page             2954 fs/ocfs2/refcounttree.c 			if (PageDirty(page)) {
page             2958 fs/ocfs2/refcounttree.c 				ret = write_one_page(page);
page             2963 fs/ocfs2/refcounttree.c 		if (!PageUptodate(page)) {
page             2964 fs/ocfs2/refcounttree.c 			ret = block_read_full_page(page, ocfs2_get_block);
page             2969 fs/ocfs2/refcounttree.c 			lock_page(page);
page             2972 fs/ocfs2/refcounttree.c 		if (page_has_buffers(page)) {
page             2973 fs/ocfs2/refcounttree.c 			ret = walk_page_buffers(handle, page_buffers(page),
page             2984 fs/ocfs2/refcounttree.c 					 page, 0, &new_block);
page             2985 fs/ocfs2/refcounttree.c 		mark_page_accessed(page);
page             2987 fs/ocfs2/refcounttree.c 		unlock_page(page);
page             2988 fs/ocfs2/refcounttree.c 		put_page(page);
page             2989 fs/ocfs2/refcounttree.c 		page = NULL;
page             3150 fs/ocfs2/refcounttree.c 	struct page *page;
page             3171 fs/ocfs2/refcounttree.c 		page = find_or_create_page(inode->i_mapping,
page             3173 fs/ocfs2/refcounttree.c 		BUG_ON(!page);
page             3175 fs/ocfs2/refcounttree.c 		wait_on_page_writeback(page);
page             3176 fs/ocfs2/refcounttree.c 		if (PageError(page)) {
page             3180 fs/ocfs2/refcounttree.c 			mark_page_accessed(page);
page             3182 fs/ocfs2/refcounttree.c 		unlock_page(page);
page             3183 fs/ocfs2/refcounttree.c 		put_page(page);
page             3184 fs/ocfs2/refcounttree.c 		page = NULL;
page               57 fs/ocfs2/symlink.c static int ocfs2_fast_symlink_readpage(struct file *unused, struct page *page)
page               59 fs/ocfs2/symlink.c 	struct inode *inode = page->mapping->host;
page               76 fs/ocfs2/symlink.c 	kaddr = kmap_atomic(page);
page               79 fs/ocfs2/symlink.c 	SetPageUptodate(page);
page               80 fs/ocfs2/symlink.c 	unlock_page(page);
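
ocfs2_fast_symlink_readpage() above copies the link target straight out of the inode into the page. Reduced to just the page handling (fast_symlink_readpage here is a sketch; the real function pulls the target out of the dinode buffer and handles read errors):

static int fast_symlink_readpage(struct page *page, const char *target,
				 size_t len)
{
	void *kaddr = kmap_atomic(page);

	memcpy(kaddr, target, len);
	memset(kaddr + len, 0, PAGE_SIZE - len);	/* zero (and terminate) the tail */
	kunmap_atomic(kaddr);
	SetPageUptodate(page);
	unlock_page(page);	/* ->readpage() is entered with the page locked */
	return 0;
}
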
page              287 fs/omfs/file.c static int omfs_readpage(struct file *file, struct page *page)
page              289 fs/omfs/file.c 	return block_read_full_page(page, omfs_get_block);
page              298 fs/omfs/file.c static int omfs_writepage(struct page *page, struct writeback_control *wbc)
page              300 fs/omfs/file.c 	return block_write_full_page(page, omfs_get_block, wbc);
page              321 fs/omfs/file.c 			struct page **pagep, void **fsdata)
page               18 fs/orangefs/inode.c static int orangefs_writepage_locked(struct page *page,
page               21 fs/orangefs/inode.c 	struct inode *inode = page->mapping->host;
page               29 fs/orangefs/inode.c 	set_page_writeback(page);
page               32 fs/orangefs/inode.c 	if (PagePrivate(page)) {
page               33 fs/orangefs/inode.c 		wr = (struct orangefs_write_range *)page_private(page);
page               42 fs/orangefs/inode.c 		off = page_offset(page);
page               51 fs/orangefs/inode.c 	bv.bv_page = page;
page               60 fs/orangefs/inode.c 		SetPageError(page);
page               61 fs/orangefs/inode.c 		mapping_set_error(page->mapping, ret);
page               67 fs/orangefs/inode.c 		set_page_private(page, 0);
page               68 fs/orangefs/inode.c 		ClearPagePrivate(page);
page               69 fs/orangefs/inode.c 		put_page(page);
page               74 fs/orangefs/inode.c static int orangefs_writepage(struct page *page, struct writeback_control *wbc)
page               77 fs/orangefs/inode.c 	ret = orangefs_writepage_locked(page, wbc);
page               78 fs/orangefs/inode.c 	unlock_page(page);
page               79 fs/orangefs/inode.c 	end_page_writeback(page);
page               90 fs/orangefs/inode.c 	struct page **pages;
page              161 fs/orangefs/inode.c static int orangefs_writepages_callback(struct page *page,
page              168 fs/orangefs/inode.c 	if (!PagePrivate(page)) {
page              169 fs/orangefs/inode.c 		unlock_page(page);
page              175 fs/orangefs/inode.c 	wr = (struct orangefs_write_range *)page_private(page);
page              183 fs/orangefs/inode.c 		ow->pages[ow->npages++] = page;
page              195 fs/orangefs/inode.c 		ow->pages[ow->npages++] = page;
page              205 fs/orangefs/inode.c 		ret = orangefs_writepage_locked(page, wbc);
page              206 fs/orangefs/inode.c 		mapping_set_error(page->mapping, ret);
page              207 fs/orangefs/inode.c 		unlock_page(page);
page              208 fs/orangefs/inode.c 		end_page_writeback(page);
page              228 fs/orangefs/inode.c 	ow->pages = kcalloc(ow->maxpages, sizeof(struct page *), GFP_KERNEL);
page              250 fs/orangefs/inode.c static int orangefs_launder_page(struct page *);
page              252 fs/orangefs/inode.c static int orangefs_readpage(struct file *file, struct page *page)
page              254 fs/orangefs/inode.c 	struct inode *inode = page->mapping->host;
page              260 fs/orangefs/inode.c 	struct page *next_page;
page              303 fs/orangefs/inode.c 	if (PageDirty(page))
page              304 fs/orangefs/inode.c 		orangefs_launder_page(page);
page              306 fs/orangefs/inode.c 	off = page_offset(page);
page              308 fs/orangefs/inode.c 	bv.bv_page = page;
page              319 fs/orangefs/inode.c 	flush_dcache_page(page);
page              321 fs/orangefs/inode.c 		SetPageError(page);
page              322 fs/orangefs/inode.c 		unlock_page(page);
page              325 fs/orangefs/inode.c 		SetPageUptodate(page);
page              326 fs/orangefs/inode.c 		if (PageError(page))
page              327 fs/orangefs/inode.c 			ClearPageError(page);
page              331 fs/orangefs/inode.c 	unlock_page(page);
page              390 fs/orangefs/inode.c     loff_t pos, unsigned len, unsigned flags, struct page **pagep,
page              394 fs/orangefs/inode.c 	struct page *page;
page              400 fs/orangefs/inode.c 	page = grab_cache_page_write_begin(mapping, index, flags);
page              401 fs/orangefs/inode.c 	if (!page)
page              404 fs/orangefs/inode.c 	*pagep = page;
page              406 fs/orangefs/inode.c 	if (PageDirty(page) && !PagePrivate(page)) {
page              412 fs/orangefs/inode.c 		ret = orangefs_launder_page(page);
page              416 fs/orangefs/inode.c 	if (PagePrivate(page)) {
page              418 fs/orangefs/inode.c 		wr = (struct orangefs_write_range *)page_private(page);
page              425 fs/orangefs/inode.c 			ret = orangefs_launder_page(page);
page              439 fs/orangefs/inode.c 	SetPagePrivate(page);
page              440 fs/orangefs/inode.c 	set_page_private(page, (unsigned long)wr);
page              441 fs/orangefs/inode.c 	get_page(page);
page              447 fs/orangefs/inode.c     loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata)
page              449 fs/orangefs/inode.c 	struct inode *inode = page->mapping->host;
page              460 fs/orangefs/inode.c 	if (!PageUptodate(page)) {
page              463 fs/orangefs/inode.c 			zero_user(page, from + copied, len - copied);
page              466 fs/orangefs/inode.c 		if (pos == page_offset(page) &&
page              468 fs/orangefs/inode.c 			zero_user_segment(page, from + copied, PAGE_SIZE);
page              469 fs/orangefs/inode.c 			SetPageUptodate(page);
page              473 fs/orangefs/inode.c 	set_page_dirty(page);
page              474 fs/orangefs/inode.c 	unlock_page(page);
page              475 fs/orangefs/inode.c 	put_page(page);
page              481 fs/orangefs/inode.c static void orangefs_invalidatepage(struct page *page,
page              486 fs/orangefs/inode.c 	wr = (struct orangefs_write_range *)page_private(page);
page              489 fs/orangefs/inode.c 		kfree((struct orangefs_write_range *)page_private(page));
page              490 fs/orangefs/inode.c 		set_page_private(page, 0);
page              491 fs/orangefs/inode.c 		ClearPagePrivate(page);
page              492 fs/orangefs/inode.c 		put_page(page);
page              495 fs/orangefs/inode.c 	} else if (page_offset(page) + offset <= wr->pos &&
page              496 fs/orangefs/inode.c 	    wr->pos + wr->len <= page_offset(page) + offset + length) {
page              497 fs/orangefs/inode.c 		kfree((struct orangefs_write_range *)page_private(page));
page              498 fs/orangefs/inode.c 		set_page_private(page, 0);
page              499 fs/orangefs/inode.c 		ClearPagePrivate(page);
page              500 fs/orangefs/inode.c 		put_page(page);
page              502 fs/orangefs/inode.c 		cancel_dirty_page(page);
page              505 fs/orangefs/inode.c 	} else if (wr->pos < page_offset(page) + offset &&
page              506 fs/orangefs/inode.c 	    wr->pos + wr->len <= page_offset(page) + offset + length &&
page              507 fs/orangefs/inode.c 	     page_offset(page) + offset < wr->pos + wr->len) {
page              509 fs/orangefs/inode.c 		x = wr->pos + wr->len - (page_offset(page) + offset);
page              515 fs/orangefs/inode.c 	} else if (page_offset(page) + offset <= wr->pos &&
page              516 fs/orangefs/inode.c 	    page_offset(page) + offset + length < wr->pos + wr->len &&
page              517 fs/orangefs/inode.c 	    wr->pos < page_offset(page) + offset + length) {
page              519 fs/orangefs/inode.c 		x = page_offset(page) + offset + length - wr->pos;
page              526 fs/orangefs/inode.c 	} else if (wr->pos < page_offset(page) + offset &&
page              527 fs/orangefs/inode.c 	    page_offset(page) + offset + length < wr->pos + wr->len) {
page              539 fs/orangefs/inode.c 		if (!((page_offset(page) + offset + length <= wr->pos) ^
page              540 fs/orangefs/inode.c 		    (wr->pos + wr->len <= page_offset(page) + offset))) {
page              543 fs/orangefs/inode.c 			    page_offset(page) + offset, length);
page              555 fs/orangefs/inode.c 	orangefs_launder_page(page);
page              558 fs/orangefs/inode.c static int orangefs_releasepage(struct page *page, gfp_t foo)
page              560 fs/orangefs/inode.c 	return !PagePrivate(page);
page              563 fs/orangefs/inode.c static void orangefs_freepage(struct page *page)
page              565 fs/orangefs/inode.c 	if (PagePrivate(page)) {
page              566 fs/orangefs/inode.c 		kfree((struct orangefs_write_range *)page_private(page));
page              567 fs/orangefs/inode.c 		set_page_private(page, 0);
page              568 fs/orangefs/inode.c 		ClearPagePrivate(page);
page              569 fs/orangefs/inode.c 		put_page(page);
page              573 fs/orangefs/inode.c static int orangefs_launder_page(struct page *page)
page              580 fs/orangefs/inode.c 	wait_on_page_writeback(page);
page              581 fs/orangefs/inode.c 	if (clear_page_dirty_for_io(page)) {
page              582 fs/orangefs/inode.c 		r = orangefs_writepage_locked(page, &wbc);
page              583 fs/orangefs/inode.c 		end_page_writeback(page);
page              721 fs/orangefs/inode.c 	struct page *page = vmf->page;
page              735 fs/orangefs/inode.c 	lock_page(page);
page              736 fs/orangefs/inode.c 	if (PageDirty(page) && !PagePrivate(page)) {
page              742 fs/orangefs/inode.c 		if (orangefs_launder_page(page)) {
page              747 fs/orangefs/inode.c 	if (PagePrivate(page)) {
page              748 fs/orangefs/inode.c 		wr = (struct orangefs_write_range *)page_private(page);
page              751 fs/orangefs/inode.c 			wr->pos = page_offset(page);
page              755 fs/orangefs/inode.c 			if (orangefs_launder_page(page)) {
page              766 fs/orangefs/inode.c 	wr->pos = page_offset(page);
page              770 fs/orangefs/inode.c 	SetPagePrivate(page);
page              771 fs/orangefs/inode.c 	set_page_private(page, (unsigned long)wr);
page              772 fs/orangefs/inode.c 	get_page(page);
page              776 fs/orangefs/inode.c 	if (page->mapping != inode->i_mapping) {
page              777 fs/orangefs/inode.c 		unlock_page(page);
page              787 fs/orangefs/inode.c 	set_page_dirty(page);
page              788 fs/orangefs/inode.c 	wait_for_stable_page(page);
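The fs/orangefs/inode.c entries above trace the usual page-private lifecycle: the page_mkwrite path allocates a per-page write range, attaches it with SetPagePrivate()/set_page_private() plus an extra page reference, and orangefs_freepage() undoes all three; that extra get_page() is also what lets orangefs_releasepage() refuse reclaim while private data is attached. A minimal sketch of the pairing, assuming a hypothetical struct my_range and a page the caller has already locked:

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>

struct my_range {			/* hypothetical per-page bookkeeping */
	loff_t pos;
	size_t len;
};

/* Attach bookkeeping to a locked page, as the page_mkwrite lines above do. */
static int attach_range(struct page *page, loff_t pos, size_t len)
{
	struct my_range *wr = kmalloc(sizeof(*wr), GFP_KERNEL);

	if (!wr)
		return -ENOMEM;
	wr->pos = pos;
	wr->len = len;
	SetPagePrivate(page);
	set_page_private(page, (unsigned long)wr);
	get_page(page);			/* private data pins the page */
	return 0;
}

/* Release it again, mirroring orangefs_freepage() above. */
static void detach_range(struct page *page)
{
	if (PagePrivate(page)) {
		kfree((struct my_range *)page_private(page));
		set_page_private(page, 0);
		ClearPagePrivate(page);
		put_page(page);
	}
}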
page              142 fs/orangefs/orangefs-bufmap.c 	struct page **page_array;	/* array of mapped pages */
page              154 fs/orangefs/orangefs-bufmap.c 	struct page **page_array;
page              247 fs/orangefs/orangefs-bufmap.c 		kcalloc(bufmap->page_count, sizeof(struct page *), GFP_KERNEL);
page              501 fs/orangefs/orangefs-bufmap.c 		struct page *page = to->page_array[i];
page              505 fs/orangefs/orangefs-bufmap.c 		if (copy_page_from_iter(page, 0, n, iter) != n)
page              530 fs/orangefs/orangefs-bufmap.c 		struct page *page = from->page_array[i];
page              534 fs/orangefs/orangefs-bufmap.c 		n = copy_page_to_iter(page, 0, n, iter);
page              125 fs/pipe.c      	struct page *page = buf->page;
page              132 fs/pipe.c      	if (page_count(page) == 1 && !pipe->tmp_page)
page              133 fs/pipe.c      		pipe->tmp_page = page;
page              135 fs/pipe.c      		put_page(page);
page              141 fs/pipe.c      	struct page *page = buf->page;
page              143 fs/pipe.c      	if (page_count(page) == 1) {
page              144 fs/pipe.c      		memcg_kmem_uncharge(page, 0);
page              145 fs/pipe.c      		__SetPageLocked(page);
page              166 fs/pipe.c      	struct page *page = buf->page;
page              173 fs/pipe.c      	if (page_count(page) == 1) {
page              174 fs/pipe.c      		lock_page(page);
page              194 fs/pipe.c      	return try_get_page(buf->page);
page              225 fs/pipe.c      	put_page(buf->page);
page              306 fs/pipe.c      			written = copy_page_to_iter(buf->page, buf->offset, chars, to);
page              413 fs/pipe.c      			ret = copy_page_from_iter(buf->page, offset, chars, from);
page              438 fs/pipe.c      			struct page *page = pipe->tmp_page;
page              441 fs/pipe.c      			if (!page) {
page              442 fs/pipe.c      				page = alloc_page(GFP_HIGHUSER | __GFP_ACCOUNT);
page              443 fs/pipe.c      				if (unlikely(!page)) {
page              447 fs/pipe.c      				pipe->tmp_page = page;
page              455 fs/pipe.c      			copied = copy_page_from_iter(page, 0, PAGE_SIZE, from);
page              464 fs/pipe.c      			buf->page = page;
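fs/pipe.c above recycles at most one page per pipe: the buffer release hook parks a page whose refcount fell to one in pipe->tmp_page instead of freeing it, and pipe_write() reclaims it before reaching for the allocator. A condensed sketch of the write-side half, assuming the pipe mutex is already held:

#include <linux/pipe_fs_i.h>

/*
 * Reuse the cached page if the release path left one behind,
 * otherwise allocate a fresh accounted highmem page as above.
 */
static struct page *pipe_grab_page(struct pipe_inode_info *pipe)
{
	struct page *page = pipe->tmp_page;

	if (!page) {
		page = alloc_page(GFP_HIGHUSER | __GFP_ACCOUNT);
		if (unlikely(!page))
			return NULL;
		pipe->tmp_page = page;
	}
	return page;
}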
page              220 fs/proc/base.c 	char *page;
page              226 fs/proc/base.c 	page = (char *)__get_free_page(GFP_KERNEL);
page              227 fs/proc/base.c 	if (!page)
page              231 fs/proc/base.c 	got = access_remote_vm(mm, arg_start, page, PAGE_SIZE, FOLL_ANON);
page              233 fs/proc/base.c 		int len = strnlen(page, got);
page              243 fs/proc/base.c 			len -= copy_to_user(buf, page+pos, len);
page              249 fs/proc/base.c 	free_page((unsigned long)page);
page              258 fs/proc/base.c 	char *page, c;
page              312 fs/proc/base.c 	page = (char *)__get_free_page(GFP_KERNEL);
page              313 fs/proc/base.c 	if (!page)
page              321 fs/proc/base.c 		got = access_remote_vm(mm, pos, page, size, FOLL_ANON);
page              324 fs/proc/base.c 		got -= copy_to_user(buf, page, got);
page              336 fs/proc/base.c 	free_page((unsigned long)page);
page              822 fs/proc/base.c 	char *page;
page              828 fs/proc/base.c 	page = (char *)__get_free_page(GFP_KERNEL);
page              829 fs/proc/base.c 	if (!page)
page              841 fs/proc/base.c 		if (write && copy_from_user(page, buf, this_len)) {
page              846 fs/proc/base.c 		this_len = access_remote_vm(mm, addr, page, this_len, flags);
page              853 fs/proc/base.c 		if (!write && copy_to_user(buf, page, this_len)) {
page              867 fs/proc/base.c 	free_page((unsigned long) page);
page              923 fs/proc/base.c 	char *page;
page              933 fs/proc/base.c 	page = (char *)__get_free_page(GFP_KERNEL);
page              934 fs/proc/base.c 	if (!page)
page              958 fs/proc/base.c 		retval = access_remote_vm(mm, (env_start + src), page, this_len, FOLL_ANON);
page              965 fs/proc/base.c 		if (copy_to_user(buf, page, retval)) {
page              979 fs/proc/base.c 	free_page((unsigned long) page);
page             2557 fs/proc/base.c 	void *page;
page             2585 fs/proc/base.c 	page = memdup_user(buf, count);
page             2586 fs/proc/base.c 	if (IS_ERR(page)) {
page             2587 fs/proc/base.c 		rv = PTR_ERR(page);
page             2597 fs/proc/base.c 				  file->f_path.dentry->d_name.name, page,
page             2601 fs/proc/base.c 	kfree(page);
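The fs/proc/base.c entries all follow one bounce-buffer shape: grab a free kernel page, pull the target task's memory into it with access_remote_vm(), copy the result out to the reader, free the page. A minimal sketch, assuming the caller already holds a reference on mm (read_remote() itself is illustrative, not one of the proc helpers):

#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/uaccess.h>

static ssize_t read_remote(struct mm_struct *mm, unsigned long addr,
			   char __user *buf, size_t count)
{
	char *page = (char *)__get_free_page(GFP_KERNEL);
	ssize_t ret;
	int got;

	if (!page)
		return -ENOMEM;

	got = access_remote_vm(mm, addr, page,
			       min_t(size_t, count, PAGE_SIZE), FOLL_ANON);
	if (got <= 0) {
		ret = got;
		goto out;
	}
	got -= copy_to_user(buf, page, got);	/* partial copies shrink got */
	ret = got ? got : -EFAULT;
out:
	free_page((unsigned long)page);
	return ret;
}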
page              190 fs/proc/kcore.c 	struct page *p;
page               33 fs/proc/page.c 	struct page *ppage;
page               90 fs/proc/page.c u64 stable_page_flags(struct page *page)
page               99 fs/proc/page.c 	if (!page)
page              102 fs/proc/page.c 	k = page->flags;
page              111 fs/proc/page.c 	if (!PageSlab(page) && page_mapped(page))
page              113 fs/proc/page.c 	if (PageAnon(page))
page              115 fs/proc/page.c 	if (PageKsm(page))
page              122 fs/proc/page.c 	if (PageHead(page))
page              124 fs/proc/page.c 	if (PageTail(page))
page              126 fs/proc/page.c 	if (PageHuge(page))
page              134 fs/proc/page.c 	else if (PageTransCompound(page)) {
page              135 fs/proc/page.c 		struct page *head = compound_head(page);
page              143 fs/proc/page.c 	} else if (is_zero_pfn(page_to_pfn(page)))
page              152 fs/proc/page.c 	if (PageBuddy(page))
page              154 fs/proc/page.c 	else if (page_count(page) == 0 && is_free_buddy_page(page))
page              157 fs/proc/page.c 	if (PageOffline(page))
page              159 fs/proc/page.c 	if (PageTable(page))
page              162 fs/proc/page.c 	if (page_is_idle(page))
page              168 fs/proc/page.c 	if (PageTail(page) && PageSlab(compound_head(page)))
page              181 fs/proc/page.c 	if (PageSwapCache(page))
page              210 fs/proc/page.c 	struct page *ppage;
page              255 fs/proc/page.c 	struct page *ppage;
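stable_page_flags() above translates the raw, kernel-internal page->flags word plus derived state (mapped, anon, compound, buddy, ...) into the stable KPF_* bit layout exported through /proc/kpageflags, so userspace never depends on PG_* values that may be renumbered between releases. A reduced sketch of the idiom; kpf_copy_bit() mirrors the helper in fs/proc/page.c, and everything else is trimmed to two raw bits plus one derived bit:

#include <linux/mm.h>
#include <linux/kernel-page-flags.h>

/* Copy one raw PG_* bit into its stable KPF_* slot. */
static inline u64 kpf_copy_bit(u64 kflags, int ubit, int kbit)
{
	return ((kflags >> kbit) & 1) << ubit;
}

static u64 tiny_stable_flags(struct page *page)	/* reduced sketch */
{
	u64 k = page->flags, u = 0;

	u |= kpf_copy_bit(k, KPF_LOCKED, PG_locked);
	u |= kpf_copy_bit(k, KPF_DIRTY, PG_dirty);
	if (PageAnon(page))		/* derived, not a raw flag bit */
		u |= 1ULL << KPF_ANON;
	return u;
}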
page              434 fs/proc/task_mmu.c 		struct page *page, unsigned long size, unsigned long pss,
page              439 fs/proc/task_mmu.c 	if (PageAnon(page))
page              441 fs/proc/task_mmu.c 	else if (PageSwapBacked(page))
page              449 fs/proc/task_mmu.c 	if (dirty || PageDirty(page)) {
page              462 fs/proc/task_mmu.c static void smaps_account(struct mem_size_stats *mss, struct page *page,
page              465 fs/proc/task_mmu.c 	int i, nr = compound ? compound_nr(page) : 1;
page              472 fs/proc/task_mmu.c 	if (PageAnon(page)) {
page              474 fs/proc/task_mmu.c 		if (!PageSwapBacked(page) && !dirty && !PageDirty(page))
page              480 fs/proc/task_mmu.c 	if (young || page_is_young(page) || PageReferenced(page))
page              491 fs/proc/task_mmu.c 	if (page_count(page) == 1) {
page              492 fs/proc/task_mmu.c 		smaps_page_accumulate(mss, page, size, size << PSS_SHIFT, dirty,
page              496 fs/proc/task_mmu.c 	for (i = 0; i < nr; i++, page++) {
page              497 fs/proc/task_mmu.c 		int mapcount = page_mapcount(page);
page              501 fs/proc/task_mmu.c 		smaps_page_accumulate(mss, page, PAGE_SIZE, pss, dirty, locked,
page              527 fs/proc/task_mmu.c 	struct page *page = NULL;
page              530 fs/proc/task_mmu.c 		page = vm_normal_page(vma, addr, *pte);
page              548 fs/proc/task_mmu.c 			page = migration_entry_to_page(swpent);
page              550 fs/proc/task_mmu.c 			page = device_private_entry_to_page(swpent);
page              553 fs/proc/task_mmu.c 		page = find_get_entry(vma->vm_file->f_mapping,
page              555 fs/proc/task_mmu.c 		if (!page)
page              558 fs/proc/task_mmu.c 		if (xa_is_value(page))
page              561 fs/proc/task_mmu.c 			put_page(page);
page              566 fs/proc/task_mmu.c 	if (!page)
page              569 fs/proc/task_mmu.c 	smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte), locked);
page              579 fs/proc/task_mmu.c 	struct page *page;
page              582 fs/proc/task_mmu.c 	page = follow_trans_huge_pmd(vma, addr, pmd, FOLL_DUMP);
page              583 fs/proc/task_mmu.c 	if (IS_ERR_OR_NULL(page))
page              585 fs/proc/task_mmu.c 	if (PageAnon(page))
page              587 fs/proc/task_mmu.c 	else if (PageSwapBacked(page))
page              589 fs/proc/task_mmu.c 	else if (is_zone_device_page(page))
page              593 fs/proc/task_mmu.c 	smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd), locked);
page              713 fs/proc/task_mmu.c 	struct page *page = NULL;
page              716 fs/proc/task_mmu.c 		page = vm_normal_page(vma, addr, *pte);
page              721 fs/proc/task_mmu.c 			page = migration_entry_to_page(swpent);
page              723 fs/proc/task_mmu.c 			page = device_private_entry_to_page(swpent);
page              725 fs/proc/task_mmu.c 	if (page) {
page              726 fs/proc/task_mmu.c 		int mapcount = page_mapcount(page);
page             1055 fs/proc/task_mmu.c 	struct page *page;
page             1067 fs/proc/task_mmu.c 		page = pmd_page(*pmd);
page             1071 fs/proc/task_mmu.c 		test_and_clear_page_young(page);
page             1072 fs/proc/task_mmu.c 		ClearPageReferenced(page);
page             1093 fs/proc/task_mmu.c 		page = vm_normal_page(vma, addr, ptent);
page             1094 fs/proc/task_mmu.c 		if (!page)
page             1099 fs/proc/task_mmu.c 		test_and_clear_page_young(page);
page             1100 fs/proc/task_mmu.c 		ClearPageReferenced(page);
page             1328 fs/proc/task_mmu.c 	struct page *page = NULL;
page             1334 fs/proc/task_mmu.c 		page = vm_normal_page(vma, addr, pte);
page             1347 fs/proc/task_mmu.c 			page = migration_entry_to_page(entry);
page             1350 fs/proc/task_mmu.c 			page = device_private_entry_to_page(entry);
page             1353 fs/proc/task_mmu.c 	if (page && !PageAnon(page))
page             1355 fs/proc/task_mmu.c 	if (page && page_mapcount(page) == 1)
page             1377 fs/proc/task_mmu.c 		struct page *page = NULL;
page             1383 fs/proc/task_mmu.c 			page = pmd_page(pmd);
page             1407 fs/proc/task_mmu.c 			page = migration_entry_to_page(entry);
page             1411 fs/proc/task_mmu.c 		if (page && page_mapcount(page) == 1)
page             1472 fs/proc/task_mmu.c 		struct page *page = pte_page(pte);
page             1474 fs/proc/task_mmu.c 		if (!PageAnon(page))
page             1477 fs/proc/task_mmu.c 		if (page_mapcount(page) == 1)
page             1667 fs/proc/task_mmu.c static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
page             1670 fs/proc/task_mmu.c 	int count = page_mapcount(page);
page             1673 fs/proc/task_mmu.c 	if (pte_dirty || PageDirty(page))
page             1676 fs/proc/task_mmu.c 	if (PageSwapCache(page))
page             1679 fs/proc/task_mmu.c 	if (PageActive(page) || PageUnevictable(page))
page             1682 fs/proc/task_mmu.c 	if (PageWriteback(page))
page             1685 fs/proc/task_mmu.c 	if (PageAnon(page))
page             1691 fs/proc/task_mmu.c 	md->node[page_to_nid(page)] += nr_pages;
page             1694 fs/proc/task_mmu.c static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
page             1697 fs/proc/task_mmu.c 	struct page *page;
page             1703 fs/proc/task_mmu.c 	page = vm_normal_page(vma, addr, pte);
page             1704 fs/proc/task_mmu.c 	if (!page)
page             1707 fs/proc/task_mmu.c 	if (PageReserved(page))
page             1710 fs/proc/task_mmu.c 	nid = page_to_nid(page);
page             1714 fs/proc/task_mmu.c 	return page;
page             1718 fs/proc/task_mmu.c static struct page *can_gather_numa_stats_pmd(pmd_t pmd,
page             1722 fs/proc/task_mmu.c 	struct page *page;
page             1728 fs/proc/task_mmu.c 	page = vm_normal_page_pmd(vma, addr, pmd);
page             1729 fs/proc/task_mmu.c 	if (!page)
page             1732 fs/proc/task_mmu.c 	if (PageReserved(page))
page             1735 fs/proc/task_mmu.c 	nid = page_to_nid(page);
page             1739 fs/proc/task_mmu.c 	return page;
page             1755 fs/proc/task_mmu.c 		struct page *page;
page             1757 fs/proc/task_mmu.c 		page = can_gather_numa_stats_pmd(*pmd, vma, addr);
page             1758 fs/proc/task_mmu.c 		if (page)
page             1759 fs/proc/task_mmu.c 			gather_stats(page, md, pmd_dirty(*pmd),
page             1770 fs/proc/task_mmu.c 		struct page *page = can_gather_numa_stats(*pte, vma, addr);
page             1771 fs/proc/task_mmu.c 		if (!page)
page             1773 fs/proc/task_mmu.c 		gather_stats(page, md, pte_dirty(*pte), 1);
page             1786 fs/proc/task_mmu.c 	struct page *page;
page             1791 fs/proc/task_mmu.c 	page = pte_page(huge_pte);
page             1792 fs/proc/task_mmu.c 	if (!page)
page             1796 fs/proc/task_mmu.c 	gather_stats(page, md, pte_dirty(huge_pte), 1);
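The smaps_account() lines above are the proportional-set-size arithmetic: a page mapped into N address spaces charges PAGE_SIZE/N to each, and the division is carried in fixed point with PSS_SHIFT fractional bits so remainders are not lost (a 4 KiB page shared three ways contributes 4096/3 ≈ 1365.33 bytes, which only the shifted representation can hold). A sketch of the per-subpage loop, assuming the PSS_SHIFT value fs/proc/task_mmu.c uses:

#include <linux/mm.h>

#define PSS_SHIFT 12	/* matches fs/proc/task_mmu.c */

static u64 pss_for_pages(struct page *page, int nr)
{
	u64 pss = 0;
	int i;

	for (i = 0; i < nr; i++, page++) {
		u64 one = (u64)PAGE_SIZE << PSS_SHIFT;
		int mapcount = page_mapcount(page);

		pss += mapcount > 1 ? one / mapcount : one;
	}
	return pss;	/* report as bytes with pss >> PSS_SHIFT */
}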
page              417 fs/proc/vmcore.c 	struct page *page;
page              422 fs/proc/vmcore.c 	page = find_or_create_page(mapping, index, GFP_KERNEL);
page              423 fs/proc/vmcore.c 	if (!page)
page              425 fs/proc/vmcore.c 	if (!PageUptodate(page)) {
page              427 fs/proc/vmcore.c 		buf = __va((page_to_pfn(page) << PAGE_SHIFT));
page              430 fs/proc/vmcore.c 			unlock_page(page);
page              431 fs/proc/vmcore.c 			put_page(page);
page              434 fs/proc/vmcore.c 		SetPageUptodate(page);
page              436 fs/proc/vmcore.c 	unlock_page(page);
page              437 fs/proc/vmcore.c 	vmf->page = page;
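The fs/proc/vmcore.c fault path above is the standard find_or_create_page() fill: look the page up (or allocate it) locked, populate it only when !PageUptodate, publish with SetPageUptodate(), unlock, and hand the still-referenced page back to the fault. A sketch with the data source abstracted into a hypothetical fill() callback (vmcore itself copies the old kernel's memory via __va()):

#include <linux/pagemap.h>
#include <linux/mm.h>

static struct page *get_filled_page(struct address_space *mapping,
				    pgoff_t index,
				    int (*fill)(void *buf, pgoff_t index))
{
	struct page *page = find_or_create_page(mapping, index, GFP_KERNEL);

	if (!page)
		return NULL;
	if (!PageUptodate(page)) {
		/* GFP_KERNEL pages are lowmem, so page_address() is safe */
		if (fill(page_address(page), index)) {
			unlock_page(page);
			put_page(page);
			return NULL;
		}
		SetPageUptodate(page);
	}
	unlock_page(page);
	return page;	/* returned with a reference held, as vmf->page */
}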
page              404 fs/pstore/ram_core.c 	struct page **pages;
page              419 fs/pstore/ram_core.c 	pages = kmalloc_array(page_count, sizeof(struct page *), GFP_KERNEL);
page              249 fs/qnx4/inode.c static int qnx4_readpage(struct file *file, struct page *page)
page              251 fs/qnx4/inode.c 	return block_read_full_page(page,qnx4_get_block);
page               27 fs/qnx6/dir.c  static struct page *qnx6_get_page(struct inode *dir, unsigned long n)
page               30 fs/qnx6/dir.c  	struct page *page = read_mapping_page(mapping, n, NULL);
page               31 fs/qnx6/dir.c  	if (!IS_ERR(page))
page               32 fs/qnx6/dir.c  		kmap(page);
page               33 fs/qnx6/dir.c  	return page;
page               47 fs/qnx6/dir.c  					 struct page **p)
page               55 fs/qnx6/dir.c  	struct page *page = read_mapping_page(mapping, n, NULL);
page               56 fs/qnx6/dir.c  	if (IS_ERR(page))
page               57 fs/qnx6/dir.c  		return ERR_CAST(page);
page               58 fs/qnx6/dir.c  	kmap(*p = page);
page               59 fs/qnx6/dir.c  	return (struct qnx6_long_filename *)(page_address(page) + offs);
page               70 fs/qnx6/dir.c  	struct page *page;
page               79 fs/qnx6/dir.c  	lf = qnx6_longname(s, de, &page);
page               90 fs/qnx6/dir.c  		qnx6_put_page(page);
page              103 fs/qnx6/dir.c  		qnx6_put_page(page);
page              107 fs/qnx6/dir.c  	qnx6_put_page(page);
page              128 fs/qnx6/dir.c  		struct page *page = qnx6_get_page(inode, n);
page              133 fs/qnx6/dir.c  		if (IS_ERR(page)) {
page              136 fs/qnx6/dir.c  			return PTR_ERR(page);
page              138 fs/qnx6/dir.c  		de = ((struct qnx6_dir_entry *)page_address(page)) + start;
page              167 fs/qnx6/dir.c  		qnx6_put_page(page);
page              180 fs/qnx6/dir.c  	struct page *page;
page              182 fs/qnx6/dir.c  	struct qnx6_long_filename *lf = qnx6_longname(s, de, &page);
page              189 fs/qnx6/dir.c  		qnx6_put_page(page);
page              193 fs/qnx6/dir.c  		qnx6_put_page(page);
page              196 fs/qnx6/dir.c  	qnx6_put_page(page);
page              214 fs/qnx6/dir.c  			 struct page **res_page)
page              218 fs/qnx6/dir.c  	struct page *page = NULL;
page              235 fs/qnx6/dir.c  		page = qnx6_get_page(dir, n);
page              236 fs/qnx6/dir.c  		if (!IS_ERR(page)) {
page              240 fs/qnx6/dir.c  			de = (struct qnx6_dir_entry *)page_address(page);
page              259 fs/qnx6/dir.c  			qnx6_put_page(page);
page              268 fs/qnx6/dir.c  	*res_page = page;
page               97 fs/qnx6/inode.c static int qnx6_readpage(struct file *file, struct page *page)
page               99 fs/qnx6/inode.c 	return mpage_readpage(page, qnx6_get_block);
page              188 fs/qnx6/inode.c 	struct page *page = read_mapping_page(mapping, 0, NULL);
page              189 fs/qnx6/inode.c 	if (IS_ERR(page))
page              191 fs/qnx6/inode.c 	kmap(page);
page              192 fs/qnx6/inode.c 	dir_entry = page_address(page);
page              198 fs/qnx6/inode.c 	qnx6_put_page(page);
page              529 fs/qnx6/inode.c 	struct page *page;
page              551 fs/qnx6/inode.c 	page = read_mapping_page(mapping, n, NULL);
page              552 fs/qnx6/inode.c 	if (IS_ERR(page)) {
page              556 fs/qnx6/inode.c 		return ERR_CAST(page);
page              558 fs/qnx6/inode.c 	kmap(page);
page              559 fs/qnx6/inode.c 	raw_inode = ((struct qnx6_inode_entry *)page_address(page)) + offs;
page              592 fs/qnx6/inode.c 	qnx6_put_page(page);
page               20 fs/qnx6/namei.c 	struct page *page;
page               28 fs/qnx6/namei.c 	ino = qnx6_find_entry(len, dir, name, &page);
page               31 fs/qnx6/namei.c 		qnx6_put_page(page);
page              129 fs/qnx6/qnx6.h static inline void qnx6_put_page(struct page *page)
page              131 fs/qnx6/qnx6.h 	kunmap(page);
page              132 fs/qnx6/qnx6.h 	put_page(page);
page              136 fs/qnx6/qnx6.h 				struct page **res_page);
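qnx6_get_page()/qnx6_put_page() above are the classic kmap-wrapped directory accessors: read_mapping_page() plus kmap() on the way in, kunmap() plus put_page() on the way out. A sketch of the consumer loop such pairs exist for, with the per-page scan abstracted into a hypothetical visit() callback:

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>

static int walk_dir_pages(struct inode *dir,
			  int (*visit)(void *kaddr, unsigned long n))
{
	unsigned long npages = (i_size_read(dir) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long n;

	for (n = 0; n < npages; n++) {
		struct page *page = read_mapping_page(dir->i_mapping, n, NULL);
		int err;

		if (IS_ERR(page))
			return PTR_ERR(page);
		err = visit(kmap(page), n);	/* page is uptodate here */
		kunmap(page);
		put_page(page);
		if (err)
			return err;
	}
	return 0;
}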
page               65 fs/ramfs/file-nommu.c 	struct page *pages;
page              105 fs/ramfs/file-nommu.c 		struct page *page = pages + loop;
page              107 fs/ramfs/file-nommu.c 		ret = add_to_page_cache_lru(page, inode->i_mapping, loop,
page              113 fs/ramfs/file-nommu.c 		SetPageDirty(page);
page              114 fs/ramfs/file-nommu.c 		SetPageUptodate(page);
page              116 fs/ramfs/file-nommu.c 		unlock_page(page);
page              117 fs/ramfs/file-nommu.c 		put_page(page);
page              207 fs/ramfs/file-nommu.c 	struct page **pages = NULL, **ptr, *page;
page              223 fs/ramfs/file-nommu.c 	pages = kcalloc(lpages, sizeof(struct page *), GFP_KERNEL);
page              233 fs/ramfs/file-nommu.c 	page = *ptr++;
page              234 fs/ramfs/file-nommu.c 	page++;
page              236 fs/ramfs/file-nommu.c 		if (*ptr++ != page++)
page             1813 fs/read_write.c static struct page *vfs_dedupe_get_page(struct inode *inode, loff_t offset)
page             1815 fs/read_write.c 	struct page *page;
page             1817 fs/read_write.c 	page = read_mapping_page(inode->i_mapping, offset >> PAGE_SHIFT, NULL);
page             1818 fs/read_write.c 	if (IS_ERR(page))
page             1819 fs/read_write.c 		return page;
page             1820 fs/read_write.c 	if (!PageUptodate(page)) {
page             1821 fs/read_write.c 		put_page(page);
page             1824 fs/read_write.c 	return page;
page             1831 fs/read_write.c static void vfs_lock_two_pages(struct page *page1, struct page *page2)
page             1843 fs/read_write.c static void vfs_unlock_two_pages(struct page *page1, struct page *page2)
page             1862 fs/read_write.c 	struct page *src_page;
page             1863 fs/read_write.c 	struct page *dest_page;
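vfs_lock_two_pages()/vfs_unlock_two_pages() above exist because dedupe must hold the source and destination page locks simultaneously; taking them in ascending index order, and only once when both offsets land on the same page, is what rules out an ABBA deadlock between two concurrent dedupe requests running in opposite directions. A sketch of that ordering discipline:

#include <linux/kernel.h>
#include <linux/pagemap.h>

static void lock_two(struct page *page1, struct page *page2)
{
	/* Always lock in order of increasing index. */
	if (page1->index > page2->index)
		swap(page1, page2);

	lock_page(page1);
	if (page1 != page2)
		lock_page(page2);
}

static void unlock_two(struct page *page1, struct page *page2)
{
	unlock_page(page1);
	if (page1 != page2)
		unlock_page(page2);
}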
page              176 fs/reiserfs/file.c int reiserfs_commit_page(struct inode *inode, struct page *page,
page              201 fs/reiserfs/file.c 	for (bh = head = page_buffers(page), block_start = 0;
page              223 fs/reiserfs/file.c 				    (new || page->index >= i_size_index)) {
page              241 fs/reiserfs/file.c 		SetPageUptodate(page);
page               24 fs/reiserfs/inode.c int reiserfs_commit_write(struct file *f, struct page *page,
page              193 fs/reiserfs/inode.c static inline void fix_tail_page_for_writing(struct page *page)
page              197 fs/reiserfs/inode.c 	if (page && page_has_buffers(page)) {
page              198 fs/reiserfs/inode.c 		head = page_buffers(page);
page              583 fs/reiserfs/inode.c 	struct page *tail_page;
page              584 fs/reiserfs/inode.c 	struct page *hole_page = bh_result->b_page;
page             2187 fs/reiserfs/inode.c 			  struct page **page_result,
page             2202 fs/reiserfs/inode.c 	struct page *page;
page             2214 fs/reiserfs/inode.c 	page = grab_cache_page(inode->i_mapping, index);
page             2216 fs/reiserfs/inode.c 	if (!page) {
page             2222 fs/reiserfs/inode.c 	error = __block_write_begin(page, start, offset - start,
page             2227 fs/reiserfs/inode.c 	head = page_buffers(page);
page             2250 fs/reiserfs/inode.c 	*page_result = page;
page             2256 fs/reiserfs/inode.c 	unlock_page(page);
page             2257 fs/reiserfs/inode.c 	put_page(page);
page             2274 fs/reiserfs/inode.c 	struct page *page = NULL;
page             2282 fs/reiserfs/inode.c 		error = grab_tail_page(inode, &page, &bh);
page             2293 fs/reiserfs/inode.c 			page = NULL;
page             2323 fs/reiserfs/inode.c 	err2 = reiserfs_do_truncate(&th, inode, page, update_timestamps);
page             2340 fs/reiserfs/inode.c 	if (page) {
page             2345 fs/reiserfs/inode.c 			zero_user(page, offset, length);
page             2350 fs/reiserfs/inode.c 		unlock_page(page);
page             2351 fs/reiserfs/inode.c 		put_page(page);
page             2358 fs/reiserfs/inode.c 	if (page) {
page             2359 fs/reiserfs/inode.c 		unlock_page(page);
page             2360 fs/reiserfs/inode.c 		put_page(page);
page             2527 fs/reiserfs/inode.c static int reiserfs_write_full_page(struct page *page,
page             2530 fs/reiserfs/inode.c 	struct inode *inode = page->mapping->host;
page             2538 fs/reiserfs/inode.c 	int checked = PageChecked(page);
page             2546 fs/reiserfs/inode.c 		redirty_page_for_writepage(wbc, page);
page             2547 fs/reiserfs/inode.c 		unlock_page(page);
page             2557 fs/reiserfs/inode.c 	if (!page_has_buffers(page)) {
page             2558 fs/reiserfs/inode.c 		create_empty_buffers(page, s->s_blocksize,
page             2561 fs/reiserfs/inode.c 	head = page_buffers(page);
page             2567 fs/reiserfs/inode.c 	if (page->index >= end_index) {
page             2572 fs/reiserfs/inode.c 		if (page->index >= end_index + 1 || !last_offset) {
page             2573 fs/reiserfs/inode.c 			unlock_page(page);
page             2576 fs/reiserfs/inode.c 		zero_user_segment(page, last_offset, PAGE_SIZE);
page             2579 fs/reiserfs/inode.c 	block = page->index << (PAGE_SHIFT - s->s_blocksize_bits);
page             2615 fs/reiserfs/inode.c 		ClearPageChecked(page);
page             2645 fs/reiserfs/inode.c 				redirty_page_for_writepage(wbc, page);
page             2662 fs/reiserfs/inode.c 	BUG_ON(PageWriteback(page));
page             2663 fs/reiserfs/inode.c 	set_page_writeback(page);
page             2664 fs/reiserfs/inode.c 	unlock_page(page);
page             2699 fs/reiserfs/inode.c 			SetPageUptodate(page);
page             2700 fs/reiserfs/inode.c 		end_page_writeback(page);
page             2710 fs/reiserfs/inode.c 	ClearPageUptodate(page);
page             2726 fs/reiserfs/inode.c 	SetPageError(page);
page             2727 fs/reiserfs/inode.c 	BUG_ON(PageWriteback(page));
page             2728 fs/reiserfs/inode.c 	set_page_writeback(page);
page             2729 fs/reiserfs/inode.c 	unlock_page(page);
page             2743 fs/reiserfs/inode.c static int reiserfs_readpage(struct file *f, struct page *page)
page             2745 fs/reiserfs/inode.c 	return block_read_full_page(page, reiserfs_get_block);
page             2748 fs/reiserfs/inode.c static int reiserfs_writepage(struct page *page, struct writeback_control *wbc)
page             2750 fs/reiserfs/inode.c 	struct inode *inode = page->mapping->host;
page             2752 fs/reiserfs/inode.c 	return reiserfs_write_full_page(page, wbc);
page             2764 fs/reiserfs/inode.c 				struct page **pagep, void **fsdata)
page             2767 fs/reiserfs/inode.c 	struct page *page;
page             2781 fs/reiserfs/inode.c 	page = grab_cache_page_write_begin(mapping, index, flags);
page             2782 fs/reiserfs/inode.c 	if (!page)
page             2784 fs/reiserfs/inode.c 	*pagep = page;
page             2787 fs/reiserfs/inode.c 	fix_tail_page_for_writing(page);
page             2797 fs/reiserfs/inode.c 	ret = __block_write_begin(page, pos, len, reiserfs_get_block);
page             2827 fs/reiserfs/inode.c 		unlock_page(page);
page             2828 fs/reiserfs/inode.c 		put_page(page);
page             2835 fs/reiserfs/inode.c int __reiserfs_write_begin(struct page *page, unsigned from, unsigned len)
page             2837 fs/reiserfs/inode.c 	struct inode *inode = page->mapping->host;
page             2846 fs/reiserfs/inode.c 	fix_tail_page_for_writing(page);
page             2857 fs/reiserfs/inode.c 	ret = __block_write_begin(page, from, len, reiserfs_get_block);
page             2897 fs/reiserfs/inode.c 			      struct page *page, void *fsdata)
page             2899 fs/reiserfs/inode.c 	struct inode *inode = page->mapping->host;
page             2917 fs/reiserfs/inode.c 		if (!PageUptodate(page))
page             2920 fs/reiserfs/inode.c 		page_zero_new_buffers(page, start + copied, start + len);
page             2922 fs/reiserfs/inode.c 	flush_dcache_page(page);
page             2924 fs/reiserfs/inode.c 	reiserfs_commit_page(inode, page, start, start + copied);
page             2979 fs/reiserfs/inode.c 	unlock_page(page);
page             2980 fs/reiserfs/inode.c 	put_page(page);
page             2998 fs/reiserfs/inode.c int reiserfs_commit_write(struct file *f, struct page *page,
page             3001 fs/reiserfs/inode.c 	struct inode *inode = page->mapping->host;
page             3002 fs/reiserfs/inode.c 	loff_t pos = ((loff_t) page->index << PAGE_SHIFT) + to;
page             3015 fs/reiserfs/inode.c 	reiserfs_commit_page(inode, page, from, to);
page             3158 fs/reiserfs/inode.c static void reiserfs_invalidatepage(struct page *page, unsigned int offset,
page             3162 fs/reiserfs/inode.c 	struct inode *inode = page->mapping->host;
page             3168 fs/reiserfs/inode.c 	BUG_ON(!PageLocked(page));
page             3171 fs/reiserfs/inode.c 		ClearPageChecked(page);
page             3173 fs/reiserfs/inode.c 	if (!page_has_buffers(page))
page             3176 fs/reiserfs/inode.c 	head = page_buffers(page);
page             3204 fs/reiserfs/inode.c 		ret = try_to_release_page(page, 0);
page             3211 fs/reiserfs/inode.c static int reiserfs_set_page_dirty(struct page *page)
page             3213 fs/reiserfs/inode.c 	struct inode *inode = page->mapping->host;
page             3215 fs/reiserfs/inode.c 		SetPageChecked(page);
page             3216 fs/reiserfs/inode.c 		return __set_page_dirty_nobuffers(page);
page             3218 fs/reiserfs/inode.c 	return __set_page_dirty_buffers(page);
page             3230 fs/reiserfs/inode.c static int reiserfs_releasepage(struct page *page, gfp_t unused_gfp_flags)
page             3232 fs/reiserfs/inode.c 	struct inode *inode = page->mapping->host;
page             3238 fs/reiserfs/inode.c 	WARN_ON(PageChecked(page));
page             3240 fs/reiserfs/inode.c 	head = page_buffers(page);
page             3254 fs/reiserfs/inode.c 		ret = try_to_free_buffers(page);
page              161 fs/reiserfs/ioctl.c int reiserfs_commit_write(struct file *f, struct page *page,
page              172 fs/reiserfs/ioctl.c 	struct page *page;
page              209 fs/reiserfs/ioctl.c 	page = grab_cache_page(mapping, index);
page              211 fs/reiserfs/ioctl.c 	if (!page) {
page              214 fs/reiserfs/ioctl.c 	retval = __reiserfs_write_begin(page, write_from, 0);
page              219 fs/reiserfs/ioctl.c 	flush_dcache_page(page);
page              220 fs/reiserfs/ioctl.c 	retval = reiserfs_commit_write(NULL, page, write_from, write_from);
page              224 fs/reiserfs/ioctl.c 	unlock_page(page);
page              225 fs/reiserfs/ioctl.c 	put_page(page);
page              608 fs/reiserfs/journal.c 	struct page *page = bh->b_page;
page              609 fs/reiserfs/journal.c 	if (!page->mapping && trylock_page(page)) {
page              610 fs/reiserfs/journal.c 		get_page(page);
page              612 fs/reiserfs/journal.c 		if (!page->mapping)
page              613 fs/reiserfs/journal.c 			try_to_free_buffers(page);
page              614 fs/reiserfs/journal.c 		unlock_page(page);
page              615 fs/reiserfs/journal.c 		put_page(page);
page             4191 fs/reiserfs/journal.c 			struct page *page;
page             4199 fs/reiserfs/journal.c 			page = cn->bh->b_page;
page             4200 fs/reiserfs/journal.c 			addr = kmap(page);
page             4204 fs/reiserfs/journal.c 			kunmap(page);
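The first fs/reiserfs/journal.c entries above are an opportunistic cleanup: once the journal is finished with a buffer_head whose page has already been truncated out of its mapping, a non-blocking trylock_page() attempt is enough to strip the stale buffers; if the page is busy, it is simply left for normal reclaim. A sketch of that release path, with the buffer reference drop included:

#include <linux/buffer_head.h>
#include <linux/pagemap.h>

static void release_buffer_page(struct buffer_head *bh)
{
	struct page *page = bh->b_page;

	if (!page->mapping && trylock_page(page)) {
		get_page(page);		/* keep the page across put_bh() */
		put_bh(bh);
		if (!page->mapping)	/* recheck under the page lock */
			try_to_free_buffers(page);
		unlock_page(page);
		put_page(page);
	} else {
		put_bh(bh);
	}
}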
page             2918 fs/reiserfs/reiserfs.h int reiserfs_commit_page(struct inode *inode, struct page *page,
page             3032 fs/reiserfs/reiserfs.h 			   struct page *page, loff_t new_file_size);
page             3044 fs/reiserfs/reiserfs.h 			 struct inode *inode, struct page *,
page             3107 fs/reiserfs/reiserfs.h int __reiserfs_write_begin(struct page *page, unsigned from, unsigned len);
page             3173 fs/reiserfs/reiserfs.h 		    struct page *, struct treepath *, const struct cpu_key *,
page             1501 fs/reiserfs/stree.c static void unmap_buffers(struct page *page, loff_t pos)
page             1509 fs/reiserfs/stree.c 	if (page) {
page             1510 fs/reiserfs/stree.c 		if (page_has_buffers(page)) {
page             1513 fs/reiserfs/stree.c 			head = page_buffers(page);
page             1538 fs/reiserfs/stree.c 				    struct page *page,
page             1556 fs/reiserfs/stree.c 	    !page || (REISERFS_I(inode)->i_flags & i_nopack_mask)) {
page             1566 fs/reiserfs/stree.c 	return indirect2direct(th, inode, page, path, item_key,
page             1622 fs/reiserfs/stree.c 			   struct page *page, loff_t new_file_size)
page             1673 fs/reiserfs/stree.c 			    maybe_indirect_to_direct(th, inode, page,
page             1825 fs/reiserfs/stree.c 		unmap_buffers(page, tail_pos);
page             1860 fs/reiserfs/stree.c 			 struct page *page,	/* up to date for last block */
page             1953 fs/reiserfs/stree.c 					   inode, page, new_file_size);
page              203 fs/reiserfs/tail_conversion.c 		    struct inode *inode, struct page *page,
page              245 fs/reiserfs/tail_conversion.c 	tail = (char *)kmap(page);	/* this can schedule */
page              293 fs/reiserfs/tail_conversion.c 		kunmap(page);
page              296 fs/reiserfs/tail_conversion.c 	kunmap(page);
page              427 fs/reiserfs/xattr.c static inline void reiserfs_put_page(struct page *page)
page              429 fs/reiserfs/xattr.c 	kunmap(page);
page              430 fs/reiserfs/xattr.c 	put_page(page);
page              433 fs/reiserfs/xattr.c static struct page *reiserfs_get_page(struct inode *dir, size_t n)
page              436 fs/reiserfs/xattr.c 	struct page *page;
page              442 fs/reiserfs/xattr.c 	page = read_mapping_page(mapping, n >> PAGE_SHIFT, NULL);
page              443 fs/reiserfs/xattr.c 	if (!IS_ERR(page)) {
page              444 fs/reiserfs/xattr.c 		kmap(page);
page              445 fs/reiserfs/xattr.c 		if (PageError(page))
page              448 fs/reiserfs/xattr.c 	return page;
page              451 fs/reiserfs/xattr.c 	reiserfs_put_page(page);
page              469 fs/reiserfs/xattr.c int reiserfs_commit_write(struct file *f, struct page *page,
page              525 fs/reiserfs/xattr.c 	struct page *page;
page              557 fs/reiserfs/xattr.c 		page = reiserfs_get_page(d_inode(dentry), file_pos);
page              558 fs/reiserfs/xattr.c 		if (IS_ERR(page)) {
page              559 fs/reiserfs/xattr.c 			err = PTR_ERR(page);
page              563 fs/reiserfs/xattr.c 		lock_page(page);
page              564 fs/reiserfs/xattr.c 		data = page_address(page);
page              578 fs/reiserfs/xattr.c 		err = __reiserfs_write_begin(page, page_offset, chunk + skip);
page              582 fs/reiserfs/xattr.c 			err = reiserfs_commit_write(NULL, page, page_offset,
page              587 fs/reiserfs/xattr.c 		unlock_page(page);
page              588 fs/reiserfs/xattr.c 		reiserfs_put_page(page);
page              664 fs/reiserfs/xattr.c 	struct page *page;
page              708 fs/reiserfs/xattr.c 		page = reiserfs_get_page(d_inode(dentry), file_pos);
page              709 fs/reiserfs/xattr.c 		if (IS_ERR(page)) {
page              710 fs/reiserfs/xattr.c 			err = PTR_ERR(page);
page              714 fs/reiserfs/xattr.c 		lock_page(page);
page              715 fs/reiserfs/xattr.c 		data = page_address(page);
page              723 fs/reiserfs/xattr.c 				unlock_page(page);
page              724 fs/reiserfs/xattr.c 				reiserfs_put_page(page);
page              735 fs/reiserfs/xattr.c 		unlock_page(page);
page              736 fs/reiserfs/xattr.c 		reiserfs_put_page(page);
page              102 fs/romfs/super.c static int romfs_readpage(struct file *file, struct page *page)
page              104 fs/romfs/super.c 	struct inode *inode = page->mapping->host;
page              110 fs/romfs/super.c 	buf = kmap(page);
page              115 fs/romfs/super.c 	offset = page_offset(page);
page              127 fs/romfs/super.c 			SetPageError(page);
page              136 fs/romfs/super.c 		SetPageUptodate(page);
page              138 fs/romfs/super.c 	flush_dcache_page(page);
page              139 fs/romfs/super.c 	kunmap(page);
page              140 fs/romfs/super.c 	unlock_page(page);
page               50 fs/splice.c    	struct page *page = buf->page;
page               53 fs/splice.c    	lock_page(page);
page               55 fs/splice.c    	mapping = page_mapping(page);
page               57 fs/splice.c    		WARN_ON(!PageUptodate(page));
page               67 fs/splice.c    		wait_on_page_writeback(page);
page               69 fs/splice.c    		if (page_has_private(page) &&
page               70 fs/splice.c    		    !try_to_release_page(page, GFP_KERNEL))
page               77 fs/splice.c    		if (remove_mapping(mapping, page)) {
page               88 fs/splice.c    	unlock_page(page);
page               95 fs/splice.c    	put_page(buf->page);
page              106 fs/splice.c    	struct page *page = buf->page;
page              109 fs/splice.c    	if (!PageUptodate(page)) {
page              110 fs/splice.c    		lock_page(page);
page              116 fs/splice.c    		if (!page->mapping) {
page              124 fs/splice.c    		if (!PageUptodate(page)) {
page              132 fs/splice.c    		unlock_page(page);
page              137 fs/splice.c    	unlock_page(page);
page              203 fs/splice.c    		buf->page = spd->pages[page_nr];
page              261 fs/splice.c    	spd->pages = kmalloc_array(buffers, sizeof(struct page *), GFP_KERNEL);
page              371 fs/splice.c    	struct page **pages;
page              449 fs/splice.c    	return file->f_op->sendpage(file, buf->page, buf->offset,
page              739 fs/splice.c    			array[n].bv_page = buf->page;
page              793 fs/splice.c    	data = kmap(buf->page);
page              795 fs/splice.c    	kunmap(buf->page);
page             1220 fs/splice.c    		struct page *pages[16];
page             1234 fs/splice.c    				buf.page = pages[n];
page             1256 fs/splice.c    	int n = copy_page_to_iter(buf->page, buf->offset, sd->len, sd->u.data);
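The opening fs/splice.c entries describe page stealing: before a page-cache page spliced into a pipe can be handed over to a new owner, it must be locked, out of writeback, stripped of any private buffer state, and removed from its mapping. A condensed sketch of that test, with return conventions simplified (the real steal hook also special-cases pages it already owns outright):

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>

/* Returns 0 if the caller now owns @page, nonzero if it is busy. */
static int try_steal_page(struct page *page)
{
	struct address_space *mapping;
	int ret = 1;

	lock_page(page);
	mapping = page_mapping(page);
	if (mapping) {
		wait_on_page_writeback(page);
		if (page_has_private(page) &&
		    !try_to_release_page(page, GFP_KERNEL))
			goto out;
		if (remove_mapping(mapping, page))
			ret = 0;
	}
out:
	unlock_page(page);
	return ret;
}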
page              364 fs/squashfs/file.c void squashfs_fill_page(struct page *page, struct squashfs_cache_entry *buffer, int offset, int avail)
page              369 fs/squashfs/file.c 	pageaddr = kmap_atomic(page);
page              374 fs/squashfs/file.c 	flush_dcache_page(page);
page              376 fs/squashfs/file.c 		SetPageUptodate(page);
page              378 fs/squashfs/file.c 		SetPageError(page);
page              382 fs/squashfs/file.c void squashfs_copy_cache(struct page *page, struct squashfs_cache_entry *buffer,
page              385 fs/squashfs/file.c 	struct inode *inode = page->mapping->host;
page              388 fs/squashfs/file.c 	int start_index = page->index & ~mask, end_index = start_index | mask;
page              398 fs/squashfs/file.c 		struct page *push_page;
page              403 fs/squashfs/file.c 		push_page = (i == page->index) ? page :
page              404 fs/squashfs/file.c 			grab_cache_page_nowait(page->mapping, i);
page              415 fs/squashfs/file.c 		if (i != page->index)
page              421 fs/squashfs/file.c static int squashfs_readpage_fragment(struct page *page, int expected)
page              423 fs/squashfs/file.c 	struct inode *inode = page->mapping->host;
page              434 fs/squashfs/file.c 		squashfs_copy_cache(page, buffer, expected,
page              441 fs/squashfs/file.c static int squashfs_readpage_sparse(struct page *page, int expected)
page              443 fs/squashfs/file.c 	squashfs_copy_cache(page, NULL, expected, 0);
page              447 fs/squashfs/file.c static int squashfs_readpage(struct file *file, struct page *page)
page              449 fs/squashfs/file.c 	struct inode *inode = page->mapping->host;
page              451 fs/squashfs/file.c 	int index = page->index >> (msblk->block_log - PAGE_SHIFT);
page              460 fs/squashfs/file.c 				page->index, squashfs_i(inode)->start);
page              462 fs/squashfs/file.c 	if (page->index >= ((i_size_read(inode) + PAGE_SIZE - 1) >>
page              474 fs/squashfs/file.c 			res = squashfs_readpage_sparse(page, expected);
page              476 fs/squashfs/file.c 			res = squashfs_readpage_block(page, block, bsize, expected);
page              478 fs/squashfs/file.c 		res = squashfs_readpage_fragment(page, expected);
page              484 fs/squashfs/file.c 	SetPageError(page);
page              486 fs/squashfs/file.c 	pageaddr = kmap_atomic(page);
page              489 fs/squashfs/file.c 	flush_dcache_page(page);
page              490 fs/squashfs/file.c 	if (!PageError(page))
page              491 fs/squashfs/file.c 		SetPageUptodate(page);
page              492 fs/squashfs/file.c 	unlock_page(page);
page               21 fs/squashfs/file_cache.c int squashfs_readpage_block(struct page *page, u64 block, int bsize, int expected)
page               23 fs/squashfs/file_cache.c 	struct inode *i = page->mapping->host;
page               32 fs/squashfs/file_cache.c 		squashfs_copy_cache(page, buffer, expected, 0);
page               21 fs/squashfs/file_direct.c static int squashfs_read_cache(struct page *target_page, u64 block, int bsize,
page               22 fs/squashfs/file_direct.c 	int pages, struct page **page, int bytes);
page               25 fs/squashfs/file_direct.c int squashfs_readpage_block(struct page *target_page, u64 block, int bsize,
page               37 fs/squashfs/file_direct.c 	struct page **page;
page               46 fs/squashfs/file_direct.c 	page = kmalloc_array(pages, sizeof(void *), GFP_KERNEL);
page               47 fs/squashfs/file_direct.c 	if (page == NULL)
page               54 fs/squashfs/file_direct.c 	actor = squashfs_page_actor_init_special(page, pages, 0);
page               60 fs/squashfs/file_direct.c 		page[i] = (n == target_page->index) ? target_page :
page               63 fs/squashfs/file_direct.c 		if (page[i] == NULL) {
page               68 fs/squashfs/file_direct.c 		if (PageUptodate(page[i])) {
page               69 fs/squashfs/file_direct.c 			unlock_page(page[i]);
page               70 fs/squashfs/file_direct.c 			put_page(page[i]);
page               71 fs/squashfs/file_direct.c 			page[i] = NULL;
page               85 fs/squashfs/file_direct.c 							page, expected);
page              105 fs/squashfs/file_direct.c 		pageaddr = kmap_atomic(page[pages - 1]);
page              112 fs/squashfs/file_direct.c 		flush_dcache_page(page[i]);
page              113 fs/squashfs/file_direct.c 		SetPageUptodate(page[i]);
page              114 fs/squashfs/file_direct.c 		unlock_page(page[i]);
page              115 fs/squashfs/file_direct.c 		if (page[i] != target_page)
page              116 fs/squashfs/file_direct.c 			put_page(page[i]);
page              120 fs/squashfs/file_direct.c 	kfree(page);
page              129 fs/squashfs/file_direct.c 		if (page[i] == NULL || page[i] == target_page)
page              131 fs/squashfs/file_direct.c 		flush_dcache_page(page[i]);
page              132 fs/squashfs/file_direct.c 		SetPageError(page[i]);
page              133 fs/squashfs/file_direct.c 		unlock_page(page[i]);
page              134 fs/squashfs/file_direct.c 		put_page(page[i]);
page              139 fs/squashfs/file_direct.c 	kfree(page);
page              144 fs/squashfs/file_direct.c static int squashfs_read_cache(struct page *target_page, u64 block, int bsize,
page              145 fs/squashfs/file_direct.c 	int pages, struct page **page, int bytes)
page              162 fs/squashfs/file_direct.c 		if (page[n] == NULL)
page              165 fs/squashfs/file_direct.c 		squashfs_fill_page(page[n], buffer, offset, avail);
page              166 fs/squashfs/file_direct.c 		unlock_page(page[n]);
page              167 fs/squashfs/file_direct.c 		if (page[n] != target_page)
page              168 fs/squashfs/file_direct.c 			put_page(page[n]);
page               63 fs/squashfs/page_actor.c 	return actor->pageaddr = kmap_atomic(actor->page[0]);
page               72 fs/squashfs/page_actor.c 		kmap_atomic(actor->page[actor->next_page++]);
page               81 fs/squashfs/page_actor.c struct squashfs_page_actor *squashfs_page_actor_init_special(struct page **page,
page               90 fs/squashfs/page_actor.c 	actor->page = page;
page               11 fs/squashfs/page_actor.h 	void	**page;
page               17 fs/squashfs/page_actor.h static inline struct squashfs_page_actor *squashfs_page_actor_init(void **page,
page               26 fs/squashfs/page_actor.h 	actor->page = page;
page               35 fs/squashfs/page_actor.h 	return actor->page[0];
page               41 fs/squashfs/page_actor.h 		actor->page[actor->next_page++];
page               52 fs/squashfs/page_actor.h 		struct page	**page;
page               64 fs/squashfs/page_actor.h extern struct squashfs_page_actor *squashfs_page_actor_init_special(struct page
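The squashfs page actor above feeds the decompressor a run of destination pages one kmap_atomic() mapping at a time, so only a single atomic highmem slot is ever held while output is produced. A stripped-down version of the same walk, assuming the usual last-in/first-out kmap_atomic discipline (the caller must finish with one page before asking for the next):

#include <linux/highmem.h>

struct mini_actor {
	struct page **page;
	void *addr;
	int pages, next;
};

static void *mini_first_page(struct mini_actor *a)
{
	a->next = 1;
	return a->addr = kmap_atomic(a->page[0]);
}

/* Unmaps the current page; returns NULL once every page has been seen. */
static void *mini_next_page(struct mini_actor *a)
{
	kunmap_atomic(a->addr);
	if (a->next == a->pages)
		return a->addr = NULL;
	return a->addr = kmap_atomic(a->page[a->next++]);
}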
page               57 fs/squashfs/squashfs.h void squashfs_fill_page(struct page *, struct squashfs_cache_entry *, int, int);
page               58 fs/squashfs/squashfs.h void squashfs_copy_cache(struct page *, struct squashfs_cache_entry *, int,
page               62 fs/squashfs/squashfs.h extern int squashfs_readpage_block(struct page *, u64, int, int);
page               33 fs/squashfs/symlink.c static int squashfs_symlink_readpage(struct file *file, struct page *page)
page               35 fs/squashfs/symlink.c 	struct inode *inode = page->mapping->host;
page               38 fs/squashfs/symlink.c 	int index = page->index << PAGE_SHIFT;
page               47 fs/squashfs/symlink.c 			"%llx, offset %x\n", page->index, block, offset);
page               80 fs/squashfs/symlink.c 		pageaddr = kmap_atomic(page);
page               91 fs/squashfs/symlink.c 	flush_dcache_page(page);
page               92 fs/squashfs/symlink.c 	SetPageUptodate(page);
page               93 fs/squashfs/symlink.c 	unlock_page(page);
page               97 fs/squashfs/symlink.c 	SetPageError(page);
page               98 fs/squashfs/symlink.c 	unlock_page(page);
page               31 fs/sysv/dir.c  static inline void dir_put_page(struct page *page)
page               33 fs/sysv/dir.c  	kunmap(page);
page               34 fs/sysv/dir.c  	put_page(page);
page               37 fs/sysv/dir.c  static int dir_commit_chunk(struct page *page, loff_t pos, unsigned len)
page               39 fs/sysv/dir.c  	struct address_space *mapping = page->mapping;
page               43 fs/sysv/dir.c  	block_write_end(NULL, mapping, pos, len, len, page, NULL);
page               49 fs/sysv/dir.c  		err = write_one_page(page);
page               51 fs/sysv/dir.c  		unlock_page(page);
page               55 fs/sysv/dir.c  static struct page * dir_get_page(struct inode *dir, unsigned long n)
page               58 fs/sysv/dir.c  	struct page *page = read_mapping_page(mapping, n, NULL);
page               59 fs/sysv/dir.c  	if (!IS_ERR(page))
page               60 fs/sysv/dir.c  		kmap(page);
page               61 fs/sysv/dir.c  	return page;
page               83 fs/sysv/dir.c  		struct page *page = dir_get_page(inode, n);
page               85 fs/sysv/dir.c  		if (IS_ERR(page))
page               87 fs/sysv/dir.c  		kaddr = (char *)page_address(page);
page               99 fs/sysv/dir.c  				dir_put_page(page);
page              103 fs/sysv/dir.c  		dir_put_page(page);
page              127 fs/sysv/dir.c  struct sysv_dir_entry *sysv_find_entry(struct dentry *dentry, struct page **res_page)
page              134 fs/sysv/dir.c  	struct page *page = NULL;
page              146 fs/sysv/dir.c  		page = dir_get_page(dir, n);
page              147 fs/sysv/dir.c  		if (!IS_ERR(page)) {
page              148 fs/sysv/dir.c  			kaddr = (char*)page_address(page);
page              158 fs/sysv/dir.c  			dir_put_page(page);
page              169 fs/sysv/dir.c  	*res_page = page;
page              178 fs/sysv/dir.c  	struct page *page = NULL;
page              188 fs/sysv/dir.c  		page = dir_get_page(dir, n);
page              189 fs/sysv/dir.c  		err = PTR_ERR(page);
page              190 fs/sysv/dir.c  		if (IS_ERR(page))
page              192 fs/sysv/dir.c  		kaddr = (char*)page_address(page);
page              203 fs/sysv/dir.c  		dir_put_page(page);
page              209 fs/sysv/dir.c  	pos = page_offset(page) +
page              210 fs/sysv/dir.c  			(char*)de - (char*)page_address(page);
page              211 fs/sysv/dir.c  	lock_page(page);
page              212 fs/sysv/dir.c  	err = sysv_prepare_chunk(page, pos, SYSV_DIRSIZE);
page              218 fs/sysv/dir.c  	err = dir_commit_chunk(page, pos, SYSV_DIRSIZE);
page              222 fs/sysv/dir.c  	dir_put_page(page);
page              226 fs/sysv/dir.c  	unlock_page(page);
page              230 fs/sysv/dir.c  int sysv_delete_entry(struct sysv_dir_entry *de, struct page *page)
page              232 fs/sysv/dir.c  	struct inode *inode = page->mapping->host;
page              233 fs/sysv/dir.c  	char *kaddr = (char*)page_address(page);
page              234 fs/sysv/dir.c  	loff_t pos = page_offset(page) + (char *)de - kaddr;
page              237 fs/sysv/dir.c  	lock_page(page);
page              238 fs/sysv/dir.c  	err = sysv_prepare_chunk(page, pos, SYSV_DIRSIZE);
page              241 fs/sysv/dir.c  	err = dir_commit_chunk(page, pos, SYSV_DIRSIZE);
page              242 fs/sysv/dir.c  	dir_put_page(page);
page              250 fs/sysv/dir.c  	struct page *page = grab_cache_page(inode->i_mapping, 0);
page              255 fs/sysv/dir.c  	if (!page)
page              257 fs/sysv/dir.c  	err = sysv_prepare_chunk(page, 0, 2 * SYSV_DIRSIZE);
page              259 fs/sysv/dir.c  		unlock_page(page);
page              262 fs/sysv/dir.c  	kmap(page);
page              264 fs/sysv/dir.c  	base = (char*)page_address(page);
page              274 fs/sysv/dir.c  	kunmap(page);
page              275 fs/sysv/dir.c  	err = dir_commit_chunk(page, 0, 2 * SYSV_DIRSIZE);
page              277 fs/sysv/dir.c  	put_page(page);
page              287 fs/sysv/dir.c  	struct page *page = NULL;
page              293 fs/sysv/dir.c  		page = dir_get_page(inode, i);
page              295 fs/sysv/dir.c  		if (IS_ERR(page))
page              298 fs/sysv/dir.c  		kaddr = (char *)page_address(page);
page              317 fs/sysv/dir.c  		dir_put_page(page);
page              322 fs/sysv/dir.c  	dir_put_page(page);
page              327 fs/sysv/dir.c  void sysv_set_link(struct sysv_dir_entry *de, struct page *page,
page              330 fs/sysv/dir.c  	struct inode *dir = page->mapping->host;
page              331 fs/sysv/dir.c  	loff_t pos = page_offset(page) +
page              332 fs/sysv/dir.c  			(char *)de-(char*)page_address(page);
page              335 fs/sysv/dir.c  	lock_page(page);
page              336 fs/sysv/dir.c  	err = sysv_prepare_chunk(page, pos, SYSV_DIRSIZE);
page              339 fs/sysv/dir.c  	err = dir_commit_chunk(page, pos, SYSV_DIRSIZE);
page              340 fs/sysv/dir.c  	dir_put_page(page);
page              345 fs/sysv/dir.c  struct sysv_dir_entry * sysv_dotdot (struct inode *dir, struct page **p)
page              347 fs/sysv/dir.c  	struct page *page = dir_get_page(dir, 0);
page              350 fs/sysv/dir.c  	if (!IS_ERR(page)) {
page              351 fs/sysv/dir.c  		de = (struct sysv_dir_entry*) page_address(page) + 1;
page              352 fs/sysv/dir.c  		*p = page;
page              359 fs/sysv/dir.c  	struct page *page;
page              360 fs/sysv/dir.c  	struct sysv_dir_entry *de = sysv_find_entry (dentry, &page);
page              365 fs/sysv/dir.c  		dir_put_page(page);
page              454 fs/sysv/itree.c static int sysv_writepage(struct page *page, struct writeback_control *wbc)
page              456 fs/sysv/itree.c 	return block_write_full_page(page,get_block,wbc);
page              459 fs/sysv/itree.c static int sysv_readpage(struct file *file, struct page *page)
page              461 fs/sysv/itree.c 	return block_read_full_page(page,get_block);
page              464 fs/sysv/itree.c int sysv_prepare_chunk(struct page *page, loff_t pos, unsigned len)
page              466 fs/sysv/itree.c 	return __block_write_begin(page, pos, len, get_block);
page              481 fs/sysv/itree.c 			struct page **pagep, void **fsdata)
page              151 fs/sysv/namei.c 	struct page * page;
page              155 fs/sysv/namei.c 	de = sysv_find_entry(dentry, &page);
page              159 fs/sysv/namei.c 	err = sysv_delete_entry (de, page);
page              195 fs/sysv/namei.c 	struct page * dir_page = NULL;
page              197 fs/sysv/namei.c 	struct page * old_page;
page              216 fs/sysv/namei.c 		struct page * new_page;
page              137 fs/sysv/sysv.h extern int sysv_prepare_chunk(struct page *page, loff_t pos, unsigned len);
page              150 fs/sysv/sysv.h extern struct sysv_dir_entry *sysv_find_entry(struct dentry *, struct page **);
page              152 fs/sysv/sysv.h extern int sysv_delete_entry(struct sysv_dir_entry *, struct page *);
page              155 fs/sysv/sysv.h extern void sysv_set_link(struct sysv_dir_entry *, struct page *,
page              157 fs/sysv/sysv.h extern struct sysv_dir_entry *sysv_dotdot(struct inode *, struct page **);
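The fs/sysv/dir.c entries repeat one update cycle: lock the directory page, sysv_prepare_chunk() (a thin wrapper around __block_write_begin(), per the itree.c line above), modify the entry, then dir_commit_chunk() (block_write_end() followed by write_one_page() for synchronous directories, or just unlock_page()). A sketch of the whole cycle in a single helper, assuming a synchronous directory, a caller-supplied get_block, and a page that is mapped (lowmem or already kmap()ed, as dir_get_page() above arranges):

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/string.h>

static int update_chunk(struct page *page, loff_t pos, unsigned len,
			get_block_t *get_block, const void *src)
{
	int err;

	lock_page(page);
	err = __block_write_begin(page, pos, len, get_block);
	if (err) {
		unlock_page(page);
		return err;
	}
	memcpy(page_address(page) + (pos & (PAGE_SIZE - 1)), src, len);
	block_write_end(NULL, page->mapping, pos, len, len, page, NULL);
	return write_one_page(page);	/* writes synchronously and unlocks */
}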
page               99 fs/ubifs/file.c static int do_readpage(struct page *page)
page              105 fs/ubifs/file.c 	struct inode *inode = page->mapping->host;
page              110 fs/ubifs/file.c 		inode->i_ino, page->index, i_size, page->flags);
page              111 fs/ubifs/file.c 	ubifs_assert(c, !PageChecked(page));
page              112 fs/ubifs/file.c 	ubifs_assert(c, !PagePrivate(page));
page              114 fs/ubifs/file.c 	addr = kmap(page);
page              116 fs/ubifs/file.c 	block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
page              120 fs/ubifs/file.c 		SetPageChecked(page);
page              162 fs/ubifs/file.c 			SetPageChecked(page);
page              167 fs/ubifs/file.c 			  page->index, inode->i_ino, err);
page              174 fs/ubifs/file.c 	SetPageUptodate(page);
page              175 fs/ubifs/file.c 	ClearPageError(page);
page              176 fs/ubifs/file.c 	flush_dcache_page(page);
page              177 fs/ubifs/file.c 	kunmap(page);
page              182 fs/ubifs/file.c 	ClearPageUptodate(page);
page              183 fs/ubifs/file.c 	SetPageError(page);
page              184 fs/ubifs/file.c 	flush_dcache_page(page);
page              185 fs/ubifs/file.c 	kunmap(page);
page              218 fs/ubifs/file.c 			    loff_t pos, unsigned len, struct page **pagep,
page              226 fs/ubifs/file.c 	struct page *page;
page              247 fs/ubifs/file.c 	page = grab_cache_page_write_begin(mapping, index, flags);
page              248 fs/ubifs/file.c 	if (unlikely(!page)) {
page              253 fs/ubifs/file.c 	if (!PageUptodate(page)) {
page              255 fs/ubifs/file.c 			SetPageChecked(page);
page              257 fs/ubifs/file.c 			err = do_readpage(page);
page              259 fs/ubifs/file.c 				unlock_page(page);
page              260 fs/ubifs/file.c 				put_page(page);
page              266 fs/ubifs/file.c 		SetPageUptodate(page);
page              267 fs/ubifs/file.c 		ClearPageError(page);
page              270 fs/ubifs/file.c 	if (PagePrivate(page))
page              282 fs/ubifs/file.c 	else if (!PageChecked(page))
page              308 fs/ubifs/file.c 	*pagep = page;
page              325 fs/ubifs/file.c static int allocate_budget(struct ubifs_info *c, struct page *page,
page              330 fs/ubifs/file.c 	if (PagePrivate(page)) {
page              357 fs/ubifs/file.c 		if (PageChecked(page))
page              423 fs/ubifs/file.c 			     struct page **pagep, void **fsdata)
page              431 fs/ubifs/file.c 	struct page *page;
page              440 fs/ubifs/file.c 	page = grab_cache_page_write_begin(mapping, index, flags);
page              441 fs/ubifs/file.c 	if (unlikely(!page))
page              444 fs/ubifs/file.c 	if (!PageUptodate(page)) {
page              456 fs/ubifs/file.c 			SetPageChecked(page);
page              459 fs/ubifs/file.c 			err = do_readpage(page);
page              461 fs/ubifs/file.c 				unlock_page(page);
page              462 fs/ubifs/file.c 				put_page(page);
page              467 fs/ubifs/file.c 		SetPageUptodate(page);
page              468 fs/ubifs/file.c 		ClearPageError(page);
page              471 fs/ubifs/file.c 	err = allocate_budget(c, page, ui, appending);
page              479 fs/ubifs/file.c 			ClearPageChecked(page);
page              480 fs/ubifs/file.c 			ClearPageUptodate(page);
page              493 fs/ubifs/file.c 		unlock_page(page);
page              494 fs/ubifs/file.c 		put_page(page);
page              505 fs/ubifs/file.c 	*pagep = page;
page              520 fs/ubifs/file.c static void cancel_budget(struct ubifs_info *c, struct page *page,
page              528 fs/ubifs/file.c 	if (!PagePrivate(page)) {
page              529 fs/ubifs/file.c 		if (PageChecked(page))
page              538 fs/ubifs/file.c 			   struct page *page, void *fsdata)
page              547 fs/ubifs/file.c 		inode->i_ino, pos, page->index, len, copied, inode->i_size);
page              561 fs/ubifs/file.c 		cancel_budget(c, page, ui, appending);
page              562 fs/ubifs/file.c 		ClearPageChecked(page);
page              568 fs/ubifs/file.c 		copied = do_readpage(page);
page              572 fs/ubifs/file.c 	if (!PagePrivate(page)) {
page              573 fs/ubifs/file.c 		SetPagePrivate(page);
page              575 fs/ubifs/file.c 		__set_page_dirty_nobuffers(page);
page              592 fs/ubifs/file.c 	unlock_page(page);
page              593 fs/ubifs/file.c 	put_page(page);
page              606 fs/ubifs/file.c static int populate_page(struct ubifs_info *c, struct page *page,
page              610 fs/ubifs/file.c 	struct inode *inode = page->mapping->host;
page              617 fs/ubifs/file.c 		inode->i_ino, page->index, i_size, page->flags);
page              619 fs/ubifs/file.c 	addr = zaddr = kmap(page);
page              622 fs/ubifs/file.c 	if (!i_size || page->index > end_index) {
page              628 fs/ubifs/file.c 	page_block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
page              679 fs/ubifs/file.c 	if (end_index == page->index) {
page              688 fs/ubifs/file.c 		SetPageChecked(page);
page              692 fs/ubifs/file.c 	SetPageUptodate(page);
page              693 fs/ubifs/file.c 	ClearPageError(page);
page              694 fs/ubifs/file.c 	flush_dcache_page(page);
page              695 fs/ubifs/file.c 	kunmap(page);
page              700 fs/ubifs/file.c 	ClearPageUptodate(page);
page              701 fs/ubifs/file.c 	SetPageError(page);
page              702 fs/ubifs/file.c 	flush_dcache_page(page);
page              703 fs/ubifs/file.c 	kunmap(page);
page              718 fs/ubifs/file.c 			      struct page *page1)
page              785 fs/ubifs/file.c 		struct page *page;
page              789 fs/ubifs/file.c 		page = pagecache_get_page(mapping, page_offset,
page              792 fs/ubifs/file.c 		if (!page)
page              794 fs/ubifs/file.c 		if (!PageUptodate(page))
page              795 fs/ubifs/file.c 			err = populate_page(c, page, bu, &n);
page              796 fs/ubifs/file.c 		unlock_page(page);
page              797 fs/ubifs/file.c 		put_page(page);
page              827 fs/ubifs/file.c static int ubifs_bulk_read(struct page *page)
page              829 fs/ubifs/file.c 	struct inode *inode = page->mapping->host;
page              832 fs/ubifs/file.c 	pgoff_t index = page->index, last_page_read = ui->last_page_read;
page              880 fs/ubifs/file.c 		      page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT);
page              881 fs/ubifs/file.c 	err = ubifs_do_bulk_read(c, bu, page);
page              893 fs/ubifs/file.c static int ubifs_readpage(struct file *file, struct page *page)
page              895 fs/ubifs/file.c 	if (ubifs_bulk_read(page))
page              897 fs/ubifs/file.c 	do_readpage(page);
page              898 fs/ubifs/file.c 	unlock_page(page);
page              902 fs/ubifs/file.c static int do_writepage(struct page *page, int len)
page              908 fs/ubifs/file.c 	struct inode *inode = page->mapping->host;
page              914 fs/ubifs/file.c 	ubifs_assert(c, page->index <= ui->synced_i_size >> PAGE_SHIFT);
page              919 fs/ubifs/file.c 	set_page_writeback(page);
page              921 fs/ubifs/file.c 	addr = kmap(page);
page              922 fs/ubifs/file.c 	block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
page              937 fs/ubifs/file.c 		SetPageError(page);
page              939 fs/ubifs/file.c 			  page->index, inode->i_ino, err);
page              943 fs/ubifs/file.c 	ubifs_assert(c, PagePrivate(page));
page              944 fs/ubifs/file.c 	if (PageChecked(page))
page              950 fs/ubifs/file.c 	ClearPagePrivate(page);
page              951 fs/ubifs/file.c 	ClearPageChecked(page);
page              953 fs/ubifs/file.c 	kunmap(page);
page              954 fs/ubifs/file.c 	unlock_page(page);
page              955 fs/ubifs/file.c 	end_page_writeback(page);
page             1005 fs/ubifs/file.c static int ubifs_writepage(struct page *page, struct writeback_control *wbc)
page             1007 fs/ubifs/file.c 	struct inode *inode = page->mapping->host;
page             1016 fs/ubifs/file.c 		inode->i_ino, page->index, page->flags);
page             1017 fs/ubifs/file.c 	ubifs_assert(c, PagePrivate(page));
page             1020 fs/ubifs/file.c 	if (page->index > end_index || (page->index == end_index && !len)) {
page             1030 fs/ubifs/file.c 	if (page->index < end_index) {
page             1031 fs/ubifs/file.c 		if (page->index >= synced_i_size >> PAGE_SHIFT) {
page             1044 fs/ubifs/file.c 		return do_writepage(page, PAGE_SIZE);
page             1054 fs/ubifs/file.c 	kaddr = kmap_atomic(page);
page             1056 fs/ubifs/file.c 	flush_dcache_page(page);
page             1065 fs/ubifs/file.c 	return do_writepage(page, len);
page             1068 fs/ubifs/file.c 	unlock_page(page);
page             1146 fs/ubifs/file.c 		struct page *page;
page             1148 fs/ubifs/file.c 		page = find_lock_page(inode->i_mapping, index);
page             1149 fs/ubifs/file.c 		if (page) {
page             1150 fs/ubifs/file.c 			if (PageDirty(page)) {
page             1159 fs/ubifs/file.c 				ubifs_assert(c, PagePrivate(page));
page             1161 fs/ubifs/file.c 				clear_page_dirty_for_io(page);
page             1165 fs/ubifs/file.c 				err = do_writepage(page, offset);
page             1166 fs/ubifs/file.c 				put_page(page);
page             1179 fs/ubifs/file.c 				unlock_page(page);
page             1180 fs/ubifs/file.c 				put_page(page);
page             1289 fs/ubifs/file.c static void ubifs_invalidatepage(struct page *page, unsigned int offset,
page             1292 fs/ubifs/file.c 	struct inode *inode = page->mapping->host;
page             1295 fs/ubifs/file.c 	ubifs_assert(c, PagePrivate(page));
page             1300 fs/ubifs/file.c 	if (PageChecked(page))
page             1306 fs/ubifs/file.c 	ClearPagePrivate(page);
page             1307 fs/ubifs/file.c 	ClearPageChecked(page);
page             1447 fs/ubifs/file.c static int ubifs_set_page_dirty(struct page *page)
page             1450 fs/ubifs/file.c 	struct inode *inode = page->mapping->host;
page             1453 fs/ubifs/file.c 	ret = __set_page_dirty_nobuffers(page);
page             1464 fs/ubifs/file.c 		struct page *newpage, struct page *page, enum migrate_mode mode)
page             1468 fs/ubifs/file.c 	rc = migrate_page_move_mapping(mapping, newpage, page, 0);
page             1472 fs/ubifs/file.c 	if (PagePrivate(page)) {
page             1473 fs/ubifs/file.c 		ClearPagePrivate(page);
page             1478 fs/ubifs/file.c 		migrate_page_copy(newpage, page);
page             1480 fs/ubifs/file.c 		migrate_page_states(newpage, page);
page             1485 fs/ubifs/file.c static int ubifs_releasepage(struct page *page, gfp_t unused_gfp_flags)
page             1487 fs/ubifs/file.c 	struct inode *inode = page->mapping->host;
page             1494 fs/ubifs/file.c 	if (PageWriteback(page))
page             1496 fs/ubifs/file.c 	ubifs_assert(c, PagePrivate(page));
page             1498 fs/ubifs/file.c 	ClearPagePrivate(page);
page             1499 fs/ubifs/file.c 	ClearPageChecked(page);
page             1509 fs/ubifs/file.c 	struct page *page = vmf->page;
page             1516 fs/ubifs/file.c 	dbg_gen("ino %lu, pg %lu, i_size %lld",	inode->i_ino, page->index,
page             1557 fs/ubifs/file.c 	lock_page(page);
page             1558 fs/ubifs/file.c 	if (unlikely(page->mapping != inode->i_mapping ||
page             1559 fs/ubifs/file.c 		     page_offset(page) > i_size_read(inode))) {
page             1564 fs/ubifs/file.c 	if (PagePrivate(page))
page             1567 fs/ubifs/file.c 		if (!PageChecked(page))
page             1569 fs/ubifs/file.c 		SetPagePrivate(page);
page             1571 fs/ubifs/file.c 		__set_page_dirty_nobuffers(page);
page             1587 fs/ubifs/file.c 	wait_for_stable_page(page);
page             1591 fs/ubifs/file.c 	unlock_page(page);
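The fs/ubifs/file.c entries above trace the classic ->write_begin contract: grab a locked page-cache page, bring it uptodate (or mark it PageChecked when the write covers the whole page), budget, and hand the locked page back for ->write_end to unlock and put. A minimal sketch of that contract follows; it is not UBIFS's budgeted implementation, and the read-in of old contents is deliberately elided.

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>

/*
 * Sketch only: return a locked, prepared page for a write at @pos.
 * ->write_end is expected to unlock_page() and put_page() it, as the
 * entries above show.
 */
static int sketch_write_begin(struct file *file, struct address_space *mapping,
			      loff_t pos, unsigned len, unsigned flags,
			      struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_SHIFT;
	unsigned from = pos & (PAGE_SIZE - 1);
	struct page *page;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (unlikely(!page))
		return -ENOMEM;

	if (!PageUptodate(page) && len != PAGE_SIZE) {
		/* A real filesystem reads the old block contents here. */
		zero_user_segments(page, 0, from, from + len, PAGE_SIZE);
	}

	*pagep = page;
	return 0;
}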
page               41 fs/udf/file.c  static void __udf_adinicb_readpage(struct page *page)
page               43 fs/udf/file.c  	struct inode *inode = page->mapping->host;
page               52 fs/udf/file.c  	kaddr = kmap_atomic(page);
page               55 fs/udf/file.c  	flush_dcache_page(page);
page               56 fs/udf/file.c  	SetPageUptodate(page);
page               60 fs/udf/file.c  static int udf_adinicb_readpage(struct file *file, struct page *page)
page               62 fs/udf/file.c  	BUG_ON(!PageLocked(page));
page               63 fs/udf/file.c  	__udf_adinicb_readpage(page);
page               64 fs/udf/file.c  	unlock_page(page);
page               69 fs/udf/file.c  static int udf_adinicb_writepage(struct page *page,
page               72 fs/udf/file.c  	struct inode *inode = page->mapping->host;
page               76 fs/udf/file.c  	BUG_ON(!PageLocked(page));
page               78 fs/udf/file.c  	kaddr = kmap_atomic(page);
page               81 fs/udf/file.c  	SetPageUptodate(page);
page               84 fs/udf/file.c  	unlock_page(page);
page               91 fs/udf/file.c  			unsigned len, unsigned flags, struct page **pagep,
page               94 fs/udf/file.c  	struct page *page;
page               98 fs/udf/file.c  	page = grab_cache_page_write_begin(mapping, 0, flags);
page               99 fs/udf/file.c  	if (!page)
page              101 fs/udf/file.c  	*pagep = page;
page              103 fs/udf/file.c  	if (!PageUptodate(page))
page              104 fs/udf/file.c  		__udf_adinicb_readpage(page);
page              116 fs/udf/file.c  				 struct page *page, void *fsdata)
page              118 fs/udf/file.c  	struct inode *inode = page->mapping->host;
page              122 fs/udf/file.c  	set_page_dirty(page);
page              123 fs/udf/file.c  	unlock_page(page);
page              124 fs/udf/file.c  	put_page(page);
page              182 fs/udf/inode.c static int udf_writepage(struct page *page, struct writeback_control *wbc)
page              184 fs/udf/inode.c 	return block_write_full_page(page, udf_get_block, wbc);
page              193 fs/udf/inode.c static int udf_readpage(struct file *file, struct page *page)
page              195 fs/udf/inode.c 	return mpage_readpage(page, udf_get_block);
page              206 fs/udf/inode.c 			struct page **pagep, void **fsdata)
page              254 fs/udf/inode.c 	struct page *page;
page              281 fs/udf/inode.c 	page = find_or_create_page(inode->i_mapping, 0, GFP_NOFS);
page              282 fs/udf/inode.c 	if (!page)
page              285 fs/udf/inode.c 	if (!PageUptodate(page)) {
page              286 fs/udf/inode.c 		kaddr = kmap_atomic(page);
page              291 fs/udf/inode.c 		flush_dcache_page(page);
page              292 fs/udf/inode.c 		SetPageUptodate(page);
page              306 fs/udf/inode.c 	err = inode->i_data.a_ops->writepage(page, &udf_wbc);
page              309 fs/udf/inode.c 		lock_page(page);
page              311 fs/udf/inode.c 		kaddr = kmap_atomic(page);
page              315 fs/udf/inode.c 		unlock_page(page);
page              320 fs/udf/inode.c 	put_page(page);
page              104 fs/udf/symlink.c static int udf_symlink_filler(struct file *file, struct page *page)
page              106 fs/udf/symlink.c 	struct inode *inode = page->mapping->host;
page              110 fs/udf/symlink.c 	unsigned char *p = page_address(page);
page              143 fs/udf/symlink.c 	SetPageUptodate(page);
page              144 fs/udf/symlink.c 	unlock_page(page);
page              149 fs/udf/symlink.c 	SetPageError(page);
page              151 fs/udf/symlink.c 	unlock_page(page);
page              160 fs/udf/symlink.c 	struct page *page;
page              163 fs/udf/symlink.c 	page = read_mapping_page(inode->i_mapping, 0, NULL);
page              164 fs/udf/symlink.c 	if (IS_ERR(page))
page              165 fs/udf/symlink.c 		return PTR_ERR(page);
page              175 fs/udf/symlink.c 	stat->size = strlen(page_address(page));
page              176 fs/udf/symlink.c 	put_page(page);
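UDF's in-ICB (adinicb) files keep the file body inside the on-disk inode, so the fs/udf/file.c readpage above reduces to a copy under kmap_atomic() plus the usual uptodate/unlock dance. A compact sketch of the same inline-data pattern; sketch_inline_data() is a hypothetical accessor standing in for the filesystem-specific embedded-data pointer.

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/string.h>

void *sketch_inline_data(struct inode *inode);	/* hypothetical accessor */

static int sketch_inline_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	size_t isize = i_size_read(inode);
	char *kaddr;

	BUG_ON(!PageLocked(page));

	kaddr = kmap_atomic(page);
	memcpy(kaddr, sketch_inline_data(inode), isize);
	memset(kaddr + isize, 0, PAGE_SIZE - isize);	/* zero the tail */
	flush_dcache_page(page);
	kunmap_atomic(kaddr);

	SetPageUptodate(page);
	unlock_page(page);
	return 0;
}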
page              241 fs/ufs/balloc.c 			       sector_t newb, struct page *locked_page)
page              250 fs/ufs/balloc.c 	struct page *page;
page              267 fs/ufs/balloc.c 			page = ufs_get_locked_page(mapping, index);
page              268 fs/ufs/balloc.c 			if (!page)/* it was truncated */
page              270 fs/ufs/balloc.c 			if (IS_ERR(page)) {/* or EIO */
page              277 fs/ufs/balloc.c 			page = locked_page;
page              279 fs/ufs/balloc.c 		head = page_buffers(page);
page              320 fs/ufs/balloc.c 			ufs_put_locked_page(page);
page              346 fs/ufs/balloc.c 			   struct page *locked_page)
page               45 fs/ufs/dir.c   static int ufs_commit_chunk(struct page *page, loff_t pos, unsigned len)
page               47 fs/ufs/dir.c   	struct address_space *mapping = page->mapping;
page               52 fs/ufs/dir.c   	block_write_end(NULL, mapping, pos, len, len, page, NULL);
page               58 fs/ufs/dir.c   		err = write_one_page(page);
page               60 fs/ufs/dir.c   		unlock_page(page);
page               64 fs/ufs/dir.c   static inline void ufs_put_page(struct page *page)
page               66 fs/ufs/dir.c   	kunmap(page);
page               67 fs/ufs/dir.c   	put_page(page);
page               74 fs/ufs/dir.c   	struct page *page;
page               76 fs/ufs/dir.c   	de = ufs_find_entry(dir, qstr, &page);
page               79 fs/ufs/dir.c   		ufs_put_page(page);
page               87 fs/ufs/dir.c   		  struct page *page, struct inode *inode,
page               90 fs/ufs/dir.c   	loff_t pos = page_offset(page) +
page               91 fs/ufs/dir.c   			(char *) de - (char *) page_address(page);
page               95 fs/ufs/dir.c   	lock_page(page);
page               96 fs/ufs/dir.c   	err = ufs_prepare_chunk(page, pos, len);
page              102 fs/ufs/dir.c   	err = ufs_commit_chunk(page, pos, len);
page              103 fs/ufs/dir.c   	ufs_put_page(page);
page              110 fs/ufs/dir.c   static bool ufs_check_page(struct page *page)
page              112 fs/ufs/dir.c   	struct inode *dir = page->mapping->host;
page              114 fs/ufs/dir.c   	char *kaddr = page_address(page);
page              121 fs/ufs/dir.c   	if ((dir->i_size >> PAGE_SHIFT) == page->index) {
page              147 fs/ufs/dir.c   	SetPageChecked(page);
page              175 fs/ufs/dir.c   		   dir->i_ino, error, (page->index<<PAGE_SHIFT)+offs,
page              183 fs/ufs/dir.c   		   dir->i_ino, (page->index<<PAGE_SHIFT)+offs);
page              185 fs/ufs/dir.c   	SetPageError(page);
page              189 fs/ufs/dir.c   static struct page *ufs_get_page(struct inode *dir, unsigned long n)
page              192 fs/ufs/dir.c   	struct page *page = read_mapping_page(mapping, n, NULL);
page              193 fs/ufs/dir.c   	if (!IS_ERR(page)) {
page              194 fs/ufs/dir.c   		kmap(page);
page              195 fs/ufs/dir.c   		if (unlikely(!PageChecked(page))) {
page              196 fs/ufs/dir.c   			if (PageError(page) || !ufs_check_page(page))
page              200 fs/ufs/dir.c   	return page;
page              203 fs/ufs/dir.c   	ufs_put_page(page);
page              229 fs/ufs/dir.c   struct ufs_dir_entry *ufs_dotdot(struct inode *dir, struct page **p)
page              231 fs/ufs/dir.c   	struct page *page = ufs_get_page(dir, 0);
page              234 fs/ufs/dir.c   	if (!IS_ERR(page)) {
page              236 fs/ufs/dir.c   				    (struct ufs_dir_entry *)page_address(page));
page              237 fs/ufs/dir.c   		*p = page;
page              251 fs/ufs/dir.c   				     struct page **res_page)
page              259 fs/ufs/dir.c   	struct page *page = NULL;
page              278 fs/ufs/dir.c   		page = ufs_get_page(dir, n);
page              279 fs/ufs/dir.c   		if (!IS_ERR(page)) {
page              280 fs/ufs/dir.c   			kaddr = page_address(page);
page              288 fs/ufs/dir.c   			ufs_put_page(page);
page              297 fs/ufs/dir.c   	*res_page = page;
page              314 fs/ufs/dir.c   	struct page *page = NULL;
page              332 fs/ufs/dir.c   		page = ufs_get_page(dir, n);
page              333 fs/ufs/dir.c   		err = PTR_ERR(page);
page              334 fs/ufs/dir.c   		if (IS_ERR(page))
page              336 fs/ufs/dir.c   		lock_page(page);
page              337 fs/ufs/dir.c   		kaddr = page_address(page);
page              367 fs/ufs/dir.c   		unlock_page(page);
page              368 fs/ufs/dir.c   		ufs_put_page(page);
page              374 fs/ufs/dir.c   	pos = page_offset(page) +
page              375 fs/ufs/dir.c   			(char*)de - (char*)page_address(page);
page              376 fs/ufs/dir.c   	err = ufs_prepare_chunk(page, pos, rec_len);
page              393 fs/ufs/dir.c   	err = ufs_commit_chunk(page, pos, rec_len);
page              399 fs/ufs/dir.c   	ufs_put_page(page);
page              403 fs/ufs/dir.c   	unlock_page(page);
page              444 fs/ufs/dir.c   		struct page *page = ufs_get_page(inode, n);
page              446 fs/ufs/dir.c   		if (IS_ERR(page)) {
page              453 fs/ufs/dir.c   		kaddr = page_address(page);
page              479 fs/ufs/dir.c   					ufs_put_page(page);
page              485 fs/ufs/dir.c   		ufs_put_page(page);
page              496 fs/ufs/dir.c   		     struct page * page)
page              499 fs/ufs/dir.c   	char *kaddr = page_address(page);
page              525 fs/ufs/dir.c   		from = (char*)pde - (char*)page_address(page);
page              527 fs/ufs/dir.c   	pos = page_offset(page) + from;
page              528 fs/ufs/dir.c   	lock_page(page);
page              529 fs/ufs/dir.c   	err = ufs_prepare_chunk(page, pos, to - from);
page              534 fs/ufs/dir.c   	err = ufs_commit_chunk(page, pos, to - from);
page              538 fs/ufs/dir.c   	ufs_put_page(page);
page              547 fs/ufs/dir.c   	struct page *page = grab_cache_page(mapping, 0);
page              553 fs/ufs/dir.c   	if (!page)
page              556 fs/ufs/dir.c   	err = ufs_prepare_chunk(page, 0, chunk_size);
page              558 fs/ufs/dir.c   		unlock_page(page);
page              562 fs/ufs/dir.c   	kmap(page);
page              563 fs/ufs/dir.c   	base = (char*)page_address(page);
page              580 fs/ufs/dir.c   	kunmap(page);
page              582 fs/ufs/dir.c   	err = ufs_commit_chunk(page, 0, chunk_size);
page              584 fs/ufs/dir.c   	put_page(page);
page              594 fs/ufs/dir.c   	struct page *page = NULL;
page              600 fs/ufs/dir.c   		page = ufs_get_page(inode, i);
page              602 fs/ufs/dir.c   		if (IS_ERR(page))
page              605 fs/ufs/dir.c   		kaddr = page_address(page);
page              632 fs/ufs/dir.c   		ufs_put_page(page);
page              637 fs/ufs/dir.c   	ufs_put_page(page);
page              222 fs/ufs/inode.c 		  int *err, struct page *locked_page)
page              257 fs/ufs/inode.c 		  int *new, struct page *locked_page)
page              336 fs/ufs/inode.c 		  int *new, struct page *locked_page)
page              470 fs/ufs/inode.c static int ufs_writepage(struct page *page, struct writeback_control *wbc)
page              472 fs/ufs/inode.c 	return block_write_full_page(page,ufs_getfrag_block,wbc);
page              475 fs/ufs/inode.c static int ufs_readpage(struct file *file, struct page *page)
page              477 fs/ufs/inode.c 	return block_read_full_page(page,ufs_getfrag_block);
page              480 fs/ufs/inode.c int ufs_prepare_chunk(struct page *page, loff_t pos, unsigned len)
page              482 fs/ufs/inode.c 	return __block_write_begin(page, pos, len, ufs_getfrag_block);
page              499 fs/ufs/inode.c 			struct page **pagep, void **fsdata)
page              513 fs/ufs/inode.c 			struct page *page, void *fsdata)
page              517 fs/ufs/inode.c 	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
page             1056 fs/ufs/inode.c 	struct page *lastpage;
page              209 fs/ufs/namei.c 	struct page *page;
page              212 fs/ufs/namei.c 	de = ufs_find_entry(dir, &dentry->d_name, &page);
page              216 fs/ufs/namei.c 	err = ufs_delete_entry(dir, de, page);
page              249 fs/ufs/namei.c 	struct page *dir_page = NULL;
page              251 fs/ufs/namei.c 	struct page *old_page;
page              270 fs/ufs/namei.c 		struct page *new_page;
page               94 fs/ufs/ufs.h   			     unsigned, int *, struct page *);
page              105 fs/ufs/ufs.h   extern struct ufs_dir_entry *ufs_find_entry(struct inode *, const struct qstr *, struct page **);
page              106 fs/ufs/ufs.h   extern int ufs_delete_entry(struct inode *, struct ufs_dir_entry *, struct page *);
page              108 fs/ufs/ufs.h   extern struct ufs_dir_entry *ufs_dotdot(struct inode *, struct page **);
page              110 fs/ufs/ufs.h   			 struct page *page, struct inode *inode, bool update_times);
page              244 fs/ufs/util.c  struct page *ufs_get_locked_page(struct address_space *mapping,
page              248 fs/ufs/util.c  	struct page *page = find_lock_page(mapping, index);
page              249 fs/ufs/util.c  	if (!page) {
page              250 fs/ufs/util.c  		page = read_mapping_page(mapping, index, NULL);
page              252 fs/ufs/util.c  		if (IS_ERR(page)) {
page              256 fs/ufs/util.c  			return page;
page              259 fs/ufs/util.c  		lock_page(page);
page              261 fs/ufs/util.c  		if (unlikely(page->mapping == NULL)) {
page              263 fs/ufs/util.c  			unlock_page(page);
page              264 fs/ufs/util.c  			put_page(page);
page              268 fs/ufs/util.c  		if (!PageUptodate(page) || PageError(page)) {
page              269 fs/ufs/util.c  			unlock_page(page);
page              270 fs/ufs/util.c  			put_page(page);
page              279 fs/ufs/util.c  	if (!page_has_buffers(page))
page              280 fs/ufs/util.c  		create_empty_buffers(page, 1 << inode->i_blkbits, 0);
page              281 fs/ufs/util.c  	return page;
page              261 fs/ufs/util.h  extern int ufs_prepare_chunk(struct page *page, loff_t pos, unsigned len);
page              282 fs/ufs/util.h  extern struct page *ufs_get_locked_page(struct address_space *mapping,
page              284 fs/ufs/util.h  static inline void ufs_put_locked_page(struct page *page)
page              286 fs/ufs/util.h         unlock_page(page);
page              287 fs/ufs/util.h         put_page(page);
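The fs/ufs/util.c entries above show the locked-page lookup idiom behind ufs_get_locked_page(): try the page cache first, fall back to reading the page in, then re-lock and re-validate because the page may have been truncated while unlocked. A sketch of that idiom, with ufs's buffer-head attachment step elided:

#include <linux/fs.h>
#include <linux/pagemap.h>

/*
 * Sketch: returns a locked page, NULL if it was truncated away, or an
 * ERR_PTR on I/O error, matching the convention in the entries above.
 */
static struct page *sketch_get_locked_page(struct address_space *mapping,
					   pgoff_t index)
{
	struct page *page = find_lock_page(mapping, index);

	if (!page) {
		page = read_mapping_page(mapping, index, NULL);
		if (IS_ERR(page))
			return page;

		lock_page(page);
		if (unlikely(page->mapping == NULL)) {
			/* truncated out from under us */
			unlock_page(page);
			put_page(page);
			return NULL;
		}
		if (!PageUptodate(page) || PageError(page)) {
			unlock_page(page);
			put_page(page);
			return ERR_PTR(-EIO);
		}
	}
	return page;
}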
page               40 fs/verity/enable.c 		struct page *src_page;
page              123 fs/verity/fsverity_private.h 		       struct ahash_request *req, struct page *page, u8 *out);
page              186 fs/verity/hash_algs.c 		       struct ahash_request *req, struct page *page, u8 *out)
page              196 fs/verity/hash_algs.c 	sg_set_page(&sg, page, PAGE_SIZE, 0);
page               43 fs/verity/verify.c static void extract_hash(struct page *hpage, unsigned int hoffset,
page               87 fs/verity/verify.c 			struct ahash_request *req, struct page *data_page)
page               96 fs/verity/verify.c 	struct page *hpages[FS_VERITY_MAX_LEVELS];
page              113 fs/verity/verify.c 		struct page *hpage;
page              150 fs/verity/verify.c 		struct page *hpage = hpages[level - 1];
page              187 fs/verity/verify.c bool fsverity_verify_page(struct page *page)
page              189 fs/verity/verify.c 	struct inode *inode = page->mapping->host;
page              198 fs/verity/verify.c 	valid = verify_page(inode, vi, req, page);
page              237 fs/verity/verify.c 		struct page *page = bv->bv_page;
page              239 fs/verity/verify.c 		if (!PageError(page) && !verify_page(inode, vi, req, page))
page              240 fs/verity/verify.c 			SetPageError(page);
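The fs/verity/hash_algs.c entry above hands a page to the crypto layer through a one-entry scatterlist. A sketch of that step; the wait for asynchronous completion that the real code performs after crypto_ahash_digest() is elided here.

#include <crypto/hash.h>
#include <linux/scatterlist.h>

static int sketch_hash_page(struct ahash_request *req, struct page *page,
			    u8 *out)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, page, PAGE_SIZE, 0);	/* whole page, offset 0 */
	ahash_request_set_crypt(req, &sg, out, PAGE_SIZE);

	/* A real caller must also handle -EINPROGRESS/-EBUSY for async tfms. */
	return crypto_ahash_digest(req);
}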
page              122 fs/xfs/kmem.h  static inline struct page *
page              758 fs/xfs/xfs_aops.c 	struct page		*page,
page              786 fs/xfs/xfs_aops.c 	merged = __bio_try_merge_page(wpc->ioend->io_bio, page, len, poff,
page              795 fs/xfs/xfs_aops.c 		bio_add_page(wpc->ioend->io_bio, page, len, poff);
page              799 fs/xfs/xfs_aops.c 	wbc_account_cgroup_owner(wbc, page, len);
page              804 fs/xfs/xfs_aops.c 	struct page		*page,
page              808 fs/xfs/xfs_aops.c 	trace_xfs_invalidatepage(page->mapping->host, page, offset, length);
page              809 fs/xfs/xfs_aops.c 	iomap_invalidatepage(page, offset, length);
page              825 fs/xfs/xfs_aops.c 	struct page		*page)
page              827 fs/xfs/xfs_aops.c 	struct inode		*inode = page->mapping->host;
page              830 fs/xfs/xfs_aops.c 	loff_t			offset = page_offset(page);
page              839 fs/xfs/xfs_aops.c 			page, ip->i_ino, offset);
page              846 fs/xfs/xfs_aops.c 	xfs_vm_invalidatepage(page, 0, PAGE_SIZE);
page              870 fs/xfs/xfs_aops.c 	struct page		*page,
page              874 fs/xfs/xfs_aops.c 	struct iomap_page	*iop = to_iomap_page(page);
page              888 fs/xfs/xfs_aops.c 	for (i = 0, file_offset = page_offset(page);
page              899 fs/xfs/xfs_aops.c 		xfs_add_to_ioend(inode, file_offset, page, iop, wpc, wbc,
page              905 fs/xfs/xfs_aops.c 	ASSERT(PageLocked(page));
page              906 fs/xfs/xfs_aops.c 	ASSERT(!PageWriteback(page));
page              924 fs/xfs/xfs_aops.c 			xfs_aops_discard_page(page);
page              925 fs/xfs/xfs_aops.c 			ClearPageUptodate(page);
page              926 fs/xfs/xfs_aops.c 			unlock_page(page);
page              938 fs/xfs/xfs_aops.c 		set_page_writeback_keepwrite(page);
page              940 fs/xfs/xfs_aops.c 		clear_page_dirty_for_io(page);
page              941 fs/xfs/xfs_aops.c 		set_page_writeback(page);
page              944 fs/xfs/xfs_aops.c 	unlock_page(page);
page              965 fs/xfs/xfs_aops.c 		end_page_writeback(page);
page              967 fs/xfs/xfs_aops.c 	mapping_set_error(page->mapping, error);
page              980 fs/xfs/xfs_aops.c 	struct page		*page,
page              985 fs/xfs/xfs_aops.c 	struct inode		*inode = page->mapping->host;
page              990 fs/xfs/xfs_aops.c 	trace_xfs_writepage(inode, page, 0, 0);
page             1028 fs/xfs/xfs_aops.c 	if (page->index < end_index)
page             1029 fs/xfs/xfs_aops.c 		end_offset = (xfs_off_t)(page->index + 1) << PAGE_SHIFT;
page             1061 fs/xfs/xfs_aops.c 		if (page->index > end_index ||
page             1062 fs/xfs/xfs_aops.c 		    (page->index == end_index && offset_into_page == 0))
page             1073 fs/xfs/xfs_aops.c 		zero_user_segment(page, offset_into_page, PAGE_SIZE);
page             1079 fs/xfs/xfs_aops.c 	return xfs_writepage_map(wpc, wbc, inode, page, end_offset);
page             1082 fs/xfs/xfs_aops.c 	redirty_page_for_writepage(wbc, page);
page             1083 fs/xfs/xfs_aops.c 	unlock_page(page);
page             1089 fs/xfs/xfs_aops.c 	struct page		*page,
page             1095 fs/xfs/xfs_aops.c 	ret = xfs_do_writepage(page, wbc, &wpc);
page             1128 fs/xfs/xfs_aops.c 	struct page		*page,
page             1131 fs/xfs/xfs_aops.c 	trace_xfs_releasepage(page->mapping->host, page, 0, 0);
page             1132 fs/xfs/xfs_aops.c 	return iomap_releasepage(page, gfp_mask);
page             1161 fs/xfs/xfs_aops.c 	struct page		*page)
page             1163 fs/xfs/xfs_aops.c 	trace_xfs_vm_readpage(page->mapping->host, 1);
page             1164 fs/xfs/xfs_aops.c 	return iomap_readpage(page, &xfs_iomap_ops);
page               35 fs/xfs/xfs_bio_io.c 		struct page	*page = kmem_to_page(data);
page               39 fs/xfs/xfs_bio_io.c 		while (bio_add_page(bio, page, len, off) != len) {
page              277 fs/xfs/xfs_buf.c 			bp->b_pages = kmem_alloc(sizeof(struct page *) *
page              282 fs/xfs/xfs_buf.c 		memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
page              323 fs/xfs/xfs_buf.c 			struct page	*page = bp->b_pages[i];
page              325 fs/xfs/xfs_buf.c 			__free_page(page);
page              401 fs/xfs/xfs_buf.c 		struct page	*page;
page              404 fs/xfs/xfs_buf.c 		page = alloc_page(gfp_mask);
page              405 fs/xfs/xfs_buf.c 		if (unlikely(page == NULL)) {
page              433 fs/xfs/xfs_buf.c 		bp->b_pages[i] = page;
page             1510 fs/xfs/xfs_buf.c 	struct page		*page;
page             1516 fs/xfs/xfs_buf.c 	page = bp->b_pages[offset >> PAGE_SHIFT];
page             1517 fs/xfs/xfs_buf.c 	return page_address(page) + (offset & (PAGE_SIZE-1));
page             1530 fs/xfs/xfs_buf.c 		struct page	*page;
page             1535 fs/xfs/xfs_buf.c 		page = bp->b_pages[page_index];
page             1541 fs/xfs/xfs_buf.c 		memset(page_address(page) + page_offset, 0, csize);
page              157 fs/xfs/xfs_buf.h 	struct page		**b_pages;	/* array of page pointers */
page              158 fs/xfs/xfs_buf.h 	struct page		*b_page_array[XB_PAGES]; /* inline pages */
page              746 fs/xfs/xfs_file.c 	struct page		*page;
page              750 fs/xfs/xfs_file.c 	page = dax_layout_busy_page(inode->i_mapping);
page              751 fs/xfs/xfs_file.c 	if (!page)
page              755 fs/xfs/xfs_file.c 	return ___wait_var_event(&page->_refcount,
page              756 fs/xfs/xfs_file.c 			atomic_read(&page->_refcount) == 1, TASK_INTERRUPTIBLE,
page             1702 fs/xfs/xfs_log.c 		struct page	*page = kmem_to_page(data);
page             1706 fs/xfs/xfs_log.c 		WARN_ON_ONCE(bio_add_page(bio, page, len, off) != len);
page             1162 fs/xfs/xfs_trace.h 	TP_PROTO(struct inode *inode, struct page *page, unsigned long off,
page             1164 fs/xfs/xfs_trace.h 	TP_ARGS(inode, page, off, len),
page             1176 fs/xfs/xfs_trace.h 		__entry->pgoff = page_offset(page);
page             1193 fs/xfs/xfs_trace.h 	TP_PROTO(struct inode *inode, struct page *page, unsigned long off, \
page             1195 fs/xfs/xfs_trace.h 	TP_ARGS(inode, page, off, len))
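The fs/xfs/xfs_bio_io.c and xfs_log.c entries above both show the kmem-to-bio pattern: a kernel buffer is walked a page at a time and each page is attached with bio_add_page(). A sketch of that loop, assuming the buffer is not vmalloc'ed so virt_to_page() is valid:

#include <linux/bio.h>
#include <linux/mm.h>
#include <linux/kernel.h>

static void sketch_map_buffer_to_bio(struct bio *bio, void *data,
				     unsigned int count)
{
	char *p = data;

	while (count) {
		struct page *page = virt_to_page(p);
		unsigned int off = offset_in_page(p);
		unsigned int len = min_t(unsigned int, count, PAGE_SIZE - off);

		if (bio_add_page(bio, page, len, off) != len)
			break;	/* bio full; caller must chain a new one */

		p += len;
		count -= len;
	}
}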
page               49 include/asm-generic/cacheflush.h static inline void flush_dcache_page(struct page *page)
page               74 include/asm-generic/cacheflush.h 				     struct page *page)
page               81 include/asm-generic/cacheflush.h 					   struct page *page,
page              100 include/asm-generic/cacheflush.h #define copy_to_user_page(vma, page, vaddr, dst, src, len)	\
page              103 include/asm-generic/cacheflush.h 		flush_icache_user_range(vma, page, vaddr, len); \
page              108 include/asm-generic/cacheflush.h #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
page                5 include/asm-generic/hugetlb.h static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot)
page                7 include/asm-generic/hugetlb.h 	return mk_pte(page, pgprot);
page               34 include/asm-generic/memory_model.h #define __page_to_pfn(page)	((unsigned long)((page) - mem_map) + \
page               45 include/asm-generic/memory_model.h ({	const struct page *__pg = (pg);					\
page               55 include/asm-generic/memory_model.h #define __page_to_pfn(page)	(unsigned long)((page) - vmemmap)
page               63 include/asm-generic/memory_model.h ({	const struct page *__pg = (pg);				\
page               28 include/asm-generic/page.h #define clear_page(page)	memset((page), 0, PAGE_SIZE)
page               31 include/asm-generic/page.h #define clear_user_page(page, vaddr, pg)	clear_page(page)
page               49 include/asm-generic/page.h typedef struct page *pgtable_t;
page               85 include/asm-generic/page.h #define page_to_virt(page)	pfn_to_virt(page_to_pfn(page))
page               88 include/asm-generic/page.h #define page_to_phys(page)      ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
page               61 include/asm-generic/pgalloc.h 	struct page *pte;
page               99 include/asm-generic/pgalloc.h static inline void pte_free(struct mm_struct *mm, struct page *pte_page)
page              213 include/asm-generic/tlb.h 	struct page		*pages[0];
page              227 include/asm-generic/tlb.h extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
page              280 include/asm-generic/tlb.h 	struct page		*__pages[MMU_GATHER_BUNDLE];
page              414 include/asm-generic/tlb.h 					struct page *page, int page_size)
page              416 include/asm-generic/tlb.h 	if (__tlb_remove_page_size(tlb, page, page_size))
page              420 include/asm-generic/tlb.h static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
page              422 include/asm-generic/tlb.h 	return __tlb_remove_page_size(tlb, page, PAGE_SIZE);
page              429 include/asm-generic/tlb.h static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
page              431 include/asm-generic/tlb.h 	return tlb_remove_page_size(tlb, page, PAGE_SIZE);
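The asm-generic/tlb.h entries above define the mmu_gather batching that unmap paths use: page removals are queued between gather and finish so that one TLB flush covers the whole batch. A sketch of the calling convention (signatures as of this kernel generation), with the page-table walk itself elided:

#include <linux/mm.h>
#include <asm/tlb.h>

static void sketch_unmap_one(struct mm_struct *mm, unsigned long start,
			     unsigned long end, struct page *page)
{
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm, start, end);
	/* queues the page; it is freed after the batched TLB flush */
	tlb_remove_page(&tlb, page);
	tlb_finish_mmu(&tlb, start, end);
}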
page               91 include/crypto/algapi.h 			struct page *page;
page               96 include/crypto/algapi.h 			u8 *page;
page              107 include/crypto/algapi.h 	void *page;
page              120 include/crypto/algapi.h 		struct page *page;
page               61 include/crypto/if_alg.h 	struct page *pages[ALG_MAX_PAGES];
page              232 include/crypto/if_alg.h ssize_t af_alg_sendpage(struct socket *sock, struct page *page,
page               23 include/crypto/internal/hash.h 	struct page *pg;
page               37 include/crypto/internal/skcipher.h 			struct page *page;
page               42 include/crypto/internal/skcipher.h 			u8 *page;
page               55 include/crypto/internal/skcipher.h 	u8 *page;
page               54 include/crypto/scatterwalk.h static inline struct page *scatterwalk_page(struct scatter_walk *walk)
page               81 include/crypto/scatterwalk.h 		struct page *page;
page               83 include/crypto/scatterwalk.h 		page = sg_page(walk->sg) + ((walk->offset - 1) >> PAGE_SHIFT);
page               88 include/crypto/scatterwalk.h 		if (ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE && !PageSlab(page))
page               89 include/crypto/scatterwalk.h 			flush_dcache_page(page);
page               38 include/drm/drm_cache.h void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
page              386 include/drm/drm_gem.h struct page **drm_gem_get_pages(struct drm_gem_object *obj);
page              387 include/drm/drm_gem.h void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
page               37 include/drm/drm_gem_shmem_helper.h 	struct page **pages;
page              129 include/drm/drm_legacy.h 	struct page **pagelist;
page               93 include/drm/drm_prime.h struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages);
page              106 include/drm/drm_prime.h int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
page              252 include/drm/ttm/ttm_bo_api.h 	struct page *page;
page              737 include/drm/ttm/ttm_bo_api.h void *ttm_kmap_atomic_prot(struct page *page, pgprot_t prot);
page              427 include/drm/ttm/ttm_bo_driver.h 	struct page *dummy_read_page;
page               90 include/drm/ttm/ttm_memory.h 				     struct page *page, uint64_t size,
page               93 include/drm/ttm/ttm_memory.h 				     struct page *page, uint64_t size);
page               40 include/drm/ttm/ttm_set_memory.h static inline int ttm_set_pages_array_wb(struct page **pages, int addrinarray)
page               45 include/drm/ttm/ttm_set_memory.h static inline int ttm_set_pages_array_wc(struct page **pages, int addrinarray)
page               50 include/drm/ttm/ttm_set_memory.h static inline int ttm_set_pages_array_uc(struct page **pages, int addrinarray)
page               55 include/drm/ttm/ttm_set_memory.h static inline int ttm_set_pages_wb(struct page *page, int numpages)
page               57 include/drm/ttm/ttm_set_memory.h 	return set_pages_wb(page, numpages);
page               60 include/drm/ttm/ttm_set_memory.h static inline int ttm_set_pages_wc(struct page *page, int numpages)
page               62 include/drm/ttm/ttm_set_memory.h 	unsigned long addr = (unsigned long)page_address(page);
page               67 include/drm/ttm/ttm_set_memory.h static inline int ttm_set_pages_uc(struct page *page, int numpages)
page               69 include/drm/ttm/ttm_set_memory.h 	return set_pages_uc(page, numpages);
page               78 include/drm/ttm/ttm_set_memory.h static inline int ttm_set_pages_array_wb(struct page **pages, int addrinarray)
page               87 include/drm/ttm/ttm_set_memory.h static inline int ttm_set_pages_array_wc(struct page **pages, int addrinarray)
page               96 include/drm/ttm/ttm_set_memory.h static inline int ttm_set_pages_array_uc(struct page **pages, int addrinarray)
page              105 include/drm/ttm/ttm_set_memory.h static inline int ttm_set_pages_wb(struct page *page, int numpages)
page              110 include/drm/ttm/ttm_set_memory.h 		unmap_page_from_agp(page++);
page              116 include/drm/ttm/ttm_set_memory.h static inline int ttm_set_pages_array_wb(struct page **pages, int addrinarray)
page              121 include/drm/ttm/ttm_set_memory.h static inline int ttm_set_pages_array_wc(struct page **pages, int addrinarray)
page              126 include/drm/ttm/ttm_set_memory.h static inline int ttm_set_pages_array_uc(struct page **pages, int addrinarray)
page              131 include/drm/ttm/ttm_set_memory.h static inline int ttm_set_pages_wb(struct page *page, int numpages)
page              138 include/drm/ttm/ttm_set_memory.h static inline int ttm_set_pages_wc(struct page *page, int numpages)
page              143 include/drm/ttm/ttm_set_memory.h static inline int ttm_set_pages_uc(struct page *page, int numpages)
page              108 include/drm/ttm/ttm_tt.h 	struct page **pages;
page               73 include/linux/agp_backend.h 	struct page **pages;
page              119 include/linux/async_tx.h 		      enum dma_transaction_type tx_type, struct page **dst,
page              120 include/linux/async_tx.h 		      int dst_count, struct page **src, int src_count,
page              141 include/linux/async_tx.h 	struct page *page;
page              162 include/linux/async_tx.h async_xor(struct page *dest, struct page **src_list, unsigned int offset,
page              166 include/linux/async_tx.h async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
page              171 include/linux/async_tx.h async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
page              178 include/linux/async_tx.h async_gen_syndrome(struct page **blocks, unsigned int offset, int src_cnt,
page              182 include/linux/async_tx.h async_syndrome_val(struct page **blocks, unsigned int offset, int src_cnt,
page              183 include/linux/async_tx.h 		   size_t len, enum sum_check_flags *pqres, struct page *spare,
page              188 include/linux/async_tx.h 			struct page **ptrs, struct async_submit_ctl *submit);
page              192 include/linux/async_tx.h 			struct page **ptrs, struct async_submit_ctl *submit);
page              203 include/linux/atmdev.h 	int (*proc_read)(struct atm_dev *dev,loff_t *pos,char *page);
page               17 include/linux/backing-dev-defs.h struct page;
page               37 include/linux/badblocks.h 	u64 *page;		/* badblock list */
page               50 include/linux/badblocks.h ssize_t badblocks_show(struct badblocks *bb, char *page, int unack);
page               51 include/linux/badblocks.h ssize_t badblocks_store(struct badblocks *bb, const char *page, size_t len,
page               58 include/linux/balloon_compaction.h 	int (*migratepage)(struct balloon_dev_info *, struct page *newpage,
page               59 include/linux/balloon_compaction.h 			struct page *page, enum migrate_mode mode);
page               63 include/linux/balloon_compaction.h extern struct page *balloon_page_alloc(void);
page               65 include/linux/balloon_compaction.h 				 struct page *page);
page               66 include/linux/balloon_compaction.h extern struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info);
page               83 include/linux/balloon_compaction.h extern bool balloon_page_isolate(struct page *page,
page               85 include/linux/balloon_compaction.h extern void balloon_page_putback(struct page *page);
page               87 include/linux/balloon_compaction.h 				struct page *newpage,
page               88 include/linux/balloon_compaction.h 				struct page *page, enum migrate_mode mode);
page              100 include/linux/balloon_compaction.h 				       struct page *page)
page              102 include/linux/balloon_compaction.h 	__SetPageOffline(page);
page              103 include/linux/balloon_compaction.h 	__SetPageMovable(page, balloon->inode->i_mapping);
page              104 include/linux/balloon_compaction.h 	set_page_private(page, (unsigned long)balloon);
page              105 include/linux/balloon_compaction.h 	list_add(&page->lru, &balloon->pages);
page              116 include/linux/balloon_compaction.h static inline void balloon_page_delete(struct page *page)
page              118 include/linux/balloon_compaction.h 	__ClearPageOffline(page);
page              119 include/linux/balloon_compaction.h 	__ClearPageMovable(page);
page              120 include/linux/balloon_compaction.h 	set_page_private(page, 0);
page              125 include/linux/balloon_compaction.h 	if (!PageIsolated(page))
page              126 include/linux/balloon_compaction.h 		list_del(&page->lru);
page              133 include/linux/balloon_compaction.h static inline struct balloon_dev_info *balloon_page_device(struct page *page)
page              135 include/linux/balloon_compaction.h 	return (struct balloon_dev_info *)page_private(page);
page              146 include/linux/balloon_compaction.h 				       struct page *page)
page              148 include/linux/balloon_compaction.h 	__SetPageOffline(page);
page              149 include/linux/balloon_compaction.h 	list_add(&page->lru, &balloon->pages);
page              152 include/linux/balloon_compaction.h static inline void balloon_page_delete(struct page *page)
page              154 include/linux/balloon_compaction.h 	__ClearPageOffline(page);
page              155 include/linux/balloon_compaction.h 	list_del(&page->lru);
page              158 include/linux/balloon_compaction.h static inline bool balloon_page_isolate(struct page *page)
page              163 include/linux/balloon_compaction.h static inline void balloon_page_putback(struct page *page)
page              168 include/linux/balloon_compaction.h static inline int balloon_page_migrate(struct page *newpage,
page              169 include/linux/balloon_compaction.h 				struct page *page, enum migrate_mode mode)
page              188 include/linux/balloon_compaction.h static inline void balloon_page_push(struct list_head *pages, struct page *page)
page              190 include/linux/balloon_compaction.h 	list_add(&page->lru, pages);
page              200 include/linux/balloon_compaction.h static inline struct page *balloon_page_pop(struct list_head *pages)
page              202 include/linux/balloon_compaction.h 	struct page *page = list_first_entry_or_null(pages, struct page, lru);
page              204 include/linux/balloon_compaction.h 	if (!page)
page              207 include/linux/balloon_compaction.h 	list_del(&page->lru);
page              208 include/linux/balloon_compaction.h 	return page;
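Putting the balloon_compaction.h helpers above together, a driver-side inflate step is just allocate-and-enqueue; the helpers take care of the lru list, the Offline/Movable page flags, and page_private. A sketch, with the device-specific "tell the host about this page" step elided:

#include <linux/balloon_compaction.h>

static int sketch_balloon_inflate_one(struct balloon_dev_info *b_dev_info)
{
	struct page *page = balloon_page_alloc();

	if (!page)
		return -ENOMEM;

	balloon_page_enqueue(b_dev_info, page);
	return 0;
}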
page               23 include/linux/binfmts.h 	struct page *page[MAX_ARG_PAGES];
page              285 include/linux/bio.h static inline struct page *bio_first_page_all(struct bio *bio)
page              435 include/linux/bio.h extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int);
page              436 include/linux/bio.h extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
page              438 include/linux/bio.h bool __bio_try_merge_page(struct bio *bio, struct page *page,
page              440 include/linux/bio.h void __bio_add_page(struct bio *bio, struct page *page,
page              505 include/linux/bio.h void bio_associate_blkg_from_page(struct bio *bio, struct page *page);
page              508 include/linux/bio.h 						struct page *page) { }
page              751 include/linux/bio.h extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
page              815 include/linux/bio.h static inline int bio_integrity_add_page(struct bio *bio, struct page *page,
page               16 include/linux/blk_types.h struct page;
page              805 include/linux/blkdev.h 	struct page **pages;
page             1212 include/linux/blkdev.h 		sector_t nr_sects, gfp_t gfp_mask, struct page *page);
page             1490 include/linux/blkdev.h typedef struct {struct page *v;} Sector;
page             1699 include/linux/blkdev.h 	int (*rw_page)(struct block_device *, sector_t, struct page *, unsigned int);
page             1719 include/linux/blkdev.h extern int bdev_read_page(struct block_device *, sector_t, struct page *);
page             1720 include/linux/blkdev.h extern int bdev_write_page(struct block_device *, sector_t, struct page *,
page               49 include/linux/buffer_head.h struct page;
page               66 include/linux/buffer_head.h 	struct page *b_page;		/* the page this bh is mapped to */
page              141 include/linux/buffer_head.h #define page_buffers(page)					\
page              143 include/linux/buffer_head.h 		BUG_ON(!PagePrivate(page));			\
page              144 include/linux/buffer_head.h 		((struct buffer_head *)page_private(page));	\
page              146 include/linux/buffer_head.h #define page_has_buffers(page)	PagePrivate(page)
page              148 include/linux/buffer_head.h void buffer_check_dirty_writeback(struct page *page,
page              159 include/linux/buffer_head.h 		struct page *page, unsigned long offset);
page              160 include/linux/buffer_head.h int try_to_free_buffers(struct page *);
page              161 include/linux/buffer_head.h struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
page              163 include/linux/buffer_head.h void create_empty_buffers(struct page *, unsigned long,
page              217 include/linux/buffer_head.h void block_invalidatepage(struct page *page, unsigned int offset,
page              219 include/linux/buffer_head.h int block_write_full_page(struct page *page, get_block_t *get_block,
page              221 include/linux/buffer_head.h int __block_write_full_page(struct inode *inode, struct page *page,
page              224 include/linux/buffer_head.h int block_read_full_page(struct page*, get_block_t*);
page              225 include/linux/buffer_head.h int block_is_partially_uptodate(struct page *page, unsigned long from,
page              228 include/linux/buffer_head.h 		unsigned flags, struct page **pagep, get_block_t *get_block);
page              229 include/linux/buffer_head.h int __block_write_begin(struct page *page, loff_t pos, unsigned len,
page              233 include/linux/buffer_head.h 				struct page *, void *);
page              236 include/linux/buffer_head.h 				struct page *, void *);
page              237 include/linux/buffer_head.h void page_zero_new_buffers(struct page *page, unsigned from, unsigned to);
page              238 include/linux/buffer_head.h void clean_page_buffers(struct page *page);
page              240 include/linux/buffer_head.h 			unsigned, unsigned, struct page **, void **,
page              243 include/linux/buffer_head.h int block_commit_write(struct page *page, unsigned from, unsigned to);
page              261 include/linux/buffer_head.h 				struct page **, void **, get_block_t*);
page              264 include/linux/buffer_head.h 				struct page *, void *);
page              266 include/linux/buffer_head.h int nobh_writepage(struct page *page, get_block_t *get_block,
page              275 include/linux/buffer_head.h static inline void attach_page_buffers(struct page *page,
page              278 include/linux/buffer_head.h 	get_page(page);
page              279 include/linux/buffer_head.h 	SetPagePrivate(page);
page              280 include/linux/buffer_head.h 	set_page_private(page, (unsigned long)head);
page              407 include/linux/buffer_head.h extern int __set_page_dirty_buffers(struct page *page);
page              412 include/linux/buffer_head.h static inline int try_to_free_buffers(struct page *page) { return 1; }
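The buffer_head.h declarations above are the glue that let the simple block filesystems earlier in this listing (ufs, udf) implement their aops in one line each: the filesystem supplies only a get_block callback and delegates the page work to fs/buffer.c. A sketch of that delegation; sketch_get_block is a hypothetical filesystem callback matching get_block_t.

#include <linux/fs.h>
#include <linux/buffer_head.h>

static int sketch_get_block(struct inode *inode, sector_t iblock,
			    struct buffer_head *bh_result, int create);

static int sketch_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, sketch_get_block);
}

static int sketch_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, sketch_get_block, wbc);
}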
page               19 include/linux/bvec.h 	struct page	*bv_page;
page              304 include/linux/ceph/libceph.h extern void ceph_release_page_vector(struct page **pages, int num_pages);
page              305 include/linux/ceph/libceph.h extern void ceph_put_page_vector(struct page **pages, int num_pages,
page              307 include/linux/ceph/libceph.h extern struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags);
page              308 include/linux/ceph/libceph.h extern int ceph_copy_user_to_page_vector(struct page **pages,
page              311 include/linux/ceph/libceph.h extern void ceph_copy_to_page_vector(struct page **pages,
page              314 include/linux/ceph/libceph.h extern void ceph_copy_from_page_vector(struct page **pages,
page              317 include/linux/ceph/libceph.h extern void ceph_zero_page_vector_range(int off, int len, struct page **pages);
page              178 include/linux/ceph/messenger.h 			struct page	**pages;
page              205 include/linux/ceph/messenger.h 			struct page	*page;		/* page from list */
page              360 include/linux/ceph/messenger.h void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages,
page               67 include/linux/ceph/osd_client.h 			struct page	**pages;
page              285 include/linux/ceph/osd_client.h 	struct page ***preply_pages;
page              407 include/linux/ceph/osd_client.h 					struct page **pages, u64 length,
page              426 include/linux/ceph/osd_client.h 					struct page **pages, u64 length,
page              451 include/linux/ceph/osd_client.h 					struct page **pages, u64 length,
page              460 include/linux/ceph/osd_client.h 					struct page **pages, u64 length,
page              509 include/linux/ceph/osd_client.h 		   struct page *req_page, size_t req_len,
page              510 include/linux/ceph/osd_client.h 		   struct page **resp_pages, size_t *resp_len);
page              517 include/linux/ceph/osd_client.h 			       struct page **pages, int nr_pages,
page              527 include/linux/ceph/osd_client.h 				struct page **pages, int nr_pages);
page              563 include/linux/ceph/osd_client.h 		     struct page ***preply_pages,
page               33 include/linux/cleancache.h 			pgoff_t, struct page *);
page               35 include/linux/cleancache.h 			pgoff_t, struct page *);
page               44 include/linux/cleancache.h extern int  __cleancache_get_page(struct page *);
page               45 include/linux/cleancache.h extern void __cleancache_put_page(struct page *);
page               46 include/linux/cleancache.h extern void __cleancache_invalidate_page(struct address_space *, struct page *);
page               56 include/linux/cleancache.h static inline bool cleancache_fs_enabled(struct page *page)
page               58 include/linux/cleancache.h 	return cleancache_fs_enabled_mapping(page->mapping);
page               91 include/linux/cleancache.h static inline int cleancache_get_page(struct page *page)
page               93 include/linux/cleancache.h 	if (cleancache_enabled && cleancache_fs_enabled(page))
page               94 include/linux/cleancache.h 		return __cleancache_get_page(page);
page               98 include/linux/cleancache.h static inline void cleancache_put_page(struct page *page)
page              100 include/linux/cleancache.h 	if (cleancache_enabled && cleancache_fs_enabled(page))
page              101 include/linux/cleancache.h 		__cleancache_put_page(page);
page              105 include/linux/cleancache.h 					struct page *page)
page              109 include/linux/cleancache.h 		__cleancache_invalidate_page(mapping, page);
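The cleancache.h hooks above sit in front of real read I/O: a filesystem asks cleancache for the page first, and only falls back to the device when the lookup misses (a return of 0 means the page was filled). A sketch of that ordering, reusing the hypothetical buffer_head glue from the sketch above; ->readpage is entered with the page locked, which is what the hook expects.

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/cleancache.h>

static int sketch_readpage_with_cleancache(struct file *file,
					   struct page *page)
{
	if (cleancache_get_page(page) == 0) {
		SetPageUptodate(page);
		unlock_page(page);
		return 0;
	}
	/* miss: fall back to real block I/O */
	return block_read_full_page(page, sketch_get_block);
}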
page               35 include/linux/cma.h extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
page               37 include/linux/cma.h extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count);
page               97 include/linux/compaction.h 		struct page **page);
page              146 include/linux/dax.h struct page *dax_layout_busy_page(struct address_space *mapping);
page              147 include/linux/dax.h dax_entry_t dax_lock_page(struct page *page);
page              148 include/linux/dax.h void dax_unlock_page(struct page *page, dax_entry_t cookie);
page              177 include/linux/dax.h static inline struct page *dax_layout_busy_page(struct address_space *mapping)
page              188 include/linux/dax.h static inline dax_entry_t dax_lock_page(struct page *page)
page              190 include/linux/dax.h 	if (IS_DAX(page->mapping->host))
page              195 include/linux/dax.h static inline void dax_unlock_page(struct page *page, dax_entry_t cookie)
page               25 include/linux/devcoredump.h 	struct page *page;
page               32 include/linux/devcoredump.h 		page = sg_page(iter);
page               33 include/linux/devcoredump.h 		if (page)
page               34 include/linux/devcoredump.h 			__free_page(page);
page               25 include/linux/dm-io.h 	struct page *page;
page               56 include/linux/dma-contiguous.h struct page;
page              111 include/linux/dma-contiguous.h struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
page              113 include/linux/dma-contiguous.h bool dma_release_from_contiguous(struct device *dev, struct page *pages,
page              115 include/linux/dma-contiguous.h struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp);
page              116 include/linux/dma-contiguous.h void dma_free_contiguous(struct device *dev, struct page *page, size_t size);
page              146 include/linux/dma-contiguous.h struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
page              153 include/linux/dma-contiguous.h bool dma_release_from_contiguous(struct device *dev, struct page *pages,
page              160 include/linux/dma-contiguous.h static inline struct page *dma_alloc_contiguous(struct device *dev, size_t size,
page              166 include/linux/dma-contiguous.h static inline void dma_free_contiguous(struct device *dev, struct page *page,
page              169 include/linux/dma-contiguous.h 	__free_pages(page, get_order(size));
page               24 include/linux/dma-debug.h extern void debug_dma_map_page(struct device *dev, struct page *page,
page               70 include/linux/dma-debug.h extern void debug_dma_assert_idle(struct page *page);
page               83 include/linux/dma-debug.h static inline void debug_dma_map_page(struct device *dev, struct page *page,
page              160 include/linux/dma-debug.h static inline void debug_dma_assert_idle(struct page *page)
page               77 include/linux/dma-direct.h struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
page               79 include/linux/dma-direct.h void __dma_direct_free_pages(struct device *dev, size_t size, struct page *page);
page               93 include/linux/dma-mapping.h 	dma_addr_t (*map_page)(struct device *dev, struct page *page,
page              203 include/linux/dma-mapping.h dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
page              278 include/linux/dma-mapping.h 		struct page *page, size_t offset, size_t size,
page              286 include/linux/dma-mapping.h 		addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
page              288 include/linux/dma-mapping.h 		addr = ops->map_page(dev, page, offset, size, dir, attrs);
page              289 include/linux/dma-mapping.h 	debug_dma_map_page(dev, page, offset, size, dir, addr);
page              470 include/linux/dma-mapping.h 		struct page *page, size_t offset, size_t size,
page              628 include/linux/dma-mapping.h struct page **dma_common_find_pages(void *cpu_addr);
page              629 include/linux/dma-mapping.h void *dma_common_contiguous_remap(struct page *page, size_t size,
page              632 include/linux/dma-mapping.h void *dma_common_pages_remap(struct page **pages, size_t size,
page              637 include/linux/dma-mapping.h void *dma_alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags);
page              106 include/linux/dma-noncoherent.h void arch_dma_prep_coherent(struct page *page, size_t size);
page              108 include/linux/dma-noncoherent.h static inline void arch_dma_prep_coherent(struct page *page, size_t size)
page              597 include/linux/edac.h 					   unsigned long page);
page              161 include/linux/efi.h 	struct page		**pages;
page              218 include/linux/f2fs_fs.h #define ADDRS_PER_PAGE(page, inode)	\
page              219 include/linux/f2fs_fs.h 	(IS_INODE(page) ? ADDRS_PER_INODE(inode) : ADDRS_PER_BLOCK(inode))
page              423 include/linux/firewire.h 	struct page **pages;
page               15 include/linux/firmware.h 	struct page **pages;
page               19 include/linux/frontswap.h 	int (*store)(unsigned, pgoff_t, struct page *); /* store a page */
page               20 include/linux/frontswap.h 	int (*load)(unsigned, pgoff_t, struct page *); /* load a page */
page               35 include/linux/frontswap.h extern int __frontswap_store(struct page *page);
page               36 include/linux/frontswap.h extern int __frontswap_load(struct page *page);
page               87 include/linux/frontswap.h static inline int frontswap_store(struct page *page)
page               90 include/linux/frontswap.h 		return __frontswap_store(page);
page               95 include/linux/frontswap.h static inline int frontswap_load(struct page *page)
page               98 include/linux/frontswap.h 		return __frontswap_load(page);
page              292 include/linux/fs.h struct page;
page              359 include/linux/fs.h typedef int (*read_actor_t)(read_descriptor_t *, struct page *,
page              363 include/linux/fs.h 	int (*writepage)(struct page *page, struct writeback_control *wbc);
page              364 include/linux/fs.h 	int (*readpage)(struct file *, struct page *);
page              370 include/linux/fs.h 	int (*set_page_dirty)(struct page *page);
page              381 include/linux/fs.h 				struct page **pagep, void **fsdata);
page              384 include/linux/fs.h 				struct page *page, void *fsdata);
page              388 include/linux/fs.h 	void (*invalidatepage) (struct page *, unsigned int, unsigned int);
page              389 include/linux/fs.h 	int (*releasepage) (struct page *, gfp_t);
page              390 include/linux/fs.h 	void (*freepage)(struct page *);
page              397 include/linux/fs.h 			struct page *, struct page *, enum migrate_mode);
page              398 include/linux/fs.h 	bool (*isolate_page)(struct page *, isolate_mode_t);
page              399 include/linux/fs.h 	void (*putback_page)(struct page *);
page              400 include/linux/fs.h 	int (*launder_page) (struct page *);
page              401 include/linux/fs.h 	int (*is_partially_uptodate) (struct page *, unsigned long,
page              403 include/linux/fs.h 	void (*is_dirty_writeback) (struct page *, bool *, bool *);
page              404 include/linux/fs.h 	int (*error_remove_page)(struct address_space *, struct page *);
page              420 include/linux/fs.h 				struct page **pagep, void **fsdata);
page              424 include/linux/fs.h 				struct page *page, void *fsdata);
page             1843 include/linux/fs.h 	ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int);
page             1971 include/linux/fs.h 	int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
page             3122 include/linux/fs.h extern void block_sync_page(struct page *page);
page             3308 include/linux/fs.h extern int noop_set_page_dirty(struct page *page);
page             3309 include/linux/fs.h extern void noop_invalidatepage(struct page *page, unsigned int offset,
page             3313 include/linux/fs.h extern int simple_readpage(struct file *file, struct page *page);
page             3316 include/linux/fs.h 			struct page **pagep, void **fsdata);
page             3319 include/linux/fs.h 			struct page *page, void *fsdata);
page             3350 include/linux/fs.h 				struct page *, struct page *,
page             3353 include/linux/fs.h 				struct page *, struct page *,
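The address_space_operations members indexed above pair naturally with the generic helpers that also appear in this file (simple_readpage, simple_write_begin/simple_write_end, __set_page_dirty_no_writeback). A minimal in-memory wiring in the style of ramfs, assuming no real writeback is wanted; my_aops is a hypothetical name:

	static const struct address_space_operations my_aops = {
		.readpage	= simple_readpage,
		.write_begin	= simple_write_begin,
		.write_end	= simple_write_end,
		.set_page_dirty	= __set_page_dirty_no_writeback,
	};

Hooks left NULL fall back to generic VFS behaviour, which is why trivial filesystems get away with only these four.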
page              155 include/linux/fscache-cache.h 					     struct page *page,
page              296 include/linux/fscache-cache.h 	int (*write_page)(struct fscache_storage *op, struct page *page);
page              303 include/linux/fscache-cache.h 			     struct page *page);
page              479 include/linux/fscache-cache.h 				  struct page *page, int error)
page              481 include/linux/fscache-cache.h 	op->end_io_func(page, op->context, error);
page              542 include/linux/fscache-cache.h 				     struct page *page);
page               36 include/linux/fscache.h #define PageFsCache(page)		PagePrivate2((page))
page               37 include/linux/fscache.h #define SetPageFsCache(page)		SetPagePrivate2((page))
page               38 include/linux/fscache.h #define ClearPageFsCache(page)		ClearPagePrivate2((page))
page               39 include/linux/fscache.h #define TestSetPageFsCache(page)	TestSetPagePrivate2((page))
page               40 include/linux/fscache.h #define TestClearPageFsCache(page)	TestClearPagePrivate2((page))
page               50 include/linux/fscache.h typedef void (*fscache_rw_complete_t)(struct page *page,
page              112 include/linux/fscache.h 				 struct page *page);
page              204 include/linux/fscache.h 					struct page *,
page              215 include/linux/fscache.h extern int __fscache_alloc_page(struct fscache_cookie *, struct page *, gfp_t);
page              216 include/linux/fscache.h extern int __fscache_write_page(struct fscache_cookie *, struct page *, loff_t, gfp_t);
page              217 include/linux/fscache.h extern void __fscache_uncache_page(struct fscache_cookie *, struct page *);
page              218 include/linux/fscache.h extern bool __fscache_check_page_write(struct fscache_cookie *, struct page *);
page              219 include/linux/fscache.h extern void __fscache_wait_on_page_write(struct fscache_cookie *, struct page *);
page              220 include/linux/fscache.h extern bool __fscache_maybe_release_page(struct fscache_cookie *, struct page *,
page              541 include/linux/fscache.h 			       struct page *page,
page              547 include/linux/fscache.h 		return __fscache_read_or_alloc_page(cookie, page, end_io_func,
page              625 include/linux/fscache.h 		       struct page *page,
page              629 include/linux/fscache.h 		return __fscache_alloc_page(cookie, page, gfp);
page              675 include/linux/fscache.h 		       struct page *page,
page              680 include/linux/fscache.h 		return __fscache_write_page(cookie, page, object_size, gfp);
page              701 include/linux/fscache.h 			  struct page *page)
page              704 include/linux/fscache.h 		__fscache_uncache_page(cookie, page);
page              719 include/linux/fscache.h 			      struct page *page)
page              722 include/linux/fscache.h 		return __fscache_check_page_write(cookie, page);
page              739 include/linux/fscache.h 				struct page *page)
page              742 include/linux/fscache.h 		__fscache_wait_on_page_write(cookie, page);
page              762 include/linux/fscache.h 				struct page *page,
page              765 include/linux/fscache.h 	if (fscache_cookie_valid(cookie) && PageFsCache(page))
page              766 include/linux/fscache.h 		return __fscache_maybe_release_page(cookie, page, gfp);
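The fscache read API above follows a three-way return convention: 0 means the read was dispatched and end_io_func will complete the page, -ENODATA means a backing slot was allocated but holds no data yet, and -ENOBUFS means no cache is available. A sketch of the classic netfs ->readpage built on it; my_cookie and my_read_from_server() are hypothetical stand-ins for the filesystem's own pieces:

	static void my_readpage_done(struct page *page, void *context, int error)
	{
		if (!error)
			SetPageUptodate(page);
		unlock_page(page);
	}

	static int my_readpage(struct file *file, struct page *page)
	{
		int ret;

		ret = fscache_read_or_alloc_page(my_cookie, page,
						 my_readpage_done, NULL,
						 GFP_KERNEL);
		if (ret == 0)		/* I/O dispatched; handler finishes up */
			return 0;
		/* -ENODATA or -ENOBUFS: fetch from the server instead */
		return my_read_from_server(file, page);
	}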
page              108 include/linux/fscrypt.h extern struct page *fscrypt_encrypt_pagecache_blocks(struct page *page,
page              113 include/linux/fscrypt.h 					 struct page *page, unsigned int len,
page              117 include/linux/fscrypt.h extern int fscrypt_decrypt_pagecache_blocks(struct page *page, unsigned int len,
page              120 include/linux/fscrypt.h 					 struct page *page, unsigned int len,
page              123 include/linux/fscrypt.h static inline bool fscrypt_is_bounce_page(struct page *page)
page              125 include/linux/fscrypt.h 	return page->mapping == NULL;
page              128 include/linux/fscrypt.h static inline struct page *fscrypt_pagecache_page(struct page *bounce_page)
page              130 include/linux/fscrypt.h 	return (struct page *)page_private(bounce_page);
page              133 include/linux/fscrypt.h extern void fscrypt_free_bounce_page(struct page *bounce_page);
page              308 include/linux/fscrypt.h static inline struct page *fscrypt_encrypt_pagecache_blocks(struct page *page,
page              317 include/linux/fscrypt.h 						struct page *page,
page              325 include/linux/fscrypt.h static inline int fscrypt_decrypt_pagecache_blocks(struct page *page,
page              333 include/linux/fscrypt.h 						struct page *page,
page              340 include/linux/fscrypt.h static inline bool fscrypt_is_bounce_page(struct page *page)
page              345 include/linux/fscrypt.h static inline struct page *fscrypt_pagecache_page(struct page *bounce_page)
page              351 include/linux/fscrypt.h static inline void fscrypt_free_bounce_page(struct page *bounce_page)
page              770 include/linux/fscrypt.h static inline void fscrypt_finalize_bounce_page(struct page **pagep)
page              772 include/linux/fscrypt.h 	struct page *page = *pagep;
page              774 include/linux/fscrypt.h 	if (fscrypt_is_bounce_page(page)) {
page              775 include/linux/fscrypt.h 		*pagep = fscrypt_pagecache_page(page);
page              776 include/linux/fscrypt.h 		fscrypt_free_bounce_page(page);
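The bounce-page helpers above encode fscrypt's write path: encryption happens into a separate bounce page whose page_private points back at the original pagecache page (hence fscrypt_pagecache_page), so the ciphertext can be submitted for I/O while the plaintext stays in the page cache. A hedged sketch of that path, with my_submit_write() standing in for the filesystem's own bio submission:

	static int my_write_page(struct page *page)
	{
		struct page *bounce_page;
		int err;

		/* encrypt the whole page into a freshly allocated bounce page */
		bounce_page = fscrypt_encrypt_pagecache_blocks(page, PAGE_SIZE,
							       0, GFP_NOFS);
		if (IS_ERR(bounce_page))
			return PTR_ERR(bounce_page);

		err = my_submit_write(bounce_page);	/* I/O sees ciphertext */

		fscrypt_free_bounce_page(bounce_page);
		return err;
	}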
page               89 include/linux/fsverity.h 	struct page *(*read_merkle_tree_page)(struct inode *inode,
page              133 include/linux/fsverity.h extern bool fsverity_verify_page(struct page *page);
page              178 include/linux/fsverity.h static inline bool fsverity_verify_page(struct page *page)
page              483 include/linux/gfp.h static inline void arch_free_page(struct page *page, int order) { }
page              486 include/linux/gfp.h static inline void arch_alloc_page(struct page *page, int order) { }
page              489 include/linux/gfp.h struct page *
page              493 include/linux/gfp.h static inline struct page *
page              503 include/linux/gfp.h static inline struct page *
page              517 include/linux/gfp.h static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
page              527 include/linux/gfp.h extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order);
page              529 include/linux/gfp.h static inline struct page *
page              534 include/linux/gfp.h extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
page              566 include/linux/gfp.h extern void __free_pages(struct page *page, unsigned int order);
page              568 include/linux/gfp.h extern void free_unref_page(struct page *page);
page              572 include/linux/gfp.h extern void __page_frag_cache_drain(struct page *page, unsigned int count);
page              577 include/linux/gfp.h #define __free_page(page) __free_pages((page), 0)
page              620 include/linux/gfp.h extern void init_cma_reserved_pageblock(struct page *page);
page               15 include/linux/highmem.h static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
page               21 include/linux/highmem.h static inline void flush_kernel_dcache_page(struct page *page)
page               67 include/linux/highmem.h struct page *kmap_to_page(void *addr);
page               73 include/linux/highmem.h static inline struct page *kmap_to_page(void *addr)
page               81 include/linux/highmem.h static inline void *kmap(struct page *page)
page               84 include/linux/highmem.h 	return page_address(page);
page               87 include/linux/highmem.h static inline void kunmap(struct page *page)
page               91 include/linux/highmem.h static inline void *kmap_atomic(struct page *page)
page               95 include/linux/highmem.h 	return page_address(page);
page               97 include/linux/highmem.h #define kmap_atomic_prot(page, prot)	kmap_atomic(page)
page              151 include/linux/highmem.h 	BUILD_BUG_ON(__same_type((addr), struct page *));       \
page              158 include/linux/highmem.h static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
page              160 include/linux/highmem.h 	void *addr = kmap_atomic(page);
page              161 include/linux/highmem.h 	clear_user_page(addr, vaddr, page);
page              181 include/linux/highmem.h static inline struct page *
page              186 include/linux/highmem.h 	struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags,
page              189 include/linux/highmem.h 	if (page)
page              190 include/linux/highmem.h 		clear_user_highpage(page, vaddr);
page              192 include/linux/highmem.h 	return page;
page              204 include/linux/highmem.h static inline struct page *
page              211 include/linux/highmem.h static inline void clear_highpage(struct page *page)
page              213 include/linux/highmem.h 	void *kaddr = kmap_atomic(page);
page              218 include/linux/highmem.h static inline void zero_user_segments(struct page *page,
page              222 include/linux/highmem.h 	void *kaddr = kmap_atomic(page);
page              233 include/linux/highmem.h 	flush_dcache_page(page);
page              236 include/linux/highmem.h static inline void zero_user_segment(struct page *page,
page              239 include/linux/highmem.h 	zero_user_segments(page, start, end, 0, 0);
page              242 include/linux/highmem.h static inline void zero_user(struct page *page,
page              245 include/linux/highmem.h 	zero_user_segments(page, start, start + size, 0, 0);
page              250 include/linux/highmem.h static inline void copy_user_highpage(struct page *to, struct page *from,
page              266 include/linux/highmem.h static inline void copy_highpage(struct page *to, struct page *from)
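The kmap/kmap_atomic and zero_user* helpers above are the portable way to touch page contents that may live in highmem. A small illustration (my_zero_tail is hypothetical) that zeroes the tail of a page past 'valid' bytes, then peeks at the first byte through a short atomic mapping:

	static void my_zero_tail(struct page *page, unsigned int valid)
	{
		void *kaddr;
		u8 first;

		zero_user_segment(page, valid, PAGE_SIZE);

		kaddr = kmap_atomic(page);	/* non-sleeping, short-lived */
		first = *(u8 *)kaddr;
		kunmap_atomic(kaddr);
		(void)first;
	}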
page              207 include/linux/hmm.h static inline struct page *hmm_device_entry_to_page(const struct hmm_range *range,
page              248 include/linux/hmm.h 						  struct page *page)
page              250 include/linux/hmm.h 	return (page_to_pfn(page) << range->pfn_shift) |
page               28 include/linux/huge_mm.h extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
page              161 include/linux/huge_mm.h extern void prep_transhuge_page(struct page *page);
page              162 include/linux/huge_mm.h extern void free_transhuge_page(struct page *page);
page              164 include/linux/huge_mm.h bool can_split_huge_page(struct page *page, int *pextra_pins);
page              165 include/linux/huge_mm.h int split_huge_page_to_list(struct page *page, struct list_head *list);
page              166 include/linux/huge_mm.h static inline int split_huge_page(struct page *page)
page              168 include/linux/huge_mm.h 	return split_huge_page_to_list(page, NULL);
page              170 include/linux/huge_mm.h void deferred_split_huge_page(struct page *page);
page              173 include/linux/huge_mm.h 		unsigned long address, bool freeze, struct page *page);
page              186 include/linux/huge_mm.h 		bool freeze, struct page *page);
page              234 include/linux/huge_mm.h static inline int hpage_nr_pages(struct page *page)
page              236 include/linux/huge_mm.h 	if (unlikely(PageTransHuge(page)))
page              241 include/linux/huge_mm.h struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
page              243 include/linux/huge_mm.h struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
page              248 include/linux/huge_mm.h extern struct page *huge_zero_page;
page              250 include/linux/huge_mm.h static inline bool is_huge_zero_page(struct page *page)
page              252 include/linux/huge_mm.h 	return READ_ONCE(huge_zero_page) == page;
page              265 include/linux/huge_mm.h struct page *mm_get_huge_zero_page(struct mm_struct *mm);
page              268 include/linux/huge_mm.h #define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot))
page              275 include/linux/huge_mm.h static inline struct list_head *page_deferred_list(struct page *page)
page              281 include/linux/huge_mm.h 	return &page[2].deferred_list;
page              311 include/linux/huge_mm.h static inline void prep_transhuge_page(struct page *page) {}
page              318 include/linux/huge_mm.h can_split_huge_page(struct page *page, int *pextra_pins)
page              324 include/linux/huge_mm.h split_huge_page_to_list(struct page *page, struct list_head *list)
page              328 include/linux/huge_mm.h static inline int split_huge_page(struct page *page)
page              332 include/linux/huge_mm.h static inline void deferred_split_huge_page(struct page *page) {}
page              337 include/linux/huge_mm.h 		unsigned long address, bool freeze, struct page *page) {}
page              339 include/linux/huge_mm.h 		unsigned long address, bool freeze, struct page *page) {}
page              377 include/linux/huge_mm.h static inline bool is_huge_zero_page(struct page *page)
page              392 include/linux/huge_mm.h static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
page              398 include/linux/huge_mm.h static inline struct page *follow_devmap_pud(struct vm_area_struct *vma,
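The split_huge_page() declarations above come with an implicit locking contract: the caller must hold a reference to the page and have it locked, and a return of 0 means the compound page was split into base pages. A sketch of the usual calling sequence; my_try_split is a hypothetical wrapper:

	static int my_try_split(struct page *page)
	{
		int ret = 0;

		if (!get_page_unless_zero(page))
			return -EBUSY;
		lock_page(page);
		if (PageTransHuge(page))
			ret = split_huge_page(page);	/* 0 on success */
		unlock_page(page);
		put_page(page);
		return ret;
	}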
page               74 include/linux/hugetlb.h 			 struct page **, struct vm_area_struct **,
page               78 include/linux/hugetlb.h 			  unsigned long, unsigned long, struct page *);
page               82 include/linux/hugetlb.h 			  struct page *ref_page);
page               85 include/linux/hugetlb.h 				struct page *ref_page);
page               96 include/linux/hugetlb.h 				struct page **pagep);
page              102 include/linux/hugetlb.h bool isolate_huge_page(struct page *page, struct list_head *list);
page              103 include/linux/hugetlb.h void putback_active_hugepage(struct page *page);
page              104 include/linux/hugetlb.h void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
page              105 include/linux/hugetlb.h void free_huge_page(struct page *page);
page              125 include/linux/hugetlb.h struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
page              127 include/linux/hugetlb.h struct page *follow_huge_pd(struct vm_area_struct *vma,
page              130 include/linux/hugetlb.h struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
page              132 include/linux/hugetlb.h struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
page              134 include/linux/hugetlb.h struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
page              190 include/linux/hugetlb.h static inline bool isolate_huge_page(struct page *page, struct list_head *list)
page              205 include/linux/hugetlb.h 			unsigned long end, struct page *ref_page)
page              212 include/linux/hugetlb.h 			unsigned long end, struct page *ref_page)
page              360 include/linux/hugetlb.h struct page *alloc_huge_page(struct vm_area_struct *vma,
page              362 include/linux/hugetlb.h struct page *alloc_huge_page_node(struct hstate *h, int nid);
page              363 include/linux/hugetlb.h struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
page              365 include/linux/hugetlb.h struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
page              367 include/linux/hugetlb.h struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
page              369 include/linux/hugetlb.h int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
page              450 include/linux/hugetlb.h 				       struct page *page, int writable)
page              456 include/linux/hugetlb.h static inline struct hstate *page_hstate(struct page *page)
page              458 include/linux/hugetlb.h 	VM_BUG_ON_PAGE(!PageHuge(page), page);
page              459 include/linux/hugetlb.h 	return size_to_hstate(page_size(page));
page              472 include/linux/hugetlb.h pgoff_t __basepage_index(struct page *page);
page              475 include/linux/hugetlb.h static inline pgoff_t basepage_index(struct page *page)
page              477 include/linux/hugetlb.h 	if (!PageCompound(page))
page              478 include/linux/hugetlb.h 		return page->index;
page              480 include/linux/hugetlb.h 	return __basepage_index(page);
page              483 include/linux/hugetlb.h extern int dissolve_free_huge_page(struct page *page);
page              596 include/linux/hugetlb.h static inline struct page *alloc_huge_page(struct vm_area_struct *vma,
page              603 include/linux/hugetlb.h static inline struct page *alloc_huge_page_node(struct hstate *h, int nid)
page              608 include/linux/hugetlb.h static inline struct page *
page              614 include/linux/hugetlb.h static inline struct page *alloc_huge_page_vma(struct hstate *h,
page              641 include/linux/hugetlb.h static inline struct hstate *page_hstate(struct page *page)
page              696 include/linux/hugetlb.h static inline pgoff_t basepage_index(struct page *page)
page              698 include/linux/hugetlb.h 	return page->index;
page              701 include/linux/hugetlb.h static inline int dissolve_free_huge_page(struct page *page)
page               29 include/linux/hugetlb_cgroup.h static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
page               31 include/linux/hugetlb_cgroup.h 	VM_BUG_ON_PAGE(!PageHuge(page), page);
page               33 include/linux/hugetlb_cgroup.h 	if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
page               35 include/linux/hugetlb_cgroup.h 	return (struct hugetlb_cgroup *)page[2].private;
page               39 include/linux/hugetlb_cgroup.h int set_hugetlb_cgroup(struct page *page, struct hugetlb_cgroup *h_cg)
page               41 include/linux/hugetlb_cgroup.h 	VM_BUG_ON_PAGE(!PageHuge(page), page);
page               43 include/linux/hugetlb_cgroup.h 	if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
page               45 include/linux/hugetlb_cgroup.h 	page[2].private	= (unsigned long)h_cg;
page               58 include/linux/hugetlb_cgroup.h 					 struct page *page);
page               60 include/linux/hugetlb_cgroup.h 					 struct page *page);
page               64 include/linux/hugetlb_cgroup.h extern void hugetlb_cgroup_migrate(struct page *oldhpage,
page               65 include/linux/hugetlb_cgroup.h 				   struct page *newhpage);
page               68 include/linux/hugetlb_cgroup.h static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
page               74 include/linux/hugetlb_cgroup.h int set_hugetlb_cgroup(struct page *page, struct hugetlb_cgroup *h_cg)
page               94 include/linux/hugetlb_cgroup.h 			     struct page *page)
page               99 include/linux/hugetlb_cgroup.h hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages, struct page *page)
page              113 include/linux/hugetlb_cgroup.h static inline void hugetlb_cgroup_migrate(struct page *oldhpage,
page              114 include/linux/hugetlb_cgroup.h 					  struct page *newhpage)
page              740 include/linux/hyperv.h 	struct page *ringbuffer_page;
page               17 include/linux/iomap.h struct page;
page               93 include/linux/iomap.h 			struct page *page, struct iomap *iomap);
page              145 include/linux/iomap.h static inline struct iomap_page *to_iomap_page(struct page *page)
page              147 include/linux/iomap.h 	if (page_has_private(page))
page              148 include/linux/iomap.h 		return (struct iomap_page *)page_private(page);
page              154 include/linux/iomap.h int iomap_readpage(struct page *page, const struct iomap_ops *ops);
page              157 include/linux/iomap.h int iomap_set_page_dirty(struct page *page);
page              158 include/linux/iomap.h int iomap_is_partially_uptodate(struct page *page, unsigned long from,
page              160 include/linux/iomap.h int iomap_releasepage(struct page *page, gfp_t gfp_mask);
page              161 include/linux/iomap.h void iomap_invalidatepage(struct page *page, unsigned int offset,
page              164 include/linux/iomap.h int iomap_migrate_page(struct address_space *mapping, struct page *newpage,
page              165 include/linux/iomap.h 		struct page *page, enum migrate_mode mode);
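The iomap page hooks above are designed to slot directly into address_space_operations, with the filesystem contributing only an iomap_ops that maps file offsets to extents. A sketch of the usual wiring, assuming a hypothetical my_iomap_ops:

	static int my_readpage(struct file *unused, struct page *page)
	{
		return iomap_readpage(page, &my_iomap_ops);
	}

	static const struct address_space_operations my_aops = {
		.readpage		= my_readpage,
		.set_page_dirty		= iomap_set_page_dirty,
		.releasepage		= iomap_releasepage,
		.invalidatepage		= iomap_invalidatepage,
		.is_partially_uptodate	= iomap_is_partially_uptodate,
		.migratepage		= iomap_migrate_page,
	};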
page              607 include/linux/irqchip/arm-gic-v3.h 		struct page	*pend_page;
page               22 include/linux/irqchip/arm-gic-v4.h 	struct page		*vprop_page;
page               33 include/linux/irqchip/arm-gic-v4.h 	struct page 		*vpt_page;
page             1377 include/linux/jbd2.h 				struct page *, unsigned int, unsigned int);
page             1378 include/linux/jbd2.h extern int	 jbd2_journal_try_to_free_buffers(journal_t *, struct page *, gfp_t);
page                8 include/linux/kasan.h struct page;
page               43 include/linux/kasan.h void kasan_alloc_pages(struct page *page, unsigned int order);
page               44 include/linux/kasan.h void kasan_free_pages(struct page *page, unsigned int order);
page               49 include/linux/kasan.h void kasan_poison_slab(struct page *page);
page               99 include/linux/kasan.h static inline void kasan_alloc_pages(struct page *page, unsigned int order) {}
page              100 include/linux/kasan.h static inline void kasan_free_pages(struct page *page, unsigned int order) {}
page              106 include/linux/kasan.h static inline void kasan_poison_slab(struct page *page) {}
page              250 include/linux/kexec.h 	struct page *control_code_page;
page              251 include/linux/kexec.h 	struct page *swap_page;
page              303 include/linux/kexec.h extern struct page *kimage_alloc_control_pages(struct kimage *image,
page              317 include/linux/kexec.h #define kexec_flush_icache_page(page)
page              348 include/linux/kexec.h static inline unsigned long page_to_boot_pfn(struct page *page)
page              350 include/linux/kexec.h 	return page_to_pfn(page);
page              355 include/linux/kexec.h static inline struct page *boot_pfn_to_page(unsigned long boot_pfn)
page              374 include/linux/kprobes.h void free_insn_page(void *page);
page               30 include/linux/ks0108.h extern void ks0108_page(unsigned char page);
page               51 include/linux/ksm.h struct page *ksm_might_need_to_copy(struct page *page,
page               54 include/linux/ksm.h void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc);
page               55 include/linux/ksm.h void ksm_migrate_page(struct page *newpage, struct page *oldpage);
page               56 include/linux/ksm.h bool reuse_ksm_page(struct page *page,
page               77 include/linux/ksm.h static inline struct page *ksm_might_need_to_copy(struct page *page,
page               80 include/linux/ksm.h 	return page;
page               83 include/linux/ksm.h static inline void rmap_walk_ksm(struct page *page,
page               88 include/linux/ksm.h static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage)
page               91 include/linux/ksm.h static inline bool reuse_ksm_page(struct page *page,
page              133 include/linux/kvm_host.h static inline bool is_error_page(struct page *page)
page              135 include/linux/kvm_host.h 	return IS_ERR(page);
page              238 include/linux/kvm_host.h 	struct page *page;
page              703 include/linux/kvm_host.h 			    struct page **pages, int nr_pages);
page              705 include/linux/kvm_host.h struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
page              711 include/linux/kvm_host.h void kvm_release_page_clean(struct page *page);
page              712 include/linux/kvm_host.h void kvm_release_page_dirty(struct page *page);
page              713 include/linux/kvm_host.h void kvm_set_page_accessed(struct page *page);
page              764 include/linux/kvm_host.h struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
page             1079 include/linux/kvm_host.h static inline struct page *kvm_vcpu_gpa_to_page(struct kvm_vcpu *vcpu,
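The gfn_to_page()/kvm_release_page_*() trio above implements a get/put discipline for guest frames: gfn_to_page() takes a reference (or returns an error page, tested with is_error_page()), and the matching release call drops it, optionally marking the page dirty. A sketch of the access pattern; my_touch_gfn is hypothetical, kvm and gfn come from the caller:

	static int my_touch_gfn(struct kvm *kvm, gfn_t gfn)
	{
		struct page *page;
		void *kaddr;

		page = gfn_to_page(kvm, gfn);
		if (is_error_page(page))
			return -EFAULT;

		kaddr = kmap(page);
		*(u8 *)kaddr = 0;		/* scribble into the guest frame */
		kunmap(page);

		kvm_release_page_dirty(page);	/* drop ref, mark dirty */
		return 0;
	}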
page               26 include/linux/memcontrol.h struct page;
page              375 include/linux/memcontrol.h int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
page              378 include/linux/memcontrol.h int mem_cgroup_try_charge_delay(struct page *page, struct mm_struct *mm,
page              381 include/linux/memcontrol.h void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
page              383 include/linux/memcontrol.h void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
page              385 include/linux/memcontrol.h void mem_cgroup_uncharge(struct page *page);
page              388 include/linux/memcontrol.h void mem_cgroup_migrate(struct page *oldpage, struct page *newpage);
page              429 include/linux/memcontrol.h struct lruvec *mem_cgroup_page_lruvec(struct page *, struct pglist_data *);
page              435 include/linux/memcontrol.h struct mem_cgroup *get_mem_cgroup_from_page(struct page *page);
page              521 include/linux/memcontrol.h struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
page              522 include/linux/memcontrol.h ino_t page_cgroup_ino(struct page *page);
page              586 include/linux/memcontrol.h struct mem_cgroup *lock_page_memcg(struct page *page);
page              588 include/linux/memcontrol.h void unlock_page_memcg(struct page *page);
page              653 include/linux/memcontrol.h static inline void __mod_memcg_page_state(struct page *page,
page              656 include/linux/memcontrol.h 	if (page->mem_cgroup)
page              657 include/linux/memcontrol.h 		__mod_memcg_state(page->mem_cgroup, idx, val);
page              660 include/linux/memcontrol.h static inline void mod_memcg_page_state(struct page *page,
page              663 include/linux/memcontrol.h 	if (page->mem_cgroup)
page              664 include/linux/memcontrol.h 		mod_memcg_state(page->mem_cgroup, idx, val);
page              720 include/linux/memcontrol.h static inline void __mod_lruvec_page_state(struct page *page,
page              723 include/linux/memcontrol.h 	pg_data_t *pgdat = page_pgdat(page);
page              727 include/linux/memcontrol.h 	if (!page->mem_cgroup) {
page              732 include/linux/memcontrol.h 	lruvec = mem_cgroup_lruvec(pgdat, page->mem_cgroup);
page              736 include/linux/memcontrol.h static inline void mod_lruvec_page_state(struct page *page,
page              742 include/linux/memcontrol.h 	__mod_lruvec_page_state(page, idx, val);
page              764 include/linux/memcontrol.h static inline void count_memcg_page_event(struct page *page,
page              767 include/linux/memcontrol.h 	if (page->mem_cgroup)
page              768 include/linux/memcontrol.h 		count_memcg_events(page->mem_cgroup, idx, 1);
page              820 include/linux/memcontrol.h void mem_cgroup_split_huge_fixup(struct page *head);
page              862 include/linux/memcontrol.h static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
page              871 include/linux/memcontrol.h static inline int mem_cgroup_try_charge_delay(struct page *page,
page              881 include/linux/memcontrol.h static inline void mem_cgroup_commit_charge(struct page *page,
page              887 include/linux/memcontrol.h static inline void mem_cgroup_cancel_charge(struct page *page,
page              893 include/linux/memcontrol.h static inline void mem_cgroup_uncharge(struct page *page)
page              901 include/linux/memcontrol.h static inline void mem_cgroup_migrate(struct page *old, struct page *new)
page              911 include/linux/memcontrol.h static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
page              928 include/linux/memcontrol.h static inline struct mem_cgroup *get_mem_cgroup_from_page(struct page *page)
page             1010 include/linux/memcontrol.h static inline struct mem_cgroup *lock_page_memcg(struct page *page)
page             1019 include/linux/memcontrol.h static inline void unlock_page_memcg(struct page *page)
page             1078 include/linux/memcontrol.h static inline void __mod_memcg_page_state(struct page *page,
page             1084 include/linux/memcontrol.h static inline void mod_memcg_page_state(struct page *page,
page             1114 include/linux/memcontrol.h static inline void __mod_lruvec_page_state(struct page *page,
page             1117 include/linux/memcontrol.h 	__mod_node_page_state(page_pgdat(page), idx, val);
page             1120 include/linux/memcontrol.h static inline void mod_lruvec_page_state(struct page *page,
page             1123 include/linux/memcontrol.h 	mod_node_page_state(page_pgdat(page), idx, val);
page             1129 include/linux/memcontrol.h 	struct page *page = virt_to_head_page(p);
page             1131 include/linux/memcontrol.h 	__mod_node_page_state(page_pgdat(page), idx, val);
page             1146 include/linux/memcontrol.h static inline void mem_cgroup_split_huge_fixup(struct page *head)
page             1162 include/linux/memcontrol.h static inline void count_memcg_page_event(struct page *page,
page             1188 include/linux/memcontrol.h static inline void __inc_memcg_page_state(struct page *page,
page             1191 include/linux/memcontrol.h 	__mod_memcg_page_state(page, idx, 1);
page             1195 include/linux/memcontrol.h static inline void __dec_memcg_page_state(struct page *page,
page             1198 include/linux/memcontrol.h 	__mod_memcg_page_state(page, idx, -1);
page             1213 include/linux/memcontrol.h static inline void __inc_lruvec_page_state(struct page *page,
page             1216 include/linux/memcontrol.h 	__mod_lruvec_page_state(page, idx, 1);
page             1219 include/linux/memcontrol.h static inline void __dec_lruvec_page_state(struct page *page,
page             1222 include/linux/memcontrol.h 	__mod_lruvec_page_state(page, idx, -1);
page             1250 include/linux/memcontrol.h static inline void inc_memcg_page_state(struct page *page,
page             1253 include/linux/memcontrol.h 	mod_memcg_page_state(page, idx, 1);
page             1257 include/linux/memcontrol.h static inline void dec_memcg_page_state(struct page *page,
page             1260 include/linux/memcontrol.h 	mod_memcg_page_state(page, idx, -1);
page             1275 include/linux/memcontrol.h static inline void inc_lruvec_page_state(struct page *page,
page             1278 include/linux/memcontrol.h 	mod_lruvec_page_state(page, idx, 1);
page             1281 include/linux/memcontrol.h static inline void dec_lruvec_page_state(struct page *page,
page             1284 include/linux/memcontrol.h 	mod_lruvec_page_state(page, idx, -1);
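lock_page_memcg()/unlock_page_memcg() above exist to pin page->mem_cgroup while a page flag and its per-memcg statistic are updated together; the __inc/__dec_lruvec_page_state variants then charge the correct lruvec. A sketch modelled on the dirty-accounting pattern (my_account_dirtied is hypothetical):

	static void my_account_dirtied(struct page *page)
	{
		lock_page_memcg(page);	/* page->mem_cgroup can't move away */
		if (!TestSetPageDirty(page))
			__inc_lruvec_page_state(page, NR_FILE_DIRTY);
		unlock_page_memcg(page);
	}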
page             1294 include/linux/memcontrol.h void mem_cgroup_track_foreign_dirty_slowpath(struct page *page,
page             1297 include/linux/memcontrol.h static inline void mem_cgroup_track_foreign_dirty(struct page *page,
page             1303 include/linux/memcontrol.h 	if (unlikely(&page->mem_cgroup->css != wb->memcg_css))
page             1304 include/linux/memcontrol.h 		mem_cgroup_track_foreign_dirty_slowpath(page, wb);
page             1324 include/linux/memcontrol.h static inline void mem_cgroup_track_foreign_dirty(struct page *page,
page             1377 include/linux/memcontrol.h int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order);
page             1378 include/linux/memcontrol.h void __memcg_kmem_uncharge(struct page *page, int order);
page             1379 include/linux/memcontrol.h int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
page             1404 include/linux/memcontrol.h static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
page             1407 include/linux/memcontrol.h 		return __memcg_kmem_charge(page, gfp, order);
page             1411 include/linux/memcontrol.h static inline void memcg_kmem_uncharge(struct page *page, int order)
page             1414 include/linux/memcontrol.h 		__memcg_kmem_uncharge(page, order);
page             1417 include/linux/memcontrol.h static inline int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp,
page             1421 include/linux/memcontrol.h 		return __memcg_kmem_charge_memcg(page, gfp, order, memcg);
page             1425 include/linux/memcontrol.h static inline void memcg_kmem_uncharge_memcg(struct page *page, int order,
page             1446 include/linux/memcontrol.h static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
page             1451 include/linux/memcontrol.h static inline void memcg_kmem_uncharge(struct page *page, int order)
page             1455 include/linux/memcontrol.h static inline int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
page             1460 include/linux/memcontrol.h static inline void __memcg_kmem_uncharge(struct page *page, int order)
page               10 include/linux/memory_hotplug.h struct page;
page               26 include/linux/memory_hotplug.h 	struct page *___page = NULL;				   \
page               95 include/linux/memory_hotplug.h extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
page              103 include/linux/memory_hotplug.h typedef void (*online_page_callback_t)(struct page *page, unsigned int order);
page              108 include/linux/memory_hotplug.h extern void __online_page_set_limits(struct page *page);
page              109 include/linux/memory_hotplug.h extern void __online_page_increment_counters(struct page *page);
page              110 include/linux/memory_hotplug.h extern void __online_page_free(struct page *page);
page              222 include/linux/memory_hotplug.h extern void put_page_bootmem(struct page *page);
page              223 include/linux/memory_hotplug.h extern void get_page_bootmem(unsigned long info, struct page *page,
page              238 include/linux/memory_hotplug.h 	struct page *___page = NULL;		\
page              357 include/linux/memory_hotplug.h extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
page              205 include/linux/mempolicy.h extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long);
page              302 include/linux/mempolicy.h static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma,
page               73 include/linux/memremap.h 	void (*page_free)(struct page *page);
page               10 include/linux/migrate.h typedef struct page *new_page_t(struct page *page, unsigned long private);
page               11 include/linux/migrate.h typedef void free_page_t(struct page *page, unsigned long private);
page               34 include/linux/migrate.h static inline struct page *new_page_nodemask(struct page *page,
page               39 include/linux/migrate.h 	struct page *new_page = NULL;
page               41 include/linux/migrate.h 	if (PageHuge(page))
page               42 include/linux/migrate.h 		return alloc_huge_page_nodemask(page_hstate(compound_head(page)),
page               45 include/linux/migrate.h 	if (PageTransHuge(page)) {
page               50 include/linux/migrate.h 	if (PageHighMem(page) || (zone_idx(page_zone(page)) == ZONE_MOVABLE))
page               66 include/linux/migrate.h 			struct page *newpage, struct page *page,
page               70 include/linux/migrate.h extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
page               71 include/linux/migrate.h extern void putback_movable_page(struct page *page);
page               75 include/linux/migrate.h extern void migrate_page_states(struct page *newpage, struct page *page);
page               76 include/linux/migrate.h extern void migrate_page_copy(struct page *newpage, struct page *page);
page               78 include/linux/migrate.h 				  struct page *newpage, struct page *page);
page               80 include/linux/migrate.h 		struct page *newpage, struct page *page, int extra_count);
page               88 include/linux/migrate.h static inline int isolate_movable_page(struct page *page, isolate_mode_t mode)
page               94 include/linux/migrate.h static inline void migrate_page_states(struct page *newpage, struct page *page)
page               98 include/linux/migrate.h static inline void migrate_page_copy(struct page *newpage,
page               99 include/linux/migrate.h 				     struct page *page) {}
page              102 include/linux/migrate.h 				  struct page *newpage, struct page *page)
page              110 include/linux/migrate.h extern int PageMovable(struct page *page);
page              111 include/linux/migrate.h extern void __SetPageMovable(struct page *page, struct address_space *mapping);
page              112 include/linux/migrate.h extern void __ClearPageMovable(struct page *page);
page              114 include/linux/migrate.h static inline int PageMovable(struct page *page) { return 0; }
page              115 include/linux/migrate.h static inline void __SetPageMovable(struct page *page,
page              119 include/linux/migrate.h static inline void __ClearPageMovable(struct page *page)
page              126 include/linux/migrate.h extern int migrate_misplaced_page(struct page *page,
page              133 include/linux/migrate.h static inline int migrate_misplaced_page(struct page *page,
page              145 include/linux/migrate.h 			struct page *page, int node);
page              151 include/linux/migrate.h 			struct page *page, int node)
page              171 include/linux/migrate.h static inline struct page *migrate_pfn_to_page(unsigned long mpfn)
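new_page_t/free_page_t at the top of migrate.h are the allocation callbacks consumed by migrate_pages(): the allocator picks a target page for each source page, and the free callback disposes of targets that were never used. A sketch of a "stay on the same node" policy, assuming the usual migrate_pages(from, get_new, put_new, private, mode, reason) signature; the my_* names are hypothetical:

	static struct page *my_alloc_target(struct page *page, unsigned long private)
	{
		return alloc_pages_node(page_to_nid(page),
					GFP_HIGHUSER_MOVABLE, 0);
	}

	static void my_free_target(struct page *page, unsigned long private)
	{
		__free_pages(page, 0);
	}

	static int my_migrate_list(struct list_head *pagelist)
	{
		return migrate_pages(pagelist, my_alloc_target, my_free_target,
				     0, MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
	}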
page              150 include/linux/mm.h static inline void __mm_zero_struct_page(struct page *page)
page              152 include/linux/mm.h 	unsigned long *_pp = (void *)page;
page              155 include/linux/mm.h 	BUILD_BUG_ON(sizeof(struct page) & 7);
page              156 include/linux/mm.h 	BUILD_BUG_ON(sizeof(struct page) < 56);
page              157 include/linux/mm.h 	BUILD_BUG_ON(sizeof(struct page) > 80);
page              159 include/linux/mm.h 	switch (sizeof(struct page)) {
page              177 include/linux/mm.h #define mm_zero_struct_page(pp)  ((void)memset((pp), 0, sizeof(struct page)))
page              213 include/linux/mm.h #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
page              221 include/linux/mm.h #define lru_to_page(head) (list_entry((head)->prev, struct page, lru))
page              430 include/linux/mm.h 	struct page *cow_page;		/* Page handler may use for COW fault */
page              432 include/linux/mm.h 	struct page *page;		/* ->fault handlers should return a
page              525 include/linux/mm.h 	struct page *(*find_special_page)(struct vm_area_struct *vma,
page              605 include/linux/mm.h static inline int put_page_testzero(struct page *page)
page              607 include/linux/mm.h 	VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
page              608 include/linux/mm.h 	return page_ref_dec_and_test(page);
page              617 include/linux/mm.h static inline int get_page_unless_zero(struct page *page)
page              619 include/linux/mm.h 	return page_ref_add_unless(page, 1, 0);
page              634 include/linux/mm.h struct page *vmalloc_to_page(const void *addr);
page              704 include/linux/mm.h static inline int compound_mapcount(struct page *page)
page              706 include/linux/mm.h 	VM_BUG_ON_PAGE(!PageCompound(page), page);
page              707 include/linux/mm.h 	page = compound_head(page);
page              708 include/linux/mm.h 	return atomic_read(compound_mapcount_ptr(page)) + 1;
page              716 include/linux/mm.h static inline void page_mapcount_reset(struct page *page)
page              718 include/linux/mm.h 	atomic_set(&(page)->_mapcount, -1);
page              721 include/linux/mm.h int __page_mapcount(struct page *page);
page              731 include/linux/mm.h static inline int page_mapcount(struct page *page)
page              733 include/linux/mm.h 	if (unlikely(PageCompound(page)))
page              734 include/linux/mm.h 		return __page_mapcount(page);
page              735 include/linux/mm.h 	return atomic_read(&page->_mapcount) + 1;
page              739 include/linux/mm.h int total_mapcount(struct page *page);
page              740 include/linux/mm.h int page_trans_huge_mapcount(struct page *page, int *total_mapcount);
page              742 include/linux/mm.h static inline int total_mapcount(struct page *page)
page              744 include/linux/mm.h 	return page_mapcount(page);
page              746 include/linux/mm.h static inline int page_trans_huge_mapcount(struct page *page,
page              749 include/linux/mm.h 	int mapcount = page_mapcount(page);
page              756 include/linux/mm.h static inline struct page *virt_to_head_page(const void *x)
page              758 include/linux/mm.h 	struct page *page = virt_to_page(x);
page              760 include/linux/mm.h 	return compound_head(page);
page              763 include/linux/mm.h void __put_page(struct page *page);
page              767 include/linux/mm.h void split_page(struct page *page, unsigned int order);
page              774 include/linux/mm.h typedef void compound_page_dtor(struct page *);
page              790 include/linux/mm.h static inline void set_compound_page_dtor(struct page *page,
page              793 include/linux/mm.h 	VM_BUG_ON_PAGE(compound_dtor >= NR_COMPOUND_DTORS, page);
page              794 include/linux/mm.h 	page[1].compound_dtor = compound_dtor;
page              797 include/linux/mm.h static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
page              799 include/linux/mm.h 	VM_BUG_ON_PAGE(page[1].compound_dtor >= NR_COMPOUND_DTORS, page);
page              800 include/linux/mm.h 	return compound_page_dtors[page[1].compound_dtor];
page              803 include/linux/mm.h static inline unsigned int compound_order(struct page *page)
page              805 include/linux/mm.h 	if (!PageHead(page))
page              807 include/linux/mm.h 	return page[1].compound_order;
page              810 include/linux/mm.h static inline void set_compound_order(struct page *page, unsigned int order)
page              812 include/linux/mm.h 	page[1].compound_order = order;
page              816 include/linux/mm.h static inline unsigned long compound_nr(struct page *page)
page              818 include/linux/mm.h 	return 1UL << compound_order(page);
page              822 include/linux/mm.h static inline unsigned long page_size(struct page *page)
page              824 include/linux/mm.h 	return PAGE_SIZE << compound_order(page);
page              828 include/linux/mm.h static inline unsigned int page_shift(struct page *page)
page              830 include/linux/mm.h 	return PAGE_SHIFT + compound_order(page);
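compound_order(), compound_nr(), page_size() and page_shift() above give the size of an allocation without open-coding shifts against PAGE_SHIFT. A one-line illustration, assuming a lowmem page so page_address() is valid; my_copy_out is hypothetical:

	static void my_copy_out(void *dst, struct page *page)
	{
		/* PAGE_SIZE << compound_order(page); plain PAGE_SIZE if not compound */
		memcpy(dst, page_address(page), page_size(page));
	}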
page              833 include/linux/mm.h void free_compound_page(struct page *page);
page              850 include/linux/mm.h 		struct page *page);
page              962 include/linux/mm.h static inline enum zone_type page_zonenum(const struct page *page)
page              964 include/linux/mm.h 	return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
page              968 include/linux/mm.h static inline bool is_zone_device_page(const struct page *page)
page              970 include/linux/mm.h 	return page_zonenum(page) == ZONE_DEVICE;
page              975 include/linux/mm.h static inline bool is_zone_device_page(const struct page *page)
page              982 include/linux/mm.h void __put_devmap_managed_page(struct page *page);
page              984 include/linux/mm.h static inline bool put_devmap_managed_page(struct page *page)
page              988 include/linux/mm.h 	if (!is_zone_device_page(page))
page              990 include/linux/mm.h 	switch (page->pgmap->type) {
page              993 include/linux/mm.h 		__put_devmap_managed_page(page);
page             1002 include/linux/mm.h static inline bool put_devmap_managed_page(struct page *page)
page             1008 include/linux/mm.h static inline bool is_device_private_page(const struct page *page)
page             1012 include/linux/mm.h 		is_zone_device_page(page) &&
page             1013 include/linux/mm.h 		page->pgmap->type == MEMORY_DEVICE_PRIVATE;
page             1016 include/linux/mm.h static inline bool is_pci_p2pdma_page(const struct page *page)
page             1020 include/linux/mm.h 		is_zone_device_page(page) &&
page             1021 include/linux/mm.h 		page->pgmap->type == MEMORY_DEVICE_PCI_P2PDMA;
page             1025 include/linux/mm.h #define page_ref_zero_or_close_to_overflow(page) \
page             1026 include/linux/mm.h 	((unsigned int) page_ref_count(page) + 127u <= 127u)
page             1028 include/linux/mm.h static inline void get_page(struct page *page)
page             1030 include/linux/mm.h 	page = compound_head(page);
page             1035 include/linux/mm.h 	VM_BUG_ON_PAGE(page_ref_zero_or_close_to_overflow(page), page);
page             1036 include/linux/mm.h 	page_ref_inc(page);
page             1039 include/linux/mm.h static inline __must_check bool try_get_page(struct page *page)
page             1041 include/linux/mm.h 	page = compound_head(page);
page             1042 include/linux/mm.h 	if (WARN_ON_ONCE(page_ref_count(page) <= 0))
page             1044 include/linux/mm.h 	page_ref_inc(page);
page             1048 include/linux/mm.h static inline void put_page(struct page *page)
page             1050 include/linux/mm.h 	page = compound_head(page);
page             1058 include/linux/mm.h 	if (put_devmap_managed_page(page))
page             1061 include/linux/mm.h 	if (put_page_testzero(page))
page             1062 include/linux/mm.h 		__put_page(page);
page             1080 include/linux/mm.h static inline void put_user_page(struct page *page)
page             1082 include/linux/mm.h 	put_page(page);
page             1085 include/linux/mm.h void put_user_pages_dirty_lock(struct page **pages, unsigned long npages,
page             1088 include/linux/mm.h void put_user_pages(struct page **pages, unsigned long npages);
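put_user_page()/put_user_pages_dirty_lock() above are the release half of the get_user_pages*() pinning API: pins taken for direct access should be returned through these rather than bare put_page(), so pinned-page accounting can be layered in later. A sketch of the pairing, using get_user_pages_fast() as the pinning side; my_pin_and_dirty is hypothetical:

	static int my_pin_and_dirty(unsigned long uaddr, int npages,
				    struct page **pages)
	{
		int pinned;

		pinned = get_user_pages_fast(uaddr, npages, FOLL_WRITE, pages);
		if (pinned < 0)
			return pinned;

		/* ... DMA or direct access to the pinned pages goes here ... */

		/* release every page we actually pinned, marking them dirty */
		put_user_pages_dirty_lock(pages, pinned, true);
		return pinned == npages ? 0 : -EFAULT;
	}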
page             1102 include/linux/mm.h static inline int page_zone_id(struct page *page)
page             1104 include/linux/mm.h 	return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
page             1108 include/linux/mm.h extern int page_to_nid(const struct page *page);
page             1110 include/linux/mm.h static inline int page_to_nid(const struct page *page)
page             1112 include/linux/mm.h 	struct page *p = (struct page *)page;
page             1156 include/linux/mm.h static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
page             1158 include/linux/mm.h 	return xchg(&page->_last_cpupid, cpupid & LAST_CPUPID_MASK);
page             1161 include/linux/mm.h static inline int page_cpupid_last(struct page *page)
page             1163 include/linux/mm.h 	return page->_last_cpupid;
page             1165 include/linux/mm.h static inline void page_cpupid_reset_last(struct page *page)
page             1167 include/linux/mm.h 	page->_last_cpupid = -1 & LAST_CPUPID_MASK;
page             1170 include/linux/mm.h static inline int page_cpupid_last(struct page *page)
page             1172 include/linux/mm.h 	return (page->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
page             1175 include/linux/mm.h extern int page_cpupid_xchg_last(struct page *page, int cpupid);
page             1177 include/linux/mm.h static inline void page_cpupid_reset_last(struct page *page)
page             1179 include/linux/mm.h 	page->flags |= LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT;
page             1183 include/linux/mm.h static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
page             1185 include/linux/mm.h 	return page_to_nid(page); /* XXX */
page             1188 include/linux/mm.h static inline int page_cpupid_last(struct page *page)
page             1190 include/linux/mm.h 	return page_to_nid(page); /* XXX */
page             1218 include/linux/mm.h static inline void page_cpupid_reset_last(struct page *page)
page             1229 include/linux/mm.h static inline u8 page_kasan_tag(const struct page *page)
page             1231 include/linux/mm.h 	return (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
page             1234 include/linux/mm.h static inline void page_kasan_tag_set(struct page *page, u8 tag)
page             1236 include/linux/mm.h 	page->flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT);
page             1237 include/linux/mm.h 	page->flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT;
page             1240 include/linux/mm.h static inline void page_kasan_tag_reset(struct page *page)
page             1242 include/linux/mm.h 	page_kasan_tag_set(page, 0xff);
page             1245 include/linux/mm.h static inline u8 page_kasan_tag(const struct page *page)
page             1250 include/linux/mm.h static inline void page_kasan_tag_set(struct page *page, u8 tag) { }
page             1251 include/linux/mm.h static inline void page_kasan_tag_reset(struct page *page) { }
page             1254 include/linux/mm.h static inline struct zone *page_zone(const struct page *page)
page             1256 include/linux/mm.h 	return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
page             1259 include/linux/mm.h static inline pg_data_t *page_pgdat(const struct page *page)
page             1261 include/linux/mm.h 	return NODE_DATA(page_to_nid(page));
page             1265 include/linux/mm.h static inline void set_page_section(struct page *page, unsigned long section)
page             1267 include/linux/mm.h 	page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
page             1268 include/linux/mm.h 	page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
page             1271 include/linux/mm.h static inline unsigned long page_to_section(const struct page *page)
page             1273 include/linux/mm.h 	return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
page             1277 include/linux/mm.h static inline void set_page_zone(struct page *page, enum zone_type zone)
page             1279 include/linux/mm.h 	page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
page             1280 include/linux/mm.h 	page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
page             1283 include/linux/mm.h static inline void set_page_node(struct page *page, unsigned long node)
page             1285 include/linux/mm.h 	page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
page             1286 include/linux/mm.h 	page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
page             1289 include/linux/mm.h static inline void set_page_links(struct page *page, enum zone_type zone,
page             1292 include/linux/mm.h 	set_page_zone(page, zone);
page             1293 include/linux/mm.h 	set_page_node(page, node);
page             1295 include/linux/mm.h 	set_page_section(page, pfn_to_section_nr(pfn));
page             1300 include/linux/mm.h static inline struct mem_cgroup *page_memcg(struct page *page)
page             1302 include/linux/mm.h 	return page->mem_cgroup;
page             1304 include/linux/mm.h static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
page             1307 include/linux/mm.h 	return READ_ONCE(page->mem_cgroup);
page             1310 include/linux/mm.h static inline struct mem_cgroup *page_memcg(struct page *page)
page             1314 include/linux/mm.h static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
page             1326 include/linux/mm.h static __always_inline void *lowmem_page_address(const struct page *page)
page             1328 include/linux/mm.h 	return page_to_virt(page);
page             1336 include/linux/mm.h static inline void *page_address(const struct page *page)
page             1338 include/linux/mm.h 	return page->virtual;
page             1340 include/linux/mm.h static inline void set_page_address(struct page *page, void *address)
page             1342 include/linux/mm.h 	page->virtual = address;
page             1348 include/linux/mm.h void *page_address(const struct page *page);
page             1349 include/linux/mm.h void set_page_address(struct page *page, void *virtual);
page             1354 include/linux/mm.h #define page_address(page) lowmem_page_address(page)
page             1355 include/linux/mm.h #define set_page_address(page, address)  do { } while(0)
page             1359 include/linux/mm.h extern void *page_rmapping(struct page *page);
page             1360 include/linux/mm.h extern struct anon_vma *page_anon_vma(struct page *page);
page             1361 include/linux/mm.h extern struct address_space *page_mapping(struct page *page);
page             1363 include/linux/mm.h extern struct address_space *__page_file_mapping(struct page *);
page             1366 include/linux/mm.h struct address_space *page_file_mapping(struct page *page)
page             1368 include/linux/mm.h 	if (unlikely(PageSwapCache(page)))
page             1369 include/linux/mm.h 		return __page_file_mapping(page);
page             1371 include/linux/mm.h 	return page->mapping;
page             1374 include/linux/mm.h extern pgoff_t __page_file_index(struct page *page);
page             1380 include/linux/mm.h static inline pgoff_t page_index(struct page *page)
page             1382 include/linux/mm.h 	if (unlikely(PageSwapCache(page)))
page             1383 include/linux/mm.h 		return __page_file_index(page);
page             1384 include/linux/mm.h 	return page->index;
page             1387 include/linux/mm.h bool page_mapped(struct page *page);
page             1388 include/linux/mm.h struct address_space *page_mapping(struct page *page);
page             1389 include/linux/mm.h struct address_space *page_mapping_file(struct page *page);
page             1396 include/linux/mm.h static inline bool page_is_pfmemalloc(struct page *page)
page             1402 include/linux/mm.h 	return page->index == -1UL;
page             1409 include/linux/mm.h static inline void set_page_pfmemalloc(struct page *page)
page             1411 include/linux/mm.h 	page->index = -1UL;
page             1414 include/linux/mm.h static inline void clear_page_pfmemalloc(struct page *page)
page             1416 include/linux/mm.h 	page->index = 0;
page             1451 include/linux/mm.h struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
page             1453 include/linux/mm.h struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
page             1483 include/linux/mm.h int truncate_inode_page(struct address_space *mapping, struct page *page);
page             1484 include/linux/mm.h int generic_error_remove_page(struct address_space *mapping, struct page *page);
page             1485 include/linux/mm.h int invalidate_inode_page(struct page *page);
page             1534 include/linux/mm.h 			    unsigned int gup_flags, struct page **pages,
page             1537 include/linux/mm.h 			    unsigned int gup_flags, struct page **pages,
page             1540 include/linux/mm.h 		    unsigned int gup_flags, struct page **pages, int *locked);
page             1542 include/linux/mm.h 		    struct page **pages, unsigned int gup_flags);
page             1545 include/linux/mm.h 			unsigned int gup_flags, struct page **pages);
page             1575 include/linux/mm.h static inline struct page **frame_vector_pages(struct frame_vector *vec)
page             1583 include/linux/mm.h 	return (struct page **)(vec->ptrs);
page             1595 include/linux/mm.h 			struct page **pages);
page             1596 include/linux/mm.h int get_kernel_page(unsigned long start, int write, struct page **pages);
page             1597 include/linux/mm.h struct page *get_dump_page(unsigned long addr);
page             1599 include/linux/mm.h extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
page             1600 include/linux/mm.h extern void do_invalidatepage(struct page *page, unsigned int offset,
page             1603 include/linux/mm.h void __set_page_dirty(struct page *, struct address_space *, int warn);
page             1604 include/linux/mm.h int __set_page_dirty_nobuffers(struct page *page);
page             1605 include/linux/mm.h int __set_page_dirty_no_writeback(struct page *page);
page             1607 include/linux/mm.h 				struct page *page);
page             1608 include/linux/mm.h void account_page_dirtied(struct page *page, struct address_space *mapping);
page             1609 include/linux/mm.h void account_page_cleaned(struct page *page, struct address_space *mapping,
page             1611 include/linux/mm.h int set_page_dirty(struct page *page);
page             1612 include/linux/mm.h int set_page_dirty_lock(struct page *page);
page             1613 include/linux/mm.h void __cancel_dirty_page(struct page *page);
page             1614 include/linux/mm.h static inline void cancel_dirty_page(struct page *page)
page             1617 include/linux/mm.h 	if (PageDirty(page))
page             1618 include/linux/mm.h 		__cancel_dirty_page(page);
page             1620 include/linux/mm.h int clear_page_dirty_for_io(struct page *page);
page             1639 include/linux/mm.h 			  struct page **pages);
page             1674 include/linux/mm.h static inline int mm_counter_file(struct page *page)
page             1676 include/linux/mm.h 	if (PageSwapBacked(page))
page             1681 include/linux/mm.h static inline int mm_counter(struct page *page)
page             1683 include/linux/mm.h 	if (PageAnon(page))
page             1685 include/linux/mm.h 	return mm_counter_file(page);
page             1892 include/linux/mm.h extern bool ptlock_alloc(struct page *page);
page             1893 include/linux/mm.h extern void ptlock_free(struct page *page);
page             1895 include/linux/mm.h static inline spinlock_t *ptlock_ptr(struct page *page)
page             1897 include/linux/mm.h 	return page->ptl;
page             1904 include/linux/mm.h static inline bool ptlock_alloc(struct page *page)
page             1909 include/linux/mm.h static inline void ptlock_free(struct page *page)
page             1913 include/linux/mm.h static inline spinlock_t *ptlock_ptr(struct page *page)
page             1915 include/linux/mm.h 	return &page->ptl;
page             1924 include/linux/mm.h static inline bool ptlock_init(struct page *page)
page             1933 include/linux/mm.h 	VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page);
page             1934 include/linux/mm.h 	if (!ptlock_alloc(page))
page             1936 include/linux/mm.h 	spin_lock_init(ptlock_ptr(page));
page             1949 include/linux/mm.h static inline bool ptlock_init(struct page *page) { return true; }
page             1950 include/linux/mm.h static inline void ptlock_free(struct page *page) {}
page             1959 include/linux/mm.h static inline bool pgtable_pte_page_ctor(struct page *page)
page             1961 include/linux/mm.h 	if (!ptlock_init(page))
page             1963 include/linux/mm.h 	__SetPageTable(page);
page             1964 include/linux/mm.h 	inc_zone_page_state(page, NR_PAGETABLE);
page             1968 include/linux/mm.h static inline void pgtable_pte_page_dtor(struct page *page)
page             1970 include/linux/mm.h 	ptlock_free(page);
page             1971 include/linux/mm.h 	__ClearPageTable(page);
page             1972 include/linux/mm.h 	dec_zone_page_state(page, NR_PAGETABLE);
page             2004 include/linux/mm.h static struct page *pmd_to_page(pmd_t *pmd)
page             2015 include/linux/mm.h static inline bool pgtable_pmd_page_ctor(struct page *page)
page             2018 include/linux/mm.h 	page->pmd_huge_pte = NULL;
page             2020 include/linux/mm.h 	return ptlock_init(page);
page             2023 include/linux/mm.h static inline void pgtable_pmd_page_dtor(struct page *page)
page             2026 include/linux/mm.h 	VM_BUG_ON_PAGE(page->pmd_huge_pte, page);
page             2028 include/linux/mm.h 	ptlock_free(page);
page             2040 include/linux/mm.h static inline bool pgtable_pmd_page_ctor(struct page *page) { return true; }
page             2041 include/linux/mm.h static inline void pgtable_pmd_page_dtor(struct page *page) {}
page             2093 include/linux/mm.h extern void free_highmem_page(struct page *page);
page             2096 include/linux/mm.h extern void adjust_managed_page_count(struct page *page, long count);
page             2102 include/linux/mm.h static inline void __free_reserved_page(struct page *page)
page             2104 include/linux/mm.h 	ClearPageReserved(page);
page             2105 include/linux/mm.h 	init_page_count(page);
page             2106 include/linux/mm.h 	__free_page(page);
page             2109 include/linux/mm.h static inline void free_reserved_page(struct page *page)
page             2111 include/linux/mm.h 	__free_reserved_page(page);
page             2112 include/linux/mm.h 	adjust_managed_page_count(page, 1);
page             2115 include/linux/mm.h static inline void mark_page_reserved(struct page *page)
page             2117 include/linux/mm.h 	SetPageReserved(page);
page             2118 include/linux/mm.h 	adjust_managed_page_count(page, -1);
page             2336 include/linux/mm.h 				   unsigned long flags, struct page **pages);
page             2427 include/linux/mm.h int __must_check write_one_page(struct page *page);
page             2445 include/linux/mm.h 				struct page *pg,
page             2547 include/linux/mm.h int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
page             2548 include/linux/mm.h int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
page             2550 include/linux/mm.h int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
page             2563 include/linux/mm.h 				unsigned long addr, struct page *page)
page             2565 include/linux/mm.h 	int err = vm_insert_page(vma, addr, page);
page             2582 include/linux/mm.h struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
page             2650 include/linux/mm.h extern void kernel_poison_pages(struct page *page, int numpages, int enable);
page             2653 include/linux/mm.h static inline void kernel_poison_pages(struct page *page, int numpages,
page             2708 include/linux/mm.h extern void __kernel_map_pages(struct page *page, int numpages, int enable);
page             2715 include/linux/mm.h kernel_map_pages(struct page *page, int numpages, int enable)
page             2717 include/linux/mm.h 	__kernel_map_pages(page, numpages, enable);
page             2720 include/linux/mm.h extern bool kernel_page_present(struct page *page);
page             2724 include/linux/mm.h kernel_map_pages(struct page *page, int numpages, int enable) {}
page             2726 include/linux/mm.h static inline bool kernel_page_present(struct page *page) { return true; }
page             2773 include/linux/mm.h struct page * __populate_section_memmap(unsigned long pfn,
page             2794 include/linux/mm.h void register_page_bootmem_memmap(unsigned long section_nr, struct page *map,
page             2806 include/linux/mm.h extern int get_hwpoison_page(struct page *page);
page             2807 include/linux/mm.h #define put_hwpoison_page(page)	put_page(page)
page             2810 include/linux/mm.h extern void shake_page(struct page *p, int access);
page             2812 include/linux/mm.h extern int soft_offline_page(struct page *page, int flags);
page             2851 include/linux/mm.h extern void clear_huge_page(struct page *page,
page             2854 include/linux/mm.h extern void copy_user_huge_page(struct page *dst, struct page *src,
page             2858 include/linux/mm.h extern long copy_huge_page_from_user(struct page *dst_page,
page             2878 include/linux/mm.h static inline bool page_is_guard(struct page *page)
page             2883 include/linux/mm.h 	return PageGuard(page);
page             2888 include/linux/mm.h static inline bool page_is_guard(struct page *page) { return false; }
page             2897 include/linux/mm.h extern int memcmp_pages(struct page *page1, struct page *page2);
page             2899 include/linux/mm.h static inline int pages_identical(struct page *page1, struct page *page2)
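The pgtable_pte_page_ctor()/pgtable_pte_page_dtor() pair listed above (mm.h:1959 and :1968) brackets the life of a page used as a PTE table: the ctor initializes the split ptlock, tags the page with __SetPageTable() and bumps NR_PAGETABLE; the dtor reverses all three. A minimal sketch of how an allocation path pairs them, modelled on the generic pte_alloc_one() shape; the GFP flags here are this example's assumption:

```c
/* Sketch: allocate one page to hold a PTE table, pairing the ctor with
 * the error path. Modelled on the asm-generic pte_alloc_one() shape;
 * the GFP flags are an assumption, not the canonical ones. */
static inline pgtable_t pte_alloc_one_sketch(struct mm_struct *mm)
{
	struct page *pte;

	pte = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!pte)
		return NULL;
	if (!pgtable_pte_page_ctor(pte)) {	/* ptlock_init() can fail */
		__free_page(pte);
		return NULL;
	}
	return pte;
}

/* The free side must run the dtor first, or NR_PAGETABLE drifts. */
static inline void pte_free_sketch(struct mm_struct *mm, pgtable_t pte)
{
	pgtable_pte_page_dtor(pte);
	__free_page(pte);
}
```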
page               21 include/linux/mm_inline.h static inline int page_is_file_cache(struct page *page)
page               23 include/linux/mm_inline.h 	return !PageSwapBacked(page);
page               47 include/linux/mm_inline.h static __always_inline void add_page_to_lru_list(struct page *page,
page               50 include/linux/mm_inline.h 	update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page));
page               51 include/linux/mm_inline.h 	list_add(&page->lru, &lruvec->lists[lru]);
page               54 include/linux/mm_inline.h static __always_inline void add_page_to_lru_list_tail(struct page *page,
page               57 include/linux/mm_inline.h 	update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page));
page               58 include/linux/mm_inline.h 	list_add_tail(&page->lru, &lruvec->lists[lru]);
page               61 include/linux/mm_inline.h static __always_inline void del_page_from_lru_list(struct page *page,
page               64 include/linux/mm_inline.h 	list_del(&page->lru);
page               65 include/linux/mm_inline.h 	update_lru_size(lruvec, lru, page_zonenum(page), -hpage_nr_pages(page));
page               76 include/linux/mm_inline.h static inline enum lru_list page_lru_base_type(struct page *page)
page               78 include/linux/mm_inline.h 	if (page_is_file_cache(page))
page               90 include/linux/mm_inline.h static __always_inline enum lru_list page_off_lru(struct page *page)
page               94 include/linux/mm_inline.h 	if (PageUnevictable(page)) {
page               95 include/linux/mm_inline.h 		__ClearPageUnevictable(page);
page               98 include/linux/mm_inline.h 		lru = page_lru_base_type(page);
page               99 include/linux/mm_inline.h 		if (PageActive(page)) {
page              100 include/linux/mm_inline.h 			__ClearPageActive(page);
page              114 include/linux/mm_inline.h static __always_inline enum lru_list page_lru(struct page *page)
page              118 include/linux/mm_inline.h 	if (PageUnevictable(page))
page              121 include/linux/mm_inline.h 		lru = page_lru_base_type(page);
page              122 include/linux/mm_inline.h 		if (PageActive(page))
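The mm_inline.h helpers above keep list membership and the per-lruvec size counters in lock-step: every list_add()/list_del() passes through update_lru_size() with a +/- hpage_nr_pages() delta. A sketch of the deactivation idiom built from them; the caller is assumed to hold the LRU lock, and the function name is hypothetical:

```c
/* Sketch: move a page from its current LRU list to the inactive list of
 * its base type (file or anon), using only the helpers listed above.
 * Assumes the LRU lock is held; unevictable pages end up back on the
 * unevictable list, since page_lru() short-circuits for them. */
static void demote_page_sketch(struct page *page, struct lruvec *lruvec)
{
	del_page_from_lru_list(page, lruvec, page_lru(page));
	ClearPageActive(page);
	/* page_lru() now reports the inactive list for the page's type */
	add_page_to_lru_list(page, lruvec, page_lru(page));
}
```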
page              107 include/linux/mm_types.h 					struct page *next;
page              224 include/linux/mm_types.h static inline atomic_t *compound_mapcount_ptr(struct page *page)
page              226 include/linux/mm_types.h 	return &page[1].compound_mapcount;
page              232 include/linux/mm_types.h #define STRUCT_PAGE_MAX_SHIFT	(order_base_2(sizeof(struct page)))
page              237 include/linux/mm_types.h #define page_private(page)		((page)->private)
page              238 include/linux/mm_types.h #define set_page_private(page, v)	((page)->private = (v))
page              726 include/linux/mm_types.h 	struct page **pages;
page               65 include/linux/mm_types_task.h 	struct page *page;
page                8 include/linux/mmdebug.h struct page;
page               12 include/linux/mmdebug.h extern void dump_page(struct page *page, const char *reason);
page               13 include/linux/mmdebug.h extern void __dump_page(struct page *page, const char *reason);
page               19 include/linux/mmdebug.h #define VM_BUG_ON_PAGE(cond, page)					\
page               22 include/linux/mmdebug.h 			dump_page(page, "VM_BUG_ON_PAGE(" __stringify(cond)")");\
page               46 include/linux/mmdebug.h #define VM_BUG_ON_PAGE(cond, page) VM_BUG_ON(cond)
page               62 include/linux/mmdebug.h #define VM_BUG_ON_PGFLAGS(cond, page) VM_BUG_ON_PAGE(cond, page)
page               64 include/linux/mmdebug.h #define VM_BUG_ON_PGFLAGS(cond, page) BUILD_BUG_ON_INVALID(cond)
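The listing only shows the dump_page() line of the CONFIG_DEBUG_VM flavour of VM_BUG_ON_PAGE (mmdebug.h:19-22); the elided middle is a guarded dump-then-BUG. A reconstruction of the full macro, reproduced from the usual shape rather than copied from source:

```c
/* Reconstruction of the debug flavour around the dump_page() call shown
 * at mmdebug.h:22. With CONFIG_DEBUG_VM off, the header degrades it to
 * plain VM_BUG_ON(cond), as mmdebug.h:46 shows. */
#define VM_BUG_ON_PAGE(cond, page)					\
	do {								\
		if (unlikely(cond)) {					\
			dump_page(page, "VM_BUG_ON_PAGE(" __stringify(cond)")");\
			BUG();						\
		}							\
	} while (0)
```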
page               94 include/linux/mmzone.h #define get_pageblock_migratetype(page)					\
page               95 include/linux/mmzone.h 	get_pfnblock_flags_mask(page, page_to_pfn(page),		\
page              104 include/linux/mmzone.h static inline void add_to_free_area(struct page *page, struct free_area *area,
page              107 include/linux/mmzone.h 	list_add(&page->lru, &area->free_list[migratetype]);
page              112 include/linux/mmzone.h static inline void add_to_free_area_tail(struct page *page, struct free_area *area,
page              115 include/linux/mmzone.h 	list_add_tail(&page->lru, &area->free_list[migratetype]);
page              121 include/linux/mmzone.h void add_to_free_area_random(struct page *page, struct free_area *area,
page              124 include/linux/mmzone.h static inline void add_to_free_area_random(struct page *page,
page              127 include/linux/mmzone.h 	add_to_free_area(page, area, migratetype);
page              132 include/linux/mmzone.h static inline void move_to_free_area(struct page *page, struct free_area *area,
page              135 include/linux/mmzone.h 	list_move(&page->lru, &area->free_list[migratetype]);
page              138 include/linux/mmzone.h static inline struct page *get_page_from_free_area(struct free_area *area,
page              142 include/linux/mmzone.h 					struct page, lru);
page              145 include/linux/mmzone.h static inline void del_page_from_free_area(struct page *page,
page              148 include/linux/mmzone.h 	list_del(&page->lru);
page              149 include/linux/mmzone.h 	__ClearPageBuddy(page);
page              150 include/linux/mmzone.h 	set_page_private(page, 0);
page              678 include/linux/mmzone.h extern struct page *mem_map;
page              703 include/linux/mmzone.h 	struct page *node_mem_map;
page             1182 include/linux/mmzone.h struct page;
page             1269 include/linux/mmzone.h static inline struct page *__section_mem_map_addr(struct mem_section *section)
page             1273 include/linux/mmzone.h 	return (struct page *)map;
page             1444 include/linux/mmzone.h 					struct page *page, struct zone *zone);
page             1447 include/linux/mmzone.h 					struct page *page, struct zone *zone)
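The free-area helpers above are the buddy allocator's list plumbing; note that del_page_from_free_area() (mmzone.h:145-150) clears the buddy tag and the private field, mirroring the setup done when a block is placed on a free list. A sketch of how the split path pairs them, modelled on expand() in mm/page_alloc.c; the inlined order bookkeeping stands in for the real set_page_order() there:

```c
/* Sketch: split a high-order free block down to the requested order,
 * parking each upper half on the matching free list. Modelled on
 * expand() in mm/page_alloc.c; debug-guard-page handling is omitted. */
static void split_block_sketch(struct zone *zone, struct page *page,
			       unsigned int high, unsigned int low,
			       int migratetype)
{
	unsigned long size = 1UL << high;

	while (high > low) {
		struct free_area *area = &zone->free_area[--high];

		size >>= 1;
		add_to_free_area(&page[size], area, migratetype);
		/* the real set_page_order(): record order, mark as buddy */
		set_page_private(&page[size], high);
		__SetPageBuddy(&page[size]);
	}
}
```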
page               19 include/linux/mpage.h int mpage_readpage(struct page *page, get_block_t get_block);
page               22 include/linux/mpage.h int mpage_writepage(struct page *page, get_block_t *get_block,
page               80 include/linux/mtd/nand.h 	unsigned int page;
page              462 include/linux/mtd/nand.h 	pos->page = do_div(tmp, nand->memorg.pages_per_eraseblock);
page              492 include/linux/mtd/nand.h 	if (a->page != b->page)
page              493 include/linux/mtd/nand.h 		return a->page < b->page ? -1 : 1;
page              514 include/linux/mtd/nand.h 	npages = pos->page +
page              539 include/linux/mtd/nand.h 	       pos->page;
page              553 include/linux/mtd/nand.h 	pos->page = 0;
page              575 include/linux/mtd/nand.h 	pos->page = 0;
page              595 include/linux/mtd/nand.h 	pos->page = 0;
page              610 include/linux/mtd/nand.h 	if (pos->page >= nand->memorg.pages_per_eraseblock - 1)
page              613 include/linux/mtd/nand.h 	pos->page++;
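The do_div() at nand.h:462 is one step of peeling a flat media offset into a NAND position, one hierarchy level per division. A reconstruction of the surrounding conversion following that structure; the memorg field names beyond pages_per_eraseblock are reproduced from memory, and the plane component is omitted, so treat the exact identifiers as assumptions:

```c
/* Sketch: decompose a flat byte offset into (target, lun, eraseblock,
 * page), one do_div() per hierarchy level, as the listed nand.h:462
 * line does for the page component. Returns the offset within the page. */
static inline unsigned int offs_to_pos_sketch(struct nand_device *nand,
					      loff_t offs,
					      struct nand_pos *pos)
{
	unsigned int pageoffs;
	u64 tmp = offs;

	pageoffs = do_div(tmp, nand->memorg.pagesize);
	pos->page = do_div(tmp, nand->memorg.pages_per_eraseblock);
	pos->eraseblock = do_div(tmp, nand->memorg.eraseblocks_per_lun);
	pos->lun = do_div(tmp, nand->memorg.luns_per_target);
	pos->target = tmp;

	return pageoffs;
}
```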
page              373 include/linux/mtd/rawnand.h 			     int oob_required, int page);
page              375 include/linux/mtd/rawnand.h 			      int oob_required, int page);
page              377 include/linux/mtd/rawnand.h 			 int oob_required, int page);
page              379 include/linux/mtd/rawnand.h 			    uint32_t len, uint8_t *buf, int page);
page              382 include/linux/mtd/rawnand.h 			     int oob_required, int page);
page              384 include/linux/mtd/rawnand.h 			  int oob_required, int page);
page              385 include/linux/mtd/rawnand.h 	int (*write_oob_raw)(struct nand_chip *chip, int page);
page              386 include/linux/mtd/rawnand.h 	int (*read_oob_raw)(struct nand_chip *chip, int page);
page              387 include/linux/mtd/rawnand.h 	int (*read_oob)(struct nand_chip *chip, int page);
page              388 include/linux/mtd/rawnand.h 	int (*write_oob)(struct nand_chip *chip, int page);
page             1101 include/linux/mtd/rawnand.h 		int page;
page             1306 include/linux/mtd/rawnand.h int nand_write_oob_std(struct nand_chip *chip, int page);
page             1309 include/linux/mtd/rawnand.h int nand_read_oob_std(struct nand_chip *chip, int page);
page             1317 include/linux/mtd/rawnand.h 		       int page);
page             1321 include/linux/mtd/rawnand.h 			int oob_required, int page);
page             1332 include/linux/mtd/rawnand.h int nand_read_page_op(struct nand_chip *chip, unsigned int page,
page             1337 include/linux/mtd/rawnand.h int nand_read_oob_op(struct nand_chip *chip, unsigned int page,
page             1339 include/linux/mtd/rawnand.h int nand_prog_page_begin_op(struct nand_chip *chip, unsigned int page,
page             1343 include/linux/mtd/rawnand.h int nand_prog_page_op(struct nand_chip *chip, unsigned int page,
page             1403 include/linux/mtd/rawnand.h 	chip->pagecache.page = -1;
page              127 include/linux/net.h struct page;
page              188 include/linux/net.h 	ssize_t		(*sendpage)  (struct socket *sock, struct page *page,
page              200 include/linux/net.h 	int		(*sendpage_locked)(struct sock *sk, struct page *page,
page              309 include/linux/net.h int kernel_sendpage(struct socket *sock, struct page *page, int offset,
page              311 include/linux/net.h int kernel_sendpage_locked(struct sock *sk, struct page *page, int offset,
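kernel_sendpage() (net.h:309) is the in-kernel mirror of the proto_ops->sendpage hook two entries above: it hands a page reference to the protocol for zero-copy transmit. A minimal caller sketch; the flag choice is an assumption of the example, not something this header mandates:

```c
/* Sketch: transmit one page's worth of data without first copying it
 * into a separate kernel buffer. MSG_MORE hints more data follows;
 * both flags are example choices, not requirements of the API. */
static int send_one_page_sketch(struct socket *sock, struct page *page,
				int offset, size_t len)
{
	int ret = kernel_sendpage(sock, page, offset, len,
				  MSG_DONTWAIT | MSG_MORE);

	return ret < 0 ? ret : 0;	/* ret is bytes queued on success */
}
```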
page              534 include/linux/nfs_fs.h extern int  nfs_writepage(struct page *page, struct writeback_control *wbc);
page              536 include/linux/nfs_fs.h extern int  nfs_flush_incompatible(struct file *file, struct page *page);
page              537 include/linux/nfs_fs.h extern int  nfs_updatepage(struct file *, struct page *, unsigned int, unsigned int);
page              545 include/linux/nfs_fs.h extern int nfs_wb_page(struct inode *inode, struct page *page);
page              546 include/linux/nfs_fs.h extern int nfs_wb_page_cancel(struct inode *inode, struct page* page);
page              560 include/linux/nfs_fs.h extern int  nfs_readpage(struct file *, struct page *);
page              564 include/linux/nfs_fs.h 			       struct page *);
page               44 include/linux/nfs_page.h 	struct page		*wb_page;	/* page to read in/write out */
page              117 include/linux/nfs_page.h 					    struct page *page,
page              237 include/linux/nfs_xdr.h 	struct page **pages;
page              296 include/linux/nfs_xdr.h 	struct page *layoutupdate_page;
page              297 include/linux/nfs_xdr.h 	struct page **layoutupdate_pages;
page              645 include/linux/nfs_xdr.h 	struct page **		pages;
page              786 include/linux/nfs_xdr.h 	struct page **			acl_pages;
page              797 include/linux/nfs_xdr.h 	struct page **			acl_pages;
page              807 include/linux/nfs_xdr.h 	struct page *			acl_scratch;
page              828 include/linux/nfs_xdr.h 	struct page **		pages;
page              837 include/linux/nfs_xdr.h 	struct page **		pages;
page              843 include/linux/nfs_xdr.h 	struct page **		pages;
page              853 include/linux/nfs_xdr.h 	struct page **		pages;
page              865 include/linux/nfs_xdr.h 	struct page **		pages;
page              906 include/linux/nfs_xdr.h 	struct page **		pages;
page              933 include/linux/nfs_xdr.h 	struct page **		pages;
page              951 include/linux/nfs_xdr.h 	struct page **		pages;
page              998 include/linux/nfs_xdr.h 			struct page **	pages;
page             1116 include/linux/nfs_xdr.h 	struct page **			pages;	/* zero-copy data */
page             1133 include/linux/nfs_xdr.h 	struct page **			pages;   /* zero-copy data */
page             1210 include/linux/nfs_xdr.h 	struct page *page;
page             1491 include/linux/nfs_xdr.h 	struct page		**pagevec;
page             1493 include/linux/nfs_xdr.h 	struct page		*page_array[NFS_PAGEVEC_SIZE];
page             1656 include/linux/nfs_xdr.h 	int	(*readlink)(struct inode *, struct page *, unsigned int,
page             1670 include/linux/nfs_xdr.h 	int	(*symlink) (struct inode *, struct dentry *, struct page *,
page             1675 include/linux/nfs_xdr.h 			    u64, struct page **, unsigned int, bool);
page              170 include/linux/page-flags.h struct page;	/* forward declaration */
page              172 include/linux/page-flags.h static inline struct page *compound_head(struct page *page)
page              174 include/linux/page-flags.h 	unsigned long head = READ_ONCE(page->compound_head);
page              177 include/linux/page-flags.h 		return (struct page *) (head - 1);
page              178 include/linux/page-flags.h 	return page;
page              181 include/linux/page-flags.h static __always_inline int PageTail(struct page *page)
page              183 include/linux/page-flags.h 	return READ_ONCE(page->compound_head) & 1;
page              186 include/linux/page-flags.h static __always_inline int PageCompound(struct page *page)
page              188 include/linux/page-flags.h 	return test_bit(PG_head, &page->flags) || PageTail(page);
page              192 include/linux/page-flags.h static inline int PagePoisoned(const struct page *page)
page              194 include/linux/page-flags.h 	return page->flags == PAGE_POISON_PATTERN;
page              198 include/linux/page-flags.h void page_init_poison(struct page *page, size_t size);
page              200 include/linux/page-flags.h static inline void page_init_poison(struct page *page, size_t size)
page              228 include/linux/page-flags.h #define PF_POISONED_CHECK(page) ({					\
page              229 include/linux/page-flags.h 		VM_BUG_ON_PGFLAGS(PagePoisoned(page), page);		\
page              230 include/linux/page-flags.h 		page; })
page              231 include/linux/page-flags.h #define PF_ANY(page, enforce)	PF_POISONED_CHECK(page)
page              232 include/linux/page-flags.h #define PF_HEAD(page, enforce)	PF_POISONED_CHECK(compound_head(page))
page              233 include/linux/page-flags.h #define PF_ONLY_HEAD(page, enforce) ({					\
page              234 include/linux/page-flags.h 		VM_BUG_ON_PGFLAGS(PageTail(page), page);		\
page              235 include/linux/page-flags.h 		PF_POISONED_CHECK(page); })
page              236 include/linux/page-flags.h #define PF_NO_TAIL(page, enforce) ({					\
page              237 include/linux/page-flags.h 		VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page);	\
page              238 include/linux/page-flags.h 		PF_POISONED_CHECK(compound_head(page)); })
page              239 include/linux/page-flags.h #define PF_NO_COMPOUND(page, enforce) ({				\
page              240 include/linux/page-flags.h 		VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page);	\
page              241 include/linux/page-flags.h 		PF_POISONED_CHECK(page); })
page              247 include/linux/page-flags.h static __always_inline int Page##uname(struct page *page)		\
page              248 include/linux/page-flags.h 	{ return test_bit(PG_##lname, &policy(page, 0)->flags); }
page              251 include/linux/page-flags.h static __always_inline void SetPage##uname(struct page *page)		\
page              252 include/linux/page-flags.h 	{ set_bit(PG_##lname, &policy(page, 1)->flags); }
page              255 include/linux/page-flags.h static __always_inline void ClearPage##uname(struct page *page)		\
page              256 include/linux/page-flags.h 	{ clear_bit(PG_##lname, &policy(page, 1)->flags); }
page              259 include/linux/page-flags.h static __always_inline void __SetPage##uname(struct page *page)		\
page              260 include/linux/page-flags.h 	{ __set_bit(PG_##lname, &policy(page, 1)->flags); }
page              263 include/linux/page-flags.h static __always_inline void __ClearPage##uname(struct page *page)	\
page              264 include/linux/page-flags.h 	{ __clear_bit(PG_##lname, &policy(page, 1)->flags); }
page              267 include/linux/page-flags.h static __always_inline int TestSetPage##uname(struct page *page)	\
page              268 include/linux/page-flags.h 	{ return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }
page              271 include/linux/page-flags.h static __always_inline int TestClearPage##uname(struct page *page)	\
page              272 include/linux/page-flags.h 	{ return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }
page              289 include/linux/page-flags.h static inline int Page##uname(const struct page *page) { return 0; }
page              292 include/linux/page-flags.h static inline void SetPage##uname(struct page *page) {  }
page              295 include/linux/page-flags.h static inline void ClearPage##uname(struct page *page) {  }
page              298 include/linux/page-flags.h static inline void __ClearPage##uname(struct page *page) {  }
page              301 include/linux/page-flags.h static inline int TestSetPage##uname(struct page *page) { return 0; }
page              304 include/linux/page-flags.h static inline int TestClearPage##uname(struct page *page) { return 0; }
page              380 include/linux/page-flags.h static __always_inline int PageSwapCache(struct page *page)
page              383 include/linux/page-flags.h 	page = compound_head(page);
page              385 include/linux/page-flags.h 	return PageSwapBacked(page) && test_bit(PG_swapcache, &page->flags);
page              417 include/linux/page-flags.h extern bool set_hwpoison_free_buddy_page(struct page *page);
page              420 include/linux/page-flags.h static inline bool set_hwpoison_free_buddy_page(struct page *page)
page              456 include/linux/page-flags.h static __always_inline int PageMappingFlags(struct page *page)
page              458 include/linux/page-flags.h 	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0;
page              461 include/linux/page-flags.h static __always_inline int PageAnon(struct page *page)
page              463 include/linux/page-flags.h 	page = compound_head(page);
page              464 include/linux/page-flags.h 	return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
page              467 include/linux/page-flags.h static __always_inline int __PageMovable(struct page *page)
page              469 include/linux/page-flags.h 	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
page              480 include/linux/page-flags.h static __always_inline int PageKsm(struct page *page)
page              482 include/linux/page-flags.h 	page = compound_head(page);
page              483 include/linux/page-flags.h 	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
page              490 include/linux/page-flags.h u64 stable_page_flags(struct page *page);
page              492 include/linux/page-flags.h static inline int PageUptodate(struct page *page)
page              495 include/linux/page-flags.h 	page = compound_head(page);
page              496 include/linux/page-flags.h 	ret = test_bit(PG_uptodate, &(page)->flags);
page              511 include/linux/page-flags.h static __always_inline void __SetPageUptodate(struct page *page)
page              513 include/linux/page-flags.h 	VM_BUG_ON_PAGE(PageTail(page), page);
page              515 include/linux/page-flags.h 	__set_bit(PG_uptodate, &page->flags);
page              518 include/linux/page-flags.h static __always_inline void SetPageUptodate(struct page *page)
page              520 include/linux/page-flags.h 	VM_BUG_ON_PAGE(PageTail(page), page);
page              527 include/linux/page-flags.h 	set_bit(PG_uptodate, &page->flags);
page              532 include/linux/page-flags.h int test_clear_page_writeback(struct page *page);
page              533 include/linux/page-flags.h int __test_set_page_writeback(struct page *page, bool keep_write);
page              535 include/linux/page-flags.h #define test_set_page_writeback(page)			\
page              536 include/linux/page-flags.h 	__test_set_page_writeback(page, false)
page              537 include/linux/page-flags.h #define test_set_page_writeback_keepwrite(page)	\
page              538 include/linux/page-flags.h 	__test_set_page_writeback(page, true)
page              540 include/linux/page-flags.h static inline void set_page_writeback(struct page *page)
page              542 include/linux/page-flags.h 	test_set_page_writeback(page);
page              545 include/linux/page-flags.h static inline void set_page_writeback_keepwrite(struct page *page)
page              547 include/linux/page-flags.h 	test_set_page_writeback_keepwrite(page);
page              552 include/linux/page-flags.h static __always_inline void set_compound_head(struct page *page, struct page *head)
page              554 include/linux/page-flags.h 	WRITE_ONCE(page->compound_head, (unsigned long)head + 1);
page              557 include/linux/page-flags.h static __always_inline void clear_compound_head(struct page *page)
page              559 include/linux/page-flags.h 	WRITE_ONCE(page->compound_head, 0);
page              563 include/linux/page-flags.h static inline void ClearPageCompound(struct page *page)
page              565 include/linux/page-flags.h 	BUG_ON(!PageHead(page));
page              566 include/linux/page-flags.h 	ClearPageHead(page);
page              573 include/linux/page-flags.h int PageHuge(struct page *page);
page              574 include/linux/page-flags.h int PageHeadHuge(struct page *page);
page              575 include/linux/page-flags.h bool page_huge_active(struct page *page);
page              580 include/linux/page-flags.h static inline bool page_huge_active(struct page *page)
page              596 include/linux/page-flags.h static inline int PageTransHuge(struct page *page)
page              598 include/linux/page-flags.h 	VM_BUG_ON_PAGE(PageTail(page), page);
page              599 include/linux/page-flags.h 	return PageHead(page);
page              607 include/linux/page-flags.h static inline int PageTransCompound(struct page *page)
page              609 include/linux/page-flags.h 	return PageCompound(page);
page              633 include/linux/page-flags.h static inline int PageTransCompoundMap(struct page *page)
page              635 include/linux/page-flags.h 	struct page *head;
page              637 include/linux/page-flags.h 	if (!PageTransCompound(page))
page              640 include/linux/page-flags.h 	if (PageAnon(page))
page              641 include/linux/page-flags.h 		return atomic_read(&page->_mapcount) < 0;
page              643 include/linux/page-flags.h 	head = compound_head(page);
page              645 include/linux/page-flags.h 	return atomic_read(&page->_mapcount) ==
page              654 include/linux/page-flags.h static inline int PageTransTail(struct page *page)
page              656 include/linux/page-flags.h 	return PageTail(page);
page              672 include/linux/page-flags.h static inline int PageDoubleMap(struct page *page)
page              674 include/linux/page-flags.h 	return PageHead(page) && test_bit(PG_double_map, &page[1].flags);
page              677 include/linux/page-flags.h static inline void SetPageDoubleMap(struct page *page)
page              679 include/linux/page-flags.h 	VM_BUG_ON_PAGE(!PageHead(page), page);
page              680 include/linux/page-flags.h 	set_bit(PG_double_map, &page[1].flags);
page              683 include/linux/page-flags.h static inline void ClearPageDoubleMap(struct page *page)
page              685 include/linux/page-flags.h 	VM_BUG_ON_PAGE(!PageHead(page), page);
page              686 include/linux/page-flags.h 	clear_bit(PG_double_map, &page[1].flags);
page              688 include/linux/page-flags.h static inline int TestSetPageDoubleMap(struct page *page)
page              690 include/linux/page-flags.h 	VM_BUG_ON_PAGE(!PageHead(page), page);
page              691 include/linux/page-flags.h 	return test_and_set_bit(PG_double_map, &page[1].flags);
page              694 include/linux/page-flags.h static inline int TestClearPageDoubleMap(struct page *page)
page              696 include/linux/page-flags.h 	VM_BUG_ON_PAGE(!PageHead(page), page);
page              697 include/linux/page-flags.h 	return test_and_clear_bit(PG_double_map, &page[1].flags);
page              728 include/linux/page-flags.h #define PageType(page, flag)						\
page              729 include/linux/page-flags.h 	((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)
page              731 include/linux/page-flags.h static inline int page_has_type(struct page *page)
page              733 include/linux/page-flags.h 	return (int)page->page_type < PAGE_MAPCOUNT_RESERVE;
page              737 include/linux/page-flags.h static __always_inline int Page##uname(struct page *page)		\
page              739 include/linux/page-flags.h 	return PageType(page, PG_##lname);				\
page              741 include/linux/page-flags.h static __always_inline void __SetPage##uname(struct page *page)		\
page              743 include/linux/page-flags.h 	VM_BUG_ON_PAGE(!PageType(page, 0), page);			\
page              744 include/linux/page-flags.h 	page->page_type &= ~PG_##lname;					\
page              746 include/linux/page-flags.h static __always_inline void __ClearPage##uname(struct page *page)	\
page              748 include/linux/page-flags.h 	VM_BUG_ON_PAGE(!Page##uname(page), page);			\
page              749 include/linux/page-flags.h 	page->page_type |= PG_##lname;					\
page              783 include/linux/page-flags.h extern bool is_free_buddy_page(struct page *page);
page              791 include/linux/page-flags.h static inline int PageSlabPfmemalloc(struct page *page)
page              793 include/linux/page-flags.h 	VM_BUG_ON_PAGE(!PageSlab(page), page);
page              794 include/linux/page-flags.h 	return PageActive(page);
page              797 include/linux/page-flags.h static inline void SetPageSlabPfmemalloc(struct page *page)
page              799 include/linux/page-flags.h 	VM_BUG_ON_PAGE(!PageSlab(page), page);
page              800 include/linux/page-flags.h 	SetPageActive(page);
page              803 include/linux/page-flags.h static inline void __ClearPageSlabPfmemalloc(struct page *page)
page              805 include/linux/page-flags.h 	VM_BUG_ON_PAGE(!PageSlab(page), page);
page              806 include/linux/page-flags.h 	__ClearPageActive(page);
page              809 include/linux/page-flags.h static inline void ClearPageSlabPfmemalloc(struct page *page)
page              811 include/linux/page-flags.h 	VM_BUG_ON_PAGE(!PageSlab(page), page);
page              812 include/linux/page-flags.h 	ClearPageActive(page);
page              852 include/linux/page-flags.h static inline int page_has_private(struct page *page)
page              854 include/linux/page-flags.h 	return !!(page->flags & PAGE_FLAGS_PRIVATE);
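The TESTPAGEFLAG/SETPAGEFLAG/CLEARPAGEFLAG templates above (page-flags.h:247-272) stamp out one accessor family per flag, with the policy argument deciding which struct page actually carries the bit. As a concrete instance, substituting into the templates, PAGEFLAG(Dirty, dirty, PF_HEAD) expands to roughly:

```c
/* What PAGEFLAG(Dirty, dirty, PF_HEAD) expands to, per the templates
 * listed above. PF_HEAD first runs the poison check, then redirects
 * the bit operation to compound_head(page). */
static __always_inline int PageDirty(struct page *page)
	{ return test_bit(PG_dirty, &PF_HEAD(page, 0)->flags); }
static __always_inline void SetPageDirty(struct page *page)
	{ set_bit(PG_dirty, &PF_HEAD(page, 1)->flags); }
static __always_inline void ClearPageDirty(struct page *page)
	{ clear_bit(PG_dirty, &PF_HEAD(page, 1)->flags); }
```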
page               10 include/linux/page-isolation.h static inline bool is_migrate_isolate_page(struct page *page)
page               12 include/linux/page-isolation.h 	return get_pageblock_migratetype(page) == MIGRATE_ISOLATE;
page               23 include/linux/page-isolation.h static inline bool is_migrate_isolate_page(struct page *page)
page               36 include/linux/page-isolation.h bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
page               38 include/linux/page-isolation.h void set_pageblock_migratetype(struct page *page, int migratetype);
page               39 include/linux/page-isolation.h int move_freepages_block(struct zone *zone, struct page *page,
page               63 include/linux/page-isolation.h struct page *alloc_migrate_target(struct page *page, unsigned long private);
page               54 include/linux/page_ext.h struct page_ext *lookup_page_ext(const struct page *page);
page               70 include/linux/page_ext.h static inline struct page_ext *lookup_page_ext(const struct page *page)
page               12 include/linux/page_idle.h static inline bool page_is_young(struct page *page)
page               14 include/linux/page_idle.h 	return PageYoung(page);
page               17 include/linux/page_idle.h static inline void set_page_young(struct page *page)
page               19 include/linux/page_idle.h 	SetPageYoung(page);
page               22 include/linux/page_idle.h static inline bool test_and_clear_page_young(struct page *page)
page               24 include/linux/page_idle.h 	return TestClearPageYoung(page);
page               27 include/linux/page_idle.h static inline bool page_is_idle(struct page *page)
page               29 include/linux/page_idle.h 	return PageIdle(page);
page               32 include/linux/page_idle.h static inline void set_page_idle(struct page *page)
page               34 include/linux/page_idle.h 	SetPageIdle(page);
page               37 include/linux/page_idle.h static inline void clear_page_idle(struct page *page)
page               39 include/linux/page_idle.h 	ClearPageIdle(page);
page               48 include/linux/page_idle.h static inline bool page_is_young(struct page *page)
page               50 include/linux/page_idle.h 	struct page_ext *page_ext = lookup_page_ext(page);
page               58 include/linux/page_idle.h static inline void set_page_young(struct page *page)
page               60 include/linux/page_idle.h 	struct page_ext *page_ext = lookup_page_ext(page);
page               68 include/linux/page_idle.h static inline bool test_and_clear_page_young(struct page *page)
page               70 include/linux/page_idle.h 	struct page_ext *page_ext = lookup_page_ext(page);
page               78 include/linux/page_idle.h static inline bool page_is_idle(struct page *page)
page               80 include/linux/page_idle.h 	struct page_ext *page_ext = lookup_page_ext(page);
page               88 include/linux/page_idle.h static inline void set_page_idle(struct page *page)
page               90 include/linux/page_idle.h 	struct page_ext *page_ext = lookup_page_ext(page);
page               98 include/linux/page_idle.h static inline void clear_page_idle(struct page *page)
page              100 include/linux/page_idle.h 	struct page_ext *page_ext = lookup_page_ext(page);
page              111 include/linux/page_idle.h static inline bool page_is_young(struct page *page)
page              116 include/linux/page_idle.h static inline void set_page_young(struct page *page)
page              120 include/linux/page_idle.h static inline bool test_and_clear_page_young(struct page *page)
page              125 include/linux/page_idle.h static inline bool page_is_idle(struct page *page)
page              130 include/linux/page_idle.h static inline void set_page_idle(struct page *page)
page              134 include/linux/page_idle.h static inline void clear_page_idle(struct page *page)
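All three page_idle.h variants above implement the same contract: set_page_idle() arms the tracker, any subsequent access clears it (and may set the young bit via the rmap side), so a page still idle on the next pass was untouched in between. A sketch of one scan step under that contract; the name and return convention are this example's:

```c
/* Sketch of one idle-tracking scan step: report whether the page was
 * touched since the previous scan, then re-arm it for the next one. */
static bool scan_and_rearm_sketch(struct page *page)
{
	/* PG_young records a reference observed on the rmap side. */
	bool touched = test_and_clear_page_young(page) ||
		       !page_is_idle(page);

	set_page_idle(page);
	return touched;
}
```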
page               11 include/linux/page_owner.h extern void __reset_page_owner(struct page *page, unsigned int order);
page               12 include/linux/page_owner.h extern void __set_page_owner(struct page *page,
page               14 include/linux/page_owner.h extern void __split_page_owner(struct page *page, unsigned int order);
page               15 include/linux/page_owner.h extern void __copy_page_owner(struct page *oldpage, struct page *newpage);
page               16 include/linux/page_owner.h extern void __set_page_owner_migrate_reason(struct page *page, int reason);
page               17 include/linux/page_owner.h extern void __dump_page_owner(struct page *page);
page               21 include/linux/page_owner.h static inline void reset_page_owner(struct page *page, unsigned int order)
page               24 include/linux/page_owner.h 		__reset_page_owner(page, order);
page               27 include/linux/page_owner.h static inline void set_page_owner(struct page *page,
page               31 include/linux/page_owner.h 		__set_page_owner(page, order, gfp_mask);
page               34 include/linux/page_owner.h static inline void split_page_owner(struct page *page, unsigned int order)
page               37 include/linux/page_owner.h 		__split_page_owner(page, order);
page               39 include/linux/page_owner.h static inline void copy_page_owner(struct page *oldpage, struct page *newpage)
page               44 include/linux/page_owner.h static inline void set_page_owner_migrate_reason(struct page *page, int reason)
page               47 include/linux/page_owner.h 		__set_page_owner_migrate_reason(page, reason);
page               49 include/linux/page_owner.h static inline void dump_page_owner(struct page *page)
page               52 include/linux/page_owner.h 		__dump_page_owner(page);
page               55 include/linux/page_owner.h static inline void reset_page_owner(struct page *page, unsigned int order)
page               58 include/linux/page_owner.h static inline void set_page_owner(struct page *page,
page               62 include/linux/page_owner.h static inline void split_page_owner(struct page *page,
page               66 include/linux/page_owner.h static inline void copy_page_owner(struct page *oldpage, struct page *newpage)
page               69 include/linux/page_owner.h static inline void set_page_owner_migrate_reason(struct page *page, int reason)
page               72 include/linux/page_owner.h static inline void dump_page_owner(struct page *page)
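Each enabled-side page_owner.h wrapper above checks a static key before calling its double-underscore counterpart, so the feature costs only a patched-out branch when disabled. Reconstructed shape of one wrapper, with the guard line (elided between page_owner.h:21 and :24 in the listing) filled in from the usual pattern; treat the exact key identifier as an assumption:

```c
/* Reconstruction of the static-key guard elided from the listing.
 * page_owner_inited is the header's key per the common pattern. */
static inline void reset_page_owner(struct page *page, unsigned int order)
{
	if (static_branch_unlikely(&page_owner_inited))
		__reset_page_owner(page, order);
}
```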
page               29 include/linux/page_ref.h extern void __page_ref_set(struct page *page, int v);
page               30 include/linux/page_ref.h extern void __page_ref_mod(struct page *page, int v);
page               31 include/linux/page_ref.h extern void __page_ref_mod_and_test(struct page *page, int v, int ret);
page               32 include/linux/page_ref.h extern void __page_ref_mod_and_return(struct page *page, int v, int ret);
page               33 include/linux/page_ref.h extern void __page_ref_mod_unless(struct page *page, int v, int u);
page               34 include/linux/page_ref.h extern void __page_ref_freeze(struct page *page, int v, int ret);
page               35 include/linux/page_ref.h extern void __page_ref_unfreeze(struct page *page, int v);
page               41 include/linux/page_ref.h static inline void __page_ref_set(struct page *page, int v)
page               44 include/linux/page_ref.h static inline void __page_ref_mod(struct page *page, int v)
page               47 include/linux/page_ref.h static inline void __page_ref_mod_and_test(struct page *page, int v, int ret)
page               50 include/linux/page_ref.h static inline void __page_ref_mod_and_return(struct page *page, int v, int ret)
page               53 include/linux/page_ref.h static inline void __page_ref_mod_unless(struct page *page, int v, int u)
page               56 include/linux/page_ref.h static inline void __page_ref_freeze(struct page *page, int v, int ret)
page               59 include/linux/page_ref.h static inline void __page_ref_unfreeze(struct page *page, int v)
page               65 include/linux/page_ref.h static inline int page_ref_count(struct page *page)
page               67 include/linux/page_ref.h 	return atomic_read(&page->_refcount);
page               70 include/linux/page_ref.h static inline int page_count(struct page *page)
page               72 include/linux/page_ref.h 	return atomic_read(&compound_head(page)->_refcount);
page               75 include/linux/page_ref.h static inline void set_page_count(struct page *page, int v)
page               77 include/linux/page_ref.h 	atomic_set(&page->_refcount, v);
page               79 include/linux/page_ref.h 		__page_ref_set(page, v);
page               86 include/linux/page_ref.h static inline void init_page_count(struct page *page)
page               88 include/linux/page_ref.h 	set_page_count(page, 1);
page               91 include/linux/page_ref.h static inline void page_ref_add(struct page *page, int nr)
page               93 include/linux/page_ref.h 	atomic_add(nr, &page->_refcount);
page               95 include/linux/page_ref.h 		__page_ref_mod(page, nr);
page               98 include/linux/page_ref.h static inline void page_ref_sub(struct page *page, int nr)
page              100 include/linux/page_ref.h 	atomic_sub(nr, &page->_refcount);
page              102 include/linux/page_ref.h 		__page_ref_mod(page, -nr);
page              105 include/linux/page_ref.h static inline void page_ref_inc(struct page *page)
page              107 include/linux/page_ref.h 	atomic_inc(&page->_refcount);
page              109 include/linux/page_ref.h 		__page_ref_mod(page, 1);
page              112 include/linux/page_ref.h static inline void page_ref_dec(struct page *page)
page              114 include/linux/page_ref.h 	atomic_dec(&page->_refcount);
page              116 include/linux/page_ref.h 		__page_ref_mod(page, -1);
page              119 include/linux/page_ref.h static inline int page_ref_sub_and_test(struct page *page, int nr)
page              121 include/linux/page_ref.h 	int ret = atomic_sub_and_test(nr, &page->_refcount);
page              124 include/linux/page_ref.h 		__page_ref_mod_and_test(page, -nr, ret);
page              128 include/linux/page_ref.h static inline int page_ref_inc_return(struct page *page)
page              130 include/linux/page_ref.h 	int ret = atomic_inc_return(&page->_refcount);
page              133 include/linux/page_ref.h 		__page_ref_mod_and_return(page, 1, ret);
page              137 include/linux/page_ref.h static inline int page_ref_dec_and_test(struct page *page)
page              139 include/linux/page_ref.h 	int ret = atomic_dec_and_test(&page->_refcount);
page              142 include/linux/page_ref.h 		__page_ref_mod_and_test(page, -1, ret);
page              146 include/linux/page_ref.h static inline int page_ref_dec_return(struct page *page)
page              148 include/linux/page_ref.h 	int ret = atomic_dec_return(&page->_refcount);
page              151 include/linux/page_ref.h 		__page_ref_mod_and_return(page, -1, ret);
page              155 include/linux/page_ref.h static inline int page_ref_add_unless(struct page *page, int nr, int u)
page              157 include/linux/page_ref.h 	int ret = atomic_add_unless(&page->_refcount, nr, u);
page              160 include/linux/page_ref.h 		__page_ref_mod_unless(page, nr, ret);
page              164 include/linux/page_ref.h static inline int page_ref_freeze(struct page *page, int count)
page              166 include/linux/page_ref.h 	int ret = likely(atomic_cmpxchg(&page->_refcount, count, 0) == count);
page              169 include/linux/page_ref.h 		__page_ref_freeze(page, count, ret);
page              173 include/linux/page_ref.h static inline void page_ref_unfreeze(struct page *page, int count)
page              175 include/linux/page_ref.h 	VM_BUG_ON_PAGE(page_count(page) != 0, page);
page              178 include/linux/page_ref.h 	atomic_set_release(&page->_refcount, count);
page              180 include/linux/page_ref.h 		__page_ref_unfreeze(page, count);
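The page_ref_* primitives above all funnel through atomic ops on page->_refcount plus an optional tracepoint hook; the classic get/put pair is a thin layer over two of them. A sketch, with free_the_page_sketch() standing in for the real release path:

```c
/* Hypothetical release hook; the real freeing path lives elsewhere. */
extern void free_the_page_sketch(struct page *page);

/* Sketch: the get_page()/put_page() shape over the listed primitives.
 * The refcount lives in the head page of a compound page. */
static inline void get_page_sketch(struct page *page)
{
	page = compound_head(page);
	VM_BUG_ON_PAGE(page_ref_count(page) <= 0, page);
	page_ref_inc(page);
}

static inline void put_page_sketch(struct page *page)
{
	page = compound_head(page);
	if (page_ref_dec_and_test(page))
		free_the_page_sketch(page);
}
```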
page               55 include/linux/pageblock-flags.h struct page;
page               57 include/linux/pageblock-flags.h unsigned long get_pfnblock_flags_mask(struct page *page,
page               62 include/linux/pageblock-flags.h void set_pfnblock_flags_mask(struct page *page,
page               69 include/linux/pageblock-flags.h #define get_pageblock_flags_group(page, start_bitidx, end_bitidx) \
page               70 include/linux/pageblock-flags.h 	get_pfnblock_flags_mask(page, page_to_pfn(page),		\
page               73 include/linux/pageblock-flags.h #define set_pageblock_flags_group(page, flags, start_bitidx, end_bitidx) \
page               74 include/linux/pageblock-flags.h 	set_pfnblock_flags_mask(page, flags, page_to_pfn(page),		\
page               79 include/linux/pageblock-flags.h #define get_pageblock_skip(page) \
page               80 include/linux/pageblock-flags.h 			get_pageblock_flags_group(page, PB_migrate_skip,     \
page               82 include/linux/pageblock-flags.h #define clear_pageblock_skip(page) \
page               83 include/linux/pageblock-flags.h 			set_pageblock_flags_group(page, 0, PB_migrate_skip,  \
page               85 include/linux/pageblock-flags.h #define set_pageblock_skip(page) \
page               86 include/linux/pageblock-flags.h 			set_pageblock_flags_group(page, 1, PB_migrate_skip,  \
page               89 include/linux/pageblock-flags.h static inline bool get_pageblock_skip(struct page *page)
page               93 include/linux/pageblock-flags.h static inline void clear_pageblock_skip(struct page *page)
page               96 include/linux/pageblock-flags.h static inline void set_pageblock_skip(struct page *page)
page              121 include/linux/pagemap.h void release_pages(struct page **pages, int nr);
page              167 include/linux/pagemap.h static inline int __page_cache_add_speculative(struct page *page, int count)
page              182 include/linux/pagemap.h 	VM_BUG_ON_PAGE(page_count(page) == 0, page);
page              183 include/linux/pagemap.h 	page_ref_add(page, count);
page              186 include/linux/pagemap.h 	if (unlikely(!page_ref_add_unless(page, count, 0))) {
page              195 include/linux/pagemap.h 	VM_BUG_ON_PAGE(PageTail(page), page);
page              200 include/linux/pagemap.h static inline int page_cache_get_speculative(struct page *page)
page              202 include/linux/pagemap.h 	return __page_cache_add_speculative(page, 1);
page              205 include/linux/pagemap.h static inline int page_cache_add_speculative(struct page *page, int count)
page              207 include/linux/pagemap.h 	return __page_cache_add_speculative(page, count);
page              211 include/linux/pagemap.h extern struct page *__page_cache_alloc(gfp_t gfp);
page              213 include/linux/pagemap.h static inline struct page *__page_cache_alloc(gfp_t gfp)
page              219 include/linux/pagemap.h static inline struct page *page_cache_alloc(struct address_space *x)
page              229 include/linux/pagemap.h typedef int filler_t(void *, struct page *);
page              244 include/linux/pagemap.h struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
page              257 include/linux/pagemap.h static inline struct page *find_get_page(struct address_space *mapping,
page              263 include/linux/pagemap.h static inline struct page *find_get_page_flags(struct address_space *mapping,
page              282 include/linux/pagemap.h static inline struct page *find_lock_page(struct address_space *mapping,
page              307 include/linux/pagemap.h static inline struct page *find_or_create_page(struct address_space *mapping,
page              328 include/linux/pagemap.h static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
page              336 include/linux/pagemap.h static inline struct page *find_subpage(struct page *page, pgoff_t offset)
page              338 include/linux/pagemap.h 	if (PageHuge(page))
page              339 include/linux/pagemap.h 		return page;
page              341 include/linux/pagemap.h 	VM_BUG_ON_PAGE(PageTail(page), page);
page              343 include/linux/pagemap.h 	return page + (offset & (compound_nr(page) - 1));
page              346 include/linux/pagemap.h struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
page              347 include/linux/pagemap.h struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
page              349 include/linux/pagemap.h 			  unsigned int nr_entries, struct page **entries,
page              353 include/linux/pagemap.h 			struct page **pages);
page              356 include/linux/pagemap.h 			struct page **pages)
page              362 include/linux/pagemap.h 			       unsigned int nr_pages, struct page **pages);
page              365 include/linux/pagemap.h 			struct page **pages);
page              368 include/linux/pagemap.h 			struct page **pages)
page              374 include/linux/pagemap.h struct page *grab_cache_page_write_begin(struct address_space *mapping,
page              380 include/linux/pagemap.h static inline struct page *grab_cache_page(struct address_space *mapping,
page              386 include/linux/pagemap.h extern struct page * read_cache_page(struct address_space *mapping,
page              388 include/linux/pagemap.h extern struct page * read_cache_page_gfp(struct address_space *mapping,
page              393 include/linux/pagemap.h static inline struct page *read_mapping_page(struct address_space *mapping,
page              403 include/linux/pagemap.h static inline pgoff_t page_to_index(struct page *page)
page              407 include/linux/pagemap.h 	if (likely(!PageTransTail(page)))
page              408 include/linux/pagemap.h 		return page->index;
page              414 include/linux/pagemap.h 	pgoff = compound_head(page)->index;
page              415 include/linux/pagemap.h 	pgoff += page - compound_head(page);
page              423 include/linux/pagemap.h static inline pgoff_t page_to_pgoff(struct page *page)
page              425 include/linux/pagemap.h 	if (unlikely(PageHeadHuge(page)))
page              426 include/linux/pagemap.h 		return page->index << compound_order(page);
page              428 include/linux/pagemap.h 	return page_to_index(page);
page              434 include/linux/pagemap.h static inline loff_t page_offset(struct page *page)
page              436 include/linux/pagemap.h 	return ((loff_t)page->index) << PAGE_SHIFT;
page              439 include/linux/pagemap.h static inline loff_t page_file_offset(struct page *page)
page              441 include/linux/pagemap.h 	return ((loff_t)page_index(page)) << PAGE_SHIFT;
page              458 include/linux/pagemap.h extern void __lock_page(struct page *page);
page              459 include/linux/pagemap.h extern int __lock_page_killable(struct page *page);
page              460 include/linux/pagemap.h extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
page              462 include/linux/pagemap.h extern void unlock_page(struct page *page);
page              467 include/linux/pagemap.h static inline int trylock_page(struct page *page)
page              469 include/linux/pagemap.h 	page = compound_head(page);
page              470 include/linux/pagemap.h 	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
page              476 include/linux/pagemap.h static inline void lock_page(struct page *page)
page              479 include/linux/pagemap.h 	if (!trylock_page(page))
page              480 include/linux/pagemap.h 		__lock_page(page);
page              488 include/linux/pagemap.h static inline int lock_page_killable(struct page *page)
page              491 include/linux/pagemap.h 	if (!trylock_page(page))
page              492 include/linux/pagemap.h 		return __lock_page_killable(page);
page              503 include/linux/pagemap.h static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
page              507 include/linux/pagemap.h 	return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
page              514 include/linux/pagemap.h extern void wait_on_page_bit(struct page *page, int bit_nr);
page              515 include/linux/pagemap.h extern int wait_on_page_bit_killable(struct page *page, int bit_nr);
page              524 include/linux/pagemap.h static inline void wait_on_page_locked(struct page *page)
page              526 include/linux/pagemap.h 	if (PageLocked(page))
page              527 include/linux/pagemap.h 		wait_on_page_bit(compound_head(page), PG_locked);
page              530 include/linux/pagemap.h static inline int wait_on_page_locked_killable(struct page *page)
page              532 include/linux/pagemap.h 	if (!PageLocked(page))
page              534 include/linux/pagemap.h 	return wait_on_page_bit_killable(compound_head(page), PG_locked);
page              537 include/linux/pagemap.h extern void put_and_wait_on_page_locked(struct page *page);
page              539 include/linux/pagemap.h void wait_on_page_writeback(struct page *page);
page              540 include/linux/pagemap.h extern void end_page_writeback(struct page *page);
page              541 include/linux/pagemap.h void wait_for_stable_page(struct page *page);
page              543 include/linux/pagemap.h void page_endio(struct page *page, bool is_write, int err);
page              548 include/linux/pagemap.h extern void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter);
page              607 include/linux/pagemap.h int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
page              609 include/linux/pagemap.h int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
page              611 include/linux/pagemap.h extern void delete_from_page_cache(struct page *page);
page              612 include/linux/pagemap.h extern void __delete_from_page_cache(struct page *page, void *shadow);
page              613 include/linux/pagemap.h int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
page              621 include/linux/pagemap.h static inline int add_to_page_cache(struct page *page,
page              626 include/linux/pagemap.h 	__SetPageLocked(page);
page              627 include/linux/pagemap.h 	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
page              629 include/linux/pagemap.h 		__ClearPageLocked(page);
page               17 include/linux/pagevec.h struct page;
page               23 include/linux/pagevec.h 	struct page *pages[PAGEVEC_SIZE];
page               79 include/linux/pagevec.h static inline unsigned pagevec_add(struct pagevec *pvec, struct page *page)
page               81 include/linux/pagevec.h 	pvec->pages[pvec->nr++] = page;
page               51 include/linux/pci-dma-compat.h pci_map_page(struct pci_dev *hwdev, struct page *page,
page               54 include/linux/pci-dma-compat.h 	return dma_map_page(&hwdev->dev, page, offset, size, (enum dma_data_direction)direction);
page               37 include/linux/pci-p2pdma.h int pci_p2pdma_enable_store(const char *page, struct pci_dev **p2p_dev,
page               39 include/linux/pci-p2pdma.h ssize_t pci_p2pdma_enable_show(char *page, struct pci_dev *p2p_dev,
page               97 include/linux/pci-p2pdma.h static inline int pci_p2pdma_enable_store(const char *page,
page              103 include/linux/pci-p2pdma.h static inline ssize_t pci_p2pdma_enable_show(char *page,
page              106 include/linux/pci-p2pdma.h 	return sprintf(page, "none\n");
page              837 include/linux/perf_event.h 	int				page;
page             1447 include/linux/perf_event.h 			      char *page);
page             1466 include/linux/perf_event.h 			       char *page)				\
page             1469 include/linux/perf_event.h 	return sprintf(page, _format "\n");				\
page               57 include/linux/pfn_t.h static inline struct page *pfn_t_to_page(pfn_t pfn)
page               69 include/linux/pfn_t.h static inline pfn_t page_to_pfn_t(struct page *page)
page               71 include/linux/pfn_t.h 	return pfn_to_pfn_t(page_to_pfn(page));
page              610 include/linux/phy.h 	int (*write_page)(struct phy_device *dev, int page);
page              990 include/linux/phy.h int phy_select_page(struct phy_device *phydev, int page);
page              992 include/linux/phy.h int phy_read_paged(struct phy_device *phydev, int page, u32 regnum);
page              993 include/linux/phy.h int phy_write_paged(struct phy_device *phydev, int page, u32 regnum, u16 val);
page              994 include/linux/phy.h int phy_modify_paged_changed(struct phy_device *phydev, int page, u32 regnum,
page              996 include/linux/phy.h int phy_modify_paged(struct phy_device *phydev, int page, u32 regnum,
page               22 include/linux/pipe_fs_i.h 	struct page *page;
page               58 include/linux/pipe_fs_i.h 	struct page *tmp_page;
page              133 include/linux/pktcdvd.h 	struct page		*pages[PACKET_MAX_SIZE / FRAMES_PER_PAGE];
page               46 include/linux/relay.h 	struct page **page_array;	/* array of current buffer pages */
page              163 include/linux/rmap.h struct anon_vma *page_get_anon_vma(struct page *page);
page              172 include/linux/rmap.h void page_move_anon_rmap(struct page *, struct vm_area_struct *);
page              173 include/linux/rmap.h void page_add_anon_rmap(struct page *, struct vm_area_struct *,
page              175 include/linux/rmap.h void do_page_add_anon_rmap(struct page *, struct vm_area_struct *,
page              177 include/linux/rmap.h void page_add_new_anon_rmap(struct page *, struct vm_area_struct *,
page              179 include/linux/rmap.h void page_add_file_rmap(struct page *, bool);
page              180 include/linux/rmap.h void page_remove_rmap(struct page *, bool);
page              182 include/linux/rmap.h void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *,
page              184 include/linux/rmap.h void hugepage_add_new_anon_rmap(struct page *, struct vm_area_struct *,
page              187 include/linux/rmap.h static inline void page_dup_rmap(struct page *page, bool compound)
page              189 include/linux/rmap.h 	atomic_inc(compound ? compound_mapcount_ptr(page) : &page->_mapcount);
page              195 include/linux/rmap.h int page_referenced(struct page *, int is_locked,
page              198 include/linux/rmap.h bool try_to_unmap(struct page *, enum ttu_flags flags);
page              206 include/linux/rmap.h 	struct page *page;
page              228 include/linux/rmap.h unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);
page              236 include/linux/rmap.h int page_mkclean(struct page *);
page              242 include/linux/rmap.h void try_to_munlock(struct page *);
page              244 include/linux/rmap.h void remove_migration_ptes(struct page *old, struct page *new, bool locked);
page              249 include/linux/rmap.h struct anon_vma *page_lock_anon_vma_read(struct page *page);
page              251 include/linux/rmap.h int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
page              268 include/linux/rmap.h 	bool (*rmap_one)(struct page *page, struct vm_area_struct *vma,
page              270 include/linux/rmap.h 	int (*done)(struct page *page);
page              271 include/linux/rmap.h 	struct anon_vma *(*anon_lock)(struct page *page);
page              275 include/linux/rmap.h void rmap_walk(struct page *page, struct rmap_walk_control *rwc);
page              276 include/linux/rmap.h void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc);
page              284 include/linux/rmap.h static inline int page_referenced(struct page *page, int is_locked,
page              292 include/linux/rmap.h #define try_to_unmap(page, refs) false
page              294 include/linux/rmap.h static inline int page_mkclean(struct page *page)
page               87 include/linux/scatterlist.h static inline void sg_assign_page(struct scatterlist *sg, struct page *page)
page               95 include/linux/scatterlist.h 	BUG_ON((unsigned long) page & (SG_CHAIN | SG_END));
page               99 include/linux/scatterlist.h 	sg->page_link = page_link | (unsigned long) page;
page              116 include/linux/scatterlist.h static inline void sg_set_page(struct scatterlist *sg, struct page *page,
page              119 include/linux/scatterlist.h 	sg_assign_page(sg, page);
page              124 include/linux/scatterlist.h static inline struct page *sg_page(struct scatterlist *sg)
page              129 include/linux/scatterlist.h 	return (struct page *)((sg)->page_link & ~(SG_CHAIN | SG_END));
page              275 include/linux/scatterlist.h int __sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
page              279 include/linux/scatterlist.h int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
page              382 include/linux/scatterlist.h static inline struct page *sg_page_iter_page(struct sg_page_iter *piter)
page              449 include/linux/scatterlist.h 	struct page		*page;		/* currently mapped page */
page               23 include/linux/sched/numa_balancing.h extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
page               41 include/linux/sched/numa_balancing.h 				struct page *page, int src_nid, int dst_cpu)
page               18 include/linux/set_memory.h static inline int set_direct_map_invalid_noflush(struct page *page)
page               22 include/linux/set_memory.h static inline int set_direct_map_default_noflush(struct page *page)
page               74 include/linux/shmem_fs.h extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
page               95 include/linux/shmem_fs.h 		struct page **pagep, enum sgp_type sgp);
page               97 include/linux/shmem_fs.h static inline struct page *shmem_read_mapping_page(
page              130 include/linux/shmem_fs.h 				  struct page **pagep);
page              364 include/linux/skbuff.h static inline bool skb_frag_must_loop(struct page *p)
page             1147 include/linux/skbuff.h int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
page             2105 include/linux/skbuff.h 					struct page *page, int off, int size)
page             2114 include/linux/skbuff.h 	frag->bv_page		  = page;
page             2118 include/linux/skbuff.h 	page = compound_head(page);
page             2119 include/linux/skbuff.h 	if (page_is_pfmemalloc(page))
page             2138 include/linux/skbuff.h 				      struct page *page, int off, int size)
page             2140 include/linux/skbuff.h 	__skb_fill_page_desc(skb, i, page, off, size);
page             2144 include/linux/skbuff.h void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
page             2843 include/linux/skbuff.h static inline struct page *__dev_alloc_pages(gfp_t gfp_mask,
page             2859 include/linux/skbuff.h static inline struct page *dev_alloc_pages(unsigned int order)
page             2872 include/linux/skbuff.h static inline struct page *__dev_alloc_page(gfp_t gfp_mask)
page             2877 include/linux/skbuff.h static inline struct page *dev_alloc_page(void)
page             2887 include/linux/skbuff.h static inline void skb_propagate_pfmemalloc(struct page *page,
page             2890 include/linux/skbuff.h 	if (page_is_pfmemalloc(page))
page             2940 include/linux/skbuff.h static inline struct page *skb_frag_page(const skb_frag_t *frag)
page             3037 include/linux/skbuff.h static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page)
page             3039 include/linux/skbuff.h 	frag->bv_page = page;
page             3051 include/linux/skbuff.h 				     struct page *page)
page             3053 include/linux/skbuff.h 	__skb_frag_set_page(&skb_shinfo(skb)->frags[f], page);
page             3237 include/linux/skbuff.h 				    const struct page *page, int off)
page             3244 include/linux/skbuff.h 		return page == skb_frag_page(frag) &&
page              220 include/linux/skmsg.h static inline struct page *sk_msg_page(struct sk_msg *msg, int which)
page              243 include/linux/skmsg.h static inline void sk_msg_page_add(struct sk_msg *msg, struct page *page,
page              248 include/linux/skmsg.h 	get_page(page);
page              250 include/linux/skmsg.h 	sg_set_page(sge, page, len, offset);
page              195 include/linux/slab.h void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
page              199 include/linux/slab.h 				       struct page *page, bool to_user) { }
page               92 include/linux/slab_def.h static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
page               95 include/linux/slab_def.h 	void *object = x - (x - page->s_mem) % cache->size;
page               96 include/linux/slab_def.h 	void *last_object = page->s_mem + (cache->num - 1) * cache->size;
page              111 include/linux/slab_def.h 					const struct page *page, void *obj)
page              113 include/linux/slab_def.h 	u32 offset = (obj - page->s_mem);
page               44 include/linux/slub_def.h 	struct page *page;	/* The slab from which we are allocating */
page               46 include/linux/slub_def.h 	struct page *partial;	/* Partially allocated frozen slabs */
page              169 include/linux/slub_def.h void object_err(struct kmem_cache *s, struct page *page,
page              174 include/linux/slub_def.h static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
page              176 include/linux/slub_def.h 	void *object = x - (x - page_address(page)) % cache->size;
page              177 include/linux/slub_def.h 	void *last_object = page_address(page) +
page              178 include/linux/slub_def.h 		(page->objects - 1) * cache->size;
page               57 include/linux/splice.h 	struct page **pages;		/* page map */
page              179 include/linux/sunrpc/clnt.h void rpc_prepare_reply_pages(struct rpc_rqst *req, struct page **pages,
page               67 include/linux/sunrpc/gss_api.h 		struct page		**inpages);
page              128 include/linux/sunrpc/gss_api.h 			struct page		**inpages);
page               85 include/linux/sunrpc/gss_krb5.h 			   struct page **pages); /* v2 encryption function */
page              255 include/linux/sunrpc/gss_krb5.h 		struct xdr_buf *outbuf, struct page **pages);
page              272 include/linux/sunrpc/gss_krb5.h 		    int offset, struct page **pages);
page              312 include/linux/sunrpc/gss_krb5.h 		     struct page **pages);
page              251 include/linux/sunrpc/svc.h 	struct page		*rq_pages[RPCSVC_MAXPAGES + 1];
page              252 include/linux/sunrpc/svc.h 	struct page *		*rq_respages;	/* points into rq_pages */
page              253 include/linux/sunrpc/svc.h 	struct page *		*rq_next_page; /* next reply page to use */
page              254 include/linux/sunrpc/svc.h 	struct page *		*rq_page_end;  /* one past the last page */
page              365 include/linux/sunrpc/svc.h 		struct page **pp = --rqstp->rq_next_page;
page              521 include/linux/sunrpc/svc.h 					 struct page **pages,
page              140 include/linux/sunrpc/svc_rdma.h 	struct page		*rc_pages[RPCSVC_MAXPAGES];
page              150 include/linux/sunrpc/svc_rdma.h 	struct page		*sc_pages[RPCSVC_MAXPAGES];
page               39 include/linux/sunrpc/svcsock.h 	struct page *		sk_pages[RPCSVC_MAXPAGES];	/* received data */
page               57 include/linux/sunrpc/xdr.h 	struct page **	pages;		/* Array of pages */
page              132 include/linux/sunrpc/xdr.h 			 struct page **, unsigned int, unsigned int);
page              224 include/linux/sunrpc/xdr.h extern void _copy_from_pages(char *p, struct page **pages, size_t pgbase,
page              237 include/linux/sunrpc/xdr.h 	struct page **page_ptr;	/* pointer to the current page */
page              257 include/linux/sunrpc/xdr.h extern void xdr_write_pages(struct xdr_stream *xdr, struct page **pages,
page              263 include/linux/sunrpc/xdr.h 		struct page **pages, unsigned int len);
page               82 include/linux/sunrpc/xprt.h 	struct page		**rq_enc_pages;	/* scratch pages for use by
page              440 include/linux/suspend.h extern int swsusp_page_is_forbidden(struct page *);
page              441 include/linux/suspend.h extern void swsusp_set_page_free(struct page *);
page              442 include/linux/suspend.h extern void swsusp_unset_page_free(struct page *);
page              457 include/linux/suspend.h static inline int swsusp_page_is_forbidden(struct page *p) { return 0; }
page              458 include/linux/suspend.h static inline void swsusp_set_page_free(struct page *p) {}
page              459 include/linux/suspend.h static inline void swsusp_unset_page_free(struct page *p) {}
page              310 include/linux/swap.h void *workingset_eviction(struct page *page);
page              311 include/linux/swap.h void workingset_refault(struct page *page, void *shadow);
page              312 include/linux/swap.h void workingset_activation(struct page *page);
page              331 include/linux/swap.h extern void lru_cache_add(struct page *);
page              332 include/linux/swap.h extern void lru_cache_add_anon(struct page *page);
page              333 include/linux/swap.h extern void lru_cache_add_file(struct page *page);
page              334 include/linux/swap.h extern void lru_add_page_tail(struct page *page, struct page *page_tail,
page              336 include/linux/swap.h extern void activate_page(struct page *);
page              337 include/linux/swap.h extern void mark_page_accessed(struct page *);
page              341 include/linux/swap.h extern void rotate_reclaimable_page(struct page *page);
page              342 include/linux/swap.h extern void deactivate_file_page(struct page *page);
page              343 include/linux/swap.h extern void deactivate_page(struct page *page);
page              344 include/linux/swap.h extern void mark_page_lazyfree(struct page *page);
page              347 include/linux/swap.h extern void lru_cache_add_active_or_unevictable(struct page *page,
page              354 include/linux/swap.h extern int __isolate_lru_page(struct page *page, isolate_mode_t mode);
page              365 include/linux/swap.h extern int remove_mapping(struct address_space *mapping, struct page *page);
page              377 include/linux/swap.h extern int page_evictable(struct page *page);
page              388 include/linux/swap.h extern int swap_readpage(struct page *page, bool do_poll);
page              389 include/linux/swap.h extern int swap_writepage(struct page *page, struct writeback_control *wbc);
page              391 include/linux/swap.h extern int __swap_writepage(struct page *page, struct writeback_control *wbc,
page              393 include/linux/swap.h extern int swap_set_page_dirty(struct page *page);
page              410 include/linux/swap.h extern int add_to_swap(struct page *page);
page              411 include/linux/swap.h extern int add_to_swap_cache(struct page *, swp_entry_t, gfp_t);
page              412 include/linux/swap.h extern int __add_to_swap_cache(struct page *page, swp_entry_t entry);
page              413 include/linux/swap.h extern void __delete_from_swap_cache(struct page *, swp_entry_t entry);
page              414 include/linux/swap.h extern void delete_from_swap_cache(struct page *);
page              415 include/linux/swap.h extern void free_page_and_swap_cache(struct page *);
page              416 include/linux/swap.h extern void free_pages_and_swap_cache(struct page **, int);
page              417 include/linux/swap.h extern struct page *lookup_swap_cache(swp_entry_t entry,
page              420 include/linux/swap.h extern struct page *read_swap_cache_async(swp_entry_t, gfp_t,
page              423 include/linux/swap.h extern struct page *__read_swap_cache_async(swp_entry_t, gfp_t,
page              426 include/linux/swap.h extern struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t flag,
page              428 include/linux/swap.h extern struct page *swapin_readahead(swp_entry_t entry, gfp_t flag,
page              449 include/linux/swap.h extern swp_entry_t get_swap_page(struct page *page);
page              450 include/linux/swap.h extern void put_swap_page(struct page *page, swp_entry_t entry);
page              462 include/linux/swap.h extern sector_t map_swap_page(struct page *, struct block_device **);
page              464 include/linux/swap.h extern int page_swapcount(struct page *);
page              468 include/linux/swap.h extern struct swap_info_struct *page_swap_info(struct page *);
page              470 include/linux/swap.h extern bool reuse_swap_page(struct page *, int *);
page              471 include/linux/swap.h extern int try_to_free_swap(struct page *);
page              484 include/linux/swap.h static inline int swap_readpage(struct page *page, bool do_poll)
page              504 include/linux/swap.h #define free_page_and_swap_cache(page) \
page              505 include/linux/swap.h 	put_page(page)
page              534 include/linux/swap.h static inline void put_swap_page(struct page *page, swp_entry_t swp)
page              538 include/linux/swap.h static inline struct page *swap_cluster_readahead(swp_entry_t entry,
page              544 include/linux/swap.h static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
page              550 include/linux/swap.h static inline int swap_writepage(struct page *p, struct writeback_control *wbc)
page              555 include/linux/swap.h static inline struct page *lookup_swap_cache(swp_entry_t swp,
page              562 include/linux/swap.h static inline int add_to_swap(struct page *page)
page              567 include/linux/swap.h static inline int add_to_swap_cache(struct page *page, swp_entry_t entry,
page              573 include/linux/swap.h static inline void __delete_from_swap_cache(struct page *page,
page              578 include/linux/swap.h static inline void delete_from_swap_cache(struct page *page)
page              582 include/linux/swap.h static inline int page_swapcount(struct page *page)
page              602 include/linux/swap.h #define reuse_swap_page(page, total_map_swapcount) \
page              603 include/linux/swap.h 	(page_trans_huge_mapcount(page, total_map_swapcount) == 1)
page              605 include/linux/swap.h static inline int try_to_free_swap(struct page *page)
page              610 include/linux/swap.h static inline swp_entry_t get_swap_page(struct page *page)
page              659 include/linux/swap.h extern void mem_cgroup_swapout(struct page *page, swp_entry_t entry);
page              660 include/linux/swap.h extern int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry);
page              663 include/linux/swap.h extern bool mem_cgroup_swap_full(struct page *page);
page              665 include/linux/swap.h static inline void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
page              669 include/linux/swap.h static inline int mem_cgroup_try_charge_swap(struct page *page,
page              685 include/linux/swap.h static inline bool mem_cgroup_swap_full(struct page *page)
page              101 include/linux/swapops.h static inline swp_entry_t make_device_private_entry(struct page *page, bool write)
page              104 include/linux/swapops.h 			 page_to_pfn(page));
page              128 include/linux/swapops.h static inline struct page *device_private_entry_to_page(swp_entry_t entry)
page              133 include/linux/swapops.h static inline swp_entry_t make_device_private_entry(struct page *page, bool write)
page              157 include/linux/swapops.h static inline struct page *device_private_entry_to_page(swp_entry_t entry)
page              164 include/linux/swapops.h static inline swp_entry_t make_migration_entry(struct page *page, int write)
page              166 include/linux/swapops.h 	BUG_ON(!PageLocked(compound_head(page)));
page              169 include/linux/swapops.h 			page_to_pfn(page));
page              188 include/linux/swapops.h static inline struct page *migration_entry_to_page(swp_entry_t entry)
page              190 include/linux/swapops.h 	struct page *p = pfn_to_page(swp_offset(entry));
page              212 include/linux/swapops.h #define make_migration_entry(page, write) swp_entry(0, 0)
page              223 include/linux/swapops.h static inline struct page *migration_entry_to_page(swp_entry_t entry)
page              246 include/linux/swapops.h 		struct page *page);
page              249 include/linux/swapops.h 		struct page *new);
page              277 include/linux/swapops.h 		struct page *page)
page              283 include/linux/swapops.h 		struct page *new)
page              313 include/linux/swapops.h static inline swp_entry_t make_hwpoison_entry(struct page *page)
page              315 include/linux/swapops.h 	BUG_ON(!PageLocked(page));
page              316 include/linux/swapops.h 	return swp_entry(SWP_HWPOISON, page_to_pfn(page));
page              336 include/linux/swapops.h static inline swp_entry_t make_hwpoison_entry(struct page *page)
page               10 include/linux/swiotlb.h struct page;
page              111 include/linux/tee_drv.h 			    struct page **pages, size_t num_pages,
page              195 include/linux/tee_drv.h 	struct page **pages;
page              420 include/linux/tee_drv.h static inline struct page **tee_shm_get_pages(struct tee_shm *shm,
page               13 include/linux/uio.h struct page;
page              115 include/linux/uio.h size_t iov_iter_copy_from_user_atomic(struct page *page,
page              121 include/linux/uio.h size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
page              123 include/linux/uio.h size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
page              225 include/linux/uio.h ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages,
page              227 include/linux/uio.h ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages,
page               23 include/linux/uprobes.h struct page;
page              139 include/linux/uprobes.h extern void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
page               12 include/linux/usb/gadget_configfs.h 		const char *page, size_t len)		\
page               17 include/linux/usb/gadget_configfs.h 	ret = usb_string_copy(page, &gs->__name);	\
page               24 include/linux/usb/gadget_configfs.h static ssize_t __struct##_##__name##_show(struct config_item *item, char *page) \
page               27 include/linux/usb/gadget_configfs.h 	return sprintf(page, "%s\n", gs->__name ?: "");	\
page               46 include/linux/vmalloc.h 	struct page		**pages;
page               77 include/linux/vmalloc.h extern void *vm_map_ram(struct page **pages, unsigned int count,
page              119 include/linux/vmalloc.h extern void *vmap(struct page **pages, unsigned int count,
page              159 include/linux/vmalloc.h 			struct page **pages);
page              162 include/linux/vmalloc.h 				    pgprot_t prot, struct page **pages);
page              175 include/linux/vmalloc.h 			pgprot_t prot, struct page **pages)
page              244 include/linux/vmstat.h void __inc_zone_page_state(struct page *, enum zone_stat_item);
page              245 include/linux/vmstat.h void __dec_zone_page_state(struct page *, enum zone_stat_item);
page              248 include/linux/vmstat.h void __inc_node_page_state(struct page *, enum node_stat_item);
page              249 include/linux/vmstat.h void __dec_node_page_state(struct page *, enum node_stat_item);
page              252 include/linux/vmstat.h void inc_zone_page_state(struct page *, enum zone_stat_item);
page              253 include/linux/vmstat.h void dec_zone_page_state(struct page *, enum zone_stat_item);
page              256 include/linux/vmstat.h void inc_node_page_state(struct page *, enum node_stat_item);
page              257 include/linux/vmstat.h void dec_node_page_state(struct page *, enum node_stat_item);
page              322 include/linux/vmstat.h static inline void __inc_zone_page_state(struct page *page,
page              325 include/linux/vmstat.h 	__inc_zone_state(page_zone(page), item);
page              328 include/linux/vmstat.h static inline void __inc_node_page_state(struct page *page,
page              331 include/linux/vmstat.h 	__inc_node_state(page_pgdat(page), item);
page              335 include/linux/vmstat.h static inline void __dec_zone_page_state(struct page *page,
page              338 include/linux/vmstat.h 	__dec_zone_state(page_zone(page), item);
page              341 include/linux/vmstat.h static inline void __dec_node_page_state(struct page *page,
page              344 include/linux/vmstat.h 	__dec_node_state(page_pgdat(page), item);
page              213 include/linux/writeback.h void __inode_attach_wb(struct inode *inode, struct page *page);
page              218 include/linux/writeback.h void wbc_account_cgroup_owner(struct writeback_control *wbc, struct page *page,
page              233 include/linux/writeback.h static inline void inode_attach_wb(struct inode *inode, struct page *page)
page              236 include/linux/writeback.h 		__inode_attach_wb(inode, page);
page              295 include/linux/writeback.h static inline void inode_attach_wb(struct inode *inode, struct page *page)
page              324 include/linux/writeback.h 					    struct page *page, size_t bytes)
page              391 include/linux/writeback.h typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
page              406 include/linux/writeback.h void account_page_redirty(struct page *page);
page               49 include/media/videobuf-dma-sg.h 	struct page         **pages;
page               53 include/media/videobuf-dma-sg.h 	struct page         **vaddr_pages;
page              678 include/net/bluetooth/hci.h 	__u8     page;
page             1199 include/net/bluetooth/hci.h 	__u8     page;
page             1203 include/net/bluetooth/hci.h 	__u8     page;
page             1917 include/net/bluetooth/hci.h 	__u8     page;
page               48 include/net/cfg802154.h 	int	(*set_channel)(struct wpan_phy *wpan_phy, u8 page, u8 channel);
page              313 include/net/ieee802154_netdev.h 			u8 channel, u8 page, u8 cap);
page              322 include/net/ieee802154_netdev.h 			u8 channel, u8 page, u8 bcn_ord, u8 sf_ord,
page              325 include/net/ieee802154_netdev.h 			u8 type, u32 channels, u8 page, u8 duration);
page               30 include/net/inet_common.h ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset,
page              219 include/net/ip.h ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
page              217 include/net/mac802154.h 	int		(*set_channel)(struct ieee802154_hw *hw, u8 page,
page              117 include/net/page_pool.h struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
page              119 include/net/page_pool.h static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool)
page              153 include/net/page_pool.h 			  struct page *page, bool allow_direct);
page              156 include/net/page_pool.h 				      struct page *page, bool allow_direct)
page              162 include/net/page_pool.h 	__page_pool_put_page(pool, page, allow_direct);
page              167 include/net/page_pool.h 					    struct page *page)
page              169 include/net/page_pool.h 	__page_pool_put_page(pool, page, true);
page              177 include/net/page_pool.h void page_pool_unmap_page(struct page_pool *pool, struct page *page);
page              179 include/net/page_pool.h 					  struct page *page)
page              182 include/net/page_pool.h 	page_pool_unmap_page(pool, page);
page              186 include/net/page_pool.h static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
page              188 include/net/page_pool.h 	return page->dma_addr;
page             1115 include/net/sock.h 	int			(*sendpage)(struct sock *sk, struct page *page,
page             1677 include/net/sock.h ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset,
page             1679 include/net/sock.h ssize_t sock_no_sendpage_locked(struct sock *sk, struct page *page,
page             2014 include/net/sock.h 					   struct page *page,
page             2019 include/net/sock.h 	err = skb_do_copy_data_nocache(sk, skb, from, page_address(page) + off,
page              325 include/net/tcp.h int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
page              327 include/net/tcp.h int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset,
page              329 include/net/tcp.h ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
page              362 include/net/tls.h int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
page              364 include/net/tls.h int tls_sw_sendpage(struct sock *sk, struct page *page,
page              380 include/net/tls.h int tls_device_sendpage(struct sock *sk, struct page *page,
page               56 include/net/xdp_sock.h 	struct page **pgs;
page               49 include/rdma/ib_umem_odp.h 	struct page		**page_list;
page             3974 include/rdma/ib_verbs.h 				  struct page *page,
page             3979 include/rdma/ib_verbs.h 	return dma_map_page(dev->dma_device, page, offset, size, direction);
page              477 include/rdma/rdmavt_qp.h 	void *page;
page              327 include/scsi/libfcoe.h 	struct page *crc_eof_page;
page              405 include/scsi/scsi_device.h extern int scsi_get_vpd_page(struct scsi_device *, u8 page, unsigned char *buf,
page               15 include/soc/tegra/mc.h struct page;
page               86 include/sound/memalloc.h 	struct page **page_table;	/* page table (for vmap/vunmap) */
page               74 include/sound/pcm.h 	struct page *(*page)(struct snd_pcm_substream *substream,
page             1192 include/sound/pcm.h struct page *snd_pcm_lib_get_vmalloc_page(struct snd_pcm_substream *substream,
page             1240 include/sound/pcm.h struct page *snd_pcm_sgbuf_ops_page(struct snd_pcm_substream *substream,
page              380 include/sound/soc-component.h struct page *snd_soc_pcm_component_page(struct snd_pcm_substream *substream,
page              862 include/trace/events/afs.h 		     pgoff_t page, unsigned long priv),
page              864 include/trace/events/afs.h 	    TP_ARGS(vnode, where, page, priv),
page              869 include/trace/events/afs.h 		    __field(pgoff_t,			page		)
page              876 include/trace/events/afs.h 		    __entry->page = page;
page              881 include/trace/events/afs.h 		      __entry->vnode, __entry->page, __entry->where,
page              562 include/trace/events/btrfs.h 	TP_PROTO(const struct page *page, const struct inode *inode,
page              565 include/trace/events/btrfs.h 	TP_ARGS(page, inode, wbc),
page              583 include/trace/events/btrfs.h 		__entry->index		= page->index;
page              611 include/trace/events/btrfs.h 	TP_PROTO(const struct page *page, const struct inode *inode,
page              614 include/trace/events/btrfs.h 	TP_ARGS(page, inode, wbc)
page              619 include/trace/events/btrfs.h 	TP_PROTO(const struct page *page, u64 start, u64 end, int uptodate),
page              621 include/trace/events/btrfs.h 	TP_ARGS(page, start, end, uptodate),
page              632 include/trace/events/btrfs.h 	TP_fast_assign_btrfs(btrfs_sb(page->mapping->host->i_sb),
page              633 include/trace/events/btrfs.h 		__entry->ino	= btrfs_ino(BTRFS_I(page->mapping->host));
page              634 include/trace/events/btrfs.h 		__entry->index	= page->index;
page              639 include/trace/events/btrfs.h 			 BTRFS_I(page->mapping->host)->root->root_key.objectid;
page               13 include/trace/events/cma.h 	TP_PROTO(unsigned long pfn, const struct page *page,
page               16 include/trace/events/cma.h 	TP_ARGS(pfn, page, count, align),
page               20 include/trace/events/cma.h 		__field(const struct page *, page)
page               27 include/trace/events/cma.h 		__entry->page = page;
page               34 include/trace/events/cma.h 		  __entry->page,
page               41 include/trace/events/cma.h 	TP_PROTO(unsigned long pfn, const struct page *page,
page               44 include/trace/events/cma.h 	TP_ARGS(pfn, page, count),
page               48 include/trace/events/cma.h 		__field(const struct page *, page)
page               54 include/trace/events/cma.h 		__entry->page = page;
page               60 include/trace/events/cma.h 		  __entry->page,
page               83 include/trace/events/erofs.h 	TP_PROTO(struct page *page, bool raw),
page               85 include/trace/events/erofs.h 	TP_ARGS(page, raw),
page               97 include/trace/events/erofs.h 		__entry->dev	= page->mapping->host->i_sb->s_dev;
page               98 include/trace/events/erofs.h 		__entry->nid	= EROFS_I(page->mapping->host)->nid;
page               99 include/trace/events/erofs.h 		__entry->dir	= S_ISDIR(page->mapping->host->i_mode);
page              100 include/trace/events/erofs.h 		__entry->index	= page->index;
page              101 include/trace/events/erofs.h 		__entry->uptodate = PageUptodate(page);
page              116 include/trace/events/erofs.h 	TP_PROTO(struct inode *inode, struct page *page, unsigned int nrpage,
page              119 include/trace/events/erofs.h 	TP_ARGS(inode, page, nrpage, raw),
page              132 include/trace/events/erofs.h 		__entry->start	= page->index;
page              525 include/trace/events/ext4.h 	TP_PROTO(struct page *page),
page              527 include/trace/events/ext4.h 	TP_ARGS(page),
page              537 include/trace/events/ext4.h 		__entry->dev	= page->mapping->host->i_sb->s_dev;
page              538 include/trace/events/ext4.h 		__entry->ino	= page->mapping->host->i_ino;
page              539 include/trace/events/ext4.h 		__entry->index	= page->index;
page              550 include/trace/events/ext4.h 	TP_PROTO(struct page *page),
page              552 include/trace/events/ext4.h 	TP_ARGS(page)
page              557 include/trace/events/ext4.h 	TP_PROTO(struct page *page),
page              559 include/trace/events/ext4.h 	TP_ARGS(page)
page              564 include/trace/events/ext4.h 	TP_PROTO(struct page *page),
page              566 include/trace/events/ext4.h 	TP_ARGS(page)
page              570 include/trace/events/ext4.h 	TP_PROTO(struct page *page, unsigned int offset, unsigned int length),
page              572 include/trace/events/ext4.h 	TP_ARGS(page, offset, length),
page              583 include/trace/events/ext4.h 		__entry->dev	= page->mapping->host->i_sb->s_dev;
page              584 include/trace/events/ext4.h 		__entry->ino	= page->mapping->host->i_ino;
page              585 include/trace/events/ext4.h 		__entry->index	= page->index;
page              598 include/trace/events/ext4.h 	TP_PROTO(struct page *page, unsigned int offset, unsigned int length),
page              600 include/trace/events/ext4.h 	TP_ARGS(page, offset, length)
page              604 include/trace/events/ext4.h 	TP_PROTO(struct page *page, unsigned int offset, unsigned int length),
page              606 include/trace/events/ext4.h 	TP_ARGS(page, offset, length)
page             1005 include/trace/events/f2fs.h 	TP_PROTO(struct page *page, struct f2fs_io_info *fio),
page             1007 include/trace/events/f2fs.h 	TP_ARGS(page, fio),
page             1022 include/trace/events/f2fs.h 		__entry->dev		= page_file_mapping(page)->host->i_sb->s_dev;
page             1023 include/trace/events/f2fs.h 		__entry->ino		= page_file_mapping(page)->host->i_ino;
page             1024 include/trace/events/f2fs.h 		__entry->index		= page->index;
page             1046 include/trace/events/f2fs.h 	TP_PROTO(struct page *page, struct f2fs_io_info *fio),
page             1048 include/trace/events/f2fs.h 	TP_ARGS(page, fio),
page             1050 include/trace/events/f2fs.h 	TP_CONDITION(page->mapping)
page             1055 include/trace/events/f2fs.h 	TP_PROTO(struct page *page, struct f2fs_io_info *fio),
page             1057 include/trace/events/f2fs.h 	TP_ARGS(page, fio),
page             1059 include/trace/events/f2fs.h 	TP_CONDITION(page->mapping)
page             1195 include/trace/events/f2fs.h 	TP_PROTO(struct page *page, int type),
page             1197 include/trace/events/f2fs.h 	TP_ARGS(page, type),
page             1210 include/trace/events/f2fs.h 		__entry->dev	= page_file_mapping(page)->host->i_sb->s_dev;
page             1211 include/trace/events/f2fs.h 		__entry->ino	= page_file_mapping(page)->host->i_ino;
page             1214 include/trace/events/f2fs.h 			S_ISDIR(page_file_mapping(page)->host->i_mode);
page             1215 include/trace/events/f2fs.h 		__entry->index	= page->index;
page             1216 include/trace/events/f2fs.h 		__entry->dirty	= PageDirty(page);
page             1217 include/trace/events/f2fs.h 		__entry->uptodate = PageUptodate(page);
page             1232 include/trace/events/f2fs.h 	TP_PROTO(struct page *page, int type),
page             1234 include/trace/events/f2fs.h 	TP_ARGS(page, type)
page             1239 include/trace/events/f2fs.h 	TP_PROTO(struct page *page, int type),
page             1241 include/trace/events/f2fs.h 	TP_ARGS(page, type)
page             1246 include/trace/events/f2fs.h 	TP_PROTO(struct page *page, int type),
page             1248 include/trace/events/f2fs.h 	TP_ARGS(page, type)
page             1253 include/trace/events/f2fs.h 	TP_PROTO(struct page *page, int type),
page             1255 include/trace/events/f2fs.h 	TP_ARGS(page, type)
page             1260 include/trace/events/f2fs.h 	TP_PROTO(struct page *page, int type),
page             1262 include/trace/events/f2fs.h 	TP_ARGS(page, type)
page             1267 include/trace/events/f2fs.h 	TP_PROTO(struct page *page, int type),
page             1269 include/trace/events/f2fs.h 	TP_ARGS(page, type)
page             1274 include/trace/events/f2fs.h 	TP_PROTO(struct page *page, int type),
page             1276 include/trace/events/f2fs.h 	TP_ARGS(page, type)
page             1371 include/trace/events/f2fs.h 	TP_PROTO(struct inode *inode, struct page *page, unsigned int nrpage),
page             1373 include/trace/events/f2fs.h 	TP_ARGS(inode, page, nrpage),
page             1385 include/trace/events/f2fs.h 		__entry->start	= page->index;
page               18 include/trace/events/filemap.h 	TP_PROTO(struct page *page),
page               20 include/trace/events/filemap.h 	TP_ARGS(page),
page               30 include/trace/events/filemap.h 		__entry->pfn = page_to_pfn(page);
page               31 include/trace/events/filemap.h 		__entry->i_ino = page->mapping->host->i_ino;
page               32 include/trace/events/filemap.h 		__entry->index = page->index;
page               33 include/trace/events/filemap.h 		if (page->mapping->host->i_sb)
page               34 include/trace/events/filemap.h 			__entry->s_dev = page->mapping->host->i_sb->s_dev;
page               36 include/trace/events/filemap.h 			__entry->s_dev = page->mapping->host->i_rdev;
page               48 include/trace/events/filemap.h 	TP_PROTO(struct page *page),
page               49 include/trace/events/filemap.h 	TP_ARGS(page)
page               53 include/trace/events/filemap.h 	TP_PROTO(struct page *page),
page               54 include/trace/events/filemap.h 	TP_ARGS(page)
page               65 include/trace/events/fs_dax.h 		struct page *zero_page,
page               72 include/trace/events/fs_dax.h 		__field(struct page *, zero_page)
page               99 include/trace/events/fs_dax.h 		struct page *zero_page, void *radix_entry), \
page              367 include/trace/events/fscache.h 	    TP_PROTO(struct fscache_cookie *cookie, struct page *page,
page              370 include/trace/events/fscache.h 	    TP_ARGS(cookie, page, why),
page              374 include/trace/events/fscache.h 		    __field(pgoff_t,			page		)
page              380 include/trace/events/fscache.h 		    __entry->page		= page->index;
page              387 include/trace/events/fscache.h 		      __entry->page)
page              391 include/trace/events/fscache.h 	    TP_PROTO(struct fscache_cookie *cookie, struct page *page,
page              394 include/trace/events/fscache.h 	    TP_ARGS(cookie, page, val, n),
page              398 include/trace/events/fscache.h 		    __field(void *,			page		)
page              405 include/trace/events/fscache.h 		    __entry->page		= page;
page              411 include/trace/events/fscache.h 		      __entry->cookie, __entry->page, __entry->val, __entry->n)
page              454 include/trace/events/fscache.h 	    TP_PROTO(struct fscache_cookie *cookie, struct page *page,
page              457 include/trace/events/fscache.h 	    TP_ARGS(cookie, page, op, what),
page              461 include/trace/events/fscache.h 		    __field(pgoff_t,			page		)
page              468 include/trace/events/fscache.h 		    __entry->page		= page ? page->index : 0;
page              476 include/trace/events/fscache.h 		      __entry->page, __entry->op)
page              480 include/trace/events/fscache.h 	    TP_PROTO(struct fscache_cookie *cookie, struct page *page,
page              483 include/trace/events/fscache.h 	    TP_ARGS(cookie, page, op, ret),
page              487 include/trace/events/fscache.h 		    __field(pgoff_t,			page		)
page              494 include/trace/events/fscache.h 		    __entry->page		= page->index;
page              500 include/trace/events/fscache.h 		      __entry->cookie, __entry->page, __entry->op, __entry->ret)
page              520 include/trace/events/fscache.h 		    __entry->results0		= results[0] ? ((struct page *)results[0])->index : (pgoff_t)-1;
page               51 include/trace/events/huge_memory.h 	TP_PROTO(struct mm_struct *mm, struct page *page, bool writable,
page               54 include/trace/events/huge_memory.h 	TP_ARGS(mm, page, writable, referenced, none_or_zero, status, unmapped),
page               68 include/trace/events/huge_memory.h 		__entry->pfn = page ? page_to_pfn(page) : -1;
page              112 include/trace/events/huge_memory.h 	TP_PROTO(struct page *page, int none_or_zero,
page              115 include/trace/events/huge_memory.h 	TP_ARGS(page, none_or_zero, referenced, writable, status),
page              126 include/trace/events/huge_memory.h 		__entry->pfn = page ? page_to_pfn(page) : -1;
page              154 include/trace/events/kmem.h 	TP_PROTO(struct page *page, unsigned int order),
page              156 include/trace/events/kmem.h 	TP_ARGS(page, order),
page              164 include/trace/events/kmem.h 		__entry->pfn		= page_to_pfn(page);
page              176 include/trace/events/kmem.h 	TP_PROTO(struct page *page),
page              178 include/trace/events/kmem.h 	TP_ARGS(page),
page              185 include/trace/events/kmem.h 		__entry->pfn		= page_to_pfn(page);
page              195 include/trace/events/kmem.h 	TP_PROTO(struct page *page, unsigned int order,
page              198 include/trace/events/kmem.h 	TP_ARGS(page, order, gfp_flags, migratetype),
page              208 include/trace/events/kmem.h 		__entry->pfn		= page ? page_to_pfn(page) : -1UL;
page              224 include/trace/events/kmem.h 	TP_PROTO(struct page *page, unsigned int order, int migratetype),
page              226 include/trace/events/kmem.h 	TP_ARGS(page, order, migratetype),
page              235 include/trace/events/kmem.h 		__entry->pfn		= page ? page_to_pfn(page) : -1UL;
page              250 include/trace/events/kmem.h 	TP_PROTO(struct page *page, unsigned int order, int migratetype),
page              252 include/trace/events/kmem.h 	TP_ARGS(page, order, migratetype)
page              257 include/trace/events/kmem.h 	TP_PROTO(struct page *page, unsigned int order, int migratetype),
page              259 include/trace/events/kmem.h 	TP_ARGS(page, order, migratetype),
page              268 include/trace/events/kmem.h 		__entry->pfn		= page ? page_to_pfn(page) : -1UL;
page              280 include/trace/events/kmem.h 	TP_PROTO(struct page *page,
page              284 include/trace/events/kmem.h 	TP_ARGS(page,
page              298 include/trace/events/kmem.h 		__entry->pfn			= page_to_pfn(page);
page              304 include/trace/events/kmem.h 					get_pageblock_migratetype(page));
page               41 include/trace/events/page_pool.h 		 const struct page *page, u32 release),
page               43 include/trace/events/page_pool.h 	TP_ARGS(pool, page, release),
page               47 include/trace/events/page_pool.h 		__field(const struct page *,		page)
page               53 include/trace/events/page_pool.h 		__entry->page		= page;
page               58 include/trace/events/page_pool.h 		  __entry->pool, __entry->page, __entry->release)
page               64 include/trace/events/page_pool.h 		 const struct page *page, u32 hold),
page               66 include/trace/events/page_pool.h 	TP_ARGS(pool, page, hold),
page               70 include/trace/events/page_pool.h 		__field(const struct page *,		page)
page               76 include/trace/events/page_pool.h 		__entry->page	= page;
page               81 include/trace/events/page_pool.h 		  __entry->pool, __entry->page, __entry->hold)
page               15 include/trace/events/page_ref.h 	TP_PROTO(struct page *page, int v),
page               17 include/trace/events/page_ref.h 	TP_ARGS(page, v),
page               30 include/trace/events/page_ref.h 		__entry->pfn = page_to_pfn(page);
page               31 include/trace/events/page_ref.h 		__entry->flags = page->flags;
page               32 include/trace/events/page_ref.h 		__entry->count = page_ref_count(page);
page               33 include/trace/events/page_ref.h 		__entry->mapcount = page_mapcount(page);
page               34 include/trace/events/page_ref.h 		__entry->mapping = page->mapping;
page               35 include/trace/events/page_ref.h 		__entry->mt = get_pageblock_migratetype(page);
page               49 include/trace/events/page_ref.h 	TP_PROTO(struct page *page, int v),
page               51 include/trace/events/page_ref.h 	TP_ARGS(page, v)
page               56 include/trace/events/page_ref.h 	TP_PROTO(struct page *page, int v),
page               58 include/trace/events/page_ref.h 	TP_ARGS(page, v)
page               63 include/trace/events/page_ref.h 	TP_PROTO(struct page *page, int v, int ret),
page               65 include/trace/events/page_ref.h 	TP_ARGS(page, v, ret),
page               79 include/trace/events/page_ref.h 		__entry->pfn = page_to_pfn(page);
page               80 include/trace/events/page_ref.h 		__entry->flags = page->flags;
page               81 include/trace/events/page_ref.h 		__entry->count = page_ref_count(page);
page               82 include/trace/events/page_ref.h 		__entry->mapcount = page_mapcount(page);
page               83 include/trace/events/page_ref.h 		__entry->mapping = page->mapping;
page               84 include/trace/events/page_ref.h 		__entry->mt = get_pageblock_migratetype(page);
page               99 include/trace/events/page_ref.h 	TP_PROTO(struct page *page, int v, int ret),
page              101 include/trace/events/page_ref.h 	TP_ARGS(page, v, ret)
page              106 include/trace/events/page_ref.h 	TP_PROTO(struct page *page, int v, int ret),
page              108 include/trace/events/page_ref.h 	TP_ARGS(page, v, ret)
page              113 include/trace/events/page_ref.h 	TP_PROTO(struct page *page, int v, int ret),
page              115 include/trace/events/page_ref.h 	TP_ARGS(page, v, ret)
page              120 include/trace/events/page_ref.h 	TP_PROTO(struct page *page, int v, int ret),
page              122 include/trace/events/page_ref.h 	TP_ARGS(page, v, ret)
page              127 include/trace/events/page_ref.h 	TP_PROTO(struct page *page, int v),
page              129 include/trace/events/page_ref.h 	TP_ARGS(page, v)
page               19 include/trace/events/pagemap.h #define trace_pagemap_flags(page) ( \
page               20 include/trace/events/pagemap.h 	(PageAnon(page)		? PAGEMAP_ANONYMOUS  : PAGEMAP_FILE) | \
page               21 include/trace/events/pagemap.h 	(page_mapped(page)	? PAGEMAP_MAPPED     : 0) | \
page               22 include/trace/events/pagemap.h 	(PageSwapCache(page)	? PAGEMAP_SWAPCACHE  : 0) | \
page               23 include/trace/events/pagemap.h 	(PageSwapBacked(page)	? PAGEMAP_SWAPBACKED : 0) | \
page               24 include/trace/events/pagemap.h 	(PageMappedToDisk(page)	? PAGEMAP_MAPPEDDISK : 0) | \
page               25 include/trace/events/pagemap.h 	(page_has_private(page) ? PAGEMAP_BUFFERS    : 0) \
page               31 include/trace/events/pagemap.h 		struct page *page,
page               35 include/trace/events/pagemap.h 	TP_ARGS(page, lru),
page               38 include/trace/events/pagemap.h 		__field(struct page *,	page	)
page               45 include/trace/events/pagemap.h 		__entry->page	= page;
page               46 include/trace/events/pagemap.h 		__entry->pfn	= page_to_pfn(page);
page               48 include/trace/events/pagemap.h 		__entry->flags	= trace_pagemap_flags(page);
page               53 include/trace/events/pagemap.h 			__entry->page,
page               66 include/trace/events/pagemap.h 	TP_PROTO(struct page *page),
page               68 include/trace/events/pagemap.h 	TP_ARGS(page),
page               71 include/trace/events/pagemap.h 		__field(struct page *,	page	)
page               76 include/trace/events/pagemap.h 		__entry->page	= page;
page               77 include/trace/events/pagemap.h 		__entry->pfn	= page_to_pfn(page);
page               81 include/trace/events/pagemap.h 	TP_printk("page=%p pfn=%lu", __entry->page, __entry->pfn)
page             1529 include/trace/events/rpcrdma.h 		const void *page
page             1532 include/trace/events/rpcrdma.h 	TP_ARGS(rdma, page),
page             1535 include/trace/events/rpcrdma.h 		__field(const void *, page);
page             1541 include/trace/events/rpcrdma.h 		__entry->page = page;
page             1547 include/trace/events/rpcrdma.h 		__get_str(addr), __get_str(device), __entry->page
page              314 include/trace/events/vmscan.h 	TP_PROTO(struct page *page),
page              316 include/trace/events/vmscan.h 	TP_ARGS(page),
page              324 include/trace/events/vmscan.h 		__entry->pfn = page_to_pfn(page);
page              326 include/trace/events/vmscan.h 						page_is_file_cache(page));
page               58 include/trace/events/writeback.h 	TP_PROTO(struct page *page, struct address_space *mapping),
page               60 include/trace/events/writeback.h 	TP_ARGS(page, mapping),
page               73 include/trace/events/writeback.h 		__entry->index = page->index;
page               85 include/trace/events/writeback.h 	TP_PROTO(struct page *page, struct address_space *mapping),
page               87 include/trace/events/writeback.h 	TP_ARGS(page, mapping)
page               92 include/trace/events/writeback.h 	TP_PROTO(struct page *page, struct address_space *mapping),
page               94 include/trace/events/writeback.h 	TP_ARGS(page, mapping)
page              240 include/trace/events/writeback.h 	TP_PROTO(struct page *page, struct bdi_writeback *wb),
page              242 include/trace/events/writeback.h 	TP_ARGS(page, wb),
page              254 include/trace/events/writeback.h 		struct address_space *mapping = page_mapping(page);
page              262 include/trace/events/writeback.h 		__entry->page_cgroup_ino = page->mem_cgroup->css.cgroup->kn->id.ino;
page              382 include/trace/events/xdp.h 		 const struct page *page),
page              384 include/trace/events/xdp.h 	TP_ARGS(mem, page),
page              387 include/trace/events/xdp.h 		__field(const struct page *,	page)
page              393 include/trace/events/xdp.h 		__entry->page		= page;
page              401 include/trace/events/xdp.h 		  __entry->page
page               88 include/uapi/linux/i2o-dev.h 	unsigned int page;	/* HTML page */
page               90 include/xen/arm/page.h 				   struct page **pages, unsigned int count);
page               94 include/xen/arm/page.h 				     struct page **pages, unsigned int count);
page               27 include/xen/balloon.h int alloc_xenballooned_pages(int nr_pages, struct page **pages);
page               28 include/xen/balloon.h void free_xenballooned_pages(int nr_pages, struct page **pages);
page               76 include/xen/grant_table.h 	struct page **pages;
page              102 include/xen/grant_table.h 			       unsigned long page);
page              137 include/xen/grant_table.h 	struct page *page, int readonly)
page              139 include/xen/grant_table.h 	gnttab_grant_foreign_access_ref(ref, domid, xen_page_to_gfn(page),
page              198 include/xen/grant_table.h int gnttab_alloc_pages(int nr_pages, struct page **pages);
page              199 include/xen/grant_table.h void gnttab_free_pages(int nr_pages, struct page **pages);
page              209 include/xen/grant_table.h 	struct page **pages;
page              219 include/xen/grant_table.h int gnttab_pages_set_private(int nr_pages, struct page **pages);
page              220 include/xen/grant_table.h void gnttab_pages_clear_private(int nr_pages, struct page **pages);
page              224 include/xen/grant_table.h 		    struct page **pages, unsigned int count);
page              227 include/xen/grant_table.h 		      struct page **pages, unsigned int count);
page              250 include/xen/grant_table.h static inline struct xen_page_foreign *xen_page_foreign(struct page *page)
page              252 include/xen/grant_table.h 	if (!PageForeign(page))
page              255 include/xen/grant_table.h 	return (struct xen_page_foreign *)page->private;
page              258 include/xen/grant_table.h 	return (struct xen_page_foreign *)&page->private;
page              273 include/xen/grant_table.h void gnttab_foreach_grant_in_range(struct page *page,
page              280 include/xen/grant_table.h static inline void gnttab_for_one_grant(struct page *page, unsigned int offset,
page              288 include/xen/grant_table.h 	gnttab_foreach_grant_in_range(page, offset, len, fn, data);
page              292 include/xen/grant_table.h void gnttab_foreach_grant(struct page **pages,
page              857 include/xen/interface/io/displif.h #define XENDISPL_IN_RING(page) \
page              858 include/xen/interface/io/displif.h 	((struct xendispl_evt *)((char *)(page) + XENDISPL_IN_RING_OFFS))
page              859 include/xen/interface/io/displif.h #define XENDISPL_IN_RING_REF(page, idx) \
page              860 include/xen/interface/io/displif.h 	(XENDISPL_IN_RING((page))[(idx) % XENDISPL_IN_RING_LEN])
page               95 include/xen/interface/io/fbif.h #define XENFB_IN_RING(page) \
page               96 include/xen/interface/io/fbif.h 	((union xenfb_in_event *)((char *)(page) + XENFB_IN_RING_OFFS))
page               97 include/xen/interface/io/fbif.h #define XENFB_IN_RING_REF(page, idx) \
page               98 include/xen/interface/io/fbif.h 	(XENFB_IN_RING((page))[(idx) % XENFB_IN_RING_LEN])
page              103 include/xen/interface/io/fbif.h #define XENFB_OUT_RING(page) \
page              104 include/xen/interface/io/fbif.h 	((union xenfb_out_event *)((char *)(page) + XENFB_OUT_RING_OFFS))
page              105 include/xen/interface/io/fbif.h #define XENFB_OUT_RING_REF(page, idx) \
page              106 include/xen/interface/io/fbif.h 	(XENFB_OUT_RING((page))[(idx) % XENFB_OUT_RING_LEN])
page              542 include/xen/interface/io/kbdif.h #define XENKBD_IN_RING(page) \
page              543 include/xen/interface/io/kbdif.h 	((union xenkbd_in_event *)((char *)(page) + XENKBD_IN_RING_OFFS))
page              544 include/xen/interface/io/kbdif.h #define XENKBD_IN_RING_REF(page, idx) \
page              545 include/xen/interface/io/kbdif.h 	(XENKBD_IN_RING((page))[(idx) % XENKBD_IN_RING_LEN])
page              550 include/xen/interface/io/kbdif.h #define XENKBD_OUT_RING(page) \
page              551 include/xen/interface/io/kbdif.h 	((union xenkbd_out_event *)((char *)(page) + XENKBD_OUT_RING_OFFS))
page              552 include/xen/interface/io/kbdif.h #define XENKBD_OUT_RING_REF(page, idx) \
page              553 include/xen/interface/io/kbdif.h 	(XENKBD_OUT_RING((page))[(idx) % XENKBD_OUT_RING_LEN])
page             1076 include/xen/interface/io/sndif.h #define XENSND_IN_RING(page) \
page             1077 include/xen/interface/io/sndif.h 	((struct xensnd_evt *)((char *)(page) + XENSND_IN_RING_OFFS))
page             1078 include/xen/interface/io/sndif.h #define XENSND_IN_RING_REF(page, idx) \
page             1079 include/xen/interface/io/sndif.h 	(XENSND_IN_RING((page))[(idx) % XENSND_IN_RING_LEN])
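
The displif, fbif, kbdif and sndif macros above follow one shared convention: a granted page carries a circular event ring at a fixed byte offset, and indices are reduced modulo the ring length. A generic sketch of that convention; every EXAMPLE_* name, the event layout, and the offset value are hypothetical:

	struct example_evt {				/* hypothetical fixed-size event record */
		uint8_t type;
		uint8_t pad[7];
	};

	#define EXAMPLE_IN_RING_OFFS	1024		/* hypothetical offset into the shared page */
	#define EXAMPLE_IN_RING_SIZE	(PAGE_SIZE - EXAMPLE_IN_RING_OFFS)
	#define EXAMPLE_IN_RING_LEN	(EXAMPLE_IN_RING_SIZE / sizeof(struct example_evt))
	#define EXAMPLE_IN_RING(page) \
		((struct example_evt *)((char *)(page) + EXAMPLE_IN_RING_OFFS))
	#define EXAMPLE_IN_RING_REF(page, idx) \
		(EXAMPLE_IN_RING((page))[(idx) % EXAMPLE_IN_RING_LEN])
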
page               22 include/xen/mem-reservation.h static inline void xenmem_reservation_scrub_page(struct page *page)
page               25 include/xen/mem-reservation.h 		clear_highpage(page);
page               30 include/xen/mem-reservation.h 					    struct page **pages,
page               34 include/xen/mem-reservation.h 					   struct page **pages);
page               38 include/xen/mem-reservation.h 							struct page **pages,
page               48 include/xen/mem-reservation.h 						       struct page **pages)
page               20 include/xen/page.h #define page_to_xen_pfn(page)		\
page               21 include/xen/page.h 	((page_to_pfn(page)) << (PAGE_SHIFT - XEN_PAGE_SHIFT))
page               32 include/xen/page.h static inline unsigned long xen_page_to_gfn(struct page *page)
page               34 include/xen/page.h 	return pfn_to_gfn(page_to_xen_pfn(page));
page               47 include/xen/xen-front-pgdir-shbuf.h 	struct page **pages;
page               64 include/xen/xen-front-pgdir-shbuf.h 	struct page **pages;
page               67 include/xen/xen-ops.h 		  unsigned int domid, bool no_translate, struct page **pages);
page               72 include/xen/xen-ops.h 				bool no_translate, struct page **pages)
page               87 include/xen/xen-ops.h 			      struct page **pages);
page               89 include/xen/xen-ops.h 			      int nr, struct page **pages);
page              100 include/xen/xen-ops.h 					    struct page **pages)
page              106 include/xen/xen-ops.h 					    int nr, struct page **pages)
page              137 include/xen/xen-ops.h 					     struct page **pages)
page              173 include/xen/xen-ops.h 					     struct page **pages)
page              198 include/xen/xen-ops.h 					     struct page **pages)
page              208 include/xen/xen-ops.h 			       int numpgs, struct page **pages);
page               46 include/xen/xen.h struct page;
page               49 include/xen/xen.h 		const struct page *page);
page              359 init/do_mounts.c static void __init get_fs_names(char *page)
page              361 init/do_mounts.c 	char *s = page;
page              364 init/do_mounts.c 		strcpy(page, root_fs_names);
page              370 init/do_mounts.c 		int len = get_filesystem_list(page);
page              373 init/do_mounts.c 		page[len] = '\0';
page              374 init/do_mounts.c 		for (p = page-1; p; p = next) {
page              406 init/do_mounts.c 	struct page *page = alloc_page(GFP_KERNEL);
page              407 init/do_mounts.c 	char *fs_names = page_address(page);
page              461 init/do_mounts.c 	put_page(page);
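
The do_mounts.c lines above show a common idiom: borrow a single lowmem page as a kernel scratch buffer via its linear mapping, then drop the reference. A minimal sketch of the same idiom; demo_use_scratch_page() is a hypothetical helper:

	#include <linux/kernel.h>
	#include <linux/gfp.h>
	#include <linux/mm.h>

	static int demo_use_scratch_page(void)
	{
		struct page *page = alloc_page(GFP_KERNEL);
		char *buf;

		if (!page)
			return -ENOMEM;
		buf = page_address(page);	/* lowmem: the direct mapping is valid */
		snprintf(buf, PAGE_SIZE, "scratch");
		/* ... consume buf ... */
		put_page(page);			/* frees the page once the last ref drops */
		return 0;
	}
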
page              290 kernel/bpf/cpumap.c 			struct page *page = virt_to_page(f);
page              296 kernel/bpf/cpumap.c 			prefetchw(page);
page              248 kernel/bpf/stackmap.c 	struct page *page;
page              256 kernel/bpf/stackmap.c 	page = find_get_page(vma->vm_file->f_mapping, 0);
page              257 kernel/bpf/stackmap.c 	if (!page)
page              261 kernel/bpf/stackmap.c 	page_addr = kmap_atomic(page);
page              278 kernel/bpf/stackmap.c 	put_page(page);
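
stackmap.c above demonstrates reading from a file's page cache without going through a read path: take a reference on page 0 with find_get_page(), map it briefly, copy, unmap, release. A sketch of the same sequence; read_first_bytes() is hypothetical:

	#include <linux/pagemap.h>
	#include <linux/highmem.h>
	#include <linux/string.h>

	static int read_first_bytes(struct address_space *mapping, void *dst, size_t len)
	{
		struct page *page = find_get_page(mapping, 0);
		void *kaddr;

		if (!page)
			return -ENOENT;		/* page 0 not resident in the cache */
		kaddr = kmap_atomic(page);	/* short, non-sleeping mapping */
		memcpy(dst, kaddr, len);
		kunmap_atomic(kaddr);
		put_page(page);			/* drop find_get_page()'s reference */
		return 0;
	}
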
page              417 kernel/crash_core.c 	VMCOREINFO_STRUCT_SIZE(page);
page              423 kernel/crash_core.c 	VMCOREINFO_OFFSET(page, flags);
page              424 kernel/crash_core.c 	VMCOREINFO_OFFSET(page, _refcount);
page              425 kernel/crash_core.c 	VMCOREINFO_OFFSET(page, mapping);
page              426 kernel/crash_core.c 	VMCOREINFO_OFFSET(page, lru);
page              427 kernel/crash_core.c 	VMCOREINFO_OFFSET(page, _mapcount);
page              428 kernel/crash_core.c 	VMCOREINFO_OFFSET(page, private);
page              429 kernel/crash_core.c 	VMCOREINFO_OFFSET(page, compound_dtor);
page              430 kernel/crash_core.c 	VMCOREINFO_OFFSET(page, compound_order);
page              431 kernel/crash_core.c 	VMCOREINFO_OFFSET(page, compound_head);
page              381 kernel/debug/kdb/kdb_support.c 	struct page *page;
page              386 kernel/debug/kdb/kdb_support.c 	page = pfn_to_page(pfn);
page              387 kernel/debug/kdb/kdb_support.c 	vaddr = kmap_atomic(page);
page              199 kernel/dma/coherent.c 		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
page              203 kernel/dma/coherent.c 		bitmap_release_region(mem->bitmap, page, order);
page              192 kernel/dma/contiguous.c struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
page              211 kernel/dma/contiguous.c bool dma_release_from_contiguous(struct device *dev, struct page *pages,
page              231 kernel/dma/contiguous.c struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp)
page              234 kernel/dma/contiguous.c 	struct page *page = NULL;
page              247 kernel/dma/contiguous.c 		page = cma_alloc(cma, count, cma_align, gfp & __GFP_NOWARN);
page              250 kernel/dma/contiguous.c 	return page;
page              264 kernel/dma/contiguous.c void dma_free_contiguous(struct device *dev, struct page *page, size_t size)
page              266 kernel/dma/contiguous.c 	if (!cma_release(dev_get_cma_area(dev), page,
page              268 kernel/dma/contiguous.c 		__free_pages(page, get_order(size));
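
dma_alloc_contiguous() above prefers the device's CMA area, and dma_free_contiguous() undoes either outcome: when cma_release() reports the pages were not CMA's, it falls back to __free_pages(). A sketch of the round trip, assuming a hypothetical demo_ helper:

	#include <linux/dma-contiguous.h>

	static int demo_dma_pages_round_trip(struct device *dev, size_t size)
	{
		struct page *page = dma_alloc_contiguous(dev, size, GFP_KERNEL);

		if (!page)
			return -ENOMEM;
		/* ... program the device with page_to_phys(page) ... */
		dma_free_contiguous(dev, page, size);	/* CMA or buddy, it picks */
		return 0;
	}
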
page              564 kernel/dma/debug.c void debug_dma_assert_idle(struct page *page)
page              576 kernel/dma/debug.c 	if (!page)
page              579 kernel/dma/debug.c 	cln = (phys_addr_t) page_to_pfn(page) << CACHELINE_PER_PAGE_SHIFT;
page             1094 kernel/dma/debug.c 			    struct page *page, size_t offset)
page             1101 kernel/dma/debug.c 		if (PageHighMem(page))
page             1103 kernel/dma/debug.c 		addr = page_address(page) + offset;
page             1111 kernel/dma/debug.c 			if (page != stack_vm_area->pages[i])
page             1255 kernel/dma/debug.c void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
page             1272 kernel/dma/debug.c 	entry->pfn	 = page_to_pfn(page);
page             1279 kernel/dma/debug.c 	check_for_stack(dev, page, offset);
page             1281 kernel/dma/debug.c 	if (!PageHighMem(page)) {
page             1282 kernel/dma/debug.c 		void *addr = page_address(page) + offset;
page               86 kernel/dma/direct.c struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
page               91 kernel/dma/direct.c 	struct page *page = NULL;
page              101 kernel/dma/direct.c 	page = dma_alloc_contiguous(dev, alloc_size, gfp);
page              102 kernel/dma/direct.c 	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
page              103 kernel/dma/direct.c 		dma_free_contiguous(dev, page, alloc_size);
page              104 kernel/dma/direct.c 		page = NULL;
page              107 kernel/dma/direct.c 	if (!page)
page              108 kernel/dma/direct.c 		page = alloc_pages_node(node, gfp, get_order(alloc_size));
page              109 kernel/dma/direct.c 	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
page              110 kernel/dma/direct.c 		dma_free_contiguous(dev, page, size);
page              111 kernel/dma/direct.c 		page = NULL;
page              126 kernel/dma/direct.c 	return page;
page              132 kernel/dma/direct.c 	struct page *page;
page              135 kernel/dma/direct.c 	page = __dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
page              136 kernel/dma/direct.c 	if (!page)
page              142 kernel/dma/direct.c 		if (!PageHighMem(page))
page              143 kernel/dma/direct.c 			arch_dma_prep_coherent(page, size);
page              144 kernel/dma/direct.c 		*dma_handle = phys_to_dma(dev, page_to_phys(page));
page              146 kernel/dma/direct.c 		return page;
page              149 kernel/dma/direct.c 	if (PageHighMem(page)) {
page              157 kernel/dma/direct.c 		__dma_direct_free_pages(dev, size, page);
page              161 kernel/dma/direct.c 	ret = page_address(page);
page              164 kernel/dma/direct.c 		*dma_handle = __phys_to_dma(dev, page_to_phys(page));
page              166 kernel/dma/direct.c 		*dma_handle = phys_to_dma(dev, page_to_phys(page));
page              172 kernel/dma/direct.c 		arch_dma_prep_coherent(page, size);
page              179 kernel/dma/direct.c void __dma_direct_free_pages(struct device *dev, size_t size, struct page *page)
page              181 kernel/dma/direct.c 	dma_free_contiguous(dev, page, size);
page              333 kernel/dma/direct.c dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
page              337 kernel/dma/direct.c 	phys_addr_t phys = page_to_phys(page) + offset;
page               14 kernel/dma/dummy.c static dma_addr_t dma_dummy_map_page(struct device *dev, struct page *page,
page              115 kernel/dma/mapping.c 	struct page *page;
page              128 kernel/dma/mapping.c 		page = pfn_to_page(pfn);
page              130 kernel/dma/mapping.c 		page = virt_to_page(cpu_addr);
page              135 kernel/dma/mapping.c 		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
page               14 kernel/dma/remap.c struct page **dma_common_find_pages(void *cpu_addr)
page               23 kernel/dma/remap.c static struct vm_struct *__dma_common_pages_remap(struct page **pages,
page               44 kernel/dma/remap.c void *dma_common_pages_remap(struct page **pages, size_t size,
page               62 kernel/dma/remap.c void *dma_common_contiguous_remap(struct page *page, size_t size,
page               66 kernel/dma/remap.c 	struct page **pages;
page               69 kernel/dma/remap.c 	pages = kmalloc(sizeof(struct page *) << get_order(size), GFP_KERNEL);
page               74 kernel/dma/remap.c 		pages[i] = nth_page(page, i);
page              127 kernel/dma/remap.c 	struct page *page;
page              132 kernel/dma/remap.c 		page = dma_alloc_from_contiguous(NULL, nr_pages,
page              135 kernel/dma/remap.c 		page = alloc_pages(dma_atomic_pool_gfp(), pool_size_order);
page              136 kernel/dma/remap.c 	if (!page)
page              139 kernel/dma/remap.c 	arch_dma_prep_coherent(page, atomic_pool_size);
page              145 kernel/dma/remap.c 	addr = dma_common_contiguous_remap(page, atomic_pool_size,
page              152 kernel/dma/remap.c 				page_to_phys(page), atomic_pool_size, -1);
page              167 kernel/dma/remap.c 	if (!dma_release_from_contiguous(NULL, page, nr_pages))
page              168 kernel/dma/remap.c 		__free_pages(page, pool_size_order);
page              184 kernel/dma/remap.c void *dma_alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
page              217 kernel/dma/remap.c 	struct page *page = NULL;
page              223 kernel/dma/remap.c 		ret = dma_alloc_from_pool(size, &page, flags);
page              229 kernel/dma/remap.c 	page = __dma_direct_alloc_pages(dev, size, dma_handle, flags, attrs);
page              230 kernel/dma/remap.c 	if (!page)
page              234 kernel/dma/remap.c 	arch_dma_prep_coherent(page, size);
page              237 kernel/dma/remap.c 	ret = dma_common_contiguous_remap(page, size,
page              241 kernel/dma/remap.c 		__dma_direct_free_pages(dev, size, page);
page              247 kernel/dma/remap.c 	*dma_handle = phys_to_dma(dev, page_to_phys(page));
page              256 kernel/dma/remap.c 		struct page *page = pfn_to_page(__phys_to_pfn(phys));
page              259 kernel/dma/remap.c 		__dma_direct_free_pages(dev, size, page);
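
dma_common_contiguous_remap() above expands one physically contiguous allocation into a struct page * array with nth_page() so the range can be given a second, differently-protected virtual mapping. A sketch of that trick using vmap(); demo_remap_contiguous() is hypothetical:

	#include <linux/mm.h>
	#include <linux/slab.h>
	#include <linux/vmalloc.h>

	static void *demo_remap_contiguous(struct page *page, size_t size, pgprot_t prot)
	{
		unsigned int i, count = size >> PAGE_SHIFT;
		struct page **pages;
		void *vaddr;

		pages = kmalloc_array(count, sizeof(*pages), GFP_KERNEL);
		if (!pages)
			return NULL;
		for (i = 0; i < count; i++)
			pages[i] = nth_page(page, i);
		vaddr = vmap(pages, count, VM_MAP, prot);
		kfree(pages);			/* vmap() does not retain the array */
		return vaddr;
	}
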
page               29 kernel/dma/virt.c static dma_addr_t dma_virt_map_page(struct device *dev, struct page *page,
page               34 kernel/dma/virt.c 	return (uintptr_t)(page_address(page) + offset);
page             5451 kernel/events/core.c 	vmf->page = perf_mmap_to_page(rb, vmf->pgoff);
page             5452 kernel/events/core.c 	if (!vmf->page)
page             5455 kernel/events/core.c 	get_page(vmf->page);
page             5456 kernel/events/core.c 	vmf->page->mapping = vmf->vma->vm_file->f_mapping;
page             5457 kernel/events/core.c 	vmf->page->index   = vmf->pgoff;
page             6527 kernel/events/core.c 	struct page *p = NULL;
page             9920 kernel/events/core.c 				    char *page)
page             9924 kernel/events/core.c 	return snprintf(page, PAGE_SIZE - 1, "%d\n", pmu->nr_addr_filters);
page             9931 kernel/events/core.c type_show(struct device *dev, struct device_attribute *attr, char *page)
page             9935 kernel/events/core.c 	return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type);
page             9942 kernel/events/core.c 				char *page)
page             9946 kernel/events/core.c 	return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->hrtimer_interval_ms);
page             12259 kernel/events/core.c 			      char *page)
page             12265 kernel/events/core.c 		return sprintf(page, "%s\n", pmu_attr->event_str);
page               95 kernel/events/internal.h extern struct page *
page              145 kernel/events/internal.h 			handle->page++;					\
page              146 kernel/events/internal.h 			handle->page &= rb->nr_pages - 1;		\
page              147 kernel/events/internal.h 			handle->addr = rb->data_pages[handle->page];	\
page              234 kernel/events/ring_buffer.c 	handle->page = (offset >> page_shift) & (rb->nr_pages - 1);
page              236 kernel/events/ring_buffer.c 	handle->addr = rb->data_pages[handle->page] + offset;
page              567 kernel/events/ring_buffer.c static struct page *rb_alloc_aux_page(int node, int order)
page              569 kernel/events/ring_buffer.c 	struct page *page;
page              575 kernel/events/ring_buffer.c 		page = alloc_pages_node(node, PERF_AUX_GFP, order);
page              576 kernel/events/ring_buffer.c 	} while (!page && order--);
page              578 kernel/events/ring_buffer.c 	if (page && order) {
page              585 kernel/events/ring_buffer.c 		split_page(page, order);
page              586 kernel/events/ring_buffer.c 		SetPagePrivate(page);
page              587 kernel/events/ring_buffer.c 		set_page_private(page, order);
page              590 kernel/events/ring_buffer.c 	return page;
page              595 kernel/events/ring_buffer.c 	struct page *page = virt_to_page(rb->aux_pages[idx]);
page              597 kernel/events/ring_buffer.c 	ClearPagePrivate(page);
page              598 kernel/events/ring_buffer.c 	page->mapping = NULL;
page              599 kernel/events/ring_buffer.c 	__free_page(page);
page              663 kernel/events/ring_buffer.c 		struct page *page;
page              667 kernel/events/ring_buffer.c 		page = rb_alloc_aux_page(node, order);
page              668 kernel/events/ring_buffer.c 		if (!page)
page              671 kernel/events/ring_buffer.c 		for (last = rb->aux_nr_pages + (1 << page_private(page));
page              673 kernel/events/ring_buffer.c 			rb->aux_pages[rb->aux_nr_pages] = page_address(page++);
page              684 kernel/events/ring_buffer.c 		struct page *page = virt_to_page(rb->aux_pages[0]);
page              686 kernel/events/ring_buffer.c 		if (page_private(page) != max_order)
page              732 kernel/events/ring_buffer.c static struct page *
page              746 kernel/events/ring_buffer.c 	struct page *page;
page              750 kernel/events/ring_buffer.c 	page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
page              751 kernel/events/ring_buffer.c 	if (!page)
page              754 kernel/events/ring_buffer.c 	return page_address(page);
page              804 kernel/events/ring_buffer.c 	struct page *page = virt_to_page((void *)addr);
page              806 kernel/events/ring_buffer.c 	page->mapping = NULL;
page              807 kernel/events/ring_buffer.c 	__free_page(page);
page              826 kernel/events/ring_buffer.c static struct page *
page              838 kernel/events/ring_buffer.c 	struct page *page = vmalloc_to_page(addr);
page              840 kernel/events/ring_buffer.c 	page->mapping = NULL;
page              905 kernel/events/ring_buffer.c struct page *
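
rb_alloc_aux_page() above tries a high-order allocation and retries at ever smaller orders; on success it split_page()s the block so each constituent order-0 page can be freed on its own, stashing the original order in page_private. A sketch of that fallback loop (GFP flags simplified; the real code uses PERF_AUX_GFP):

	#include <linux/gfp.h>
	#include <linux/mm.h>

	static struct page *demo_alloc_order_or_less(int node, int order)
	{
		struct page *page;

		do {	/* order-- only runs after a failed attempt */
			page = alloc_pages_node(node, GFP_KERNEL | __GFP_NOWARN, order);
		} while (!page && order--);

		if (page && order > 0) {
			split_page(page, order);	/* now 1 << order independent pages */
			set_page_private(page, order);	/* remember the size for the free path */
			SetPagePrivate(page);
		}
		return page;
	}
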
page              104 kernel/events/uprobes.c 	struct page 			*pages[2];
page              155 kernel/events/uprobes.c 				struct page *old_page, struct page *new_page)
page              159 kernel/events/uprobes.c 		.page = compound_head(old_page),
page              250 kernel/events/uprobes.c static void copy_from_page(struct page *page, unsigned long vaddr, void *dst, int len)
page              252 kernel/events/uprobes.c 	void *kaddr = kmap_atomic(page);
page              257 kernel/events/uprobes.c static void copy_to_page(struct page *page, unsigned long vaddr, const void *src, int len)
page              259 kernel/events/uprobes.c 	void *kaddr = kmap_atomic(page);
page              264 kernel/events/uprobes.c static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *new_opcode)
page              278 kernel/events/uprobes.c 	copy_from_page(page, vaddr, &old_opcode, UPROBE_SWBP_INSN_SIZE);
page              377 kernel/events/uprobes.c 	struct page *page;
page              386 kernel/events/uprobes.c 			FOLL_WRITE, &page, &vma, NULL);
page              395 kernel/events/uprobes.c 	kaddr = kmap_atomic(page);
page              409 kernel/events/uprobes.c 	put_page(page);
page              473 kernel/events/uprobes.c 	struct page *old_page, *new_page;
page              528 kernel/events/uprobes.c 		struct page *orig_page;
page              800 kernel/events/uprobes.c 	struct page *page;
page              807 kernel/events/uprobes.c 		page = read_mapping_page(mapping, offset >> PAGE_SHIFT, filp);
page              809 kernel/events/uprobes.c 		page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
page              810 kernel/events/uprobes.c 	if (IS_ERR(page))
page              811 kernel/events/uprobes.c 		return PTR_ERR(page);
page              813 kernel/events/uprobes.c 	copy_from_page(page, offset, insn, nbytes);
page              814 kernel/events/uprobes.c 	put_page(page);
page             1675 kernel/events/uprobes.c void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
page             1679 kernel/events/uprobes.c 	copy_to_page(page, vaddr, src, len);
page             1687 kernel/events/uprobes.c 	flush_dcache_page(page);
page             2018 kernel/events/uprobes.c 	struct page *page;
page             2038 kernel/events/uprobes.c 	result = get_user_pages_remote(NULL, mm, vaddr, 1, FOLL_FORCE, &page,
page             2043 kernel/events/uprobes.c 	copy_from_page(page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
page             2044 kernel/events/uprobes.c 	put_page(page);
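
copy_from_page() and copy_to_page() above are thin kmap_atomic() wrappers; the offset within the page is recovered from the user virtual address with vaddr & ~PAGE_MASK. A sketch of the read direction:

	#include <linux/highmem.h>
	#include <linux/mm.h>
	#include <linux/string.h>

	static void demo_copy_from_page(struct page *page, unsigned long vaddr,
					void *dst, int len)
	{
		void *kaddr = kmap_atomic(page);

		memcpy(dst, kaddr + (vaddr & ~PAGE_MASK), len);
		kunmap_atomic(kaddr);
	}
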
page              840 kernel/exit.c  	if (tsk->task_frag.page)
page              841 kernel/exit.c  		put_page(tsk->task_frag.page);
page              257 kernel/fork.c  	struct page *page = alloc_pages_node(node, THREADINFO_GFP,
page              260 kernel/fork.c  	if (likely(page)) {
page              261 kernel/fork.c  		tsk->stack = page_address(page);
page              392 kernel/fork.c  		struct page *first_page = virt_to_page(stack);
page              928 kernel/fork.c  	tsk->task_frag.page = NULL;
page              576 kernel/futex.c 	struct page *page, *tail;
page              613 kernel/futex.c 	err = get_user_pages_fast(address, 1, FOLL_WRITE, &page);
page              619 kernel/futex.c 		err = get_user_pages_fast(address, 1, 0, &page);
page              645 kernel/futex.c 	tail = page;
page              646 kernel/futex.c 	page = compound_head(page);
page              647 kernel/futex.c 	mapping = READ_ONCE(page->mapping);
page              672 kernel/futex.c 		lock_page(page);
page              673 kernel/futex.c 		shmem_swizzled = PageSwapCache(page) || page->mapping;
page              674 kernel/futex.c 		unlock_page(page);
page              675 kernel/futex.c 		put_page(page);
page              693 kernel/futex.c 	if (PageAnon(page)) {
page              723 kernel/futex.c 		if (READ_ONCE(page->mapping) != mapping) {
page              725 kernel/futex.c 			put_page(page);
page              733 kernel/futex.c 			put_page(page);
page              747 kernel/futex.c 	put_page(page);
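
get_futex_key() above pins the user page with a writable fast GUP first and, on -EFAULT, retries read-only before inspecting the (head) page's mapping. A sketch of just the pinning step; demo_pin_user_page() is hypothetical:

	#include <linux/mm.h>

	static int demo_pin_user_page(unsigned long address, struct page **pagep)
	{
		int ret = get_user_pages_fast(address, 1, FOLL_WRITE, pagep);

		if (ret == -EFAULT)	/* read-only VMA: retry without write intent */
			ret = get_user_pages_fast(address, 1, 0, pagep);
		if (ret < 0)
			return ret;
		*pagep = compound_head(*pagep);	/* normalise THP tails, as futex.c does */
		return 0;
	}
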
page              278 kernel/kcov.c  	struct page *page;
page              296 kernel/kcov.c  			page = vmalloc_to_page(kcov->area + off);
page              297 kernel/kcov.c  			if (vm_insert_page(vma, vma->vm_start + off, page))
page              144 kernel/kexec_core.c static struct page *kimage_alloc_page(struct kimage *image,
page              299 kernel/kexec_core.c static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
page              301 kernel/kexec_core.c 	struct page *pages;
page              326 kernel/kexec_core.c static void kimage_free_pages(struct page *page)
page              330 kernel/kexec_core.c 	order = page_private(page);
page              333 kernel/kexec_core.c 	arch_kexec_pre_free_pages(page_address(page), count);
page              336 kernel/kexec_core.c 		ClearPageReserved(page + i);
page              337 kernel/kexec_core.c 	__free_pages(page, order);
page              342 kernel/kexec_core.c 	struct page *page, *next;
page              344 kernel/kexec_core.c 	list_for_each_entry_safe(page, next, list, lru) {
page              345 kernel/kexec_core.c 		list_del(&page->lru);
page              346 kernel/kexec_core.c 		kimage_free_pages(page);
page              350 kernel/kexec_core.c static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
page              367 kernel/kexec_core.c 	struct page *pages;
page              416 kernel/kexec_core.c static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
page              441 kernel/kexec_core.c 	struct page *pages;
page              483 kernel/kexec_core.c struct page *kimage_alloc_control_pages(struct kimage *image,
page              486 kernel/kexec_core.c 	struct page *pages = NULL;
page              502 kernel/kexec_core.c 	struct page *vmcoreinfo_page;
page              541 kernel/kexec_core.c 		struct page *page;
page              543 kernel/kexec_core.c 		page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
page              544 kernel/kexec_core.c 		if (!page)
page              547 kernel/kexec_core.c 		ind_page = page_address(page);
page              572 kernel/kexec_core.c static int kimage_add_page(struct kimage *image, unsigned long page)
page              576 kernel/kexec_core.c 	page &= PAGE_MASK;
page              577 kernel/kexec_core.c 	result = kimage_add_entry(image, page | IND_SOURCE);
page              607 kernel/kexec_core.c 	struct page *page;
page              609 kernel/kexec_core.c 	page = boot_pfn_to_page(entry >> PAGE_SHIFT);
page              610 kernel/kexec_core.c 	kimage_free_pages(page);
page              660 kernel/kexec_core.c 					unsigned long page)
page              669 kernel/kexec_core.c 			if (page == destination)
page              678 kernel/kexec_core.c static struct page *kimage_alloc_page(struct kimage *image,
page              700 kernel/kexec_core.c 	struct page *page;
page              707 kernel/kexec_core.c 	list_for_each_entry(page, &image->dest_pages, lru) {
page              708 kernel/kexec_core.c 		addr = page_to_boot_pfn(page) << PAGE_SHIFT;
page              710 kernel/kexec_core.c 			list_del(&page->lru);
page              711 kernel/kexec_core.c 			return page;
page              714 kernel/kexec_core.c 	page = NULL;
page              719 kernel/kexec_core.c 		page = kimage_alloc_pages(gfp_mask, 0);
page              720 kernel/kexec_core.c 		if (!page)
page              723 kernel/kexec_core.c 		if (page_to_boot_pfn(page) >
page              725 kernel/kexec_core.c 			list_add(&page->lru, &image->unusable_pages);
page              728 kernel/kexec_core.c 		addr = page_to_boot_pfn(page) << PAGE_SHIFT;
page              748 kernel/kexec_core.c 			struct page *old_page;
page              752 kernel/kexec_core.c 			copy_highpage(page, old_page);
page              765 kernel/kexec_core.c 			page = old_page;
page              769 kernel/kexec_core.c 		list_add(&page->lru, &image->dest_pages);
page              772 kernel/kexec_core.c 	return page;
page              798 kernel/kexec_core.c 		struct page *page;
page              802 kernel/kexec_core.c 		page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
page              803 kernel/kexec_core.c 		if (!page) {
page              807 kernel/kexec_core.c 		result = kimage_add_page(image, page_to_boot_pfn(page)
page              812 kernel/kexec_core.c 		ptr = kmap(page);
page              825 kernel/kexec_core.c 		kunmap(page);
page              866 kernel/kexec_core.c 		struct page *page;
page              870 kernel/kexec_core.c 		page = boot_pfn_to_page(maddr >> PAGE_SHIFT);
page              871 kernel/kexec_core.c 		if (!page) {
page              875 kernel/kexec_core.c 		arch_kexec_post_alloc_pages(page_address(page), 1, 0);
page              876 kernel/kexec_core.c 		ptr = kmap(page);
page              891 kernel/kexec_core.c 		kexec_flush_icache_page(page);
page              892 kernel/kexec_core.c 		kunmap(page);
page              893 kernel/kexec_core.c 		arch_kexec_pre_free_pages(page_address(page), 1);
page              112 kernel/kprobes.c void __weak free_insn_page(void *page)
page              114 kernel/kprobes.c 	module_memfree(page);
page              687 kernel/locking/locktorture.c static void __torture_print_stats(char *page,
page              705 kernel/locking/locktorture.c 	page += sprintf(page,
page               80 kernel/power/snapshot.c static int swsusp_page_is_free(struct page *);
page               81 kernel/power/snapshot.c static void swsusp_set_page_forbidden(struct page *);
page               82 kernel/power/snapshot.c static void swsusp_unset_page_forbidden(struct page *);
page              192 kernel/power/snapshot.c static struct page *alloc_image_page(gfp_t gfp_mask)
page              194 kernel/power/snapshot.c 	struct page *page;
page              196 kernel/power/snapshot.c 	page = alloc_page(gfp_mask);
page              197 kernel/power/snapshot.c 	if (page) {
page              198 kernel/power/snapshot.c 		swsusp_set_page_forbidden(page);
page              199 kernel/power/snapshot.c 		swsusp_set_page_free(page);
page              201 kernel/power/snapshot.c 	return page;
page              222 kernel/power/snapshot.c 	struct page *page;
page              226 kernel/power/snapshot.c 	page = virt_to_page(addr);
page              228 kernel/power/snapshot.c 	swsusp_unset_page_forbidden(page);
page              230 kernel/power/snapshot.c 		swsusp_unset_page_free(page);
page              232 kernel/power/snapshot.c 	__free_page(page);
page             1000 kernel/power/snapshot.c void swsusp_set_page_free(struct page *page)
page             1003 kernel/power/snapshot.c 		memory_bm_set_bit(free_pages_map, page_to_pfn(page));
page             1006 kernel/power/snapshot.c static int swsusp_page_is_free(struct page *page)
page             1009 kernel/power/snapshot.c 		memory_bm_test_bit(free_pages_map, page_to_pfn(page)) : 0;
page             1012 kernel/power/snapshot.c void swsusp_unset_page_free(struct page *page)
page             1015 kernel/power/snapshot.c 		memory_bm_clear_bit(free_pages_map, page_to_pfn(page));
page             1018 kernel/power/snapshot.c static void swsusp_set_page_forbidden(struct page *page)
page             1021 kernel/power/snapshot.c 		memory_bm_set_bit(forbidden_pages_map, page_to_pfn(page));
page             1024 kernel/power/snapshot.c int swsusp_page_is_forbidden(struct page *page)
page             1027 kernel/power/snapshot.c 		memory_bm_test_bit(forbidden_pages_map, page_to_pfn(page)) : 0;
page             1030 kernel/power/snapshot.c static void swsusp_unset_page_forbidden(struct page *page)
page             1033 kernel/power/snapshot.c 		memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page));
page             1219 kernel/power/snapshot.c static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
page             1221 kernel/power/snapshot.c 	struct page *page;
page             1226 kernel/power/snapshot.c 	page = pfn_to_online_page(pfn);
page             1227 kernel/power/snapshot.c 	if (!page || page_zone(page) != zone)
page             1230 kernel/power/snapshot.c 	BUG_ON(!PageHighMem(page));
page             1232 kernel/power/snapshot.c 	if (swsusp_page_is_forbidden(page) ||  swsusp_page_is_free(page))
page             1235 kernel/power/snapshot.c 	if (PageReserved(page) || PageOffline(page))
page             1238 kernel/power/snapshot.c 	if (page_is_guard(page))
page             1241 kernel/power/snapshot.c 	return page;
page             1283 kernel/power/snapshot.c static struct page *saveable_page(struct zone *zone, unsigned long pfn)
page             1285 kernel/power/snapshot.c 	struct page *page;
page             1290 kernel/power/snapshot.c 	page = pfn_to_online_page(pfn);
page             1291 kernel/power/snapshot.c 	if (!page || page_zone(page) != zone)
page             1294 kernel/power/snapshot.c 	BUG_ON(PageHighMem(page));
page             1296 kernel/power/snapshot.c 	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
page             1299 kernel/power/snapshot.c 	if (PageOffline(page))
page             1302 kernel/power/snapshot.c 	if (PageReserved(page)
page             1303 kernel/power/snapshot.c 	    && (!kernel_page_present(page) || pfn_is_nosave(pfn)))
page             1306 kernel/power/snapshot.c 	if (page_is_guard(page))
page             1309 kernel/power/snapshot.c 	return page;
page             1354 kernel/power/snapshot.c static void safe_copy_page(void *dst, struct page *s_page)
page             1366 kernel/power/snapshot.c static inline struct page *page_is_saveable(struct zone *zone, unsigned long pfn)
page             1374 kernel/power/snapshot.c 	struct page *s_page, *d_page;
page             1491 kernel/power/snapshot.c 		struct page *page = pfn_to_page(fr_pfn);
page             1495 kernel/power/snapshot.c 		hibernate_restore_unprotect_page(page_address(page));
page             1496 kernel/power/snapshot.c 		__free_page(page);
page             1526 kernel/power/snapshot.c 		struct page *page;
page             1528 kernel/power/snapshot.c 		page = alloc_image_page(mask);
page             1529 kernel/power/snapshot.c 		if (!page)
page             1531 kernel/power/snapshot.c 		memory_bm_set_bit(&copy_bm, page_to_pfn(page));
page             1532 kernel/power/snapshot.c 		if (PageHighMem(page))
page             1628 kernel/power/snapshot.c 		struct page *page = pfn_to_page(pfn);
page             1630 kernel/power/snapshot.c 		if (PageHighMem(page)) {
page             1642 kernel/power/snapshot.c 		swsusp_unset_page_forbidden(page);
page             1643 kernel/power/snapshot.c 		swsusp_unset_page_free(page);
page             1644 kernel/power/snapshot.c 		__free_page(page);
page             1920 kernel/power/snapshot.c 		struct page *page;
page             1922 kernel/power/snapshot.c 		page = alloc_image_page(__GFP_HIGHMEM|__GFP_KSWAPD_RECLAIM);
page             1923 kernel/power/snapshot.c 		memory_bm_set_bit(bm, page_to_pfn(page));
page             1959 kernel/power/snapshot.c 			struct page *page;
page             1961 kernel/power/snapshot.c 			page = alloc_image_page(GFP_ATOMIC);
page             1962 kernel/power/snapshot.c 			if (!page)
page             1964 kernel/power/snapshot.c 			memory_bm_set_bit(copy_bm, page_to_pfn(page));
page             2119 kernel/power/snapshot.c 		struct page *page;
page             2121 kernel/power/snapshot.c 		page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
page             2122 kernel/power/snapshot.c 		if (PageHighMem(page)) {
page             2130 kernel/power/snapshot.c 			kaddr = kmap_atomic(page);
page             2135 kernel/power/snapshot.c 			handle->buffer = page_address(page);
page             2244 kernel/power/snapshot.c 	struct page *copy_page;	/* data is here now */
page             2245 kernel/power/snapshot.c 	struct page *orig_page;	/* data was here before the suspend */
page             2315 kernel/power/snapshot.c 		struct page *page;
page             2317 kernel/power/snapshot.c 		page = alloc_page(__GFP_HIGHMEM);
page             2318 kernel/power/snapshot.c 		if (!swsusp_page_is_free(page)) {
page             2320 kernel/power/snapshot.c 			memory_bm_set_bit(bm, page_to_pfn(page));
page             2324 kernel/power/snapshot.c 		swsusp_set_page_forbidden(page);
page             2325 kernel/power/snapshot.c 		swsusp_set_page_free(page);
page             2332 kernel/power/snapshot.c static struct page *last_highmem_page;
page             2352 kernel/power/snapshot.c static void *get_highmem_page_buffer(struct page *page,
page             2358 kernel/power/snapshot.c 	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) {
page             2363 kernel/power/snapshot.c 		last_highmem_page = page;
page             2375 kernel/power/snapshot.c 	pbe->orig_page = page;
page             2377 kernel/power/snapshot.c 		struct page *tmp;
page             2434 kernel/power/snapshot.c static inline void *get_highmem_page_buffer(struct page *page,
page             2541 kernel/power/snapshot.c 	struct page *page;
page             2547 kernel/power/snapshot.c 	page = pfn_to_page(pfn);
page             2548 kernel/power/snapshot.c 	if (PageHighMem(page))
page             2549 kernel/power/snapshot.c 		return get_highmem_page_buffer(page, ca);
page             2551 kernel/power/snapshot.c 	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page))
page             2556 kernel/power/snapshot.c 		return page_address(page);
page             2567 kernel/power/snapshot.c 	pbe->orig_address = page_address(page);
page             2691 kernel/power/snapshot.c static inline void swap_two_pages_data(struct page *p1, struct page *p2,
page              241 kernel/power/swap.c 	struct page *page = bio_first_page_all(bio);
page              250 kernel/power/swap.c 		put_page(page);
page              252 kernel/power/swap.c 		flush_icache_range((unsigned long)page_address(page),
page              253 kernel/power/swap.c 				   (unsigned long)page_address(page) + PAGE_SIZE);
page              266 kernel/power/swap.c 	struct page *page = virt_to_page(addr);
page              275 kernel/power/swap.c 	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
page              679 kernel/power/swap.c 	unsigned char *page = NULL;
page              692 kernel/power/swap.c 	page = (void *)__get_free_page(GFP_NOIO | __GFP_HIGH);
page              693 kernel/power/swap.c 	if (!page) {
page              836 kernel/power/swap.c 				memcpy(page, data[thr].cmp + off, PAGE_SIZE);
page              838 kernel/power/swap.c 				ret = swap_write_page(handle, page, &hb);
page              868 kernel/power/swap.c 	if (page) free_page((unsigned long)page);
page             1170 kernel/power/swap.c 	unsigned char **page = NULL;
page             1183 kernel/power/swap.c 	page = vmalloc(array_size(LZO_MAX_RD_PAGES, sizeof(*page)));
page             1184 kernel/power/swap.c 	if (!page) {
page             1260 kernel/power/swap.c 		page[i] = (void *)__get_free_page(i < LZO_CMP_PAGES ?
page             1265 kernel/power/swap.c 		if (!page[i]) {
page             1293 kernel/power/swap.c 			ret = swap_read_page(handle, page[ring], &hb);
page             1336 kernel/power/swap.c 			data[thr].cmp_len = *(size_t *)page[pg];
page             1359 kernel/power/swap.c 				       page[pg], PAGE_SIZE);
page             1451 kernel/power/swap.c 		free_page((unsigned long)page[i]);
page             1463 kernel/power/swap.c 	vfree(page);
page              336 kernel/profile.c 	struct page *page;
page              344 kernel/profile.c 			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[i]);
page              346 kernel/profile.c 			__free_page(page);
page              355 kernel/profile.c 	struct page *page;
page              363 kernel/profile.c 		page = __alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
page              364 kernel/profile.c 		if (!page) {
page              368 kernel/profile.c 		per_cpu(cpu_profile_hits, cpu)[i] = page_address(page);
page               44 kernel/relay.c 	struct page *page;
page               51 kernel/relay.c 	page = vmalloc_to_page(buf->start + (pgoff << PAGE_SHIFT));
page               52 kernel/relay.c 	if (!page)
page               54 kernel/relay.c 	get_page(page);
page               55 kernel/relay.c 	vmf->page = page;
page               71 kernel/relay.c static struct page **relay_alloc_page_array(unsigned int n_pages)
page               73 kernel/relay.c 	const size_t pa_size = n_pages * sizeof(struct page *);
page               82 kernel/relay.c static void relay_free_page_array(struct page **array)
page             1180 kernel/relay.c 	rbuf = (struct rchan_buf *)page_private(buf->page);
page             1214 kernel/relay.c 	struct page *pages[PIPE_DEF_BUFFERS];
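
relay's fault handler above backs an mmap of a vmalloc()ed buffer: the faulting page offset is translated with vmalloc_to_page() and handed back with an extra reference. A sketch of that handler shape; demo_vmalloc_fault() and the vbase parameter are hypothetical:

	#include <linux/mm.h>
	#include <linux/vmalloc.h>

	static vm_fault_t demo_vmalloc_fault(struct vm_fault *vmf, void *vbase)
	{
		struct page *page = vmalloc_to_page(vbase + (vmf->pgoff << PAGE_SHIFT));

		if (!page)
			return VM_FAULT_SIGBUS;
		get_page(page);		/* the core drops this ref when done */
		vmf->page = page;
		return 0;
	}
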
page             1408 kernel/sched/fair.c bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
page             1416 kernel/sched/fair.c 	last_cpupid = page_cpupid_xchg_last(page, this_cpupid);
page              325 kernel/trace/ring_buffer.c 	struct buffer_data_page *page;	/* Actual data page */
page              354 kernel/trace/ring_buffer.c 	free_page((unsigned long)bpage->page);
page              870 kernel/trace/ring_buffer.c 		struct buffer_page *page, struct list_head *list)
page              876 kernel/trace/ring_buffer.c 	if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
page              889 kernel/trace/ring_buffer.c static bool rb_is_reader_page(struct buffer_page *page)
page              891 kernel/trace/ring_buffer.c 	struct list_head *list = page->list.prev;
page              893 kernel/trace/ring_buffer.c 	return rb_list_head(list->next) != &page->list;
page             1010 kernel/trace/ring_buffer.c 	struct buffer_page *page;
page             1022 kernel/trace/ring_buffer.c 	page = head = cpu_buffer->head_page;
page             1031 kernel/trace/ring_buffer.c 			if (rb_is_head_page(cpu_buffer, page, page->list.prev)) {
page             1032 kernel/trace/ring_buffer.c 				cpu_buffer->head_page = page;
page             1033 kernel/trace/ring_buffer.c 				return page;
page             1035 kernel/trace/ring_buffer.c 			rb_inc_page(cpu_buffer, &page);
page             1036 kernel/trace/ring_buffer.c 		} while (page != head);
page             1116 kernel/trace/ring_buffer.c 		local_set(&next_page->page->commit, 0);
page             1226 kernel/trace/ring_buffer.c 		struct page *page;
page             1235 kernel/trace/ring_buffer.c 		page = alloc_pages_node(cpu_to_node(cpu), mflags, 0);
page             1236 kernel/trace/ring_buffer.c 		if (!page)
page             1238 kernel/trace/ring_buffer.c 		bpage->page = page_address(page);
page             1239 kernel/trace/ring_buffer.c 		rb_init_page(bpage->page);
page             1290 kernel/trace/ring_buffer.c 	struct page *page;
page             1317 kernel/trace/ring_buffer.c 	page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
page             1318 kernel/trace/ring_buffer.c 	if (!page)
page             1320 kernel/trace/ring_buffer.c 	bpage->page = page_address(page);
page             1321 kernel/trace/ring_buffer.c 	rb_init_page(bpage->page);
page             1907 kernel/trace/ring_buffer.c 	return bpage->page->data + index;
page             1925 kernel/trace/ring_buffer.c 	return local_read(&bpage->page->commit);
page             1963 kernel/trace/ring_buffer.c 	iter->read_stamp = iter->head_page->page->time_stamp;
page             2430 kernel/trace/ring_buffer.c 	if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
page             2482 kernel/trace/ring_buffer.c 		local_set(&cpu_buffer->commit_page->page->commit,
page             2488 kernel/trace/ring_buffer.c 				cpu_buffer->commit_page->page->time_stamp;
page             2495 kernel/trace/ring_buffer.c 		local_set(&cpu_buffer->commit_page->page->commit,
page             2498 kernel/trace/ring_buffer.c 			   local_read(&cpu_buffer->commit_page->page->commit) &
page             2570 kernel/trace/ring_buffer.c 	return cpu_buffer->commit_page->page == (void *)addr &&
page             2591 kernel/trace/ring_buffer.c 				cpu_buffer->commit_page->page->time_stamp;
page             2862 kernel/trace/ring_buffer.c 		tail_page->page->time_stamp = info->ts;
page             3023 kernel/trace/ring_buffer.c 	if (likely(bpage->page == (void *)addr)) {
page             3035 kernel/trace/ring_buffer.c 		if (bpage->page == (void *)addr) {
page             3369 kernel/trace/ring_buffer.c 		ret = bpage->page->time_stamp;
page             3558 kernel/trace/ring_buffer.c 		iter->read_stamp = iter->head_page->page->time_stamp;
page             3720 kernel/trace/ring_buffer.c 	local_set(&cpu_buffer->reader_page->page->commit, 0);
page             3798 kernel/trace/ring_buffer.c 		cpu_buffer->read_stamp = reader->page->time_stamp;
page             4359 kernel/trace/ring_buffer.c 	local_set(&cpu_buffer->head_page->page->commit, 0);
page             4370 kernel/trace/ring_buffer.c 	local_set(&cpu_buffer->reader_page->page->commit, 0);
page             4598 kernel/trace/ring_buffer.c 	struct page *page;
page             4618 kernel/trace/ring_buffer.c 	page = alloc_pages_node(cpu_to_node(cpu),
page             4620 kernel/trace/ring_buffer.c 	if (!page)
page             4623 kernel/trace/ring_buffer.c 	bpage = page_address(page);
page             4644 kernel/trace/ring_buffer.c 	struct page *page = virt_to_page(bpage);
page             4648 kernel/trace/ring_buffer.c 	if (page_ref_count(page) > 1)
page             4756 kernel/trace/ring_buffer.c 		struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
page             4814 kernel/trace/ring_buffer.c 		bpage = reader->page;
page             4815 kernel/trace/ring_buffer.c 		reader->page = *data_page;
page             2392 kernel/trace/trace.c 	struct page *page;
page             2401 kernel/trace/trace.c 		page = alloc_pages_node(cpu_to_node(cpu),
page             2403 kernel/trace/trace.c 		if (!page)
page             2406 kernel/trace/trace.c 		event = page_address(page);
page             6169 kernel/trace/trace.c 	struct page *pages_def[PIPE_DEF_BUFFERS];
page             7350 kernel/trace/trace.c 	void			*page;
page             7359 kernel/trace/trace.c 	ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
page             7413 kernel/trace/trace.c 	struct page *pages_def[PIPE_DEF_BUFFERS];
page             7447 kernel/trace/trace.c 		struct page *page;
page             7458 kernel/trace/trace.c 		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
page             7459 kernel/trace/trace.c 		if (IS_ERR(ref->page)) {
page             7460 kernel/trace/trace.c 			ret = PTR_ERR(ref->page);
page             7461 kernel/trace/trace.c 			ref->page = NULL;
page             7467 kernel/trace/trace.c 		r = ring_buffer_read_page(ref->buffer, &ref->page,
page             7471 kernel/trace/trace.c 						   ref->page);
page             7476 kernel/trace/trace.c 		page = virt_to_page(ref->page);
page             7478 kernel/trace/trace.c 		spd.pages[i] = page;
page              852 kernel/trace/trace_uprobe.c 		struct page *p = alloc_pages_node(cpu_to_node(cpu),
page              156 lib/iov_iter.c static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
page              178 lib/iov_iter.c 		kaddr = kmap_atomic(page);
page              209 lib/iov_iter.c 	kaddr = kmap(page);
page              226 lib/iov_iter.c 	kunmap(page);
page              240 lib/iov_iter.c static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
page              262 lib/iov_iter.c 		kaddr = kmap_atomic(page);
page              293 lib/iov_iter.c 	kaddr = kmap(page);
page              310 lib/iov_iter.c 	kunmap(page);
page              352 lib/iov_iter.c 			pipe->bufs[idx].page,
page              367 lib/iov_iter.c static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
page              388 lib/iov_iter.c 		if (offset == off && buf->page == page) {
page              401 lib/iov_iter.c 	get_page(buf->page = page);
page              457 lib/iov_iter.c static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
page              459 lib/iov_iter.c 	char *from = kmap_atomic(page);
page              464 lib/iov_iter.c static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
page              466 lib/iov_iter.c 	char *to = kmap_atomic(page);
page              471 lib/iov_iter.c static void memzero_page(struct page *page, size_t offset, size_t len)
page              473 lib/iov_iter.c 	char *addr = kmap_atomic(page);
page              522 lib/iov_iter.c 		struct page *page = alloc_page(GFP_USER);
page              523 lib/iov_iter.c 		if (!page)
page              527 lib/iov_iter.c 		pipe->bufs[idx].page = page;
page              555 lib/iov_iter.c 		memcpy_to_page(pipe->bufs[idx].page, off, addr, chunk);
page              589 lib/iov_iter.c 		char *p = kmap_atomic(pipe->bufs[idx].page);
page              631 lib/iov_iter.c static unsigned long memcpy_mcsafe_to_page(struct page *page, size_t offset,
page              637 lib/iov_iter.c 	to = kmap_atomic(page);
page              661 lib/iov_iter.c 		rem = memcpy_mcsafe_to_page(pipe->bufs[idx].page, off, addr,
page              863 lib/iov_iter.c static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
page              865 lib/iov_iter.c 	struct page *head;
page              878 lib/iov_iter.c 	head = compound_head(page);
page              879 lib/iov_iter.c 	v += (page - head) << PAGE_SHIFT;
page              887 lib/iov_iter.c size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
page              890 lib/iov_iter.c 	if (unlikely(!page_copy_sane(page, offset, bytes)))
page              893 lib/iov_iter.c 		void *kaddr = kmap_atomic(page);
page              900 lib/iov_iter.c 		return copy_page_to_iter_iovec(page, offset, bytes, i);
page              902 lib/iov_iter.c 		return copy_page_to_iter_pipe(page, offset, bytes, i);
page              906 lib/iov_iter.c size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
page              909 lib/iov_iter.c 	if (unlikely(!page_copy_sane(page, offset, bytes)))
page              916 lib/iov_iter.c 		void *kaddr = kmap_atomic(page);
page              921 lib/iov_iter.c 		return copy_page_from_iter_iovec(page, offset, bytes, i);
page              940 lib/iov_iter.c 		memzero_page(pipe->bufs[idx].page, off, chunk);
page              963 lib/iov_iter.c size_t iov_iter_copy_from_user_atomic(struct page *page,
page              966 lib/iov_iter.c 	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
page              967 lib/iov_iter.c 	if (unlikely(!page_copy_sane(page, offset, bytes))) {
page             1233 lib/iov_iter.c 				struct page **pages,
page             1245 lib/iov_iter.c 		get_page(*pages++ = pipe->bufs[idx].page);
page             1254 lib/iov_iter.c 		   struct page **pages, size_t maxsize, unsigned maxpages,
page             1276 lib/iov_iter.c 		   struct page **pages, size_t maxsize, unsigned maxpages,
page             1316 lib/iov_iter.c static struct page **get_pages_array(size_t n)
page             1318 lib/iov_iter.c 	return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL);
page             1322 lib/iov_iter.c 		   struct page ***pages, size_t maxsize,
page             1325 lib/iov_iter.c 	struct page **p;
page             1356 lib/iov_iter.c 		   struct page ***pages, size_t maxsize,
page             1359 lib/iov_iter.c 	struct page **p;
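
copy_page_to_iter() above validates the page with page_copy_sane() and then dispatches on the iterator type; for kernel-backed iterators it can simply map the page and reuse copy_to_iter(). A sketch of that shortcut, valid only where the copy cannot fault (kvec/bvec targets), since kmap_atomic() forbids sleeping:

	#include <linux/highmem.h>
	#include <linux/uio.h>

	static size_t demo_page_to_iter(struct page *page, size_t offset,
					size_t bytes, struct iov_iter *i)
	{
		void *kaddr = kmap_atomic(page);
		size_t done = copy_to_iter(kaddr + offset, bytes, i);

		kunmap_atomic(kaddr);
		return done;	/* bytes actually copied */
	}
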
page              301 lib/kfifo.c    	struct page *page;
page              310 lib/kfifo.c    	page = virt_to_page(buf);
page              315 lib/kfifo.c    		struct page *npage;
page              320 lib/kfifo.c    		if (page_to_phys(page) != page_to_phys(npage) - l) {
page              321 lib/kfifo.c    			sg_set_page(sgl, page, l - off, off);
page              325 lib/kfifo.c    			page = npage;
page              330 lib/kfifo.c    	sg_set_page(sgl, page, len, off);
page              389 lib/scatterlist.c int __sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
page              464 lib/scatterlist.c int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
page              491 lib/scatterlist.c 	struct page *page;
page              515 lib/scatterlist.c 		page = alloc_pages(gfp, order);
page              516 lib/scatterlist.c 		if (!page) {
page              521 lib/scatterlist.c 		sg_set_page(sg, page, elem_len, 0);
page              563 lib/scatterlist.c 	struct page *page;
page              569 lib/scatterlist.c 		page = sg_page(sg);
page              570 lib/scatterlist.c 		if (page)
page              571 lib/scatterlist.c 			__free_pages(page, order);
page              774 lib/scatterlist.c 	miter->page = sg_page_iter_page(&miter->piter);
page              778 lib/scatterlist.c 		miter->addr = kmap_atomic(miter->page) + miter->__offset;
page              780 lib/scatterlist.c 		miter->addr = kmap(miter->page) + miter->__offset;
page              810 lib/scatterlist.c 		    !PageSlab(miter->page))
page              811 lib/scatterlist.c 			flush_kernel_dcache_page(miter->page);
page              817 lib/scatterlist.c 			kunmap(miter->page);
page              819 lib/scatterlist.c 		miter->page = NULL;
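
The miter fields above belong to the s/g mapping iterator, which hides the kmap()/kmap_atomic() choice and the flush-on-unmap dance. Its canonical loop, sketched here as a routine that zeroes a scatterlist chunk by mapped chunk:

	#include <linux/scatterlist.h>
	#include <linux/string.h>

	static void demo_zero_sglist(struct scatterlist *sgl, unsigned int nents)
	{
		struct sg_mapping_iter miter;

		sg_miter_start(&miter, sgl, nents, SG_MITER_TO_SG);
		while (sg_miter_next(&miter))		/* maps one page at a time */
			memset(miter.addr, 0, miter.length);
		sg_miter_stop(&miter);			/* flushes and unmaps the last page */
	}
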
page              229 lib/stackdepot.c 	struct page *page = NULL;
page              267 lib/stackdepot.c 		page = alloc_pages(alloc_flags, STACK_ALLOC_ORDER);
page              268 lib/stackdepot.c 		if (page)
page              269 lib/stackdepot.c 			prealloc = page_address(page);
page             6503 lib/test_bpf.c 	struct page *page;
page             6524 lib/test_bpf.c 		page = alloc_page(GFP_KERNEL);
page             6526 lib/test_bpf.c 		if (!page)
page             6529 lib/test_bpf.c 		ptr = kmap(page);
page             6533 lib/test_bpf.c 		kunmap(page);
page             6534 lib/test_bpf.c 		skb_add_rx_frag(skb, 0, page, 0, MAX_DATA, MAX_DATA);
page             6540 lib/test_bpf.c 	__free_page(page);
page             6807 lib/test_bpf.c 	struct page *page[2];
page             6811 lib/test_bpf.c 		page[i] = alloc_page(GFP_KERNEL);
page             6812 lib/test_bpf.c 		if (!page[i]) {
page             6834 lib/test_bpf.c 		skb_add_rx_frag(skb[i], 0, page[i], 0, 64, 64);
page             6853 lib/test_bpf.c 	__free_page(page[1]);
page             6857 lib/test_bpf.c 	__free_page(page[0]);
page              348 lib/test_kasan.c 	struct page *page;
page              358 lib/test_kasan.c 	page = virt_to_page(ptr);
page              360 lib/test_kasan.c 	kfree(page_address(page) + offset);
page               65 lib/test_meminit.c 	struct page *page;
page               69 lib/test_meminit.c 	page = alloc_pages(GFP_KERNEL, order);
page               70 lib/test_meminit.c 	buf = page_address(page);
page               72 lib/test_meminit.c 	__free_pages(page, order);
page               74 lib/test_meminit.c 	page = alloc_pages(GFP_KERNEL, order);
page               75 lib/test_meminit.c 	buf = page_address(page);
page               79 lib/test_meminit.c 	__free_pages(page, order);
page              156 mm/backing-dev.c 			   struct device_attribute *attr, char *page)	\
page              160 mm/backing-dev.c 	return snprintf(page, PAGE_SIZE-1, "%lld\n", (long long)expr);	\
page              206 mm/backing-dev.c 					  char *page)
page              210 mm/backing-dev.c 	return snprintf(page, PAGE_SIZE-1, "%d\n",
page               15 mm/balloon_compaction.c 				     struct page *page)
page               23 mm/balloon_compaction.c 	BUG_ON(!trylock_page(page));
page               24 mm/balloon_compaction.c 	balloon_page_insert(b_dev_info, page);
page               25 mm/balloon_compaction.c 	unlock_page(page);
page               43 mm/balloon_compaction.c 	struct page *page, *tmp;
page               48 mm/balloon_compaction.c 	list_for_each_entry_safe(page, tmp, pages, lru) {
page               49 mm/balloon_compaction.c 		list_del(&page->lru);
page               50 mm/balloon_compaction.c 		balloon_page_enqueue_one(b_dev_info, page);
page               79 mm/balloon_compaction.c 	struct page *page, *tmp;
page               84 mm/balloon_compaction.c 	list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) {
page               93 mm/balloon_compaction.c 		if (!trylock_page(page))
page               97 mm/balloon_compaction.c 		    PageIsolated(page)) {
page               99 mm/balloon_compaction.c 			unlock_page(page);
page              102 mm/balloon_compaction.c 		balloon_page_delete(page);
page              104 mm/balloon_compaction.c 		list_add(&page->lru, pages);
page              105 mm/balloon_compaction.c 		unlock_page(page);
page              124 mm/balloon_compaction.c struct page *balloon_page_alloc(void)
page              126 mm/balloon_compaction.c 	struct page *page = alloc_page(balloon_mapping_gfp_mask() |
page              129 mm/balloon_compaction.c 	return page;
page              147 mm/balloon_compaction.c 			  struct page *page)
page              152 mm/balloon_compaction.c 	balloon_page_enqueue_one(b_dev_info, page);
page              177 mm/balloon_compaction.c struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
page              200 mm/balloon_compaction.c 	return list_first_entry(&pages, struct page, lru);
page              206 mm/balloon_compaction.c bool balloon_page_isolate(struct page *page, isolate_mode_t mode)
page              209 mm/balloon_compaction.c 	struct balloon_dev_info *b_dev_info = balloon_page_device(page);
page              213 mm/balloon_compaction.c 	list_del(&page->lru);
page              220 mm/balloon_compaction.c void balloon_page_putback(struct page *page)
page              222 mm/balloon_compaction.c 	struct balloon_dev_info *b_dev_info = balloon_page_device(page);
page              226 mm/balloon_compaction.c 	list_add(&page->lru, &b_dev_info->pages);
page              234 mm/balloon_compaction.c 		struct page *newpage, struct page *page,
page              237 mm/balloon_compaction.c 	struct balloon_dev_info *balloon = balloon_page_device(page);
page              247 mm/balloon_compaction.c 	VM_BUG_ON_PAGE(!PageLocked(page), page);
page              250 mm/balloon_compaction.c 	return balloon->migratepage(balloon, newpage, page, mode);
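
The mm/balloon_compaction.c entries form the helper API that balloon drivers (e.g. virtio_balloon) build on. A hedged sketch of the inflate path as a driver might call it; the vb_dev_info variable is hypothetical, and balloon_devinfo_init() is assumed from include/linux/balloon_compaction.h:

    #include <linux/balloon_compaction.h>

    static struct balloon_dev_info vb_dev_info;     /* hypothetical driver state */

    /* One-time setup: balloon_devinfo_init(&vb_dev_info); then per page: */
    static int inflate_one_page(void)
    {
            struct page *page = balloon_page_alloc();

            if (!page)
                    return -ENOMEM;
            /* Lists the page under b_dev_info->pages and marks it movable,
             * so the compaction hooks indexed above can migrate it. */
            balloon_page_enqueue(&vb_dev_info, page);
            return 0;
    }
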
page              176 mm/cleancache.c int __cleancache_get_page(struct page *page)
page              187 mm/cleancache.c 	VM_BUG_ON_PAGE(!PageLocked(page), page);
page              188 mm/cleancache.c 	pool_id = page->mapping->host->i_sb->cleancache_poolid;
page              192 mm/cleancache.c 	if (cleancache_get_key(page->mapping->host, &key) < 0)
page              195 mm/cleancache.c 	ret = cleancache_ops->get_page(pool_id, key, page->index, page);
page              215 mm/cleancache.c void __cleancache_put_page(struct page *page)
page              225 mm/cleancache.c 	VM_BUG_ON_PAGE(!PageLocked(page), page);
page              226 mm/cleancache.c 	pool_id = page->mapping->host->i_sb->cleancache_poolid;
page              228 mm/cleancache.c 		cleancache_get_key(page->mapping->host, &key) >= 0) {
page              229 mm/cleancache.c 		cleancache_ops->put_page(pool_id, key, page->index, page);
page              244 mm/cleancache.c 					struct page *page)
page              254 mm/cleancache.c 		VM_BUG_ON_PAGE(!PageLocked(page), page);
page              257 mm/cleancache.c 					key, page->index);
page              417 mm/cma.c       struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
page              425 mm/cma.c       	struct page *page = NULL;
page              468 mm/cma.c       			page = pfn_to_page(pfn);
page              482 mm/cma.c       	trace_cma_alloc(pfn, page, count, align);
page              489 mm/cma.c       	if (page) {
page              491 mm/cma.c       			page_kasan_tag_reset(page + i);
page              500 mm/cma.c       	pr_debug("%s(): returned %p\n", __func__, page);
page              501 mm/cma.c       	return page;
page              514 mm/cma.c       bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
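
The mm/cma.c lines give the allocator's two entry points; the cma_alloc() prototype is truncated by the listing's line width (kernels of this vintage take a trailing no_warn bool, treated here as an assumption). A sketch with a hypothetical my_cma area set aside at boot:

    /* Grab 16 physically contiguous pages from a CMA area, then return them.
     * 'my_cma' is a hypothetical struct cma * from early reservation code. */
    struct page *page = cma_alloc(my_cma, 16, 0 /* align order */, false);

    if (page) {
            /* ... hand the range to a device needing contiguous memory ... */
            cma_release(my_cma, page, 16);
    }
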
page               20 mm/cma_debug.c 	struct page *p;
page              134 mm/cma_debug.c 	struct page *p;
page               55 mm/compaction.c 	struct page *page, *next;
page               58 mm/compaction.c 	list_for_each_entry_safe(page, next, freelist, lru) {
page               59 mm/compaction.c 		unsigned long pfn = page_to_pfn(page);
page               60 mm/compaction.c 		list_del(&page->lru);
page               61 mm/compaction.c 		__free_page(page);
page               72 mm/compaction.c 	struct page *page, *next;
page               75 mm/compaction.c 	list_for_each_entry_safe(page, next, list, lru) {
page               76 mm/compaction.c 		list_del(&page->lru);
page               78 mm/compaction.c 		order = page_private(page);
page               81 mm/compaction.c 		post_alloc_hook(page, order, __GFP_MOVABLE);
page               83 mm/compaction.c 			split_page(page, order);
page               86 mm/compaction.c 			list_add(&page->lru, &tmp_list);
page               87 mm/compaction.c 			page++;
page               96 mm/compaction.c int PageMovable(struct page *page)
page              100 mm/compaction.c 	VM_BUG_ON_PAGE(!PageLocked(page), page);
page              101 mm/compaction.c 	if (!__PageMovable(page))
page              104 mm/compaction.c 	mapping = page_mapping(page);
page              112 mm/compaction.c void __SetPageMovable(struct page *page, struct address_space *mapping)
page              114 mm/compaction.c 	VM_BUG_ON_PAGE(!PageLocked(page), page);
page              115 mm/compaction.c 	VM_BUG_ON_PAGE((unsigned long)mapping & PAGE_MAPPING_MOVABLE, page);
page              116 mm/compaction.c 	page->mapping = (void *)((unsigned long)mapping | PAGE_MAPPING_MOVABLE);
page              120 mm/compaction.c void __ClearPageMovable(struct page *page)
page              122 mm/compaction.c 	VM_BUG_ON_PAGE(!PageLocked(page), page);
page              123 mm/compaction.c 	VM_BUG_ON_PAGE(!PageMovable(page), page);
page              129 mm/compaction.c 	page->mapping = (void *)((unsigned long)page->mapping &
page              206 mm/compaction.c 					struct page *page)
page              211 mm/compaction.c 	return !get_pageblock_skip(page);
page              227 mm/compaction.c static bool pageblock_skip_persistent(struct page *page)
page              229 mm/compaction.c 	if (!PageCompound(page))
page              232 mm/compaction.c 	page = compound_head(page);
page              234 mm/compaction.c 	if (compound_order(page) >= pageblock_order)
page              244 mm/compaction.c 	struct page *page = pfn_to_online_page(pfn);
page              245 mm/compaction.c 	struct page *block_page;
page              246 mm/compaction.c 	struct page *end_page;
page              249 mm/compaction.c 	if (!page)
page              251 mm/compaction.c 	if (zone != page_zone(page))
page              253 mm/compaction.c 	if (pageblock_skip_persistent(page))
page              260 mm/compaction.c 	if (check_source && check_target && !get_pageblock_skip(page))
page              268 mm/compaction.c 	    get_pageblock_migratetype(page) != MIGRATE_MOVABLE)
page              276 mm/compaction.c 		page = block_page;
page              294 mm/compaction.c 			if (check_source && PageLRU(page)) {
page              295 mm/compaction.c 				clear_pageblock_skip(page);
page              299 mm/compaction.c 			if (check_target && PageBuddy(page)) {
page              300 mm/compaction.c 				clear_pageblock_skip(page);
page              305 mm/compaction.c 		page += (1 << PAGE_ALLOC_COSTLY_ORDER);
page              307 mm/compaction.c 	} while (page <= end_page);
page              388 mm/compaction.c static bool test_and_set_skip(struct compact_control *cc, struct page *page,
page              400 mm/compaction.c 	skip = get_pageblock_skip(page);
page              402 mm/compaction.c 		set_pageblock_skip(page);
page              429 mm/compaction.c 			struct page *page, unsigned long pfn)
page              436 mm/compaction.c 	if (!page)
page              439 mm/compaction.c 	set_pageblock_skip(page);
page              447 mm/compaction.c 					struct page *page)
page              452 mm/compaction.c static inline bool pageblock_skip_persistent(struct page *page)
page              458 mm/compaction.c 			struct page *page, unsigned long pfn)
page              466 mm/compaction.c static bool test_and_set_skip(struct compact_control *cc, struct page *page,
page              543 mm/compaction.c 	struct page *cursor;
page              558 mm/compaction.c 		struct page *page = cursor;
page              580 mm/compaction.c 		if (PageCompound(page)) {
page              581 mm/compaction.c 			const unsigned int order = compound_order(page);
page              590 mm/compaction.c 		if (!PageBuddy(page))
page              605 mm/compaction.c 			if (!PageBuddy(page))
page              610 mm/compaction.c 		order = page_order(page);
page              611 mm/compaction.c 		isolated = __isolate_free_page(page, order);
page              614 mm/compaction.c 		set_page_private(page, order);
page              618 mm/compaction.c 		list_add_tail(&page->lru, freelist);
page              790 mm/compaction.c 	struct page *page = NULL, *valid_page = NULL;
page              860 mm/compaction.c 		page = pfn_to_page(low_pfn);
page              869 mm/compaction.c 			if (!cc->ignore_skip_hint && get_pageblock_skip(page)) {
page              873 mm/compaction.c 			valid_page = page;
page              882 mm/compaction.c 		if (PageBuddy(page)) {
page              883 mm/compaction.c 			unsigned long freepage_order = page_order_unsafe(page);
page              902 mm/compaction.c 		if (PageCompound(page)) {
page              903 mm/compaction.c 			const unsigned int order = compound_order(page);
page              915 mm/compaction.c 		if (!PageLRU(page)) {
page              920 mm/compaction.c 			if (unlikely(__PageMovable(page)) &&
page              921 mm/compaction.c 					!PageIsolated(page)) {
page              928 mm/compaction.c 				if (!isolate_movable_page(page, isolate_mode))
page              940 mm/compaction.c 		if (!page_mapping(page) &&
page              941 mm/compaction.c 		    page_count(page) > page_mapcount(page))
page              948 mm/compaction.c 		if (!(cc->gfp_mask & __GFP_FS) && page_mapping(page))
page              959 mm/compaction.c 				if (test_and_set_skip(cc, page, low_pfn))
page              964 mm/compaction.c 			if (!PageLRU(page))
page              972 mm/compaction.c 			if (unlikely(PageCompound(page))) {
page              973 mm/compaction.c 				low_pfn += compound_nr(page) - 1;
page              978 mm/compaction.c 		lruvec = mem_cgroup_page_lruvec(page, pgdat);
page              981 mm/compaction.c 		if (__isolate_lru_page(page, isolate_mode) != 0)
page              984 mm/compaction.c 		VM_BUG_ON_PAGE(PageCompound(page), page);
page              987 mm/compaction.c 		del_page_from_lru_list(page, lruvec, page_lru(page));
page              988 mm/compaction.c 		inc_node_page_state(page,
page              989 mm/compaction.c 				NR_ISOLATED_ANON + page_is_file_cache(page));
page              992 mm/compaction.c 		list_add(&page->lru, &cc->migratepages);
page             1124 mm/compaction.c 							struct page *page)
page             1128 mm/compaction.c 	if (pageblock_skip_persistent(page))
page             1134 mm/compaction.c 	block_mt = get_pageblock_migratetype(page);
page             1144 mm/compaction.c 							struct page *page)
page             1147 mm/compaction.c 	if (PageBuddy(page)) {
page             1153 mm/compaction.c 		if (page_order_unsafe(page) >= pageblock_order)
page             1161 mm/compaction.c 	if (is_migrate_movable(get_pageblock_migratetype(page)))
page             1192 mm/compaction.c move_freelist_head(struct list_head *freelist, struct page *freepage)
page             1210 mm/compaction.c move_freelist_tail(struct list_head *freelist, struct page *freepage)
page             1225 mm/compaction.c 	struct page *page = pfn_to_page(pfn);
page             1253 mm/compaction.c 		set_pageblock_skip(page);
page             1282 mm/compaction.c 	struct page *page = NULL;
page             1317 mm/compaction.c 	     !page && order >= 0;
page             1321 mm/compaction.c 		struct page *freepage;
page             1343 mm/compaction.c 				page = freepage;
page             1359 mm/compaction.c 		if (!page && high_pfn) {
page             1360 mm/compaction.c 			page = pfn_to_page(high_pfn);
page             1363 mm/compaction.c 			freepage = page;
page             1370 mm/compaction.c 		if (page) {
page             1371 mm/compaction.c 			if (__isolate_free_page(page, order)) {
page             1372 mm/compaction.c 				set_page_private(page, order);
page             1375 mm/compaction.c 				list_add_tail(&page->lru, &cc->freepages);
page             1380 mm/compaction.c 				page = NULL;
page             1394 mm/compaction.c 	if (!page) {
page             1403 mm/compaction.c 				page = pfn_to_page(highest);
page             1407 mm/compaction.c 					page = pfn_to_page(min_pfn);
page             1420 mm/compaction.c 	if (!page)
page             1423 mm/compaction.c 	low_pfn = page_to_pfn(page);
page             1435 mm/compaction.c 	struct page *page;
page             1484 mm/compaction.c 		page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
page             1486 mm/compaction.c 		if (!page)
page             1490 mm/compaction.c 		if (!suitable_migration_target(cc, page))
page             1494 mm/compaction.c 		if (!isolation_suitable(cc, page))
page             1503 mm/compaction.c 			update_pageblock_skip(cc, page, block_start_pfn);
page             1549 mm/compaction.c static struct page *compaction_alloc(struct page *migratepage,
page             1553 mm/compaction.c 	struct page *freepage;
page             1562 mm/compaction.c 	freepage = list_entry(cc->freepages.next, struct page, lru);
page             1574 mm/compaction.c static void compaction_free(struct page *page, unsigned long data)
page             1578 mm/compaction.c 	list_add(&page->lru, &cc->freepages);
page             1679 mm/compaction.c 		struct page *freepage;
page             1746 mm/compaction.c 	struct page *page;
page             1790 mm/compaction.c 		page = pageblock_pfn_to_page(block_start_pfn,
page             1792 mm/compaction.c 		if (!page)
page             1803 mm/compaction.c 		    !fast_find_block && !isolation_suitable(cc, page))
page             1814 mm/compaction.c 		if (!suitable_migration_source(cc, page)) {
page             2253 mm/compaction.c 		if (capc && capc->page) {
page             2291 mm/compaction.c 		struct page **capture)
page             2310 mm/compaction.c 		.page = NULL,
page             2321 mm/compaction.c 	*capture = capc.page;
page             2341 mm/compaction.c 		enum compact_priority prio, struct page **capture)
page               45 mm/debug.c     void __dump_page(struct page *page, const char *reason)
page               48 mm/debug.c     	bool page_poisoned = PagePoisoned(page);
page               58 mm/debug.c     		pr_warn("page:%px is uninitialized and poisoned", page);
page               62 mm/debug.c     	mapping = page_mapping(page);
page               69 mm/debug.c     	mapcount = PageSlab(page) ? 0 : page_mapcount(page);
page               71 mm/debug.c     	if (PageCompound(page))
page               74 mm/debug.c     			page, page_ref_count(page), mapcount,
page               75 mm/debug.c     			page->mapping, page_to_pgoff(page),
page               76 mm/debug.c     			compound_mapcount(page));
page               79 mm/debug.c     			page, page_ref_count(page), mapcount,
page               80 mm/debug.c     			page->mapping, page_to_pgoff(page));
page               81 mm/debug.c     	if (PageKsm(page))
page               83 mm/debug.c     	else if (PageAnon(page))
page               95 mm/debug.c     	pr_warn("%sflags: %#lx(%pGp)\n", type, page->flags, &page->flags);
page               99 mm/debug.c     			sizeof(unsigned long), page,
page              100 mm/debug.c     			sizeof(struct page), false);
page              106 mm/debug.c     	if (!page_poisoned && page->mem_cgroup)
page              107 mm/debug.c     		pr_warn("page->mem_cgroup:%px\n", page->mem_cgroup);
page              111 mm/debug.c     void dump_page(struct page *page, const char *reason)
page              113 mm/debug.c     	__dump_page(page, reason);
page              114 mm/debug.c     	dump_page_owner(page);
page              239 mm/debug.c     void page_init_poison(struct page *page, size_t size)
page              242 mm/debug.c     		memset(page, PAGE_POISON_PATTERN, size);
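
The mm/debug.c entries are the __dump_page()/dump_page() diagnostic path. Callers just pass the page and a reason string; a minimal guard, with an illustrative trigger condition:

    /* Emit the full page dump (flags, mapcount, mapping, raw struct page)
     * when an unexpected refcount shows up. */
    if (WARN_ON(page_count(page) == 0))
            dump_page(page, "unexpected zero refcount");
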
page                8 mm/debug_page_ref.c void __page_ref_set(struct page *page, int v)
page               10 mm/debug_page_ref.c 	trace_page_ref_set(page, v);
page               15 mm/debug_page_ref.c void __page_ref_mod(struct page *page, int v)
page               17 mm/debug_page_ref.c 	trace_page_ref_mod(page, v);
page               22 mm/debug_page_ref.c void __page_ref_mod_and_test(struct page *page, int v, int ret)
page               24 mm/debug_page_ref.c 	trace_page_ref_mod_and_test(page, v, ret);
page               29 mm/debug_page_ref.c void __page_ref_mod_and_return(struct page *page, int v, int ret)
page               31 mm/debug_page_ref.c 	trace_page_ref_mod_and_return(page, v, ret);
page               36 mm/debug_page_ref.c void __page_ref_mod_unless(struct page *page, int v, int u)
page               38 mm/debug_page_ref.c 	trace_page_ref_mod_unless(page, v, u);
page               43 mm/debug_page_ref.c void __page_ref_freeze(struct page *page, int v, int ret)
page               45 mm/debug_page_ref.c 	trace_page_ref_freeze(page, v, ret);
page               50 mm/debug_page_ref.c void __page_ref_unfreeze(struct page *page, int v)
page               52 mm/debug_page_ref.c 	trace_page_ref_unfreeze(page, v);
page               70 mm/dmapool.c   	struct dma_page *page;
page               86 mm/dmapool.c   		list_for_each_entry(page, &pool->page_list, page_list) {
page               88 mm/dmapool.c   			blocks += page->in_use;
page              205 mm/dmapool.c   static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
page              216 mm/dmapool.c   		*(int *)(page->vaddr + offset) = next;
page              223 mm/dmapool.c   	struct dma_page *page;
page              225 mm/dmapool.c   	page = kmalloc(sizeof(*page), mem_flags);
page              226 mm/dmapool.c   	if (!page)
page              228 mm/dmapool.c   	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
page              229 mm/dmapool.c   					 &page->dma, mem_flags);
page              230 mm/dmapool.c   	if (page->vaddr) {
page              232 mm/dmapool.c   		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
page              234 mm/dmapool.c   		pool_initialise_page(pool, page);
page              235 mm/dmapool.c   		page->in_use = 0;
page              236 mm/dmapool.c   		page->offset = 0;
page              238 mm/dmapool.c   		kfree(page);
page              239 mm/dmapool.c   		page = NULL;
page              241 mm/dmapool.c   	return page;
page              244 mm/dmapool.c   static inline bool is_page_busy(struct dma_page *page)
page              246 mm/dmapool.c   	return page->in_use != 0;
page              249 mm/dmapool.c   static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
page              251 mm/dmapool.c   	dma_addr_t dma = page->dma;
page              254 mm/dmapool.c   	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
page              256 mm/dmapool.c   	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
page              257 mm/dmapool.c   	list_del(&page->page_list);
page              258 mm/dmapool.c   	kfree(page);
page              287 mm/dmapool.c   		struct dma_page *page;
page              288 mm/dmapool.c   		page = list_entry(pool->page_list.next,
page              290 mm/dmapool.c   		if (is_page_busy(page)) {
page              294 mm/dmapool.c   					pool->name, page->vaddr);
page              297 mm/dmapool.c   				       pool->name, page->vaddr);
page              299 mm/dmapool.c   			list_del(&page->page_list);
page              300 mm/dmapool.c   			kfree(page);
page              302 mm/dmapool.c   			pool_free_page(pool, page);
page              323 mm/dmapool.c   	struct dma_page *page;
page              330 mm/dmapool.c   	list_for_each_entry(page, &pool->page_list, page_list) {
page              331 mm/dmapool.c   		if (page->offset < pool->allocation)
page              338 mm/dmapool.c   	page = pool_alloc_page(pool, mem_flags & (~__GFP_ZERO));
page              339 mm/dmapool.c   	if (!page)
page              344 mm/dmapool.c   	list_add(&page->page_list, &pool->page_list);
page              346 mm/dmapool.c   	page->in_use++;
page              347 mm/dmapool.c   	offset = page->offset;
page              348 mm/dmapool.c   	page->offset = *(int *)(page->vaddr + offset);
page              349 mm/dmapool.c   	retval = offset + page->vaddr;
page              350 mm/dmapool.c   	*handle = offset + page->dma;
page              356 mm/dmapool.c   		for (i = sizeof(page->offset); i < pool->size; i++) {
page              390 mm/dmapool.c   	struct dma_page *page;
page              392 mm/dmapool.c   	list_for_each_entry(page, &pool->page_list, page_list) {
page              393 mm/dmapool.c   		if (dma < page->dma)
page              395 mm/dmapool.c   		if ((dma - page->dma) < pool->allocation)
page              396 mm/dmapool.c   			return page;
page              412 mm/dmapool.c   	struct dma_page *page;
page              417 mm/dmapool.c   	page = pool_find_page(pool, dma);
page              418 mm/dmapool.c   	if (!page) {
page              430 mm/dmapool.c   	offset = vaddr - page->vaddr;
page              434 mm/dmapool.c   	if ((dma - page->dma) != offset) {
page              446 mm/dmapool.c   		unsigned int chain = page->offset;
page              449 mm/dmapool.c   				chain = *(int *)(page->vaddr + chain);
page              465 mm/dmapool.c   	page->in_use--;
page              466 mm/dmapool.c   	*(int *)vaddr = page->offset;
page              467 mm/dmapool.c   	page->offset = offset;
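
The mm/dmapool.c lines above implement the pool itself; the consumer side pairs dma_pool_create()/dma_pool_destroy() with dma_pool_alloc()/dma_pool_free(). A sketch assuming a valid struct device *dev and illustrative size/align values:

    #include <linux/dmapool.h>

    static void dmapool_demo(struct device *dev)
    {
            struct dma_pool *pool;
            dma_addr_t dma;
            void *vaddr;

            /* 64-byte blocks, 8-byte aligned, no boundary restriction. */
            pool = dma_pool_create("demo-pool", dev, 64, 8, 0);
            if (!pool)
                    return;
            vaddr = dma_pool_alloc(pool, GFP_KERNEL, &dma);
            if (vaddr)
                    dma_pool_free(pool, vaddr, dma);
            dma_pool_destroy(pool);
    }
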
page              120 mm/filemap.c   				   struct page *page, void *shadow)
page              122 mm/filemap.c   	XA_STATE(xas, &mapping->i_pages, page->index);
page              128 mm/filemap.c   	if (!PageHuge(page)) {
page              129 mm/filemap.c   		xas_set_order(&xas, page->index, compound_order(page));
page              130 mm/filemap.c   		nr = compound_nr(page);
page              133 mm/filemap.c   	VM_BUG_ON_PAGE(!PageLocked(page), page);
page              134 mm/filemap.c   	VM_BUG_ON_PAGE(PageTail(page), page);
page              135 mm/filemap.c   	VM_BUG_ON_PAGE(nr != 1 && shadow, page);
page              140 mm/filemap.c   	page->mapping = NULL;
page              157 mm/filemap.c   				      struct page *page)
page              166 mm/filemap.c   	if (PageUptodate(page) && PageMappedToDisk(page))
page              167 mm/filemap.c   		cleancache_put_page(page);
page              169 mm/filemap.c   		cleancache_invalidate_page(mapping, page);
page              171 mm/filemap.c   	VM_BUG_ON_PAGE(PageTail(page), page);
page              172 mm/filemap.c   	VM_BUG_ON_PAGE(page_mapped(page), page);
page              173 mm/filemap.c   	if (!IS_ENABLED(CONFIG_DEBUG_VM) && unlikely(page_mapped(page))) {
page              177 mm/filemap.c   			 current->comm, page_to_pfn(page));
page              178 mm/filemap.c   		dump_page(page, "still mapped when deleted");
page              182 mm/filemap.c   		mapcount = page_mapcount(page);
page              184 mm/filemap.c   		    page_count(page) >= mapcount + 2) {
page              191 mm/filemap.c   			page_mapcount_reset(page);
page              192 mm/filemap.c   			page_ref_sub(page, mapcount);
page              197 mm/filemap.c   	if (PageHuge(page))
page              200 mm/filemap.c   	nr = hpage_nr_pages(page);
page              202 mm/filemap.c   	__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
page              203 mm/filemap.c   	if (PageSwapBacked(page)) {
page              204 mm/filemap.c   		__mod_node_page_state(page_pgdat(page), NR_SHMEM, -nr);
page              205 mm/filemap.c   		if (PageTransHuge(page))
page              206 mm/filemap.c   			__dec_node_page_state(page, NR_SHMEM_THPS);
page              207 mm/filemap.c   	} else if (PageTransHuge(page)) {
page              208 mm/filemap.c   		__dec_node_page_state(page, NR_FILE_THPS);
page              222 mm/filemap.c   	if (WARN_ON_ONCE(PageDirty(page)))
page              223 mm/filemap.c   		account_page_cleaned(page, mapping, inode_to_wb(mapping->host));
page              231 mm/filemap.c   void __delete_from_page_cache(struct page *page, void *shadow)
page              233 mm/filemap.c   	struct address_space *mapping = page->mapping;
page              235 mm/filemap.c   	trace_mm_filemap_delete_from_page_cache(page);
page              237 mm/filemap.c   	unaccount_page_cache_page(mapping, page);
page              238 mm/filemap.c   	page_cache_delete(mapping, page, shadow);
page              242 mm/filemap.c   				struct page *page)
page              244 mm/filemap.c   	void (*freepage)(struct page *);
page              248 mm/filemap.c   		freepage(page);
page              250 mm/filemap.c   	if (PageTransHuge(page) && !PageHuge(page)) {
page              251 mm/filemap.c   		page_ref_sub(page, HPAGE_PMD_NR);
page              252 mm/filemap.c   		VM_BUG_ON_PAGE(page_count(page) <= 0, page);
page              254 mm/filemap.c   		put_page(page);
page              266 mm/filemap.c   void delete_from_page_cache(struct page *page)
page              268 mm/filemap.c   	struct address_space *mapping = page_mapping(page);
page              271 mm/filemap.c   	BUG_ON(!PageLocked(page));
page              273 mm/filemap.c   	__delete_from_page_cache(page, NULL);
page              276 mm/filemap.c   	page_cache_free_page(mapping, page);
page              300 mm/filemap.c   	struct page *page;
page              303 mm/filemap.c   	xas_for_each(&xas, page, ULONG_MAX) {
page              308 mm/filemap.c   		if (xa_is_value(page))
page              317 mm/filemap.c   		if (page != pvec->pages[i]) {
page              318 mm/filemap.c   			VM_BUG_ON_PAGE(page->index > pvec->pages[i]->index,
page              319 mm/filemap.c   					page);
page              323 mm/filemap.c   		WARN_ON_ONCE(!PageLocked(page));
page              325 mm/filemap.c   		if (page->index == xas.xa_index)
page              326 mm/filemap.c   			page->mapping = NULL;
page              334 mm/filemap.c   		if (page->index + compound_nr(page) - 1 == xas.xa_index)
page              475 mm/filemap.c   	struct page *page;
page              484 mm/filemap.c   		page = xas_find(&xas, max);
page              485 mm/filemap.c   		if (xas_retry(&xas, page))
page              488 mm/filemap.c   		if (xa_is_value(page))
page              499 mm/filemap.c   	return page != NULL;
page              524 mm/filemap.c   			struct page *page = pvec.pages[i];
page              526 mm/filemap.c   			wait_on_page_writeback(page);
page              527 mm/filemap.c   			ClearPageError(page);
page              811 mm/filemap.c   int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
page              814 mm/filemap.c   	void (*freepage)(struct page *) = mapping->a_ops->freepage;
page              850 mm/filemap.c   static int __add_to_page_cache_locked(struct page *page,
page              856 mm/filemap.c   	int huge = PageHuge(page);
page              861 mm/filemap.c   	VM_BUG_ON_PAGE(!PageLocked(page), page);
page              862 mm/filemap.c   	VM_BUG_ON_PAGE(PageSwapBacked(page), page);
page              866 mm/filemap.c   		error = mem_cgroup_try_charge(page, current->mm,
page              872 mm/filemap.c   	get_page(page);
page              873 mm/filemap.c   	page->mapping = mapping;
page              874 mm/filemap.c   	page->index = offset;
page              881 mm/filemap.c   		xas_store(&xas, page);
page              894 mm/filemap.c   			__inc_node_page_state(page, NR_FILE_PAGES);
page              903 mm/filemap.c   		mem_cgroup_commit_charge(page, memcg, false, false);
page              904 mm/filemap.c   	trace_mm_filemap_add_to_page_cache(page);
page              907 mm/filemap.c   	page->mapping = NULL;
page              910 mm/filemap.c   		mem_cgroup_cancel_charge(page, memcg, false);
page              911 mm/filemap.c   	put_page(page);
page              928 mm/filemap.c   int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
page              931 mm/filemap.c   	return __add_to_page_cache_locked(page, mapping, offset,
page              936 mm/filemap.c   int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
page              942 mm/filemap.c   	__SetPageLocked(page);
page              943 mm/filemap.c   	ret = __add_to_page_cache_locked(page, mapping, offset,
page              946 mm/filemap.c   		__ClearPageLocked(page);
page              956 mm/filemap.c   		WARN_ON_ONCE(PageActive(page));
page              958 mm/filemap.c   			workingset_refault(page, shadow);
page              959 mm/filemap.c   		lru_cache_add(page);
page              966 mm/filemap.c   struct page *__page_cache_alloc(gfp_t gfp)
page              969 mm/filemap.c   	struct page *page;
page              976 mm/filemap.c   			page = __alloc_pages_node(n, gfp, 0);
page              977 mm/filemap.c   		} while (!page && read_mems_allowed_retry(cpuset_mems_cookie));
page              979 mm/filemap.c   		return page;
page             1000 mm/filemap.c   static wait_queue_head_t *page_waitqueue(struct page *page)
page             1002 mm/filemap.c   	return &page_wait_table[hash_ptr(page, PAGE_WAIT_TABLE_BITS)];
page             1017 mm/filemap.c   	struct page *page;
page             1023 mm/filemap.c   	struct page *page;
page             1034 mm/filemap.c   	if (wait_page->page != key->page)
page             1049 mm/filemap.c   	if (test_bit(key->bit_nr, &key->page->flags))
page             1055 mm/filemap.c   static void wake_up_page_bit(struct page *page, int bit_nr)
page             1057 mm/filemap.c   	wait_queue_head_t *q = page_waitqueue(page);
page             1062 mm/filemap.c   	key.page = page;
page             1097 mm/filemap.c   		ClearPageWaiters(page);
page             1109 mm/filemap.c   static void wake_up_page(struct page *page, int bit)
page             1111 mm/filemap.c   	if (!PageWaiters(page))
page             1113 mm/filemap.c   	wake_up_page_bit(page, bit);
page             1132 mm/filemap.c   	struct page *page, int bit_nr, int state, enum behavior behavior)
page             1143 mm/filemap.c   	    !PageUptodate(page) && PageWorkingset(page)) {
page             1144 mm/filemap.c   		if (!PageSwapBacked(page)) {
page             1155 mm/filemap.c   	wait_page.page = page;
page             1163 mm/filemap.c   			SetPageWaiters(page);
page             1170 mm/filemap.c   		bit_is_set = test_bit(bit_nr, &page->flags);
page             1172 mm/filemap.c   			put_page(page);
page             1178 mm/filemap.c   			if (!test_and_set_bit_lock(bit_nr, &page->flags))
page             1181 mm/filemap.c   			if (!test_bit(bit_nr, &page->flags))
page             1221 mm/filemap.c   void wait_on_page_bit(struct page *page, int bit_nr)
page             1223 mm/filemap.c   	wait_queue_head_t *q = page_waitqueue(page);
page             1224 mm/filemap.c   	wait_on_page_bit_common(q, page, bit_nr, TASK_UNINTERRUPTIBLE, SHARED);
page             1228 mm/filemap.c   int wait_on_page_bit_killable(struct page *page, int bit_nr)
page             1230 mm/filemap.c   	wait_queue_head_t *q = page_waitqueue(page);
page             1231 mm/filemap.c   	return wait_on_page_bit_common(q, page, bit_nr, TASK_KILLABLE, SHARED);
page             1245 mm/filemap.c   void put_and_wait_on_page_locked(struct page *page)
page             1249 mm/filemap.c   	page = compound_head(page);
page             1250 mm/filemap.c   	q = page_waitqueue(page);
page             1251 mm/filemap.c   	wait_on_page_bit_common(q, page, PG_locked, TASK_UNINTERRUPTIBLE, DROP);
page             1261 mm/filemap.c   void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter)
page             1263 mm/filemap.c   	wait_queue_head_t *q = page_waitqueue(page);
page             1268 mm/filemap.c   	SetPageWaiters(page);
page             1311 mm/filemap.c   void unlock_page(struct page *page)
page             1314 mm/filemap.c   	page = compound_head(page);
page             1315 mm/filemap.c   	VM_BUG_ON_PAGE(!PageLocked(page), page);
page             1316 mm/filemap.c   	if (clear_bit_unlock_is_negative_byte(PG_locked, &page->flags))
page             1317 mm/filemap.c   		wake_up_page_bit(page, PG_locked);
page             1325 mm/filemap.c   void end_page_writeback(struct page *page)
page             1334 mm/filemap.c   	if (PageReclaim(page)) {
page             1335 mm/filemap.c   		ClearPageReclaim(page);
page             1336 mm/filemap.c   		rotate_reclaimable_page(page);
page             1339 mm/filemap.c   	if (!test_clear_page_writeback(page))
page             1343 mm/filemap.c   	wake_up_page(page, PG_writeback);
page             1351 mm/filemap.c   void page_endio(struct page *page, bool is_write, int err)
page             1355 mm/filemap.c   			SetPageUptodate(page);
page             1357 mm/filemap.c   			ClearPageUptodate(page);
page             1358 mm/filemap.c   			SetPageError(page);
page             1360 mm/filemap.c   		unlock_page(page);
page             1365 mm/filemap.c   			SetPageError(page);
page             1366 mm/filemap.c   			mapping = page_mapping(page);
page             1370 mm/filemap.c   		end_page_writeback(page);
page             1379 mm/filemap.c   void __lock_page(struct page *__page)
page             1381 mm/filemap.c   	struct page *page = compound_head(__page);
page             1382 mm/filemap.c   	wait_queue_head_t *q = page_waitqueue(page);
page             1383 mm/filemap.c   	wait_on_page_bit_common(q, page, PG_locked, TASK_UNINTERRUPTIBLE,
page             1388 mm/filemap.c   int __lock_page_killable(struct page *__page)
page             1390 mm/filemap.c   	struct page *page = compound_head(__page);
page             1391 mm/filemap.c   	wait_queue_head_t *q = page_waitqueue(page);
page             1392 mm/filemap.c   	return wait_on_page_bit_common(q, page, PG_locked, TASK_KILLABLE,
page             1408 mm/filemap.c   int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
page             1421 mm/filemap.c   			wait_on_page_locked_killable(page);
page             1423 mm/filemap.c   			wait_on_page_locked(page);
page             1429 mm/filemap.c   			ret = __lock_page_killable(page);
page             1435 mm/filemap.c   			__lock_page(page);
page             1525 mm/filemap.c   struct page *find_get_entry(struct address_space *mapping, pgoff_t offset)
page             1528 mm/filemap.c   	struct page *page;
page             1533 mm/filemap.c   	page = xas_load(&xas);
page             1534 mm/filemap.c   	if (xas_retry(&xas, page))
page             1540 mm/filemap.c   	if (!page || xa_is_value(page))
page             1543 mm/filemap.c   	if (!page_cache_get_speculative(page))
page             1551 mm/filemap.c   	if (unlikely(page != xas_reload(&xas))) {
page             1552 mm/filemap.c   		put_page(page);
page             1555 mm/filemap.c   	page = find_subpage(page, offset);
page             1559 mm/filemap.c   	return page;
page             1579 mm/filemap.c   struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset)
page             1581 mm/filemap.c   	struct page *page;
page             1584 mm/filemap.c   	page = find_get_entry(mapping, offset);
page             1585 mm/filemap.c   	if (page && !xa_is_value(page)) {
page             1586 mm/filemap.c   		lock_page(page);
page             1588 mm/filemap.c   		if (unlikely(page_mapping(page) != mapping)) {
page             1589 mm/filemap.c   			unlock_page(page);
page             1590 mm/filemap.c   			put_page(page);
page             1593 mm/filemap.c   		VM_BUG_ON_PAGE(page_to_pgoff(page) != offset, page);
page             1595 mm/filemap.c   	return page;
page             1629 mm/filemap.c   struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
page             1632 mm/filemap.c   	struct page *page;
page             1635 mm/filemap.c   	page = find_get_entry(mapping, offset);
page             1636 mm/filemap.c   	if (xa_is_value(page))
page             1637 mm/filemap.c   		page = NULL;
page             1638 mm/filemap.c   	if (!page)
page             1643 mm/filemap.c   			if (!trylock_page(page)) {
page             1644 mm/filemap.c   				put_page(page);
page             1648 mm/filemap.c   			lock_page(page);
page             1652 mm/filemap.c   		if (unlikely(compound_head(page)->mapping != mapping)) {
page             1653 mm/filemap.c   			unlock_page(page);
page             1654 mm/filemap.c   			put_page(page);
page             1657 mm/filemap.c   		VM_BUG_ON_PAGE(page->index != offset, page);
page             1661 mm/filemap.c   		mark_page_accessed(page);
page             1664 mm/filemap.c   	if (!page && (fgp_flags & FGP_CREAT)) {
page             1671 mm/filemap.c   		page = __page_cache_alloc(gfp_mask);
page             1672 mm/filemap.c   		if (!page)
page             1680 mm/filemap.c   			__SetPageReferenced(page);
page             1682 mm/filemap.c   		err = add_to_page_cache_lru(page, mapping, offset, gfp_mask);
page             1684 mm/filemap.c   			put_page(page);
page             1685 mm/filemap.c   			page = NULL;
page             1694 mm/filemap.c   		if (page && (fgp_flags & FGP_FOR_MMAP))
page             1695 mm/filemap.c   			unlock_page(page);
page             1698 mm/filemap.c   	return page;
page             1726 mm/filemap.c   			  struct page **entries, pgoff_t *indices)
page             1729 mm/filemap.c   	struct page *page;
page             1736 mm/filemap.c   	xas_for_each(&xas, page, ULONG_MAX) {
page             1737 mm/filemap.c   		if (xas_retry(&xas, page))
page             1744 mm/filemap.c   		if (xa_is_value(page))
page             1747 mm/filemap.c   		if (!page_cache_get_speculative(page))
page             1751 mm/filemap.c   		if (unlikely(page != xas_reload(&xas)))
page             1753 mm/filemap.c   		page = find_subpage(page, xas.xa_index);
page             1757 mm/filemap.c   		entries[ret] = page;
page             1762 mm/filemap.c   		put_page(page);
page             1793 mm/filemap.c   			      struct page **pages)
page             1796 mm/filemap.c   	struct page *page;
page             1803 mm/filemap.c   	xas_for_each(&xas, page, end) {
page             1804 mm/filemap.c   		if (xas_retry(&xas, page))
page             1807 mm/filemap.c   		if (xa_is_value(page))
page             1810 mm/filemap.c   		if (!page_cache_get_speculative(page))
page             1814 mm/filemap.c   		if (unlikely(page != xas_reload(&xas)))
page             1817 mm/filemap.c   		pages[ret] = find_subpage(page, xas.xa_index);
page             1824 mm/filemap.c   		put_page(page);
page             1858 mm/filemap.c   			       unsigned int nr_pages, struct page **pages)
page             1861 mm/filemap.c   	struct page *page;
page             1868 mm/filemap.c   	for (page = xas_load(&xas); page; page = xas_next(&xas)) {
page             1869 mm/filemap.c   		if (xas_retry(&xas, page))
page             1875 mm/filemap.c   		if (xa_is_value(page))
page             1878 mm/filemap.c   		if (!page_cache_get_speculative(page))
page             1882 mm/filemap.c   		if (unlikely(page != xas_reload(&xas)))
page             1885 mm/filemap.c   		pages[ret] = find_subpage(page, xas.xa_index);
page             1890 mm/filemap.c   		put_page(page);
page             1915 mm/filemap.c   			struct page **pages)
page             1918 mm/filemap.c   	struct page *page;
page             1925 mm/filemap.c   	xas_for_each_marked(&xas, page, end, tag) {
page             1926 mm/filemap.c   		if (xas_retry(&xas, page))
page             1933 mm/filemap.c   		if (xa_is_value(page))
page             1936 mm/filemap.c   		if (!page_cache_get_speculative(page))
page             1940 mm/filemap.c   		if (unlikely(page != xas_reload(&xas)))
page             1943 mm/filemap.c   		pages[ret] = find_subpage(page, xas.xa_index);
page             1950 mm/filemap.c   		put_page(page);
page             2035 mm/filemap.c   		struct page *page;
page             2047 mm/filemap.c   		page = find_get_page(mapping, index);
page             2048 mm/filemap.c   		if (!page) {
page             2054 mm/filemap.c   			page = find_get_page(mapping, index);
page             2055 mm/filemap.c   			if (unlikely(page == NULL))
page             2058 mm/filemap.c   		if (PageReadahead(page)) {
page             2060 mm/filemap.c   					ra, filp, page,
page             2063 mm/filemap.c   		if (!PageUptodate(page)) {
page             2065 mm/filemap.c   				put_page(page);
page             2074 mm/filemap.c   			error = wait_on_page_locked_killable(page);
page             2077 mm/filemap.c   			if (PageUptodate(page))
page             2086 mm/filemap.c   			if (!trylock_page(page))
page             2089 mm/filemap.c   			if (!page->mapping)
page             2091 mm/filemap.c   			if (!mapping->a_ops->is_partially_uptodate(page,
page             2094 mm/filemap.c   			unlock_page(page);
page             2109 mm/filemap.c   			put_page(page);
page             2118 mm/filemap.c   				put_page(page);
page             2129 mm/filemap.c   			flush_dcache_page(page);
page             2136 mm/filemap.c   			mark_page_accessed(page);
page             2144 mm/filemap.c   		ret = copy_page_to_iter(page, offset, nr, iter);
page             2150 mm/filemap.c   		put_page(page);
page             2162 mm/filemap.c   		error = lock_page_killable(page);
page             2168 mm/filemap.c   		if (!page->mapping) {
page             2169 mm/filemap.c   			unlock_page(page);
page             2170 mm/filemap.c   			put_page(page);
page             2175 mm/filemap.c   		if (PageUptodate(page)) {
page             2176 mm/filemap.c   			unlock_page(page);
page             2186 mm/filemap.c   		ClearPageError(page);
page             2188 mm/filemap.c   		error = mapping->a_ops->readpage(filp, page);
page             2192 mm/filemap.c   				put_page(page);
page             2199 mm/filemap.c   		if (!PageUptodate(page)) {
page             2200 mm/filemap.c   			error = lock_page_killable(page);
page             2203 mm/filemap.c   			if (!PageUptodate(page)) {
page             2204 mm/filemap.c   				if (page->mapping == NULL) {
page             2208 mm/filemap.c   					unlock_page(page);
page             2209 mm/filemap.c   					put_page(page);
page             2212 mm/filemap.c   				unlock_page(page);
page             2217 mm/filemap.c   			unlock_page(page);
page             2224 mm/filemap.c   		put_page(page);
page             2232 mm/filemap.c   		page = page_cache_alloc(mapping);
page             2233 mm/filemap.c   		if (!page) {
page             2237 mm/filemap.c   		error = add_to_page_cache_lru(page, mapping, index,
page             2240 mm/filemap.c   			put_page(page);
page             2343 mm/filemap.c   static int lock_page_maybe_drop_mmap(struct vm_fault *vmf, struct page *page,
page             2346 mm/filemap.c   	if (trylock_page(page))
page             2359 mm/filemap.c   		if (__lock_page_killable(page)) {
page             2371 mm/filemap.c   		__lock_page(page);
page             2432 mm/filemap.c   					    struct page *page)
page             2445 mm/filemap.c   	if (PageReadahead(page)) {
page             2448 mm/filemap.c   					   page, offset, ra->ra_pages);
page             2486 mm/filemap.c   	struct page *page;
page             2496 mm/filemap.c   	page = find_get_page(mapping, offset);
page             2497 mm/filemap.c   	if (likely(page) && !(vmf->flags & FAULT_FLAG_TRIED)) {
page             2502 mm/filemap.c   		fpin = do_async_mmap_readahead(vmf, page);
page             2503 mm/filemap.c   	} else if (!page) {
page             2510 mm/filemap.c   		page = pagecache_get_page(mapping, offset,
page             2513 mm/filemap.c   		if (!page) {
page             2520 mm/filemap.c   	if (!lock_page_maybe_drop_mmap(vmf, page, &fpin))
page             2524 mm/filemap.c   	if (unlikely(compound_head(page)->mapping != mapping)) {
page             2525 mm/filemap.c   		unlock_page(page);
page             2526 mm/filemap.c   		put_page(page);
page             2529 mm/filemap.c   	VM_BUG_ON_PAGE(page_to_pgoff(page) != offset, page);
page             2535 mm/filemap.c   	if (unlikely(!PageUptodate(page)))
page             2544 mm/filemap.c   		unlock_page(page);
page             2554 mm/filemap.c   		unlock_page(page);
page             2555 mm/filemap.c   		put_page(page);
page             2559 mm/filemap.c   	vmf->page = page;
page             2569 mm/filemap.c   	ClearPageError(page);
page             2571 mm/filemap.c   	error = mapping->a_ops->readpage(file, page);
page             2573 mm/filemap.c   		wait_on_page_locked(page);
page             2574 mm/filemap.c   		if (!PageUptodate(page))
page             2579 mm/filemap.c   	put_page(page);
page             2594 mm/filemap.c   	if (page)
page             2595 mm/filemap.c   		put_page(page);
page             2610 mm/filemap.c   	struct page *page;
page             2613 mm/filemap.c   	xas_for_each(&xas, page, end_pgoff) {
page             2614 mm/filemap.c   		if (xas_retry(&xas, page))
page             2616 mm/filemap.c   		if (xa_is_value(page))
page             2623 mm/filemap.c   		if (PageLocked(page))
page             2625 mm/filemap.c   		if (!page_cache_get_speculative(page))
page             2629 mm/filemap.c   		if (unlikely(page != xas_reload(&xas)))
page             2631 mm/filemap.c   		page = find_subpage(page, xas.xa_index);
page             2633 mm/filemap.c   		if (!PageUptodate(page) ||
page             2634 mm/filemap.c   				PageReadahead(page) ||
page             2635 mm/filemap.c   				PageHWPoison(page))
page             2637 mm/filemap.c   		if (!trylock_page(page))
page             2640 mm/filemap.c   		if (page->mapping != mapping || !PageUptodate(page))
page             2644 mm/filemap.c   		if (page->index >= max_idx)
page             2654 mm/filemap.c   		if (alloc_set_pte(vmf, NULL, page))
page             2656 mm/filemap.c   		unlock_page(page);
page             2659 mm/filemap.c   		unlock_page(page);
page             2661 mm/filemap.c   		put_page(page);
page             2673 mm/filemap.c   	struct page *page = vmf->page;
page             2679 mm/filemap.c   	lock_page(page);
page             2680 mm/filemap.c   	if (page->mapping != inode->i_mapping) {
page             2681 mm/filemap.c   		unlock_page(page);
page             2690 mm/filemap.c   	set_page_dirty(page);
page             2691 mm/filemap.c   	wait_for_stable_page(page);
page             2744 mm/filemap.c   static struct page *wait_on_page_read(struct page *page)
page             2746 mm/filemap.c   	if (!IS_ERR(page)) {
page             2747 mm/filemap.c   		wait_on_page_locked(page);
page             2748 mm/filemap.c   		if (!PageUptodate(page)) {
page             2749 mm/filemap.c   			put_page(page);
page             2750 mm/filemap.c   			page = ERR_PTR(-EIO);
page             2753 mm/filemap.c   	return page;
page             2756 mm/filemap.c   static struct page *do_read_cache_page(struct address_space *mapping,
page             2758 mm/filemap.c   				int (*filler)(void *, struct page *),
page             2762 mm/filemap.c   	struct page *page;
page             2765 mm/filemap.c   	page = find_get_page(mapping, index);
page             2766 mm/filemap.c   	if (!page) {
page             2767 mm/filemap.c   		page = __page_cache_alloc(gfp);
page             2768 mm/filemap.c   		if (!page)
page             2770 mm/filemap.c   		err = add_to_page_cache_lru(page, mapping, index, gfp);
page             2772 mm/filemap.c   			put_page(page);
page             2781 mm/filemap.c   			err = filler(data, page);
page             2783 mm/filemap.c   			err = mapping->a_ops->readpage(data, page);
page             2786 mm/filemap.c   			put_page(page);
page             2790 mm/filemap.c   		page = wait_on_page_read(page);
page             2791 mm/filemap.c   		if (IS_ERR(page))
page             2792 mm/filemap.c   			return page;
page             2795 mm/filemap.c   	if (PageUptodate(page))
page             2829 mm/filemap.c   	wait_on_page_locked(page);
page             2830 mm/filemap.c   	if (PageUptodate(page))
page             2834 mm/filemap.c   	lock_page(page);
page             2837 mm/filemap.c   	if (!page->mapping) {
page             2838 mm/filemap.c   		unlock_page(page);
page             2839 mm/filemap.c   		put_page(page);
page             2844 mm/filemap.c   	if (PageUptodate(page)) {
page             2845 mm/filemap.c   		unlock_page(page);
page             2851 mm/filemap.c   	mark_page_accessed(page);
page             2852 mm/filemap.c   	return page;
page             2869 mm/filemap.c   struct page *read_cache_page(struct address_space *mapping,
page             2871 mm/filemap.c   				int (*filler)(void *, struct page *),
page             2892 mm/filemap.c   struct page *read_cache_page_gfp(struct address_space *mapping,
page             3124 mm/filemap.c   				struct page **pagep, void **fsdata)
page             3135 mm/filemap.c   				struct page *page, void *fsdata)
page             3139 mm/filemap.c   	return aops->write_end(file, mapping, pos, len, copied, page, fsdata);
page             3225 mm/filemap.c   struct page *grab_cache_page_write_begin(struct address_space *mapping,
page             3228 mm/filemap.c   	struct page *page;
page             3234 mm/filemap.c   	page = pagecache_get_page(mapping, index, fgp_flags,
page             3236 mm/filemap.c   	if (page)
page             3237 mm/filemap.c   		wait_for_stable_page(page);
page             3239 mm/filemap.c   	return page;
page             3253 mm/filemap.c   		struct page *page;
page             3285 mm/filemap.c   						&page, &fsdata);
page             3290 mm/filemap.c   			flush_dcache_page(page);
page             3292 mm/filemap.c   		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
page             3293 mm/filemap.c   		flush_dcache_page(page);
page             3296 mm/filemap.c   						page, fsdata);
page             3471 mm/filemap.c   int try_to_release_page(struct page *page, gfp_t gfp_mask)
page             3473 mm/filemap.c   	struct address_space * const mapping = page->mapping;
page             3475 mm/filemap.c   	BUG_ON(!PageLocked(page));
page             3476 mm/filemap.c   	if (PageWriteback(page))
page             3480 mm/filemap.c   		return mapping->a_ops->releasepage(page, gfp_mask);
page             3481 mm/filemap.c   	return try_to_free_buffers(page);
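
The mm/filemap.c block above runs from page-cache insert/delete through the PG_locked wait machinery to pagecache_get_page() and the read path. The caller-side lifecycle those helpers serve is: look up, lock, revalidate, unlock, drop the reference. A sketch with assumed mapping/index inputs:

    #include <linux/pagemap.h>

    static void touch_cached_page(struct address_space *mapping, pgoff_t index)
    {
            struct page *page = find_get_page(mapping, index);  /* +1 ref or NULL */

            if (!page)
                    return;
            lock_page(page);                /* may sleep on PG_locked */
            /* Revalidate under the lock: the page may have been truncated. */
            if (page->mapping == mapping && PageUptodate(page)) {
                    /* ... safe to read or dirty the page here ... */
            }
            unlock_page(page);              /* wakes PG_locked waiters */
            put_page(page);                 /* drop the lookup reference */
    }
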
page               76 mm/frame_vector.c 			gup_flags, (struct page **)(vec->ptrs), &locked);
page              126 mm/frame_vector.c 	struct page **pages;
page              158 mm/frame_vector.c 	struct page **pages;
page              166 mm/frame_vector.c 	pages = (struct page **)nums;
page              184 mm/frame_vector.c 	struct page **pages;
page              188 mm/frame_vector.c 	pages = (struct page **)(vec->ptrs);
page              245 mm/frontswap.c int __frontswap_store(struct page *page)
page              248 mm/frontswap.c 	swp_entry_t entry = { .val = page_private(page), };
page              255 mm/frontswap.c 	VM_BUG_ON(!PageLocked(page));
page              272 mm/frontswap.c 		ret = ops->store(type, offset, page);
page              294 mm/frontswap.c int __frontswap_load(struct page *page)
page              297 mm/frontswap.c 	swp_entry_t entry = { .val = page_private(page), };
page              304 mm/frontswap.c 	VM_BUG_ON(!PageLocked(page));
page              312 mm/frontswap.c 		ret = ops->load(type, offset, page);
page              319 mm/frontswap.c 			SetPageDirty(page);
page               54 mm/gup.c       void put_user_pages_dirty_lock(struct page **pages, unsigned long npages,
page               71 mm/gup.c       		struct page *page = compound_head(pages[index]);
page               92 mm/gup.c       		if (!PageDirty(page))
page               93 mm/gup.c       			set_page_dirty_lock(page);
page               94 mm/gup.c       		put_user_page(page);
page              108 mm/gup.c       void put_user_pages(struct page **pages, unsigned long npages)
page              123 mm/gup.c       static struct page *no_page_table(struct vm_area_struct *vma,
page              182 mm/gup.c       static struct page *follow_page_pte(struct vm_area_struct *vma,
page              187 mm/gup.c       	struct page *page;
page              222 mm/gup.c       	page = vm_normal_page(vma, address, pte);
page              223 mm/gup.c       	if (!page && pte_devmap(pte) && (flags & FOLL_GET)) {
page              230 mm/gup.c       			page = pte_page(pte);
page              233 mm/gup.c       	} else if (unlikely(!page)) {
page              236 mm/gup.c       			page = ERR_PTR(-EFAULT);
page              241 mm/gup.c       			page = pte_page(pte);
page              246 mm/gup.c       			page = ERR_PTR(ret);
page              251 mm/gup.c       	if (flags & FOLL_SPLIT && PageTransCompound(page)) {
page              253 mm/gup.c       		get_page(page);
page              255 mm/gup.c       		lock_page(page);
page              256 mm/gup.c       		ret = split_huge_page(page);
page              257 mm/gup.c       		unlock_page(page);
page              258 mm/gup.c       		put_page(page);
page              265 mm/gup.c       		if (unlikely(!try_get_page(page))) {
page              266 mm/gup.c       			page = ERR_PTR(-ENOMEM);
page              272 mm/gup.c       		    !pte_dirty(pte) && !PageDirty(page))
page              273 mm/gup.c       			set_page_dirty(page);
page              279 mm/gup.c       		mark_page_accessed(page);
page              283 mm/gup.c       		if (PageTransCompound(page))
page              295 mm/gup.c       		if (page->mapping && trylock_page(page)) {
page              303 mm/gup.c       			mlock_vma_page(page);
page              304 mm/gup.c       			unlock_page(page);
page              309 mm/gup.c       	return page;
page              317 mm/gup.c       static struct page *follow_pmd_mask(struct vm_area_struct *vma,
page              324 mm/gup.c       	struct page *page;
page              336 mm/gup.c       		page = follow_huge_pmd(mm, address, pmd, flags);
page              337 mm/gup.c       		if (page)
page              338 mm/gup.c       			return page;
page              342 mm/gup.c       		page = follow_huge_pd(vma, address,
page              345 mm/gup.c       		if (page)
page              346 mm/gup.c       			return page;
page              368 mm/gup.c       		page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap);
page              370 mm/gup.c       		if (page)
page              371 mm/gup.c       			return page;
page              398 mm/gup.c       		page = pmd_page(*pmd);
page              399 mm/gup.c       		if (is_huge_zero_page(page)) {
page              406 mm/gup.c       			if (unlikely(!try_get_page(page))) {
page              411 mm/gup.c       			lock_page(page);
page              412 mm/gup.c       			ret = split_huge_page(page);
page              413 mm/gup.c       			unlock_page(page);
page              414 mm/gup.c       			put_page(page);
page              426 mm/gup.c       	page = follow_trans_huge_pmd(vma, address, pmd, flags);
page              429 mm/gup.c       	return page;
page              432 mm/gup.c       static struct page *follow_pud_mask(struct vm_area_struct *vma,
page              439 mm/gup.c       	struct page *page;
page              446 mm/gup.c       		page = follow_huge_pud(mm, address, pud, flags);
page              447 mm/gup.c       		if (page)
page              448 mm/gup.c       			return page;
page              452 mm/gup.c       		page = follow_huge_pd(vma, address,
page              455 mm/gup.c       		if (page)
page              456 mm/gup.c       			return page;
page              461 mm/gup.c       		page = follow_devmap_pud(vma, address, pud, flags, &ctx->pgmap);
page              463 mm/gup.c       		if (page)
page              464 mm/gup.c       			return page;
page              472 mm/gup.c       static struct page *follow_p4d_mask(struct vm_area_struct *vma,
page              478 mm/gup.c       	struct page *page;
page              488 mm/gup.c       		page = follow_huge_pd(vma, address,
page              491 mm/gup.c       		if (page)
page              492 mm/gup.c       			return page;
page              517 mm/gup.c       static struct page *follow_page_mask(struct vm_area_struct *vma,
page              522 mm/gup.c       	struct page *page;
page              528 mm/gup.c       	page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
page              529 mm/gup.c       	if (!IS_ERR(page)) {
page              531 mm/gup.c       		return page;
page              540 mm/gup.c       		page = follow_huge_pgd(mm, address, pgd, flags);
page              541 mm/gup.c       		if (page)
page              542 mm/gup.c       			return page;
page              546 mm/gup.c       		page = follow_huge_pd(vma, address,
page              549 mm/gup.c       		if (page)
page              550 mm/gup.c       			return page;
page              557 mm/gup.c       struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
page              561 mm/gup.c       	struct page *page;
page              563 mm/gup.c       	page = follow_page_mask(vma, address, foll_flags, &ctx);
page              566 mm/gup.c       	return page;
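
The fragments above end at the exported wrapper follow_page() (mm/gup.c:557-566), which resolves a user address within a VMA to its backing struct page. A minimal, hedged usage sketch, assuming the caller already holds mmap_sem for read; peek_user_page() is an illustrative name, not a kernel function:

#include <linux/err.h>
#include <linux/mm.h>

/* Hedged sketch: look up the page backing a user address.
 * FOLL_GET asks follow_page() to take a reference for us. */
static int peek_user_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;

	page = follow_page(vma, addr, FOLL_GET);
	if (IS_ERR_OR_NULL(page))
		return -EFAULT;		/* not mapped, or a special PFN mapping */

	pr_info("addr %#lx -> pfn %#lx\n", addr, page_to_pfn(page));
	put_page(page);			/* balance the FOLL_GET reference */
	return 0;
}
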
page              571 mm/gup.c       		struct page **page)
page              603 mm/gup.c       	if (!page)
page              605 mm/gup.c       	*page = vm_normal_page(*vma, address, *pte);
page              606 mm/gup.c       	if (!*page) {
page              609 mm/gup.c       		*page = pte_page(*pte);
page              611 mm/gup.c       	if (unlikely(!try_get_page(*page))) {
page              790 mm/gup.c       		unsigned int gup_flags, struct page **pages,
page              813 mm/gup.c       		struct page *page;
page              858 mm/gup.c       		page = follow_page_mask(vma, start, foll_flags, &ctx);
page              859 mm/gup.c       		if (!page) {
page              876 mm/gup.c       		} else if (PTR_ERR(page) == -EEXIST) {
page              882 mm/gup.c       		} else if (IS_ERR(page)) {
page              883 mm/gup.c       			ret = PTR_ERR(page);
page              887 mm/gup.c       			pages[i] = page;
page              888 mm/gup.c       			flush_anon_page(vma, page, start);
page              889 mm/gup.c       			flush_dcache_page(page);
page             1017 mm/gup.c       						struct page **pages,
page             1166 mm/gup.c       		unsigned int gup_flags, struct page **pages,
page             1317 mm/gup.c       struct page *get_dump_page(unsigned long addr)
page             1320 mm/gup.c       	struct page *page;
page             1323 mm/gup.c       			     FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma,
page             1326 mm/gup.c       	flush_cache_page(vma, addr, page_to_pfn(page));
page             1327 mm/gup.c       	return page;
page             1333 mm/gup.c       		unsigned long nr_pages, struct page **pages,
page             1397 mm/gup.c       static struct page *new_non_cma_page(struct page *page, unsigned long private)
page             1403 mm/gup.c       	int nid = page_to_nid(page);
page             1413 mm/gup.c       	if (PageHighMem(page))
page             1417 mm/gup.c       	if (PageHuge(page)) {
page             1418 mm/gup.c       		struct hstate *h = page_hstate(page);
page             1426 mm/gup.c       	if (PageTransHuge(page)) {
page             1427 mm/gup.c       		struct page *thp;
page             1452 mm/gup.c       					struct page **pages,
page             1465 mm/gup.c       		struct page *head = compound_head(pages[i]);
page             1539 mm/gup.c       					struct page **pages,
page             1555 mm/gup.c       				  struct page **pages,
page             1606 mm/gup.c       						  struct page **pages,
page             1623 mm/gup.c       		unsigned int gup_flags, struct page **pages,
page             1653 mm/gup.c       			   unsigned int gup_flags, struct page **pages,
page             1687 mm/gup.c       			     struct page **pages, unsigned int gup_flags)
page             1801 mm/gup.c       					    struct page **pages)
page             1804 mm/gup.c       		struct page *page = pages[--(*nr)];
page             1806 mm/gup.c       		ClearPageReferenced(page);
page             1807 mm/gup.c       		put_page(page);
page             1815 mm/gup.c       static inline struct page *try_get_compound_head(struct page *page, int refs)
page             1817 mm/gup.c       	struct page *head = compound_head(page);
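
The index captures only the first lines of try_get_compound_head() (mm/gup.c:1815, 1817). For context, a reconstruction consistent with those fragments and with kernels of this vintage; the exact body may differ:

#include <linux/mm.h>
#include <linux/pagemap.h>

/* Reconstruction (hedged): take 'refs' references on the compound head,
 * refusing to resurrect a page whose refcount already hit zero.
 * page_cache_add_speculative() fails on zero-refcount pages, which is
 * what makes this safe in the lockless GUP-fast path. */
static inline struct page *try_get_compound_head(struct page *page, int refs)
{
	struct page *head = compound_head(page);

	if (WARN_ON_ONCE(page_ref_count(head) < 0))
		return NULL;
	if (unlikely(!page_cache_add_speculative(head, refs)))
		return NULL;
	return head;
}
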
page             1827 mm/gup.c       			 unsigned int flags, struct page **pages, int *nr)
page             1836 mm/gup.c       		struct page *head, *page;
page             1861 mm/gup.c       		page = pte_page(pte);
page             1863 mm/gup.c       		head = try_get_compound_head(page, 1);
page             1872 mm/gup.c       		VM_BUG_ON_PAGE(compound_head(page) != head, page);
page             1874 mm/gup.c       		SetPageReferenced(page);
page             1875 mm/gup.c       		pages[*nr] = page;
page             1900 mm/gup.c       			 unsigned int flags, struct page **pages, int *nr)
page             1908 mm/gup.c       		unsigned long end, struct page **pages, int *nr)
page             1914 mm/gup.c       		struct page *page = pfn_to_page(pfn);
page             1921 mm/gup.c       		SetPageReferenced(page);
page             1922 mm/gup.c       		pages[*nr] = page;
page             1923 mm/gup.c       		get_page(page);
page             1934 mm/gup.c       		unsigned long end, struct page **pages, int *nr)
page             1951 mm/gup.c       		unsigned long end, struct page **pages, int *nr)
page             1968 mm/gup.c       		unsigned long end, struct page **pages, int *nr)
page             1975 mm/gup.c       		unsigned long end, struct page **pages, int *nr)
page             1992 mm/gup.c       		       struct page **pages, int *nr)
page             1995 mm/gup.c       	struct page *head, *page;
page             2014 mm/gup.c       	page = head + ((addr & (sz-1)) >> PAGE_SHIFT);
page             2016 mm/gup.c       		VM_BUG_ON(compound_head(page) != head);
page             2017 mm/gup.c       		pages[*nr] = page;
page             2019 mm/gup.c       		page++;
page             2043 mm/gup.c       		struct page **pages, int *nr)
page             2061 mm/gup.c       		struct page **pages, int *nr)
page             2069 mm/gup.c       			struct page **pages, int *nr)
page             2071 mm/gup.c       	struct page *head, *page;
page             2084 mm/gup.c       	page = pmd_page(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
page             2086 mm/gup.c       		pages[*nr] = page;
page             2088 mm/gup.c       		page++;
page             2110 mm/gup.c       		unsigned long end, unsigned int flags, struct page **pages, int *nr)
page             2112 mm/gup.c       	struct page *head, *page;
page             2125 mm/gup.c       	page = pud_page(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
page             2127 mm/gup.c       		pages[*nr] = page;
page             2129 mm/gup.c       		page++;
page             2152 mm/gup.c       			struct page **pages, int *nr)
page             2155 mm/gup.c       	struct page *head, *page;
page             2162 mm/gup.c       	page = pgd_page(orig) + ((addr & ~PGDIR_MASK) >> PAGE_SHIFT);
page             2164 mm/gup.c       		pages[*nr] = page;
page             2166 mm/gup.c       		page++;
page             2188 mm/gup.c       		unsigned int flags, struct page **pages, int *nr)
page             2231 mm/gup.c       			 unsigned int flags, struct page **pages, int *nr)
page             2259 mm/gup.c       			 unsigned int flags, struct page **pages, int *nr)
page             2284 mm/gup.c       		unsigned int flags, struct page **pages, int *nr)
page             2310 mm/gup.c       		unsigned int flags, struct page **pages, int *nr)
page             2340 mm/gup.c       			  struct page **pages)
page             2385 mm/gup.c       				   unsigned int gup_flags, struct page **pages)
page             2424 mm/gup.c       			unsigned int gup_flags, struct page **pages)
page               28 mm/gup_benchmark.c 	struct page **pages;
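
The mm/gup.c entries culminate in the get_user_pages_fast() family (the fragments around lines 2340-2424), the usual way drivers pin user memory. A hedged caller-side sketch of the pin/use/release cycle; pin_user_buffer() is an illustrative name:

#include <linux/mm.h>
#include <linux/slab.h>

/* Hedged sketch: pin nr user pages for direct access, then release.
 * In kernels of this era the pin is dropped with put_page(). */
static int pin_user_buffer(unsigned long start, int nr)
{
	struct page **pages;
	int i, got;

	pages = kcalloc(nr, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	got = get_user_pages_fast(start, nr, FOLL_WRITE, pages);
	if (got < 0) {
		kfree(pages);
		return got;
	}

	/* ... access pages[0..got) ... */

	for (i = 0; i < got; i++)
		put_page(pages[i]);
	kfree(pages);
	return got == nr ? 0 : -EFAULT;
}
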
page               58 mm/highmem.c   static inline unsigned int get_pkmap_color(struct page *page)
page              150 mm/highmem.c   struct page *kmap_to_page(void *vaddr)
page              171 mm/highmem.c   		struct page *page;
page              193 mm/highmem.c   		page = pte_page(pkmap_page_table[i]);
page              196 mm/highmem.c   		set_page_address(page, NULL);
page              213 mm/highmem.c   static inline unsigned long map_new_virtual(struct page *page)
page              218 mm/highmem.c   	unsigned int color = get_pkmap_color(page);
page              250 mm/highmem.c   			if (page_address(page))
page              251 mm/highmem.c   				return (unsigned long)page_address(page);
page              259 mm/highmem.c   		   &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
page              262 mm/highmem.c   	set_page_address(page, (void *)vaddr);
page              275 mm/highmem.c   void *kmap_high(struct page *page)
page              284 mm/highmem.c   	vaddr = (unsigned long)page_address(page);
page              286 mm/highmem.c   		vaddr = map_new_virtual(page);
page              306 mm/highmem.c   void *kmap_high_get(struct page *page)
page              311 mm/highmem.c   	vaddr = (unsigned long)page_address(page);
page              328 mm/highmem.c   void kunmap_high(struct page *page)
page              334 mm/highmem.c   	unsigned int color = get_pkmap_color(page);
page              338 mm/highmem.c   	vaddr = (unsigned long)page_address(page);
page              382 mm/highmem.c   	struct page *page;
page              397 mm/highmem.c   static struct page_address_slot *page_slot(const struct page *page)
page              399 mm/highmem.c   	return &page_address_htable[hash_ptr(page, PA_HASH_ORDER)];
page              408 mm/highmem.c   void *page_address(const struct page *page)
page              414 mm/highmem.c   	if (!PageHighMem(page))
page              415 mm/highmem.c   		return lowmem_page_address(page);
page              417 mm/highmem.c   	pas = page_slot(page);
page              424 mm/highmem.c   			if (pam->page == page) {
page              442 mm/highmem.c   void set_page_address(struct page *page, void *virtual)
page              448 mm/highmem.c   	BUG_ON(!PageHighMem(page));
page              450 mm/highmem.c   	pas = page_slot(page);
page              453 mm/highmem.c   		pam->page = page;
page              462 mm/highmem.c   			if (pam->page == page) {
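
kmap_high()/kunmap_high() above are the slow path behind kmap()/kunmap(): a highmem page has no permanent kernel mapping, so one is created in the pkmap area on demand and torn down when the count drops. A hedged sketch of the ordinary caller-side usage; zero_any_page() is an illustrative name:

#include <linux/highmem.h>
#include <linux/string.h>

/* Hedged sketch: zero one page that may live in highmem. kmap() falls
 * through to kmap_high() (indexed above) only for highmem pages, and
 * may sleep in map_new_virtual() while all pkmap slots are in use. */
static void zero_any_page(struct page *page)
{
	void *vaddr = kmap(page);

	memset(vaddr, 0, PAGE_SIZE);
	kunmap(page);
}
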
page              977 mm/hmm.c       		struct page *page;
page              989 mm/hmm.c       		page = hmm_device_entry_to_page(range, range->pfns[i]);
page              990 mm/hmm.c       		if (page == NULL)
page             1003 mm/hmm.c       		daddrs[i] = dma_map_page(device, page, 0, PAGE_SIZE, dir);
page             1017 mm/hmm.c       		struct page *page;
page             1019 mm/hmm.c       		page = hmm_device_entry_to_page(range, range->pfns[i]);
page             1020 mm/hmm.c       		if (page == NULL)
page             1070 mm/hmm.c       		struct page *page;
page             1072 mm/hmm.c       		page = hmm_device_entry_to_page(range, range->pfns[i]);
page             1073 mm/hmm.c       		if (page == NULL)
page             1085 mm/hmm.c       				set_page_dirty(page);
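
The mm/hmm.c fragments map each page of a range for device access with dma_map_page() and later walk the same array to unmap and, for writable mappings, dirty the pages. A hedged sketch of the map/check/unmap cycle for a single page; map_one() is an illustrative name, dev and dir come from the caller:

#include <linux/dma-mapping.h>

/* Hedged sketch: DMA-map one page the way the indexed hmm.c loop does,
 * including the mandatory error check. Unmap later with
 * dma_unmap_page(dev, *daddr, PAGE_SIZE, dir). */
static int map_one(struct device *dev, struct page *page,
		   enum dma_data_direction dir, dma_addr_t *daddr)
{
	*daddr = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
	if (dma_mapping_error(dev, *daddr))
		return -EFAULT;
	return 0;
}
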
page               63 mm/huge_memory.c struct page *huge_zero_page __read_mostly;
page               80 mm/huge_memory.c static struct page *get_huge_zero_page(void)
page               82 mm/huge_memory.c 	struct page *zero_page;
page              116 mm/huge_memory.c struct page *mm_get_huge_zero_page(struct mm_struct *mm)
page              147 mm/huge_memory.c 		struct page *zero_page = xchg(&huge_zero_page, NULL);
page              492 mm/huge_memory.c static inline struct deferred_split *get_deferred_split_queue(struct page *page)
page              494 mm/huge_memory.c 	struct mem_cgroup *memcg = compound_head(page)->mem_cgroup;
page              495 mm/huge_memory.c 	struct pglist_data *pgdat = NODE_DATA(page_to_nid(page));
page              503 mm/huge_memory.c static inline struct deferred_split *get_deferred_split_queue(struct page *page)
page              505 mm/huge_memory.c 	struct pglist_data *pgdat = NODE_DATA(page_to_nid(page));
page              511 mm/huge_memory.c void prep_transhuge_page(struct page *page)
page              518 mm/huge_memory.c 	INIT_LIST_HEAD(page_deferred_list(page));
page              519 mm/huge_memory.c 	set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR);
page              576 mm/huge_memory.c 			struct page *page, gfp_t gfp)
page              584 mm/huge_memory.c 	VM_BUG_ON_PAGE(!PageCompound(page), page);
page              586 mm/huge_memory.c 	if (mem_cgroup_try_charge_delay(page, vma->vm_mm, gfp, &memcg, true)) {
page              587 mm/huge_memory.c 		put_page(page);
page              598 mm/huge_memory.c 	clear_huge_page(page, vmf->address, HPAGE_PMD_NR);
page              604 mm/huge_memory.c 	__SetPageUptodate(page);
page              621 mm/huge_memory.c 			mem_cgroup_cancel_charge(page, memcg, true);
page              622 mm/huge_memory.c 			put_page(page);
page              629 mm/huge_memory.c 		entry = mk_huge_pmd(page, vma->vm_page_prot);
page              631 mm/huge_memory.c 		page_add_new_anon_rmap(page, vma, haddr, true);
page              632 mm/huge_memory.c 		mem_cgroup_commit_charge(page, memcg, false, true);
page              633 mm/huge_memory.c 		lru_cache_add_active_or_unevictable(page, vma);
page              649 mm/huge_memory.c 	mem_cgroup_cancel_charge(page, memcg, true);
page              650 mm/huge_memory.c 	put_page(page);
page              693 mm/huge_memory.c 		struct page *zero_page)
page              711 mm/huge_memory.c 	struct page *page;
page              724 mm/huge_memory.c 		struct page *zero_page;
page              760 mm/huge_memory.c 	page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
page              761 mm/huge_memory.c 	if (unlikely(!page)) {
page              765 mm/huge_memory.c 	prep_transhuge_page(page);
page              766 mm/huge_memory.c 	return __do_huge_pmd_anonymous_page(vmf, page, gfp);
page              935 mm/huge_memory.c struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
page              940 mm/huge_memory.c 	struct page *page;
page              972 mm/huge_memory.c 	page = pfn_to_page(pfn);
page              973 mm/huge_memory.c 	get_page(page);
page              975 mm/huge_memory.c 	return page;
page              983 mm/huge_memory.c 	struct page *src_page;
page             1034 mm/huge_memory.c 		struct page *zero_page;
page             1081 mm/huge_memory.c struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
page             1086 mm/huge_memory.c 	struct page *page;
page             1112 mm/huge_memory.c 	page = pfn_to_page(pfn);
page             1113 mm/huge_memory.c 	get_page(page);
page             1115 mm/huge_memory.c 	return page;
page             1199 mm/huge_memory.c 			pmd_t orig_pmd, struct page *page)
page             1208 mm/huge_memory.c 	struct page **pages;
page             1211 mm/huge_memory.c 	pages = kmalloc_array(HPAGE_PMD_NR, sizeof(struct page *),
page             1220 mm/huge_memory.c 					       vmf->address, page_to_nid(page));
page             1241 mm/huge_memory.c 		copy_user_highpage(pages[i], page + i,
page             1254 mm/huge_memory.c 	VM_BUG_ON_PAGE(!PageHead(page), page);
page             1287 mm/huge_memory.c 	page_remove_rmap(page, true);
page             1297 mm/huge_memory.c 	put_page(page);
page             1318 mm/huge_memory.c 	struct page *page = NULL, *new_page;
page             1333 mm/huge_memory.c 	page = pmd_page(orig_pmd);
page             1334 mm/huge_memory.c 	VM_BUG_ON_PAGE(!PageCompound(page) || !PageHead(page), page);
page             1339 mm/huge_memory.c 	if (!trylock_page(page)) {
page             1340 mm/huge_memory.c 		get_page(page);
page             1342 mm/huge_memory.c 		lock_page(page);
page             1345 mm/huge_memory.c 			unlock_page(page);
page             1346 mm/huge_memory.c 			put_page(page);
page             1349 mm/huge_memory.c 		put_page(page);
page             1351 mm/huge_memory.c 	if (reuse_swap_page(page, NULL)) {
page             1358 mm/huge_memory.c 		unlock_page(page);
page             1361 mm/huge_memory.c 	unlock_page(page);
page             1362 mm/huge_memory.c 	get_page(page);
page             1375 mm/huge_memory.c 		if (!page) {
page             1379 mm/huge_memory.c 			ret = do_huge_pmd_wp_page_fallback(vmf, orig_pmd, page);
page             1384 mm/huge_memory.c 			put_page(page);
page             1394 mm/huge_memory.c 		if (page)
page             1395 mm/huge_memory.c 			put_page(page);
page             1404 mm/huge_memory.c 	if (!page)
page             1407 mm/huge_memory.c 		copy_user_huge_page(new_page, page, vmf->address,
page             1416 mm/huge_memory.c 	if (page)
page             1417 mm/huge_memory.c 		put_page(page);
page             1433 mm/huge_memory.c 		if (!page) {
page             1436 mm/huge_memory.c 			VM_BUG_ON_PAGE(!PageHead(page), page);
page             1437 mm/huge_memory.c 			page_remove_rmap(page, true);
page             1438 mm/huge_memory.c 			put_page(page);
page             1465 mm/huge_memory.c struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
page             1471 mm/huge_memory.c 	struct page *page = NULL;
page             1486 mm/huge_memory.c 	page = pmd_page(*pmd);
page             1487 mm/huge_memory.c 	VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page);
page             1512 mm/huge_memory.c 		if (PageAnon(page) && compound_mapcount(page) != 1)
page             1514 mm/huge_memory.c 		if (PageDoubleMap(page) || !page->mapping)
page             1516 mm/huge_memory.c 		if (!trylock_page(page))
page             1519 mm/huge_memory.c 		if (page->mapping && !PageDoubleMap(page))
page             1520 mm/huge_memory.c 			mlock_vma_page(page);
page             1521 mm/huge_memory.c 		unlock_page(page);
page             1524 mm/huge_memory.c 	page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
page             1525 mm/huge_memory.c 	VM_BUG_ON_PAGE(!PageCompound(page) && !is_zone_device_page(page), page);
page             1527 mm/huge_memory.c 		get_page(page);
page             1530 mm/huge_memory.c 	return page;
page             1538 mm/huge_memory.c 	struct page *page;
page             1557 mm/huge_memory.c 		page = pmd_page(*vmf->pmd);
page             1558 mm/huge_memory.c 		if (!get_page_unless_zero(page))
page             1561 mm/huge_memory.c 		put_and_wait_on_page_locked(page);
page             1565 mm/huge_memory.c 	page = pmd_page(pmd);
page             1566 mm/huge_memory.c 	BUG_ON(is_huge_zero_page(page));
page             1567 mm/huge_memory.c 	page_nid = page_to_nid(page);
page             1568 mm/huge_memory.c 	last_cpupid = page_cpupid_last(page);
page             1583 mm/huge_memory.c 	page_locked = trylock_page(page);
page             1584 mm/huge_memory.c 	target_nid = mpol_misplaced(page, vma, haddr);
page             1594 mm/huge_memory.c 		if (!get_page_unless_zero(page))
page             1597 mm/huge_memory.c 		put_and_wait_on_page_locked(page);
page             1605 mm/huge_memory.c 	get_page(page);
page             1607 mm/huge_memory.c 	anon_vma = page_lock_anon_vma_read(page);
page             1612 mm/huge_memory.c 		unlock_page(page);
page             1613 mm/huge_memory.c 		put_page(page);
page             1620 mm/huge_memory.c 		put_page(page);
page             1658 mm/huge_memory.c 				vmf->pmd, pmd, vmf->address, page, target_nid);
page             1667 mm/huge_memory.c 	BUG_ON(!PageLocked(page));
page             1675 mm/huge_memory.c 	unlock_page(page);
page             1699 mm/huge_memory.c 	struct page *page;
page             1719 mm/huge_memory.c 	page = pmd_page(orig_pmd);
page             1724 mm/huge_memory.c 	if (page_mapcount(page) != 1)
page             1727 mm/huge_memory.c 	if (!trylock_page(page))
page             1735 mm/huge_memory.c 		get_page(page);
page             1737 mm/huge_memory.c 		split_huge_page(page);
page             1738 mm/huge_memory.c 		unlock_page(page);
page             1739 mm/huge_memory.c 		put_page(page);
page             1743 mm/huge_memory.c 	if (PageDirty(page))
page             1744 mm/huge_memory.c 		ClearPageDirty(page);
page             1745 mm/huge_memory.c 	unlock_page(page);
page             1756 mm/huge_memory.c 	mark_page_lazyfree(page);
page             1804 mm/huge_memory.c 		struct page *page = NULL;
page             1808 mm/huge_memory.c 			page = pmd_page(orig_pmd);
page             1809 mm/huge_memory.c 			page_remove_rmap(page, true);
page             1810 mm/huge_memory.c 			VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
page             1811 mm/huge_memory.c 			VM_BUG_ON_PAGE(!PageHead(page), page);
page             1817 mm/huge_memory.c 			page = pfn_to_page(swp_offset(entry));
page             1822 mm/huge_memory.c 		if (PageAnon(page)) {
page             1828 mm/huge_memory.c 			add_mm_counter(tlb->mm, mm_counter_file(page), -HPAGE_PMD_NR);
page             1833 mm/huge_memory.c 			tlb_remove_page_size(tlb, page, HPAGE_PMD_SIZE);
page             2143 mm/huge_memory.c 	struct page *page;
page             2168 mm/huge_memory.c 		page = pmd_page(_pmd);
page             2169 mm/huge_memory.c 		if (!PageDirty(page) && pmd_dirty(_pmd))
page             2170 mm/huge_memory.c 			set_page_dirty(page);
page             2171 mm/huge_memory.c 		if (!PageReferenced(page) && pmd_young(_pmd))
page             2172 mm/huge_memory.c 			SetPageReferenced(page);
page             2173 mm/huge_memory.c 		page_remove_rmap(page, true);
page             2174 mm/huge_memory.c 		put_page(page);
page             2175 mm/huge_memory.c 		add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR);
page             2217 mm/huge_memory.c 		page = pfn_to_page(swp_offset(entry));
page             2222 mm/huge_memory.c 		page = pmd_page(old_pmd);
page             2224 mm/huge_memory.c 			SetPageDirty(page);
page             2229 mm/huge_memory.c 	VM_BUG_ON_PAGE(!page_count(page), page);
page             2230 mm/huge_memory.c 	page_ref_add(page, HPAGE_PMD_NR - 1);
page             2248 mm/huge_memory.c 			swp_entry = make_migration_entry(page + i, write);
page             2253 mm/huge_memory.c 			entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot));
page             2265 mm/huge_memory.c 		atomic_inc(&page[i]._mapcount);
page             2273 mm/huge_memory.c 	if (compound_mapcount(page) > 1 && !TestSetPageDoubleMap(page)) {
page             2275 mm/huge_memory.c 			atomic_inc(&page[i]._mapcount);
page             2278 mm/huge_memory.c 	if (atomic_add_negative(-1, compound_mapcount_ptr(page))) {
page             2280 mm/huge_memory.c 		__dec_node_page_state(page, NR_ANON_THPS);
page             2281 mm/huge_memory.c 		if (TestClearPageDoubleMap(page)) {
page             2284 mm/huge_memory.c 				atomic_dec(&page[i]._mapcount);
page             2293 mm/huge_memory.c 			page_remove_rmap(page + i, false);
page             2294 mm/huge_memory.c 			put_page(page + i);
page             2300 mm/huge_memory.c 		unsigned long address, bool freeze, struct page *page)
page             2315 mm/huge_memory.c 	VM_BUG_ON(freeze && !page);
page             2316 mm/huge_memory.c 	if (page && page != pmd_page(*pmd))
page             2320 mm/huge_memory.c 		page = pmd_page(*pmd);
page             2321 mm/huge_memory.c 		if (PageMlocked(page))
page             2322 mm/huge_memory.c 			clear_page_mlock(page);
page             2345 mm/huge_memory.c 		bool freeze, struct page *page)
page             2366 mm/huge_memory.c 	__split_huge_pmd(vma, pmd, address, freeze, page);
page             2410 mm/huge_memory.c static void unmap_page(struct page *page)
page             2416 mm/huge_memory.c 	VM_BUG_ON_PAGE(!PageHead(page), page);
page             2418 mm/huge_memory.c 	if (PageAnon(page))
page             2421 mm/huge_memory.c 	unmap_success = try_to_unmap(page, ttu_flags);
page             2422 mm/huge_memory.c 	VM_BUG_ON_PAGE(!unmap_success, page);
page             2425 mm/huge_memory.c static void remap_page(struct page *page)
page             2428 mm/huge_memory.c 	if (PageTransHuge(page)) {
page             2429 mm/huge_memory.c 		remove_migration_ptes(page, page, true);
page             2432 mm/huge_memory.c 			remove_migration_ptes(page + i, page + i, true);
page             2436 mm/huge_memory.c static void __split_huge_page_tail(struct page *head, int tail,
page             2439 mm/huge_memory.c 	struct page *page_tail = head + tail;
page             2498 mm/huge_memory.c static void __split_huge_page(struct page *page, struct list_head *list,
page             2501 mm/huge_memory.c 	struct page *head = compound_head(page);
page             2530 mm/huge_memory.c 		} else if (!PageAnon(page)) {
page             2563 mm/huge_memory.c 		struct page *subpage = head + i;
page             2564 mm/huge_memory.c 		if (subpage == page)
page             2579 mm/huge_memory.c int total_mapcount(struct page *page)
page             2583 mm/huge_memory.c 	VM_BUG_ON_PAGE(PageTail(page), page);
page             2585 mm/huge_memory.c 	if (likely(!PageCompound(page)))
page             2586 mm/huge_memory.c 		return atomic_read(&page->_mapcount) + 1;
page             2588 mm/huge_memory.c 	compound = compound_mapcount(page);
page             2589 mm/huge_memory.c 	if (PageHuge(page))
page             2593 mm/huge_memory.c 		ret += atomic_read(&page[i]._mapcount) + 1;
page             2595 mm/huge_memory.c 	if (!PageAnon(page))
page             2597 mm/huge_memory.c 	if (PageDoubleMap(page))
page             2626 mm/huge_memory.c int page_trans_huge_mapcount(struct page *page, int *total_mapcount)
page             2631 mm/huge_memory.c 	VM_BUG_ON_PAGE(PageHuge(page), page);
page             2633 mm/huge_memory.c 	if (likely(!PageTransCompound(page))) {
page             2634 mm/huge_memory.c 		mapcount = atomic_read(&page->_mapcount) + 1;
page             2640 mm/huge_memory.c 	page = compound_head(page);
page             2644 mm/huge_memory.c 		mapcount = atomic_read(&page[i]._mapcount) + 1;
page             2648 mm/huge_memory.c 	if (PageDoubleMap(page)) {
page             2652 mm/huge_memory.c 	mapcount = compound_mapcount(page);
page             2661 mm/huge_memory.c bool can_split_huge_page(struct page *page, int *pextra_pins)
page             2666 mm/huge_memory.c 	if (PageAnon(page))
page             2667 mm/huge_memory.c 		extra_pins = PageSwapCache(page) ? HPAGE_PMD_NR : 0;
page             2672 mm/huge_memory.c 	return total_mapcount(page) == page_count(page) - extra_pins - 1;
page             2694 mm/huge_memory.c int split_huge_page_to_list(struct page *page, struct list_head *list)
page             2696 mm/huge_memory.c 	struct page *head = compound_head(page);
page             2698 mm/huge_memory.c 	struct deferred_split *ds_queue = get_deferred_split_queue(page);
page             2707 mm/huge_memory.c 	VM_BUG_ON_PAGE(!PageLocked(page), page);
page             2708 mm/huge_memory.c 	VM_BUG_ON_PAGE(!PageCompound(page), page);
page             2710 mm/huge_memory.c 	if (PageWriteback(page))
page             2761 mm/huge_memory.c 	mlocked = PageMlocked(page);
page             2794 mm/huge_memory.c 			if (PageSwapBacked(page))
page             2795 mm/huge_memory.c 				__dec_node_page_state(page, NR_SHMEM_THPS);
page             2797 mm/huge_memory.c 				__dec_node_page_state(page, NR_FILE_THPS);
page             2801 mm/huge_memory.c 		__split_huge_page(page, list, end, flags);
page             2812 mm/huge_memory.c 			if (PageTail(page))
page             2814 mm/huge_memory.c 			dump_page(page, "total_mapcount(head) > 0");
page             2837 mm/huge_memory.c void free_transhuge_page(struct page *page)
page             2839 mm/huge_memory.c 	struct deferred_split *ds_queue = get_deferred_split_queue(page);
page             2843 mm/huge_memory.c 	if (!list_empty(page_deferred_list(page))) {
page             2845 mm/huge_memory.c 		list_del(page_deferred_list(page));
page             2848 mm/huge_memory.c 	free_compound_page(page);
page             2851 mm/huge_memory.c void deferred_split_huge_page(struct page *page)
page             2853 mm/huge_memory.c 	struct deferred_split *ds_queue = get_deferred_split_queue(page);
page             2855 mm/huge_memory.c 	struct mem_cgroup *memcg = compound_head(page)->mem_cgroup;
page             2859 mm/huge_memory.c 	VM_BUG_ON_PAGE(!PageTransHuge(page), page);
page             2871 mm/huge_memory.c 	if (PageSwapCache(page))
page             2875 mm/huge_memory.c 	if (list_empty(page_deferred_list(page))) {
page             2877 mm/huge_memory.c 		list_add_tail(page_deferred_list(page), &ds_queue->split_queue);
page             2881 mm/huge_memory.c 			memcg_set_shrinker_bit(memcg, page_to_nid(page),
page             2908 mm/huge_memory.c 	struct page *page;
page             2919 mm/huge_memory.c 		page = list_entry((void *)pos, struct page, mapping);
page             2920 mm/huge_memory.c 		page = compound_head(page);
page             2921 mm/huge_memory.c 		if (get_page_unless_zero(page)) {
page             2922 mm/huge_memory.c 			list_move(page_deferred_list(page), &list);
page             2925 mm/huge_memory.c 			list_del_init(page_deferred_list(page));
page             2934 mm/huge_memory.c 		page = list_entry((void *)pos, struct page, mapping);
page             2935 mm/huge_memory.c 		if (!trylock_page(page))
page             2938 mm/huge_memory.c 		if (!split_huge_page(page))
page             2940 mm/huge_memory.c 		unlock_page(page);
page             2942 mm/huge_memory.c 		put_page(page);
page             2970 mm/huge_memory.c 	struct page *page;
page             2983 mm/huge_memory.c 			page = pfn_to_page(pfn);
page             2984 mm/huge_memory.c 			if (!get_page_unless_zero(page))
page             2987 mm/huge_memory.c 			if (zone != page_zone(page))
page             2990 mm/huge_memory.c 			if (!PageHead(page) || PageHuge(page) || !PageLRU(page))
page             2994 mm/huge_memory.c 			lock_page(page);
page             2995 mm/huge_memory.c 			if (!split_huge_page(page))
page             2997 mm/huge_memory.c 			unlock_page(page);
page             2999 mm/huge_memory.c 			put_page(page);
page             3021 mm/huge_memory.c 		struct page *page)
page             3036 mm/huge_memory.c 		set_page_dirty(page);
page             3037 mm/huge_memory.c 	entry = make_migration_entry(page, pmd_write(pmdval));
page             3042 mm/huge_memory.c 	page_remove_rmap(page, true);
page             3043 mm/huge_memory.c 	put_page(page);
page             3046 mm/huge_memory.c void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
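
Several of the mm/huge_memory.c fragments repeat one idiom: the madvise path (lines 1735-1739), the debugfs scanner (lines 2994-2999), and the deferred-split worker (lines 2934-2942) all take a reference, lock the page, split it, then unlock and drop. A hedged condensation of that idiom; try_split() is an illustrative name:

#include <linux/huge_mm.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

/* Hedged sketch: split_huge_page() requires a referenced, locked head
 * page and returns 0 on success. get_page_unless_zero() guards against
 * racing with the page being freed. */
static int try_split(struct page *page)
{
	int ret;

	if (!get_page_unless_zero(page))
		return -EBUSY;		/* page was already on its way out */
	lock_page(page);
	ret = split_huge_page(page);
	unlock_page(page);
	put_page(page);
	return ret;
}
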
page              866 mm/hugetlb.c   static void enqueue_huge_page(struct hstate *h, struct page *page)
page              868 mm/hugetlb.c   	int nid = page_to_nid(page);
page              869 mm/hugetlb.c   	list_move(&page->lru, &h->hugepage_freelists[nid]);
page              874 mm/hugetlb.c   static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
page              876 mm/hugetlb.c   	struct page *page;
page              878 mm/hugetlb.c   	list_for_each_entry(page, &h->hugepage_freelists[nid], lru)
page              879 mm/hugetlb.c   		if (!PageHWPoison(page))
page              885 mm/hugetlb.c   	if (&h->hugepage_freelists[nid] == &page->lru)
page              887 mm/hugetlb.c   	list_move(&page->lru, &h->hugepage_activelist);
page              888 mm/hugetlb.c   	set_page_refcounted(page);
page              891 mm/hugetlb.c   	return page;
page              894 mm/hugetlb.c   static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask, int nid,
page              908 mm/hugetlb.c   		struct page *page;
page              920 mm/hugetlb.c   		page = dequeue_huge_page_node_exact(h, node);
page              921 mm/hugetlb.c   		if (page)
page              922 mm/hugetlb.c   			return page;
page              939 mm/hugetlb.c   static struct page *dequeue_huge_page_vma(struct hstate *h,
page              944 mm/hugetlb.c   	struct page *page;
page              965 mm/hugetlb.c   	page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
page              966 mm/hugetlb.c   	if (page && !avoid_reserve && vma_has_reserves(vma, chg)) {
page              967 mm/hugetlb.c   		SetPagePrivate(page);
page              972 mm/hugetlb.c   	return page;
page             1050 mm/hugetlb.c   static void destroy_compound_gigantic_page(struct page *page,
page             1055 mm/hugetlb.c   	struct page *p = page + 1;
page             1057 mm/hugetlb.c   	atomic_set(compound_mapcount_ptr(page), 0);
page             1058 mm/hugetlb.c   	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
page             1063 mm/hugetlb.c   	set_compound_order(page, 0);
page             1064 mm/hugetlb.c   	__ClearPageHead(page);
page             1067 mm/hugetlb.c   static void free_gigantic_page(struct page *page, unsigned int order)
page             1069 mm/hugetlb.c   	free_contig_range(page_to_pfn(page), 1 << order);
page             1085 mm/hugetlb.c   	struct page *page;
page             1088 mm/hugetlb.c   		page = pfn_to_online_page(i);
page             1089 mm/hugetlb.c   		if (!page)
page             1092 mm/hugetlb.c   		if (page_zone(page) != z)
page             1095 mm/hugetlb.c   		if (PageReserved(page))
page             1098 mm/hugetlb.c   		if (page_count(page) > 0)
page             1101 mm/hugetlb.c   		if (PageHuge(page))
page             1115 mm/hugetlb.c   static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
page             1154 mm/hugetlb.c   static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
page             1155 mm/hugetlb.c   static void prep_compound_gigantic_page(struct page *page, unsigned int order);
page             1157 mm/hugetlb.c   static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
page             1165 mm/hugetlb.c   static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
page             1170 mm/hugetlb.c   static inline void free_gigantic_page(struct page *page, unsigned int order) { }
page             1171 mm/hugetlb.c   static inline void destroy_compound_gigantic_page(struct page *page,
page             1175 mm/hugetlb.c   static void update_and_free_page(struct hstate *h, struct page *page)
page             1183 mm/hugetlb.c   	h->nr_huge_pages_node[page_to_nid(page)]--;
page             1185 mm/hugetlb.c   		page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
page             1190 mm/hugetlb.c   	VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
page             1191 mm/hugetlb.c   	set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
page             1192 mm/hugetlb.c   	set_page_refcounted(page);
page             1194 mm/hugetlb.c   		destroy_compound_gigantic_page(page, huge_page_order(h));
page             1195 mm/hugetlb.c   		free_gigantic_page(page, huge_page_order(h));
page             1197 mm/hugetlb.c   		__free_pages(page, huge_page_order(h));
page             1218 mm/hugetlb.c   bool page_huge_active(struct page *page)
page             1220 mm/hugetlb.c   	VM_BUG_ON_PAGE(!PageHuge(page), page);
page             1221 mm/hugetlb.c   	return PageHead(page) && PagePrivate(&page[1]);
page             1225 mm/hugetlb.c   static void set_page_huge_active(struct page *page)
page             1227 mm/hugetlb.c   	VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
page             1228 mm/hugetlb.c   	SetPagePrivate(&page[1]);
page             1231 mm/hugetlb.c   static void clear_page_huge_active(struct page *page)
page             1233 mm/hugetlb.c   	VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
page             1234 mm/hugetlb.c   	ClearPagePrivate(&page[1]);
page             1241 mm/hugetlb.c   static inline bool PageHugeTemporary(struct page *page)
page             1243 mm/hugetlb.c   	if (!PageHuge(page))
page             1246 mm/hugetlb.c   	return (unsigned long)page[2].mapping == -1U;
page             1249 mm/hugetlb.c   static inline void SetPageHugeTemporary(struct page *page)
page             1251 mm/hugetlb.c   	page[2].mapping = (void *)-1U;
page             1254 mm/hugetlb.c   static inline void ClearPageHugeTemporary(struct page *page)
page             1256 mm/hugetlb.c   	page[2].mapping = NULL;
page             1259 mm/hugetlb.c   static void __free_huge_page(struct page *page)
page             1265 mm/hugetlb.c   	struct hstate *h = page_hstate(page);
page             1266 mm/hugetlb.c   	int nid = page_to_nid(page);
page             1268 mm/hugetlb.c   		(struct hugepage_subpool *)page_private(page);
page             1271 mm/hugetlb.c   	VM_BUG_ON_PAGE(page_count(page), page);
page             1272 mm/hugetlb.c   	VM_BUG_ON_PAGE(page_mapcount(page), page);
page             1274 mm/hugetlb.c   	set_page_private(page, 0);
page             1275 mm/hugetlb.c   	page->mapping = NULL;
page             1276 mm/hugetlb.c   	restore_reserve = PagePrivate(page);
page             1277 mm/hugetlb.c   	ClearPagePrivate(page);
page             1299 mm/hugetlb.c   	clear_page_huge_active(page);
page             1301 mm/hugetlb.c   				     pages_per_huge_page(h), page);
page             1305 mm/hugetlb.c   	if (PageHugeTemporary(page)) {
page             1306 mm/hugetlb.c   		list_del(&page->lru);
page             1307 mm/hugetlb.c   		ClearPageHugeTemporary(page);
page             1308 mm/hugetlb.c   		update_and_free_page(h, page);
page             1311 mm/hugetlb.c   		list_del(&page->lru);
page             1312 mm/hugetlb.c   		update_and_free_page(h, page);
page             1316 mm/hugetlb.c   		arch_clear_hugepage_flags(page);
page             1317 mm/hugetlb.c   		enqueue_huge_page(h, page);
page             1337 mm/hugetlb.c   	struct page *page;
page             1342 mm/hugetlb.c   		page = container_of((struct address_space **)node,
page             1343 mm/hugetlb.c   				     struct page, mapping);
page             1345 mm/hugetlb.c   		__free_huge_page(page);
page             1350 mm/hugetlb.c   void free_huge_page(struct page *page)
page             1361 mm/hugetlb.c   		if (llist_add((struct llist_node *)&page->mapping,
page             1367 mm/hugetlb.c   	__free_huge_page(page);
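
free_huge_page() can be reached from contexts where hugetlb_lock must not be taken, so the fragments at mm/hugetlb.c:1337-1367 defer the real work: the page's then-unused mapping field is overlaid with an llist_node (note the container_of() over mapping at line 1342), pushed onto a lock-free llist, and drained from a work item. A hedged sketch of that general defer-to-worker pattern with ordinary types rather than the struct page overlay; all names here are illustrative:

#include <linux/llist.h>
#include <linux/workqueue.h>

static LLIST_HEAD(pending);

struct deferred_item {
	struct llist_node node;
	/* payload ... */
};

static void drain_fn(struct work_struct *work)
{
	struct llist_node *n = llist_del_all(&pending);
	struct deferred_item *it, *tmp;

	llist_for_each_entry_safe(it, tmp, n, node) {
		/* do the freeing work in a safe context */
	}
}
static DECLARE_WORK(drain_work, drain_fn);

static void defer(struct deferred_item *it)
{
	/* llist_add() returns true only when the list was empty, so the
	 * work is scheduled exactly once per batch -- the same trick the
	 * indexed free_huge_page() uses at line 1361. */
	if (llist_add(&it->node, &pending))
		schedule_work(&drain_work);
}
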
page             1370 mm/hugetlb.c   static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
page             1372 mm/hugetlb.c   	INIT_LIST_HEAD(&page->lru);
page             1373 mm/hugetlb.c   	set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
page             1375 mm/hugetlb.c   	set_hugetlb_cgroup(page, NULL);
page             1381 mm/hugetlb.c   static void prep_compound_gigantic_page(struct page *page, unsigned int order)
page             1385 mm/hugetlb.c   	struct page *p = page + 1;
page             1388 mm/hugetlb.c   	set_compound_order(page, order);
page             1389 mm/hugetlb.c   	__ClearPageReserved(page);
page             1390 mm/hugetlb.c   	__SetPageHead(page);
page             1391 mm/hugetlb.c   	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
page             1406 mm/hugetlb.c   		set_compound_head(p, page);
page             1408 mm/hugetlb.c   	atomic_set(compound_mapcount_ptr(page), -1);
page             1416 mm/hugetlb.c   int PageHuge(struct page *page)
page             1418 mm/hugetlb.c   	if (!PageCompound(page))
page             1421 mm/hugetlb.c   	page = compound_head(page);
page             1422 mm/hugetlb.c   	return page[1].compound_dtor == HUGETLB_PAGE_DTOR;
page             1430 mm/hugetlb.c   int PageHeadHuge(struct page *page_head)
page             1438 mm/hugetlb.c   pgoff_t __basepage_index(struct page *page)
page             1440 mm/hugetlb.c   	struct page *page_head = compound_head(page);
page             1445 mm/hugetlb.c   		return page_index(page);
page             1448 mm/hugetlb.c   		compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
page             1450 mm/hugetlb.c   		compound_idx = page - page_head;
page             1455 mm/hugetlb.c   static struct page *alloc_buddy_huge_page(struct hstate *h,
page             1460 mm/hugetlb.c   	struct page *page;
page             1477 mm/hugetlb.c   	page = __alloc_pages_nodemask(gfp_mask, order, nid, nmask);
page             1478 mm/hugetlb.c   	if (page)
page             1488 mm/hugetlb.c   	if (node_alloc_noretry && page && !alloc_try_hard)
page             1496 mm/hugetlb.c   	if (node_alloc_noretry && !page && alloc_try_hard)
page             1499 mm/hugetlb.c   	return page;
page             1506 mm/hugetlb.c   static struct page *alloc_fresh_huge_page(struct hstate *h,
page             1510 mm/hugetlb.c   	struct page *page;
page             1513 mm/hugetlb.c   		page = alloc_gigantic_page(h, gfp_mask, nid, nmask);
page             1515 mm/hugetlb.c   		page = alloc_buddy_huge_page(h, gfp_mask,
page             1517 mm/hugetlb.c   	if (!page)
page             1521 mm/hugetlb.c   		prep_compound_gigantic_page(page, huge_page_order(h));
page             1522 mm/hugetlb.c   	prep_new_huge_page(h, page, page_to_nid(page));
page             1524 mm/hugetlb.c   	return page;
page             1534 mm/hugetlb.c   	struct page *page;
page             1539 mm/hugetlb.c   		page = alloc_fresh_huge_page(h, gfp_mask, node, nodes_allowed,
page             1541 mm/hugetlb.c   		if (page)
page             1545 mm/hugetlb.c   	if (!page)
page             1548 mm/hugetlb.c   	put_page(page); /* free it into the hugepage allocator */
page             1572 mm/hugetlb.c   			struct page *page =
page             1574 mm/hugetlb.c   					  struct page, lru);
page             1575 mm/hugetlb.c   			list_del(&page->lru);
page             1582 mm/hugetlb.c   			update_and_free_page(h, page);
page             1601 mm/hugetlb.c   int dissolve_free_huge_page(struct page *page)
page             1606 mm/hugetlb.c   	if (!PageHuge(page))
page             1610 mm/hugetlb.c   	if (!PageHuge(page)) {
page             1615 mm/hugetlb.c   	if (!page_count(page)) {
page             1616 mm/hugetlb.c   		struct page *head = compound_head(page);
page             1625 mm/hugetlb.c   		if (PageHWPoison(head) && page != head) {
page             1626 mm/hugetlb.c   			SetPageHWPoison(page);
page             1652 mm/hugetlb.c   	struct page *page;
page             1659 mm/hugetlb.c   		page = pfn_to_page(pfn);
page             1660 mm/hugetlb.c   		rc = dissolve_free_huge_page(page);
page             1671 mm/hugetlb.c   static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
page             1674 mm/hugetlb.c   	struct page *page = NULL;
page             1684 mm/hugetlb.c   	page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL);
page             1685 mm/hugetlb.c   	if (!page)
page             1697 mm/hugetlb.c   		SetPageHugeTemporary(page);
page             1699 mm/hugetlb.c   		put_page(page);
page             1703 mm/hugetlb.c   		h->surplus_huge_pages_node[page_to_nid(page)]++;
page             1709 mm/hugetlb.c   	return page;
page             1712 mm/hugetlb.c   struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
page             1715 mm/hugetlb.c   	struct page *page;
page             1720 mm/hugetlb.c   	page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL);
page             1721 mm/hugetlb.c   	if (!page)
page             1728 mm/hugetlb.c   	SetPageHugeTemporary(page);
page             1730 mm/hugetlb.c   	return page;
page             1737 mm/hugetlb.c   struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
page             1740 mm/hugetlb.c   	struct page *page;
page             1747 mm/hugetlb.c   	page = alloc_surplus_huge_page(h, gfp_mask, nid, nodemask);
page             1750 mm/hugetlb.c   	return page;
page             1754 mm/hugetlb.c   struct page *alloc_huge_page_node(struct hstate *h, int nid)
page             1757 mm/hugetlb.c   	struct page *page = NULL;
page             1764 mm/hugetlb.c   		page = dequeue_huge_page_nodemask(h, gfp_mask, nid, NULL);
page             1767 mm/hugetlb.c   	if (!page)
page             1768 mm/hugetlb.c   		page = alloc_migrate_huge_page(h, gfp_mask, nid, NULL);
page             1770 mm/hugetlb.c   	return page;
page             1774 mm/hugetlb.c   struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
page             1781 mm/hugetlb.c   		struct page *page;
page             1783 mm/hugetlb.c   		page = dequeue_huge_page_nodemask(h, gfp_mask, preferred_nid, nmask);
page             1784 mm/hugetlb.c   		if (page) {
page             1786 mm/hugetlb.c   			return page;
page             1795 mm/hugetlb.c   struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
page             1800 mm/hugetlb.c   	struct page *page;
page             1806 mm/hugetlb.c   	page = alloc_huge_page_nodemask(h, node, nodemask);
page             1809 mm/hugetlb.c   	return page;
page             1819 mm/hugetlb.c   	struct page *page, *tmp;
page             1837 mm/hugetlb.c   		page = alloc_surplus_huge_page(h, htlb_alloc_mask(h),
page             1839 mm/hugetlb.c   		if (!page) {
page             1843 mm/hugetlb.c   		list_add(&page->lru, &surplus_list);
page             1878 mm/hugetlb.c   	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
page             1885 mm/hugetlb.c   		put_page_testzero(page);
page             1886 mm/hugetlb.c   		VM_BUG_ON_PAGE(page_count(page), page);
page             1887 mm/hugetlb.c   		enqueue_huge_page(h, page);
page             1893 mm/hugetlb.c   	list_for_each_entry_safe(page, tmp, &surplus_list, lru)
page             1894 mm/hugetlb.c   		put_page(page);
page             2083 mm/hugetlb.c   			struct page *page)
page             2085 mm/hugetlb.c   	if (unlikely(PagePrivate(page))) {
page             2100 mm/hugetlb.c   			ClearPagePrivate(page);
page             2108 mm/hugetlb.c   				ClearPagePrivate(page);
page             2114 mm/hugetlb.c   struct page *alloc_huge_page(struct vm_area_struct *vma,
page             2119 mm/hugetlb.c   	struct page *page;
page             2171 mm/hugetlb.c   	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
page             2172 mm/hugetlb.c   	if (!page) {
page             2174 mm/hugetlb.c   		page = alloc_buddy_huge_page_with_mpol(h, vma, addr);
page             2175 mm/hugetlb.c   		if (!page)
page             2178 mm/hugetlb.c   			SetPagePrivate(page);
page             2182 mm/hugetlb.c   		list_move(&page->lru, &h->hugepage_activelist);
page             2185 mm/hugetlb.c   	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
page             2188 mm/hugetlb.c   	set_page_private(page, (unsigned long)spool);
page             2206 mm/hugetlb.c   	return page;
page             2251 mm/hugetlb.c   static void __init prep_compound_huge_page(struct page *page,
page             2255 mm/hugetlb.c   		prep_compound_gigantic_page(page, order);
page             2257 mm/hugetlb.c   		prep_compound_page(page, order);
page             2266 mm/hugetlb.c   		struct page *page = virt_to_page(m);
page             2269 mm/hugetlb.c   		WARN_ON(page_count(page) != 1);
page             2270 mm/hugetlb.c   		prep_compound_huge_page(page, h->order);
page             2271 mm/hugetlb.c   		WARN_ON(PageReserved(page));
page             2272 mm/hugetlb.c   		prep_new_huge_page(h, page, page_to_nid(page));
page             2273 mm/hugetlb.c   		put_page(page); /* free it into the hugepage allocator */
page             2282 mm/hugetlb.c   			adjust_managed_page_count(page, 1 << h->order);
page             2370 mm/hugetlb.c   		struct page *page, *next;
page             2372 mm/hugetlb.c   		list_for_each_entry_safe(page, next, freel, lru) {
page             2375 mm/hugetlb.c   			if (PageHighMem(page))
page             2377 mm/hugetlb.c   			list_del(&page->lru);
page             2378 mm/hugetlb.c   			update_and_free_page(h, page);
page             2380 mm/hugetlb.c   			h->free_huge_pages_node[page_to_nid(page)]--;
page             3364 mm/hugetlb.c   static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
page             3370 mm/hugetlb.c   		entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
page             3373 mm/hugetlb.c   		entry = huge_pte_wrprotect(mk_huge_pte(page,
page             3378 mm/hugetlb.c   	entry = arch_make_huge_pte(entry, vma, page, writable);
page             3423 mm/hugetlb.c   	struct page *ptepage;
page             3521 mm/hugetlb.c   			    struct page *ref_page)
page             3528 mm/hugetlb.c   	struct page *page;
page             3583 mm/hugetlb.c   		page = pte_page(pte);
page             3590 mm/hugetlb.c   			if (page != ref_page) {
page             3605 mm/hugetlb.c   			set_page_dirty(page);
page             3608 mm/hugetlb.c   		page_remove_rmap(page, true);
page             3611 mm/hugetlb.c   		tlb_remove_page_size(tlb, page, huge_page_size(h));
page             3624 mm/hugetlb.c   			  unsigned long end, struct page *ref_page)
page             3642 mm/hugetlb.c   			  unsigned long end, struct page *ref_page)
page             3672 mm/hugetlb.c   			      struct page *page, unsigned long address)
page             3716 mm/hugetlb.c   					     address + huge_page_size(h), page);
page             3729 mm/hugetlb.c   		       struct page *pagecache_page, spinlock_t *ptl)
page             3733 mm/hugetlb.c   	struct page *old_page, *new_page;
page             3852 mm/hugetlb.c   static struct page *hugetlbfs_pagecache_page(struct hstate *h,
page             3873 mm/hugetlb.c   	struct page *page;
page             3878 mm/hugetlb.c   	page = find_get_page(mapping, idx);
page             3879 mm/hugetlb.c   	if (page)
page             3880 mm/hugetlb.c   		put_page(page);
page             3881 mm/hugetlb.c   	return page != NULL;
page             3884 mm/hugetlb.c   int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
page             3889 mm/hugetlb.c   	int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
page             3893 mm/hugetlb.c   	ClearPagePrivate(page);
page             3899 mm/hugetlb.c   	set_page_dirty(page);
page             3916 mm/hugetlb.c   	struct page *page;
page             3938 mm/hugetlb.c   	page = find_lock_page(mapping, idx);
page             3939 mm/hugetlb.c   	if (!page) {
page             3974 mm/hugetlb.c   		page = alloc_huge_page(vma, haddr, 0);
page             3975 mm/hugetlb.c   		if (IS_ERR(page)) {
page             3995 mm/hugetlb.c   			ret = vmf_error(PTR_ERR(page));
page             3998 mm/hugetlb.c   		clear_huge_page(page, address, pages_per_huge_page(h));
page             3999 mm/hugetlb.c   		__SetPageUptodate(page);
page             4003 mm/hugetlb.c   			int err = huge_add_to_page_cache(page, mapping, idx);
page             4005 mm/hugetlb.c   				put_page(page);
page             4011 mm/hugetlb.c   			lock_page(page);
page             4024 mm/hugetlb.c   		if (unlikely(PageHWPoison(page))) {
page             4056 mm/hugetlb.c   		ClearPagePrivate(page);
page             4057 mm/hugetlb.c   		hugepage_add_new_anon_rmap(page, vma, haddr);
page             4059 mm/hugetlb.c   		page_dup_rmap(page, true);
page             4060 mm/hugetlb.c   	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
page             4067 mm/hugetlb.c   		ret = hugetlb_cow(mm, vma, address, ptep, page, ptl);
page             4078 mm/hugetlb.c   		set_page_huge_active(page);
page             4080 mm/hugetlb.c   	unlock_page(page);
page             4087 mm/hugetlb.c   	unlock_page(page);
page             4088 mm/hugetlb.c   	restore_reserve_on_error(h, vma, haddr, page);
page             4089 mm/hugetlb.c   	put_page(page);
page             4127 mm/hugetlb.c   	struct page *page = NULL;
page             4128 mm/hugetlb.c   	struct page *pagecache_page = NULL;
page             4210 mm/hugetlb.c   	page = pte_page(entry);
page             4211 mm/hugetlb.c   	if (page != pagecache_page)
page             4212 mm/hugetlb.c   		if (!trylock_page(page)) {
page             4217 mm/hugetlb.c   	get_page(page);
page             4232 mm/hugetlb.c   	if (page != pagecache_page)
page             4233 mm/hugetlb.c   		unlock_page(page);
page             4234 mm/hugetlb.c   	put_page(page);
page             4252 mm/hugetlb.c   		wait_on_page_locked(page);
page             4265 mm/hugetlb.c   			    struct page **pagep)
page             4275 mm/hugetlb.c   	struct page *page;
page             4279 mm/hugetlb.c   		page = alloc_huge_page(dst_vma, dst_addr, 0);
page             4280 mm/hugetlb.c   		if (IS_ERR(page))
page             4283 mm/hugetlb.c   		ret = copy_huge_page_from_user(page,
page             4290 mm/hugetlb.c   			*pagep = page;
page             4295 mm/hugetlb.c   		page = *pagep;
page             4304 mm/hugetlb.c   	__SetPageUptodate(page);
page             4324 mm/hugetlb.c   		ret = huge_add_to_page_cache(page, mapping, idx);
page             4351 mm/hugetlb.c   		page_dup_rmap(page, true);
page             4353 mm/hugetlb.c   		ClearPagePrivate(page);
page             4354 mm/hugetlb.c   		hugepage_add_new_anon_rmap(page, dst_vma, dst_addr);
page             4357 mm/hugetlb.c   	_dst_pte = make_huge_pte(dst_vma, page, dst_vma->vm_flags & VM_WRITE);
page             4372 mm/hugetlb.c   	set_page_huge_active(page);
page             4374 mm/hugetlb.c   		unlock_page(page);
page             4381 mm/hugetlb.c   		unlock_page(page);
page             4383 mm/hugetlb.c   	put_page(page);
page             4388 mm/hugetlb.c   			 struct page **pages, struct vm_area_struct **vmas,
page             4402 mm/hugetlb.c   		struct page *page;
page             4497 mm/hugetlb.c   		page = pte_page(huge_ptep_get(pte));
page             4503 mm/hugetlb.c   		if (unlikely(page_count(page) <= 0)) {
page             4513 mm/hugetlb.c   			pages[i] = mem_map_offset(page, pfn_offset);
page             5054 mm/hugetlb.c   struct page * __weak
page             5061 mm/hugetlb.c   struct page * __weak
page             5069 mm/hugetlb.c   struct page * __weak
page             5073 mm/hugetlb.c   	struct page *page = NULL;
page             5087 mm/hugetlb.c   		page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
page             5089 mm/hugetlb.c   			get_page(page);
page             5103 mm/hugetlb.c   	return page;
page             5106 mm/hugetlb.c   struct page * __weak
page             5116 mm/hugetlb.c   struct page * __weak
page             5125 mm/hugetlb.c   bool isolate_huge_page(struct page *page, struct list_head *list)
page             5129 mm/hugetlb.c   	VM_BUG_ON_PAGE(!PageHead(page), page);
page             5131 mm/hugetlb.c   	if (!page_huge_active(page) || !get_page_unless_zero(page)) {
page             5135 mm/hugetlb.c   	clear_page_huge_active(page);
page             5136 mm/hugetlb.c   	list_move_tail(&page->lru, list);
page             5142 mm/hugetlb.c   void putback_active_hugepage(struct page *page)
page             5144 mm/hugetlb.c   	VM_BUG_ON_PAGE(!PageHead(page), page);
page             5146 mm/hugetlb.c   	set_page_huge_active(page);
page             5147 mm/hugetlb.c   	list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
page             5149 mm/hugetlb.c   	put_page(page);
page             5152 mm/hugetlb.c   void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason)
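
The final mm/hugetlb.c fragments are the migration hooks: isolate_huge_page() moves an active huge page onto a caller-supplied list and takes a reference, and putback_active_hugepage() reverses both. A hedged sketch pairing the two; bounce_huge_page() is an illustrative name, and a real caller would hand the list to migrate_pages() in between:

#include <linux/hugetlb.h>
#include <linux/list.h>

/* Hedged sketch: isolate an active huge page, then put it back. */
static void bounce_huge_page(struct page *head)
{
	LIST_HEAD(pages);

	if (!isolate_huge_page(head, &pages))
		return;			/* not active, or already being freed */

	/* ... e.g. pass 'pages' to the migration machinery ... */

	putback_active_hugepage(head);	/* re-activate and drop the reference */
}
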
page              126 mm/hugetlb_cgroup.c 				       struct page *page)
page              133 mm/hugetlb_cgroup.c 	page_hcg = hugetlb_cgroup_from_page(page);
page              142 mm/hugetlb_cgroup.c 	nr_pages = compound_nr(page);
page              152 mm/hugetlb_cgroup.c 	set_hugetlb_cgroup(page, parent);
page              165 mm/hugetlb_cgroup.c 	struct page *page;
page              171 mm/hugetlb_cgroup.c 			list_for_each_entry(page, &h->hugepage_activelist, lru)
page              172 mm/hugetlb_cgroup.c 				hugetlb_cgroup_move_parent(idx, h_cg, page);
page              216 mm/hugetlb_cgroup.c 				  struct page *page)
page              221 mm/hugetlb_cgroup.c 	set_hugetlb_cgroup(page, h_cg);
page              229 mm/hugetlb_cgroup.c 				  struct page *page)
page              236 mm/hugetlb_cgroup.c 	h_cg = hugetlb_cgroup_from_page(page);
page              239 mm/hugetlb_cgroup.c 	set_hugetlb_cgroup(page, NULL);
page              416 mm/hugetlb_cgroup.c void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage)
page               17 mm/hwpoison-inject.c 	struct page *p;
page               18 mm/hwpoison-inject.c 	struct page *hpage;
page               70 mm/internal.h  static inline void set_page_refcounted(struct page *page)
page               72 mm/internal.h  	VM_BUG_ON_PAGE(PageTail(page), page);
page               73 mm/internal.h  	VM_BUG_ON_PAGE(page_ref_count(page), page);
page               74 mm/internal.h  	set_page_count(page, 1);
page               88 mm/internal.h  extern int isolate_lru_page(struct page *page);
page               89 mm/internal.h  extern void putback_lru_page(struct page *page);
page              147 mm/internal.h  extern struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
page              150 mm/internal.h  static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
page              159 mm/internal.h  extern int __isolate_free_page(struct page *page, unsigned int order);
page              160 mm/internal.h  extern void memblock_free_pages(struct page *page, unsigned long pfn,
page              162 mm/internal.h  extern void __free_pages_core(struct page *page, unsigned int order);
page              163 mm/internal.h  extern void prep_compound_page(struct page *page, unsigned int order);
page              164 mm/internal.h  extern void post_alloc_hook(struct page *page, unsigned int order,
page              214 mm/internal.h  	struct page *page;
page              236 mm/internal.h  static inline unsigned int page_order(struct page *page)
page              239 mm/internal.h  	return page_private(page);
page              253 mm/internal.h  #define page_order_unsafe(page)		READ_ONCE(page_private(page))
page              308 mm/internal.h  extern void mlock_vma_page(struct page *page);
page              309 mm/internal.h  extern unsigned int munlock_vma_page(struct page *page);
page              320 mm/internal.h  extern void clear_page_mlock(struct page *page);
page              327 mm/internal.h  static inline void mlock_migrate_page(struct page *newpage, struct page *page)
page              329 mm/internal.h  	if (TestClearPageMlocked(page)) {
page              330 mm/internal.h  		int nr_pages = hpage_nr_pages(page);
page              333 mm/internal.h  		__mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
page              345 mm/internal.h  __vma_address(struct page *page, struct vm_area_struct *vma)
page              347 mm/internal.h  	pgoff_t pgoff = page_to_pgoff(page);
page              352 mm/internal.h  vma_address(struct page *page, struct vm_area_struct *vma)
page              356 mm/internal.h  	start = __vma_address(page, vma);
page              357 mm/internal.h  	end = start + PAGE_SIZE * (hpage_nr_pages(page) - 1);
page              387 mm/internal.h  static inline void clear_page_mlock(struct page *page) { }
page              388 mm/internal.h  static inline void mlock_vma_page(struct page *page) { }
page              389 mm/internal.h  static inline void mlock_migrate_page(struct page *new, struct page *old) { }
page              398 mm/internal.h  static inline struct page *mem_map_offset(struct page *base, int offset)
page              409 mm/internal.h  static inline struct page *mem_map_next(struct page *iter,
page              410 mm/internal.h  						struct page *base, int offset)
page              486 mm/internal.h  extern int hwpoison_filter(struct page *p);
page              568 mm/internal.h  static inline bool is_migrate_highatomic_page(struct page *page)
page              570 mm/internal.h  	return get_pageblock_migratetype(page) == MIGRATE_HIGHATOMIC;
page              574 mm/internal.h  extern struct page *alloc_new_node_page(struct page *page, unsigned long node);
page              214 mm/kasan/common.c void kasan_alloc_pages(struct page *page, unsigned int order)
page              219 mm/kasan/common.c 	if (unlikely(PageHighMem(page)))
page              224 mm/kasan/common.c 		page_kasan_tag_set(page + i, tag);
page              225 mm/kasan/common.c 	kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
page              228 mm/kasan/common.c void kasan_free_pages(struct page *page, unsigned int order)
page              230 mm/kasan/common.c 	if (likely(!PageHighMem(page)))
page              231 mm/kasan/common.c 		kasan_poison_shadow(page_address(page),
page              335 mm/kasan/common.c void kasan_poison_slab(struct page *page)
page              339 mm/kasan/common.c 	for (i = 0; i < compound_nr(page); i++)
page              340 mm/kasan/common.c 		page_kasan_tag_reset(page + i);
page              341 mm/kasan/common.c 	kasan_poison_shadow(page_address(page), page_size(page),
page              531 mm/kasan/common.c 	struct page *page;
page              541 mm/kasan/common.c 	page = virt_to_page(ptr);
page              544 mm/kasan/common.c 	redzone_end = (unsigned long)ptr + page_size(page);
page              555 mm/kasan/common.c 	struct page *page;
page              560 mm/kasan/common.c 	page = virt_to_head_page(object);
page              562 mm/kasan/common.c 	if (unlikely(!PageSlab(page)))
page              565 mm/kasan/common.c 		return __kasan_kmalloc(page->slab_cache, object, size,
page              571 mm/kasan/common.c 	struct page *page;
page              573 mm/kasan/common.c 	page = virt_to_head_page(ptr);
page              575 mm/kasan/common.c 	if (unlikely(!PageSlab(page))) {
page              576 mm/kasan/common.c 		if (ptr != page_address(page)) {
page              580 mm/kasan/common.c 		kasan_poison_shadow(ptr, page_size(page), KASAN_FREE_PAGE);
page              582 mm/kasan/common.c 		__kasan_slab_free(page->slab_cache, ptr, ip, false);
page              159 mm/kasan/kasan.h struct page *kasan_addr_to_page(const void *addr);
page              114 mm/kasan/report.c struct page *kasan_addr_to_page(const void *addr)
page              372 mm/kasan/report.c 	struct page *page = kasan_addr_to_page(addr);
page              377 mm/kasan/report.c 	if (page && PageSlab(page)) {
page              378 mm/kasan/report.c 		struct kmem_cache *cache = page->slab_cache;
page              379 mm/kasan/report.c 		void *object = nearest_obj(cache, page,	addr);
page              389 mm/kasan/report.c 	if (page) {
page              391 mm/kasan/report.c 		dump_page(page, "kasan: bad access detected");
page               42 mm/kasan/tags_report.c 	struct page *page;
page               50 mm/kasan/tags_report.c 	page = kasan_addr_to_page(addr);
page               51 mm/kasan/tags_report.c 	if (page && PageSlab(page)) {
page               52 mm/kasan/tags_report.c 		cache = page->slab_cache;
page               53 mm/kasan/tags_report.c 		object = nearest_obj(cache, page, (void *)addr);
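
Aside on the mm/kasan hits: kasan_poison_shadow()/kasan_unpoison_shadow() write shadow bytes, and generic KASAN maps each 8-byte granule of memory to one shadow byte at (addr >> 3) + offset. A standalone sketch of that address arithmetic; the offset below matches the common x86-64 value but is used here purely for illustration.

#include <stdint.h>
#include <stdio.h>

/* shadow = (addr >> SCALE_SHIFT) + SHADOW_OFFSET; the real offset is
 * per-architecture, this one is only an assumed example value. */
#define KASAN_SHADOW_SCALE_SHIFT 3
#define KASAN_SHADOW_OFFSET      0xdffffc0000000000ULL

static uint64_t shadow_of(uint64_t addr)
{
	return (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET;
}

int main(void)
{
	uint64_t page = 0xffff888000000000ULL;	/* hypothetical page address */

	/* A 4096-byte page covers 4096 >> 3 = 512 shadow bytes, which is
	 * the span kasan_poison_shadow()/kasan_unpoison_shadow() fill. */
	printf("shadow start: 0x%llx\n", (unsigned long long)shadow_of(page));
	printf("shadow bytes per page: %llu\n",
	       (unsigned long long)(4096 >> KASAN_SHADOW_SCALE_SHIFT));
	return 0;
}
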
page              516 mm/khugepaged.c static void release_pte_page(struct page *page)
page              518 mm/khugepaged.c 	dec_node_page_state(page, NR_ISOLATED_ANON + page_is_file_cache(page));
page              519 mm/khugepaged.c 	unlock_page(page);
page              520 mm/khugepaged.c 	putback_lru_page(page);
page              536 mm/khugepaged.c 	struct page *page = NULL;
page              558 mm/khugepaged.c 		page = vm_normal_page(vma, address, pteval);
page              559 mm/khugepaged.c 		if (unlikely(!page)) {
page              565 mm/khugepaged.c 		if (PageCompound(page)) {
page              570 mm/khugepaged.c 		VM_BUG_ON_PAGE(!PageAnon(page), page);
page              578 mm/khugepaged.c 		if (!trylock_page(page)) {
page              588 mm/khugepaged.c 		if (page_count(page) != 1 + PageSwapCache(page)) {
page              589 mm/khugepaged.c 			unlock_page(page);
page              596 mm/khugepaged.c 			if (PageSwapCache(page) &&
page              597 mm/khugepaged.c 			    !reuse_swap_page(page, NULL)) {
page              598 mm/khugepaged.c 				unlock_page(page);
page              612 mm/khugepaged.c 		if (isolate_lru_page(page)) {
page              613 mm/khugepaged.c 			unlock_page(page);
page              617 mm/khugepaged.c 		inc_node_page_state(page,
page              618 mm/khugepaged.c 				NR_ISOLATED_ANON + page_is_file_cache(page));
page              619 mm/khugepaged.c 		VM_BUG_ON_PAGE(!PageLocked(page), page);
page              620 mm/khugepaged.c 		VM_BUG_ON_PAGE(PageLRU(page), page);
page              624 mm/khugepaged.c 		    page_is_young(page) || PageReferenced(page) ||
page              631 mm/khugepaged.c 			trace_mm_collapse_huge_page_isolate(page, none_or_zero,
page              641 mm/khugepaged.c 	trace_mm_collapse_huge_page_isolate(page, none_or_zero,
page              646 mm/khugepaged.c static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
page              653 mm/khugepaged.c 				_pte++, page++, address += PAGE_SIZE) {
page              655 mm/khugepaged.c 		struct page *src_page;
page              658 mm/khugepaged.c 			clear_user_highpage(page, address);
page              674 mm/khugepaged.c 			copy_user_highpage(page, src_page, address, vma);
page              763 mm/khugepaged.c static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
page              780 mm/khugepaged.c static struct page *
page              781 mm/khugepaged.c khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
page              802 mm/khugepaged.c static inline struct page *alloc_khugepaged_hugepage(void)
page              804 mm/khugepaged.c 	struct page *page;
page              806 mm/khugepaged.c 	page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(),
page              808 mm/khugepaged.c 	if (page)
page              809 mm/khugepaged.c 		prep_transhuge_page(page);
page              810 mm/khugepaged.c 	return page;
page              813 mm/khugepaged.c static struct page *khugepaged_alloc_hugepage(bool *wait)
page              815 mm/khugepaged.c 	struct page *hpage;
page              833 mm/khugepaged.c static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
page              844 mm/khugepaged.c static struct page *
page              845 mm/khugepaged.c khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
page              948 mm/khugepaged.c 				   struct page **hpage,
page              954 mm/khugepaged.c 	struct page *new_page;
page             1118 mm/khugepaged.c 			       struct page **hpage)
page             1123 mm/khugepaged.c 	struct page *page = NULL;
page             1166 mm/khugepaged.c 		page = vm_normal_page(vma, _address, pteval);
page             1167 mm/khugepaged.c 		if (unlikely(!page)) {
page             1173 mm/khugepaged.c 		if (PageCompound(page)) {
page             1184 mm/khugepaged.c 		node = page_to_nid(page);
page             1190 mm/khugepaged.c 		if (!PageLRU(page)) {
page             1194 mm/khugepaged.c 		if (PageLocked(page)) {
page             1198 mm/khugepaged.c 		if (!PageAnon(page)) {
page             1208 mm/khugepaged.c 		if (page_count(page) != 1 + PageSwapCache(page)) {
page             1213 mm/khugepaged.c 		    page_is_young(page) || PageReferenced(page) ||
page             1235 mm/khugepaged.c 	trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
page             1294 mm/khugepaged.c 	struct page *hpage = NULL;
page             1323 mm/khugepaged.c 		struct page *page;
page             1333 mm/khugepaged.c 		page = vm_normal_page(vma, addr, *pte);
page             1335 mm/khugepaged.c 		if (!page || !PageCompound(page))
page             1339 mm/khugepaged.c 			hpage = compound_head(page);
page             1358 mm/khugepaged.c 		if (WARN_ON(hpage + i != page))
page             1366 mm/khugepaged.c 		struct page *page;
page             1370 mm/khugepaged.c 		page = vm_normal_page(vma, addr, *pte);
page             1371 mm/khugepaged.c 		page_remove_rmap(page, false);
page             1494 mm/khugepaged.c 		struct page **hpage, int node)
page             1498 mm/khugepaged.c 	struct page *new_page;
page             1551 mm/khugepaged.c 		struct page *page = xas_next(&xas);
page             1555 mm/khugepaged.c 			if (!page) {
page             1577 mm/khugepaged.c 			if (xa_is_value(page) || !PageUptodate(page)) {
page             1580 mm/khugepaged.c 				if (shmem_getpage(mapping->host, index, &page,
page             1585 mm/khugepaged.c 			} else if (trylock_page(page)) {
page             1586 mm/khugepaged.c 				get_page(page);
page             1593 mm/khugepaged.c 			if (!page || xa_is_value(page)) {
page             1600 mm/khugepaged.c 				page = find_lock_page(mapping, index);
page             1601 mm/khugepaged.c 				if (unlikely(page == NULL)) {
page             1605 mm/khugepaged.c 			} else if (trylock_page(page)) {
page             1606 mm/khugepaged.c 				get_page(page);
page             1618 mm/khugepaged.c 		VM_BUG_ON_PAGE(!PageLocked(page), page);
page             1621 mm/khugepaged.c 		if (unlikely(!PageUptodate(page))) {
page             1630 mm/khugepaged.c 		if (PageTransCompound(page)) {
page             1635 mm/khugepaged.c 		if (page_mapping(page) != mapping) {
page             1640 mm/khugepaged.c 		if (!is_shmem && PageDirty(page)) {
page             1650 mm/khugepaged.c 		if (isolate_lru_page(page)) {
page             1655 mm/khugepaged.c 		if (page_has_private(page) &&
page             1656 mm/khugepaged.c 		    !try_to_release_page(page, GFP_KERNEL)) {
page             1658 mm/khugepaged.c 			putback_lru_page(page);
page             1662 mm/khugepaged.c 		if (page_mapped(page))
page             1668 mm/khugepaged.c 		VM_BUG_ON_PAGE(page != xas_load(&xas), page);
page             1669 mm/khugepaged.c 		VM_BUG_ON_PAGE(page_mapped(page), page);
page             1677 mm/khugepaged.c 		if (!page_ref_freeze(page, 3)) {
page             1680 mm/khugepaged.c 			putback_lru_page(page);
page             1688 mm/khugepaged.c 		list_add_tail(&page->lru, &pagelist);
page             1694 mm/khugepaged.c 		unlock_page(page);
page             1695 mm/khugepaged.c 		put_page(page);
page             1720 mm/khugepaged.c 		struct page *page, *tmp;
page             1727 mm/khugepaged.c 		list_for_each_entry_safe(page, tmp, &pagelist, lru) {
page             1728 mm/khugepaged.c 			while (index < page->index) {
page             1732 mm/khugepaged.c 			copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
page             1733 mm/khugepaged.c 					page);
page             1734 mm/khugepaged.c 			list_del(&page->lru);
page             1735 mm/khugepaged.c 			page->mapping = NULL;
page             1736 mm/khugepaged.c 			page_ref_unfreeze(page, 1);
page             1737 mm/khugepaged.c 			ClearPageActive(page);
page             1738 mm/khugepaged.c 			ClearPageUnevictable(page);
page             1739 mm/khugepaged.c 			unlock_page(page);
page             1740 mm/khugepaged.c 			put_page(page);
page             1768 mm/khugepaged.c 		struct page *page;
page             1778 mm/khugepaged.c 		xas_for_each(&xas, page, end - 1) {
page             1779 mm/khugepaged.c 			page = list_first_entry_or_null(&pagelist,
page             1780 mm/khugepaged.c 					struct page, lru);
page             1781 mm/khugepaged.c 			if (!page || xas.xa_index < page->index) {
page             1790 mm/khugepaged.c 			VM_BUG_ON_PAGE(page->index != xas.xa_index, page);
page             1793 mm/khugepaged.c 			list_del(&page->lru);
page             1794 mm/khugepaged.c 			page_ref_unfreeze(page, 2);
page             1795 mm/khugepaged.c 			xas_store(&xas, page);
page             1798 mm/khugepaged.c 			unlock_page(page);
page             1799 mm/khugepaged.c 			putback_lru_page(page);
page             1816 mm/khugepaged.c 		struct file *file, pgoff_t start, struct page **hpage)
page             1818 mm/khugepaged.c 	struct page *page = NULL;
page             1829 mm/khugepaged.c 	xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) {
page             1830 mm/khugepaged.c 		if (xas_retry(&xas, page))
page             1833 mm/khugepaged.c 		if (xa_is_value(page)) {
page             1841 mm/khugepaged.c 		if (PageTransCompound(page)) {
page             1846 mm/khugepaged.c 		node = page_to_nid(page);
page             1853 mm/khugepaged.c 		if (!PageLRU(page)) {
page             1858 mm/khugepaged.c 		if (page_count(page) !=
page             1859 mm/khugepaged.c 		    1 + page_mapcount(page) + page_has_private(page)) {
page             1892 mm/khugepaged.c 		struct file *file, pgoff_t start, struct page **hpage)
page             1904 mm/khugepaged.c 					    struct page **hpage)
page             2045 mm/khugepaged.c 	struct page *hpage = NULL;
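
Aside on the mm/khugepaged.c hits: the scan/collapse paths indexed above only operate on VMAs that allow THP, which userspace can request per mapping. A small runnable example that opts a mapping in with madvise(MADV_HUGEPAGE) so khugepaged may later collapse it; the size and fill value are arbitrary.

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4UL << 20;	/* 4 MiB: contains aligned 2 MiB PMD ranges */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* With THP in "madvise" mode, only regions marked like this are
	 * scanned by khugepaged for collapse into huge pages. */
	if (madvise(p, len, MADV_HUGEPAGE))
		perror("madvise(MADV_HUGEPAGE)");	/* kernel may lack THP */

	memset(p, 0x5a, len);	/* fault pages in; collapse may happen later */
	munmap(p, len);
	return 0;
}
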
page             1448 mm/kmemleak.c  			struct page *page = pfn_to_online_page(pfn);
page             1450 mm/kmemleak.c  			if (!page)
page             1454 mm/kmemleak.c  			if (page_to_nid(page) != i)
page             1457 mm/kmemleak.c  			if (page_count(page) == 0)
page             1459 mm/kmemleak.c  			scan_block(page, page + 1, NULL);
page              472 mm/ksm.c       	struct page *page;
page              477 mm/ksm.c       		page = follow_page(vma, addr,
page              479 mm/ksm.c       		if (IS_ERR_OR_NULL(page))
page              481 mm/ksm.c       		if (PageKsm(page))
page              486 mm/ksm.c       		put_page(page);
page              552 mm/ksm.c       static struct page *get_mergeable_page(struct rmap_item *rmap_item)
page              557 mm/ksm.c       	struct page *page;
page              564 mm/ksm.c       	page = follow_page(vma, addr, FOLL_GET);
page              565 mm/ksm.c       	if (IS_ERR_OR_NULL(page))
page              567 mm/ksm.c       	if (PageAnon(page)) {
page              568 mm/ksm.c       		flush_anon_page(vma, page, addr);
page              569 mm/ksm.c       		flush_dcache_page(page);
page              571 mm/ksm.c       		put_page(page);
page              573 mm/ksm.c       		page = NULL;
page              576 mm/ksm.c       	return page;
page              694 mm/ksm.c       static struct page *get_ksm_page(struct stable_node *stable_node,
page              697 mm/ksm.c       	struct page *page;
page              705 mm/ksm.c       	page = pfn_to_page(kpfn);
page              706 mm/ksm.c       	if (READ_ONCE(page->mapping) != expected_mapping)
page              719 mm/ksm.c       	while (!get_page_unless_zero(page)) {
page              728 mm/ksm.c       		if (!PageSwapCache(page))
page              733 mm/ksm.c       	if (READ_ONCE(page->mapping) != expected_mapping) {
page              734 mm/ksm.c       		put_page(page);
page              739 mm/ksm.c       		if (!trylock_page(page)) {
page              740 mm/ksm.c       			put_page(page);
page              744 mm/ksm.c       		lock_page(page);
page              747 mm/ksm.c       		if (READ_ONCE(page->mapping) != expected_mapping) {
page              748 mm/ksm.c       			unlock_page(page);
page              749 mm/ksm.c       			put_page(page);
page              753 mm/ksm.c       	return page;
page              777 mm/ksm.c       		struct page *page;
page              780 mm/ksm.c       		page = get_ksm_page(stable_node, GET_KSM_PAGE_LOCK);
page              781 mm/ksm.c       		if (!page)
page              785 mm/ksm.c       		unlock_page(page);
page              786 mm/ksm.c       		put_page(page);
page              860 mm/ksm.c       static inline struct stable_node *page_stable_node(struct page *page)
page              862 mm/ksm.c       	return PageKsm(page) ? page_rmapping(page) : NULL;
page              865 mm/ksm.c       static inline void set_page_stable_node(struct page *page,
page              868 mm/ksm.c       	page->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM);
page              877 mm/ksm.c       	struct page *page;
page              880 mm/ksm.c       	page = get_ksm_page(stable_node, GET_KSM_PAGE_LOCK);
page              881 mm/ksm.c       	if (!page) {
page              894 mm/ksm.c       	if (!page_mapped(page)) {
page              903 mm/ksm.c       		set_page_stable_node(page, NULL);
page              908 mm/ksm.c       	unlock_page(page);
page              909 mm/ksm.c       	put_page(page);
page             1023 mm/ksm.c       static u32 calc_checksum(struct page *page)
page             1026 mm/ksm.c       	void *addr = kmap_atomic(page);
page             1032 mm/ksm.c       static int write_protect_page(struct vm_area_struct *vma, struct page *page,
page             1037 mm/ksm.c       		.page = page,
page             1044 mm/ksm.c       	pvmw.address = page_address_in_vma(page, vma);
page             1048 mm/ksm.c       	BUG_ON(PageTransCompound(page));
page             1065 mm/ksm.c       		swapped = PageSwapCache(page);
page             1066 mm/ksm.c       		flush_cache_page(vma, pvmw.address, page_to_pfn(page));
page             1086 mm/ksm.c       		if (page_mapcount(page) + 1 + swapped != page_count(page)) {
page             1091 mm/ksm.c       			set_page_dirty(page);
page             1119 mm/ksm.c       static int replace_page(struct vm_area_struct *vma, struct page *page,
page             1120 mm/ksm.c       			struct page *kpage, pte_t orig_pte)
page             1131 mm/ksm.c       	addr = page_address_in_vma(page, vma);
page             1179 mm/ksm.c       	page_remove_rmap(page, false);
page             1180 mm/ksm.c       	if (!page_mapped(page))
page             1181 mm/ksm.c       		try_to_free_swap(page);
page             1182 mm/ksm.c       	put_page(page);
page             1202 mm/ksm.c       				 struct page *page, struct page *kpage)
page             1207 mm/ksm.c       	if (page == kpage)			/* ksm page forked */
page             1210 mm/ksm.c       	if (!PageAnon(page))
page             1220 mm/ksm.c       	if (!trylock_page(page))
page             1223 mm/ksm.c       	if (PageTransCompound(page)) {
page             1224 mm/ksm.c       		if (split_huge_page(page))
page             1234 mm/ksm.c       	if (write_protect_page(vma, page, &orig_pte) == 0) {
page             1241 mm/ksm.c       			set_page_stable_node(page, NULL);
page             1242 mm/ksm.c       			mark_page_accessed(page);
page             1247 mm/ksm.c       			if (!PageDirty(page))
page             1248 mm/ksm.c       				SetPageDirty(page);
page             1250 mm/ksm.c       		} else if (pages_identical(page, kpage))
page             1251 mm/ksm.c       			err = replace_page(vma, page, kpage, orig_pte);
page             1255 mm/ksm.c       		munlock_vma_page(page);
page             1257 mm/ksm.c       			unlock_page(page);
page             1260 mm/ksm.c       			page = kpage;		/* for final unlock */
page             1265 mm/ksm.c       	unlock_page(page);
page             1277 mm/ksm.c       				      struct page *page, struct page *kpage)
page             1288 mm/ksm.c       	err = try_to_merge_one_page(vma, page, kpage);
page             1313 mm/ksm.c       static struct page *try_to_merge_two_pages(struct rmap_item *rmap_item,
page             1314 mm/ksm.c       					   struct page *page,
page             1316 mm/ksm.c       					   struct page *tree_page)
page             1320 mm/ksm.c       	err = try_to_merge_with_ksm_page(rmap_item, page, NULL);
page             1323 mm/ksm.c       							tree_page, page);
page             1331 mm/ksm.c       	return err ? NULL : page;
page             1354 mm/ksm.c       static struct page *stable_node_dup(struct stable_node **_stable_node_dup,
page             1361 mm/ksm.c       	struct page *_tree_page, *tree_page = NULL;
page             1500 mm/ksm.c       static struct page *__stable_node_chain(struct stable_node **_stable_node_dup,
page             1522 mm/ksm.c       static __always_inline struct page *chain_prune(struct stable_node **s_n_d,
page             1529 mm/ksm.c       static __always_inline struct page *chain(struct stable_node **s_n_d,
page             1534 mm/ksm.c       	struct page *tree_page;
page             1551 mm/ksm.c       static struct page *stable_tree_search(struct page *page)
page             1560 mm/ksm.c       	page_node = page_stable_node(page);
page             1563 mm/ksm.c       		get_page(page);
page             1564 mm/ksm.c       		return page;
page             1567 mm/ksm.c       	nid = get_kpfn_nid(page_to_pfn(page));
page             1574 mm/ksm.c       		struct page *tree_page;
page             1631 mm/ksm.c       		ret = memcmp_pages(page, tree_page);
page             1648 mm/ksm.c       				if (page_mapcount(page) > 1)
page             1707 mm/ksm.c       		get_page(page);
page             1708 mm/ksm.c       		return page;
page             1733 mm/ksm.c       				get_page(page);
page             1735 mm/ksm.c       				page = NULL;
page             1738 mm/ksm.c       			page = NULL;
page             1749 mm/ksm.c       				get_page(page);
page             1751 mm/ksm.c       				page = NULL;
page             1753 mm/ksm.c       			page = NULL;
page             1758 mm/ksm.c       	return page;
page             1803 mm/ksm.c       static struct stable_node *stable_tree_insert(struct page *kpage)
page             1821 mm/ksm.c       		struct page *tree_page;
page             1924 mm/ksm.c       					      struct page *page,
page             1925 mm/ksm.c       					      struct page **tree_pagep)
page             1932 mm/ksm.c       	nid = get_kpfn_nid(page_to_pfn(page));
page             1938 mm/ksm.c       		struct page *tree_page;
page             1950 mm/ksm.c       		if (page == tree_page) {
page             1955 mm/ksm.c       		ret = memcmp_pages(page, tree_page);
page             2035 mm/ksm.c       static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
page             2039 mm/ksm.c       	struct page *tree_page = NULL;
page             2041 mm/ksm.c       	struct page *kpage;
page             2046 mm/ksm.c       	stable_node = page_stable_node(page);
page             2067 mm/ksm.c       	kpage = stable_tree_search(page);
page             2068 mm/ksm.c       	if (kpage == page && rmap_item->head == stable_node) {
page             2079 mm/ksm.c       		err = try_to_merge_with_ksm_page(rmap_item, page, kpage);
page             2100 mm/ksm.c       	checksum = calc_checksum(page);
page             2116 mm/ksm.c       			err = try_to_merge_one_page(vma, page,
page             2134 mm/ksm.c       		unstable_tree_search_insert(rmap_item, page, &tree_page);
page             2138 mm/ksm.c       		kpage = try_to_merge_two_pages(rmap_item, page,
page             2150 mm/ksm.c       		split = PageTransCompound(page)
page             2151 mm/ksm.c       			&& compound_head(page) == compound_head(tree_page);
page             2188 mm/ksm.c       			if (!trylock_page(page))
page             2190 mm/ksm.c       			split_huge_page(page);
page             2191 mm/ksm.c       			unlock_page(page);
page             2224 mm/ksm.c       static struct rmap_item *scan_get_next_rmap_item(struct page **page)
page             2257 mm/ksm.c       			struct page *page;
page             2261 mm/ksm.c       				page = get_ksm_page(stable_node,
page             2263 mm/ksm.c       				if (page)
page             2264 mm/ksm.c       					put_page(page);
page             2305 mm/ksm.c       			*page = follow_page(vma, ksm_scan.address, FOLL_GET);
page             2306 mm/ksm.c       			if (IS_ERR_OR_NULL(*page)) {
page             2311 mm/ksm.c       			if (PageAnon(*page)) {
page             2312 mm/ksm.c       				flush_anon_page(vma, *page, ksm_scan.address);
page             2313 mm/ksm.c       				flush_dcache_page(*page);
page             2321 mm/ksm.c       					put_page(*page);
page             2325 mm/ksm.c       			put_page(*page);
page             2390 mm/ksm.c       	struct page *uninitialized_var(page);
page             2394 mm/ksm.c       		rmap_item = scan_get_next_rmap_item(&page);
page             2397 mm/ksm.c       		cmp_and_merge_page(page, rmap_item);
page             2398 mm/ksm.c       		put_page(page);
page             2567 mm/ksm.c       struct page *ksm_might_need_to_copy(struct page *page,
page             2570 mm/ksm.c       	struct anon_vma *anon_vma = page_anon_vma(page);
page             2571 mm/ksm.c       	struct page *new_page;
page             2573 mm/ksm.c       	if (PageKsm(page)) {
page             2574 mm/ksm.c       		if (page_stable_node(page) &&
page             2576 mm/ksm.c       			return page;	/* no need to copy it */
page             2578 mm/ksm.c       		return page;		/* no need to copy it */
page             2580 mm/ksm.c       		 page->index == linear_page_index(vma, address)) {
page             2581 mm/ksm.c       		return page;		/* still no need to copy it */
page             2583 mm/ksm.c       	if (!PageUptodate(page))
page             2584 mm/ksm.c       		return page;		/* let do_swap_page report the error */
page             2588 mm/ksm.c       		copy_user_highpage(new_page, page, address, vma);
page             2598 mm/ksm.c       void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
page             2604 mm/ksm.c       	VM_BUG_ON_PAGE(!PageKsm(page), page);
page             2610 mm/ksm.c       	VM_BUG_ON_PAGE(!PageLocked(page), page);
page             2612 mm/ksm.c       	stable_node = page_stable_node(page);
page             2647 mm/ksm.c       			if (!rwc->rmap_one(page, vma, addr, rwc->arg)) {
page             2651 mm/ksm.c       			if (rwc->done && rwc->done(page)) {
page             2662 mm/ksm.c       bool reuse_ksm_page(struct page *page,
page             2667 mm/ksm.c       	if (WARN_ON(is_zero_pfn(page_to_pfn(page))) ||
page             2668 mm/ksm.c       			WARN_ON(!page_mapped(page)) ||
page             2669 mm/ksm.c       			WARN_ON(!PageLocked(page))) {
page             2670 mm/ksm.c       		dump_page(page, "reuse_ksm_page");
page             2675 mm/ksm.c       	if (PageSwapCache(page) || !page_stable_node(page))
page             2678 mm/ksm.c       	if (!page_ref_freeze(page, 1))
page             2681 mm/ksm.c       	page_move_anon_rmap(page, vma);
page             2682 mm/ksm.c       	page->index = linear_page_index(vma, address);
page             2683 mm/ksm.c       	page_ref_unfreeze(page, 1);
page             2688 mm/ksm.c       void ksm_migrate_page(struct page *newpage, struct page *oldpage)
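
Aside on the mm/ksm.c hits: cmp_and_merge_page() checksums a page (calc_checksum), searches the stable and unstable trees, and merges only after a full-content comparison (memcmp_pages). Userspace feeds ksmd by marking memory mergeable; a small runnable example, assuming CONFIG_KSM and /sys/kernel/mm/ksm/run set to 1.

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 2UL << 20;
	char *a = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (a == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(a, 0x11, len);	/* many identical pages: ideal KSM input */

	/* Only MADV_MERGEABLE areas are walked by the KSM scanner. */
	if (madvise(a, len, MADV_MERGEABLE))
		perror("madvise(MADV_MERGEABLE)");	/* needs CONFIG_KSM */

	/* While this runs, ksmd checksums the pages and deduplicates
	 * identical ones via write_protect_page()/replace_page(). */
	getchar();	/* keep the mapping alive while watching ksm stats */
	munmap(a, len);
	return 0;
}
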
page               62 mm/list_lru.c  	struct page *page;
page               66 mm/list_lru.c  	page = virt_to_head_page(ptr);
page               67 mm/list_lru.c  	return memcg_from_slab_page(page);
page              196 mm/madvise.c   		struct page *page;
page              209 mm/madvise.c   		page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
page              211 mm/madvise.c   		if (page)
page              212 mm/madvise.c   			put_page(page);
page              227 mm/madvise.c   	struct page *page;
page              233 mm/madvise.c   		page = find_get_entry(mapping, index);
page              234 mm/madvise.c   		if (!xa_is_value(page)) {
page              235 mm/madvise.c   			if (page)
page              236 mm/madvise.c   				put_page(page);
page              239 mm/madvise.c   		swap = radix_to_swp_entry(page);
page              240 mm/madvise.c   		page = read_swap_cache_async(swap, GFP_HIGHUSER_MOVABLE,
page              242 mm/madvise.c   		if (page)
page              243 mm/madvise.c   			put_page(page);
page              311 mm/madvise.c   	struct page *page = NULL;
page              337 mm/madvise.c   		page = pmd_page(orig_pmd);
page              340 mm/madvise.c   		if (page_mapcount(page) != 1)
page              346 mm/madvise.c   			get_page(page);
page              348 mm/madvise.c   			lock_page(page);
page              349 mm/madvise.c   			err = split_huge_page(page);
page              350 mm/madvise.c   			unlock_page(page);
page              351 mm/madvise.c   			put_page(page);
page              365 mm/madvise.c   		ClearPageReferenced(page);
page              366 mm/madvise.c   		test_and_clear_page_young(page);
page              368 mm/madvise.c   			if (!isolate_lru_page(page)) {
page              369 mm/madvise.c   				if (PageUnevictable(page))
page              370 mm/madvise.c   					putback_lru_page(page);
page              372 mm/madvise.c   					list_add(&page->lru, &page_list);
page              375 mm/madvise.c   			deactivate_page(page);
page              400 mm/madvise.c   		page = vm_normal_page(vma, addr, ptent);
page              401 mm/madvise.c   		if (!page)
page              408 mm/madvise.c   		if (PageTransCompound(page)) {
page              409 mm/madvise.c   			if (page_mapcount(page) != 1)
page              411 mm/madvise.c   			get_page(page);
page              412 mm/madvise.c   			if (!trylock_page(page)) {
page              413 mm/madvise.c   				put_page(page);
page              417 mm/madvise.c   			if (split_huge_page(page)) {
page              418 mm/madvise.c   				unlock_page(page);
page              419 mm/madvise.c   				put_page(page);
page              423 mm/madvise.c   			unlock_page(page);
page              424 mm/madvise.c   			put_page(page);
page              432 mm/madvise.c   		if (page_mapcount(page) != 1)
page              435 mm/madvise.c   		VM_BUG_ON_PAGE(PageTransCompound(page), page);
page              451 mm/madvise.c   		ClearPageReferenced(page);
page              452 mm/madvise.c   		test_and_clear_page_young(page);
page              454 mm/madvise.c   			if (!isolate_lru_page(page)) {
page              455 mm/madvise.c   				if (PageUnevictable(page))
page              456 mm/madvise.c   					putback_lru_page(page);
page              458 mm/madvise.c   					list_add(&page->lru, &page_list);
page              461 mm/madvise.c   			deactivate_page(page);
page              571 mm/madvise.c   	struct page *page;
page              609 mm/madvise.c   		page = vm_normal_page(vma, addr, ptent);
page              610 mm/madvise.c   		if (!page)
page              618 mm/madvise.c   		if (PageTransCompound(page)) {
page              619 mm/madvise.c   			if (page_mapcount(page) != 1)
page              621 mm/madvise.c   			get_page(page);
page              622 mm/madvise.c   			if (!trylock_page(page)) {
page              623 mm/madvise.c   				put_page(page);
page              627 mm/madvise.c   			if (split_huge_page(page)) {
page              628 mm/madvise.c   				unlock_page(page);
page              629 mm/madvise.c   				put_page(page);
page              633 mm/madvise.c   			unlock_page(page);
page              634 mm/madvise.c   			put_page(page);
page              641 mm/madvise.c   		VM_BUG_ON_PAGE(PageTransCompound(page), page);
page              643 mm/madvise.c   		if (PageSwapCache(page) || PageDirty(page)) {
page              644 mm/madvise.c   			if (!trylock_page(page))
page              650 mm/madvise.c   			if (page_mapcount(page) != 1) {
page              651 mm/madvise.c   				unlock_page(page);
page              655 mm/madvise.c   			if (PageSwapCache(page) && !try_to_free_swap(page)) {
page              656 mm/madvise.c   				unlock_page(page);
page              660 mm/madvise.c   			ClearPageDirty(page);
page              661 mm/madvise.c   			unlock_page(page);
page              679 mm/madvise.c   		mark_page_lazyfree(page);
page              871 mm/madvise.c   	struct page *page;
page              883 mm/madvise.c   		ret = get_user_pages_fast(start, 1, 0, &page);
page              886 mm/madvise.c   		pfn = page_to_pfn(page);
page              893 mm/madvise.c   		order = compound_order(compound_head(page));
page              895 mm/madvise.c   		if (PageHWPoison(page)) {
page              896 mm/madvise.c   			put_page(page);
page              904 mm/madvise.c   			ret = soft_offline_page(page, MF_COUNT_INCREASED);
page              919 mm/madvise.c   		put_page(page);
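
Aside on the mm/madvise.c hits: the run around madvise.c:571-679 is the MADV_FREE path, which clears dirty state and calls mark_page_lazyfree() so clean anonymous pages can be reclaimed without swap I/O. The same behavior driven from userspace, with arbitrary sizes.

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 1UL << 20;
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(p, 0x42, len);

	/* Pages stay mapped but become lazily reclaimable. */
	if (madvise(p, len, MADV_FREE))
		perror("madvise(MADV_FREE)");

	/* Writing again cancels the lazy free for the touched pages;
	 * untouched pages may read back as zero after reclaim. */
	p[0] = 0x42;
	munmap(p, len);
	return 0;
}
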
page              458 mm/memcontrol.c struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
page              462 mm/memcontrol.c 	memcg = page->mem_cgroup;
page              483 mm/memcontrol.c ino_t page_cgroup_ino(struct page *page)
page              489 mm/memcontrol.c 	if (PageSlab(page) && !PageTail(page))
page              490 mm/memcontrol.c 		memcg = memcg_from_slab_page(page);
page              492 mm/memcontrol.c 		memcg = READ_ONCE(page->mem_cgroup);
page              502 mm/memcontrol.c mem_cgroup_page_nodeinfo(struct mem_cgroup *memcg, struct page *page)
page              504 mm/memcontrol.c 	int nid = page_to_nid(page);
page              516 mm/memcontrol.c soft_limit_tree_from_page(struct page *page)
page              518 mm/memcontrol.c 	int nid = page_to_nid(page);
page              598 mm/memcontrol.c static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
page              604 mm/memcontrol.c 	mctz = soft_limit_tree_from_page(page);
page              612 mm/memcontrol.c 		mz = mem_cgroup_page_nodeinfo(memcg, page);
page              771 mm/memcontrol.c 	struct page *page = virt_to_head_page(p);
page              772 mm/memcontrol.c 	pg_data_t *pgdat = page_pgdat(page);
page              777 mm/memcontrol.c 	memcg = memcg_from_slab_page(page);
page              846 mm/memcontrol.c 					 struct page *page,
page              853 mm/memcontrol.c 	if (PageAnon(page))
page              857 mm/memcontrol.c 		if (PageSwapBacked(page))
page              862 mm/memcontrol.c 		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
page              909 mm/memcontrol.c static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
page              925 mm/memcontrol.c 			mem_cgroup_update_tree(memcg, page);
page              989 mm/memcontrol.c struct mem_cgroup *get_mem_cgroup_from_page(struct page *page)
page              991 mm/memcontrol.c 	struct mem_cgroup *memcg = page->mem_cgroup;
page             1247 mm/memcontrol.c struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct pglist_data *pgdat)
page             1258 mm/memcontrol.c 	memcg = page->mem_cgroup;
page             1266 mm/memcontrol.c 	mz = mem_cgroup_page_nodeinfo(memcg, page);
page             2086 mm/memcontrol.c struct mem_cgroup *lock_page_memcg(struct page *page)
page             2107 mm/memcontrol.c 	memcg = page->mem_cgroup;
page             2115 mm/memcontrol.c 	if (memcg != page->mem_cgroup) {
page             2156 mm/memcontrol.c void unlock_page_memcg(struct page *page)
page             2158 mm/memcontrol.c 	__unlock_page_memcg(page->mem_cgroup);
page             2729 mm/memcontrol.c static void lock_page_lru(struct page *page, int *isolated)
page             2731 mm/memcontrol.c 	pg_data_t *pgdat = page_pgdat(page);
page             2734 mm/memcontrol.c 	if (PageLRU(page)) {
page             2737 mm/memcontrol.c 		lruvec = mem_cgroup_page_lruvec(page, pgdat);
page             2738 mm/memcontrol.c 		ClearPageLRU(page);
page             2739 mm/memcontrol.c 		del_page_from_lru_list(page, lruvec, page_lru(page));
page             2745 mm/memcontrol.c static void unlock_page_lru(struct page *page, int isolated)
page             2747 mm/memcontrol.c 	pg_data_t *pgdat = page_pgdat(page);
page             2752 mm/memcontrol.c 		lruvec = mem_cgroup_page_lruvec(page, pgdat);
page             2753 mm/memcontrol.c 		VM_BUG_ON_PAGE(PageLRU(page), page);
page             2754 mm/memcontrol.c 		SetPageLRU(page);
page             2755 mm/memcontrol.c 		add_page_to_lru_list(page, lruvec, page_lru(page));
page             2760 mm/memcontrol.c static void commit_charge(struct page *page, struct mem_cgroup *memcg,
page             2765 mm/memcontrol.c 	VM_BUG_ON_PAGE(page->mem_cgroup, page);
page             2772 mm/memcontrol.c 		lock_page_lru(page, &isolated);
page             2788 mm/memcontrol.c 	page->mem_cgroup = memcg;
page             2791 mm/memcontrol.c 		unlock_page_lru(page, isolated);
page             2803 mm/memcontrol.c 	struct page *page;
page             2808 mm/memcontrol.c 	page = virt_to_head_page(p);
page             2815 mm/memcontrol.c 	if (PageSlab(page))
page             2816 mm/memcontrol.c 		return memcg_from_slab_page(page);
page             2819 mm/memcontrol.c 	return page->mem_cgroup;
page             3013 mm/memcontrol.c int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
page             3050 mm/memcontrol.c int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
page             3060 mm/memcontrol.c 		ret = __memcg_kmem_charge_memcg(page, gfp, order, memcg);
page             3062 mm/memcontrol.c 			page->mem_cgroup = memcg;
page             3063 mm/memcontrol.c 			__SetPageKmemcg(page);
page             3090 mm/memcontrol.c void __memcg_kmem_uncharge(struct page *page, int order)
page             3092 mm/memcontrol.c 	struct mem_cgroup *memcg = page->mem_cgroup;
page             3098 mm/memcontrol.c 	VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);
page             3100 mm/memcontrol.c 	page->mem_cgroup = NULL;
page             3103 mm/memcontrol.c 	if (PageKmemcg(page))
page             3104 mm/memcontrol.c 		__ClearPageKmemcg(page);
page             3116 mm/memcontrol.c void mem_cgroup_split_huge_fixup(struct page *head)
page             4506 mm/memcontrol.c void mem_cgroup_track_foreign_dirty_slowpath(struct page *page,
page             4509 mm/memcontrol.c 	struct mem_cgroup *memcg = page->mem_cgroup;
page             4516 mm/memcontrol.c 	trace_track_foreign_dirty(page, wb);
page             5361 mm/memcontrol.c 	struct page	*page;
page             5372 mm/memcontrol.c static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
page             5375 mm/memcontrol.c 	struct page *page = vm_normal_page(vma, addr, ptent);
page             5377 mm/memcontrol.c 	if (!page || !page_mapped(page))
page             5379 mm/memcontrol.c 	if (PageAnon(page)) {
page             5386 mm/memcontrol.c 	if (!get_page_unless_zero(page))
page             5389 mm/memcontrol.c 	return page;
page             5393 mm/memcontrol.c static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
page             5396 mm/memcontrol.c 	struct page *page = NULL;
page             5408 mm/memcontrol.c 		page = device_private_entry_to_page(ent);
page             5413 mm/memcontrol.c 		if (!page_ref_add_unless(page, 1, 1))
page             5415 mm/memcontrol.c 		return page;
page             5422 mm/memcontrol.c 	page = find_get_page(swap_address_space(ent), swp_offset(ent));
page             5426 mm/memcontrol.c 	return page;
page             5429 mm/memcontrol.c static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
page             5436 mm/memcontrol.c static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
page             5439 mm/memcontrol.c 	struct page *page = NULL;
page             5455 mm/memcontrol.c 		page = find_get_entry(mapping, pgoff);
page             5456 mm/memcontrol.c 		if (xa_is_value(page)) {
page             5457 mm/memcontrol.c 			swp_entry_t swp = radix_to_swp_entry(page);
page             5460 mm/memcontrol.c 			page = find_get_page(swap_address_space(swp),
page             5464 mm/memcontrol.c 		page = find_get_page(mapping, pgoff);
page             5466 mm/memcontrol.c 	page = find_get_page(mapping, pgoff);
page             5468 mm/memcontrol.c 	return page;
page             5483 mm/memcontrol.c static int mem_cgroup_move_account(struct page *page,
page             5491 mm/memcontrol.c 	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
page             5496 mm/memcontrol.c 	VM_BUG_ON_PAGE(PageLRU(page), page);
page             5497 mm/memcontrol.c 	VM_BUG_ON(compound && !PageTransHuge(page));
page             5504 mm/memcontrol.c 	if (!trylock_page(page))
page             5508 mm/memcontrol.c 	if (page->mem_cgroup != from)
page             5511 mm/memcontrol.c 	anon = PageAnon(page);
page             5513 mm/memcontrol.c 	pgdat = page_pgdat(page);
page             5519 mm/memcontrol.c 	if (!anon && page_mapped(page)) {
page             5529 mm/memcontrol.c 	if (!anon && PageDirty(page)) {
page             5530 mm/memcontrol.c 		struct address_space *mapping = page_mapping(page);
page             5538 mm/memcontrol.c 	if (PageWriteback(page)) {
page             5550 mm/memcontrol.c 	page->mem_cgroup = to;
page             5557 mm/memcontrol.c 	mem_cgroup_charge_statistics(to, page, compound, nr_pages);
page             5558 mm/memcontrol.c 	memcg_check_events(to, page);
page             5559 mm/memcontrol.c 	mem_cgroup_charge_statistics(from, page, compound, -nr_pages);
page             5560 mm/memcontrol.c 	memcg_check_events(from, page);
page             5563 mm/memcontrol.c 	unlock_page(page);
page             5597 mm/memcontrol.c 	struct page *page = NULL;
page             5602 mm/memcontrol.c 		page = mc_handle_present_pte(vma, addr, ptent);
page             5604 mm/memcontrol.c 		page = mc_handle_swap_pte(vma, ptent, &ent);
page             5606 mm/memcontrol.c 		page = mc_handle_file_pte(vma, addr, ptent, &ent);
page             5608 mm/memcontrol.c 	if (!page && !ent.val)
page             5610 mm/memcontrol.c 	if (page) {
page             5616 mm/memcontrol.c 		if (page->mem_cgroup == mc.from) {
page             5618 mm/memcontrol.c 			if (is_device_private_page(page))
page             5621 mm/memcontrol.c 				target->page = page;
page             5624 mm/memcontrol.c 			put_page(page);
page             5630 mm/memcontrol.c 	if (ent.val && !ret && (!page || !PageTransCompound(page)) &&
page             5648 mm/memcontrol.c 	struct page *page = NULL;
page             5656 mm/memcontrol.c 	page = pmd_page(pmd);
page             5657 mm/memcontrol.c 	VM_BUG_ON_PAGE(!page || !PageHead(page), page);
page             5660 mm/memcontrol.c 	if (page->mem_cgroup == mc.from) {
page             5663 mm/memcontrol.c 			get_page(page);
page             5664 mm/memcontrol.c 			target->page = page;
page             5886 mm/memcontrol.c 	struct page *page;
page             5896 mm/memcontrol.c 			page = target.page;
page             5897 mm/memcontrol.c 			if (!isolate_lru_page(page)) {
page             5898 mm/memcontrol.c 				if (!mem_cgroup_move_account(page, true,
page             5903 mm/memcontrol.c 				putback_lru_page(page);
page             5905 mm/memcontrol.c 			put_page(page);
page             5907 mm/memcontrol.c 			page = target.page;
page             5908 mm/memcontrol.c 			if (!mem_cgroup_move_account(page, true,
page             5913 mm/memcontrol.c 			put_page(page);
page             5936 mm/memcontrol.c 			page = target.page;
page             5943 mm/memcontrol.c 			if (PageTransCompound(page))
page             5945 mm/memcontrol.c 			if (!device && isolate_lru_page(page))
page             5947 mm/memcontrol.c 			if (!mem_cgroup_move_account(page, false,
page             5954 mm/memcontrol.c 				putback_lru_page(page);
page             5956 mm/memcontrol.c 			put_page(page);
page             6513 mm/memcontrol.c int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
page             6518 mm/memcontrol.c 	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
page             6524 mm/memcontrol.c 	if (PageSwapCache(page)) {
page             6532 mm/memcontrol.c 		VM_BUG_ON_PAGE(!PageLocked(page), page);
page             6533 mm/memcontrol.c 		if (compound_head(page)->mem_cgroup)
page             6537 mm/memcontrol.c 			swp_entry_t ent = { .val = page_private(page), };
page             6559 mm/memcontrol.c int mem_cgroup_try_charge_delay(struct page *page, struct mm_struct *mm,
page             6566 mm/memcontrol.c 	ret = mem_cgroup_try_charge(page, mm, gfp_mask, memcgp, compound);
page             6568 mm/memcontrol.c 	mem_cgroup_throttle_swaprate(memcg, page_to_nid(page), gfp_mask);
page             6589 mm/memcontrol.c void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
page             6592 mm/memcontrol.c 	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
page             6594 mm/memcontrol.c 	VM_BUG_ON_PAGE(!page->mapping, page);
page             6595 mm/memcontrol.c 	VM_BUG_ON_PAGE(PageLRU(page) && !lrucare, page);
page             6607 mm/memcontrol.c 	commit_charge(page, memcg, lrucare);
page             6610 mm/memcontrol.c 	mem_cgroup_charge_statistics(memcg, page, compound, nr_pages);
page             6611 mm/memcontrol.c 	memcg_check_events(memcg, page);
page             6614 mm/memcontrol.c 	if (do_memsw_account() && PageSwapCache(page)) {
page             6615 mm/memcontrol.c 		swp_entry_t entry = { .val = page_private(page) };
page             6633 mm/memcontrol.c void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
page             6636 mm/memcontrol.c 	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
page             6659 mm/memcontrol.c 	struct page *dummy_page;
page             6695 mm/memcontrol.c static void uncharge_page(struct page *page, struct uncharge_gather *ug)
page             6697 mm/memcontrol.c 	VM_BUG_ON_PAGE(PageLRU(page), page);
page             6698 mm/memcontrol.c 	VM_BUG_ON_PAGE(page_count(page) && !is_zone_device_page(page) &&
page             6699 mm/memcontrol.c 			!PageHWPoison(page) , page);
page             6701 mm/memcontrol.c 	if (!page->mem_cgroup)
page             6710 mm/memcontrol.c 	if (ug->memcg != page->mem_cgroup) {
page             6715 mm/memcontrol.c 		ug->memcg = page->mem_cgroup;
page             6718 mm/memcontrol.c 	if (!PageKmemcg(page)) {
page             6721 mm/memcontrol.c 		if (PageTransHuge(page)) {
page             6722 mm/memcontrol.c 			nr_pages = compound_nr(page);
page             6725 mm/memcontrol.c 		if (PageAnon(page))
page             6729 mm/memcontrol.c 			if (PageSwapBacked(page))
page             6734 mm/memcontrol.c 		ug->nr_kmem += compound_nr(page);
page             6735 mm/memcontrol.c 		__ClearPageKmemcg(page);
page             6738 mm/memcontrol.c 	ug->dummy_page = page;
page             6739 mm/memcontrol.c 	page->mem_cgroup = NULL;
page             6755 mm/memcontrol.c 		struct page *page;
page             6757 mm/memcontrol.c 		page = list_entry(next, struct page, lru);
page             6758 mm/memcontrol.c 		next = page->lru.next;
page             6760 mm/memcontrol.c 		uncharge_page(page, &ug);
page             6774 mm/memcontrol.c void mem_cgroup_uncharge(struct page *page)
page             6782 mm/memcontrol.c 	if (!page->mem_cgroup)
page             6786 mm/memcontrol.c 	uncharge_page(page, &ug);
page             6816 mm/memcontrol.c void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
page             7032 mm/memcontrol.c void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
page             7038 mm/memcontrol.c 	VM_BUG_ON_PAGE(PageLRU(page), page);
page             7039 mm/memcontrol.c 	VM_BUG_ON_PAGE(page_count(page), page);
page             7044 mm/memcontrol.c 	memcg = page->mem_cgroup;
page             7056 mm/memcontrol.c 	nr_entries = hpage_nr_pages(page);
page             7062 mm/memcontrol.c 	VM_BUG_ON_PAGE(oldid, page);
page             7065 mm/memcontrol.c 	page->mem_cgroup = NULL;
page             7083 mm/memcontrol.c 	mem_cgroup_charge_statistics(memcg, page, PageTransHuge(page),
page             7085 mm/memcontrol.c 	memcg_check_events(memcg, page);
page             7100 mm/memcontrol.c int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
page             7102 mm/memcontrol.c 	unsigned int nr_pages = hpage_nr_pages(page);
page             7110 mm/memcontrol.c 	memcg = page->mem_cgroup;
page             7135 mm/memcontrol.c 	VM_BUG_ON_PAGE(oldid, page);
page             7183 mm/memcontrol.c bool mem_cgroup_swap_full(struct page *page)
page             7187 mm/memcontrol.c 	VM_BUG_ON_PAGE(!PageLocked(page), page);
page             7194 mm/memcontrol.c 	memcg = page->mem_cgroup;
page               33 mm/memfd.c     	struct page *page;
page               39 mm/memfd.c     	xas_for_each(xas, page, ULONG_MAX) {
page               40 mm/memfd.c     		if (xa_is_value(page))
page               42 mm/memfd.c     		page = find_subpage(page, xas->xa_index);
page               43 mm/memfd.c     		if (page_count(page) - page_mapcount(page) > 1)
page               69 mm/memfd.c     	struct page *page;
page               88 mm/memfd.c     		xas_for_each_marked(&xas, page, ULONG_MAX, MEMFD_TAG_PINNED) {
page               90 mm/memfd.c     			if (xa_is_value(page))
page               92 mm/memfd.c     			page = find_subpage(page, xas.xa_index);
page               93 mm/memfd.c     			if (page_count(page) - page_mapcount(page) != 1) {
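
Aside on the mm/memfd.c hits: the page_count() vs page_mapcount() comparison detects extra pins (e.g. GUP references), and the MEMFD_TAG_PINNED pass runs when write-sealing a memfd, which must wait until no page is pinned for writing. A minimal runnable example of the userspace side, assuming glibc's memfd_create() wrapper is available.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = memfd_create("demo", MFD_ALLOW_SEALING);
	if (fd < 0) {
		perror("memfd_create");
		return 1;
	}
	if (ftruncate(fd, 4096))
		perror("ftruncate");

	/* Adding F_SEAL_WRITE triggers the pin tagging/waiting indexed
	 * above; it fails with EBUSY if writable mappings still exist. */
	if (fcntl(fd, F_ADD_SEALS, F_SEAL_WRITE | F_SEAL_SHRINK | F_SEAL_GROW))
		perror("F_ADD_SEALS");

	close(fd);
	return 0;
}
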
page               81 mm/memory-failure.c static int hwpoison_filter_dev(struct page *p)
page              111 mm/memory-failure.c static int hwpoison_filter_flags(struct page *p)
page              136 mm/memory-failure.c static int hwpoison_filter_task(struct page *p)
page              147 mm/memory-failure.c static int hwpoison_filter_task(struct page *p) { return 0; }
page              150 mm/memory-failure.c int hwpoison_filter(struct page *p)
page              167 mm/memory-failure.c int hwpoison_filter(struct page *p)
page              241 mm/memory-failure.c void shake_page(struct page *p, int access)
page              264 mm/memory-failure.c static unsigned long dev_pagemap_mapping_shift(struct page *page,
page              267 mm/memory-failure.c 	unsigned long address = vma_address(page, vma);
page              308 mm/memory-failure.c static void add_to_kill(struct task_struct *tsk, struct page *p,
page              438 mm/memory-failure.c static void collect_procs_anon(struct page *page, struct list_head *to_kill,
page              446 mm/memory-failure.c 	av = page_lock_anon_vma_read(page);
page              450 mm/memory-failure.c 	pgoff = page_to_pgoff(page);
page              461 mm/memory-failure.c 			if (!page_mapped_in_vma(page, vma))
page              464 mm/memory-failure.c 				add_to_kill(t, page, vma, to_kill, tkc);
page              474 mm/memory-failure.c static void collect_procs_file(struct page *page, struct list_head *to_kill,
page              479 mm/memory-failure.c 	struct address_space *mapping = page->mapping;
page              484 mm/memory-failure.c 		pgoff_t pgoff = page_to_pgoff(page);
page              499 mm/memory-failure.c 				add_to_kill(t, page, vma, to_kill, tkc);
page              512 mm/memory-failure.c static void collect_procs(struct page *page, struct list_head *tokill,
page              517 mm/memory-failure.c 	if (!page->mapping)
page              523 mm/memory-failure.c 	if (PageAnon(page))
page              524 mm/memory-failure.c 		collect_procs_anon(page, tokill, &tk, force_early);
page              526 mm/memory-failure.c 		collect_procs_file(page, tokill, &tk, force_early);
page              568 mm/memory-failure.c static int delete_from_lru_cache(struct page *p)
page              593 mm/memory-failure.c static int truncate_error_page(struct page *p, unsigned long pfn,
page              631 mm/memory-failure.c static int me_kernel(struct page *p, unsigned long pfn)
page              639 mm/memory-failure.c static int me_unknown(struct page *p, unsigned long pfn)
page              648 mm/memory-failure.c static int me_pagecache_clean(struct page *p, unsigned long pfn)
page              689 mm/memory-failure.c static int me_pagecache_dirty(struct page *p, unsigned long pfn)
page              755 mm/memory-failure.c static int me_swapcache_dirty(struct page *p, unsigned long pfn)
page              767 mm/memory-failure.c static int me_swapcache_clean(struct page *p, unsigned long pfn)
page              783 mm/memory-failure.c static int me_huge_page(struct page *p, unsigned long pfn)
page              786 mm/memory-failure.c 	struct page *hpage = compound_head(p);
page              839 mm/memory-failure.c 	int (*action)(struct page *p, unsigned long pfn);
page              897 mm/memory-failure.c static int page_action(struct page_state *ps, struct page *p,
page              930 mm/memory-failure.c int get_hwpoison_page(struct page *page)
page              932 mm/memory-failure.c 	struct page *head = compound_head(page);
page              943 mm/memory-failure.c 				page_to_pfn(page));
page              949 mm/memory-failure.c 		if (head == compound_head(page))
page              953 mm/memory-failure.c 			page_to_pfn(page));
page              965 mm/memory-failure.c static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
page              966 mm/memory-failure.c 				  int flags, struct page **hpagep)
page              973 mm/memory-failure.c 	struct page *hpage = *hpagep;
page             1061 mm/memory-failure.c static int identify_page_state(unsigned long pfn, struct page *p,
page             1086 mm/memory-failure.c 	struct page *p = pfn_to_page(pfn);
page             1087 mm/memory-failure.c 	struct page *head = compound_head(p);
page             1159 mm/memory-failure.c 	struct page *page = pfn_to_page(pfn);
page             1175 mm/memory-failure.c 	cookie = dax_lock_page(page);
page             1179 mm/memory-failure.c 	if (hwpoison_filter(page)) {
page             1196 mm/memory-failure.c 	SetPageHWPoison(page);
page             1205 mm/memory-failure.c 	collect_procs(page, &tokill, flags & MF_ACTION_REQUIRED);
page             1217 mm/memory-failure.c 		start = (page->index << PAGE_SHIFT) & ~(size - 1);
page             1218 mm/memory-failure.c 		unmap_mapping_range(page->mapping, start, start + size, 0);
page             1223 mm/memory-failure.c 	dax_unlock_page(page, cookie);
page             1250 mm/memory-failure.c 	struct page *p;
page             1251 mm/memory-failure.c 	struct page *hpage;
page             1252 mm/memory-failure.c 	struct page *orig_head;
page             1535 mm/memory-failure.c 	struct page *page;
page             1536 mm/memory-failure.c 	struct page *p;
page             1545 mm/memory-failure.c 	page = compound_head(p);
page             1553 mm/memory-failure.c 	if (page_count(page) > 1) {
page             1559 mm/memory-failure.c 	if (page_mapped(page)) {
page             1565 mm/memory-failure.c 	if (page_mapping(page)) {
page             1576 mm/memory-failure.c 	if (!PageHuge(page) && PageTransHuge(page)) {
page             1590 mm/memory-failure.c 	lock_page(page);
page             1597 mm/memory-failure.c 	if (TestClearPageHWPoison(page)) {
page             1603 mm/memory-failure.c 	unlock_page(page);
page             1605 mm/memory-failure.c 	put_hwpoison_page(page);
page             1607 mm/memory-failure.c 		put_hwpoison_page(page);
page             1613 mm/memory-failure.c static struct page *new_page(struct page *p, unsigned long private)
page             1626 mm/memory-failure.c static int __get_any_page(struct page *p, unsigned long pfn, int flags)
page             1656 mm/memory-failure.c static int get_any_page(struct page *page, unsigned long pfn, int flags)
page             1658 mm/memory-failure.c 	int ret = __get_any_page(page, pfn, flags);
page             1660 mm/memory-failure.c 	if (ret == 1 && !PageHuge(page) &&
page             1661 mm/memory-failure.c 	    !PageLRU(page) && !__PageMovable(page)) {
page             1665 mm/memory-failure.c 		put_hwpoison_page(page);
page             1666 mm/memory-failure.c 		shake_page(page, 1);
page             1671 mm/memory-failure.c 		ret = __get_any_page(page, pfn, 0);
page             1672 mm/memory-failure.c 		if (ret == 1 && !PageLRU(page)) {
page             1674 mm/memory-failure.c 			put_hwpoison_page(page);
page             1676 mm/memory-failure.c 				pfn, page->flags, &page->flags);
page             1683 mm/memory-failure.c static int soft_offline_huge_page(struct page *page, int flags)
page             1686 mm/memory-failure.c 	unsigned long pfn = page_to_pfn(page);
page             1687 mm/memory-failure.c 	struct page *hpage = compound_head(page);
page             1718 mm/memory-failure.c 			pfn, ret, page->flags, &page->flags);
page             1731 mm/memory-failure.c 		ret = dissolve_free_huge_page(page);
page             1733 mm/memory-failure.c 			if (set_hwpoison_free_buddy_page(page))
page             1742 mm/memory-failure.c static int __soft_offline_page(struct page *page, int flags)
page             1745 mm/memory-failure.c 	unsigned long pfn = page_to_pfn(page);
page             1753 mm/memory-failure.c 	lock_page(page);
page             1754 mm/memory-failure.c 	wait_on_page_writeback(page);
page             1755 mm/memory-failure.c 	if (PageHWPoison(page)) {
page             1756 mm/memory-failure.c 		unlock_page(page);
page             1757 mm/memory-failure.c 		put_hwpoison_page(page);
page             1765 mm/memory-failure.c 	ret = invalidate_inode_page(page);
page             1766 mm/memory-failure.c 	unlock_page(page);
page             1772 mm/memory-failure.c 		put_hwpoison_page(page);
page             1774 mm/memory-failure.c 		SetPageHWPoison(page);
page             1784 mm/memory-failure.c 	if (PageLRU(page))
page             1785 mm/memory-failure.c 		ret = isolate_lru_page(page);
page             1787 mm/memory-failure.c 		ret = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
page             1792 mm/memory-failure.c 	put_hwpoison_page(page);
page             1800 mm/memory-failure.c 		if (!__PageMovable(page))
page             1801 mm/memory-failure.c 			inc_node_page_state(page, NR_ISOLATED_ANON +
page             1802 mm/memory-failure.c 						page_is_file_cache(page));
page             1803 mm/memory-failure.c 		list_add(&page->lru, &pagelist);
page             1811 mm/memory-failure.c 				pfn, ret, page->flags, &page->flags);
page             1817 mm/memory-failure.c 			pfn, ret, page_count(page), page->flags, &page->flags);
page             1822 mm/memory-failure.c static int soft_offline_in_use_page(struct page *page, int flags)
page             1826 mm/memory-failure.c 	struct page *hpage = compound_head(page);
page             1828 mm/memory-failure.c 	if (!PageHuge(page) && PageTransHuge(hpage)) {
page             1829 mm/memory-failure.c 		lock_page(page);
page             1830 mm/memory-failure.c 		if (!PageAnon(page) || unlikely(split_huge_page(page))) {
page             1831 mm/memory-failure.c 			unlock_page(page);
page             1832 mm/memory-failure.c 			if (!PageAnon(page))
page             1833 mm/memory-failure.c 				pr_info("soft offline: %#lx: non anonymous thp\n", page_to_pfn(page));
page             1835 mm/memory-failure.c 				pr_info("soft offline: %#lx: thp split failed\n", page_to_pfn(page));
page             1836 mm/memory-failure.c 			put_hwpoison_page(page);
page             1839 mm/memory-failure.c 		unlock_page(page);
page             1849 mm/memory-failure.c 	mt = get_pageblock_migratetype(page);
page             1850 mm/memory-failure.c 	set_pageblock_migratetype(page, MIGRATE_ISOLATE);
page             1851 mm/memory-failure.c 	if (PageHuge(page))
page             1852 mm/memory-failure.c 		ret = soft_offline_huge_page(page, flags);
page             1854 mm/memory-failure.c 		ret = __soft_offline_page(page, flags);
page             1855 mm/memory-failure.c 	set_pageblock_migratetype(page, mt);
page             1859 mm/memory-failure.c static int soft_offline_free_page(struct page *page)
page             1861 mm/memory-failure.c 	int rc = dissolve_free_huge_page(page);
page             1864 mm/memory-failure.c 		if (set_hwpoison_free_buddy_page(page))
page             1894 mm/memory-failure.c int soft_offline_page(struct page *page, int flags)
page             1897 mm/memory-failure.c 	unsigned long pfn = page_to_pfn(page);
page             1899 mm/memory-failure.c 	if (is_zone_device_page(page)) {
page             1903 mm/memory-failure.c 			put_page(page);
page             1907 mm/memory-failure.c 	if (PageHWPoison(page)) {
page             1910 mm/memory-failure.c 			put_hwpoison_page(page);
page             1915 mm/memory-failure.c 	ret = get_any_page(page, pfn, flags);
page             1919 mm/memory-failure.c 		ret = soft_offline_in_use_page(page, flags);
page             1921 mm/memory-failure.c 		ret = soft_offline_free_page(page);
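
The mm/memory-failure.c entries above center on taking a safe reference on a possibly-dying page: get_hwpoison_page() resolves compound_head() and revalidates it after the grab, and the grab itself follows the get-unless-zero idiom (get_page_unless_zero() also appears in the mm/migrate.c entries further down). A minimal userspace sketch of that try-get-unless-zero pattern, with all names hypothetical rather than the kernel API:

    #include <stdatomic.h>
    #include <stdbool.h>

    struct obj { atomic_int refcount; };

    /* Only take a reference if the count is still nonzero, so an
     * object that already hit zero (and may be mid-free) is never
     * resurrected by a racing lookup. */
    static bool get_unless_zero(struct obj *o)
    {
        int c = atomic_load(&o->refcount);
        while (c != 0) {
            if (atomic_compare_exchange_weak(&o->refcount, &c, c + 1))
                return true;   /* c was nonzero and we bumped it */
            /* CAS failure reloaded c; loop re-checks for zero. */
        }
        return false;          /* already dead; do not touch */
    }

After a successful grab, the caller still has to revalidate whatever it looked up before the reference was held, which is what the head == compound_head(page) re-check indexed above is doing.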
page               94 mm/memory.c    struct page *mem_map;
page              482 mm/memory.c    			  pte_t pte, struct page *page)
page              519 mm/memory.c    	if (page)
page              520 mm/memory.c    		dump_page(page, "bad pte");
page              574 mm/memory.c    struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
page              630 mm/memory.c    struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
page              684 mm/memory.c    	struct page *page;
page              704 mm/memory.c    			page = migration_entry_to_page(entry);
page              706 mm/memory.c    			rss[mm_counter(page)]++;
page              721 mm/memory.c    			page = device_private_entry_to_page(entry);
page              732 mm/memory.c    			get_page(page);
page              733 mm/memory.c    			rss[mm_counter(page)]++;
page              734 mm/memory.c    			page_dup_rmap(page, false);
page              770 mm/memory.c    	page = vm_normal_page(vma, addr, pte);
page              771 mm/memory.c    	if (page) {
page              772 mm/memory.c    		get_page(page);
page              773 mm/memory.c    		page_dup_rmap(page, false);
page              774 mm/memory.c    		rss[mm_counter(page)]++;
page              776 mm/memory.c    		page = pte_page(pte);
page             1033 mm/memory.c    			struct page *page;
page             1035 mm/memory.c    			page = vm_normal_page(vma, addr, ptent);
page             1036 mm/memory.c    			if (unlikely(details) && page) {
page             1043 mm/memory.c    				    details->check_mapping != page_rmapping(page))
page             1049 mm/memory.c    			if (unlikely(!page))
page             1052 mm/memory.c    			if (!PageAnon(page)) {
page             1055 mm/memory.c    					set_page_dirty(page);
page             1059 mm/memory.c    					mark_page_accessed(page);
page             1061 mm/memory.c    			rss[mm_counter(page)]--;
page             1062 mm/memory.c    			page_remove_rmap(page, false);
page             1063 mm/memory.c    			if (unlikely(page_mapcount(page) < 0))
page             1064 mm/memory.c    				print_bad_pte(vma, addr, ptent, page);
page             1065 mm/memory.c    			if (unlikely(__tlb_remove_page(tlb, page))) {
page             1075 mm/memory.c    			struct page *page = device_private_entry_to_page(entry);
page             1084 mm/memory.c    				    page_rmapping(page))
page             1089 mm/memory.c    			rss[mm_counter(page)]--;
page             1090 mm/memory.c    			page_remove_rmap(page, false);
page             1091 mm/memory.c    			put_page(page);
page             1102 mm/memory.c    			struct page *page;
page             1104 mm/memory.c    			page = migration_entry_to_page(entry);
page             1105 mm/memory.c    			rss[mm_counter(page)]--;
page             1423 mm/memory.c    			struct page *page, pgprot_t prot)
page             1431 mm/memory.c    	if (PageAnon(page) || PageSlab(page) || page_has_type(page))
page             1434 mm/memory.c    	flush_dcache_page(page);
page             1443 mm/memory.c    	get_page(page);
page             1444 mm/memory.c    	inc_mm_counter_fast(mm, mm_counter_file(page));
page             1445 mm/memory.c    	page_add_file_rmap(page, false);
page             1446 mm/memory.c    	set_pte_at(mm, addr, pte, mk_pte(page, prot));
page             1485 mm/memory.c    			struct page *page)
page             1489 mm/memory.c    	if (!page_count(page))
page             1496 mm/memory.c    	return insert_page(vma, addr, page, vma->vm_page_prot);
page             1511 mm/memory.c    static int __vm_map_pages(struct vm_area_struct *vma, struct page **pages,
page             1554 mm/memory.c    int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
page             1574 mm/memory.c    int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
page             1746 mm/memory.c    		struct page *page;
page             1753 mm/memory.c    		page = pfn_to_page(pfn_t_to_pfn(pfn));
page             1754 mm/memory.c    		err = insert_page(vma, addr, page, pgprot);
page             2148 mm/memory.c    static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma)
page             2199 mm/memory.c    	struct page *page = vmf->page;
page             2214 mm/memory.c    		lock_page(page);
page             2215 mm/memory.c    		if (!page->mapping) {
page             2216 mm/memory.c    			unlock_page(page);
page             2221 mm/memory.c    		VM_BUG_ON_PAGE(!PageLocked(page), page);
page             2234 mm/memory.c    	struct page *page = vmf->page;
page             2238 mm/memory.c    	dirtied = set_page_dirty(page);
page             2239 mm/memory.c    	VM_BUG_ON_PAGE(PageAnon(page), page);
page             2246 mm/memory.c    	mapping = page_rmapping(page);
page             2247 mm/memory.c    	unlock_page(page);
page             2287 mm/memory.c    	struct page *page = vmf->page;
page             2294 mm/memory.c    	if (page)
page             2295 mm/memory.c    		page_cpupid_xchg_last(page, (1 << LAST_CPUPID_SHIFT) - 1);
page             2325 mm/memory.c    	struct page *old_page = vmf->page;
page             2326 mm/memory.c    	struct page *new_page = NULL;
page             2517 mm/memory.c    	get_page(vmf->page);
page             2526 mm/memory.c    			put_page(vmf->page);
page             2531 mm/memory.c    			unlock_page(vmf->page);
page             2532 mm/memory.c    			put_page(vmf->page);
page             2537 mm/memory.c    		lock_page(vmf->page);
page             2540 mm/memory.c    	put_page(vmf->page);
page             2568 mm/memory.c    	vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte);
page             2569 mm/memory.c    	if (!vmf->page) {
page             2589 mm/memory.c    	if (PageAnon(vmf->page)) {
page             2591 mm/memory.c    		if (PageKsm(vmf->page) && (PageSwapCache(vmf->page) ||
page             2592 mm/memory.c    					   page_count(vmf->page) != 1))
page             2594 mm/memory.c    		if (!trylock_page(vmf->page)) {
page             2595 mm/memory.c    			get_page(vmf->page);
page             2597 mm/memory.c    			lock_page(vmf->page);
page             2601 mm/memory.c    				unlock_page(vmf->page);
page             2603 mm/memory.c    				put_page(vmf->page);
page             2606 mm/memory.c    			put_page(vmf->page);
page             2608 mm/memory.c    		if (PageKsm(vmf->page)) {
page             2609 mm/memory.c    			bool reused = reuse_ksm_page(vmf->page, vmf->vma,
page             2611 mm/memory.c    			unlock_page(vmf->page);
page             2617 mm/memory.c    		if (reuse_swap_page(vmf->page, &total_map_swapcount)) {
page             2626 mm/memory.c    				page_move_anon_rmap(vmf->page, vma);
page             2628 mm/memory.c    			unlock_page(vmf->page);
page             2632 mm/memory.c    		unlock_page(vmf->page);
page             2641 mm/memory.c    	get_page(vmf->page);
page             2754 mm/memory.c    	struct page *page = NULL, *swapcache;
page             2771 mm/memory.c    			vmf->page = device_private_entry_to_page(entry);
page             2772 mm/memory.c    			ret = vmf->page->pgmap->ops->migrate_to_ram(vmf);
page             2784 mm/memory.c    	page = lookup_swap_cache(entry, vma, vmf->address);
page             2785 mm/memory.c    	swapcache = page;
page             2787 mm/memory.c    	if (!page) {
page             2793 mm/memory.c    			page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
page             2795 mm/memory.c    			if (page) {
page             2796 mm/memory.c    				__SetPageLocked(page);
page             2797 mm/memory.c    				__SetPageSwapBacked(page);
page             2798 mm/memory.c    				set_page_private(page, entry.val);
page             2799 mm/memory.c    				lru_cache_add_anon(page);
page             2800 mm/memory.c    				swap_readpage(page, true);
page             2803 mm/memory.c    			page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
page             2805 mm/memory.c    			swapcache = page;
page             2808 mm/memory.c    		if (!page) {
page             2825 mm/memory.c    	} else if (PageHWPoison(page)) {
page             2835 mm/memory.c    	locked = lock_page_or_retry(page, vma->vm_mm, vmf->flags);
page             2849 mm/memory.c    	if (unlikely((!PageSwapCache(page) ||
page             2850 mm/memory.c    			page_private(page) != entry.val)) && swapcache)
page             2853 mm/memory.c    	page = ksm_might_need_to_copy(page, vma, vmf->address);
page             2854 mm/memory.c    	if (unlikely(!page)) {
page             2856 mm/memory.c    		page = swapcache;
page             2860 mm/memory.c    	if (mem_cgroup_try_charge_delay(page, vma->vm_mm, GFP_KERNEL,
page             2874 mm/memory.c    	if (unlikely(!PageUptodate(page))) {
page             2891 mm/memory.c    	pte = mk_pte(page, vma->vm_page_prot);
page             2892 mm/memory.c    	if ((vmf->flags & FAULT_FLAG_WRITE) && reuse_swap_page(page, NULL)) {
page             2898 mm/memory.c    	flush_icache_page(vma, page);
page             2906 mm/memory.c    	if (unlikely(page != swapcache && swapcache)) {
page             2907 mm/memory.c    		page_add_new_anon_rmap(page, vma, vmf->address, false);
page             2908 mm/memory.c    		mem_cgroup_commit_charge(page, memcg, false, false);
page             2909 mm/memory.c    		lru_cache_add_active_or_unevictable(page, vma);
page             2911 mm/memory.c    		do_page_add_anon_rmap(page, vma, vmf->address, exclusive);
page             2912 mm/memory.c    		mem_cgroup_commit_charge(page, memcg, true, false);
page             2913 mm/memory.c    		activate_page(page);
page             2917 mm/memory.c    	if (mem_cgroup_swap_full(page) ||
page             2918 mm/memory.c    	    (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
page             2919 mm/memory.c    		try_to_free_swap(page);
page             2920 mm/memory.c    	unlock_page(page);
page             2921 mm/memory.c    	if (page != swapcache && swapcache) {
page             2948 mm/memory.c    	mem_cgroup_cancel_charge(page, memcg, false);
page             2951 mm/memory.c    	unlock_page(page);
page             2953 mm/memory.c    	put_page(page);
page             2954 mm/memory.c    	if (page != swapcache && swapcache) {
page             2970 mm/memory.c    	struct page *page;
page             3018 mm/memory.c    	page = alloc_zeroed_user_highpage_movable(vma, vmf->address);
page             3019 mm/memory.c    	if (!page)
page             3022 mm/memory.c    	if (mem_cgroup_try_charge_delay(page, vma->vm_mm, GFP_KERNEL, &memcg,
page             3031 mm/memory.c    	__SetPageUptodate(page);
page             3033 mm/memory.c    	entry = mk_pte(page, vma->vm_page_prot);
page             3049 mm/memory.c    		mem_cgroup_cancel_charge(page, memcg, false);
page             3050 mm/memory.c    		put_page(page);
page             3055 mm/memory.c    	page_add_new_anon_rmap(page, vma, vmf->address, false);
page             3056 mm/memory.c    	mem_cgroup_commit_charge(page, memcg, false, false);
page             3057 mm/memory.c    	lru_cache_add_active_or_unevictable(page, vma);
page             3067 mm/memory.c    	mem_cgroup_cancel_charge(page, memcg, false);
page             3068 mm/memory.c    	put_page(page);
page             3071 mm/memory.c    	put_page(page);
page             3113 mm/memory.c    	if (unlikely(PageHWPoison(vmf->page))) {
page             3115 mm/memory.c    			unlock_page(vmf->page);
page             3116 mm/memory.c    		put_page(vmf->page);
page             3117 mm/memory.c    		vmf->page = NULL;
page             3122 mm/memory.c    		lock_page(vmf->page);
page             3124 mm/memory.c    		VM_BUG_ON_PAGE(!PageLocked(vmf->page), vmf->page);
page             3203 mm/memory.c    static vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
page             3216 mm/memory.c    	page = compound_head(page);
page             3234 mm/memory.c    		flush_icache_page(vma, page + i);
page             3236 mm/memory.c    	entry = mk_huge_pmd(page, vma->vm_page_prot);
page             3240 mm/memory.c    	add_mm_counter(vma->vm_mm, mm_counter_file(page), HPAGE_PMD_NR);
page             3241 mm/memory.c    	page_add_file_rmap(page, true);
page             3260 mm/memory.c    static vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
page             3284 mm/memory.c    		struct page *page)
page             3291 mm/memory.c    	if (pmd_none(*vmf->pmd) && PageTransCompound(page) &&
page             3294 mm/memory.c    		VM_BUG_ON_PAGE(memcg, page);
page             3296 mm/memory.c    		ret = do_set_pmd(vmf, page);
page             3311 mm/memory.c    	flush_icache_page(vma, page);
page             3312 mm/memory.c    	entry = mk_pte(page, vma->vm_page_prot);
page             3318 mm/memory.c    		page_add_new_anon_rmap(page, vma, vmf->address, false);
page             3319 mm/memory.c    		mem_cgroup_commit_charge(page, memcg, false, false);
page             3320 mm/memory.c    		lru_cache_add_active_or_unevictable(page, vma);
page             3322 mm/memory.c    		inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page));
page             3323 mm/memory.c    		page_add_file_rmap(page, false);
page             3351 mm/memory.c    	struct page *page;
page             3357 mm/memory.c    		page = vmf->cow_page;
page             3359 mm/memory.c    		page = vmf->page;
page             3368 mm/memory.c    		ret = alloc_set_pte(vmf, vmf->memcg, page);
page             3510 mm/memory.c    	unlock_page(vmf->page);
page             3512 mm/memory.c    		put_page(vmf->page);
page             3540 mm/memory.c    	copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma);
page             3544 mm/memory.c    	unlock_page(vmf->page);
page             3545 mm/memory.c    	put_page(vmf->page);
page             3569 mm/memory.c    		unlock_page(vmf->page);
page             3573 mm/memory.c    			put_page(vmf->page);
page             3581 mm/memory.c    		unlock_page(vmf->page);
page             3582 mm/memory.c    		put_page(vmf->page);
page             3648 mm/memory.c    static int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
page             3652 mm/memory.c    	get_page(page);
page             3660 mm/memory.c    	return mpol_misplaced(page, vma, addr);
page             3666 mm/memory.c    	struct page *page = NULL;
page             3699 mm/memory.c    	page = vm_normal_page(vma, vmf->address, pte);
page             3700 mm/memory.c    	if (!page) {
page             3706 mm/memory.c    	if (PageCompound(page)) {
page             3726 mm/memory.c    	if (page_mapcount(page) > 1 && (vma->vm_flags & VM_SHARED))
page             3729 mm/memory.c    	last_cpupid = page_cpupid_last(page);
page             3730 mm/memory.c    	page_nid = page_to_nid(page);
page             3731 mm/memory.c    	target_nid = numa_migrate_prep(page, vma, vmf->address, page_nid,
page             3735 mm/memory.c    		put_page(page);
page             3740 mm/memory.c    	migrated = migrate_misplaced_page(page, vma, target_nid);
page             4327 mm/memory.c    		struct page *page = NULL;
page             4330 mm/memory.c    				gup_flags, &page, &vma, NULL);
page             4355 mm/memory.c    			maddr = kmap(page);
page             4357 mm/memory.c    				copy_to_user_page(vma, page, addr,
page             4359 mm/memory.c    				set_page_dirty_lock(page);
page             4361 mm/memory.c    				copy_from_user_page(vma, page, addr,
page             4364 mm/memory.c    			kunmap(page);
page             4365 mm/memory.c    			put_page(page);
page             4524 mm/memory.c    static void clear_gigantic_page(struct page *page,
page             4529 mm/memory.c    	struct page *p = page;
page             4533 mm/memory.c    	     i++, p = mem_map_next(p, page, i)) {
page             4541 mm/memory.c    	struct page *page = arg;
page             4543 mm/memory.c    	clear_user_highpage(page + idx, addr);
page             4546 mm/memory.c    void clear_huge_page(struct page *page,
page             4553 mm/memory.c    		clear_gigantic_page(page, addr, pages_per_huge_page);
page             4557 mm/memory.c    	process_huge_page(addr_hint, pages_per_huge_page, clear_subpage, page);
page             4560 mm/memory.c    static void copy_user_gigantic_page(struct page *dst, struct page *src,
page             4566 mm/memory.c    	struct page *dst_base = dst;
page             4567 mm/memory.c    	struct page *src_base = src;
page             4580 mm/memory.c    	struct page *dst;
page             4581 mm/memory.c    	struct page *src;
page             4593 mm/memory.c    void copy_user_huge_page(struct page *dst, struct page *src,
page             4614 mm/memory.c    long copy_huge_page_from_user(struct page *dst_page,
page             4657 mm/memory.c    bool ptlock_alloc(struct page *page)
page             4664 mm/memory.c    	page->ptl = ptl;
page             4668 mm/memory.c    void ptlock_free(struct page *page)
page             4670 mm/memory.c    	kmem_cache_free(page_ptl_cachep, page->ptl);
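
The ptlock_alloc()/ptlock_free() entries that close the mm/memory.c listing index the split page-table-lock case, where the lock is allocated separately and hung off page->ptl rather than embedded in struct page. A rough userspace analogue of that shape, assuming illustrative names throughout (this is not the kernel API):

    #include <stdlib.h>
    #include <pthread.h>
    #include <stdbool.h>

    /* Each "page" owns a separately allocated lock, mirroring
     * page->ptl when the lock type is too large to embed inline. */
    struct fake_page { pthread_mutex_t *ptl; };

    static bool ptlock_alloc_sketch(struct fake_page *page)
    {
        page->ptl = malloc(sizeof(*page->ptl));
        if (!page->ptl)
            return false;
        pthread_mutex_init(page->ptl, NULL);
        return true;
    }

    static void ptlock_free_sketch(struct fake_page *page)
    {
        pthread_mutex_destroy(page->ptl);
        free(page->ptl);
        page->ptl = NULL;
    }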
page               52 mm/memory_hotplug.c static void generic_online_page(struct page *page, unsigned int order);
page              138 mm/memory_hotplug.c void get_page_bootmem(unsigned long info,  struct page *page,
page              141 mm/memory_hotplug.c 	page->freelist = (void *)type;
page              142 mm/memory_hotplug.c 	SetPagePrivate(page);
page              143 mm/memory_hotplug.c 	set_page_private(page, info);
page              144 mm/memory_hotplug.c 	page_ref_inc(page);
page              147 mm/memory_hotplug.c void put_page_bootmem(struct page *page)
page              151 mm/memory_hotplug.c 	type = (unsigned long) page->freelist;
page              155 mm/memory_hotplug.c 	if (page_ref_dec_return(page) == 1) {
page              156 mm/memory_hotplug.c 		page->freelist = NULL;
page              157 mm/memory_hotplug.c 		ClearPagePrivate(page);
page              158 mm/memory_hotplug.c 		set_page_private(page, 0);
page              159 mm/memory_hotplug.c 		INIT_LIST_HEAD(&page->lru);
page              160 mm/memory_hotplug.c 		free_reserved_page(page);
page              170 mm/memory_hotplug.c 	struct page *page, *memmap;
page              183 mm/memory_hotplug.c 	page = virt_to_page(memmap);
page              184 mm/memory_hotplug.c 	mapsize = sizeof(struct page) * PAGES_PER_SECTION;
page              188 mm/memory_hotplug.c 	for (i = 0; i < mapsize; i++, page++)
page              189 mm/memory_hotplug.c 		get_page_bootmem(section_nr, page, SECTION_INFO);
page              192 mm/memory_hotplug.c 	page = virt_to_page(usage);
page              196 mm/memory_hotplug.c 	for (i = 0; i < mapsize; i++, page++)
page              197 mm/memory_hotplug.c 		get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
page              205 mm/memory_hotplug.c 	struct page *page, *memmap;
page              216 mm/memory_hotplug.c 	page = virt_to_page(usage);
page              220 mm/memory_hotplug.c 	for (i = 0; i < mapsize; i++, page++)
page              221 mm/memory_hotplug.c 		get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
page              229 mm/memory_hotplug.c 	struct page *page;
page              232 mm/memory_hotplug.c 	page = virt_to_page(pgdat);
page              234 mm/memory_hotplug.c 	for (i = 0; i < nr_pages; i++, page++)
page              235 mm/memory_hotplug.c 		get_page_bootmem(node, page, NODE_INFO);
page              582 mm/memory_hotplug.c void __online_page_set_limits(struct page *page)
page              587 mm/memory_hotplug.c void __online_page_increment_counters(struct page *page)
page              589 mm/memory_hotplug.c 	adjust_managed_page_count(page, 1);
page              593 mm/memory_hotplug.c void __online_page_free(struct page *page)
page              595 mm/memory_hotplug.c 	__free_reserved_page(page);
page              599 mm/memory_hotplug.c static void generic_online_page(struct page *page, unsigned int order)
page              607 mm/memory_hotplug.c 		kernel_map_pages(page, 1 << order, 1);
page              608 mm/memory_hotplug.c 	__free_pages_core(page, order);
page              611 mm/memory_hotplug.c 	if (PageHighMem(page))
page             1144 mm/memory_hotplug.c static inline int pageblock_free(struct page *page)
page             1146 mm/memory_hotplug.c 	return PageBuddy(page) && page_order(page) >= pageblock_order;
page             1152 mm/memory_hotplug.c 	struct page *page = pfn_to_page(pfn);
page             1158 mm/memory_hotplug.c 	if (pageblock_free(page)) {
page             1161 mm/memory_hotplug.c 		order = page_order(page);
page             1171 mm/memory_hotplug.c 	struct page *page = pfn_to_page(pfn);
page             1181 mm/memory_hotplug.c 	if (!node_online(page_to_nid(page)))
page             1184 mm/memory_hotplug.c 	zone = page_zone(page);
page             1185 mm/memory_hotplug.c 	pfn = page_to_pfn(page);
page             1189 mm/memory_hotplug.c 	return !has_unmovable_pages(zone, page, 0, MIGRATE_MOVABLE, SKIP_HWPOISON);
page             1221 mm/memory_hotplug.c 	struct page *page;
page             1241 mm/memory_hotplug.c 			page = pfn_to_page(pfn + i);
page             1242 mm/memory_hotplug.c 			if (zone && page_zone(page) != zone)
page             1246 mm/memory_hotplug.c 			zone = page_zone(page);
page             1271 mm/memory_hotplug.c 		struct page *page, *head;
page             1276 mm/memory_hotplug.c 		page = pfn_to_page(pfn);
page             1277 mm/memory_hotplug.c 		if (PageLRU(page))
page             1279 mm/memory_hotplug.c 		if (__PageMovable(page))
page             1282 mm/memory_hotplug.c 		if (!PageHuge(page))
page             1284 mm/memory_hotplug.c 		head = compound_head(page);
page             1287 mm/memory_hotplug.c 		skip = compound_nr(head) - (page - head);
page             1293 mm/memory_hotplug.c static struct page *new_node_page(struct page *page, unsigned long private)
page             1295 mm/memory_hotplug.c 	int nid = page_to_nid(page);
page             1307 mm/memory_hotplug.c 	return new_page_nodemask(page, nid, &nmask);
page             1314 mm/memory_hotplug.c 	struct page *page;
page             1321 mm/memory_hotplug.c 		page = pfn_to_page(pfn);
page             1323 mm/memory_hotplug.c 		if (PageHuge(page)) {
page             1324 mm/memory_hotplug.c 			struct page *head = compound_head(page);
page             1328 mm/memory_hotplug.c 		} else if (PageTransHuge(page))
page             1329 mm/memory_hotplug.c 			pfn = page_to_pfn(compound_head(page))
page             1330 mm/memory_hotplug.c 				+ hpage_nr_pages(page) - 1;
page             1339 mm/memory_hotplug.c 		if (PageHWPoison(page)) {
page             1340 mm/memory_hotplug.c 			if (WARN_ON(PageLRU(page)))
page             1341 mm/memory_hotplug.c 				isolate_lru_page(page);
page             1342 mm/memory_hotplug.c 			if (page_mapped(page))
page             1343 mm/memory_hotplug.c 				try_to_unmap(page, TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS);
page             1347 mm/memory_hotplug.c 		if (!get_page_unless_zero(page))
page             1353 mm/memory_hotplug.c 		if (PageLRU(page))
page             1354 mm/memory_hotplug.c 			ret = isolate_lru_page(page);
page             1356 mm/memory_hotplug.c 			ret = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
page             1358 mm/memory_hotplug.c 			list_add_tail(&page->lru, &source);
page             1359 mm/memory_hotplug.c 			if (!__PageMovable(page))
page             1360 mm/memory_hotplug.c 				inc_node_page_state(page, NR_ISOLATED_ANON +
page             1361 mm/memory_hotplug.c 						    page_is_file_cache(page));
page             1365 mm/memory_hotplug.c 			dump_page(page, "isolation failed");
page             1367 mm/memory_hotplug.c 		put_page(page);
page             1374 mm/memory_hotplug.c 			list_for_each_entry(page, &source, lru) {
page             1376 mm/memory_hotplug.c 				       page_to_pfn(page), ret);
page             1377 mm/memory_hotplug.c 				dump_page(page, "migration failure");
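
The get_page_bootmem()/put_page_bootmem() entries above pair page_ref_inc() with a release once page_ref_dec_return() falls back to 1, i.e. the caller that just dropped held the last extra reference and the page can be returned to the allocator. A compact sketch of that last-put pattern, using hypothetical userspace types:

    #include <stdatomic.h>
    #include <stddef.h>

    struct boot_page { atomic_int ref; void *freelist; };

    /* atomic_fetch_sub returns the old value, so old == 2 means the
     * count just dropped to the base value of 1: clear the piggy-
     * backed bookkeeping and hand the page back. */
    static void put_bootmem_sketch(struct boot_page *p,
                                   void (*free_page)(struct boot_page *))
    {
        if (atomic_fetch_sub(&p->ref, 1) == 2) {
            p->freelist = NULL;
            free_page(p);
        }
    }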
page              406 mm/mempolicy.c static int migrate_page_add(struct page *page, struct list_head *pagelist,
page              422 mm/mempolicy.c static inline bool queue_pages_required(struct page *page,
page              425 mm/mempolicy.c 	int nid = page_to_nid(page);
page              445 mm/mempolicy.c 	struct page *page;
page              453 mm/mempolicy.c 	page = pmd_page(*pmd);
page              454 mm/mempolicy.c 	if (is_huge_zero_page(page)) {
page              460 mm/mempolicy.c 	if (!queue_pages_required(page, qp))
page              467 mm/mempolicy.c 		    migrate_page_add(page, qp->pagelist, flags)) {
page              494 mm/mempolicy.c 	struct page *page;
page              517 mm/mempolicy.c 		page = vm_normal_page(vma, addr, *pte);
page              518 mm/mempolicy.c 		if (!page)
page              524 mm/mempolicy.c 		if (PageReserved(page))
page              526 mm/mempolicy.c 		if (!queue_pages_required(page, qp))
page              540 mm/mempolicy.c 			if (migrate_page_add(page, qp->pagelist, flags))
page              561 mm/mempolicy.c 	struct page *page;
page              569 mm/mempolicy.c 	page = pte_page(entry);
page              570 mm/mempolicy.c 	if (!queue_pages_required(page, qp))
page              574 mm/mempolicy.c 	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1))
page              575 mm/mempolicy.c 		isolate_huge_page(page, qp->pagelist);
page              856 mm/mempolicy.c 	struct page *p;
page              971 mm/mempolicy.c static int migrate_page_add(struct page *page, struct list_head *pagelist,
page              974 mm/mempolicy.c 	struct page *head = compound_head(page);
page             1000 mm/mempolicy.c struct page *alloc_new_node_page(struct page *page, unsigned long node)
page             1002 mm/mempolicy.c 	if (PageHuge(page))
page             1003 mm/mempolicy.c 		return alloc_huge_page_node(page_hstate(compound_head(page)),
page             1005 mm/mempolicy.c 	else if (PageTransHuge(page)) {
page             1006 mm/mempolicy.c 		struct page *thp;
page             1165 mm/mempolicy.c static struct page *new_page(struct page *page, unsigned long start)
page             1172 mm/mempolicy.c 		address = page_address_in_vma(page, vma);
page             1178 mm/mempolicy.c 	if (PageHuge(page)) {
page             1179 mm/mempolicy.c 		return alloc_huge_page_vma(page_hstate(compound_head(page)),
page             1181 mm/mempolicy.c 	} else if (PageTransHuge(page)) {
page             1182 mm/mempolicy.c 		struct page *thp;
page             1199 mm/mempolicy.c static int migrate_page_add(struct page *page, struct list_head *pagelist,
page             1211 mm/mempolicy.c static struct page *new_page(struct page *page, unsigned long start)
page             2057 mm/mempolicy.c static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
page             2060 mm/mempolicy.c 	struct page *page;
page             2062 mm/mempolicy.c 	page = __alloc_pages(gfp, order, nid);
page             2065 mm/mempolicy.c 		return page;
page             2066 mm/mempolicy.c 	if (page && page_to_nid(page) == nid) {
page             2068 mm/mempolicy.c 		__inc_numa_state(page_zone(page), NUMA_INTERLEAVE_HIT);
page             2071 mm/mempolicy.c 	return page;
page             2097 mm/mempolicy.c struct page *
page             2102 mm/mempolicy.c 	struct page *page;
page             2113 mm/mempolicy.c 		page = alloc_page_interleave(gfp, order, nid);
page             2136 mm/mempolicy.c 			page = __alloc_pages_node(hpage_node,
page             2145 mm/mempolicy.c 			if (!page && (gfp & __GFP_DIRECT_RECLAIM))
page             2146 mm/mempolicy.c 				page = __alloc_pages_node(hpage_node,
page             2155 mm/mempolicy.c 	page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask);
page             2158 mm/mempolicy.c 	return page;
page             2177 mm/mempolicy.c struct page *alloc_pages_current(gfp_t gfp, unsigned order)
page             2180 mm/mempolicy.c 	struct page *page;
page             2190 mm/mempolicy.c 		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
page             2192 mm/mempolicy.c 		page = __alloc_pages_nodemask(gfp, order,
page             2196 mm/mempolicy.c 	return page;
page             2385 mm/mempolicy.c int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
page             2389 mm/mempolicy.c 	int curnid = page_to_nid(page);
page             2439 mm/mempolicy.c 		if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
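
The queue_pages_required() entries above gate page migration on whether page_to_nid() falls inside the policy nodemask, with MPOL_MF_INVERT flipping the sense of the test. A self-contained sketch of that predicate, under the simplifying assumption that the nodemask fits in one word (names illustrative, not the kernel types):

    #include <stdbool.h>

    /* A page "requires" queueing when its node is in the mask, or,
     * under invert semantics, when it is not. */
    static bool queue_required_sketch(int page_nid, unsigned long nmask,
                                      bool invert)
    {
        bool in_mask = (nmask >> page_nid) & 1UL;
        return in_mask != invert;   /* MPOL_MF_INVERT-style flip */
    }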
page               67 mm/mempool.c   		void *addr = kmap_atomic((struct page *)element);
page               91 mm/mempool.c   		void *addr = kmap_atomic((struct page *)element);
page              106 mm/memremap.c  	struct page *first_page;
page              413 mm/memremap.c  void __put_devmap_managed_page(struct page *page)
page              415 mm/memremap.c  	int count = page_ref_dec_return(page);
page              423 mm/memremap.c  		__ClearPageActive(page);
page              424 mm/memremap.c  		__ClearPageWaiters(page);
page              426 mm/memremap.c  		mem_cgroup_uncharge(page);
page              449 mm/memremap.c  		if (is_device_private_page(page))
page              450 mm/memremap.c  			page->mapping = NULL;
page              452 mm/memremap.c  		page->pgmap->ops->page_free(page);
page              454 mm/memremap.c  		__put_page(page);
page               85 mm/migrate.c   int isolate_movable_page(struct page *page, isolate_mode_t mode)
page               98 mm/migrate.c   	if (unlikely(!get_page_unless_zero(page)))
page              106 mm/migrate.c   	if (unlikely(!__PageMovable(page)))
page              119 mm/migrate.c   	if (unlikely(!trylock_page(page)))
page              122 mm/migrate.c   	if (!PageMovable(page) || PageIsolated(page))
page              125 mm/migrate.c   	mapping = page_mapping(page);
page              126 mm/migrate.c   	VM_BUG_ON_PAGE(!mapping, page);
page              128 mm/migrate.c   	if (!mapping->a_ops->isolate_page(page, mode))
page              132 mm/migrate.c   	WARN_ON_ONCE(PageIsolated(page));
page              133 mm/migrate.c   	__SetPageIsolated(page);
page              134 mm/migrate.c   	unlock_page(page);
page              139 mm/migrate.c   	unlock_page(page);
page              141 mm/migrate.c   	put_page(page);
page              147 mm/migrate.c   void putback_movable_page(struct page *page)
page              151 mm/migrate.c   	VM_BUG_ON_PAGE(!PageLocked(page), page);
page              152 mm/migrate.c   	VM_BUG_ON_PAGE(!PageMovable(page), page);
page              153 mm/migrate.c   	VM_BUG_ON_PAGE(!PageIsolated(page), page);
page              155 mm/migrate.c   	mapping = page_mapping(page);
page              156 mm/migrate.c   	mapping->a_ops->putback_page(page);
page              157 mm/migrate.c   	__ClearPageIsolated(page);
page              170 mm/migrate.c   	struct page *page;
page              171 mm/migrate.c   	struct page *page2;
page              173 mm/migrate.c   	list_for_each_entry_safe(page, page2, l, lru) {
page              174 mm/migrate.c   		if (unlikely(PageHuge(page))) {
page              175 mm/migrate.c   			putback_active_hugepage(page);
page              178 mm/migrate.c   		list_del(&page->lru);
page              184 mm/migrate.c   		if (unlikely(__PageMovable(page))) {
page              185 mm/migrate.c   			VM_BUG_ON_PAGE(!PageIsolated(page), page);
page              186 mm/migrate.c   			lock_page(page);
page              187 mm/migrate.c   			if (PageMovable(page))
page              188 mm/migrate.c   				putback_movable_page(page);
page              190 mm/migrate.c   				__ClearPageIsolated(page);
page              191 mm/migrate.c   			unlock_page(page);
page              192 mm/migrate.c   			put_page(page);
page              194 mm/migrate.c   			mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
page              195 mm/migrate.c   					page_is_file_cache(page), -hpage_nr_pages(page));
page              196 mm/migrate.c   			putback_lru_page(page);
page              204 mm/migrate.c   static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
page              208 mm/migrate.c   		.page = old,
page              213 mm/migrate.c   	struct page *new;
page              217 mm/migrate.c   	VM_BUG_ON_PAGE(PageTail(page), page);
page              219 mm/migrate.c   		if (PageKsm(page))
page              220 mm/migrate.c   			new = page;
page              222 mm/migrate.c   			new = page - pvmw.page->index +
page              228 mm/migrate.c   			VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page);
page              275 mm/migrate.c   		if (PageTransHuge(page) && PageMlocked(page))
page              276 mm/migrate.c   			clear_page_mlock(page);
page              289 mm/migrate.c   void remove_migration_ptes(struct page *old, struct page *new, bool locked)
page              312 mm/migrate.c   	struct page *page;
page              323 mm/migrate.c   	page = migration_entry_to_page(entry);
page              330 mm/migrate.c   	if (!get_page_unless_zero(page))
page              333 mm/migrate.c   	put_and_wait_on_page_locked(page);
page              358 mm/migrate.c   	struct page *page;
page              363 mm/migrate.c   	page = migration_entry_to_page(pmd_to_swp_entry(*pmd));
page              364 mm/migrate.c   	if (!get_page_unless_zero(page))
page              367 mm/migrate.c   	put_and_wait_on_page_locked(page);
page              374 mm/migrate.c   static int expected_page_refs(struct address_space *mapping, struct page *page)
page              382 mm/migrate.c   	expected_count += is_device_private_page(page);
page              384 mm/migrate.c   		expected_count += hpage_nr_pages(page) + page_has_private(page);
page              398 mm/migrate.c   		struct page *newpage, struct page *page, int extra_count)
page              400 mm/migrate.c   	XA_STATE(xas, &mapping->i_pages, page_index(page));
page              403 mm/migrate.c   	int expected_count = expected_page_refs(mapping, page) + extra_count;
page              407 mm/migrate.c   		if (page_count(page) != expected_count)
page              411 mm/migrate.c   		newpage->index = page->index;
page              412 mm/migrate.c   		newpage->mapping = page->mapping;
page              413 mm/migrate.c   		if (PageSwapBacked(page))
page              419 mm/migrate.c   	oldzone = page_zone(page);
page              423 mm/migrate.c   	if (page_count(page) != expected_count || xas_load(&xas) != page) {
page              428 mm/migrate.c   	if (!page_ref_freeze(page, expected_count)) {
page              437 mm/migrate.c   	newpage->index = page->index;
page              438 mm/migrate.c   	newpage->mapping = page->mapping;
page              439 mm/migrate.c   	page_ref_add(newpage, hpage_nr_pages(page)); /* add cache reference */
page              440 mm/migrate.c   	if (PageSwapBacked(page)) {
page              442 mm/migrate.c   		if (PageSwapCache(page)) {
page              444 mm/migrate.c   			set_page_private(newpage, page_private(page));
page              447 mm/migrate.c   		VM_BUG_ON_PAGE(PageSwapCache(page), page);
page              451 mm/migrate.c   	dirty = PageDirty(page);
page              453 mm/migrate.c   		ClearPageDirty(page);
page              458 mm/migrate.c   	if (PageTransHuge(page)) {
page              472 mm/migrate.c   	page_ref_unfreeze(page, expected_count - hpage_nr_pages(page));
page              490 mm/migrate.c   		if (PageSwapBacked(page) && !PageSwapCache(page)) {
page              512 mm/migrate.c   				   struct page *newpage, struct page *page)
page              514 mm/migrate.c   	XA_STATE(xas, &mapping->i_pages, page_index(page));
page              518 mm/migrate.c   	expected_count = 2 + page_has_private(page);
page              519 mm/migrate.c   	if (page_count(page) != expected_count || xas_load(&xas) != page) {
page              524 mm/migrate.c   	if (!page_ref_freeze(page, expected_count)) {
page              529 mm/migrate.c   	newpage->index = page->index;
page              530 mm/migrate.c   	newpage->mapping = page->mapping;
page              536 mm/migrate.c   	page_ref_unfreeze(page, expected_count - 1);
page              548 mm/migrate.c   static void __copy_gigantic_page(struct page *dst, struct page *src,
page              552 mm/migrate.c   	struct page *dst_base = dst;
page              553 mm/migrate.c   	struct page *src_base = src;
page              565 mm/migrate.c   static void copy_huge_page(struct page *dst, struct page *src)
page              594 mm/migrate.c   void migrate_page_states(struct page *newpage, struct page *page)
page              598 mm/migrate.c   	if (PageError(page))
page              600 mm/migrate.c   	if (PageReferenced(page))
page              602 mm/migrate.c   	if (PageUptodate(page))
page              604 mm/migrate.c   	if (TestClearPageActive(page)) {
page              605 mm/migrate.c   		VM_BUG_ON_PAGE(PageUnevictable(page), page);
page              607 mm/migrate.c   	} else if (TestClearPageUnevictable(page))
page              609 mm/migrate.c   	if (PageWorkingset(page))
page              611 mm/migrate.c   	if (PageChecked(page))
page              613 mm/migrate.c   	if (PageMappedToDisk(page))
page              617 mm/migrate.c   	if (PageDirty(page))
page              620 mm/migrate.c   	if (page_is_young(page))
page              622 mm/migrate.c   	if (page_is_idle(page))
page              629 mm/migrate.c   	cpupid = page_cpupid_xchg_last(page, -1);
page              632 mm/migrate.c   	ksm_migrate_page(newpage, page);
page              637 mm/migrate.c   	if (PageSwapCache(page))
page              638 mm/migrate.c   		ClearPageSwapCache(page);
page              639 mm/migrate.c   	ClearPagePrivate(page);
page              640 mm/migrate.c   	set_page_private(page, 0);
page              649 mm/migrate.c   	copy_page_owner(page, newpage);
page              651 mm/migrate.c   	mem_cgroup_migrate(page, newpage);
page              655 mm/migrate.c   void migrate_page_copy(struct page *newpage, struct page *page)
page              657 mm/migrate.c   	if (PageHuge(page) || PageTransHuge(page))
page              658 mm/migrate.c   		copy_huge_page(newpage, page);
page              660 mm/migrate.c   		copy_highpage(newpage, page);
page              662 mm/migrate.c   	migrate_page_states(newpage, page);
page              677 mm/migrate.c   		struct page *newpage, struct page *page,
page              682 mm/migrate.c   	BUG_ON(PageWriteback(page));	/* Writeback must be complete */
page              684 mm/migrate.c   	rc = migrate_page_move_mapping(mapping, newpage, page, 0);
page              690 mm/migrate.c   		migrate_page_copy(newpage, page);
page              692 mm/migrate.c   		migrate_page_states(newpage, page);
page              737 mm/migrate.c   		struct page *newpage, struct page *page, enum migrate_mode mode,
page              744 mm/migrate.c   	if (!page_has_buffers(page))
page              745 mm/migrate.c   		return migrate_page(mapping, newpage, page, mode);
page              748 mm/migrate.c   	expected_count = expected_page_refs(mapping, page);
page              749 mm/migrate.c   	if (page_count(page) != expected_count)
page              752 mm/migrate.c   	head = page_buffers(page);
page              783 mm/migrate.c   	rc = migrate_page_move_mapping(mapping, newpage, page, 0);
page              787 mm/migrate.c   	ClearPagePrivate(page);
page              788 mm/migrate.c   	set_page_private(newpage, page_private(page));
page              789 mm/migrate.c   	set_page_private(page, 0);
page              790 mm/migrate.c   	put_page(page);
page              803 mm/migrate.c   		migrate_page_copy(newpage, page);
page              805 mm/migrate.c   		migrate_page_states(newpage, page);
page              827 mm/migrate.c   		struct page *newpage, struct page *page, enum migrate_mode mode)
page              829 mm/migrate.c   	return __buffer_migrate_page(mapping, newpage, page, mode, false);
page              840 mm/migrate.c   		struct page *newpage, struct page *page, enum migrate_mode mode)
page              842 mm/migrate.c   	return __buffer_migrate_page(mapping, newpage, page, mode, true);
page              849 mm/migrate.c   static int writeout(struct address_space *mapping, struct page *page)
page              864 mm/migrate.c   	if (!clear_page_dirty_for_io(page))
page              876 mm/migrate.c   	remove_migration_ptes(page, page, false);
page              878 mm/migrate.c   	rc = mapping->a_ops->writepage(page, &wbc);
page              882 mm/migrate.c   		lock_page(page);
page              891 mm/migrate.c   	struct page *newpage, struct page *page, enum migrate_mode mode)
page              893 mm/migrate.c   	if (PageDirty(page)) {
page              902 mm/migrate.c   		return writeout(mapping, page);
page              909 mm/migrate.c   	if (page_has_private(page) &&
page              910 mm/migrate.c   	    !try_to_release_page(page, GFP_KERNEL))
page              913 mm/migrate.c   	return migrate_page(mapping, newpage, page, mode);
page              927 mm/migrate.c   static int move_to_new_page(struct page *newpage, struct page *page,
page              932 mm/migrate.c   	bool is_lru = !__PageMovable(page);
page              934 mm/migrate.c   	VM_BUG_ON_PAGE(!PageLocked(page), page);
page              937 mm/migrate.c   	mapping = page_mapping(page);
page              941 mm/migrate.c   			rc = migrate_page(mapping, newpage, page, mode);
page              951 mm/migrate.c   							page, mode);
page              954 mm/migrate.c   							page, mode);
page              960 mm/migrate.c   		VM_BUG_ON_PAGE(!PageIsolated(page), page);
page              961 mm/migrate.c   		if (!PageMovable(page)) {
page              963 mm/migrate.c   			__ClearPageIsolated(page);
page              968 mm/migrate.c   						page, mode);
page              970 mm/migrate.c   			!PageIsolated(page));
page              978 mm/migrate.c   		if (__PageMovable(page)) {
page              979 mm/migrate.c   			VM_BUG_ON_PAGE(!PageIsolated(page), page);
page              985 mm/migrate.c   			__ClearPageIsolated(page);
page              993 mm/migrate.c   		if (!PageMappingFlags(page))
page              994 mm/migrate.c   			page->mapping = NULL;
page             1004 mm/migrate.c   static int __unmap_and_move(struct page *page, struct page *newpage,
page             1010 mm/migrate.c   	bool is_lru = !__PageMovable(page);
page             1012 mm/migrate.c   	if (!trylock_page(page)) {
page             1032 mm/migrate.c   		lock_page(page);
page             1035 mm/migrate.c   	if (PageWriteback(page)) {
page             1052 mm/migrate.c   		wait_on_page_writeback(page);
page             1069 mm/migrate.c   	if (PageAnon(page) && !PageKsm(page))
page             1070 mm/migrate.c   		anon_vma = page_get_anon_vma(page);
page             1084 mm/migrate.c   		rc = move_to_new_page(newpage, page, mode);
page             1100 mm/migrate.c   	if (!page->mapping) {
page             1101 mm/migrate.c   		VM_BUG_ON_PAGE(PageAnon(page), page);
page             1102 mm/migrate.c   		if (page_has_private(page)) {
page             1103 mm/migrate.c   			try_to_free_buffers(page);
page             1106 mm/migrate.c   	} else if (page_mapped(page)) {
page             1108 mm/migrate.c   		VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma,
page             1109 mm/migrate.c   				page);
page             1110 mm/migrate.c   		try_to_unmap(page,
page             1115 mm/migrate.c   	if (!page_mapped(page))
page             1116 mm/migrate.c   		rc = move_to_new_page(newpage, page, mode);
page             1119 mm/migrate.c   		remove_migration_ptes(page,
page             1120 mm/migrate.c   			rc == MIGRATEPAGE_SUCCESS ? newpage : page, false);
page             1128 mm/migrate.c   	unlock_page(page);
page             1166 mm/migrate.c   				   unsigned long private, struct page *page,
page             1171 mm/migrate.c   	struct page *newpage;
page             1173 mm/migrate.c   	if (!thp_migration_supported() && PageTransHuge(page))
page             1176 mm/migrate.c   	newpage = get_new_page(page, private);
page             1180 mm/migrate.c   	if (page_count(page) == 1) {
page             1182 mm/migrate.c   		ClearPageActive(page);
page             1183 mm/migrate.c   		ClearPageUnevictable(page);
page             1184 mm/migrate.c   		if (unlikely(__PageMovable(page))) {
page             1185 mm/migrate.c   			lock_page(page);
page             1186 mm/migrate.c   			if (!PageMovable(page))
page             1187 mm/migrate.c   				__ClearPageIsolated(page);
page             1188 mm/migrate.c   			unlock_page(page);
page             1197 mm/migrate.c   	rc = __unmap_and_move(page, newpage, force, mode);
page             1209 mm/migrate.c   		list_del(&page->lru);
page             1216 mm/migrate.c   		if (likely(!__PageMovable(page)))
page             1217 mm/migrate.c   			mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
page             1218 mm/migrate.c   					page_is_file_cache(page), -hpage_nr_pages(page));
page             1227 mm/migrate.c   		put_page(page);
page             1234 mm/migrate.c   			if (set_hwpoison_free_buddy_page(page))
page             1239 mm/migrate.c   			if (likely(!__PageMovable(page))) {
page             1240 mm/migrate.c   				putback_lru_page(page);
page             1244 mm/migrate.c   			lock_page(page);
page             1245 mm/migrate.c   			if (PageMovable(page))
page             1246 mm/migrate.c   				putback_movable_page(page);
page             1248 mm/migrate.c   				__ClearPageIsolated(page);
page             1249 mm/migrate.c   			unlock_page(page);
page             1250 mm/migrate.c   			put_page(page);
page             1282 mm/migrate.c   				struct page *hpage, int force,
page             1287 mm/migrate.c   	struct page *new_hpage;
page             1407 mm/migrate.c   	struct page *page;
page             1408 mm/migrate.c   	struct page *page2;
page             1418 mm/migrate.c   		list_for_each_entry_safe(page, page2, from, lru) {
page             1422 mm/migrate.c   			if (PageHuge(page))
page             1424 mm/migrate.c   						put_new_page, private, page,
page             1428 mm/migrate.c   						private, page, pass > 2, mode,
page             1444 mm/migrate.c   				if (PageTransHuge(page) && !PageHuge(page)) {
page             1445 mm/migrate.c   					lock_page(page);
page             1446 mm/migrate.c   					rc = split_huge_page_to_list(page, from);
page             1447 mm/migrate.c   					unlock_page(page);
page             1449 mm/migrate.c   						list_safe_reset_next(page, page2, lru);
page             1529 mm/migrate.c   	struct page *page;
page             1541 mm/migrate.c   	page = follow_page(vma, addr, follflags);
page             1543 mm/migrate.c   	err = PTR_ERR(page);
page             1544 mm/migrate.c   	if (IS_ERR(page))
page             1548 mm/migrate.c   	if (!page)
page             1552 mm/migrate.c   	if (page_to_nid(page) == node)
page             1556 mm/migrate.c   	if (page_mapcount(page) > 1 && !migrate_all)
page             1559 mm/migrate.c   	if (PageHuge(page)) {
page             1560 mm/migrate.c   		if (PageHead(page)) {
page             1561 mm/migrate.c   			isolate_huge_page(page, pagelist);
page             1565 mm/migrate.c   		struct page *head;
page             1567 mm/migrate.c   		head = compound_head(page);
page             1584 mm/migrate.c   	put_page(page);
page             1723 mm/migrate.c   		struct page *page;
page             1731 mm/migrate.c   		page = follow_page(vma, addr, FOLL_DUMP);
page             1733 mm/migrate.c   		err = PTR_ERR(page);
page             1734 mm/migrate.c   		if (IS_ERR(page))
page             1737 mm/migrate.c   		err = page ? page_to_nid(page) : -ENOENT;
page             1905 mm/migrate.c   static struct page *alloc_misplaced_dst_page(struct page *page,
page             1909 mm/migrate.c   	struct page *newpage;
page             1920 mm/migrate.c   static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
page             1924 mm/migrate.c   	VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page), page);
page             1927 mm/migrate.c   	if (!migrate_balanced_pgdat(pgdat, compound_nr(page)))
page             1930 mm/migrate.c   	if (isolate_lru_page(page))
page             1940 mm/migrate.c   	if (PageTransHuge(page) && page_count(page) != 3) {
page             1941 mm/migrate.c   		putback_lru_page(page);
page             1945 mm/migrate.c   	page_lru = page_is_file_cache(page);
page             1946 mm/migrate.c   	mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_lru,
page             1947 mm/migrate.c   				hpage_nr_pages(page));
page             1954 mm/migrate.c   	put_page(page);
page             1960 mm/migrate.c   	struct page *page = pmd_page(pmd);
page             1961 mm/migrate.c   	return PageLocked(page);
page             1969 mm/migrate.c   int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
page             1981 mm/migrate.c   	if (page_mapcount(page) != 1 && page_is_file_cache(page) &&
page             1989 mm/migrate.c   	if (page_is_file_cache(page) && PageDirty(page))
page             1992 mm/migrate.c   	isolated = numamigrate_isolate_page(pgdat, page);
page             1996 mm/migrate.c   	list_add(&page->lru, &migratepages);
page             2002 mm/migrate.c   			list_del(&page->lru);
page             2003 mm/migrate.c   			dec_node_page_state(page, NR_ISOLATED_ANON +
page             2004 mm/migrate.c   					page_is_file_cache(page));
page             2005 mm/migrate.c   			putback_lru_page(page);
page             2014 mm/migrate.c   	put_page(page);
page             2028 mm/migrate.c   				struct page *page, int node)
page             2033 mm/migrate.c   	struct page *new_page = NULL;
page             2034 mm/migrate.c   	int page_lru = page_is_file_cache(page);
page             2044 mm/migrate.c   	isolated = numamigrate_isolate_page(pgdat, page);
page             2052 mm/migrate.c   	if (PageSwapBacked(page))
page             2056 mm/migrate.c   	new_page->mapping = page->mapping;
page             2057 mm/migrate.c   	new_page->index = page->index;
page             2060 mm/migrate.c   	migrate_page_copy(new_page, page);
page             2065 mm/migrate.c   	if (unlikely(!pmd_same(*pmd, entry) || !page_ref_freeze(page, 2))) {
page             2070 mm/migrate.c   			SetPageActive(page);
page             2072 mm/migrate.c   			SetPageUnevictable(page);
page             2078 mm/migrate.c   		get_page(page);
page             2079 mm/migrate.c   		putback_lru_page(page);
page             2080 mm/migrate.c   		mod_node_page_state(page_pgdat(page),
page             2112 mm/migrate.c   	page_ref_unfreeze(page, 2);
page             2113 mm/migrate.c   	mlock_migrate_page(new_page, page);
page             2114 mm/migrate.c   	page_remove_rmap(page, true);
page             2124 mm/migrate.c   	unlock_page(page);
page             2125 mm/migrate.c   	put_page(page);			/* Drop the rmap reference */
page             2126 mm/migrate.c   	put_page(page);			/* Drop the LRU isolation reference */
page             2131 mm/migrate.c   	mod_node_page_state(page_pgdat(page),
page             2147 mm/migrate.c   	unlock_page(page);
page             2148 mm/migrate.c   	put_page(page);
page             2205 mm/migrate.c   		struct page *page;
page             2213 mm/migrate.c   		page = pmd_page(*pmdp);
page             2214 mm/migrate.c   		if (is_huge_zero_page(page)) {
page             2223 mm/migrate.c   			get_page(page);
page             2225 mm/migrate.c   			if (unlikely(!trylock_page(page)))
page             2228 mm/migrate.c   			ret = split_huge_page(page);
page             2229 mm/migrate.c   			unlock_page(page);
page             2230 mm/migrate.c   			put_page(page);
page             2248 mm/migrate.c   		struct page *page;
page             2272 mm/migrate.c   			page = device_private_entry_to_page(entry);
page             2273 mm/migrate.c   			mpfn = migrate_pfn(page_to_pfn(page)) |
page             2284 mm/migrate.c   			page = vm_normal_page(migrate->vma, addr, pte);
page             2290 mm/migrate.c   		if (!page || !page->mapping || PageTransCompound(page)) {
page             2304 mm/migrate.c   		get_page(page);
page             2312 mm/migrate.c   		if (trylock_page(page)) {
page             2319 mm/migrate.c   			entry = make_migration_entry(page, mpfn &
page             2331 mm/migrate.c   			page_remove_rmap(page, false);
page             2332 mm/migrate.c   			put_page(page);
page             2388 mm/migrate.c   static bool migrate_vma_check_page(struct page *page)
page             2402 mm/migrate.c   	if (PageCompound(page))
page             2406 mm/migrate.c   	if (is_zone_device_page(page)) {
page             2420 mm/migrate.c   		return is_device_private_page(page);
page             2424 mm/migrate.c   	if (page_mapping(page))
page             2425 mm/migrate.c   		extra += 1 + page_has_private(page);
page             2427 mm/migrate.c   	if ((page_count(page) - extra) > page_mapcount(page))
page             2452 mm/migrate.c   		struct page *page = migrate_pfn_to_page(migrate->src[i]);
page             2455 mm/migrate.c   		if (!page)
page             2467 mm/migrate.c   			if (!trylock_page(page)) {
page             2470 mm/migrate.c   				put_page(page);
page             2478 mm/migrate.c   		if (!is_zone_device_page(page)) {
page             2479 mm/migrate.c   			if (!PageLRU(page) && allow_drain) {
page             2485 mm/migrate.c   			if (isolate_lru_page(page)) {
page             2492 mm/migrate.c   					unlock_page(page);
page             2494 mm/migrate.c   					put_page(page);
page             2500 mm/migrate.c   			put_page(page);
page             2503 mm/migrate.c   		if (!migrate_vma_check_page(page)) {
page             2509 mm/migrate.c   				if (!is_zone_device_page(page)) {
page             2510 mm/migrate.c   					get_page(page);
page             2511 mm/migrate.c   					putback_lru_page(page);
page             2515 mm/migrate.c   				unlock_page(page);
page             2518 mm/migrate.c   				if (!is_zone_device_page(page))
page             2519 mm/migrate.c   					putback_lru_page(page);
page             2521 mm/migrate.c   					put_page(page);
page             2527 mm/migrate.c   		struct page *page = migrate_pfn_to_page(migrate->src[i]);
page             2529 mm/migrate.c   		if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE))
page             2532 mm/migrate.c   		remove_migration_pte(page, migrate->vma, addr, page);
page             2535 mm/migrate.c   		unlock_page(page);
page             2536 mm/migrate.c   		put_page(page);
page             2560 mm/migrate.c   		struct page *page = migrate_pfn_to_page(migrate->src[i]);
page             2562 mm/migrate.c   		if (!page || !(migrate->src[i] & MIGRATE_PFN_MIGRATE))
page             2565 mm/migrate.c   		if (page_mapped(page)) {
page             2566 mm/migrate.c   			try_to_unmap(page, flags);
page             2567 mm/migrate.c   			if (page_mapped(page))
page             2571 mm/migrate.c   		if (migrate_vma_check_page(page))
page             2581 mm/migrate.c   		struct page *page = migrate_pfn_to_page(migrate->src[i]);
page             2583 mm/migrate.c   		if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE))
page             2586 mm/migrate.c   		remove_migration_ptes(page, page, false);
page             2589 mm/migrate.c   		unlock_page(page);
page             2592 mm/migrate.c   		if (is_zone_device_page(page))
page             2593 mm/migrate.c   			put_page(page);
page             2595 mm/migrate.c   			putback_lru_page(page);
page             2705 mm/migrate.c   				    struct page *page,
page             2758 mm/migrate.c   	if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL, &memcg, false))
page             2766 mm/migrate.c   	__SetPageUptodate(page);
page             2768 mm/migrate.c   	if (is_zone_device_page(page)) {
page             2769 mm/migrate.c   		if (is_device_private_page(page)) {
page             2772 mm/migrate.c   			swp_entry = make_device_private_entry(page, vma->vm_flags & VM_WRITE);
page             2776 mm/migrate.c   		entry = mk_pte(page, vma->vm_page_prot);
page             2788 mm/migrate.c   			mem_cgroup_cancel_charge(page, memcg, false);
page             2794 mm/migrate.c   		mem_cgroup_cancel_charge(page, memcg, false);
page             2804 mm/migrate.c   		mem_cgroup_cancel_charge(page, memcg, false);
page             2809 mm/migrate.c   	page_add_new_anon_rmap(page, vma, addr, false);
page             2810 mm/migrate.c   	mem_cgroup_commit_charge(page, memcg, false, false);
page             2811 mm/migrate.c   	if (!is_zone_device_page(page))
page             2812 mm/migrate.c   		lru_cache_add_active_or_unevictable(page, vma);
page             2813 mm/migrate.c   	get_page(page);
page             2851 mm/migrate.c   		struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
page             2852 mm/migrate.c   		struct page *page = migrate_pfn_to_page(migrate->src[i]);
page             2861 mm/migrate.c   		if (!page) {
page             2881 mm/migrate.c   		mapping = page_mapping(page);
page             2903 mm/migrate.c   		r = migrate_page(mapping, newpage, page, MIGRATE_SYNC_NO_COPY);
page             2935 mm/migrate.c   		struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
page             2936 mm/migrate.c   		struct page *page = migrate_pfn_to_page(migrate->src[i]);
page             2938 mm/migrate.c   		if (!page) {
page             2951 mm/migrate.c   			newpage = page;
page             2954 mm/migrate.c   		remove_migration_ptes(page, newpage, false);
page             2955 mm/migrate.c   		unlock_page(page);
page             2958 mm/migrate.c   		if (is_zone_device_page(page))
page             2959 mm/migrate.c   			put_page(page);
page             2961 mm/migrate.c   			putback_lru_page(page);
page             2963 mm/migrate.c   		if (newpage != page) {
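Annotation: earlier in the mm/migrate.c run above, do_pages_stat() uses follow_page() and page_to_nid() (the migrate.c:1731-1737 entries) to report where a page currently lives; that path services move_pages(2) when called with a NULL nodes array. A minimal user-space sketch of that query, assuming SYS_move_pages is available on the build host:

/* Query which NUMA node backs a freshly touched page via move_pages(2).
 * pid 0 means the calling process; nodes == NULL asks the kernel only
 * to report placement in status[], exercising the do_pages_stat()/
 * follow_page() path indexed above. */
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	void *buf;
	int status = 0;

	if (posix_memalign(&buf, pagesz, pagesz))
		return 1;
	*(volatile char *)buf = 1;	/* fault the page in */

	if (syscall(SYS_move_pages, 0, 1UL, &buf, NULL, &status, 0) == -1) {
		perror("move_pages");
		return 1;
	}
	/* status holds the node id, or a negative errno (-ENOENT etc.),
	 * matching the "err = page ? page_to_nid(page) : -ENOENT" entry */
	printf("page at %p is on node %d\n", buf, status);
	free(buf);
	return 0;
}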
page               54 mm/mincore.c   	struct page *page;
page               64 mm/mincore.c   		page = find_get_entry(mapping, pgoff);
page               69 mm/mincore.c   		if (xa_is_value(page)) {
page               70 mm/mincore.c   			swp_entry_t swp = radix_to_swp_entry(page);
page               76 mm/mincore.c   				page = find_get_page(swap_address_space(swp),
page               80 mm/mincore.c   				page = NULL;
page               83 mm/mincore.c   		page = find_get_page(mapping, pgoff);
page               85 mm/mincore.c   	page = find_get_page(mapping, pgoff);
page               87 mm/mincore.c   	if (page) {
page               88 mm/mincore.c   		present = PageUptodate(page);
page               89 mm/mincore.c   		put_page(page);
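Annotation: the mm/mincore.c entries above are the residency probe behind mincore(2) — find_get_page() plus the PageUptodate()/put_page() pair. A short user-space sketch; an anonymous mapping is used for brevity, while file mappings go through the page-cache lookup indexed above:

/* Report core residency for a mapping via mincore(2); bit 0 of each
 * vec[] byte is set when the page is resident, mirroring the
 * PageUptodate() check indexed above. */
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	size_t len = 4 * pagesz;
	unsigned char vec[4];
	char *map = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (map == MAP_FAILED)
		return 1;
	map[0] = 1;			/* touch only the first page */

	if (mincore(map, len, vec) == -1) {
		perror("mincore");
		return 1;
	}
	for (int i = 0; i < 4; i++)
		printf("page %d: %s\n", i,
		       (vec[i] & 1) ? "resident" : "not resident");
	munmap(map, len);
	return 0;
}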
page               59 mm/mlock.c     void clear_page_mlock(struct page *page)
page               61 mm/mlock.c     	if (!TestClearPageMlocked(page))
page               64 mm/mlock.c     	mod_zone_page_state(page_zone(page), NR_MLOCK,
page               65 mm/mlock.c     			    -hpage_nr_pages(page));
page               73 mm/mlock.c     	if (!isolate_lru_page(page)) {
page               74 mm/mlock.c     		putback_lru_page(page);
page               79 mm/mlock.c     		if (PageUnevictable(page))
page               88 mm/mlock.c     void mlock_vma_page(struct page *page)
page               91 mm/mlock.c     	BUG_ON(!PageLocked(page));
page               93 mm/mlock.c     	VM_BUG_ON_PAGE(PageTail(page), page);
page               94 mm/mlock.c     	VM_BUG_ON_PAGE(PageCompound(page) && PageDoubleMap(page), page);
page               96 mm/mlock.c     	if (!TestSetPageMlocked(page)) {
page               97 mm/mlock.c     		mod_zone_page_state(page_zone(page), NR_MLOCK,
page               98 mm/mlock.c     				    hpage_nr_pages(page));
page              100 mm/mlock.c     		if (!isolate_lru_page(page))
page              101 mm/mlock.c     			putback_lru_page(page);
page              109 mm/mlock.c     static bool __munlock_isolate_lru_page(struct page *page, bool getpage)
page              111 mm/mlock.c     	if (PageLRU(page)) {
page              114 mm/mlock.c     		lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
page              116 mm/mlock.c     			get_page(page);
page              117 mm/mlock.c     		ClearPageLRU(page);
page              118 mm/mlock.c     		del_page_from_lru_list(page, lruvec, page_lru(page));
page              131 mm/mlock.c     static void __munlock_isolated_page(struct page *page)
page              137 mm/mlock.c     	if (page_mapcount(page) > 1)
page              138 mm/mlock.c     		try_to_munlock(page);
page              141 mm/mlock.c     	if (!PageMlocked(page))
page              144 mm/mlock.c     	putback_lru_page(page);
page              156 mm/mlock.c     static void __munlock_isolation_failed(struct page *page)
page              158 mm/mlock.c     	if (PageUnevictable(page))
page              182 mm/mlock.c     unsigned int munlock_vma_page(struct page *page)
page              185 mm/mlock.c     	pg_data_t *pgdat = page_pgdat(page);
page              188 mm/mlock.c     	BUG_ON(!PageLocked(page));
page              190 mm/mlock.c     	VM_BUG_ON_PAGE(PageTail(page), page);
page              199 mm/mlock.c     	if (!TestClearPageMlocked(page)) {
page              205 mm/mlock.c     	nr_pages = hpage_nr_pages(page);
page              206 mm/mlock.c     	__mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
page              208 mm/mlock.c     	if (__munlock_isolate_lru_page(page, true)) {
page              210 mm/mlock.c     		__munlock_isolated_page(page);
page              213 mm/mlock.c     	__munlock_isolation_failed(page);
page              246 mm/mlock.c     static bool __putback_lru_fast_prepare(struct page *page, struct pagevec *pvec,
page              249 mm/mlock.c     	VM_BUG_ON_PAGE(PageLRU(page), page);
page              250 mm/mlock.c     	VM_BUG_ON_PAGE(!PageLocked(page), page);
page              252 mm/mlock.c     	if (page_mapcount(page) <= 1 && page_evictable(page)) {
page              253 mm/mlock.c     		pagevec_add(pvec, page);
page              254 mm/mlock.c     		if (TestClearPageUnevictable(page))
page              256 mm/mlock.c     		unlock_page(page);
page              303 mm/mlock.c     		struct page *page = pvec->pages[i];
page              305 mm/mlock.c     		if (TestClearPageMlocked(page)) {
page              310 mm/mlock.c     			if (__munlock_isolate_lru_page(page, false))
page              313 mm/mlock.c     				__munlock_isolation_failed(page);
page              335 mm/mlock.c     		struct page *page = pvec->pages[i];
page              337 mm/mlock.c     		if (page) {
page              338 mm/mlock.c     			lock_page(page);
page              339 mm/mlock.c     			if (!__putback_lru_fast_prepare(page, &pvec_putback,
page              345 mm/mlock.c     				get_page(page); /* for putback_lru_page() */
page              346 mm/mlock.c     				__munlock_isolated_page(page);
page              347 mm/mlock.c     				unlock_page(page);
page              348 mm/mlock.c     				put_page(page); /* from follow_page_mask() */
page              396 mm/mlock.c     		struct page *page = NULL;
page              399 mm/mlock.c     			page = vm_normal_page(vma, start, *pte);
page              404 mm/mlock.c     		if (!page || page_zone(page) != zone)
page              411 mm/mlock.c     		if (PageTransCompound(page))
page              414 mm/mlock.c     		get_page(page);
page              420 mm/mlock.c     		if (pagevec_add(pvec, page) == 0)
page              451 mm/mlock.c     		struct page *page;
page              465 mm/mlock.c     		page = follow_page(vma, start, FOLL_GET | FOLL_DUMP);
page              467 mm/mlock.c     		if (page && !IS_ERR(page)) {
page              468 mm/mlock.c     			if (PageTransTail(page)) {
page              469 mm/mlock.c     				VM_BUG_ON_PAGE(PageMlocked(page), page);
page              470 mm/mlock.c     				put_page(page); /* follow_page_mask() */
page              471 mm/mlock.c     			} else if (PageTransHuge(page)) {
page              472 mm/mlock.c     				lock_page(page);
page              479 mm/mlock.c     				page_mask = munlock_vma_page(page);
page              480 mm/mlock.c     				unlock_page(page);
page              481 mm/mlock.c     				put_page(page); /* follow_page_mask() */
page              488 mm/mlock.c     				pagevec_add(&pvec, page);
page              489 mm/mlock.c     				zone = page_zone(page);
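Annotation: the mm/mlock.c run tracks pages moving onto and off the unevictable LRU as VM_LOCKED ranges are created and torn down (the TestSetPageMlocked()/TestClearPageMlocked() pairs at mlock.c:96 and mlock.c:199). The user-space trigger is mlock(2)/munlock(2); a short sketch, subject to RLIMIT_MEMLOCK:

/* Pin one page with mlock(2), then release it; the kernel side is the
 * mlock_vma_page()/munlock_vma_page() pair indexed above. */
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	void *buf;

	if (posix_memalign(&buf, pagesz, pagesz))
		return 1;

	if (mlock(buf, pagesz) == -1) {	/* page becomes unevictable */
		perror("mlock");
		return 1;
	}
	puts("page locked; it will not be reclaimed or swapped");

	if (munlock(buf, pagesz) == -1)	/* back onto the normal LRU */
		perror("munlock");
	free(buf);
	return 0;
}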
page             3367 mm/mmap.c      	struct page **pages;
page             3384 mm/mmap.c      		struct page *page = *pages;
page             3385 mm/mmap.c      		get_page(page);
page             3386 mm/mmap.c      		vmf->page = page;
page             3458 mm/mmap.c      			    unsigned long vm_flags, struct page **pages)
page               66 mm/mmu_gather.c bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size)
page               81 mm/mmu_gather.c 	batch->pages[batch->nr++] = page;
page               87 mm/mmu_gather.c 	VM_BUG_ON_PAGE(batch->nr > batch->max, page);
page               77 mm/mmzone.c    					struct page *page, struct zone *zone)
page               79 mm/mmzone.c    	if (page_to_pfn(page) != pfn)
page               82 mm/mmzone.c    	if (page_zone(page) != zone)
page              100 mm/mmzone.c    int page_cpupid_xchg_last(struct page *page, int cpupid)
page              106 mm/mmzone.c    		old_flags = flags = page->flags;
page              107 mm/mmzone.c    		last_cpupid = page_cpupid_last(page);
page              111 mm/mmzone.c    	} while (unlikely(cmpxchg(&page->flags, old_flags, flags) != old_flags));
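Annotation: page_cpupid_xchg_last() (the mm/mmzone.c entries at lines 100-111) updates cpupid bits packed into page->flags with a cmpxchg retry loop. The same lock-free read-modify-write pattern, restated with portable C11 atomics; the 8-bit field layout here is illustrative, not the kernel's real packing:

/* cmpxchg retry loop in the style of page_cpupid_xchg_last(): rebuild
 * the word from the last observed value and retry until no concurrent
 * writer intervenes. CPUPID_MASK is a hypothetical layout. */
#include <stdatomic.h>
#include <stdio.h>

#define CPUPID_MASK 0xffUL

static unsigned long cpupid_xchg_last(_Atomic unsigned long *flags,
				      unsigned long cpupid)
{
	unsigned long old, new;

	old = atomic_load(flags);
	do {	/* on failure, old is refreshed with the current value */
		new = (old & ~CPUPID_MASK) | (cpupid & CPUPID_MASK);
	} while (!atomic_compare_exchange_weak(flags, &old, new));

	return old & CPUPID_MASK;	/* previous cpupid */
}

int main(void)
{
	_Atomic unsigned long flags = 0x1200UL | 7;

	printf("last cpupid was %lu\n", cpupid_xchg_last(&flags, 42));
	printf("flags now %#lx\n", (unsigned long)atomic_load(&flags));
	return 0;
}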
page               81 mm/mprotect.c  				struct page *page;
page               83 mm/mprotect.c  				page = vm_normal_page(vma, addr, oldpte);
page               84 mm/mprotect.c  				if (!page || PageKsm(page))
page               89 mm/mprotect.c  				    page_mapcount(page) != 1)
page               97 mm/mprotect.c  				if (page_is_file_cache(page) && PageDirty(page))
page              108 mm/mprotect.c  				if (target_node == page_to_nid(page))
page               48 mm/nommu.c     struct page *mem_map;
page               75 mm/nommu.c     	struct page *page;
page               84 mm/nommu.c     	page = virt_to_head_page(objp);
page               90 mm/nommu.c     	if (PageSlab(page))
page               99 mm/nommu.c     	if (!PageCompound(page)) {
page              111 mm/nommu.c     	return page_size(page);
page              177 mm/nommu.c     struct page *vmalloc_to_page(const void *addr)
page              330 mm/nommu.c     void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot)
page              343 mm/nommu.c     void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
page              387 mm/nommu.c     		   struct page *page)
page              393 mm/nommu.c     int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
page              400 mm/nommu.c     int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
page              535 mm/nommu.c     		struct page *page = virt_to_page(from);
page              538 mm/nommu.c     		put_page(page);
page             1645 mm/nommu.c     struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
page             2110 mm/page-writeback.c 	void *page;
page             2113 mm/page-writeback.c 	xas_for_each_marked(&xas, page, end, PAGECACHE_TAG_DIRTY) {
page             2201 mm/page-writeback.c 			struct page *page = pvec.pages[i];
page             2203 mm/page-writeback.c 			done_index = page->index;
page             2205 mm/page-writeback.c 			lock_page(page);
page             2215 mm/page-writeback.c 			if (unlikely(page->mapping != mapping)) {
page             2217 mm/page-writeback.c 				unlock_page(page);
page             2221 mm/page-writeback.c 			if (!PageDirty(page)) {
page             2226 mm/page-writeback.c 			if (PageWriteback(page)) {
page             2228 mm/page-writeback.c 					wait_on_page_writeback(page);
page             2233 mm/page-writeback.c 			BUG_ON(PageWriteback(page));
page             2234 mm/page-writeback.c 			if (!clear_page_dirty_for_io(page))
page             2238 mm/page-writeback.c 			error = (*writepage)(page, wbc, data);
page             2253 mm/page-writeback.c 					unlock_page(page);
page             2257 mm/page-writeback.c 					done_index = page->index + 1;
page             2299 mm/page-writeback.c static int __writepage(struct page *page, struct writeback_control *wbc,
page             2303 mm/page-writeback.c 	int ret = mapping->a_ops->writepage(page, wbc);
page             2366 mm/page-writeback.c int write_one_page(struct page *page)
page             2368 mm/page-writeback.c 	struct address_space *mapping = page->mapping;
page             2375 mm/page-writeback.c 	BUG_ON(!PageLocked(page));
page             2377 mm/page-writeback.c 	wait_on_page_writeback(page);
page             2379 mm/page-writeback.c 	if (clear_page_dirty_for_io(page)) {
page             2380 mm/page-writeback.c 		get_page(page);
page             2381 mm/page-writeback.c 		ret = mapping->a_ops->writepage(page, &wbc);
page             2383 mm/page-writeback.c 			wait_on_page_writeback(page);
page             2384 mm/page-writeback.c 		put_page(page);
page             2386 mm/page-writeback.c 		unlock_page(page);
page             2398 mm/page-writeback.c int __set_page_dirty_no_writeback(struct page *page)
page             2400 mm/page-writeback.c 	if (!PageDirty(page))
page             2401 mm/page-writeback.c 		return !TestSetPageDirty(page);
page             2412 mm/page-writeback.c void account_page_dirtied(struct page *page, struct address_space *mapping)
page             2416 mm/page-writeback.c 	trace_writeback_dirty_page(page, mapping);
page             2421 mm/page-writeback.c 		inode_attach_wb(inode, page);
page             2424 mm/page-writeback.c 		__inc_lruvec_page_state(page, NR_FILE_DIRTY);
page             2425 mm/page-writeback.c 		__inc_zone_page_state(page, NR_ZONE_WRITE_PENDING);
page             2426 mm/page-writeback.c 		__inc_node_page_state(page, NR_DIRTIED);
page             2433 mm/page-writeback.c 		mem_cgroup_track_foreign_dirty(page, wb);
page             2442 mm/page-writeback.c void account_page_cleaned(struct page *page, struct address_space *mapping,
page             2446 mm/page-writeback.c 		dec_lruvec_page_state(page, NR_FILE_DIRTY);
page             2447 mm/page-writeback.c 		dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
page             2465 mm/page-writeback.c int __set_page_dirty_nobuffers(struct page *page)
page             2467 mm/page-writeback.c 	lock_page_memcg(page);
page             2468 mm/page-writeback.c 	if (!TestSetPageDirty(page)) {
page             2469 mm/page-writeback.c 		struct address_space *mapping = page_mapping(page);
page             2473 mm/page-writeback.c 			unlock_page_memcg(page);
page             2478 mm/page-writeback.c 		BUG_ON(page_mapping(page) != mapping);
page             2479 mm/page-writeback.c 		WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
page             2480 mm/page-writeback.c 		account_page_dirtied(page, mapping);
page             2481 mm/page-writeback.c 		__xa_set_mark(&mapping->i_pages, page_index(page),
page             2484 mm/page-writeback.c 		unlock_page_memcg(page);
page             2492 mm/page-writeback.c 	unlock_page_memcg(page);
page             2504 mm/page-writeback.c void account_page_redirty(struct page *page)
page             2506 mm/page-writeback.c 	struct address_space *mapping = page->mapping;
page             2515 mm/page-writeback.c 		dec_node_page_state(page, NR_DIRTIED);
page             2527 mm/page-writeback.c int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page)
page             2532 mm/page-writeback.c 	ret = __set_page_dirty_nobuffers(page);
page             2533 mm/page-writeback.c 	account_page_redirty(page);
page             2549 mm/page-writeback.c int set_page_dirty(struct page *page)
page             2551 mm/page-writeback.c 	struct address_space *mapping = page_mapping(page);
page             2553 mm/page-writeback.c 	page = compound_head(page);
page             2555 mm/page-writeback.c 		int (*spd)(struct page *) = mapping->a_ops->set_page_dirty;
page             2566 mm/page-writeback.c 		if (PageReclaim(page))
page             2567 mm/page-writeback.c 			ClearPageReclaim(page);
page             2572 mm/page-writeback.c 		return (*spd)(page);
page             2574 mm/page-writeback.c 	if (!PageDirty(page)) {
page             2575 mm/page-writeback.c 		if (!TestSetPageDirty(page))
page             2592 mm/page-writeback.c int set_page_dirty_lock(struct page *page)
page             2596 mm/page-writeback.c 	lock_page(page);
page             2597 mm/page-writeback.c 	ret = set_page_dirty(page);
page             2598 mm/page-writeback.c 	unlock_page(page);
page             2616 mm/page-writeback.c void __cancel_dirty_page(struct page *page)
page             2618 mm/page-writeback.c 	struct address_space *mapping = page_mapping(page);
page             2625 mm/page-writeback.c 		lock_page_memcg(page);
page             2628 mm/page-writeback.c 		if (TestClearPageDirty(page))
page             2629 mm/page-writeback.c 			account_page_cleaned(page, mapping, wb);
page             2632 mm/page-writeback.c 		unlock_page_memcg(page);
page             2634 mm/page-writeback.c 		ClearPageDirty(page);
page             2653 mm/page-writeback.c int clear_page_dirty_for_io(struct page *page)
page             2655 mm/page-writeback.c 	struct address_space *mapping = page_mapping(page);
page             2658 mm/page-writeback.c 	BUG_ON(!PageLocked(page));
page             2690 mm/page-writeback.c 		if (page_mkclean(page))
page             2691 mm/page-writeback.c 			set_page_dirty(page);
page             2701 mm/page-writeback.c 		if (TestClearPageDirty(page)) {
page             2702 mm/page-writeback.c 			dec_lruvec_page_state(page, NR_FILE_DIRTY);
page             2703 mm/page-writeback.c 			dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
page             2710 mm/page-writeback.c 	return TestClearPageDirty(page);
page             2714 mm/page-writeback.c int test_clear_page_writeback(struct page *page)
page             2716 mm/page-writeback.c 	struct address_space *mapping = page_mapping(page);
page             2721 mm/page-writeback.c 	memcg = lock_page_memcg(page);
page             2722 mm/page-writeback.c 	lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
page             2729 mm/page-writeback.c 		ret = TestClearPageWriteback(page);
page             2731 mm/page-writeback.c 			__xa_clear_mark(&mapping->i_pages, page_index(page),
page             2747 mm/page-writeback.c 		ret = TestClearPageWriteback(page);
page             2757 mm/page-writeback.c 		dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
page             2758 mm/page-writeback.c 		inc_node_page_state(page, NR_WRITTEN);
page             2764 mm/page-writeback.c int __test_set_page_writeback(struct page *page, bool keep_write)
page             2766 mm/page-writeback.c 	struct address_space *mapping = page_mapping(page);
page             2769 mm/page-writeback.c 	lock_page_memcg(page);
page             2771 mm/page-writeback.c 		XA_STATE(xas, &mapping->i_pages, page_index(page));
page             2778 mm/page-writeback.c 		ret = TestSetPageWriteback(page);
page             2797 mm/page-writeback.c 		if (!PageDirty(page))
page             2803 mm/page-writeback.c 		ret = TestSetPageWriteback(page);
page             2806 mm/page-writeback.c 		inc_lruvec_page_state(page, NR_WRITEBACK);
page             2807 mm/page-writeback.c 		inc_zone_page_state(page, NR_ZONE_WRITE_PENDING);
page             2809 mm/page-writeback.c 	unlock_page_memcg(page);
page             2818 mm/page-writeback.c void wait_on_page_writeback(struct page *page)
page             2820 mm/page-writeback.c 	if (PageWriteback(page)) {
page             2821 mm/page-writeback.c 		trace_wait_on_page_writeback(page, page_mapping(page));
page             2822 mm/page-writeback.c 		wait_on_page_bit(page, PG_writeback);
page             2835 mm/page-writeback.c void wait_for_stable_page(struct page *page)
page             2837 mm/page-writeback.c 	if (bdi_cap_stable_pages_required(inode_to_bdi(page->mapping->host)))
page             2838 mm/page-writeback.c 		wait_on_page_writeback(page);
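Annotation: the mm/page-writeback.c run above walks the dirty -> writeback -> clean cycle (set_page_dirty, clear_page_dirty_for_io, TestSetPageWriteback, wait_on_page_writeback). From user space, dirtying a shared file mapping and calling msync(MS_SYNC) drives one page through exactly that cycle; "scratch.bin" is a hypothetical filename:

/* Dirty one page of a shared file mapping, then force writeback with
 * msync(MS_SYNC). Kernel-side this marks the page dirty, starts
 * writeback via the a_ops->writepage path, and waits for it, per the
 * entries indexed above. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	int fd = open("scratch.bin", O_RDWR | O_CREAT, 0644);
	char *map;

	if (fd < 0 || ftruncate(fd, pagesz) < 0)
		return 1;
	map = mmap(NULL, pagesz, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (map == MAP_FAILED)
		return 1;

	map[0] = 'x';			/* page is now dirty */
	if (msync(map, pagesz, MS_SYNC) == -1) {	/* write back, wait */
		perror("msync");
		return 1;
	}
	munmap(map, pagesz);
	close(fd);
	return 0;
}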
page              196 mm/page_alloc.c static inline int get_pcppage_migratetype(struct page *page)
page              198 mm/page_alloc.c 	return page->index;
page              201 mm/page_alloc.c static inline void set_pcppage_migratetype(struct page *page, int migratetype)
page              203 mm/page_alloc.c 	page->index = migratetype;
page              248 mm/page_alloc.c static void __free_pages_ok(struct page *page, unsigned int order);
page              383 mm/page_alloc.c static inline void kasan_free_nondeferred_pages(struct page *page, int order)
page              386 mm/page_alloc.c 		kasan_free_pages(page, order);
page              449 mm/page_alloc.c static inline unsigned long *get_pageblock_bitmap(struct page *page,
page              455 mm/page_alloc.c 	return page_zone(page)->pageblock_flags;
page              459 mm/page_alloc.c static inline int pfn_to_bitidx(struct page *page, unsigned long pfn)
page              465 mm/page_alloc.c 	pfn = pfn - round_down(page_zone(page)->zone_start_pfn, pageblock_nr_pages);
page              479 mm/page_alloc.c static __always_inline unsigned long __get_pfnblock_flags_mask(struct page *page,
page              488 mm/page_alloc.c 	bitmap = get_pageblock_bitmap(page, pfn);
page              489 mm/page_alloc.c 	bitidx = pfn_to_bitidx(page, pfn);
page              498 mm/page_alloc.c unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
page              502 mm/page_alloc.c 	return __get_pfnblock_flags_mask(page, pfn, end_bitidx, mask);
page              505 mm/page_alloc.c static __always_inline int get_pfnblock_migratetype(struct page *page, unsigned long pfn)
page              507 mm/page_alloc.c 	return __get_pfnblock_flags_mask(page, pfn, PB_migrate_end, MIGRATETYPE_MASK);
page              518 mm/page_alloc.c void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
page              530 mm/page_alloc.c 	bitmap = get_pageblock_bitmap(page, pfn);
page              531 mm/page_alloc.c 	bitidx = pfn_to_bitidx(page, pfn);
page              535 mm/page_alloc.c 	VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page);
page              550 mm/page_alloc.c void set_pageblock_migratetype(struct page *page, int migratetype)
page              556 mm/page_alloc.c 	set_pageblock_flags_group(page, (unsigned long)migratetype,
page              561 mm/page_alloc.c static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
page              565 mm/page_alloc.c 	unsigned long pfn = page_to_pfn(page);
page              584 mm/page_alloc.c static int page_is_consistent(struct zone *zone, struct page *page)
page              586 mm/page_alloc.c 	if (!pfn_valid_within(page_to_pfn(page)))
page              588 mm/page_alloc.c 	if (zone != page_zone(page))
page              596 mm/page_alloc.c static int __maybe_unused bad_range(struct zone *zone, struct page *page)
page              598 mm/page_alloc.c 	if (page_outside_zone_boundaries(zone, page))
page              600 mm/page_alloc.c 	if (!page_is_consistent(zone, page))
page              606 mm/page_alloc.c static inline int __maybe_unused bad_range(struct zone *zone, struct page *page)
page              612 mm/page_alloc.c static void bad_page(struct page *page, const char *reason,
page              640 mm/page_alloc.c 		current->comm, page_to_pfn(page));
page              641 mm/page_alloc.c 	__dump_page(page, reason);
page              642 mm/page_alloc.c 	bad_flags &= page->flags;
page              646 mm/page_alloc.c 	dump_page_owner(page);
page              652 mm/page_alloc.c 	page_mapcount_reset(page); /* remove PageBuddy */
page              671 mm/page_alloc.c void free_compound_page(struct page *page)
page              673 mm/page_alloc.c 	mem_cgroup_uncharge(page);
page              674 mm/page_alloc.c 	__free_pages_ok(page, compound_order(page));
page              677 mm/page_alloc.c void prep_compound_page(struct page *page, unsigned int order)
page              682 mm/page_alloc.c 	set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
page              683 mm/page_alloc.c 	set_compound_order(page, order);
page              684 mm/page_alloc.c 	__SetPageHead(page);
page              686 mm/page_alloc.c 		struct page *p = page + i;
page              689 mm/page_alloc.c 		set_compound_head(p, page);
page              691 mm/page_alloc.c 	atomic_set(compound_mapcount_ptr(page), -1);
page              738 mm/page_alloc.c static inline bool set_page_guard(struct zone *zone, struct page *page,
page              747 mm/page_alloc.c 	__SetPageGuard(page);
page              748 mm/page_alloc.c 	INIT_LIST_HEAD(&page->lru);
page              749 mm/page_alloc.c 	set_page_private(page, order);
page              756 mm/page_alloc.c static inline void clear_page_guard(struct zone *zone, struct page *page,
page              762 mm/page_alloc.c 	__ClearPageGuard(page);
page              764 mm/page_alloc.c 	set_page_private(page, 0);
page              769 mm/page_alloc.c static inline bool set_page_guard(struct zone *zone, struct page *page,
page              771 mm/page_alloc.c static inline void clear_page_guard(struct zone *zone, struct page *page,
page              775 mm/page_alloc.c static inline void set_page_order(struct page *page, unsigned int order)
page              777 mm/page_alloc.c 	set_page_private(page, order);
page              778 mm/page_alloc.c 	__SetPageBuddy(page);
page              794 mm/page_alloc.c static inline int page_is_buddy(struct page *page, struct page *buddy,
page              798 mm/page_alloc.c 		if (page_zone_id(page) != page_zone_id(buddy))
page              812 mm/page_alloc.c 		if (page_zone_id(page) != page_zone_id(buddy))
page              829 mm/page_alloc.c 		!capc->page &&
page              835 mm/page_alloc.c compaction_capture(struct capture_control *capc, struct page *page,
page              855 mm/page_alloc.c 	capc->page = page;
page              866 mm/page_alloc.c compaction_capture(struct capture_control *capc, struct page *page,
page              897 mm/page_alloc.c static inline void __free_one_page(struct page *page,
page              904 mm/page_alloc.c 	struct page *buddy;
page              911 mm/page_alloc.c 	VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);
page              917 mm/page_alloc.c 	VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page);
page              918 mm/page_alloc.c 	VM_BUG_ON_PAGE(bad_range(zone, page), page);
page              922 mm/page_alloc.c 		if (compaction_capture(capc, page, order, migratetype)) {
page              928 mm/page_alloc.c 		buddy = page + (buddy_pfn - pfn);
page              932 mm/page_alloc.c 		if (!page_is_buddy(page, buddy, order))
page              943 mm/page_alloc.c 		page = page + (combined_pfn - pfn);
page              960 mm/page_alloc.c 			buddy = page + (buddy_pfn - pfn);
page              973 mm/page_alloc.c 	set_page_order(page, order);
page              985 mm/page_alloc.c 		struct page *higher_page, *higher_buddy;
page              987 mm/page_alloc.c 		higher_page = page + (combined_pfn - pfn);
page              992 mm/page_alloc.c 			add_to_free_area_tail(page, &zone->free_area[order],
page              999 mm/page_alloc.c 		add_to_free_area_random(page, &zone->free_area[order],
page             1002 mm/page_alloc.c 		add_to_free_area(page, &zone->free_area[order], migratetype);
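Annotation: in the __free_one_page() entries just above, buddy = page + (buddy_pfn - pfn) at page_alloc.c:928 works because a block's buddy pfn differs only in bit <order>, and the merged parent starts at the lower pfn of the pair. The helper computing buddy_pfn is not itself in this listing, so the index math is restated standalone here:

/* Buddy-allocator index math: at a given order the buddy is found by
 * flipping bit <order> of the pfn; ANDing the pair yields the merged
 * block's start, mirroring the buddy_pfn/combined_pfn arithmetic in
 * the __free_one_page() entries indexed above. */
#include <stdio.h>

static unsigned long find_buddy_pfn(unsigned long pfn, unsigned int order)
{
	return pfn ^ (1UL << order);
}

int main(void)
{
	unsigned long pfn = 0x1234;

	for (unsigned int order = 0; order < 4; order++) {
		unsigned long buddy = find_buddy_pfn(pfn, order);
		unsigned long combined = buddy & pfn;	/* lower of the pair */

		printf("order %u: pfn %#lx buddy %#lx merged block at %#lx\n",
		       order, pfn, buddy, combined);
		pfn = combined;		/* assume the merge succeeded */
	}
	return 0;
}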
page             1011 mm/page_alloc.c static inline bool page_expected_state(struct page *page,
page             1014 mm/page_alloc.c 	if (unlikely(atomic_read(&page->_mapcount) != -1))
page             1017 mm/page_alloc.c 	if (unlikely((unsigned long)page->mapping |
page             1018 mm/page_alloc.c 			page_ref_count(page) |
page             1020 mm/page_alloc.c 			(unsigned long)page->mem_cgroup |
page             1022 mm/page_alloc.c 			(page->flags & check_flags)))
page             1028 mm/page_alloc.c static void free_pages_check_bad(struct page *page)
page             1036 mm/page_alloc.c 	if (unlikely(atomic_read(&page->_mapcount) != -1))
page             1038 mm/page_alloc.c 	if (unlikely(page->mapping != NULL))
page             1040 mm/page_alloc.c 	if (unlikely(page_ref_count(page) != 0))
page             1042 mm/page_alloc.c 	if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE)) {
page             1047 mm/page_alloc.c 	if (unlikely(page->mem_cgroup))
page             1050 mm/page_alloc.c 	bad_page(page, bad_reason, bad_flags);
page             1053 mm/page_alloc.c static inline int free_pages_check(struct page *page)
page             1055 mm/page_alloc.c 	if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE)))
page             1059 mm/page_alloc.c 	free_pages_check_bad(page);
page             1063 mm/page_alloc.c static int free_tail_pages_check(struct page *head_page, struct page *page)
page             1077 mm/page_alloc.c 	switch (page - head_page) {
page             1080 mm/page_alloc.c 		if (unlikely(compound_mapcount(page))) {
page             1081 mm/page_alloc.c 			bad_page(page, "nonzero compound_mapcount", 0);
page             1092 mm/page_alloc.c 		if (page->mapping != TAIL_MAPPING) {
page             1093 mm/page_alloc.c 			bad_page(page, "corrupted mapping in tail page", 0);
page             1098 mm/page_alloc.c 	if (unlikely(!PageTail(page))) {
page             1099 mm/page_alloc.c 		bad_page(page, "PageTail not set", 0);
page             1102 mm/page_alloc.c 	if (unlikely(compound_head(page) != head_page)) {
page             1103 mm/page_alloc.c 		bad_page(page, "compound_head not consistent", 0);
page             1108 mm/page_alloc.c 	page->mapping = NULL;
page             1109 mm/page_alloc.c 	clear_compound_head(page);
page             1113 mm/page_alloc.c static void kernel_init_free_pages(struct page *page, int numpages)
page             1118 mm/page_alloc.c 		clear_highpage(page + i);
page             1121 mm/page_alloc.c static __always_inline bool free_pages_prepare(struct page *page,
page             1126 mm/page_alloc.c 	VM_BUG_ON_PAGE(PageTail(page), page);
page             1128 mm/page_alloc.c 	trace_mm_page_free(page, order);
page             1135 mm/page_alloc.c 		bool compound = PageCompound(page);
page             1138 mm/page_alloc.c 		VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);
page             1141 mm/page_alloc.c 			ClearPageDoubleMap(page);
page             1144 mm/page_alloc.c 				bad += free_tail_pages_check(page, page + i);
page             1145 mm/page_alloc.c 			if (unlikely(free_pages_check(page + i))) {
page             1149 mm/page_alloc.c 			(page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
page             1152 mm/page_alloc.c 	if (PageMappingFlags(page))
page             1153 mm/page_alloc.c 		page->mapping = NULL;
page             1154 mm/page_alloc.c 	if (memcg_kmem_enabled() && PageKmemcg(page))
page             1155 mm/page_alloc.c 		__memcg_kmem_uncharge(page, order);
page             1157 mm/page_alloc.c 		bad += free_pages_check(page);
page             1161 mm/page_alloc.c 	page_cpupid_reset_last(page);
page             1162 mm/page_alloc.c 	page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
page             1163 mm/page_alloc.c 	reset_page_owner(page, order);
page             1165 mm/page_alloc.c 	if (!PageHighMem(page)) {
page             1166 mm/page_alloc.c 		debug_check_no_locks_freed(page_address(page),
page             1168 mm/page_alloc.c 		debug_check_no_obj_freed(page_address(page),
page             1172 mm/page_alloc.c 		kernel_init_free_pages(page, 1 << order);
page             1174 mm/page_alloc.c 	kernel_poison_pages(page, 1 << order, 0);
page             1180 mm/page_alloc.c 	arch_free_page(page, order);
page             1183 mm/page_alloc.c 		kernel_map_pages(page, 1 << order, 0);
page             1185 mm/page_alloc.c 	kasan_free_nondeferred_pages(page, order);
page             1196 mm/page_alloc.c static bool free_pcp_prepare(struct page *page)
page             1198 mm/page_alloc.c 	return free_pages_prepare(page, 0, true);
page             1201 mm/page_alloc.c static bool bulkfree_pcp_prepare(struct page *page)
page             1204 mm/page_alloc.c 		return free_pages_check(page);
page             1215 mm/page_alloc.c static bool free_pcp_prepare(struct page *page)
page             1218 mm/page_alloc.c 		return free_pages_prepare(page, 0, true);
page             1220 mm/page_alloc.c 		return free_pages_prepare(page, 0, false);
page             1223 mm/page_alloc.c static bool bulkfree_pcp_prepare(struct page *page)
page             1225 mm/page_alloc.c 	return free_pages_check(page);
page             1229 mm/page_alloc.c static inline void prefetch_buddy(struct page *page)
page             1231 mm/page_alloc.c 	unsigned long pfn = page_to_pfn(page);
page             1233 mm/page_alloc.c 	struct page *buddy = page + (buddy_pfn - pfn);
page             1256 mm/page_alloc.c 	struct page *page, *tmp;
page             1281 mm/page_alloc.c 			page = list_last_entry(list, struct page, lru);
page             1283 mm/page_alloc.c 			list_del(&page->lru);
page             1286 mm/page_alloc.c 			if (bulkfree_pcp_prepare(page))
page             1289 mm/page_alloc.c 			list_add_tail(&page->lru, &head);
page             1301 mm/page_alloc.c 				prefetch_buddy(page);
page             1312 mm/page_alloc.c 	list_for_each_entry_safe(page, tmp, &head, lru) {
page             1313 mm/page_alloc.c 		int mt = get_pcppage_migratetype(page);
page             1315 mm/page_alloc.c 		VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
page             1318 mm/page_alloc.c 			mt = get_pageblock_migratetype(page);
page             1320 mm/page_alloc.c 		__free_one_page(page, page_to_pfn(page), zone, 0, mt);
page             1321 mm/page_alloc.c 		trace_mm_page_pcpu_drain(page, 0, mt);
page             1327 mm/page_alloc.c 				struct page *page, unsigned long pfn,
page             1334 mm/page_alloc.c 		migratetype = get_pfnblock_migratetype(page, pfn);
page             1336 mm/page_alloc.c 	__free_one_page(page, pfn, zone, order, migratetype);
page             1340 mm/page_alloc.c static void __meminit __init_single_page(struct page *page, unsigned long pfn,
page             1343 mm/page_alloc.c 	mm_zero_struct_page(page);
page             1344 mm/page_alloc.c 	set_page_links(page, zone, nid, pfn);
page             1345 mm/page_alloc.c 	init_page_count(page);
page             1346 mm/page_alloc.c 	page_mapcount_reset(page);
page             1347 mm/page_alloc.c 	page_cpupid_reset_last(page);
page             1348 mm/page_alloc.c 	page_kasan_tag_reset(page);
page             1350 mm/page_alloc.c 	INIT_LIST_HEAD(&page->lru);
page             1354 mm/page_alloc.c 		set_page_address(page, __va(pfn << PAGE_SHIFT));
page             1397 mm/page_alloc.c 			struct page *page = pfn_to_page(start_pfn);
page             1402 mm/page_alloc.c 			INIT_LIST_HEAD(&page->lru);
page             1409 mm/page_alloc.c 			__SetPageReserved(page);
page             1414 mm/page_alloc.c static void __free_pages_ok(struct page *page, unsigned int order)
page             1418 mm/page_alloc.c 	unsigned long pfn = page_to_pfn(page);
page             1420 mm/page_alloc.c 	if (!free_pages_prepare(page, order, true))
page             1423 mm/page_alloc.c 	migratetype = get_pfnblock_migratetype(page, pfn);
page             1426 mm/page_alloc.c 	free_one_page(page_zone(page), page, pfn, order, migratetype);
page             1430 mm/page_alloc.c void __free_pages_core(struct page *page, unsigned int order)
page             1433 mm/page_alloc.c 	struct page *p = page;
page             1445 mm/page_alloc.c 	atomic_long_add(nr_pages, &page_zone(page)->managed_pages);
page             1446 mm/page_alloc.c 	set_page_refcounted(page);
page             1447 mm/page_alloc.c 	__free_pages(page, order);
page             1490 mm/page_alloc.c void __init memblock_free_pages(struct page *page, unsigned long pfn,
page             1495 mm/page_alloc.c 	__free_pages_core(page, order);
page             1515 mm/page_alloc.c struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
page             1518 mm/page_alloc.c 	struct page *start_page;
page             1519 mm/page_alloc.c 	struct page *end_page;
page             1574 mm/page_alloc.c 	struct page *page;
page             1580 mm/page_alloc.c 	page = pfn_to_page(pfn);
page             1585 mm/page_alloc.c 		set_pageblock_migratetype(page, MIGRATE_MOVABLE);
page             1586 mm/page_alloc.c 		__free_pages_core(page, pageblock_order);
page             1590 mm/page_alloc.c 	for (i = 0; i < nr_pages; i++, page++, pfn++) {
page             1592 mm/page_alloc.c 			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
page             1593 mm/page_alloc.c 		__free_pages_core(page, 0);
page             1665 mm/page_alloc.c 	struct page *page = NULL;
page             1669 mm/page_alloc.c 			page = NULL;
page             1671 mm/page_alloc.c 		} else if (!page || !(pfn & nr_pgmask)) {
page             1672 mm/page_alloc.c 			page = pfn_to_page(pfn);
page             1675 mm/page_alloc.c 			page++;
page             1677 mm/page_alloc.c 		__init_single_page(page, pfn, zid, nid);
page             1974 mm/page_alloc.c void __init init_cma_reserved_pageblock(struct page *page)
page             1977 mm/page_alloc.c 	struct page *p = page;
page             1984 mm/page_alloc.c 	set_pageblock_migratetype(page, MIGRATE_CMA);
page             1988 mm/page_alloc.c 		p = page;
page             1995 mm/page_alloc.c 		set_page_refcounted(page);
page             1996 mm/page_alloc.c 		__free_pages(page, pageblock_order);
page             1999 mm/page_alloc.c 	adjust_managed_page_count(page, pageblock_nr_pages);
page             2017 mm/page_alloc.c static inline void expand(struct zone *zone, struct page *page,
page             2027 mm/page_alloc.c 		VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
page             2035 mm/page_alloc.c 		if (set_page_guard(zone, &page[size], high, migratetype))
page             2038 mm/page_alloc.c 		add_to_free_area(&page[size], area, migratetype);
page             2039 mm/page_alloc.c 		set_page_order(&page[size], high);
page             2043 mm/page_alloc.c static void check_new_page_bad(struct page *page)
page             2048 mm/page_alloc.c 	if (unlikely(atomic_read(&page->_mapcount) != -1))
page             2050 mm/page_alloc.c 	if (unlikely(page->mapping != NULL))
page             2052 mm/page_alloc.c 	if (unlikely(page_ref_count(page) != 0))
page             2054 mm/page_alloc.c 	if (unlikely(page->flags & __PG_HWPOISON)) {
page             2058 mm/page_alloc.c 		page_mapcount_reset(page); /* remove PageBuddy */
page             2061 mm/page_alloc.c 	if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP)) {
page             2066 mm/page_alloc.c 	if (unlikely(page->mem_cgroup))
page             2069 mm/page_alloc.c 	bad_page(page, bad_reason, bad_flags);
page             2075 mm/page_alloc.c static inline int check_new_page(struct page *page)
page             2077 mm/page_alloc.c 	if (likely(page_expected_state(page,
page             2081 mm/page_alloc.c 	check_new_page_bad(page);
page             2097 mm/page_alloc.c static inline bool check_pcp_refill(struct page *page)
page             2100 mm/page_alloc.c 		return check_new_page(page);
page             2105 mm/page_alloc.c static inline bool check_new_pcp(struct page *page)
page             2107 mm/page_alloc.c 	return check_new_page(page);
page             2115 mm/page_alloc.c static inline bool check_pcp_refill(struct page *page)
page             2117 mm/page_alloc.c 	return check_new_page(page);
page             2119 mm/page_alloc.c static inline bool check_new_pcp(struct page *page)
page             2122 mm/page_alloc.c 		return check_new_page(page);
page             2128 mm/page_alloc.c static bool check_new_pages(struct page *page, unsigned int order)
page             2132 mm/page_alloc.c 		struct page *p = page + i;
page             2141 mm/page_alloc.c inline void post_alloc_hook(struct page *page, unsigned int order,
page             2144 mm/page_alloc.c 	set_page_private(page, 0);
page             2145 mm/page_alloc.c 	set_page_refcounted(page);
page             2147 mm/page_alloc.c 	arch_alloc_page(page, order);
page             2149 mm/page_alloc.c 		kernel_map_pages(page, 1 << order, 1);
page             2150 mm/page_alloc.c 	kasan_alloc_pages(page, order);
page             2151 mm/page_alloc.c 	kernel_poison_pages(page, 1 << order, 1);
page             2152 mm/page_alloc.c 	set_page_owner(page, order, gfp_flags);
page             2155 mm/page_alloc.c static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
page             2158 mm/page_alloc.c 	post_alloc_hook(page, order, gfp_flags);
page             2161 mm/page_alloc.c 		kernel_init_free_pages(page, 1 << order);
page             2164 mm/page_alloc.c 		prep_compound_page(page, order);
page             2173 mm/page_alloc.c 		set_page_pfmemalloc(page);
page             2175 mm/page_alloc.c 		clear_page_pfmemalloc(page);
page             2183 mm/page_alloc.c struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
page             2188 mm/page_alloc.c 	struct page *page;
page             2193 mm/page_alloc.c 		page = get_page_from_free_area(area, migratetype);
page             2194 mm/page_alloc.c 		if (!page)
page             2196 mm/page_alloc.c 		del_page_from_free_area(page, area);
page             2197 mm/page_alloc.c 		expand(zone, page, order, current_order, area, migratetype);
page             2198 mm/page_alloc.c 		set_pcppage_migratetype(page, migratetype);
page             2199 mm/page_alloc.c 		return page;
page             2223 mm/page_alloc.c static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone,
page             2229 mm/page_alloc.c static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
page             2239 mm/page_alloc.c 			  struct page *start_page, struct page *end_page,
page             2242 mm/page_alloc.c 	struct page *page;
page             2246 mm/page_alloc.c 	for (page = start_page; page <= end_page;) {
page             2247 mm/page_alloc.c 		if (!pfn_valid_within(page_to_pfn(page))) {
page             2248 mm/page_alloc.c 			page++;
page             2252 mm/page_alloc.c 		if (!PageBuddy(page)) {
page             2259 mm/page_alloc.c 					(PageLRU(page) || __PageMovable(page)))
page             2262 mm/page_alloc.c 			page++;
page             2267 mm/page_alloc.c 		VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
page             2268 mm/page_alloc.c 		VM_BUG_ON_PAGE(page_zone(page) != zone, page);
page             2270 mm/page_alloc.c 		order = page_order(page);
page             2271 mm/page_alloc.c 		move_to_free_area(page, &zone->free_area[order], migratetype);
page             2272 mm/page_alloc.c 		page += 1 << order;
page             2279 mm/page_alloc.c int move_freepages_block(struct zone *zone, struct page *page,
page             2283 mm/page_alloc.c 	struct page *start_page, *end_page;
page             2288 mm/page_alloc.c 	start_pfn = page_to_pfn(page);
page             2296 mm/page_alloc.c 		start_page = page;
page             2304 mm/page_alloc.c static void change_pageblock_range(struct page *pageblock_page,
page             2391 mm/page_alloc.c static void steal_suitable_fallback(struct zone *zone, struct page *page,
page             2394 mm/page_alloc.c 	unsigned int current_order = page_order(page);
page             2399 mm/page_alloc.c 	old_block_type = get_pageblock_migratetype(page);
page             2410 mm/page_alloc.c 		change_pageblock_range(page, current_order, start_type);
page             2427 mm/page_alloc.c 	free_pages = move_freepages_block(zone, page, start_type,
page             2461 mm/page_alloc.c 		set_pageblock_migratetype(page, start_type);
page             2467 mm/page_alloc.c 	move_to_free_area(page, area, start_type);
page             2511 mm/page_alloc.c static void reserve_highatomic_pageblock(struct page *page, struct zone *zone,
page             2532 mm/page_alloc.c 	mt = get_pageblock_migratetype(page);
page             2536 mm/page_alloc.c 		set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC);
page             2537 mm/page_alloc.c 		move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL);
page             2560 mm/page_alloc.c 	struct page *page;
page             2578 mm/page_alloc.c 			page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC);
page             2579 mm/page_alloc.c 			if (!page)
page             2589 mm/page_alloc.c 			if (is_migrate_highatomic_page(page)) {
page             2611 mm/page_alloc.c 			set_pageblock_migratetype(page, ac->migratetype);
page             2612 mm/page_alloc.c 			ret = move_freepages_block(zone, page, ac->migratetype,
page             2642 mm/page_alloc.c 	struct page *page;
page             2701 mm/page_alloc.c 	page = get_page_from_free_area(area, fallback_mt);
page             2703 mm/page_alloc.c 	steal_suitable_fallback(zone, page, alloc_flags, start_migratetype,
page             2706 mm/page_alloc.c 	trace_mm_page_alloc_extfrag(page, order, current_order,
page             2717 mm/page_alloc.c static __always_inline struct page *
page             2721 mm/page_alloc.c 	struct page *page;
page             2724 mm/page_alloc.c 	page = __rmqueue_smallest(zone, order, migratetype);
page             2725 mm/page_alloc.c 	if (unlikely(!page)) {
page             2727 mm/page_alloc.c 			page = __rmqueue_cma_fallback(zone, order);
page             2729 mm/page_alloc.c 		if (!page && __rmqueue_fallback(zone, order, migratetype,
page             2734 mm/page_alloc.c 	trace_mm_page_alloc_zone_locked(page, order, migratetype);
page             2735 mm/page_alloc.c 	return page;
page             2751 mm/page_alloc.c 		struct page *page = __rmqueue(zone, order, migratetype,
page             2753 mm/page_alloc.c 		if (unlikely(page == NULL))
page             2756 mm/page_alloc.c 		if (unlikely(check_pcp_refill(page)))
page             2769 mm/page_alloc.c 		list_add_tail(&page->lru, list);
page             2771 mm/page_alloc.c 		if (is_migrate_cma(get_pcppage_migratetype(page)))
page             2973 mm/page_alloc.c 	struct page *page;
page             2983 mm/page_alloc.c 			page = pfn_to_page(pfn);
page             2990 mm/page_alloc.c 			if (page_zone(page) != zone)
page             2993 mm/page_alloc.c 			if (!swsusp_page_is_forbidden(page))
page             2994 mm/page_alloc.c 				swsusp_unset_page_free(page);
page             2998 mm/page_alloc.c 		list_for_each_entry(page,
page             3002 mm/page_alloc.c 			pfn = page_to_pfn(page);
page             3016 mm/page_alloc.c static bool free_unref_page_prepare(struct page *page, unsigned long pfn)
page             3020 mm/page_alloc.c 	if (!free_pcp_prepare(page))
page             3023 mm/page_alloc.c 	migratetype = get_pfnblock_migratetype(page, pfn);
page             3024 mm/page_alloc.c 	set_pcppage_migratetype(page, migratetype);
page             3028 mm/page_alloc.c static void free_unref_page_commit(struct page *page, unsigned long pfn)
page             3030 mm/page_alloc.c 	struct zone *zone = page_zone(page);
page             3034 mm/page_alloc.c 	migratetype = get_pcppage_migratetype(page);
page             3046 mm/page_alloc.c 			free_one_page(zone, page, pfn, 0, migratetype);
page             3053 mm/page_alloc.c 	list_add(&page->lru, &pcp->lists[migratetype]);
page             3064 mm/page_alloc.c void free_unref_page(struct page *page)
page             3067 mm/page_alloc.c 	unsigned long pfn = page_to_pfn(page);
page             3069 mm/page_alloc.c 	if (!free_unref_page_prepare(page, pfn))
page             3073 mm/page_alloc.c 	free_unref_page_commit(page, pfn);
page             3082 mm/page_alloc.c 	struct page *page, *next;
page             3087 mm/page_alloc.c 	list_for_each_entry_safe(page, next, list, lru) {
page             3088 mm/page_alloc.c 		pfn = page_to_pfn(page);
page             3089 mm/page_alloc.c 		if (!free_unref_page_prepare(page, pfn))
page             3090 mm/page_alloc.c 			list_del(&page->lru);
page             3091 mm/page_alloc.c 		set_page_private(page, pfn);
page             3095 mm/page_alloc.c 	list_for_each_entry_safe(page, next, list, lru) {
page             3096 mm/page_alloc.c 		unsigned long pfn = page_private(page);
page             3098 mm/page_alloc.c 		set_page_private(page, 0);
page             3099 mm/page_alloc.c 		trace_mm_page_free_batched(page);
page             3100 mm/page_alloc.c 		free_unref_page_commit(page, pfn);
page             3123 mm/page_alloc.c void split_page(struct page *page, unsigned int order)
page             3127 mm/page_alloc.c 	VM_BUG_ON_PAGE(PageCompound(page), page);
page             3128 mm/page_alloc.c 	VM_BUG_ON_PAGE(!page_count(page), page);
page             3131 mm/page_alloc.c 		set_page_refcounted(page + i);
page             3132 mm/page_alloc.c 	split_page_owner(page, order);
page             3136 mm/page_alloc.c int __isolate_free_page(struct page *page, unsigned int order)
page             3138 mm/page_alloc.c 	struct free_area *area = &page_zone(page)->free_area[order];
page             3143 mm/page_alloc.c 	BUG_ON(!PageBuddy(page));
page             3145 mm/page_alloc.c 	zone = page_zone(page);
page             3146 mm/page_alloc.c 	mt = get_pageblock_migratetype(page);
page             3164 mm/page_alloc.c 	del_page_from_free_area(page, area);
page             3171 mm/page_alloc.c 		struct page *endpage = page + (1 << order) - 1;
page             3172 mm/page_alloc.c 		for (; page < endpage; page += pageblock_nr_pages) {
page             3173 mm/page_alloc.c 			int mt = get_pageblock_migratetype(page);
page             3176 mm/page_alloc.c 				set_pageblock_migratetype(page,
page             3213 mm/page_alloc.c static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
page             3218 mm/page_alloc.c 	struct page *page;
page             3229 mm/page_alloc.c 		page = list_first_entry(list, struct page, lru);
page             3230 mm/page_alloc.c 		list_del(&page->lru);
page             3232 mm/page_alloc.c 	} while (check_new_pcp(page));
page             3234 mm/page_alloc.c 	return page;
page             3238 mm/page_alloc.c static struct page *rmqueue_pcplist(struct zone *preferred_zone,
page             3244 mm/page_alloc.c 	struct page *page;
page             3250 mm/page_alloc.c 	page = __rmqueue_pcplist(zone,  migratetype, alloc_flags, pcp, list);
page             3251 mm/page_alloc.c 	if (page) {
page             3252 mm/page_alloc.c 		__count_zid_vm_events(PGALLOC, page_zonenum(page), 1);
page             3256 mm/page_alloc.c 	return page;
page             3263 mm/page_alloc.c struct page *rmqueue(struct zone *preferred_zone,
page             3269 mm/page_alloc.c 	struct page *page;
page             3272 mm/page_alloc.c 		page = rmqueue_pcplist(preferred_zone, zone, gfp_flags,
page             3285 mm/page_alloc.c 		page = NULL;
page             3287 mm/page_alloc.c 			page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
page             3288 mm/page_alloc.c 			if (page)
page             3289 mm/page_alloc.c 				trace_mm_page_alloc_zone_locked(page, order, migratetype);
page             3291 mm/page_alloc.c 		if (!page)
page             3292 mm/page_alloc.c 			page = __rmqueue(zone, order, migratetype, alloc_flags);
page             3293 mm/page_alloc.c 	} while (page && check_new_pages(page, order));
page             3295 mm/page_alloc.c 	if (!page)
page             3298 mm/page_alloc.c 				  get_pcppage_migratetype(page));
page             3300 mm/page_alloc.c 	__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
page             3311 mm/page_alloc.c 	VM_BUG_ON_PAGE(page && bad_range(zone, page), page);
page             3312 mm/page_alloc.c 	return page;
page             3577 mm/page_alloc.c static struct page *
page             3595 mm/page_alloc.c 		struct page *page;
page             3690 mm/page_alloc.c 		page = rmqueue(ac->preferred_zoneref->zone, zone, order,
page             3692 mm/page_alloc.c 		if (page) {
page             3693 mm/page_alloc.c 			prep_new_page(page, order, gfp_mask, alloc_flags);
page             3700 mm/page_alloc.c 				reserve_highatomic_pageblock(page, zone, order);
page             3702 mm/page_alloc.c 			return page;
page             3768 mm/page_alloc.c static inline struct page *
page             3773 mm/page_alloc.c 	struct page *page;
page             3775 mm/page_alloc.c 	page = get_page_from_freelist(gfp_mask, order,
page             3781 mm/page_alloc.c 	if (!page)
page             3782 mm/page_alloc.c 		page = get_page_from_freelist(gfp_mask, order,
page             3785 mm/page_alloc.c 	return page;
page             3788 mm/page_alloc.c static inline struct page *
page             3799 mm/page_alloc.c 	struct page *page;
page             3820 mm/page_alloc.c 	page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) &
page             3823 mm/page_alloc.c 	if (page)
page             3868 mm/page_alloc.c 			page = __alloc_pages_cpuset_fallback(gfp_mask, order,
page             3873 mm/page_alloc.c 	return page;
page             3884 mm/page_alloc.c static struct page *
page             3889 mm/page_alloc.c 	struct page *page = NULL;
page             3900 mm/page_alloc.c 								prio, &page);
page             3912 mm/page_alloc.c 	if (page)
page             3913 mm/page_alloc.c 		prep_new_page(page, order, gfp_mask, alloc_flags);
page             3916 mm/page_alloc.c 	if (!page)
page             3917 mm/page_alloc.c 		page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
page             3919 mm/page_alloc.c 	if (page) {
page             3920 mm/page_alloc.c 		struct zone *zone = page_zone(page);
page             3925 mm/page_alloc.c 		return page;
page             4017 mm/page_alloc.c static inline struct page *
page             4135 mm/page_alloc.c static inline struct page *
page             4140 mm/page_alloc.c 	struct page *page = NULL;
page             4148 mm/page_alloc.c 	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
page             4155 mm/page_alloc.c 	if (!page && !drained) {
page             4162 mm/page_alloc.c 	return page;
page             4398 mm/page_alloc.c static inline struct page *
page             4404 mm/page_alloc.c 	struct page *page = NULL;
page             4453 mm/page_alloc.c 	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
page             4454 mm/page_alloc.c 	if (page)
page             4470 mm/page_alloc.c 		page = __alloc_pages_direct_compact(gfp_mask, order,
page             4474 mm/page_alloc.c 		if (page)
page             4547 mm/page_alloc.c 	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
page             4548 mm/page_alloc.c 	if (page)
page             4560 mm/page_alloc.c 	page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
page             4562 mm/page_alloc.c 	if (page)
page             4566 mm/page_alloc.c 	page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
page             4568 mm/page_alloc.c 	if (page)
page             4604 mm/page_alloc.c 	page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
page             4605 mm/page_alloc.c 	if (page)
page             4658 mm/page_alloc.c 		page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_HARDER, ac);
page             4659 mm/page_alloc.c 		if (page)
page             4669 mm/page_alloc.c 	return page;
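The slowpath above is ordered by cost: retry the freelists with boosted ALLOC_* flags, then direct reclaim, then direct compaction, and only then the OOM killer; which stages are reachable is decided entirely by the gfp mask. A sketch of the caller-visible difference, with demo_gfp() as an illustrative helper:

    #include <linux/gfp.h>

    static struct page *demo_gfp(bool can_sleep)
    {
            /* GFP_ATOMIC lacks __GFP_DIRECT_RECLAIM: it may dip into the
             * reserves but never sleeps, so the reclaim/compaction/OOM
             * stages above are skipped. GFP_KERNEL may block in all of
             * them. */
            gfp_t gfp = can_sleep ? GFP_KERNEL : GFP_ATOMIC;

            return alloc_pages(gfp, 0);
    }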
page             4722 mm/page_alloc.c struct page *
page             4726 mm/page_alloc.c 	struct page *page;
page             4754 mm/page_alloc.c 	page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
page             4755 mm/page_alloc.c 	if (likely(page))
page             4774 mm/page_alloc.c 	page = __alloc_pages_slowpath(alloc_mask, order, &ac);
page             4777 mm/page_alloc.c 	if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page &&
page             4778 mm/page_alloc.c 	    unlikely(__memcg_kmem_charge(page, gfp_mask, order) != 0)) {
page             4779 mm/page_alloc.c 		__free_pages(page, order);
page             4780 mm/page_alloc.c 		page = NULL;
page             4783 mm/page_alloc.c 	trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype);
page             4785 mm/page_alloc.c 	return page;
page             4796 mm/page_alloc.c 	struct page *page;
page             4798 mm/page_alloc.c 	page = alloc_pages(gfp_mask & ~__GFP_HIGHMEM, order);
page             4799 mm/page_alloc.c 	if (!page)
page             4801 mm/page_alloc.c 	return (unsigned long) page_address(page);
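These are the two public faces of the buddy allocator traced above: the struct-page interface and the virtual-address wrapper, which simply returns page_address() of the allocated page (hence the ~__GFP_HIGHMEM masking visible at mm/page_alloc.c:4798). A short pairing sketch; demo_alloc() is an illustrative name:

    #include <linux/gfp.h>
    #include <linux/mm.h>

    static int demo_alloc(void)
    {
            struct page *page = alloc_pages(GFP_KERNEL, 0);
            unsigned long addr;

            if (!page)
                    return -ENOMEM;
            __free_pages(page, 0);

            addr = __get_free_pages(GFP_KERNEL, 1); /* 2 contiguous pages */
            if (!addr)
                    return -ENOMEM;
            free_pages(addr, 1);
            return 0;
    }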
page             4811 mm/page_alloc.c static inline void free_the_page(struct page *page, unsigned int order)
page             4814 mm/page_alloc.c 		free_unref_page(page);
page             4816 mm/page_alloc.c 		__free_pages_ok(page, order);
page             4819 mm/page_alloc.c void __free_pages(struct page *page, unsigned int order)
page             4821 mm/page_alloc.c 	if (put_page_testzero(page))
page             4822 mm/page_alloc.c 		free_the_page(page, order);
page             4847 mm/page_alloc.c static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
page             4850 mm/page_alloc.c 	struct page *page = NULL;
page             4856 mm/page_alloc.c 	page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
page             4858 mm/page_alloc.c 	nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
page             4860 mm/page_alloc.c 	if (unlikely(!page))
page             4861 mm/page_alloc.c 		page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
page             4863 mm/page_alloc.c 	nc->va = page ? page_address(page) : NULL;
page             4865 mm/page_alloc.c 	return page;
page             4868 mm/page_alloc.c void __page_frag_cache_drain(struct page *page, unsigned int count)
page             4870 mm/page_alloc.c 	VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
page             4872 mm/page_alloc.c 	if (page_ref_sub_and_test(page, count))
page             4873 mm/page_alloc.c 		free_the_page(page, compound_order(page));
page             4881 mm/page_alloc.c 	struct page *page;
page             4886 mm/page_alloc.c 		page = __page_frag_cache_refill(nc, gfp_mask);
page             4887 mm/page_alloc.c 		if (!page)
page             4897 mm/page_alloc.c 		page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE);
page             4900 mm/page_alloc.c 		nc->pfmemalloc = page_is_pfmemalloc(page);
page             4907 mm/page_alloc.c 		page = virt_to_page(nc->va);
page             4909 mm/page_alloc.c 		if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
page             4917 mm/page_alloc.c 		set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1);
page             4936 mm/page_alloc.c 	struct page *page = virt_to_head_page(addr);
page             4938 mm/page_alloc.c 	if (unlikely(put_page_testzero(page)))
page             4939 mm/page_alloc.c 		free_the_page(page, compound_order(page));
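The page_frag cache above backs small, short-lived buffers (networking is the main consumer): fragments are carved out of a single page whose refcount is inflated once per refill, and the virt_to_head_page()-based free at mm/page_alloc.c:4936 (page_frag_free() in this era) drops one reference per fragment. A minimal sketch, assuming this era's page_frag_alloc()/page_frag_free() API; demo_frag() is illustrative:

    #include <linux/gfp.h>
    #include <linux/mm_types.h>

    static int demo_frag(void)
    {
            static struct page_frag_cache nc;  /* zeroed: first call refills */
            void *buf = page_frag_alloc(&nc, 256, GFP_ATOMIC);

            if (!buf)
                    return -ENOMEM;
            /* ... use the 256-byte fragment ... */
            page_frag_free(buf);               /* drops the page reference */
            return 0;
    }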
page             5002 mm/page_alloc.c 	struct page *p;
page             5882 mm/page_alloc.c 	struct page *page;
page             5921 mm/page_alloc.c 		page = pfn_to_page(pfn);
page             5922 mm/page_alloc.c 		__init_single_page(page, pfn, zone, nid);
page             5924 mm/page_alloc.c 			__SetPageReserved(page);
page             5939 mm/page_alloc.c 			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
page             5972 mm/page_alloc.c 		struct page *page = pfn_to_page(pfn);
page             5974 mm/page_alloc.c 		__init_single_page(page, pfn, zone_idx, nid);
page             5983 mm/page_alloc.c 		__SetPageReserved(page);
page             5990 mm/page_alloc.c 		page->pgmap = pgmap;
page             5991 mm/page_alloc.c 		page->zone_device_data = NULL;
page             6009 mm/page_alloc.c 			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
page             6675 mm/page_alloc.c 	return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
page             6835 mm/page_alloc.c 		struct page *map;
page             6844 mm/page_alloc.c 		size =  (end - start) * sizeof(struct page);
page             7482 mm/page_alloc.c void adjust_managed_page_count(struct page *page, long count)
page             7484 mm/page_alloc.c 	atomic_long_add(count, &page_zone(page)->managed_pages);
page             7487 mm/page_alloc.c 	if (PageHighMem(page))
page             7501 mm/page_alloc.c 		struct page *page = virt_to_page(pos);
page             7511 mm/page_alloc.c 		direct_map_addr = page_address(page);
page             7515 mm/page_alloc.c 		free_reserved_page(page);
page             7526 mm/page_alloc.c void free_highmem_page(struct page *page)
page             7528 mm/page_alloc.c 	__free_reserved_page(page);
page             7530 mm/page_alloc.c 	atomic_long_inc(&page_zone(page)->managed_pages);
page             8191 mm/page_alloc.c bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
page             8196 mm/page_alloc.c 	unsigned long pfn = page_to_pfn(page);
page             8207 mm/page_alloc.c 	if (is_migrate_cma_page(page)) {
page             8226 mm/page_alloc.c 		page = pfn_to_page(check);
page             8228 mm/page_alloc.c 		if (PageReserved(page))
page             8244 mm/page_alloc.c 		if (PageHuge(page)) {
page             8245 mm/page_alloc.c 			struct page *head = compound_head(page);
page             8251 mm/page_alloc.c 			skip_pages = compound_nr(head) - (page - head);
page             8262 mm/page_alloc.c 		if (!page_ref_count(page)) {
page             8263 mm/page_alloc.c 			if (PageBuddy(page))
page             8264 mm/page_alloc.c 				iter += (1 << page_order(page)) - 1;
page             8272 mm/page_alloc.c 		if ((flags & SKIP_HWPOISON) && PageHWPoison(page))
page             8275 mm/page_alloc.c 		if (__PageMovable(page))
page             8278 mm/page_alloc.c 		if (!PageLRU(page))
page             8520 mm/page_alloc.c 		struct page *page = pfn_to_page(pfn);
page             8522 mm/page_alloc.c 		count += page_count(page) != 1;
page             8523 mm/page_alloc.c 		__free_page(page);
page             8569 mm/page_alloc.c 	struct page *page;
page             8592 mm/page_alloc.c 		page = pfn_to_page(pfn);
page             8597 mm/page_alloc.c 		if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
page             8599 mm/page_alloc.c 			SetPageReserved(page);
page             8604 mm/page_alloc.c 		BUG_ON(page_count(page));
page             8605 mm/page_alloc.c 		BUG_ON(!PageBuddy(page));
page             8606 mm/page_alloc.c 		order = page_order(page);
page             8612 mm/page_alloc.c 		del_page_from_free_area(page, &zone->free_area[order]);
page             8614 mm/page_alloc.c 			SetPageReserved((page+i));
page             8623 mm/page_alloc.c bool is_free_buddy_page(struct page *page)
page             8625 mm/page_alloc.c 	struct zone *zone = page_zone(page);
page             8626 mm/page_alloc.c 	unsigned long pfn = page_to_pfn(page);
page             8632 mm/page_alloc.c 		struct page *page_head = page - (pfn & ((1 << order) - 1));
page             8648 mm/page_alloc.c bool set_hwpoison_free_buddy_page(struct page *page)
page             8650 mm/page_alloc.c 	struct zone *zone = page_zone(page);
page             8651 mm/page_alloc.c 	unsigned long pfn = page_to_pfn(page);
page             8658 mm/page_alloc.c 		struct page *page_head = page - (pfn & ((1 << order) - 1));
page             8661 mm/page_alloc.c 			if (!TestSetPageHWPoison(page))
page              115 mm/page_ext.c  struct page_ext *lookup_page_ext(const struct page *page)
page              117 mm/page_ext.c  	unsigned long pfn = page_to_pfn(page);
page              121 mm/page_ext.c  	base = NODE_DATA(page_to_nid(page))->node_page_ext;
page              130 mm/page_ext.c  	index = pfn - round_down(node_start_pfn(page_to_nid(page)),
page              190 mm/page_ext.c  struct page_ext *lookup_page_ext(const struct page *page)
page              192 mm/page_ext.c  	unsigned long pfn = page_to_pfn(page);
page              262 mm/page_ext.c  		struct page *page = virt_to_page(addr);
page              267 mm/page_ext.c  		BUG_ON(PageReserved(page));
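lookup_page_ext() is the bridge from a struct page to its out-of-line extension record; clients such as page_owner (below) stash their per-page state there. It can legitimately return NULL (early boot, feature disabled), so every caller must check. A sketch of the pattern, with demo_has_owner() as an illustrative name:

    #include <linux/mm.h>
    #include <linux/page_ext.h>

    static bool demo_has_owner(struct page *page)
    {
            struct page_ext *page_ext = lookup_page_ext(page);

            if (!page_ext)
                    return false;
            /* ->flags carries the PAGE_EXT_* bits the debug clients set */
            return test_bit(PAGE_EXT_OWNER, &page_ext->flags);
    }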
page               31 mm/page_idle.c static struct page *page_idle_get_page(unsigned long pfn)
page               33 mm/page_idle.c 	struct page *page;
page               39 mm/page_idle.c 	page = pfn_to_page(pfn);
page               40 mm/page_idle.c 	if (!page || !PageLRU(page) ||
page               41 mm/page_idle.c 	    !get_page_unless_zero(page))
page               44 mm/page_idle.c 	pgdat = page_pgdat(page);
page               46 mm/page_idle.c 	if (unlikely(!PageLRU(page))) {
page               47 mm/page_idle.c 		put_page(page);
page               48 mm/page_idle.c 		page = NULL;
page               51 mm/page_idle.c 	return page;
page               54 mm/page_idle.c static bool page_idle_clear_pte_refs_one(struct page *page,
page               59 mm/page_idle.c 		.page = page,
page               84 mm/page_idle.c 		clear_page_idle(page);
page               90 mm/page_idle.c 		set_page_young(page);
page               95 mm/page_idle.c static void page_idle_clear_pte_refs(struct page *page)
page              107 mm/page_idle.c 	if (!page_mapped(page) ||
page              108 mm/page_idle.c 	    !page_rmapping(page))
page              111 mm/page_idle.c 	need_lock = !PageAnon(page) || PageKsm(page);
page              112 mm/page_idle.c 	if (need_lock && !trylock_page(page))
page              115 mm/page_idle.c 	rmap_walk(page, (struct rmap_walk_control *)&rwc);
page              118 mm/page_idle.c 		unlock_page(page);
page              126 mm/page_idle.c 	struct page *page;
page              145 mm/page_idle.c 		page = page_idle_get_page(pfn);
page              146 mm/page_idle.c 		if (page) {
page              147 mm/page_idle.c 			if (page_is_idle(page)) {
page              153 mm/page_idle.c 				page_idle_clear_pte_refs(page);
page              154 mm/page_idle.c 				if (page_is_idle(page))
page              157 mm/page_idle.c 			put_page(page);
page              171 mm/page_idle.c 	struct page *page;
page              189 mm/page_idle.c 			page = page_idle_get_page(pfn);
page              190 mm/page_idle.c 			if (page) {
page              191 mm/page_idle.c 				page_idle_clear_pte_refs(page);
page              192 mm/page_idle.c 				set_page_idle(page);
page              193 mm/page_idle.c 				put_page(page);
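The functions above implement /sys/kernel/mm/page_idle/bitmap: userspace writes set-bits to mark LRU pages idle, and a later read shows which pages were never referenced in between (see Documentation/admin-guide/mm/idle_page_tracking.rst). A hedged userspace sketch; the pfn range 0..63 is only a placeholder, and writes covering non-LRU pfns are silently ignored:

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("/sys/kernel/mm/page_idle/bitmap", O_RDWR);
            uint64_t bits = ~0ULL;          /* one bit per pfn, 64 per word */

            if (fd < 0)
                    return 1;
            pwrite(fd, &bits, sizeof(bits), 0);  /* mark pfns 0..63 idle */
            /* ... let the workload run ... */
            pread(fd, &bits, sizeof(bits), 0);
            printf("still-idle mask: %#llx\n", (unsigned long long)bits);
            close(fd);
            return 0;
    }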
page               30 mm/page_io.c   				struct page *page, bio_end_io_t end_io)
page               38 mm/page_io.c   		bio->bi_iter.bi_sector = map_swap_page(page, &bdev);
page               43 mm/page_io.c   		bio_add_page(bio, page, PAGE_SIZE * hpage_nr_pages(page), 0);
page               50 mm/page_io.c   	struct page *page = bio_first_page_all(bio);
page               53 mm/page_io.c   		SetPageError(page);
page               62 mm/page_io.c   		set_page_dirty(page);
page               66 mm/page_io.c   		ClearPageReclaim(page);
page               68 mm/page_io.c   	end_page_writeback(page);
page               72 mm/page_io.c   static void swap_slot_free_notify(struct page *page)
page               84 mm/page_io.c   	if (unlikely(!PageSwapCache(page)))
page               87 mm/page_io.c   	sis = page_swap_info(page);
page              108 mm/page_io.c   	entry.val = page_private(page);
page              114 mm/page_io.c   		SetPageDirty(page);
page              122 mm/page_io.c   	struct page *page = bio_first_page_all(bio);
page              126 mm/page_io.c   		SetPageError(page);
page              127 mm/page_io.c   		ClearPageUptodate(page);
page              134 mm/page_io.c   	SetPageUptodate(page);
page              135 mm/page_io.c   	swap_slot_free_notify(page);
page              137 mm/page_io.c   	unlock_page(page);
page              244 mm/page_io.c   int swap_writepage(struct page *page, struct writeback_control *wbc)
page              248 mm/page_io.c   	if (try_to_free_swap(page)) {
page              249 mm/page_io.c   		unlock_page(page);
page              252 mm/page_io.c   	if (frontswap_store(page) == 0) {
page              253 mm/page_io.c   		set_page_writeback(page);
page              254 mm/page_io.c   		unlock_page(page);
page              255 mm/page_io.c   		end_page_writeback(page);
page              258 mm/page_io.c   	ret = __swap_writepage(page, wbc, end_swap_bio_write);
page              263 mm/page_io.c   static sector_t swap_page_sector(struct page *page)
page              265 mm/page_io.c   	return (sector_t)__page_file_index(page) << (PAGE_SHIFT - 9);
page              268 mm/page_io.c   static inline void count_swpout_vm_event(struct page *page)
page              271 mm/page_io.c   	if (unlikely(PageTransHuge(page)))
page              274 mm/page_io.c   	count_vm_events(PSWPOUT, hpage_nr_pages(page));
page              277 mm/page_io.c   int __swap_writepage(struct page *page, struct writeback_control *wbc,
page              282 mm/page_io.c   	struct swap_info_struct *sis = page_swap_info(page);
page              284 mm/page_io.c   	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
page              290 mm/page_io.c   			.bv_page = page,
page              298 mm/page_io.c   		kiocb.ki_pos = page_file_offset(page);
page              300 mm/page_io.c   		set_page_writeback(page);
page              301 mm/page_io.c   		unlock_page(page);
page              317 mm/page_io.c   			set_page_dirty(page);
page              318 mm/page_io.c   			ClearPageReclaim(page);
page              320 mm/page_io.c   					   page_file_offset(page));
page              322 mm/page_io.c   		end_page_writeback(page);
page              326 mm/page_io.c   	ret = bdev_write_page(sis->bdev, swap_page_sector(page), page, wbc);
page              328 mm/page_io.c   		count_swpout_vm_event(page);
page              333 mm/page_io.c   	bio = get_swap_bio(GFP_NOIO, page, end_write_func);
page              335 mm/page_io.c   		set_page_dirty(page);
page              336 mm/page_io.c   		unlock_page(page);
page              341 mm/page_io.c   	bio_associate_blkg_from_page(bio, page);
page              342 mm/page_io.c   	count_swpout_vm_event(page);
page              343 mm/page_io.c   	set_page_writeback(page);
page              344 mm/page_io.c   	unlock_page(page);
page              350 mm/page_io.c   int swap_readpage(struct page *page, bool synchronous)
page              354 mm/page_io.c   	struct swap_info_struct *sis = page_swap_info(page);
page              358 mm/page_io.c   	VM_BUG_ON_PAGE(!PageSwapCache(page) && !synchronous, page);
page              359 mm/page_io.c   	VM_BUG_ON_PAGE(!PageLocked(page), page);
page              360 mm/page_io.c   	VM_BUG_ON_PAGE(PageUptodate(page), page);
page              361 mm/page_io.c   	if (frontswap_load(page) == 0) {
page              362 mm/page_io.c   		SetPageUptodate(page);
page              363 mm/page_io.c   		unlock_page(page);
page              371 mm/page_io.c   		ret = mapping->a_ops->readpage(swap_file, page);
page              377 mm/page_io.c   	ret = bdev_read_page(sis->bdev, swap_page_sector(page), page);
page              379 mm/page_io.c   		if (trylock_page(page)) {
page              380 mm/page_io.c   			swap_slot_free_notify(page);
page              381 mm/page_io.c   			unlock_page(page);
page              389 mm/page_io.c   	bio = get_swap_bio(GFP_KERNEL, page, end_swap_bio_read);
page              391 mm/page_io.c   		unlock_page(page);
page              424 mm/page_io.c   int swap_set_page_dirty(struct page *page)
page              426 mm/page_io.c   	struct swap_info_struct *sis = page_swap_info(page);
page              431 mm/page_io.c   		VM_BUG_ON_PAGE(!PageSwapCache(page), page);
page              432 mm/page_io.c   		return mapping->a_ops->set_page_dirty(page);
page              434 mm/page_io.c   		return __set_page_dirty_no_writeback(page);
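The sector arithmetic in swap_page_sector() above is worth spelling out: block-layer sectors are 512 bytes, so a page index becomes a sector number by shifting left by PAGE_SHIFT - 9 (a factor of 8 for 4 KiB pages). A one-line restatement, with demo_page_index_to_sector() as an illustrative name:

    #include <linux/mm.h>
    #include <linux/types.h>

    static sector_t demo_page_index_to_sector(pgoff_t index)
    {
            /* 4 KiB page, 512 B sector: index * (4096 / 512) = index << 3 */
            return (sector_t)index << (PAGE_SHIFT - 9);
    }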
page               18 mm/page_isolation.c static int set_migratetype_isolate(struct page *page, int migratetype, int isol_flags)
page               26 mm/page_isolation.c 	zone = page_zone(page);
page               35 mm/page_isolation.c 	if (is_migrate_isolate_page(page))
page               38 mm/page_isolation.c 	pfn = page_to_pfn(page);
page               62 mm/page_isolation.c 	if (!has_unmovable_pages(zone, page, arg.pages_found, migratetype,
page               74 mm/page_isolation.c 		int mt = get_pageblock_migratetype(page);
page               76 mm/page_isolation.c 		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
page               78 mm/page_isolation.c 		nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE,
page               90 mm/page_isolation.c static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
page               97 mm/page_isolation.c 	struct page *buddy;
page               99 mm/page_isolation.c 	zone = page_zone(page);
page              101 mm/page_isolation.c 	if (!is_migrate_isolate_page(page))
page              112 mm/page_isolation.c 	if (PageBuddy(page)) {
page              113 mm/page_isolation.c 		order = page_order(page);
page              115 mm/page_isolation.c 			pfn = page_to_pfn(page);
page              117 mm/page_isolation.c 			buddy = page + (buddy_pfn - pfn);
page              121 mm/page_isolation.c 				__isolate_free_page(page, order);
page              133 mm/page_isolation.c 		nr_pages = move_freepages_block(zone, page, migratetype, NULL);
page              136 mm/page_isolation.c 	set_pageblock_migratetype(page, migratetype);
page              141 mm/page_isolation.c 		post_alloc_hook(page, order, __GFP_MOVABLE);
page              142 mm/page_isolation.c 		__free_pages(page, order);
page              146 mm/page_isolation.c static inline struct page *
page              152 mm/page_isolation.c 		struct page *page;
page              154 mm/page_isolation.c 		page = pfn_to_online_page(pfn + i);
page              155 mm/page_isolation.c 		if (!page)
page              157 mm/page_isolation.c 		return page;
page              198 mm/page_isolation.c 	struct page *page;
page              207 mm/page_isolation.c 		page = __first_valid_page(pfn, pageblock_nr_pages);
page              208 mm/page_isolation.c 		if (page) {
page              209 mm/page_isolation.c 			if (set_migratetype_isolate(page, migratetype, flags)) {
page              221 mm/page_isolation.c 		struct page *page = pfn_to_online_page(pfn);
page              222 mm/page_isolation.c 		if (!page)
page              224 mm/page_isolation.c 		unset_migratetype_isolate(page, migratetype);
page              237 mm/page_isolation.c 	struct page *page;
page              245 mm/page_isolation.c 		page = __first_valid_page(pfn, pageblock_nr_pages);
page              246 mm/page_isolation.c 		if (!page || !is_migrate_isolate_page(page))
page              248 mm/page_isolation.c 		unset_migratetype_isolate(page, migratetype);
page              262 mm/page_isolation.c 	struct page *page;
page              269 mm/page_isolation.c 		page = pfn_to_page(pfn);
page              270 mm/page_isolation.c 		if (PageBuddy(page))
page              276 mm/page_isolation.c 			pfn += 1 << page_order(page);
page              277 mm/page_isolation.c 		else if (skip_hwpoisoned_pages && PageHWPoison(page))
page              292 mm/page_isolation.c 	struct page *page;
page              301 mm/page_isolation.c 		page = __first_valid_page(pfn, pageblock_nr_pages);
page              302 mm/page_isolation.c 		if (page && !is_migrate_isolate_page(page))
page              305 mm/page_isolation.c 	page = __first_valid_page(start_pfn, end_pfn - start_pfn);
page              306 mm/page_isolation.c 	if ((pfn < end_pfn) || !page)
page              309 mm/page_isolation.c 	zone = page_zone(page);
page              320 mm/page_isolation.c struct page *alloc_migrate_target(struct page *page, unsigned long private)
page              322 mm/page_isolation.c 	return new_page_nodemask(page, numa_node_id(), &node_states[N_MEMORY]);
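set_migratetype_isolate()/unset_migratetype_isolate() above are the building blocks of alloc_contig_range() and memory offlining: flip pageblocks to MIGRATE_ISOLATE so freed pages stop re-entering the allocator, migrate what remains, verify, then restore. A sketch of that sequence against the exported helpers, assuming this era's signatures from include/linux/page-isolation.h; demo_isolate() is illustrative:

    #include <linux/mm.h>
    #include <linux/page-isolation.h>

    static int demo_isolate(unsigned long start_pfn, unsigned long end_pfn)
    {
            int ret;

            /* 1. pages freed in [start, end) now land on isolate freelists */
            ret = start_isolate_page_range(start_pfn, end_pfn,
                                           MIGRATE_MOVABLE, 0);
            if (ret)
                    return ret;

            /* 2. ... migrate still-used pages out of the range ... */

            /* 3. confirm only free, isolated pages remain */
            ret = test_pages_isolated(start_pfn, end_pfn, false);

            /* 4. hand the pageblocks back to the allocator */
            undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
            return ret;
    }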
page              142 mm/page_owner.c void __reset_page_owner(struct page *page, unsigned int order)
page              151 mm/page_owner.c 	page_ext = lookup_page_ext(page);
page              162 mm/page_owner.c static inline void __set_page_owner_handle(struct page *page,
page              182 mm/page_owner.c noinline void __set_page_owner(struct page *page, unsigned int order,
page              185 mm/page_owner.c 	struct page_ext *page_ext = lookup_page_ext(page);
page              192 mm/page_owner.c 	__set_page_owner_handle(page, page_ext, handle, order, gfp_mask);
page              195 mm/page_owner.c void __set_page_owner_migrate_reason(struct page *page, int reason)
page              197 mm/page_owner.c 	struct page_ext *page_ext = lookup_page_ext(page);
page              207 mm/page_owner.c void __split_page_owner(struct page *page, unsigned int order)
page              210 mm/page_owner.c 	struct page_ext *page_ext = lookup_page_ext(page);
page              223 mm/page_owner.c void __copy_page_owner(struct page *oldpage, struct page *newpage)
page              256 mm/page_owner.c 	struct page *page;
page              274 mm/page_owner.c 		page = pfn_to_online_page(pfn);
page              275 mm/page_owner.c 		if (!page) {
page              283 mm/page_owner.c 		pageblock_mt = get_pageblock_migratetype(page);
page              290 mm/page_owner.c 			page = pfn_to_page(pfn);
page              292 mm/page_owner.c 			if (page_zone(page) != zone)
page              295 mm/page_owner.c 			if (PageBuddy(page)) {
page              298 mm/page_owner.c 				freepage_order = page_order_unsafe(page);
page              304 mm/page_owner.c 			if (PageReserved(page))
page              307 mm/page_owner.c 			page_ext = lookup_page_ext(page);
page              339 mm/page_owner.c 		struct page *page, struct page_owner *page_owner,
page              361 mm/page_owner.c 	pageblock_mt = get_pageblock_migratetype(page);
page              369 mm/page_owner.c 			page->flags, &page->flags);
page              402 mm/page_owner.c void __dump_page_owner(struct page *page)
page              404 mm/page_owner.c 	struct page_ext *page_ext = lookup_page_ext(page);
page              460 mm/page_owner.c 	struct page *page;
page              468 mm/page_owner.c 	page = NULL;
page              492 mm/page_owner.c 		page = pfn_to_page(pfn);
page              493 mm/page_owner.c 		if (PageBuddy(page)) {
page              494 mm/page_owner.c 			unsigned long freepage_order = page_order_unsafe(page);
page              501 mm/page_owner.c 		page_ext = lookup_page_ext(page);
page              539 mm/page_owner.c 		return print_page_owner(buf, count, pfn, page,
page              569 mm/page_owner.c 			struct page *page;
page              575 mm/page_owner.c 			page = pfn_to_page(pfn);
page              577 mm/page_owner.c 			if (page_zone(page) != zone)
page              587 mm/page_owner.c 			if (PageBuddy(page)) {
page              588 mm/page_owner.c 				unsigned long order = page_order_unsafe(page);
page              595 mm/page_owner.c 			if (PageReserved(page))
page              598 mm/page_owner.c 			page_ext = lookup_page_ext(page);
page              607 mm/page_owner.c 			__set_page_owner_handle(page, page_ext, early_handle,
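print_page_owner() above formats the records exposed through debugfs; the feature must be enabled with the page_owner=on boot parameter, after which each allocated page's order, gfp mask, and allocation stack can be dumped. A hedged userspace sketch; demo_dump_page_owner() is illustrative:

    #include <stdio.h>

    int demo_dump_page_owner(void)
    {
            FILE *f = fopen("/sys/kernel/debug/page_owner", "r");
            char buf[4096];
            size_t n;

            if (!f)
                    return -1;
            while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
                    fwrite(buf, 1, n, stdout);  /* one text record per page */
            fclose(f);
            return 0;
    }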
page               40 mm/page_poison.c static void poison_page(struct page *page)
page               42 mm/page_poison.c 	void *addr = kmap_atomic(page);
page               51 mm/page_poison.c static void poison_pages(struct page *page, int n)
page               56 mm/page_poison.c 		poison_page(page + i);
page               96 mm/page_poison.c static void unpoison_page(struct page *page)
page              100 mm/page_poison.c 	addr = kmap_atomic(page);
page              110 mm/page_poison.c static void unpoison_pages(struct page *page, int n)
page              115 mm/page_poison.c 		unpoison_page(page + i);
page              118 mm/page_poison.c void kernel_poison_pages(struct page *page, int numpages, int enable)
page              124 mm/page_poison.c 		unpoison_pages(page, numpages);
page              126 mm/page_poison.c 		poison_pages(page, numpages);
page              130 mm/page_poison.c void __kernel_map_pages(struct page *page, int numpages, int enable)
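Poisoning fills each freed page with PAGE_POISON (0xaa) so a use-after-free shows up as a broken pattern when the page is next allocated; the verification side scans with memchr_inv(). A sketch mirroring that check, with demo_page_is_clean() as an illustrative name:

    #include <linux/highmem.h>
    #include <linux/poison.h>
    #include <linux/string.h>

    static bool demo_page_is_clean(struct page *page)
    {
            void *addr = kmap_atomic(page);
            /* memchr_inv() returns the first byte differing from 0xaa */
            void *broken = memchr_inv(addr, PAGE_POISON, PAGE_SIZE);

            kunmap_atomic(addr);
            return !broken;         /* false: someone wrote after free */
    }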
page               55 mm/page_vma_mapped.c static inline bool pfn_in_hpage(struct page *hpage, unsigned long pfn)
page              111 mm/page_vma_mapped.c 	return pfn_in_hpage(pvmw->page, pfn);
page              141 mm/page_vma_mapped.c 	struct page *page = pvmw->page;
page              154 mm/page_vma_mapped.c 	if (unlikely(PageHuge(pvmw->page))) {
page              156 mm/page_vma_mapped.c 		pvmw->pte = huge_pte_offset(mm, pvmw->address, page_size(page));
page              160 mm/page_vma_mapped.c 		pvmw->ptl = huge_pte_lockptr(page_hstate(page), mm, pvmw->pte);
page              188 mm/page_vma_mapped.c 			if (pmd_page(*pvmw->pmd) != page)
page              198 mm/page_vma_mapped.c 					if (migration_entry_to_page(entry) != page)
page              219 mm/page_vma_mapped.c 		if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page))
page              225 mm/page_vma_mapped.c 					__vma_address(pvmw->page, pvmw->vma) +
page              226 mm/page_vma_mapped.c 					hpage_nr_pages(pvmw->page) * PAGE_SIZE)
page              257 mm/page_vma_mapped.c int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
page              260 mm/page_vma_mapped.c 		.page = page,
page              266 mm/page_vma_mapped.c 	start = __vma_address(page, vma);
page              267 mm/page_vma_mapped.c 	end = start + PAGE_SIZE * (hpage_nr_pages(page) - 1);
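page_vma_mapped_walk() is the iterator beneath page_mapped_in_vma() above and the rmap_one callbacks in mm/rmap.c below: each successful step leaves pvmw.pte (or pvmw.pmd for a THP) pointing at one mapping of the page inside the vma, with the right page-table lock held. The canonical caller shape, with demo_scan_mappings() as an illustrative name:

    #include <linux/mm.h>
    #include <linux/rmap.h>

    static void demo_scan_mappings(struct page *page,
                                   struct vm_area_struct *vma,
                                   unsigned long address)
    {
            struct page_vma_mapped_walk pvmw = {
                    .page = page,
                    .vma = vma,
                    .address = address,
            };

            /* loop to completion; break out early only via
             * page_vma_mapped_walk_done(&pvmw), which drops the lock */
            while (page_vma_mapped_walk(&pvmw)) {
                    /* inspect or modify *pvmw.pte here */
            }
    }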
page               51 mm/percpu-km.c 	struct page *pages;
page               96 mm/percpu-km.c static struct page *pcpu_addr_to_page(void *addr)
page               12 mm/percpu-vm.c static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk,
page               31 mm/percpu-vm.c static struct page **pcpu_get_pages(void)
page               33 mm/percpu-vm.c 	static struct page **pages;
page               54 mm/percpu-vm.c 			    struct page **pages, int page_start, int page_end)
page               61 mm/percpu-vm.c 			struct page *page = pages[pcpu_page_idx(cpu, i)];
page               63 mm/percpu-vm.c 			if (page)
page               64 mm/percpu-vm.c 				__free_page(page);
page               82 mm/percpu-vm.c 			    struct page **pages, int page_start, int page_end,
page               92 mm/percpu-vm.c 			struct page **pagep = &pages[pcpu_page_idx(cpu, i)];
page              153 mm/percpu-vm.c 			     struct page **pages, int page_start, int page_end)
page              160 mm/percpu-vm.c 			struct page *page;
page              162 mm/percpu-vm.c 			page = pcpu_chunk_page(chunk, cpu, i);
page              163 mm/percpu-vm.c 			WARN_ON(!page);
page              164 mm/percpu-vm.c 			pages[pcpu_page_idx(cpu, i)] = page;
page              192 mm/percpu-vm.c static int __pcpu_map_pages(unsigned long addr, struct page **pages,
page              214 mm/percpu-vm.c 			  struct page **pages, int page_start, int page_end)
page              278 mm/percpu-vm.c 	struct page **pages;
page              311 mm/percpu-vm.c 	struct page **pages;
page              369 mm/percpu-vm.c static struct page *pcpu_addr_to_page(void *addr)
page              245 mm/percpu.c    static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
page              247 mm/percpu.c    	page->index = (unsigned long)pcpu;
page              251 mm/percpu.c    static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
page              253 mm/percpu.c    	return (struct pcpu_chunk *)page->index;
page             1531 mm/percpu.c    static struct page *pcpu_addr_to_page(void *addr);
page             2837 mm/percpu.c    	struct page **pages;
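The percpu-vm/percpu-km code above only allocates and maps the backing pages; consumers see the chunk through alloc_percpu() and friends. A minimal consumer sketch; demo_percpu() is an illustrative name:

    #include <linux/cpumask.h>
    #include <linux/percpu.h>

    static int demo_percpu(void)
    {
            int cpu;
            unsigned long __percpu *counter = alloc_percpu(unsigned long);

            if (!counter)
                    return -ENOMEM;

            for_each_possible_cpu(cpu)
                    *per_cpu_ptr(counter, cpu) = 0;

            this_cpu_inc(*counter); /* preemption-safe bump of this
                                     * CPU's copy */

            free_percpu(counter);
            return 0;
    }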
page              177 mm/pgtable-generic.c 							  struct page, lru);
page               30 mm/process_vm_access.c static int process_vm_rw_pages(struct page **pages,
page               38 mm/process_vm_access.c 		struct page *page = *pages++;
page               46 mm/process_vm_access.c 			copied = copy_page_from_iter(page, offset, copy, iter);
page               47 mm/process_vm_access.c 			set_page_dirty_lock(page);
page               49 mm/process_vm_access.c 			copied = copy_page_to_iter(page, offset, copy, iter);
page               77 mm/process_vm_access.c 				    struct page **process_pages,
page              157 mm/process_vm_access.c 	struct page *pp_stack[PVM_MAX_PP_ARRAY_COUNT];
page              158 mm/process_vm_access.c 	struct page **process_pages = pp_stack;
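process_vm_rw_pages() above is the kernel half of the process_vm_readv()/process_vm_writev() syscalls: remote pages are pinned in batches and copied straight to or from the local iov_iter, with no intermediate buffer. A userspace sketch of the read direction; demo_peek() is illustrative, and the caller needs ptrace-style permission over the target:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/types.h>
    #include <sys/uio.h>

    int demo_peek(pid_t pid, void *remote_addr, char *buf, size_t len)
    {
            struct iovec local  = { .iov_base = buf,         .iov_len = len };
            struct iovec remote = { .iov_base = remote_addr, .iov_len = len };
            ssize_t n = process_vm_readv(pid, &local, 1, &remote, 1, 0);

            if (n < 0) {
                    perror("process_vm_readv");
                    return -1;
            }
            return (int)n;  /* bytes actually copied */
    }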
page               48 mm/readahead.c 					     struct page *page)
page               50 mm/readahead.c 	if (page_has_private(page)) {
page               51 mm/readahead.c 		if (!trylock_page(page))
page               53 mm/readahead.c 		page->mapping = mapping;
page               54 mm/readahead.c 		do_invalidatepage(page, 0, PAGE_SIZE);
page               55 mm/readahead.c 		page->mapping = NULL;
page               56 mm/readahead.c 		unlock_page(page);
page               58 mm/readahead.c 	put_page(page);
page               67 mm/readahead.c 	struct page *victim;
page               89 mm/readahead.c 			int (*filler)(void *, struct page *), void *data)
page               91 mm/readahead.c 	struct page *page;
page               95 mm/readahead.c 		page = lru_to_page(pages);
page               96 mm/readahead.c 		list_del(&page->lru);
page               97 mm/readahead.c 		if (add_to_page_cache_lru(page, mapping, page->index,
page               99 mm/readahead.c 			read_cache_pages_invalidate_page(mapping, page);
page              102 mm/readahead.c 		put_page(page);
page              104 mm/readahead.c 		ret = filler(data, page);
page              133 mm/readahead.c 		struct page *page = lru_to_page(pages);
page              134 mm/readahead.c 		list_del(&page->lru);
page              135 mm/readahead.c 		if (!add_to_page_cache_lru(page, mapping, page->index, gfp))
page              136 mm/readahead.c 			mapping->a_ops->readpage(filp, page);
page              137 mm/readahead.c 		put_page(page);
page              160 mm/readahead.c 	struct page *page;
page              182 mm/readahead.c 		page = xa_load(&mapping->i_pages, page_offset);
page              183 mm/readahead.c 		if (page && !xa_is_value(page)) {
page              196 mm/readahead.c 		page = __page_cache_alloc(gfp_mask);
page              197 mm/readahead.c 		if (!page)
page              199 mm/readahead.c 		page->index = page_offset;
page              200 mm/readahead.c 		list_add(&page->lru, &page_pool);
page              202 mm/readahead.c 			SetPageReadahead(page);
page              549 mm/readahead.c 			   struct page *page, pgoff_t offset,
page              559 mm/readahead.c 	if (PageWriteback(page))
page              562 mm/readahead.c 	ClearPageReadahead(page);
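Readahead is normally triggered implicitly by sequential access patterns, but userspace can also request it: posix_fadvise(POSIX_FADV_WILLNEED) and readahead(2) both funnel into the page-pool loop above. A one-call sketch; demo_prefetch() is illustrative:

    #include <fcntl.h>

    int demo_prefetch(int fd, off_t off, off_t len)
    {
            /* hint: populate the page cache for [off, off + len) */
            return posix_fadvise(fd, off, len, POSIX_FADV_WILLNEED);
    }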
page              465 mm/rmap.c      struct anon_vma *page_get_anon_vma(struct page *page)
page              471 mm/rmap.c      	anon_mapping = (unsigned long)READ_ONCE(page->mapping);
page              474 mm/rmap.c      	if (!page_mapped(page))
page              490 mm/rmap.c      	if (!page_mapped(page)) {
page              508 mm/rmap.c      struct anon_vma *page_lock_anon_vma_read(struct page *page)
page              515 mm/rmap.c      	anon_mapping = (unsigned long)READ_ONCE(page->mapping);
page              518 mm/rmap.c      	if (!page_mapped(page))
page              529 mm/rmap.c      		if (!page_mapped(page)) {
page              542 mm/rmap.c      	if (!page_mapped(page)) {
page              688 mm/rmap.c      unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
page              691 mm/rmap.c      	if (PageAnon(page)) {
page              692 mm/rmap.c      		struct anon_vma *page__anon_vma = page_anon_vma(page);
page              700 mm/rmap.c      	} else if (page->mapping) {
page              701 mm/rmap.c      		if (!vma->vm_file || vma->vm_file->f_mapping != page->mapping)
page              705 mm/rmap.c      	address = __vma_address(page, vma);
page              754 mm/rmap.c      static bool page_referenced_one(struct page *page, struct vm_area_struct *vma,
page              759 mm/rmap.c      		.page = page,
page              801 mm/rmap.c      		clear_page_idle(page);
page              802 mm/rmap.c      	if (test_and_clear_page_young(page))
page              837 mm/rmap.c      int page_referenced(struct page *page,
page              844 mm/rmap.c      		.mapcount = total_mapcount(page),
page              857 mm/rmap.c      	if (!page_rmapping(page))
page              860 mm/rmap.c      	if (!is_locked && (!PageAnon(page) || PageKsm(page))) {
page              861 mm/rmap.c      		we_locked = trylock_page(page);
page              875 mm/rmap.c      	rmap_walk(page, &rwc);
page              879 mm/rmap.c      		unlock_page(page);
page              884 mm/rmap.c      static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
page              888 mm/rmap.c      		.page = page,
page              902 mm/rmap.c      				min(vma->vm_end, address + page_size(page)));
page              930 mm/rmap.c      			flush_cache_page(vma, address, page_to_pfn(page));
page              966 mm/rmap.c      int page_mkclean(struct page *page)
page              976 mm/rmap.c      	BUG_ON(!PageLocked(page));
page              978 mm/rmap.c      	if (!page_mapped(page))
page              981 mm/rmap.c      	mapping = page_mapping(page);
page              985 mm/rmap.c      	rmap_walk(page, &rwc);
page             1001 mm/rmap.c      void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma)
page             1005 mm/rmap.c      	page = compound_head(page);
page             1007 mm/rmap.c      	VM_BUG_ON_PAGE(!PageLocked(page), page);
page             1016 mm/rmap.c      	WRITE_ONCE(page->mapping, (struct address_space *) anon_vma);
page             1026 mm/rmap.c      static void __page_set_anon_rmap(struct page *page,
page             1033 mm/rmap.c      	if (PageAnon(page))
page             1045 mm/rmap.c      	page->mapping = (struct address_space *) anon_vma;
page             1046 mm/rmap.c      	page->index = linear_page_index(vma, address);
page             1055 mm/rmap.c      static void __page_check_anon_rmap(struct page *page,
page             1071 mm/rmap.c      	BUG_ON(page_anon_vma(page)->root != vma->anon_vma->root);
page             1072 mm/rmap.c      	BUG_ON(page_to_pgoff(page) != linear_page_index(vma, address));
page             1088 mm/rmap.c      void page_add_anon_rmap(struct page *page,
page             1091 mm/rmap.c      	do_page_add_anon_rmap(page, vma, address, compound ? RMAP_COMPOUND : 0);
page             1099 mm/rmap.c      void do_page_add_anon_rmap(struct page *page,
page             1107 mm/rmap.c      		VM_BUG_ON_PAGE(!PageLocked(page), page);
page             1108 mm/rmap.c      		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
page             1109 mm/rmap.c      		mapcount = compound_mapcount_ptr(page);
page             1112 mm/rmap.c      		first = atomic_inc_and_test(&page->_mapcount);
page             1116 mm/rmap.c      		int nr = compound ? hpage_nr_pages(page) : 1;
page             1124 mm/rmap.c      			__inc_node_page_state(page, NR_ANON_THPS);
page             1125 mm/rmap.c      		__mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, nr);
page             1127 mm/rmap.c      	if (unlikely(PageKsm(page)))
page             1130 mm/rmap.c      	VM_BUG_ON_PAGE(!PageLocked(page), page);
page             1134 mm/rmap.c      		__page_set_anon_rmap(page, vma, address,
page             1137 mm/rmap.c      		__page_check_anon_rmap(page, vma, address);
page             1151 mm/rmap.c      void page_add_new_anon_rmap(struct page *page,
page             1154 mm/rmap.c      	int nr = compound ? hpage_nr_pages(page) : 1;
page             1157 mm/rmap.c      	__SetPageSwapBacked(page);
page             1159 mm/rmap.c      		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
page             1161 mm/rmap.c      		atomic_set(compound_mapcount_ptr(page), 0);
page             1162 mm/rmap.c      		__inc_node_page_state(page, NR_ANON_THPS);
page             1165 mm/rmap.c      		VM_BUG_ON_PAGE(PageTransCompound(page), page);
page             1167 mm/rmap.c      		atomic_set(&page->_mapcount, 0);
page             1169 mm/rmap.c      	__mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, nr);
page             1170 mm/rmap.c      	__page_set_anon_rmap(page, vma, address, 1);
page             1180 mm/rmap.c      void page_add_file_rmap(struct page *page, bool compound)
page             1184 mm/rmap.c      	VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page);
page             1185 mm/rmap.c      	lock_page_memcg(page);
page             1186 mm/rmap.c      	if (compound && PageTransHuge(page)) {
page             1188 mm/rmap.c      			if (atomic_inc_and_test(&page[i]._mapcount))
page             1191 mm/rmap.c      		if (!atomic_inc_and_test(compound_mapcount_ptr(page)))
page             1193 mm/rmap.c      		if (PageSwapBacked(page))
page             1194 mm/rmap.c      			__inc_node_page_state(page, NR_SHMEM_PMDMAPPED);
page             1196 mm/rmap.c      			__inc_node_page_state(page, NR_FILE_PMDMAPPED);
page             1198 mm/rmap.c      		if (PageTransCompound(page) && page_mapping(page)) {
page             1199 mm/rmap.c      			VM_WARN_ON_ONCE(!PageLocked(page));
page             1201 mm/rmap.c      			SetPageDoubleMap(compound_head(page));
page             1202 mm/rmap.c      			if (PageMlocked(page))
page             1203 mm/rmap.c      				clear_page_mlock(compound_head(page));
page             1205 mm/rmap.c      		if (!atomic_inc_and_test(&page->_mapcount))
page             1208 mm/rmap.c      	__mod_lruvec_page_state(page, NR_FILE_MAPPED, nr);
page             1210 mm/rmap.c      	unlock_page_memcg(page);
page             1213 mm/rmap.c      static void page_remove_file_rmap(struct page *page, bool compound)
page             1217 mm/rmap.c      	VM_BUG_ON_PAGE(compound && !PageHead(page), page);
page             1218 mm/rmap.c      	lock_page_memcg(page);
page             1221 mm/rmap.c      	if (unlikely(PageHuge(page))) {
page             1223 mm/rmap.c      		atomic_dec(compound_mapcount_ptr(page));
page             1228 mm/rmap.c      	if (compound && PageTransHuge(page)) {
page             1230 mm/rmap.c      			if (atomic_add_negative(-1, &page[i]._mapcount))
page             1233 mm/rmap.c      		if (!atomic_add_negative(-1, compound_mapcount_ptr(page)))
page             1235 mm/rmap.c      		if (PageSwapBacked(page))
page             1236 mm/rmap.c      			__dec_node_page_state(page, NR_SHMEM_PMDMAPPED);
page             1238 mm/rmap.c      			__dec_node_page_state(page, NR_FILE_PMDMAPPED);
page             1240 mm/rmap.c      		if (!atomic_add_negative(-1, &page->_mapcount))
page             1249 mm/rmap.c      	__mod_lruvec_page_state(page, NR_FILE_MAPPED, -nr);
page             1251 mm/rmap.c      	if (unlikely(PageMlocked(page)))
page             1252 mm/rmap.c      		clear_page_mlock(page);
page             1254 mm/rmap.c      	unlock_page_memcg(page);
page             1257 mm/rmap.c      static void page_remove_anon_compound_rmap(struct page *page)
page             1261 mm/rmap.c      	if (!atomic_add_negative(-1, compound_mapcount_ptr(page)))
page             1265 mm/rmap.c      	if (unlikely(PageHuge(page)))
page             1271 mm/rmap.c      	__dec_node_page_state(page, NR_ANON_THPS);
page             1273 mm/rmap.c      	if (TestClearPageDoubleMap(page)) {
page             1279 mm/rmap.c      			if (atomic_add_negative(-1, &page[i]._mapcount))
page             1286 mm/rmap.c      	if (unlikely(PageMlocked(page)))
page             1287 mm/rmap.c      		clear_page_mlock(page);
page             1290 mm/rmap.c      		__mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, -nr);
page             1291 mm/rmap.c      		deferred_split_huge_page(page);
page             1302 mm/rmap.c      void page_remove_rmap(struct page *page, bool compound)
page             1304 mm/rmap.c      	if (!PageAnon(page))
page             1305 mm/rmap.c      		return page_remove_file_rmap(page, compound);
page             1308 mm/rmap.c      		return page_remove_anon_compound_rmap(page);
page             1311 mm/rmap.c      	if (!atomic_add_negative(-1, &page->_mapcount))
page             1319 mm/rmap.c      	__dec_node_page_state(page, NR_ANON_MAPPED);
page             1321 mm/rmap.c      	if (unlikely(PageMlocked(page)))
page             1322 mm/rmap.c      		clear_page_mlock(page);
page             1324 mm/rmap.c      	if (PageTransCompound(page))
page             1325 mm/rmap.c      		deferred_split_huge_page(compound_head(page));
page             1341 mm/rmap.c      static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
page             1346 mm/rmap.c      		.page = page,
page             1351 mm/rmap.c      	struct page *subpage;
page             1361 mm/rmap.c      	    is_zone_device_page(page) && !is_device_private_page(page))
page             1366 mm/rmap.c      				flags & TTU_SPLIT_FREEZE, page);
page             1379 mm/rmap.c      				min(vma->vm_end, address + page_size(page)));
page             1380 mm/rmap.c      	if (PageHuge(page)) {
page             1394 mm/rmap.c      			VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page);
page             1396 mm/rmap.c      			set_pmd_migration_entry(&pvmw, page);
page             1409 mm/rmap.c      				if (!PageTransCompound(page)) {
page             1414 mm/rmap.c      					mlock_vma_page(page);
page             1425 mm/rmap.c      		VM_BUG_ON_PAGE(!pvmw.pte, page);
page             1427 mm/rmap.c      		subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
page             1430 mm/rmap.c      		if (PageHuge(page)) {
page             1460 mm/rmap.c      		    is_zone_device_page(page)) {
page             1471 mm/rmap.c      			entry = make_migration_entry(page, 0);
page             1487 mm/rmap.c      			subpage = page;
page             1520 mm/rmap.c      			set_page_dirty(page);
page             1525 mm/rmap.c      		if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
page             1527 mm/rmap.c      			if (PageHuge(page)) {
page             1528 mm/rmap.c      				hugetlb_count_sub(compound_nr(page), mm);
page             1533 mm/rmap.c      				dec_mm_counter(mm, mm_counter(page));
page             1548 mm/rmap.c      			dec_mm_counter(mm, mm_counter(page));
page             1579 mm/rmap.c      		} else if (PageAnon(page)) {
page             1586 mm/rmap.c      			if (unlikely(PageSwapBacked(page) != PageSwapCache(page))) {
page             1597 mm/rmap.c      			if (!PageSwapBacked(page)) {
page             1598 mm/rmap.c      				if (!PageDirty(page)) {
page             1611 mm/rmap.c      				SetPageSwapBacked(page);
page             1655 mm/rmap.c      			dec_mm_counter(mm, mm_counter_file(page));
page             1665 mm/rmap.c      		page_remove_rmap(subpage, PageHuge(page));
page             1666 mm/rmap.c      		put_page(page);
page             1693 mm/rmap.c      static int page_mapcount_is_zero(struct page *page)
page             1695 mm/rmap.c      	return !total_mapcount(page);
page             1708 mm/rmap.c      bool try_to_unmap(struct page *page, enum ttu_flags flags)
page             1726 mm/rmap.c      	    && !PageKsm(page) && PageAnon(page))
page             1730 mm/rmap.c      		rmap_walk_locked(page, &rwc);
page             1732 mm/rmap.c      		rmap_walk(page, &rwc);
page             1734 mm/rmap.c      	return !page_mapcount(page) ? true : false;
page             1737 mm/rmap.c      static int page_not_mapped(struct page *page)
page             1739 mm/rmap.c      	return !page_mapped(page);
page             1751 mm/rmap.c      void try_to_munlock(struct page *page)
page             1761 mm/rmap.c      	VM_BUG_ON_PAGE(!PageLocked(page) || PageLRU(page), page);
page             1762 mm/rmap.c      	VM_BUG_ON_PAGE(PageCompound(page) && PageDoubleMap(page), page);
page             1764 mm/rmap.c      	rmap_walk(page, &rwc);
page             1776 mm/rmap.c      static struct anon_vma *rmap_walk_anon_lock(struct page *page,
page             1782 mm/rmap.c      		return rwc->anon_lock(page);
page             1790 mm/rmap.c      	anon_vma = page_anon_vma(page);
page             1812 mm/rmap.c      static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
page             1820 mm/rmap.c      		anon_vma = page_anon_vma(page);
page             1822 mm/rmap.c      		VM_BUG_ON_PAGE(!anon_vma, page);
page             1824 mm/rmap.c      		anon_vma = rmap_walk_anon_lock(page, rwc);
page             1829 mm/rmap.c      	pgoff_start = page_to_pgoff(page);
page             1830 mm/rmap.c      	pgoff_end = pgoff_start + hpage_nr_pages(page) - 1;
page             1834 mm/rmap.c      		unsigned long address = vma_address(page, vma);
page             1841 mm/rmap.c      		if (!rwc->rmap_one(page, vma, address, rwc->arg))
page             1843 mm/rmap.c      		if (rwc->done && rwc->done(page))
page             1864 mm/rmap.c      static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
page             1867 mm/rmap.c      	struct address_space *mapping = page_mapping(page);
page             1877 mm/rmap.c      	VM_BUG_ON_PAGE(!PageLocked(page), page);
page             1882 mm/rmap.c      	pgoff_start = page_to_pgoff(page);
page             1883 mm/rmap.c      	pgoff_end = pgoff_start + hpage_nr_pages(page) - 1;
page             1888 mm/rmap.c      		unsigned long address = vma_address(page, vma);
page             1895 mm/rmap.c      		if (!rwc->rmap_one(page, vma, address, rwc->arg))
page             1897 mm/rmap.c      		if (rwc->done && rwc->done(page))
page             1906 mm/rmap.c      void rmap_walk(struct page *page, struct rmap_walk_control *rwc)
page             1908 mm/rmap.c      	if (unlikely(PageKsm(page)))
page             1909 mm/rmap.c      		rmap_walk_ksm(page, rwc);
page             1910 mm/rmap.c      	else if (PageAnon(page))
page             1911 mm/rmap.c      		rmap_walk_anon(page, rwc, false);
page             1913 mm/rmap.c      		rmap_walk_file(page, rwc, false);
page             1917 mm/rmap.c      void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc)
page             1920 mm/rmap.c      	VM_BUG_ON_PAGE(PageKsm(page), page);
page             1921 mm/rmap.c      	if (PageAnon(page))
page             1922 mm/rmap.c      		rmap_walk_anon(page, rwc, true);
page             1924 mm/rmap.c      		rmap_walk_file(page, rwc, true);
page             1933 mm/rmap.c      void hugepage_add_anon_rmap(struct page *page,
page             1939 mm/rmap.c      	BUG_ON(!PageLocked(page));
page             1942 mm/rmap.c      	first = atomic_inc_and_test(compound_mapcount_ptr(page));
page             1944 mm/rmap.c      		__page_set_anon_rmap(page, vma, address, 0);
page             1947 mm/rmap.c      void hugepage_add_new_anon_rmap(struct page *page,
page             1951 mm/rmap.c      	atomic_set(compound_mapcount_ptr(page), 0);
page             1952 mm/rmap.c      	__page_set_anon_rmap(page, vma, address, 1);
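rmap_walk() drives a caller-supplied rmap_walk_control across every (vma, address) pair that maps a page; page_referenced(), page_mkclean() and try_to_unmap() above differ mainly in their rmap_one callback. A sketch of the pattern, with the demo_* names illustrative; note the locking rules visible above (anon pages take the anon_vma lock, file pages the i_mmap lock, and the caller must hold a page reference):

    #include <linux/rmap.h>

    static bool demo_rmap_one(struct page *page, struct vm_area_struct *vma,
                              unsigned long address, void *arg)
    {
            int *nr_vmas = arg;

            (*nr_vmas)++;   /* just count the mappings */
            return true;    /* keep walking; false aborts the walk */
    }

    static int demo_count_mappings(struct page *page)
    {
            int nr_vmas = 0;
            struct rmap_walk_control rwc = {
                    .rmap_one = demo_rmap_one,
                    .arg = &nr_vmas,
            };

            rmap_walk(page, &rwc);
            return nr_vmas;
    }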
page              139 mm/shmem.c     static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
page              140 mm/shmem.c     static int shmem_replace_page(struct page **pagep, gfp_t gfp,
page              143 mm/shmem.c     			     struct page **pagep, enum sgp_type sgp,
page              147 mm/shmem.c     		struct page **pagep, enum sgp_type sgp,
page              152 mm/shmem.c     		struct page **pagep, enum sgp_type sgp)
page              467 mm/shmem.c     	struct page *page;
page              519 mm/shmem.c     		page = find_get_page(inode->i_mapping,
page              521 mm/shmem.c     		if (!page)
page              525 mm/shmem.c     		if (!PageTransHuge(page)) {
page              526 mm/shmem.c     			put_page(page);
page              537 mm/shmem.c     		if (!trylock_page(page)) {
page              538 mm/shmem.c     			put_page(page);
page              542 mm/shmem.c     		ret = split_huge_page(page);
page              543 mm/shmem.c     		unlock_page(page);
page              544 mm/shmem.c     		put_page(page);
page              606 mm/shmem.c     static int shmem_add_to_page_cache(struct page *page,
page              610 mm/shmem.c     	XA_STATE_ORDER(xas, &mapping->i_pages, index, compound_order(page));
page              612 mm/shmem.c     	unsigned long nr = compound_nr(page);
page              614 mm/shmem.c     	VM_BUG_ON_PAGE(PageTail(page), page);
page              615 mm/shmem.c     	VM_BUG_ON_PAGE(index != round_down(index, nr), page);
page              616 mm/shmem.c     	VM_BUG_ON_PAGE(!PageLocked(page), page);
page              617 mm/shmem.c     	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
page              618 mm/shmem.c     	VM_BUG_ON(expected && PageTransHuge(page));
page              620 mm/shmem.c     	page_ref_add(page, nr);
page              621 mm/shmem.c     	page->mapping = mapping;
page              622 mm/shmem.c     	page->index = index;
page              634 mm/shmem.c     		xas_store(&xas, page);
page              639 mm/shmem.c     		if (PageTransHuge(page)) {
page              641 mm/shmem.c     			__inc_node_page_state(page, NR_SHMEM_THPS);
page              644 mm/shmem.c     		__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
page              645 mm/shmem.c     		__mod_node_page_state(page_pgdat(page), NR_SHMEM, nr);
page              651 mm/shmem.c     		page->mapping = NULL;
page              652 mm/shmem.c     		page_ref_sub(page, nr);
page              662 mm/shmem.c     static void shmem_delete_from_page_cache(struct page *page, void *radswap)
page              664 mm/shmem.c     	struct address_space *mapping = page->mapping;
page              667 mm/shmem.c     	VM_BUG_ON_PAGE(PageCompound(page), page);
page              670 mm/shmem.c     	error = shmem_replace_entry(mapping, page->index, page, radswap);
page              671 mm/shmem.c     	page->mapping = NULL;
page              673 mm/shmem.c     	__dec_node_page_state(page, NR_FILE_PAGES);
page              674 mm/shmem.c     	__dec_node_page_state(page, NR_SHMEM);
page              676 mm/shmem.c     	put_page(page);
page              706 mm/shmem.c     	struct page *page;
page              710 mm/shmem.c     	xas_for_each(&xas, page, end - 1) {
page              711 mm/shmem.c     		if (xas_retry(&xas, page))
page              713 mm/shmem.c     		if (xa_is_value(page))
page              822 mm/shmem.c     			struct page *page = pvec.pages[i];
page              828 mm/shmem.c     			if (xa_is_value(page)) {
page              832 mm/shmem.c     								index, page);
page              836 mm/shmem.c     			VM_BUG_ON_PAGE(page_to_pgoff(page) != index, page);
page              838 mm/shmem.c     			if (!trylock_page(page))
page              841 mm/shmem.c     			if (PageTransTail(page)) {
page              843 mm/shmem.c     				clear_highpage(page);
page              844 mm/shmem.c     				unlock_page(page);
page              846 mm/shmem.c     			} else if (PageTransHuge(page)) {
page              852 mm/shmem.c     					clear_highpage(page);
page              853 mm/shmem.c     					unlock_page(page);
page              860 mm/shmem.c     			if (!unfalloc || !PageUptodate(page)) {
page              861 mm/shmem.c     				VM_BUG_ON_PAGE(PageTail(page), page);
page              862 mm/shmem.c     				if (page_mapping(page) == mapping) {
page              863 mm/shmem.c     					VM_BUG_ON_PAGE(PageWriteback(page), page);
page              864 mm/shmem.c     					truncate_inode_page(mapping, page);
page              867 mm/shmem.c     			unlock_page(page);
page              876 mm/shmem.c     		struct page *page = NULL;
page              877 mm/shmem.c     		shmem_getpage(inode, start - 1, &page, SGP_READ);
page              878 mm/shmem.c     		if (page) {
page              884 mm/shmem.c     			zero_user_segment(page, partial_start, top);
page              885 mm/shmem.c     			set_page_dirty(page);
page              886 mm/shmem.c     			unlock_page(page);
page              887 mm/shmem.c     			put_page(page);
page              891 mm/shmem.c     		struct page *page = NULL;
page              892 mm/shmem.c     		shmem_getpage(inode, end, &page, SGP_READ);
page              893 mm/shmem.c     		if (page) {
page              894 mm/shmem.c     			zero_user_segment(page, 0, partial_end);
page              895 mm/shmem.c     			set_page_dirty(page);
page              896 mm/shmem.c     			unlock_page(page);
page              897 mm/shmem.c     			put_page(page);
page              919 mm/shmem.c     			struct page *page = pvec.pages[i];
page              925 mm/shmem.c     			if (xa_is_value(page)) {
page              928 mm/shmem.c     				if (shmem_free_swap(mapping, index, page)) {
page              937 mm/shmem.c     			lock_page(page);
page              939 mm/shmem.c     			if (PageTransTail(page)) {
page              941 mm/shmem.c     				clear_highpage(page);
page              942 mm/shmem.c     				unlock_page(page);
page              951 mm/shmem.c     			} else if (PageTransHuge(page)) {
page              957 mm/shmem.c     					clear_highpage(page);
page              958 mm/shmem.c     					unlock_page(page);
page              965 mm/shmem.c     			if (!unfalloc || !PageUptodate(page)) {
page              966 mm/shmem.c     				VM_BUG_ON_PAGE(PageTail(page), page);
page              967 mm/shmem.c     				if (page_mapping(page) == mapping) {
page              968 mm/shmem.c     					VM_BUG_ON_PAGE(PageWriteback(page), page);
page              969 mm/shmem.c     					truncate_inode_page(mapping, page);
page              972 mm/shmem.c     					unlock_page(page);
page              977 mm/shmem.c     			unlock_page(page);
page             1123 mm/shmem.c     				   struct page **entries, pgoff_t *indices,
page             1127 mm/shmem.c     	struct page *page;
page             1135 mm/shmem.c     	xas_for_each(&xas, page, ULONG_MAX) {
page             1136 mm/shmem.c     		if (xas_retry(&xas, page))
page             1139 mm/shmem.c     		if (!xa_is_value(page))
page             1142 mm/shmem.c     		entry = radix_to_swp_entry(page);
page             1150 mm/shmem.c     		entries[ret] = page;
page             1177 mm/shmem.c     		struct page *page = pvec.pages[i];
page             1179 mm/shmem.c     		if (!xa_is_value(page))
page             1182 mm/shmem.c     					  &page, SGP_CACHE,
page             1186 mm/shmem.c     			unlock_page(page);
page             1187 mm/shmem.c     			put_page(page);
page             1293 mm/shmem.c     static int shmem_writepage(struct page *page, struct writeback_control *wbc)
page             1301 mm/shmem.c     	VM_BUG_ON_PAGE(PageCompound(page), page);
page             1302 mm/shmem.c     	BUG_ON(!PageLocked(page));
page             1303 mm/shmem.c     	mapping = page->mapping;
page             1304 mm/shmem.c     	index = page->index;
page             1335 mm/shmem.c     	if (!PageUptodate(page)) {
page             1351 mm/shmem.c     		clear_highpage(page);
page             1352 mm/shmem.c     		flush_dcache_page(page);
page             1353 mm/shmem.c     		SetPageUptodate(page);
page             1356 mm/shmem.c     	swap = get_swap_page(page);
page             1372 mm/shmem.c     	if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
page             1379 mm/shmem.c     		shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));
page             1382 mm/shmem.c     		BUG_ON(page_mapped(page));
page             1383 mm/shmem.c     		swap_writepage(page, wbc);
page             1388 mm/shmem.c     	put_swap_page(page, swap);
page             1390 mm/shmem.c     	set_page_dirty(page);
page             1393 mm/shmem.c     	unlock_page(page);
page             1450 mm/shmem.c     static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
page             1454 mm/shmem.c     	struct page *page;
page             1460 mm/shmem.c     	page = swap_cluster_readahead(swap, gfp, &vmf);
page             1463 mm/shmem.c     	return page;
page             1466 mm/shmem.c     static struct page *shmem_alloc_hugepage(gfp_t gfp,
page             1472 mm/shmem.c     	struct page *page;
page             1483 mm/shmem.c     	page = alloc_pages_vma(gfp | __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN,
page             1486 mm/shmem.c     	if (page)
page             1487 mm/shmem.c     		prep_transhuge_page(page);
page             1488 mm/shmem.c     	return page;
page             1491 mm/shmem.c     static struct page *shmem_alloc_page(gfp_t gfp,
page             1495 mm/shmem.c     	struct page *page;
page             1498 mm/shmem.c     	page = alloc_page_vma(gfp, &pvma, 0);
page             1501 mm/shmem.c     	return page;
page             1504 mm/shmem.c     static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
page             1509 mm/shmem.c     	struct page *page;
page             1521 mm/shmem.c     		page = shmem_alloc_hugepage(gfp, info, index);
page             1523 mm/shmem.c     		page = shmem_alloc_page(gfp, info, index);
page             1524 mm/shmem.c     	if (page) {
page             1525 mm/shmem.c     		__SetPageLocked(page);
page             1526 mm/shmem.c     		__SetPageSwapBacked(page);
page             1527 mm/shmem.c     		return page;
page             1548 mm/shmem.c     static bool shmem_should_replace_page(struct page *page, gfp_t gfp)
page             1550 mm/shmem.c     	return page_zonenum(page) > gfp_zone(gfp);
page             1553 mm/shmem.c     static int shmem_replace_page(struct page **pagep, gfp_t gfp,
page             1556 mm/shmem.c     	struct page *oldpage, *newpage;
page             1627 mm/shmem.c     			     struct page **pagep, enum sgp_type sgp,
page             1635 mm/shmem.c     	struct page *page;
page             1644 mm/shmem.c     	page = lookup_swap_cache(swap, NULL, 0);
page             1645 mm/shmem.c     	if (!page) {
page             1653 mm/shmem.c     		page = shmem_swapin(swap, gfp, info, index);
page             1654 mm/shmem.c     		if (!page) {
page             1661 mm/shmem.c     	lock_page(page);
page             1662 mm/shmem.c     	if (!PageSwapCache(page) || page_private(page) != swap.val ||
page             1667 mm/shmem.c     	if (!PageUptodate(page)) {
page             1671 mm/shmem.c     	wait_on_page_writeback(page);
page             1673 mm/shmem.c     	if (shmem_should_replace_page(page, gfp)) {
page             1674 mm/shmem.c     		error = shmem_replace_page(&page, gfp, info, index);
page             1679 mm/shmem.c     	error = mem_cgroup_try_charge_delay(page, charge_mm, gfp, &memcg,
page             1682 mm/shmem.c     		error = shmem_add_to_page_cache(page, mapping, index,
page             1695 mm/shmem.c     			mem_cgroup_cancel_charge(page, memcg, false);
page             1696 mm/shmem.c     			delete_from_swap_cache(page);
page             1702 mm/shmem.c     	mem_cgroup_commit_charge(page, memcg, true, false);
page             1710 mm/shmem.c     		mark_page_accessed(page);
page             1712 mm/shmem.c     	delete_from_swap_cache(page);
page             1713 mm/shmem.c     	set_page_dirty(page);
page             1716 mm/shmem.c     	*pagep = page;
page             1722 mm/shmem.c     	if (page) {
page             1723 mm/shmem.c     		unlock_page(page);
page             1724 mm/shmem.c     		put_page(page);
page             1741 mm/shmem.c     	struct page **pagep, enum sgp_type sgp, gfp_t gfp,
page             1750 mm/shmem.c     	struct page *page;
page             1770 mm/shmem.c     	page = find_lock_entry(mapping, index);
page             1771 mm/shmem.c     	if (xa_is_value(page)) {
page             1772 mm/shmem.c     		error = shmem_swapin_page(inode, index, &page,
page             1777 mm/shmem.c     		*pagep = page;
page             1781 mm/shmem.c     	if (page && sgp == SGP_WRITE)
page             1782 mm/shmem.c     		mark_page_accessed(page);
page             1785 mm/shmem.c     	if (page && !PageUptodate(page)) {
page             1788 mm/shmem.c     		unlock_page(page);
page             1789 mm/shmem.c     		put_page(page);
page             1790 mm/shmem.c     		page = NULL;
page             1792 mm/shmem.c     	if (page || sgp == SGP_READ) {
page             1793 mm/shmem.c     		*pagep = page;
page             1834 mm/shmem.c     	page = shmem_alloc_and_acct_page(gfp, inode, index, true);
page             1835 mm/shmem.c     	if (IS_ERR(page)) {
page             1837 mm/shmem.c     		page = shmem_alloc_and_acct_page(gfp, inode,
page             1840 mm/shmem.c     	if (IS_ERR(page)) {
page             1843 mm/shmem.c     		error = PTR_ERR(page);
page             1844 mm/shmem.c     		page = NULL;
page             1863 mm/shmem.c     	if (PageTransHuge(page))
page             1869 mm/shmem.c     		__SetPageReferenced(page);
page             1871 mm/shmem.c     	error = mem_cgroup_try_charge_delay(page, charge_mm, gfp, &memcg,
page             1872 mm/shmem.c     					    PageTransHuge(page));
page             1875 mm/shmem.c     	error = shmem_add_to_page_cache(page, mapping, hindex,
page             1878 mm/shmem.c     		mem_cgroup_cancel_charge(page, memcg,
page             1879 mm/shmem.c     					 PageTransHuge(page));
page             1882 mm/shmem.c     	mem_cgroup_commit_charge(page, memcg, false,
page             1883 mm/shmem.c     				 PageTransHuge(page));
page             1884 mm/shmem.c     	lru_cache_add_anon(page);
page             1887 mm/shmem.c     	info->alloced += compound_nr(page);
page             1888 mm/shmem.c     	inode->i_blocks += BLOCKS_PER_PAGE << compound_order(page);
page             1893 mm/shmem.c     	if (PageTransHuge(page) &&
page             1924 mm/shmem.c     	if (sgp != SGP_WRITE && !PageUptodate(page)) {
page             1925 mm/shmem.c     		struct page *head = compound_head(page);
page             1939 mm/shmem.c     			ClearPageDirty(page);
page             1940 mm/shmem.c     			delete_from_page_cache(page);
page             1948 mm/shmem.c     	*pagep = page + index - hindex;
page             1955 mm/shmem.c     	shmem_inode_unacct_blocks(inode, compound_nr(page));
page             1957 mm/shmem.c     	if (PageTransHuge(page)) {
page             1958 mm/shmem.c     		unlock_page(page);
page             1959 mm/shmem.c     		put_page(page);
page             1963 mm/shmem.c     	if (page) {
page             1964 mm/shmem.c     		unlock_page(page);
page             1965 mm/shmem.c     		put_page(page);
page             2066 mm/shmem.c     	err = shmem_getpage_gfp(inode, vmf->pgoff, &vmf->page, sgp,
page             2314 mm/shmem.c     				  struct page **pagep)
page             2324 mm/shmem.c     	struct page *page;
page             2334 mm/shmem.c     		page = shmem_alloc_page(gfp, info, pgoff);
page             2335 mm/shmem.c     		if (!page)
page             2339 mm/shmem.c     			page_kaddr = kmap_atomic(page);
page             2347 mm/shmem.c     				*pagep = page;
page             2353 mm/shmem.c     			clear_highpage(page);
page             2356 mm/shmem.c     		page = *pagep;
page             2360 mm/shmem.c     	VM_BUG_ON(PageLocked(page) || PageSwapBacked(page));
page             2361 mm/shmem.c     	__SetPageLocked(page);
page             2362 mm/shmem.c     	__SetPageSwapBacked(page);
page             2363 mm/shmem.c     	__SetPageUptodate(page);
page             2371 mm/shmem.c     	ret = mem_cgroup_try_charge_delay(page, dst_mm, gfp, &memcg, false);
page             2375 mm/shmem.c     	ret = shmem_add_to_page_cache(page, mapping, pgoff, NULL,
page             2380 mm/shmem.c     	mem_cgroup_commit_charge(page, memcg, false, false);
page             2382 mm/shmem.c     	_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
page             2393 mm/shmem.c     		set_page_dirty(page);
page             2407 mm/shmem.c     	lru_cache_add_anon(page);
page             2415 mm/shmem.c     	inc_mm_counter(dst_mm, mm_counter_file(page));
page             2416 mm/shmem.c     	page_add_file_rmap(page, false);
page             2422 mm/shmem.c     	unlock_page(page);
page             2428 mm/shmem.c     	ClearPageDirty(page);
page             2429 mm/shmem.c     	delete_from_page_cache(page);
page             2431 mm/shmem.c     	mem_cgroup_cancel_charge(page, memcg, false);
page             2433 mm/shmem.c     	unlock_page(page);
page             2434 mm/shmem.c     	put_page(page);
page             2445 mm/shmem.c     			   struct page **pagep)
page             2456 mm/shmem.c     	struct page *page = NULL;
page             2459 mm/shmem.c     				      dst_addr, 0, true, &page);
page             2475 mm/shmem.c     			struct page **pagep, void **fsdata)
page             2496 mm/shmem.c     			struct page *page, void *fsdata)
page             2503 mm/shmem.c     	if (!PageUptodate(page)) {
page             2504 mm/shmem.c     		struct page *head = compound_head(page);
page             2505 mm/shmem.c     		if (PageTransCompound(page)) {
page             2509 mm/shmem.c     				if (head + i == page)
page             2517 mm/shmem.c     			zero_user_segments(page, 0, from,
page             2522 mm/shmem.c     	set_page_dirty(page);
page             2523 mm/shmem.c     	unlock_page(page);
page             2524 mm/shmem.c     	put_page(page);
page             2553 mm/shmem.c     		struct page *page = NULL;
page             2567 mm/shmem.c     		error = shmem_getpage(inode, index, &page, sgp);
page             2573 mm/shmem.c     		if (page) {
page             2575 mm/shmem.c     				set_page_dirty(page);
page             2576 mm/shmem.c     			unlock_page(page);
page             2589 mm/shmem.c     				if (page)
page             2590 mm/shmem.c     					put_page(page);
page             2596 mm/shmem.c     		if (page) {
page             2603 mm/shmem.c     				flush_dcache_page(page);
page             2608 mm/shmem.c     				mark_page_accessed(page);
page             2610 mm/shmem.c     			page = ZERO_PAGE(0);
page             2611 mm/shmem.c     			get_page(page);
page             2618 mm/shmem.c     		ret = copy_page_to_iter(page, offset, nr, to);
page             2624 mm/shmem.c     		put_page(page);
page             2645 mm/shmem.c     	struct page *page;
page             2669 mm/shmem.c     			page = pvec.pages[i];
page             2670 mm/shmem.c     			if (page && !xa_is_value(page)) {
page             2671 mm/shmem.c     				if (!PageUptodate(page))
page             2672 mm/shmem.c     					page = NULL;
page             2675 mm/shmem.c     			    (page && whence == SEEK_DATA) ||
page             2676 mm/shmem.c     			    (!page && whence == SEEK_HOLE)) {
page             2802 mm/shmem.c     		struct page *page;
page             2813 mm/shmem.c     			error = shmem_getpage(inode, index, &page, SGP_FALLOC);
page             2829 mm/shmem.c     		if (!PageUptodate(page))
page             2839 mm/shmem.c     		set_page_dirty(page);
page             2840 mm/shmem.c     		unlock_page(page);
page             2841 mm/shmem.c     		put_page(page);
page             3105 mm/shmem.c     	struct page *page;
page             3136 mm/shmem.c     		error = shmem_getpage(inode, 0, &page, SGP_WRITE);
page             3143 mm/shmem.c     		memcpy(page_address(page), symname, len);
page             3144 mm/shmem.c     		SetPageUptodate(page);
page             3145 mm/shmem.c     		set_page_dirty(page);
page             3146 mm/shmem.c     		unlock_page(page);
page             3147 mm/shmem.c     		put_page(page);
page             3166 mm/shmem.c     	struct page *page = NULL;
page             3169 mm/shmem.c     		page = find_get_page(inode->i_mapping, 0);
page             3170 mm/shmem.c     		if (!page)
page             3172 mm/shmem.c     		if (!PageUptodate(page)) {
page             3173 mm/shmem.c     			put_page(page);
page             3177 mm/shmem.c     		error = shmem_getpage(inode, 0, &page, SGP_READ);
page             3180 mm/shmem.c     		unlock_page(page);
page             3182 mm/shmem.c     	set_delayed_call(done, shmem_put_link, page);
page             3183 mm/shmem.c     	return page_address(page);
page             4217 mm/shmem.c     struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
page             4222 mm/shmem.c     	struct page *page;
page             4226 mm/shmem.c     	error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE,
page             4229 mm/shmem.c     		page = ERR_PTR(error);
page             4231 mm/shmem.c     		unlock_page(page);
page             4232 mm/shmem.c     	return page;
page               61 mm/shuffle.c   static struct page * __meminit shuffle_valid_page(unsigned long pfn, int order)
page               63 mm/shuffle.c   	struct page *page;
page               79 mm/shuffle.c   	page = pfn_to_page(pfn);
page               80 mm/shuffle.c   	if (!PageBuddy(page))
page               87 mm/shuffle.c   	if (page_order(page) != order)
page               90 mm/shuffle.c   	return page;
page              118 mm/shuffle.c   		struct page *page_i, *page_j;
page              186 mm/shuffle.c   void add_to_free_area_random(struct page *page, struct free_area *area,
page              202 mm/shuffle.c   		add_to_free_area(page, area, migratetype);
page              204 mm/shuffle.c   		add_to_free_area_tail(page, area, migratetype);
page              220 mm/slab.c      				struct kmem_cache_node *n, struct page *page,
page              374 mm/slab.c      static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
page              377 mm/slab.c      	return page->s_mem + cache->size * idx;
page              552 mm/slab.c      					struct page *page, void *objp)
page              558 mm/slab.c      	page_node = page_to_nid(page);
page             1359 mm/slab.c      static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
page             1362 mm/slab.c      	struct page *page;
page             1366 mm/slab.c      	page = __alloc_pages_node(nodeid, flags, cachep->gfporder);
page             1367 mm/slab.c      	if (!page) {
page             1372 mm/slab.c      	if (charge_slab_page(page, flags, cachep->gfporder, cachep)) {
page             1373 mm/slab.c      		__free_pages(page, cachep->gfporder);
page             1377 mm/slab.c      	__SetPageSlab(page);
page             1379 mm/slab.c      	if (sk_memalloc_socks() && page_is_pfmemalloc(page))
page             1380 mm/slab.c      		SetPageSlabPfmemalloc(page);
page             1382 mm/slab.c      	return page;
page             1388 mm/slab.c      static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
page             1392 mm/slab.c      	BUG_ON(!PageSlab(page));
page             1393 mm/slab.c      	__ClearPageSlabPfmemalloc(page);
page             1394 mm/slab.c      	__ClearPageSlab(page);
page             1395 mm/slab.c      	page_mapcount_reset(page);
page             1396 mm/slab.c      	page->mapping = NULL;
page             1400 mm/slab.c      	uncharge_slab_page(page, order, cachep);
page             1401 mm/slab.c      	__free_pages(page, order);
page             1407 mm/slab.c      	struct page *page;
page             1409 mm/slab.c      	page = container_of(head, struct page, rcu_head);
page             1410 mm/slab.c      	cachep = page->slab_cache;
page             1412 mm/slab.c      	kmem_freepages(cachep, page);
page             1548 mm/slab.c      		struct page *page = virt_to_head_page(objp);
page             1551 mm/slab.c      		objnr = obj_to_index(cachep, page, objp);
page             1553 mm/slab.c      			objp = index_to_obj(cachep, page, objnr - 1);
page             1559 mm/slab.c      			objp = index_to_obj(cachep, page, objnr + 1);
page             1570 mm/slab.c      						struct page *page)
page             1575 mm/slab.c      		poison_obj(cachep, page->freelist - obj_offset(cachep),
page             1580 mm/slab.c      		void *objp = index_to_obj(cachep, page, i);
page             1596 mm/slab.c      						struct page *page)
page             1610 mm/slab.c      static void slab_destroy(struct kmem_cache *cachep, struct page *page)
page             1614 mm/slab.c      	freelist = page->freelist;
page             1615 mm/slab.c      	slab_destroy_debugcheck(cachep, page);
page             1617 mm/slab.c      		call_rcu(&page->rcu_head, kmem_rcu_free);
page             1619 mm/slab.c      		kmem_freepages(cachep, page);
page             1631 mm/slab.c      	struct page *page, *n;
page             1633 mm/slab.c      	list_for_each_entry_safe(page, n, list, slab_list) {
page             1634 mm/slab.c      		list_del(&page->slab_list);
page             1635 mm/slab.c      		slab_destroy(cachep, page);
page             2186 mm/slab.c      	struct page *page;
page             2198 mm/slab.c      		page = list_entry(p, struct page, slab_list);
page             2199 mm/slab.c      		list_del(&page->slab_list);
page             2208 mm/slab.c      		slab_destroy(cache, page);
page             2294 mm/slab.c      				   struct page *page, int colour_off,
page             2298 mm/slab.c      	void *addr = page_address(page);
page             2300 mm/slab.c      	page->s_mem = addr + colour_off;
page             2301 mm/slab.c      	page->active = 0;
page             2320 mm/slab.c      static inline freelist_idx_t get_free_obj(struct page *page, unsigned int idx)
page             2322 mm/slab.c      	return ((freelist_idx_t *)page->freelist)[idx];
page             2325 mm/slab.c      static inline void set_free_obj(struct page *page,
page             2328 mm/slab.c      	((freelist_idx_t *)(page->freelist))[idx] = val;
page             2331 mm/slab.c      static void cache_init_objs_debug(struct kmem_cache *cachep, struct page *page)
page             2337 mm/slab.c      		void *objp = index_to_obj(cachep, page, i);
page             2421 mm/slab.c      static void swap_free_obj(struct page *page, unsigned int a, unsigned int b)
page             2423 mm/slab.c      	swap(((freelist_idx_t *)page->freelist)[a],
page             2424 mm/slab.c      		((freelist_idx_t *)page->freelist)[b]);
page             2431 mm/slab.c      static bool shuffle_freelist(struct kmem_cache *cachep, struct page *page)
page             2448 mm/slab.c      		page->freelist = index_to_obj(cachep, page, objfreelist) +
page             2459 mm/slab.c      			set_free_obj(page, i, i);
page             2465 mm/slab.c      			swap_free_obj(page, i, rand);
page             2469 mm/slab.c      			set_free_obj(page, i, next_random_slot(&state));
page             2473 mm/slab.c      		set_free_obj(page, cachep->num - 1, objfreelist);
page             2479 mm/slab.c      				struct page *page)
page             2486 mm/slab.c      			    struct page *page)
page             2492 mm/slab.c      	cache_init_objs_debug(cachep, page);
page             2495 mm/slab.c      	shuffled = shuffle_freelist(cachep, page);
page             2498 mm/slab.c      		page->freelist = index_to_obj(cachep, page, cachep->num - 1) +
page             2503 mm/slab.c      		objp = index_to_obj(cachep, page, i);
page             2514 mm/slab.c      			set_free_obj(page, i, i);
page             2518 mm/slab.c      static void *slab_get_obj(struct kmem_cache *cachep, struct page *page)
page             2522 mm/slab.c      	objp = index_to_obj(cachep, page, get_free_obj(page, page->active));
page             2523 mm/slab.c      	page->active++;
page             2529 mm/slab.c      			struct page *page, void *objp)
page             2531 mm/slab.c      	unsigned int objnr = obj_to_index(cachep, page, objp);
page             2536 mm/slab.c      	for (i = page->active; i < cachep->num; i++) {
page             2537 mm/slab.c      		if (get_free_obj(page, i) == objnr) {
page             2544 mm/slab.c      	page->active--;
page             2545 mm/slab.c      	if (!page->freelist)
page             2546 mm/slab.c      		page->freelist = objp + obj_offset(cachep);
page             2548 mm/slab.c      	set_free_obj(page, page->active, objnr);
page             2556 mm/slab.c      static void slab_map_pages(struct kmem_cache *cache, struct page *page,
page             2559 mm/slab.c      	page->slab_cache = cache;
page             2560 mm/slab.c      	page->freelist = freelist;
page             2567 mm/slab.c      static struct page *cache_grow_begin(struct kmem_cache *cachep,
page             2575 mm/slab.c      	struct page *page;
page             2599 mm/slab.c      	page = kmem_getpages(cachep, local_flags, nodeid);
page             2600 mm/slab.c      	if (!page)
page             2603 mm/slab.c      	page_node = page_to_nid(page);
page             2622 mm/slab.c      	kasan_poison_slab(page);
page             2625 mm/slab.c      	freelist = alloc_slabmgmt(cachep, page, offset,
page             2630 mm/slab.c      	slab_map_pages(cachep, page, freelist);
page             2632 mm/slab.c      	cache_init_objs(cachep, page);
page             2637 mm/slab.c      	return page;
page             2640 mm/slab.c      	kmem_freepages(cachep, page);
page             2647 mm/slab.c      static void cache_grow_end(struct kmem_cache *cachep, struct page *page)
page             2654 mm/slab.c      	if (!page)
page             2657 mm/slab.c      	INIT_LIST_HEAD(&page->slab_list);
page             2658 mm/slab.c      	n = get_node(cachep, page_to_nid(page));
page             2662 mm/slab.c      	if (!page->active) {
page             2663 mm/slab.c      		list_add_tail(&page->slab_list, &n->slabs_free);
page             2666 mm/slab.c      		fixup_slab_list(cachep, n, page, &list);
page             2669 mm/slab.c      	n->free_objects += cachep->num - page->active;
page             2717 mm/slab.c      	struct page *page;
page             2723 mm/slab.c      	page = virt_to_head_page(objp);
page             2733 mm/slab.c      	objnr = obj_to_index(cachep, page, objp);
page             2736 mm/slab.c      	BUG_ON(objp != index_to_obj(cachep, page, objnr));
page             2766 mm/slab.c      				struct kmem_cache_node *n, struct page *page,
page             2770 mm/slab.c      	list_del(&page->slab_list);
page             2771 mm/slab.c      	if (page->active == cachep->num) {
page             2772 mm/slab.c      		list_add(&page->slab_list, &n->slabs_full);
page             2777 mm/slab.c      				void **objp = page->freelist;
page             2783 mm/slab.c      			page->freelist = NULL;
page             2786 mm/slab.c      		list_add(&page->slab_list, &n->slabs_partial);
page             2790 mm/slab.c      static noinline struct page *get_valid_first_slab(struct kmem_cache_node *n,
page             2791 mm/slab.c      					struct page *page, bool pfmemalloc)
page             2793 mm/slab.c      	if (!page)
page             2797 mm/slab.c      		return page;
page             2799 mm/slab.c      	if (!PageSlabPfmemalloc(page))
page             2800 mm/slab.c      		return page;
page             2804 mm/slab.c      		ClearPageSlabPfmemalloc(page);
page             2805 mm/slab.c      		return page;
page             2809 mm/slab.c      	list_del(&page->slab_list);
page             2810 mm/slab.c      	if (!page->active) {
page             2811 mm/slab.c      		list_add_tail(&page->slab_list, &n->slabs_free);
page             2814 mm/slab.c      		list_add_tail(&page->slab_list, &n->slabs_partial);
page             2816 mm/slab.c      	list_for_each_entry(page, &n->slabs_partial, slab_list) {
page             2817 mm/slab.c      		if (!PageSlabPfmemalloc(page))
page             2818 mm/slab.c      			return page;
page             2822 mm/slab.c      	list_for_each_entry(page, &n->slabs_free, slab_list) {
page             2823 mm/slab.c      		if (!PageSlabPfmemalloc(page)) {
page             2825 mm/slab.c      			return page;
page             2832 mm/slab.c      static struct page *get_first_slab(struct kmem_cache_node *n, bool pfmemalloc)
page             2834 mm/slab.c      	struct page *page;
page             2837 mm/slab.c      	page = list_first_entry_or_null(&n->slabs_partial, struct page,
page             2839 mm/slab.c      	if (!page) {
page             2841 mm/slab.c      		page = list_first_entry_or_null(&n->slabs_free, struct page,
page             2843 mm/slab.c      		if (page)
page             2848 mm/slab.c      		page = get_valid_first_slab(n, page, pfmemalloc);
page             2850 mm/slab.c      	return page;
page             2856 mm/slab.c      	struct page *page;
page             2864 mm/slab.c      	page = get_first_slab(n, true);
page             2865 mm/slab.c      	if (!page) {
page             2870 mm/slab.c      	obj = slab_get_obj(cachep, page);
page             2873 mm/slab.c      	fixup_slab_list(cachep, n, page, &list);
page             2886 mm/slab.c      		struct array_cache *ac, struct page *page, int batchcount)
page             2892 mm/slab.c      	BUG_ON(page->active >= cachep->num);
page             2894 mm/slab.c      	while (page->active < cachep->num && batchcount--) {
page             2899 mm/slab.c      		ac->entry[ac->avail++] = slab_get_obj(cachep, page);
page             2912 mm/slab.c      	struct page *page;
page             2945 mm/slab.c      		page = get_first_slab(n, false);
page             2946 mm/slab.c      		if (!page)
page             2951 mm/slab.c      		batchcount = alloc_block(cachep, ac, page, batchcount);
page             2952 mm/slab.c      		fixup_slab_list(cachep, n, page, &list);
page             2971 mm/slab.c      		page = cache_grow_begin(cachep, gfp_exact_node(flags), node);
page             2978 mm/slab.c      		if (!ac->avail && page)
page             2979 mm/slab.c      			alloc_block(cachep, ac, page, batchcount);
page             2980 mm/slab.c      		cache_grow_end(cachep, page);
page             3110 mm/slab.c      	struct page *page;
page             3146 mm/slab.c      		page = cache_grow_begin(cache, flags, numa_mem_id());
page             3147 mm/slab.c      		cache_grow_end(cache, page);
page             3148 mm/slab.c      		if (page) {
page             3149 mm/slab.c      			nid = page_to_nid(page);
page             3173 mm/slab.c      	struct page *page;
page             3184 mm/slab.c      	page = get_first_slab(n, false);
page             3185 mm/slab.c      	if (!page)
page             3194 mm/slab.c      	BUG_ON(page->active == cachep->num);
page             3196 mm/slab.c      	obj = slab_get_obj(cachep, page);
page             3199 mm/slab.c      	fixup_slab_list(cachep, n, page, &list);
page             3207 mm/slab.c      	page = cache_grow_begin(cachep, gfp_exact_node(flags), nodeid);
page             3208 mm/slab.c      	if (page) {
page             3210 mm/slab.c      		obj = slab_get_obj(cachep, page);
page             3212 mm/slab.c      	cache_grow_end(cachep, page);
page             3332 mm/slab.c      	struct page *page;
page             3338 mm/slab.c      		struct page *page;
page             3342 mm/slab.c      		page = virt_to_head_page(objp);
page             3343 mm/slab.c      		list_del(&page->slab_list);
page             3345 mm/slab.c      		slab_put_obj(cachep, page, objp);
page             3349 mm/slab.c      		if (page->active == 0) {
page             3350 mm/slab.c      			list_add(&page->slab_list, &n->slabs_free);
page             3357 mm/slab.c      			list_add_tail(&page->slab_list, &n->slabs_partial);
page             3364 mm/slab.c      		page = list_last_entry(&n->slabs_free, struct page, slab_list);
page             3365 mm/slab.c      		list_move(&page->slab_list, list);
page             3401 mm/slab.c      		struct page *page;
page             3403 mm/slab.c      		list_for_each_entry(page, &n->slabs_free, slab_list) {
page             3404 mm/slab.c      			BUG_ON(page->active);
page             3460 mm/slab.c      		struct page *page = virt_to_head_page(objp);
page             3462 mm/slab.c      		if (unlikely(PageSlabPfmemalloc(page))) {
page             3463 mm/slab.c      			cache_free_pfmemalloc(cachep, page, objp);
page             4167 mm/slab.c      void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
page             4177 mm/slab.c      	cachep = page->slab_cache;
page             4178 mm/slab.c      	objnr = obj_to_index(cachep, page, (void *)ptr);
page             4182 mm/slab.c      	offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);
page              332 mm/slab.h      static inline struct mem_cgroup *memcg_from_slab_page(struct page *page)
page              336 mm/slab.h      	s = READ_ONCE(page->slab_cache);
page              347 mm/slab.h      static __always_inline int memcg_charge_slab(struct page *page,
page              362 mm/slab.h      		mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
page              368 mm/slab.h      	ret = memcg_kmem_charge_memcg(page, gfp, order, memcg);
page              372 mm/slab.h      	lruvec = mem_cgroup_lruvec(page_pgdat(page), memcg);
page              387 mm/slab.h      static __always_inline void memcg_uncharge_slab(struct page *page, int order,
page              396 mm/slab.h      		lruvec = mem_cgroup_lruvec(page_pgdat(page), memcg);
page              398 mm/slab.h      		memcg_kmem_uncharge_memcg(page, order, memcg);
page              400 mm/slab.h      		mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
page              441 mm/slab.h      static inline struct mem_cgroup *memcg_from_slab_page(struct page *page)
page              446 mm/slab.h      static inline int memcg_charge_slab(struct page *page, gfp_t gfp, int order,
page              452 mm/slab.h      static inline void memcg_uncharge_slab(struct page *page, int order,
page              470 mm/slab.h      	struct page *page;
page              472 mm/slab.h      	page = virt_to_head_page(obj);
page              473 mm/slab.h      	if (WARN_ONCE(!PageSlab(page), "%s: Object is not a Slab page!\n",
page              476 mm/slab.h      	return page->slab_cache;
page              479 mm/slab.h      static __always_inline int charge_slab_page(struct page *page,
page              484 mm/slab.h      		mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
page              489 mm/slab.h      	return memcg_charge_slab(page, gfp, order, s);
page              492 mm/slab.h      static __always_inline void uncharge_slab_page(struct page *page, int order,
page              496 mm/slab.h      		mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
page              501 mm/slab.h      	memcg_uncharge_slab(page, order, s);
page             1314 mm/slab_common.c 	struct page *page;
page             1317 mm/slab_common.c 	page = alloc_pages(flags, order);
page             1318 mm/slab_common.c 	if (likely(page)) {
page             1319 mm/slab_common.c 		ret = page_address(page);
page             1320 mm/slab_common.c 		mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
page              108 mm/slob.c      static inline int slob_page_free(struct page *sp)
page              113 mm/slob.c      static void set_slob_page_free(struct page *sp, struct list_head *list)
page              119 mm/slob.c      static inline void clear_slob_page_free(struct page *sp)
page              193 mm/slob.c      	struct page *page;
page              197 mm/slob.c      		page = __alloc_pages_node(node, gfp, order);
page              200 mm/slob.c      		page = alloc_pages(gfp, order);
page              202 mm/slob.c      	if (!page)
page              205 mm/slob.c      	mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
page              207 mm/slob.c      	return page_address(page);
page              212 mm/slob.c      	struct page *sp = virt_to_page(b);
page              237 mm/slob.c      static void *slob_page_alloc(struct page *sp, size_t size, int align,
page              304 mm/slob.c      	struct page *sp;
page              384 mm/slob.c      	struct page *sp;
page              538 mm/slob.c      	struct page *sp;
page              564 mm/slob.c      	struct page *sp;
page              352 mm/slub.c      static __always_inline void slab_lock(struct page *page)
page              354 mm/slub.c      	VM_BUG_ON_PAGE(PageTail(page), page);
page              355 mm/slub.c      	bit_spin_lock(PG_locked, &page->flags);
page              358 mm/slub.c      static __always_inline void slab_unlock(struct page *page)
page              360 mm/slub.c      	VM_BUG_ON_PAGE(PageTail(page), page);
page              361 mm/slub.c      	__bit_spin_unlock(PG_locked, &page->flags);
page              365 mm/slub.c      static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
page              374 mm/slub.c      		if (cmpxchg_double(&page->freelist, &page->counters,
page              381 mm/slub.c      		slab_lock(page);
page              382 mm/slub.c      		if (page->freelist == freelist_old &&
page              383 mm/slub.c      					page->counters == counters_old) {
page              384 mm/slub.c      			page->freelist = freelist_new;
page              385 mm/slub.c      			page->counters = counters_new;
page              386 mm/slub.c      			slab_unlock(page);
page              389 mm/slub.c      		slab_unlock(page);
page              402 mm/slub.c      static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
page              410 mm/slub.c      		if (cmpxchg_double(&page->freelist, &page->counters,
page              420 mm/slub.c      		slab_lock(page);
page              421 mm/slub.c      		if (page->freelist == freelist_old &&
page              422 mm/slub.c      					page->counters == counters_old) {
page              423 mm/slub.c      			page->freelist = freelist_new;
page              424 mm/slub.c      			page->counters = counters_new;
page              425 mm/slub.c      			slab_unlock(page);
page              429 mm/slub.c      		slab_unlock(page);
page              450 mm/slub.c      static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map)
page              453 mm/slub.c      	void *addr = page_address(page);
page              455 mm/slub.c      	for (p = page->freelist; p; p = get_freepointer(s, p))
page              509 mm/slub.c      				struct page *page, void *object)
page              516 mm/slub.c      	base = page_address(page);
page              519 mm/slub.c      	if (object < base || object >= base + page->objects * s->size ||
page              612 mm/slub.c      static void print_page_info(struct page *page)
page              615 mm/slub.c      	       page, page->objects, page->inuse, page->freelist, page->flags);
page              647 mm/slub.c      static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
page              650 mm/slub.c      	u8 *addr = page_address(page);
page              654 mm/slub.c      	print_page_info(page);
page              689 mm/slub.c      void object_err(struct kmem_cache *s, struct page *page,
page              693 mm/slub.c      	print_trailer(s, page, object);
page              696 mm/slub.c      static __printf(3, 4) void slab_err(struct kmem_cache *s, struct page *page,
page              706 mm/slub.c      	print_page_info(page);
page              733 mm/slub.c      static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
page              753 mm/slub.c      	print_trailer(s, page, object);
page              797 mm/slub.c      static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
page              814 mm/slub.c      	return check_bytes_and_report(s, page, p, "Object padding",
page              819 mm/slub.c      static int slab_pad_check(struct kmem_cache *s, struct page *page)
page              831 mm/slub.c      	start = page_address(page);
page              832 mm/slub.c      	length = page_size(page);
page              847 mm/slub.c      	slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
page              854 mm/slub.c      static int check_object(struct kmem_cache *s, struct page *page,
page              861 mm/slub.c      		if (!check_bytes_and_report(s, page, object, "Redzone",
page              865 mm/slub.c      		if (!check_bytes_and_report(s, page, object, "Redzone",
page              870 mm/slub.c      			check_bytes_and_report(s, page, p, "Alignment padding",
page              878 mm/slub.c      			(!check_bytes_and_report(s, page, p, "Poison", p,
page              880 mm/slub.c      			 !check_bytes_and_report(s, page, p, "Poison",
page              886 mm/slub.c      		check_pad_bytes(s, page, p);
page              897 mm/slub.c      	if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
page              898 mm/slub.c      		object_err(s, page, p, "Freepointer corrupt");
page              910 mm/slub.c      static int check_slab(struct kmem_cache *s, struct page *page)
page              916 mm/slub.c      	if (!PageSlab(page)) {
page              917 mm/slub.c      		slab_err(s, page, "Not a valid slab page");
page              921 mm/slub.c      	maxobj = order_objects(compound_order(page), s->size);
page              922 mm/slub.c      	if (page->objects > maxobj) {
page              923 mm/slub.c      		slab_err(s, page, "objects %u > max %u",
page              924 mm/slub.c      			page->objects, maxobj);
page              927 mm/slub.c      	if (page->inuse > page->objects) {
page              928 mm/slub.c      		slab_err(s, page, "inuse %u > max %u",
page              929 mm/slub.c      			page->inuse, page->objects);
page              933 mm/slub.c      	slab_pad_check(s, page);
page              941 mm/slub.c      static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
page              948 mm/slub.c      	fp = page->freelist;
page              949 mm/slub.c      	while (fp && nr <= page->objects) {
page              952 mm/slub.c      		if (!check_valid_pointer(s, page, fp)) {
page              954 mm/slub.c      				object_err(s, page, object,
page              958 mm/slub.c      				slab_err(s, page, "Freepointer corrupt");
page              959 mm/slub.c      				page->freelist = NULL;
page              960 mm/slub.c      				page->inuse = page->objects;
page              971 mm/slub.c      	max_objects = order_objects(compound_order(page), s->size);
page              975 mm/slub.c      	if (page->objects != max_objects) {
page              976 mm/slub.c      		slab_err(s, page, "Wrong number of objects. Found %d but should be %d",
page              977 mm/slub.c      			 page->objects, max_objects);
page              978 mm/slub.c      		page->objects = max_objects;
page              981 mm/slub.c      	if (page->inuse != page->objects - nr) {
page              982 mm/slub.c      		slab_err(s, page, "Wrong object count. Counter is %d but counted were %d",
page              983 mm/slub.c      			 page->inuse, page->objects - nr);
page              984 mm/slub.c      		page->inuse = page->objects - nr;
page              990 mm/slub.c      static void trace(struct kmem_cache *s, struct page *page, void *object,
page              997 mm/slub.c      			object, page->inuse,
page              998 mm/slub.c      			page->freelist);
page             1012 mm/slub.c      	struct kmem_cache_node *n, struct page *page)
page             1018 mm/slub.c      	list_add(&page->slab_list, &n->full);
page             1021 mm/slub.c      static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page)
page             1027 mm/slub.c      	list_del(&page->slab_list);
page             1067 mm/slub.c      static void setup_object_debug(struct kmem_cache *s, struct page *page,
page             1078 mm/slub.c      void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr)
page             1084 mm/slub.c      	memset(addr, POISON_INUSE, page_size(page));
page             1089 mm/slub.c      					struct page *page, void *object)
page             1091 mm/slub.c      	if (!check_slab(s, page))
page             1094 mm/slub.c      	if (!check_valid_pointer(s, page, object)) {
page             1095 mm/slub.c      		object_err(s, page, object, "Freelist Pointer check fails");
page             1099 mm/slub.c      	if (!check_object(s, page, object, SLUB_RED_INACTIVE))
page             1106 mm/slub.c      					struct page *page,
page             1110 mm/slub.c      		if (!alloc_consistency_checks(s, page, object))
page             1117 mm/slub.c      	trace(s, page, object, 1);
page             1122 mm/slub.c      	if (PageSlab(page)) {
page             1129 mm/slub.c      		page->inuse = page->objects;
page             1130 mm/slub.c      		page->freelist = NULL;
page             1136 mm/slub.c      		struct page *page, void *object, unsigned long addr)
page             1138 mm/slub.c      	if (!check_valid_pointer(s, page, object)) {
page             1139 mm/slub.c      		slab_err(s, page, "Invalid object pointer 0x%p", object);
page             1143 mm/slub.c      	if (on_freelist(s, page, object)) {
page             1144 mm/slub.c      		object_err(s, page, object, "Object already free");
page             1148 mm/slub.c      	if (!check_object(s, page, object, SLUB_RED_ACTIVE))
page             1151 mm/slub.c      	if (unlikely(s != page->slab_cache)) {
page             1152 mm/slub.c      		if (!PageSlab(page)) {
page             1153 mm/slub.c      			slab_err(s, page, "Attempt to free object(0x%p) outside of slab",
page             1155 mm/slub.c      		} else if (!page->slab_cache) {
page             1160 mm/slub.c      			object_err(s, page, object,
page             1169 mm/slub.c      	struct kmem_cache *s, struct page *page,
page             1173 mm/slub.c      	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
page             1180 mm/slub.c      	slab_lock(page);
page             1183 mm/slub.c      		if (!check_slab(s, page))
page             1191 mm/slub.c      		if (!free_consistency_checks(s, page, object, addr))
page             1197 mm/slub.c      	trace(s, page, object, 0);
page             1210 mm/slub.c      		slab_err(s, page, "Bulk freelist count(%d) invalid(%d)\n",
page             1213 mm/slub.c      	slab_unlock(page);
page             1343 mm/slub.c      			struct page *page, void *object) {}
page             1345 mm/slub.c      void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr) {}
page             1348 mm/slub.c      	struct page *page, void *object, unsigned long addr) { return 0; }
page             1351 mm/slub.c      	struct kmem_cache *s, struct page *page,
page             1355 mm/slub.c      static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
page             1357 mm/slub.c      static inline int check_object(struct kmem_cache *s, struct page *page,
page             1360 mm/slub.c      					struct page *page) {}
page             1362 mm/slub.c      					struct page *page) {}
page             1472 mm/slub.c      static void *setup_object(struct kmem_cache *s, struct page *page,
page             1475 mm/slub.c      	setup_object_debug(s, page, object);
page             1488 mm/slub.c      static inline struct page *alloc_slab_page(struct kmem_cache *s,
page             1491 mm/slub.c      	struct page *page;
page             1495 mm/slub.c      		page = alloc_pages(flags, order);
page             1497 mm/slub.c      		page = __alloc_pages_node(node, flags, order);
page             1499 mm/slub.c      	if (page && charge_slab_page(page, flags, order, s)) {
page             1500 mm/slub.c      		__free_pages(page, order);
page             1501 mm/slub.c      		page = NULL;
page             1504 mm/slub.c      	return page;
page             1549 mm/slub.c      static void *next_freelist_entry(struct kmem_cache *s, struct page *page,
page             1571 mm/slub.c      static bool shuffle_freelist(struct kmem_cache *s, struct page *page)
page             1578 mm/slub.c      	if (page->objects < 2 || !s->random_seq)
page             1584 mm/slub.c      	page_limit = page->objects * s->size;
page             1585 mm/slub.c      	start = fixup_red_left(s, page_address(page));
page             1588 mm/slub.c      	cur = next_freelist_entry(s, page, &pos, start, page_limit,
page             1590 mm/slub.c      	cur = setup_object(s, page, cur);
page             1591 mm/slub.c      	page->freelist = cur;
page             1593 mm/slub.c      	for (idx = 1; idx < page->objects; idx++) {
page             1594 mm/slub.c      		next = next_freelist_entry(s, page, &pos, start, page_limit,
page             1596 mm/slub.c      		next = setup_object(s, page, next);
page             1610 mm/slub.c      static inline bool shuffle_freelist(struct kmem_cache *s, struct page *page)
page             1616 mm/slub.c      static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
page             1618 mm/slub.c      	struct page *page;
page             1640 mm/slub.c      	page = alloc_slab_page(s, alloc_gfp, node, oo);
page             1641 mm/slub.c      	if (unlikely(!page)) {
page             1648 mm/slub.c      		page = alloc_slab_page(s, alloc_gfp, node, oo);
page             1649 mm/slub.c      		if (unlikely(!page))
page             1654 mm/slub.c      	page->objects = oo_objects(oo);
page             1656 mm/slub.c      	page->slab_cache = s;
page             1657 mm/slub.c      	__SetPageSlab(page);
page             1658 mm/slub.c      	if (page_is_pfmemalloc(page))
page             1659 mm/slub.c      		SetPageSlabPfmemalloc(page);
page             1661 mm/slub.c      	kasan_poison_slab(page);
page             1663 mm/slub.c      	start = page_address(page);
page             1665 mm/slub.c      	setup_page_debug(s, page, start);
page             1667 mm/slub.c      	shuffle = shuffle_freelist(s, page);
page             1671 mm/slub.c      		start = setup_object(s, page, start);
page             1672 mm/slub.c      		page->freelist = start;
page             1673 mm/slub.c      		for (idx = 0, p = start; idx < page->objects - 1; idx++) {
page             1675 mm/slub.c      			next = setup_object(s, page, next);
page             1682 mm/slub.c      	page->inuse = page->objects;
page             1683 mm/slub.c      	page->frozen = 1;
page             1688 mm/slub.c      	if (!page)
page             1691 mm/slub.c      	inc_slabs_node(s, page_to_nid(page), page->objects);
page             1693 mm/slub.c      	return page;
page             1696 mm/slub.c      static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
page             1710 mm/slub.c      static void __free_slab(struct kmem_cache *s, struct page *page)
page             1712 mm/slub.c      	int order = compound_order(page);
page             1718 mm/slub.c      		slab_pad_check(s, page);
page             1719 mm/slub.c      		for_each_object(p, s, page_address(page),
page             1720 mm/slub.c      						page->objects)
page             1721 mm/slub.c      			check_object(s, page, p, SLUB_RED_INACTIVE);
page             1724 mm/slub.c      	__ClearPageSlabPfmemalloc(page);
page             1725 mm/slub.c      	__ClearPageSlab(page);
page             1727 mm/slub.c      	page->mapping = NULL;
page             1730 mm/slub.c      	uncharge_slab_page(page, order, s);
page             1731 mm/slub.c      	__free_pages(page, order);
page             1736 mm/slub.c      	struct page *page = container_of(h, struct page, rcu_head);
page             1738 mm/slub.c      	__free_slab(page->slab_cache, page);
page             1741 mm/slub.c      static void free_slab(struct kmem_cache *s, struct page *page)
page             1744 mm/slub.c      		call_rcu(&page->rcu_head, rcu_free_slab);
page             1746 mm/slub.c      		__free_slab(s, page);
page             1749 mm/slub.c      static void discard_slab(struct kmem_cache *s, struct page *page)
page             1751 mm/slub.c      	dec_slabs_node(s, page_to_nid(page), page->objects);
page             1752 mm/slub.c      	free_slab(s, page);
page             1759 mm/slub.c      __add_partial(struct kmem_cache_node *n, struct page *page, int tail)
page             1763 mm/slub.c      		list_add_tail(&page->slab_list, &n->partial);
page             1765 mm/slub.c      		list_add(&page->slab_list, &n->partial);
page             1769 mm/slub.c      				struct page *page, int tail)
page             1772 mm/slub.c      	__add_partial(n, page, tail);
page             1776 mm/slub.c      					struct page *page)
page             1779 mm/slub.c      	list_del(&page->slab_list);
page             1790 mm/slub.c      		struct kmem_cache_node *n, struct page *page,
page             1795 mm/slub.c      	struct page new;
page             1804 mm/slub.c      	freelist = page->freelist;
page             1805 mm/slub.c      	counters = page->counters;
page             1809 mm/slub.c      		new.inuse = page->objects;
page             1818 mm/slub.c      	if (!__cmpxchg_double_slab(s, page,
page             1824 mm/slub.c      	remove_partial(n, page);
page             1829 mm/slub.c      static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
page             1830 mm/slub.c      static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags);
page             1838 mm/slub.c      	struct page *page, *page2;
page             1853 mm/slub.c      	list_for_each_entry_safe(page, page2, &n->partial, slab_list) {
page             1856 mm/slub.c      		if (!pfmemalloc_match(page, flags))
page             1859 mm/slub.c      		t = acquire_slab(s, n, page, object == NULL, &objects);
page             1865 mm/slub.c      			c->page = page;
page             1869 mm/slub.c      			put_cpu_partial(s, page, 0);
page             2036 mm/slub.c      static void deactivate_slab(struct kmem_cache *s, struct page *page,
page             2040 mm/slub.c      	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
page             2045 mm/slub.c      	struct page new;
page             2046 mm/slub.c      	struct page old;
page             2048 mm/slub.c      	if (page->freelist) {
page             2066 mm/slub.c      			prior = page->freelist;
page             2067 mm/slub.c      			counters = page->counters;
page             2073 mm/slub.c      		} while (!__cmpxchg_double_slab(s, page,
page             2097 mm/slub.c      	old.freelist = page->freelist;
page             2098 mm/slub.c      	old.counters = page->counters;
page             2140 mm/slub.c      			remove_partial(n, page);
page             2142 mm/slub.c      			remove_full(s, n, page);
page             2145 mm/slub.c      			add_partial(n, page, tail);
page             2147 mm/slub.c      			add_full(s, n, page);
page             2151 mm/slub.c      	if (!__cmpxchg_double_slab(s, page,
page             2166 mm/slub.c      		discard_slab(s, page);
page             2170 mm/slub.c      	c->page = NULL;
page             2186 mm/slub.c      	struct page *page, *discard_page = NULL;
page             2188 mm/slub.c      	while ((page = c->partial)) {
page             2189 mm/slub.c      		struct page new;
page             2190 mm/slub.c      		struct page old;
page             2192 mm/slub.c      		c->partial = page->next;
page             2194 mm/slub.c      		n2 = get_node(s, page_to_nid(page));
page             2205 mm/slub.c      			old.freelist = page->freelist;
page             2206 mm/slub.c      			old.counters = page->counters;
page             2214 mm/slub.c      		} while (!__cmpxchg_double_slab(s, page,
page             2220 mm/slub.c      			page->next = discard_page;
page             2221 mm/slub.c      			discard_page = page;
page             2223 mm/slub.c      			add_partial(n, page, DEACTIVATE_TO_TAIL);
page             2232 mm/slub.c      		page = discard_page;
page             2236 mm/slub.c      		discard_slab(s, page);
page             2249 mm/slub.c      static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
page             2252 mm/slub.c      	struct page *oldpage;
page             2282 mm/slub.c      		pobjects += page->objects - page->inuse;
page             2284 mm/slub.c      		page->pages = pages;
page             2285 mm/slub.c      		page->pobjects = pobjects;
page             2286 mm/slub.c      		page->next = oldpage;
page             2288 mm/slub.c      	} while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page)
page             2304 mm/slub.c      	deactivate_slab(s, c->page, c->freelist, c);
page             2318 mm/slub.c      	if (c->page)
page             2336 mm/slub.c      	return c->page || slub_percpu_partial(c);
page             2367 mm/slub.c      static inline int node_match(struct page *page, int node)
page             2370 mm/slub.c      	if (node != NUMA_NO_NODE && page_to_nid(page) != node)
page             2377 mm/slub.c      static int count_free(struct page *page)
page             2379 mm/slub.c      	return page->objects - page->inuse;
page             2390 mm/slub.c      					int (*get_count)(struct page *))
page             2394 mm/slub.c      	struct page *page;
page             2397 mm/slub.c      	list_for_each_entry(page, &n->partial, slab_list)
page             2398 mm/slub.c      		x += get_count(page);
page             2446 mm/slub.c      	struct page *page;
page             2455 mm/slub.c      	page = new_slab(s, flags, node);
page             2456 mm/slub.c      	if (page) {
page             2458 mm/slub.c      		if (c->page)
page             2465 mm/slub.c      		freelist = page->freelist;
page             2466 mm/slub.c      		page->freelist = NULL;
page             2469 mm/slub.c      		c->page = page;
page             2476 mm/slub.c      static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags)
page             2478 mm/slub.c      	if (unlikely(PageSlabPfmemalloc(page)))
page             2494 mm/slub.c      static inline void *get_freelist(struct kmem_cache *s, struct page *page)
page             2496 mm/slub.c      	struct page new;
page             2501 mm/slub.c      		freelist = page->freelist;
page             2502 mm/slub.c      		counters = page->counters;
page             2507 mm/slub.c      		new.inuse = page->objects;
page             2510 mm/slub.c      	} while (!__cmpxchg_double_slab(s, page,
page             2541 mm/slub.c      	struct page *page;
page             2543 mm/slub.c      	page = c->page;
page             2544 mm/slub.c      	if (!page) {
page             2556 mm/slub.c      	if (unlikely(!node_match(page, node))) {
page             2566 mm/slub.c      			deactivate_slab(s, page, c->freelist, c);
page             2576 mm/slub.c      	if (unlikely(!pfmemalloc_match(page, gfpflags))) {
page             2577 mm/slub.c      		deactivate_slab(s, page, c->freelist, c);
page             2586 mm/slub.c      	freelist = get_freelist(s, page);
page             2589 mm/slub.c      		c->page = NULL;
page             2602 mm/slub.c      	VM_BUG_ON(!c->page->frozen);
page             2610 mm/slub.c      		page = c->page = slub_percpu_partial(c);
page             2611 mm/slub.c      		slub_set_percpu_partial(c, page);
page             2623 mm/slub.c      	page = c->page;
page             2624 mm/slub.c      	if (likely(!kmem_cache_debug(s) && pfmemalloc_match(page, gfpflags)))
page             2629 mm/slub.c      			!alloc_debug_processing(s, page, freelist, addr))
page             2632 mm/slub.c      	deactivate_slab(s, page, get_freepointer(s, freelist), c);
page             2687 mm/slub.c      	struct page *page;
page             2728 mm/slub.c      	page = c->page;
page             2729 mm/slub.c      	if (unlikely(!object || !node_match(page, node))) {
page             2836 mm/slub.c      static void __slab_free(struct kmem_cache *s, struct page *page,
page             2843 mm/slub.c      	struct page new;
page             2851 mm/slub.c      	    !free_debug_processing(s, page, head, tail, cnt, addr))
page             2859 mm/slub.c      		prior = page->freelist;
page             2860 mm/slub.c      		counters = page->counters;
page             2879 mm/slub.c      				n = get_node(s, page_to_nid(page));
page             2893 mm/slub.c      	} while (!cmpxchg_double_slab(s, page,
page             2905 mm/slub.c      			put_cpu_partial(s, page, 1);
page             2925 mm/slub.c      		remove_full(s, n, page);
page             2926 mm/slub.c      		add_partial(n, page, DEACTIVATE_TO_TAIL);
page             2937 mm/slub.c      		remove_partial(n, page);
page             2941 mm/slub.c      		remove_full(s, n, page);
page             2946 mm/slub.c      	discard_slab(s, page);
page             2965 mm/slub.c      				struct page *page, void *head, void *tail,
page             2987 mm/slub.c      	if (likely(page == c->page)) {
page             3002 mm/slub.c      		__slab_free(s, page, head, tail_obj, cnt, addr);
page             3006 mm/slub.c      static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
page             3015 mm/slub.c      		do_slab_free(s, page, head, tail, cnt, addr);
page             3036 mm/slub.c      	struct page *page;
page             3062 mm/slub.c      	struct page *page;
page             3065 mm/slub.c      	df->page = NULL;
page             3075 mm/slub.c      	page = virt_to_head_page(object);
page             3078 mm/slub.c      		if (unlikely(!PageSlab(page))) {
page             3079 mm/slub.c      			BUG_ON(!PageCompound(page));
page             3081 mm/slub.c      			__free_pages(page, compound_order(page));
page             3086 mm/slub.c      		df->s = page->slab_cache;
page             3092 mm/slub.c      	df->page = page;
page             3105 mm/slub.c      		if (df->page == virt_to_head_page(object)) {
page             3136 mm/slub.c      		if (!df.page)
page             3139 mm/slub.c      		slab_free(df.s, df.page, df.freelist, df.tail, df.cnt, _RET_IP_);
page             3387 mm/slub.c      	struct page *page;
page             3392 mm/slub.c      	page = new_slab(kmem_cache_node, GFP_NOWAIT, node);
page             3394 mm/slub.c      	BUG_ON(!page);
page             3395 mm/slub.c      	if (page_to_nid(page) != node) {
page             3400 mm/slub.c      	n = page->freelist;
page             3408 mm/slub.c      	page->freelist = get_freepointer(kmem_cache_node, n);
page             3409 mm/slub.c      	page->inuse = 1;
page             3410 mm/slub.c      	page->frozen = 0;
page             3413 mm/slub.c      	inc_slabs_node(kmem_cache_node, node, page->objects);
page             3419 mm/slub.c      	__add_partial(n, page, DEACTIVATE_TO_HEAD);
page             3690 mm/slub.c      static void list_slab_objects(struct kmem_cache *s, struct page *page,
page             3694 mm/slub.c      	void *addr = page_address(page);
page             3696 mm/slub.c      	unsigned long *map = bitmap_zalloc(page->objects, GFP_ATOMIC);
page             3699 mm/slub.c      	slab_err(s, page, text, s->name);
page             3700 mm/slub.c      	slab_lock(page);
page             3702 mm/slub.c      	get_map(s, page, map);
page             3703 mm/slub.c      	for_each_object(p, s, addr, page->objects) {
page             3710 mm/slub.c      	slab_unlock(page);
page             3723 mm/slub.c      	struct page *page, *h;
page             3727 mm/slub.c      	list_for_each_entry_safe(page, h, &n->partial, slab_list) {
page             3728 mm/slub.c      		if (!page->inuse) {
page             3729 mm/slub.c      			remove_partial(n, page);
page             3730 mm/slub.c      			list_add(&page->slab_list, &discard);
page             3732 mm/slub.c      			list_slab_objects(s, page,
page             3738 mm/slub.c      	list_for_each_entry_safe(page, h, &discard, slab_list)
page             3739 mm/slub.c      		discard_slab(s, page);
page             3830 mm/slub.c      	struct page *page;
page             3835 mm/slub.c      	page = alloc_pages_node(node, flags, order);
page             3836 mm/slub.c      	if (page) {
page             3837 mm/slub.c      		ptr = page_address(page);
page             3838 mm/slub.c      		mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
page             3885 mm/slub.c      void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
page             3895 mm/slub.c      	s = page->slab_cache;
page             3898 mm/slub.c      	if (ptr < page_address(page))
page             3903 mm/slub.c      	offset = (ptr - page_address(page)) % s->size;
page             3938 mm/slub.c      	struct page *page;
page             3943 mm/slub.c      	page = virt_to_head_page(object);
page             3945 mm/slub.c      	if (unlikely(!PageSlab(page))) {
page             3946 mm/slub.c      		WARN_ON(!PageCompound(page));
page             3947 mm/slub.c      		return page_size(page);
page             3950 mm/slub.c      	return slab_ksize(page->slab_cache);
page             3956 mm/slub.c      	struct page *page;
page             3964 mm/slub.c      	page = virt_to_head_page(x);
page             3965 mm/slub.c      	if (unlikely(!PageSlab(page))) {
page             3966 mm/slub.c      		unsigned int order = compound_order(page);
page             3968 mm/slub.c      		BUG_ON(!PageCompound(page));
page             3970 mm/slub.c      		mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
page             3972 mm/slub.c      		__free_pages(page, order);
page             3975 mm/slub.c      	slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_);
page             3995 mm/slub.c      	struct page *page;
page             3996 mm/slub.c      	struct page *t;
page             4016 mm/slub.c      		list_for_each_entry_safe(page, t, &n->partial, slab_list) {
page             4017 mm/slub.c      			int free = page->objects - page->inuse;
page             4025 mm/slub.c      			if (free == page->objects) {
page             4026 mm/slub.c      				list_move(&page->slab_list, &discard);
page             4029 mm/slub.c      				list_move(&page->slab_list, promote + free - 1);
page             4042 mm/slub.c      		list_for_each_entry_safe(page, t, &discard, slab_list)
page             4043 mm/slub.c      			discard_slab(s, page);
page             4226 mm/slub.c      		struct page *p;
page             4393 mm/slub.c      static int count_inuse(struct page *page)
page             4395 mm/slub.c      	return page->inuse;
page             4398 mm/slub.c      static int count_total(struct page *page)
page             4400 mm/slub.c      	return page->objects;
page             4405 mm/slub.c      static int validate_slab(struct kmem_cache *s, struct page *page,
page             4409 mm/slub.c      	void *addr = page_address(page);
page             4411 mm/slub.c      	if (!check_slab(s, page) ||
page             4412 mm/slub.c      			!on_freelist(s, page, NULL))
page             4416 mm/slub.c      	bitmap_zero(map, page->objects);
page             4418 mm/slub.c      	get_map(s, page, map);
page             4419 mm/slub.c      	for_each_object(p, s, addr, page->objects) {
page             4421 mm/slub.c      			if (!check_object(s, page, p, SLUB_RED_INACTIVE))
page             4425 mm/slub.c      	for_each_object(p, s, addr, page->objects)
page             4427 mm/slub.c      			if (!check_object(s, page, p, SLUB_RED_ACTIVE))
page             4432 mm/slub.c      static void validate_slab_slab(struct kmem_cache *s, struct page *page,
page             4435 mm/slub.c      	slab_lock(page);
page             4436 mm/slub.c      	validate_slab(s, page, map);
page             4437 mm/slub.c      	slab_unlock(page);
page             4444 mm/slub.c      	struct page *page;
page             4449 mm/slub.c      	list_for_each_entry(page, &n->partial, slab_list) {
page             4450 mm/slub.c      		validate_slab_slab(s, page, map);
page             4460 mm/slub.c      	list_for_each_entry(page, &n->full, slab_list) {
page             4461 mm/slub.c      		validate_slab_slab(s, page, map);
page             4616 mm/slub.c      		struct page *page, enum track_item alloc,
page             4619 mm/slub.c      	void *addr = page_address(page);
page             4622 mm/slub.c      	bitmap_zero(map, page->objects);
page             4623 mm/slub.c      	get_map(s, page, map);
page             4625 mm/slub.c      	for_each_object(p, s, addr, page->objects)
page             4650 mm/slub.c      		struct page *page;
page             4656 mm/slub.c      		list_for_each_entry(page, &n->partial, slab_list)
page             4657 mm/slub.c      			process_slab(&t, s, page, alloc, map);
page             4658 mm/slub.c      		list_for_each_entry(page, &n->full, slab_list)
page             4659 mm/slub.c      			process_slab(&t, s, page, alloc, map);
page             4825 mm/slub.c      			struct page *page;
page             4827 mm/slub.c      			page = READ_ONCE(c->page);
page             4828 mm/slub.c      			if (!page)
page             4831 mm/slub.c      			node = page_to_nid(page);
page             4833 mm/slub.c      				x = page->objects;
page             4835 mm/slub.c      				x = page->inuse;
page             4842 mm/slub.c      			page = slub_percpu_partial_read_once(c);
page             4843 mm/slub.c      			if (page) {
page             4844 mm/slub.c      				node = page_to_nid(page);
page             4850 mm/slub.c      					x = page->pages;
page             5079 mm/slub.c      		struct page *page;
page             5081 mm/slub.c      		page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
page             5083 mm/slub.c      		if (page) {
page             5084 mm/slub.c      			pages += page->pages;
page             5085 mm/slub.c      			objects += page->pobjects;
page             5093 mm/slub.c      		struct page *page;
page             5095 mm/slub.c      		page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
page             5097 mm/slub.c      		if (page && len < PAGE_SIZE - 20)
page             5099 mm/slub.c      				page->pobjects, page->pages);
page               56 mm/sparse-vmemmap.c 		struct page *page;
page               58 mm/sparse-vmemmap.c 		page = alloc_pages_node(node, gfp_mask, order);
page               59 mm/sparse-vmemmap.c 		if (page)
page               60 mm/sparse-vmemmap.c 			return page_address(page);
page              248 mm/sparse-vmemmap.c struct page * __meminit __populate_section_memmap(unsigned long pfn,
page              264 mm/sparse-vmemmap.c 	end = start + nr_pages * sizeof(struct page);
page               47 mm/sparse.c    int page_to_nid(const struct page *page)
page               49 mm/sparse.c    	return section_to_node_table[page_to_section(page)];
page              314 mm/sparse.c    static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
page              326 mm/sparse.c    struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
page              330 mm/sparse.c    	return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
page              334 mm/sparse.c    		unsigned long pnum, struct page *mem_map,
page              442 mm/sparse.c    	return ALIGN(sizeof(struct page) * PAGES_PER_SECTION, PMD_SIZE);
page              448 mm/sparse.c    	return PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
page              451 mm/sparse.c    struct page __init *__populate_section_memmap(unsigned long pfn,
page              455 mm/sparse.c    	struct page *map = sparse_buffer_alloc(size);
page              533 mm/sparse.c    	struct page *map;
page              650 mm/sparse.c    static struct page * __meminit populate_section_memmap(unsigned long pfn,
page              660 mm/sparse.c    	unsigned long end = start + nr_pages * sizeof(struct page);
page              664 mm/sparse.c    static void free_map_bootmem(struct page *memmap)
page              672 mm/sparse.c    struct page * __meminit populate_section_memmap(unsigned long pfn,
page              675 mm/sparse.c    	struct page *page, *ret;
page              676 mm/sparse.c    	unsigned long memmap_size = sizeof(struct page) * PAGES_PER_SECTION;
page              678 mm/sparse.c    	page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
page              679 mm/sparse.c    	if (page)
page              688 mm/sparse.c    	ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
page              697 mm/sparse.c    	struct page *memmap = pfn_to_page(pfn);
page              703 mm/sparse.c    			   get_order(sizeof(struct page) * PAGES_PER_SECTION));
page              706 mm/sparse.c    static void free_map_bootmem(struct page *memmap)
page              710 mm/sparse.c    	struct page *page = virt_to_page(memmap);
page              712 mm/sparse.c    	nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
page              715 mm/sparse.c    	for (i = 0; i < nr_pages; i++, page++) {
page              716 mm/sparse.c    		magic = (unsigned long) page->freelist;
page              720 mm/sparse.c    		maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
page              721 mm/sparse.c    		removing_section_nr = page_private(page);
page              732 mm/sparse.c    			put_page_bootmem(page);
page              744 mm/sparse.c    	struct page *memmap = NULL;
page              809 mm/sparse.c    static struct page * __meminit section_activate(int nid, unsigned long pfn,
page              816 mm/sparse.c    	struct page *memmap;
page              882 mm/sparse.c    	struct page *memmap;
page              897 mm/sparse.c    	page_init_poison(memmap, sizeof(struct page) * nr_pages);
page              912 mm/sparse.c    static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
page              933 mm/sparse.c    static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
page               60 mm/swap.c      static void __page_cache_release(struct page *page)
page               62 mm/swap.c      	if (PageLRU(page)) {
page               63 mm/swap.c      		pg_data_t *pgdat = page_pgdat(page);
page               68 mm/swap.c      		lruvec = mem_cgroup_page_lruvec(page, pgdat);
page               69 mm/swap.c      		VM_BUG_ON_PAGE(!PageLRU(page), page);
page               70 mm/swap.c      		__ClearPageLRU(page);
page               71 mm/swap.c      		del_page_from_lru_list(page, lruvec, page_off_lru(page));
page               74 mm/swap.c      	__ClearPageWaiters(page);
page               77 mm/swap.c      static void __put_single_page(struct page *page)
page               79 mm/swap.c      	__page_cache_release(page);
page               80 mm/swap.c      	mem_cgroup_uncharge(page);
page               81 mm/swap.c      	free_unref_page(page);
page               84 mm/swap.c      static void __put_compound_page(struct page *page)
page               94 mm/swap.c      	if (!PageHuge(page))
page               95 mm/swap.c      		__page_cache_release(page);
page               96 mm/swap.c      	dtor = get_compound_page_dtor(page);
page               97 mm/swap.c      	(*dtor)(page);
page              100 mm/swap.c      void __put_page(struct page *page)
page              102 mm/swap.c      	if (is_zone_device_page(page)) {
page              103 mm/swap.c      		put_dev_pagemap(page->pgmap);
page              112 mm/swap.c      	if (unlikely(PageCompound(page)))
page              113 mm/swap.c      		__put_compound_page(page);
page              115 mm/swap.c      		__put_single_page(page);
page              129 mm/swap.c      		struct page *victim;
page              152 mm/swap.c      		struct page **pages)
page              179 mm/swap.c      int get_kernel_page(unsigned long start, int write, struct page **pages)
page              191 mm/swap.c      	void (*move_fn)(struct page *page, struct lruvec *lruvec, void *arg),
page              200 mm/swap.c      		struct page *page = pvec->pages[i];
page              201 mm/swap.c      		struct pglist_data *pagepgdat = page_pgdat(page);
page              210 mm/swap.c      		lruvec = mem_cgroup_page_lruvec(page, pgdat);
page              211 mm/swap.c      		(*move_fn)(page, lruvec, arg);
page              219 mm/swap.c      static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec,
page              224 mm/swap.c      	if (PageLRU(page) && !PageUnevictable(page)) {
page              225 mm/swap.c      		del_page_from_lru_list(page, lruvec, page_lru(page));
page              226 mm/swap.c      		ClearPageActive(page);
page              227 mm/swap.c      		add_page_to_lru_list_tail(page, lruvec, page_lru(page));
page              249 mm/swap.c      void rotate_reclaimable_page(struct page *page)
page              251 mm/swap.c      	if (!PageLocked(page) && !PageDirty(page) &&
page              252 mm/swap.c      	    !PageUnevictable(page) && PageLRU(page)) {
page              256 mm/swap.c      		get_page(page);
page              259 mm/swap.c      		if (!pagevec_add(pvec, page) || PageCompound(page))
page              275 mm/swap.c      static void __activate_page(struct page *page, struct lruvec *lruvec,
page              278 mm/swap.c      	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
page              279 mm/swap.c      		int file = page_is_file_cache(page);
page              280 mm/swap.c      		int lru = page_lru_base_type(page);
page              282 mm/swap.c      		del_page_from_lru_list(page, lruvec, lru);
page              283 mm/swap.c      		SetPageActive(page);
page              285 mm/swap.c      		add_page_to_lru_list(page, lruvec, lru);
page              286 mm/swap.c      		trace_mm_lru_activate(page);
page              307 mm/swap.c      void activate_page(struct page *page)
page              309 mm/swap.c      	page = compound_head(page);
page              310 mm/swap.c      	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
page              313 mm/swap.c      		get_page(page);
page              314 mm/swap.c      		if (!pagevec_add(pvec, page) || PageCompound(page))
page              325 mm/swap.c      void activate_page(struct page *page)
page              327 mm/swap.c      	pg_data_t *pgdat = page_pgdat(page);
page              329 mm/swap.c      	page = compound_head(page);
page              331 mm/swap.c      	__activate_page(page, mem_cgroup_page_lruvec(page, pgdat), NULL);
page              336 mm/swap.c      static void __lru_cache_activate_page(struct page *page)
page              352 mm/swap.c      		struct page *pagevec_page = pvec->pages[i];
page              354 mm/swap.c      		if (pagevec_page == page) {
page              355 mm/swap.c      			SetPageActive(page);
page              373 mm/swap.c      void mark_page_accessed(struct page *page)
page              375 mm/swap.c      	page = compound_head(page);
page              376 mm/swap.c      	if (!PageActive(page) && !PageUnevictable(page) &&
page              377 mm/swap.c      			PageReferenced(page)) {
page              385 mm/swap.c      		if (PageLRU(page))
page              386 mm/swap.c      			activate_page(page);
page              388 mm/swap.c      			__lru_cache_activate_page(page);
page              389 mm/swap.c      		ClearPageReferenced(page);
page              390 mm/swap.c      		if (page_is_file_cache(page))
page              391 mm/swap.c      			workingset_activation(page);
page              392 mm/swap.c      	} else if (!PageReferenced(page)) {
page              393 mm/swap.c      		SetPageReferenced(page);
page              395 mm/swap.c      	if (page_is_idle(page))
page              396 mm/swap.c      		clear_page_idle(page);
page              400 mm/swap.c      static void __lru_cache_add(struct page *page)
page              404 mm/swap.c      	get_page(page);
page              405 mm/swap.c      	if (!pagevec_add(pvec, page) || PageCompound(page))
page              414 mm/swap.c      void lru_cache_add_anon(struct page *page)
page              416 mm/swap.c      	if (PageActive(page))
page              417 mm/swap.c      		ClearPageActive(page);
page              418 mm/swap.c      	__lru_cache_add(page);
page              421 mm/swap.c      void lru_cache_add_file(struct page *page)
page              423 mm/swap.c      	if (PageActive(page))
page              424 mm/swap.c      		ClearPageActive(page);
page              425 mm/swap.c      	__lru_cache_add(page);
page              438 mm/swap.c      void lru_cache_add(struct page *page)
page              440 mm/swap.c      	VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page);
page              441 mm/swap.c      	VM_BUG_ON_PAGE(PageLRU(page), page);
page              442 mm/swap.c      	__lru_cache_add(page);
page              455 mm/swap.c      void lru_cache_add_active_or_unevictable(struct page *page,
page              458 mm/swap.c      	VM_BUG_ON_PAGE(PageLRU(page), page);
page              461 mm/swap.c      		SetPageActive(page);
page              462 mm/swap.c      	else if (!TestSetPageMlocked(page)) {
page              468 mm/swap.c      		__mod_zone_page_state(page_zone(page), NR_MLOCK,
page              469 mm/swap.c      				    hpage_nr_pages(page));
page              472 mm/swap.c      	lru_cache_add(page);
page              496 mm/swap.c      static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
page              502 mm/swap.c      	if (!PageLRU(page))
page              505 mm/swap.c      	if (PageUnevictable(page))
page              509 mm/swap.c      	if (page_mapped(page))
page              512 mm/swap.c      	active = PageActive(page);
page              513 mm/swap.c      	file = page_is_file_cache(page);
page              514 mm/swap.c      	lru = page_lru_base_type(page);
page              516 mm/swap.c      	del_page_from_lru_list(page, lruvec, lru + active);
page              517 mm/swap.c      	ClearPageActive(page);
page              518 mm/swap.c      	ClearPageReferenced(page);
page              520 mm/swap.c      	if (PageWriteback(page) || PageDirty(page)) {
page              526 mm/swap.c      		add_page_to_lru_list(page, lruvec, lru);
page              527 mm/swap.c      		SetPageReclaim(page);
page              533 mm/swap.c      		add_page_to_lru_list_tail(page, lruvec, lru);
page              542 mm/swap.c      static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
page              545 mm/swap.c      	if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
page              546 mm/swap.c      		int file = page_is_file_cache(page);
page              547 mm/swap.c      		int lru = page_lru_base_type(page);
page              549 mm/swap.c      		del_page_from_lru_list(page, lruvec, lru + LRU_ACTIVE);
page              550 mm/swap.c      		ClearPageActive(page);
page              551 mm/swap.c      		ClearPageReferenced(page);
page              552 mm/swap.c      		add_page_to_lru_list(page, lruvec, lru);
page              554 mm/swap.c      		__count_vm_events(PGDEACTIVATE, hpage_nr_pages(page));
page              559 mm/swap.c      static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
page              562 mm/swap.c      	if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
page              563 mm/swap.c      	    !PageSwapCache(page) && !PageUnevictable(page)) {
page              564 mm/swap.c      		bool active = PageActive(page);
page              566 mm/swap.c      		del_page_from_lru_list(page, lruvec,
page              568 mm/swap.c      		ClearPageActive(page);
page              569 mm/swap.c      		ClearPageReferenced(page);
page              575 mm/swap.c      		ClearPageSwapBacked(page);
page              576 mm/swap.c      		add_page_to_lru_list(page, lruvec, LRU_INACTIVE_FILE);
page              578 mm/swap.c      		__count_vm_events(PGLAZYFREE, hpage_nr_pages(page));
page              579 mm/swap.c      		count_memcg_page_event(page, PGLAZYFREE);
page              629 mm/swap.c      void deactivate_file_page(struct page *page)
page              635 mm/swap.c      	if (PageUnevictable(page))
page              638 mm/swap.c      	if (likely(get_page_unless_zero(page))) {
page              641 mm/swap.c      		if (!pagevec_add(pvec, page) || PageCompound(page))
page              655 mm/swap.c      void deactivate_page(struct page *page)
page              657 mm/swap.c      	if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
page              660 mm/swap.c      		get_page(page);
page              661 mm/swap.c      		if (!pagevec_add(pvec, page) || PageCompound(page))
page              674 mm/swap.c      void mark_page_lazyfree(struct page *page)
page              676 mm/swap.c      	if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
page              677 mm/swap.c      	    !PageSwapCache(page) && !PageUnevictable(page)) {
page              680 mm/swap.c      		get_page(page);
page              681 mm/swap.c      		if (!pagevec_add(pvec, page) || PageCompound(page))
page              760 mm/swap.c      void release_pages(struct page **pages, int nr)
page              770 mm/swap.c      		struct page *page = pages[i];
page              782 mm/swap.c      		if (is_huge_zero_page(page))
page              785 mm/swap.c      		if (is_zone_device_page(page)) {
page              797 mm/swap.c      			if (put_devmap_managed_page(page))
page              801 mm/swap.c      		page = compound_head(page);
page              802 mm/swap.c      		if (!put_page_testzero(page))
page              805 mm/swap.c      		if (PageCompound(page)) {
page              810 mm/swap.c      			__put_compound_page(page);
page              814 mm/swap.c      		if (PageLRU(page)) {
page              815 mm/swap.c      			struct pglist_data *pgdat = page_pgdat(page);
page              826 mm/swap.c      			lruvec = mem_cgroup_page_lruvec(page, locked_pgdat);
page              827 mm/swap.c      			VM_BUG_ON_PAGE(!PageLRU(page), page);
page              828 mm/swap.c      			__ClearPageLRU(page);
page              829 mm/swap.c      			del_page_from_lru_list(page, lruvec, page_off_lru(page));
page              833 mm/swap.c      		__ClearPageActive(page);
page              834 mm/swap.c      		__ClearPageWaiters(page);
page              836 mm/swap.c      		list_add(&page->lru, &pages_to_free);
page              869 mm/swap.c      void lru_add_page_tail(struct page *page, struct page *page_tail,
page              874 mm/swap.c      	VM_BUG_ON_PAGE(!PageHead(page), page);
page              875 mm/swap.c      	VM_BUG_ON_PAGE(PageCompound(page_tail), page);
page              876 mm/swap.c      	VM_BUG_ON_PAGE(PageLRU(page_tail), page);
page              882 mm/swap.c      	if (likely(PageLRU(page)))
page              883 mm/swap.c      		list_add_tail(&page_tail->lru, &page->lru);
page              900 mm/swap.c      	if (!PageUnevictable(page))
page              905 mm/swap.c      static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
page              909 mm/swap.c      	int was_unevictable = TestClearPageUnevictable(page);
page              911 mm/swap.c      	VM_BUG_ON_PAGE(PageLRU(page), page);
page              913 mm/swap.c      	SetPageLRU(page);
page              942 mm/swap.c      	if (page_evictable(page)) {
page              943 mm/swap.c      		lru = page_lru(page);
page              944 mm/swap.c      		update_page_reclaim_stat(lruvec, page_is_file_cache(page),
page              945 mm/swap.c      					 PageActive(page));
page              950 mm/swap.c      		ClearPageActive(page);
page              951 mm/swap.c      		SetPageUnevictable(page);
page              956 mm/swap.c      	add_page_to_lru_list(page, lruvec, lru);
page              957 mm/swap.c      	trace_mm_lru_insertion(page, lru);
page             1014 mm/swap.c      		struct page *page = pvec->pages[i];
page             1015 mm/swap.c      		if (!xa_is_value(page))
page             1016 mm/swap.c      			pvec->pages[j++] = page;
page               10 mm/swap_cgroup.c 	struct page **map;
page               41 mm/swap_cgroup.c 	struct page *page;
page               48 mm/swap_cgroup.c 		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
page               49 mm/swap_cgroup.c 		if (!page)
page               51 mm/swap_cgroup.c 		ctrl->map[idx] = page;
page               68 mm/swap_cgroup.c 	struct page *mappage;
page              208 mm/swap_cgroup.c 	struct page **map;
page              225 mm/swap_cgroup.c 			struct page *page = map[i];
page              226 mm/swap_cgroup.c 			if (page)
page              227 mm/swap_cgroup.c 				__free_page(page);
page              310 mm/swap_slots.c swp_entry_t get_swap_page(struct page *page)
page              317 mm/swap_slots.c 	if (PageTransHuge(page)) {
page              355 mm/swap_slots.c 	if (mem_cgroup_try_charge_swap(page, entry)) {
page              356 mm/swap_slots.c 		put_swap_page(page, entry);
page              114 mm/swap_state.c int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp)
page              118 mm/swap_state.c 	XA_STATE_ORDER(xas, &address_space->i_pages, idx, compound_order(page));
page              119 mm/swap_state.c 	unsigned long i, nr = compound_nr(page);
page              121 mm/swap_state.c 	VM_BUG_ON_PAGE(!PageLocked(page), page);
page              122 mm/swap_state.c 	VM_BUG_ON_PAGE(PageSwapCache(page), page);
page              123 mm/swap_state.c 	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
page              125 mm/swap_state.c 	page_ref_add(page, nr);
page              126 mm/swap_state.c 	SetPageSwapCache(page);
page              134 mm/swap_state.c 			VM_BUG_ON_PAGE(xas.xa_index != idx + i, page);
page              135 mm/swap_state.c 			set_page_private(page + i, entry.val + i);
page              136 mm/swap_state.c 			xas_store(&xas, page);
page              140 mm/swap_state.c 		__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
page              149 mm/swap_state.c 	ClearPageSwapCache(page);
page              150 mm/swap_state.c 	page_ref_sub(page, nr);
page              158 mm/swap_state.c void __delete_from_swap_cache(struct page *page, swp_entry_t entry)
page              161 mm/swap_state.c 	int i, nr = hpage_nr_pages(page);
page              165 mm/swap_state.c 	VM_BUG_ON_PAGE(!PageLocked(page), page);
page              166 mm/swap_state.c 	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
page              167 mm/swap_state.c 	VM_BUG_ON_PAGE(PageWriteback(page), page);
page              171 mm/swap_state.c 		VM_BUG_ON_PAGE(entry != page, entry);
page              172 mm/swap_state.c 		set_page_private(page + i, 0);
page              175 mm/swap_state.c 	ClearPageSwapCache(page);
page              177 mm/swap_state.c 	__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
page              188 mm/swap_state.c int add_to_swap(struct page *page)
page              193 mm/swap_state.c 	VM_BUG_ON_PAGE(!PageLocked(page), page);
page              194 mm/swap_state.c 	VM_BUG_ON_PAGE(!PageUptodate(page), page);
page              196 mm/swap_state.c 	entry = get_swap_page(page);
page              211 mm/swap_state.c 	err = add_to_swap_cache(page, entry,
page              229 mm/swap_state.c 	set_page_dirty(page);
page              234 mm/swap_state.c 	put_swap_page(page, entry);
page              244 mm/swap_state.c void delete_from_swap_cache(struct page *page)
page              246 mm/swap_state.c 	swp_entry_t entry = { .val = page_private(page) };
page              250 mm/swap_state.c 	__delete_from_swap_cache(page, entry);
page              253 mm/swap_state.c 	put_swap_page(page, entry);
page              254 mm/swap_state.c 	page_ref_sub(page, hpage_nr_pages(page));
page              265 mm/swap_state.c static inline void free_swap_cache(struct page *page)
page              267 mm/swap_state.c 	if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
page              268 mm/swap_state.c 		try_to_free_swap(page);
page              269 mm/swap_state.c 		unlock_page(page);
page              277 mm/swap_state.c void free_page_and_swap_cache(struct page *page)
page              279 mm/swap_state.c 	free_swap_cache(page);
page              280 mm/swap_state.c 	if (!is_huge_zero_page(page))
page              281 mm/swap_state.c 		put_page(page);
page              288 mm/swap_state.c void free_pages_and_swap_cache(struct page **pages, int nr)
page              290 mm/swap_state.c 	struct page **pagep = pages;
page              310 mm/swap_state.c struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma,
page              313 mm/swap_state.c 	struct page *page;
page              319 mm/swap_state.c 	page = find_get_page(swap_address_space(entry), swp_offset(entry));
page              323 mm/swap_state.c 	if (page) {
page              332 mm/swap_state.c 		if (unlikely(PageTransCompound(page)))
page              333 mm/swap_state.c 			return page;
page              335 mm/swap_state.c 		readahead = TestClearPageReadahead(page);
page              356 mm/swap_state.c 	return page;
page              359 mm/swap_state.c struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
page              363 mm/swap_state.c 	struct page *found_page = NULL, *new_page = NULL;
page              448 mm/swap_state.c struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
page              452 mm/swap_state.c 	struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
page              539 mm/swap_state.c struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
page              542 mm/swap_state.c 	struct page *page;
page              576 mm/swap_state.c 		page = __read_swap_cache_async(
page              579 mm/swap_state.c 		if (!page)
page              582 mm/swap_state.c 			swap_readpage(page, false);
page              584 mm/swap_state.c 				SetPageReadahead(page);
page              588 mm/swap_state.c 		put_page(page);
page              722 mm/swap_state.c static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
page              727 mm/swap_state.c 	struct page *page;
page              749 mm/swap_state.c 		page = __read_swap_cache_async(entry, gfp_mask, vma,
page              751 mm/swap_state.c 		if (!page)
page              754 mm/swap_state.c 			swap_readpage(page, false);
page              756 mm/swap_state.c 				SetPageReadahead(page);
page              760 mm/swap_state.c 		put_page(page);
page              781 mm/swap_state.c struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
page              131 mm/swapfile.c  	struct page *page;
page              134 mm/swapfile.c  	page = find_get_page(swap_address_space(entry), offset);
page              135 mm/swapfile.c  	if (!page)
page              144 mm/swapfile.c  	if (trylock_page(page)) {
page              146 mm/swapfile.c  		    ((flags & TTRS_UNMAPPED) && !page_mapped(page)) ||
page              147 mm/swapfile.c  		    ((flags & TTRS_FULL) && mem_cgroup_swap_full(page)))
page              148 mm/swapfile.c  			ret = try_to_free_swap(page);
page              149 mm/swapfile.c  		unlock_page(page);
page              151 mm/swapfile.c  	put_page(page);
page             1325 mm/swapfile.c  void put_swap_page(struct page *page, swp_entry_t entry)
page             1334 mm/swapfile.c  	int size = swap_entry_size(hpage_nr_pages(page));
page             1429 mm/swapfile.c  int page_swapcount(struct page *page)
page             1437 mm/swapfile.c  	entry.val = page_private(page);
page             1501 mm/swapfile.c  	struct page *page;
page             1520 mm/swapfile.c  	page = vmalloc_to_page(p->swap_map + offset);
page             1522 mm/swapfile.c  	VM_BUG_ON(page_private(page) != SWP_CONTINUED);
page             1525 mm/swapfile.c  		page = list_next_entry(page, lru);
page             1526 mm/swapfile.c  		map = kmap_atomic(page);
page             1565 mm/swapfile.c  static bool page_swapped(struct page *page)
page             1570 mm/swapfile.c  	if (!IS_ENABLED(CONFIG_THP_SWAP) || likely(!PageTransCompound(page)))
page             1571 mm/swapfile.c  		return page_swapcount(page) != 0;
page             1573 mm/swapfile.c  	page = compound_head(page);
page             1574 mm/swapfile.c  	entry.val = page_private(page);
page             1581 mm/swapfile.c  static int page_trans_huge_map_swapcount(struct page *page, int *total_mapcount,
page             1592 mm/swapfile.c  	VM_BUG_ON_PAGE(PageHuge(page), page);
page             1594 mm/swapfile.c  	if (!IS_ENABLED(CONFIG_THP_SWAP) || likely(!PageTransCompound(page))) {
page             1595 mm/swapfile.c  		mapcount = page_trans_huge_mapcount(page, total_mapcount);
page             1596 mm/swapfile.c  		if (PageSwapCache(page))
page             1597 mm/swapfile.c  			swapcount = page_swapcount(page);
page             1603 mm/swapfile.c  	page = compound_head(page);
page             1606 mm/swapfile.c  	if (PageSwapCache(page)) {
page             1609 mm/swapfile.c  		entry.val = page_private(page);
page             1619 mm/swapfile.c  		mapcount = atomic_read(&page[i]._mapcount) + 1;
page             1628 mm/swapfile.c  	if (PageDoubleMap(page)) {
page             1632 mm/swapfile.c  	mapcount = compound_mapcount(page);
page             1653 mm/swapfile.c  bool reuse_swap_page(struct page *page, int *total_map_swapcount)
page             1657 mm/swapfile.c  	VM_BUG_ON_PAGE(!PageLocked(page), page);
page             1658 mm/swapfile.c  	if (unlikely(PageKsm(page)))
page             1660 mm/swapfile.c  	count = page_trans_huge_map_swapcount(page, &total_mapcount,
page             1664 mm/swapfile.c  	if (count == 1 && PageSwapCache(page) &&
page             1665 mm/swapfile.c  	    (likely(!PageTransCompound(page)) ||
page             1667 mm/swapfile.c  	     total_swapcount == page_swapcount(page))) {
page             1668 mm/swapfile.c  		if (!PageWriteback(page)) {
page             1669 mm/swapfile.c  			page = compound_head(page);
page             1670 mm/swapfile.c  			delete_from_swap_cache(page);
page             1671 mm/swapfile.c  			SetPageDirty(page);
page             1676 mm/swapfile.c  			entry.val = page_private(page);
page             1693 mm/swapfile.c  int try_to_free_swap(struct page *page)
page             1695 mm/swapfile.c  	VM_BUG_ON_PAGE(!PageLocked(page), page);
page             1697 mm/swapfile.c  	if (!PageSwapCache(page))
page             1699 mm/swapfile.c  	if (PageWriteback(page))
page             1701 mm/swapfile.c  	if (page_swapped(page))
page             1722 mm/swapfile.c  	page = compound_head(page);
page             1723 mm/swapfile.c  	delete_from_swap_cache(page);
page             1724 mm/swapfile.c  	SetPageDirty(page);
page             1854 mm/swapfile.c  		unsigned long addr, swp_entry_t entry, struct page *page)
page             1856 mm/swapfile.c  	struct page *swapcache;
page             1862 mm/swapfile.c  	swapcache = page;
page             1863 mm/swapfile.c  	page = ksm_might_need_to_copy(page, vma, addr);
page             1864 mm/swapfile.c  	if (unlikely(!page))
page             1867 mm/swapfile.c  	if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL,
page             1875 mm/swapfile.c  		mem_cgroup_cancel_charge(page, memcg, false);
page             1882 mm/swapfile.c  	get_page(page);
page             1884 mm/swapfile.c  		   pte_mkold(mk_pte(page, vma->vm_page_prot)));
page             1885 mm/swapfile.c  	if (page == swapcache) {
page             1886 mm/swapfile.c  		page_add_anon_rmap(page, vma, addr, false);
page             1887 mm/swapfile.c  		mem_cgroup_commit_charge(page, memcg, true, false);
page             1889 mm/swapfile.c  		page_add_new_anon_rmap(page, vma, addr, false);
page             1890 mm/swapfile.c  		mem_cgroup_commit_charge(page, memcg, false, false);
page             1891 mm/swapfile.c  		lru_cache_add_active_or_unevictable(page, vma);
page             1898 mm/swapfile.c  	activate_page(page);
page             1902 mm/swapfile.c  	if (page != swapcache) {
page             1903 mm/swapfile.c  		unlock_page(page);
page             1904 mm/swapfile.c  		put_page(page);
page             1914 mm/swapfile.c  	struct page *page;
page             1943 mm/swapfile.c  		page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE, &vmf);
page             1944 mm/swapfile.c  		if (!page) {
page             1950 mm/swapfile.c  		lock_page(page);
page             1951 mm/swapfile.c  		wait_on_page_writeback(page);
page             1952 mm/swapfile.c  		ret = unuse_pte(vma, pmd, addr, entry, page);
page             1954 mm/swapfile.c  			unlock_page(page);
page             1955 mm/swapfile.c  			put_page(page);
page             1959 mm/swapfile.c  		try_to_free_swap(page);
page             1960 mm/swapfile.c  		unlock_page(page);
page             1961 mm/swapfile.c  		put_page(page);
page             2131 mm/swapfile.c  	struct page *page;
page             2185 mm/swapfile.c  		page = find_get_page(swap_address_space(entry), i);
page             2186 mm/swapfile.c  		if (!page)
page             2195 mm/swapfile.c  		lock_page(page);
page             2196 mm/swapfile.c  		wait_on_page_writeback(page);
page             2197 mm/swapfile.c  		try_to_free_swap(page);
page             2198 mm/swapfile.c  		unlock_page(page);
page             2199 mm/swapfile.c  		put_page(page);
page             2274 mm/swapfile.c  sector_t map_swap_page(struct page *page, struct block_device **bdev)
page             2277 mm/swapfile.c  	entry.val = page_private(page);
page             3113 mm/swapfile.c  	struct page *page = NULL;
page             3166 mm/swapfile.c  	page = read_mapping_page(mapping, 0, swap_file);
page             3167 mm/swapfile.c  	if (IS_ERR(page)) {
page             3168 mm/swapfile.c  		error = PTR_ERR(page);
page             3171 mm/swapfile.c  	swap_header = kmap(page);
page             3336 mm/swapfile.c  	if (page && !IS_ERR(page)) {
page             3337 mm/swapfile.c  		kunmap(page);
page             3338 mm/swapfile.c  		put_page(page);
page             3484 mm/swapfile.c  struct swap_info_struct *page_swap_info(struct page *page)
page             3486 mm/swapfile.c  	swp_entry_t entry = { .val = page_private(page) };
page             3493 mm/swapfile.c  struct address_space *__page_file_mapping(struct page *page)
page             3495 mm/swapfile.c  	return page_swap_info(page)->swap_file->f_mapping;
page             3499 mm/swapfile.c  pgoff_t __page_file_index(struct page *page)
page             3501 mm/swapfile.c  	swp_entry_t swap = { .val = page_private(page) };
page             3525 mm/swapfile.c  	struct page *head;
page             3526 mm/swapfile.c  	struct page *page;
page             3527 mm/swapfile.c  	struct page *list_page;
page             3536 mm/swapfile.c  	page = alloc_page(gfp_mask | __GFP_HIGHMEM);
page             3563 mm/swapfile.c  	if (!page) {
page             3610 mm/swapfile.c  	list_add_tail(&page->lru, &head->lru);
page             3611 mm/swapfile.c  	page = NULL;			/* now it's attached, don't free it */
page             3619 mm/swapfile.c  	if (page)
page             3620 mm/swapfile.c  		__free_page(page);
page             3636 mm/swapfile.c  	struct page *head;
page             3637 mm/swapfile.c  	struct page *page;
page             3649 mm/swapfile.c  	page = list_entry(head->lru.next, struct page, lru);
page             3650 mm/swapfile.c  	map = kmap_atomic(page) + offset;
page             3661 mm/swapfile.c  			page = list_entry(page->lru.next, struct page, lru);
page             3662 mm/swapfile.c  			BUG_ON(page == head);
page             3663 mm/swapfile.c  			map = kmap_atomic(page) + offset;
page             3667 mm/swapfile.c  			page = list_entry(page->lru.next, struct page, lru);
page             3668 mm/swapfile.c  			if (page == head) {
page             3672 mm/swapfile.c  			map = kmap_atomic(page) + offset;
page             3677 mm/swapfile.c  		page = list_entry(page->lru.prev, struct page, lru);
page             3678 mm/swapfile.c  		while (page != head) {
page             3679 mm/swapfile.c  			map = kmap_atomic(page) + offset;
page             3682 mm/swapfile.c  			page = list_entry(page->lru.prev, struct page, lru);
page             3693 mm/swapfile.c  			page = list_entry(page->lru.next, struct page, lru);
page             3694 mm/swapfile.c  			BUG_ON(page == head);
page             3695 mm/swapfile.c  			map = kmap_atomic(page) + offset;
page             3702 mm/swapfile.c  		page = list_entry(page->lru.prev, struct page, lru);
page             3703 mm/swapfile.c  		while (page != head) {
page             3704 mm/swapfile.c  			map = kmap_atomic(page) + offset;
page             3708 mm/swapfile.c  			page = list_entry(page->lru.prev, struct page, lru);
page             3726 mm/swapfile.c  		struct page *head;
page             3729 mm/swapfile.c  			struct page *page, *next;
page             3731 mm/swapfile.c  			list_for_each_entry_safe(page, next, &head->lru, lru) {
page             3732 mm/swapfile.c  				list_del(&page->lru);
page             3733 mm/swapfile.c  				__free_page(page);
page               83 mm/truncate.c  		struct page *page = pvec->pages[i];
page               86 mm/truncate.c  		if (!xa_is_value(page)) {
page               87 mm/truncate.c  			pvec->pages[j++] = page;
page               99 mm/truncate.c  		__clear_shadow_entry(mapping, index, page);
page              152 mm/truncate.c  void do_invalidatepage(struct page *page, unsigned int offset,
page              155 mm/truncate.c  	void (*invalidatepage)(struct page *, unsigned int, unsigned int);
page              157 mm/truncate.c  	invalidatepage = page->mapping->a_ops->invalidatepage;
page              163 mm/truncate.c  		(*invalidatepage)(page, offset, length);
page              177 mm/truncate.c  truncate_cleanup_page(struct address_space *mapping, struct page *page)
page              179 mm/truncate.c  	if (page_mapped(page)) {
page              180 mm/truncate.c  		pgoff_t nr = PageTransHuge(page) ? HPAGE_PMD_NR : 1;
page              181 mm/truncate.c  		unmap_mapping_pages(mapping, page->index, nr, false);
page              184 mm/truncate.c  	if (page_has_private(page))
page              185 mm/truncate.c  		do_invalidatepage(page, 0, PAGE_SIZE);
page              192 mm/truncate.c  	cancel_dirty_page(page);
page              193 mm/truncate.c  	ClearPageMappedToDisk(page);
page              205 mm/truncate.c  invalidate_complete_page(struct address_space *mapping, struct page *page)
page              209 mm/truncate.c  	if (page->mapping != mapping)
page              212 mm/truncate.c  	if (page_has_private(page) && !try_to_release_page(page, 0))
page              215 mm/truncate.c  	ret = remove_mapping(mapping, page);
page              220 mm/truncate.c  int truncate_inode_page(struct address_space *mapping, struct page *page)
page              222 mm/truncate.c  	VM_BUG_ON_PAGE(PageTail(page), page);
page              224 mm/truncate.c  	if (page->mapping != mapping)
page              227 mm/truncate.c  	truncate_cleanup_page(mapping, page);
page              228 mm/truncate.c  	delete_from_page_cache(page);
page              235 mm/truncate.c  int generic_error_remove_page(struct address_space *mapping, struct page *page)
page              245 mm/truncate.c  	return truncate_inode_page(mapping, page);
page              255 mm/truncate.c  int invalidate_inode_page(struct page *page)
page              257 mm/truncate.c  	struct address_space *mapping = page_mapping(page);
page              260 mm/truncate.c  	if (PageDirty(page) || PageWriteback(page))
page              262 mm/truncate.c  	if (page_mapped(page))
page              264 mm/truncate.c  	return invalidate_complete_page(mapping, page);
page              341 mm/truncate.c  			struct page *page = pvec.pages[i];
page              348 mm/truncate.c  			if (xa_is_value(page))
page              351 mm/truncate.c  			if (!trylock_page(page))
page              353 mm/truncate.c  			WARN_ON(page_to_index(page) != index);
page              354 mm/truncate.c  			if (PageWriteback(page)) {
page              355 mm/truncate.c  				unlock_page(page);
page              358 mm/truncate.c  			if (page->mapping != mapping) {
page              359 mm/truncate.c  				unlock_page(page);
page              362 mm/truncate.c  			pagevec_add(&locked_pvec, page);
page              375 mm/truncate.c  		struct page *page = find_lock_page(mapping, start - 1);
page              376 mm/truncate.c  		if (page) {
page              383 mm/truncate.c  			wait_on_page_writeback(page);
page              384 mm/truncate.c  			zero_user_segment(page, partial_start, top);
page              385 mm/truncate.c  			cleancache_invalidate_page(mapping, page);
page              386 mm/truncate.c  			if (page_has_private(page))
page              387 mm/truncate.c  				do_invalidatepage(page, partial_start,
page              389 mm/truncate.c  			unlock_page(page);
page              390 mm/truncate.c  			put_page(page);
page              394 mm/truncate.c  		struct page *page = find_lock_page(mapping, end);
page              395 mm/truncate.c  		if (page) {
page              396 mm/truncate.c  			wait_on_page_writeback(page);
page              397 mm/truncate.c  			zero_user_segment(page, 0, partial_end);
page              398 mm/truncate.c  			cleancache_invalidate_page(mapping, page);
page              399 mm/truncate.c  			if (page_has_private(page))
page              400 mm/truncate.c  				do_invalidatepage(page, 0,
page              402 mm/truncate.c  			unlock_page(page);
page              403 mm/truncate.c  			put_page(page);
page              433 mm/truncate.c  			struct page *page = pvec.pages[i];
page              443 mm/truncate.c  			if (xa_is_value(page))
page              446 mm/truncate.c  			lock_page(page);
page              447 mm/truncate.c  			WARN_ON(page_to_index(page) != index);
page              448 mm/truncate.c  			wait_on_page_writeback(page);
page              449 mm/truncate.c  			truncate_inode_page(mapping, page);
page              450 mm/truncate.c  			unlock_page(page);
page              561 mm/truncate.c  			struct page *page = pvec.pages[i];
page              568 mm/truncate.c  			if (xa_is_value(page)) {
page              570 mm/truncate.c  							     page);
page              574 mm/truncate.c  			if (!trylock_page(page))
page              577 mm/truncate.c  			WARN_ON(page_to_index(page) != index);
page              580 mm/truncate.c  			if (PageTransTail(page)) {
page              581 mm/truncate.c  				unlock_page(page);
page              583 mm/truncate.c  			} else if (PageTransHuge(page)) {
page              592 mm/truncate.c  					unlock_page(page);
page              597 mm/truncate.c  				get_page(page);
page              607 mm/truncate.c  			ret = invalidate_inode_page(page);
page              608 mm/truncate.c  			unlock_page(page);
page              614 mm/truncate.c  				deactivate_file_page(page);
page              615 mm/truncate.c  			if (PageTransHuge(page))
page              616 mm/truncate.c  				put_page(page);
page              636 mm/truncate.c  invalidate_complete_page2(struct address_space *mapping, struct page *page)
page              640 mm/truncate.c  	if (page->mapping != mapping)
page              643 mm/truncate.c  	if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
page              647 mm/truncate.c  	if (PageDirty(page))
page              650 mm/truncate.c  	BUG_ON(page_has_private(page));
page              651 mm/truncate.c  	__delete_from_page_cache(page, NULL);
page              655 mm/truncate.c  		mapping->a_ops->freepage(page);
page              657 mm/truncate.c  	put_page(page);	/* pagecache ref */
page              664 mm/truncate.c  static int do_launder_page(struct address_space *mapping, struct page *page)
page              666 mm/truncate.c  	if (!PageDirty(page))
page              668 mm/truncate.c  	if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
page              670 mm/truncate.c  	return mapping->a_ops->launder_page(page);
page              704 mm/truncate.c  			struct page *page = pvec.pages[i];
page              711 mm/truncate.c  			if (xa_is_value(page)) {
page              713 mm/truncate.c  								   index, page))
page              718 mm/truncate.c  			lock_page(page);
page              719 mm/truncate.c  			WARN_ON(page_to_index(page) != index);
page              720 mm/truncate.c  			if (page->mapping != mapping) {
page              721 mm/truncate.c  				unlock_page(page);
page              724 mm/truncate.c  			wait_on_page_writeback(page);
page              725 mm/truncate.c  			if (page_mapped(page)) {
page              741 mm/truncate.c  			BUG_ON(page_mapped(page));
page              742 mm/truncate.c  			ret2 = do_launder_page(mapping, page);
page              744 mm/truncate.c  				if (!invalidate_complete_page2(mapping, page))
page              749 mm/truncate.c  			unlock_page(page);
page              869 mm/truncate.c  	struct page *page;
page              882 mm/truncate.c  	page = find_lock_page(inode->i_mapping, index);
page              884 mm/truncate.c  	if (!page)
page              890 mm/truncate.c  	if (page_mkclean(page))
page              891 mm/truncate.c  		set_page_dirty(page);
page              892 mm/truncate.c  	unlock_page(page);
page              893 mm/truncate.c  	put_page(page);
page              161 mm/usercopy.c  				   struct page *page, bool to_user)
page              165 mm/usercopy.c  	struct page *endpage;
page              198 mm/usercopy.c  	if (likely(endpage == page))
page              206 mm/usercopy.c  	is_reserved = PageReserved(page);
page              207 mm/usercopy.c  	is_cma = is_migrate_cma_page(page);
page              212 mm/usercopy.c  		page = virt_to_head_page(ptr);
page              213 mm/usercopy.c  		if (is_reserved && !PageReserved(page))
page              216 mm/usercopy.c  		if (is_cma && !is_migrate_cma_page(page))
page              226 mm/usercopy.c  	struct page *page;
page              236 mm/usercopy.c  	page = compound_head(kmap_to_page((void *)ptr));
page              238 mm/usercopy.c  	if (PageSlab(page)) {
page              240 mm/usercopy.c  		__check_heap_object(ptr, n, page, to_user);
page              243 mm/usercopy.c  		check_page_span(ptr, n, page, to_user);
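
A rough sketch of the span check the mm/usercopy.c entries suggest: a copy that crosses page boundaries must not wander between reserved and CMA memory. The helper name is illustrative and the logic is simplified from the real check_page_span():

#include <linux/mm.h>

static bool sketch_span_ok(const void *ptr, unsigned long n)
{
	struct page *page = virt_to_head_page(ptr);
	struct page *endpage = virt_to_head_page(ptr + n - 1);
	bool is_reserved, is_cma;
	const void *p;

	if (likely(endpage == page))
		return true;	/* single page: nothing more to check */

	is_reserved = PageReserved(page);
	is_cma = is_migrate_cma_page(page);

	/* Every following page must carry the same attributes. */
	for (p = (const void *)((unsigned long)ptr & PAGE_MASK) + PAGE_SIZE;
	     p <= ptr + n - 1; p += PAGE_SIZE) {
		page = virt_to_head_page(p);
		if (is_reserved && !PageReserved(page))
			return false;
		if (is_cma && !is_migrate_cma_page(page))
			return false;
	}
	return true;
}
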
page               26 mm/userfaultfd.c 			    struct page **pagep)
page               33 mm/userfaultfd.c 	struct page *page;
page               39 mm/userfaultfd.c 		page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, dst_vma, dst_addr);
page               40 mm/userfaultfd.c 		if (!page)
page               43 mm/userfaultfd.c 		page_kaddr = kmap_atomic(page);
page               52 mm/userfaultfd.c 			*pagep = page;
page               57 mm/userfaultfd.c 		page = *pagep;
page               66 mm/userfaultfd.c 	__SetPageUptodate(page);
page               69 mm/userfaultfd.c 	if (mem_cgroup_try_charge(page, dst_mm, GFP_KERNEL, &memcg, false))
page               72 mm/userfaultfd.c 	_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
page               91 mm/userfaultfd.c 	page_add_new_anon_rmap(page, dst_vma, dst_addr, false);
page               92 mm/userfaultfd.c 	mem_cgroup_commit_charge(page, memcg, false, false);
page               93 mm/userfaultfd.c 	lru_cache_add_active_or_unevictable(page, dst_vma);
page              106 mm/userfaultfd.c 	mem_cgroup_cancel_charge(page, memcg, false);
page              108 mm/userfaultfd.c 	put_page(page);
page              186 mm/userfaultfd.c 	struct page *page;
page              207 mm/userfaultfd.c 	page = NULL;
page              290 mm/userfaultfd.c 						dst_addr, src_addr, &page);
page              299 mm/userfaultfd.c 			BUG_ON(!page);
page              301 mm/userfaultfd.c 			err = copy_huge_page_from_user(page,
page              313 mm/userfaultfd.c 			BUG_ON(page);
page              330 mm/userfaultfd.c 	if (page) {
page              373 mm/userfaultfd.c 			SetPagePrivate(page);
page              375 mm/userfaultfd.c 			ClearPagePrivate(page);
page              376 mm/userfaultfd.c 		put_page(page);
page              398 mm/userfaultfd.c 						struct page **page,
page              416 mm/userfaultfd.c 					       dst_addr, src_addr, page);
page              424 mm/userfaultfd.c 						     src_addr, page);
page              445 mm/userfaultfd.c 	struct page *page;
page              460 mm/userfaultfd.c 	page = NULL;
page              557 mm/userfaultfd.c 				       src_addr, &page, zeropage);
page              564 mm/userfaultfd.c 			BUG_ON(!page);
page              566 mm/userfaultfd.c 			page_kaddr = kmap(page);
page              570 mm/userfaultfd.c 			kunmap(page);
page              577 mm/userfaultfd.c 			BUG_ON(page);
page              594 mm/userfaultfd.c 	if (page)
page              595 mm/userfaultfd.c 		put_page(page);
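
The mm/userfaultfd.c entries index the UFFDIO_COPY path: a fresh page is filled from a user buffer under kmap_atomic(), where the copy cannot fault, so a short copy makes the caller fault the source in and retry. A minimal sketch of that step (name hypothetical):

#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/uaccess.h>

static int sketch_fill_new_page(struct page *page, unsigned long src_addr)
{
	void *page_kaddr;
	unsigned long left;

	page_kaddr = kmap_atomic(page);
	left = copy_from_user(page_kaddr,
			      (const void __user *)src_addr, PAGE_SIZE);
	kunmap_atomic(page_kaddr);

	if (unlikely(left))
		return -ENOENT;	/* caller faults in src_addr and retries */

	__SetPageUptodate(page);	/* contents are now meaningful */
	return 0;
}
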
page              615 mm/util.c      static inline void *__page_rmapping(struct page *page)
page              619 mm/util.c      	mapping = (unsigned long)page->mapping;
page              626 mm/util.c      void *page_rmapping(struct page *page)
page              628 mm/util.c      	page = compound_head(page);
page              629 mm/util.c      	return __page_rmapping(page);
page              636 mm/util.c      bool page_mapped(struct page *page)
page              640 mm/util.c      	if (likely(!PageCompound(page)))
page              641 mm/util.c      		return atomic_read(&page->_mapcount) >= 0;
page              642 mm/util.c      	page = compound_head(page);
page              643 mm/util.c      	if (atomic_read(compound_mapcount_ptr(page)) >= 0)
page              645 mm/util.c      	if (PageHuge(page))
page              647 mm/util.c      	for (i = 0; i < compound_nr(page); i++) {
page              648 mm/util.c      		if (atomic_read(&page[i]._mapcount) >= 0)
page              655 mm/util.c      struct anon_vma *page_anon_vma(struct page *page)
page              659 mm/util.c      	page = compound_head(page);
page              660 mm/util.c      	mapping = (unsigned long)page->mapping;
page              663 mm/util.c      	return __page_rmapping(page);
page              666 mm/util.c      struct address_space *page_mapping(struct page *page)
page              670 mm/util.c      	page = compound_head(page);
page              673 mm/util.c      	if (unlikely(PageSlab(page)))
page              676 mm/util.c      	if (unlikely(PageSwapCache(page))) {
page              679 mm/util.c      		entry.val = page_private(page);
page              683 mm/util.c      	mapping = page->mapping;
page              694 mm/util.c      struct address_space *page_mapping_file(struct page *page)
page              696 mm/util.c      	if (unlikely(PageSwapCache(page)))
page              698 mm/util.c      	return page_mapping(page);
page              702 mm/util.c      int __page_mapcount(struct page *page)
page              706 mm/util.c      	ret = atomic_read(&page->_mapcount) + 1;
page              711 mm/util.c      	if (!PageAnon(page) && !PageHuge(page))
page              713 mm/util.c      	page = compound_head(page);
page              714 mm/util.c      	ret += atomic_read(compound_mapcount_ptr(page)) + 1;
page              715 mm/util.c      	if (PageDoubleMap(page))
page              910 mm/util.c      int memcmp_pages(struct page *page1, struct page *page2)
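
memcmp_pages() is indexed above by prototype only; a plausible body, consistent with the kmap_atomic() idiom used throughout this listing, would be:

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/string.h>

/* Compare two pages' contents through temporary kernel mappings. */
static int sketch_memcmp_pages(struct page *page1, struct page *page2)
{
	char *addr1, *addr2;
	int ret;

	addr1 = kmap_atomic(page1);
	addr2 = kmap_atomic(page2);
	ret = memcmp(addr1, addr2, PAGE_SIZE);
	kunmap_atomic(addr2);
	kunmap_atomic(addr1);
	return ret;
}
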
page              139 mm/vmalloc.c   		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
page              152 mm/vmalloc.c   		struct page *page = pages[*nr];
page              156 mm/vmalloc.c   		if (WARN_ON(!page))
page              158 mm/vmalloc.c   		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
page              165 mm/vmalloc.c   		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
page              182 mm/vmalloc.c   		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
page              199 mm/vmalloc.c   		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
page              222 mm/vmalloc.c   				   pgprot_t prot, struct page **pages)
page              243 mm/vmalloc.c   			   pgprot_t prot, struct page **pages)
page              270 mm/vmalloc.c   struct page *vmalloc_to_page(const void *vmalloc_addr)
page              273 mm/vmalloc.c   	struct page *page = NULL;
page              312 mm/vmalloc.c   		page = pte_page(pte);
page              314 mm/vmalloc.c   	return page;
page             1778 mm/vmalloc.c   void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
page             1963 mm/vmalloc.c   			     pgprot_t prot, struct page **pages)
page             2006 mm/vmalloc.c   int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page **pages)
page             2172 mm/vmalloc.c   				       int (*set_direct_map)(struct page *page))
page             2255 mm/vmalloc.c   			struct page *page = area->pages[i];
page             2257 mm/vmalloc.c   			BUG_ON(!page);
page             2258 mm/vmalloc.c   			__free_pages(page, 0);
page             2370 mm/vmalloc.c   void *vmap(struct page **pages, unsigned int count,
page             2401 mm/vmalloc.c   	struct page **pages;
page             2410 mm/vmalloc.c   	array_size = (nr_pages * sizeof(struct page *));
page             2430 mm/vmalloc.c   		struct page *page;
page             2433 mm/vmalloc.c   			page = alloc_page(alloc_mask|highmem_mask);
page             2435 mm/vmalloc.c   			page = alloc_pages_node(node, alloc_mask|highmem_mask, 0);
page             2437 mm/vmalloc.c   		if (unlikely(!page)) {
page             2443 mm/vmalloc.c   		area->pages[i] = page;
page             2748 mm/vmalloc.c   	struct page *p;
page             2787 mm/vmalloc.c   	struct page *p;
page             3021 mm/vmalloc.c   		struct page *page = vmalloc_to_page(kaddr);
page             3024 mm/vmalloc.c   		ret = vm_insert_page(vma, uaddr, page);
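
The last two mm/vmalloc.c entries pair vmalloc_to_page() with vm_insert_page(): vmalloc memory has no contiguous physical backing, so mapping it into userspace goes page by page. A sketch, assuming the vma and buffer are page-aligned and equally sized:

#include <linux/mm.h>
#include <linux/vmalloc.h>

static int sketch_mmap_vmalloc_buf(struct vm_area_struct *vma, void *buf)
{
	unsigned long uaddr = vma->vm_start;
	unsigned long size = vma->vm_end - vma->vm_start;
	int ret;

	while (size) {
		/* Resolve the backing page of this vmalloc address... */
		struct page *page = vmalloc_to_page(buf);

		/* ...and insert it at the matching user address. */
		ret = vm_insert_page(vma, uaddr, page);
		if (ret)
			return ret;

		uaddr += PAGE_SIZE;
		buf += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	return 0;
}
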
page              140 mm/vmscan.c    			struct page *prev;				\
page              154 mm/vmscan.c    			struct page *prev;				\
page              766 mm/vmscan.c    static inline int is_page_cache_freeable(struct page *page)
page              773 mm/vmscan.c    	int page_cache_pins = PageTransHuge(page) && PageSwapCache(page) ?
page              775 mm/vmscan.c    	return page_count(page) - page_has_private(page) == 1 + page_cache_pins;
page              802 mm/vmscan.c    				struct page *page, int error)
page              804 mm/vmscan.c    	lock_page(page);
page              805 mm/vmscan.c    	if (page_mapping(page) == mapping)
page              807 mm/vmscan.c    	unlock_page(page);
page              826 mm/vmscan.c    static pageout_t pageout(struct page *page, struct address_space *mapping,
page              845 mm/vmscan.c    	if (!is_page_cache_freeable(page))
page              852 mm/vmscan.c    		if (page_has_private(page)) {
page              853 mm/vmscan.c    			if (try_to_free_buffers(page)) {
page              854 mm/vmscan.c    				ClearPageDirty(page);
page              866 mm/vmscan.c    	if (clear_page_dirty_for_io(page)) {
page              876 mm/vmscan.c    		SetPageReclaim(page);
page              877 mm/vmscan.c    		res = mapping->a_ops->writepage(page, &wbc);
page              879 mm/vmscan.c    			handle_write_error(mapping, page, res);
page              881 mm/vmscan.c    			ClearPageReclaim(page);
page              885 mm/vmscan.c    		if (!PageWriteback(page)) {
page              887 mm/vmscan.c    			ClearPageReclaim(page);
page              889 mm/vmscan.c    		trace_mm_vmscan_writepage(page);
page              890 mm/vmscan.c    		inc_node_page_state(page, NR_VMSCAN_WRITE);
page              901 mm/vmscan.c    static int __remove_mapping(struct address_space *mapping, struct page *page,
page              907 mm/vmscan.c    	BUG_ON(!PageLocked(page));
page              908 mm/vmscan.c    	BUG_ON(mapping != page_mapping(page));
page              936 mm/vmscan.c    	refcount = 1 + compound_nr(page);
page              937 mm/vmscan.c    	if (!page_ref_freeze(page, refcount))
page              940 mm/vmscan.c    	if (unlikely(PageDirty(page))) {
page              941 mm/vmscan.c    		page_ref_unfreeze(page, refcount);
page              945 mm/vmscan.c    	if (PageSwapCache(page)) {
page              946 mm/vmscan.c    		swp_entry_t swap = { .val = page_private(page) };
page              947 mm/vmscan.c    		mem_cgroup_swapout(page, swap);
page              948 mm/vmscan.c    		__delete_from_swap_cache(page, swap);
page              950 mm/vmscan.c    		put_swap_page(page, swap);
page              952 mm/vmscan.c    		void (*freepage)(struct page *);
page              972 mm/vmscan.c    		if (reclaimed && page_is_file_cache(page) &&
page              974 mm/vmscan.c    			shadow = workingset_eviction(page);
page              975 mm/vmscan.c    		__delete_from_page_cache(page, shadow);
page              979 mm/vmscan.c    			freepage(page);
page              995 mm/vmscan.c    int remove_mapping(struct address_space *mapping, struct page *page)
page              997 mm/vmscan.c    	if (__remove_mapping(mapping, page, false)) {
page             1003 mm/vmscan.c    		page_ref_unfreeze(page, 1);
page             1018 mm/vmscan.c    void putback_lru_page(struct page *page)
page             1020 mm/vmscan.c    	lru_cache_add(page);
page             1021 mm/vmscan.c    	put_page(page);		/* drop ref from isolate */
page             1031 mm/vmscan.c    static enum page_references page_check_references(struct page *page,
page             1037 mm/vmscan.c    	referenced_ptes = page_referenced(page, 1, sc->target_mem_cgroup,
page             1039 mm/vmscan.c    	referenced_page = TestClearPageReferenced(page);
page             1049 mm/vmscan.c    		if (PageSwapBacked(page))
page             1065 mm/vmscan.c    		SetPageReferenced(page);
page             1080 mm/vmscan.c    	if (referenced_page && !PageSwapBacked(page))
page             1087 mm/vmscan.c    static void page_check_dirty_writeback(struct page *page,
page             1096 mm/vmscan.c    	if (!page_is_file_cache(page) ||
page             1097 mm/vmscan.c    	    (PageAnon(page) && !PageSwapBacked(page))) {
page             1104 mm/vmscan.c    	*dirty = PageDirty(page);
page             1105 mm/vmscan.c    	*writeback = PageWriteback(page);
page             1108 mm/vmscan.c    	if (!page_has_private(page))
page             1111 mm/vmscan.c    	mapping = page_mapping(page);
page             1113 mm/vmscan.c    		mapping->a_ops->is_dirty_writeback(page, dirty, writeback);
page             1136 mm/vmscan.c    		struct page *page;
page             1144 mm/vmscan.c    		page = lru_to_page(page_list);
page             1145 mm/vmscan.c    		list_del(&page->lru);
page             1147 mm/vmscan.c    		if (!trylock_page(page))
page             1150 mm/vmscan.c    		VM_BUG_ON_PAGE(PageActive(page), page);
page             1152 mm/vmscan.c    		nr_pages = compound_nr(page);
page             1157 mm/vmscan.c    		if (unlikely(!page_evictable(page)))
page             1160 mm/vmscan.c    		if (!sc->may_unmap && page_mapped(page))
page             1164 mm/vmscan.c    			(PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
page             1172 mm/vmscan.c    		page_check_dirty_writeback(page, &dirty, &writeback);
page             1185 mm/vmscan.c    		mapping = page_mapping(page);
page             1188 mm/vmscan.c    		    (writeback && PageReclaim(page)))
page             1233 mm/vmscan.c    		if (PageWriteback(page)) {
page             1236 mm/vmscan.c    			    PageReclaim(page) &&
page             1243 mm/vmscan.c    			    !PageReclaim(page) || !may_enter_fs) {
page             1255 mm/vmscan.c    				SetPageReclaim(page);
page             1261 mm/vmscan.c    				unlock_page(page);
page             1262 mm/vmscan.c    				wait_on_page_writeback(page);
page             1264 mm/vmscan.c    				list_add_tail(&page->lru, page_list);
page             1270 mm/vmscan.c    			references = page_check_references(page, sc);
page             1288 mm/vmscan.c    		if (PageAnon(page) && PageSwapBacked(page)) {
page             1289 mm/vmscan.c    			if (!PageSwapCache(page)) {
page             1292 mm/vmscan.c    				if (PageTransHuge(page)) {
page             1294 mm/vmscan.c    					if (!can_split_huge_page(page, NULL))
page             1301 mm/vmscan.c    					if (!compound_mapcount(page) &&
page             1302 mm/vmscan.c    					    split_huge_page_to_list(page,
page             1306 mm/vmscan.c    				if (!add_to_swap(page)) {
page             1307 mm/vmscan.c    					if (!PageTransHuge(page))
page             1310 mm/vmscan.c    					if (split_huge_page_to_list(page,
page             1316 mm/vmscan.c    					if (!add_to_swap(page))
page             1323 mm/vmscan.c    				mapping = page_mapping(page);
page             1325 mm/vmscan.c    		} else if (unlikely(PageTransHuge(page))) {
page             1327 mm/vmscan.c    			if (split_huge_page_to_list(page, page_list))
page             1338 mm/vmscan.c    		if ((nr_pages > 1) && !PageTransHuge(page)) {
page             1347 mm/vmscan.c    		if (page_mapped(page)) {
page             1350 mm/vmscan.c    			if (unlikely(PageTransHuge(page)))
page             1352 mm/vmscan.c    			if (!try_to_unmap(page, flags)) {
page             1358 mm/vmscan.c    		if (PageDirty(page)) {
page             1369 mm/vmscan.c    			if (page_is_file_cache(page) &&
page             1370 mm/vmscan.c    			    (!current_is_kswapd() || !PageReclaim(page) ||
page             1378 mm/vmscan.c    				inc_node_page_state(page, NR_VMSCAN_IMMEDIATE);
page             1379 mm/vmscan.c    				SetPageReclaim(page);
page             1397 mm/vmscan.c    			switch (pageout(page, mapping, sc)) {
page             1403 mm/vmscan.c    				if (PageWriteback(page))
page             1405 mm/vmscan.c    				if (PageDirty(page))
page             1412 mm/vmscan.c    				if (!trylock_page(page))
page             1414 mm/vmscan.c    				if (PageDirty(page) || PageWriteback(page))
page             1416 mm/vmscan.c    				mapping = page_mapping(page);
page             1443 mm/vmscan.c    		if (page_has_private(page)) {
page             1444 mm/vmscan.c    			if (!try_to_release_page(page, sc->gfp_mask))
page             1446 mm/vmscan.c    			if (!mapping && page_count(page) == 1) {
page             1447 mm/vmscan.c    				unlock_page(page);
page             1448 mm/vmscan.c    				if (put_page_testzero(page))
page             1464 mm/vmscan.c    		if (PageAnon(page) && !PageSwapBacked(page)) {
page             1466 mm/vmscan.c    			if (!page_ref_freeze(page, 1))
page             1468 mm/vmscan.c    			if (PageDirty(page)) {
page             1469 mm/vmscan.c    				page_ref_unfreeze(page, 1);
page             1474 mm/vmscan.c    			count_memcg_page_event(page, PGLAZYFREED);
page             1475 mm/vmscan.c    		} else if (!mapping || !__remove_mapping(mapping, page, true))
page             1478 mm/vmscan.c    		unlock_page(page);
page             1490 mm/vmscan.c    		if (unlikely(PageTransHuge(page)))
page             1491 mm/vmscan.c    			(*get_compound_page_dtor(page))(page);
page             1493 mm/vmscan.c    			list_add(&page->lru, &free_pages);
page             1507 mm/vmscan.c    		if (PageSwapCache(page) && (mem_cgroup_swap_full(page) ||
page             1508 mm/vmscan.c    						PageMlocked(page)))
page             1509 mm/vmscan.c    			try_to_free_swap(page);
page             1510 mm/vmscan.c    		VM_BUG_ON_PAGE(PageActive(page), page);
page             1511 mm/vmscan.c    		if (!PageMlocked(page)) {
page             1512 mm/vmscan.c    			int type = page_is_file_cache(page);
page             1513 mm/vmscan.c    			SetPageActive(page);
page             1515 mm/vmscan.c    			count_memcg_page_event(page, PGACTIVATE);
page             1518 mm/vmscan.c    		unlock_page(page);
page             1520 mm/vmscan.c    		list_add(&page->lru, &ret_pages);
page             1521 mm/vmscan.c    		VM_BUG_ON_PAGE(PageLRU(page) || PageUnevictable(page), page);
page             1546 mm/vmscan.c    	struct page *page, *next;
page             1549 mm/vmscan.c    	list_for_each_entry_safe(page, next, page_list, lru) {
page             1550 mm/vmscan.c    		if (page_is_file_cache(page) && !PageDirty(page) &&
page             1551 mm/vmscan.c    		    !__PageMovable(page) && !PageUnevictable(page)) {
page             1552 mm/vmscan.c    			ClearPageActive(page);
page             1553 mm/vmscan.c    			list_move(&page->lru, &clean_pages);
page             1574 mm/vmscan.c    int __isolate_lru_page(struct page *page, isolate_mode_t mode)
page             1579 mm/vmscan.c    	if (!PageLRU(page))
page             1583 mm/vmscan.c    	if (PageUnevictable(page) && !(mode & ISOLATE_UNEVICTABLE))
page             1598 mm/vmscan.c    		if (PageWriteback(page))
page             1601 mm/vmscan.c    		if (PageDirty(page)) {
page             1614 mm/vmscan.c    			if (!trylock_page(page))
page             1617 mm/vmscan.c    			mapping = page_mapping(page);
page             1619 mm/vmscan.c    			unlock_page(page);
page             1625 mm/vmscan.c    	if ((mode & ISOLATE_UNMAPPED) && page_mapped(page))
page             1628 mm/vmscan.c    	if (likely(get_page_unless_zero(page))) {
page             1634 mm/vmscan.c    		ClearPageLRU(page);
page             1700 mm/vmscan.c    		struct page *page;
page             1702 mm/vmscan.c    		page = lru_to_page(src);
page             1703 mm/vmscan.c    		prefetchw_prev_lru_page(page, src, flags);
page             1705 mm/vmscan.c    		VM_BUG_ON_PAGE(!PageLRU(page), page);
page             1707 mm/vmscan.c    		nr_pages = compound_nr(page);
page             1710 mm/vmscan.c    		if (page_zonenum(page) > sc->reclaim_idx) {
page             1711 mm/vmscan.c    			list_move(&page->lru, &pages_skipped);
page             1712 mm/vmscan.c    			nr_skipped[page_zonenum(page)] += nr_pages;
page             1727 mm/vmscan.c    		switch (__isolate_lru_page(page, mode)) {
page             1730 mm/vmscan.c    			nr_zone_taken[page_zonenum(page)] += nr_pages;
page             1731 mm/vmscan.c    			list_move(&page->lru, dst);
page             1736 mm/vmscan.c    			list_move(&page->lru, src);
page             1796 mm/vmscan.c    int isolate_lru_page(struct page *page)
page             1800 mm/vmscan.c    	VM_BUG_ON_PAGE(!page_count(page), page);
page             1801 mm/vmscan.c    	WARN_RATELIMIT(PageTail(page), "trying to isolate tail page");
page             1803 mm/vmscan.c    	if (PageLRU(page)) {
page             1804 mm/vmscan.c    		pg_data_t *pgdat = page_pgdat(page);
page             1808 mm/vmscan.c    		lruvec = mem_cgroup_page_lruvec(page, pgdat);
page             1809 mm/vmscan.c    		if (PageLRU(page)) {
page             1810 mm/vmscan.c    			int lru = page_lru(page);
page             1811 mm/vmscan.c    			get_page(page);
page             1812 mm/vmscan.c    			ClearPageLRU(page);
page             1813 mm/vmscan.c    			del_page_from_lru_list(page, lruvec, lru);
page             1884 mm/vmscan.c    	struct page *page;
page             1888 mm/vmscan.c    		page = lru_to_page(list);
page             1889 mm/vmscan.c    		VM_BUG_ON_PAGE(PageLRU(page), page);
page             1890 mm/vmscan.c    		if (unlikely(!page_evictable(page))) {
page             1891 mm/vmscan.c    			list_del(&page->lru);
page             1893 mm/vmscan.c    			putback_lru_page(page);
page             1897 mm/vmscan.c    		lruvec = mem_cgroup_page_lruvec(page, pgdat);
page             1899 mm/vmscan.c    		SetPageLRU(page);
page             1900 mm/vmscan.c    		lru = page_lru(page);
page             1902 mm/vmscan.c    		nr_pages = hpage_nr_pages(page);
page             1903 mm/vmscan.c    		update_lru_size(lruvec, lru, page_zonenum(page), nr_pages);
page             1904 mm/vmscan.c    		list_move(&page->lru, &lruvec->lists[lru]);
page             1906 mm/vmscan.c    		if (put_page_testzero(page)) {
page             1907 mm/vmscan.c    			__ClearPageLRU(page);
page             1908 mm/vmscan.c    			__ClearPageActive(page);
page             1909 mm/vmscan.c    			del_page_from_lru_list(page, lruvec, lru);
page             1911 mm/vmscan.c    			if (unlikely(PageCompound(page))) {
page             1913 mm/vmscan.c    				(*get_compound_page_dtor(page))(page);
page             1916 mm/vmscan.c    				list_add(&page->lru, &pages_to_free);
page             2054 mm/vmscan.c    	struct page *page;
page             2078 mm/vmscan.c    		page = lru_to_page(&l_hold);
page             2079 mm/vmscan.c    		list_del(&page->lru);
page             2081 mm/vmscan.c    		if (unlikely(!page_evictable(page))) {
page             2082 mm/vmscan.c    			putback_lru_page(page);
page             2087 mm/vmscan.c    			if (page_has_private(page) && trylock_page(page)) {
page             2088 mm/vmscan.c    				if (page_has_private(page))
page             2089 mm/vmscan.c    					try_to_release_page(page, 0);
page             2090 mm/vmscan.c    				unlock_page(page);
page             2094 mm/vmscan.c    		if (page_referenced(page, 0, sc->target_mem_cgroup,
page             2096 mm/vmscan.c    			nr_rotated += hpage_nr_pages(page);
page             2106 mm/vmscan.c    			if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) {
page             2107 mm/vmscan.c    				list_add(&page->lru, &l_active);
page             2112 mm/vmscan.c    		ClearPageActive(page);	/* we are de-activating */
page             2113 mm/vmscan.c    		SetPageWorkingset(page);
page             2114 mm/vmscan.c    		list_add(&page->lru, &l_inactive);
page             2152 mm/vmscan.c    	struct page *page;
page             2162 mm/vmscan.c    		page = lru_to_page(page_list);
page             2164 mm/vmscan.c    			nid = page_to_nid(page);
page             2168 mm/vmscan.c    		if (nid == page_to_nid(page)) {
page             2169 mm/vmscan.c    			ClearPageActive(page);
page             2170 mm/vmscan.c    			list_move(&page->lru, &node_page_list);
page             2179 mm/vmscan.c    			page = lru_to_page(&node_page_list);
page             2180 mm/vmscan.c    			list_del(&page->lru);
page             2181 mm/vmscan.c    			putback_lru_page(page);
page             2193 mm/vmscan.c    			page = lru_to_page(&node_page_list);
page             2194 mm/vmscan.c    			list_del(&page->lru);
page             2195 mm/vmscan.c    			putback_lru_page(page);
page             4318 mm/vmscan.c    int page_evictable(struct page *page)
page             4324 mm/vmscan.c    	ret = !mapping_unevictable(page_mapping(page)) && !PageMlocked(page);
page             4347 mm/vmscan.c    		struct page *page = pvec->pages[i];
page             4348 mm/vmscan.c    		struct pglist_data *pagepgdat = page_pgdat(page);
page             4357 mm/vmscan.c    		lruvec = mem_cgroup_page_lruvec(page, pgdat);
page             4359 mm/vmscan.c    		if (!PageLRU(page) || !PageUnevictable(page))
page             4362 mm/vmscan.c    		if (page_evictable(page)) {
page             4363 mm/vmscan.c    			enum lru_list lru = page_lru_base_type(page);
page             4365 mm/vmscan.c    			VM_BUG_ON_PAGE(PageActive(page), page);
page             4366 mm/vmscan.c    			ClearPageUnevictable(page);
page             4367 mm/vmscan.c    			del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE);
page             4368 mm/vmscan.c    			add_page_to_lru_list(page, lruvec, lru);
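
is_page_cache_freeable(), indexed near the top of the mm/vmscan.c group, is pure reference arithmetic. Restated as a sketch, with THP configured so that a huge page in swap cache holds one cache pin per subpage:

#include <linux/huge_mm.h>
#include <linux/mm.h>

/* Freeable when the only remaining references are the caller's
 * isolation ref (1), the page cache's pin(s), and a buffer_head
 * private ref if one exists. */
static bool sketch_page_cache_freeable(struct page *page)
{
	int cache_pins = PageTransHuge(page) && PageSwapCache(page) ?
			 HPAGE_PMD_NR : 1;

	return page_count(page) - page_has_private(page) == 1 + cache_pins;
}
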
page              411 mm/vmstat.c    void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
page              413 mm/vmstat.c    	__inc_zone_state(page_zone(page), item);
page              417 mm/vmstat.c    void __inc_node_page_state(struct page *page, enum node_stat_item item)
page              419 mm/vmstat.c    	__inc_node_state(page_pgdat(page), item);
page              455 mm/vmstat.c    void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
page              457 mm/vmstat.c    	__dec_zone_state(page_zone(page), item);
page              461 mm/vmstat.c    void __dec_node_page_state(struct page *page, enum node_stat_item item)
page              463 mm/vmstat.c    	__dec_node_state(page_pgdat(page), item);
page              525 mm/vmstat.c    void inc_zone_page_state(struct page *page, enum zone_stat_item item)
page              527 mm/vmstat.c    	mod_zone_state(page_zone(page), item, 1, 1);
page              531 mm/vmstat.c    void dec_zone_page_state(struct page *page, enum zone_stat_item item)
page              533 mm/vmstat.c    	mod_zone_state(page_zone(page), item, -1, -1);
page              587 mm/vmstat.c    void inc_node_page_state(struct page *page, enum node_stat_item item)
page              589 mm/vmstat.c    	mod_node_state(page_pgdat(page), item, 1, 1);
page              593 mm/vmstat.c    void dec_node_page_state(struct page *page, enum node_stat_item item)
page              595 mm/vmstat.c    	mod_node_state(page_pgdat(page), item, -1, -1);
page              613 mm/vmstat.c    void inc_zone_page_state(struct page *page, enum zone_stat_item item)
page              618 mm/vmstat.c    	zone = page_zone(page);
page              625 mm/vmstat.c    void dec_zone_page_state(struct page *page, enum zone_stat_item item)
page              630 mm/vmstat.c    	__dec_zone_page_state(page, item);
page              656 mm/vmstat.c    void inc_node_page_state(struct page *page, enum node_stat_item item)
page              661 mm/vmstat.c    	pgdat = page_pgdat(page);
page              668 mm/vmstat.c    void dec_node_page_state(struct page *page, enum node_stat_item item)
page              673 mm/vmstat.c    	__dec_node_page_state(page, item);
page             1441 mm/vmstat.c    		struct page *page;
page             1443 mm/vmstat.c    		page = pfn_to_online_page(pfn);
page             1444 mm/vmstat.c    		if (!page)
page             1448 mm/vmstat.c    		if (!memmap_valid_within(pfn, page, zone))
page             1451 mm/vmstat.c    		if (page_zone(page) != zone)
page             1454 mm/vmstat.c    		mtype = get_pageblock_migratetype(page);
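
The mm/vmstat.c entries come in two flavors: __-prefixed accessors that assume the caller already provides serialization (interrupts off), and plain ones that are safe anywhere. A usage sketch; the stat item is chosen arbitrarily for illustration:

#include <linux/irqflags.h>
#include <linux/mm.h>
#include <linux/vmstat.h>

static void sketch_account_page(struct page *page)
{
	/* Self-serializing: usable from any context. */
	inc_node_page_state(page, NR_FILE_PAGES);

	/* Cheaper __ variant: only correct with interrupts disabled. */
	local_irq_disable();
	__dec_node_page_state(page, NR_FILE_PAGES);
	local_irq_enable();
}
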
page              223 mm/workingset.c void *workingset_eviction(struct page *page)
page              225 mm/workingset.c 	struct pglist_data *pgdat = page_pgdat(page);
page              226 mm/workingset.c 	struct mem_cgroup *memcg = page_memcg(page);
page              232 mm/workingset.c 	VM_BUG_ON_PAGE(PageLRU(page), page);
page              233 mm/workingset.c 	VM_BUG_ON_PAGE(page_count(page), page);
page              234 mm/workingset.c 	VM_BUG_ON_PAGE(!PageLocked(page), page);
page              238 mm/workingset.c 	return pack_shadow(memcgid, pgdat, eviction, PageWorkingset(page));
page              249 mm/workingset.c void workingset_refault(struct page *page, void *shadow)
page              315 mm/workingset.c 	SetPageActive(page);
page              321 mm/workingset.c 		SetPageWorkingset(page);
page              332 mm/workingset.c void workingset_activation(struct page *page)
page              345 mm/workingset.c 	memcg = page_memcg_rcu(page);
page              348 mm/workingset.c 	lruvec = mem_cgroup_lruvec(page_pgdat(page), memcg);
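
The mm/workingset.c entries describe the eviction/refault handshake: on eviction the page cache slot keeps a packed shadow cookie instead of going empty, and a later refault hands that cookie back to workingset_refault(). A reclaim-side sketch, assuming the caller holds the locks __remove_mapping() holds:

#include <linux/pagemap.h>
#include <linux/swap.h>

static void sketch_evict_with_shadow(struct page *page)
{
	/* Pack memcg id, node and eviction "clock" into one word... */
	void *shadow = workingset_eviction(page);

	/* ...and leave it in the slot the page used to occupy. */
	__delete_from_page_cache(page, shadow);
}
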
page              298 mm/z3fold.c    static struct z3fold_header *init_z3fold_page(struct page *page, bool headless,
page              301 mm/z3fold.c    	struct z3fold_header *zhdr = page_address(page);
page              304 mm/z3fold.c    	INIT_LIST_HEAD(&page->lru);
page              305 mm/z3fold.c    	clear_bit(PAGE_HEADLESS, &page->private);
page              306 mm/z3fold.c    	clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
page              307 mm/z3fold.c    	clear_bit(NEEDS_COMPACTING, &page->private);
page              308 mm/z3fold.c    	clear_bit(PAGE_STALE, &page->private);
page              309 mm/z3fold.c    	clear_bit(PAGE_CLAIMED, &page->private);
page              333 mm/z3fold.c    static void free_z3fold_page(struct page *page, bool headless)
page              336 mm/z3fold.c    		lock_page(page);
page              337 mm/z3fold.c    		__ClearPageMovable(page);
page              338 mm/z3fold.c    		unlock_page(page);
page              340 mm/z3fold.c    	ClearPagePrivate(page);
page              341 mm/z3fold.c    	__free_page(page);
page              443 mm/z3fold.c    	struct page *page = virt_to_page(zhdr);
page              447 mm/z3fold.c    	set_bit(PAGE_STALE, &page->private);
page              448 mm/z3fold.c    	clear_bit(NEEDS_COMPACTING, &page->private);
page              450 mm/z3fold.c    	if (!list_empty(&page->lru))
page              451 mm/z3fold.c    		list_del_init(&page->lru);
page              498 mm/z3fold.c    		struct page *page = virt_to_page(zhdr);
page              501 mm/z3fold.c    		if (WARN_ON(!test_bit(PAGE_STALE, &page->private)))
page              505 mm/z3fold.c    		free_z3fold_page(page, false);
page              566 mm/z3fold.c    	struct page *page = virt_to_page(zhdr);
page              568 mm/z3fold.c    	if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private))
page              571 mm/z3fold.c    	if (unlikely(PageIsolated(page)))
page              614 mm/z3fold.c    	struct page *page;
page              616 mm/z3fold.c    	page = virt_to_page(zhdr);
page              621 mm/z3fold.c    	if (WARN_ON(!test_and_clear_bit(NEEDS_COMPACTING, &page->private))) {
page              634 mm/z3fold.c    	if (unlikely(PageIsolated(page) ||
page              635 mm/z3fold.c    		     test_bit(PAGE_CLAIMED, &page->private) ||
page              636 mm/z3fold.c    		     test_bit(PAGE_STALE, &page->private))) {
page              659 mm/z3fold.c    	struct page *page;
page              692 mm/z3fold.c    		page = virt_to_page(zhdr);
page              693 mm/z3fold.c    		if (test_bit(NEEDS_COMPACTING, &page->private)) {
page              736 mm/z3fold.c    			page = virt_to_page(zhdr);
page              737 mm/z3fold.c    			if (test_bit(NEEDS_COMPACTING, &page->private)) {
page              869 mm/z3fold.c    	struct page *page = NULL;
page              905 mm/z3fold.c    			page = virt_to_page(zhdr);
page              911 mm/z3fold.c    	page = NULL;
page              925 mm/z3fold.c    			page = virt_to_page(zhdr);
page              930 mm/z3fold.c    	if (!page)
page              931 mm/z3fold.c    		page = alloc_page(gfp);
page              933 mm/z3fold.c    	if (!page)
page              936 mm/z3fold.c    	zhdr = init_z3fold_page(page, bud == HEADLESS, pool, gfp);
page              938 mm/z3fold.c    		__free_page(page);
page              944 mm/z3fold.c    		set_bit(PAGE_HEADLESS, &page->private);
page              948 mm/z3fold.c    		lock_page(page);
page              949 mm/z3fold.c    		__SetPageMovable(page, pool->inode->i_mapping);
page              950 mm/z3fold.c    		unlock_page(page);
page              952 mm/z3fold.c    		if (trylock_page(page)) {
page              953 mm/z3fold.c    			__SetPageMovable(page, pool->inode->i_mapping);
page              954 mm/z3fold.c    			unlock_page(page);
page              973 mm/z3fold.c    	if (!list_empty(&page->lru))
page              974 mm/z3fold.c    		list_del(&page->lru);
page              976 mm/z3fold.c    	list_add(&page->lru, &pool->lru);
page              999 mm/z3fold.c    	struct page *page;
page             1004 mm/z3fold.c    	page = virt_to_page(zhdr);
page             1005 mm/z3fold.c    	page_claimed = test_and_set_bit(PAGE_CLAIMED, &page->private);
page             1007 mm/z3fold.c    	if (test_bit(PAGE_HEADLESS, &page->private)) {
page             1015 mm/z3fold.c    			list_del(&page->lru);
page             1017 mm/z3fold.c    			free_z3fold_page(page, true);
page             1054 mm/z3fold.c    	if (unlikely(PageIsolated(page)) ||
page             1055 mm/z3fold.c    	    test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
page             1057 mm/z3fold.c    		clear_bit(PAGE_CLAIMED, &page->private);
page             1067 mm/z3fold.c    		clear_bit(PAGE_CLAIMED, &page->private);
page             1072 mm/z3fold.c    	clear_bit(PAGE_CLAIMED, &page->private);
page             1116 mm/z3fold.c    	struct page *page = NULL;
page             1132 mm/z3fold.c    			page = list_entry(pos, struct page, lru);
page             1137 mm/z3fold.c    			if (test_and_set_bit(PAGE_CLAIMED, &page->private)) {
page             1138 mm/z3fold.c    				page = NULL;
page             1142 mm/z3fold.c    			if (unlikely(PageIsolated(page))) {
page             1143 mm/z3fold.c    				clear_bit(PAGE_CLAIMED, &page->private);
page             1144 mm/z3fold.c    				page = NULL;
page             1147 mm/z3fold.c    			zhdr = page_address(page);
page             1148 mm/z3fold.c    			if (test_bit(PAGE_HEADLESS, &page->private))
page             1152 mm/z3fold.c    				clear_bit(PAGE_CLAIMED, &page->private);
page             1165 mm/z3fold.c    		list_del_init(&page->lru);
page             1168 mm/z3fold.c    		if (!test_bit(PAGE_HEADLESS, &page->private)) {
page             1214 mm/z3fold.c    		if (test_bit(PAGE_HEADLESS, &page->private)) {
page             1216 mm/z3fold.c    				free_z3fold_page(page, true);
page             1221 mm/z3fold.c    			list_add(&page->lru, &pool->lru);
page             1223 mm/z3fold.c    			clear_bit(PAGE_CLAIMED, &page->private);
page             1237 mm/z3fold.c    			list_add(&page->lru, &pool->lru);
page             1240 mm/z3fold.c    			clear_bit(PAGE_CLAIMED, &page->private);
page             1263 mm/z3fold.c    	struct page *page;
page             1269 mm/z3fold.c    	page = virt_to_page(zhdr);
page             1271 mm/z3fold.c    	if (test_bit(PAGE_HEADLESS, &page->private))
page             1282 mm/z3fold.c    		set_bit(MIDDLE_CHUNK_MAPPED, &page->private);
page             1309 mm/z3fold.c    	struct page *page;
page             1313 mm/z3fold.c    	page = virt_to_page(zhdr);
page             1315 mm/z3fold.c    	if (test_bit(PAGE_HEADLESS, &page->private))
page             1321 mm/z3fold.c    		clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
page             1337 mm/z3fold.c    static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
page             1342 mm/z3fold.c    	VM_BUG_ON_PAGE(!PageMovable(page), page);
page             1343 mm/z3fold.c    	VM_BUG_ON_PAGE(PageIsolated(page), page);
page             1345 mm/z3fold.c    	if (test_bit(PAGE_HEADLESS, &page->private) ||
page             1346 mm/z3fold.c    	    test_bit(PAGE_CLAIMED, &page->private))
page             1349 mm/z3fold.c    	zhdr = page_address(page);
page             1351 mm/z3fold.c    	if (test_bit(NEEDS_COMPACTING, &page->private) ||
page             1352 mm/z3fold.c    	    test_bit(PAGE_STALE, &page->private))
page             1362 mm/z3fold.c    		if (!list_empty(&page->lru))
page             1363 mm/z3fold.c    			list_del(&page->lru);
page             1373 mm/z3fold.c    static int z3fold_page_migrate(struct address_space *mapping, struct page *newpage,
page             1374 mm/z3fold.c    			       struct page *page, enum migrate_mode mode)
page             1380 mm/z3fold.c    	VM_BUG_ON_PAGE(!PageMovable(page), page);
page             1381 mm/z3fold.c    	VM_BUG_ON_PAGE(!PageIsolated(page), page);
page             1384 mm/z3fold.c    	zhdr = page_address(page);
page             1400 mm/z3fold.c    	newpage->private = page->private;
page             1401 mm/z3fold.c    	page->private = 0;
page             1410 mm/z3fold.c    	new_mapping = page_mapping(page);
page             1411 mm/z3fold.c    	__ClearPageMovable(page);
page             1412 mm/z3fold.c    	ClearPagePrivate(page);
page             1432 mm/z3fold.c    	page_mapcount_reset(page);
page             1433 mm/z3fold.c    	put_page(page);
page             1437 mm/z3fold.c    static void z3fold_page_putback(struct page *page)
page             1442 mm/z3fold.c    	zhdr = page_address(page);
page             1448 mm/z3fold.c    	INIT_LIST_HEAD(&page->lru);
page             1454 mm/z3fold.c    	list_add(&page->lru, &pool->lru);
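
The mm/z3fold.c entries repeatedly test and set bits in page->private: that word is otherwise unused for these pages, so it doubles as an atomic per-page flag field. A sketch of the claim/release idiom; the flag names mirror the entries, the helpers are hypothetical:

#include <linux/mm.h>

enum sketch_z3fold_page_flags {
	PAGE_HEADLESS = 0,
	MIDDLE_CHUNK_MAPPED,
	NEEDS_COMPACTING,
	PAGE_STALE,
	PAGE_CLAIMED,
};

/* Atomic claim: exactly one concurrent caller sees the 0 -> 1 edge. */
static bool sketch_claim_page(struct page *page)
{
	return !test_and_set_bit(PAGE_CLAIMED, &page->private);
}

static void sketch_release_page(struct page *page)
{
	clear_bit(PAGE_CLAIMED, &page->private);
}
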
page              239 mm/zbud.c      static struct zbud_header *init_zbud_page(struct page *page)
page              241 mm/zbud.c      	struct zbud_header *zhdr = page_address(page);
page              360 mm/zbud.c      	struct page *page;
page              386 mm/zbud.c      	page = alloc_page(gfp);
page              387 mm/zbud.c      	if (!page)
page              391 mm/zbud.c      	zhdr = init_zbud_page(page);
page              217 mm/zsmalloc.c  static void SetPageHugeObject(struct page *page)
page              219 mm/zsmalloc.c  	SetPageOwnerPriv1(page);
page              222 mm/zsmalloc.c  static void ClearPageHugeObject(struct page *page)
page              224 mm/zsmalloc.c  	ClearPageOwnerPriv1(page);
page              227 mm/zsmalloc.c  static int PageHugeObject(struct page *page)
page              229 mm/zsmalloc.c  	return PageOwnerPriv1(page);
page              288 mm/zsmalloc.c  	struct page *first_page;
page              469 mm/zsmalloc.c  static __maybe_unused int is_first_page(struct page *page)
page              471 mm/zsmalloc.c  	return PagePrivate(page);
page              486 mm/zsmalloc.c  static inline struct page *get_first_page(struct zspage *zspage)
page              488 mm/zsmalloc.c  	struct page *first_page = zspage->first_page;
page              494 mm/zsmalloc.c  static inline int get_first_obj_offset(struct page *page)
page              496 mm/zsmalloc.c  	return page->units;
page              499 mm/zsmalloc.c  static inline void set_first_obj_offset(struct page *page, int offset)
page              501 mm/zsmalloc.c  	page->units = offset;
page              824 mm/zsmalloc.c  static struct zspage *get_zspage(struct page *page)
page              826 mm/zsmalloc.c  	struct zspage *zspage = (struct zspage *)page->private;
page              832 mm/zsmalloc.c  static struct page *get_next_page(struct page *page)
page              834 mm/zsmalloc.c  	if (unlikely(PageHugeObject(page)))
page              837 mm/zsmalloc.c  	return page->freelist;
page              846 mm/zsmalloc.c  static void obj_to_location(unsigned long obj, struct page **page,
page              850 mm/zsmalloc.c  	*page = pfn_to_page(obj >> OBJ_INDEX_BITS);
page              859 mm/zsmalloc.c  static unsigned long location_to_obj(struct page *page, unsigned int obj_idx)
page              863 mm/zsmalloc.c  	obj = page_to_pfn(page) << OBJ_INDEX_BITS;
page              875 mm/zsmalloc.c  static unsigned long obj_to_head(struct page *page, void *obj)
page              877 mm/zsmalloc.c  	if (unlikely(PageHugeObject(page))) {
page              878 mm/zsmalloc.c  		VM_BUG_ON_PAGE(!is_first_page(page), page);
page              879 mm/zsmalloc.c  		return page->index;
page              904 mm/zsmalloc.c  static void reset_page(struct page *page)
page              906 mm/zsmalloc.c  	__ClearPageMovable(page);
page              907 mm/zsmalloc.c  	ClearPagePrivate(page);
page              908 mm/zsmalloc.c  	set_page_private(page, 0);
page              909 mm/zsmalloc.c  	page_mapcount_reset(page);
page              910 mm/zsmalloc.c  	ClearPageHugeObject(page);
page              911 mm/zsmalloc.c  	page->freelist = NULL;
page              916 mm/zsmalloc.c  	struct page *cursor, *fail;
page              938 mm/zsmalloc.c  	struct page *page, *next;
page              949 mm/zsmalloc.c  	next = page = get_first_page(zspage);
page              951 mm/zsmalloc.c  		VM_BUG_ON_PAGE(!PageLocked(page), page);
page              952 mm/zsmalloc.c  		next = get_next_page(page);
page              953 mm/zsmalloc.c  		reset_page(page);
page              954 mm/zsmalloc.c  		unlock_page(page);
page              955 mm/zsmalloc.c  		dec_zone_page_state(page, NR_ZSPAGES);
page              956 mm/zsmalloc.c  		put_page(page);
page              957 mm/zsmalloc.c  		page = next;
page              958 mm/zsmalloc.c  	} while (page != NULL);
page              987 mm/zsmalloc.c  	struct page *page = get_first_page(zspage);
page              989 mm/zsmalloc.c  	while (page) {
page              990 mm/zsmalloc.c  		struct page *next_page;
page              994 mm/zsmalloc.c  		set_first_obj_offset(page, off);
page              996 mm/zsmalloc.c  		vaddr = kmap_atomic(page);
page             1009 mm/zsmalloc.c  		next_page = get_next_page(page);
page             1020 mm/zsmalloc.c  		page = next_page;
page             1028 mm/zsmalloc.c  				struct page *pages[])
page             1031 mm/zsmalloc.c  	struct page *page;
page             1032 mm/zsmalloc.c  	struct page *prev_page = NULL;
page             1044 mm/zsmalloc.c  		page = pages[i];
page             1045 mm/zsmalloc.c  		set_page_private(page, (unsigned long)zspage);
page             1046 mm/zsmalloc.c  		page->freelist = NULL;
page             1048 mm/zsmalloc.c  			zspage->first_page = page;
page             1049 mm/zsmalloc.c  			SetPagePrivate(page);
page             1052 mm/zsmalloc.c  				SetPageHugeObject(page);
page             1054 mm/zsmalloc.c  			prev_page->freelist = page;
page             1056 mm/zsmalloc.c  		prev_page = page;
page             1068 mm/zsmalloc.c  	struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE];
page             1079 mm/zsmalloc.c  		struct page *page;
page             1081 mm/zsmalloc.c  		page = alloc_page(gfp);
page             1082 mm/zsmalloc.c  		if (!page) {
page             1091 mm/zsmalloc.c  		inc_zone_page_state(page, NR_ZSPAGES);
page             1092 mm/zsmalloc.c  		pages[i] = page;
page             1139 mm/zsmalloc.c  				struct page *pages[2], int off, int size)
page             1147 mm/zsmalloc.c  				struct page *pages[2], int off, int size)
page             1177 mm/zsmalloc.c  			struct page *pages[2], int off, int size)
page             1205 mm/zsmalloc.c  			struct page *pages[2], int off, int size)
page             1295 mm/zsmalloc.c  	struct page *page;
page             1303 mm/zsmalloc.c  	struct page *pages[2];
page             1317 mm/zsmalloc.c  	obj_to_location(obj, &page, &obj_idx);
page             1318 mm/zsmalloc.c  	zspage = get_zspage(page);
page             1331 mm/zsmalloc.c  		area->vm_addr = kmap_atomic(page);
page             1337 mm/zsmalloc.c  	pages[0] = page;
page             1338 mm/zsmalloc.c  	pages[1] = get_next_page(page);
page             1343 mm/zsmalloc.c  	if (likely(!PageHugeObject(page)))
page             1353 mm/zsmalloc.c  	struct page *page;
page             1363 mm/zsmalloc.c  	obj_to_location(obj, &page, &obj_idx);
page             1364 mm/zsmalloc.c  	zspage = get_zspage(page);
page             1373 mm/zsmalloc.c  		struct page *pages[2];
page             1375 mm/zsmalloc.c  		pages[0] = page;
page             1376 mm/zsmalloc.c  		pages[1] = get_next_page(page);
page             1414 mm/zsmalloc.c  	struct page *m_page;
page             1519 mm/zsmalloc.c  	struct page *f_page;
page             1543 mm/zsmalloc.c  	struct page *f_page;
page             1588 mm/zsmalloc.c  	struct page *s_page, *d_page;
page             1653 mm/zsmalloc.c  					struct page *page, int *obj_idx)
page             1659 mm/zsmalloc.c  	void *addr = kmap_atomic(page);
page             1661 mm/zsmalloc.c  	offset = get_first_obj_offset(page);
page             1665 mm/zsmalloc.c  		head = obj_to_head(page, addr + offset);
page             1686 mm/zsmalloc.c  	struct page *s_page;
page             1689 mm/zsmalloc.c  	struct page *d_page;
page             1700 mm/zsmalloc.c  	struct page *s_page = cc->s_page;
page             1701 mm/zsmalloc.c  	struct page *d_page = cc->d_page;
page             1797 mm/zsmalloc.c  	struct page *page = get_first_page(zspage);
page             1800 mm/zsmalloc.c  		lock_page(page);
page             1801 mm/zsmalloc.c  	} while ((page = get_next_page(page)) != NULL);
page             1893 mm/zsmalloc.c  				struct page *newpage, struct page *oldpage)
page             1895 mm/zsmalloc.c  	struct page *page;
page             1896 mm/zsmalloc.c  	struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE] = {NULL, };
page             1899 mm/zsmalloc.c  	page = get_first_page(zspage);
page             1901 mm/zsmalloc.c  		if (page == oldpage)
page             1904 mm/zsmalloc.c  			pages[idx] = page;
page             1906 mm/zsmalloc.c  	} while ((page = get_next_page(page)) != NULL);
page             1915 mm/zsmalloc.c  static bool zs_page_isolate(struct page *page, isolate_mode_t mode)
page             1928 mm/zsmalloc.c  	VM_BUG_ON_PAGE(!PageMovable(page), page);
page             1929 mm/zsmalloc.c  	VM_BUG_ON_PAGE(PageIsolated(page), page);
page             1931 mm/zsmalloc.c  	zspage = get_zspage(page);
page             1939 mm/zsmalloc.c  	mapping = page_mapping(page);
page             1971 mm/zsmalloc.c  static int zs_page_migrate(struct address_space *mapping, struct page *newpage,
page             1972 mm/zsmalloc.c  		struct page *page, enum migrate_mode mode)
page             1979 mm/zsmalloc.c  	struct page *dummy;
page             1995 mm/zsmalloc.c  	VM_BUG_ON_PAGE(!PageMovable(page), page);
page             1996 mm/zsmalloc.c  	VM_BUG_ON_PAGE(!PageIsolated(page), page);
page             1998 mm/zsmalloc.c  	zspage = get_zspage(page);
page             2005 mm/zsmalloc.c  	offset = get_first_obj_offset(page);
page             2017 mm/zsmalloc.c  	s_addr = kmap_atomic(page);
page             2019 mm/zsmalloc.c  		head = obj_to_head(page, s_addr + pos);
page             2037 mm/zsmalloc.c  		head = obj_to_head(page, addr);
page             2052 mm/zsmalloc.c  	replace_sub_page(class, zspage, newpage, page);
page             2072 mm/zsmalloc.c  	if (page_zone(newpage) != page_zone(page)) {
page             2073 mm/zsmalloc.c  		dec_zone_page_state(page, NR_ZSPAGES);
page             2077 mm/zsmalloc.c  	reset_page(page);
page             2078 mm/zsmalloc.c  	put_page(page);
page             2079 mm/zsmalloc.c  	page = newpage;
page             2085 mm/zsmalloc.c  		head = obj_to_head(page, addr);
page             2100 mm/zsmalloc.c  static void zs_page_putback(struct page *page)
page             2109 mm/zsmalloc.c  	VM_BUG_ON_PAGE(!PageMovable(page), page);
page             2110 mm/zsmalloc.c  	VM_BUG_ON_PAGE(!PageIsolated(page), page);
page             2112 mm/zsmalloc.c  	zspage = get_zspage(page);
page             2114 mm/zsmalloc.c  	mapping = page_mapping(page);
page             2235 mm/zsmalloc.c  	struct page *page = get_first_page(zspage);
page             2238 mm/zsmalloc.c  		WARN_ON(!trylock_page(page));
page             2239 mm/zsmalloc.c  		__SetPageMovable(page, pool->inode->i_mapping);
page             2240 mm/zsmalloc.c  		unlock_page(page);
page             2241 mm/zsmalloc.c  	} while ((page = get_next_page(page)) != NULL);
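
obj_to_location()/location_to_obj(), indexed above, pack a page frame number and an in-page object index into one handle word. A simplified sketch (the real encoding also reserves low tag bits; the width below is illustrative, not the real OBJ_INDEX_BITS):

#include <linux/mm.h>

#define SKETCH_OBJ_INDEX_BITS	10	/* illustrative width */
#define SKETCH_OBJ_INDEX_MASK	((1UL << SKETCH_OBJ_INDEX_BITS) - 1)

static unsigned long sketch_location_to_obj(struct page *page,
					    unsigned int obj_idx)
{
	return (page_to_pfn(page) << SKETCH_OBJ_INDEX_BITS) |
	       (obj_idx & SKETCH_OBJ_INDEX_MASK);
}

static void sketch_obj_to_location(unsigned long obj, struct page **page,
				   unsigned int *obj_idx)
{
	*page = pfn_to_page(obj >> SKETCH_OBJ_INDEX_BITS);
	*obj_idx = obj & SKETCH_OBJ_INDEX_MASK;
}
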
page              815 mm/zswap.c     				struct page **retpage)
page              847 mm/zswap.c     	struct page *page;
page              875 mm/zswap.c     	switch (zswap_get_swap_cache_page(swpentry, &page)) {
page              882 mm/zswap.c     		put_page(page);
page              890 mm/zswap.c     		dst = kmap_atomic(page);
page              900 mm/zswap.c     		SetPageUptodate(page);
page              904 mm/zswap.c     	SetPageReclaim(page);
page              907 mm/zswap.c     	__swap_writepage(page, &wbc, end_swap_bio_write);
page              908 mm/zswap.c     	put_page(page);
page              964 mm/zswap.c     	unsigned long *page;
page              966 mm/zswap.c     	page = (unsigned long *)ptr;
page              967 mm/zswap.c     	for (pos = 1; pos < PAGE_SIZE / sizeof(*page); pos++) {
page              968 mm/zswap.c     		if (page[pos] != page[0])
page              971 mm/zswap.c     	*value = page[0];
page              977 mm/zswap.c     	unsigned long *page;
page              979 mm/zswap.c     	page = (unsigned long *)ptr;
page              980 mm/zswap.c     	memset_l(page, value, PAGE_SIZE / sizeof(unsigned long));
page              988 mm/zswap.c     				struct page *page)
page             1002 mm/zswap.c     	if (PageTransHuge(page)) {
page             1040 mm/zswap.c     		src = kmap_atomic(page);
page             1062 mm/zswap.c     	src = kmap_atomic(page);
page             1130 mm/zswap.c     				struct page *page)
page             1150 mm/zswap.c     		dst = kmap_atomic(page);
page             1161 mm/zswap.c     	dst = kmap_atomic(page);
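
The final mm/zswap.c entries implement the same-filled-page shortcut: if every word in the page repeats one value, zswap stores just that value and re-expands it with memset_l() on load. Sketched directly from the indexed lines:

#include <linux/mm.h>
#include <linux/string.h>

static bool sketch_page_same_filled(void *ptr, unsigned long *value)
{
	unsigned long *word = ptr;
	unsigned long pos;

	/* Bail out at the first word that differs from the first one. */
	for (pos = 1; pos < PAGE_SIZE / sizeof(*word); pos++)
		if (word[pos] != word[0])
			return false;

	*value = word[0];
	return true;
}

static void sketch_fill_page(void *ptr, unsigned long value)
{
	memset_l(ptr, value, PAGE_SIZE / sizeof(unsigned long));
}
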
page               22 net/9p/trans_common.c void p9_release_pages(struct page **pages, int nr_pages)
page               15 net/9p/trans_common.h void p9_release_pages(struct page **, int);
page              214 net/9p/trans_virtio.c 	       struct page **pdata, int nr_pages, size_t offs, int count)
page              305 net/9p/trans_virtio.c 			       struct page ***pages,
page              357 net/9p/trans_virtio.c 		*pages = kmalloc_array(nr_pages, sizeof(struct page *),
page              394 net/9p/trans_virtio.c 	struct page **in_pages = NULL, **out_pages = NULL;
page              212 net/atm/mpoa_proc.c 	char *page, *p;
page              221 net/atm/mpoa_proc.c 	page = (char *)__get_free_page(GFP_KERNEL);
page              222 net/atm/mpoa_proc.c 	if (!page)
page              225 net/atm/mpoa_proc.c 	for (p = page, len = 0; len < nbytes; p++, len++) {
page              227 net/atm/mpoa_proc.c 			free_page((unsigned long)page);
page              236 net/atm/mpoa_proc.c 	if (!parse_qos(page))
page              237 net/atm/mpoa_proc.c 		printk("mpoa: proc_mpc_write: could not parse '%s'\n", page);
page              239 net/atm/mpoa_proc.c 	free_page((unsigned long)page);
page              320 net/atm/proc.c 	unsigned long page;
page              325 net/atm/proc.c 	page = get_zeroed_page(GFP_KERNEL);
page              326 net/atm/proc.c 	if (!page)
page              332 net/atm/proc.c 		length = dev->ops->proc_read(dev, pos, (char *)page);
page              337 net/atm/proc.c 		if (copy_to_user(buf, (char *)page, length))
page              341 net/atm/proc.c 	free_page(page);
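
Both net/atm groups use a whole free page as a bounce buffer for proc I/O. A condensed sketch of the read side (position and bounds handling elided; the callback type is illustrative):

#include <linux/gfp.h>
#include <linux/uaccess.h>

static ssize_t sketch_proc_read(char __user *buf,
				ssize_t (*dev_read)(char *))
{
	unsigned long page;
	ssize_t length;

	page = get_zeroed_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	length = dev_read((char *)page);	/* fill the bounce page */
	if (length >= 0 && copy_to_user(buf, (char *)page, length))
		length = -EFAULT;

	free_page(page);
	return length;
}
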
page              485 net/bluetooth/hci_core.c 		cp.page = 0x01;
page              781 net/bluetooth/hci_core.c 		cp.page = p;
page              690 net/bluetooth/hci_event.c 	if (rp->page < HCI_MAX_PAGES)
page              691 net/bluetooth/hci_event.c 		memcpy(hdev->features[rp->page], rp->features, 8);
page             3103 net/bluetooth/hci_event.c 		cp.page = 0x01;
page             4125 net/bluetooth/hci_event.c 	if (ev->page < HCI_MAX_PAGES)
page             4126 net/bluetooth/hci_event.c 		memcpy(conn->features[ev->page], ev->features, 8);
page             4128 net/bluetooth/hci_event.c 	if (!ev->status && ev->page == 0x01) {
page               35 net/ceph/cls_lock_client.c 	struct page *lock_op_page;
page               98 net/ceph/cls_lock_client.c 	struct page *unlock_op_page;
page              147 net/ceph/cls_lock_client.c 	struct page *break_op_page;
page              196 net/ceph/cls_lock_client.c 	struct page *cookie_op_page;
page              336 net/ceph/cls_lock_client.c 	struct page *get_info_op_page, *reply_page;
page              390 net/ceph/cls_lock_client.c 	struct page **pages;
page              193 net/ceph/crypto.c 		struct page *page;
page              197 net/ceph/crypto.c 			page = vmalloc_to_page(buf);
page              199 net/ceph/crypto.c 			page = virt_to_page(buf);
page              201 net/ceph/crypto.c 		sg_set_page(sg, page, len, off);
page              187 net/ceph/messenger.c static struct page *zero_page;		/* used in certain error cases */
page              526 net/ceph/messenger.c static int ceph_tcp_recvpage(struct socket *sock, struct page *page,
page              530 net/ceph/messenger.c 		.bv_page = page,
page              569 net/ceph/messenger.c static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
page              572 net/ceph/messenger.c 	ssize_t (*sendpage)(struct socket *sock, struct page *page,
page              585 net/ceph/messenger.c 	if (page_count(page) >= 1 && !PageSlab(page))
page              590 net/ceph/messenger.c 	ret = sendpage(sock, page, offset, size, flags);
page              829 net/ceph/messenger.c static struct page *ceph_msg_data_bio_next(struct ceph_msg_data_cursor *cursor,
page              845 net/ceph/messenger.c 	struct page *page = bio_iter_page(it->bio, it->iter);
page              858 net/ceph/messenger.c 		       page == bio_iter_page(it->bio, it->iter)))
page              890 net/ceph/messenger.c static struct page *ceph_msg_data_bvecs_next(struct ceph_msg_data_cursor *cursor,
page              906 net/ceph/messenger.c 	struct page *page = bvec_iter_page(bvecs, cursor->bvec_iter);
page              919 net/ceph/messenger.c 		       page == bvec_iter_page(bvecs, cursor->bvec_iter)))
page              954 net/ceph/messenger.c static struct page *
page             1010 net/ceph/messenger.c 	struct page *page;
page             1021 net/ceph/messenger.c 	page = list_first_entry(&pagelist->head, struct page, lru);
page             1024 net/ceph/messenger.c 	cursor->page = page;
page             1029 net/ceph/messenger.c static struct page *
page             1041 net/ceph/messenger.c 	BUG_ON(!cursor->page);
page             1051 net/ceph/messenger.c 	return cursor->page;
page             1081 net/ceph/messenger.c 	BUG_ON(list_is_last(&cursor->page->lru, &pagelist->head));
page             1082 net/ceph/messenger.c 	cursor->page = list_next_entry(cursor->page, lru);
page             1142 net/ceph/messenger.c static struct page *ceph_msg_data_next(struct ceph_msg_data_cursor *cursor,
page             1146 net/ceph/messenger.c 	struct page *page;
page             1150 net/ceph/messenger.c 		page = ceph_msg_data_pagelist_next(cursor, page_offset, length);
page             1153 net/ceph/messenger.c 		page = ceph_msg_data_pages_next(cursor, page_offset, length);
page             1157 net/ceph/messenger.c 		page = ceph_msg_data_bio_next(cursor, page_offset, length);
page             1161 net/ceph/messenger.c 		page = ceph_msg_data_bvecs_next(cursor, page_offset, length);
page             1165 net/ceph/messenger.c 		page = NULL;
page             1169 net/ceph/messenger.c 	BUG_ON(!page);
page             1176 net/ceph/messenger.c 	return page;
page             1539 net/ceph/messenger.c static u32 ceph_crc32c_page(u32 crc, struct page *page,
page             1545 net/ceph/messenger.c 	kaddr = kmap(page);
page             1548 net/ceph/messenger.c 	kunmap(page);
page             1582 net/ceph/messenger.c 		struct page *page;
page             1592 net/ceph/messenger.c 		page = ceph_msg_data_next(cursor, &page_offset, &length, NULL);
page             1595 net/ceph/messenger.c 		ret = ceph_tcp_sendpage(con->sock, page, page_offset, length,
page             1604 net/ceph/messenger.c 			crc = ceph_crc32c_page(crc, page, page_offset, length);
page             2324 net/ceph/messenger.c 	struct page *page;
page             2341 net/ceph/messenger.c 		page = ceph_msg_data_next(cursor, &page_offset, &length, NULL);
page             2342 net/ceph/messenger.c 		ret = ceph_tcp_recvpage(con->sock, page, page_offset, length);
page             2351 net/ceph/messenger.c 			crc = ceph_crc32c_page(crc, page, page_offset, ret);
page             3261 net/ceph/messenger.c void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages,
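
The ceph_crc32c_page() entries above (messenger.c, lines 1539-1548) show the kmap/checksum/kunmap pattern used on both the send and receive paths. A minimal sketch of that pattern, assuming only crc32c() from <linux/crc32c.h>; the function name here is hypothetical:

#include <linux/crc32c.h>
#include <linux/highmem.h>

/* hypothetical helper mirroring the kmap/crc32c/kunmap entries above */
static u32 example_crc32c_page(u32 crc, struct page *page,
			       unsigned int page_offset, unsigned int length)
{
	char *kaddr;

	kaddr = kmap(page);
	crc = crc32c(crc, kaddr + page_offset, length);
	kunmap(page);

	return crc;
}
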
page              133 net/ceph/osd_client.c 			struct page **pages, u64 length, u32 alignment,
page              191 net/ceph/osd_client.c 			unsigned int which, struct page **pages,
page              204 net/ceph/osd_client.c 			unsigned int which, struct page **pages,
page              291 net/ceph/osd_client.c 			unsigned int which, struct page **pages, u64 length,
page              323 net/ceph/osd_client.c 			unsigned int which, struct page **pages, u64 length,
page             4821 net/ceph/osd_client.c 		     struct page ***preply_pages,
page             4825 net/ceph/osd_client.c 	struct page **pages;
page             4995 net/ceph/osd_client.c 	struct page **pages;
page             5066 net/ceph/osd_client.c 		   struct page *req_page, size_t req_len,
page             5067 net/ceph/osd_client.c 		   struct page **resp_pages, size_t *resp_len)
page             5241 net/ceph/osd_client.c 			struct page **pages, int num_pages, int page_align)
page             5281 net/ceph/osd_client.c 			 struct page **pages, int num_pages)
page             5321 net/ceph/osd_client.c 	struct page **pages;
page             5515 net/ceph/osd_client.c 		struct page **pages;
page               32 net/ceph/pagelist.c 		struct page *page = list_entry(pl->head.prev, struct page, lru);
page               33 net/ceph/pagelist.c 		kunmap(page);
page               44 net/ceph/pagelist.c 		struct page *page = list_first_entry(&pl->head, struct page,
page               46 net/ceph/pagelist.c 		list_del(&page->lru);
page               47 net/ceph/pagelist.c 		__free_page(page);
page               56 net/ceph/pagelist.c 	struct page *page;
page               59 net/ceph/pagelist.c 		page = __page_cache_alloc(GFP_NOFS);
page               61 net/ceph/pagelist.c 		page = list_first_entry(&pl->free_list, struct page, lru);
page               62 net/ceph/pagelist.c 		list_del(&page->lru);
page               65 net/ceph/pagelist.c 	if (!page)
page               69 net/ceph/pagelist.c 	list_add_tail(&page->lru, &pl->head);
page               70 net/ceph/pagelist.c 	pl->mapped_tail = kmap(page);
page              110 net/ceph/pagelist.c 		struct page *page = __page_cache_alloc(GFP_NOFS);
page              111 net/ceph/pagelist.c 		if (!page)
page              113 net/ceph/pagelist.c 		list_add_tail(&page->lru, &pl->free_list);
page              124 net/ceph/pagelist.c 		struct page *page = list_first_entry(&pl->free_list,
page              125 net/ceph/pagelist.c 						     struct page, lru);
page              126 net/ceph/pagelist.c 		list_del(&page->lru);
page              127 net/ceph/pagelist.c 		__free_page(page);
page              153 net/ceph/pagelist.c 	struct page *page;
page              159 net/ceph/pagelist.c 		page = list_entry(pl->head.prev, struct page, lru);
page              161 net/ceph/pagelist.c 		list_move_tail(&page->lru, &pl->free_list);
page              166 net/ceph/pagelist.c 		page = list_entry(pl->head.prev, struct page, lru);
page              167 net/ceph/pagelist.c 		pl->mapped_tail = kmap(page);
page               13 net/ceph/pagevec.c void ceph_put_page_vector(struct page **pages, int num_pages, bool dirty)
page               26 net/ceph/pagevec.c void ceph_release_page_vector(struct page **pages, int num_pages)
page               39 net/ceph/pagevec.c struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags)
page               41 net/ceph/pagevec.c 	struct page **pages;
page               61 net/ceph/pagevec.c int ceph_copy_user_to_page_vector(struct page **pages,
page               87 net/ceph/pagevec.c void ceph_copy_to_page_vector(struct page **pages,
page              110 net/ceph/pagevec.c void ceph_copy_from_page_vector(struct page **pages,
page              137 net/ceph/pagevec.c void ceph_zero_page_vector_range(int off, int len, struct page **pages)
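
The pagevec.c entries list the page-vector helpers (allocate, copy, zero, release). A sketch of how a caller might drive them, assuming ceph_alloc_page_vector() reports failure via ERR_PTR as is usual for such helpers; the wrapper name is made up:

#include <linux/ceph/libceph.h>
#include <linux/err.h>

/* hypothetical caller of the page-vector helpers indexed above */
static int example_zeroed_page_vector(int num_pages)
{
	struct page **pages;

	pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	/* zero the whole run of pages before handing them out */
	ceph_zero_page_vector_range(0, num_pages * PAGE_SIZE, pages);

	ceph_release_page_vector(pages, num_pages);
	return 0;
}
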
page              447 net/core/datagram.c 			struct page *page = skb_frag_page(frag);
page              448 net/core/datagram.c 			u8 *vaddr = kmap(page);
page              455 net/core/datagram.c 			kunmap(page);
page              629 net/core/datagram.c 		struct page *pages[MAX_SKB_FRAGS];
page             2226 net/core/filter.c 	struct page *page;
page             2273 net/core/filter.c 	page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC | __GFP_COMP,
page             2275 net/core/filter.c 	if (unlikely(!page))
page             2278 net/core/filter.c 	raw = page_address(page);
page             2294 net/core/filter.c 	sg_set_page(&msg->sg.data[first_sge], page, copy, 0);
page             2351 net/core/filter.c 	struct page *page;
page             2382 net/core/filter.c 	page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC | __GFP_COMP,
page             2384 net/core/filter.c 	if (unlikely(!page))
page             2390 net/core/filter.c 		raw = page_address(page);
page             2456 net/core/filter.c 	sg_set_page(&msg->sg.data[new], page, len + copy, 0);
page             2573 net/core/filter.c 				struct page *page, *orig;
page             2576 net/core/filter.c 				page = alloc_pages(__GFP_NOWARN |
page             2579 net/core/filter.c 				if (unlikely(!page))
page             2585 net/core/filter.c 				to = page_address(page);
page             2588 net/core/filter.c 				sg_set_page(sge, page, a + b, 0);
page               85 net/core/page_pool.c static struct page *__page_pool_get_cached(struct page_pool *pool)
page               89 net/core/page_pool.c 	struct page *page;
page               95 net/core/page_pool.c 			page = pool->alloc.cache[--pool->alloc.count];
page               96 net/core/page_pool.c 			return page;
page              109 net/core/page_pool.c 	page = __ptr_ring_consume(r);
page              115 net/core/page_pool.c 	return page;
page              120 net/core/page_pool.c static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
page              123 net/core/page_pool.c 	struct page *page;
page              141 net/core/page_pool.c 	page = alloc_pages_node(pool->p.nid, gfp, pool->p.order);
page              142 net/core/page_pool.c 	if (!page)
page              153 net/core/page_pool.c 	dma = dma_map_page_attrs(pool->p.dev, page, 0,
page              157 net/core/page_pool.c 		put_page(page);
page              160 net/core/page_pool.c 	page->dma_addr = dma;
page              166 net/core/page_pool.c 	trace_page_pool_state_hold(pool, page, pool->pages_state_hold_cnt);
page              169 net/core/page_pool.c 	return page;
page              175 net/core/page_pool.c struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp)
page              177 net/core/page_pool.c 	struct page *page;
page              180 net/core/page_pool.c 	page = __page_pool_get_cached(pool);
page              181 net/core/page_pool.c 	if (page)
page              182 net/core/page_pool.c 		return page;
page              185 net/core/page_pool.c 	page = __page_pool_alloc_pages_slow(pool, gfp);
page              186 net/core/page_pool.c 	return page;
page              211 net/core/page_pool.c 				   struct page *page)
page              219 net/core/page_pool.c 	dma = page->dma_addr;
page              224 net/core/page_pool.c 	page->dma_addr = 0;
page              230 net/core/page_pool.c 	trace_page_pool_state_release(pool, page, count);
page              234 net/core/page_pool.c void page_pool_unmap_page(struct page_pool *pool, struct page *page)
page              239 net/core/page_pool.c 	__page_pool_clean_page(pool, page);
page              244 net/core/page_pool.c static void __page_pool_return_page(struct page_pool *pool, struct page *page)
page              246 net/core/page_pool.c 	__page_pool_clean_page(pool, page);
page              248 net/core/page_pool.c 	put_page(page);
page              256 net/core/page_pool.c 				   struct page *page)
page              261 net/core/page_pool.c 		ret = ptr_ring_produce(&pool->ring, page);
page              263 net/core/page_pool.c 		ret = ptr_ring_produce_bh(&pool->ring, page);
page              273 net/core/page_pool.c static bool __page_pool_recycle_direct(struct page *page,
page              280 net/core/page_pool.c 	pool->alloc.cache[pool->alloc.count++] = page;
page              285 net/core/page_pool.c 			  struct page *page, bool allow_direct)
page              293 net/core/page_pool.c 	if (likely(page_ref_count(page) == 1)) {
page              297 net/core/page_pool.c 			if (__page_pool_recycle_direct(page, pool))
page              300 net/core/page_pool.c 		if (!__page_pool_recycle_into_ring(pool, page)) {
page              302 net/core/page_pool.c 			__page_pool_return_page(pool, page);
page              319 net/core/page_pool.c 	__page_pool_clean_page(pool, page);
page              320 net/core/page_pool.c 	put_page(page);
page              326 net/core/page_pool.c 	struct page *page;
page              329 net/core/page_pool.c 	while ((page = ptr_ring_consume_bh(&pool->ring))) {
page              331 net/core/page_pool.c 		if (!(page_ref_count(page) == 1))
page              333 net/core/page_pool.c 				__func__, page_ref_count(page));
page              335 net/core/page_pool.c 		__page_pool_return_page(pool, page);
page              354 net/core/page_pool.c 	struct page *page;
page              361 net/core/page_pool.c 		page = pool->alloc.cache[--pool->alloc.count];
page              362 net/core/page_pool.c 		__page_pool_return_page(pool, page);
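
The page_pool.c entries cover the allocation fast path (per-CPU cache, then ptr_ring, then the page allocator) and the recycle/return paths. A sketch of the consumer side; the two helper names are invented, while page_pool_alloc_pages() and page_pool_recycle_direct() are the entry points exposed by <net/page_pool.h>:

#include <net/page_pool.h>

/* hypothetical RX helpers; only the page_pool calls are real API */
static struct page *example_rx_alloc(struct page_pool *pool)
{
	/* hits the lockless cache or ptr_ring before falling back
	 * to __page_pool_alloc_pages_slow(), per the entries above */
	return page_pool_alloc_pages(pool, GFP_ATOMIC);
}

static void example_rx_drop(struct page_pool *pool, struct page *page)
{
	/* in softirq/NAPI context the page can be recycled directly
	 * into the alloc-side cache instead of the ptr_ring */
	page_pool_recycle_direct(pool, page);
}
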
page              280 net/core/pktgen.c 	struct page *page;
page             1162 net/core/pktgen.c 			if (pkt_dev->page) {
page             1163 net/core/pktgen.c 				put_page(pkt_dev->page);
page             1164 net/core/pktgen.c 				pkt_dev->page = NULL;
page             2642 net/core/pktgen.c 			if (unlikely(!pkt_dev->page)) {
page             2647 net/core/pktgen.c 				pkt_dev->page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
page             2648 net/core/pktgen.c 				if (!pkt_dev->page)
page             2651 net/core/pktgen.c 			get_page(pkt_dev->page);
page             2652 net/core/pktgen.c 			skb_frag_set_page(skb, i, pkt_dev->page);
page             3777 net/core/pktgen.c 	if (pkt_dev->page)
page             3778 net/core/pktgen.c 		put_page(pkt_dev->page);
page              363 net/core/skbuff.c 	struct page_frag_cache page;
page              375 net/core/skbuff.c 	return page_frag_alloc(&nc->page, fragsz, gfp_mask);
page              454 net/core/skbuff.c 		nc = this_cpu_ptr(&napi_alloc_cache.page);
page              519 net/core/skbuff.c 	data = page_frag_alloc(&nc->page, len, gfp_mask);
page              530 net/core/skbuff.c 	if (nc->page.pfmemalloc)
page              543 net/core/skbuff.c void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
page              546 net/core/skbuff.c 	skb_fill_page_desc(skb, i, page, off, size);
page              785 net/core/skbuff.c 		struct page *p;
page             1345 net/core/skbuff.c 	struct page *page, *head = NULL;
page             1357 net/core/skbuff.c 		page = alloc_page(gfp_mask);
page             1358 net/core/skbuff.c 		if (!page) {
page             1360 net/core/skbuff.c 				struct page *next = (struct page *)page_private(head);
page             1366 net/core/skbuff.c 		set_page_private(page, (unsigned long)head);
page             1367 net/core/skbuff.c 		head = page;
page             1370 net/core/skbuff.c 	page = head;
page             1375 net/core/skbuff.c 		struct page *p;
page             1386 net/core/skbuff.c 					page = (struct page *)page_private(page);
page             1389 net/core/skbuff.c 				memcpy(page_address(page) + d_off,
page             1405 net/core/skbuff.c 		head = (struct page *)page_private(head);
page             2217 net/core/skbuff.c 			struct page *p;
page             2275 net/core/skbuff.c static struct page *linear_to_page(struct page *page, unsigned int *len,
page             2286 net/core/skbuff.c 	memcpy(page_address(pfrag->page) + pfrag->offset,
page             2287 net/core/skbuff.c 	       page_address(page) + *offset, *len);
page             2291 net/core/skbuff.c 	return pfrag->page;
page             2295 net/core/skbuff.c 			     struct page *page,
page             2299 net/core/skbuff.c 		spd->pages[spd->nr_pages - 1] == page &&
page             2308 net/core/skbuff.c 			  struct pipe_inode_info *pipe, struct page *page,
page             2317 net/core/skbuff.c 		page = linear_to_page(page, len, &offset, sk);
page             2318 net/core/skbuff.c 		if (!page)
page             2321 net/core/skbuff.c 	if (spd_can_coalesce(spd, page, offset)) {
page             2325 net/core/skbuff.c 	get_page(page);
page             2326 net/core/skbuff.c 	spd->pages[spd->nr_pages] = page;
page             2334 net/core/skbuff.c static bool __splice_segment(struct page *page, unsigned int poff,
page             2358 net/core/skbuff.c 		if (spd_fill_page(spd, pipe, page, &flen, poff,
page             2430 net/core/skbuff.c 	struct page *pages[MAX_SKB_FRAGS];
page             2578 net/core/skbuff.c 			struct page *p;
page             2657 net/core/skbuff.c 			struct page *p;
page             2756 net/core/skbuff.c 			struct page *p;
page             2943 net/core/skbuff.c 	struct page *page;
page             2960 net/core/skbuff.c 			page = virt_to_head_page(from->head);
page             2961 net/core/skbuff.c 			offset = from->data - (unsigned char *)page_address(page);
page             2962 net/core/skbuff.c 			__skb_fill_page_desc(to, 0, page, offset, plen);
page             2963 net/core/skbuff.c 			get_page(page);
page             3588 net/core/skbuff.c int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
page             3593 net/core/skbuff.c 	if (skb_can_coalesce(skb, i, page, offset)) {
page             3596 net/core/skbuff.c 		get_page(page);
page             3597 net/core/skbuff.c 		skb_fill_page_desc(skb, i, page, offset, size);
page             3631 net/core/skbuff.c 	struct page *page;
page             3633 net/core/skbuff.c 	page = virt_to_head_page(frag_skb->head);
page             3634 net/core/skbuff.c 	__skb_frag_set_page(&head_frag, page);
page             3636 net/core/skbuff.c 			 (unsigned char *)page_address(page));
page             4036 net/core/skbuff.c 		struct page *page = virt_to_head_page(skb->head);
page             4044 net/core/skbuff.c 			       (unsigned char *)page_address(page) +
page             4049 net/core/skbuff.c 		__skb_frag_set_page(frag, page);
page             5052 net/core/skbuff.c 		struct page *page;
page             5064 net/core/skbuff.c 		page = virt_to_head_page(from->head);
page             5065 net/core/skbuff.c 		offset = from->data - (unsigned char *)page_address(page);
page             5068 net/core/skbuff.c 				   page, offset, skb_headlen(from));
page             5653 net/core/skbuff.c 	struct page *page;
page             5675 net/core/skbuff.c 				page = alloc_pages((gfp_mask & ~__GFP_DIRECT_RECLAIM) |
page             5679 net/core/skbuff.c 				if (page)
page             5687 net/core/skbuff.c 		page = alloc_page(gfp_mask);
page             5688 net/core/skbuff.c 		if (!page)
page             5693 net/core/skbuff.c 		skb_fill_page_desc(skb, i, page, 0, chunk);
page               50 net/core/skmsg.c 		    sg_page(sge) == pfrag->page &&
page               61 net/core/skmsg.c 			sg_set_page(sge, pfrag->page, use, orig_offset);
page               62 net/core/skmsg.c 			get_page(pfrag->page);
page              303 net/core/skmsg.c 	struct page *pages[MAX_MSG_FRAGS];
page             1716 net/core/sock.c 	if (sk->sk_frag.page) {
page             1717 net/core/sock.c 		put_page(sk->sk_frag.page);
page             1718 net/core/sock.c 		sk->sk_frag.page = NULL;
page             2365 net/core/sock.c 	if (pfrag->page) {
page             2366 net/core/sock.c 		if (page_ref_count(pfrag->page) == 1) {
page             2372 net/core/sock.c 		put_page(pfrag->page);
page             2379 net/core/sock.c 		pfrag->page = alloc_pages((gfp & ~__GFP_DIRECT_RECLAIM) |
page             2383 net/core/sock.c 		if (likely(pfrag->page)) {
page             2388 net/core/sock.c 	pfrag->page = alloc_page(gfp);
page             2389 net/core/sock.c 	if (likely(pfrag->page)) {
page             2737 net/core/sock.c ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
page             2742 net/core/sock.c 	char *kaddr = kmap(page);
page             2746 net/core/sock.c 	kunmap(page);
page             2751 net/core/sock.c ssize_t sock_no_sendpage_locked(struct sock *sk, struct page *page,
page             2757 net/core/sock.c 	char *kaddr = kmap(page);
page             2762 net/core/sock.c 	kunmap(page);
page             2900 net/core/sock.c 	sk->sk_frag.page	=	NULL;
page              371 net/core/xdp.c 	struct page *page;
page              378 net/core/xdp.c 		page = virt_to_head_page(data);
page              380 net/core/xdp.c 		page_pool_put_page(xa->page_pool, page, napi_direct);
page              387 net/core/xdp.c 		page = virt_to_page(data); /* Assumes order0 page*/
page              388 net/core/xdp.c 		put_page(page);
page              425 net/core/xdp.c 	struct page *page;
page              429 net/core/xdp.c 	page = virt_to_head_page(data);
page              431 net/core/xdp.c 		page_pool_release_page(xa->page_pool, page);
page              472 net/core/xdp.c 	struct page *page;
page              482 net/core/xdp.c 	page = dev_alloc_page();
page              483 net/core/xdp.c 	if (!page)
page              486 net/core/xdp.c 	addr = page_to_virt(page);
page              177 net/ieee802154/nl-mac.c 	u8 page;
page              206 net/ieee802154/nl-mac.c 		page = nla_get_u8(info->attrs[IEEE802154_ATTR_PAGE]);
page              208 net/ieee802154/nl-mac.c 		page = 0;
page              212 net/ieee802154/nl-mac.c 			page,
page              301 net/ieee802154/nl-mac.c 	u8 page;
page              342 net/ieee802154/nl-mac.c 		page = nla_get_u8(info->attrs[IEEE802154_ATTR_PAGE]);
page              344 net/ieee802154/nl-mac.c 		page = 0;
page              353 net/ieee802154/nl-mac.c 	ret = ieee802154_mlme_ops(dev)->start_req(dev, &addr, channel, page,
page              374 net/ieee802154/nl-mac.c 	u8 page;
page              392 net/ieee802154/nl-mac.c 		page = nla_get_u8(info->attrs[IEEE802154_ATTR_PAGE]);
page              394 net/ieee802154/nl-mac.c 		page = 0;
page              397 net/ieee802154/nl-mac.c 						 page, duration);
page              335 net/ieee802154/nl802154.c 	unsigned long page;
page              341 net/ieee802154/nl802154.c 	for (page = 0; page <= IEEE802154_MAX_PAGE; page++) {
page              343 net/ieee802154/nl802154.c 				rdev->wpan_phy.supported.channels[page]))
page              968 net/ieee802154/nl802154.c 	u8 channel, page;
page              974 net/ieee802154/nl802154.c 	page = nla_get_u8(info->attrs[NL802154_ATTR_PAGE]);
page              978 net/ieee802154/nl802154.c 	if (page > IEEE802154_MAX_PAGE || channel > IEEE802154_MAX_CHANNEL ||
page              979 net/ieee802154/nl802154.c 	    !(rdev->wpan_phy.supported.channels[page] & BIT(channel)))
page              982 net/ieee802154/nl802154.c 	return rdev_set_channel(rdev, page, channel);
page               76 net/ieee802154/rdev-ops.h rdev_set_channel(struct cfg802154_registered_device *rdev, u8 page, u8 channel)
page               80 net/ieee802154/rdev-ops.h 	trace_802154_rdev_set_channel(&rdev->wpan_phy, page, channel);
page               81 net/ieee802154/rdev-ops.h 	ret = rdev->ops->set_channel(&rdev->wpan_phy, page, channel);
page              103 net/ieee802154/trace.h 	TP_PROTO(struct wpan_phy *wpan_phy, u8 page, u8 channel),
page              104 net/ieee802154/trace.h 	TP_ARGS(wpan_phy, page, channel),
page              107 net/ieee802154/trace.h 		__field(u8, page)
page              112 net/ieee802154/trace.h 		__entry->page = page;
page              116 net/ieee802154/trace.h 		  __entry->page, __entry->channel)
page              812 net/ipv4/af_inet.c ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset,
page              821 net/ipv4/af_inet.c 		return sk->sk_prot->sendpage(sk, page, offset, size, flags);
page              822 net/ipv4/af_inet.c 	return sock_no_sendpage(sock, page, offset, size, flags);
page              278 net/ipv4/esp4.c 	struct page *page;
page              314 net/ipv4/esp4.c 			page = pfrag->page;
page              315 net/ipv4/esp4.c 			get_page(page);
page              317 net/ipv4/esp4.c 			vaddr = kmap_atomic(page);
page              327 net/ipv4/esp4.c 			__skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
page              373 net/ipv4/esp4.c 	struct page *page;
page              431 net/ipv4/esp4.c 		page = pfrag->page;
page              432 net/ipv4/esp4.c 		get_page(page);
page              434 net/ipv4/esp4.c 		__skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len);
page              943 net/ipv4/ip_output.c csum_page(struct page *page, int offset, int copy)
page              947 net/ipv4/ip_output.c 	kaddr = kmap(page);
page              949 net/ipv4/ip_output.c 	kunmap(page);
page             1188 net/ipv4/ip_output.c 			if (!skb_can_coalesce(skb, i, pfrag->page,
page             1194 net/ipv4/ip_output.c 				__skb_fill_page_desc(skb, i, pfrag->page,
page             1197 net/ipv4/ip_output.c 				get_page(pfrag->page);
page             1201 net/ipv4/ip_output.c 				    page_address(pfrag->page) + pfrag->offset,
page             1322 net/ipv4/ip_output.c ssize_t	ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
page             1426 net/ipv4/ip_output.c 		if (skb_append_pagefrags(skb, page, offset, len)) {
page             1433 net/ipv4/ip_output.c 			csum = csum_page(page, offset, len);
page              964 net/ipv4/tcp.c ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
page              974 net/ipv4/tcp.c 	    WARN_ONCE(PageSlab(page), "page must not be a Slab one"))
page             1024 net/ipv4/tcp.c 		can_coalesce = skb_can_coalesce(skb, i, page, offset);
page             1035 net/ipv4/tcp.c 			get_page(page);
page             1036 net/ipv4/tcp.c 			skb_fill_page_desc(skb, i, page, offset, copy);
page             1106 net/ipv4/tcp.c int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset,
page             1110 net/ipv4/tcp.c 		return sock_no_sendpage_locked(sk, page, offset, size, flags);
page             1114 net/ipv4/tcp.c 	return do_tcp_sendpages(sk, page, offset, size, flags);
page             1118 net/ipv4/tcp.c int tcp_sendpage(struct sock *sk, struct page *page, int offset,
page             1124 net/ipv4/tcp.c 	ret = tcp_sendpage_locked(sk, page, offset, size, flags);
page             1330 net/ipv4/tcp.c 			if (!skb_can_coalesce(skb, i, pfrag->page,
page             1345 net/ipv4/tcp.c 						       pfrag->page,
page             1355 net/ipv4/tcp.c 				skb_fill_page_desc(skb, i, pfrag->page,
page             1357 net/ipv4/tcp.c 				page_ref_inc(pfrag->page);
page             2686 net/ipv4/tcp.c 	if (sk->sk_frag.page) {
page             2687 net/ipv4/tcp.c 		put_page(sk->sk_frag.page);
page             2688 net/ipv4/tcp.c 		sk->sk_frag.page = NULL;
page             3828 net/ipv4/tcp.c 		struct page *page = skb_frag_page(f) + (offset >> PAGE_SHIFT);
page             3830 net/ipv4/tcp.c 		sg_set_page(&sg, page, skb_frag_size(f),
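
The ip_output.c and tcp.c entries above repeat one idiom: test skb_can_coalesce() against the socket's page frag, and only when the data cannot extend the last fragment take a page reference and add a new one. A simplified sketch of that idiom (no wmem or truesize accounting, no error paths; the helper name is invented):

#include <linux/mm.h>
#include <linux/skbuff.h>

/* hypothetical helper showing the coalesce-or-add-frag idiom above */
static void example_append_to_skb(struct sk_buff *skb,
				  struct page_frag *pfrag, int copy)
{
	int i = skb_shinfo(skb)->nr_frags;

	if (skb_can_coalesce(skb, i, pfrag->page, pfrag->offset)) {
		/* contiguous with the last frag: just grow it */
		skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
	} else {
		/* new fragment: the skb takes its own page reference */
		get_page(pfrag->page);
		skb_fill_page_desc(skb, i, pfrag->page,
				   pfrag->offset, copy);
	}
	pfrag->offset += copy;
	skb->len += copy;
	skb->data_len += copy;
}
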
page               64 net/ipv4/tcp_bpf.c 			struct page *page;
page               69 net/ipv4/tcp_bpf.c 			page = sg_page(sge);
page               72 net/ipv4/tcp_bpf.c 			ret = copy_page_to_iter(page, sge->offset, copy, iter);
page               88 net/ipv4/tcp_bpf.c 						put_page(page);
page              222 net/ipv4/tcp_bpf.c 	struct page *page;
page              233 net/ipv4/tcp_bpf.c 		page = sg_page(sge);
page              241 net/ipv4/tcp_bpf.c 						     page, off, size, flags);
page              243 net/ipv4/tcp_bpf.c 			ret = do_tcp_sendpages(sk, page, off, size, flags);
page              261 net/ipv4/tcp_bpf.c 			put_page(page);
page              485 net/ipv4/tcp_bpf.c static int tcp_bpf_sendpage(struct sock *sk, struct page *page, int offset,
page              495 net/ipv4/tcp_bpf.c 		return tcp_sendpage(sk, page, offset, size, flags);
page              509 net/ipv4/tcp_bpf.c 	sk_msg_page_add(msg, page, size, offset);
page             1245 net/ipv4/udp.c int udp_sendpage(struct sock *sk, struct page *page, int offset,
page             1277 net/ipv4/udp.c 			     page, offset, size, flags);
page             1280 net/ipv4/udp.c 		return sock_no_sendpage(sk->sk_socket, page, offset,
page               28 net/ipv4/udp_impl.h int udp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
page              231 net/ipv6/esp6.c 	struct page *page;
page              259 net/ipv6/esp6.c 			page = pfrag->page;
page              260 net/ipv6/esp6.c 			get_page(page);
page              262 net/ipv6/esp6.c 			vaddr = kmap_atomic(page);
page              272 net/ipv6/esp6.c 			__skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
page              316 net/ipv6/esp6.c 	struct page *page;
page              372 net/ipv6/esp6.c 		page = pfrag->page;
page              373 net/ipv6/esp6.c 		get_page(page);
page              375 net/ipv6/esp6.c 		__skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len);
page             1609 net/ipv6/ip6_output.c 			if (!skb_can_coalesce(skb, i, pfrag->page,
page             1615 net/ipv6/ip6_output.c 				__skb_fill_page_desc(skb, i, pfrag->page,
page             1618 net/ipv6/ip6_output.c 				get_page(pfrag->page);
page             1622 net/ipv6/ip6_output.c 				    page_address(pfrag->page) + pfrag->offset,
page              757 net/kcm/kcmsock.c static ssize_t kcm_sendpage(struct socket *sock, struct page *page,
page              789 net/kcm/kcmsock.c 		if (skb_can_coalesce(skb, i, page, offset)) {
page              837 net/kcm/kcmsock.c 	get_page(page);
page              838 net/kcm/kcmsock.c 	skb_fill_page_desc(skb, i, page, offset, size);
page              964 net/kcm/kcmsock.c 		if (!skb_can_coalesce(skb, i, pfrag->page,
page              992 net/kcm/kcmsock.c 					       pfrag->page,
page             1002 net/kcm/kcmsock.c 			skb_fill_page_desc(skb, i, pfrag->page,
page             1004 net/kcm/kcmsock.c 			get_page(pfrag->page);
page              106 net/mac802154/cfg.c ieee802154_set_channel(struct wpan_phy *wpan_phy, u8 page, u8 channel)
page              113 net/mac802154/cfg.c 	if (wpan_phy->current_page == page &&
page              117 net/mac802154/cfg.c 	ret = drv_set_channel(local, page, channel);
page              119 net/mac802154/cfg.c 		wpan_phy->current_page = page;
page               59 net/mac802154/driver-ops.h drv_set_channel(struct ieee802154_local *local, u8 page, u8 channel)
page               65 net/mac802154/driver-ops.h 	trace_802154_drv_set_channel(local, page, channel);
page               66 net/mac802154/driver-ops.h 	ret = local->ops->set_channel(&local->hw, page, channel);
page              132 net/mac802154/ieee802154_i.h void mac802154_dev_set_page_channel(struct net_device *dev, u8 page, u8 chan);
page               26 net/mac802154/mac_cmd.c 				    u8 channel, u8 page,
page               40 net/mac802154/mac_cmd.c 	mac802154_dev_set_page_channel(dev, page, channel);
page               21 net/mac802154/mib.c void mac802154_dev_set_page_channel(struct net_device *dev, u8 page, u8 chan)
page               31 net/mac802154/mib.c 	res = drv_set_channel(local, page, chan);
page               36 net/mac802154/mib.c 		local->phy->current_page = page;
page               79 net/mac802154/trace.h 	TP_PROTO(struct ieee802154_local *local, u8 page, u8 channel),
page               80 net/mac802154/trace.h 	TP_ARGS(local, page, channel),
page               83 net/mac802154/trace.h 		__field(u8, page)
page               88 net/mac802154/trace.h 		__entry->page = page;
page               92 net/mac802154/trace.h 		  __entry->page, __entry->channel)
page              354 net/packet/af_packet.c static inline struct page * __pure pgv_to_page(void *addr)
page             2479 net/packet/af_packet.c 	struct page *page;
page             2535 net/packet/af_packet.c 		page = pgv_to_page(data);
page             2537 net/packet/af_packet.c 		flush_dcache_page(page);
page             2538 net/packet/af_packet.c 		get_page(page);
page             2539 net/packet/af_packet.c 		skb_fill_page_desc(skb, nr_frags, page, offset, len);
page             4489 net/packet/af_packet.c 			struct page *page;
page             4494 net/packet/af_packet.c 				page = pgv_to_page(kaddr);
page             4495 net/packet/af_packet.c 				err = vm_insert_page(vma, start, page);
page              244 net/rds/ib_rdma.c 			struct page *page = sg_page(&ibmr->sg[i]);
page              248 net/rds/ib_rdma.c 			WARN_ON(!page->mapping && irqs_disabled());
page              249 net/rds/ib_rdma.c 			set_page_dirty(page);
page              250 net/rds/ib_rdma.c 			put_page(page);
page               65 net/rds/info.c 	struct page **pages;
page              167 net/rds/info.c 	struct page **pages = NULL;
page              191 net/rds/info.c 	pages = kmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
page              389 net/rds/message.c 		struct page *pages;
page               41 net/rds/page.c 	struct page	*r_page;
page               73 net/rds/page.c 	struct page *page;
page               80 net/rds/page.c 		page = alloc_page(gfp);
page               81 net/rds/page.c 		if (!page) {
page               84 net/rds/page.c 			sg_set_page(scat, page, PAGE_SIZE, 0);
page              122 net/rds/page.c 		page = alloc_page(gfp);
page              127 net/rds/page.c 		if (!page) {
page              134 net/rds/page.c 			__free_page(page);
page              139 net/rds/page.c 		rem->r_page = page;
page              157 net/rds/rdma.c 			struct page **pages, int write)
page              179 net/rds/rdma.c 	struct page **pages = NULL;
page              216 net/rds/rdma.c 	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
page              460 net/rds/rdma.c 		struct page *page = sg_page(&ro->op_sg[i]);
page              466 net/rds/rdma.c 			WARN_ON(!page->mapping && irqs_disabled());
page              467 net/rds/rdma.c 			set_page_dirty(page);
page              469 net/rds/rdma.c 		put_page(page);
page              479 net/rds/rdma.c 	struct page *page = sg_page(ao->op_sg);
page              484 net/rds/rdma.c 	set_page_dirty(page);
page              485 net/rds/rdma.c 	put_page(page);
page              580 net/rds/rdma.c 	struct page **pages = NULL;
page              614 net/rds/rdma.c 	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
page              789 net/rds/rdma.c 	struct page *page = NULL;
page              845 net/rds/rdma.c 	ret = rds_pin_pages(args->local_addr, 1, &page, 1);
page              850 net/rds/rdma.c 	sg_set_page(rm->atomic.op_sg, page, 8, offset_in_page(args->local_addr));
page              873 net/rds/rdma.c 	if (page)
page              874 net/rds/rdma.c 		put_page(page);
page             1868 net/smc/af_smc.c static ssize_t smc_sendpage(struct socket *sock, struct page *page,
page             1883 net/smc/af_smc.c 		rc = kernel_sendpage(smc->clcsock, page, offset,
page             1886 net/smc/af_smc.c 		rc = sock_no_sendpage(sock, page, offset, size, flags);
page              150 net/smc/smc_core.h 	struct page		*pages;
page              128 net/smc/smc_rx.c 	put_page(buf->page);
page              125 net/socket.c   static ssize_t sock_sendpage(struct file *file, struct page *page,
page              923 net/socket.c   static ssize_t sock_sendpage(struct file *file, struct page *page,
page              935 net/socket.c   	return kernel_sendpage(sock, page, offset, size, flags);
page             3759 net/socket.c   int kernel_sendpage(struct socket *sock, struct page *page, int offset,
page             3763 net/socket.c   		return sock->ops->sendpage(sock, page, offset, size, flags);
page             3765 net/socket.c   	return sock_no_sendpage(sock, page, offset, size, flags);
page             3781 net/socket.c   int kernel_sendpage_locked(struct sock *sk, struct page *page, int offset,
page             3787 net/socket.c   		return sock->ops->sendpage_locked(sk, page, offset, size,
page             3790 net/socket.c   	return sock_no_sendpage_locked(sk, page, offset, size, flags);
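
kernel_sendpage() (socket.c, line 3759) dispatches to the protocol's ->sendpage() and otherwise falls back to sock_no_sendpage(), which the sock.c entries show kmapping the page. A sketch of a kernel-side caller that loops until the requested page range is fully pushed out; the wrapper is hypothetical:

#include <linux/net.h>

/* hypothetical caller: push [offset, offset + len) of one page */
static int example_send_page_range(struct socket *sock, struct page *page,
				   int offset, size_t len)
{
	while (len) {
		int ret = kernel_sendpage(sock, page, offset, len, 0);

		if (ret < 0)
			return ret;
		offset += ret;
		len -= ret;
	}
	return 0;
}
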
page             1802 net/sunrpc/auth_gss/auth_gss.c 				sizeof(struct page *),
page             1828 net/sunrpc/auth_gss/auth_gss.c 	struct page	**inpages;
page              457 net/sunrpc/auth_gss/gss_krb5_crypto.c 	struct page **pages;
page              471 net/sunrpc/auth_gss/gss_krb5_crypto.c 	struct page *in_page;
page              531 net/sunrpc/auth_gss/gss_krb5_crypto.c 		    int offset, struct page **pages)
page              678 net/sunrpc/auth_gss/gss_krb5_crypto.c 		   u32 offset, u8 *iv, struct page **pages, int encrypt)
page              684 net/sunrpc/auth_gss/gss_krb5_crypto.c 	struct page **save_pages;
page              734 net/sunrpc/auth_gss/gss_krb5_crypto.c 		     struct xdr_buf *buf, struct page **pages)
page              742 net/sunrpc/auth_gss/gss_krb5_crypto.c 	struct page **save_pages;
page              159 net/sunrpc/auth_gss/gss_krb5_wrap.c 		struct xdr_buf *buf, struct page **pages)
page              168 net/sunrpc/auth_gss/gss_krb5_wrap.c 	struct page		**tmp_pages;
page              447 net/sunrpc/auth_gss/gss_krb5_wrap.c 		     struct xdr_buf *buf, struct page **pages)
page              599 net/sunrpc/auth_gss/gss_krb5_wrap.c 		  struct xdr_buf *buf, struct page **pages)
page              432 net/sunrpc/auth_gss/gss_mech_switch.c 	 struct page	**inpages)
page              214 net/sunrpc/auth_gss/gss_rpc_upcall.c 	arg->pages = kcalloc(arg->npages, sizeof(struct page *), GFP_KERNEL);
page              787 net/sunrpc/auth_gss/gss_rpc_xdr.c 	struct page *scratch;
page              124 net/sunrpc/auth_gss/gss_rpc_xdr.h 	struct page **pages;	/* Array of contiguous pages */
page              137 net/sunrpc/auth_gss/gss_rpc_xdr.h 	struct page **pages;
page             1110 net/sunrpc/auth_gss/svcauth_gss.c 	in_token->pages = kcalloc(pages, sizeof(struct page *), GFP_KERNEL);
page             1714 net/sunrpc/auth_gss/svcauth_gss.c 	struct page **inpages = NULL;
page               69 net/sunrpc/backchannel_rqst.c 	struct page *page;
page               71 net/sunrpc/backchannel_rqst.c 	page = alloc_page(gfp_flags);
page               72 net/sunrpc/backchannel_rqst.c 	if (page == NULL)
page               74 net/sunrpc/backchannel_rqst.c 	xdr_buf_init(buf, page_address(page), PAGE_SIZE);
page              913 net/sunrpc/cache.c 	struct page *page;
page              920 net/sunrpc/cache.c 	page = find_or_create_page(mapping, 0, GFP_KERNEL);
page              921 net/sunrpc/cache.c 	if (!page)
page              924 net/sunrpc/cache.c 	kaddr = kmap(page);
page              926 net/sunrpc/cache.c 	kunmap(page);
page              927 net/sunrpc/cache.c 	unlock_page(page);
page              928 net/sunrpc/cache.c 	put_page(page);
page             1243 net/sunrpc/clnt.c void rpc_prepare_reply_pages(struct rpc_rqst *req, struct page **pages,
page               76 net/sunrpc/socklib.c 	struct page	**ppage = xdr->pages;
page               54 net/sunrpc/sunrpc.h 		    struct page *headpage, unsigned long headoffset,
page               55 net/sunrpc/sunrpc.h 		    struct page *tailpage, unsigned long tailoffset);
page              581 net/sunrpc/svc.c 		struct page *p = alloc_pages_node(node, GFP_KERNEL, 0);
page             1646 net/sunrpc/svc.c unsigned int svc_fill_write_vector(struct svc_rqst *rqstp, struct page **pages,
page              648 net/sunrpc/svc_xprt.c 			struct page *p = alloc_page(GFP_KERNEL);
page              181 net/sunrpc/svcsock.c 		    struct page *headpage, unsigned long headoffset,
page              182 net/sunrpc/svcsock.c 		    struct page *tailpage, unsigned long tailoffset)
page              186 net/sunrpc/svcsock.c 	struct page	**ppage = xdr->pages;
page             1005 net/sunrpc/svcsock.c static int copy_pages_to_kvecs(struct kvec *vec, struct page **pages, int len)
page              178 net/sunrpc/xdr.c 		 struct page **pages, unsigned int base, unsigned int len)
page              219 net/sunrpc/xdr.c _shift_data_right_pages(struct page **pages, size_t pgto_base,
page              222 net/sunrpc/xdr.c 	struct page **pgfrom, **pgto;
page              280 net/sunrpc/xdr.c _copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
page              282 net/sunrpc/xdr.c 	struct page **pgto;
page              324 net/sunrpc/xdr.c _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
page              326 net/sunrpc/xdr.c 	struct page **pgfrom;
page              565 net/sunrpc/xdr.c 	void *page;
page              569 net/sunrpc/xdr.c 	page = page_address(*xdr->page_ptr);
page              570 net/sunrpc/xdr.c 	memcpy(xdr->scratch.iov_base, page, shift);
page              571 net/sunrpc/xdr.c 	memmove(page, page + shift, (void *)xdr->p - page);
page              757 net/sunrpc/xdr.c void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base,
page              886 net/sunrpc/xdr.c 			   struct page **pages, unsigned int len)
page             1341 net/sunrpc/xdr.c 	struct page **ppages = NULL;
page              230 net/sunrpc/xprtrdma/rpc_rdma.c 	struct page **ppages;
page              622 net/sunrpc/xprtrdma/rpc_rdma.c 	struct page *page, **ppages;
page              650 net/sunrpc/xprtrdma/rpc_rdma.c 		page = virt_to_page(xdr->tail[0].iov_base);
page              698 net/sunrpc/xprtrdma/rpc_rdma.c 		page = virt_to_page(xdr->tail[0].iov_base);
page              705 net/sunrpc/xprtrdma/rpc_rdma.c 			ib_dma_map_page(rdmab_device(rb), page, page_base, len,
page              943 net/sunrpc/xprtrdma/rpc_rdma.c 	struct page **ppages;
page              141 net/sunrpc/xprtrdma/svc_rdma_backchannel.c 	struct page *page;
page              149 net/sunrpc/xprtrdma/svc_rdma_backchannel.c 	page = alloc_page(RPCRDMA_DEF_GFP);
page              150 net/sunrpc/xprtrdma/svc_rdma_backchannel.c 	if (!page)
page              152 net/sunrpc/xprtrdma/svc_rdma_backchannel.c 	rqst->rq_buffer = page_address(page);
page              156 net/sunrpc/xprtrdma/svc_rdma_backchannel.c 		put_page(page);
page              373 net/sunrpc/xprtrdma/svc_rdma_rw.c 	struct page **page;
page              378 net/sunrpc/xprtrdma/svc_rdma_rw.c 	page = xdr->pages + page_no;
page              385 net/sunrpc/xprtrdma/svc_rdma_rw.c 		sg_set_page(sg, *page, sge_bytes, page_off);
page              391 net/sunrpc/xprtrdma/svc_rdma_rw.c 		page++;
page              487 net/sunrpc/xprtrdma/svc_rdma_sendto.c 				 struct page *page,
page              494 net/sunrpc/xprtrdma/svc_rdma_sendto.c 	dma_addr = ib_dma_map_page(dev, page, offset, len, DMA_TO_DEVICE);
page              504 net/sunrpc/xprtrdma/svc_rdma_sendto.c 	trace_svcrdma_dma_map_page(rdma, page);
page              604 net/sunrpc/xprtrdma/svc_rdma_sendto.c 		struct page **ppages;
page              648 net/sunrpc/xprtrdma/svc_rdma_sendto.c 	struct page **ppages;
page              295 net/sunrpc/xprtrdma/xprt_rdma.h 	struct page	*mr_page;	/* owning page, if any */
page             2606 net/sunrpc/xprtsock.c 	struct page *page;
page             2615 net/sunrpc/xprtsock.c 	page = alloc_page(GFP_KERNEL);
page             2616 net/sunrpc/xprtsock.c 	if (!page)
page             2619 net/sunrpc/xprtsock.c 	buf = page_address(page);
page             2651 net/sunrpc/xprtsock.c 	struct page *tailpage;
page              239 net/tls/tls_device.c 	if (skb_frag_page(frag) == pfrag->page &&
page              244 net/tls/tls_device.c 		__skb_frag_set_page(frag, pfrag->page);
page              248 net/tls/tls_device.c 		get_page(pfrag->page);
page              333 net/tls/tls_device.c 	__skb_frag_set_page(frag, pfrag->page);
page              337 net/tls/tls_device.c 	get_page(pfrag->page);
page              473 net/tls/tls_device.c 		rc = tls_device_copy_data(page_address(pfrag->page) +
page              547 net/tls/tls_device.c int tls_device_sendpage(struct sock *sk, struct page *page,
page              552 net/tls/tls_device.c 	char *kaddr = kmap(page);
page              572 net/tls/tls_device.c 	kunmap(page);
page              107 net/tls/tls_main.c 	struct page *p;
page             1135 net/tls/tls_sw.c static int tls_sw_do_sendpage(struct sock *sk, struct page *page,
page             1201 net/tls/tls_sw.c 		sk_msg_page_add(msg_pl, page, copy, offset);
page             1251 net/tls/tls_sw.c int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
page             1259 net/tls/tls_sw.c 	return tls_sw_do_sendpage(sk, page, offset, size, flags);
page             1262 net/tls/tls_sw.c int tls_sw_sendpage(struct sock *sk, struct page *page,
page             1274 net/tls/tls_sw.c 	ret = tls_sw_do_sendpage(sk, page, offset, size, flags);
page             1331 net/tls/tls_sw.c 	struct page *pages[MAX_SKB_FRAGS];
page              655 net/unix/af_unix.c static ssize_t unix_stream_sendpage(struct socket *, struct page *, int offset,
page             1886 net/unix/af_unix.c static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page,
page             1963 net/unix/af_unix.c 	if (skb_append_pagefrags(skb, page, offset, size)) {
page              561 net/wireless/util.c __frame_add_frag(struct sk_buff *skb, struct page *page,
page              567 net/wireless/util.c 	get_page(page);
page              568 net/wireless/util.c 	page_offset = ptr - page_address(page);
page              569 net/wireless/util.c 	skb_add_rx_frag(skb, sh->nr_frags, page, page_offset, len, size);
page              578 net/wireless/util.c 	struct page *frag_page;
page              971 net/xdp/xsk.c  	struct page *qpg;
page               69 net/xfrm/xfrm_ipcomp.c 		struct page *page;
page               76 net/xfrm/xfrm_ipcomp.c 		page = alloc_page(GFP_ATOMIC);
page               79 net/xfrm/xfrm_ipcomp.c 		if (!page)
page               82 net/xfrm/xfrm_ipcomp.c 		__skb_frag_set_page(frag, page);
page              498 net/xfrm/xfrm_state.c 	if (x->xfrag.page)
page              499 net/xfrm/xfrm_state.c 		put_page(x->xfrag.page);
page               47 samples/configfs/configfs_sample.c static ssize_t childless_showme_show(struct config_item *item, char *page)
page               52 samples/configfs/configfs_sample.c 	pos = sprintf(page, "%d\n", childless->showme);
page               58 samples/configfs/configfs_sample.c static ssize_t childless_storeme_show(struct config_item *item, char *page)
page               60 samples/configfs/configfs_sample.c 	return sprintf(page, "%d\n", to_childless(item)->storeme);
page               64 samples/configfs/configfs_sample.c 		const char *page, size_t count)
page               68 samples/configfs/configfs_sample.c 	char *p = (char *) page;
page               82 samples/configfs/configfs_sample.c static ssize_t childless_description_show(struct config_item *item, char *page)
page               84 samples/configfs/configfs_sample.c 	return sprintf(page,
page              142 samples/configfs/configfs_sample.c static ssize_t simple_child_storeme_show(struct config_item *item, char *page)
page              144 samples/configfs/configfs_sample.c 	return sprintf(page, "%d\n", to_simple_child(item)->storeme);
page              148 samples/configfs/configfs_sample.c 		const char *page, size_t count)
page              152 samples/configfs/configfs_sample.c 	char *p = (char *) page;
page              217 samples/configfs/configfs_sample.c 		char *page)
page              219 samples/configfs/configfs_sample.c 	return sprintf(page,
page              296 samples/configfs/configfs_sample.c 		char *page)
page              298 samples/configfs/configfs_sample.c 	return sprintf(page,
page              153 samples/vfio-mdev/mbochs.c 	struct page **pages;
page              172 samples/vfio-mdev/mbochs.c 	struct page **pages;
page              203 samples/vfio-mdev/mbochs.c static struct page *__mbochs_get_page(struct mdev_state *mdev_state,
page              205 samples/vfio-mdev/mbochs.c static struct page *mbochs_get_page(struct mdev_state *mdev_state,
page              443 samples/vfio-mdev/mbochs.c 	struct page *pg;
page              541 samples/vfio-mdev/mbochs.c 				    sizeof(struct page *),
page              700 samples/vfio-mdev/mbochs.c static struct page *__mbochs_get_page(struct mdev_state *mdev_state,
page              716 samples/vfio-mdev/mbochs.c static struct page *mbochs_get_page(struct mdev_state *mdev_state,
page              719 samples/vfio-mdev/mbochs.c 	struct page *page;
page              725 samples/vfio-mdev/mbochs.c 	page = __mbochs_get_page(mdev_state, pgoff);
page              728 samples/vfio-mdev/mbochs.c 	return page;
page              757 samples/vfio-mdev/mbochs.c 	vmf->page = mbochs_get_page(mdev_state, page_offset);
page              758 samples/vfio-mdev/mbochs.c 	if (!vmf->page)
page              794 samples/vfio-mdev/mbochs.c 	vmf->page = dmabuf->pages[vmf->pgoff];
page              795 samples/vfio-mdev/mbochs.c 	get_page(vmf->page);
page              897 samples/vfio-mdev/mbochs.c 	struct page *page = dmabuf->pages[page_num];
page              899 samples/vfio-mdev/mbochs.c 	return kmap(page);
page              932 samples/vfio-mdev/mbochs.c 	dmabuf->pages = kcalloc(dmabuf->pagecount, sizeof(struct page *),
page               21 scripts/kconfig/lxdialog/textbox.c static char *page;
page               57 scripts/kconfig/lxdialog/textbox.c 	page = buf;	/* page is pointer to start of page to be displayed */
page              143 scripts/kconfig/lxdialog/textbox.c 				page = buf;
page              154 scripts/kconfig/lxdialog/textbox.c 			page = buf + strlen(buf);
page              253 scripts/kconfig/lxdialog/textbox.c 		while (s < page && (s = strchr(s, '\n'))) {
page              274 scripts/kconfig/lxdialog/textbox.c 		if (*page == '\0') {
page              280 scripts/kconfig/lxdialog/textbox.c 		if (page == buf) {
page              284 scripts/kconfig/lxdialog/textbox.c 		page--;
page              286 scripts/kconfig/lxdialog/textbox.c 			if (page == buf) {
page              290 scripts/kconfig/lxdialog/textbox.c 			page--;
page              291 scripts/kconfig/lxdialog/textbox.c 		} while (*page != '\n');
page              292 scripts/kconfig/lxdialog/textbox.c 		page++;
page              309 scripts/kconfig/lxdialog/textbox.c 		end = page;
page              311 scripts/kconfig/lxdialog/textbox.c 		update_text(buf, page - buf, end - buf, data);
page              362 scripts/kconfig/lxdialog/textbox.c 	while (*page != '\n') {
page              363 scripts/kconfig/lxdialog/textbox.c 		if (*page == '\0') {
page              367 scripts/kconfig/lxdialog/textbox.c 			line[i++] = *(page++);
page              372 scripts/kconfig/lxdialog/textbox.c 			page++;
page              378 scripts/kconfig/lxdialog/textbox.c 		page++;		/* move past '\n' */
page              392 scripts/kconfig/lxdialog/textbox.c 	percent = (page - buf) * 100 / strlen(buf);
page               27 security/keys/big_key.c 	struct page		*pages[];
page              166 security/keys/big_key.c 		      sizeof(struct page) * npg +
page              150 security/selinux/avc.c int avc_get_hash_stats(struct selinux_avc *avc, char *page)
page              174 security/selinux/avc.c 	return scnprintf(page, PAGE_SIZE, "entries: %d\nbuckets used: %d/%d\n"
page              185 security/selinux/include/avc.h int avc_get_hash_stats(struct selinux_avc *avc, char *page);
page              367 security/selinux/include/security.h extern struct page *selinux_kernel_status_page(struct selinux_state *state);
page              140 security/selinux/selinuxfs.c 	char *page = NULL;
page              151 security/selinux/selinuxfs.c 	page = memdup_user_nul(buf, count);
page              152 security/selinux/selinuxfs.c 	if (IS_ERR(page))
page              153 security/selinux/selinuxfs.c 		return PTR_ERR(page);
page              156 security/selinux/selinuxfs.c 	if (sscanf(page, "%d", &new_value) != 1)
page              186 security/selinux/selinuxfs.c 	kfree(page);
page              223 security/selinux/selinuxfs.c 	struct page    *status = selinux_kernel_status_page(fsi->state);
page              236 security/selinux/selinuxfs.c 	struct page    *status = filp->private_data;
page              248 security/selinux/selinuxfs.c 	struct page    *status = filp->private_data;
page              280 security/selinux/selinuxfs.c 	char *page;
page              292 security/selinux/selinuxfs.c 	page = memdup_user_nul(buf, count);
page              293 security/selinux/selinuxfs.c 	if (IS_ERR(page))
page              294 security/selinux/selinuxfs.c 		return PTR_ERR(page);
page              297 security/selinux/selinuxfs.c 	if (sscanf(page, "%d", &new_value) != 1)
page              315 security/selinux/selinuxfs.c 	kfree(page);
page              459 security/selinux/selinuxfs.c 	struct page *page;
page              468 security/selinux/selinuxfs.c 	page = vmalloc_to_page(plm->data + offset);
page              469 security/selinux/selinuxfs.c 	get_page(page);
page              471 security/selinux/selinuxfs.c 	vmf->page = page;
page              643 security/selinux/selinuxfs.c 	char *page;
page              661 security/selinux/selinuxfs.c 	page = memdup_user_nul(buf, count);
page              662 security/selinux/selinuxfs.c 	if (IS_ERR(page))
page              663 security/selinux/selinuxfs.c 		return PTR_ERR(page);
page              666 security/selinux/selinuxfs.c 	if (sscanf(page, "%u", &new_value) != 1)
page              672 security/selinux/selinuxfs.c 	kfree(page);
page             1172 security/selinux/selinuxfs.c 	char *page = NULL;
page             1187 security/selinux/selinuxfs.c 	page = (char *)get_zeroed_page(GFP_KERNEL);
page             1188 security/selinux/selinuxfs.c 	if (!page)
page             1196 security/selinux/selinuxfs.c 	length = scnprintf(page, PAGE_SIZE, "%d %d", cur_enforcing,
page             1199 security/selinux/selinuxfs.c 	ret = simple_read_from_buffer(buf, count, ppos, page, length);
page             1201 security/selinux/selinuxfs.c 	free_page((unsigned long)page);
page             1213 security/selinux/selinuxfs.c 	char *page = NULL;
page             1226 security/selinux/selinuxfs.c 	page = memdup_user_nul(buf, count);
page             1227 security/selinux/selinuxfs.c 	if (IS_ERR(page))
page             1228 security/selinux/selinuxfs.c 		return PTR_ERR(page);
page             1245 security/selinux/selinuxfs.c 	if (sscanf(page, "%d", &new_value) != 1)
page             1256 security/selinux/selinuxfs.c 	kfree(page);
page             1271 security/selinux/selinuxfs.c 	char *page = NULL;
page             1282 security/selinux/selinuxfs.c 	page = memdup_user_nul(buf, count);
page             1283 security/selinux/selinuxfs.c 	if (IS_ERR(page))
page             1284 security/selinux/selinuxfs.c 		return PTR_ERR(page);
page             1296 security/selinux/selinuxfs.c 	if (sscanf(page, "%d", &new_value) != 1)
page             1309 security/selinux/selinuxfs.c 	kfree(page);
page             1334 security/selinux/selinuxfs.c 	char **names = NULL, *page;
page             1351 security/selinux/selinuxfs.c 	page = (char *)get_zeroed_page(GFP_KERNEL);
page             1352 security/selinux/selinuxfs.c 	if (!page)
page             1373 security/selinux/selinuxfs.c 		len = snprintf(page, PAGE_SIZE, "/%s/%s", BOOL_DIR_NAME, names[i]);
page             1381 security/selinux/selinuxfs.c 		ret = security_genfs_sid(fsi->state, "selinuxfs", page,
page             1385 security/selinux/selinuxfs.c 					   page);
page             1399 security/selinux/selinuxfs.c 	free_page((unsigned long)page);
page             1402 security/selinux/selinuxfs.c 	free_page((unsigned long)page);
page             1435 security/selinux/selinuxfs.c 	char *page;
page             1453 security/selinux/selinuxfs.c 	page = memdup_user_nul(buf, count);
page             1454 security/selinux/selinuxfs.c 	if (IS_ERR(page))
page             1455 security/selinux/selinuxfs.c 		return PTR_ERR(page);
page             1458 security/selinux/selinuxfs.c 	if (sscanf(page, "%u", &new_value) != 1)
page             1465 security/selinux/selinuxfs.c 	kfree(page);
page             1474 security/selinux/selinuxfs.c 	char *page;
page             1477 security/selinux/selinuxfs.c 	page = (char *)__get_free_page(GFP_KERNEL);
page             1478 security/selinux/selinuxfs.c 	if (!page)
page             1481 security/selinux/selinuxfs.c 	length = avc_get_hash_stats(state->avc, page);
page             1483 security/selinux/selinuxfs.c 		length = simple_read_from_buffer(buf, count, ppos, page, length);
page             1484 security/selinux/selinuxfs.c 	free_page((unsigned long)page);
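
The selinuxfs.c entries follow two symmetric patterns: reads format into a freshly allocated page and hand it to simple_read_from_buffer(), while writes copy the user buffer with memdup_user_nul() and parse it with sscanf(). A condensed sketch of both for a hypothetical file exporting a plain int:

#include <linux/err.h>
#include <linux/fs.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/string.h>

static int example_value;	/* stand-in for the exported state */

static ssize_t example_read(struct file *filp, char __user *buf,
			    size_t count, loff_t *ppos)
{
	char *page = (char *)get_zeroed_page(GFP_KERNEL);
	ssize_t length;

	if (!page)
		return -ENOMEM;
	length = scnprintf(page, PAGE_SIZE, "%d\n", example_value);
	length = simple_read_from_buffer(buf, count, ppos, page, length);
	free_page((unsigned long)page);
	return length;
}

static ssize_t example_write(struct file *filp, const char __user *buf,
			     size_t count, loff_t *ppos)
{
	char *page = memdup_user_nul(buf, count);
	int new_value;

	if (IS_ERR(page))
		return PTR_ERR(page);
	if (sscanf(page, "%d", &new_value) != 1) {
		kfree(page);
		return -EINVAL;
	}
	example_value = new_value;
	kfree(page);
	return count;
}
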
page               32 security/selinux/ss/services.h 	struct page *status_page;
page               42 security/selinux/ss/status.c struct page *selinux_kernel_status_page(struct selinux_state *state)
page               45 security/selinux/ss/status.c 	struct page		       *result = NULL;
page              578 security/tomoyo/common.h 	struct page *page;    /* Previously dumped page. */
page              901 security/tomoyo/domain.c 	struct page *page;
page              918 security/tomoyo/domain.c 				FOLL_FORCE, &page, NULL, NULL) <= 0)
page              921 security/tomoyo/domain.c 	page = bprm->page[pos / PAGE_SIZE];
page              923 security/tomoyo/domain.c 	if (page != dump->page) {
page              930 security/tomoyo/domain.c 		char *kaddr = kmap_atomic(page);
page              932 security/tomoyo/domain.c 		dump->page = page;
page              939 security/tomoyo/domain.c 	put_page(page);
page              264 sound/core/pcm_memory.c struct page *snd_pcm_sgbuf_ops_page(struct snd_pcm_substream *substream, unsigned long offset)
page              408 sound/core/pcm_memory.c struct page *snd_pcm_lib_get_vmalloc_page(struct snd_pcm_substream *substream,
page             3247 sound/core/pcm_native.c 	vmf->page = virt_to_page(runtime->status);
page             3248 sound/core/pcm_native.c 	get_page(vmf->page);
page             3283 sound/core/pcm_native.c 	vmf->page = virt_to_page(runtime->control);
page             3284 sound/core/pcm_native.c 	get_page(vmf->page);
page             3354 sound/core/pcm_native.c static inline struct page *
page             3369 sound/core/pcm_native.c 	struct page * page;
page             3379 sound/core/pcm_native.c 	if (substream->ops->page)
page             3380 sound/core/pcm_native.c 		page = substream->ops->page(substream, offset);
page             3382 sound/core/pcm_native.c 		page = snd_pcm_default_page_ops(substream, offset);
page             3383 sound/core/pcm_native.c 	if (!page)
page             3385 sound/core/pcm_native.c 	get_page(page);
page             3386 sound/core/pcm_native.c 	vmf->page = page;
page             3426 sound/core/pcm_native.c 	if (IS_ENABLED(CONFIG_HAS_DMA) && !substream->ops->page &&
page               63 sound/core/sgbuf.c 	struct page **pgtable;
page              768 sound/drivers/aloop.c 	.page =		snd_pcm_lib_get_vmalloc_page,
page              649 sound/drivers/dummy.c static struct page *dummy_pcm_page(struct snd_pcm_substream *substream,
page              678 sound/drivers/dummy.c 	.page =		dummy_pcm_page,
page              870 sound/drivers/vx/vx_pcm.c 	.page =		snd_pcm_lib_get_vmalloc_page,
page             1091 sound/drivers/vx/vx_pcm.c 	.page =		snd_pcm_lib_get_vmalloc_page,
page              328 sound/firewire/bebob/bebob_pcm.c 		.page		= snd_pcm_lib_get_vmalloc_page,
page              340 sound/firewire/bebob/bebob_pcm.c 		.page		= snd_pcm_lib_get_vmalloc_page,
page              382 sound/firewire/dice/dice-pcm.c 		.page      = snd_pcm_lib_get_vmalloc_page,
page              394 sound/firewire/dice/dice-pcm.c 		.page      = snd_pcm_lib_get_vmalloc_page,
page              307 sound/firewire/digi00x/digi00x-pcm.c 		.page		= snd_pcm_lib_get_vmalloc_page,
page              319 sound/firewire/digi00x/digi00x-pcm.c 		.page		= snd_pcm_lib_get_vmalloc_page,
page              351 sound/firewire/fireface/ff-pcm.c 		.page		= snd_pcm_lib_get_vmalloc_page,
page              363 sound/firewire/fireface/ff-pcm.c 		.page		= snd_pcm_lib_get_vmalloc_page,
page              356 sound/firewire/fireworks/fireworks_pcm.c 		.page		= snd_pcm_lib_get_vmalloc_page,
page              368 sound/firewire/fireworks/fireworks_pcm.c 		.page		= snd_pcm_lib_get_vmalloc_page,
page              456 sound/firewire/isight.c 		.page      = snd_pcm_lib_get_vmalloc_page,
page              333 sound/firewire/motu/motu-pcm.c 		.page      = snd_pcm_lib_get_vmalloc_page,
page              345 sound/firewire/motu/motu-pcm.c 		.page      = snd_pcm_lib_get_vmalloc_page,
page              399 sound/firewire/oxfw/oxfw-pcm.c 		.page      = snd_pcm_lib_get_vmalloc_page,
page              411 sound/firewire/oxfw/oxfw-pcm.c 		.page      = snd_pcm_lib_get_vmalloc_page,
page              239 sound/firewire/tascam/tascam-pcm.c 		.page		= snd_pcm_lib_get_vmalloc_page,
page              251 sound/firewire/tascam/tascam-pcm.c 		.page		= snd_pcm_lib_get_vmalloc_page,
page               61 sound/isa/wavefront/wavefront_fx.c 		     int page,
page               66 sound/isa/wavefront/wavefront_fx.c 	if (page < 0 || page > 7) {
page               81 sound/isa/wavefront/wavefront_fx.c 		outb (page, dev->fx_dsp_page);
page               87 sound/isa/wavefront/wavefront_fx.c 			page, addr, data[0]);
page               93 sound/isa/wavefront/wavefront_fx.c 		outb (page, dev->fx_dsp_page);
page              107 sound/isa/wavefront/wavefront_fx.c 				    page, addr, (unsigned long) data, cnt);
page              224 sound/mips/hal2.h 	u32 page;		/* DOC Page register */
page              673 sound/mips/sgio2audio.c 	.page =        snd_pcm_lib_get_vmalloc_page,
page              685 sound/mips/sgio2audio.c 	.page =        snd_pcm_lib_get_vmalloc_page,
page              697 sound/mips/sgio2audio.c 	.page =        snd_pcm_lib_get_vmalloc_page,
page              503 sound/pci/ac97/ac97_codec.c 		unsigned short page = (kcontrol->private_value >> 26) & 0x0f;
page              506 sound/pci/ac97/ac97_codec.c 		snd_ac97_update_bits(ac97, AC97_INT_PAGING, AC97_PAGE_MASK, page);
page               53 sound/pci/ac97/ac97_patch.c static int ac97_update_bits_page(struct snd_ac97 *ac97, unsigned short reg, unsigned short mask, unsigned short value, unsigned short page)
page               60 sound/pci/ac97/ac97_patch.c 	snd_ac97_update_bits(ac97, AC97_INT_PAGING, AC97_PAGE_MASK, page);
page               13 sound/pci/ac97/ac97_patch.h #define AC97_PAGE_SINGLE_VALUE(reg,shift,mask,invert,page) \
page               14 sound/pci/ac97/ac97_patch.h 	(AC97_SINGLE_VALUE(reg,shift,mask,invert) | (1<<25) | ((page) << 26))
page               20 sound/pci/ac97/ac97_patch.h #define AC97_PAGE_SINGLE(xname, reg, shift, mask, invert, page)		\
page               24 sound/pci/ac97/ac97_patch.h   .private_value =  AC97_PAGE_SINGLE_VALUE(reg, shift, mask, invert, page) }
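The ac97_patch.h macros pack the register page into the control's private_value: bit 25 marks the control as paged and bits 26-29 hold the page number, which ac97_codec.c (line 503 above) unpacks with (private_value >> 26) & 0x0f before writing it to AC97_INT_PAGING. A small userspace check of that packing, with 0x1234 standing in for the AC97_SINGLE_VALUE() bits:

    #include <stdio.h>

    #define PAGED_FLAG (1u << 25)   /* "this control lives on a page" */

    int main(void)
    {
            unsigned int base = 0x1234;     /* stand-in for AC97_SINGLE_VALUE() */
            unsigned int pv = base | PAGED_FLAG | (3u << 26);

            printf("paged: %d, page: %u\n",
                   !!(pv & PAGED_FLAG), (pv >> 26) & 0x0f);
            return 0;       /* prints: paged: 1, page: 3 */
    }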
page             1167 sound/pci/au88x0/au88x0_core.c 	int page, p, pp, delta, i;
page             1169 sound/pci/au88x0/au88x0_core.c 	page =
page             1173 sound/pci/au88x0/au88x0_core.c 		delta = (page - dma->period_real) & 3;
page             1175 sound/pci/au88x0/au88x0_core.c 		delta = (page - dma->period_real);
page             1204 sound/pci/au88x0/au88x0_core.c 	dma->period_real = page;
page             1248 sound/pci/au88x0/au88x0_core.c 	int temp, page, delta;
page             1251 sound/pci/au88x0/au88x0_core.c 	page = (temp & ADB_SUBBUF_MASK) >> ADB_SUBBUF_SHIFT;
page             1253 sound/pci/au88x0/au88x0_core.c 		delta = (page - dma->period_real) & 3;
page             1255 sound/pci/au88x0/au88x0_core.c 		delta = (page - dma->period_real);
page             1438 sound/pci/au88x0/au88x0_core.c 	int page, p, pp, delta, i;
page             1440 sound/pci/au88x0/au88x0_core.c 	page =
page             1444 sound/pci/au88x0/au88x0_core.c 		delta = (page - dma->period_real) & 3;
page             1446 sound/pci/au88x0/au88x0_core.c 		delta = (page - dma->period_real);
page             1477 sound/pci/au88x0/au88x0_core.c 	dma->period_real = page;
page              439 sound/pci/au88x0/au88x0_pcm.c 	.page = snd_pcm_sgbuf_ops_page,
page              548 sound/pci/bt87x.c 	.page = snd_pcm_sgbuf_ops_page,
page              382 sound/pci/ctxfi/ctpcm.c 	.page		= snd_pcm_sgbuf_ops_page,
page              395 sound/pci/ctxfi/ctpcm.c 	.page		= snd_pcm_sgbuf_ops_page,
page              519 sound/pci/echoaudio/echoaudio.c 	int err, per, rest, page, edge, offs;
page              563 sound/pci/echoaudio/echoaudio.c 	for (offs = page = per = 0; offs < params_buffer_bytes(hw_params);
page              584 sound/pci/echoaudio/echoaudio.c 				page++;
page              827 sound/pci/echoaudio/echoaudio.c 	.page = snd_pcm_sgbuf_ops_page,
page              838 sound/pci/echoaudio/echoaudio.c 	.page = snd_pcm_sgbuf_ops_page,
page              851 sound/pci/echoaudio/echoaudio.c 	.page = snd_pcm_sgbuf_ops_page,
page              863 sound/pci/echoaudio/echoaudio.c 	.page = snd_pcm_sgbuf_ops_page,
page             1369 sound/pci/emu10k1/emupcm.c 	.page =			snd_pcm_sgbuf_ops_page,
page             1393 sound/pci/emu10k1/emupcm.c 	.page =			snd_pcm_sgbuf_ops_page,
page               21 sound/pci/emu10k1/memory.c #define __set_ptb_entry(emu,page,addr) \
page               22 sound/pci/emu10k1/memory.c 	(((__le32 *)(emu)->ptb_pages.area)[page] = \
page               23 sound/pci/emu10k1/memory.c 	 cpu_to_le32(((addr) << (emu->address_mode)) | (page)))
page               24 sound/pci/emu10k1/memory.c #define __get_ptb_entry(emu, page) \
page               25 sound/pci/emu10k1/memory.c 	(le32_to_cpu(((__le32 *)(emu)->ptb_pages.area)[page]))
page               33 sound/pci/emu10k1/memory.c #define aligned_page_offset(page)	((page) << PAGE_SHIFT)
page               37 sound/pci/emu10k1/memory.c #define set_ptb_entry(emu,page,addr)	__set_ptb_entry(emu,page,addr)
page               39 sound/pci/emu10k1/memory.c #define set_silent_ptb(emu,page)	__set_ptb_entry(emu,page,emu->silent_page.addr)
page               42 sound/pci/emu10k1/memory.c static inline void set_ptb_entry(struct snd_emu10k1 *emu, int page, dma_addr_t addr)
page               45 sound/pci/emu10k1/memory.c 	page *= UNIT_PAGES;
page               46 sound/pci/emu10k1/memory.c 	for (i = 0; i < UNIT_PAGES; i++, page++) {
page               47 sound/pci/emu10k1/memory.c 		__set_ptb_entry(emu, page, addr);
page               48 sound/pci/emu10k1/memory.c 		dev_dbg(emu->card->dev, "mapped page %d to entry %.8x\n", page,
page               49 sound/pci/emu10k1/memory.c 			(unsigned int)__get_ptb_entry(emu, page));
page               53 sound/pci/emu10k1/memory.c static inline void set_silent_ptb(struct snd_emu10k1 *emu, int page)
page               56 sound/pci/emu10k1/memory.c 	page *= UNIT_PAGES;
page               57 sound/pci/emu10k1/memory.c 	for (i = 0; i < UNIT_PAGES; i++, page++) {
page               59 sound/pci/emu10k1/memory.c 		__set_ptb_entry(emu, page, emu->silent_page.addr);
page               61 sound/pci/emu10k1/memory.c 			page, (unsigned int)__get_ptb_entry(emu, page));
page               97 sound/pci/emu10k1/memory.c 	int page = 1, found_page = -ENOMEM;
page              107 sound/pci/emu10k1/memory.c 		size = blk->mapped_page - page;
page              110 sound/pci/emu10k1/memory.c 			return page;
page              116 sound/pci/emu10k1/memory.c 			found_page = page;
page              118 sound/pci/emu10k1/memory.c 		page = blk->mapped_page + blk->pages;
page              120 sound/pci/emu10k1/memory.c 	size = (emu->address_mode ? MAX_ALIGN_PAGES1 : MAX_ALIGN_PAGES0) - page;
page              123 sound/pci/emu10k1/memory.c 		return page;
page              136 sound/pci/emu10k1/memory.c 	int page, pg;
page              139 sound/pci/emu10k1/memory.c 	page = search_empty_map_area(emu, blk->pages, &next);
page              140 sound/pci/emu10k1/memory.c 	if (page < 0) /* not found */
page              141 sound/pci/emu10k1/memory.c 		return page;
page              142 sound/pci/emu10k1/memory.c 	if (page == 0) {
page              150 sound/pci/emu10k1/memory.c 	blk->mapped_page = page;
page              153 sound/pci/emu10k1/memory.c 		set_ptb_entry(emu, page, emu->page_addr_table[pg]);
page              154 sound/pci/emu10k1/memory.c 		page++;
page              206 sound/pci/emu10k1/memory.c 	int page, psize;
page              209 sound/pci/emu10k1/memory.c 	page = 0;
page              212 sound/pci/emu10k1/memory.c 		if (page + psize <= blk->first_page)
page              214 sound/pci/emu10k1/memory.c 		page = blk->last_page + 1;
page              216 sound/pci/emu10k1/memory.c 	if (page + psize > emu->max_cache_pages)
page              224 sound/pci/emu10k1/memory.c 	blk->mem.offset = aligned_page_offset(page); /* set aligned offset */
page              302 sound/pci/emu10k1/memory.c 	int page, err, idx;
page              325 sound/pci/emu10k1/memory.c 	for (page = blk->first_page; page <= blk->last_page; page++, idx++) {
page              338 sound/pci/emu10k1/memory.c 		emu->page_addr_table[page] = addr;
page              339 sound/pci/emu10k1/memory.c 		emu->page_ptr_table[page] = NULL;
page              477 sound/pci/emu10k1/memory.c 	int page;
page              482 sound/pci/emu10k1/memory.c 	for (page = first_page; page <= last_page; page++) {
page              483 sound/pci/emu10k1/memory.c 		if (emu->page_ptr_table[page] == NULL)
page              485 sound/pci/emu10k1/memory.c 		dmab.area = emu->page_ptr_table[page];
page              486 sound/pci/emu10k1/memory.c 		dmab.addr = emu->page_addr_table[page];
page              497 sound/pci/emu10k1/memory.c 		emu->page_addr_table[page] = 0;
page              498 sound/pci/emu10k1/memory.c 		emu->page_ptr_table[page] = NULL;
page              507 sound/pci/emu10k1/memory.c 	int page, first_page, last_page;
page              513 sound/pci/emu10k1/memory.c 	for (page = first_page; page <= last_page; page++) {
page              521 sound/pci/emu10k1/memory.c 		emu->page_addr_table[page] = dmab.addr;
page              522 sound/pci/emu10k1/memory.c 		emu->page_ptr_table[page] = dmab.area;
page              528 sound/pci/emu10k1/memory.c 	last_page = page - 1;
page              547 sound/pci/emu10k1/memory.c static inline void *offset_ptr(struct snd_emu10k1 *emu, int page, int offset)
page              550 sound/pci/emu10k1/memory.c 	if (snd_BUG_ON(page < 0 || page >= emu->max_cache_pages))
page              552 sound/pci/emu10k1/memory.c 	ptr = emu->page_ptr_table[page];
page              555 sound/pci/emu10k1/memory.c 			"access to NULL ptr: page = %d\n", page);
page              568 sound/pci/emu10k1/memory.c 	int page, nextofs, end_offset, temp, temp1;
page              574 sound/pci/emu10k1/memory.c 	page = get_aligned_page(offset);
page              576 sound/pci/emu10k1/memory.c 		nextofs = aligned_page_offset(page + 1);
page              581 sound/pci/emu10k1/memory.c 		ptr = offset_ptr(emu, page + p->first_page, offset);
page              585 sound/pci/emu10k1/memory.c 		page++;
page              598 sound/pci/emu10k1/memory.c 	int page, nextofs, end_offset, temp, temp1;
page              604 sound/pci/emu10k1/memory.c 	page = get_aligned_page(offset);
page              606 sound/pci/emu10k1/memory.c 		nextofs = aligned_page_offset(page + 1);
page              611 sound/pci/emu10k1/memory.c 		ptr = offset_ptr(emu, page + p->first_page, offset);
page              616 sound/pci/emu10k1/memory.c 		page++;
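emu10k1's memory.c packs both the DMA address and the page index into each little-endian page-table entry ((addr << address_mode) | page), and when the kernel page is larger than the chip's 4 KiB granule a single driver page fans out into UNIT_PAGES consecutive hardware entries. A userspace sketch of that index math, with UNIT_PAGES and the 4 KiB granule assumed for illustration:

    #include <stdio.h>
    #include <stdint.h>

    #define UNIT_PAGES 2    /* e.g. 8 KiB kernel pages over 4 KiB chip pages */

    /* Mirror of __set_ptb_entry()'s encoding shown above. */
    static uint32_t ptb_entry(uint64_t addr, int page, int address_mode)
    {
            return (uint32_t)((addr << address_mode) | (unsigned int)page);
    }

    int main(void)
    {
            int page = 5;                   /* driver-level (aligned) page index */
            uint64_t addr = 0xabcd000;      /* its DMA address */

            for (int i = 0; i < UNIT_PAGES; i++) {
                    int hw = page * UNIT_PAGES + i;

                    printf("entry %d = %#x\n", hw,
                           (unsigned int)ptb_entry(addr, hw, 1));
                    addr += 4096;           /* next 4 KiB chip page */
            }
            return 0;
    }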
page              704 sound/pci/hda/hda_controller.c 	.page = snd_pcm_sgbuf_ops_page,
page              585 sound/pci/lola/lola_pcm.c 	.page = snd_pcm_sgbuf_ops_page,
page             1664 sound/pci/riptide/riptide.c 	.page = snd_pcm_sgbuf_ops_page,
page             1675 sound/pci/riptide/riptide.c 	.page = snd_pcm_sgbuf_ops_page,
page             6371 sound/pci/rme9652/hdspm.c 	.page = snd_pcm_sgbuf_ops_page,
page             2079 sound/pci/trident/trident_main.c 	.page =		snd_pcm_sgbuf_ops_page,
page             2124 sound/pci/trident/trident_main.c 	.page =		snd_pcm_sgbuf_ops_page,
page               22 sound/pci/trident/trident_memory.c #define __set_tlb_bus(trident,page,ptr,addr) \
page               23 sound/pci/trident/trident_memory.c 	do { (trident)->tlb.entries[page] = cpu_to_le32((addr) & ~(SNDRV_TRIDENT_PAGE_SIZE-1)); \
page               24 sound/pci/trident/trident_memory.c 	     (trident)->tlb.shadow_entries[page] = (ptr); } while (0)
page               25 sound/pci/trident/trident_memory.c #define __tlb_to_ptr(trident,page) \
page               26 sound/pci/trident/trident_memory.c 	(void*)((trident)->tlb.shadow_entries[page])
page               27 sound/pci/trident/trident_memory.c #define __tlb_to_addr(trident,page) \
page               28 sound/pci/trident/trident_memory.c 	(dma_addr_t)le32_to_cpu((trident->tlb.entries[page]) & ~(SNDRV_TRIDENT_PAGE_SIZE - 1))
page               35 sound/pci/trident/trident_memory.c #define set_tlb_bus(trident,page,ptr,addr) __set_tlb_bus(trident,page,ptr,addr)
page               37 sound/pci/trident/trident_memory.c #define set_silent_tlb(trident,page)	__set_tlb_bus(trident, page, (unsigned long)trident->tlb.silent_page.area, trident->tlb.silent_page.addr)
page               41 sound/pci/trident/trident_memory.c #define aligned_page_offset(page)	((page) << 12)
page               43 sound/pci/trident/trident_memory.c #define page_to_ptr(trident,page)	__tlb_to_ptr(trident, page)
page               45 sound/pci/trident/trident_memory.c #define page_to_addr(trident,page)	__tlb_to_addr(trident, page)
page               52 sound/pci/trident/trident_memory.c #define aligned_page_offset(page)	((page) << 13)
page               53 sound/pci/trident/trident_memory.c #define page_to_ptr(trident,page)	__tlb_to_ptr(trident, (page) << 1)
page               54 sound/pci/trident/trident_memory.c #define page_to_addr(trident,page)	__tlb_to_addr(trident, (page) << 1)
page               57 sound/pci/trident/trident_memory.c static inline void set_tlb_bus(struct snd_trident *trident, int page,
page               60 sound/pci/trident/trident_memory.c 	page <<= 1;
page               61 sound/pci/trident/trident_memory.c 	__set_tlb_bus(trident, page, ptr, addr);
page               62 sound/pci/trident/trident_memory.c 	__set_tlb_bus(trident, page+1, ptr + SNDRV_TRIDENT_PAGE_SIZE, addr + SNDRV_TRIDENT_PAGE_SIZE);
page               64 sound/pci/trident/trident_memory.c static inline void set_silent_tlb(struct snd_trident *trident, int page)
page               66 sound/pci/trident/trident_memory.c 	page <<= 1;
page               67 sound/pci/trident/trident_memory.c 	__set_tlb_bus(trident, page, (unsigned long)trident->tlb.silent_page.area, trident->tlb.silent_page.addr);
page               68 sound/pci/trident/trident_memory.c 	__set_tlb_bus(trident, page+1, (unsigned long)trident->tlb.silent_page.area, trident->tlb.silent_page.addr);
page               82 sound/pci/trident/trident_memory.c #define aligned_page_offset(page)	((page) * ALIGN_PAGE_SIZE)
page               83 sound/pci/trident/trident_memory.c #define page_to_ptr(trident,page)	__tlb_to_ptr(trident, (page) * UNIT_PAGES)
page               84 sound/pci/trident/trident_memory.c #define page_to_addr(trident,page)	__tlb_to_addr(trident, (page) * UNIT_PAGES)
page               87 sound/pci/trident/trident_memory.c static inline void set_tlb_bus(struct snd_trident *trident, int page,
page               91 sound/pci/trident/trident_memory.c 	page *= UNIT_PAGES;
page               92 sound/pci/trident/trident_memory.c 	for (i = 0; i < UNIT_PAGES; i++, page++) {
page               93 sound/pci/trident/trident_memory.c 		__set_tlb_bus(trident, page, ptr, addr);
page               98 sound/pci/trident/trident_memory.c static inline void set_silent_tlb(struct snd_trident *trident, int page)
page              101 sound/pci/trident/trident_memory.c 	page *= UNIT_PAGES;
page              102 sound/pci/trident/trident_memory.c 	for (i = 0; i < UNIT_PAGES; i++, page++)
page              103 sound/pci/trident/trident_memory.c 		__set_tlb_bus(trident, page, (unsigned long)trident->tlb.silent_page.area, trident->tlb.silent_page.addr);
page              128 sound/pci/trident/trident_memory.c 	int page, psize;
page              132 sound/pci/trident/trident_memory.c 	page = 0;
page              135 sound/pci/trident/trident_memory.c 		if (page + psize <= firstpg(blk))
page              137 sound/pci/trident/trident_memory.c 		page = lastpg(blk) + 1;
page              139 sound/pci/trident/trident_memory.c 	if (page + psize > MAX_ALIGN_PAGES)
page              147 sound/pci/trident/trident_memory.c 	blk->offset = aligned_page_offset(page); /* set aligned offset */
page              148 sound/pci/trident/trident_memory.c 	firstpg(blk) = page;
page              149 sound/pci/trident/trident_memory.c 	lastpg(blk) = page + psize - 1;
page              180 sound/pci/trident/trident_memory.c 	int idx, page;
page              201 sound/pci/trident/trident_memory.c 	for (page = firstpg(blk); page <= lastpg(blk); page++, idx++) {
page              211 sound/pci/trident/trident_memory.c 		set_tlb_bus(trident, page, ptr, addr);
page              226 sound/pci/trident/trident_memory.c 	int page;
page              249 sound/pci/trident/trident_memory.c 	for (page = firstpg(blk); page <= lastpg(blk); page++,
page              256 sound/pci/trident/trident_memory.c 		set_tlb_bus(trident, page, ptr, addr);
page              285 sound/pci/trident/trident_memory.c 	int page;
page              293 sound/pci/trident/trident_memory.c 	for (page = firstpg(blk); page <= lastpg(blk); page++)
page              294 sound/pci/trident/trident_memory.c 		set_silent_tlb(trident, page);
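trident_memory.c plays the same multi-granule game at a fixed 4 KiB chip page (SNDRV_TRIDENT_PAGE_SIZE): each aligned driver page becomes one, two, or UNIT_PAGES consecutive TLB entries, and the stored bus address has its low page bits masked off. A quick userspace check of the masking and the index fan-out:

    #include <stdio.h>

    #define SNDRV_TRIDENT_PAGE_SIZE 4096
    #define UNIT_PAGES 2    /* illustrative; depends on PAGE_SIZE */

    int main(void)
    {
            unsigned int addr = 0x12345678;
            unsigned int entry = addr & ~(SNDRV_TRIDENT_PAGE_SIZE - 1);
            int page = 3;

            printf("entry = %#x\n", entry); /* prints 0x12345000 */
            for (int i = 0; i < UNIT_PAGES; i++)
                    printf("tlb index %d\n", page * UNIT_PAGES + i);
            return 0;
    }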
page             1366 sound/pci/via82xx.c 	.page =		snd_pcm_sgbuf_ops_page,
page             1379 sound/pci/via82xx.c 	.page =		snd_pcm_sgbuf_ops_page,
page             1392 sound/pci/via82xx.c 	.page =		snd_pcm_sgbuf_ops_page,
page             1405 sound/pci/via82xx.c 	.page =		snd_pcm_sgbuf_ops_page,
page             1418 sound/pci/via82xx.c 	.page =		snd_pcm_sgbuf_ops_page,
page              804 sound/pci/via82xx_modem.c 	.page =		snd_pcm_sgbuf_ops_page,
page              817 sound/pci/via82xx_modem.c 	.page =		snd_pcm_sgbuf_ops_page,
page              265 sound/pcmcia/pdaudiocf/pdaudiocf_pcm.c 	.page =		snd_pcm_lib_get_vmalloc_page,
page              270 sound/soc/codecs/rt5514-spi.c 	.page		= snd_pcm_lib_get_vmalloc_page,
page               13 sound/soc/codecs/tlv320aic26.h #define AIC26_PAGE_ADDR(page, offset)	((page << 11) | offset << 5)
page               42 sound/soc/codecs/tlv320aic31xx.h #define AIC31XX_REG(page, reg)	((page * 128) + reg)
page               20 sound/soc/codecs/tlv320aic32x4.h #define AIC32X4_REG(page, reg)	((page * 128) + reg)
page               11 sound/soc/codecs/tscs454.h #define VIRT_PAGE_BASE(page) (VIRT_BASE + (PAGE_LEN * page))
page               12 sound/soc/codecs/tscs454.h #define VIRT_ADDR(page, address) (VIRT_PAGE_BASE(page) + address)
page               13 sound/soc/codecs/tscs454.h #define ADDR(page, virt_address) (virt_address - VIRT_PAGE_BASE(page))
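The tlv320 headers flatten a (page, register) pair into one linear index at 128 registers per page, so driver code can address every register with a single number and recover the pair by division and remainder; tlv320aic26 packs the same pair into SPI command bit positions instead, and tscs454 maps pages into a virtual address window. A quick userspace check of the 128-per-page scheme:

    #include <stdio.h>

    #define AIC31XX_REG(page, reg) (((page) * 128) + (reg))

    int main(void)
    {
            int idx = AIC31XX_REG(1, 36);

            printf("index %d -> page %d, reg %d\n", idx, idx / 128, idx % 128);
            return 0;       /* prints: index 164 -> page 1, reg 36 */
    }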
page              873 sound/soc/intel/haswell/sst-haswell-pcm.c 	.page		= snd_pcm_sgbuf_ops_page,
page             1287 sound/soc/intel/skylake/skl-pcm.c 	.page = snd_pcm_sgbuf_ops_page,
page              487 sound/soc/soc-component.c struct page *snd_soc_pcm_component_page(struct snd_pcm_substream *substream,
page              493 sound/soc/soc-component.c 	struct page *page;
page              500 sound/soc/soc-component.c 		    component->driver->ops->page) {
page              501 sound/soc/soc-component.c 			page = component->driver->ops->page(substream, offset);
page              502 sound/soc/soc-component.c 			if (page)
page              503 sound/soc/soc-component.c 				return page;
page             2997 sound/soc/soc-pcm.c 		if (ops->page)
page             2998 sound/soc/soc-pcm.c 			rtd->ops.page		= snd_soc_pcm_component_page;
page              550 sound/soc/sof/pcm.c 	.page		= snd_pcm_sgbuf_ops_page,
page              563 sound/usb/6fire/pcm.c 	.page = snd_pcm_lib_get_vmalloc_page,
page              337 sound/usb/caiaq/audio.c 	.page =		snd_pcm_lib_get_vmalloc_page,
page              521 sound/usb/hiface/pcm.c 	.page = snd_pcm_lib_get_vmalloc_page,
page              892 sound/usb/misc/ua101.c 	.page = snd_pcm_lib_get_vmalloc_page,
page              904 sound/usb/misc/ua101.c 	.page = snd_pcm_lib_get_vmalloc_page,
page             1805 sound/usb/pcm.c 	.page =		snd_pcm_lib_get_vmalloc_page,
page             1817 sound/usb/pcm.c 	.page =		snd_pcm_lib_get_vmalloc_page,
page             1829 sound/usb/pcm.c 	.page =		snd_pcm_sgbuf_ops_page,
page             1841 sound/usb/pcm.c 	.page =		snd_pcm_sgbuf_ops_page,
page              132 sound/usb/usx2y/us122l.c 	struct page *page;
page              152 sound/usb/usx2y/us122l.c 	page = virt_to_page(vaddr);
page              154 sound/usb/usx2y/us122l.c 	get_page(page);
page              157 sound/usb/usx2y/us122l.c 	vmf->page = page;
page               24 sound/usb/usx2y/usX2Yhwdep.c 	struct page * page;
page               33 sound/usb/usx2y/usX2Yhwdep.c 	page = virt_to_page(vaddr);
page               34 sound/usb/usx2y/usX2Yhwdep.c 	get_page(page);
page               35 sound/usb/usx2y/usX2Yhwdep.c 	vmf->page = page;
page               38 sound/usb/usx2y/usX2Yhwdep.c 		    vaddr, page);
page              652 sound/usb/usx2y/usx2yhwdeppcm.c 	vmf->page = virt_to_page(vaddr);
page              653 sound/usb/usx2y/usx2yhwdeppcm.c 	get_page(vmf->page);
page               34 sound/xen/xen_snd_front_alsa.c 	struct page **pages;
page              450 sound/xen/xen_snd_front_alsa.c 	stream->pages = kcalloc(stream->num_pages, sizeof(struct page *),
page               94 sound/xen/xen_snd_front_evtchnl.c 	struct xensnd_event_page *page = channel->u.evt.page;
page              102 sound/xen/xen_snd_front_evtchnl.c 	prod = page->in_prod;
page              105 sound/xen/xen_snd_front_evtchnl.c 	if (prod == page->in_cons)
page              113 sound/xen/xen_snd_front_evtchnl.c 	for (cons = page->in_cons; cons != prod; cons++) {
page              116 sound/xen/xen_snd_front_evtchnl.c 		event = &XENSND_IN_RING_REF(page, cons);
page              128 sound/xen/xen_snd_front_evtchnl.c 	page->in_cons = cons;
page              150 sound/xen/xen_snd_front_evtchnl.c 	unsigned long page = 0;
page              153 sound/xen/xen_snd_front_evtchnl.c 		page = (unsigned long)channel->u.req.ring.sring;
page              155 sound/xen/xen_snd_front_evtchnl.c 		page = (unsigned long)channel->u.evt.page;
page              157 sound/xen/xen_snd_front_evtchnl.c 	if (!page)
page              175 sound/xen/xen_snd_front_evtchnl.c 		gnttab_end_foreign_access(channel->gref, 0, page);
page              177 sound/xen/xen_snd_front_evtchnl.c 		free_page(page);
page              203 sound/xen/xen_snd_front_evtchnl.c 	unsigned long page;
page              215 sound/xen/xen_snd_front_evtchnl.c 	page = get_zeroed_page(GFP_KERNEL);
page              216 sound/xen/xen_snd_front_evtchnl.c 	if (!page) {
page              233 sound/xen/xen_snd_front_evtchnl.c 		struct xen_sndif_sring *sring = (struct xen_sndif_sring *)page;
page              249 sound/xen/xen_snd_front_evtchnl.c 						  virt_to_gfn((void *)page), 0);
page              253 sound/xen/xen_snd_front_evtchnl.c 		channel->u.evt.page = (struct xensnd_event_page *)page;
page              286 sound/xen/xen_snd_front_evtchnl.c 	if (page)
page              287 sound/xen/xen_snd_front_evtchnl.c 		free_page(page);
page               69 sound/xen/xen_snd_front_evtchnl.h 			struct xensnd_event_page *page;
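xen_snd_front_evtchnl.c consumes its event page as a classic single-producer ring: snapshot in_prod, walk the free-running consumer index up to it (indexing the ring modulo its size via XENSND_IN_RING_REF), then publish the new in_cons. A much-simplified userspace analogue; the real code also needs barriers around the index accesses, since the producer lives in another domain:

    #include <stdio.h>

    #define RING_SIZE 8     /* power of two so index masking works */

    static int ring[RING_SIZE];
    static unsigned int in_prod, in_cons;   /* free-running indices */

    static void consume_events(void)
    {
            unsigned int prod = in_prod;    /* snapshot; real code adds a read barrier */
            unsigned int cons;

            for (cons = in_cons; cons != prod; cons++)
                    printf("event %d\n", ring[cons & (RING_SIZE - 1)]);
            in_cons = prod;                 /* publish; real code adds a write barrier */
    }

    int main(void)
    {
            for (int i = 0; i < 3; i++)
                    ring[in_prod++ & (RING_SIZE - 1)] = 100 + i;
            consume_events();
            return 0;
    }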
page               13 tools/include/linux/types.h struct page;
page              123 tools/perf/builtin-help.c static void exec_woman_emacs(const char *path, const char *page)
page              131 tools/perf/builtin-help.c 		if (asprintf(&man_page, "(woman \"%s\")", page) > 0) {
page              139 tools/perf/builtin-help.c static void exec_man_konqueror(const char *path, const char *page)
page              162 tools/perf/builtin-help.c 		if (asprintf(&man_page, "man:%s(1)", page) > 0) {
page              170 tools/perf/builtin-help.c static void exec_man_man(const char *path, const char *page)
page              174 tools/perf/builtin-help.c 	execlp(path, "man", page, NULL);
page              178 tools/perf/builtin-help.c static void exec_man_cmd(const char *cmd, const char *page)
page              182 tools/perf/builtin-help.c 	if (asprintf(&shell_cmd, "%s %s", cmd, page) > 0) {
page              345 tools/perf/builtin-help.c static void exec_viewer(const char *name, const char *page)
page              350 tools/perf/builtin-help.c 		exec_man_man(info, page);
page              352 tools/perf/builtin-help.c 		exec_woman_emacs(info, page);
page              354 tools/perf/builtin-help.c 		exec_man_konqueror(info, page);
page              356 tools/perf/builtin-help.c 		exec_man_cmd(info, page);
page              364 tools/perf/builtin-help.c 	const char *page = cmd_to_page(perf_cmd);
page              369 tools/perf/builtin-help.c 		exec_viewer(viewer->name, page); /* will return when unable */
page              372 tools/perf/builtin-help.c 		exec_viewer(fallback, page);
page              373 tools/perf/builtin-help.c 	exec_viewer("man", page);
page              381 tools/perf/builtin-help.c 	const char *page = cmd_to_page(perf_cmd);
page              383 tools/perf/builtin-help.c 	execlp("info", "info", "perfman", page, NULL);
page              387 tools/perf/builtin-help.c static int get_html_page_path(char **page_path, const char *page)
page              399 tools/perf/builtin-help.c 	return asprintf(page_path, "%s/%s.html", html_path, page);
page              416 tools/perf/builtin-help.c 	const char *page = cmd_to_page(perf_cmd);
page              419 tools/perf/builtin-help.c 	if (get_html_page_path(&page_path, page) < 0)
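builtin-help.c leans on the fact that the exec*() family only returns on failure: each exec_* helper simply tries to replace the process, so the caller (lines 364-373 above) can attempt viewers in order and end with plain man as the last resort. A runnable userspace sketch of that fallthrough, with "perf-top" as a hypothetical page name:

    #include <stdio.h>
    #include <unistd.h>

    static void exec_man(const char *path, const char *page)
    {
            execlp(path, "man", page, NULL);        /* returns only on failure */
            fprintf(stderr, "failed to exec %s\n", path);
    }

    int main(void)
    {
            const char *page = "perf-top";          /* hypothetical page name */

            exec_man("/no/such/viewer", page);      /* fails, falls through */
            exec_man("man", page);                  /* last resort */
            return 1;                               /* reached only if both fail */
    }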
page              282 tools/perf/builtin-kmem.c 	u64 		page;
page              454 tools/perf/builtin-kmem.c 		cmp = data->page - pstat->page;
page              468 tools/perf/builtin-kmem.c 		data->page = pstat->page;
page              523 tools/perf/builtin-kmem.c 		data->page = pstat->page;
page              790 tools/perf/builtin-kmem.c 	u64 page;
page              805 tools/perf/builtin-kmem.c 		page = perf_evsel__intval(evsel, sample, "pfn");
page              807 tools/perf/builtin-kmem.c 		page = perf_evsel__intval(evsel, sample, "page");
page              812 tools/perf/builtin-kmem.c 	if (!valid_page(page)) {
page              828 tools/perf/builtin-kmem.c 	this.page = page;
page              863 tools/perf/builtin-kmem.c 	u64 page;
page              872 tools/perf/builtin-kmem.c 		page = perf_evsel__intval(evsel, sample, "pfn");
page              874 tools/perf/builtin-kmem.c 		page = perf_evsel__intval(evsel, sample, "page");
page              879 tools/perf/builtin-kmem.c 	this.page = page;
page              883 tools/perf/builtin-kmem.c 			  page, order);
page             1084 tools/perf/builtin-kmem.c 		printf(format, (unsigned long long)data->page,
page             1526 tools/perf/builtin-kmem.c 	if (l->page < r->page)
page             1528 tools/perf/builtin-kmem.c 	else if (l->page > r->page)
page               81 tools/power/cpupower/utils/cpupower.c 	char *page;
page               87 tools/power/cpupower/utils/cpupower.c 	page = malloc(len);
page               88 tools/power/cpupower/utils/cpupower.c 	if (!page)
page               91 tools/power/cpupower/utils/cpupower.c 	sprintf(page, "cpupower");
page               93 tools/power/cpupower/utils/cpupower.c 		strcat(page, "-");
page               94 tools/power/cpupower/utils/cpupower.c 		strcat(page, subpage);
page               97 tools/power/cpupower/utils/cpupower.c 	execlp("man", "man", page, NULL);
page               19 tools/testing/nvdimm/dax-dev.c 				struct page *page;
page               24 tools/testing/nvdimm/dax-dev.c 				page = vmalloc_to_page((void *)addr);
page               25 tools/testing/nvdimm/dax-dev.c 				return PFN_PHYS(page_to_pfn(page));
page               24 tools/testing/nvdimm/pmem-dax.c 		struct page *page;
page               28 tools/testing/nvdimm/pmem-dax.c 		page = vmalloc_to_page(pmem->virt_addr + offset);
page               30 tools/testing/nvdimm/pmem-dax.c 			*pfn = page_to_pfn_t(page);
page               32 tools/testing/nvdimm/pmem-dax.c 				__func__, pmem, pgoff, page_to_pfn(page));
page               55 tools/testing/radix-tree/regression1.c static struct page *page_alloc(int index)
page               57 tools/testing/radix-tree/regression1.c 	struct page *p;
page               58 tools/testing/radix-tree/regression1.c 	p = malloc(sizeof(struct page));
page               68 tools/testing/radix-tree/regression1.c 	struct page *p = container_of(rcu, struct page, rcu);
page               74 tools/testing/radix-tree/regression1.c static void page_free(struct page *p)
page               80 tools/testing/radix-tree/regression1.c 			    unsigned int nr_pages, struct page **pages)
page               83 tools/testing/radix-tree/regression1.c 	struct page *page;
page               87 tools/testing/radix-tree/regression1.c 	xas_for_each(&xas, page, ULONG_MAX) {
page               88 tools/testing/radix-tree/regression1.c 		if (xas_retry(&xas, page))
page               91 tools/testing/radix-tree/regression1.c 		pthread_mutex_lock(&page->lock);
page               92 tools/testing/radix-tree/regression1.c 		if (!page->count)
page               96 tools/testing/radix-tree/regression1.c 		pthread_mutex_unlock(&page->lock);
page               99 tools/testing/radix-tree/regression1.c 		if (unlikely(page != xas_reload(&xas)))
page              102 tools/testing/radix-tree/regression1.c 		pages[ret] = page;
page              106 tools/testing/radix-tree/regression1.c 		pthread_mutex_unlock(&page->lock);
page              125 tools/testing/radix-tree/regression1.c 			struct page *p;
page              157 tools/testing/radix-tree/regression1.c 			struct page *pages[10];
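regression1.c exercises the lockless find_get_pages() protocol: look the page up with no tree lock held, take the page's own lock, treat a zero refcount as "already being freed", and re-check with xas_reload() that the slot still points at the same page before keeping it. A much-simplified single-slot pthread analogue of the lock-then-revalidate step (the real test relies on a concurrent remover to make the retry terminate):

    #include <pthread.h>
    #include <stdio.h>

    struct page { pthread_mutex_t lock; int count; };

    static struct page *slot;               /* stands in for one tree slot */

    static struct page *get_page_speculative(void)
    {
            struct page *p;

    again:
            p = slot;                       /* lockless load */
            if (!p)
                    return NULL;
            pthread_mutex_lock(&p->lock);
            if (!p->count || p != slot) {   /* being freed, or slot moved on */
                    pthread_mutex_unlock(&p->lock);
                    goto again;
            }
            p->count++;                     /* now safe to take a reference */
            pthread_mutex_unlock(&p->lock);
            return p;
    }

    int main(void)
    {
            struct page pg = { PTHREAD_MUTEX_INITIALIZER, 1 };

            slot = &pg;
            printf("count = %d\n", get_page_speculative()->count);  /* 2 */
            return 0;
    }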
page               67 tools/testing/radix-tree/regression2.c static struct page *page_alloc(void)
page               69 tools/testing/radix-tree/regression2.c 	struct page *p;
page               70 tools/testing/radix-tree/regression2.c 	p = malloc(sizeof(struct page));
page               79 tools/testing/radix-tree/regression2.c 	struct page *p;
page               82 tools/testing/radix-tree/regression2.c 	struct page *pages[1];
page               44 tools/testing/scatterlist/linux/mm.h static inline unsigned long page_to_phys(struct page *page)
page               51 tools/testing/scatterlist/linux/mm.h #define page_to_pfn(page) ((unsigned long)(page) / PAGE_SIZE)
page               53 tools/testing/scatterlist/linux/mm.h #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
page               78 tools/testing/scatterlist/linux/mm.h static inline void *kmap(struct page *page)
page               85 tools/testing/scatterlist/linux/mm.h static inline void *kmap_atomic(struct page *page)
page              107 tools/testing/scatterlist/linux/mm.h static inline void free_page(unsigned long page)
page              109 tools/testing/scatterlist/linux/mm.h 	free((void *)page);
page                8 tools/testing/scatterlist/main.c static void set_pages(struct page **pages, const unsigned *array, unsigned num)
page               14 tools/testing/scatterlist/main.c 		pages[i] = (struct page *)(unsigned long)
page               56 tools/testing/scatterlist/main.c 		struct page *pages[MAX_PAGES];
page              183 tools/testing/selftests/kvm/dirty_log_test.c 	uint64_t page;
page              188 tools/testing/selftests/kvm/dirty_log_test.c 	for (page = 0; page < host_num_pages; page += step) {
page              189 tools/testing/selftests/kvm/dirty_log_test.c 		value_ptr = host_test_mem + page * host_page_size;
page              192 tools/testing/selftests/kvm/dirty_log_test.c 		if (test_and_clear_bit_le(page, host_bmap_track)) {
page              194 tools/testing/selftests/kvm/dirty_log_test.c 			TEST_ASSERT(test_bit_le(page, bmap),
page              197 tools/testing/selftests/kvm/dirty_log_test.c 				    page);
page              200 tools/testing/selftests/kvm/dirty_log_test.c 		if (test_bit_le(page, bmap)) {
page              211 tools/testing/selftests/kvm/dirty_log_test.c 				    page, *value_ptr, iteration);
page              236 tools/testing/selftests/kvm/dirty_log_test.c 				    page, *value_ptr, iteration);
page              243 tools/testing/selftests/kvm/dirty_log_test.c 				set_bit_le(page, host_bmap_track);
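dirty_log_test walks KVM's dirty bitmap with the little-endian bit helpers; "little-endian" here just fixes the layout so bit n always lives in byte n/8 at position n%8, independent of host word size and endianness. A userspace check of that mapping:

    #include <stdio.h>

    /* Same byte/bit mapping the *_bit_le() helpers guarantee. */
    static int test_bit_le(long n, const unsigned char *bmap)
    {
            return (bmap[n >> 3] >> (n & 7)) & 1;
    }

    int main(void)
    {
            unsigned char bmap[2] = { 0 };

            bmap[10 >> 3] |= 1 << (10 & 7);         /* set_bit_le(10, bmap) */
            printf("bit 10 = %d, bit 11 = %d\n",
                   test_bit_le(10, bmap), test_bit_le(11, bmap));
            return 0;       /* prints: bit 10 = 1, bit 11 = 0 */
    }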
page              187 tools/testing/selftests/kvm/lib/aarch64/processor.c static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent, uint64_t page, int level)
page              196 tools/testing/selftests/kvm/lib/aarch64/processor.c 	for (pte = page; pte < page + ptrs_per_pte(vm) * 8; pte += 8) {
page               68 tools/testing/selftests/powerpc/mm/subpage_prot.c static inline void check_faulted(void *addr, long page, long subpage, int write)
page               70 tools/testing/selftests/powerpc/mm/subpage_prot.c 	int want_fault = (subpage == ((page + 3) % 16));
page               73 tools/testing/selftests/powerpc/mm/subpage_prot.c 		want_fault |= (subpage == ((page + 1) % 16));
page               77 tools/testing/selftests/powerpc/mm/subpage_prot.c 		       addr, page, subpage, write,
page              676 tools/testing/selftests/x86/ldt_gdt.c 	unsigned int *page = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
page              678 tools/testing/selftests/x86/ldt_gdt.c 	if (page == MAP_FAILED)
page              682 tools/testing/selftests/x86/ldt_gdt.c 		page[i] = i;
page              683 tools/testing/selftests/x86/ldt_gdt.c 	counter_page = page;
page               44 tools/virtio/linux/kernel.h #define virt_to_page(p) ((struct page *)((unsigned long)p & PAGE_MASK))
page               29 tools/virtio/linux/scatterlist.h static inline void sg_assign_page(struct scatterlist *sg, struct page *page)
page               37 tools/virtio/linux/scatterlist.h 	BUG_ON((unsigned long) page & 0x03);
page               41 tools/virtio/linux/scatterlist.h 	sg->page_link = page_link | (unsigned long) page;
page               58 tools/virtio/linux/scatterlist.h static inline void sg_set_page(struct scatterlist *sg, struct page *page,
page               61 tools/virtio/linux/scatterlist.h 	sg_assign_page(sg, page);
page               66 tools/virtio/linux/scatterlist.h static inline struct page *sg_page(struct scatterlist *sg)
page               71 tools/virtio/linux/scatterlist.h 	return (struct page *)((sg)->page_link & ~0x3);
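The virtio test's scatterlist mock keeps the struct page pointer and two flag bits in a single page_link word: because the pointer is at least 4-byte aligned (hence the BUG_ON on & 0x03), bits 0-1 are free to mark chained and terminal entries, and sg_page() recovers the pointer by masking them back off. A userspace demo of the same pointer tagging:

    #include <stdio.h>
    #include <stdint.h>

    #define TAG_CHAIN 0x1UL /* entry points at another sg table */
    #define TAG_END   0x2UL /* last entry of the list */

    int main(void)
    {
            static int object __attribute__((aligned(4)));
            uintptr_t link = (uintptr_t)&object | TAG_END;

            printf("pointer intact: %d, end: %d, chain: %d\n",
                   (int *)(link & ~0x3UL) == &object,
                   (int)((link & TAG_END) >> 1), (int)(link & TAG_CHAIN));
            return 0;       /* prints: pointer intact: 1, end: 1, chain: 0 */
    }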
page              131 virt/kvm/arm/mmu.c 	void *page;
page              137 virt/kvm/arm/mmu.c 		page = (void *)__get_free_page(GFP_PGTABLE_USER);
page              138 virt/kvm/arm/mmu.c 		if (!page)
page              140 virt/kvm/arm/mmu.c 		cache->objects[cache->nobjs++] = page;
page             1380 virt/kvm/arm/mmu.c 	struct page *page = pfn_to_page(pfn);
page             1387 virt/kvm/arm/mmu.c 	if (!PageHuge(page) && PageTransCompoundMap(page)) {
page              112 virt/kvm/coalesced_mmio.c 	struct page *page;
page              116 virt/kvm/coalesced_mmio.c 	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
page              117 virt/kvm/coalesced_mmio.c 	if (!page)
page              121 virt/kvm/coalesced_mmio.c 	kvm->coalesced_mmio_ring = page_address(page);
page              326 virt/kvm/kvm_main.c 	struct page *page;
page              340 virt/kvm/kvm_main.c 	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
page              341 virt/kvm/kvm_main.c 	if (!page) {
page              345 virt/kvm/kvm_main.c 	vcpu->run = page_address(page);
page             1521 virt/kvm/kvm_main.c 	struct page *page[1];
page             1532 virt/kvm/kvm_main.c 	npages = __get_user_pages_fast(addr, 1, 1, page);
page             1534 virt/kvm/kvm_main.c 		*pfn = page_to_pfn(page[0]);
page             1552 virt/kvm/kvm_main.c 	struct page *page;
page             1565 virt/kvm/kvm_main.c 	npages = get_user_pages_unlocked(addr, 1, &page, flags);
page             1571 virt/kvm/kvm_main.c 		struct page *wpage;
page             1575 virt/kvm/kvm_main.c 			put_page(page);
page             1576 virt/kvm/kvm_main.c 			page = wpage;
page             1579 virt/kvm/kvm_main.c 	*pfn = page_to_pfn(page);
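hva_to_pfn in kvm_main.c pins guest pages in two steps: first the atomic fast path (__get_user_pages_fast() walks the page tables without taking mmap_sem), then the sleeping slow path via get_user_pages_unlocked(); the wpage dance afterwards upgrades a read-only pin to a writable one when the caller wanted write access. A hedged sketch of the two-step shape, assuming the pre-5.6 GUP signatures visible above:

    /* Sketch only: pin one user page, fast path first. */
    static int pin_one_page(unsigned long addr, bool write, struct page **page)
    {
            /* Atomic path: no mmap_sem, fails rather than faulting. */
            if (__get_user_pages_fast(addr, 1, write, page) == 1)
                    return 0;
            /* Slow path: may fault the page in and sleep. */
            if (get_user_pages_unlocked(addr, 1, page,
                                        write ? FOLL_WRITE : 0) != 1)
                    return -EFAULT;
            return 0;
    }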
page             1779 virt/kvm/kvm_main.c 			    struct page **pages, int nr_pages)
page             1795 virt/kvm/kvm_main.c static struct page *kvm_pfn_to_page(kvm_pfn_t pfn)
page             1808 virt/kvm/kvm_main.c struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
page             1850 virt/kvm/kvm_main.c 	struct page *page = KVM_UNMAPPED_PAGE;
page             1874 virt/kvm/kvm_main.c 		page = pfn_to_page(pfn);
page             1876 virt/kvm/kvm_main.c 			hva = kmap_atomic(page);
page             1878 virt/kvm/kvm_main.c 			hva = kmap(page);
page             1890 virt/kvm/kvm_main.c 	map->page = page;
page             1924 virt/kvm/kvm_main.c 	if (map->page != KVM_UNMAPPED_PAGE) {
page             1928 virt/kvm/kvm_main.c 			kunmap(map->page);
page             1946 virt/kvm/kvm_main.c 	map->page = NULL;
page             1965 virt/kvm/kvm_main.c struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn)
page             1975 virt/kvm/kvm_main.c void kvm_release_page_clean(struct page *page)
page             1977 virt/kvm/kvm_main.c 	WARN_ON(is_error_page(page));
page             1979 virt/kvm/kvm_main.c 	kvm_release_pfn_clean(page_to_pfn(page));
page             1990 virt/kvm/kvm_main.c void kvm_release_page_dirty(struct page *page)
page             1992 virt/kvm/kvm_main.c 	WARN_ON(is_error_page(page));
page             1994 virt/kvm/kvm_main.c 	kvm_release_pfn_dirty(page_to_pfn(page));
page             2008 virt/kvm/kvm_main.c 		struct page *page = pfn_to_page(pfn);
page             2010 virt/kvm/kvm_main.c 		SetPageDirty(page);
page             2714 virt/kvm/kvm_main.c 	struct page *page;
page             2717 virt/kvm/kvm_main.c 		page = virt_to_page(vcpu->run);
page             2720 virt/kvm/kvm_main.c 		page = virt_to_page(vcpu->arch.pio_data);
page             2724 virt/kvm/kvm_main.c 		page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
page             2728 virt/kvm/kvm_main.c 	get_page(page);
page             2729 virt/kvm/kvm_main.c 	vmf->page = page;
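The closing kvm_main.c lines show the second common fault-handler shape in this listing: the backing pages already exist in the kernel (vcpu->run, the PIO page, the coalesced-MMIO ring), so the handler just dispatches on vmf->pgoff, converts with virt_to_page(), and takes a reference, much as the usx2y and us122l sound drivers above do for their buffers. A hedged sketch with an invented two-page device layout:

    /*
     * Sketch only: back an mmap region with two preallocated kernel
     * pages, selected by mmap offset.  struct my_dev and its fields
     * are hypothetical.
     */
    static vm_fault_t two_page_fault(struct vm_fault *vmf)
    {
            struct my_dev *dev = vmf->vma->vm_private_data;
            struct page *page;

            if (vmf->pgoff == 0)
                    page = virt_to_page(dev->ctrl_area);    /* first page */
            else if (vmf->pgoff == 1)
                    page = virt_to_page(dev->data_area);    /* second page */
            else
                    return VM_FAULT_SIGBUS;
            get_page(page);
            vmf->page = page;
            return 0;
    }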