zero_page        1645 arch/arm/mm/mmu.c 	void *zero_page;
zero_page        1659 arch/arm/mm/mmu.c 	zero_page = early_alloc(PAGE_SIZE);
zero_page        1663 arch/arm/mm/mmu.c 	empty_zero_page = virt_to_page(zero_page);
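These arch/arm hits are the empty_zero_page setup in paging_init(): one page is taken from the boot-time allocator (early_alloc() returns zeroed memory) and its struct page is published as empty_zero_page so ZERO_PAGE() can hand it out. A condensed sketch of that step, assuming the surrounding paging_init() context; the final dcache flush is the maintenance arm does on the fresh page and may differ by kernel version:

	void *zero_page;

	/* allocate the zero page (early_alloc() hands back zeroed memory) */
	zero_page = early_alloc(PAGE_SIZE);

	empty_zero_page = virt_to_page(zero_page);
	__flush_dcache_page(NULL, empty_zero_page);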
zero_page         488 arch/arm64/kernel/hibernate.c 	void *zero_page;
zero_page         514 arch/arm64/kernel/hibernate.c 	zero_page = (void *)get_safe_page(GFP_ATOMIC);
zero_page         515 arch/arm64/kernel/hibernate.c 	if (!zero_page) {
zero_page         562 arch/arm64/kernel/hibernate.c 		       resume_hdr.__hyp_stub_vectors, virt_to_phys(zero_page));
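The arm64 hibernate hits are from the resume path: a page outside the hibernation image is allocated with get_safe_page() so the relocated exit code has a known-zero scratch page, and its physical address is passed along when jumping into that code. A condensed sketch assuming the surrounding swsusp_arch_resume(); the hibernate_exit() arguments other than the two shown in the hit at line 562 are recalled from that function and may differ by kernel version:

	void *zero_page;

	zero_page = (void *)get_safe_page(GFP_ATOMIC);
	if (!zero_page) {
		pr_err("Failed to allocate zero page.\n");
		return -ENOMEM;
	}

	/* jump into the relocated exit code, handing it the scratch zero page */
	hibernate_exit(virt_to_phys(tmp_pg_dir), resume_hdr.ttbr1_el1,
		       resume_hdr.reenter_kernel, restore_pblist,
		       resume_hdr.__hyp_stub_vectors, virt_to_phys(zero_page));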
zero_page         147 arch/nds32/mm/init.c 	void *zero_page;
zero_page         159 arch/nds32/mm/init.c 	zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
zero_page         160 arch/nds32/mm/init.c 	if (!zero_page)
zero_page         165 arch/nds32/mm/init.c 	empty_zero_page = virt_to_page(zero_page);
zero_page         422 arch/unicore32/mm/mmu.c 	void *zero_page;
zero_page         433 arch/unicore32/mm/mmu.c 	zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
zero_page         434 arch/unicore32/mm/mmu.c 	if (!zero_page)
zero_page         440 arch/unicore32/mm/mmu.c 	empty_zero_page = virt_to_page(zero_page);
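nds32 and unicore32 follow the same empty_zero_page pattern as arm but allocate straight from memblock and panic on failure; cache maintenance after the assignment is left out here. A minimal sketch of the shared pattern (the panic format string is the usual memblock-failure message and is an assumption):

	void *zero_page;

	zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);	/* returns zeroed memory */
	if (!zero_page)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);

	empty_zero_page = virt_to_page(zero_page);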
zero_page          52 drivers/dma/bcm2835-dma.c 	dma_addr_t zero_page;
zero_page         752 drivers/dma/bcm2835-dma.c 		if (buf_addr == od->zero_page && !c->is_lite_channel)
zero_page         857 drivers/dma/bcm2835-dma.c 	dma_unmap_page_attrs(od->ddev.dev, od->zero_page, PAGE_SIZE,
zero_page         943 drivers/dma/bcm2835-dma.c 	od->zero_page = dma_map_page_attrs(od->ddev.dev, ZERO_PAGE(0), 0,
zero_page         946 drivers/dma/bcm2835-dma.c 	if (dma_mapping_error(od->ddev.dev, od->zero_page)) {
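bcm2835-dma keeps a per-device DMA mapping of the global ZERO_PAGE(0): probe maps it, the teardown path unmaps it, and the comparison at line 752 recognizes cyclic transfers sourced from that mapping so non-"lite" channels can be programmed to emit zeros rather than repeatedly reading the page. A condensed sketch of the map/unmap pair, assuming the probe/remove context around the hits; DMA_ATTR_SKIP_CPU_SYNC is used because the CPU never dirties the zero page:

	/* probe: map the shared zero page once for this device */
	od->zero_page = dma_map_page_attrs(od->ddev.dev, ZERO_PAGE(0), 0,
					   PAGE_SIZE, DMA_TO_DEVICE,
					   DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(od->ddev.dev, od->zero_page)) {
		dev_err(&pdev->dev, "Failed to map zero page\n");
		return -ENOMEM;
	}

	/* teardown: release the mapping again */
	dma_unmap_page_attrs(od->ddev.dev, od->zero_page, PAGE_SIZE,
			     DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);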
zero_page         510 drivers/nvdimm/btt.c 		void *zero_page = page_address(ZERO_PAGE(0));
zero_page         520 drivers/nvdimm/btt.c 			ret = arena_write_bytes(arena, nsoff, zero_page,
zero_page         372 drivers/nvdimm/pfn_devs.c 	void *zero_page = page_address(ZERO_PAGE(0));
zero_page         401 drivers/nvdimm/pfn_devs.c 				rc = nvdimm_write_bytes(ndns, nsoff, zero_page,
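Both nvdimm usages treat page_address(ZERO_PAGE(0)) as a ready-made, always-present buffer of zeros, so zero-filling an on-media range (BTT arena data, the pfn info/memmap area) becomes a loop of bounded writes from that page instead of allocating a scratch buffer. A sketch of the pattern, using the pfn_devs-style nvdimm_write_bytes() call from the hit at line 401; the loop variables (size, nsoff, ndns) are illustrative:

	void *zero_page = page_address(ZERO_PAGE(0));
	size_t remaining = size;
	int rc;

	while (remaining) {
		size_t chunk = min_t(size_t, remaining, PAGE_SIZE);

		rc = nvdimm_write_bytes(ndns, nsoff, zero_page, chunk, 0);
		if (rc)
			return rc;

		nsoff += chunk;
		remaining -= chunk;
	}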
zero_page        2517 drivers/target/target_core_transport.c 		 bool zero_page, bool chainable)
zero_page        2519 drivers/target/target_core_transport.c 	gfp_t gfp = GFP_KERNEL | (zero_page ? __GFP_ZERO : 0);
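In target_core_transport.c (and the matching prototype in include/target/target_core_fabric.h, line 206 below), zero_page is a plain bool that folds __GFP_ZERO into the mask used to allocate the scatterlist's data pages. A hypothetical caller asking for a pre-zeroed buffer might look like this (sgl, nents and length are illustrative locals):

	struct scatterlist *sgl;
	unsigned int nents;
	int rc;

	/* zero_page=true ORs __GFP_ZERO into the data page allocations */
	rc = target_alloc_sgl(&sgl, &nents, length, true, false);
	if (rc < 0)
		return rc;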
zero_page        1422 fs/dax.c       	struct page *zero_page;
zero_page        1427 fs/dax.c       	zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);
zero_page        1429 fs/dax.c       	if (unlikely(!zero_page))
zero_page        1432 fs/dax.c       	pfn = page_to_pfn_t(zero_page);
zero_page        1452 fs/dax.c       	pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
zero_page        1456 fs/dax.c       	trace_dax_pmd_load_hole(inode, vmf, zero_page, *entry);
zero_page        1462 fs/dax.c       	trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry);
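The fs/dax.c hits are dax_pmd_load_hole(): a read fault over a file hole maps the shared huge zero page read-only at PMD granularity instead of allocating storage, and traces either the success or the fallback. A condensed sketch of the mapping step, assuming the fault context (haddr is the PMD-aligned fault address); DAX entry bookkeeping, locking and the missing-pagetable fallback are omitted:

	struct page *zero_page;
	pmd_t pmd_entry;
	pfn_t pfn;

	zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);
	if (unlikely(!zero_page))
		return VM_FAULT_FALLBACK;	/* retry as PTE-sized faults */

	pfn = page_to_pfn_t(zero_page);		/* recorded in the mapping's DAX entry */

	pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
	pmd_entry = pmd_mkhuge(pmd_entry);
	set_pmd_at(vmf->vma->vm_mm, haddr, vmf->pmd, pmd_entry);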
zero_page         206 include/target/target_core_fabric.h 		u32 length, bool zero_page, bool chainable);
zero_page          65 include/trace/events/fs_dax.h 		struct page *zero_page,
zero_page          67 include/trace/events/fs_dax.h 	TP_ARGS(inode, vmf, zero_page, radix_entry),
zero_page          72 include/trace/events/fs_dax.h 		__field(struct page *, zero_page)
zero_page          81 include/trace/events/fs_dax.h 		__entry->zero_page = zero_page;
zero_page          91 include/trace/events/fs_dax.h 		__entry->zero_page,
zero_page          99 include/trace/events/fs_dax.h 		struct page *zero_page, void *radix_entry), \
zero_page         100 include/trace/events/fs_dax.h 	TP_ARGS(inode, vmf, zero_page, radix_entry))
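The fs_dax.h hits belong to the dax_pmd_load_hole event class, which captures the zero_page pointer into the trace record and then defines both the load-hole and fallback events from it. A trimmed sketch of that class keeping only the zero_page/radix_entry fields (the real class also records dev, ino, vm_flags and the fault address):

DECLARE_EVENT_CLASS(dax_pmd_load_hole_class,
	TP_PROTO(struct inode *inode, struct vm_fault *vmf,
		struct page *zero_page, void *radix_entry),
	TP_ARGS(inode, vmf, zero_page, radix_entry),
	TP_STRUCT__entry(
		__field(struct page *, zero_page)
		__field(void *, radix_entry)
	),
	TP_fast_assign(
		__entry->zero_page = zero_page;
		__entry->radix_entry = radix_entry;
	),
	TP_printk("zero_page %p radix_entry %#lx",
		__entry->zero_page,
		(unsigned long)__entry->radix_entry
	)
)

#define DEFINE_PMD_LOAD_HOLE_EVENT(name) \
DEFINE_EVENT(dax_pmd_load_hole_class, name, \
	TP_PROTO(struct inode *inode, struct vm_fault *vmf, \
		struct page *zero_page, void *radix_entry), \
	TP_ARGS(inode, vmf, zero_page, radix_entry))

DEFINE_PMD_LOAD_HOLE_EVENT(dax_pmd_load_hole);
DEFINE_PMD_LOAD_HOLE_EVENT(dax_pmd_load_hole_fallback);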
zero_page          82 mm/huge_memory.c 	struct page *zero_page;
zero_page          87 mm/huge_memory.c 	zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
zero_page          89 mm/huge_memory.c 	if (!zero_page) {
zero_page          95 mm/huge_memory.c 	if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
zero_page          97 mm/huge_memory.c 		__free_pages(zero_page, compound_order(zero_page));
zero_page         147 mm/huge_memory.c 		struct page *zero_page = xchg(&huge_zero_page, NULL);
zero_page         148 mm/huge_memory.c 		BUG_ON(zero_page == NULL);
zero_page         149 mm/huge_memory.c 		__free_pages(zero_page, compound_order(zero_page));
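The first mm/huge_memory.c group is the huge zero page's lifecycle: get_huge_zero_page() allocates a PMD-order zeroed compound page and installs it with cmpxchg() so only one racing caller's page survives, while the shrinker path (lines 147-149) xchg()s the pointer back to NULL and frees the page once its refcount says it is idle. A condensed sketch; refcounting, preemption handling and vmstat accounting are omitted:

static bool get_huge_zero_page(void)
{
	struct page *zero_page;

	zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
				HPAGE_PMD_ORDER);
	if (!zero_page)
		return false;

	/* only one racing caller wins the install; the loser frees its copy */
	if (cmpxchg(&huge_zero_page, NULL, zero_page))
		__free_pages(zero_page, compound_order(zero_page));

	return true;
}

	/* shrinker scan, simplified: retire the page once it is unused */
	if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
		struct page *zero_page = xchg(&huge_zero_page, NULL);
		BUG_ON(zero_page == NULL);
		__free_pages(zero_page, compound_order(zero_page));
	}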
zero_page         693 mm/huge_memory.c 		struct page *zero_page)
zero_page         698 mm/huge_memory.c 	entry = mk_pmd(zero_page, vma->vm_page_prot);
zero_page         724 mm/huge_memory.c 		struct page *zero_page;
zero_page         730 mm/huge_memory.c 		zero_page = mm_get_huge_zero_page(vma->vm_mm);
zero_page         731 mm/huge_memory.c 		if (unlikely(!zero_page)) {
zero_page         749 mm/huge_memory.c 						   haddr, vmf->pmd, zero_page);
zero_page        1034 mm/huge_memory.c 		struct page *zero_page;
zero_page        1040 mm/huge_memory.c 		zero_page = mm_get_huge_zero_page(dst_mm);
zero_page        1042 mm/huge_memory.c 				zero_page);
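The second group is the consumers: set_huge_zero_page() builds the read-only huge PMD from the shared page, do_huge_pmd_anonymous_page() uses it for read faults on anonymous memory (lines 724-749), and copy_huge_pmd() (lines 1034-1042) simply takes another reference on fork so parent and child share the same page. A sketch of the PMD construction, close to the function the hits at 693-698 come from; the pmd_none() check, pgtable deposit and page-table accounting follow the usual THP conventions and may differ by version:

static void set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
		struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
		struct page *zero_page)
{
	pmd_t entry;

	if (!pmd_none(*pmd))
		return;
	entry = mk_pmd(zero_page, vma->vm_page_prot);
	entry = pmd_mkhuge(entry);
	pgtable_trans_huge_deposit(mm, pmd, pgtable);	/* spare PTE table kept for a later split */
	set_pmd_at(mm, haddr, pmd, entry);
	mm_inc_nr_ptes(mm);
}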
zero_page         187 net/ceph/messenger.c static struct page *zero_page;		/* used in certain error cases */
zero_page         257 net/ceph/messenger.c 	BUG_ON(zero_page == NULL);
zero_page         258 net/ceph/messenger.c 	put_page(zero_page);
zero_page         259 net/ceph/messenger.c 	zero_page = NULL;
zero_page         269 net/ceph/messenger.c 	BUG_ON(zero_page != NULL);
zero_page         270 net/ceph/messenger.c 	zero_page = ZERO_PAGE(0);
zero_page         271 net/ceph/messenger.c 	get_page(zero_page);
zero_page        1635 net/ceph/messenger.c 		ret = ceph_tcp_sendpage(con->sock, zero_page, 0, size, more);
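net/ceph/messenger.c pins the global ZERO_PAGE(0) for the lifetime of the messenger and uses it as the source whenever a connection has to pad the TCP stream with zeros (for example, the remainder of a revoked message); the hit at 1635 is that sendpage call inside write_partial_skip(). A sketch of the lifecycle around it, assuming the messenger init/exit context of the hits:

	/* messenger init: take a long-lived reference on the shared zero page */
	BUG_ON(zero_page != NULL);
	zero_page = ZERO_PAGE(0);
	get_page(zero_page);

	/*
	 * write_partial_skip() then calls
	 *   ceph_tcp_sendpage(con->sock, zero_page, 0, size, more)
	 * in a loop until all the bytes to skip have gone out as zeros.
	 */

	/* messenger exit: drop the reference again */
	BUG_ON(zero_page == NULL);
	put_page(zero_page);
	zero_page = NULL;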
zero_page        2333 virt/kvm/kvm_main.c 	const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0)));
zero_page        2335 virt/kvm/kvm_main.c 	return kvm_write_guest_page(kvm, gfn, zero_page, offset, len);
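The virt/kvm hits are the entire body of kvm_clear_guest_page(): clearing guest memory is implemented as an ordinary guest write whose source bytes come from the kernel's zero page (kvm_clear_guest(), not shown here, applies this page by page). The function, essentially as the two hits read:

int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
	const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0)));

	return kvm_write_guest_page(kvm, gfn, zero_page, offset, len);
}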