hpage             627 arch/powerpc/kvm/book3s_pr.c 	struct page *hpage;
hpage             632 arch/powerpc/kvm/book3s_pr.c 	hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
hpage             633 arch/powerpc/kvm/book3s_pr.c 	if (is_error_page(hpage))
hpage             640 arch/powerpc/kvm/book3s_pr.c 	get_page(hpage);
hpage             641 arch/powerpc/kvm/book3s_pr.c 	page = kmap_atomic(hpage);
hpage             649 arch/powerpc/kvm/book3s_pr.c 	put_page(hpage);
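
The book3s_pr.c hits above are the classic pin/map/patch/unpin sequence on a guest page, used by kvmppc_patch_dcbz() to rewrite guest dcbz instructions so they trap. A minimal sketch of the pattern, assuming KVM kernel context; the loop body is illustrative:

    struct page *hpage;
    u32 *page;

    hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
    if (is_error_page(hpage))
            return;

    get_page(hpage);                /* hold an extra reference across the mapping */
    page = kmap_atomic(hpage);      /* short-lived, non-sleeping mapping */
    /* ... scan 'page' and patch instructions in place ... */
    kunmap_atomic(page);
    put_page(hpage);
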
hpage             177 arch/s390/kvm/kvm-s390.c static int hpage;
hpage             178 arch/s390/kvm/kvm-s390.c module_param(hpage, int, 0444);
hpage             179 arch/s390/kvm/kvm-s390.c MODULE_PARM_DESC(hpage, "1m huge page backing support");
hpage             539 arch/s390/kvm/kvm-s390.c 		if (hpage && !kvm_is_ucontrol(kvm))
hpage             755 arch/s390/kvm/kvm-s390.c 		else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm))
hpage            4581 arch/s390/kvm/kvm-s390.c 	if (nested && hpage) {
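
The s390 hits are a different hpage entirely: a read-only module parameter gating 1M huge-page backing for guests. The idiom, condensed from the lines above (the helper name is illustrative; the check mirrors lines 539 and 755):

    #include <linux/module.h>

    static int hpage;                        /* 0 = off (default) */
    module_param(hpage, int, 0444);          /* readable, not writable, via sysfs */
    MODULE_PARM_DESC(hpage, "1m huge page backing support");

    static bool hpage_backing_allowed(struct kvm *kvm)
    {
            /* huge-page backing is incompatible with ucontrol guests */
            return hpage && !kvm_is_ucontrol(kvm);
    }

Line 4581 adds a startup constraint: nested virtualization and huge-page backing cannot both be enabled.
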
hpage              43 fs/verity/verify.c static void extract_hash(struct page *hpage, unsigned int hoffset,
hpage              46 fs/verity/verify.c 	void *virt = kmap_atomic(hpage);
hpage             113 fs/verity/verify.c 		struct page *hpage;
hpage             120 fs/verity/verify.c 		hpage = inode->i_sb->s_vop->read_merkle_tree_page(inode,
hpage             122 fs/verity/verify.c 		if (IS_ERR(hpage)) {
hpage             123 fs/verity/verify.c 			err = PTR_ERR(hpage);
hpage             130 fs/verity/verify.c 		if (PageChecked(hpage)) {
hpage             131 fs/verity/verify.c 			extract_hash(hpage, hoffset, hsize, _want_hash);
hpage             133 fs/verity/verify.c 			put_page(hpage);
hpage             140 fs/verity/verify.c 		hpages[level] = hpage;
hpage             150 fs/verity/verify.c 		struct page *hpage = hpages[level - 1];
hpage             153 fs/verity/verify.c 		err = fsverity_hash_page(params, inode, req, hpage, real_hash);
hpage             159 fs/verity/verify.c 		SetPageChecked(hpage);
hpage             160 fs/verity/verify.c 		extract_hash(hpage, hoffset, hsize, _want_hash);
hpage             162 fs/verity/verify.c 		put_page(hpage);
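
The fs/verity hits trace the Merkle-tree walk: each level's hash page comes from ->read_merkle_tree_page(), a page already verified earlier is marked PageChecked so its hash can be trusted immediately, and extract_hash() copies the expected hash out of the mapped page. extract_hash() is small enough to reconstruct whole from lines 43-46:

    static void extract_hash(struct page *hpage, unsigned int hoffset,
                             unsigned int hsize, u8 *out)
    {
            void *virt = kmap_atomic(hpage);

            memcpy(out, virt + hoffset, hsize);   /* copy the expected hash */
            kunmap_atomic(virt);
    }
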
hpage              18 mm/hwpoison-inject.c 	struct page *hpage;
hpage              28 mm/hwpoison-inject.c 	hpage = compound_head(p);
hpage              38 mm/hwpoison-inject.c 	shake_page(hpage, 0);
hpage              42 mm/hwpoison-inject.c 	if (!PageLRU(hpage) && !PageHuge(p))
hpage              50 mm/hwpoison-inject.c 	err = hwpoison_filter(hpage);
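
mm/hwpoison-inject.c always operates on the compound head of the target pfn: it shakes the page to flush per-CPU lists, injects only into LRU or hugetlb pages, and runs the configured filter first. A condensed sketch of that flow (refcounting and the filter-disabled shortcut omitted):

    struct page *p = pfn_to_page(pfn);
    struct page *hpage = compound_head(p);
    int err;

    shake_page(hpage, 0);                 /* drain pcp lists and the like */
    if (!PageLRU(hpage) && !PageHuge(p))
            return 0;                     /* non-LRU, non-huge: unsupported */

    err = hwpoison_filter(hpage);         /* honor the injection filters */
    if (err)
            return 0;

    return memory_failure(pfn, 0);
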
hpage             763 mm/khugepaged.c static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
hpage             765 mm/khugepaged.c 	if (IS_ERR(*hpage)) {
hpage             770 mm/khugepaged.c 		*hpage = NULL;
hpage             772 mm/khugepaged.c 	} else if (*hpage) {
hpage             773 mm/khugepaged.c 		put_page(*hpage);
hpage             774 mm/khugepaged.c 		*hpage = NULL;
hpage             781 mm/khugepaged.c khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
hpage             783 mm/khugepaged.c 	VM_BUG_ON_PAGE(*hpage, *hpage);
hpage             785 mm/khugepaged.c 	*hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
hpage             786 mm/khugepaged.c 	if (unlikely(!*hpage)) {
hpage             788 mm/khugepaged.c 		*hpage = ERR_PTR(-ENOMEM);
hpage             792 mm/khugepaged.c 	prep_transhuge_page(*hpage);
hpage             794 mm/khugepaged.c 	return *hpage;
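
Two builds of khugepaged's allocator appear in this file. In the NUMA variant above, the huge page is allocated on a chosen node at collapse time, and *hpage carries three states: NULL (nothing yet), a page, or an ERR_PTR recording a failed attempt. The allocator, per lines 781-794 (vm-event counters omitted):

    static struct page *
    khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
    {
            VM_BUG_ON_PAGE(*hpage, *hpage);  /* caller must not pass a page in */

            *hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
            if (unlikely(!*hpage)) {
                    *hpage = ERR_PTR(-ENOMEM);   /* remember the failure */
                    return NULL;
            }

            prep_transhuge_page(*hpage);     /* set up THP compound state */
            return *hpage;
    }
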
hpage             815 mm/khugepaged.c 	struct page *hpage;
hpage             818 mm/khugepaged.c 		hpage = alloc_khugepaged_hugepage();
hpage             819 mm/khugepaged.c 		if (!hpage) {
hpage             828 mm/khugepaged.c 	} while (unlikely(!hpage) && likely(khugepaged_enabled()));
hpage             830 mm/khugepaged.c 	return hpage;
hpage             833 mm/khugepaged.c static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
hpage             835 mm/khugepaged.c 	if (!*hpage)
hpage             836 mm/khugepaged.c 		*hpage = khugepaged_alloc_hugepage(wait);
hpage             838 mm/khugepaged.c 	if (unlikely(!*hpage))
hpage             845 mm/khugepaged.c khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
hpage             847 mm/khugepaged.c 	VM_BUG_ON(!*hpage);
hpage             849 mm/khugepaged.c 	return *hpage;
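
The !NUMA variant inverts the scheme: the page is preallocated once per scan round and khugepaged_alloc_page() merely hands back the cached page (hence the bare VM_BUG_ON(!*hpage)). The preallocation half, per lines 833-838:

    static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
    {
            if (!*hpage)
                    *hpage = khugepaged_alloc_hugepage(wait);

            if (unlikely(!*hpage))
                    return false;    /* allocation failed; end this round */

            return true;
    }
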
hpage             948 mm/khugepaged.c 				   struct page **hpage,
hpage             974 mm/khugepaged.c 	new_page = khugepaged_alloc_page(hpage, gfp, node);
hpage            1101 mm/khugepaged.c 	*hpage = NULL;
hpage            1118 mm/khugepaged.c 			       struct page **hpage)
hpage            1232 mm/khugepaged.c 		collapse_huge_page(mm, address, hpage, node, referenced);
hpage            1294 mm/khugepaged.c 	struct page *hpage = NULL;
hpage            1338 mm/khugepaged.c 		if (!hpage) {
hpage            1339 mm/khugepaged.c 			hpage = compound_head(page);
hpage            1347 mm/khugepaged.c 			if (WARN_ON(hpage->mapping != vma->vm_file->f_mapping))
hpage            1358 mm/khugepaged.c 		if (WARN_ON(hpage + i != page))
hpage            1377 mm/khugepaged.c 	if (hpage) {
hpage            1378 mm/khugepaged.c 		page_ref_sub(hpage, count);
hpage            1379 mm/khugepaged.c 		add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count);
hpage            1494 mm/khugepaged.c 		struct page **hpage, int node)
hpage            1512 mm/khugepaged.c 	new_page = khugepaged_alloc_page(hpage, gfp, node);
hpage            1764 mm/khugepaged.c 		*hpage = NULL;
hpage            1816 mm/khugepaged.c 		struct file *file, pgoff_t start, struct page **hpage)
hpage            1884 mm/khugepaged.c 			collapse_file(mm, file, start, hpage, node);
hpage            1892 mm/khugepaged.c 		struct file *file, pgoff_t start, struct page **hpage)
hpage            1904 mm/khugepaged.c 					    struct page **hpage)
hpage            1982 mm/khugepaged.c 				khugepaged_scan_file(mm, file, pgoff, hpage);
hpage            1987 mm/khugepaged.c 						hpage);
hpage            2045 mm/khugepaged.c 	struct page *hpage = NULL;
hpage            2053 mm/khugepaged.c 		if (!khugepaged_prealloc_page(&hpage, &wait))
hpage            2067 mm/khugepaged.c 							    &hpage);
hpage            2073 mm/khugepaged.c 	if (!IS_ERR_OR_NULL(hpage))
hpage            2074 mm/khugepaged.c 		put_page(hpage);
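
khugepaged_do_scan() ties the pieces together: preallocate, pass &hpage down through khugepaged_scan_mm_slot() into collapse_huge_page()/collapse_file() (which consume the page and reset the pointer to NULL, per lines 1101 and 1764), then drop whatever is left over. A condensed sketch of that lifecycle (locking, freezing and slot bookkeeping omitted):

    static void khugepaged_do_scan(void)
    {
            struct page *hpage = NULL;
            unsigned int progress = 0;
            unsigned int pages = khugepaged_pages_to_scan;
            bool wait = true;

            while (progress < pages) {
                    if (!khugepaged_prealloc_page(&hpage, &wait))
                            break;

                    /* scan mm slots; the collapse paths take over hpage */
                    progress += khugepaged_scan_mm_slot(pages - progress,
                                                        &hpage);
            }

            if (!IS_ERR_OR_NULL(hpage))
                    put_page(hpage);   /* unused preallocated page */
    }
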
hpage             786 mm/memory-failure.c 	struct page *hpage = compound_head(p);
hpage             789 mm/memory-failure.c 	if (!PageHuge(hpage))
hpage             792 mm/memory-failure.c 	mapping = page_mapping(hpage);
hpage             794 mm/memory-failure.c 		res = truncate_error_page(hpage, pfn, mapping);
hpage             796 mm/memory-failure.c 		unlock_page(hpage);
hpage             802 mm/memory-failure.c 		if (PageAnon(hpage))
hpage             803 mm/memory-failure.c 			put_page(hpage);
hpage             806 mm/memory-failure.c 		lock_page(hpage);
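
me_huge_page() (lines 786-806) distinguishes file-backed from anonymous hugepages: an error page with a mapping is truncated out of the page cache, while an anonymous hugepage is unpinned and dissolved so its healthy subpages return to the buddy allocator. Condensed:

    struct page *hpage = compound_head(p);
    struct address_space *mapping;
    int res;

    if (!PageHuge(hpage))
            return MF_DELAYED;               /* raw THP handled elsewhere */

    mapping = page_mapping(hpage);
    if (mapping) {
            res = truncate_error_page(hpage, pfn, mapping);  /* file-backed */
    } else {
            unlock_page(hpage);
            if (PageAnon(hpage))
                    put_page(hpage);         /* allow dissolving below */
            dissolve_free_huge_page(p);
            res = MF_RECOVERED;
            lock_page(hpage);
    }
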
hpage             973 mm/memory-failure.c 	struct page *hpage = *hpagep;
hpage             974 mm/memory-failure.c 	bool mlocked = PageMlocked(hpage);
hpage             982 mm/memory-failure.c 	if (!(PageLRU(hpage) || PageHuge(p)))
hpage             989 mm/memory-failure.c 	if (!page_mapped(hpage))
hpage            1009 mm/memory-failure.c 	mapping = page_mapping(hpage);
hpage            1010 mm/memory-failure.c 	if (!(flags & MF_MUST_KILL) && !PageDirty(hpage) && mapping &&
hpage            1012 mm/memory-failure.c 		if (page_mkclean(hpage)) {
hpage            1013 mm/memory-failure.c 			SetPageDirty(hpage);
hpage            1031 mm/memory-failure.c 		collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED);
hpage            1033 mm/memory-failure.c 	unmap_success = try_to_unmap(hpage, ttu);
hpage            1036 mm/memory-failure.c 		       pfn, page_mapcount(hpage));
hpage            1043 mm/memory-failure.c 		shake_page(hpage, 0);
hpage            1055 mm/memory-failure.c 	forcekill = PageDirty(hpage) || (flags & MF_MUST_KILL);
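
hwpoison_user_mappings() is the unmapping core of memory_failure(): a clean, invalidatable pagecache page can be dropped without killing anyone, the victim tasks are collected while the rmap still points at the page, and only then is the page unmapped. A condensed fragment of those decision points (setup, hugetlb and mlock handling omitted; `ttu` holds the TTU_* flags as in the listing):

    struct page *hpage = *hpagep;
    bool unmap_success, forcekill;
    LIST_HEAD(tokill);

    if (!page_mapped(hpage))
            return true;                    /* nothing maps it; done */

    /* collect tasks to signal while the ptes still exist */
    collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED);

    unmap_success = try_to_unmap(hpage, ttu);
    if (!unmap_success)
            pr_err("Memory failure: %#lx: failed to unmap page (mapcount=%d)\n",
                   pfn, page_mapcount(hpage));

    /* a dirty page, or MF_MUST_KILL, means the SIGBUS cannot be skipped */
    forcekill = PageDirty(hpage) || (flags & MF_MUST_KILL);
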
hpage            1251 mm/memory-failure.c 	struct page *hpage;
hpage            1281 mm/memory-failure.c 	orig_head = hpage = compound_head(p);
hpage            1305 mm/memory-failure.c 	if (PageTransHuge(hpage)) {
hpage            1322 mm/memory-failure.c 		hpage = compound_head(p);
hpage            1363 mm/memory-failure.c 		page_flags = hpage->flags;
hpage            1401 mm/memory-failure.c 	if (!hwpoison_user_mappings(p, pfn, flags, &hpage)) {
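
memory_failure() itself re-derives hpage after a THP split (line 1322): splitting turns the compound page back into base pages, so the head pointer taken at entry (line 1281) would be stale. A condensed sketch of that step:

    if (PageTransHuge(hpage)) {
            lock_page(p);
            if (!PageAnon(p) || unlikely(split_huge_page(p))) {
                    unlock_page(p);          /* cannot split; bail out */
                    return -EBUSY;
            }
            unlock_page(p);
            hpage = compound_head(p);        /* p is a base page now */
    }

The page_flags snapshot at line 1363 is likewise taken before unmapping, since unmap side effects can change the flags underneath us.
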
hpage            1687 mm/memory-failure.c 	struct page *hpage = compound_head(page);
hpage            1694 mm/memory-failure.c 	lock_page(hpage);
hpage            1695 mm/memory-failure.c 	if (PageHWPoison(hpage)) {
hpage            1696 mm/memory-failure.c 		unlock_page(hpage);
hpage            1697 mm/memory-failure.c 		put_hwpoison_page(hpage);
hpage            1701 mm/memory-failure.c 	unlock_page(hpage);
hpage            1703 mm/memory-failure.c 	ret = isolate_huge_page(hpage, &pagelist);
hpage            1708 mm/memory-failure.c 	put_hwpoison_page(hpage);
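
soft_offline_huge_page() double-checks under the page lock that the hugepage was not poisoned in the meantime, then isolates it for migration; the trailing put_hwpoison_page() drops the caller's reference, since isolation holds its own. A sketch of that entry sequence, per lines 1687-1708:

    struct page *hpage = compound_head(page);
    LIST_HEAD(pagelist);
    int ret;

    lock_page(hpage);
    if (PageHWPoison(hpage)) {              /* raced with memory_failure() */
            unlock_page(hpage);
            put_hwpoison_page(hpage);
            return -EBUSY;
    }
    unlock_page(hpage);

    ret = isolate_huge_page(hpage, &pagelist);
    put_hwpoison_page(hpage);               /* isolation took its own ref */
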
hpage            1826 mm/memory-failure.c 	struct page *hpage = compound_head(page);
hpage            1828 mm/memory-failure.c 	if (!PageHuge(page) && PageTransHuge(hpage)) {
hpage            1282 mm/migrate.c   				struct page *hpage, int force,
hpage            1297 mm/migrate.c   	if (!hugepage_migration_supported(page_hstate(hpage))) {
hpage            1298 mm/migrate.c   		putback_active_hugepage(hpage);
hpage            1302 mm/migrate.c   	new_hpage = get_new_page(hpage, private);
hpage            1306 mm/migrate.c   	if (!trylock_page(hpage)) {
hpage            1316 mm/migrate.c   		lock_page(hpage);
hpage            1324 mm/migrate.c   	if (page_private(hpage) && !page_mapping(hpage)) {
hpage            1329 mm/migrate.c   	if (PageAnon(hpage))
hpage            1330 mm/migrate.c   		anon_vma = page_get_anon_vma(hpage);
hpage            1335 mm/migrate.c   	if (page_mapped(hpage)) {
hpage            1336 mm/migrate.c   		try_to_unmap(hpage,
hpage            1341 mm/migrate.c   	if (!page_mapped(hpage))
hpage            1342 mm/migrate.c   		rc = move_to_new_page(new_hpage, hpage, mode);
hpage            1345 mm/migrate.c   		remove_migration_ptes(hpage,
hpage            1346 mm/migrate.c   			rc == MIGRATEPAGE_SUCCESS ? new_hpage : hpage, false);
hpage            1355 mm/migrate.c   		move_hugetlb_state(hpage, new_hpage, reason);
hpage            1360 mm/migrate.c   	unlock_page(hpage);
hpage            1363 mm/migrate.c   		putback_active_hugepage(hpage);
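
unmap_and_move_huge_page() follows the standard migration shape: bail out early on hstates the architecture cannot migrate, lock the source page (sleeping only when forced), unmap it, move the contents, then repair the ptes toward whichever page survived. A condensed fragment (anon_vma handling, error paths and the page_private/page_mapping check abbreviated; the exact TTU flag set varies by version):

    if (!hugepage_migration_supported(page_hstate(hpage))) {
            putback_active_hugepage(hpage);  /* arch can't migrate this size */
            return -ENOSYS;
    }

    new_hpage = get_new_page(hpage, private);
    if (!new_hpage)
            return -ENOMEM;

    if (!trylock_page(hpage)) {
            if (!force)
                    goto out;
            lock_page(hpage);
    }

    if (page_mapped(hpage))
            try_to_unmap(hpage, TTU_MIGRATION | TTU_IGNORE_MLOCK);

    if (!page_mapped(hpage))
            rc = move_to_new_page(new_hpage, hpage, mode);

    remove_migration_ptes(hpage,
            rc == MIGRATEPAGE_SUCCESS ? new_hpage : hpage, false);
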
hpage              55 mm/page_vma_mapped.c static inline bool pfn_in_hpage(struct page *hpage, unsigned long pfn)
hpage              57 mm/page_vma_mapped.c 	unsigned long hpage_pfn = page_to_pfn(hpage);
hpage              60 mm/page_vma_mapped.c 	return pfn >= hpage_pfn && pfn - hpage_pfn < hpage_nr_pages(hpage);
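
pfn_in_hpage() can be reconstructed whole from lines 55-60: it tests whether a pfn lands inside a compound page, comparing the offset against hpage_nr_pages() rather than computing hpage_pfn + size, a form that cannot overflow:

    static inline bool pfn_in_hpage(struct page *hpage, unsigned long pfn)
    {
            unsigned long hpage_pfn = page_to_pfn(hpage);

            /* THP can be referenced by any subpage */
            return pfn >= hpage_pfn && pfn - hpage_pfn < hpage_nr_pages(hpage);
    }
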