inpages          6496 arch/x86/kvm/svm.c 				struct page **inpages, unsigned long npages)
inpages          6502 arch/x86/kvm/svm.c 	paddr = __sme_page_pa(inpages[idx]);
inpages          6504 arch/x86/kvm/svm.c 		next_paddr = __sme_page_pa(inpages[i++]);
inpages          6522 arch/x86/kvm/svm.c 	struct page **inpages;
inpages          6540 arch/x86/kvm/svm.c 	inpages = sev_pin_memory(kvm, vaddr, size, &npages, 1);
inpages          6541 arch/x86/kvm/svm.c 	if (!inpages) {
inpages          6552 arch/x86/kvm/svm.c 	sev_clflush_pages(inpages, npages);
inpages          6564 arch/x86/kvm/svm.c 		pages = get_num_contig_pages(i, inpages, npages);
inpages          6570 arch/x86/kvm/svm.c 		data->address = __sme_page_pa(inpages[i]) + offset;
inpages          6582 arch/x86/kvm/svm.c 		set_page_dirty_lock(inpages[i]);
inpages          6583 arch/x86/kvm/svm.c 		mark_page_accessed(inpages[i]);
inpages          6586 arch/x86/kvm/svm.c 	sev_unpin_memory(kvm, inpages, npages);
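
The arch/x86/kvm/svm.c hits above form one lifecycle: inpages is the page array returned by sev_pin_memory(), flushed with sev_clflush_pages(), walked in physically contiguous runs via get_num_contig_pages() and __sme_page_pa(), then marked with set_page_dirty_lock()/mark_page_accessed() and released by sev_unpin_memory(). The run-counting step can be illustrated on its own; the standalone C sketch below assumes "contiguous" means successive system physical addresses exactly PAGE_SIZE apart (an assumption read off the paddr/next_paddr walk, not copied from the kernel), and the paddrs[] array merely stands in for __sme_page_pa(inpages[i]).

/* Standalone analogue of the contiguous-run walk in get_num_contig_pages(). */
#include <stdio.h>

#define PAGE_SIZE 4096UL

static unsigned long num_contig_pages(unsigned long idx,
                                      const unsigned long *paddrs,
                                      unsigned long npages)
{
        unsigned long paddr = paddrs[idx];
        unsigned long i = idx + 1, pages = 1;

        /* extend the run while the next page is physically adjacent */
        while (i < npages && paddrs[i] == paddr + PAGE_SIZE) {
                paddr = paddrs[i++];
                pages++;
        }
        return pages;
}

int main(void)
{
        /* three contiguous pages, a gap, then two more (sample data) */
        unsigned long paddrs[] = {
                0x100000, 0x101000, 0x102000,
                0x200000, 0x201000,
        };
        unsigned long npages = sizeof(paddrs) / sizeof(paddrs[0]);
        unsigned long i = 0;

        while (i < npages) {
                unsigned long run = num_contig_pages(i, paddrs, npages);

                printf("run of %lu page(s) starting at 0x%lx\n", run, paddrs[i]);
                i += run;
        }
        return 0;
}
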
inpages            67 include/linux/sunrpc/gss_api.h 		struct page		**inpages);
inpages           128 include/linux/sunrpc/gss_api.h 			struct page		**inpages);
inpages          1828 net/sunrpc/auth_gss/auth_gss.c 	struct page	**inpages;
inpages          1846 net/sunrpc/auth_gss/auth_gss.c 	inpages = snd_buf->pages + first;
inpages          1863 net/sunrpc/auth_gss/auth_gss.c 	maj_stat = gss_wrap(ctx->gc_gss_ctx, offset, snd_buf, inpages);
inpages           432 net/sunrpc/auth_gss/gss_mech_switch.c 	 struct page	**inpages)
inpages           435 net/sunrpc/auth_gss/gss_mech_switch.c 		->gss_wrap(ctx_id, offset, buf, inpages);
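
The include/linux/sunrpc/gss_api.h hits show inpages as the trailing parameter of two gss_wrap-related prototypes (apparently the generic gss_wrap() declaration and the per-mechanism operation), and the gss_mech_switch.c hit shows the generic entry point simply forwarding (ctx_id, offset, buf, inpages) to the mechanism's implementation through an ops table. A minimal user-space sketch of that dispatch pattern follows; every name in it (toy_ctx, toy_ops, toy_gss_wrap, krb5_wrap, the trimmed xdr_buf) is an illustrative stand-in rather than a kernel symbol.

#include <stdio.h>

struct page;                           /* opaque, as in the kernel */
struct xdr_buf { const char *name; };  /* trimmed-down placeholder */

struct toy_ctx;

struct toy_ops {
        unsigned int (*gss_wrap)(struct toy_ctx *ctx, int offset,
                                 struct xdr_buf *buf, struct page **inpages);
};

struct toy_ctx {
        const struct toy_ops *ops;
};

/* one concrete mechanism implementation */
static unsigned int krb5_wrap(struct toy_ctx *ctx, int offset,
                              struct xdr_buf *buf, struct page **inpages)
{
        printf("wrapping %s at offset %d (inpages=%p)\n",
               buf->name, offset, (void *)inpages);
        return 0;
}

/* generic entry point: pure forwarding, as in the gss_mech_switch.c hit above */
static unsigned int toy_gss_wrap(struct toy_ctx *ctx, int offset,
                                 struct xdr_buf *buf, struct page **inpages)
{
        return ctx->ops->gss_wrap(ctx, offset, buf, inpages);
}

int main(void)
{
        static const struct toy_ops krb5_ops = { .gss_wrap = krb5_wrap };
        struct toy_ctx ctx = { .ops = &krb5_ops };
        struct xdr_buf buf = { .name = "reply" };

        return (int)toy_gss_wrap(&ctx, 0, &buf, NULL);
}
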
inpages          1714 net/sunrpc/auth_gss/svcauth_gss.c 	struct page **inpages = NULL;
inpages          1725 net/sunrpc/auth_gss/svcauth_gss.c 	inpages = resbuf->pages;
inpages          1762 net/sunrpc/auth_gss/svcauth_gss.c 	if (gss_wrap(gsd->rsci->mechctx, offset, resbuf, inpages))
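
The two callers differ in how they build the inpages window before calling gss_wrap(): the RPC client (auth_gss.c) points it into the send buffer's page array at an offset, snd_buf->pages + first, while the server (svcauth_gss.c) passes the response buffer's page array, resbuf->pages, as-is. The sketch below only illustrates the pointer arithmetic of carving such a window out of a page array; the page_base field and the PAGE_SHIFT derivation of first are assumptions for the example, since the listing does not show how first is computed.

#include <stdio.h>

#define PAGE_SHIFT 12

struct page { int id; };               /* opaque placeholder */

struct toy_xdr_buf {
        struct page **pages;           /* page array backing the payload */
        unsigned int page_base;        /* byte offset of the payload in pages[] */
};

int main(void)
{
        struct page backing[4] = { {0}, {1}, {2}, {3} };
        struct page *parray[4] = {
                &backing[0], &backing[1], &backing[2], &backing[3],
        };
        struct toy_xdr_buf snd_buf = {
                .pages = parray,
                .page_base = 2 << PAGE_SHIFT,   /* payload starts in the third page */
        };

        /* client-style window: skip to the first page covered by the payload */
        unsigned int first = snd_buf.page_base >> PAGE_SHIFT;
        struct page **inpages = snd_buf.pages + first;

        /* server-style window: the whole array, no offset */
        struct page **resbuf_inpages = snd_buf.pages;

        printf("client window starts at page id %d (index %u)\n",
               inpages[0]->id, first);
        printf("server window starts at page id %d\n", resbuf_inpages[0]->id);
        return 0;
}
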