pfnp 3303 arch/x86/kvm/mmu.c gfn_t gfn, kvm_pfn_t *pfnp, int *levelp)
pfnp 3320 arch/x86/kvm/mmu.c *pfnp |= gfn & page_mask;
pfnp 3393 arch/x86/kvm/mmu.c gfn_t gfn, kvm_pfn_t *pfnp,
pfnp 3396 arch/x86/kvm/mmu.c kvm_pfn_t pfn = *pfnp;
pfnp 3426 arch/x86/kvm/mmu.c *pfnp = pfn;
pfnp  537 drivers/media/pci/pt1/pt1.c static void *pt1_alloc_page(struct pt1 *pt1, dma_addr_t *addrp, u32 *pfnp)
pfnp  551 drivers/media/pci/pt1/pt1.c *pfnp = addr >> PT1_PAGE_SHIFT;
pfnp  561 drivers/media/pci/pt1/pt1.c pt1_init_buffer(struct pt1 *pt1, struct pt1_buffer *buf, u32 *pfnp)
pfnp  566 drivers/media/pci/pt1/pt1.c page = pt1_alloc_page(pt1, &addr, pfnp);
pfnp  588 drivers/media/pci/pt1/pt1.c pt1_init_table(struct pt1 *pt1, struct pt1_table *table, u32 *pfnp)
pfnp  595 drivers/media/pci/pt1/pt1.c page = pt1_alloc_page(pt1, &addr, pfnp);
pfnp  992 fs/dax.c pfn_t *pfnp)
pfnp 1004 fs/dax.c NULL, pfnp);
pfnp 1012 fs/dax.c if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1))
pfnp 1015 fs/dax.c if (length > 1 && !pfn_t_devmap(*pfnp))
pfnp 1245 fs/dax.c static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
pfnp 1362 fs/dax.c if (WARN_ON_ONCE(!pfnp)) {
pfnp 1366 fs/dax.c *pfnp = pfn;
pfnp 1466 fs/dax.c static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
pfnp 1578 fs/dax.c if (WARN_ON_ONCE(!pfnp))
pfnp 1580 fs/dax.c *pfnp = pfn;
pfnp 1626 fs/dax.c static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
pfnp 1647 fs/dax.c pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops)
pfnp 1651 fs/dax.c return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops);
pfnp 1653 fs/dax.c return dax_iomap_pmd_fault(vmf, pfnp, ops);
pfnp  217 include/linux/dax.h pfn_t *pfnp, int *errp, const struct iomap_ops *ops);
pfnp 1376 virt/kvm/arm/mmu.c static bool transparent_hugepage_adjust(kvm_pfn_t *pfnp, phys_addr_t *ipap)
pfnp 1378 virt/kvm/arm/mmu.c kvm_pfn_t pfn = *pfnp;
pfnp 1414 virt/kvm/arm/mmu.c *pfnp = pfn;
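
Every hit above follows the same out-parameter idiom: the caller owns a PFN (page frame number) variable and passes its address as pfnp; the callee reads the current value through *pfnp, possibly adjusts it, and writes the result back (see the paired "kvm_pfn_t pfn = *pfnp;" / "*pfnp = pfn;" lines in arch/x86/kvm/mmu.c and virt/kvm/arm/mmu.c). The pointer is used because the function's return value is already taken by a status code or bool. Below is a minimal standalone C sketch of that shape; the pfn_t typedef, HPAGE_SHIFT value, and align_pfn_to_hugepage() helper are illustrative stand-ins, not the kernel's definitions.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdbool.h>

    /* Illustrative stand-ins for kernel types/constants -- not the real
     * kernel definitions. In the kernel, kvm_pfn_t is an unsigned PFN type. */
    typedef uint64_t pfn_t;
    #define HPAGE_SHIFT    9   /* 512 base pages per 2 MiB huge page on x86-64 */
    #define HPAGE_PFN_MASK ((1ULL << HPAGE_SHIFT) - 1)

    /*
     * Hypothetical helper mirroring the read-adjust-write-back shape of
     * transparent_hugepage_adjust(): read the caller's PFN through the
     * pointer, round it down to a huge-page boundary, and store it back.
     * Returns true if an adjustment was made.
     */
    static bool align_pfn_to_hugepage(pfn_t *pfnp)
    {
            pfn_t pfn = *pfnp;          /* read, as in "kvm_pfn_t pfn = *pfnp;" */

            if (!(pfn & HPAGE_PFN_MASK))
                    return false;       /* already aligned: leave *pfnp untouched */

            pfn &= ~HPAGE_PFN_MASK;     /* adjust the local copy */
            *pfnp = pfn;                /* write back, as in "*pfnp = pfn;" */
            return true;
    }

    int main(void)
    {
            pfn_t pfn = 0x12345;        /* caller owns the PFN... */

            align_pfn_to_hugepage(&pfn);/* ...and passes its address as pfnp */
            printf("aligned pfn: 0x%llx\n", (unsigned long long)pfn); /* 0x12200 */
            return 0;
    }

The fs/dax.c hits show the same idiom one level up: dax_iomap_fault() merely forwards the caller-supplied pfnp to dax_iomap_pte_fault() or dax_iomap_pmd_fault(), which fill it in so the filesystem can later insert the PFN into the page tables.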