pfn                12 arch/alpha/include/asm/cacheflush.h #define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
pfn                35 arch/alpha/include/asm/mmzone.h #define node_localnr(pfn, nid)	((pfn) - NODE_DATA(nid)->node_start_pfn)
pfn                82 arch/alpha/include/asm/mmzone.h 	unsigned long pfn;                                                   \
pfn                84 arch/alpha/include/asm/mmzone.h 	pfn = page_to_pfn(page) << 32; \
pfn                85 arch/alpha/include/asm/mmzone.h 	pte_val(pte) = pfn | pgprot_val(pgprot);			     \
pfn               104 arch/alpha/include/asm/mmzone.h #define pfn_to_nid(pfn)		pa_to_nid(((u64)(pfn) << PAGE_SHIFT))
pfn               105 arch/alpha/include/asm/mmzone.h #define pfn_valid(pfn)							\
pfn               106 arch/alpha/include/asm/mmzone.h 	(((pfn) - node_start_pfn(pfn_to_nid(pfn))) <			\
pfn               107 arch/alpha/include/asm/mmzone.h 	 node_spanned_pages(pfn_to_nid(pfn)))					\
pfn                89 arch/alpha/include/asm/page.h #define pfn_valid(pfn)		((pfn) < max_mapnr)
pfn               195 arch/alpha/include/asm/pgtable.h #define PHYS_TWIDDLE(pfn) \
pfn               196 arch/alpha/include/asm/pgtable.h   ((((pfn) & KSEG_PFN) == (0x40000000000UL >> PAGE_SHIFT)) \
pfn               197 arch/alpha/include/asm/pgtable.h   ? ((pfn) ^= KSEG_PFN) : (pfn))
pfn               199 arch/alpha/include/asm/pgtable.h #define PHYS_TWIDDLE(pfn) (pfn)
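The alpha hits above already show the two identities this whole index revolves around: a page frame number is just a physical address shifted down by PAGE_SHIFT, and pfn_valid() is a bounds check against the memory map. A minimal self-contained sketch of that arithmetic, assuming alpha's 8 KiB pages; max_mapnr here is a stand-in for the real boot-time value:

```c
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 13UL            /* alpha uses 8 KiB pages */

static uint64_t max_mapnr = 0x40000;   /* stand-in for the boot-time value */

/* phys <-> pfn, in the spirit of __phys_to_pfn()/__pfn_to_phys() */
static uint64_t phys_to_pfn(uint64_t pa)  { return pa >> PAGE_SHIFT; }
static uint64_t pfn_to_phys(uint64_t pfn) { return pfn << PAGE_SHIFT; }

/* flatmem pfn_valid(), as in the arch/alpha/include/asm/page.h hit above */
static int pfn_valid(uint64_t pfn) { return pfn < max_mapnr; }

int main(void)
{
	uint64_t pa  = 0x12345678;
	uint64_t pfn = phys_to_pfn(pa);

	printf("pa=%#llx -> pfn=%#llx (valid=%d), frame base %#llx\n",
	       (unsigned long long)pa, (unsigned long long)pfn,
	       pfn_valid(pfn), (unsigned long long)pfn_to_phys(pfn));
	return 0;
}
```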
pfn               694 arch/alpha/kernel/core_marvel.c 	unsigned long pfn;
pfn               752 arch/alpha/kernel/core_marvel.c 			pfn = ptes[baddr >> PAGE_SHIFT];
pfn               753 arch/alpha/kernel/core_marvel.c 			if (!(pfn & 1)) {
pfn               758 arch/alpha/kernel/core_marvel.c 			pfn >>= 1;	/* make it a true pfn */
pfn               761 arch/alpha/kernel/core_marvel.c 						     pfn << PAGE_SHIFT, 
pfn               465 arch/alpha/kernel/core_titan.c 	unsigned long pfn;
pfn               523 arch/alpha/kernel/core_titan.c 			pfn = ptes[baddr >> PAGE_SHIFT];
pfn               524 arch/alpha/kernel/core_titan.c 			if (!(pfn & 1)) {
pfn               529 arch/alpha/kernel/core_titan.c 			pfn >>= 1;	/* make it a true pfn */
pfn               532 arch/alpha/kernel/core_titan.c 						     pfn << PAGE_SHIFT, 
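core_marvel.c and core_titan.c decode their scatter/gather PTEs identically: bit 0 is a valid bit and the true PFN sits in the bits above it, hence the `pfn & 1` test and `pfn >>= 1` seen above. A hedged sketch of that decode; the 1-bit-valid layout comes from the lines above, while the sample table is invented for illustration:

```c
#include <stdio.h>

#define PAGE_SHIFT 13UL

/* Decode one SG PTE: bit 0 = valid, bits 1.. = PFN, as in the
 * core_marvel.c/core_titan.c hits above. Returns -1 if not valid. */
static long long sg_pte_to_phys(unsigned long pte)
{
	if (!(pte & 1))                     /* invalid entry */
		return -1;
	pte >>= 1;                          /* make it a true pfn */
	return (long long)(pte << PAGE_SHIFT);
}

int main(void)
{
	unsigned long ptes[] = { (0x1234UL << 1) | 1, 0x0 };  /* one valid, one not */
	for (int i = 0; i < 2; i++)
		printf("pte[%d] -> phys %lld\n", i, sg_pte_to_phys(ptes[i]));
	return 0;
}
```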
pfn               398 arch/alpha/kernel/setup.c page_is_ram(unsigned long pfn)
pfn               408 arch/alpha/kernel/setup.c 		if (pfn >= cluster->start_pfn  &&
pfn               409 arch/alpha/kernel/setup.c 		    pfn < cluster->start_pfn + cluster->numpages) {
pfn               211 arch/alpha/mm/init.c 			unsigned long pfn = crb->map[i].pa >> PAGE_SHIFT;
pfn               224 arch/alpha/mm/init.c 					pfn_pte(pfn, PAGE_KERNEL));
pfn               225 arch/alpha/mm/init.c 				pfn++;
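The setup.c hit shows alpha answering page_is_ram() by walking firmware memory clusters and testing whether the PFN falls inside `[start_pfn, start_pfn + numpages)`. A simplified, self-contained sketch under that assumption; the cluster descriptors and their values are invented, and the real function also consults per-cluster usage flags that are omitted here:

```c
#include <stdio.h>

/* Hypothetical stand-in for the firmware memory-cluster descriptors that
 * arch/alpha/kernel/setup.c walks in page_is_ram(). */
struct mem_cluster { unsigned long start_pfn, numpages; };

static struct mem_cluster clusters[] = {
	{ .start_pfn = 0x100, .numpages = 0x300 },
	{ .start_pfn = 0x800, .numpages = 0x100 },
};

static int page_is_ram(unsigned long pfn)
{
	for (unsigned i = 0; i < sizeof(clusters) / sizeof(clusters[0]); i++)
		if (pfn >= clusters[i].start_pfn &&
		    pfn < clusters[i].start_pfn + clusters[i].numpages)
			return 1;
	return 0;
}

int main(void)
{
	printf("%d %d\n", page_is_ram(0x200), page_is_ram(0x700)); /* 1 0 */
	return 0;
}
```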
pfn                57 arch/arc/include/asm/cacheflush.h #define flush_cache_page(vma, u_vaddr, pfn)	/* PF handling/COW-break */
pfn                42 arch/arc/include/asm/hugepage.h #define pfn_pmd(pfn, prot)	(__pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))
pfn                14 arch/arc/include/asm/mmzone.h static inline int pfn_to_nid(unsigned long pfn)
pfn                19 arch/arc/include/asm/mmzone.h 		is_end_low = pfn <= virt_to_pfn(0xFFFFFFFFUL);
pfn                26 arch/arc/include/asm/mmzone.h 	if (pfn >= ARCH_PFN_OFFSET && is_end_low)
pfn                32 arch/arc/include/asm/mmzone.h static inline int pfn_valid(unsigned long pfn)
pfn                34 arch/arc/include/asm/mmzone.h 	int nid = pfn_to_nid(pfn);
pfn                36 arch/arc/include/asm/mmzone.h 	return (pfn <= node_end_pfn(nid));
pfn                88 arch/arc/include/asm/page.h #define pfn_valid(pfn)		(((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
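The arc page.h hit shows the offset-relative pfn_valid() variant: PFNs start at ARCH_PFN_OFFSET, and the unsigned subtraction rejects both ends of the range in a single compare. A runnable sketch; the offset and max_mapnr values are illustrative stand-ins:

```c
#include <stdio.h>

#define ARCH_PFN_OFFSET 0x80000UL   /* stand-in: RAM base 0x80000000, 4 KiB pages */
static unsigned long max_mapnr = 0x10000;

/* As in the arch/arc/include/asm/page.h hit above: one unsigned compare
 * covers both pfn < ARCH_PFN_OFFSET (wraps to a huge value) and
 * pfn >= ARCH_PFN_OFFSET + max_mapnr. */
static int pfn_valid(unsigned long pfn)
{
	return (pfn - ARCH_PFN_OFFSET) < max_mapnr;
}

int main(void)
{
	printf("%d %d %d\n",
	       pfn_valid(0x7ffff),    /* below RAM: 0 */
	       pfn_valid(0x80001),    /* in RAM:    1 */
	       pfn_valid(0x90000));   /* above RAM: 0 */
	return 0;
}
```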
pfn               281 arch/arc/include/asm/pgtable.h #define pfn_pte(pfn, prot)	__pte(__pfn_to_phys(pfn) | pgprot_val(prot))
pfn               976 arch/arc/mm/cache.c 		unsigned long phy, pfn;
pfn               979 arch/arc/mm/cache.c 		pfn = vmalloc_to_pfn((void *)kstart);
pfn               980 arch/arc/mm/cache.c 		phy = (pfn << PAGE_SHIFT) + off;
pfn              1041 arch/arc/mm/cache.c 		      unsigned long pfn)
pfn              1043 arch/arc/mm/cache.c 	phys_addr_t paddr = pfn << PAGE_SHIFT;
pfn               235 arch/arm/include/asm/cacheflush.h vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
pfn               250 arch/arm/include/asm/cacheflush.h #define flush_cache_page(vma,addr,pfn) \
pfn               251 arch/arm/include/asm/cacheflush.h 		vivt_flush_cache_page(vma,addr,pfn)
pfn               255 arch/arm/include/asm/cacheflush.h extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
pfn                36 arch/arm/include/asm/dma-mapping.h static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
pfn                39 arch/arm/include/asm/dma-mapping.h 		pfn -= dev->dma_pfn_offset;
pfn                40 arch/arm/include/asm/dma-mapping.h 	return (dma_addr_t)__pfn_to_bus(pfn);
pfn                45 arch/arm/include/asm/dma-mapping.h 	unsigned long pfn = __bus_to_pfn(addr);
pfn                48 arch/arm/include/asm/dma-mapping.h 		pfn += dev->dma_pfn_offset;
pfn                50 arch/arm/include/asm/dma-mapping.h 	return pfn;
pfn                56 arch/arm/include/asm/dma-mapping.h 		unsigned long pfn = dma_to_pfn(dev, addr);
pfn                58 arch/arm/include/asm/dma-mapping.h 		return phys_to_virt(__pfn_to_phys(pfn));
pfn                73 arch/arm/include/asm/dma-mapping.h static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
pfn                75 arch/arm/include/asm/dma-mapping.h 	return __arch_pfn_to_dma(dev, pfn);
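The arm dma-mapping.h hits apply dev->dma_pfn_offset symmetrically, so pfn_to_dma() and dma_to_pfn() are exact inverses for a device whose bus view of memory is shifted. A self-contained sketch of that round trip, with __pfn_to_bus()/__bus_to_pfn() reduced to plain shifts for illustration:

```c
#include <stdio.h>

#define PAGE_SHIFT 12UL

/* Minimal stand-in for a device whose bus addresses are offset from
 * CPU physical addresses by dma_pfn_offset pages. */
struct device { unsigned long dma_pfn_offset; };

static unsigned long long pfn_to_dma(struct device *dev, unsigned long pfn)
{
	if (dev)
		pfn -= dev->dma_pfn_offset;
	return (unsigned long long)pfn << PAGE_SHIFT;   /* __pfn_to_bus() stand-in */
}

static unsigned long dma_to_pfn(struct device *dev, unsigned long long addr)
{
	unsigned long pfn = (unsigned long)(addr >> PAGE_SHIFT); /* __bus_to_pfn() */
	if (dev)
		pfn += dev->dma_pfn_offset;
	return pfn;
}

int main(void)
{
	struct device dev = { .dma_pfn_offset = 0x80000 };
	unsigned long pfn = 0x81234;
	unsigned long long dma = pfn_to_dma(&dev, pfn);

	printf("pfn %#lx -> dma %#llx -> pfn %#lx\n", pfn, dma, dma_to_pfn(&dev, dma));
	return 0;
}
```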
pfn                70 arch/arm/include/asm/highmem.h extern void *kmap_atomic_pfn(unsigned long pfn);
pfn               458 arch/arm/include/asm/io.h extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
pfn               459 arch/arm/include/asm/io.h extern int devmem_is_allowed(unsigned long pfn);
pfn                73 arch/arm/include/asm/kvm_mmu.h #define kvm_pfn_pte(pfn, prot)	pfn_pte(pfn, prot)
pfn                74 arch/arm/include/asm/kvm_mmu.h #define kvm_pfn_pmd(pfn, prot)	pfn_pmd(pfn, prot)
pfn                75 arch/arm/include/asm/kvm_mmu.h #define kvm_pfn_pud(pfn, prot)	(__pud(0))
pfn               211 arch/arm/include/asm/kvm_mmu.h static inline void __clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
pfn               225 arch/arm/include/asm/kvm_mmu.h 		void *va = kmap_atomic_pfn(pfn);
pfn               230 arch/arm/include/asm/kvm_mmu.h 		pfn++;
pfn               236 arch/arm/include/asm/kvm_mmu.h static inline void __invalidate_icache_guest_page(kvm_pfn_t pfn,
pfn               275 arch/arm/include/asm/kvm_mmu.h 		void *va = kmap_atomic_pfn(pfn);
pfn               288 arch/arm/include/asm/kvm_mmu.h 		pfn++;
pfn               313 arch/arm/include/asm/kvm_mmu.h 	kvm_pfn_t pfn = pmd_pfn(pmd);
pfn               316 arch/arm/include/asm/kvm_mmu.h 		void *va = kmap_atomic_pfn(pfn);
pfn               320 arch/arm/include/asm/kvm_mmu.h 		pfn++;
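The 32-bit KVM helpers above cannot assume a guest page has a permanent kernel mapping, so __clean_dcache_guest_page() and __invalidate_icache_guest_page() map one frame at a time with kmap_atomic_pfn(), operate on it, and advance `pfn++`. A userspace sketch of that walk; map_page() is a hypothetical stand-in for kmap_atomic_pfn(), and memset() stands in for the cache-maintenance op:

```c
#include <stdio.h>
#include <string.h>

#define PAGE_SHIFT 12UL
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static char fake_ram[4 * PAGE_SIZE];     /* pretend physical memory */

/* Hypothetical stand-in for kmap_atomic_pfn(): map one frame, get a VA. */
static void *map_page(unsigned long pfn) { return fake_ram + (pfn << PAGE_SHIFT); }

/* Walk a multi-page range one frame at a time, in the shape of the
 * arch/arm/include/asm/kvm_mmu.h hits above; size is page-aligned. */
static void clean_range(unsigned long pfn, unsigned long size)
{
	while (size) {
		void *va = map_page(pfn);
		memset(va, 0, PAGE_SIZE);     /* stands in for the cache op */
		size -= PAGE_SIZE;
		pfn++;                        /* next frame */
	}
}

int main(void)
{
	clean_range(1, 2 * PAGE_SIZE);
	printf("cleaned frames 1..2\n");
	return 0;
}
```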
pfn                16 arch/arm/include/asm/mach/map.h 	unsigned long pfn;
pfn                75 arch/arm/include/asm/mach/pci.h extern void pci_map_io_early(unsigned long pfn);
pfn                77 arch/arm/include/asm/mach/pci.h static inline void pci_map_io_early(unsigned long pfn) {}
pfn               297 arch/arm/include/asm/memory.h #define pfn_to_kaddr(pfn)	__va((phys_addr_t)(pfn) << PAGE_SHIFT)
pfn               236 arch/arm/include/asm/pgtable-3level.h #define pfn_pmd(pfn,prot)	(__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
pfn               133 arch/arm/include/asm/pgtable.h extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
pfn               212 arch/arm/include/asm/pgtable.h #define pfn_pte(pfn,prot)	__pte(__pfn_to_phys(pfn) | pgprot_val(prot))
pfn               613 arch/arm/kernel/bios32.c void __init pci_map_io_early(unsigned long pfn)
pfn               621 arch/arm/kernel/bios32.c 	pci_io_desc.pfn = pfn;
pfn                30 arch/arm/kernel/crash_dump.c ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
pfn                39 arch/arm/kernel/crash_dump.c 	vaddr = ioremap(__pfn_to_phys(pfn), PAGE_SIZE);
pfn                49 arch/arm/kernel/efi.c 		.pfn		= __phys_to_pfn(md->phys_addr),
pfn                26 arch/arm/kernel/hibernate.c int pfn_is_nosave(unsigned long pfn)
pfn                31 arch/arm/kernel/hibernate.c 	return (pfn >= nosave_begin_pfn) && (pfn <= nosave_end_pfn);
pfn                56 arch/arm/kernel/tcm.c 		.pfn		= __phys_to_pfn(DTCM_OFFSET),
pfn                65 arch/arm/kernel/tcm.c 		.pfn		= __phys_to_pfn(ITCM_OFFSET),
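The long run of `.pfn = __phys_to_pfn(...)` hits that dominates the rest of this section (efi.c and tcm.c above through the mach-* board files below) is all one idiom: static `struct map_desc` tables, whose `pfn` member is declared in the arch/arm/include/asm/mach/map.h hit above, filled with the frame number of a device's physical base and handed to iotable_init() at boot. A self-contained sketch of the shape; the addresses, names, and MT_DEVICE value are invented for illustration:

```c
#include <stdio.h>

#define PAGE_SHIFT 12UL
#define __phys_to_pfn(pa) ((unsigned long)(pa) >> PAGE_SHIFT)

/* Illustrative copy of the io-table shape behind the ".pfn =" hits. */
struct map_desc {
	unsigned long virtual;   /* VA to map at */
	unsigned long pfn;       /* physical frame to map */
	unsigned long length;
	unsigned int type;
};
enum { MT_DEVICE = 0 };      /* placeholder for the real memory-type enum */

static struct map_desc board_io_desc[] = {
	{
		.virtual = 0xfec00000,
		.pfn     = __phys_to_pfn(0x10000000),   /* hypothetical UART block */
		.length  = 0x00100000,
		.type    = MT_DEVICE,
	},
};

int main(void)
{
	/* At boot an ARM machine would pass this to iotable_init(). */
	printf("map VA %#lx -> pfn %#lx\n",
	       board_io_desc[0].virtual, board_io_desc[0].pfn);
	return 0;
}
```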
pfn                31 arch/arm/mach-clps711x/board-dt.c 	.pfn		= __phys_to_pfn(CLPS711X_PHYS_BASE),
pfn               230 arch/arm/mach-cns3xxx/cns3420vb.c 		.pfn		= __phys_to_pfn(CNS3XXX_UART0_BASE),
pfn                29 arch/arm/mach-cns3xxx/core.c 		.pfn		= __phys_to_pfn(CNS3XXX_TC11MP_SCU_BASE),
pfn                34 arch/arm/mach-cns3xxx/core.c 		.pfn		= __phys_to_pfn(CNS3XXX_TIMER1_2_3_BASE),
pfn                39 arch/arm/mach-cns3xxx/core.c 		.pfn		= __phys_to_pfn(CNS3XXX_MISC_BASE),
pfn                44 arch/arm/mach-cns3xxx/core.c 		.pfn		= __phys_to_pfn(CNS3XXX_PM_BASE),
pfn                50 arch/arm/mach-cns3xxx/core.c 		.pfn		= __phys_to_pfn(CNS3XXX_PCIE0_HOST_BASE),
pfn                55 arch/arm/mach-cns3xxx/core.c 		.pfn		= __phys_to_pfn(CNS3XXX_PCIE0_CFG0_BASE),
pfn                60 arch/arm/mach-cns3xxx/core.c 		.pfn		= __phys_to_pfn(CNS3XXX_PCIE0_CFG1_BASE),
pfn                65 arch/arm/mach-cns3xxx/core.c 		.pfn		= __phys_to_pfn(CNS3XXX_PCIE1_HOST_BASE),
pfn                70 arch/arm/mach-cns3xxx/core.c 		.pfn		= __phys_to_pfn(CNS3XXX_PCIE1_CFG0_BASE),
pfn                75 arch/arm/mach-cns3xxx/core.c 		.pfn		= __phys_to_pfn(CNS3XXX_PCIE1_CFG1_BASE),
pfn               632 arch/arm/mach-davinci/da830.c 		.pfn		= __phys_to_pfn(IO_PHYS),
pfn               638 arch/arm/mach-davinci/da830.c 		.pfn		= __phys_to_pfn(DA8XX_CP_INTC_BASE),
pfn               307 arch/arm/mach-davinci/da850.c 		.pfn		= __phys_to_pfn(IO_PHYS),
pfn               313 arch/arm/mach-davinci/da850.c 		.pfn		= __phys_to_pfn(DA8XX_CP_INTC_BASE),
pfn               606 arch/arm/mach-davinci/dm355.c 		.pfn		= __phys_to_pfn(IO_PHYS),
pfn               617 arch/arm/mach-davinci/dm365.c 		.pfn		= __phys_to_pfn(IO_PHYS),
pfn               540 arch/arm/mach-davinci/dm644x.c 		.pfn		= __phys_to_pfn(IO_PHYS),
pfn               480 arch/arm/mach-davinci/dm646x.c 		.pfn		= __phys_to_pfn(IO_PHYS),
pfn                54 arch/arm/mach-dove/common.c 		.pfn		= __phys_to_pfn(DOVE_SB_REGS_PHYS_BASE),
pfn                59 arch/arm/mach-dove/common.c 		.pfn		= __phys_to_pfn(DOVE_NB_REGS_PHYS_BASE),
pfn                75 arch/arm/mach-ebsa110/core.c 		.pfn		= __phys_to_pfn(TRICK4_PHYS),
pfn                80 arch/arm/mach-ebsa110/core.c 		.pfn		= __phys_to_pfn(TRICK3_PHYS),
pfn                85 arch/arm/mach-ebsa110/core.c 		.pfn		= __phys_to_pfn(TRICK1_PHYS),
pfn                90 arch/arm/mach-ebsa110/core.c 		.pfn		= __phys_to_pfn(TRICK0_PHYS),
pfn               100 arch/arm/mach-ebsa110/core.c 		.pfn		= __phys_to_pfn(ISAIO_PHYS),
pfn               105 arch/arm/mach-ebsa110/core.c 		.pfn		= __phys_to_pfn(ISAMEM_PHYS),
pfn                57 arch/arm/mach-ep93xx/core.c 		.pfn		= __phys_to_pfn(EP93XX_AHB_PHYS_BASE),
pfn                62 arch/arm/mach-ep93xx/core.c 		.pfn		= __phys_to_pfn(EP93XX_APB_PHYS_BASE),
pfn                40 arch/arm/mach-ep93xx/ts72xx.c 		.pfn		= __phys_to_pfn(TS72XX_MODEL_PHYS_BASE),
pfn                45 arch/arm/mach-ep93xx/ts72xx.c 		.pfn		= __phys_to_pfn(TS72XX_OPTIONS_PHYS_BASE),
pfn                50 arch/arm/mach-ep93xx/ts72xx.c 		.pfn		= __phys_to_pfn(TS72XX_OPTIONS2_PHYS_BASE),
pfn                55 arch/arm/mach-ep93xx/ts72xx.c 		.pfn		= __phys_to_pfn(TS72XX_CPLDVER_PHYS_BASE),
pfn                50 arch/arm/mach-ep93xx/vision_ep9307.c 		.pfn		= __phys_to_pfn(VISION_PHYS_BASE),
pfn                74 arch/arm/mach-exynos/exynos.c 	iodesc.pfn = __phys_to_pfn(be32_to_cpu(reg[0]));
pfn               140 arch/arm/mach-footbridge/common.c 		.pfn		= __phys_to_pfn(DC21285_ARMCSR_BASE),
pfn               154 arch/arm/mach-footbridge/common.c 		.pfn		= __phys_to_pfn(DC21285_PCI_MEM),
pfn               159 arch/arm/mach-footbridge/common.c 		.pfn		= __phys_to_pfn(DC21285_PCI_TYPE_0_CONFIG),
pfn               164 arch/arm/mach-footbridge/common.c 		.pfn		= __phys_to_pfn(DC21285_PCI_TYPE_1_CONFIG),
pfn               169 arch/arm/mach-footbridge/common.c 		.pfn		= __phys_to_pfn(DC21285_PCI_IACK),
pfn               253 arch/arm/mach-footbridge/common.c unsigned long __pfn_to_bus(unsigned long pfn)
pfn               255 arch/arm/mach-footbridge/common.c 	return __pfn_to_phys(pfn) + (fb_bus_sdram_offset() - PHYS_OFFSET);
pfn                19 arch/arm/mach-gemini/board-dt.c 		.pfn = __phys_to_pfn(CONFIG_DEBUG_UART_PHYS),
pfn                31 arch/arm/mach-hisi/hisilicon.c 		.pfn		= __phys_to_pfn(HI3620_SYSCTRL_PHYS_BASE),
pfn               105 arch/arm/mach-imx/hardware.h 	.pfn = __phys_to_pfn(soc ## _ ## name ## _BASE_ADDR),		\
pfn               256 arch/arm/mach-imx/mach-kzm_arm11_01.c 		.pfn		= __phys_to_pfn(MX31_CS4_BASE_ADDR),
pfn               262 arch/arm/mach-imx/mach-kzm_arm11_01.c 		.pfn		= __phys_to_pfn(MX31_CS5_BASE_ADDR),
pfn               385 arch/arm/mach-imx/mach-mx27ads.c 		.pfn = __phys_to_pfn(MX27_CS4_BASE_ADDR),
pfn               536 arch/arm/mach-imx/mach-mx31ads.c 		.pfn		= __phys_to_pfn(MX31_CS4_BASE_ADDR),
pfn               218 arch/arm/mach-imx/mach-mx31lite.c 		.pfn = __phys_to_pfn(MX31_CS4_BASE_ADDR),
pfn                37 arch/arm/mach-imx/platsmp.c 	scu_io_desc.pfn = __phys_to_pfn(base);
pfn                49 arch/arm/mach-integrator/integrator_ap.c 		.pfn		= __phys_to_pfn(INTEGRATOR_IC_BASE),
pfn                54 arch/arm/mach-integrator/integrator_ap.c 		.pfn		= __phys_to_pfn(INTEGRATOR_UART0_BASE),
pfn                42 arch/arm/mach-integrator/integrator_cp.c 		.pfn		= __phys_to_pfn(INTEGRATOR_IC_BASE),
pfn                47 arch/arm/mach-integrator/integrator_cp.c 		.pfn		= __phys_to_pfn(INTEGRATOR_UART0_BASE),
pfn                52 arch/arm/mach-integrator/integrator_cp.c 		.pfn		= __phys_to_pfn(INTEGRATOR_CP_SIC_BASE),
pfn                58 arch/arm/mach-iop32x/em7210.c 		.pfn		= __phys_to_pfn(IQ31244_UART),
pfn                53 arch/arm/mach-iop32x/glantank.c 		.pfn		= __phys_to_pfn(GLANTANK_UART),
pfn                84 arch/arm/mach-iop32x/iq31244.c 		.pfn		= __phys_to_pfn(IQ31244_UART),
pfn                52 arch/arm/mach-iop32x/iq80321.c 		.pfn		= __phys_to_pfn(IQ80321_UART),
pfn                60 arch/arm/mach-iop32x/n2100.c 		.pfn		= __phys_to_pfn(N2100_UART),
pfn                22 arch/arm/mach-iop32x/setup.c 		.pfn		= __phys_to_pfn(IOP3XX_PERIPHERAL_PHYS_BASE),
pfn                55 arch/arm/mach-ixp4xx/common.c 		.pfn		= __phys_to_pfn(IXP4XX_PERIPHERAL_BASE_PHYS),
pfn                60 arch/arm/mach-ixp4xx/common.c 		.pfn		= __phys_to_pfn(IXP4XX_EXP_CFG_BASE_PHYS),
pfn                65 arch/arm/mach-ixp4xx/common.c 		.pfn		= __phys_to_pfn(IXP4XX_PCI_CFG_BASE_PHYS),
pfn                24 arch/arm/mach-ixp4xx/ixp4xx-of.c 		.pfn = __phys_to_pfn(IXP4XX_EXP_CFG_BASE_PHYS),
pfn                32 arch/arm/mach-ixp4xx/ixp4xx-of.c 		.pfn = __phys_to_pfn(CONFIG_DEBUG_UART_PHYS),
pfn                80 arch/arm/mach-lpc32xx/common.c 		.pfn		= __phys_to_pfn(LPC32XX_AHB0_START),
pfn                86 arch/arm/mach-lpc32xx/common.c 		.pfn		= __phys_to_pfn(LPC32XX_AHB1_START),
pfn                92 arch/arm/mach-lpc32xx/common.c 		.pfn		= __phys_to_pfn(LPC32XX_FABAPB_START),
pfn                98 arch/arm/mach-lpc32xx/common.c 		.pfn		= __phys_to_pfn(LPC32XX_IRAM_BASE),
pfn                27 arch/arm/mach-mmp/common.c 		.pfn		= __phys_to_pfn(APB_PHYS_BASE),
pfn                32 arch/arm/mach-mmp/common.c 		.pfn		= __phys_to_pfn(AXI_PHYS_BASE),
pfn               135 arch/arm/mach-mv78xx0/common.c 		.pfn		= 0,
pfn               140 arch/arm/mach-mv78xx0/common.c 		.pfn		= __phys_to_pfn(MV78XX0_REGS_PHYS_BASE),
pfn               159 arch/arm/mach-mv78xx0/common.c 	mv78xx0_io_desc[0].pfn = __phys_to_pfn(phys);
pfn                76 arch/arm/mach-nomadik/cpu-8815.c 		.pfn =		__phys_to_pfn(NOMADIK_UART1_BASE),
pfn               138 arch/arm/mach-omap1/board-ams-delta.c 		.pfn		= __phys_to_pfn(LATCH1_PHYS),
pfn               145 arch/arm/mach-omap1/board-ams-delta.c 		.pfn		= __phys_to_pfn(LATCH2_PHYS),
pfn               152 arch/arm/mach-omap1/board-ams-delta.c 		.pfn		= __phys_to_pfn(MODEM_PHYS),
pfn               336 arch/arm/mach-omap1/board-fsample.c 		.pfn		= __phys_to_pfn(H2P2_DBG_FPGA_START),
pfn               342 arch/arm/mach-omap1/board-fsample.c 		.pfn		= __phys_to_pfn(FSAMPLE_CPLD_START),
pfn               165 arch/arm/mach-omap1/board-innovator.c 		.pfn		= __phys_to_pfn(OMAP1510_FPGA_START),
pfn               304 arch/arm/mach-omap1/board-perseus2.c 		.pfn		= __phys_to_pfn(H2P2_DBG_FPGA_START),
pfn                31 arch/arm/mach-omap1/include/mach/memory.h #define __arch_pfn_to_dma(dev, pfn)	\
pfn                32 arch/arm/mach-omap1/include/mach/memory.h 	({ dma_addr_t __dma = __pfn_to_phys(pfn); \
pfn                31 arch/arm/mach-omap1/io.c 		.pfn		= __phys_to_pfn(OMAP1_IO_PHYS),
pfn                41 arch/arm/mach-omap1/io.c 		.pfn		= __phys_to_pfn(OMAP7XX_DSP_START),
pfn                46 arch/arm/mach-omap1/io.c 		.pfn		= __phys_to_pfn(OMAP7XX_DSPREG_START),
pfn                57 arch/arm/mach-omap1/io.c 		.pfn		= __phys_to_pfn(OMAP1510_DSP_START),
pfn                62 arch/arm/mach-omap1/io.c 		.pfn		= __phys_to_pfn(OMAP1510_DSPREG_START),
pfn                73 arch/arm/mach-omap1/io.c 		.pfn		= __phys_to_pfn(OMAP16XX_DSP_START),
pfn                78 arch/arm/mach-omap1/io.c 		.pfn		= __phys_to_pfn(OMAP16XX_DSPREG_START),
pfn                70 arch/arm/mach-omap2/io.c 		.pfn		= __phys_to_pfn(L3_24XX_PHYS),
pfn                76 arch/arm/mach-omap2/io.c 		.pfn		= __phys_to_pfn(L4_24XX_PHYS),
pfn                86 arch/arm/mach-omap2/io.c 		.pfn		= __phys_to_pfn(DSP_MEM_2420_PHYS),
pfn                92 arch/arm/mach-omap2/io.c 		.pfn		= __phys_to_pfn(DSP_IPI_2420_PHYS),
pfn                98 arch/arm/mach-omap2/io.c 		.pfn		= __phys_to_pfn(DSP_MMU_2420_PHYS),
pfn               110 arch/arm/mach-omap2/io.c 		.pfn		= __phys_to_pfn(L4_WK_243X_PHYS),
pfn               116 arch/arm/mach-omap2/io.c 		.pfn		= __phys_to_pfn(OMAP243X_GPMC_PHYS),
pfn               122 arch/arm/mach-omap2/io.c 		.pfn		= __phys_to_pfn(OMAP243X_SDRC_PHYS),
pfn               128 arch/arm/mach-omap2/io.c 		.pfn		= __phys_to_pfn(OMAP243X_SMS_PHYS),
pfn               140 arch/arm/mach-omap2/io.c 		.pfn		= __phys_to_pfn(L3_34XX_PHYS),
pfn               146 arch/arm/mach-omap2/io.c 		.pfn		= __phys_to_pfn(L4_34XX_PHYS),
pfn               152 arch/arm/mach-omap2/io.c 		.pfn		= __phys_to_pfn(OMAP34XX_GPMC_PHYS),
pfn               158 arch/arm/mach-omap2/io.c 		.pfn		= __phys_to_pfn(OMAP343X_SMS_PHYS),
pfn               164 arch/arm/mach-omap2/io.c 		.pfn		= __phys_to_pfn(OMAP343X_SDRC_PHYS),
pfn               170 arch/arm/mach-omap2/io.c 		.pfn		= __phys_to_pfn(L4_PER_34XX_PHYS),
pfn               176 arch/arm/mach-omap2/io.c 		.pfn		= __phys_to_pfn(L4_EMU_34XX_PHYS),
pfn               187 arch/arm/mach-omap2/io.c 		.pfn		= __phys_to_pfn(L4_34XX_PHYS),
pfn               198 arch/arm/mach-omap2/io.c 		.pfn		= __phys_to_pfn(L4_34XX_PHYS),
pfn               204 arch/arm/mach-omap2/io.c 		.pfn		= __phys_to_pfn(L4_WK_AM33XX_PHYS),
pfn               215 arch/arm/mach-omap2/io.c 		.pfn		= __phys_to_pfn(L3_44XX_PHYS),
pfn               221 arch/arm/mach-omap2/io.c 		.pfn		= __phys_to_pfn(L4_44XX_PHYS),
pfn               227 arch/arm/mach-omap2/io.c 		.pfn		= __phys_to_pfn(L4_PER_44XX_PHYS),
pfn               238 arch/arm/mach-omap2/io.c 		.pfn		= __phys_to_pfn(L3_54XX_PHYS),
pfn               244 arch/arm/mach-omap2/io.c 		.pfn		= __phys_to_pfn(L4_54XX_PHYS),
pfn               250 arch/arm/mach-omap2/io.c 		.pfn		= __phys_to_pfn(L4_WK_54XX_PHYS),
pfn               256 arch/arm/mach-omap2/io.c 		.pfn		= __phys_to_pfn(L4_PER_54XX_PHYS),
pfn               267 arch/arm/mach-omap2/io.c 		.pfn		= __phys_to_pfn(L4_CFG_MPU_DRA7XX_PHYS),
pfn               273 arch/arm/mach-omap2/io.c 		.pfn		= __phys_to_pfn(L3_MAIN_SN_DRA7XX_PHYS),
pfn               279 arch/arm/mach-omap2/io.c 		.pfn		= __phys_to_pfn(L4_PER1_DRA7XX_PHYS),
pfn               285 arch/arm/mach-omap2/io.c 		.pfn		= __phys_to_pfn(L4_PER2_DRA7XX_PHYS),
pfn               291 arch/arm/mach-omap2/io.c 		.pfn		= __phys_to_pfn(L4_PER3_DRA7XX_PHYS),
pfn               297 arch/arm/mach-omap2/io.c 		.pfn		= __phys_to_pfn(L4_CFG_DRA7XX_PHYS),
pfn               303 arch/arm/mach-omap2/io.c 		.pfn		= __phys_to_pfn(L4_WKUP_DRA7XX_PHYS),
pfn               160 arch/arm/mach-omap2/omap4-common.c 	dram_io_desc[0].pfn = __phys_to_pfn(dram_sync_paddr);
pfn                46 arch/arm/mach-orion5x/common.c 		.pfn		= __phys_to_pfn(ORION5X_REGS_PHYS_BASE),
pfn                51 arch/arm/mach-orion5x/common.c 		.pfn		= __phys_to_pfn(ORION5X_PCIE_WA_PHYS_BASE),
pfn                52 arch/arm/mach-orion5x/ts78xx-setup.c 		.pfn		= __phys_to_pfn(TS78XX_FPGA_REGS_PHYS_BASE),
pfn                41 arch/arm/mach-picoxcell/common.c 	.pfn		= __phys_to_pfn(PICOXCELL_PERIPH_BASE),
pfn               799 arch/arm/mach-pxa/balloon3.c 		.pfn		= __phys_to_pfn(BALLOON3_FPGA_PHYS),
pfn               496 arch/arm/mach-pxa/cm-x2xx.c 		.pfn		= __phys_to_pfn(PXA_CS4_PHYS),
pfn                81 arch/arm/mach-pxa/generic.c 		.pfn		= __phys_to_pfn(PERIPH_PHYS),
pfn               179 arch/arm/mach-pxa/idp.c 		.pfn		= __phys_to_pfn(IDP_COREVOLT_PHYS),
pfn               184 arch/arm/mach-pxa/idp.c 		.pfn		= __phys_to_pfn(IDP_CPLD_PHYS),
pfn               493 arch/arm/mach-pxa/lpd270.c 		.pfn		= __phys_to_pfn(LPD270_CPLD_PHYS),
pfn               522 arch/arm/mach-pxa/lubbock.c 		.pfn		= __phys_to_pfn(LUBBOCK_FPGA_PHYS),
pfn               608 arch/arm/mach-pxa/mainstone.c 		.pfn		= __phys_to_pfn(MST_FPGA_PHYS),
pfn               313 arch/arm/mach-pxa/palmld.c 	.pfn		= __phys_to_pfn(PALMLD_IDE_PHYS),
pfn               319 arch/arm/mach-pxa/palmld.c 	.pfn		= __phys_to_pfn(PALMLD_USB_PHYS),
pfn               314 arch/arm/mach-pxa/palmtx.c 	.pfn		= __phys_to_pfn(PALMTX_PCMCIA_PHYS),
pfn               319 arch/arm/mach-pxa/palmtx.c 	.pfn		= __phys_to_pfn(PALMTX_NAND_ALE_PHYS),
pfn               324 arch/arm/mach-pxa/palmtx.c 	.pfn		= __phys_to_pfn(PALMTX_NAND_CLE_PHYS),
pfn               168 arch/arm/mach-pxa/pxa25x.c 		.pfn		= __phys_to_pfn(PXA2XX_SMEMC_BASE),
pfn               173 arch/arm/mach-pxa/pxa25x.c 		.pfn		= __phys_to_pfn(0x00000000),
pfn               250 arch/arm/mach-pxa/pxa27x.c 		.pfn		= __phys_to_pfn(PXA2XX_SMEMC_BASE),
pfn               255 arch/arm/mach-pxa/pxa27x.c 		.pfn		= __phys_to_pfn(0x00000000),
pfn               375 arch/arm/mach-pxa/pxa3xx.c 		.pfn		= __phys_to_pfn(PXA3XX_SMEMC_BASE),
pfn               380 arch/arm/mach-pxa/pxa3xx.c 		.pfn		= __phys_to_pfn(NAND_PHYS),
pfn               419 arch/arm/mach-pxa/trizeps4.c 		.pfn		= __phys_to_pfn(TRIZEPS4_CFSR_PHYS),
pfn               425 arch/arm/mach-pxa/trizeps4.c 		.pfn		= __phys_to_pfn(TRIZEPS4_BOCR_PHYS),
pfn               431 arch/arm/mach-pxa/trizeps4.c 		.pfn		= __phys_to_pfn(TRIZEPS4_IRCR_PHYS),
pfn               437 arch/arm/mach-pxa/trizeps4.c 		.pfn		= __phys_to_pfn(TRIZEPS4_DICR_PHYS),
pfn               443 arch/arm/mach-pxa/trizeps4.c 		.pfn		= __phys_to_pfn(TRIZEPS4_UPSR_PHYS),
pfn               992 arch/arm/mach-pxa/viper.c 		.pfn     = __phys_to_pfn(VIPER_CPLD_PHYS),
pfn               998 arch/arm/mach-pxa/viper.c 		.pfn     = __phys_to_pfn(0x30000000),
pfn               905 arch/arm/mach-pxa/zeus.c 		.pfn     = __phys_to_pfn(ZEUS_CPLD_VERSION_PHYS),
pfn               911 arch/arm/mach-pxa/zeus.c 		.pfn     = __phys_to_pfn(ZEUS_CPLD_ISA_IRQ_PHYS),
pfn               917 arch/arm/mach-pxa/zeus.c 		.pfn     = __phys_to_pfn(ZEUS_CPLD_CONTROL_PHYS),
pfn               923 arch/arm/mach-pxa/zeus.c 		.pfn     = __phys_to_pfn(ZEUS_PC104IO_PHYS),
pfn                69 arch/arm/mach-rpc/riscpc.c 		.pfn		= __phys_to_pfn(SCREEN_START),
pfn                74 arch/arm/mach-rpc/riscpc.c 		.pfn		= __phys_to_pfn(IO_START),
pfn                79 arch/arm/mach-rpc/riscpc.c 		.pfn		= __phys_to_pfn(EASI_START),
pfn                60 arch/arm/mach-s3c24xx/mach-anubis.c 	.pfn		= __phys_to_pfn(0x0),
pfn                65 arch/arm/mach-s3c24xx/mach-anubis.c 	.pfn		= __phys_to_pfn(0x0),
pfn                79 arch/arm/mach-s3c24xx/mach-anubis.c 	.pfn		= __phys_to_pfn(ANUBIS_PA_CTRL1),
pfn                84 arch/arm/mach-s3c24xx/mach-anubis.c 	.pfn		= __phys_to_pfn(ANUBIS_PA_IDREG),
pfn                78 arch/arm/mach-s3c24xx/mach-bast.c 	  .pfn		= PA_CS2(BAST_PA_ISAIO),
pfn                83 arch/arm/mach-s3c24xx/mach-bast.c 	  .pfn		= PA_CS3(BAST_PA_ISAIO),
pfn                90 arch/arm/mach-s3c24xx/mach-bast.c 	  .pfn		= __phys_to_pfn(BAST_PA_CTRL1),
pfn                95 arch/arm/mach-s3c24xx/mach-bast.c 	  .pfn		= __phys_to_pfn(BAST_PA_CTRL2),
pfn               100 arch/arm/mach-s3c24xx/mach-bast.c 	  .pfn		= __phys_to_pfn(BAST_PA_CTRL3),
pfn               105 arch/arm/mach-s3c24xx/mach-bast.c 	  .pfn		= __phys_to_pfn(BAST_PA_CTRL4),
pfn               112 arch/arm/mach-s3c24xx/mach-bast.c 	  .pfn		= __phys_to_pfn(BAST_PA_PC104_IRQREQ),
pfn               117 arch/arm/mach-s3c24xx/mach-bast.c 	  .pfn		= __phys_to_pfn(BAST_PA_PC104_IRQRAW),
pfn               122 arch/arm/mach-s3c24xx/mach-bast.c 	  .pfn		= __phys_to_pfn(BAST_PA_PC104_IRQMASK),
pfn                96 arch/arm/mach-s3c24xx/mach-gta02.c 		.pfn		= __phys_to_pfn(S3C2410_CS3 + 0x01000000),
pfn                79 arch/arm/mach-s3c24xx/mach-h1940.c 		.pfn		= __phys_to_pfn(H1940_PA_LATCH),
pfn                61 arch/arm/mach-s3c24xx/mach-osiris.c 	  .pfn		= __phys_to_pfn(S3C2410_CS5),
pfn                66 arch/arm/mach-s3c24xx/mach-osiris.c 	  .pfn		= __phys_to_pfn(S3C2410_CS5),
pfn                75 arch/arm/mach-s3c24xx/mach-osiris.c 	  .pfn		= __phys_to_pfn(OSIRIS_PA_CTRL0),
pfn                80 arch/arm/mach-s3c24xx/mach-osiris.c 	  .pfn		= __phys_to_pfn(OSIRIS_PA_CTRL1),
pfn                85 arch/arm/mach-s3c24xx/mach-osiris.c 	  .pfn		= __phys_to_pfn(OSIRIS_PA_CTRL2),
pfn                90 arch/arm/mach-s3c24xx/mach-osiris.c 	  .pfn		= __phys_to_pfn(OSIRIS_PA_IDREG),
pfn                56 arch/arm/mach-s3c24xx/mach-rx3715.c 		.pfn		= __phys_to_pfn(S3C2410_CS3),
pfn                61 arch/arm/mach-s3c24xx/mach-rx3715.c 		.pfn		= __phys_to_pfn(S3C2410_CS3),
pfn                59 arch/arm/mach-s3c24xx/mach-smdk2416.c 		.pfn		= __phys_to_pfn(S3C2410_CS2),
pfn                64 arch/arm/mach-s3c24xx/mach-smdk2416.c 		.pfn		= __phys_to_pfn(S3C2410_CS2 + (1<<24)),
pfn                69 arch/arm/mach-s3c24xx/mach-smdk2416.c 		.pfn		= __phys_to_pfn(S3C2410_CS2),
pfn                74 arch/arm/mach-s3c24xx/mach-smdk2416.c 		.pfn		= __phys_to_pfn(S3C2410_CS2 + (1<<24)),
pfn                48 arch/arm/mach-s3c24xx/mach-smdk2440.c 		.pfn		= __phys_to_pfn(S3C2410_CS2),
pfn                53 arch/arm/mach-s3c24xx/mach-smdk2440.c 		.pfn		= __phys_to_pfn(S3C2410_CS2 + (1<<24)),
pfn                58 arch/arm/mach-s3c24xx/mach-smdk2440.c 		.pfn		= __phys_to_pfn(S3C2410_CS2),
pfn                63 arch/arm/mach-s3c24xx/mach-smdk2440.c 		.pfn		= __phys_to_pfn(S3C2410_CS2 + (1<<24)),
pfn                47 arch/arm/mach-s3c24xx/mach-smdk2443.c 		.pfn		= __phys_to_pfn(S3C2410_CS2),
pfn                52 arch/arm/mach-s3c24xx/mach-smdk2443.c 		.pfn		= __phys_to_pfn(S3C2410_CS2 + (1<<24)),
pfn                57 arch/arm/mach-s3c24xx/mach-smdk2443.c 		.pfn		= __phys_to_pfn(S3C2410_CS2),
pfn                62 arch/arm/mach-s3c24xx/mach-smdk2443.c 		.pfn		= __phys_to_pfn(S3C2410_CS2 + (1<<24)),
pfn                67 arch/arm/mach-s3c24xx/mach-vr1000.c 	  .pfn		= PA_CS2(BAST_PA_ISAIO),
pfn                72 arch/arm/mach-s3c24xx/mach-vr1000.c 	  .pfn		= PA_CS3(BAST_PA_ISAIO),
pfn                80 arch/arm/mach-s3c24xx/mach-vr1000.c 	  .pfn		= __phys_to_pfn(VR1000_PA_CTRL1),
pfn                85 arch/arm/mach-s3c24xx/mach-vr1000.c 	  .pfn		= __phys_to_pfn(VR1000_PA_CTRL2),
pfn                90 arch/arm/mach-s3c24xx/mach-vr1000.c 	  .pfn		= __phys_to_pfn(VR1000_PA_CTRL3),
pfn                95 arch/arm/mach-s3c24xx/mach-vr1000.c 	  .pfn		= __phys_to_pfn(VR1000_PA_CTRL4),
pfn                66 arch/arm/mach-s3c24xx/s3c2412.c 		.pfn	 = __phys_to_pfn(S3C2412_PA_SSMC),
pfn                72 arch/arm/mach-s3c24xx/s3c2412.c 		.pfn	 = __phys_to_pfn(S3C2412_PA_EBI),
pfn               106 arch/arm/mach-s3c64xx/common.c 		.pfn		= __phys_to_pfn(S3C64XX_PA_SYSCON),
pfn               111 arch/arm/mach-s3c64xx/common.c 		.pfn		= __phys_to_pfn(S3C64XX_PA_SROM),
pfn               116 arch/arm/mach-s3c64xx/common.c 		.pfn		= __phys_to_pfn(S3C_PA_UART),
pfn               121 arch/arm/mach-s3c64xx/common.c 		.pfn		= __phys_to_pfn(S3C64XX_PA_VIC0),
pfn               126 arch/arm/mach-s3c64xx/common.c 		.pfn		= __phys_to_pfn(S3C64XX_PA_VIC1),
pfn               131 arch/arm/mach-s3c64xx/common.c 		.pfn		= __phys_to_pfn(S3C_PA_TIMER),
pfn               136 arch/arm/mach-s3c64xx/common.c 		.pfn		= __phys_to_pfn(S3C64XX_PA_GPIO),
pfn               141 arch/arm/mach-s3c64xx/common.c 		.pfn		= __phys_to_pfn(S3C64XX_PA_MODEM),
pfn               146 arch/arm/mach-s3c64xx/common.c 		.pfn		= __phys_to_pfn(S3C64XX_PA_WATCHDOG),
pfn               151 arch/arm/mach-s3c64xx/common.c 		.pfn		= __phys_to_pfn(S3C64XX_PA_USB_HSPHY),
pfn               190 arch/arm/mach-s3c64xx/mach-anw6410.c 		.pfn		= __phys_to_pfn(ANW6410_PA_EXTDEV),
pfn                25 arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c 		.pfn		= __phys_to_pfn(S3C64XX_PA_SYSCON),
pfn                35 arch/arm/mach-s5pv210/s5pv210.c 	iodesc.pfn = __phys_to_pfn(be32_to_cpu(reg[0]));
pfn               697 arch/arm/mach-sa1100/assabet.c 		.pfn		= __phys_to_pfn(0x12000000),
pfn               702 arch/arm/mach-sa1100/assabet.c 		.pfn		= __phys_to_pfn(0x4b800000),
pfn               294 arch/arm/mach-sa1100/badge4.c 		.pfn		= __phys_to_pfn(0x08000000),
pfn               299 arch/arm/mach-sa1100/badge4.c 		.pfn		= __phys_to_pfn(0x10000000),
pfn               141 arch/arm/mach-sa1100/cerf.c 		.pfn		= __phys_to_pfn(0x08000000),
pfn               406 arch/arm/mach-sa1100/collie.c 		.pfn		= __phys_to_pfn(0x00000000),
pfn               411 arch/arm/mach-sa1100/collie.c 		.pfn		= __phys_to_pfn(0x08000000),
pfn               383 arch/arm/mach-sa1100/generic.c 		.pfn		= __phys_to_pfn(0x80000000),
pfn               388 arch/arm/mach-sa1100/generic.c 		.pfn		= __phys_to_pfn(0x90000000),
pfn               393 arch/arm/mach-sa1100/generic.c 		.pfn		= __phys_to_pfn(0xa0000000),
pfn               398 arch/arm/mach-sa1100/generic.c 		.pfn		= __phys_to_pfn(0xb0000000),
pfn               248 arch/arm/mach-sa1100/h3xxx.c 		.pfn		= __phys_to_pfn(SA1100_CS2_PHYS),
pfn               253 arch/arm/mach-sa1100/h3xxx.c 		.pfn		= __phys_to_pfn(SA1100_CS4_PHYS),
pfn               258 arch/arm/mach-sa1100/h3xxx.c 		.pfn		= __phys_to_pfn(H3600_EGPIO_PHYS),
pfn                61 arch/arm/mach-sa1100/hackkit.c 		.pfn		= __phys_to_pfn(0x00000000),
pfn               288 arch/arm/mach-sa1100/jornada720.c 		.pfn		= __phys_to_pfn(EPSONREGSTART),
pfn               293 arch/arm/mach-sa1100/jornada720.c 		.pfn		= __phys_to_pfn(EPSONFBSTART),
pfn               119 arch/arm/mach-sa1100/lart.c 		.pfn		= __phys_to_pfn(0x00000000),
pfn               124 arch/arm/mach-sa1100/lart.c 		.pfn		= __phys_to_pfn(0x08000000),
pfn                67 arch/arm/mach-sa1100/nanoengine.c 		.pfn		= __phys_to_pfn(0x10000000),
pfn                73 arch/arm/mach-sa1100/nanoengine.c 		.pfn		= __phys_to_pfn(NANO_PCI_MEM_RW_PHYS),
pfn                79 arch/arm/mach-sa1100/nanoengine.c 		.pfn		= __phys_to_pfn(NANO_PCI_CONFIG_SPACE_PHYS),
pfn               126 arch/arm/mach-sa1100/simpad.c 		.pfn		= __phys_to_pfn(0x4b800000),
pfn               131 arch/arm/mach-sa1100/simpad.c 		.pfn		= __phys_to_pfn(SA1100_CS3_PHYS),
pfn                22 arch/arm/mach-shmobile/setup-r8a7779.c 		.pfn		= __phys_to_pfn(0xf0000000),
pfn                29 arch/arm/mach-shmobile/setup-r8a7779.c 		.pfn		= __phys_to_pfn(0xfe000000),
pfn                31 arch/arm/mach-shmobile/setup-sh73a0.c 		.pfn		= __phys_to_pfn(0xe6000000),
pfn                46 arch/arm/mach-spear/spear1310.c 		.pfn		= __phys_to_pfn(SPEAR1310_RAS_GRP1_BASE),
pfn                63 arch/arm/mach-spear/spear13xx.c 		.pfn		= __phys_to_pfn(PERIP_GRP2_BASE),
pfn                68 arch/arm/mach-spear/spear13xx.c 		.pfn		= __phys_to_pfn(PERIP_GRP1_BASE),
pfn                73 arch/arm/mach-spear/spear13xx.c 		.pfn		= __phys_to_pfn(A9SM_AND_MPMC_BASE),
pfn                78 arch/arm/mach-spear/spear13xx.c 		.pfn		= __phys_to_pfn(L2CC_BASE),
pfn               256 arch/arm/mach-spear/spear320.c 		.pfn		= __phys_to_pfn(SPEAR320_SOC_CONFIG_BASE),
pfn                66 arch/arm/mach-spear/spear3xx.c 		.pfn		= __phys_to_pfn(SPEAR_ICM1_2_BASE),
pfn                71 arch/arm/mach-spear/spear3xx.c 		.pfn		= __phys_to_pfn(SPEAR_ICM3_SMI_CTRL_BASE),
pfn               348 arch/arm/mach-spear/spear6xx.c 		.pfn		= __phys_to_pfn(SPEAR_ICM3_ML1_2_BASE),
pfn               353 arch/arm/mach-spear/spear6xx.c 		.pfn		= __phys_to_pfn(SPEAR_ICM1_2_BASE),
pfn               358 arch/arm/mach-spear/spear6xx.c 		.pfn		= __phys_to_pfn(SPEAR_ICM3_SMI_CTRL_BASE),
pfn                27 arch/arm/mach-tegra/io.c 		.pfn = __phys_to_pfn(IO_PPSB_PHYS),
pfn                33 arch/arm/mach-tegra/io.c 		.pfn = __phys_to_pfn(IO_APB_PHYS),
pfn                39 arch/arm/mach-tegra/io.c 		.pfn = __phys_to_pfn(IO_CPU_PHYS),
pfn                45 arch/arm/mach-tegra/io.c 		.pfn = __phys_to_pfn(IO_IRAM_PHYS),
pfn               172 arch/arm/mach-u300/core.c 		.pfn		= __phys_to_pfn(U300_SLOW_PER_PHYS_BASE),
pfn               178 arch/arm/mach-u300/core.c 		.pfn		= __phys_to_pfn(U300_AHB_PER_PHYS_BASE),
pfn               184 arch/arm/mach-u300/core.c 		.pfn		= __phys_to_pfn(U300_FAST_PER_PHYS_BASE),
pfn                97 arch/arm/mach-versatile/versatile_dt.c 		.pfn		= __phys_to_pfn(VERSATILE_SCTL_BASE),
pfn                43 arch/arm/mach-vt8500/vt8500.c 		.pfn		= __phys_to_pfn(0xd8000000),
pfn               159 arch/arm/mach-zynq/common.c 	zynq_cortex_a9_scu_map.pfn = __phys_to_pfn(base);
pfn               439 arch/arm/mm/dma-mapping.c 		map.pfn = __phys_to_pfn(start);
pfn               794 arch/arm/mm/dma-mapping.c 	unsigned long pfn = dma_to_pfn(dev, dma_addr);
pfn               802 arch/arm/mm/dma-mapping.c 				      pfn + off,
pfn               869 arch/arm/mm/dma-mapping.c 	unsigned long pfn = dma_to_pfn(dev, handle);
pfn               874 arch/arm/mm/dma-mapping.c 	if (!pfn_valid(pfn))
pfn               877 arch/arm/mm/dma-mapping.c 	page = pfn_to_page(pfn);
pfn               891 arch/arm/mm/dma-mapping.c 	unsigned long pfn;
pfn               894 arch/arm/mm/dma-mapping.c 	pfn = page_to_pfn(page) + offset / PAGE_SIZE;
pfn               907 arch/arm/mm/dma-mapping.c 		page = pfn_to_page(pfn);
pfn               929 arch/arm/mm/dma-mapping.c 		pfn++;
pfn               973 arch/arm/mm/dma-mapping.c 		unsigned long pfn;
pfn               976 arch/arm/mm/dma-mapping.c 		pfn = page_to_pfn(page) + off / PAGE_SIZE;
pfn               979 arch/arm/mm/dma-mapping.c 			pfn++;
pfn               983 arch/arm/mm/dma-mapping.c 			page = pfn_to_page(pfn++);
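The arm/mm/dma-mapping.c hits walk a possibly-highmem buffer frame by frame: start from `page_to_pfn(page) + offset / PAGE_SIZE`, then recompute `page = pfn_to_page(pfn++)` each step rather than trusting pointer arithmetic across a discontiguous mem_map. A toy sketch of that walk, where mem_map is a plain array so pfn_to_page() is just indexing:

```c
#include <stdio.h>

#define PAGE_SHIFT 12UL
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

struct page { int dummy; };
static struct page mem_map[16];    /* toy flat memory map */

static unsigned long page_to_pfn(struct page *p)   { return (unsigned long)(p - mem_map); }
static struct page *pfn_to_page(unsigned long pfn) { return &mem_map[pfn]; }

int main(void)
{
	struct page *first  = &mem_map[3];
	unsigned long offset = 2 * PAGE_SIZE + 123;
	/* skip whole pages of the offset, as in the dma-mapping.c hits */
	unsigned long pfn = page_to_pfn(first) + offset / PAGE_SIZE;
	unsigned long left = 3 * PAGE_SIZE;

	while (left) {
		struct page *page = pfn_to_page(pfn++);  /* recomputed per frame */
		printf("touch frame %lu\n", page_to_pfn(page));
		left -= PAGE_SIZE;
	}
	return 0;
}
```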
pfn                38 arch/arm/mm/fault-armv.c 	unsigned long pfn, pte_t *ptep)
pfn                53 arch/arm/mm/fault-armv.c 		flush_cache_page(vma, address, pfn);
pfn                54 arch/arm/mm/fault-armv.c 		outer_flush_range((pfn << PAGE_SHIFT),
pfn                55 arch/arm/mm/fault-armv.c 				  (pfn << PAGE_SHIFT) + PAGE_SIZE);
pfn                90 arch/arm/mm/fault-armv.c 	unsigned long pfn)
pfn               120 arch/arm/mm/fault-armv.c 	ret = do_adjust_pte(vma, address, pfn, pte);
pfn               130 arch/arm/mm/fault-armv.c 	unsigned long addr, pte_t *ptep, unsigned long pfn)
pfn               157 arch/arm/mm/fault-armv.c 		aliases += adjust_pte(mpnt, mpnt->vm_start + offset, pfn);
pfn               161 arch/arm/mm/fault-armv.c 		do_adjust_pte(vma, addr, pfn, ptep);
pfn               180 arch/arm/mm/fault-armv.c 	unsigned long pfn = pte_pfn(*ptep);
pfn               184 arch/arm/mm/fault-armv.c 	if (!pfn_valid(pfn))
pfn               191 arch/arm/mm/fault-armv.c 	page = pfn_to_page(pfn);
pfn               200 arch/arm/mm/fault-armv.c 			make_coherent(mapping, vma, addr, ptep, pfn);
pfn                38 arch/arm/mm/flush.c static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
pfn                43 arch/arm/mm/flush.c 	set_top_pte(to, pfn_pte(pfn, PAGE_KERNEL));
pfn                52 arch/arm/mm/flush.c static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len)
pfn                58 arch/arm/mm/flush.c 	set_top_pte(va, pfn_pte(pfn, PAGE_KERNEL));
pfn                98 arch/arm/mm/flush.c void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
pfn               101 arch/arm/mm/flush.c 		vivt_flush_cache_page(vma, user_addr, pfn);
pfn               106 arch/arm/mm/flush.c 		flush_pfn_alias(pfn, user_addr);
pfn               115 arch/arm/mm/flush.c #define flush_pfn_alias(pfn,vaddr)		do { } while (0)
pfn               116 arch/arm/mm/flush.c #define flush_icache_alias(pfn,vaddr,len)	do { } while (0)
pfn               271 arch/arm/mm/flush.c 	unsigned long pfn;
pfn               278 arch/arm/mm/flush.c 	pfn = pte_pfn(pteval);
pfn               279 arch/arm/mm/flush.c 	if (!pfn_valid(pfn))
pfn               282 arch/arm/mm/flush.c 	page = pfn_to_page(pfn);
pfn               392 arch/arm/mm/flush.c 	unsigned long pfn;
pfn               401 arch/arm/mm/flush.c 	pfn = page_to_pfn(page);
pfn               403 arch/arm/mm/flush.c 		flush_cache_page(vma, vmaddr, pfn);
pfn               409 arch/arm/mm/flush.c 		flush_pfn_alias(pfn, vmaddr);
pfn               126 arch/arm/mm/highmem.c void *kmap_atomic_pfn(unsigned long pfn)
pfn               130 arch/arm/mm/highmem.c 	struct page *page = pfn_to_page(pfn);
pfn               143 arch/arm/mm/highmem.c 	set_fixmap_pte(idx, pfn_pte(pfn, kmap_prot));
pfn               176 arch/arm/mm/init.c int pfn_valid(unsigned long pfn)
pfn               178 arch/arm/mm/init.c 	phys_addr_t addr = __pfn_to_phys(pfn);
pfn               180 arch/arm/mm/init.c 	if (__phys_to_pfn(addr) != pfn)
pfn               183 arch/arm/mm/init.c 	return memblock_is_map_memory(__pfn_to_phys(pfn));
pfn               407 arch/arm/mm/init.c static inline void free_area_high(unsigned long pfn, unsigned long end)
pfn               409 arch/arm/mm/init.c 	for (; pfn < end; pfn++)
pfn               410 arch/arm/mm/init.c 		free_highmem_page(pfn_to_page(pfn));
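ARM's runtime pfn_valid() above first round-trips the PFN through a phys_addr_t to reject frames whose address would not even fit, then asks memblock whether the address is mapped memory. A sketch of that overflow guard, assuming a 32-bit phys_addr_t (!LPAE) and stubbing out the memblock query:

```c
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12UL
typedef uint32_t phys_addr_t;     /* pretend !LPAE: 32-bit physical addresses */

/* Stub for memblock_is_map_memory(); the real check walks memblock. */
static int memblock_is_map_memory(phys_addr_t a) { (void)a; return 1; }

static int pfn_valid(unsigned long pfn)
{
	phys_addr_t addr = (phys_addr_t)(pfn << PAGE_SHIFT);

	if ((unsigned long)(addr >> PAGE_SHIFT) != pfn)
		return 0;             /* didn't survive the round trip: out of range */
	return memblock_is_map_memory(addr);
}

int main(void)
{
	printf("%d %d\n", pfn_valid(0x12345), pfn_valid(0x123456));  /* 1 0 */
	return 0;
}
```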
pfn               188 arch/arm/mm/ioremap.c remap_area_sections(unsigned long virt, unsigned long pfn,
pfn               206 arch/arm/mm/ioremap.c 		pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
pfn               207 arch/arm/mm/ioremap.c 		pfn += SZ_1M >> PAGE_SHIFT;
pfn               208 arch/arm/mm/ioremap.c 		pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
pfn               209 arch/arm/mm/ioremap.c 		pfn += SZ_1M >> PAGE_SHIFT;
pfn               220 arch/arm/mm/ioremap.c remap_area_supersections(unsigned long virt, unsigned long pfn,
pfn               240 arch/arm/mm/ioremap.c 		super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
pfn               242 arch/arm/mm/ioremap.c 		super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;
pfn               253 arch/arm/mm/ioremap.c 		pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
pfn               260 arch/arm/mm/ioremap.c static void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
pfn               267 arch/arm/mm/ioremap.c 	phys_addr_t paddr = __pfn_to_phys(pfn);
pfn               273 arch/arm/mm/ioremap.c 	if (pfn >= 0x100000 && (paddr & ~SUPERSECTION_MASK))
pfn               289 arch/arm/mm/ioremap.c 	if (size && !(sizeof(phys_addr_t) == 4 && pfn >= 0x100000)) {
pfn               304 arch/arm/mm/ioremap.c 	if (WARN_ON(pfn_valid(pfn) && mtype != MT_MEMORY_RW))
pfn               316 arch/arm/mm/ioremap.c 	       cpu_is_xsc3()) && pfn >= 0x100000 &&
pfn               319 arch/arm/mm/ioremap.c 		err = remap_area_supersections(addr, pfn, size, type);
pfn               322 arch/arm/mm/ioremap.c 		err = remap_area_sections(addr, pfn, size, type);
pfn               342 arch/arm/mm/ioremap.c  	unsigned long pfn = __phys_to_pfn(phys_addr);
pfn               351 arch/arm/mm/ioremap.c 	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
pfn               365 arch/arm/mm/ioremap.c __arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
pfn               368 arch/arm/mm/ioremap.c 	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
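The ioremap.c hits use `pfn >= 0x100000` as the "above 4 GiB" test (with 4 KiB pages, frame 0x100000 is exactly the 4 GiB boundary) and, for supersections, fold physical-address bits [35:32] into descriptor bits [23:20], per the `((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20` line above. A sketch of just the address-bit packing, with the protection bits left out:

```c
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12UL

/* Pack the address bits of an ARM supersection descriptor, following the
 * remap_area_supersections() hits above: PA[31:0] in place (low bits are
 * zero for an aligned supersection), extended PA[35:32] in bits [23:20]. */
static uint32_t supersection_addr_bits(uint64_t pfn)
{
	uint32_t val = (uint32_t)(pfn << PAGE_SHIFT);               /* PA[31:0]  */
	val |= ((uint32_t)(pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20; /* PA[35:32] */
	return val;
}

int main(void)
{
	uint64_t pfn = 0x340000;    /* phys 0x3_4000_0000, above 4 GiB */
	printf("descriptor addr bits: %#x\n", supersection_addr_bits(pfn));
	return 0;
}
```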
pfn               164 arch/arm/mm/mmap.c int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
pfn               166 arch/arm/mm/mmap.c 	return (pfn + (size >> PAGE_SHIFT)) <= (1 + (PHYS_MASK >> PAGE_SHIFT));
pfn               180 arch/arm/mm/mmap.c int devmem_is_allowed(unsigned long pfn)
pfn               182 arch/arm/mm/mmap.c 	if (iomem_is_exclusive(pfn << PAGE_SHIFT))
pfn               184 arch/arm/mm/mmap.c 	if (!page_is_ram(pfn))
pfn               705 arch/arm/mm/mmu.c pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
pfn               708 arch/arm/mm/mmu.c 	if (!pfn_valid(pfn))
pfn               758 arch/arm/mm/mmu.c 				  unsigned long end, unsigned long pfn,
pfn               765 arch/arm/mm/mmu.c 		set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)),
pfn               767 arch/arm/mm/mmu.c 		pfn++;
pfn               856 arch/arm/mm/mmu.c 	phys = __pfn_to_phys(md->pfn);
pfn               861 arch/arm/mm/mmu.c 		       (long long)__pfn_to_phys((u64)md->pfn), addr);
pfn               873 arch/arm/mm/mmu.c 		       (long long)__pfn_to_phys((u64)md->pfn), addr);
pfn               877 arch/arm/mm/mmu.c 	if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) {
pfn               879 arch/arm/mm/mmu.c 		       (long long)__pfn_to_phys((u64)md->pfn), addr);
pfn               887 arch/arm/mm/mmu.c 	phys |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);
pfn               922 arch/arm/mm/mmu.c 	if (md->pfn >= 0x100000) {
pfn               929 arch/arm/mm/mmu.c 	phys = __pfn_to_phys(md->pfn);
pfn               934 arch/arm/mm/mmu.c 			(long long)__pfn_to_phys(md->pfn), addr);
pfn               961 arch/arm/mm/mmu.c 			(long long)__pfn_to_phys((u64)md->pfn), md->virtual);
pfn               969 arch/arm/mm/mmu.c 			(long long)__pfn_to_phys((u64)md->pfn), md->virtual);
pfn              1010 arch/arm/mm/mmu.c 		vm->phys_addr = __pfn_to_phys(md->pfn);
pfn              1122 arch/arm/mm/mmu.c 	debug_ll_addr(&map.pfn, &map.virtual);
pfn              1123 arch/arm/mm/mmu.c 	if (!map.pfn || !map.virtual)
pfn              1125 arch/arm/mm/mmu.c 	map.pfn = __phys_to_pfn(map.pfn);
pfn              1360 arch/arm/mm/mmu.c 	map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
pfn              1371 arch/arm/mm/mmu.c 	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
pfn              1378 arch/arm/mm/mmu.c 	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
pfn              1390 arch/arm/mm/mmu.c 	map.pfn = __phys_to_pfn(virt_to_phys(vectors));
pfn              1408 arch/arm/mm/mmu.c 	map.pfn += 1;
pfn              1471 arch/arm/mm/mmu.c 			map.pfn = __phys_to_pfn(start);
pfn              1478 arch/arm/mm/mmu.c 			map.pfn = __phys_to_pfn(start);
pfn              1487 arch/arm/mm/mmu.c 				map.pfn = __phys_to_pfn(start);
pfn              1495 arch/arm/mm/mmu.c 			map.pfn = __phys_to_pfn(kernel_x_start);
pfn              1503 arch/arm/mm/mmu.c 				map.pfn = __phys_to_pfn(kernel_x_end);
pfn              1631 arch/arm/mm/mmu.c 		map.pfn = pte_pfn(*pte);
pfn               184 arch/arm/mm/nommu.c void __iomem *__arm_ioremap_pfn(unsigned long pfn, unsigned long offset,
pfn               187 arch/arm/mm/nommu.c 	if (pfn >= (0x100000000ULL >> PAGE_SHIFT))
pfn               189 arch/arm/mm/nommu.c 	return (void __iomem *) (offset + (pfn << PAGE_SHIFT));
pfn               366 arch/arm/xen/enlighten.c 	if (xen_xlate_map_ballooned_pages(&xen_auto_xlat_grant_frames.pfn,
pfn                22 arch/arm/xen/p2m.c 	unsigned long pfn;
pfn                43 arch/arm/xen/p2m.c 		if (new->pfn == entry->pfn)
pfn                46 arch/arm/xen/p2m.c 		if (new->pfn < entry->pfn)
pfn                58 arch/arm/xen/p2m.c 			__func__, &new->pfn, &new->mfn, &entry->pfn, &entry->mfn);
pfn                63 arch/arm/xen/p2m.c unsigned long __pfn_to_mfn(unsigned long pfn)
pfn                72 arch/arm/xen/p2m.c 		if (entry->pfn <= pfn &&
pfn                73 arch/arm/xen/p2m.c 				entry->pfn + entry->nr_pages > pfn) {
pfn                74 arch/arm/xen/p2m.c 			unsigned long mfn = entry->mfn + (pfn - entry->pfn);
pfn                78 arch/arm/xen/p2m.c 		if (pfn < entry->pfn)
pfn               121 arch/arm/xen/p2m.c bool __set_phys_to_machine_multi(unsigned long pfn,
pfn               133 arch/arm/xen/p2m.c 			if (p2m_entry->pfn <= pfn &&
pfn               134 arch/arm/xen/p2m.c 					p2m_entry->pfn + p2m_entry->nr_pages > pfn) {
pfn               140 arch/arm/xen/p2m.c 			if (pfn < p2m_entry->pfn)
pfn               153 arch/arm/xen/p2m.c 	p2m_entry->pfn = pfn;
pfn               169 arch/arm/xen/p2m.c bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
pfn               171 arch/arm/xen/p2m.c 	return __set_phys_to_machine_multi(pfn, mfn, 1);
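The arch/arm/xen/p2m.c hits implement a range map: each entry covers `[pfn, pfn + nr_pages)`, and __pfn_to_mfn() translates by adding the offset into the matching range, as the `entry->mfn + (pfn - entry->pfn)` line shows. A sketch of that lookup with a plain sorted array standing in for the kernel's rbtree; the sample ranges are invented:

```c
#include <stdio.h>

/* Range-based pfn->mfn lookup in the spirit of arch/arm/xen/p2m.c. */
struct p2m_entry { unsigned long pfn, mfn, nr_pages; };

static struct p2m_entry p2m[] = {
	{ .pfn = 0x100, .mfn = 0x9000, .nr_pages = 0x10 },
	{ .pfn = 0x200, .mfn = 0x4000, .nr_pages = 0x80 },
};

#define INVALID_P2M_ENTRY (~0UL)

static unsigned long __pfn_to_mfn(unsigned long pfn)
{
	for (unsigned i = 0; i < sizeof(p2m) / sizeof(p2m[0]); i++)
		if (p2m[i].pfn <= pfn && pfn < p2m[i].pfn + p2m[i].nr_pages)
			return p2m[i].mfn + (pfn - p2m[i].pfn); /* offset in range */
	return INVALID_P2M_ENTRY;    /* identity-mapped / untracked */
}

int main(void)
{
	printf("%#lx %#lx\n", __pfn_to_mfn(0x105), __pfn_to_mfn(0x300));
	return 0;
}
```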
pfn               103 arch/arm64/include/asm/cacheflush.h 				    unsigned long user_addr, unsigned long pfn)
pfn               203 arch/arm64/include/asm/io.h extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
pfn               205 arch/arm64/include/asm/io.h extern int devmem_is_allowed(unsigned long pfn);
pfn                84 arch/arm64/include/asm/kexec.h extern bool crash_is_nosave(unsigned long pfn);
pfn                88 arch/arm64/include/asm/kexec.h static inline bool crash_is_nosave(unsigned long pfn) {return false; }
pfn               179 arch/arm64/include/asm/kvm_mmu.h #define kvm_pfn_pte(pfn, prot)		pfn_pte(pfn, prot)
pfn               180 arch/arm64/include/asm/kvm_mmu.h #define kvm_pfn_pmd(pfn, prot)		pfn_pmd(pfn, prot)
pfn               181 arch/arm64/include/asm/kvm_mmu.h #define kvm_pfn_pud(pfn, prot)		pfn_pud(pfn, prot)
pfn               310 arch/arm64/include/asm/kvm_mmu.h static inline void __clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
pfn               312 arch/arm64/include/asm/kvm_mmu.h 	void *va = page_address(pfn_to_page(pfn));
pfn               326 arch/arm64/include/asm/kvm_mmu.h static inline void __invalidate_icache_guest_page(kvm_pfn_t pfn,
pfn               334 arch/arm64/include/asm/kvm_mmu.h 		void *va = page_address(pfn_to_page(pfn));
pfn               308 arch/arm64/include/asm/memory.h #define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
pfn                66 arch/arm64/include/asm/pgtable.h #define pfn_pte(pfn,prot)	\
pfn                67 arch/arm64/include/asm/pgtable.h 	__pte(__phys_to_pte_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
pfn               388 arch/arm64/include/asm/pgtable.h #define pfn_pmd(pfn,prot)	__pmd(__phys_to_pmd_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
pfn               400 arch/arm64/include/asm/pgtable.h #define pfn_pud(pfn,prot)	__pud(__phys_to_pud_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
pfn               433 arch/arm64/include/asm/pgtable.h extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
pfn                28 arch/arm64/kernel/crash_dump.c ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
pfn                37 arch/arm64/kernel/crash_dump.c 	vaddr = memremap(__pfn_to_phys(pfn), PAGE_SIZE, MEMREMAP_WB);
pfn               101 arch/arm64/kernel/hibernate.c int pfn_is_nosave(unsigned long pfn)
pfn               106 arch/arm64/kernel/hibernate.c 	return ((pfn >= nosave_begin_pfn) && (pfn <= nosave_end_pfn)) ||
pfn               107 arch/arm64/kernel/hibernate.c 		crash_is_nosave(pfn);
pfn               329 arch/arm64/kernel/machine_kexec.c bool crash_is_nosave(unsigned long pfn)
pfn               338 arch/arm64/kernel/machine_kexec.c 	addr = __pfn_to_phys(pfn);
pfn               104 arch/arm64/kernel/vdso.c 	unsigned long pfn;
pfn               128 arch/arm64/kernel/vdso.c 	pfn = sym_to_pfn(vdso_lookup[arch_index].vdso_code_start);
pfn               131 arch/arm64/kernel/vdso.c 		vdso_pagelist[i + 1] = pfn_to_page(pfn + i);
pfn                61 arch/arm64/mm/hugetlbpage.c 	unsigned long pfn = pte_pfn(pte);
pfn                63 arch/arm64/mm/hugetlbpage.c 	return __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte));
pfn               179 arch/arm64/mm/hugetlbpage.c 	unsigned long pfn, dpfn;
pfn               194 arch/arm64/mm/hugetlbpage.c 	pfn = pte_pfn(pte);
pfn               200 arch/arm64/mm/hugetlbpage.c 	for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
pfn               201 arch/arm64/mm/hugetlbpage.c 		set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
pfn               375 arch/arm64/mm/hugetlbpage.c 	unsigned long pfn = pte_pfn(pte), dpfn;
pfn               398 arch/arm64/mm/hugetlbpage.c 	for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
pfn               399 arch/arm64/mm/hugetlbpage.c 		set_pte_at(vma->vm_mm, addr, ptep, pfn_pte(pfn, hugeprot));
pfn               407 arch/arm64/mm/hugetlbpage.c 	unsigned long pfn, dpfn;
pfn               425 arch/arm64/mm/hugetlbpage.c 	pfn = pte_pfn(pte);
pfn               427 arch/arm64/mm/hugetlbpage.c 	for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
pfn               428 arch/arm64/mm/hugetlbpage.c 		set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
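The arm64 hugetlbpage.c hits all share one loop shape: ncontig PTEs written back to back, with the address advancing by pgsize and the PFN by dpfn, the per-entry page size expressed in frames (`dpfn = pgsize >> PAGE_SHIFT`). A sketch of that loop with set_pte_at() reduced to a printf:

```c
#include <stdio.h>

#define PAGE_SHIFT 12UL

/* Shape of the contiguous-PTE update loops in arch/arm64/mm/hugetlbpage.c:
 * ncontig entries, each pgsize bytes apart, each dpfn frames apart. */
static void set_contig_range(unsigned long addr, unsigned long pfn,
			     int ncontig, unsigned long pgsize)
{
	unsigned long dpfn = pgsize >> PAGE_SHIFT;

	for (int i = 0; i < ncontig; i++, addr += pgsize, pfn += dpfn)
		printf("pte @%#lx -> pfn %#lx\n", addr, pfn);   /* set_pte_at() */
}

int main(void)
{
	/* e.g. a 64 KiB "contig" mapping made of 16 x 4 KiB PTEs */
	set_contig_range(0x400000, 0x800, 16, 1UL << PAGE_SHIFT);
	return 0;
}
```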
pfn               241 arch/arm64/mm/init.c int pfn_valid(unsigned long pfn)
pfn               243 arch/arm64/mm/init.c 	phys_addr_t addr = pfn << PAGE_SHIFT;
pfn               245 arch/arm64/mm/init.c 	if ((addr >> PAGE_SHIFT) != pfn)
pfn               249 arch/arm64/mm/init.c 	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
pfn               252 arch/arm64/mm/init.c 	if (!valid_section(__nr_to_section(pfn_to_section_nr(pfn))))
pfn                46 arch/arm64/mm/mmap.c int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
pfn                48 arch/arm64/mm/mmap.c 	return !(((pfn << PAGE_SHIFT) + size) & ~PHYS_MASK);
pfn                61 arch/arm64/mm/mmap.c int devmem_is_allowed(unsigned long pfn)
pfn                63 arch/arm64/mm/mmap.c 	if (iomem_is_exclusive(pfn << PAGE_SHIFT))
pfn                65 arch/arm64/mm/mmap.c 	if (!page_is_ram(pfn))
pfn                79 arch/arm64/mm/mmu.c pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
pfn                82 arch/arm64/mm/mmu.c 	if (!pfn_valid(pfn))
pfn                26 arch/c6x/include/asm/cacheflush.h #define flush_cache_page(vma, vmaddr, pfn)	do {} while (0)
pfn                39 arch/csky/abiv1/cacheflush.c 	unsigned long pfn = pte_pfn(*ptep);
pfn                42 arch/csky/abiv1/cacheflush.c 	if (!pfn_valid(pfn))
pfn                45 arch/csky/abiv1/cacheflush.c 	page = pfn_to_page(pfn);
pfn                15 arch/csky/abiv1/inc/abi/cacheflush.h #define flush_cache_page(vma, page, pfn)	cache_wbinv_all()
pfn                35 arch/csky/abiv2/cacheflush.c 	unsigned long addr, pfn;
pfn                38 arch/csky/abiv2/cacheflush.c 	pfn = pte_pfn(*pte);
pfn                39 arch/csky/abiv2/cacheflush.c 	if (unlikely(!pfn_valid(pfn)))
pfn                42 arch/csky/abiv2/cacheflush.c 	page = pfn_to_page(pfn);
pfn                23 arch/csky/abiv2/inc/abi/cacheflush.h #define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
pfn                40 arch/csky/include/asm/highmem.h extern void *kmap_atomic_pfn(unsigned long pfn);
pfn                38 arch/csky/include/asm/page.h #define pfn_to_virt(pfn)        __va((pfn) << PAGE_SHIFT)
pfn                42 arch/csky/include/asm/page.h #define pfn_valid(pfn)		((pfn) >= ARCH_PFN_OFFSET && ((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
pfn                52 arch/csky/include/asm/pgtable.h #define pfn_pte(pfn, prot) __pte(((unsigned long long)(pfn) << PAGE_SHIFT) \
pfn               246 arch/csky/include/asm/pgtable.h extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
pfn               309 arch/csky/include/asm/pgtable.h #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
pfn               310 arch/csky/include/asm/pgtable.h 	remap_pfn_range(vma, vaddr, pfn, size, prot)
pfn                91 arch/csky/mm/highmem.c void *kmap_atomic_pfn(unsigned long pfn)
pfn               101 arch/csky/mm/highmem.c 	set_pte(kmap_pte-idx, pfn_pte(pfn, PAGE_KERNEL));
pfn                60 arch/csky/mm/ioremap.c pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
pfn                63 arch/csky/mm/ioremap.c 	if (!pfn_valid(pfn)) {
pfn                32 arch/hexagon/include/asm/cacheflush.h #define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
pfn                99 arch/hexagon/include/asm/page.h #define pfn_valid(pfn) ((pfn) < max_mapnr)
pfn               131 arch/hexagon/include/asm/page.h #define pfn_to_virt(pfn)        __va((pfn) << PAGE_SHIFT)
pfn               395 arch/hexagon/include/asm/pgtable.h #define pfn_pte(pfn, pgprot) __pte((pfn << PAGE_SHIFT) | pgprot_val(pgprot))
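Across hexagon, csky, m68k, and (below) ia64, the pfn_pte() hits are the same construction: shift the PFN up into the address field and OR in the protection bits; pte_pfn() is the inverse. A self-contained sketch of that pair; the protection value is illustrative:

```c
#include <stdio.h>

#define PAGE_SHIFT 12UL

/* The common pfn_pte() construction seen in the hexagon/csky/m68k/ia64
 * hits: PTE = (pfn << PAGE_SHIFT) | protection bits. */
typedef struct { unsigned long pte; } pte_t;

static pte_t pfn_pte(unsigned long pfn, unsigned long pgprot)
{
	return (pte_t){ (pfn << PAGE_SHIFT) | pgprot };
}

static unsigned long pte_pfn(pte_t pte)
{
	return pte.pte >> PAGE_SHIFT;   /* strip prot bits, recover the frame */
}

int main(void)
{
	pte_t pte = pfn_pte(0x1234, 0x3);   /* 0x3: pretend "present|writable" */
	printf("pte=%#lx pfn=%#lx\n", pte.pte, pte_pfn(pte));
	return 0;
}
```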
pfn                24 arch/ia64/include/asm/cacheflush.h #define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
pfn                97 arch/ia64/include/asm/io.h extern int valid_mmap_phys_addr_range (unsigned long pfn, size_t count);
pfn                20 arch/ia64/include/asm/mmzone.h static inline int pfn_to_nid(unsigned long pfn)
pfn                23 arch/ia64/include/asm/mmzone.h 	int nid = paddr_to_nid(pfn << PAGE_SHIFT);
pfn                99 arch/ia64/include/asm/page.h extern int ia64_pfn_valid (unsigned long pfn);
pfn               101 arch/ia64/include/asm/page.h # define ia64_pfn_valid(pfn) 1
pfn               108 arch/ia64/include/asm/page.h # define pfn_to_page(pfn)	(vmem_map + (pfn))
pfn               109 arch/ia64/include/asm/page.h # define __pfn_to_phys(pfn)	PFN_PHYS(pfn)
pfn               118 arch/ia64/include/asm/page.h # define pfn_valid(pfn)		(((pfn) < max_mapnr) && ia64_pfn_valid(pfn))
pfn               122 arch/ia64/include/asm/page.h # define pfn_valid(pfn)		(((pfn) >= min_low_pfn) && ((pfn) < max_low_pfn) && ia64_pfn_valid(pfn))
pfn               127 arch/ia64/include/asm/page.h #define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
pfn               250 arch/ia64/include/asm/pgtable.h #define pfn_pte(pfn, pgprot) \
pfn               251 arch/ia64/include/asm/pgtable.h ({ pte_t __pte; pte_val(__pte) = ((pfn) << PAGE_SHIFT) | pgprot_val(pgprot); __pte; })
pfn               357 arch/ia64/include/asm/pgtable.h extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
pfn                35 arch/ia64/kernel/crash_dump.c copy_oldmem_page(unsigned long pfn, char *buf,
pfn                42 arch/ia64/kernel/crash_dump.c 	vaddr = __va(pfn<<PAGE_SHIFT);
pfn               867 arch/ia64/kernel/efi.c valid_mmap_phys_addr_range (unsigned long pfn, unsigned long size)
pfn               869 arch/ia64/kernel/efi.c 	unsigned long phys_addr = pfn << PAGE_SHIFT;
pfn               895 arch/ia64/kernel/efi.c phys_mem_access_prot(struct file *file, unsigned long pfn, unsigned long size,
pfn               898 arch/ia64/kernel/efi.c 	unsigned long phys_addr = pfn << PAGE_SHIFT;
pfn              2179 arch/ia64/kernel/perfmon.c 		unsigned long pfn = ia64_tpa(buf) >> PAGE_SHIFT;
pfn              2182 arch/ia64/kernel/perfmon.c 		if (remap_pfn_range(vma, addr, pfn, PAGE_SIZE, PAGE_READONLY))
pfn                79 arch/ia64/mm/init.c 	unsigned long pfn = PHYS_PFN(paddr);
pfn                82 arch/ia64/mm/init.c 		set_bit(PG_arch_1, &pfn_to_page(pfn)->flags);
pfn                83 arch/ia64/mm/init.c 	} while (++pfn <= PHYS_PFN(paddr + size - 1));
pfn               547 arch/ia64/mm/init.c ia64_pfn_valid (unsigned long pfn)
pfn               550 arch/ia64/mm/init.c 	struct page *pg = pfn_to_page(pfn);
pfn                68 arch/ia64/mm/numa.c int __meminit __early_pfn_to_nid(unsigned long pfn,
pfn                71 arch/ia64/mm/numa.c 	int i, section = pfn >> PFN_SECTION_SHIFT, ssec, esec;
pfn               214 arch/m68k/include/asm/cacheflush_mm.h static inline void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
pfn               404 arch/m68k/include/asm/mcf_pgtable.h #define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
pfn               136 arch/m68k/include/asm/motorola_pgtable.h #define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
pfn                53 arch/m68k/include/asm/page.h #define __pfn_to_phys(pfn)	PFN_PHYS(pfn)
pfn               125 arch/m68k/include/asm/page_mm.h #define pfn_to_virt(pfn)	__va((pfn) << PAGE_SHIFT)
pfn               156 arch/m68k/include/asm/page_mm.h #define pfn_to_page(pfn) ({						\
pfn               157 arch/m68k/include/asm/page_mm.h 	unsigned long __pfn = (pfn);					\
pfn               170 arch/m68k/include/asm/page_mm.h #define pfn_valid(pfn)		virt_addr_valid(pfn_to_virt(pfn))
pfn                24 arch/m68k/include/asm/page_no.h #define pfn_to_virt(pfn)	__va((pfn) << PAGE_SHIFT)
pfn                29 arch/m68k/include/asm/page_no.h #define pfn_to_page(pfn)	virt_to_page(pfn_to_virt(pfn))
pfn                31 arch/m68k/include/asm/page_no.h #define pfn_valid(pfn)	        ((pfn) < max_mapnr)
pfn               131 arch/m68k/include/asm/sun3_pgtable.h #define pfn_pte(pfn, pgprot) \
pfn               132 arch/m68k/include/asm/sun3_pgtable.h ({ pte_t __pte; pte_val(__pte) = pfn | pgprot_val(pgprot); __pte; })
pfn                92 arch/microblaze/include/asm/cacheflush.h #define flush_cache_page(vma, vmaddr, pfn) \
pfn                93 arch/microblaze/include/asm/cacheflush.h 	flush_dcache_range(pfn << PAGE_SHIFT, (pfn << PAGE_SHIFT) + PAGE_SIZE);
pfn               143 arch/microblaze/include/asm/page.h extern int page_is_ram(unsigned long pfn);
pfn               146 arch/microblaze/include/asm/page.h # define pfn_to_phys(pfn)	(PFN_PHYS(pfn))
pfn               149 arch/microblaze/include/asm/page.h # define pfn_to_virt(pfn)	__va(pfn_to_phys((pfn)))
pfn               166 arch/microblaze/include/asm/page.h #  define pfn_valid(pfn)	(((pfn) >= min_low_pfn) && \
pfn               167 arch/microblaze/include/asm/page.h 				((pfn) <= (min_low_pfn + max_mapnr)))
pfn               171 arch/microblaze/include/asm/page.h #  define pfn_valid(pfn)	((pfn) < (max_mapnr + ARCH_PFN_OFFSET))
pfn                65 arch/microblaze/include/asm/pci.h 					 unsigned long pfn,
pfn               312 arch/microblaze/include/asm/pgtable.h #define pfn_pte(pfn, prot) \
pfn               313 arch/microblaze/include/asm/pgtable.h 	__pte(((pte_basic_t)(pfn) << PFN_SHIFT_OFFSET) | pgprot_val(prot))
pfn                72 arch/microblaze/mm/init.c 	unsigned long pfn;
pfn                74 arch/microblaze/mm/init.c 	for (pfn = max_low_pfn; pfn < max_pfn; ++pfn) {
pfn                75 arch/microblaze/mm/init.c 		struct page *page = pfn_to_page(pfn);
pfn                78 arch/microblaze/mm/init.c 		if (!memblock_is_reserved(pfn << PAGE_SHIFT))
pfn               216 arch/microblaze/mm/init.c int page_is_ram(unsigned long pfn)
pfn               218 arch/microblaze/mm/init.c 	return __range_ok(pfn, 0);
pfn               221 arch/microblaze/mm/init.c int page_is_ram(unsigned long pfn)
pfn               223 arch/microblaze/mm/init.c 	return pfn < max_low_pfn;
pfn               174 arch/microblaze/pci/pci-common.c 				  unsigned long pfn,
pfn               180 arch/microblaze/pci/pci-common.c 	resource_size_t offset = ((resource_size_t)pfn) << PAGE_SHIFT;
pfn               183 arch/microblaze/pci/pci-common.c 	if (page_is_ram(pfn))
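The microblaze init loop above iterates frames between two byte addresses using PFN_UP/PFN_DOWN-style rounding, so partial pages at either edge are excluded. The macro bodies below match the generic kernel definitions in include/linux/pfn.h; the byte range is arbitrary:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT  12
    #define PAGE_SIZE   (1UL << PAGE_SHIFT)
    #define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)  /* round up   */
    #define PFN_DOWN(x) ((x) >> PAGE_SHIFT)                    /* round down */

    int main(void)
    {
        uint64_t begin = 0x10010, end = 0x15ff0;  /* arbitrary byte range */

        /* Only frames entirely inside [begin, end) are walked. */
        for (uint64_t pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++)
            printf("whole frame: pfn %#llx\n", (unsigned long long)pfn);
        return 0;
    }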
pfn                52 arch/mips/include/asm/cacheflush.h extern void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page, unsigned long pfn);
pfn                56 arch/mips/include/asm/highmem.h extern void *kmap_atomic_pfn(unsigned long pfn);
pfn                51 arch/mips/include/asm/mach-ip27/kernel-entry-init.h 	dsrl	t1, 12			# 4K pfn
pfn                52 arch/mips/include/asm/mach-ip27/kernel-entry-init.h 	dsrl	t2, 12			# 4K pfn
pfn                53 arch/mips/include/asm/mach-ip27/kernel-entry-init.h 	dsll	t1, 6			# Get pfn into place
pfn                54 arch/mips/include/asm/mach-ip27/kernel-entry-init.h 	dsll	t2, 6			# Get pfn into place
pfn                25 arch/mips/include/asm/mmzone.h #define pfn_to_nid(pfn)		pa_to_nid((pfn) << PAGE_SHIFT)
pfn               219 arch/mips/include/asm/page.h #define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
pfn               223 arch/mips/include/asm/page.h static inline int pfn_valid(unsigned long pfn)
pfn               229 arch/mips/include/asm/page.h 	return pfn >= pfn_offset && pfn < max_mapnr;
pfn               238 arch/mips/include/asm/page.h #define pfn_valid(pfn)							\
pfn               240 arch/mips/include/asm/page.h 	unsigned long __pfn = (pfn);					\
pfn               160 arch/mips/include/asm/pgtable-32.h pfn_pte(unsigned long pfn, pgprot_t prot)
pfn               164 arch/mips/include/asm/pgtable-32.h 	pte.pte_low = (pfn >> _PAGE_PRESENT_SHIFT) |
pfn               166 arch/mips/include/asm/pgtable-32.h 	pte.pte_high = (pfn << _PFN_SHIFT) |
pfn               175 arch/mips/include/asm/pgtable-32.h static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
pfn               179 arch/mips/include/asm/pgtable-32.h 	pte.pte_high = (pfn << 6) | (pgprot_val(prot) & 0x3f);
pfn               189 arch/mips/include/asm/pgtable-32.h #define pfn_pte(pfn, prot)	__pte(((pfn) << (PAGE_SHIFT + 2)) | pgprot_val(prot))
pfn               192 arch/mips/include/asm/pgtable-32.h #define pfn_pte(pfn, prot)	__pte(((unsigned long long)(pfn) << _PFN_SHIFT) | pgprot_val(prot))
pfn               193 arch/mips/include/asm/pgtable-32.h #define pfn_pmd(pfn, prot)	__pmd(((unsigned long long)(pfn) << _PFN_SHIFT) | pgprot_val(prot))
pfn               315 arch/mips/include/asm/pgtable-64.h #define pfn_pte(pfn, prot)	__pte(((pfn) << (PAGE_SHIFT + 2)) | pgprot_val(prot))
pfn               318 arch/mips/include/asm/pgtable-64.h #define pfn_pte(pfn, prot)	__pte(((pfn) << _PFN_SHIFT) | pgprot_val(prot))
pfn               319 arch/mips/include/asm/pgtable-64.h #define pfn_pmd(pfn, prot)	__pmd(((pfn) << _PFN_SHIFT) | pgprot_val(prot))
pfn               495 arch/mips/include/asm/pgtable.h extern int remap_pfn_range(struct vm_area_struct *vma, unsigned long from, unsigned long pfn, unsigned long size, pgprot_t prot);
pfn               499 arch/mips/include/asm/pgtable.h 		unsigned long pfn,
pfn               503 arch/mips/include/asm/pgtable.h 	phys_addr_t phys_addr_high = fixup_bigphys_addr(pfn << PAGE_SHIFT, size);
pfn               653 arch/mips/include/asm/pgtable.h pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
pfn                27 arch/mips/kernel/crash_dump.c ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
pfn                35 arch/mips/kernel/crash_dump.c 	vaddr = kmap_atomic_pfn(pfn);
pfn               617 arch/mips/kvm/mmu.c 	kvm_pfn_t pfn = 0;	/* silence bogus GCC warning */
pfn               633 arch/mips/kvm/mmu.c 		pfn = pte_pfn(*ptep);
pfn               645 arch/mips/kvm/mmu.c 		pfn = pte_pfn(*ptep);
pfn               647 arch/mips/kvm/mmu.c 		kvm_set_pfn_dirty(pfn);
pfn               658 arch/mips/kvm/mmu.c 		kvm_set_pfn_accessed(pfn);
pfn               694 arch/mips/kvm/mmu.c 	kvm_pfn_t pfn;
pfn               733 arch/mips/kvm/mmu.c 	pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writeable);
pfn               734 arch/mips/kvm/mmu.c 	if (is_error_noslot_pfn(pfn)) {
pfn               748 arch/mips/kvm/mmu.c 		kvm_release_pfn_clean(pfn);
pfn               762 arch/mips/kvm/mmu.c 			kvm_set_pfn_dirty(pfn);
pfn               765 arch/mips/kvm/mmu.c 	entry = pfn_pte(pfn, __pgprot(prot_bits));
pfn               778 arch/mips/kvm/mmu.c 	kvm_release_pfn_clean(pfn);
pfn               779 arch/mips/kvm/mmu.c 	kvm_set_pfn_accessed(pfn);
pfn              1100 arch/mips/kvm/mmu.c 	kvm_pfn_t pfn;
pfn              1109 arch/mips/kvm/mmu.c 	pfn = PFN_DOWN(virt_to_phys(vcpu->arch.kseg0_commpage));
pfn              1111 arch/mips/kvm/mmu.c 	*ptep = pte_mkyoung(pte_mkdirty(pfn_pte(pfn, PAGE_SHARED)));
pfn               109 arch/mips/loongson64/common/mem.c pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
pfn               112 arch/mips/loongson64/common/mem.c 	unsigned long offset = pfn << PAGE_SHIFT;
pfn               153 arch/mips/mm/c-octeon.c 				    unsigned long page, unsigned long pfn)
pfn               238 arch/mips/mm/c-r3k.c 				 unsigned long addr, unsigned long pfn)
pfn               240 arch/mips/mm/c-r3k.c 	unsigned long kaddr = KSEG0ADDR(pfn << PAGE_SHIFT);
pfn               640 arch/mips/mm/c-r4k.c 	unsigned long pfn;
pfn               648 arch/mips/mm/c-r4k.c 	struct page *page = pfn_to_page(fcp_args->pfn);
pfn               718 arch/mips/mm/c-r4k.c 	unsigned long addr, unsigned long pfn)
pfn               724 arch/mips/mm/c-r4k.c 	args.pfn = pfn;
pfn               168 arch/mips/mm/c-tx39.c static void tx39_flush_cache_page(struct vm_area_struct *vma, unsigned long page, unsigned long pfn)
pfn                33 arch/mips/mm/cache.c 	unsigned long pfn);
pfn               133 arch/mips/mm/cache.c 	unsigned long pfn, addr;
pfn               136 arch/mips/mm/cache.c 	pfn = pte_pfn(pte);
pfn               137 arch/mips/mm/cache.c 	if (unlikely(!pfn_valid(pfn)))
pfn               139 arch/mips/mm/cache.c 	page = pfn_to_page(pfn);
pfn               106 arch/mips/mm/highmem.c void *kmap_atomic_pfn(unsigned long pfn)
pfn               117 arch/mips/mm/highmem.c 	set_pte(kmap_pte-idx, pfn_pte(pfn, PAGE_KERNEL));
pfn               483 arch/mips/mm/init.c 	unsigned long pfn;
pfn               485 arch/mips/mm/init.c 	for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
pfn               486 arch/mips/mm/init.c 		struct page *page = pfn_to_page(pfn);
pfn               487 arch/mips/mm/init.c 		void *addr = phys_to_virt(PFN_PHYS(pfn));
pfn                25 arch/mips/mm/ioremap.c 	unsigned long pfn;
pfn                34 arch/mips/mm/ioremap.c 	pfn = phys_addr >> PAGE_SHIFT;
pfn                40 arch/mips/mm/ioremap.c 		set_pte(pte, pfn_pte(pfn, pgprot));
pfn                42 arch/mips/mm/ioremap.c 		pfn++;
pfn               134 arch/mips/mm/ioremap.c 	unsigned long offset, pfn, last_pfn;
pfn               158 arch/mips/mm/ioremap.c 	pfn = PFN_DOWN(phys_addr);
pfn               160 arch/mips/mm/ioremap.c 	if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
pfn                36 arch/mips/power/cpu.c int pfn_is_nosave(unsigned long pfn)
pfn                41 arch/mips/power/cpu.c 	return	(pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
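The mips/mm/ioremap.c fragment above (and the near-identical nios2 one further down) maps a physical range by walking it one frame at a time, writing a PTE per page and incrementing the pfn. A standalone model of that loop, using a plain array in place of a real page table (base address, prot bits, and table size are invented):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define NPTES      16

    int main(void)
    {
        uint64_t ptes[NPTES] = { 0 };
        uint64_t phys_addr = 0x40000000;   /* hypothetical device base */
        uint64_t prot      = 0x7;          /* hypothetical prot bits   */
        uint64_t pfn       = phys_addr >> PAGE_SHIFT;

        /* One PTE per page, pfn advancing with each slot (the set_pte loop). */
        for (int i = 0; i < NPTES; i++, pfn++)
            ptes[i] = (pfn << PAGE_SHIFT) | prot;

        printf("pte[0]=%#llx pte[%d]=%#llx\n",
               (unsigned long long)ptes[0], NPTES - 1,
               (unsigned long long)ptes[NPTES - 1]);
        return 0;
    }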
pfn                23 arch/nds32/include/asm/cacheflush.h 		      unsigned long addr, unsigned long pfn);
pfn                61 arch/nds32/include/asm/highmem.h extern void *kmap_atomic_pfn(unsigned long pfn);
pfn                82 arch/nds32/include/asm/memory.h #define pfn_valid(pfn)		((pfn) >= PHYS_PFN_OFFSET && (pfn) < (PHYS_PFN_OFFSET + max_mapnr))
pfn               188 arch/nds32/include/asm/pgtable.h #define pfn_pte(pfn,prot)	(__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))
pfn                53 arch/nds32/mm/cacheflush.c 	unsigned long pfn = pte_pfn(*pte);
pfn                56 arch/nds32/mm/cacheflush.c 	if (!pfn_valid(pfn))
pfn                66 arch/nds32/mm/cacheflush.c 	page = pfn_to_page(pfn);
pfn               155 arch/nds32/mm/cacheflush.c 		      unsigned long addr, unsigned long pfn)
pfn               160 arch/nds32/mm/cacheflush.c 	vto = kremap0(addr, pfn << PAGE_SHIFT);
pfn               172 arch/nds32/mm/init.c 	unsigned long pfn;
pfn               173 arch/nds32/mm/init.c 	for (pfn = PFN_UP(__pa(high_memory)); pfn < max_pfn; pfn++) {
pfn               174 arch/nds32/mm/init.c 		phys_addr_t paddr = (phys_addr_t) pfn << PAGE_SHIFT;
pfn               176 arch/nds32/mm/init.c 			free_highmem_page(pfn_to_page(pfn));
pfn                29 arch/nios2/include/asm/cacheflush.h 	unsigned long pfn);
pfn                87 arch/nios2/include/asm/page.h # define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
pfn                89 arch/nios2/include/asm/page.h static inline bool pfn_valid(unsigned long pfn)
pfn                95 arch/nios2/include/asm/page.h 	return pfn >= pfn_offset && pfn < max_mapnr;
pfn               199 arch/nios2/include/asm/pgtable.h #define pfn_pte(pfn, prot)	(__pte(pfn | pgprot_val(prot)))
pfn               150 arch/nios2/mm/cacheflush.c 			unsigned long pfn)
pfn               204 arch/nios2/mm/cacheflush.c 	unsigned long pfn = pte_pfn(pte);
pfn               210 arch/nios2/mm/cacheflush.c 	if (!pfn_valid(pfn))
pfn               217 arch/nios2/mm/cacheflush.c 	page = pfn_to_page(pfn);
pfn                27 arch/nios2/mm/ioremap.c 	unsigned long pfn;
pfn                37 arch/nios2/mm/ioremap.c 	pfn = PFN_DOWN(phys_addr);
pfn                43 arch/nios2/mm/ioremap.c 		set_pte(pte, pfn_pte(pfn, pgprot));
pfn                45 arch/nios2/mm/ioremap.c 		pfn++;
pfn                73 arch/openrisc/include/asm/cacheflush.h #define flush_cache_page(vma, vmaddr, pfn)		do { } while (0)
pfn                76 arch/openrisc/include/asm/page.h #define pfn_to_virt(pfn)        __va((pfn) << PAGE_SHIFT)
pfn                83 arch/openrisc/include/asm/page.h #define pfn_valid(pfn)          ((pfn) < max_mapnr)
pfn               402 arch/openrisc/include/asm/pgtable.h #define pfn_pte(pfn, prot)  __pte((((pfn) << PAGE_SHIFT)) | pgprot_val(prot))
pfn                45 arch/openrisc/mm/cache.c 	unsigned long pfn = pte_val(*pte) >> PAGE_SHIFT;
pfn                46 arch/openrisc/mm/cache.c 	struct page *page = pfn_to_page(pfn);
pfn                84 arch/parisc/include/asm/cacheflush.h void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn);
pfn               133 arch/parisc/include/asm/cacheflush.h #define kmap_atomic_pfn(pfn)	kmap_atomic(pfn_to_page(pfn))
pfn               151 arch/parisc/include/asm/page.h #define pfn_valid(pfn)		((pfn) < max_mapnr)
pfn               420 arch/parisc/include/asm/pgtable.h static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
pfn               423 arch/parisc/include/asm/pgtable.h 	pte_val(pte) = (pfn << PFN_PTE_SHIFT) | pgprot_val(pgprot);
pfn                84 arch/parisc/kernel/cache.c #define pfn_va(pfn)	__va(PFN_PHYS(pfn))
pfn                89 arch/parisc/kernel/cache.c 	unsigned long pfn = pte_pfn(*ptep);
pfn                95 arch/parisc/kernel/cache.c 	if (!pfn_valid(pfn))
pfn                98 arch/parisc/kernel/cache.c 	page = pfn_to_page(pfn);
pfn               101 arch/parisc/kernel/cache.c 		flush_kernel_dcache_page_addr(pfn_va(pfn));
pfn               104 arch/parisc/kernel/cache.c 		flush_kernel_dcache_page_addr(pfn_va(pfn));
pfn               578 arch/parisc/kernel/cache.c 			unsigned long pfn;
pfn               582 arch/parisc/kernel/cache.c 			pfn = pte_pfn(*ptep);
pfn               583 arch/parisc/kernel/cache.c 			if (!pfn_valid(pfn))
pfn               587 arch/parisc/kernel/cache.c 				__flush_cache_page(vma, addr, PFN_PHYS(pfn));
pfn               589 arch/parisc/kernel/cache.c 				__purge_cache_page(vma, addr, PFN_PHYS(pfn));
pfn               619 arch/parisc/kernel/cache.c 		unsigned long pfn;
pfn               623 arch/parisc/kernel/cache.c 		pfn = pte_pfn(*ptep);
pfn               624 arch/parisc/kernel/cache.c 		if (pfn_valid(pfn)) {
pfn               627 arch/parisc/kernel/cache.c 				__flush_cache_page(vma, addr, PFN_PHYS(pfn));
pfn               629 arch/parisc/kernel/cache.c 				__purge_cache_page(vma, addr, PFN_PHYS(pfn));
pfn               636 arch/parisc/kernel/cache.c flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
pfn               638 arch/parisc/kernel/cache.c 	if (pfn_valid(pfn)) {
pfn               641 arch/parisc/kernel/cache.c 			__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
pfn               643 arch/parisc/kernel/cache.c 			__purge_cache_page(vma, vmaddr, PFN_PHYS(pfn));
pfn               436 arch/powerpc/include/asm/book3s/32/pgtable.h static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
pfn               438 arch/powerpc/include/asm/book3s/32/pgtable.h 	return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
pfn                69 arch/powerpc/include/asm/book3s/64/hash-4k.h #define remap_4k_pfn(vma, addr, pfn, prot)	\
pfn                70 arch/powerpc/include/asm/book3s/64/hash-4k.h 	remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, (prot))
pfn               161 arch/powerpc/include/asm/book3s/64/hash-64k.h 			   unsigned long pfn, unsigned long size, pgprot_t);
pfn               163 arch/powerpc/include/asm/book3s/64/hash-64k.h 				 unsigned long pfn, pgprot_t prot)
pfn               165 arch/powerpc/include/asm/book3s/64/hash-64k.h 	if (pfn > (PTE_RPN_MASK >> PAGE_SHIFT)) {
pfn               169 arch/powerpc/include/asm/book3s/64/hash-64k.h 	return remap_pfn_range(vma, addr, pfn, PAGE_SIZE,
pfn                68 arch/powerpc/include/asm/book3s/64/pgtable-64k.h 			       unsigned long pfn, pgprot_t prot)
pfn                72 arch/powerpc/include/asm/book3s/64/pgtable-64k.h 	return hash__remap_4k_pfn(vma, addr, pfn, prot);
pfn               609 arch/powerpc/include/asm/book3s/64/pgtable.h static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
pfn               611 arch/powerpc/include/asm/book3s/64/pgtable.h 	VM_BUG_ON(pfn >> (64 - PAGE_SHIFT));
pfn               612 arch/powerpc/include/asm/book3s/64/pgtable.h 	VM_BUG_ON((pfn << PAGE_SHIFT) & ~PTE_RPN_MASK);
pfn               614 arch/powerpc/include/asm/book3s/64/pgtable.h 	return __pte(((pte_basic_t)pfn << PAGE_SHIFT) | pgprot_val(pgprot));
pfn              1137 arch/powerpc/include/asm/book3s/64/pgtable.h extern pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot);
pfn                25 arch/powerpc/include/asm/book3s/pgtable.h extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
pfn                20 arch/powerpc/include/asm/cacheflush.h #define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
pfn               802 arch/powerpc/include/asm/io.h 	unsigned long pfn = page_to_pfn(page);
pfn               804 arch/powerpc/include/asm/io.h 	WARN_ON(IS_ENABLED(CONFIG_DEBUG_VIRTUAL) && !pfn_valid(pfn));
pfn               806 arch/powerpc/include/asm/io.h 	return PFN_PHYS(pfn);
pfn                55 arch/powerpc/include/asm/kvm_book3s.h 	u64 pfn;
pfn               182 arch/powerpc/include/asm/kvm_host.h 	unsigned long pfn;
pfn               879 arch/powerpc/include/asm/kvm_ppc.h static inline void kvmppc_mmu_flush_icache(kvm_pfn_t pfn)
pfn               886 arch/powerpc/include/asm/kvm_ppc.h 	if (!pfn_valid(pfn))
pfn               890 arch/powerpc/include/asm/kvm_ppc.h 	page = pfn_to_page(pfn);
pfn               115 arch/powerpc/include/asm/machdep.h 						unsigned long pfn,
pfn                90 arch/powerpc/include/asm/nohash/64/pgtable-4k.h #define remap_4k_pfn(vma, addr, pfn, prot)	\
pfn                91 arch/powerpc/include/asm/nohash/64/pgtable-4k.h 	remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, (prot))
pfn               121 arch/powerpc/include/asm/nohash/pgtable.h static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot) {
pfn               122 arch/powerpc/include/asm/nohash/pgtable.h 	return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
pfn               262 arch/powerpc/include/asm/nohash/pgtable.h extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
pfn               122 arch/powerpc/include/asm/page.h static inline bool pfn_valid(unsigned long pfn)
pfn               126 arch/powerpc/include/asm/page.h 	return pfn >= min_pfn && pfn < max_mapnr;
pfn               133 arch/powerpc/include/asm/page.h #define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
pfn               324 arch/powerpc/include/asm/page.h extern int devmem_is_allowed(unsigned long pfn);
pfn               111 arch/powerpc/include/asm/pci.h 					 unsigned long pfn,
pfn               466 arch/powerpc/include/asm/rtas.h static inline int page_is_rtas_user_buf(unsigned long pfn)
pfn               468 arch/powerpc/include/asm/rtas.h 	unsigned long paddr = (pfn << PAGE_SHIFT);
pfn               479 arch/powerpc/include/asm/rtas.h static inline int page_is_rtas_user_buf(unsigned long pfn) { return 0;}
pfn                34 arch/powerpc/include/asm/ultravisor.h static inline int uv_share_page(u64 pfn, u64 npages)
pfn                36 arch/powerpc/include/asm/ultravisor.h 	return ucall_norets(UV_SHARE_PAGE, pfn, npages);
pfn                39 arch/powerpc/include/asm/ultravisor.h static inline int uv_unshare_page(u64 pfn, u64 npages)
pfn                41 arch/powerpc/include/asm/ultravisor.h 	return ucall_norets(UV_UNSHARE_PAGE, pfn, npages);
pfn                95 arch/powerpc/kernel/crash_dump.c ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
pfn               105 arch/powerpc/kernel/crash_dump.c 	paddr = pfn << PAGE_SHIFT;
pfn              1114 arch/powerpc/kernel/fadump.c 	unsigned long pfn;
pfn              1120 arch/powerpc/kernel/fadump.c 	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
pfn              1121 arch/powerpc/kernel/fadump.c 		free_reserved_page(pfn_to_page(pfn));
pfn               282 arch/powerpc/kernel/mce.c 				unsigned long pfn;
pfn               284 arch/powerpc/kernel/mce.c 				pfn = evt->u.ue_error.physical_address >>
pfn               286 arch/powerpc/kernel/mce.c 				memory_failure(pfn, 0);
pfn                32 arch/powerpc/kernel/mce_power.c 	unsigned long pfn, flags;
pfn                44 arch/powerpc/kernel/mce_power.c 		pfn = ULONG_MAX;
pfn                49 arch/powerpc/kernel/mce_power.c 		pfn = pte_pfn(*ptep);
pfn                52 arch/powerpc/kernel/mce_power.c 		pfn = pte_pfn(__pte(pte_val(*ptep) | (addr & rpnmask)));
pfn                57 arch/powerpc/kernel/mce_power.c 	return pfn;
pfn               369 arch/powerpc/kernel/mce_power.c 	unsigned long pfn, instr_addr;
pfn               373 arch/powerpc/kernel/mce_power.c 	pfn = addr_to_pfn(regs, regs->nip);
pfn               374 arch/powerpc/kernel/mce_power.c 	if (pfn != ULONG_MAX) {
pfn               375 arch/powerpc/kernel/mce_power.c 		instr_addr = (pfn << PAGE_SHIFT) + (regs->nip & ~PAGE_MASK);
pfn               378 arch/powerpc/kernel/mce_power.c 			pfn = addr_to_pfn(regs, op.ea);
pfn               380 arch/powerpc/kernel/mce_power.c 			*phys_addr = (pfn << PAGE_SHIFT);
pfn               457 arch/powerpc/kernel/mce_power.c 				unsigned long pfn;
pfn               460 arch/powerpc/kernel/mce_power.c 					pfn = addr_to_pfn(regs, regs->nip);
pfn               461 arch/powerpc/kernel/mce_power.c 					if (pfn != ULONG_MAX) {
pfn               463 arch/powerpc/kernel/mce_power.c 							(pfn << PAGE_SHIFT);
pfn               438 arch/powerpc/kernel/pci-common.c 				  unsigned long pfn,
pfn               444 arch/powerpc/kernel/pci-common.c 	resource_size_t offset = ((resource_size_t)pfn) << PAGE_SHIFT;
pfn               447 arch/powerpc/kernel/pci-common.c 	if (page_is_ram(pfn))
pfn                18 arch/powerpc/kernel/suspend.c int pfn_is_nosave(unsigned long pfn)
pfn                22 arch/powerpc/kernel/suspend.c 	return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
pfn               439 arch/powerpc/kvm/book3s.c 		kvm_pfn_t pfn;
pfn               441 arch/powerpc/kvm/book3s.c 		pfn = (kvm_pfn_t)virt_to_phys((void*)shared_page) >> PAGE_SHIFT;
pfn               442 arch/powerpc/kvm/book3s.c 		get_page(pfn_to_page(pfn));
pfn               445 arch/powerpc/kvm/book3s.c 		return pfn;
pfn               249 arch/powerpc/kvm/book3s_32_mmu_host.c 	pte->pfn = hpaddr >> PAGE_SHIFT;
pfn                89 arch/powerpc/kvm/book3s_64_mmu_host.c 	unsigned long pfn;
pfn                96 arch/powerpc/kvm/book3s_64_mmu_host.c 	pfn = kvmppc_gpa_to_pfn(vcpu, orig_pte->raddr, iswrite, &writable);
pfn                97 arch/powerpc/kvm/book3s_64_mmu_host.c 	if (is_error_noslot_pfn(pfn)) {
pfn               103 arch/powerpc/kvm/book3s_64_mmu_host.c 	hpaddr = pfn << PAGE_SHIFT;
pfn               123 arch/powerpc/kvm/book3s_64_mmu_host.c 	kvm_set_pfn_accessed(pfn);
pfn               128 arch/powerpc/kvm/book3s_64_mmu_host.c 		kvm_set_pfn_dirty(pfn);
pfn               134 arch/powerpc/kvm/book3s_64_mmu_host.c 		kvmppc_mmu_flush_icache(pfn);
pfn               195 arch/powerpc/kvm/book3s_64_mmu_host.c 		cpte->pfn = pfn;
pfn               204 arch/powerpc/kvm/book3s_64_mmu_host.c 	kvm_release_pfn_clean(pfn);
pfn               500 arch/powerpc/kvm/book3s_64_mmu_hv.c 	unsigned long gpa, gfn, hva, pfn;
pfn               585 arch/powerpc/kvm/book3s_64_mmu_hv.c 	pfn = 0;
pfn               599 arch/powerpc/kvm/book3s_64_mmu_hv.c 			pfn = vma->vm_pgoff +
pfn               606 arch/powerpc/kvm/book3s_64_mmu_hv.c 		if (!pfn)
pfn               610 arch/powerpc/kvm/book3s_64_mmu_hv.c 		pfn = page_to_pfn(page);
pfn               657 arch/powerpc/kvm/book3s_64_mmu_hv.c 					((pfn << PAGE_SHIFT) & ~(psize - 1));
pfn               797 arch/powerpc/kvm/book3s_64_mmu_radix.c 		unsigned long pfn;
pfn               800 arch/powerpc/kvm/book3s_64_mmu_radix.c 		pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,
pfn               802 arch/powerpc/kvm/book3s_64_mmu_radix.c 		if (is_error_noslot_pfn(pfn))
pfn               805 arch/powerpc/kvm/book3s_64_mmu_radix.c 		if (pfn_valid(pfn)) {
pfn               806 arch/powerpc/kvm/book3s_64_mmu_radix.c 			page = pfn_to_page(pfn);
pfn                41 arch/powerpc/kvm/e500.h 	kvm_pfn_t pfn;		/* valid only for TLB0, except briefly */
pfn               164 arch/powerpc/kvm/e500_mmu_host.c 	kvm_pfn_t pfn;
pfn               166 arch/powerpc/kvm/e500_mmu_host.c 	pfn = (kvm_pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
pfn               167 arch/powerpc/kvm/e500_mmu_host.c 	get_page(pfn_to_page(pfn));
pfn               175 arch/powerpc/kvm/e500_mmu_host.c 	magic.mas7_3 = ((u64)pfn << PAGE_SHIFT) |
pfn               247 arch/powerpc/kvm/e500_mmu_host.c 					 kvm_pfn_t pfn, unsigned int wimg)
pfn               249 arch/powerpc/kvm/e500_mmu_host.c 	ref->pfn = pfn;
pfn               256 arch/powerpc/kvm/e500_mmu_host.c 	kvm_set_pfn_accessed(pfn);
pfn               259 arch/powerpc/kvm/e500_mmu_host.c 		kvm_set_pfn_dirty(pfn);
pfn               266 arch/powerpc/kvm/e500_mmu_host.c 		trace_kvm_booke206_ref_release(ref->pfn, ref->flags);
pfn               310 arch/powerpc/kvm/e500_mmu_host.c 	kvm_pfn_t pfn = ref->pfn;
pfn               318 arch/powerpc/kvm/e500_mmu_host.c 	stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) |
pfn               328 arch/powerpc/kvm/e500_mmu_host.c 	unsigned long pfn = 0; /* silence GCC warning */
pfn               379 arch/powerpc/kvm/e500_mmu_host.c 			pfn = start + ((hva - vma->vm_start) >> PAGE_SHIFT);
pfn               381 arch/powerpc/kvm/e500_mmu_host.c 			slot_start = pfn - (gfn - slot->base_gfn);
pfn               412 arch/powerpc/kvm/e500_mmu_host.c 				if (gfn_start + pfn - gfn < start)
pfn               414 arch/powerpc/kvm/e500_mmu_host.c 				if (gfn_end + pfn - gfn > end)
pfn               417 arch/powerpc/kvm/e500_mmu_host.c 				    (pfn & (tsize_pages - 1)))
pfn               421 arch/powerpc/kvm/e500_mmu_host.c 				pfn &= ~(tsize_pages - 1);
pfn               449 arch/powerpc/kvm/e500_mmu_host.c 		pfn = gfn_to_pfn_memslot(slot, gfn);
pfn               450 arch/powerpc/kvm/e500_mmu_host.c 		if (is_error_noslot_pfn(pfn)) {
pfn               458 arch/powerpc/kvm/e500_mmu_host.c 		pfn &= ~(tsize_pages - 1);
pfn               488 arch/powerpc/kvm/e500_mmu_host.c 					   __func__, (long)gfn, pfn);
pfn               493 arch/powerpc/kvm/e500_mmu_host.c 	kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);
pfn               499 arch/powerpc/kvm/e500_mmu_host.c 	kvmppc_mmu_flush_icache(pfn);
pfn               505 arch/powerpc/kvm/e500_mmu_host.c 	kvm_release_pfn_clean(pfn);
pfn               630 arch/powerpc/kvm/e500_mmu_host.c 	hfn_t pfn;
pfn               697 arch/powerpc/kvm/e500_mmu_host.c 	pfn = addr >> PAGE_SHIFT;
pfn               700 arch/powerpc/kvm/e500_mmu_host.c 	if (unlikely(!page_is_ram(pfn))) {
pfn               707 arch/powerpc/kvm/e500_mmu_host.c 	page = pfn_to_page(pfn);
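The KVM fragments above (mips/kvm/mmu.c, book3s_64_mmu_host.c, e500_mmu_host.c) share one lifecycle: translate a guest frame to a host pfn, bail out on an error pfn, build a PTE from it, mark the frame accessed or dirty, then release the reference. A sketch of that control flow only; the helpers here are stubs invented for illustration, not KVM's API:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define ERROR_PFN  UINT64_MAX    /* stand-in for is_error_noslot_pfn() */

    /* Stub translation: pretend guest frame N is backed by host frame N + 0x1000. */
    static uint64_t gfn_to_pfn_stub(uint64_t gfn) { return gfn + 0x1000; }

    static int map_guest_page(uint64_t gfn, bool write_fault)
    {
        uint64_t pfn = gfn_to_pfn_stub(gfn);
        if (pfn == ERROR_PFN)
            return -1;               /* no backing slot: fail the fault */

        uint64_t pte = (pfn << PAGE_SHIFT) | (write_fault ? 0x3 : 0x1);
        printf("gfn %#llx -> pte %#llx\n",
               (unsigned long long)gfn, (unsigned long long)pte);

        /* Real code then calls kvm_set_pfn_dirty()/accessed() and
         * kvm_release_pfn_clean() to drop the reference. */
        return 0;
    }

    int main(void) { return map_guest_page(0x42, true); }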
pfn               136 arch/powerpc/kvm/trace_booke.h 	TP_PROTO(__u64 pfn, __u32 flags),
pfn               137 arch/powerpc/kvm/trace_booke.h 	TP_ARGS(pfn, flags),
pfn               140 arch/powerpc/kvm/trace_booke.h 		__field(	__u64,	pfn		)
pfn               145 arch/powerpc/kvm/trace_booke.h 		__entry->pfn		= pfn;
pfn               150 arch/powerpc/kvm/trace_booke.h 		__entry->pfn, __entry->flags)
pfn                69 arch/powerpc/kvm/trace_pr.h 		__field(	u64,		pfn		)
pfn                78 arch/powerpc/kvm/trace_pr.h 		__entry->pfn		= pte->pfn;
pfn                88 arch/powerpc/kvm/trace_pr.h 		  __entry->host_vpn, __entry->pfn, __entry->eaddr,
pfn                98 arch/powerpc/kvm/trace_pr.h 		__field(	u64,		pfn		)
pfn               107 arch/powerpc/kvm/trace_pr.h 		__entry->pfn		= pte->pfn;
pfn               117 arch/powerpc/kvm/trace_pr.h 		  __entry->host_vpn, __entry->pfn, __entry->eaddr,
pfn                88 arch/powerpc/lib/code-patching.c 	unsigned long pfn;
pfn                92 arch/powerpc/lib/code-patching.c 		pfn = vmalloc_to_pfn(addr);
pfn                94 arch/powerpc/lib/code-patching.c 		pfn = __pa_symbol(addr) >> PAGE_SHIFT;
pfn                96 arch/powerpc/lib/code-patching.c 	err = map_kernel_page(text_poke_addr, (pfn << PAGE_SHIFT), PAGE_KERNEL);
pfn                98 arch/powerpc/lib/code-patching.c 	pr_devel("Mapped addr %lx with pfn %lx:%d\n", text_poke_addr, pfn, err);
pfn               128 arch/powerpc/mm/book3s64/pgtable.c pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
pfn               132 arch/powerpc/mm/book3s64/pgtable.c 	pmdv = (pfn << PAGE_SHIFT) & PTE_RPN_MASK;
pfn                64 arch/powerpc/mm/book3s64/radix_pgtable.c 	unsigned long pfn = pa >> PAGE_SHIFT;
pfn                99 arch/powerpc/mm/book3s64/radix_pgtable.c 	set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
pfn               114 arch/powerpc/mm/book3s64/radix_pgtable.c 	unsigned long pfn = pa >> PAGE_SHIFT;
pfn               157 arch/powerpc/mm/book3s64/radix_pgtable.c 	set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
pfn                75 arch/powerpc/mm/mem.c pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
pfn                79 arch/powerpc/mm/mem.c 		return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);
pfn                81 arch/powerpc/mm/mem.c 	if (!page_is_ram(pfn))
pfn               302 arch/powerpc/mm/mem.c 		unsigned long pfn, highmem_mapnr;
pfn               305 arch/powerpc/mm/mem.c 		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
pfn               306 arch/powerpc/mm/mem.c 			phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT;
pfn               307 arch/powerpc/mm/mem.c 			struct page *page = pfn_to_page(pfn);
pfn               623 arch/powerpc/mm/mem.c int devmem_is_allowed(unsigned long pfn)
pfn               625 arch/powerpc/mm/mem.c 	if (page_is_rtas_user_buf(pfn))
pfn               627 arch/powerpc/mm/mem.c 	if (iomem_is_exclusive(PFN_PHYS(pfn)))
pfn               629 arch/powerpc/mm/mem.c 	if (!page_is_ram(pfn))
pfn                55 arch/powerpc/mm/pgtable.c 	unsigned long pfn = pte_pfn(pte);
pfn                58 arch/powerpc/mm/pgtable.c 	if (unlikely(!pfn_valid(pfn)))
pfn                60 arch/powerpc/mm/pgtable.c 	page = pfn_to_page(pfn);
pfn               293 arch/powerpc/mm/pgtable.c 	unsigned long pfn = vmalloc_to_pfn(va);
pfn               295 arch/powerpc/mm/pgtable.c 	BUG_ON(!pfn);
pfn               296 arch/powerpc/mm/pgtable.c 	return __pa(pfn_to_kaddr(pfn)) + offset_in_page(va);
pfn               119 arch/powerpc/perf/callchain.c 	unsigned long pfn, flags;
pfn               139 arch/powerpc/perf/callchain.c 	pfn = pte_pfn(pte);
pfn               140 arch/powerpc/perf/callchain.c 	if (!page_is_ram(pfn))
pfn               144 arch/powerpc/perf/callchain.c 	kaddr = pfn_to_kaddr(pfn);
pfn               227 arch/powerpc/platforms/cell/spufs/file.c 	unsigned long pfn, offset;
pfn               242 arch/powerpc/platforms/cell/spufs/file.c 		pfn = vmalloc_to_pfn(ctx->csa.lscsa->ls + offset);
pfn               245 arch/powerpc/platforms/cell/spufs/file.c 		pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT;
pfn               247 arch/powerpc/platforms/cell/spufs/file.c 	ret = vmf_insert_pfn(vma, vmf->address, pfn);
pfn                97 arch/powerpc/platforms/powernv/memtrace.c 	u64 start_pfn, end_pfn, nr_pages, pfn;
pfn               121 arch/powerpc/platforms/powernv/memtrace.c 			for (pfn = base_pfn; pfn < end_pfn; pfn += bytes>> PAGE_SHIFT) {
pfn               122 arch/powerpc/platforms/powernv/memtrace.c 				__remove_memory(nid, pfn << PAGE_SHIFT, bytes);
pfn               342 arch/powerpc/platforms/pseries/hotplug-memory.c 	unsigned long pfn, block_sz;
pfn               362 arch/powerpc/platforms/pseries/hotplug-memory.c 		pfn = PFN_DOWN(phys_addr);
pfn               363 arch/powerpc/platforms/pseries/hotplug-memory.c 		if (!pfn_present(pfn)) {
pfn               368 arch/powerpc/platforms/pseries/hotplug-memory.c 		rc &= is_mem_section_removable(pfn, PAGES_PER_SECTION);
pfn               585 arch/powerpc/platforms/pseries/ras.c 			unsigned long pfn;
pfn               587 arch/powerpc/platforms/pseries/ras.c 			pfn = addr_to_pfn(regs, eaddr);
pfn               588 arch/powerpc/platforms/pseries/ras.c 			if (pfn != ULONG_MAX)
pfn               589 arch/powerpc/platforms/pseries/ras.c 				paddr = pfn << PAGE_SHIFT;
pfn                76 arch/powerpc/platforms/pseries/svm.c 	unsigned long pfn = PHYS_PFN(__pa(addr));
pfn                77 arch/powerpc/platforms/pseries/svm.c 	struct page *page = pfn_to_page(pfn);
pfn                83 arch/powerpc/platforms/pseries/svm.c 		uv_share_page(pfn, 1);
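Several powerpc fragments above (mce_power.c, ras.c) rebuild a full address from a translated pfn by recombining it with the untranslated low bits of the virtual address, since the page offset survives translation. That arithmetic on its own (PAGE_SHIFT and the sample values are assumptions):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_MASK  (~((1UL << PAGE_SHIFT) - 1))

    int main(void)
    {
        uint64_t vaddr = 0xc000000000123456ULL; /* hypothetical faulting address */
        uint64_t pfn   = 0x5678;                /* result of a page-table walk   */

        /* The in-page offset is untouched by translation, so OR it back in. */
        uint64_t phys = (pfn << PAGE_SHIFT) + (vaddr & ~PAGE_MASK);

        printf("phys=%#llx\n", (unsigned long long)phys);  /* 0x5678456 */
        return 0;
    }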
pfn                37 arch/riscv/include/asm/cacheflush.h 				    unsigned long pfn)
pfn               101 arch/riscv/include/asm/page.h #define pfn_to_phys(pfn)	(PFN_PHYS(pfn))
pfn               104 arch/riscv/include/asm/page.h #define pfn_to_virt(pfn)	(__va(pfn_to_phys(pfn)))
pfn               114 arch/riscv/include/asm/page.h #define pfn_valid(pfn) \
pfn               115 arch/riscv/include/asm/page.h 	(((pfn) >= pfn_base) && (((pfn)-pfn_base) < max_mapnr))
pfn                18 arch/riscv/include/asm/pgalloc.h 	unsigned long pfn = virt_to_pfn(pte);
pfn                20 arch/riscv/include/asm/pgalloc.h 	set_pmd(pmd, __pmd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
pfn                26 arch/riscv/include/asm/pgalloc.h 	unsigned long pfn = virt_to_pfn(page_address(pte));
pfn                28 arch/riscv/include/asm/pgalloc.h 	set_pmd(pmd, __pmd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
pfn                34 arch/riscv/include/asm/pgalloc.h 	unsigned long pfn = virt_to_pfn(pmd);
pfn                36 arch/riscv/include/asm/pgalloc.h 	set_pud(pud, __pud((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
pfn                68 arch/riscv/include/asm/pgtable-64.h static inline pmd_t pfn_pmd(unsigned long pfn, pgprot_t prot)
pfn                70 arch/riscv/include/asm/pgtable-64.h 	return __pmd((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
pfn               148 arch/riscv/include/asm/pgtable.h static inline pgd_t pfn_pgd(unsigned long pfn, pgprot_t prot)
pfn               150 arch/riscv/include/asm/pgtable.h 	return __pgd((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
pfn               187 arch/riscv/include/asm/pgtable.h static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
pfn               189 arch/riscv/include/asm/pgtable.h 	return __pte((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
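Unlike most constructors in this listing, the riscv ones shift the pfn by _PAGE_PFN_SHIFT rather than PAGE_SHIFT, because the hardware PTE keeps its flag bits low and starts the PPN field above them (bit 10). A pack/unpack sketch under that layout (the flag value is illustrative, not riscv's real encoding):

    #include <stdint.h>
    #include <stdio.h>

    #define _PAGE_PFN_SHIFT 10    /* PPN field starts above the flag bits */

    int main(void)
    {
        uint64_t pfn  = 0x9abc;
        uint64_t prot = 0xcb;     /* illustrative V/R/W/A/D-style flags */

        uint64_t pte  = (pfn << _PAGE_PFN_SHIFT) | prot;  /* pfn_pte() shape */
        uint64_t back = pte >> _PAGE_PFN_SHIFT;           /* pte_pfn() shape */

        printf("pte=%#llx pfn=%#llx\n",
               (unsigned long long)pte, (unsigned long long)back);
        return 0;
    }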
pfn                19 arch/s390/include/asm/numa.h int numa_pfn_to_nid(unsigned long pfn);
pfn                30 arch/s390/include/asm/numa.h static inline int numa_pfn_to_nid(unsigned long pfn)
pfn               148 arch/s390/include/asm/page.h static inline int devmem_is_allowed(unsigned long pfn)
pfn               165 arch/s390/include/asm/page.h #define pfn_to_virt(pfn)	__va((pfn) << PAGE_SHIFT)
pfn               166 arch/s390/include/asm/page.h #define pfn_to_kaddr(pfn)	pfn_to_virt(pfn)
pfn               172 arch/s390/include/asm/page.h #define pfn_to_phys(pfn)	((pfn) << PAGE_SHIFT)
pfn              1287 arch/s390/include/asm/pgtable.h #define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
pfn              1615 arch/s390/include/asm/pgtable.h #define pfn_pmd(pfn, pgprot)	mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
pfn               217 arch/s390/kernel/crash_dump.c ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
pfn               225 arch/s390/kernel/crash_dump.c 	src = (void *) (pfn << PAGE_SHIFT) + offset;
pfn               240 arch/s390/kernel/crash_dump.c 					unsigned long from, unsigned long pfn,
pfn               246 arch/s390/kernel/crash_dump.c 	if (pfn < OLDMEM_SIZE >> PAGE_SHIFT) {
pfn               247 arch/s390/kernel/crash_dump.c 		size_old = min(size, OLDMEM_SIZE - (pfn << PAGE_SHIFT));
pfn               249 arch/s390/kernel/crash_dump.c 				     pfn + (OLDMEM_BASE >> PAGE_SHIFT),
pfn               255 arch/s390/kernel/crash_dump.c 		pfn += size_old >> PAGE_SHIFT;
pfn               257 arch/s390/kernel/crash_dump.c 	return remap_pfn_range(vma, from, pfn, size, prot);
pfn               268 arch/s390/kernel/crash_dump.c 					   unsigned long pfn,
pfn               274 arch/s390/kernel/crash_dump.c 	if (pfn < hsa_end >> PAGE_SHIFT) {
pfn               275 arch/s390/kernel/crash_dump.c 		size_hsa = min(size, hsa_end - (pfn << PAGE_SHIFT));
pfn               280 arch/s390/kernel/crash_dump.c 		pfn += size_hsa >> PAGE_SHIFT;
pfn               282 arch/s390/kernel/crash_dump.c 	return remap_pfn_range(vma, from, pfn, size, prot);
pfn               289 arch/s390/kernel/crash_dump.c 			   unsigned long pfn, unsigned long size, pgprot_t prot)
pfn               292 arch/s390/kernel/crash_dump.c 		return remap_oldmem_pfn_range_kdump(vma, from, pfn, size, prot);
pfn               294 arch/s390/kernel/crash_dump.c 		return remap_oldmem_pfn_range_zfcpdump(vma, from, pfn, size,
pfn               100 arch/s390/kernel/suspend.c void page_key_read(unsigned long *pfn)
pfn               106 arch/s390/kernel/suspend.c 	page = pfn_to_page(*pfn);
pfn               111 arch/s390/kernel/suspend.c 	*(unsigned char *) pfn = key;
pfn               118 arch/s390/kernel/suspend.c void page_key_memorize(unsigned long *pfn)
pfn               120 arch/s390/kernel/suspend.c 	page_key_wp->data[page_key_wx] = *(unsigned char *) pfn;
pfn               121 arch/s390/kernel/suspend.c 	*(unsigned char *) pfn = 0;
pfn               152 arch/s390/kernel/suspend.c int pfn_is_nosave(unsigned long pfn)
pfn               160 arch/s390/kernel/suspend.c 	if (pfn <= LC_PAGES)
pfn               162 arch/s390/kernel/suspend.c 	if (pfn >= nosave_begin_pfn && pfn < nosave_end_pfn)
pfn               165 arch/s390/kernel/suspend.c 	if (pfn >= stext_pfn && pfn <= end_rodata_pfn)
pfn               167 arch/s390/kernel/suspend.c 	if (tprot(PFN_PHYS(pfn)))
pfn               521 arch/s390/numa/mode_emu.c static int emu_pfn_to_nid(unsigned long pfn)
pfn               523 arch/s390/numa/mode_emu.c 	return (pfn / (emu_size >> PAGE_SHIFT)) % emu_nodes;
pfn                41 arch/s390/numa/numa.c int numa_pfn_to_nid(unsigned long pfn)
pfn                43 arch/s390/numa/numa.c 	return mode->__pfn_to_nid ? mode->__pfn_to_nid(pfn) : 0;
pfn                17 arch/s390/numa/numa_mode.h 	int (*__pfn_to_nid)(unsigned long pfn);	/* PFN to node ID */
pfn               123 arch/s390/pci/pci_mmio.c 		    unsigned long *pfn)
pfn               136 arch/s390/pci/pci_mmio.c 	ret = follow_pfn(vma, user_addr, pfn);
pfn               148 arch/s390/pci/pci_mmio.c 	unsigned long pfn;
pfn               177 arch/s390/pci/pci_mmio.c 	ret = get_pfn(mmio_addr, VM_WRITE, &pfn);
pfn               180 arch/s390/pci/pci_mmio.c 	io_addr = (void __iomem *)((pfn << PAGE_SHIFT) |
pfn               275 arch/s390/pci/pci_mmio.c 	unsigned long pfn;
pfn               305 arch/s390/pci/pci_mmio.c 	ret = get_pfn(mmio_addr, VM_READ, &pfn);
pfn               308 arch/s390/pci/pci_mmio.c 	io_addr = (void __iomem *)((pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK));
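The s390 remap_oldmem_pfn_range helpers above split a requested range at a fixed pfn boundary: frames below it are remapped from a relocated copy, the remainder straight through, with pfn and size advanced between the two halves. The splitting arithmetic in isolation (boundary and sizes are made up):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
        uint64_t boundary_pfn = 0x100;   /* e.g. OLDMEM_SIZE >> PAGE_SHIFT */
        uint64_t pfn  = 0x0f0;
        uint64_t size = 0x40000;         /* bytes requested */

        if (pfn < boundary_pfn) {
            /* Bytes that fall below the boundary, capped at the request. */
            uint64_t size_old = (boundary_pfn - pfn) << PAGE_SHIFT;
            if (size_old > size)
                size_old = size;
            printf("map %#llx bytes from the relocated region\n",
                   (unsigned long long)size_old);
            size -= size_old;
            pfn  += size_old >> PAGE_SHIFT;
        }
        if (size)
            printf("map %#llx bytes from pfn %#llx directly\n",
                   (unsigned long long)size, (unsigned long long)pfn);
        return 0;
    }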
pfn                43 arch/sh/include/asm/cacheflush.h 				unsigned long addr, unsigned long pfn);
pfn               391 arch/sh/include/asm/io.h int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
pfn                13 arch/sh/include/asm/mmzone.h static inline int pfn_to_nid(unsigned long pfn)
pfn                18 arch/sh/include/asm/mmzone.h 		if (pfn >= node_start_pfn(nid) && pfn <= node_end_pfn(nid))
pfn                24 arch/sh/include/asm/mmzone.h static inline struct pglist_data *pfn_to_pgdat(unsigned long pfn)
pfn                26 arch/sh/include/asm/mmzone.h 	return NODE_DATA(pfn_to_nid(pfn));
pfn               168 arch/sh/include/asm/page.h #define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
pfn               181 arch/sh/include/asm/page.h #define pfn_valid(pfn)		((pfn) >= min_low_pfn && (pfn) < max_low_pfn)
pfn               318 arch/sh/include/asm/pgtable_32.h #define pfn_pte(pfn, prot) \
pfn               319 arch/sh/include/asm/pgtable_32.h 	__pte(((unsigned long long)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
pfn               320 arch/sh/include/asm/pgtable_32.h #define pfn_pmd(pfn, prot) \
pfn               321 arch/sh/include/asm/pgtable_32.h 	__pmd(((unsigned long long)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
pfn               304 arch/sh/include/asm/pgtable_64.h #define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
pfn               305 arch/sh/include/asm/pgtable_64.h #define pfn_pmd(pfn, prot)	__pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
pfn                26 arch/sh/kernel/crash_dump.c ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
pfn                34 arch/sh/kernel/crash_dump.c 	vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
pfn                50 arch/sh/kernel/dma-coherent.c 	unsigned long pfn = (dma_handle >> PAGE_SHIFT);
pfn                54 arch/sh/kernel/dma-coherent.c 		pfn += dev->dma_pfn_offset;
pfn                57 arch/sh/kernel/dma-coherent.c 		__free_pages(pfn_to_page(pfn + k), 0);
pfn                19 arch/sh/kernel/swsusp.c int pfn_is_nosave(unsigned long pfn)
pfn                24 arch/sh/kernel/swsusp.c 	return (pfn >= begin_pfn) && (pfn < end_pfn);
pfn               209 arch/sh/mm/cache-sh4.c 	unsigned long address, pfn, phys;
pfn               219 arch/sh/mm/cache-sh4.c 	pfn = data->addr2;
pfn               220 arch/sh/mm/cache-sh4.c 	phys = pfn << PAGE_SHIFT;
pfn               221 arch/sh/mm/cache-sh4.c 	page = pfn_to_page(pfn);
pfn               552 arch/sh/mm/cache-sh5.c 	unsigned long eaddr, pfn;
pfn               556 arch/sh/mm/cache-sh5.c 	pfn = data->addr2;
pfn               558 arch/sh/mm/cache-sh5.c 	sh64_dcache_purge_phy_page(pfn << PAGE_SHIFT);
pfn               167 arch/sh/mm/cache-sh7705.c 	unsigned long pfn = data->addr2;
pfn               169 arch/sh/mm/cache-sh7705.c 	__flush_dcache_page(pfn << PAGE_SHIFT);
pfn               140 arch/sh/mm/cache.c 	unsigned long pfn = pte_pfn(pte);
pfn               145 arch/sh/mm/cache.c 	page = pfn_to_page(pfn);
pfn               146 arch/sh/mm/cache.c 	if (pfn_valid(pfn)) {
pfn               194 arch/sh/mm/cache.c 		      unsigned long pfn)
pfn               200 arch/sh/mm/cache.c 	data.addr2 = pfn;
pfn               161 arch/sh/mm/mmap.c int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
pfn                15 arch/sparc/include/asm/cacheflush_32.h #define flush_cache_page(vma,addr,pfn) \
pfn                26 arch/sparc/include/asm/cacheflush_64.h #define flush_cache_page(vma, page, pfn) \
pfn               254 arch/sparc/include/asm/leon.h #define _pfn_valid(pfn)	 ((pfn < last_valid_pfn) && (pfn >= PFN(phys_base)))
pfn               133 arch/sparc/include/asm/page_32.h #define pfn_valid(pfn)		(((pfn) >= (pfn_base)) && (((pfn)-(pfn_base)) < max_mapnr))
pfn               150 arch/sparc/include/asm/page_64.h #define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
pfn               263 arch/sparc/include/asm/pgtable_32.h #define pfn_pte(pfn, prot)		mk_pte(pfn_to_page(pfn), prot)
pfn               406 arch/sparc/include/asm/pgtable_32.h #define MK_IOSPACE_PFN(space, pfn)	(pfn | (space << (BITS_PER_LONG - 4)))
pfn               407 arch/sparc/include/asm/pgtable_32.h #define GET_IOSPACE(pfn)		(pfn >> (BITS_PER_LONG - 4))
pfn               408 arch/sparc/include/asm/pgtable_32.h #define GET_PFN(pfn)			(pfn & 0x0fffffffUL)
pfn               414 arch/sparc/include/asm/pgtable_32.h 				     unsigned long from, unsigned long pfn,
pfn               419 arch/sparc/include/asm/pgtable_32.h 	offset = ((unsigned long long) GET_PFN(pfn)) << PAGE_SHIFT;
pfn               420 arch/sparc/include/asm/pgtable_32.h 	space = GET_IOSPACE(pfn);
pfn               239 arch/sparc/include/asm/pgtable_64.h static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
pfn               241 arch/sparc/include/asm/pgtable_64.h 	unsigned long paddr = pfn << PAGE_SHIFT;
pfn               840 arch/sparc/include/asm/pgtable_64.h 	unsigned long pfn;
pfn               842 arch/sparc/include/asm/pgtable_64.h 	pfn = pte_pfn(pte);
pfn               844 arch/sparc/include/asm/pgtable_64.h 	return ((unsigned long) __va(pfn << PAGE_SHIFT));
pfn               850 arch/sparc/include/asm/pgtable_64.h 	unsigned long pfn;
pfn               852 arch/sparc/include/asm/pgtable_64.h 	pfn = pte_pfn(pte);
pfn               854 arch/sparc/include/asm/pgtable_64.h 	return ((unsigned long) __va(pfn << PAGE_SHIFT));
pfn              1027 arch/sparc/include/asm/pgtable_64.h #define MK_IOSPACE_PFN(space, pfn)	(pfn | (space << (BITS_PER_LONG - 4)))
pfn              1028 arch/sparc/include/asm/pgtable_64.h #define GET_IOSPACE(pfn)		(pfn >> (BITS_PER_LONG - 4))
pfn              1029 arch/sparc/include/asm/pgtable_64.h #define GET_PFN(pfn)			(pfn & 0x0fffffffffffffffUL)
pfn              1068 arch/sparc/include/asm/pgtable_64.h 				     unsigned long from, unsigned long pfn,
pfn              1071 arch/sparc/include/asm/pgtable_64.h 	unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;
pfn              1072 arch/sparc/include/asm/pgtable_64.h 	int space = GET_IOSPACE(pfn);
pfn               281 arch/sparc/mm/init_64.c static void flush_dcache(unsigned long pfn)
pfn               285 arch/sparc/mm/init_64.c 	page = pfn_to_page(pfn);
pfn               424 arch/sparc/mm/init_64.c 		unsigned long pfn = pte_pfn(pte);
pfn               426 arch/sparc/mm/init_64.c 		if (pfn_valid(pfn))
pfn               427 arch/sparc/mm/init_64.c 			flush_dcache(pfn);
pfn                55 arch/sparc/mm/iommu.c #define MKIOPTE(pfn, perm) (((((pfn)<<8) & IOPTE_PAGE) | (perm)) & ~IOPTE_WAZ)
pfn               185 arch/sparc/mm/iommu.c 	unsigned long pfn = __phys_to_pfn(paddr);
pfn               208 arch/sparc/mm/iommu.c 	ioptex = bit_map_string_get(&iommu->usemap, npages, pfn);
pfn               217 arch/sparc/mm/iommu.c 		iopte_val(*iopte) = MKIOPTE(pfn, IOPERM);
pfn               221 arch/sparc/mm/iommu.c 		pfn++;
pfn               119 arch/sparc/mm/tlb.c 		unsigned long paddr, pfn = pte_pfn(orig);
pfn               123 arch/sparc/mm/tlb.c 		if (!pfn_valid(pfn))
pfn               126 arch/sparc/mm/tlb.c 		page = pfn_to_page(pfn);
pfn                22 arch/sparc/power/hibernate.c int pfn_is_nosave(unsigned long pfn)
pfn                27 arch/sparc/power/hibernate.c 	return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
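The sparc MK_IOSPACE_PFN/GET_IOSPACE/GET_PFN macros above pack an I/O space number into the top four bits of a pfn-sized word so a single value can carry both. Their round trip, spelled out for the 64-bit variant (parentheses are added around the macro arguments here for hygiene; the originals rely on their call sites):

    #include <stdint.h>
    #include <stdio.h>

    #define BITS_PER_LONG 64
    #define MK_IOSPACE_PFN(space, pfn) \
        ((pfn) | ((uint64_t)(space) << (BITS_PER_LONG - 4)))
    #define GET_IOSPACE(v) ((v) >> (BITS_PER_LONG - 4))      /* top 4 bits */
    #define GET_PFN(v)     ((v) & 0x0fffffffffffffffULL)     /* low 60 bits */

    int main(void)
    {
        uint64_t cookie = MK_IOSPACE_PFN(0x5, 0x123456ULL);
        printf("space=%#llx pfn=%#llx\n",
               (unsigned long long)GET_IOSPACE(cookie),
               (unsigned long long)GET_PFN(cookie));   /* 0x5 and 0x123456 */
        return 0;
    }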
pfn               109 arch/um/include/asm/page.h #define pfn_to_phys(pfn) PFN_PHYS(pfn)
pfn               111 arch/um/include/asm/page.h #define pfn_valid(pfn) ((pfn) < max_mapnr)
pfn                42 arch/um/include/asm/pgtable-2level.h #define pfn_pte(pfn, prot) __pte(pfn_to_phys(pfn) | pgprot_val(prot))
pfn                43 arch/um/include/asm/pgtable-2level.h #define pfn_pmd(pfn, prot) __pmd(pfn_to_phys(pfn) | pgprot_val(prot))
pfn               131 arch/unicore32/include/asm/cacheflush.h 		unsigned long user_addr, unsigned long pfn);
pfn                60 arch/unicore32/include/asm/io.h static inline int devmem_is_allowed(unsigned long pfn)
pfn                62 arch/unicore32/include/asm/io.h 	if (iomem_is_exclusive(pfn << PAGE_SHIFT))
pfn                64 arch/unicore32/include/asm/io.h 	if (!page_is_ram(pfn))
pfn                81 arch/unicore32/include/asm/memory.h #define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
pfn               151 arch/unicore32/include/asm/pgtable.h #define pfn_pte(pfn, prot)		(__pte(((pfn) << PAGE_SHIFT) \
pfn                72 arch/unicore32/kernel/hibernate.c 	unsigned long pfn;
pfn                80 arch/unicore32/kernel/hibernate.c 	pfn = 0;
pfn                87 arch/unicore32/kernel/hibernate.c 		if (pfn >= max_low_pfn)
pfn                93 arch/unicore32/kernel/hibernate.c 			if (pfn >= max_low_pfn)
pfn               104 arch/unicore32/kernel/hibernate.c 			for (; pte < max_pte; pte++, pfn++) {
pfn               105 arch/unicore32/kernel/hibernate.c 				if (pfn >= max_low_pfn)
pfn               108 arch/unicore32/kernel/hibernate.c 				set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
pfn               142 arch/unicore32/kernel/hibernate.c int pfn_is_nosave(unsigned long pfn)
pfn               147 arch/unicore32/kernel/hibernate.c 	return (pfn >= begin_pfn) && (pfn < end_pfn);
pfn                28 arch/unicore32/mm/flush.c 		unsigned long pfn)
pfn               106 arch/unicore32/mm/init.c int pfn_valid(unsigned long pfn)
pfn               108 arch/unicore32/mm/init.c 	return memblock_is_memory(pfn << PAGE_SHIFT);
pfn                98 arch/unicore32/mm/ioremap.c remap_area_sections(unsigned long virt, unsigned long pfn,
pfn               114 arch/unicore32/mm/ioremap.c 		set_pmd(pmd, __pmd(__pfn_to_phys(pfn) | type->prot_sect));
pfn               115 arch/unicore32/mm/ioremap.c 		pfn += SZ_4M >> PAGE_SHIFT;
pfn               125 arch/unicore32/mm/ioremap.c void __iomem *__uc32_ioremap_pfn_caller(unsigned long pfn,
pfn               136 arch/unicore32/mm/ioremap.c 	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SECTION_MASK))
pfn               142 arch/unicore32/mm/ioremap.c 	if (pfn_valid(pfn)) {
pfn               164 arch/unicore32/mm/ioremap.c 	if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
pfn               166 arch/unicore32/mm/ioremap.c 		err = remap_area_sections(addr, pfn, size, type);
pfn               168 arch/unicore32/mm/ioremap.c 		err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
pfn               185 arch/unicore32/mm/ioremap.c 	unsigned long pfn = __phys_to_pfn(phys_addr);
pfn               194 arch/unicore32/mm/ioremap.c 	return __uc32_ioremap_pfn_caller(pfn, offset, size, mtype, caller);
pfn               207 arch/unicore32/mm/ioremap.c __uc32_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
pfn               210 arch/unicore32/mm/ioremap.c 	return __uc32_ioremap_pfn_caller(pfn, offset, size, mtype,
pfn                77 arch/unicore32/mm/mmu.c 	unsigned long pfn;
pfn               159 arch/unicore32/mm/mmu.c 				  unsigned long end, unsigned long pfn,
pfn               164 arch/unicore32/mm/mmu.c 		set_pte(pte, pfn_pte(pfn, __pgprot(type->prot_pte)));
pfn               165 arch/unicore32/mm/mmu.c 		pfn++;
pfn               212 arch/unicore32/mm/mmu.c 		       __pfn_to_phys((u64)md->pfn), md->virtual);
pfn               220 arch/unicore32/mm/mmu.c 		       __pfn_to_phys((u64)md->pfn), md->virtual);
pfn               226 arch/unicore32/mm/mmu.c 	phys = (unsigned long)__pfn_to_phys(md->pfn);
pfn               232 arch/unicore32/mm/mmu.c 		       __pfn_to_phys(md->pfn), addr);
pfn               366 arch/unicore32/mm/mmu.c 	map.pfn = __phys_to_pfn(virt_to_phys(vectors));
pfn               376 arch/unicore32/mm/mmu.c 	map.pfn = __phys_to_pfn(virt_to_phys(vectors));
pfn               407 arch/unicore32/mm/mmu.c 		map.pfn = __phys_to_pfn(start);
pfn               492 arch/unicore32/mm/mmu.c 	unsigned long pfn = pte_pfn(*ptep);
pfn               496 arch/unicore32/mm/mmu.c 	if (!pfn_valid(pfn))
pfn               503 arch/unicore32/mm/mmu.c 	page = pfn_to_page(pfn);
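__uc32_ioremap_pfn_caller above chooses a section mapping only when physical address, size, and virtual address are all section aligned, testing all three at once by OR-ing them before masking. The alignment test on its own (a 4 MiB section and the sample addresses are assumptions):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SECTION_SIZE (4UL << 20)          /* assume 4 MiB sections */
    #define PMD_MASK     (~(SECTION_SIZE - 1))

    /* OR merges the low bits of all three operands, so one mask test
     * catches a misalignment in any of them. */
    static bool can_use_sections(uint64_t phys, uint64_t size, uint64_t virt)
    {
        return !((phys | size | virt) & ~PMD_MASK);
    }

    int main(void)
    {
        printf("%d %d\n",
               can_use_sections(0x40000000, 0x800000, 0xd0000000),   /* 1 */
               can_use_sections(0x40001000, 0x800000, 0xd0000000));  /* 0 */
        return 0;
    }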
pfn                70 arch/x86/include/asm/highmem.h void *kmap_atomic_pfn(unsigned long pfn);
pfn                71 arch/x86/include/asm/highmem.h void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
pfn               114 arch/x86/include/asm/io.h extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
pfn                17 arch/x86/include/asm/iomap.h iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
pfn                34 arch/x86/include/asm/mmzone_32.h static inline int pfn_to_nid(unsigned long pfn)
pfn                37 arch/x86/include/asm/mmzone_32.h 	return((int) physnode_map[(pfn) / PAGES_PER_SECTION]);
pfn                43 arch/x86/include/asm/mmzone_32.h static inline int pfn_valid(int pfn)
pfn                45 arch/x86/include/asm/mmzone_32.h 	int nid = pfn_to_nid(pfn);
pfn                48 arch/x86/include/asm/mmzone_32.h 		return (pfn < node_end_pfn(nid));
pfn                52 arch/x86/include/asm/mmzone_32.h #define early_pfn_valid(pfn)	pfn_valid((pfn))
pfn                70 arch/x86/include/asm/page.h #define pfn_to_kaddr(pfn)      __va((pfn) << PAGE_SHIFT)
pfn                19 arch/x86/include/asm/page_32.h #define pfn_valid(pfn)		((pfn) < max_mapnr)
pfn                40 arch/x86/include/asm/page_64.h #define pfn_valid(pfn)          ((pfn) < max_pfn)
pfn               324 arch/x86/include/asm/paravirt.h static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
pfn               326 arch/x86/include/asm/paravirt.h 	PVOP_VCALL2(mmu.alloc_pte, mm, pfn);
pfn               328 arch/x86/include/asm/paravirt.h static inline void paravirt_release_pte(unsigned long pfn)
pfn               330 arch/x86/include/asm/paravirt.h 	PVOP_VCALL1(mmu.release_pte, pfn);
pfn               333 arch/x86/include/asm/paravirt.h static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
pfn               335 arch/x86/include/asm/paravirt.h 	PVOP_VCALL2(mmu.alloc_pmd, mm, pfn);
pfn               338 arch/x86/include/asm/paravirt.h static inline void paravirt_release_pmd(unsigned long pfn)
pfn               340 arch/x86/include/asm/paravirt.h 	PVOP_VCALL1(mmu.release_pmd, pfn);
pfn               343 arch/x86/include/asm/paravirt.h static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
pfn               345 arch/x86/include/asm/paravirt.h 	PVOP_VCALL2(mmu.alloc_pud, mm, pfn);
pfn               347 arch/x86/include/asm/paravirt.h static inline void paravirt_release_pud(unsigned long pfn)
pfn               349 arch/x86/include/asm/paravirt.h 	PVOP_VCALL1(mmu.release_pud, pfn);
pfn               352 arch/x86/include/asm/paravirt.h static inline void paravirt_alloc_p4d(struct mm_struct *mm, unsigned long pfn)
pfn               354 arch/x86/include/asm/paravirt.h 	PVOP_VCALL2(mmu.alloc_p4d, mm, pfn);
pfn               357 arch/x86/include/asm/paravirt.h static inline void paravirt_release_p4d(unsigned long pfn)
pfn               359 arch/x86/include/asm/paravirt.h 	PVOP_VCALL1(mmu.release_p4d, pfn);
pfn               238 arch/x86/include/asm/paravirt_types.h 	void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn);
pfn               239 arch/x86/include/asm/paravirt_types.h 	void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn);
pfn               240 arch/x86/include/asm/paravirt_types.h 	void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn);
pfn               241 arch/x86/include/asm/paravirt_types.h 	void (*alloc_p4d)(struct mm_struct *mm, unsigned long pfn);
pfn               242 arch/x86/include/asm/paravirt_types.h 	void (*release_pte)(unsigned long pfn);
pfn               243 arch/x86/include/asm/paravirt_types.h 	void (*release_pmd)(unsigned long pfn);
pfn               244 arch/x86/include/asm/paravirt_types.h 	void (*release_pud)(unsigned long pfn);
pfn               245 arch/x86/include/asm/paravirt_types.h 	void (*release_p4d)(unsigned long pfn);
pfn                25 arch/x86/include/asm/pat.h bool pat_pfn_immune_to_uc_mtrr(unsigned long pfn);
pfn                19 arch/x86/include/asm/pgalloc.h static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)	{}
pfn                20 arch/x86/include/asm/pgalloc.h static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)	{}
pfn                21 arch/x86/include/asm/pgalloc.h static inline void paravirt_alloc_pmd_clone(unsigned long pfn, unsigned long clonepfn,
pfn                23 arch/x86/include/asm/pgalloc.h static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)	{}
pfn                24 arch/x86/include/asm/pgalloc.h static inline void paravirt_alloc_p4d(struct mm_struct *mm, unsigned long pfn)	{}
pfn                25 arch/x86/include/asm/pgalloc.h static inline void paravirt_release_pte(unsigned long pfn) {}
pfn                26 arch/x86/include/asm/pgalloc.h static inline void paravirt_release_pmd(unsigned long pfn) {}
pfn                27 arch/x86/include/asm/pgalloc.h static inline void paravirt_release_pud(unsigned long pfn) {}
pfn                28 arch/x86/include/asm/pgalloc.h static inline void paravirt_release_p4d(unsigned long pfn) {}
pfn                80 arch/x86/include/asm/pgalloc.h 	unsigned long pfn = page_to_pfn(pte);
pfn                82 arch/x86/include/asm/pgalloc.h 	paravirt_alloc_pte(mm, pfn);
pfn                83 arch/x86/include/asm/pgalloc.h 	set_pmd(pmd, __pmd(((pteval_t)pfn << PAGE_SHIFT) | _PAGE_TABLE));
pfn               213 arch/x86/include/asm/pgtable.h 	phys_addr_t pfn = pte_val(pte);
pfn               214 arch/x86/include/asm/pgtable.h 	pfn ^= protnone_mask(pfn);
pfn               215 arch/x86/include/asm/pgtable.h 	return (pfn & PTE_PFN_MASK) >> PAGE_SHIFT;
pfn               220 arch/x86/include/asm/pgtable.h 	phys_addr_t pfn = pmd_val(pmd);
pfn               221 arch/x86/include/asm/pgtable.h 	pfn ^= protnone_mask(pfn);
pfn               222 arch/x86/include/asm/pgtable.h 	return (pfn & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
pfn               227 arch/x86/include/asm/pgtable.h 	phys_addr_t pfn = pud_val(pud);
pfn               228 arch/x86/include/asm/pgtable.h 	pfn ^= protnone_mask(pfn);
pfn               229 arch/x86/include/asm/pgtable.h 	return (pfn & pud_pfn_mask(pud)) >> PAGE_SHIFT;
pfn               568 arch/x86/include/asm/pgtable.h 	phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
pfn               569 arch/x86/include/asm/pgtable.h 	pfn ^= protnone_mask(pgprot_val(pgprot));
pfn               570 arch/x86/include/asm/pgtable.h 	pfn &= PTE_PFN_MASK;
pfn               571 arch/x86/include/asm/pgtable.h 	return __pte(pfn | check_pgprot(pgprot));
pfn               576 arch/x86/include/asm/pgtable.h 	phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
pfn               577 arch/x86/include/asm/pgtable.h 	pfn ^= protnone_mask(pgprot_val(pgprot));
pfn               578 arch/x86/include/asm/pgtable.h 	pfn &= PHYSICAL_PMD_PAGE_MASK;
pfn               579 arch/x86/include/asm/pgtable.h 	return __pmd(pfn | check_pgprot(pgprot));
pfn               584 arch/x86/include/asm/pgtable.h 	phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
pfn               585 arch/x86/include/asm/pgtable.h 	pfn ^= protnone_mask(pgprot_val(pgprot));
pfn               586 arch/x86/include/asm/pgtable.h 	pfn &= PHYSICAL_PUD_PAGE_MASK;
pfn               587 arch/x86/include/asm/pgtable.h 	return __pud(pfn | check_pgprot(pgprot));
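
The pte_pfn/pmd_pfn/pud_pfn extractors and the pfn_pte/pfn_pmd/pfn_pud constructors above share one shape: shift the frame number to a physical address, mask it to that level's frame field, and OR in the protection bits. A standalone sketch of the composition; PAGE_SHIFT and PTE_PFN_MASK carry plausible x86-64 values but are illustrative, and the protnone_mask/check_pgprot handling of the real constructors is omitted:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT   12
    #define PTE_PFN_MASK 0x000ffffffffff000ULL   /* frame field: bits 12..51 */

    static uint64_t mk_pte(uint64_t pfn, uint64_t prot)
    {
            uint64_t pa = pfn << PAGE_SHIFT;     /* frame number -> phys addr */
            return (pa & PTE_PFN_MASK) | prot;   /* keep frame, add prot bits */
    }

    int main(void)
    {
            uint64_t pte = mk_pte(0x1234, 0x63); /* 0x63: present/rw/acc/dirty */
            printf("pte=%#llx pfn=%#llx\n",
                   (unsigned long long)pte,
                   (unsigned long long)((pte & PTE_PFN_MASK) >> PAGE_SHIFT));
            return 0;
    }
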
pfn              1463 arch/x86/include/asm/pgtable.h extern bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot);
pfn               525 arch/x86/include/asm/pgtable_types.h pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
pfn               566 arch/x86/include/asm/pgtable_types.h extern int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn,
pfn                94 arch/x86/include/asm/set_memory.h static inline int set_mce_nospec(unsigned long pfn, bool unmap)
pfn               111 arch/x86/include/asm/set_memory.h 	decoy_addr = (pfn << PAGE_SHIFT) + (PAGE_OFFSET ^ BIT(63));
pfn               118 arch/x86/include/asm/set_memory.h 		pr_warn("Could not invalidate pfn=0x%lx from 1:1 map\n", pfn);
pfn               124 arch/x86/include/asm/set_memory.h static inline int clear_mce_nospec(unsigned long pfn)
pfn               126 arch/x86/include/asm/set_memory.h 	return set_memory_wb((unsigned long) pfn_to_kaddr(pfn), 1);
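
set_mce_nospec() above operates on a "decoy" virtual address: XOR-ing BIT(63) out of PAGE_OFFSET leaves the low bits -- and therefore the target pfn -- intact, but makes the address non-canonical, so the CPU cannot speculatively load through the poisoned page while its attributes are rewritten (clear_mce_nospec() later restores things through the ordinary pfn_to_kaddr mapping). A userspace sketch of just that arithmetic; the PAGE_OFFSET value is one plausible x86-64 direct-map base, not a fixed constant:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT  12
    #define BIT63       (1ULL << 63)
    #define PAGE_OFFSET 0xffff888000000000ULL   /* illustrative direct-map base */

    int main(void)
    {
            uint64_t pfn = 0x12345;
            /* Bit 63 is set in any direct-map address; clearing it makes the
             * result non-canonical while preserving the pfn in the low bits. */
            uint64_t decoy = (pfn << PAGE_SHIFT) + (PAGE_OFFSET ^ BIT63);
            printf("decoy=%#llx (bit 63 clear -> non-canonical)\n",
                   (unsigned long long)decoy);
            return 0;
    }
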
pfn              1129 arch/x86/include/asm/uv/uv_mmrs.h 		unsigned long	pfn:41;				/* RO */
pfn              1136 arch/x86/include/asm/uv/uv_mmrs.h 		unsigned long	pfn:41;				/* RO */
pfn              1143 arch/x86/include/asm/uv/uv_mmrs.h 		unsigned long	pfn:41;				/* RO */
pfn              1152 arch/x86/include/asm/uv/uv_mmrs.h 		unsigned long	pfn:34;				/* RO */
pfn              1633 arch/x86/include/asm/uv/uv_mmrs.h 		unsigned long	pfn:41;				/* RO */
pfn              1640 arch/x86/include/asm/uv/uv_mmrs.h 		unsigned long	pfn:41;				/* RO */
pfn              1647 arch/x86/include/asm/uv/uv_mmrs.h 		unsigned long	pfn:41;				/* RO */
pfn              1656 arch/x86/include/asm/uv/uv_mmrs.h 		unsigned long	pfn:34;				/* RO */
pfn               100 arch/x86/include/asm/xen/interface_32.h #define xen_pfn_to_cr3(pfn) (((unsigned)(pfn) << 12) | ((unsigned)(pfn) >> 20))
pfn               133 arch/x86/include/asm/xen/interface_64.h #define xen_pfn_to_cr3(pfn) ((unsigned long)(pfn) << 12)
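
On 32-bit PAE, Xen packs a full 32-bit pfn into CR3 by rotating it -- the low 20 bits move up by 12, the high 12 bits drop to the bottom -- which is why the 32-bit macro above has two halves while the 64-bit one is a plain shift. A round-trip sketch of that rotation (unpack mirrors the header's inverse xen_cr3_to_pfn counterpart):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t pack(uint32_t pfn)   { return (pfn << 12) | (pfn >> 20); }
    static uint32_t unpack(uint32_t cr3) { return (cr3 >> 12) | (cr3 << 20); }

    int main(void)
    {
            uint32_t pfn = 0xABCDE123u;
            assert(unpack(pack(pfn)) == pfn);  /* the rotation loses no bits */
            printf("cr3=%#x\n", pack(pfn));    /* 0xDE123ABC */
            return 0;
    }
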
pfn                57 arch/x86/include/asm/xen/page.h extern int xen_alloc_p2m_entry(unsigned long pfn);
pfn                59 arch/x86/include/asm/xen/page.h extern unsigned long get_phys_to_machine(unsigned long pfn);
pfn                60 arch/x86/include/asm/xen/page.h extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn);
pfn                61 arch/x86/include/asm/xen/page.h extern bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
pfn               142 arch/x86/include/asm/xen/page.h static inline unsigned long __pfn_to_mfn(unsigned long pfn)
pfn               146 arch/x86/include/asm/xen/page.h 	if (pfn < xen_p2m_size)
pfn               147 arch/x86/include/asm/xen/page.h 		mfn = xen_p2m_addr[pfn];
pfn               148 arch/x86/include/asm/xen/page.h 	else if (unlikely(pfn < xen_max_p2m_pfn))
pfn               149 arch/x86/include/asm/xen/page.h 		return get_phys_to_machine(pfn);
pfn               151 arch/x86/include/asm/xen/page.h 		return IDENTITY_FRAME(pfn);
pfn               154 arch/x86/include/asm/xen/page.h 		return get_phys_to_machine(pfn);
pfn               159 arch/x86/include/asm/xen/page.h static inline unsigned long __pfn_to_mfn(unsigned long pfn)
pfn               161 arch/x86/include/asm/xen/page.h 	return pfn;
pfn               165 arch/x86/include/asm/xen/page.h static inline unsigned long pfn_to_mfn(unsigned long pfn)
pfn               175 arch/x86/include/asm/xen/page.h 		return pfn;
pfn               177 arch/x86/include/asm/xen/page.h 	mfn = __pfn_to_mfn(pfn);
pfn               185 arch/x86/include/asm/xen/page.h static inline int phys_to_machine_mapping_valid(unsigned long pfn)
pfn               190 arch/x86/include/asm/xen/page.h 	return __pfn_to_mfn(pfn) != INVALID_P2M_ENTRY;
pfn               195 arch/x86/include/asm/xen/page.h 	unsigned long pfn;
pfn               206 arch/x86/include/asm/xen/page.h 	ret = xen_safe_read_ulong(&machine_to_phys_mapping[mfn], &pfn);
pfn               210 arch/x86/include/asm/xen/page.h 	return pfn;
pfn               215 arch/x86/include/asm/xen/page.h 	unsigned long pfn;
pfn               225 arch/x86/include/asm/xen/page.h 	pfn = mfn_to_pfn_no_overrides(mfn);
pfn               226 arch/x86/include/asm/xen/page.h 	if (__pfn_to_mfn(pfn) != mfn)
pfn               227 arch/x86/include/asm/xen/page.h 		pfn = ~0;
pfn               233 arch/x86/include/asm/xen/page.h 	if (pfn == ~0 && __pfn_to_mfn(mfn) == IDENTITY_FRAME(mfn))
pfn               234 arch/x86/include/asm/xen/page.h 		pfn = mfn;
pfn               236 arch/x86/include/asm/xen/page.h 	return pfn;
pfn               252 arch/x86/include/asm/xen/page.h static inline unsigned long pfn_to_gfn(unsigned long pfn)
pfn               255 arch/x86/include/asm/xen/page.h 		return pfn;
pfn               257 arch/x86/include/asm/xen/page.h 		return pfn_to_mfn(pfn);
pfn               269 arch/x86/include/asm/xen/page.h #define pfn_to_bfn(pfn)		pfn_to_gfn(pfn)
pfn               294 arch/x86/include/asm/xen/page.h 	unsigned long pfn;
pfn               299 arch/x86/include/asm/xen/page.h 	pfn = mfn_to_pfn(mfn);
pfn               300 arch/x86/include/asm/xen/page.h 	if (__pfn_to_mfn(pfn) != mfn)
pfn               302 arch/x86/include/asm/xen/page.h 	return pfn;
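
mfn_to_pfn() above never trusts the m2p lookup alone: the proposed pfn only stands if translating it back through __pfn_to_mfn() reproduces the original mfn, which is exactly how foreign or ballooned frames are caught. A toy model of that two-table cross-check; the eight-entry arrays merely stand in for the real p2m list and machine_to_phys_mapping:

    #include <stdio.h>

    #define NPAGES  8
    #define INVALID (~0UL)

    static unsigned long p2m[NPAGES] = {5, 2, 7, 0, 1, 3, 6, 4}; /* pfn -> mfn */
    static unsigned long m2p[NPAGES] = {3, 4, 1, 5, 7, 0, 6, 2}; /* mfn -> pfn */

    static unsigned long toy_mfn_to_pfn(unsigned long mfn)
    {
            unsigned long pfn = m2p[mfn];
            if (p2m[pfn] != mfn)    /* stale/foreign entry: round trip fails */
                    return INVALID;
            return pfn;
    }

    int main(void)
    {
            printf("mfn 5 -> pfn %lu\n", toy_mfn_to_pfn(5)); /* 0: p2m[0]==5 */
            return 0;
    }
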
pfn                70 arch/x86/kernel/aperture_64.c static int gart_mem_pfn_is_ram(unsigned long pfn)
pfn                72 arch/x86/kernel/aperture_64.c 	return likely((pfn < aperture_pfn_start) ||
pfn                73 arch/x86/kernel/aperture_64.c 		      (pfn >= aperture_pfn_start + aperture_page_count));
pfn               506 arch/x86/kernel/cpu/amd.c 			unsigned long pfn = tseg >> PAGE_SHIFT;
pfn               509 arch/x86/kernel/cpu/amd.c 			if (pfn_range_is_mapped(pfn, pfn + 1))
pfn               215 arch/x86/kernel/cpu/hygon.c 		unsigned long pfn = tseg >> PAGE_SHIFT;
pfn               218 arch/x86/kernel/cpu/hygon.c 		if (pfn_range_is_mapped(pfn, pfn + 1))
pfn               603 arch/x86/kernel/cpu/mce/core.c 	unsigned long pfn;
pfn               609 arch/x86/kernel/cpu/mce/core.c 		pfn = mce->addr >> PAGE_SHIFT;
pfn               610 arch/x86/kernel/cpu/mce/core.c 		if (!memory_failure(pfn, 0))
pfn               611 arch/x86/kernel/cpu/mce/core.c 			set_mce_nospec(pfn, whole_page(mce));
pfn              1372 arch/x86/kernel/cpu/mce/core.c int memory_failure(unsigned long pfn, int flags)
pfn              1378 arch/x86/kernel/cpu/mce/core.c 	       pfn);
pfn                18 arch/x86/kernel/crash_dump_32.c static inline bool is_crashed_pfn_valid(unsigned long pfn)
pfn                28 arch/x86/kernel/crash_dump_32.c 	return pte_pfn(pfn_pte(pfn, __pgprot(0))) == pfn;
pfn                51 arch/x86/kernel/crash_dump_32.c ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
pfn                59 arch/x86/kernel/crash_dump_32.c 	if (!is_crashed_pfn_valid(pfn))
pfn                62 arch/x86/kernel/crash_dump_32.c 	vaddr = kmap_atomic_pfn(pfn);
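
is_crashed_pfn_valid() above is a width check in disguise: a pfn handed in through the oldmem interface is usable only if it survives the round trip through a PTE's frame field. A standalone sketch with an illustrative 40-bit frame field (bits 12..51, as on x86-64):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT   12
    #define PTE_PFN_MASK 0x000ffffffffff000ULL

    static int pfn_fits_in_pte(uint64_t pfn)
    {
            uint64_t pte = (pfn << PAGE_SHIFT) & PTE_PFN_MASK;  /* pfn_pte(pfn, 0) */
            return ((pte & PTE_PFN_MASK) >> PAGE_SHIFT) == pfn; /* pte_pfn()       */
    }

    int main(void)
    {
            printf("%d %d\n", pfn_fits_in_pte(1ULL << 39),   /* 1: lands on bit 51 */
                              pfn_fits_in_pte(1ULL << 40));  /* 0: truncated away  */
            return 0;
    }
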
pfn                14 arch/x86/kernel/crash_dump_64.c static ssize_t __copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
pfn                24 arch/x86/kernel/crash_dump_64.c 		vaddr = (__force void *)ioremap_encrypted(pfn << PAGE_SHIFT, PAGE_SIZE);
pfn                26 arch/x86/kernel/crash_dump_64.c 		vaddr = (__force void *)ioremap_cache(pfn << PAGE_SHIFT, PAGE_SIZE);
pfn                57 arch/x86/kernel/crash_dump_64.c ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
pfn                60 arch/x86/kernel/crash_dump_64.c 	return __copy_oldmem_page(pfn, buf, csize, offset, userbuf, false);
pfn                68 arch/x86/kernel/crash_dump_64.c ssize_t copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize,
pfn                71 arch/x86/kernel/crash_dump_64.c 	return __copy_oldmem_page(pfn, buf, csize, offset, userbuf, true);
pfn               741 arch/x86/kernel/e820.c 	unsigned long pfn = 0;
pfn               746 arch/x86/kernel/e820.c 		if (pfn < PFN_UP(entry->addr))
pfn               747 arch/x86/kernel/e820.c 			register_nosave_region(pfn, PFN_UP(entry->addr));
pfn               749 arch/x86/kernel/e820.c 		pfn = PFN_DOWN(entry->addr + entry->size);
pfn               752 arch/x86/kernel/e820.c 			register_nosave_region(PFN_UP(entry->addr), pfn);
pfn               754 arch/x86/kernel/e820.c 		if (pfn >= limit_pfn)
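
The e820 walk above turns byte ranges into pfn ranges with the usual rounding convention: PFN_UP for the first fully covered page of an entry, PFN_DOWN for one past its last, so partially covered edge pages fall into the nosave gaps. A small sketch of that rounding, assuming 4 KiB pages:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
    #define PFN_DOWN(x) ((x) >> PAGE_SHIFT)

    int main(void)
    {
            unsigned long start = 0x1234, end = 0x5678;
            /* Only fully covered pages count: round start up, end down. */
            printf("pfns [%lu, %lu)\n", PFN_UP(start), PFN_DOWN(end)); /* [2, 5) */
            return 0;
    }
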
pfn               230 arch/x86/kernel/ldt.c 		unsigned long pfn;
pfn               235 arch/x86/kernel/ldt.c 		pfn = is_vmalloc ? vmalloc_to_pfn(src) :
pfn               253 arch/x86/kernel/ldt.c 		pte = pfn_pte(pfn, pte_prot);
pfn               104 arch/x86/kernel/tboot.c static int map_tboot_page(unsigned long vaddr, unsigned long pfn,
pfn               126 arch/x86/kernel/tboot.c 	set_pte_at(&tboot_mm, vaddr, pte, pfn_pte(pfn, prot));
pfn               489 arch/x86/kvm/mmu.c 			  kvm_pfn_t pfn, unsigned access)
pfn               491 arch/x86/kvm/mmu.c 	if (unlikely(is_noslot_pfn(pfn))) {
pfn               918 arch/x86/kvm/mmu.c 	kvm_pfn_t pfn;
pfn               929 arch/x86/kvm/mmu.c 	pfn = spte_to_pfn(old_spte);
pfn               936 arch/x86/kvm/mmu.c 	WARN_ON(!kvm_is_reserved_pfn(pfn) && !page_count(pfn_to_page(pfn)));
pfn               939 arch/x86/kvm/mmu.c 		kvm_set_pfn_accessed(pfn);
pfn               942 arch/x86/kvm/mmu.c 		kvm_set_pfn_dirty(pfn);
pfn              3024 arch/x86/kvm/mmu.c static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
pfn              3026 arch/x86/kvm/mmu.c 	if (pfn_valid(pfn))
pfn              3027 arch/x86/kvm/mmu.c 		return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn)) &&
pfn              3038 arch/x86/kvm/mmu.c 			(!pat_enabled() || pat_pfn_immune_to_uc_mtrr(pfn));
pfn              3040 arch/x86/kvm/mmu.c 	return !e820__mapped_raw_any(pfn_to_hpa(pfn),
pfn              3041 arch/x86/kvm/mmu.c 				     pfn_to_hpa(pfn + 1) - 1,
pfn              3051 arch/x86/kvm/mmu.c 		    gfn_t gfn, kvm_pfn_t pfn, bool speculative,
pfn              3058 arch/x86/kvm/mmu.c 	if (set_mmio_spte(vcpu, sptep, gfn, pfn, pte_access))
pfn              3094 arch/x86/kvm/mmu.c 			kvm_is_mmio_pfn(pfn));
pfn              3101 arch/x86/kvm/mmu.c 	if (!kvm_is_mmio_pfn(pfn))
pfn              3104 arch/x86/kvm/mmu.c 	spte |= (u64)pfn << PAGE_SHIFT;
pfn              3154 arch/x86/kvm/mmu.c 			int write_fault, int level, gfn_t gfn, kvm_pfn_t pfn,
pfn              3179 arch/x86/kvm/mmu.c 		} else if (pfn != spte_to_pfn(*sptep)) {
pfn              3181 arch/x86/kvm/mmu.c 				 spte_to_pfn(*sptep), pfn);
pfn              3188 arch/x86/kvm/mmu.c 	set_spte_ret = set_spte(vcpu, sptep, pte_access, level, gfn, pfn,
pfn              3326 arch/x86/kvm/mmu.c 			int map_writable, int level, kvm_pfn_t pfn,
pfn              3338 arch/x86/kvm/mmu.c 	trace_kvm_mmu_spte_requested(gpa, level, pfn);
pfn              3344 arch/x86/kvm/mmu.c 		disallowed_hugepage_adjust(it, gfn, &pfn, &level);
pfn              3362 arch/x86/kvm/mmu.c 			   write, level, base_gfn, pfn, prefault,
pfn              3374 arch/x86/kvm/mmu.c static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
pfn              3381 arch/x86/kvm/mmu.c 	if (pfn == KVM_PFN_ERR_RO_FAULT)
pfn              3384 arch/x86/kvm/mmu.c 	if (pfn == KVM_PFN_ERR_HWPOISON) {
pfn              3396 arch/x86/kvm/mmu.c 	kvm_pfn_t pfn = *pfnp;
pfn              3405 arch/x86/kvm/mmu.c 	if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) &&
pfn              3406 arch/x86/kvm/mmu.c 	    !kvm_is_zone_device_pfn(pfn) && level == PT_PAGE_TABLE_LEVEL &&
pfn              3407 arch/x86/kvm/mmu.c 	    PageTransCompoundMap(pfn_to_page(pfn)) &&
pfn              3421 arch/x86/kvm/mmu.c 		VM_BUG_ON((gfn & mask) != (pfn & mask));
pfn              3422 arch/x86/kvm/mmu.c 		if (pfn & mask) {
pfn              3423 arch/x86/kvm/mmu.c 			kvm_release_pfn_clean(pfn);
pfn              3424 arch/x86/kvm/mmu.c 			pfn &= ~mask;
pfn              3425 arch/x86/kvm/mmu.c 			kvm_get_pfn(pfn);
pfn              3426 arch/x86/kvm/mmu.c 			*pfnp = pfn;
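
transparent_hugepage_adjust() above may only widen a mapping when the host pfn shares the guest frame's offset inside the huge page; it then masks the pfn down to the huge-page base (dropping and retaking the page reference around the change). A sketch of the alignment arithmetic for a 2 MiB mapping, with illustrative frame numbers:

    #include <stdio.h>

    #define HPAGE_PFNS 512UL   /* 4 KiB pages per 2 MiB page */

    int main(void)
    {
            unsigned long mask = HPAGE_PFNS - 1;
            unsigned long gfn = 0x1234, pfn = 0x8a34;

            /* Precondition (the VM_BUG_ON above): same offset in the hugepage. */
            if ((gfn & mask) == (pfn & mask))
                    printf("huge mapping base pfn = %#lx\n", pfn & ~mask);
            return 0;
    }
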
pfn              3432 arch/x86/kvm/mmu.c 				kvm_pfn_t pfn, unsigned access, int *ret_val)
pfn              3435 arch/x86/kvm/mmu.c 	if (unlikely(is_error_pfn(pfn))) {
pfn              3436 arch/x86/kvm/mmu.c 		*ret_val = kvm_handle_bad_page(vcpu, gfn, pfn);
pfn              3440 arch/x86/kvm/mmu.c 	if (unlikely(is_noslot_pfn(pfn)))
pfn              3642 arch/x86/kvm/mmu.c 			 gpa_t cr2_or_gpa, kvm_pfn_t *pfn, bool write,
pfn              3652 arch/x86/kvm/mmu.c 	kvm_pfn_t pfn;
pfn              3678 arch/x86/kvm/mmu.c 	if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable))
pfn              3681 arch/x86/kvm/mmu.c 	if (handle_abnormal_pfn(vcpu, gpa, gfn, pfn, ACC_ALL, &r))
pfn              3691 arch/x86/kvm/mmu.c 		transparent_hugepage_adjust(vcpu, gfn, &pfn, &level);
pfn              3692 arch/x86/kvm/mmu.c 	r = __direct_map(vcpu, gpa, write, map_writable, level, pfn,
pfn              3696 arch/x86/kvm/mmu.c 	kvm_release_pfn_clean(pfn);
pfn              4196 arch/x86/kvm/mmu.c 			 gpa_t cr2_or_gpa, kvm_pfn_t *pfn, bool write,
pfn              4206 arch/x86/kvm/mmu.c 		*pfn = KVM_PFN_NOSLOT;
pfn              4212 arch/x86/kvm/mmu.c 	*pfn = __gfn_to_pfn_memslot(slot, gfn, false, &async, write, writable);
pfn              4226 arch/x86/kvm/mmu.c 	*pfn = __gfn_to_pfn_memslot(slot, gfn, false, NULL, write, writable);
pfn              4281 arch/x86/kvm/mmu.c 	kvm_pfn_t pfn;
pfn              4318 arch/x86/kvm/mmu.c 	if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable))
pfn              4321 arch/x86/kvm/mmu.c 	if (handle_abnormal_pfn(vcpu, 0, gfn, pfn, ACC_ALL, &r))
pfn              4331 arch/x86/kvm/mmu.c 		transparent_hugepage_adjust(vcpu, gfn, &pfn, &level);
pfn              4332 arch/x86/kvm/mmu.c 	r = __direct_map(vcpu, gpa, write, map_writable, level, pfn,
pfn              4336 arch/x86/kvm/mmu.c 	kvm_release_pfn_clean(pfn);
pfn              6018 arch/x86/kvm/mmu.c 	kvm_pfn_t pfn;
pfn              6024 arch/x86/kvm/mmu.c 		pfn = spte_to_pfn(*sptep);
pfn              6033 arch/x86/kvm/mmu.c 		if (sp->role.direct && !kvm_is_reserved_pfn(pfn) &&
pfn              6034 arch/x86/kvm/mmu.c 		    !kvm_is_zone_device_pfn(pfn) &&
pfn              6035 arch/x86/kvm/mmu.c 		    PageTransCompoundMap(pfn_to_page(pfn))) {
pfn                97 arch/x86/kvm/mmu_audit.c 	kvm_pfn_t pfn;
pfn               114 arch/x86/kvm/mmu_audit.c 	pfn = kvm_vcpu_gfn_to_pfn_atomic(vcpu, gfn);
pfn               116 arch/x86/kvm/mmu_audit.c 	if (is_error_pfn(pfn))
pfn               119 arch/x86/kvm/mmu_audit.c 	hpa =  pfn << PAGE_SHIFT;
pfn               122 arch/x86/kvm/mmu_audit.c 			     "ent %llx\n", vcpu->arch.mmu->root_level, pfn,
pfn               367 arch/x86/kvm/mmutrace.h 	TP_PROTO(gpa_t addr, int level, kvm_pfn_t pfn),
pfn               368 arch/x86/kvm/mmutrace.h 	TP_ARGS(addr, level, pfn),
pfn               372 arch/x86/kvm/mmutrace.h 		__field(u64, pfn)
pfn               378 arch/x86/kvm/mmutrace.h 		__entry->pfn = pfn | (__entry->gfn & (KVM_PAGES_PER_HPAGE(level) - 1));
pfn               383 arch/x86/kvm/mmutrace.h 		  __entry->gfn, __entry->pfn, __entry->level
pfn               150 arch/x86/kvm/paging_tmpl.h 		unsigned long pfn;
pfn               159 arch/x86/kvm/paging_tmpl.h 		pfn = ((vaddr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
pfn               160 arch/x86/kvm/paging_tmpl.h 		paddr = pfn << PAGE_SHIFT;
pfn               521 arch/x86/kvm/paging_tmpl.h 	kvm_pfn_t pfn;
pfn               531 arch/x86/kvm/paging_tmpl.h 	pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
pfn               533 arch/x86/kvm/paging_tmpl.h 	if (is_error_pfn(pfn))
pfn               540 arch/x86/kvm/paging_tmpl.h 	mmu_set_spte(vcpu, spte, pte_access, 0, PT_PAGE_TABLE_LEVEL, gfn, pfn,
pfn               543 arch/x86/kvm/paging_tmpl.h 	kvm_release_pfn_clean(pfn);
pfn               617 arch/x86/kvm/paging_tmpl.h 			 kvm_pfn_t pfn, bool map_writable, bool prefault,
pfn               676 arch/x86/kvm/paging_tmpl.h 	trace_kvm_mmu_spte_requested(addr, gw->level, pfn);
pfn               685 arch/x86/kvm/paging_tmpl.h 		disallowed_hugepage_adjust(it, gfn, &pfn, &hlevel);
pfn               705 arch/x86/kvm/paging_tmpl.h 			   it.level, base_gfn, pfn, prefault, map_writable);
pfn               775 arch/x86/kvm/paging_tmpl.h 	kvm_pfn_t pfn;
pfn               833 arch/x86/kvm/paging_tmpl.h 	if (try_async_pf(vcpu, prefault, walker.gfn, addr, &pfn, write_fault,
pfn               837 arch/x86/kvm/paging_tmpl.h 	if (handle_abnormal_pfn(vcpu, addr, walker.gfn, pfn, walker.pte_access, &r))
pfn               846 arch/x86/kvm/paging_tmpl.h 	      !is_noslot_pfn(pfn)) {
pfn               869 arch/x86/kvm/paging_tmpl.h 		transparent_hugepage_adjust(vcpu, walker.gfn, &pfn, &level);
pfn               871 arch/x86/kvm/paging_tmpl.h 			 level, pfn, map_writable, prefault, lpage_disallowed);
pfn               876 arch/x86/kvm/paging_tmpl.h 	kvm_release_pfn_clean(pfn);
pfn              2970 arch/x86/kvm/vmx/nested.c 			vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, pfn_to_hpa(map->pfn));
pfn              3000 arch/x86/kvm/vmx/nested.c 				     pfn_to_hpa(map->pfn) + offset_in_page(vmcs12->posted_intr_desc_addr));
pfn              6439 arch/x86/kvm/x86.c 	kvm_pfn_t pfn;
pfn              6468 arch/x86/kvm/x86.c 	pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa));
pfn              6474 arch/x86/kvm/x86.c 	if (is_error_noslot_pfn(pfn))
pfn              6477 arch/x86/kvm/x86.c 	kvm_release_pfn_clean(pfn);
pfn              9144 arch/x86/kvm/x86.c 	kvm_release_pfn(cache->pfn, cache->dirty, cache);
pfn               286 arch/x86/mm/fault.c static bool low_pfn(unsigned long pfn)
pfn               288 arch/x86/mm/fault.c 	return pfn < max_low_pfn;
pfn                66 arch/x86/mm/highmem_32.c void *kmap_atomic_pfn(unsigned long pfn)
pfn                68 arch/x86/mm/highmem_32.c 	return kmap_atomic_prot_pfn(pfn, kmap_prot);
pfn                93 arch/x86/mm/init.c 	unsigned long pfn;
pfn               120 arch/x86/mm/init.c 		pfn = ret >> PAGE_SHIFT;
pfn               122 arch/x86/mm/init.c 		pfn = pgt_buf_end;
pfn               125 arch/x86/mm/init.c 			pfn << PAGE_SHIFT, (pgt_buf_end << PAGE_SHIFT) - 1);
pfn               131 arch/x86/mm/init.c 		adr = __va((pfn + i) << PAGE_SHIFT);
pfn               135 arch/x86/mm/init.c 	return __va(pfn << PAGE_SHIFT);
pfn               341 arch/x86/mm/init.c 	unsigned long pfn;
pfn               347 arch/x86/mm/init.c 	pfn = start_pfn = PFN_DOWN(start);
pfn               355 arch/x86/mm/init.c 	if (pfn == 0)
pfn               358 arch/x86/mm/init.c 		end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
pfn               360 arch/x86/mm/init.c 	end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
pfn               366 arch/x86/mm/init.c 		pfn = end_pfn;
pfn               370 arch/x86/mm/init.c 	start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
pfn               374 arch/x86/mm/init.c 	end_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));
pfn               382 arch/x86/mm/init.c 		pfn = end_pfn;
pfn               387 arch/x86/mm/init.c 	start_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));
pfn               393 arch/x86/mm/init.c 		pfn = end_pfn;
pfn               397 arch/x86/mm/init.c 	start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
pfn               402 arch/x86/mm/init.c 		pfn = end_pfn;
pfn               407 arch/x86/mm/init.c 	start_pfn = pfn;
pfn               262 arch/x86/mm/init_32.c 	unsigned long pfn;
pfn               293 arch/x86/mm/init_32.c 	pfn = start_pfn;
pfn               294 arch/x86/mm/init_32.c 	pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
pfn               299 arch/x86/mm/init_32.c 		if (pfn >= end_pfn)
pfn               302 arch/x86/mm/init_32.c 		pmd_idx = pmd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
pfn               307 arch/x86/mm/init_32.c 		for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
pfn               309 arch/x86/mm/init_32.c 			unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
pfn               326 arch/x86/mm/init_32.c 				pfn &= PMD_MASK >> PAGE_SHIFT;
pfn               327 arch/x86/mm/init_32.c 				addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
pfn               336 arch/x86/mm/init_32.c 					set_pmd(pmd, pfn_pmd(pfn, init_prot));
pfn               338 arch/x86/mm/init_32.c 					set_pmd(pmd, pfn_pmd(pfn, prot));
pfn               340 arch/x86/mm/init_32.c 				pfn += PTRS_PER_PTE;
pfn               345 arch/x86/mm/init_32.c 			pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
pfn               347 arch/x86/mm/init_32.c 			for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
pfn               348 arch/x86/mm/init_32.c 			     pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
pfn               361 arch/x86/mm/init_32.c 					set_pte(pte, pfn_pte(pfn, init_prot));
pfn               362 arch/x86/mm/init_32.c 					last_map_addr = (pfn << PAGE_SHIFT) + PAGE_SIZE;
pfn               364 arch/x86/mm/init_32.c 					set_pte(pte, pfn_pte(pfn, prot));
pfn               441 arch/x86/mm/init_32.c 		unsigned long pfn = clamp_t(unsigned long, PFN_UP(start),
pfn               445 arch/x86/mm/init_32.c 		for ( ; pfn < e_pfn; pfn++)
pfn               446 arch/x86/mm/init_32.c 			if (pfn_valid(pfn))
pfn               447 arch/x86/mm/init_32.c 				free_highmem_page(pfn_to_page(pfn));
pfn               473 arch/x86/mm/init_32.c 	unsigned long pfn, va;
pfn               489 arch/x86/mm/init_32.c 	for (pfn = max_low_pfn; pfn < 1<<(32-PAGE_SHIFT); pfn++) {
pfn               490 arch/x86/mm/init_32.c 		va = PAGE_OFFSET + (pfn<<PAGE_SHIFT);
pfn               504 arch/x86/mm/init_32.c 				pfn, pmd, __pa(pmd));
pfn               513 arch/x86/mm/init_32.c 				pfn, pmd, __pa(pmd), pte, __pa(pte));
pfn                47 arch/x86/mm/iomap_32.c void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
pfn                58 arch/x86/mm/iomap_32.c 	set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
pfn                68 arch/x86/mm/iomap_32.c iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
pfn                84 arch/x86/mm/iomap_32.c 	return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, prot);
pfn               202 arch/x86/mm/mem_encrypt.c 	unsigned long pfn, pa, size;
pfn               207 arch/x86/mm/mem_encrypt.c 		pfn = pte_pfn(*kpte);
pfn               211 arch/x86/mm/mem_encrypt.c 		pfn = pmd_pfn(*(pmd_t *)kpte);
pfn               215 arch/x86/mm/mem_encrypt.c 		pfn = pud_pfn(*(pud_t *)kpte);
pfn               232 arch/x86/mm/mem_encrypt.c 	pa = pfn << page_level_shift(level);
pfn               249 arch/x86/mm/mem_encrypt.c 	new_pte = pfn_pte(pfn, new_prot);
pfn               224 arch/x86/mm/mmap.c int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
pfn               226 arch/x86/mm/mmap.c 	phys_addr_t addr = (phys_addr_t)pfn << PAGE_SHIFT;
pfn               238 arch/x86/mm/mmap.c bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
pfn               245 arch/x86/mm/mmap.c 	if (pfn_valid(pfn))
pfn               247 arch/x86/mm/mmap.c 	if (pfn >= l1tf_pfn_limit() && !capable(CAP_SYS_ADMIN))
pfn                48 arch/x86/mm/numa_32.c 	unsigned long pfn;
pfn                56 arch/x86/mm/numa_32.c 	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
pfn                57 arch/x86/mm/numa_32.c 		physnode_map[pfn / PAGES_PER_SECTION] = nid;
pfn                58 arch/x86/mm/numa_32.c 		printk(KERN_CONT "%lx ", pfn);
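
The numa_32 loop above fills a section-granular pfn-to-node lookup table: each PAGES_PER_SECTION-sized stripe of pfns maps to one physnode_map slot. A self-contained sketch with illustrative sizes; map_range_to_nid is a made-up helper name standing in for the surrounding setup code:

    #include <stdio.h>

    #define PAGES_PER_SECTION 16384UL  /* 64 MiB sections of 4 KiB pages */
    #define MAX_SECTIONS      64

    static signed char physnode_map[MAX_SECTIONS];

    static void map_range_to_nid(unsigned long start, unsigned long end, int nid)
    {
            for (unsigned long pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
                    physnode_map[pfn / PAGES_PER_SECTION] = nid;
    }

    int main(void)
    {
            map_range_to_nid(0x00000, 0x20000, 0);  /* node 0: first 512 MiB */
            map_range_to_nid(0x20000, 0x40000, 1);  /* node 1: next 512 MiB  */
            printf("pfn 0x30000 -> nid %d\n",
                   physnode_map[0x30000 / PAGES_PER_SECTION]);  /* 1 */
            return 0;
    }
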
pfn               140 arch/x86/mm/pageattr-test.c 		unsigned long pfn = prandom_u32() % max_pfn_mapped;
pfn               142 arch/x86/mm/pageattr-test.c 		addr[i] = (unsigned long)__va(pfn << PAGE_SHIFT);
pfn               144 arch/x86/mm/pageattr-test.c 		len[i] = min_t(unsigned long, len[i], max_pfn_mapped - pfn - 1);
pfn               168 arch/x86/mm/pageattr-test.c 			if (test_bit(pfn + k, bm)) {
pfn               172 arch/x86/mm/pageattr-test.c 			__set_bit(pfn + k, bm);
pfn               174 arch/x86/mm/pageattr-test.c 			pages[k] = pfn_to_page(pfn + k);
pfn                42 arch/x86/mm/pageattr.c 	unsigned long	pfn;
pfn               216 arch/x86/mm/pageattr.c static bool __cpa_pfn_in_highmap(unsigned long pfn)
pfn               222 arch/x86/mm/pageattr.c 	return within_inclusive(pfn, highmap_start_pfn(), highmap_end_pfn());
pfn               227 arch/x86/mm/pageattr.c static bool __cpa_pfn_in_highmap(unsigned long pfn)
pfn               496 arch/x86/mm/pageattr.c 				  unsigned long pfn, const char *txt)
pfn               508 arch/x86/mm/pageattr.c 		lvltxt[warnlvl], txt, start, end, pfn, (unsigned long long)pgprot_val(prot),
pfn               519 arch/x86/mm/pageattr.c 					  unsigned long pfn, unsigned long npg,
pfn               536 arch/x86/mm/pageattr.c 	check_conflict(warnlvl, prot, res, start, end, pfn, "Text NX");
pfn               547 arch/x86/mm/pageattr.c 		check_conflict(warnlvl, prot, res, start, end, pfn, "Text RO");
pfn               552 arch/x86/mm/pageattr.c 	res = protect_pci_bios(pfn, pfn + npg - 1);
pfn               553 arch/x86/mm/pageattr.c 	check_conflict(warnlvl, prot, res, start, end, pfn, "PCIBIOS NX");
pfn               556 arch/x86/mm/pageattr.c 	res = protect_rodata(pfn, pfn + npg - 1);
pfn               557 arch/x86/mm/pageattr.c 	check_conflict(warnlvl, prot, res, start, end, pfn, "Rodata RO");
pfn               749 arch/x86/mm/pageattr.c 	unsigned long numpages, pmask, psize, lpaddr, pfn, old_pfn;
pfn               815 arch/x86/mm/pageattr.c 	pfn = old_pfn + ((address & (psize - 1)) >> PAGE_SHIFT);
pfn               816 arch/x86/mm/pageattr.c 	cpa->pfn = pfn;
pfn               904 arch/x86/mm/pageattr.c static void split_set_pte(struct cpa_data *cpa, pte_t *pte, unsigned long pfn,
pfn               919 arch/x86/mm/pageattr.c 	prot = static_protections(ref_prot, address, pfn, npg, 0, CPA_PROTECT);
pfn               937 arch/x86/mm/pageattr.c 	set_pte(pte, pfn_pte(pfn, ref_prot));
pfn               944 arch/x86/mm/pageattr.c 	unsigned long lpaddr, lpinc, ref_pfn, pfn, pfninc = 1;
pfn              1001 arch/x86/mm/pageattr.c 	pfn = ref_pfn;
pfn              1002 arch/x86/mm/pageattr.c 	for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc, lpaddr += lpinc)
pfn              1003 arch/x86/mm/pageattr.c 		split_set_pte(cpa, pbase + i, pfn, ref_prot, lpaddr, lpinc);
pfn              1006 arch/x86/mm/pageattr.c 		unsigned long pfn = PFN_DOWN(__pa(address));
pfn              1008 arch/x86/mm/pageattr.c 		if (pfn_range_is_mapped(pfn, pfn + 1))
pfn              1232 arch/x86/mm/pageattr.c 		set_pte(pte, pfn_pte(cpa->pfn, pgprot));
pfn              1235 arch/x86/mm/pageattr.c 		cpa->pfn++;
pfn              1291 arch/x86/mm/pageattr.c 		set_pmd(pmd, pmd_mkhuge(pfn_pmd(cpa->pfn,
pfn              1295 arch/x86/mm/pageattr.c 		cpa->pfn  += PMD_SIZE >> PAGE_SHIFT;
pfn              1364 arch/x86/mm/pageattr.c 		set_pud(pud, pud_mkhuge(pfn_pud(cpa->pfn,
pfn              1368 arch/x86/mm/pageattr.c 		cpa->pfn  += PUD_SIZE >> PAGE_SHIFT;
pfn              1475 arch/x86/mm/pageattr.c 		cpa->pfn = __pa(vaddr) >> PAGE_SHIFT;
pfn              1478 arch/x86/mm/pageattr.c 	} else if (__cpa_pfn_in_highmap(cpa->pfn)) {
pfn              1510 arch/x86/mm/pageattr.c 		unsigned long pfn = pte_pfn(old_pte);
pfn              1517 arch/x86/mm/pageattr.c 		new_prot = static_protections(new_prot, address, pfn, 1, 0,
pfn              1527 arch/x86/mm/pageattr.c 		new_pte = pfn_pte(pfn, new_prot);
pfn              1528 arch/x86/mm/pageattr.c 		cpa->pfn = pfn;
pfn              1568 arch/x86/mm/pageattr.c 	unsigned long laddr = (unsigned long)__va(cpa->pfn << PAGE_SHIFT);
pfn              1572 arch/x86/mm/pageattr.c 	if (!pfn_range_is_mapped(cpa->pfn, cpa->pfn + 1))
pfn              1602 arch/x86/mm/pageattr.c 	    __cpa_pfn_in_highmap(cpa->pfn)) {
pfn              1603 arch/x86/mm/pageattr.c 		unsigned long temp_cpa_vaddr = (cpa->pfn << PAGE_SHIFT) +
pfn              2211 arch/x86/mm/pageattr.c int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
pfn              2218 arch/x86/mm/pageattr.c 		.pfn = pfn,
pfn              2261 arch/x86/mm/pageattr.c 		.pfn		= 0,
pfn               465 arch/x86/mm/pat.c 	u64 pfn;
pfn               479 arch/x86/mm/pat.c 	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
pfn               482 arch/x86/mm/pat.c 		page = pfn_to_page(pfn);
pfn               497 arch/x86/mm/pat.c 	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
pfn               498 arch/x86/mm/pat.c 		page = pfn_to_page(pfn);
pfn               507 arch/x86/mm/pat.c 	u64 pfn;
pfn               509 arch/x86/mm/pat.c 	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
pfn               510 arch/x86/mm/pat.c 		page = pfn_to_page(pfn);
pfn               715 arch/x86/mm/pat.c bool pat_pfn_immune_to_uc_mtrr(unsigned long pfn)
pfn               717 arch/x86/mm/pat.c 	enum page_cache_mode cm = lookup_memtype(PFN_PHYS(pfn));
pfn               789 arch/x86/mm/pat.c pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
pfn               792 arch/x86/mm/pat.c 	if (!phys_mem_access_encrypted(pfn << PAGE_SHIFT, size))
pfn               800 arch/x86/mm/pat.c static inline int range_is_allowed(unsigned long pfn, unsigned long size)
pfn               806 arch/x86/mm/pat.c static inline int range_is_allowed(unsigned long pfn, unsigned long size)
pfn               808 arch/x86/mm/pat.c 	u64 from = ((u64)pfn) << PAGE_SHIFT;
pfn               816 arch/x86/mm/pat.c 		if (!devmem_is_allowed(pfn))
pfn               819 arch/x86/mm/pat.c 		pfn++;
pfn               825 arch/x86/mm/pat.c int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
pfn               830 arch/x86/mm/pat.c 	if (!range_is_allowed(pfn, size))
pfn               995 arch/x86/mm/pat.c 		    unsigned long pfn, unsigned long addr, unsigned long size)
pfn               997 arch/x86/mm/pat.c 	resource_size_t paddr = (resource_size_t)pfn << PAGE_SHIFT;
pfn              1034 arch/x86/mm/pat.c void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot, pfn_t pfn)
pfn              1042 arch/x86/mm/pat.c 	pcm = lookup_memtype(pfn_t_to_phys(pfn));
pfn              1052 arch/x86/mm/pat.c void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
pfn              1062 arch/x86/mm/pat.c 	paddr = (resource_size_t)pfn << PAGE_SHIFT;
pfn               341 arch/x86/platform/efi/efi_64.c 	unsigned long pfn, text, pf;
pfn               355 arch/x86/platform/efi/efi_64.c 	pfn = pa_memmap >> PAGE_SHIFT;
pfn               357 arch/x86/platform/efi/efi_64.c 	if (kernel_map_pages_in_pgd(pgd, pfn, pa_memmap, num_pages, pf)) {
pfn               401 arch/x86/platform/efi/efi_64.c 	pfn = text >> PAGE_SHIFT;
pfn               404 arch/x86/platform/efi/efi_64.c 	if (kernel_map_pages_in_pgd(pgd, pfn, text, npages, pf)) {
pfn               415 arch/x86/platform/efi/efi_64.c 	unsigned long pfn;
pfn               424 arch/x86/platform/efi/efi_64.c 	pfn = md->phys_addr >> PAGE_SHIFT;
pfn               425 arch/x86/platform/efi/efi_64.c 	if (kernel_map_pages_in_pgd(pgd, pfn, va, md->num_pages, flags))
pfn               519 arch/x86/platform/efi/efi_64.c 	unsigned long pfn;
pfn               524 arch/x86/platform/efi/efi_64.c 	pfn = md->phys_addr >> PAGE_SHIFT;
pfn               525 arch/x86/platform/efi/efi_64.c 	err1 = kernel_map_pages_in_pgd(pgd, pfn, md->phys_addr, md->num_pages, pf);
pfn               531 arch/x86/platform/efi/efi_64.c 	err2 = kernel_map_pages_in_pgd(pgd, pfn, md->virt_addr, md->num_pages, pf);
pfn                46 arch/x86/power/hibernate.c int pfn_is_nosave(unsigned long pfn)
pfn                54 arch/x86/power/hibernate.c 	return pfn >= nosave_begin_pfn && pfn < nosave_end_pfn;
pfn                83 arch/x86/power/hibernate_32.c 	unsigned long pfn;
pfn                91 arch/x86/power/hibernate_32.c 	pfn = 0;
pfn                98 arch/x86/power/hibernate_32.c 		if (pfn >= max_low_pfn)
pfn               102 arch/x86/power/hibernate_32.c 			if (pfn >= max_low_pfn)
pfn               110 arch/x86/power/hibernate_32.c 				set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
pfn               111 arch/x86/power/hibernate_32.c 				pfn += PTRS_PER_PTE;
pfn               120 arch/x86/power/hibernate_32.c 				for (; pte < max_pte; pte++, pfn++) {
pfn               121 arch/x86/power/hibernate_32.c 					if (pfn >= max_low_pfn)
pfn               124 arch/x86/power/hibernate_32.c 					set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
pfn               103 arch/x86/xen/enlighten_hvm.c 		u64 pfn;
pfn               108 arch/x86/xen/enlighten_hvm.c 		pfn = __pa(hypercall_page);
pfn               109 arch/x86/xen/enlighten_hvm.c 		wrmsr_safe(msr, (u32)pfn, (u32)(pfn >> 32));
pfn               353 arch/x86/xen/enlighten_pv.c 	unsigned long pfn;
pfn               360 arch/x86/xen/enlighten_pv.c 	pfn = pte_pfn(*ptep);
pfn               361 arch/x86/xen/enlighten_pv.c 	page = pfn_to_page(pfn);
pfn               363 arch/x86/xen/enlighten_pv.c 	pte = pfn_pte(pfn, prot);
pfn               393 arch/x86/xen/enlighten_pv.c 		void *av = __va(PFN_PHYS(pfn));
pfn               454 arch/x86/xen/enlighten_pv.c 	unsigned long pfn, mfn;
pfn               473 arch/x86/xen/enlighten_pv.c 	pfn = pte_pfn(*ptep);
pfn               474 arch/x86/xen/enlighten_pv.c 	mfn = pfn_to_mfn(pfn);
pfn               475 arch/x86/xen/enlighten_pv.c 	virt = __va(PFN_PHYS(pfn));
pfn               491 arch/x86/xen/enlighten_pv.c 	unsigned long pfn, mfn;
pfn               498 arch/x86/xen/enlighten_pv.c 	pfn = virt_to_pfn(va);
pfn               499 arch/x86/xen/enlighten_pv.c 	mfn = pfn_to_mfn(pfn);
pfn               501 arch/x86/xen/enlighten_pv.c 	pte = pfn_pte(pfn, PAGE_KERNEL_RO);
pfn                29 arch/x86/xen/enlighten_pvh.c 	u64 pfn;
pfn                36 arch/x86/xen/enlighten_pvh.c 	pfn = __pa(hypercall_page);
pfn                37 arch/x86/xen/enlighten_pvh.c 	wrmsr_safe(msr, (u32)pfn, (u32)(pfn >> 32));
pfn               150 arch/x86/xen/grant-table.c 	return xen_xlate_map_ballooned_pages(&xen_auto_xlat_grant_frames.pfn,
pfn                16 arch/x86/xen/mmu.h bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
pfn                18 arch/x86/xen/mmu.h void set_pte_mfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);
pfn                21 arch/x86/xen/mmu_hvm.c static int xen_oldmem_pfn_is_ram(unsigned long pfn)
pfn                25 arch/x86/xen/mmu_hvm.c 		.pfn = pfn,
pfn               337 arch/x86/xen/mmu_pv.c 		unsigned long pfn = mfn_to_pfn(mfn);
pfn               340 arch/x86/xen/mmu_pv.c 		if (unlikely(pfn == ~0))
pfn               343 arch/x86/xen/mmu_pv.c 			val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
pfn               352 arch/x86/xen/mmu_pv.c 		unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
pfn               356 arch/x86/xen/mmu_pv.c 		mfn = __pfn_to_mfn(pfn);
pfn               715 arch/x86/xen/mmu_pv.c static void xen_do_pin(unsigned level, unsigned long pfn)
pfn               720 arch/x86/xen/mmu_pv.c 	op.arg1.mfn = pfn_to_mfn(pfn);
pfn               739 arch/x86/xen/mmu_pv.c 		unsigned long pfn = page_to_pfn(page);
pfn               770 arch/x86/xen/mmu_pv.c 					pfn_pte(pfn, PAGE_KERNEL_RO),
pfn               774 arch/x86/xen/mmu_pv.c 			xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);
pfn               886 arch/x86/xen/mmu_pv.c 		unsigned long pfn = page_to_pfn(page);
pfn               901 arch/x86/xen/mmu_pv.c 				xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
pfn               907 arch/x86/xen/mmu_pv.c 					pfn_pte(pfn, PAGE_KERNEL),
pfn              1082 arch/x86/xen/mmu_pv.c static void __init pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
pfn              1087 arch/x86/xen/mmu_pv.c 	op.arg1.mfn = pfn_to_mfn(pfn);
pfn              1543 arch/x86/xen/mmu_pv.c 	unsigned long pfn;
pfn              1551 arch/x86/xen/mmu_pv.c 	pfn = (pte & PTE_PFN_MASK) >> PAGE_SHIFT;
pfn              1553 arch/x86/xen/mmu_pv.c 	    pfn >= xen_start_info->first_p2m_pfn &&
pfn              1554 arch/x86/xen/mmu_pv.c 	    pfn < xen_start_info->first_p2m_pfn + xen_start_info->nr_p2m_frames)
pfn              1576 arch/x86/xen/mmu_pv.c static void __init xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
pfn              1581 arch/x86/xen/mmu_pv.c 	make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
pfn              1582 arch/x86/xen/mmu_pv.c 	pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
pfn              1586 arch/x86/xen/mmu_pv.c static void __init xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)
pfn              1591 arch/x86/xen/mmu_pv.c 	make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
pfn              1596 arch/x86/xen/mmu_pv.c static void __init xen_release_pte_init(unsigned long pfn)
pfn              1598 arch/x86/xen/mmu_pv.c 	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
pfn              1599 arch/x86/xen/mmu_pv.c 	make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
pfn              1602 arch/x86/xen/mmu_pv.c static void __init xen_release_pmd_init(unsigned long pfn)
pfn              1604 arch/x86/xen/mmu_pv.c 	make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
pfn              1607 arch/x86/xen/mmu_pv.c static inline void __pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
pfn              1615 arch/x86/xen/mmu_pv.c 	op->arg1.mfn = pfn_to_mfn(pfn);
pfn              1620 arch/x86/xen/mmu_pv.c static inline void __set_pfn_prot(unsigned long pfn, pgprot_t prot)
pfn              1623 arch/x86/xen/mmu_pv.c 	unsigned long addr = (unsigned long)__va(pfn << PAGE_SHIFT);
pfn              1627 arch/x86/xen/mmu_pv.c 				pfn_pte(pfn, prot), 0);
pfn              1632 arch/x86/xen/mmu_pv.c static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
pfn              1637 arch/x86/xen/mmu_pv.c 	trace_xen_mmu_alloc_ptpage(mm, pfn, level, pinned);
pfn              1640 arch/x86/xen/mmu_pv.c 		struct page *page = pfn_to_page(pfn);
pfn              1648 arch/x86/xen/mmu_pv.c 			__set_pfn_prot(pfn, PAGE_KERNEL_RO);
pfn              1651 arch/x86/xen/mmu_pv.c 				__pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
pfn              1662 arch/x86/xen/mmu_pv.c static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
pfn              1664 arch/x86/xen/mmu_pv.c 	xen_alloc_ptpage(mm, pfn, PT_PTE);
pfn              1667 arch/x86/xen/mmu_pv.c static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
pfn              1669 arch/x86/xen/mmu_pv.c 	xen_alloc_ptpage(mm, pfn, PT_PMD);
pfn              1673 arch/x86/xen/mmu_pv.c static inline void xen_release_ptpage(unsigned long pfn, unsigned level)
pfn              1675 arch/x86/xen/mmu_pv.c 	struct page *page = pfn_to_page(pfn);
pfn              1678 arch/x86/xen/mmu_pv.c 	trace_xen_mmu_release_ptpage(pfn, level, pinned);
pfn              1685 arch/x86/xen/mmu_pv.c 				__pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
pfn              1687 arch/x86/xen/mmu_pv.c 			__set_pfn_prot(pfn, PAGE_KERNEL);
pfn              1695 arch/x86/xen/mmu_pv.c static void xen_release_pte(unsigned long pfn)
pfn              1697 arch/x86/xen/mmu_pv.c 	xen_release_ptpage(pfn, PT_PTE);
pfn              1700 arch/x86/xen/mmu_pv.c static void xen_release_pmd(unsigned long pfn)
pfn              1702 arch/x86/xen/mmu_pv.c 	xen_release_ptpage(pfn, PT_PMD);
pfn              1706 arch/x86/xen/mmu_pv.c static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
pfn              1708 arch/x86/xen/mmu_pv.c 	xen_alloc_ptpage(mm, pfn, PT_PUD);
pfn              1711 arch/x86/xen/mmu_pv.c static void xen_release_pud(unsigned long pfn)
pfn              1713 arch/x86/xen/mmu_pv.c 	xen_release_ptpage(pfn, PT_PUD);
pfn              1764 arch/x86/xen/mmu_pv.c 	unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
pfn              1765 arch/x86/xen/mmu_pv.c 	pte_t pte = pfn_pte(pfn, prot);
pfn              1779 arch/x86/xen/mmu_pv.c 	unsigned long pfn;
pfn              1785 arch/x86/xen/mmu_pv.c 	pfn = 0;
pfn              1786 arch/x86/xen/mmu_pv.c 	for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
pfn              1804 arch/x86/xen/mmu_pv.c 		for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
pfn              1807 arch/x86/xen/mmu_pv.c 			if (pfn > max_pfn_mapped)
pfn              1808 arch/x86/xen/mmu_pv.c 				max_pfn_mapped = pfn;
pfn              1813 arch/x86/xen/mmu_pv.c 			pte = pfn_pte(pfn, PAGE_KERNEL_EXEC);
pfn              2059 arch/x86/xen/mmu_pv.c 	unsigned long p2m_pfn, p2m_pfn_end, n_frames, pfn, pfn_end;
pfn              2147 arch/x86/xen/mmu_pv.c 		pfn = xen_start_info->first_p2m_pfn;
pfn              2152 arch/x86/xen/mmu_pv.c 		pfn = p2m_pfn;
pfn              2156 arch/x86/xen/mmu_pv.c 	memblock_free(PFN_PHYS(pfn), PAGE_SIZE * (pfn_end - pfn));
pfn              2157 arch/x86/xen/mmu_pv.c 	while (pfn < pfn_end) {
pfn              2158 arch/x86/xen/mmu_pv.c 		if (pfn == p2m_pfn) {
pfn              2159 arch/x86/xen/mmu_pv.c 			pfn = p2m_pfn_end;
pfn              2162 arch/x86/xen/mmu_pv.c 		make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
pfn              2163 arch/x86/xen/mmu_pv.c 		pfn++;
pfn              2179 arch/x86/xen/mmu_pv.c 	unsigned long pfn = PFN_DOWN(__pa(swapper_pg_dir));
pfn              2203 arch/x86/xen/mmu_pv.c 	pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, pfn);
pfn              2684 arch/x86/xen/mmu_pv.c 	xen_pfn_t *pfn;
pfn              2694 arch/x86/xen/mmu_pv.c 	pte_t pte = pte_mkspecial(mfn_pte(*rmd->pfn, rmd->prot));
pfn              2701 arch/x86/xen/mmu_pv.c 		(*rmd->pfn)++;
pfn              2703 arch/x86/xen/mmu_pv.c 		rmd->pfn++;
pfn              2716 arch/x86/xen/mmu_pv.c 		  xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot,
pfn              2727 arch/x86/xen/mmu_pv.c 	rmd.pfn = pfn;
pfn               126 arch/x86/xen/p2m.c static inline unsigned p2m_top_index(unsigned long pfn)
pfn               128 arch/x86/xen/p2m.c 	BUG_ON(pfn >= MAX_P2M_PFN);
pfn               129 arch/x86/xen/p2m.c 	return pfn / (P2M_MID_PER_PAGE * P2M_PER_PAGE);
pfn               132 arch/x86/xen/p2m.c static inline unsigned p2m_mid_index(unsigned long pfn)
pfn               134 arch/x86/xen/p2m.c 	return (pfn / P2M_PER_PAGE) % P2M_MID_PER_PAGE;
pfn               137 arch/x86/xen/p2m.c static inline unsigned p2m_index(unsigned long pfn)
pfn               139 arch/x86/xen/p2m.c 	return pfn % P2M_PER_PAGE;
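
The three helpers above decompose a pfn into indices for the top, mid and leaf levels of Xen's virtually mapped p2m tree with plain div/mod arithmetic. A sketch that reassembles the pfn to show the decomposition is exact; on 64-bit both constants work out to PAGE_SIZE / sizeof(unsigned long) = 512:

    #include <stdio.h>

    #define P2M_PER_PAGE     512UL
    #define P2M_MID_PER_PAGE 512UL

    int main(void)
    {
            unsigned long pfn = 123456789;
            unsigned long top = pfn / (P2M_MID_PER_PAGE * P2M_PER_PAGE);
            unsigned long mid = (pfn / P2M_PER_PAGE) % P2M_MID_PER_PAGE;
            unsigned long idx = pfn % P2M_PER_PAGE;

            /* Reassemble to verify the round trip: prints 123456789. */
            printf("%lu\n",
                   (top * P2M_MID_PER_PAGE + mid) * P2M_PER_PAGE + idx);
            return 0;
    }
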
pfn               174 arch/x86/xen/p2m.c static void p2m_init_identity(unsigned long *p2m, unsigned long pfn)
pfn               179 arch/x86/xen/p2m.c 		p2m[i] = IDENTITY_FRAME(pfn + i);
pfn               219 arch/x86/xen/p2m.c 	unsigned long pfn, mfn;
pfn               242 arch/x86/xen/p2m.c 	for (pfn = 0; pfn < xen_max_p2m_pfn && pfn < MAX_P2M_PFN;
pfn               243 arch/x86/xen/p2m.c 	     pfn += P2M_PER_PAGE) {
pfn               244 arch/x86/xen/p2m.c 		topidx = p2m_top_index(pfn);
pfn               245 arch/x86/xen/p2m.c 		mididx = p2m_mid_index(pfn);
pfn               248 arch/x86/xen/p2m.c 		ptep = lookup_address((unsigned long)(xen_p2m_addr + pfn),
pfn               262 arch/x86/xen/p2m.c 			pfn += (P2M_MID_PER_PAGE - 1) * P2M_PER_PAGE;
pfn               297 arch/x86/xen/p2m.c 	unsigned long pfn;
pfn               302 arch/x86/xen/p2m.c 	for (pfn = xen_start_info->nr_pages; pfn < xen_p2m_size; pfn++)
pfn               303 arch/x86/xen/p2m.c 		xen_p2m_addr[pfn] = INVALID_P2M_ENTRY;
pfn               313 arch/x86/xen/p2m.c static int xen_p2m_elem_type(unsigned long pfn)
pfn               317 arch/x86/xen/p2m.c 	if (pfn >= xen_p2m_size)
pfn               320 arch/x86/xen/p2m.c 	mfn = xen_p2m_addr[pfn];
pfn               334 arch/x86/xen/p2m.c 	unsigned long pfn;
pfn               356 arch/x86/xen/p2m.c 	for (pfn = 0; pfn < xen_max_p2m_pfn; pfn += chunk) {
pfn               367 arch/x86/xen/p2m.c 		chunk = (pfn & (P2M_PER_PAGE * P2M_MID_PER_PAGE - 1)) ?
pfn               370 arch/x86/xen/p2m.c 		type = xen_p2m_elem_type(pfn);
pfn               374 arch/x86/xen/p2m.c 				if (xen_p2m_elem_type(pfn + i) != type)
pfn               384 arch/x86/xen/p2m.c 			copy_page(mfns, xen_p2m_addr + pfn);
pfn               386 arch/x86/xen/p2m.c 			mfns = xen_p2m_addr + pfn;
pfn               388 arch/x86/xen/p2m.c 			ptep = populate_extra_pte((unsigned long)(p2m + pfn));
pfn               398 arch/x86/xen/p2m.c 			ptep = populate_extra_pte((unsigned long)(p2m + pfn));
pfn               409 arch/x86/xen/p2m.c 				(unsigned long)(p2m + pfn) + i * PMD_SIZE);
pfn               439 arch/x86/xen/p2m.c unsigned long get_phys_to_machine(unsigned long pfn)
pfn               444 arch/x86/xen/p2m.c 	if (unlikely(pfn >= xen_p2m_size)) {
pfn               445 arch/x86/xen/p2m.c 		if (pfn < xen_max_p2m_pfn)
pfn               446 arch/x86/xen/p2m.c 			return xen_chk_extra_mem(pfn);
pfn               448 arch/x86/xen/p2m.c 		return IDENTITY_FRAME(pfn);
pfn               451 arch/x86/xen/p2m.c 	ptep = lookup_address((unsigned long)(xen_p2m_addr + pfn), &level);
pfn               460 arch/x86/xen/p2m.c 		return IDENTITY_FRAME(pfn);
pfn               462 arch/x86/xen/p2m.c 	return xen_p2m_addr[pfn];
pfn               535 arch/x86/xen/p2m.c int xen_alloc_p2m_entry(unsigned long pfn)
pfn               542 arch/x86/xen/p2m.c 	unsigned long addr = (unsigned long)(xen_p2m_addr + pfn);
pfn               556 arch/x86/xen/p2m.c 	if (p2m_top_mfn && pfn < MAX_P2M_PFN) {
pfn               557 arch/x86/xen/p2m.c 		topidx = p2m_top_index(pfn);
pfn               602 arch/x86/xen/p2m.c 			p2m_init_identity(p2m, pfn & ~(P2M_PER_PAGE - 1));
pfn               614 arch/x86/xen/p2m.c 				mid_mfn[p2m_mid_index(pfn)] = virt_to_mfn(p2m);
pfn               625 arch/x86/xen/p2m.c 	if (pfn > xen_p2m_last_pfn) {
pfn               626 arch/x86/xen/p2m.c 		xen_p2m_last_pfn = pfn;
pfn               637 arch/x86/xen/p2m.c 	unsigned long pfn;
pfn               648 arch/x86/xen/p2m.c 	for (pfn = pfn_s; pfn < pfn_e; pfn++)
pfn               649 arch/x86/xen/p2m.c 		xen_p2m_addr[pfn] = IDENTITY_FRAME(pfn);
pfn               651 arch/x86/xen/p2m.c 	return pfn - pfn_s;
pfn               654 arch/x86/xen/p2m.c bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
pfn               659 arch/x86/xen/p2m.c 	if (unlikely(pfn >= xen_p2m_size)) {
pfn               668 arch/x86/xen/p2m.c 	if (likely(!xen_safe_write_ulong(xen_p2m_addr + pfn, mfn)))
pfn               671 arch/x86/xen/p2m.c 	ptep = lookup_address((unsigned long)(xen_p2m_addr + pfn), &level);
pfn               678 arch/x86/xen/p2m.c 		return mfn == IDENTITY_FRAME(pfn);
pfn               683 arch/x86/xen/p2m.c bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
pfn               685 arch/x86/xen/p2m.c 	if (unlikely(!__set_phys_to_machine(pfn, mfn))) {
pfn               688 arch/x86/xen/p2m.c 		ret = xen_alloc_p2m_entry(pfn);
pfn               692 arch/x86/xen/p2m.c 		return __set_phys_to_machine(pfn, mfn);
pfn               716 arch/x86/xen/p2m.c 		unsigned long mfn, pfn;
pfn               729 arch/x86/xen/p2m.c 		pfn = page_to_pfn(pages[i]);
pfn               731 arch/x86/xen/p2m.c 		WARN(pfn_to_mfn(pfn) != INVALID_P2M_ENTRY, "page must be ballooned");
pfn               733 arch/x86/xen/p2m.c 		if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) {
pfn               755 arch/x86/xen/p2m.c 		unsigned long pfn = page_to_pfn(pages[i]);
pfn               762 arch/x86/xen/p2m.c 		set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
pfn               782 arch/x86/xen/p2m.c 	unsigned long pfn, first_pfn;
pfn               788 arch/x86/xen/p2m.c 	for (pfn = 0; pfn < xen_p2m_size; pfn++) {
pfn               789 arch/x86/xen/p2m.c 		type = xen_p2m_elem_type(pfn);
pfn               791 arch/x86/xen/p2m.c 			seq_printf(m, " [0x%lx->0x%lx] %s\n", first_pfn, pfn,
pfn               794 arch/x86/xen/p2m.c 			first_pfn = pfn;
pfn               797 arch/x86/xen/p2m.c 	seq_printf(m, " [0x%lx->0x%lx] %s\n", first_pfn, pfn,
pfn               524 arch/x86/xen/pmu.c 	unsigned long pfn;
pfn               537 arch/x86/xen/pmu.c 	pfn = virt_to_pfn(xenpmu_data);
pfn               539 arch/x86/xen/pmu.c 	xp.val = pfn_to_mfn(pfn);
pfn               164 arch/x86/xen/setup.c unsigned long __ref xen_chk_extra_mem(unsigned long pfn)
pfn               169 arch/x86/xen/setup.c 		if (pfn >= xen_extra_mem[i].start_pfn &&
pfn               170 arch/x86/xen/setup.c 		    pfn < xen_extra_mem[i].start_pfn + xen_extra_mem[i].n_pfns)
pfn               174 arch/x86/xen/setup.c 	return IDENTITY_FRAME(pfn);
pfn               182 arch/x86/xen/setup.c 	unsigned long pfn, pfn_s, pfn_e;
pfn               190 arch/x86/xen/setup.c 		for (pfn = pfn_s; pfn < pfn_e; pfn++)
pfn               191 arch/x86/xen/setup.c 			set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
pfn               257 arch/x86/xen/setup.c 	unsigned long pfn, end;
pfn               264 arch/x86/xen/setup.c 	for (pfn = start_pfn; pfn < end; pfn++) {
pfn               265 arch/x86/xen/setup.c 		unsigned long mfn = pfn_to_mfn(pfn);
pfn               268 arch/x86/xen/setup.c 		if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
pfn               272 arch/x86/xen/setup.c 		WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);
pfn               276 arch/x86/xen/setup.c 			if (!__set_phys_to_machine(pfn, INVALID_P2M_ENTRY))
pfn               288 arch/x86/xen/setup.c static void __init xen_update_mem_tables(unsigned long pfn, unsigned long mfn)
pfn               292 arch/x86/xen/setup.c 		.val = pfn
pfn               296 arch/x86/xen/setup.c 	if (!set_phys_to_machine(pfn, mfn)) {
pfn               298 arch/x86/xen/setup.c 		     pfn, mfn);
pfn               305 arch/x86/xen/setup.c 		     mfn, pfn);
pfn               310 arch/x86/xen/setup.c 	if (pfn >= PFN_UP(__pa(high_memory - 1)))
pfn               313 arch/x86/xen/setup.c 	if (HYPERVISOR_update_va_mapping((unsigned long)__va(pfn << PAGE_SHIFT),
pfn               316 arch/x86/xen/setup.c 		      mfn, pfn);
pfn               391 arch/x86/xen/setup.c 	unsigned long pfn;
pfn               435 arch/x86/xen/setup.c 	for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)
pfn               437 arch/x86/xen/setup.c 			(unsigned long)__va(pfn << PAGE_SHIFT),
pfn               438 arch/x86/xen/setup.c 			mfn_pte(pfn, PAGE_KERNEL_IO), 0);
pfn               502 arch/x86/xen/setup.c 	unsigned long mfn_save, pfn;
pfn               516 arch/x86/xen/setup.c 		pfn = xen_remap_buf.target_pfn;
pfn               518 arch/x86/xen/setup.c 			xen_update_mem_tables(pfn, xen_remap_buf.mfns[i]);
pfn               520 arch/x86/xen/setup.c 			pfn++;
pfn               522 arch/x86/xen/setup.c 		if (pfn_s == ~0UL || pfn == pfn_s) {
pfn                48 arch/x86/xen/xen-ops.h unsigned long __ref xen_chk_extra_mem(unsigned long pfn);
pfn               128 arch/xtensa/include/asm/cacheflush.h 		unsigned long address, unsigned long pfn);
pfn               143 arch/xtensa/include/asm/cacheflush.h #define flush_cache_page(vma, addr, pfn)		do { } while (0)
pfn               181 arch/xtensa/include/asm/page.h #define pfn_valid(pfn) \
pfn               182 arch/xtensa/include/asm/page.h 	((pfn) >= ARCH_PFN_OFFSET && ((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
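
The xtensa pfn_valid() above checks a half-open window starting at a nonzero ARCH_PFN_OFFSET, subtracting the offset first so the comparison cannot wrap below zero. A sketch with an illustrative offset and window size (my_pfn_valid is just a local stand-in):

    #include <stdbool.h>
    #include <stdio.h>

    #define ARCH_PFN_OFFSET 0x40000UL
    #define MAX_MAPNR       0x10000UL  /* pages managed above the offset */

    static bool my_pfn_valid(unsigned long pfn)
    {
            return pfn >= ARCH_PFN_OFFSET && (pfn - ARCH_PFN_OFFSET) < MAX_MAPNR;
    }

    int main(void)
    {
            printf("%d %d %d\n", my_pfn_valid(0x3ffff), my_pfn_valid(0x40000),
                   my_pfn_valid(0x50000));  /* 0 1 0 */
            return 0;
    }
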
pfn               297 arch/xtensa/include/asm/pgtable.h #define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
pfn                29 arch/xtensa/kernel/pci-dma.c 	unsigned long pfn = PFN_DOWN(paddr);
pfn                30 arch/xtensa/kernel/pci-dma.c 	struct page *page = pfn_to_page(pfn);
pfn               557 arch/xtensa/kernel/smp.c 		     unsigned long address, unsigned long pfn)
pfn               562 arch/xtensa/kernel/smp.c 		.addr2 = pfn,
pfn               196 arch/xtensa/mm/cache.c 		      unsigned long pfn)
pfn               200 arch/xtensa/mm/cache.c 	unsigned long phys = page_to_phys(pfn_to_page(pfn));
pfn               213 arch/xtensa/mm/cache.c 	unsigned long pfn = pte_pfn(*ptep);
pfn               216 arch/xtensa/mm/cache.c 	if (!pfn_valid(pfn))
pfn               219 arch/xtensa/mm/cache.c 	page = pfn_to_page(pfn);
pfn                83 arch/xtensa/mm/init.c static void __init free_area_high(unsigned long pfn, unsigned long end)
pfn                85 arch/xtensa/mm/init.c 	for (; pfn < end; pfn++)
pfn                86 arch/xtensa/mm/init.c 		free_highmem_page(pfn_to_page(pfn));
pfn                18 arch/xtensa/mm/ioremap.c 	unsigned long pfn = __phys_to_pfn(paddr);
pfn                25 arch/xtensa/mm/ioremap.c 	WARN_ON(pfn_valid(pfn));
pfn               133 drivers/acpi/apei/ghes.c static void __iomem *ghes_map(u64 pfn, enum fixed_addresses fixmap_idx)
pfn               138 drivers/acpi/apei/ghes.c 	paddr = PFN_PHYS(pfn);
pfn               420 drivers/acpi/apei/ghes.c 	unsigned long pfn;
pfn               428 drivers/acpi/apei/ghes.c 	pfn = mem_err->physical_addr >> PAGE_SHIFT;
pfn               429 drivers/acpi/apei/ghes.c 	if (!pfn_valid(pfn)) {
pfn               444 drivers/acpi/apei/ghes.c 		memory_failure_queue(pfn, flags);
pfn               276 drivers/acpi/osl.c #define should_use_kmap(pfn)   0
pfn               278 drivers/acpi/osl.c #define should_use_kmap(pfn)   page_is_ram(pfn)
pfn               283 drivers/acpi/osl.c 	unsigned long pfn;
pfn               285 drivers/acpi/osl.c 	pfn = pg_off >> PAGE_SHIFT;
pfn               286 drivers/acpi/osl.c 	if (should_use_kmap(pfn)) {
pfn               289 drivers/acpi/osl.c 		return (void __iomem __force *)kmap(pfn_to_page(pfn));
pfn               296 drivers/acpi/osl.c 	unsigned long pfn;
pfn               298 drivers/acpi/osl.c 	pfn = pg_off >> PAGE_SHIFT;
pfn               299 drivers/acpi/osl.c 	if (should_use_kmap(pfn))
pfn               300 drivers/acpi/osl.c 		kunmap(pfn_to_page(pfn));
pfn                42 drivers/base/memory.c static inline unsigned long pfn_to_block_id(unsigned long pfn)
pfn                44 drivers/base/memory.c 	return base_memory_block_id(pfn_to_section_nr(pfn));
pfn               178 drivers/base/memory.c 	unsigned long pfn = start_pfn;
pfn               186 drivers/base/memory.c 		if (WARN_ON_ONCE(!pfn_valid(pfn)))
pfn               191 drivers/base/memory.c 				section_nr, pfn, pfn + PAGES_PER_SECTION);
pfn               195 drivers/base/memory.c 				section_nr, pfn, pfn + PAGES_PER_SECTION);
pfn               199 drivers/base/memory.c 				section_nr, pfn, pfn + PAGES_PER_SECTION);
pfn               202 drivers/base/memory.c 		pfn += PAGES_PER_SECTION;
pfn               518 drivers/base/memory.c 	u64 pfn;
pfn               521 drivers/base/memory.c 	if (kstrtoull(buf, 0, &pfn) < 0)
pfn               523 drivers/base/memory.c 	pfn >>= PAGE_SHIFT;
pfn               524 drivers/base/memory.c 	if (!pfn_valid(pfn))
pfn               527 drivers/base/memory.c 	if (!pfn_to_online_page(pfn))
pfn               529 drivers/base/memory.c 	ret = soft_offline_page(pfn_to_page(pfn), 0);
pfn               539 drivers/base/memory.c 	u64 pfn;
pfn               542 drivers/base/memory.c 	if (kstrtoull(buf, 0, &pfn) < 0)
pfn               544 drivers/base/memory.c 	pfn >>= PAGE_SHIFT;
pfn               545 drivers/base/memory.c 	ret = memory_failure(pfn, 0);
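
The drivers/base/memory.c entries above implement the offline sysfs knobs: user space writes a physical byte address, the driver derives the pfn and feeds it to the memory-failure machinery. A hedged sketch of that store path (the example_ name is hypothetical):

#include <linux/kernel.h>
#include <linux/mm.h>

static ssize_t example_hard_offline_store(const char *buf, size_t count)
{
	u64 pfn;
	int ret;

	if (kstrtoull(buf, 0, &pfn) < 0)
		return -EINVAL;
	pfn >>= PAGE_SHIFT;	/* the input is a byte address, not a pfn */
	ret = memory_failure((unsigned long)pfn, 0);
	return ret ? ret : count;
}
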
pfn               750 drivers/base/node.c static int __ref get_nid_for_pfn(unsigned long pfn)
pfn               752 drivers/base/node.c 	if (!pfn_valid_within(pfn))
pfn               756 drivers/base/node.c 		return early_pfn_to_nid(pfn);
pfn               758 drivers/base/node.c 	return pfn_to_nid(pfn);
pfn               769 drivers/base/node.c 	unsigned long pfn;
pfn               771 drivers/base/node.c 	for (pfn = start_pfn; pfn <= end_pfn; pfn++) {
pfn               778 drivers/base/node.c 		if (!pfn_present(pfn)) {
pfn               779 drivers/base/node.c 			pfn = round_down(pfn + PAGES_PER_SECTION,
pfn               790 drivers/base/node.c 			page_nid = get_nid_for_pfn(pfn);
pfn                57 drivers/char/mem.c static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
pfn                64 drivers/char/mem.c static inline int page_is_allowed(unsigned long pfn)
pfn                66 drivers/char/mem.c 	return devmem_is_allowed(pfn);
pfn                68 drivers/char/mem.c static inline int range_is_allowed(unsigned long pfn, unsigned long size)
pfn                70 drivers/char/mem.c 	u64 from = ((u64)pfn) << PAGE_SHIFT;
pfn                75 drivers/char/mem.c 		if (!devmem_is_allowed(pfn))
pfn                78 drivers/char/mem.c 		pfn++;
pfn                83 drivers/char/mem.c static inline int page_is_allowed(unsigned long pfn)
pfn                87 drivers/char/mem.c static inline int range_is_allowed(unsigned long pfn, unsigned long size)
pfn               272 drivers/char/mem.c 	unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
pfn               313 drivers/char/mem.c static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
pfn               317 drivers/char/mem.c 	phys_addr_t offset = pfn << PAGE_SHIFT;
pfn               414 drivers/char/mem.c 	unsigned long pfn;
pfn               417 drivers/char/mem.c 	pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;
pfn               426 drivers/char/mem.c 	if (!pfn_valid(pfn))
pfn               429 drivers/char/mem.c 	vma->vm_pgoff = pfn;
pfn               141 drivers/char/mspec.c 	unsigned long pfn;
pfn               163 drivers/char/mspec.c 	pfn = paddr >> PAGE_SHIFT;
pfn               165 drivers/char/mspec.c 	return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
pfn                79 drivers/dax/device.c 				struct vm_fault *vmf, pfn_t *pfn)
pfn               105 drivers/dax/device.c 	*pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
pfn               107 drivers/dax/device.c 	return vmf_insert_mixed(vmf->vma, vmf->address, *pfn);
pfn               111 drivers/dax/device.c 				struct vm_fault *vmf, pfn_t *pfn)
pfn               153 drivers/dax/device.c 	*pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
pfn               155 drivers/dax/device.c 	return vmf_insert_pfn_pmd(vmf, *pfn, vmf->flags & FAULT_FLAG_WRITE);
pfn               160 drivers/dax/device.c 				struct vm_fault *vmf, pfn_t *pfn)
pfn               203 drivers/dax/device.c 	*pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
pfn               205 drivers/dax/device.c 	return vmf_insert_pfn_pud(vmf, *pfn, vmf->flags & FAULT_FLAG_WRITE);
pfn               209 drivers/dax/device.c 				struct vm_fault *vmf, pfn_t *pfn)
pfn               222 drivers/dax/device.c 	pfn_t pfn;
pfn               233 drivers/dax/device.c 		rc = __dev_dax_pte_fault(dev_dax, vmf, &pfn);
pfn               237 drivers/dax/device.c 		rc = __dev_dax_pmd_fault(dev_dax, vmf, &pfn);
pfn               241 drivers/dax/device.c 		rc = __dev_dax_pud_fault(dev_dax, vmf, &pfn);
pfn               262 drivers/dax/device.c 			page = pfn_to_page(pfn_t_to_pfn(pfn) + i);
pfn                77 drivers/dax/super.c 	pfn_t pfn, end_pfn;
pfn               104 drivers/dax/super.c 	len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
pfn               114 drivers/dax/super.c 	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED) && pfn_t_special(pfn)) {
pfn               125 drivers/dax/super.c 	} else if (pfn_t_devmap(pfn) && pfn_t_devmap(end_pfn)) {
pfn               128 drivers/dax/super.c 		pgmap = get_dev_pagemap(pfn_t_to_pfn(pfn), NULL);
pfn               131 drivers/dax/super.c 				&& pfn_t_to_page(pfn)->pgmap == pgmap
pfn               133 drivers/dax/super.c 				&& pfn_t_to_pfn(pfn) == PHYS_PFN(__pa(kaddr))
pfn               297 drivers/dax/super.c 		void **kaddr, pfn_t *pfn)
pfn               311 drivers/dax/super.c 			kaddr, pfn);
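
The dax entries above all revolve around dax_direct_access(), which translates a device page offset into both a kernel virtual address and a pfn_t describing the same memory. A minimal sketch of the calling convention, assuming a valid dax_device (the function name is hypothetical):

#include <linux/dax.h>
#include <linux/pfn_t.h>

static long example_peek_dax(struct dax_device *dax_dev, pgoff_t pgoff)
{
	void *kaddr;
	pfn_t pfn;
	long nr;

	nr = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
	if (nr < 1)
		return nr < 0 ? nr : -EIO;
	/* For real memory, pfn_t_to_pfn(pfn) matches PHYS_PFN(__pa(kaddr)),
	 * which is exactly the sanity check super.c performs above. */
	return nr;
}
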
pfn                38 drivers/edac/cell_edac.c 	unsigned long			address, pfn, offset, syndrome;
pfn                47 drivers/edac/cell_edac.c 	pfn = address >> PAGE_SHIFT;
pfn                53 drivers/edac/cell_edac.c 			     csrow->first_page + pfn, offset, syndrome,
pfn                61 drivers/edac/cell_edac.c 	unsigned long			address, pfn, offset;
pfn                70 drivers/edac/cell_edac.c 	pfn = address >> PAGE_SHIFT;
pfn                75 drivers/edac/cell_edac.c 			     csrow->first_page + pfn, offset, 0,
pfn               436 drivers/edac/cpc925_edac.c 		unsigned long *pfn, unsigned long *offset, int *csrow)
pfn               499 drivers/edac/cpc925_edac.c 	*pfn = pa >> PAGE_SHIFT;
pfn               525 drivers/edac/cpc925_edac.c 	unsigned long pfn = 0, offset = 0;
pfn               539 drivers/edac/cpc925_edac.c 	cpc925_mc_get_pfn(mci, mear, &pfn, &offset, &csrow);
pfn               545 drivers/edac/cpc925_edac.c 				     pfn, offset, syndrome,
pfn               553 drivers/edac/cpc925_edac.c 				     pfn, offset, 0,
pfn               285 drivers/edac/fsl_ddr_edac.c 	u32 pfn;
pfn               318 drivers/edac/fsl_ddr_edac.c 	pfn = err_addr >> PAGE_SHIFT;
pfn               322 drivers/edac/fsl_ddr_edac.c 		if ((pfn >= csrow->first_page) && (pfn <= csrow->last_page))
pfn               355 drivers/edac/fsl_ddr_edac.c 	fsl_mc_printk(mci, KERN_ERR, "PFN: %#8.8x\n", pfn);
pfn               363 drivers/edac/fsl_ddr_edac.c 				     pfn, err_addr & ~PAGE_MASK, syndrome,
pfn               369 drivers/edac/fsl_ddr_edac.c 				     pfn, err_addr & ~PAGE_MASK, syndrome,
pfn               235 drivers/edac/i3000_edac.c 	unsigned long pfn, offset;
pfn               252 drivers/edac/i3000_edac.c 	pfn = deap_pfn(info->edeap, info->deap);
pfn               256 drivers/edac/i3000_edac.c 	row = edac_mc_find_csrow_by_page(mci, pfn);
pfn               260 drivers/edac/i3000_edac.c 				     pfn, offset, 0,
pfn               265 drivers/edac/i3000_edac.c 				     pfn, offset, info->derrsyn,
pfn              2325 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 		unsigned long pfn;
pfn              2337 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 		pfn = addr >> PAGE_SHIFT;
pfn              2338 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 		if (!pfn_valid(pfn))
pfn              2341 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 		p = pfn_to_page(pfn);
pfn              2380 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 		unsigned long pfn;
pfn              2388 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 		pfn = addr >> PAGE_SHIFT;
pfn              2389 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 		if (!pfn_valid(pfn))
pfn              2392 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 		p = pfn_to_page(pfn);
pfn               348 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 	uint64_t pfn;
pfn               368 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 	cursor->pfn = start;
pfn               397 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 	idx = (cursor->pfn >> shift) & mask;
pfn               429 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 	cursor->pfn += 1ULL << shift;
pfn               430 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 	cursor->pfn &= ~((1ULL << shift) - 1);
pfn               474 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 			cursor->pfn = ~0ll;
pfn              1395 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 	while (cursor.pfn < end) {
pfn              1445 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 		pe_start = ((cursor.pfn >> shift) & mask) * 8;
pfn              1447 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 		entry_end += cursor.pfn & ~(entry_end - 1);
pfn              1473 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 			while (cursor.pfn < frag_start) {
pfn              1567 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 	uint64_t pfn, start = mapping->start;
pfn              1602 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 	pfn = mapping->offset >> PAGE_SHIFT;
pfn              1604 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 		while (pfn >= nodes->size) {
pfn              1605 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 			pfn -= nodes->size;
pfn              1617 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 			max_entries = (nodes->size - pfn) *
pfn              1630 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 				uint64_t idx = pfn + count;
pfn              1638 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 				addr = pfn << PAGE_SHIFT;
pfn              1641 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 				addr = pages_addr[pfn];
pfn              1647 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 			addr += pfn << PAGE_SHIFT;
pfn              1657 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 		pfn += (last - start + 1) / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
pfn              1658 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 		if (nodes && nodes->size == pfn) {
pfn              1659 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 			pfn = 0;
pfn               784 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	unsigned long pfn;
pfn               802 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	pfn = __pa(page->kernel_address);
pfn               803 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	pfn >>= PAGE_SHIFT;
pfn               811 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	pr_debug("     pfn                 == 0x%016lX\n", pfn);
pfn               819 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	ret = remap_pfn_range(vma, vma->vm_start, pfn,
pfn                22 drivers/gpu/drm/armada/armada_gem.c 	unsigned long pfn = obj->phys_addr >> PAGE_SHIFT;
pfn                24 drivers/gpu/drm/armada/armada_gem.c 	pfn += (vmf->address - vmf->vma->vm_start) >> PAGE_SHIFT;
pfn                25 drivers/gpu/drm/armada/armada_gem.c 	return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
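
The mspec and armada entries above share the classic pfn-based fault handler: take the object's base pfn, add the page offset of the faulting address within the vma, and insert a raw pfn mapping. A hedged sketch of the pattern (helper name hypothetical):

#include <linux/mm.h>

static vm_fault_t example_gem_fault(struct vm_fault *vmf, unsigned long base_pfn)
{
	unsigned long pfn = base_pfn +
		((vmf->address - vmf->vma->vm_start) >> PAGE_SHIFT);

	return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
}
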
pfn               389 drivers/gpu/drm/exynos/exynos_drm_gem.c 	unsigned long pfn;
pfn               399 drivers/gpu/drm/exynos/exynos_drm_gem.c 	pfn = page_to_pfn(exynos_gem->pages[page_offset]);
pfn               401 drivers/gpu/drm/exynos/exynos_drm_gem.c 			__pfn_to_pfn_t(pfn, PFN_DEV));
pfn               110 drivers/gpu/drm/gma500/framebuffer.c 	unsigned long pfn;
pfn               120 drivers/gpu/drm/gma500/framebuffer.c 		pfn = (phys_addr >> PAGE_SHIFT);
pfn               123 drivers/gpu/drm/gma500/framebuffer.c 				__pfn_to_pfn_t(pfn, PFN_DEV));
pfn               133 drivers/gpu/drm/gma500/gem.c 	unsigned long pfn;
pfn               166 drivers/gpu/drm/gma500/gem.c 		pfn = (dev_priv->stolen_base + r->offset) >> PAGE_SHIFT;
pfn               168 drivers/gpu/drm/gma500/gem.c 		pfn = page_to_pfn(r->pages[page_offset]);
pfn               169 drivers/gpu/drm/gma500/gem.c 	ret = vmf_insert_pfn(vma, vmf->address, pfn);
pfn                29 drivers/gpu/drm/gma500/gtt.c static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type)
pfn                35 drivers/gpu/drm/gma500/gtt.c 	BUG_ON(pfn & ~(0xFFFFFFFF >> PAGE_SHIFT));
pfn                44 drivers/gpu/drm/gma500/gtt.c 	return (pfn << PAGE_SHIFT) | mask;
pfn               151 drivers/gpu/drm/gma500/mmu.c static inline uint32_t psb_mmu_mask_pte(uint32_t pfn, int type)
pfn               162 drivers/gpu/drm/gma500/mmu.c 	return (pfn << PAGE_SHIFT) | mask;
pfn               764 drivers/gpu/drm/gma500/mmu.c 			   unsigned long *pfn)
pfn               788 drivers/gpu/drm/gma500/mmu.c 		*pfn = pd->invalid_pte >> PAGE_SHIFT;
pfn               796 drivers/gpu/drm/gma500/mmu.c 		*pfn = tmp >> PAGE_SHIFT;
pfn                75 drivers/gpu/drm/gma500/mmu.h 				  unsigned long *pfn);
pfn               364 drivers/gpu/drm/i915/gvt/gtt.c 	unsigned long pfn;
pfn               367 drivers/gpu/drm/i915/gvt/gtt.c 		pfn = (e->val64 & ADDR_1G_MASK) >> PAGE_SHIFT;
pfn               369 drivers/gpu/drm/i915/gvt/gtt.c 		pfn = (e->val64 & ADDR_2M_MASK) >> PAGE_SHIFT;
pfn               371 drivers/gpu/drm/i915/gvt/gtt.c 		pfn = (e->val64 & ADDR_64K_MASK) >> PAGE_SHIFT;
pfn               373 drivers/gpu/drm/i915/gvt/gtt.c 		pfn = (e->val64 & ADDR_4K_MASK) >> PAGE_SHIFT;
pfn               374 drivers/gpu/drm/i915/gvt/gtt.c 	return pfn;
pfn               377 drivers/gpu/drm/i915/gvt/gtt.c static void gen8_gtt_set_pfn(struct intel_gvt_gtt_entry *e, unsigned long pfn)
pfn               381 drivers/gpu/drm/i915/gvt/gtt.c 		pfn &= (ADDR_1G_MASK >> PAGE_SHIFT);
pfn               384 drivers/gpu/drm/i915/gvt/gtt.c 		pfn &= (ADDR_2M_MASK >> PAGE_SHIFT);
pfn               387 drivers/gpu/drm/i915/gvt/gtt.c 		pfn &= (ADDR_64K_MASK >> PAGE_SHIFT);
pfn               390 drivers/gpu/drm/i915/gvt/gtt.c 		pfn &= (ADDR_4K_MASK >> PAGE_SHIFT);
pfn               393 drivers/gpu/drm/i915/gvt/gtt.c 	e->val64 |= (pfn << PAGE_SHIFT);
pfn               979 drivers/gpu/drm/i915/gvt/gtt.c 	unsigned long pfn;
pfn               982 drivers/gpu/drm/i915/gvt/gtt.c 	pfn = ops->get_pfn(entry);
pfn               986 drivers/gpu/drm/i915/gvt/gtt.c 	if (!pfn || pfn == vgpu->gtt.scratch_pt[type].page_mfn)
pfn               989 drivers/gpu/drm/i915/gvt/gtt.c 	intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT);
pfn              1154 drivers/gpu/drm/i915/gvt/gtt.c 	unsigned long pfn;
pfn              1159 drivers/gpu/drm/i915/gvt/gtt.c 	pfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, ops->get_pfn(entry));
pfn              1160 drivers/gpu/drm/i915/gvt/gtt.c 	if (pfn == INTEL_GVT_INVALID_ADDR)
pfn              1163 drivers/gpu/drm/i915/gvt/gtt.c 	return PageTransHuge(pfn_to_page(pfn));
pfn              2198 drivers/gpu/drm/i915/gvt/gtt.c 	unsigned long pfn;
pfn              2200 drivers/gpu/drm/i915/gvt/gtt.c 	pfn = pte_ops->get_pfn(entry);
pfn              2201 drivers/gpu/drm/i915/gvt/gtt.c 	if (pfn != vgpu->gvt->gtt.scratch_mfn)
pfn              2203 drivers/gpu/drm/i915/gvt/gtt.c 						pfn << PAGE_SHIFT);
pfn                71 drivers/gpu/drm/i915/gvt/gtt.h 	void (*set_pfn)(struct intel_gvt_gtt_entry *e, unsigned long pfn);
pfn               153 drivers/gpu/drm/i915/gvt/kvmgt.c 		unsigned long pfn;
pfn               156 drivers/gpu/drm/i915/gvt/kvmgt.c 				     IOMMU_READ | IOMMU_WRITE, &pfn);
pfn               163 drivers/gpu/drm/i915/gvt/kvmgt.c 		if (!pfn_valid(pfn)) {
pfn               164 drivers/gpu/drm/i915/gvt/kvmgt.c 			gvt_vgpu_err("pfn 0x%lx is not mem backed\n", pfn);
pfn               171 drivers/gpu/drm/i915/gvt/kvmgt.c 			base_pfn = pfn;
pfn               172 drivers/gpu/drm/i915/gvt/kvmgt.c 		else if (base_pfn + npage != pfn) {
pfn              1870 drivers/gpu/drm/i915/gvt/kvmgt.c 	kvm_pfn_t pfn;
pfn              1877 drivers/gpu/drm/i915/gvt/kvmgt.c 	pfn = gfn_to_pfn(info->kvm, gfn);
pfn              1878 drivers/gpu/drm/i915/gvt/kvmgt.c 	if (is_error_noslot_pfn(pfn))
pfn              1881 drivers/gpu/drm/i915/gvt/kvmgt.c 	return pfn;
pfn              2487 drivers/gpu/drm/i915/i915_drv.h 		     unsigned long addr, unsigned long pfn, unsigned long size,
pfn                34 drivers/gpu/drm/i915/i915_mm.c 	unsigned long pfn;
pfn                43 drivers/gpu/drm/i915/i915_mm.c 	set_pte_at(r->mm, addr, pte, pte_mkspecial(pfn_pte(r->pfn, r->prot)));
pfn                44 drivers/gpu/drm/i915/i915_mm.c 	r->pfn++;
pfn                60 drivers/gpu/drm/i915/i915_mm.c 		     unsigned long addr, unsigned long pfn, unsigned long size,
pfn                71 drivers/gpu/drm/i915/i915_mm.c 	r.pfn = pfn;
pfn                77 drivers/gpu/drm/i915/i915_mm.c 		zap_vma_ptes(vma, addr, (r.pfn - pfn) << PAGE_SHIFT);
pfn                22 drivers/gpu/drm/i915/i915_scatterlist.h 		unsigned long pfn;
pfn                36 drivers/gpu/drm/i915/i915_scatterlist.h 			s.pfn = page_to_pfn(sg_page(s.sgp));
pfn                90 drivers/gpu/drm/i915/i915_scatterlist.h 	     ((__pp) = (__iter).pfn == 0 ? NULL :			\
pfn                91 drivers/gpu/drm/i915/i915_scatterlist.h 	      pfn_to_page((__iter).pfn + ((__iter).curr >> PAGE_SHIFT))); \
pfn                48 drivers/gpu/drm/i915/selftests/scatterlist.c 	unsigned long pfn, n;
pfn                50 drivers/gpu/drm/i915/selftests/scatterlist.c 	pfn = pt->start;
pfn                55 drivers/gpu/drm/i915/selftests/scatterlist.c 		if (page_to_pfn(page) != pfn) {
pfn                57 drivers/gpu/drm/i915/selftests/scatterlist.c 			       __func__, who, pfn, page_to_pfn(page));
pfn                70 drivers/gpu/drm/i915/selftests/scatterlist.c 		pfn += npages;
pfn                72 drivers/gpu/drm/i915/selftests/scatterlist.c 	if (pfn != pt->end) {
pfn                74 drivers/gpu/drm/i915/selftests/scatterlist.c 		       __func__, who, pt->end, pfn);
pfn                86 drivers/gpu/drm/i915/selftests/scatterlist.c 	unsigned long pfn;
pfn                88 drivers/gpu/drm/i915/selftests/scatterlist.c 	pfn = pt->start;
pfn                92 drivers/gpu/drm/i915/selftests/scatterlist.c 		if (page != pfn_to_page(pfn)) {
pfn                94 drivers/gpu/drm/i915/selftests/scatterlist.c 			       __func__, who, pfn, page_to_pfn(page));
pfn               101 drivers/gpu/drm/i915/selftests/scatterlist.c 		pfn++;
pfn               103 drivers/gpu/drm/i915/selftests/scatterlist.c 	if (pfn != pt->end) {
pfn               105 drivers/gpu/drm/i915/selftests/scatterlist.c 		       __func__, who, pt->end, pfn);
pfn               118 drivers/gpu/drm/i915/selftests/scatterlist.c 	unsigned long pfn;
pfn               120 drivers/gpu/drm/i915/selftests/scatterlist.c 	pfn = pt->start;
pfn               122 drivers/gpu/drm/i915/selftests/scatterlist.c 		if (page != pfn_to_page(pfn)) {
pfn               124 drivers/gpu/drm/i915/selftests/scatterlist.c 			       __func__, who, pfn, page_to_pfn(page));
pfn               131 drivers/gpu/drm/i915/selftests/scatterlist.c 		pfn++;
pfn               133 drivers/gpu/drm/i915/selftests/scatterlist.c 	if (pfn != pt->end) {
pfn               135 drivers/gpu/drm/i915/selftests/scatterlist.c 		       __func__, who, pt->end, pfn);
pfn               221 drivers/gpu/drm/i915/selftests/scatterlist.c 	unsigned long n, pfn;
pfn               235 drivers/gpu/drm/i915/selftests/scatterlist.c 	pfn = pt->start;
pfn               241 drivers/gpu/drm/i915/selftests/scatterlist.c 		if (!page_contiguous(pfn_to_page(pfn),
pfn               242 drivers/gpu/drm/i915/selftests/scatterlist.c 				     pfn_to_page(pfn + npages),
pfn               250 drivers/gpu/drm/i915/selftests/scatterlist.c 		sg_set_page(sg, pfn_to_page(pfn), npages * PAGE_SIZE, 0);
pfn               252 drivers/gpu/drm/i915/selftests/scatterlist.c 		GEM_BUG_ON(page_to_pfn(sg_page(sg)) != pfn);
pfn               256 drivers/gpu/drm/i915/selftests/scatterlist.c 		pfn += npages;
pfn               260 drivers/gpu/drm/i915/selftests/scatterlist.c 	pt->end = pfn;
pfn                96 drivers/gpu/drm/lima/lima_gem.c 	pfn_t pfn;
pfn               101 drivers/gpu/drm/lima/lima_gem.c 	pfn = __pfn_to_pfn_t(page_to_pfn(bo->pages[pgoff]), PFN_DEV);
pfn               103 drivers/gpu/drm/lima/lima_gem.c 	return vmf_insert_mixed(vma, vmf->address, pfn);
pfn               256 drivers/gpu/drm/msm/msm_gem.c 	unsigned long pfn;
pfn               286 drivers/gpu/drm/msm/msm_gem.c 	pfn = page_to_pfn(pages[pgoff]);
pfn               289 drivers/gpu/drm/msm/msm_gem.c 			pfn, pfn << PAGE_SHIFT);
pfn               291 drivers/gpu/drm/msm/msm_gem.c 	ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
pfn                69 drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h 	u64 *pfn;
pfn               258 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c nvkm_vmm_unref_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
pfn               265 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	if (pfn) {
pfn               364 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c nvkm_vmm_ref_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
pfn               394 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c nvkm_vmm_sparse_unref_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
pfn               402 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	return nvkm_vmm_unref_ptes(it, pfn, ptei, ptes);
pfn               406 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c nvkm_vmm_sparse_ref_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
pfn               409 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	return nvkm_vmm_ref_ptes(it, pfn, ptei, ptes);
pfn               502 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	      u64 addr, u64 size, const char *name, bool ref, bool pfn,
pfn               503 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	      bool (*REF_PTES)(struct nvkm_vmm_iter *, bool pfn, u32, u32),
pfn               563 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		if (!REF_PTES || REF_PTES(&it, pfn, ptei, ptes)) {
pfn               681 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 			u64 addr, u64 size, bool sparse, bool pfn)
pfn               685 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		      false, pfn, nvkm_vmm_unref_ptes, NULL, NULL,
pfn               707 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		    u64 addr, u64 size, bool sparse, bool pfn)
pfn               710 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	nvkm_vmm_iter(vmm, page, addr, size, "unmap", false, pfn,
pfn              1211 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c nvkm_vmm_pfn_map(struct nvkm_vmm *vmm, u8 shift, u64 addr, u64 size, u64 *pfn)
pfn              1224 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	       page->desc->func->pfn == NULL)
pfn              1239 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		bool map = !!(pfn[pi] & NVKM_VMM_PFN_V);
pfn              1249 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 			if (map != !!(pfn[pi + pn] & NVKM_VMM_PFN_V))
pfn              1294 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 			args.pfn = &pfn[pi];
pfn              1299 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 							    desc->func->pfn);
pfn              1302 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 						  page->desc->func->pfn);
pfn              1322 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 				pfn[pi++] = NVKM_VMM_PFN_NONE;
pfn              1351 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c nvkm_vmm_unmap_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma, bool pfn)
pfn              1356 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		nvkm_vmm_ptes_unmap_put(vmm, page, vma->addr, vma->size, vma->sparse, pfn);
pfn              1359 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		nvkm_vmm_ptes_unmap(vmm, page, vma->addr, vma->size, vma->sparse, pfn);
pfn              1806 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c nvkm_vmm_boot_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
pfn                71 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h 	nvkm_vmm_pte_func pfn;
pfn               176 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h void nvkm_vmm_unmap_locked(struct nvkm_vmm *, struct nvkm_vma *, bool pfn);
pfn               188 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h int nvkm_vmm_pfn_map(struct nvkm_vmm *, u8 page, u64 addr, u64 size, u64 *pfn);
pfn                84 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c 		if (!(*map->pfn & NVKM_VMM_PFN_W))
pfn                87 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c 		if (!(*map->pfn & NVKM_VMM_PFN_VRAM)) {
pfn                88 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c 			addr = *map->pfn >> NVKM_VMM_PFN_ADDR_SHIFT;
pfn                98 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c 			data |= (*map->pfn & NVKM_VMM_PFN_ADDR) >> 4;
pfn               103 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c 		map->pfn++;
pfn               170 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c 	.pfn = gp100_vmm_pgt_pfn,
pfn               350 drivers/gpu/drm/omapdrm/omap_gem.c 	unsigned long pfn;
pfn               358 drivers/gpu/drm/omapdrm/omap_gem.c 		pfn = page_to_pfn(omap_obj->pages[pgoff]);
pfn               361 drivers/gpu/drm/omapdrm/omap_gem.c 		pfn = (omap_obj->dma_addr >> PAGE_SHIFT) + pgoff;
pfn               365 drivers/gpu/drm/omapdrm/omap_gem.c 			pfn, pfn << PAGE_SHIFT);
pfn               368 drivers/gpu/drm/omapdrm/omap_gem.c 			__pfn_to_pfn_t(pfn, PFN_DEV));
pfn               380 drivers/gpu/drm/omapdrm/omap_gem.c 	unsigned long pfn;
pfn               457 drivers/gpu/drm/omapdrm/omap_gem.c 	pfn = entry->dma_addr >> PAGE_SHIFT;
pfn               460 drivers/gpu/drm/omapdrm/omap_gem.c 			pfn, pfn << PAGE_SHIFT);
pfn               464 drivers/gpu/drm/omapdrm/omap_gem.c 			vaddr, __pfn_to_pfn_t(pfn, PFN_DEV));
pfn               467 drivers/gpu/drm/omapdrm/omap_gem.c 		pfn += priv->usergart[fmt].stride_pfn;
pfn               117 drivers/gpu/drm/ttm/ttm_bo_vm.c 	unsigned long pfn;
pfn               261 drivers/gpu/drm/ttm/ttm_bo_vm.c 			pfn = ttm_bo_io_mem_pfn(bo, page_offset);
pfn               272 drivers/gpu/drm/ttm/ttm_bo_vm.c 			pfn = page_to_pfn(page);
pfn               277 drivers/gpu/drm/ttm/ttm_bo_vm.c 					__pfn_to_pfn_t(pfn, PFN_DEV));
pfn               279 drivers/gpu/drm/ttm/ttm_bo_vm.c 			ret = vmf_insert_pfn(&cvma, address, pfn);
pfn               389 drivers/hv/channel.c 				gpadl_body->pfn[i] = virt_to_hvpfn(
pfn               823 drivers/hv/channel.c 		desc.range[i].pfn	 = pagebuffers[i].pfn;
pfn               573 drivers/hv/hv_balloon.c 				     unsigned long pfn)
pfn               578 drivers/hv/hv_balloon.c 	if ((pfn < has->covered_start_pfn) || (pfn >= has->covered_end_pfn))
pfn               583 drivers/hv/hv_balloon.c 		if ((pfn >= gap->start_pfn) && (pfn < gap->end_pfn))
pfn               593 drivers/hv/hv_balloon.c 	unsigned long pfn = start_pfn, count = 0;
pfn               597 drivers/hv/hv_balloon.c 	while (pfn < start_pfn + nr_pages) {
pfn               604 drivers/hv/hv_balloon.c 			while ((pfn >= has->start_pfn) &&
pfn               605 drivers/hv/hv_balloon.c 			       (pfn < has->end_pfn) &&
pfn               606 drivers/hv/hv_balloon.c 			       (pfn < start_pfn + nr_pages)) {
pfn               608 drivers/hv/hv_balloon.c 				if (has_pfn_is_backed(has, pfn))
pfn               610 drivers/hv/hv_balloon.c 				pfn++;
pfn               620 drivers/hv/hv_balloon.c 			pfn++;
pfn               773 drivers/hv/hv_balloon.c 	unsigned long pfn = page_to_pfn(pg);
pfn               778 drivers/hv/hv_balloon.c 		if ((pfn < has->start_pfn) ||
pfn               779 drivers/hv/hv_balloon.c 				(pfn + (1UL << order) > has->end_pfn))
pfn               782 drivers/hv/hv_balloon.c 		hv_bring_pgs_online(has, pfn, 1UL << order);
pfn               938 drivers/infiniband/core/uverbs_main.c 		      unsigned long pfn, unsigned long size, pgprot_t prot)
pfn               960 drivers/infiniband/core/uverbs_main.c 	if (io_remap_pfn_range(vma, vma->vm_start, pfn, size, prot)) {
pfn              3692 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	u64 pfn;
pfn              3705 drivers/infiniband/hw/bnxt_re/ib_verbs.c 		pfn = virt_to_phys(uctx->shpg) >> PAGE_SHIFT;
pfn              3707 drivers/infiniband/hw/bnxt_re/ib_verbs.c 				    pfn, PAGE_SIZE, vma->vm_page_prot)) {
pfn              1597 drivers/infiniband/hw/efa/efa_verbs.c 	u64 pfn;
pfn              1611 drivers/infiniband/hw/efa/efa_verbs.c 	pfn = entry->address >> PAGE_SHIFT;
pfn              1614 drivers/infiniband/hw/efa/efa_verbs.c 		err = rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn, length,
pfn              1618 drivers/infiniband/hw/efa/efa_verbs.c 		err = rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn, length,
pfn              1623 drivers/infiniband/hw/efa/efa_verbs.c 		     va += PAGE_SIZE, pfn++) {
pfn              1624 drivers/infiniband/hw/efa/efa_verbs.c 			err = vm_insert_page(vma, va, pfn_to_page(pfn));
pfn               591 drivers/infiniband/hw/hfi1/user_exp_rcv.c 	unsigned long pfn, this_pfn;
pfn               603 drivers/infiniband/hw/hfi1/user_exp_rcv.c 	pfn = page_to_pfn(pages[0]);
pfn               611 drivers/infiniband/hw/hfi1/user_exp_rcv.c 		if (this_pfn != ++pfn) {
pfn               645 drivers/infiniband/hw/hfi1/user_exp_rcv.c 			pfn = this_pfn;
pfn               277 drivers/infiniband/hw/hns/hns_roce_device.h 	u64		pfn;
pfn               360 drivers/infiniband/hw/hns/hns_roce_main.c 					 to_hr_ucontext(context)->uar.pfn,
pfn               114 drivers/infiniband/hw/hns/hns_roce_pd.c 		uar->pfn = ((res->start) >> PAGE_SHIFT) + uar->index;
pfn               116 drivers/infiniband/hw/hns/hns_roce_pd.c 		uar->pfn = ((pci_resource_start(hr_dev->pci_dev, 2))
pfn              1159 drivers/infiniband/hw/mlx4/main.c 					 to_mucontext(context)->uar.pfn,
pfn              1168 drivers/infiniband/hw/mlx4/main.c 			to_mucontext(context)->uar.pfn +
pfn              2656 drivers/infiniband/hw/mlx4/main.c 	ibdev->uar_map = ioremap((phys_addr_t) ibdev->priv_uar.pfn << PAGE_SHIFT,
pfn              2087 drivers/infiniband/hw/mlx5/main.c 	phys_addr_t pfn;
pfn              2164 drivers/infiniband/hw/mlx5/main.c 	pfn = uar_index2pfn(dev, uar_index);
pfn              2165 drivers/infiniband/hw/mlx5/main.c 	mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn);
pfn              2167 drivers/infiniband/hw/mlx5/main.c 	err = rdma_user_mmap_io(&context->ibucontext, vma, pfn, PAGE_SIZE,
pfn              2199 drivers/infiniband/hw/mlx5/main.c 	phys_addr_t pfn;
pfn              2205 drivers/infiniband/hw/mlx5/main.c 	pfn = ((dev->mdev->bar_addr +
pfn              2209 drivers/infiniband/hw/mlx5/main.c 	return rdma_user_mmap_io(context, vma, pfn, map_size,
pfn              2218 drivers/infiniband/hw/mlx5/main.c 	phys_addr_t pfn;
pfn              2243 drivers/infiniband/hw/mlx5/main.c 		pfn = (dev->mdev->iseg_base +
pfn              2246 drivers/infiniband/hw/mlx5/main.c 		return rdma_user_mmap_io(&context->ibucontext, vma, pfn,
pfn                54 drivers/infiniband/hw/mlx5/mem.c 	u64 len, pfn;
pfn                67 drivers/infiniband/hw/mlx5/mem.c 		pfn = sg_dma_address(sg) >> PAGE_SHIFT;
pfn                68 drivers/infiniband/hw/mlx5/mem.c 		if (base + p != pfn) {
pfn                72 drivers/infiniband/hw/mlx5/mem.c 			tmp = (unsigned long)(pfn | p);
pfn                76 drivers/infiniband/hw/mlx5/mem.c 			base = pfn;
pfn               714 drivers/infiniband/hw/mthca/mthca_main.c 	dev->kar = ioremap((phys_addr_t) dev->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
pfn               359 drivers/infiniband/hw/mthca/mthca_provider.c 			       to_mucontext(context)->uar.pfn,
pfn                58 drivers/infiniband/hw/mthca/mthca_provider.h 	unsigned long pfn;
pfn                44 drivers/infiniband/hw/mthca/mthca_uar.c 	uar->pfn = (pci_resource_start(dev->pdev, 2) >> PAGE_SHIFT) + uar->index;
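
mthca, mlx4 and mlx5 above all locate a doorbell (UAR) page by pfn and map it one page at a time; note the cast to phys_addr_t before shifting so the address is not truncated on 32-bit builds. A one-line sketch of the idiom (name hypothetical):

#include <linux/io.h>

static void __iomem *example_map_uar(unsigned long uar_pfn)
{
	return ioremap((phys_addr_t)uar_pfn << PAGE_SHIFT, PAGE_SIZE);
}
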
pfn               712 drivers/infiniband/hw/qib/qib_file_ops.c 	unsigned long pfn;
pfn               739 drivers/infiniband/hw/qib/qib_file_ops.c 	pfn = virt_to_phys(kvaddr) >> PAGE_SHIFT;
pfn               740 drivers/infiniband/hw/qib/qib_file_ops.c 	ret = remap_pfn_range(vma, vma->vm_start, pfn,
pfn               745 drivers/infiniband/hw/qib/qib_file_ops.c 			what, rcd->ctxt, pfn, len, ret);
pfn               833 drivers/infiniband/hw/qib/qib_file_ops.c 	unsigned long pfn;
pfn               860 drivers/infiniband/hw/qib/qib_file_ops.c 		pfn = virt_to_phys(rcd->rcvegrbuf[i]) >> PAGE_SHIFT;
pfn               861 drivers/infiniband/hw/qib/qib_file_ops.c 		ret = remap_pfn_range(vma, start, pfn, size,
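
The qib entries above (and the genwqe and comedi ones further down) use the standard driver mmap recipe: convert a kernel buffer to its pfn via virt_to_phys() and hand the whole range to remap_pfn_range(). A hedged sketch, assuming a physically contiguous kernel buffer (name hypothetical):

#include <linux/io.h>
#include <linux/mm.h>

static int example_mmap_kbuf(struct vm_area_struct *vma, void *kvaddr,
			     unsigned long len)
{
	unsigned long pfn = virt_to_phys(kvaddr) >> PAGE_SHIFT;

	return remap_pfn_range(vma, vma->vm_start, pfn, len,
			       vma->vm_page_prot);
}
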
pfn               110 drivers/infiniband/hw/vmw_pvrdma/pvrdma.h 	unsigned long pfn;
pfn               418 drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h 		u32 pfn; /* UAR page frame number */
pfn               109 drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c 	uar->pfn = (pci_resource_start(dev->pdev, PVRDMA_PCI_RESOURCE_UAR) >>
pfn               872 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c 	dev->driver_uar.pfn =
pfn               876 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c 		ioremap(dev->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
pfn               904 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c 		dev->dsr->uar_pfn = dev->driver_uar.pfn;
pfn               906 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c 		dev->dsr->uar_pfn64 = dev->driver_uar.pfn;
pfn               335 drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c 		cmd->pfn = context->uar.pfn;
pfn               337 drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c 		cmd->pfn64 = context->uar.pfn;
pfn               413 drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c 	if (io_remap_pfn_range(vma, start, context->uar.pfn, size,
pfn              1766 drivers/iommu/amd_iommu.c 	unsigned long pfn = 0;
pfn              1771 drivers/iommu/amd_iommu.c 		pfn = alloc_iova_fast(&dma_dom->iovad, pages,
pfn              1774 drivers/iommu/amd_iommu.c 	if (!pfn)
pfn              1775 drivers/iommu/amd_iommu.c 		pfn = alloc_iova_fast(&dma_dom->iovad, pages,
pfn              1778 drivers/iommu/amd_iommu.c 	return (pfn << PAGE_SHIFT);
pfn              1033 drivers/iommu/dma-iommu.c 	unsigned long pfn, off = vma->vm_pgoff;
pfn              1049 drivers/iommu/dma-iommu.c 		pfn = vmalloc_to_pfn(cpu_addr);
pfn              1051 drivers/iommu/dma-iommu.c 		pfn = page_to_pfn(virt_to_page(cpu_addr));
pfn              1054 drivers/iommu/dma-iommu.c 	return remap_pfn_range(vma, vma->vm_start, pfn + off,
pfn               126 drivers/iommu/intel-iommu.c static inline int pfn_level_offset(unsigned long pfn, int level)
pfn               128 drivers/iommu/intel-iommu.c 	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
pfn               141 drivers/iommu/intel-iommu.c static inline unsigned long align_to_level(unsigned long pfn, int level)
pfn               143 drivers/iommu/intel-iommu.c 	return (pfn + level_size(level) - 1) & level_mask(level);
pfn               556 drivers/iommu/intel-iommu.c 				       unsigned long pfn)
pfn               560 drivers/iommu/intel-iommu.c 	return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
pfn               882 drivers/iommu/intel-iommu.c 				      unsigned long pfn, int *target_level)
pfn               890 drivers/iommu/intel-iommu.c 	if (!domain_pfn_supported(domain, pfn))
pfn               899 drivers/iommu/intel-iommu.c 		offset = pfn_level_offset(pfn, level);
pfn               937 drivers/iommu/intel-iommu.c 					 unsigned long pfn,
pfn               946 drivers/iommu/intel-iommu.c 		offset = pfn_level_offset(pfn, total);
pfn              1001 drivers/iommu/intel-iommu.c 			       unsigned long pfn, unsigned long start_pfn,
pfn              1004 drivers/iommu/intel-iommu.c 	pfn = max(start_pfn, pfn);
pfn              1005 drivers/iommu/intel-iommu.c 	pte = &pte[pfn_level_offset(pfn, level)];
pfn              1014 drivers/iommu/intel-iommu.c 		level_pfn = pfn & level_mask(level);
pfn              1034 drivers/iommu/intel-iommu.c 		pfn += level_size(level);
pfn              1035 drivers/iommu/intel-iommu.c 	} while (!first_pte_in_page(++pte) && pfn <= last_pfn);
pfn              1095 drivers/iommu/intel-iommu.c 					struct dma_pte *pte, unsigned long pfn,
pfn              1102 drivers/iommu/intel-iommu.c 	pfn = max(start_pfn, pfn);
pfn              1103 drivers/iommu/intel-iommu.c 	pte = &pte[pfn_level_offset(pfn, level)];
pfn              1111 drivers/iommu/intel-iommu.c 		level_pfn = pfn & level_mask(level);
pfn              1133 drivers/iommu/intel-iommu.c 		pfn += level_size(level);
pfn              1134 drivers/iommu/intel-iommu.c 	} while (!first_pte_in_page(++pte) && pfn <= last_pfn);
pfn              1488 drivers/iommu/intel-iommu.c 				  unsigned long pfn, unsigned int pages,
pfn              1492 drivers/iommu/intel-iommu.c 	uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
pfn              1523 drivers/iommu/intel-iommu.c 					unsigned long pfn, unsigned int pages)
pfn              1527 drivers/iommu/intel-iommu.c 		iommu_flush_iotlb_psi(iommu, domain, pfn, pages, 0, 1);
pfn                19 drivers/iommu/iova.c 			       unsigned long pfn,
pfn               318 drivers/iommu/iova.c private_find_iova(struct iova_domain *iovad, unsigned long pfn)
pfn               327 drivers/iommu/iova.c 		if (pfn < iova->pfn_lo)
pfn               329 drivers/iommu/iova.c 		else if (pfn > iova->pfn_hi)
pfn               353 drivers/iommu/iova.c struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
pfn               360 drivers/iommu/iova.c 	iova = private_find_iova(iovad, pfn);
pfn               391 drivers/iommu/iova.c free_iova(struct iova_domain *iovad, unsigned long pfn)
pfn               393 drivers/iommu/iova.c 	struct iova *iova = find_iova(iovad, pfn);
pfn               450 drivers/iommu/iova.c free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size)
pfn               452 drivers/iommu/iova.c 	if (iova_rcache_insert(iovad, pfn, size))
pfn               455 drivers/iommu/iova.c 	free_iova(iovad, pfn);
pfn               550 drivers/iommu/iova.c 		unsigned long pfn, unsigned long pages,
pfn               573 drivers/iommu/iova.c 	fq->entries[idx].iova_pfn = pfn;
pfn               837 drivers/iommu/iova.c 	unsigned long pfn;
pfn               847 drivers/iommu/iova.c 	pfn = mag->pfns[i];
pfn               850 drivers/iommu/iova.c 	return pfn;
pfn               853 drivers/iommu/iova.c static void iova_magazine_push(struct iova_magazine *mag, unsigned long pfn)
pfn               857 drivers/iommu/iova.c 	mag->pfns[mag->size++] = pfn;
pfn               937 drivers/iommu/iova.c static bool iova_rcache_insert(struct iova_domain *iovad, unsigned long pfn,
pfn               945 drivers/iommu/iova.c 	return __iova_rcache_insert(iovad, &iovad->rcaches[log_size], pfn);
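
The iova.c entries above expose the allocator that amd_iommu drives: alloc_iova_fast() hands out a pfn-granular IOVA range (consulting the per-CPU rcache first) and free_iova_fast() returns it there. A hedged sketch of the pairing (names hypothetical; retry-without-rcache handling elided):

#include <linux/iova.h>

static dma_addr_t example_iova_alloc(struct iova_domain *iovad,
				     unsigned long pages, u64 dma_limit)
{
	unsigned long pfn = alloc_iova_fast(iovad, pages,
					    dma_limit >> PAGE_SHIFT, true);

	return (dma_addr_t)pfn << PAGE_SHIFT;	/* pfn == 0 means failure */
}

static void example_iova_free(struct iova_domain *iovad,
			      dma_addr_t iova, unsigned long pages)
{
	free_iova_fast(iovad, iova >> PAGE_SHIFT, pages);
}
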
pfn               704 drivers/iommu/tegra-smmu.c 	unsigned long pfn;
pfn               712 drivers/iommu/tegra-smmu.c 	pfn = *pte & as->smmu->pfn_mask;
pfn               714 drivers/iommu/tegra-smmu.c 	return PFN_PHYS(pfn);
pfn               167 drivers/md/dm-linear.c 		long nr_pages, void **kaddr, pfn_t *pfn)
pfn               179 drivers/md/dm-linear.c 	return dax_direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn);
pfn               949 drivers/md/dm-log-writes.c 					 long nr_pages, void **kaddr, pfn_t *pfn)
pfn               958 drivers/md/dm-log-writes.c 	return dax_direct_access(lc->dev->dax_dev, pgoff, nr_pages, kaddr, pfn);
pfn               318 drivers/md/dm-stripe.c 		long nr_pages, void **kaddr, pfn_t *pfn)
pfn               335 drivers/md/dm-stripe.c 	return dax_direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn);
pfn               145 drivers/md/dm-target.c 		long nr_pages, void **kaddr, pfn_t *pfn)
pfn               224 drivers/md/dm-writecache.c 	pfn_t pfn;
pfn               247 drivers/md/dm-writecache.c 	da = dax_direct_access(wc->ssd_dev->dax_dev, 0, p, &wc->memory_map, &pfn);
pfn               253 drivers/md/dm-writecache.c 	if (!pfn_t_has_page(pfn)) {
pfn               270 drivers/md/dm-writecache.c 						NULL, &pfn);
pfn               275 drivers/md/dm-writecache.c 			if (!pfn_t_has_page(pfn)) {
pfn               280 drivers/md/dm-writecache.c 				pages[i++] = pfn_t_to_page(pfn);
pfn               281 drivers/md/dm-writecache.c 				pfn.val++;
pfn              1082 drivers/md/dm.c 				 long nr_pages, void **kaddr, pfn_t *pfn)
pfn              1100 drivers/md/dm.c 	ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn);
pfn               638 drivers/media/pci/pt1/pt1.c 	u32 first_pfn, pfn;
pfn               656 drivers/media/pci/pt1/pt1.c 		ret = pt1_init_table(pt1, &tables[i], &pfn);
pfn               659 drivers/media/pci/pt1/pt1.c 		tables[i - 1].page->next_pfn = cpu_to_le32(pfn);
pfn               432 drivers/misc/genwqe/card_dev.c 	unsigned long pfn, vsize = vma->vm_end - vma->vm_start;
pfn               461 drivers/misc/genwqe/card_dev.c 	pfn = virt_to_phys(dma_map->k_vaddr) >> PAGE_SHIFT;
pfn               464 drivers/misc/genwqe/card_dev.c 			     pfn,
pfn               162 drivers/misc/sgi-gru/gruhandles.c 	tfh->pfn = paddr >> GRU_PADDR_SHIFT;
pfn               178 drivers/misc/sgi-gru/gruhandles.c 	tfh->pfn = paddr >> GRU_PADDR_SHIFT;
pfn               289 drivers/misc/sgi-gru/gruhandles.h 	unsigned long pfn:41;		/* DW 3 */
pfn               260 drivers/misc/vmw_balloon.c 	u64 pfn : 52;
pfn               755 drivers/misc/vmw_balloon.c 		*p = pfn_to_page(b->batch_page[idx].pfn);
pfn               791 drivers/misc/vmw_balloon.c 	unsigned long cmd, pfn;
pfn               805 drivers/misc/vmw_balloon.c 		pfn = PHYS_PFN(virt_to_phys(b->batch_page));
pfn               809 drivers/misc/vmw_balloon.c 		pfn = page_to_pfn(b->page);
pfn               812 drivers/misc/vmw_balloon.c 		if (unlikely(pfn != (u32)pfn))
pfn               816 drivers/misc/vmw_balloon.c 	return vmballoon_cmd(b, cmd, pfn, num_pages);
pfn               835 drivers/misc/vmw_balloon.c 					{ .pfn = page_to_pfn(p) };
pfn              1128 drivers/net/ethernet/cavium/liquidio/octeon_device.c 	octeon_dispatch_fn_t pfn;
pfn              1149 drivers/net/ethernet/cavium/liquidio/octeon_device.c 	pfn = octeon_get_dispatch(oct, opcode, subcode);
pfn              1150 drivers/net/ethernet/cavium/liquidio/octeon_device.c 	if (!pfn) {
pfn              1175 drivers/net/ethernet/cavium/liquidio/octeon_device.c 		if (pfn == fn &&
pfn               701 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 	u64 wq_first_page_paddr, cmdq_first_block_paddr, pfn;
pfn               708 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 	pfn = CMDQ_PFN(wq_first_page_paddr, wq->wq_page_size);
pfn               711 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 		HINIC_CMDQ_CTXT_PAGE_INFO_SET(pfn, CURR_WQE_PAGE_PFN)   |
pfn               720 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 	pfn = CMDQ_PFN(cmdq_first_block_paddr, wq->wq_page_size);
pfn               723 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 		HINIC_CMDQ_CTXT_BLOCK_INFO_SET(pfn, WQ_BLOCK_PFN) |
pfn               608 drivers/net/ethernet/ibm/ehea/ehea_qmr.c static int ehea_update_busmap(unsigned long pfn, unsigned long nr_pages, int add)
pfn               621 drivers/net/ethernet/ibm/ehea/ehea_qmr.c 	start_section = (pfn * PAGE_SIZE) / EHEA_SECTSIZE;
pfn               651 drivers/net/ethernet/ibm/ehea/ehea_qmr.c int ehea_add_sect_bmap(unsigned long pfn, unsigned long nr_pages)
pfn               656 drivers/net/ethernet/ibm/ehea/ehea_qmr.c 	ret = ehea_update_busmap(pfn, nr_pages, EHEA_BUSMAP_ADD_SECT);
pfn               661 drivers/net/ethernet/ibm/ehea/ehea_qmr.c int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages)
pfn               666 drivers/net/ethernet/ibm/ehea/ehea_qmr.c 	ret = ehea_update_busmap(pfn, nr_pages, EHEA_BUSMAP_REM_SECT);
pfn               671 drivers/net/ethernet/ibm/ehea/ehea_qmr.c static int ehea_is_hugepage(unsigned long pfn)
pfn               675 drivers/net/ethernet/ibm/ehea/ehea_qmr.c 	if (pfn & EHEA_HUGEPAGE_PFN_MASK)
pfn               678 drivers/net/ethernet/ibm/ehea/ehea_qmr.c 	page_order = compound_order(pfn_to_page(pfn));
pfn               689 drivers/net/ethernet/ibm/ehea/ehea_qmr.c 	unsigned long pfn, start_pfn, end_pfn, nr_pages;
pfn               698 drivers/net/ethernet/ibm/ehea/ehea_qmr.c 	pfn = start_pfn;
pfn               700 drivers/net/ethernet/ibm/ehea/ehea_qmr.c 	while (pfn < end_pfn) {
pfn               701 drivers/net/ethernet/ibm/ehea/ehea_qmr.c 		if (ehea_is_hugepage(pfn)) {
pfn               703 drivers/net/ethernet/ibm/ehea/ehea_qmr.c 			nr_pages = pfn - start_pfn;
pfn               710 drivers/net/ethernet/ibm/ehea/ehea_qmr.c 			pfn += (EHEA_HUGEPAGE_SIZE / PAGE_SIZE);
pfn               711 drivers/net/ethernet/ibm/ehea/ehea_qmr.c 			start_pfn = pfn;
pfn               713 drivers/net/ethernet/ibm/ehea/ehea_qmr.c 			pfn += (EHEA_SECTSIZE / PAGE_SIZE);
pfn               717 drivers/net/ethernet/ibm/ehea/ehea_qmr.c 	nr_pages = pfn - start_pfn;
pfn               384 drivers/net/ethernet/ibm/ehea/ehea_qmr.h int ehea_add_sect_bmap(unsigned long pfn, unsigned long nr_pages);
pfn               385 drivers/net/ethernet/ibm/ehea/ehea_qmr.h int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages);
pfn               288 drivers/net/ethernet/mellanox/mlx4/en_main.c 	mdev->uar_map = ioremap((phys_addr_t) mdev->priv_uar.pfn << PAGE_SHIFT,
pfn              2718 drivers/net/ethernet/mellanox/mlx4/main.c 	priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
pfn               159 drivers/net/ethernet/mellanox/mlx4/pd.c 	uar->pfn = (pci_resource_start(dev->persist->pdev, 2) >> PAGE_SHIFT)
pfn               202 drivers/net/ethernet/mellanox/mlx4/pd.c 		uar->map = ioremap(uar->pfn << PAGE_SHIFT, PAGE_SIZE);
pfn               103 drivers/net/ethernet/mellanox/mlx5/core/uar.c 	phys_addr_t pfn;
pfn               137 drivers/net/ethernet/mellanox/mlx5/core/uar.c 	pfn = uar2pfn(mdev, up->index);
pfn               139 drivers/net/ethernet/mellanox/mlx5/core/uar.c 		up->map = ioremap_wc(pfn << PAGE_SHIFT, PAGE_SIZE);
pfn               145 drivers/net/ethernet/mellanox/mlx5/core/uar.c 		up->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
pfn              1012 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 	u8 pfn;
pfn              1041 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 		pfn = pci_info[i].id;
pfn              1043 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 		if (pfn >= ahw->max_vnic_func) {
pfn              1046 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 				__func__, pfn, ahw->max_vnic_func);
pfn              1055 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 			if (!qlcnic_83xx_set_port_eswitch_status(adapter, pfn,
pfn              1064 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 		adapter->npars[j].pci_func = pfn;
pfn               791 drivers/net/hyperv/netvsc.c 		char *src = phys_to_virt(pb[i].pfn << PAGE_SHIFT);
pfn               392 drivers/net/hyperv/netvsc_drv.c 		pb[j].pfn = page_to_pfn(page);
pfn               217 drivers/net/hyperv/rndis_filter.c 	pb[0].pfn = virt_to_phys(&req->request_msg) >>
pfn               228 drivers/net/hyperv/rndis_filter.c 		pb[1].pfn = virt_to_phys((void *)&req->request_msg
pfn               197 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c 	struct brcmf_pno_net_param_le pfn;
pfn               200 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c 	pfn.auth = cpu_to_le32(WLAN_AUTH_OPEN);
pfn               201 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c 	pfn.wpa_auth = cpu_to_le32(BRCMF_PNO_WPA_AUTH_ANY);
pfn               202 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c 	pfn.wsec = cpu_to_le32(0);
pfn               203 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c 	pfn.infra = cpu_to_le32(1);
pfn               204 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c 	pfn.flags = 0;
pfn               206 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c 		pfn.flags = cpu_to_le32(1 << BRCMF_PNO_HIDDEN_BIT);
pfn               207 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c 	pfn.ssid.SSID_len = cpu_to_le32(ssid->ssid_len);
pfn               208 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c 	memcpy(pfn.ssid.SSID, ssid->ssid, ssid->ssid_len);
pfn               211 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c 	err = brcmf_fil_iovar_data_set(ifp, "pfn_add", &pfn, sizeof(pfn));
pfn                50 drivers/nvdimm/pmem.c 	unsigned long pfn_start, pfn_end, pfn;
pfn                58 drivers/nvdimm/pmem.c 	for (pfn = pfn_start; pfn < pfn_end; pfn++) {
pfn                59 drivers/nvdimm/pmem.c 		struct page *page = pfn_to_page(pfn);
pfn                67 drivers/nvdimm/pmem.c 			clear_mce_nospec(pfn);
pfn               244 drivers/nvdimm/pmem.c 		long nr_pages, void **kaddr, pfn_t *pfn)
pfn               254 drivers/nvdimm/pmem.c 	if (pfn)
pfn               255 drivers/nvdimm/pmem.c 		*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);
pfn               273 drivers/nvdimm/pmem.c 		pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn)
pfn               277 drivers/nvdimm/pmem.c 	return __pmem_direct_access(pmem, pgoff, nr_pages, kaddr, pfn);
pfn                30 drivers/nvdimm/pmem.h 		long nr_pages, void **kaddr, pfn_t *pfn);
pfn                34 drivers/nvdimm/region_devs.c 		unsigned long pfn = PHYS_PFN(res->start);
pfn                42 drivers/nvdimm/region_devs.c 			if (pfn == pfn_j)
pfn                52 drivers/nvdimm/region_devs.c 					PFN_PHYS(pfn), PAGE_SIZE);
pfn               186 drivers/ras/cec.c static int __find_elem(struct ce_array *ca, u64 pfn, unsigned int *to)
pfn               196 drivers/ras/cec.c 		if (this_pfn < pfn)
pfn               198 drivers/ras/cec.c 		else if (this_pfn > pfn)
pfn               200 drivers/ras/cec.c 		else if (this_pfn == pfn) {
pfn               223 drivers/ras/cec.c static int find_elem(struct ce_array *ca, u64 pfn, unsigned int *to)
pfn               231 drivers/ras/cec.c 	return __find_elem(ca, pfn, to);
pfn               271 drivers/ras/cec.c 	u64 pfn;
pfn               277 drivers/ras/cec.c 	pfn = del_lru_elem_unlocked(ca);
pfn               280 drivers/ras/cec.c 	return pfn;
pfn               312 drivers/ras/cec.c int cec_add_elem(u64 pfn)
pfn               333 drivers/ras/cec.c 	ret = find_elem(ca, pfn, &to);
pfn               342 drivers/ras/cec.c 		ca->array[to] = pfn << PAGE_SHIFT;
pfn               353 drivers/ras/cec.c 		u64 pfn = ca->array[to] >> PAGE_SHIFT;
pfn               355 drivers/ras/cec.c 		if (!pfn_valid(pfn)) {
pfn               356 drivers/ras/cec.c 			pr_warn("CEC: Invalid pfn: 0x%llx\n", pfn);
pfn               359 drivers/ras/cec.c 			pr_err("Soft-offlining pfn: 0x%llx\n", pfn);
pfn               360 drivers/ras/cec.c 			memory_failure_queue(pfn, MF_SOFT_OFFLINE);
pfn               485 drivers/ras/cec.c 	struct dentry *d, *pfn, *decay, *count, *array;
pfn               510 drivers/ras/cec.c 	pfn = debugfs_create_file("pfn", S_IRUSR | S_IWUSR, d, &dfs_pfn, &pfn_ops);
pfn               511 drivers/ras/cec.c 	if (!pfn) {
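
The cec.c entries above maintain a sorted array of error pfns; once an element's count saturates, the pfn is validated and queued for soft offlining. A sketch of that final step (name hypothetical):

#include <linux/mm.h>

static void example_cec_offline(u64 pfn)
{
	if (!pfn_valid(pfn))
		pr_warn("CEC: Invalid pfn: 0x%llx\n", pfn);
	else
		memory_failure_queue((unsigned long)pfn, MF_SOFT_OFFLINE);
}
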
pfn                37 drivers/s390/block/dcssblk.c 		long nr_pages, void **kaddr, pfn_t *pfn);
pfn               920 drivers/s390/block/dcssblk.c 		long nr_pages, void **kaddr, pfn_t *pfn)
pfn               928 drivers/s390/block/dcssblk.c 	if (pfn)
pfn               929 drivers/s390/block/dcssblk.c 		*pfn = __pfn_to_pfn_t(PFN_DOWN(dev_info->start + offset),
pfn               937 drivers/s390/block/dcssblk.c 		long nr_pages, void **kaddr, pfn_t *pfn)
pfn               941 drivers/s390/block/dcssblk.c 	return __dcssblk_direct_access(dev_info, pgoff, nr_pages, kaddr, pfn);
pfn               903 drivers/scsi/csiostor/csio_hw.c 	hw->pfn = src_pf;
pfn               935 drivers/scsi/csiostor/csio_hw.c 	csio_mb_hello(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn,
pfn               936 drivers/scsi/csiostor/csio_hw.c 		      hw->pfn, CSIO_MASTER_MAY, NULL);
pfn               952 drivers/scsi/csiostor/csio_hw.c 	if (hw->pfn == mpfn) {
pfn              1043 drivers/scsi/csiostor/csio_hw.c 	if (hw->pfn == mpfn)
pfn              1045 drivers/scsi/csiostor/csio_hw.c 			hw->pfn, state_str);
pfn              1049 drivers/scsi/csiostor/csio_hw.c 		    hw->pfn, mpfn, state_str);
pfn              1386 drivers/scsi/csiostor/csio_hw.c 	csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0,
pfn              1802 drivers/scsi/csiostor/csio_hw.c 				       hw->pfn, 0, 1, &param, &val, true,
pfn              1944 drivers/scsi/csiostor/csio_hw.c 	csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0,
pfn              2355 drivers/scsi/csiostor/csio_hw.c 		ret = csio_hw_fw_upgrade(hw, hw->pfn, fw_data,
pfn               512 drivers/scsi/csiostor/csio_hw.h 	uint8_t			pfn;			/* Physical Function
pfn               302 drivers/scsi/csiostor/csio_hw_t5.c 	win_pf = PFNUM_V(hw->pfn);
pfn               281 drivers/scsi/csiostor/csio_mb.c 		(FW_LDST_CMD_LC_F | FW_LDST_CMD_FN_V(hw->pfn));
pfn               450 drivers/scsi/csiostor/csio_mb.c 				FW_IQ_CMD_PFN_V(iq_params->pfn)	|
pfn               507 drivers/scsi/csiostor/csio_mb.c 				FW_IQ_CMD_PFN_V(iq_params->pfn)	|
pfn               632 drivers/scsi/csiostor/csio_mb.c 				FW_IQ_CMD_PFN_V(iq_params->pfn)	|
pfn               667 drivers/scsi/csiostor/csio_mb.c 				FW_EQ_OFLD_CMD_PFN_V(eq_ofld_params->pfn) |
pfn               715 drivers/scsi/csiostor/csio_mb.c 				FW_EQ_OFLD_CMD_PFN_V(eq_ofld_params->pfn) |
pfn               818 drivers/scsi/csiostor/csio_mb.c 				FW_EQ_OFLD_CMD_PFN_V(eq_ofld_params->pfn) |
pfn              1162 drivers/scsi/csiostor/csio_mb.c 	uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL_A);
pfn              1163 drivers/scsi/csiostor/csio_mb.c 	uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA_A);
pfn              1196 drivers/scsi/csiostor/csio_mb.c 	uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL_A);
pfn              1197 drivers/scsi/csiostor/csio_mb.c 	uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA_A);
pfn              1221 drivers/scsi/csiostor/csio_mb.c 				    hw->pfn, *((uint8_t *)mbp->mb));
pfn              1250 drivers/scsi/csiostor/csio_mb.c 					 hw->pfn, *((uint8_t *)mbp->mb), owner);
pfn              1257 drivers/scsi/csiostor/csio_mb.c 						 hw->pfn, *((uint8_t *)mbp->mb),
pfn              1274 drivers/scsi/csiostor/csio_mb.c 	CSIO_DUMP_MB(hw, hw->pfn, data_reg);
pfn              1310 drivers/scsi/csiostor/csio_mb.c 			CSIO_DUMP_MB(hw, hw->pfn, data_reg);
pfn              1338 drivers/scsi/csiostor/csio_mb.c 		 hw->pfn, *((uint8_t *)cmd));
pfn              1479 drivers/scsi/csiostor/csio_mb.c 	uint32_t	ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL_A);
pfn              1480 drivers/scsi/csiostor/csio_mb.c 	uint32_t	data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA_A);
pfn              1506 drivers/scsi/csiostor/csio_mb.c 		CSIO_DUMP_MB(hw, hw->pfn, data_reg);
pfn              1593 drivers/scsi/csiostor/csio_mb.c 	csio_dbg(hw, "Mailbox num:%x op:0x%x timed out\n", hw->pfn,
pfn              1647 drivers/scsi/csiostor/csio_mb.c 			    hw->pfn, FW_CMD_OP_G(ntohl(hdr->hi)));
pfn               454 drivers/scsi/csiostor/csio_wr.c 	iqp.pfn			= hw->pfn;
pfn               572 drivers/scsi/csiostor/csio_wr.c 	eqp.pfn			= hw->pfn;
pfn               646 drivers/scsi/csiostor/csio_wr.c 	iqp.pfn		= hw->pfn;
pfn               719 drivers/scsi/csiostor/csio_wr.c 	eqp.pfn		= hw->pfn;
pfn               116 drivers/scsi/csiostor/csio_wr.h 	uint8_t		pfn:3;
pfn               202 drivers/scsi/csiostor/csio_wr.h 	uint8_t		pfn;
pfn              2375 drivers/staging/comedi/comedi_fops.c 			unsigned long pfn;
pfn              2378 drivers/staging/comedi/comedi_fops.c 			pfn = page_to_pfn(virt_to_page(buf->virt_addr));
pfn              2379 drivers/staging/comedi/comedi_fops.c 			retval = remap_pfn_range(vma, start, pfn, PAGE_SIZE,
pfn               126 drivers/vfio/pci/vfio_pci_nvlink2.c 	unsigned long pfn = nv2pg + vm_pgoff + vmf_off;
pfn               128 drivers/vfio/pci/vfio_pci_nvlink2.c 	ret = vmf_insert_pfn(vma, vmf->address, pfn);
pfn               129 drivers/vfio/pci/vfio_pci_nvlink2.c 	trace_vfio_pci_nvgpu_mmap_fault(data->gpdev, pfn << PAGE_SHIFT,
pfn               113 drivers/vfio/vfio_iommu_type1.c 	unsigned long		pfn;		/* Host pfn */
pfn               127 drivers/vfio/vfio_iommu_type1.c static int put_pfn(unsigned long pfn, int prot);
pfn               225 drivers/vfio/vfio_iommu_type1.c 				unsigned long pfn)
pfn               234 drivers/vfio/vfio_iommu_type1.c 	vpfn->pfn = pfn;
pfn               262 drivers/vfio/vfio_iommu_type1.c 		ret = put_pfn(vpfn->pfn, dma->prot);
pfn               298 drivers/vfio/vfio_iommu_type1.c static bool is_invalid_reserved_pfn(unsigned long pfn)
pfn               300 drivers/vfio/vfio_iommu_type1.c 	if (pfn_valid(pfn)) {
pfn               302 drivers/vfio/vfio_iommu_type1.c 		struct page *tail = pfn_to_page(pfn);
pfn               326 drivers/vfio/vfio_iommu_type1.c static int put_pfn(unsigned long pfn, int prot)
pfn               328 drivers/vfio/vfio_iommu_type1.c 	if (!is_invalid_reserved_pfn(pfn)) {
pfn               329 drivers/vfio/vfio_iommu_type1.c 		struct page *page = pfn_to_page(pfn);
pfn               339 drivers/vfio/vfio_iommu_type1.c 			 int prot, unsigned long *pfn)
pfn               372 drivers/vfio/vfio_iommu_type1.c 		*pfn = page_to_pfn(page[0]);
pfn               383 drivers/vfio/vfio_iommu_type1.c 		if (!follow_pfn(vma, vaddr, pfn) &&
pfn               384 drivers/vfio/vfio_iommu_type1.c 		    is_invalid_reserved_pfn(*pfn))
pfn               401 drivers/vfio/vfio_iommu_type1.c 	unsigned long pfn = 0;
pfn               437 drivers/vfio/vfio_iommu_type1.c 		ret = vaddr_get_pfn(current->mm, vaddr, dma->prot, &pfn);
pfn               441 drivers/vfio/vfio_iommu_type1.c 		if (pfn != *pfn_base + pinned ||
pfn               442 drivers/vfio/vfio_iommu_type1.c 		    rsvd != is_invalid_reserved_pfn(pfn)) {
pfn               443 drivers/vfio/vfio_iommu_type1.c 			put_pfn(pfn, dma->prot);
pfn               450 drivers/vfio/vfio_iommu_type1.c 				put_pfn(pfn, dma->prot);
pfn               466 drivers/vfio/vfio_iommu_type1.c 			for (pfn = *pfn_base ; pinned ; pfn++, pinned--)
pfn               467 drivers/vfio/vfio_iommu_type1.c 				put_pfn(pfn, dma->prot);
pfn               477 drivers/vfio/vfio_iommu_type1.c 				    unsigned long pfn, long npage,
pfn               484 drivers/vfio/vfio_iommu_type1.c 		if (put_pfn(pfn++, dma->prot)) {
pfn               592 drivers/vfio/vfio_iommu_type1.c 			phys_pfn[i] = vpfn->pfn;
pfn               986 drivers/vfio/vfio_iommu_type1.c 			  unsigned long pfn, long npage, int prot)
pfn               992 drivers/vfio/vfio_iommu_type1.c 		ret = iommu_map(d->domain, iova, (phys_addr_t)pfn << PAGE_SHIFT,
pfn              1016 drivers/vfio/vfio_iommu_type1.c 	unsigned long pfn, limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
pfn              1022 drivers/vfio/vfio_iommu_type1.c 					      size >> PAGE_SHIFT, &pfn, limit);
pfn              1030 drivers/vfio/vfio_iommu_type1.c 		ret = vfio_iommu_map(iommu, iova + dma->size, pfn, npage,
pfn              1033 drivers/vfio/vfio_iommu_type1.c 			vfio_unpin_pages_remote(dma, iova + dma->size, pfn,
pfn              1231 drivers/vfio/vfio_iommu_type1.c 				unsigned long pfn;
pfn              1239 drivers/vfio/vfio_iommu_type1.c 							      &pfn, limit);
pfn              1246 drivers/vfio/vfio_iommu_type1.c 				phys = pfn << PAGE_SHIFT;
pfn              1878 drivers/vfio/vfio_iommu_type1.c 			if (!is_invalid_reserved_pfn(vpfn->pfn))
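
The vfio_iommu_type1 entries above are the core of userspace device assignment: a user virtual address is resolved to a host pfn (get_user_pages for ordinary memory, follow_pfn for VM_PFNMAP VMAs, see lines 372/383), accounted, and programmed into the IOMMU. A minimal sketch of the ordinary-memory path for one page, assuming the caller already owns an iommu_domain; this is not the vfio implementation, which batches contiguous pfns and special-cases reserved ones:

        #include <linux/mm.h>
        #include <linux/iommu.h>

        /* Pin one user page and map it at @iova in @domain.  Write access
         * assumed for brevity; @prot is IOMMU_READ | IOMMU_WRITE etc. */
        static int sk_pin_and_map_one(struct iommu_domain *domain,
                                      unsigned long vaddr, unsigned long iova,
                                      int prot)
        {
                struct page *page;
                unsigned long pfn;
                int ret;

                ret = get_user_pages_fast(vaddr, 1, FOLL_WRITE, &page);
                if (ret != 1)
                        return ret < 0 ? ret : -EFAULT;

                pfn = page_to_pfn(page);            /* struct page -> host pfn */
                ret = iommu_map(domain, iova, (phys_addr_t)pfn << PAGE_SHIFT,
                                PAGE_SIZE, prot);
                if (ret)
                        put_page(page);             /* undo the pin on failure */
                return ret;
        }
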
pfn               125 drivers/virtio/virtio_balloon.c 	unsigned long pfn = page_to_pfn(page);
pfn               129 drivers/virtio/virtio_balloon.c 	return pfn * VIRTIO_BALLOON_PAGES_PER_PAGE;
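
The virtio balloon protocol counts 4KiB balloon pages regardless of the kernel's PAGE_SIZE; VIRTIO_BALLOON_PAGES_PER_PAGE is PAGE_SIZE >> VIRTIO_BALLOON_PFN_SHIFT with the shift fixed at 12, so on a 64KiB-page arm64 kernel one kernel page spans 16 balloon pfns. A sketch of the conversion done at virtio_balloon.c:125 above:

        #include <linux/mm.h>
        #include <linux/virtio_balloon.h>   /* VIRTIO_BALLOON_PFN_SHIFT == 12 */

        static inline unsigned long sk_page_to_balloon_pfn(struct page *page)
        {
                /* first 4KiB balloon pfn covered by this kernel page */
                return page_to_pfn(page) * (PAGE_SIZE >> VIRTIO_BALLOON_PFN_SHIFT);
        }
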
pfn               273 drivers/xen/balloon.c 		unsigned long pfn = res->start >> PAGE_SHIFT;
pfn               275 drivers/xen/balloon.c 		if (pfn > limit) {
pfn               277 drivers/xen/balloon.c 			       pfn, limit);
pfn               329 drivers/xen/balloon.c 		unsigned long pfn, i;
pfn               331 drivers/xen/balloon.c 		pfn = PFN_DOWN(resource->start);
pfn               333 drivers/xen/balloon.c 			if (!set_phys_to_machine(pfn + i, INVALID_P2M_ENTRY)) {
pfn               669 drivers/xen/balloon.c 	unsigned long pfn, extra_pfn_end;
pfn               678 drivers/xen/balloon.c 	for (pfn = start_pfn; pfn < extra_pfn_end; pfn++) {
pfn               682 drivers/xen/balloon.c 		balloon_append(pfn_to_page(pfn));
pfn               452 drivers/xen/grant-table.c int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn)
pfn               459 drivers/xen/grant-table.c 	gnttab_grant_foreign_transfer_ref(ref, domid, pfn);
pfn               466 drivers/xen/grant-table.c 				       unsigned long pfn)
pfn               468 drivers/xen/grant-table.c 	gnttab_interface->update_entry(ref, domid, pfn, GTF_accept_transfer);
pfn               733 drivers/xen/grant-table.c 	xen_pfn_t *pfn;
pfn               747 drivers/xen/grant-table.c 	pfn = kcalloc(max_nr_gframes, sizeof(pfn[0]), GFP_KERNEL);
pfn               748 drivers/xen/grant-table.c 	if (!pfn) {
pfn               753 drivers/xen/grant-table.c 		pfn[i] = XEN_PFN_DOWN(addr) + i;
pfn               756 drivers/xen/grant-table.c 	xen_auto_xlat_grant_frames.pfn = pfn;
pfn               767 drivers/xen/grant-table.c 	kfree(xen_auto_xlat_grant_frames.pfn);
pfn               770 drivers/xen/grant-table.c 	xen_auto_xlat_grant_frames.pfn = NULL;
pfn               852 drivers/xen/grant-table.c 	unsigned long pfn, start_pfn;
pfn               871 drivers/xen/grant-table.c 	for (pfn = start_pfn, i = 0; pfn < start_pfn + args->nr_pages;
pfn               872 drivers/xen/grant-table.c 			pfn++, i++) {
pfn               873 drivers/xen/grant-table.c 		struct page *page = pfn_to_page(pfn);
pfn              1253 drivers/xen/grant-table.c 			xatp.gpfn = xen_auto_xlat_grant_frames.pfn[i];
pfn                37 drivers/xen/mem-reservation.c 		unsigned long pfn = page_to_pfn(page);
pfn                47 drivers/xen/mem-reservation.c 		set_phys_to_machine(pfn, frames[i]);
pfn                54 drivers/xen/mem-reservation.c 					(unsigned long)__va(pfn << PAGE_SHIFT),
pfn                70 drivers/xen/mem-reservation.c 		unsigned long pfn = page_to_pfn(page);
pfn                82 drivers/xen/mem-reservation.c 					(unsigned long)__va(pfn << PAGE_SHIFT),
pfn                86 drivers/xen/mem-reservation.c 		__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
pfn               770 drivers/xen/privcmd.c 			xen_pfn_t pfn =
pfn               773 drivers/xen/privcmd.c 			pfns[i] = pfn + (i % XEN_PFN_PER_PAGE);
pfn               248 drivers/xen/xen-scsiback.c 	unsigned long pfn = page_to_pfn(page);
pfn               250 drivers/xen/xen-scsiback.c 	return (unsigned long)pfn_to_kaddr(pfn);
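
The xen/mem-reservation.c entries above show the PV ballooning bookkeeping: when the hypervisor hands a frame back, the page's pfn->mfn slot in the p2m is repopulated with set_phys_to_machine() and the linear-map PTE for __va(pfn << PAGE_SHIFT) is rebuilt; the release path inverts this and poisons the slot with INVALID_P2M_ENTRY. A minimal sketch of the repopulate half, assuming an x86 PV guest with the page in the direct map:

        #include <xen/features.h>
        #include <asm/xen/page.h>

        static void sk_xen_repopulate(unsigned long pfn, unsigned long mfn)
        {
                if (!xen_feature(XENFEAT_auto_translated_physmap)) {
                        set_phys_to_machine(pfn, mfn);  /* pfn -> mfn in the p2m */
                        /* a HYPERVISOR_update_va_mapping() call on
                         * __va(pfn << PAGE_SHIFT) then re-links the kernel
                         * linear mapping (omitted here) */
                }
        }
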
pfn               425 fs/cramfs/inode.c 			pfn_t pfn = phys_to_pfn_t(address + off, PFN_DEV);
pfn               426 fs/cramfs/inode.c 			vmf = vmf_insert_mixed(vma, vma->vm_start + off, pfn);
pfn                88 fs/dax.c       static void *dax_make_entry(pfn_t pfn, unsigned long flags)
pfn                90 fs/dax.c       	return xa_mk_value(flags | (pfn_t_to_pfn(pfn) << DAX_SHIFT));
pfn               322 fs/dax.c       #define for_each_mapped_pfn(entry, pfn) \
pfn               323 fs/dax.c       	for (pfn = dax_to_pfn(entry); \
pfn               324 fs/dax.c       			pfn < dax_end_pfn(entry); pfn++)
pfn               334 fs/dax.c       	unsigned long size = dax_entry_size(entry), pfn, index;
pfn               341 fs/dax.c       	for_each_mapped_pfn(entry, pfn) {
pfn               342 fs/dax.c       		struct page *page = pfn_to_page(pfn);
pfn               353 fs/dax.c       	unsigned long pfn;
pfn               358 fs/dax.c       	for_each_mapped_pfn(entry, pfn) {
pfn               359 fs/dax.c       		struct page *page = pfn_to_page(pfn);
pfn               370 fs/dax.c       	unsigned long pfn;
pfn               372 fs/dax.c       	for_each_mapped_pfn(entry, pfn) {
pfn               373 fs/dax.c       		struct page *page = pfn_to_page(pfn);
pfn               718 fs/dax.c       		void *entry, pfn_t pfn, unsigned long flags, bool dirty)
pfn               720 fs/dax.c       	void *new_entry = dax_make_entry(pfn, flags);
pfn               777 fs/dax.c       		unsigned long pfn)
pfn               816 fs/dax.c       			if (pfn != pmd_pfn(*pmdp))
pfn               821 fs/dax.c       			flush_cache_page(vma, address, pfn);
pfn               830 fs/dax.c       			if (pfn != pte_pfn(*ptep))
pfn               835 fs/dax.c       			flush_cache_page(vma, address, pfn);
pfn               852 fs/dax.c       	unsigned long pfn, index, count;
pfn               908 fs/dax.c       	pfn = dax_to_pfn(entry);
pfn               912 fs/dax.c       	dax_entry_mkclean(mapping, index, pfn);
pfn               913 fs/dax.c       	dax_flush(dax_dev, page_address(pfn_to_page(pfn)), count * PAGE_SIZE);
pfn              1036 fs/dax.c       	pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr));
pfn              1039 fs/dax.c       	*entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
pfn              1042 fs/dax.c       	ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
pfn              1261 fs/dax.c       	pfn_t pfn;
pfn              1348 fs/dax.c       		error = dax_iomap_pfn(&iomap, pos, PAGE_SIZE, &pfn);
pfn              1352 fs/dax.c       		entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn,
pfn              1366 fs/dax.c       			*pfnp = pfn;
pfn              1372 fs/dax.c       			ret = vmf_insert_mixed_mkwrite(vma, vaddr, pfn);
pfn              1374 fs/dax.c       			ret = vmf_insert_mixed(vma, vaddr, pfn);
pfn              1425 fs/dax.c       	pfn_t pfn;
pfn              1432 fs/dax.c       	pfn = page_to_pfn_t(zero_page);
pfn              1433 fs/dax.c       	*entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
pfn              1483 fs/dax.c       	pfn_t pfn;
pfn              1564 fs/dax.c       		error = dax_iomap_pfn(&iomap, pos, PMD_SIZE, &pfn);
pfn              1568 fs/dax.c       		entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn,
pfn              1580 fs/dax.c       			*pfnp = pfn;
pfn              1585 fs/dax.c       		trace_dax_pmd_insert_mapping(inode, vmf, PMD_SIZE, pfn, entry);
pfn              1586 fs/dax.c       		result = vmf_insert_pfn_pmd(vmf, pfn, write);
pfn              1670 fs/dax.c       dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
pfn              1692 fs/dax.c       		ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
pfn              1695 fs/dax.c       		ret = vmf_insert_pfn_pmd(vmf, pfn, FAULT_FLAG_WRITE);
pfn              1715 fs/dax.c       		enum page_entry_size pe_size, pfn_t pfn)
pfn              1725 fs/dax.c       	return dax_insert_pfn_mkwrite(vmf, pfn, order);
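
fs/dax.c above stores the pfn of the mapped memory directly in the address_space's XArray: dax_make_entry() shifts the pfn up by DAX_SHIFT and keeps the entry-type flags (locked, PMD, zero-page, empty) in the low bits, so the writeback and mkclean paths can recover the pfn from the mapping index alone, without a struct page lookup. A sketch of the encode/decode pair, assuming four flag bits as in this version of the file:

        #include <linux/xarray.h>
        #include <linux/pfn_t.h>

        #define SK_DAX_SHIFT    4       /* assumed: 4 low flag bits, as above */

        static void *sk_dax_make_entry(pfn_t pfn, unsigned long flags)
        {
                /* xa_mk_value() stores an integer, not a pointer, in the XArray */
                return xa_mk_value(flags | (pfn_t_to_pfn(pfn) << SK_DAX_SHIFT));
        }

        static unsigned long sk_dax_to_pfn(void *entry)
        {
                return xa_to_value(entry) >> SK_DAX_SHIFT;
        }
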
pfn               314 fs/ext4/file.c 	pfn_t pfn;
pfn               331 fs/ext4/file.c 	result = dax_iomap_fault(vmf, pe_size, &pfn, &error, &ext4_iomap_ops);
pfn               340 fs/ext4/file.c 			result = dax_finish_sync_fault(vmf, pe_size, pfn);
pfn              3705 fs/io_uring.c  	unsigned long pfn;
pfn              3725 fs/io_uring.c  	pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
pfn              3726 fs/io_uring.c  	return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
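
The io_uring mmap above is the canonical remap_pfn_range() pattern: convert the kernel buffer's physical address to a starting pfn and hand the whole physically contiguous range to the VM in one call, with no per-page faulting. A minimal character-device version of the same pattern (kbuf and where it is set are assumptions; the buffer must be physically contiguous and at least vma-sized):

        #include <linux/mm.h>
        #include <linux/io.h>           /* virt_to_phys() */

        static int sk_mmap(struct file *file, struct vm_area_struct *vma)
        {
                void *kbuf = file->private_data;        /* assumed set at open() */
                unsigned long pfn = virt_to_phys(kbuf) >> PAGE_SHIFT;
                unsigned long sz = vma->vm_end - vma->vm_start;

                return remap_pfn_range(vma, vma->vm_start, pfn, sz,
                                       vma->vm_page_prot);
        }
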
pfn                62 fs/proc/kcore.c static int (*mem_pfn_is_ram)(unsigned long pfn);
pfn                64 fs/proc/kcore.c int __init register_mem_pfn_is_ram(int (*fn)(unsigned long pfn))
pfn                72 fs/proc/kcore.c static int pfn_is_ram(unsigned long pfn)
pfn                75 fs/proc/kcore.c 		return mem_pfn_is_ram(pfn);
pfn               147 fs/proc/kcore.c 	unsigned long pfn = __pa(ent->addr) >> PAGE_SHIFT;
pfn               153 fs/proc/kcore.c 	start = ((unsigned long)pfn_to_page(pfn)) & PAGE_MASK;
pfn               154 fs/proc/kcore.c 	end = ((unsigned long)pfn_to_page(pfn + nr_pages)) - 1;
pfn               186 fs/proc/kcore.c kclist_add_private(unsigned long pfn, unsigned long nr_pages, void *arg)
pfn               192 fs/proc/kcore.c 	if (!pfn_valid(pfn))
pfn               195 fs/proc/kcore.c 	p = pfn_to_page(pfn);
pfn               196 fs/proc/kcore.c 	if (!memmap_valid_within(pfn, p, page_zone(p)))
pfn                35 fs/proc/page.c 	unsigned long pfn;
pfn                39 fs/proc/page.c 	pfn = src / KPMSIZE;
pfn                49 fs/proc/page.c 		ppage = pfn_to_online_page(pfn);
pfn                61 fs/proc/page.c 		pfn++;
pfn               212 fs/proc/page.c 	unsigned long pfn;
pfn               215 fs/proc/page.c 	pfn = src / KPMSIZE;
pfn               225 fs/proc/page.c 		ppage = pfn_to_online_page(pfn);
pfn               232 fs/proc/page.c 		pfn++;
pfn               257 fs/proc/page.c 	unsigned long pfn;
pfn               261 fs/proc/page.c 	pfn = src / KPMSIZE;
pfn               271 fs/proc/page.c 		ppage = pfn_to_online_page(pfn);
pfn               283 fs/proc/page.c 		pfn++;
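
The three fs/proc/page.c readers above (/proc/kpagecount, /proc/kpageflags, /proc/kpagecgroup) share one shape: the file offset is an index in units of KPMSIZE (sizeof(u64)), so pfn = src / KPMSIZE, and every pfn is validated with pfn_to_online_page() before being dereferenced. A sketch of one step of that loop, emitting a kpagecount-style mapcount:

        #include <linux/mm.h>
        #include <linux/memory_hotplug.h>       /* pfn_to_online_page() */

        static u64 sk_kpage_entry(loff_t pos)
        {
                unsigned long pfn = pos / sizeof(u64);          /* KPMSIZE */
                struct page *page = pfn_to_online_page(pfn);    /* NULL for holes */

                return page ? page_mapcount(page) : 0;
        }
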
pfn                70 fs/proc/vmcore.c static int (*oldmem_pfn_is_ram)(unsigned long pfn);
pfn                72 fs/proc/vmcore.c int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn))
pfn                88 fs/proc/vmcore.c static int pfn_is_ram(unsigned long pfn)
pfn                90 fs/proc/vmcore.c 	int (*fn)(unsigned long pfn);
pfn               101 fs/proc/vmcore.c 		ret = fn(pfn);
pfn               111 fs/proc/vmcore.c 	unsigned long pfn, offset;
pfn               119 fs/proc/vmcore.c 	pfn = (unsigned long)(*ppos / PAGE_SIZE);
pfn               128 fs/proc/vmcore.c 		if (pfn_is_ram(pfn) == 0)
pfn               132 fs/proc/vmcore.c 				tmp = copy_oldmem_page_encrypted(pfn, buf,
pfn               137 fs/proc/vmcore.c 				tmp = copy_oldmem_page(pfn, buf, nr_bytes,
pfn               147 fs/proc/vmcore.c 		++pfn;
pfn               188 fs/proc/vmcore.c 				  unsigned long from, unsigned long pfn,
pfn               192 fs/proc/vmcore.c 	return remap_pfn_range(vma, from, pfn, size, prot);
pfn               199 fs/proc/vmcore.c copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize,
pfn               202 fs/proc/vmcore.c 	return copy_oldmem_page(pfn, buf, csize, offset, userbuf);
pfn               488 fs/proc/vmcore.c 				    unsigned long from, unsigned long pfn,
pfn               496 fs/proc/vmcore.c 	pos_start = pfn;
pfn               497 fs/proc/vmcore.c 	pos_end = pfn + (size >> PAGE_SHIFT);
pfn               538 fs/proc/vmcore.c 			    unsigned long from, unsigned long pfn,
pfn               546 fs/proc/vmcore.c 		return remap_oldmem_pfn_checked(vma, from, pfn, size, prot);
pfn               548 fs/proc/vmcore.c 		return remap_oldmem_pfn_range(vma, from, pfn, size, prot);
pfn               573 fs/proc/vmcore.c 		u64 pfn;
pfn               576 fs/proc/vmcore.c 		pfn = __pa(elfcorebuf + start) >> PAGE_SHIFT;
pfn               577 fs/proc/vmcore.c 		if (remap_pfn_range(vma, vma->vm_start, pfn, tsz,
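
/proc/vmcore above reads the crashed kernel's memory pfn by pfn: pfn_is_ram() (optionally overridden by a registered callback, line 72) gates each page so that holes read back as zeroes, and mmap falls back from the fast whole-range remap to the checked per-run variant when the range contains non-RAM pfns. A sketch of the read loop's shape, with the actual copy_oldmem_page() call elided:

        #include <linux/kernel.h>
        #include <linux/mm.h>
        #include <linux/string.h>

        static size_t sk_read_oldmem(char *buf, size_t count, loff_t *ppos,
                                     int (*is_ram)(unsigned long))
        {
                size_t done = 0;

                while (done < count) {
                        unsigned long pfn = *ppos >> PAGE_SHIFT;
                        size_t off = *ppos & (PAGE_SIZE - 1);
                        size_t chunk = min_t(size_t, count - done, PAGE_SIZE - off);

                        if (!is_ram(pfn))
                                memset(buf + done, 0, chunk);   /* hole: zeroes */
                        /* else: copy_oldmem_page(pfn, buf + done, chunk, off, 0) */

                        done += chunk;
                        *ppos += chunk;
                }
                return done;
        }
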
pfn              1157 fs/xfs/xfs_file.c 		pfn_t pfn;
pfn              1159 fs/xfs/xfs_file.c 		ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL, &xfs_iomap_ops);
pfn              1161 fs/xfs/xfs_file.c 			ret = dax_finish_sync_fault(vmf, pe_size, pfn);
pfn                43 include/asm-generic/cacheflush.h 				    unsigned long pfn)
pfn                18 include/asm-generic/memory_model.h #define arch_pfn_to_nid(pfn)	pfn_to_nid(pfn)
pfn                22 include/asm-generic/memory_model.h #define arch_local_page_offset(pfn, nid)	\
pfn                23 include/asm-generic/memory_model.h 	((pfn) - NODE_DATA(nid)->node_start_pfn)
pfn                33 include/asm-generic/memory_model.h #define __pfn_to_page(pfn)	(mem_map + ((pfn) - ARCH_PFN_OFFSET))
pfn                38 include/asm-generic/memory_model.h #define __pfn_to_page(pfn)			\
pfn                39 include/asm-generic/memory_model.h ({	unsigned long __pfn = (pfn);		\
pfn                54 include/asm-generic/memory_model.h #define __pfn_to_page(pfn)	(vmemmap + (pfn))
pfn                68 include/asm-generic/memory_model.h #define __pfn_to_page(pfn)				\
pfn                69 include/asm-generic/memory_model.h ({	unsigned long __pfn = (pfn);			\
pfn                79 include/asm-generic/memory_model.h #define	__pfn_to_phys(pfn)	PFN_PHYS(pfn)
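
The asm-generic/memory_model.h entries above show the pfn<->struct page models side by side: FLATMEM indexes one global mem_map (line 33), classic SPARSEMEM must first find the section's mem_map (line 68), and SPARSEMEM_VMEMMAP (line 54) reduces both directions to pointer arithmetic against the virtually contiguous vmemmap array, which is why it is the preferred model on 64-bit:

        /* The VMEMMAP model from line 54 above, spelled out: */
        #define sk_pfn_to_page(pfn)     (vmemmap + (pfn))
        #define sk_page_to_pfn(page)    ((unsigned long)((page) - vmemmap))
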
pfn                82 include/asm-generic/page.h #define pfn_to_virt(pfn)	__va((pfn) << PAGE_SHIFT)
pfn                91 include/asm-generic/page.h #define pfn_valid(pfn)		((pfn) >= ARCH_PFN_OFFSET && ((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
pfn               806 include/asm-generic/pgtable.h 				  unsigned long pfn, unsigned long addr,
pfn               817 include/asm-generic/pgtable.h 				    pfn_t pfn)
pfn               836 include/asm-generic/pgtable.h 			       unsigned long pfn, unsigned long size)
pfn               848 include/asm-generic/pgtable.h 			   unsigned long pfn, unsigned long addr,
pfn               851 include/asm-generic/pgtable.h 			     pfn_t pfn);
pfn               853 include/asm-generic/pgtable.h extern void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
pfn               859 include/asm-generic/pgtable.h static inline int is_zero_pfn(unsigned long pfn)
pfn               862 include/asm-generic/pgtable.h 	unsigned long offset_from_zero_pfn = pfn - zero_pfn;
pfn               869 include/asm-generic/pgtable.h static inline int is_zero_pfn(unsigned long pfn)
pfn               872 include/asm-generic/pgtable.h 	return pfn == zero_pfn;
pfn              1121 include/asm-generic/pgtable.h int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
pfn              1131 include/asm-generic/pgtable.h static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
pfn                24 include/linux/crash_dump.h 				  unsigned long from, unsigned long pfn,
pfn                29 include/linux/crash_dump.h extern ssize_t copy_oldmem_page_encrypted(unsigned long pfn, char *buf,
pfn                93 include/linux/crash_dump.h extern int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn));
pfn               205 include/linux/dax.h 		void **kaddr, pfn_t *pfn);
pfn               219 include/linux/dax.h 		enum page_entry_size pe_size, pfn_t pfn);
pfn               140 include/linux/device-mapper.h 		long nr_pages, void **kaddr, pfn_t *pfn);
pfn               616 include/linux/gfp.h void free_contig_range(unsigned long pfn, unsigned int nr_pages);
pfn               105 include/linux/highmem.h #define kmap_atomic_pfn(pfn)	kmap_atomic(pfn_to_page(pfn))
pfn               228 include/linux/hmm.h hmm_device_entry_to_pfn(const struct hmm_range *range, uint64_t pfn)
pfn               230 include/linux/hmm.h 	if (pfn == range->values[HMM_PFN_NONE])
pfn               232 include/linux/hmm.h 	if (pfn == range->values[HMM_PFN_ERROR])
pfn               234 include/linux/hmm.h 	if (pfn == range->values[HMM_PFN_SPECIAL])
pfn               236 include/linux/hmm.h 	if (!(pfn & range->flags[HMM_PFN_VALID]))
pfn               238 include/linux/hmm.h 	return (pfn >> range->pfn_shift);
pfn               261 include/linux/hmm.h 						 unsigned long pfn)
pfn               263 include/linux/hmm.h 	return (pfn << range->pfn_shift) |
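
HMM packs a pfn and driver-chosen permission flags into a single uint64_t device entry: the low range->pfn_shift bits hold the flags, everything above them is the pfn, and the two helpers at hmm.h:228/261 above are exact inverses. A worked sketch with assumed values pfn_shift = 2, bit 0 = VALID, bit 1 = WRITE:

        #include <linux/types.h>

        #define SK_VALID   (1ULL << 0)  /* assumed range->flags[HMM_PFN_VALID] */
        #define SK_WRITE   (1ULL << 1)
        #define SK_SHIFT   2            /* assumed range->pfn_shift */

        static inline u64 sk_entry_from_pfn(unsigned long pfn, bool writable)
        {
                /* pfn 0x1234, writable -> entry 0x48d3 */
                return ((u64)pfn << SK_SHIFT) | SK_VALID |
                       (writable ? SK_WRITE : 0);
        }

        static inline unsigned long sk_pfn_from_entry(u64 entry)
        {
                return entry >> SK_SHIFT;       /* 0x48d3 -> 0x1234 */
        }
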
pfn                50 include/linux/huge_mm.h vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write);
pfn                51 include/linux/huge_mm.h vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write);
pfn                36 include/linux/hyperv.h 	u64 pfn;
pfn               570 include/linux/hyperv.h 	u64 pfn[0];
pfn                67 include/linux/io-mapping.h 	unsigned long pfn;
pfn                71 include/linux/io-mapping.h 	pfn = (unsigned long) (phys_addr >> PAGE_SHIFT);
pfn                72 include/linux/io-mapping.h 	return iomap_atomic_prot_pfn(pfn, mapping->prot);
pfn               141 include/linux/iova.h void free_iova(struct iova_domain *iovad, unsigned long pfn);
pfn               146 include/linux/iova.h void free_iova_fast(struct iova_domain *iovad, unsigned long pfn,
pfn               149 include/linux/iova.h 		unsigned long pfn, unsigned long pages,
pfn               161 include/linux/iova.h struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
pfn               185 include/linux/iova.h static inline void free_iova(struct iova_domain *iovad, unsigned long pfn)
pfn               202 include/linux/iova.h 				  unsigned long pfn,
pfn               208 include/linux/iova.h 			      unsigned long pfn, unsigned long pages,
pfn               252 include/linux/iova.h 				     unsigned long pfn)
pfn                42 include/linux/kcore.h extern int __init register_mem_pfn_is_ram(int (*fn)(unsigned long pfn));
pfn                94 include/linux/kvm_host.h static inline bool is_error_pfn(kvm_pfn_t pfn)
pfn                96 include/linux/kvm_host.h 	return !!(pfn & KVM_PFN_ERR_MASK);
pfn               104 include/linux/kvm_host.h static inline bool is_error_noslot_pfn(kvm_pfn_t pfn)
pfn               106 include/linux/kvm_host.h 	return !!(pfn & KVM_PFN_ERR_NOSLOT_MASK);
pfn               110 include/linux/kvm_host.h static inline bool is_noslot_pfn(kvm_pfn_t pfn)
pfn               112 include/linux/kvm_host.h 	return pfn == KVM_PFN_NOSLOT;
pfn               240 include/linux/kvm_host.h 	kvm_pfn_t pfn;
pfn               725 include/linux/kvm_host.h void kvm_release_pfn_clean(kvm_pfn_t pfn);
pfn               726 include/linux/kvm_host.h void kvm_release_pfn_dirty(kvm_pfn_t pfn);
pfn               727 include/linux/kvm_host.h void kvm_set_pfn_dirty(kvm_pfn_t pfn);
pfn               728 include/linux/kvm_host.h void kvm_set_pfn_accessed(kvm_pfn_t pfn);
pfn               729 include/linux/kvm_host.h void kvm_get_pfn(kvm_pfn_t pfn);
pfn               731 include/linux/kvm_host.h void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache);
pfn               973 include/linux/kvm_host.h bool kvm_is_reserved_pfn(kvm_pfn_t pfn);
pfn               974 include/linux/kvm_host.h bool kvm_is_zone_device_pfn(kvm_pfn_t pfn);
pfn              1074 include/linux/kvm_host.h static inline hpa_t pfn_to_hpa(kvm_pfn_t pfn)
pfn              1076 include/linux/kvm_host.h 	return (hpa_t)pfn << PAGE_SHIFT;
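
KVM encodes error returns in the pfn value itself: pfns with bits in KVM_PFN_ERR_MASK are errors, KVM_PFN_NOSLOT means the gfn had no memslot, and ordinary pfns convert to host physical addresses by a plain shift (line 1074 above). A sketch of the usual calling convention around those helpers:

        #include <linux/kvm_host.h>

        static int sk_touch_gfn(struct kvm *kvm, gfn_t gfn)
        {
                kvm_pfn_t pfn = gfn_to_pfn(kvm, gfn);   /* may encode an error */

                if (is_error_noslot_pfn(pfn))
                        return -EFAULT;

                /* ... access the page via pfn_to_hpa(pfn), kmap, etc. ... */

                kvm_release_pfn_clean(pfn);             /* drop the reference */
                return 0;
        }
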
pfn                55 include/linux/kvm_types.h 	kvm_pfn_t pfn;
pfn               220 include/linux/memblock.h int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
pfn                24 include/linux/memory_hotplug.h #define pfn_to_online_page(pfn)					   \
pfn                27 include/linux/memory_hotplug.h 	unsigned long ___pfn = pfn;				   \
pfn                95 include/linux/memory_hotplug.h extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
pfn               236 include/linux/memory_hotplug.h #define pfn_to_online_page(pfn)			\
pfn               239 include/linux/memory_hotplug.h 	if (pfn_valid(pfn))			\
pfn               240 include/linux/memory_hotplug.h 		___page = pfn_to_page(pfn);	\
pfn               314 include/linux/memory_hotplug.h extern bool is_mem_section_removable(unsigned long pfn, unsigned long nr_pages);
pfn               321 include/linux/memory_hotplug.h static inline bool is_mem_section_removable(unsigned long pfn,
pfn               352 include/linux/memory_hotplug.h extern int sparse_add_section(int nid, unsigned long pfn,
pfn               355 include/linux/memory_hotplug.h 		unsigned long pfn, unsigned long nr_pages,
pfn               359 include/linux/memory_hotplug.h extern bool allow_online_pfn_range(int nid, unsigned long pfn, unsigned long nr_pages,
pfn               130 include/linux/memremap.h struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
pfn               153 include/linux/memremap.h static inline struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
pfn               178 include/linux/migrate.h static inline unsigned long migrate_pfn(unsigned long pfn)
pfn               180 include/linux/migrate.h 	return (pfn << MIGRATE_PFN_SHIFT) | MIGRATE_PFN_VALID;
pfn               722 include/linux/mlx4/device.h 	unsigned long		pfn;
pfn               622 include/linux/mm.h extern int page_is_ram(unsigned long pfn);
pfn              1290 include/linux/mm.h 	unsigned long node, unsigned long pfn)
pfn              1295 include/linux/mm.h 	set_page_section(page, pfn_to_section_nr(pfn));
pfn              1473 include/linux/mm.h 	unsigned long *pfn);
pfn              2190 include/linux/mm.h static inline int __early_pfn_to_nid(unsigned long pfn,
pfn              2197 include/linux/mm.h extern int __meminit early_pfn_to_nid(unsigned long pfn);
pfn              2199 include/linux/mm.h extern int __meminit __early_pfn_to_nid(unsigned long pfn,
pfn              2546 include/linux/mm.h 			unsigned long pfn, unsigned long size, pgprot_t);
pfn              2553 include/linux/mm.h 			unsigned long pfn);
pfn              2555 include/linux/mm.h 			unsigned long pfn, pgprot_t pgprot);
pfn              2557 include/linux/mm.h 			pfn_t pfn);
pfn              2559 include/linux/mm.h 		unsigned long addr, pfn_t pfn);
pfn              2773 include/linux/mm.h struct page * __populate_section_memmap(unsigned long pfn,
pfn              2803 include/linux/mm.h extern int memory_failure(unsigned long pfn, int flags);
pfn              2804 include/linux/mm.h extern void memory_failure_queue(unsigned long pfn, int flags);
pfn              2805 include/linux/mm.h extern int unpoison_memory(unsigned long pfn);
pfn               596 include/linux/mmzone.h static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
pfn               598 include/linux/mmzone.h 	return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
pfn              1113 include/linux/mmzone.h static inline unsigned long early_pfn_to_nid(unsigned long pfn)
pfn              1121 include/linux/mmzone.h #define pfn_to_nid(pfn)		(0)
pfn              1147 include/linux/mmzone.h static inline unsigned long pfn_to_section_nr(unsigned long pfn)
pfn              1149 include/linux/mmzone.h 	return pfn >> PFN_SECTION_SHIFT;
pfn              1156 include/linux/mmzone.h #define SECTION_ALIGN_UP(pfn)	(((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
pfn              1157 include/linux/mmzone.h #define SECTION_ALIGN_DOWN(pfn)	((pfn) & PAGE_SECTION_MASK)
pfn              1171 include/linux/mmzone.h #define SUBSECTION_ALIGN_UP(pfn) ALIGN((pfn), PAGES_PER_SUBSECTION)
pfn              1172 include/linux/mmzone.h #define SUBSECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SUBSECTION_MASK)
pfn              1180 include/linux/mmzone.h void subsection_map_init(unsigned long pfn, unsigned long nr_pages);
pfn              1318 include/linux/mmzone.h static inline struct mem_section *__pfn_to_section(unsigned long pfn)
pfn              1320 include/linux/mmzone.h 	return __nr_to_section(pfn_to_section_nr(pfn));
pfn              1325 include/linux/mmzone.h static inline int subsection_map_index(unsigned long pfn)
pfn              1327 include/linux/mmzone.h 	return (pfn & ~(PAGE_SECTION_MASK)) / PAGES_PER_SUBSECTION;
pfn              1331 include/linux/mmzone.h static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
pfn              1333 include/linux/mmzone.h 	int idx = subsection_map_index(pfn);
pfn              1338 include/linux/mmzone.h static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
pfn              1345 include/linux/mmzone.h static inline int pfn_valid(unsigned long pfn)
pfn              1349 include/linux/mmzone.h 	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
pfn              1351 include/linux/mmzone.h 	ms = __nr_to_section(pfn_to_section_nr(pfn));
pfn              1358 include/linux/mmzone.h 	return early_section(ms) || pfn_section_valid(ms, pfn);
pfn              1362 include/linux/mmzone.h static inline int pfn_present(unsigned long pfn)
pfn              1364 include/linux/mmzone.h 	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
pfn              1366 include/linux/mmzone.h 	return present_section(__nr_to_section(pfn_to_section_nr(pfn)));
pfn              1375 include/linux/mmzone.h #define pfn_to_nid(pfn)							\
pfn              1377 include/linux/mmzone.h 	unsigned long __pfn_to_nid_pfn = (pfn);				\
pfn              1381 include/linux/mmzone.h #define pfn_to_nid(pfn)		(0)
pfn              1384 include/linux/mmzone.h #define early_pfn_valid(pfn)	pfn_valid(pfn)
pfn              1405 include/linux/mmzone.h #define early_pfn_valid(pfn)	(1)
pfn              1417 include/linux/mmzone.h #define pfn_valid_within(pfn) pfn_valid(pfn)
pfn              1419 include/linux/mmzone.h #define pfn_valid_within(pfn) (1)
pfn              1443 include/linux/mmzone.h bool memmap_valid_within(unsigned long pfn,
pfn              1446 include/linux/mmzone.h static inline bool memmap_valid_within(unsigned long pfn,
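
The SPARSEMEM pfn_valid() above (line 1345) is two cheap lookups: pfn >> PFN_SECTION_SHIFT names the mem_section, and with subsection support a bit test in that section's usage map refines validity to subsection granularity. A worked example, assuming x86-64 defaults (128MiB sections, 2MiB subsections, 4KiB pages):

        /* PFN_SECTION_SHIFT    = 27 - 12 = 15
         * PAGES_PER_SUBSECTION = 512
         * For pfn 0x123456:
         *   section nr     = 0x123456 >> 15            = 0x24
         *   subsection idx = (0x123456 & 0x7fff) / 512 = 26
         * pfn_valid() then tests bit 26 of that section's usage map. */
        static inline unsigned long sk_subsection_idx(unsigned long pfn)
        {
                return (pfn & 0x7fffUL) / 512;
        }
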
pfn                58 include/linux/pageblock-flags.h 				unsigned long pfn,
pfn                64 include/linux/pageblock-flags.h 				unsigned long pfn,
pfn                29 include/linux/pfn_t.h static inline pfn_t __pfn_to_pfn_t(unsigned long pfn, u64 flags)
pfn                31 include/linux/pfn_t.h 	pfn_t pfn_t = { .val = pfn | (flags & PFN_FLAGS_MASK), };
pfn                37 include/linux/pfn_t.h static inline pfn_t pfn_to_pfn_t(unsigned long pfn)
pfn                39 include/linux/pfn_t.h 	return __pfn_to_pfn_t(pfn, 0);
pfn                47 include/linux/pfn_t.h static inline bool pfn_t_has_page(pfn_t pfn)
pfn                49 include/linux/pfn_t.h 	return (pfn.val & PFN_MAP) == PFN_MAP || (pfn.val & PFN_DEV) == 0;
pfn                52 include/linux/pfn_t.h static inline unsigned long pfn_t_to_pfn(pfn_t pfn)
pfn                54 include/linux/pfn_t.h 	return pfn.val & ~PFN_FLAGS_MASK;
pfn                57 include/linux/pfn_t.h static inline struct page *pfn_t_to_page(pfn_t pfn)
pfn                59 include/linux/pfn_t.h 	if (pfn_t_has_page(pfn))
pfn                60 include/linux/pfn_t.h 		return pfn_to_page(pfn_t_to_pfn(pfn));
pfn                64 include/linux/pfn_t.h static inline phys_addr_t pfn_t_to_phys(pfn_t pfn)
pfn                66 include/linux/pfn_t.h 	return PFN_PHYS(pfn_t_to_pfn(pfn));
pfn                74 include/linux/pfn_t.h static inline int pfn_t_valid(pfn_t pfn)
pfn                76 include/linux/pfn_t.h 	return pfn_valid(pfn_t_to_pfn(pfn));
pfn                80 include/linux/pfn_t.h static inline pte_t pfn_t_pte(pfn_t pfn, pgprot_t pgprot)
pfn                82 include/linux/pfn_t.h 	return pfn_pte(pfn_t_to_pfn(pfn), pgprot);
pfn                87 include/linux/pfn_t.h static inline pmd_t pfn_t_pmd(pfn_t pfn, pgprot_t pgprot)
pfn                89 include/linux/pfn_t.h 	return pfn_pmd(pfn_t_to_pfn(pfn), pgprot);
pfn                93 include/linux/pfn_t.h static inline pud_t pfn_t_pud(pfn_t pfn, pgprot_t pgprot)
pfn                95 include/linux/pfn_t.h 	return pfn_pud(pfn_t_to_pfn(pfn), pgprot);
pfn               101 include/linux/pfn_t.h static inline bool pfn_t_devmap(pfn_t pfn)
pfn               105 include/linux/pfn_t.h 	return (pfn.val & flags) == flags;
pfn               108 include/linux/pfn_t.h static inline bool pfn_t_devmap(pfn_t pfn)
pfn               121 include/linux/pfn_t.h static inline bool pfn_t_special(pfn_t pfn)
pfn               123 include/linux/pfn_t.h 	return (pfn.val & PFN_SPECIAL) == PFN_SPECIAL;
pfn               126 include/linux/pfn_t.h static inline bool pfn_t_special(pfn_t pfn)
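
pfn_t widens a bare pfn to a u64 whose top bits carry type flags: PFN_DEV for device memory, PFN_MAP when that memory also has struct pages (devmap), PFN_SPECIAL for pte_special-style mappings. The predicates above just test those bits. A sketch of how a driver builds one, as the cramfs entry at line 425 above does with phys_to_pfn_t():

        #include <linux/pfn_t.h>

        static pfn_t sk_dev_pfn(phys_addr_t phys, bool has_pages)
        {
                /* PFN_DEV alone: no struct page; add PFN_MAP when the
                 * region was mapped with struct pages (devm_memremap_pages) */
                u64 flags = PFN_DEV | (has_pages ? PFN_MAP : 0);

                return phys_to_pfn_t(phys, flags);
        }
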
pfn                22 include/linux/ras.h int cec_add_elem(u64 pfn);
pfn                25 include/linux/ras.h static inline int cec_add_elem(u64 pfn)		{ return -ENODEV; }
pfn                29 include/linux/set_memory.h static inline int set_mce_nospec(unsigned long pfn, bool unmap)
pfn                36 include/linux/set_memory.h static inline int clear_mce_nospec(unsigned long pfn)
pfn               453 include/linux/suspend.h int pfn_is_nosave(unsigned long pfn);
pfn               578 include/linux/suspend.h void page_key_read(unsigned long *pfn);
pfn               579 include/linux/suspend.h void page_key_memorize(unsigned long *pfn);
pfn               595 include/linux/suspend.h static inline void page_key_read(unsigned long *pfn) {}
pfn               596 include/linux/suspend.h static inline void page_key_memorize(unsigned long *pfn) {}
pfn               400 include/ras/ras_event.h 	TP_PROTO(unsigned long pfn,
pfn               404 include/ras/ras_event.h 	TP_ARGS(pfn, type, result),
pfn               407 include/ras/ras_event.h 		__field(unsigned long, pfn)
pfn               413 include/ras/ras_event.h 		__entry->pfn	= pfn;
pfn               419 include/ras/ras_event.h 		__entry->pfn,
pfn              2797 include/rdma/ib_verbs.h 		      unsigned long pfn, unsigned long size, pgprot_t prot);
pfn              2801 include/rdma/ib_verbs.h 				    unsigned long pfn, unsigned long size,
pfn                13 include/trace/events/cma.h 	TP_PROTO(unsigned long pfn, const struct page *page,
pfn                16 include/trace/events/cma.h 	TP_ARGS(pfn, page, count, align),
pfn                19 include/trace/events/cma.h 		__field(unsigned long, pfn)
pfn                26 include/trace/events/cma.h 		__entry->pfn = pfn;
pfn                33 include/trace/events/cma.h 		  __entry->pfn,
pfn                41 include/trace/events/cma.h 	TP_PROTO(unsigned long pfn, const struct page *page,
pfn                44 include/trace/events/cma.h 	TP_ARGS(pfn, page, count),
pfn                47 include/trace/events/cma.h 		__field(unsigned long, pfn)
pfn                53 include/trace/events/cma.h 		__entry->pfn = pfn;
pfn                59 include/trace/events/cma.h 		  __entry->pfn,
pfn                23 include/trace/events/filemap.h 		__field(unsigned long, pfn)
pfn                30 include/trace/events/filemap.h 		__entry->pfn = page_to_pfn(page);
pfn                42 include/trace/events/filemap.h 		pfn_to_page(__entry->pfn),
pfn                43 include/trace/events/filemap.h 		__entry->pfn,
pfn               107 include/trace/events/fs_dax.h 		long length, pfn_t pfn, void *radix_entry),
pfn               108 include/trace/events/fs_dax.h 	TP_ARGS(inode, vmf, length, pfn, radix_entry),
pfn               126 include/trace/events/fs_dax.h 		__entry->pfn_val = pfn.val;
pfn               148 include/trace/events/fs_dax.h 		long length, pfn_t pfn, void *radix_entry), \
pfn               149 include/trace/events/fs_dax.h 	TP_ARGS(inode, vmf, length, pfn, radix_entry))
pfn                58 include/trace/events/huge_memory.h 		__field(unsigned long, pfn)
pfn                68 include/trace/events/huge_memory.h 		__entry->pfn = page ? page_to_pfn(page) : -1;
pfn                78 include/trace/events/huge_memory.h 		__entry->pfn,
pfn               118 include/trace/events/huge_memory.h 		__field(unsigned long, pfn)
pfn               126 include/trace/events/huge_memory.h 		__entry->pfn = page ? page_to_pfn(page) : -1;
pfn               134 include/trace/events/huge_memory.h 		__entry->pfn,
pfn               159 include/trace/events/kmem.h 		__field(	unsigned long,	pfn		)
pfn               164 include/trace/events/kmem.h 		__entry->pfn		= page_to_pfn(page);
pfn               169 include/trace/events/kmem.h 			pfn_to_page(__entry->pfn),
pfn               170 include/trace/events/kmem.h 			__entry->pfn,
pfn               181 include/trace/events/kmem.h 		__field(	unsigned long,	pfn		)
pfn               185 include/trace/events/kmem.h 		__entry->pfn		= page_to_pfn(page);
pfn               189 include/trace/events/kmem.h 			pfn_to_page(__entry->pfn),
pfn               190 include/trace/events/kmem.h 			__entry->pfn)
pfn               201 include/trace/events/kmem.h 		__field(	unsigned long,	pfn		)
pfn               208 include/trace/events/kmem.h 		__entry->pfn		= page ? page_to_pfn(page) : -1UL;
pfn               215 include/trace/events/kmem.h 		__entry->pfn != -1UL ? pfn_to_page(__entry->pfn) : NULL,
pfn               216 include/trace/events/kmem.h 		__entry->pfn != -1UL ? __entry->pfn : 0,
pfn               229 include/trace/events/kmem.h 		__field(	unsigned long,	pfn		)
pfn               235 include/trace/events/kmem.h 		__entry->pfn		= page ? page_to_pfn(page) : -1UL;
pfn               241 include/trace/events/kmem.h 		__entry->pfn != -1UL ? pfn_to_page(__entry->pfn) : NULL,
pfn               242 include/trace/events/kmem.h 		__entry->pfn != -1UL ? __entry->pfn : 0,
pfn               262 include/trace/events/kmem.h 		__field(	unsigned long,	pfn		)
pfn               268 include/trace/events/kmem.h 		__entry->pfn		= page ? page_to_pfn(page) : -1UL;
pfn               274 include/trace/events/kmem.h 		pfn_to_page(__entry->pfn), __entry->pfn,
pfn               289 include/trace/events/kmem.h 		__field(	unsigned long,	pfn			)
pfn               298 include/trace/events/kmem.h 		__entry->pfn			= page_to_pfn(page);
pfn               308 include/trace/events/kmem.h 		pfn_to_page(__entry->pfn),
pfn               309 include/trace/events/kmem.h 		__entry->pfn,
pfn                20 include/trace/events/page_ref.h 		__field(unsigned long, pfn)
pfn                30 include/trace/events/page_ref.h 		__entry->pfn = page_to_pfn(page);
pfn                40 include/trace/events/page_ref.h 		__entry->pfn,
pfn                68 include/trace/events/page_ref.h 		__field(unsigned long, pfn)
pfn                79 include/trace/events/page_ref.h 		__entry->pfn = page_to_pfn(page);
pfn                90 include/trace/events/page_ref.h 		__entry->pfn,
pfn                39 include/trace/events/pagemap.h 		__field(unsigned long,	pfn	)
pfn                46 include/trace/events/pagemap.h 		__entry->pfn	= page_to_pfn(page);
pfn                54 include/trace/events/pagemap.h 			__entry->pfn,
pfn                72 include/trace/events/pagemap.h 		__field(unsigned long,	pfn	)
pfn                77 include/trace/events/pagemap.h 		__entry->pfn	= page_to_pfn(page);
pfn                81 include/trace/events/pagemap.h 	TP_printk("page=%p pfn=%lu", __entry->page, __entry->pfn)
pfn               319 include/trace/events/vmscan.h 		__field(unsigned long, pfn)
pfn               324 include/trace/events/vmscan.h 		__entry->pfn = page_to_pfn(page);
pfn               330 include/trace/events/vmscan.h 		pfn_to_page(__entry->pfn),
pfn               331 include/trace/events/vmscan.h 		__entry->pfn,
pfn               307 include/trace/events/xen.h 	    TP_PROTO(struct mm_struct *mm, unsigned long pfn, unsigned level, bool pinned),
pfn               308 include/trace/events/xen.h 	    TP_ARGS(mm, pfn, level, pinned),
pfn               311 include/trace/events/xen.h 		    __field(unsigned long, pfn)
pfn               316 include/trace/events/xen.h 			   __entry->pfn = pfn;
pfn               320 include/trace/events/xen.h 		      __entry->mm, __entry->pfn, __entry->level,
pfn               325 include/trace/events/xen.h 	    TP_PROTO(unsigned long pfn, unsigned level, bool pinned),
pfn               326 include/trace/events/xen.h 	    TP_ARGS(pfn, level, pinned),
pfn               328 include/trace/events/xen.h 		    __field(unsigned long, pfn)
pfn               332 include/trace/events/xen.h 	    TP_fast_assign(__entry->pfn = pfn;
pfn               336 include/trace/events/xen.h 		      __entry->pfn, __entry->level,
pfn                15 include/xen/arm/page.h #define phys_to_machine_mapping_valid(pfn) (1)
pfn                43 include/xen/arm/page.h unsigned long __pfn_to_mfn(unsigned long pfn);
pfn                47 include/xen/arm/page.h static inline unsigned long pfn_to_gfn(unsigned long pfn)
pfn                49 include/xen/arm/page.h 	return pfn;
pfn                58 include/xen/arm/page.h static inline unsigned long pfn_to_bfn(unsigned long pfn)
pfn                63 include/xen/arm/page.h 		mfn = __pfn_to_mfn(pfn);
pfn                68 include/xen/arm/page.h 	return pfn;
pfn                96 include/xen/arm/page.h bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
pfn                97 include/xen/arm/page.h bool __set_phys_to_machine_multi(unsigned long pfn, unsigned long mfn,
pfn               100 include/xen/arm/page.h static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
pfn               102 include/xen/arm/page.h 	return __set_phys_to_machine(pfn, mfn);
pfn               104 include/xen/grant_table.h int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn);
pfn               144 include/xen/grant_table.h 				       unsigned long pfn);
pfn               187 include/xen/grant_table.h 	xen_pfn_t *pfn;
pfn                61 include/xen/interface/hvm/hvm_op.h     uint64_t pfn;
pfn                66 include/xen/xen-ops.h 		  xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot,
pfn                70 include/xen/xen-ops.h 				xen_pfn_t *pfn, int nr, int *err_ptr,
pfn               379 kernel/debug/kdb/kdb_support.c 	unsigned long pfn;
pfn               383 kernel/debug/kdb/kdb_support.c 	pfn = (addr >> PAGE_SHIFT);
pfn               384 kernel/debug/kdb/kdb_support.c 	if (!pfn_valid(pfn))
pfn               386 kernel/debug/kdb/kdb_support.c 	page = pfn_to_page(pfn);
pfn               250 kernel/dma/coherent.c 			unsigned long pfn = mem->pfn_base + start + off;
pfn               251 kernel/dma/coherent.c 			*ret = remap_pfn_range(vma, vma->vm_start, pfn,
pfn                71 kernel/dma/debug.c 	unsigned long	 pfn;
pfn               394 kernel/dma/debug.c 		return __pfn_to_phys(entry->pfn) + entry->offset;
pfn               396 kernel/dma/debug.c 	return page_to_phys(pfn_to_page(entry->pfn)) + entry->offset;
pfn               418 kernel/dma/debug.c 					 phys_addr(entry), entry->pfn,
pfn               461 kernel/dma/debug.c 	return (entry->pfn << CACHELINE_PER_PAGE_SHIFT) +
pfn               838 kernel/dma/debug.c 				   phys_addr(entry), entry->pfn,
pfn              1272 kernel/dma/debug.c 	entry->pfn	 = page_to_pfn(page);
pfn              1363 kernel/dma/debug.c 		entry->pfn	      = page_to_pfn(sg_page(s));
pfn              1417 kernel/dma/debug.c 			.pfn		= page_to_pfn(sg_page(s)),
pfn              1463 kernel/dma/debug.c 		entry->pfn = vmalloc_to_pfn(virt);
pfn              1465 kernel/dma/debug.c 		entry->pfn = page_to_pfn(virt_to_page(virt));
pfn              1487 kernel/dma/debug.c 		ref.pfn = vmalloc_to_pfn(virt);
pfn              1489 kernel/dma/debug.c 		ref.pfn = page_to_pfn(virt_to_page(virt));
pfn              1511 kernel/dma/debug.c 	entry->pfn		= PHYS_PFN(addr);
pfn              1593 kernel/dma/debug.c 			.pfn		= page_to_pfn(sg_page(s)),
pfn              1626 kernel/dma/debug.c 			.pfn		= page_to_pfn(sg_page(s)),
pfn               119 kernel/dma/mapping.c 		unsigned long pfn;
pfn               125 kernel/dma/mapping.c 		pfn = arch_dma_coherent_to_pfn(dev, cpu_addr, dma_addr);
pfn               126 kernel/dma/mapping.c 		if (!pfn_valid(pfn))
pfn               128 kernel/dma/mapping.c 		page = pfn_to_page(pfn);
pfn               197 kernel/dma/mapping.c 	unsigned long pfn;
pfn               213 kernel/dma/mapping.c 		pfn = arch_dma_coherent_to_pfn(dev, cpu_addr, dma_addr);
pfn               214 kernel/dma/mapping.c 		if (!pfn_valid(pfn))
pfn               217 kernel/dma/mapping.c 		pfn = page_to_pfn(virt_to_page(cpu_addr));
pfn               220 kernel/dma/mapping.c 	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
pfn               411 kernel/dma/swiotlb.c 	unsigned long pfn = PFN_DOWN(orig_addr);
pfn               414 kernel/dma/swiotlb.c 	if (PageHighMem(pfn_to_page(pfn))) {
pfn               425 kernel/dma/swiotlb.c 			buffer = kmap_atomic(pfn_to_page(pfn));
pfn               434 kernel/dma/swiotlb.c 			pfn++;
pfn                33 kernel/iomem.c 	unsigned long pfn = PHYS_PFN(offset);
pfn                36 kernel/iomem.c 	if (pfn_valid(pfn) && !PageHighMem(pfn_to_page(pfn)) &&
pfn               377 kernel/kexec_core.c 		unsigned long pfn, epfn, addr, eaddr;
pfn               382 kernel/kexec_core.c 		pfn   = page_to_boot_pfn(pages);
pfn               383 kernel/kexec_core.c 		epfn  = pfn + count;
pfn               384 kernel/kexec_core.c 		addr  = pfn << PAGE_SHIFT;
pfn               707 kernel/power/snapshot.c static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
pfn               716 kernel/power/snapshot.c 	if (pfn >= zone->start_pfn && pfn < zone->end_pfn)
pfn               723 kernel/power/snapshot.c 		if (pfn >= curr->start_pfn && pfn < curr->end_pfn) {
pfn               745 kernel/power/snapshot.c 	    ((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
pfn               749 kernel/power/snapshot.c 	block_nr  = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT;
pfn               764 kernel/power/snapshot.c 	bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK;
pfn               768 kernel/power/snapshot.c 	*bit_nr = (pfn - zone->start_pfn) & BM_BLOCK_MASK;
pfn               773 kernel/power/snapshot.c static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
pfn               779 kernel/power/snapshot.c 	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
pfn               784 kernel/power/snapshot.c static int mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn)
pfn               790 kernel/power/snapshot.c 	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
pfn               797 kernel/power/snapshot.c static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
pfn               803 kernel/power/snapshot.c 	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
pfn               816 kernel/power/snapshot.c static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
pfn               822 kernel/power/snapshot.c 	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
pfn               827 kernel/power/snapshot.c static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
pfn               832 kernel/power/snapshot.c 	return !memory_bm_find_bit(bm, pfn, &addr, &bit);
pfn               884 kernel/power/snapshot.c 	unsigned long bits, pfn, pages;
pfn               893 kernel/power/snapshot.c 			pfn = bm->cur.zone->start_pfn + bm->cur.node_pfn + bit;
pfn               895 kernel/power/snapshot.c 			return pfn;
pfn              1051 kernel/power/snapshot.c 		unsigned long pfn;
pfn              1058 kernel/power/snapshot.c 		for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
pfn              1059 kernel/power/snapshot.c 			if (pfn_valid(pfn)) {
pfn              1066 kernel/power/snapshot.c 				mem_bm_set_bit_check(bm, pfn);
pfn              1151 kernel/power/snapshot.c 	unsigned long pfn;
pfn              1158 kernel/power/snapshot.c 		pfn = memory_bm_next_pfn(bm);
pfn              1159 kernel/power/snapshot.c 		while (pfn != BM_END_OF_MAP) {
pfn              1160 kernel/power/snapshot.c 			if (pfn_valid(pfn))
pfn              1161 kernel/power/snapshot.c 				clear_highpage(pfn_to_page(pfn));
pfn              1163 kernel/power/snapshot.c 			pfn = memory_bm_next_pfn(bm);
pfn              1219 kernel/power/snapshot.c static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
pfn              1223 kernel/power/snapshot.c 	if (!pfn_valid(pfn))
pfn              1226 kernel/power/snapshot.c 	page = pfn_to_online_page(pfn);
pfn              1253 kernel/power/snapshot.c 		unsigned long pfn, max_zone_pfn;
pfn              1260 kernel/power/snapshot.c 		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
pfn              1261 kernel/power/snapshot.c 			if (saveable_highmem_page(zone, pfn))
pfn              1283 kernel/power/snapshot.c static struct page *saveable_page(struct zone *zone, unsigned long pfn)
pfn              1287 kernel/power/snapshot.c 	if (!pfn_valid(pfn))
pfn              1290 kernel/power/snapshot.c 	page = pfn_to_online_page(pfn);
pfn              1303 kernel/power/snapshot.c 	    && (!kernel_page_present(page) || pfn_is_nosave(pfn)))
pfn              1318 kernel/power/snapshot.c 	unsigned long pfn, max_zone_pfn;
pfn              1327 kernel/power/snapshot.c 		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
pfn              1328 kernel/power/snapshot.c 			if (saveable_page(zone, pfn))
pfn              1366 kernel/power/snapshot.c static inline struct page *page_is_saveable(struct zone *zone, unsigned long pfn)
pfn              1369 kernel/power/snapshot.c 		saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn);
pfn              1401 kernel/power/snapshot.c #define page_is_saveable(zone, pfn)	saveable_page(zone, pfn)
pfn              1414 kernel/power/snapshot.c 	unsigned long pfn;
pfn              1421 kernel/power/snapshot.c 		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
pfn              1422 kernel/power/snapshot.c 			if (page_is_saveable(zone, pfn))
pfn              1423 kernel/power/snapshot.c 				memory_bm_set_bit(orig_bm, pfn);
pfn              1428 kernel/power/snapshot.c 		pfn = memory_bm_next_pfn(orig_bm);
pfn              1429 kernel/power/snapshot.c 		if (unlikely(pfn == BM_END_OF_MAP))
pfn              1431 kernel/power/snapshot.c 		copy_data_page(memory_bm_next_pfn(copy_bm), pfn);
pfn              1627 kernel/power/snapshot.c 		unsigned long pfn = memory_bm_next_pfn(&copy_bm);
pfn              1628 kernel/power/snapshot.c 		struct page *page = pfn_to_page(pfn);
pfn              1641 kernel/power/snapshot.c 		memory_bm_clear_bit(&copy_bm, pfn);
pfn              2145 kernel/power/snapshot.c 	unsigned long pfn;
pfn              2148 kernel/power/snapshot.c 	pfn = memory_bm_next_pfn(src);
pfn              2149 kernel/power/snapshot.c 	while (pfn != BM_END_OF_MAP) {
pfn              2150 kernel/power/snapshot.c 		memory_bm_set_bit(dst, pfn);
pfn              2151 kernel/power/snapshot.c 		pfn = memory_bm_next_pfn(src);
pfn              2163 kernel/power/snapshot.c 	unsigned long pfn;
pfn              2167 kernel/power/snapshot.c 	pfn = memory_bm_next_pfn(free_pages_map);
pfn              2168 kernel/power/snapshot.c 	while (pfn != BM_END_OF_MAP) {
pfn              2170 kernel/power/snapshot.c 		pfn = memory_bm_next_pfn(free_pages_map);
pfn              2265 kernel/power/snapshot.c 	unsigned long pfn;
pfn              2269 kernel/power/snapshot.c 	pfn = memory_bm_next_pfn(bm);
pfn              2270 kernel/power/snapshot.c 	while (pfn != BM_END_OF_MAP) {
pfn              2271 kernel/power/snapshot.c 		if (PageHighMem(pfn_to_page(pfn)))
pfn              2274 kernel/power/snapshot.c 		pfn = memory_bm_next_pfn(bm);
pfn              2542 kernel/power/snapshot.c 	unsigned long pfn = memory_bm_next_pfn(bm);
pfn              2544 kernel/power/snapshot.c 	if (pfn == BM_END_OF_MAP)
pfn              2547 kernel/power/snapshot.c 	page = pfn_to_page(pfn);
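
The hibernation bitmaps above are keyed by pfn: memory_bm_find_bit() first locates the zone containing the pfn, then the bitmap block holding (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT (with a one-entry cache, line 745), and the bit index is the low remainder. A sketch of just the index math:

        /* BM_BLOCK_SHIFT is log2 of the bits held by one bitmap page. */
        static void sk_bm_index(unsigned long pfn, unsigned long zone_start,
                                unsigned int bm_block_shift,
                                unsigned long *block_nr, unsigned int *bit_nr)
        {
                unsigned long off = pfn - zone_start;

                *block_nr = off >> bm_block_shift;                /* which bitmap page */
                *bit_nr   = off & ((1UL << bm_block_shift) - 1);  /* bit inside it */
        }
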
pfn               481 kernel/resource.c 	unsigned long pfn, end_pfn;
pfn               490 kernel/resource.c 		pfn = PFN_UP(res.start);
pfn               492 kernel/resource.c 		if (end_pfn > pfn)
pfn               493 kernel/resource.c 			ret = (*func)(pfn, end_pfn - pfn, arg);
pfn               501 kernel/resource.c static int __is_ram(unsigned long pfn, unsigned long nr_pages, void *arg)
pfn               510 kernel/resource.c int __weak page_is_ram(unsigned long pfn)
pfn               512 kernel/resource.c 	return walk_system_ram_range(pfn, 1, NULL, __is_ram) == 1;
pfn                67 lib/ioremap.c  	u64 pfn;
pfn                69 lib/ioremap.c  	pfn = phys_addr >> PAGE_SHIFT;
pfn                75 lib/ioremap.c  		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
pfn                76 lib/ioremap.c  		pfn++;
pfn                83 mm/cma.c       static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
pfn                88 mm/cma.c       	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
pfn                99 mm/cma.c       	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
pfn               110 mm/cma.c       	WARN_ON_ONCE(!pfn_valid(pfn));
pfn               111 mm/cma.c       	zone = page_zone(pfn_to_page(pfn));
pfn               116 mm/cma.c       		base_pfn = pfn;
pfn               117 mm/cma.c       		for (j = pageblock_nr_pages; j; --j, pfn++) {
pfn               118 mm/cma.c       			WARN_ON_ONCE(!pfn_valid(pfn));
pfn               125 mm/cma.c       			if (page_zone(pfn_to_page(pfn)) != zone)
pfn               421 mm/cma.c       	unsigned long pfn = -1;
pfn               462 mm/cma.c       		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
pfn               464 mm/cma.c       		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
pfn               468 mm/cma.c       			page = pfn_to_page(pfn);
pfn               472 mm/cma.c       		cma_clear_bitmap(cma, pfn, count);
pfn               477 mm/cma.c       			 __func__, pfn_to_page(pfn));
pfn               482 mm/cma.c       	trace_cma_alloc(pfn, page, count, align);
pfn               516 mm/cma.c       	unsigned long pfn;
pfn               523 mm/cma.c       	pfn = page_to_pfn(pages);
pfn               525 mm/cma.c       	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
pfn               528 mm/cma.c       	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);
pfn               530 mm/cma.c       	free_contig_range(pfn, count);
pfn               531 mm/cma.c       	cma_clear_bitmap(cma, pfn, count);
pfn               532 mm/cma.c       	trace_cma_release(pfn, pages, count);
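
In mm/cma.c above, one allocation-bitmap bit covers 2^order_per_bit pages starting at base_pfn, so cma_alloc() turns a free bitmap position into a pfn for alloc_contig_range() (line 462) and cma_release() inverts the mapping before clearing the bits (line 88). The two directions, spelled out:

        static inline unsigned long sk_cma_bit_to_pfn(unsigned long base_pfn,
                                                      unsigned long bitmap_no,
                                                      unsigned int order_per_bit)
        {
                return base_pfn + (bitmap_no << order_per_bit);
        }

        static inline unsigned long sk_cma_pfn_to_bit(unsigned long base_pfn,
                                                      unsigned long pfn,
                                                      unsigned int order_per_bit)
        {
                return (pfn - base_pfn) >> order_per_bit;
        }
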
pfn                48 mm/compaction.c #define block_start_pfn(pfn, order)	round_down(pfn, 1UL << (order))
pfn                49 mm/compaction.c #define block_end_pfn(pfn, order)	ALIGN((pfn) + 1, 1UL << (order))
pfn                50 mm/compaction.c #define pageblock_start_pfn(pfn)	block_start_pfn(pfn, pageblock_order)
pfn                51 mm/compaction.c #define pageblock_end_pfn(pfn)		block_end_pfn(pfn, pageblock_order)
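
These four macros let the compaction scanners snap any pfn to its pageblock's [start, end) range; worked values below.

        /* Example with pageblock_order == 9 (512 pages of 4KiB, i.e. 2MiB):
         *   pageblock_start_pfn(0x12345) == round_down(0x12345, 512) == 0x12200
         *   pageblock_end_pfn(0x12345)   == ALIGN(0x12345 + 1, 512)  == 0x12400
         * so a scanner positioned anywhere inside a block can recover the
         * block's full pfn range. */
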
pfn                59 mm/compaction.c 		unsigned long pfn = page_to_pfn(page);
pfn                62 mm/compaction.c 		if (pfn > high_pfn)
pfn                63 mm/compaction.c 			high_pfn = pfn;
pfn               241 mm/compaction.c __reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source,
pfn               244 mm/compaction.c 	struct page *page = pfn_to_online_page(pfn);
pfn               272 mm/compaction.c 	block_pfn = pageblock_start_pfn(pfn);
pfn               277 mm/compaction.c 		pfn = block_pfn;
pfn               281 mm/compaction.c 	block_pfn = pageblock_end_pfn(pfn) - 1;
pfn               293 mm/compaction.c 		if (pfn_valid_within(pfn)) {
pfn               306 mm/compaction.c 		pfn += (1 << PAGE_ALLOC_COSTLY_ORDER);
pfn               389 mm/compaction.c 							unsigned long pfn)
pfn               397 mm/compaction.c 	if (!IS_ALIGNED(pfn, pageblock_nr_pages))
pfn               407 mm/compaction.c static void update_cached_migrate(struct compact_control *cc, unsigned long pfn)
pfn               411 mm/compaction.c 	pfn = pageblock_end_pfn(pfn);
pfn               417 mm/compaction.c 	if (pfn > zone->compact_cached_migrate_pfn[0])
pfn               418 mm/compaction.c 		zone->compact_cached_migrate_pfn[0] = pfn;
pfn               420 mm/compaction.c 	    pfn > zone->compact_cached_migrate_pfn[1])
pfn               421 mm/compaction.c 		zone->compact_cached_migrate_pfn[1] = pfn;
pfn               429 mm/compaction.c 			struct page *page, unsigned long pfn)
pfn               442 mm/compaction.c 	if (pfn < zone->compact_cached_free_pfn)
pfn               443 mm/compaction.c 		zone->compact_cached_free_pfn = pfn;
pfn               458 mm/compaction.c 			struct page *page, unsigned long pfn)
pfn               462 mm/compaction.c static void update_cached_migrate(struct compact_control *cc, unsigned long pfn)
pfn               467 mm/compaction.c 							unsigned long pfn)
pfn               685 mm/compaction.c 	unsigned long isolated, pfn, block_start_pfn, block_end_pfn;
pfn               688 mm/compaction.c 	pfn = start_pfn;
pfn               689 mm/compaction.c 	block_start_pfn = pageblock_start_pfn(pfn);
pfn               692 mm/compaction.c 	block_end_pfn = pageblock_end_pfn(pfn);
pfn               694 mm/compaction.c 	for (; pfn < end_pfn; pfn += isolated,
pfn               698 mm/compaction.c 		unsigned long isolate_start_pfn = pfn;
pfn               707 mm/compaction.c 		if (pfn >= block_end_pfn) {
pfn               708 mm/compaction.c 			block_start_pfn = pageblock_start_pfn(pfn);
pfn               709 mm/compaction.c 			block_end_pfn = pageblock_end_pfn(pfn);
pfn               738 mm/compaction.c 	if (pfn < end_pfn) {
pfn               745 mm/compaction.c 	return pfn;
pfn              1088 mm/compaction.c 	unsigned long pfn, block_start_pfn, block_end_pfn;
pfn              1091 mm/compaction.c 	pfn = start_pfn;
pfn              1092 mm/compaction.c 	block_start_pfn = pageblock_start_pfn(pfn);
pfn              1095 mm/compaction.c 	block_end_pfn = pageblock_end_pfn(pfn);
pfn              1097 mm/compaction.c 	for (; pfn < end_pfn; pfn = block_end_pfn,
pfn              1107 mm/compaction.c 		pfn = isolate_migratepages_block(cc, pfn, block_end_pfn,
pfn              1110 mm/compaction.c 		if (!pfn)
pfn              1117 mm/compaction.c 	return pfn;
pfn              1222 mm/compaction.c fast_isolate_around(struct compact_control *cc, unsigned long pfn, unsigned long nr_isolated)
pfn              1225 mm/compaction.c 	struct page *page = pfn_to_page(pfn);
pfn              1236 mm/compaction.c 	start_pfn = pageblock_start_pfn(pfn);
pfn              1237 mm/compaction.c 	end_pfn = min(pageblock_end_pfn(pfn), zone_end_pfn(cc->zone)) - 1;
pfn              1240 mm/compaction.c 	if (start_pfn != pfn) {
pfn              1241 mm/compaction.c 		isolate_freepages_block(cc, &start_pfn, pfn, &cc->freepages, 1, false);
pfn              1247 mm/compaction.c 	start_pfn = pfn + nr_isolated;
pfn              1331 mm/compaction.c 			unsigned long pfn;
pfn              1335 mm/compaction.c 			pfn = page_to_pfn(freepage);
pfn              1337 mm/compaction.c 			if (pfn >= highest)
pfn              1338 mm/compaction.c 				highest = pageblock_start_pfn(pfn);
pfn              1340 mm/compaction.c 			if (pfn >= low_pfn) {
pfn              1347 mm/compaction.c 			if (pfn >= min_pfn && pfn > high_pfn) {
pfn              1348 mm/compaction.c 				high_pfn = pfn;
pfn              1596 mm/compaction.c update_fast_start_pfn(struct compact_control *cc, unsigned long pfn)
pfn              1602 mm/compaction.c 		cc->fast_start_pfn = pfn;
pfn              1604 mm/compaction.c 	cc->fast_start_pfn = min(cc->fast_start_pfn, pfn);
pfn              1629 mm/compaction.c 	unsigned long pfn = cc->migrate_pfn;
pfn              1635 mm/compaction.c 		return pfn;
pfn              1642 mm/compaction.c 	if (pfn != cc->zone->zone_start_pfn && pfn != pageblock_start_pfn(pfn))
pfn              1643 mm/compaction.c 		return pfn;
pfn              1651 mm/compaction.c 		return pfn;
pfn              1660 mm/compaction.c 		return pfn;
pfn              1674 mm/compaction.c 	     order >= PAGE_ALLOC_COSTLY_ORDER && pfn == cc->migrate_pfn && nr_scanned < limit;
pfn              1709 mm/compaction.c 				pfn = pageblock_start_pfn(free_pfn);
pfn              1730 mm/compaction.c 	if (pfn == cc->migrate_pfn)
pfn              1731 mm/compaction.c 		pfn = reinit_migrate_pfn(cc);
pfn              1733 mm/compaction.c 	return pfn;
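
The mm/compaction.c hits above share one loop shape: a pfn cursor is clamped to pageblock boundaries and advanced a block at a time. A minimal userspace sketch of that arithmetic follows; the block size is assumed (order-9 blocks of 4KB pages) and the helpers are local re-implementations of the kernel macros, not the kernel's own.

#include <stdio.h>

/* assumed geometry: order-9 pageblocks of 4KB pages (512 pfns per block) */
#define PAGEBLOCK_NR_PAGES 512UL

/* local stand-in for the kernel's pageblock_start_pfn() */
static unsigned long pageblock_start_pfn(unsigned long pfn)
{
        return pfn & ~(PAGEBLOCK_NR_PAGES - 1);
}

/* local stand-in for pageblock_end_pfn(): ALIGN(pfn + 1, block size),
 * i.e. the first pfn of the next block */
static unsigned long pageblock_end_pfn(unsigned long pfn)
{
        return (pfn + PAGEBLOCK_NR_PAGES) & ~(PAGEBLOCK_NR_PAGES - 1);
}

int main(void)
{
        unsigned long pfn = 1000, end_pfn = 2560;
        unsigned long block_end = pageblock_end_pfn(pfn);

        /* the scanners' loop shape: finish the current block, then step
         * whole blocks until the range is exhausted */
        for (; pfn < end_pfn; pfn = block_end, block_end += PAGEBLOCK_NR_PAGES)
                printf("scan [%4lu, %4lu) in block starting at %4lu\n",
                       pfn, block_end, pageblock_start_pfn(pfn));
        return 0;
}
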
pfn              1907 mm/gup.c       static int __gup_device_huge(unsigned long pfn, unsigned long addr,
pfn              1914 mm/gup.c       		struct page *page = pfn_to_page(pfn);
pfn              1916 mm/gup.c       		pgmap = get_dev_pagemap(pfn, pgmap);
pfn              1925 mm/gup.c       		pfn++;
pfn               224 mm/hmm.c       			    bool write_fault, uint64_t *pfn)
pfn               251 mm/hmm.c       	*pfn = range->values[HMM_PFN_ERROR];
pfn               410 mm/hmm.c       	unsigned long pfn, npages, i;
pfn               422 mm/hmm.c       	pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
pfn               423 mm/hmm.c       	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++) {
pfn               425 mm/hmm.c       			hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
pfn               430 mm/hmm.c       		pfns[i] = hmm_device_entry_from_pfn(range, pfn) | cpu_flags;
pfn               456 mm/hmm.c       			      uint64_t *pfn)
pfn               463 mm/hmm.c       	uint64_t orig_pfn = *pfn;
pfn               465 mm/hmm.c       	*pfn = range->values[HMM_PFN_NONE];
pfn               501 mm/hmm.c       			*pfn = hmm_device_entry_from_pfn(range,
pfn               503 mm/hmm.c       			*pfn |= cpu_flags;
pfn               518 mm/hmm.c       		*pfn = range->values[HMM_PFN_ERROR];
pfn               535 mm/hmm.c       		*pfn = range->values[HMM_PFN_SPECIAL];
pfn               539 mm/hmm.c       	*pfn = hmm_device_entry_from_pfn(range, pte_pfn(pte)) | cpu_flags;
pfn               672 mm/hmm.c       		unsigned long i, npages, pfn;
pfn               690 mm/hmm.c       		pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
pfn               691 mm/hmm.c       		for (i = 0; i < npages; ++i, ++pfn) {
pfn               692 mm/hmm.c       			hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
pfn               696 mm/hmm.c       			pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
pfn               730 mm/hmm.c       	unsigned long addr = start, i, pfn;
pfn               755 mm/hmm.c       	pfn = pte_pfn(entry) + ((start & ~hmask) >> PAGE_SHIFT);
pfn               756 mm/hmm.c       	for (; addr < end; addr += PAGE_SIZE, i++, pfn++)
pfn               757 mm/hmm.c       		range->pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
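
The mm/hmm.c walkers above derive a per-page pfn from a huge entry by adding the page offset of the address within the region to the entry's base pfn (the mm/gup.c device-huge loop just before them uses the same base-plus-increment idea). A standalone sketch of that offset arithmetic, with x86-64 shift constants assumed:

#include <stdio.h>

#define PAGE_SHIFT 12                   /* assumed 4KB pages */
#define PMD_SHIFT  21                   /* assumed 2MB PMD entries */
#define PMD_MASK   (~((1UL << PMD_SHIFT) - 1))

int main(void)
{
        unsigned long base_pfn = 0x40000;               /* stand-in for pmd_pfn(pmd) */
        unsigned long addr = (base_pfn << PAGE_SHIFT) + 0x7000;

        /* the hmm pattern: base pfn of the huge entry plus the page
         * offset of addr inside the PMD-sized region */
        unsigned long pfn = base_pfn + ((addr & ~PMD_MASK) >> PAGE_SHIFT);

        printf("addr %#lx -> pfn %#lx\n", addr, pfn);   /* prints 0x40007 */
        return 0;
}
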
pfn               770 mm/huge_memory.c 		pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write,
pfn               780 mm/huge_memory.c 			if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) {
pfn               793 mm/huge_memory.c 	entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
pfn               794 mm/huge_memory.c 	if (pfn_t_devmap(pfn))
pfn               816 mm/huge_memory.c vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write)
pfn               829 mm/huge_memory.c 			!pfn_t_devmap(pfn));
pfn               843 mm/huge_memory.c 	track_pfn_insert(vma, &pgprot, pfn);
pfn               845 mm/huge_memory.c 	insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, pgtable);
pfn               859 mm/huge_memory.c 		pud_t *pud, pfn_t pfn, pgprot_t prot, bool write)
pfn               868 mm/huge_memory.c 			if (pud_pfn(*pud) != pfn_t_to_pfn(pfn)) {
pfn               880 mm/huge_memory.c 	entry = pud_mkhuge(pfn_t_pud(pfn, prot));
pfn               881 mm/huge_memory.c 	if (pfn_t_devmap(pfn))
pfn               894 mm/huge_memory.c vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write)
pfn               906 mm/huge_memory.c 			!pfn_t_devmap(pfn));
pfn               914 mm/huge_memory.c 	track_pfn_insert(vma, &pgprot, pfn);
pfn               916 mm/huge_memory.c 	insert_pfn_pud(vma, addr, vmf->pud, pfn, pgprot, write);
pfn               938 mm/huge_memory.c 	unsigned long pfn = pmd_pfn(*pmd);
pfn               968 mm/huge_memory.c 	pfn += (addr & ~PMD_MASK) >> PAGE_SHIFT;
pfn               969 mm/huge_memory.c 	*pgmap = get_dev_pagemap(pfn, *pgmap);
pfn               972 mm/huge_memory.c 	page = pfn_to_page(pfn);
pfn              1084 mm/huge_memory.c 	unsigned long pfn = pud_pfn(*pud);
pfn              1108 mm/huge_memory.c 	pfn += (addr & ~PUD_MASK) >> PAGE_SHIFT;
pfn              1109 mm/huge_memory.c 	*pgmap = get_dev_pagemap(pfn, *pgmap);
pfn              1112 mm/huge_memory.c 	page = pfn_to_page(pfn);
pfn              2971 mm/huge_memory.c 	unsigned long pfn, max_zone_pfn;
pfn              2979 mm/huge_memory.c 		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
pfn              2980 mm/huge_memory.c 			if (!pfn_valid(pfn))
pfn              2983 mm/huge_memory.c 			page = pfn_to_page(pfn);
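
vmf_insert_pfn_pmd()/vmf_insert_pfn_pud() above are how a driver maps device pfns with huge entries. A kernel-side sketch (not standalone) of a huge-fault handler using the PMD variant; mydev_addr_to_pfn() is a hypothetical helper, and its result is assumed to be 2MB-aligned device memory:

/* sketch only: mydev_addr_to_pfn() is hypothetical */
static vm_fault_t mydev_huge_fault(struct vm_fault *vmf,
                                   enum page_entry_size pe_size)
{
        unsigned long pfn;

        if (pe_size != PE_SIZE_PMD)
                return VM_FAULT_FALLBACK;

        pfn = mydev_addr_to_pfn(vmf->vma, vmf->address);

        /* install one huge device mapping; writability follows the fault */
        return vmf_insert_pfn_pmd(vmf, __pfn_to_pfn_t(pfn, PFN_DEV | PFN_MAP),
                                  vmf->flags & FAULT_FLAG_WRITE);
}
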
pfn              1120 mm/hugetlb.c   	unsigned long ret, pfn, flags;
pfn              1129 mm/hugetlb.c   		pfn = ALIGN(zone->zone_start_pfn, nr_pages);
pfn              1130 mm/hugetlb.c   		while (zone_spans_last_pfn(zone, pfn, nr_pages)) {
pfn              1131 mm/hugetlb.c   			if (pfn_range_valid_gigantic(zone, pfn, nr_pages)) {
pfn              1140 mm/hugetlb.c   				ret = __alloc_gigantic_page(pfn, nr_pages, gfp_mask);
pfn              1142 mm/hugetlb.c   					return pfn_to_page(pfn);
pfn              1145 mm/hugetlb.c   			pfn += nr_pages;
pfn              1651 mm/hugetlb.c   	unsigned long pfn;
pfn              1658 mm/hugetlb.c   	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order) {
pfn              1659 mm/hugetlb.c   		page = pfn_to_page(pfn);
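
The gigantic-page allocator above scans a zone in naturally aligned nr_pages strides, testing each candidate range before attempting allocation. A userspace sketch of the stride arithmetic, assuming a 1GB gigantic page built from 4KB base pages:

#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))  /* a: power of two */

int main(void)
{
        unsigned long zone_start = 0x12345, zone_end = 0xc0000;
        unsigned long nr_pages = 1UL << 18;         /* 1GB page, 4KB base pages */

        /* the hugetlb scan: only naturally aligned candidates can become
         * a gigantic page, so step in whole nr_pages strides */
        for (unsigned long pfn = ALIGN(zone_start, nr_pages);
             pfn + nr_pages <= zone_end; pfn += nr_pages)
                printf("candidate range [%#lx, %#lx)\n", pfn, pfn + nr_pages);
        return 0;
}
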
pfn                16 mm/hwpoison-inject.c 	unsigned long pfn = val;
pfn                24 mm/hwpoison-inject.c 	if (!pfn_valid(pfn))
pfn                27 mm/hwpoison-inject.c 	p = pfn_to_page(pfn);
pfn                55 mm/hwpoison-inject.c 	pr_info("Injecting memory failure at pfn %#lx\n", pfn);
pfn                56 mm/hwpoison-inject.c 	return memory_failure(pfn, MF_COUNT_INCREASED);
pfn               160 mm/internal.h  extern void memblock_free_pages(struct page *page, unsigned long pfn,
pfn               413 mm/internal.h  		unsigned long pfn = page_to_pfn(base) + offset;
pfn               414 mm/internal.h  		if (!pfn_valid(pfn))
pfn               416 mm/internal.h  		return pfn_to_page(pfn);
pfn              1445 mm/kmemleak.c  		unsigned long pfn;
pfn              1447 mm/kmemleak.c  		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
pfn              1448 mm/kmemleak.c  			struct page *page = pfn_to_online_page(pfn);
pfn              1460 mm/kmemleak.c  			if (!(pfn & 63))
pfn               880 mm/madvise.c   		unsigned long pfn;
pfn               886 mm/madvise.c   		pfn = page_to_pfn(page);
pfn               902 mm/madvise.c   					pfn, start);
pfn               911 mm/madvise.c   				pfn, start);
pfn               920 mm/madvise.c   		ret = memory_failure(pfn, 0);
pfn              1750 mm/memblock.c  int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
pfn              1754 mm/memblock.c  	int mid = memblock_search(type, PFN_PHYS(pfn));
pfn               209 mm/memory-failure.c static int kill_proc(struct to_kill *tk, unsigned long pfn, int flags)
pfn               216 mm/memory-failure.c 		pfn, t->comm, t->pid);
pfn               362 mm/memory-failure.c 		unsigned long pfn, int flags)
pfn               375 mm/memory-failure.c 				       pfn, tk->tsk->comm, tk->tsk->pid);
pfn               386 mm/memory-failure.c 			else if (kill_proc(tk, pfn, flags) < 0)
pfn               388 mm/memory-failure.c 				       pfn, tk->tsk->comm, tk->tsk->pid);
pfn               593 mm/memory-failure.c static int truncate_error_page(struct page *p, unsigned long pfn,
pfn               603 mm/memory-failure.c 				pfn, err);
pfn               607 mm/memory-failure.c 				pfn);
pfn               620 mm/memory-failure.c 				pfn);
pfn               631 mm/memory-failure.c static int me_kernel(struct page *p, unsigned long pfn)
pfn               639 mm/memory-failure.c static int me_unknown(struct page *p, unsigned long pfn)
pfn               641 mm/memory-failure.c 	pr_err("Memory failure: %#lx: Unknown page state\n", pfn);
pfn               648 mm/memory-failure.c static int me_pagecache_clean(struct page *p, unsigned long pfn)
pfn               681 mm/memory-failure.c 	return truncate_error_page(p, pfn, mapping);
pfn               689 mm/memory-failure.c static int me_pagecache_dirty(struct page *p, unsigned long pfn)
pfn               733 mm/memory-failure.c 	return me_pagecache_clean(p, pfn);
pfn               755 mm/memory-failure.c static int me_swapcache_dirty(struct page *p, unsigned long pfn)
pfn               767 mm/memory-failure.c static int me_swapcache_clean(struct page *p, unsigned long pfn)
pfn               783 mm/memory-failure.c static int me_huge_page(struct page *p, unsigned long pfn)
pfn               794 mm/memory-failure.c 		res = truncate_error_page(hpage, pfn, mapping);
pfn               839 mm/memory-failure.c 	int (*action)(struct page *p, unsigned long pfn);
pfn               888 mm/memory-failure.c static void action_result(unsigned long pfn, enum mf_action_page_type type,
pfn               891 mm/memory-failure.c 	trace_memory_failure_event(pfn, type, result);
pfn               894 mm/memory-failure.c 		pfn, action_page_types[type], action_name[result]);
pfn               898 mm/memory-failure.c 			unsigned long pfn)
pfn               903 mm/memory-failure.c 	result = ps->action(p, pfn);
pfn               910 mm/memory-failure.c 		       pfn, action_page_types[ps->type], count);
pfn               913 mm/memory-failure.c 	action_result(pfn, ps->type, result);
pfn               965 mm/memory-failure.c static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
pfn               993 mm/memory-failure.c 		pr_err("Memory failure: %#lx: can't handle KSM pages.\n", pfn);
pfn               999 mm/memory-failure.c 			pfn);
pfn              1018 mm/memory-failure.c 				pfn);
pfn              1036 mm/memory-failure.c 		       pfn, page_mapcount(hpage));
pfn              1056 mm/memory-failure.c 	kill_procs(&tokill, forcekill, !unmap_success, pfn, flags);
pfn              1061 mm/memory-failure.c static int identify_page_state(unsigned long pfn, struct page *p,
pfn              1081 mm/memory-failure.c 	return page_action(ps, p, pfn);
pfn              1084 mm/memory-failure.c static int memory_failure_hugetlb(unsigned long pfn, int flags)
pfn              1086 mm/memory-failure.c 	struct page *p = pfn_to_page(pfn);
pfn              1093 mm/memory-failure.c 		       pfn);
pfn              1114 mm/memory-failure.c 		action_result(pfn, MF_MSG_FREE_HUGE, MF_DELAYED);
pfn              1122 mm/memory-failure.c 		pr_err("Memory failure: %#lx: just unpoisoned\n", pfn);
pfn              1139 mm/memory-failure.c 		action_result(pfn, MF_MSG_NON_PMD_HUGE, MF_IGNORED);
pfn              1144 mm/memory-failure.c 	if (!hwpoison_user_mappings(p, pfn, flags, &head)) {
pfn              1145 mm/memory-failure.c 		action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
pfn              1150 mm/memory-failure.c 	res = identify_page_state(pfn, p, page_flags);
pfn              1156 mm/memory-failure.c static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
pfn              1159 mm/memory-failure.c 	struct page *page = pfn_to_page(pfn);
pfn              1220 mm/memory-failure.c 	kill_procs(&tokill, flags & MF_MUST_KILL, !unmap_success, pfn, flags);
pfn              1227 mm/memory-failure.c 	action_result(pfn, MF_MSG_DAX, rc ? MF_FAILED : MF_RECOVERED);
pfn              1248 mm/memory-failure.c int memory_failure(unsigned long pfn, int flags)
pfn              1258 mm/memory-failure.c 		panic("Memory failure on page %lx", pfn);
pfn              1260 mm/memory-failure.c 	p = pfn_to_online_page(pfn);
pfn              1262 mm/memory-failure.c 		if (pfn_valid(pfn)) {
pfn              1263 mm/memory-failure.c 			pgmap = get_dev_pagemap(pfn, NULL);
pfn              1265 mm/memory-failure.c 				return memory_failure_dev_pagemap(pfn, flags,
pfn              1269 mm/memory-failure.c 			pfn);
pfn              1274 mm/memory-failure.c 		return memory_failure_hugetlb(pfn, flags);
pfn              1277 mm/memory-failure.c 			pfn);
pfn              1297 mm/memory-failure.c 			action_result(pfn, MF_MSG_BUDDY, MF_DELAYED);
pfn              1300 mm/memory-failure.c 			action_result(pfn, MF_MSG_KERNEL_HIGH_ORDER, MF_IGNORED);
pfn              1311 mm/memory-failure.c 					pfn);
pfn              1314 mm/memory-failure.c 					pfn);
pfn              1337 mm/memory-failure.c 			action_result(pfn, MF_MSG_BUDDY, MF_DELAYED);
pfn              1339 mm/memory-failure.c 			action_result(pfn, MF_MSG_BUDDY_2ND, MF_DELAYED);
pfn              1350 mm/memory-failure.c 		action_result(pfn, MF_MSG_DIFFERENT_COMPOUND, MF_IGNORED);
pfn              1371 mm/memory-failure.c 		pr_err("Memory failure: %#lx: just unpoisoned\n", pfn);
pfn              1401 mm/memory-failure.c 	if (!hwpoison_user_mappings(p, pfn, flags, &hpage)) {
pfn              1402 mm/memory-failure.c 		action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
pfn              1411 mm/memory-failure.c 		action_result(pfn, MF_MSG_TRUNCATED_LRU, MF_IGNORED);
pfn              1417 mm/memory-failure.c 	res = identify_page_state(pfn, p, page_flags);
pfn              1428 mm/memory-failure.c 	unsigned long pfn;
pfn              1457 mm/memory-failure.c void memory_failure_queue(unsigned long pfn, int flags)
pfn              1462 mm/memory-failure.c 		.pfn =		pfn,
pfn              1472 mm/memory-failure.c 		       pfn);
pfn              1493 mm/memory-failure.c 			soft_offline_page(pfn_to_page(entry.pfn), entry.flags);
pfn              1495 mm/memory-failure.c 			memory_failure(entry.pfn, entry.flags);
pfn              1515 mm/memory-failure.c #define unpoison_pr_info(fmt, pfn, rs)			\
pfn              1518 mm/memory-failure.c 		pr_info(fmt, pfn);			\
pfn              1533 mm/memory-failure.c int unpoison_memory(unsigned long pfn)
pfn              1541 mm/memory-failure.c 	if (!pfn_valid(pfn))
pfn              1544 mm/memory-failure.c 	p = pfn_to_page(pfn);
pfn              1549 mm/memory-failure.c 				 pfn, &unpoison_rs);
pfn              1555 mm/memory-failure.c 				 pfn, &unpoison_rs);
pfn              1561 mm/memory-failure.c 				 pfn, &unpoison_rs);
pfn              1567 mm/memory-failure.c 				 pfn, &unpoison_rs);
pfn              1578 mm/memory-failure.c 				 pfn, &unpoison_rs);
pfn              1586 mm/memory-failure.c 				 pfn, &unpoison_rs);
pfn              1599 mm/memory-failure.c 				 pfn, &unpoison_rs);
pfn              1606 mm/memory-failure.c 	if (freeit && !(pfn == my_zero_pfn(0) && page_count(p) == 1))
pfn              1626 mm/memory-failure.c static int __get_any_page(struct page *p, unsigned long pfn, int flags)
pfn              1639 mm/memory-failure.c 			pr_info("%s: %#lx free huge page\n", __func__, pfn);
pfn              1642 mm/memory-failure.c 			pr_info("%s: %#lx free buddy page\n", __func__, pfn);
pfn              1646 mm/memory-failure.c 				__func__, pfn, p->flags);
pfn              1656 mm/memory-failure.c static int get_any_page(struct page *page, unsigned long pfn, int flags)
pfn              1658 mm/memory-failure.c 	int ret = __get_any_page(page, pfn, flags);
pfn              1671 mm/memory-failure.c 		ret = __get_any_page(page, pfn, 0);
pfn              1676 mm/memory-failure.c 				pfn, page->flags, &page->flags);
pfn              1686 mm/memory-failure.c 	unsigned long pfn = page_to_pfn(page);
pfn              1698 mm/memory-failure.c 		pr_info("soft offline: %#lx hugepage already poisoned\n", pfn);
pfn              1710 mm/memory-failure.c 		pr_info("soft offline: %#lx hugepage failed to isolate\n", pfn);
pfn              1718 mm/memory-failure.c 			pfn, ret, page->flags, &page->flags);
pfn              1745 mm/memory-failure.c 	unsigned long pfn = page_to_pfn(page);
pfn              1758 mm/memory-failure.c 		pr_info("soft offline: %#lx page already poisoned\n", pfn);
pfn              1773 mm/memory-failure.c 		pr_info("soft_offline: %#lx: invalidated\n", pfn);
pfn              1811 mm/memory-failure.c 				pfn, ret, page->flags, &page->flags);
pfn              1817 mm/memory-failure.c 			pfn, ret, page_count(page), page->flags, &page->flags);
pfn              1897 mm/memory-failure.c 	unsigned long pfn = page_to_pfn(page);
pfn              1901 mm/memory-failure.c 				pfn);
pfn              1908 mm/memory-failure.c 		pr_info("soft offline: %#lx page already poisoned\n", pfn);
pfn              1915 mm/memory-failure.c 	ret = get_any_page(page, pfn, flags);
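
Taken together, the memory_failure() hits above dispatch a poisoned pfn to one of three handlers: ordinary online RAM, hugetlb pages, or ZONE_DEVICE memory reached through its pagemap. A condensed kernel-side sketch of that dispatch; the wrapper name is hypothetical, and the refcounting, logging, and unmap steps of the real function are dropped:

/* hypothetical wrapper; condensed from the dispatch shown above */
static int handle_poisoned_pfn(unsigned long pfn, int flags)
{
        struct page *p = pfn_to_online_page(pfn);

        if (!p) {
                /* not online RAM: maybe ZONE_DEVICE (e.g. DAX) memory */
                if (pfn_valid(pfn)) {
                        struct dev_pagemap *pgmap = get_dev_pagemap(pfn, NULL);

                        if (pgmap)
                                return memory_failure_dev_pagemap(pfn, flags,
                                                                  pgmap);
                }
                return -ENXIO;          /* no struct page behind this pfn */
        }

        if (PageHuge(p))
                return memory_failure_hugetlb(pfn, flags);

        /* the full path then pins the page, unmaps users via
         * hwpoison_user_mappings(), and dispatches on page state */
        return identify_page_state(pfn, p, p->flags);
}
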
pfn               577 mm/memory.c    	unsigned long pfn = pte_pfn(pte);
pfn               586 mm/memory.c    		if (is_zero_pfn(pfn))
pfn               599 mm/memory.c    			if (!pfn_valid(pfn))
pfn               605 mm/memory.c    			if (pfn == vma->vm_pgoff + off)
pfn               612 mm/memory.c    	if (is_zero_pfn(pfn))
pfn               616 mm/memory.c    	if (unlikely(pfn > highest_memmap_pfn)) {
pfn               626 mm/memory.c    	return pfn_to_page(pfn);
pfn               633 mm/memory.c    	unsigned long pfn = pmd_pfn(pmd);
pfn               642 mm/memory.c    			if (!pfn_valid(pfn))
pfn               648 mm/memory.c    			if (pfn == vma->vm_pgoff + off)
pfn               657 mm/memory.c    	if (is_zero_pfn(pfn))
pfn               659 mm/memory.c    	if (unlikely(pfn > highest_memmap_pfn))
pfn               667 mm/memory.c    	return pfn_to_page(pfn);
pfn              1582 mm/memory.c    			pfn_t pfn, pgprot_t prot, bool mkwrite)
pfn              1603 mm/memory.c    			if (pte_pfn(*pte) != pfn_t_to_pfn(pfn)) {
pfn              1616 mm/memory.c    	if (pfn_t_devmap(pfn))
pfn              1617 mm/memory.c    		entry = pte_mkdevmap(pfn_t_pte(pfn, prot));
pfn              1619 mm/memory.c    		entry = pte_mkspecial(pfn_t_pte(pfn, prot));
pfn              1653 mm/memory.c    			unsigned long pfn, pgprot_t pgprot)
pfn              1665 mm/memory.c    	BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
pfn              1670 mm/memory.c    	if (!pfn_modify_allowed(pfn, pgprot))
pfn              1673 mm/memory.c    	track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV));
pfn              1675 mm/memory.c    	return insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot,
pfn              1701 mm/memory.c    			unsigned long pfn)
pfn              1703 mm/memory.c    	return vmf_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot);
pfn              1707 mm/memory.c    static bool vm_mixed_ok(struct vm_area_struct *vma, pfn_t pfn)
pfn              1712 mm/memory.c    	if (pfn_t_devmap(pfn))
pfn              1714 mm/memory.c    	if (pfn_t_special(pfn))
pfn              1716 mm/memory.c    	if (is_zero_pfn(pfn_t_to_pfn(pfn)))
pfn              1722 mm/memory.c    		unsigned long addr, pfn_t pfn, bool mkwrite)
pfn              1727 mm/memory.c    	BUG_ON(!vm_mixed_ok(vma, pfn));
pfn              1732 mm/memory.c    	track_pfn_insert(vma, &pgprot, pfn);
pfn              1734 mm/memory.c    	if (!pfn_modify_allowed(pfn_t_to_pfn(pfn), pgprot))
pfn              1745 mm/memory.c    	    !pfn_t_devmap(pfn) && pfn_t_valid(pfn)) {
pfn              1753 mm/memory.c    		page = pfn_to_page(pfn_t_to_pfn(pfn));
pfn              1756 mm/memory.c    		return insert_pfn(vma, addr, pfn, pgprot, mkwrite);
pfn              1768 mm/memory.c    		pfn_t pfn)
pfn              1770 mm/memory.c    	return __vm_insert_mixed(vma, addr, pfn, false);
pfn              1780 mm/memory.c    		unsigned long addr, pfn_t pfn)
pfn              1782 mm/memory.c    	return __vm_insert_mixed(vma, addr, pfn, true);
pfn              1793 mm/memory.c    			unsigned long pfn, pgprot_t prot)
pfn              1805 mm/memory.c    		if (!pfn_modify_allowed(pfn, prot)) {
pfn              1809 mm/memory.c    		set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
pfn              1810 mm/memory.c    		pfn++;
pfn              1819 mm/memory.c    			unsigned long pfn, pgprot_t prot)
pfn              1825 mm/memory.c    	pfn -= addr >> PAGE_SHIFT;
pfn              1833 mm/memory.c    				pfn + (addr >> PAGE_SHIFT), prot);
pfn              1842 mm/memory.c    			unsigned long pfn, pgprot_t prot)
pfn              1848 mm/memory.c    	pfn -= addr >> PAGE_SHIFT;
pfn              1855 mm/memory.c    				pfn + (addr >> PAGE_SHIFT), prot);
pfn              1864 mm/memory.c    			unsigned long pfn, pgprot_t prot)
pfn              1870 mm/memory.c    	pfn -= addr >> PAGE_SHIFT;
pfn              1877 mm/memory.c    				pfn + (addr >> PAGE_SHIFT), prot);
pfn              1897 mm/memory.c    		    unsigned long pfn, unsigned long size, pgprot_t prot)
pfn              1903 mm/memory.c    	unsigned long remap_pfn = pfn;
pfn              1927 mm/memory.c    		vma->vm_pgoff = pfn;
pfn              1937 mm/memory.c    	pfn -= addr >> PAGE_SHIFT;
pfn              1943 mm/memory.c    				pfn + (addr >> PAGE_SHIFT), prot);
pfn              1972 mm/memory.c    	unsigned long vm_len, pfn, pages;
pfn              1983 mm/memory.c    	pfn = start >> PAGE_SHIFT;
pfn              1985 mm/memory.c    	if (pfn + pages < pfn)
pfn              1991 mm/memory.c    	pfn += vma->vm_pgoff;
pfn              2000 mm/memory.c    	return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
pfn              4236 mm/memory.c    	unsigned long *pfn)
pfn              4248 mm/memory.c    	*pfn = pte_pfn(*ptep);
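
remap_pfn_range() above is the workhorse for mapping raw pfns into userspace. A kernel-side sketch of its canonical consumer, a driver mmap handler; mydev_phys_base is a hypothetical device physical base address:

/* sketch only: mydev_phys_base is hypothetical */
static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
        unsigned long size = vma->vm_end - vma->vm_start;
        unsigned long pfn = (mydev_phys_base >> PAGE_SHIFT) + vma->vm_pgoff;

        /* one VM_PFNMAP mapping covering the whole VMA */
        return remap_pfn_range(vma, vma->vm_start, pfn, size,
                               vma->vm_page_prot);
}

vm_iomap_memory(), also listed above, wraps the same call: it derives the pfn and page count from a physical range and rejects wraparound with the pfn + pages < pfn test.
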
pfn               227 mm/memory_hotplug.c 	unsigned long i, pfn, end_pfn, nr_pages;
pfn               237 mm/memory_hotplug.c 	pfn = pgdat->node_start_pfn;
pfn               241 mm/memory_hotplug.c 	for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
pfn               248 mm/memory_hotplug.c 		if (pfn_valid(pfn) && (early_pfn_to_nid(pfn) == node))
pfn               249 mm/memory_hotplug.c 			register_page_bootmem_info_section(pfn);
pfn               254 mm/memory_hotplug.c static int check_pfn_span(unsigned long pfn, unsigned long nr_pages,
pfn               272 mm/memory_hotplug.c 	if (!IS_ALIGNED(pfn, min_align)
pfn               275 mm/memory_hotplug.c 				reason, pfn, pfn + nr_pages - 1);
pfn               287 mm/memory_hotplug.c int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages,
pfn               298 mm/memory_hotplug.c 		if (altmap->base_pfn != pfn
pfn               306 mm/memory_hotplug.c 	err = check_pfn_span(pfn, nr_pages, "add");
pfn               310 mm/memory_hotplug.c 	start_sec = pfn_to_section_nr(pfn);
pfn               311 mm/memory_hotplug.c 	end_sec = pfn_to_section_nr(pfn + nr_pages - 1);
pfn               316 mm/memory_hotplug.c 				- (pfn & ~PAGE_SECTION_MASK));
pfn               317 mm/memory_hotplug.c 		err = sparse_add_section(nid, pfn, pfns, altmap);
pfn               320 mm/memory_hotplug.c 		pfn += pfns;
pfn               354 mm/memory_hotplug.c 	unsigned long pfn;
pfn               357 mm/memory_hotplug.c 	pfn = end_pfn - 1;
pfn               358 mm/memory_hotplug.c 	for (; pfn >= start_pfn; pfn -= PAGES_PER_SUBSECTION) {
pfn               359 mm/memory_hotplug.c 		if (unlikely(!pfn_to_online_page(pfn)))
pfn               362 mm/memory_hotplug.c 		if (unlikely(pfn_to_nid(pfn) != nid))
pfn               365 mm/memory_hotplug.c 		if (zone && zone != page_zone(pfn_to_page(pfn)))
pfn               368 mm/memory_hotplug.c 		return pfn;
pfn               380 mm/memory_hotplug.c 	unsigned long pfn;
pfn               391 mm/memory_hotplug.c 		pfn = find_smallest_section_pfn(nid, zone, end_pfn,
pfn               393 mm/memory_hotplug.c 		if (pfn) {
pfn               394 mm/memory_hotplug.c 			zone->zone_start_pfn = pfn;
pfn               395 mm/memory_hotplug.c 			zone->spanned_pages = zone_end_pfn - pfn;
pfn               404 mm/memory_hotplug.c 		pfn = find_biggest_section_pfn(nid, zone, zone_start_pfn,
pfn               406 mm/memory_hotplug.c 		if (pfn)
pfn               407 mm/memory_hotplug.c 			zone->spanned_pages = pfn - zone_start_pfn + 1;
pfn               416 mm/memory_hotplug.c 	pfn = zone_start_pfn;
pfn               417 mm/memory_hotplug.c 	for (; pfn < zone_end_pfn; pfn += PAGES_PER_SUBSECTION) {
pfn               418 mm/memory_hotplug.c 		if (unlikely(!pfn_to_online_page(pfn)))
pfn               421 mm/memory_hotplug.c 		if (page_zone(pfn_to_page(pfn)) != zone)
pfn               425 mm/memory_hotplug.c 		if (pfn >= start_pfn && pfn < end_pfn)
pfn               495 mm/memory_hotplug.c static void __remove_section(unsigned long pfn, unsigned long nr_pages,
pfn               499 mm/memory_hotplug.c 	struct mem_section *ms = __nr_to_section(pfn_to_section_nr(pfn));
pfn               504 mm/memory_hotplug.c 	sparse_remove_section(ms, pfn, nr_pages, map_offset, altmap);
pfn               518 mm/memory_hotplug.c void __remove_pages(unsigned long pfn, unsigned long nr_pages,
pfn               526 mm/memory_hotplug.c 	if (check_pfn_span(pfn, nr_pages, "remove"))
pfn               529 mm/memory_hotplug.c 	start_sec = pfn_to_section_nr(pfn);
pfn               530 mm/memory_hotplug.c 	end_sec = pfn_to_section_nr(pfn + nr_pages - 1);
pfn               536 mm/memory_hotplug.c 				- (pfn & ~PAGE_SECTION_MASK));
pfn               537 mm/memory_hotplug.c 		__remove_section(pfn, pfns, map_offset, altmap);
pfn               538 mm/memory_hotplug.c 		pfn += pfns;
pfn               620 mm/memory_hotplug.c 	unsigned long pfn;
pfn               628 mm/memory_hotplug.c 	for (pfn = start_pfn; pfn < end_pfn; pfn += 1ul << order) {
pfn               629 mm/memory_hotplug.c 		order = min(MAX_ORDER - 1, get_order(PFN_PHYS(end_pfn - pfn)));
pfn               631 mm/memory_hotplug.c 		if (WARN_ON_ONCE(!IS_ALIGNED(pfn, 1ul << order)))
pfn               633 mm/memory_hotplug.c 		(*online_page_callback)(pfn_to_page(pfn), order);
pfn               790 mm/memory_hotplug.c int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_type)
pfn               807 mm/memory_hotplug.c 	mem = find_memory_block(__pfn_to_section(pfn));
pfn               812 mm/memory_hotplug.c 	zone = zone_for_pfn_range(online_type, nid, pfn, nr_pages);
pfn               813 mm/memory_hotplug.c 	move_pfn_range_to_zone(zone, pfn, nr_pages, NULL);
pfn               815 mm/memory_hotplug.c 	arg.start_pfn = pfn;
pfn               834 mm/memory_hotplug.c 	ret = walk_system_ram_range(pfn, nr_pages, &onlined_pages,
pfn               872 mm/memory_hotplug.c 		 (unsigned long long) pfn << PAGE_SHIFT,
pfn               873 mm/memory_hotplug.c 		 (((unsigned long long) pfn + nr_pages) << PAGE_SHIFT) - 1);
pfn               875 mm/memory_hotplug.c 	remove_pfn_range_from_zone(zone, pfn, nr_pages);
pfn              1150 mm/memory_hotplug.c static unsigned long next_active_pageblock(unsigned long pfn)
pfn              1152 mm/memory_hotplug.c 	struct page *page = pfn_to_page(pfn);
pfn              1155 mm/memory_hotplug.c 	BUG_ON(pfn & (pageblock_nr_pages - 1));
pfn              1163 mm/memory_hotplug.c 			return pfn + (1 << order);
pfn              1166 mm/memory_hotplug.c 	return pfn + pageblock_nr_pages;
pfn              1169 mm/memory_hotplug.c static bool is_pageblock_removable_nolock(unsigned long pfn)
pfn              1171 mm/memory_hotplug.c 	struct page *page = pfn_to_page(pfn);
pfn              1185 mm/memory_hotplug.c 	pfn = page_to_pfn(page);
pfn              1186 mm/memory_hotplug.c 	if (!zone_spans_pfn(zone, pfn))
pfn              1195 mm/memory_hotplug.c 	unsigned long end_pfn, pfn;
pfn              1201 mm/memory_hotplug.c 	for (pfn = start_pfn; pfn < end_pfn; pfn = next_active_pageblock(pfn)) {
pfn              1202 mm/memory_hotplug.c 		if (!is_pageblock_removable_nolock(pfn))
pfn              1218 mm/memory_hotplug.c 	unsigned long pfn, sec_end_pfn;
pfn              1223 mm/memory_hotplug.c 	for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn + 1);
pfn              1224 mm/memory_hotplug.c 	     pfn < end_pfn;
pfn              1225 mm/memory_hotplug.c 	     pfn = sec_end_pfn, sec_end_pfn += PAGES_PER_SECTION) {
pfn              1227 mm/memory_hotplug.c 		if (!present_section_nr(pfn_to_section_nr(pfn)))
pfn              1229 mm/memory_hotplug.c 		for (; pfn < sec_end_pfn && pfn < end_pfn;
pfn              1230 mm/memory_hotplug.c 		     pfn += MAX_ORDER_NR_PAGES) {
pfn              1234 mm/memory_hotplug.c 				!pfn_valid_within(pfn + i))
pfn              1236 mm/memory_hotplug.c 			if (i == MAX_ORDER_NR_PAGES || pfn + i >= end_pfn)
pfn              1239 mm/memory_hotplug.c 			if (zone && !zone_spans_pfn(zone, pfn + i))
pfn              1241 mm/memory_hotplug.c 			page = pfn_to_page(pfn + i);
pfn              1245 mm/memory_hotplug.c 				start = pfn + i;
pfn              1247 mm/memory_hotplug.c 			end = pfn + MAX_ORDER_NR_PAGES;
pfn              1268 mm/memory_hotplug.c 	unsigned long pfn;
pfn              1270 mm/memory_hotplug.c 	for (pfn = start; pfn < end; pfn++) {
pfn              1274 mm/memory_hotplug.c 		if (!pfn_valid(pfn))
pfn              1276 mm/memory_hotplug.c 		page = pfn_to_page(pfn);
pfn              1278 mm/memory_hotplug.c 			return pfn;
pfn              1280 mm/memory_hotplug.c 			return pfn;
pfn              1286 mm/memory_hotplug.c 			return pfn;
pfn              1288 mm/memory_hotplug.c 		pfn += skip - 1;
pfn              1313 mm/memory_hotplug.c 	unsigned long pfn;
pfn              1318 mm/memory_hotplug.c 	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
pfn              1319 mm/memory_hotplug.c 		if (!pfn_valid(pfn))
pfn              1321 mm/memory_hotplug.c 		page = pfn_to_page(pfn);
pfn              1325 mm/memory_hotplug.c 			pfn = page_to_pfn(head) + compound_nr(head) - 1;
pfn              1329 mm/memory_hotplug.c 			pfn = page_to_pfn(compound_head(page))
pfn              1364 mm/memory_hotplug.c 			pr_warn("failed to isolate pfn %lx\n", pfn);
pfn              1490 mm/memory_hotplug.c 	unsigned long pfn, nr_pages;
pfn              1536 mm/memory_hotplug.c 		for (pfn = start_pfn; pfn;) {
pfn              1546 mm/memory_hotplug.c 			pfn = scan_movable_pages(pfn, end_pfn);
pfn              1547 mm/memory_hotplug.c 			if (pfn) {
pfn              1552 mm/memory_hotplug.c 				do_migrate_range(pfn, end_pfn);
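
Hotplug above adds and removes memory in whole sections, so a pfn range is converted to section numbers and stepped in PAGES_PER_SECTION strides. A standalone sketch, assuming the usual x86-64 sparsemem geometry (128MB sections of 4KB pages):

#include <stdio.h>

#define PFN_SECTION_SHIFT 15                    /* assumed x86-64 value */
#define PAGES_PER_SECTION (1UL << PFN_SECTION_SHIFT)

static unsigned long pfn_to_section_nr(unsigned long pfn)
{
        return pfn >> PFN_SECTION_SHIFT;
}

int main(void)
{
        unsigned long pfn = 0x10000, nr_pages = 0x18000;
        unsigned long start_sec = pfn_to_section_nr(pfn);
        unsigned long end_sec = pfn_to_section_nr(pfn + nr_pages - 1);

        /* mirror the __add_pages()/__remove_pages() stride: one section
         * per step, end computed from the last pfn in the range */
        for (unsigned long nr = start_sec; nr <= end_sec; nr++)
                printf("section %lu covers pfns [%#lx, %#lx)\n", nr,
                       nr << PFN_SECTION_SHIFT,
                       (nr + 1) << PFN_SECTION_SHIFT);
        return 0;
}
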
pfn                69 mm/memremap.c  static unsigned long pfn_next(unsigned long pfn)
pfn                71 mm/memremap.c  	if (pfn % 1024 == 0)
pfn                73 mm/memremap.c  	return pfn + 1;
pfn                76 mm/memremap.c  #define for_each_device_pfn(pfn, map) \
pfn                77 mm/memremap.c  	for (pfn = pfn_first(map); pfn < pfn_end(map); pfn = pfn_next(pfn))
pfn               107 mm/memremap.c  	unsigned long pfn;
pfn               111 mm/memremap.c  	for_each_device_pfn(pfn, pgmap)
pfn               112 mm/memremap.c  		put_page(pfn_to_page(pfn));
pfn               387 mm/memremap.c  struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
pfn               390 mm/memremap.c  	resource_size_t phys = PFN_PHYS(pfn);
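
The for_each_device_pfn() iterator above advances through pfn_next(), whose body statement is elided from the listing because it does not contain "pfn": in the kernel source it is a cond_resched() taken every 1024 pfns, so tearing down a large pagemap stays preemptible. A userspace mirror of the pattern with explicit bounds:

#include <stdio.h>

static unsigned long pfn_next(unsigned long pfn)
{
        if (pfn % 1024 == 0)
                printf("yield point at pfn %#lx\n", pfn); /* cond_resched() in-kernel */
        return pfn + 1;
}

#define for_each_device_pfn(pfn, first, last) \
        for (pfn = (first); pfn < (last); pfn = pfn_next(pfn))

int main(void)
{
        unsigned long pfn;

        /* the real loop body does put_page(pfn_to_page(pfn)) at teardown */
        for_each_device_pfn(pfn, 0x7f000, 0x80000)
                ;
        return 0;
}
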
pfn              2247 mm/migrate.c   		unsigned long mpfn, pfn;
pfn              2278 mm/migrate.c   			pfn = pte_pfn(pte);
pfn              2279 mm/migrate.c   			if (is_zero_pfn(pfn)) {
pfn              2285 mm/migrate.c   			mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
pfn              2784 mm/migrate.c   		unsigned long pfn = pte_pfn(*ptep);
pfn              2786 mm/migrate.c   		if (!is_zero_pfn(pfn)) {
pfn                76 mm/mmzone.c    bool memmap_valid_within(unsigned long pfn,
pfn                79 mm/mmzone.c    	if (page_to_pfn(page) != pfn)
pfn               125 mm/nommu.c     	unsigned long *pfn)
pfn               130 mm/nommu.c     	*pfn = address >> PAGE_SHIFT;
pfn              1652 mm/nommu.c     		unsigned long pfn, unsigned long size, pgprot_t prot)
pfn              1654 mm/nommu.c     	if (addr != (pfn << PAGE_SHIFT))
pfn              1664 mm/nommu.c     	unsigned long pfn = start >> PAGE_SHIFT;
pfn              1667 mm/nommu.c     	pfn += vma->vm_pgoff;
pfn              1668 mm/nommu.c     	return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
pfn               390 mm/page_alloc.c static inline bool __meminit early_page_uninitialised(unsigned long pfn)
pfn               392 mm/page_alloc.c 	int nid = early_pfn_to_nid(pfn);
pfn               394 mm/page_alloc.c 	if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
pfn               405 mm/page_alloc.c defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
pfn               428 mm/page_alloc.c 	    (pfn & (PAGES_PER_SECTION - 1)) == 0) {
pfn               429 mm/page_alloc.c 		NODE_DATA(nid)->first_deferred_pfn = pfn;
pfn               437 mm/page_alloc.c static inline bool early_page_uninitialised(unsigned long pfn)
pfn               442 mm/page_alloc.c static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
pfn               450 mm/page_alloc.c 							unsigned long pfn)
pfn               453 mm/page_alloc.c 	return section_to_usemap(__pfn_to_section(pfn));
pfn               459 mm/page_alloc.c static inline int pfn_to_bitidx(struct page *page, unsigned long pfn)
pfn               462 mm/page_alloc.c 	pfn &= (PAGES_PER_SECTION-1);
pfn               463 mm/page_alloc.c 	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
pfn               465 mm/page_alloc.c 	pfn = pfn - round_down(page_zone(page)->zone_start_pfn, pageblock_nr_pages);
pfn               466 mm/page_alloc.c 	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
pfn               480 mm/page_alloc.c 					unsigned long pfn,
pfn               488 mm/page_alloc.c 	bitmap = get_pageblock_bitmap(page, pfn);
pfn               489 mm/page_alloc.c 	bitidx = pfn_to_bitidx(page, pfn);
pfn               498 mm/page_alloc.c unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
pfn               502 mm/page_alloc.c 	return __get_pfnblock_flags_mask(page, pfn, end_bitidx, mask);
pfn               505 mm/page_alloc.c static __always_inline int get_pfnblock_migratetype(struct page *page, unsigned long pfn)
pfn               507 mm/page_alloc.c 	return __get_pfnblock_flags_mask(page, pfn, PB_migrate_end, MIGRATETYPE_MASK);
pfn               519 mm/page_alloc.c 					unsigned long pfn,
pfn               530 mm/page_alloc.c 	bitmap = get_pageblock_bitmap(page, pfn);
pfn               531 mm/page_alloc.c 	bitidx = pfn_to_bitidx(page, pfn);
pfn               535 mm/page_alloc.c 	VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page);
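
get_pfnblock_flags_mask()/set_pfnblock_flags_mask() above locate a pageblock's flag bits inside a bitmap. A standalone sketch of the SPARSEMEM flavour of pfn_to_bitidx() shown in the listing, with typical x86-64 constants assumed:

#include <stdio.h>

#define PAGES_PER_SECTION (1UL << 15)   /* assumed: 128MB sections, 4KB pages */
#define PAGEBLOCK_ORDER   9             /* assumed: 2MB pageblocks */
#define NR_PAGEBLOCK_BITS 4             /* migratetype plus compaction-skip bits */

/* SPARSEMEM flavour: reduce the pfn modulo its section, then each
 * pageblock owns NR_PAGEBLOCK_BITS consecutive bits in the usemap */
static unsigned long pfn_to_bitidx(unsigned long pfn)
{
        pfn &= PAGES_PER_SECTION - 1;
        return (pfn >> PAGEBLOCK_ORDER) * NR_PAGEBLOCK_BITS;
}

int main(void)
{
        printf("pfn 0x12345 -> bitidx %lu\n", pfn_to_bitidx(0x12345));
        return 0;
}
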
pfn               565 mm/page_alloc.c 	unsigned long pfn = page_to_pfn(page);
pfn               572 mm/page_alloc.c 		if (!zone_spans_pfn(zone, pfn))
pfn               578 mm/page_alloc.c 			pfn, zone_to_nid(zone), zone->name,
pfn               898 mm/page_alloc.c 		unsigned long pfn,
pfn               917 mm/page_alloc.c 	VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page);
pfn               927 mm/page_alloc.c 		buddy_pfn = __find_buddy_pfn(pfn, order);
pfn               928 mm/page_alloc.c 		buddy = page + (buddy_pfn - pfn);
pfn               942 mm/page_alloc.c 		combined_pfn = buddy_pfn & pfn;
pfn               943 mm/page_alloc.c 		page = page + (combined_pfn - pfn);
pfn               944 mm/page_alloc.c 		pfn = combined_pfn;
pfn               959 mm/page_alloc.c 			buddy_pfn = __find_buddy_pfn(pfn, order);
pfn               960 mm/page_alloc.c 			buddy = page + (buddy_pfn - pfn);
pfn               986 mm/page_alloc.c 		combined_pfn = buddy_pfn & pfn;
pfn               987 mm/page_alloc.c 		higher_page = page + (combined_pfn - pfn);
pfn              1231 mm/page_alloc.c 	unsigned long pfn = page_to_pfn(page);
pfn              1232 mm/page_alloc.c 	unsigned long buddy_pfn = __find_buddy_pfn(pfn, 0);
pfn              1233 mm/page_alloc.c 	struct page *buddy = page + (buddy_pfn - pfn);
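
The __free_one_page() hits above implement buddy merging: for an order-aligned pfn, the buddy differs from it in exactly one bit, so __find_buddy_pfn() is a single XOR, and ANDing the pair yields the pfn of the merged parent block. A standalone demonstration of the same arithmetic:

#include <stdio.h>

/* local mirror of __find_buddy_pfn(): flip bit 'order' */
static unsigned long find_buddy_pfn(unsigned long pfn, unsigned int order)
{
        return pfn ^ (1UL << order);
}

int main(void)
{
        unsigned long pfn = 0x1340;     /* order-3 aligned */
        unsigned int order = 3;
        unsigned long buddy_pfn = find_buddy_pfn(pfn, order);
        unsigned long combined_pfn = buddy_pfn & pfn;

        printf("pfn %#lx + buddy %#lx merge at %#lx (order %u)\n",
               pfn, buddy_pfn, combined_pfn, order + 1);
        return 0;
}
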
pfn              1327 mm/page_alloc.c 				struct page *page, unsigned long pfn,
pfn              1334 mm/page_alloc.c 		migratetype = get_pfnblock_migratetype(page, pfn);
pfn              1336 mm/page_alloc.c 	__free_one_page(page, pfn, zone, order, migratetype);
pfn              1340 mm/page_alloc.c static void __meminit __init_single_page(struct page *page, unsigned long pfn,
pfn              1344 mm/page_alloc.c 	set_page_links(page, zone, nid, pfn);
pfn              1354 mm/page_alloc.c 		set_page_address(page, __va(pfn << PAGE_SHIFT));
pfn              1359 mm/page_alloc.c static void __meminit init_reserved_page(unsigned long pfn)
pfn              1364 mm/page_alloc.c 	if (!early_page_uninitialised(pfn))
pfn              1367 mm/page_alloc.c 	nid = early_pfn_to_nid(pfn);
pfn              1373 mm/page_alloc.c 		if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone))
pfn              1376 mm/page_alloc.c 	__init_single_page(pfn_to_page(pfn), pfn, zid, nid);
pfn              1379 mm/page_alloc.c static inline void init_reserved_page(unsigned long pfn)
pfn              1418 mm/page_alloc.c 	unsigned long pfn = page_to_pfn(page);
pfn              1423 mm/page_alloc.c 	migratetype = get_pfnblock_migratetype(page, pfn);
pfn              1426 mm/page_alloc.c 	free_one_page(page_zone(page), page, pfn, order, migratetype);
pfn              1455 mm/page_alloc.c int __meminit early_pfn_to_nid(unsigned long pfn)
pfn              1461 mm/page_alloc.c 	nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
pfn              1472 mm/page_alloc.c static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
pfn              1476 mm/page_alloc.c 	nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
pfn              1483 mm/page_alloc.c static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
pfn              1490 mm/page_alloc.c void __init memblock_free_pages(struct page *page, unsigned long pfn,
pfn              1493 mm/page_alloc.c 	if (early_page_uninitialised(pfn))
pfn              1571 mm/page_alloc.c static void __init deferred_free_range(unsigned long pfn,
pfn              1580 mm/page_alloc.c 	page = pfn_to_page(pfn);
pfn              1584 mm/page_alloc.c 	    (pfn & (pageblock_nr_pages - 1)) == 0) {
pfn              1590 mm/page_alloc.c 	for (i = 0; i < nr_pages; i++, page++, pfn++) {
pfn              1591 mm/page_alloc.c 		if ((pfn & (pageblock_nr_pages - 1)) == 0)
pfn              1617 mm/page_alloc.c static inline bool __init deferred_pfn_valid(unsigned long pfn)
pfn              1619 mm/page_alloc.c 	if (!pfn_valid_within(pfn))
pfn              1621 mm/page_alloc.c 	if (!(pfn & (pageblock_nr_pages - 1)) && !pfn_valid(pfn))
pfn              1630 mm/page_alloc.c static void __init deferred_free_pages(unsigned long pfn,
pfn              1636 mm/page_alloc.c 	for (; pfn < end_pfn; pfn++) {
pfn              1637 mm/page_alloc.c 		if (!deferred_pfn_valid(pfn)) {
pfn              1638 mm/page_alloc.c 			deferred_free_range(pfn - nr_free, nr_free);
pfn              1640 mm/page_alloc.c 		} else if (!(pfn & nr_pgmask)) {
pfn              1641 mm/page_alloc.c 			deferred_free_range(pfn - nr_free, nr_free);
pfn              1649 mm/page_alloc.c 	deferred_free_range(pfn - nr_free, nr_free);
pfn              1658 mm/page_alloc.c 						 unsigned long pfn,
pfn              1667 mm/page_alloc.c 	for (; pfn < end_pfn; pfn++) {
pfn              1668 mm/page_alloc.c 		if (!deferred_pfn_valid(pfn)) {
pfn              1671 mm/page_alloc.c 		} else if (!page || !(pfn & nr_pgmask)) {
pfn              1672 mm/page_alloc.c 			page = pfn_to_page(pfn);
pfn              1677 mm/page_alloc.c 		__init_single_page(page, pfn, zid, nid);
pfn              2970 mm/page_alloc.c 	unsigned long pfn, max_zone_pfn, page_count = WD_PAGE_COUNT;
pfn              2981 mm/page_alloc.c 	for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
pfn              2982 mm/page_alloc.c 		if (pfn_valid(pfn)) {
pfn              2983 mm/page_alloc.c 			page = pfn_to_page(pfn);
pfn              3002 mm/page_alloc.c 			pfn = page_to_pfn(page);
pfn              3008 mm/page_alloc.c 				swsusp_set_page_free(pfn_to_page(pfn + i));
pfn              3016 mm/page_alloc.c static bool free_unref_page_prepare(struct page *page, unsigned long pfn)
pfn              3023 mm/page_alloc.c 	migratetype = get_pfnblock_migratetype(page, pfn);
pfn              3028 mm/page_alloc.c static void free_unref_page_commit(struct page *page, unsigned long pfn)
pfn              3046 mm/page_alloc.c 			free_one_page(zone, page, pfn, 0, migratetype);
pfn              3067 mm/page_alloc.c 	unsigned long pfn = page_to_pfn(page);
pfn              3069 mm/page_alloc.c 	if (!free_unref_page_prepare(page, pfn))
pfn              3073 mm/page_alloc.c 	free_unref_page_commit(page, pfn);
pfn              3083 mm/page_alloc.c 	unsigned long flags, pfn;
pfn              3088 mm/page_alloc.c 		pfn = page_to_pfn(page);
pfn              3089 mm/page_alloc.c 		if (!free_unref_page_prepare(page, pfn))
pfn              3091 mm/page_alloc.c 		set_page_private(page, pfn);
pfn              3096 mm/page_alloc.c 		unsigned long pfn = page_private(page);
pfn              3100 mm/page_alloc.c 		free_unref_page_commit(page, pfn);
pfn              5850 mm/page_alloc.c overlap_memmap_init(unsigned long zone, unsigned long *pfn)
pfn              5856 mm/page_alloc.c 		if (!r || *pfn >= memblock_region_memory_end_pfn(r)) {
pfn              5858 mm/page_alloc.c 				if (*pfn < memblock_region_memory_end_pfn(r))
pfn              5862 mm/page_alloc.c 		if (*pfn >= memblock_region_memory_base_pfn(r) &&
pfn              5864 mm/page_alloc.c 			*pfn = memblock_region_memory_end_pfn(r);
pfn              5881 mm/page_alloc.c 	unsigned long pfn, end_pfn = start_pfn + size;
pfn              5905 mm/page_alloc.c 	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
pfn              5911 mm/page_alloc.c 			if (!early_pfn_valid(pfn))
pfn              5913 mm/page_alloc.c 			if (!early_pfn_in_nid(pfn, nid))
pfn              5915 mm/page_alloc.c 			if (overlap_memmap_init(zone, &pfn))
pfn              5917 mm/page_alloc.c 			if (defer_init(nid, pfn, end_pfn))
pfn              5921 mm/page_alloc.c 		page = pfn_to_page(pfn);
pfn              5922 mm/page_alloc.c 		__init_single_page(page, pfn, zone, nid);
pfn              5938 mm/page_alloc.c 		if (!(pfn & (pageblock_nr_pages - 1))) {
pfn              5951 mm/page_alloc.c 	unsigned long pfn, end_pfn = start_pfn + size;
pfn              5971 mm/page_alloc.c 	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
pfn              5972 mm/page_alloc.c 		struct page *page = pfn_to_page(pfn);
pfn              5974 mm/page_alloc.c 		__init_single_page(page, pfn, zone_idx, nid);
pfn              6008 mm/page_alloc.c 		if (!(pfn & (pageblock_nr_pages - 1))) {
pfn              6235 mm/page_alloc.c int __meminit __early_pfn_to_nid(unsigned long pfn,
pfn              6241 mm/page_alloc.c 	if (state->last_start <= pfn && pfn < state->last_end)
pfn              6244 mm/page_alloc.c 	nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
pfn              6919 mm/page_alloc.c 	unsigned long pfn;
pfn              6922 mm/page_alloc.c 	for (pfn = spfn; pfn < epfn; pfn++) {
pfn              6923 mm/page_alloc.c 		if (!pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages))) {
pfn              6924 mm/page_alloc.c 			pfn = ALIGN_DOWN(pfn, pageblock_nr_pages)
pfn              6928 mm/page_alloc.c 		mm_zero_struct_page(pfn_to_page(pfn));
pfn              8196 mm/page_alloc.c 	unsigned long pfn = page_to_pfn(page);
pfn              8221 mm/page_alloc.c 		unsigned long check = pfn + iter;
pfn              8300 mm/page_alloc.c 		dump_page(pfn_to_page(pfn + iter), reason);
pfn              8305 mm/page_alloc.c static unsigned long pfn_max_align_down(unsigned long pfn)
pfn              8307 mm/page_alloc.c 	return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES,
pfn              8311 mm/page_alloc.c static unsigned long pfn_max_align_up(unsigned long pfn)
pfn              8313 mm/page_alloc.c 	return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES,
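
pfn_max_align_down()/pfn_max_align_up() above widen an alloc_contig_range() request to the larger of the MAX_ORDER and pageblock granules, since isolation only works on whole pageblocks. A standalone sketch with typical x86-64 values assumed:

#include <stdio.h>

#define MAX_ORDER_NR_PAGES (1UL << 10)  /* assumed MAX_ORDER = 11 */
#define PAGEBLOCK_NR_PAGES (1UL << 9)

/* mirror of the max_t() expression in the listing */
static unsigned long max_granule(void)
{
        return MAX_ORDER_NR_PAGES > PAGEBLOCK_NR_PAGES ?
               MAX_ORDER_NR_PAGES : PAGEBLOCK_NR_PAGES;
}

static unsigned long pfn_max_align_down(unsigned long pfn)
{
        return pfn & ~(max_granule() - 1);
}

static unsigned long pfn_max_align_up(unsigned long pfn)
{
        return (pfn + max_granule() - 1) & ~(max_granule() - 1);
}

int main(void)
{
        /* the widened range is isolated and migrated, then the excess
         * outside the requested pfns is freed back */
        printf("[%#x, %#x) widens to [%#lx, %#lx)\n", 0x12345, 0x12555,
               pfn_max_align_down(0x12345), pfn_max_align_up(0x12555));
        return 0;
}
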
pfn              8323 mm/page_alloc.c 	unsigned long pfn = start;
pfn              8329 mm/page_alloc.c 	while (pfn < end || !list_empty(&cc->migratepages)) {
pfn              8337 mm/page_alloc.c 			pfn = isolate_migratepages_range(cc, pfn, end);
pfn              8338 mm/page_alloc.c 			if (!pfn) {
pfn              8515 mm/page_alloc.c void free_contig_range(unsigned long pfn, unsigned int nr_pages)
pfn              8519 mm/page_alloc.c 	for (; nr_pages--; pfn++) {
pfn              8520 mm/page_alloc.c 		struct page *page = pfn_to_page(pfn);
pfn              8572 mm/page_alloc.c 	unsigned long pfn;
pfn              8577 mm/page_alloc.c 	for (pfn = start_pfn; pfn < end_pfn; pfn++)
pfn              8578 mm/page_alloc.c 		if (pfn_valid(pfn))
pfn              8580 mm/page_alloc.c 	if (pfn == end_pfn)
pfn              8583 mm/page_alloc.c 	offline_mem_sections(pfn, end_pfn);
pfn              8584 mm/page_alloc.c 	zone = page_zone(pfn_to_page(pfn));
pfn              8586 mm/page_alloc.c 	pfn = start_pfn;
pfn              8587 mm/page_alloc.c 	while (pfn < end_pfn) {
pfn              8588 mm/page_alloc.c 		if (!pfn_valid(pfn)) {
pfn              8589 mm/page_alloc.c 			pfn++;
pfn              8592 mm/page_alloc.c 		page = pfn_to_page(pfn);
pfn              8598 mm/page_alloc.c 			pfn++;
pfn              8610 mm/page_alloc.c 			pfn, 1 << order, end_pfn);
pfn              8615 mm/page_alloc.c 		pfn += (1 << order);
pfn              8626 mm/page_alloc.c 	unsigned long pfn = page_to_pfn(page);
pfn              8632 mm/page_alloc.c 		struct page *page_head = page - (pfn & ((1 << order) - 1));
pfn              8651 mm/page_alloc.c 	unsigned long pfn = page_to_pfn(page);
pfn              8658 mm/page_alloc.c 		struct page *page_head = page - (pfn & ((1 << order) - 1));
pfn               117 mm/page_ext.c  	unsigned long pfn = page_to_pfn(page);
pfn               130 mm/page_ext.c  	index = pfn - round_down(node_start_pfn(page_to_nid(page)),
pfn               192 mm/page_ext.c  	unsigned long pfn = page_to_pfn(page);
pfn               193 mm/page_ext.c  	struct mem_section *section = __pfn_to_section(pfn);
pfn               202 mm/page_ext.c  	return get_entry(section->page_ext, pfn);
pfn               221 mm/page_ext.c  static int __meminit init_section_page_ext(unsigned long pfn, int nid)
pfn               227 mm/page_ext.c  	section = __pfn_to_section(pfn);
pfn               251 mm/page_ext.c  	pfn &= PAGE_SECTION_MASK;
pfn               252 mm/page_ext.c  	section->page_ext = (void *)base - page_ext_size * pfn;
pfn               273 mm/page_ext.c  static void __free_page_ext(unsigned long pfn)
pfn               278 mm/page_ext.c  	ms = __pfn_to_section(pfn);
pfn               281 mm/page_ext.c  	base = get_entry(ms->page_ext, pfn);
pfn               290 mm/page_ext.c  	unsigned long start, end, pfn;
pfn               306 mm/page_ext.c  	for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) {
pfn               307 mm/page_ext.c  		if (!pfn_present(pfn))
pfn               309 mm/page_ext.c  		fail = init_section_page_ext(pfn, nid);
pfn               315 mm/page_ext.c  	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
pfn               316 mm/page_ext.c  		__free_page_ext(pfn);
pfn               324 mm/page_ext.c  	unsigned long start, end, pfn;
pfn               329 mm/page_ext.c  	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
pfn               330 mm/page_ext.c  		__free_page_ext(pfn);
pfn               368 mm/page_ext.c  	unsigned long pfn;
pfn               384 mm/page_ext.c  		for (pfn = start_pfn; pfn < end_pfn;
pfn               385 mm/page_ext.c  			pfn = ALIGN(pfn + 1, PAGES_PER_SECTION)) {
pfn               387 mm/page_ext.c  			if (!pfn_valid(pfn))
pfn               395 mm/page_ext.c  			if (pfn_to_nid(pfn) != nid)
pfn               397 mm/page_ext.c  			if (init_section_page_ext(pfn, nid))
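
init_section_page_ext() above stores a base pointer deliberately biased downward by page_ext_size times the section's first pfn, so get_entry() can index with the raw pfn and needs no subtraction. A userspace sketch of the trick, done in integer arithmetic to keep it well-defined; both sizes are assumptions (the real page_ext_size depends on which debug features are enabled):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_EXT_SIZE     32UL          /* assumed entry size */
#define PAGES_PER_SECTION (1UL << 15)   /* assumed x86-64 sparsemem */

int main(void)
{
        unsigned long pfn = 0x10123;
        unsigned long section_start = pfn & ~(PAGES_PER_SECTION - 1);
        char *alloc = calloc(PAGES_PER_SECTION, PAGE_EXT_SIZE);

        /* the biased base: subtracting the section's first pfn up front
         * lets every later lookup use base + size * pfn directly */
        uintptr_t page_ext = (uintptr_t)alloc - PAGE_EXT_SIZE * section_start;
        char *entry = (char *)(page_ext + PAGE_EXT_SIZE * pfn);

        printf("entry offset in allocation: %ld\n", (long)(entry - alloc));
        free(alloc);
        return 0;
}
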
pfn                31 mm/page_idle.c static struct page *page_idle_get_page(unsigned long pfn)
pfn                36 mm/page_idle.c 	if (!pfn_valid(pfn))
pfn                39 mm/page_idle.c 	page = pfn_to_page(pfn);
pfn               127 mm/page_idle.c 	unsigned long pfn, end_pfn;
pfn               133 mm/page_idle.c 	pfn = pos * BITS_PER_BYTE;
pfn               134 mm/page_idle.c 	if (pfn >= max_pfn)
pfn               137 mm/page_idle.c 	end_pfn = pfn + count * BITS_PER_BYTE;
pfn               141 mm/page_idle.c 	for (; pfn < end_pfn; pfn++) {
pfn               142 mm/page_idle.c 		bit = pfn % BITMAP_CHUNK_BITS;
pfn               145 mm/page_idle.c 		page = page_idle_get_page(pfn);
pfn               172 mm/page_idle.c 	unsigned long pfn, end_pfn;
pfn               178 mm/page_idle.c 	pfn = pos * BITS_PER_BYTE;
pfn               179 mm/page_idle.c 	if (pfn >= max_pfn)
pfn               182 mm/page_idle.c 	end_pfn = pfn + count * BITS_PER_BYTE;
pfn               186 mm/page_idle.c 	for (; pfn < end_pfn; pfn++) {
pfn               187 mm/page_idle.c 		bit = pfn % BITMAP_CHUNK_BITS;
pfn               189 mm/page_idle.c 			page = page_idle_get_page(pfn);
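
The page_idle bitmap reads above map a file offset linearly onto pfns, one bit per page, emitted in 64-bit chunks. A standalone sketch of that mapping; the idleness test here is a stand-in for the real per-page check:

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_BYTE     8
#define BITMAP_CHUNK_BITS 64    /* the bitmap is read in u64 chunks */

int main(void)
{
        /* byte offset 'pos' into the bitmap file covers pfns starting
         * at pos * 8, one bit each */
        unsigned long pos = 24, count = 8;
        unsigned long pfn = pos * BITS_PER_BYTE;
        unsigned long end_pfn = pfn + count * BITS_PER_BYTE;
        uint64_t chunk = 0;

        for (; pfn < end_pfn; pfn++) {
                unsigned int bit = pfn % BITMAP_CHUNK_BITS;

                if (pfn % 3 == 0)       /* stand-in for "this page is idle" */
                        chunk |= 1ULL << bit;
                if (bit == BITMAP_CHUNK_BITS - 1) {
                        printf("chunk for pfns [%lu, %lu]: %#llx\n",
                               pfn - 63, pfn, (unsigned long long)chunk);
                        chunk = 0;
                }
        }
        return 0;
}
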
pfn                21 mm/page_isolation.c 	unsigned long flags, pfn;
pfn                38 mm/page_isolation.c 	pfn = page_to_pfn(page);
pfn                39 mm/page_isolation.c 	arg.start_pfn = pfn;
pfn                96 mm/page_isolation.c 	unsigned long pfn, buddy_pfn;
pfn               115 mm/page_isolation.c 			pfn = page_to_pfn(page);
pfn               116 mm/page_isolation.c 			buddy_pfn = __find_buddy_pfn(pfn, order);
pfn               117 mm/page_isolation.c 			buddy = page + (buddy_pfn - pfn);
pfn               147 mm/page_isolation.c __first_valid_page(unsigned long pfn, unsigned long nr_pages)
pfn               154 mm/page_isolation.c 		page = pfn_to_online_page(pfn + i);
pfn               196 mm/page_isolation.c 	unsigned long pfn;
pfn               204 mm/page_isolation.c 	for (pfn = start_pfn;
pfn               205 mm/page_isolation.c 	     pfn < end_pfn;
pfn               206 mm/page_isolation.c 	     pfn += pageblock_nr_pages) {
pfn               207 mm/page_isolation.c 		page = __first_valid_page(pfn, pageblock_nr_pages);
pfn               210 mm/page_isolation.c 				undo_pfn = pfn;
pfn               218 mm/page_isolation.c 	for (pfn = start_pfn;
pfn               219 mm/page_isolation.c 	     pfn < undo_pfn;
pfn               220 mm/page_isolation.c 	     pfn += pageblock_nr_pages) {
pfn               221 mm/page_isolation.c 		struct page *page = pfn_to_online_page(pfn);
pfn               236 mm/page_isolation.c 	unsigned long pfn;
pfn               242 mm/page_isolation.c 	for (pfn = start_pfn;
pfn               243 mm/page_isolation.c 	     pfn < end_pfn;
pfn               244 mm/page_isolation.c 	     pfn += pageblock_nr_pages) {
pfn               245 mm/page_isolation.c 		page = __first_valid_page(pfn, pageblock_nr_pages);
pfn               259 mm/page_isolation.c __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
pfn               264 mm/page_isolation.c 	while (pfn < end_pfn) {
pfn               265 mm/page_isolation.c 		if (!pfn_valid_within(pfn)) {
pfn               266 mm/page_isolation.c 			pfn++;
pfn               269 mm/page_isolation.c 		page = pfn_to_page(pfn);
pfn               276 mm/page_isolation.c 			pfn += 1 << page_order(page);
pfn               279 mm/page_isolation.c 			pfn++;
pfn               284 mm/page_isolation.c 	return pfn;
pfn               291 mm/page_isolation.c 	unsigned long pfn, flags;
pfn               300 mm/page_isolation.c 	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
pfn               301 mm/page_isolation.c 		page = __first_valid_page(pfn, pageblock_nr_pages);
pfn               306 mm/page_isolation.c 	if ((pfn < end_pfn) || !page)
pfn               311 mm/page_isolation.c 	pfn = __test_page_isolated_in_pageblock(start_pfn, end_pfn,
pfn               315 mm/page_isolation.c 	trace_test_pages_isolated(start_pfn, end_pfn, pfn);
pfn               317 mm/page_isolation.c 	return pfn < end_pfn ? -EBUSY : 0;
pfn               259 mm/page_owner.c 	unsigned long pfn = zone->zone_start_pfn, block_end_pfn;
pfn               260 mm/page_owner.c 	unsigned long end_pfn = pfn + zone->spanned_pages;
pfn               266 mm/page_owner.c 	pfn = zone->zone_start_pfn;
pfn               273 mm/page_owner.c 	for (; pfn < end_pfn; ) {
pfn               274 mm/page_owner.c 		page = pfn_to_online_page(pfn);
pfn               276 mm/page_owner.c 			pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
pfn               280 mm/page_owner.c 		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
pfn               285 mm/page_owner.c 		for (; pfn < block_end_pfn; pfn++) {
pfn               286 mm/page_owner.c 			if (!pfn_valid_within(pfn))
pfn               290 mm/page_owner.c 			page = pfn_to_page(pfn);
pfn               300 mm/page_owner.c 					pfn += (1UL << freepage_order) - 1;
pfn               323 mm/page_owner.c 				pfn = block_end_pfn;
pfn               326 mm/page_owner.c 			pfn += (1UL << page_owner->order) - 1;
pfn               338 mm/page_owner.c print_page_owner(char __user *buf, size_t count, unsigned long pfn,
pfn               365 mm/page_owner.c 			pfn,
pfn               367 mm/page_owner.c 			pfn >> pageblock_order,
pfn               459 mm/page_owner.c 	unsigned long pfn;
pfn               469 mm/page_owner.c 	pfn = min_low_pfn + *ppos;
pfn               472 mm/page_owner.c 	while (!pfn_valid(pfn) && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0)
pfn               473 mm/page_owner.c 		pfn++;
pfn               478 mm/page_owner.c 	for (; pfn < max_pfn; pfn++) {
pfn               483 mm/page_owner.c 		if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0 && !pfn_valid(pfn)) {
pfn               484 mm/page_owner.c 			pfn += MAX_ORDER_NR_PAGES - 1;
pfn               489 mm/page_owner.c 		if (!pfn_valid_within(pfn))
pfn               492 mm/page_owner.c 		page = pfn_to_page(pfn);
pfn               497 mm/page_owner.c 				pfn += (1UL << freepage_order) - 1;
pfn               525 mm/page_owner.c 		if (!IS_ALIGNED(pfn, 1 << page_owner->order))
pfn               537 mm/page_owner.c 		*ppos = (pfn - min_low_pfn) + 1;
pfn               539 mm/page_owner.c 		return print_page_owner(buf, count, pfn, page,
pfn               548 mm/page_owner.c 	unsigned long pfn = zone->zone_start_pfn;
pfn               557 mm/page_owner.c 	for (; pfn < end_pfn; ) {
pfn               560 mm/page_owner.c 		if (!pfn_valid(pfn)) {
pfn               561 mm/page_owner.c 			pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
pfn               565 mm/page_owner.c 		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
pfn               568 mm/page_owner.c 		for (; pfn < block_end_pfn; pfn++) {
pfn               572 mm/page_owner.c 			if (!pfn_valid_within(pfn))
pfn               575 mm/page_owner.c 			page = pfn_to_page(pfn);
pfn               591 mm/page_owner.c 					pfn += (1UL << order) - 1;
pfn                55 mm/page_vma_mapped.c static inline bool pfn_in_hpage(struct page *hpage, unsigned long pfn)
pfn                60 mm/page_vma_mapped.c 	return pfn >= hpage_pfn && pfn - hpage_pfn < hpage_nr_pages(hpage);
pfn                83 mm/page_vma_mapped.c 	unsigned long pfn;
pfn                94 mm/page_vma_mapped.c 		pfn = migration_entry_to_pfn(entry);
pfn               103 mm/page_vma_mapped.c 		pfn = device_private_entry_to_pfn(entry);
pfn               108 mm/page_vma_mapped.c 		pfn = pte_pfn(*pvmw->pte);
pfn               111 mm/page_vma_mapped.c 	return pfn_in_hpage(pvmw->page, pfn);
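
pfn_in_hpage() above tests membership in a compound page's pfn range; writing the upper bound as pfn - hpage_pfn < nr_pages avoids overflow where hpage_pfn + nr_pages could wrap. A standalone mirror of the test:

#include <stdbool.h>
#include <stdio.h>

/* subtraction form of the upper-bound check, as in pfn_in_hpage() */
static bool pfn_in_range(unsigned long pfn, unsigned long hpage_pfn,
                         unsigned long nr_pages)
{
        return pfn >= hpage_pfn && pfn - hpage_pfn < nr_pages;
}

int main(void)
{
        printf("0x205 in [0x200,+512): %d\n", pfn_in_range(0x205, 0x200, 512));
        printf("0x400 in [0x200,+512): %d\n", pfn_in_range(0x400, 0x200, 512));
        return 0;
}
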
pfn                61 mm/shuffle.c   static struct page * __meminit shuffle_valid_page(unsigned long pfn, int order)
pfn                71 mm/shuffle.c   	if (!pfn_valid_within(pfn))
pfn                75 mm/shuffle.c   	if (!pfn_present(pfn))
pfn                79 mm/shuffle.c   	page = pfn_to_page(pfn);
pfn               108 mm/sparse-vmemmap.c 	unsigned long pfn, nr_pfns, nr_align;
pfn               116 mm/sparse-vmemmap.c 	pfn = vmem_altmap_next_pfn(altmap);
pfn               119 mm/sparse-vmemmap.c 	nr_align = ALIGN(pfn, nr_align) - pfn;
pfn               125 mm/sparse-vmemmap.c 	pfn += nr_align;
pfn               128 mm/sparse-vmemmap.c 			__func__, pfn, altmap->alloc, altmap->align, nr_pfns);
pfn               129 mm/sparse-vmemmap.c 	return __va(__pfn_to_phys(pfn));
pfn               135 mm/sparse-vmemmap.c 	unsigned long pfn = pte_pfn(*pte);
pfn               136 mm/sparse-vmemmap.c 	int actual_node = early_pfn_to_nid(pfn);
pfn               248 mm/sparse-vmemmap.c struct page * __meminit __populate_section_memmap(unsigned long pfn,
pfn               259 mm/sparse-vmemmap.c 	end = ALIGN(pfn + nr_pages, PAGES_PER_SUBSECTION);
pfn               260 mm/sparse-vmemmap.c 	pfn &= PAGE_SUBSECTION_MASK;
pfn               261 mm/sparse-vmemmap.c 	nr_pages = end - pfn;
pfn               263 mm/sparse-vmemmap.c 	start = (unsigned long) pfn_to_page(pfn);
pfn               269 mm/sparse-vmemmap.c 	return pfn_to_page(pfn);
pfn               222 mm/sparse.c    static void subsection_mask_set(unsigned long *map, unsigned long pfn,
pfn               225 mm/sparse.c    	int idx = subsection_map_index(pfn);
pfn               226 mm/sparse.c    	int end = subsection_map_index(pfn + nr_pages - 1);
pfn               231 mm/sparse.c    void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages)
pfn               233 mm/sparse.c    	int end_sec = pfn_to_section_nr(pfn + nr_pages - 1);
pfn               234 mm/sparse.c    	unsigned long nr, start_sec = pfn_to_section_nr(pfn);
pfn               244 mm/sparse.c    				- (pfn & ~PAGE_SECTION_MASK));
pfn               246 mm/sparse.c    		subsection_mask_set(ms->usage->subsection_map, pfn, pfns);
pfn               249 mm/sparse.c    				pfns, subsection_map_index(pfn),
pfn               250 mm/sparse.c    				subsection_map_index(pfn + pfns - 1));
pfn               252 mm/sparse.c    		pfn += pfns;
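
subsection_map_init() above clamps each iteration to the pages remaining in the current section before setting subsection bits. The clamp, modeled standalone assuming 128 MiB sections of 32768 4 KiB pages (x86-64 SPARSEMEM):

    #include <stdio.h>

    #define PAGES_PER_SECTION 32768UL
    #define PAGE_SECTION_MASK (~(PAGES_PER_SECTION - 1))

    static unsigned long min_ul(unsigned long a, unsigned long b)
    {
        return a < b ? a : b;
    }

    int main(void)
    {
        unsigned long pfn = 30000, nr_pages = 70000;

        while (nr_pages) {
            /* Pages left in the section that contains pfn. */
            unsigned long pfns = min_ul(nr_pages,
                    PAGES_PER_SECTION - (pfn & ~PAGE_SECTION_MASK));

            printf("section %lu: pfns [%lu, %lu)\n",
                   pfn / PAGES_PER_SECTION, pfn, pfn + pfns);
            pfn += pfns;
            nr_pages -= pfns;
        }
        return 0;
    }
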
pfn               260 mm/sparse.c    	unsigned long pfn;
pfn               277 mm/sparse.c    	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
pfn               278 mm/sparse.c    		unsigned long section = pfn_to_section_nr(pfn);
pfn               451 mm/sparse.c    struct page __init *__populate_section_memmap(unsigned long pfn,
pfn               543 mm/sparse.c    		unsigned long pfn = section_nr_to_pfn(pnum);
pfn               548 mm/sparse.c    		map = __populate_section_memmap(pfn, PAGES_PER_SECTION,
pfn               611 mm/sparse.c    	unsigned long pfn;
pfn               613 mm/sparse.c    	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
pfn               614 mm/sparse.c    		unsigned long section_nr = pfn_to_section_nr(pfn);
pfn               630 mm/sparse.c    	unsigned long pfn;
pfn               632 mm/sparse.c    	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
pfn               633 mm/sparse.c    		unsigned long section_nr = pfn_to_section_nr(pfn);
pfn               650 mm/sparse.c    static struct page * __meminit populate_section_memmap(unsigned long pfn,
pfn               653 mm/sparse.c    	return __populate_section_memmap(pfn, nr_pages, nid, altmap);
pfn               656 mm/sparse.c    static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
pfn               659 mm/sparse.c    	unsigned long start = (unsigned long) pfn_to_page(pfn);
pfn               672 mm/sparse.c    struct page * __meminit populate_section_memmap(unsigned long pfn,
pfn               694 mm/sparse.c    static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
pfn               697 mm/sparse.c    	struct page *memmap = pfn_to_page(pfn);
pfn               737 mm/sparse.c    static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
pfn               742 mm/sparse.c    	struct mem_section *ms = __pfn_to_section(pfn);
pfn               749 mm/sparse.c    	subsection_mask_set(map, pfn, nr_pages);
pfn               755 mm/sparse.c    				pfn, nr_pages))
pfn               778 mm/sparse.c    		unsigned long section_nr = pfn_to_section_nr(pfn);
pfn               803 mm/sparse.c    		depopulate_section_memmap(pfn, nr_pages, altmap);
pfn               809 mm/sparse.c    static struct page * __meminit section_activate(int nid, unsigned long pfn,
pfn               813 mm/sparse.c    	struct mem_section *ms = __pfn_to_section(pfn);
pfn               819 mm/sparse.c    	subsection_mask_set(map, pfn, nr_pages);
pfn               852 mm/sparse.c    		return pfn_to_page(pfn);
pfn               854 mm/sparse.c    	memmap = populate_section_memmap(pfn, nr_pages, nid, altmap);
pfn               856 mm/sparse.c    		section_deactivate(pfn, nr_pages, altmap);
pfn               938 mm/sparse.c    void sparse_remove_section(struct mem_section *ms, unsigned long pfn,
pfn               942 mm/sparse.c    	clear_hwpoisoned_pages(pfn_to_page(pfn) + map_offset,
pfn               944 mm/sparse.c    	section_deactivate(pfn, nr_pages, altmap);
pfn               646 mm/swap_state.c 	unsigned long faddr, pfn, fpfn;
pfn               671 mm/swap_state.c 	pfn = PFN_DOWN(SWAP_RA_ADDR(ra_val));
pfn               674 mm/swap_state.c 	ra_info->win = win = __swapin_nr_pages(pfn, fpfn, hits,
pfn               685 mm/swap_state.c 	if (fpfn == pfn + 1)
pfn               687 mm/swap_state.c 	else if (pfn == fpfn + 1)
pfn               702 mm/swap_state.c 	for (pfn = start; pfn != end; pfn++)
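
The mm/swap_state.c lines above derive the readahead window's direction from pfn adjacency: a fault one past the previous access extends the window forward, one before it extends backward, and anything else straddles the fault. A sketch of that placement only; the kernel additionally clamps start/end to the VMA:

    #include <stdio.h>

    /* Model of the window placement in swap_ra_info(): fpfn is the
     * faulting virtual pfn, pfn the previously recorded one. */
    static void ra_window(unsigned long fpfn, unsigned long pfn,
                          unsigned long win, unsigned long *start,
                          unsigned long *end)
    {
        if (fpfn == pfn + 1) {          /* sequential forward access  */
            *start = fpfn;
            *end = fpfn + win;
        } else if (pfn == fpfn + 1) {   /* sequential backward access */
            *start = fpfn - win + 1;
            *end = fpfn + 1;
        } else {                        /* no pattern: straddle fault */
            unsigned long left = (win - 1) / 2;
            *start = fpfn - left;
            *end = fpfn + win - left;
        }
    }

    int main(void)
    {
        unsigned long s, e;

        ra_window(101, 100, 8, &s, &e); /* forward: [101, 109) */
        printf("[%lu, %lu)\n", s, e);
        return 0;
    }
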
pfn              1435 mm/vmstat.c    	unsigned long pfn;
pfn              1440 mm/vmstat.c    	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
pfn              1443 mm/vmstat.c    		page = pfn_to_online_page(pfn);
pfn              1448 mm/vmstat.c    		if (!memmap_valid_within(pfn, page, zone))
pfn               970 net/xdp/xsk.c  	unsigned long pfn;
pfn              1002 net/xdp/xsk.c  	pfn = virt_to_phys(q->ring) >> PAGE_SHIFT;
pfn              1003 net/xdp/xsk.c  	return remap_pfn_range(vma, vma->vm_start, pfn,
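
xsk_mmap() above converts the ring's kernel virtual address to a frame number and hands the whole VMA to remap_pfn_range(). The driver-side shape of that pattern, as a sketch of a hypothetical module (demo_buf and demo_size are assumed to be a page-aligned buffer set up elsewhere; this is a fragment, not a buildable module):

    #include <linux/fs.h>
    #include <linux/io.h>
    #include <linux/mm.h>

    static void *demo_buf;            /* hypothetical: page-aligned buffer */
    static unsigned long demo_size;   /* hypothetical: its size in bytes   */

    static int demo_mmap(struct file *filp, struct vm_area_struct *vma)
    {
        unsigned long size = vma->vm_end - vma->vm_start;
        unsigned long pfn;

        if (size > demo_size)
            return -EINVAL;

        /* Translate the kernel virtual address to a frame number. */
        pfn = virt_to_phys(demo_buf) >> PAGE_SHIFT;
        return remap_pfn_range(vma, vma->vm_start, pfn, size,
                               vma->vm_page_prot);
    }
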
pfn               444 sound/soc/intel/haswell/sst-haswell-pcm.c 		u32 pfn = snd_sgbuf_get_addr(dmab, i * PAGE_SIZE) >> PAGE_SHIFT;
pfn               447 sound/soc/intel/haswell/sst-haswell-pcm.c 		dev_dbg(rtd->dev, "pfn i %i idx %d pfn %x\n", i, idx, pfn);
pfn               452 sound/soc/intel/haswell/sst-haswell-pcm.c 			*pg_table |= (pfn << 4);
pfn               454 sound/soc/intel/haswell/sst-haswell-pcm.c 			*pg_table |= pfn;
pfn               225 sound/soc/sof/core.c 		u32 pfn = snd_sgbuf_get_addr(dmab, i * PAGE_SIZE) >> PAGE_SHIFT;
pfn               228 sound/soc/sof/core.c 		dev_vdbg(sdev->dev, "pfn i %i idx %d pfn %x\n", i, idx, pfn);
pfn               247 sound/soc/sof/core.c 			put_unaligned_le32((pg_table[0] & 0xf) | pfn << 4,
pfn               250 sound/soc/sof/core.c 			put_unaligned_le32(pfn, pg_table);
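
Both audio drivers above pack 20-bit pfns into a DSP page table two-per-5-bytes: entry i lands at byte offset (5*i)/2, and odd entries are shifted up a nibble so they share a byte with the previous one. A standalone model of that packing, using explicit little-endian stores in place of the drivers' put_unaligned_le32():

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t get_le32(const uint8_t *p)
    {
        return p[0] | p[1] << 8 | (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
    }

    static void put_le32(uint8_t *p, uint32_t v)
    {
        p[0] = v; p[1] = v >> 8; p[2] = v >> 16; p[3] = v >> 24;
    }

    int main(void)
    {
        uint32_t pfns[4] = { 0x12345, 0xabcde, 0x54321, 0xedcba };
        uint8_t table[16] = { 0 };

        for (int i = 0; i < 4; i++) {
            uint8_t *pg = &table[(5 * i) / 2];  /* entry's byte offset */

            if (i & 1)  /* odd entry: preserve the shared low nibble */
                put_le32(pg, (pg[0] & 0xf) | pfns[i] << 4);
            else
                put_le32(pg, get_le32(pg) | pfns[i]);
        }
        for (int i = 0; i < 10; i++)
            printf("%02x ", table[i]);
        printf("\n");
        return 0;
    }
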
pfn                11 tools/testing/nvdimm/pmem-dax.c 		long nr_pages, void **kaddr, pfn_t *pfn)
pfn                29 tools/testing/nvdimm/pmem-dax.c 		if (pfn)
pfn                30 tools/testing/nvdimm/pmem-dax.c 			*pfn = page_to_pfn_t(page);
pfn                39 tools/testing/nvdimm/pmem-dax.c 	if (pfn)
pfn                40 tools/testing/nvdimm/pmem-dax.c 		*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);
pfn                52 tools/testing/scatterlist/linux/mm.h #define pfn_to_page(pfn) (void *)((pfn) * PAGE_SIZE)
pfn                26 tools/testing/scatterlist/main.c 		unsigned *pfn;
pfn                31 tools/testing/scatterlist/main.c 		{ -EINVAL, 1, pfn(0), PAGE_SIZE, PAGE_SIZE + 1, 1 },
pfn                32 tools/testing/scatterlist/main.c 		{ -EINVAL, 1, pfn(0), PAGE_SIZE, 0, 1 },
pfn                33 tools/testing/scatterlist/main.c 		{ -EINVAL, 1, pfn(0), PAGE_SIZE, sgmax + 1, 1 },
pfn                34 tools/testing/scatterlist/main.c 		{ 0, 1, pfn(0), PAGE_SIZE, sgmax, 1 },
pfn                35 tools/testing/scatterlist/main.c 		{ 0, 1, pfn(0), 1, sgmax, 1 },
pfn                36 tools/testing/scatterlist/main.c 		{ 0, 2, pfn(0, 1), 2 * PAGE_SIZE, sgmax, 1 },
pfn                37 tools/testing/scatterlist/main.c 		{ 0, 2, pfn(1, 0), 2 * PAGE_SIZE, sgmax, 2 },
pfn                38 tools/testing/scatterlist/main.c 		{ 0, 3, pfn(0, 1, 2), 3 * PAGE_SIZE, sgmax, 1 },
pfn                39 tools/testing/scatterlist/main.c 		{ 0, 3, pfn(0, 2, 1), 3 * PAGE_SIZE, sgmax, 3 },
pfn                40 tools/testing/scatterlist/main.c 		{ 0, 3, pfn(0, 1, 3), 3 * PAGE_SIZE, sgmax, 2 },
pfn                41 tools/testing/scatterlist/main.c 		{ 0, 3, pfn(1, 2, 4), 3 * PAGE_SIZE, sgmax, 2 },
pfn                42 tools/testing/scatterlist/main.c 		{ 0, 3, pfn(1, 3, 4), 3 * PAGE_SIZE, sgmax, 2 },
pfn                43 tools/testing/scatterlist/main.c 		{ 0, 4, pfn(0, 1, 3, 4), 4 * PAGE_SIZE, sgmax, 2 },
pfn                44 tools/testing/scatterlist/main.c 		{ 0, 5, pfn(0, 1, 3, 4, 5), 5 * PAGE_SIZE, sgmax, 2 },
pfn                45 tools/testing/scatterlist/main.c 		{ 0, 5, pfn(0, 1, 3, 4, 6), 5 * PAGE_SIZE, sgmax, 3 },
pfn                46 tools/testing/scatterlist/main.c 		{ 0, 5, pfn(0, 1, 2, 3, 4), 5 * PAGE_SIZE, sgmax, 1 },
pfn                47 tools/testing/scatterlist/main.c 		{ 0, 5, pfn(0, 1, 2, 3, 4), 5 * PAGE_SIZE, 2 * PAGE_SIZE, 3 },
pfn                48 tools/testing/scatterlist/main.c 		{ 0, 6, pfn(0, 1, 2, 3, 4, 5), 6 * PAGE_SIZE, 2 * PAGE_SIZE, 3 },
pfn                49 tools/testing/scatterlist/main.c 		{ 0, 6, pfn(0, 2, 3, 4, 5, 6), 6 * PAGE_SIZE, 2 * PAGE_SIZE, 4 },
pfn                50 tools/testing/scatterlist/main.c 		{ 0, 6, pfn(0, 1, 3, 4, 5, 6), 6 * PAGE_SIZE, 2 * PAGE_SIZE, 3 },
pfn                60 tools/testing/scatterlist/main.c 		set_pages(pages, test->pfn, test->num_pages);
pfn               110 tools/testing/selftests/vm/transhuge-stress.c 			int64_t pfn;
pfn               112 tools/testing/selftests/vm/transhuge-stress.c 			pfn = allocate_transhuge(p);
pfn               114 tools/testing/selftests/vm/transhuge-stress.c 			if (pfn < 0) {
pfn               117 tools/testing/selftests/vm/transhuge-stress.c 				size_t idx = pfn >> (HPAGE_SHIFT - PAGE_SHIFT);
pfn               308 tools/vm/page-types.c 	unsigned long pfn;
pfn               311 tools/vm/page-types.c 		pfn = PM_PFRAME(val);
pfn               313 tools/vm/page-types.c 		pfn = 0;
pfn               315 tools/vm/page-types.c 	return pfn;
pfn               730 tools/vm/page-types.c 	unsigned long pfn;
pfn               740 tools/vm/page-types.c 			pfn = pagemap_pfn(buf[i]);
pfn               741 tools/vm/page-types.c 			if (pfn)
pfn               742 tools/vm/page-types.c 				walk_pfn(index + i, pfn, 1, buf[i]);
pfn               974 tools/vm/page-types.c 	unsigned long nr_pages, pfn, i;
pfn              1026 tools/vm/page-types.c 			pfn = pagemap_pfn(buf[i]);
pfn              1027 tools/vm/page-types.c 			if (!pfn)
pfn              1029 tools/vm/page-types.c 			if (!kpageflags_read(&flags, pfn, 1))
pfn              1031 tools/vm/page-types.c 			if (!kpagecgroup_read(&cgroup, pfn, 1))
pfn              1033 tools/vm/page-types.c 			if (!kpagecount_read(&mapcnt, pfn, 1))
pfn              1040 tools/vm/page-types.c 			add_page(off / page_size + i, pfn,
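
pagemap_pfn() above masks the PFN field out of a 64-bit /proc/pid/pagemap entry. A minimal standalone reader following the documented format (one 8-byte entry per virtual page; bits 0-54 hold the pfn, bit 63 means present); note that recent kernels report pfn 0 to readers without CAP_SYS_ADMIN:

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>
    #include <unistd.h>
    #include <fcntl.h>

    #define PM_PFRAME_MASK  ((1ULL << 55) - 1)  /* bits 0-54: pfn   */
    #define PM_PRESENT      (1ULL << 63)        /* page is in RAM   */

    int main(void)
    {
        long psize = sysconf(_SC_PAGESIZE);
        void *p = malloc(psize);
        uint64_t entry;
        int fd;

        *(volatile char *)p = 1;    /* fault the page in */

        fd = open("/proc/self/pagemap", O_RDONLY);
        if (fd < 0)
            return 1;
        if (pread(fd, &entry, sizeof(entry),
                  ((uintptr_t)p / psize) * sizeof(entry)) != sizeof(entry))
            return 1;

        if (entry & PM_PRESENT)
            printf("pfn 0x%llx\n",
                   (unsigned long long)(entry & PM_PFRAME_MASK));
        close(fd);
        return 0;
    }
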
pfn                87 virt/kvm/arm/mmu.c static bool kvm_is_device_pfn(unsigned long pfn)
pfn                89 virt/kvm/arm/mmu.c 	return !pfn_valid(pfn);
pfn               611 virt/kvm/arm/mmu.c 				    unsigned long end, unsigned long pfn,
pfn               620 virt/kvm/arm/mmu.c 		kvm_set_pte(pte, kvm_pfn_pte(pfn, prot));
pfn               622 virt/kvm/arm/mmu.c 		pfn++;
pfn               627 virt/kvm/arm/mmu.c 				   unsigned long end, unsigned long pfn,
pfn               652 virt/kvm/arm/mmu.c 		create_hyp_pte_mappings(pmd, addr, next, pfn, prot);
pfn               653 virt/kvm/arm/mmu.c 		pfn += (next - addr) >> PAGE_SHIFT;
pfn               660 virt/kvm/arm/mmu.c 				   unsigned long end, unsigned long pfn,
pfn               683 virt/kvm/arm/mmu.c 		ret = create_hyp_pmd_mappings(pud, addr, next, pfn, prot);
pfn               686 virt/kvm/arm/mmu.c 		pfn += (next - addr) >> PAGE_SHIFT;
pfn               694 virt/kvm/arm/mmu.c 				 unsigned long pfn, pgprot_t prot)
pfn               719 virt/kvm/arm/mmu.c 		err = create_hyp_pud_mappings(pgd, addr, next, pfn, prot);
pfn               722 virt/kvm/arm/mmu.c 		pfn += (next - addr) >> PAGE_SHIFT;
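
create_hyp_{pte,pmd,pud}_mappings above all follow one shape: map up to the next table boundary (or the range end, whichever comes first), then advance pfn by exactly the pages the inner level consumed. The boundary/advance arithmetic, modeled at a single level with BLOCK as a hypothetical stand-in for the PMD span:

    #include <stdio.h>

    #define PAGE_SZ 4096UL
    #define BLOCK   (512 * PAGE_SZ)
    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
        unsigned long addr = 0x1000, end = 0x400000, pfn = 100;

        while (addr < end) {
            /* End of the current block, clamped to the range end
             * (mirrors the kernel's pmd_addr_end()). */
            unsigned long next = MIN((addr + BLOCK) & ~(BLOCK - 1), end);

            printf("level-2 maps [%#lx, %#lx) -> pfn %lu\n",
                   addr, next, pfn);
            pfn += (next - addr) / PAGE_SZ;
            addr = next;
        }
        return 0;
    }
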
pfn              1344 virt/kvm/arm/mmu.c 	unsigned long pfn;
pfn              1348 virt/kvm/arm/mmu.c 	pfn = __phys_to_pfn(pa);
pfn              1351 virt/kvm/arm/mmu.c 		pte_t pte = kvm_pfn_pte(pfn, PAGE_S2_DEVICE);
pfn              1368 virt/kvm/arm/mmu.c 		pfn++;
pfn              1378 virt/kvm/arm/mmu.c 	kvm_pfn_t pfn = *pfnp;
pfn              1380 virt/kvm/arm/mmu.c 	struct page *page = pfn_to_page(pfn);
pfn              1408 virt/kvm/arm/mmu.c 		VM_BUG_ON((gfn & mask) != (pfn & mask));
pfn              1409 virt/kvm/arm/mmu.c 		if (pfn & mask) {
pfn              1411 virt/kvm/arm/mmu.c 			kvm_release_pfn_clean(pfn);
pfn              1412 virt/kvm/arm/mmu.c 			pfn &= ~mask;
pfn              1413 virt/kvm/arm/mmu.c 			kvm_get_pfn(pfn);
pfn              1414 virt/kvm/arm/mmu.c 			*pfnp = pfn;
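
transparent_hugepage_adjust() above backs a stage-2 block mapping out of a THP by aligning both the guest frame and the host pfn down to the huge-page boundary, after asserting they are misaligned identically. The mask arithmetic, modeled standalone assuming 2 MiB PMDs over 4 KiB pages:

    #include <stdio.h>

    int main(void)
    {
        /* 2 MiB PMD over 4 KiB pages: 512 frames per huge page. */
        unsigned long mask = (1UL << (21 - 12)) - 1;
        unsigned long pfn = 0x12345, gfn = 0xabc145;

        /* A block mapping is only legal if gfn and pfn sit at the
         * same offset within the huge page. */
        if ((gfn & mask) == (pfn & mask)) {
            pfn &= ~mask;   /* back up to the head frame */
            gfn &= ~mask;
            printf("map 2M block: gfn %#lx -> pfn %#lx\n", gfn, pfn);
        }
        return 0;
    }
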
pfn              1589 virt/kvm/arm/mmu.c static void clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
pfn              1591 virt/kvm/arm/mmu.c 	__clean_dcache_guest_page(pfn, size);
pfn              1594 virt/kvm/arm/mmu.c static void invalidate_icache_guest_page(kvm_pfn_t pfn, unsigned long size)
pfn              1596 virt/kvm/arm/mmu.c 	__invalidate_icache_guest_page(pfn, size);
pfn              1681 virt/kvm/arm/mmu.c 	kvm_pfn_t pfn;
pfn              1742 virt/kvm/arm/mmu.c 	pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
pfn              1743 virt/kvm/arm/mmu.c 	if (pfn == KVM_PFN_ERR_HWPOISON) {
pfn              1747 virt/kvm/arm/mmu.c 	if (is_error_noslot_pfn(pfn))
pfn              1750 virt/kvm/arm/mmu.c 	if (kvm_is_device_pfn(pfn)) {
pfn              1786 virt/kvm/arm/mmu.c 		    transparent_hugepage_adjust(&pfn, &fault_ipa))
pfn              1791 virt/kvm/arm/mmu.c 		kvm_set_pfn_dirty(pfn);
pfn              1794 virt/kvm/arm/mmu.c 		clean_dcache_guest_page(pfn, vma_pagesize);
pfn              1797 virt/kvm/arm/mmu.c 		invalidate_icache_guest_page(pfn, vma_pagesize);
pfn              1811 virt/kvm/arm/mmu.c 		pud_t new_pud = kvm_pfn_pud(pfn, mem_type);
pfn              1822 virt/kvm/arm/mmu.c 		pmd_t new_pmd = kvm_pfn_pmd(pfn, mem_type);
pfn              1834 virt/kvm/arm/mmu.c 		pte_t new_pte = kvm_pfn_pte(pfn, mem_type);
pfn              1849 virt/kvm/arm/mmu.c 	kvm_set_pfn_accessed(pfn);
pfn              1850 virt/kvm/arm/mmu.c 	kvm_release_pfn_clean(pfn);
pfn              1866 virt/kvm/arm/mmu.c 	kvm_pfn_t pfn;
pfn              1878 virt/kvm/arm/mmu.c 		pfn = kvm_pud_pfn(*pud);
pfn              1882 virt/kvm/arm/mmu.c 		pfn = pmd_pfn(*pmd);
pfn              1886 virt/kvm/arm/mmu.c 		pfn = pte_pfn(*pte);
pfn              1893 virt/kvm/arm/mmu.c 		kvm_set_pfn_accessed(pfn);
pfn              2082 virt/kvm/arm/mmu.c 	kvm_pfn_t pfn = pte_pfn(pte);
pfn              2094 virt/kvm/arm/mmu.c 	clean_dcache_guest_page(pfn, PAGE_SIZE);
pfn              2095 virt/kvm/arm/mmu.c 	stage2_pte = kvm_pfn_pte(pfn, PAGE_S2);
pfn               165 virt/kvm/kvm_main.c bool kvm_is_zone_device_pfn(kvm_pfn_t pfn)
pfn               173 virt/kvm/kvm_main.c 	if (!pfn_valid(pfn) || WARN_ON_ONCE(!page_count(pfn_to_page(pfn))))
pfn               176 virt/kvm/kvm_main.c 	return is_zone_device_page(pfn_to_page(pfn));
pfn               179 virt/kvm/kvm_main.c bool kvm_is_reserved_pfn(kvm_pfn_t pfn)
pfn               186 virt/kvm/kvm_main.c 	if (pfn_valid(pfn))
pfn               187 virt/kvm/kvm_main.c 		return PageReserved(pfn_to_page(pfn)) &&
pfn               188 virt/kvm/kvm_main.c 		       !kvm_is_zone_device_pfn(pfn);
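
kvm_is_reserved_pfn() above carves ZONE_DEVICE pages out of "reserved": they can carry PageReserved yet are refcounted like ordinary pages. A sketch of that predicate composition; valid(), page_reserved() and zone_device() are hypothetical stand-ins for pfn_valid(), PageReserved() and is_zone_device_page():

    #include <stdio.h>
    #include <stdbool.h>

    static bool valid(unsigned long pfn)         { return pfn < 1024; }
    static bool page_reserved(unsigned long pfn) { return pfn < 32 || pfn >= 900; }
    static bool zone_device(unsigned long pfn)   { return pfn >= 900; }

    static bool is_reserved(unsigned long pfn)
    {
        if (valid(pfn))
            return page_reserved(pfn) && !zone_device(pfn);
        return true;    /* no struct page: treat as reserved */
    }

    int main(void)
    {
        printf("%d %d %d\n",
               is_reserved(8),      /* firmware-ish page: reserved  */
               is_reserved(100),    /* ordinary RAM: not reserved   */
               is_reserved(950));   /* ZONE_DEVICE: not reserved    */
        return 0;
    }
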
pfn              1519 virt/kvm/kvm_main.c 			    bool *writable, kvm_pfn_t *pfn)
pfn              1534 virt/kvm/kvm_main.c 		*pfn = page_to_pfn(page[0]);
pfn              1549 virt/kvm/kvm_main.c 			   bool *writable, kvm_pfn_t *pfn)
pfn              1579 virt/kvm/kvm_main.c 	*pfn = page_to_pfn(page);
pfn              1599 virt/kvm/kvm_main.c 	unsigned long pfn;
pfn              1602 virt/kvm/kvm_main.c 	r = follow_pfn(vma, addr, &pfn);
pfn              1617 virt/kvm/kvm_main.c 		r = follow_pfn(vma, addr, &pfn);
pfn              1637 virt/kvm/kvm_main.c 	kvm_get_pfn(pfn);
pfn              1639 virt/kvm/kvm_main.c 	*p_pfn = pfn;
pfn              1661 virt/kvm/kvm_main.c 	kvm_pfn_t pfn = 0;
pfn              1667 virt/kvm/kvm_main.c 	if (hva_to_pfn_fast(addr, write_fault, writable, &pfn))
pfn              1668 virt/kvm/kvm_main.c 		return pfn;
pfn              1673 virt/kvm/kvm_main.c 	npages = hva_to_pfn_slow(addr, async, write_fault, writable, &pfn);
pfn              1675 virt/kvm/kvm_main.c 		return pfn;
pfn              1680 virt/kvm/kvm_main.c 		pfn = KVM_PFN_ERR_HWPOISON;
pfn              1688 virt/kvm/kvm_main.c 		pfn = KVM_PFN_ERR_FAULT;
pfn              1690 virt/kvm/kvm_main.c 		r = hva_to_pfn_remapped(vma, addr, async, write_fault, writable, &pfn);
pfn              1694 virt/kvm/kvm_main.c 			pfn = KVM_PFN_ERR_FAULT;
pfn              1698 virt/kvm/kvm_main.c 		pfn = KVM_PFN_ERR_FAULT;
pfn              1702 virt/kvm/kvm_main.c 	return pfn;
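
hva_to_pfn() above is a three-stage fallback: an atomic fast path, a sleeping slow path that may fault pages in, and finally a VMA walk for VM_IO/VM_PFNMAP areas with no struct page behind them. A control-flow model only; try_fast(), try_slow() and try_remapped() are hypothetical stand-ins for hva_to_pfn_fast/slow/remapped():

    #include <stdio.h>
    #include <stdbool.h>

    #define PFN_ERR (~0UL)

    static bool try_fast(unsigned long addr, unsigned long *pfn)
    { (void)addr; (void)pfn; return false; }
    static int try_slow(unsigned long addr, unsigned long *pfn)
    { *pfn = addr >> 12; return 1; }
    static int try_remapped(unsigned long addr, unsigned long *pfn)
    { (void)addr; (void)pfn; return -1; }

    static unsigned long lookup_pfn(unsigned long addr)
    {
        unsigned long pfn;

        if (try_fast(addr, &pfn))       /* atomic: no sleeping         */
            return pfn;
        if (try_slow(addr, &pfn) == 1)  /* may sleep and fault in      */
            return pfn;
        if (try_remapped(addr, &pfn) == 0) /* VM_IO / VM_PFNMAP areas  */
            return pfn;
        return PFN_ERR;
    }

    int main(void)
    {
        printf("pfn %#lx\n", lookup_pfn(0x7f0000001000UL));
        return 0;
    }
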
pfn              1795 virt/kvm/kvm_main.c static struct page *kvm_pfn_to_page(kvm_pfn_t pfn)
pfn              1797 virt/kvm/kvm_main.c 	if (is_error_noslot_pfn(pfn))
pfn              1800 virt/kvm/kvm_main.c 	if (kvm_is_reserved_pfn(pfn)) {
pfn              1805 virt/kvm/kvm_main.c 	return pfn_to_page(pfn);
pfn              1810 virt/kvm/kvm_main.c 	kvm_pfn_t pfn;
pfn              1812 virt/kvm/kvm_main.c 	pfn = gfn_to_pfn(kvm, gfn);
pfn              1814 virt/kvm/kvm_main.c 	return kvm_pfn_to_page(pfn);
pfn              1818 virt/kvm/kvm_main.c void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache)
pfn              1820 virt/kvm/kvm_main.c 	if (pfn == 0)
pfn              1824 virt/kvm/kvm_main.c 		cache->pfn = cache->gfn = 0;
pfn              1827 virt/kvm/kvm_main.c 		kvm_release_pfn_dirty(pfn);
pfn              1829 virt/kvm/kvm_main.c 		kvm_release_pfn_clean(pfn);
pfn              1835 virt/kvm/kvm_main.c 	kvm_release_pfn(cache->pfn, cache->dirty, cache);
pfn              1837 virt/kvm/kvm_main.c 	cache->pfn = gfn_to_pfn_memslot(slot, gfn);
pfn              1848 virt/kvm/kvm_main.c 	kvm_pfn_t pfn;
pfn              1858 virt/kvm/kvm_main.c 		if (!cache->pfn || cache->gfn != gfn ||
pfn              1864 virt/kvm/kvm_main.c 		pfn = cache->pfn;
pfn              1868 virt/kvm/kvm_main.c 		pfn = gfn_to_pfn_memslot(slot, gfn);
pfn              1870 virt/kvm/kvm_main.c 	if (is_error_noslot_pfn(pfn))
pfn              1873 virt/kvm/kvm_main.c 	if (pfn_valid(pfn)) {
pfn              1874 virt/kvm/kvm_main.c 		page = pfn_to_page(pfn);
pfn              1881 virt/kvm/kvm_main.c 		hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
pfn              1892 virt/kvm/kvm_main.c 	map->pfn = pfn;
pfn              1943 virt/kvm/kvm_main.c 		kvm_release_pfn(map->pfn, dirty, NULL);
pfn              1967 virt/kvm/kvm_main.c 	kvm_pfn_t pfn;
pfn              1969 virt/kvm/kvm_main.c 	pfn = kvm_vcpu_gfn_to_pfn(vcpu, gfn);
pfn              1971 virt/kvm/kvm_main.c 	return kvm_pfn_to_page(pfn);
pfn              1983 virt/kvm/kvm_main.c void kvm_release_pfn_clean(kvm_pfn_t pfn)
pfn              1985 virt/kvm/kvm_main.c 	if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn))
pfn              1986 virt/kvm/kvm_main.c 		put_page(pfn_to_page(pfn));
pfn              1998 virt/kvm/kvm_main.c void kvm_release_pfn_dirty(kvm_pfn_t pfn)
pfn              2000 virt/kvm/kvm_main.c 	kvm_set_pfn_dirty(pfn);
pfn              2001 virt/kvm/kvm_main.c 	kvm_release_pfn_clean(pfn);
pfn              2005 virt/kvm/kvm_main.c void kvm_set_pfn_dirty(kvm_pfn_t pfn)
pfn              2007 virt/kvm/kvm_main.c 	if (!kvm_is_reserved_pfn(pfn) && !kvm_is_zone_device_pfn(pfn)) {
pfn              2008 virt/kvm/kvm_main.c 		struct page *page = pfn_to_page(pfn);
pfn              2015 virt/kvm/kvm_main.c void kvm_set_pfn_accessed(kvm_pfn_t pfn)
pfn              2017 virt/kvm/kvm_main.c 	if (!kvm_is_reserved_pfn(pfn) && !kvm_is_zone_device_pfn(pfn))
pfn              2018 virt/kvm/kvm_main.c 		mark_page_accessed(pfn_to_page(pfn));
pfn              2022 virt/kvm/kvm_main.c void kvm_get_pfn(kvm_pfn_t pfn)
pfn              2024 virt/kvm/kvm_main.c 	if (!kvm_is_reserved_pfn(pfn))
pfn              2025 virt/kvm/kvm_main.c 		get_page(pfn_to_page(pfn));
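
The kvm_main.c accessors above all share one discipline: struct-page operations (get_page/put_page, SetPageDirty, mark_page_accessed) are applied only when the pfn is page-backed and not reserved or ZONE_DEVICE. A standalone sketch of that guard pattern; page_backed() and reserved() are hypothetical stand-ins for the is_error_noslot_pfn()/kvm_is_reserved_pfn() checks:

    #include <stdio.h>
    #include <stdbool.h>

    static bool page_backed(unsigned long pfn) { return pfn < 1024; }
    static bool reserved(unsigned long pfn)    { return pfn < 16; }

    static void release_pfn(unsigned long pfn, bool dirty)
    {
        if (!page_backed(pfn) || reserved(pfn))
            return;     /* nothing refcounted to drop */
        if (dirty)
            printf("SetPageDirty(pfn %lu)\n", pfn);
        printf("put_page(pfn %lu)\n", pfn);
    }

    int main(void)
    {
        release_pfn(8, true);   /* reserved: no-op          */
        release_pfn(100, true); /* dirty, then drop the ref */
        return 0;
    }
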