limit_pfn   42 arch/x86/include/asm/e820/api.h extern void e820__register_nosave_regions(unsigned long limit_pfn);
limit_pfn  852 arch/x86/kernel/cpu/mtrr/cleanup.c real_trim_memory(unsigned long start_pfn, unsigned long limit_pfn)
limit_pfn  859 arch/x86/kernel/cpu/mtrr/cleanup.c trim_size = limit_pfn;
limit_pfn  738 arch/x86/kernel/e820.c void __init e820__register_nosave_regions(unsigned long limit_pfn)
limit_pfn  754 arch/x86/kernel/e820.c if (pfn >= limit_pfn)
limit_pfn  815 arch/x86/kernel/e820.c static unsigned long __init e820_end_pfn(unsigned long limit_pfn, enum e820_type type)
limit_pfn  832 arch/x86/kernel/e820.c if (start_pfn >= limit_pfn)
limit_pfn  834 arch/x86/kernel/e820.c if (end_pfn > limit_pfn) {
limit_pfn  835 arch/x86/kernel/e820.c last_pfn = limit_pfn;
limit_pfn  340 arch/x86/mm/init.c unsigned long start_pfn, end_pfn, limit_pfn;
limit_pfn  344 arch/x86/mm/init.c limit_pfn = PFN_DOWN(end);
limit_pfn  362 arch/x86/mm/init.c if (end_pfn > limit_pfn)
limit_pfn  363 arch/x86/mm/init.c end_pfn = limit_pfn;
limit_pfn  372 arch/x86/mm/init.c end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));
limit_pfn  375 arch/x86/mm/init.c if (end_pfn > round_down(limit_pfn, PFN_DOWN(PMD_SIZE)))
limit_pfn  376 arch/x86/mm/init.c end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));
limit_pfn  388 arch/x86/mm/init.c end_pfn = round_down(limit_pfn, PFN_DOWN(PUD_SIZE));
limit_pfn  398 arch/x86/mm/init.c end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));
limit_pfn  408 arch/x86/mm/init.c end_pfn = limit_pfn;
limit_pfn   23 drivers/iommu/iova.c unsigned long limit_pfn);
limit_pfn  117 drivers/iommu/iova.c __get_cached_rbnode(struct iova_domain *iovad, unsigned long limit_pfn)
limit_pfn  119 drivers/iommu/iova.c if (limit_pfn <= iovad->dma_32bit_pfn)
limit_pfn  181 drivers/iommu/iova.c unsigned long size, unsigned long limit_pfn,
limit_pfn  195 drivers/iommu/iova.c if (limit_pfn <= iovad->dma_32bit_pfn &&
limit_pfn  199 drivers/iommu/iova.c curr = __get_cached_rbnode(iovad, limit_pfn);
limit_pfn  202 drivers/iommu/iova.c limit_pfn = min(limit_pfn, curr_iova->pfn_lo);
limit_pfn  203 drivers/iommu/iova.c new_pfn = (limit_pfn - size) & align_mask;
limit_pfn  209 drivers/iommu/iova.c if (limit_pfn < size || new_pfn < iovad->start_pfn) {
limit_pfn  295 drivers/iommu/iova.c unsigned long limit_pfn,
limit_pfn  305 drivers/iommu/iova.c ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn + 1,
limit_pfn  413 drivers/iommu/iova.c unsigned long limit_pfn, bool flush_rcache)
limit_pfn  418 drivers/iommu/iova.c iova_pfn = iova_rcache_get(iovad, size, limit_pfn + 1);
limit_pfn  423 drivers/iommu/iova.c new_iova = alloc_iova(iovad, size, limit_pfn, true);
limit_pfn  834 drivers/iommu/iova.c unsigned long limit_pfn)
limit_pfn  842 drivers/iommu/iova.c for (i = mag->size - 1; mag->pfns[i] > limit_pfn; i--)
limit_pfn  954 drivers/iommu/iova.c unsigned long limit_pfn)
limit_pfn  980 drivers/iommu/iova.c iova_pfn = iova_magazine_pop(cpu_rcache->loaded, limit_pfn);
limit_pfn  994 drivers/iommu/iova.c unsigned long limit_pfn)
limit_pfn 1001 drivers/iommu/iova.c return __iova_rcache_get(&iovad->rcaches[log_size], limit_pfn - size);
limit_pfn  144 include/linux/iova.h unsigned long limit_pfn,
limit_pfn  152 include/linux/iova.h unsigned long limit_pfn, bool flush_rcache);
limit_pfn  195 include/linux/iova.h unsigned long limit_pfn,
limit_pfn  215 include/linux/iova.h unsigned long limit_pfn,
limit_pfn  441 include/linux/memblock.h phys_addr_t memblock_mem_size(unsigned long limit_pfn);
limit_pfn 1597 mm/memblock.c phys_addr_t __init memblock_mem_size(unsigned long limit_pfn)
limit_pfn 1606 mm/memblock.c start_pfn = min_t(unsigned long, start_pfn, limit_pfn);
limit_pfn 1607 mm/memblock.c end_pfn = min_t(unsigned long, end_pfn, limit_pfn);
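Two idioms recur across these hits: limit_pfn as a clamp on an existing PFN range (the min_t() calls in memblock_mem_size(), the round_down() calls in arch/x86/mm/init.c) and limit_pfn as an exclusive ceiling for top-down allocation (the alloc_iova() path, where callers pass limit_pfn + 1 and the allocator computes new_pfn = (limit_pfn - size) & align_mask). The sketch below is a minimal userspace model of both idioms, not kernel code; clamp_range() and top_down_fit() are illustrative names invented here, and size is assumed to be a power of two.

/*
 * Userspace sketch of the two limit_pfn idioms in the listing above.
 * clamp_range() mirrors the min_t() clamping in memblock_mem_size();
 * top_down_fit() mirrors the candidate-PFN computation in
 * __alloc_and_insert_iova_range(). Illustrative only.
 */
#include <stdio.h>

/* Clamp a [start_pfn, end_pfn) range so neither end exceeds limit_pfn. */
static void clamp_range(unsigned long *start_pfn, unsigned long *end_pfn,
			unsigned long limit_pfn)
{
	if (*start_pfn > limit_pfn)
		*start_pfn = limit_pfn;
	if (*end_pfn > limit_pfn)
		*end_pfn = limit_pfn;
}

/*
 * Pick the highest size-aligned PFN whose whole [pfn, pfn + size) block
 * fits below limit_pfn, treated as an exclusive upper bound (the
 * alloc_iova() callers arrange this by passing limit_pfn + 1).
 * Returns 0 on failure, like the new_pfn < start_pfn bail-out in iova.c.
 */
static unsigned long top_down_fit(unsigned long limit_pfn, unsigned long size,
				  unsigned long start_pfn)
{
	unsigned long align_mask = ~(size - 1);	/* size: power of two */
	unsigned long new_pfn;

	if (limit_pfn < size)
		return 0;
	new_pfn = (limit_pfn - size) & align_mask;
	return new_pfn < start_pfn ? 0 : new_pfn;
}

int main(void)
{
	unsigned long start = 0x9000, end = 0x12000, limit = 0x10000;

	clamp_range(&start, &end, limit);
	printf("clamped range: [%#lx, %#lx)\n", start, end);
	printf("top-down fit:  %#lx\n", top_down_fit(limit, 0x40, 0x100));
	return 0;
}

Note the asymmetry the model exposes: the memblock/e820 users only shrink a range that is already laid out, whereas the IOVA allocator works downward from limit_pfn, which is why its callers convert the inclusive limit to an exclusive one with limit_pfn + 1 before the subtraction.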