/linux-4.4.14/drivers/xen/ |
D | biomerge.c | 10 unsigned long bfn1 = pfn_to_bfn(page_to_pfn(vec1->bv_page)); in xen_biovec_phys_mergeable() 11 unsigned long bfn2 = pfn_to_bfn(page_to_pfn(vec2->bv_page)); in xen_biovec_phys_mergeable()
|
D | balloon.c | 458 unsigned long pfn = page_to_pfn(page); in increase_reservation() 536 unsigned long pfn = page_to_pfn(page); in decrease_reservation() 662 ret = xen_alloc_p2m_entry(page_to_pfn(page)); in alloc_xenballooned_pages()
|
D | xlate_mmu.c | 97 pte_t pte = pte_mkspecial(pfn_pte(page_to_pfn(page), info->prot)); in remap_pte_fn()
|
D | gntdev.c | 283 pfn_to_kaddr(page_to_pfn(map->pages[i])); in map_grant_pages() 299 pfn_to_kaddr(page_to_pfn(map->pages[i])); in map_grant_pages() 339 uint8_t *tmp = pfn_to_kaddr(page_to_pfn(map->pages[pgno])); in __unmap_grant_pages()
|
D | grant-table.c | 330 entry->ref, page_to_pfn(entry->page)); in gnttab_handle_deferred() 378 what, ref, page ? page_to_pfn(page) : -1); in gnttab_add_deferred()
|
D | xen-scsiback.c | 250 unsigned long pfn = page_to_pfn(page); in vaddr_page()
|
/linux-4.4.14/arch/microblaze/include/asm/ |
D | page.h | 153 # define page_to_virt(page) __va(page_to_pfn(page) << PAGE_SHIFT) 154 # define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) 158 # define page_to_virt(page) (pfn_to_virt(page_to_pfn(page))) 159 # define page_to_phys(page) (pfn_to_phys(page_to_pfn(page)))
|
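The microblaze page_to_phys()/page_to_virt() definitions above, like their equivalents on most other architectures in this listing, reduce to shifting the page frame number left by PAGE_SHIFT. A minimal sketch of that arithmetic follows; the helper name is invented for illustration, and the cast matters on 32-bit configurations whose physical addresses exceed 32 bits (which is why the asm-generic, tile and powerpc variants later in this listing cast to dma_addr_t or phys_addr_t before shifting).

    #include <linux/mm.h>   /* page_to_pfn(), PAGE_SHIFT via asm/page.h */

    /*
     * Illustrative sketch only; the helper name is invented. Most of the
     * page_to_phys() definitions in this listing reduce to this shift.
     */
    static inline phys_addr_t example_page_to_phys(struct page *page)
    {
            return (phys_addr_t)page_to_pfn(page) << PAGE_SHIFT;
    }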
/linux-4.4.14/include/trace/events/ |
D | kmem.h | 185 __entry->pfn = page_to_pfn(page); 207 __entry->pfn = page_to_pfn(page); 232 __entry->pfn = page ? page_to_pfn(page) : -1UL; 259 __entry->pfn = page ? page_to_pfn(page) : -1UL; 303 __entry->pfn = page ? page_to_pfn(page) : -1UL; 333 __entry->pfn = page_to_pfn(page);
|
D | pagemap.h | 45 __entry->pfn = page_to_pfn(page); 76 __entry->pfn = page_to_pfn(page);
|
D | filemap.h | 28 __entry->pfn = page_to_pfn(page);
|
D | vmscan.h | 344 __entry->pfn = page_to_pfn(page);
|
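The kmem, pagemap, filemap and vmscan tracepoints above record the raw PFN rather than the struct page pointer, and the kmem events fall back to -1UL when no page is available. A small sketch of that NULL-safe conversion; the helper name is invented for illustration.

    #include <linux/mm.h>

    /*
     * Invented helper mirroring the kmem.h trace assignments above:
     * store the PFN when a page is present, -1UL as an out-of-band
     * "no page" marker.
     */
    static unsigned long example_trace_pfn(struct page *page)
    {
            return page ? page_to_pfn(page) : -1UL;
    }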
/linux-4.4.14/arch/arm/mm/ |
D | flush.c | 143 flush_pfn_alias(page_to_pfn(page), uaddr); in __flush_ptrace_access() 152 flush_icache_alias(page_to_pfn(page), uaddr, len); in __flush_ptrace_access() 237 flush_pfn_alias(page_to_pfn(page), in __flush_dcache_page() 267 flush_cache_page(mpnt, mpnt->vm_start + offset, page_to_pfn(page)); in __flush_dcache_aliases() 399 pfn = page_to_pfn(page); in __flush_anon_page()
|
D | copypage-xsc3.c | 80 flush_cache_page(vma, vaddr, page_to_pfn(from)); in xsc3_mc_copy_user_highpage()
|
D | copypage-feroceon.c | 77 flush_cache_page(vma, vaddr, page_to_pfn(from)); in feroceon_copy_user_highpage()
|
D | copypage-v4wb.c | 57 flush_cache_page(vma, vaddr, page_to_pfn(from)); in v4wb_copy_user_highpage()
|
D | dma-mapping.c | 82 return pfn_to_dma(dev, page_to_pfn(page)) + offset; in arm_dma_map_page() 89 return pfn_to_dma(dev, page_to_pfn(page)) + offset; in arm_coherent_dma_map_page() 235 phys_addr_t base = __pfn_to_phys(page_to_pfn(page)); in __dma_clear_buffer() 666 *handle = pfn_to_dma(dev, page_to_pfn(page)); in __dma_alloc() 799 pfn = page_to_pfn(page) + offset / PAGE_SIZE; in dma_cache_maint_page() 881 pfn = page_to_pfn(page) + off / PAGE_SIZE; in __dma_page_dev_to_cpu() 1262 unsigned int next_pfn = page_to_pfn(pages[i]) + 1; in __iommu_create_mapping() 1267 if (page_to_pfn(pages[j]) != next_pfn) in __iommu_create_mapping()
|
/linux-4.4.14/include/linux/ |
D | pageblock-flags.h | 81 get_pfnblock_flags_mask(page, page_to_pfn(page), \ 85 set_pfnblock_flags_mask(page, flags, page_to_pfn(page), \
|
D | swapops.h | 108 page_to_pfn(page)); in make_migration_entry() 176 return swp_entry(SWP_HWPOISON, page_to_pfn(page)); in make_hwpoison_entry()
|
D | mmzone.h | 82 get_pfnblock_flags_mask(page, page_to_pfn(page), \
|
D | mm.h | 85 #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n)) 908 return __va(PFN_PHYS(page_to_pfn(page))); in lowmem_page_address()
|
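nth_page() in mm.h above advances through a multi-page allocation by converting to a PFN, adding the offset and converting back, which stays correct even when struct pages are not virtually contiguous (e.g. SPARSEMEM without VMEMMAP). A short sketch of using it; the helper name is invented.

    #include <linux/mm.h>
    #include <linux/highmem.h>

    /*
     * Invented helper: zero every base page of a 2^order allocation.
     * nth_page() goes page -> PFN -> page, so it works whether or not
     * the struct page array is virtually contiguous.
     */
    static void example_zero_pages(struct page *head, unsigned int order)
    {
            unsigned int i;

            for (i = 0; i < (1U << order); i++)
                    clear_highpage(nth_page(head, i));
    }

lowmem_page_address() on the same mm.h line is the virtual-address counterpart: PFN to physical address, then __va().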
/linux-4.4.14/arch/sparc/include/asm/ |
D | cacheflush_32.h | 23 flush_cache_page(vma, vaddr, page_to_pfn(page));\ 28 flush_cache_page(vma, vaddr, page_to_pfn(page));\
|
D | cacheflush_64.h | 59 flush_cache_page(vma, vaddr, page_to_pfn(page)); \ 66 flush_cache_page(vma, vaddr, page_to_pfn(page)); \
|
D | page.h | 4 #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
|
D | pgtable_32.h | 285 return __pte((page_to_pfn(page) << (PAGE_SHIFT-4)) | pgprot_val(pgprot)); in mk_pte()
|
D | pgtable_64.h | 244 #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) 253 #define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot))
|
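The sparc mk_pte()/mk_pmd() macros above, like their counterparts on most architectures in this listing, simply feed the page's PFN into pfn_pte()/pfn_pmd(). A sketch of constructing a kernel read/write PTE for a page; the helper name is invented, and PAGE_KERNEL stands in for whatever pgprot a real caller would pass.

    #include <linux/mm.h>
    #include <asm/pgtable.h>

    /*
     * Invented helper: build a PTE value mapping 'page' with kernel
     * read/write permissions, the same way the per-arch mk_pte()
     * macros do for an arbitrary pgprot.
     */
    static pte_t example_mk_kernel_pte(struct page *page)
    {
            return pfn_pte(page_to_pfn(page), PAGE_KERNEL);
    }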
/linux-4.4.14/drivers/gpu/drm/gma500/ |
D | gtt.c | 109 pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), in psb_gtt_insert() 114 pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), in psb_gtt_insert() 143 pte = psb_gtt_mask_pte(page_to_pfn(dev_priv->scratch_page), in psb_gtt_remove() 183 pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), in psb_gtt_roll() 188 pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), in psb_gtt_roll() 548 pfn_base = page_to_pfn(dev_priv->scratch_page); in psb_gtt_init()
|
D | mmu.c | 146 PSB_WSGX32(page_to_pfn(pd->p) << PAGE_SHIFT, offset); in psb_mmu_set_pd_context() 196 pd->invalid_pde = psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt), in psb_mmu_alloc_pd() 198 pd->invalid_pte = psb_mmu_mask_pte(page_to_pfn(pd->dummy_page), in psb_mmu_alloc_pd() 353 v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask; in psb_mmu_pt_alloc_map_lock() 435 return page_to_pfn(pd->p) << PAGE_SHIFT; in psb_get_default_pd_addr() 747 pte = psb_mmu_mask_pte(page_to_pfn(*pages++), in psb_mmu_insert_pages()
|
D | gem.c | 214 pfn = page_to_pfn(r->pages[page_offset]); in psb_gem_fault()
|
/linux-4.4.14/arch/avr32/include/asm/ |
D | page.h | 82 #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) 89 #define page_to_pfn(page) ((unsigned long)((page) - mem_map) + PHYS_PFN_OFFSET) (macro definition)
|
D | pgtable.h | 280 #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
|
/linux-4.4.14/include/asm-generic/ |
D | page.h | 87 #define page_to_virt(page) pfn_to_virt(page_to_pfn(page)) 90 #define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
|
D | memory_model.h | 78 #define page_to_pfn __page_to_pfn (macro definition)
|
D | pgtable.h | 599 #define my_zero_pfn(addr) page_to_pfn(ZERO_PAGE(addr))
|
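memory_model.h above is where the generic page_to_pfn is bound to __page_to_pfn, whose expansion depends on the configured memory model. Roughly, as in 4.4's include/asm-generic/memory_model.h (the DISCONTIGMEM and non-VMEMMAP SPARSEMEM branches are omitted, and the two defines below are mutually exclusive alternatives):

    /* CONFIG_FLATMEM: all struct pages live in a single mem_map[] array. */
    #define __page_to_pfn(page) \
            ((unsigned long)((page) - mem_map) + ARCH_PFN_OFFSET)

    /* CONFIG_SPARSEMEM_VMEMMAP: a virtually contiguous vmemmap array. */
    #define __page_to_pfn(page) (unsigned long)((page) - vmemmap)

The avr32 and mn10300 page.h definitions elsewhere in this listing are arch-local copies of the FLATMEM form.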
/linux-4.4.14/arch/alpha/include/asm/ |
D | mmzone.h | 83 pfn = page_to_pfn(page) << 32; \ 101 (page_to_pfn(page) << PAGE_SHIFT)
|
D | pgtable.h | 214 pte_val(pte) = (page_to_pfn(page) << 32) | pgprot_val(pgprot); \
|
/linux-4.4.14/arch/mn10300/include/asm/ |
D | page.h | 109 #define page_to_pfn(page) ((unsigned long)((page) - mem_map) + __pfn_disp) (macro definition) 119 #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
|
D | pgalloc.h | 28 set_pmd(pmd, __pmd((page_to_pfn(pte) << PAGE_SHIFT) | _PAGE_TABLE)); in pmd_populate()
|
D | pgtable.h | 391 #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
|
/linux-4.4.14/mm/ |
D | page_isolation.c | 25 pfn = page_to_pfn(page); in set_migratetype_isolate() 101 page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1); in unset_migratetype_isolate() 105 if (pfn_valid_within(page_to_pfn(buddy)) && in unset_migratetype_isolate()
|
D | madvise.c | 358 page_to_pfn(p), start); in madvise_hwpoison() 365 page_to_pfn(p), start); in madvise_hwpoison() 367 memory_failure(page_to_pfn(p), 0, MF_COUNT_INCREASED); in madvise_hwpoison()
|
D | page_ext.c | 104 unsigned long pfn = page_to_pfn(page); in lookup_page_ext() 181 unsigned long pfn = page_to_pfn(page); in lookup_page_ext()
|
D | mmzone.c | 78 if (page_to_pfn(page) != pfn) in memmap_valid_within()
|
D | memory-failure.c | 309 page_to_pfn(p), tsk->comm); in add_to_kill() 902 page_to_pfn(page)); in get_hwpoison_page() 1589 unsigned long pfn = page_to_pfn(page); in soft_offline_huge_page() 1647 unsigned long pfn = page_to_pfn(page); in __soft_offline_page() 1744 unsigned long pfn = page_to_pfn(page); in soft_offline_page()
|
D | frame_vector.c | 177 nums[i] = page_to_pfn(pages[i]); in frame_vector_to_pfns()
|
D | ksm.c | 886 flush_cache_page(vma, addr, page_to_pfn(page)); in write_protect_page() 1164 nid = get_kpfn_nid(page_to_pfn(page)); in stable_tree_search() 1267 kpfn = page_to_pfn(kpage); in stable_tree_insert() 1350 nid = get_kpfn_nid(page_to_pfn(page)); in unstable_tree_search_insert() 1987 VM_BUG_ON_PAGE(stable_node->kpfn != page_to_pfn(oldpage), oldpage); in ksm_migrate_page() 1988 stable_node->kpfn = page_to_pfn(newpage); in ksm_migrate_page()
|
D | page_alloc.c | 353 unsigned long pfn = page_to_pfn(page); in page_outside_zone_boundaries() 374 if (!pfn_valid_within(page_to_pfn(page))) in page_is_consistent() 434 current->comm, page_to_pfn(page)); in bad_page() 603 if (!pfn_valid_within(page_to_pfn(buddy))) in page_is_buddy() 739 if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) { in __free_one_page() 846 __free_one_page(page, page_to_pfn(page), zone, 0, mt); in free_pcppages_bulk() 1014 unsigned long pfn = page_to_pfn(page); in __free_pages_ok() 1512 if (!pfn_valid_within(page_to_pfn(page))) { in move_freepages() 1538 start_pfn = page_to_pfn(page); in move_freepages_block() 2031 pfn = page_to_pfn(list_entry(curr, struct page, lru)); in mark_free_pages() … (remaining page_alloc.c hits truncated)
|
D | sparse.c | 635 ret = (struct page *)pfn_to_kaddr(page_to_pfn(page)); in __kmalloc_section_memmap() 670 maps_section_nr = pfn_to_section_nr(page_to_pfn(page)); in free_map_bootmem()
|
D | internal.h | 343 unsigned long pfn = page_to_pfn(base) + offset; in mem_map_next()
|
D | cma.c | 448 pfn = page_to_pfn(pages); in cma_release()
|
D | compaction.c | 48 unsigned long pfn = page_to_pfn(page); in release_freepages() 275 pfn = page_to_pfn(page); in update_pageblock_skip()
|
D | rmap.c | 786 if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) { in __page_check_address() 1338 flush_cache_page(vma, address, page_to_pfn(page)); in try_to_unmap_one()
|
D | memory_hotplug.c | 1342 BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1)); in next_active_pageblock() 1451 pfn = page_to_pfn(head) + (1<<compound_order(head)) - 1; in do_migrate_range()
|
D | gup.c | 1009 flush_cache_page(vma, addr, page_to_pfn(page)); in get_dump_page()
|
D | hugetlb.c | 1024 free_contig_range(page_to_pfn(page), 1 << order); in free_gigantic_page() 1329 compound_idx = page_to_pfn(page) - page_to_pfn(page_head); in __basepage_index()
|
D | zsmalloc.c | 811 obj = page_to_pfn(page) << OBJ_INDEX_BITS; in location_to_obj()
|
D | nommu.c | 302 return page_to_pfn(virt_to_page(addr)); in vmalloc_to_pfn()
|
D | Kconfig | 128 pfn_to_page and page_to_pfn operations. This is the most
|
D | vmalloc.c | 270 return page_to_pfn(vmalloc_to_page(vmalloc_addr)); in vmalloc_to_pfn()
|
D | memory.c | 129 zero_pfn = page_to_pfn(ZERO_PAGE(0)); in init_zero_pfn()
|
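Several of the mm/ call sites above (cma.c, hugetlb.c, memory-failure.c, page_isolation.c) take a struct page but immediately switch to PFN space because the underlying interfaces are range- or bitmap-oriented. A hedged sketch modelled loosely on free_gigantic_page(); the helper name is invented and a kernel with CMA/alloc_contig support is assumed.

    #include <linux/mm.h>
    #include <linux/gfp.h>

    /*
     * Invented helper, modelled loosely on free_gigantic_page() above:
     * release a 2^order physically contiguous block by handing its
     * starting PFN back to the contiguous-range allocator.
     */
    static void example_free_contig(struct page *page, unsigned int order)
    {
            free_contig_range(page_to_pfn(page), 1UL << order);
    }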
/linux-4.4.14/arch/score/include/asm/ |
D | page.h | 71 #define page_to_virt(page) (pfn_to_virt(page_to_pfn(page))) 73 #define page_to_phys(page) (pfn_to_phys(page_to_pfn(page)))
|
D | cacheflush.h | 45 flush_cache_page(vma, vaddr, page_to_pfn(page));\
|
D | pgtable.h | 99 #define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
|
/linux-4.4.14/arch/metag/include/asm/ |
D | page.h | 109 #define page_to_virt(page) __va(page_to_pfn(page) << PAGE_SHIFT) 111 #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
|
D | pgtable.h | 154 #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
|
/linux-4.4.14/arch/m68k/include/asm/ |
D | page_mm.h | 155 pfn_to_virt(page_to_pfn(page)); \ 164 #define page_to_pfn(_page) ({ \ (macro definition)
|
D | cacheflush_mm.h | 264 flush_cache_page(vma, vaddr, page_to_pfn(page)); in copy_to_user_page() 272 flush_cache_page(vma, vaddr, page_to_pfn(page)); in copy_from_user_page()
|
D | virtconvert.h | 33 #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
|
D | page_no.h | 32 #define page_to_pfn(page) virt_to_pfn(page_to_virt(page)) (macro definition)
|
D | sun3_pgtable.h | 102 #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
|
D | mcf_pgtable.h | 156 #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
|
D | motorola_pgtable.h | 100 #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
|
/linux-4.4.14/arch/nios2/mm/ |
D | cacheflush.c | 92 page_to_pfn(page)); in flush_aliases() 253 flush_cache_page(vma, user_vaddr, page_to_pfn(page)); in copy_from_user_page() 264 flush_cache_page(vma, user_vaddr, page_to_pfn(page)); in copy_to_user_page()
|
/linux-4.4.14/arch/parisc/include/asm/ |
D | cacheflush.h | 94 flush_cache_page(vma, vaddr, page_to_pfn(page)); \ 101 flush_cache_page(vma, vaddr, page_to_pfn(page)); \
|
D | page.h | 167 #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
|
D | pgtable.h | 396 #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
|
/linux-4.4.14/arch/tile/include/asm/ |
D | pgalloc.h | 60 set_pmd(pmd, ptfn_pmd(HV_CPA_TO_PTFN(PFN_PHYS(page_to_pfn(page))), in pmd_populate() 91 return pfn_to_kaddr(page_to_pfn(pte_alloc_one(mm, address))); in pte_alloc_one_kernel()
|
D | page.h | 320 #define page_to_pa(page) ((phys_addr_t)(page_to_pfn(page)) << PAGE_SHIFT) 322 #define page_to_virt(page) pfn_to_kaddr(page_to_pfn(page))
|
D | pgtable.h | 303 #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) 482 #define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot))
|
D | io.h | 39 #define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
|
/linux-4.4.14/arch/xtensa/include/asm/ |
D | page.h | 182 #define page_to_virt(page) __va(page_to_pfn(page) << PAGE_SHIFT) 184 #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
|
D | pgtable.h | 291 #define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
|
/linux-4.4.14/lib/ |
D | scatterlist.c | 406 if (page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1) in sg_alloc_table_from_pages() 421 if (page_to_pfn(pages[j]) != in sg_alloc_table_from_pages() 422 page_to_pfn(pages[j - 1]) + 1) in sg_alloc_table_from_pages()
|
D | dma-debug.c | 583 cln = (phys_addr_t) page_to_pfn(page) << CACHELINE_PER_PAGE_SHIFT; in debug_dma_assert_idle() 1282 entry->pfn = page_to_pfn(page); in debug_dma_map_page() 1379 entry->pfn = page_to_pfn(sg_page(s)); in debug_dma_map_sg() 1430 .pfn = page_to_pfn(sg_page(s)), in debug_dma_unmap_sg() 1466 entry->pfn = page_to_pfn(virt_to_page(virt)); in debug_dma_alloc_coherent() 1482 .pfn = page_to_pfn(virt_to_page(virt)), in debug_dma_free_coherent() 1591 .pfn = page_to_pfn(sg_page(s)), in debug_dma_sync_sg_for_cpu() 1624 .pfn = page_to_pfn(sg_page(s)), in debug_dma_sync_sg_for_device()
|
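sg_alloc_table_from_pages() above merges two pages into one scatterlist segment only when their PFNs are consecutive, i.e. the pages are physically adjacent. The test, as an invented stand-alone helper:

    #include <linux/types.h>
    #include <linux/mm.h>

    /*
     * Invented helper: two pages may share a scatterlist segment only
     * if they are physically adjacent, i.e. their PFNs are consecutive.
     */
    static bool example_pages_adjacent(struct page *a, struct page *b)
    {
            return page_to_pfn(b) == page_to_pfn(a) + 1;
    }

The same PFN-adjacency check drives segment coalescing in arm's __iommu_create_mapping() earlier in this listing and in i915_gem_object_get_pages_gtt() later on.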
/linux-4.4.14/drivers/staging/android/ion/ |
D | ion_carveout_heap.c | 65 ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page)); in ion_carveout_heap_phys() 114 ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page)); in ion_carveout_heap_free()
|
D | ion_heap.c | 95 ret = remap_pfn_range(vma, addr, page_to_pfn(page), len, in ion_heap_map_user()
|
D | ion.c | 960 pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff])); in ion_vm_fault()
|
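ion_heap_map_user() above maps buffer pages into a user VMA with remap_pfn_range(), using page_to_pfn() for the starting frame. A reduced sketch for a single page; the helper name is invented, and the sg_table walk and vm_pgoff handling of the real code are omitted.

    #include <linux/mm.h>

    /*
     * Invented helper: map a single kernel page into a userspace VMA at
     * mmap() time. The real ion_heap_map_user() walks an sg_table and
     * honours vma->vm_pgoff; both are omitted here.
     */
    static int example_mmap_one_page(struct vm_area_struct *vma,
                                     struct page *page)
    {
            return remap_pfn_range(vma, vma->vm_start, page_to_pfn(page),
                                   PAGE_SIZE, vma->vm_page_prot);
    }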
/linux-4.4.14/include/xen/ |
D | page.h | 20 ((page_to_pfn(page)) << (PAGE_SHIFT - XEN_PAGE_SHIFT))
|
/linux-4.4.14/arch/ia64/include/asm/ |
D | page.h | 106 # define page_to_pfn(page) ((unsigned long) (page - vmem_map)) (macro definition) 123 #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
|
D | pgtable.h | 255 #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
|
/linux-4.4.14/block/ |
D | bounce.c | 192 if (page_to_pfn(from.bv_page) > queue_bounce_pfn(q)) in __blk_queue_bounce() 202 if (page_to_pfn(page) <= queue_bounce_pfn(q)) in __blk_queue_bounce()
|
/linux-4.4.14/arch/frv/include/asm/ |
D | pgalloc.h | 26 __set_pmd((PMD), page_to_pfn(PAGE) << PAGE_SHIFT | _PAGE_TABLE); \
|
D | pgtable.h | 418 #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
|
/linux-4.4.14/arch/um/include/asm/ |
D | pgalloc.h | 18 ((unsigned long long)page_to_pfn(pte) << \
|
D | pgtable.h | 274 #define page_to_phys(page) pfn_to_phys((pfn_t) page_to_pfn(page))
|
/linux-4.4.14/arch/unicore32/include/asm/ |
D | memory.h | 66 #define page_to_phys(page) (__pfn_to_phys(page_to_pfn(page)))
|
D | pgalloc.h | 108 page_to_pfn(ptep) << PAGE_SHIFT | _PAGE_USER_TABLE); in pmd_populate()
|
D | pgtable.h | 230 #define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
|
/linux-4.4.14/arch/sparc/mm/ |
D | iommu.c | 186 ioptex = bit_map_string_get(&iommu->usemap, npages, page_to_pfn(page)); in iommu_get_one() 195 iopte_val(*iopte) = MKIOPTE(page_to_pfn(page), IOPERM); in iommu_get_one() 367 MKIOPTE(page_to_pfn(virt_to_page(page)), ioperm_noc); in iommu_map_dma_area()
|
D | srmmu.c | 129 ptp = page_to_pfn(ptep) << (PAGE_SHIFT-4); /* watch for overflow */ in pmd_populate() 365 p = page_to_pfn(pte) << PAGE_SHIFT; /* Physical address */ in pte_free()
|
D | init_64.c | 1684 unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT; in __kernel_map_pages()
|
/linux-4.4.14/kernel/power/ |
D | snapshot.c | 892 memory_bm_set_bit(free_pages_map, page_to_pfn(page)); in swsusp_set_page_free() 898 memory_bm_test_bit(free_pages_map, page_to_pfn(page)) : 0; in swsusp_page_is_free() 904 memory_bm_clear_bit(free_pages_map, page_to_pfn(page)); in swsusp_unset_page_free() 910 memory_bm_set_bit(forbidden_pages_map, page_to_pfn(page)); in swsusp_set_page_forbidden() 916 memory_bm_test_bit(forbidden_pages_map, page_to_pfn(page)) : 0; in swsusp_page_is_forbidden() 922 memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page)); in swsusp_unset_page_forbidden() 1388 memory_bm_set_bit(©_bm, page_to_pfn(page)); in preallocate_image_pages() 1783 memory_bm_set_bit(bm, page_to_pfn(page)); in alloc_highmem_pages() 1826 memory_bm_set_bit(copy_bm, page_to_pfn(page)); in swsusp_alloc() 2192 memory_bm_set_bit(bm, page_to_pfn(page)); in prepare_highmem_image()
|
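snapshot.c above keeps hibernation page state in bitmaps indexed by PFN, so every set/test/clear call first converts the struct page. A reduced sketch; the helper name is invented, and a plain PFN-indexed bitmap stands in for swsusp's internal memory_bm_* structures.

    #include <linux/mm.h>
    #include <linux/bitops.h>

    /*
     * Invented helper; a plain PFN-indexed bitmap stands in for
     * swsusp's internal memory_bm_* structures.
     */
    static void example_mark_page(unsigned long *pfn_bitmap, struct page *page)
    {
            set_bit(page_to_pfn(page), pfn_bitmap);
    }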
/linux-4.4.14/arch/openrisc/include/asm/ |
D | pgalloc.h | 37 ((unsigned long)page_to_pfn(pte) << in pmd_populate()
|
D | page.h | 90 #define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
|
/linux-4.4.14/drivers/base/ |
D | dma-mapping.c | 253 unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr)); in dma_common_mmap() 316 for (i = 0, pfn = page_to_pfn(page); i < (size >> PAGE_SHIFT); i++) in dma_common_contiguous_remap()
|
/linux-4.4.14/drivers/misc/ |
D | vmw_balloon.c | 470 unsigned long pfn = page_to_pfn(b->page); in vmballoon_send_batched_lock() 518 unsigned long pfn = page_to_pfn(b->page); in vmballoon_send_batched_unlock() 604 locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status, in vmballoon_lock_page() 705 if (!vmballoon_send_unlock_page(b, page_to_pfn(page), target)) { in vmballoon_unlock_page() 789 (u64)page_to_pfn(p) << PAGE_SHIFT); in vmballoon_add_batched_page()
|
/linux-4.4.14/arch/mips/mm/ |
D | pgtable-64.c | 83 pmd_val(pmd) = (page_to_pfn(page) << _PFN_SHIFT) | pgprot_val(prot); in mk_pmd()
|
D | dma-default.c | 214 pfn = page_to_pfn(virt_to_page((void *)addr)); in mips_dma_mmap()
|
D | init.c | 200 flush_cache_page(vma, vaddr, page_to_pfn(page)); in copy_to_user_page()
|
/linux-4.4.14/arch/arm64/include/asm/ |
D | memory.h | 84 #define page_to_phys(page) (__pfn_to_phys(page_to_pfn(page)))
|
D | pgtable.h | 368 #define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot) 439 #define mk_pte(page,prot) pfn_pte(page_to_pfn(page),prot)
|
/linux-4.4.14/arch/powerpc/include/asm/ |
D | pgalloc-32.h | 27 (pmd_val(*(pmd)) = (page_to_pfn(pte) << PAGE_SHIFT) | _PMD_PRESENT)
|
D | pgtable.h | 78 #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
|
D | io.h | 798 #define page_to_phys(page) ((phys_addr_t)page_to_pfn(page) << PAGE_SHIFT)
|
/linux-4.4.14/arch/hexagon/include/asm/ |
D | page.h | 141 #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
|
D | pgalloc.h | 103 set_pmd(pmd, __pmd(((unsigned long)page_to_pfn(pte) << PAGE_SHIFT) | in pmd_populate()
|
D | pgtable.h | 309 #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
|
/linux-4.4.14/arch/x86/xen/ |
D | grant-table.c | 145 pfns[i] = page_to_pfn(pages[i]); in xlated_setup_gnttab_pages()
|
D | p2m.c | 738 pfn = page_to_pfn(pages[i]); in set_foreign_p2m_mapping() 763 unsigned long mfn = __pfn_to_mfn(page_to_pfn(pages[i])); in clear_foreign_p2m_mapping() 764 unsigned long pfn = page_to_pfn(pages[i]); in clear_foreign_p2m_mapping()
|
D | mmu.c | 753 unsigned long pfn = page_to_pfn(page); in xen_pin_page() 895 unsigned long pfn = page_to_pfn(page); in xen_unpin_page()
|
/linux-4.4.14/arch/tile/mm/ |
D | homecache.c | 191 pte = pfn_pte(page_to_pfn(page), PAGE_KERNEL); in homecache_finv_map_page() 230 sim_validate_lines_evicted(PFN_PHYS(page_to_pfn(page)), PAGE_SIZE); in homecache_finv_page_internal()
|
/linux-4.4.14/kernel/ |
D | kexec_core.c | 356 pfn = page_to_pfn(pages); in kimage_alloc_normal_control_pages() 637 addr = page_to_pfn(page) << PAGE_SHIFT; in kimage_alloc_page() 652 if (page_to_pfn(page) > in kimage_alloc_page() 657 addr = page_to_pfn(page) << PAGE_SHIFT; in kimage_alloc_page() 736 result = kimage_add_page(image, page_to_pfn(page) in kimage_load_normal_segment()
|
D | kexec_file.c | 560 zero_buf = __va(page_to_pfn(ZERO_PAGE(0)) << PAGE_SHIFT); in kexec_calculate_store_digests()
|
/linux-4.4.14/arch/arm/kernel/ |
D | machine_kexec.c | 164 page_to_pfn(image->control_code_page) << PAGE_SHIFT; in machine_kexec()
|
/linux-4.4.14/arch/s390/include/asm/ |
D | page.h | 147 #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
|
D | pgtable.h | 1627 #define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot))
|
/linux-4.4.14/arch/x86/kernel/ |
D | machine_kexec_64.c | 231 start_pgtable = page_to_pfn(image->control_code_page) << PAGE_SHIFT; in machine_kexec_prepare() 294 page_list[PA_SWAP_PAGE] = (page_to_pfn(image->swap_page) in machine_kexec()
|
D | machine_kexec_32.c | 227 page_list[PA_SWAP_PAGE] = (page_to_pfn(image->swap_page) in machine_kexec()
|
/linux-4.4.14/arch/x86/include/asm/ |
D | pgalloc.h | 72 unsigned long pfn = page_to_pfn(pte); in pmd_populate()
|
D | io.h | 144 #define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
|
D | pgtable.h | 552 #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) 803 #define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot))
|
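The pmd_populate() implementations above (x86 pgalloc.h here, and hexagon, mn10300, frv, um and unicore32 elsewhere in this listing) wire a freshly allocated page-table page into the level above by publishing its physical address, derived from page_to_pfn(). A simplified sketch; the helper name is invented and the _PAGE_TABLE type bits are architecture-specific.

    #include <linux/mm.h>
    #include <asm/pgtable.h>

    /*
     * Simplified, invented helper: publish a newly allocated PTE page
     * into a PMD slot by its physical address. The _PAGE_TABLE bits are
     * architecture-specific (shown here as on mn10300/hexagon above).
     */
    static void example_pmd_populate(pmd_t *pmd, struct page *pte_page)
    {
            set_pmd(pmd, __pmd((page_to_pfn(pte_page) << PAGE_SHIFT) |
                               _PAGE_TABLE));
    }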
/linux-4.4.14/arch/arc/include/asm/ |
D | io.h | 44 #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
|
D | pgtable.h | 278 #define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
|
/linux-4.4.14/arch/sh/include/asm/ |
D | page.h | 168 #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
|
D | pgtable_32.h | 389 #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
|
/linux-4.4.14/arch/m32r/include/asm/ |
D | io.h | 77 #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
|
D | pgtable.h | 279 #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), pgprot)
|
/linux-4.4.14/arch/arm/include/asm/ |
D | memory.h | 126 #define page_to_phys(page) (__pfn_to_phys(page_to_pfn(page)))
|
D | pgtable-3level.h | 258 #define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot)
|
D | pgtable.h | 213 #define mk_pte(page,prot) pfn_pte(page_to_pfn(page), prot)
|
/linux-4.4.14/init/ |
D | main.c | 624 page_to_pfn(virt_to_page((void *)initrd_start)) < min_low_pfn) { in start_kernel() 626 page_to_pfn(virt_to_page((void *)initrd_start)), in start_kernel()
|
/linux-4.4.14/drivers/net/ethernet/tile/ |
D | tilegx.c | 1120 addr = pfn_to_kaddr(page_to_pfn(page)); in alloc_percpu_mpipe_resources() 1136 addr = pfn_to_kaddr(page_to_pfn(page)); in alloc_percpu_mpipe_resources() 1391 headers = pfn_to_kaddr(page_to_pfn(headers_page)); in tile_net_init_egress() 1403 edescs = pfn_to_kaddr(page_to_pfn(edescs_page)); in tile_net_init_egress() 1414 equeue = pfn_to_kaddr(page_to_pfn(equeue_page)); in tile_net_init_egress() 1633 unsigned long pfn = page_to_pfn(skb_frag_page(f)); in tile_net_frag_buf()
|
D | tilepro.c | 1624 unsigned long pfn = page_to_pfn(skb_frag_page(f)); in tile_net_tx_frags()
|
/linux-4.4.14/drivers/gpu/drm/ttm/ |
D | ttm_memory.c | 564 if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL) in ttm_mem_global_alloc_page() 579 if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL) in ttm_mem_global_free_page()
|
D | ttm_bo_vm.c | 228 pfn = page_to_pfn(page); in ttm_bo_vm_fault()
|
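ttm_bo_vm_fault() above, like psb_gem_fault() and ion_vm_fault() earlier in this listing, looks up the backing page, converts it to a PFN and inserts it into the faulting process's page tables. A hedged sketch of that final step against the 4.4 API; the helper name is invented, and real handlers choose between vm_insert_pfn() and vm_insert_mixed() depending on the mapping type.

    #include <linux/mm.h>

    /*
     * Invented helper: final step of a 4.4-era .fault handler once the
     * backing page has been found.
     */
    static int example_fault_insert(struct vm_area_struct *vma,
                                    struct vm_fault *vmf, struct page *page)
    {
            unsigned long addr = (unsigned long)vmf->virtual_address;
            int ret;

            ret = vm_insert_mixed(vma, addr, page_to_pfn(page));
            if (ret && ret != -EBUSY)
                    return VM_FAULT_SIGBUS;
            return VM_FAULT_NOPAGE;
    }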
/linux-4.4.14/arch/nios2/include/asm/ |
D | pgtable.h | 241 #define mk_pte(page, prot) (pfn_pte(page_to_pfn(page), prot))
|
/linux-4.4.14/arch/x86/mm/ |
D | pageattr.c | 630 paravirt_alloc_pte(&init_mm, page_to_pfn(base)); in __split_large_page() 1745 start = page_to_pfn(pages[i]) << PAGE_SHIFT; in _set_pages_array() 1771 start = page_to_pfn(pages[i]) << PAGE_SHIFT; in _set_pages_array() 1820 start = page_to_pfn(pages[i]) << PAGE_SHIFT; in set_pages_array_wb()
|
D | pgtable.c | 58 paravirt_release_pte(page_to_pfn(pte)); in ___pte_free_tlb()
|
/linux-4.4.14/arch/powerpc/mm/ |
D | init_64.c | 181 if (pfn_valid(page_to_pfn((struct page *)start))) in vmemmap_populated()
|
D | mmu_context_iommu.c | 136 mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT; in mm_iommu_get()
|
D | mem.c | 426 __flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT); in flush_dcache_icache_page()
|
D | pgtable_64.c | 782 return pfn_pmd(page_to_pfn(page), pgprot); in mk_pmd()
|
/linux-4.4.14/fs/proc/ |
D | page.c | 140 } else if (is_zero_pfn(page_to_pfn(page))) in stable_page_flags()
|
D | vmcore.c | 281 buf = __va((page_to_pfn(page) << PAGE_SHIFT)); in mmap_vmcore_fault()
|
/linux-4.4.14/arch/sh/mm/ |
D | cache.c | 74 flush_cache_page(vma, vaddr, page_to_pfn(page)); in copy_to_user_page()
|
/linux-4.4.14/arch/powerpc/kernel/ |
D | dma.c | 188 pfn = page_to_pfn(virt_to_page(cpu_addr)); in dma_direct_mmap_coherent()
|
/linux-4.4.14/arch/metag/kernel/ |
D | dma.c | 357 page_to_pfn(c->vm_pages) + off, in dma_mmap()
|
/linux-4.4.14/arch/mips/include/asm/ |
D | io.h | 168 #define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
|
D | pgtable.h | 431 #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
|
/linux-4.4.14/drivers/gpu/drm/exynos/ |
D | exynos_drm_gem.c | 492 pfn = page_to_pfn(exynos_gem->pages[page_offset]); in exynos_drm_gem_fault()
|
/linux-4.4.14/drivers/staging/unisys/visornic/ |
D | visornic_main.c | 243 page_to_pfn(virt_to_page(skb->data + offset)); in visor_copy_fragsinfo_from_skb() 262 count = add_physinfo_entries(page_to_pfn( in visor_copy_fragsinfo_from_skb() 430 cmdrsp->net.rcvpost.frag.pi_pfn = page_to_pfn(virt_to_page(skb->data)); in post_skb()
|
/linux-4.4.14/drivers/gpu/drm/ |
D | drm_gem.c | 495 (page_to_pfn(p) >= 0x00100000UL)); in drm_gem_get_pages()
|
D | drm_vm.c | 631 page_to_pfn(virt_to_page(map->handle)), in drm_mmap_locked()
|
/linux-4.4.14/drivers/virtio/ |
D | virtio_balloon.c | 93 unsigned long pfn = page_to_pfn(page); in page_to_balloon_pfn()
|
/linux-4.4.14/drivers/block/ |
D | brd.c | 392 *pfn = page_to_pfn(page); in brd_direct_access()
|
/linux-4.4.14/arch/tile/kernel/ |
D | pci-dma.c | 144 PFN_PHYS(page_to_pfn(page)) + offset, size); in __dma_prep_page()
|
/linux-4.4.14/arch/arm/common/ |
D | dmabounce.c | 321 dma_addr = pfn_to_dma(dev, page_to_pfn(page)) + offset; in dmabounce_map_page()
|
/linux-4.4.14/sound/pci/emu10k1/ |
D | memory.c | 476 if (!p || (page_to_pfn(p) & ~(emu->dma_mask >> PAGE_SHIFT))) { in synth_alloc_pages()
|
/linux-4.4.14/arch/ia64/mm/ |
D | init.c | 501 args->nid, args->zone, page_to_pfn(map_start), in virtual_memmap_init()
|
/linux-4.4.14/drivers/gpu/drm/msm/ |
D | msm_gem.c | 220 pfn = page_to_pfn(pages[pgoff]); in msm_gem_fault()
|
/linux-4.4.14/drivers/block/xen-blkback/ |
D | blkback.c | 174 #define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page))) 293 (unsigned long) pfn_to_kaddr(page_to_pfn( in free_persistent_gnts()
|
/linux-4.4.14/virt/kvm/ |
D | kvm_main.c | 1321 *pfn = page_to_pfn(page[0]); in hva_to_pfn_fast() 1371 *pfn = page_to_pfn(page[0]); in hva_to_pfn_slow() 1565 kvm_release_pfn_clean(page_to_pfn(page)); in kvm_release_page_clean() 1580 kvm_release_pfn_dirty(page_to_pfn(page)); in kvm_release_page_dirty()
|
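hva_to_pfn_fast()/_slow() above pin a user page with get_user_pages and then report its PFN; vfio_iommu_type1.c later in this listing does the same in vaddr_get_pfn(). A hedged sketch against the 4.4-era GUP API; the helper name is invented and error handling is reduced to the essentials.

    #include <linux/mm.h>

    /*
     * Invented helper: resolve and pin one page of the current process
     * at 'addr' and report its PFN. The caller must eventually
     * put_page(pfn_to_page(*pfn)).
     */
    static int example_uva_to_pfn(unsigned long addr, int write,
                                  unsigned long *pfn)
    {
            struct page *page;
            int ret;

            ret = get_user_pages_fast(addr, 1, write, &page);
            if (ret != 1)
                    return ret < 0 ? ret : -EFAULT;

            *pfn = page_to_pfn(page);
            return 0;
    }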
/linux-4.4.14/drivers/lguest/ |
D | page_tables.c | 194 return page_to_pfn(page); in get_pfn()
|
/linux-4.4.14/arch/parisc/mm/ |
D | init.c | 611 set_max_mapnr(page_to_pfn(virt_to_page(high_memory - 1)) + 1); in mem_init()
|
/linux-4.4.14/arch/powerpc/kvm/ |
D | book3s_64_mmu_hv.c | 67 hpt = (unsigned long)pfn_to_kaddr(page_to_pfn(page)); in kvmppc_alloc_hpt() 532 pfn = page_to_pfn(page); in kvmppc_book3s_hv_page_fault()
|
/linux-4.4.14/drivers/staging/rdma/ehca/ |
D | ehca_mrmw.c | 1864 pgaddr = page_to_pfn(sg_page(*sg)) in ehca_set_pagebuf_user1() 1903 u64 pgaddr = page_to_pfn(sg_page(*sg)) << PAGE_SHIFT; in ehca_check_kpages_per_ate() 1933 pgaddr = (page_to_pfn(sg_page(*sg)) in ehca_set_pagebuf_user2()
|
/linux-4.4.14/drivers/vfio/ |
D | vfio_iommu_type1.c | 238 *pfn = page_to_pfn(page[0]); in vaddr_get_pfn()
|
/linux-4.4.14/drivers/mmc/host/ |
D | usdhi6rol0.c | 384 host->pg.page, page_to_pfn(host->pg.page), host->pg.mapped, in usdhi6_sg_map() 507 host->pg.page, page_to_pfn(host->pg.page), host->pg.mapped, in usdhi6_sg_advance()
|
/linux-4.4.14/drivers/hv/ |
D | hv_balloon.c | 1127 page_to_pfn(pg); in alloc_balloon_pages()
|
/linux-4.4.14/arch/sparc/kernel/ |
D | ldc.c | 2054 unsigned long base = page_to_pfn(sg_page(sg)) << PAGE_SHIFT; in sg_count_one() 2117 fill_cookies(&state, page_to_pfn(sg_page(s)) << PAGE_SHIFT, in ldc_map_sg()
|
/linux-4.4.14/drivers/scsi/ |
D | storvsc_drv.c | 1469 page_to_pfn(sg_page((cur_sgl))); in storvsc_queuecommand()
|
/linux-4.4.14/drivers/net/hyperv/ |
D | netvsc_drv.c | 305 pb[j].pfn = page_to_pfn(page); in fill_pg_buf()
|
/linux-4.4.14/drivers/gpu/drm/omapdrm/ |
D | omap_gem.c | 379 pfn = page_to_pfn(omap_obj->pages[pgoff]); in fault_1d()
|
/linux-4.4.14/fs/ |
D | exec.c | 252 flush_cache_page(bprm->vma, pos, page_to_pfn(page)); in flush_arg_page()
|
/linux-4.4.14/arch/x86/kvm/ |
D | svm.c | 673 wrmsrl(MSR_VM_HSAVE_PA, page_to_pfn(sd->save_area) << PAGE_SHIFT); in svm_hardware_enable() 883 iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT; in svm_hardware_setup() 1180 svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT; in svm_create_vcpu()
|
D | mmu.c | 2662 sp->role.level, gfn, page_to_pfn(pages[i]), in direct_pte_prefetch_many()
|
/linux-4.4.14/security/selinux/ |
D | selinuxfs.c | 262 page_to_pfn(status), in sel_mmap_handle_status()
|
/linux-4.4.14/drivers/net/xen-netback/ |
D | netback.c | 113 return page_to_pfn(queue->mmap_pages[idx]); in idx_to_pfn()
|
/linux-4.4.14/drivers/staging/rdma/hfi1/ |
D | file_ops.c | 844 paddr = page_to_pfn(page) << PAGE_SHIFT; in kvirt_to_phys()
|
/linux-4.4.14/drivers/gpu/drm/i915/ |
D | i915_gem.c | 2312 if (!i || page_to_pfn(page) != last_pfn + 1) { in i915_gem_object_get_pages_gtt() 2320 last_pfn = page_to_pfn(page); in i915_gem_object_get_pages_gtt()
|
/linux-4.4.14/drivers/infiniband/hw/qib/ |
D | qib_file_ops.c | 93 paddr = page_to_pfn(page) << PAGE_SHIFT; in cvt_kvaddr()
|
/linux-4.4.14/drivers/net/ethernet/natsemi/ |
D | ns83820.c | 1164 (long long)buf, (long) page_to_pfn(frag->page),
|
/linux-4.4.14/drivers/staging/rdma/ipath/ |
D | ipath_file_ops.c | 89 paddr = page_to_pfn(page) << PAGE_SHIFT; in cvt_kvaddr()
|
/linux-4.4.14/drivers/staging/comedi/ |
D | comedi_fops.c | 2240 page_to_pfn(virt_to_page(buf->virt_addr)), in comedi_mmap()
|
/linux-4.4.14/drivers/iommu/ |
D | intel-iommu.c | 167 return mm_to_dma_pfn(page_to_pfn(pg)); in page_to_dma_pfn()
|