Searched refs:PFN (Results 1 - 78 of 78) sorted by relevance

/linux-4.4.14/arch/s390/numa/
numa_mode.h:16 int (*__pfn_to_nid)(unsigned long pfn); /* PFN to node ID */
/linux-4.4.14/arch/m32r/mm/
mmu.S:37 ld r0, @(MDEVP_offset, r3) ; r0: PFN + ASID (MDEVP reg.)
44 ;; r0: PFN + ASID (MDEVP reg.)
47 ;; r0: PFN + ASID
76 ;; r0: PFN + ASID
84 or r0, r1 ; r0: PFN + ASID
104 ;; r0: PFN + ASID
109 ;; r0: PFN + ASID
131 ;; r0: PFN + ASID
135 ;; r0: PFN + ASID
177 ;; r0: PFN + ASID
196 ;; r0: PFN + ASID
200 ;; r0: PFN + ASID
/linux-4.4.14/arch/unicore32/include/asm/
memory.h:77 * PFN 0 == physical address 0.
79 * This is the PFN of the first RAM page in the kernel
95 * page_to_pfn(page) convert a struct page * to a PFN number
96 * pfn_to_page(pfn) convert a _valid_ PFN number to struct page *
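
The conversion these helpers are built on is a plain shift by PAGE_SHIFT in each direction. Below is a minimal userspace sketch of that arithmetic, assuming 4 KiB pages; the kernel's real page_to_pfn()/pfn_to_page() additionally go through the memmap (struct page array), which is not shown here.

    /* Minimal sketch (not kernel code): PFN <-> physical address arithmetic. */
    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12                 /* assumption: 4 KiB pages */

    static uint64_t phys_to_pfn(uint64_t paddr) { return paddr >> PAGE_SHIFT; }
    static uint64_t pfn_to_phys(uint64_t pfn)   { return pfn << PAGE_SHIFT; }

    int main(void)
    {
            uint64_t paddr = 0x12345678;

            printf("paddr %#llx -> PFN %#llx -> page base %#llx\n",
                   (unsigned long long)paddr,
                   (unsigned long long)phys_to_pfn(paddr),
                   (unsigned long long)pfn_to_phys(phys_to_pfn(paddr)));
            return 0;
    }
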
/linux-4.4.14/arch/nios2/include/asm/
pgtable-bits.h:18 * ignored........ C R W X G PFN............
/linux-4.4.14/arch/sparc/mm/
leon_mm.c:56 if (!_pfn_valid(PFN(ctxtbl))) { leon_swprobe()
60 PFN(ctxtbl)); leon_swprobe()
89 if (!_pfn_valid(PFN(ptr))) leon_swprobe()
112 if (!_pfn_valid(PFN(ptr))) { leon_swprobe()
115 PFN(ptr)); leon_swprobe()
140 if (!_pfn_valid(PFN(ptr))) leon_swprobe()
/linux-4.4.14/arch/avr32/mm/
tlb.c:40 SYSREG_BFEXT(PFN, tlbelo) >> 2, show_dtlb_entry()
58 printk("ID V G ASID VPN PFN AP SZ C B W D\n"); dump_dtlb()
122 * Caller is responsible for masking out non-PFN bits in page __flush_tlb_page()
311 seq_puts(tlb, "ID V G ASID VPN PFN AP SZ C B W D\n"); tlb_show()
339 SYSREG_BFEXT(PFN, tlbelo) >> 2, tlb_show()
/linux-4.4.14/arch/arm64/include/asm/
memory.h:119 * PFN 0 == physical address 0.
121 * This is the PFN of the first RAM page in the kernel
/linux-4.4.14/arch/tile/include/asm/
mmzone.h:34 * translate the high bits of the PFN to the node number.
pgtable.h:168 /* Just setting the PFN to zero suffices. */
307 * value and combine it with the PFN from the old PTE to get a new PTE.
/linux-4.4.14/arch/cris/include/asm/
page.h:40 /* On CRIS the PFN numbers don't start at 0 so we have to compensate */
/linux-4.4.14/arch/arm/mm/
mm.h:20 /* PFN alias flushing, for VIPT caches */
dma-mapping.c:190 * Translate the device's DMA mask to a PFN limit. This __dma_supported()
191 * PFN number includes the page which we can DMA to. __dma_supported()
/linux-4.4.14/arch/x86/include/asm/xen/
page.h:223 * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if
224 * we are foreign-mapping the MFN, and the other domain as m2p(MFN) == PFN.
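
A hedged sketch of the round-trip check described above: a PFN-to-MFN translation is only trusted if the reverse M2P lookup maps back to the same PFN; anything else suggests the MFN is foreign-mapped. The function-pointer parameters below are placeholders standing in for the kernel's pfn_to_mfn()/mfn_to_pfn(), not the real API.

    #include <stdbool.h>

    /* Placeholder translation callbacks (the kernel calls pfn_to_mfn() and
     * mfn_to_pfn() directly). */
    typedef unsigned long (*p2m_fn)(unsigned long pfn);
    typedef unsigned long (*m2p_fn)(unsigned long mfn);

    /* True when p2m and m2p agree for this PFN; false hints that the MFN
     * actually belongs to another domain (a foreign mapping). */
    static bool pfn_mfn_roundtrips(unsigned long pfn, p2m_fn p2m, m2p_fn m2p)
    {
            return m2p(p2m(pfn)) == pfn;
    }
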
/linux-4.4.14/mm/
page_owner.c:113 "PFN %lu Block %lu type %d %s Flags %s%s%s%s%s%s%s%s%s%s%s%s\n", print_page_owner()
166 /* Find a valid PFN or the start of a MAX_ORDER_NR_PAGES area */ read_page_owner()
205 /* Record the next PFN to read in the file offset */ read_page_owner()
page_isolation.c:147 * @start_pfn: The lower PFN of the range to be isolated.
148 * @end_pfn: The upper PFN of the range to be isolated.
compaction.c:542 * @start_pfn: The first PFN to start isolating.
543 * @end_pfn: The one-past-last PFN.
549 * Otherwise, function returns one-past-the-last PFN of isolated page
649 * @low_pfn: The first PFN to isolate
650 * @end_pfn: The one-past-the-last PFN to isolate, within same pageblock
655 * Returns zero if there is a fatal signal pending, otherwise PFN of the
853 * isolate_migratepages_range() - isolate migrate-able pages in a PFN range
855 * @start_pfn: The first PFN to start isolating.
856 * @end_pfn: The one-past-last PFN.
859 * Otherwise, function returns one-past-the-last PFN of isolated page
page_alloc.c:4795 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
4820 * @max_low_pfn: The highest PFN that will be passed to memblock_free_early_nid
4860 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
5001 * @start_pfn: The start PFN to start searching for holes
5002 * @end_pfn: The end PFN to stop searching for holes
5451 * find_min_pfn_with_active_regions - Find the minimum PFN registered
5453 * It returns the minimum PFN based on information provided via
5483 * Find the PFN the Movable zone begins in each node. Kernel memory
5611 * The usable PFN range for ZONE_MOVABLE is from for_each_node_state()
5680 * zone in each node and their holes is calculated. If the maximum PFN
6683 * @start: start PFN to allocate
6684 * @end: one-past-the-last PFN to allocate
6690 * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES
6695 * The PFN range must belong to a single zone.
6698 * pages which PFN is in [start, end) are allocated for the caller and
cma.c:65 * Find a PFN aligned to the specified order and return an offset represented in
memory.c:642 * is found. For example, we might have a PFN-mapped pte in
721 * set, and the vm_pgoff will point to the first PFN mapped: thus every special
1749 * raw PFN mappings, and do not have a "struct page" associated remap_pfn_range()
1969 * If the source page was a PFN mapping, we don't have cow_user_page()
3586 /* We cannot handle huge page PFN maps. Luckily they don't exist. */ __follow_pte()
3615 * follow_pfn - look up PFN at a user virtual address
3618 * @pfn: location to store found PFN
3620 * Only IO mappings and raw PFN mappings are allowed.
nommu.c:236 * follow_pfn - look up PFN at a user virtual address
239 * @pfn: location to store found PFN
241 * Only IO mappings and raw PFN mappings are allowed.
zsmalloc.c:86 * Object location (<PFN>, <obj_idx>) is encoded as
90 * page <PFN> it is stored in, so for each sub-page belonging
233 * Position of next free chunk (encodes <PFN, obj_idx>)
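
A sketch of the <PFN, obj_idx> packing idea those comments describe: the PFN occupies the high bits of an unsigned long handle and the object index the low bits. The index width below is an assumption for illustration; the real zsmalloc derives it from the maximum zspage size.

    #define OBJ_INDEX_BITS 10                       /* assumed width */
    #define OBJ_INDEX_MASK ((1UL << OBJ_INDEX_BITS) - 1)

    /* Pack a (PFN, obj_idx) pair into a single handle. */
    static unsigned long encode_obj(unsigned long pfn, unsigned long obj_idx)
    {
            return (pfn << OBJ_INDEX_BITS) | (obj_idx & OBJ_INDEX_MASK);
    }

    /* Recover the location from a handle. */
    static void decode_obj(unsigned long handle, unsigned long *pfn,
                           unsigned long *obj_idx)
    {
            *pfn = handle >> OBJ_INDEX_BITS;
            *obj_idx = handle & OBJ_INDEX_MASK;
    }
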
bootmem.c:330 bdebug("silent double reserve of PFN %lx\n", __reserve()
/linux-4.4.14/arch/sh/include/asm/
page.h:171 * PFN = physical frame number (ie PFN 0 == physical address 0)
172 * PFN_START is the PFN of the first page of RAM. By defining this we
/linux-4.4.14/arch/arm/include/asm/
memory.h:145 * PFN 0 == physical address 0.
323 * page_to_pfn(page) convert a struct page * to a PFN number
324 * pfn_to_page(pfn) convert a _valid_ PFN number to struct page *
/linux-4.4.14/arch/x86/xen/
p2m.c:28 * In short, these structures contain the Machine Frame Number (MFN) of the PFN.
44 * get the PFN value to match the MFN.
53 * return the PFN. If the PFN has IDENTITY_FRAME_BIT set we unmask that in
57 * PFN is that we could find ourselves where pfn_to_mfn(pfn)==pfn for a
116 * Hint at last populated PFN.
setup.c:221 * at the min_pfn PFN. xen_find_pfn_range()
326 * up to REMAP_SIZE MFNs and the start target PFN for doing the remap.
mmu.c:142 * if the PFN is in the linear mapped vaddr range, we can just use arbitrary_virt_to_machine()
1593 * If there is no MFN for this PFN then this page is initially
/linux-4.4.14/tools/vm/
page_owner_sort.c:6 * grep -v ^PFN page_owner_full.txt > page_owner.txt
/linux-4.4.14/arch/sparc/include/asm/
leon.h:253 #define PFN(x) ((x) >> PAGE_SHIFT) macro
254 #define _pfn_valid(pfn) ((pfn < last_valid_pfn) && (pfn >= PFN(phys_base)))
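
Spelled out in plain C, the pair above says a PFN is valid when it lies between the PFN of the platform's physical base address and the last RAM PFN. A sketch with illustrative values; phys_base, last_valid_pfn and PAGE_SHIFT are provided by the platform in the real code.

    #include <stdbool.h>

    #define PAGE_SHIFT 12                           /* assumption: 4 KiB pages */
    #define PFN(x)     ((x) >> PAGE_SHIFT)

    static unsigned long phys_base      = 0x40000000UL;  /* illustrative */
    static unsigned long last_valid_pfn = 0x80000UL;      /* illustrative */

    /* Same shape as _pfn_valid() above: PFN(phys_base) <= pfn < last_valid_pfn. */
    static bool pfn_in_ram(unsigned long pfn)
    {
            return pfn >= PFN(phys_base) && pfn < last_valid_pfn;
    }
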
/linux-4.4.14/include/uapi/linux/
virtio_balloon.h:38 /* Size of a PFN in the balloon interface. */
virtio_pci.h:52 /* A 32-bit r/w PFN for the currently selected queue */
/linux-4.4.14/include/linux/
virtio_mmio.h:92 /* Guest's PFN for the currently selected queue - Read Write */
mmzone.h:688 * is the first PFN that needs to be initialised.
1196 * pfn_valid() is meant to be able to tell if a given PFN has valid memmap
1207 * the zone and PFN linkages are still valid. This is expensive, but walkers
mm.h:129 #define VM_PFNMAP 0x00000400 /* Page-ranges managed without "struct page", just pure PFN */ set_max_mapnr()
156 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */ set_max_mapnr()
1756 * free_area_init_nodes() passing in the PFN each zone ends at. At a basic
hyperv.h:63 * The number of entries in the PFN array is determined by
/linux-4.4.14/arch/powerpc/include/asm/
pte-hash64-64k.h:8 #define _PAGE_4K_PFN 0x20000000 /* PFN is for a single 4k page */
pte-common.h:76 /* Location of the PFN in the PTE. Most 32-bit platforms use the same
pgtable.h:67 * Even if PTEs can be unsigned long long, a PFN is always an unsigned
/linux-4.4.14/arch/parisc/kernel/
head.S:110 load32 (1<<(KERNEL_INITIAL_ORDER-PAGE_SHIFT)),%r11 /* PFN count */
115 ldo (1<<PFN_PTE_SHIFT)(%r3),%r3 /* add one PFN */
entry.S:509 /* bitshift difference between a PFN (based on kernel's PAGE_SIZE)
510 * to a CPU TLB 4k PFN (4k => 12 bits to shift) */
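
The shift the comment describes can be written out directly: when the kernel page size is larger than 4 KiB, a kernel PFN is converted to a 4 KiB hardware-TLB PFN by shifting left by (PAGE_SHIFT - 12). A sketch assuming 16 KiB kernel pages.

    #define PAGE_SHIFT   14                 /* assumption: 16 KiB kernel pages */
    #define HW_TLB_SHIFT 12                 /* CPU TLB works on 4 KiB frames */

    /* One kernel page covers 2^(PAGE_SHIFT - HW_TLB_SHIFT) 4 KiB frames. */
    static unsigned long kernel_pfn_to_tlb_pfn(unsigned long pfn)
    {
            return pfn << (PAGE_SHIFT - HW_TLB_SHIFT);
    }
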
/linux-4.4.14/arch/x86/kernel/cpu/mtrr/
cleanup.c:86 printk(KERN_DEBUG "MTRR MAP PFN: %016llx - %016llx\n", x86_get_mtrr_mem_range()
121 printk(KERN_DEBUG "MTRR MAP PFN: %016llx - %016llx\n", x86_get_mtrr_mem_range()
131 printk(KERN_DEBUG "MTRR MAP PFN: %016llx - %016llx\n", x86_get_mtrr_mem_range()
/linux-4.4.14/drivers/xen/
swiotlb-xen.c:26 * translate the page frame numbers (PFN) to machine frame numbers (MFN)
29 * from different pools, which means there is no guarantee that PFN==MFN
30 * and PFN+1==MFN+1. Lastly with Xen 4.0, pages (in debug mode) are
xlate_mmu.c:120 /* info->err_ptr expect to have one error status per Xen PFN */ remap_pte_fn()
balloon.c:433 /* XENMEM_populate_physmap requires a PFN based on Xen increase_reservation()
/linux-4.4.14/drivers/net/wireless/brcm80211/brcmfmac/
cfg80211.h:245 * @flags: Bit field to control features of PFN such as sort criteria auto
247 * @rssi_margin: Margin to avoid jitter for choosing a PFN based on RSSI sort
cfg80211.c:3304 * PFN result doesn't have all the info which are
3331 brcmf_dbg(SCAN, "PFN NET LOST event. Do Nothing\n"); brcmf_notify_sched_scan_results()
3340 * PFN event is limited to fit 512 bytes so we may get brcmf_notify_sched_scan_results()
3344 brcmf_dbg(SCAN, "PFN NET FOUND event. count: %d\n", result_count); brcmf_notify_sched_scan_results()
/linux-4.4.14/include/trace/events/
hswadsp.h:176 TP_printk("stream %d ring addr 0x%x pages %d size 0x%x offset 0x%x PFN 0x%x",
/linux-4.4.14/arch/mips/include/asm/
pgtable-bits.h:16 * 6 bits to the left. That way we can convert the PFN into the
/linux-4.4.14/arch/tile/mm/
homecache.c:95 * - Accepts a 32-bit PFN rather than a 64-bit PA, which generally
pgtable.c:440 panic("set_pte(): out-of-range PFN and mode 0\n"); set_pte()
fault.c:173 panic("Hit migrating PTE (%#llx) and page PFN %#lx still migrating", wait_for_migration()
/linux-4.4.14/arch/unicore32/mm/
init.c:310 * the system, not the maximum PFN. bootmem_init()
/linux-4.4.14/arch/powerpc/mm/
dma-noncoherent.c:403 * Return the PFN for a given cpu virtual address returned by
pgtable_64.c:174 /* We don't support the 4K PFN hack with ioremap */ __ioremap_at()
/linux-4.4.14/arch/sh/mm/
cache-sh4.c:202 * PFN: Physical page number
/linux-4.4.14/arch/cris/mm/
fault.c:41 * occurred in, since we only get the PFN in R_MMU_CAUSE not the complete
/linux-4.4.14/fs/proc/
task_mmu.c:1230 * Bits 0-54 page frame number (PFN) if present
1240 * If the page is not present but in swap, then the PFN contains an
1242 * swap. Unmapped pages return a null PFN. This allows determining
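
Those /proc/PID/pagemap comments describe a format that can be decoded from userspace: one 64-bit entry per virtual page, with the PFN in bits 0-54 and a present bit at bit 63. A small sketch that reads the entry for one address in the current process; note that kernels of this vintage zero the PFN field for readers without CAP_SYS_ADMIN, so run it as root to see a nonzero PFN.

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>
    #include <unistd.h>
    #include <fcntl.h>

    int main(void)
    {
            long page_size = sysconf(_SC_PAGESIZE);
            char *buf = malloc(page_size);
            uint64_t entry;
            int fd;

            if (!buf)
                    return 1;
            buf[0] = 1;                     /* fault the page in */

            fd = open("/proc/self/pagemap", O_RDONLY);
            if (fd < 0)
                    return 1;

            /* One 8-byte entry per virtual page, indexed by vaddr / page size. */
            if (pread(fd, &entry, sizeof(entry),
                      ((uintptr_t)buf / page_size) * sizeof(entry)) != sizeof(entry))
                    return 1;

            if (entry & (1ULL << 63))       /* bit 63: page present */
                    printf("PFN %#llx\n",
                           (unsigned long long)(entry & ((1ULL << 55) - 1)));
            else
                    printf("page not present\n");

            close(fd);
            free(buf);
            return 0;
    }
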
/linux-4.4.14/drivers/block/
brd.c:498 * direct_access API needing 4k alignment, returning a PFN brd_alloc()
/linux-4.4.14/arch/x86/include/asm/
pgtable_types.h:212 /* Extracts the PFN from a (pte|pmd|pud|pgd)val_t of a 4KB page */
/linux-4.4.14/arch/arc/mm/
tlbex.S:263 and r3, r0, PTE_BITS_NON_RWX_IN_PD1 ; Extract PFN+cache bits from PTE
tlb.c:645 * - software page walker address split between PGD:PTE:PFN (typical
649 * Default Page Walker is 2 levels, PGD:PTE:PFN, which in THP regime
/linux-4.4.14/arch/mips/sgi-ip22/
ip28-berr.c:339 a = (a & 0x3f) << 6; /* PFN */ check_microtlb()
/linux-4.4.14/drivers/edac/
mpc85xx_edac.c:894 mpc85xx_mc_printk(mci, KERN_ERR, "PFN: %#8.8x\n", pfn); mpc85xx_mc_check()
898 mpc85xx_mc_printk(mci, KERN_ERR, "PFN out of range!\n"); mpc85xx_mc_check()
/linux-4.4.14/drivers/iommu/
tegra-smmu.c:904 dev_dbg(dev, "address bits: %u, PFN mask: %#lx\n", tegra_smmu_probe()
intel-iommu.c:3597 if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n", intel_unmap()
4463 pr_debug("Failed get IOVA for PFN %lx\n", intel_iommu_memory_notifier()
4471 pr_warn("Failed to split IOVA PFN [%lx-%lx]\n", intel_iommu_memory_notifier()
/linux-4.4.14/arch/parisc/include/asm/
pgtable.h:187 /* PFN_PTE_SHIFT defines the shift of a PTE value to access the PFN field */
/linux-4.4.14/drivers/hv/
hv_balloon.c:138 * The PFN number of the first page in the range.
139 * 40 bits is the architectural limit of a PFN
/linux-4.4.14/drivers/net/ethernet/chelsio/cxgb4/
t4fw_api.h:555 FW_FLOWC_MNEM_PFNVFN, /* PFN [15:8] VFN [7:0] */
1900 * VIID - [10:8] PFN, [7] VI Valid, [6:0] VI number
/linux-4.4.14/fs/
block_dev.c:459 * will tell the caller the PFN and the address of the memory. The address
461 * ioremap(), kmap() or similar. The PFN is suitable for inserting into
/linux-4.4.14/arch/alpha/kernel/
setup.c:283 return end >> PAGE_SHIFT; /* Return the PFN of the limit. */ get_mem_size_limit()
/linux-4.4.14/include/xen/interface/
xen.h:652 #define SIF_MOD_START_PFN (1<<3) /* Is mod_start a PFN? */
/linux-4.4.14/arch/arm/kernel/
head.S:616 mov r0, r8, lsr #PAGE_SHIFT @ convert to PFN
/linux-4.4.14/drivers/misc/vmw_vmci/
vmci_queue_pair.c:508 /* Fail allocation if PFN isn't supported by hypervisor. */ qp_alloc_ppn_set()
521 /* Fail allocation if PFN isn't supported by hypervisor. */ qp_alloc_ppn_set()
/linux-4.4.14/drivers/block/xen-blkback/
blkback.c:802 * assign map[..] with the PFN of the page in our domain with the xen_blkbk_map()
/linux-4.4.14/arch/openrisc/kernel/
head.S:786 * it's not the same as the PFN */
/linux-4.4.14/tools/perf/
builtin-kmem.c:1019 use_pfn ? "PFN" : "Page", live_page ? "Live" : "Total", __print_page_alloc_result()
/linux-4.4.14/arch/mips/mm/
tlbex.c:2372 /* clear all non-PFN bits */ check_pabits()
/linux-4.4.14/arch/tile/include/hv/
hypervisor.h:2633 /** Position of the PFN field within the PTE (subset of the PTFN). */
2637 /** Length of the PFN field within the PTE (subset of the PTFN). */

Completed in 2345 milliseconds