pfns              797 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	uint64_t *pfns;
pfns              815 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	pfns = kvmalloc_array(ttm->num_pages, sizeof(*pfns), GFP_KERNEL);
pfns              816 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	if (unlikely(!pfns)) {
pfns              826 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	range->pfns = pfns;
pfns              858 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 		pages[i] = hmm_device_entry_to_page(range, pfns[i]);
pfns              861 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 			       i, pfns[i]);
pfns              877 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	kvfree(pfns);
pfns              902 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	WARN_ONCE(!gtt->range || !gtt->range->pfns,
pfns              909 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 		kvfree(gtt->range->pfns);
pfns              997 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 						      gtt->range->pfns[0]))
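
The amdgpu_ttm.c hits above trace one lifecycle of an HMM pfns array: it is sized to the ttm page count with kvmalloc_array(), attached to the hmm_range, translated entry by entry into struct page pointers once the fault completes, and kvfree()d when the userptr range is torn down (with WARN_ONCE guarding a missing array). Below is a minimal sketch of that allocate/translate/free flow, assuming the pre-5.7 HMM API in which hmm_range::pfns is a caller-owned uint64_t array; the function name, error codes, and the elided hmm_range_fault() call (whose arguments changed across these kernel versions) are illustrative, not the driver's actual code.

	#include <linux/mm.h>
	#include <linux/hmm.h>

	/* Illustrative only: mirrors the alloc -> fault -> translate -> free flow. */
	static int example_range_to_pages(struct hmm_range *range,
					  struct page **pages,
					  unsigned long npages)
	{
		uint64_t *pfns;
		unsigned long i;

		pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL);
		if (unlikely(!pfns))
			return -ENOMEM;
		range->pfns = pfns;

		/* ... hmm_range_fault() would populate pfns[] here ... */

		for (i = 0; i < npages; i++) {
			/* each entry decodes to its backing page, or NULL on failure */
			pages[i] = hmm_device_entry_to_page(range, pfns[i]);
			if (unlikely(!pages[i]))
				break;
		}

		kvfree(pfns);
		range->pfns = NULL;
		return (i == npages) ? 0 : -EFAULT;
	}
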
pfns              325 drivers/gpu/drm/nouveau/nouveau_dmem.c 	unsigned long pfns[1];
pfns              330 drivers/gpu/drm/nouveau/nouveau_dmem.c 	ret = nouveau_dmem_pages_alloc(drm, 1, pfns);
pfns              334 drivers/gpu/drm/nouveau/nouveau_dmem.c 	page = pfn_to_page(pfns[0]);
pfns              689 drivers/gpu/drm/nouveau/nouveau_dmem.c 		page = hmm_device_entry_to_page(range, range->pfns[i]);
pfns              693 drivers/gpu/drm/nouveau/nouveau_dmem.c 		if (!(range->pfns[i] & range->flags[HMM_PFN_DEVICE_PRIVATE])) {
pfns              699 drivers/gpu/drm/nouveau/nouveau_dmem.c 			range->pfns[i] = 0;
pfns              704 drivers/gpu/drm/nouveau/nouveau_dmem.c 		range->pfns[i] &= ((1UL << range->pfn_shift) - 1);
pfns              705 drivers/gpu/drm/nouveau/nouveau_dmem.c 		range->pfns[i] |= (addr >> PAGE_SHIFT) << range->pfn_shift;
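
The nouveau_dmem.c lines rewrite HMM entries in place for pages that migrated to VRAM: entries that do not carry the DEVICE_PRIVATE flag are zeroed, while device-private entries keep only their low flag bits (those below pfn_shift) and have the device address spliced into the pfn field. A hedged sketch of that bit manipulation follows; the device address is passed in directly because the VRAM-offset lookup is driver-specific and not shown in these lines.

	#include <linux/hmm.h>

	/* Illustrative: rewrite one range entry so its pfn field carries a
	 * device (VRAM) frame number instead of a system-memory pfn. */
	static void example_convert_entry(struct hmm_range *range, unsigned long i,
					  unsigned long device_addr)
	{
		if (!(range->pfns[i] & range->flags[HMM_PFN_DEVICE_PRIVATE])) {
			range->pfns[i] = 0;	/* not device-private; caller skips it */
			return;
		}

		/* keep only the flag bits below pfn_shift ... */
		range->pfns[i] &= ((1UL << range->pfn_shift) - 1);
		/* ... and put the device page frame number above them */
		range->pfns[i] |= (device_addr >> PAGE_SHIFT) << range->pfn_shift;
	}
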
pfns              691 drivers/gpu/drm/nouveau/nouveau_svm.c 		range.pfns = args.phys;
pfns              725 drivers/gpu/drm/nouveau/nouveau_svm.c 			     !(range.pfns[pi] & NVIF_VMM_PFNMAP_V0_V) ||
pfns              726 drivers/gpu/drm/nouveau/nouveau_svm.c 			    (!(range.pfns[pi] & NVIF_VMM_PFNMAP_V0_W) &&
pfns              781 drivers/iommu/iova.c 	unsigned long pfns[IOVA_MAG_SIZE];
pfns              812 drivers/iommu/iova.c 		struct iova *iova = private_find_iova(iovad, mag->pfns[i]);
pfns              842 drivers/iommu/iova.c 	for (i = mag->size - 1; mag->pfns[i] > limit_pfn; i--)
pfns              847 drivers/iommu/iova.c 	pfn = mag->pfns[i];
pfns              848 drivers/iommu/iova.c 	mag->pfns[i] = mag->pfns[--mag->size];
pfns              857 drivers/iommu/iova.c 	mag->pfns[mag->size++] = pfn;
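
In drivers/iommu/iova.c the pfns array is the storage of a fixed-size "magazine" that caches recently freed IOVA page frame numbers: frees push onto it, allocations pop from it, and a full or empty magazine falls back to the rbtree path (the private_find_iova() lookup above). A small sketch of the push/pop shape over such a fixed array; the real code additionally scans for an entry below limit_pfn before popping, which is omitted here, and the size constant is an assumption.

	#include <linux/types.h>

	#define EXAMPLE_MAG_SIZE 128	/* stands in for IOVA_MAG_SIZE; assumption */

	struct example_magazine {
		unsigned long size;
		unsigned long pfns[EXAMPLE_MAG_SIZE];
	};

	static bool example_mag_push(struct example_magazine *mag, unsigned long pfn)
	{
		if (mag->size == EXAMPLE_MAG_SIZE)
			return false;			/* full: free to the tree instead */
		mag->pfns[mag->size++] = pfn;
		return true;
	}

	static bool example_mag_pop(struct example_magazine *mag, unsigned long *pfn)
	{
		if (!mag->size)
			return false;			/* empty: allocate from the tree */
		*pfn = mag->pfns[--mag->size];
		return true;
	}
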
pfns              109 drivers/virtio/virtio_balloon.c 	__virtio32 pfns[VIRTIO_BALLOON_ARRAY_PFNS_MAX];
pfns              144 drivers/virtio/virtio_balloon.c 	sg_init_one(&sg, vb->pfns, sizeof(vb->pfns[0]) * vb->num_pfns);
pfns              156 drivers/virtio/virtio_balloon.c 			  __virtio32 pfns[], struct page *page)
pfns              167 drivers/virtio/virtio_balloon.c 		pfns[i] = cpu_to_virtio32(vb->vdev,
pfns              179 drivers/virtio/virtio_balloon.c 	num = min(num, ARRAY_SIZE(vb->pfns));
pfns              204 drivers/virtio/virtio_balloon.c 		set_page_pfns(vb, vb->pfns + vb->num_pfns, page);
pfns              243 drivers/virtio/virtio_balloon.c 	num = min(num, ARRAY_SIZE(vb->pfns));
pfns              253 drivers/virtio/virtio_balloon.c 		set_page_pfns(vb, vb->pfns + vb->num_pfns, page);
pfns              746 drivers/virtio/virtio_balloon.c 	set_page_pfns(vb, vb->pfns, newpage);
pfns              754 drivers/virtio/virtio_balloon.c 	set_page_pfns(vb, vb->pfns, page);
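
virtio_balloon.c keeps a fixed __virtio32 pfns[] staging buffer in struct virtio_balloon: set_page_pfns() expands one balloon page into consecutive 4 KiB guest frame numbers in guest endianness, and the sg_init_one() line above then hands the filled prefix to the device through a single scatterlist entry. A sketch of the fill step; the derived constant is re-declared locally because the driver defines it privately, so treat it as an assumption.

	#include <linux/mm.h>
	#include <linux/virtio_config.h>
	#include <linux/virtio_balloon.h>

	/* One kernel page expands into PAGE_SIZE >> VIRTIO_BALLOON_PFN_SHIFT
	 * balloon-sized (4 KiB) frames. */
	#define EXAMPLE_PAGES_PER_PAGE (PAGE_SIZE >> VIRTIO_BALLOON_PFN_SHIFT)

	static void example_set_page_pfns(struct virtio_device *vdev,
					  __virtio32 pfns[], struct page *page)
	{
		unsigned long base = page_to_pfn(page) <<
				     (PAGE_SHIFT - VIRTIO_BALLOON_PFN_SHIFT);
		unsigned int i;

		for (i = 0; i < EXAMPLE_PAGES_PER_PAGE; i++)
			pfns[i] = cpu_to_virtio32(vdev, base + i);
	}
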
pfns              733 drivers/xen/privcmd.c 	xen_pfn_t *pfns = NULL;
pfns              752 drivers/xen/privcmd.c 	pfns = kcalloc(kdata.num, sizeof(*pfns), GFP_KERNEL);
pfns              753 drivers/xen/privcmd.c 	if (!pfns) {
pfns              773 drivers/xen/privcmd.c 			pfns[i] = pfn + (i % XEN_PFN_PER_PAGE);
pfns              784 drivers/xen/privcmd.c 	set_xen_guest_handle(xdata.frame_list, pfns);
pfns              804 drivers/xen/privcmd.c 						 pfns, kdata.num, (int *)pfns,
pfns              814 drivers/xen/privcmd.c 				rc = pfns[i];
pfns              824 drivers/xen/privcmd.c 	kfree(pfns);
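
The privcmd.c lines build a frame list for the mmap-resource path: a pfns array is kcalloc()'d, each slot is filled with a Xen frame number derived from the backing page (stepping through the XEN_PFN_PER_PAGE sub-frames when the kernel page size exceeds Xen's 4 KiB), the array is handed to the hypercall via set_xen_guest_handle(), and the same buffer is later reused as an (int *) per-frame error array before being kfree()d. A sketch of just the fill step, with a hypothetical helper name:

	#include <linux/slab.h>
	#include <xen/page.h>

	/* Illustrative: build the frame list from already-pinned kernel pages. */
	static xen_pfn_t *example_fill_frame_list(struct page **pages,
						  unsigned int num)
	{
		xen_pfn_t *pfns;
		unsigned int i;

		pfns = kcalloc(num, sizeof(*pfns), GFP_KERNEL);
		if (!pfns)
			return NULL;

		for (i = 0; i < num; i++) {
			/* one kernel page holds XEN_PFN_PER_PAGE Xen frames */
			xen_pfn_t pfn = page_to_xen_pfn(pages[i / XEN_PFN_PER_PAGE]);

			pfns[i] = pfn + (i % XEN_PFN_PER_PAGE);
		}
		return pfns;
	}
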
pfns              193 drivers/xen/xlate_mmu.c 	xen_pfn_t *pfns;
pfns              201 drivers/xen/xlate_mmu.c 	info->pfns[info->idx++] = gfn;
pfns              218 drivers/xen/xlate_mmu.c 	xen_pfn_t *pfns;
pfns              230 drivers/xen/xlate_mmu.c 	pfns = kcalloc(nr_grant_frames, sizeof(pfns[0]), GFP_KERNEL);
pfns              231 drivers/xen/xlate_mmu.c 	if (!pfns) {
pfns              240 drivers/xen/xlate_mmu.c 		kfree(pfns);
pfns              244 drivers/xen/xlate_mmu.c 	data.pfns = pfns;
pfns              254 drivers/xen/xlate_mmu.c 		kfree(pfns);
pfns              259 drivers/xen/xlate_mmu.c 	*gfns = pfns;
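
xlate_mmu.c uses an accumulate-through-a-callback shape for the same kind of array: xen_xlate_map_ballooned_pages() (declared in xen-ops.h below) allocates a pfns buffer sized to nr_grant_frames, a per-page callback records each frame with info->pfns[info->idx++] = gfn, and on success ownership of the buffer passes to the caller through the **gfns out-parameter. A condensed, hypothetical sketch of that shape; the actual per-page walk and remap call are elided because their arguments are not visible in these lines.

	#include <linux/slab.h>
	#include <xen/page.h>

	struct example_remap_info {
		xen_pfn_t *pfns;
		unsigned int idx;
	};

	/* Called once per mapped page by the (elided) walk. */
	static void example_collect_gfn(xen_pfn_t gfn, void *data)
	{
		struct example_remap_info *info = data;

		info->pfns[info->idx++] = gfn;
	}

	static int example_map_frames(unsigned int nr_frames, xen_pfn_t **gfns)
	{
		struct example_remap_info info = { .idx = 0 };

		info.pfns = kcalloc(nr_frames, sizeof(info.pfns[0]), GFP_KERNEL);
		if (!info.pfns)
			return -ENOMEM;

		/* ... per-page walk invoking example_collect_gfn() goes here ... */

		*gfns = info.pfns;	/* caller now owns the array and must kfree() it */
		return 0;
	}
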
pfns              166 include/linux/hmm.h 	uint64_t		*pfns;
pfns              210 include/xen/xen-ops.h int xen_xlate_map_ballooned_pages(xen_pfn_t **pfns, void **vaddr,
pfns              261 mm/hmm.c       	uint64_t *pfns = range->pfns;
pfns              266 mm/hmm.c       		pfns[i] = range->values[HMM_PFN_ERROR];
pfns              289 mm/hmm.c       	uint64_t *pfns = range->pfns;
pfns              299 mm/hmm.c       		pfns[i] = range->values[HMM_PFN_NONE];
pfns              304 mm/hmm.c       					       &pfns[i]);
pfns              314 mm/hmm.c       				      uint64_t pfns, uint64_t cpu_flags,
pfns              332 mm/hmm.c       	pfns = (pfns & range->pfn_flags_mask) | range->default_flags;
pfns              335 mm/hmm.c       	if (!(pfns & range->flags[HMM_PFN_VALID]))
pfns              340 mm/hmm.c       		if (pfns & range->flags[HMM_PFN_DEVICE_PRIVATE]) {
pfns              341 mm/hmm.c       			*write_fault = pfns & range->flags[HMM_PFN_WRITE];
pfns              350 mm/hmm.c       	if ((pfns & range->flags[HMM_PFN_WRITE]) &&
pfns              358 mm/hmm.c       				 const uint64_t *pfns, unsigned long npages,
pfns              371 mm/hmm.c       		hmm_pte_need_fault(hmm_vma_walk, pfns[i], cpu_flags,
pfns              385 mm/hmm.c       	uint64_t *pfns;
pfns              389 mm/hmm.c       	pfns = &range->pfns[i];
pfns              390 mm/hmm.c       	hmm_range_need_fault(hmm_vma_walk, pfns, npages,
pfns              406 mm/hmm.c       		unsigned long end, uint64_t *pfns, pmd_t pmd)
pfns              416 mm/hmm.c       	hmm_range_need_fault(hmm_vma_walk, pfns, npages, cpu_flags,
pfns              430 mm/hmm.c       		pfns[i] = hmm_device_entry_from_pfn(range, pfn) | cpu_flags;
pfns              442 mm/hmm.c       		unsigned long end, uint64_t *pfns, pmd_t pmd);
pfns              559 mm/hmm.c       	uint64_t *pfns = range->pfns;
pfns              572 mm/hmm.c       		uint64_t *pfns;
pfns              576 mm/hmm.c       		pfns = &range->pfns[i];
pfns              578 mm/hmm.c       		hmm_range_need_fault(hmm_vma_walk, pfns, npages,
pfns              605 mm/hmm.c       		return hmm_vma_handle_pmd(walk, addr, end, &pfns[i], pmd);
pfns              622 mm/hmm.c       		r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, &pfns[i]);
pfns              673 mm/hmm.c       		uint64_t *pfns, cpu_flags;
pfns              681 mm/hmm.c       		pfns = &range->pfns[i];
pfns              684 mm/hmm.c       		hmm_range_need_fault(hmm_vma_walk, pfns, npages,
pfns              696 mm/hmm.c       			pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
pfns              744 mm/hmm.c       	orig_pfn = range->pfns[i];
pfns              745 mm/hmm.c       	range->pfns[i] = range->values[HMM_PFN_NONE];
pfns              757 mm/hmm.c       		range->pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
pfns              774 mm/hmm.c       			   uint64_t *pfns,
pfns              778 mm/hmm.c       	for (; addr < end; addr += PAGE_SIZE, pfns++)
pfns              779 mm/hmm.c       		*pfns = range->values[HMM_PFN_NONE];
pfns              916 mm/hmm.c       			hmm_pfns_clear(range, range->pfns,
pfns              942 mm/hmm.c       			hmm_pfns_clear(range, &range->pfns[i],
pfns              989 mm/hmm.c       		page = hmm_device_entry_to_page(range, range->pfns[i]);
pfns             1000 mm/hmm.c       		if (range->pfns[i] & range->flags[HMM_PFN_WRITE])
pfns             1019 mm/hmm.c       		page = hmm_device_entry_to_page(range, range->pfns[i]);
pfns             1027 mm/hmm.c       		if (range->pfns[i] & range->flags[HMM_PFN_WRITE])
pfns             1064 mm/hmm.c       	if (!range->pfns)
pfns             1072 mm/hmm.c       		page = hmm_device_entry_to_page(range, range->pfns[i]);
pfns             1077 mm/hmm.c       		if (range->pfns[i] & range->flags[HMM_PFN_WRITE]) {
pfns             1090 mm/hmm.c       		range->pfns[i] = range->values[HMM_PFN_NONE];
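
Across mm/hmm.c the pfns array does double duty: on input each entry carries the access flags the driver wants for that page (folded through pfn_flags_mask and default_flags), and on output it carries the resolved page frame number shifted by pfn_shift plus status bits, all indirected through the per-driver range->flags[] and range->values[] tables so drivers with different PTE layouts can share the walker. A sketch of the input-side decision the hmm_pte_need_fault() lines above implement, with the device-private special case left out for brevity; this is a reading of those lines, not a verbatim copy.

	#include <linux/hmm.h>

	/* Illustrative: decide whether one page must be faulted in, and whether it
	 * must be faulted writable, from the driver's desired flags for it. */
	static void example_need_fault(const struct hmm_range *range,
				       uint64_t desired, uint64_t cpu_flags,
				       bool *fault, bool *write_fault)
	{
		*fault = *write_fault = false;

		/* fold what the driver asked for on top of the range defaults */
		desired = (desired & range->pfn_flags_mask) | range->default_flags;

		if (!(desired & range->flags[HMM_PFN_VALID]))
			return;				/* page not wanted at all */

		if ((desired & range->flags[HMM_PFN_WRITE]) &&
		    !(cpu_flags & range->flags[HMM_PFN_WRITE])) {
			*write_fault = true;		/* want write, CPU PTE lacks it */
			*fault = true;
			return;
		}

		/* otherwise fault only if the CPU page table has no valid entry */
		*fault = !(cpu_flags & range->flags[HMM_PFN_VALID]);
	}
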
pfns              313 mm/memory_hotplug.c 		unsigned long pfns;
pfns              315 mm/memory_hotplug.c 		pfns = min(nr_pages, PAGES_PER_SECTION
pfns              317 mm/memory_hotplug.c 		err = sparse_add_section(nid, pfn, pfns, altmap);
pfns              320 mm/memory_hotplug.c 		pfn += pfns;
pfns              321 mm/memory_hotplug.c 		nr_pages -= pfns;
pfns              532 mm/memory_hotplug.c 		unsigned long pfns;
pfns              535 mm/memory_hotplug.c 		pfns = min(nr_pages, PAGES_PER_SECTION
pfns              537 mm/memory_hotplug.c 		__remove_section(pfn, pfns, map_offset, altmap);
pfns              538 mm/memory_hotplug.c 		pfn += pfns;
pfns              539 mm/memory_hotplug.c 		nr_pages -= pfns;
pfns              241 mm/sparse.c    		unsigned long pfns;
pfns              243 mm/sparse.c    		pfns = min(nr_pages, PAGES_PER_SECTION
pfns              246 mm/sparse.c    		subsection_mask_set(ms->usage->subsection_map, pfn, pfns);
pfns              249 mm/sparse.c    				pfns, subsection_map_index(pfn),
pfns              250 mm/sparse.c    				subsection_map_index(pfn + pfns - 1));
pfns              252 mm/sparse.c    		pfn += pfns;
pfns              253 mm/sparse.c    		nr_pages -= pfns;
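
In mm/memory_hotplug.c and mm/sparse.c, by contrast, pfns is not an array at all but a per-iteration count: each loop pass clamps the remaining nr_pages to what still fits before the next memory-section boundary, operates on that many frames (sparse_add_section(), __remove_section(), or the subsection-map update), then advances pfn and shrinks nr_pages. A worked sketch of that clamp-and-advance idiom, assuming the usual SPARSEMEM definitions where PAGES_PER_SECTION is a power of two and pfn & ~PAGE_SECTION_MASK is the offset into the current section; the callback is a stand-in for the per-chunk work.

	#include <linux/kernel.h>
	#include <linux/mmzone.h>

	static void example_for_each_section_chunk(unsigned long pfn,
						   unsigned long nr_pages,
						   void (*fn)(unsigned long pfn,
							      unsigned long nr))
	{
		while (nr_pages) {
			/* frames left before pfn crosses into the next section */
			unsigned long pfns = min(nr_pages,
				PAGES_PER_SECTION - (pfn & ~PAGE_SECTION_MASK));

			fn(pfn, pfns);
			pfn += pfns;
			nr_pages -= pfns;
		}
	}
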