Searched refs:pages (Results 1 – 200 of 943) sorted by relevance

/linux-4.4.14/net/ceph/
pagevec.c
18 struct page **pages; in ceph_get_direct_page_vector() local
22 pages = kmalloc(sizeof(*pages) * num_pages, GFP_NOFS); in ceph_get_direct_page_vector()
23 if (!pages) in ceph_get_direct_page_vector()
29 num_pages - got, write_page, 0, pages + got); in ceph_get_direct_page_vector()
37 return pages; in ceph_get_direct_page_vector()
40 ceph_put_page_vector(pages, got, false); in ceph_get_direct_page_vector()
45 void ceph_put_page_vector(struct page **pages, int num_pages, bool dirty) in ceph_put_page_vector() argument
51 set_page_dirty_lock(pages[i]); in ceph_put_page_vector()
52 put_page(pages[i]); in ceph_put_page_vector()
54 kvfree(pages); in ceph_put_page_vector()
[all …]
/linux-4.4.14/mm/
percpu-vm.c
35 static struct page **pages; in pcpu_get_pages() local
36 size_t pages_size = pcpu_nr_units * pcpu_unit_pages * sizeof(pages[0]); in pcpu_get_pages()
40 if (!pages) in pcpu_get_pages()
41 pages = pcpu_mem_zalloc(pages_size); in pcpu_get_pages()
42 return pages; in pcpu_get_pages()
56 struct page **pages, int page_start, int page_end) in pcpu_free_pages() argument
63 struct page *page = pages[pcpu_page_idx(cpu, i)]; in pcpu_free_pages()
83 struct page **pages, int page_start, int page_end) in pcpu_alloc_pages() argument
91 struct page **pagep = &pages[pcpu_page_idx(cpu, i)]; in pcpu_alloc_pages()
102 __free_page(pages[pcpu_page_idx(cpu, i)]); in pcpu_alloc_pages()
[all …]
kmemcheck.c
11 int pages; in kmemcheck_alloc_shadow() local
14 pages = 1 << order; in kmemcheck_alloc_shadow()
28 for(i = 0; i < pages; ++i) in kmemcheck_alloc_shadow()
36 kmemcheck_hide_pages(page, pages); in kmemcheck_alloc_shadow()
42 int pages; in kmemcheck_free_shadow() local
48 pages = 1 << order; in kmemcheck_free_shadow()
50 kmemcheck_show_pages(page, pages); in kmemcheck_free_shadow()
54 for(i = 0; i < pages; ++i) in kmemcheck_free_shadow()
103 int pages; in kmemcheck_pagealloc_alloc() local
108 pages = 1 << order; in kmemcheck_pagealloc_alloc()
[all …]
gup.c
455 unsigned int gup_flags, struct page **pages, in __get_user_pages() argument
465 VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET)); in __get_user_pages()
487 pages ? &pages[i] : NULL); in __get_user_pages()
497 i = follow_hugetlb_page(mm, vma, pages, vmas, in __get_user_pages()
538 if (pages) { in __get_user_pages()
539 pages[i] = page; in __get_user_pages()
626 struct page **pages, in __get_user_pages_locked() argument
641 if (pages) in __get_user_pages_locked()
651 ret = __get_user_pages(tsk, mm, start, nr_pages, flags, pages, in __get_user_pages_locked()
663 if (!pages) in __get_user_pages_locked()
[all …]
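
The mm/gup.c matches above are the slow path behind get_user_pages_fast(); the following is only a sketch of how a 4.4-era caller typically pins and releases user pages (the names pin_user_range/unpin_user_range and their arguments are illustrative, not taken from the kernel), mirroring the rds_pin_pages() and ll_get_user_pages() patterns that appear later in these results:

/* Sketch: pin the user range [uaddr, uaddr + len) and hand the caller
 * the page array; release with unpin_user_range() when the I/O is done. */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>

static int pin_user_range(unsigned long uaddr, size_t len, int write,
			  struct page ***pagesp, int *npinned)
{
	int nr_pages = DIV_ROUND_UP(offset_in_page(uaddr) + len, PAGE_SIZE);
	struct page **pages;
	int got;

	pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	/* 4.4 signature: get_user_pages_fast(start, nr_pages, write, pages) */
	got = get_user_pages_fast(uaddr, nr_pages, write, pages);
	if (got < nr_pages) {
		while (got > 0)
			put_page(pages[--got]);
		kfree(pages);
		return -EFAULT;
	}

	*pagesp = pages;
	*npinned = got;
	return 0;
}

static void unpin_user_range(struct page **pages, int npinned, bool dirty)
{
	int i;

	for (i = 0; i < npinned; i++) {
		if (dirty)
			set_page_dirty_lock(pages[i]);
		put_page(pages[i]);
	}
	kfree(pages);
}
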
frame_vector.c
112 struct page **pages; in put_vaddr_frames() local
116 pages = frame_vector_pages(vec); in put_vaddr_frames()
122 if (WARN_ON(IS_ERR(pages))) in put_vaddr_frames()
125 put_page(pages[i]); in put_vaddr_frames()
144 struct page **pages; in frame_vector_to_pages() local
152 pages = (struct page **)nums; in frame_vector_to_pages()
154 pages[i] = pfn_to_page(nums[i]); in frame_vector_to_pages()
170 struct page **pages; in frame_vector_to_pfns() local
174 pages = (struct page **)(vec->ptrs); in frame_vector_to_pfns()
175 nums = (unsigned long *)pages; in frame_vector_to_pfns()
[all …]
mprotect.c
69 unsigned long pages = 0; in change_pte_range() local
110 pages++; in change_pte_range()
126 pages++; in change_pte_range()
133 return pages; in change_pte_range()
143 unsigned long pages = 0; in change_pmd_range() local
170 pages += HPAGE_PMD_NR; in change_pmd_range()
182 pages += this_pages; in change_pmd_range()
190 return pages; in change_pmd_range()
199 unsigned long pages = 0; in change_pud_range() local
206 pages += change_pmd_range(vma, pud, addr, next, newprot, in change_pud_range()
[all …]
swap_state.c
258 void free_pages_and_swap_cache(struct page **pages, int nr) in free_pages_and_swap_cache() argument
260 struct page **pagep = pages; in free_pages_and_swap_cache()
405 unsigned int pages, max_pages, last_ra; in swapin_nr_pages() local
417 pages = atomic_xchg(&swapin_readahead_hits, 0) + 2; in swapin_nr_pages()
418 if (pages == 2) { in swapin_nr_pages()
425 pages = 1; in swapin_nr_pages()
429 while (roundup < pages) in swapin_nr_pages()
431 pages = roundup; in swapin_nr_pages()
434 if (pages > max_pages) in swapin_nr_pages()
435 pages = max_pages; in swapin_nr_pages()
[all …]
percpu-km.c
52 struct page *pages; in pcpu_create_chunk() local
59 pages = alloc_pages(GFP_KERNEL, order_base_2(nr_pages)); in pcpu_create_chunk()
60 if (!pages) { in pcpu_create_chunk()
66 pcpu_set_page_chunk(nth_page(pages, i), chunk); in pcpu_create_chunk()
68 chunk->data = pages; in pcpu_create_chunk()
69 chunk->base_addr = page_address(pages) - pcpu_group_offsets[0]; in pcpu_create_chunk()
process_vm_access.c
33 static int process_vm_rw_pages(struct page **pages, in process_vm_rw_pages() argument
41 struct page *page = *pages++; in process_vm_rw_pages()
90 / sizeof(struct pages *); in process_vm_rw_single_vec()
98 int pages = min(nr_pages, max_pages_per_loop); in process_vm_rw_single_vec() local
102 pages = get_user_pages_unlocked(task, mm, pa, pages, in process_vm_rw_single_vec()
104 if (pages <= 0) in process_vm_rw_single_vec()
107 bytes = pages * PAGE_SIZE - start_offset; in process_vm_rw_single_vec()
116 nr_pages -= pages; in process_vm_rw_single_vec()
117 pa += pages * PAGE_SIZE; in process_vm_rw_single_vec()
118 while (pages) in process_vm_rw_single_vec()
[all …]
mincore.c
173 static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *vec) in do_mincore() argument
189 end = min(vma->vm_end, addr + (pages << PAGE_SHIFT)); in do_mincore()
224 unsigned long pages; in SYSCALL_DEFINE3() local
236 pages = len >> PAGE_SHIFT; in SYSCALL_DEFINE3()
237 pages += (offset_in_page(len)) != 0; in SYSCALL_DEFINE3()
239 if (!access_ok(VERIFY_WRITE, vec, pages)) in SYSCALL_DEFINE3()
247 while (pages) { in SYSCALL_DEFINE3()
253 retval = do_mincore(start, min(pages, PAGE_SIZE), tmp); in SYSCALL_DEFINE3()
262 pages -= retval; in SYSCALL_DEFINE3()
swap.c
354 void put_pages_list(struct list_head *pages) in put_pages_list() argument
356 while (!list_empty(pages)) { in put_pages_list()
359 victim = list_entry(pages->prev, struct page, lru); in put_pages_list()
380 struct page **pages) in get_kernel_pages() argument
388 pages[seg] = kmap_to_page(kiov[seg].iov_base); in get_kernel_pages()
389 page_cache_get(pages[seg]); in get_kernel_pages()
407 int get_kernel_page(unsigned long start, int write, struct page **pages) in get_kernel_page() argument
414 return get_kernel_pages(&kiov, 1, write, pages); in get_kernel_page()
428 struct page *page = pvec->pages[i]; in pagevec_lru_move_fn()
443 release_pages(pvec->pages, pvec->nr, pvec->cold); in pagevec_lru_move_fn()
[all …]
readahead.c
62 struct list_head *pages) in read_cache_pages_invalidate_pages() argument
66 while (!list_empty(pages)) { in read_cache_pages_invalidate_pages()
67 victim = list_to_page(pages); in read_cache_pages_invalidate_pages()
83 int read_cache_pages(struct address_space *mapping, struct list_head *pages, in read_cache_pages() argument
89 while (!list_empty(pages)) { in read_cache_pages()
90 page = list_to_page(pages); in read_cache_pages()
101 read_cache_pages_invalidate_pages(mapping, pages); in read_cache_pages()
112 struct list_head *pages, unsigned nr_pages) in read_pages() argument
121 ret = mapping->a_ops->readpages(filp, mapping, pages, nr_pages); in read_pages()
123 put_pages_list(pages); in read_pages()
[all …]
bootmem.c
57 static unsigned long __init bootmap_bytes(unsigned long pages) in bootmap_bytes() argument
59 unsigned long bytes = DIV_ROUND_UP(pages, 8); in bootmap_bytes()
68 unsigned long __init bootmem_bootmap_pages(unsigned long pages) in bootmem_bootmap_pages() argument
70 unsigned long bytes = bootmap_bytes(pages); in bootmem_bootmap_pages()
141 unsigned long __init init_bootmem(unsigned long start, unsigned long pages) in init_bootmem() argument
143 max_low_pfn = pages; in init_bootmem()
145 return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages); in init_bootmem()
175 unsigned long *map, start, end, pages, cur, count = 0; in free_all_bootmem_core() local
234 pages = bdata->node_low_pfn - bdata->node_min_pfn; in free_all_bootmem_core()
235 pages = bootmem_bootmap_pages(pages); in free_all_bootmem_core()
[all …]
cma.c
79 unsigned long pages) in cma_bitmap_pages_to_bits() argument
81 return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit; in cma_bitmap_pages_to_bits()
439 bool cma_release(struct cma *cma, const struct page *pages, unsigned int count) in cma_release() argument
443 if (!cma || !pages) in cma_release()
446 pr_debug("%s(page %p)\n", __func__, (void *)pages); in cma_release()
448 pfn = page_to_pfn(pages); in cma_release()
457 trace_cma_release(pfn, pages, count); in cma_release()
Kconfig
161 such as direct mapping pages cannot be migrated. So the corresponding
236 with the reduced number of transparent huge pages that could be used
238 pages enlisted as being part of memory balloon devices avoids the
249 Allows the compaction of memory for the allocation of huge pages.
259 Allows the migration of the physical location of pages of processes
261 two situations. The first is on NUMA systems to put pages nearer
263 pages as migration can relocate pages to satisfy a huge page
318 mergeable. When it finds pages of identical content, it replaces
332 from userspace allocation. Keeping a user from writing to low pages
361 tristate "HWPoison pages injector"
[all …]
huge_memory.c
474 unsigned long pages; in pages_to_scan_store() local
476 err = kstrtoul(buf, 10, &pages); in pages_to_scan_store()
477 if (err || !pages || pages > UINT_MAX) in pages_to_scan_store()
480 khugepaged_pages_to_scan = pages; in pages_to_scan_store()
1054 struct page **pages; in do_huge_pmd_wp_page_fallback() local
1058 pages = kmalloc(sizeof(struct page *) * HPAGE_PMD_NR, in do_huge_pmd_wp_page_fallback()
1060 if (unlikely(!pages)) { in do_huge_pmd_wp_page_fallback()
1066 pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE | in do_huge_pmd_wp_page_fallback()
1069 if (unlikely(!pages[i] || in do_huge_pmd_wp_page_fallback()
1070 mem_cgroup_try_charge(pages[i], mm, GFP_KERNEL, in do_huge_pmd_wp_page_fallback()
[all …]
Kconfig.debug
18 Unmap pages from the kernel linear mapping after free_pages().
23 fill the pages with poison patterns after free_pages() and verify
27 a resume because free pages are not saved to the suspend image.
vmalloc.c
120 unsigned long end, pgprot_t prot, struct page **pages, int *nr) in vmap_pte_range() argument
133 struct page *page = pages[*nr]; in vmap_pte_range()
146 unsigned long end, pgprot_t prot, struct page **pages, int *nr) in vmap_pmd_range() argument
156 if (vmap_pte_range(pmd, addr, next, prot, pages, nr)) in vmap_pmd_range()
163 unsigned long end, pgprot_t prot, struct page **pages, int *nr) in vmap_pud_range() argument
173 if (vmap_pmd_range(pud, addr, next, prot, pages, nr)) in vmap_pud_range()
186 pgprot_t prot, struct page **pages) in vmap_page_range_noflush() argument
198 err = vmap_pud_range(pgd, addr, next, prot, pages, &nr); in vmap_page_range_noflush()
207 pgprot_t prot, struct page **pages) in vmap_page_range() argument
211 ret = vmap_page_range_noflush(start, end, prot, pages); in vmap_page_range()
[all …]
zsmalloc.c
347 static int zs_zpool_shrink(void *pool, unsigned int pages, in zs_zpool_shrink() argument
1053 struct page *pages[2], int off, int size) in __zs_map_object()
1055 BUG_ON(map_vm_area(area->vm, PAGE_KERNEL, pages)); in __zs_map_object()
1061 struct page *pages[2], int off, int size) in __zs_unmap_object()
1091 struct page *pages[2], int off, int size) in __zs_map_object()
1108 addr = kmap_atomic(pages[0]); in __zs_map_object()
1111 addr = kmap_atomic(pages[1]); in __zs_map_object()
1119 struct page *pages[2], int off, int size) in __zs_unmap_object()
1140 addr = kmap_atomic(pages[0]); in __zs_unmap_object()
1143 addr = kmap_atomic(pages[1]); in __zs_unmap_object()
[all …]
cma_debug.c
125 int pages = val; in cma_free_write() local
128 return cma_free_mem(cma, pages); in cma_free_write()
157 int pages = val; in cma_alloc_write() local
160 return cma_alloc_mem(cma, pages); in cma_alloc_write()
balloon_compaction.c
65 list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) { in balloon_page_dequeue()
97 if (unlikely(list_empty(&b_dev_info->pages) && in balloon_page_dequeue()
128 list_add(&page->lru, &b_dev_info->pages); in __putback_balloon_page()
frontswap.c
395 unsigned long pages = 0, pages_to_unuse = 0; in __frontswap_unuse_pages() local
401 pages = pages_to_unuse = total_pages_to_unuse; in __frontswap_unuse_pages()
403 pages = si_frontswap_pages; in __frontswap_unuse_pages()
407 if (security_vm_enough_memory_mm(current->mm, pages)) { in __frontswap_unuse_pages()
411 vm_unacct_memory(pages); in __frontswap_unuse_pages()
/linux-4.4.14/fs/isofs/
compress.c
45 struct page **pages, unsigned poffset, in zisofs_uncompress_block() argument
71 if (!pages[i]) in zisofs_uncompress_block()
73 memset(page_address(pages[i]), 0, PAGE_CACHE_SIZE); in zisofs_uncompress_block()
74 flush_dcache_page(pages[i]); in zisofs_uncompress_block()
75 SetPageUptodate(pages[i]); in zisofs_uncompress_block()
121 if (pages[curpage]) { in zisofs_uncompress_block()
122 stream.next_out = page_address(pages[curpage]) in zisofs_uncompress_block()
174 if (pages[curpage]) { in zisofs_uncompress_block()
175 flush_dcache_page(pages[curpage]); in zisofs_uncompress_block()
176 SetPageUptodate(pages[curpage]); in zisofs_uncompress_block()
[all …]
/linux-4.4.14/arch/x86/xen/
grant-table.c
119 struct page **pages; in xlated_setup_gnttab_pages() local
127 pages = kcalloc(nr_grant_frames, sizeof(pages[0]), GFP_KERNEL); in xlated_setup_gnttab_pages()
128 if (!pages) in xlated_setup_gnttab_pages()
133 kfree(pages); in xlated_setup_gnttab_pages()
136 rc = alloc_xenballooned_pages(nr_grant_frames, pages); in xlated_setup_gnttab_pages()
140 kfree(pages); in xlated_setup_gnttab_pages()
145 pfns[i] = page_to_pfn(pages[i]); in xlated_setup_gnttab_pages()
147 vaddr = vmap(pages, nr_grant_frames, 0, PAGE_KERNEL); in xlated_setup_gnttab_pages()
151 free_xenballooned_pages(nr_grant_frames, pages); in xlated_setup_gnttab_pages()
152 kfree(pages); in xlated_setup_gnttab_pages()
[all …]
/linux-4.4.14/drivers/gpu/drm/ttm/
ttm_page_alloc.c
220 static int set_pages_array_wb(struct page **pages, int addrinarray) in set_pages_array_wb() argument
226 unmap_page_from_agp(pages[i]); in set_pages_array_wb()
231 static int set_pages_array_wc(struct page **pages, int addrinarray) in set_pages_array_wc() argument
237 map_page_into_agp(pages[i]); in set_pages_array_wc()
242 static int set_pages_array_uc(struct page **pages, int addrinarray) in set_pages_array_uc() argument
248 map_page_into_agp(pages[i]); in set_pages_array_uc()
276 static void ttm_pages_put(struct page *pages[], unsigned npages) in ttm_pages_put() argument
279 if (set_pages_array_wb(pages, npages)) in ttm_pages_put()
282 __free_page(pages[i]); in ttm_pages_put()
448 static int ttm_set_pages_caching(struct page **pages, in ttm_set_pages_caching() argument
[all …]
ttm_tt.c
53 ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(void*)); in ttm_tt_alloc_page_directory()
58 ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages, in ttm_dma_tt_alloc_page_directory()
59 sizeof(*ttm->ttm.pages) + in ttm_dma_tt_alloc_page_directory()
62 ttm->cpu_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages); in ttm_dma_tt_alloc_page_directory()
123 drm_clflush_pages(ttm->pages, ttm->num_pages); in ttm_tt_set_caching()
126 cur_page = ttm->pages[i]; in ttm_tt_set_caching()
142 cur_page = ttm->pages[j]; in ttm_tt_set_caching()
201 if (!ttm->pages) { in ttm_tt_init()
212 drm_free_large(ttm->pages); in ttm_tt_fini()
213 ttm->pages = NULL; in ttm_tt_fini()
[all …]
ttm_page_alloc_dma.c
272 static int set_pages_array_wb(struct page **pages, int addrinarray) in set_pages_array_wb() argument
278 unmap_page_from_agp(pages[i]); in set_pages_array_wb()
283 static int set_pages_array_wc(struct page **pages, int addrinarray) in set_pages_array_wc() argument
289 map_page_into_agp(pages[i]); in set_pages_array_wc()
294 static int set_pages_array_uc(struct page **pages, int addrinarray) in set_pages_array_uc() argument
300 map_page_into_agp(pages[i]); in set_pages_array_uc()
307 struct page **pages, unsigned cpages) in ttm_set_pages_caching() argument
312 r = set_pages_array_uc(pages, cpages); in ttm_set_pages_caching()
318 r = set_pages_array_wc(pages, cpages); in ttm_set_pages_caching()
382 struct page *pages[], unsigned npages) in ttm_dma_pages_put() argument
[all …]
/linux-4.4.14/fs/ramfs/
file-nommu.c
69 struct page *pages; in ramfs_nommu_expand_for_mapping() local
88 pages = alloc_pages(gfp, order); in ramfs_nommu_expand_for_mapping()
89 if (!pages) in ramfs_nommu_expand_for_mapping()
96 split_page(pages, order); in ramfs_nommu_expand_for_mapping()
100 __free_page(pages + loop); in ramfs_nommu_expand_for_mapping()
104 data = page_address(pages); in ramfs_nommu_expand_for_mapping()
109 struct page *page = pages + loop; in ramfs_nommu_expand_for_mapping()
128 __free_page(pages + loop++); in ramfs_nommu_expand_for_mapping()
211 struct page **pages = NULL, **ptr, *page; in ramfs_nommu_get_unmapped_area() local
231 pages = kcalloc(lpages, sizeof(struct page *), GFP_KERNEL); in ramfs_nommu_get_unmapped_area()
[all …]
/linux-4.4.14/fs/squashfs/
page_actor.c
32 if (actor->next_page == actor->pages) in cache_next_page()
44 int pages, int length) in squashfs_page_actor_init() argument
51 actor->length = length ? : pages * PAGE_CACHE_SIZE; in squashfs_page_actor_init()
53 actor->pages = pages; in squashfs_page_actor_init()
73 return actor->pageaddr = actor->next_page == actor->pages ? NULL : in direct_next_page()
84 int pages, int length) in squashfs_page_actor_init_special() argument
91 actor->length = length ? : pages * PAGE_CACHE_SIZE; in squashfs_page_actor_init_special()
93 actor->pages = pages; in squashfs_page_actor_init_special()
file_direct.c
24 int pages, struct page **page);
37 int i, n, pages, missing_pages, bytes, res = -ENOMEM; in squashfs_readpage_block() local
45 pages = end_index - start_index + 1; in squashfs_readpage_block()
47 page = kmalloc_array(pages, sizeof(void *), GFP_KERNEL); in squashfs_readpage_block()
55 actor = squashfs_page_actor_init_special(page, pages, 0); in squashfs_readpage_block()
60 for (missing_pages = 0, i = 0, n = start_index; i < pages; i++, n++) { in squashfs_readpage_block()
85 res = squashfs_read_cache(target_page, block, bsize, pages, in squashfs_readpage_block()
101 pageaddr = kmap_atomic(page[pages - 1]); in squashfs_readpage_block()
107 for (i = 0; i < pages; i++) { in squashfs_readpage_block()
124 for (i = 0; i < pages; i++) { in squashfs_readpage_block()
[all …]
page_actor.h
14 int pages; member
20 int pages, int length) in squashfs_page_actor_init() argument
27 actor->length = length ? : pages * PAGE_CACHE_SIZE; in squashfs_page_actor_init()
29 actor->pages = pages; in squashfs_page_actor_init()
42 return actor->next_page == actor->pages ? NULL : in squashfs_next_page()
60 int pages; member
cache.c
219 for (j = 0; j < cache->pages; j++) in squashfs_cache_delete()
258 cache->pages = block_size >> PAGE_CACHE_SHIFT; in squashfs_cache_init()
259 cache->pages = cache->pages ? cache->pages : 1; in squashfs_cache_init()
271 entry->data = kcalloc(cache->pages, sizeof(void *), GFP_KERNEL); in squashfs_cache_init()
277 for (j = 0; j < cache->pages; j++) { in squashfs_cache_init()
286 cache->pages, 0); in squashfs_cache_init()
418 int pages = (length + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; in squashfs_read_table() local
427 data = kcalloc(pages, sizeof(void *), GFP_KERNEL); in squashfs_read_table()
433 actor = squashfs_page_actor_init(data, pages, length); in squashfs_read_table()
439 for (i = 0; i < pages; i++, buffer += PAGE_CACHE_SIZE) in squashfs_read_table()
/linux-4.4.14/fs/proc/
meminfo.c
34 unsigned long pages[NR_LRU_LISTS]; in meminfo_proc_show() local
52 pages[lru] = global_page_state(NR_LRU_BASE + lru); in meminfo_proc_show()
71 pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE]; in meminfo_proc_show()
152 K(pages[LRU_ACTIVE_ANON] + pages[LRU_ACTIVE_FILE]), in meminfo_proc_show()
153 K(pages[LRU_INACTIVE_ANON] + pages[LRU_INACTIVE_FILE]), in meminfo_proc_show()
154 K(pages[LRU_ACTIVE_ANON]), in meminfo_proc_show()
155 K(pages[LRU_INACTIVE_ANON]), in meminfo_proc_show()
156 K(pages[LRU_ACTIVE_FILE]), in meminfo_proc_show()
157 K(pages[LRU_INACTIVE_FILE]), in meminfo_proc_show()
158 K(pages[LRU_UNEVICTABLE]), in meminfo_proc_show()
/linux-4.4.14/arch/mips/mm/
gup.c
38 int write, struct page **pages, int *nr) in gup_pte_range() argument
54 pages[*nr] = page; in gup_pte_range()
72 int write, struct page **pages, int *nr) in gup_huge_pmd() argument
89 pages[*nr] = page; in gup_huge_pmd()
102 int write, struct page **pages, int *nr) in gup_pmd_range() argument
126 if (!gup_huge_pmd(pmd, addr, next, write, pages,nr)) in gup_pmd_range()
129 if (!gup_pte_range(pmd, addr, next, write, pages,nr)) in gup_pmd_range()
138 int write, struct page **pages, int *nr) in gup_huge_pud() argument
155 pages[*nr] = page; in gup_huge_pud()
168 int write, struct page **pages, int *nr) in gup_pud_range() argument
[all …]
/linux-4.4.14/drivers/gpu/drm/virtio/
virtgpu_object.c
38 if (bo->pages) in virtio_gpu_ttm_bo_destroy()
123 struct page **pages = bo->tbo.ttm->pages; in virtio_gpu_object_get_sg_table() local
127 if (bo->pages) in virtio_gpu_object_get_sg_table()
132 bo->pages = kmalloc(sizeof(struct sg_table), GFP_KERNEL); in virtio_gpu_object_get_sg_table()
133 if (!bo->pages) in virtio_gpu_object_get_sg_table()
136 ret = sg_alloc_table_from_pages(bo->pages, pages, nr_pages, 0, in virtio_gpu_object_get_sg_table()
142 kfree(bo->pages); in virtio_gpu_object_get_sg_table()
143 bo->pages = NULL; in virtio_gpu_object_get_sg_table()
149 sg_free_table(bo->pages); in virtio_gpu_object_free_sg_table()
150 kfree(bo->pages); in virtio_gpu_object_free_sg_table()
[all …]
/linux-4.4.14/net/rds/
info.c
65 struct page **pages; member
122 iter->addr = kmap_atomic(*iter->pages); in rds_info_copy()
127 "bytes %lu\n", *iter->pages, iter->addr, in rds_info_copy()
140 iter->pages++; in rds_info_copy()
167 struct page **pages = NULL; in rds_info_getsockopt() local
191 pages = kmalloc(nr_pages * sizeof(struct page *), GFP_KERNEL); in rds_info_getsockopt()
192 if (!pages) { in rds_info_getsockopt()
196 ret = get_user_pages_fast(start, nr_pages, 1, pages); in rds_info_getsockopt()
215 iter.pages = pages; in rds_info_getsockopt()
238 for (i = 0; pages && i < nr_pages; i++) in rds_info_getsockopt()
[all …]
rdma.c
158 struct page **pages, int write) in rds_pin_pages() argument
162 ret = get_user_pages_fast(user_addr, nr_pages, write, pages); in rds_pin_pages()
166 put_page(pages[ret]); in rds_pin_pages()
178 struct page **pages = NULL; in __rds_rdma_map() local
207 pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL); in __rds_rdma_map()
208 if (!pages) { in __rds_rdma_map()
241 ret = rds_pin_pages(args->vec.addr, nr_pages, pages, 1); in __rds_rdma_map()
256 sg_set_page(&sg[i], pages[i], PAGE_SIZE, 0); in __rds_rdma_map()
309 kfree(pages); in __rds_rdma_map()
554 struct page **pages = NULL; in rds_cmsg_rdma_args() local
[all …]
/linux-4.4.14/arch/s390/mm/
gup.c
21 unsigned long end, int write, struct page **pages, int *nr) in gup_pte_range() argument
46 pages[*nr] = page; in gup_pte_range()
55 unsigned long end, int write, struct page **pages, int *nr) in gup_huge_pmd() argument
73 pages[*nr] = page; in gup_huge_pmd()
106 unsigned long end, int write, struct page **pages, int *nr) in gup_pmd_range() argument
139 write, pages, nr)) in gup_pmd_range()
142 write, pages, nr)) in gup_pmd_range()
150 unsigned long end, int write, struct page **pages, int *nr) in gup_pud_range() argument
165 if (!gup_pmd_range(pudp, pud, addr, next, write, pages, nr)) in gup_pud_range()
177 struct page **pages) in __get_user_pages_fast() argument
[all …]
/linux-4.4.14/drivers/gpu/drm/udl/
Dudl_gem.c113 if (!obj->pages) in udl_gem_fault()
116 page = obj->pages[page_offset]; in udl_gem_fault()
132 struct page **pages; in udl_gem_get_pages() local
134 if (obj->pages) in udl_gem_get_pages()
137 pages = drm_gem_get_pages(&obj->base); in udl_gem_get_pages()
138 if (IS_ERR(pages)) in udl_gem_get_pages()
139 return PTR_ERR(pages); in udl_gem_get_pages()
141 obj->pages = pages; in udl_gem_get_pages()
149 drm_free_large(obj->pages); in udl_gem_put_pages()
150 obj->pages = NULL; in udl_gem_put_pages()
[all …]
udl_dmabuf.c
92 if (!obj->pages) { in udl_map_dma_buf()
101 obj->sg = drm_prime_pages_to_sg(obj->pages, page_count); in udl_map_dma_buf()
231 obj->pages = drm_malloc_ab(npages, sizeof(struct page *)); in udl_prime_create()
232 if (obj->pages == NULL) { in udl_prime_create()
237 drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages); in udl_prime_create()
/linux-4.4.14/arch/tile/kernel/
vdso.c
52 static struct page **vdso_setup(void *vdso_kbase, unsigned int pages) in vdso_setup() argument
57 pagelist = kzalloc(sizeof(struct page *) * (pages + 1), GFP_KERNEL); in vdso_setup()
59 for (i = 0; i < pages - 1; i++) { in vdso_setup()
64 pagelist[pages - 1] = virt_to_page(vdso_data); in vdso_setup()
65 pagelist[pages] = NULL; in vdso_setup()
127 unsigned long pages; in setup_vdso_pages() local
138 pages = vdso_pages; in setup_vdso_pages()
142 pages = vdso32_pages; in setup_vdso_pages()
150 if (pages == 0) in setup_vdso_pages()
154 (pages << PAGE_SHIFT) + in setup_vdso_pages()
[all …]
module.c
39 struct page **pages; in module_alloc() local
46 pages = kmalloc(npages * sizeof(struct page *), GFP_KERNEL); in module_alloc()
47 if (pages == NULL) in module_alloc()
50 pages[i] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM); in module_alloc()
51 if (!pages[i]) in module_alloc()
59 area->pages = pages; in module_alloc()
61 if (map_vm_area(area, prot_rwx, pages)) { in module_alloc()
70 __free_page(pages[i]); in module_alloc()
71 kfree(pages); in module_alloc()
/linux-4.4.14/arch/x86/mm/
gup.c
72 unsigned long end, int write, struct page **pages, int *nr) in gup_pte_range() argument
100 pages[*nr] = page; in gup_pte_range()
118 unsigned long end, int write, struct page **pages, int *nr) in gup_huge_pmd() argument
138 pages[*nr] = page; in gup_huge_pmd()
151 int write, struct page **pages, int *nr) in gup_pmd_range() argument
182 if (!gup_huge_pmd(pmd, addr, next, write, pages, nr)) in gup_pmd_range()
185 if (!gup_pte_range(pmd, addr, next, write, pages, nr)) in gup_pmd_range()
194 unsigned long end, int write, struct page **pages, int *nr) in gup_huge_pud() argument
214 pages[*nr] = page; in gup_huge_pud()
227 int write, struct page **pages, int *nr) in gup_pud_range() argument
[all …]
pageattr.c
41 struct page **pages; member
59 void update_page_count(int level, unsigned long pages) in update_page_count() argument
63 direct_pages_count[level] += pages; in update_page_count()
208 int in_flags, struct page **pages) in cpa_flush_array() argument
231 addr = (unsigned long)page_address(pages[i]); in cpa_flush_array()
1158 struct page *page = cpa->pages[cpa->curpage]; in __change_page_attr()
1275 struct page *page = cpa->pages[cpa->curpage]; in cpa_process_alias()
1368 struct page **pages) in change_page_attr_set_clr() argument
1419 cpa.pages = pages; in change_page_attr_set_clr()
1456 cpa.flags, pages); in change_page_attr_set_clr()
[all …]
init_64.c
405 unsigned long pages = 0, next; in phys_pte_init() local
429 pages++; in phys_pte_init()
436 pages++; in phys_pte_init()
441 update_page_count(PG_LEVEL_4K, pages); in phys_pte_init()
450 unsigned long pages = 0, next; in phys_pmd_init() local
492 pages++; in phys_pmd_init()
500 pages++; in phys_pmd_init()
517 update_page_count(PG_LEVEL_2M, pages); in phys_pmd_init()
525 unsigned long pages = 0, next; in phys_pud_init() local
565 pages++; in phys_pud_init()
[all …]
/linux-4.4.14/arch/sparc/mm/
gup.c
21 unsigned long end, int write, struct page **pages, int *nr) in gup_pte_range() argument
62 pages[*nr] = page; in gup_pte_range()
70 unsigned long end, int write, struct page **pages, in gup_huge_pmd() argument
88 pages[*nr] = page; in gup_huge_pmd()
119 int write, struct page **pages, int *nr) in gup_pmd_range() argument
133 write, pages, nr)) in gup_pmd_range()
136 pages, nr)) in gup_pmd_range()
144 int write, struct page **pages, int *nr) in gup_pud_range() argument
156 if (!gup_pmd_range(pud, addr, next, write, pages, nr)) in gup_pud_range()
164 struct page **pages) in __get_user_pages_fast() argument
[all …]
/linux-4.4.14/Documentation/vm/
hugetlbpage.txt
22 persistent hugetlb pages in the kernel's huge page pool. It also displays
23 information about the number of free, reserved and surplus huge pages and the
38 HugePages_Total is the size of the pool of huge pages.
39 HugePages_Free is the number of huge pages in the pool that are not yet
41 HugePages_Rsvd is short for "reserved," and is the number of huge pages for
43 but no allocation has yet been made. Reserved huge pages
45 huge page from the pool of huge pages at fault time.
46 HugePages_Surp is short for "surplus," and is the number of huge pages in
48 maximum number of surplus huge pages is controlled by
55 pages in the kernel's huge page pool. "Persistent" huge pages will be
[all …]
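
The hugetlbpage.txt lines above describe the pool counters exported through /proc/meminfo; the userspace fragment below is only a sketch of how a mapping draws from that pool, and it assumes the pool was pre-populated (e.g. via /proc/sys/vm/nr_hugepages) and a 2 MB default huge page size:

/* Sketch: back an anonymous mapping with one page from the hugetlb pool. */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#define HUGE_LEN (2UL * 1024 * 1024)    /* assumed default huge page size */

int main(void)
{
	void *p = mmap(NULL, HUGE_LEN, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap(MAP_HUGETLB)");    /* pool empty or hugetlb not configured */
		return 1;
	}
	memset(p, 0, HUGE_LEN);    /* faulting it in decrements HugePages_Free */
	munmap(p, HUGE_LEN);
	return 0;
}

While such a mapping exists, the HugePages_Rsvd and HugePages_Free counters described above move accordingly.
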
unevictable-lru.txt
15 - vmscan's handling of unevictable pages.
17 (*) mlock()'d pages.
24 - Migrating mlocked pages.
25 - Compacting mlocked pages.
39 pages.
54 pages and to hide these pages from vmscan. This mechanism is based on a patch
60 main memory will have over 32 million 4k pages in a single zone. When a large
61 fraction of these pages are not evictable for any reason [see below], vmscan
63 of pages that are evictable. This can result in a situation where all CPUs are
67 The unevictable list addresses the following classes of unevictable pages:
[all …]
idle_page_tracking.txt
3 The idle page tracking feature allows to track which memory pages are being
27 Only accesses to user memory pages are tracked. These are pages mapped to a
28 process address space, page cache and buffer pages, swap cache pages. For other
29 page types (e.g. SLAB pages) an attempt to mark a page idle is silently ignored,
30 and hence such pages are never reported idle.
32 For huge pages the idle flag is set only on the head page, so one has to read
33 /proc/kpageflags in order to correctly count idle huge pages.
40 That said, in order to estimate the amount of pages that are not used by a
43 1. Mark all the workload's pages as idle by setting corresponding bits in
44 /sys/kernel/mm/page_idle/bitmap. The pages can be found by reading
[all …]
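
The idle_page_tracking.txt lines above outline the measurement procedure; below is a minimal sketch of step 1, marking a range of page frames idle through /sys/kernel/mm/page_idle/bitmap (the PFN range is illustrative here, whereas a real tool would take PFNs from /proc/<pid>/pagemap as the documentation says):

/* Sketch: set the idle bit for "count" page frames starting at "pfn_start".
 * The bitmap is an array of 64-bit words, one bit per PFN, and must be
 * accessed in 8-byte units; writing a full word marks 64 PFNs at once. */
#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

static int mark_idle(uint64_t pfn_start, uint64_t count)
{
	int fd = open("/sys/kernel/mm/page_idle/bitmap", O_WRONLY);
	uint64_t word = ~0ULL;
	uint64_t first = pfn_start / 64, words = (count + 63) / 64;

	if (fd < 0)
		return -1;
	for (uint64_t i = 0; i < words; i++)
		if (pwrite(fd, &word, sizeof(word), (first + i) * sizeof(word)) != sizeof(word))
			break;
	close(fd);
	return 0;
}

Reading the same offsets back after the measurement interval returns set bits only for pages that were not referenced; attempts to mark non-user pages are silently ignored, as the text above notes.
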
ksm.txt
9 have been registered with it, looking for pages of identical content which
18 KSM only merges anonymous (private) pages, never pagecache (file) pages.
19 KSM's merged pages were originally locked into kernel memory, but can now
20 be swapped out just like other user pages (but sharing is broken when they
28 that advice and restore unshared pages: whereupon KSM unmerges whatever
38 cannot contain any pages which KSM could actually merge; even if
53 pages_to_scan - how many present pages to scan before ksmd goes to sleep
61 merge_across_nodes - specifies if pages from different numa nodes can be merged.
62 When set to 0, ksm merges only pages which physically
64 lower latency to access of shared pages. Systems with more
[all …]
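
The ksm.txt lines above note that only anonymous regions registered through madvise() are candidates for merging; a minimal userspace sketch (assuming CONFIG_KSM and /sys/kernel/mm/ksm/run set to 1):

/* Sketch: fill an anonymous region with identical pages and offer it to ksmd. */
#define _GNU_SOURCE
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 64 * 4096;
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED)
		return 1;
	memset(buf, 0x5a, len);                 /* identical content, so pages can merge */
	madvise(buf, len, MADV_MERGEABLE);      /* register the range with ksmd */
	/* madvise(buf, len, MADV_UNMERGEABLE) would later unshare the pages */
	return 0;
}
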
zswap.txt
3 Zswap is a lightweight compressed cache for swap pages. It takes pages that are
25 Zswap evicts pages from compressed cache on an LRU basis to the backing swap
37 When zswap is disabled at runtime it will stop storing pages that are
39 back into memory all of the pages stored in the compressed pool. The
40 pages stored in zswap will remain in the compressed pool until they are
42 pages out of the compressed pool, a swapoff on the swap device(s) will
43 fault back into memory all swapped out pages, including those in the
48 Zswap receives pages for compression through the Frontswap API and is able to
49 evict pages from its own compressed pool on an LRU basis and write them back to
56 pages are freed. The pool is not preallocated. By default, a zpool of type
[all …]
page_migration
4 Page migration allows the moving of the physical location of pages between
7 system rearranges the physical location of those pages.
10 by moving pages near to the processor where the process accessing that memory
14 pages are located through the MF_MOVE and MF_MOVE_ALL options while setting
15 a new memory policy via mbind(). The pages of process can also be relocated
17 migrate_pages function call takes two sets of nodes and moves pages of a
24 pages of a process are located. See also the numa_maps documentation in the
29 administrator may detect the situation and move the pages of the process
32 through user space processes that move pages. A special function call
33 "move_pages" allows the moving of individual pages within a process.
[all …]
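
The page_migration lines above mention the move_pages() call for relocating individual pages of a process; the sketch below moves one page of the calling process to node 0 and assumes a NUMA-enabled kernel plus libnuma's numaif.h wrapper (link with -lnuma):

/* Sketch: migrate one freshly touched page to NUMA node 0 and report where
 * it ended up (status holds the node number, or a negative errno). */
#define _GNU_SOURCE
#include <numaif.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	long psize = sysconf(_SC_PAGESIZE);
	void *page;
	int node = 0, status = -1;

	if (posix_memalign(&page, psize, psize))
		return 1;
	*(volatile char *)page = 1;             /* fault the page in first */

	if (move_pages(0 /* self */, 1, &page, &node, &status, MPOL_MF_MOVE))
		perror("move_pages");
	else
		printf("page now on node %d\n", status);
	free(page);
	return 0;
}
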
balance
21 mapped pages from the direct mapped pool, instead of falling back on
23 or not). A similar argument applies to highmem and direct mapped pages.
24 OTOH, if there is a lot of free dma pages, it is preferable to satisfy
29 _total_ number of free pages fell below 1/64 th of total memory. With the
38 at init time how many free pages we should aim for while balancing any
51 fancy, we could assign different weights to free pages in different
55 it becomes less significant to consider the free dma pages while
64 fall back into regular zone. This also makes sure that HIGHMEM pages
73 highmem pages. kswapd looks at the zone_wake_kswapd field in the zone
82 the number of pages falls below watermark[WMARK_MIN], the hysteric field
[all …]
transhuge.txt
8 using huge pages for the backing of virtual memory with huge pages
40 working on the regular pages and their respective regular pmd/pte
44 regular pages should be gracefully allocated instead and mixed in
50 backed by regular pages should be relocated on hugepages
55 to avoid unmovable pages to fragment all the memory but such a tweak
108 to never try to defrag memory and simply fallback to regular pages
111 we use hugepages later instead of regular pages. This isn't always
139 You can also control how many pages khugepaged should scan at each
154 The khugepaged progress can be seen in the number of pages collapsed:
162 max_ptes_none specifies how many extra small pages (that are
[all …]
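
The transhuge.txt lines above discuss when the kernel falls back to regular pages and how khugepaged collapses them later; a small sketch of the madvise-driven mode (assumes /sys/kernel/mm/transparent_hugepage/enabled is set to "madvise" or "always"):

/* Sketch: hint that an aligned anonymous region should be backed by THP. */
#define _GNU_SOURCE
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

#define HPAGE_2M (2UL * 1024 * 1024)

int main(void)
{
	void *buf;

	if (posix_memalign(&buf, HPAGE_2M, 4 * HPAGE_2M))
		return 1;
	madvise(buf, 4 * HPAGE_2M, MADV_HUGEPAGE);   /* khugepaged may also collapse it later */
	memset(buf, 0, 4 * HPAGE_2M);                /* touching it can fault in 2 MB pages */
	free(buf);
	return 0;
}
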
hwpoison.txt
11 * High level machine check handler. Handles pages reported by the
15 * This focusses on pages detected as corrupted in the background.
22 * Handles page cache pages in various states. The tricky part
38 pages.
68 Note some pages are always handled as late kill.
112 some early filtering to avoid corrupted unintended pages in test suites.
127 Only handle memory failures to pages associated with the file system defined
133 Limit injection to pages owned by memgroup. Specified by inode number
146 page-types -p `pidof usemem` --hwpoison # poison its pages
151 When specified, only poison pages if ((page_flags & mask) == value).
[all …]
pagemap.txt
32 swap. Unmapped pages return a null PFN. This allows determining
33 precisely which pages are mapped (or in swap) and comparing mapped
34 pages between processes.
92 An order N block has 2^N physically contiguous pages, with the BUDDY flag
97 A compound page with order N consists of 2^N physically contiguous pages.
100 pages are hugeTLB pages (Documentation/vm/hugetlbpage.txt), the SLUB etc.
102 only huge/giga pages are made visible to end users.
113 identical memory pages dynamically shared between one or more processes
116 contiguous pages which construct transparent hugepages
143 eg. ramfs pages, shmctl(SHM_LOCK) and mlock() memory segments
[all …]
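
The pagemap.txt lines above describe the per-page 64-bit entries in /proc/<pid>/pagemap; the helper below is a sketch that translates one virtual address of the current process to a PFN (bit 63 = present, bits 0-54 = PFN; recent kernels require CAP_SYS_ADMIN to see the PFN field):

/* Sketch: look up the PFN backing a virtual address via /proc/self/pagemap. */
#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

static uint64_t addr_to_pfn(const void *addr)
{
	long psize = sysconf(_SC_PAGESIZE);
	uint64_t entry = 0;
	off_t off = ((uintptr_t)addr / psize) * sizeof(entry);
	int fd = open("/proc/self/pagemap", O_RDONLY);

	if (fd < 0)
		return 0;
	if (pread(fd, &entry, sizeof(entry), off) != sizeof(entry))
		entry = 0;
	close(fd);

	if (!(entry & (1ULL << 63)))            /* page not present (or swapped) */
		return 0;
	return entry & ((1ULL << 55) - 1);      /* bits 0-54 hold the PFN */
}
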
frontswap.txt
1 Frontswap provides a "transcendent memory" interface for swap pages.
3 swapped pages are saved in RAM (or a RAM-like device) instead of a swap disk.
23 An "init" prepares the device to receive frontswap pages associated
29 from transcendent memory and an "invalidate_area" will remove ALL pages
45 store frontswap pages to more completely manage its memory usage.
69 providing a clean, dynamic interface to read and write swap pages to
73 useful for write-balancing for some RAM-like devices). Swap pages (and
74 evicted page-cache pages) are a great use for this kind of slower-than-RAM-
77 and write -- and indirectly "name" -- the pages.
83 In the single kernel case, aka "zcache", pages are compressed and
[all …]
zsmalloc.txt
8 (0-order) pages, it would suffer from very high fragmentation --
12 To overcome these issues, zsmalloc allocates a bunch of 0-order pages
14 pages act as a single higher-order page i.e. an object can span 0-order
15 page boundaries. The code refers to these linked pages as a single entity
58 pages_used: the number of pages allocated for the class
59 pages_per_zspage: the number of 0-order pages to make a zspage
cleancache.txt
8 pages that the kernel's pageframe replacement algorithm (PFRA) would like
36 Thus, as its name implies, cleancache is not suitable for dirty pages.
37 Cleancache has complete discretion over what pages to preserve and what
38 pages to discard and when.
48 an "invalidate_inode" will invalidate all pages associated with the specified
50 all pages in all files specified by the given pool id and also surrender
58 same UUID will receive the same pool id, thus allowing the pages to
102 effectiveness of the pagecache. Clean pagecache pages are
104 addressable to the kernel); fetching those pages later avoids "refaults"
113 balancing for some RAM-like devices). Evicted page-cache pages (and
[all …]
/linux-4.4.14/arch/m68k/mm/
sun3kmap.c
49 unsigned long type, int pages) in do_pmeg_mapin() argument
55 while(pages) { in do_pmeg_mapin()
59 pages--; in do_pmeg_mapin()
68 int pages; in sun3_ioremap() local
87 pages = size / PAGE_SIZE; in sun3_ioremap()
91 while(pages) { in sun3_ioremap()
95 if(seg_pages > pages) in sun3_ioremap()
96 seg_pages = pages; in sun3_ioremap()
100 pages -= seg_pages; in sun3_ioremap()
/linux-4.4.14/drivers/gpu/drm/
drm_scatter.c
55 for (i = 0; i < entry->pages; i++) { in drm_sg_cleanup()
87 unsigned long pages, i, j; in drm_legacy_sg_alloc() local
104 pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE; in drm_legacy_sg_alloc()
105 DRM_DEBUG("size=%ld pages=%ld\n", request->size, pages); in drm_legacy_sg_alloc()
107 entry->pages = pages; in drm_legacy_sg_alloc()
108 entry->pagelist = kcalloc(pages, sizeof(*entry->pagelist), GFP_KERNEL); in drm_legacy_sg_alloc()
114 entry->busaddr = kcalloc(pages, sizeof(*entry->busaddr), GFP_KERNEL); in drm_legacy_sg_alloc()
121 entry->virtual = drm_vmalloc_dma(pages << PAGE_SHIFT); in drm_legacy_sg_alloc()
132 memset(entry->virtual, 0, pages << PAGE_SHIFT); in drm_legacy_sg_alloc()
139 for (i = (unsigned long)entry->virtual, j = 0; j < pages; in drm_legacy_sg_alloc()
[all …]
ati_pcigart.c
62 unsigned long pages; in drm_ati_pcigart_cleanup() local
75 pages = (entry->pages <= max_pages) in drm_ati_pcigart_cleanup()
76 ? entry->pages : max_pages; in drm_ati_pcigart_cleanup()
78 for (i = 0; i < pages; i++) { in drm_ati_pcigart_cleanup()
103 unsigned long pages; in drm_ati_pcigart_init() local
144 pages = (entry->pages <= max_real_pages) in drm_ati_pcigart_init()
145 ? entry->pages : max_real_pages; in drm_ati_pcigart_init()
154 for (i = 0; i < pages; i++) { in drm_ati_pcigart_init()
drm_cache.c
58 static void drm_cache_flush_clflush(struct page *pages[], in drm_cache_flush_clflush() argument
65 drm_clflush_page(*pages++); in drm_cache_flush_clflush()
71 drm_clflush_pages(struct page *pages[], unsigned long num_pages) in drm_clflush_pages() argument
76 drm_cache_flush_clflush(pages, num_pages); in drm_clflush_pages()
86 struct page *page = pages[i]; in drm_clflush_pages()
drm_agpsupport.c
201 unsigned long pages; in drm_agp_alloc() local
209 pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE; in drm_agp_alloc()
211 if (!(memory = agp_allocate_memory(dev->agp->bridge, pages, type))) { in drm_agp_alloc()
219 entry->pages = pages; in drm_agp_alloc()
369 drm_free_agp(entry->memory, entry->pages); in drm_agp_free()
449 drm_free_agp(entry->memory, entry->pages); in drm_agp_clear()
470 struct page **pages, in drm_agp_bind_pages() argument
489 mem->pages[i] = pages[i]; in drm_agp_bind_pages()
drm_gem.c
465 struct page *p, **pages; in drm_gem_get_pages() local
479 pages = drm_malloc_ab(npages, sizeof(struct page *)); in drm_gem_get_pages()
480 if (pages == NULL) in drm_gem_get_pages()
487 pages[i] = p; in drm_gem_get_pages()
498 return pages; in drm_gem_get_pages()
502 page_cache_release(pages[i]); in drm_gem_get_pages()
504 drm_free_large(pages); in drm_gem_get_pages()
516 void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages, in drm_gem_put_pages() argument
531 set_page_dirty(pages[i]); in drm_gem_put_pages()
534 mark_page_accessed(pages[i]); in drm_gem_put_pages()
[all …]
drm_memory.c
71 && (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >= in agp_remap()
87 phys_page_map = (agpmem->memory->pages + (offset - agpmem->bound) / PAGE_SIZE); in agp_remap()
97 void drm_free_agp(struct agp_memory * handle, int pages) in drm_free_agp() argument
drm_vma_manager.c
143 unsigned long pages) in drm_vma_offset_lookup_locked() argument
168 if (offset < start + pages) in drm_vma_offset_lookup_locked()
224 struct drm_vma_offset_node *node, unsigned long pages) in drm_vma_offset_add() argument
236 pages, 0, DRM_MM_SEARCH_DEFAULT); in drm_vma_offset_add()
/linux-4.4.14/sound/core/
sgbuf.c
47 for (i = 0; i < sgbuf->pages; i++) { in snd_free_sgbuf_pages()
71 unsigned int i, pages, chunk, maxpages; in snd_malloc_sgbuf_pages() local
82 pages = snd_sgbuf_aligned_pages(size); in snd_malloc_sgbuf_pages()
83 sgbuf->tblsize = sgbuf_align_table(pages); in snd_malloc_sgbuf_pages()
95 while (pages > 0) { in snd_malloc_sgbuf_pages()
96 chunk = pages; in snd_malloc_sgbuf_pages()
103 if (!sgbuf->pages) in snd_malloc_sgbuf_pages()
107 size = sgbuf->pages * PAGE_SIZE; in snd_malloc_sgbuf_pages()
121 sgbuf->pages += chunk; in snd_malloc_sgbuf_pages()
122 pages -= chunk; in snd_malloc_sgbuf_pages()
[all …]
/linux-4.4.14/arch/sh/mm/
gup.c
75 unsigned long end, int write, struct page **pages, int *nr) in gup_pte_range() argument
110 pages[*nr] = page; in gup_pte_range()
120 int write, struct page **pages, int *nr) in gup_pmd_range() argument
132 if (!gup_pte_range(pmd, addr, next, write, pages, nr)) in gup_pmd_range()
140 int write, struct page **pages, int *nr) in gup_pud_range() argument
152 if (!gup_pmd_range(pud, addr, next, write, pages, nr)) in gup_pud_range()
164 struct page **pages) in __get_user_pages_fast() argument
193 if (!gup_pud_range(pgd, addr, next, write, pages, &nr)) in __get_user_pages_fast()
218 struct page **pages) in get_user_pages_fast() argument
242 if (!gup_pud_range(pgd, addr, next, write, pages, &nr)) in get_user_pages_fast()
[all …]
/linux-4.4.14/drivers/net/ethernet/amd/xgbe/
xgbe-desc.c
141 if (ring->rx_hdr_pa.pages) { in xgbe_free_ring()
144 put_page(ring->rx_hdr_pa.pages); in xgbe_free_ring()
146 ring->rx_hdr_pa.pages = NULL; in xgbe_free_ring()
152 if (ring->rx_buf_pa.pages) { in xgbe_free_ring()
155 put_page(ring->rx_buf_pa.pages); in xgbe_free_ring()
157 ring->rx_buf_pa.pages = NULL; in xgbe_free_ring()
266 struct page *pages = NULL; in xgbe_alloc_pages() local
273 pages = alloc_pages(gfp, order); in xgbe_alloc_pages()
274 if (pages) in xgbe_alloc_pages()
279 if (!pages) in xgbe_alloc_pages()
[all …]
/linux-4.4.14/drivers/gpu/drm/omapdrm/
omap_gem_dmabuf.c
85 struct page **pages; in omap_gem_dmabuf_begin_cpu_access() local
93 return omap_gem_get_pages(obj, &pages, true); in omap_gem_dmabuf_begin_cpu_access()
108 struct page **pages; in omap_gem_dmabuf_kmap_atomic() local
109 omap_gem_get_pages(obj, &pages, false); in omap_gem_dmabuf_kmap_atomic()
111 return kmap_atomic(pages[page_num]); in omap_gem_dmabuf_kmap_atomic()
124 struct page **pages; in omap_gem_dmabuf_kmap() local
125 omap_gem_get_pages(obj, &pages, false); in omap_gem_dmabuf_kmap()
127 return kmap(pages[page_num]); in omap_gem_dmabuf_kmap()
134 struct page **pages; in omap_gem_dmabuf_kunmap() local
135 omap_gem_get_pages(obj, &pages, false); in omap_gem_dmabuf_kunmap()
[all …]
omap_gem.c
30 void _drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
88 struct page **pages; member
122 static int get_pages(struct drm_gem_object *obj, struct page ***pages);
229 struct page **pages; in omap_gem_attach_pages() local
234 WARN_ON(omap_obj->pages); in omap_gem_attach_pages()
236 pages = drm_gem_get_pages(obj); in omap_gem_attach_pages()
237 if (IS_ERR(pages)) { in omap_gem_attach_pages()
238 dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages)); in omap_gem_attach_pages()
239 return PTR_ERR(pages); in omap_gem_attach_pages()
253 addrs[i] = dma_map_page(dev->dev, pages[i], in omap_gem_attach_pages()
[all …]
TODO
3 accessing the pages via a GART, so maybe we need some other threshold
4 to put a cap on the # of pages that can be pin'd.
5 . Use mm_shrinker to trigger unpinning pages.
8 . GEM/shmem backed pages can have existing mappings (kernel linear map,
/linux-4.4.14/drivers/staging/lustre/lustre/llite/
rw26.c
189 size_t size, struct page ***pages, in ll_get_user_pages() argument
196 *pages = NULL; in ll_get_user_pages()
203 *pages = libcfs_kvzalloc(*max_pages * sizeof(**pages), GFP_NOFS); in ll_get_user_pages()
204 if (*pages) { in ll_get_user_pages()
206 (rw == READ), *pages); in ll_get_user_pages()
208 kvfree(*pages); in ll_get_user_pages()
216 static void ll_free_user_pages(struct page **pages, int npages, int do_dirty) in ll_free_user_pages() argument
222 set_page_dirty_lock(pages[i]); in ll_free_user_pages()
223 page_cache_release(pages[i]); in ll_free_user_pages()
225 kvfree(pages); in ll_free_user_pages()
[all …]
/linux-4.4.14/drivers/iommu/
dma-iommu.c
186 static void __iommu_dma_free_pages(struct page **pages, int count) in __iommu_dma_free_pages() argument
189 __free_page(pages[count]); in __iommu_dma_free_pages()
190 kvfree(pages); in __iommu_dma_free_pages()
195 struct page **pages; in __iommu_dma_alloc_pages() local
196 unsigned int i = 0, array_size = count * sizeof(*pages); in __iommu_dma_alloc_pages()
200 pages = kzalloc(array_size, GFP_KERNEL); in __iommu_dma_alloc_pages()
202 pages = vzalloc(array_size); in __iommu_dma_alloc_pages()
203 if (!pages) in __iommu_dma_alloc_pages()
235 __iommu_dma_free_pages(pages, i); in __iommu_dma_alloc_pages()
241 pages[i++] = page++; in __iommu_dma_alloc_pages()
[all …]
intel-svm.c
39 struct page *pages; in intel_svm_alloc_pasid_tables() local
46 pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order); in intel_svm_alloc_pasid_tables()
47 if (!pages) { in intel_svm_alloc_pasid_tables()
52 iommu->pasid_table = page_address(pages); in intel_svm_alloc_pasid_tables()
56 pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order); in intel_svm_alloc_pasid_tables()
57 if (pages) in intel_svm_alloc_pasid_tables()
58 iommu->pasid_state_table = page_address(pages); in intel_svm_alloc_pasid_tables()
93 struct page *pages; in intel_svm_enable_prq() local
96 pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, PRQ_ORDER); in intel_svm_enable_prq()
97 if (!pages) { in intel_svm_enable_prq()
[all …]
/linux-4.4.14/drivers/misc/mic/scif/
scif_mmap.c
222 struct scif_range **pages) in scif_get_pages() argument
261 *pages = kzalloc(sizeof(**pages), GFP_KERNEL); in scif_get_pages()
262 if (!*pages) { in scif_get_pages()
268 (*pages)->phys_addr = scif_zalloc(nr_pages * sizeof(dma_addr_t)); in scif_get_pages()
269 if (!((*pages)->phys_addr)) { in scif_get_pages()
276 ((*pages)->va = scif_zalloc(nr_pages * sizeof(void *))); in scif_get_pages()
277 if (!(*pages)->va) { in scif_get_pages()
283 (*pages)->cookie = window; in scif_get_pages()
284 (*pages)->nr_pages = nr_pages; in scif_get_pages()
285 (*pages)->prot_flags = window->prot; in scif_get_pages()
[all …]
scif_rma.c
97 pin->pages = scif_zalloc(nr_pages * sizeof(*pin->pages)); in scif_create_pinned_pages()
98 if (!pin->pages) in scif_create_pinned_pages()
124 if (pin->pages[j] && !kernel) { in scif_destroy_pinned_pages()
126 SetPageDirty(pin->pages[j]); in scif_destroy_pinned_pages()
127 put_page(pin->pages[j]); in scif_destroy_pinned_pages()
131 scif_free(pin->pages, in scif_destroy_pinned_pages()
132 pin->nr_pages * sizeof(*pin->pages)); in scif_destroy_pinned_pages()
556 sg_set_page(sg, pin->pages[i], PAGE_SIZE, 0x0); in scif_iommu_map()
604 phys_prev = page_to_phys(pin->pages[i]); in scif_map_window()
609 phys_curr = page_to_phys(pin->pages[k]); in scif_map_window()
[all …]
/linux-4.4.14/drivers/gpu/drm/vgem/
vgem_drv.c
47 drm_gem_put_pages(&obj->base, obj->pages, false, false); in vgem_gem_put_pages()
48 obj->pages = NULL; in vgem_gem_put_pages()
64 if (vgem_obj->pages) in vgem_gem_free_object()
67 vgem_obj->pages = NULL; in vgem_gem_free_object()
74 struct page **pages; in vgem_gem_get_pages() local
76 if (obj->pages || obj->use_dma_buf) in vgem_gem_get_pages()
79 pages = drm_gem_get_pages(&obj->base); in vgem_gem_get_pages()
80 if (IS_ERR(pages)) { in vgem_gem_get_pages()
81 return PTR_ERR(pages); in vgem_gem_get_pages()
84 obj->pages = pages; in vgem_gem_get_pages()
[all …]
/linux-4.4.14/include/trace/events/
Dtlb.h39 TP_PROTO(int reason, unsigned long pages),
40 TP_ARGS(reason, pages),
46 __field(unsigned long, pages)
51 __entry->pages = pages;
55 __entry->pages,
/linux-4.4.14/arch/mips/jazz/
Djazzdma.c94 int first, last, pages, frame, i; in vdma_alloc() local
115 pages = VDMA_PAGE(paddr + size) - VDMA_PAGE(paddr) + 1; in vdma_alloc()
120 if (first + pages > VDMA_PGTBL_ENTRIES) { /* nothing free */ in vdma_alloc()
127 && last - first < pages) in vdma_alloc()
130 if (last - first == pages) in vdma_alloc()
154 pages, laddr); in vdma_alloc()
214 int first, pages; in vdma_remap() local
231 pages = (((paddr & (VDMA_PAGESIZE - 1)) + size) >> 12) + 1; in vdma_remap()
234 printk("vdma_remap: first=%x, pages=%x\n", first, pages); in vdma_remap()
235 if (first + pages > VDMA_PGTBL_ENTRIES) { in vdma_remap()
[all …]
/linux-4.4.14/drivers/gpu/drm/gma500/
Dgtt.c89 struct page **pages; in psb_gtt_insert() local
92 if (r->pages == NULL) { in psb_gtt_insert()
100 pages = r->pages; in psb_gtt_insert()
104 set_pages_array_wc(pages, r->npage); in psb_gtt_insert()
109 pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), in psb_gtt_insert()
114 pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), in psb_gtt_insert()
149 set_pages_array_wb(r->pages, r->npage); in psb_gtt_remove()
183 pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), in psb_gtt_roll()
188 pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), in psb_gtt_roll()
205 struct page **pages; in psb_gtt_attach_pages() local
[all …]
/linux-4.4.14/drivers/gpu/drm/i915/
i915_gem_dmabuf.c
60 ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL); in i915_gem_map_dma_buf()
64 src = obj->pages->sgl; in i915_gem_map_dma_buf()
66 for (i = 0; i < obj->pages->nents; i++) { in i915_gem_map_dma_buf()
114 struct page **pages; in i915_gem_dmabuf_vmap() local
134 pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages)); in i915_gem_dmabuf_vmap()
135 if (pages == NULL) in i915_gem_dmabuf_vmap()
139 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) in i915_gem_dmabuf_vmap()
140 pages[i++] = sg_page_iter_page(&sg_iter); in i915_gem_dmabuf_vmap()
142 obj->dma_buf_vmapping = vmap(pages, i, 0, PAGE_KERNEL); in i915_gem_dmabuf_vmap()
143 drm_free_large(pages); in i915_gem_dmabuf_vmap()
[all …]
i915_gem_userptr.c
76 if (obj->pages != NULL) { in __cancel_userptr__worker()
517 ret = st_set_pages(&obj->pages, pvec, num_pages); in __i915_gem_userptr_set_pages()
523 sg_free_table(obj->pages); in __i915_gem_userptr_set_pages()
524 kfree(obj->pages); in __i915_gem_userptr_set_pages()
525 obj->pages = NULL; in __i915_gem_userptr_set_pages()
607 obj->get_page.sg = obj->pages->sgl; in __i915_gem_userptr_get_pages_worker()
760 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) { in i915_gem_userptr_put_pages()
771 sg_free_table(obj->pages); in i915_gem_userptr_put_pages()
772 kfree(obj->pages); in i915_gem_userptr_put_pages()
/linux-4.4.14/drivers/gpu/drm/amd/amdgpu/
amdgpu_gart.c
225 int pages) in amdgpu_gart_unbind() argument
240 for (i = 0; i < pages; i++, p++) { in amdgpu_gart_unbind()
241 if (adev->gart.pages[p]) { in amdgpu_gart_unbind()
242 adev->gart.pages[p] = NULL; in amdgpu_gart_unbind()
273 int pages, struct page **pagelist, dma_addr_t *dma_addr, in amdgpu_gart_bind() argument
289 for (i = 0; i < pages; i++, p++) { in amdgpu_gart_bind()
291 adev->gart.pages[p] = pagelist[i]; in amdgpu_gart_bind()
317 if (adev->gart.pages) { in amdgpu_gart_init()
334 adev->gart.pages = vzalloc(sizeof(void *) * adev->gart.num_cpu_pages); in amdgpu_gart_init()
335 if (adev->gart.pages == NULL) { in amdgpu_gart_init()
[all …]
/linux-4.4.14/drivers/lguest/x86/
core.c
85 static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages) in copy_in_guest_info() argument
93 if (__this_cpu_read(lg_last_cpu) != cpu || cpu->last_pages != pages) { in copy_in_guest_info()
95 cpu->last_pages = pages; in copy_in_guest_info()
103 pages->state.host_cr3 = __pa(current->mm->pgd); in copy_in_guest_info()
108 map_switcher_in_guest(cpu, pages); in copy_in_guest_info()
114 pages->state.guest_tss.sp1 = cpu->esp1; in copy_in_guest_info()
115 pages->state.guest_tss.ss1 = cpu->ss1; in copy_in_guest_info()
119 copy_traps(cpu, pages->state.guest_idt, default_idt_entries); in copy_in_guest_info()
123 copy_gdt(cpu, pages->state.guest_gdt); in copy_in_guest_info()
126 copy_gdt_tls(cpu, pages->state.guest_gdt); in copy_in_guest_info()
[all …]
/linux-4.4.14/drivers/block/xen-blkback/
Dblkback.c279 struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST]; in free_persistent_gnts() local
285 unmap_data.pages = pages; in free_persistent_gnts()
298 pages[segs_to_unmap] = persistent_gnt->page; in free_persistent_gnts()
306 put_free_pages(blkif, pages, segs_to_unmap); in free_persistent_gnts()
320 struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST]; in xen_blkbk_unmap_purged_grants() local
326 unmap_data.pages = pages; in xen_blkbk_unmap_purged_grants()
341 pages[segs_to_unmap] = persistent_gnt->page; in xen_blkbk_unmap_purged_grants()
346 put_free_pages(blkif, pages, segs_to_unmap); in xen_blkbk_unmap_purged_grants()
354 put_free_pages(blkif, pages, segs_to_unmap); in xen_blkbk_unmap_purged_grants()
677 struct grant_page **pages, in xen_blkbk_unmap_prepare() argument
[all …]
/linux-4.4.14/fs/ntfs/
Dcompress.c522 struct page **pages; in ntfs_read_compressed_block() local
534 pages = kmalloc(nr_pages * sizeof(struct page *), GFP_NOFS); in ntfs_read_compressed_block()
540 if (unlikely(!pages || !bhs)) { in ntfs_read_compressed_block()
542 kfree(pages); in ntfs_read_compressed_block()
554 pages[xpage] = page; in ntfs_read_compressed_block()
568 kfree(pages); in ntfs_read_compressed_block()
579 pages[i] = grab_cache_page_nowait(mapping, offset); in ntfs_read_compressed_block()
580 page = pages[i]; in ntfs_read_compressed_block()
595 pages[i] = NULL; in ntfs_read_compressed_block()
754 page = pages[cur_page]; in ntfs_read_compressed_block()
[all …]
file.c
509 pgoff_t index, const unsigned nr_pages, struct page **pages, in __ntfs_grab_cache_pages() argument
517 pages[nr] = find_get_page_flags(mapping, index, FGP_LOCK | in __ntfs_grab_cache_pages()
519 if (!pages[nr]) { in __ntfs_grab_cache_pages()
535 pages[nr] = *cached_page; in __ntfs_grab_cache_pages()
545 unlock_page(pages[--nr]); in __ntfs_grab_cache_pages()
546 page_cache_release(pages[nr]); in __ntfs_grab_cache_pages()
584 static int ntfs_prepare_pages_for_non_resident_write(struct page **pages, in ntfs_prepare_pages_for_non_resident_write() argument
614 BUG_ON(!pages); in ntfs_prepare_pages_for_non_resident_write()
615 BUG_ON(!*pages); in ntfs_prepare_pages_for_non_resident_write()
616 vi = pages[0]->mapping->host; in ntfs_prepare_pages_for_non_resident_write()
[all …]
/linux-4.4.14/drivers/gpu/drm/msm/
msm_gem.c
76 if (!msm_obj->pages) { in get_pages()
98 msm_obj->pages = p; in get_pages()
108 return msm_obj->pages; in get_pages()
115 if (msm_obj->pages) { in put_pages()
126 drm_gem_put_pages(obj, msm_obj->pages, true, false); in put_pages()
129 drm_free_large(msm_obj->pages); in put_pages()
132 msm_obj->pages = NULL; in put_pages()
197 struct page **pages; in msm_gem_fault() local
210 pages = get_pages(obj); in msm_gem_fault()
211 if (IS_ERR(pages)) { in msm_gem_fault()
[all …]
msm_gem_prime.c
28 if (WARN_ON(!msm_obj->pages)) /* should have already pinned! */ in msm_gem_prime_get_sg_table()
31 return drm_prime_pages_to_sg(msm_obj->pages, npages); in msm_gem_prime_get_sg_table()
/linux-4.4.14/drivers/lightnvm/
Drrpc.h171 unsigned pages, struct rrpc_inflight_rq *r) in __rrpc_lock_laddr() argument
173 sector_t laddr_end = laddr + pages - 1; in __rrpc_lock_laddr()
194 unsigned pages, in rrpc_lock_laddr() argument
197 BUG_ON((laddr + pages) > rrpc->nr_pages); in rrpc_lock_laddr()
199 return __rrpc_lock_laddr(rrpc, laddr, pages, r); in rrpc_lock_laddr()
213 unsigned int pages = rrpc_get_pages(bio); in rrpc_lock_rq() local
216 return rrpc_lock_laddr(rrpc, laddr, pages, r); in rrpc_lock_rq()
232 uint8_t pages = rqd->nr_pages; in rrpc_unlock_rq() local
234 BUG_ON((r->l_start + pages) > rrpc->nr_pages); in rrpc_unlock_rq()
/linux-4.4.14/drivers/xen/
Dxlate_mmu.c44 static void xen_for_each_gfn(struct page **pages, unsigned nr_gfn, in xen_for_each_gfn() argument
53 page = pages[i / XEN_PFN_PER_PAGE]; in xen_for_each_gfn()
67 struct page **pages; member
96 struct page *page = info->pages[info->index++]; in remap_pte_fn()
145 struct page **pages) in xen_xlate_remap_gfn_array() argument
160 data.pages = pages; in xen_xlate_remap_gfn_array()
181 int nr, struct page **pages) in xen_xlate_unmap_gfn_range() argument
183 xen_for_each_gfn(pages, nr, unmap_gfn, NULL); in xen_xlate_unmap_gfn_range()
Dgntdev.c95 struct page **pages; member
99 static int unmap_grant_pages(struct grant_map *map, int offset, int pages);
122 if (map->pages) in gntdev_free_map()
123 gnttab_free_pages(map->count, map->pages); in gntdev_free_map()
124 kfree(map->pages); in gntdev_free_map()
147 add->pages = kcalloc(count, sizeof(add->pages[0]), GFP_KERNEL); in gntdev_alloc_map()
153 NULL == add->pages) in gntdev_alloc_map()
156 if (gnttab_alloc_pages(count, add->pages)) in gntdev_alloc_map()
230 if (map->pages && !use_ptemod) in gntdev_put_map()
283 pfn_to_kaddr(page_to_pfn(map->pages[i])); in map_grant_pages()
[all …]
Dprivcmd.c69 static void free_page_list(struct list_head *pages) in free_page_list() argument
73 list_for_each_entry_safe(p, n, pages, lru) in free_page_list()
76 INIT_LIST_HEAD(pages); in free_page_list()
316 struct page **pages = vma->vm_private_data; in mmap_batch_fn() local
321 cur_pages = &pages[st->index]; in mmap_batch_fn()
398 struct page **pages; in alloc_empty_pages() local
400 pages = kcalloc(numpgs, sizeof(pages[0]), GFP_KERNEL); in alloc_empty_pages()
401 if (pages == NULL) in alloc_empty_pages()
404 rc = alloc_xenballooned_pages(numpgs, pages); in alloc_empty_pages()
408 kfree(pages); in alloc_empty_pages()
[all …]
Dgrant-table.c685 int gnttab_alloc_pages(int nr_pages, struct page **pages) in gnttab_alloc_pages() argument
690 ret = alloc_xenballooned_pages(nr_pages, pages); in gnttab_alloc_pages()
700 gnttab_free_pages(nr_pages, pages); in gnttab_alloc_pages()
703 set_page_private(pages[i], (unsigned long)foreign); in gnttab_alloc_pages()
705 SetPagePrivate(pages[i]); in gnttab_alloc_pages()
717 void gnttab_free_pages(int nr_pages, struct page **pages) in gnttab_free_pages() argument
722 if (PagePrivate(pages[i])) { in gnttab_free_pages()
724 kfree((void *)page_private(pages[i])); in gnttab_free_pages()
726 ClearPagePrivate(pages[i]); in gnttab_free_pages()
729 free_xenballooned_pages(nr_pages, pages); in gnttab_free_pages()
[all …]
Dballoon.c494 LIST_HEAD(pages); in decrease_reservation()
507 list_add(&page->lru, &pages); in decrease_reservation()
524 list_for_each_entry_safe(page, tmp, &pages, lru) { in decrease_reservation()
641 int alloc_xenballooned_pages(int nr_pages, struct page **pages) in alloc_xenballooned_pages() argument
654 pages[pgno++] = page; in alloc_xenballooned_pages()
676 free_xenballooned_pages(pgno, pages); in alloc_xenballooned_pages()
686 void free_xenballooned_pages(int nr_pages, struct page **pages) in free_xenballooned_pages() argument
693 if (pages[i]) in free_xenballooned_pages()
694 balloon_append(pages[i]); in free_xenballooned_pages()
708 unsigned long pages) in balloon_add_region() argument
[all …]
/linux-4.4.14/drivers/media/v4l2-core/
Dvideobuf2-dma-sg.c40 struct page **pages; member
68 struct page *pages; in vb2_dma_sg_alloc_compacted() local
77 pages = NULL; in vb2_dma_sg_alloc_compacted()
78 while (!pages) { in vb2_dma_sg_alloc_compacted()
79 pages = alloc_pages(GFP_KERNEL | __GFP_ZERO | in vb2_dma_sg_alloc_compacted()
81 if (pages) in vb2_dma_sg_alloc_compacted()
86 __free_page(buf->pages[last_page]); in vb2_dma_sg_alloc_compacted()
92 split_page(pages, order); in vb2_dma_sg_alloc_compacted()
94 buf->pages[last_page++] = &pages[i]; in vb2_dma_sg_alloc_compacted()
128 buf->pages = kzalloc(buf->num_pages * sizeof(struct page *), in vb2_dma_sg_alloc()
[all …]
Dvideobuf-dma-sg.c95 static struct scatterlist *videobuf_pages_to_sg(struct page **pages, in videobuf_pages_to_sg() argument
101 if (NULL == pages[0]) in videobuf_pages_to_sg()
108 if (PageHighMem(pages[0])) in videobuf_pages_to_sg()
111 sg_set_page(&sglist[0], pages[0], in videobuf_pages_to_sg()
115 if (NULL == pages[i]) in videobuf_pages_to_sg()
117 if (PageHighMem(pages[i])) in videobuf_pages_to_sg()
119 sg_set_page(&sglist[i], pages[i], min_t(size_t, PAGE_SIZE, size), 0); in videobuf_pages_to_sg()
177 dma->pages = kmalloc(dma->nr_pages * sizeof(struct page *), GFP_KERNEL); in videobuf_dma_init_user_locked()
178 if (NULL == dma->pages) in videobuf_dma_init_user_locked()
187 dma->pages, NULL); in videobuf_dma_init_user_locked()
[all …]
Dvideobuf-vmalloc.c162 int pages; in __videobuf_iolock() local
179 pages = PAGE_ALIGN(vb->size); in __videobuf_iolock()
192 mem->vaddr = vmalloc_user(pages); in __videobuf_iolock()
194 printk(KERN_ERR "vmalloc (%d pages) failed\n", pages); in __videobuf_iolock()
198 mem->vaddr, pages); in __videobuf_iolock()
238 int retval, pages; in __videobuf_mmap_mapper() local
256 pages = PAGE_ALIGN(vma->vm_end - vma->vm_start); in __videobuf_mmap_mapper()
257 mem->vaddr = vmalloc_user(pages); in __videobuf_mmap_mapper()
259 printk(KERN_ERR "vmalloc (%d pages) failed\n", pages); in __videobuf_mmap_mapper()
262 dprintk(1, "vmalloc is at addr %p (%d pages)\n", mem->vaddr, pages); in __videobuf_mmap_mapper()
/linux-4.4.14/Documentation/ABI/testing/
Dsysfs-kernel-mm-ksm22 pages_shared: how many shared pages are being used.
27 pages_to_scan: how many present pages to scan before ksmd goes
30 pages_unshared: how many pages unique but repeatedly checked
33 pages_volatile: how many pages changing too fast to be placed
38 write 2 to disable ksm and unmerge all its pages.
49 Description: Control merging pages across different NUMA nodes.
51 When it is set to 0 only pages from the same node are merged,
52 otherwise pages from all nodes can be merged together (default).
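A minimal userspace sketch of driving the KSM knobs named in the ABI entry above. Only the attribute names come from the excerpt; the /sys/kernel/mm/ksm/ path is the conventional sysfs location and the helper read_knob() is made up for illustration.

```c
/* Hedged userspace sketch, not from the kernel tree. */
#include <stdio.h>

static long read_knob(const char *name)
{
	char path[128];
	long val = -1;
	FILE *f;

	snprintf(path, sizeof(path), "/sys/kernel/mm/ksm/%s", name);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%ld", &val) != 1)
		val = -1;
	fclose(f);
	return val;
}

int main(void)
{
	/* Per the ABI text: write 1 to run ksmd, 2 to disable and unmerge all pages. */
	FILE *f = fopen("/sys/kernel/mm/ksm/run", "w");

	if (f) {
		fputs("1\n", f);
		fclose(f);
	}
	printf("pages_shared:   %ld\n", read_knob("pages_shared"));
	printf("pages_unshared: %ld\n", read_knob("pages_unshared"));
	printf("pages_to_scan:  %ld\n", read_knob("pages_to_scan"));
	return 0;
}
```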
/linux-4.4.14/drivers/gpu/drm/radeon/
Dradeon_gart.c239 int pages) in radeon_gart_unbind() argument
251 for (i = 0; i < pages; i++, p++) { in radeon_gart_unbind()
252 if (rdev->gart.pages[p]) { in radeon_gart_unbind()
253 rdev->gart.pages[p] = NULL; in radeon_gart_unbind()
284 int pages, struct page **pagelist, dma_addr_t *dma_addr, in radeon_gart_bind() argument
299 for (i = 0; i < pages; i++, p++) { in radeon_gart_bind()
300 rdev->gart.pages[p] = pagelist[i]; in radeon_gart_bind()
330 if (rdev->gart.pages) { in radeon_gart_init()
347 rdev->gart.pages = vzalloc(sizeof(void *) * rdev->gart.num_cpu_pages); in radeon_gart_init()
348 if (rdev->gart.pages == NULL) { in radeon_gart_init()
[all …]
/linux-4.4.14/drivers/firewire/
Dcore-iso.c48 buffer->pages = kmalloc(page_count * sizeof(buffer->pages[0]), in fw_iso_buffer_alloc()
50 if (buffer->pages == NULL) in fw_iso_buffer_alloc()
54 buffer->pages[i] = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO); in fw_iso_buffer_alloc()
55 if (buffer->pages[i] == NULL) in fw_iso_buffer_alloc()
76 address = dma_map_page(card->device, buffer->pages[i], in fw_iso_buffer_map_dma()
81 set_page_private(buffer->pages[i], address); in fw_iso_buffer_map_dma()
115 err = vm_insert_page(vma, uaddr, buffer->pages[i]); in fw_iso_buffer_map_vma()
132 address = page_private(buffer->pages[i]); in fw_iso_buffer_destroy()
137 __free_page(buffer->pages[i]); in fw_iso_buffer_destroy()
139 kfree(buffer->pages); in fw_iso_buffer_destroy()
[all …]
/linux-4.4.14/include/xen/
Dxen-ops.h54 struct page **pages);
72 struct page **pages);
74 int numpgs, struct page **pages);
80 struct page **pages);
82 int nr, struct page **pages);
Dgrant_table.h76 struct page **pages; member
195 int gnttab_alloc_pages(int nr_pages, struct page **pages);
196 void gnttab_free_pages(int nr_pages, struct page **pages);
200 struct page **pages, unsigned int count);
203 struct page **pages, unsigned int count);
268 void gnttab_foreach_grant(struct page **pages,
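A hedged kernel-side sketch of the allocate/free pairing declared in the grant_table.h hits above; only gnttab_alloc_pages() and gnttab_free_pages() come from the header, the helper example_grant_pages() is invented for illustration, and real users such as gntdev keep the page array for the lifetime of the mapping.

```c
/* Sketch only; error handling and grant mapping setup are elided. */
#include <linux/errno.h>
#include <linux/slab.h>
#include <xen/grant_table.h>

static int example_grant_pages(unsigned int nr)
{
	struct page **pages;
	int ret;

	pages = kcalloc(nr, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	ret = gnttab_alloc_pages(nr, pages);	/* fills pages[0..nr-1] */
	if (ret) {
		kfree(pages);
		return ret;
	}

	/* ... set up grant map operations against these pages ... */

	gnttab_free_pages(nr, pages);
	kfree(pages);
	return 0;
}
```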
Dballoon.h26 int alloc_xenballooned_pages(int nr_pages, struct page **pages);
27 void free_xenballooned_pages(int nr_pages, struct page **pages);
/linux-4.4.14/arch/s390/hypfs/
Dhypfs_diag.c386 static void *diag204_alloc_vbuf(int pages) in diag204_alloc_vbuf() argument
389 diag204_buf_vmalloc = vmalloc(PAGE_SIZE * (pages + 1)); in diag204_alloc_vbuf()
393 diag204_buf_pages = pages; in diag204_alloc_vbuf()
406 static void *diag204_get_buffer(enum diag204_format fmt, int *pages) in diag204_get_buffer() argument
409 *pages = diag204_buf_pages; in diag204_get_buffer()
413 *pages = 1; in diag204_get_buffer()
416 *pages = diag204((unsigned long)SUBC_RSI | in diag204_get_buffer()
418 if (*pages <= 0) in diag204_get_buffer()
421 return diag204_alloc_vbuf(*pages); in diag204_get_buffer()
442 int pages, rc; in diag204_probe() local
[all …]
/linux-4.4.14/drivers/hwmon/pmbus/
Ducd9200.c102 info->pages = 0; in ucd9200_probe()
106 info->pages++; in ucd9200_probe()
108 if (!info->pages) { in ucd9200_probe()
112 dev_info(&client->dev, "%d rails configured\n", info->pages); in ucd9200_probe()
122 for (i = 0; i < info->pages; i++) { in ucd9200_probe()
143 if (info->pages > 1) in ucd9200_probe()
153 for (i = 1; i < info->pages; i++) in ucd9200_probe()
Dpmbus.c75 for (page = 0; page < info->pages; page++) { in pmbus_find_sensor_groups()
101 if (!info->pages) { in pmbus_identify()
116 info->pages = page; in pmbus_identify()
118 info->pages = 1; in pmbus_identify()
176 info->pages = id->driver_data; in pmbus_probe()
Dltc2978.c646 info->pages = LTC2974_NUM_PAGES; in ltc2978_probe()
649 for (i = 0; i < info->pages; i++) { in ltc2978_probe()
658 info->pages = LTC2974_NUM_PAGES; in ltc2978_probe()
662 for (i = 0; i < info->pages; i++) { in ltc2978_probe()
674 info->pages = LTC2978_NUM_PAGES; in ltc2978_probe()
689 info->pages = LTC3880_NUM_PAGES; in ltc2978_probe()
704 info->pages = LTC3880_NUM_PAGES; in ltc2978_probe()
719 info->pages = LTC3883_NUM_PAGES; in ltc2978_probe()
730 info->pages = LTC3880_NUM_PAGES; in ltc2978_probe()
747 info->num_regulators = info->pages; in ltc2978_probe()
/linux-4.4.14/drivers/md/
Ddm-kcopyd.c41 struct page_list *pages; member
230 pl->next = kc->pages; in kcopyd_put_pages()
231 kc->pages = pl; in kcopyd_put_pages()
240 unsigned int nr, struct page_list **pages) in kcopyd_get_pages() argument
244 *pages = NULL; in kcopyd_get_pages()
250 pl = kc->pages; in kcopyd_get_pages()
253 kc->pages = pl->next; in kcopyd_get_pages()
256 pl->next = *pages; in kcopyd_get_pages()
257 *pages = pl; in kcopyd_get_pages()
263 if (*pages) in kcopyd_get_pages()
[all …]
/linux-4.4.14/fs/nfs/
Dnfs3acl.c17 struct page *pages[NFSACL_MAXPAGES] = { }; in nfs3_get_acl() local
21 .pages = pages, in nfs3_get_acl()
62 for (count = 0; count < NFSACL_MAXPAGES && args.pages[count]; count++) in nfs3_get_acl()
63 __free_page(args.pages[count]); in nfs3_get_acl()
122 struct page *pages[NFSACL_MAXPAGES]; in __nfs3_proc_setacls() local
127 .pages = pages, in __nfs3_proc_setacls()
161 args.pages[args.npages] = alloc_page(GFP_KERNEL); in __nfs3_proc_setacls()
162 if (args.pages[args.npages] == NULL) in __nfs3_proc_setacls()
199 __free_page(args.pages[args.npages]); in __nfs3_proc_setacls()
Dpnfs_dev.c102 struct page **pages = NULL; in nfs4_get_device_info() local
123 pages = kcalloc(max_pages, sizeof(struct page *), gfp_flags); in nfs4_get_device_info()
124 if (!pages) in nfs4_get_device_info()
128 pages[i] = alloc_page(gfp_flags); in nfs4_get_device_info()
129 if (!pages[i]) in nfs4_get_device_info()
135 pdev->pages = pages; in nfs4_get_device_info()
157 __free_page(pages[i]); in nfs4_get_device_info()
158 kfree(pages); in nfs4_get_device_info()
Dpnfs_nfs.c33 struct nfs_page *first = nfs_list_entry(data->pages.next); in pnfs_generic_prepare_to_resend_writes()
186 LIST_HEAD(pages); in pnfs_generic_retry_commit()
196 list_splice_init(&bucket->committing, &pages); in pnfs_generic_retry_commit()
198 nfs_retry_commit(&pages, freeme, cinfo, i); in pnfs_generic_retry_commit()
224 list_add(&data->pages, list); in pnfs_generic_alloc_ds_commits()
234 void pnfs_fetch_commit_bucket_list(struct list_head *pages, in pnfs_fetch_commit_bucket_list() argument
242 list_splice_init(&bucket->committing, pages); in pnfs_fetch_commit_bucket_list()
264 list_add(&data->pages, &list); in pnfs_generic_commit_pagelist()
283 list_for_each_entry_safe(data, tmp, &list, pages) { in pnfs_generic_commit_pagelist()
284 list_del_init(&data->pages); in pnfs_generic_commit_pagelist()
[all …]
Dfscache.h132 struct list_head *pages, in nfs_readpages_from_fscache() argument
136 return __nfs_readpages_from_fscache(ctx, inode, mapping, pages, in nfs_readpages_from_fscache()
211 struct list_head *pages, in nfs_readpages_from_fscache() argument
/linux-4.4.14/fs/cifs/
Dfscache.h79 struct list_head *pages, in cifs_readpages_from_fscache() argument
83 return __cifs_readpages_from_fscache(inode, mapping, pages, in cifs_readpages_from_fscache()
96 struct list_head *pages) in cifs_fscache_readpages_cancel() argument
99 return __cifs_fscache_readpages_cancel(inode, pages); in cifs_fscache_readpages_cancel()
133 struct list_head *pages, in cifs_readpages_from_fscache() argument
143 struct list_head *pages) in cifs_fscache_readpages_cancel() argument
/linux-4.4.14/drivers/net/ethernet/mellanox/mlx4/
Den_resources.c111 struct page **pages; in mlx4_en_map_buffer() local
117 pages = kmalloc(sizeof *pages * buf->nbufs, GFP_KERNEL); in mlx4_en_map_buffer()
118 if (!pages) in mlx4_en_map_buffer()
122 pages[i] = virt_to_page(buf->page_list[i].buf); in mlx4_en_map_buffer()
124 buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL); in mlx4_en_map_buffer()
125 kfree(pages); in mlx4_en_map_buffer()
/linux-4.4.14/fs/exofs/
Dore_raid.c58 struct page **pages; member
80 struct page *pages[group_width]; in _sp2d_alloc() member
130 sp2d->_1p_stripes[i].pages = __a1pa->pages; in _sp2d_alloc()
157 struct page *page = _1ps->pages[c]; in _sp2d_reset()
167 memset(_1ps->pages, 0, group_width * sizeof(*_1ps->pages)); in _sp2d_reset()
184 kfree(sp2d->_1p_stripes[i].pages); in _sp2d_free()
236 _1ps->tx = async_xor(_1ps->pages[sp2d->data_devs], in _gen_xor_unit()
237 _1ps->pages, 0, sp2d->data_devs, in _gen_xor_unit()
240 _1ps->tx = async_gen_syndrome(_1ps->pages, 0, in _gen_xor_unit()
263 _1ps->pages[si->cur_comp] = page; in _ore_add_stripe_page()
[all …]
Dinode.c43 unsigned pages = min_t(unsigned, expected_pages, in exofs_max_io_pages() local
46 return pages; in exofs_max_io_pages()
55 struct page **pages; member
76 pcol->pages = NULL; in _pcol_init()
89 pcol->pages = NULL; in _pcol_reset()
107 unsigned pages; in pcol_try_alloc() local
110 pages = exofs_max_io_pages(&pcol->sbi->layout, pcol->expected_pages); in pcol_try_alloc()
112 for (; pages; pages >>= 1) { in pcol_try_alloc()
113 pcol->pages = kmalloc(pages * sizeof(struct page *), in pcol_try_alloc()
115 if (likely(pcol->pages)) { in pcol_try_alloc()
[all …]
Dore.c149 struct page **pages; in _ore_get_io_state() local
156 struct page *pages[num_par_pages]; in _ore_get_io_state() member
168 pages = num_par_pages ? _aios->pages : NULL; in _ore_get_io_state()
178 struct page *pages[num_par_pages]; in _ore_get_io_state() member
197 pages = num_par_pages ? extra_part->pages : NULL; in _ore_get_io_state()
206 if (pages) { in _ore_get_io_state()
207 ios->parity_pages = pages; in _ore_get_io_state()
594 unsigned pgbase, struct page **pages, in _ore_add_stripe_unit() argument
630 added_len = bio_add_pc_page(q, per_dev->bio, pages[pg], in _ore_add_stripe_unit()
642 _add_stripe_page(ios->sp2d, &ios->si, pages[pg]); in _ore_add_stripe_unit()
[all …]
/linux-4.4.14/arch/powerpc/kvm/
Dbook3s_64_vio.c56 __free_page(stt->pages[i]); in release_spapr_tce_table()
71 page = stt->pages[vmf->pgoff]; in kvm_spapr_tce_fault()
126 stt->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO); in kvm_vm_ioctl_create_spapr_tce()
127 if (!stt->pages[i]) in kvm_vm_ioctl_create_spapr_tce()
144 if (stt->pages[i]) in kvm_vm_ioctl_create_spapr_tce()
145 __free_page(stt->pages[i]); in kvm_vm_ioctl_create_spapr_tce()
/linux-4.4.14/Documentation/arm64/
Dmemory.txt13 64KB pages, only 2 levels of translation tables, allowing 42-bit (4TB)
24 AArch64 Linux memory layout with 4KB pages + 3 levels:
32 AArch64 Linux memory layout with 4KB pages + 4 levels:
40 AArch64 Linux memory layout with 64KB pages + 2 levels:
48 AArch64 Linux memory layout with 64KB pages + 3 levels:
60 Translation table lookup with 4KB pages:
75 Translation table lookup with 64KB pages:
89 When using KVM, the hypervisor maps kernel pages in EL2, at a fixed
/linux-4.4.14/arch/ia64/include/asm/
Dtlb.h64 struct page **pages; member
143 free_page_and_swap_cache(tlb->pages[i]); in ia64_tlb_flush_mmu_free()
164 tlb->pages = (void *)addr; in __tlb_alloc_page()
175 tlb->pages = tlb->local; in tlb_gather_mmu()
199 if (tlb->pages != tlb->local) in tlb_finish_mmu()
200 free_pages((unsigned long)tlb->pages, 0); in tlb_finish_mmu()
212 if (!tlb->nr && tlb->pages == tlb->local) in __tlb_remove_page()
215 tlb->pages[tlb->nr++] = page; in __tlb_remove_page()
/linux-4.4.14/drivers/base/
Ddma-mapping.c278 void *dma_common_pages_remap(struct page **pages, size_t size, in dma_common_pages_remap() argument
288 area->pages = pages; in dma_common_pages_remap()
290 if (map_vm_area(area, prot, pages)) { in dma_common_pages_remap()
308 struct page **pages; in dma_common_contiguous_remap() local
312 pages = kmalloc(sizeof(struct page *) << get_order(size), GFP_KERNEL); in dma_common_contiguous_remap()
313 if (!pages) in dma_common_contiguous_remap()
317 pages[i] = pfn_to_page(pfn + i); in dma_common_contiguous_remap()
319 ptr = dma_common_pages_remap(pages, size, vm_flags, prot, caller); in dma_common_contiguous_remap()
321 kfree(pages); in dma_common_contiguous_remap()
Dfirmware_class.c148 struct page **pages; member
259 __free_page(buf->pages[i]); in __fw_free_buf()
260 kfree(buf->pages); in __fw_free_buf()
391 fw->pages = buf->pages; in fw_set_page_data()
623 buf->data = vmap(buf->pages, buf->nr_pages, 0, PAGE_KERNEL_RO); in fw_map_pages_buf()
662 __free_page(fw_buf->pages[i]); in firmware_loading_store()
663 kfree(fw_buf->pages); in firmware_loading_store()
664 fw_buf->pages = NULL; in firmware_loading_store()
748 page_data = kmap(buf->pages[page_nr]); in firmware_data_read()
752 kunmap(buf->pages[page_nr]); in firmware_data_read()
[all …]
/linux-4.4.14/arch/arm/mm/
Ddma-mapping.c1128 struct page **pages; in __iommu_alloc_buffer() local
1134 pages = kzalloc(array_size, GFP_KERNEL); in __iommu_alloc_buffer()
1136 pages = vzalloc(array_size); in __iommu_alloc_buffer()
1137 if (!pages) in __iommu_alloc_buffer()
1152 pages[i] = page + i; in __iommu_alloc_buffer()
1154 return pages; in __iommu_alloc_buffer()
1171 pages[i] = alloc_pages(gfp | __GFP_NORETRY, order); in __iommu_alloc_buffer()
1172 if (pages[i]) in __iommu_alloc_buffer()
1176 if (!pages[i]) { in __iommu_alloc_buffer()
1181 pages[i] = alloc_pages(gfp, 0); in __iommu_alloc_buffer()
[all …]
/linux-4.4.14/fs/ceph/
Dcache.h45 struct list_head *pages,
85 struct list_head *pages) in ceph_fscache_readpages_cancel() argument
88 return fscache_readpages_cancel(ci->fscache, pages); in ceph_fscache_readpages_cancel()
121 struct page *pages) in ceph_fscache_uncache_page() argument
133 struct list_head *pages, in ceph_readpages_from_fscache() argument
172 struct list_head *pages) in ceph_fscache_readpages_cancel() argument
Dfile.c71 struct page **pages; in dio_get_pages_alloc() local
77 pages = kmalloc(sizeof(*pages) * npages, GFP_KERNEL); in dio_get_pages_alloc()
78 if (!pages) { in dio_get_pages_alloc()
79 pages = vmalloc(sizeof(*pages) * npages); in dio_get_pages_alloc()
80 if (!pages) in dio_get_pages_alloc()
86 ret = iov_iter_get_pages(&tmp_it, pages + idx, nbytes, in dio_get_pages_alloc()
100 return pages; in dio_get_pages_alloc()
102 ceph_put_page_vector(pages, idx, false); in dio_get_pages_alloc()
413 struct page **pages, int num_pages, in striped_read() argument
432 page_pos = pages; in striped_read()
[all …]
Daddr.c277 struct page *page = osd_data->pages[i]; in finish_read()
296 kfree(osd_data->pages); in finish_read()
299 static void ceph_unlock_page_vector(struct page **pages, int num_pages) in ceph_unlock_page_vector() argument
304 unlock_page(pages[i]); in ceph_unlock_page_vector()
322 struct page **pages; in start_read() local
353 pages = kmalloc(sizeof(*pages) * nr_pages, GFP_KERNEL); in start_read()
355 if (!pages) in start_read()
373 pages[i] = page; in start_read()
375 osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false, false); in start_read()
389 ceph_unlock_page_vector(pages, nr_pages); in start_read()
[all …]
/linux-4.4.14/arch/blackfin/kernel/
Ddma-mapping.c47 static unsigned long __alloc_dma_pages(unsigned int pages) in __alloc_dma_pages() argument
57 start = bitmap_find_next_zero_area(dma_page, dma_pages, 0, pages, 0); in __alloc_dma_pages()
60 bitmap_set(dma_page, start, pages); in __alloc_dma_pages()
66 static void __free_dma_pages(unsigned long addr, unsigned int pages) in __free_dma_pages() argument
71 if ((page + pages) > dma_pages) { in __free_dma_pages()
77 bitmap_clear(dma_page, page, pages); in __free_dma_pages()
/linux-4.4.14/drivers/staging/android/ion/
Dion_heap.c37 struct page **pages = vmalloc(sizeof(struct page *) * npages); in ion_heap_map_kernel() local
38 struct page **tmp = pages; in ion_heap_map_kernel()
40 if (!pages) in ion_heap_map_kernel()
56 vaddr = vmap(pages, npages, VM_MAP, pgprot); in ion_heap_map_kernel()
57 vfree(pages); in ion_heap_map_kernel()
106 static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot) in ion_heap_clear_pages() argument
108 void *addr = vm_map_ram(pages, num, -1, pgprot); in ion_heap_clear_pages()
124 struct page *pages[32]; in ion_heap_sglist_zero() local
127 pages[p++] = sg_page_iter_page(&piter); in ion_heap_sglist_zero()
128 if (p == ARRAY_SIZE(pages)) { in ion_heap_sglist_zero()
[all …]
Dion_system_heap.c130 struct list_head pages; in ion_system_heap_allocate() local
142 INIT_LIST_HEAD(&pages); in ion_system_heap_allocate()
148 list_add_tail(&page->lru, &pages); in ion_system_heap_allocate()
161 list_for_each_entry_safe(page, tmp_page, &pages, lru) { in ion_system_heap_allocate()
173 list_for_each_entry_safe(page, tmp_page, &pages, lru) in ion_system_heap_allocate()
380 unsigned long pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT; in ion_system_contig_heap_free() local
383 for (i = 0; i < pages; i++) in ion_system_contig_heap_free()
/linux-4.4.14/arch/arm/include/asm/
Dtlb.h78 struct page **pages; member
123 tlb->pages = (void *)addr; in __tlb_alloc_page()
138 free_pages_and_swap_cache(tlb->pages, tlb->nr); in tlb_flush_mmu_free()
140 if (tlb->pages == tlb->local) in tlb_flush_mmu_free()
159 tlb->pages = tlb->local; in tlb_gather_mmu()
176 if (tlb->pages != tlb->local) in tlb_finish_mmu()
177 free_pages((unsigned long)tlb->pages, 0); in tlb_finish_mmu()
214 tlb->pages[tlb->nr++] = page; in __tlb_remove_page()
/linux-4.4.14/fs/btrfs/tests/
Dextent-io-tests.c32 struct page *pages[16]; in process_page_range() local
43 ARRAY_SIZE(pages)), pages); in process_page_range()
46 !PageLocked(pages[i])) in process_page_range()
48 if (flags & PROCESS_UNLOCK && PageLocked(pages[i])) in process_page_range()
49 unlock_page(pages[i]); in process_page_range()
50 page_cache_release(pages[i]); in process_page_range()
52 page_cache_release(pages[i]); in process_page_range()
/linux-4.4.14/net/sunrpc/xprtrdma/
Dsvc_rdma_recvfrom.c68 page = ctxt->pages[0]; in rdma_build_arg_xdr()
89 rqstp->rq_arg.pages = &rqstp->rq_pages[0]; in rdma_build_arg_xdr()
91 rqstp->rq_arg.pages = &rqstp->rq_pages[1]; in rdma_build_arg_xdr()
95 page = ctxt->pages[sge_no]; in rdma_build_arg_xdr()
108 page = ctxt->pages[sge_no++]; in rdma_build_arg_xdr()
145 head->arg.pages[pg_no] = rqstp->rq_arg.pages[pg_no]; in rdma_read_chunk_lcl()
150 rqstp->rq_respages = &rqstp->rq_arg.pages[pg_no+1]; in rdma_read_chunk_lcl()
154 head->arg.pages[pg_no], pg_off, in rdma_read_chunk_lcl()
248 head->arg.pages[pg_no] = rqstp->rq_arg.pages[pg_no]; in rdma_read_chunk_frmr()
254 sg_set_page(&frmr->sg[pno], rqstp->rq_arg.pages[pg_no], in rdma_read_chunk_frmr()
[all …]
/linux-4.4.14/arch/parisc/mm/
Dinit.c154 tmp = pmem_ranges[j-1].pages; in setup_bootmem()
155 pmem_ranges[j-1].pages = pmem_ranges[j].pages; in setup_bootmem()
156 pmem_ranges[j].pages = tmp; in setup_bootmem()
169 pmem_ranges[i-1].pages) > MAX_GAP) { in setup_bootmem()
175 pmem_ranges[i-1].pages)); in setup_bootmem()
191 size = (pmem_ranges[i].pages << PAGE_SHIFT); in setup_bootmem()
203 res->end = res->start + (pmem_ranges[i].pages << PAGE_SHIFT)-1; in setup_bootmem()
223 rsize = pmem_ranges[i].pages << PAGE_SHIFT; in setup_bootmem()
229 pmem_ranges[i].pages = (mem_limit >> PAGE_SHIFT) in setup_bootmem()
249 end_pfn = pmem_ranges[0].start_pfn + pmem_ranges[0].pages; in setup_bootmem()
[all …]
/linux-4.4.14/include/linux/
Dmman.h23 static inline void vm_acct_memory(long pages) in vm_acct_memory() argument
25 __percpu_counter_add(&vm_committed_as, pages, vm_committed_as_batch); in vm_acct_memory()
28 static inline void vm_unacct_memory(long pages) in vm_unacct_memory() argument
30 vm_acct_memory(-pages); in vm_unacct_memory()
Dvmalloc.h36 struct page **pages; member
57 extern void *vm_map_ram(struct page **pages, unsigned int count,
85 extern void *vmap(struct page **pages, unsigned int count,
124 struct page **pages);
127 pgprot_t prot, struct page **pages);
133 pgprot_t prot, struct page **pages) in map_kernel_range_noflush() argument
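A hedged sketch of the vmap() pattern visible in several callers above (ion_heap_map_kernel, mlx4_en_map_buffer): gather physically scattered pages into an array and give them one contiguous kernel mapping. The helper example_vmap() is invented; vmap()/vunmap() and the page allocators are standard kernel APIs.

```c
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

static void *example_vmap(unsigned int npages, struct page ***ret_pages)
{
	struct page **pages;
	void *vaddr;
	unsigned int i;

	pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	for (i = 0; i < npages; i++) {
		pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!pages[i])
			goto err;
	}

	vaddr = vmap(pages, npages, VM_MAP, PAGE_KERNEL);
	if (!vaddr)
		goto err;

	*ret_pages = pages;	/* caller needs the array again for vunmap() + __free_page() */
	return vaddr;
err:
	while (i--)
		__free_page(pages[i]);
	kfree(pages);
	return NULL;
}
```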
Dballoon_compaction.h62 struct list_head pages; /* Pages enqueued & handled to Host */ member
74 INIT_LIST_HEAD(&balloon->pages); in balloon_devinfo_init()
129 list_add(&page->lru, &balloon->pages); in balloon_page_insert()
170 list_add(&page->lru, &balloon->pages); in balloon_page_insert()
Dfscache.h249 struct list_head *pages);
593 struct list_head *pages, in fscache_read_or_alloc_pages() argument
600 return __fscache_read_or_alloc_pages(cookie, mapping, pages, in fscache_read_or_alloc_pages()
650 struct list_head *pages) in fscache_readpages_cancel() argument
653 __fscache_readpages_cancel(cookie, pages); in fscache_readpages_cancel()
Dsuspend.h513 unsigned long page_key_additional_pages(unsigned long pages);
514 int page_key_alloc(unsigned long pages);
522 static inline unsigned long page_key_additional_pages(unsigned long pages) in page_key_additional_pages() argument
527 static inline int page_key_alloc(unsigned long pages) in page_key_alloc() argument
/linux-4.4.14/Documentation/virtual/kvm/
Dmmu.txt52 pages, pae, pse, pse36, cr0.wp, and 1GB pages. Work is in progress to support
102 Shadow pages
109 A nonleaf spte allows the hardware mmu to reach the leaf pages and
110 is not related to a translation directly. It points to other shadow pages.
115 Leaf ptes point at guest pages.
131 Shadow pages contain the following information:
137 Examples include real mode translation, large guest pages backed by small
138 host pages, and gpa->hpa translations when NPT or EPT is active.
147 so multiple shadow pages are needed to shadow one guest page.
148 For first-level shadow pages, role.quadrant can be 0 or 1 and denotes the
[all …]
/linux-4.4.14/net/9p/
Dtrans_common.c21 void p9_release_pages(struct page **pages, int nr_pages) in p9_release_pages() argument
26 if (pages[i]) in p9_release_pages()
27 put_page(pages[i]); in p9_release_pages()
/linux-4.4.14/drivers/usb/storage/
Dalauda.c734 unsigned int page, unsigned int pages, unsigned char *data) in alauda_read_block_raw() argument
739 PBA_ZONE(pba), 0, PBA_LO(pba) + page, pages, 0, MEDIA_PORT(us) in alauda_read_block_raw()
742 usb_stor_dbg(us, "pba %d page %d count %d\n", pba, page, pages); in alauda_read_block_raw()
750 data, (MEDIA_INFO(us).pagesize + 64) * pages, NULL); in alauda_read_block_raw()
760 unsigned int page, unsigned int pages, unsigned char *data) in alauda_read_block() argument
765 rc = alauda_read_block_raw(us, pba, page, pages, data); in alauda_read_block()
770 for (i = 0; i < pages; i++) { in alauda_read_block()
813 unsigned int page, unsigned int pages, in alauda_write_lba() argument
882 for (i = page; i < page+pages; i++) { in alauda_write_lba()
957 unsigned int pages; in alauda_read_data() local
[all …]
Dsddr55.c211 unsigned short pages; in sddr55_read_data() local
237 pages = min((unsigned int) sectors << info->smallpageshift, in sddr55_read_data()
239 len = pages << info->pageshift; in sddr55_read_data()
242 pages, pba, lba, page); in sddr55_read_data()
258 command[6] = LSB_of(pages << (1 - info->smallpageshift)); in sddr55_read_data()
305 sectors -= pages >> info->smallpageshift; in sddr55_read_data()
331 unsigned short pages; in sddr55_write_data() local
364 pages = min((unsigned int) sectors << info->smallpageshift, in sddr55_write_data()
366 len = pages << info->pageshift; in sddr55_write_data()
373 pages, pba, lba, page); in sddr55_write_data()
[all …]
Dsddr09.c750 unsigned int page, pages; in sddr09_read_data() local
783 pages = min(sectors, info->blocksize - page); in sddr09_read_data()
784 len = pages << info->pageshift; in sddr09_read_data()
800 pages, lba, page); in sddr09_read_data()
811 pages, pba, lba, page); in sddr09_read_data()
817 pages, info->pageshift, buffer, 0); in sddr09_read_data()
828 sectors -= pages; in sddr09_read_data()
863 unsigned int page, unsigned int pages, in sddr09_write_lba() argument
930 for (i = page; i < page+pages; i++) { in sddr09_write_lba()
974 unsigned int lba, maxlba, page, pages; in sddr09_write_data() local
[all …]
/linux-4.4.14/include/linux/ceph/
Dlibceph.h211 extern void ceph_release_page_vector(struct page **pages, int num_pages);
216 extern void ceph_put_page_vector(struct page **pages, int num_pages,
219 extern int ceph_copy_user_to_page_vector(struct page **pages,
222 extern void ceph_copy_to_page_vector(struct page **pages,
225 extern void ceph_copy_from_page_vector(struct page **pages,
228 extern void ceph_zero_page_vector_range(int off, int len, struct page **pages);
Dosd_client.h61 struct page **pages; member
256 struct page **pages, u64 length,
276 struct page **pages, u64 length,
293 struct page **pages, u64 length,
298 struct page **pages, u64 length,
358 struct page **pages, int nr_pages,
368 struct page **pages, int nr_pages);
/linux-4.4.14/kernel/
Dkexec_core.c284 struct page *pages; in kimage_alloc_pages() local
286 pages = alloc_pages(gfp_mask, order); in kimage_alloc_pages()
287 if (pages) { in kimage_alloc_pages()
290 pages->mapping = NULL; in kimage_alloc_pages()
291 set_page_private(pages, order); in kimage_alloc_pages()
294 SetPageReserved(pages + i); in kimage_alloc_pages()
297 return pages; in kimage_alloc_pages()
341 struct page *pages; in kimage_alloc_normal_control_pages() local
353 pages = kimage_alloc_pages(KEXEC_CONTROL_MEMORY_GFP, order); in kimage_alloc_normal_control_pages()
354 if (!pages) in kimage_alloc_normal_control_pages()
[all …]
/linux-4.4.14/sound/firewire/
Dpackets-buffer.c25 unsigned int packets_per_page, pages; in iso_packets_buffer_init() local
42 pages = DIV_ROUND_UP(count, packets_per_page); in iso_packets_buffer_init()
45 pages, direction); in iso_packets_buffer_init()
51 p = page_address(b->iso_buffer.pages[page_index]); in iso_packets_buffer_init()
/linux-4.4.14/arch/powerpc/platforms/cell/
Dras.c104 struct page *pages; member
126 area->pages = __alloc_pages_node(area->nid, in cbe_ptcal_enable_on_node()
130 if (!area->pages) { in cbe_ptcal_enable_on_node()
141 addr = __pa(page_address(area->pages)) + (PAGE_SIZE >> 1); in cbe_ptcal_enable_on_node()
159 __free_pages(area->pages, area->order); in cbe_ptcal_enable_on_node()
228 memset(page_address(area->pages), 0, in cbe_ptcal_disable()
233 __free_pages(area->pages, area->order); in cbe_ptcal_disable()
/linux-4.4.14/arch/x86/include/asm/
Dcacheflush.h57 int set_pages_array_uc(struct page **pages, int addrinarray);
58 int set_pages_array_wc(struct page **pages, int addrinarray);
59 int set_pages_array_wt(struct page **pages, int addrinarray);
60 int set_pages_array_wb(struct page **pages, int addrinarray);
/linux-4.4.14/arch/x86/um/
Dldt.c81 if (copy_to_user(ptr, ldt->u.pages[i], size)) { in read_ldt()
160 ldt->u.pages[i] = (struct ldt_entry *) in write_ldt()
162 if (!ldt->u.pages[i]) { in write_ldt()
170 memcpy(ldt->u.pages[0], &entry0, in write_ldt()
172 memcpy(ldt->u.pages[0]+1, ldt->u.entries+1, in write_ldt()
184 ldt_p = ldt->u.pages[ldt_info.entry_number/LDT_ENTRIES_PER_PAGE] + in write_ldt()
346 new_mm->arch.ldt.u.pages[i] = in init_new_ldt()
348 memcpy(new_mm->arch.ldt.u.pages[i], in init_new_ldt()
349 from_mm->arch.ldt.u.pages[i], PAGE_SIZE); in init_new_ldt()
367 free_page((long) mm->arch.ldt.u.pages[i]); in free_ldt()
/linux-4.4.14/drivers/gpu/drm/exynos/
Dexynos_drm_gem.c61 exynos_gem->pages = drm_calloc_large(nr_pages, sizeof(struct page *)); in exynos_drm_alloc_buf()
62 if (!exynos_gem->pages) { in exynos_drm_alloc_buf()
83 if (drm_prime_sg_to_page_addr_arrays(&sgt, exynos_gem->pages, NULL, in exynos_drm_alloc_buf()
103 drm_free_large(exynos_gem->pages); in exynos_drm_alloc_buf()
124 drm_free_large(exynos_gem->pages); in exynos_drm_free_buf()
337 ret = dma_mmap_attrs(drm_dev->dev, vma, exynos_gem->pages, in exynos_drm_gem_mmap_buffer()
492 pfn = page_to_pfn(exynos_gem->pages[page_offset]); in exynos_drm_gem_fault()
556 return drm_prime_pages_to_sg(exynos_gem->pages, npages); in exynos_drm_gem_prime_get_sg_table()
577 exynos_gem->pages = drm_malloc_ab(npages, sizeof(struct page *)); in exynos_drm_gem_prime_import_sg_table()
578 if (!exynos_gem->pages) { in exynos_drm_gem_prime_import_sg_table()
[all …]
Dexynos_drm_gem.h54 struct page **pages; member
140 struct page **pages,
144 void exynos_gem_put_pages_to_userptr(struct page **pages,
/linux-4.4.14/tools/testing/selftests/powerpc/mm/
Dsubpage_prot.c96 long i, j, pages, err; in run_test() local
98 pages = size / 0x10000; in run_test()
99 map = malloc(pages * 4); in run_test()
106 for (i = 0; i < pages; i++) { in run_test()
120 for (i = 0; i < pages; i++) { in run_test()
/linux-4.4.14/arch/mips/ar7/
Dmemory.c61 unsigned long pages; in prom_meminit() local
63 pages = memsize() >> PAGE_SHIFT; in prom_meminit()
64 add_memory_region(PHYS_OFFSET, pages << PAGE_SHIFT, BOOT_MEM_RAM); in prom_meminit()
/linux-4.4.14/Documentation/cma/
Ddebugfs.txt13 - [RO] order_per_bit: Order of pages represented by one bit.
15 - [WO] alloc: Allocate N pages from that CMA area. For example:
19 would try to allocate 5 pages from the cma-2 area.
21 - [WO] free: Free N pages from that CMA area, similar to the above.
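A small userspace sketch of the alloc/free example given in the debugfs text above. Assumptions: debugfs is mounted at /sys/kernel/debug and each area appears as a cma-&lt;N&gt; directory, matching the cma-2 example; the helper cma_debugfs_write() is made up for illustration.

```c
#include <stdio.h>

static int cma_debugfs_write(const char *area, const char *file, int npages)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/kernel/debug/cma/%s/%s", area, file);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%d\n", npages);
	return fclose(f);
}

int main(void)
{
	cma_debugfs_write("cma-2", "alloc", 5);	/* mirrors the 5-page example */
	cma_debugfs_write("cma-2", "free", 5);
	return 0;
}
```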
/linux-4.4.14/Documentation/sysctl/
Dvm.txt70 admin_reserve_kbytes defaults to min(3% of free pages, 8MB)
106 huge pages although processes will also directly compact memory as required.
113 allowed to examine the unevictable lru (mlocked pages) for pages to compact.
116 compaction from moving pages that are unevictable. Default value is 1.
134 Contains, as a percentage of total available memory that contains free pages
135 and reclaimable pages, the number of pages at which the background kernel
152 Note: the minimum value allowed for dirty_bytes is two pages (in bytes); any
169 Contains, as a percentage of total available memory that contains free pages
170 and reclaimable pages, the number of pages at which a process which is
319 pages for each zones from them. These are shown as array of protection pages
[all …]
/linux-4.4.14/net/sunrpc/auth_gss/
Dgss_krb5_wrap.c85 ptr = kmap_atomic(buf->pages[last]); in gss_krb5_remove_padding()
159 struct xdr_buf *buf, struct page **pages) in gss_wrap_kerberos_v1() argument
222 tmp_pages = buf->pages; in gss_wrap_kerberos_v1()
223 buf->pages = pages; in gss_wrap_kerberos_v1()
227 buf->pages = tmp_pages; in gss_wrap_kerberos_v1()
252 offset + headlen - conflen, pages); in gss_wrap_kerberos_v1()
258 offset + headlen - conflen, pages)) in gss_wrap_kerberos_v1()
441 struct xdr_buf *buf, struct page **pages) in gss_wrap_kerberos_v2() argument
486 err = (*kctx->gk5e->encrypt_v2)(kctx, offset, buf, pages); in gss_wrap_kerberos_v2()
592 struct xdr_buf *buf, struct page **pages) in gss_wrap_kerberos() argument
[all …]
Dgss_krb5_crypto.c393 struct page **pages; member
418 in_page = desc->pages[i]; in encryptor()
463 int offset, struct page **pages) in gss_encrypt_xdr_buf() argument
476 desc.pages = pages; in gss_encrypt_xdr_buf()
598 u32 offset, u8 *iv, struct page **pages, int encrypt) in gss_krb5_cts_crypt() argument
617 save_pages = buf->pages; in gss_krb5_cts_crypt()
619 buf->pages = pages; in gss_krb5_cts_crypt()
622 buf->pages = save_pages; in gss_krb5_cts_crypt()
644 struct xdr_buf *buf, struct page **pages) in gss_krb5_aes_encrypt() argument
703 save_pages = buf->pages; in gss_krb5_aes_encrypt()
[all …]
/linux-4.4.14/include/drm/
Ddrm_vma_manager.h59 unsigned long pages);
61 struct drm_vma_offset_node *node, unsigned long pages);
85 unsigned long pages) in drm_vma_offset_exact_lookup_locked() argument
89 node = drm_vma_offset_lookup_locked(mgr, start, pages); in drm_vma_offset_exact_lookup_locked()
Ddrm_agpsupport.h30 void drm_free_agp(struct agp_memory * handle, int pages);
34 struct page **pages,
68 static inline void drm_free_agp(struct agp_memory * handle, int pages) in drm_free_agp() argument
83 struct page **pages, in drm_agp_bind_pages() argument
/linux-4.4.14/arch/m32r/mm/
Ddiscontig.c29 unsigned long pages; member
48 mp->pages = PFN_DOWN(memory_end - memory_start); in mem_prof_init()
66 mp->pages = PFN_DOWN(CONFIG_IRAM_SIZE) + holes; in mem_prof_init()
88 max_pfn = mp->start_pfn + mp->pages; in setup_memory()
93 PFN_PHYS(mp->pages)); in setup_memory()
/linux-4.4.14/Documentation/trace/
Devents-kmem.txt22 justified, particularly if kmalloc slab pages are getting significantly
50 If pages are allocated directly from the buddy allocator, the
60 When pages are freed in batch, the also mm_page_free_batched is triggered.
61 Broadly speaking, pages are taken off the LRU lock in bulk and
72 for order-0 pages, reduces contention on the zone->lock and reduces the
75 When a per-CPU list is empty or pages of the wrong type are allocated,
80 When the per-CPU list is too full, a number of pages are freed, each one
83 The individual nature of the events is so that pages can be tracked
84 between allocation and freeing. A number of drain or refill pages that occur
90 line bounces due to writes between CPUs and worth investigating if pages
[all …]
/linux-4.4.14/tools/vm/
Dpage-types.c222 static unsigned long pages2mb(unsigned long pages) in pages2mb() argument
224 return (pages * page_size) >> 20; in pages2mb()
276 unsigned long pages) in kpageflags_read() argument
278 return do_u64_read(kpageflags_fd, PROC_KPAGEFLAGS, buf, index, pages); in kpageflags_read()
283 unsigned long pages) in pagemap_read() argument
285 return do_u64_read(pagemap_fd, "/proc/pid/pagemap", buf, index, pages); in pagemap_read()
599 unsigned long pages; in walk_pfn() local
604 pages = kpageflags_read(buf, index, batch); in walk_pfn()
605 if (pages == 0) in walk_pfn()
608 for (i = 0; i < pages; i++) in walk_pfn()
[all …]
/linux-4.4.14/drivers/gpu/drm/nouveau/
Dnouveau_sgdma.c34 node->pages = NULL; in nv04_sgdma_bind()
37 node->pages = nvbe->ttm.dma_address; in nv04_sgdma_bind()
69 node->pages = NULL; in nv50_sgdma_bind()
72 node->pages = nvbe->ttm.dma_address; in nv50_sgdma_bind()
/linux-4.4.14/fs/9p/
Dcache.h51 struct list_head *pages,
76 struct list_head *pages, in v9fs_readpages_from_fscache() argument
79 return __v9fs_readpages_from_fscache(inode, mapping, pages, in v9fs_readpages_from_fscache()
132 struct list_head *pages, in v9fs_readpages_from_fscache() argument
/linux-4.4.14/arch/x86/kernel/
Damd_gart_64.c336 unsigned long pages) in __dma_map_cont() argument
338 unsigned long iommu_start = alloc_iommu(dev, pages, 0); in __dma_map_cont()
347 unsigned long pages, addr; in __dma_map_cont() local
360 pages = iommu_num_pages(s->offset, s->length, PAGE_SIZE); in __dma_map_cont()
361 while (pages--) { in __dma_map_cont()
367 BUG_ON(iommu_page - iommu_start != pages); in __dma_map_cont()
374 struct scatterlist *sout, unsigned long pages, int need) in dma_map_cont() argument
382 return __dma_map_cont(dev, start, nelems, sout, pages); in dma_map_cont()
394 unsigned long pages = 0; in gart_map_sg() local
431 sgmap, pages, need) < 0) in gart_map_sg()
[all …]
Dalternative.c693 struct page *pages[2]; in text_poke() local
697 pages[0] = vmalloc_to_page(addr); in text_poke()
698 pages[1] = vmalloc_to_page(addr + PAGE_SIZE); in text_poke()
700 pages[0] = virt_to_page(addr); in text_poke()
701 WARN_ON(!PageReserved(pages[0])); in text_poke()
702 pages[1] = virt_to_page(addr + PAGE_SIZE); in text_poke()
704 BUG_ON(!pages[0]); in text_poke()
706 set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0])); in text_poke()
707 if (pages[1]) in text_poke()
708 set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1])); in text_poke()
[all …]
/linux-4.4.14/net/sunrpc/
Dxdr.c125 kaddr = kmap_atomic(buf->pages[0]); in xdr_terminate_string()
133 struct page **pages, unsigned int base, unsigned int len) in xdr_inline_pages() argument
142 xdr->pages = pages; in xdr_inline_pages()
172 _shift_data_right_pages(struct page **pages, size_t pgto_base, in _shift_data_right_pages() argument
184 pgto = pages + (pgto_base >> PAGE_CACHE_SHIFT); in _shift_data_right_pages()
185 pgfrom = pages + (pgfrom_base >> PAGE_CACHE_SHIFT); in _shift_data_right_pages()
233 _copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len) in _copy_to_pages() argument
239 pgto = pages + (pgbase >> PAGE_CACHE_SHIFT); in _copy_to_pages()
277 _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len) in _copy_from_pages() argument
283 pgfrom = pages + (pgbase >> PAGE_CACHE_SHIFT); in _copy_from_pages()
[all …]
/linux-4.4.14/drivers/mtd/nand/
Dnand_bbt.c273 res = read_bbt(mtd, buf, td->pages[i], in read_abs_bbt()
281 res = read_bbt(mtd, buf, td->pages[0], in read_abs_bbt()
395 scan_read(mtd, buf, (loff_t)td->pages[0] << this->page_shift, in read_abs_bbts()
399 td->pages[0], td->version[0]); in read_abs_bbts()
404 scan_read(mtd, buf, (loff_t)md->pages[0] << this->page_shift, in read_abs_bbts()
408 md->pages[0], md->version[0]); in read_abs_bbts()
555 td->pages[i] = -1; in search_bbt()
565 td->pages[i] = actblock << blocktopage; in search_bbt()
577 if (td->pages[i] == -1) in search_bbt()
581 td->pages[i], td->version[i]); in search_bbt()
[all …]
/linux-4.4.14/Documentation/cgroups/
Dmemory.txt41 - accounting anonymous pages, file caches, swap caches usage and limiting them.
42 - pages are linked to per-memcg LRU exclusively, and there is no global LRU.
169 All mapped anon pages (RSS) and cache pages (Page Cache) are accounted.
170 Some pages which are never reclaimable and will not be on the LRU
171 are not accounted. We just account pages under usual VM management.
173 RSS pages are accounted at page_fault unless they've already been accounted
179 unaccounted when it's removed from radix-tree. Even if RSS pages are fully
185 This means swapped-in pages may contain pages for other tasks than a task
190 Note: we just account pages-on-LRU because our purpose is to control amount
191 of used pages; not-on-LRU pages tend to be out-of-control from VM view.
[all …]
/linux-4.4.14/arch/s390/kernel/
Dsuspend.c53 unsigned long page_key_additional_pages(unsigned long pages) in page_key_additional_pages() argument
55 return DIV_ROUND_UP(pages, PAGE_KEY_DATA_SIZE); in page_key_additional_pages()
76 int page_key_alloc(unsigned long pages) in page_key_alloc() argument
81 size = DIV_ROUND_UP(pages, PAGE_KEY_DATA_SIZE); in page_key_alloc()
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/
Dgk20a.c85 struct page *pages[]; member
140 struct page *pages[npages]; in gk20a_instobj_cpu_map_dma() local
145 pages[0] = pfn_to_page(dma_to_phys(dev, node->handle) >> PAGE_SHIFT); in gk20a_instobj_cpu_map_dma()
147 pages[i] = pages[0] + i; in gk20a_instobj_cpu_map_dma()
149 return vmap(pages, npages, VM_MAP, pgprot_writecombine(PAGE_KERNEL)); in gk20a_instobj_cpu_map_dma()
162 return vmap(node->pages, npages, VM_MAP, in gk20a_instobj_cpu_map_iommu()
351 __free_page(node->pages[i]); in gk20a_instobj_dtor_iommu()
445 if (!(node = kzalloc(sizeof(*node) + ((sizeof(node->pages[0]) + in gk20a_instobj_ctor_iommu()
449 node->dma_addrs = (void *)(node->pages + npages); in gk20a_instobj_ctor_iommu()
462 node->pages[i] = p; in gk20a_instobj_ctor_iommu()
[all …]
/linux-4.4.14/drivers/gpu/drm/tegra/
Dgem.c179 if (bo->pages) { in tegra_bo_free()
180 drm_gem_put_pages(&bo->gem, bo->pages, true, true); in tegra_bo_free()
194 bo->pages = drm_gem_get_pages(&bo->gem); in tegra_bo_get_pages()
195 if (IS_ERR(bo->pages)) in tegra_bo_get_pages()
196 return PTR_ERR(bo->pages); in tegra_bo_get_pages()
200 bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages); in tegra_bo_get_pages()
220 drm_gem_put_pages(&bo->gem, bo->pages, false, false); in tegra_bo_get_pages()
439 if (!bo->pages) in tegra_bo_fault()
443 page = bo->pages[offset]; in tegra_bo_fault()
480 if (!bo->pages) { in tegra_drm_mmap()
[all …]
/linux-4.4.14/fs/afs/
Dwrite.c295 first, count, pv.pages); in afs_kill_pages()
299 ClearPageUptodate(pv.pages[loop]); in afs_kill_pages()
301 SetPageError(pv.pages[loop]); in afs_kill_pages()
302 end_page_writeback(pv.pages[loop]); in afs_kill_pages()
318 struct page *pages[8], *page; in afs_write_back_from_locked_page() local
342 if (n > ARRAY_SIZE(pages)) in afs_write_back_from_locked_page()
343 n = ARRAY_SIZE(pages); in afs_write_back_from_locked_page()
345 start, n, pages); in afs_write_back_from_locked_page()
349 if (pages[0]->index != start) { in afs_write_back_from_locked_page()
351 put_page(pages[--n]); in afs_write_back_from_locked_page()
[all …]
Dfile.c28 struct list_head *pages, unsigned nr_pages);
241 struct list_head *pages, unsigned nr_pages) in afs_readpages() argument
262 pages, in afs_readpages()
274 BUG_ON(!list_empty(pages)); in afs_readpages()
291 ret = read_cache_pages(mapping, pages, afs_page_filler, key); in afs_readpages()
/linux-4.4.14/drivers/infiniband/hw/mlx4/
Dmr.c92 u64 *pages; in mlx4_ib_umem_write_mtt() local
99 pages = (u64 *) __get_free_page(GFP_KERNEL); in mlx4_ib_umem_write_mtt()
100 if (!pages) in mlx4_ib_umem_write_mtt()
108 pages[i++] = sg_dma_address(sg) + in mlx4_ib_umem_write_mtt()
116 i, pages); in mlx4_ib_umem_write_mtt()
126 err = mlx4_write_mtt(dev->dev, mtt, n, i, pages); in mlx4_ib_umem_write_mtt()
129 free_page((unsigned long) pages); in mlx4_ib_umem_write_mtt()
289 mr->pages = PTR_ALIGN(mr->pages_alloc, MLX4_MR_PAGES_ALIGN); in mlx4_alloc_priv_pages()
291 mr->page_map = dma_map_single(device->dma_device, mr->pages, in mlx4_alloc_priv_pages()
309 if (mr->pages) { in mlx4_free_priv_pages()
[all …]
/linux-4.4.14/tools/perf/util/
Devlist.c1019 static size_t perf_evlist__mmap_size(unsigned long pages) in perf_evlist__mmap_size() argument
1021 if (pages == UINT_MAX) { in perf_evlist__mmap_size()
1035 pages = (max * 1024) / page_size; in perf_evlist__mmap_size()
1036 if (!is_power_of_2(pages)) in perf_evlist__mmap_size()
1037 pages = rounddown_pow_of_two(pages); in perf_evlist__mmap_size()
1038 } else if (!is_power_of_2(pages)) in perf_evlist__mmap_size()
1041 return (pages + 1) * page_size; in perf_evlist__mmap_size()
1047 unsigned long pages, val; in parse_pages_arg() local
1062 pages = PERF_ALIGN(val, page_size) / page_size; in parse_pages_arg()
1066 pages = strtoul(str, &eptr, 10); in parse_pages_arg()
[all …]
/linux-4.4.14/fs/
Dsplice.c209 buf->page = spd->pages[page_nr]; in splice_to_pipe()
271 page_cache_release(spd->pages[i]); in spd_release_page()
286 spd->pages = kmalloc(buffers * sizeof(struct page *), GFP_KERNEL); in splice_grow_spd()
289 if (spd->pages && spd->partial) in splice_grow_spd()
292 kfree(spd->pages); in splice_grow_spd()
302 kfree(spd->pages); in splice_shrink_spd()
313 struct page *pages[PIPE_DEF_BUFFERS]; in __generic_file_splice_read() local
320 .pages = pages, in __generic_file_splice_read()
339 spd.nr_pages = find_get_pages_contig(mapping, index, nr_pages, spd.pages); in __generic_file_splice_read()
380 spd.pages[spd.nr_pages++] = page; in __generic_file_splice_read()
[all …]
/linux-4.4.14/arch/metag/mm/
Dmmu-meta2.c139 unsigned int pages = DIV_ROUND_UP(mem_size, 1 << 22); in mmu_init() local
170 second_level_table = alloc_bootmem_pages(SECOND_LEVEL_ALIGN * pages); in mmu_init()
176 while (pages > 0) { in mmu_init()
199 pages--; in mmu_init()
DKconfig12 bool "Map kernel with 4MB pages"
16 Map the kernel with large pages to reduce TLB pressure.
31 This enables 8kB pages as supported by Meta 2.x and later MMUs.
37 This enables 16kB pages as supported by Meta 2.x and later MMUs.
58 pages. This option selects the largest power of two that the kernel
64 a value of 11 means that the largest free memory block is 2^10 pages.
/linux-4.4.14/fs/ext4/
Dreadpage.c134 struct list_head *pages, struct page *page, in ext4_mpage_readpages() argument
165 if (pages) { in ext4_mpage_readpages()
166 page = list_entry(pages->prev, struct page, lru); in ext4_mpage_readpages()
321 if (pages) in ext4_mpage_readpages()
324 BUG_ON(pages && !list_empty(pages)); in ext4_mpage_readpages()
/linux-4.4.14/drivers/virt/
Dfsl_hypervisor.c151 struct page **pages = NULL; in ioctl_memcpy() local
226 pages = kzalloc(num_pages * sizeof(struct page *), GFP_KERNEL); in ioctl_memcpy()
227 if (!pages) { in ioctl_memcpy()
250 0, pages, NULL); in ioctl_memcpy()
265 sg_list[0].source = page_to_phys(pages[0]) + lb_offset; in ioctl_memcpy()
269 sg_list[0].target = page_to_phys(pages[0]) + lb_offset; in ioctl_memcpy()
279 sg_list[i].source = page_to_phys(pages[i]); in ioctl_memcpy()
284 sg_list[i].target = page_to_phys(pages[i]); in ioctl_memcpy()
296 if (pages) { in ioctl_memcpy()
298 if (pages[i]) in ioctl_memcpy()
[all …]
/linux-4.4.14/kernel/power/
Dsnapshot.c434 unsigned long pages; in create_zone_bm_rtree() local
436 pages = end - start; in create_zone_bm_rtree()
445 nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK); in create_zone_bm_rtree()
804 unsigned long bits, pfn, pages; in memory_bm_next_pfn() local
808 pages = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn; in memory_bm_next_pfn()
809 bits = min(pages - bm->cur.node_pfn, BM_BITS_PER_BLOCK); in memory_bm_next_pfn()
1562 unsigned long saveable, size, max_size, count, highmem, pages = 0; in hibernate_preallocate_memory() local
1620 pages = preallocate_image_highmem(save_highmem); in hibernate_preallocate_memory()
1621 pages += preallocate_image_memory(saveable - pages, avail_normal); in hibernate_preallocate_memory()
1626 pages = minimum_image_size(saveable); in hibernate_preallocate_memory()
[all …]
/linux-4.4.14/drivers/char/agp/
Dgeneric.c91 mem->pages = NULL; in agp_alloc_page_array()
94 mem->pages = kmalloc(size, GFP_KERNEL | __GFP_NOWARN); in agp_alloc_page_array()
95 if (mem->pages == NULL) { in agp_alloc_page_array()
96 mem->pages = vmalloc(size); in agp_alloc_page_array()
122 if (new->pages == NULL) { in agp_create_user_memory()
148 if (new->pages == NULL) { in agp_create_memory()
193 curr->pages[i], in agp_free_memory()
198 curr->pages[i], in agp_free_memory()
275 new->pages[i] = page; in agp_allocate_memory()
1103 page_to_phys(mem->pages[i]), in agp_generic_insert_memory()
[all …]
Dintel-gtt.c97 static int intel_gtt_map_memory(struct page **pages, in intel_gtt_map_memory() argument
110 sg_set_page(sg, pages[i], PAGE_SIZE, 0); in intel_gtt_map_memory()
261 new->pages[0] = page; in alloc_agpphysmem_i8xx()
264 new->pages[1] = new->pages[0] + 1; in alloc_agpphysmem_i8xx()
265 new->pages[2] = new->pages[1] + 1; in alloc_agpphysmem_i8xx()
266 new->pages[3] = new->pages[2] + 1; in alloc_agpphysmem_i8xx()
271 new->physical = page_to_phys(new->pages[0]); in alloc_agpphysmem_i8xx()
280 i8xx_destroy_pages(curr->pages[0]); in intel_i810_free_by_type()
282 agp_bridge->driver->agp_destroy_page(curr->pages[0], in intel_i810_free_by_type()
284 agp_bridge->driver->agp_destroy_page(curr->pages[0], in intel_i810_free_by_type()
[all …]
/linux-4.4.14/arch/arm/xen/
Denlighten.c57 struct page **pages) in xen_remap_domain_gfn_array() argument
60 prot, domid, pages); in xen_remap_domain_gfn_array()
69 struct page **pages) in xen_remap_domain_gfn_range() argument
76 int nr, struct page **pages) in xen_unmap_domain_gfn_range() argument
78 return xen_xlate_unmap_gfn_range(vma, nr, pages); in xen_unmap_domain_gfn_range()
/linux-4.4.14/Documentation/
Dnommu-mmap.txt21 In the MMU case: VM regions backed by arbitrary pages; copy-on-write
25 pages.
36 In the MMU case: VM regions backed by pages read from file; changes to
61 In the MMU case: like the non-PROT_WRITE case, except that the pages in
64 the mapping's backing pages. The page is then backed by swap instead.
71 In the MMU case: VM regions backed by pages read from file; changes to
72 pages written back to file; writes to file reflected into pages backing
83 sequence by providing a contiguous sequence of pages to map. In that
93 blockdev must be able to provide a contiguous run of pages without
124 Linux man pages (ver 2.22 or later).
[all …]
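For reference, a plain POSIX sketch of the kind of mapping the nommu-mmap text discusses: a private, read-only file mapping. On an MMU kernel the region is backed by page-cache pages; the notes above describe when a no-MMU kernel instead needs a contiguous run of pages. This is ordinary userspace code, not taken from the document.

```c
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	const char *name = argc > 1 ? argv[1] : "/etc/hostname";
	struct stat st;
	void *p;
	int fd;

	fd = open(name, O_RDONLY);
	if (fd < 0 || fstat(fd, &st) < 0 || st.st_size == 0)
		return 1;

	p = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
	if (p == MAP_FAILED)
		return 1;

	fwrite(p, 1, st.st_size, stdout);
	munmap(p, st.st_size);
	close(fd);
	return 0;
}
```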
/linux-4.4.14/arch/sparc/kernel/
Dpci_fire.c231 unsigned long pages, order, i; in pci_fire_msiq_alloc() local
234 pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order); in pci_fire_msiq_alloc()
235 if (pages == 0UL) { in pci_fire_msiq_alloc()
240 memset((char *)pages, 0, PAGE_SIZE << order); in pci_fire_msiq_alloc()
241 pbm->msi_queues = (void *) pages; in pci_fire_msiq_alloc()
263 unsigned long pages, order; in pci_fire_msiq_free() local
266 pages = (unsigned long) pbm->msi_queues; in pci_fire_msiq_free()
268 free_pages(pages, order); in pci_fire_msiq_free()
/linux-4.4.14/fs/btrfs/
Dextent_io.c1651 struct page *pages[16]; in __unlock_for_delalloc() local
1663 ARRAY_SIZE(pages)), pages); in __unlock_for_delalloc()
1665 if (pages[i] != locked_page) in __unlock_for_delalloc()
1666 unlock_page(pages[i]); in __unlock_for_delalloc()
1667 page_cache_release(pages[i]); in __unlock_for_delalloc()
1684 struct page *pages[16]; in lock_delalloc_pages() local
1698 nrpages, ARRAY_SIZE(pages)), pages); in lock_delalloc_pages()
1709 if (pages[i] != locked_page) { in lock_delalloc_pages()
1710 lock_page(pages[i]); in lock_delalloc_pages()
1711 if (!PageDirty(pages[i]) || in lock_delalloc_pages()
[all …]
/linux-4.4.14/fs/nilfs2/
Dpage.c270 struct page *page = pvec.pages[i], *dpage; in nilfs_copy_dirty_pages()
323 index = pvec.pages[n - 1]->index + 1; in nilfs_copy_back_pages()
326 struct page *page = pvec.pages[i], *dpage; in nilfs_copy_back_pages()
388 struct page *page = pvec.pages[i]; in nilfs_clear_dirty_pages()
533 pvec.pages); in nilfs_find_uncommitted_extent()
537 if (length > 0 && pvec.pages[0]->index > index) in nilfs_find_uncommitted_extent()
540 b = pvec.pages[0]->index << (PAGE_CACHE_SHIFT - inode->i_blkbits); in nilfs_find_uncommitted_extent()
543 page = pvec.pages[i]; in nilfs_find_uncommitted_extent()
/linux-4.4.14/drivers/block/
Drbd.c266 struct page **pages; member
1281 static void zero_pages(struct page **pages, u64 offset, u64 end) in zero_pages() argument
1283 struct page **page = &pages[offset >> PAGE_SHIFT]; in zero_pages()
1733 zero_pages(obj_request->pages, 0, length); in rbd_img_obj_request_read_callback()
1739 zero_pages(obj_request->pages, xferred, length); in rbd_img_obj_request_read_callback()
2089 if (obj_request->pages) in rbd_obj_request_destroy()
2090 ceph_release_page_vector(obj_request->pages, in rbd_obj_request_destroy()
2313 obj_request->pages = NULL; in rbd_img_obj_end_request()
2428 obj_request->pages, length, in rbd_img_obj_request_fill()
2455 struct page **pages = NULL; in rbd_img_request_fill() local
[all …]
/linux-4.4.14/Documentation/device-mapper/
Ddm-io.txt21 The first I/O service type takes a list of memory pages as the data buffer for
50 memory pages.
68 and specify the number of pages they expect to perform I/O on concurrently.
69 Dm-io will attempt to resize its mempool to make sure enough pages are
73 dm_io_put() and specify the same number of pages that were given on the
/linux-4.4.14/include/linux/sunrpc/
Dxdr.h59 struct page ** pages; /* Array of pages */ member
193 extern void _copy_from_pages(char *p, struct page **pages, size_t pgbase,
221 extern void xdr_write_pages(struct xdr_stream *xdr, struct page **pages,
226 struct page **pages, unsigned int len);
/linux-4.4.14/drivers/misc/
Dvmw_balloon.c179 u64 pages[VMW_BALLOON_BATCH_MAX_PAGES]; member
184 return batch->pages[idx] & VMW_BALLOON_BATCH_PAGE_MASK; in vmballoon_batch_get_pa()
190 return (int)(batch->pages[idx] & VMW_BALLOON_BATCH_STATUS_MASK); in vmballoon_batch_get_status()
196 batch->pages[idx] = pa; in vmballoon_batch_set_pa()
267 struct list_head pages; member
570 list_for_each_entry_safe(page, next, &page_size->pages, lru) { in vmballoon_pop()
630 list_add(&page->lru, &page_size->pages); in vmballoon_lock_page()
667 list_add(&p->lru, &page_size->pages); in vmballoon_lock_batched_page()
706 list_add(&page->lru, &page_size->pages); in vmballoon_unlock_page()
746 list_add(&p->lru, &page_size->pages); in vmballoon_unlock_batched_page()
[all …]
/linux-4.4.14/arch/s390/kvm/
Dgaccess.c714 unsigned long *pages, unsigned long nr_pages, in guest_page_range() argument
733 rc = guest_translate(vcpu, ga, pages, asce, write); in guest_page_range()
741 *pages = kvm_s390_real_to_abs(vcpu, ga); in guest_page_range()
742 if (kvm_is_error_gpa(vcpu->kvm, *pages)) in guest_page_range()
748 pages++; in guest_page_range()
760 unsigned long *pages; in access_guest() local
771 pages = pages_array; in access_guest()
773 pages = vmalloc(nr_pages * sizeof(unsigned long)); in access_guest()
774 if (!pages) in access_guest()
779 rc = guest_page_range(vcpu, ga, pages, nr_pages, asce, write); in access_guest()
[all …]
/linux-4.4.14/drivers/gpu/drm/vmwgfx/
Dvmwgfx_buffer.c261 return viter->pages[viter->i]; in __vmw_piter_non_sg_page()
281 return page_to_phys(viter->pages[viter->i]); in __vmw_piter_phys_addr()
315 viter->pages = vsgt->pages; in vmw_piter_start()
322 viter->pages = vsgt->pages; in vmw_piter_start()
407 vsgt->pages = vmw_tt->dma_ttm.ttm.pages; in vmw_ttm_map_dma()
425 ret = sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages, in vmw_ttm_map_dma()
/linux-4.4.14/Documentation/filesystems/caching/
Dnetfs-api.txt259 or not. Note that several pages at once may be presented for marking.
261 The PG_fscache bit is set on the pages before this function would be
266 (10) A function to unmark all the pages retaining cache metadata [mandatory].
269 unbound from a cookie and that all the marks on the pages should be
271 its tracking information so that the pages don't need to be explicitly
433 Note that attempts to read or write data pages in the cache over this size may
445 And the sixth step is to store and retrieve pages in the cache. There are
592 A facility is provided to read several pages at once, as requested by the
597 struct list_head *pages,
605 (1) Any page it can retrieve data for is removed from pages and nr_pages and
[all …]
