Searched refs:pages (Results 1 – 200 of 878) sorted by relevance

/linux-4.1.27/net/ceph/
Dpagevec.c18 struct page **pages; in ceph_get_direct_page_vector() local
22 pages = kmalloc(sizeof(*pages) * num_pages, GFP_NOFS); in ceph_get_direct_page_vector()
23 if (!pages) in ceph_get_direct_page_vector()
29 num_pages - got, write_page, 0, pages + got); in ceph_get_direct_page_vector()
37 return pages; in ceph_get_direct_page_vector()
40 ceph_put_page_vector(pages, got, false); in ceph_get_direct_page_vector()
45 void ceph_put_page_vector(struct page **pages, int num_pages, bool dirty) in ceph_put_page_vector() argument
51 set_page_dirty_lock(pages[i]); in ceph_put_page_vector()
52 put_page(pages[i]); in ceph_put_page_vector()
54 if (is_vmalloc_addr(pages)) in ceph_put_page_vector()
[all …]
/linux-4.1.27/mm/
Dpercpu-vm.c35 static struct page **pages; in pcpu_get_pages() local
36 size_t pages_size = pcpu_nr_units * pcpu_unit_pages * sizeof(pages[0]); in pcpu_get_pages()
40 if (!pages) in pcpu_get_pages()
41 pages = pcpu_mem_zalloc(pages_size); in pcpu_get_pages()
42 return pages; in pcpu_get_pages()
56 struct page **pages, int page_start, int page_end) in pcpu_free_pages() argument
63 struct page *page = pages[pcpu_page_idx(cpu, i)]; in pcpu_free_pages()
83 struct page **pages, int page_start, int page_end) in pcpu_alloc_pages() argument
91 struct page **pagep = &pages[pcpu_page_idx(cpu, i)]; in pcpu_alloc_pages()
102 __free_page(pages[pcpu_page_idx(cpu, i)]); in pcpu_alloc_pages()
[all …]
Dkmemcheck.c11 int pages; in kmemcheck_alloc_shadow() local
14 pages = 1 << order; in kmemcheck_alloc_shadow()
28 for(i = 0; i < pages; ++i) in kmemcheck_alloc_shadow()
36 kmemcheck_hide_pages(page, pages); in kmemcheck_alloc_shadow()
42 int pages; in kmemcheck_free_shadow() local
48 pages = 1 << order; in kmemcheck_free_shadow()
50 kmemcheck_show_pages(page, pages); in kmemcheck_free_shadow()
54 for(i = 0; i < pages; ++i) in kmemcheck_free_shadow()
103 int pages; in kmemcheck_pagealloc_alloc() local
108 pages = 1 << order; in kmemcheck_pagealloc_alloc()
[all …]
Dgup.c418 unsigned int gup_flags, struct page **pages, in __get_user_pages() argument
428 VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET)); in __get_user_pages()
450 pages ? &pages[i] : NULL); in __get_user_pages()
460 i = follow_hugetlb_page(mm, vma, pages, vmas, in __get_user_pages()
495 if (pages) { in __get_user_pages()
496 pages[i] = page; in __get_user_pages()
583 struct page **pages, in __get_user_pages_locked() argument
598 if (pages) in __get_user_pages_locked()
608 ret = __get_user_pages(tsk, mm, start, nr_pages, flags, pages, in __get_user_pages_locked()
620 if (!pages) in __get_user_pages_locked()
[all …]
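
A minimal sketch (not part of these search results) of how a 4.1-era caller typically pins user pages with get_user_pages_fast(), whose signature appears in several hits above, and drops the references again with put_page(). The helper name pin_user_buffer() and its surrounding context are made up for illustration.

#include <linux/mm.h>
#include <linux/slab.h>

/* Pin the user pages backing [uaddr, uaddr + len) for writing.
 * Returns the number of pages pinned, or a negative errno.
 * Hypothetical helper; only get_user_pages_fast()/put_page() are real APIs.
 */
static int pin_user_buffer(unsigned long uaddr, size_t len,
			   struct page ***pagesp)
{
	unsigned long first = uaddr >> PAGE_SHIFT;
	unsigned long last = (uaddr + len - 1) >> PAGE_SHIFT;
	int nr_pages = last - first + 1;
	struct page **pages;
	int pinned;

	pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	/* write=1: the pages will be written to, so break COW up front. */
	pinned = get_user_pages_fast(uaddr, nr_pages, 1, pages);
	if (pinned < 0) {
		kfree(pages);
		return pinned;
	}
	if (pinned < nr_pages) {
		/* Partial pin: release what was pinned and report failure. */
		while (pinned--)
			put_page(pages[pinned]);
		kfree(pages);
		return -EFAULT;
	}

	*pagesp = pages;
	return nr_pages;
}
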
Dmprotect.c67 unsigned long pages = 0; in change_pte_range() local
108 pages++; in change_pte_range()
124 pages++; in change_pte_range()
131 return pages; in change_pte_range()
141 unsigned long pages = 0; in change_pmd_range() local
168 pages += HPAGE_PMD_NR; in change_pmd_range()
180 pages += this_pages; in change_pmd_range()
188 return pages; in change_pmd_range()
197 unsigned long pages = 0; in change_pud_range() local
204 pages += change_pmd_range(vma, pud, addr, next, newprot, in change_pud_range()
[all …]
Dswap_state.c258 void free_pages_and_swap_cache(struct page **pages, int nr) in free_pages_and_swap_cache() argument
260 struct page **pagep = pages; in free_pages_and_swap_cache()
390 unsigned int pages, max_pages, last_ra; in swapin_nr_pages() local
402 pages = atomic_xchg(&swapin_readahead_hits, 0) + 2; in swapin_nr_pages()
403 if (pages == 2) { in swapin_nr_pages()
410 pages = 1; in swapin_nr_pages()
414 while (roundup < pages) in swapin_nr_pages()
416 pages = roundup; in swapin_nr_pages()
419 if (pages > max_pages) in swapin_nr_pages()
420 pages = max_pages; in swapin_nr_pages()
[all …]
Dpercpu-km.c52 struct page *pages; in pcpu_create_chunk() local
59 pages = alloc_pages(GFP_KERNEL, order_base_2(nr_pages)); in pcpu_create_chunk()
60 if (!pages) { in pcpu_create_chunk()
66 pcpu_set_page_chunk(nth_page(pages, i), chunk); in pcpu_create_chunk()
68 chunk->data = pages; in pcpu_create_chunk()
69 chunk->base_addr = page_address(pages) - pcpu_group_offsets[0]; in pcpu_create_chunk()
Dprocess_vm_access.c33 static int process_vm_rw_pages(struct page **pages, in process_vm_rw_pages() argument
41 struct page *page = *pages++; in process_vm_rw_pages()
90 / sizeof(struct pages *); in process_vm_rw_single_vec()
98 int pages = min(nr_pages, max_pages_per_loop); in process_vm_rw_single_vec() local
102 pages = get_user_pages_unlocked(task, mm, pa, pages, in process_vm_rw_single_vec()
104 if (pages <= 0) in process_vm_rw_single_vec()
107 bytes = pages * PAGE_SIZE - start_offset; in process_vm_rw_single_vec()
116 nr_pages -= pages; in process_vm_rw_single_vec()
117 pa += pages * PAGE_SIZE; in process_vm_rw_single_vec()
118 while (pages) in process_vm_rw_single_vec()
[all …]
Dmincore.c173 static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *vec) in do_mincore() argument
189 end = min(vma->vm_end, addr + (pages << PAGE_SHIFT)); in do_mincore()
224 unsigned long pages; in SYSCALL_DEFINE3() local
236 pages = len >> PAGE_SHIFT; in SYSCALL_DEFINE3()
237 pages += (len & ~PAGE_MASK) != 0; in SYSCALL_DEFINE3()
239 if (!access_ok(VERIFY_WRITE, vec, pages)) in SYSCALL_DEFINE3()
247 while (pages) { in SYSCALL_DEFINE3()
253 retval = do_mincore(start, min(pages, PAGE_SIZE), tmp); in SYSCALL_DEFINE3()
262 pages -= retval; in SYSCALL_DEFINE3()
Dswap.c354 void put_pages_list(struct list_head *pages) in put_pages_list() argument
356 while (!list_empty(pages)) { in put_pages_list()
359 victim = list_entry(pages->prev, struct page, lru); in put_pages_list()
380 struct page **pages) in get_kernel_pages() argument
388 pages[seg] = kmap_to_page(kiov[seg].iov_base); in get_kernel_pages()
389 page_cache_get(pages[seg]); in get_kernel_pages()
407 int get_kernel_page(unsigned long start, int write, struct page **pages) in get_kernel_page() argument
414 return get_kernel_pages(&kiov, 1, write, pages); in get_kernel_page()
428 struct page *page = pvec->pages[i]; in pagevec_lru_move_fn()
443 release_pages(pvec->pages, pvec->nr, pvec->cold); in pagevec_lru_move_fn()
[all …]
Dreadahead.c62 struct list_head *pages) in read_cache_pages_invalidate_pages() argument
66 while (!list_empty(pages)) { in read_cache_pages_invalidate_pages()
67 victim = list_to_page(pages); in read_cache_pages_invalidate_pages()
83 int read_cache_pages(struct address_space *mapping, struct list_head *pages, in read_cache_pages() argument
89 while (!list_empty(pages)) { in read_cache_pages()
90 page = list_to_page(pages); in read_cache_pages()
101 read_cache_pages_invalidate_pages(mapping, pages); in read_cache_pages()
112 struct list_head *pages, unsigned nr_pages) in read_pages() argument
121 ret = mapping->a_ops->readpages(filp, mapping, pages, nr_pages); in read_pages()
123 put_pages_list(pages); in read_pages()
[all …]
Dbootmem.c57 static unsigned long __init bootmap_bytes(unsigned long pages) in bootmap_bytes() argument
59 unsigned long bytes = DIV_ROUND_UP(pages, 8); in bootmap_bytes()
68 unsigned long __init bootmem_bootmap_pages(unsigned long pages) in bootmem_bootmap_pages() argument
70 unsigned long bytes = bootmap_bytes(pages); in bootmem_bootmap_pages()
141 unsigned long __init init_bootmem(unsigned long start, unsigned long pages) in init_bootmem() argument
143 max_low_pfn = pages; in init_bootmem()
145 return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages); in init_bootmem()
175 unsigned long *map, start, end, pages, cur, count = 0; in free_all_bootmem_core() local
234 pages = bdata->node_low_pfn - bdata->node_min_pfn; in free_all_bootmem_core()
235 pages = bootmem_bootmap_pages(pages); in free_all_bootmem_core()
[all …]
Dcma.c79 unsigned long pages) in cma_bitmap_pages_to_bits() argument
81 return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit; in cma_bitmap_pages_to_bits()
435 bool cma_release(struct cma *cma, const struct page *pages, unsigned int count) in cma_release() argument
439 if (!cma || !pages) in cma_release()
442 pr_debug("%s(page %p)\n", __func__, (void *)pages); in cma_release()
444 pfn = page_to_pfn(pages); in cma_release()
453 trace_cma_release(pfn, pages, count); in cma_release()
DKconfig161 such as direct mapping pages cannot be migrated. So the corresponding
248 with the reduced number of transparent huge pages that could be used
250 pages enlisted as being part of memory balloon devices avoids the
261 Allows the compaction of memory for the allocation of huge pages.
271 Allows the migration of the physical location of pages of processes
273 two situations. The first is on NUMA systems to put pages nearer
275 pages as migration can relocate pages to satisfy a huge page
306 # a major rework effort. Instead, use the bounce buffer to snapshot pages
336 mergeable. When it finds pages of identical content, it replaces
350 from userspace allocation. Keeping a user from writing to low pages
[all …]
Dhuge_memory.c477 unsigned long pages; in pages_to_scan_store() local
479 err = kstrtoul(buf, 10, &pages); in pages_to_scan_store()
480 if (err || !pages || pages > UINT_MAX) in pages_to_scan_store()
483 khugepaged_pages_to_scan = pages; in pages_to_scan_store()
985 struct page **pages; in do_huge_pmd_wp_page_fallback() local
989 pages = kmalloc(sizeof(struct page *) * HPAGE_PMD_NR, in do_huge_pmd_wp_page_fallback()
991 if (unlikely(!pages)) { in do_huge_pmd_wp_page_fallback()
997 pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE | in do_huge_pmd_wp_page_fallback()
1000 if (unlikely(!pages[i] || in do_huge_pmd_wp_page_fallback()
1001 mem_cgroup_try_charge(pages[i], mm, GFP_KERNEL, in do_huge_pmd_wp_page_fallback()
[all …]
DKconfig.debug18 Unmap pages from the kernel linear mapping after free_pages().
23 fill the pages with poison patterns after free_pages() and verify
27 a resume because free pages are not saved to the suspend image.
Dvmalloc.c118 unsigned long end, pgprot_t prot, struct page **pages, int *nr) in vmap_pte_range() argument
131 struct page *page = pages[*nr]; in vmap_pte_range()
144 unsigned long end, pgprot_t prot, struct page **pages, int *nr) in vmap_pmd_range() argument
154 if (vmap_pte_range(pmd, addr, next, prot, pages, nr)) in vmap_pmd_range()
161 unsigned long end, pgprot_t prot, struct page **pages, int *nr) in vmap_pud_range() argument
171 if (vmap_pmd_range(pud, addr, next, prot, pages, nr)) in vmap_pud_range()
184 pgprot_t prot, struct page **pages) in vmap_page_range_noflush() argument
196 err = vmap_pud_range(pgd, addr, next, prot, pages, &nr); in vmap_page_range_noflush()
205 pgprot_t prot, struct page **pages) in vmap_page_range() argument
209 ret = vmap_page_range_noflush(start, end, prot, pages); in vmap_page_range()
[all …]
Dfrontswap.c348 unsigned long pages = 0, pages_to_unuse = 0; in __frontswap_unuse_pages() local
354 pages = pages_to_unuse = total_pages_to_unuse; in __frontswap_unuse_pages()
356 pages = si_frontswap_pages; in __frontswap_unuse_pages()
360 if (security_vm_enough_memory_mm(current->mm, pages)) { in __frontswap_unuse_pages()
364 vm_unacct_memory(pages); in __frontswap_unuse_pages()
Dzsmalloc.c342 static int zs_zpool_shrink(void *pool, unsigned int pages, in zs_zpool_shrink() argument
1053 struct page *pages[2], int off, int size) in __zs_map_object()
1055 BUG_ON(map_vm_area(area->vm, PAGE_KERNEL, pages)); in __zs_map_object()
1061 struct page *pages[2], int off, int size) in __zs_unmap_object()
1091 struct page *pages[2], int off, int size) in __zs_map_object()
1108 addr = kmap_atomic(pages[0]); in __zs_map_object()
1111 addr = kmap_atomic(pages[1]); in __zs_map_object()
1119 struct page *pages[2], int off, int size) in __zs_unmap_object()
1140 addr = kmap_atomic(pages[0]); in __zs_unmap_object()
1143 addr = kmap_atomic(pages[1]); in __zs_unmap_object()
[all …]
Dcma_debug.c124 int pages = val; in cma_free_write() local
127 return cma_free_mem(cma, pages); in cma_free_write()
156 int pages = val; in cma_alloc_write() local
159 return cma_alloc_mem(cma, pages); in cma_alloc_write()
Dballoon_compaction.c65 list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) { in balloon_page_dequeue()
97 if (unlikely(list_empty(&b_dev_info->pages) && in balloon_page_dequeue()
128 list_add(&page->lru, &b_dev_info->pages); in __putback_balloon_page()
Dnommu.c153 unsigned int foll_flags, struct page **pages, in __get_user_pages() argument
178 if (pages) { in __get_user_pages()
179 pages[i] = virt_to_page(start); in __get_user_pages()
180 if (pages[i]) in __get_user_pages()
181 page_cache_get(pages[i]); in __get_user_pages()
203 int write, int force, struct page **pages, in get_user_pages() argument
213 return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas, in get_user_pages()
220 int write, int force, struct page **pages, in get_user_pages_locked() argument
224 pages, NULL); in get_user_pages_locked()
230 int write, int force, struct page **pages, in __get_user_pages_unlocked() argument
[all …]
/linux-4.1.27/fs/isofs/
Dcompress.c45 struct page **pages, unsigned poffset, in zisofs_uncompress_block() argument
71 if (!pages[i]) in zisofs_uncompress_block()
73 memset(page_address(pages[i]), 0, PAGE_CACHE_SIZE); in zisofs_uncompress_block()
74 flush_dcache_page(pages[i]); in zisofs_uncompress_block()
75 SetPageUptodate(pages[i]); in zisofs_uncompress_block()
121 if (pages[curpage]) { in zisofs_uncompress_block()
122 stream.next_out = page_address(pages[curpage]) in zisofs_uncompress_block()
174 if (pages[curpage]) { in zisofs_uncompress_block()
175 flush_dcache_page(pages[curpage]); in zisofs_uncompress_block()
176 SetPageUptodate(pages[curpage]); in zisofs_uncompress_block()
[all …]
/linux-4.1.27/arch/x86/xen/
Dgrant-table.c119 struct page **pages; in xlated_setup_gnttab_pages() local
127 pages = kcalloc(nr_grant_frames, sizeof(pages[0]), GFP_KERNEL); in xlated_setup_gnttab_pages()
128 if (!pages) in xlated_setup_gnttab_pages()
133 kfree(pages); in xlated_setup_gnttab_pages()
136 rc = alloc_xenballooned_pages(nr_grant_frames, pages, 0 /* lowmem */); in xlated_setup_gnttab_pages()
140 kfree(pages); in xlated_setup_gnttab_pages()
145 pfns[i] = page_to_pfn(pages[i]); in xlated_setup_gnttab_pages()
147 vaddr = vmap(pages, nr_grant_frames, 0, PAGE_KERNEL); in xlated_setup_gnttab_pages()
151 free_xenballooned_pages(nr_grant_frames, pages); in xlated_setup_gnttab_pages()
152 kfree(pages); in xlated_setup_gnttab_pages()
[all …]
/linux-4.1.27/drivers/gpu/drm/ttm/
Dttm_page_alloc.c220 static int set_pages_array_wb(struct page **pages, int addrinarray) in set_pages_array_wb() argument
226 unmap_page_from_agp(pages[i]); in set_pages_array_wb()
231 static int set_pages_array_wc(struct page **pages, int addrinarray) in set_pages_array_wc() argument
237 map_page_into_agp(pages[i]); in set_pages_array_wc()
242 static int set_pages_array_uc(struct page **pages, int addrinarray) in set_pages_array_uc() argument
248 map_page_into_agp(pages[i]); in set_pages_array_uc()
276 static void ttm_pages_put(struct page *pages[], unsigned npages) in ttm_pages_put() argument
279 if (set_pages_array_wb(pages, npages)) in ttm_pages_put()
282 __free_page(pages[i]); in ttm_pages_put()
448 static int ttm_set_pages_caching(struct page **pages, in ttm_set_pages_caching() argument
[all …]
Dttm_tt.c53 ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(void*)); in ttm_tt_alloc_page_directory()
58 ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages, in ttm_dma_tt_alloc_page_directory()
59 sizeof(*ttm->ttm.pages) + in ttm_dma_tt_alloc_page_directory()
62 ttm->cpu_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages); in ttm_dma_tt_alloc_page_directory()
123 drm_clflush_pages(ttm->pages, ttm->num_pages); in ttm_tt_set_caching()
126 cur_page = ttm->pages[i]; in ttm_tt_set_caching()
142 cur_page = ttm->pages[j]; in ttm_tt_set_caching()
201 if (!ttm->pages) { in ttm_tt_init()
212 drm_free_large(ttm->pages); in ttm_tt_fini()
213 ttm->pages = NULL; in ttm_tt_fini()
[all …]
Dttm_page_alloc_dma.c272 static int set_pages_array_wb(struct page **pages, int addrinarray) in set_pages_array_wb() argument
278 unmap_page_from_agp(pages[i]); in set_pages_array_wb()
283 static int set_pages_array_wc(struct page **pages, int addrinarray) in set_pages_array_wc() argument
289 map_page_into_agp(pages[i]); in set_pages_array_wc()
294 static int set_pages_array_uc(struct page **pages, int addrinarray) in set_pages_array_uc() argument
300 map_page_into_agp(pages[i]); in set_pages_array_uc()
307 struct page **pages, unsigned cpages) in ttm_set_pages_caching() argument
312 r = set_pages_array_uc(pages, cpages); in ttm_set_pages_caching()
318 r = set_pages_array_wc(pages, cpages); in ttm_set_pages_caching()
379 struct page *pages[], unsigned npages) in ttm_dma_pages_put() argument
[all …]
/linux-4.1.27/fs/ramfs/
Dfile-nommu.c69 struct page *pages; in ramfs_nommu_expand_for_mapping() local
87 pages = alloc_pages(mapping_gfp_mask(inode->i_mapping), order); in ramfs_nommu_expand_for_mapping()
88 if (!pages) in ramfs_nommu_expand_for_mapping()
95 split_page(pages, order); in ramfs_nommu_expand_for_mapping()
99 __free_page(pages + loop); in ramfs_nommu_expand_for_mapping()
103 data = page_address(pages); in ramfs_nommu_expand_for_mapping()
108 struct page *page = pages + loop; in ramfs_nommu_expand_for_mapping()
127 __free_page(pages + loop++); in ramfs_nommu_expand_for_mapping()
210 struct page **pages = NULL, **ptr, *page; in ramfs_nommu_get_unmapped_area() local
230 pages = kcalloc(lpages, sizeof(struct page *), GFP_KERNEL); in ramfs_nommu_get_unmapped_area()
[all …]
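
A minimal sketch (not from the kernel tree) of the alloc_pages()/split_page() pattern visible in the ramfs hits above: one high-order allocation is split into independent 0-order pages so the unused tail can be freed page by page. The helper name alloc_split_pages() is hypothetical; alloc_pages(), split_page() and __free_page() are the real APIs.

#include <linux/gfp.h>
#include <linux/mm.h>

/* Allocate npages individually-freeable pages by splitting one
 * high-order block and freeing the unused tail.  Sketch only.
 */
static struct page *alloc_split_pages(unsigned int npages, gfp_t gfp)
{
	unsigned int order = get_order(npages << PAGE_SHIFT);
	struct page *pages = alloc_pages(gfp, order);
	unsigned int i;

	if (!pages)
		return NULL;

	/* After split_page(), each of the 1 << order pages has its own
	 * refcount and can be freed with __free_page() on its own. */
	split_page(pages, order);

	for (i = npages; i < (1U << order); i++)
		__free_page(pages + i);

	return pages;
}
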
/linux-4.1.27/fs/squashfs/
Dpage_actor.c32 if (actor->next_page == actor->pages) in cache_next_page()
44 int pages, int length) in squashfs_page_actor_init() argument
51 actor->length = length ? : pages * PAGE_CACHE_SIZE; in squashfs_page_actor_init()
53 actor->pages = pages; in squashfs_page_actor_init()
73 return actor->pageaddr = actor->next_page == actor->pages ? NULL : in direct_next_page()
84 int pages, int length) in squashfs_page_actor_init_special() argument
91 actor->length = length ? : pages * PAGE_CACHE_SIZE; in squashfs_page_actor_init_special()
93 actor->pages = pages; in squashfs_page_actor_init_special()
Dfile_direct.c24 int pages, struct page **page);
37 int i, n, pages, missing_pages, bytes, res = -ENOMEM; in squashfs_readpage_block() local
45 pages = end_index - start_index + 1; in squashfs_readpage_block()
47 page = kmalloc_array(pages, sizeof(void *), GFP_KERNEL); in squashfs_readpage_block()
55 actor = squashfs_page_actor_init_special(page, pages, 0); in squashfs_readpage_block()
60 for (missing_pages = 0, i = 0, n = start_index; i < pages; i++, n++) { in squashfs_readpage_block()
85 res = squashfs_read_cache(target_page, block, bsize, pages, in squashfs_readpage_block()
101 pageaddr = kmap_atomic(page[pages - 1]); in squashfs_readpage_block()
107 for (i = 0; i < pages; i++) { in squashfs_readpage_block()
124 for (i = 0; i < pages; i++) { in squashfs_readpage_block()
[all …]
Dpage_actor.h14 int pages; member
20 int pages, int length) in squashfs_page_actor_init() argument
27 actor->length = length ? : pages * PAGE_CACHE_SIZE; in squashfs_page_actor_init()
29 actor->pages = pages; in squashfs_page_actor_init()
42 return actor->next_page == actor->pages ? NULL : in squashfs_next_page()
60 int pages; member
Dcache.c219 for (j = 0; j < cache->pages; j++) in squashfs_cache_delete()
258 cache->pages = block_size >> PAGE_CACHE_SHIFT; in squashfs_cache_init()
259 cache->pages = cache->pages ? cache->pages : 1; in squashfs_cache_init()
271 entry->data = kcalloc(cache->pages, sizeof(void *), GFP_KERNEL); in squashfs_cache_init()
277 for (j = 0; j < cache->pages; j++) { in squashfs_cache_init()
286 cache->pages, 0); in squashfs_cache_init()
418 int pages = (length + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; in squashfs_read_table() local
427 data = kcalloc(pages, sizeof(void *), GFP_KERNEL); in squashfs_read_table()
433 actor = squashfs_page_actor_init(data, pages, length); in squashfs_read_table()
439 for (i = 0; i < pages; i++, buffer += PAGE_CACHE_SIZE) in squashfs_read_table()
/linux-4.1.27/fs/proc/
Dmeminfo.c35 unsigned long pages[NR_LRU_LISTS]; in meminfo_proc_show() local
55 pages[lru] = global_page_state(NR_LRU_BASE + lru); in meminfo_proc_show()
74 pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE]; in meminfo_proc_show()
155 K(pages[LRU_ACTIVE_ANON] + pages[LRU_ACTIVE_FILE]), in meminfo_proc_show()
156 K(pages[LRU_INACTIVE_ANON] + pages[LRU_INACTIVE_FILE]), in meminfo_proc_show()
157 K(pages[LRU_ACTIVE_ANON]), in meminfo_proc_show()
158 K(pages[LRU_INACTIVE_ANON]), in meminfo_proc_show()
159 K(pages[LRU_ACTIVE_FILE]), in meminfo_proc_show()
160 K(pages[LRU_INACTIVE_FILE]), in meminfo_proc_show()
161 K(pages[LRU_UNEVICTABLE]), in meminfo_proc_show()
/linux-4.1.27/arch/mips/mm/
Dgup.c38 int write, struct page **pages, int *nr) in gup_pte_range() argument
54 pages[*nr] = page; in gup_pte_range()
72 int write, struct page **pages, int *nr) in gup_huge_pmd() argument
89 pages[*nr] = page; in gup_huge_pmd()
102 int write, struct page **pages, int *nr) in gup_pmd_range() argument
126 if (!gup_huge_pmd(pmd, addr, next, write, pages,nr)) in gup_pmd_range()
129 if (!gup_pte_range(pmd, addr, next, write, pages,nr)) in gup_pmd_range()
138 int write, struct page **pages, int *nr) in gup_huge_pud() argument
155 pages[*nr] = page; in gup_huge_pud()
168 int write, struct page **pages, int *nr) in gup_pud_range() argument
[all …]
/linux-4.1.27/net/rds/
Dinfo.c65 struct page **pages; member
122 iter->addr = kmap_atomic(*iter->pages); in rds_info_copy()
127 "bytes %lu\n", *iter->pages, iter->addr, in rds_info_copy()
140 iter->pages++; in rds_info_copy()
167 struct page **pages = NULL; in rds_info_getsockopt() local
191 pages = kmalloc(nr_pages * sizeof(struct page *), GFP_KERNEL); in rds_info_getsockopt()
192 if (!pages) { in rds_info_getsockopt()
196 ret = get_user_pages_fast(start, nr_pages, 1, pages); in rds_info_getsockopt()
215 iter.pages = pages; in rds_info_getsockopt()
238 for (i = 0; pages && i < nr_pages; i++) in rds_info_getsockopt()
[all …]
Drdma.c158 struct page **pages, int write) in rds_pin_pages() argument
162 ret = get_user_pages_fast(user_addr, nr_pages, write, pages); in rds_pin_pages()
166 put_page(pages[ret]); in rds_pin_pages()
178 struct page **pages = NULL; in __rds_rdma_map() local
207 pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL); in __rds_rdma_map()
208 if (!pages) { in __rds_rdma_map()
241 ret = rds_pin_pages(args->vec.addr, nr_pages, pages, 1); in __rds_rdma_map()
256 sg_set_page(&sg[i], pages[i], PAGE_SIZE, 0); in __rds_rdma_map()
309 kfree(pages); in __rds_rdma_map()
553 struct page **pages = NULL; in rds_cmsg_rdma_args() local
[all …]
/linux-4.1.27/drivers/gpu/drm/udl/
Dudl_gem.c113 if (!obj->pages) in udl_gem_fault()
116 page = obj->pages[page_offset]; in udl_gem_fault()
132 struct page **pages; in udl_gem_get_pages() local
134 if (obj->pages) in udl_gem_get_pages()
137 pages = drm_gem_get_pages(&obj->base); in udl_gem_get_pages()
138 if (IS_ERR(pages)) in udl_gem_get_pages()
139 return PTR_ERR(pages); in udl_gem_get_pages()
141 obj->pages = pages; in udl_gem_get_pages()
149 drm_free_large(obj->pages); in udl_gem_put_pages()
150 obj->pages = NULL; in udl_gem_put_pages()
[all …]
Dudl_dmabuf.c92 if (!obj->pages) { in udl_map_dma_buf()
101 obj->sg = drm_prime_pages_to_sg(obj->pages, page_count); in udl_map_dma_buf()
231 obj->pages = drm_malloc_ab(npages, sizeof(struct page *)); in udl_prime_create()
232 if (obj->pages == NULL) { in udl_prime_create()
237 drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages); in udl_prime_create()
/linux-4.1.27/arch/s390/mm/
Dgup.c21 unsigned long end, int write, struct page **pages, int *nr) in gup_pte_range() argument
43 pages[*nr] = page; in gup_pte_range()
52 unsigned long end, int write, struct page **pages, int *nr) in gup_huge_pmd() argument
70 pages[*nr] = page; in gup_huge_pmd()
103 unsigned long end, int write, struct page **pages, int *nr) in gup_pmd_range() argument
129 write, pages, nr)) in gup_pmd_range()
132 write, pages, nr)) in gup_pmd_range()
140 unsigned long end, int write, struct page **pages, int *nr) in gup_pud_range() argument
155 if (!gup_pmd_range(pudp, pud, addr, next, write, pages, nr)) in gup_pud_range()
167 struct page **pages) in __get_user_pages_fast() argument
[all …]
/linux-4.1.27/arch/tile/kernel/
Dvdso.c52 static struct page **vdso_setup(void *vdso_kbase, unsigned int pages) in vdso_setup() argument
57 pagelist = kzalloc(sizeof(struct page *) * (pages + 1), GFP_KERNEL); in vdso_setup()
59 for (i = 0; i < pages - 1; i++) { in vdso_setup()
64 pagelist[pages - 1] = virt_to_page(vdso_data); in vdso_setup()
65 pagelist[pages] = NULL; in vdso_setup()
127 unsigned long pages; in setup_vdso_pages() local
138 pages = vdso_pages; in setup_vdso_pages()
142 pages = vdso32_pages; in setup_vdso_pages()
150 if (pages == 0) in setup_vdso_pages()
154 (pages << PAGE_SHIFT) + in setup_vdso_pages()
[all …]
Dmodule.c39 struct page **pages; in module_alloc() local
46 pages = kmalloc(npages * sizeof(struct page *), GFP_KERNEL); in module_alloc()
47 if (pages == NULL) in module_alloc()
50 pages[i] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM); in module_alloc()
51 if (!pages[i]) in module_alloc()
59 area->pages = pages; in module_alloc()
61 if (map_vm_area(area, prot_rwx, pages)) { in module_alloc()
70 __free_page(pages[i]); in module_alloc()
71 kfree(pages); in module_alloc()
/linux-4.1.27/arch/sparc/mm/
Dgup.c21 unsigned long end, int write, struct page **pages, int *nr) in gup_pte_range() argument
62 pages[*nr] = page; in gup_pte_range()
70 unsigned long end, int write, struct page **pages, in gup_huge_pmd() argument
88 pages[*nr] = page; in gup_huge_pmd()
119 int write, struct page **pages, int *nr) in gup_pmd_range() argument
133 write, pages, nr)) in gup_pmd_range()
136 pages, nr)) in gup_pmd_range()
144 int write, struct page **pages, int *nr) in gup_pud_range() argument
156 if (!gup_pmd_range(pud, addr, next, write, pages, nr)) in gup_pud_range()
164 struct page **pages) in __get_user_pages_fast() argument
[all …]
/linux-4.1.27/Documentation/vm/
Dhugetlbpage.txt22 persistent hugetlb pages in the kernel's huge page pool. It also displays
23 information about the number of free, reserved and surplus huge pages and the
38 HugePages_Total is the size of the pool of huge pages.
39 HugePages_Free is the number of huge pages in the pool that are not yet
41 HugePages_Rsvd is short for "reserved," and is the number of huge pages for
43 but no allocation has yet been made. Reserved huge pages
45 huge page from the pool of huge pages at fault time.
46 HugePages_Surp is short for "surplus," and is the number of huge pages in
48 maximum number of surplus huge pages is controlled by
55 pages in the kernel's huge page pool. "Persistent" huge pages will be
[all …]
Dunevictable-lru.txt15 - vmscan's handling of unevictable pages.
17 (*) mlock()'d pages.
24 - Migrating mlocked pages.
25 - Compacting mlocked pages.
39 pages.
54 pages and to hide these pages from vmscan. This mechanism is based on a patch
60 main memory will have over 32 million 4k pages in a single zone. When a large
61 fraction of these pages are not evictable for any reason [see below], vmscan
63 of pages that are evictable. This can result in a situation where all CPUs are
67 The unevictable list addresses the following classes of unevictable pages:
[all …]
Dksm.txt9 have been registered with it, looking for pages of identical content which
18 KSM only merges anonymous (private) pages, never pagecache (file) pages.
19 KSM's merged pages were originally locked into kernel memory, but can now
20 be swapped out just like other user pages (but sharing is broken when they
28 that advice and restore unshared pages: whereupon KSM unmerges whatever
38 cannot contain any pages which KSM could actually merge; even if
53 pages_to_scan - how many present pages to scan before ksmd goes to sleep
61 merge_across_nodes - specifies if pages from different numa nodes can be merged.
62 When set to 0, ksm merges only pages which physically
64 lower latency to access of shared pages. Systems with more
[all …]
Dpage_migration4 Page migration allows the moving of the physical location of pages between
7 system rearranges the physical location of those pages.
10 by moving pages near to the processor where the process accessing that memory
14 pages are located through the MF_MOVE and MF_MOVE_ALL options while setting
15 a new memory policy via mbind(). The pages of process can also be relocated
17 migrate_pages function call takes two sets of nodes and moves pages of a
24 pages of a process are located. See also the numa_maps documentation in the
29 administrator may detect the situation and move the pages of the process
32 through user space processes that move pages. A special function call
33 "move_pages" allows the moving of individual pages within a process.
[all …]
Dbalance19 mapped pages from the direct mapped pool, instead of falling back on
21 or not). A similar argument applies to highmem and direct mapped pages.
22 OTOH, if there is a lot of free dma pages, it is preferable to satisfy
27 _total_ number of free pages fell below 1/64 th of total memory. With the
36 at init time how many free pages we should aim for while balancing any
49 fancy, we could assign different weights to free pages in different
53 it becomes less significant to consider the free dma pages while
62 fall back into regular zone. This also makes sure that HIGHMEM pages
71 highmem pages. kswapd looks at the zone_wake_kswapd field in the zone
80 the number of pages falls below watermark[WMARK_MIN], the hysteric field
[all …]
Dpagemap.txt26 swap. Unmapped pages return a null PFN. This allows determining
27 precisely which pages are mapped (or in swap) and comparing mapped
28 pages between processes.
81 An order N block has 2^N physically contiguous pages, with the BUDDY flag
86 A compound page with order N consists of 2^N physically contiguous pages.
89 pages are hugeTLB pages (Documentation/vm/hugetlbpage.txt), the SLUB etc.
91 only huge/giga pages are made visible to end users.
102 identical memory pages dynamically shared between one or more processes
105 contiguous pages which construct transparent hugepages
126 eg. ramfs pages, shmctl(SHM_LOCK) and mlock() memory segments
[all …]
Dzswap.txt3 Zswap is a lightweight compressed cache for swap pages. It takes pages that are
25 Zswap evicts pages from compressed cache on an LRU basis to the backing swap
34 Zswap receives pages for compression through the Frontswap API and is able to
35 evict pages from its own compressed pool on an LRU basis and write them back to
42 pages are freed. The pool is not preallocated.
68 of pages stored, and various counters for the reasons pages are rejected.
Dtranshuge.txt8 using huge pages for the backing of virtual memory with huge pages
40 working on the regular pages and their respective regular pmd/pte
44 regular pages should be gracefully allocated instead and mixed in
50 backed by regular pages should be relocated on hugepages
55 to avoid unmovable pages to fragment all the memory but such a tweak
108 to never try to defrag memory and simply fallback to regular pages
111 we use hugepages later instead of regular pages. This isn't always
139 You can also control how many pages khugepaged should scan at each
154 The khugepaged progress can be seen in the number of pages collapsed:
162 max_ptes_none specifies how many extra small pages (that are
[all …]
Dhwpoison.txt11 * High level machine check handler. Handles pages reported by the
15 * This focusses on pages detected as corrupted in the background.
22 * Handles page cache pages in various states. The tricky part
38 pages.
68 Note some pages are always handled as late kill.
112 some early filtering to avoid corrupted unintended pages in test suites.
127 Only handle memory failures to pages associated with the file system defined
133 Limit injection to pages owned by memgroup. Specified by inode number
146 page-types -p `pidof usemem` --hwpoison # poison its pages
151 When specified, only poison pages if ((page_flags & mask) == value).
[all …]
Dzsmalloc.txt8 (0-order) pages, it would suffer from very high fragmentation --
12 To overcome these issues, zsmalloc allocates a bunch of 0-order pages
14 pages act as a single higher-order page i.e. an object can span 0-order
15 page boundaries. The code refers to these linked pages as a single entity
58 pages_used: the number of pages allocated for the class
59 pages_per_zspage: the number of 0-order pages to make a zspage
Dfrontswap.txt1 Frontswap provides a "transcendent memory" interface for swap pages.
3 swapped pages are saved in RAM (or a RAM-like device) instead of a swap disk.
23 An "init" prepares the device to receive frontswap pages associated
29 from transcendent memory and an "invalidate_area" will remove ALL pages
45 store frontswap pages to more completely manage its memory usage.
69 providing a clean, dynamic interface to read and write swap pages to
73 useful for write-balancing for some RAM-like devices). Swap pages (and
74 evicted page-cache pages) are a great use for this kind of slower-than-RAM-
77 and write -- and indirectly "name" -- the pages.
83 In the single kernel case, aka "zcache", pages are compressed and
[all …]
Dcleancache.txt8 pages that the kernel's pageframe replacement algorithm (PFRA) would like
36 Thus, as its name implies, cleancache is not suitable for dirty pages.
37 Cleancache has complete discretion over what pages to preserve and what
38 pages to discard and when.
48 an "invalidate_inode" will invalidate all pages associated with the specified
50 all pages in all files specified by the given pool id and also surrender
58 same UUID will receive the same pool id, thus allowing the pages to
102 effectiveness of the pagecache. Clean pagecache pages are
104 addressable to the kernel); fetching those pages later avoids "refaults"
113 balancing for some RAM-like devices). Evicted page-cache pages (and
[all …]
Dpage_owner.txt9 and order of pages is stored into certain storage for each page.
10 When we need to know about status of all pages, we can get and analyze
54 memory system, so, until initialization, many pages can be allocated and
56 pages are investigated and marked as allocated in initialization phase.
59 more accurately. On 2GB memory x86-64 VM box, 13343 early allocated pages
/linux-4.1.27/arch/m68k/mm/
Dsun3kmap.c49 unsigned long type, int pages) in do_pmeg_mapin() argument
55 while(pages) { in do_pmeg_mapin()
59 pages--; in do_pmeg_mapin()
68 int pages; in sun3_ioremap() local
87 pages = size / PAGE_SIZE; in sun3_ioremap()
91 while(pages) { in sun3_ioremap()
95 if(seg_pages > pages) in sun3_ioremap()
96 seg_pages = pages; in sun3_ioremap()
100 pages -= seg_pages; in sun3_ioremap()
/linux-4.1.27/arch/x86/mm/
Dgup.c72 unsigned long end, int write, struct page **pages, int *nr) in gup_pte_range() argument
100 pages[*nr] = page; in gup_pte_range()
118 unsigned long end, int write, struct page **pages, int *nr) in gup_huge_pmd() argument
139 pages[*nr] = page; in gup_huge_pmd()
152 int write, struct page **pages, int *nr) in gup_pmd_range() argument
183 if (!gup_huge_pmd(pmd, addr, next, write, pages, nr)) in gup_pmd_range()
186 if (!gup_pte_range(pmd, addr, next, write, pages, nr)) in gup_pmd_range()
195 unsigned long end, int write, struct page **pages, int *nr) in gup_huge_pud() argument
216 pages[*nr] = page; in gup_huge_pud()
229 int write, struct page **pages, int *nr) in gup_pud_range() argument
[all …]
Dpageattr.c41 struct page **pages; member
59 void update_page_count(int level, unsigned long pages) in update_page_count() argument
63 direct_pages_count[level] += pages; in update_page_count()
209 int in_flags, struct page **pages) in cpa_flush_array() argument
232 addr = (unsigned long)page_address(pages[i]); in cpa_flush_array()
1131 struct page *page = cpa->pages[cpa->curpage]; in __change_page_attr()
1248 struct page *page = cpa->pages[cpa->curpage]; in cpa_process_alias()
1341 struct page **pages) in change_page_attr_set_clr() argument
1392 cpa.pages = pages; in change_page_attr_set_clr()
1429 cpa.flags, pages); in change_page_attr_set_clr()
[all …]
Dinit_64.c405 unsigned long pages = 0, next; in phys_pte_init() local
429 pages++; in phys_pte_init()
436 pages++; in phys_pte_init()
441 update_page_count(PG_LEVEL_4K, pages); in phys_pte_init()
450 unsigned long pages = 0, next; in phys_pmd_init() local
492 pages++; in phys_pmd_init()
500 pages++; in phys_pmd_init()
517 update_page_count(PG_LEVEL_2M, pages); in phys_pmd_init()
525 unsigned long pages = 0, next; in phys_pud_init() local
565 pages++; in phys_pud_init()
[all …]
/linux-4.1.27/drivers/gpu/drm/
Ddrm_scatter.c55 for (i = 0; i < entry->pages; i++) { in drm_sg_cleanup()
87 unsigned long pages, i, j; in drm_legacy_sg_alloc() local
104 pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE; in drm_legacy_sg_alloc()
105 DRM_DEBUG("size=%ld pages=%ld\n", request->size, pages); in drm_legacy_sg_alloc()
107 entry->pages = pages; in drm_legacy_sg_alloc()
108 entry->pagelist = kcalloc(pages, sizeof(*entry->pagelist), GFP_KERNEL); in drm_legacy_sg_alloc()
114 entry->busaddr = kcalloc(pages, sizeof(*entry->busaddr), GFP_KERNEL); in drm_legacy_sg_alloc()
121 entry->virtual = drm_vmalloc_dma(pages << PAGE_SHIFT); in drm_legacy_sg_alloc()
132 memset(entry->virtual, 0, pages << PAGE_SHIFT); in drm_legacy_sg_alloc()
139 for (i = (unsigned long)entry->virtual, j = 0; j < pages; in drm_legacy_sg_alloc()
[all …]
Dati_pcigart.c62 unsigned long pages; in drm_ati_pcigart_cleanup() local
75 pages = (entry->pages <= max_pages) in drm_ati_pcigart_cleanup()
76 ? entry->pages : max_pages; in drm_ati_pcigart_cleanup()
78 for (i = 0; i < pages; i++) { in drm_ati_pcigart_cleanup()
103 unsigned long pages; in drm_ati_pcigart_init() local
144 pages = (entry->pages <= max_real_pages) in drm_ati_pcigart_init()
145 ? entry->pages : max_real_pages; in drm_ati_pcigart_init()
154 for (i = 0; i < pages; i++) { in drm_ati_pcigart_init()
Ddrm_cache.c58 static void drm_cache_flush_clflush(struct page *pages[], in drm_cache_flush_clflush() argument
65 drm_clflush_page(*pages++); in drm_cache_flush_clflush()
71 drm_clflush_pages(struct page *pages[], unsigned long num_pages) in drm_clflush_pages() argument
76 drm_cache_flush_clflush(pages, num_pages); in drm_clflush_pages()
86 struct page *page = pages[i]; in drm_clflush_pages()
Ddrm_agpsupport.c203 unsigned long pages; in drm_agp_alloc() local
211 pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE; in drm_agp_alloc()
213 if (!(memory = agp_allocate_memory(dev->agp->bridge, pages, type))) { in drm_agp_alloc()
221 entry->pages = pages; in drm_agp_alloc()
371 drm_free_agp(entry->memory, entry->pages); in drm_agp_free()
451 drm_free_agp(entry->memory, entry->pages); in drm_agp_clear()
472 struct page **pages, in drm_agp_bind_pages() argument
491 mem->pages[i] = pages[i]; in drm_agp_bind_pages()
Ddrm_gem.c465 struct page *p, **pages; in drm_gem_get_pages() local
479 pages = drm_malloc_ab(npages, sizeof(struct page *)); in drm_gem_get_pages()
480 if (pages == NULL) in drm_gem_get_pages()
487 pages[i] = p; in drm_gem_get_pages()
498 return pages; in drm_gem_get_pages()
502 page_cache_release(pages[i]); in drm_gem_get_pages()
504 drm_free_large(pages); in drm_gem_get_pages()
516 void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages, in drm_gem_put_pages() argument
531 set_page_dirty(pages[i]); in drm_gem_put_pages()
534 mark_page_accessed(pages[i]); in drm_gem_put_pages()
[all …]
Ddrm_vma_manager.c132 unsigned long pages) in drm_vma_offset_lookup() argument
137 node = drm_vma_offset_lookup_locked(mgr, start, pages); in drm_vma_offset_lookup()
159 unsigned long pages) in drm_vma_offset_lookup_locked() argument
184 if (offset < start + pages) in drm_vma_offset_lookup_locked()
240 struct drm_vma_offset_node *node, unsigned long pages) in drm_vma_offset_add() argument
252 pages, 0, DRM_MM_SEARCH_DEFAULT); in drm_vma_offset_add()
Ddrm_memory.c71 && (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >= in agp_remap()
87 phys_page_map = (agpmem->memory->pages + (offset - agpmem->bound) / PAGE_SIZE); in agp_remap()
97 void drm_free_agp(struct agp_memory * handle, int pages) in drm_free_agp() argument
/linux-4.1.27/sound/core/
Dsgbuf.c47 for (i = 0; i < sgbuf->pages; i++) { in snd_free_sgbuf_pages()
71 unsigned int i, pages, chunk, maxpages; in snd_malloc_sgbuf_pages() local
82 pages = snd_sgbuf_aligned_pages(size); in snd_malloc_sgbuf_pages()
83 sgbuf->tblsize = sgbuf_align_table(pages); in snd_malloc_sgbuf_pages()
95 while (pages > 0) { in snd_malloc_sgbuf_pages()
96 chunk = pages; in snd_malloc_sgbuf_pages()
103 if (!sgbuf->pages) in snd_malloc_sgbuf_pages()
107 size = sgbuf->pages * PAGE_SIZE; in snd_malloc_sgbuf_pages()
121 sgbuf->pages += chunk; in snd_malloc_sgbuf_pages()
122 pages -= chunk; in snd_malloc_sgbuf_pages()
[all …]
/linux-4.1.27/arch/sh/mm/
Dgup.c75 unsigned long end, int write, struct page **pages, int *nr) in gup_pte_range() argument
110 pages[*nr] = page; in gup_pte_range()
120 int write, struct page **pages, int *nr) in gup_pmd_range() argument
132 if (!gup_pte_range(pmd, addr, next, write, pages, nr)) in gup_pmd_range()
140 int write, struct page **pages, int *nr) in gup_pud_range() argument
152 if (!gup_pmd_range(pud, addr, next, write, pages, nr)) in gup_pud_range()
164 struct page **pages) in __get_user_pages_fast() argument
193 if (!gup_pud_range(pgd, addr, next, write, pages, &nr)) in __get_user_pages_fast()
218 struct page **pages) in get_user_pages_fast() argument
242 if (!gup_pud_range(pgd, addr, next, write, pages, &nr)) in get_user_pages_fast()
[all …]
/linux-4.1.27/drivers/net/ethernet/amd/xgbe/
Dxgbe-desc.c141 if (ring->rx_hdr_pa.pages) { in xgbe_free_ring()
144 put_page(ring->rx_hdr_pa.pages); in xgbe_free_ring()
146 ring->rx_hdr_pa.pages = NULL; in xgbe_free_ring()
152 if (ring->rx_buf_pa.pages) { in xgbe_free_ring()
155 put_page(ring->rx_buf_pa.pages); in xgbe_free_ring()
157 ring->rx_buf_pa.pages = NULL; in xgbe_free_ring()
261 struct page *pages = NULL; in xgbe_alloc_pages() local
268 pages = alloc_pages(gfp, order); in xgbe_alloc_pages()
269 if (pages) in xgbe_alloc_pages()
274 if (!pages) in xgbe_alloc_pages()
[all …]
/linux-4.1.27/drivers/gpu/drm/omapdrm/
Domap_gem_dmabuf.c85 struct page **pages; in omap_gem_dmabuf_begin_cpu_access() local
93 return omap_gem_get_pages(obj, &pages, true); in omap_gem_dmabuf_begin_cpu_access()
108 struct page **pages; in omap_gem_dmabuf_kmap_atomic() local
109 omap_gem_get_pages(obj, &pages, false); in omap_gem_dmabuf_kmap_atomic()
111 return kmap_atomic(pages[page_num]); in omap_gem_dmabuf_kmap_atomic()
124 struct page **pages; in omap_gem_dmabuf_kmap() local
125 omap_gem_get_pages(obj, &pages, false); in omap_gem_dmabuf_kmap()
127 return kmap(pages[page_num]); in omap_gem_dmabuf_kmap()
134 struct page **pages; in omap_gem_dmabuf_kunmap() local
135 omap_gem_get_pages(obj, &pages, false); in omap_gem_dmabuf_kunmap()
[all …]
Domap_gem.c30 void _drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
88 struct page **pages; member
122 static int get_pages(struct drm_gem_object *obj, struct page ***pages);
229 struct page **pages; in omap_gem_attach_pages() local
234 WARN_ON(omap_obj->pages); in omap_gem_attach_pages()
236 pages = drm_gem_get_pages(obj); in omap_gem_attach_pages()
237 if (IS_ERR(pages)) { in omap_gem_attach_pages()
238 dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages)); in omap_gem_attach_pages()
239 return PTR_ERR(pages); in omap_gem_attach_pages()
253 addrs[i] = dma_map_page(dev->dev, pages[i], in omap_gem_attach_pages()
[all …]
DTODO3 accessing the pages via a GART, so maybe we need some other threshold
4 to put a cap on the # of pages that can be pin'd.
5 . Use mm_shrinker to trigger unpinning pages.
8 . GEM/shmem backed pages can have existing mappings (kernel linear map,
/linux-4.1.27/drivers/gpu/drm/exynos/
Dexynos_drm_buf.c60 buf->pages = drm_calloc_large(nr_pages, sizeof(struct page *)); in lowlevel_buffer_allocate()
61 if (!buf->pages) { in lowlevel_buffer_allocate()
78 buf->pages[i] = phys_to_page(start_addr); in lowlevel_buffer_allocate()
84 buf->pages = dma_alloc_attrs(dev->dev, buf->size, in lowlevel_buffer_allocate()
87 if (!buf->pages) { in lowlevel_buffer_allocate()
93 buf->sgt = drm_prime_pages_to_sg(buf->pages, nr_pages); in lowlevel_buffer_allocate()
107 dma_free_attrs(dev->dev, buf->size, buf->pages, in lowlevel_buffer_allocate()
112 drm_free_large(buf->pages); in lowlevel_buffer_allocate()
137 drm_free_large(buf->pages); in lowlevel_buffer_deallocate()
139 dma_free_attrs(dev->dev, buf->size, buf->pages, in lowlevel_buffer_deallocate()
Dexynos_drm_gem.h45 struct page **pages; member
164 struct page **pages,
168 void exynos_gem_put_pages_to_userptr(struct page **pages,
Dexynos_drm_gem.c344 ret = dma_mmap_attrs(drm_dev->dev, vma, buffer->pages, in exynos_drm_gem_mmap_buffer()
420 struct page **pages, in exynos_gem_get_pages_from_userptr() argument
435 pages[i] = pfn_to_page(pfn); in exynos_gem_get_pages_from_userptr()
447 npages, 1, 1, pages, NULL); in exynos_gem_get_pages_from_userptr()
452 put_page(pages[--get_npages]); in exynos_gem_get_pages_from_userptr()
459 void exynos_gem_put_pages_to_userptr(struct page **pages, in exynos_gem_put_pages_to_userptr() argument
467 set_page_dirty_lock(pages[i]); in exynos_gem_put_pages_to_userptr()
473 put_page(pages[i]); in exynos_gem_put_pages_to_userptr()
/linux-4.1.27/drivers/staging/lustre/lustre/llite/
Drw26.c189 size_t size, struct page ***pages, in ll_get_user_pages() argument
196 *pages = NULL; in ll_get_user_pages()
203 OBD_ALLOC_LARGE(*pages, *max_pages * sizeof(**pages)); in ll_get_user_pages()
204 if (*pages) { in ll_get_user_pages()
206 (rw == READ), *pages); in ll_get_user_pages()
208 OBD_FREE_LARGE(*pages, *max_pages * sizeof(**pages)); in ll_get_user_pages()
216 static void ll_free_user_pages(struct page **pages, int npages, int do_dirty) in ll_free_user_pages() argument
222 set_page_dirty_lock(pages[i]); in ll_free_user_pages()
223 page_cache_release(pages[i]); in ll_free_user_pages()
225 kvfree(pages); in ll_free_user_pages()
[all …]
/linux-4.1.27/include/trace/events/
Dtlb.h38 TP_PROTO(int reason, unsigned long pages),
39 TP_ARGS(reason, pages),
45 __field(unsigned long, pages)
50 __entry->pages = pages;
54 __entry->pages,
/linux-4.1.27/arch/mips/jazz/
Djazzdma.c94 int first, last, pages, frame, i; in vdma_alloc() local
115 pages = VDMA_PAGE(paddr + size) - VDMA_PAGE(paddr) + 1; in vdma_alloc()
120 if (first + pages > VDMA_PGTBL_ENTRIES) { /* nothing free */ in vdma_alloc()
127 && last - first < pages) in vdma_alloc()
130 if (last - first == pages) in vdma_alloc()
154 pages, laddr); in vdma_alloc()
214 int first, pages; in vdma_remap() local
231 pages = (((paddr & (VDMA_PAGESIZE - 1)) + size) >> 12) + 1; in vdma_remap()
234 printk("vdma_remap: first=%x, pages=%x\n", first, pages); in vdma_remap()
235 if (first + pages > VDMA_PGTBL_ENTRIES) { in vdma_remap()
[all …]
/linux-4.1.27/drivers/block/xen-blkback/
Dblkback.c272 struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST]; in free_persistent_gnts() local
278 unmap_data.pages = pages; in free_persistent_gnts()
291 pages[segs_to_unmap] = persistent_gnt->page; in free_persistent_gnts()
299 put_free_pages(blkif, pages, segs_to_unmap); in free_persistent_gnts()
313 struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST]; in xen_blkbk_unmap_purged_grants() local
319 unmap_data.pages = pages; in xen_blkbk_unmap_purged_grants()
334 pages[segs_to_unmap] = persistent_gnt->page; in xen_blkbk_unmap_purged_grants()
339 put_free_pages(blkif, pages, segs_to_unmap); in xen_blkbk_unmap_purged_grants()
347 put_free_pages(blkif, pages, segs_to_unmap); in xen_blkbk_unmap_purged_grants()
670 struct grant_page **pages, in xen_blkbk_unmap_prepare() argument
[all …]
/linux-4.1.27/drivers/gpu/drm/gma500/
Dgtt.c89 struct page **pages; in psb_gtt_insert() local
92 if (r->pages == NULL) { in psb_gtt_insert()
100 pages = r->pages; in psb_gtt_insert()
104 set_pages_array_wc(pages, r->npage); in psb_gtt_insert()
109 pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), in psb_gtt_insert()
114 pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), in psb_gtt_insert()
149 set_pages_array_wb(r->pages, r->npage); in psb_gtt_remove()
183 pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), in psb_gtt_roll()
188 pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), in psb_gtt_roll()
205 struct page **pages; in psb_gtt_attach_pages() local
[all …]
/linux-4.1.27/drivers/media/v4l2-core/
Dvideobuf2-dma-sg.c40 struct page **pages; member
68 struct page *pages; in vb2_dma_sg_alloc_compacted() local
77 pages = NULL; in vb2_dma_sg_alloc_compacted()
78 while (!pages) { in vb2_dma_sg_alloc_compacted()
79 pages = alloc_pages(GFP_KERNEL | __GFP_ZERO | in vb2_dma_sg_alloc_compacted()
81 if (pages) in vb2_dma_sg_alloc_compacted()
86 __free_page(buf->pages[last_page]); in vb2_dma_sg_alloc_compacted()
92 split_page(pages, order); in vb2_dma_sg_alloc_compacted()
94 buf->pages[last_page++] = &pages[i]; in vb2_dma_sg_alloc_compacted()
128 buf->pages = kzalloc(buf->num_pages * sizeof(struct page *), in vb2_dma_sg_alloc()
[all …]
Dvideobuf-dma-sg.c95 static struct scatterlist *videobuf_pages_to_sg(struct page **pages, in videobuf_pages_to_sg() argument
101 if (NULL == pages[0]) in videobuf_pages_to_sg()
108 if (PageHighMem(pages[0])) in videobuf_pages_to_sg()
111 sg_set_page(&sglist[0], pages[0], in videobuf_pages_to_sg()
115 if (NULL == pages[i]) in videobuf_pages_to_sg()
117 if (PageHighMem(pages[i])) in videobuf_pages_to_sg()
119 sg_set_page(&sglist[i], pages[i], min_t(size_t, PAGE_SIZE, size), 0); in videobuf_pages_to_sg()
177 dma->pages = kmalloc(dma->nr_pages * sizeof(struct page *), GFP_KERNEL); in videobuf_dma_init_user_locked()
178 if (NULL == dma->pages) in videobuf_dma_init_user_locked()
187 dma->pages, NULL); in videobuf_dma_init_user_locked()
[all …]
Dvideobuf-vmalloc.c162 int pages; in __videobuf_iolock() local
179 pages = PAGE_ALIGN(vb->size); in __videobuf_iolock()
192 mem->vaddr = vmalloc_user(pages); in __videobuf_iolock()
194 printk(KERN_ERR "vmalloc (%d pages) failed\n", pages); in __videobuf_iolock()
198 mem->vaddr, pages); in __videobuf_iolock()
238 int retval, pages; in __videobuf_mmap_mapper() local
256 pages = PAGE_ALIGN(vma->vm_end - vma->vm_start); in __videobuf_mmap_mapper()
257 mem->vaddr = vmalloc_user(pages); in __videobuf_mmap_mapper()
259 printk(KERN_ERR "vmalloc (%d pages) failed\n", pages); in __videobuf_mmap_mapper()
262 dprintk(1, "vmalloc is at addr %p (%d pages)\n", mem->vaddr, pages); in __videobuf_mmap_mapper()
Dvideobuf2-vmalloc.c26 struct page **pages; member
105 buf->pages = kzalloc(buf->n_pages * sizeof(struct page *), in vb2_vmalloc_get_userptr()
107 if (!buf->pages) in vb2_vmalloc_get_userptr()
115 buf->pages, NULL); in vb2_vmalloc_get_userptr()
119 buf->vaddr = vm_map_ram(buf->pages, buf->n_pages, -1, in vb2_vmalloc_get_userptr()
132 put_page(buf->pages[n_pages]); in vb2_vmalloc_get_userptr()
133 kfree(buf->pages); in vb2_vmalloc_get_userptr()
147 if (buf->pages) { in vb2_vmalloc_put_userptr()
152 set_page_dirty_lock(buf->pages[i]); in vb2_vmalloc_put_userptr()
153 put_page(buf->pages[i]); in vb2_vmalloc_put_userptr()
[all …]
Dvideobuf2-dma-contig.c470 static int vb2_dc_get_user_pages(unsigned long start, struct page **pages, in vb2_dc_get_user_pages() argument
488 pages[i] = pfn_to_page(pfn); in vb2_dc_get_user_pages()
494 n_pages, dma_dir == DMA_FROM_DEVICE, 1, pages, NULL); in vb2_dc_get_user_pages()
500 put_page(pages[--n]); in vb2_dc_get_user_pages()
578 struct page **pages; in vb2_dc_get_userptr() local
612 pages = kmalloc(n_pages * sizeof(pages[0]), GFP_KERNEL); in vb2_dc_get_userptr()
613 if (!pages) { in vb2_dc_get_userptr()
641 ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, dma_dir); in vb2_dc_get_userptr()
647 kfree(pages); in vb2_dc_get_userptr()
662 ret = sg_alloc_table_from_pages(sgt, pages, n_pages, in vb2_dc_get_userptr()
[all …]
/linux-4.1.27/drivers/gpu/drm/i915/
Di915_gem_dmabuf.c60 ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL); in i915_gem_map_dma_buf()
64 src = obj->pages->sgl; in i915_gem_map_dma_buf()
66 for (i = 0; i < obj->pages->nents; i++) { in i915_gem_map_dma_buf()
114 struct page **pages; in i915_gem_dmabuf_vmap() local
134 pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages)); in i915_gem_dmabuf_vmap()
135 if (pages == NULL) in i915_gem_dmabuf_vmap()
139 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) in i915_gem_dmabuf_vmap()
140 pages[i++] = sg_page_iter_page(&sg_iter); in i915_gem_dmabuf_vmap()
142 obj->dma_buf_vmapping = vmap(pages, i, 0, PAGE_KERNEL); in i915_gem_dmabuf_vmap()
143 drm_free_large(pages); in i915_gem_dmabuf_vmap()
[all …]
Di915_gem_gtt.c543 struct sg_table *pages, in gen8_ppgtt_insert_entries() argument
557 for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) { in gen8_ppgtt_insert_entries()
1129 struct sg_table *pages, in gen6_ppgtt_insert_entries() argument
1142 for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) { in gen6_ppgtt_insert_entries()
1547 vma->vm->insert_entries(vma->vm, vma->obj->pages, vma->node.start, in ppgtt_bind_vma()
1723 obj->pages->sgl, obj->pages->nents, in i915_gem_gtt_prepare_object()
1885 intel_gtt_insert_sg_entries(vma->ggtt_view.pages, entry, flags); in i915_ggtt_bind_vma()
1916 struct sg_table *pages = obj->pages; in ggtt_bind_vma() local
1923 pages = vma->ggtt_view.pages; in ggtt_bind_vma()
1939 vma->vm->insert_entries(vma->vm, pages, in ggtt_bind_vma()
[all …]
/linux-4.1.27/drivers/lguest/x86/
Dcore.c85 static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages) in copy_in_guest_info() argument
93 if (__this_cpu_read(lg_last_cpu) != cpu || cpu->last_pages != pages) { in copy_in_guest_info()
95 cpu->last_pages = pages; in copy_in_guest_info()
103 pages->state.host_cr3 = __pa(current->mm->pgd); in copy_in_guest_info()
108 map_switcher_in_guest(cpu, pages); in copy_in_guest_info()
114 pages->state.guest_tss.sp1 = cpu->esp1; in copy_in_guest_info()
115 pages->state.guest_tss.ss1 = cpu->ss1; in copy_in_guest_info()
119 copy_traps(cpu, pages->state.guest_idt, default_idt_entries); in copy_in_guest_info()
123 copy_gdt(cpu, pages->state.guest_gdt); in copy_in_guest_info()
126 copy_gdt_tls(cpu, pages->state.guest_gdt); in copy_in_guest_info()
[all …]
/linux-4.1.27/fs/ntfs/
Dcompress.c522 struct page **pages; in ntfs_read_compressed_block() local
534 pages = kmalloc(nr_pages * sizeof(struct page *), GFP_NOFS); in ntfs_read_compressed_block()
540 if (unlikely(!pages || !bhs)) { in ntfs_read_compressed_block()
542 kfree(pages); in ntfs_read_compressed_block()
554 pages[xpage] = page; in ntfs_read_compressed_block()
568 kfree(pages); in ntfs_read_compressed_block()
579 pages[i] = grab_cache_page_nowait(mapping, offset); in ntfs_read_compressed_block()
580 page = pages[i]; in ntfs_read_compressed_block()
595 pages[i] = NULL; in ntfs_read_compressed_block()
754 page = pages[cur_page]; in ntfs_read_compressed_block()
[all …]
Dfile.c509 pgoff_t index, const unsigned nr_pages, struct page **pages, in __ntfs_grab_cache_pages() argument
517 pages[nr] = find_get_page_flags(mapping, index, FGP_LOCK | in __ntfs_grab_cache_pages()
519 if (!pages[nr]) { in __ntfs_grab_cache_pages()
534 pages[nr] = *cached_page; in __ntfs_grab_cache_pages()
544 unlock_page(pages[--nr]); in __ntfs_grab_cache_pages()
545 page_cache_release(pages[nr]); in __ntfs_grab_cache_pages()
583 static int ntfs_prepare_pages_for_non_resident_write(struct page **pages, in ntfs_prepare_pages_for_non_resident_write() argument
613 BUG_ON(!pages); in ntfs_prepare_pages_for_non_resident_write()
614 BUG_ON(!*pages); in ntfs_prepare_pages_for_non_resident_write()
615 vi = pages[0]->mapping->host; in ntfs_prepare_pages_for_non_resident_write()
[all …]
/linux-4.1.27/drivers/gpu/drm/msm/
Dmsm_gem.c76 if (!msm_obj->pages) { in get_pages()
98 msm_obj->pages = p; in get_pages()
108 return msm_obj->pages; in get_pages()
115 if (msm_obj->pages) { in put_pages()
126 drm_gem_put_pages(obj, msm_obj->pages, true, false); in put_pages()
129 drm_free_large(msm_obj->pages); in put_pages()
132 msm_obj->pages = NULL; in put_pages()
197 struct page **pages; in msm_gem_fault() local
210 pages = get_pages(obj); in msm_gem_fault()
211 if (IS_ERR(pages)) { in msm_gem_fault()
[all …]
/linux-4.1.27/drivers/net/ethernet/mellanox/mlx4/
Den_resources.c86 struct page **pages; in mlx4_en_map_buffer() local
92 pages = kmalloc(sizeof *pages * buf->nbufs, GFP_KERNEL); in mlx4_en_map_buffer()
93 if (!pages) in mlx4_en_map_buffer()
97 pages[i] = virt_to_page(buf->page_list[i].buf); in mlx4_en_map_buffer()
99 buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL); in mlx4_en_map_buffer()
100 kfree(pages); in mlx4_en_map_buffer()
/linux-4.1.27/drivers/xen/
Dxlate_mmu.c70 struct page **pages; member
80 struct page *page = info->pages[info->index++]; in remap_pte_fn()
101 struct page **pages) in xen_xlate_remap_gfn_array() argument
115 data.pages = pages; in xen_xlate_remap_gfn_array()
127 int nr, struct page **pages) in xen_xlate_unmap_gfn_range() argument
135 pfn = page_to_pfn(pages[i]); in xen_xlate_unmap_gfn_range()
Dgntdev.c95 struct page **pages; member
99 static int unmap_grant_pages(struct grant_map *map, int offset, int pages);
122 if (map->pages) in gntdev_free_map()
123 gnttab_free_pages(map->count, map->pages); in gntdev_free_map()
124 kfree(map->pages); in gntdev_free_map()
147 add->pages = kcalloc(count, sizeof(add->pages[0]), GFP_KERNEL); in gntdev_alloc_map()
153 NULL == add->pages) in gntdev_alloc_map()
156 if (gnttab_alloc_pages(count, add->pages)) in gntdev_alloc_map()
230 if (map->pages && !use_ptemod) in gntdev_put_map()
283 pfn_to_kaddr(page_to_pfn(map->pages[i])); in map_grant_pages()
[all …]
Dprivcmd.c69 static void free_page_list(struct list_head *pages) in free_page_list() argument
73 list_for_each_entry_safe(p, n, pages, lru) in free_page_list()
76 INIT_LIST_HEAD(pages); in free_page_list()
316 struct page **pages = vma->vm_private_data; in mmap_batch_fn() local
321 cur_pages = &pages[st->index]; in mmap_batch_fn()
398 struct page **pages; in alloc_empty_pages() local
400 pages = kcalloc(numpgs, sizeof(pages[0]), GFP_KERNEL); in alloc_empty_pages()
401 if (pages == NULL) in alloc_empty_pages()
404 rc = alloc_xenballooned_pages(numpgs, pages, 0); in alloc_empty_pages()
408 kfree(pages); in alloc_empty_pages()
[all …]
Dgrant-table.c686 int gnttab_alloc_pages(int nr_pages, struct page **pages) in gnttab_alloc_pages() argument
691 ret = alloc_xenballooned_pages(nr_pages, pages, false); in gnttab_alloc_pages()
701 gnttab_free_pages(nr_pages, pages); in gnttab_alloc_pages()
704 set_page_private(pages[i], (unsigned long)foreign); in gnttab_alloc_pages()
706 SetPagePrivate(pages[i]); in gnttab_alloc_pages()
718 void gnttab_free_pages(int nr_pages, struct page **pages) in gnttab_free_pages() argument
723 if (PagePrivate(pages[i])) { in gnttab_free_pages()
725 kfree((void *)page_private(pages[i])); in gnttab_free_pages()
727 ClearPagePrivate(pages[i]); in gnttab_free_pages()
730 free_xenballooned_pages(nr_pages, pages); in gnttab_free_pages()
[all …]
Dballoon.c531 int alloc_xenballooned_pages(int nr_pages, struct page **pages, bool highmem) in alloc_xenballooned_pages() argument
539 pages[pgno++] = page; in alloc_xenballooned_pages()
554 balloon_append(pages[--pgno]); in alloc_xenballooned_pages()
567 void free_xenballooned_pages(int nr_pages, struct page **pages) in free_xenballooned_pages() argument
574 if (pages[i]) in free_xenballooned_pages()
575 balloon_append(pages[i]); in free_xenballooned_pages()
587 unsigned long pages) in balloon_add_region() argument
597 extra_pfn_end = min(max_pfn, start_pfn + pages); in balloon_add_region()
/linux-4.1.27/Documentation/ABI/testing/
Dsysfs-kernel-mm-ksm22 pages_shared: how many shared pages are being used.
27 pages_to_scan: how many present pages to scan before ksmd goes
30 pages_unshared: how many pages unique but repeatedly checked
33 pages_volatile: how many pages changing too fast to be placed
38 write 2 to disable ksm and unmerge all its pages.
49 Description: Control merging pages across different NUMA nodes.
51 When it is set to 0 only pages from the same node are merged,
52 otherwise pages from all nodes can be merged together (default).
/linux-4.1.27/drivers/gpu/drm/vgem/
Dvgem_drv.c47 drm_gem_put_pages(&obj->base, obj->pages, false, false); in vgem_gem_put_pages()
48 obj->pages = NULL; in vgem_gem_put_pages()
64 if (vgem_obj->pages) in vgem_gem_free_object()
67 vgem_obj->pages = NULL; in vgem_gem_free_object()
74 struct page **pages; in vgem_gem_get_pages() local
76 if (obj->pages || obj->use_dma_buf) in vgem_gem_get_pages()
79 pages = drm_gem_get_pages(&obj->base); in vgem_gem_get_pages()
80 if (IS_ERR(pages)) { in vgem_gem_get_pages()
81 return PTR_ERR(pages); in vgem_gem_get_pages()
84 obj->pages = pages; in vgem_gem_get_pages()
[all …]
/linux-4.1.27/drivers/gpu/drm/radeon/
Dradeon_gart.c239 int pages) in radeon_gart_unbind() argument
251 for (i = 0; i < pages; i++, p++) { in radeon_gart_unbind()
252 if (rdev->gart.pages[p]) { in radeon_gart_unbind()
253 rdev->gart.pages[p] = NULL; in radeon_gart_unbind()
284 int pages, struct page **pagelist, dma_addr_t *dma_addr, in radeon_gart_bind() argument
299 for (i = 0; i < pages; i++, p++) { in radeon_gart_bind()
300 rdev->gart.pages[p] = pagelist[i]; in radeon_gart_bind()
330 if (rdev->gart.pages) { in radeon_gart_init()
347 rdev->gart.pages = vzalloc(sizeof(void *) * rdev->gart.num_cpu_pages); in radeon_gart_init()
348 if (rdev->gart.pages == NULL) { in radeon_gart_init()
[all …]
/linux-4.1.27/drivers/firewire/
Dcore-iso.c48 buffer->pages = kmalloc(page_count * sizeof(buffer->pages[0]), in fw_iso_buffer_alloc()
50 if (buffer->pages == NULL) in fw_iso_buffer_alloc()
54 buffer->pages[i] = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO); in fw_iso_buffer_alloc()
55 if (buffer->pages[i] == NULL) in fw_iso_buffer_alloc()
76 address = dma_map_page(card->device, buffer->pages[i], in fw_iso_buffer_map_dma()
81 set_page_private(buffer->pages[i], address); in fw_iso_buffer_map_dma()
115 err = vm_insert_page(vma, uaddr, buffer->pages[i]); in fw_iso_buffer_map_vma()
132 address = page_private(buffer->pages[i]); in fw_iso_buffer_destroy()
137 __free_page(buffer->pages[i]); in fw_iso_buffer_destroy()
139 kfree(buffer->pages); in fw_iso_buffer_destroy()
[all …]
/linux-4.1.27/include/xen/
Dxen-ops.h54 struct page **pages);
72 struct page **pages);
74 int numpgs, struct page **pages);
80 struct page **pages);
82 int nr, struct page **pages);
Dgrant_table.h74 struct page **pages; member
184 int gnttab_alloc_pages(int nr_pages, struct page **pages);
185 void gnttab_free_pages(int nr_pages, struct page **pages);
189 struct page **pages, unsigned int count);
192 struct page **pages, unsigned int count);
Dballoon.h28 int alloc_xenballooned_pages(int nr_pages, struct page **pages,
30 void free_xenballooned_pages(int nr_pages, struct page **pages);
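
The two prototypes above take a caller-supplied array of struct page pointers, exactly as the drivers/xen hits higher up (privcmd's alloc_empty_pages() and gntdev) use them. A minimal sketch of that pattern, assuming kernel-module context; the function name xen_demo_balloon and the plain kcalloc() of the array are illustrative, not taken from the tree:

#include <linux/slab.h>
#include <xen/balloon.h>

/* Illustrative only: borrow nr ballooned pages, then hand them back. */
static int xen_demo_balloon(int nr)
{
        struct page **pages;
        int ret;

        pages = kcalloc(nr, sizeof(*pages), GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        /* false: this sketch has no need for highmem pages */
        ret = alloc_xenballooned_pages(nr, pages, false);
        if (ret) {
                kfree(pages);
                return ret;
        }

        /* ... use the pages, e.g. map grants into them ... */

        free_xenballooned_pages(nr, pages);
        kfree(pages);
        return 0;
}

Passing false for the highmem argument mirrors the 0 literal that privcmd.c passes in its alloc_empty_pages() hit above.
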
/linux-4.1.27/arch/s390/hypfs/
Dhypfs_diag.c379 static void *diag204_alloc_vbuf(int pages) in diag204_alloc_vbuf() argument
382 diag204_buf_vmalloc = vmalloc(PAGE_SIZE * (pages + 1)); in diag204_alloc_vbuf()
386 diag204_buf_pages = pages; in diag204_alloc_vbuf()
399 static void *diag204_get_buffer(enum diag204_format fmt, int *pages) in diag204_get_buffer() argument
402 *pages = diag204_buf_pages; in diag204_get_buffer()
406 *pages = 1; in diag204_get_buffer()
409 *pages = diag204((unsigned long)SUBC_RSI | in diag204_get_buffer()
411 if (*pages <= 0) in diag204_get_buffer()
414 return diag204_alloc_vbuf(*pages); in diag204_get_buffer()
435 int pages, rc; in diag204_probe() local
[all …]
/linux-4.1.27/drivers/hwmon/pmbus/
Ducd9200.c102 info->pages = 0; in ucd9200_probe()
106 info->pages++; in ucd9200_probe()
108 if (!info->pages) { in ucd9200_probe()
112 dev_info(&client->dev, "%d rails configured\n", info->pages); in ucd9200_probe()
122 for (i = 0; i < info->pages; i++) { in ucd9200_probe()
143 if (info->pages > 1) in ucd9200_probe()
153 for (i = 1; i < info->pages; i++) in ucd9200_probe()
Dpmbus.c75 for (page = 0; page < info->pages; page++) { in pmbus_find_sensor_groups()
101 if (!info->pages) { in pmbus_identify()
116 info->pages = page; in pmbus_identify()
118 info->pages = 1; in pmbus_identify()
175 info->pages = id->driver_data; in pmbus_probe()
/linux-4.1.27/drivers/md/
Ddm-kcopyd.c41 struct page_list *pages; member
230 pl->next = kc->pages; in kcopyd_put_pages()
231 kc->pages = pl; in kcopyd_put_pages()
240 unsigned int nr, struct page_list **pages) in kcopyd_get_pages() argument
244 *pages = NULL; in kcopyd_get_pages()
250 pl = kc->pages; in kcopyd_get_pages()
253 kc->pages = pl->next; in kcopyd_get_pages()
256 pl->next = *pages; in kcopyd_get_pages()
257 *pages = pl; in kcopyd_get_pages()
263 if (*pages) in kcopyd_get_pages()
[all …]
/linux-4.1.27/fs/nfs/
Dnfs3acl.c17 struct page *pages[NFSACL_MAXPAGES] = { }; in nfs3_get_acl() local
21 .pages = pages, in nfs3_get_acl()
62 for (count = 0; count < NFSACL_MAXPAGES && args.pages[count]; count++) in nfs3_get_acl()
63 __free_page(args.pages[count]); in nfs3_get_acl()
122 struct page *pages[NFSACL_MAXPAGES]; in __nfs3_proc_setacls() local
127 .pages = pages, in __nfs3_proc_setacls()
161 args.pages[args.npages] = alloc_page(GFP_KERNEL); in __nfs3_proc_setacls()
162 if (args.pages[args.npages] == NULL) in __nfs3_proc_setacls()
199 __free_page(args.pages[args.npages]); in __nfs3_proc_setacls()
Dpnfs_dev.c102 struct page **pages = NULL; in nfs4_get_device_info() local
123 pages = kcalloc(max_pages, sizeof(struct page *), gfp_flags); in nfs4_get_device_info()
124 if (!pages) in nfs4_get_device_info()
128 pages[i] = alloc_page(gfp_flags); in nfs4_get_device_info()
129 if (!pages[i]) in nfs4_get_device_info()
135 pdev->pages = pages; in nfs4_get_device_info()
157 __free_page(pages[i]); in nfs4_get_device_info()
158 kfree(pages); in nfs4_get_device_info()
Dfscache.h132 struct list_head *pages, in nfs_readpages_from_fscache() argument
136 return __nfs_readpages_from_fscache(ctx, inode, mapping, pages, in nfs_readpages_from_fscache()
211 struct list_head *pages, in nfs_readpages_from_fscache() argument
/linux-4.1.27/fs/cifs/
Dfscache.h79 struct list_head *pages, in cifs_readpages_from_fscache() argument
83 return __cifs_readpages_from_fscache(inode, mapping, pages, in cifs_readpages_from_fscache()
96 struct list_head *pages) in cifs_fscache_readpages_cancel() argument
99 return __cifs_fscache_readpages_cancel(inode, pages); in cifs_fscache_readpages_cancel()
133 struct list_head *pages, in cifs_readpages_from_fscache() argument
143 struct list_head *pages) in cifs_fscache_readpages_cancel() argument
Dfscache.c186 struct list_head *pages, in __cifs_readpages_from_fscache() argument
194 pages, nr_pages, in __cifs_readpages_from_fscache()
226 void __cifs_fscache_readpages_cancel(struct inode *inode, struct list_head *pages) in __cifs_fscache_readpages_cancel() argument
230 fscache_readpages_cancel(CIFS_I(inode)->fscache, pages); in __cifs_fscache_readpages_cancel()
/linux-4.1.27/fs/exofs/
Dore_raid.c58 struct page **pages; member
80 struct page *pages[group_width]; in _sp2d_alloc() member
130 sp2d->_1p_stripes[i].pages = __a1pa->pages; in _sp2d_alloc()
157 struct page *page = _1ps->pages[c]; in _sp2d_reset()
167 memset(_1ps->pages, 0, group_width * sizeof(*_1ps->pages)); in _sp2d_reset()
184 kfree(sp2d->_1p_stripes[i].pages); in _sp2d_free()
236 _1ps->tx = async_xor(_1ps->pages[sp2d->data_devs], in _gen_xor_unit()
237 _1ps->pages, 0, sp2d->data_devs, in _gen_xor_unit()
240 _1ps->tx = async_gen_syndrome(_1ps->pages, 0, in _gen_xor_unit()
263 _1ps->pages[si->cur_comp] = page; in _ore_add_stripe_page()
[all …]
Dinode.c43 unsigned pages = min_t(unsigned, expected_pages, in exofs_max_io_pages() local
46 return pages; in exofs_max_io_pages()
55 struct page **pages; member
76 pcol->pages = NULL; in _pcol_init()
89 pcol->pages = NULL; in _pcol_reset()
107 unsigned pages; in pcol_try_alloc() local
110 pages = exofs_max_io_pages(&pcol->sbi->layout, pcol->expected_pages); in pcol_try_alloc()
112 for (; pages; pages >>= 1) { in pcol_try_alloc()
113 pcol->pages = kmalloc(pages * sizeof(struct page *), in pcol_try_alloc()
115 if (likely(pcol->pages)) { in pcol_try_alloc()
[all …]
Dore.c149 struct page **pages; in _ore_get_io_state() local
156 struct page *pages[num_par_pages]; in _ore_get_io_state() member
168 pages = num_par_pages ? _aios->pages : NULL; in _ore_get_io_state()
178 struct page *pages[num_par_pages]; in _ore_get_io_state() member
197 pages = num_par_pages ? extra_part->pages : NULL; in _ore_get_io_state()
206 if (pages) { in _ore_get_io_state()
207 ios->parity_pages = pages; in _ore_get_io_state()
594 unsigned pgbase, struct page **pages, in _ore_add_stripe_unit() argument
630 added_len = bio_add_pc_page(q, per_dev->bio, pages[pg], in _ore_add_stripe_unit()
642 _add_stripe_page(ios->sp2d, &ios->si, pages[pg]); in _ore_add_stripe_unit()
[all …]
/linux-4.1.27/net/sunrpc/xprtrdma/
Dsvc_rdma_recvfrom.c68 page = ctxt->pages[0]; in rdma_build_arg_xdr()
89 rqstp->rq_arg.pages = &rqstp->rq_pages[0]; in rdma_build_arg_xdr()
91 rqstp->rq_arg.pages = &rqstp->rq_pages[1]; in rdma_build_arg_xdr()
95 page = ctxt->pages[sge_no]; in rdma_build_arg_xdr()
108 page = ctxt->pages[sge_no++]; in rdma_build_arg_xdr()
155 head->arg.pages[pg_no] = rqstp->rq_arg.pages[pg_no]; in rdma_read_chunk_lcl()
160 rqstp->rq_respages = &rqstp->rq_arg.pages[pg_no+1]; in rdma_read_chunk_lcl()
164 head->arg.pages[pg_no], pg_off, in rdma_read_chunk_lcl()
252 frmr->kva = page_address(rqstp->rq_arg.pages[pg_no]); in rdma_read_chunk_frmr()
261 head->arg.pages[pg_no] = rqstp->rq_arg.pages[pg_no]; in rdma_read_chunk_frmr()
[all …]
/linux-4.1.27/arch/powerpc/kvm/
Dbook3s_64_vio.c56 __free_page(stt->pages[i]); in release_spapr_tce_table()
71 page = stt->pages[vmf->pgoff]; in kvm_spapr_tce_fault()
126 stt->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO); in kvm_vm_ioctl_create_spapr_tce()
127 if (!stt->pages[i]) in kvm_vm_ioctl_create_spapr_tce()
144 if (stt->pages[i]) in kvm_vm_ioctl_create_spapr_tce()
145 __free_page(stt->pages[i]); in kvm_vm_ioctl_create_spapr_tce()
/linux-4.1.27/arch/ia64/include/asm/
Dtlb.h64 struct page **pages; member
143 free_page_and_swap_cache(tlb->pages[i]); in ia64_tlb_flush_mmu_free()
164 tlb->pages = (void *)addr; in __tlb_alloc_page()
175 tlb->pages = tlb->local; in tlb_gather_mmu()
199 if (tlb->pages != tlb->local) in tlb_finish_mmu()
200 free_pages((unsigned long)tlb->pages, 0); in tlb_finish_mmu()
212 if (!tlb->nr && tlb->pages == tlb->local) in __tlb_remove_page()
215 tlb->pages[tlb->nr++] = page; in __tlb_remove_page()
/linux-4.1.27/Documentation/arm64/
Dmemory.txt13 64KB pages, only 2 levels of translation tables, allowing 42-bit (4TB)
24 AArch64 Linux memory layout with 4KB pages + 3 levels:
32 AArch64 Linux memory layout with 4KB pages + 4 levels:
40 AArch64 Linux memory layout with 64KB pages + 2 levels:
48 AArch64 Linux memory layout with 64KB pages + 3 levels:
60 Translation table lookup with 4KB pages:
75 Translation table lookup with 64KB pages:
89 When using KVM, the hypervisor maps kernel pages in EL2, at a fixed
/linux-4.1.27/drivers/base/
Ddma-mapping.c278 void *dma_common_pages_remap(struct page **pages, size_t size, in dma_common_pages_remap() argument
288 area->pages = pages; in dma_common_pages_remap()
290 if (map_vm_area(area, prot, pages)) { in dma_common_pages_remap()
308 struct page **pages; in dma_common_contiguous_remap() local
312 pages = kmalloc(sizeof(struct page *) << get_order(size), GFP_KERNEL); in dma_common_contiguous_remap()
313 if (!pages) in dma_common_contiguous_remap()
317 pages[i] = pfn_to_page(pfn + i); in dma_common_contiguous_remap()
319 ptr = dma_common_pages_remap(pages, size, vm_flags, prot, caller); in dma_common_contiguous_remap()
321 kfree(pages); in dma_common_contiguous_remap()
Dfirmware_class.c148 struct page **pages; member
255 __free_page(buf->pages[i]); in __fw_free_buf()
256 kfree(buf->pages); in __fw_free_buf()
377 fw->pages = buf->pages; in fw_set_page_data()
604 buf->data = vmap(buf->pages, buf->nr_pages, 0, PAGE_KERNEL_RO); in fw_map_pages_buf()
643 __free_page(fw_buf->pages[i]); in firmware_loading_store()
644 kfree(fw_buf->pages); in firmware_loading_store()
645 fw_buf->pages = NULL; in firmware_loading_store()
729 page_data = kmap(buf->pages[page_nr]); in firmware_data_read()
733 kunmap(buf->pages[page_nr]); in firmware_data_read()
[all …]
/linux-4.1.27/arch/arm/mm/
Ddma-mapping.c1118 struct page **pages; in __iommu_alloc_buffer() local
1124 pages = kzalloc(array_size, GFP_KERNEL); in __iommu_alloc_buffer()
1126 pages = vzalloc(array_size); in __iommu_alloc_buffer()
1127 if (!pages) in __iommu_alloc_buffer()
1142 pages[i] = page + i; in __iommu_alloc_buffer()
1144 return pages; in __iommu_alloc_buffer()
1161 pages[i] = alloc_pages(gfp | __GFP_NORETRY, order); in __iommu_alloc_buffer()
1162 if (pages[i]) in __iommu_alloc_buffer()
1166 if (!pages[i]) { in __iommu_alloc_buffer()
1171 pages[i] = alloc_pages(gfp, 0); in __iommu_alloc_buffer()
[all …]
/linux-4.1.27/arch/parisc/mm/
Dinit.c152 tmp = pmem_ranges[j-1].pages; in setup_bootmem()
153 pmem_ranges[j-1].pages = pmem_ranges[j].pages; in setup_bootmem()
154 pmem_ranges[j].pages = tmp; in setup_bootmem()
167 pmem_ranges[i-1].pages) > MAX_GAP) { in setup_bootmem()
173 pmem_ranges[i-1].pages)); in setup_bootmem()
189 size = (pmem_ranges[i].pages << PAGE_SHIFT); in setup_bootmem()
201 res->end = res->start + (pmem_ranges[i].pages << PAGE_SHIFT)-1; in setup_bootmem()
221 rsize = pmem_ranges[i].pages << PAGE_SHIFT; in setup_bootmem()
227 pmem_ranges[i].pages = (mem_limit >> PAGE_SHIFT) in setup_bootmem()
247 end_pfn = pmem_ranges[0].start_pfn + pmem_ranges[0].pages; in setup_bootmem()
[all …]
/linux-4.1.27/arch/blackfin/kernel/
Ddma-mapping.c47 static unsigned long __alloc_dma_pages(unsigned int pages) in __alloc_dma_pages() argument
57 start = bitmap_find_next_zero_area(dma_page, dma_pages, 0, pages, 0); in __alloc_dma_pages()
60 bitmap_set(dma_page, start, pages); in __alloc_dma_pages()
66 static void __free_dma_pages(unsigned long addr, unsigned int pages) in __free_dma_pages() argument
71 if ((page + pages) > dma_pages) { in __free_dma_pages()
77 bitmap_clear(dma_page, page, pages); in __free_dma_pages()
/linux-4.1.27/fs/ceph/
Dcache.h45 struct list_head *pages,
85 struct list_head *pages) in ceph_fscache_readpages_cancel() argument
88 return fscache_readpages_cancel(ci->fscache, pages); in ceph_fscache_readpages_cancel()
121 struct page *pages) in ceph_fscache_uncache_page() argument
133 struct list_head *pages, in ceph_readpages_from_fscache() argument
172 struct list_head *pages) in ceph_fscache_readpages_cancel() argument
Daddr.c273 struct page *page = osd_data->pages[i]; in finish_read()
292 kfree(osd_data->pages); in finish_read()
295 static void ceph_unlock_page_vector(struct page **pages, int num_pages) in ceph_unlock_page_vector() argument
300 unlock_page(pages[i]); in ceph_unlock_page_vector()
318 struct page **pages; in start_read() local
349 pages = kmalloc(sizeof(*pages) * nr_pages, GFP_NOFS); in start_read()
351 if (!pages) in start_read()
369 pages[i] = page; in start_read()
371 osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false, false); in start_read()
385 ceph_unlock_page_vector(pages, nr_pages); in start_read()
[all …]
Dfile.c349 struct page **pages, int num_pages, in striped_read() argument
368 page_pos = pages; in striped_read()
400 ceph_zero_page_vector_range(zoff, zlen, pages); in striped_read()
438 struct page **pages; in ceph_sync_read() local
465 n = iov_iter_get_pages_alloc(i, &pages, INT_MAX, &start); in ceph_sync_read()
472 pages, num_pages, checkeof, in ceph_sync_read()
475 ceph_put_page_vector(pages, num_pages, true); in ceph_sync_read()
486 pages = ceph_alloc_page_vector(num_pages, GFP_NOFS); in ceph_sync_read()
487 if (IS_ERR(pages)) in ceph_sync_read()
488 return PTR_ERR(pages); in ceph_sync_read()
[all …]
/linux-4.1.27/drivers/staging/android/ion/
Dion_heap.c37 struct page **pages = vmalloc(sizeof(struct page *) * npages); in ion_heap_map_kernel() local
38 struct page **tmp = pages; in ion_heap_map_kernel()
40 if (!pages) in ion_heap_map_kernel()
56 vaddr = vmap(pages, npages, VM_MAP, pgprot); in ion_heap_map_kernel()
57 vfree(pages); in ion_heap_map_kernel()
106 static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot) in ion_heap_clear_pages() argument
108 void *addr = vm_map_ram(pages, num, -1, pgprot); in ion_heap_clear_pages()
124 struct page *pages[32]; in ion_heap_sglist_zero() local
127 pages[p++] = sg_page_iter_page(&piter); in ion_heap_sglist_zero()
128 if (p == ARRAY_SIZE(pages)) { in ion_heap_sglist_zero()
[all …]
Dion_system_heap.c130 struct list_head pages; in ion_system_heap_allocate() local
142 INIT_LIST_HEAD(&pages); in ion_system_heap_allocate()
148 list_add_tail(&page->lru, &pages); in ion_system_heap_allocate()
161 list_for_each_entry_safe(page, tmp_page, &pages, lru) { in ion_system_heap_allocate()
173 list_for_each_entry_safe(page, tmp_page, &pages, lru) in ion_system_heap_allocate()
365 unsigned long pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT; in ion_system_contig_heap_free() local
368 for (i = 0; i < pages; i++) in ion_system_contig_heap_free()
/linux-4.1.27/fs/btrfs/tests/
Dextent-io-tests.c32 struct page *pages[16]; in process_page_range() local
43 ARRAY_SIZE(pages)), pages); in process_page_range()
46 !PageLocked(pages[i])) in process_page_range()
48 if (flags & PROCESS_UNLOCK && PageLocked(pages[i])) in process_page_range()
49 unlock_page(pages[i]); in process_page_range()
50 page_cache_release(pages[i]); in process_page_range()
52 page_cache_release(pages[i]); in process_page_range()
/linux-4.1.27/arch/arm/include/asm/
Dtlb.h78 struct page **pages; member
123 tlb->pages = (void *)addr; in __tlb_alloc_page()
138 free_pages_and_swap_cache(tlb->pages, tlb->nr); in tlb_flush_mmu_free()
140 if (tlb->pages == tlb->local) in tlb_flush_mmu_free()
159 tlb->pages = tlb->local; in tlb_gather_mmu()
176 if (tlb->pages != tlb->local) in tlb_finish_mmu()
177 free_pages((unsigned long)tlb->pages, 0); in tlb_finish_mmu()
214 tlb->pages[tlb->nr++] = page; in __tlb_remove_page()
/linux-4.1.27/include/drm/
Ddrm_vma_manager.h59 unsigned long pages);
62 unsigned long pages);
64 struct drm_vma_offset_node *node, unsigned long pages);
88 unsigned long pages) in drm_vma_offset_exact_lookup() argument
92 node = drm_vma_offset_lookup(mgr, start, pages); in drm_vma_offset_exact_lookup()
Ddrm_agpsupport.h33 void drm_free_agp(struct agp_memory * handle, int pages);
37 struct page **pages,
71 static inline void drm_free_agp(struct agp_memory * handle, int pages) in drm_free_agp() argument
86 struct page **pages, in drm_agp_bind_pages() argument
/linux-4.1.27/include/linux/
Dmman.h23 static inline void vm_acct_memory(long pages) in vm_acct_memory() argument
25 __percpu_counter_add(&vm_committed_as, pages, vm_committed_as_batch); in vm_acct_memory()
28 static inline void vm_unacct_memory(long pages) in vm_unacct_memory() argument
30 vm_acct_memory(-pages); in vm_unacct_memory()
Dvmalloc.h36 struct page **pages; member
57 extern void *vm_map_ram(struct page **pages, unsigned int count,
85 extern void *vmap(struct page **pages, unsigned int count,
124 struct page **pages);
127 pgprot_t prot, struct page **pages);
133 pgprot_t prot, struct page **pages) in map_kernel_range_noflush() argument
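
Several hits elsewhere in this list (mlx4_en_map_buffer(), mlx5's alloc.c, ion_heap_map_kernel()) use these vmalloc.h entry points the same way: gather the pages of a physically scattered buffer into an array and vmap() them into one contiguous kernel mapping. A condensed sketch of that pattern, assuming the pages come straight from alloc_page(); the helper name alloc_and_vmap is illustrative only:

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/* Allocate 'count' independent pages and map them contiguously.
 * Returns the kernel virtual address, or NULL on failure. */
static void *alloc_and_vmap(unsigned int count, struct page ***pagesp)
{
        struct page **pages;
        void *vaddr;
        unsigned int i;

        pages = kcalloc(count, sizeof(*pages), GFP_KERNEL);
        if (!pages)
                return NULL;

        for (i = 0; i < count; i++) {
                pages[i] = alloc_page(GFP_KERNEL);
                if (!pages[i])
                        goto err_free;
        }

        /* VM_MAP + PAGE_KERNEL is the combination the mlx4/mlx5 hits use */
        vaddr = vmap(pages, count, VM_MAP, PAGE_KERNEL);
        if (!vaddr)
                goto err_free;

        *pagesp = pages;   /* caller keeps the array for later cleanup */
        return vaddr;

err_free:
        while (i--)
                __free_page(pages[i]);
        kfree(pages);
        return NULL;
}

The caller is expected to vunmap() the returned address, __free_page() each entry, and then free the array.
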
Dballoon_compaction.h62 struct list_head pages; /* Pages enqueued & handled to Host */ member
74 INIT_LIST_HEAD(&balloon->pages); in balloon_devinfo_init()
129 list_add(&page->lru, &balloon->pages); in balloon_page_insert()
170 list_add(&page->lru, &balloon->pages); in balloon_page_insert()
Dsuspend.h474 unsigned long page_key_additional_pages(unsigned long pages);
475 int page_key_alloc(unsigned long pages);
483 static inline unsigned long page_key_additional_pages(unsigned long pages) in page_key_additional_pages() argument
488 static inline int page_key_alloc(unsigned long pages) in page_key_alloc() argument
Dfscache.h249 struct list_head *pages);
593 struct list_head *pages, in fscache_read_or_alloc_pages() argument
600 return __fscache_read_or_alloc_pages(cookie, mapping, pages, in fscache_read_or_alloc_pages()
650 struct list_head *pages) in fscache_readpages_cancel() argument
653 __fscache_readpages_cancel(cookie, pages); in fscache_readpages_cancel()
Dnfs_xdr.h215 struct page **pages; member
515 struct page ** pages; member
689 struct page ** pages; member
698 struct page ** pages; member
704 struct page ** pages; member
714 struct page ** pages; member
726 struct page ** pages; member
767 struct page ** pages; member
794 struct page ** pages; member
812 struct page ** pages; member
[all …]
Dpagevec.h20 struct page *pages[PAGEVEC_SIZE]; member
62 pvec->pages[pvec->nr++] = page; in pagevec_add()
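
struct pagevec is the fixed-size batch (PAGEVEC_SIZE entries) that the nilfs2, AFS and v9fs hits further down fill with pagevec_lookup() and then walk through pvec.pages[]. A minimal iteration sketch against the linux-4.1 API, in which pagevec_init() still takes a cold argument; locking and error handling are omitted and walk_mapping_pages is an illustrative name:

#include <linux/pagemap.h>
#include <linux/pagevec.h>

/* Visit every page cached for 'mapping', one batch at a time. */
static void walk_mapping_pages(struct address_space *mapping)
{
        struct pagevec pvec;
        pgoff_t index = 0;
        unsigned int i, nr;

        pagevec_init(&pvec, 0);   /* 0: release the pages as "hot" */
        while ((nr = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE))) {
                for (i = 0; i < nr; i++) {
                        struct page *page = pvec.pages[i];

                        /* ... inspect or process 'page' here ... */
                        index = page->index + 1;
                }
                pagevec_release(&pvec);   /* drop the references lookup took */
        }
}
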
/linux-4.1.27/Documentation/virtual/kvm/
Dmmu.txt52 pages, pae, pse, pse36, cr0.wp, and 1GB pages. Work is in progress to support
102 Shadow pages
109 A nonleaf spte allows the hardware mmu to reach the leaf pages and
110 is not related to a translation directly. It points to other shadow pages.
115 Leaf ptes point at guest pages.
131 Shadow pages contain the following information:
137 Examples include real mode translation, large guest pages backed by small
138 host pages, and gpa->hpa translations when NPT or EPT is active.
147 so multiple shadow pages are needed to shadow one guest page.
148 For first-level shadow pages, role.quadrant can be 0 or 1 and denotes the
[all …]
/linux-4.1.27/net/9p/
Dtrans_common.c21 void p9_release_pages(struct page **pages, int nr_pages) in p9_release_pages() argument
26 if (pages[i]) in p9_release_pages()
27 put_page(pages[i]); in p9_release_pages()
/linux-4.1.27/drivers/usb/storage/
Dalauda.c731 unsigned int page, unsigned int pages, unsigned char *data) in alauda_read_block_raw() argument
736 PBA_ZONE(pba), 0, PBA_LO(pba) + page, pages, 0, MEDIA_PORT(us) in alauda_read_block_raw()
739 usb_stor_dbg(us, "pba %d page %d count %d\n", pba, page, pages); in alauda_read_block_raw()
747 data, (MEDIA_INFO(us).pagesize + 64) * pages, NULL); in alauda_read_block_raw()
757 unsigned int page, unsigned int pages, unsigned char *data) in alauda_read_block() argument
762 rc = alauda_read_block_raw(us, pba, page, pages, data); in alauda_read_block()
767 for (i = 0; i < pages; i++) { in alauda_read_block()
810 unsigned int page, unsigned int pages, in alauda_write_lba() argument
879 for (i = page; i < page+pages; i++) { in alauda_write_lba()
954 unsigned int pages; in alauda_read_data() local
[all …]
Dsddr55.c208 unsigned short pages; in sddr55_read_data() local
234 pages = min((unsigned int) sectors << info->smallpageshift, in sddr55_read_data()
236 len = pages << info->pageshift; in sddr55_read_data()
239 pages, pba, lba, page); in sddr55_read_data()
255 command[6] = LSB_of(pages << (1 - info->smallpageshift)); in sddr55_read_data()
302 sectors -= pages >> info->smallpageshift; in sddr55_read_data()
328 unsigned short pages; in sddr55_write_data() local
361 pages = min((unsigned int) sectors << info->smallpageshift, in sddr55_write_data()
363 len = pages << info->pageshift; in sddr55_write_data()
370 pages, pba, lba, page); in sddr55_write_data()
[all …]
Dsddr09.c747 unsigned int page, pages; in sddr09_read_data() local
780 pages = min(sectors, info->blocksize - page); in sddr09_read_data()
781 len = pages << info->pageshift; in sddr09_read_data()
797 pages, lba, page); in sddr09_read_data()
808 pages, pba, lba, page); in sddr09_read_data()
814 pages, info->pageshift, buffer, 0); in sddr09_read_data()
825 sectors -= pages; in sddr09_read_data()
860 unsigned int page, unsigned int pages, in sddr09_write_lba() argument
927 for (i = page; i < page+pages; i++) { in sddr09_write_lba()
971 unsigned int lba, maxlba, page, pages; in sddr09_write_data() local
[all …]
/linux-4.1.27/include/linux/ceph/
Dlibceph.h210 extern void ceph_release_page_vector(struct page **pages, int num_pages);
215 extern void ceph_put_page_vector(struct page **pages, int num_pages,
218 extern int ceph_copy_user_to_page_vector(struct page **pages,
221 extern void ceph_copy_to_page_vector(struct page **pages,
224 extern void ceph_copy_from_page_vector(struct page **pages,
227 extern void ceph_zero_page_vector_range(int off, int len, struct page **pages);
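
All of the libceph helpers above operate on one flat struct page * array, and the fs/ceph hits earlier in this list (start_read(), ceph_sync_read()) pair ceph_alloc_page_vector() with ceph_release_page_vector(). A compressed sketch of that pairing; ceph_pagevec_demo is an illustrative name and the GFP_NOFS flag simply copies what fs/ceph passes:

#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/ceph/libceph.h>

/* Illustrative: build a page vector, zero part of it, then release it. */
static int ceph_pagevec_demo(int num_pages, int off, int len)
{
        struct page **pages;

        pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
        if (IS_ERR(pages))
                return PTR_ERR(pages);

        /* zero 'len' bytes starting at byte offset 'off' within the vector */
        ceph_zero_page_vector_range(off, len, pages);

        /* ... hand the vector to an OSD request, copy data in or out ... */

        ceph_release_page_vector(pages, num_pages);
        return 0;
}
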
Dosd_client.h61 struct page **pages; member
256 struct page **pages, u64 length,
276 struct page **pages, u64 length,
293 struct page **pages, u64 length,
298 struct page **pages, u64 length,
358 struct page **pages, int nr_pages,
368 struct page **pages, int nr_pages);
/linux-4.1.27/sound/firewire/
Dpackets-buffer.c25 unsigned int packets_per_page, pages; in iso_packets_buffer_init() local
42 pages = DIV_ROUND_UP(count, packets_per_page); in iso_packets_buffer_init()
45 pages, direction); in iso_packets_buffer_init()
51 p = page_address(b->iso_buffer.pages[page_index]); in iso_packets_buffer_init()
/linux-4.1.27/arch/powerpc/platforms/cell/
Dras.c104 struct page *pages; member
126 area->pages = alloc_pages_exact_node(area->nid, in cbe_ptcal_enable_on_node()
130 if (!area->pages) { in cbe_ptcal_enable_on_node()
141 addr = __pa(page_address(area->pages)) + (PAGE_SIZE >> 1); in cbe_ptcal_enable_on_node()
159 __free_pages(area->pages, area->order); in cbe_ptcal_enable_on_node()
228 memset(page_address(area->pages), 0, in cbe_ptcal_disable()
233 __free_pages(area->pages, area->order); in cbe_ptcal_disable()
/linux-4.1.27/arch/x86/um/
Dldt.c77 if (copy_to_user(ptr, ldt->u.pages[i], size)) { in read_ldt()
156 ldt->u.pages[i] = (struct ldt_entry *) in write_ldt()
158 if (!ldt->u.pages[i]) { in write_ldt()
166 memcpy(ldt->u.pages[0], &entry0, in write_ldt()
168 memcpy(ldt->u.pages[0]+1, ldt->u.entries+1, in write_ldt()
180 ldt_p = ldt->u.pages[ldt_info.entry_number/LDT_ENTRIES_PER_PAGE] + in write_ldt()
342 new_mm->arch.ldt.u.pages[i] = in init_new_ldt()
344 memcpy(new_mm->arch.ldt.u.pages[i], in init_new_ldt()
345 from_mm->arch.ldt.u.pages[i], PAGE_SIZE); in init_new_ldt()
363 free_page((long) mm->arch.ldt.u.pages[i]); in free_ldt()
/linux-4.1.27/tools/testing/selftests/powerpc/mm/
Dsubpage_prot.c96 long i, j, pages, err; in run_test() local
98 pages = size / 0x10000; in run_test()
99 map = malloc(pages * 4); in run_test()
106 for (i = 0; i < pages; i++) { in run_test()
120 for (i = 0; i < pages; i++) { in run_test()
/linux-4.1.27/arch/mips/ar7/
Dmemory.c61 unsigned long pages; in prom_meminit() local
63 pages = memsize() >> PAGE_SHIFT; in prom_meminit()
64 add_memory_region(PHYS_OFFSET, pages << PAGE_SHIFT, BOOT_MEM_RAM); in prom_meminit()
/linux-4.1.27/Documentation/cma/
Ddebugfs.txt13 - [RO] order_per_bit: Order of pages represented by one bit.
15 - [WO] alloc: Allocate N pages from that CMA area. For example:
19 would try to allocate 5 pages from the cma-2 area.
21 - [WO] free: Free N pages from that CMA area, similar to the above.
/linux-4.1.27/Documentation/sysctl/
Dvm.txt70 admin_reserve_kbytes defaults to min(3% of free pages, 8MB)
106 huge pages although processes will also directly compact memory as required.
113 allowed to examine the unevictable lru (mlocked pages) for pages to compact.
116 compaction from moving pages that are unevictable. Default value is 1.
134 Contains, as a percentage of total available memory that contains free pages
135 and reclaimable pages, the number of pages at which the background kernel
152 Note: the minimum value allowed for dirty_bytes is two pages (in bytes); any
169 Contains, as a percentage of total available memory that contains free pages
170 and reclaimable pages, the number of pages at which a process which is
319 pages for each zone from them. These are shown as an array of protection pages
[all …]
/linux-4.1.27/net/sunrpc/auth_gss/
Dgss_krb5_wrap.c85 ptr = kmap_atomic(buf->pages[last]); in gss_krb5_remove_padding()
159 struct xdr_buf *buf, struct page **pages) in gss_wrap_kerberos_v1() argument
222 tmp_pages = buf->pages; in gss_wrap_kerberos_v1()
223 buf->pages = pages; in gss_wrap_kerberos_v1()
227 buf->pages = tmp_pages; in gss_wrap_kerberos_v1()
252 offset + headlen - conflen, pages); in gss_wrap_kerberos_v1()
258 offset + headlen - conflen, pages)) in gss_wrap_kerberos_v1()
441 struct xdr_buf *buf, struct page **pages) in gss_wrap_kerberos_v2() argument
486 err = (*kctx->gk5e->encrypt_v2)(kctx, offset, buf, pages); in gss_wrap_kerberos_v2()
592 struct xdr_buf *buf, struct page **pages) in gss_wrap_kerberos() argument
[all …]
Dgss_krb5_crypto.c393 struct page **pages; member
418 in_page = desc->pages[i]; in encryptor()
463 int offset, struct page **pages) in gss_encrypt_xdr_buf() argument
476 desc.pages = pages; in gss_encrypt_xdr_buf()
598 u32 offset, u8 *iv, struct page **pages, int encrypt) in gss_krb5_cts_crypt() argument
617 save_pages = buf->pages; in gss_krb5_cts_crypt()
619 buf->pages = pages; in gss_krb5_cts_crypt()
622 buf->pages = save_pages; in gss_krb5_cts_crypt()
644 struct xdr_buf *buf, struct page **pages) in gss_krb5_aes_encrypt() argument
703 save_pages = buf->pages; in gss_krb5_aes_encrypt()
[all …]
Dgss_rpc_upcall.c218 for (i = 0; i < arg->npages && arg->pages[i]; i++) in gssp_free_receive_pages()
219 __free_page(arg->pages[i]); in gssp_free_receive_pages()
221 kfree(arg->pages); in gssp_free_receive_pages()
227 arg->pages = kzalloc(arg->npages * sizeof(struct page *), GFP_KERNEL); in gssp_alloc_receive_pages()
232 if (!arg->pages) in gssp_alloc_receive_pages()
/linux-4.1.27/drivers/net/ethernet/mellanox/mlx5/core/
Dalloc.c94 struct page **pages; in mlx5_buf_alloc() local
95 pages = kmalloc(sizeof(*pages) * buf->nbufs, GFP_KERNEL); in mlx5_buf_alloc()
96 if (!pages) in mlx5_buf_alloc()
99 pages[i] = virt_to_page(buf->page_list[i].buf); in mlx5_buf_alloc()
100 buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL); in mlx5_buf_alloc()
101 kfree(pages); in mlx5_buf_alloc()
/linux-4.1.27/arch/m32r/mm/
Ddiscontig.c29 unsigned long pages; member
48 mp->pages = PFN_DOWN(memory_end - memory_start); in mem_prof_init()
66 mp->pages = PFN_DOWN(CONFIG_IRAM_SIZE) + holes; in mem_prof_init()
88 max_pfn = mp->start_pfn + mp->pages; in setup_memory()
93 PFN_PHYS(mp->pages)); in setup_memory()
/linux-4.1.27/Documentation/trace/
Devents-kmem.txt22 justified, particularly if kmalloc slab pages are getting significantly
50 If pages are allocated directly from the buddy allocator, the
60 When pages are freed in batch, the mm_page_free_batched event is also triggered.
61 Broadly speaking, pages are taken off the LRU lock in bulk and
72 for order-0 pages, reduces contention on the zone->lock and reduces the
75 When a per-CPU list is empty or pages of the wrong type are allocated,
80 When the per-CPU list is too full, a number of pages are freed, each one
83 The individual nature of the events is so that pages can be tracked
84 between allocation and freeing. A number of drain or refill pages that occur
90 line bounces due to writes between CPUs and worth investigating if pages
[all …]
/linux-4.1.27/tools/vm/
Dpage-types.c224 static unsigned long pages2mb(unsigned long pages) in pages2mb() argument
226 return (pages * page_size) >> 20; in pages2mb()
278 unsigned long pages) in kpageflags_read() argument
280 return do_u64_read(kpageflags_fd, PROC_KPAGEFLAGS, buf, index, pages); in kpageflags_read()
285 unsigned long pages) in pagemap_read() argument
287 return do_u64_read(pagemap_fd, "/proc/pid/pagemap", buf, index, pages); in pagemap_read()
597 unsigned long pages; in walk_pfn() local
602 pages = kpageflags_read(buf, index, batch); in walk_pfn()
603 if (pages == 0) in walk_pfn()
606 for (i = 0; i < pages; i++) in walk_pfn()
[all …]
/linux-4.1.27/fs/9p/
Dcache.h50 struct list_head *pages,
75 struct list_head *pages, in v9fs_readpages_from_fscache() argument
78 return __v9fs_readpages_from_fscache(inode, mapping, pages, in v9fs_readpages_from_fscache()
131 struct list_head *pages, in v9fs_readpages_from_fscache() argument
Dcache.c172 ClearPageFsCache(pvec.pages[loop]); in v9fs_cache_inode_now_uncached()
174 first = pvec.pages[nr_pages - 1]->index + 1; in v9fs_cache_inode_now_uncached()
357 struct list_head *pages, in __v9fs_readpages_from_fscache() argument
368 mapping, pages, nr_pages, in __v9fs_readpages_from_fscache()
378 BUG_ON(!list_empty(pages)); in __v9fs_readpages_from_fscache()
/linux-4.1.27/drivers/gpu/drm/nouveau/
Dnouveau_sgdma.c34 node->pages = NULL; in nv04_sgdma_bind()
37 node->pages = nvbe->ttm.dma_address; in nv04_sgdma_bind()
69 node->pages = NULL; in nv50_sgdma_bind()
72 node->pages = nvbe->ttm.dma_address; in nv50_sgdma_bind()
/linux-4.1.27/arch/x86/kernel/
Damd_gart_64.c336 unsigned long pages) in __dma_map_cont() argument
338 unsigned long iommu_start = alloc_iommu(dev, pages, 0); in __dma_map_cont()
347 unsigned long pages, addr; in __dma_map_cont() local
360 pages = iommu_num_pages(s->offset, s->length, PAGE_SIZE); in __dma_map_cont()
361 while (pages--) { in __dma_map_cont()
367 BUG_ON(iommu_page - iommu_start != pages); in __dma_map_cont()
374 struct scatterlist *sout, unsigned long pages, int need) in dma_map_cont() argument
382 return __dma_map_cont(dev, start, nelems, sout, pages); in dma_map_cont()
394 unsigned long pages = 0; in gart_map_sg() local
431 sgmap, pages, need) < 0) in gart_map_sg()
[all …]
Dalternative.c679 struct page *pages[2]; in text_poke() local
683 pages[0] = vmalloc_to_page(addr); in text_poke()
684 pages[1] = vmalloc_to_page(addr + PAGE_SIZE); in text_poke()
686 pages[0] = virt_to_page(addr); in text_poke()
687 WARN_ON(!PageReserved(pages[0])); in text_poke()
688 pages[1] = virt_to_page(addr + PAGE_SIZE); in text_poke()
690 BUG_ON(!pages[0]); in text_poke()
692 set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0])); in text_poke()
693 if (pages[1]) in text_poke()
694 set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1])); in text_poke()
[all …]
/linux-4.1.27/net/sunrpc/
Dxdr.c125 kaddr = kmap_atomic(buf->pages[0]); in xdr_terminate_string()
133 struct page **pages, unsigned int base, unsigned int len) in xdr_inline_pages() argument
142 xdr->pages = pages; in xdr_inline_pages()
172 _shift_data_right_pages(struct page **pages, size_t pgto_base, in _shift_data_right_pages() argument
184 pgto = pages + (pgto_base >> PAGE_CACHE_SHIFT); in _shift_data_right_pages()
185 pgfrom = pages + (pgfrom_base >> PAGE_CACHE_SHIFT); in _shift_data_right_pages()
233 _copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len) in _copy_to_pages() argument
239 pgto = pages + (pgbase >> PAGE_CACHE_SHIFT); in _copy_to_pages()
277 _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len) in _copy_from_pages() argument
283 pgfrom = pages + (pgbase >> PAGE_CACHE_SHIFT); in _copy_from_pages()
[all …]
/linux-4.1.27/drivers/mtd/nand/
Dnand_bbt.c276 res = read_bbt(mtd, buf, td->pages[i], in read_abs_bbt()
284 res = read_bbt(mtd, buf, td->pages[0], in read_abs_bbt()
398 scan_read(mtd, buf, (loff_t)td->pages[0] << this->page_shift, in read_abs_bbts()
402 td->pages[0], td->version[0]); in read_abs_bbts()
407 scan_read(mtd, buf, (loff_t)md->pages[0] << this->page_shift, in read_abs_bbts()
411 md->pages[0], md->version[0]); in read_abs_bbts()
558 td->pages[i] = -1; in search_bbt()
568 td->pages[i] = actblock << blocktopage; in search_bbt()
580 if (td->pages[i] == -1) in search_bbt()
584 td->pages[i], td->version[i]); in search_bbt()
[all …]
/linux-4.1.27/Documentation/cgroups/
Dmemory.txt41 - accounting anonymous pages, file caches, swap caches usage and limiting them.
42 - pages are linked to per-memcg LRU exclusively, and there is no global LRU.
169 All mapped anon pages (RSS) and cache pages (Page Cache) are accounted.
170 Some pages which are never reclaimable and will not be on the LRU
171 are not accounted. We just account pages under usual VM management.
173 RSS pages are accounted at page_fault unless they've already been accounted
179 unaccounted when it's removed from radix-tree. Even if RSS pages are fully
185 This means swapped-in pages may contain pages for other tasks than a task
190 Note: we just account pages-on-LRU because our purpose is to control amount
191 of used pages; not-on-LRU pages tend to be out-of-control from VM view.
[all …]
/linux-4.1.27/arch/s390/kernel/
Dsuspend.c53 unsigned long page_key_additional_pages(unsigned long pages) in page_key_additional_pages() argument
55 return DIV_ROUND_UP(pages, PAGE_KEY_DATA_SIZE); in page_key_additional_pages()
76 int page_key_alloc(unsigned long pages) in page_key_alloc() argument
81 size = DIV_ROUND_UP(pages, PAGE_KEY_DATA_SIZE); in page_key_alloc()
/linux-4.1.27/drivers/gpu/drm/tegra/
Dgem.c179 if (bo->pages) { in tegra_bo_free()
180 drm_gem_put_pages(&bo->gem, bo->pages, true, true); in tegra_bo_free()
195 bo->pages = drm_gem_get_pages(&bo->gem); in tegra_bo_get_pages()
196 if (IS_ERR(bo->pages)) in tegra_bo_get_pages()
197 return PTR_ERR(bo->pages); in tegra_bo_get_pages()
201 sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages); in tegra_bo_get_pages()
229 drm_gem_put_pages(&bo->gem, bo->pages, false, false); in tegra_bo_get_pages()
448 if (!bo->pages) in tegra_bo_fault()
452 page = bo->pages[offset]; in tegra_bo_fault()
489 if (!bo->pages) { in tegra_drm_mmap()
[all …]
/linux-4.1.27/tools/perf/util/
Devlist.c894 static size_t perf_evlist__mmap_size(unsigned long pages) in perf_evlist__mmap_size() argument
896 if (pages == UINT_MAX) { in perf_evlist__mmap_size()
910 pages = (max * 1024) / page_size; in perf_evlist__mmap_size()
911 if (!is_power_of_2(pages)) in perf_evlist__mmap_size()
912 pages = rounddown_pow_of_two(pages); in perf_evlist__mmap_size()
913 } else if (!is_power_of_2(pages)) in perf_evlist__mmap_size()
916 return (pages + 1) * page_size; in perf_evlist__mmap_size()
922 unsigned long pages, val; in parse_pages_arg() local
937 pages = PERF_ALIGN(val, page_size) / page_size; in parse_pages_arg()
941 pages = strtoul(str, &eptr, 10); in parse_pages_arg()
[all …]
/linux-4.1.27/arch/x86/include/asm/
Dcacheflush.h53 int set_pages_array_uc(struct page **pages, int addrinarray);
54 int set_pages_array_wc(struct page **pages, int addrinarray);
55 int set_pages_array_wb(struct page **pages, int addrinarray);
/linux-4.1.27/fs/afs/
Dwrite.c295 first, count, pv.pages); in afs_kill_pages()
299 ClearPageUptodate(pv.pages[loop]); in afs_kill_pages()
301 SetPageError(pv.pages[loop]); in afs_kill_pages()
302 end_page_writeback(pv.pages[loop]); in afs_kill_pages()
318 struct page *pages[8], *page; in afs_write_back_from_locked_page() local
342 if (n > ARRAY_SIZE(pages)) in afs_write_back_from_locked_page()
343 n = ARRAY_SIZE(pages); in afs_write_back_from_locked_page()
345 start, n, pages); in afs_write_back_from_locked_page()
349 if (pages[0]->index != start) { in afs_write_back_from_locked_page()
351 put_page(pages[--n]); in afs_write_back_from_locked_page()
[all …]
Dfile.c28 struct list_head *pages, unsigned nr_pages);
241 struct list_head *pages, unsigned nr_pages) in afs_readpages() argument
262 pages, in afs_readpages()
274 BUG_ON(!list_empty(pages)); in afs_readpages()
291 ret = read_cache_pages(mapping, pages, afs_page_filler, key); in afs_readpages()
/linux-4.1.27/fs/
Dsplice.c209 buf->page = spd->pages[page_nr]; in splice_to_pipe()
270 page_cache_release(spd->pages[i]); in spd_release_page()
285 spd->pages = kmalloc(buffers * sizeof(struct page *), GFP_KERNEL); in splice_grow_spd()
288 if (spd->pages && spd->partial) in splice_grow_spd()
291 kfree(spd->pages); in splice_grow_spd()
301 kfree(spd->pages); in splice_shrink_spd()
312 struct page *pages[PIPE_DEF_BUFFERS]; in __generic_file_splice_read() local
319 .pages = pages, in __generic_file_splice_read()
338 spd.nr_pages = find_get_pages_contig(mapping, index, nr_pages, spd.pages); in __generic_file_splice_read()
379 spd.pages[spd.nr_pages++] = page; in __generic_file_splice_read()
[all …]
/linux-4.1.27/arch/metag/mm/
Dmmu-meta2.c139 unsigned int pages = DIV_ROUND_UP(mem_size, 1 << 22); in mmu_init() local
170 second_level_table = alloc_bootmem_pages(SECOND_LEVEL_ALIGN * pages); in mmu_init()
176 while (pages > 0) { in mmu_init()
199 pages--; in mmu_init()
DKconfig12 bool "Map kernel with 4MB pages"
16 Map the kernel with large pages to reduce TLB pressure.
31 This enables 8kB pages as supported by Meta 2.x and later MMUs.
37 This enables 16kB pages as supported by Meta 2.x and later MMUs.
58 pages. This option selects the largest power of two that the kernel
64 a value of 11 means that the largest free memory block is 2^10 pages.
/linux-4.1.27/fs/ext4/
Dreadpage.c134 struct list_head *pages, struct page *page, in ext4_mpage_readpages() argument
165 if (pages) { in ext4_mpage_readpages()
166 page = list_entry(pages->prev, struct page, lru); in ext4_mpage_readpages()
321 if (pages) in ext4_mpage_readpages()
324 BUG_ON(pages && !list_empty(pages)); in ext4_mpage_readpages()
/linux-4.1.27/drivers/virt/
Dfsl_hypervisor.c151 struct page **pages = NULL; in ioctl_memcpy() local
226 pages = kzalloc(num_pages * sizeof(struct page *), GFP_KERNEL); in ioctl_memcpy()
227 if (!pages) { in ioctl_memcpy()
250 0, pages, NULL); in ioctl_memcpy()
265 sg_list[0].source = page_to_phys(pages[0]) + lb_offset; in ioctl_memcpy()
269 sg_list[0].target = page_to_phys(pages[0]) + lb_offset; in ioctl_memcpy()
279 sg_list[i].source = page_to_phys(pages[i]); in ioctl_memcpy()
284 sg_list[i].target = page_to_phys(pages[i]); in ioctl_memcpy()
296 if (pages) { in ioctl_memcpy()
298 if (pages[i]) in ioctl_memcpy()
[all …]
/linux-4.1.27/kernel/power/
Dsnapshot.c434 unsigned long pages; in create_zone_bm_rtree() local
436 pages = end - start; in create_zone_bm_rtree()
445 nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK); in create_zone_bm_rtree()
804 unsigned long bits, pfn, pages; in memory_bm_next_pfn() local
808 pages = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn; in memory_bm_next_pfn()
809 bits = min(pages - bm->cur.node_pfn, BM_BITS_PER_BLOCK); in memory_bm_next_pfn()
1562 unsigned long saveable, size, max_size, count, highmem, pages = 0; in hibernate_preallocate_memory() local
1620 pages = preallocate_image_highmem(save_highmem); in hibernate_preallocate_memory()
1621 pages += preallocate_image_memory(saveable - pages, avail_normal); in hibernate_preallocate_memory()
1626 pages = minimum_image_size(saveable); in hibernate_preallocate_memory()
[all …]
/linux-4.1.27/drivers/char/agp/
Dgeneric.c91 mem->pages = NULL; in agp_alloc_page_array()
94 mem->pages = kmalloc(size, GFP_KERNEL | __GFP_NOWARN); in agp_alloc_page_array()
95 if (mem->pages == NULL) { in agp_alloc_page_array()
96 mem->pages = vmalloc(size); in agp_alloc_page_array()
122 if (new->pages == NULL) { in agp_create_user_memory()
148 if (new->pages == NULL) { in agp_create_memory()
193 curr->pages[i], in agp_free_memory()
198 curr->pages[i], in agp_free_memory()
275 new->pages[i] = page; in agp_allocate_memory()
1103 page_to_phys(mem->pages[i]), in agp_generic_insert_memory()
[all …]
Duninorth-agp.c186 gp[i] = (page_to_phys(mem->pages[i]) >> PAGE_SHIFT) | 0x80000000UL; in uninorth_insert_memory()
188 gp[i] = cpu_to_le32((page_to_phys(mem->pages[i]) & 0xFFFFF000UL) | in uninorth_insert_memory()
190 flush_dcache_range((unsigned long)__va(page_to_phys(mem->pages[i])), in uninorth_insert_memory()
191 (unsigned long)__va(page_to_phys(mem->pages[i]))+0x1000); in uninorth_insert_memory()
374 struct page **pages; in uninorth_create_gatt_table() local
403 pages = kmalloc((1 << page_order) * sizeof(struct page*), GFP_KERNEL); in uninorth_create_gatt_table()
404 if (pages == NULL) in uninorth_create_gatt_table()
412 pages[i] = page; in uninorth_create_gatt_table()
419 bridge->gatt_table = vmap(pages, (1 << page_order), 0, PAGE_KERNEL_NCG); in uninorth_create_gatt_table()
437 kfree(pages); in uninorth_create_gatt_table()
Dintel-gtt.c97 static int intel_gtt_map_memory(struct page **pages, in intel_gtt_map_memory() argument
110 sg_set_page(sg, pages[i], PAGE_SIZE, 0); in intel_gtt_map_memory()
261 new->pages[0] = page; in alloc_agpphysmem_i8xx()
264 new->pages[1] = new->pages[0] + 1; in alloc_agpphysmem_i8xx()
265 new->pages[2] = new->pages[1] + 1; in alloc_agpphysmem_i8xx()
266 new->pages[3] = new->pages[2] + 1; in alloc_agpphysmem_i8xx()
271 new->physical = page_to_phys(new->pages[0]); in alloc_agpphysmem_i8xx()
280 i8xx_destroy_pages(curr->pages[0]); in intel_i810_free_by_type()
282 agp_bridge->driver->agp_destroy_page(curr->pages[0], in intel_i810_free_by_type()
284 agp_bridge->driver->agp_destroy_page(curr->pages[0], in intel_i810_free_by_type()
[all …]
/linux-4.1.27/arch/arm/xen/
Denlighten.c61 struct page **pages) in xen_remap_domain_mfn_array() argument
64 prot, domid, pages); in xen_remap_domain_mfn_array()
73 struct page **pages) in xen_remap_domain_mfn_range() argument
80 int nr, struct page **pages) in xen_unmap_domain_mfn_range() argument
82 return xen_xlate_unmap_gfn_range(vma, nr, pages); in xen_unmap_domain_mfn_range()
/linux-4.1.27/fs/btrfs/
Dextent_io.c1583 struct page *pages[16]; in __unlock_for_delalloc() local
1595 ARRAY_SIZE(pages)), pages); in __unlock_for_delalloc()
1597 if (pages[i] != locked_page) in __unlock_for_delalloc()
1598 unlock_page(pages[i]); in __unlock_for_delalloc()
1599 page_cache_release(pages[i]); in __unlock_for_delalloc()
1616 struct page *pages[16]; in lock_delalloc_pages() local
1630 nrpages, ARRAY_SIZE(pages)), pages); in lock_delalloc_pages()
1641 if (pages[i] != locked_page) { in lock_delalloc_pages()
1642 lock_page(pages[i]); in lock_delalloc_pages()
1643 if (!PageDirty(pages[i]) || in lock_delalloc_pages()
[all …]
Dfile.c464 static void btrfs_drop_pages(struct page **pages, size_t num_pages) in btrfs_drop_pages() argument
474 ClearPageChecked(pages[i]); in btrfs_drop_pages()
475 unlock_page(pages[i]); in btrfs_drop_pages()
476 page_cache_release(pages[i]); in btrfs_drop_pages()
489 struct page **pages, size_t num_pages, in btrfs_dirty_pages() argument
511 struct page *p = pages[i]; in btrfs_dirty_pages()
1316 static noinline int prepare_pages(struct inode *inode, struct page **pages, in prepare_pages() argument
1327 pages[i] = find_or_create_page(inode->i_mapping, index + i, in prepare_pages()
1329 if (!pages[i]) { in prepare_pages()
1336 err = prepare_uptodate_page(pages[i], pos, in prepare_pages()
[all …]
/linux-4.1.27/Documentation/
Dnommu-mmap.txt21 In the MMU case: VM regions backed by arbitrary pages; copy-on-write
25 pages.
36 In the MMU case: VM regions backed by pages read from file; changes to
61 In the MMU case: like the non-PROT_WRITE case, except that the pages in
64 the mapping's backing pages. The page is then backed by swap instead.
71 In the MMU case: VM regions backed by pages read from file; changes to
72 pages written back to file; writes to file reflected into pages backing
83 sequence by providing a contiguous sequence of pages to map. In that
93 blockdev must be able to provide a contiguous run of pages without
124 Linux man pages (ver 2.22 or later).
[all …]
/linux-4.1.27/arch/sparc/kernel/
Dpci_fire.c231 unsigned long pages, order, i; in pci_fire_msiq_alloc() local
234 pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order); in pci_fire_msiq_alloc()
235 if (pages == 0UL) { in pci_fire_msiq_alloc()
240 memset((char *)pages, 0, PAGE_SIZE << order); in pci_fire_msiq_alloc()
241 pbm->msi_queues = (void *) pages; in pci_fire_msiq_alloc()
263 unsigned long pages, order; in pci_fire_msiq_free() local
266 pages = (unsigned long) pbm->msi_queues; in pci_fire_msiq_free()
268 free_pages(pages, order); in pci_fire_msiq_free()
/linux-4.1.27/fs/nilfs2/
Dpage.c270 struct page *page = pvec.pages[i], *dpage; in nilfs_copy_dirty_pages()
323 index = pvec.pages[n - 1]->index + 1; in nilfs_copy_back_pages()
326 struct page *page = pvec.pages[i], *dpage; in nilfs_copy_back_pages()
388 struct page *page = pvec.pages[i]; in nilfs_clear_dirty_pages()
533 pvec.pages); in nilfs_find_uncommitted_extent()
537 if (length > 0 && pvec.pages[0]->index > index) in nilfs_find_uncommitted_extent()
540 b = pvec.pages[0]->index << (PAGE_CACHE_SHIFT - inode->i_blkbits); in nilfs_find_uncommitted_extent()
543 page = pvec.pages[i]; in nilfs_find_uncommitted_extent()
/linux-4.1.27/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/
Dgk20a.c79 struct page *pages[]; member
179 __free_page(node->pages[i]); in gk20a_instobj_dtor_iommu()
256 sizeof(*node) + sizeof(node->pages[0]) * npages, in gk20a_instobj_ctor_iommu()
270 node->pages[i] = p; in gk20a_instobj_ctor_iommu()
285 struct page *p = node->pages[i]; in gk20a_instobj_ctor_iommu()
317 for (i = 0; i < npages && node->pages[i] != NULL; i++) in gk20a_instobj_ctor_iommu()
318 __free_page(node->pages[i]); in gk20a_instobj_ctor_iommu()
/linux-4.1.27/drivers/block/
Drbd.c266 struct page **pages; member
1279 static void zero_pages(struct page **pages, u64 offset, u64 end) in zero_pages() argument
1281 struct page **page = &pages[offset >> PAGE_SHIFT]; in zero_pages()
1714 zero_pages(obj_request->pages, 0, length); in rbd_img_obj_request_read_callback()
1720 zero_pages(obj_request->pages, xferred, length); in rbd_img_obj_request_read_callback()
2068 if (obj_request->pages) in rbd_obj_request_destroy()
2069 ceph_release_page_vector(obj_request->pages, in rbd_obj_request_destroy()
2292 obj_request->pages = NULL; in rbd_img_obj_end_request()
2404 obj_request->pages, length, in rbd_img_obj_request_fill()
2431 struct page **pages = NULL; in rbd_img_request_fill() local
[all …]
/linux-4.1.27/Documentation/device-mapper/
Ddm-io.txt21 The first I/O service type takes a list of memory pages as the data buffer for
50 memory pages.
68 and specify the number of pages they expect to perform I/O on concurrently.
69 Dm-io will attempt to resize its mempool to make sure enough pages are
73 dm_io_put() and specify the same number of pages that were given on the
/linux-4.1.27/include/linux/sunrpc/
Dxdr.h59 struct page ** pages; /* Array of pages */ member
193 extern void _copy_from_pages(char *p, struct page **pages, size_t pgbase,
221 extern void xdr_write_pages(struct xdr_stream *xdr, struct page **pages,
226 struct page **pages, unsigned int len);
Dgss_krb5.h85 struct page **pages); /* v2 encryption function */
257 struct xdr_buf *outbuf, struct page **pages);
274 int offset, struct page **pages);
314 struct page **pages);
/linux-4.1.27/arch/s390/kvm/
Dgaccess.c714 unsigned long *pages, unsigned long nr_pages, in guest_page_range() argument
733 rc = guest_translate(vcpu, ga, pages, asce, write); in guest_page_range()
741 *pages = kvm_s390_real_to_abs(vcpu, ga); in guest_page_range()
742 if (kvm_is_error_gpa(vcpu->kvm, *pages)) in guest_page_range()
748 pages++; in guest_page_range()
760 unsigned long *pages; in access_guest() local
771 pages = pages_array; in access_guest()
773 pages = vmalloc(nr_pages * sizeof(unsigned long)); in access_guest()
774 if (!pages) in access_guest()
779 rc = guest_page_range(vcpu, ga, pages, nr_pages, asce, write); in access_guest()
[all …]
/linux-4.1.27/kernel/
Dkexec.c642 struct page *pages; in kimage_alloc_pages() local
644 pages = alloc_pages(gfp_mask, order); in kimage_alloc_pages()
645 if (pages) { in kimage_alloc_pages()
647 pages->mapping = NULL; in kimage_alloc_pages()
648 set_page_private(pages, order); in kimage_alloc_pages()
651 SetPageReserved(pages + i); in kimage_alloc_pages()
654 return pages; in kimage_alloc_pages()
698 struct page *pages; in kimage_alloc_normal_control_pages() local
710 pages = kimage_alloc_pages(KEXEC_CONTROL_MEMORY_GFP, order); in kimage_alloc_normal_control_pages()
711 if (!pages) in kimage_alloc_normal_control_pages()
[all …]
/linux-4.1.27/drivers/gpu/drm/vmwgfx/
Dvmwgfx_buffer.c248 return viter->pages[viter->i]; in __vmw_piter_non_sg_page()
268 return page_to_phys(viter->pages[viter->i]); in __vmw_piter_phys_addr()
302 viter->pages = vsgt->pages; in vmw_piter_start()
309 viter->pages = vsgt->pages; in vmw_piter_start()
394 vsgt->pages = vmw_tt->dma_ttm.ttm.pages; in vmw_ttm_map_dma()
412 ret = sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages, in vmw_ttm_map_dma()
/linux-4.1.27/Documentation/filesystems/caching/
Dnetfs-api.txt259 or not. Note that several pages at once may be presented for marking.
261 The PG_fscache bit is set on the pages before this function would be
266 (10) A function to unmark all the pages retaining cache metadata [mandatory].
269 unbound from a cookie and that all the marks on the pages should be
271 its tracking information so that the pages don't need to be explicitly
433 Note that attempts to read or write data pages in the cache over this size may
445 And the sixth step is to store and retrieve pages in the cache. There are
592 A facility is provided to read several pages at once, as requested by the
597 struct list_head *pages,
605 (1) Any page it can retrieve data for is removed from pages and nr_pages and
[all …]
