/linux-4.1.27/mm/

page_counter.c
    20 void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages)  in page_counter_cancel() argument
    24 new = atomic_long_sub_return(nr_pages, &counter->count);  in page_counter_cancel()
    36 void page_counter_charge(struct page_counter *counter, unsigned long nr_pages)  in page_counter_charge() argument
    43 new = atomic_long_add_return(nr_pages, &c->count);  in page_counter_charge()
    63 unsigned long nr_pages,  in page_counter_try_charge() argument
    84 new = atomic_long_add_return(nr_pages, &c->count);  in page_counter_try_charge()
    86 atomic_long_sub(nr_pages, &c->count);  in page_counter_try_charge()
    106 page_counter_cancel(c, nr_pages);  in page_counter_try_charge()
    116 void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages)  in page_counter_uncharge() argument
    121 page_counter_cancel(c, nr_pages);  in page_counter_uncharge()
    [all …]
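The page_counter entries above (together with the page_counter.h declarations later in this listing) describe a small hierarchical charge/uncharge API. A minimal sketch of a client; the counter, its limit, and the function names here are illustrative, not code from the tree:

    /* Sketch of a page_counter client, assuming the 4.1-era API above. */
    #include <linux/page_counter.h>

    static struct page_counter example_counter;

    static int example_init(unsigned long limit_pages)
    {
    	page_counter_init(&example_counter, NULL);	/* no parent counter */
    	return page_counter_limit(&example_counter, limit_pages);
    }

    static int example_charge(unsigned long nr_pages)
    {
    	struct page_counter *fail;

    	/* Atomically adds nr_pages; fails if any counter in the
    	 * hierarchy would exceed its limit ('fail' names the one). */
    	if (page_counter_try_charge(&example_counter, nr_pages, &fail))
    		return -ENOMEM;
    	return 0;
    }

    static void example_uncharge(unsigned long nr_pages)
    {
    	page_counter_uncharge(&example_counter, nr_pages);
    }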
percpu-km.c
    50 const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT;  in pcpu_create_chunk() local
    59 pages = alloc_pages(GFP_KERNEL, order_base_2(nr_pages));  in pcpu_create_chunk()
    65 for (i = 0; i < nr_pages; i++)  in pcpu_create_chunk()
    72 pcpu_chunk_populated(chunk, 0, nr_pages);  in pcpu_create_chunk()
    80 const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT;  in pcpu_destroy_chunk() local
    83 __free_pages(chunk->data, order_base_2(nr_pages));  in pcpu_destroy_chunk()
    94 size_t nr_pages, alloc_pages;  in pcpu_verify_alloc_info() local
    102 nr_pages = (ai->groups[0].nr_units * ai->unit_size) >> PAGE_SHIFT;  in pcpu_verify_alloc_info()
    103 alloc_pages = roundup_pow_of_two(nr_pages);  in pcpu_verify_alloc_info()
    105 if (alloc_pages > nr_pages)  in pcpu_verify_alloc_info()
    [all …]

memory_hotplug.c
    247 unsigned long i, pfn, end_pfn, nr_pages;  in register_page_bootmem_info_node() local
    252 nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;  in register_page_bootmem_info_node()
    255 for (i = 0; i < nr_pages; i++, page++)  in register_page_bootmem_info_node()
    261 nr_pages = zone->wait_table_hash_nr_entries  in register_page_bootmem_info_node()
    263 nr_pages = PAGE_ALIGN(nr_pages) >> PAGE_SHIFT;  in register_page_bootmem_info_node()
    266 for (i = 0; i < nr_pages; i++, page++)  in register_page_bootmem_info_node()
    446 int nr_pages = PAGES_PER_SECTION;  in __add_zone() local
    453 ret = ensure_zone_is_initialized(zone, phys_start_pfn, nr_pages);  in __add_zone()
    458 grow_zone_span(zone, phys_start_pfn, phys_start_pfn + nr_pages);  in __add_zone()
    460 phys_start_pfn + nr_pages);  in __add_zone()
    [all …]

page_ext.c
    124 unsigned long nr_pages;  in alloc_node_page_ext() local
    126 nr_pages = NODE_DATA(nid)->node_spanned_pages;  in alloc_node_page_ext()
    127 if (!nr_pages)  in alloc_node_page_ext()
    137 nr_pages += MAX_ORDER_NR_PAGES;  in alloc_node_page_ext()
    139 table_size = sizeof(struct page_ext) * nr_pages;  in alloc_node_page_ext()
    276 unsigned long nr_pages,  in online_page_ext() argument
    283 end = SECTION_ALIGN_UP(start_pfn + nr_pages);  in online_page_ext()
    311 unsigned long nr_pages, int nid)  in offline_page_ext() argument
    316 end = SECTION_ALIGN_UP(start_pfn + nr_pages);  in offline_page_ext()
    333 mn->nr_pages, mn->status_change_nid);  in page_ext_callback()
    [all …]

hugetlb_cgroup.c
    112 unsigned int nr_pages;  in hugetlb_cgroup_move_parent() local
    126 nr_pages = 1 << compound_order(page);  in hugetlb_cgroup_move_parent()
    130 page_counter_charge(&parent->hugepage[idx], nr_pages);  in hugetlb_cgroup_move_parent()
    134 page_counter_cancel(counter, nr_pages);  in hugetlb_cgroup_move_parent()
    165 int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,  in hugetlb_cgroup_charge_cgroup() argument
    189 ret = page_counter_try_charge(&h_cg->hugepage[idx], nr_pages, &counter);  in hugetlb_cgroup_charge_cgroup()
    197 void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,  in hugetlb_cgroup_commit_charge() argument
    211 void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,  in hugetlb_cgroup_uncharge_page() argument
    223 page_counter_uncharge(&h_cg->hugepage[idx], nr_pages);  in hugetlb_cgroup_uncharge_page()
    227 void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,  in hugetlb_cgroup_uncharge_cgroup() argument
    [all …]

page_isolation.c
    26 arg.nr_pages = pageblock_nr_pages;  in set_migratetype_isolate()
    59 unsigned long nr_pages;  in set_migratetype_isolate() local
    64 nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE);  in set_migratetype_isolate()
    66 __mod_zone_freepage_state(zone, -nr_pages, migratetype);  in set_migratetype_isolate()
    78 unsigned long flags, nr_pages;  in unset_migratetype_isolate() local
    120 nr_pages = move_freepages_block(zone, page, migratetype);  in unset_migratetype_isolate()
    121 __mod_zone_freepage_state(zone, nr_pages, migratetype);  in unset_migratetype_isolate()
    132 __first_valid_page(unsigned long pfn, unsigned long nr_pages)  in __first_valid_page() argument
    135 for (i = 0; i < nr_pages; i++)  in __first_valid_page()
    138 if (unlikely(i == nr_pages))  in __first_valid_page()

gup.c
    417 unsigned long start, unsigned long nr_pages,  in __get_user_pages() argument
    425 if (!nr_pages)  in __get_user_pages()
    461 &start, &nr_pages, i,  in __get_user_pages()
    507 if (page_increm > nr_pages)  in __get_user_pages()
    508 page_increm = nr_pages;  in __get_user_pages()
    511 nr_pages -= page_increm;  in __get_user_pages()
    512 } while (nr_pages);  in __get_user_pages()
    581 unsigned long nr_pages,  in __get_user_pages_locked() argument
    608 ret = __get_user_pages(tsk, mm, start, nr_pages, flags, pages,  in __get_user_pages_locked()
    617 BUG_ON(ret >= nr_pages);  in __get_user_pages_locked()
    [all …]

process_vm_access.c
    87 unsigned long nr_pages;  in process_vm_rw_single_vec() local
    95 nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1;  in process_vm_rw_single_vec()
    97 while (!rc && nr_pages && iov_iter_count(iter)) {  in process_vm_rw_single_vec()
    98 int pages = min(nr_pages, max_pages_per_loop);  in process_vm_rw_single_vec()
    116 nr_pages -= pages;  in process_vm_rw_single_vec()
    152 unsigned long nr_pages = 0;  in process_vm_rw_core() local
    168 nr_pages = max(nr_pages, nr_pages_iov);  in process_vm_rw_core()
    172 if (nr_pages == 0)  in process_vm_rw_core()
    175 if (nr_pages > PVM_MAX_PP_ARRAY_COUNT) {  in process_vm_rw_core()
    179 sizeof(struct pages *)*nr_pages),  in process_vm_rw_core()
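The expression at process_vm_access.c line 95 above counts the pages touched by the byte range [addr, addr + len). A worked example with illustrative values and 4 KiB pages:

    /* For addr = 0x1ff0, len = 0x20, PAGE_SIZE = 0x1000:
     *   (addr + len - 1) / PAGE_SIZE  =  0x200f >> 12  =  2
     *   addr / PAGE_SIZE              =  0x1ff0 >> 12  =  1
     *   nr_pages = 2 - 1 + 1 = 2
     * A 32-byte range that straddles a page boundary still spans two pages. */
    static unsigned long span_pages(unsigned long addr, unsigned long len)
    {
    	return (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1;
    }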
memcontrol.c
    676 unsigned long nr_pages = page_counter_read(&memcg->memory);  in soft_limit_excess() local
    680 if (nr_pages > soft_limit)  in soft_limit_excess()
    681 excess = nr_pages - soft_limit;  in soft_limit_excess()
    830 int nr_pages)  in mem_cgroup_charge_statistics() argument
    838 nr_pages);  in mem_cgroup_charge_statistics()
    841 nr_pages);  in mem_cgroup_charge_statistics()
    845 nr_pages);  in mem_cgroup_charge_statistics()
    848 if (nr_pages > 0)  in mem_cgroup_charge_statistics()
    852 nr_pages = -nr_pages; /* for event */  in mem_cgroup_charge_statistics()
    855 __this_cpu_add(memcg->stat->nr_page_events, nr_pages);  in mem_cgroup_charge_statistics()
    [all …]

migrate.c
    442 int nr_pages)  in __copy_gigantic_page() argument
    448 for (i = 0; i < nr_pages; ) {  in __copy_gigantic_page()
    461 int nr_pages;  in copy_huge_page() local
    466 nr_pages = pages_per_huge_page(h);  in copy_huge_page()
    468 if (unlikely(nr_pages > MAX_ORDER_NR_PAGES)) {  in copy_huge_page()
    469 __copy_gigantic_page(dst, src, nr_pages);  in copy_huge_page()
    475 nr_pages = hpage_nr_pages(src);  in copy_huge_page()
    478 for (i = 0; i < nr_pages; i++) {  in copy_huge_page()
    1296 unsigned long nr_pages,  in do_pages_move() argument
    1320 chunk_start < nr_pages;  in do_pages_move()
    [all …]

quicklist.c
    55 pages_to_free = q->nr_pages - max_pages(min_pages);  in min_pages_to_free()
    70 if (q->nr_pages > min_pages) {  in quicklist_trim()
    98 count += q->nr_pages;  in quicklist_total_size()

mlock.c
    175 int nr_pages;  in munlock_vma_page() local
    188 nr_pages = hpage_nr_pages(page);  in munlock_vma_page()
    192 __mod_zone_page_state(zone, NR_MLOCK, -nr_pages);  in munlock_vma_page()
    205 return nr_pages - 1;  in munlock_vma_page()
    503 int nr_pages;  in mlock_fixup() local
    535 nr_pages = (end - start) >> PAGE_SHIFT;  in mlock_fixup()
    537 nr_pages = -nr_pages;  in mlock_fixup()
    538 mm->locked_vm += nr_pages;  in mlock_fixup()

memory-failure.c
    1068 int nr_pages = 1 << compound_order(hpage);  in set_page_hwpoison_huge_page() local
    1069 for (i = 0; i < nr_pages; i++)  in set_page_hwpoison_huge_page()
    1076 int nr_pages = 1 << compound_order(hpage);  in clear_page_hwpoison_huge_page() local
    1077 for (i = 0; i < nr_pages; i++)  in clear_page_hwpoison_huge_page()
    1105 unsigned int nr_pages;  in memory_failure() local
    1133 nr_pages = 1 << compound_order(hpage);  in memory_failure()
    1135 nr_pages = 1;  in memory_failure()
    1136 atomic_long_add(nr_pages, &num_poisoned_pages);  in memory_failure()
    1165 atomic_long_sub(nr_pages, &num_poisoned_pages);  in memory_failure()
    1234 atomic_long_sub(nr_pages, &num_poisoned_pages);  in memory_failure()
    [all …]

percpu-vm.c
    133 static void __pcpu_unmap_pages(unsigned long addr, int nr_pages)  in __pcpu_unmap_pages() argument
    135 unmap_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT);  in __pcpu_unmap_pages()
    192 int nr_pages)  in __pcpu_map_pages() argument
    194 return map_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT,  in __pcpu_map_pages()

sparse.c
    198 unsigned long nr_pages = 0;  in node_memmap_size_bytes() local
    206 nr_pages += PAGES_PER_SECTION;  in node_memmap_size_bytes()
    209 return nr_pages * sizeof(struct page);  in node_memmap_size_bytes()
    659 unsigned long magic, nr_pages;  in free_map_bootmem() local
    662 nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))  in free_map_bootmem()
    665 for (i = 0; i < nr_pages; i++, page++) {  in free_map_bootmem()
    744 static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)  in clear_hwpoisoned_pages() argument
    759 static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)  in clear_hwpoisoned_pages() argument
page-writeback.c
    185 unsigned long nr_pages;  in zone_dirtyable_memory() local
    187 nr_pages = zone_page_state(zone, NR_FREE_PAGES);  in zone_dirtyable_memory()
    188 nr_pages -= min(nr_pages, zone->dirty_balance_reserve);  in zone_dirtyable_memory()
    190 nr_pages += zone_page_state(zone, NR_INACTIVE_FILE);  in zone_dirtyable_memory()
    191 nr_pages += zone_page_state(zone, NR_ACTIVE_FILE);  in zone_dirtyable_memory()
    193 return nr_pages;  in zone_dirtyable_memory()
    1668 int nr_pages = global_page_state(NR_FILE_DIRTY) +  in laptop_mode_timer_fn() local
    1676 bdi_start_writeback(&q->backing_dev_info, nr_pages,  in laptop_mode_timer_fn()
    1841 int nr_pages;  in write_cache_pages() local
    1877 nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,  in write_cache_pages()
    [all …]

internal.h
    280 int nr_pages = hpage_nr_pages(page);  in mlock_migrate_page() local
    283 __mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);  in mlock_migrate_page()
    285 __mod_zone_page_state(page_zone(newpage), NR_MLOCK, nr_pages);  in mlock_migrate_page()

swapfile.c
    134 nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9);  in discard_swap()
    145 nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);  in discard_swap()
    162 pgoff_t start_page, pgoff_t nr_pages)  in discard_swap_cluster() argument
    167 while (nr_pages) {  in discard_swap_cluster()
    171 start_page < se->start_page + se->nr_pages) {  in discard_swap_cluster()
    174 sector_t nr_blocks = se->nr_pages - offset;  in discard_swap_cluster()
    176 if (nr_blocks > nr_pages)  in discard_swap_cluster()
    177 nr_blocks = nr_pages;  in discard_swap_cluster()
    179 nr_pages -= nr_blocks;  in discard_swap_cluster()
    1597 offset < (se->start_page + se->nr_pages)) {  in map_swap_entry()
    [all …]

swap.c
    1078 pgoff_t start, unsigned nr_pages,  in pagevec_lookup_entries() argument
    1081 pvec->nr = find_get_entries(mapping, start, nr_pages,  in pagevec_lookup_entries()
    1124 pgoff_t start, unsigned nr_pages)  in pagevec_lookup() argument
    1126 pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages);  in pagevec_lookup()
    1132 pgoff_t *index, int tag, unsigned nr_pages)  in pagevec_lookup_tag() argument
    1135 nr_pages, pvec->pages);  in pagevec_lookup_tag()

hugetlb.c
    761 int nr_pages = 1 << order;  in destroy_compound_gigantic_page() local
    764 for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {  in destroy_compound_gigantic_page()
    780 unsigned long nr_pages)  in __alloc_gigantic_page() argument
    782 unsigned long end_pfn = start_pfn + nr_pages;  in __alloc_gigantic_page()
    787 unsigned long nr_pages)  in pfn_range_valid_gigantic() argument
    789 unsigned long i, end_pfn = start_pfn + nr_pages;  in pfn_range_valid_gigantic()
    812 unsigned long start_pfn, unsigned long nr_pages)  in zone_spans_last_pfn() argument
    814 unsigned long last_pfn = start_pfn + nr_pages - 1;  in zone_spans_last_pfn()
    820 unsigned long nr_pages = 1 << order;  in alloc_gigantic_page() local
    828 pfn = ALIGN(z->zone_start_pfn, nr_pages);  in alloc_gigantic_page()
    [all …]

util.c
    247 int nr_pages, int write, struct page **pages)  in __get_user_pages_fast() argument
    278 int nr_pages, int write, struct page **pages)  in get_user_pages_fast() argument
    281 return get_user_pages_unlocked(current, mm, start, nr_pages,  in get_user_pages_fast()

filemap.c
    335 int nr_pages;  in filemap_fdatawait_range() local
    343 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,  in filemap_fdatawait_range()
    348 for (i = 0; i < nr_pages; i++) {  in filemap_fdatawait_range()
    1219 unsigned int nr_pages, struct page **pages)  in find_get_pages() argument
    1225 if (unlikely(!nr_pages))  in find_get_pages()
    1265 if (++ret == nr_pages)  in find_get_pages()
    1286 unsigned int nr_pages, struct page **pages)  in find_get_pages_contig() argument
    1292 if (unlikely(!nr_pages))  in find_get_pages_contig()
    1342 if (++ret == nr_pages)  in find_get_pages_contig()
    1362 int tag, unsigned int nr_pages, struct page **pages)  in find_get_pages_tag() argument
    [all …]
vmscan.c
    1320 int nr_pages;  in isolate_lru_pages() local
    1329 nr_pages = hpage_nr_pages(page);  in isolate_lru_pages()
    1330 mem_cgroup_update_lru_size(lruvec, lru, -nr_pages);  in isolate_lru_pages()
    1332 nr_taken += nr_pages;  in isolate_lru_pages()
    1680 int nr_pages;  in move_active_pages_to_lru() local
    1689 nr_pages = hpage_nr_pages(page);  in move_active_pages_to_lru()
    1690 mem_cgroup_update_lru_size(lruvec, lru, nr_pages);  in move_active_pages_to_lru()
    1692 pgmoved += nr_pages;  in move_active_pages_to_lru()
    2836 unsigned long nr_pages,  in try_to_free_mem_cgroup_pages() argument
    2844 .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),  in try_to_free_mem_cgroup_pages()
    [all …]

readahead.c
    112 struct list_head *pages, unsigned nr_pages)  in read_pages() argument
    121 ret = mapping->a_ops->readpages(filp, mapping, pages, nr_pages);  in read_pages()
    127 for (page_idx = 0; page_idx < nr_pages; page_idx++) {  in read_pages()

nommu.c
    152 unsigned long start, unsigned long nr_pages,  in __get_user_pages() argument
    168 for (i = 0; i < nr_pages; i++) {  in __get_user_pages()
    202 unsigned long start, unsigned long nr_pages,  in get_user_pages() argument
    213 return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas,  in get_user_pages()
    219 unsigned long start, unsigned long nr_pages,  in get_user_pages_locked() argument
    223 return get_user_pages(tsk, mm, start, nr_pages, write, force,  in get_user_pages_locked()
    229 unsigned long start, unsigned long nr_pages,  in __get_user_pages_unlocked() argument
    235 ret = get_user_pages(tsk, mm, start, nr_pages, write, force,  in __get_user_pages_unlocked()
    243 unsigned long start, unsigned long nr_pages,  in get_user_pages_unlocked() argument
    246 return __get_user_pages_unlocked(tsk, mm, start, nr_pages, write,  in get_user_pages_unlocked()

vmalloc.c
    1475 for (i = 0; i < area->nr_pages; i++) {  in __vunmap()
    1583 unsigned int nr_pages, array_size, i;  in __vmalloc_area_node() local
    1587 nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;  in __vmalloc_area_node()
    1588 array_size = (nr_pages * sizeof(struct page *));  in __vmalloc_area_node()
    1590 area->nr_pages = nr_pages;  in __vmalloc_area_node()
    1606 for (i = 0; i < area->nr_pages; i++) {  in __vmalloc_area_node()
    1616 area->nr_pages = i;  in __vmalloc_area_node()
    1631 (area->nr_pages*PAGE_SIZE), area->size);  in __vmalloc_area_node()
    2606 for (nr = 0; nr < v->nr_pages; nr++)  in show_numa_info()
    2635 if (v->nr_pages)  in s_show()
    [all …]

shmem.c
    1632 unsigned int loff, nr_pages, req_pages;  in shmem_file_splice_read() local
    1662 nr_pages = min(req_pages, spd.nr_pages_max);  in shmem_file_splice_read()
    1664 spd.nr_pages = find_get_pages_contig(mapping, index,  in shmem_file_splice_read()
    1665 nr_pages, spd.pages);  in shmem_file_splice_read()
    1666 index += spd.nr_pages;  in shmem_file_splice_read()
    1669 while (spd.nr_pages < nr_pages) {  in shmem_file_splice_read()
    1674 spd.pages[spd.nr_pages++] = page;  in shmem_file_splice_read()
    1679 nr_pages = spd.nr_pages;  in shmem_file_splice_read()
    1680 spd.nr_pages = 0;  in shmem_file_splice_read()
    1682 for (page_nr = 0; page_nr < nr_pages; page_nr++) {  in shmem_file_splice_read()
    [all …]
/linux-4.1.27/drivers/xen/

balloon.c
    326 static enum bp_state increase_reservation(unsigned long nr_pages)  in increase_reservation() argument
    339 nr_pages = min(nr_pages, balloon_stats.balloon_hotplug);  in increase_reservation()
    340 balloon_stats.hotplug_pages += nr_pages;  in increase_reservation()
    341 balloon_stats.balloon_hotplug -= nr_pages;  in increase_reservation()
    346 if (nr_pages > ARRAY_SIZE(frame_list))  in increase_reservation()
    347 nr_pages = ARRAY_SIZE(frame_list);  in increase_reservation()
    350 for (i = 0; i < nr_pages; i++) {  in increase_reservation()
    352 nr_pages = i;  in increase_reservation()
    360 reservation.nr_extents = nr_pages;  in increase_reservation()
    396 static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)  in decrease_reservation() argument
    [all …]

privcmd.c
    49 unsigned long nr_pages);
    425 unsigned long nr_pages;  in privcmd_ioctl_mmap_batch() local
    449 nr_pages = m.num;  in privcmd_ioctl_mmap_batch()
    450 if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))  in privcmd_ioctl_mmap_batch()
    492 m.addr + (nr_pages << PAGE_SHIFT) != vma->vm_end) {  in privcmd_ioctl_mmap_batch()
    504 m.addr + (nr_pages << PAGE_SHIFT) > vma->vm_end) {  in privcmd_ioctl_mmap_batch()
    508 if (privcmd_vma_range_is_mapped(vma, m.addr, nr_pages)) {  in privcmd_ioctl_mmap_batch()
    639 unsigned long nr_pages)  in privcmd_vma_range_is_mapped() argument
    641 return apply_to_page_range(vma->vm_mm, addr, nr_pages << PAGE_SHIFT,  in privcmd_vma_range_is_mapped()

grant-table.c
    686 int gnttab_alloc_pages(int nr_pages, struct page **pages)  in gnttab_alloc_pages() argument
    691 ret = alloc_xenballooned_pages(nr_pages, pages, false);  in gnttab_alloc_pages()
    695 for (i = 0; i < nr_pages; i++) {  in gnttab_alloc_pages()
    701 gnttab_free_pages(nr_pages, pages);  in gnttab_alloc_pages()
    718 void gnttab_free_pages(int nr_pages, struct page **pages)  in gnttab_free_pages() argument
    722 for (i = 0; i < nr_pages; i++) {  in gnttab_free_pages()
    730 free_xenballooned_pages(nr_pages, pages);  in gnttab_free_pages()
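gnttab_alloc_pages()/gnttab_free_pages() in the grant-table.c entry above wrap ballooned-page allocation for grant mappings. A minimal sketch of the paired alloc/free usage; the page count is arbitrary and the grant-mapping step is elided:

    /* Sketch: pages suitable for Xen grant mappings, assuming the
     * gnttab_alloc_pages()/gnttab_free_pages() pair shown above. */
    #include <xen/grant_table.h>

    #define EXAMPLE_NR_PAGES 4	/* arbitrary */

    static int example_grant_pages(void)
    {
    	struct page *pages[EXAMPLE_NR_PAGES];
    	int ret;

    	ret = gnttab_alloc_pages(EXAMPLE_NR_PAGES, pages);
    	if (ret)
    		return ret;	/* nothing was allocated */

    	/* ... set up grant mappings on these pages ... */

    	gnttab_free_pages(EXAMPLE_NR_PAGES, pages);
    	return 0;
    }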
swiotlb-xen.c
    115 int nr_pages;  in check_pages_physically_contiguous() local
    118 nr_pages = (offset + length + PAGE_SIZE-1) >> PAGE_SHIFT;  in check_pages_physically_contiguous()
    120 for (i = 1; i < nr_pages; i++) {  in check_pages_physically_contiguous()

/linux-4.1.27/drivers/media/v4l2-core/

videobuf-dma-sg.c
    66 int nr_pages)  in videobuf_vmalloc_to_sg() argument
    72 sglist = vzalloc(nr_pages * sizeof(*sglist));  in videobuf_vmalloc_to_sg()
    75 sg_init_table(sglist, nr_pages);  in videobuf_vmalloc_to_sg()
    76 for (i = 0; i < nr_pages; i++, virt += PAGE_SIZE) {  in videobuf_vmalloc_to_sg()
    96 int nr_pages, int offset, size_t size)  in videobuf_pages_to_sg() argument
    103 sglist = vmalloc(nr_pages * sizeof(*sglist));  in videobuf_pages_to_sg()
    106 sg_init_table(sglist, nr_pages);  in videobuf_pages_to_sg()
    114 for (i = 1; i < nr_pages; i++) {  in videobuf_pages_to_sg()
    176 dma->nr_pages = last-first+1;  in videobuf_dma_init_user_locked()
    177 dma->pages = kmalloc(dma->nr_pages * sizeof(struct page *), GFP_KERNEL);  in videobuf_dma_init_user_locked()
    [all …]
/linux-4.1.27/include/linux/

hugetlb_cgroup.h
    56 extern int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
    58 extern void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
    61 extern void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
    63 extern void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
    87 hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,  in hugetlb_cgroup_charge_cgroup() argument
    94 hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,  in hugetlb_cgroup_commit_charge() argument
    102 hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages, struct page *page)  in hugetlb_cgroup_uncharge_page() argument
    108 hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,  in hugetlb_cgroup_uncharge_cgroup() argument

mm_inline.h
    28 int nr_pages = hpage_nr_pages(page);  in add_page_to_lru_list() local
    29 mem_cgroup_update_lru_size(lruvec, lru, nr_pages);  in add_page_to_lru_list()
    31 __mod_zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru, nr_pages);  in add_page_to_lru_list()
    37 int nr_pages = hpage_nr_pages(page);  in del_page_from_lru_list() local
    38 mem_cgroup_update_lru_size(lruvec, lru, -nr_pages);  in del_page_from_lru_list()
    40 __mod_zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru, -nr_pages);  in del_page_from_lru_list()

page_counter.h
    37 void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages);
    38 void page_counter_charge(struct page_counter *counter, unsigned long nr_pages);
    40 unsigned long nr_pages,
    42 void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages);
    45 unsigned long *nr_pages);

memory_hotplug.h
    83 extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
    105 unsigned long nr_pages);
    110 unsigned long nr_pages);
    244 extern int is_mem_section_removable(unsigned long pfn, unsigned long nr_pages);
    246 extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
    251 unsigned long nr_pages)  in is_mem_section_removable() argument
    258 static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages)  in offline_pages() argument
    271 extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);

quicklist.h
    18 int nr_pages;  member
    43 q->nr_pages--;  in quicklist_alloc()
    63 q->nr_pages++;  in __quicklist_free()

pagevec.h
    31 pgoff_t start, unsigned nr_pages);
    34 unsigned nr_pages);
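pagevec_lookup(), declared in pagevec.h above and defined in the mm/swap.c entry earlier, fills a pagevec with up to PAGEVEC_SIZE pages starting at an index. A sketch of the usual batched-walk pattern (the per-page callback is a hypothetical placeholder):

    /* Sketch: walk an address_space in pagevec-sized batches. */
    #include <linux/pagevec.h>

    static void process_page(struct page *page);	/* hypothetical consumer */

    static void walk_mapping(struct address_space *mapping)
    {
    	struct pagevec pvec;
    	pgoff_t index = 0;
    	unsigned i, nr_pages;

    	pagevec_init(&pvec, 0);
    	while ((nr_pages = pagevec_lookup(&pvec, mapping, index,
    					  PAGEVEC_SIZE))) {
    		for (i = 0; i < nr_pages; i++)
    			process_page(pvec.pages[i]);
    		/* Resume after the last page found in this batch. */
    		index = pvec.pages[nr_pages - 1]->index + 1;
    		pagevec_release(&pvec);	/* drop the references we hold */
    	}
    }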
swap.h
    131 pgoff_t nr_pages;  member
    323 unsigned long nr_pages,
    330 extern unsigned long shrink_all_memory(unsigned long nr_pages);
    349 extern void check_move_unevictable_pages(struct page **, int nr_pages);
    383 unsigned long nr_pages, sector_t start_block);

memory.h
    50 unsigned long nr_pages;  member
    65 unsigned int nr_pages; /* # pages in range to check */  member

vmstat.h
    280 static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,  in __mod_zone_freepage_state() argument
    283 __mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);  in __mod_zone_freepage_state()
    285 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);  in __mod_zone_freepage_state()

mpage.h
    17 unsigned nr_pages, get_block_t get_block);

splice.h
    56 int nr_pages; /* number of populated pages in map */  member

pagemap.h
    352 unsigned int nr_pages, struct page **pages);
    354 unsigned int nr_pages, struct page **pages);
    356 int tag, unsigned int nr_pages, struct page **pages);

fscache.h
    594 unsigned *nr_pages,  in fscache_read_or_alloc_pages() argument
    601 nr_pages, end_io_func,  in fscache_read_or_alloc_pages()

mm.h
    1208 unsigned long start, unsigned long nr_pages,
    1212 unsigned long start, unsigned long nr_pages,
    1216 unsigned long start, unsigned long nr_pages,
    1220 unsigned long start, unsigned long nr_pages,
    1224 unsigned long start, unsigned long nr_pages,
    1226 int get_user_pages_fast(unsigned long start, int nr_pages, int write,
    1229 int get_kernel_pages(const struct kvec *iov, int nr_pages, int write,
    1295 int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
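get_user_pages_fast(), declared in mm.h above, pins the pages backing a user address range without taking mmap_sem in the common case and returns how many it pinned (or a negative errno). A minimal sketch of pinning a buffer and releasing it again; the wrapper function is illustrative:

    /* Sketch: pin the pages backing a user buffer, then release them. */
    #include <linux/mm.h>
    #include <linux/pagemap.h>
    #include <linux/slab.h>

    static int pin_user_buffer(unsigned long start, int nr_pages, int write)
    {
    	struct page **pages;
    	int i, pinned;

    	pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
    	if (!pages)
    		return -ENOMEM;

    	pinned = get_user_pages_fast(start, nr_pages, write, pages);
    	/* ... up to 'pinned' pages (possibly fewer than requested,
    	 * or a negative errno) are usable here ... */
    	for (i = 0; i < pinned; i++)
    		page_cache_release(pages[i]);	/* put_page() in later kernels */
    	kfree(pages);
    	return pinned < 0 ? pinned : 0;
    }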
memcontrol.h
    445 unsigned long nr_pages);
    446 void memcg_uncharge_kmem(struct mem_cgroup *memcg, unsigned long nr_pages);

writeback.h
    100 void wakeup_flusher_threads(long nr_pages, enum wb_reason reason);

vmalloc.h
    37 unsigned int nr_pages;  member
/linux-4.1.27/drivers/gpu/drm/radeon/

drm_buffer.c
    46 int nr_pages = size / PAGE_SIZE + 1;  in drm_buffer_alloc() local
    51 *buf = kzalloc(sizeof(struct drm_buffer) + nr_pages*sizeof(char *),  in drm_buffer_alloc()
    57 size, nr_pages);  in drm_buffer_alloc()
    63 for (idx = 0; idx < nr_pages; ++idx) {  in drm_buffer_alloc()
    73 idx + 1, size, nr_pages);  in drm_buffer_alloc()
    100 int nr_pages = size / PAGE_SIZE + 1;  in drm_buffer_copy_from_user() local
    110 for (idx = 0; idx < nr_pages; ++idx) {  in drm_buffer_copy_from_user()
    134 int nr_pages = buf->size / PAGE_SIZE + 1;  in drm_buffer_free() local
    136 for (idx = 0; idx < nr_pages; ++idx)  in drm_buffer_free()
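A small aside on the rounding in drm_buffer.c above: size / PAGE_SIZE + 1 counts one page too many whenever size is already an exact multiple of PAGE_SIZE. The usual kernel idiom is DIV_ROUND_UP(); an illustrative comparison:

    #include <linux/kernel.h>	/* DIV_ROUND_UP() */

    /* For size == 2 * PAGE_SIZE:
     *   size / PAGE_SIZE + 1          -> 3 pages (one page wasted)
     *   DIV_ROUND_UP(size, PAGE_SIZE) -> 2 pages (exact)
     * For size == 2 * PAGE_SIZE + 1, both yield 3 pages. */
    static size_t pages_needed(size_t size)
    {
    	return DIV_ROUND_UP(size, PAGE_SIZE);
    }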
/linux-4.1.27/net/rds/

info.c
    163 unsigned long nr_pages = 0;  in rds_info_getsockopt() local
    188 nr_pages = (PAGE_ALIGN(start + len) - (start & PAGE_MASK))  in rds_info_getsockopt()
    191 pages = kmalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);  in rds_info_getsockopt()
    196 ret = get_user_pages_fast(start, nr_pages, 1, pages);  in rds_info_getsockopt()
    197 if (ret != nr_pages) {  in rds_info_getsockopt()
    199 nr_pages = ret;  in rds_info_getsockopt()
    201 nr_pages = 0;  in rds_info_getsockopt()
    206 rdsdebug("len %d nr_pages %lu\n", len, nr_pages);  in rds_info_getsockopt()
    238 for (i = 0; pages && i < nr_pages; i++)  in rds_info_getsockopt()

rdma.c
    157 static int rds_pin_pages(unsigned long user_addr, unsigned int nr_pages,  in rds_pin_pages() argument
    162 ret = get_user_pages_fast(user_addr, nr_pages, write, pages);  in rds_pin_pages()
    164 if (ret >= 0 && ret < nr_pages) {  in rds_pin_pages()
    177 unsigned int nr_pages;  in __rds_rdma_map() local
    197 nr_pages = rds_pages_in_vec(&args->vec);  in __rds_rdma_map()
    198 if (nr_pages == 0) {  in __rds_rdma_map()
    204 args->vec.addr, args->vec.bytes, nr_pages);  in __rds_rdma_map()
    207 pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);  in __rds_rdma_map()
    241 ret = rds_pin_pages(args->vec.addr, nr_pages, pages, 1);  in __rds_rdma_map()
    487 unsigned int nr_pages;  in rds_rdma_pages() local
    [all …]

/linux-4.1.27/kernel/events/

ring_buffer.c
    128 if (unlikely(!rb->nr_pages))  in perf_output_begin()
    175 handle->page = (offset >> page_shift) & (rb->nr_pages - 1);  in perf_output_begin()
    468 pgoff_t pgoff, int nr_pages, long watermark, int flags)  in rb_alloc_aux() argument
    482 max_order = ilog2(nr_pages);  in rb_alloc_aux()
    497 rb->aux_pages = kzalloc_node(nr_pages * sizeof(void *), GFP_KERNEL, node);  in rb_alloc_aux()
    502 for (rb->aux_nr_pages = 0; rb->aux_nr_pages < nr_pages;) {  in rb_alloc_aux()
    506 order = min(max_order, ilog2(nr_pages - rb->aux_nr_pages));  in rb_alloc_aux()
    530 rb->aux_priv = event->pmu->setup_aux(event->cpu, rb->aux_pages, nr_pages,  in rb_alloc_aux()
    549 rb->aux_watermark = nr_pages << (PAGE_SHIFT - 1);  in rb_alloc_aux()
    605 if (pgoff > rb->nr_pages)  in __perf_mmap_to_page()
    [all …]

internal.h
    19 int nr_pages; /* nr of data pages */  member
    69 rb_alloc(int nr_pages, long watermark, int cpu, int flags);
    72 pgoff_t pgoff, int nr_pages, long watermark, int flags);
    119 return rb->nr_pages << (PAGE_SHIFT + page_order(rb));  in perf_data_size()
    147 handle->page &= rb->nr_pages - 1;  \

/linux-4.1.27/include/trace/events/

migrate.h
    75 TP_PROTO(struct task_struct *p, int dst_nid, unsigned long nr_pages),
    77 TP_ARGS(p, dst_nid, nr_pages),
    83 __field( unsigned long, nr_pages)
    90 __entry->nr_pages = nr_pages;
    97 __entry->nr_pages)

writeback.h
    179 __field(long, nr_pages)
    190 __entry->nr_pages = work->nr_pages;
    202 __entry->nr_pages,
/linux-4.1.27/fs/

fs-writeback.c
    41 long nr_pages;  member
    138 __bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,  in __bdi_start_writeback() argument
    155 work->nr_pages = nr_pages;  in __bdi_start_writeback()
    174 void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,  in bdi_start_writeback() argument
    177 __bdi_start_writeback(bdi, nr_pages, true, reason);  in bdi_start_writeback()
    650 pages = min(pages, work->nr_pages);  in writeback_chunk_size()
    756 work->nr_pages -= write_chunk - wbc.nr_to_write;  in writeback_sb_inodes()
    773 if (work->nr_pages <= 0)  in writeback_sb_inodes()
    806 if (work->nr_pages <= 0)  in __writeback_inodes_wb()
    814 static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,  in writeback_inodes_wb() argument
    [all …]

splice.c
    185 unsigned int spd_pages = spd->nr_pages;  in splice_to_pipe()
    224 if (!--spd->nr_pages)  in splice_to_pipe()
    311 unsigned int loff, nr_pages, req_pages;  in __generic_file_splice_read() local
    333 nr_pages = min(req_pages, spd.nr_pages_max);  in __generic_file_splice_read()
    338 spd.nr_pages = find_get_pages_contig(mapping, index, nr_pages, spd.pages);  in __generic_file_splice_read()
    339 index += spd.nr_pages;  in __generic_file_splice_read()
    345 if (spd.nr_pages < nr_pages)  in __generic_file_splice_read()
    347 index, req_pages - spd.nr_pages);  in __generic_file_splice_read()
    350 while (spd.nr_pages < nr_pages) {  in __generic_file_splice_read()
    379 spd.pages[spd.nr_pages++] = page;  in __generic_file_splice_read()
    [all …]

pipe.c
    961 static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long nr_pages)  in pipe_set_size() argument
    971 if (nr_pages < pipe->nrbufs)  in pipe_set_size()
    974 bufs = kcalloc(nr_pages, sizeof(*bufs), GFP_KERNEL | __GFP_NOWARN);  in pipe_set_size()
    1002 pipe->buffers = nr_pages;  in pipe_set_size()
    1003 return nr_pages * PAGE_SIZE;  in pipe_set_size()
    1012 unsigned long nr_pages;  in round_pipe_size() local
    1014 nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;  in round_pipe_size()
    1015 return roundup_pow_of_two(nr_pages) << PAGE_SHIFT;  in round_pipe_size()
    1058 unsigned int size, nr_pages;  in pipe_fcntl() local
    1061 nr_pages = size >> PAGE_SHIFT;  in pipe_fcntl()
    [all …]
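A worked example of round_pipe_size() from the pipe.c entry above, assuming 4 KiB pages: an fcntl(F_SETPIPE_SZ) request of 100,000 bytes gives nr_pages = (100000 + 4095) >> 12 = 25, roundup_pow_of_two(25) = 32, so the pipe is resized to 32 << 12 = 131,072 bytes. Pipe capacity is thus always a power-of-two number of pages.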
aio.c
    116 long nr_pages;  member
    205 static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)  in aio_private_file() argument
    216 inode->i_size = PAGE_SIZE * nr_pages;  in aio_private_file()
    294 for (i = 0; i < ctx->nr_pages; i++) {  in aio_free_ring()
    379 if (idx < (pgoff_t)ctx->nr_pages) {  in aio_migratepage()
    433 int nr_pages;  in aio_setup_ring() local
    443 nr_pages = PFN_UP(size);  in aio_setup_ring()
    444 if (nr_pages < 0)  in aio_setup_ring()
    447 file = aio_private_file(ctx, nr_pages);  in aio_setup_ring()
    454 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring))  in aio_setup_ring()
    [all …]

mpage.c
    140 do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,  in do_mpage_readpage() argument
    164 last_block = block_in_file + nr_pages * blocks_per_page;  in do_mpage_readpage()
    280 min_t(int, nr_pages, bio_get_nr_vecs(bdev)),  in do_mpage_readpage()
    357 unsigned nr_pages, get_block_t get_block)  in mpage_readpages() argument
    367 for (page_idx = 0; page_idx < nr_pages; page_idx++) {  in mpage_readpages()
    375 nr_pages - page_idx,  in mpage_readpages()

direct-io.c
    650 int ret, nr_pages;  in dio_new_bio() local
    656 nr_pages = min(sdio->pages_in_io, bio_get_nr_vecs(map_bh->b_bdev));  in dio_new_bio()
    657 BUG_ON(nr_pages <= 0);  in dio_new_bio()
    658 dio_bio_alloc(dio, sdio, map_bh->b_bdev, sector, nr_pages);  in dio_new_bio()

/linux-4.1.27/fs/logfs/

dev_bdev.c
    77 size_t nr_pages)  in __bdev_writeseg() argument
    86 max_pages = min(nr_pages, (size_t) bio_get_nr_vecs(super->s_bdev));  in __bdev_writeseg()
    91 for (i = 0; i < nr_pages; i++) {  in __bdev_writeseg()
    105 nr_pages -= i;  in __bdev_writeseg()
    121 bio->bi_vcnt = nr_pages;  in __bdev_writeseg()
    122 bio->bi_iter.bi_size = nr_pages * PAGE_SIZE;  in __bdev_writeseg()
    171 size_t nr_pages)  in do_erase() argument
    178 max_pages = min(nr_pages, (size_t) bio_get_nr_vecs(super->s_bdev));  in do_erase()
    183 for (i = 0; i < nr_pages; i++) {  in do_erase()
    197 nr_pages -= i;  in do_erase()
    [all …]

dev_mtd.c
    184 size_t nr_pages)  in __logfs_mtd_writeseg() argument
    191 for (i = 0; i < nr_pages; i++) {  in __logfs_mtd_writeseg()

/linux-4.1.27/net/ipv4/

tcp_memcontrol.c
    57 static int tcp_update_limit(struct mem_cgroup *memcg, unsigned long nr_pages)  in tcp_update_limit() argument
    67 ret = page_counter_limit(&cg_proto->memory_allocated, nr_pages);  in tcp_update_limit()
    72 cg_proto->sysctl_mem[i] = min_t(long, nr_pages,  in tcp_update_limit()
    75 if (nr_pages == PAGE_COUNTER_MAX)  in tcp_update_limit()
    119 unsigned long nr_pages;  in tcp_cgroup_write() local
    127 ret = page_counter_memparse(buf, "-1", &nr_pages);  in tcp_cgroup_write()
    131 ret = tcp_update_limit(memcg, nr_pages);  in tcp_cgroup_write()
/linux-4.1.27/arch/arm/mach-rpc/include/mach/

uncompress.h
    23 unsigned long nr_pages;  member
    119 unsigned int nr_pages = 0, page_size = PAGE_SIZE;  in arch_decomp_setup() local
    138 nr_pages += (t->u.mem.size / PAGE_SIZE);  in arch_decomp_setup()
    144 nr_pages = params->nr_pages;  in arch_decomp_setup()
    189 if (nr_pages * page_size < 4096*1024) error("<4M of mem\n");  in arch_decomp_setup()

/linux-4.1.27/drivers/net/ethernet/ibm/ehea/

ehea_qmr.c
    107 int i, nr_pages;  in hw_queue_dtor() local
    114 nr_pages = queue->queue_length / queue->pagesize;  in hw_queue_dtor()
    116 for (i = 0; i < nr_pages; i += pages_per_kpage)  in hw_queue_dtor()
    153 ret = hw_queue_ctor(&cq->hw_queue, cq->attr.nr_pages,  in ehea_create_cq()
    158 for (counter = 0; counter < cq->attr.nr_pages; counter++) {  in ehea_create_cq()
    171 cq, hret, counter, cq->attr.nr_pages);  in ehea_create_cq()
    175 if (counter == (cq->attr.nr_pages - 1)) {  in ehea_create_cq()
    275 ret = hw_queue_ctor(&eq->hw_queue, eq->attr.nr_pages,  in ehea_create_eq()
    282 for (i = 0; i < eq->attr.nr_pages; i++) {  in ehea_create_eq()
    296 if (i == (eq->attr.nr_pages - 1)) {  in ehea_create_eq()
    [all …]

ehea.h
    260 u32 nr_pages;  member
    305 u32 nr_pages;  member

ehea_qmr.h
    398 int ehea_add_sect_bmap(unsigned long pfn, unsigned long nr_pages);
    399 int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages);

/linux-4.1.27/drivers/firmware/efi/libstub/

efi-stub-helper.c
    148 unsigned long nr_pages;  in efi_high_alloc() local
    165 nr_pages = round_up(size, EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE;  in efi_high_alloc()
    176 if (desc->num_pages < nr_pages)  in efi_high_alloc()
    209 nr_pages, &max_addr);  in efi_high_alloc()
    234 unsigned long nr_pages;  in efi_low_alloc() local
    250 nr_pages = round_up(size, EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE;  in efi_low_alloc()
    261 if (desc->num_pages < nr_pages)  in efi_low_alloc()
    281 nr_pages, &start);  in efi_low_alloc()
    299 unsigned long nr_pages;  in efi_free() local
    304 nr_pages = round_up(size, EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE;  in efi_free()
    [all …]
/linux-4.1.27/fs/btrfs/

compression.c
    68 unsigned long nr_pages;  member
    120 for (i = 0; i < cb->nr_pages; i++) {  in check_compressed_csum()
    193 for (index = 0; index < cb->nr_pages; index++) {  in end_compressed_bio_read()
    233 unsigned long nr_pages = end_index - index + 1;  in end_compressed_writeback() local
    240 while (nr_pages > 0) {  in end_compressed_writeback()
    243 nr_pages, ARRAY_SIZE(pages)), pages);  in end_compressed_writeback()
    245 nr_pages -= 1;  in end_compressed_writeback()
    255 nr_pages -= ret;  in end_compressed_writeback()
    307 for (index = 0; index < cb->nr_pages; index++) {  in end_compressed_bio_write()
    333 unsigned long nr_pages)  in btrfs_submit_compressed_write() argument
    [all …]

zlib.c
    88 int nr_pages = 0;  in zlib_compress_pages() local
    116 nr_pages = 1;  in zlib_compress_pages()
    146 if (nr_pages == nr_dest_pages) {  in zlib_compress_pages()
    157 pages[nr_pages] = out_page;  in zlib_compress_pages()
    158 nr_pages++;  in zlib_compress_pages()
    202 *out_pages = nr_pages;  in zlib_compress_pages()

lzo.c
    101 int nr_pages = 0;  in lzo_compress_pages() local
    135 nr_pages = 1;  in lzo_compress_pages()
    190 if (nr_pages == nr_dest_pages) {  in lzo_compress_pages()
    202 pages[nr_pages++] = out_page;  in lzo_compress_pages()
    245 *out_pages = nr_pages;  in lzo_compress_pages()

raid56.c
    140 int nr_pages;  member
    265 for (i = 0; i < rbio->nr_pages; i++) {  in cache_rbio_pages()
    312 for (i = 0; i < dest->nr_pages; i++) {  in steal_rbio()
    833 for (i = 0; i < rbio->nr_pages; i++) {  in __free_raid_bio()
    972 rbio->nr_pages = num_pages;  in alloc_rbio()
    1007 for (i = 0; i < rbio->nr_pages; i++) {  in alloc_rbio_pages()
    1027 for (; i < rbio->nr_pages; i++) {  in alloc_rbio_parity_pages()
    1502 int nr_pages = DIV_ROUND_UP(rbio->stripe_len, PAGE_CACHE_SIZE);  in raid56_rmw_stripe() local
    1521 for (pagenr = 0; pagenr < nr_pages; pagenr++) {  in raid56_rmw_stripe()
    1798 int nr_pages = DIV_ROUND_UP(rbio->stripe_len, PAGE_CACHE_SIZE);  in __raid_recover_end_io() local
    [all …]

compression.h
    45 unsigned long nr_pages);

extent_io.c
    1586 unsigned long nr_pages = end_index - index + 1;  in __unlock_for_delalloc() local
    1592 while (nr_pages > 0) {  in __unlock_for_delalloc()
    1594 min_t(unsigned long, nr_pages,  in __unlock_for_delalloc()
    1601 nr_pages -= ret;  in __unlock_for_delalloc()
    1765 unsigned long nr_pages = end_index - index + 1;  in extent_clear_unlock_delalloc() local
    1772 if ((page_ops & PAGE_SET_ERROR) && nr_pages > 0)  in extent_clear_unlock_delalloc()
    1775 while (nr_pages > 0) {  in extent_clear_unlock_delalloc()
    1778 nr_pages, ARRAY_SIZE(pages)), pages);  in extent_clear_unlock_delalloc()
    1800 nr_pages -= ret;  in extent_clear_unlock_delalloc()
    3126 struct page *pages[], int nr_pages,  in __do_contiguous_readpages() argument
    [all …]
/linux-4.1.27/fs/9p/

cache.c
    159 int loop, nr_pages;  in v9fs_cache_inode_now_uncached() local
    165 nr_pages = pagevec_lookup(&pvec, v9inode->vfs_inode.i_mapping,  in v9fs_cache_inode_now_uncached()
    168 if (!nr_pages)  in v9fs_cache_inode_now_uncached()
    171 for (loop = 0; loop < nr_pages; loop++)  in v9fs_cache_inode_now_uncached()
    174 first = pvec.pages[nr_pages - 1]->index + 1;  in v9fs_cache_inode_now_uncached()
    176 pvec.nr = nr_pages;  in v9fs_cache_inode_now_uncached()
    358 unsigned *nr_pages)  in __v9fs_readpages_from_fscache() argument
    363 p9_debug(P9_DEBUG_FSC, "inode %p pages %u\n", inode, *nr_pages);  in __v9fs_readpages_from_fscache()
    368 mapping, pages, nr_pages,  in __v9fs_readpages_from_fscache()
    379 BUG_ON(*nr_pages != 0);  in __v9fs_readpages_from_fscache()

cache.h
    51 unsigned *nr_pages);
    76 unsigned *nr_pages)  in v9fs_readpages_from_fscache() argument
    79 nr_pages);  in v9fs_readpages_from_fscache()
    132 unsigned *nr_pages)  in v9fs_readpages_from_fscache() argument

vfs_addr.c
    112 struct list_head *pages, unsigned nr_pages)  in v9fs_vfs_readpages() argument
    120 ret = v9fs_readpages_from_fscache(inode, mapping, pages, &nr_pages);  in v9fs_vfs_readpages()

/linux-4.1.27/arch/arm/xen/

p2m.c
    23 unsigned long nr_pages;  member
    72 entry->pfn + entry->nr_pages > pfn) {  in __pfn_to_mfn()
    120 unsigned long mfn, unsigned long nr_pages)  in __set_phys_to_machine_multi() argument
    132 p2m_entry->pfn + p2m_entry->nr_pages > pfn) {  in __set_phys_to_machine_multi()
    153 p2m_entry->nr_pages = nr_pages;  in __set_phys_to_machine_multi()

/linux-4.1.27/drivers/misc/genwqe/

card_utils.c
    305 sgl->nr_pages = DIV_ROUND_UP(sgl->fpage_offs + user_size, PAGE_SIZE);  in genwqe_alloc_sync_sgl()
    309 __func__, user_addr, user_size, sgl->nr_pages,  in genwqe_alloc_sync_sgl()
    314 sgl->sgl_size = genwqe_sgl_size(sgl->nr_pages);  in genwqe_alloc_sync_sgl()
    382 while (p < sgl->nr_pages) {  in genwqe_setup_sgl()
    400 } else if ((p == sgl->nr_pages - 1) &&  in genwqe_setup_sgl()
    421 if (p == sgl->nr_pages)  in genwqe_setup_sgl()
    437 if (p == sgl->nr_pages)  in genwqe_setup_sgl()
    519 static int free_user_pages(struct page **page_list, unsigned int nr_pages,  in free_user_pages() argument
    524 for (i = 0; i < nr_pages; i++) {  in free_user_pages()
    574 m->nr_pages = DIV_ROUND_UP(offs + size, PAGE_SIZE);  in genwqe_user_vmap()
    [all …]

card_base.h
    181 unsigned int nr_pages; /* number of pages */  member
    360 unsigned long nr_pages;  member

/linux-4.1.27/kernel/power/

snapshot.c
    1378 static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)  in preallocate_image_pages() argument
    1382 while (nr_pages > 0) {  in preallocate_image_pages()
    1393 nr_pages--;  in preallocate_image_pages()
    1400 static unsigned long preallocate_image_memory(unsigned long nr_pages,  in preallocate_image_memory() argument
    1409 if (nr_pages < alloc)  in preallocate_image_memory()
    1410 alloc = nr_pages;  in preallocate_image_memory()
    1416 static unsigned long preallocate_image_highmem(unsigned long nr_pages)  in preallocate_image_highmem() argument
    1418 return preallocate_image_pages(nr_pages, GFP_IMAGE | __GFP_HIGHMEM);  in preallocate_image_highmem()
    1431 static unsigned long preallocate_highmem_fraction(unsigned long nr_pages,  in preallocate_highmem_fraction() argument
    1435 unsigned long alloc = __fraction(nr_pages, highmem, total);  in preallocate_highmem_fraction()
    [all …]

swap.c
    446 int nr_pages;  in save_image() local
    457 nr_pages = 0;  in save_image()
    467 if (!(nr_pages % m))  in save_image()
    469 nr_pages / m * 10);  in save_image()
    470 nr_pages++;  in save_image()
    581 int nr_pages;  in save_image_lzo() local
    676 nr_pages = 0;  in save_image_lzo()
    692 if (!(nr_pages % m))  in save_image_lzo()
    696 nr_pages / m * 10);  in save_image_lzo()
    697 nr_pages++;  in save_image_lzo()
    [all …]
/linux-4.1.27/drivers/edac/

ie31200_edac.c
    406 unsigned long nr_pages;  in ie31200_probe1() local
    408 nr_pages = IE31200_PAGES(dimm_info[j][i].size);  in ie31200_probe1()
    409 if (nr_pages == 0)  in ie31200_probe1()
    413 nr_pages = nr_pages / 2;  in ie31200_probe1()
    417 dimm->nr_pages = nr_pages;  in ie31200_probe1()
    418 edac_dbg(0, "set nr pages: 0x%lx\n", nr_pages);  in ie31200_probe1()
    426 dimm->nr_pages = nr_pages;  in ie31200_probe1()
    427 edac_dbg(0, "set nr pages: 0x%lx\n", nr_pages);  in ie31200_probe1()

pasemi_edac.c
    157 dimm->nr_pages = 128 << (20 - PAGE_SHIFT);  in pasemi_edac_init_csrows()
    160 dimm->nr_pages = 256 << (20 - PAGE_SHIFT);  in pasemi_edac_init_csrows()
    164 dimm->nr_pages = 512 << (20 - PAGE_SHIFT);  in pasemi_edac_init_csrows()
    167 dimm->nr_pages = 1024 << (20 - PAGE_SHIFT);  in pasemi_edac_init_csrows()
    170 dimm->nr_pages = 2048 << (20 - PAGE_SHIFT);  in pasemi_edac_init_csrows()
    180 csrow->last_page = csrow->first_page + dimm->nr_pages - 1;  in pasemi_edac_init_csrows()
    181 last_page_in_mmc += dimm->nr_pages;  in pasemi_edac_init_csrows()

cell_edac.c
    135 u32 nr_pages;  in cell_edac_init_csrows() local
    149 nr_pages = resource_size(&r) >> PAGE_SHIFT;  in cell_edac_init_csrows()
    150 csrow->last_page = csrow->first_page + nr_pages - 1;  in cell_edac_init_csrows()
    156 dimm->nr_pages = nr_pages / csrow->nr_channels;  in cell_edac_init_csrows()
    162 csrow->first_page, nr_pages);  in cell_edac_init_csrows()

i3200_edac.c
    395 unsigned long nr_pages;  in i3200_probe1() local
    401 nr_pages = drb_to_nr_pages(drbs, stacked, j, i);  in i3200_probe1()
    402 if (nr_pages == 0)  in i3200_probe1()
    406 stacked ? " (stacked)" : "", PAGES_TO_MiB(nr_pages));  in i3200_probe1()
    408 dimm->nr_pages = nr_pages;  in i3200_probe1()
    409 dimm->grain = nr_pages << PAGE_SHIFT;  in i3200_probe1()

x38_edac.c
    376 unsigned long nr_pages;  in x38_probe1() local
    379 nr_pages = drb_to_nr_pages(drbs, stacked,  in x38_probe1()
    383 if (nr_pages == 0)  in x38_probe1()
    389 dimm->nr_pages = nr_pages / x38_channel_num;  in x38_probe1()
    390 dimm->grain = nr_pages << PAGE_SHIFT;  in x38_probe1()

ghes_edac.c
    111 dimm->nr_pages = MiB_TO_PAGES(32);/* Unknown */  in ghes_edac_dmidecode()
    113 dimm->nr_pages = MiB_TO_PAGES(entry->extended_size);  in ghes_edac_dmidecode()
    116 dimm->nr_pages = MiB_TO_PAGES((entry->size &  in ghes_edac_dmidecode()
    119 dimm->nr_pages = MiB_TO_PAGES(entry->size);  in ghes_edac_dmidecode()
    174 if (dimm->nr_pages) {  in ghes_edac_dmidecode()
    177 PAGES_TO_MiB(dimm->nr_pages),  in ghes_edac_dmidecode()
    512 dimm->nr_pages = 1;  in ghes_edac_register()

edac_mc_sysfs.c
    187 u32 nr_pages = 0;  in csrow_size_show() local
    190 nr_pages += csrow->channels[i]->dimm->nr_pages;  in csrow_size_show()
    191 return sprintf(data, "%u\n", PAGES_TO_MiB(nr_pages));  in csrow_size_show()
    370 if (!csrow->channels[idx]->dimm->nr_pages)  in csrow_dev_is_visible()
    394 int chan, nr_pages = 0;  in nr_pages_per_csrow() local
    397 nr_pages += csrow->channels[chan]->dimm->nr_pages;  in nr_pages_per_csrow()
    399 return nr_pages;  in nr_pages_per_csrow()
    517 return sprintf(data, "%u\n", PAGES_TO_MiB(dimm->nr_pages));  in dimmdev_size_show()
    769 total_pages += dimm->nr_pages;  in mci_size_mb_show()
    1025 if (!dimm->nr_pages)  in edac_create_sysfs_mci_device()
    [all …]

i82975x_edac.c
    376 u32 cumul_size, nr_pages;  in i82975x_init_csrows() local
    407 nr_pages = cumul_size - last_cumul_size;  in i82975x_init_csrows()
    408 if (!nr_pages)  in i82975x_init_csrows()
    421 dimm->nr_pages = nr_pages / csrow->nr_channels;  in i82975x_init_csrows()

edac_mc.c
    92 edac_dbg(4, " dimm->nr_pages = 0x%x\n", dimm->nr_pages);  in edac_mc_dump_dimm()
    94 edac_dbg(4, " dimm->nr_pages = 0x%x\n", dimm->nr_pages);  in edac_mc_dump_dimm()
    736 u32 nr_pages = 0;  in edac_mc_add_mc_with_groups() local
    740 nr_pages += csrow->channels[j]->dimm->nr_pages;  in edac_mc_add_mc_with_groups()
    741 if (!nr_pages)  in edac_mc_add_mc_with_groups()
    745 if (csrow->channels[j]->dimm->nr_pages)  in edac_mc_add_mc_with_groups()
    749 if (mci->dimms[i]->nr_pages)  in edac_mc_add_mc_with_groups()
    892 n += dimm->nr_pages;  in edac_mc_find_csrow_by_page()
    1224 if (e->enable_per_layer_report && dimm->nr_pages) {  in edac_mc_handle_error()
i3000_edac.c
    317 unsigned long last_cumul_size, nr_pages;  in i3000_probe1() local
    408 nr_pages = cumul_size - last_cumul_size;  in i3000_probe1()
    414 dimm->nr_pages = nr_pages / nr_channels;  in i3000_probe1()

i82875p_edac.c
    351 u32 cumul_size, nr_pages;  in i82875p_init_csrows() local
    374 nr_pages = cumul_size - last_cumul_size;  in i82875p_init_csrows()
    380 dimm->nr_pages = nr_pages / nr_chans;  in i82875p_init_csrows()

amd76x_edac.c
    211 dimm->nr_pages = (mba_mask + 1) >> PAGE_SHIFT;  in amd76x_init_csrows()
    212 csrow->last_page = csrow->first_page + dimm->nr_pages - 1;  in amd76x_init_csrows()
    214 dimm->grain = dimm->nr_pages << PAGE_SHIFT;  in amd76x_init_csrows()

e7xxx_edac.c
    361 u32 dra, cumul_size, nr_pages;  in e7xxx_init_csrows() local
    392 nr_pages = cumul_size - last_cumul_size;  in e7xxx_init_csrows()
    413 dimm->nr_pages = nr_pages / (drc_chan + 1);  in e7xxx_init_csrows()

cpc925_edac.c
    335 unsigned long row_size, nr_pages, last_nr_pages = 0;  in cpc925_init_csrows() local
    354 nr_pages = row_size >> PAGE_SHIFT;  in cpc925_init_csrows()
    355 csrow->last_page = csrow->first_page + nr_pages - 1;  in cpc925_init_csrows()
    383 dimm->nr_pages = nr_pages / csrow->nr_channels;  in cpc925_init_csrows()

ppc4xx_edac.c
    901 u32 mbxcf, size, nr_pages;  in ppc4xx_edac_init_csrows() local
    952 nr_pages = SDRAM_MBCF_SZ_TO_PAGES(size);  in ppc4xx_edac_init_csrows()
    978 dimm->nr_pages = nr_pages / csi->nr_channels;  in ppc4xx_edac_init_csrows()

amd64_edac.c
    2374 u32 cs_mode, nr_pages;  in get_csrow_nr_pages() local
    2387 nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode, (csrow_nr / 2))  in get_csrow_nr_pages()
    2392 edac_dbg(0, "nr_pages/channel: %u\n", nr_pages);  in get_csrow_nr_pages()
    2394 return nr_pages;  in get_csrow_nr_pages()
    2408 int nr_pages = 0;  in init_csrows() local
    2439 nr_pages = get_csrow_nr_pages(pvt, 0, i);  in init_csrows()
    2440 csrow->channels[0]->dimm->nr_pages = nr_pages;  in init_csrows()
    2447 csrow->channels[1]->dimm->nr_pages = row_dct1_pages;  in init_csrows()
    2448 nr_pages += row_dct1_pages;  in init_csrows()
    2451 edac_dbg(1, "Total csrow%d pages: %u\n", i, nr_pages);  in init_csrows()

tile_edac.c
    116 dimm->nr_pages = mem_info.mem_size >> PAGE_SHIFT;  in tile_edac_init_csrows()

e752x_edac.c
    1077 u32 dra, drc, cumul_size, i, nr_pages;  in e752x_init_csrows() local
    1110 nr_pages = cumul_size - last_cumul_size;  in e752x_init_csrows()
    1131 dimm->nr_pages = nr_pages / csrow->nr_channels;  in e752x_init_csrows()
/linux-4.1.27/drivers/infiniband/hw/ehca/

ehca_eq.c
    59 u32 nr_pages;  in ehca_create_eq() local
    83 &nr_pages, &eq->ist);  in ehca_create_eq()
    90 ret = ipz_queue_ctor(NULL, &eq->ipz_queue, nr_pages,  in ehca_create_eq()
    97 for (i = 0; i < nr_pages; i++) {  in ehca_create_eq()
    110 if (i == (nr_pages - 1)) {  in ehca_create_eq()

ipz_pt_fn.c
    258 int i, nr_pages;  in ipz_queue_dtor() local
    268 nr_pages = queue->queue_length / queue->pagesize;  in ipz_queue_dtor()
    269 for (i = 0; i < nr_pages; i += PAGES_PER_KPAGE)  in ipz_queue_dtor()

/linux-4.1.27/fs/exofs/

inode.c
    57 unsigned nr_pages;  member
    78 pcol->nr_pages = 0;  in _pcol_init()
    87 pcol->expected_pages -= min(pcol->nr_pages, pcol->expected_pages);  in _pcol_reset()
    91 pcol->nr_pages = 0;  in _pcol_reset()
    140 if (unlikely(pcol->nr_pages >= pcol->alloc_pages))  in pcol_add_page()
    143 pcol->pages[pcol->nr_pages++] = page;  in pcol_add_page()
    212 pcol->nr_pages);  in __readpages_done()
    214 for (i = 0; i < pcol->nr_pages; i++) {  in __readpages_done()
    256 for (i = 0; i < pcol->nr_pages; i++) {  in _unlock_pcol_pages()
    272 BUG_ON(pcol_src->nr_pages < ios->nr_pages);  in _maybe_not_all_in_one_io()
    [all …]

/linux-4.1.27/arch/s390/mm/

init.c
    173 unsigned long zone_start_pfn, zone_end_pfn, nr_pages;  in arch_add_memory() local
    195 nr_pages = (start_pfn + size_pages > zone_end_pfn) ?  in arch_add_memory()
    197 rc = __add_pages(nid, zone, start_pfn, nr_pages);  in arch_add_memory()
    200 start_pfn += nr_pages;  in arch_add_memory()
    201 size_pages -= nr_pages;  in arch_add_memory()

gup.c
    166 int __get_user_pages_fast(unsigned long start, int nr_pages, int write,  in __get_user_pages_fast() argument
    177 len = (unsigned long) nr_pages << PAGE_SHIFT;  in __get_user_pages_fast()
    220 int get_user_pages_fast(unsigned long start, int nr_pages, int write,  in get_user_pages_fast() argument
    227 nr = __get_user_pages_fast(start, nr_pages, write, pages);  in get_user_pages_fast()
    228 if (nr == nr_pages)  in get_user_pages_fast()
    235 nr_pages - nr, write, 0, pages);  in get_user_pages_fast()

/linux-4.1.27/drivers/gpu/drm/exynos/

exynos_drm_buf.c
    25 unsigned int nr_pages;  in lowlevel_buffer_allocate() local
    54 nr_pages = buf->size >> PAGE_SHIFT;  in lowlevel_buffer_allocate()
    60 buf->pages = drm_calloc_large(nr_pages, sizeof(struct page *));  in lowlevel_buffer_allocate()
    77 while (i < nr_pages) {  in lowlevel_buffer_allocate()
    93 buf->sgt = drm_prime_pages_to_sg(buf->pages, nr_pages);  in lowlevel_buffer_allocate()

exynos_drm_fbdev.c
    85 unsigned int nr_pages;  in exynos_drm_fbdev_update() local
    98 nr_pages = buffer->size >> PAGE_SHIFT;  in exynos_drm_fbdev_update()
    101 nr_pages, VM_MAP,  in exynos_drm_fbdev_update()

/linux-4.1.27/arch/powerpc/kvm/

book3s_hv_builtin.c
    52 struct page *kvm_alloc_hpt(unsigned long nr_pages)  in kvm_alloc_hpt() argument
    54 VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);  in kvm_alloc_hpt()
    56 return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES));  in kvm_alloc_hpt()
    60 void kvm_release_hpt(struct page *page, unsigned long nr_pages)  in kvm_release_hpt() argument
    62 cma_release(kvm_cma, page, nr_pages);  in kvm_release_hpt()
/linux-4.1.27/drivers/media/pci/cx23885/

cx23885-alsa.c
    83 static int cx23885_alsa_dma_init(struct cx23885_audio_dev *chip, int nr_pages)  in cx23885_alsa_dma_init() argument
    89 buf->vaddr = vmalloc_32(nr_pages << PAGE_SHIFT);  in cx23885_alsa_dma_init()
    91 dprintk(1, "vmalloc_32(%d pages) failed\n", nr_pages);  in cx23885_alsa_dma_init()
    97 nr_pages << PAGE_SHIFT);  in cx23885_alsa_dma_init()
    99 memset(buf->vaddr, 0, nr_pages << PAGE_SHIFT);  in cx23885_alsa_dma_init()
    100 buf->nr_pages = nr_pages;  in cx23885_alsa_dma_init()
    102 buf->sglist = vzalloc(buf->nr_pages * sizeof(*buf->sglist));  in cx23885_alsa_dma_init()
    106 sg_init_table(buf->sglist, buf->nr_pages);  in cx23885_alsa_dma_init()
    107 for (i = 0; i < buf->nr_pages; i++) {  in cx23885_alsa_dma_init()
    129 buf->nr_pages, PCI_DMA_FROMDEVICE);  in cx23885_alsa_dma_map()

/linux-4.1.27/fs/nfs/

fscache-index.c
    266 int loop, nr_pages;  in nfs_fscache_inode_now_uncached() local
    275 nr_pages = pagevec_lookup(&pvec,  in nfs_fscache_inode_now_uncached()
    279 if (!nr_pages)  in nfs_fscache_inode_now_uncached()
    282 for (loop = 0; loop < nr_pages; loop++)  in nfs_fscache_inode_now_uncached()
    285 first = pvec.pages[nr_pages - 1]->index + 1;  in nfs_fscache_inode_now_uncached()
    287 pvec.nr = nr_pages;  in nfs_fscache_inode_now_uncached()

fscache.c
    370 unsigned *nr_pages)  in __nfs_readpages_from_fscache() argument
    372 unsigned npages = *nr_pages;  in __nfs_readpages_from_fscache()
    379 mapping, pages, nr_pages,  in __nfs_readpages_from_fscache()
    383 if (*nr_pages < npages)  in __nfs_readpages_from_fscache()
    386 if (*nr_pages > 0)  in __nfs_readpages_from_fscache()
    388 *nr_pages);  in __nfs_readpages_from_fscache()
    393 BUG_ON(*nr_pages != 0);  in __nfs_readpages_from_fscache()

fscache.h
    133 unsigned *nr_pages)  in nfs_readpages_from_fscache() argument
    137 nr_pages);  in nfs_readpages_from_fscache()
    212 unsigned *nr_pages)  in nfs_readpages_from_fscache() argument

read.c
    369 struct list_head *pages, unsigned nr_pages)  in nfs_readpages() argument
    383 nr_pages);  in nfs_readpages()
    400 pages, &nr_pages);  in nfs_readpages()
/linux-4.1.27/arch/x86/kernel/cpu/

perf_event_intel_pt.c
    381 p = virt_to_page(buf->data_pages[buf->nr_pages]);  in topa_insert_pages()
    403 buf->nr_pages += 1ul << order;  in topa_insert_pages()
    481 ((buf->nr_pages << PAGE_SHIFT) - 1));  in pt_update_head()
    483 base += buf->nr_pages << PAGE_SHIFT;  in pt_update_head()
    600 pg &= buf->nr_pages - 1;  in pt_topa_next_entry()
    647 idx &= buf->nr_pages - 1;  in pt_buffer_reset_markers()
    657 idx &= buf->nr_pages - 1;  in pt_buffer_reset_markers()
    681 while (pg < buf->nr_pages) {  in pt_buffer_setup_topa_index()
    715 head &= (buf->nr_pages << PAGE_SHIFT) - 1;  in pt_buffer_reset_offsets()
    717 pg = (head >> PAGE_SHIFT) & (buf->nr_pages - 1);  in pt_buffer_reset_offsets()
    [all …]

perf_event_intel_bts.c
    51 unsigned int nr_pages;  member
    74 bts_buffer_setup_aux(int cpu, void **pages, int nr_pages, bool overwrite)  in bts_buffer_setup_aux() argument
    80 size_t size = nr_pages << PAGE_SHIFT;  in bts_buffer_setup_aux()
    84 for (pg = 0, nbuf = 0; pg < nr_pages;) {  in bts_buffer_setup_aux()
    86 if (WARN_ON_ONCE(!PagePrivate(page) && nr_pages > 1))  in bts_buffer_setup_aux()
    102 buf->nr_pages = nr_pages;  in bts_buffer_setup_aux()
    312 head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);  in bts_buffer_reset()
    432 buf->nr_pages << PAGE_SHIFT);  in bts_event_del()

intel_pt.h
    111 unsigned long nr_pages;  member

/linux-4.1.27/fs/ceph/

cache.c
    151 int loop, nr_pages;  in ceph_fscache_inode_now_uncached() local
    159 nr_pages = pagevec_lookup(&pvec, ci->vfs_inode.i_mapping, first,  in ceph_fscache_inode_now_uncached()
    162 if (!nr_pages)  in ceph_fscache_inode_now_uncached()
    165 for (loop = 0; loop < nr_pages; loop++)  in ceph_fscache_inode_now_uncached()
    168 first = pvec.pages[nr_pages - 1]->index + 1;  in ceph_fscache_inode_now_uncached()
    170 pvec.nr = nr_pages;  in ceph_fscache_inode_now_uncached()
    282 unsigned *nr_pages)  in ceph_readpages_from_fscache() argument
    290 ret = fscache_read_or_alloc_pages(ci->fscache, mapping, pages, nr_pages,  in ceph_readpages_from_fscache()

addr.c
    320 int nr_pages = 0;  in start_read() local
    330 nr_pages++;  in start_read()
    332 if (max && nr_pages == max)  in start_read()
    335 len = nr_pages << PAGE_CACHE_SHIFT;  in start_read()
    336 dout("start_read %p nr_pages %d is %lld~%lld\n", inode, nr_pages,  in start_read()
    348 nr_pages = calc_pages_for(0, len);  in start_read()
    349 pages = kmalloc(sizeof(*pages) * nr_pages, GFP_NOFS);  in start_read()
    353 for (i = 0; i < nr_pages; ++i) {  in start_read()
    366 nr_pages = i;  in start_read()
    382 return nr_pages;  in start_read()
    [all …]

cache.h
    46 unsigned *nr_pages);
    134 unsigned *nr_pages)  in ceph_readpages_from_fscache() argument

/linux-4.1.27/arch/arm/kernel/

atags_compat.c
    46 unsigned long nr_pages; /* 4 */  member
    107 if (params->u1.s.nr_pages != 0x02000 &&  in build_tag_list()
    108 params->u1.s.nr_pages != 0x04000 &&  in build_tag_list()
    109 params->u1.s.nr_pages != 0x08000 &&  in build_tag_list()
    110 params->u1.s.nr_pages != 0x10000) {  in build_tag_list()
    113 params->u1.s.nr_pages = 0x1000; /* 16MB */  in build_tag_list()
    161 tag = memtag(tag, PHYS_OFFSET, params->u1.s.nr_pages * PAGE_SIZE);  in build_tag_list()
/linux-4.1.27/arch/x86/mm/ |
D | numa_32.c | 67 unsigned long nr_pages = end_pfn - start_pfn; in node_memmap_size_bytes() local 69 if (!nr_pages) in node_memmap_size_bytes() 72 return (nr_pages + 1) * sizeof(struct page); in node_memmap_size_bytes()
|
D | gup.c | 257 int __get_user_pages_fast(unsigned long start, int nr_pages, int write, in __get_user_pages_fast() argument 269 len = (unsigned long) nr_pages << PAGE_SHIFT; in __get_user_pages_fast() 325 int get_user_pages_fast(unsigned long start, int nr_pages, int write, in get_user_pages_fast() argument 336 len = (unsigned long) nr_pages << PAGE_SHIFT; in get_user_pages_fast()
|
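The x86 gup.c hits, like the sh, sparc and mips copies later in this list, start identically: widen nr_pages to unsigned long before shifting so the byte length cannot overflow a 32-bit int, then range-check the span. A sketch of just that prologue; the final limit test varies per architecture, so the access_ok() call here is a stand-in:

	#include <linux/mm.h>
	#include <linux/errno.h>
	#include <linux/uaccess.h>

	static int check_gup_range(unsigned long start, int nr_pages)
	{
		unsigned long len, end;

		/* Cast before shifting: an int-width "nr_pages << PAGE_SHIFT"
		 * can overflow for large counts on 32-bit. */
		len = (unsigned long)nr_pages << PAGE_SHIFT;
		end = start + len;

		if (end < start)	/* wrapped around the address space */
			return -EFAULT;
		if (!access_ok(VERIFY_READ, (void __user *)start, len))
			return -EFAULT;
		return 0;
	}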
D | init_64.c | 696 unsigned long nr_pages = size >> PAGE_SHIFT; in arch_add_memory() local 701 ret = __add_pages(nid, zone, start_pfn, nr_pages); in arch_add_memory() 716 unsigned int nr_pages = 1 << order; in free_pagetable() local 724 while (nr_pages--) in free_pagetable() 727 while (nr_pages--) in free_pagetable() 1020 unsigned long nr_pages = size >> PAGE_SHIFT; in arch_remove_memory() local 1026 ret = __remove_pages(zone, start_pfn, nr_pages); in arch_remove_memory() 1316 unsigned int nr_pages; in register_page_bootmem_memmap() local 1356 nr_pages = 1 << (get_order(PMD_SIZE)); in register_page_bootmem_memmap() 1358 while (nr_pages--) in register_page_bootmem_memmap()
|
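The arch_add_memory()/arch_remove_memory() hits here and in the powerpc, sh and ia64 entries below all reduce to one conversion: the byte size handed in by the hotplug core becomes a pfn count via `size >> PAGE_SHIFT`. A condensed sketch (the zone is normally derived from the node; it is a parameter here only to keep the fragment self-contained):

	#include <linux/memory_hotplug.h>

	static int add_memory_range(int nid, struct zone *zone,
				    u64 start, u64 size)
	{
		unsigned long start_pfn = start >> PAGE_SHIFT;
		unsigned long nr_pages = size >> PAGE_SHIFT;

		return __add_pages(nid, zone, start_pfn, nr_pages);
	}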
D | tlb.c | 119 unsigned long nr_pages = in flush_tlb_func() local 126 trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, nr_pages); in flush_tlb_func()
|
D | init_32.c | 832 unsigned long nr_pages = size >> PAGE_SHIFT; in arch_add_memory() local 834 return __add_pages(nid, zone, start_pfn, nr_pages); in arch_add_memory() 841 unsigned long nr_pages = size >> PAGE_SHIFT; in arch_remove_memory() local 845 return __remove_pages(zone, start_pfn, nr_pages); in arch_remove_memory()
|
D | ioremap.c | 53 static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages, in __ioremap_check_ram() argument 58 for (i = 0; i < nr_pages; ++i) in __ioremap_check_ram()
|
/linux-4.1.27/fs/cifs/ |
D | file.c | 1892 unsigned int nr_pages; in wdata_alloc_and_fillpages() local 1910 nr_pages = find_get_pages_tag(mapping, index, in wdata_alloc_and_fillpages() 1913 *found_pages += nr_pages; in wdata_alloc_and_fillpages() 1914 tofind -= nr_pages; in wdata_alloc_and_fillpages() 1915 pages += nr_pages; in wdata_alloc_and_fillpages() 1916 } while (nr_pages && tofind && *index <= end); in wdata_alloc_and_fillpages() 1927 unsigned int nr_pages = 0, i; in wdata_prepare_pages() local 1940 if (nr_pages == 0) in wdata_prepare_pages() 1985 ++nr_pages; in wdata_prepare_pages() 1989 if (nr_pages == 0) in wdata_prepare_pages() [all …]
|
D | cache.c | 300 int loop, nr_pages; in cifs_fscache_inode_now_uncached() local 308 nr_pages = pagevec_lookup(&pvec, in cifs_fscache_inode_now_uncached() 311 if (!nr_pages) in cifs_fscache_inode_now_uncached() 314 for (loop = 0; loop < nr_pages; loop++) in cifs_fscache_inode_now_uncached() 317 first = pvec.pages[nr_pages - 1]->index + 1; in cifs_fscache_inode_now_uncached() 319 pvec.nr = nr_pages; in cifs_fscache_inode_now_uncached()
|
D | fscache.h | 80 unsigned *nr_pages) in cifs_readpages_from_fscache() argument 84 nr_pages); in cifs_readpages_from_fscache() 134 unsigned *nr_pages) in cifs_readpages_from_fscache() argument
|
D | fscache.c | 187 unsigned *nr_pages) in __cifs_readpages_from_fscache() argument 192 __func__, CIFS_I(inode)->fscache, *nr_pages, inode); in __cifs_readpages_from_fscache() 194 pages, nr_pages, in __cifs_readpages_from_fscache()
|
/linux-4.1.27/fs/ntfs/ |
D | file.c | 509 pgoff_t index, const unsigned nr_pages, struct page **pages, in __ntfs_grab_cache_pages() argument 514 BUG_ON(!nr_pages); in __ntfs_grab_cache_pages() 539 } while (nr < nr_pages); in __ntfs_grab_cache_pages() 584 unsigned nr_pages, s64 pos, size_t bytes) in ntfs_prepare_pages_for_non_resident_write() argument 612 BUG_ON(!nr_pages); in ntfs_prepare_pages_for_non_resident_write() 620 vi->i_ino, ni->type, pages[0]->index, nr_pages, in ntfs_prepare_pages_for_non_resident_write() 637 } while (++u < nr_pages); in ntfs_prepare_pages_for_non_resident_write() 1190 if (likely(!err && ++u < nr_pages)) in ntfs_prepare_pages_for_non_resident_write() 1236 } while (++u < nr_pages); in ntfs_prepare_pages_for_non_resident_write() 1344 nr_pages = u; in ntfs_prepare_pages_for_non_resident_write() [all …]
|
D | compress.c | 517 unsigned int nr_pages = (end_vcn - start_vcn) << in ntfs_read_compressed_block() local 526 "%i.", index, cb_size, nr_pages); in ntfs_read_compressed_block() 534 pages = kmalloc(nr_pages * sizeof(struct page *), GFP_NOFS); in ntfs_read_compressed_block() 575 if (nr_pages < max_page) in ntfs_read_compressed_block() 576 max_page = nr_pages; in ntfs_read_compressed_block()
|
/linux-4.1.27/arch/s390/pci/ |
D | pci_dma.c | 145 unsigned int nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; in dma_update_trans() local 151 if (!nr_pages) in dma_update_trans() 158 for (i = 0; i < nr_pages; i++) { in dma_update_trans() 177 nr_pages * PAGE_SIZE); in dma_update_trans() 281 unsigned long nr_pages, iommu_page_index; in s390_dma_map_pages() local 287 nr_pages = iommu_num_pages(pa, size, PAGE_SIZE); in s390_dma_map_pages() 288 iommu_page_index = dma_alloc_iommu(zdev, nr_pages); in s390_dma_map_pages() 293 size = nr_pages * PAGE_SIZE; in s390_dma_map_pages() 303 atomic64_add(nr_pages, &zdev->mapped_pages); in s390_dma_map_pages() 308 dma_free_iommu(zdev, iommu_page_index, nr_pages); in s390_dma_map_pages()
|
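s390_dma_map_pages() sizes its IOMMU allocation with iommu_num_pages(), which charges the sub-page offset of the buffer to the first page; the size alone would undercount. For example, a 32-byte buffer starting at offset 0xff0 of a 4K page spans two IOMMU pages. A one-function sketch:

	#include <linux/iommu-helper.h>

	/* IOMMU pages occupied by [pa, pa + size): the offset of pa within
	 * its page is counted against the first page. */
	static unsigned long pages_for_mapping(unsigned long pa, size_t size)
	{
		return iommu_num_pages(pa, size, PAGE_SIZE);
	}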
/linux-4.1.27/drivers/media/pci/cx25821/ |
D | cx25821-alsa.c | 70 int nr_pages; member 147 static int cx25821_alsa_dma_init(struct cx25821_audio_dev *chip, int nr_pages) in cx25821_alsa_dma_init() argument 153 buf->vaddr = vmalloc_32(nr_pages << PAGE_SHIFT); in cx25821_alsa_dma_init() 155 dprintk(1, "vmalloc_32(%d pages) failed\n", nr_pages); in cx25821_alsa_dma_init() 161 nr_pages << PAGE_SHIFT); in cx25821_alsa_dma_init() 163 memset(buf->vaddr, 0, nr_pages << PAGE_SHIFT); in cx25821_alsa_dma_init() 164 buf->nr_pages = nr_pages; in cx25821_alsa_dma_init() 166 buf->sglist = vzalloc(buf->nr_pages * sizeof(*buf->sglist)); in cx25821_alsa_dma_init() 170 sg_init_table(buf->sglist, buf->nr_pages); in cx25821_alsa_dma_init() 171 for (i = 0; i < buf->nr_pages; i++) { in cx25821_alsa_dma_init() [all …]
|
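This cx25821 entry, and the near-identical copies in the carma, cx88 and saa7134 drivers below, are the stock vmalloc-backed DMA buffer recipe: allocate with vmalloc_32() so every page has a 32-bit physical address, then build one scatterlist entry per page via vmalloc_to_page(). A condensed sketch with hypothetical names:

	#include <linux/mm.h>
	#include <linux/string.h>
	#include <linux/vmalloc.h>
	#include <linux/scatterlist.h>

	struct my_dma_buf {
		void *vaddr;
		struct scatterlist *sglist;
		int nr_pages;
	};

	static int my_dma_init(struct my_dma_buf *buf, int nr_pages)
	{
		int i;

		buf->vaddr = vmalloc_32(nr_pages << PAGE_SHIFT);
		if (!buf->vaddr)
			return -ENOMEM;
		memset(buf->vaddr, 0, nr_pages << PAGE_SHIFT);
		buf->nr_pages = nr_pages;

		buf->sglist = vzalloc(buf->nr_pages * sizeof(*buf->sglist));
		if (!buf->sglist) {
			vfree(buf->vaddr);
			return -ENOMEM;
		}

		sg_init_table(buf->sglist, buf->nr_pages);
		for (i = 0; i < buf->nr_pages; i++) {
			/* vmalloc_32() pages are always present. */
			struct page *pg = vmalloc_to_page(buf->vaddr +
							  i * PAGE_SIZE);

			sg_set_page(&buf->sglist[i], pg, PAGE_SIZE, 0);
		}
		return 0;
	}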
/linux-4.1.27/net/9p/ |
D | trans_common.c | 21 void p9_release_pages(struct page **pages, int nr_pages) in p9_release_pages() argument 25 for (i = 0; i < nr_pages; i++) in p9_release_pages()
|
D | trans_virtio.c | 225 struct page **pdata, int nr_pages, size_t offs, int count) in pack_sg_list_p() argument 231 BUG_ON(nr_pages > (limit - start)); in pack_sg_list_p() 236 while (nr_pages) { in pack_sg_list_p() 245 nr_pages--; in pack_sg_list_p() 321 int nr_pages; in p9_get_mapped_pages() local 343 nr_pages = DIV_ROUND_UP(n + *offs, PAGE_SIZE); in p9_get_mapped_pages() 344 atomic_add(nr_pages, &vp_pinned); in p9_get_mapped_pages() 364 nr_pages = DIV_ROUND_UP((unsigned long)p + len, PAGE_SIZE) - in p9_get_mapped_pages() 367 *pages = kmalloc(sizeof(struct page *) * nr_pages, GFP_NOFS); in p9_get_mapped_pages() 373 for (index = 0; index < nr_pages; index++) { in p9_get_mapped_pages()
|
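p9_get_mapped_pages() shows the two standard page-count computations: a buffer beginning offs bytes into its first page spans DIV_ROUND_UP(len + offs, PAGE_SIZE) pages, and an arbitrary pointer spans the rounded-up end minus the rounded-down start. Both as small helpers:

	#include <linux/kernel.h>	/* DIV_ROUND_UP */
	#include <linux/mm.h>		/* PAGE_SIZE */

	/* E.g. with 4K pages, len = 4096 at offs = 1 spans two pages. */
	static int pages_spanned(size_t len, size_t offs)
	{
		return DIV_ROUND_UP(len + offs, PAGE_SIZE);
	}

	/* Same idea for a raw pointer: round the end up, the start down. */
	static int pages_spanned_ptr(const void *p, size_t len)
	{
		return DIV_ROUND_UP((unsigned long)p + len, PAGE_SIZE) -
		       (unsigned long)p / PAGE_SIZE;
	}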
/linux-4.1.27/fs/btrfs/tests/ |
D | extent-io-tests.c | 35 unsigned long nr_pages = end_index - index + 1; in process_page_range() local 40 while (nr_pages > 0) { in process_page_range() 42 min_t(unsigned long, nr_pages, in process_page_range() 54 nr_pages -= ret; in process_page_range() 59 …KERN_ERR "stuck in a loop, start %Lu, end %Lu, nr_pages %lu, ret %d\n", start, end, nr_pages, ret); in process_page_range()
|
/linux-4.1.27/tools/vm/ |
D | page-types.c | 204 static unsigned long nr_pages[HASH_SIZE]; variable 401 for (i = 0; i < ARRAY_SIZE(nr_pages); i++) { in show_summary() 402 if (nr_pages[i]) in show_summary() 405 nr_pages[i], in show_summary() 406 pages2mb(nr_pages[i]), in show_summary() 585 nr_pages[hash_slot(flags)]++; in add_page() 852 unsigned long nr_pages, pfn, i; in walk_file() local 862 nr_pages = (end - off + page_size - 1) / page_size; in walk_file() 863 if (nr_pages > PAGEMAP_BATCH) in walk_file() 864 nr_pages = PAGEMAP_BATCH; in walk_file() [all …]
|
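tools/vm/page-types.c is a userspace tool; its walk_file() converts a byte range to a page count with round-up division and clamps each round to PAGEMAP_BATCH pages. A runnable userspace fragment of that shape (BATCH_LIMIT plays the role of the tool's PAGEMAP_BATCH; its value here is illustrative):

	#include <stdio.h>

	#define BATCH_LIMIT 4096	/* illustrative batch ceiling */

	/* Pages covering [off, end), clamped to one batch. */
	static unsigned long batch_pages(unsigned long off, unsigned long end,
					 unsigned long page_size)
	{
		unsigned long nr_pages = (end - off + page_size - 1) / page_size;

		if (nr_pages > BATCH_LIMIT)
			nr_pages = BATCH_LIMIT;
		return nr_pages;
	}

	int main(void)
	{
		/* 10000 bytes with 4K pages needs 3 pages. */
		printf("%lu\n", batch_pages(0, 10000, 4096));
		return 0;
	}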
/linux-4.1.27/drivers/misc/carma/ |
D | carma-fpga-program.c | 72 int nr_pages; member 80 static int fpga_dma_init(struct fpga_dev *priv, int nr_pages) in fpga_dma_init() argument 85 priv->vaddr = vmalloc_32(nr_pages << PAGE_SHIFT); in fpga_dma_init() 87 pr_debug("vmalloc_32(%d pages) failed\n", nr_pages); in fpga_dma_init() 93 nr_pages << PAGE_SHIFT); in fpga_dma_init() 95 memset(priv->vaddr, 0, nr_pages << PAGE_SHIFT); in fpga_dma_init() 96 priv->nr_pages = nr_pages; in fpga_dma_init() 98 priv->sglist = vzalloc(priv->nr_pages * sizeof(*priv->sglist)); in fpga_dma_init() 102 sg_init_table(priv->sglist, priv->nr_pages); in fpga_dma_init() 103 for (i = 0; i < priv->nr_pages; i++) { in fpga_dma_init() [all …]
|
D | carma-fpga.c | 147 int nr_pages; member 212 static int carma_dma_init(struct data_buf *buf, int nr_pages) in carma_dma_init() argument 217 buf->vaddr = vmalloc_32(nr_pages << PAGE_SHIFT); in carma_dma_init() 219 pr_debug("vmalloc_32(%d pages) failed\n", nr_pages); in carma_dma_init() 225 nr_pages << PAGE_SHIFT); in carma_dma_init() 227 memset(buf->vaddr, 0, nr_pages << PAGE_SHIFT); in carma_dma_init() 228 buf->nr_pages = nr_pages; in carma_dma_init() 230 buf->sglist = vzalloc(buf->nr_pages * sizeof(*buf->sglist)); in carma_dma_init() 234 sg_init_table(buf->sglist, buf->nr_pages); in carma_dma_init() 235 for (i = 0; i < buf->nr_pages; i++) { in carma_dma_init() [all …]
|
/linux-4.1.27/block/ |
D | bio.c | 691 int nr_pages; in bio_get_nr_vecs() local 693 nr_pages = min_t(unsigned, in bio_get_nr_vecs() 697 return min_t(unsigned, nr_pages, BIO_MAX_PAGES); in bio_get_nr_vecs() 1161 int nr_pages = 0; in bio_copy_user_iov() local 1181 nr_pages += end - start; in bio_copy_user_iov() 1185 nr_pages++; in bio_copy_user_iov() 1202 bio = bio_kmalloc(gfp_mask, nr_pages); in bio_copy_user_iov() 1212 nr_pages = 1 << map_data->page_order; in bio_copy_user_iov() 1224 if (i == map_data->nr_entries * nr_pages) { in bio_copy_user_iov() 1229 page = map_data->pages[i / nr_pages]; in bio_copy_user_iov() [all …]
|
D | bio-integrity.c | 266 unsigned int len, nr_pages; in bio_integrity_prep() local 287 nr_pages = end - start; in bio_integrity_prep() 290 bip = bio_integrity_alloc(bio, GFP_NOIO, nr_pages); in bio_integrity_prep() 306 for (i = 0 ; i < nr_pages ; i++) { in bio_integrity_prep()
|
/linux-4.1.27/arch/x86/xen/ |
D | setup.c | 224 unsigned long end_pfn, unsigned long nr_pages, unsigned long *released) in xen_set_identity_and_release_chunk() argument 232 end = min(end_pfn, nr_pages); in xen_set_identity_and_release_chunk() 360 unsigned long end_pfn, unsigned long nr_pages, unsigned long remap_pfn, in xen_set_identity_and_remap_chunk() argument 374 if (cur_pfn >= nr_pages) { in xen_set_identity_and_remap_chunk() 379 if (cur_pfn + size > nr_pages) in xen_set_identity_and_remap_chunk() 380 size = nr_pages - cur_pfn; in xen_set_identity_and_remap_chunk() 387 cur_pfn + left, nr_pages, released); in xen_set_identity_and_remap_chunk() 415 const struct e820entry *list, size_t map_size, unsigned long nr_pages, in xen_set_identity_and_remap() argument 419 unsigned long last_pfn = nr_pages; in xen_set_identity_and_remap() 448 end_pfn, nr_pages, last_pfn, in xen_set_identity_and_remap() [all …]
|
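xen_set_identity_and_remap_chunk() clamps every chunk against the populated page count: a chunk starting at or past nr_pages is skipped outright, and one straddling the boundary is shortened. The clamp in isolation:

	/* Usable size of [cur_pfn, cur_pfn + size) given nr_pages populated
	 * pages; may be 0 when the chunk is entirely unpopulated. */
	static unsigned long clamp_chunk(unsigned long cur_pfn,
					 unsigned long size,
					 unsigned long nr_pages)
	{
		if (cur_pfn >= nr_pages)
			return 0;
		if (cur_pfn + size > nr_pages)
			size = nr_pages - cur_pfn;
		return size;
	}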
/linux-4.1.27/include/xen/ |
D | balloon.h | 28 int alloc_xenballooned_pages(int nr_pages, struct page **pages, 30 void free_xenballooned_pages(int nr_pages, struct page **pages);
|
D | grant_table.h | 184 int gnttab_alloc_pages(int nr_pages, struct page **pages); 185 void gnttab_free_pages(int nr_pages, struct page **pages);
|
D | xenbus.h | 207 unsigned int nr_pages, grant_ref_t *grefs);
|
/linux-4.1.27/fs/ext4/ |
D | readpage.c | 135 unsigned nr_pages) in ext4_mpage_readpages() argument 160 for (page_idx = 0; nr_pages; page_idx++, nr_pages--) { in ext4_mpage_readpages() 177 last_block = block_in_file + nr_pages * blocks_per_page; in ext4_mpage_readpages() 287 min_t(int, nr_pages, bio_get_nr_vecs(bdev))); in ext4_mpage_readpages()
|
D | file.c | 352 unsigned long nr_pages; in ext4_find_unwritten_pgoff() local 355 nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index, in ext4_find_unwritten_pgoff() 357 if (nr_pages == 0) { in ext4_find_unwritten_pgoff() 383 for (i = 0; i < nr_pages; i++) { in ext4_find_unwritten_pgoff() 441 if (nr_pages < num && whence == SEEK_HOLE) { in ext4_find_unwritten_pgoff()
|
/linux-4.1.27/arch/powerpc/mm/ |
D | mem.c | 121 unsigned long nr_pages = size >> PAGE_SHIFT; in arch_add_memory() local 133 return __add_pages(nid, zone, start_pfn, nr_pages); in arch_add_memory() 140 unsigned long nr_pages = size >> PAGE_SHIFT; in arch_remove_memory() local 145 ret = __remove_pages(zone, start_pfn, nr_pages); in arch_remove_memory() 170 walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages, in walk_system_ram_range() argument 174 unsigned long end_pfn = start_pfn + nr_pages; in walk_system_ram_range()
|
D | init_64.c | 394 unsigned int nr_pages = in vmemmap_free() local 396 while (nr_pages--) in vmemmap_free()
|
/linux-4.1.27/kernel/trace/ |
D | ring_buffer.c | 464 unsigned long nr_pages; member 1164 static int __rb_allocate_pages(long nr_pages, struct list_head *pages, int cpu) in __rb_allocate_pages() argument 1169 for (i = 0; i < nr_pages; i++) { in __rb_allocate_pages() 1204 unsigned long nr_pages) in rb_allocate_pages() argument 1208 WARN_ON(!nr_pages); in rb_allocate_pages() 1210 if (__rb_allocate_pages(nr_pages, &pages, cpu_buffer->cpu)) in rb_allocate_pages() 1221 cpu_buffer->nr_pages = nr_pages; in rb_allocate_pages() 1229 rb_allocate_cpu_buffer(struct ring_buffer *buffer, long nr_pages, int cpu) in rb_allocate_cpu_buffer() argument 1269 ret = rb_allocate_pages(cpu_buffer, nr_pages); in rb_allocate_cpu_buffer() 1329 long nr_pages; in __ring_buffer_alloc() local [all …]
|
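__rb_allocate_pages() builds the new buffer pages on a private list so a mid-loop allocation failure can unwind without ever touching the live ring; only a fully allocated list is handed over. A hedged sketch of that allocate-then-commit shape with a hypothetical page wrapper:

	#include <linux/gfp.h>
	#include <linux/list.h>
	#include <linux/slab.h>

	struct my_bpage {
		struct list_head list;
		void *data;
	};

	/* Allocate nr_pages entries onto a private list; on failure, free
	 * the partial list and leave the caller's state untouched. */
	static int alloc_page_list(long nr_pages, struct list_head *pages)
	{
		struct my_bpage *bpage, *tmp;
		long i;

		for (i = 0; i < nr_pages; i++) {
			bpage = kzalloc(sizeof(*bpage), GFP_KERNEL);
			if (!bpage)
				goto free_pages;
			bpage->data = (void *)__get_free_page(GFP_KERNEL);
			if (!bpage->data) {
				kfree(bpage);
				goto free_pages;
			}
			list_add(&bpage->list, pages);
		}
		return 0;

	free_pages:
		list_for_each_entry_safe(bpage, tmp, pages, list) {
			list_del_init(&bpage->list);
			free_page((unsigned long)bpage->data);
			kfree(bpage);
		}
		return -ENOMEM;
	}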
/linux-4.1.27/arch/sh/mm/ |
D | gup.c | 163 int __get_user_pages_fast(unsigned long start, int nr_pages, int write, in __get_user_pages_fast() argument 175 len = (unsigned long) nr_pages << PAGE_SHIFT; in __get_user_pages_fast() 217 int get_user_pages_fast(unsigned long start, int nr_pages, int write, in get_user_pages_fast() argument 228 len = (unsigned long) nr_pages << PAGE_SHIFT; in get_user_pages_fast()
|
D | init.c | 492 unsigned long nr_pages = size >> PAGE_SHIFT; in arch_add_memory() local 500 start_pfn, nr_pages); in arch_add_memory() 521 unsigned long nr_pages = size >> PAGE_SHIFT; in arch_remove_memory() local 526 ret = __remove_pages(zone, start_pfn, nr_pages); in arch_remove_memory()
|
/linux-4.1.27/fs/afs/ |
D | cache.c | 375 int loop, nr_pages; in afs_vnode_cache_now_uncached() local 385 nr_pages = pagevec_lookup(&pvec, vnode->vfs_inode.i_mapping, in afs_vnode_cache_now_uncached() 388 if (!nr_pages) in afs_vnode_cache_now_uncached() 391 for (loop = 0; loop < nr_pages; loop++) in afs_vnode_cache_now_uncached() 394 first = pvec.pages[nr_pages - 1]->index + 1; in afs_vnode_cache_now_uncached() 396 pvec.nr = nr_pages; in afs_vnode_cache_now_uncached()
|
D | file.c | 28 struct list_head *pages, unsigned nr_pages); 241 struct list_head *pages, unsigned nr_pages) in afs_readpages() argument 248 key_serial(key), mapping->host->i_ino, nr_pages); in afs_readpages() 263 &nr_pages, in afs_readpages() 275 BUG_ON(nr_pages != 0); in afs_readpages()
|
/linux-4.1.27/drivers/media/pci/cx88/ |
D | cx88-alsa.c | 68 int nr_pages; member 294 static int cx88_alsa_dma_init(struct cx88_audio_dev *chip, int nr_pages) in cx88_alsa_dma_init() argument 300 buf->vaddr = vmalloc_32(nr_pages << PAGE_SHIFT); in cx88_alsa_dma_init() 302 dprintk(1, "vmalloc_32(%d pages) failed\n", nr_pages); in cx88_alsa_dma_init() 308 nr_pages << PAGE_SHIFT); in cx88_alsa_dma_init() 310 memset(buf->vaddr, 0, nr_pages << PAGE_SHIFT); in cx88_alsa_dma_init() 311 buf->nr_pages = nr_pages; in cx88_alsa_dma_init() 313 buf->sglist = vzalloc(buf->nr_pages * sizeof(*buf->sglist)); in cx88_alsa_dma_init() 317 sg_init_table(buf->sglist, buf->nr_pages); in cx88_alsa_dma_init() 318 for (i = 0; i < buf->nr_pages; i++) { in cx88_alsa_dma_init() [all …]
|
/linux-4.1.27/tools/testing/selftests/vm/ |
D | transhuge-stress.c | 104 int nr_succeed = 0, nr_failed = 0, nr_pages = 0; in main() local 128 nr_pages++; in main() 142 nr_succeed, nr_failed, nr_pages); in main()
|
/linux-4.1.27/arch/sparc/mm/ |
D | gup.c | 163 int __get_user_pages_fast(unsigned long start, int nr_pages, int write, in __get_user_pages_fast() argument 174 len = (unsigned long) nr_pages << PAGE_SHIFT; in __get_user_pages_fast() 193 int get_user_pages_fast(unsigned long start, int nr_pages, int write, in get_user_pages_fast() argument 204 len = (unsigned long) nr_pages << PAGE_SHIFT; in get_user_pages_fast()
|
/linux-4.1.27/fs/cachefiles/ |
D | rdwr.c | 680 unsigned *nr_pages, in cachefiles_read_or_alloc_pages() argument 699 *nr_pages); in cachefiles_read_or_alloc_pages() 705 if (cachefiles_has_space(cache, 0, *nr_pages) < 0) in cachefiles_read_or_alloc_pages() 751 (*nr_pages)--; in cachefiles_read_or_alloc_pages() 777 ret, *nr_pages, list_empty(pages) ? " empty" : ""); in cachefiles_read_or_alloc_pages() 781 fscache_retrieval_complete(op, *nr_pages); in cachefiles_read_or_alloc_pages() 835 unsigned *nr_pages, in cachefiles_allocate_pages() argument 849 _enter("%p,,,%d,", object, *nr_pages); in cachefiles_allocate_pages() 851 ret = cachefiles_has_space(cache, 0, *nr_pages); in cachefiles_allocate_pages() 867 fscache_retrieval_complete(op, *nr_pages); in cachefiles_allocate_pages()
|
/linux-4.1.27/drivers/media/common/saa7146/ |
D | saa7146_core.c | 149 static struct scatterlist* vmalloc_to_sg(unsigned char *virt, int nr_pages) in vmalloc_to_sg() argument 155 sglist = kcalloc(nr_pages, sizeof(struct scatterlist), GFP_KERNEL); in vmalloc_to_sg() 158 sg_init_table(sglist, nr_pages); in vmalloc_to_sg() 159 for (i = 0; i < nr_pages; i++, virt += PAGE_SIZE) { in vmalloc_to_sg() 251 int nr_pages = 0; in saa7146_pgtable_build_single() local 270 nr_pages++; in saa7146_pgtable_build_single() 277 for(i=nr_pages;i<1024;i++) { in saa7146_pgtable_build_single()
|
/linux-4.1.27/arch/arm64/mm/ |
D | dma-mapping.c | 313 unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; in __dma_common_mmap() local 320 if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) { in __dma_common_mmap() 359 unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT; in atomic_pool_init() local 365 page = dma_alloc_from_contiguous(NULL, nr_pages, in atomic_pool_init() 409 if (!dma_release_from_contiguous(NULL, page, nr_pages)) in atomic_pool_init()
|
/linux-4.1.27/arch/mips/mm/ |
D | gup.c | 196 int __get_user_pages_fast(unsigned long start, int nr_pages, int write, in __get_user_pages_fast() argument 208 len = (unsigned long) nr_pages << PAGE_SHIFT; in __get_user_pages_fast() 263 int get_user_pages_fast(unsigned long start, int nr_pages, int write, in get_user_pages_fast() argument 274 len = (unsigned long) nr_pages << PAGE_SHIFT; in get_user_pages_fast()
|
/linux-4.1.27/arch/s390/kvm/ |
D | gaccess.c | 714 unsigned long *pages, unsigned long nr_pages, in guest_page_range() argument 724 while (nr_pages) { in guest_page_range() 749 nr_pages--; in guest_page_range() 758 unsigned long _len, nr_pages, gpa, idx; in access_guest() local 770 nr_pages = (((ga & ~PAGE_MASK) + len - 1) >> PAGE_SHIFT) + 1; in access_guest() 772 if (nr_pages > ARRAY_SIZE(pages_array)) in access_guest() 773 pages = vmalloc(nr_pages * sizeof(unsigned long)); in access_guest() 779 rc = guest_page_range(vcpu, ga, pages, nr_pages, asce, write); in access_guest() 780 for (idx = 0; idx < nr_pages && !rc; idx++) { in access_guest() 793 if (nr_pages > ARRAY_SIZE(pages_array)) in access_guest()
|
/linux-4.1.27/kernel/ |
D | relay.c | 1218 unsigned int pidx, poff, total_len, subbuf_pages, nr_pages; in subbuf_splice_actor() local 1231 .nr_pages = 0, in subbuf_splice_actor() 1254 nr_pages = min_t(unsigned int, subbuf_pages, spd.nr_pages_max); in subbuf_splice_actor() 1256 for (total_len = 0; spd.nr_pages < nr_pages; spd.nr_pages++) { in subbuf_splice_actor() 1266 spd.pages[spd.nr_pages] = rbuf->page_array[pidx]; in subbuf_splice_actor() 1267 spd.partial[spd.nr_pages].offset = poff; in subbuf_splice_actor() 1274 spd.partial[spd.nr_pages].len = this_len; in subbuf_splice_actor() 1275 spd.partial[spd.nr_pages].private = private; in subbuf_splice_actor() 1283 spd.nr_pages++; in subbuf_splice_actor() 1289 if (!spd.nr_pages) in subbuf_splice_actor()
|
/linux-4.1.27/arch/alpha/mm/ |
D | init.c | 192 unsigned long nr_pages = 0; in callback_init() local 198 nr_pages += crb->map[i].count; in callback_init() 202 console_remap_vm.size = nr_pages << PAGE_SHIFT; in callback_init()
|
/linux-4.1.27/drivers/media/pci/saa7134/ |
D | saa7134-alsa.c | 276 static int saa7134_alsa_dma_init(struct saa7134_dev *dev, int nr_pages) in saa7134_alsa_dma_init() argument 282 dma->vaddr = vmalloc_32(nr_pages << PAGE_SHIFT); in saa7134_alsa_dma_init() 284 dprintk("vmalloc_32(%d pages) failed\n", nr_pages); in saa7134_alsa_dma_init() 290 nr_pages << PAGE_SHIFT); in saa7134_alsa_dma_init() 292 memset(dma->vaddr, 0, nr_pages << PAGE_SHIFT); in saa7134_alsa_dma_init() 293 dma->nr_pages = nr_pages; in saa7134_alsa_dma_init() 295 dma->sglist = vzalloc(dma->nr_pages * sizeof(*dma->sglist)); in saa7134_alsa_dma_init() 299 sg_init_table(dma->sglist, dma->nr_pages); in saa7134_alsa_dma_init() 300 for (i = 0; i < dma->nr_pages; i++) { in saa7134_alsa_dma_init() 322 dma->nr_pages, PCI_DMA_FROMDEVICE); in saa7134_alsa_dma_map()
|
/linux-4.1.27/drivers/video/fbdev/ |
D | pvr2fb.c | 679 unsigned int nr_pages; in pvr2fb_write() local 683 nr_pages = (count + PAGE_SIZE - 1) >> PAGE_SHIFT; in pvr2fb_write() 685 pages = kmalloc(nr_pages * sizeof(struct page *), GFP_KERNEL); in pvr2fb_write() 690 nr_pages, WRITE, 0, pages); in pvr2fb_write() 692 if (ret < nr_pages) { in pvr2fb_write() 693 nr_pages = ret; in pvr2fb_write() 702 end = (unsigned long)page_address(pages[nr_pages]); in pvr2fb_write() 703 len = nr_pages << PAGE_SHIFT; in pvr2fb_write() 721 for (i = 0; i < nr_pages; i++, dst += PAGE_SIZE) { in pvr2fb_write() 737 for (i = 0; i < nr_pages; i++) in pvr2fb_write()
|
D | xen-fbfront.c | 46 int nr_pages; member 403 info->nr_pages = (fb_size + PAGE_SIZE - 1) >> PAGE_SHIFT; in xenfb_probe() 405 info->mfns = vmalloc(sizeof(unsigned long) * info->nr_pages); in xenfb_probe() 551 for (i = 0; i < info->nr_pages; i++) in xenfb_init_shared_page() 554 for (i = 0; i * epd < info->nr_pages; i++) in xenfb_init_shared_page()
|
/linux-4.1.27/fs/gfs2/ |
D | aops.c | 234 int nr_pages, pgoff_t end, in gfs2_write_jdata_pagevec() argument 239 unsigned nrblocks = nr_pages * (PAGE_CACHE_SIZE/inode->i_sb->s_blocksize); in gfs2_write_jdata_pagevec() 247 for(i = 0; i < nr_pages; i++) { in gfs2_write_jdata_pagevec() 350 int nr_pages; in gfs2_write_cache_jdata() local 385 nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag, in gfs2_write_cache_jdata() 387 if (nr_pages == 0) in gfs2_write_cache_jdata() 390 ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, end, &done_index); in gfs2_write_cache_jdata() 604 struct list_head *pages, unsigned nr_pages) in gfs2_readpages() argument 617 ret = mpage_readpages(mapping, pages, nr_pages, gfs2_block_map); in gfs2_readpages()
|
/linux-4.1.27/drivers/base/ |
D | memory.c | 228 unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block; in memory_block_action() local 240 ret = online_pages(start_pfn, nr_pages, online_type); in memory_block_action() 243 ret = offline_pages(start_pfn, nr_pages); in memory_block_action() 387 unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block; in show_valid_zones() local 392 end_pfn = start_pfn + nr_pages; in show_valid_zones()
|
D | firmware_class.c | 149 int nr_pages; member 254 for (i = 0; i < buf->nr_pages; i++) in __fw_free_buf() 604 buf->data = vmap(buf->pages, buf->nr_pages, 0, PAGE_KERNEL_RO); in fw_map_pages_buf() 642 for (i = 0; i < fw_buf->nr_pages; i++) in firmware_loading_store() 647 fw_buf->nr_pages = 0; in firmware_loading_store() 769 while (buf->nr_pages < pages_needed) { in fw_realloc_buffer() 770 buf->pages[buf->nr_pages] = in fw_realloc_buffer() 773 if (!buf->pages[buf->nr_pages]) { in fw_realloc_buffer() 777 buf->nr_pages++; in fw_realloc_buffer()
|
/linux-4.1.27/drivers/mtd/devices/ |
D | mtd_dataflash.c | 621 static int add_dataflash_otp(struct spi_device *spi, char *name, int nr_pages, in add_dataflash_otp() argument 647 device->size = nr_pages * pagesize; in add_dataflash_otp() 681 int nr_pages, int pagesize, int pageoffset) in add_dataflash() argument 683 return add_dataflash_otp(spi, name, nr_pages, pagesize, in add_dataflash() 696 unsigned nr_pages; member 839 return add_dataflash_otp(spi, info->name, info->nr_pages, in dataflash_probe()
|
D | sst25l.c | 60 unsigned nr_pages; member 385 flash->mtd.size = flash_info->page_size * flash_info->nr_pages; in sst25l_probe()
|
/linux-4.1.27/include/media/ |
D | videobuf-dma-sg.h | 66 int nr_pages; member
|
/linux-4.1.27/drivers/block/ |
D | brd.c | 165 int nr_pages; in brd_free_pages() local 170 nr_pages = radix_tree_gang_lookup(&brd->brd_pages, in brd_free_pages() 173 for (i = 0; i < nr_pages; i++) { in brd_free_pages() 190 } while (nr_pages == FREE_BATCH); in brd_free_pages()
|
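brd_free_pages() drains its radix tree in FREE_BATCH-sized gang lookups, resuming past the last slot freed and stopping when a lookup comes back short, which is what `while (nr_pages == FREE_BATCH)` tests. A generic sketch with hypothetical callbacks:

	#include <linux/radix-tree.h>

	#define MY_FREE_BATCH 16	/* illustrative batch size */

	/* item_index() must report each item's slot so the next round can
	 * resume past it; release() frees the item itself. */
	static void drain_tree(struct radix_tree_root *root,
			       unsigned long (*item_index)(void *item),
			       void (*release)(void *item))
	{
		void *batch[MY_FREE_BATCH];
		unsigned long pos = 0;
		int nr, i;

		do {
			nr = radix_tree_gang_lookup(root, batch, pos,
						    MY_FREE_BATCH);
			for (i = 0; i < nr; i++) {
				pos = item_index(batch[i]);
				radix_tree_delete(root, pos);
				release(batch[i]);
			}
			pos++;	/* resume past the last slot freed */
		} while (nr == MY_FREE_BATCH);	/* short batch == done */
	}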
/linux-4.1.27/drivers/md/ |
D | dm-kcopyd.c | 285 static int client_reserve_pages(struct dm_kcopyd_client *kc, unsigned nr_pages) in client_reserve_pages() argument 290 for (i = 0; i < nr_pages; i++) { in client_reserve_pages() 301 kc->nr_reserved_pages += nr_pages; in client_reserve_pages() 521 unsigned nr_pages = dm_div_up(job->dests[0].count, PAGE_SIZE >> 9); in run_pages_job() local 523 r = kcopyd_get_pages(job->kc, nr_pages, &job->pages); in run_pages_job()
|
/linux-4.1.27/fs/proc/ |
D | kcore.c | 168 unsigned long nr_pages = ent->size >> PAGE_SHIFT; in get_sparsemem_vmemmap_info() local 174 end = ((unsigned long)pfn_to_page(pfn + nr_pages)) - 1; in get_sparsemem_vmemmap_info() 206 kclist_add_private(unsigned long pfn, unsigned long nr_pages, void *arg) in kclist_add_private() argument 215 ent->size = nr_pages << PAGE_SHIFT; in kclist_add_private()
|
D | task_mmu.c | 1363 unsigned long nr_pages) in gather_stats() argument 1367 md->pages += nr_pages; in gather_stats() 1369 md->dirty += nr_pages; in gather_stats() 1372 md->swapcache += nr_pages; in gather_stats() 1375 md->active += nr_pages; in gather_stats() 1378 md->writeback += nr_pages; in gather_stats() 1381 md->anon += nr_pages; in gather_stats() 1386 md->node[page_to_nid(page)] += nr_pages; in gather_stats()
|
/linux-4.1.27/fs/fscache/ |
D | page.c | 535 unsigned *nr_pages, in __fscache_read_or_alloc_pages() argument 545 _enter("%p,,%d,,,", cookie, *nr_pages); in __fscache_read_or_alloc_pages() 558 ASSERTCMP(*nr_pages, >, 0); in __fscache_read_or_alloc_pages() 567 atomic_set(&op->n_pages, *nr_pages); in __fscache_read_or_alloc_pages() 605 op, pages, nr_pages, gfp); in __fscache_read_or_alloc_pages() 610 op, pages, nr_pages, gfp); in __fscache_read_or_alloc_pages()
|
/linux-4.1.27/arch/ia64/mm/ |
D | init.c | 660 unsigned long nr_pages = size >> PAGE_SHIFT; in arch_add_memory() local 667 ret = __add_pages(nid, zone, start_pfn, nr_pages); in arch_add_memory() 680 unsigned long nr_pages = size >> PAGE_SHIFT; in arch_remove_memory() local 685 ret = __remove_pages(zone, start_pfn, nr_pages); in arch_remove_memory()
|
/linux-4.1.27/fs/hpfs/ |
D | file.c | 123 struct list_head *pages, unsigned nr_pages) in hpfs_readpages() argument 125 return mpage_readpages(mapping, pages, nr_pages, hpfs_get_block); in hpfs_readpages()
|
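The hpfs entry, like the jfs and omfs ones below and the tail of the gfs2 one above, is the simplest possible ->readpages(): delegate the whole list to mpage_readpages() with the filesystem's get_block callback. In full, with my_get_block() as the hypothetical per-fs mapper:

	#include <linux/fs.h>
	#include <linux/mpage.h>

	/* The fs's usual get_block_t callback (hypothetical here). */
	int my_get_block(struct inode *inode, sector_t iblock,
			 struct buffer_head *bh_result, int create);

	static int my_readpages(struct file *file,
				struct address_space *mapping,
				struct list_head *pages, unsigned nr_pages)
	{
		return mpage_readpages(mapping, pages, nr_pages, my_get_block);
	}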
/linux-4.1.27/arch/powerpc/platforms/pseries/ |
D | cmm.c | 476 unsigned long end = start + (marg->nr_pages << PAGE_SHIFT); in cmm_count_pages() 529 unsigned long end_page = start_page + (marg->nr_pages << PAGE_SHIFT); in cmm_mem_going_offline() 535 start_page, marg->nr_pages); in cmm_mem_going_offline()
|
/linux-4.1.27/drivers/gpu/drm/savage/ |
D | savage_bci.c | 367 unsigned int nr_pages = (n - rest + SAVAGE_DMA_PAGE_SIZE - 1) / in savage_dma_alloc() local 373 cur, dev_priv->dma_pages[cur].used, n, rest, nr_pages); in savage_dma_alloc() 375 if (cur + nr_pages < dev_priv->nr_dma_pages) { in savage_dma_alloc() 385 nr_pages = in savage_dma_alloc() 395 for (i = cur; nr_pages > 0; ++i, --nr_pages) { in savage_dma_alloc()
|
/linux-4.1.27/arch/arm/include/asm/xen/ |
D | page.h | 100 unsigned long nr_pages);
|
/linux-4.1.27/arch/sh/kernel/cpu/sh4/ |
D | sq.c | 376 unsigned int nr_pages = 0x04000000 >> PAGE_SHIFT; in sq_api_init() local 377 unsigned int size = (nr_pages + (BITS_PER_LONG - 1)) / BITS_PER_LONG; in sq_api_init()
|
/linux-4.1.27/fs/xfs/ |
D | xfs_buf.c | 1140 int nr_pages; in xfs_buf_ioapply_map() local 1166 nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT); in xfs_buf_ioapply_map() 1167 if (nr_pages > total_nr_pages) in xfs_buf_ioapply_map() 1168 nr_pages = total_nr_pages; in xfs_buf_ioapply_map() 1170 bio = bio_alloc(GFP_NOIO, nr_pages); in xfs_buf_ioapply_map() 1177 for (; size && nr_pages; nr_pages--, page_index++) { in xfs_buf_ioapply_map()
|
D | xfs_file.c | 1170 unsigned nr_pages; in xfs_find_get_desired_pgoff() local 1174 nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index, in xfs_find_get_desired_pgoff() 1187 if (nr_pages == 0) { in xfs_find_get_desired_pgoff() 1211 for (i = 0; i < nr_pages; i++) { in xfs_find_get_desired_pgoff() 1278 if (nr_pages < want) { in xfs_find_get_desired_pgoff()
|
/linux-4.1.27/include/linux/ceph/ |
D | osd_client.h | 358 struct page **pages, int nr_pages, 368 struct page **pages, int nr_pages);
|
/linux-4.1.27/fs/jfs/ |
D | inode.c | 299 struct list_head *pages, unsigned nr_pages) in jfs_readpages() argument 301 return mpage_readpages(mapping, pages, nr_pages, jfs_get_block); in jfs_readpages()
|
/linux-4.1.27/drivers/gpu/drm/ |
D | drm_prime.c | 676 struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages) in drm_prime_pages_to_sg() argument 687 ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0, in drm_prime_pages_to_sg() 688 nr_pages << PAGE_SHIFT, GFP_KERNEL); in drm_prime_pages_to_sg()
|
/linux-4.1.27/fs/omfs/ |
D | file.c | 293 struct list_head *pages, unsigned nr_pages) in omfs_readpages() argument 295 return mpage_readpages(mapping, pages, nr_pages, omfs_get_block); in omfs_readpages()
|
/linux-4.1.27/include/scsi/ |
D | osd_ore.h | 132 unsigned nr_pages; member
|
/linux-4.1.27/drivers/block/xen-blkback/ |
D | blkback.c | 732 invcount = xen_blkbk_unmap_prepare(blkif, pages, req->nr_pages, in xen_blkbk_unmap_and_respond() 918 pending_req->nr_pages, in xen_blkbk_map_seg() 934 nseg = pending_req->nr_pages; in xen_blkbk_parse_indirect() 1254 pending_req->nr_pages = nseg; in dispatch_rw_block_io() 1375 pending_req->nr_pages); in dispatch_rw_block_io()
|
/linux-4.1.27/arch/tile/kernel/ |
D | module.c | 58 area->nr_pages = npages; in module_alloc()
|
/linux-4.1.27/drivers/target/ |
D | target_core_pscsi.c | 881 int nr_pages = (cmd->data_length + sgl[0].offset + in pscsi_map_sg() local 888 pr_debug("PSCSI: nr_pages: %d\n", nr_pages); in pscsi_map_sg() 910 nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages); in pscsi_map_sg() 911 nr_pages -= nr_vecs; in pscsi_map_sg()
|
/linux-4.1.27/fs/f2fs/ |
D | node.c | 1154 int i, nr_pages; in sync_node_pages() local 1155 nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index, in sync_node_pages() 1158 if (nr_pages == 0) in sync_node_pages() 1161 for (i = 0; i < nr_pages; i++) { in sync_node_pages() 1256 int i, nr_pages; in wait_on_node_pages_writeback() local 1257 nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index, in wait_on_node_pages_writeback() 1260 if (nr_pages == 0) in wait_on_node_pages_writeback() 1263 for (i = 0; i < nr_pages; i++) { in wait_on_node_pages_writeback()
|
/linux-4.1.27/drivers/staging/lustre/lustre/osc/ |
D | osc_cache.c | 60 static void osc_free_grant(struct client_obd *cli, unsigned int nr_pages, 837 int nr_pages = ext->oe_nr_pages; in osc_extent_finish() local 877 osc_free_grant(cli, nr_pages, lost_grant); in osc_extent_finish() 959 int nr_pages = 0; in osc_extent_truncate() local 1013 ++nr_pages; in osc_extent_truncate() 1050 if (grants > 0 || nr_pages > 0) in osc_extent_truncate() 1051 osc_free_grant(cli, nr_pages, grants); in osc_extent_truncate() 1445 static void osc_free_grant(struct client_obd *cli, unsigned int nr_pages, in osc_free_grant() argument 1451 atomic_sub(nr_pages, &obd_dirty_pages); in osc_free_grant() 1452 cli->cl_dirty -= nr_pages << PAGE_CACHE_SHIFT; in osc_free_grant()
|
/linux-4.1.27/drivers/iommu/ |
D | intel-iommu.c | 1997 unsigned long nr_pages, int prot) in __domain_mapping() argument 2005 BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1)); in __domain_mapping() 2013 sg_res = nr_pages; in __domain_mapping() 2017 while (nr_pages > 0) { in __domain_mapping() 2071 BUG_ON(nr_pages < lvl_pages); in __domain_mapping() 2074 nr_pages -= lvl_pages; in __domain_mapping() 2092 if (!nr_pages || first_pte_in_page(pte) || in __domain_mapping() 2099 if (!sg_res && nr_pages) in __domain_mapping() 2106 struct scatterlist *sg, unsigned long nr_pages, in domain_sg_mapping() argument 2109 return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot); in domain_sg_mapping() [all …]
|