/linux-4.4.14/mm/
percpu-km.c
  50   const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT;  pcpu_create_chunk() local
  59   pages = alloc_pages(GFP_KERNEL, order_base_2(nr_pages));  pcpu_create_chunk()
  65   for (i = 0; i < nr_pages; i++)  pcpu_create_chunk()
  72   pcpu_chunk_populated(chunk, 0, nr_pages);  pcpu_create_chunk()
  80   const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT;  pcpu_destroy_chunk() local
  83   __free_pages(chunk->data, order_base_2(nr_pages));  pcpu_destroy_chunk()
  94   size_t nr_pages, alloc_pages;  pcpu_verify_alloc_info() local
  102  nr_pages = (ai->groups[0].nr_units * ai->unit_size) >> PAGE_SHIFT;  pcpu_verify_alloc_info()
  103  alloc_pages = roundup_pow_of_two(nr_pages);  pcpu_verify_alloc_info()
  105  if (alloc_pages > nr_pages)  pcpu_verify_alloc_info()
  107  alloc_pages - nr_pages);  pcpu_verify_alloc_info()
page_counter.c
  18   * @nr_pages: number of pages to cancel
  20   void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages)  page_counter_cancel() argument
  24   new = atomic_long_sub_return(nr_pages, &counter->count);  page_counter_cancel()
  32   * @nr_pages: number of pages to charge
  36   void page_counter_charge(struct page_counter *counter, unsigned long nr_pages)  page_counter_charge() argument
  43   new = atomic_long_add_return(nr_pages, &c->count);  page_counter_charge()
  56   * @nr_pages: number of pages to charge
  63   unsigned long nr_pages,  page_counter_try_charge()
  84   new = atomic_long_add_return(nr_pages, &c->count);  page_counter_try_charge()
  86   atomic_long_sub(nr_pages, &c->count);  page_counter_try_charge()
  106  page_counter_cancel(c, nr_pages);  page_counter_try_charge()
  114  * @nr_pages: number of pages to uncharge
  116  void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages)  page_counter_uncharge() argument
  121  page_counter_cancel(c, nr_pages);  page_counter_uncharge()
  170  * @nr_pages: returns the result in number of pages
  172  * Returns -EINVAL, or 0 and @nr_pages on success. @nr_pages will be
  176  unsigned long *nr_pages)  page_counter_memparse()
  182  *nr_pages = PAGE_COUNTER_MAX;  page_counter_memparse()
  190  *nr_pages = min(bytes / PAGE_SIZE, (u64)PAGE_COUNTER_MAX);  page_counter_memparse()
  62   page_counter_try_charge(struct page_counter *counter, unsigned long nr_pages, struct page_counter **fail)  page_counter_try_charge() argument
  175  page_counter_memparse(const char *buf, const char *max, unsigned long *nr_pages)  page_counter_memparse() argument
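The kernel-doc fragments above describe a simple charge/uncharge contract. A minimal sketch of a caller, assuming a counter initialized elsewhere with page_counter_init(); my_counter and my_account_pages() are hypothetical names, while the page_counter_* calls follow the 4.4 prototypes shown in the listing:

/* Hedged sketch of driving the mm/page_counter.c API above. */
static struct page_counter my_counter;

static int my_account_pages(unsigned long nr_pages)
{
	struct page_counter *fail;

	/* Returns true on success; on failure 'fail' points at the
	 * counter in the hierarchy that hit its limit. */
	if (!page_counter_try_charge(&my_counter, nr_pages, &fail))
		return -ENOMEM;
	return 0;
}

static void my_unaccount_pages(unsigned long nr_pages)
{
	page_counter_uncharge(&my_counter, nr_pages);
}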
hugetlb_cgroup.c
  112  unsigned int nr_pages;  hugetlb_cgroup_move_parent() local
  126  nr_pages = 1 << compound_order(page);  hugetlb_cgroup_move_parent()
  130  page_counter_charge(&parent->hugepage[idx], nr_pages);  hugetlb_cgroup_move_parent()
  134  page_counter_cancel(counter, nr_pages);  hugetlb_cgroup_move_parent()
  165  int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,  hugetlb_cgroup_charge_cgroup() argument
  189  if (!page_counter_try_charge(&h_cg->hugepage[idx], nr_pages, &counter))  hugetlb_cgroup_charge_cgroup()
  198  void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,  hugetlb_cgroup_commit_charge() argument
  212  void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,  hugetlb_cgroup_uncharge_page() argument
  224  page_counter_uncharge(&h_cg->hugepage[idx], nr_pages);  hugetlb_cgroup_uncharge_page()
  228  void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,  hugetlb_cgroup_uncharge_cgroup() argument
  237  page_counter_uncharge(&h_cg->hugepage[idx], nr_pages);  hugetlb_cgroup_uncharge_cgroup()
  276  unsigned long nr_pages;  hugetlb_cgroup_write() local
  283  ret = page_counter_memparse(buf, "-1", &nr_pages);  hugetlb_cgroup_write()
  292  ret = page_counter_limit(&h_cg->hugepage[idx], nr_pages);  hugetlb_cgroup_write()
page_isolation.c
  27   arg.nr_pages = pageblock_nr_pages;  set_migratetype_isolate()
  60   unsigned long nr_pages;  set_migratetype_isolate() local
  65   nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE);  set_migratetype_isolate()
  67   __mod_zone_freepage_state(zone, -nr_pages, migratetype);  set_migratetype_isolate()
  79   unsigned long flags, nr_pages;  unset_migratetype_isolate() local
  121  nr_pages = move_freepages_block(zone, page, migratetype);  unset_migratetype_isolate()
  122  __mod_zone_freepage_state(zone, nr_pages, migratetype);  unset_migratetype_isolate()
  133  __first_valid_page(unsigned long pfn, unsigned long nr_pages)  __first_valid_page() argument
  136  for (i = 0; i < nr_pages; i++)  __first_valid_page()
  139  if (unlikely(i == nr_pages))  __first_valid_page()
page_ext.c
  128  unsigned long nr_pages;  alloc_node_page_ext() local
  130  nr_pages = NODE_DATA(nid)->node_spanned_pages;  alloc_node_page_ext()
  131  if (!nr_pages)  alloc_node_page_ext()
  141  nr_pages += MAX_ORDER_NR_PAGES;  alloc_node_page_ext()
  143  table_size = sizeof(struct page_ext) * nr_pages;  alloc_node_page_ext()
  280  unsigned long nr_pages,  online_page_ext()
  287  end = SECTION_ALIGN_UP(start_pfn + nr_pages);  online_page_ext()
  315  unsigned long nr_pages, int nid)  offline_page_ext()
  320  end = SECTION_ALIGN_UP(start_pfn + nr_pages);  offline_page_ext()
  337  mn->nr_pages, mn->status_change_nid);  page_ext_callback()
  341  mn->nr_pages, mn->status_change_nid);  page_ext_callback()
  345  mn->nr_pages, mn->status_change_nid);  page_ext_callback()
  279  online_page_ext(unsigned long start_pfn, unsigned long nr_pages, int nid)  online_page_ext() argument
  314  offline_page_ext(unsigned long start_pfn, unsigned long nr_pages, int nid)  offline_page_ext() argument
quicklist.c
  55   pages_to_free = q->nr_pages - max_pages(min_pages);  min_pages_to_free()
  70   if (q->nr_pages > min_pages) {  quicklist_trim()
  98   count += q->nr_pages;  for_each_online_cpu()
memory_hotplug.c
  247   unsigned long i, pfn, end_pfn, nr_pages;  register_page_bootmem_info_node() local
  252   nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;  register_page_bootmem_info_node()
  255   for (i = 0; i < nr_pages; i++, page++)  register_page_bootmem_info_node()
  261   nr_pages = zone->wait_table_hash_nr_entries  register_page_bootmem_info_node()
  263   nr_pages = PAGE_ALIGN(nr_pages) >> PAGE_SHIFT;  register_page_bootmem_info_node()
  266   for (i = 0; i < nr_pages; i++, page++)  register_page_bootmem_info_node()
  446   int nr_pages = PAGES_PER_SECTION;  __add_zone() local
  453   ret = ensure_zone_is_initialized(zone, phys_start_pfn, nr_pages);  __add_zone()
  458   grow_zone_span(zone, phys_start_pfn, phys_start_pfn + nr_pages);  __add_zone()
  460   phys_start_pfn + nr_pages);  __add_zone()
  462   memmap_init_zone(nr_pages, nid, zone_type,  __add_zone()
  466   for (pfn = phys_start_pfn; pfn < phys_start_pfn + nr_pages; pfn++) {  __add_zone()
  503   unsigned long nr_pages)  __add_pages()
  510   end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);  __add_pages()
  721   int nr_pages = PAGES_PER_SECTION;  __remove_zone() local
  728   shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);  __remove_zone()
  729   shrink_pgdat_span(pgdat, start_pfn, start_pfn + nr_pages);  __remove_zone()
  758   * @nr_pages: number of pages to remove (must be multiple of section size)
  766   unsigned long nr_pages)  __remove_pages()
  777   BUG_ON(nr_pages % PAGES_PER_SECTION);  __remove_pages()
  780   size = nr_pages * PAGE_SIZE;  __remove_pages()
  792   sections_to_remove = nr_pages / PAGES_PER_SECTION;  __remove_pages()
  866   static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,  online_pages_range() argument
  873   for (i = 0; i < nr_pages; i++) {  online_pages_range()
  900   static void node_states_check_changes_online(unsigned long nr_pages,  node_states_check_changes_online() argument
  975   int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_type)  online_pages() argument
  999   if (move_pfn_range_left(zone - 1, zone, pfn, pfn + nr_pages))  online_pages()
  1004  if (move_pfn_range_right(zone, zone + 1, pfn, pfn + nr_pages))  online_pages()
  1012  arg.nr_pages = nr_pages;  online_pages()
  1013  node_states_check_changes_online(nr_pages, zone, &arg);  online_pages()
  1034  ret = walk_system_ram_range(pfn, nr_pages, &onlined_pages,  online_pages()
  1042  (((unsigned long long) pfn + nr_pages)  online_pages()
  1187  u64 nr_pages = size >> PAGE_SHIFT;  check_hotplug_memory_range() local
  1191  (nr_pages % PAGES_PER_SECTION) || (!nr_pages)) {  check_hotplug_memory_range()
  1357  int is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)  is_mem_section_removable() argument
  1360  struct page *end_page = page + nr_pages;  is_mem_section_removable()
  1514  offline_isolated_pages_cb(unsigned long start, unsigned long nr_pages,  offline_isolated_pages_cb() argument
  1517  __offline_isolated_pages(start, start + nr_pages);  offline_isolated_pages_cb()
  1532  check_pages_isolated_cb(unsigned long start_pfn, unsigned long nr_pages,  check_pages_isolated_cb() argument
  1537  ret = test_pages_isolated(start_pfn, start_pfn + nr_pages, true);  check_pages_isolated_cb()
  1538  offlined = nr_pages;  check_pages_isolated_cb()
  1562  static bool can_offline_normal(struct zone *zone, unsigned long nr_pages)  can_offline_normal() argument
  1568  static bool can_offline_normal(struct zone *zone, unsigned long nr_pages)  can_offline_normal() argument
  1577  if (present_pages > nr_pages)  can_offline_normal()
  1624  static void node_states_check_changes_offline(unsigned long nr_pages,  node_states_check_changes_offline() argument
  1652  if (zone_idx(zone) <= zone_last && nr_pages >= present_pages)  node_states_check_changes_offline()
  1673  if (zone_idx(zone) <= zone_last && nr_pages >= present_pages)  node_states_check_changes_offline()
  1688  * If we try to offline the last present @nr_pages from the node,  node_states_check_changes_offline()
  1694  if (nr_pages >= present_pages)  node_states_check_changes_offline()
  1717  unsigned long pfn, nr_pages, expire;  __offline_pages() local
  1736  nr_pages = end_pfn - start_pfn;  __offline_pages()
  1738  if (zone_idx(zone) <= ZONE_NORMAL && !can_offline_normal(zone, nr_pages))  __offline_pages()
  1748  arg.nr_pages = nr_pages;  __offline_pages()
  1749  node_states_check_changes_offline(nr_pages, zone, &arg);  __offline_pages()
  1851  int offline_pages(unsigned long start_pfn, unsigned long nr_pages)  offline_pages() argument
  1853  return __offline_pages(start_pfn, start_pfn + nr_pages, 120 * HZ);  offline_pages()
  502   __add_pages(int nid, struct zone *zone, unsigned long phys_start_pfn, unsigned long nr_pages)  __add_pages() argument
  765   __remove_pages(struct zone *zone, unsigned long phys_start_pfn, unsigned long nr_pages)  __remove_pages() argument
process_vm_access.c
  87   unsigned long nr_pages;  process_vm_rw_single_vec() local
  95   nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1;  process_vm_rw_single_vec()
  97   while (!rc && nr_pages && iov_iter_count(iter)) {  process_vm_rw_single_vec()
  98   int pages = min(nr_pages, max_pages_per_loop);  process_vm_rw_single_vec()
  116  nr_pages -= pages;  process_vm_rw_single_vec()
  152  unsigned long nr_pages = 0;  process_vm_rw_core() local
  168  nr_pages = max(nr_pages, nr_pages_iov);  process_vm_rw_core()
  172  if (nr_pages == 0)  process_vm_rw_core()
  175  if (nr_pages > PVM_MAX_PP_ARRAY_COUNT) {  process_vm_rw_core()
  179  sizeof(struct pages *)*nr_pages),  process_vm_rw_core()
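process_vm_access.c implements the process_vm_readv()/process_vm_writev() syscalls; the expression at line 95 counts how many pages the span [addr, addr + len) touches (for addr = 0x1ffc and len = 16 with 4 KiB pages it yields 2). A hedged userspace sketch of the read side, using the glibc wrapper from <sys/uio.h>; peek_remote() is a hypothetical helper:

#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>

/* Read 64 bytes out of another process's address space.
 * target_pid and remote_addr are placeholders obtained elsewhere. */
int peek_remote(pid_t target_pid, void *remote_addr)
{
	char buf[64];
	struct iovec local  = { .iov_base = buf,         .iov_len = sizeof(buf) };
	struct iovec remote = { .iov_base = remote_addr, .iov_len = sizeof(buf) };

	ssize_t n = process_vm_readv(target_pid, &local, 1, &remote, 1, 0);
	if (n < 0) {
		perror("process_vm_readv");
		return -1;
	}
	printf("copied %zd bytes\n", n);
	return 0;
}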
gup.c
  402   * @nr_pages: number of pages from start to pin
  405   * Should be at least nr_pages long. Or NULL, if caller
  412   * requested. If nr_pages is 0 or negative, returns 0. If no pages
  454   unsigned long start, unsigned long nr_pages,  __get_user_pages()
  462   if (!nr_pages)  __get_user_pages()
  498   &start, &nr_pages, i,  __get_user_pages()
  550   if (page_increm > nr_pages)  __get_user_pages()
  551   page_increm = nr_pages;  __get_user_pages()
  554   nr_pages -= page_increm;  __get_user_pages()
  555   } while (nr_pages);  __get_user_pages()
  624   unsigned long nr_pages,  __get_user_pages_locked()
  651   ret = __get_user_pages(tsk, mm, start, nr_pages, flags, pages,  __get_user_pages_locked()
  660   BUG_ON(ret >= nr_pages);  __get_user_pages_locked()
  668   nr_pages -= ret;  __get_user_pages_locked()
  670   if (!nr_pages)  __get_user_pages_locked()
  699   nr_pages--;  __get_user_pages_locked()
  701   if (!nr_pages)  __get_user_pages_locked()
  739   unsigned long start, unsigned long nr_pages,  get_user_pages_locked()
  743   return __get_user_pages_locked(tsk, mm, start, nr_pages, write, force,  get_user_pages_locked()
  759   unsigned long start, unsigned long nr_pages,  __get_user_pages_unlocked()
  766   ret = __get_user_pages_locked(tsk, mm, start, nr_pages, write, force,  __get_user_pages_unlocked()
  792   unsigned long start, unsigned long nr_pages,  get_user_pages_unlocked()
  795   return __get_user_pages_unlocked(tsk, mm, start, nr_pages, write,  get_user_pages_unlocked()
  806   * @nr_pages: number of pages from start to pin
  811   * Should be at least nr_pages long. Or NULL, if caller
  817   * requested. If nr_pages is 0 or negative, returns 0. If no pages
  856   unsigned long start, unsigned long nr_pages, int write,  get_user_pages()
  859   return __get_user_pages_locked(tsk, mm, start, nr_pages, write, force,  get_user_pages()
  887   unsigned long nr_pages = (end - start) / PAGE_SIZE;  populate_vma_page_range() local
  919   return __get_user_pages(current, mm, start, nr_pages, gup_flags,  populate_vma_page_range()
  1325  int __get_user_pages_fast(unsigned long start, int nr_pages, int write,  __get_user_pages_fast() argument
  1336  len = (unsigned long) nr_pages << PAGE_SHIFT;  __get_user_pages_fast()
  1382  * @nr_pages: number of pages from start to pin
  1385  * Should be at least nr_pages long.
  1392  * requested. If nr_pages is 0 or negative, returns 0. If no pages
  1395  int get_user_pages_fast(unsigned long start, int nr_pages, int write,  get_user_pages_fast() argument
  1402  nr = __get_user_pages_fast(start, nr_pages, write, pages);  get_user_pages_fast()
  1405  if (nr < nr_pages) {  get_user_pages_fast()
  1411  nr_pages - nr, write, 0, pages);  get_user_pages_fast()
  453   __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas, int *nonblocking)  __get_user_pages() argument
  621   __get_user_pages_locked(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, unsigned long nr_pages, int write, int force, struct page **pages, struct vm_area_struct **vmas, int *locked, bool notify_drop, unsigned int flags)  __get_user_pages_locked() argument
  738   get_user_pages_locked(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, unsigned long nr_pages, int write, int force, struct page **pages, int *locked)  get_user_pages_locked() argument
  758   __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, unsigned long nr_pages, int write, int force, struct page **pages, unsigned int gup_flags)  __get_user_pages_unlocked() argument
  791   get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, unsigned long nr_pages, int write, int force, struct page **pages)  get_user_pages_unlocked() argument
  855   get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, unsigned long nr_pages, int write, int force, struct page **pages, struct vm_area_struct **vmas)  get_user_pages() argument
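The kernel-doc above spells out the GUP contract: the return value is the number of pages actually pinned, which may be fewer than nr_pages. A minimal sketch of a 4.4-era caller that copes with partial pins; my_pin_user_buffer() is hypothetical, while get_user_pages_fast() matches the prototype in the listing:

/* Hedged sketch: pin an aligned user buffer for DMA-style access. */
static int my_pin_user_buffer(unsigned long uaddr, int nr_pages,
			      struct page **pages)
{
	int i, pinned;

	pinned = get_user_pages_fast(uaddr, nr_pages, 1 /* write */, pages);
	if (pinned == nr_pages)
		return 0;

	/* Partial pin: release whatever was pinned and report failure. */
	for (i = 0; i < pinned; i++)
		put_page(pages[i]);
	return pinned < 0 ? pinned : -EFAULT;
}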
memcontrol.c
  551   unsigned long nr_pages = page_counter_read(&memcg->memory);  soft_limit_excess() local
  555   if (nr_pages > soft_limit)  soft_limit_excess()
  556   excess = nr_pages - soft_limit;  soft_limit_excess()
  700   int nr_pages)  mem_cgroup_charge_statistics()
  708   nr_pages);  mem_cgroup_charge_statistics()
  711   nr_pages);  mem_cgroup_charge_statistics()
  715   nr_pages);  mem_cgroup_charge_statistics()
  718   if (nr_pages > 0)  mem_cgroup_charge_statistics()
  722   nr_pages = -nr_pages; /* for event */  mem_cgroup_charge_statistics()
  725   __this_cpu_add(memcg->stat->nr_page_events, nr_pages);  mem_cgroup_charge_statistics()
  1120  * @nr_pages: positive when adding or negative when removing
  1126  int nr_pages)  mem_cgroup_update_lru_size()
  1136  *lru_size += nr_pages;  mem_cgroup_update_lru_size()
  1869  unsigned int nr_pages;  member in struct:memcg_stock_pcp
  1880  * @nr_pages: how many pages to charge.
  1883  * stock, and at least @nr_pages are available in that stock. Failure to
  1888  static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)  consume_stock() argument
  1893  if (nr_pages > CHARGE_BATCH)  consume_stock()
  1897  if (memcg == stock->cached && stock->nr_pages >= nr_pages) {  consume_stock()
  1898  stock->nr_pages -= nr_pages;  consume_stock()
  1912  if (stock->nr_pages) {  drain_stock()
  1913  page_counter_uncharge(&old->memory, stock->nr_pages);  drain_stock()
  1915  page_counter_uncharge(&old->memsw, stock->nr_pages);  drain_stock()
  1916  css_put_many(&old->css, stock->nr_pages);  drain_stock()
  1917  stock->nr_pages = 0;  drain_stock()
  1937  static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)  refill_stock() argument
  1945  stock->nr_pages += nr_pages;  refill_stock()
  1968  if (!memcg || !stock->nr_pages)  for_each_online_cpu()
  2008  unsigned int nr_pages = current->memcg_nr_pages_over_high;  mem_cgroup_handle_over_high() local
  2011  if (likely(!nr_pages))  mem_cgroup_handle_over_high()
  2020  try_to_free_mem_cgroup_pages(pos, nr_pages, GFP_KERNEL, true);  mem_cgroup_handle_over_high()
  2028  unsigned int nr_pages)  try_charge()
  2030  unsigned int batch = max(CHARGE_BATCH, nr_pages);  try_charge()
  2041  if (consume_stock(memcg, nr_pages))  try_charge()
  2056  if (batch > nr_pages) {  try_charge()
  2057  batch = nr_pages;  try_charge()
  2080  nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,  try_charge()
  2083  if (mem_cgroup_margin(mem_over_limit) >= nr_pages)  try_charge()
  2103  if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))  try_charge()
  2124  get_order(nr_pages * PAGE_SIZE));  try_charge()
  2134  page_counter_charge(&memcg->memory, nr_pages);  try_charge()
  2136  page_counter_charge(&memcg->memsw, nr_pages);  try_charge()
  2137  css_get_many(&memcg->css, nr_pages);  try_charge()
  2143  if (batch > nr_pages)  try_charge()
  2144  refill_stock(memcg, batch - nr_pages);  try_charge()
  2166  static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)  cancel_charge() argument
  2171  page_counter_uncharge(&memcg->memory, nr_pages);  cancel_charge()
  2173  page_counter_uncharge(&memcg->memsw, nr_pages);  cancel_charge()
  2175  css_put_many(&memcg->css, nr_pages);  cancel_charge()
  2408  unsigned int nr_pages = 1 << order;  __memcg_kmem_charge_memcg() local
  2415  if (!page_counter_try_charge(&memcg->kmem, nr_pages, &counter))  __memcg_kmem_charge_memcg()
  2418  ret = try_charge(memcg, gfp, nr_pages);  __memcg_kmem_charge_memcg()
  2420  page_counter_uncharge(&memcg->kmem, nr_pages);  __memcg_kmem_charge_memcg()
  2443  unsigned int nr_pages = 1 << order;  __memcg_kmem_uncharge() local
  2450  page_counter_uncharge(&memcg->kmem, nr_pages);  __memcg_kmem_uncharge()
  2451  page_counter_uncharge(&memcg->memory, nr_pages);  __memcg_kmem_uncharge()
  2453  page_counter_uncharge(&memcg->memsw, nr_pages);  __memcg_kmem_uncharge()
  2456  css_put_many(&memcg->css, nr_pages);  __memcg_kmem_uncharge()
  2898  unsigned long nr_pages)  memcg_activate_kmem()
  2937  err = page_counter_limit(&memcg->kmem, nr_pages);  memcg_activate_kmem()
  3002  unsigned long nr_pages;  mem_cgroup_write() local
  3006  ret = page_counter_memparse(buf, "-1", &nr_pages);  mem_cgroup_write()
  3018  ret = mem_cgroup_resize_limit(memcg, nr_pages);  mem_cgroup_write()
  3021  ret = mem_cgroup_resize_memsw_limit(memcg, nr_pages);  mem_cgroup_write()
  3024  ret = memcg_update_kmem_limit(memcg, nr_pages);  mem_cgroup_write()
  3029  memcg->soft_limit = nr_pages;  mem_cgroup_write()
  4535  * @nr_pages: number of regular pages (>1 for huge pages)
  4541  * - compound_lock is held when nr_pages > 1
  4547  unsigned int nr_pages,  mem_cgroup_move_account()
  4564  if (nr_pages > 1 && !PageTransHuge(page))  mem_cgroup_move_account()
  4584  nr_pages);  mem_cgroup_move_account()
  4586  nr_pages);  mem_cgroup_move_account()
  4599  nr_pages);  mem_cgroup_move_account()
  4601  nr_pages);  mem_cgroup_move_account()
  4607  nr_pages);  mem_cgroup_move_account()
  4609  nr_pages);  mem_cgroup_move_account()
  4625  mem_cgroup_charge_statistics(to, page, nr_pages);  mem_cgroup_move_account()
  4627  mem_cgroup_charge_statistics(from, page, -nr_pages);  mem_cgroup_move_account()
  5129  unsigned long nr_pages;  memory_high_write() local
  5140  nr_pages = page_counter_read(&memcg->memory);  memory_high_write()
  5141  if (nr_pages > high)  memory_high_write()
  5142  try_to_free_mem_cgroup_pages(memcg, nr_pages - high,  memory_high_write()
  5179  unsigned long nr_pages = page_counter_read(&memcg->memory);  memory_max_write() local
  5181  if (nr_pages <= max)  memory_max_write()
  5196  if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,  memory_max_write()
  5330  unsigned int nr_pages = 1;  mem_cgroup_try_charge() local
  5361  nr_pages <<= compound_order(page);  mem_cgroup_try_charge()
  5368  ret = try_charge(memcg, gfp_mask, nr_pages);  mem_cgroup_try_charge()
  5395  unsigned int nr_pages = 1;  mem_cgroup_commit_charge() local
  5413  nr_pages <<= compound_order(page);  mem_cgroup_commit_charge()
  5418  mem_cgroup_charge_statistics(memcg, page, nr_pages);  mem_cgroup_commit_charge()
  5442  unsigned int nr_pages = 1;  mem_cgroup_cancel_charge() local
  5455  nr_pages <<= compound_order(page);  mem_cgroup_cancel_charge()
  5459  cancel_charge(memcg, nr_pages);  mem_cgroup_cancel_charge()
  5466  unsigned long nr_pages = nr_anon + nr_file;  uncharge_batch() local
  5470  page_counter_uncharge(&memcg->memory, nr_pages);  uncharge_batch()
  5472  page_counter_uncharge(&memcg->memsw, nr_pages);  uncharge_batch()
  5481  __this_cpu_add(memcg->stat->nr_page_events, nr_pages);  uncharge_batch()
  5486  css_put_many(&memcg->css, nr_pages);  uncharge_batch()
  5501  unsigned int nr_pages = 1;  uncharge_list() local
  5528  nr_pages <<= compound_order(page);  uncharge_list()
  5530  nr_huge += nr_pages;  uncharge_list()
  5534  nr_anon += nr_pages;  uncharge_list()
  5536  nr_file += nr_pages;  uncharge_list()
  698   mem_cgroup_charge_statistics(struct mem_cgroup *memcg, struct page *page, int nr_pages)  mem_cgroup_charge_statistics() argument
  1125  mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru, int nr_pages)  mem_cgroup_update_lru_size() argument
  2027  try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, unsigned int nr_pages)  try_charge() argument
  2897  memcg_activate_kmem(struct mem_cgroup *memcg, unsigned long nr_pages)  memcg_activate_kmem() argument
  4546  mem_cgroup_move_account(struct page *page, unsigned int nr_pages, struct mem_cgroup *from, struct mem_cgroup *to)  mem_cgroup_move_account() argument
migrate.c
  479   int nr_pages)  __copy_gigantic_page()
  485   for (i = 0; i < nr_pages; ) {  __copy_gigantic_page()
  498   int nr_pages;  copy_huge_page() local
  503   nr_pages = pages_per_huge_page(h);  copy_huge_page()
  505   if (unlikely(nr_pages > MAX_ORDER_NR_PAGES)) {  copy_huge_page()
  506   __copy_gigantic_page(dst, src, nr_pages);  copy_huge_page()
  512   nr_pages = hpage_nr_pages(src);  copy_huge_page()
  515   for (i = 0; i < nr_pages; i++) {  copy_huge_page()
  1321  unsigned long nr_pages,  do_pages_move()
  1345  chunk_start < nr_pages;  do_pages_move()
  1349  if (chunk_start + chunk_nr_pages > nr_pages)  do_pages_move()
  1350  chunk_nr_pages = nr_pages - chunk_start;  do_pages_move()
  1406  static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,  do_pages_stat_array() argument
  1413  for (i = 0; i < nr_pages; i++) {  do_pages_stat_array()
  1445  static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,  do_pages_stat() argument
  1453  while (nr_pages) {  do_pages_stat()
  1456  chunk_nr = nr_pages;  do_pages_stat()
  1470  nr_pages -= chunk_nr;  do_pages_stat()
  1472  return nr_pages ? -EFAULT : 0;  do_pages_stat()
  1479  SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,  SYSCALL_DEFINE6()
  1534  err = do_pages_move(mm, task_nodes, nr_pages, pages,  SYSCALL_DEFINE6()
  1537  err = do_pages_stat(mm, nr_pages, pages, status);  SYSCALL_DEFINE6()
  1602  unsigned long nr_pages)  numamigrate_update_ratelimit()
  1618  nr_pages);  numamigrate_update_ratelimit()
  1628  pgdat->numabalancing_migrate_nr_pages += nr_pages;  numamigrate_update_ratelimit()
  478   __copy_gigantic_page(struct page *dst, struct page *src, int nr_pages)  __copy_gigantic_page() argument
  1320  do_pages_move(struct mm_struct *mm, nodemask_t task_nodes, unsigned long nr_pages, const void __user * __user *pages, const int __user *nodes, int __user *status, int flags)  do_pages_move() argument
  1601  numamigrate_update_ratelimit(pg_data_t *pgdat, unsigned long nr_pages)  numamigrate_update_ratelimit() argument
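The SYSCALL_DEFINE6(move_pages, ...) entry above is the kernel side of move_pages(2). A hedged userspace sketch using the libnuma wrapper from <numaif.h> (link with -lnuma); passing nodes == NULL turns the call into a pure status query, which is the do_pages_stat() path in the listing, while a real migration would pass target nodes plus MPOL_MF_MOVE:

#include <numaif.h>	/* move_pages() */
#include <stdio.h>

/* Ask which NUMA node backs one page of this process (pid 0). */
int where_is(void *addr)
{
	void *pages[1] = { addr };
	int status[1];

	if (move_pages(0, 1, pages, NULL, status, 0) != 0) {
		perror("move_pages");
		return -1;
	}
	printf("page at %p is on node %d\n", addr, status[0]);
	return status[0];
}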
mlock.c
  175  int nr_pages;  munlock_vma_page() local
  188  nr_pages = hpage_nr_pages(page);  munlock_vma_page()
  192  __mod_zone_page_state(zone, NR_MLOCK, -nr_pages);  munlock_vma_page()
  205  return nr_pages - 1;  munlock_vma_page()
  503  int nr_pages;  mlock_fixup() local
  537  nr_pages = (end - start) >> PAGE_SHIFT;  mlock_fixup()
  539  nr_pages = -nr_pages;  mlock_fixup()
  540  mm->locked_vm += nr_pages;  mlock_fixup()
util.c
  247  int nr_pages, int write, struct page **pages)  __get_user_pages_fast()
  256  * @nr_pages: number of pages from start to pin
  259  * Should be at least nr_pages long.
  262  * requested. If nr_pages is 0 or negative, returns 0. If no pages
  278  int nr_pages, int write, struct page **pages)  get_user_pages_fast()
  281  return get_user_pages_unlocked(current, mm, start, nr_pages,  get_user_pages_fast()
  246  __get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages)  __get_user_pages_fast() argument
  277  get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages)  get_user_pages_fast() argument
sparse.c
  198  unsigned long nr_pages = 0;  node_memmap_size_bytes() local
  206  nr_pages += PAGES_PER_SECTION;  node_memmap_size_bytes()
  209  return nr_pages * sizeof(struct page);  node_memmap_size_bytes()
  659  unsigned long magic, nr_pages;  free_map_bootmem() local
  662  nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))  free_map_bootmem()
  665  for (i = 0; i < nr_pages; i++, page++) {  free_map_bootmem()
  744  static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)  clear_hwpoisoned_pages() argument
  759  static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)  clear_hwpoisoned_pages() argument
percpu-vm.c
  133  static void __pcpu_unmap_pages(unsigned long addr, int nr_pages)  __pcpu_unmap_pages() argument
  135  unmap_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT);  __pcpu_unmap_pages()
  192  int nr_pages)  __pcpu_map_pages()
  194  return map_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT,  __pcpu_map_pages()
  191  __pcpu_map_pages(unsigned long addr, struct page **pages, int nr_pages)  __pcpu_map_pages() argument
memory-failure.c
  1037  int nr_pages = 1 << compound_order(hpage);  set_page_hwpoison_huge_page() local
  1038  for (i = 0; i < nr_pages; i++)  set_page_hwpoison_huge_page()
  1045  int nr_pages = 1 << compound_order(hpage);  clear_page_hwpoison_huge_page() local
  1046  for (i = 0; i < nr_pages; i++)  clear_page_hwpoison_huge_page()
  1075  unsigned int nr_pages;  memory_failure() local
  1097  * so nr_pages should be 1 << compound_order. OTOH when errors are on  memory_failure()
  1099  * measurement is done in normal page units. So nr_pages should be one  memory_failure()
  1103  nr_pages = 1 << compound_order(hpage);  memory_failure()
  1105  nr_pages = 1;  memory_failure()
  1106  num_poisoned_pages_add(nr_pages);  memory_failure()
  1134  num_poisoned_pages_sub(nr_pages);  memory_failure()
  1158  num_poisoned_pages_sub(nr_pages);  memory_failure()
  1218  num_poisoned_pages_sub(nr_pages);  memory_failure()
  1225  num_poisoned_pages_sub(nr_pages);  memory_failure()
  1423  unsigned int nr_pages;  unpoison_memory() local
  1468  nr_pages = 1 << compound_order(page);  unpoison_memory()
  1499  num_poisoned_pages_sub(nr_pages);  unpoison_memory()
swap.c
  375   * requested. If nr_pages is 0 or negative, returns 0. If no pages
  1080  pgoff_t start, unsigned nr_pages,  pagevec_lookup_entries()
  1083  pvec->nr = find_get_entries(mapping, start, nr_pages,  pagevec_lookup_entries()
  1114  * @nr_pages: The maximum number of pages
  1116  * pagevec_lookup() will search for and return a group of up to @nr_pages pages
  1126  pgoff_t start, unsigned nr_pages)  pagevec_lookup()
  1128  pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages);  pagevec_lookup()
  1134  pgoff_t *index, int tag, unsigned nr_pages)  pagevec_lookup_tag()
  1137  nr_pages, pvec->pages);  pagevec_lookup_tag()
  1078  pagevec_lookup_entries(struct pagevec *pvec, struct address_space *mapping, pgoff_t start, unsigned nr_pages, pgoff_t *indices)  pagevec_lookup_entries() argument
  1125  pagevec_lookup(struct pagevec *pvec, struct address_space *mapping, pgoff_t start, unsigned nr_pages)  pagevec_lookup() argument
  1133  pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping, pgoff_t *index, int tag, unsigned nr_pages)  pagevec_lookup_tag() argument
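The pagevec_lookup() kernel-doc above implies the usual gang-lookup loop over a mapping. A hedged sketch of that 4.4-era pattern; my_walk_mapping() is hypothetical, the pagevec calls match the prototypes in the listing:

/* Hedged sketch: visit every cached page of a mapping in batches. */
static void my_walk_mapping(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t index = 0;

	pagevec_init(&pvec, 0);
	while (pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE)) {
		int i;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* ... inspect page ... */
			index = page->index + 1;	/* resume after it */
		}
		pagevec_release(&pvec);	/* drop the gang references */
	}
}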
swapfile.c
  134   nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9);  discard_swap()
  145   nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);  discard_swap()
  162   pgoff_t start_page, pgoff_t nr_pages)  discard_swap_cluster()
  167   while (nr_pages) {  discard_swap_cluster()
  171   start_page < se->start_page + se->nr_pages) {  discard_swap_cluster()
  174   sector_t nr_blocks = se->nr_pages - offset;  discard_swap_cluster()
  176   if (nr_blocks > nr_pages)  discard_swap_cluster()
  177   nr_blocks = nr_pages;  discard_swap_cluster()
  179   nr_pages -= nr_blocks;  discard_swap_cluster()
  1639  offset < (se->start_page + se->nr_pages)) {  map_swap_entry()
  1690  unsigned long nr_pages, sector_t start_block)  add_swap_extent()
  1700  se->nr_pages = nr_pages;  add_swap_extent()
  1706  BUG_ON(se->start_page + se->nr_pages != start_page);  add_swap_extent()
  1707  if (se->start_block + se->nr_pages == start_block) {  add_swap_extent()
  1709  se->nr_pages += nr_pages;  add_swap_extent()
  1721  new_se->nr_pages = nr_pages;  add_swap_extent()
  161   discard_swap_cluster(struct swap_info_struct *si, pgoff_t start_page, pgoff_t nr_pages)  discard_swap_cluster() argument
  1689  add_swap_extent(struct swap_info_struct *sis, unsigned long start_page, unsigned long nr_pages, sector_t start_block)  add_swap_extent() argument
internal.h
  301  int nr_pages = hpage_nr_pages(page);  mlock_migrate_page() local
  304  __mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);  mlock_migrate_page()
  306  __mod_zone_page_state(page_zone(newpage), NR_MLOCK, nr_pages);  mlock_migrate_page() local
filemap.c
  340   int nr_pages;  __filemap_fdatawait_range() local
  348   (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,  __filemap_fdatawait_range()
  353   for (i = 0; i < nr_pages; i++) {  __filemap_fdatawait_range()
  1273  * @nr_pages: The maximum number of pages
  1277  * @nr_pages pages in the mapping. The pages are placed at @pages.
  1286  unsigned int nr_pages, struct page **pages)  find_get_pages()
  1292  if (unlikely(!nr_pages))  find_get_pages()
  1332  if (++ret == nr_pages)  find_get_pages()
  1344  * @nr_pages: The maximum number of pages
  1353  unsigned int nr_pages, struct page **pages)  find_get_pages_contig()
  1359  if (unlikely(!nr_pages))  find_get_pages_contig()
  1409  if (++ret == nr_pages)  find_get_pages_contig()
  1422  * @nr_pages: the maximum number of pages
  1429  int tag, unsigned int nr_pages, struct page **pages)  find_get_pages_tag()
  1435  if (unlikely(!nr_pages))  find_get_pages_tag()
  1481  if (++ret == nr_pages)  find_get_pages_tag()
  1285  find_get_pages(struct address_space *mapping, pgoff_t start, unsigned int nr_pages, struct page **pages)  find_get_pages() argument
  1352  find_get_pages_contig(struct address_space *mapping, pgoff_t index, unsigned int nr_pages, struct page **pages)  find_get_pages_contig() argument
  1428  find_get_pages_tag(struct address_space *mapping, pgoff_t *index, int tag, unsigned int nr_pages, struct page **pages)  find_get_pages_tag() argument
shmem.c
  1648  unsigned int loff, nr_pages, req_pages;  shmem_file_splice_read() local
  1678  nr_pages = min(req_pages, spd.nr_pages_max);  shmem_file_splice_read()
  1680  spd.nr_pages = find_get_pages_contig(mapping, index,  shmem_file_splice_read()
  1681  nr_pages, spd.pages);  shmem_file_splice_read()
  1682  index += spd.nr_pages;  shmem_file_splice_read()
  1685  while (spd.nr_pages < nr_pages) {  shmem_file_splice_read()
  1690  spd.pages[spd.nr_pages++] = page;  shmem_file_splice_read()
  1695  nr_pages = spd.nr_pages;  shmem_file_splice_read()
  1696  spd.nr_pages = 0;  shmem_file_splice_read()
  1698  for (page_nr = 0; page_nr < nr_pages; page_nr++) {  shmem_file_splice_read()
  1737  spd.nr_pages++;  shmem_file_splice_read()
  1741  while (page_nr < nr_pages)  shmem_file_splice_read()
  1744  if (spd.nr_pages)  shmem_file_splice_read()
nommu.c
  136  unsigned long start, unsigned long nr_pages,  __get_user_pages()
  152  for (i = 0; i < nr_pages; i++) {  __get_user_pages()
  186  unsigned long start, unsigned long nr_pages,  get_user_pages()
  197  return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas,  get_user_pages()
  203  unsigned long start, unsigned long nr_pages,  get_user_pages_locked()
  207  return get_user_pages(tsk, mm, start, nr_pages, write, force,  get_user_pages_locked()
  213  unsigned long start, unsigned long nr_pages,  __get_user_pages_unlocked()
  219  ret = get_user_pages(tsk, mm, start, nr_pages, write, force,  __get_user_pages_unlocked()
  227  unsigned long start, unsigned long nr_pages,  get_user_pages_unlocked()
  230  return __get_user_pages_unlocked(tsk, mm, start, nr_pages, write,  get_user_pages_unlocked()
  135  __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int foll_flags, struct page **pages, struct vm_area_struct **vmas, int *nonblocking)  __get_user_pages() argument
  185  get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, unsigned long nr_pages, int write, int force, struct page **pages, struct vm_area_struct **vmas)  get_user_pages() argument
  202  get_user_pages_locked(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, unsigned long nr_pages, int write, int force, struct page **pages, int *locked)  get_user_pages_locked() argument
  212  __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, unsigned long nr_pages, int write, int force, struct page **pages, unsigned int gup_flags)  __get_user_pages_unlocked() argument
  226  get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, unsigned long nr_pages, int write, int force, struct page **pages)  get_user_pages_unlocked() argument
hugetlb.c
  1010  int nr_pages = 1 << order;  destroy_compound_gigantic_page() local
  1013  for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {  destroy_compound_gigantic_page()
  1028  unsigned long nr_pages)  __alloc_gigantic_page()
  1030  unsigned long end_pfn = start_pfn + nr_pages;  __alloc_gigantic_page()
  1035  unsigned long nr_pages)  pfn_range_valid_gigantic()
  1037  unsigned long i, end_pfn = start_pfn + nr_pages;  pfn_range_valid_gigantic()
  1060  unsigned long start_pfn, unsigned long nr_pages)  zone_spans_last_pfn()
  1062  unsigned long last_pfn = start_pfn + nr_pages - 1;  zone_spans_last_pfn()
  1068  unsigned long nr_pages = 1 << order;  alloc_gigantic_page() local
  1076  pfn = ALIGN(z->zone_start_pfn, nr_pages);  alloc_gigantic_page()
  1077  while (zone_spans_last_pfn(z, pfn, nr_pages)) {  alloc_gigantic_page()
  1078  if (pfn_range_valid_gigantic(pfn, nr_pages)) {  alloc_gigantic_page()
  1087  ret = __alloc_gigantic_page(pfn, nr_pages);  alloc_gigantic_page()
  1092  pfn += nr_pages;  alloc_gigantic_page()
  1266  int nr_pages = 1 << order;  prep_compound_gigantic_page() local
  1273  for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {  prep_compound_gigantic_page()
  1733  unsigned long nr_pages;  return_unused_surplus_pages() local
  1742  nr_pages = min(unused_resv_pages, h->surplus_huge_pages);  return_unused_surplus_pages()
  1752  while (nr_pages--) {  return_unused_surplus_pages()
  3824  unsigned long *position, unsigned long *nr_pages,  follow_hugetlb_page()
  3829  unsigned long remainder = *nr_pages;  follow_hugetlb_page()
  3925  *nr_pages = remainder;  follow_hugetlb_page()
  1027  __alloc_gigantic_page(unsigned long start_pfn, unsigned long nr_pages)  __alloc_gigantic_page() argument
  1034  pfn_range_valid_gigantic(unsigned long start_pfn, unsigned long nr_pages)  pfn_range_valid_gigantic() argument
  1059  zone_spans_last_pfn(const struct zone *zone, unsigned long start_pfn, unsigned long nr_pages)  zone_spans_last_pfn() argument
  3822  follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, struct page **pages, struct vm_area_struct **vmas, unsigned long *position, unsigned long *nr_pages, long i, unsigned int flags)  follow_hugetlb_page() argument
page-writeback.c
  278   unsigned long nr_pages;  zone_dirtyable_memory() local
  280   nr_pages = zone_page_state(zone, NR_FREE_PAGES);  zone_dirtyable_memory()
  281   nr_pages -= min(nr_pages, zone->dirty_balance_reserve);  zone_dirtyable_memory()
  283   nr_pages += zone_page_state(zone, NR_INACTIVE_FILE);  zone_dirtyable_memory()
  284   nr_pages += zone_page_state(zone, NR_ACTIVE_FILE);  zone_dirtyable_memory()
  286   return nr_pages;  zone_dirtyable_memory()
  1969  int nr_pages = global_page_state(NR_FILE_DIRTY) +  laptop_mode_timer_fn() local
  1983  wb_start_writeback(wb, nr_pages, true,  laptop_mode_timer_fn()
  2151  int nr_pages;  write_cache_pages() local
  2187  nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,  write_cache_pages()
  2189  if (nr_pages == 0)  write_cache_pages()
  2192  for (i = 0; i < nr_pages; i++) {  write_cache_pages()
readahead.c
  112  struct list_head *pages, unsigned nr_pages)  read_pages()
  121  ret = mapping->a_ops->readpages(filp, mapping, pages, nr_pages);  read_pages()
  127  for (page_idx = 0; page_idx < nr_pages; page_idx++) {  read_pages()
  111  read_pages(struct address_space *mapping, struct file *filp, struct list_head *pages, unsigned nr_pages)  read_pages() argument
vmalloc.c
  1476  for (i = 0; i < area->nr_pages; i++) {  __vunmap()
  1584  unsigned int nr_pages, array_size, i;  __vmalloc_area_node() local
  1588  nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;  __vmalloc_area_node()
  1589  array_size = (nr_pages * sizeof(struct page *));  __vmalloc_area_node()
  1591  area->nr_pages = nr_pages;  __vmalloc_area_node()
  1607  for (i = 0; i < area->nr_pages; i++) {  __vmalloc_area_node()
  1617  area->nr_pages = i;  __vmalloc_area_node()
  1632  (area->nr_pages*PAGE_SIZE), area->size);  __vmalloc_area_node()
  2607  for (nr = 0; nr < v->nr_pages; nr++)  show_numa_info()
  2636  if (v->nr_pages)  s_show()
  2637  seq_printf(m, " pages=%d", v->nr_pages);  s_show()
/linux-4.4.14/include/linux/
page_counter.h
  37  void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages);
  38  void page_counter_charge(struct page_counter *counter, unsigned long nr_pages);
  40  unsigned long nr_pages,
  42  void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages);
  45  unsigned long *nr_pages);
hugetlb_cgroup.h
  54   extern int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
  56   extern void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
  59   extern void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
  61   extern void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
  85   hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,  hugetlb_cgroup_charge_cgroup() argument
  92   hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,  hugetlb_cgroup_commit_charge() argument
  100  hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages, struct page *page)  hugetlb_cgroup_uncharge_page() argument
  106  hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,  hugetlb_cgroup_uncharge_cgroup() argument
mm_inline.h
  28  int nr_pages = hpage_nr_pages(page);  add_page_to_lru_list() local
  29  mem_cgroup_update_lru_size(lruvec, lru, nr_pages);  add_page_to_lru_list()
  31  __mod_zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru, nr_pages);  add_page_to_lru_list() local
  37  int nr_pages = hpage_nr_pages(page);  del_page_from_lru_list() local
  38  mem_cgroup_update_lru_size(lruvec, lru, -nr_pages);  del_page_from_lru_list()
  40  __mod_zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru, -nr_pages);  del_page_from_lru_list()
quicklist.h
  18  int nr_pages;  member in struct:quicklist
  43  q->nr_pages--;  quicklist_alloc()
  63  q->nr_pages++;  __quicklist_free()
pagevec.h
  31  pgoff_t start, unsigned nr_pages);
  34  unsigned nr_pages);
memory_hotplug.h
  84   extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
  106  unsigned long nr_pages);
  111  unsigned long nr_pages);
  245  extern int is_mem_section_removable(unsigned long pfn, unsigned long nr_pages);  mem_hotplug_done()
  247  extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);  mem_hotplug_done()
  252  unsigned long nr_pages)  is_mem_section_removable()
  259  static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages)  offline_pages() argument
  274  extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
  251  is_mem_section_removable(unsigned long pfn, unsigned long nr_pages)  is_mem_section_removable() argument
memory.h
  50  unsigned long nr_pages;  member in struct:memory_notify
  58  * range [start_pfn, start_pfn + nr_pages) which are owned by code
  65  unsigned int nr_pages; /* # pages in range to check */  member in struct:memory_isolate_notify
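struct memory_notify above is the payload handed to memory-hotplug notifiers. A hedged sketch of a consumer; my_mem_callback() is hypothetical, while register_memory_notifier() and the MEM_* actions are the real interface declared in <linux/memory.h>:

/* Hedged sketch: log hotplug transitions using mn->nr_pages. */
static int my_mem_callback(struct notifier_block *nb,
			   unsigned long action, void *arg)
{
	struct memory_notify *mn = arg;

	switch (action) {
	case MEM_GOING_ONLINE:
		pr_info("onlining %lu pages at pfn %#lx\n",
			mn->nr_pages, mn->start_pfn);
		break;
	case MEM_OFFLINE:
		pr_info("offlined %lu pages\n", mn->nr_pages);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block my_mem_nb = {
	.notifier_call = my_mem_callback,
};

/* registered from an init path with: register_memory_notifier(&my_mem_nb); */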
splice.h
  56  int nr_pages; /* number of populated pages in map */  member in struct:splice_pipe_desc
vmstat.h
  257  static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,  __mod_zone_freepage_state() argument
  260  __mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);  __mod_zone_freepage_state()
  262  __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);  __mod_zone_freepage_state()
swap.h
  131  pgoff_t nr_pages;  member in struct:swap_extent
  323  unsigned long nr_pages,
  330  extern unsigned long shrink_all_memory(unsigned long nr_pages);
  349  extern void check_move_unevictable_pages(struct page **, int nr_pages);
  390  unsigned long nr_pages, sector_t start_block);
pagemap.h
  359  unsigned int nr_pages, struct page **pages);
  361  unsigned int nr_pages, struct page **pages);
  363  int tag, unsigned int nr_pages, struct page **pages);
/linux-4.4.14/include/trace/events/
migrate.h
  75  TP_PROTO(struct task_struct *p, int dst_nid, unsigned long nr_pages),
  77  TP_ARGS(p, dst_nid, nr_pages),
  83  __field( unsigned long, nr_pages)
  90  __entry->nr_pages = nr_pages;
  93  TP_printk("comm=%s pid=%d dst_nid=%d nr_pages=%lu",
  97  __entry->nr_pages)
writeback.h
  242  __field(long, nr_pages)
  254  __entry->nr_pages = work->nr_pages;
  263  TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d "
  267  __entry->nr_pages,
/linux-4.4.14/drivers/media/v4l2-core/
videobuf-dma-sg.c
  66   int nr_pages)  videobuf_vmalloc_to_sg()
  72   sglist = vzalloc(nr_pages * sizeof(*sglist));  videobuf_vmalloc_to_sg()
  75   sg_init_table(sglist, nr_pages);  videobuf_vmalloc_to_sg()
  76   for (i = 0; i < nr_pages; i++, virt += PAGE_SIZE) {  videobuf_vmalloc_to_sg()
  96   int nr_pages, int offset, size_t size)  videobuf_pages_to_sg()
  103  sglist = vmalloc(nr_pages * sizeof(*sglist));  videobuf_pages_to_sg()
  106  sg_init_table(sglist, nr_pages);  videobuf_pages_to_sg()
  114  for (i = 1; i < nr_pages; i++) {  videobuf_pages_to_sg()
  176  dma->nr_pages = last-first+1;  videobuf_dma_init_user_locked()
  177  dma->pages = kmalloc(dma->nr_pages * sizeof(struct page *), GFP_KERNEL);  videobuf_dma_init_user_locked()
  182  data, size, dma->nr_pages);  videobuf_dma_init_user_locked()
  185  data & PAGE_MASK, dma->nr_pages,  videobuf_dma_init_user_locked()
  189  if (err != dma->nr_pages) {  videobuf_dma_init_user_locked()
  190  dma->nr_pages = (err >= 0) ? err : 0;  videobuf_dma_init_user_locked()
  191  dprintk(1, "get_user_pages: err=%d [%d]\n", err, dma->nr_pages);  videobuf_dma_init_user_locked()
  210  int nr_pages)  videobuf_dma_init_kernel()
  214  dprintk(1, "init kernel [%d pages]\n", nr_pages);  videobuf_dma_init_kernel()
  217  dma->vaddr_pages = kcalloc(nr_pages, sizeof(*dma->vaddr_pages),  videobuf_dma_init_kernel()
  222  dma->dma_addr = kcalloc(nr_pages, sizeof(*dma->dma_addr), GFP_KERNEL);  videobuf_dma_init_kernel()
  227  for (i = 0; i < nr_pages; i++) {  videobuf_dma_init_kernel()
  237  dma->vaddr = vmap(dma->vaddr_pages, nr_pages, VM_MAP | VM_IOREMAP,  videobuf_dma_init_kernel()
  240  dprintk(1, "vmalloc_32(%d pages) failed\n", nr_pages);  videobuf_dma_init_kernel()
  246  nr_pages << PAGE_SHIFT);  videobuf_dma_init_kernel()
  248  memset(dma->vaddr, 0, nr_pages << PAGE_SHIFT);  videobuf_dma_init_kernel()
  249  dma->nr_pages = nr_pages;  videobuf_dma_init_kernel()
  270  dma_addr_t addr, int nr_pages)  videobuf_dma_init_overlay()
  273  nr_pages, (unsigned long)addr);  videobuf_dma_init_overlay()
  280  dma->nr_pages = nr_pages;  videobuf_dma_init_overlay()
  288  BUG_ON(0 == dma->nr_pages);  videobuf_dma_map()
  291  dma->sglist = videobuf_pages_to_sg(dma->pages, dma->nr_pages,  videobuf_dma_map()
  296  dma->nr_pages);  videobuf_dma_map()
  305  sg_dma_len(&dma->sglist[0]) = dma->nr_pages * PAGE_SIZE;  videobuf_dma_map()
  314  dma->nr_pages, dma->direction);  videobuf_dma_map()
  352  for (i = 0; i < dma->nr_pages; i++)  videobuf_dma_free()
  359  for (i = 0; i < dma->nr_pages; i++) {  videobuf_dma_free()
  65   videobuf_vmalloc_to_sg(unsigned char *virt, int nr_pages)  videobuf_vmalloc_to_sg() argument
  95   videobuf_pages_to_sg(struct page **pages, int nr_pages, int offset, size_t size)  videobuf_pages_to_sg() argument
  209  videobuf_dma_init_kernel(struct videobuf_dmabuf *dma, int direction, int nr_pages)  videobuf_dma_init_kernel() argument
  269  videobuf_dma_init_overlay(struct videobuf_dmabuf *dma, int direction, dma_addr_t addr, int nr_pages)  videobuf_dma_init_overlay() argument
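videobuf_vmalloc_to_sg() above builds a scatterlist one page at a time because vmalloc memory is only virtually contiguous. A hedged sketch of that pattern; build_sg_from_vmalloc() is hypothetical, the scatterlist and vmalloc helpers are the standard kernel APIs:

/* Hedged sketch: translate a vmalloc buffer into a scatterlist.
 * Assumes 'virt' came from vmalloc() and is page-aligned. */
static struct scatterlist *build_sg_from_vmalloc(void *virt, int nr_pages)
{
	struct scatterlist *sglist;
	struct page *pg;
	int i;

	sglist = vzalloc(nr_pages * sizeof(*sglist));
	if (!sglist)
		return NULL;
	sg_init_table(sglist, nr_pages);

	for (i = 0; i < nr_pages; i++, virt += PAGE_SIZE) {
		pg = vmalloc_to_page(virt);	/* look up each backing page */
		if (!pg) {
			vfree(sglist);
			return NULL;
		}
		sg_set_page(&sglist[i], pg, PAGE_SIZE, 0);
	}
	return sglist;
}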
/linux-4.4.14/net/9p/
trans_common.c
  21  void p9_release_pages(struct page **pages, int nr_pages)  p9_release_pages() argument
  25  for (i = 0; i < nr_pages; i++)  p9_release_pages()
trans_virtio.c
  219  * @nr_pages: number of pages to pack into the scatter/gather list
  225  struct page **pdata, int nr_pages, size_t offs, int count)  pack_sg_list_p()
  231  BUG_ON(nr_pages > (limit - start));  pack_sg_list_p()
  236  while (nr_pages) {  pack_sg_list_p()
  245  nr_pages--;  pack_sg_list_p()
  321  int nr_pages;  p9_get_mapped_pages() local
  343  nr_pages = DIV_ROUND_UP(n + *offs, PAGE_SIZE);  p9_get_mapped_pages()
  344  atomic_add(nr_pages, &vp_pinned);  p9_get_mapped_pages()
  364  nr_pages = DIV_ROUND_UP((unsigned long)p + len, PAGE_SIZE) -  p9_get_mapped_pages()
  367  *pages = kmalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);  p9_get_mapped_pages()
  373  for (index = 0; index < nr_pages; index++) {  p9_get_mapped_pages()
  224  pack_sg_list_p(struct scatterlist *sg, int start, int limit, struct page **pdata, int nr_pages, size_t offs, int count)  pack_sg_list_p() argument
/linux-4.4.14/include/xen/
balloon.h
  26  int alloc_xenballooned_pages(int nr_pages, struct page **pages);
  27  void free_xenballooned_pages(int nr_pages, struct page **pages);
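A hedged sketch of how a driver might use the two balloon.h prototypes above to borrow unpopulated frames from the Xen balloon; my_borrow_frames() is hypothetical, the alloc/free calls match the declarations in the listing:

/* Hedged sketch: borrow two ballooned-out frames, then return them. */
static int my_borrow_frames(void)
{
	struct page *pages[2];
	int ret;

	ret = alloc_xenballooned_pages(2, pages);
	if (ret)
		return ret;

	/* ... e.g. map grant references into these frames ... */

	free_xenballooned_pages(2, pages);
	return 0;
}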
grant_table.h
  195  int gnttab_alloc_pages(int nr_pages, struct page **pages);
  196  void gnttab_free_pages(int nr_pages, struct page **pages);
/linux-4.4.14/drivers/misc/mic/scif/
scif_rma.c
  82    * @nr_pages: number of pages in window
  88    scif_create_pinned_pages(int nr_pages, int prot)  scif_create_pinned_pages() argument
  97    pin->pages = scif_zalloc(nr_pages * sizeof(*pin->pages));  scif_create_pinned_pages()
  123   for (j = 0; j < pin->nr_pages; j++) {  scif_destroy_pinned_pages()
  132   pin->nr_pages * sizeof(*pin->pages));  scif_destroy_pinned_pages()
  140   * @nr_pages: number of pages
  146   struct scif_window *scif_create_window(struct scif_endpt *ep, int nr_pages,  scif_create_window() argument
  156   window->dma_addr = scif_zalloc(nr_pages * sizeof(*window->dma_addr));  scif_create_window()
  160   window->num_pages = scif_zalloc(nr_pages * sizeof(*window->num_pages));  scif_create_window()
  178   nr_pages * sizeof(*window->dma_addr));  scif_create_window()
  195   int nr_pages = window->nr_pages;  scif_destroy_incomplete_window() local
  220   scif_free(window->dma_addr, nr_pages * sizeof(*window->dma_addr));  scif_destroy_incomplete_window()
  221   scif_free(window->num_pages, nr_pages * sizeof(*window->num_pages));  scif_destroy_incomplete_window()
  273   int nr_pages, bool try_lock)  __scif_dec_pinned_vm_lock()
  275   if (!mm || !nr_pages || !scif_ulimit_check)  __scif_dec_pinned_vm_lock()
  286   mm->pinned_vm -= nr_pages;  __scif_dec_pinned_vm_lock()
  292   int nr_pages)  __scif_check_inc_pinned_vm()
  296   if (!mm || !nr_pages || !scif_ulimit_check)  __scif_check_inc_pinned_vm()
  299   locked = nr_pages;  __scif_check_inc_pinned_vm()
  323   int nr_pages = window->nr_pages;  scif_destroy_window() local
  327   __scif_dec_pinned_vm_lock(window->mm, window->nr_pages, 0);  scif_destroy_window()
  350   scif_free(window->dma_addr, nr_pages * sizeof(*window->dma_addr));  scif_destroy_window()
  351   scif_free(window->num_pages, nr_pages * sizeof(*window->num_pages));  scif_destroy_window()
  370   int nr_pages = window->nr_pages;  scif_create_remote_lookup() local
  381   window->nr_lookup = ALIGN(nr_pages * PAGE_SIZE,  scif_create_remote_lookup()
  408   for (i = 0, j = 0; i < nr_pages; i += SCIF_NR_ADDR_IN_PAGE, j++) {  scif_create_remote_lookup()
  446   for (i = 0, j = 0; i < window->nr_pages;  scif_destroy_remote_lookup()
  475   * @nr_pages: number of pages in window
  480   scif_create_remote_window(struct scif_dev *scifdev, int nr_pages)  scif_create_remote_window() argument
  490   window->nr_pages = nr_pages;  scif_create_remote_window()
  492   window->dma_addr = scif_zalloc(nr_pages * sizeof(*window->dma_addr));  scif_create_remote_window()
  496   window->num_pages = scif_zalloc(nr_pages *  scif_create_remote_window()
  524   scif_free(window->dma_addr, window->nr_pages *  scif_destroy_remote_window()
  526   scif_free(window->num_pages, window->nr_pages *  scif_destroy_remote_window()
  551   err = sg_alloc_table(window->st, window->nr_pages, GFP_KERNEL);  scif_iommu_map()
  603   for (i = 0, j = 0; i < window->nr_pages; i += nr_contig_pages, j++) {  scif_map_window()
  608   for (k = i + 1; k < window->nr_pages; k++) {  scif_map_window()
  725   scif_put_window(window, window->nr_pages);  scif_unregister_window()
  738   window->nr_pages, 1)) {  scif_unregister_window()
  768   msg.payload[1] = window->nr_pages;  scif_send_alloc_request()
  1061  int nr_pages = msg->payload[1];  scif_alloc_req() local
  1063  window = scif_create_remote_window(scifdev, nr_pages);  scif_alloc_req()
  1080  "%s %d error %d alloc_ptr %p nr_pages 0x%x\n",  scif_alloc_req()
  1081  __func__, __LINE__, err, window, nr_pages);  scif_alloc_req()
  1203  req.nr_bytes = recv_window->nr_pages << PAGE_SHIFT;  scif_recv_unreg()
  1218  scif_put_window(window, window->nr_pages);  scif_recv_unreg()
  1327  int nr_pages, err = 0, i;  __scif_pin_pages() local
  1351  nr_pages = len >> PAGE_SHIFT;  __scif_pin_pages()
  1354  pinned_pages = scif_create_pinned_pages(nr_pages, prot);  __scif_pin_pages()
  1362  for (i = 0; i < nr_pages; i++) {  __scif_pin_pages()
  1370  pinned_pages->nr_pages = nr_pages;  __scif_pin_pages()
  1388  err = __scif_check_inc_pinned_vm(mm, nr_pages);  __scif_pin_pages()
  1391  pinned_pages->nr_pages = 0;  __scif_pin_pages()
  1396  pinned_pages->nr_pages = get_user_pages(  __scif_pin_pages()
  1400  nr_pages,  __scif_pin_pages()
  1406  if (nr_pages != pinned_pages->nr_pages) {  __scif_pin_pages()
  1410  nr_pages, 0);  __scif_pin_pages()
  1412  for (i = 0; i < pinned_pages->nr_pages; i++) {  __scif_pin_pages()
  1425  if (pinned_pages->nr_pages < nr_pages) {  __scif_pin_pages()
  1427  pinned_pages->nr_pages = nr_pages;  __scif_pin_pages()
  1437  __scif_dec_pinned_vm_lock(mm, nr_pages, 0);  __scif_pin_pages()
  1440  pinned_pages->nr_pages = nr_pages;  __scif_pin_pages()
  1505  len = pinned_pages->nr_pages << PAGE_SHIFT;  scif_register_pinned_pages()
  1538  window = scif_create_window(ep, pinned_pages->nr_pages,  scif_register_pinned_pages()
  1547  window->nr_pages = pinned_pages->nr_pages;  scif_register_pinned_pages()
  1656  window->nr_pages = len >> PAGE_SHIFT;  scif_register()
  1722  int nr_pages, err;  scif_unregister() local
  1744  nr_pages = len >> PAGE_SHIFT;  scif_unregister()
  1767  err = scif_rma_list_unregister(window, offset, nr_pages);  scif_unregister()
  272   __scif_dec_pinned_vm_lock(struct mm_struct *mm, int nr_pages, bool try_lock)  __scif_dec_pinned_vm_lock() argument
  291   __scif_check_inc_pinned_vm(struct mm_struct *mm, int nr_pages)  __scif_check_inc_pinned_vm() argument
scif_mmap.c
  43   req.nr_bytes = recv_window->nr_pages << PAGE_SHIFT;  scif_recv_munmap()
  57   scif_put_window(window, window->nr_pages);  scif_recv_munmap()
  139  scif_put_window(window, window->nr_pages);  __scif_cleanup_rma_for_zombies()
  227  int nr_pages, err, i;  scif_get_pages() local
  242  nr_pages = len >> PAGE_SHIFT;  scif_get_pages()
  268  (*pages)->phys_addr = scif_zalloc(nr_pages * sizeof(dma_addr_t));  scif_get_pages()
  276  ((*pages)->va = scif_zalloc(nr_pages * sizeof(void *)));  scif_get_pages()
  284  (*pages)->nr_pages = nr_pages;  scif_get_pages()
  287  for (i = 0; i < nr_pages; i++) {  scif_get_pages()
  300  scif_get_window(window, nr_pages);  scif_get_pages()
  306  nr_pages * sizeof(dma_addr_t));  scif_get_pages()
  308  nr_pages * sizeof(void *));  scif_get_pages()
  345  scif_put_window(window, pages->nr_pages);  scif_put_pages()
  365  scif_free(pages->phys_addr, pages->nr_pages * sizeof(dma_addr_t));  scif_put_pages()
  366  scif_free(pages->va, pages->nr_pages * sizeof(void *));  scif_put_pages()
  382  int nr_pages, struct vm_area_struct *vma)  scif_rma_list_mmap()
  386  int loop_nr_pages, nr_pages_left = nr_pages;  scif_rma_list_mmap()
  397  (window->nr_pages << PAGE_SHIFT);  list_for_each_entry_from()
  428  nr_pages_left = nr_pages;
  433  (window->nr_pages << PAGE_SHIFT);  list_for_each_entry_from()
  459  s64 offset, int nr_pages)  scif_rma_list_munmap()
  463  int loop_nr_pages, nr_pages_left = nr_pages;  scif_rma_list_munmap()
  471  nr_pages_left = nr_pages;  list_for_each_entry_safe_from()
  474  (window->nr_pages << PAGE_SHIFT);  list_for_each_entry_safe_from()
  555  int nr_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;  scif_munmap() local
  569  "SCIFAPI munmap: ep %p nr_pages 0x%x offset 0x%llx\n",  scif_munmap()
  570  ep, nr_pages, offset);  scif_munmap()
  585  scif_rma_list_munmap(window, offset, nr_pages);  scif_munmap()
  617  int nr_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;  scif_mmap() local
  622  "SCIFAPI mmap: ep %p start_offset 0x%llx nr_pages 0x%x\n",  scif_mmap()
  623  ep, start_offset, nr_pages);  scif_mmap()
  681  err = scif_rma_list_mmap(window, start_offset, nr_pages, vma);  scif_mmap()
  381  scif_rma_list_mmap(struct scif_window *start_window, s64 offset, int nr_pages, struct vm_area_struct *vma)  scif_rma_list_mmap() argument
  458  scif_rma_list_munmap(struct scif_window *start_window, s64 offset, int nr_pages)  scif_rma_list_munmap() argument
scif_rma_list.c
  74   scif_set_window_ref(window, window->nr_pages);
  101  (window->nr_pages << PAGE_SHIFT);  scif_query_tcw()
  109  (window->nr_pages << PAGE_SHIFT);  list_for_each_safe()
  157  (window->nr_pages << PAGE_SHIFT);  scif_query_window()
  204  s64 offset, int nr_pages)  scif_rma_list_unregister()
  214  end_offset = window->offset + (window->nr_pages << PAGE_SHIFT);  list_for_each_entry_safe_from()
  216  nr_pages);  list_for_each_entry_safe_from()
  220  nr_pages -= loop_nr_pages;  list_for_each_entry_safe_from()
  222  if (!nr_pages)  list_for_each_entry_safe_from()
  203  scif_rma_list_unregister(struct scif_window *window, s64 offset, int nr_pages)  scif_rma_list_unregister() argument
scif_debugfs.c
  73  seq_printf(s, "nr_pages 0x%llx nr_contig_chunks 0x%x prot %d ",  scif_display_window()
  74  window->nr_pages, window->nr_contig_chunks, window->prot);  scif_display_window()
  85  for (j = 0; j < window->nr_pages; j++)  scif_display_window()
scif_rma.h
  178  * @nr_pages: Number of pages which is defined as a s64 instead of an int
  187  s64 nr_pages;  member in struct:scif_pinned_pages
  211  * @nr_pages: Number of pages which is defined as a s64 instead of an int
  243  s64 nr_pages;  member in struct:scif_window
  310  s64 offset, int nr_pages, s64 *out_offset);
  315  struct scif_window *scif_create_window(struct scif_endpt *ep, int nr_pages,
  426  static inline void scif_get_window(struct scif_window *window, int nr_pages)  scif_get_window() argument
  428  window->ref_count += nr_pages;  scif_get_window()
  431  static inline void scif_put_window(struct scif_window *window, int nr_pages)  scif_put_window() argument
  433  window->ref_count -= nr_pages;  scif_put_window()
  436  static inline void scif_set_window_ref(struct scif_window *window, int nr_pages)  scif_set_window_ref() argument
  438  window->ref_count = nr_pages;  scif_set_window_ref()
scif_dma.c
  140   end_va = start_va + (window->nr_pages << PAGE_SHIFT);  __scif_rma_destroy_tcw()
  409   (*out_window)->nr_pages = pinned_pages->nr_pages;  scif_register_temp()
  664   atomic_sub(window->nr_pages,  scif_rma_destroy_tcw_invalid()
  787   if (window->nr_pages == window->nr_contig_chunks) {  scif_off_to_dma_addr()
  851   (window->nr_pages << PAGE_SHIFT);  scif_rma_local_cpu_copy()
  856   (window->nr_pages << PAGE_SHIFT);  scif_rma_local_cpu_copy()
  954   (window->nr_pages << PAGE_SHIFT);  scif_rma_list_dma_copy_unaligned()
  962   (window->nr_pages << PAGE_SHIFT);  scif_rma_list_dma_copy_unaligned()
  1069  (window->nr_pages << PAGE_SHIFT);  scif_rma_list_dma_copy_unaligned()
  1145  (src_window->nr_pages << PAGE_SHIFT);  _scif_rma_list_dma_copy_aligned()
  1147  (dst_window->nr_pages << PAGE_SHIFT);  _scif_rma_list_dma_copy_aligned()
  1152  (src_window->nr_pages << PAGE_SHIFT);  _scif_rma_list_dma_copy_aligned()
  1158  (dst_window->nr_pages << PAGE_SHIFT);  _scif_rma_list_dma_copy_aligned()
  1310  (src_window->nr_pages << PAGE_SHIFT);  scif_rma_list_dma_copy_aligned()
  1312  (dst_window->nr_pages << PAGE_SHIFT);  scif_rma_list_dma_copy_aligned()
  1319  (src_window->nr_pages << PAGE_SHIFT);  scif_rma_list_dma_copy_aligned()
  1325  (dst_window->nr_pages << PAGE_SHIFT);  scif_rma_list_dma_copy_aligned()
  1549  (src_window->nr_pages << PAGE_SHIFT);  scif_rma_list_cpu_copy()
  1551  (dst_window->nr_pages << PAGE_SHIFT);  scif_rma_list_cpu_copy()
  1767  atomic_add_return(local_window->nr_pages,  scif_rma_copy()
H A D | scif_rma_list.h | 56 int nr_pages);
|
/linux-4.4.14/drivers/gpu/drm/radeon/ |
H A D | drm_buffer.c | 46 int nr_pages = size / PAGE_SIZE + 1; drm_buffer_alloc() local 51 *buf = kzalloc(sizeof(struct drm_buffer) + nr_pages*sizeof(char *), drm_buffer_alloc() 57 size, nr_pages); drm_buffer_alloc() 63 for (idx = 0; idx < nr_pages; ++idx) { drm_buffer_alloc() 73 idx + 1, size, nr_pages); drm_buffer_alloc() 100 int nr_pages = size / PAGE_SIZE + 1; drm_buffer_copy_from_user() local 110 for (idx = 0; idx < nr_pages; ++idx) { drm_buffer_copy_from_user() 134 int nr_pages = buf->size / PAGE_SIZE + 1; drm_buffer_free() local 136 for (idx = 0; idx < nr_pages; ++idx) drm_buffer_free()
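drm_buffer_alloc sizes its page array with size / PAGE_SIZE + 1, which always leaves room for a trailing partial page but over-counts by one whenever size is already a multiple of PAGE_SIZE. The exact count is the usual round-up; a standalone comparison (illustrative helper, not from the tree):

#include <linux/kernel.h> /* DIV_ROUND_UP */
#include <linux/mm.h>     /* PAGE_SIZE */

/* pages actually needed to hold size bytes: 4096 -> 1, 4097 -> 2 */
static inline unsigned long pages_needed(size_t size)
{
	return DIV_ROUND_UP(size, PAGE_SIZE);
}
/* by contrast, size / PAGE_SIZE + 1 yields 2 for size == 4096 */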
|
/linux-4.4.14/arch/arm/xen/ |
H A D | p2m.c | 23 unsigned long nr_pages; member in struct:xen_p2m_entry 72 entry->pfn + entry->nr_pages > pfn) { __pfn_to_mfn() 120 unsigned long mfn, unsigned long nr_pages) __set_phys_to_machine_multi() 132 p2m_entry->pfn + p2m_entry->nr_pages > pfn) { __set_phys_to_machine_multi() 153 p2m_entry->nr_pages = nr_pages; __set_phys_to_machine_multi() 119 __set_phys_to_machine_multi(unsigned long pfn, unsigned long mfn, unsigned long nr_pages) __set_phys_to_machine_multi() argument
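Both __pfn_to_mfn and __set_phys_to_machine_multi probe the tree with the same half-open interval test: entry->pfn <= pfn && entry->pfn + entry->nr_pages > pfn. Isolated as a hypothetical helper:

#include <linux/types.h>

struct pfn_range {                /* stand-in for struct xen_p2m_entry */
	unsigned long pfn;        /* first frame of the range */
	unsigned long nr_pages;   /* frames covered */
};

/* true if pfn lies in [r->pfn, r->pfn + r->nr_pages) */
static bool range_contains(const struct pfn_range *r, unsigned long pfn)
{
	return r->pfn <= pfn && pfn < r->pfn + r->nr_pages;
}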
|
/linux-4.4.14/arch/arm/mach-rpc/include/mach/ |
H A D | uncompress.h | 23 unsigned long nr_pages; member in struct:param_struct 119 unsigned int nr_pages = 0, page_size = PAGE_SIZE; arch_decomp_setup() local 138 nr_pages += (t->u.mem.size / PAGE_SIZE); arch_decomp_setup() 144 nr_pages = params->nr_pages; arch_decomp_setup() 189 if (nr_pages * page_size < 4096*1024) error("<4M of mem\n"); arch_decomp_setup()
|
/linux-4.4.14/fs/logfs/ |
H A D | dev_bdev.c | 75 size_t nr_pages) __bdev_writeseg() 84 max_pages = min_t(size_t, nr_pages, BIO_MAX_PAGES); __bdev_writeseg() 89 for (i = 0; i < nr_pages; i++) { __bdev_writeseg() 103 nr_pages -= i; __bdev_writeseg() 119 bio->bi_vcnt = nr_pages; __bdev_writeseg() 120 bio->bi_iter.bi_size = nr_pages * PAGE_SIZE; __bdev_writeseg() 167 size_t nr_pages) do_erase() 174 max_pages = min_t(size_t, nr_pages, BIO_MAX_PAGES); do_erase() 179 for (i = 0; i < nr_pages; i++) { do_erase() 193 nr_pages -= i; do_erase() 203 bio->bi_vcnt = nr_pages; do_erase() 204 bio->bi_iter.bi_size = nr_pages * PAGE_SIZE; do_erase() 74 __bdev_writeseg(struct super_block *sb, u64 ofs, pgoff_t index, size_t nr_pages) __bdev_writeseg() argument 166 do_erase(struct super_block *sb, u64 ofs, pgoff_t index, size_t nr_pages) do_erase() argument
|
H A D | dev_mtd.c | 184 size_t nr_pages) __logfs_mtd_writeseg() 191 for (i = 0; i < nr_pages; i++) { __logfs_mtd_writeseg() 183 __logfs_mtd_writeseg(struct super_block *sb, u64 ofs, pgoff_t index, size_t nr_pages) __logfs_mtd_writeseg() argument
|
/linux-4.4.14/net/ipv4/ |
H A D | tcp_memcontrol.c | 57 static int tcp_update_limit(struct mem_cgroup *memcg, unsigned long nr_pages) tcp_update_limit() argument 67 ret = page_counter_limit(&cg_proto->memory_allocated, nr_pages); tcp_update_limit() 72 cg_proto->sysctl_mem[i] = min_t(long, nr_pages, tcp_update_limit() 75 if (nr_pages == PAGE_COUNTER_MAX) tcp_update_limit() 119 unsigned long nr_pages; tcp_cgroup_write() local 127 ret = page_counter_memparse(buf, "-1", &nr_pages); tcp_cgroup_write() 131 ret = tcp_update_limit(memcg, nr_pages); tcp_cgroup_write()
|
/linux-4.4.14/drivers/xen/ |
H A D | balloon.c | 412 static enum bp_state increase_reservation(unsigned long nr_pages) increase_reservation() argument 423 if (nr_pages > ARRAY_SIZE(frame_list)) increase_reservation() 424 nr_pages = ARRAY_SIZE(frame_list); increase_reservation() 427 for (i = 0; i < nr_pages; i++) { increase_reservation() 429 nr_pages = i; increase_reservation() 441 reservation.nr_extents = nr_pages; increase_reservation() 483 static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp) decrease_reservation() argument 496 if (nr_pages > ARRAY_SIZE(frame_list)) decrease_reservation() 497 nr_pages = ARRAY_SIZE(frame_list); decrease_reservation() 499 for (i = 0; i < nr_pages; i++) { decrease_reservation() 502 nr_pages = i; decrease_reservation() 555 reservation.nr_extents = nr_pages; decrease_reservation() 557 BUG_ON(ret != nr_pages); decrease_reservation() 559 balloon_stats.current_pages -= nr_pages; decrease_reservation() 613 static int add_ballooned_pages(int nr_pages) add_ballooned_pages() argument 628 st = decrease_reservation(nr_pages, GFP_USER); add_ballooned_pages() 637 * @nr_pages: Number of pages to get 641 int alloc_xenballooned_pages(int nr_pages, struct page **pages) alloc_xenballooned_pages() argument 649 balloon_stats.target_unpopulated += nr_pages; alloc_xenballooned_pages() 651 while (pgno < nr_pages) { alloc_xenballooned_pages() 667 ret = add_ballooned_pages(nr_pages - pgno); alloc_xenballooned_pages() 683 * @nr_pages: Number of pages 686 void free_xenballooned_pages(int nr_pages, struct page **pages) free_xenballooned_pages() argument 692 for (i = 0; i < nr_pages; i++) { free_xenballooned_pages() 697 balloon_stats.target_unpopulated -= nr_pages; free_xenballooned_pages() 741 ? min(xen_start_info->nr_pages - xen_released_pages, max_pfn) balloon_init()
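alloc_xenballooned_pages hands out page structs whose frames have been returned to the hypervisor; grant-table.c below builds gnttab_alloc_pages on top of it. A condensed caller sketch against the signatures shown here (error handling trimmed):

#include <linux/kernel.h>   /* ARRAY_SIZE */
#include <linux/mm.h>
#include <xen/balloon.h>

static struct page *pages[16];

static int grab_ballooned_pages(void)
{
	int ret = alloc_xenballooned_pages(ARRAY_SIZE(pages), pages);

	if (ret)
		return ret;     /* nothing to undo on failure */
	/* ... map foreign frames into these pages, grant them, etc. ... */
	free_xenballooned_pages(ARRAY_SIZE(pages), pages);
	return 0;
}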
|
H A D | privcmd.c | 49 unsigned long nr_pages); 425 unsigned long nr_pages; privcmd_ioctl_mmap_batch() local 449 nr_pages = DIV_ROUND_UP(m.num, XEN_PFN_PER_PAGE); privcmd_ioctl_mmap_batch() 450 if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT))) privcmd_ioctl_mmap_batch() 492 m.addr + (nr_pages << PAGE_SHIFT) != vma->vm_end) { privcmd_ioctl_mmap_batch() 497 ret = alloc_empty_pages(vma, nr_pages); privcmd_ioctl_mmap_batch() 504 m.addr + (nr_pages << PAGE_SHIFT) > vma->vm_end) { privcmd_ioctl_mmap_batch() 508 if (privcmd_vma_range_is_mapped(vma, m.addr, nr_pages)) { privcmd_ioctl_mmap_batch() 641 unsigned long nr_pages) privcmd_vma_range_is_mapped() 643 return apply_to_page_range(vma->vm_mm, addr, nr_pages << PAGE_SHIFT, privcmd_vma_range_is_mapped() 638 privcmd_vma_range_is_mapped( struct vm_area_struct *vma, unsigned long addr, unsigned long nr_pages) privcmd_vma_range_is_mapped() argument
|
H A D | grant-table.c | 682 * @nr_pages: number of pages to alloc 685 int gnttab_alloc_pages(int nr_pages, struct page **pages) gnttab_alloc_pages() argument 690 ret = alloc_xenballooned_pages(nr_pages, pages); gnttab_alloc_pages() 694 for (i = 0; i < nr_pages; i++) { gnttab_alloc_pages() 700 gnttab_free_pages(nr_pages, pages); gnttab_alloc_pages() 714 * @nr_pages: number of pages to free 717 void gnttab_free_pages(int nr_pages, struct page **pages) gnttab_free_pages() argument 721 for (i = 0; i < nr_pages; i++) { gnttab_free_pages() 729 free_xenballooned_pages(nr_pages, pages); gnttab_free_pages()
|
H A D | swiotlb-xen.c | 115 int nr_pages; check_pages_physically_contiguous() local 118 nr_pages = (offset + length + XEN_PAGE_SIZE-1) >> XEN_PAGE_SHIFT; check_pages_physically_contiguous() 120 for (i = 1; i < nr_pages; i++) { check_pages_physically_contiguous()
|
/linux-4.4.14/net/rds/ |
H A D | info.c | 163 unsigned long nr_pages = 0; rds_info_getsockopt() local 188 nr_pages = (PAGE_ALIGN(start + len) - (start & PAGE_MASK)) rds_info_getsockopt() 191 pages = kmalloc(nr_pages * sizeof(struct page *), GFP_KERNEL); rds_info_getsockopt() 196 ret = get_user_pages_fast(start, nr_pages, 1, pages); rds_info_getsockopt() 197 if (ret != nr_pages) { rds_info_getsockopt() 199 nr_pages = ret; rds_info_getsockopt() 201 nr_pages = 0; rds_info_getsockopt() 206 rdsdebug("len %d nr_pages %lu\n", len, nr_pages); rds_info_getsockopt() 238 for (i = 0; pages && i < nr_pages; i++) rds_info_getsockopt()
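rds_info_getsockopt sizes its page array with (PAGE_ALIGN(start + len) - (start & PAGE_MASK)) >> PAGE_SHIFT, i.e. the number of pages an unaligned user range touches: with 4 KiB pages, start = 0x1ffc and len = 8 span two pages even though len is tiny. The same computation as a helper:

#include <linux/mm.h> /* PAGE_ALIGN, PAGE_MASK, PAGE_SHIFT */

/* pages touched by the byte range [start, start + len) */
static unsigned long pages_spanned(unsigned long start, unsigned long len)
{
	return (PAGE_ALIGN(start + len) - (start & PAGE_MASK)) >> PAGE_SHIFT;
}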
|
H A D | rdma.c | 157 static int rds_pin_pages(unsigned long user_addr, unsigned int nr_pages, rds_pin_pages() argument 162 ret = get_user_pages_fast(user_addr, nr_pages, write, pages); rds_pin_pages() 164 if (ret >= 0 && ret < nr_pages) { rds_pin_pages() 177 unsigned int nr_pages; __rds_rdma_map() local 197 nr_pages = rds_pages_in_vec(&args->vec); __rds_rdma_map() 198 if (nr_pages == 0) { __rds_rdma_map() 203 rdsdebug("RDS: get_mr addr %llx len %llu nr_pages %u\n", __rds_rdma_map() 204 args->vec.addr, args->vec.bytes, nr_pages); __rds_rdma_map() 206 /* XXX clamp nr_pages to limit the size of this alloc? */ __rds_rdma_map() 207 pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL); __rds_rdma_map() 241 ret = rds_pin_pages(args->vec.addr, nr_pages, pages, 1); __rds_rdma_map() 488 unsigned int nr_pages; rds_rdma_pages() local 493 nr_pages = rds_pages_in_vec(&iov[i]); rds_rdma_pages() 494 if (nr_pages == 0) rds_rdma_pages() 497 tot_pages += nr_pages; rds_rdma_pages() 500 * nr_pages for one entry is limited to (UINT_MAX>>PAGE_SHIFT)+1, rds_rdma_pages() 515 unsigned int nr_pages; rds_rdma_extra_size() local 526 nr_pages = rds_pages_in_vec(&vec); rds_rdma_extra_size() 527 if (nr_pages == 0) rds_rdma_extra_size() 530 tot_pages += nr_pages; rds_rdma_extra_size() 533 * nr_pages for one entry is limited to (UINT_MAX>>PAGE_SHIFT)+1, rds_rdma_extra_size() 552 int nr_pages; rds_cmsg_rdma_args() local 591 nr_pages = rds_rdma_pages(iovs, args->nr_local); rds_cmsg_rdma_args() 592 if (nr_pages < 0) { rds_cmsg_rdma_args() 597 pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL); rds_cmsg_rdma_args() 609 WARN_ON(!nr_pages); rds_cmsg_rdma_args() 610 op->op_sg = rds_message_alloc_sgs(rm, nr_pages); rds_cmsg_rdma_args()
|
/linux-4.4.14/drivers/net/ethernet/ibm/ehea/ |
H A D | ehea_qmr.c | 107 int i, nr_pages; hw_queue_dtor() local 114 nr_pages = queue->queue_length / queue->pagesize; hw_queue_dtor() 116 for (i = 0; i < nr_pages; i += pages_per_kpage) hw_queue_dtor() 153 ret = hw_queue_ctor(&cq->hw_queue, cq->attr.nr_pages, ehea_create_cq() 158 for (counter = 0; counter < cq->attr.nr_pages; counter++) { ehea_create_cq() 171 cq, hret, counter, cq->attr.nr_pages); ehea_create_cq() 175 if (counter == (cq->attr.nr_pages - 1)) { ehea_create_cq() 275 ret = hw_queue_ctor(&eq->hw_queue, eq->attr.nr_pages, ehea_create_eq() 282 for (i = 0; i < eq->attr.nr_pages; i++) { ehea_create_eq() 296 if (i == (eq->attr.nr_pages - 1)) { ehea_create_eq() 378 int nr_pages, int wqe_size, int act_nr_sges, ehea_qp_alloc_register() 385 ret = hw_queue_ctor(hw_queue, nr_pages, EHEA_PAGESIZE, wqe_size); ehea_qp_alloc_register() 389 for (cnt = 0; cnt < nr_pages; cnt++) { ehea_qp_alloc_register() 628 static int ehea_update_busmap(unsigned long pfn, unsigned long nr_pages, int add) ehea_update_busmap() argument 632 if (!nr_pages) ehea_update_busmap() 642 end_section = start_section + ((nr_pages * PAGE_SIZE) / EHEA_SECTSIZE); ehea_update_busmap() 671 int ehea_add_sect_bmap(unsigned long pfn, unsigned long nr_pages) ehea_add_sect_bmap() argument 676 ret = ehea_update_busmap(pfn, nr_pages, EHEA_BUSMAP_ADD_SECT); ehea_add_sect_bmap() 681 int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages) ehea_rem_sect_bmap() argument 686 ret = ehea_update_busmap(pfn, nr_pages, EHEA_BUSMAP_REM_SECT); ehea_rem_sect_bmap() 709 unsigned long pfn, start_pfn, end_pfn, nr_pages; ehea_create_busmap_callback() local 723 nr_pages = pfn - start_pfn; ehea_create_busmap_callback() 724 ret = ehea_update_busmap(start_pfn, nr_pages, ehea_create_busmap_callback() 737 nr_pages = pfn - start_pfn; ehea_create_busmap_callback() 738 return ehea_update_busmap(start_pfn, nr_pages, EHEA_BUSMAP_ADD_SECT); ehea_create_busmap_callback() 377 ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue, int nr_pages, int wqe_size, int act_nr_sges, struct ehea_adapter *adapter, int h_call_q_selector) ehea_qp_alloc_register() argument
|
H A D | ehea.h | 260 u32 nr_pages; member in struct:ehea_eq_attr 305 u32 nr_pages; member in struct:ehea_cq_attr
|
H A D | ehea_qmr.h | 398 int ehea_add_sect_bmap(unsigned long pfn, unsigned long nr_pages); 399 int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages);
|
H A D | ehea_phyp.c | 328 cq_attr->nr_pages = outs[4]; ehea_h_alloc_resource_cq() 395 eq_attr->nr_pages = outs[4]; ehea_h_alloc_resource_eq()
|
/linux-4.4.14/arch/s390/mm/ |
H A D | init.c | 155 unsigned long nr_pages; arch_add_memory() local 164 nr_pages = (start_pfn + size_pages > dma_end_pfn) ? arch_add_memory() 168 nr_pages = (start_pfn + size_pages > normal_end_pfn) ? arch_add_memory() 172 nr_pages = size_pages; arch_add_memory() 179 start_pfn += nr_pages; arch_add_memory() 180 size_pages -= nr_pages; arch_add_memory()
|
H A D | gup.c | 176 int __get_user_pages_fast(unsigned long start, int nr_pages, int write, __get_user_pages_fast() argument 187 len = (unsigned long) nr_pages << PAGE_SHIFT; __get_user_pages_fast() 217 * @nr_pages: number of pages from start to pin 220 * Should be at least nr_pages long. 227 * requested. If nr_pages is 0 or negative, returns 0. If no pages 230 int get_user_pages_fast(unsigned long start, int nr_pages, int write, get_user_pages_fast() argument 237 nr = __get_user_pages_fast(start, nr_pages, write, pages); get_user_pages_fast() 238 if (nr == nr_pages) get_user_pages_fast() 245 nr_pages - nr, write, 0, pages); get_user_pages_fast()
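The doc comment above spells out the get_user_pages_fast contract: the return value is the number of pages actually pinned and may fall short of nr_pages. A typical caller therefore releases exactly what it got on a partial pin, much like rds_pin_pages earlier in this listing; a condensed sketch against the 4.4-era four-argument signature:

#include <linux/errno.h>
#include <linux/mm.h>

static int pin_user_range(unsigned long start, int nr_pages,
			  struct page **pages)
{
	int i, got = get_user_pages_fast(start, nr_pages, 1 /* write */, pages);

	if (got == nr_pages)
		return 0;
	for (i = 0; i < got; i++)     /* partial pin: back out */
		put_page(pages[i]);
	return got < 0 ? got : -EFAULT;
}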
|
/linux-4.4.14/drivers/firmware/efi/libstub/ |
H A D | arm64-stub.c | 26 unsigned long nr_pages; handle_kernel_image() local 56 nr_pages = round_up(kernel_memsize, EFI_ALLOC_ALIGN) / handle_kernel_image() 59 EFI_LOADER_DATA, nr_pages, handle_kernel_image()
|
H A D | efi-stub-helper.c | 148 unsigned long nr_pages; efi_high_alloc() local 165 nr_pages = round_up(size, EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE; efi_high_alloc() 176 if (desc->num_pages < nr_pages) efi_high_alloc() 209 nr_pages, &max_addr); efi_high_alloc() 234 unsigned long nr_pages; efi_low_alloc() local 250 nr_pages = round_up(size, EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE; efi_low_alloc() 261 if (desc->num_pages < nr_pages) efi_low_alloc() 281 nr_pages, &start); efi_low_alloc() 299 unsigned long nr_pages; efi_free() local 304 nr_pages = round_up(size, EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE; efi_free() 305 efi_call_early(free_pages, addr, nr_pages); efi_free() 555 unsigned long nr_pages; efi_relocate_kernel() local 573 nr_pages = round_up(alloc_size, EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE; efi_relocate_kernel() 576 nr_pages, &efi_addr); efi_relocate_kernel()
|
/linux-4.4.14/kernel/events/ |
H A D | ring_buffer.c | 128 if (unlikely(!rb->nr_pages)) perf_output_begin() 175 handle->page = (offset >> page_shift) & (rb->nr_pages - 1); perf_output_begin() 471 pgoff_t pgoff, int nr_pages, long watermark, int flags) rb_alloc_aux() 482 * We need to start with the max_order that fits in nr_pages, rb_alloc_aux() 485 max_order = ilog2(nr_pages); rb_alloc_aux() 500 rb->aux_pages = kzalloc_node(nr_pages * sizeof(void *), GFP_KERNEL, node); rb_alloc_aux() 505 for (rb->aux_nr_pages = 0; rb->aux_nr_pages < nr_pages;) { rb_alloc_aux() 509 order = min(max_order, ilog2(nr_pages - rb->aux_nr_pages)); rb_alloc_aux() 533 rb->aux_priv = event->pmu->setup_aux(event->cpu, rb->aux_pages, nr_pages, rb_alloc_aux() 552 rb->aux_watermark = nr_pages << (PAGE_SHIFT - 1); rb_alloc_aux() 608 if (pgoff > rb->nr_pages) __perf_mmap_to_page() 630 struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags) rb_alloc() argument 637 size += nr_pages * sizeof(void *); rb_alloc() 647 for (i = 0; i < nr_pages; i++) { rb_alloc() 653 rb->nr_pages = nr_pages; rb_alloc() 685 for (i = 0; i < rb->nr_pages; i++) rb_free() 693 return rb->nr_pages << page_order(rb); data_page_nr() 736 struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags) rb_alloc() argument 751 all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE); rb_alloc() 757 rb->page_order = ilog2(nr_pages); rb_alloc() 758 rb->nr_pages = !!nr_pages; rb_alloc() 470 rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event, pgoff_t pgoff, int nr_pages, long watermark, int flags) rb_alloc_aux() argument
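perf_output_begin maps a byte offset to a data page with (offset >> page_shift) & (rb->nr_pages - 1), which is only valid because nr_pages is zero or a power of two (note the vmalloc variant of rb_alloc stores !!nr_pages and keeps the real size in page_order). The masking idiom in isolation, assuming a power-of-two page count:

/* index of the page holding offset; nr_pages must be a power of two */
static inline unsigned int rb_page_index(unsigned long offset,
					 unsigned int page_shift,
					 unsigned int nr_pages)
{
	return (offset >> page_shift) & (nr_pages - 1);
}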
|
H A D | internal.h | 19 int nr_pages; /* nr of data pages */ member in struct:ring_buffer 69 rb_alloc(int nr_pages, long watermark, int cpu, int flags); 72 pgoff_t pgoff, int nr_pages, long watermark, int flags); 110 return rb->nr_pages << (PAGE_SHIFT + page_order(rb)); perf_data_size() 138 handle->page &= rb->nr_pages - 1; \
|
/linux-4.4.14/fs/ceph/ |
H A D | cache.c | 151 int loop, nr_pages; ceph_fscache_inode_now_uncached() local 159 nr_pages = pagevec_lookup(&pvec, ci->vfs_inode.i_mapping, first, ceph_fscache_inode_now_uncached() 162 if (!nr_pages) ceph_fscache_inode_now_uncached() 165 for (loop = 0; loop < nr_pages; loop++) ceph_fscache_inode_now_uncached() 168 first = pvec.pages[nr_pages - 1]->index + 1; ceph_fscache_inode_now_uncached() 170 pvec.nr = nr_pages; ceph_fscache_inode_now_uncached() 282 unsigned *nr_pages) ceph_readpages_from_fscache() 290 ret = fscache_read_or_alloc_pages(ci->fscache, mapping, pages, nr_pages, ceph_readpages_from_fscache() 279 ceph_readpages_from_fscache(struct inode *inode, struct address_space *mapping, struct list_head *pages, unsigned *nr_pages) ceph_readpages_from_fscache() argument
|
H A D | addr.c | 308 * start an async read(ahead) operation. return nr_pages we submitted 324 int nr_pages = 0; start_read() local 334 nr_pages++; list_for_each_entry_reverse() 336 if (max && nr_pages == max) list_for_each_entry_reverse() 339 len = nr_pages << PAGE_CACHE_SHIFT; 340 dout("start_read %p nr_pages %d is %lld~%lld\n", inode, nr_pages, 352 nr_pages = calc_pages_for(0, len); 353 pages = kmalloc(sizeof(*pages) * nr_pages, GFP_KERNEL); 357 for (i = 0; i < nr_pages; ++i) { 370 nr_pages = i; 386 return nr_pages; 389 ceph_unlock_page_vector(pages, nr_pages); 390 ceph_release_page_vector(pages, nr_pages); 402 struct list_head *page_list, unsigned nr_pages) ceph_readpages() 413 &nr_pages); ceph_readpages() 422 dout("readpages %p file %p nr_pages %d max %d\n", inode, ceph_readpages() 423 file, nr_pages, ceph_readpages() 401 ceph_readpages(struct file *file, struct address_space *mapping, struct list_head *page_list, unsigned nr_pages) ceph_readpages() argument
|
H A D | cache.h | 46 unsigned *nr_pages); 134 unsigned *nr_pages) ceph_readpages_from_fscache() 131 ceph_readpages_from_fscache(struct inode *inode, struct address_space *mapping, struct list_head *pages, unsigned *nr_pages) ceph_readpages_from_fscache() argument
|
/linux-4.4.14/drivers/staging/rdma/ehca/ |
H A D | ehca_eq.c | 59 u32 nr_pages; ehca_create_eq() local 83 &nr_pages, &eq->ist); ehca_create_eq() 90 ret = ipz_queue_ctor(NULL, &eq->ipz_queue, nr_pages, ehca_create_eq() 97 for (i = 0; i < nr_pages; i++) { ehca_create_eq() 110 if (i == (nr_pages - 1)) { ehca_create_eq()
|
H A D | ipz_pt_fn.c | 255 int i, nr_pages; ipz_queue_dtor() local 265 nr_pages = queue->queue_length / queue->pagesize; ipz_queue_dtor() 266 for (i = 0; i < nr_pages; i += PAGES_PER_KPAGE) ipz_queue_dtor()
|
H A D | ehca_mrmw.c | 2316 static int ehca_update_busmap(unsigned long pfn, unsigned long nr_pages) ehca_update_busmap() argument 2321 if (!nr_pages) ehca_update_busmap() 2333 end_section = ((pfn + nr_pages) * PAGE_SIZE) / EHCA_SECTSIZE; ehca_update_busmap() 2369 unsigned long pfn, start_pfn, end_pfn, nr_pages; ehca_create_busmap_callback() local 2382 nr_pages = pfn - start_pfn; ehca_create_busmap_callback() 2383 ret = ehca_update_busmap(start_pfn, nr_pages); ehca_create_busmap_callback() 2394 nr_pages = pfn - start_pfn; ehca_create_busmap_callback() 2395 return ehca_update_busmap(start_pfn, nr_pages); ehca_create_busmap_callback()
|
/linux-4.4.14/fs/9p/ |
H A D | cache.c | 159 int loop, nr_pages; v9fs_cache_inode_now_uncached() local 165 nr_pages = pagevec_lookup(&pvec, v9inode->vfs_inode.i_mapping, v9fs_cache_inode_now_uncached() 168 if (!nr_pages) v9fs_cache_inode_now_uncached() 171 for (loop = 0; loop < nr_pages; loop++) v9fs_cache_inode_now_uncached() 174 first = pvec.pages[nr_pages - 1]->index + 1; v9fs_cache_inode_now_uncached() 176 pvec.nr = nr_pages; v9fs_cache_inode_now_uncached() 358 unsigned *nr_pages) __v9fs_readpages_from_fscache() 363 p9_debug(P9_DEBUG_FSC, "inode %p pages %u\n", inode, *nr_pages); __v9fs_readpages_from_fscache() 368 mapping, pages, nr_pages, __v9fs_readpages_from_fscache() 379 BUG_ON(*nr_pages != 0); __v9fs_readpages_from_fscache() 355 __v9fs_readpages_from_fscache(struct inode *inode, struct address_space *mapping, struct list_head *pages, unsigned *nr_pages) __v9fs_readpages_from_fscache() argument
|
H A D | cache.h | 52 unsigned *nr_pages); 77 unsigned *nr_pages) v9fs_readpages_from_fscache() 80 nr_pages); v9fs_readpages_from_fscache() 133 unsigned *nr_pages) v9fs_readpages_from_fscache() 74 v9fs_readpages_from_fscache(struct inode *inode, struct address_space *mapping, struct list_head *pages, unsigned *nr_pages) v9fs_readpages_from_fscache() argument 130 v9fs_readpages_from_fscache(struct inode *inode, struct address_space *mapping, struct list_head *pages, unsigned *nr_pages) v9fs_readpages_from_fscache() argument
|
H A D | vfs_addr.c | 107 * @nr_pages: count of pages to read 112 struct list_head *pages, unsigned nr_pages) v9fs_vfs_readpages() 120 ret = v9fs_readpages_from_fscache(inode, mapping, pages, &nr_pages); v9fs_vfs_readpages() 111 v9fs_vfs_readpages(struct file *filp, struct address_space *mapping, struct list_head *pages, unsigned nr_pages) v9fs_vfs_readpages() argument
|
/linux-4.4.14/drivers/edac/ |
H A D | cell_edac.c | 135 u32 nr_pages; cell_edac_init_csrows() local 149 nr_pages = resource_size(&r) >> PAGE_SHIFT; cell_edac_init_csrows() 150 csrow->last_page = csrow->first_page + nr_pages - 1; cell_edac_init_csrows() 156 dimm->nr_pages = nr_pages / csrow->nr_channels; cell_edac_init_csrows() 160 " first_page=0x%lx, nr_pages=0x%x\n", cell_edac_init_csrows() 162 csrow->first_page, nr_pages); cell_edac_init_csrows()
|
H A D | pasemi_edac.c | 157 dimm->nr_pages = 128 << (20 - PAGE_SHIFT); pasemi_edac_init_csrows() 160 dimm->nr_pages = 256 << (20 - PAGE_SHIFT); pasemi_edac_init_csrows() 164 dimm->nr_pages = 512 << (20 - PAGE_SHIFT); pasemi_edac_init_csrows() 167 dimm->nr_pages = 1024 << (20 - PAGE_SHIFT); pasemi_edac_init_csrows() 170 dimm->nr_pages = 2048 << (20 - PAGE_SHIFT); pasemi_edac_init_csrows() 180 csrow->last_page = csrow->first_page + dimm->nr_pages - 1; pasemi_edac_init_csrows() 181 last_page_in_mmc += dimm->nr_pages; pasemi_edac_init_csrows()
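pasemi_edac converts each DIMM capacity to pages with size_in_MiB << (20 - PAGE_SHIFT): one MiB is 2^20 bytes, so with 4 KiB pages 128 MiB becomes 0x8000 pages. The conversion as a helper matching the shifts above:

#include <linux/mm.h>    /* PAGE_SHIFT */
#include <linux/types.h>

/* DIMM size in MiB -> pages; 128 -> 0x8000 with 4 KiB pages */
static inline u32 mib_to_pages(u32 mib)
{
	return mib << (20 - PAGE_SHIFT);
}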
|
H A D | ie31200_edac.c | 406 unsigned long nr_pages; ie31200_probe1() local 408 nr_pages = IE31200_PAGES(dimm_info[j][i].size); ie31200_probe1() 409 if (nr_pages == 0) ie31200_probe1() 413 nr_pages = nr_pages / 2; ie31200_probe1() 417 dimm->nr_pages = nr_pages; ie31200_probe1() 418 edac_dbg(0, "set nr pages: 0x%lx\n", nr_pages); ie31200_probe1() 426 dimm->nr_pages = nr_pages; ie31200_probe1() 427 edac_dbg(0, "set nr pages: 0x%lx\n", nr_pages); ie31200_probe1()
|
H A D | i3200_edac.c | 395 unsigned long nr_pages; i3200_probe1() local 401 nr_pages = drb_to_nr_pages(drbs, stacked, j, i); i3200_probe1() 402 if (nr_pages == 0) i3200_probe1() 406 stacked ? " (stacked)" : "", PAGES_TO_MiB(nr_pages)); i3200_probe1() 408 dimm->nr_pages = nr_pages; i3200_probe1() 409 dimm->grain = nr_pages << PAGE_SHIFT; i3200_probe1()
|
H A D | x38_edac.c | 376 unsigned long nr_pages; x38_probe1() local 379 nr_pages = drb_to_nr_pages(drbs, stacked, x38_probe1() 383 if (nr_pages == 0) x38_probe1() 389 dimm->nr_pages = nr_pages / x38_channel_num; x38_probe1() 390 dimm->grain = nr_pages << PAGE_SHIFT; x38_probe1()
|
H A D | ghes_edac.c | 91 dimm->nr_pages = MiB_TO_PAGES(32);/* Unknown */ ghes_edac_dmidecode() 93 dimm->nr_pages = MiB_TO_PAGES(entry->extended_size); ghes_edac_dmidecode() 96 dimm->nr_pages = MiB_TO_PAGES((entry->size & ghes_edac_dmidecode() 99 dimm->nr_pages = MiB_TO_PAGES(entry->size); ghes_edac_dmidecode() 154 if (dimm->nr_pages) { ghes_edac_dmidecode() 157 PAGES_TO_MiB(dimm->nr_pages), ghes_edac_dmidecode() 492 dimm->nr_pages = 1; ghes_edac_register()
|
H A D | edac_mc_sysfs.c | 187 u32 nr_pages = 0; csrow_size_show() local 190 nr_pages += csrow->channels[i]->dimm->nr_pages; csrow_size_show() 191 return sprintf(data, "%u\n", PAGES_TO_MiB(nr_pages)); csrow_size_show() 375 if (!csrow->channels[idx]->dimm->nr_pages) csrow_dev_is_visible() 399 int chan, nr_pages = 0; nr_pages_per_csrow() local 402 nr_pages += csrow->channels[chan]->dimm->nr_pages; nr_pages_per_csrow() 404 return nr_pages; nr_pages_per_csrow() 526 return sprintf(data, "%u\n", PAGES_TO_MiB(dimm->nr_pages)); dimmdev_size_show() 778 total_pages += dimm->nr_pages; mci_size_mb_show() 928 if (!dimm->nr_pages) edac_create_sysfs_mci_device() 961 if (!dimm->nr_pages) edac_create_sysfs_mci_device() 992 if (dimm->nr_pages == 0) edac_remove_sysfs_mci_device()
|
H A D | amd76x_edac.c | 211 dimm->nr_pages = (mba_mask + 1) >> PAGE_SHIFT; amd76x_init_csrows() 212 csrow->last_page = csrow->first_page + dimm->nr_pages - 1; amd76x_init_csrows() 214 dimm->grain = dimm->nr_pages << PAGE_SHIFT; amd76x_init_csrows()
|
H A D | edac_mc.c | 97 edac_dbg(4, " dimm->nr_pages = 0x%x\n", dimm->nr_pages); edac_mc_dump_dimm() 99 edac_dbg(4, " dimm->nr_pages = 0x%x\n", dimm->nr_pages); edac_mc_dump_dimm() 741 u32 nr_pages = 0; edac_mc_add_mc_with_groups() local 745 nr_pages += csrow->channels[j]->dimm->nr_pages; edac_mc_add_mc_with_groups() 746 if (!nr_pages) edac_mc_add_mc_with_groups() 750 if (csrow->channels[j]->dimm->nr_pages) edac_mc_add_mc_with_groups() 754 if (mci->dimms[i]->nr_pages) edac_mc_add_mc_with_groups() 897 n += dimm->nr_pages; edac_mc_find_csrow_by_page() 1229 if (e->enable_per_layer_report && dimm->nr_pages) { edac_mc_handle_error()
|
H A D | i82975x_edac.c | 376 u32 cumul_size, nr_pages; i82975x_init_csrows() local 407 nr_pages = cumul_size - last_cumul_size; i82975x_init_csrows() 408 if (!nr_pages) i82975x_init_csrows() 421 dimm->nr_pages = nr_pages / csrow->nr_channels; i82975x_init_csrows()
|
H A D | e7xxx_edac.c | 361 u32 dra, cumul_size, nr_pages; e7xxx_init_csrows() local 392 nr_pages = cumul_size - last_cumul_size; e7xxx_init_csrows() 413 dimm->nr_pages = nr_pages / (drc_chan + 1); e7xxx_init_csrows()
|
H A D | i3000_edac.c | 317 unsigned long last_cumul_size, nr_pages; i3000_probe1() local 408 nr_pages = cumul_size - last_cumul_size; i3000_probe1() 414 dimm->nr_pages = nr_pages / nr_channels; i3000_probe1()
|
H A D | i82875p_edac.c | 351 u32 cumul_size, nr_pages; i82875p_init_csrows() local 374 nr_pages = cumul_size - last_cumul_size; i82875p_init_csrows() 380 dimm->nr_pages = nr_pages / nr_chans; i82875p_init_csrows()
|
H A D | cpc925_edac.c | 335 unsigned long row_size, nr_pages, last_nr_pages = 0; cpc925_init_csrows() local 354 nr_pages = row_size >> PAGE_SHIFT; cpc925_init_csrows() 355 csrow->last_page = csrow->first_page + nr_pages - 1; cpc925_init_csrows() 383 dimm->nr_pages = nr_pages / csrow->nr_channels; cpc925_init_csrows()
|
H A D | amd64_edac.c | 2389 u32 cs_mode, nr_pages; get_csrow_nr_pages() local 2402 nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode, (csrow_nr / 2)) get_csrow_nr_pages() 2407 edac_dbg(0, "nr_pages/channel: %u\n", nr_pages); get_csrow_nr_pages() 2409 return nr_pages; get_csrow_nr_pages() 2423 int nr_pages = 0; init_csrows() local 2454 nr_pages = get_csrow_nr_pages(pvt, 0, i); init_csrows() 2455 csrow->channels[0]->dimm->nr_pages = nr_pages; init_csrows() 2462 csrow->channels[1]->dimm->nr_pages = row_dct1_pages; init_csrows() 2463 nr_pages += row_dct1_pages; init_csrows() 2466 edac_dbg(1, "Total csrow%d pages: %u\n", i, nr_pages); init_csrows()
|
/linux-4.4.14/arch/sh/mm/ |
H A D | gup.c | 163 int __get_user_pages_fast(unsigned long start, int nr_pages, int write, __get_user_pages_fast() argument 175 len = (unsigned long) nr_pages << PAGE_SHIFT; __get_user_pages_fast() 204 * @nr_pages: number of pages from start to pin 207 * Should be at least nr_pages long. 214 * requested. If nr_pages is 0 or negative, returns 0. If no pages 217 int get_user_pages_fast(unsigned long start, int nr_pages, int write, get_user_pages_fast() argument 228 len = (unsigned long) nr_pages << PAGE_SHIFT; get_user_pages_fast()
|
H A D | init.c | 492 unsigned long nr_pages = size >> PAGE_SHIFT; arch_add_memory() local 501 start_pfn, nr_pages); arch_add_memory() 522 unsigned long nr_pages = size >> PAGE_SHIFT; arch_remove_memory() local 527 ret = __remove_pages(zone, start_pfn, nr_pages); arch_remove_memory()
|
/linux-4.4.14/arch/mips/mm/ |
H A D | gup.c | 196 int __get_user_pages_fast(unsigned long start, int nr_pages, int write, __get_user_pages_fast() argument 208 len = (unsigned long) nr_pages << PAGE_SHIFT; __get_user_pages_fast() 250 * @nr_pages: number of pages from start to pin 253 * Should be at least nr_pages long. 260 * requested. If nr_pages is 0 or negative, returns 0. If no pages 263 int get_user_pages_fast(unsigned long start, int nr_pages, int write, get_user_pages_fast() argument 274 len = (unsigned long) nr_pages << PAGE_SHIFT; get_user_pages_fast()
|
/linux-4.4.14/arch/arm/kernel/ |
H A D | atags_compat.c | 46 unsigned long nr_pages; /* 4 */ member in struct:param_struct::__anon196::__anon197 107 if (params->u1.s.nr_pages != 0x02000 && build_tag_list() 108 params->u1.s.nr_pages != 0x04000 && build_tag_list() 109 params->u1.s.nr_pages != 0x08000 && build_tag_list() 110 params->u1.s.nr_pages != 0x10000) { build_tag_list() 113 params->u1.s.nr_pages = 0x1000; /* 16MB */ build_tag_list() 161 tag = memtag(tag, PHYS_OFFSET, params->u1.s.nr_pages * PAGE_SIZE); build_tag_list()
|
/linux-4.4.14/arch/x86/mm/ |
H A D | numa_32.c | 67 unsigned long nr_pages = end_pfn - start_pfn; node_memmap_size_bytes() local 69 if (!nr_pages) node_memmap_size_bytes() 72 return (nr_pages + 1) * sizeof(struct page); node_memmap_size_bytes()
|
H A D | gup.c | 255 int __get_user_pages_fast(unsigned long start, int nr_pages, int write, __get_user_pages_fast() argument 267 len = (unsigned long) nr_pages << PAGE_SHIFT; __get_user_pages_fast() 310 * @nr_pages: number of pages from start to pin 313 * Should be at least nr_pages long. 320 * requested. If nr_pages is 0 or negative, returns 0. If no pages 323 int get_user_pages_fast(unsigned long start, int nr_pages, int write, get_user_pages_fast() argument 334 len = (unsigned long) nr_pages << PAGE_SHIFT; get_user_pages_fast()
|
H A D | init_64.c | 696 unsigned long nr_pages = size >> PAGE_SHIFT; arch_add_memory() local 701 ret = __add_pages(nid, zone, start_pfn, nr_pages); arch_add_memory() 716 unsigned int nr_pages = 1 << order; free_pagetable() local 724 while (nr_pages--) free_pagetable() 727 while (nr_pages--) free_pagetable() 1020 unsigned long nr_pages = size >> PAGE_SHIFT; arch_remove_memory() local 1026 ret = __remove_pages(zone, start_pfn, nr_pages); arch_remove_memory() 1318 unsigned int nr_pages; register_page_bootmem_memmap() local 1358 nr_pages = 1 << (get_order(PMD_SIZE)); register_page_bootmem_memmap() 1360 while (nr_pages--) register_page_bootmem_memmap()
|
H A D | tlb.c | 117 unsigned long nr_pages = flush_tlb_func() local 124 trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, nr_pages); flush_tlb_func()
|
H A D | ioremap.c | 56 static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages, __ioremap_check_ram() argument 61 for (i = 0; i < nr_pages; ++i) __ioremap_check_ram()
|
/linux-4.4.14/tools/testing/selftests/vm/ |
H A D | transhuge-stress.c | 104 int nr_succeed = 0, nr_failed = 0, nr_pages = 0; main() local 128 nr_pages++; main() 142 nr_succeed, nr_failed, nr_pages); main()
|
H A D | userfaultfd.c | 71 static unsigned long nr_cpus, nr_pages, nr_pages_per_cpu, page_size; variable 147 page_nr %= nr_pages; locking_thread() 224 if (offset >= nr_pages * page_size) copy_page() 387 if (madvise(area_src, nr_pages * page_size, MADV_DONTNEED)) { stress() 428 if (posix_memalign(&area, page_size, nr_pages * page_size)) { userfaultfd_stress() 433 if (posix_memalign(&area, page_size, nr_pages * page_size)) { userfaultfd_stress() 458 count_verify = malloc(nr_pages * sizeof(unsigned long long)); userfaultfd_stress() 464 for (nr = 0; nr < nr_pages; nr++) { userfaultfd_stress() 525 uffdio_register.range.len = nr_pages * page_size; userfaultfd_stress() 565 if (madvise(area_dst, nr_pages * page_size, MADV_DONTNEED)) { userfaultfd_stress() 582 for (nr = 0; nr < nr_pages; nr++) { userfaultfd_stress() 629 nr_pages = nr_pages_per_cpu * nr_cpus; main() 630 printf("nr_pages: %lu, nr_pages_per_cpu: %lu\n", main() 631 nr_pages, nr_pages_per_cpu); main()
|
/linux-4.4.14/fs/exofs/ |
H A D | inode.c | 57 unsigned nr_pages; member in struct:page_collect 78 pcol->nr_pages = 0; _pcol_init() 87 pcol->expected_pages -= min(pcol->nr_pages, pcol->expected_pages); _pcol_reset() 91 pcol->nr_pages = 0; _pcol_reset() 140 if (unlikely(pcol->nr_pages >= pcol->alloc_pages)) pcol_add_page() 143 pcol->pages[pcol->nr_pages++] = page; pcol_add_page() 210 " length=0x%lx nr_pages=%u\n", __readpages_done() 212 pcol->nr_pages); __readpages_done() 214 for (i = 0; i < pcol->nr_pages; i++) { __readpages_done() 256 for (i = 0; i < pcol->nr_pages; i++) { _unlock_pcol_pages() 272 BUG_ON(pcol_src->nr_pages < ios->nr_pages); _maybe_not_all_in_one_io() 274 if (pcol_src->nr_pages > ios->nr_pages) { _maybe_not_all_in_one_io() 276 unsigned pages_less = pcol_src->nr_pages - ios->nr_pages; _maybe_not_all_in_one_io() 282 pcol_src->nr_pages = ios->nr_pages; _maybe_not_all_in_one_io() 287 pcol->nr_pages = pages_less; _maybe_not_all_in_one_io() 289 src_page = pcol_src->pages + pcol_src->nr_pages; _maybe_not_all_in_one_io() 299 EXOFS_DBGMSG("Length was adjusted nr_pages=0x%x " _maybe_not_all_in_one_io() 302 pcol_src->nr_pages, pages_less, pcol->expected_pages, _maybe_not_all_in_one_io() 430 } else if (unlikely((pcol->pg_first + pcol->nr_pages) != readpage_strip() 454 "this_len=0x%zx nr_pages=%u length=0x%lx\n", readpage_strip() 455 page, len, pcol->nr_pages, pcol->length); readpage_strip() 474 struct list_head *pages, unsigned nr_pages) exofs_readpages() 479 _pcol_init(&pcol, nr_pages, mapping->host); exofs_readpages() 538 " length=0x%lx nr_pages=%u\n", writepages_done() 540 pcol->nr_pages); writepages_done() 542 for (i = 0; i < pcol->nr_pages; i++) { writepages_done() 736 } else if (unlikely((pcol->pg_first + pcol->nr_pages) != writepage_strip() 760 "nr_pages=%u total_length=0x%lx\n", writepage_strip() 761 pcol->nr_pages, pcol->length); writepage_strip() 825 } else if (pcol.nr_pages) { exofs_writepages() 829 for (i = 0; i < pcol.nr_pages; i++) { exofs_writepages() 473 exofs_readpages(struct file *file, struct address_space *mapping, struct list_head *pages, unsigned nr_pages) exofs_readpages() argument
|
/linux-4.4.14/drivers/media/pci/cx23885/ |
H A D | cx23885-alsa.c | 83 static int cx23885_alsa_dma_init(struct cx23885_audio_dev *chip, int nr_pages) cx23885_alsa_dma_init() argument 89 buf->vaddr = vmalloc_32(nr_pages << PAGE_SHIFT); cx23885_alsa_dma_init() 91 dprintk(1, "vmalloc_32(%d pages) failed\n", nr_pages); cx23885_alsa_dma_init() 97 nr_pages << PAGE_SHIFT); cx23885_alsa_dma_init() 99 memset(buf->vaddr, 0, nr_pages << PAGE_SHIFT); cx23885_alsa_dma_init() 100 buf->nr_pages = nr_pages; cx23885_alsa_dma_init() 102 buf->sglist = vzalloc(buf->nr_pages * sizeof(*buf->sglist)); cx23885_alsa_dma_init() 106 sg_init_table(buf->sglist, buf->nr_pages); cx23885_alsa_dma_init() 107 for (i = 0; i < buf->nr_pages; i++) { cx23885_alsa_dma_init() 129 buf->nr_pages, PCI_DMA_FROMDEVICE); cx23885_alsa_dma_map()
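cx23885_alsa_dma_init backs the audio buffer with vmalloc_32 and then builds one scatterlist entry per page via vmalloc_to_page. A trimmed sketch of that loop (4.4-era APIs, error handling reduced to the lookup that can fail):

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>

static int sg_fill_from_vmalloc(void *vaddr, int nr_pages,
				struct scatterlist *sg)
{
	int i;

	sg_init_table(sg, nr_pages);
	for (i = 0; i < nr_pages; i++) {
		struct page *pg = vmalloc_to_page(vaddr + (i << PAGE_SHIFT));

		if (!pg)
			return -ENOMEM;
		sg_set_page(&sg[i], pg, PAGE_SIZE, 0);
	}
	return 0;
}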
|
/linux-4.4.14/fs/btrfs/ |
H A D | compression.c | 68 unsigned long nr_pages; member in struct:compressed_bio 117 for (i = 0; i < cb->nr_pages; i++) { check_compressed_csum() 190 for (index = 0; index < cb->nr_pages; index++) { end_compressed_bio_read() 230 unsigned long nr_pages = end_index - index + 1; end_compressed_writeback() local 237 while (nr_pages > 0) { end_compressed_writeback() 240 nr_pages, ARRAY_SIZE(pages)), pages); end_compressed_writeback() 242 nr_pages -= 1; end_compressed_writeback() 252 nr_pages -= ret; end_compressed_writeback() 304 for (index = 0; index < cb->nr_pages; index++) { end_compressed_bio_write() 330 unsigned long nr_pages) btrfs_submit_compressed_write() 357 cb->nr_pages = nr_pages; btrfs_submit_compressed_write() 372 for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) { btrfs_submit_compressed_write() 418 bytes_left, cb->compressed_len, cb->nr_pages); btrfs_submit_compressed_write() 451 unsigned long nr_pages = 0; add_ra_bio_pages() local 537 nr_pages++; add_ra_bio_pages() 572 unsigned long nr_pages; btrfs_submit_compressed_read() local 620 nr_pages = DIV_ROUND_UP(compressed_len, PAGE_CACHE_SIZE); btrfs_submit_compressed_read() 621 cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *), btrfs_submit_compressed_read() 628 for (pg_index = 0; pg_index < nr_pages; pg_index++) { btrfs_submit_compressed_read() 637 faili = nr_pages - 1; btrfs_submit_compressed_read() 638 cb->nr_pages = nr_pages; btrfs_submit_compressed_read() 657 for (pg_index = 0; pg_index < nr_pages; pg_index++) { btrfs_submit_compressed_read() 326 btrfs_submit_compressed_write(struct inode *inode, u64 start, unsigned long len, u64 disk_start, unsigned long compressed_len, struct page **compressed_pages, unsigned long nr_pages) btrfs_submit_compressed_write() argument
|
H A D | zlib.c | 88 int nr_pages = 0; zlib_compress_pages() local 116 nr_pages = 1; zlib_compress_pages() 146 if (nr_pages == nr_dest_pages) { zlib_compress_pages() 157 pages[nr_pages] = out_page; zlib_compress_pages() 158 nr_pages++; zlib_compress_pages() 202 *out_pages = nr_pages; zlib_compress_pages()
|
H A D | lzo.c | 101 int nr_pages = 0; lzo_compress_pages() local 135 nr_pages = 1; lzo_compress_pages() 190 if (nr_pages == nr_dest_pages) { lzo_compress_pages() 202 pages[nr_pages++] = out_page; lzo_compress_pages() 245 *out_pages = nr_pages; lzo_compress_pages()
|
H A D | compression.h | 45 unsigned long nr_pages);
|
H A D | extent_io.c | 1654 unsigned long nr_pages = end_index - index + 1; __unlock_for_delalloc() local 1660 while (nr_pages > 0) { __unlock_for_delalloc() 1662 min_t(unsigned long, nr_pages, __unlock_for_delalloc() 1669 nr_pages -= ret; __unlock_for_delalloc() 1833 unsigned long nr_pages = end_index - index + 1; extent_clear_unlock_delalloc() local 1840 if ((page_ops & PAGE_SET_ERROR) && nr_pages > 0) extent_clear_unlock_delalloc() 1843 while (nr_pages > 0) { extent_clear_unlock_delalloc() 1846 nr_pages, ARRAY_SIZE(pages)), pages); extent_clear_unlock_delalloc() 1868 nr_pages -= ret; extent_clear_unlock_delalloc() 3197 struct page *pages[], int nr_pages, __do_contiguous_readpages() 3221 for (index = 0; index < nr_pages; index++) { __do_contiguous_readpages() 3230 int nr_pages, get_extent_t *get_extent, __extent_readpages() 3242 for (index = 0; index < nr_pages; index++) { __extent_readpages() 3918 int nr_pages; btree_write_cache_pages() local 3941 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag, btree_write_cache_pages() 3946 for (i = 0; i < nr_pages; i++) { btree_write_cache_pages() 4050 int nr_pages; extent_write_cache_pages() local 4085 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag, extent_write_cache_pages() 4090 for (i = 0; i < nr_pages; i++) { extent_write_cache_pages() 4208 unsigned long nr_pages = (end - start + PAGE_CACHE_SIZE) >> extent_write_locked_range() local 4221 .nr_to_write = nr_pages * 2, extent_write_locked_range() 4269 struct list_head *pages, unsigned nr_pages, extent_readpages() 4281 for (page_idx = 0; page_idx < nr_pages; page_idx++) { extent_readpages() 3196 __do_contiguous_readpages(struct extent_io_tree *tree, struct page *pages[], int nr_pages, u64 start, u64 end, get_extent_t *get_extent, struct extent_map **em_cached, struct bio **bio, int mirror_num, unsigned long *bio_flags, int rw, u64 *prev_em_start) __do_contiguous_readpages() argument 3228 __extent_readpages(struct extent_io_tree *tree, struct page *pages[], int nr_pages, get_extent_t *get_extent, struct extent_map **em_cached, struct bio **bio, int mirror_num, unsigned long *bio_flags, int rw, u64 *prev_em_start) __extent_readpages() argument 4267 extent_readpages(struct extent_io_tree *tree, struct address_space *mapping, struct list_head *pages, unsigned nr_pages, get_extent_t get_extent) extent_readpages() argument
|
H A D | raid56.c | 141 int nr_pages; member in struct:btrfs_raid_bio 266 for (i = 0; i < rbio->nr_pages; i++) { cache_rbio_pages() 313 for (i = 0; i < dest->nr_pages; i++) { steal_rbio() 845 for (i = 0; i < rbio->nr_pages; i++) { __free_raid_bio() 984 rbio->nr_pages = num_pages; alloc_rbio() 1019 for (i = 0; i < rbio->nr_pages; i++) { alloc_rbio_pages() 1039 for (; i < rbio->nr_pages; i++) { alloc_rbio_parity_pages() 1511 int nr_pages = DIV_ROUND_UP(rbio->stripe_len, PAGE_CACHE_SIZE); raid56_rmw_stripe() local 1530 for (pagenr = 0; pagenr < nr_pages; pagenr++) { raid56_rmw_stripe() 1806 int nr_pages = DIV_ROUND_UP(rbio->stripe_len, PAGE_CACHE_SIZE); __raid_recover_end_io() local 1829 for (pagenr = 0; pagenr < nr_pages; pagenr++) { __raid_recover_end_io() 1940 for (i = 0; i < nr_pages; i++) { __raid_recover_end_io() 2036 int nr_pages = DIV_ROUND_UP(rbio->stripe_len, PAGE_CACHE_SIZE); __raid56_parity_recover() local 2060 for (pagenr = 0; pagenr < nr_pages; pagenr++) { __raid56_parity_recover()
|
/linux-4.4.14/fs/btrfs/tests/ |
H A D | extent-io-tests.c | 35 unsigned long nr_pages = end_index - index + 1; process_page_range() local 40 while (nr_pages > 0) { process_page_range() 42 min_t(unsigned long, nr_pages, process_page_range() 54 nr_pages -= ret; process_page_range() 59 printk(KERN_ERR "stuck in a loop, start %Lu, end %Lu, nr_pages %lu, ret %d\n", start, end, nr_pages, ret); process_page_range()
|
/linux-4.4.14/fs/cifs/ |
H A D | cache.c | 300 int loop, nr_pages; cifs_fscache_inode_now_uncached() local 308 nr_pages = pagevec_lookup(&pvec, cifs_fscache_inode_now_uncached() 311 if (!nr_pages) cifs_fscache_inode_now_uncached() 314 for (loop = 0; loop < nr_pages; loop++) cifs_fscache_inode_now_uncached() 317 first = pvec.pages[nr_pages - 1]->index + 1; cifs_fscache_inode_now_uncached() 319 pvec.nr = nr_pages; cifs_fscache_inode_now_uncached()
|
H A D | fscache.h | 80 unsigned *nr_pages) cifs_readpages_from_fscache() 84 nr_pages); cifs_readpages_from_fscache() 134 unsigned *nr_pages) cifs_readpages_from_fscache() 77 cifs_readpages_from_fscache(struct inode *inode, struct address_space *mapping, struct list_head *pages, unsigned *nr_pages) cifs_readpages_from_fscache() argument 131 cifs_readpages_from_fscache(struct inode *inode, struct address_space *mapping, struct list_head *pages, unsigned *nr_pages) cifs_readpages_from_fscache() argument
|
H A D | file.c | 1892 unsigned int nr_pages; wdata_alloc_and_fillpages() local 1910 nr_pages = find_get_pages_tag(mapping, index, wdata_alloc_and_fillpages() 1913 *found_pages += nr_pages; wdata_alloc_and_fillpages() 1914 tofind -= nr_pages; wdata_alloc_and_fillpages() 1915 pages += nr_pages; wdata_alloc_and_fillpages() 1916 } while (nr_pages && tofind && *index <= end); wdata_alloc_and_fillpages() 1927 unsigned int nr_pages = 0, i; wdata_prepare_pages() local 1940 if (nr_pages == 0) wdata_prepare_pages() 1985 ++nr_pages; wdata_prepare_pages() 1989 if (nr_pages == 0) wdata_prepare_pages() 1993 for (i = nr_pages; i < found_pages; i++) { wdata_prepare_pages() 1998 return nr_pages; wdata_prepare_pages() 2002 wdata_send_pages(struct cifs_writedata *wdata, unsigned int nr_pages, wdata_send_pages() argument 2010 wdata->nr_pages = nr_pages; wdata_send_pages() 2014 page_offset(wdata->pages[nr_pages - 1]), wdata_send_pages() 2016 wdata->bytes = ((nr_pages - 1) * PAGE_CACHE_SIZE) + wdata->tailsz; wdata_send_pages() 2030 for (i = 0; i < nr_pages; ++i) wdata_send_pages() 2066 unsigned int i, nr_pages, found_pages, wsize, credits; cifs_writepages() local 2090 nr_pages = wdata_prepare_pages(wdata, found_pages, mapping, wbc, cifs_writepages() 2094 if (nr_pages == 0) { cifs_writepages() 2102 rc = wdata_send_pages(wdata, nr_pages, mapping, wbc); cifs_writepages() 2107 for (i = 0; i < nr_pages; ++i) { cifs_writepages() 2126 wbc->nr_to_write -= nr_pages; cifs_writepages() 2398 for (i = 0; i < wdata->nr_pages; i++) cifs_uncached_writedata_release() 2427 unsigned long i, nr_pages = *num_pages; wdata_fill_from_iovec() local 2430 for (i = 0; i < nr_pages; i++) { wdata_fill_from_iovec() 2471 unsigned long nr_pages, num_pages, i; cifs_write_from_iter() local 2494 nr_pages = get_numpages(wsize, len, &cur_len); cifs_write_from_iter() 2495 wdata = cifs_writedata_alloc(nr_pages, cifs_write_from_iter() 2503 rc = cifs_write_allocate_pages(wdata->pages, nr_pages); cifs_write_from_iter() 2510 num_pages = nr_pages; cifs_write_from_iter() 2513 for (i = 0; i < nr_pages; i++) cifs_write_from_iter() 2521 * Bring nr_pages down to the number of pages we actually used, cifs_write_from_iter() 2524 for ( ; nr_pages > num_pages; nr_pages--) cifs_write_from_iter() 2525 put_page(wdata->pages[nr_pages - 1]); cifs_write_from_iter() 2528 wdata->nr_pages = nr_pages; cifs_write_from_iter() 2534 wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE); cifs_write_from_iter() 2748 cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete) cifs_readdata_alloc() argument 2752 rdata = kzalloc(sizeof(*rdata) + (sizeof(struct page *) * nr_pages), cifs_readdata_alloc() 2777 cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages) cifs_read_allocate_pages() argument 2783 for (i = 0; i < nr_pages; i++) { cifs_read_allocate_pages() 2793 for (i = 0; i < nr_pages; i++) { cifs_read_allocate_pages() 2808 for (i = 0; i < rdata->nr_pages; i++) { cifs_uncached_readdata_release() 2830 for (i = 0; i < rdata->nr_pages; i++) { cifs_readdata_to_iov() 2857 unsigned int nr_pages = rdata->nr_pages; cifs_uncached_read_into_pages() local 2862 for (i = 0; i < nr_pages; i++) { cifs_uncached_read_into_pages() 2884 rdata->nr_pages--; cifs_uncached_read_into_pages() 2942 rdata->nr_pages = npages; cifs_send_async_read() 3272 for (i = 0; i < rdata->nr_pages; i++) { cifs_readv_complete() 3305 unsigned int nr_pages = rdata->nr_pages; cifs_readpages_read_into_pages() local 3315 for (i = 0; i < nr_pages; i++) { cifs_readpages_read_into_pages() 3351 rdata->nr_pages--; cifs_readpages_read_into_pages() 3359 rdata->nr_pages--; cifs_readpages_read_into_pages() 3378 unsigned int *nr_pages, loff_t *offset, unsigned int *bytes) readpages_get_pages() 3407 *nr_pages = 1; readpages_get_pages() 3429 (*nr_pages)++; list_for_each_entry_safe_reverse() 3479 unsigned int i, nr_pages, bytes, rsize; cifs_readpages() local 3502 &nr_pages, &offset, &bytes); cifs_readpages() 3508 rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete); cifs_readpages() 3533 rdata->pages[rdata->nr_pages++] = page; cifs_readpages() 3541 for (i = 0; i < rdata->nr_pages; i++) { cifs_readpages() 3376 readpages_get_pages(struct address_space *mapping, struct list_head *page_list, unsigned int rsize, struct list_head *tmplist, unsigned int *nr_pages, loff_t *offset, unsigned int *bytes) readpages_get_pages() argument
|
H A D | fscache.c | 187 unsigned *nr_pages) __cifs_readpages_from_fscache() 192 __func__, CIFS_I(inode)->fscache, *nr_pages, inode); __cifs_readpages_from_fscache() 194 pages, nr_pages, __cifs_readpages_from_fscache() 184 __cifs_readpages_from_fscache(struct inode *inode, struct address_space *mapping, struct list_head *pages, unsigned *nr_pages) __cifs_readpages_from_fscache() argument
|
/linux-4.4.14/tools/vm/ |
H A D | page-types.c | 202 static unsigned long nr_pages[HASH_SIZE]; variable 399 for (i = 0; i < ARRAY_SIZE(nr_pages); i++) { show_summary() 400 if (nr_pages[i]) show_summary() 403 nr_pages[i], show_summary() 404 pages2mb(nr_pages[i]), show_summary() 587 nr_pages[hash_slot(flags)]++; add_page() 854 unsigned long nr_pages, pfn, i; walk_file() local 864 nr_pages = (end - off + page_size - 1) / page_size; walk_file() 865 if (nr_pages > PAGEMAP_BATCH) walk_file() 866 nr_pages = PAGEMAP_BATCH; walk_file() 867 len = nr_pages * page_size; walk_file() 889 for (i = 0; i < nr_pages ; i++) { walk_file() 900 nr_pages) != nr_pages) walk_file() 905 for (i = 0; i < nr_pages; i++) { walk_file()
|
/linux-4.4.14/kernel/power/ |
H A D | swap.c | 519 int nr_pages; save_image() local 532 nr_pages = 0; save_image() 541 if (!(nr_pages % m)) save_image() 543 nr_pages / m * 10); save_image() 544 nr_pages++; save_image() 655 int nr_pages; save_image_lzo() local 752 nr_pages = 0; save_image_lzo() 767 if (!(nr_pages % m)) save_image_lzo() 771 nr_pages / m * 10); save_image_lzo() 772 nr_pages++; save_image_lzo() 868 static int enough_swap(unsigned int nr_pages, unsigned int flags) enough_swap() argument 875 required = PAGES_FOR_IO + nr_pages; enough_swap() 1035 * (assume there are @nr_pages pages to load) 1048 unsigned nr_pages; load_image() local 1057 nr_pages = 0; load_image() 1070 if (!(nr_pages % m)) load_image() 1072 nr_pages / m * 10); load_image() 1073 nr_pages++; load_image() 1149 unsigned nr_pages; load_image_lzo() local 1270 nr_pages = 0; load_image_lzo() 1397 if (!(nr_pages % m)) load_image_lzo() 1401 nr_pages / m * 10); load_image_lzo() 1402 nr_pages++; load_image_lzo()
|
H A D | snapshot.c | 1373 * @nr_pages: Number of page frames to allocate. 1378 static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask) preallocate_image_pages() argument 1382 while (nr_pages > 0) { preallocate_image_pages() 1393 nr_pages--; preallocate_image_pages() 1400 static unsigned long preallocate_image_memory(unsigned long nr_pages, preallocate_image_memory() argument 1409 if (nr_pages < alloc) preallocate_image_memory() 1410 alloc = nr_pages; preallocate_image_memory() 1416 static unsigned long preallocate_image_highmem(unsigned long nr_pages) preallocate_image_highmem() argument 1418 return preallocate_image_pages(nr_pages, GFP_IMAGE | __GFP_HIGHMEM); preallocate_image_highmem() 1431 static unsigned long preallocate_highmem_fraction(unsigned long nr_pages, preallocate_highmem_fraction() argument 1435 unsigned long alloc = __fraction(nr_pages, highmem, total); preallocate_highmem_fraction() 1440 static inline unsigned long preallocate_image_highmem(unsigned long nr_pages) preallocate_image_highmem() argument 1445 static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages, preallocate_highmem_fraction() argument 1736 static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem) enough_free_mem() argument 1745 nr_pages += count_pages_for_highmem(nr_highmem); enough_free_mem() 1747 nr_pages, PAGES_FOR_IO, free); enough_free_mem() 1749 return free > nr_pages + PAGES_FOR_IO; enough_free_mem() 1808 unsigned int nr_pages, unsigned int nr_highmem) swsusp_alloc() 1815 nr_pages += alloc_highmem_pages(copy_bm, nr_highmem); swsusp_alloc() 1818 if (nr_pages > alloc_normal) { swsusp_alloc() 1819 nr_pages -= alloc_normal; swsusp_alloc() 1820 while (nr_pages-- > 0) { swsusp_alloc() 1839 unsigned int nr_pages, nr_highmem; swsusp_save() local 1844 nr_pages = count_data_pages(); swsusp_save() 1846 printk(KERN_INFO "PM: Need to copy %u pages\n", nr_pages + nr_highmem); swsusp_save() 1848 if (!enough_free_mem(nr_pages, nr_highmem)) { swsusp_save() 1853 if (swsusp_alloc(&orig_bm, ©_bm, nr_pages, nr_highmem)) { swsusp_save() 1870 nr_pages += nr_highmem; swsusp_save() 1871 nr_copy_pages = nr_pages; swsusp_save() 1872 nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE); swsusp_save() 1875 nr_pages); swsusp_save() 2336 unsigned int nr_pages, nr_highmem; prepare_image() local 2368 nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages; prepare_image() 2369 nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE); prepare_image() 2370 while (nr_pages > 0) { prepare_image() 2378 nr_pages--; prepare_image() 2382 nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages; prepare_image() 2383 while (nr_pages > 0) { prepare_image() 2397 nr_pages--; prepare_image() 1807 swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm, unsigned int nr_pages, unsigned int nr_highmem) swsusp_alloc() argument
|
/linux-4.4.14/arch/s390/pci/ |
H A D | pci_dma.c | 135 unsigned int nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; dma_update_trans() local 142 if (!nr_pages) dma_update_trans() 151 for (i = 0; i < nr_pages; i++) { dma_update_trans() 175 nr_pages * PAGE_SIZE); dma_update_trans() 288 unsigned long nr_pages, iommu_page_index; s390_dma_map_pages() local 295 nr_pages = iommu_num_pages(pa, size, PAGE_SIZE); s390_dma_map_pages() 296 iommu_page_index = dma_alloc_iommu(zdev, nr_pages); s390_dma_map_pages() 303 size = nr_pages * PAGE_SIZE; s390_dma_map_pages() 318 atomic64_add(nr_pages, &zdev->mapped_pages); s390_dma_map_pages() 322 dma_free_iommu(zdev, iommu_page_index, nr_pages); s390_dma_map_pages()
|
/linux-4.4.14/drivers/misc/genwqe/ |
H A D | card_utils.c | 306 sgl->nr_pages = DIV_ROUND_UP(sgl->fpage_offs + user_size, PAGE_SIZE); genwqe_alloc_sync_sgl() 309 dev_dbg(&pci_dev->dev, "[%s] uaddr=%p usize=%8ld nr_pages=%ld fpage_offs=%lx fpage_size=%ld lpage_size=%ld\n", genwqe_alloc_sync_sgl() 310 __func__, user_addr, user_size, sgl->nr_pages, genwqe_alloc_sync_sgl() 315 sgl->sgl_size = genwqe_sgl_size(sgl->nr_pages); genwqe_alloc_sync_sgl() 383 while (p < sgl->nr_pages) { genwqe_setup_sgl() 401 } else if ((p == sgl->nr_pages - 1) && genwqe_setup_sgl() 422 if (p == sgl->nr_pages) genwqe_setup_sgl() 438 if (p == sgl->nr_pages) genwqe_setup_sgl() 520 static int free_user_pages(struct page **page_list, unsigned int nr_pages, free_user_pages() argument 525 for (i = 0; i < nr_pages; i++) { free_user_pages() 575 m->nr_pages = DIV_ROUND_UP(offs + size, PAGE_SIZE); genwqe_user_vmap() 577 m->page_list = kcalloc(m->nr_pages, genwqe_user_vmap() 582 m->nr_pages = 0; genwqe_user_vmap() 587 m->dma_list = (dma_addr_t *)(m->page_list + m->nr_pages); genwqe_user_vmap() 591 m->nr_pages, genwqe_user_vmap() 598 if (rc < m->nr_pages) { genwqe_user_vmap() 604 rc = genwqe_map_pages(cd, m->page_list, m->nr_pages, m->dma_list); genwqe_user_vmap() 611 free_user_pages(m->page_list, m->nr_pages, 0); genwqe_user_vmap() 617 m->nr_pages = 0; genwqe_user_vmap() 641 genwqe_unmap_pages(cd, m->dma_list, m->nr_pages); genwqe_user_vunmap() 644 free_user_pages(m->page_list, m->nr_pages, 1); genwqe_user_vunmap() 649 m->nr_pages = 0; genwqe_user_vunmap()
|
/linux-4.4.14/fs/nfs/ |
H A D | fscache.c | 370 unsigned *nr_pages) __nfs_readpages_from_fscache() 372 unsigned npages = *nr_pages; __nfs_readpages_from_fscache() 379 mapping, pages, nr_pages, __nfs_readpages_from_fscache() 383 if (*nr_pages < npages) __nfs_readpages_from_fscache() 386 if (*nr_pages > 0) __nfs_readpages_from_fscache() 388 *nr_pages); __nfs_readpages_from_fscache() 393 BUG_ON(*nr_pages != 0); __nfs_readpages_from_fscache() 366 __nfs_readpages_from_fscache(struct nfs_open_context *ctx, struct inode *inode, struct address_space *mapping, struct list_head *pages, unsigned *nr_pages) __nfs_readpages_from_fscache() argument
|
H A D | fscache-index.c | 266 int loop, nr_pages; nfs_fscache_inode_now_uncached() local 275 nr_pages = pagevec_lookup(&pvec, nfs_fscache_inode_now_uncached() 279 if (!nr_pages) nfs_fscache_inode_now_uncached() 282 for (loop = 0; loop < nr_pages; loop++) nfs_fscache_inode_now_uncached() 285 first = pvec.pages[nr_pages - 1]->index + 1; nfs_fscache_inode_now_uncached() 287 pvec.nr = nr_pages; nfs_fscache_inode_now_uncached()
|
H A D | fscache.h | 133 unsigned *nr_pages) nfs_readpages_from_fscache() 137 nr_pages); nfs_readpages_from_fscache() 212 unsigned *nr_pages) nfs_readpages_from_fscache() 129 nfs_readpages_from_fscache(struct nfs_open_context *ctx, struct inode *inode, struct address_space *mapping, struct list_head *pages, unsigned *nr_pages) nfs_readpages_from_fscache() argument 208 nfs_readpages_from_fscache(struct nfs_open_context *ctx, struct inode *inode, struct address_space *mapping, struct list_head *pages, unsigned *nr_pages) nfs_readpages_from_fscache() argument
|
H A D | read.c | 376 struct list_head *pages, unsigned nr_pages) nfs_readpages() 390 nr_pages); nfs_readpages() 407 pages, &nr_pages); nfs_readpages() 375 nfs_readpages(struct file *filp, struct address_space *mapping, struct list_head *pages, unsigned nr_pages) nfs_readpages() argument
|
/linux-4.4.14/drivers/hwtracing/intel_th/ |
H A D | msu.c | 93 * @nr_pages: total number of pages allocated for this buffer 113 unsigned long nr_pages; member in struct:msc 493 reg = msc->nr_pages; msc_configure() 549 msc->single_sz = reg & ((msc->nr_pages << PAGE_SHIFT) - 1); msc_disable() 624 msc->nr_pages = size >> PAGE_SHIFT; msc_buffer_contig_alloc() 639 for (off = 0; off < msc->nr_pages << PAGE_SHIFT; off += PAGE_SIZE) { msc_buffer_contig_free() 646 msc->nr_pages = 0; msc_buffer_contig_free() 659 if (pgoff >= msc->nr_pages) msc_buffer_contig_get_page() 719 msc->nr_pages += nr_blocks; msc_buffer_win_alloc() 749 msc->nr_pages -= win->nr_blocks; msc_buffer_win_free() 841 static int msc_buffer_multi_alloc(struct msc *msc, unsigned long *nr_pages, msc_buffer_multi_alloc() argument 847 ret = msc_buffer_win_alloc(msc, nr_pages[i]); msc_buffer_multi_alloc() 892 static int msc_buffer_alloc(struct msc *msc, unsigned long *nr_pages, msc_buffer_alloc() argument 905 ret = msc_buffer_contig_alloc(msc, nr_pages[0] << PAGE_SHIFT); msc_buffer_alloc() 907 ret = msc_buffer_multi_alloc(msc, nr_pages, nr_wins); msc_buffer_alloc() 1059 unsigned long size = msc->nr_pages << PAGE_SHIFT, rem = len; msc_single_to_user() 1112 size = msc->nr_pages << PAGE_SHIFT; intel_th_msc_read() 1169 for (pg = 0; pg < msc->nr_pages; pg++) { msc_mmap_close() 1227 if (size >> PAGE_SHIFT != msc->nr_pages) intel_th_msc_mmap() 1355 count = scnprintf(buf, PAGE_SIZE, "%ld\n", msc->nr_pages); nr_pages_show() 1435 static DEVICE_ATTR_RW(nr_pages);
|
/linux-4.4.14/arch/x86/kernel/cpu/ |
H A D | perf_event_intel_bts.c | 51 unsigned int nr_pages; member in struct:bts_buffer 71 bts_buffer_setup_aux(int cpu, void **pages, int nr_pages, bool overwrite) bts_buffer_setup_aux() argument 77 size_t size = nr_pages << PAGE_SHIFT; bts_buffer_setup_aux() 81 for (pg = 0, nbuf = 0; pg < nr_pages;) { bts_buffer_setup_aux() 83 if (WARN_ON_ONCE(!PagePrivate(page) && nr_pages > 1)) bts_buffer_setup_aux() 99 buf->nr_pages = nr_pages; bts_buffer_setup_aux() 310 head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1); bts_buffer_reset() 430 buf->nr_pages << PAGE_SHIFT); bts_event_del()
|
H A D | perf_event_intel_pt.c | 440 p = virt_to_page(buf->data_pages[buf->nr_pages]); topa_insert_pages() 462 buf->nr_pages += 1ul << order; topa_insert_pages() 540 ((buf->nr_pages << PAGE_SHIFT) - 1)); pt_update_head() 542 base += buf->nr_pages << PAGE_SHIFT; pt_update_head() 659 pg &= buf->nr_pages - 1; pt_topa_next_entry() 708 idx &= buf->nr_pages - 1; pt_buffer_reset_markers() 718 idx &= buf->nr_pages - 1; pt_buffer_reset_markers() 742 while (pg < buf->nr_pages) { pt_buffer_setup_topa_index() 783 head &= (buf->nr_pages << PAGE_SHIFT) - 1; pt_buffer_reset_offsets() 785 pg = (head >> PAGE_SHIFT) & (buf->nr_pages - 1); pt_buffer_reset_offsets() 820 static int pt_buffer_init_topa(struct pt_buffer *buf, unsigned long nr_pages, pt_buffer_init_topa() argument 832 while (buf->nr_pages < nr_pages) { pt_buffer_init_topa() 856 * @nr_pages: Number of pages in the buffer. 865 pt_buffer_setup_aux(int cpu, void **pages, int nr_pages, bool snapshot) pt_buffer_setup_aux() argument 870 if (!nr_pages) pt_buffer_setup_aux() 877 buf = kzalloc_node(offsetof(struct pt_buffer, topa_index[nr_pages]), pt_buffer_setup_aux() 888 ret = pt_buffer_init_topa(buf, nr_pages, GFP_KERNEL); pt_buffer_setup_aux() 1055 buf->nr_pages << PAGE_SHIFT); pt_event_del()
|
H A D | intel_pt.h | 78 * @nr_pages: buffer size in pages 96 unsigned long nr_pages; member in struct:pt_buffer
|
/linux-4.4.14/drivers/iommu/ |
H A D | s390-iommu.c | 218 unsigned long irq_flags, nr_pages, i; s390_iommu_update_trans() local 226 nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; s390_iommu_update_trans() 227 if (!nr_pages) s390_iommu_update_trans() 231 for (i = 0; i < nr_pages; i++) { s390_iommu_update_trans() 245 start_dma_addr, nr_pages * PAGE_SIZE); s390_iommu_update_trans()
|
/linux-4.4.14/fs/ext4/ |
H A D | readpage.c | 135 unsigned nr_pages) ext4_mpage_readpages() 160 for (page_idx = 0; nr_pages; page_idx++, nr_pages--) { ext4_mpage_readpages() 177 last_block = block_in_file + nr_pages * blocks_per_page; ext4_mpage_readpages() 287 min_t(int, nr_pages, BIO_MAX_PAGES)); ext4_mpage_readpages() 133 ext4_mpage_readpages(struct address_space *mapping, struct list_head *pages, struct page *page, unsigned nr_pages) ext4_mpage_readpages() argument
|
H A D | file.c | 461 unsigned long nr_pages; ext4_find_unwritten_pgoff() local 464 nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index, ext4_find_unwritten_pgoff() 466 if (nr_pages == 0) { ext4_find_unwritten_pgoff() 492 for (i = 0; i < nr_pages; i++) { ext4_find_unwritten_pgoff() 550 if (nr_pages < num && whence == SEEK_HOLE) { ext4_find_unwritten_pgoff()
|
/linux-4.4.14/fs/afs/ |
H A D | file.c | 28 struct list_head *pages, unsigned nr_pages); 241 struct list_head *pages, unsigned nr_pages) afs_readpages() 248 key_serial(key), mapping->host->i_ino, nr_pages); afs_readpages() 263 &nr_pages, afs_readpages() 275 BUG_ON(nr_pages != 0); afs_readpages() 240 afs_readpages(struct file *file, struct address_space *mapping, struct list_head *pages, unsigned nr_pages) afs_readpages() argument
|
H A D | cache.c | 375 int loop, nr_pages; afs_vnode_cache_now_uncached() local 385 nr_pages = pagevec_lookup(&pvec, vnode->vfs_inode.i_mapping, afs_vnode_cache_now_uncached() 388 if (!nr_pages) afs_vnode_cache_now_uncached() 391 for (loop = 0; loop < nr_pages; loop++) afs_vnode_cache_now_uncached() 394 first = pvec.pages[nr_pages - 1]->index + 1; afs_vnode_cache_now_uncached() 396 pvec.nr = nr_pages; afs_vnode_cache_now_uncached()
|
/linux-4.4.14/arch/powerpc/kvm/ |
H A D | book3s_hv_builtin.c | 52 struct page *kvm_alloc_hpt(unsigned long nr_pages) kvm_alloc_hpt() argument 54 VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT); kvm_alloc_hpt() 56 return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES)); kvm_alloc_hpt() 60 void kvm_release_hpt(struct page *page, unsigned long nr_pages) kvm_release_hpt() argument 62 cma_release(kvm_cma, page, nr_pages); kvm_release_hpt()
|
/linux-4.4.14/fs/ |
H A D | fs-writeback.c | 46 long nr_pages; member in struct:wb_writeback_work 749 * wb_split_bdi_pages - split nr_pages to write according to bandwidth 750 * @wb: target bdi_writeback to split @nr_pages to 751 * @nr_pages: number of pages to write for the whole bdi 753 * Split @wb's portion of @nr_pages according to @wb's write bandwidth in 757 static long wb_split_bdi_pages(struct bdi_writeback *wb, long nr_pages) wb_split_bdi_pages() argument 762 if (nr_pages == LONG_MAX) wb_split_bdi_pages() 767 * may not make sense, just use the original @nr_pages in those wb_split_bdi_pages() 771 return nr_pages; wb_split_bdi_pages() 773 return DIV_ROUND_UP_ULL((u64)nr_pages * this_bw, tot_bw); wb_split_bdi_pages() 802 long nr_pages; bdi_split_work_to_wbs() local 817 nr_pages = wb_split_bdi_pages(wb, base_work->nr_pages); bdi_split_work_to_wbs() 822 work->nr_pages = nr_pages; bdi_split_work_to_wbs() 831 work->nr_pages = nr_pages; bdi_split_work_to_wbs() 905 static long wb_split_bdi_pages(struct bdi_writeback *wb, long nr_pages) wb_split_bdi_pages() argument 907 return nr_pages; wb_split_bdi_pages() 924 void wb_start_writeback(struct bdi_writeback *wb, long nr_pages, wb_start_writeback() argument 944 work->nr_pages = nr_pages; wb_start_writeback() 1424 pages = min(pages, work->nr_pages); writeback_chunk_size() 1536 work->nr_pages -= write_chunk - wbc.nr_to_write; writeback_sb_inodes() 1576 if (work->nr_pages <= 0) writeback_sb_inodes() 1609 if (work->nr_pages <= 0) __writeback_inodes_wb() 1617 static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages, writeback_inodes_wb() argument 1621 .nr_pages = nr_pages, writeback_inodes_wb() 1636 return nr_pages - work.nr_pages; writeback_inodes_wb() 1658 long nr_pages = work->nr_pages; wb_writeback() local 1671 * Stop writeback when nr_pages has been consumed wb_writeback() 1673 if (work->nr_pages <= 0) wb_writeback() 1749 return nr_pages - work->nr_pages; wb_writeback() 1785 .nr_pages = LONG_MAX, wb_check_background_flush() 1801 long nr_pages; wb_check_old_data_flush() local 1815 nr_pages = get_nr_dirty_pages(); wb_check_old_data_flush() 1817 if (nr_pages) { wb_check_old_data_flush() 1819 .nr_pages = nr_pages, wb_check_old_data_flush() 1909 * Start writeback of `nr_pages' pages. If `nr_pages' is zero, write back 1912 void wakeup_flusher_threads(long nr_pages, enum wb_reason reason) wakeup_flusher_threads() argument 1916 if (!nr_pages) wakeup_flusher_threads() 1917 nr_pages = get_nr_dirty_pages(); wakeup_flusher_threads() 1927 wb_start_writeback(wb, wb_split_bdi_pages(wb, nr_pages), wakeup_flusher_threads() 2225 .nr_pages = nr, __writeback_inodes_sb_nr() 2319 .nr_pages = LONG_MAX, sync_inodes_sb()
|
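The wb_split_bdi_pages() comment in the fs-writeback.c entry above describes dividing a bdi-wide nr_pages target among writeback contexts in proportion to each one's share of the total write bandwidth. A standalone sketch of that split, with invented bandwidth figures; DIV_ROUND_UP_ULL is defined locally to match the kernel macro's rounding:

    #include <stdio.h>

    #define DIV_ROUND_UP_ULL(n, d) (((n) + (d) - 1) / (d))

    /* Mirror of wb_split_bdi_pages(): give each writeback context a
     * share of nr_pages proportional to its write bandwidth. */
    static long split_pages(long nr_pages, unsigned long this_bw,
                            unsigned long tot_bw)
    {
        if (this_bw < tot_bw)
            return DIV_ROUND_UP_ULL((unsigned long long)nr_pages * this_bw,
                                    tot_bw);
        return nr_pages;    /* whole share; mirrors the kernel fallback */
    }

    int main(void)
    {
        /* two contexts at 75% and 25% of the total bandwidth */
        printf("%ld\n", split_pages(1024, 300, 400));   /* -> 768 */
        printf("%ld\n", split_pages(1024, 100, 400));   /* -> 256 */
        return 0;
    }

Rounding each share up means the per-context targets can sum to slightly more than the original nr_pages, which looks harmless here since the value is only a writeback goal, not a hard quota.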
H A D | splice.c | 185 unsigned int spd_pages = spd->nr_pages; splice_to_pipe() 224 if (!--spd->nr_pages) splice_to_pipe() 312 unsigned int loff, nr_pages, req_pages; __generic_file_splice_read() local 334 nr_pages = min(req_pages, spd.nr_pages_max); __generic_file_splice_read() 339 spd.nr_pages = find_get_pages_contig(mapping, index, nr_pages, spd.pages); __generic_file_splice_read() 340 index += spd.nr_pages; __generic_file_splice_read() 346 if (spd.nr_pages < nr_pages) __generic_file_splice_read() 348 index, req_pages - spd.nr_pages); __generic_file_splice_read() 351 while (spd.nr_pages < nr_pages) { __generic_file_splice_read() 380 spd.pages[spd.nr_pages++] = page; __generic_file_splice_read() 389 nr_pages = spd.nr_pages; __generic_file_splice_read() 390 spd.nr_pages = 0; __generic_file_splice_read() 391 for (page_nr = 0; page_nr < nr_pages; page_nr++) { __generic_file_splice_read() 490 spd.nr_pages++; __generic_file_splice_read() 496 * we got, 'nr_pages' is how many pages are in the map. __generic_file_splice_read() 498 while (page_nr < nr_pages) __generic_file_splice_read() 502 if (spd.nr_pages) __generic_file_splice_read() 611 unsigned int nr_pages; default_file_splice_read() local 642 nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; default_file_splice_read() 644 for (i = 0; i < nr_pages && i < spd.nr_pages_max && len; i++) { default_file_splice_read() 656 spd.nr_pages++; default_file_splice_read() 661 res = kernel_readv(in, vec, spd.nr_pages, *ppos); default_file_splice_read() 672 for (i = 0; i < spd.nr_pages; i++) { default_file_splice_read() 683 spd.nr_pages -= nr_freed; default_file_splice_read() 696 for (i = 0; i < spd.nr_pages; i++) default_file_splice_read() 1615 spd.nr_pages = get_iovec_page_array(iov, nr_segs, spd.pages, vmsplice_to_pipe() 1618 if (spd.nr_pages <= 0) vmsplice_to_pipe() 1619 ret = spd.nr_pages; vmsplice_to_pipe()
|
H A D | pipe.c | 1000 static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long nr_pages) pipe_set_size() argument 1010 if (nr_pages < pipe->nrbufs) pipe_set_size() 1013 bufs = kcalloc(nr_pages, sizeof(*bufs), GFP_KERNEL | __GFP_NOWARN); pipe_set_size() 1038 account_pipe_buffers(pipe, pipe->buffers, nr_pages); pipe_set_size() 1042 pipe->buffers = nr_pages; pipe_set_size() 1043 return nr_pages * PAGE_SIZE; pipe_set_size() 1052 unsigned long nr_pages; round_pipe_size() local 1054 nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; round_pipe_size() 1055 return roundup_pow_of_two(nr_pages) << PAGE_SHIFT; round_pipe_size() 1098 unsigned int size, nr_pages; pipe_fcntl() local 1101 nr_pages = size >> PAGE_SHIFT; pipe_fcntl() 1104 if (!nr_pages) pipe_fcntl() 1116 ret = pipe_set_size(pipe, nr_pages); pipe_fcntl()
|
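round_pipe_size() in the pipe.c entry converts the requested byte size into pages and rounds the page count up to a power of two, so pipe_set_size() always works with a power-of-two buffer count. A userspace restatement, assuming 4 KiB pages; roundup_pow_of_two() is reimplemented with a simple loop where the kernel uses fls-based bit arithmetic:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* next power of two at or above n */
    static unsigned long roundup_pow_of_two(unsigned long n)
    {
        unsigned long p = 1;

        while (p < n)
            p <<= 1;
        return p;
    }

    /* mirror of round_pipe_size(): bytes -> pages -> next pow2 -> bytes */
    static unsigned long round_pipe_size(unsigned long size)
    {
        unsigned long nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

        return roundup_pow_of_two(nr_pages) << PAGE_SHIFT;
    }

    int main(void)
    {
        printf("%lu\n", round_pipe_size(5000));   /* 2 pages -> 8192 */
        printf("%lu\n", round_pipe_size(20000));  /* 5 pages -> 8 pages = 32768 */
        return 0;
    }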
H A D | aio.c | 116 long nr_pages; member in struct:kioctx 205 static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages) aio_private_file() argument 216 inode->i_size = PAGE_SIZE * nr_pages; aio_private_file() 294 for (i = 0; i < ctx->nr_pages; i++) { aio_free_ring() 388 if (idx < (pgoff_t)ctx->nr_pages) { aio_migratepage() 442 int nr_pages; aio_setup_ring() local 452 nr_pages = PFN_UP(size); aio_setup_ring() 453 if (nr_pages < 0) aio_setup_ring() 456 file = aio_private_file(ctx, nr_pages); aio_setup_ring() 463 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) aio_setup_ring() 467 if (nr_pages > AIO_RING_PAGES) { aio_setup_ring() 468 ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *), aio_setup_ring() 476 for (i = 0; i < nr_pages; i++) { aio_setup_ring() 489 ctx->nr_pages = i; aio_setup_ring() 491 if (unlikely(i != nr_pages)) { aio_setup_ring() 496 ctx->mmap_size = nr_pages * PAGE_SIZE; aio_setup_ring()
|
H A D | mpage.c | 140 do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages, do_mpage_readpage() argument 165 last_block = block_in_file + nr_pages * blocks_per_page; do_mpage_readpage() 281 min_t(int, nr_pages, BIO_MAX_PAGES), gfp); do_mpage_readpage() 319 * @nr_pages: The number of pages at *@pages 357 unsigned nr_pages, get_block_t get_block) mpage_readpages() 368 for (page_idx = 0; page_idx < nr_pages; page_idx++) { mpage_readpages() 377 nr_pages - page_idx, mpage_readpages() 356 mpage_readpages(struct address_space *mapping, struct list_head *pages, unsigned nr_pages, get_block_t get_block) mpage_readpages() argument
|
/linux-4.4.14/drivers/gpu/drm/virtio/ |
H A D | virtgpu_object.c | 124 int nr_pages = bo->tbo.num_pages; virtio_gpu_object_get_sg_table() local 136 ret = sg_alloc_table_from_pages(bo->pages, pages, nr_pages, 0, virtio_gpu_object_get_sg_table() 137 nr_pages << PAGE_SHIFT, GFP_KERNEL); virtio_gpu_object_get_sg_table()
|
/linux-4.4.14/drivers/media/pci/cx25821/ |
H A D | cx25821-alsa.c | 70 int nr_pages; member in struct:cx25821_audio_buffer 147 static int cx25821_alsa_dma_init(struct cx25821_audio_dev *chip, int nr_pages) cx25821_alsa_dma_init() argument 153 buf->vaddr = vmalloc_32(nr_pages << PAGE_SHIFT); cx25821_alsa_dma_init() 155 dprintk(1, "vmalloc_32(%d pages) failed\n", nr_pages); cx25821_alsa_dma_init() 161 nr_pages << PAGE_SHIFT); cx25821_alsa_dma_init() 163 memset(buf->vaddr, 0, nr_pages << PAGE_SHIFT); cx25821_alsa_dma_init() 164 buf->nr_pages = nr_pages; cx25821_alsa_dma_init() 166 buf->sglist = vzalloc(buf->nr_pages * sizeof(*buf->sglist)); cx25821_alsa_dma_init() 170 sg_init_table(buf->sglist, buf->nr_pages); cx25821_alsa_dma_init() 171 for (i = 0; i < buf->nr_pages; i++) { cx25821_alsa_dma_init() 193 buf->nr_pages, PCI_DMA_FROMDEVICE); cx25821_alsa_dma_map()
|
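The cx25821 entry above, its cx88 and saa7134 counterparts further down, and saa7146's vmalloc_to_sg() all share one pattern: back the DMA buffer with vmalloc_32() pages, then describe it with a scatterlist holding one full-page entry per page. A condensed kernel-style sketch of that pattern, not a drop-in: the helper name is invented and error handling beyond the allocation check is trimmed, though vzalloc(), sg_init_table(), sg_set_page() and vmalloc_to_page() are the real APIs these drivers use:

    #include <linux/mm.h>
    #include <linux/scatterlist.h>
    #include <linux/vmalloc.h>

    /* one scatterlist entry per page of a vmalloc_32() buffer */
    static struct scatterlist *vmalloc_buf_to_sglist(void *vaddr, int nr_pages)
    {
        struct scatterlist *sglist;
        int i;

        sglist = vzalloc(nr_pages * sizeof(*sglist));
        if (!sglist)
            return NULL;

        sg_init_table(sglist, nr_pages);
        for (i = 0; i < nr_pages; i++) {
            struct page *pg = vmalloc_to_page(vaddr + i * PAGE_SIZE);

            /* each entry covers one whole page at offset 0 */
            sg_set_page(&sglist[i], pg, PAGE_SIZE, 0);
        }
        return sglist;
    }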
/linux-4.4.14/drivers/media/pci/cx88/ |
H A D | cx88-alsa.c | 68 int nr_pages; member in struct:cx88_audio_buffer 294 static int cx88_alsa_dma_init(struct cx88_audio_dev *chip, int nr_pages) cx88_alsa_dma_init() argument 300 buf->vaddr = vmalloc_32(nr_pages << PAGE_SHIFT); cx88_alsa_dma_init() 302 dprintk(1, "vmalloc_32(%d pages) failed\n", nr_pages); cx88_alsa_dma_init() 308 nr_pages << PAGE_SHIFT); cx88_alsa_dma_init() 310 memset(buf->vaddr, 0, nr_pages << PAGE_SHIFT); cx88_alsa_dma_init() 311 buf->nr_pages = nr_pages; cx88_alsa_dma_init() 313 buf->sglist = vzalloc(buf->nr_pages * sizeof(*buf->sglist)); cx88_alsa_dma_init() 317 sg_init_table(buf->sglist, buf->nr_pages); cx88_alsa_dma_init() 318 for (i = 0; i < buf->nr_pages; i++) { cx88_alsa_dma_init() 340 buf->nr_pages, PCI_DMA_FROMDEVICE); cx88_alsa_dma_map()
|
/linux-4.4.14/drivers/lightnvm/ |
H A D | gennvm.c | 323 if (rqd->nr_pages > 1) { gennvm_addr_to_generic_mode() 324 for (i = 0; i < rqd->nr_pages; i++) gennvm_addr_to_generic_mode() 336 if (rqd->nr_pages > 1) { gennvm_generic_to_addr_mode() 337 for (i = 0; i < rqd->nr_pages; i++) gennvm_generic_to_addr_mode() 396 if (rqd->nr_pages > 1) gennvm_mark_blk_bad() 397 for (i = 0; i < rqd->nr_pages; i++) gennvm_mark_blk_bad() 437 rqd.nr_pages = 1; gennvm_erase_blk() 441 rqd.nr_pages = plane_cnt; gennvm_erase_blk()
|
H A D | rrpc.h | 96 unsigned long long nr_pages; member in struct:rrpc 197 BUG_ON((laddr + pages) > rrpc->nr_pages); rrpc_lock_laddr() 232 uint8_t pages = rqd->nr_pages; rrpc_unlock_rq() 234 BUG_ON((r->l_start + pages) > rrpc->nr_pages); rrpc_unlock_rq()
|
H A D | rrpc.c | 515 BUG_ON(laddr >= rrpc->nr_pages); rrpc_update_map() 647 uint8_t npages = rqd->nr_pages; rrpc_end_io() 685 BUG_ON(!(laddr + i >= 0 && laddr + i < rrpc->nr_pages)); rrpc_read_ppalist_rq() 716 BUG_ON(!(laddr >= 0 && laddr < rrpc->nr_pages)); rrpc_read_rq() 823 uint8_t nr_pages = rrpc_get_pages(bio); rrpc_submit_io() local 831 err = rrpc_setup_rq(rrpc, bio, rqd, flags, nr_pages); rrpc_submit_io() 838 rqd->nr_pages = nr_pages; rrpc_submit_io() 998 rrpc->trans_map = vzalloc(sizeof(struct rrpc_addr) * rrpc->nr_pages); rrpc_map_init() 1003 * rrpc->nr_pages); rrpc_map_init() 1007 for (i = 0; i < rrpc->nr_pages; i++) { rrpc_map_init() 1118 rrpc->nr_pages += dev->sec_per_lun; rrpc_luns_init() 1170 provisioned = rrpc->nr_pages - reserved; rrpc_capacity() 1172 if (reserved > rrpc->nr_pages) { rrpc_capacity() 1335 rrpc->nr_luns, (unsigned long long)rrpc->nr_pages); rrpc_init()
|
/linux-4.4.14/fs/ntfs/ |
H A D | file.c | 497 * @nr_pages: number of page cache pages to obtain 501 * Obtain @nr_pages locked page cache pages from the mapping @mapping and 509 pgoff_t index, const unsigned nr_pages, struct page **pages, __ntfs_grab_cache_pages() 514 BUG_ON(!nr_pages); __ntfs_grab_cache_pages() 540 } while (nr < nr_pages); __ntfs_grab_cache_pages() 562 * @nr_pages: number of pages in @pages 568 * @nr_pages pages in @pages which are locked but not kmap()ped. The source 575 * If @nr_pages is greater than one, we are guaranteed that the cluster size is 585 unsigned nr_pages, s64 pos, size_t bytes) ntfs_prepare_pages_for_non_resident_write() 613 BUG_ON(!nr_pages); ntfs_prepare_pages_for_non_resident_write() 620 "index 0x%lx, nr_pages 0x%x, pos 0x%llx, bytes 0x%zx.", ntfs_prepare_pages_for_non_resident_write() 621 vi->i_ino, ni->type, pages[0]->index, nr_pages, ntfs_prepare_pages_for_non_resident_write() 638 } while (++u < nr_pages); ntfs_prepare_pages_for_non_resident_write() 1191 if (likely(!err && ++u < nr_pages)) ntfs_prepare_pages_for_non_resident_write() 1237 } while (++u < nr_pages); ntfs_prepare_pages_for_non_resident_write() 1345 nr_pages = u; ntfs_prepare_pages_for_non_resident_write() 1352 if (u == nr_pages && ntfs_prepare_pages_for_non_resident_write() 1370 } while (++u <= nr_pages); ntfs_prepare_pages_for_non_resident_write() 1376 unsigned nr_pages) ntfs_flush_dcache_pages() 1378 BUG_ON(!nr_pages); ntfs_flush_dcache_pages() 1385 --nr_pages; ntfs_flush_dcache_pages() 1386 flush_dcache_page(pages[nr_pages]); ntfs_flush_dcache_pages() 1387 } while (nr_pages > 0); ntfs_flush_dcache_pages() 1393 * @nr_pages: number of pages in @pages 1400 struct page **pages, const unsigned nr_pages, ntfs_commit_pages_after_non_resident_write() 1446 } while (++u < nr_pages); ntfs_commit_pages_after_non_resident_write() 1521 * @nr_pages: number of pages in @pages 1526 * (@pages[0]->mapping->host). There are @nr_pages pages in @pages which are 1555 const unsigned nr_pages, s64 pos, size_t bytes) ntfs_commit_pages_after_write() 1570 BUG_ON(!nr_pages); ntfs_commit_pages_after_write() 1577 "index 0x%lx, nr_pages 0x%x, pos 0x%llx, bytes 0x%zx.", ntfs_commit_pages_after_write() 1578 vi->i_ino, ni->type, page->index, nr_pages, ntfs_commit_pages_after_write() 1582 nr_pages, pos, bytes); ntfs_commit_pages_after_write() 1583 BUG_ON(nr_pages > 1); ntfs_commit_pages_after_write() 1700 static size_t ntfs_copy_from_user_iter(struct page **pages, unsigned nr_pages, ntfs_copy_from_user_iter() argument 1703 struct page **last_page = pages + nr_pages; ntfs_copy_from_user_iter() 1758 unsigned nr_pages; ntfs_perform_write() local 1789 nr_pages = 1; ntfs_perform_write() 1791 nr_pages = vol->cluster_size >> PAGE_CACHE_SHIFT; ntfs_perform_write() 1803 if (nr_pages > 1) { ntfs_perform_write() 1838 do_pages = nr_pages; ntfs_perform_write() 508 __ntfs_grab_cache_pages(struct address_space *mapping, pgoff_t index, const unsigned nr_pages, struct page **pages, struct page **cached_page) __ntfs_grab_cache_pages() argument 584 ntfs_prepare_pages_for_non_resident_write(struct page **pages, unsigned nr_pages, s64 pos, size_t bytes) ntfs_prepare_pages_for_non_resident_write() argument 1375 ntfs_flush_dcache_pages(struct page **pages, unsigned nr_pages) ntfs_flush_dcache_pages() argument 1399 ntfs_commit_pages_after_non_resident_write( struct page **pages, const unsigned nr_pages, s64 pos, size_t bytes) ntfs_commit_pages_after_non_resident_write() argument 1554 ntfs_commit_pages_after_write(struct page **pages, const unsigned nr_pages, s64 pos, size_t bytes) ntfs_commit_pages_after_write() argument
|
H A D | compress.c | 517 unsigned int nr_pages = (end_vcn - start_vcn) << ntfs_read_compressed_block() local 525 ntfs_debug("Entering, page->index = 0x%lx, cb_size = 0x%x, nr_pages = " ntfs_read_compressed_block() 526 "%i.", index, cb_size, nr_pages); ntfs_read_compressed_block() 534 pages = kmalloc(nr_pages * sizeof(struct page *), GFP_NOFS); ntfs_read_compressed_block() 575 if (nr_pages < max_page) ntfs_read_compressed_block() 576 max_page = nr_pages; ntfs_read_compressed_block()
|
/linux-4.4.14/arch/sparc/mm/ |
H A D | gup.c | 163 int __get_user_pages_fast(unsigned long start, int nr_pages, int write, __get_user_pages_fast() argument 174 len = (unsigned long) nr_pages << PAGE_SHIFT; __get_user_pages_fast() 193 int get_user_pages_fast(unsigned long start, int nr_pages, int write, get_user_pages_fast() argument 204 len = (unsigned long) nr_pages << PAGE_SHIFT; get_user_pages_fast()
|
/linux-4.4.14/arch/x86/xen/ |
H A D | setup.c | 254 unsigned long end_pfn, unsigned long nr_pages) xen_set_identity_and_release_chunk() 262 end = min(end_pfn, nr_pages); xen_set_identity_and_release_chunk() 389 unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages, xen_set_identity_and_remap_chunk() 397 remap_pfn = nr_pages; xen_set_identity_and_remap_chunk() 406 if (cur_pfn >= nr_pages) { xen_set_identity_and_remap_chunk() 411 if (cur_pfn + size > nr_pages) xen_set_identity_and_remap_chunk() 412 size = nr_pages - cur_pfn; xen_set_identity_and_remap_chunk() 418 cur_pfn + left, nr_pages); xen_set_identity_and_remap_chunk() 445 unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages, xen_count_remap_pages() 448 if (start_pfn >= nr_pages) xen_count_remap_pages() 451 return remap_pages + min(end_pfn, nr_pages) - start_pfn; xen_count_remap_pages() 454 static unsigned long __init xen_foreach_remap_area(unsigned long nr_pages, xen_foreach_remap_area() argument 456 unsigned long nr_pages, unsigned long last_val)) xen_foreach_remap_area() 484 ret_val = func(start_pfn, end_pfn, nr_pages, xen_foreach_remap_area() 709 size = PFN_ALIGN(xen_start_info->nr_pages * xen_reserve_xen_mfnlist() 750 max_pfn = min(max_pfn, xen_start_info->nr_pages); xen_memory_setup() 253 xen_set_identity_and_release_chunk(unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages) xen_set_identity_and_release_chunk() argument 388 xen_set_identity_and_remap_chunk( unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages, unsigned long remap_pfn) xen_set_identity_and_remap_chunk() argument 444 xen_count_remap_pages( unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages, unsigned long remap_pages) xen_count_remap_pages() argument
|
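xen_set_identity_and_remap_chunk() in the setup.c entry clamps every chunk against nr_pages, the count of actually populated pages: a chunk entirely above the limit contributes nothing to the remap, and a straddling chunk is truncated. The clamping on its own, with illustrative pfn values:

    #include <stdio.h>

    int main(void)
    {
        unsigned long nr_pages = 0x1000;            /* populated-pfn limit */
        unsigned long cur_pfn = 0xf00, size = 0x400;

        if (cur_pfn >= nr_pages)
            size = 0;                               /* chunk fully above limit */
        else if (cur_pfn + size > nr_pages)
            size = nr_pages - cur_pfn;              /* truncate straddler */

        printf("remappable size = %#lx pfns\n", size);  /* -> 0x100 */
        return 0;
    }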
/linux-4.4.14/drivers/media/common/saa7146/ |
H A D | saa7146_core.c | 149 static struct scatterlist* vmalloc_to_sg(unsigned char *virt, int nr_pages) vmalloc_to_sg() argument 155 sglist = kcalloc(nr_pages, sizeof(struct scatterlist), GFP_KERNEL); vmalloc_to_sg() 158 sg_init_table(sglist, nr_pages); vmalloc_to_sg() 159 for (i = 0; i < nr_pages; i++, virt += PAGE_SIZE) { vmalloc_to_sg() 251 int nr_pages = 0; saa7146_pgtable_build_single() local 270 nr_pages++; saa7146_pgtable_build_single() 277 for(i=nr_pages;i<1024;i++) { saa7146_pgtable_build_single()
|
/linux-4.4.14/arch/powerpc/mm/ |
H A D | mem.c | 121 unsigned long nr_pages = size >> PAGE_SHIFT; arch_add_memory() local 133 return __add_pages(nid, zone, start_pfn, nr_pages); arch_add_memory() 140 unsigned long nr_pages = size >> PAGE_SHIFT; arch_remove_memory() local 145 ret = __remove_pages(zone, start_pfn, nr_pages); arch_remove_memory() 170 walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages, walk_system_ram_range() argument 174 unsigned long end_pfn = start_pfn + nr_pages; walk_system_ram_range()
|
/linux-4.4.14/fs/hpfs/ |
H A D | file.c | 128 struct list_head *pages, unsigned nr_pages) hpfs_readpages() 130 return mpage_readpages(mapping, pages, nr_pages, hpfs_get_block); hpfs_readpages() 127 hpfs_readpages(struct file *file, struct address_space *mapping, struct list_head *pages, unsigned nr_pages) hpfs_readpages() argument
|
/linux-4.4.14/drivers/gpu/drm/exynos/ |
H A D | exynos_drm_fbdev.c | 84 unsigned int nr_pages; exynos_drm_fbdev_update() local 100 nr_pages = exynos_gem->size >> PAGE_SHIFT; exynos_drm_fbdev_update() 102 exynos_gem->kvaddr = (void __iomem *) vmap(exynos_gem->pages, nr_pages, exynos_drm_fbdev_update()
|
H A D | exynos_drm_gem.c | 27 unsigned int nr_pages; exynos_drm_alloc_buf() local 59 nr_pages = exynos_gem->size >> PAGE_SHIFT; exynos_drm_alloc_buf() 61 exynos_gem->pages = drm_calloc_large(nr_pages, sizeof(struct page *)); exynos_drm_alloc_buf() 84 nr_pages)) { exynos_drm_alloc_buf()
|
/linux-4.4.14/arch/alpha/mm/ |
H A D | init.c | 192 unsigned long nr_pages = 0; callback_init() local 198 nr_pages += crb->map[i].count; callback_init() 202 console_remap_vm.size = nr_pages << PAGE_SHIFT; callback_init()
|
/linux-4.4.14/fs/gfs2/ |
H A D | aops.c | 226 * @nr_pages: The number of pages to write 236 int nr_pages, pgoff_t end, gfs2_write_jdata_pagevec() 241 unsigned nrblocks = nr_pages * (PAGE_CACHE_SIZE/inode->i_sb->s_blocksize); gfs2_write_jdata_pagevec() 249 for(i = 0; i < nr_pages; i++) { gfs2_write_jdata_pagevec() 350 int nr_pages; gfs2_write_cache_jdata() local 385 nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag, gfs2_write_cache_jdata() 387 if (nr_pages == 0) gfs2_write_cache_jdata() 390 ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, end, &done_index); gfs2_write_cache_jdata() 594 * @nr_pages: Number of pages to read 608 struct list_head *pages, unsigned nr_pages) gfs2_readpages() 621 ret = mpage_readpages(mapping, pages, nr_pages, gfs2_block_map); gfs2_readpages() 1082 * the first place, mapping->nr_pages will always be zero. gfs2_direct_IO() 233 gfs2_write_jdata_pagevec(struct address_space *mapping, struct writeback_control *wbc, struct pagevec *pvec, int nr_pages, pgoff_t end, pgoff_t *done_index) gfs2_write_jdata_pagevec() argument 607 gfs2_readpages(struct file *file, struct address_space *mapping, struct list_head *pages, unsigned nr_pages) gfs2_readpages() argument
|
/linux-4.4.14/kernel/ |
H A D | relay.c | 1215 unsigned int pidx, poff, total_len, subbuf_pages, nr_pages; subbuf_splice_actor() local 1228 .nr_pages = 0, subbuf_splice_actor() 1251 nr_pages = min_t(unsigned int, subbuf_pages, spd.nr_pages_max); subbuf_splice_actor() 1253 for (total_len = 0; spd.nr_pages < nr_pages; spd.nr_pages++) { subbuf_splice_actor() 1263 spd.pages[spd.nr_pages] = rbuf->page_array[pidx]; subbuf_splice_actor() 1264 spd.partial[spd.nr_pages].offset = poff; subbuf_splice_actor() 1271 spd.partial[spd.nr_pages].len = this_len; subbuf_splice_actor() 1272 spd.partial[spd.nr_pages].private = private; subbuf_splice_actor() 1280 spd.nr_pages++; subbuf_splice_actor() 1286 if (!spd.nr_pages) subbuf_splice_actor()
|
/linux-4.4.14/drivers/xen/xenbus/ |
H A D | xenbus_client.c | 372 * @nr_pages: number of pages to be granted 381 unsigned int nr_pages, grant_ref_t *grefs) xenbus_grant_ring() 386 for (i = 0; i < nr_pages; i++) { xenbus_grant_ring() 634 unsigned int nr_pages = XENBUS_PAGES(nr_grefs); xenbus_map_ring_valloc_hvm() local 645 err = alloc_xenballooned_pages(nr_pages, node->hvm.pages); xenbus_map_ring_valloc_hvm() 660 addr = vmap(node->hvm.pages, nr_pages, VM_MAP | VM_IOREMAP, xenbus_map_ring_valloc_hvm() 681 addr, nr_pages); xenbus_map_ring_valloc_hvm() 684 free_xenballooned_pages(nr_pages, node->hvm.pages); xenbus_map_ring_valloc_hvm() 836 unsigned int nr_pages; xenbus_unmap_ring_vfree_hvm() local 856 nr_pages = XENBUS_PAGES(node->nr_handles); xenbus_unmap_ring_vfree_hvm() 866 free_xenballooned_pages(nr_pages, node->hvm.pages); xenbus_unmap_ring_vfree_hvm() 869 WARN(1, "Leaking %p, size %u page(s)\n", vaddr, nr_pages); xenbus_unmap_ring_vfree_hvm() 380 xenbus_grant_ring(struct xenbus_device *dev, void *vaddr, unsigned int nr_pages, grant_ref_t *grefs) xenbus_grant_ring() argument
|
/linux-4.4.14/block/ |
H A D | bio.c | 1129 int nr_pages = 0; bio_copy_user_iov() local 1149 nr_pages += end - start; bio_copy_user_iov() 1153 nr_pages++; bio_copy_user_iov() 1170 bio = bio_kmalloc(gfp_mask, nr_pages); bio_copy_user_iov() 1180 nr_pages = 1 << map_data->page_order; bio_copy_user_iov() 1192 if (i == map_data->nr_entries * nr_pages) { bio_copy_user_iov() 1197 page = map_data->pages[i / nr_pages]; bio_copy_user_iov() 1198 page += (i % nr_pages); bio_copy_user_iov() 1254 int nr_pages = 0; bio_map_user_iov() local 1274 nr_pages += end - start; bio_map_user_iov() 1282 if (!nr_pages) bio_map_user_iov() 1285 bio = bio_kmalloc(gfp_mask, nr_pages); bio_map_user_iov() 1290 pages = kcalloc(nr_pages, sizeof(struct page *), gfp_mask); bio_map_user_iov() 1359 for (j = 0; j < nr_pages; j++) { bio_map_user_iov() 1425 const int nr_pages = end - start; bio_map_kern() local 1429 bio = bio_kmalloc(gfp_mask, nr_pages); bio_map_kern() 1434 for (i = 0; i < nr_pages; i++) { bio_map_kern() 1499 int nr_pages = 0; bio_copy_kern() local 1507 nr_pages = end - start; bio_copy_kern() 1508 bio = bio_kmalloc(gfp_mask, nr_pages); bio_copy_kern()
|
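bio_copy_user_iov() and bio_map_user_iov() in the bio.c entry both size their page arrays by summing, per segment, the page index one past the segment's end minus the index of its first page, which counts segments that straddle page boundaries correctly. A self-contained version of that count; struct iov and the sample addresses are invented for illustration:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    struct iov { unsigned long base; unsigned long len; };

    /* pages touched by a set of user segments, bio.c style */
    static int count_pages(const struct iov *iov, int nsegs)
    {
        int nr_pages = 0, i;

        for (i = 0; i < nsegs; i++) {
            unsigned long start = iov[i].base >> PAGE_SHIFT;
            unsigned long end = (iov[i].base + iov[i].len + PAGE_SIZE - 1)
                                >> PAGE_SHIFT;

            nr_pages += end - start;
        }
        return nr_pages;
    }

    int main(void)
    {
        struct iov v[2] = {
            { 0x10ff0, 0x20 },   /* straddles a page boundary: 2 pages */
            { 0x20000, 0x1000 }, /* exactly one page */
        };

        printf("%d\n", count_pages(v, 2));   /* -> 3 */
        return 0;
    }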
H A D | bio-integrity.c | 276 unsigned int len, nr_pages; bio_integrity_prep() local 297 nr_pages = end - start; bio_integrity_prep() 300 bip = bio_integrity_alloc(bio, GFP_NOIO, nr_pages); bio_integrity_prep() 316 for (i = 0 ; i < nr_pages ; i++) { bio_integrity_prep()
|
/linux-4.4.14/drivers/media/pci/saa7134/ |
H A D | saa7134-alsa.c | 268 static int saa7134_alsa_dma_init(struct saa7134_dev *dev, int nr_pages) saa7134_alsa_dma_init() argument 274 dma->vaddr = vmalloc_32(nr_pages << PAGE_SHIFT); saa7134_alsa_dma_init() 276 pr_debug("vmalloc_32(%d pages) failed\n", nr_pages); saa7134_alsa_dma_init() 282 nr_pages << PAGE_SHIFT); saa7134_alsa_dma_init() 284 memset(dma->vaddr, 0, nr_pages << PAGE_SHIFT); saa7134_alsa_dma_init() 285 dma->nr_pages = nr_pages; saa7134_alsa_dma_init() 287 dma->sglist = vzalloc(dma->nr_pages * sizeof(*dma->sglist)); saa7134_alsa_dma_init() 291 sg_init_table(dma->sglist, dma->nr_pages); saa7134_alsa_dma_init() 292 for (i = 0; i < dma->nr_pages; i++) { saa7134_alsa_dma_init() 314 dma->nr_pages, PCI_DMA_FROMDEVICE); saa7134_alsa_dma_map()
|
/linux-4.4.14/kernel/trace/ |
H A D | ring_buffer.c | 440 unsigned long nr_pages; member in struct:ring_buffer_per_cpu 1140 static int __rb_allocate_pages(long nr_pages, struct list_head *pages, int cpu) __rb_allocate_pages() argument 1145 for (i = 0; i < nr_pages; i++) { __rb_allocate_pages() 1180 unsigned long nr_pages) rb_allocate_pages() 1184 WARN_ON(!nr_pages); rb_allocate_pages() 1186 if (__rb_allocate_pages(nr_pages, &pages, cpu_buffer->cpu)) rb_allocate_pages() 1197 cpu_buffer->nr_pages = nr_pages; rb_allocate_pages() 1205 rb_allocate_cpu_buffer(struct ring_buffer *buffer, long nr_pages, int cpu) rb_allocate_cpu_buffer() argument 1245 ret = rb_allocate_pages(cpu_buffer, nr_pages); rb_allocate_cpu_buffer() 1305 long nr_pages; __ring_buffer_alloc() local 1318 nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE); __ring_buffer_alloc() 1327 if (nr_pages < 2) __ring_buffer_alloc() 1328 nr_pages = 2; __ring_buffer_alloc() 1351 rb_allocate_cpu_buffer(buffer, nr_pages, cpu); for_each_buffer_cpu() 1433 rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages) rb_remove_pages() argument 1469 for (nr_removed = 0; nr_removed < nr_pages; nr_removed++) { rb_remove_pages() 1630 cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update; rb_update_pages() 1655 unsigned long nr_pages; ring_buffer_resize() local 1669 nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE); ring_buffer_resize() 1672 if (nr_pages < 2) ring_buffer_resize() 1673 nr_pages = 2; ring_buffer_resize() 1675 size = nr_pages * BUF_PAGE_SIZE; ring_buffer_resize() 1693 cpu_buffer->nr_pages_to_update = nr_pages - for_each_buffer_cpu() 1694 cpu_buffer->nr_pages; for_each_buffer_cpu() 1753 if (nr_pages == cpu_buffer->nr_pages) 1756 cpu_buffer->nr_pages_to_update = nr_pages - 1757 cpu_buffer->nr_pages; 2411 max_count = cpu_buffer->nr_pages * 100; rb_set_commit_to_write() 4163 * BUF_PAGE_SIZE * buffer->nr_pages ring_buffer_size() 4164 * Since the nr_pages field is now removed, we have converted this to ring_buffer_size() 4170 return BUF_PAGE_SIZE * buffer->buffers[cpu]->nr_pages; ring_buffer_size() 4348 if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages) ring_buffer_swap_cpu() 4650 unsigned long nr_pages; rb_cpu_notify() local 4658 nr_pages = 0; rb_cpu_notify() 4663 if (nr_pages == 0) for_each_buffer_cpu() 4664 nr_pages = buffer->buffers[cpu_i]->nr_pages; for_each_buffer_cpu() 4665 if (nr_pages != buffer->buffers[cpu_i]->nr_pages) { for_each_buffer_cpu() 4672 nr_pages = 2; 4674 rb_allocate_cpu_buffer(buffer, nr_pages, cpu); 1179 rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages) rb_allocate_pages() argument
|
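__ring_buffer_alloc() and ring_buffer_resize() in the ring_buffer.c entry both derive nr_pages as DIV_ROUND_UP(size, BUF_PAGE_SIZE) and then enforce a two-page minimum. A sketch of that sizing rule, assuming BUF_PAGE_SIZE is a 4 KiB page minus a 16-byte buffer-page header (the real header size comes from struct buffer_data_page):

    #include <stdio.h>

    #define BUF_PAGE_HDR_SIZE 16                      /* assumed header size */
    #define BUF_PAGE_SIZE (4096 - BUF_PAGE_HDR_SIZE)
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* bytes requested -> buffer pages, with the two-page floor */
    static long size_to_nr_pages(unsigned long size)
    {
        long nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);

        if (nr_pages < 2)
            nr_pages = 2;
        return nr_pages;
    }

    int main(void)
    {
        printf("%ld\n", size_to_nr_pages(1024));    /* -> 2 (minimum) */
        printf("%ld\n", size_to_nr_pages(65536));   /* -> 17 */
        return 0;
    }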
/linux-4.4.14/drivers/video/fbdev/ |
H A D | pvr2fb.c | 679 unsigned int nr_pages; pvr2fb_write() local 683 nr_pages = (count + PAGE_SIZE - 1) >> PAGE_SHIFT; pvr2fb_write() 685 pages = kmalloc(nr_pages * sizeof(struct page *), GFP_KERNEL); pvr2fb_write() 690 nr_pages, WRITE, 0, pages); pvr2fb_write() 692 if (ret < nr_pages) { pvr2fb_write() 693 nr_pages = ret; pvr2fb_write() 702 end = (unsigned long)page_address(pages[nr_pages]); pvr2fb_write() 703 len = nr_pages << PAGE_SHIFT; pvr2fb_write() 721 for (i = 0; i < nr_pages; i++, dst += PAGE_SIZE) { pvr2fb_write() 737 for (i = 0; i < nr_pages; i++) pvr2fb_write()
|
H A D | xen-fbfront.c | 46 int nr_pages; member in struct:xenfb_info 403 info->nr_pages = (fb_size + PAGE_SIZE - 1) >> PAGE_SHIFT; xenfb_probe() 405 info->gfns = vmalloc(sizeof(unsigned long) * info->nr_pages); xenfb_probe() 551 for (i = 0; i < info->nr_pages; i++) xenfb_init_shared_page() 554 for (i = 0; i * epd < info->nr_pages; i++) xenfb_init_shared_page()
|
/linux-4.4.14/fs/cachefiles/ |
H A D | rdwr.c | 677 unsigned *nr_pages, cachefiles_read_or_alloc_pages() 696 *nr_pages); cachefiles_read_or_alloc_pages() 702 if (cachefiles_has_space(cache, 0, *nr_pages) < 0) cachefiles_read_or_alloc_pages() 745 (*nr_pages)--; list_for_each_entry_safe() 771 ret, *nr_pages, list_empty(pages) ? " empty" : ""); 775 fscache_retrieval_complete(op, *nr_pages); 829 unsigned *nr_pages, cachefiles_allocate_pages() 843 _enter("%p,,,%d,", object, *nr_pages); cachefiles_allocate_pages() 845 ret = cachefiles_has_space(cache, 0, *nr_pages); cachefiles_allocate_pages() 861 fscache_retrieval_complete(op, *nr_pages); 675 cachefiles_read_or_alloc_pages(struct fscache_retrieval *op, struct list_head *pages, unsigned *nr_pages, gfp_t gfp) cachefiles_read_or_alloc_pages() argument 827 cachefiles_allocate_pages(struct fscache_retrieval *op, struct list_head *pages, unsigned *nr_pages, gfp_t gfp) cachefiles_allocate_pages() argument
|
/linux-4.4.14/arch/s390/kvm/ |
H A D | gaccess.c | 714 unsigned long *pages, unsigned long nr_pages, guest_page_range() 724 while (nr_pages) { guest_page_range() 749 nr_pages--; guest_page_range() 758 unsigned long _len, nr_pages, gpa, idx; access_guest() local 770 nr_pages = (((ga & ~PAGE_MASK) + len - 1) >> PAGE_SHIFT) + 1; access_guest() 772 if (nr_pages > ARRAY_SIZE(pages_array)) access_guest() 773 pages = vmalloc(nr_pages * sizeof(unsigned long)); access_guest() 779 rc = guest_page_range(vcpu, ga, pages, nr_pages, asce, write); access_guest() 780 for (idx = 0; idx < nr_pages && !rc; idx++) { access_guest() 793 if (nr_pages > ARRAY_SIZE(pages_array)) access_guest() 713 guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga, unsigned long *pages, unsigned long nr_pages, const union asce asce, int write) guest_page_range() argument
|
/linux-4.4.14/drivers/md/ |
H A D | dm-kcopyd.c | 283 * Allocate and reserve nr_pages for the use of a specific client. 285 static int client_reserve_pages(struct dm_kcopyd_client *kc, unsigned nr_pages) client_reserve_pages() argument 290 for (i = 0; i < nr_pages; i++) { client_reserve_pages() 301 kc->nr_reserved_pages += nr_pages; client_reserve_pages() 521 unsigned nr_pages = dm_div_up(job->dests[0].count, PAGE_SIZE >> 9); run_pages_job() local 523 r = kcopyd_get_pages(job->kc, nr_pages, &job->pages); run_pages_job()
|
/linux-4.4.14/drivers/mtd/devices/ |
H A D | mtd_dataflash.c | 622 static int add_dataflash_otp(struct spi_device *spi, char *name, int nr_pages, add_dataflash_otp() argument 648 device->size = nr_pages * pagesize; add_dataflash_otp() 681 int nr_pages, int pagesize, int pageoffset) add_dataflash() 683 return add_dataflash_otp(spi, name, nr_pages, pagesize, add_dataflash() 696 unsigned nr_pages; member in struct:flash_info 839 return add_dataflash_otp(spi, info->name, info->nr_pages, dataflash_probe() 680 add_dataflash(struct spi_device *spi, char *name, int nr_pages, int pagesize, int pageoffset) add_dataflash() argument
|
H A D | sst25l.c | 60 unsigned nr_pages; member in struct:flash_info 384 flash->mtd.size = flash_info->page_size * flash_info->nr_pages; sst25l_probe()
|
/linux-4.4.14/fs/f2fs/ |
H A D | data.c | 875 unsigned nr_pages) f2fs_mpage_readpages() 895 for (page_idx = 0; nr_pages; page_idx++, nr_pages--) { f2fs_mpage_readpages() 907 last_block = block_in_file + nr_pages; f2fs_mpage_readpages() 976 min_t(int, nr_pages, BIO_MAX_PAGES)); f2fs_mpage_readpages() 1031 struct list_head *pages, unsigned nr_pages) f2fs_read_data_pages() 1036 trace_f2fs_readpages(inode, page, nr_pages); f2fs_read_data_pages() 1042 return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages); f2fs_read_data_pages() 1214 int nr_pages; f2fs_write_cache_pages() local 1252 nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag, f2fs_write_cache_pages() 1254 if (nr_pages == 0) f2fs_write_cache_pages() 1257 for (i = 0; i < nr_pages; i++) { f2fs_write_cache_pages() 873 f2fs_mpage_readpages(struct address_space *mapping, struct list_head *pages, struct page *page, unsigned nr_pages) f2fs_mpage_readpages() argument 1029 f2fs_read_data_pages(struct file *file, struct address_space *mapping, struct list_head *pages, unsigned nr_pages) f2fs_read_data_pages() argument
|
H A D | node.c | 1168 int i, nr_pages; sync_node_pages() local 1169 nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index, sync_node_pages() 1172 if (nr_pages == 0) sync_node_pages() 1175 for (i = 0; i < nr_pages; i++) { sync_node_pages() 1266 int i, nr_pages; wait_on_node_pages_writeback() local 1267 nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index, wait_on_node_pages_writeback() 1270 if (nr_pages == 0) wait_on_node_pages_writeback() 1273 for (i = 0; i < nr_pages; i++) { wait_on_node_pages_writeback()
|
/linux-4.4.14/drivers/block/ |
H A D | brd.c | 165 int nr_pages; brd_free_pages() local 170 nr_pages = radix_tree_gang_lookup(&brd->brd_pages, brd_free_pages() 173 for (i = 0; i < nr_pages; i++) { brd_free_pages() 190 } while (nr_pages == FREE_BATCH); brd_free_pages()
|
/linux-4.4.14/fs/proc/ |
H A D | kcore.c | 168 unsigned long nr_pages = ent->size >> PAGE_SHIFT; get_sparsemem_vmemmap_info() local 174 end = ((unsigned long)pfn_to_page(pfn + nr_pages)) - 1; get_sparsemem_vmemmap_info() 206 kclist_add_private(unsigned long pfn, unsigned long nr_pages, void *arg) kclist_add_private() argument 215 ent->size = nr_pages << PAGE_SHIFT; kclist_add_private()
|
H A D | task_mmu.c | 1389 unsigned long nr_pages) gather_stats() 1393 md->pages += nr_pages; gather_stats() 1395 md->dirty += nr_pages; gather_stats() 1398 md->swapcache += nr_pages; gather_stats() 1401 md->active += nr_pages; gather_stats() 1404 md->writeback += nr_pages; gather_stats() 1407 md->anon += nr_pages; gather_stats() 1412 md->node[page_to_nid(page)] += nr_pages; gather_stats() 1388 gather_stats(struct page *page, struct numa_maps *md, int pte_dirty, unsigned long nr_pages) gather_stats() argument
|
/linux-4.4.14/include/media/ |
H A D | videobuf-dma-sg.h | 66 int nr_pages; member in struct:videobuf_dmabuf
|
/linux-4.4.14/arch/arm/include/asm/xen/ |
H A D | page.h | 107 unsigned long nr_pages);
|
/linux-4.4.14/fs/omfs/ |
H A D | file.c | 293 struct list_head *pages, unsigned nr_pages) omfs_readpages() 295 return mpage_readpages(mapping, pages, nr_pages, omfs_get_block); omfs_readpages() 292 omfs_readpages(struct file *file, struct address_space *mapping, struct list_head *pages, unsigned nr_pages) omfs_readpages() argument
|
/linux-4.4.14/drivers/net/ethernet/broadcom/bnxt/ |
H A D | bnxt_sriov.c | 305 u32 nr_pages, size, i, j, k = 0; bnxt_alloc_vf_resources() local 314 nr_pages = size / BNXT_PAGE_SIZE; bnxt_alloc_vf_resources() 316 nr_pages++; bnxt_alloc_vf_resources() 318 for (i = 0; i < nr_pages; i++) { bnxt_alloc_vf_resources() 344 bp->pf.hwrm_cmd_req_pages = nr_pages; bnxt_alloc_vf_resources()
|
/linux-4.4.14/drivers/base/ |
H A D | memory.c | 228 unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block; memory_block_action() local 240 ret = online_pages(start_pfn, nr_pages, online_type); memory_block_action() 243 ret = offline_pages(start_pfn, nr_pages); memory_block_action() 391 unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block; show_valid_zones() local 396 end_pfn = start_pfn + nr_pages; show_valid_zones()
|
H A D | firmware_class.c | 149 int nr_pages; member in struct:firmware_buf 258 for (i = 0; i < buf->nr_pages; i++) 623 buf->data = vmap(buf->pages, buf->nr_pages, 0, PAGE_KERNEL_RO); fw_map_pages_buf() 661 for (i = 0; i < fw_buf->nr_pages; i++) firmware_loading_store() 666 fw_buf->nr_pages = 0; firmware_loading_store() 788 while (buf->nr_pages < pages_needed) { fw_realloc_buffer() 789 buf->pages[buf->nr_pages] = fw_realloc_buffer() 792 if (!buf->pages[buf->nr_pages]) { fw_realloc_buffer() 796 buf->nr_pages++; fw_realloc_buffer()
|
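fw_realloc_buffer() in the firmware_class.c entry grows the firmware image one page at a time, appending to buf->pages and advancing buf->nr_pages until the buffer covers the data received so far. A userspace analogue of that loop; malloc() stands in for alloc_page(), and the kernel's reallocation of the pointer array itself is elided by using a fixed-size array:

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SIZE 4096

    struct fw_buf {
        void *pages[16];
        int nr_pages;
    };

    /* extend the page list until it covers pages_needed */
    static int grow_buffer(struct fw_buf *buf, int pages_needed)
    {
        while (buf->nr_pages < pages_needed) {
            buf->pages[buf->nr_pages] = malloc(PAGE_SIZE);
            if (!buf->pages[buf->nr_pages])
                return -1;              /* -ENOMEM in the kernel */
            buf->nr_pages++;
        }
        return 0;
    }

    int main(void)
    {
        struct fw_buf buf = { .nr_pages = 0 };

        if (grow_buffer(&buf, 4) == 0)
            printf("nr_pages=%d\n", buf.nr_pages);   /* -> 4 */
        return 0;
    }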
/linux-4.4.14/drivers/gpu/drm/savage/ |
H A D | savage_bci.c | 367 unsigned int nr_pages = (n - rest + SAVAGE_DMA_PAGE_SIZE - 1) / savage_dma_alloc() local 372 DRM_DEBUG("cur=%u, cur->used=%u, n=%u, rest=%u, nr_pages=%u\n", savage_dma_alloc() 373 cur, dev_priv->dma_pages[cur].used, n, rest, nr_pages); savage_dma_alloc() 375 if (cur + nr_pages < dev_priv->nr_dma_pages) { savage_dma_alloc() 385 nr_pages = savage_dma_alloc() 395 for (i = cur; nr_pages > 0; ++i, --nr_pages) { savage_dma_alloc()
|
/linux-4.4.14/fs/fscache/ |
H A D | page.c | 536 * nr_pages 541 unsigned *nr_pages, __fscache_read_or_alloc_pages() 551 _enter("%p,,%d,,,", cookie, *nr_pages); __fscache_read_or_alloc_pages() 564 ASSERTCMP(*nr_pages, >, 0); __fscache_read_or_alloc_pages() 573 atomic_set(&op->n_pages, *nr_pages); __fscache_read_or_alloc_pages() 606 op, pages, nr_pages, gfp); __fscache_read_or_alloc_pages() 611 op, pages, nr_pages, gfp); __fscache_read_or_alloc_pages() 538 __fscache_read_or_alloc_pages(struct fscache_cookie *cookie, struct address_space *mapping, struct list_head *pages, unsigned *nr_pages, fscache_rw_complete_t end_io_func, void *context, gfp_t gfp) __fscache_read_or_alloc_pages() argument
|
/linux-4.4.14/drivers/nvme/host/ |
H A D | lightnvm.c | 429 c.set_bb.nlb = cpu_to_le16(rqd->nr_pages - 1); nvme_nvm_set_bb_tbl() 446 c->ph_rw.length = cpu_to_le16(rqd->nr_pages - 1); nvme_nvm_rqtocmd() 515 c.erase.length = cpu_to_le16(rqd->nr_pages - 1); nvme_nvm_erase_block()
|
/linux-4.4.14/drivers/gpu/drm/ |
H A D | drm_prime.c | 672 * @nr_pages: length of the page vector 678 struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages) drm_prime_pages_to_sg() argument 689 ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0, drm_prime_pages_to_sg() 690 nr_pages << PAGE_SHIFT, GFP_KERNEL); drm_prime_pages_to_sg()
|
/linux-4.4.14/arch/tile/kernel/ |
H A D | module.c | 58 area->nr_pages = npages; module_alloc()
|
/linux-4.4.14/include/scsi/ |
H A D | osd_ore.h | 132 unsigned nr_pages; member in struct:ore_io_state
|
/linux-4.4.14/arch/ia64/mm/ |
H A D | init.c | 653 unsigned long nr_pages = size >> PAGE_SHIFT; arch_add_memory() local 660 ret = __add_pages(nid, zone, start_pfn, nr_pages); arch_add_memory() 673 unsigned long nr_pages = size >> PAGE_SHIFT; arch_remove_memory() local 678 ret = __remove_pages(zone, start_pfn, nr_pages); arch_remove_memory()
|
/linux-4.4.14/fs/jfs/ |
H A D | inode.c | 300 struct list_head *pages, unsigned nr_pages) jfs_readpages() 302 return mpage_readpages(mapping, pages, nr_pages, jfs_get_block); jfs_readpages() 299 jfs_readpages(struct file *file, struct address_space *mapping, struct list_head *pages, unsigned nr_pages) jfs_readpages() argument
|
/linux-4.4.14/include/linux/ceph/ |
H A D | osd_client.h | 358 struct page **pages, int nr_pages, 368 struct page **pages, int nr_pages);
|
/linux-4.4.14/arch/sh/kernel/cpu/sh4/ |
H A D | sq.c | 375 unsigned int nr_pages = 0x04000000 >> PAGE_SHIFT; sq_api_init() local 376 unsigned int size = (nr_pages + (BITS_PER_LONG - 1)) / BITS_PER_LONG; sq_api_init()
|
/linux-4.4.14/drivers/target/ |
H A D | target_core_pscsi.c | 886 int nr_pages = (cmd->data_length + sgl[0].offset + pscsi_map_sg() local 893 pr_debug("PSCSI: nr_pages: %d\n", nr_pages); pscsi_map_sg() 915 nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages); for_each_sg() 916 nr_pages -= nr_vecs; for_each_sg() 932 * nr_pages > BIO_MAX_PAGES, where additional for_each_sg()
|
/linux-4.4.14/arch/arm64/mm/ |
H A D | dma-mapping.c | 311 unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; __swiotlb_mmap() local 321 if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) { __swiotlb_mmap() 364 unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT; atomic_pool_init() local 370 page = dma_alloc_from_contiguous(NULL, nr_pages, atomic_pool_init() 414 if (!dma_release_from_contiguous(NULL, page, nr_pages)) atomic_pool_init()
|
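__swiotlb_mmap() in the arm64 dma-mapping.c entry validates the vma against the buffer before remapping: the vma's page offset must fall inside the nr_pages buffer, and the vma's length must fit in the pages that remain past that offset. The check in isolation, with illustrative values:

    #include <stdio.h>

    /* offset window [off, off + nr_vma_pages) must lie within nr_pages */
    static int mmap_fits(unsigned long off, unsigned long nr_vma_pages,
                         unsigned long nr_pages)
    {
        return off < nr_pages && nr_vma_pages <= (nr_pages - off);
    }

    int main(void)
    {
        printf("%d\n", mmap_fits(2, 4, 8));   /* 1: pages 2..5 of 8 */
        printf("%d\n", mmap_fits(6, 4, 8));   /* 0: would run past the end */
        return 0;
    }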
/linux-4.4.14/fs/xfs/ |
H A D | xfs_buf.c | 1138 int nr_pages; xfs_buf_ioapply_map() local 1164 nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT); xfs_buf_ioapply_map() 1165 if (nr_pages > total_nr_pages) xfs_buf_ioapply_map() 1166 nr_pages = total_nr_pages; xfs_buf_ioapply_map() 1168 bio = bio_alloc(GFP_NOIO, nr_pages); xfs_buf_ioapply_map() 1175 for (; size && nr_pages; nr_pages--, page_index++) { xfs_buf_ioapply_map()
|
/linux-4.4.14/arch/powerpc/platforms/pseries/ |
H A D | cmm.c | 476 unsigned long end = start + (marg->nr_pages << PAGE_SHIFT); cmm_count_pages() 529 unsigned long end_page = start_page + (marg->nr_pages << PAGE_SHIFT); cmm_mem_going_offline() 535 start_page, marg->nr_pages); cmm_mem_going_offline()
|