Lines matching references to h (struct hstate *, mm/hugetlb.c)

70 static int hugetlb_acct_memory(struct hstate *h, long delta);
89 struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages, in hugepage_new_subpool() argument
101 spool->hstate = h; in hugepage_new_subpool()
104 if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) { in hugepage_new_subpool()
575 struct hstate *h = hstate_inode(inode); in hugetlb_fix_reserve_counts() local
577 hugetlb_acct_memory(h, 1); in hugetlb_fix_reserve_counts()
616 static pgoff_t vma_hugecache_offset(struct hstate *h, in vma_hugecache_offset() argument
619 return ((address - vma->vm_start) >> huge_page_shift(h)) + in vma_hugecache_offset()
620 (vma->vm_pgoff >> huge_page_order(h)); in vma_hugecache_offset()
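The two lines above turn a fault address into a huge-page index within the backing file. A minimal user-space sketch of the same arithmetic, assuming 4 KiB base pages and 2 MiB huge pages (both assumptions, not taken from the listing):

```c
/* Sketch of the offset arithmetic in vma_hugecache_offset(): the fault
 * address is converted into a huge-page-sized index into the backing
 * file. The page-size constants below are illustrative assumptions. */
#include <stdio.h>

#define PAGE_SHIFT   12UL
#define HPAGE_SHIFT  21UL                        /* 2 MiB huge pages */
#define HPAGE_ORDER  (HPAGE_SHIFT - PAGE_SHIFT)  /* order in base pages */

static unsigned long hugecache_offset(unsigned long vm_start,
                                      unsigned long vm_pgoff,  /* in base pages */
                                      unsigned long address)
{
        return ((address - vm_start) >> HPAGE_SHIFT) +
               (vm_pgoff >> HPAGE_ORDER);
}

int main(void)
{
        /* mapping at 0x40000000, file offset 0, fault on its third huge page */
        unsigned long addr = 0x40000000UL + 2 * (1UL << HPAGE_SHIFT);

        printf("index = %lu\n", hugecache_offset(0x40000000UL, 0, addr));  /* 2 */
        return 0;
}
```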
838 static void enqueue_huge_page(struct hstate *h, struct page *page) in enqueue_huge_page() argument
841 list_move(&page->lru, &h->hugepage_freelists[nid]); in enqueue_huge_page()
842 h->free_huge_pages++; in enqueue_huge_page()
843 h->free_huge_pages_node[nid]++; in enqueue_huge_page()
846 static struct page *dequeue_huge_page_node(struct hstate *h, int nid) in dequeue_huge_page_node() argument
850 list_for_each_entry(page, &h->hugepage_freelists[nid], lru) in dequeue_huge_page_node()
857 if (&h->hugepage_freelists[nid] == &page->lru) in dequeue_huge_page_node()
859 list_move(&page->lru, &h->hugepage_activelist); in dequeue_huge_page_node()
861 h->free_huge_pages--; in dequeue_huge_page_node()
862 h->free_huge_pages_node[nid]--; in dequeue_huge_page_node()
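enqueue_huge_page() and dequeue_huge_page_node() above maintain one free list per NUMA node plus a global and a per-node counter. A simplified, self-contained sketch of that bookkeeping; the struct layout and hand-rolled list are stand-ins for the kernel's list_head machinery, not its actual types:

```c
#include <stddef.h>

#define MAX_NODES 4

struct hpage {
        struct hpage *next;             /* stand-in for the lru list linkage */
        int nid;                        /* node the page lives on */
};

struct pool {
        struct hpage *freelist[MAX_NODES];      /* per-node free lists */
        unsigned long free_huge_pages;          /* global free count */
        unsigned long free_huge_pages_node[MAX_NODES];
};

/* Mirrors enqueue_huge_page(): put a page on its node's free list and
 * bump both counters. */
static void enqueue(struct pool *p, struct hpage *pg)
{
        pg->next = p->freelist[pg->nid];
        p->freelist[pg->nid] = pg;
        p->free_huge_pages++;
        p->free_huge_pages_node[pg->nid]++;
}

/* Mirrors dequeue_huge_page_node(): take a page from one node's free
 * list, or return NULL if that node has nothing free. */
static struct hpage *dequeue_node(struct pool *p, int nid)
{
        struct hpage *pg = p->freelist[nid];

        if (!pg)
                return NULL;
        p->freelist[nid] = pg->next;
        p->free_huge_pages--;
        p->free_huge_pages_node[nid]--;
        return pg;
}
```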
867 static inline gfp_t htlb_alloc_mask(struct hstate *h) in htlb_alloc_mask() argument
869 if (hugepages_treat_as_movable || hugepage_migration_supported(h)) in htlb_alloc_mask()
875 static struct page *dequeue_huge_page_vma(struct hstate *h, in dequeue_huge_page_vma() argument
894 h->free_huge_pages - h->resv_huge_pages == 0) in dequeue_huge_page_vma()
898 if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0) in dequeue_huge_page_vma()
904 htlb_alloc_mask(h), &mpol, &nodemask); in dequeue_huge_page_vma()
908 if (cpuset_zone_allowed(zone, htlb_alloc_mask(h))) { in dequeue_huge_page_vma()
909 page = dequeue_huge_page_node(h, zone_to_nid(zone)); in dequeue_huge_page_vma()
917 h->resv_huge_pages--; in dequeue_huge_page_vma()
962 static int hstate_next_node_to_alloc(struct hstate *h, in hstate_next_node_to_alloc() argument
969 nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed); in hstate_next_node_to_alloc()
970 h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed); in hstate_next_node_to_alloc()
981 static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed) in hstate_next_node_to_free() argument
987 nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed); in hstate_next_node_to_free()
988 h->next_nid_to_free = next_node_allowed(nid, nodes_allowed); in hstate_next_node_to_free()
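The next_nid_to_alloc/next_nid_to_free lines above spread pool growth and shrinking across the allowed nodes: each call returns the remembered node and advances a round-robin cursor. A rough sketch of that pattern, using a plain bitmask instead of nodemask_t (an assumption for brevity; the mask must be non-empty or the loop below would not terminate):

```c
/* Advance to the next node that is set in the allowed mask, wrapping. */
static int next_node_allowed(int nid, unsigned int allowed, int nr_nodes)
{
        do {
                nid = (nid + 1) % nr_nodes;
        } while (!(allowed & (1u << nid)));
        return nid;
}

static int next_nid_to_alloc;   /* round-robin cursor, like h->next_nid_to_alloc */

/* Return a valid node to allocate on and move the cursor past it. */
static int pick_alloc_node(unsigned int allowed, int nr_nodes)
{
        int nid = next_nid_to_alloc;

        if (!(allowed & (1u << nid)))           /* make the cursor itself valid */
                nid = next_node_allowed(nid, allowed, nr_nodes);
        next_nid_to_alloc = next_node_allowed(nid, allowed, nr_nodes);
        return nid;
}
```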
1101 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
1104 static struct page *alloc_fresh_gigantic_page_node(struct hstate *h, int nid) in alloc_fresh_gigantic_page_node() argument
1108 page = alloc_gigantic_page(nid, huge_page_order(h)); in alloc_fresh_gigantic_page_node()
1110 prep_compound_gigantic_page(page, huge_page_order(h)); in alloc_fresh_gigantic_page_node()
1111 prep_new_huge_page(h, page, nid); in alloc_fresh_gigantic_page_node()
1117 static int alloc_fresh_gigantic_page(struct hstate *h, in alloc_fresh_gigantic_page() argument
1123 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) { in alloc_fresh_gigantic_page()
1124 page = alloc_fresh_gigantic_page_node(h, node); in alloc_fresh_gigantic_page()
1138 static inline int alloc_fresh_gigantic_page(struct hstate *h, in alloc_fresh_gigantic_page() argument
1142 static void update_and_free_page(struct hstate *h, struct page *page) in update_and_free_page() argument
1146 if (hstate_is_gigantic(h) && !gigantic_page_supported()) in update_and_free_page()
1149 h->nr_huge_pages--; in update_and_free_page()
1150 h->nr_huge_pages_node[page_to_nid(page)]--; in update_and_free_page()
1151 for (i = 0; i < pages_per_huge_page(h); i++) { in update_and_free_page()
1160 if (hstate_is_gigantic(h)) { in update_and_free_page()
1161 destroy_compound_gigantic_page(page, huge_page_order(h)); in update_and_free_page()
1162 free_gigantic_page(page, huge_page_order(h)); in update_and_free_page()
1164 __free_pages(page, huge_page_order(h)); in update_and_free_page()
1170 struct hstate *h; in size_to_hstate() local
1172 for_each_hstate(h) { in size_to_hstate()
1173 if (huge_page_size(h) == size) in size_to_hstate()
1174 return h; in size_to_hstate()
1210 struct hstate *h = page_hstate(page); in free_huge_page() local
1233 hugetlb_cgroup_uncharge_page(hstate_index(h), in free_huge_page()
1234 pages_per_huge_page(h), page); in free_huge_page()
1236 h->resv_huge_pages++; in free_huge_page()
1238 if (h->surplus_huge_pages_node[nid]) { in free_huge_page()
1241 update_and_free_page(h, page); in free_huge_page()
1242 h->surplus_huge_pages--; in free_huge_page()
1243 h->surplus_huge_pages_node[nid]--; in free_huge_page()
1246 enqueue_huge_page(h, page); in free_huge_page()
1251 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid) in prep_new_huge_page() argument
1257 h->nr_huge_pages++; in prep_new_huge_page()
1258 h->nr_huge_pages_node[nid]++; in prep_new_huge_page()
1336 static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid) in alloc_fresh_huge_page_node() argument
1341 htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE| in alloc_fresh_huge_page_node()
1343 huge_page_order(h)); in alloc_fresh_huge_page_node()
1345 prep_new_huge_page(h, page, nid); in alloc_fresh_huge_page_node()
1351 static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed) in alloc_fresh_huge_page() argument
1357 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) { in alloc_fresh_huge_page()
1358 page = alloc_fresh_huge_page_node(h, node); in alloc_fresh_huge_page()
1379 static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed, in free_pool_huge_page() argument
1385 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) { in free_pool_huge_page()
1390 if ((!acct_surplus || h->surplus_huge_pages_node[node]) && in free_pool_huge_page()
1391 !list_empty(&h->hugepage_freelists[node])) { in free_pool_huge_page()
1393 list_entry(h->hugepage_freelists[node].next, in free_pool_huge_page()
1396 h->free_huge_pages--; in free_pool_huge_page()
1397 h->free_huge_pages_node[node]--; in free_pool_huge_page()
1399 h->surplus_huge_pages--; in free_pool_huge_page()
1400 h->surplus_huge_pages_node[node]--; in free_pool_huge_page()
1402 update_and_free_page(h, page); in free_pool_huge_page()
1419 struct hstate *h = page_hstate(page); in dissolve_free_huge_page() local
1422 h->free_huge_pages--; in dissolve_free_huge_page()
1423 h->free_huge_pages_node[nid]--; in dissolve_free_huge_page()
1424 update_and_free_page(h, page); in dissolve_free_huge_page()
1455 static struct page *__hugetlb_alloc_buddy_huge_page(struct hstate *h, in __hugetlb_alloc_buddy_huge_page() argument
1458 int order = huge_page_order(h); in __hugetlb_alloc_buddy_huge_page()
1459 gfp_t gfp = htlb_alloc_mask(h)|__GFP_COMP|__GFP_REPEAT|__GFP_NOWARN; in __hugetlb_alloc_buddy_huge_page()
1520 static struct page *__alloc_buddy_huge_page(struct hstate *h, in __alloc_buddy_huge_page() argument
1526 if (hstate_is_gigantic(h)) in __alloc_buddy_huge_page()
1562 if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) { in __alloc_buddy_huge_page()
1566 h->nr_huge_pages++; in __alloc_buddy_huge_page()
1567 h->surplus_huge_pages++; in __alloc_buddy_huge_page()
1571 page = __hugetlb_alloc_buddy_huge_page(h, vma, addr, nid); in __alloc_buddy_huge_page()
1582 h->nr_huge_pages_node[r_nid]++; in __alloc_buddy_huge_page()
1583 h->surplus_huge_pages_node[r_nid]++; in __alloc_buddy_huge_page()
1586 h->nr_huge_pages--; in __alloc_buddy_huge_page()
1587 h->surplus_huge_pages--; in __alloc_buddy_huge_page()
1601 struct page *__alloc_buddy_huge_page_no_mpol(struct hstate *h, int nid) in __alloc_buddy_huge_page_no_mpol() argument
1605 return __alloc_buddy_huge_page(h, NULL, addr, nid); in __alloc_buddy_huge_page_no_mpol()
1612 struct page *__alloc_buddy_huge_page_with_mpol(struct hstate *h, in __alloc_buddy_huge_page_with_mpol() argument
1615 return __alloc_buddy_huge_page(h, vma, addr, NUMA_NO_NODE); in __alloc_buddy_huge_page_with_mpol()
1623 struct page *alloc_huge_page_node(struct hstate *h, int nid) in alloc_huge_page_node() argument
1628 if (h->free_huge_pages - h->resv_huge_pages > 0) in alloc_huge_page_node()
1629 page = dequeue_huge_page_node(h, nid); in alloc_huge_page_node()
1633 page = __alloc_buddy_huge_page_no_mpol(h, nid); in alloc_huge_page_node()
1642 static int gather_surplus_pages(struct hstate *h, int delta) in gather_surplus_pages() argument
1650 needed = (h->resv_huge_pages + delta) - h->free_huge_pages; in gather_surplus_pages()
1652 h->resv_huge_pages += delta; in gather_surplus_pages()
1663 page = __alloc_buddy_huge_page_no_mpol(h, NUMA_NO_NODE); in gather_surplus_pages()
1677 needed = (h->resv_huge_pages + delta) - in gather_surplus_pages()
1678 (h->free_huge_pages + allocated); in gather_surplus_pages()
1698 h->resv_huge_pages += delta; in gather_surplus_pages()
1711 enqueue_huge_page(h, page); in gather_surplus_pages()
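gather_surplus_pages() above first works out how many pages beyond the current free pool a reservation of delta pages needs, temporarily takes credit for the reservation, allocates surplus pages, then recomputes the shortfall because the pool can change while the lock is dropped. The core arithmetic as a standalone sketch; locking, the allocation loop and the re-check are deliberately omitted:

```c
/* How many pages must be newly allocated so the free pool covers all
 * existing reservations plus a new request of 'delta' pages.
 * Example: resv = 10, free = 8, delta = 4  ->  6 pages needed. */
static long surplus_needed(unsigned long resv_huge_pages,
                           unsigned long free_huge_pages,
                           long delta)
{
        long needed = (long)(resv_huge_pages + delta) - (long)free_huge_pages;

        return needed > 0 ? needed : 0;         /* <= 0: already covered */
}
```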
1730 static void return_unused_surplus_pages(struct hstate *h, in return_unused_surplus_pages() argument
1736 h->resv_huge_pages -= unused_resv_pages; in return_unused_surplus_pages()
1739 if (hstate_is_gigantic(h)) in return_unused_surplus_pages()
1742 nr_pages = min(unused_resv_pages, h->surplus_huge_pages); in return_unused_surplus_pages()
1753 if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1)) in return_unused_surplus_pages()
1784 static long __vma_reservation_common(struct hstate *h, in __vma_reservation_common() argument
1796 idx = vma_hugecache_offset(h, vma, addr); in __vma_reservation_common()
1818 static long vma_needs_reservation(struct hstate *h, in vma_needs_reservation() argument
1821 return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV); in vma_needs_reservation()
1824 static long vma_commit_reservation(struct hstate *h, in vma_commit_reservation() argument
1827 return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV); in vma_commit_reservation()
1830 static void vma_end_reservation(struct hstate *h, in vma_end_reservation() argument
1833 (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV); in vma_end_reservation()
1840 struct hstate *h = hstate_vma(vma); in alloc_huge_page() local
1847 idx = hstate_index(h); in alloc_huge_page()
1853 map_chg = gbl_chg = vma_needs_reservation(h, vma, addr); in alloc_huge_page()
1867 vma_end_reservation(h, vma, addr); in alloc_huge_page()
1883 ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg); in alloc_huge_page()
1893 page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg); in alloc_huge_page()
1896 page = __alloc_buddy_huge_page_with_mpol(h, vma, addr); in alloc_huge_page()
1901 h->resv_huge_pages--; in alloc_huge_page()
1904 list_move(&page->lru, &h->hugepage_activelist); in alloc_huge_page()
1907 hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page); in alloc_huge_page()
1912 map_commit = vma_commit_reservation(h, vma, addr); in alloc_huge_page()
1926 hugetlb_acct_memory(h, -rsv_adjust); in alloc_huge_page()
1931 hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg); in alloc_huge_page()
1935 vma_end_reservation(h, vma, addr); in alloc_huge_page()
1953 int __weak alloc_bootmem_huge_page(struct hstate *h) in alloc_bootmem_huge_page() argument
1958 for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) { in alloc_bootmem_huge_page()
1962 huge_page_size(h), huge_page_size(h), in alloc_bootmem_huge_page()
1977 BUG_ON(!IS_ALIGNED(virt_to_phys(m), huge_page_size(h))); in alloc_bootmem_huge_page()
1980 m->hstate = h; in alloc_bootmem_huge_page()
1999 struct hstate *h = m->hstate; in gather_bootmem_prealloc() local
2010 prep_compound_huge_page(page, h->order); in gather_bootmem_prealloc()
2012 prep_new_huge_page(h, page, page_to_nid(page)); in gather_bootmem_prealloc()
2019 if (hstate_is_gigantic(h)) in gather_bootmem_prealloc()
2020 adjust_managed_page_count(page, 1 << h->order); in gather_bootmem_prealloc()
2024 static void __init hugetlb_hstate_alloc_pages(struct hstate *h) in hugetlb_hstate_alloc_pages() argument
2028 for (i = 0; i < h->max_huge_pages; ++i) { in hugetlb_hstate_alloc_pages()
2029 if (hstate_is_gigantic(h)) { in hugetlb_hstate_alloc_pages()
2030 if (!alloc_bootmem_huge_page(h)) in hugetlb_hstate_alloc_pages()
2032 } else if (!alloc_fresh_huge_page(h, in hugetlb_hstate_alloc_pages()
2036 h->max_huge_pages = i; in hugetlb_hstate_alloc_pages()
2041 struct hstate *h; in hugetlb_init_hstates() local
2043 for_each_hstate(h) { in hugetlb_init_hstates()
2044 if (minimum_order > huge_page_order(h)) in hugetlb_init_hstates()
2045 minimum_order = huge_page_order(h); in hugetlb_init_hstates()
2048 if (!hstate_is_gigantic(h)) in hugetlb_init_hstates()
2049 hugetlb_hstate_alloc_pages(h); in hugetlb_init_hstates()
2067 struct hstate *h; in report_hugepages() local
2069 for_each_hstate(h) { in report_hugepages()
2072 memfmt(buf, huge_page_size(h)), in report_hugepages()
2073 h->free_huge_pages); in report_hugepages()
2078 static void try_to_free_low(struct hstate *h, unsigned long count, in try_to_free_low() argument
2083 if (hstate_is_gigantic(h)) in try_to_free_low()
2088 struct list_head *freel = &h->hugepage_freelists[i]; in try_to_free_low()
2090 if (count >= h->nr_huge_pages) in try_to_free_low()
2095 update_and_free_page(h, page); in try_to_free_low()
2096 h->free_huge_pages--; in try_to_free_low()
2097 h->free_huge_pages_node[page_to_nid(page)]--; in try_to_free_low()
2102 static inline void try_to_free_low(struct hstate *h, unsigned long count, in try_to_free_low() argument
2113 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed, in adjust_pool_surplus() argument
2121 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) { in adjust_pool_surplus()
2122 if (h->surplus_huge_pages_node[node]) in adjust_pool_surplus()
2126 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) { in adjust_pool_surplus()
2127 if (h->surplus_huge_pages_node[node] < in adjust_pool_surplus()
2128 h->nr_huge_pages_node[node]) in adjust_pool_surplus()
2135 h->surplus_huge_pages += delta; in adjust_pool_surplus()
2136 h->surplus_huge_pages_node[node] += delta; in adjust_pool_surplus()
2140 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages) argument
2141 static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count, in set_max_huge_pages() argument
2146 if (hstate_is_gigantic(h) && !gigantic_page_supported()) in set_max_huge_pages()
2147 return h->max_huge_pages; in set_max_huge_pages()
2161 while (h->surplus_huge_pages && count > persistent_huge_pages(h)) { in set_max_huge_pages()
2162 if (!adjust_pool_surplus(h, nodes_allowed, -1)) in set_max_huge_pages()
2166 while (count > persistent_huge_pages(h)) { in set_max_huge_pages()
2173 if (hstate_is_gigantic(h)) in set_max_huge_pages()
2174 ret = alloc_fresh_gigantic_page(h, nodes_allowed); in set_max_huge_pages()
2176 ret = alloc_fresh_huge_page(h, nodes_allowed); in set_max_huge_pages()
2201 min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages; in set_max_huge_pages()
2203 try_to_free_low(h, min_count, nodes_allowed); in set_max_huge_pages()
2204 while (min_count < persistent_huge_pages(h)) { in set_max_huge_pages()
2205 if (!free_pool_huge_page(h, nodes_allowed, 0)) in set_max_huge_pages()
2209 while (count < persistent_huge_pages(h)) { in set_max_huge_pages()
2210 if (!adjust_pool_surplus(h, nodes_allowed, 1)) in set_max_huge_pages()
2214 ret = persistent_huge_pages(h); in set_max_huge_pages()
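set_max_huge_pages() above resizes the persistent pool: persistent_huge_pages() excludes surplus (overcommitted) pages, and min_count is the floor the pool may shrink to, since in-use pages and outstanding reservations must keep their backing. Both derived quantities as a hedged standalone sketch (field names follow the listing; the struct is a stand-in, not the kernel's):

```c
struct hstate_counts {
        unsigned long nr_huge_pages;
        unsigned long surplus_huge_pages;
        unsigned long free_huge_pages;
        unsigned long resv_huge_pages;
};

/* Pages that belong to the persistent pool, i.e. not surplus. */
static unsigned long persistent_huge_pages(const struct hstate_counts *h)
{
        return h->nr_huge_pages - h->surplus_huge_pages;
}

/* Lowest count the pool may be shrunk to for a requested size 'count':
 * in-use pages (nr - free) plus outstanding reservations set the floor. */
static unsigned long shrink_floor(const struct hstate_counts *h,
                                  unsigned long count)
{
        unsigned long min_count = h->resv_huge_pages + h->nr_huge_pages -
                                  h->free_huge_pages;

        return min_count > count ? min_count : count;
}
```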
2248 struct hstate *h; in nr_hugepages_show_common() local
2252 h = kobj_to_hstate(kobj, &nid); in nr_hugepages_show_common()
2254 nr_huge_pages = h->nr_huge_pages; in nr_hugepages_show_common()
2256 nr_huge_pages = h->nr_huge_pages_node[nid]; in nr_hugepages_show_common()
2262 struct hstate *h, int nid, in __nr_hugepages_store_common() argument
2268 if (hstate_is_gigantic(h) && !gigantic_page_supported()) { in __nr_hugepages_store_common()
2287 count += h->nr_huge_pages - h->nr_huge_pages_node[nid]; in __nr_hugepages_store_common()
2292 h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed); in __nr_hugepages_store_common()
2307 struct hstate *h; in nr_hugepages_store_common() local
2316 h = kobj_to_hstate(kobj, &nid); in nr_hugepages_store_common()
2317 return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len); in nr_hugepages_store_common()
2357 struct hstate *h = kobj_to_hstate(kobj, NULL); in nr_overcommit_hugepages_show() local
2358 return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages); in nr_overcommit_hugepages_show()
2366 struct hstate *h = kobj_to_hstate(kobj, NULL); in nr_overcommit_hugepages_store() local
2368 if (hstate_is_gigantic(h)) in nr_overcommit_hugepages_store()
2376 h->nr_overcommit_huge_pages = input; in nr_overcommit_hugepages_store()
2386 struct hstate *h; in free_hugepages_show() local
2390 h = kobj_to_hstate(kobj, &nid); in free_hugepages_show()
2392 free_huge_pages = h->free_huge_pages; in free_hugepages_show()
2394 free_huge_pages = h->free_huge_pages_node[nid]; in free_hugepages_show()
2403 struct hstate *h = kobj_to_hstate(kobj, NULL); in resv_hugepages_show() local
2404 return sprintf(buf, "%lu\n", h->resv_huge_pages); in resv_hugepages_show()
2411 struct hstate *h; in surplus_hugepages_show() local
2415 h = kobj_to_hstate(kobj, &nid); in surplus_hugepages_show()
2417 surplus_huge_pages = h->surplus_huge_pages; in surplus_hugepages_show()
2419 surplus_huge_pages = h->surplus_huge_pages_node[nid]; in surplus_hugepages_show()
2441 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent, in hugetlb_sysfs_add_hstate() argument
2446 int hi = hstate_index(h); in hugetlb_sysfs_add_hstate()
2448 hstate_kobjs[hi] = kobject_create_and_add(h->name, parent); in hugetlb_sysfs_add_hstate()
2461 struct hstate *h; in hugetlb_sysfs_init() local
2468 for_each_hstate(h) { in hugetlb_sysfs_init()
2469 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj, in hugetlb_sysfs_init()
2472 pr_err("Hugetlb: Unable to add hstate %s", h->name); in hugetlb_sysfs_init()
2534 struct hstate *h; in hugetlb_unregister_node() local
2540 for_each_hstate(h) { in hugetlb_unregister_node()
2541 int idx = hstate_index(h); in hugetlb_unregister_node()
2578 struct hstate *h; in hugetlb_register_node() local
2590 for_each_hstate(h) { in hugetlb_register_node()
2591 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj, in hugetlb_register_node()
2596 h->name, node->dev.id); in hugetlb_register_node()
2643 struct hstate *h; in hugetlb_exit() local
2647 for_each_hstate(h) { in hugetlb_exit()
2648 kobject_put(hstate_kobjs[hstate_index(h)]); in hugetlb_exit()
2698 struct hstate *h; in hugetlb_add_hstate() local
2707 h = &hstates[hugetlb_max_hstate++]; in hugetlb_add_hstate()
2708 h->order = order; in hugetlb_add_hstate()
2709 h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1); in hugetlb_add_hstate()
2710 h->nr_huge_pages = 0; in hugetlb_add_hstate()
2711 h->free_huge_pages = 0; in hugetlb_add_hstate()
2713 INIT_LIST_HEAD(&h->hugepage_freelists[i]); in hugetlb_add_hstate()
2714 INIT_LIST_HEAD(&h->hugepage_activelist); in hugetlb_add_hstate()
2715 h->next_nid_to_alloc = first_node(node_states[N_MEMORY]); in hugetlb_add_hstate()
2716 h->next_nid_to_free = first_node(node_states[N_MEMORY]); in hugetlb_add_hstate()
2717 snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB", in hugetlb_add_hstate()
2718 huge_page_size(h)/1024); in hugetlb_add_hstate()
2720 parsed_hstate = h; in hugetlb_add_hstate()
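hugetlb_add_hstate() above derives the per-hstate geometry from the page order alone: the huge page size is 1 << (order + PAGE_SHIFT), the mask clears the in-page offset bits, and the hstate name is the size in kB. A tiny standalone check of that arithmetic, assuming PAGE_SHIFT = 12 (an assumption for the example):

```c
#include <stdio.h>

#define PAGE_SHIFT 12UL         /* assumed 4 KiB base pages */

int main(void)
{
        unsigned int order = 9;                         /* 2 MiB huge pages */
        unsigned long long size = 1ULL << (order + PAGE_SHIFT);
        unsigned long long mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);

        /* prints: hugepages-2048kB mask=0xffffffffffe00000 */
        printf("hugepages-%llukB mask=%#llx\n", size / 1024, mask);
        return 0;
}
```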
2783 struct hstate *h = &default_hstate; in hugetlb_sysctl_handler_common() local
2784 unsigned long tmp = h->max_huge_pages; in hugetlb_sysctl_handler_common()
2797 ret = __nr_hugepages_store_common(obey_mempolicy, h, in hugetlb_sysctl_handler_common()
2824 struct hstate *h = &default_hstate; in hugetlb_overcommit_handler() local
2831 tmp = h->nr_overcommit_huge_pages; in hugetlb_overcommit_handler()
2833 if (write && hstate_is_gigantic(h)) in hugetlb_overcommit_handler()
2844 h->nr_overcommit_huge_pages = tmp; in hugetlb_overcommit_handler()
2855 struct hstate *h = &default_hstate; in hugetlb_report_meminfo() local
2864 h->nr_huge_pages, in hugetlb_report_meminfo()
2865 h->free_huge_pages, in hugetlb_report_meminfo()
2866 h->resv_huge_pages, in hugetlb_report_meminfo()
2867 h->surplus_huge_pages, in hugetlb_report_meminfo()
2868 1UL << (huge_page_order(h) + PAGE_SHIFT - 10)); in hugetlb_report_meminfo()
2873 struct hstate *h = &default_hstate; in hugetlb_report_node_meminfo() local
2880 nid, h->nr_huge_pages_node[nid], in hugetlb_report_node_meminfo()
2881 nid, h->free_huge_pages_node[nid], in hugetlb_report_node_meminfo()
2882 nid, h->surplus_huge_pages_node[nid]); in hugetlb_report_node_meminfo()
2887 struct hstate *h; in hugetlb_show_meminfo() local
2894 for_each_hstate(h) in hugetlb_show_meminfo()
2897 h->nr_huge_pages_node[nid], in hugetlb_show_meminfo()
2898 h->free_huge_pages_node[nid], in hugetlb_show_meminfo()
2899 h->surplus_huge_pages_node[nid], in hugetlb_show_meminfo()
2900 1UL << (huge_page_order(h) + PAGE_SHIFT - 10)); in hugetlb_show_meminfo()
2912 struct hstate *h; in hugetlb_total_pages() local
2915 for_each_hstate(h) in hugetlb_total_pages()
2916 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h); in hugetlb_total_pages()
2920 static int hugetlb_acct_memory(struct hstate *h, long delta) in hugetlb_acct_memory() argument
2943 if (gather_surplus_pages(h, delta) < 0) in hugetlb_acct_memory()
2946 if (delta > cpuset_mems_nr(h->free_huge_pages_node)) { in hugetlb_acct_memory()
2947 return_unused_surplus_pages(h, delta); in hugetlb_acct_memory()
2954 return_unused_surplus_pages(h, (unsigned long) -delta); in hugetlb_acct_memory()
2979 struct hstate *h = hstate_vma(vma); in hugetlb_vm_op_close() local
2988 start = vma_hugecache_offset(h, vma, vma->vm_start); in hugetlb_vm_op_close()
2989 end = vma_hugecache_offset(h, vma, vma->vm_end); in hugetlb_vm_op_close()
3001 hugetlb_acct_memory(h, -gbl_reserve); in hugetlb_vm_op_close()
3085 struct hstate *h = hstate_vma(vma); in copy_hugetlb_page_range() local
3086 unsigned long sz = huge_page_size(h); in copy_hugetlb_page_range()
3113 dst_ptl = huge_pte_lock(h, dst, dst_pte); in copy_hugetlb_page_range()
3114 src_ptl = huge_pte_lockptr(h, src, src_pte); in copy_hugetlb_page_range()
3144 hugetlb_count_add(pages_per_huge_page(h), dst); in copy_hugetlb_page_range()
3167 struct hstate *h = hstate_vma(vma); in __unmap_hugepage_range() local
3168 unsigned long sz = huge_page_size(h); in __unmap_hugepage_range()
3173 BUG_ON(start & ~huge_page_mask(h)); in __unmap_hugepage_range()
3174 BUG_ON(end & ~huge_page_mask(h)); in __unmap_hugepage_range()
3185 ptl = huge_pte_lock(h, mm, ptep); in __unmap_hugepage_range()
3225 hugetlb_count_sub(pages_per_huge_page(h), mm); in __unmap_hugepage_range()
3297 struct hstate *h = hstate_vma(vma); in unmap_ref_private() local
3306 address = address & huge_page_mask(h); in unmap_ref_private()
3339 address + huge_page_size(h), page); in unmap_ref_private()
3354 struct hstate *h = hstate_vma(vma); in hugetlb_cow() local
3407 ptep = huge_pte_offset(mm, address & huge_page_mask(h)); in hugetlb_cow()
3433 pages_per_huge_page(h)); in hugetlb_cow()
3437 mmun_start = address & huge_page_mask(h); in hugetlb_cow()
3438 mmun_end = mmun_start + huge_page_size(h); in hugetlb_cow()
3446 ptep = huge_pte_offset(mm, address & huge_page_mask(h)); in hugetlb_cow()
3472 static struct page *hugetlbfs_pagecache_page(struct hstate *h, in hugetlbfs_pagecache_page() argument
3479 idx = vma_hugecache_offset(h, vma, address); in hugetlbfs_pagecache_page()
3488 static bool hugetlbfs_pagecache_present(struct hstate *h, in hugetlbfs_pagecache_present() argument
3496 idx = vma_hugecache_offset(h, vma, address); in hugetlbfs_pagecache_present()
3508 struct hstate *h = hstate_inode(inode); in huge_add_to_page_cache() local
3516 inode->i_blocks += blocks_per_huge_page(h); in huge_add_to_page_cache()
3525 struct hstate *h = hstate_vma(vma); in hugetlb_no_page() local
3551 size = i_size_read(mapping->host) >> huge_page_shift(h); in hugetlb_no_page()
3563 clear_huge_page(page, address, pages_per_huge_page(h)); in hugetlb_no_page()
3591 VM_FAULT_SET_HINDEX(hstate_index(h)); in hugetlb_no_page()
3603 if (vma_needs_reservation(h, vma, address) < 0) { in hugetlb_no_page()
3608 vma_end_reservation(h, vma, address); in hugetlb_no_page()
3611 ptl = huge_pte_lockptr(h, mm, ptep); in hugetlb_no_page()
3613 size = i_size_read(mapping->host) >> huge_page_shift(h); in hugetlb_no_page()
3630 hugetlb_count_add(pages_per_huge_page(h), mm); in hugetlb_no_page()
3650 u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm, in hugetlb_fault_mutex_hash() argument
3663 key[1] = address >> huge_page_shift(h); in hugetlb_fault_mutex_hash()
3675 u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm, in hugetlb_fault_mutex_hash() argument
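hugetlb_fault_mutex_hash() above maps a faulting (mapping, index) or (mm, huge-page-aligned address) pair to one slot in a fixed table of mutexes, so concurrent faults on the same huge page serialize while faults on different pages proceed in parallel. A user-space approximation of the idea; the mixing step is a placeholder rather than the kernel's hash function, and the table size and page shift are assumptions:

```c
#include <pthread.h>
#include <stdint.h>

#define NUM_FAULT_MUTEXES 64u   /* assumed; must be a power of two */
#define HPAGE_SHIFT       21u   /* assumed 2 MiB huge pages */

/* Initialised with pthread_mutex_init() at startup (omitted here). */
static pthread_mutex_t fault_mutex_table[NUM_FAULT_MUTEXES];

static uint32_t fault_mutex_hash(const void *mapping, unsigned long address)
{
        /* Combine the mapping and the huge-page index of the address,
         * mirroring key[1] = address >> huge_page_shift(h) above. */
        uint64_t key = (uint64_t)(uintptr_t)mapping ^
                       (uint64_t)(address >> HPAGE_SHIFT);

        key *= 0x9e3779b97f4a7c15ULL;   /* placeholder mix, not the kernel's hash */
        return (uint32_t)(key >> 32) & (NUM_FAULT_MUTEXES - 1);
}

static pthread_mutex_t *fault_mutex(const void *mapping, unsigned long address)
{
        return &fault_mutex_table[fault_mutex_hash(mapping, address)];
}
```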
3694 struct hstate *h = hstate_vma(vma); in hugetlb_fault() local
3698 address &= huge_page_mask(h); in hugetlb_fault()
3708 VM_FAULT_SET_HINDEX(hstate_index(h)); in hugetlb_fault()
3710 ptep = huge_pte_alloc(mm, address, huge_page_size(h)); in hugetlb_fault()
3716 idx = vma_hugecache_offset(h, vma, address); in hugetlb_fault()
3723 hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, address); in hugetlb_fault()
3753 if (vma_needs_reservation(h, vma, address) < 0) { in hugetlb_fault()
3758 vma_end_reservation(h, vma, address); in hugetlb_fault()
3761 pagecache_page = hugetlbfs_pagecache_page(h, in hugetlb_fault()
3765 ptl = huge_pte_lock(h, mm, ptep); in hugetlb_fault()
3830 struct hstate *h = hstate_vma(vma); in follow_hugetlb_page() local
3854 pte = huge_pte_offset(mm, vaddr & huge_page_mask(h)); in follow_hugetlb_page()
3856 ptl = huge_pte_lock(h, mm, pte); in follow_hugetlb_page()
3867 !hugetlbfs_pagecache_present(h, vma, vaddr)) { in follow_hugetlb_page()
3900 pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT; in follow_hugetlb_page()
3916 pfn_offset < pages_per_huge_page(h)) { in follow_hugetlb_page()
3938 struct hstate *h = hstate_vma(vma); in hugetlb_change_protection() local
3946 for (; address < end; address += huge_page_size(h)) { in hugetlb_change_protection()
3951 ptl = huge_pte_lock(h, mm, ptep); in hugetlb_change_protection()
3996 return pages << h->order; in hugetlb_change_protection()
4005 struct hstate *h = hstate_inode(inode); in hugetlb_reserve_pages() local
4060 ret = hugetlb_acct_memory(h, gbl_reserve); in hugetlb_reserve_pages()
4093 hugetlb_acct_memory(h, -rsv_adjust); in hugetlb_reserve_pages()
4108 struct hstate *h = hstate_inode(inode); in hugetlb_unreserve_pages() local
4126 inode->i_blocks -= (blocks_per_huge_page(h) * freed); in hugetlb_unreserve_pages()
4134 hugetlb_acct_memory(h, -gbl_reserve); in hugetlb_unreserve_pages()
4389 struct hstate *h = page_hstate(hpage); in dequeue_hwpoisoned_huge_page() local
4407 h->free_huge_pages--; in dequeue_hwpoisoned_huge_page()
4408 h->free_huge_pages_node[nid]--; in dequeue_hwpoisoned_huge_page()