Lines matching refs: h (mm/hugetlb.c)

70 static int hugetlb_acct_memory(struct hstate *h, long delta);
89 struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages, in hugepage_new_subpool() argument
101 spool->hstate = h; in hugepage_new_subpool()
104 if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) { in hugepage_new_subpool()
400 static pgoff_t vma_hugecache_offset(struct hstate *h, in vma_hugecache_offset() argument
403 return ((address - vma->vm_start) >> huge_page_shift(h)) + in vma_hugecache_offset()
404 (vma->vm_pgoff >> huge_page_order(h)); in vma_hugecache_offset()
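The computation above turns a user address into a huge-page-sized index into the backing file. A minimal worked example of the same formula, assuming 2 MB huge pages (shift 21, order 9) and made-up VMA values; none of the concrete numbers come from the source:

/* Illustrative only: recomputes the vma_hugecache_offset() formula shown
 * above for assumed 2 MB huge pages (PAGE_SHIFT == 12, so order == 9). */
#include <stdio.h>

int main(void)
{
    unsigned long vm_start    = 0x7f0000000000UL;  /* assumed VMA start    */
    unsigned long vm_pgoff    = 512;               /* assumed file offset  */
    unsigned long address     = 0x7f0000400000UL;  /* address being mapped */
    unsigned int  hpage_shift = 21;                /* huge_page_shift(h)   */
    unsigned int  hpage_order = 9;                 /* huge_page_order(h)   */

    unsigned long idx = ((address - vm_start) >> hpage_shift) +
                        (vm_pgoff >> hpage_order);

    printf("hugecache index = %lu\n", idx);        /* (4 MB >> 21) + 1 = 3 */
    return 0;
}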
589 static void enqueue_huge_page(struct hstate *h, struct page *page) in enqueue_huge_page() argument
592 list_move(&page->lru, &h->hugepage_freelists[nid]); in enqueue_huge_page()
593 h->free_huge_pages++; in enqueue_huge_page()
594 h->free_huge_pages_node[nid]++; in enqueue_huge_page()
597 static struct page *dequeue_huge_page_node(struct hstate *h, int nid) in dequeue_huge_page_node() argument
601 list_for_each_entry(page, &h->hugepage_freelists[nid], lru) in dequeue_huge_page_node()
608 if (&h->hugepage_freelists[nid] == &page->lru) in dequeue_huge_page_node()
610 list_move(&page->lru, &h->hugepage_activelist); in dequeue_huge_page_node()
612 h->free_huge_pages--; in dequeue_huge_page_node()
613 h->free_huge_pages_node[nid]--; in dequeue_huge_page_node()
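enqueue_huge_page() and dequeue_huge_page_node() above keep a per-node free list and mirror every list operation in both a global and a per-node counter. A hedged userspace analog of that bookkeeping; the list_move() on page->lru and all locking are elided:

/* Sketch (not kernel code): keep the global and per-node counters in step. */
#include <stdio.h>

#define MAX_NODES 4

static unsigned long free_huge_pages;
static unsigned long free_huge_pages_node[MAX_NODES];

static void enqueue(int nid)
{
    /* list_move(&page->lru, &h->hugepage_freelists[nid]) elided */
    free_huge_pages++;
    free_huge_pages_node[nid]++;
}

static int dequeue(int nid)
{
    if (!free_huge_pages_node[nid])
        return -1;                /* free list empty on this node */
    free_huge_pages--;
    free_huge_pages_node[nid]--;
    return 0;
}

int main(void)
{
    enqueue(0);
    enqueue(1);
    dequeue(0);
    printf("free=%lu node0=%lu node1=%lu\n", free_huge_pages,
           free_huge_pages_node[0], free_huge_pages_node[1]);
    /* free=1 node0=0 node1=1 */
    return 0;
}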
618 static inline gfp_t htlb_alloc_mask(struct hstate *h) in htlb_alloc_mask() argument
620 if (hugepages_treat_as_movable || hugepage_migration_supported(h)) in htlb_alloc_mask()
626 static struct page *dequeue_huge_page_vma(struct hstate *h, in dequeue_huge_page_vma() argument
645 h->free_huge_pages - h->resv_huge_pages == 0) in dequeue_huge_page_vma()
649 if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0) in dequeue_huge_page_vma()
655 htlb_alloc_mask(h), &mpol, &nodemask); in dequeue_huge_page_vma()
659 if (cpuset_zone_allowed(zone, htlb_alloc_mask(h))) { in dequeue_huge_page_vma()
660 page = dequeue_huge_page_node(h, zone_to_nid(zone)); in dequeue_huge_page_vma()
668 h->resv_huge_pages--; in dequeue_huge_page_vma()
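dequeue_huge_page_vma() above refuses to hand the last reserved pages to a request that holds no reservation, and a successful dequeue against a reservation consumes one (the resv_huge_pages-- on the previous line). A simplified sketch under those assumptions; the mempolicy and zonelist walk is omitted and this is not the kernel code:

#include <stdbool.h>
#include <stdio.h>

static long free_huge_pages = 2, resv_huge_pages = 2;

static bool try_dequeue(bool avoid_reserve, bool has_reservation)
{
    if (!has_reservation && free_huge_pages - resv_huge_pages == 0)
        return false;             /* only reserved pages are left */
    if (avoid_reserve && free_huge_pages - resv_huge_pages == 0)
        return false;

    free_huge_pages--;            /* page handed out */
    if (!avoid_reserve && has_reservation)
        resv_huge_pages--;        /* one reservation consumed */
    return true;
}

int main(void)
{
    printf("%d\n", try_dequeue(false, false)); /* 0: no reservation, pool fully reserved */
    printf("%d\n", try_dequeue(false, true));  /* 1: satisfied from the reservation     */
    return 0;
}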
713 static int hstate_next_node_to_alloc(struct hstate *h, in hstate_next_node_to_alloc() argument
720 nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed); in hstate_next_node_to_alloc()
721 h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed); in hstate_next_node_to_alloc()
732 static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed) in hstate_next_node_to_free() argument
738 nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed); in hstate_next_node_to_free()
739 h->next_nid_to_free = next_node_allowed(nid, nodes_allowed); in hstate_next_node_to_free()
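hstate_next_node_to_alloc() and hstate_next_node_to_free() above keep a round-robin cursor over the allowed node mask so pool growth and shrinkage are spread across NUMA nodes. An illustrative analog; the kernel uses nodemask_t and next_node(), this sketch uses a plain array:

#include <stdio.h>

#define NR_NODES 4

static int next_node_allowed(int nid, const int *allowed)
{
    do {
        nid = (nid + 1) % NR_NODES;   /* advance, wrapping around */
    } while (!allowed[nid]);
    return nid;
}

int main(void)
{
    int allowed[NR_NODES] = { 1, 0, 1, 1 };    /* node 1 disallowed (assumed) */
    int next_nid_to_alloc = 0;

    for (int i = 0; i < 5; i++) {
        int nid = next_nid_to_alloc;           /* node used this round */
        next_nid_to_alloc = next_node_allowed(nid, allowed);
        printf("alloc on node %d\n", nid);     /* 0, 2, 3, 0, 2 */
    }
    return 0;
}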
853 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
856 static struct page *alloc_fresh_gigantic_page_node(struct hstate *h, int nid) in alloc_fresh_gigantic_page_node() argument
860 page = alloc_gigantic_page(nid, huge_page_order(h)); in alloc_fresh_gigantic_page_node()
862 prep_compound_gigantic_page(page, huge_page_order(h)); in alloc_fresh_gigantic_page_node()
863 prep_new_huge_page(h, page, nid); in alloc_fresh_gigantic_page_node()
869 static int alloc_fresh_gigantic_page(struct hstate *h, in alloc_fresh_gigantic_page() argument
875 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) { in alloc_fresh_gigantic_page()
876 page = alloc_fresh_gigantic_page_node(h, node); in alloc_fresh_gigantic_page()
890 static inline int alloc_fresh_gigantic_page(struct hstate *h, in alloc_fresh_gigantic_page() argument
894 static void update_and_free_page(struct hstate *h, struct page *page) in update_and_free_page() argument
898 if (hstate_is_gigantic(h) && !gigantic_page_supported()) in update_and_free_page()
901 h->nr_huge_pages--; in update_and_free_page()
902 h->nr_huge_pages_node[page_to_nid(page)]--; in update_and_free_page()
903 for (i = 0; i < pages_per_huge_page(h); i++) { in update_and_free_page()
912 if (hstate_is_gigantic(h)) { in update_and_free_page()
913 destroy_compound_gigantic_page(page, huge_page_order(h)); in update_and_free_page()
914 free_gigantic_page(page, huge_page_order(h)); in update_and_free_page()
917 __free_pages(page, huge_page_order(h)); in update_and_free_page()
923 struct hstate *h; in size_to_hstate() local
925 for_each_hstate(h) { in size_to_hstate()
926 if (huge_page_size(h) == size) in size_to_hstate()
927 return h; in size_to_hstate()
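size_to_hstate() above is a linear scan of the registered hstates for a matching page size. A self-contained analog with assumed sizes (2 MB and 1 GB); the struct and table here are illustrative, not the kernel's:

#include <stdio.h>
#include <stddef.h>

struct hstate { unsigned long size; const char *name; };

static struct hstate hstates[] = {
    { 2UL << 20, "hugepages-2048kB"    },
    { 1UL << 30, "hugepages-1048576kB" },
};

static struct hstate *size_to_hstate(unsigned long size)
{
    for (size_t i = 0; i < sizeof(hstates) / sizeof(hstates[0]); i++)
        if (hstates[i].size == size)
            return &hstates[i];
    return NULL;
}

int main(void)
{
    struct hstate *h = size_to_hstate(2UL << 20);
    printf("%s\n", h ? h->name : "no matching hstate");
    return 0;
}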
963 struct hstate *h = page_hstate(page); in free_huge_page() local
986 hugetlb_cgroup_uncharge_page(hstate_index(h), in free_huge_page()
987 pages_per_huge_page(h), page); in free_huge_page()
989 h->resv_huge_pages++; in free_huge_page()
991 if (h->surplus_huge_pages_node[nid]) { in free_huge_page()
994 update_and_free_page(h, page); in free_huge_page()
995 h->surplus_huge_pages--; in free_huge_page()
996 h->surplus_huge_pages_node[nid]--; in free_huge_page()
999 enqueue_huge_page(h, page); in free_huge_page()
1004 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid) in prep_new_huge_page() argument
1010 h->nr_huge_pages++; in prep_new_huge_page()
1011 h->nr_huge_pages_node[nid]++; in prep_new_huge_page()
1092 static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid) in alloc_fresh_huge_page_node() argument
1097 htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE| in alloc_fresh_huge_page_node()
1099 huge_page_order(h)); in alloc_fresh_huge_page_node()
1102 __free_pages(page, huge_page_order(h)); in alloc_fresh_huge_page_node()
1105 prep_new_huge_page(h, page, nid); in alloc_fresh_huge_page_node()
1111 static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed) in alloc_fresh_huge_page() argument
1117 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) { in alloc_fresh_huge_page()
1118 page = alloc_fresh_huge_page_node(h, node); in alloc_fresh_huge_page()
1139 static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed, in free_pool_huge_page() argument
1145 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) { in free_pool_huge_page()
1150 if ((!acct_surplus || h->surplus_huge_pages_node[node]) && in free_pool_huge_page()
1151 !list_empty(&h->hugepage_freelists[node])) { in free_pool_huge_page()
1153 list_entry(h->hugepage_freelists[node].next, in free_pool_huge_page()
1156 h->free_huge_pages--; in free_pool_huge_page()
1157 h->free_huge_pages_node[node]--; in free_pool_huge_page()
1159 h->surplus_huge_pages--; in free_pool_huge_page()
1160 h->surplus_huge_pages_node[node]--; in free_pool_huge_page()
1162 update_and_free_page(h, page); in free_pool_huge_page()
1179 struct hstate *h = page_hstate(page); in dissolve_free_huge_page() local
1182 h->free_huge_pages--; in dissolve_free_huge_page()
1183 h->free_huge_pages_node[nid]--; in dissolve_free_huge_page()
1184 update_and_free_page(h, page); in dissolve_free_huge_page()
1206 static struct page *alloc_buddy_huge_page(struct hstate *h, int nid) in alloc_buddy_huge_page() argument
1211 if (hstate_is_gigantic(h)) in alloc_buddy_huge_page()
1238 if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) { in alloc_buddy_huge_page()
1242 h->nr_huge_pages++; in alloc_buddy_huge_page()
1243 h->surplus_huge_pages++; in alloc_buddy_huge_page()
1248 page = alloc_pages(htlb_alloc_mask(h)|__GFP_COMP| in alloc_buddy_huge_page()
1250 huge_page_order(h)); in alloc_buddy_huge_page()
1253 htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE| in alloc_buddy_huge_page()
1254 __GFP_REPEAT|__GFP_NOWARN, huge_page_order(h)); in alloc_buddy_huge_page()
1257 __free_pages(page, huge_page_order(h)); in alloc_buddy_huge_page()
1270 h->nr_huge_pages_node[r_nid]++; in alloc_buddy_huge_page()
1271 h->surplus_huge_pages_node[r_nid]++; in alloc_buddy_huge_page()
1274 h->nr_huge_pages--; in alloc_buddy_huge_page()
1275 h->surplus_huge_pages--; in alloc_buddy_huge_page()
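alloc_buddy_huge_page() above enforces nr_overcommit_huge_pages and does its surplus accounting optimistically, rolling the counters back when the buddy allocation fails (the decrements on the last two lines). A sketch of that pattern with locking and the real page allocation elided:

#include <stdbool.h>
#include <stdio.h>

static unsigned long nr_huge_pages, surplus_huge_pages;
static unsigned long nr_overcommit_huge_pages = 2;   /* assumed limit */

static bool alloc_surplus_page(bool alloc_succeeds)
{
    if (surplus_huge_pages >= nr_overcommit_huge_pages)
        return false;             /* overcommit limit reached */

    nr_huge_pages++;              /* optimistic accounting */
    surplus_huge_pages++;

    if (!alloc_succeeds) {        /* stand-in for alloc_pages() failing */
        nr_huge_pages--;          /* roll the counters back */
        surplus_huge_pages--;
        return false;
    }
    return true;
}

int main(void)
{
    printf("%d\n", alloc_surplus_page(true));   /* 1: under the limit               */
    printf("%d\n", alloc_surplus_page(false));  /* 0: allocation failed, rolled back */
    printf("%d\n", alloc_surplus_page(true));   /* 1: limit of 2 still not reached   */
    return 0;
}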
1288 struct page *alloc_huge_page_node(struct hstate *h, int nid) in alloc_huge_page_node() argument
1293 if (h->free_huge_pages - h->resv_huge_pages > 0) in alloc_huge_page_node()
1294 page = dequeue_huge_page_node(h, nid); in alloc_huge_page_node()
1298 page = alloc_buddy_huge_page(h, nid); in alloc_huge_page_node()
1307 static int gather_surplus_pages(struct hstate *h, int delta) in gather_surplus_pages() argument
1315 needed = (h->resv_huge_pages + delta) - h->free_huge_pages; in gather_surplus_pages()
1317 h->resv_huge_pages += delta; in gather_surplus_pages()
1328 page = alloc_buddy_huge_page(h, NUMA_NO_NODE); in gather_surplus_pages()
1342 needed = (h->resv_huge_pages + delta) - in gather_surplus_pages()
1343 (h->free_huge_pages + allocated); in gather_surplus_pages()
1363 h->resv_huge_pages += delta; in gather_surplus_pages()
1376 enqueue_huge_page(h, page); in gather_surplus_pages()
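gather_surplus_pages() above computes how far the free pool falls short of the reservations it must now cover. A worked example of that shortfall calculation with made-up pool numbers:

#include <stdio.h>

int main(void)
{
    long resv_huge_pages = 10;   /* pages already promised          */
    long free_huge_pages = 8;    /* pages sitting on the free lists */
    long delta           = 3;    /* new reservation being requested */

    long needed = (resv_huge_pages + delta) - free_huge_pages;
    if (needed <= 0)
        printf("pool already covers the reservation\n");
    else
        printf("must allocate %ld surplus page(s)\n", needed);   /* 5 */
    return 0;
}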
1395 static void return_unused_surplus_pages(struct hstate *h, in return_unused_surplus_pages() argument
1401 h->resv_huge_pages -= unused_resv_pages; in return_unused_surplus_pages()
1404 if (hstate_is_gigantic(h)) in return_unused_surplus_pages()
1407 nr_pages = min(unused_resv_pages, h->surplus_huge_pages); in return_unused_surplus_pages()
1418 if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1)) in return_unused_surplus_pages()
1434 static long vma_needs_reservation(struct hstate *h, in vma_needs_reservation() argument
1445 idx = vma_hugecache_offset(h, vma, addr); in vma_needs_reservation()
1453 static void vma_commit_reservation(struct hstate *h, in vma_commit_reservation() argument
1463 idx = vma_hugecache_offset(h, vma, addr); in vma_commit_reservation()
1471 struct hstate *h = hstate_vma(vma); in alloc_huge_page() local
1477 idx = hstate_index(h); in alloc_huge_page()
1486 chg = vma_needs_reservation(h, vma, addr); in alloc_huge_page()
1493 ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg); in alloc_huge_page()
1498 page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, chg); in alloc_huge_page()
1501 page = alloc_buddy_huge_page(h, NUMA_NO_NODE); in alloc_huge_page()
1506 list_move(&page->lru, &h->hugepage_activelist); in alloc_huge_page()
1509 hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page); in alloc_huge_page()
1514 vma_commit_reservation(h, vma, addr); in alloc_huge_page()
1518 hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg); in alloc_huge_page()
1539 int __weak alloc_bootmem_huge_page(struct hstate *h) in alloc_bootmem_huge_page() argument
1544 for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) { in alloc_bootmem_huge_page()
1548 huge_page_size(h), huge_page_size(h), in alloc_bootmem_huge_page()
1563 BUG_ON(!IS_ALIGNED(virt_to_phys(m), huge_page_size(h))); in alloc_bootmem_huge_page()
1566 m->hstate = h; in alloc_bootmem_huge_page()
1585 struct hstate *h = m->hstate; in gather_bootmem_prealloc() local
1596 prep_compound_huge_page(page, h->order); in gather_bootmem_prealloc()
1598 prep_new_huge_page(h, page, page_to_nid(page)); in gather_bootmem_prealloc()
1605 if (hstate_is_gigantic(h)) in gather_bootmem_prealloc()
1606 adjust_managed_page_count(page, 1 << h->order); in gather_bootmem_prealloc()
1610 static void __init hugetlb_hstate_alloc_pages(struct hstate *h) in hugetlb_hstate_alloc_pages() argument
1614 for (i = 0; i < h->max_huge_pages; ++i) { in hugetlb_hstate_alloc_pages()
1615 if (hstate_is_gigantic(h)) { in hugetlb_hstate_alloc_pages()
1616 if (!alloc_bootmem_huge_page(h)) in hugetlb_hstate_alloc_pages()
1618 } else if (!alloc_fresh_huge_page(h, in hugetlb_hstate_alloc_pages()
1622 h->max_huge_pages = i; in hugetlb_hstate_alloc_pages()
1627 struct hstate *h; in hugetlb_init_hstates() local
1629 for_each_hstate(h) { in hugetlb_init_hstates()
1630 if (minimum_order > huge_page_order(h)) in hugetlb_init_hstates()
1631 minimum_order = huge_page_order(h); in hugetlb_init_hstates()
1634 if (!hstate_is_gigantic(h)) in hugetlb_init_hstates()
1635 hugetlb_hstate_alloc_pages(h); in hugetlb_init_hstates()
1653 struct hstate *h; in report_hugepages() local
1655 for_each_hstate(h) { in report_hugepages()
1658 memfmt(buf, huge_page_size(h)), in report_hugepages()
1659 h->free_huge_pages); in report_hugepages()
1664 static void try_to_free_low(struct hstate *h, unsigned long count, in try_to_free_low() argument
1669 if (hstate_is_gigantic(h)) in try_to_free_low()
1674 struct list_head *freel = &h->hugepage_freelists[i]; in try_to_free_low()
1676 if (count >= h->nr_huge_pages) in try_to_free_low()
1681 update_and_free_page(h, page); in try_to_free_low()
1682 h->free_huge_pages--; in try_to_free_low()
1683 h->free_huge_pages_node[page_to_nid(page)]--; in try_to_free_low()
1688 static inline void try_to_free_low(struct hstate *h, unsigned long count, in try_to_free_low() argument
1699 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed, in adjust_pool_surplus() argument
1707 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) { in adjust_pool_surplus()
1708 if (h->surplus_huge_pages_node[node]) in adjust_pool_surplus()
1712 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) { in adjust_pool_surplus()
1713 if (h->surplus_huge_pages_node[node] < in adjust_pool_surplus()
1714 h->nr_huge_pages_node[node]) in adjust_pool_surplus()
1721 h->surplus_huge_pages += delta; in adjust_pool_surplus()
1722 h->surplus_huge_pages_node[node] += delta; in adjust_pool_surplus()
1726 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages) argument
1727 static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count, in set_max_huge_pages() argument
1732 if (hstate_is_gigantic(h) && !gigantic_page_supported()) in set_max_huge_pages()
1733 return h->max_huge_pages; in set_max_huge_pages()
1747 while (h->surplus_huge_pages && count > persistent_huge_pages(h)) { in set_max_huge_pages()
1748 if (!adjust_pool_surplus(h, nodes_allowed, -1)) in set_max_huge_pages()
1752 while (count > persistent_huge_pages(h)) { in set_max_huge_pages()
1759 if (hstate_is_gigantic(h)) in set_max_huge_pages()
1760 ret = alloc_fresh_gigantic_page(h, nodes_allowed); in set_max_huge_pages()
1762 ret = alloc_fresh_huge_page(h, nodes_allowed); in set_max_huge_pages()
1787 min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages; in set_max_huge_pages()
1789 try_to_free_low(h, min_count, nodes_allowed); in set_max_huge_pages()
1790 while (min_count < persistent_huge_pages(h)) { in set_max_huge_pages()
1791 if (!free_pool_huge_page(h, nodes_allowed, 0)) in set_max_huge_pages()
1795 while (count < persistent_huge_pages(h)) { in set_max_huge_pages()
1796 if (!adjust_pool_surplus(h, nodes_allowed, 1)) in set_max_huge_pages()
1800 ret = persistent_huge_pages(h); in set_max_huge_pages()
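set_max_huge_pages() above works in terms of persistent pages (total minus surplus) and never shrinks the pool below min_count, the pages that are reserved or currently in use. A worked example with illustrative counters; the loop bodies themselves are not reproduced:

#include <stdio.h>

int main(void)
{
    unsigned long nr_huge_pages      = 20;
    unsigned long surplus_huge_pages = 4;
    unsigned long resv_huge_pages    = 6;
    unsigned long free_huge_pages    = 10;

    unsigned long persistent = nr_huge_pages - surplus_huge_pages;        /* 16 */
    unsigned long min_count  = resv_huge_pages + nr_huge_pages
                               - free_huge_pages;                         /* 16 */

    unsigned long count  = 12;                       /* requested pool size */
    unsigned long target = count > min_count ? count : min_count;

    /* the shrink loop stops once persistent pages reach 'target' */
    printf("persistent=%lu shrink floor=%lu\n", persistent, target);      /* 16 16 */
    return 0;
}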
1834 struct hstate *h; in nr_hugepages_show_common() local
1838 h = kobj_to_hstate(kobj, &nid); in nr_hugepages_show_common()
1840 nr_huge_pages = h->nr_huge_pages; in nr_hugepages_show_common()
1842 nr_huge_pages = h->nr_huge_pages_node[nid]; in nr_hugepages_show_common()
1848 struct hstate *h, int nid, in __nr_hugepages_store_common() argument
1854 if (hstate_is_gigantic(h) && !gigantic_page_supported()) { in __nr_hugepages_store_common()
1873 count += h->nr_huge_pages - h->nr_huge_pages_node[nid]; in __nr_hugepages_store_common()
1878 h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed); in __nr_hugepages_store_common()
1893 struct hstate *h; in nr_hugepages_store_common() local
1902 h = kobj_to_hstate(kobj, &nid); in nr_hugepages_store_common()
1903 return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len); in nr_hugepages_store_common()
1943 struct hstate *h = kobj_to_hstate(kobj, NULL); in nr_overcommit_hugepages_show() local
1944 return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages); in nr_overcommit_hugepages_show()
1952 struct hstate *h = kobj_to_hstate(kobj, NULL); in nr_overcommit_hugepages_store() local
1954 if (hstate_is_gigantic(h)) in nr_overcommit_hugepages_store()
1962 h->nr_overcommit_huge_pages = input; in nr_overcommit_hugepages_store()
1972 struct hstate *h; in free_hugepages_show() local
1976 h = kobj_to_hstate(kobj, &nid); in free_hugepages_show()
1978 free_huge_pages = h->free_huge_pages; in free_hugepages_show()
1980 free_huge_pages = h->free_huge_pages_node[nid]; in free_hugepages_show()
1989 struct hstate *h = kobj_to_hstate(kobj, NULL); in resv_hugepages_show() local
1990 return sprintf(buf, "%lu\n", h->resv_huge_pages); in resv_hugepages_show()
1997 struct hstate *h; in surplus_hugepages_show() local
2001 h = kobj_to_hstate(kobj, &nid); in surplus_hugepages_show()
2003 surplus_huge_pages = h->surplus_huge_pages; in surplus_hugepages_show()
2005 surplus_huge_pages = h->surplus_huge_pages_node[nid]; in surplus_hugepages_show()
2027 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent, in hugetlb_sysfs_add_hstate() argument
2032 int hi = hstate_index(h); in hugetlb_sysfs_add_hstate()
2034 hstate_kobjs[hi] = kobject_create_and_add(h->name, parent); in hugetlb_sysfs_add_hstate()
2047 struct hstate *h; in hugetlb_sysfs_init() local
2054 for_each_hstate(h) { in hugetlb_sysfs_init()
2055 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj, in hugetlb_sysfs_init()
2058 pr_err("Hugetlb: Unable to add hstate %s", h->name); in hugetlb_sysfs_init()
2120 struct hstate *h; in hugetlb_unregister_node() local
2126 for_each_hstate(h) { in hugetlb_unregister_node()
2127 int idx = hstate_index(h); in hugetlb_unregister_node()
2164 struct hstate *h; in hugetlb_register_node() local
2176 for_each_hstate(h) { in hugetlb_register_node()
2177 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj, in hugetlb_register_node()
2182 h->name, node->dev.id); in hugetlb_register_node()
2229 struct hstate *h; in hugetlb_exit() local
2233 for_each_hstate(h) { in hugetlb_exit()
2234 kobject_put(hstate_kobjs[hstate_index(h)]); in hugetlb_exit()
2284 struct hstate *h; in hugetlb_add_hstate() local
2293 h = &hstates[hugetlb_max_hstate++]; in hugetlb_add_hstate()
2294 h->order = order; in hugetlb_add_hstate()
2295 h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1); in hugetlb_add_hstate()
2296 h->nr_huge_pages = 0; in hugetlb_add_hstate()
2297 h->free_huge_pages = 0; in hugetlb_add_hstate()
2299 INIT_LIST_HEAD(&h->hugepage_freelists[i]); in hugetlb_add_hstate()
2300 INIT_LIST_HEAD(&h->hugepage_activelist); in hugetlb_add_hstate()
2301 h->next_nid_to_alloc = first_node(node_states[N_MEMORY]); in hugetlb_add_hstate()
2302 h->next_nid_to_free = first_node(node_states[N_MEMORY]); in hugetlb_add_hstate()
2303 snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB", in hugetlb_add_hstate()
2304 huge_page_size(h)/1024); in hugetlb_add_hstate()
2306 parsed_hstate = h; in hugetlb_add_hstate()
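hugetlb_add_hstate() above derives the hstate's address mask and sysfs name from the page order. A worked example for an assumed order-9 (2 MB) hstate with PAGE_SHIFT == 12:

#include <stdio.h>

int main(void)
{
    unsigned int order = 9, page_shift = 12;

    unsigned long long size = 1ULL << (order + page_shift);          /* 2 MB */
    unsigned long long mask = ~((1ULL << (order + page_shift)) - 1);

    char name[32];
    snprintf(name, sizeof(name), "hugepages-%llukB", size / 1024);

    printf("size=%llu mask=0x%llx name=%s\n", size, mask, name);
    /* size=2097152 mask=0xffffffffffe00000 name=hugepages-2048kB */
    return 0;
}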
2369 struct hstate *h = &default_hstate; in hugetlb_sysctl_handler_common() local
2370 unsigned long tmp = h->max_huge_pages; in hugetlb_sysctl_handler_common()
2383 ret = __nr_hugepages_store_common(obey_mempolicy, h, in hugetlb_sysctl_handler_common()
2410 struct hstate *h = &default_hstate; in hugetlb_overcommit_handler() local
2417 tmp = h->nr_overcommit_huge_pages; in hugetlb_overcommit_handler()
2419 if (write && hstate_is_gigantic(h)) in hugetlb_overcommit_handler()
2430 h->nr_overcommit_huge_pages = tmp; in hugetlb_overcommit_handler()
2441 struct hstate *h = &default_hstate; in hugetlb_report_meminfo() local
2450 h->nr_huge_pages, in hugetlb_report_meminfo()
2451 h->free_huge_pages, in hugetlb_report_meminfo()
2452 h->resv_huge_pages, in hugetlb_report_meminfo()
2453 h->surplus_huge_pages, in hugetlb_report_meminfo()
2454 1UL << (huge_page_order(h) + PAGE_SHIFT - 10)); in hugetlb_report_meminfo()
2459 struct hstate *h = &default_hstate; in hugetlb_report_node_meminfo() local
2466 nid, h->nr_huge_pages_node[nid], in hugetlb_report_node_meminfo()
2467 nid, h->free_huge_pages_node[nid], in hugetlb_report_node_meminfo()
2468 nid, h->surplus_huge_pages_node[nid]); in hugetlb_report_node_meminfo()
2473 struct hstate *h; in hugetlb_show_meminfo() local
2480 for_each_hstate(h) in hugetlb_show_meminfo()
2483 h->nr_huge_pages_node[nid], in hugetlb_show_meminfo()
2484 h->free_huge_pages_node[nid], in hugetlb_show_meminfo()
2485 h->surplus_huge_pages_node[nid], in hugetlb_show_meminfo()
2486 1UL << (huge_page_order(h) + PAGE_SHIFT - 10)); in hugetlb_show_meminfo()
2492 struct hstate *h; in hugetlb_total_pages() local
2495 for_each_hstate(h) in hugetlb_total_pages()
2496 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h); in hugetlb_total_pages()
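hugetlb_report_meminfo() above prints the huge page size in kB via 1UL << (order + PAGE_SHIFT - 10), and hugetlb_total_pages() converts the pool count to base pages. A worked example assuming a pool of 64 2 MB pages:

#include <stdio.h>

int main(void)
{
    unsigned int order = 9, page_shift = 12;        /* 2 MB huge pages */
    unsigned long nr_huge_pages = 64;               /* assumed pool    */

    unsigned long hpage_kb = 1UL << (order + page_shift - 10);   /* 2048 kB */
    unsigned long pages_per_huge_page = 1UL << order;            /* 512     */
    unsigned long nr_total_pages = nr_huge_pages * pages_per_huge_page;

    printf("Hugepagesize: %lu kB, total base pages: %lu\n",
           hpage_kb, nr_total_pages);               /* 2048 kB, 32768 */
    return 0;
}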
2500 static int hugetlb_acct_memory(struct hstate *h, long delta) in hugetlb_acct_memory() argument
2523 if (gather_surplus_pages(h, delta) < 0) in hugetlb_acct_memory()
2526 if (delta > cpuset_mems_nr(h->free_huge_pages_node)) { in hugetlb_acct_memory()
2527 return_unused_surplus_pages(h, delta); in hugetlb_acct_memory()
2534 return_unused_surplus_pages(h, (unsigned long) -delta); in hugetlb_acct_memory()
2559 struct hstate *h = hstate_vma(vma); in hugetlb_vm_op_close() local
2568 start = vma_hugecache_offset(h, vma, vma->vm_start); in hugetlb_vm_op_close()
2569 end = vma_hugecache_offset(h, vma, vma->vm_end); in hugetlb_vm_op_close()
2581 hugetlb_acct_memory(h, -gbl_reserve); in hugetlb_vm_op_close()
2665 struct hstate *h = hstate_vma(vma); in copy_hugetlb_page_range() local
2666 unsigned long sz = huge_page_size(h); in copy_hugetlb_page_range()
2693 dst_ptl = huge_pte_lock(h, dst, dst_pte); in copy_hugetlb_page_range()
2694 src_ptl = huge_pte_lockptr(h, src, src_pte); in copy_hugetlb_page_range()
2746 struct hstate *h = hstate_vma(vma); in __unmap_hugepage_range() local
2747 unsigned long sz = huge_page_size(h); in __unmap_hugepage_range()
2752 BUG_ON(start & ~huge_page_mask(h)); in __unmap_hugepage_range()
2753 BUG_ON(end & ~huge_page_mask(h)); in __unmap_hugepage_range()
2764 ptl = huge_pte_lock(h, mm, ptep); in __unmap_hugepage_range()
2875 struct hstate *h = hstate_vma(vma); in unmap_ref_private() local
2884 address = address & huge_page_mask(h); in unmap_ref_private()
2917 address + huge_page_size(h), page); in unmap_ref_private()
2932 struct hstate *h = hstate_vma(vma); in hugetlb_cow() local
2985 ptep = huge_pte_offset(mm, address & huge_page_mask(h)); in hugetlb_cow()
3011 pages_per_huge_page(h)); in hugetlb_cow()
3015 mmun_start = address & huge_page_mask(h); in hugetlb_cow()
3016 mmun_end = mmun_start + huge_page_size(h); in hugetlb_cow()
3024 ptep = huge_pte_offset(mm, address & huge_page_mask(h)); in hugetlb_cow()
3050 static struct page *hugetlbfs_pagecache_page(struct hstate *h, in hugetlbfs_pagecache_page() argument
3057 idx = vma_hugecache_offset(h, vma, address); in hugetlbfs_pagecache_page()
3066 static bool hugetlbfs_pagecache_present(struct hstate *h, in hugetlbfs_pagecache_present() argument
3074 idx = vma_hugecache_offset(h, vma, address); in hugetlbfs_pagecache_present()
3086 struct hstate *h = hstate_vma(vma); in hugetlb_no_page() local
3112 size = i_size_read(mapping->host) >> huge_page_shift(h); in hugetlb_no_page()
3124 clear_huge_page(page, address, pages_per_huge_page(h)); in hugetlb_no_page()
3142 inode->i_blocks += blocks_per_huge_page(h); in hugetlb_no_page()
3160 VM_FAULT_SET_HINDEX(hstate_index(h)); in hugetlb_no_page()
3172 if (vma_needs_reservation(h, vma, address) < 0) { in hugetlb_no_page()
3177 ptl = huge_pte_lockptr(h, mm, ptep); in hugetlb_no_page()
3179 size = i_size_read(mapping->host) >> huge_page_shift(h); in hugetlb_no_page()
3215 static u32 fault_mutex_hash(struct hstate *h, struct mm_struct *mm, in fault_mutex_hash() argument
3228 key[1] = address >> huge_page_shift(h); in fault_mutex_hash()
3240 static u32 fault_mutex_hash(struct hstate *h, struct mm_struct *mm, in fault_mutex_hash() argument
3259 struct hstate *h = hstate_vma(vma); in hugetlb_fault() local
3263 address &= huge_page_mask(h); in hugetlb_fault()
3273 VM_FAULT_SET_HINDEX(hstate_index(h)); in hugetlb_fault()
3276 ptep = huge_pte_alloc(mm, address, huge_page_size(h)); in hugetlb_fault()
3281 idx = vma_hugecache_offset(h, vma, address); in hugetlb_fault()
3288 hash = fault_mutex_hash(h, mm, vma, mapping, idx, address); in hugetlb_fault()
3318 if (vma_needs_reservation(h, vma, address) < 0) { in hugetlb_fault()
3324 pagecache_page = hugetlbfs_pagecache_page(h, in hugetlb_fault()
3328 ptl = huge_pte_lock(h, mm, ptep); in hugetlb_fault()
3393 struct hstate *h = hstate_vma(vma); in follow_hugetlb_page() local
3417 pte = huge_pte_offset(mm, vaddr & huge_page_mask(h)); in follow_hugetlb_page()
3419 ptl = huge_pte_lock(h, mm, pte); in follow_hugetlb_page()
3430 !hugetlbfs_pagecache_present(h, vma, vaddr)) { in follow_hugetlb_page()
3463 pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT; in follow_hugetlb_page()
3479 pfn_offset < pages_per_huge_page(h)) { in follow_hugetlb_page()
3501 struct hstate *h = hstate_vma(vma); in hugetlb_change_protection() local
3509 for (; address < end; address += huge_page_size(h)) { in hugetlb_change_protection()
3514 ptl = huge_pte_lock(h, mm, ptep); in hugetlb_change_protection()
3559 return pages << h->order; in hugetlb_change_protection()
3568 struct hstate *h = hstate_inode(inode); in hugetlb_reserve_pages() local
3623 ret = hugetlb_acct_memory(h, gbl_reserve); in hugetlb_reserve_pages()
3652 struct hstate *h = hstate_inode(inode); in hugetlb_unreserve_pages() local
3661 inode->i_blocks -= (blocks_per_huge_page(h) * freed); in hugetlb_unreserve_pages()
3669 hugetlb_acct_memory(h, -gbl_reserve); in hugetlb_unreserve_pages()
3917 struct hstate *h = page_hstate(hpage); in dequeue_hwpoisoned_huge_page() local
3935 h->free_huge_pages--; in dequeue_hwpoisoned_huge_page()
3936 h->free_huge_pages_node[nid]--; in dequeue_hwpoisoned_huge_page()