Lines matching refs: zone

349 static int page_outside_zone_boundaries(struct zone *zone, struct page *page)  in page_outside_zone_boundaries()  argument
357 seq = zone_span_seqbegin(zone); in page_outside_zone_boundaries()
358 start_pfn = zone->zone_start_pfn; in page_outside_zone_boundaries()
359 sp = zone->spanned_pages; in page_outside_zone_boundaries()
360 if (!zone_spans_pfn(zone, pfn)) in page_outside_zone_boundaries()
362 } while (zone_span_seqretry(zone, seq)); in page_outside_zone_boundaries()
366 pfn, zone_to_nid(zone), zone->name, in page_outside_zone_boundaries()
372 static int page_is_consistent(struct zone *zone, struct page *page) in page_is_consistent() argument
376 if (zone != page_zone(page)) in page_is_consistent()
384 static int bad_range(struct zone *zone, struct page *page) in bad_range() argument
386 if (page_outside_zone_boundaries(zone, page)) in bad_range()
388 if (!page_is_consistent(zone, page)) in bad_range()
394 static inline int bad_range(struct zone *zone, struct page *page) in bad_range() argument
533 static inline void set_page_guard(struct zone *zone, struct page *page, in set_page_guard() argument
547 __mod_zone_freepage_state(zone, -(1 << order), migratetype); in set_page_guard()
550 static inline void clear_page_guard(struct zone *zone, struct page *page, in clear_page_guard() argument
563 __mod_zone_freepage_state(zone, (1 << order), migratetype); in clear_page_guard()
567 static inline void set_page_guard(struct zone *zone, struct page *page, in set_page_guard() argument
569 static inline void clear_page_guard(struct zone *zone, struct page *page, in clear_page_guard() argument
658 struct zone *zone, unsigned int order, in __free_one_page() argument
669 VM_BUG_ON(!zone_is_initialized(zone)); in __free_one_page()
674 __mod_zone_freepage_state(zone, 1 << order, migratetype); in __free_one_page()
679 VM_BUG_ON_PAGE(bad_range(zone, page), page); in __free_one_page()
692 clear_page_guard(zone, buddy, order, migratetype); in __free_one_page()
695 zone->free_area[order].nr_free--; in __free_one_page()
712 if (unlikely(has_isolate_pageblock(zone))) { in __free_one_page()
747 &zone->free_area[order].free_list[migratetype]); in __free_one_page()
752 list_add(&page->lru, &zone->free_area[order].free_list[migratetype]); in __free_one_page()
754 zone->free_area[order].nr_free++; in __free_one_page()
797 static void free_pcppages_bulk(struct zone *zone, int count, in free_pcppages_bulk() argument
805 spin_lock(&zone->lock); in free_pcppages_bulk()
806 nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED); in free_pcppages_bulk()
808 __mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned); in free_pcppages_bulk()
843 if (unlikely(has_isolate_pageblock(zone))) in free_pcppages_bulk()
846 __free_one_page(page, page_to_pfn(page), zone, 0, mt); in free_pcppages_bulk()
850 spin_unlock(&zone->lock); in free_pcppages_bulk()
853 static void free_one_page(struct zone *zone, in free_one_page() argument
859 spin_lock(&zone->lock); in free_one_page()
860 nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED); in free_one_page()
862 __mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned); in free_one_page()
864 if (unlikely(has_isolate_pageblock(zone) || in free_one_page()
868 __free_one_page(page, pfn, zone, order, migratetype); in free_one_page()
869 spin_unlock(&zone->lock); in free_one_page()
901 unsigned long zone, int nid) in __init_single_page() argument
903 set_page_links(page, zone, nid, pfn); in __init_single_page()
911 if (!is_highmem_idx(zone)) in __init_single_page()
916 static void __meminit __init_single_pfn(unsigned long pfn, unsigned long zone, in __init_single_pfn() argument
919 return __init_single_page(pfn_to_page(pfn), pfn, zone, nid); in __init_single_pfn()
935 struct zone *zone = &pgdat->node_zones[zid]; in init_reserved_page() local
937 if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone)) in init_reserved_page()
1148 struct zone *zone; in deferred_init_memmap() local
1168 zone = pgdat->node_zones + zid; in deferred_init_memmap()
1169 if (first_init_pfn < zone_end_pfn(zone)) in deferred_init_memmap()
1180 end_pfn = min(walk_end, zone_end_pfn(zone)); in deferred_init_memmap()
1184 if (pfn < zone->zone_start_pfn) in deferred_init_memmap()
1185 pfn = zone->zone_start_pfn; in deferred_init_memmap()
1222 VM_BUG_ON(page_zone(page) != zone); in deferred_init_memmap()
1249 WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone)); in deferred_init_memmap()
1321 static inline void expand(struct zone *zone, struct page *page, in expand() argument
1331 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]); in expand()
1342 set_page_guard(zone, &page[size], high, migratetype); in expand()
1430 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, in __rmqueue_smallest() argument
1439 area = &(zone->free_area[current_order]); in __rmqueue_smallest()
1448 expand(zone, page, order, current_order, area, migratetype); in __rmqueue_smallest()
1474 static struct page *__rmqueue_cma_fallback(struct zone *zone, in __rmqueue_cma_fallback() argument
1477 return __rmqueue_smallest(zone, order, MIGRATE_CMA); in __rmqueue_cma_fallback()
1480 static inline struct page *__rmqueue_cma_fallback(struct zone *zone, in __rmqueue_cma_fallback() argument
1489 int move_freepages(struct zone *zone, in move_freepages() argument
1510 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page); in move_freepages()
1524 &zone->free_area[order].free_list[migratetype]); in move_freepages()
1532 int move_freepages_block(struct zone *zone, struct page *page, in move_freepages_block() argument
1545 if (!zone_spans_pfn(zone, start_pfn)) in move_freepages_block()
1547 if (!zone_spans_pfn(zone, end_pfn)) in move_freepages_block()
1550 return move_freepages(zone, start_page, end_page, migratetype); in move_freepages_block()
1604 static void steal_suitable_fallback(struct zone *zone, struct page *page, in steal_suitable_fallback() argument
1616 pages = move_freepages_block(zone, page, start_type); in steal_suitable_fallback()
1665 static void reserve_highatomic_pageblock(struct page *page, struct zone *zone, in reserve_highatomic_pageblock() argument
1675 max_managed = (zone->managed_pages / 100) + pageblock_nr_pages; in reserve_highatomic_pageblock()
1676 if (zone->nr_reserved_highatomic >= max_managed) in reserve_highatomic_pageblock()
1679 spin_lock_irqsave(&zone->lock, flags); in reserve_highatomic_pageblock()
1682 if (zone->nr_reserved_highatomic >= max_managed) in reserve_highatomic_pageblock()
1689 zone->nr_reserved_highatomic += pageblock_nr_pages; in reserve_highatomic_pageblock()
1691 move_freepages_block(zone, page, MIGRATE_HIGHATOMIC); in reserve_highatomic_pageblock()
1695 spin_unlock_irqrestore(&zone->lock, flags); in reserve_highatomic_pageblock()
1709 struct zone *zone; in unreserve_highatomic_pageblock() local
1713 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->high_zoneidx, in unreserve_highatomic_pageblock()
1716 if (zone->nr_reserved_highatomic <= pageblock_nr_pages) in unreserve_highatomic_pageblock()
1719 spin_lock_irqsave(&zone->lock, flags); in unreserve_highatomic_pageblock()
1721 struct free_area *area = &(zone->free_area[order]); in unreserve_highatomic_pageblock()
1735 zone->nr_reserved_highatomic -= min(pageblock_nr_pages, in unreserve_highatomic_pageblock()
1736 zone->nr_reserved_highatomic); in unreserve_highatomic_pageblock()
1748 move_freepages_block(zone, page, ac->migratetype); in unreserve_highatomic_pageblock()
1749 spin_unlock_irqrestore(&zone->lock, flags); in unreserve_highatomic_pageblock()
1752 spin_unlock_irqrestore(&zone->lock, flags); in unreserve_highatomic_pageblock()
1758 __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype) in __rmqueue_fallback() argument
1770 area = &(zone->free_area[current_order]); in __rmqueue_fallback()
1779 steal_suitable_fallback(zone, page, start_migratetype); in __rmqueue_fallback()
1786 expand(zone, page, order, current_order, area, in __rmqueue_fallback()
1810 static struct page *__rmqueue(struct zone *zone, unsigned int order, in __rmqueue() argument
1815 page = __rmqueue_smallest(zone, order, migratetype); in __rmqueue()
1818 page = __rmqueue_cma_fallback(zone, order); in __rmqueue()
1821 page = __rmqueue_fallback(zone, order, migratetype); in __rmqueue()
1833 static int rmqueue_bulk(struct zone *zone, unsigned int order, in rmqueue_bulk() argument
1839 spin_lock(&zone->lock); in rmqueue_bulk()
1841 struct page *page = __rmqueue(zone, order, migratetype, 0); in rmqueue_bulk()
1860 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, in rmqueue_bulk()
1863 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order)); in rmqueue_bulk()
1864 spin_unlock(&zone->lock); in rmqueue_bulk()
1877 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) in drain_zone_pages() argument
1886 free_pcppages_bulk(zone, to_drain, pcp); in drain_zone_pages()
1900 static void drain_pages_zone(unsigned int cpu, struct zone *zone) in drain_pages_zone() argument
1907 pset = per_cpu_ptr(zone->pageset, cpu); in drain_pages_zone()
1911 free_pcppages_bulk(zone, pcp->count, pcp); in drain_pages_zone()
1926 struct zone *zone; in drain_pages() local
1928 for_each_populated_zone(zone) { in drain_pages()
1929 drain_pages_zone(cpu, zone); in drain_pages()
1939 void drain_local_pages(struct zone *zone) in drain_local_pages() argument
1943 if (zone) in drain_local_pages()
1944 drain_pages_zone(cpu, zone); in drain_local_pages()
1960 void drain_all_pages(struct zone *zone) in drain_all_pages() argument
1978 struct zone *z; in drain_all_pages()
1981 if (zone) { in drain_all_pages()
1982 pcp = per_cpu_ptr(zone->pageset, cpu); in drain_all_pages()
2001 zone, 1); in drain_all_pages()
2006 void mark_free_pages(struct zone *zone) in mark_free_pages() argument
2013 if (zone_is_empty(zone)) in mark_free_pages()
2016 spin_lock_irqsave(&zone->lock, flags); in mark_free_pages()
2018 max_zone_pfn = zone_end_pfn(zone); in mark_free_pages()
2019 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) in mark_free_pages()
2028 list_for_each(curr, &zone->free_area[order].free_list[t]) { in mark_free_pages()
2036 spin_unlock_irqrestore(&zone->lock, flags); in mark_free_pages()
2046 struct zone *zone = page_zone(page); in free_hot_cold_page() local
2069 free_one_page(zone, page, pfn, 0, migratetype); in free_hot_cold_page()
2075 pcp = &this_cpu_ptr(zone->pageset)->pcp; in free_hot_cold_page()
2083 free_pcppages_bulk(zone, batch, pcp); in free_hot_cold_page()
2141 struct zone *zone; in __isolate_free_page() local
2146 zone = page_zone(page); in __isolate_free_page()
2151 watermark = low_wmark_pages(zone) + (1 << order); in __isolate_free_page()
2152 if (!zone_watermark_ok(zone, 0, watermark, 0, 0)) in __isolate_free_page()
2155 __mod_zone_freepage_state(zone, -(1UL << order), mt); in __isolate_free_page()
2160 zone->free_area[order].nr_free--; in __isolate_free_page()
2211 struct page *buffered_rmqueue(struct zone *preferred_zone, in buffered_rmqueue()
2212 struct zone *zone, unsigned int order, in buffered_rmqueue() argument
2224 pcp = &this_cpu_ptr(zone->pageset)->pcp; in buffered_rmqueue()
2227 pcp->count += rmqueue_bulk(zone, 0, in buffered_rmqueue()
2255 spin_lock_irqsave(&zone->lock, flags); in buffered_rmqueue()
2259 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); in buffered_rmqueue()
2264 page = __rmqueue(zone, order, migratetype, gfp_flags); in buffered_rmqueue()
2265 spin_unlock(&zone->lock); in buffered_rmqueue()
2268 __mod_zone_freepage_state(zone, -(1 << order), in buffered_rmqueue()
2272 __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order)); in buffered_rmqueue()
2273 if (atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 && in buffered_rmqueue()
2274 !test_bit(ZONE_FAIR_DEPLETED, &zone->flags)) in buffered_rmqueue()
2275 set_bit(ZONE_FAIR_DEPLETED, &zone->flags); in buffered_rmqueue()
2277 __count_zone_vm_events(PGALLOC, zone, 1 << order); in buffered_rmqueue()
2278 zone_statistics(preferred_zone, zone, gfp_flags); in buffered_rmqueue()
2281 VM_BUG_ON_PAGE(bad_range(zone, page), page); in buffered_rmqueue()
2373 static bool __zone_watermark_ok(struct zone *z, unsigned int order, in __zone_watermark_ok()
2441 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, in zone_watermark_ok()
2448 bool zone_watermark_ok_safe(struct zone *z, unsigned int order, in zone_watermark_ok_safe()
2461 static bool zone_local(struct zone *local_zone, struct zone *zone) in zone_local() argument
2463 return local_zone->node == zone->node; in zone_local()
2466 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) in zone_allows_reclaim() argument
2468 return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) < in zone_allows_reclaim()
2472 static bool zone_local(struct zone *local_zone, struct zone *zone) in zone_local() argument
2477 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) in zone_allows_reclaim() argument
2483 static void reset_alloc_batches(struct zone *preferred_zone) in reset_alloc_batches()
2485 struct zone *zone = preferred_zone->zone_pgdat->node_zones; in reset_alloc_batches() local
2488 mod_zone_page_state(zone, NR_ALLOC_BATCH, in reset_alloc_batches()
2489 high_wmark_pages(zone) - low_wmark_pages(zone) - in reset_alloc_batches()
2490 atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH])); in reset_alloc_batches()
2491 clear_bit(ZONE_FAIR_DEPLETED, &zone->flags); in reset_alloc_batches()
2492 } while (zone++ != preferred_zone); in reset_alloc_batches()
2506 struct zone *zone; in get_page_from_freelist() local
2517 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->high_zoneidx, in get_page_from_freelist()
2523 !cpuset_zone_allowed(zone, gfp_mask)) in get_page_from_freelist()
2532 if (!zone_local(ac->preferred_zone, zone)) in get_page_from_freelist()
2534 if (test_bit(ZONE_FAIR_DEPLETED, &zone->flags)) { in get_page_from_freelist()
2565 if (ac->spread_dirty_pages && !zone_dirty_ok(zone)) in get_page_from_freelist()
2568 mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK]; in get_page_from_freelist()
2569 if (!zone_watermark_ok(zone, order, mark, in get_page_from_freelist()
2579 !zone_allows_reclaim(ac->preferred_zone, zone)) in get_page_from_freelist()
2582 ret = zone_reclaim(zone, gfp_mask, order); in get_page_from_freelist()
2592 if (zone_watermark_ok(zone, order, mark, in get_page_from_freelist()
2601 page = buffered_rmqueue(ac->preferred_zone, zone, order, in get_page_from_freelist()
2612 reserve_highatomic_pageblock(page, zone, order); in get_page_from_freelist()
2809 struct zone *zone = page_zone(page); in __alloc_pages_direct_compact() local
2811 zone->compact_blockskip_flush = false; in __alloc_pages_direct_compact()
2812 compaction_defer_reset(zone, order, true); in __alloc_pages_direct_compact()
2924 struct zone *zone; in wake_all_kswapds() local
2926 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, in wake_all_kswapds()
2928 wakeup_kswapd(zone, order, zone_idx(ac->preferred_zone)); in wake_all_kswapds()
3220 if (unlikely(!zonelist->_zonerefs->zone)) in __alloc_pages_nodemask()
3561 struct zone *zone; in nr_free_zone_pages() local
3568 for_each_zone_zonelist(zone, z, zonelist, offset) { in nr_free_zone_pages()
3569 unsigned long size = zone->managed_pages; in nr_free_zone_pages()
3570 unsigned long high = high_wmark_pages(zone); in nr_free_zone_pages()
3601 static inline void show_node(struct zone *zone) in show_node() argument
3604 printk("Node %d ", zone_to_nid(zone)); in show_node()
3706 struct zone *zone; in show_free_areas() local
3708 for_each_populated_zone(zone) { in show_free_areas()
3709 if (skip_free_areas_node(filter, zone_to_nid(zone))) in show_free_areas()
3713 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count; in show_free_areas()
3742 for_each_populated_zone(zone) { in show_free_areas()
3745 if (skip_free_areas_node(filter, zone_to_nid(zone))) in show_free_areas()
3750 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count; in show_free_areas()
3752 show_node(zone); in show_free_areas()
3785 zone->name, in show_free_areas()
3786 K(zone_page_state(zone, NR_FREE_PAGES)), in show_free_areas()
3787 K(min_wmark_pages(zone)), in show_free_areas()
3788 K(low_wmark_pages(zone)), in show_free_areas()
3789 K(high_wmark_pages(zone)), in show_free_areas()
3790 K(zone_page_state(zone, NR_ACTIVE_ANON)), in show_free_areas()
3791 K(zone_page_state(zone, NR_INACTIVE_ANON)), in show_free_areas()
3792 K(zone_page_state(zone, NR_ACTIVE_FILE)), in show_free_areas()
3793 K(zone_page_state(zone, NR_INACTIVE_FILE)), in show_free_areas()
3794 K(zone_page_state(zone, NR_UNEVICTABLE)), in show_free_areas()
3795 K(zone_page_state(zone, NR_ISOLATED_ANON)), in show_free_areas()
3796 K(zone_page_state(zone, NR_ISOLATED_FILE)), in show_free_areas()
3797 K(zone->present_pages), in show_free_areas()
3798 K(zone->managed_pages), in show_free_areas()
3799 K(zone_page_state(zone, NR_MLOCK)), in show_free_areas()
3800 K(zone_page_state(zone, NR_FILE_DIRTY)), in show_free_areas()
3801 K(zone_page_state(zone, NR_WRITEBACK)), in show_free_areas()
3802 K(zone_page_state(zone, NR_FILE_MAPPED)), in show_free_areas()
3803 K(zone_page_state(zone, NR_SHMEM)), in show_free_areas()
3804 K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)), in show_free_areas()
3805 K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)), in show_free_areas()
3806 zone_page_state(zone, NR_KERNEL_STACK) * in show_free_areas()
3808 K(zone_page_state(zone, NR_PAGETABLE)), in show_free_areas()
3809 K(zone_page_state(zone, NR_UNSTABLE_NFS)), in show_free_areas()
3810 K(zone_page_state(zone, NR_BOUNCE)), in show_free_areas()
3812 K(this_cpu_read(zone->pageset->pcp.count)), in show_free_areas()
3813 K(zone_page_state(zone, NR_FREE_CMA_PAGES)), in show_free_areas()
3814 K(zone_page_state(zone, NR_WRITEBACK_TEMP)), in show_free_areas()
3815 K(zone_page_state(zone, NR_PAGES_SCANNED)), in show_free_areas()
3816 (!zone_reclaimable(zone) ? "yes" : "no") in show_free_areas()
3820 printk(" %ld", zone->lowmem_reserve[i]); in show_free_areas()
3824 for_each_populated_zone(zone) { in show_free_areas()
3829 if (skip_free_areas_node(filter, zone_to_nid(zone))) in show_free_areas()
3831 show_node(zone); in show_free_areas()
3832 printk("%s: ", zone->name); in show_free_areas()
3834 spin_lock_irqsave(&zone->lock, flags); in show_free_areas()
3836 struct free_area *area = &zone->free_area[order]; in show_free_areas()
3848 spin_unlock_irqrestore(&zone->lock, flags); in show_free_areas()
3864 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref) in zoneref_set_zone() argument
3866 zoneref->zone = zone; in zoneref_set_zone()
3867 zoneref->zone_idx = zone_idx(zone); in zoneref_set_zone()
3878 struct zone *zone; in build_zonelists_node() local
3883 zone = pgdat->node_zones + zone_type; in build_zonelists_node()
3884 if (populated_zone(zone)) { in build_zonelists_node()
3885 zoneref_set_zone(zone, in build_zonelists_node()
4082 for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++) in build_zonelists_in_node_order()
4085 zonelist->_zonerefs[j].zone = NULL; in build_zonelists_in_node_order()
4099 zonelist->_zonerefs[j].zone = NULL; in build_thisnode_zonelists()
4115 struct zone *z; in build_zonelists_in_zone_order()
4131 zonelist->_zonerefs[pos].zone = NULL; in build_zonelists_in_zone_order()
4180 zonelist->_zonerefs[0].zone = NULL; in build_zonelists()
4228 struct zone *zone; in local_memory_node() local
4233 &zone); in local_memory_node()
4234 return zone->node; in local_memory_node()
4275 zonelist->_zonerefs[j].zone = NULL; in build_zonelists()
4298 static void setup_zone_pageset(struct zone *zone);
4377 void __ref build_all_zonelists(pg_data_t *pgdat, struct zone *zone) in build_all_zonelists() argument
4385 if (zone) in build_all_zonelists()
4386 setup_zone_pageset(zone); in build_all_zonelists()
4488 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, in memmap_init_zone() argument
4494 struct zone *z; in memmap_init_zone()
4500 z = &pgdat->node_zones[zone]; in memmap_init_zone()
4532 __init_single_page(page, pfn, zone, nid); in memmap_init_zone()
4535 __init_single_pfn(pfn, zone, nid); in memmap_init_zone()
4540 static void __meminit zone_init_free_lists(struct zone *zone) in zone_init_free_lists() argument
4544 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]); in zone_init_free_lists()
4545 zone->free_area[order].nr_free = 0; in zone_init_free_lists()
4550 #define memmap_init(size, nid, zone, start_pfn) \ argument
4551 memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
4554 static int zone_batchsize(struct zone *zone) in zone_batchsize() argument
4565 batch = zone->managed_pages / 1024; in zone_batchsize()
4670 static void pageset_set_high_and_batch(struct zone *zone, in pageset_set_high_and_batch() argument
4675 (zone->managed_pages / in pageset_set_high_and_batch()
4678 pageset_set_batch(pcp, zone_batchsize(zone)); in pageset_set_high_and_batch()
4681 static void __meminit zone_pageset_init(struct zone *zone, int cpu) in zone_pageset_init() argument
4683 struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu); in zone_pageset_init()
4686 pageset_set_high_and_batch(zone, pcp); in zone_pageset_init()
4689 static void __meminit setup_zone_pageset(struct zone *zone) in setup_zone_pageset() argument
4692 zone->pageset = alloc_percpu(struct per_cpu_pageset); in setup_zone_pageset()
4694 zone_pageset_init(zone, cpu); in setup_zone_pageset()
4703 struct zone *zone; in setup_per_cpu_pageset() local
4705 for_each_populated_zone(zone) in setup_per_cpu_pageset()
4706 setup_zone_pageset(zone); in setup_per_cpu_pageset()
4710 int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages) in zone_wait_table_init() argument
4719 zone->wait_table_hash_nr_entries = in zone_wait_table_init()
4721 zone->wait_table_bits = in zone_wait_table_init()
4722 wait_table_bits(zone->wait_table_hash_nr_entries); in zone_wait_table_init()
4723 alloc_size = zone->wait_table_hash_nr_entries in zone_wait_table_init()
4727 zone->wait_table = (wait_queue_head_t *) in zone_wait_table_init()
4729 alloc_size, zone->zone_pgdat->node_id); in zone_wait_table_init()
4741 zone->wait_table = vmalloc(alloc_size); in zone_wait_table_init()
4743 if (!zone->wait_table) in zone_wait_table_init()
4746 for (i = 0; i < zone->wait_table_hash_nr_entries; ++i) in zone_wait_table_init()
4747 init_waitqueue_head(zone->wait_table + i); in zone_wait_table_init()
4752 static __meminit void zone_pcp_init(struct zone *zone) in zone_pcp_init() argument
4759 zone->pageset = &boot_pageset; in zone_pcp_init()
4761 if (populated_zone(zone)) in zone_pcp_init()
4763 zone->name, zone->present_pages, in zone_pcp_init()
4764 zone_batchsize(zone)); in zone_pcp_init()
4767 int __meminit init_currently_empty_zone(struct zone *zone, in init_currently_empty_zone() argument
4771 struct pglist_data *pgdat = zone->zone_pgdat; in init_currently_empty_zone()
4773 ret = zone_wait_table_init(zone, size); in init_currently_empty_zone()
4776 pgdat->nr_zones = zone_idx(zone) + 1; in init_currently_empty_zone()
4778 zone->zone_start_pfn = zone_start_pfn; in init_currently_empty_zone()
4783 (unsigned long)zone_idx(zone), in init_currently_empty_zone()
4786 zone_init_free_lists(zone); in init_currently_empty_zone()
5070 struct zone *zone = pgdat->node_zones + i; in calculate_node_totalpages() local
5080 zone->spanned_pages = size; in calculate_node_totalpages()
5081 zone->present_pages = real_size; in calculate_node_totalpages()
5115 struct zone *zone, in setup_usemap() argument
5120 zone->pageblock_flags = NULL; in setup_usemap()
5122 zone->pageblock_flags = in setup_usemap()
5127 static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone, in setup_usemap() argument
5214 struct zone *zone = pgdat->node_zones + j; in free_area_init_core() local
5217 size = zone->spanned_pages; in free_area_init_core()
5218 realsize = freesize = zone->present_pages; in free_area_init_core()
5258 zone->managed_pages = is_highmem_idx(j) ? realsize : freesize; in free_area_init_core()
5260 zone->node = nid; in free_area_init_core()
5261 zone->min_unmapped_pages = (freesize*sysctl_min_unmapped_ratio) in free_area_init_core()
5263 zone->min_slab_pages = (freesize * sysctl_min_slab_ratio) / 100; in free_area_init_core()
5265 zone->name = zone_names[j]; in free_area_init_core()
5266 spin_lock_init(&zone->lock); in free_area_init_core()
5267 spin_lock_init(&zone->lru_lock); in free_area_init_core()
5268 zone_seqlock_init(zone); in free_area_init_core()
5269 zone->zone_pgdat = pgdat; in free_area_init_core()
5270 zone_pcp_init(zone); in free_area_init_core()
5273 mod_zone_page_state(zone, NR_ALLOC_BATCH, zone->managed_pages); in free_area_init_core()
5275 lruvec_init(&zone->lruvec); in free_area_init_core()
5280 setup_usemap(pgdat, zone, zone_start_pfn, size); in free_area_init_core()
5281 ret = init_currently_empty_zone(zone, zone_start_pfn, size); in free_area_init_core()
5663 struct zone *zone = &pgdat->node_zones[zone_type]; in check_for_memory() local
5664 if (populated_zone(zone)) { in check_for_memory()
5965 struct zone *zone = pgdat->node_zones + i; in calculate_totalreserve_pages() local
5970 if (zone->lowmem_reserve[j] > max) in calculate_totalreserve_pages()
5971 max = zone->lowmem_reserve[j]; in calculate_totalreserve_pages()
5975 max += high_wmark_pages(zone); in calculate_totalreserve_pages()
5977 if (max > zone->managed_pages) in calculate_totalreserve_pages()
5978 max = zone->managed_pages; in calculate_totalreserve_pages()
5989 zone->dirty_balance_reserve = max; in calculate_totalreserve_pages()
6009 struct zone *zone = pgdat->node_zones + j; in setup_per_zone_lowmem_reserve() local
6010 unsigned long managed_pages = zone->managed_pages; in setup_per_zone_lowmem_reserve()
6012 zone->lowmem_reserve[j] = 0; in setup_per_zone_lowmem_reserve()
6016 struct zone *lower_zone; in setup_per_zone_lowmem_reserve()
6039 struct zone *zone; in __setup_per_zone_wmarks() local
6043 for_each_zone(zone) { in __setup_per_zone_wmarks()
6044 if (!is_highmem(zone)) in __setup_per_zone_wmarks()
6045 lowmem_pages += zone->managed_pages; in __setup_per_zone_wmarks()
6048 for_each_zone(zone) { in __setup_per_zone_wmarks()
6051 spin_lock_irqsave(&zone->lock, flags); in __setup_per_zone_wmarks()
6052 tmp = (u64)pages_min * zone->managed_pages; in __setup_per_zone_wmarks()
6054 if (is_highmem(zone)) { in __setup_per_zone_wmarks()
6066 min_pages = zone->managed_pages / 1024; in __setup_per_zone_wmarks()
6068 zone->watermark[WMARK_MIN] = min_pages; in __setup_per_zone_wmarks()
6074 zone->watermark[WMARK_MIN] = tmp; in __setup_per_zone_wmarks()
6077 zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + (tmp >> 2); in __setup_per_zone_wmarks()
6078 zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1); in __setup_per_zone_wmarks()
6080 __mod_zone_page_state(zone, NR_ALLOC_BATCH, in __setup_per_zone_wmarks()
6081 high_wmark_pages(zone) - low_wmark_pages(zone) - in __setup_per_zone_wmarks()
6082 atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH])); in __setup_per_zone_wmarks()
6084 spin_unlock_irqrestore(&zone->lock, flags); in __setup_per_zone_wmarks()
6126 static void __meminit calculate_zone_inactive_ratio(struct zone *zone) in calculate_zone_inactive_ratio() argument
6131 gb = zone->managed_pages >> (30 - PAGE_SHIFT); in calculate_zone_inactive_ratio()
6137 zone->inactive_ratio = ratio; in calculate_zone_inactive_ratio()
6142 struct zone *zone; in setup_per_zone_inactive_ratio() local
6144 for_each_zone(zone) in setup_per_zone_inactive_ratio()
6145 calculate_zone_inactive_ratio(zone); in setup_per_zone_inactive_ratio()
6223 struct zone *zone; in sysctl_min_unmapped_ratio_sysctl_handler() local
6230 for_each_zone(zone) in sysctl_min_unmapped_ratio_sysctl_handler()
6231 zone->min_unmapped_pages = (zone->managed_pages * in sysctl_min_unmapped_ratio_sysctl_handler()
6239 struct zone *zone; in sysctl_min_slab_ratio_sysctl_handler() local
6246 for_each_zone(zone) in sysctl_min_slab_ratio_sysctl_handler()
6247 zone->min_slab_pages = (zone->managed_pages * in sysctl_min_slab_ratio_sysctl_handler()
6278 struct zone *zone; in percpu_pagelist_fraction_sysctl_handler() local
6301 for_each_populated_zone(zone) { in percpu_pagelist_fraction_sysctl_handler()
6305 pageset_set_high_and_batch(zone, in percpu_pagelist_fraction_sysctl_handler()
6306 per_cpu_ptr(zone->pageset, cpu)); in percpu_pagelist_fraction_sysctl_handler()
6425 static inline unsigned long *get_pageblock_bitmap(struct zone *zone, in get_pageblock_bitmap() argument
6431 return zone->pageblock_flags; in get_pageblock_bitmap()
6435 static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn) in pfn_to_bitidx() argument
6441 pfn = pfn - round_down(zone->zone_start_pfn, pageblock_nr_pages); in pfn_to_bitidx()
6459 struct zone *zone; in get_pfnblock_flags_mask() local
6464 zone = page_zone(page); in get_pfnblock_flags_mask()
6465 bitmap = get_pageblock_bitmap(zone, pfn); in get_pfnblock_flags_mask()
6466 bitidx = pfn_to_bitidx(zone, pfn); in get_pfnblock_flags_mask()
6488 struct zone *zone; in set_pfnblock_flags_mask() local
6495 zone = page_zone(page); in set_pfnblock_flags_mask()
6496 bitmap = get_pageblock_bitmap(zone, pfn); in set_pfnblock_flags_mask()
6497 bitidx = pfn_to_bitidx(zone, pfn); in set_pfnblock_flags_mask()
6501 VM_BUG_ON_PAGE(!zone_spans_pfn(zone, pfn), page); in set_pfnblock_flags_mask()
6524 bool has_unmovable_pages(struct zone *zone, struct page *page, int count, in has_unmovable_pages() argument
6534 if (zone_idx(zone) == ZONE_MOVABLE) in has_unmovable_pages()
6601 struct zone *zone; in is_pageblock_removable_nolock() local
6614 zone = page_zone(page); in is_pageblock_removable_nolock()
6616 if (!zone_spans_pfn(zone, pfn)) in is_pageblock_removable_nolock()
6619 return !has_unmovable_pages(zone, page, 0, true); in is_pageblock_removable_nolock()
6667 nr_reclaimed = reclaim_clean_pages_from_list(cc->zone, in __alloc_contig_migrate_range()
6711 .zone = page_zone(pfn_to_page(start)), in alloc_contig_range()
6769 drain_all_pages(cc.zone); in alloc_contig_range()
6827 void __meminit zone_pcp_update(struct zone *zone) in zone_pcp_update() argument
6832 pageset_set_high_and_batch(zone, in zone_pcp_update()
6833 per_cpu_ptr(zone->pageset, cpu)); in zone_pcp_update()
6838 void zone_pcp_reset(struct zone *zone) in zone_pcp_reset() argument
6846 if (zone->pageset != &boot_pageset) { in zone_pcp_reset()
6848 pset = per_cpu_ptr(zone->pageset, cpu); in zone_pcp_reset()
6849 drain_zonestat(zone, pset); in zone_pcp_reset()
6851 free_percpu(zone->pageset); in zone_pcp_reset()
6852 zone->pageset = &boot_pageset; in zone_pcp_reset()
6865 struct zone *zone; in __offline_isolated_pages() local
6875 zone = page_zone(pfn_to_page(pfn)); in __offline_isolated_pages()
6876 spin_lock_irqsave(&zone->lock, flags); in __offline_isolated_pages()
6903 zone->free_area[order].nr_free--; in __offline_isolated_pages()
6908 spin_unlock_irqrestore(&zone->lock, flags); in __offline_isolated_pages()
6915 struct zone *zone = page_zone(page); in is_free_buddy_page() local
6920 spin_lock_irqsave(&zone->lock, flags); in is_free_buddy_page()
6927 spin_unlock_irqrestore(&zone->lock, flags); in is_free_buddy_page()