Lines Matching refs:cc
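
Every match below dereferences the same control structure, struct compact_control, which the kernel's memory-compaction code threads through all of its helpers. For orientation, here is a minimal sketch of that structure limited to the fields that actually appear in the matched lines; the field types and the comments are inferred from how the fields are used here, not copied from the kernel's own definition (which lives in mm/internal.h and has more members).

/* Sketch only: fields inferred from the matches below, not the full struct. */
struct compact_control {
        struct list_head freepages;     /* pages isolated as migration targets */
        struct list_head migratepages;  /* pages isolated for migration */
        unsigned long nr_freepages;     /* count of pages on freepages */
        unsigned long nr_migratepages;  /* count of pages on migratepages */
        unsigned long free_pfn;         /* free scanner position (walks down) */
        unsigned long migrate_pfn;      /* migrate scanner position (walks up) */
        unsigned long last_migrated_pfn;/* last block a page was migrated from */
        enum migrate_mode mode;         /* MIGRATE_ASYNC vs. the sync modes */
        bool ignore_skip_hint;          /* scan pageblocks marked "skip" anyway */
        int order;                      /* allocation order being compacted for */
        gfp_t gfp_mask;                 /* gfp mask of the triggering allocation */
        int alloc_flags;                /* alloc flags of the triggering allocation */
        int classzone_idx;              /* zone index of the triggering allocation */
        int contended;                  /* COMPACT_CONTENDED_{LOCK,SCHED} if aborted */
        struct zone *zone;              /* zone being compacted */
};
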

190 static inline bool isolation_suitable(struct compact_control *cc,  in isolation_suitable()  argument
193 if (cc->ignore_skip_hint) in isolation_suitable()
257 static void update_pageblock_skip(struct compact_control *cc, in update_pageblock_skip() argument
261 struct zone *zone = cc->zone; in update_pageblock_skip()
264 if (cc->ignore_skip_hint) in update_pageblock_skip()
281 if (cc->mode != MIGRATE_ASYNC && in update_pageblock_skip()
290 static inline bool isolation_suitable(struct compact_control *cc, in isolation_suitable() argument
296 static void update_pageblock_skip(struct compact_control *cc, in update_pageblock_skip() argument
312 struct compact_control *cc) in compact_trylock_irqsave() argument
314 if (cc->mode == MIGRATE_ASYNC) { in compact_trylock_irqsave()
316 cc->contended = COMPACT_CONTENDED_LOCK; in compact_trylock_irqsave()
342 unsigned long flags, bool *locked, struct compact_control *cc) in compact_unlock_should_abort() argument
350 cc->contended = COMPACT_CONTENDED_SCHED; in compact_unlock_should_abort()
355 if (cc->mode == MIGRATE_ASYNC) { in compact_unlock_should_abort()
356 cc->contended = COMPACT_CONTENDED_SCHED; in compact_unlock_should_abort()
374 static inline bool compact_should_abort(struct compact_control *cc) in compact_should_abort() argument
378 if (cc->mode == MIGRATE_ASYNC) { in compact_should_abort()
379 cc->contended = COMPACT_CONTENDED_SCHED; in compact_should_abort()
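
The three helpers above (compact_trylock_irqsave, compact_unlock_should_abort, compact_should_abort) share one pattern: when compaction runs in MIGRATE_ASYNC mode and hits lock contention or a pending reschedule, they record the reason in cc->contended and tell the caller to bail out rather than block. A rough reconstruction of the simplest of the three, pieced together from the matched lines; the need_resched()/cond_resched() plumbing around them is an assumption about the surrounding code.

/*
 * Sketch reconstructed from the matches above: async compaction gives up
 * instead of scheduling away, and notes why in cc->contended so the
 * direct-compaction caller can see the attempt was abandoned.
 */
static inline bool compact_should_abort(struct compact_control *cc)
{
        if (need_resched()) {
                if (cc->mode == MIGRATE_ASYNC) {
                        cc->contended = COMPACT_CONTENDED_SCHED;
                        return true;            /* abort async compaction */
                }
                cond_resched();                 /* sync modes just reschedule */
        }
        return false;
}
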
394 static unsigned long isolate_freepages_block(struct compact_control *cc, in isolate_freepages_block() argument
419 && compact_unlock_should_abort(&cc->zone->lock, flags, in isolate_freepages_block()
420 &locked, cc)) in isolate_freepages_block()
466 locked = compact_trylock_irqsave(&cc->zone->lock, in isolate_freepages_block()
467 &flags, cc); in isolate_freepages_block()
486 cc->nr_freepages += isolated; in isolate_freepages_block()
488 cc->nr_migratepages <= cc->nr_freepages) { in isolate_freepages_block()
528 spin_unlock_irqrestore(&cc->zone->lock, flags); in isolate_freepages_block()
532 update_pageblock_skip(cc, valid_page, total_isolated, false); in isolate_freepages_block()
554 isolate_freepages_range(struct compact_control *cc, in isolate_freepages_range() argument
580 if (!pageblock_pfn_to_page(pfn, block_end_pfn, cc->zone)) in isolate_freepages_range()
583 isolated = isolate_freepages_block(cc, &isolate_start_pfn, in isolate_freepages_range()
615 static void acct_isolated(struct zone *zone, struct compact_control *cc) in acct_isolated() argument
620 if (list_empty(&cc->migratepages)) in acct_isolated()
623 list_for_each_entry(page, &cc->migratepages, lru) in acct_isolated()
664 isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, in isolate_migratepages_block() argument
667 struct zone *zone = cc->zone; in isolate_migratepages_block()
669 struct list_head *migratelist = &cc->migratepages; in isolate_migratepages_block()
683 if (cc->mode == MIGRATE_ASYNC) in isolate_migratepages_block()
692 if (compact_should_abort(cc)) in isolate_migratepages_block()
706 &locked, cc)) in isolate_migratepages_block()
783 &flags, cc); in isolate_migratepages_block()
815 cc->nr_migratepages++; in isolate_migratepages_block()
819 if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) { in isolate_migratepages_block()
840 update_pageblock_skip(cc, valid_page, nr_isolated, true); in isolate_migratepages_block()
863 isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn, in isolate_migratepages_range() argument
877 if (!pageblock_pfn_to_page(pfn, block_end_pfn, cc->zone)) in isolate_migratepages_range()
880 pfn = isolate_migratepages_block(cc, pfn, block_end_pfn, in isolate_migratepages_range()
886 if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) in isolate_migratepages_range()
889 acct_isolated(cc->zone, cc); in isolate_migratepages_range()
923 static inline bool compact_scanners_met(struct compact_control *cc) in compact_scanners_met() argument
925 return (cc->free_pfn >> pageblock_order) in compact_scanners_met()
926 <= (cc->migrate_pfn >> pageblock_order); in compact_scanners_met()
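
compact_scanners_met encodes the termination condition for the two scanners: the migrate scanner walks page frame numbers upward from the start of the zone while the free scanner walks downward from the end, and the zone is done once they land in the same pageblock. Reassembled from the matched lines:

/* True once the downward-moving free scanner reaches (or passes) the
 * pageblock that the upward-moving migrate scanner is in. */
static inline bool compact_scanners_met(struct compact_control *cc)
{
        return (cc->free_pfn >> pageblock_order)
                <= (cc->migrate_pfn >> pageblock_order);
}
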
933 static void isolate_freepages(struct compact_control *cc) in isolate_freepages() argument
935 struct zone *zone = cc->zone; in isolate_freepages()
941 struct list_head *freelist = &cc->freepages; in isolate_freepages()
954 isolate_start_pfn = cc->free_pfn; in isolate_freepages()
955 block_start_pfn = cc->free_pfn & ~(pageblock_nr_pages-1); in isolate_freepages()
958 low_pfn = ALIGN(cc->migrate_pfn + 1, pageblock_nr_pages); in isolate_freepages()
976 && compact_should_abort(cc)) in isolate_freepages()
989 if (!isolation_suitable(cc, page)) in isolate_freepages()
993 isolate_freepages_block(cc, &isolate_start_pfn, in isolate_freepages()
1007 if ((cc->nr_freepages >= cc->nr_migratepages) in isolate_freepages()
1008 || cc->contended) { in isolate_freepages()
1031 cc->free_pfn = isolate_start_pfn; in isolate_freepages()
1042 struct compact_control *cc = (struct compact_control *)data; in compaction_alloc() local
1049 if (list_empty(&cc->freepages)) { in compaction_alloc()
1050 if (!cc->contended) in compaction_alloc()
1051 isolate_freepages(cc); in compaction_alloc()
1053 if (list_empty(&cc->freepages)) in compaction_alloc()
1057 freepage = list_entry(cc->freepages.next, struct page, lru); in compaction_alloc()
1059 cc->nr_freepages--; in compaction_alloc()
1071 struct compact_control *cc = (struct compact_control *)data; in compaction_free() local
1073 list_add(&page->lru, &cc->freepages); in compaction_free()
1074 cc->nr_freepages++; in compaction_free()
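
compaction_alloc and compaction_free are the target-page callbacks that compact_zone later hands to migrate_pages() (see the call at line 1403 below): the allocator pops an isolated free page off cc->freepages, refilling the list via isolate_freepages() when it runs dry, and the free callback returns an unused page to the list. A sketch assembled from the matched lines; the exact prototypes are assumptions based on the new_page_t/free_page_t callback types of this kernel era, and error handling is elided.

/* Sketch: hand out one isolated free page as a migration target. */
static struct page *compaction_alloc(struct page *migratepage,
                                        unsigned long data, int **result)
{
        struct compact_control *cc = (struct compact_control *)data;
        struct page *freepage;

        if (list_empty(&cc->freepages)) {
                if (!cc->contended)
                        isolate_freepages(cc);  /* try to refill the list */
                if (list_empty(&cc->freepages))
                        return NULL;            /* nothing to migrate to */
        }

        freepage = list_entry(cc->freepages.next, struct page, lru);
        list_del(&freepage->lru);
        cc->nr_freepages--;

        return freepage;
}

/* Sketch: migration did not consume the page, so put it back on the list. */
static void compaction_free(struct page *page, unsigned long data)
{
        struct compact_control *cc = (struct compact_control *)data;

        list_add(&page->lru, &cc->freepages);
        cc->nr_freepages++;
}
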
1096 struct compact_control *cc) in isolate_migratepages() argument
1103 (cc->mode == MIGRATE_ASYNC ? ISOLATE_ASYNC_MIGRATE : 0); in isolate_migratepages()
1109 low_pfn = cc->migrate_pfn; in isolate_migratepages()
1118 for (; end_pfn <= cc->free_pfn; in isolate_migratepages()
1127 && compact_should_abort(cc)) in isolate_migratepages()
1135 if (!isolation_suitable(cc, page)) in isolate_migratepages()
1143 if (cc->mode == MIGRATE_ASYNC && in isolate_migratepages()
1149 low_pfn = isolate_migratepages_block(cc, low_pfn, end_pfn, in isolate_migratepages()
1152 if (!low_pfn || cc->contended) { in isolate_migratepages()
1153 acct_isolated(zone, cc); in isolate_migratepages()
1163 if (cc->nr_migratepages && !cc->last_migrated_pfn) in isolate_migratepages()
1164 cc->last_migrated_pfn = isolate_start_pfn; in isolate_migratepages()
1174 acct_isolated(zone, cc); in isolate_migratepages()
1176 cc->migrate_pfn = low_pfn; in isolate_migratepages()
1178 return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE; in isolate_migratepages()
1190 static int __compact_finished(struct zone *zone, struct compact_control *cc, in __compact_finished() argument
1196 if (cc->contended || fatal_signal_pending(current)) in __compact_finished()
1200 if (compact_scanners_met(cc)) { in __compact_finished()
1216 if (is_via_compact_memory(cc->order)) in __compact_finished()
1222 if (!zone_watermark_ok(zone, cc->order, watermark, cc->classzone_idx, in __compact_finished()
1223 cc->alloc_flags)) in __compact_finished()
1227 for (order = cc->order; order < MAX_ORDER; order++) { in __compact_finished()
1253 static int compact_finished(struct zone *zone, struct compact_control *cc, in compact_finished() argument
1258 ret = __compact_finished(zone, cc, migratetype); in compact_finished()
1259 trace_mm_compaction_finished(zone, cc->order, ret); in compact_finished()
1331 static int compact_zone(struct zone *zone, struct compact_control *cc) in compact_zone() argument
1336 const int migratetype = gfpflags_to_migratetype(cc->gfp_mask); in compact_zone()
1337 const bool sync = cc->mode != MIGRATE_ASYNC; in compact_zone()
1339 ret = compaction_suitable(zone, cc->order, cc->alloc_flags, in compact_zone()
1340 cc->classzone_idx); in compact_zone()
1356 if (compaction_restarting(zone, cc->order) && !current_is_kswapd()) in compact_zone()
1364 cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync]; in compact_zone()
1365 cc->free_pfn = zone->compact_cached_free_pfn; in compact_zone()
1366 if (cc->free_pfn < start_pfn || cc->free_pfn > end_pfn) { in compact_zone()
1367 cc->free_pfn = end_pfn & ~(pageblock_nr_pages-1); in compact_zone()
1368 zone->compact_cached_free_pfn = cc->free_pfn; in compact_zone()
1370 if (cc->migrate_pfn < start_pfn || cc->migrate_pfn > end_pfn) { in compact_zone()
1371 cc->migrate_pfn = start_pfn; in compact_zone()
1372 zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn; in compact_zone()
1373 zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn; in compact_zone()
1375 cc->last_migrated_pfn = 0; in compact_zone()
1377 trace_mm_compaction_begin(start_pfn, cc->migrate_pfn, in compact_zone()
1378 cc->free_pfn, end_pfn, sync); in compact_zone()
1382 while ((ret = compact_finished(zone, cc, migratetype)) == in compact_zone()
1386 switch (isolate_migratepages(zone, cc)) { in compact_zone()
1389 putback_movable_pages(&cc->migratepages); in compact_zone()
1390 cc->nr_migratepages = 0; in compact_zone()
1403 err = migrate_pages(&cc->migratepages, compaction_alloc, in compact_zone()
1404 compaction_free, (unsigned long)cc, cc->mode, in compact_zone()
1407 trace_mm_compaction_migratepages(cc->nr_migratepages, err, in compact_zone()
1408 &cc->migratepages); in compact_zone()
1411 cc->nr_migratepages = 0; in compact_zone()
1413 putback_movable_pages(&cc->migratepages); in compact_zone()
1418 if (err == -ENOMEM && !compact_scanners_met(cc)) { in compact_zone()
1432 if (cc->order > 0 && cc->last_migrated_pfn) { in compact_zone()
1435 cc->migrate_pfn & ~((1UL << cc->order) - 1); in compact_zone()
1437 if (cc->last_migrated_pfn < current_block_start) { in compact_zone()
1443 cc->last_migrated_pfn = 0; in compact_zone()
1454 if (cc->nr_freepages > 0) { in compact_zone()
1455 unsigned long free_pfn = release_freepages(&cc->freepages); in compact_zone()
1457 cc->nr_freepages = 0; in compact_zone()
1469 trace_mm_compaction_end(start_pfn, cc->migrate_pfn, in compact_zone()
1470 cc->free_pfn, end_pfn, sync, ret); in compact_zone()
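
compact_zone is where the pieces above meet: after seeding cc->migrate_pfn and cc->free_pfn from the zone's cached scanner positions, it loops until compact_finished() says to stop, isolating a batch of movable pages with isolate_migratepages() and handing them to migrate_pages() with compaction_alloc/compaction_free as the target-page callbacks. A heavily trimmed sketch of that loop, reconstructed from the matched lines; the wrapper name is made up for illustration, and the deferred-compaction, watermark, and pcplist-drain handling visible in the full function are omitted.

/*
 * Sketch of compact_zone()'s core loop only; scanner-position setup,
 * watermark checks, draining and tracing are trimmed.
 */
static int compact_zone_core_loop(struct zone *zone, struct compact_control *cc)
{
        const int migratetype = gfpflags_to_migratetype(cc->gfp_mask);
        int ret;

        while ((ret = compact_finished(zone, cc, migratetype)) == COMPACT_CONTINUE) {
                int err;

                switch (isolate_migratepages(zone, cc)) {
                case ISOLATE_ABORT:             /* contended or fatal signal */
                        putback_movable_pages(&cc->migratepages);
                        cc->nr_migratepages = 0;
                        return COMPACT_PARTIAL;
                case ISOLATE_NONE:              /* nothing in this block, scan on */
                        continue;
                case ISOLATE_SUCCESS:
                        break;
                }

                err = migrate_pages(&cc->migratepages, compaction_alloc,
                                compaction_free, (unsigned long)cc, cc->mode,
                                MR_COMPACTION);

                cc->nr_migratepages = 0;
                if (err) {
                        putback_movable_pages(&cc->migratepages);
                        /* out of memory before the scanners met: give up */
                        if (err == -ENOMEM && !compact_scanners_met(cc))
                                return COMPACT_PARTIAL;
                }
        }

        return ret;
}
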
1483 struct compact_control cc = { in compact_zone_order() local
1493 INIT_LIST_HEAD(&cc.freepages); in compact_zone_order()
1494 INIT_LIST_HEAD(&cc.migratepages); in compact_zone_order()
1496 ret = compact_zone(zone, &cc); in compact_zone_order()
1498 VM_BUG_ON(!list_empty(&cc.freepages)); in compact_zone_order()
1499 VM_BUG_ON(!list_empty(&cc.migratepages)); in compact_zone_order()
1501 *contended = cc.contended; in compact_zone_order()
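
compact_zone_order is the direct-compaction entry point: it builds a compact_control on the stack from the allocation request, runs compact_zone(), and reports back whether the attempt was abandoned due to contention. A sketch based on the matched lines; the parameter list and initializer fields are assumptions filled in from how the same fields are used elsewhere in this listing.

/* Sketch: per-zone direct compaction for one allocation attempt. */
static unsigned long compact_zone_order(struct zone *zone, int order,
                gfp_t gfp_mask, enum migrate_mode mode, int *contended,
                int alloc_flags, int classzone_idx)
{
        unsigned long ret;
        struct compact_control cc = {
                .nr_freepages = 0,
                .nr_migratepages = 0,
                .order = order,
                .gfp_mask = gfp_mask,
                .zone = zone,
                .mode = mode,
                .alloc_flags = alloc_flags,
                .classzone_idx = classzone_idx,
        };
        INIT_LIST_HEAD(&cc.freepages);
        INIT_LIST_HEAD(&cc.migratepages);

        ret = compact_zone(zone, &cc);

        /* every isolated page must have been migrated or put back by now */
        VM_BUG_ON(!list_empty(&cc.freepages));
        VM_BUG_ON(!list_empty(&cc.migratepages));

        *contended = cc.contended;      /* why compaction backed off, if it did */
        return ret;
}
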
1624 static void __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc) in __compact_pgdat() argument
1635 cc->nr_freepages = 0; in __compact_pgdat()
1636 cc->nr_migratepages = 0; in __compact_pgdat()
1637 cc->zone = zone; in __compact_pgdat()
1638 INIT_LIST_HEAD(&cc->freepages); in __compact_pgdat()
1639 INIT_LIST_HEAD(&cc->migratepages); in __compact_pgdat()
1646 if (is_via_compact_memory(cc->order)) in __compact_pgdat()
1649 if (is_via_compact_memory(cc->order) || in __compact_pgdat()
1650 !compaction_deferred(zone, cc->order)) in __compact_pgdat()
1651 compact_zone(zone, cc); in __compact_pgdat()
1653 if (cc->order > 0) { in __compact_pgdat()
1654 if (zone_watermark_ok(zone, cc->order, in __compact_pgdat()
1656 compaction_defer_reset(zone, cc->order, false); in __compact_pgdat()
1659 VM_BUG_ON(!list_empty(&cc->freepages)); in __compact_pgdat()
1660 VM_BUG_ON(!list_empty(&cc->migratepages)); in __compact_pgdat()
1666 struct compact_control cc = { in compact_pgdat() local
1674 __compact_pgdat(pgdat, &cc); in compact_pgdat()
1679 struct compact_control cc = { in compact_node() local
1685 __compact_pgdat(NODE_DATA(nid), &cc); in compact_node()
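
The last two callers wrap __compact_pgdat() for the non-direct paths: kswapd-driven compaction (compact_pgdat) and explicit whole-node compaction (compact_node), which uses the order sentinel -1 that is_via_compact_memory() tests for above. A sketch with the initializer values assumed from this kernel era's behaviour rather than copied from the matched lines.

/* Sketch: compaction requested by kswapd for a given order. */
void compact_pgdat(pg_data_t *pgdat, int order)
{
        struct compact_control cc = {
                .order = order,
                .mode = MIGRATE_ASYNC,          /* kswapd never blocks here */
        };

        if (!order)
                return;

        __compact_pgdat(pgdat, &cc);
}

/* Sketch: full-node compaction, e.g. via /proc/sys/vm/compact_memory. */
static void compact_node(int nid)
{
        struct compact_control cc = {
                .order = -1,                    /* "compact everything" sentinel */
                .mode = MIGRATE_SYNC,
                .ignore_skip_hint = true,       /* rescan even "skip" pageblocks */
        };

        __compact_pgdat(NODE_DATA(nid), &cc);
}
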