Lines matching references to `b` (struct vmballoon *) in the VMware balloon driver, drivers/misc/vmw_balloon.c
224 static bool vmballoon_send_start(struct vmballoon *b) in vmballoon_send_start() argument
228 STATS_INC(b->stats.start); in vmballoon_send_start()
235 STATS_INC(b->stats.start_fail); in vmballoon_send_start()
239 static bool vmballoon_check_status(struct vmballoon *b, unsigned long status) in vmballoon_check_status() argument
246 b->reset_required = true; in vmballoon_check_status()
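For orientation, a minimal sketch of the command/status pattern these two helpers implement, assuming the driver's VMWARE_BALLOON_CMD() hypercall macro and the VMW_BALLOON_SUCCESS / VMW_BALLOON_ERROR_RESET status codes; identifiers not visible in the fragments above are reconstructed for illustration:

	/* Ask the hypervisor to start ballooning; the status tells us whether it agreed. */
	static bool vmballoon_send_start(struct vmballoon *b)
	{
		unsigned long status, dummy;

		STATS_INC(b->stats.start);

		/* VMW_BALLOON_PROTOCOL_VERSION is an assumed name for the START argument. */
		status = VMWARE_BALLOON_CMD(START, VMW_BALLOON_PROTOCOL_VERSION, dummy);
		if (status == VMW_BALLOON_SUCCESS)
			return true;

		pr_debug("%s - failed, hv returns %ld\n", __func__, status);
		STATS_INC(b->stats.start_fail);
		return false;
	}

	/* Any command may report ERROR_RESET; remember it so the worker reinitializes. */
	static bool vmballoon_check_status(struct vmballoon *b, unsigned long status)
	{
		switch (status) {
		case VMW_BALLOON_SUCCESS:
			return true;

		case VMW_BALLOON_ERROR_RESET:
			b->reset_required = true;
			/* fall through */
		default:
			return false;
		}
	}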
260 static bool vmballoon_send_guest_id(struct vmballoon *b) in vmballoon_send_guest_id() argument
266 STATS_INC(b->stats.guest_type); in vmballoon_send_guest_id()
268 if (vmballoon_check_status(b, status)) in vmballoon_send_guest_id()
272 STATS_INC(b->stats.guest_type_fail); in vmballoon_send_guest_id()
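The guest-ID handshake follows the same pattern; a sketch, with the GUEST_ID command and VMW_BALLOON_GUEST_ID constant assumed from the driver rather than shown in the listing:

	/* Identify the guest OS to the hypervisor once ballooning has started. */
	static bool vmballoon_send_guest_id(struct vmballoon *b)
	{
		unsigned long status, dummy;

		status = VMWARE_BALLOON_CMD(GUEST_ID, VMW_BALLOON_GUEST_ID, dummy);

		STATS_INC(b->stats.guest_type);

		if (vmballoon_check_status(b, status))
			return true;

		pr_debug("%s - failed, hv returns %ld\n", __func__, status);
		STATS_INC(b->stats.guest_type_fail);
		return false;
	}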
279 static bool vmballoon_send_get_target(struct vmballoon *b, u32 *new_target) in vmballoon_send_get_target() argument
291 si_meminfo(&b->sysinfo); in vmballoon_send_get_target()
292 limit = b->sysinfo.totalram; in vmballoon_send_get_target()
300 STATS_INC(b->stats.target); in vmballoon_send_get_target()
303 if (vmballoon_check_status(b, status)) { in vmballoon_send_get_target()
309 STATS_INC(b->stats.target_fail); in vmballoon_send_get_target()
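A sketch of how the target query around these fragments plausibly fits together: the guest re-reads its RAM size via si_meminfo(), passes it as a sanity limit, and receives the balloon target back. The GET_TARGET command name and the 32-bit limit check are assumptions consistent with the one-word protocol:

	/* Retrieve the hypervisor's balloon target, bounded by guest RAM size. */
	static bool vmballoon_send_get_target(struct vmballoon *b, u32 *new_target)
	{
		unsigned long status, target, limit;

		/* si_meminfo() is cheap; re-read it so the limit tracks memory hotplug. */
		si_meminfo(&b->sysinfo);
		limit = b->sysinfo.totalram;

		/* The protocol carries a 32-bit value; bail out if the limit overflows. */
		if (limit != (u32)limit)
			return false;

		STATS_INC(b->stats.target);

		status = VMWARE_BALLOON_CMD(GET_TARGET, limit, target);
		if (vmballoon_check_status(b, status)) {
			*new_target = target;
			return true;
		}

		pr_debug("%s - failed, hv returns %ld\n", __func__, status);
		STATS_INC(b->stats.target_fail);
		return false;
	}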
318 static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn, in vmballoon_send_lock_page() argument
328 STATS_INC(b->stats.lock); in vmballoon_send_lock_page()
331 if (vmballoon_check_status(b, status)) in vmballoon_send_lock_page()
335 STATS_INC(b->stats.lock_fail); in vmballoon_send_lock_page()
343 static bool vmballoon_send_unlock_page(struct vmballoon *b, unsigned long pfn) in vmballoon_send_unlock_page() argument
352 STATS_INC(b->stats.unlock); in vmballoon_send_unlock_page()
355 if (vmballoon_check_status(b, status)) in vmballoon_send_unlock_page()
359 STATS_INC(b->stats.unlock_fail); in vmballoon_send_unlock_page()
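The LOCK/UNLOCK helpers notify the hypervisor that a pfn has been ballooned or released. A hedged sketch; the hv_status out-parameter, return-value convention, and pfn-overflow check are reconstructed, not shown in the fragments:

	/* Tell the hypervisor a pfn is now ballooned; >0 means it refused this page. */
	static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,
					    unsigned int *hv_status)
	{
		unsigned long status, dummy;

		if (pfn != (u32)pfn)
			return -1;	/* pfn does not fit the 32-bit protocol */

		STATS_INC(b->stats.lock);

		*hv_status = status = VMWARE_BALLOON_CMD(LOCK, pfn, dummy);
		if (vmballoon_check_status(b, status))
			return 0;

		pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
		STATS_INC(b->stats.lock_fail);
		return 1;
	}

	/* Tell the hypervisor a ballooned pfn is about to be given back to the guest. */
	static bool vmballoon_send_unlock_page(struct vmballoon *b, unsigned long pfn)
	{
		unsigned long status, dummy;

		if (pfn != (u32)pfn)
			return false;

		STATS_INC(b->stats.unlock);

		status = VMWARE_BALLOON_CMD(UNLOCK, pfn, dummy);
		if (vmballoon_check_status(b, status))
			return true;

		pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
		STATS_INC(b->stats.unlock_fail);
		return false;
	}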
369 static void vmballoon_pop(struct vmballoon *b) in vmballoon_pop() argument
374 list_for_each_entry_safe(page, next, &b->pages, lru) { in vmballoon_pop()
377 STATS_INC(b->stats.free); in vmballoon_pop()
378 b->size--; in vmballoon_pop()
380 if (++count >= b->rate_free) { in vmballoon_pop()
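vmballoon_pop() drops every ballooned page back to the guest without telling the monitor, which is only safe when a reset is pending. A sketch built around the fragments above:

	/* Free every ballooned page back to the guest, e.g. before a reset. */
	static void vmballoon_pop(struct vmballoon *b)
	{
		struct page *page, *next;
		unsigned int count = 0;

		list_for_each_entry_safe(page, next, &b->pages, lru) {
			list_del(&page->lru);
			__free_page(page);
			STATS_INC(b->stats.free);
			b->size--;

			/* Yield periodically so a large balloon does not hog the CPU. */
			if (++count >= b->rate_free) {
				count = 0;
				cond_resched();
			}
		}
	}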
392 static void vmballoon_reset(struct vmballoon *b) in vmballoon_reset() argument
395 vmballoon_pop(b); in vmballoon_reset()
397 if (vmballoon_send_start(b)) { in vmballoon_reset()
398 b->reset_required = false; in vmballoon_reset()
399 if (!vmballoon_send_guest_id(b)) in vmballoon_reset()
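The reset path simply drops all pages and redoes the handshake; a sketch (the error message is an assumption):

	/* After ERROR_RESET: pop the balloon and redo the START/GUEST_ID handshake. */
	static void vmballoon_reset(struct vmballoon *b)
	{
		/* free all pages, skipping monitor unlock */
		vmballoon_pop(b);

		if (vmballoon_send_start(b)) {
			b->reset_required = false;
			if (!vmballoon_send_guest_id(b))
				pr_err("failed to send guest ID to the host\n");
		}
	}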
410 static int vmballoon_reserve_page(struct vmballoon *b, bool can_sleep) in vmballoon_reserve_page() argument
420 STATS_INC(b->stats.alloc); in vmballoon_reserve_page()
422 STATS_INC(b->stats.sleep_alloc); in vmballoon_reserve_page()
427 STATS_INC(b->stats.alloc_fail); in vmballoon_reserve_page()
429 STATS_INC(b->stats.sleep_alloc_fail); in vmballoon_reserve_page()
434 locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status); in vmballoon_reserve_page()
436 STATS_INC(b->stats.refused_alloc); in vmballoon_reserve_page()
449 list_add(&page->lru, &b->refused_pages); in vmballoon_reserve_page()
450 if (++b->n_refused_pages >= VMW_BALLOON_MAX_REFUSED) in vmballoon_reserve_page()
456 list_add(&page->lru, &b->pages); in vmballoon_reserve_page()
459 b->size++; in vmballoon_reserve_page()
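vmballoon_reserve_page() is the allocation half of inflation: grab a page, offer its pfn to the monitor, and park refused pages on a side list so the allocator cannot hand the same page straight back. A hedged sketch; the VMW_PAGE_ALLOC_* gfp masks, error codes, and the handling of an unrecoverable lock failure are assumptions:

	/* Allocate one guest page and try to hand it to the hypervisor. */
	static int vmballoon_reserve_page(struct vmballoon *b, bool can_sleep)
	{
		struct page *page;
		unsigned int hv_status;
		int locked;

		do {
			STATS_INC(can_sleep ? b->stats.sleep_alloc : b->stats.alloc);

			page = alloc_page(can_sleep ? VMW_PAGE_ALLOC_CANSLEEP :
						      VMW_PAGE_ALLOC_NOSLEEP);
			if (!page) {
				STATS_INC(can_sleep ? b->stats.sleep_alloc_fail :
						      b->stats.alloc_fail);
				return -ENOMEM;
			}

			/* Inform the monitor; it may refuse individual pages. */
			locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status);
			if (locked < 0) {
				/* Unrecoverable (e.g. pfn too large): give the page back. */
				__free_page(page);
				return -EIO;
			}
			if (locked > 0) {
				STATS_INC(b->stats.refused_alloc);

				/*
				 * Park refused pages on their own list and retry,
				 * but give up after VMW_BALLOON_MAX_REFUSED attempts.
				 */
				list_add(&page->lru, &b->refused_pages);
				if (++b->n_refused_pages >= VMW_BALLOON_MAX_REFUSED)
					return -EIO;
			}
		} while (locked != 0);

		/* Track the ballooned page; the list is only walked on reset. */
		list_add(&page->lru, &b->pages);

		/* update balloon size */
		b->size++;

		return 0;
	}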
469 static int vmballoon_release_page(struct vmballoon *b, struct page *page) in vmballoon_release_page() argument
471 if (!vmballoon_send_unlock_page(b, page_to_pfn(page))) in vmballoon_release_page()
478 STATS_INC(b->stats.free); in vmballoon_release_page()
481 b->size--; in vmballoon_release_page()
490 static void vmballoon_release_refused_pages(struct vmballoon *b) in vmballoon_release_refused_pages() argument
494 list_for_each_entry_safe(page, next, &b->refused_pages, lru) { in vmballoon_release_refused_pages()
497 STATS_INC(b->stats.refused_free); in vmballoon_release_refused_pages()
500 b->n_refused_pages = 0; in vmballoon_release_refused_pages()
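The release helpers are the mirror image: unlock a page with the monitor before freeing it, and periodically return the refused pages (which were never counted in b->size). A sketch:

	/* Hand a ballooned page back to the guest after notifying the hypervisor. */
	static int vmballoon_release_page(struct vmballoon *b, struct page *page)
	{
		if (!vmballoon_send_unlock_page(b, page_to_pfn(page)))
			return -EIO;

		list_del(&page->lru);

		/* deallocate page */
		__free_page(page);
		STATS_INC(b->stats.free);

		/* update balloon size */
		b->size--;

		return 0;
	}

	/* Release pages the hypervisor refused to balloon. */
	static void vmballoon_release_refused_pages(struct vmballoon *b)
	{
		struct page *page, *next;

		list_for_each_entry_safe(page, next, &b->refused_pages, lru) {
			list_del(&page->lru);
			__free_page(page);
			STATS_INC(b->stats.refused_free);
		}

		b->n_refused_pages = 0;
	}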
508 static void vmballoon_inflate(struct vmballoon *b) in vmballoon_inflate() argument
517 pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target); in vmballoon_inflate()
534 goal = b->target - b->size; in vmballoon_inflate()
539 rate = b->slow_allocation_cycles ? in vmballoon_inflate()
540 b->rate_alloc : VMW_BALLOON_NOSLEEP_ALLOC_MAX; in vmballoon_inflate()
543 __func__, goal, rate, b->rate_alloc); in vmballoon_inflate()
547 error = vmballoon_reserve_page(b, alloc_can_sleep); in vmballoon_inflate()
564 b->rate_alloc = max(b->rate_alloc / 2, in vmballoon_inflate()
577 b->slow_allocation_cycles = VMW_BALLOON_SLOW_CYCLES; in vmballoon_inflate()
579 if (i >= b->rate_alloc) in vmballoon_inflate()
584 rate = b->rate_alloc; in vmballoon_inflate()
602 if (error == 0 && i >= b->rate_alloc) { in vmballoon_inflate()
603 unsigned int mult = i / b->rate_alloc; in vmballoon_inflate()
605 b->rate_alloc = in vmballoon_inflate()
606 min(b->rate_alloc + mult * VMW_BALLOON_RATE_ALLOC_INC, in vmballoon_inflate()
610 vmballoon_release_refused_pages(b); in vmballoon_inflate()
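The inflate fragments show the driver's self-tuning allocation policy: start with cheap non-sleeping allocations, fall back to sleeping allocations for a few cycles once they fail, and adjust rate_alloc up or down depending on how the round went. A condensed, hedged sketch of that control flow; constants and the exact loop bookkeeping are reconstructed around the listed lines:

	/* Grow the balloon toward the target, rate-limited and allocator-friendly. */
	static void vmballoon_inflate(struct vmballoon *b)
	{
		unsigned int goal, rate, i;
		int error = 0;
		bool alloc_can_sleep = false;

		pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target);

		goal = b->target - b->size;
		rate = b->slow_allocation_cycles ?
				b->rate_alloc : VMW_BALLOON_NOSLEEP_ALLOC_MAX;

		pr_debug("%s - goal: %d, no-sleep rate: %d, sleep rate: %d\n",
			 __func__, goal, rate, b->rate_alloc);

		for (i = 0; i < goal; i++) {
			error = vmballoon_reserve_page(b, alloc_can_sleep);
			if (error) {
				if (error != -ENOMEM)
					break;	/* the hypervisor gave up on this cycle */

				if (alloc_can_sleep) {
					/* Even sleeping allocations fail: back off quickly. */
					b->rate_alloc = max(b->rate_alloc / 2,
							    VMW_BALLOON_RATE_ALLOC_MIN);
					break;
				}

				/* Memory pressure: switch to the slow path for a while. */
				b->slow_allocation_cycles = VMW_BALLOON_SLOW_CYCLES;

				if (i >= b->rate_alloc)
					break;

				alloc_can_sleep = true;
				rate = b->rate_alloc;
			}

			if (i >= rate)
				break;		/* enough work for this round */

			cond_resched();
		}

		/* A clean round that beat the steady-state rate: speed up a little. */
		if (error == 0 && i >= b->rate_alloc) {
			unsigned int mult = i / b->rate_alloc;

			b->rate_alloc =
				min(b->rate_alloc + mult * VMW_BALLOON_RATE_ALLOC_INC,
				    VMW_BALLOON_RATE_ALLOC_MAX);
		}

		vmballoon_release_refused_pages(b);
	}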
616 static void vmballoon_deflate(struct vmballoon *b) in vmballoon_deflate() argument
623 pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target); in vmballoon_deflate()
626 goal = min(b->size - b->target, b->rate_free); in vmballoon_deflate()
628 pr_debug("%s - goal: %d, rate: %d\n", __func__, goal, b->rate_free); in vmballoon_deflate()
631 list_for_each_entry_safe(page, next, &b->pages, lru) { in vmballoon_deflate()
632 error = vmballoon_release_page(b, page); in vmballoon_deflate()
635 b->rate_free = max(b->rate_free / 2, in vmballoon_deflate()
645 b->rate_free = min(b->rate_free + VMW_BALLOON_RATE_FREE_INC, in vmballoon_deflate()
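Deflation is simpler: release at most rate_free pages per round, halving the rate on an UNLOCK failure and nudging it back up after a clean round. A sketch assembled around the fragments above:

	/* Shrink the balloon toward the target, at most rate_free pages per round. */
	static void vmballoon_deflate(struct vmballoon *b)
	{
		struct page *page, *next;
		unsigned int i = 0;
		unsigned int goal;
		int error;

		pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target);

		/* limit deallocation rate */
		goal = min(b->size - b->target, b->rate_free);

		pr_debug("%s - goal: %d, rate: %d\n", __func__, goal, b->rate_free);

		/* free pages to reach target */
		list_for_each_entry_safe(page, next, &b->pages, lru) {
			error = vmballoon_release_page(b, page);
			if (error) {
				/* The monitor rejected an UNLOCK: slow down frees. */
				b->rate_free = max(b->rate_free / 2,
						   VMW_BALLOON_RATE_FREE_MIN);
				return;
			}

			if (++i >= goal)
				break;
		}

		/* No errors this round: cautiously raise the free rate again. */
		b->rate_free = min(b->rate_free + VMW_BALLOON_RATE_FREE_INC,
				   VMW_BALLOON_RATE_FREE_MAX);
	}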
656 struct vmballoon *b = container_of(dwork, struct vmballoon, dwork); in vmballoon_work() local
659 STATS_INC(b->stats.timer); in vmballoon_work()
661 if (b->reset_required) in vmballoon_work()
662 vmballoon_reset(b); in vmballoon_work()
664 if (b->slow_allocation_cycles > 0) in vmballoon_work()
665 b->slow_allocation_cycles--; in vmballoon_work()
667 if (vmballoon_send_get_target(b, &target)) { in vmballoon_work()
669 b->target = target; in vmballoon_work()
671 if (b->size < target) in vmballoon_work()
672 vmballoon_inflate(b); in vmballoon_work()
673 else if (b->size > target) in vmballoon_work()
674 vmballoon_deflate(b); in vmballoon_work()
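The delayed-work handler ties the pieces together: recover from a pending reset, decay the slow-allocation counter, fetch the current target, and inflate or deflate toward it. A sketch; the re-arming call and the vmballoon_wq workqueue name are assumptions not visible in the listing:

	/* Periodic worker: handle reset, query the target, inflate or deflate. */
	static void vmballoon_work(struct work_struct *work)
	{
		struct delayed_work *dwork = to_delayed_work(work);
		struct vmballoon *b = container_of(dwork, struct vmballoon, dwork);
		u32 target;

		STATS_INC(b->stats.timer);

		if (b->reset_required)
			vmballoon_reset(b);

		if (b->slow_allocation_cycles > 0)
			b->slow_allocation_cycles--;

		if (vmballoon_send_get_target(b, &target)) {
			/* update target, adjust size */
			b->target = target;

			if (b->size < target)
				vmballoon_inflate(b);
			else if (b->size > target)
				vmballoon_deflate(b);
		}

		/* Re-arm; vmballoon_wq is assumed to be the driver's own workqueue. */
		queue_delayed_work(vmballoon_wq, dwork, round_jiffies_relative(HZ));
	}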
692 struct vmballoon *b = f->private; in vmballoon_debug_show() local
693 struct vmballoon_stats *stats = &b->stats; in vmballoon_debug_show()
699 b->target, b->size); in vmballoon_debug_show()
707 b->rate_alloc, b->rate_free); in vmballoon_debug_show()
749 static int __init vmballoon_debugfs_init(struct vmballoon *b) in vmballoon_debugfs_init() argument
753 b->dbg_entry = debugfs_create_file("vmmemctl", S_IRUGO, NULL, b, in vmballoon_debugfs_init()
755 if (IS_ERR(b->dbg_entry)) { in vmballoon_debugfs_init()
756 error = PTR_ERR(b->dbg_entry); in vmballoon_debugfs_init()
764 static void __exit vmballoon_debugfs_exit(struct vmballoon *b) in vmballoon_debugfs_exit() argument
766 debugfs_remove(b->dbg_entry); in vmballoon_debugfs_exit()
771 static inline int vmballoon_debugfs_init(struct vmballoon *b) in vmballoon_debugfs_init() argument
776 static inline void vmballoon_debugfs_exit(struct vmballoon *b) in vmballoon_debugfs_exit() argument
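The last few entries cover the debugfs interface and its inline no-op stubs for kernels built without CONFIG_DEBUG_FS. A hedged sketch of the debugfs side, assuming the usual <linux/debugfs.h> and <linux/seq_file.h> single_open() pattern; the vmballoon_debug_open/vmballoon_debug_fops names and the exact statistics printed are reconstructed, not taken from the listing:

	/* Dump target/size and the allocation rates through debugfs ("vmmemctl"). */
	static int vmballoon_debug_show(struct seq_file *f, void *offset)
	{
		struct vmballoon *b = f->private;
		struct vmballoon_stats *stats = &b->stats;

		seq_printf(f, "target:             %8d pages\n"
			      "current:            %8d pages\n",
			   b->target, b->size);

		seq_printf(f, "rateNoSleepAlloc:   %8d pages/sec\n"
			      "rateSleepAlloc:     %8d pages/sec\n"
			      "rateFree:           %8d pages/sec\n",
			   VMW_BALLOON_NOSLEEP_ALLOC_MAX,
			   b->rate_alloc, b->rate_free);

		/* ... per-counter seq_printf() lines for stats->* follow ... */

		return 0;
	}

	static int vmballoon_debug_open(struct inode *inode, struct file *file)
	{
		return single_open(file, vmballoon_debug_show, inode->i_private);
	}

	static const struct file_operations vmballoon_debug_fops = {
		.owner		= THIS_MODULE,
		.open		= vmballoon_debug_open,
		.read		= seq_read,
		.llseek		= seq_lseek,
		.release	= single_release,
	};

	static int __init vmballoon_debugfs_init(struct vmballoon *b)
	{
		int error;

		b->dbg_entry = debugfs_create_file("vmmemctl", S_IRUGO, NULL, b,
						   &vmballoon_debug_fops);
		if (IS_ERR(b->dbg_entry)) {
			error = PTR_ERR(b->dbg_entry);
			pr_err("failed to create debugfs entry, error: %d\n", error);
			return error;
		}

		return 0;
	}

	static void __exit vmballoon_debugfs_exit(struct vmballoon *b)
	{
		debugfs_remove(b->dbg_entry);
	}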