bfqd 227 block/bfq-cgroup.c if (!(bfqq == ((struct bfq_data *)bfqg->bfqd)->in_service_queue))
bfqd 314 block/bfq-cgroup.c bfqq->bfqd->root_group;
bfqd 527 block/bfq-cgroup.c struct bfq_data *bfqd = blkg->q->elevator->elevator_data;
bfqd 537 block/bfq-cgroup.c bfqg->bfqd = bfqd;
bfqd 567 block/bfq-cgroup.c static struct bfq_group *bfq_lookup_bfqg(struct bfq_data *bfqd,
bfqd 572 block/bfq-cgroup.c blkg = blkg_lookup(blkcg, bfqd->queue);
bfqd 578 block/bfq-cgroup.c struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
bfqd 584 block/bfq-cgroup.c bfqg = bfq_lookup_bfqg(bfqd, blkcg);
bfqd 598 block/bfq-cgroup.c if (curr_bfqg != bfqd->root_group) {
bfqd 601 block/bfq-cgroup.c parent = bfqd->root_group;
bfqd 623 block/bfq-cgroup.c void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
bfqd 640 block/bfq-cgroup.c if (bfqq == bfqd->in_service_queue)
bfqd 641 block/bfq-cgroup.c bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
bfqd 645 block/bfq-cgroup.c bfq_deactivate_bfqq(bfqd, bfqq, false, false);
bfqd 656 block/bfq-cgroup.c if (unlikely(!bfqd->nonrot_with_queueing))
bfqd 657 block/bfq-cgroup.c bfq_pos_tree_add_move(bfqd, bfqq);
bfqd 658 block/bfq-cgroup.c bfq_activate_bfqq(bfqd, bfqq);
bfqd 661 block/bfq-cgroup.c if (!bfqd->in_service_queue && !bfqd->rq_in_driver)
bfqd 662 block/bfq-cgroup.c bfq_schedule_dispatch(bfqd);
bfqd 681 block/bfq-cgroup.c static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
bfqd 690 block/bfq-cgroup.c bfqg = bfq_find_set_group(bfqd, blkcg);
bfqd 693 block/bfq-cgroup.c bfqg = bfqd->root_group;
bfqd 700 block/bfq-cgroup.c bfq_release_process_ref(bfqd, async_bfqq);
bfqd 707 block/bfq-cgroup.c bfq_bfqq_move(bfqd, sync_bfqq, bfqg);
bfqd 715 block/bfq-cgroup.c struct bfq_data *bfqd = bic_to_bfqd(bic);
bfqd 726 block/bfq-cgroup.c if (unlikely(!bfqd) || likely(bic->blkcg_serial_nr == serial_nr))
bfqd 729 block/bfq-cgroup.c bfqg = __bfq_bic_change_cgroup(bfqd, bic, __bio_blkcg(bio));
bfqd 804 block/bfq-cgroup.c static void bfq_reparent_leaf_entity(struct bfq_data *bfqd,
bfqd 824 block/bfq-cgroup.c bfq_bfqq_move(bfqd, bfqq, bfqd->root_group);
bfqd 833 block/bfq-cgroup.c static void bfq_reparent_active_queues(struct bfq_data *bfqd,
bfqd 842 block/bfq-cgroup.c bfq_reparent_leaf_entity(bfqd, entity, ioprio_class);
bfqd 845 block/bfq-cgroup.c bfq_reparent_leaf_entity(bfqd,
bfqd 862 block/bfq-cgroup.c struct bfq_data *bfqd = bfqg->bfqd;
bfqd 867 block/bfq-cgroup.c spin_lock_irqsave(&bfqd->lock, flags);
bfqd 891 block/bfq-cgroup.c bfq_reparent_active_queues(bfqd, bfqg, st, i);
bfqd 910 block/bfq-cgroup.c bfq_put_async_queues(bfqd, bfqg);
bfqd 912 block/bfq-cgroup.c spin_unlock_irqrestore(&bfqd->lock, flags);
bfqd 922 block/bfq-cgroup.c void bfq_end_wr_async(struct bfq_data *bfqd)
bfqd 926 block/bfq-cgroup.c list_for_each_entry(blkg, &bfqd->queue->blkg_list, q_node) {
bfqd 929 block/bfq-cgroup.c bfq_end_wr_async_queues(bfqd, bfqg);
bfqd 931 block/bfq-cgroup.c bfq_end_wr_async_queues(bfqd, bfqd->root_group);
bfqd 1211 block/bfq-cgroup.c struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
bfqd 1215 block/bfq-cgroup.c ret = blkcg_activate_policy(bfqd->queue, &blkcg_policy_bfq);
bfqd 1219 block/bfq-cgroup.c return blkg_to_bfqg(bfqd->queue->root_blkg);
bfqd 1376 block/bfq-cgroup.c void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
bfqd 1394 block/bfq-cgroup.c void bfq_end_wr_async(struct bfq_data *bfqd)
bfqd 1396 block/bfq-cgroup.c bfq_end_wr_async_queues(bfqd, bfqd->root_group);
bfqd 1399 block/bfq-cgroup.c struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd, struct blkcg *blkcg)
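The bfq_bfqq_move() entries above (bfq-cgroup.c, 623-662) impose a strict ordering: an in-service queue is expired first, a busy queue is deactivated, and only then is the entity re-parented to the new group, re-activated, and a dispatch kicked if the device went quiet. A minimal userspace model of that ordering, with purely illustrative types (the kernel's bfq_queue/bfq_group carry far more state):

    struct group;

    enum qstate { QS_IDLE, QS_BUSY, QS_IN_SERVICE };

    struct queue {
            enum qstate state;
            int has_requests;
            struct group *parent;
    };

    static void move_queue(struct queue *q, struct group *newg)
    {
            if (q->state == QS_IN_SERVICE)
                    q->state = QS_BUSY;     /* stands in for bfq_bfqq_expire() */
            if (q->state == QS_BUSY)
                    q->state = QS_IDLE;     /* stands in for bfq_deactivate_bfqq() */
            q->parent = newg;               /* re-parent only while off the service trees */
            if (q->has_requests)
                    q->state = QS_BUSY;     /* stands in for bfq_activate_bfqq() */
    }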
bfqd 1401 block/bfq-cgroup.c return bfqd->root_group;
bfqd 1406 block/bfq-cgroup.c return bfqq->bfqd->root_group;
bfqd 1413 block/bfq-cgroup.c struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
bfqd 230 block/bfq-iosched.c #define BFQ_RQ_SEEKY(bfqd, last_pos, rq) \
bfqd 233 block/bfq-iosched.c (!blk_queue_nonrot(bfqd->queue) || \
bfqd 399 block/bfq-iosched.c static struct bfq_io_cq *bfq_bic_lookup(struct bfq_data *bfqd,
bfqd 421 block/bfq-iosched.c void bfq_schedule_dispatch(struct bfq_data *bfqd)
bfqd 423 block/bfq-iosched.c if (bfqd->queued != 0) {
bfqd 424 block/bfq-iosched.c bfq_log(bfqd, "schedule dispatch");
bfqd 425 block/bfq-iosched.c blk_mq_run_hw_queues(bfqd->queue, true);
bfqd 439 block/bfq-iosched.c static struct request *bfq_choose_req(struct bfq_data *bfqd,
bfqd 470 block/bfq-iosched.c back_max = bfqd->bfq_back_max * 2;
bfqd 480 block/bfq-iosched.c d1 = (last - s1) * bfqd->bfq_back_penalty;
bfqd 487 block/bfq-iosched.c d2 = (last - s2) * bfqd->bfq_back_penalty;
bfqd 537 block/bfq-iosched.c struct bfq_data *bfqd = data->q->elevator->elevator_data;
bfqd 543 block/bfq-iosched.c bfqd->word_depths[!!bfqd->wr_busy_queues][op_is_sync(op)];
bfqd 545 block/bfq-iosched.c bfq_log(bfqd, "[%s] wr_busy %d sync %d depth %u",
bfqd 546 block/bfq-iosched.c __func__, bfqd->wr_busy_queues, op_is_sync(op),
bfqd 551 block/bfq-iosched.c bfq_rq_pos_tree_lookup(struct bfq_data *bfqd, struct rb_root *root,
bfqd 584 block/bfq-iosched.c bfq_log(bfqd, "rq_pos_tree_lookup %llu: returning %d",
bfqd 607 block/bfq-iosched.c bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq)
bfqd 618 block/bfq-iosched.c if (bfqq == &bfqd->oom_bfqq)
bfqd 635 block/bfq-iosched.c __bfqq = bfq_rq_pos_tree_lookup(bfqd, bfqq->pos_root,
bfqd 677 block/bfq-iosched.c static bool bfq_asymmetric_scenario(struct bfq_data *bfqd,
bfqd 684 block/bfq-iosched.c rb_first_cached(&bfqd->queue_weights_tree),
bfqd 693 block/bfq-iosched.c !RB_EMPTY_ROOT(&bfqd->queue_weights_tree.rb_root) &&
bfqd 694 block/bfq-iosched.c (bfqd->queue_weights_tree.rb_root.rb_node->rb_left ||
bfqd 695 block/bfq-iosched.c bfqd->queue_weights_tree.rb_root.rb_node->rb_right);
bfqd 698 block/bfq-iosched.c (bfqd->busy_queues[0] && bfqd->busy_queues[1]) ||
bfqd 699 block/bfq-iosched.c (bfqd->busy_queues[0] && bfqd->busy_queues[2]) ||
bfqd 700 block/bfq-iosched.c (bfqd->busy_queues[1] && bfqd->busy_queues[2]);
bfqd 704 block/bfq-iosched.c || bfqd->num_groups_with_pending_reqs > 0
bfqd 722 block/bfq-iosched.c void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_queue *bfqq,
bfqd 796 block/bfq-iosched.c void __bfq_weights_tree_remove(struct bfq_data *bfqd,
bfqd 819 block/bfq-iosched.c void bfq_weights_tree_remove(struct bfq_data *bfqd,
bfqd 854 block/bfq-iosched.c bfqd->num_groups_with_pending_reqs--;
bfqd 864 block/bfq-iosched.c __bfq_weights_tree_remove(bfqd, bfqq,
bfqd 865 block/bfq-iosched.c &bfqd->queue_weights_tree);
bfqd 886 block/bfq-iosched.c bfq_log_bfqq(bfqq->bfqd, bfqq, "check_fifo: returned %p", rq);
bfqd 890 block/bfq-iosched.c static struct request *bfq_find_next_rq(struct bfq_data *bfqd,
bfqd 914 block/bfq-iosched.c return bfq_choose_req(bfqd, next, prev, blk_rq_pos(last));
bfqd 922 block/bfq-iosched.c bfq_asymmetric_scenario(bfqq->bfqd, bfqq))
bfqd 939 block/bfq-iosched.c static void bfq_updated_next_req(struct bfq_data *bfqd,
bfqd 949 block/bfq-iosched.c if (bfqq == bfqd->in_service_queue)
bfqd 962 block/bfq-iosched.c bfq_log_bfqq(bfqd, bfqq, "updated next rq: new budget %lu",
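The bfq_choose_req() entries above (439-487) encode BFQ's distance metric: forward seeks count at face value, backward seeks within back_max (bfq_back_max doubled, entry 470) are scaled by bfq_back_penalty, and anything farther back is effectively infinite. A compilable sketch of that cost function; the helper name is illustrative:

    #include <stdint.h>

    static uint64_t seek_cost(uint64_t last, uint64_t s,
                              uint64_t back_max, unsigned back_penalty)
    {
            if (s >= last)                  /* forward: distance counts as-is */
                    return s - last;
            if (s + back_max >= last)       /* short backward seek: penalized */
                    return (last - s) * back_penalty;
            return UINT64_MAX;              /* too far behind the head: never preferred */
    }

bfq_choose_req() then simply returns whichever of the two candidate requests has the smaller cost from the last head position.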
bfqq, "updated next rq: new budget %lu", bfqd 964 block/bfq-iosched.c bfq_requeue_bfqq(bfqd, bfqq, false); bfqd 968 block/bfq-iosched.c static unsigned int bfq_wr_duration(struct bfq_data *bfqd) bfqd 972 block/bfq-iosched.c if (bfqd->bfq_wr_max_time > 0) bfqd 973 block/bfq-iosched.c return bfqd->bfq_wr_max_time; bfqd 975 block/bfq-iosched.c dur = bfqd->rate_dur_prod; bfqd 976 block/bfq-iosched.c do_div(dur, bfqd->peak_rate); bfqd 1003 block/bfq-iosched.c struct bfq_data *bfqd) bfqd 1005 block/bfq-iosched.c bfqq->wr_coeff = bfqd->bfq_wr_coeff; bfqd 1006 block/bfq-iosched.c bfqq->wr_cur_max_time = bfq_wr_duration(bfqd); bfqd 1011 block/bfq-iosched.c bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd, bfqd 1037 block/bfq-iosched.c if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time && bfqd 1040 block/bfq-iosched.c bfq_wr_duration(bfqd))) { bfqd 1041 block/bfq-iosched.c switch_back_to_interactive_wr(bfqq, bfqd); bfqd 1044 block/bfq-iosched.c bfq_log_bfqq(bfqq->bfqd, bfqq, bfqd 1056 block/bfq-iosched.c bfqd->wr_busy_queues++; bfqd 1058 block/bfq-iosched.c bfqd->wr_busy_queues--; bfqd 1068 block/bfq-iosched.c static void bfq_reset_burst_list(struct bfq_data *bfqd, struct bfq_queue *bfqq) bfqd 1073 block/bfq-iosched.c hlist_for_each_entry_safe(item, n, &bfqd->burst_list, burst_list_node) bfqd 1081 block/bfq-iosched.c if (bfq_tot_busy_queues(bfqd) == 0) { bfqd 1082 block/bfq-iosched.c hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list); bfqd 1083 block/bfq-iosched.c bfqd->burst_size = 1; bfqd 1085 block/bfq-iosched.c bfqd->burst_size = 0; bfqd 1087 block/bfq-iosched.c bfqd->burst_parent_entity = bfqq->entity.parent; bfqd 1091 block/bfq-iosched.c static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq) bfqd 1094 block/bfq-iosched.c bfqd->burst_size++; bfqd 1096 block/bfq-iosched.c if (bfqd->burst_size == bfqd->bfq_large_burst_thresh) { bfqd 1104 block/bfq-iosched.c bfqd->large_burst = true; bfqd 1110 block/bfq-iosched.c hlist_for_each_entry(bfqq_item, &bfqd->burst_list, bfqd 1122 block/bfq-iosched.c hlist_for_each_entry_safe(pos, n, &bfqd->burst_list, bfqd 1131 block/bfq-iosched.c hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list); bfqd 1243 block/bfq-iosched.c static void bfq_handle_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq) bfqd 1273 block/bfq-iosched.c if (time_is_before_jiffies(bfqd->last_ins_in_burst + bfqd 1274 block/bfq-iosched.c bfqd->bfq_burst_interval) || bfqd 1275 block/bfq-iosched.c bfqq->entity.parent != bfqd->burst_parent_entity) { bfqd 1276 block/bfq-iosched.c bfqd->large_burst = false; bfqd 1277 block/bfq-iosched.c bfq_reset_burst_list(bfqd, bfqq); bfqd 1286 block/bfq-iosched.c if (bfqd->large_burst) { bfqd 1296 block/bfq-iosched.c bfq_add_to_burst(bfqd, bfqq); bfqd 1306 block/bfq-iosched.c bfqd->last_ins_in_burst = jiffies; bfqd 1321 block/bfq-iosched.c static int bfq_max_budget(struct bfq_data *bfqd) bfqd 1323 block/bfq-iosched.c if (bfqd->budgets_assigned < bfq_stats_min_budgets) bfqd 1326 block/bfq-iosched.c return bfqd->bfq_max_budget; bfqd 1333 block/bfq-iosched.c static int bfq_min_budget(struct bfq_data *bfqd) bfqd 1335 block/bfq-iosched.c if (bfqd->budgets_assigned < bfq_stats_min_budgets) bfqd 1338 block/bfq-iosched.c return bfqd->bfq_max_budget / 32; bfqd 1445 block/bfq-iosched.c static bool bfq_bfqq_update_budg_for_activation(struct bfq_data *bfqd, bfqd 1514 block/bfq-iosched.c static void bfq_update_bfqq_wr_on_rq_arrival(struct bfq_data *bfqd, bfqd 1526 block/bfq-iosched.c bfqq->wr_coeff = bfqd->bfq_wr_coeff; 
bfqd 1527 block/bfq-iosched.c bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
bfqd 1542 block/bfq-iosched.c bfqq->wr_coeff = bfqd->bfq_wr_coeff *
bfqd 1545 block/bfq-iosched.c bfqd->bfq_wr_rt_max_time;
bfqd 1559 block/bfq-iosched.c 2 * bfq_min_budget(bfqd));
bfqd 1562 block/bfq-iosched.c bfqq->wr_coeff = bfqd->bfq_wr_coeff;
bfqd 1563 block/bfq-iosched.c bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
bfqd 1597 block/bfq-iosched.c bfqd->bfq_wr_rt_max_time) {
bfqd 1602 block/bfq-iosched.c bfqd->bfq_wr_rt_max_time;
bfqd 1603 block/bfq-iosched.c bfqq->wr_coeff = bfqd->bfq_wr_coeff *
bfqd 1611 block/bfq-iosched.c static bool bfq_bfqq_idle_for_long_time(struct bfq_data *bfqd,
bfqd 1617 block/bfq-iosched.c bfqd->bfq_wr_min_idle_time);
bfqd 1650 block/bfq-iosched.c static void bfq_bfqq_handle_idle_busy_switch(struct bfq_data *bfqd,
bfqd 1658 block/bfq-iosched.c idle_for_long_time = bfq_bfqq_idle_for_long_time(bfqd, bfqq),
bfqd 1666 block/bfq-iosched.c bfqd->bfq_slice_idle * 3;
bfqd 1677 block/bfq-iosched.c soft_rt = bfqd->bfq_wr_max_softrt_rate > 0 &&
bfqd 1683 block/bfq-iosched.c wr_or_deserves_wr = bfqd->low_latency &&
bfqd 1693 block/bfq-iosched.c bfq_bfqq_update_budg_for_activation(bfqd, bfqq,
bfqd 1725 block/bfq-iosched.c bfqd->bfq_requests_within_timer)
bfqd 1731 block/bfq-iosched.c if (bfqd->low_latency) {
bfqd 1735 block/bfq-iosched.c jiffies - bfqd->bfq_wr_min_idle_time - 1;
bfqd 1738 block/bfq-iosched.c bfqd->bfq_wr_min_idle_time)) {
bfqd 1739 block/bfq-iosched.c bfq_update_bfqq_wr_on_rq_arrival(bfqd, bfqq,
bfqd 1755 block/bfq-iosched.c bfq_add_bfqq_busy(bfqd, bfqq);
bfqd 1789 block/bfq-iosched.c if (bfqd->in_service_queue &&
bfqd 1791 block/bfq-iosched.c bfqq->wr_coeff >= bfqd->in_service_queue->wr_coeff) ||
bfqd 1792 block/bfq-iosched.c bfq_bfqq_higher_class_or_weight(bfqq, bfqd->in_service_queue)) &&
bfqd 1793 block/bfq-iosched.c next_queue_may_preempt(bfqd))
bfqd 1794 block/bfq-iosched.c bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
bfqd 1798 block/bfq-iosched.c static void bfq_reset_inject_limit(struct bfq_data *bfqd,
bfqd 1808 block/bfq-iosched.c bfqd->waited_rq = NULL;
bfqd 1867 block/bfq-iosched.c struct bfq_data *bfqd = bfqq->bfqd;
bfqd 1872 block/bfq-iosched.c bfq_log_bfqq(bfqd, bfqq, "add_request %d", rq_is_sync(rq));
bfqd 1874 block/bfq-iosched.c bfqd->queued++;
bfqd 1931 block/bfq-iosched.c if (bfqd->last_completed_rq_bfqq &&
bfqd 1933 block/bfq-iosched.c ktime_get_ns() - bfqd->last_completion <
bfqd 1935 block/bfq-iosched.c if (bfqd->last_completed_rq_bfqq != bfqq &&
bfqd 1936 block/bfq-iosched.c bfqd->last_completed_rq_bfqq !=
bfqd 1944 block/bfq-iosched.c bfqq->waker_bfqq = bfqd->last_completed_rq_bfqq;
bfqd 1969 block/bfq-iosched.c &bfqd->last_completed_rq_bfqq->woken_list);
bfqd 1972 block/bfq-iosched.c } else if (bfqd->last_completed_rq_bfqq ==
bfqd 1991 block/bfq-iosched.c bfq_reset_inject_limit(bfqd, bfqq);
bfqd 2018 block/bfq-iosched.c if (bfqq == bfqd->in_service_queue &&
bfqd 2019 block/bfq-iosched.c (bfqd->rq_in_driver == 0 ||
bfqd 2021 block/bfq-iosched.c bfqd->rqs_injected && bfqd->rq_in_driver > 0)) &&
bfqd 2024 block/bfq-iosched.c bfqd->last_empty_occupied_ns = ktime_get_ns();
bfqd 2031 block/bfq-iosched.c bfqd->wait_dispatch = true;
bfqd 2045 block/bfq-iosched.c if (bfqd->rq_in_driver == 0)
bfqd 2046 block/bfq-iosched.c bfqd->rqs_injected = false;
bfqd 2056 block/bfq-iosched.c next_rq = bfq_choose_req(bfqd, bfqq->next_rq, rq, bfqd->last_position);
bfqd 2063 block/bfq-iosched.c if (unlikely(!bfqd->nonrot_with_queueing && prev != bfqq->next_rq))
bfqd 2064 block/bfq-iosched.c bfq_pos_tree_add_move(bfqd, bfqq);
bfqd 2067 block/bfq-iosched.c bfq_bfqq_handle_idle_busy_switch(bfqd, bfqq, old_wr_coeff,
bfqd 2070 block/bfq-iosched.c if (bfqd->low_latency && old_wr_coeff == 1 && !rq_is_sync(rq) &&
bfqd 2073 block/bfq-iosched.c bfqd->bfq_wr_min_inter_arr_async)) {
bfqd 2074 block/bfq-iosched.c bfqq->wr_coeff = bfqd->bfq_wr_coeff;
bfqd 2075 block/bfq-iosched.c bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
bfqd 2077 block/bfq-iosched.c bfqd->wr_busy_queues++;
bfqd 2081 block/bfq-iosched.c bfq_updated_next_req(bfqd, bfqq);
bfqd 2110 block/bfq-iosched.c if (bfqd->low_latency &&
bfqd 2115 block/bfq-iosched.c static struct request *bfq_find_rq_fmerge(struct bfq_data *bfqd,
bfqd 2119 block/bfq-iosched.c struct bfq_queue *bfqq = bfqd->bio_bfqq;
bfqd 2139 block/bfq-iosched.c struct bfq_data *bfqd = q->elevator->elevator_data;
bfqd 2141 block/bfq-iosched.c bfqd->rq_in_driver++;
bfqd 2146 block/bfq-iosched.c struct bfq_data *bfqd = q->elevator->elevator_data;
bfqd 2148 block/bfq-iosched.c bfqd->rq_in_driver--;
bfqd 2156 block/bfq-iosched.c struct bfq_data *bfqd = bfqq->bfqd;
bfqd 2160 block/bfq-iosched.c bfqq->next_rq = bfq_find_next_rq(bfqd, bfqq, rq);
bfqd 2161 block/bfq-iosched.c bfq_updated_next_req(bfqd, bfqq);
bfqd 2167 block/bfq-iosched.c bfqd->queued--;
bfqd 2177 block/bfq-iosched.c if (bfq_bfqq_busy(bfqq) && bfqq != bfqd->in_service_queue) {
bfqd 2178 block/bfq-iosched.c bfq_del_bfqq_busy(bfqd, bfqq, false);
bfqd 2204 block/bfq-iosched.c if (unlikely(!bfqd->nonrot_with_queueing))
bfqd 2205 block/bfq-iosched.c bfq_pos_tree_add_move(bfqd, bfqq);
bfqd 2217 block/bfq-iosched.c struct bfq_data *bfqd = q->elevator->elevator_data;
bfqd 2226 block/bfq-iosched.c struct bfq_io_cq *bic = bfq_bic_lookup(bfqd, current->io_context, q);
bfqd 2229 block/bfq-iosched.c spin_lock_irq(&bfqd->lock);
bfqd 2232 block/bfq-iosched.c bfqd->bio_bfqq = bic_to_bfqq(bic, op_is_sync(bio->bi_opf));
bfqd 2234 block/bfq-iosched.c bfqd->bio_bfqq = NULL;
bfqd 2235 block/bfq-iosched.c bfqd->bio_bic = bic;
bfqd 2241 block/bfq-iosched.c spin_unlock_irq(&bfqd->lock);
bfqd 2249 block/bfq-iosched.c struct bfq_data *bfqd = q->elevator->elevator_data;
bfqd 2252 block/bfq-iosched.c __rq = bfq_find_rq_fmerge(bfqd, bio, q);
bfqd 2272 block/bfq-iosched.c struct bfq_data *bfqd;
bfqd 2278 block/bfq-iosched.c bfqd = bfqq->bfqd;
bfqd 2286 block/bfq-iosched.c next_rq = bfq_choose_req(bfqd, bfqq->next_rq, req,
bfqd 2287 block/bfq-iosched.c bfqd->last_position);
bfqd 2295 block/bfq-iosched.c bfq_updated_next_req(bfqd, bfqq);
bfqd 2300 block/bfq-iosched.c if (unlikely(!bfqd->nonrot_with_queueing))
bfqd 2301 block/bfq-iosched.c bfq_pos_tree_add_move(bfqd, bfqq);
bfqd 2356 block/bfq-iosched.c bfqq->bfqd->wr_busy_queues--;
bfqd 2367 block/bfq-iosched.c void bfq_end_wr_async_queues(struct bfq_data *bfqd,
bfqd 2380 block/bfq-iosched.c static void bfq_end_wr(struct bfq_data *bfqd)
bfqd 2384 block/bfq-iosched.c spin_lock_irq(&bfqd->lock);
bfqd 2386 block/bfq-iosched.c list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list)
bfqd 2388 block/bfq-iosched.c list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list)
bfqd 2390 block/bfq-iosched.c bfq_end_wr_async(bfqd);
bfqd 2392 block/bfq-iosched.c spin_unlock_irq(&bfqd->lock);
bfqd 2410 block/bfq-iosched.c static struct bfq_queue *bfqq_find_close(struct bfq_data *bfqd,
bfqd 2425 block/bfq-iosched.c __bfqq = bfq_rq_pos_tree_lookup(bfqd, root, sector, &parent, NULL);
bfqd 2452 block/bfq-iosched.c static struct bfq_queue *bfq_find_close_cooperator(struct bfq_data *bfqd,
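Entries 2070-2077 above show the async weight-raising trigger in bfq_add_request(): a non-raised queue issuing async I/O is raised only when enough time has passed since its last raising ended (bfq_wr_min_inter_arr_async). A userspace model of that test; the direction of the comparison is an assumption about the time_is_before_jiffies() line the listing elides:

    #include <stdint.h>

    struct wr_state { unsigned coeff; uint64_t cur_max_time; };

    static void maybe_raise_async(struct wr_state *w, uint64_t now_j,
                                  uint64_t last_wr_start_finish,
                                  uint64_t min_inter_arr_async,
                                  unsigned bfq_wr_coeff, uint64_t duration)
    {
            /* assumed reading: the previous raising must have ended at
             * least min_inter_arr_async jiffies ago */
            if (w->coeff == 1 &&
                last_wr_start_finish + min_inter_arr_async < now_j) {
                    w->coeff = bfq_wr_coeff;        /* 30 by default, entry 6502 */
                    w->cur_max_time = duration;     /* bfq_wr_duration(bfqd) */
            }
    }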
bfqd 2465 block/bfq-iosched.c bfqq = bfqq_find_close(bfqd, cur_bfqq, sector);
bfqd 2503 block/bfq-iosched.c bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d",
bfqd 2581 block/bfq-iosched.c bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
bfqd 2623 block/bfq-iosched.c if (likely(bfqd->nonrot_with_queueing))
bfqd 2643 block/bfq-iosched.c if (!io_struct || unlikely(bfqq == &bfqd->oom_bfqq))
bfqd 2647 block/bfq-iosched.c if (bfq_tot_busy_queues(bfqd) == 1)
bfqd 2650 block/bfq-iosched.c in_service_bfqq = bfqd->in_service_queue;
bfqd 2653 block/bfq-iosched.c likely(in_service_bfqq != &bfqd->oom_bfqq) &&
bfqd 2655 block/bfq-iosched.c bfqd->in_serv_last_pos) &&
bfqd 2667 block/bfq-iosched.c new_bfqq = bfq_find_close_cooperator(bfqd, bfqq,
bfqd 2670 block/bfq-iosched.c if (new_bfqq && likely(new_bfqq != &bfqd->oom_bfqq) &&
bfqd 2697 block/bfq-iosched.c bfqq->bfqd->low_latency)) {
bfqd 2707 block/bfq-iosched.c bic->saved_wr_coeff = bfqq->bfqd->bfq_wr_coeff;
bfqd 2709 block/bfq-iosched.c bic->saved_wr_cur_max_time = bfq_wr_duration(bfqq->bfqd);
bfqd 2720 block/bfq-iosched.c void bfq_release_process_ref(struct bfq_data *bfqd, struct bfq_queue *bfqq)
bfqd 2734 block/bfq-iosched.c bfqq != bfqd->in_service_queue)
bfqd 2735 block/bfq-iosched.c bfq_del_bfqq_busy(bfqd, bfqq, false);
bfqd 2741 block/bfq-iosched.c bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
bfqd 2744 block/bfq-iosched.c bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu",
bfqd 2769 block/bfq-iosched.c bfqd->wr_busy_queues++;
bfqd 2777 block/bfq-iosched.c bfqd->wr_busy_queues--;
bfqd 2780 block/bfq-iosched.c bfq_log_bfqq(bfqd, new_bfqq, "merge_bfqqs: wr_busy %d",
bfqd 2781 block/bfq-iosched.c bfqd->wr_busy_queues);
bfqd 2810 block/bfq-iosched.c bfq_release_process_ref(bfqd, bfqq);
bfqd 2816 block/bfq-iosched.c struct bfq_data *bfqd = q->elevator->elevator_data;
bfqd 2818 block/bfq-iosched.c struct bfq_queue *bfqq = bfqd->bio_bfqq, *new_bfqq;
bfqd 2837 block/bfq-iosched.c new_bfqq = bfq_setup_cooperator(bfqd, bfqq, bio, false);
bfqd 2846 block/bfq-iosched.c bfq_merge_bfqqs(bfqd, bfqd->bio_bic, bfqq,
bfqd 2861 block/bfq-iosched.c bfqd->bio_bfqq = bfqq;
bfqd 2873 block/bfq-iosched.c static void bfq_set_budget_timeout(struct bfq_data *bfqd,
bfqd 2878 block/bfq-iosched.c if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time)
bfqd 2883 block/bfq-iosched.c bfqd->last_budget_start = ktime_get();
bfqd 2886 block/bfq-iosched.c bfqd->bfq_timeout * timeout_coeff;
bfqd 2889 block/bfq-iosched.c static void __bfq_set_in_service_queue(struct bfq_data *bfqd,
bfqd 2895 block/bfq-iosched.c bfqd->budgets_assigned = (bfqd->budgets_assigned * 7 + 256) / 8;
bfqd 2899 block/bfq-iosched.c bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time &&
bfqd 2933 block/bfq-iosched.c bfq_set_budget_timeout(bfqd, bfqq);
bfqd 2934 block/bfq-iosched.c bfq_log_bfqq(bfqd, bfqq,
bfqd 2939 block/bfq-iosched.c bfqd->in_service_queue = bfqq;
bfqd 2945 block/bfq-iosched.c static struct bfq_queue *bfq_set_in_service_queue(struct bfq_data *bfqd)
bfqd 2947 block/bfq-iosched.c struct bfq_queue *bfqq = bfq_get_next_queue(bfqd);
bfqd 2949 block/bfq-iosched.c __bfq_set_in_service_queue(bfqd, bfqq);
bfqd 2953 block/bfq-iosched.c static void bfq_arm_slice_timer(struct bfq_data *bfqd)
bfqd 2955 block/bfq-iosched.c struct bfq_queue *bfqq = bfqd->in_service_queue;
bfqd 2965 block/bfq-iosched.c sl = bfqd->bfq_slice_idle;
bfqd 2977 block/bfq-iosched.c !bfq_asymmetric_scenario(bfqd, bfqq))
bfqd 2982 block/bfq-iosched.c bfqd->last_idling_start = ktime_get();
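Entries 2873-2886 above set the budget timeout for the queue entering service: bfq_timeout scaled by a coefficient, with soft real-time queues pinned to a coefficient of 1 (entry 2878). A sketch; the weight/orig_weight coefficient is an assumption about the branch the listing elides, and a single jiffies-style clock is used for both fields:

    #include <stdint.h>

    struct budget_clock { uint64_t last_budget_start; uint64_t budget_timeout; };

    static void set_budget_timeout(struct budget_clock *c, uint64_t now_j,
                                   uint64_t bfq_timeout, unsigned weight,
                                   unsigned orig_weight, int soft_rt)
    {
            unsigned coeff = soft_rt ? 1 : weight / orig_weight;    /* assumption */

            c->last_budget_start = now_j;
            c->budget_timeout = now_j + bfq_timeout * coeff;
    }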
bfqd 2983 block/bfq-iosched.c bfqd->last_idling_start_jiffies = jiffies;
bfqd 2985 block/bfq-iosched.c hrtimer_start(&bfqd->idle_slice_timer, ns_to_ktime(sl),
bfqd 2997 block/bfq-iosched.c static unsigned long bfq_calc_max_budget(struct bfq_data *bfqd)
bfqd 2999 block/bfq-iosched.c return (u64)bfqd->peak_rate * USEC_PER_MSEC *
bfqd 3000 block/bfq-iosched.c jiffies_to_msecs(bfqd->bfq_timeout)>>BFQ_RATE_SHIFT;
bfqd 3008 block/bfq-iosched.c static void update_thr_responsiveness_params(struct bfq_data *bfqd)
bfqd 3010 block/bfq-iosched.c if (bfqd->bfq_user_max_budget == 0) {
bfqd 3011 block/bfq-iosched.c bfqd->bfq_max_budget =
bfqd 3012 block/bfq-iosched.c bfq_calc_max_budget(bfqd);
bfqd 3013 block/bfq-iosched.c bfq_log(bfqd, "new max_budget = %d", bfqd->bfq_max_budget);
bfqd 3017 block/bfq-iosched.c static void bfq_reset_rate_computation(struct bfq_data *bfqd,
bfqd 3021 block/bfq-iosched.c bfqd->last_dispatch = bfqd->first_dispatch = ktime_get_ns();
bfqd 3022 block/bfq-iosched.c bfqd->peak_rate_samples = 1;
bfqd 3023 block/bfq-iosched.c bfqd->sequential_samples = 0;
bfqd 3024 block/bfq-iosched.c bfqd->tot_sectors_dispatched = bfqd->last_rq_max_size =
bfqd 3027 block/bfq-iosched.c bfqd->peak_rate_samples = 0; /* full re-init on next disp. */
bfqd 3029 block/bfq-iosched.c bfq_log(bfqd,
bfqd 3031 block/bfq-iosched.c bfqd->peak_rate_samples, bfqd->sequential_samples,
bfqd 3032 block/bfq-iosched.c bfqd->tot_sectors_dispatched);
bfqd 3035 block/bfq-iosched.c static void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq)
bfqd 3047 block/bfq-iosched.c if (bfqd->peak_rate_samples < BFQ_RATE_MIN_SAMPLES ||
bfqd 3048 block/bfq-iosched.c bfqd->delta_from_first < BFQ_RATE_MIN_INTERVAL)
bfqd 3057 block/bfq-iosched.c bfqd->delta_from_first =
bfqd 3058 block/bfq-iosched.c max_t(u64, bfqd->delta_from_first,
bfqd 3059 block/bfq-iosched.c bfqd->last_completion - bfqd->first_dispatch);
bfqd 3065 block/bfq-iosched.c rate = div64_ul(bfqd->tot_sectors_dispatched<<BFQ_RATE_SHIFT,
bfqd 3066 block/bfq-iosched.c div_u64(bfqd->delta_from_first, NSEC_PER_USEC));
bfqd 3074 block/bfq-iosched.c if ((bfqd->sequential_samples < (3 * bfqd->peak_rate_samples)>>2 &&
bfqd 3075 block/bfq-iosched.c rate <= bfqd->peak_rate) ||
bfqd 3102 block/bfq-iosched.c weight = (9 * bfqd->sequential_samples) / bfqd->peak_rate_samples;
bfqd 3109 block/bfq-iosched.c div_u64(weight * bfqd->delta_from_first,
bfqd 3123 block/bfq-iosched.c bfqd->peak_rate *= divisor-1;
bfqd 3124 block/bfq-iosched.c bfqd->peak_rate /= divisor;
bfqd 3127 block/bfq-iosched.c bfqd->peak_rate += rate;
bfqd 3136 block/bfq-iosched.c bfqd->peak_rate = max_t(u32, 1, bfqd->peak_rate);
bfqd 3138 block/bfq-iosched.c update_thr_responsiveness_params(bfqd);
bfqd 3141 block/bfq-iosched.c bfq_reset_rate_computation(bfqd, rq);
bfqd 3176 block/bfq-iosched.c static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq)
bfqd 3180 block/bfq-iosched.c if (bfqd->peak_rate_samples == 0) { /* first dispatch */
bfqd 3181 block/bfq-iosched.c bfq_log(bfqd, "update_peak_rate: goto reset, samples %d",
bfqd 3182 block/bfq-iosched.c bfqd->peak_rate_samples);
bfqd 3183 block/bfq-iosched.c bfq_reset_rate_computation(bfqd, rq);
bfqd 3199 block/bfq-iosched.c if (now_ns - bfqd->last_dispatch > 100*NSEC_PER_MSEC &&
bfqd 3200 block/bfq-iosched.c bfqd->rq_in_driver == 0)
bfqd 3204 block/bfq-iosched.c bfqd->peak_rate_samples++;
bfqd 3206 block/bfq-iosched.c if ((bfqd->rq_in_driver > 0 ||
bfqd 3207 block/bfq-iosched.c now_ns - bfqd->last_completion < BFQ_MIN_TT)
bfqd 3208 block/bfq-iosched.c && !BFQ_RQ_SEEKY(bfqd, bfqd->last_position, rq))
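Entries 3102-3136 above blend each new rate sample into peak_rate with a low-pass filter whose strength grows with the fraction of sequential samples. A compilable model; divisor = 10 - weight is inferred from the (divisor-1)/divisor update in entries 3123-3127:

    #include <stdint.h>

    static uint32_t blend_peak_rate(uint32_t peak_rate, uint32_t rate,
                                    unsigned seq_samples, unsigned tot_samples)
    {
            unsigned weight = (9 * seq_samples) / tot_samples;      /* entry 3102 */
            unsigned divisor = 10 - weight;                         /* assumed */

            /* peak_rate = peak_rate * (divisor-1)/divisor + rate/divisor */
            peak_rate = (uint32_t)(((uint64_t)peak_rate * (divisor - 1)) / divisor);
            peak_rate += rate / divisor;
            return peak_rate ? peak_rate : 1;       /* entry 3136: never zero */
    }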
block/bfq-iosched.c && !BFQ_RQ_SEEKY(bfqd, bfqd->last_position, rq)) bfqd 3209 block/bfq-iosched.c bfqd->sequential_samples++; bfqd 3211 block/bfq-iosched.c bfqd->tot_sectors_dispatched += blk_rq_sectors(rq); bfqd 3214 block/bfq-iosched.c if (likely(bfqd->peak_rate_samples % 32)) bfqd 3215 block/bfq-iosched.c bfqd->last_rq_max_size = bfqd 3216 block/bfq-iosched.c max_t(u32, blk_rq_sectors(rq), bfqd->last_rq_max_size); bfqd 3218 block/bfq-iosched.c bfqd->last_rq_max_size = blk_rq_sectors(rq); bfqd 3220 block/bfq-iosched.c bfqd->delta_from_first = now_ns - bfqd->first_dispatch; bfqd 3223 block/bfq-iosched.c if (bfqd->delta_from_first < BFQ_RATE_REF_INTERVAL) bfqd 3227 block/bfq-iosched.c bfq_update_rate_reset(bfqd, rq); bfqd 3229 block/bfq-iosched.c bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq); bfqd 3230 block/bfq-iosched.c if (RQ_BFQQ(rq) == bfqd->in_service_queue) bfqd 3231 block/bfq-iosched.c bfqd->in_serv_last_pos = bfqd->last_position; bfqd 3232 block/bfq-iosched.c bfqd->last_dispatch = now_ns; bfqd 3446 block/bfq-iosched.c static bool idling_needed_for_service_guarantees(struct bfq_data *bfqd, bfqd 3454 block/bfq-iosched.c (bfqd->wr_busy_queues < bfqd 3455 block/bfq-iosched.c bfq_tot_busy_queues(bfqd) || bfqd 3456 block/bfq-iosched.c bfqd->rq_in_driver >= bfqd 3458 block/bfq-iosched.c bfq_asymmetric_scenario(bfqd, bfqq); bfqd 3461 block/bfq-iosched.c static bool __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq, bfqd 3488 block/bfq-iosched.c idling_needed_for_service_guarantees(bfqd, bfqq))) { bfqd 3498 block/bfq-iosched.c bfq_del_bfqq_busy(bfqd, bfqq, true); bfqd 3500 block/bfq-iosched.c bfq_requeue_bfqq(bfqd, bfqq, true); bfqd 3505 block/bfq-iosched.c if (unlikely(!bfqd->nonrot_with_queueing && bfqd 3507 block/bfq-iosched.c bfq_pos_tree_add_move(bfqd, bfqq); bfqd 3517 block/bfq-iosched.c return __bfq_bfqd_reset_in_service(bfqd); bfqd 3529 block/bfq-iosched.c static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd, bfqd 3536 block/bfq-iosched.c min_budget = bfq_min_budget(bfqd); bfqd 3548 block/bfq-iosched.c bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last budg %d, budg left %d", bfqd 3550 block/bfq-iosched.c bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last max_budg %d, min budg %d", bfqd 3551 block/bfq-iosched.c budget, bfq_min_budget(bfqd)); bfqd 3552 block/bfq-iosched.c bfq_log_bfqq(bfqd, bfqq, "recalc_budg: sync %d, seeky %d", bfqd 3553 block/bfq-iosched.c bfq_bfqq_sync(bfqq), BFQQ_SEEKY(bfqd->in_service_queue)); bfqd 3587 block/bfq-iosched.c budget = min(budget * 2, bfqd->bfq_max_budget); bfqd 3602 block/bfq-iosched.c budget = min(budget * 2, bfqd->bfq_max_budget); bfqd 3614 block/bfq-iosched.c budget = min(budget * 4, bfqd->bfq_max_budget); bfqd 3661 block/bfq-iosched.c budget = bfqd->bfq_max_budget; bfqd 3666 block/bfq-iosched.c if (bfqd->budgets_assigned >= bfq_stats_min_budgets && bfqd 3667 block/bfq-iosched.c !bfqd->bfq_user_max_budget) bfqd 3668 block/bfq-iosched.c bfqq->max_budget = min(bfqq->max_budget, bfqd->bfq_max_budget); bfqd 3685 block/bfq-iosched.c bfq_log_bfqq(bfqd, bfqq, "head sect: %u, new budget %d", bfqd 3721 block/bfq-iosched.c static bool bfq_bfqq_is_slow(struct bfq_data *bfqd, struct bfq_queue *bfqq, bfqd 3733 block/bfq-iosched.c delta_ktime = bfqd->last_idling_start; bfqd 3736 block/bfq-iosched.c delta_ktime = ktime_sub(delta_ktime, bfqd->last_budget_start); bfqd 3741 block/bfq-iosched.c if (blk_queue_nonrot(bfqd->queue)) bfqd 3770 block/bfq-iosched.c slow = bfqq->entity.service < bfqd->bfq_max_budget / 2; bfqd 3773 block/bfq-iosched.c 
bfq_log_bfqq(bfqd, bfqq, "bfq_bfqq_is_slow: slow %d", slow); bfqd 3871 block/bfq-iosched.c static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd, bfqd 3877 block/bfq-iosched.c bfqd->bfq_wr_max_softrt_rate, bfqd 3878 block/bfq-iosched.c jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4); bfqd 3907 block/bfq-iosched.c void bfq_bfqq_expire(struct bfq_data *bfqd, bfqd 3919 block/bfq-iosched.c slow = bfq_bfqq_is_slow(bfqd, bfqq, compensate, reason, &delta); bfqd 3940 block/bfq-iosched.c bfq_bfqq_charge_time(bfqd, bfqq, delta); bfqd 3946 block/bfq-iosched.c if (bfqd->low_latency && bfqq->wr_coeff == 1) bfqd 3949 block/bfq-iosched.c if (bfqd->low_latency && bfqd->bfq_wr_max_softrt_rate > 0 && bfqd 3978 block/bfq-iosched.c bfqq->wr_coeff != bfqd->bfq_wr_coeff) bfqd 3980 block/bfq-iosched.c bfq_bfqq_softrt_next_start(bfqd, bfqq); bfqd 3990 block/bfq-iosched.c bfq_log_bfqq(bfqd, bfqq, bfqd 3999 block/bfq-iosched.c bfqd->rqs_injected = bfqd->wait_dispatch = false; bfqd 4000 block/bfq-iosched.c bfqd->waited_rq = NULL; bfqd 4006 block/bfq-iosched.c __bfq_bfqq_recalc_budget(bfqd, bfqq, reason); bfqd 4007 block/bfq-iosched.c if (__bfq_bfqq_expire(bfqd, bfqq, reason)) bfqd 4066 block/bfq-iosched.c bfq_log_bfqq(bfqq->bfqd, bfqq, bfqd 4078 block/bfq-iosched.c static bool idling_boosts_thr_without_issues(struct bfq_data *bfqd, bfqd 4082 block/bfq-iosched.c !blk_queue_nonrot(bfqd->queue) && !bfqd->hw_tag, bfqd 4116 block/bfq-iosched.c ((!blk_queue_nonrot(bfqd->queue) || !bfqd->hw_tag) && bfqd 4154 block/bfq-iosched.c bfqd->wr_busy_queues == 0; bfqd 4180 block/bfq-iosched.c struct bfq_data *bfqd = bfqq->bfqd; bfqd 4187 block/bfq-iosched.c if (unlikely(bfqd->strict_guarantees)) bfqd 4198 block/bfq-iosched.c if (bfqd->bfq_slice_idle == 0 || !bfq_bfqq_sync(bfqq) || bfqd 4203 block/bfq-iosched.c idling_boosts_thr_without_issues(bfqd, bfqq); bfqd 4206 block/bfq-iosched.c idling_needed_for_service_guarantees(bfqd, bfqq); bfqd 4242 block/bfq-iosched.c bfq_choose_bfqq_for_injection(struct bfq_data *bfqd) bfqd 4244 block/bfq-iosched.c struct bfq_queue *bfqq, *in_serv_bfqq = bfqd->in_service_queue; bfqd 4272 block/bfq-iosched.c time_is_before_eq_jiffies(bfqd->last_idling_start_jiffies + bfqd 4273 block/bfq-iosched.c bfqd->bfq_slice_idle) bfqd 4277 block/bfq-iosched.c if (bfqd->rq_in_driver >= limit) bfqd 4292 block/bfq-iosched.c list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list) bfqd 4314 block/bfq-iosched.c if (blk_queue_nonrot(bfqd->queue) && bfqd 4321 block/bfq-iosched.c if (bfqd->rq_in_driver < limit) { bfqd 4322 block/bfq-iosched.c bfqd->rqs_injected = true; bfqd 4334 block/bfq-iosched.c static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd) bfqd 4340 block/bfq-iosched.c bfqq = bfqd->in_service_queue; bfqd 4344 block/bfq-iosched.c bfq_log_bfqq(bfqd, bfqq, "select_queue: already in-service queue"); bfqd 4401 block/bfq-iosched.c hrtimer_try_to_cancel(&bfqd->idle_slice_timer); bfqd 4508 block/bfq-iosched.c else if (!idling_boosts_thr_without_issues(bfqd, bfqq) && bfqd 4509 block/bfq-iosched.c (bfqq->wr_coeff == 1 || bfqd->wr_busy_queues > 1 || bfqd 4511 block/bfq-iosched.c bfqq = bfq_choose_bfqq_for_injection(bfqd); bfqd 4520 block/bfq-iosched.c bfq_bfqq_expire(bfqd, bfqq, false, reason); bfqd 4522 block/bfq-iosched.c bfqq = bfq_set_in_service_queue(bfqd); bfqd 4524 block/bfq-iosched.c bfq_log_bfqq(bfqd, bfqq, "select_queue: checking new queue"); bfqd 4529 block/bfq-iosched.c bfq_log_bfqq(bfqd, bfqq, "select_queue: returned this queue"); bfqd 4531 block/bfq-iosched.c 
bfq_log(bfqd, "select_queue: no queue returned"); bfqd 4536 block/bfq-iosched.c static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq) bfqd 4541 block/bfq-iosched.c bfq_log_bfqq(bfqd, bfqq, bfqd 4549 block/bfq-iosched.c bfq_log_bfqq(bfqd, bfqq, "WARN: pending prio change"); bfqd 4560 block/bfq-iosched.c if (bfqq->wr_cur_max_time != bfqd->bfq_wr_rt_max_time || bfqd 4562 block/bfq-iosched.c bfq_wr_duration(bfqd))) bfqd 4565 block/bfq-iosched.c switch_back_to_interactive_wr(bfqq, bfqd); bfqd 4570 block/bfq-iosched.c bfqq->wr_cur_max_time != bfqd->bfq_wr_rt_max_time && bfqd 4592 block/bfq-iosched.c static struct request *bfq_dispatch_rq_from_bfqq(struct bfq_data *bfqd, bfqd 4602 block/bfq-iosched.c if (bfqq == bfqd->in_service_queue && bfqd->wait_dispatch) { bfqd 4603 block/bfq-iosched.c bfqd->wait_dispatch = false; bfqd 4604 block/bfq-iosched.c bfqd->waited_rq = rq; bfqd 4607 block/bfq-iosched.c bfq_dispatch_remove(bfqd->queue, rq); bfqd 4609 block/bfq-iosched.c if (bfqq != bfqd->in_service_queue) bfqd 4623 block/bfq-iosched.c bfq_update_wr_data(bfqd, bfqq); bfqd 4630 block/bfq-iosched.c if (!(bfq_tot_busy_queues(bfqd) > 1 && bfq_class_idle(bfqq))) bfqd 4633 block/bfq-iosched.c bfq_bfqq_expire(bfqd, bfqq, false, BFQQE_BUDGET_EXHAUSTED); bfqd 4641 block/bfq-iosched.c struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; bfqd 4647 block/bfq-iosched.c return !list_empty_careful(&bfqd->dispatch) || bfqd 4648 block/bfq-iosched.c bfq_tot_busy_queues(bfqd) > 0; bfqd 4653 block/bfq-iosched.c struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; bfqd 4657 block/bfq-iosched.c if (!list_empty(&bfqd->dispatch)) { bfqd 4658 block/bfq-iosched.c rq = list_first_entry(&bfqd->dispatch, struct request, bfqd 4702 block/bfq-iosched.c bfq_log(bfqd, "dispatch requests: %d busy queues", bfqd 4703 block/bfq-iosched.c bfq_tot_busy_queues(bfqd)); bfqd 4705 block/bfq-iosched.c if (bfq_tot_busy_queues(bfqd) == 0) bfqd 4720 block/bfq-iosched.c if (bfqd->strict_guarantees && bfqd->rq_in_driver > 0) bfqd 4723 block/bfq-iosched.c bfqq = bfq_select_queue(bfqd); bfqd 4727 block/bfq-iosched.c rq = bfq_dispatch_rq_from_bfqq(bfqd, bfqq); bfqd 4731 block/bfq-iosched.c bfqd->rq_in_driver++; bfqd 4793 block/bfq-iosched.c struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; bfqd 4798 block/bfq-iosched.c spin_lock_irq(&bfqd->lock); bfqd 4800 block/bfq-iosched.c in_serv_queue = bfqd->in_service_queue; bfqd 4808 block/bfq-iosched.c spin_unlock_irq(&bfqd->lock); bfqd 4829 block/bfq-iosched.c if (bfqq->bfqd) bfqd 4830 block/bfq-iosched.c bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p %d", bfqd 4865 block/bfq-iosched.c if (bfqq->bic && bfqq->bfqd->burst_size > 0) bfqd 4866 block/bfq-iosched.c bfqq->bfqd->burst_size--; bfqd 4896 block/bfq-iosched.c if (bfqq->bfqd && bfqq->bfqd->last_completed_rq_bfqq == bfqq) bfqd 4897 block/bfq-iosched.c bfqq->bfqd->last_completed_rq_bfqq = NULL; bfqd 4922 block/bfq-iosched.c static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq) bfqd 4924 block/bfq-iosched.c if (bfqq == bfqd->in_service_queue) { bfqd 4925 block/bfq-iosched.c __bfq_bfqq_expire(bfqd, bfqq, BFQQE_BUDGET_TIMEOUT); bfqd 4926 block/bfq-iosched.c bfq_schedule_dispatch(bfqd); bfqd 4929 block/bfq-iosched.c bfq_log_bfqq(bfqd, bfqq, "exit_bfqq: %p, %d", bfqq, bfqq->ref); bfqd 4933 block/bfq-iosched.c bfq_release_process_ref(bfqd, bfqq); bfqd 4939 block/bfq-iosched.c struct bfq_data *bfqd; bfqd 4942 block/bfq-iosched.c bfqd = bfqq->bfqd; /* NULL if scheduler already exited */ bfqd 4944 
block/bfq-iosched.c if (bfqq && bfqd) { bfqd 4947 block/bfq-iosched.c spin_lock_irqsave(&bfqd->lock, flags); bfqd 4949 block/bfq-iosched.c bfq_exit_bfqq(bfqd, bfqq); bfqd 4951 block/bfq-iosched.c spin_unlock_irqrestore(&bfqd->lock, flags); bfqd 4972 block/bfq-iosched.c struct bfq_data *bfqd = bfqq->bfqd; bfqd 4974 block/bfq-iosched.c if (!bfqd) bfqd 4980 block/bfq-iosched.c dev_err(bfqq->bfqd->queue->backing_dev_info->dev, bfqd 5014 block/bfq-iosched.c static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd, bfqd 5020 block/bfq-iosched.c struct bfq_data *bfqd = bic_to_bfqd(bic); bfqd 5028 block/bfq-iosched.c if (unlikely(!bfqd) || likely(bic->ioprio == ioprio)) bfqd 5035 block/bfq-iosched.c bfq_release_process_ref(bfqd, bfqq); bfqd 5036 block/bfq-iosched.c bfqq = bfq_get_queue(bfqd, bio, BLK_RW_ASYNC, bic); bfqd 5045 block/bfq-iosched.c static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq, bfqd 5055 block/bfq-iosched.c bfqq->bfqd = bfqd; bfqd 5082 block/bfq-iosched.c bfqq->max_budget = (2 * bfq_max_budget(bfqd)) / 3; bfqd 5105 block/bfq-iosched.c static struct bfq_queue **bfq_async_queue_prio(struct bfq_data *bfqd, bfqd 5124 block/bfq-iosched.c static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd, bfqd 5136 block/bfq-iosched.c bfqg = bfq_find_set_group(bfqd, __bio_blkcg(bio)); bfqd 5138 block/bfq-iosched.c bfqq = &bfqd->oom_bfqq; bfqd 5143 block/bfq-iosched.c async_bfqq = bfq_async_queue_prio(bfqd, bfqg, ioprio_class, bfqd 5152 block/bfq-iosched.c bfqd->queue->node); bfqd 5155 block/bfq-iosched.c bfq_init_bfqq(bfqd, bfqq, bic, current->pid, bfqd 5158 block/bfq-iosched.c bfq_log_bfqq(bfqd, bfqq, "allocated"); bfqd 5160 block/bfq-iosched.c bfqq = &bfqd->oom_bfqq; bfqd 5161 block/bfq-iosched.c bfq_log_bfqq(bfqd, bfqq, "using oom bfqq"); bfqd 5177 block/bfq-iosched.c bfq_log_bfqq(bfqd, bfqq, "get_queue, bfqq not in async: %p, %d", bfqd 5184 block/bfq-iosched.c bfq_log_bfqq(bfqd, bfqq, "get_queue, at end: %p, %d", bfqq, bfqq->ref); bfqd 5189 block/bfq-iosched.c static void bfq_update_io_thinktime(struct bfq_data *bfqd, bfqd 5195 block/bfq-iosched.c elapsed = min_t(u64, elapsed, 2ULL * bfqd->bfq_slice_idle); bfqd 5204 block/bfq-iosched.c bfq_update_io_seektime(struct bfq_data *bfqd, struct bfq_queue *bfqq, bfqd 5208 block/bfq-iosched.c bfqq->seek_history |= BFQ_RQ_SEEKY(bfqd, bfqq->last_request_pos, rq); bfqd 5211 block/bfq-iosched.c bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time && bfqd 5216 block/bfq-iosched.c static void bfq_update_has_short_ttime(struct bfq_data *bfqd, bfqd 5228 block/bfq-iosched.c bfqd->bfq_slice_idle == 0) bfqd 5233 block/bfq-iosched.c bfqd->bfq_wr_min_idle_time)) bfqd 5242 block/bfq-iosched.c bfqq->ttime.ttime_mean > bfqd->bfq_slice_idle)) bfqd 5340 block/bfq-iosched.c bfq_reset_inject_limit(bfqd, bfqq); bfqd 5347 block/bfq-iosched.c static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq, bfqd 5355 block/bfq-iosched.c if (bfqq == bfqd->in_service_queue && bfq_bfqq_wait_request(bfqq)) { bfqd 5376 block/bfq-iosched.c if (small_req && idling_boosts_thr_without_issues(bfqd, bfqq) && bfqd 5388 block/bfq-iosched.c hrtimer_try_to_cancel(&bfqd->idle_slice_timer); bfqd 5398 block/bfq-iosched.c bfq_bfqq_expire(bfqd, bfqq, false, bfqd 5404 block/bfq-iosched.c static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq) bfqd 5407 block/bfq-iosched.c *new_bfqq = bfq_setup_cooperator(bfqd, bfqq, rq, true); bfqd 5427 block/bfq-iosched.c bfq_merge_bfqqs(bfqd, RQ_BIC(rq), bfqd 5440 block/bfq-iosched.c 
bfqd 5441 block/bfq-iosched.c bfq_update_has_short_ttime(bfqd, bfqq, RQ_BIC(rq));
bfqd 5442 block/bfq-iosched.c bfq_update_io_seektime(bfqd, bfqq, rq);
bfqd 5448 block/bfq-iosched.c rq->fifo_time = ktime_get_ns() + bfqd->bfq_fifo_expire[rq_is_sync(rq)];
bfqd 5451 block/bfq-iosched.c bfq_rq_enqueued(bfqd, bfqq, rq);
bfqd 5492 block/bfq-iosched.c struct bfq_data *bfqd = q->elevator->elevator_data;
bfqd 5497 block/bfq-iosched.c spin_lock_irq(&bfqd->lock);
bfqd 5499 block/bfq-iosched.c spin_unlock_irq(&bfqd->lock);
bfqd 5503 block/bfq-iosched.c spin_unlock_irq(&bfqd->lock);
bfqd 5507 block/bfq-iosched.c spin_lock_irq(&bfqd->lock);
bfqd 5511 block/bfq-iosched.c list_add(&rq->queuelist, &bfqd->dispatch);
bfqd 5513 block/bfq-iosched.c list_add_tail(&rq->queuelist, &bfqd->dispatch);
bfqd 5515 block/bfq-iosched.c idle_timer_disabled = __bfq_insert_request(bfqd, rq);
bfqd 5537 block/bfq-iosched.c spin_unlock_irq(&bfqd->lock);
bfqd 5555 block/bfq-iosched.c static void bfq_update_hw_tag(struct bfq_data *bfqd)
bfqd 5557 block/bfq-iosched.c struct bfq_queue *bfqq = bfqd->in_service_queue;
bfqd 5559 block/bfq-iosched.c bfqd->max_rq_in_driver = max_t(int, bfqd->max_rq_in_driver,
bfqd 5560 block/bfq-iosched.c bfqd->rq_in_driver);
bfqd 5562 block/bfq-iosched.c if (bfqd->hw_tag == 1)
bfqd 5571 block/bfq-iosched.c if (bfqd->rq_in_driver + bfqd->queued <= BFQ_HW_QUEUE_THRESHOLD)
bfqd 5582 block/bfq-iosched.c bfqd->rq_in_driver < BFQ_HW_QUEUE_THRESHOLD)
bfqd 5585 block/bfq-iosched.c if (bfqd->hw_tag_samples++ < BFQ_HW_QUEUE_SAMPLES)
bfqd 5588 block/bfq-iosched.c bfqd->hw_tag = bfqd->max_rq_in_driver > BFQ_HW_QUEUE_THRESHOLD;
bfqd 5589 block/bfq-iosched.c bfqd->max_rq_in_driver = 0;
bfqd 5590 block/bfq-iosched.c bfqd->hw_tag_samples = 0;
bfqd 5592 block/bfq-iosched.c bfqd->nonrot_with_queueing =
bfqd 5593 block/bfq-iosched.c blk_queue_nonrot(bfqd->queue) && bfqd->hw_tag;
bfqd 5596 block/bfq-iosched.c static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd)
bfqd 5601 block/bfq-iosched.c bfq_update_hw_tag(bfqd);
bfqd 5603 block/bfq-iosched.c bfqd->rq_in_driver--;
bfqd 5615 block/bfq-iosched.c bfq_weights_tree_remove(bfqd, bfqq);
bfqd 5626 block/bfq-iosched.c delta_us = div_u64(now_ns - bfqd->last_completion, NSEC_PER_USEC);
bfqd 5645 block/bfq-iosched.c (bfqd->last_rq_max_size<<BFQ_RATE_SHIFT)/delta_us <
bfqd 5647 block/bfq-iosched.c bfq_update_rate_reset(bfqd, NULL);
bfqd 5648 block/bfq-iosched.c bfqd->last_completion = now_ns;
bfqd 5649 block/bfq-iosched.c bfqd->last_completed_rq_bfqq = bfqq;
bfqd 5664 block/bfq-iosched.c bfqq->wr_coeff != bfqd->bfq_wr_coeff)
bfqd 5666 block/bfq-iosched.c bfq_bfqq_softrt_next_start(bfqd, bfqq);
bfqd 5672 block/bfq-iosched.c if (bfqd->in_service_queue == bfqq) {
bfqd 5675 block/bfq-iosched.c bfq_arm_slice_timer(bfqd);
bfqd 5701 block/bfq-iosched.c bfq_bfqq_expire(bfqd, bfqq, false,
bfqd 5706 block/bfq-iosched.c bfq_bfqq_expire(bfqd, bfqq, false,
bfqd 5710 block/bfq-iosched.c if (!bfqd->rq_in_driver)
bfqd 5711 block/bfq-iosched.c bfq_schedule_dispatch(bfqd);
bfqd 5825 block/bfq-iosched.c static void bfq_update_inject_limit(struct bfq_data *bfqd,
bfqd 5828 block/bfq-iosched.c u64 tot_time_ns = ktime_get_ns() - bfqd->last_empty_occupied_ns;
bfqd 5831 block/bfq-iosched.c if (bfqq->last_serv_time_ns > 0 && bfqd->rqs_injected) {
bfqd 5838 block/bfq-iosched.c old_limit <= bfqd->max_rq_in_driver)
bfqd 5854 block/bfq-iosched.c if ((bfqq->last_serv_time_ns == 0 && bfqd->rq_in_driver == 1) ||
bfqd 5864 block/bfq-iosched.c } else if (!bfqd->rqs_injected && bfqd->rq_in_driver == 1)
bfqd 5878 block/bfq-iosched.c bfqd->waited_rq = NULL;
bfqd 5879 block/bfq-iosched.c bfqd->rqs_injected = false;
bfqd 5891 block/bfq-iosched.c struct bfq_data *bfqd;
bfqd 5913 block/bfq-iosched.c bfqd = bfqq->bfqd;
bfqd 5924 block/bfq-iosched.c spin_lock_irqsave(&bfqd->lock, flags);
bfqd 5926 block/bfq-iosched.c if (rq == bfqd->waited_rq)
bfqd 5927 block/bfq-iosched.c bfq_update_inject_limit(bfqd, bfqq);
bfqd 5929 block/bfq-iosched.c bfq_completed_request(bfqq, bfqd);
bfqd 5932 block/bfq-iosched.c spin_unlock_irqrestore(&bfqd->lock, flags);
bfqd 5983 block/bfq-iosched.c bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue");
bfqd 5996 block/bfq-iosched.c bfq_release_process_ref(bfqq->bfqd, bfqq);
bfqd 6000 block/bfq-iosched.c static struct bfq_queue *bfq_get_bfqq_handle_split(struct bfq_data *bfqd,
bfqd 6008 block/bfq-iosched.c if (likely(bfqq && bfqq != &bfqd->oom_bfqq))
bfqd 6016 block/bfq-iosched.c bfqq = bfq_get_queue(bfqd, bio, is_sync, bic);
bfqd 6020 block/bfq-iosched.c if ((bic->was_in_burst_list && bfqd->large_burst) ||
bfqd 6055 block/bfq-iosched.c &bfqd->burst_list);
bfqd 6106 block/bfq-iosched.c struct bfq_data *bfqd = q->elevator->elevator_data;
bfqd 6132 block/bfq-iosched.c bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio, false, is_sync,
bfqd 6138 block/bfq-iosched.c bfq_log_bfqq(bfqd, bfqq, "breaking apart bfqq");
bfqd 6148 block/bfq-iosched.c bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio,
bfqd 6158 block/bfq-iosched.c bfq_log_bfqq(bfqd, bfqq, "get_request %p: bfqq %p, %d",
bfqd 6170 block/bfq-iosched.c if (likely(bfqq != &bfqd->oom_bfqq) && bfqq_process_refs(bfqq) == 1) {
bfqd 6178 block/bfq-iosched.c bfq_bfqq_resume_state(bfqq, bfqd, bic,
bfqd 6204 block/bfq-iosched.c (bfqd->burst_size > 0 ||
bfqd 6205 block/bfq-iosched.c bfq_tot_busy_queues(bfqd) == 0))
bfqd 6206 block/bfq-iosched.c bfq_handle_burst(bfqd, bfqq);
bfqd 6212 block/bfq-iosched.c bfq_idle_slice_timer_body(struct bfq_data *bfqd, struct bfq_queue *bfqq)
bfqd 6217 block/bfq-iosched.c spin_lock_irqsave(&bfqd->lock, flags);
bfqd 6226 block/bfq-iosched.c if (bfqq != bfqd->in_service_queue) {
bfqd 6227 block/bfq-iosched.c spin_unlock_irqrestore(&bfqd->lock, flags);
bfqd 6251 block/bfq-iosched.c bfq_bfqq_expire(bfqd, bfqq, true, reason);
bfqd 6254 block/bfq-iosched.c spin_unlock_irqrestore(&bfqd->lock, flags);
bfqd 6255 block/bfq-iosched.c bfq_schedule_dispatch(bfqd);
bfqd 6264 block/bfq-iosched.c struct bfq_data *bfqd = container_of(timer, struct bfq_data,
bfqd 6266 block/bfq-iosched.c struct bfq_queue *bfqq = bfqd->in_service_queue;
bfqd 6277 block/bfq-iosched.c bfq_idle_slice_timer_body(bfqd, bfqq);
bfqd 6282 block/bfq-iosched.c static void __bfq_put_async_bfqq(struct bfq_data *bfqd,
bfqd 6287 block/bfq-iosched.c bfq_log(bfqd, "put_async_bfqq: %p", bfqq);
bfqd 6289 block/bfq-iosched.c bfq_bfqq_move(bfqd, bfqq, bfqd->root_group);
bfqd 6291 block/bfq-iosched.c bfq_log_bfqq(bfqd, bfqq, "put_async_bfqq: putting %p, %d",
bfqd 6304 block/bfq-iosched.c void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg)
bfqd 6310 block/bfq-iosched.c __bfq_put_async_bfqq(bfqd, &bfqg->async_bfqq[i][j]);
bfqd 6312 block/bfq-iosched.c __bfq_put_async_bfqq(bfqd, &bfqg->async_idle_bfqq);
bfqd 6319 block/bfq-iosched.c static unsigned int bfq_update_depths(struct bfq_data *bfqd,
bfqd 6335 block/bfq-iosched.c bfqd->word_depths[0][0] = max((1U << bt->sb.shift) >> 1, 1U);
bfqd 6341 block/bfq-iosched.c bfqd->word_depths[0][1] = max(((1U << bt->sb.shift) * 3) >> 2, 1U);
bfqd 6351 block/bfq-iosched.c bfqd->word_depths[1][0] = max(((1U << bt->sb.shift) * 3) >> 4, 1U);
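The bfq_update_depths() entries above and below (6335-6357) cap how much of each tag-bitmap word different request classes may consume, indexed as [wr_busy][is_sync] exactly as consumed by bfq_limit_depth() (entries 537-546). A compilable model of the arithmetic, with illustrative helper names:

    static unsigned word_depths[2][2];      /* [wr_busy][is_sync] */

    static unsigned at_least_one(unsigned v) { return v ? v : 1; }

    static void update_depths(unsigned word_bits)   /* word_bits = 1U << shift */
    {
            word_depths[0][0] = at_least_one(word_bits / 2);        /* async, no wr: 50% */
            word_depths[0][1] = at_least_one(word_bits * 3 / 4);    /* sync, no wr: 75% */
            word_depths[1][0] = at_least_one(word_bits * 3 / 16);   /* async, wr: ~19% */
            word_depths[1][1] = at_least_one(word_bits * 6 / 16);   /* sync, wr: ~37% */
    }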
bfqd 6353 block/bfq-iosched.c bfqd->word_depths[1][1] = max(((1U << bt->sb.shift) * 6) >> 4, 1U);
bfqd 6357 block/bfq-iosched.c min_shallow = min(min_shallow, bfqd->word_depths[i][j]);
bfqd 6364 block/bfq-iosched.c struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
bfqd 6368 block/bfq-iosched.c min_shallow = bfq_update_depths(bfqd, &tags->bitmap_tags);
bfqd 6380 block/bfq-iosched.c struct bfq_data *bfqd = e->elevator_data;
bfqd 6383 block/bfq-iosched.c hrtimer_cancel(&bfqd->idle_slice_timer);
bfqd 6385 block/bfq-iosched.c spin_lock_irq(&bfqd->lock);
bfqd 6386 block/bfq-iosched.c list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list)
bfqd 6387 block/bfq-iosched.c bfq_deactivate_bfqq(bfqd, bfqq, false, false);
bfqd 6388 block/bfq-iosched.c spin_unlock_irq(&bfqd->lock);
bfqd 6390 block/bfq-iosched.c hrtimer_cancel(&bfqd->idle_slice_timer);
bfqd 6393 block/bfq-iosched.c bfqg_and_blkg_put(bfqd->root_group);
bfqd 6396 block/bfq-iosched.c blkcg_deactivate_policy(bfqd->queue, &blkcg_policy_bfq);
bfqd 6398 block/bfq-iosched.c spin_lock_irq(&bfqd->lock);
bfqd 6399 block/bfq-iosched.c bfq_put_async_queues(bfqd, bfqd->root_group);
bfqd 6400 block/bfq-iosched.c kfree(bfqd->root_group);
bfqd 6401 block/bfq-iosched.c spin_unlock_irq(&bfqd->lock);
bfqd 6404 block/bfq-iosched.c kfree(bfqd);
bfqd 6408 block/bfq-iosched.c struct bfq_data *bfqd)
bfqd 6415 block/bfq-iosched.c root_group->bfqd = bfqd;
bfqd 6425 block/bfq-iosched.c struct bfq_data *bfqd;
bfqd 6432 block/bfq-iosched.c bfqd = kzalloc_node(sizeof(*bfqd), GFP_KERNEL, q->node);
bfqd 6433 block/bfq-iosched.c if (!bfqd) {
bfqd 6437 block/bfq-iosched.c eq->elevator_data = bfqd;
bfqd 6448 block/bfq-iosched.c bfq_init_bfqq(bfqd, &bfqd->oom_bfqq, NULL, 1, 0);
bfqd 6449 block/bfq-iosched.c bfqd->oom_bfqq.ref++;
bfqd 6450 block/bfq-iosched.c bfqd->oom_bfqq.new_ioprio = BFQ_DEFAULT_QUEUE_IOPRIO;
bfqd 6451 block/bfq-iosched.c bfqd->oom_bfqq.new_ioprio_class = IOPRIO_CLASS_BE;
bfqd 6452 block/bfq-iosched.c bfqd->oom_bfqq.entity.new_weight =
bfqd 6453 block/bfq-iosched.c bfq_ioprio_to_weight(bfqd->oom_bfqq.new_ioprio);
bfqd 6456 block/bfq-iosched.c bfq_clear_bfqq_just_created(&bfqd->oom_bfqq);
bfqd 6463 block/bfq-iosched.c bfqd->oom_bfqq.entity.prio_changed = 1;
bfqd 6465 block/bfq-iosched.c bfqd->queue = q;
bfqd 6467 block/bfq-iosched.c INIT_LIST_HEAD(&bfqd->dispatch);
bfqd 6469 block/bfq-iosched.c hrtimer_init(&bfqd->idle_slice_timer, CLOCK_MONOTONIC,
bfqd 6471 block/bfq-iosched.c bfqd->idle_slice_timer.function = bfq_idle_slice_timer;
bfqd 6473 block/bfq-iosched.c bfqd->queue_weights_tree = RB_ROOT_CACHED;
bfqd 6474 block/bfq-iosched.c bfqd->num_groups_with_pending_reqs = 0;
bfqd 6476 block/bfq-iosched.c INIT_LIST_HEAD(&bfqd->active_list);
bfqd 6477 block/bfq-iosched.c INIT_LIST_HEAD(&bfqd->idle_list);
bfqd 6478 block/bfq-iosched.c INIT_HLIST_HEAD(&bfqd->burst_list);
bfqd 6480 block/bfq-iosched.c bfqd->hw_tag = -1;
bfqd 6481 block/bfq-iosched.c bfqd->nonrot_with_queueing = blk_queue_nonrot(bfqd->queue);
bfqd 6483 block/bfq-iosched.c bfqd->bfq_max_budget = bfq_default_max_budget;
bfqd 6485 block/bfq-iosched.c bfqd->bfq_fifo_expire[0] = bfq_fifo_expire[0];
bfqd 6486 block/bfq-iosched.c bfqd->bfq_fifo_expire[1] = bfq_fifo_expire[1];
bfqd 6487 block/bfq-iosched.c bfqd->bfq_back_max = bfq_back_max;
bfqd 6488 block/bfq-iosched.c bfqd->bfq_back_penalty = bfq_back_penalty;
bfqd 6489 block/bfq-iosched.c bfqd->bfq_slice_idle = bfq_slice_idle;
bfqd 6490 block/bfq-iosched.c bfqd->bfq_timeout = bfq_timeout;
bfqd 6492 block/bfq-iosched.c bfqd->bfq_requests_within_timer = 120;
bfqd 6494 block/bfq-iosched.c bfqd->bfq_large_burst_thresh = 8;
bfqd 6495 block/bfq-iosched.c bfqd->bfq_burst_interval = msecs_to_jiffies(180);
bfqd 6497 block/bfq-iosched.c bfqd->low_latency = true;
bfqd 6502 block/bfq-iosched.c bfqd->bfq_wr_coeff = 30;
bfqd 6503 block/bfq-iosched.c bfqd->bfq_wr_rt_max_time = msecs_to_jiffies(300);
bfqd 6504 block/bfq-iosched.c bfqd->bfq_wr_max_time = 0;
bfqd 6505 block/bfq-iosched.c bfqd->bfq_wr_min_idle_time = msecs_to_jiffies(2000);
bfqd 6506 block/bfq-iosched.c bfqd->bfq_wr_min_inter_arr_async = msecs_to_jiffies(500);
bfqd 6507 block/bfq-iosched.c bfqd->bfq_wr_max_softrt_rate = 7000; /*
bfqd 6513 block/bfq-iosched.c bfqd->wr_busy_queues = 0;
bfqd 6519 block/bfq-iosched.c bfqd->rate_dur_prod = ref_rate[blk_queue_nonrot(bfqd->queue)] *
bfqd 6520 block/bfq-iosched.c ref_wr_duration[blk_queue_nonrot(bfqd->queue)];
bfqd 6521 block/bfq-iosched.c bfqd->peak_rate = ref_rate[blk_queue_nonrot(bfqd->queue)] * 2 / 3;
bfqd 6523 block/bfq-iosched.c spin_lock_init(&bfqd->lock);
bfqd 6540 block/bfq-iosched.c bfqd->root_group = bfq_create_group_hierarchy(bfqd, q->node);
bfqd 6541 block/bfq-iosched.c if (!bfqd->root_group)
bfqd 6543 block/bfq-iosched.c bfq_init_root_group(bfqd->root_group, bfqd);
bfqd 6544 block/bfq-iosched.c bfq_init_entity(&bfqd->oom_bfqq.entity, bfqd->root_group);
bfqd 6550 block/bfq-iosched.c kfree(bfqd);
bfqd 6587 block/bfq-iosched.c struct bfq_data *bfqd = e->elevator_data; \
bfqd 6595 block/bfq-iosched.c SHOW_FUNCTION(bfq_fifo_expire_sync_show, bfqd->bfq_fifo_expire[1], 2);
bfqd 6596 block/bfq-iosched.c SHOW_FUNCTION(bfq_fifo_expire_async_show, bfqd->bfq_fifo_expire[0], 2);
bfqd 6597 block/bfq-iosched.c SHOW_FUNCTION(bfq_back_seek_max_show, bfqd->bfq_back_max, 0);
bfqd 6598 block/bfq-iosched.c SHOW_FUNCTION(bfq_back_seek_penalty_show, bfqd->bfq_back_penalty, 0);
bfqd 6599 block/bfq-iosched.c SHOW_FUNCTION(bfq_slice_idle_show, bfqd->bfq_slice_idle, 2);
bfqd 6600 block/bfq-iosched.c SHOW_FUNCTION(bfq_max_budget_show, bfqd->bfq_user_max_budget, 0);
bfqd 6601 block/bfq-iosched.c SHOW_FUNCTION(bfq_timeout_sync_show, bfqd->bfq_timeout, 1);
bfqd 6602 block/bfq-iosched.c SHOW_FUNCTION(bfq_strict_guarantees_show, bfqd->strict_guarantees, 0);
bfqd 6603 block/bfq-iosched.c SHOW_FUNCTION(bfq_low_latency_show, bfqd->low_latency, 0);
bfqd 6609 block/bfq-iosched.c struct bfq_data *bfqd = e->elevator_data; \
bfqd 6614 block/bfq-iosched.c USEC_SHOW_FUNCTION(bfq_slice_idle_us_show, bfqd->bfq_slice_idle);
bfqd 6621 block/bfq-iosched.c struct bfq_data *bfqd = e->elevator_data; \
bfqd 6640 block/bfq-iosched.c STORE_FUNCTION(bfq_fifo_expire_sync_store, &bfqd->bfq_fifo_expire[1], 1,
bfqd 6642 block/bfq-iosched.c STORE_FUNCTION(bfq_fifo_expire_async_store, &bfqd->bfq_fifo_expire[0], 1,
bfqd 6644 block/bfq-iosched.c STORE_FUNCTION(bfq_back_seek_max_store, &bfqd->bfq_back_max, 0, INT_MAX, 0);
bfqd 6645 block/bfq-iosched.c STORE_FUNCTION(bfq_back_seek_penalty_store, &bfqd->bfq_back_penalty, 1,
bfqd 6647 block/bfq-iosched.c STORE_FUNCTION(bfq_slice_idle_store, &bfqd->bfq_slice_idle, 0, INT_MAX, 2);
bfqd 6653 block/bfq-iosched.c struct bfq_data *bfqd = e->elevator_data; \
bfqd 6667 block/bfq-iosched.c USEC_STORE_FUNCTION(bfq_slice_idle_us_store, &bfqd->bfq_slice_idle, 0,
bfqd 6674 block/bfq-iosched.c struct bfq_data *bfqd = e->elevator_data;
bfqd 6683 block/bfq-iosched.c bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd);
bfqd 6687 block/bfq-iosched.c bfqd->bfq_max_budget = __data;
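Entries 6519-6521 above seed the rate machinery at init time: rate_dur_prod precomputes reference-rate * reference-wr-duration for bfq_wr_duration(), and peak_rate starts at two thirds of the reference rate for the device class until real samples arrive. A sketch of that seeding:

    #include <stdint.h>

    struct rate_seed { uint64_t rate_dur_prod; uint32_t peak_rate; };

    static struct rate_seed seed_rates(int nonrot, const uint32_t ref_rate[2],
                                       const uint64_t ref_wr_duration[2])
    {
            struct rate_seed s;

            s.rate_dur_prod = (uint64_t)ref_rate[nonrot] * ref_wr_duration[nonrot];
            s.peak_rate = ref_rate[nonrot] * 2 / 3; /* provisional, entry 6521 */
            return s;
    }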
bfqd 6690 block/bfq-iosched.c bfqd->bfq_user_max_budget = __data;
bfqd 6702 block/bfq-iosched.c struct bfq_data *bfqd = e->elevator_data;
bfqd 6715 block/bfq-iosched.c bfqd->bfq_timeout = msecs_to_jiffies(__data);
bfqd 6716 block/bfq-iosched.c if (bfqd->bfq_user_max_budget == 0)
bfqd 6717 block/bfq-iosched.c bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd);
bfqd 6725 block/bfq-iosched.c struct bfq_data *bfqd = e->elevator_data;
bfqd 6735 block/bfq-iosched.c if (!bfqd->strict_guarantees && __data == 1
bfqd 6736 block/bfq-iosched.c && bfqd->bfq_slice_idle < 8 * NSEC_PER_MSEC)
bfqd 6737 block/bfq-iosched.c bfqd->bfq_slice_idle = 8 * NSEC_PER_MSEC;
bfqd 6739 block/bfq-iosched.c bfqd->strict_guarantees = __data;
bfqd 6747 block/bfq-iosched.c struct bfq_data *bfqd = e->elevator_data;
bfqd 6757 block/bfq-iosched.c if (__data == 0 && bfqd->low_latency != 0)
bfqd 6758 block/bfq-iosched.c bfq_end_wr(bfqd);
bfqd 6759 block/bfq-iosched.c bfqd->low_latency = __data;
bfqd 232 block/bfq-iosched.h struct bfq_data *bfqd;
bfqd 903 block/bfq-iosched.h void *bfqd;
bfqd 941 block/bfq-iosched.h void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq);
bfqd 942 block/bfq-iosched.h void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_queue *bfqq,
bfqd 944 block/bfq-iosched.h void __bfq_weights_tree_remove(struct bfq_data *bfqd,
bfqd 947 block/bfq-iosched.h void bfq_weights_tree_remove(struct bfq_data *bfqd,
bfqd 949 block/bfq-iosched.h void bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq,
bfqd 952 block/bfq-iosched.h void bfq_end_wr_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg);
bfqd 953 block/bfq-iosched.h void bfq_release_process_ref(struct bfq_data *bfqd, struct bfq_queue *bfqq);
bfqd 954 block/bfq-iosched.h void bfq_schedule_dispatch(struct bfq_data *bfqd);
bfqd 955 block/bfq-iosched.h void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg);
bfqd 972 block/bfq-iosched.h void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
bfqd 977 block/bfq-iosched.h void bfq_end_wr_async(struct bfq_data *bfqd);
bfqd 978 block/bfq-iosched.h struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
bfqd 982 block/bfq-iosched.h struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node);
bfqd 1025 block/bfq-iosched.h unsigned int bfq_tot_busy_queues(struct bfq_data *bfqd);
bfqd 1036 block/bfq-iosched.h void bfq_bfqq_charge_time(struct bfq_data *bfqd, struct bfq_queue *bfqq,
bfqd 1040 block/bfq-iosched.h bool next_queue_may_preempt(struct bfq_data *bfqd);
bfqd 1041 block/bfq-iosched.h struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd);
bfqd 1042 block/bfq-iosched.h bool __bfq_bfqd_reset_in_service(struct bfq_data *bfqd);
bfqd 1043 block/bfq-iosched.h void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
bfqd 1045 block/bfq-iosched.h void bfq_activate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq);
bfqd 1046 block/bfq-iosched.h void bfq_requeue_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
bfqd 1048 block/bfq-iosched.h void bfq_del_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq,
bfqd 1050 block/bfq-iosched.h void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq);
bfqd 1066 block/bfq-iosched.h #define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do { \
bfqd 1069 block/bfq-iosched.h blk_add_cgroup_trace_msg((bfqd)->queue, \
bfqd 1075 block/bfq-iosched.h #define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do { \
bfqd 1076 block/bfq-iosched.h blk_add_cgroup_trace_msg((bfqd)->queue, \
bfqd 1082 block/bfq-iosched.h #define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do { \
bfqd 1085 block/bfq-iosched.h blk_add_trace_msg((bfqd)->queue, "bfq%s%c " fmt, pid_str, \
bfqd 1089 block/bfq-iosched.h #define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do {} while (0)
bfqd 1093 block/bfq-iosched.h #define bfq_log(bfqd, fmt, args...) \
bfqd 1094 block/bfq-iosched.h blk_add_trace_msg((bfqd)->queue, "bfq " fmt, ##args)
bfqd 38 block/bfq-wf2q.c unsigned int bfq_tot_busy_queues(struct bfq_data *bfqd)
bfqd 40 block/bfq-wf2q.c return bfqd->busy_queues[0] + bfqd->busy_queues[1] +
bfqd 41 block/bfq-wf2q.c bfqd->busy_queues[2];
bfqd 153 block/bfq-wf2q.c group_entity = &bfqq->bfqd->root_group->entity;
bfqd 238 block/bfq-wf2q.c return bfqq->bfqd->root_group;
bfqd 299 block/bfq-wf2q.c bfq_log_bfqq(bfqq->bfqd, bfqq,
bfqd 302 block/bfq-wf2q.c bfq_log_bfqq(bfqq->bfqd, bfqq,
bfqd 483 block/bfq-wf2q.c struct bfq_data *bfqd = NULL;
bfqd 498 block/bfq-wf2q.c bfqd = (struct bfq_data *)bfqg->bfqd;
bfqd 501 block/bfq-wf2q.c list_add(&bfqq->bfqq_list, &bfqq->bfqd->active_list);
bfqd 503 block/bfq-wf2q.c if (bfqg != bfqd->root_group)
bfqd 537 block/bfq-wf2q.c bfq_log_bfqq(bfqq->bfqd, bfqq, "get_entity: %p %d",
bfqd 587 block/bfq-wf2q.c struct bfq_data *bfqd = NULL;
bfqd 599 block/bfq-wf2q.c bfqd = (struct bfq_data *)bfqg->bfqd;
bfqd 604 block/bfq-wf2q.c if (bfqg != bfqd->root_group)
bfqd 629 block/bfq-wf2q.c list_add(&bfqq->bfqq_list, &bfqq->bfqd->idle_list);
bfqd 738 block/bfq-wf2q.c struct bfq_data *bfqd = NULL;
bfqd 746 block/bfq-wf2q.c bfqd = bfqq->bfqd;
bfqd 751 block/bfq-wf2q.c bfqd = (struct bfq_data *)bfqg->bfqd;
bfqd 803 block/bfq-wf2q.c root = &bfqd->queue_weights_tree;
bfqd 804 block/bfq-wf2q.c __bfq_weights_tree_remove(bfqd, bfqq, root);
bfqd 813 block/bfq-wf2q.c bfq_weights_tree_add(bfqd, bfqq, root);
bfqd 855 block/bfq-wf2q.c bfq_log_bfqq(bfqq->bfqd, bfqq, "bfqq_served %d secs", served);
bfqd 885 block/bfq-wf2q.c void bfq_bfqq_charge_time(struct bfq_data *bfqd, struct bfq_queue *bfqq,
bfqd 892 block/bfq-wf2q.c (bfqd->bfq_max_budget * bounded_time_ms) / timeout_ms;
bfqd 1020 block/bfq-wf2q.c struct bfq_data *bfqd = bfqg->bfqd;
bfqd 1024 block/bfq-wf2q.c bfqd->num_groups_with_pending_reqs++;
bfqd 1507 block/bfq-wf2q.c bool next_queue_may_preempt(struct bfq_data *bfqd)
bfqd 1509 block/bfq-wf2q.c struct bfq_sched_data *sd = &bfqd->root_group->sched_data;
bfqd 1517 block/bfq-wf2q.c struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
bfqd 1523 block/bfq-wf2q.c if (bfq_tot_busy_queues(bfqd) == 0)
bfqd 1531 block/bfq-wf2q.c sd = &bfqd->root_group->sched_data;
bfqd 1610 block/bfq-wf2q.c bool __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
bfqd 1612 block/bfq-wf2q.c struct bfq_queue *in_serv_bfqq = bfqd->in_service_queue;
bfqd 1617 block/bfq-wf2q.c hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
bfqd 1618 block/bfq-wf2q.c bfqd->in_service_queue = NULL;
bfqd 1650 block/bfq-wf2q.c void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
bfqd 1658 block/bfq-wf2q.c void bfq_activate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
bfqd 1667 block/bfq-wf2q.c void bfq_requeue_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
bfqd 1673 block/bfq-wf2q.c bfqq == bfqd->in_service_queue, expiration);
bfqd 1681 block/bfq-wf2q.c void bfq_del_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq,
bfqd 1684 block/bfq-wf2q.c bfq_log_bfqq(bfqd, bfqq, "del from busy");
bfqd 1688 block/bfq-wf2q.c bfqd->busy_queues[bfqq->ioprio_class - 1]--;
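The bfq_bfqq_charge_time() entries above (bfq-wf2q.c, 885-892) bill a time-charged queue the fraction of bfq_max_budget corresponding to its elapsed service time over the budget timeout, with the time bounded by the timeout itself. A compilable model of that arithmetic:

    #include <stdint.h>

    static int time_to_service(int max_budget, unsigned time_ms,
                               unsigned timeout_ms)
    {
            unsigned bounded = time_ms < timeout_ms ? time_ms : timeout_ms;

            return (int)(((int64_t)max_budget * bounded) / timeout_ms);
    }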
bfqd 1691 block/bfq-wf2q.c bfqd->wr_busy_queues--;
bfqd 1695 block/bfq-wf2q.c bfq_deactivate_bfqq(bfqd, bfqq, true, expiration);
bfqd 1698 block/bfq-wf2q.c bfq_weights_tree_remove(bfqd, bfqq);
bfqd 1704 block/bfq-wf2q.c void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq)
bfqd 1706 block/bfq-wf2q.c bfq_log_bfqq(bfqd, bfqq, "add to busy");
bfqd 1708 block/bfq-wf2q.c bfq_activate_bfqq(bfqd, bfqq);
bfqd 1711 block/bfq-wf2q.c bfqd->busy_queues[bfqq->ioprio_class - 1]++;
bfqd 1715 block/bfq-wf2q.c bfq_weights_tree_add(bfqd, bfqq,
bfqd 1716 block/bfq-wf2q.c &bfqd->queue_weights_tree);
bfqd 1719 block/bfq-wf2q.c bfqd->wr_busy_queues++;
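The bfq_add_bfqq_busy()/bfq_del_bfqq_busy() entries above keep one busy counter per ioprio class (1-based class numbers, hence the "- 1") plus a counter of weight-raised busy queues, and bfq_tot_busy_queues() (wf2q entries 38-41) simply sums the three classes. A minimal model of that bookkeeping:

    struct sched_stats {
            unsigned busy_queues[3];        /* per ioprio class: RT, BE, IDLE */
            unsigned wr_busy_queues;
    };

    static void add_busy(struct sched_stats *s, int ioprio_class, int weight_raised)
    {
            s->busy_queues[ioprio_class - 1]++;     /* classes are 1-based */
            if (weight_raised)
                    s->wr_busy_queues++;
    }

    static unsigned tot_busy_queues(const struct sched_stats *s)
    {
            return s->busy_queues[0] + s->busy_queues[1] + s->busy_queues[2];
    }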