Lines Matching refs:cfqd — identifier cross-reference hits for cfqd in the CFQ I/O scheduler (block/cfq-iosched.c). Each entry gives the source file line number, the matching fragment, and its context: the enclosing function, or whether the hit is a struct member, a macro/function argument, or a local variable.

108 	struct cfq_data *cfqd;  member
395 static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
645 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...) do { \ argument
649 blk_add_trace_msg((cfqd)->queue, "cfq%d%c%c %s " fmt, (cfqq)->pid, \
655 #define cfq_log_cfqg(cfqd, cfqg, fmt, args...) do { \ argument
659 blk_add_trace_msg((cfqd)->queue, "%s " fmt, __pbuf, ##args); \
764 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \ argument
765 blk_add_trace_msg((cfqd)->queue, "cfq%d%c%c " fmt, (cfqq)->pid, \
769 #define cfq_log_cfqg(cfqd, cfqg, fmt, args...) do {} while (0) argument
782 #define cfq_log(cfqd, fmt, args...) \ argument
783 blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
795 static inline bool cfq_io_thinktime_big(struct cfq_data *cfqd, in cfq_io_thinktime_big() argument
802 slice = cfqd->cfq_group_idle; in cfq_io_thinktime_big()
804 slice = cfqd->cfq_slice_idle; in cfq_io_thinktime_big()
808 static inline bool iops_mode(struct cfq_data *cfqd) in iops_mode() argument
817 if (!cfqd->cfq_slice_idle && cfqd->hw_tag) in iops_mode()
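
The iops_mode() fragments above (file lines 808–817) encode one rule: when slice idling is off and the device has been seen to queue deeply (hw_tag), CFQ charges service in requests dispatched rather than in time. A minimal standalone model, with struct cfq_data reduced to the two fields these fragments actually read (the reduced struct is an illustration, not the kernel layout):

    #include <stdbool.h>

    /* Reduced model: only the two cfq_data fields the predicate touches. */
    struct cfq_data_model {
        unsigned int cfq_slice_idle;   /* 0 means slice idling disabled */
        int hw_tag;                    /* 1 once deep device queuing is seen */
    };

    /* Mirrors file line 817: account by IOPS, not time slices, when we
     * never idle and the hardware keeps many requests in flight. */
    static bool iops_mode(const struct cfq_data_model *cfqd)
    {
        return !cfqd->cfq_slice_idle && cfqd->hw_tag;
    }
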
843 struct cfq_data *cfqd, in cfq_group_busy_queues_wl() argument
854 static inline int cfqg_busy_async_queues(struct cfq_data *cfqd, in cfqg_busy_async_queues() argument
862 static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, bool is_sync,
871 static inline struct cfq_io_cq *cfq_cic_lookup(struct cfq_data *cfqd, in cfq_cic_lookup() argument
875 return icq_to_cic(ioc_lookup_icq(ioc, cfqd->queue)); in cfq_cic_lookup()
908 static inline void cfq_schedule_dispatch(struct cfq_data *cfqd) in cfq_schedule_dispatch() argument
910 if (cfqd->busy_queues) { in cfq_schedule_dispatch()
911 cfq_log(cfqd, "schedule dispatch"); in cfq_schedule_dispatch()
912 kblockd_schedule_work(&cfqd->unplug_work); in cfq_schedule_dispatch()
921 static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync, in cfq_prio_slice() argument
924 const int base_slice = cfqd->cfq_slice[sync]; in cfq_prio_slice()
932 cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) in cfq_prio_to_slice() argument
934 return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio); in cfq_prio_to_slice()
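
The cfq_prio_slice() fragments (file lines 921–934) show only the base_slice lookup; the remainder is a linear per-priority bonus. A runnable sketch of that arithmetic — CFQ_SLICE_SCALE and the "4 - prio" pivot are assumptions taken from conventional CFQ sources, as neither appears in this listing:

    #include <stdio.h>

    #define CFQ_SLICE_SCALE 5   /* assumed: one priority step is worth
                                   base_slice / CFQ_SLICE_SCALE */

    /* Model of cfq_prio_slice(): lower ioprio value = higher priority =
     * longer slice; prio 4 (the default) gets exactly base_slice. */
    static int prio_slice(int base_slice, unsigned short ioprio)
    {
        return base_slice + (base_slice / CFQ_SLICE_SCALE) * (4 - ioprio);
    }

    int main(void)
    {
        const int base = 100;   /* e.g. cfqd->cfq_slice[1] = 100ms sync slice */
        for (unsigned short p = 0; p < 8; p++)
            printf("ioprio %u -> %d ms\n", p, prio_slice(base, p));
        return 0;
    }
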
995 static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd, in cfq_group_get_avg_queues() argument
1001 unsigned busy = cfq_group_busy_queues_wl(rt, cfqd, cfqg); in cfq_group_get_avg_queues()
1011 cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg) in cfq_group_slice() argument
1013 return cfqd->cfq_target_latency * cfqg->vfraction >> CFQ_SERVICE_SHIFT; in cfq_group_slice()
1017 cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) in cfq_scaled_cfqq_slice() argument
1019 unsigned slice = cfq_prio_to_slice(cfqd, cfqq); in cfq_scaled_cfqq_slice()
1020 if (cfqd->cfq_latency) { in cfq_scaled_cfqq_slice()
1025 unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg, in cfq_scaled_cfqq_slice()
1027 unsigned sync_slice = cfqd->cfq_slice[1]; in cfq_scaled_cfqq_slice()
1029 unsigned group_slice = cfq_group_slice(cfqd, cfqq->cfqg); in cfq_scaled_cfqq_slice()
1032 unsigned base_low_slice = 2 * cfqd->cfq_slice_idle; in cfq_scaled_cfqq_slice()
1047 cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) in cfq_set_prio_slice() argument
1049 unsigned slice = cfq_scaled_cfqq_slice(cfqd, cfqq); in cfq_set_prio_slice()
1054 cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies); in cfq_set_prio_slice()
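
Read together, the cfq_scaled_cfqq_slice() fragments (file lines 1017–1032) implement the low_latency shrink: if the expected latency (sync slice × average busy queues in the class, file line 1025) exceeds the group's slice, the queue's priority slice is scaled down proportionally, with a floor derived from 2 × cfq_slice_idle. A standalone sketch; the exact min/max arrangement is reconstructed from these fragments and is an assumption:

    /* Model of cfq_scaled_cfqq_slice(): shrink a queue's slice so that all
     * 'iq' busy queues of its class still fit inside the group slice. */
    static unsigned scaled_slice(unsigned slice, unsigned iq,
                                 unsigned sync_slice, unsigned group_slice,
                                 unsigned slice_idle)
    {
        unsigned expect_latency = sync_slice * iq;

        if (expect_latency > group_slice) {
            /* floor: 2 * slice_idle, rescaled by this queue's own slice */
            unsigned base_low = 2 * slice_idle * slice / sync_slice;
            unsigned low = slice < base_low ? slice : base_low;
            unsigned shrunk = slice * group_slice / expect_latency;

            slice = shrunk > low ? shrunk : low;
        }
        return slice;
    }
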
1078 cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, sector_t last) in cfq_choose_req() argument
1103 back_max = cfqd->cfq_back_max * 2; in cfq_choose_req()
1113 d1 = (last - s1) * cfqd->cfq_back_penalty; in cfq_choose_req()
1120 d2 = (last - s2) * cfqd->cfq_back_penalty; in cfq_choose_req()
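
cfq_choose_req() (file lines 1078–1120) picks between two candidate requests by a seek-distance metric: forward distance counts at face value, a backward seek within back_max (note the doubling at file line 1103) is multiplied by cfq_back_penalty, and anything further behind is effectively never preferred. A sketch of that metric; treating long backward seeks as infinite cost is an assumption about the lines not captured in this listing:

    #include <stdint.h>

    typedef uint64_t sector_t;   /* stand-in for the kernel type */

    /* Model of the distance metric in cfq_choose_req(); the request whose
     * cost is smaller wins. */
    static sector_t seek_cost(sector_t pos, sector_t last,
                              unsigned back_max, unsigned back_penalty)
    {
        if (pos >= last)
            return pos - last;                   /* forward: cheap */
        if (last - pos <= back_max)
            return (last - pos) * back_penalty;  /* short backward: taxed */
        return (sector_t)-1;                     /* long backward: avoid */
    }
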
1209 cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq, in cfq_find_next_rq() argument
1229 return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last)); in cfq_find_next_rq()
1232 static unsigned long cfq_slice_offset(struct cfq_data *cfqd, in cfq_slice_offset() argument
1238 return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) - in cfq_slice_offset()
1239 cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio)); in cfq_slice_offset()
1351 cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg) in cfq_group_notify_queue_add() argument
1353 struct cfq_rb_root *st = &cfqd->grp_service_tree; in cfq_group_notify_queue_add()
1409 cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg) in cfq_group_notify_queue_del() argument
1411 struct cfq_rb_root *st = &cfqd->grp_service_tree; in cfq_group_notify_queue_del()
1420 cfq_log_cfqg(cfqd, cfqg, "del_from_rr group"); in cfq_group_notify_queue_del()
1458 static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg, in cfq_group_served() argument
1461 struct cfq_rb_root *st = &cfqd->grp_service_tree; in cfq_group_served()
1463 int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg) in cfq_group_served()
1470 if (iops_mode(cfqd)) in cfq_group_served()
1487 if (time_after(cfqd->workload_expires, jiffies)) { in cfq_group_served()
1488 cfqg->saved_wl_slice = cfqd->workload_expires in cfq_group_served()
1490 cfqg->saved_wl_type = cfqd->serving_wl_type; in cfq_group_served()
1491 cfqg->saved_wl_class = cfqd->serving_wl_class; in cfq_group_served()
1495 cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime, in cfq_group_served()
1497 cfq_log_cfqq(cfqq->cfqd, cfqq, in cfq_group_served()
1500 iops_mode(cfqd), cfqq->nr_sectors); in cfq_group_served()
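
cfq_group_served() (file lines 1458–1500) advances the group's virtual disk time by the service just consumed: request counts in iops_mode(), elapsed slice time otherwise, and the per-group workload state is saved only while workload_expires is still in the future (file line 1487). The vt=/min_vt= log line implies a weight scaling roughly like the sketch below; the fixed-point shift and default-weight values are assumptions, chosen only to show the principle that vdisktime grows inversely with weight:

    #include <stdint.h>

    #define CFQ_SERVICE_SHIFT      12    /* assumed fixed-point shift */
    #define CFQ_WEIGHT_LEGACY_DFL  100   /* assumed default weight */

    /* Model: heavier groups accrue virtual time more slowly, so the
     * min-vdisktime service tree schedules them again sooner. */
    static uint64_t scale_charge(unsigned long charge, unsigned weight)
    {
        uint64_t d = (uint64_t)charge << CFQ_SERVICE_SHIFT;

        return d * CFQ_WEIGHT_LEGACY_DFL / weight;
    }
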
1677 static struct cfq_group *cfq_lookup_cfqg(struct cfq_data *cfqd, in cfq_lookup_cfqg() argument
1682 blkg = blkg_lookup(blkcg, cfqd->queue); in cfq_lookup_cfqg()
2181 static struct cfq_group *cfq_lookup_cfqg(struct cfq_data *cfqd, in cfq_lookup_cfqg() argument
2184 return cfqd->root_group; in cfq_lookup_cfqg()
2199 static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq, in cfq_service_tree_add() argument
2225 rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies; in cfq_service_tree_add()
2274 cfq_group_notify_queue_add(cfqd, cfqq->cfqg); in cfq_service_tree_add()
2278 cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root, in cfq_prio_tree_lookup() argument
2313 static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq) in cfq_prio_tree_add() argument
2328 cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio]; in cfq_prio_tree_add()
2329 __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root, in cfq_prio_tree_add()
2341 static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq) in cfq_resort_rr_list() argument
2347 cfq_service_tree_add(cfqd, cfqq, 0); in cfq_resort_rr_list()
2348 cfq_prio_tree_add(cfqd, cfqq); in cfq_resort_rr_list()
2356 static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq) in cfq_add_cfqq_rr() argument
2358 cfq_log_cfqq(cfqd, cfqq, "add_to_rr"); in cfq_add_cfqq_rr()
2361 cfqd->busy_queues++; in cfq_add_cfqq_rr()
2363 cfqd->busy_sync_queues++; in cfq_add_cfqq_rr()
2365 cfq_resort_rr_list(cfqd, cfqq); in cfq_add_cfqq_rr()
2372 static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq) in cfq_del_cfqq_rr() argument
2374 cfq_log_cfqq(cfqd, cfqq, "del_from_rr"); in cfq_del_cfqq_rr()
2387 cfq_group_notify_queue_del(cfqd, cfqq->cfqg); in cfq_del_cfqq_rr()
2388 BUG_ON(!cfqd->busy_queues); in cfq_del_cfqq_rr()
2389 cfqd->busy_queues--; in cfq_del_cfqq_rr()
2391 cfqd->busy_sync_queues--; in cfq_del_cfqq_rr()
2423 struct cfq_data *cfqd = cfqq->cfqd; in cfq_add_rq_rb() local
2431 cfq_add_cfqq_rr(cfqd, cfqq); in cfq_add_rq_rb()
2437 cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq, cfqd->last_position); in cfq_add_rq_rb()
2443 cfq_prio_tree_add(cfqd, cfqq); in cfq_add_rq_rb()
2454 cfqg_stats_update_io_add(RQ_CFQG(rq), cfqq->cfqd->serving_group, in cfq_reposition_rq_rb()
2459 cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio) in cfq_find_rq_fmerge() argument
2465 cic = cfq_cic_lookup(cfqd, tsk->io_context); in cfq_find_rq_fmerge()
2478 struct cfq_data *cfqd = q->elevator->elevator_data; in cfq_activate_request() local
2480 cfqd->rq_in_driver++; in cfq_activate_request()
2481 cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d", in cfq_activate_request()
2482 cfqd->rq_in_driver); in cfq_activate_request()
2484 cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq); in cfq_activate_request()
2489 struct cfq_data *cfqd = q->elevator->elevator_data; in cfq_deactivate_request() local
2491 WARN_ON(!cfqd->rq_in_driver); in cfq_deactivate_request()
2492 cfqd->rq_in_driver--; in cfq_deactivate_request()
2493 cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d", in cfq_deactivate_request()
2494 cfqd->rq_in_driver); in cfq_deactivate_request()
2502 cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq); in cfq_remove_request()
2507 cfqq->cfqd->rq_queued--; in cfq_remove_request()
2518 struct cfq_data *cfqd = q->elevator->elevator_data; in cfq_merge() local
2521 __rq = cfq_find_rq_fmerge(cfqd, bio); in cfq_merge()
2551 struct cfq_data *cfqd = q->elevator->elevator_data; in cfq_merged_requests() local
2575 cfqq != cfqd->active_queue) in cfq_merged_requests()
2576 cfq_del_cfqq_rr(cfqd, cfqq); in cfq_merged_requests()
2582 struct cfq_data *cfqd = q->elevator->elevator_data; in cfq_allow_merge() local
2596 cic = cfq_cic_lookup(cfqd, current->io_context); in cfq_allow_merge()
2604 static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq) in cfq_del_timer() argument
2606 del_timer(&cfqd->idle_slice_timer); in cfq_del_timer()
2610 static void __cfq_set_active_queue(struct cfq_data *cfqd, in __cfq_set_active_queue() argument
2614 cfq_log_cfqq(cfqd, cfqq, "set_active wl_class:%d wl_type:%d", in __cfq_set_active_queue()
2615 cfqd->serving_wl_class, cfqd->serving_wl_type); in __cfq_set_active_queue()
2630 cfq_del_timer(cfqd, cfqq); in __cfq_set_active_queue()
2633 cfqd->active_queue = cfqq; in __cfq_set_active_queue()
2640 __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq, in __cfq_slice_expired() argument
2643 cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out); in __cfq_slice_expired()
2646 cfq_del_timer(cfqd, cfqq); in __cfq_slice_expired()
2665 cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq); in __cfq_slice_expired()
2668 cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid); in __cfq_slice_expired()
2671 cfq_group_served(cfqd, cfqq->cfqg, cfqq); in __cfq_slice_expired()
2674 cfq_del_cfqq_rr(cfqd, cfqq); in __cfq_slice_expired()
2676 cfq_resort_rr_list(cfqd, cfqq); in __cfq_slice_expired()
2678 if (cfqq == cfqd->active_queue) in __cfq_slice_expired()
2679 cfqd->active_queue = NULL; in __cfq_slice_expired()
2681 if (cfqd->active_cic) { in __cfq_slice_expired()
2682 put_io_context(cfqd->active_cic->icq.ioc); in __cfq_slice_expired()
2683 cfqd->active_cic = NULL; in __cfq_slice_expired()
2687 static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out) in cfq_slice_expired() argument
2689 struct cfq_queue *cfqq = cfqd->active_queue; in cfq_slice_expired()
2692 __cfq_slice_expired(cfqd, cfqq, timed_out); in cfq_slice_expired()
2699 static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd) in cfq_get_next_queue() argument
2701 struct cfq_rb_root *st = st_for(cfqd->serving_group, in cfq_get_next_queue()
2702 cfqd->serving_wl_class, cfqd->serving_wl_type); in cfq_get_next_queue()
2704 if (!cfqd->rq_queued) in cfq_get_next_queue()
2715 static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd) in cfq_get_next_queue_forced() argument
2722 if (!cfqd->rq_queued) in cfq_get_next_queue_forced()
2725 cfqg = cfq_get_next_cfqg(cfqd); in cfq_get_next_queue_forced()
2738 static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd, in cfq_set_active_queue() argument
2742 cfqq = cfq_get_next_queue(cfqd); in cfq_set_active_queue()
2744 __cfq_set_active_queue(cfqd, cfqq); in cfq_set_active_queue()
2748 static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd, in cfq_dist_from_last() argument
2751 if (blk_rq_pos(rq) >= cfqd->last_position) in cfq_dist_from_last()
2752 return blk_rq_pos(rq) - cfqd->last_position; in cfq_dist_from_last()
2754 return cfqd->last_position - blk_rq_pos(rq); in cfq_dist_from_last()
2757 static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq, in cfq_rq_close() argument
2760 return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR; in cfq_rq_close()
2763 static struct cfq_queue *cfqq_close(struct cfq_data *cfqd, in cfqq_close() argument
2766 struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio]; in cfqq_close()
2769 sector_t sector = cfqd->last_position; in cfqq_close()
2778 __cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL); in cfqq_close()
2787 if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq)) in cfqq_close()
2798 if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq)) in cfqq_close()
2814 static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd, in cfq_close_cooperator() argument
2837 cfqq = cfqq_close(cfqd, cur_cfqq); in cfq_close_cooperator()
2866 static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq) in cfq_should_idle() argument
2874 if (!cfqd->cfq_slice_idle) in cfq_should_idle()
2883 !(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)) in cfq_should_idle()
2891 !cfq_io_thinktime_big(cfqd, &st->ttime, false)) in cfq_should_idle()
2893 cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d", st->count); in cfq_should_idle()
2897 static void cfq_arm_slice_timer(struct cfq_data *cfqd) in cfq_arm_slice_timer() argument
2899 struct cfq_queue *cfqq = cfqd->active_queue; in cfq_arm_slice_timer()
2908 if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag) in cfq_arm_slice_timer()
2917 if (!cfq_should_idle(cfqd, cfqq)) { in cfq_arm_slice_timer()
2919 if (cfqd->cfq_group_idle) in cfq_arm_slice_timer()
2920 group_idle = cfqd->cfq_group_idle; in cfq_arm_slice_timer()
2934 cic = cfqd->active_cic; in cfq_arm_slice_timer()
2945 cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%lu", in cfq_arm_slice_timer()
2957 sl = cfqd->cfq_group_idle; in cfq_arm_slice_timer()
2959 sl = cfqd->cfq_slice_idle; in cfq_arm_slice_timer()
2961 mod_timer(&cfqd->idle_slice_timer, jiffies + sl); in cfq_arm_slice_timer()
2963 cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl, in cfq_arm_slice_timer()
2972 struct cfq_data *cfqd = q->elevator->elevator_data; in cfq_dispatch_insert() local
2975 cfq_log_cfqq(cfqd, cfqq, "dispatch_insert"); in cfq_dispatch_insert()
2977 cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq); in cfq_dispatch_insert()
2983 cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++; in cfq_dispatch_insert()
3006 cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq); in cfq_check_fifo()
3011 cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq) in cfq_prio_to_maxrq() argument
3013 const int base_rq = cfqd->cfq_slice_async_rq; in cfq_prio_to_maxrq()
3075 static enum wl_type_t cfq_choose_wl_type(struct cfq_data *cfqd, in cfq_choose_wl_type() argument
3099 choose_wl_class_and_type(struct cfq_data *cfqd, struct cfq_group *cfqg) in choose_wl_class_and_type() argument
3105 enum wl_class_t original_class = cfqd->serving_wl_class; in choose_wl_class_and_type()
3108 if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg)) in choose_wl_class_and_type()
3109 cfqd->serving_wl_class = RT_WORKLOAD; in choose_wl_class_and_type()
3110 else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg)) in choose_wl_class_and_type()
3111 cfqd->serving_wl_class = BE_WORKLOAD; in choose_wl_class_and_type()
3113 cfqd->serving_wl_class = IDLE_WORKLOAD; in choose_wl_class_and_type()
3114 cfqd->workload_expires = jiffies + 1; in choose_wl_class_and_type()
3118 if (original_class != cfqd->serving_wl_class) in choose_wl_class_and_type()
3126 st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type); in choose_wl_class_and_type()
3132 if (count && !time_after(jiffies, cfqd->workload_expires)) in choose_wl_class_and_type()
3137 cfqd->serving_wl_type = cfq_choose_wl_type(cfqd, cfqg, in choose_wl_class_and_type()
3138 cfqd->serving_wl_class); in choose_wl_class_and_type()
3139 st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type); in choose_wl_class_and_type()
3147 group_slice = cfq_group_slice(cfqd, cfqg); in choose_wl_class_and_type()
3150 max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_wl_class], in choose_wl_class_and_type()
3151 cfq_group_busy_queues_wl(cfqd->serving_wl_class, cfqd, in choose_wl_class_and_type()
3154 if (cfqd->serving_wl_type == ASYNC_WORKLOAD) { in choose_wl_class_and_type()
3164 tmp = cfqd->cfq_target_latency * in choose_wl_class_and_type()
3165 cfqg_busy_async_queues(cfqd, cfqg); in choose_wl_class_and_type()
3166 tmp = tmp/cfqd->busy_queues; in choose_wl_class_and_type()
3171 slice = slice * cfqd->cfq_slice[0] / cfqd->cfq_slice[1]; in choose_wl_class_and_type()
3174 slice = max(slice, 2 * cfqd->cfq_slice_idle); in choose_wl_class_and_type()
3177 cfq_log(cfqd, "workload slice:%d", slice); in choose_wl_class_and_type()
3178 cfqd->workload_expires = jiffies + slice; in choose_wl_class_and_type()
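
The tail of choose_wl_class_and_type() (file lines 3147–3178) sizes the next workload slice: the group slice is split in proportion to the chosen workload's share of the group's busy queues; an async slice is additionally capped by the async share of cfq_target_latency (file lines 3164–3166) and converted by the async/sync slice ratio (file line 3171), while a sync slice gets a 2 × cfq_slice_idle floor (file line 3174). A runnable model under that reading:

    /* Model of the workload-slice sizing; all inputs share one time unit
     * (jiffies in the original). busy_in_class should be the larger of the
     * tracked average and the instantaneous count, per file lines 3150-3151. */
    static unsigned workload_slice(unsigned group_slice, unsigned count,
                                   unsigned busy_in_class, int is_async,
                                   unsigned busy_async, unsigned busy_total,
                                   unsigned target_latency,
                                   unsigned slice_async, unsigned slice_sync,
                                   unsigned slice_idle)
    {
        unsigned slice = group_slice * count / busy_in_class;

        if (is_async) {
            unsigned cap = target_latency * busy_async / busy_total;

            if (cap < slice)
                slice = cap;                          /* async latency cap */
            slice = slice * slice_async / slice_sync; /* sync -> async units */
        } else if (slice < 2 * slice_idle) {
            slice = 2 * slice_idle;                   /* sync floor */
        }
        return slice;
    }
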
3181 static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd) in cfq_get_next_cfqg() argument
3183 struct cfq_rb_root *st = &cfqd->grp_service_tree; in cfq_get_next_cfqg()
3193 static void cfq_choose_cfqg(struct cfq_data *cfqd) in cfq_choose_cfqg() argument
3195 struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd); in cfq_choose_cfqg()
3197 cfqd->serving_group = cfqg; in cfq_choose_cfqg()
3201 cfqd->workload_expires = jiffies + cfqg->saved_wl_slice; in cfq_choose_cfqg()
3202 cfqd->serving_wl_type = cfqg->saved_wl_type; in cfq_choose_cfqg()
3203 cfqd->serving_wl_class = cfqg->saved_wl_class; in cfq_choose_cfqg()
3205 cfqd->workload_expires = jiffies - 1; in cfq_choose_cfqg()
3207 choose_wl_class_and_type(cfqd, cfqg); in cfq_choose_cfqg()
3214 static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd) in cfq_select_queue() argument
3218 cfqq = cfqd->active_queue; in cfq_select_queue()
3222 if (!cfqd->rq_queued) in cfq_select_queue()
3245 && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) { in cfq_select_queue()
3265 new_cfqq = cfq_close_cooperator(cfqd, cfqq); in cfq_select_queue()
3277 if (timer_pending(&cfqd->idle_slice_timer)) { in cfq_select_queue()
3293 if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) { in cfq_select_queue()
3303 if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1 && in cfq_select_queue()
3305 !cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true)) { in cfq_select_queue()
3311 cfq_slice_expired(cfqd, 0); in cfq_select_queue()
3318 cfq_choose_cfqg(cfqd); in cfq_select_queue()
3320 cfqq = cfq_set_active_queue(cfqd, new_cfqq); in cfq_select_queue()
3330 cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq); in __cfq_forced_dispatch_cfqq()
3337 __cfq_slice_expired(cfqq->cfqd, cfqq, 0); in __cfq_forced_dispatch_cfqq()
3345 static int cfq_forced_dispatch(struct cfq_data *cfqd) in cfq_forced_dispatch() argument
3351 cfq_slice_expired(cfqd, 0); in cfq_forced_dispatch()
3352 while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) { in cfq_forced_dispatch()
3353 __cfq_set_active_queue(cfqd, cfqq); in cfq_forced_dispatch()
3357 BUG_ON(cfqd->busy_queues); in cfq_forced_dispatch()
3359 cfq_log(cfqd, "forced_dispatch=%d", dispatched); in cfq_forced_dispatch()
3363 static inline bool cfq_slice_used_soon(struct cfq_data *cfqd, in cfq_slice_used_soon() argument
3369 if (time_after(jiffies + cfqd->cfq_slice_idle * cfqq->dispatched, in cfq_slice_used_soon()
3376 static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq) in cfq_may_dispatch() argument
3383 if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_flight[BLK_RW_ASYNC]) in cfq_may_dispatch()
3389 if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq)) in cfq_may_dispatch()
3392 max_dispatch = max_t(unsigned int, cfqd->cfq_quantum / 2, 1); in cfq_may_dispatch()
3414 if (cfq_cfqq_sync(cfqq) && cfqd->busy_sync_queues == 1) in cfq_may_dispatch()
3420 if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq) && in cfq_may_dispatch()
3427 if (cfqd->busy_queues == 1 || promote_sync) in cfq_may_dispatch()
3436 max_dispatch = cfqd->cfq_quantum; in cfq_may_dispatch()
3444 if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) { in cfq_may_dispatch()
3445 unsigned long last_sync = jiffies - cfqd->last_delayed_sync; in cfq_may_dispatch()
3448 depth = last_sync / cfqd->cfq_slice[1]; in cfq_may_dispatch()
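
The fragments at file lines 3444–3448 are the low_latency async throttle inside cfq_may_dispatch(): the longer it has been since a sync request was last delayed (last_delayed_sync), the deeper async dispatch is allowed to go, growing by one request per sync slice length. A sketch; clamping the result against the prevailing max_dispatch is an assumption about adjacent lines not captured here:

    /* Model: async dispatch depth ramps up with quiet time since the last
     * delayed sync completion. */
    static unsigned async_depth(unsigned long now,
                                unsigned long last_delayed_sync,
                                unsigned slice_sync, unsigned max_dispatch)
    {
        unsigned long last_sync = now - last_delayed_sync;
        unsigned depth = last_sync / slice_sync;

        return depth < max_dispatch ? depth : max_dispatch;
    }
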
3465 static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq) in cfq_dispatch_request() argument
3471 if (!cfq_may_dispatch(cfqd, cfqq)) in cfq_dispatch_request()
3484 cfq_dispatch_insert(cfqd->queue, rq); in cfq_dispatch_request()
3486 if (!cfqd->active_cic) { in cfq_dispatch_request()
3490 cfqd->active_cic = cic; in cfq_dispatch_request()
3502 struct cfq_data *cfqd = q->elevator->elevator_data; in cfq_dispatch_requests() local
3505 if (!cfqd->busy_queues) in cfq_dispatch_requests()
3509 return cfq_forced_dispatch(cfqd); in cfq_dispatch_requests()
3511 cfqq = cfq_select_queue(cfqd); in cfq_dispatch_requests()
3518 if (!cfq_dispatch_request(cfqd, cfqq)) in cfq_dispatch_requests()
3528 if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) && in cfq_dispatch_requests()
3529 cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) || in cfq_dispatch_requests()
3532 cfq_slice_expired(cfqd, 0); in cfq_dispatch_requests()
3535 cfq_log_cfqq(cfqd, cfqq, "dispatched a request"); in cfq_dispatch_requests()
3548 struct cfq_data *cfqd = cfqq->cfqd; in cfq_put_queue() local
3557 cfq_log_cfqq(cfqd, cfqq, "put_queue"); in cfq_put_queue()
3562 if (unlikely(cfqd->active_queue == cfqq)) { in cfq_put_queue()
3563 __cfq_slice_expired(cfqd, cfqq, 0); in cfq_put_queue()
3564 cfq_schedule_dispatch(cfqd); in cfq_put_queue()
3593 static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq) in cfq_exit_cfqq() argument
3595 if (unlikely(cfqq == cfqd->active_queue)) { in cfq_exit_cfqq()
3596 __cfq_slice_expired(cfqd, cfqq, 0); in cfq_exit_cfqq()
3597 cfq_schedule_dispatch(cfqd); in cfq_exit_cfqq()
3615 struct cfq_data *cfqd = cic_to_cfqd(cic); in cfq_exit_icq() local
3618 cfq_exit_cfqq(cfqd, cic_to_cfqq(cic, false)); in cfq_exit_icq()
3623 cfq_exit_cfqq(cfqd, cic_to_cfqq(cic, true)); in cfq_exit_icq()
3673 struct cfq_data *cfqd = cic_to_cfqd(cic); in check_ioprio_changed() local
3680 if (unlikely(!cfqd) || likely(cic->ioprio == ioprio)) in check_ioprio_changed()
3686 cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic, bio); in check_ioprio_changed()
3697 static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq, in cfq_init_cfqq() argument
3705 cfqq->cfqd = cfqd; in cfq_init_cfqq()
3720 struct cfq_data *cfqd = cic_to_cfqd(cic); in check_blkcg_changed() local
3732 if (unlikely(!cfqd) || likely(cic->blkcg_serial_nr == serial_nr)) in check_blkcg_changed()
3741 cfq_log_cfqq(cfqd, cfqq, "changed cgroup"); in check_blkcg_changed()
3748 cfq_log_cfqq(cfqd, cfqq, "changed cgroup"); in check_blkcg_changed()
3778 cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic, in cfq_get_queue() argument
3788 cfqg = cfq_lookup_cfqg(cfqd, bio_blkcg(bio)); in cfq_get_queue()
3790 cfqq = &cfqd->oom_cfqq; in cfq_get_queue()
3807 cfqd->queue->node); in cfq_get_queue()
3809 cfqq = &cfqd->oom_cfqq; in cfq_get_queue()
3813 cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync); in cfq_get_queue()
3816 cfq_log_cfqq(cfqd, cfqq, "alloced"); in cfq_get_queue()
3841 cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_queue *cfqq, in cfq_update_io_thinktime() argument
3845 __cfq_update_io_thinktime(&cic->ttime, cfqd->cfq_slice_idle); in cfq_update_io_thinktime()
3847 cfqd->cfq_slice_idle); in cfq_update_io_thinktime()
3850 __cfq_update_io_thinktime(&cfqq->cfqg->ttime, cfqd->cfq_group_idle); in cfq_update_io_thinktime()
3855 cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq, in cfq_update_io_seektime() argument
3868 if (blk_queue_nonrot(cfqd->queue)) in cfq_update_io_seektime()
3879 cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq, in cfq_update_idle_window() argument
3898 !cfqd->cfq_slice_idle || in cfq_update_idle_window()
3902 if (cic->ttime.ttime_mean > cfqd->cfq_slice_idle) in cfq_update_idle_window()
3909 cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle); in cfq_update_idle_window()
3922 cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq, in cfq_should_preempt() argument
3927 cfqq = cfqd->active_queue; in cfq_should_preempt()
3957 if (cfqd->serving_wl_type == SYNC_NOIDLE_WORKLOAD && in cfq_should_preempt()
3977 if (RB_EMPTY_ROOT(&cfqq->sort_list) && !cfq_should_idle(cfqd, cfqq)) in cfq_should_preempt()
3980 if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq)) in cfq_should_preempt()
3987 if (cfq_rq_close(cfqd, cfqq, rq)) in cfq_should_preempt()
3997 static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq) in cfq_preempt_queue() argument
3999 enum wl_type_t old_type = cfqq_type(cfqd->active_queue); in cfq_preempt_queue()
4001 cfq_log_cfqq(cfqd, cfqq, "preempt"); in cfq_preempt_queue()
4002 cfq_slice_expired(cfqd, 1); in cfq_preempt_queue()
4017 cfq_service_tree_add(cfqd, cfqq, 1); in cfq_preempt_queue()
4028 cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq, in cfq_rq_enqueued() argument
4033 cfqd->rq_queued++; in cfq_rq_enqueued()
4037 cfq_update_io_thinktime(cfqd, cfqq, cic); in cfq_rq_enqueued()
4038 cfq_update_io_seektime(cfqd, cfqq, rq); in cfq_rq_enqueued()
4039 cfq_update_idle_window(cfqd, cfqq, cic); in cfq_rq_enqueued()
4043 if (cfqq == cfqd->active_queue) { in cfq_rq_enqueued()
4056 cfqd->busy_queues > 1) { in cfq_rq_enqueued()
4057 cfq_del_timer(cfqd, cfqq); in cfq_rq_enqueued()
4059 __blk_run_queue(cfqd->queue); in cfq_rq_enqueued()
4065 } else if (cfq_should_preempt(cfqd, cfqq, rq)) { in cfq_rq_enqueued()
4072 cfq_preempt_queue(cfqd, cfqq); in cfq_rq_enqueued()
4073 __blk_run_queue(cfqd->queue); in cfq_rq_enqueued()
4079 struct cfq_data *cfqd = q->elevator->elevator_data; in cfq_insert_request() local
4082 cfq_log_cfqq(cfqd, cfqq, "insert_request"); in cfq_insert_request()
4085 rq->fifo_time = jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]; in cfq_insert_request()
4088 cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group, in cfq_insert_request()
4090 cfq_rq_enqueued(cfqd, cfqq, rq); in cfq_insert_request()
4097 static void cfq_update_hw_tag(struct cfq_data *cfqd) in cfq_update_hw_tag() argument
4099 struct cfq_queue *cfqq = cfqd->active_queue; in cfq_update_hw_tag()
4101 if (cfqd->rq_in_driver > cfqd->hw_tag_est_depth) in cfq_update_hw_tag()
4102 cfqd->hw_tag_est_depth = cfqd->rq_in_driver; in cfq_update_hw_tag()
4104 if (cfqd->hw_tag == 1) in cfq_update_hw_tag()
4107 if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN && in cfq_update_hw_tag()
4108 cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN) in cfq_update_hw_tag()
4118 CFQ_HW_QUEUE_MIN && cfqd->rq_in_driver < CFQ_HW_QUEUE_MIN) in cfq_update_hw_tag()
4121 if (cfqd->hw_tag_samples++ < 50) in cfq_update_hw_tag()
4124 if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN) in cfq_update_hw_tag()
4125 cfqd->hw_tag = 1; in cfq_update_hw_tag()
4127 cfqd->hw_tag = 0; in cfq_update_hw_tag()
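
cfq_update_hw_tag() (file lines 4097–4127) is a sampling heuristic: track the deepest rq_in_driver ever seen, skip samples taken under too little load, and after roughly 50 qualifying samples decide whether the device genuinely queues (hw_tag 1) or not (hw_tag 0). A standalone model of those fragments; it omits the extra per-active-queue depth check around file line 4118, and the CFQ_HW_QUEUE_MIN value is an assumption:

    #define CFQ_HW_QUEUE_MIN 5   /* assumed threshold; not shown in listing */

    struct hwtag_state {
        int hw_tag;                    /* starts at -1 (file line 4584) */
        int hw_tag_samples;
        unsigned int hw_tag_est_depth;
    };

    static void update_hw_tag(struct hwtag_state *s,
                              unsigned int rq_in_driver,
                              unsigned int rq_queued)
    {
        if (rq_in_driver > s->hw_tag_est_depth)
            s->hw_tag_est_depth = rq_in_driver;    /* deepest depth seen */

        if (s->hw_tag == 1)
            return;                                /* already decided */
        if (rq_queued <= CFQ_HW_QUEUE_MIN &&
            rq_in_driver <= CFQ_HW_QUEUE_MIN)
            return;                                /* too idle to judge */
        if (s->hw_tag_samples++ < 50)
            return;                                /* keep sampling */

        s->hw_tag = s->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN;
    }
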
4130 static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq) in cfq_should_wait_busy() argument
4132 struct cfq_io_cq *cic = cfqd->active_cic; in cfq_should_wait_busy()
4143 if (cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true)) in cfq_should_wait_busy()
4170 struct cfq_data *cfqd = cfqq->cfqd; in cfq_completed_request() local
4175 cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d", in cfq_completed_request()
4178 cfq_update_hw_tag(cfqd); in cfq_completed_request()
4180 WARN_ON(!cfqd->rq_in_driver); in cfq_completed_request()
4182 cfqd->rq_in_driver--; in cfq_completed_request()
4188 cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--; in cfq_completed_request()
4202 if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now)) in cfq_completed_request()
4203 cfqd->last_delayed_sync = now; in cfq_completed_request()
4214 if (cfqd->active_queue == cfqq) { in cfq_completed_request()
4218 cfq_set_prio_slice(cfqd, cfqq); in cfq_completed_request()
4226 if (cfq_should_wait_busy(cfqd, cfqq)) { in cfq_completed_request()
4227 unsigned long extend_sl = cfqd->cfq_slice_idle; in cfq_completed_request()
4228 if (!cfqd->cfq_slice_idle) in cfq_completed_request()
4229 extend_sl = cfqd->cfq_group_idle; in cfq_completed_request()
4232 cfq_log_cfqq(cfqd, cfqq, "will busy wait"); in cfq_completed_request()
4244 cfq_slice_expired(cfqd, 1); in cfq_completed_request()
4246 !cfq_close_cooperator(cfqd, cfqq)) { in cfq_completed_request()
4247 cfq_arm_slice_timer(cfqd); in cfq_completed_request()
4251 if (!cfqd->rq_in_driver) in cfq_completed_request()
4252 cfq_schedule_dispatch(cfqd); in cfq_completed_request()
4267 struct cfq_data *cfqd = q->elevator->elevator_data; in cfq_may_queue() local
4278 cic = cfq_cic_lookup(cfqd, tsk->io_context); in cfq_may_queue()
4315 cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_cq *cic, in cfq_merge_cfqqs() argument
4318 cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq); in cfq_merge_cfqqs()
4353 struct cfq_data *cfqd = q->elevator->elevator_data; in cfq_set_request() local
4365 if (!cfqq || cfqq == &cfqd->oom_cfqq) { in cfq_set_request()
4368 cfqq = cfq_get_queue(cfqd, is_sync, cic, bio); in cfq_set_request()
4375 cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq"); in cfq_set_request()
4388 cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq); in cfq_set_request()
4403 struct cfq_data *cfqd = in cfq_kick_queue() local
4405 struct request_queue *q = cfqd->queue; in cfq_kick_queue()
4408 __blk_run_queue(cfqd->queue); in cfq_kick_queue()
4417 struct cfq_data *cfqd = (struct cfq_data *) data; in cfq_idle_slice_timer() local
4422 cfq_log(cfqd, "idle timer fired"); in cfq_idle_slice_timer()
4424 spin_lock_irqsave(cfqd->queue->queue_lock, flags); in cfq_idle_slice_timer()
4426 cfqq = cfqd->active_queue; in cfq_idle_slice_timer()
4446 if (!cfqd->busy_queues) in cfq_idle_slice_timer()
4461 cfq_slice_expired(cfqd, timed_out); in cfq_idle_slice_timer()
4463 cfq_schedule_dispatch(cfqd); in cfq_idle_slice_timer()
4465 spin_unlock_irqrestore(cfqd->queue->queue_lock, flags); in cfq_idle_slice_timer()
4468 static void cfq_shutdown_timer_wq(struct cfq_data *cfqd) in cfq_shutdown_timer_wq() argument
4470 del_timer_sync(&cfqd->idle_slice_timer); in cfq_shutdown_timer_wq()
4471 cancel_work_sync(&cfqd->unplug_work); in cfq_shutdown_timer_wq()
4476 struct cfq_data *cfqd = e->elevator_data; in cfq_exit_queue() local
4477 struct request_queue *q = cfqd->queue; in cfq_exit_queue()
4479 cfq_shutdown_timer_wq(cfqd); in cfq_exit_queue()
4483 if (cfqd->active_queue) in cfq_exit_queue()
4484 __cfq_slice_expired(cfqd, cfqd->active_queue, 0); in cfq_exit_queue()
4488 cfq_shutdown_timer_wq(cfqd); in cfq_exit_queue()
4493 kfree(cfqd->root_group); in cfq_exit_queue()
4495 kfree(cfqd); in cfq_exit_queue()
4500 struct cfq_data *cfqd; in cfq_init_queue() local
4509 cfqd = kzalloc_node(sizeof(*cfqd), GFP_KERNEL, q->node); in cfq_init_queue()
4510 if (!cfqd) { in cfq_init_queue()
4514 eq->elevator_data = cfqd; in cfq_init_queue()
4516 cfqd->queue = q; in cfq_init_queue()
4522 cfqd->grp_service_tree = CFQ_RB_ROOT; in cfq_init_queue()
4530 cfqd->root_group = blkg_to_cfqg(q->root_blkg); in cfq_init_queue()
4533 cfqd->root_group = kzalloc_node(sizeof(*cfqd->root_group), in cfq_init_queue()
4534 GFP_KERNEL, cfqd->queue->node); in cfq_init_queue()
4535 if (!cfqd->root_group) in cfq_init_queue()
4538 cfq_init_cfqg_base(cfqd->root_group); in cfq_init_queue()
4539 cfqd->root_group->weight = 2 * CFQ_WEIGHT_LEGACY_DFL; in cfq_init_queue()
4540 cfqd->root_group->leaf_weight = 2 * CFQ_WEIGHT_LEGACY_DFL; in cfq_init_queue()
4549 cfqd->prio_trees[i] = RB_ROOT; in cfq_init_queue()
4558 cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0); in cfq_init_queue()
4559 cfqd->oom_cfqq.ref++; in cfq_init_queue()
4562 cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, cfqd->root_group); in cfq_init_queue()
4563 cfqg_put(cfqd->root_group); in cfq_init_queue()
4566 init_timer(&cfqd->idle_slice_timer); in cfq_init_queue()
4567 cfqd->idle_slice_timer.function = cfq_idle_slice_timer; in cfq_init_queue()
4568 cfqd->idle_slice_timer.data = (unsigned long) cfqd; in cfq_init_queue()
4570 INIT_WORK(&cfqd->unplug_work, cfq_kick_queue); in cfq_init_queue()
4572 cfqd->cfq_quantum = cfq_quantum; in cfq_init_queue()
4573 cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0]; in cfq_init_queue()
4574 cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1]; in cfq_init_queue()
4575 cfqd->cfq_back_max = cfq_back_max; in cfq_init_queue()
4576 cfqd->cfq_back_penalty = cfq_back_penalty; in cfq_init_queue()
4577 cfqd->cfq_slice[0] = cfq_slice_async; in cfq_init_queue()
4578 cfqd->cfq_slice[1] = cfq_slice_sync; in cfq_init_queue()
4579 cfqd->cfq_target_latency = cfq_target_latency; in cfq_init_queue()
4580 cfqd->cfq_slice_async_rq = cfq_slice_async_rq; in cfq_init_queue()
4581 cfqd->cfq_slice_idle = cfq_slice_idle; in cfq_init_queue()
4582 cfqd->cfq_group_idle = cfq_group_idle; in cfq_init_queue()
4583 cfqd->cfq_latency = 1; in cfq_init_queue()
4584 cfqd->hw_tag = -1; in cfq_init_queue()
4589 cfqd->last_delayed_sync = jiffies - HZ; in cfq_init_queue()
4593 kfree(cfqd); in cfq_init_queue()
4601 struct cfq_data *cfqd = e->elevator_data; in cfq_registered_queue() local
4607 cfqd->cfq_slice_idle = 0; in cfq_registered_queue()
4631 struct cfq_data *cfqd = e->elevator_data; \
4637 SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
4638 SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
4639 SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
4640 SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
4641 SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
4642 SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
4643 SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
4644 SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
4645 SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
4646 SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
4647 SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
4648 SHOW_FUNCTION(cfq_target_latency_show, cfqd->cfq_target_latency, 1);
4654 struct cfq_data *cfqd = e->elevator_data; \
4667 STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
4668 STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
4670 STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
4672 STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
4673 STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
4675 STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
4676 STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1);
4677 STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
4678 STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
4679 STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
4681 STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
4682 STORE_FUNCTION(cfq_target_latency_store, &cfqd->cfq_target_latency, 1, UINT_MAX, 1);
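
Only one body line each of SHOW_FUNCTION and STORE_FUNCTION survives in this listing (file lines 4631 and 4654); the instantiations above show the pattern: each sysfs tunable maps to a cfq_data field plus a flag (the trailing 0/1) selecting jiffies-to-milliseconds conversion. A hedged reconstruction of the SHOW side — the cfq_var_show helper name is an assumption based on common elevator boilerplate:

    /* Reconstruction sketch: read the field, convert internal jiffies to
     * milliseconds when __CONV is 1, and format it into the sysfs page. */
    #define SHOW_FUNCTION(__FUNC, __VAR, __CONV)                        \
    static ssize_t __FUNC(struct elevator_queue *e, char *page)         \
    {                                                                   \
        struct cfq_data *cfqd = e->elevator_data;                       \
        unsigned int __data = (__VAR);                                  \
        if (__CONV)                                                     \
            __data = jiffies_to_msecs(__data);                          \
        return cfq_var_show(__data, (page));                            \
    }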