Lines matching refs: cfqd (block/cfq-iosched.c, the CFQ I/O scheduler)
103 struct cfq_data *cfqd; member
389 static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
627 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...) do { \ argument
631 blk_add_trace_msg((cfqd)->queue, "cfq%d%c%c %s " fmt, (cfqq)->pid, \
637 #define cfq_log_cfqg(cfqd, cfqg, fmt, args...) do { \ argument
641 blk_add_trace_msg((cfqd)->queue, "%s " fmt, __pbuf, ##args); \
760 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \ argument
761 blk_add_trace_msg((cfqd)->queue, "cfq%d%c%c " fmt, (cfqq)->pid, \
765 #define cfq_log_cfqg(cfqd, cfqg, fmt, args...) do {} while (0) argument
780 #define cfq_log(cfqd, fmt, args...) \ argument
781 blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
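The blocks above are the same logging interface built two ways: with group scheduling compiled in, cfq_log_cfqg() emits a blktrace message tagged with the group's path; without it, the macro collapses to an empty statement. A minimal userspace sketch of that config-dependent pattern, with trace_msg() as a hypothetical stand-in for blk_add_trace_msg() and GROUP_SCHED standing in for CONFIG_CFQ_GROUP_IOSCHED:

#include <stdio.h>

/* trace_msg() is a hypothetical stand-in for blk_add_trace_msg() */
#define trace_msg(fmt, args...) fprintf(stderr, "trace: " fmt "\n", ##args)

struct grp { const char *path; };

#ifdef GROUP_SCHED  /* plays the role of CONFIG_CFQ_GROUP_IOSCHED */
#define log_grp(g, fmt, args...) trace_msg("%s " fmt, (g)->path, ##args)
#else
#define log_grp(g, fmt, args...) do {} while (0)  /* compiles to nothing */
#endif

int main(void)
{
    struct grp g = { "grp0" };

    log_grp(&g, "served: vt=%d", 42);   /* emitted only with -DGROUP_SCHED */
    trace_msg("schedule dispatch");     /* the always-on cfq_log() analogue */
    return 0;
}

The do {} while (0) form keeps the disabled variant statement-shaped, so a bare "if (cond) log_grp(...);" parses the same either way.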
793 static inline bool cfq_io_thinktime_big(struct cfq_data *cfqd, in cfq_io_thinktime_big() argument
800 slice = cfqd->cfq_group_idle; in cfq_io_thinktime_big()
802 slice = cfqd->cfq_slice_idle; in cfq_io_thinktime_big()
806 static inline bool iops_mode(struct cfq_data *cfqd) in iops_mode() argument
815 if (!cfqd->cfq_slice_idle && cfqd->hw_tag) in iops_mode()
841 struct cfq_data *cfqd, in cfq_group_busy_queues_wl() argument
852 static inline int cfqg_busy_async_queues(struct cfq_data *cfqd, in cfqg_busy_async_queues() argument
860 static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, bool is_sync,
870 static inline struct cfq_io_cq *cfq_cic_lookup(struct cfq_data *cfqd, in cfq_cic_lookup() argument
874 return icq_to_cic(ioc_lookup_icq(ioc, cfqd->queue)); in cfq_cic_lookup()
907 static inline void cfq_schedule_dispatch(struct cfq_data *cfqd) in cfq_schedule_dispatch() argument
909 if (cfqd->busy_queues) { in cfq_schedule_dispatch()
910 cfq_log(cfqd, "schedule dispatch"); in cfq_schedule_dispatch()
911 kblockd_schedule_work(&cfqd->unplug_work); in cfq_schedule_dispatch()
920 static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync, in cfq_prio_slice() argument
923 const int base_slice = cfqd->cfq_slice[sync]; in cfq_prio_slice()
931 cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) in cfq_prio_to_slice() argument
933 return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio); in cfq_prio_to_slice()
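cfq_prio_slice() maps an I/O priority to a time slice: the sync or async base slice plus a fixed bonus per priority step above the lowest. A small sketch of that scaling, assuming the usual CFQ constants (8 best-effort priority levels, scale factor 5) and default-looking base slices of 100 ms sync / 40 ms async:

#include <stdio.h>

enum { PRIO_LISTS = 8, SLICE_SCALE = 5 };   /* CFQ_PRIO_LISTS, CFQ_SLICE_SCALE */

/* base slices in ms; 100/40 mirror the usual HZ/10 and HZ/25 defaults */
static const int slice_ms[2] = { 40, 100 };  /* [0] = async, [1] = sync */

static int prio_slice(int sync, int prio)    /* prio 0 (highest) .. 7 (lowest) */
{
    const int base = slice_ms[sync];

    return base + base / SLICE_SCALE * (PRIO_LISTS - 1 - prio);
}

int main(void)
{
    for (int prio = 0; prio < PRIO_LISTS; prio++)
        printf("sync prio %d -> %d ms\n", prio, prio_slice(1, prio));
    return 0;
}

With these numbers a default-priority (4) sync queue gets 160 ms and the highest priority gets 240 ms, which is the spread cfq_prio_to_slice() feeds into the slice scaling below.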
994 static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd, in cfq_group_get_avg_queues() argument
1000 unsigned busy = cfq_group_busy_queues_wl(rt, cfqd, cfqg); in cfq_group_get_avg_queues()
1010 cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg) in cfq_group_slice() argument
1012 return cfqd->cfq_target_latency * cfqg->vfraction >> CFQ_SERVICE_SHIFT; in cfq_group_slice()
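cfq_group_slice() is a fixed-point multiply: vfraction carries the group's fraction of the hierarchy's weight scaled by 2^CFQ_SERVICE_SHIFT, so target latency times vfraction, shifted back down, yields the group's slice budget. A one-line worked example; the shift width of 12 is an assumption about CFQ_SERVICE_SHIFT:

#include <stdio.h>

#define SERVICE_SHIFT 12   /* assumed: vfraction is scaled by 2^12 */

int main(void)
{
    unsigned target_latency = 300;                   /* ms */
    unsigned vfraction = 1u << (SERVICE_SHIFT - 2);  /* a 25% share */

    printf("group slice: %u ms\n",
           target_latency * vfraction >> SERVICE_SHIFT);  /* prints 75 */
    return 0;
}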
1016 cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) in cfq_scaled_cfqq_slice() argument
1018 unsigned slice = cfq_prio_to_slice(cfqd, cfqq); in cfq_scaled_cfqq_slice()
1019 if (cfqd->cfq_latency) { in cfq_scaled_cfqq_slice()
1024 unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg, in cfq_scaled_cfqq_slice()
1026 unsigned sync_slice = cfqd->cfq_slice[1]; in cfq_scaled_cfqq_slice()
1028 unsigned group_slice = cfq_group_slice(cfqd, cfqq->cfqg); in cfq_scaled_cfqq_slice()
1031 unsigned base_low_slice = 2 * cfqd->cfq_slice_idle; in cfq_scaled_cfqq_slice()
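Taken together, the lines above reconstruct as: cfq_scaled_cfqq_slice() starts from the priority slice and, when low_latency is enabled, shrinks it so the expected queue count (iq) still fits in the group's slice budget, with a floor derived from 2 * slice_idle. A hedged sketch of that shrink step under assumed values (all times in ms):

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

/* Shrink a queue's slice so `iq` queues fit the group's budget;
 * mirrors the structure of cfq_scaled_cfqq_slice(), numbers illustrative. */
static unsigned scaled_slice(unsigned slice, unsigned iq, unsigned sync_slice,
                             unsigned group_slice, unsigned slice_idle)
{
    unsigned expect_latency = sync_slice * iq;

    if (expect_latency > group_slice) {
        unsigned base_low = 2 * slice_idle;
        /* floor scales with how this slice compares to a sync slice */
        unsigned low = MIN(slice, base_low * slice / sync_slice);

        slice = MAX(slice * group_slice / expect_latency, low);
    }
    return slice;
}

int main(void)
{
    /* 160 ms prio slice, 6 busy queues, 100 ms sync slice,
     * 300 ms group budget, 8 ms idle window */
    printf("slice -> %u ms\n", scaled_slice(160, 6, 100, 300, 8));
    return 0;
}

Here 6 queues at 100 ms each would want 600 ms against a 300 ms budget, so the 160 ms slice is halved to 80 ms rather than dropped to the 25 ms floor.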
1046 cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) in cfq_set_prio_slice() argument
1048 unsigned slice = cfq_scaled_cfqq_slice(cfqd, cfqq); in cfq_set_prio_slice()
1053 cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies); in cfq_set_prio_slice()
1077 cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, sector_t last) in cfq_choose_req() argument
1102 back_max = cfqd->cfq_back_max * 2; in cfq_choose_req()
1112 d1 = (last - s1) * cfqd->cfq_back_penalty; in cfq_choose_req()
1119 d2 = (last - s2) * cfqd->cfq_back_penalty; in cfq_choose_req()
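cfq_choose_req() prefers whichever request is cheaper to reach from the last served sector: forward distance counts at face value, a backward seek is tolerated only within back_max (cfq_back_max * 2) and weighted by cfq_back_penalty, and anything further behind is treated as worst case. A simplified sketch of the cost function; the 16384-sector limit and penalty of 2 mirror CFQ's usual defaults but are assumptions here:

#include <stdint.h>
#include <stdio.h>

#define BACK_MAX     (16 * 1024)  /* sectors a backward seek may span */
#define BACK_PENALTY 2            /* weight applied to backward distance */

/* weighted distance from `last` to request start `s`;
 * UINT64_MAX means "too far behind, worst case" */
static uint64_t seek_cost(uint64_t last, uint64_t s)
{
    if (s >= last)
        return s - last;                   /* forward: face value */
    if (s + 2 * BACK_MAX >= last)
        return (last - s) * BACK_PENALTY;  /* short backward seek */
    return UINT64_MAX;                     /* long backward seek */
}

int main(void)
{
    uint64_t last = 100000;
    uint64_t s1 = 104096, s2 = 98000;      /* 4096 ahead vs 2000 behind */

    printf("cost fwd=%llu back=%llu -> pick %s\n",
           (unsigned long long)seek_cost(last, s1),
           (unsigned long long)seek_cost(last, s2),
           seek_cost(last, s1) <= seek_cost(last, s2) ? "s1" : "s2");
    return 0;
}

In the example the request 2000 sectors behind still wins (cost 4000) over the one 4096 sectors ahead; doubling the penalty would flip the choice.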
1208 cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq, in cfq_find_next_rq() argument
1228 return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last)); in cfq_find_next_rq()
1231 static unsigned long cfq_slice_offset(struct cfq_data *cfqd, in cfq_slice_offset() argument
1237 return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) - in cfq_slice_offset()
1238 cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio)); in cfq_slice_offset()
1350 cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg) in cfq_group_notify_queue_add() argument
1352 struct cfq_rb_root *st = &cfqd->grp_service_tree; in cfq_group_notify_queue_add()
1408 cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg) in cfq_group_notify_queue_del() argument
1410 struct cfq_rb_root *st = &cfqd->grp_service_tree; in cfq_group_notify_queue_del()
1419 cfq_log_cfqg(cfqd, cfqg, "del_from_rr group"); in cfq_group_notify_queue_del()
1457 static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg, in cfq_group_served() argument
1460 struct cfq_rb_root *st = &cfqd->grp_service_tree; in cfq_group_served()
1462 int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg) in cfq_group_served()
1469 if (iops_mode(cfqd)) in cfq_group_served()
1486 if (time_after(cfqd->workload_expires, jiffies)) { in cfq_group_served()
1487 cfqg->saved_wl_slice = cfqd->workload_expires in cfq_group_served()
1489 cfqg->saved_wl_type = cfqd->serving_wl_type; in cfq_group_served()
1490 cfqg->saved_wl_class = cfqd->serving_wl_class; in cfq_group_served()
1494 cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime, in cfq_group_served()
1496 cfq_log_cfqq(cfqq->cfqd, cfqq, in cfq_group_served()
1499 iops_mode(cfqd), cfqq->nr_sectors); in cfq_group_served()
1607 static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd, in cfq_lookup_create_cfqg() argument
1610 struct request_queue *q = cfqd->queue; in cfq_lookup_create_cfqg()
1615 cfqg = cfqd->root_group; in cfq_lookup_create_cfqg()
1631 cfqg = cfqq->cfqd->root_group; in cfq_link_cfqq_cfqg()
2004 static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd, in cfq_lookup_create_cfqg() argument
2007 return cfqd->root_group; in cfq_lookup_create_cfqg()
2022 static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq, in cfq_service_tree_add() argument
2048 rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies; in cfq_service_tree_add()
2097 cfq_group_notify_queue_add(cfqd, cfqq->cfqg); in cfq_service_tree_add()
2101 cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root, in cfq_prio_tree_lookup() argument
2136 static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq) in cfq_prio_tree_add() argument
2151 cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio]; in cfq_prio_tree_add()
2152 __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root, in cfq_prio_tree_add()
2164 static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq) in cfq_resort_rr_list() argument
2170 cfq_service_tree_add(cfqd, cfqq, 0); in cfq_resort_rr_list()
2171 cfq_prio_tree_add(cfqd, cfqq); in cfq_resort_rr_list()
2179 static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq) in cfq_add_cfqq_rr() argument
2181 cfq_log_cfqq(cfqd, cfqq, "add_to_rr"); in cfq_add_cfqq_rr()
2184 cfqd->busy_queues++; in cfq_add_cfqq_rr()
2186 cfqd->busy_sync_queues++; in cfq_add_cfqq_rr()
2188 cfq_resort_rr_list(cfqd, cfqq); in cfq_add_cfqq_rr()
2195 static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq) in cfq_del_cfqq_rr() argument
2197 cfq_log_cfqq(cfqd, cfqq, "del_from_rr"); in cfq_del_cfqq_rr()
2210 cfq_group_notify_queue_del(cfqd, cfqq->cfqg); in cfq_del_cfqq_rr()
2211 BUG_ON(!cfqd->busy_queues); in cfq_del_cfqq_rr()
2212 cfqd->busy_queues--; in cfq_del_cfqq_rr()
2214 cfqd->busy_sync_queues--; in cfq_del_cfqq_rr()
2246 struct cfq_data *cfqd = cfqq->cfqd; in cfq_add_rq_rb() local
2254 cfq_add_cfqq_rr(cfqd, cfqq); in cfq_add_rq_rb()
2260 cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq, cfqd->last_position); in cfq_add_rq_rb()
2266 cfq_prio_tree_add(cfqd, cfqq); in cfq_add_rq_rb()
2277 cfqg_stats_update_io_add(RQ_CFQG(rq), cfqq->cfqd->serving_group, in cfq_reposition_rq_rb()
2282 cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio) in cfq_find_rq_fmerge() argument
2288 cic = cfq_cic_lookup(cfqd, tsk->io_context); in cfq_find_rq_fmerge()
2301 struct cfq_data *cfqd = q->elevator->elevator_data; in cfq_activate_request() local
2303 cfqd->rq_in_driver++; in cfq_activate_request()
2304 cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d", in cfq_activate_request()
2305 cfqd->rq_in_driver); in cfq_activate_request()
2307 cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq); in cfq_activate_request()
2312 struct cfq_data *cfqd = q->elevator->elevator_data; in cfq_deactivate_request() local
2314 WARN_ON(!cfqd->rq_in_driver); in cfq_deactivate_request()
2315 cfqd->rq_in_driver--; in cfq_deactivate_request()
2316 cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d", in cfq_deactivate_request()
2317 cfqd->rq_in_driver); in cfq_deactivate_request()
2325 cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq); in cfq_remove_request()
2330 cfqq->cfqd->rq_queued--; in cfq_remove_request()
2341 struct cfq_data *cfqd = q->elevator->elevator_data; in cfq_merge() local
2344 __rq = cfq_find_rq_fmerge(cfqd, bio); in cfq_merge()
2374 struct cfq_data *cfqd = q->elevator->elevator_data; in cfq_merged_requests() local
2398 cfqq != cfqd->active_queue) in cfq_merged_requests()
2399 cfq_del_cfqq_rr(cfqd, cfqq); in cfq_merged_requests()
2405 struct cfq_data *cfqd = q->elevator->elevator_data; in cfq_allow_merge() local
2419 cic = cfq_cic_lookup(cfqd, current->io_context); in cfq_allow_merge()
2427 static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq) in cfq_del_timer() argument
2429 del_timer(&cfqd->idle_slice_timer); in cfq_del_timer()
2433 static void __cfq_set_active_queue(struct cfq_data *cfqd, in __cfq_set_active_queue() argument
2437 cfq_log_cfqq(cfqd, cfqq, "set_active wl_class:%d wl_type:%d", in __cfq_set_active_queue()
2438 cfqd->serving_wl_class, cfqd->serving_wl_type); in __cfq_set_active_queue()
2453 cfq_del_timer(cfqd, cfqq); in __cfq_set_active_queue()
2456 cfqd->active_queue = cfqq; in __cfq_set_active_queue()
2463 __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq, in __cfq_slice_expired() argument
2466 cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out); in __cfq_slice_expired()
2469 cfq_del_timer(cfqd, cfqq); in __cfq_slice_expired()
2488 cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq); in __cfq_slice_expired()
2491 cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid); in __cfq_slice_expired()
2494 cfq_group_served(cfqd, cfqq->cfqg, cfqq); in __cfq_slice_expired()
2497 cfq_del_cfqq_rr(cfqd, cfqq); in __cfq_slice_expired()
2499 cfq_resort_rr_list(cfqd, cfqq); in __cfq_slice_expired()
2501 if (cfqq == cfqd->active_queue) in __cfq_slice_expired()
2502 cfqd->active_queue = NULL; in __cfq_slice_expired()
2504 if (cfqd->active_cic) { in __cfq_slice_expired()
2505 put_io_context(cfqd->active_cic->icq.ioc); in __cfq_slice_expired()
2506 cfqd->active_cic = NULL; in __cfq_slice_expired()
2510 static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out) in cfq_slice_expired() argument
2512 struct cfq_queue *cfqq = cfqd->active_queue; in cfq_slice_expired()
2515 __cfq_slice_expired(cfqd, cfqq, timed_out); in cfq_slice_expired()
2522 static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd) in cfq_get_next_queue() argument
2524 struct cfq_rb_root *st = st_for(cfqd->serving_group, in cfq_get_next_queue()
2525 cfqd->serving_wl_class, cfqd->serving_wl_type); in cfq_get_next_queue()
2527 if (!cfqd->rq_queued) in cfq_get_next_queue()
2538 static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd) in cfq_get_next_queue_forced() argument
2545 if (!cfqd->rq_queued) in cfq_get_next_queue_forced()
2548 cfqg = cfq_get_next_cfqg(cfqd); in cfq_get_next_queue_forced()
2561 static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd, in cfq_set_active_queue() argument
2565 cfqq = cfq_get_next_queue(cfqd); in cfq_set_active_queue()
2567 __cfq_set_active_queue(cfqd, cfqq); in cfq_set_active_queue()
2571 static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd, in cfq_dist_from_last() argument
2574 if (blk_rq_pos(rq) >= cfqd->last_position) in cfq_dist_from_last()
2575 return blk_rq_pos(rq) - cfqd->last_position; in cfq_dist_from_last()
2577 return cfqd->last_position - blk_rq_pos(rq); in cfq_dist_from_last()
2580 static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq, in cfq_rq_close() argument
2583 return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR; in cfq_rq_close()
2586 static struct cfq_queue *cfqq_close(struct cfq_data *cfqd, in cfqq_close() argument
2589 struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio]; in cfqq_close()
2592 sector_t sector = cfqd->last_position; in cfqq_close()
2601 __cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL); in cfqq_close()
2610 if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq)) in cfqq_close()
2621 if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq)) in cfqq_close()
2637 static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd, in cfq_close_cooperator() argument
2660 cfqq = cfqq_close(cfqd, cur_cfqq); in cfq_close_cooperator()
2689 static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq) in cfq_should_idle() argument
2697 if (!cfqd->cfq_slice_idle) in cfq_should_idle()
2706 !(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)) in cfq_should_idle()
2714 !cfq_io_thinktime_big(cfqd, &st->ttime, false)) in cfq_should_idle()
2716 cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d", st->count); in cfq_should_idle()
2720 static void cfq_arm_slice_timer(struct cfq_data *cfqd) in cfq_arm_slice_timer() argument
2722 struct cfq_queue *cfqq = cfqd->active_queue; in cfq_arm_slice_timer()
2731 if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag) in cfq_arm_slice_timer()
2740 if (!cfq_should_idle(cfqd, cfqq)) { in cfq_arm_slice_timer()
2742 if (cfqd->cfq_group_idle) in cfq_arm_slice_timer()
2743 group_idle = cfqd->cfq_group_idle; in cfq_arm_slice_timer()
2757 cic = cfqd->active_cic; in cfq_arm_slice_timer()
2768 cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%lu", in cfq_arm_slice_timer()
2780 sl = cfqd->cfq_group_idle; in cfq_arm_slice_timer()
2782 sl = cfqd->cfq_slice_idle; in cfq_arm_slice_timer()
2784 mod_timer(&cfqd->idle_slice_timer, jiffies + sl); in cfq_arm_slice_timer()
2786 cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl, in cfq_arm_slice_timer()
2795 struct cfq_data *cfqd = q->elevator->elevator_data; in cfq_dispatch_insert() local
2798 cfq_log_cfqq(cfqd, cfqq, "dispatch_insert"); in cfq_dispatch_insert()
2800 cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq); in cfq_dispatch_insert()
2806 cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++; in cfq_dispatch_insert()
2830 cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq); in cfq_check_fifo()
2835 cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq) in cfq_prio_to_maxrq() argument
2837 const int base_rq = cfqd->cfq_slice_async_rq; in cfq_prio_to_maxrq()
2899 static enum wl_type_t cfq_choose_wl_type(struct cfq_data *cfqd, in cfq_choose_wl_type() argument
2923 choose_wl_class_and_type(struct cfq_data *cfqd, struct cfq_group *cfqg) in choose_wl_class_and_type() argument
2929 enum wl_class_t original_class = cfqd->serving_wl_class; in choose_wl_class_and_type()
2932 if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg)) in choose_wl_class_and_type()
2933 cfqd->serving_wl_class = RT_WORKLOAD; in choose_wl_class_and_type()
2934 else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg)) in choose_wl_class_and_type()
2935 cfqd->serving_wl_class = BE_WORKLOAD; in choose_wl_class_and_type()
2937 cfqd->serving_wl_class = IDLE_WORKLOAD; in choose_wl_class_and_type()
2938 cfqd->workload_expires = jiffies + 1; in choose_wl_class_and_type()
2942 if (original_class != cfqd->serving_wl_class) in choose_wl_class_and_type()
2950 st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type); in choose_wl_class_and_type()
2956 if (count && !time_after(jiffies, cfqd->workload_expires)) in choose_wl_class_and_type()
2961 cfqd->serving_wl_type = cfq_choose_wl_type(cfqd, cfqg, in choose_wl_class_and_type()
2962 cfqd->serving_wl_class); in choose_wl_class_and_type()
2963 st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type); in choose_wl_class_and_type()
2971 group_slice = cfq_group_slice(cfqd, cfqg); in choose_wl_class_and_type()
2974 max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_wl_class], in choose_wl_class_and_type()
2975 cfq_group_busy_queues_wl(cfqd->serving_wl_class, cfqd, in choose_wl_class_and_type()
2978 if (cfqd->serving_wl_type == ASYNC_WORKLOAD) { in choose_wl_class_and_type()
2988 tmp = cfqd->cfq_target_latency * in choose_wl_class_and_type()
2989 cfqg_busy_async_queues(cfqd, cfqg); in choose_wl_class_and_type()
2990 tmp = tmp/cfqd->busy_queues; in choose_wl_class_and_type()
2995 slice = slice * cfqd->cfq_slice[0] / cfqd->cfq_slice[1]; in choose_wl_class_and_type()
2998 slice = max(slice, 2 * cfqd->cfq_slice_idle); in choose_wl_class_and_type()
3001 cfq_log(cfqd, "workload slice:%d", slice); in choose_wl_class_and_type()
3002 cfqd->workload_expires = jiffies + slice; in choose_wl_class_and_type()
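choose_wl_class_and_type() picks the highest busy class (RT, then BE, then IDLE) and then sizes the workload slice: the group's budget is split in proportion to the chosen workload's share of busy queues; async slices are additionally capped by the async share of target_latency and scaled by the async/sync slice ratio, while sync slices keep a 2 * slice_idle floor. A worked sketch of the async branch with illustrative numbers:

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
    /* illustrative inputs, all times in ms */
    unsigned group_slice    = 300;  /* cfq_group_slice() result */
    unsigned count          = 2;    /* queues on the chosen workload tree */
    unsigned busy_wl        = 4;    /* busy queues of this class in the group */
    unsigned target_latency = 300;  /* cfq_target_latency */
    unsigned busy_async     = 2, busy_queues = 6;
    unsigned slice_async    = 40, slice_sync = 100;  /* cfq_slice[0], [1] */

    /* proportional share of the group's budget */
    unsigned slice = group_slice * count / busy_wl;           /* 150 */

    /* async: cap by the async share of the target latency ... */
    unsigned tmp = target_latency * busy_async / busy_queues; /* 100 */
    slice = MIN(slice, tmp);
    /* ... then scale by the async/sync slice ratio */
    slice = slice * slice_async / slice_sync;                 /* 40 */

    printf("async workload slice: %u ms\n", slice);
    return 0;
}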
3005 static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd) in cfq_get_next_cfqg() argument
3007 struct cfq_rb_root *st = &cfqd->grp_service_tree; in cfq_get_next_cfqg()
3017 static void cfq_choose_cfqg(struct cfq_data *cfqd) in cfq_choose_cfqg() argument
3019 struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd); in cfq_choose_cfqg()
3021 cfqd->serving_group = cfqg; in cfq_choose_cfqg()
3025 cfqd->workload_expires = jiffies + cfqg->saved_wl_slice; in cfq_choose_cfqg()
3026 cfqd->serving_wl_type = cfqg->saved_wl_type; in cfq_choose_cfqg()
3027 cfqd->serving_wl_class = cfqg->saved_wl_class; in cfq_choose_cfqg()
3029 cfqd->workload_expires = jiffies - 1; in cfq_choose_cfqg()
3031 choose_wl_class_and_type(cfqd, cfqg); in cfq_choose_cfqg()
3038 static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd) in cfq_select_queue() argument
3042 cfqq = cfqd->active_queue; in cfq_select_queue()
3046 if (!cfqd->rq_queued) in cfq_select_queue()
3069 && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) { in cfq_select_queue()
3089 new_cfqq = cfq_close_cooperator(cfqd, cfqq); in cfq_select_queue()
3101 if (timer_pending(&cfqd->idle_slice_timer)) { in cfq_select_queue()
3117 if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) { in cfq_select_queue()
3127 if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1 && in cfq_select_queue()
3129 !cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true)) { in cfq_select_queue()
3135 cfq_slice_expired(cfqd, 0); in cfq_select_queue()
3142 cfq_choose_cfqg(cfqd); in cfq_select_queue()
3144 cfqq = cfq_set_active_queue(cfqd, new_cfqq); in cfq_select_queue()
3154 cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq); in __cfq_forced_dispatch_cfqq()
3161 __cfq_slice_expired(cfqq->cfqd, cfqq, 0); in __cfq_forced_dispatch_cfqq()
3169 static int cfq_forced_dispatch(struct cfq_data *cfqd) in cfq_forced_dispatch() argument
3175 cfq_slice_expired(cfqd, 0); in cfq_forced_dispatch()
3176 while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) { in cfq_forced_dispatch()
3177 __cfq_set_active_queue(cfqd, cfqq); in cfq_forced_dispatch()
3181 BUG_ON(cfqd->busy_queues); in cfq_forced_dispatch()
3183 cfq_log(cfqd, "forced_dispatch=%d", dispatched); in cfq_forced_dispatch()
3187 static inline bool cfq_slice_used_soon(struct cfq_data *cfqd, in cfq_slice_used_soon() argument
3193 if (time_after(jiffies + cfqd->cfq_slice_idle * cfqq->dispatched, in cfq_slice_used_soon()
3200 static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq) in cfq_may_dispatch() argument
3207 if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_flight[BLK_RW_ASYNC]) in cfq_may_dispatch()
3213 if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq)) in cfq_may_dispatch()
3216 max_dispatch = max_t(unsigned int, cfqd->cfq_quantum / 2, 1); in cfq_may_dispatch()
3238 if (cfq_cfqq_sync(cfqq) && cfqd->busy_sync_queues == 1) in cfq_may_dispatch()
3244 if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq) && in cfq_may_dispatch()
3251 if (cfqd->busy_queues == 1 || promote_sync) in cfq_may_dispatch()
3260 max_dispatch = cfqd->cfq_quantum; in cfq_may_dispatch()
3268 if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) { in cfq_may_dispatch()
3269 unsigned long last_sync = jiffies - cfqd->last_delayed_sync; in cfq_may_dispatch()
3272 depth = last_sync / cfqd->cfq_slice[1]; in cfq_may_dispatch()
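This low_latency branch of cfq_may_dispatch() throttles async queues after recent sync activity: the permitted depth grows by one per sync slice elapsed since the last delayed sync request, so writeback ramps back up gradually instead of immediately refilling the device. A sketch of the ramp, assuming a 100 ms sync slice:

#include <stdio.h>

/* Allowed async dispatch depth, ramping up with time since the last
 * delayed sync request (mirrors the cfq_may_dispatch() throttle). */
static unsigned async_depth(unsigned long now_ms, unsigned long last_sync_ms,
                            unsigned sync_slice_ms, unsigned max_dispatch,
                            unsigned already_dispatched)
{
    unsigned depth = (now_ms - last_sync_ms) / sync_slice_ms;

    if (!depth && !already_dispatched)
        depth = 1;                       /* never stall the queue completely */
    return depth < max_dispatch ? depth : max_dispatch;
}

int main(void)
{
    for (unsigned long t = 0; t <= 500; t += 100)
        printf("t=+%lums -> depth %u\n", t,
               async_depth(1000 + t, 1000, 100, 4, 0));
    return 0;
}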
3289 static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq) in cfq_dispatch_request() argument
3295 if (!cfq_may_dispatch(cfqd, cfqq)) in cfq_dispatch_request()
3308 cfq_dispatch_insert(cfqd->queue, rq); in cfq_dispatch_request()
3310 if (!cfqd->active_cic) { in cfq_dispatch_request()
3314 cfqd->active_cic = cic; in cfq_dispatch_request()
3326 struct cfq_data *cfqd = q->elevator->elevator_data; in cfq_dispatch_requests() local
3329 if (!cfqd->busy_queues) in cfq_dispatch_requests()
3333 return cfq_forced_dispatch(cfqd); in cfq_dispatch_requests()
3335 cfqq = cfq_select_queue(cfqd); in cfq_dispatch_requests()
3342 if (!cfq_dispatch_request(cfqd, cfqq)) in cfq_dispatch_requests()
3352 if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) && in cfq_dispatch_requests()
3353 cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) || in cfq_dispatch_requests()
3356 cfq_slice_expired(cfqd, 0); in cfq_dispatch_requests()
3359 cfq_log_cfqq(cfqd, cfqq, "dispatched a request"); in cfq_dispatch_requests()
3372 struct cfq_data *cfqd = cfqq->cfqd; in cfq_put_queue() local
3381 cfq_log_cfqq(cfqd, cfqq, "put_queue"); in cfq_put_queue()
3386 if (unlikely(cfqd->active_queue == cfqq)) { in cfq_put_queue()
3387 __cfq_slice_expired(cfqd, cfqq, 0); in cfq_put_queue()
3388 cfq_schedule_dispatch(cfqd); in cfq_put_queue()
3417 static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq) in cfq_exit_cfqq() argument
3419 if (unlikely(cfqq == cfqd->active_queue)) { in cfq_exit_cfqq()
3420 __cfq_slice_expired(cfqd, cfqq, 0); in cfq_exit_cfqq()
3421 cfq_schedule_dispatch(cfqd); in cfq_exit_cfqq()
3439 struct cfq_data *cfqd = cic_to_cfqd(cic); in cfq_exit_icq() local
3442 cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]); in cfq_exit_icq()
3447 cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_SYNC]); in cfq_exit_icq()
3497 struct cfq_data *cfqd = cic_to_cfqd(cic); in check_ioprio_changed() local
3504 if (unlikely(!cfqd) || likely(cic->ioprio == ioprio)) in check_ioprio_changed()
3510 new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic, bio, in check_ioprio_changed()
3525 static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq, in cfq_init_cfqq() argument
3533 cfqq->cfqd = cfqd; in cfq_init_cfqq()
3548 struct cfq_data *cfqd = cic_to_cfqd(cic); in check_blkcg_changed() local
3560 if (unlikely(!cfqd) || likely(cic->blkcg_serial_nr == serial_nr)) in check_blkcg_changed()
3569 cfq_log_cfqq(cfqd, sync_cfqq, "changed cgroup"); in check_blkcg_changed()
3581 cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic, in cfq_find_alloc_queue() argument
3592 cfqg = cfq_lookup_create_cfqg(cfqd, blkcg); in cfq_find_alloc_queue()
3594 cfqq = &cfqd->oom_cfqq; in cfq_find_alloc_queue()
3604 if (!cfqq || cfqq == &cfqd->oom_cfqq) { in cfq_find_alloc_queue()
3611 spin_unlock_irq(cfqd->queue->queue_lock); in cfq_find_alloc_queue()
3614 cfqd->queue->node); in cfq_find_alloc_queue()
3615 spin_lock_irq(cfqd->queue->queue_lock); in cfq_find_alloc_queue()
3619 return &cfqd->oom_cfqq; in cfq_find_alloc_queue()
3623 cfqd->queue->node); in cfq_find_alloc_queue()
3627 cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync); in cfq_find_alloc_queue()
3630 cfq_log_cfqq(cfqd, cfqq, "alloced"); in cfq_find_alloc_queue()
3632 cfqq = &cfqd->oom_cfqq; in cfq_find_alloc_queue()
3643 cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio) in cfq_async_queue_prio() argument
3647 return &cfqd->async_cfqq[0][ioprio]; in cfq_async_queue_prio()
3652 return &cfqd->async_cfqq[1][ioprio]; in cfq_async_queue_prio()
3654 return &cfqd->async_idle_cfqq; in cfq_async_queue_prio()
3661 cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic, in cfq_get_queue() argument
3675 async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio); in cfq_get_queue()
3680 cfqq = cfq_find_alloc_queue(cfqd, is_sync, cic, bio, gfp_mask); in cfq_get_queue()
3706 cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_queue *cfqq, in cfq_update_io_thinktime() argument
3710 __cfq_update_io_thinktime(&cic->ttime, cfqd->cfq_slice_idle); in cfq_update_io_thinktime()
3712 cfqd->cfq_slice_idle); in cfq_update_io_thinktime()
3715 __cfq_update_io_thinktime(&cfqq->cfqg->ttime, cfqd->cfq_group_idle); in cfq_update_io_thinktime()
3720 cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq, in cfq_update_io_seektime() argument
3733 if (blk_queue_nonrot(cfqd->queue)) in cfq_update_io_seektime()
3744 cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq, in cfq_update_idle_window() argument
3763 !cfqd->cfq_slice_idle || in cfq_update_idle_window()
3767 if (cic->ttime.ttime_mean > cfqd->cfq_slice_idle) in cfq_update_idle_window()
3774 cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle); in cfq_update_idle_window()
3787 cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq, in cfq_should_preempt() argument
3792 cfqq = cfqd->active_queue; in cfq_should_preempt()
3822 if (cfqd->serving_wl_type == SYNC_NOIDLE_WORKLOAD && in cfq_should_preempt()
3842 if (RB_EMPTY_ROOT(&cfqq->sort_list) && !cfq_should_idle(cfqd, cfqq)) in cfq_should_preempt()
3845 if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq)) in cfq_should_preempt()
3852 if (cfq_rq_close(cfqd, cfqq, rq)) in cfq_should_preempt()
3862 static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq) in cfq_preempt_queue() argument
3864 enum wl_type_t old_type = cfqq_type(cfqd->active_queue); in cfq_preempt_queue()
3866 cfq_log_cfqq(cfqd, cfqq, "preempt"); in cfq_preempt_queue()
3867 cfq_slice_expired(cfqd, 1); in cfq_preempt_queue()
3882 cfq_service_tree_add(cfqd, cfqq, 1); in cfq_preempt_queue()
3893 cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq, in cfq_rq_enqueued() argument
3898 cfqd->rq_queued++; in cfq_rq_enqueued()
3902 cfq_update_io_thinktime(cfqd, cfqq, cic); in cfq_rq_enqueued()
3903 cfq_update_io_seektime(cfqd, cfqq, rq); in cfq_rq_enqueued()
3904 cfq_update_idle_window(cfqd, cfqq, cic); in cfq_rq_enqueued()
3908 if (cfqq == cfqd->active_queue) { in cfq_rq_enqueued()
3921 cfqd->busy_queues > 1) { in cfq_rq_enqueued()
3922 cfq_del_timer(cfqd, cfqq); in cfq_rq_enqueued()
3924 __blk_run_queue(cfqd->queue); in cfq_rq_enqueued()
3930 } else if (cfq_should_preempt(cfqd, cfqq, rq)) { in cfq_rq_enqueued()
3937 cfq_preempt_queue(cfqd, cfqq); in cfq_rq_enqueued()
3938 __blk_run_queue(cfqd->queue); in cfq_rq_enqueued()
3944 struct cfq_data *cfqd = q->elevator->elevator_data; in cfq_insert_request() local
3947 cfq_log_cfqq(cfqd, cfqq, "insert_request"); in cfq_insert_request()
3950 rq->fifo_time = jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]; in cfq_insert_request()
3953 cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group, in cfq_insert_request()
3955 cfq_rq_enqueued(cfqd, cfqq, rq); in cfq_insert_request()
3962 static void cfq_update_hw_tag(struct cfq_data *cfqd) in cfq_update_hw_tag() argument
3964 struct cfq_queue *cfqq = cfqd->active_queue; in cfq_update_hw_tag()
3966 if (cfqd->rq_in_driver > cfqd->hw_tag_est_depth) in cfq_update_hw_tag()
3967 cfqd->hw_tag_est_depth = cfqd->rq_in_driver; in cfq_update_hw_tag()
3969 if (cfqd->hw_tag == 1) in cfq_update_hw_tag()
3972 if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN && in cfq_update_hw_tag()
3973 cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN) in cfq_update_hw_tag()
3983 CFQ_HW_QUEUE_MIN && cfqd->rq_in_driver < CFQ_HW_QUEUE_MIN) in cfq_update_hw_tag()
3986 if (cfqd->hw_tag_samples++ < 50) in cfq_update_hw_tag()
3989 if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN) in cfq_update_hw_tag()
3990 cfqd->hw_tag = 1; in cfq_update_hw_tag()
3992 cfqd->hw_tag = 0; in cfq_update_hw_tag()
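cfq_update_hw_tag() infers whether the device queues commands itself by sampling driver depth: it remembers the deepest rq_in_driver observed and, after 50 samples, sets hw_tag if that estimate ever reached CFQ_HW_QUEUE_MIN (the guard that skips near-idle samples is elided here). A compressed sketch; the threshold of 5 is an assumption about CFQ_HW_QUEUE_MIN:

#include <stdio.h>

#define HW_QUEUE_MIN 5   /* assumed value of CFQ_HW_QUEUE_MIN */

struct hw_tag_est {
    int decided;         /* -1 undecided, 0 no tagging, 1 tagging */
    unsigned samples;
    unsigned max_depth;
};

static void hw_tag_sample(struct hw_tag_est *e, unsigned rq_in_driver)
{
    if (rq_in_driver > e->max_depth)
        e->max_depth = rq_in_driver;     /* track deepest queue seen */
    if (e->decided != -1 || e->samples++ < 50)
        return;
    e->decided = (e->max_depth >= HW_QUEUE_MIN);
}

int main(void)
{
    struct hw_tag_est e = { .decided = -1 };

    for (int i = 0; i < 60; i++)
        hw_tag_sample(&e, i % 7);        /* depth occasionally reaches 6 */
    printf("hw_tag = %d (max depth %u)\n", e.decided, e.max_depth);
    return 0;
}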
3995 static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq) in cfq_should_wait_busy() argument
3997 struct cfq_io_cq *cic = cfqd->active_cic; in cfq_should_wait_busy()
4008 if (cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true)) in cfq_should_wait_busy()
4035 struct cfq_data *cfqd = cfqq->cfqd; in cfq_completed_request() local
4040 cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d", in cfq_completed_request()
4043 cfq_update_hw_tag(cfqd); in cfq_completed_request()
4045 WARN_ON(!cfqd->rq_in_driver); in cfq_completed_request()
4047 cfqd->rq_in_driver--; in cfq_completed_request()
4053 cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--; in cfq_completed_request()
4067 if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now)) in cfq_completed_request()
4068 cfqd->last_delayed_sync = now; in cfq_completed_request()
4079 if (cfqd->active_queue == cfqq) { in cfq_completed_request()
4083 cfq_set_prio_slice(cfqd, cfqq); in cfq_completed_request()
4091 if (cfq_should_wait_busy(cfqd, cfqq)) { in cfq_completed_request()
4092 unsigned long extend_sl = cfqd->cfq_slice_idle; in cfq_completed_request()
4093 if (!cfqd->cfq_slice_idle) in cfq_completed_request()
4094 extend_sl = cfqd->cfq_group_idle; in cfq_completed_request()
4097 cfq_log_cfqq(cfqd, cfqq, "will busy wait"); in cfq_completed_request()
4109 cfq_slice_expired(cfqd, 1); in cfq_completed_request()
4111 !cfq_close_cooperator(cfqd, cfqq)) { in cfq_completed_request()
4112 cfq_arm_slice_timer(cfqd); in cfq_completed_request()
4116 if (!cfqd->rq_in_driver) in cfq_completed_request()
4117 cfq_schedule_dispatch(cfqd); in cfq_completed_request()
4132 struct cfq_data *cfqd = q->elevator->elevator_data; in cfq_may_queue() local
4143 cic = cfq_cic_lookup(cfqd, tsk->io_context); in cfq_may_queue()
4180 cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_cq *cic, in cfq_merge_cfqqs() argument
4183 cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq); in cfq_merge_cfqqs()
4218 struct cfq_data *cfqd = q->elevator->elevator_data; in cfq_set_request() local
4232 if (!cfqq || cfqq == &cfqd->oom_cfqq) { in cfq_set_request()
4233 cfqq = cfq_get_queue(cfqd, is_sync, cic, bio, gfp_mask); in cfq_set_request()
4240 cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq"); in cfq_set_request()
4253 cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq); in cfq_set_request()
4268 struct cfq_data *cfqd = in cfq_kick_queue() local
4270 struct request_queue *q = cfqd->queue; in cfq_kick_queue()
4273 __blk_run_queue(cfqd->queue); in cfq_kick_queue()
4282 struct cfq_data *cfqd = (struct cfq_data *) data; in cfq_idle_slice_timer() local
4287 cfq_log(cfqd, "idle timer fired"); in cfq_idle_slice_timer()
4289 spin_lock_irqsave(cfqd->queue->queue_lock, flags); in cfq_idle_slice_timer()
4291 cfqq = cfqd->active_queue; in cfq_idle_slice_timer()
4311 if (!cfqd->busy_queues) in cfq_idle_slice_timer()
4326 cfq_slice_expired(cfqd, timed_out); in cfq_idle_slice_timer()
4328 cfq_schedule_dispatch(cfqd); in cfq_idle_slice_timer()
4330 spin_unlock_irqrestore(cfqd->queue->queue_lock, flags); in cfq_idle_slice_timer()
4333 static void cfq_shutdown_timer_wq(struct cfq_data *cfqd) in cfq_shutdown_timer_wq() argument
4335 del_timer_sync(&cfqd->idle_slice_timer); in cfq_shutdown_timer_wq()
4336 cancel_work_sync(&cfqd->unplug_work); in cfq_shutdown_timer_wq()
4339 static void cfq_put_async_queues(struct cfq_data *cfqd) in cfq_put_async_queues() argument
4344 if (cfqd->async_cfqq[0][i]) in cfq_put_async_queues()
4345 cfq_put_queue(cfqd->async_cfqq[0][i]); in cfq_put_async_queues()
4346 if (cfqd->async_cfqq[1][i]) in cfq_put_async_queues()
4347 cfq_put_queue(cfqd->async_cfqq[1][i]); in cfq_put_async_queues()
4350 if (cfqd->async_idle_cfqq) in cfq_put_async_queues()
4351 cfq_put_queue(cfqd->async_idle_cfqq); in cfq_put_async_queues()
4356 struct cfq_data *cfqd = e->elevator_data; in cfq_exit_queue() local
4357 struct request_queue *q = cfqd->queue; in cfq_exit_queue()
4359 cfq_shutdown_timer_wq(cfqd); in cfq_exit_queue()
4363 if (cfqd->active_queue) in cfq_exit_queue()
4364 __cfq_slice_expired(cfqd, cfqd->active_queue, 0); in cfq_exit_queue()
4366 cfq_put_async_queues(cfqd); in cfq_exit_queue()
4370 cfq_shutdown_timer_wq(cfqd); in cfq_exit_queue()
4375 kfree(cfqd->root_group); in cfq_exit_queue()
4377 kfree(cfqd); in cfq_exit_queue()
4382 struct cfq_data *cfqd; in cfq_init_queue() local
4391 cfqd = kzalloc_node(sizeof(*cfqd), GFP_KERNEL, q->node); in cfq_init_queue()
4392 if (!cfqd) { in cfq_init_queue()
4396 eq->elevator_data = cfqd; in cfq_init_queue()
4398 cfqd->queue = q; in cfq_init_queue()
4404 cfqd->grp_service_tree = CFQ_RB_ROOT; in cfq_init_queue()
4412 cfqd->root_group = blkg_to_cfqg(q->root_blkg); in cfq_init_queue()
4415 cfqd->root_group = kzalloc_node(sizeof(*cfqd->root_group), in cfq_init_queue()
4416 GFP_KERNEL, cfqd->queue->node); in cfq_init_queue()
4417 if (!cfqd->root_group) in cfq_init_queue()
4420 cfq_init_cfqg_base(cfqd->root_group); in cfq_init_queue()
4422 cfqd->root_group->weight = 2 * CFQ_WEIGHT_DEFAULT; in cfq_init_queue()
4423 cfqd->root_group->leaf_weight = 2 * CFQ_WEIGHT_DEFAULT; in cfq_init_queue()
4431 cfqd->prio_trees[i] = RB_ROOT; in cfq_init_queue()
4440 cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0); in cfq_init_queue()
4441 cfqd->oom_cfqq.ref++; in cfq_init_queue()
4444 cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, cfqd->root_group); in cfq_init_queue()
4445 cfqg_put(cfqd->root_group); in cfq_init_queue()
4448 init_timer(&cfqd->idle_slice_timer); in cfq_init_queue()
4449 cfqd->idle_slice_timer.function = cfq_idle_slice_timer; in cfq_init_queue()
4450 cfqd->idle_slice_timer.data = (unsigned long) cfqd; in cfq_init_queue()
4452 INIT_WORK(&cfqd->unplug_work, cfq_kick_queue); in cfq_init_queue()
4454 cfqd->cfq_quantum = cfq_quantum; in cfq_init_queue()
4455 cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0]; in cfq_init_queue()
4456 cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1]; in cfq_init_queue()
4457 cfqd->cfq_back_max = cfq_back_max; in cfq_init_queue()
4458 cfqd->cfq_back_penalty = cfq_back_penalty; in cfq_init_queue()
4459 cfqd->cfq_slice[0] = cfq_slice_async; in cfq_init_queue()
4460 cfqd->cfq_slice[1] = cfq_slice_sync; in cfq_init_queue()
4461 cfqd->cfq_target_latency = cfq_target_latency; in cfq_init_queue()
4462 cfqd->cfq_slice_async_rq = cfq_slice_async_rq; in cfq_init_queue()
4463 cfqd->cfq_slice_idle = cfq_slice_idle; in cfq_init_queue()
4464 cfqd->cfq_group_idle = cfq_group_idle; in cfq_init_queue()
4465 cfqd->cfq_latency = 1; in cfq_init_queue()
4466 cfqd->hw_tag = -1; in cfq_init_queue()
4471 cfqd->last_delayed_sync = jiffies - HZ; in cfq_init_queue()
4475 kfree(cfqd); in cfq_init_queue()
4501 struct cfq_data *cfqd = e->elevator_data; \
4507 SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
4508 SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
4509 SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
4510 SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
4511 SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
4512 SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
4513 SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
4514 SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
4515 SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
4516 SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
4517 SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
4518 SHOW_FUNCTION(cfq_target_latency_show, cfqd->cfq_target_latency, 1);
4524 struct cfq_data *cfqd = e->elevator_data; \
4537 STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
4538 STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
4540 STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
4542 STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
4543 STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
4545 STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
4546 STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1);
4547 STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
4548 STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
4549 STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
4551 STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
4552 STORE_FUNCTION(cfq_target_latency_store, &cfqd->cfq_target_latency, 1, UINT_MAX, 1);
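The SHOW_FUNCTION/STORE_FUNCTION tables above stamp out one sysfs handler per tunable from a single template: show optionally converts jiffies to milliseconds, store parses, clamps to [MIN, MAX], and converts back. A userspace sketch of the same macro-generation idea (names hypothetical; the jiffies/msec conversion flag is left out):

#include <stdio.h>
#include <stdlib.h>

/* one struct plays the role of cfq_data's tunables */
static struct { unsigned quantum; } tun = { 8 };

#define SHOW_FUNCTION(name, var)                        \
static int name(char *buf, size_t len)                  \
{                                                       \
    return snprintf(buf, len, "%u\n", (var));           \
}

#define STORE_FUNCTION(name, var, MIN, MAX)             \
static void name(const char *buf)                       \
{                                                       \
    unsigned v = strtoul(buf, NULL, 10);                \
    if (v < (MIN)) v = (MIN);                           \
    if (v > (MAX)) v = (MAX);                           \
    *(var) = v;                                         \
}

SHOW_FUNCTION(quantum_show, tun.quantum)
STORE_FUNCTION(quantum_store, &tun.quantum, 1, 64)

int main(void)
{
    char buf[16];

    quantum_store("999");          /* clamped to the 64 upper bound */
    quantum_show(buf, sizeof(buf));
    fputs(buf, stdout);
    return 0;
}

Generating the pairs this way keeps each clamp bound next to the tunable it guards, which is exactly what the table of STORE_FUNCTION lines encodes.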