cfq-iosched.c

55  #define CFQQ_SEEKY(cfqq) (hweight32(cfqq->seek_history) > 32/8)
64  #define cfq_class_idle(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
65  #define cfq_class_rt(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
264  /* number of cfqq currently on this group */
298  struct cfq_queue *cfqq[2]; member in struct:cfq_io_cq
382  * Fallback dummy cfqq for extreme OOM conditions
414  CFQ_CFQQ_FLAG_coop, /* cfqq is shared */
415  CFQ_CFQQ_FLAG_split_coop, /* shared cfqq will be splitted */
416  CFQ_CFQQ_FLAG_deep, /* sync cfqq experienced large depth */
421  static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq) \
423  (cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name); \
425  static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq) \
427  (cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name); \
429  static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq) \
431  return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0; \
544  * group is already marked empty. This can happen if cfqq got new cfqg_stats_set_start_empty_time()
627  #define cfq_log_cfqq(cfqd, cfqq, fmt, args...) do { \
630  blkg_path(cfqg_to_blkg((cfqq)->cfqg), __pbuf, sizeof(__pbuf)); \
631  blk_add_trace_msg((cfqd)->queue, "cfq%d%c%c %s " fmt, (cfqq)->pid, \
632  cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
633  cfqq_type((cfqq)) == SYNC_NOIDLE_WORKLOAD ? 'N' : ' ',\
760  #define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \ cfqg_put()
761  blk_add_trace_msg((cfqd)->queue, "cfq%d%c%c " fmt, (cfqq)->pid, \ cfqg_put()
762  cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \ cfqg_put()
763  cfqq_type((cfqq)) == SYNC_NOIDLE_WORKLOAD ? 'N' : ' ',\ cfqg_put()
821  static inline enum wl_class_t cfqq_class(struct cfq_queue *cfqq) cfqq_class() argument
823  if (cfq_class_idle(cfqq)) cfqq_class()
825  if (cfq_class_rt(cfqq)) cfqq_class()
831  static enum wl_type_t cfqq_type(struct cfq_queue *cfqq) cfqq_type() argument
833  if (!cfq_cfqq_sync(cfqq)) cfqq_type()
835  if (!cfq_cfqq_idle_window(cfqq)) cfqq_type()
880  return cic->cfqq[is_sync]; cic_to_cfqq()
883  static inline void cic_set_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq, cic_set_cfqq() argument
886  cic->cfqq[is_sync] = cfqq; cic_set_cfqq()
931  cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) cfq_prio_to_slice() argument
933  return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio); cfq_prio_to_slice()
1016  cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) cfq_scaled_cfqq_slice() argument
1018  unsigned slice = cfq_prio_to_slice(cfqd, cfqq); cfq_scaled_cfqq_slice()
1024  unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg, cfq_scaled_cfqq_slice()
1025  cfq_class_rt(cfqq)); cfq_scaled_cfqq_slice()
1028  unsigned group_slice = cfq_group_slice(cfqd, cfqq->cfqg); cfq_scaled_cfqq_slice()
1046  cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) cfq_set_prio_slice() argument
1048  unsigned slice = cfq_scaled_cfqq_slice(cfqd, cfqq); cfq_set_prio_slice()
1050  cfqq->slice_start = jiffies; cfq_set_prio_slice()
1051  cfqq->slice_end = jiffies + slice; cfq_set_prio_slice()
1052  cfqq->allocated_slice = slice; cfq_set_prio_slice()
1053  cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies); cfq_set_prio_slice()
1061  static inline bool cfq_slice_used(struct cfq_queue *cfqq) cfq_slice_used() argument
1063  if (cfq_cfqq_slice_new(cfqq)) cfq_slice_used()
1065  if (time_before(jiffies, cfqq->slice_end)) cfq_slice_used()
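The CFQ_CFQQ_FLAG accessors at 421-431 above are stamped out by a single generator macro (CFQ_CFQQ_FNS in the source), one mark/clear/test triple per flag bit. A minimal userspace sketch of the same pattern, with illustrative type and flag names:

#include <stdio.h>

struct queue { unsigned int flags; };

enum { FLAG_on_rr, FLAG_idle_window, FLAG_coop };

/* Stamp out mark/clear/test helpers for one flag bit, as the
 * CFQ_CFQQ_FNS macro in the listing above does for struct cfq_queue. */
#define QUEUE_FNS(name)                                              \
static inline void mark_##name(struct queue *q)                      \
{ q->flags |= (1 << FLAG_##name); }                                  \
static inline void clear_##name(struct queue *q)                     \
{ q->flags &= ~(1 << FLAG_##name); }                                 \
static inline int test_##name(const struct queue *q)                 \
{ return (q->flags & (1 << FLAG_##name)) != 0; }

QUEUE_FNS(on_rr)
QUEUE_FNS(idle_window)

int main(void)
{
    struct queue q = { 0 };

    mark_on_rr(&q);
    printf("on_rr=%d idle_window=%d\n", test_on_rr(&q), test_idle_window(&q));
    clear_on_rr(&q);
    return 0;
}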
1208  cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq, cfq_find_next_rq() argument
1223  rbnext = rb_first(&cfqq->sort_list); cfq_find_next_rq()
1232  struct cfq_queue *cfqq) cfq_slice_offset()
1237  return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) - cfq_slice_offset()
1238  cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio)); cfq_slice_offset()
1425  static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq, cfq_cfqq_slice_usage() argument
1434  if (!cfqq->slice_start || cfqq->slice_start == jiffies) { cfq_cfqq_slice_usage()
1441  slice_used = max_t(unsigned, (jiffies - cfqq->dispatch_start), cfq_cfqq_slice_usage()
1444  slice_used = jiffies - cfqq->slice_start; cfq_cfqq_slice_usage()
1445  if (slice_used > cfqq->allocated_slice) { cfq_cfqq_slice_usage()
1446  *unaccounted_time = slice_used - cfqq->allocated_slice; cfq_cfqq_slice_usage()
1447  slice_used = cfqq->allocated_slice; cfq_cfqq_slice_usage()
1449  if (time_after(cfqq->slice_start, cfqq->dispatch_start)) cfq_cfqq_slice_usage()
1450  *unaccounted_time += cfqq->slice_start - cfq_cfqq_slice_usage()
1451  cfqq->dispatch_start; cfq_cfqq_slice_usage()
1458  struct cfq_queue *cfqq) cfq_group_served()
1467  used_sl = charge = cfq_cfqq_slice_usage(cfqq, &unaccounted_sl); cfq_group_served()
1470  charge = cfqq->slice_dispatch; cfq_group_served()
1471  else if (!cfq_cfqq_sync(cfqq) && !nr_sync) cfq_group_served()
1472  charge = cfqq->allocated_slice; cfq_group_served()
1496  cfq_log_cfqq(cfqq->cfqd, cfqq, cfq_group_served()
1498  used_sl, cfqq->slice_dispatch, charge, cfq_group_served()
1499  iops_mode(cfqd), cfqq->nr_sectors); cfq_group_served()
1627  static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) cfq_link_cfqq_cfqg() argument
1630  if (!cfq_cfqq_sync(cfqq)) cfq_link_cfqq_cfqg()
1631  cfqg = cfqq->cfqd->root_group; cfq_link_cfqq_cfqg()
1633  cfqq->cfqg = cfqg; cfq_link_cfqq_cfqg()
1634  /* cfqq reference on cfqg */ cfq_link_cfqq_cfqg()
2011  cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) { cfq_link_cfqq_cfqg() argument
2012  cfqq->cfqg = cfqg; cfq_link_cfqq_cfqg()
2022  static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq, cfq_service_tree_add() argument
2032  st = st_for(cfqq->cfqg, cfqq_class(cfqq), cfqq_type(cfqq)); cfq_service_tree_add()
2033  if (cfq_class_idle(cfqq)) { cfq_service_tree_add()
2036  if (parent && parent != &cfqq->rb_node) { cfq_service_tree_add()
2048  rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies; cfq_service_tree_add()
2049  rb_key -= cfqq->slice_resid; cfq_service_tree_add()
2050  cfqq->slice_resid = 0; cfq_service_tree_add()
2057  if (!RB_EMPTY_NODE(&cfqq->rb_node)) { cfq_service_tree_add()
2062  if (rb_key == cfqq->rb_key && cfqq->service_tree == st) cfq_service_tree_add()
2065  cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree); cfq_service_tree_add()
2066  cfqq->service_tree = NULL; cfq_service_tree_add()
2071  cfqq->service_tree = st; cfq_service_tree_add()
2089  st->left = &cfqq->rb_node; cfq_service_tree_add()
2091  cfqq->rb_key = rb_key; cfq_service_tree_add()
2092  rb_link_node(&cfqq->rb_node, parent, p); cfq_service_tree_add()
2093  rb_insert_color(&cfqq->rb_node, &st->rb); cfq_service_tree_add()
2097  cfq_group_notify_queue_add(cfqd, cfqq->cfqg); cfq_service_tree_add()
2106  struct cfq_queue *cfqq = NULL; cfq_prio_tree_lookup() local
2114  cfqq = rb_entry(parent, struct cfq_queue, p_node); cfq_prio_tree_lookup()
2120  if (sector > blk_rq_pos(cfqq->next_rq)) cfq_prio_tree_lookup()
2122  else if (sector < blk_rq_pos(cfqq->next_rq)) cfq_prio_tree_lookup()
2127  cfqq = NULL; cfq_prio_tree_lookup()
2133  return cfqq; cfq_prio_tree_lookup()
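cfq_prio_tree_lookup() (2106-2133) walks a tree keyed by the sector of each queue's next pending request, so a bio can be matched to the queue working nearest to it. A sketch of that walk, assuming a plain binary search tree in place of the kernel rbtree; node and field names are illustrative:

#include <stdio.h>

/* Plain BST standing in for the kernel rbtree; each node is keyed by
 * the sector of the queue's next pending request, as in
 * cfq_prio_tree_lookup(). */
struct qnode {
    unsigned long long next_rq_sector;
    struct qnode *left, *right;
};

/* Return the node whose next request starts exactly at 'sector', or
 * NULL; '*parent' is left pointing at the would-be parent so an insert
 * can link a new node there, mirroring the &parent/&p out-parameters
 * in the listing. */
static struct qnode *lookup(struct qnode *root, unsigned long long sector,
                            struct qnode **parent)
{
    struct qnode *n = root;

    *parent = NULL;
    while (n) {
        *parent = n;
        if (sector > n->next_rq_sector)
            n = n->right;
        else if (sector < n->next_rq_sector)
            n = n->left;
        else
            return n;       /* exact match */
    }
    return NULL;            /* not found; *parent is the insertion point */
}

int main(void)
{
    struct qnode a = { 50, NULL, NULL }, b = { 100, NULL, NULL };
    struct qnode root = { 75, &a, &b }, *parent;

    printf("found=%p\n", (void *)lookup(&root, 100, &parent));
    return 0;
}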
2136  static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq) cfq_prio_tree_add() argument
2141  if (cfqq->p_root) { cfq_prio_tree_add()
2142  rb_erase(&cfqq->p_node, cfqq->p_root); cfq_prio_tree_add()
2143  cfqq->p_root = NULL; cfq_prio_tree_add()
2146  if (cfq_class_idle(cfqq)) cfq_prio_tree_add()
2148  if (!cfqq->next_rq) cfq_prio_tree_add()
2151  cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio]; cfq_prio_tree_add()
2152  __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root, cfq_prio_tree_add()
2153  blk_rq_pos(cfqq->next_rq), &parent, &p); cfq_prio_tree_add()
2155  rb_link_node(&cfqq->p_node, parent, p); cfq_prio_tree_add()
2156  rb_insert_color(&cfqq->p_node, cfqq->p_root); cfq_prio_tree_add()
2158  cfqq->p_root = NULL; cfq_prio_tree_add()
2162  * Update cfqq's position in the service tree.
2164  static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq) cfq_resort_rr_list() argument
2167  * Resorting requires the cfqq to be on the RR list already. cfq_resort_rr_list()
2169  if (cfq_cfqq_on_rr(cfqq)) { cfq_resort_rr_list()
2170  cfq_service_tree_add(cfqd, cfqq, 0); cfq_resort_rr_list()
2171  cfq_prio_tree_add(cfqd, cfqq); cfq_resort_rr_list()
2179  static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq) cfq_add_cfqq_rr() argument
2181  cfq_log_cfqq(cfqd, cfqq, "add_to_rr"); cfq_add_cfqq_rr()
2182  BUG_ON(cfq_cfqq_on_rr(cfqq)); cfq_add_cfqq_rr()
2183  cfq_mark_cfqq_on_rr(cfqq); cfq_add_cfqq_rr()
2185  if (cfq_cfqq_sync(cfqq)) cfq_add_cfqq_rr()
2188  cfq_resort_rr_list(cfqd, cfqq); cfq_add_cfqq_rr()
2192  * Called when the cfqq no longer has requests pending, remove it from
2195  static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq) cfq_del_cfqq_rr() argument
2197  cfq_log_cfqq(cfqd, cfqq, "del_from_rr"); cfq_del_cfqq_rr()
2198  BUG_ON(!cfq_cfqq_on_rr(cfqq)); cfq_del_cfqq_rr()
2199  cfq_clear_cfqq_on_rr(cfqq); cfq_del_cfqq_rr()
2201  if (!RB_EMPTY_NODE(&cfqq->rb_node)) { cfq_del_cfqq_rr()
2202  cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree); cfq_del_cfqq_rr()
2203  cfqq->service_tree = NULL; cfq_del_cfqq_rr()
2205  if (cfqq->p_root) { cfq_del_cfqq_rr()
2206  rb_erase(&cfqq->p_node, cfqq->p_root); cfq_del_cfqq_rr()
2207  cfqq->p_root = NULL; cfq_del_cfqq_rr()
2210  cfq_group_notify_queue_del(cfqd, cfqq->cfqg); cfq_del_cfqq_rr()
2213  if (cfq_cfqq_sync(cfqq)) cfq_del_cfqq_rr()
2222  struct cfq_queue *cfqq = RQ_CFQQ(rq); cfq_del_rq_rb() local
2225  BUG_ON(!cfqq->queued[sync]); cfq_del_rq_rb()
2226  cfqq->queued[sync]--; cfq_del_rq_rb()
2228  elv_rb_del(&cfqq->sort_list, rq); cfq_del_rq_rb()
2230  if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) { cfq_del_rq_rb()
2236  if (cfqq->p_root) { cfq_del_rq_rb()
2237  rb_erase(&cfqq->p_node, cfqq->p_root); cfq_del_rq_rb()
2238  cfqq->p_root = NULL; cfq_del_rq_rb()
2245  struct cfq_queue *cfqq = RQ_CFQQ(rq); cfq_add_rq_rb() local
2246  struct cfq_data *cfqd = cfqq->cfqd; cfq_add_rq_rb()
2249  cfqq->queued[rq_is_sync(rq)]++; cfq_add_rq_rb()
2251  elv_rb_add(&cfqq->sort_list, rq); cfq_add_rq_rb()
2253  if (!cfq_cfqq_on_rr(cfqq)) cfq_add_rq_rb()
2254  cfq_add_cfqq_rr(cfqd, cfqq); cfq_add_rq_rb()
2259  prev = cfqq->next_rq; cfq_add_rq_rb()
2260  cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq, cfqd->last_position); cfq_add_rq_rb()
2265  if (prev != cfqq->next_rq) cfq_add_rq_rb()
2266  cfq_prio_tree_add(cfqd, cfqq); cfq_add_rq_rb()
2268  BUG_ON(!cfqq->next_rq); cfq_add_rq_rb()
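cfq_add_rq_rb() above re-evaluates cfqq->next_rq with cfq_choose_req() against cfqd->last_position (2260), preferring the candidate closest to where the head currently sits. The kernel heuristic also penalizes backward seeks; the sketch below keeps only the distance comparison and should be read as a simplification:

#include <stdio.h>

typedef unsigned long long sector_t;

struct req { sector_t pos; };

/* Absolute sector distance from the current head position. */
static sector_t dist(const struct req *rq, sector_t head)
{
    return rq->pos > head ? rq->pos - head : head - rq->pos;
}

/* Simplified stand-in for cfq_choose_req(): of two candidates, keep
 * the one closer to last_position. The kernel version additionally
 * penalizes backward seeks, which is omitted here. */
static struct req *choose_req(struct req *a, struct req *b, sector_t last_pos)
{
    if (!a)
        return b;
    if (!b)
        return a;
    return dist(a, last_pos) <= dist(b, last_pos) ? a : b;
}

int main(void)
{
    struct req r1 = { 1000 }, r2 = { 1300 };

    printf("next at %llu\n", choose_req(&r1, &r2, 1200)->pos);
    return 0;
}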
2271  static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq) cfq_reposition_rq_rb() argument
2273  elv_rb_del(&cfqq->sort_list, rq); cfq_reposition_rq_rb()
2274  cfqq->queued[rq_is_sync(rq)]--; cfq_reposition_rq_rb()
2277  cfqg_stats_update_io_add(RQ_CFQG(rq), cfqq->cfqd->serving_group, cfq_reposition_rq_rb()
2286  struct cfq_queue *cfqq; cfq_find_rq_fmerge() local
2292  cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio)); cfq_find_rq_fmerge()
2293  if (cfqq) cfq_find_rq_fmerge()
2294  return elv_rb_find(&cfqq->sort_list, bio_end_sector(bio)); cfq_find_rq_fmerge()
2322  struct cfq_queue *cfqq = RQ_CFQQ(rq); cfq_remove_request() local
2324  if (cfqq->next_rq == rq) cfq_remove_request()
2325  cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq); cfq_remove_request()
2330  cfqq->cfqd->rq_queued--; cfq_remove_request()
2333  WARN_ON(!cfqq->prio_pending); cfq_remove_request()
2334  cfqq->prio_pending--; cfq_remove_request()
2357  struct cfq_queue *cfqq = RQ_CFQQ(req); cfq_merged_request() local
2359  cfq_reposition_rq_rb(cfqq, req); cfq_merged_request()
2373  struct cfq_queue *cfqq = RQ_CFQQ(rq); cfq_merged_requests() local
2381  cfqq == RQ_CFQQ(next)) { cfq_merged_requests()
2386  if (cfqq->next_rq == next) cfq_merged_requests()
2387  cfqq->next_rq = rq; cfq_merged_requests()
2391  cfqq = RQ_CFQQ(next); cfq_merged_requests()
2397  if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list) && cfq_merged_requests()
2398  cfqq != cfqd->active_queue) cfq_merged_requests()
2399  cfq_del_cfqq_rr(cfqd, cfqq); cfq_merged_requests()
2407  struct cfq_queue *cfqq; cfq_allow_merge() local
2416  * Lookup the cfqq that this bio will be queued with and allow cfq_allow_merge()
2423  cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio)); cfq_allow_merge()
2424  return cfqq == RQ_CFQQ(rq); cfq_allow_merge()
2427  static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq) cfq_del_timer() argument
2430  cfqg_stats_update_idle_time(cfqq->cfqg); cfq_del_timer()
2434  struct cfq_queue *cfqq) __cfq_set_active_queue()
2436  if (cfqq) { __cfq_set_active_queue()
2437  cfq_log_cfqq(cfqd, cfqq, "set_active wl_class:%d wl_type:%d", __cfq_set_active_queue()
2439  cfqg_stats_update_avg_queue_size(cfqq->cfqg); __cfq_set_active_queue()
2440  cfqq->slice_start = 0; __cfq_set_active_queue()
2441  cfqq->dispatch_start = jiffies; __cfq_set_active_queue()
2442  cfqq->allocated_slice = 0; __cfq_set_active_queue()
2443  cfqq->slice_end = 0; __cfq_set_active_queue()
2444  cfqq->slice_dispatch = 0; __cfq_set_active_queue()
2445  cfqq->nr_sectors = 0; __cfq_set_active_queue()
2447  cfq_clear_cfqq_wait_request(cfqq); __cfq_set_active_queue()
2448  cfq_clear_cfqq_must_dispatch(cfqq); __cfq_set_active_queue()
2449  cfq_clear_cfqq_must_alloc_slice(cfqq); __cfq_set_active_queue()
2450  cfq_clear_cfqq_fifo_expire(cfqq); __cfq_set_active_queue()
2451  cfq_mark_cfqq_slice_new(cfqq); __cfq_set_active_queue()
2453  cfq_del_timer(cfqd, cfqq); __cfq_set_active_queue()
2456  cfqd->active_queue = cfqq; __cfq_set_active_queue()
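__cfq_set_active_queue() above resets the per-slice accounting fields (2440-2445), cfq_set_prio_slice() later stamps a new window (1050-1052), and cfq_slice_used() checks it (1061-1065). A sketch of that slice lifecycle with a stand-in jiffies counter; the slice length and residual handling follow the entries cited, everything else is illustrative:

#include <stdbool.h>
#include <stdio.h>

static unsigned long jiffies;   /* stand-in for the kernel tick counter */

struct queue {
    unsigned long slice_start, slice_end, allocated_slice;
    bool slice_new;
};

/* Mirror of cfq_set_prio_slice(): stamp the window for this round. */
static void set_slice(struct queue *q, unsigned long slice)
{
    q->slice_start = jiffies;
    q->slice_end = jiffies + slice;
    q->allocated_slice = slice;
    q->slice_new = false;
}

/* Mirror of cfq_slice_used(): a brand-new slice is never "used";
 * otherwise the slice is used once jiffies reaches slice_end. */
static bool slice_used(const struct queue *q)
{
    if (q->slice_new)
        return false;
    return jiffies >= q->slice_end;
}

int main(void)
{
    struct queue q = { .slice_new = true };

    set_slice(&q, 100);
    jiffies += 60;
    /* Residual slice credited back on early expiry, as in
     * __cfq_slice_expired(): resid = slice_end - jiffies. */
    printf("used=%d resid=%lu\n", slice_used(&q), q.slice_end - jiffies);
    return 0;
}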
2460  * current cfqq expired its slice (or was too idle), select new one
2463  __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq, __cfq_slice_expired() argument
2466  cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out); __cfq_slice_expired()
2468  if (cfq_cfqq_wait_request(cfqq)) __cfq_slice_expired()
2469  cfq_del_timer(cfqd, cfqq); __cfq_slice_expired()
2471  cfq_clear_cfqq_wait_request(cfqq); __cfq_slice_expired()
2472  cfq_clear_cfqq_wait_busy(cfqq); __cfq_slice_expired()
2475  * If this cfqq is shared between multiple processes, check to __cfq_slice_expired()
2480  if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq)) __cfq_slice_expired()
2481  cfq_mark_cfqq_split_coop(cfqq); __cfq_slice_expired()
2487  if (cfq_cfqq_slice_new(cfqq)) __cfq_slice_expired()
2488  cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq); __cfq_slice_expired()
2490  cfqq->slice_resid = cfqq->slice_end - jiffies; __cfq_slice_expired()
2491  cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid); __cfq_slice_expired()
2494  cfq_group_served(cfqd, cfqq->cfqg, cfqq); __cfq_slice_expired()
2496  if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) __cfq_slice_expired()
2497  cfq_del_cfqq_rr(cfqd, cfqq); __cfq_slice_expired()
2499  cfq_resort_rr_list(cfqd, cfqq); __cfq_slice_expired()
2501  if (cfqq == cfqd->active_queue) __cfq_slice_expired()
2512  struct cfq_queue *cfqq = cfqd->active_queue; cfq_slice_expired() local
2514  if (cfqq) cfq_slice_expired()
2515  __cfq_slice_expired(cfqd, cfqq, timed_out); cfq_slice_expired()
2520  * we'll simply select the first cfqq in the service tree.
2541  struct cfq_queue *cfqq; cfq_get_next_queue_forced() local
2553  if ((cfqq = cfq_rb_first(st)) != NULL) cfq_get_next_queue_forced()
2554  return cfqq; cfq_get_next_queue_forced()
2562  struct cfq_queue *cfqq) cfq_set_active_queue()
2564  if (!cfqq) cfq_set_active_queue()
2565  cfqq = cfq_get_next_queue(cfqd); cfq_set_active_queue()
2567  __cfq_set_active_queue(cfqd, cfqq); cfq_set_active_queue()
2568  return cfqq; cfq_set_active_queue()
2580  static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq, cfq_rq_close() argument
2640  struct cfq_queue *cfqq; cfq_close_cooperator() local
2660  cfqq = cfqq_close(cfqd, cur_cfqq); cfq_close_cooperator()
2661  if (!cfqq) cfq_close_cooperator()
2665  if (cur_cfqq->cfqg != cfqq->cfqg) cfq_close_cooperator()
2671  if (!cfq_cfqq_sync(cfqq)) cfq_close_cooperator()
2673  if (CFQQ_SEEKY(cfqq)) cfq_close_cooperator()
2679  if (cfq_class_rt(cfqq) != cfq_class_rt(cur_cfqq)) cfq_close_cooperator()
2682  return cfqq; cfq_close_cooperator()
2689  static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq) cfq_should_idle() argument
2691  enum wl_class_t wl_class = cfqq_class(cfqq); cfq_should_idle()
2692  struct cfq_rb_root *st = cfqq->service_tree; cfq_should_idle()
2705  if (cfq_cfqq_idle_window(cfqq) && cfq_should_idle()
2713  if (st->count == 1 && cfq_cfqq_sync(cfqq) && cfq_should_idle()
2716  cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d", st->count); cfq_should_idle()
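cfq_close_cooperator() (2640-2682) only merges with a queue issuing I/O near the current head position; the underlying test is cfq_rq_close() (2580). A sketch of that proximity check; the threshold value here is illustrative, since the kernel derives its CFQQ_SEEK_THR constant separately:

#include <stdbool.h>
#include <stdio.h>

typedef unsigned long long sector_t;

#define SEEK_THR ((sector_t)8192)   /* illustrative threshold, in sectors */

/* Stand-in for cfq_rq_close(): a request is "close" if it lands within
 * SEEK_THR sectors of the last dispatched position. */
static bool rq_close(sector_t rq_pos, sector_t last_pos)
{
    sector_t d = rq_pos > last_pos ? rq_pos - last_pos : last_pos - rq_pos;

    return d <= SEEK_THR;
}

int main(void)
{
    printf("near=%d far=%d\n",
           rq_close(10000, 12000), rq_close(10000, 900000));
    return 0;
}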
think_time:%lu", cfq_arm_slice_timer() 2774 if (group_idle && cfqq->cfqg->nr_cfqq > 1) cfq_arm_slice_timer() 2777 cfq_mark_cfqq_wait_request(cfqq); cfq_arm_slice_timer() 2785 cfqg_stats_set_start_idle_time(cfqq->cfqg); cfq_arm_slice_timer() 2786 cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl, cfq_arm_slice_timer() 2796 struct cfq_queue *cfqq = RQ_CFQQ(rq); cfq_dispatch_insert() local 2798 cfq_log_cfqq(cfqd, cfqq, "dispatch_insert"); cfq_dispatch_insert() 2800 cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq); cfq_dispatch_insert() 2802 cfqq->dispatched++; cfq_dispatch_insert() 2806 cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++; cfq_dispatch_insert() 2807 cfqq->nr_sectors += blk_rq_sectors(rq); cfq_dispatch_insert() 2808 cfqg_stats_update_dispatch(cfqq->cfqg, blk_rq_bytes(rq), rq->cmd_flags); cfq_dispatch_insert() 2814 static struct request *cfq_check_fifo(struct cfq_queue *cfqq) cfq_check_fifo() argument 2818 if (cfq_cfqq_fifo_expire(cfqq)) cfq_check_fifo() 2821 cfq_mark_cfqq_fifo_expire(cfqq); cfq_check_fifo() 2823 if (list_empty(&cfqq->fifo)) cfq_check_fifo() 2826 rq = rq_entry_fifo(cfqq->fifo.next); cfq_check_fifo() 2830 cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq); cfq_check_fifo() 2835 cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq) cfq_prio_to_maxrq() argument 2839 WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR); cfq_prio_to_maxrq() 2841 return 2 * base_rq * (IOPRIO_BE_NR - cfqq->ioprio); cfq_prio_to_maxrq() 2847 static int cfqq_process_refs(struct cfq_queue *cfqq) cfqq_process_refs() argument 2851 io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE]; cfqq_process_refs() 2852 process_refs = cfqq->ref - io_refs; cfqq_process_refs() 2857 static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq) cfq_setup_merge() argument 2864 * unsafe to follow the ->new_cfqq chain as other cfqq's in the cfq_setup_merge() 2873 if (__cfqq == cfqq) cfq_setup_merge() 2878 process_refs = cfqq_process_refs(cfqq); cfq_setup_merge() 2881 * If the process for the cfqq has gone away, there is no cfq_setup_merge() 2891 cfqq->new_cfqq = new_cfqq; cfq_setup_merge() 2894 new_cfqq->new_cfqq = cfqq; cfq_setup_merge() 2895 cfqq->ref += new_process_refs; cfq_setup_merge() 3040 struct cfq_queue *cfqq, *new_cfqq = NULL; cfq_select_queue() local 3042 cfqq = cfqd->active_queue; cfq_select_queue() 3043 if (!cfqq) cfq_select_queue() 3052 if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list)) cfq_select_queue() 3058 if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) { cfq_select_queue() 3068 if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list) cfq_select_queue() 3069 && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) { cfq_select_queue() 3070 cfqq = NULL; cfq_select_queue() 3080 if (!RB_EMPTY_ROOT(&cfqq->sort_list)) cfq_select_queue() 3087 * tree. If possible, merge the expiring queue with the new cfqq. 
3040  struct cfq_queue *cfqq, *new_cfqq = NULL; cfq_select_queue() local
3042  cfqq = cfqd->active_queue; cfq_select_queue()
3043  if (!cfqq) cfq_select_queue()
3052  if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list)) cfq_select_queue()
3058  if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) { cfq_select_queue()
3068  if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list) cfq_select_queue()
3069  && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) { cfq_select_queue()
3070  cfqq = NULL; cfq_select_queue()
3080  if (!RB_EMPTY_ROOT(&cfqq->sort_list)) cfq_select_queue()
3087  * tree. If possible, merge the expiring queue with the new cfqq. cfq_select_queue()
3089  new_cfqq = cfq_close_cooperator(cfqd, cfqq); cfq_select_queue()
3091  if (!cfqq->new_cfqq) cfq_select_queue()
3092  cfq_setup_merge(cfqq, new_cfqq); cfq_select_queue()
3102  cfqq = NULL; cfq_select_queue()
3110  if (CFQQ_SEEKY(cfqq) && cfq_cfqq_idle_window(cfqq) && cfq_select_queue()
3111  (cfq_cfqq_slice_new(cfqq) || cfq_select_queue()
3112  (cfqq->slice_end - jiffies > jiffies - cfqq->slice_start))) { cfq_select_queue()
3113  cfq_clear_cfqq_deep(cfqq); cfq_select_queue()
3114  cfq_clear_cfqq_idle_window(cfqq); cfq_select_queue()
3117  if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) { cfq_select_queue()
3118  cfqq = NULL; cfq_select_queue()
3127  if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1 && cfq_select_queue()
3128  cfqq->cfqg->dispatched && cfq_select_queue()
3129  !cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true)) { cfq_select_queue()
3130  cfqq = NULL; cfq_select_queue()
3144  cfqq = cfq_set_active_queue(cfqd, new_cfqq); cfq_select_queue()
3146  return cfqq; cfq_select_queue()
3149  static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq) __cfq_forced_dispatch_cfqq() argument
3153  while (cfqq->next_rq) { __cfq_forced_dispatch_cfqq()
3154  cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq); __cfq_forced_dispatch_cfqq()
3158  BUG_ON(!list_empty(&cfqq->fifo)); __cfq_forced_dispatch_cfqq()
3160  /* By default cfqq is not expired if it is empty. Do it explicitly */ __cfq_forced_dispatch_cfqq()
3161  __cfq_slice_expired(cfqq->cfqd, cfqq, 0); __cfq_forced_dispatch_cfqq()
3171  struct cfq_queue *cfqq; cfq_forced_dispatch() local
3176  while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) { cfq_forced_dispatch()
3177  __cfq_set_active_queue(cfqd, cfqq); cfq_forced_dispatch()
3178  dispatched += __cfq_forced_dispatch_cfqq(cfqq); cfq_forced_dispatch()
3188  struct cfq_queue *cfqq) cfq_slice_used_soon()
3191  if (cfq_cfqq_slice_new(cfqq)) cfq_slice_used_soon()
3193  if (time_after(jiffies + cfqd->cfq_slice_idle * cfqq->dispatched, cfq_slice_used_soon()
3194  cfqq->slice_end)) cfq_slice_used_soon()
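cfq_slice_used_soon() (3187-3194, full signature at the end of this listing) projects slice exhaustion from the number of requests still in flight, assuming each costs roughly one idle period. The whole heuristic fits in a few lines; a sketch with the fields flattened into parameters:

#include <stdbool.h>
#include <stdio.h>

/* Mirror of cfq_slice_used_soon(): with 'dispatched' requests still in
 * flight, assume each costs about one slice_idle period; if that
 * projected time runs past slice_end, the slice is as good as used. */
static bool slice_used_soon(unsigned long jiffies, unsigned long slice_idle,
                            unsigned int dispatched, unsigned long slice_end,
                            bool slice_new)
{
    if (slice_new)
        return false;        /* fresh slice is never "almost used" */
    return jiffies + slice_idle * dispatched > slice_end;
}

int main(void)
{
    /* 4 in-flight requests at ~8 jiffies each overrun a slice ending
     * 20 jiffies from now. */
    printf("%d\n", slice_used_soon(1000, 8, 4, 1020, false));
    return 0;
}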
3200  static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq) cfq_may_dispatch() argument
3207  if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_flight[BLK_RW_ASYNC]) cfq_may_dispatch()
3213  if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq)) cfq_may_dispatch()
3217  if (cfq_class_idle(cfqq)) cfq_may_dispatch()
3221  * Does this cfqq already have too much IO in flight? cfq_may_dispatch()
3223  if (cfqq->dispatched >= max_dispatch) { cfq_may_dispatch()
3228  if (cfq_class_idle(cfqq)) cfq_may_dispatch()
3238  if (cfq_cfqq_sync(cfqq) && cfqd->busy_sync_queues == 1) cfq_may_dispatch()
3244  if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq) && cfq_may_dispatch()
3255  * Normally we start throttling cfqq when cfq_quantum/2 cfq_may_dispatch()
3268  if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) { cfq_may_dispatch()
3273  if (!depth && !cfqq->dispatched) cfq_may_dispatch()
3282  return cfqq->dispatched < max_dispatch; cfq_may_dispatch()
3286  * Dispatch a request from cfqq, moving them to the request queue
3289  static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq) cfq_dispatch_request() argument
3293  BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list)); cfq_dispatch_request()
3295  if (!cfq_may_dispatch(cfqd, cfqq)) cfq_dispatch_request()
3301  rq = cfq_check_fifo(cfqq); cfq_dispatch_request()
3303  rq = cfqq->next_rq; cfq_dispatch_request()
3321  * Find the cfqq that we need to service and move a request from that to the
3327  struct cfq_queue *cfqq; cfq_dispatch_requests() local
3335  cfqq = cfq_select_queue(cfqd); cfq_dispatch_requests()
3336  if (!cfqq) cfq_dispatch_requests()
3340  * Dispatch a request from this cfqq, if it is allowed cfq_dispatch_requests()
3342  if (!cfq_dispatch_request(cfqd, cfqq)) cfq_dispatch_requests()
3345  cfqq->slice_dispatch++; cfq_dispatch_requests()
3346  cfq_clear_cfqq_must_dispatch(cfqq); cfq_dispatch_requests()
3352  if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) && cfq_dispatch_requests()
3353  cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) || cfq_dispatch_requests()
3354  cfq_class_idle(cfqq))) { cfq_dispatch_requests()
3355  cfqq->slice_end = jiffies + 1; cfq_dispatch_requests()
3359  cfq_log_cfqq(cfqd, cfqq, "dispatched a request"); cfq_dispatch_requests()
3370  static void cfq_put_queue(struct cfq_queue *cfqq) cfq_put_queue() argument
3372  struct cfq_data *cfqd = cfqq->cfqd; cfq_put_queue()
3375  BUG_ON(cfqq->ref <= 0); cfq_put_queue()
3377  cfqq->ref--; cfq_put_queue()
3378  if (cfqq->ref) cfq_put_queue()
3381  cfq_log_cfqq(cfqd, cfqq, "put_queue"); cfq_put_queue()
3382  BUG_ON(rb_first(&cfqq->sort_list)); cfq_put_queue()
3383  BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]); cfq_put_queue()
3384  cfqg = cfqq->cfqg; cfq_put_queue()
3386  if (unlikely(cfqd->active_queue == cfqq)) { cfq_put_queue()
3387  __cfq_slice_expired(cfqd, cfqq, 0); cfq_put_queue()
3391  BUG_ON(cfq_cfqq_on_rr(cfqq)); cfq_put_queue()
3392  kmem_cache_free(cfq_pool, cfqq); cfq_put_queue()
3396  static void cfq_put_cooperator(struct cfq_queue *cfqq) cfq_put_cooperator() argument
3405  __cfqq = cfqq->new_cfqq; cfq_put_cooperator()
3407  if (__cfqq == cfqq) { cfq_put_cooperator()
3408  WARN(1, "cfqq->new_cfqq loop detected\n"); cfq_put_cooperator()
3417  static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq) cfq_exit_cfqq() argument
3419  if (unlikely(cfqq == cfqd->active_queue)) { cfq_exit_cfqq()
3420  __cfq_slice_expired(cfqd, cfqq, 0); cfq_exit_cfqq()
3424  cfq_put_cooperator(cfqq); cfq_exit_cfqq()
3426  cfq_put_queue(cfqq); cfq_exit_cfqq()
3441  if (cic->cfqq[BLK_RW_ASYNC]) { cfq_exit_icq()
3442  cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]); cfq_exit_icq()
3443  cic->cfqq[BLK_RW_ASYNC] = NULL; cfq_exit_icq()
3446  if (cic->cfqq[BLK_RW_SYNC]) { cfq_exit_icq()
3447  cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_SYNC]); cfq_exit_icq()
3448  cic->cfqq[BLK_RW_SYNC] = NULL; cfq_exit_icq()
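cfq_dispatch_requests() above caps a queue's per-slice dispatches at cfq_prio_to_maxrq() (3352-3354), which line 2841 computes as 2 * base_rq * (IOPRIO_BE_NR - ioprio), so higher-priority queues get a larger budget. A sketch of that budget arithmetic; base_rq stands in for the scheduler's per-slice baseline (cfq_slice_async_rq in the kernel):

#include <stdio.h>

#define IOPRIO_BE_NR 8   /* number of best-effort priority levels */

/* Mirror of cfq_prio_to_maxrq(): lower ioprio value means higher
 * priority, hence a larger per-slice request budget. */
static int prio_to_maxrq(int base_rq, int ioprio)
{
    return 2 * base_rq * (IOPRIO_BE_NR - ioprio);
}

int main(void)
{
    for (int prio = 0; prio < IOPRIO_BE_NR; prio++)
        printf("prio %d -> max %d requests/slice\n",
               prio, prio_to_maxrq(2, prio));
    return 0;
}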
3452  static void cfq_init_prio_data(struct cfq_queue *cfqq, struct cfq_io_cq *cic) cfq_init_prio_data() argument
3457  if (!cfq_cfqq_prio_changed(cfqq)) cfq_init_prio_data()
3468  cfqq->ioprio = task_nice_ioprio(tsk); cfq_init_prio_data()
3469  cfqq->ioprio_class = task_nice_ioclass(tsk); cfq_init_prio_data()
3472  cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio); cfq_init_prio_data()
3473  cfqq->ioprio_class = IOPRIO_CLASS_RT; cfq_init_prio_data()
3476  cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio); cfq_init_prio_data()
3477  cfqq->ioprio_class = IOPRIO_CLASS_BE; cfq_init_prio_data()
3480  cfqq->ioprio_class = IOPRIO_CLASS_IDLE; cfq_init_prio_data()
3481  cfqq->ioprio = 7; cfq_init_prio_data()
3482  cfq_clear_cfqq_idle_window(cfqq); cfq_init_prio_data()
3490  cfqq->org_ioprio = cfqq->ioprio; cfq_init_prio_data()
3491  cfq_clear_cfqq_prio_changed(cfqq); cfq_init_prio_data()
3498  struct cfq_queue *cfqq; check_ioprio_changed() local
3507  cfqq = cic->cfqq[BLK_RW_ASYNC]; check_ioprio_changed()
3508  if (cfqq) { check_ioprio_changed()
3513  cic->cfqq[BLK_RW_ASYNC] = new_cfqq; check_ioprio_changed()
3514  cfq_put_queue(cfqq); check_ioprio_changed()
3518  cfqq = cic->cfqq[BLK_RW_SYNC]; check_ioprio_changed()
3519  if (cfqq) check_ioprio_changed()
3520  cfq_mark_cfqq_prio_changed(cfqq); check_ioprio_changed()
3525  static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq, cfq_init_cfqq() argument
3528  RB_CLEAR_NODE(&cfqq->rb_node); cfq_init_cfqq()
3529  RB_CLEAR_NODE(&cfqq->p_node); cfq_init_cfqq()
3530  INIT_LIST_HEAD(&cfqq->fifo); cfq_init_cfqq()
3532  cfqq->ref = 0; cfq_init_cfqq()
3533  cfqq->cfqd = cfqd; cfq_init_cfqq()
3535  cfq_mark_cfqq_prio_changed(cfqq); cfq_init_cfqq()
3538  if (!cfq_class_idle(cfqq)) cfq_init_cfqq()
3539  cfq_mark_cfqq_idle_window(cfqq); cfq_init_cfqq()
3540  cfq_mark_cfqq_sync(cfqq); cfq_init_cfqq()
3542  cfqq->pid = pid; cfq_init_cfqq()
3585  struct cfq_queue *cfqq, *new_cfqq = NULL; cfq_find_alloc_queue() local
3594  cfqq = &cfqd->oom_cfqq; cfq_find_alloc_queue()
3598  cfqq = cic_to_cfqq(cic, is_sync); cfq_find_alloc_queue()
3601  * Always try a new alloc if we fell back to the OOM cfqq cfq_find_alloc_queue()
3604  if (!cfqq || cfqq == &cfqd->oom_cfqq) { cfq_find_alloc_queue()
3605  cfqq = NULL; cfq_find_alloc_queue()
3607  cfqq = new_cfqq; cfq_find_alloc_queue()
3621  cfqq = kmem_cache_alloc_node(cfq_pool, cfq_find_alloc_queue()
3626  if (cfqq) { cfq_find_alloc_queue()
3627  cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync); cfq_find_alloc_queue()
3628  cfq_init_prio_data(cfqq, cic); cfq_find_alloc_queue()
3629  cfq_link_cfqq_cfqg(cfqq, cfqg); cfq_find_alloc_queue()
3630  cfq_log_cfqq(cfqd, cfqq, "alloced"); cfq_find_alloc_queue()
3632  cfqq = &cfqd->oom_cfqq; cfq_find_alloc_queue()
3639  return cfqq; cfq_find_alloc_queue()
3667  struct cfq_queue *cfqq = NULL; cfq_get_queue() local
3676  cfqq = *async_cfqq; cfq_get_queue()
3679  if (!cfqq) cfq_get_queue()
3680  cfqq = cfq_find_alloc_queue(cfqd, is_sync, cic, bio, gfp_mask); cfq_get_queue()
3686  cfqq->ref++; cfq_get_queue()
3687  *async_cfqq = cfqq; cfq_get_queue()
3690  cfqq->ref++; cfq_get_queue()
3691  return cfqq; cfq_get_queue()
3706  cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_queue *cfqq, cfq_update_io_thinktime() argument
3709  if (cfq_cfqq_sync(cfqq)) { cfq_update_io_thinktime()
3711  __cfq_update_io_thinktime(&cfqq->service_tree->ttime, cfq_update_io_thinktime()
3715  __cfq_update_io_thinktime(&cfqq->cfqg->ttime, cfqd->cfq_group_idle); cfq_update_io_thinktime()
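cfq_update_io_thinktime() (3706-3715) feeds per-context, per-service-tree, and per-group decaying averages of "think time", the gap between one request completing and the next arriving. A sketch of such a fixed-point decaying average; the 7/8 decay and 256 scale are this sketch's assumption about the __cfq_update_io_thinktime() helper, which is not itself visible in the listing:

#include <stdio.h>

/* Decaying think-time average in the style of the ttime bookkeeping:
 * totals decay by 7/8 each sample and the mean is total/samples in
 * fixed point, so old behavior fades out gradually. */
struct ttime {
    unsigned long samples;   /* decayed sample count, scaled by 256 */
    unsigned long total;     /* decayed sum of think times */
    unsigned long mean;
};

static void update_thinktime(struct ttime *t, unsigned long elapsed,
                             unsigned long slice_idle)
{
    if (elapsed > 2 * slice_idle)
        elapsed = 2 * slice_idle;               /* clamp outliers */

    t->samples = (7 * t->samples + 256) / 8;
    t->total   = (7 * t->total + 256 * elapsed) / 8;
    t->mean    = (t->total + 128) / t->samples; /* round to nearest */
}

int main(void)
{
    struct ttime t = { 0, 0, 0 };

    for (int i = 0; i < 5; i++) {
        update_thinktime(&t, 6, 8);
        printf("mean=%lu\n", t.mean);
    }
    return 0;
}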
3720  cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq, cfq_update_io_seektime() argument
3725  if (cfqq->last_request_pos) { cfq_update_io_seektime()
3726  if (cfqq->last_request_pos < blk_rq_pos(rq)) cfq_update_io_seektime()
3727  sdist = blk_rq_pos(rq) - cfqq->last_request_pos; cfq_update_io_seektime()
3729  sdist = cfqq->last_request_pos - blk_rq_pos(rq); cfq_update_io_seektime()
3732  cfqq->seek_history <<= 1; cfq_update_io_seektime()
3734  cfqq->seek_history |= (n_sec < CFQQ_SECT_THR_NONROT); cfq_update_io_seektime()
3736  cfqq->seek_history |= (sdist > CFQQ_SEEK_THR); cfq_update_io_seektime()
3744  cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq, cfq_update_idle_window() argument
3752  if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq)) cfq_update_idle_window()
3755  enable_idle = old_idle = cfq_cfqq_idle_window(cfqq); cfq_update_idle_window()
3757  if (cfqq->queued[0] + cfqq->queued[1] >= 4) cfq_update_idle_window()
3758  cfq_mark_cfqq_deep(cfqq); cfq_update_idle_window()
3760  if (cfqq->next_rq && (cfqq->next_rq->cmd_flags & REQ_NOIDLE)) cfq_update_idle_window()
3764  (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq))) cfq_update_idle_window()
3774  cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle); cfq_update_idle_window()
3776  cfq_mark_cfqq_idle_window(cfqq); cfq_update_idle_window()
3778  cfq_clear_cfqq_idle_window(cfqq); cfq_update_idle_window()
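cfq_update_io_seektime() (3725-3736) shifts one did-this-request-seek bit into seek_history per request, and CFQQ_SEEKY() (line 55) declares the queue seeky when more than 32/8 = 4 of the last 32 requests seeked. A sketch of that sliding window; the distance threshold value is illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SEEK_THR 8192ULL   /* illustrative distance threshold, sectors */

struct queue {
    uint32_t seek_history;          /* 1 verdict bit per recent request */
    unsigned long long last_pos;
};

/* Count set bits; stand-in for the kernel's hweight32(). */
static int popcount32(uint32_t v)
{
    int n = 0;

    for (; v; v &= v - 1)
        n++;
    return n;
}

/* Mirror of cfq_update_io_seektime(): push one bit of "did this
 * request seek?" into a 32-entry sliding window. */
static void update_seek(struct queue *q, unsigned long long pos)
{
    unsigned long long sdist =
        pos > q->last_pos ? pos - q->last_pos : q->last_pos - pos;

    q->seek_history = (q->seek_history << 1) | (sdist > SEEK_THR);
    q->last_pos = pos;
}

/* Mirror of CFQQ_SEEKY(): seeky if more than 4 of the last 32
 * requests were seeks. */
static bool seeky(const struct queue *q)
{
    return popcount32(q->seek_history) > 32 / 8;
}

int main(void)
{
    struct queue q = { 0, 0 };

    for (int i = 0; i < 8; i++)
        update_seek(&q, (unsigned long long)i * 100000); /* all seeks */
    printf("seeky=%d\n", seeky(&q));
    return 0;
}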
3790  struct cfq_queue *cfqq; cfq_should_preempt() local
3792  cfqq = cfqd->active_queue; cfq_should_preempt()
3793  if (!cfqq) cfq_should_preempt()
3799  if (cfq_class_idle(cfqq)) cfq_should_preempt()
3803  * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice. cfq_should_preempt()
3805  if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq)) cfq_should_preempt()
3812  if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq)) cfq_should_preempt()
3815  if (new_cfqq->cfqg != cfqq->cfqg) cfq_should_preempt()
3818  if (cfq_slice_used(cfqq)) cfq_should_preempt()
3825  RB_EMPTY_ROOT(&cfqq->sort_list)) cfq_should_preempt()
3832  if ((rq->cmd_flags & REQ_PRIO) && !cfqq->prio_pending) cfq_should_preempt()
3836  * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice. cfq_should_preempt()
3838  if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq)) cfq_should_preempt()
3842  if (RB_EMPTY_ROOT(&cfqq->sort_list) && !cfq_should_idle(cfqd, cfqq)) cfq_should_preempt()
3845  if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq)) cfq_should_preempt()
3850  * current cfqq, let it preempt cfq_should_preempt()
3852  if (cfq_rq_close(cfqd, cfqq, rq)) cfq_should_preempt()
3859  * cfqq preempts the active queue. if we allowed preempt with no slice left,
3862  static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq) cfq_preempt_queue() argument
3866  cfq_log_cfqq(cfqd, cfqq, "preempt"); cfq_preempt_queue()
3873  if (old_type != cfqq_type(cfqq)) cfq_preempt_queue()
3874  cfqq->cfqg->saved_wl_slice = 0; cfq_preempt_queue()
3880  BUG_ON(!cfq_cfqq_on_rr(cfqq)); cfq_preempt_queue()
3882  cfq_service_tree_add(cfqd, cfqq, 1); cfq_preempt_queue()
3884  cfqq->slice_end = 0; cfq_preempt_queue()
3885  cfq_mark_cfqq_slice_new(cfqq); cfq_preempt_queue()
3889  * Called when a new fs request (rq) is added (to cfqq). Check if there's
3893  cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq, cfq_rq_enqueued() argument
3900  cfqq->prio_pending++; cfq_rq_enqueued()
3902  cfq_update_io_thinktime(cfqd, cfqq, cic); cfq_rq_enqueued()
3903  cfq_update_io_seektime(cfqd, cfqq, rq); cfq_rq_enqueued()
3904  cfq_update_idle_window(cfqd, cfqq, cic); cfq_rq_enqueued()
3906  cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq); cfq_rq_enqueued()
3908  if (cfqq == cfqd->active_queue) { cfq_rq_enqueued()
3919  if (cfq_cfqq_wait_request(cfqq)) { cfq_rq_enqueued()
3922  cfq_del_timer(cfqd, cfqq); cfq_rq_enqueued()
3923  cfq_clear_cfqq_wait_request(cfqq); cfq_rq_enqueued()
3926  cfqg_stats_update_idle_time(cfqq->cfqg); cfq_rq_enqueued()
3927  cfq_mark_cfqq_must_dispatch(cfqq); cfq_rq_enqueued()
3930  } else if (cfq_should_preempt(cfqd, cfqq, rq)) { cfq_rq_enqueued()
3937  cfq_preempt_queue(cfqd, cfqq); cfq_rq_enqueued()
3945  struct cfq_queue *cfqq = RQ_CFQQ(rq); cfq_insert_request() local
3947  cfq_log_cfqq(cfqd, cfqq, "insert_request"); cfq_insert_request()
3948  cfq_init_prio_data(cfqq, RQ_CIC(rq)); cfq_insert_request()
3951  list_add_tail(&rq->queuelist, &cfqq->fifo); cfq_insert_request()
3955  cfq_rq_enqueued(cfqd, cfqq, rq); cfq_insert_request()
3964  struct cfq_queue *cfqq = cfqd->active_queue; cfq_update_hw_tag() local
3981  if (cfqq && cfq_cfqq_idle_window(cfqq) && cfq_update_hw_tag()
3982  cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] < cfq_update_hw_tag()
3995  static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq) cfq_should_wait_busy() argument
4000  if (!RB_EMPTY_ROOT(&cfqq->sort_list)) cfq_should_wait_busy()
4004  if (cfqq->cfqg->nr_cfqq > 1) cfq_should_wait_busy()
4008  if (cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true)) cfq_should_wait_busy()
4011  if (cfq_slice_used(cfqq)) cfq_should_wait_busy()
4016  && (cfqq->slice_end - jiffies < cic->ttime.ttime_mean)) cfq_should_wait_busy()
4026  if (cfqq->slice_end - jiffies == 1) cfq_should_wait_busy()
4034  struct cfq_queue *cfqq = RQ_CFQQ(rq); cfq_completed_request() local
4035  struct cfq_data *cfqd = cfqq->cfqd; cfq_completed_request()
4040  cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d", cfq_completed_request()
4046  WARN_ON(!cfqq->dispatched); cfq_completed_request()
4048  cfqq->dispatched--; cfq_completed_request()
4050  cfqg_stats_update_completion(cfqq->cfqg, rq_start_time_ns(rq), cfq_completed_request()
4053  cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--; cfq_completed_request()
4060  if (cfq_cfqq_on_rr(cfqq)) cfq_completed_request()
4061  st = cfqq->service_tree; cfq_completed_request()
4063  st = st_for(cfqq->cfqg, cfqq_class(cfqq), cfq_completed_request()
4064  cfqq_type(cfqq)); cfq_completed_request()
4072  cfqq->cfqg->ttime.last_end_request = now; cfq_completed_request()
4079  if (cfqd->active_queue == cfqq) { cfq_completed_request()
4080  const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list); cfq_completed_request()
4082  if (cfq_cfqq_slice_new(cfqq)) { cfq_completed_request()
4083  cfq_set_prio_slice(cfqd, cfqq); cfq_completed_request()
4084  cfq_clear_cfqq_slice_new(cfqq); cfq_completed_request()
4091  if (cfq_should_wait_busy(cfqd, cfqq)) { cfq_completed_request()
4095  cfqq->slice_end = jiffies + extend_sl; cfq_completed_request()
4096  cfq_mark_cfqq_wait_busy(cfqq); cfq_completed_request()
4097  cfq_log_cfqq(cfqd, cfqq, "will busy wait"); cfq_completed_request()
4108  if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq)) cfq_completed_request()
4111  !cfq_close_cooperator(cfqd, cfqq)) { cfq_completed_request()
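cfq_should_wait_busy() (3995-4026) decides whether cfq_completed_request() should hold the slice open for one more request (4091-4097). Recomposing the conditions visible above, with struct fields flattened into parameters; the elided source lines add further guards, so treat this as a partial sketch:

#include <stdbool.h>
#include <stdio.h>

/* Recomposition of the cfq_should_wait_busy() checks visible in the
 * listing (lines 4000-4026). */
static bool should_wait_busy(bool sort_list_empty, int group_nr_queues,
                             bool group_thinktime_big, bool slice_used,
                             unsigned long slice_end, unsigned long jiffies,
                             unsigned long ttime_mean, bool ttime_valid)
{
    if (!sort_list_empty)
        return false;        /* still has queued work, no need to wait */
    if (group_nr_queues > 1)
        return false;        /* group won't go idle anyway */
    if (group_thinktime_big)
        return false;        /* process thinks too long to be worth it */
    if (slice_used)
        return false;        /* nothing left to extend */
    /* Expected think time would overrun the remaining slice. */
    if (ttime_valid && slice_end - jiffies < ttime_mean)
        return false;
    return true;
}

int main(void)
{
    /* Queue drained, alone in its group, short think time: wait. */
    printf("%d\n", should_wait_busy(true, 1, false, false,
                                    1100, 1000, 50, true));
    return 0;
}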
4120  static inline int __cfq_may_queue(struct cfq_queue *cfqq) __cfq_may_queue() argument
4122  if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) { __cfq_may_queue()
4123  cfq_mark_cfqq_must_alloc_slice(cfqq); __cfq_may_queue()
4135  struct cfq_queue *cfqq; cfq_may_queue() local
4147  cfqq = cic_to_cfqq(cic, rw_is_sync(rw)); cfq_may_queue()
4148  if (cfqq) { cfq_may_queue()
4149  cfq_init_prio_data(cfqq, cic); cfq_may_queue()
4151  return __cfq_may_queue(cfqq); cfq_may_queue()
4162  struct cfq_queue *cfqq = RQ_CFQQ(rq); cfq_put_request() local
4164  if (cfqq) { cfq_put_request()
4167  BUG_ON(!cfqq->allocated[rw]); cfq_put_request()
4168  cfqq->allocated[rw]--; cfq_put_request()
4175  cfq_put_queue(cfqq); cfq_put_request()
4181  struct cfq_queue *cfqq) cfq_merge_cfqqs()
4183  cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq); cfq_merge_cfqqs()
4184  cic_set_cfqq(cic, cfqq->new_cfqq, 1); cfq_merge_cfqqs()
4185  cfq_mark_cfqq_coop(cfqq->new_cfqq); cfq_merge_cfqqs()
4186  cfq_put_queue(cfqq); cfq_merge_cfqqs()
4191  * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
4192  * was the last process referring to said cfqq.
4195  split_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq) split_cfqq() argument
4197  if (cfqq_process_refs(cfqq) == 1) { split_cfqq()
4198  cfqq->pid = current->pid; split_cfqq()
4199  cfq_clear_cfqq_coop(cfqq); split_cfqq()
4200  cfq_clear_cfqq_split_coop(cfqq); split_cfqq()
4201  return cfqq; split_cfqq()
4206  cfq_put_cooperator(cfqq); split_cfqq()
4208  cfq_put_queue(cfqq); split_cfqq()
4222  struct cfq_queue *cfqq; cfq_set_request() local
4231  cfqq = cic_to_cfqq(cic, is_sync); cfq_set_request()
4232  if (!cfqq || cfqq == &cfqd->oom_cfqq) { cfq_set_request()
4233  cfqq = cfq_get_queue(cfqd, is_sync, cic, bio, gfp_mask); cfq_set_request()
4234  cic_set_cfqq(cic, cfqq, is_sync); cfq_set_request()
4239  if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) { cfq_set_request()
4240  cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq"); cfq_set_request()
4241  cfqq = split_cfqq(cic, cfqq); cfq_set_request()
4242  if (!cfqq) cfq_set_request()
4252  if (cfqq->new_cfqq) cfq_set_request()
4253  cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq); cfq_set_request()
4256  cfqq->allocated[rw]++; cfq_set_request()
4258  cfqq->ref++; cfq_set_request()
4259  cfqg_get(cfqq->cfqg); cfq_set_request()
4260  rq->elv.priv[0] = cfqq; cfq_set_request()
4261  rq->elv.priv[1] = cfqq->cfqg; cfq_set_request()
4283  struct cfq_queue *cfqq; cfq_idle_slice_timer() local
4291  cfqq = cfqd->active_queue; cfq_idle_slice_timer()
4292  if (cfqq) { cfq_idle_slice_timer()
4298  if (cfq_cfqq_must_dispatch(cfqq)) cfq_idle_slice_timer()
4304  if (cfq_slice_used(cfqq)) cfq_idle_slice_timer()
4317  if (!RB_EMPTY_ROOT(&cfqq->sort_list)) cfq_idle_slice_timer()
4323  cfq_clear_cfqq_deep(cfqq); cfq_idle_slice_timer()
4434  * Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues. cfq_init_queue()

Full declarations for entries truncated above:

1231  cfq_slice_offset(struct cfq_data *cfqd, struct cfq_queue *cfqq) cfq_slice_offset() argument
1457  cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg, struct cfq_queue *cfqq) cfq_group_served() argument
2433  __cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq) __cfq_set_active_queue() argument
2561  cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq) cfq_set_active_queue() argument
3187  cfq_slice_used_soon(struct cfq_data *cfqd, struct cfq_queue *cfqq) cfq_slice_used_soon() argument
4180  cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_cq *cic, struct cfq_queue *cfqq) cfq_merge_cfqqs() argument
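split_cfqq() (4195-4208) undoes a cooperative merge: the last process still holding the queue keeps it (pid is rebranded and the coop flags cleared), while any earlier process drops its reference and gets NULL back, which tells cfq_set_request() to allocate a fresh queue. A sketch of that decision, with illustrative names:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct queue {
    int pid;
    int process_refs;
    bool coop, split_coop;
};

/* Simplified split_cfqq(): last process keeps the queue, rebranded to
 * itself; any earlier process drops its reference and gets NULL back,
 * signalling that a fresh queue must be allocated. */
static struct queue *split_queue(struct queue *q, int current_pid)
{
    if (q->process_refs == 1) {
        q->pid = current_pid;
        q->coop = q->split_coop = false;
        return q;
    }
    q->process_refs--;       /* stand-in for cfq_put_queue() */
    return NULL;
}

int main(void)
{
    struct queue q = { .pid = 1, .process_refs = 2, .coop = true };

    printf("%p\n", (void *)split_queue(&q, 42));  /* NULL: allocate new */
    printf("%p\n", (void *)split_queue(&q, 42));  /* q itself, now pid 42 */
    return 0;
}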