Lines matching refs: q
35 static bool blkcg_policy_enabled(struct request_queue *q, in blkcg_policy_enabled() argument
38 return pol && test_bit(pol->plid, q->blkcg_pols); in blkcg_policy_enabled()
69 static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q, in blkg_alloc() argument
76 blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node); in blkg_alloc()
80 blkg->q = q; in blkg_alloc()
87 if (blk_init_rl(&blkg->rl, q, gfp_mask)) in blkg_alloc()
96 if (!blkcg_policy_enabled(q, pol)) in blkg_alloc()
100 pd = kzalloc_node(pol->pd_size, gfp_mask, q->node); in blkg_alloc()
127 struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, struct request_queue *q, in __blkg_lookup() argument
133 if (blkg && blkg->q == q) in __blkg_lookup()
142 blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id); in __blkg_lookup()
143 if (blkg && blkg->q == q) { in __blkg_lookup()
145 lockdep_assert_held(q->queue_lock); in __blkg_lookup()
163 struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q) in blkg_lookup() argument
167 if (unlikely(blk_queue_bypass(q))) in blkg_lookup()
169 return __blkg_lookup(blkcg, q, false); in blkg_lookup()
178 struct request_queue *q, in blkg_create() argument
185 lockdep_assert_held(q->queue_lock); in blkg_create()
195 new_blkg = blkg_alloc(blkcg, q, GFP_ATOMIC); in blkg_create()
205 blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false); in blkg_create()
223 ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg); in blkg_create()
226 list_add(&blkg->q_node, &q->blkg_list); in blkg_create()
240 q->root_blkg = blkg; in blkg_create()
241 q->root_rl.blkg = blkg; in blkg_create()
272 struct request_queue *q) in blkg_lookup_create() argument
277 lockdep_assert_held(q->queue_lock); in blkg_lookup_create()
283 if (unlikely(blk_queue_bypass(q))) in blkg_lookup_create()
284 return ERR_PTR(blk_queue_dying(q) ? -EINVAL : -EBUSY); in blkg_lookup_create()
286 blkg = __blkg_lookup(blkcg, q, true); in blkg_lookup_create()
298 while (parent && !__blkg_lookup(parent, q, false)) { in blkg_lookup_create()
303 blkg = blkg_create(pos, q, NULL); in blkg_lookup_create()
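The lookup/create lines above imply a calling convention: blkg_lookup() needs only an RCU read lock, while blkg_lookup_create() additionally expects q->queue_lock (see the lockdep_assert_held() at line 277 and the bypass check at 283). A minimal hedged sketch of a caller honoring that; foo_use_blkg() is a hypothetical helper, not code from this file:

	/*
	 * Hedged sketch of the lookup-or-create pattern suggested by the
	 * lockdep and blk_queue_bypass() checks above.  Error handling is
	 * simplified; the blkg must be used while both locks still pin it.
	 */
	static void foo_use_blkg(struct blkcg *blkcg, struct request_queue *q)
	{
		struct blkcg_gq *blkg;

		rcu_read_lock();
		spin_lock_irq(q->queue_lock);

		blkg = blkg_lookup_create(blkcg, q);
		if (!IS_ERR(blkg)) {
			/* use @blkg here, under rcu and q->queue_lock */
		}

		spin_unlock_irq(q->queue_lock);
		rcu_read_unlock();
	}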
315 lockdep_assert_held(blkg->q->queue_lock); in blkg_destroy()
330 radix_tree_delete(&blkcg->blkg_tree, blkg->q->id); in blkg_destroy()
347 blkg->q->root_blkg = NULL; in blkg_destroy()
348 blkg->q->root_rl.blkg = NULL; in blkg_destroy()
364 static void blkg_destroy_all(struct request_queue *q) in blkg_destroy_all() argument
368 lockdep_assert_held(q->queue_lock); in blkg_destroy_all()
370 list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) { in blkg_destroy_all()
414 struct request_queue *q) in __blk_queue_next_rl() argument
423 if (rl == &q->root_rl) { in __blk_queue_next_rl()
424 ent = &q->blkg_list; in __blk_queue_next_rl()
435 if (ent == &q->root_blkg->q_node) in __blk_queue_next_rl()
437 if (ent == &q->blkg_list) in __blk_queue_next_rl()
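Lines 414-437 are __blk_queue_next_rl(), which walks from q->root_rl through the request_list of each blkg on q->blkg_list; it backs the blk_queue_for_each_rl() iterator. A hedged sketch of a caller in the shape of the queue-drain logic; foo_wake_all_waiters() is hypothetical and q->queue_lock is assumed held:

	/*
	 * Hedged sketch: visit every request_list hanging off @q, root first,
	 * via blk_queue_for_each_rl(), which expands to __blk_queue_next_rl().
	 */
	static void foo_wake_all_waiters(struct request_queue *q)
	{
		struct request_list *rl;

		blk_queue_for_each_rl(rl, q) {
			wake_up_all(&rl->wait[BLK_RW_SYNC]);
			wake_up_all(&rl->wait[BLK_RW_ASYNC]);
		}
	}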
476 if (blkcg_policy_enabled(blkg->q, pol) && in blkcg_reset_stats()
490 if (blkg->q->backing_dev_info.dev) in blkg_dev_name()
491 return dev_name(blkg->q->backing_dev_info.dev); in blkg_dev_name()
524 spin_lock_irq(blkg->q->queue_lock); in blkcg_print_blkgs()
525 if (blkcg_policy_enabled(blkg->q, pol)) in blkcg_print_blkgs()
527 spin_unlock_irq(blkg->q->queue_lock); in blkcg_print_blkgs()
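Lines 524-527 show blkcg_print_blkgs() taking each blkg's queue_lock around the per-blkg callback, and only calling it for blkgs whose queue has the policy enabled. A hedged sketch of how a policy's read handler might use it; struct foo_data, foo_prfill_limit() and blkcg_policy_foo are hypothetical, while blkcg_print_blkgs() and __blkg_prfill_u64() are real helpers in this file:

	extern struct blkcg_policy blkcg_policy_foo;	/* hypothetical policy */

	/* hypothetical per-blkg data; pol->pd_size would cover this struct */
	struct foo_data {
		struct blkg_policy_data pd;	/* conventionally the first member */
		u64 limit;
	};

	static u64 foo_prfill_limit(struct seq_file *sf,
				    struct blkg_policy_data *pd, int off)
	{
		struct foo_data *fd = container_of(pd, struct foo_data, pd);

		/* runs with pd->blkg->q->queue_lock held by blkcg_print_blkgs() */
		return __blkg_prfill_u64(sf, pd, fd->limit);
	}

	static void foo_print_limits(struct seq_file *sf, struct blkcg *blkcg)
	{
		blkcg_print_blkgs(sf, blkcg, foo_prfill_limit, &blkcg_policy_foo,
				  0, false);
	}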
637 lockdep_assert_held(pd->blkg->q->queue_lock); in blkg_stat_recursive_sum()
671 lockdep_assert_held(pd->blkg->q->queue_lock); in blkg_rwstat_recursive_sum()
803 struct request_queue *q = blkg->q; in blkcg_css_offline() local
805 if (spin_trylock(q->queue_lock)) { in blkcg_css_offline()
807 spin_unlock(q->queue_lock); in blkcg_css_offline()
860 int blkcg_init_queue(struct request_queue *q) in blkcg_init_queue() argument
864 return blk_throtl_init(q); in blkcg_init_queue()
873 void blkcg_drain_queue(struct request_queue *q) in blkcg_drain_queue() argument
875 lockdep_assert_held(q->queue_lock); in blkcg_drain_queue()
881 if (!q->root_blkg) in blkcg_drain_queue()
884 blk_throtl_drain(q); in blkcg_drain_queue()
893 void blkcg_exit_queue(struct request_queue *q) in blkcg_exit_queue() argument
895 spin_lock_irq(q->queue_lock); in blkcg_exit_queue()
896 blkg_destroy_all(q); in blkcg_exit_queue()
897 spin_unlock_irq(q->queue_lock); in blkcg_exit_queue()
899 blk_throtl_exit(q); in blkcg_exit_queue()
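Taken together, lines 860-899 are the request_queue lifecycle hooks: blkcg_init_queue() at allocation time, blkcg_drain_queue() under q->queue_lock while the queue is drained, and blkcg_exit_queue() at release. A hedged sketch of that ordering only; in the real tree the block core makes these calls, and foo_setup_queue()/foo_teardown_queue() are made up:

	static int foo_setup_queue(struct request_queue *q)
	{
		/* per lines 860-864, this currently boils down to blk_throtl_init(q) */
		return blkcg_init_queue(q);
	}

	static void foo_teardown_queue(struct request_queue *q)
	{
		/* blkcg_drain_queue() asserts q->queue_lock (line 875) */
		spin_lock_irq(q->queue_lock);
		blkcg_drain_queue(q);
		spin_unlock_irq(q->queue_lock);

		/* blkcg_exit_queue() takes the lock itself, destroys every blkg,
		 * then runs blk_throtl_exit() (lines 895-899) */
		blkcg_exit_queue(q);
	}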
961 int blkcg_activate_policy(struct request_queue *q, in blkcg_activate_policy() argument
970 if (blkcg_policy_enabled(q, pol)) in blkcg_activate_policy()
974 new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL); in blkcg_activate_policy()
978 blk_queue_bypass_start(q); in blkcg_activate_policy()
987 spin_lock_irq(q->queue_lock); in blkcg_activate_policy()
990 blkg = __blkg_lookup(&blkcg_root, q, false); in blkcg_activate_policy()
994 blkg = blkg_create(&blkcg_root, q, new_blkg); in blkcg_activate_policy()
1005 list_for_each_entry(blkg, &q->blkg_list, q_node) in blkcg_activate_policy()
1008 spin_unlock_irq(q->queue_lock); in blkcg_activate_policy()
1012 pd = kzalloc_node(pol->pd_size, GFP_KERNEL, q->node); in blkcg_activate_policy()
1024 spin_lock_irq(q->queue_lock); in blkcg_activate_policy()
1026 list_for_each_entry(blkg, &q->blkg_list, q_node) { in blkcg_activate_policy()
1046 __set_bit(pol->plid, q->blkcg_pols); in blkcg_activate_policy()
1049 spin_unlock_irq(q->queue_lock); in blkcg_activate_policy()
1051 blk_queue_bypass_end(q); in blkcg_activate_policy()
1066 void blkcg_deactivate_policy(struct request_queue *q, in blkcg_deactivate_policy() argument
1071 if (!blkcg_policy_enabled(q, pol)) in blkcg_deactivate_policy()
1074 blk_queue_bypass_start(q); in blkcg_deactivate_policy()
1075 spin_lock_irq(q->queue_lock); in blkcg_deactivate_policy()
1077 __clear_bit(pol->plid, q->blkcg_pols); in blkcg_deactivate_policy()
1080 if (bitmap_empty(q->blkcg_pols, BLKCG_MAX_POLS)) in blkcg_deactivate_policy()
1081 blkg_destroy_all(q); in blkcg_deactivate_policy()
1083 list_for_each_entry(blkg, &q->blkg_list, q_node) { in blkcg_deactivate_policy()
1098 spin_unlock_irq(q->queue_lock); in blkcg_deactivate_policy()
1099 blk_queue_bypass_end(q); in blkcg_deactivate_policy()
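Finally, lines 961-1099 are the policy enable/disable path: blkcg_activate_policy() bypasses the queue, allocates pol->pd_size of per-blkg data for every blkg on q, and sets the policy's bit in q->blkcg_pols; blkcg_deactivate_policy() clears the bit and frees that data again. A hedged sketch of how a policy module would drive the pair from its per-queue init/exit; blkcg_policy_foo is hypothetical, but in-tree users such as blk-throttle follow this same shape:

	extern struct blkcg_policy blkcg_policy_foo;	/* hypothetical policy */

	static int foo_init_queue(struct request_queue *q)
	{
		/* allocates per-blkg data and sets the bit in q->blkcg_pols */
		return blkcg_activate_policy(q, &blkcg_policy_foo);
	}

	static void foo_exit_queue(struct request_queue *q)
	{
		/* clears the q->blkcg_pols bit and frees the per-blkg data */
		blkcg_deactivate_policy(q, &blkcg_policy_foo);
	}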