Searched refs:blkg (Results 1 - 7 of 7) sorted by relevance

/linux-4.4.14/block/
blk-cgroup.c
59 * blkg_free - free a blkg
60 * @blkg: blkg to free
62 * Free @blkg which may be partially allocated.
64 static void blkg_free(struct blkcg_gq *blkg) blkg_free() argument
68 if (!blkg) blkg_free()
72 if (blkg->pd[i]) blkg_free()
73 blkcg_policy[i]->pd_free_fn(blkg->pd[i]); blkg_free()
75 if (blkg->blkcg != &blkcg_root) blkg_free()
76 blk_exit_rl(&blkg->rl); blkg_free()
78 blkg_rwstat_exit(&blkg->stat_ios); blkg_free()
79 blkg_rwstat_exit(&blkg->stat_bytes); blkg_free()
80 kfree(blkg); blkg_free()
84 * blkg_alloc - allocate a blkg
85 * @blkcg: block cgroup the new blkg is associated with
86 * @q: request_queue the new blkg is associated with
89 * Allocate a new blkg associating @blkcg and @q.
94 struct blkcg_gq *blkg; blkg_alloc() local
98 blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node); blkg_alloc()
99 if (!blkg) blkg_alloc()
102 if (blkg_rwstat_init(&blkg->stat_bytes, gfp_mask) || blkg_alloc()
103 blkg_rwstat_init(&blkg->stat_ios, gfp_mask)) blkg_alloc()
106 blkg->q = q; blkg_alloc()
107 INIT_LIST_HEAD(&blkg->q_node); blkg_alloc()
108 blkg->blkcg = blkcg; blkg_alloc()
109 atomic_set(&blkg->refcnt, 1); blkg_alloc()
111 /* root blkg uses @q->root_rl, init rl only for !root blkgs */ blkg_alloc()
113 if (blk_init_rl(&blkg->rl, q, gfp_mask)) blkg_alloc()
115 blkg->rl.blkg = blkg; blkg_alloc()
125 /* alloc per-policy data and attach it to blkg */ blkg_alloc()
130 blkg->pd[i] = pd; blkg_alloc()
131 pd->blkg = blkg; blkg_alloc()
135 return blkg; blkg_alloc()
138 blkg_free(blkg); blkg_alloc()
145 struct blkcg_gq *blkg; blkg_lookup_slowpath() local
149 * hint can only be updated under queue_lock as otherwise @blkg blkg_lookup_slowpath()
153 blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id); blkg_lookup_slowpath()
154 if (blkg && blkg->q == q) { blkg_lookup_slowpath()
157 rcu_assign_pointer(blkcg->blkg_hint, blkg); blkg_lookup_slowpath()
159 return blkg; blkg_lookup_slowpath()
174 struct blkcg_gq *blkg; blkg_create() local
181 /* blkg holds a reference to blkcg */ blkg_create()
202 blkg = new_blkg; blkg_create()
203 blkg->wb_congested = wb_congested; blkg_create()
207 blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false); blkg_create()
208 if (WARN_ON_ONCE(!blkg->parent)) { blkg_create()
212 blkg_get(blkg->parent); blkg_create()
219 if (blkg->pd[i] && pol->pd_init_fn) blkg_create()
220 pol->pd_init_fn(blkg->pd[i]); blkg_create()
225 ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg); blkg_create()
227 hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list); blkg_create()
228 list_add(&blkg->q_node, &q->blkg_list); blkg_create()
233 if (blkg->pd[i] && pol->pd_online_fn) blkg_create()
234 pol->pd_online_fn(blkg->pd[i]); blkg_create()
237 blkg->online = true; blkg_create()
241 return blkg; blkg_create()
243 /* @blkg failed to initialize fully, use the usual release path */ blkg_create()
244 blkg_put(blkg); blkg_create()
257 * blkg_lookup_create - lookup blkg, try to create one if not there
261 * Lookup blkg for the @blkcg - @q pair. If it doesn't exist, try to
262 * create one. blkg creation is performed recursively from blkcg_root such
263 * that all non-root blkg's have access to the parent blkg. This function
266 * Returns pointer to the looked up or created blkg on success, ERR_PTR()
273 struct blkcg_gq *blkg; blkg_lookup_create() local
285 blkg = __blkg_lookup(blkcg, q, true); blkg_lookup_create()
286 if (blkg) blkg_lookup_create()
287 return blkg; blkg_lookup_create()
302 blkg = blkg_create(pos, q, NULL); blkg_lookup_create()
303 if (pos == blkcg || IS_ERR(blkg)) blkg_lookup_create()
304 return blkg; blkg_lookup_create()
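Taken together, the blkg_lookup_create() lines above imply a calling pattern like the sketch below. This is an illustrative fragment, not code from the tree: it assumes <linux/blk-cgroup.h>, a CONFIG_BLK_CGROUP kernel, and a hypothetical helper name; blkg_lookup_create() must be called with both rcu_read_lock() and q->queue_lock held and can return an ERR_PTR().

#include <linux/blk-cgroup.h>

/* Hypothetical caller; mirrors the pattern blkcg_bio_issue_check() uses. */
static struct blkcg_gq *my_resolve_blkg(struct blkcg *blkcg,
					struct request_queue *q)
{
	struct blkcg_gq *blkg;

	WARN_ON_ONCE(!rcu_read_lock_held());	/* caller holds the RCU read lock */

	spin_lock_irq(q->queue_lock);
	blkg = blkg_lookup_create(blkcg, q);	/* builds the parent chain as needed */
	if (IS_ERR(blkg))
		blkg = q->root_blkg;		/* degrade to the root blkg on failure */
	spin_unlock_irq(q->queue_lock);

	/* only stable while the RCU read lock is held, unless pinned with blkg_get() */
	return blkg;
}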
308 static void blkg_destroy(struct blkcg_gq *blkg) blkg_destroy() argument
310 struct blkcg *blkcg = blkg->blkcg; blkg_destroy()
311 struct blkcg_gq *parent = blkg->parent; blkg_destroy()
314 lockdep_assert_held(blkg->q->queue_lock); blkg_destroy()
318 WARN_ON_ONCE(list_empty(&blkg->q_node)); blkg_destroy()
319 WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node)); blkg_destroy()
324 if (blkg->pd[i] && pol->pd_offline_fn) blkg_destroy()
325 pol->pd_offline_fn(blkg->pd[i]); blkg_destroy()
329 blkg_rwstat_add_aux(&parent->stat_bytes, &blkg->stat_bytes); blkg_destroy()
330 blkg_rwstat_add_aux(&parent->stat_ios, &blkg->stat_ios); blkg_destroy()
333 blkg->online = false; blkg_destroy()
335 radix_tree_delete(&blkcg->blkg_tree, blkg->q->id); blkg_destroy()
336 list_del_init(&blkg->q_node); blkg_destroy()
337 hlist_del_init_rcu(&blkg->blkcg_node); blkg_destroy()
340 * Both setting lookup hint to and clearing it from @blkg are done blkg_destroy()
341 * under queue_lock. If it's not pointing to @blkg now, it never blkg_destroy()
344 if (rcu_access_pointer(blkcg->blkg_hint) == blkg) blkg_destroy()
351 blkg_put(blkg); blkg_destroy()
362 struct blkcg_gq *blkg, *n; blkg_destroy_all() local
366 list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) { blkg_destroy_all()
367 struct blkcg *blkcg = blkg->blkcg; blkg_destroy_all()
370 blkg_destroy(blkg); blkg_destroy_all()
375 q->root_rl.blkg = NULL; blkg_destroy_all()
380 * can access all the fields of blkg and assume these are valid. For
383 * Having a reference to blkg under an rcu allows accesses to only values
388 struct blkcg_gq *blkg = container_of(rcu_head, struct blkcg_gq, rcu_head); __blkg_release_rcu() local
390 /* release the blkcg and parent blkg refs this blkg has been holding */ __blkg_release_rcu()
391 css_put(&blkg->blkcg->css); __blkg_release_rcu()
392 if (blkg->parent) __blkg_release_rcu()
393 blkg_put(blkg->parent); __blkg_release_rcu()
395 wb_congested_put(blkg->wb_congested); __blkg_release_rcu()
397 blkg_free(blkg); __blkg_release_rcu()
403 * because the root blkg uses @q->root_rl instead of its own rl.
409 struct blkcg_gq *blkg; __blk_queue_next_rl() local
412 * Determine the current blkg list_head. The first entry is __blk_queue_next_rl()
421 blkg = container_of(rl, struct blkcg_gq, rl); __blk_queue_next_rl()
422 ent = &blkg->q_node; __blk_queue_next_rl()
432 blkg = container_of(ent, struct blkcg_gq, q_node); __blk_queue_next_rl()
433 return &blkg->rl; __blk_queue_next_rl()
440 struct blkcg_gq *blkg; blkcg_reset_stats() local
451 hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) { blkcg_reset_stats()
452 blkg_rwstat_reset(&blkg->stat_bytes); blkcg_reset_stats()
453 blkg_rwstat_reset(&blkg->stat_ios); blkcg_reset_stats()
458 if (blkg->pd[i] && pol->pd_reset_stats_fn) blkcg_reset_stats()
459 pol->pd_reset_stats_fn(blkg->pd[i]); blkcg_reset_stats()
468 const char *blkg_dev_name(struct blkcg_gq *blkg) blkg_dev_name() argument
471 if (blkg->q->backing_dev_info.dev) blkg_dev_name()
472 return dev_name(blkg->q->backing_dev_info.dev); blkg_dev_name()
478 * blkcg_print_blkgs - helper for printing per-blkg data
481 * @prfill: fill function to print out a blkg
486 * This function invokes @prfill on each blkg of @blkcg if pd for the
501 struct blkcg_gq *blkg; blkcg_print_blkgs() local
505 hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) { blkcg_print_blkgs()
506 spin_lock_irq(blkg->q->queue_lock); blkcg_print_blkgs()
507 if (blkcg_policy_enabled(blkg->q, pol)) blkcg_print_blkgs()
508 total += prfill(sf, blkg->pd[pol->plid], data); blkcg_print_blkgs()
509 spin_unlock_irq(blkg->q->queue_lock); blkcg_print_blkgs()
528 const char *dname = blkg_dev_name(pd->blkg); __blkg_prfill_u64()
555 const char *dname = blkg_dev_name(pd->blkg); __blkg_prfill_rwstat()
607 struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd->blkg + off); blkg_prfill_rwstat_field()
613 * blkg_print_stat_bytes - seq_show callback for blkg->stat_bytes
617 * To be used as cftype->seq_show to print blkg->stat_bytes.
630 * blkg_print_stat_ios - seq_show callback for blkg->stat_ios
634 * To be used as cftype->seq_show to print blkg->stat_ios. cftype->private
650 struct blkg_rwstat rwstat = blkg_rwstat_recursive_sum(pd->blkg, blkg_prfill_rwstat_field_recursive()
687 * @blkg: blkg of interest
689 * @off: offset to the blkg_stat in blkg_policy_data or @blkg
691 * Collect the blkg_stat specified by @blkg, @pol and @off and all its
695 * If @pol is NULL, blkg_stat is at @off bytes into @blkg; otherwise, it is
696 * at @off bytes into @blkg's blkg_policy_data of the policy.
698 u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg, blkg_stat_recursive_sum() argument
705 lockdep_assert_held(blkg->q->queue_lock); blkg_stat_recursive_sum()
708 blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) { blkg_for_each_descendant_pre()
717 stat = (void *)blkg + off; blkg_for_each_descendant_pre()
729 * @blkg: blkg of interest
731 * @off: offset to the blkg_rwstat in blkg_policy_data or @blkg
733 * Collect the blkg_rwstat specified by @blkg, @pol and @off and all its
737 * If @pol is NULL, blkg_rwstat is at @off bytes into @blkg; otherwise, it
738 * is at @off bytes into @blkg's blkg_policy_data of the policy.
740 struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg, blkg_rwstat_recursive_sum() argument
748 lockdep_assert_held(blkg->q->queue_lock); blkg_rwstat_recursive_sum()
751 blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) { blkg_for_each_descendant_pre()
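The two recursive helpers above, blkg_stat_recursive_sum() and blkg_rwstat_recursive_sum(), total a statistic over a blkg and its online descendants. A rough sketch of how a policy might use them follows; struct my_pd, my_policy, and the field names are invented for the example, and both calls must run under the queue lock, as the lockdep assertions show.

#include <linux/blk-cgroup.h>

/* Hypothetical per-policy data; the embedded blkg_policy_data comes first. */
struct my_pd {
	struct blkg_policy_data pd;
	struct blkg_stat	wait_time;
	struct blkg_rwstat	serviced;
};

static struct blkcg_policy my_policy;		/* assumed, registered elsewhere */

static void my_report_subtree(struct blkcg_gq *blkg)
{
	u64 wait;
	struct blkg_rwstat ios;

	lockdep_assert_held(blkg->q->queue_lock);

	/* sum @blkg plus all of its online descendants */
	wait = blkg_stat_recursive_sum(blkg, &my_policy,
				       offsetof(struct my_pd, wait_time));
	ios = blkg_rwstat_recursive_sum(blkg, &my_policy,
					offsetof(struct my_pd, serviced));

	pr_info("wait=%llu reads=%llu writes=%llu\n",
		(unsigned long long)wait,
		(unsigned long long)atomic64_read(&ios.aux_cnt[BLKG_RWSTAT_READ]),
		(unsigned long long)atomic64_read(&ios.aux_cnt[BLKG_RWSTAT_WRITE]));
}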
774 * blkg_conf_prep - parse and prepare for per-blkg config update
780 * Parse per-blkg config update from @input and initialize @ctx with the
781 * result. @ctx->blkg points to the blkg to be updated and @ctx->body the
790 struct blkcg_gq *blkg; __acquires() local
815 blkg = blkg_lookup_create(blkcg, disk->queue); __acquires()
817 blkg = ERR_PTR(-EOPNOTSUPP); __acquires()
819 if (IS_ERR(blkg)) { __acquires()
820 ret = PTR_ERR(blkg); __acquires()
838 ctx->blkg = blkg; __acquires()
845 * blkg_conf_finish - finish up per-blkg config update
848 * Finish up after per-blkg config update. This function must be paired
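blkg_conf_prep() and blkg_conf_finish() above bracket a per-blkg configuration write: _prep() parses the "MAJ:MIN" prefix, resolves (or creates) the blkg, and returns with the queue lock and RCU read lock held, and _finish() drops them. A hedged sketch of a policy write handler in the style of tg_set_conf() follows; my_policy, struct my_pd, and the limit field are placeholders, not names from the tree.

#include <linux/blk-cgroup.h>

static struct blkcg_policy my_policy;		/* assumed, registered elsewhere */
struct my_pd { struct blkg_policy_data pd; u64 limit; };

static ssize_t my_set_limit(struct kernfs_open_file *of, char *buf,
			    size_t nbytes, loff_t off)
{
	struct blkcg *blkcg = css_to_blkcg(of_css(of));
	struct blkg_conf_ctx ctx;
	struct my_pd *mpd;
	u64 v;
	int ret;

	/* parses "MAJ:MIN", locks the queue, points ctx.blkg/ctx.body at the rest */
	ret = blkg_conf_prep(blkcg, &my_policy, buf, &ctx);
	if (ret)
		return ret;

	ret = -EINVAL;
	if (sscanf(ctx.body, "%llu", &v) != 1)
		goto out;

	mpd = container_of(blkg_to_pd(ctx.blkg, &my_policy), struct my_pd, pd);
	mpd->limit = v;
	ret = 0;
out:
	blkg_conf_finish(&ctx);			/* drops the locks taken by _prep() */
	return ret ?: nbytes;
}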
863 struct blkcg_gq *blkg; blkcg_print_stat() local
867 hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) { blkcg_print_stat()
872 dname = blkg_dev_name(blkg); blkcg_print_stat()
876 spin_lock_irq(blkg->q->queue_lock); blkcg_print_stat()
878 rwstat = blkg_rwstat_recursive_sum(blkg, NULL, blkcg_print_stat()
883 rwstat = blkg_rwstat_recursive_sum(blkg, NULL, blkcg_print_stat()
888 spin_unlock_irq(blkg->q->queue_lock); blkcg_print_stat()
934 struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first, blkcg_css_offline() local
936 struct request_queue *q = blkg->q; blkcg_css_offline()
939 blkg_destroy(blkg); blkcg_css_offline()
1048 struct blkcg_gq *new_blkg, *blkg; blkcg_init_queue() local
1059 * Make sure the root blkg exists and count the existing blkgs. As blkcg_init_queue()
1065 blkg = blkg_create(&blkcg_root, q, new_blkg); blkcg_init_queue()
1072 if (IS_ERR(blkg)) { blkcg_init_queue()
1074 return PTR_ERR(blkg); blkcg_init_queue()
1077 q->root_blkg = blkg; blkcg_init_queue()
1078 q->root_rl.blkg = blkg; blkcg_init_queue()
1199 * from IO path. Update of each blkg is protected by both queue and blkcg
1210 struct blkcg_gq *blkg; blkcg_activate_policy() local
1228 list_for_each_entry(blkg, &q->blkg_list, q_node) { blkcg_activate_policy()
1231 if (blkg->pd[pol->plid]) blkcg_activate_policy()
1242 blkg->pd[pol->plid] = pd; blkcg_activate_policy()
1243 pd->blkg = blkg; blkcg_activate_policy()
1272 struct blkcg_gq *blkg; blkcg_deactivate_policy() local
1282 list_for_each_entry(blkg, &q->blkg_list, q_node) { blkcg_deactivate_policy()
1283 /* grab blkcg lock too while removing @pd from @blkg */ blkcg_deactivate_policy()
1284 spin_lock(&blkg->blkcg->lock); blkcg_deactivate_policy()
1286 if (blkg->pd[pol->plid]) { blkcg_deactivate_policy()
1288 pol->pd_offline_fn(blkg->pd[pol->plid]); blkcg_deactivate_policy()
1289 pol->pd_free_fn(blkg->pd[pol->plid]); blkcg_deactivate_policy()
1290 blkg->pd[pol->plid] = NULL; blkcg_deactivate_policy()
1293 spin_unlock(&blkg->blkcg->lock); blkcg_deactivate_policy()
blk-throttle.c
46 * It's also used to track the reference counts on blkg's. A qnode always
49 * queued and decrementing when dequeued is enough to keep the whole blkg
164 static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg) blkg_to_tg() argument
166 return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl)); blkg_to_tg()
354 struct blkcg_gq *blkg = tg_to_blkg(tg); throtl_pd_init() local
355 struct throtl_data *td = blkg->q->td; throtl_pd_init()
372 if (cgroup_subsys_on_dfl(io_cgrp_subsys) && blkg->parent) throtl_pd_init()
373 sq->parent_sq = &blkg_to_tg(blkg->parent)->service_queue; throtl_pd_init()
1151 struct blkcg_gq *blkg; tg_conf_updated() local
1165 blkg_for_each_descendant_pre(blkg, pos_css, tg_to_blkg(tg)) tg_conf_updated()
1166 tg_update_has_rules(blkg_to_tg(blkg)); tg_conf_updated()
1204 tg = blkg_to_tg(ctx.blkg); tg_set_conf()
1272 const char *dname = blkg_dev_name(pd->blkg); tg_prfill_max()
1315 tg = blkg_to_tg(ctx.blkg); tg_set_max()
1396 bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg, blk_throtl_bio() argument
1400 struct throtl_grp *tg = blkg_to_tg(blkg ?: q->root_blkg); blk_throtl_bio()
1523 struct blkcg_gq *blkg; variable in typeref:struct:blkcg_gq
1532 * Drain each tg while doing post-order walk on the blkg tree, so
1534 * better to walk service_queue tree directly but blkg walk is
1537 blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg)
1538 tg_drain_bios(&blkg_to_tg(blkg)->service_queue);
cfq-iosched.c
618 static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg) blkg_to_cfqg() argument
620 return pd_to_cfqg(blkg_to_pd(blkg, &blkcg_policy_cfq)); blkg_to_cfqg()
1632 struct cfq_group_data *cgd = blkcg_to_cfqgd(pd->blkg->blkcg); cfq_pd_init()
1654 * @blkg is going offline and will be ignored by cfq_pd_offline()
1680 struct blkcg_gq *blkg; cfq_lookup_cfqg() local
1682 blkg = blkg_lookup(blkcg, cfqd->queue); cfq_lookup_cfqg()
1683 if (likely(blkg)) cfq_lookup_cfqg()
1684 return blkg_to_cfqg(blkg); cfq_lookup_cfqg()
1786 cfqg = blkg_to_cfqg(ctx.blkg); __cfqg_set_weight_device()
1823 struct blkcg_gq *blkg; __cfq_set_weight() local
1842 hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) { __cfq_set_weight()
1843 struct cfq_group *cfqg = blkg_to_cfqg(blkg); __cfq_set_weight()
1927 u64 sum = blkg_rwstat_total(&pd->blkg->stat_bytes); cfqg_prfill_sectors()
1942 struct blkg_rwstat tmp = blkg_rwstat_recursive_sum(pd->blkg, NULL, cfqg_prfill_sectors_recursive()
4501 struct blkcg_gq *blkg __maybe_unused; cfq_init_queue()
blk-core.c
69 clear_wb_congested(rl->blkg->wb_congested, sync); blk_clear_congested()
72 * If !CGROUP_WRITEBACK, all blkg's map to bdi->wb and we shouldn't blk_clear_congested()
83 set_wb_congested(rl->blkg->wb_congested, sync); blk_set_congested()
/linux-4.4.14/include/linux/
blk-cgroup.h
75 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
79 * There can be multiple active blkcg policies and each blkg:policy pair is
86 /* the blkg and policy id this per-policy data belongs to */
87 struct blkcg_gq *blkg; member in struct:blkg_policy_data
113 * Each blkg gets congested separately and the congestion state is
127 /* is this blkg online? protected by both blkcg and q locks */
188 const char *blkg_dev_name(struct blkcg_gq *blkg);
205 u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg,
207 struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg,
212 struct blkcg_gq *blkg; member in struct:blkg_conf_ctx
270 struct blkcg_gq *blkg; __blkg_lookup() local
275 blkg = rcu_dereference(blkcg->blkg_hint); __blkg_lookup()
276 if (blkg && blkg->q == q) __blkg_lookup()
277 return blkg; __blkg_lookup()
283 * blkg_lookup - lookup blkg for the specified blkcg - q pair
287 * Lookup blkg for the @blkcg - @q pair. This function should be called
303 * @blkg: blkg of interest
306 * Return pointer to private data associated with the @blkg-@pol pair.
308 static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg, blkg_to_pd() argument
311 return blkg ? blkg->pd[pol->plid] : NULL; blkg_to_pd()
321 * pd_to_blkg - get blkg associated with policy private data
324 * @pd is policy private data. Determine the blkg it's associated with.
328 return pd ? pd->blkg : NULL; pd_to_blkg()
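As the pd_to_blkg() line above suggests, and as blk-throttle.c and cfq-iosched.c do earlier in these results with blkg_to_tg() and blkg_to_cfqg(), a policy normally wraps blkg_to_pd()/pd_to_blkg() in container_of() helpers for its own per-policy type. An illustrative, not in-tree, version:

struct my_pd {
	struct blkg_policy_data pd;	/* embedded generic per-policy data */
	u64 limit;
};

static struct blkcg_policy my_policy;	/* assumed, registered elsewhere */

static inline struct my_pd *pd_to_mpd(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct my_pd, pd) : NULL;
}

static inline struct my_pd *blkg_to_mpd(struct blkcg_gq *blkg)
{
	return pd_to_mpd(blkg_to_pd(blkg, &my_policy));
}

static inline struct blkcg_gq *mpd_to_blkg(struct my_pd *mpd)
{
	return pd_to_blkg(&mpd->pd);
}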
337 * blkg_path - format cgroup path of blkg
338 * @blkg: blkg of interest
342 * Format the path of the cgroup of @blkg into @buf.
344 static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen) blkg_path() argument
348 p = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen); blkg_path()
359 * blkg_get - get a blkg reference
360 * @blkg: blkg to get
364 static inline void blkg_get(struct blkcg_gq *blkg) blkg_get() argument
366 WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0); blkg_get()
367 atomic_inc(&blkg->refcnt); blkg_get()
373 * blkg_put - put a blkg reference
374 * @blkg: blkg to put
376 static inline void blkg_put(struct blkcg_gq *blkg) blkg_put() argument
378 WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0); blkg_put()
379 if (atomic_dec_and_test(&blkg->refcnt)) blkg_put()
380 call_rcu(&blkg->rcu_head, __blkg_release_rcu); blkg_put()
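The blkg_get()/blkg_put() excerpt above is the pinning side of the lifetime rules: a blkg found under RCU plus the queue lock stays valid only while those are held, unless a reference is taken first, and the final put frees it through __blkg_release_rcu(). A minimal sketch with a hypothetical caller:

#include <linux/blk-cgroup.h>

static void my_use_blkg(struct blkcg *blkcg, struct request_queue *q)
{
	struct blkcg_gq *blkg;

	rcu_read_lock();
	spin_lock_irq(q->queue_lock);
	blkg = blkg_lookup(blkcg, q);
	if (blkg)
		blkg_get(blkg);		/* pin before dropping the locks */
	spin_unlock_irq(q->queue_lock);
	rcu_read_unlock();

	if (!blkg)
		return;

	/* ... safe to use blkg->pd[], blkg->stat_bytes, etc. here ... */

	blkg_put(blkg);			/* last put schedules __blkg_release_rcu() */
}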
384 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
387 * @p_blkg: target blkg to walk descendants of
401 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
404 * @p_blkg: target blkg to walk descendants of
429 struct blkcg_gq *blkg; blk_get_rl() local
435 /* bypass blkg lookup and use @q->root_rl directly for root */ blk_get_rl()
440 * Try to use blkg->rl. blkg lookup may fail under memory pressure blk_get_rl()
444 blkg = blkg_lookup(blkcg, q); blk_get_rl()
445 if (unlikely(!blkg)) blk_get_rl()
448 blkg_get(blkg); blk_get_rl()
450 return &blkg->rl; blk_get_rl()
465 if (rl->blkg->blkcg != &blkcg_root) blk_put_rl()
466 blkg_put(rl->blkg); blk_put_rl()
686 extern bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
689 static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg, blk_throtl_bio() argument
697 struct blkcg_gq *blkg; blkcg_bio_issue_check() local
703 blkg = blkg_lookup(blkcg, q); blkcg_bio_issue_check()
704 if (unlikely(!blkg)) { blkcg_bio_issue_check()
706 blkg = blkg_lookup_create(blkcg, q); blkcg_bio_issue_check()
707 if (IS_ERR(blkg)) blkcg_bio_issue_check()
708 blkg = NULL; blkcg_bio_issue_check()
712 throtl = blk_throtl_bio(q, blkg, bio); blkcg_bio_issue_check()
715 blkg = blkg ?: q->root_blkg; blkcg_bio_issue_check()
716 blkg_rwstat_add(&blkg->stat_bytes, bio->bi_rw, blkcg_bio_issue_check()
718 blkg_rwstat_add(&blkg->stat_ios, bio->bi_rw, 1); blkcg_bio_issue_check()
765 static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg, blkg_to_pd() argument
768 static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; } blkg_path() argument
769 static inline void blkg_get(struct blkcg_gq *blkg) { } blkg_put() argument
770 static inline void blkg_put(struct blkcg_gq *blkg) { } blkg_put() argument
backing-dev-defs.h
53 atomic_t refcnt; /* nr of attached wb's and blkg */
blkdev.h
58 struct blkcg_gq *blkg; /* blkg this request pool belongs to */ member in struct:request_list
294 * is used, root blkg allocates from @q->root_rl and all other
295 * blkgs from their own blkg->rl. Which one to use should be
