root/block/blk-mq-tag.h

/* [<][>][^][v][top][bottom][index][help] */

INCLUDED FROM


DEFINITIONS

This source file includes the following definitions.
  1. bt_wait_ptr
  2. blk_mq_tag_busy
  3. blk_mq_tag_idle
  4. blk_mq_tag_set_rq
  5. blk_mq_tag_is_reserved

   1 /* SPDX-License-Identifier: GPL-2.0 */
   2 #ifndef INT_BLK_MQ_TAG_H
   3 #define INT_BLK_MQ_TAG_H
   4 
   5 #include "blk-mq.h"
   6 
   7 /*
   8  * Tag address space map.
   9  */
struct blk_mq_tags {
	unsigned int nr_tags;		/* total size of the tag space */
	unsigned int nr_reserved_tags;	/* tags below this value are reserved
					 * (see blk_mq_tag_is_reserved()) */

	atomic_t active_queues;		/* NOTE(review): presumably the number of
					 * queues actively using a shared tag set —
					 * maintained by __blk_mq_tag_busy()/
					 * __blk_mq_tag_idle(); confirm in blk-mq-tag.c */

	struct sbitmap_queue bitmap_tags;	/* allocation bitmap for normal tags */
	struct sbitmap_queue breserved_tags;	/* allocation bitmap for reserved tags */

	struct request **rqs;		/* per-tag request table; written by
					 * blk_mq_tag_set_rq() */
	struct request **static_rqs;	/* per-tag preallocated request table */
	struct list_head page_list;	/* backing pages for the request tables —
					 * freed with the tag set */
};
  23 
  24 
  25 extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags, unsigned int reserved_tags, int node, int alloc_policy);
  26 extern void blk_mq_free_tags(struct blk_mq_tags *tags);
  27 
  28 extern unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data);
  29 extern void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_tags *tags,
  30                            struct blk_mq_ctx *ctx, unsigned int tag);
  31 extern bool blk_mq_has_free_tags(struct blk_mq_tags *tags);
  32 extern int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
  33                                         struct blk_mq_tags **tags,
  34                                         unsigned int depth, bool can_grow);
  35 extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
  36 void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
  37                 void *priv);
  38 
  39 static inline struct sbq_wait_state *bt_wait_ptr(struct sbitmap_queue *bt,
  40                                                  struct blk_mq_hw_ctx *hctx)
  41 {
  42         if (!hctx)
  43                 return &bt->ws[0];
  44         return sbq_wait_ptr(bt, &hctx->wait_index);
  45 }
  46 
enum {
	BLK_MQ_TAG_FAIL		= -1U,			/* all-ones sentinel: no tag available */
	BLK_MQ_TAG_MIN		= 1,			/* smallest permitted tag depth —
							 * TODO confirm against
							 * blk_mq_tag_update_depth() users */
	BLK_MQ_TAG_MAX		= BLK_MQ_TAG_FAIL - 1,	/* largest value distinct from the
							 * failure sentinel */
};
  52 
  53 extern bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *);
  54 extern void __blk_mq_tag_idle(struct blk_mq_hw_ctx *);
  55 
  56 static inline bool blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
  57 {
  58         if (!(hctx->flags & BLK_MQ_F_TAG_SHARED))
  59                 return false;
  60 
  61         return __blk_mq_tag_busy(hctx);
  62 }
  63 
  64 static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
  65 {
  66         if (!(hctx->flags & BLK_MQ_F_TAG_SHARED))
  67                 return;
  68 
  69         __blk_mq_tag_idle(hctx);
  70 }
  71 
  72 /*
  73  * This helper should only be used for flush request to share tag
  74  * with the request cloned from, and both the two requests can't be
  75  * in flight at the same time. The caller has to make sure the tag
  76  * can't be freed.
  77  */
  78 static inline void blk_mq_tag_set_rq(struct blk_mq_hw_ctx *hctx,
  79                 unsigned int tag, struct request *rq)
  80 {
  81         hctx->tags->rqs[tag] = rq;
  82 }
  83 
  84 static inline bool blk_mq_tag_is_reserved(struct blk_mq_tags *tags,
  85                                           unsigned int tag)
  86 {
  87         return tag < tags->nr_reserved_tags;
  88 }
  89 
  90 #endif

/* [<][>][^][v][top][bottom][index][help] */