root/include/linux/blk-mq.h


DEFINITIONS

This source file includes the following definitions:
  1. blk_mq_unique_tag_to_hwq
  2. blk_mq_unique_tag_to_tag
  3. blk_mq_rq_from_pdu
  4. blk_mq_rq_to_pdu
  5. request_to_qc_t
  6. blk_mq_cleanup_rq

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_MQ_H
#define BLK_MQ_H

#include <linux/blkdev.h>
#include <linux/sbitmap.h>
#include <linux/srcu.h>

struct blk_mq_tags;
struct blk_flush_queue;

/**
 * struct blk_mq_hw_ctx - State for a hardware queue facing the hardware block device
 */
struct blk_mq_hw_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	dispatch;
		unsigned long		state;		/* BLK_MQ_S_* flags */
	} ____cacheline_aligned_in_smp;

	struct delayed_work	run_work;
	cpumask_var_t		cpumask;
	int			next_cpu;
	int			next_cpu_batch;

	unsigned long		flags;		/* BLK_MQ_F_* flags */

	void			*sched_data;
	struct request_queue	*queue;
	struct blk_flush_queue	*fq;

	void			*driver_data;

	struct sbitmap		ctx_map;

	struct blk_mq_ctx	*dispatch_from;
	unsigned int		dispatch_busy;

	unsigned short		type;
	unsigned short		nr_ctx;
	struct blk_mq_ctx	**ctxs;

	spinlock_t		dispatch_wait_lock;
	wait_queue_entry_t	dispatch_wait;
	atomic_t		wait_index;

	struct blk_mq_tags	*tags;
	struct blk_mq_tags	*sched_tags;

	unsigned long		queued;
	unsigned long		run;
#define BLK_MQ_MAX_DISPATCH_ORDER	7
	unsigned long		dispatched[BLK_MQ_MAX_DISPATCH_ORDER];

	unsigned int		numa_node;
	unsigned int		queue_num;

	atomic_t		nr_active;

	struct hlist_node	cpuhp_dead;
	struct kobject		kobj;

	unsigned long		poll_considered;
	unsigned long		poll_invoked;
	unsigned long		poll_success;

#ifdef CONFIG_BLK_DEBUG_FS
	struct dentry		*debugfs_dir;
	struct dentry		*sched_debugfs_dir;
#endif

	struct list_head	hctx_list;

	/* Must be the last member - see also blk_mq_hw_ctx_size(). */
	struct srcu_struct	srcu[0];
};

struct blk_mq_queue_map {
	unsigned int *mq_map;
	unsigned int nr_queues;
	unsigned int queue_offset;
};

enum hctx_type {
	HCTX_TYPE_DEFAULT,	/* all I/O not otherwise accounted for */
	HCTX_TYPE_READ,		/* just for READ I/O */
	HCTX_TYPE_POLL,		/* polled I/O of any kind */

	HCTX_MAX_TYPES,
};

struct blk_mq_tag_set {
	/*
	 * map[] holds ctx -> hctx mappings, one map exists for each type
	 * that the driver wishes to support. There are no restrictions
	 * on maps being of the same size, and it's perfectly legal to
	 * share maps between types.
	 */
	struct blk_mq_queue_map	map[HCTX_MAX_TYPES];
	unsigned int		nr_maps;	/* nr entries in map[] */
	const struct blk_mq_ops	*ops;
	unsigned int		nr_hw_queues;	/* nr hw queues across maps */
	unsigned int		queue_depth;	/* max hw supported */
	unsigned int		reserved_tags;
	unsigned int		cmd_size;	/* per-request extra data */
	int			numa_node;
	unsigned int		timeout;
	unsigned int		flags;		/* BLK_MQ_F_* */
	void			*driver_data;

	struct blk_mq_tags	**tags;

	struct mutex		tag_list_lock;
	struct list_head	tag_list;
};

struct blk_mq_queue_data {
	struct request *rq;
	bool last;
};

typedef blk_status_t (queue_rq_fn)(struct blk_mq_hw_ctx *,
		const struct blk_mq_queue_data *);
typedef void (commit_rqs_fn)(struct blk_mq_hw_ctx *);
typedef bool (get_budget_fn)(struct blk_mq_hw_ctx *);
typedef void (put_budget_fn)(struct blk_mq_hw_ctx *);
typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool);
typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
typedef int (init_request_fn)(struct blk_mq_tag_set *set, struct request *,
		unsigned int, unsigned int);
typedef void (exit_request_fn)(struct blk_mq_tag_set *set, struct request *,
		unsigned int);

typedef bool (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
		bool);
typedef bool (busy_tag_iter_fn)(struct request *, void *, bool);
typedef int (poll_fn)(struct blk_mq_hw_ctx *);
typedef int (map_queues_fn)(struct blk_mq_tag_set *set);
typedef bool (busy_fn)(struct request_queue *);
typedef void (complete_fn)(struct request *);
typedef void (cleanup_rq_fn)(struct request *);

struct blk_mq_ops {
	/*
	 * Queue request
	 */
	queue_rq_fn		*queue_rq;

	/*
	 * If a driver uses bd->last to judge when to submit requests to
	 * hardware, it must define this function. In case of errors that
	 * make us stop issuing further requests, this hook serves the
	 * purpose of kicking the hardware (which the last request otherwise
	 * would have done). (An illustrative sketch follows this structure.)
	 */
	commit_rqs_fn		*commit_rqs;

	/*
	 * Reserve budget before queueing a request; once .queue_rq is
	 * run, it is the driver's responsibility to release the
	 * reserved budget. The failure case of .get_budget must also
	 * be handled to avoid I/O deadlock.
	 */
	get_budget_fn		*get_budget;
	put_budget_fn		*put_budget;

	/*
	 * Called on request timeout
	 */
	timeout_fn		*timeout;

	/*
	 * Called to poll for completion of a specific tag.
	 */
	poll_fn			*poll;

	complete_fn		*complete;

	/*
	 * Called when the block layer side of a hardware queue has been
	 * set up, allowing the driver to allocate/init matching structures.
	 * Ditto for exit/teardown.
	 */
	init_hctx_fn		*init_hctx;
	exit_hctx_fn		*exit_hctx;

	/*
	 * Called for every command allocated by the block layer to allow
	 * the driver to set up driver specific data.
	 *
	 * A tag greater than or equal to queue_depth is reserved for
	 * setting up the flush request.
	 *
	 * Ditto for exit/teardown.
	 */
	init_request_fn		*init_request;
	exit_request_fn		*exit_request;
	/* Called from inside blk_get_request() */
	void (*initialize_rq_fn)(struct request *rq);

	/*
	 * Called before freeing a request that has not completed yet,
	 * usually to free the driver's private data.
	 */
	cleanup_rq_fn		*cleanup_rq;

	/*
	 * If set, returns whether or not this queue currently is busy
	 */
	busy_fn			*busy;

	map_queues_fn		*map_queues;

#ifdef CONFIG_BLK_DEBUG_FS
	/*
	 * Used by the debugfs implementation to show driver-specific
	 * information about a request.
	 */
	void (*show_rq)(struct seq_file *m, struct request *rq);
#endif
};
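
/*
 * Example (illustrative sketch, not part of this header): a hypothetical
 * driver wiring up ->queue_rq and ->commit_rqs. It uses bd->last to batch
 * doorbell writes as described in the commit_rqs comment above; all "my_*"
 * names are assumptions, not real kernel symbols.
 */
#if 0
static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct my_queue *mq = hctx->driver_data;

	blk_mq_start_request(bd->rq);
	my_hw_post(mq, bd->rq);			/* place on the hw ring */
	if (bd->last)
		my_hw_ring_doorbell(mq);	/* submit the whole batch */
	return BLK_STS_OK;
}

/* Kick the hardware for requests that were queued with bd->last == false. */
static void my_commit_rqs(struct blk_mq_hw_ctx *hctx)
{
	my_hw_ring_doorbell(hctx->driver_data);
}

static const struct blk_mq_ops my_mq_ops = {
	.queue_rq	= my_queue_rq,
	.commit_rqs	= my_commit_rqs,
};
#endif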

enum {
	BLK_MQ_F_SHOULD_MERGE	= 1 << 0,
	BLK_MQ_F_TAG_SHARED	= 1 << 1,
	BLK_MQ_F_BLOCKING	= 1 << 5,
	BLK_MQ_F_NO_SCHED	= 1 << 6,
	BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
	BLK_MQ_F_ALLOC_POLICY_BITS = 1,

	BLK_MQ_S_STOPPED	= 0,
	BLK_MQ_S_TAG_ACTIVE	= 1,
	BLK_MQ_S_SCHED_RESTART	= 2,

	BLK_MQ_MAX_DEPTH	= 10240,

	BLK_MQ_CPU_WORK_BATCH	= 8,
};
#define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \
	((flags >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \
		((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1))
#define BLK_ALLOC_POLICY_TO_MQ_FLAG(policy) \
	((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
		<< BLK_MQ_F_ALLOC_POLICY_START_BIT)
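
/*
 * Example (sketch): a driver that wants round-robin tag allocation folds the
 * policy into set->flags with BLK_ALLOC_POLICY_TO_MQ_FLAG(), and the core
 * recovers it with BLK_MQ_FLAG_TO_ALLOC_POLICY(). BLK_TAG_ALLOC_RR comes
 * from <linux/blkdev.h>.
 */
#if 0
set->flags = BLK_MQ_F_SHOULD_MERGE |
	     BLK_ALLOC_POLICY_TO_MQ_FLAG(BLK_TAG_ALLOC_RR);

/* Later, when the tags are created: */
int policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags);	/* BLK_TAG_ALLOC_RR */
#endif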

struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
						  struct request_queue *q,
						  bool elevator_init);
struct request_queue *blk_mq_init_sq_queue(struct blk_mq_tag_set *set,
						const struct blk_mq_ops *ops,
						unsigned int queue_depth,
						unsigned int set_flags);
void blk_mq_unregister_dev(struct device *, struct request_queue *);

int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
void blk_mq_free_tag_set(struct blk_mq_tag_set *set);
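
/*
 * Example (sketch, all "my_*" names hypothetical): typical probe-time setup
 * of a single-queue tag set followed by queue creation.
 */
#if 0
static int my_probe(struct my_device *dev)
{
	struct request_queue *q;
	int ret;

	memset(&dev->tag_set, 0, sizeof(dev->tag_set));
	dev->tag_set.ops = &my_mq_ops;
	dev->tag_set.nr_hw_queues = 1;
	dev->tag_set.queue_depth = 64;
	dev->tag_set.numa_node = NUMA_NO_NODE;
	dev->tag_set.cmd_size = sizeof(struct my_cmd);	/* per-request PDU */
	dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	dev->tag_set.driver_data = dev;

	ret = blk_mq_alloc_tag_set(&dev->tag_set);
	if (ret)
		return ret;

	q = blk_mq_init_queue(&dev->tag_set);
	if (IS_ERR(q)) {
		blk_mq_free_tag_set(&dev->tag_set);
		return PTR_ERR(q);
	}
	dev->queue = q;
	return 0;
}
#endif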

void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);

void blk_mq_free_request(struct request *rq);
bool blk_mq_can_queue(struct blk_mq_hw_ctx *);

bool blk_mq_queue_inflight(struct request_queue *q);

enum {
	/* return when out of requests */
	BLK_MQ_REQ_NOWAIT	= (__force blk_mq_req_flags_t)(1 << 0),
	/* allocate from reserved pool */
	BLK_MQ_REQ_RESERVED	= (__force blk_mq_req_flags_t)(1 << 1),
	/* allocate internal/sched tag */
	BLK_MQ_REQ_INTERNAL	= (__force blk_mq_req_flags_t)(1 << 2),
	/* set RQF_PREEMPT */
	BLK_MQ_REQ_PREEMPT	= (__force blk_mq_req_flags_t)(1 << 3),
};

struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
		blk_mq_req_flags_t flags);
struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
		unsigned int op, blk_mq_req_flags_t flags,
		unsigned int hctx_idx);
struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
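
/*
 * Example (sketch): allocating a passthrough request without sleeping. With
 * BLK_MQ_REQ_NOWAIT, blk_mq_alloc_request() returns ERR_PTR(-EWOULDBLOCK)
 * instead of blocking when no tag is available.
 */
#if 0
struct request *rq;

rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, BLK_MQ_REQ_NOWAIT);
if (IS_ERR(rq))
	return PTR_ERR(rq);

/* fill in and issue the request, then release it */
blk_mq_free_request(rq);
#endif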

enum {
	BLK_MQ_UNIQUE_TAG_BITS = 16,
	BLK_MQ_UNIQUE_TAG_MASK = (1 << BLK_MQ_UNIQUE_TAG_BITS) - 1,
};

u32 blk_mq_unique_tag(struct request *rq);

static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag)
{
	return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS;
}

static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
{
	return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
}
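
/*
 * Example (sketch): a SCSI-style driver can recover the hardware queue and
 * the per-queue tag from the single 32-bit value returned by
 * blk_mq_unique_tag().
 */
#if 0
u32 unique = blk_mq_unique_tag(rq);
u16 hwq = blk_mq_unique_tag_to_hwq(unique);	/* upper 16 bits */
u16 tag = blk_mq_unique_tag_to_tag(unique);	/* lower 16 bits */
#endif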


int blk_mq_request_started(struct request *rq);
int blk_mq_request_completed(struct request *rq);
void blk_mq_start_request(struct request *rq);
void blk_mq_end_request(struct request *rq, blk_status_t error);
void __blk_mq_end_request(struct request *rq, blk_status_t error);

void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
bool blk_mq_complete_request(struct request *rq);
bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list,
			   struct bio *bio, unsigned int nr_segs);
bool blk_mq_queue_stopped(struct request_queue *q);
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_stop_hw_queues(struct request_queue *q);
void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_quiesce_queue(struct request_queue *q);
void blk_mq_unquiesce_queue(struct request_queue *q);
void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
bool blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_run_hw_queues(struct request_queue *q, bool async);
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
		busy_tag_iter_fn *fn, void *priv);
void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_unfreeze_queue(struct request_queue *q);
void blk_freeze_queue_start(struct request_queue *q);
void blk_mq_freeze_queue_wait(struct request_queue *q);
int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
				     unsigned long timeout);
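
/*
 * Example (sketch): the usual freeze/unfreeze bracket around a queue limit
 * update. blk_mq_freeze_queue() returns only after all in-flight requests
 * have completed, so the update below cannot race with I/O. The new block
 * size value is hypothetical.
 */
#if 0
blk_mq_freeze_queue(q);
blk_queue_logical_block_size(q, new_block_size);
blk_mq_unfreeze_queue(q);
#endif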

int blk_mq_map_queues(struct blk_mq_queue_map *qmap);
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);

void blk_mq_quiesce_queue_nowait(struct request_queue *q);

unsigned int blk_mq_rq_cpu(struct request *rq);

/*
 * Driver command data is immediately after the request. So subtract request
 * size to get back to the original request, add request size to get the PDU.
 */
static inline struct request *blk_mq_rq_from_pdu(void *pdu)
{
	return pdu - sizeof(struct request);
}
static inline void *blk_mq_rq_to_pdu(struct request *rq)
{
	return rq + 1;
}
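
/*
 * Example (sketch): with cmd_size = sizeof(struct my_cmd) in the tag set,
 * ->queue_rq recovers the per-request PDU via blk_mq_rq_to_pdu(), and the
 * completion path maps back with blk_mq_rq_from_pdu(). "my_*" names are
 * hypothetical.
 */
#if 0
struct my_cmd {
	dma_addr_t dma;		/* driver-private, lives right after the rq */
};

/* In ->queue_rq: */
struct my_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);

/* In the completion handler: */
blk_mq_end_request(blk_mq_rq_from_pdu(cmd), BLK_STS_OK);
#endif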

#define queue_for_each_hw_ctx(q, hctx, i)				\
	for ((i) = 0; (i) < (q)->nr_hw_queues &&			\
	     ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)

#define hctx_for_each_ctx(hctx, ctx, i)					\
	for ((i) = 0; (i) < (hctx)->nr_ctx &&				\
	     ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
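
/*
 * Example (sketch): walking every hardware queue of a request queue, as the
 * core and the debugfs code do.
 */
#if 0
struct blk_mq_hw_ctx *hctx;
unsigned int i;

queue_for_each_hw_ctx(q, hctx, i)
	pr_info("hctx %u: %u software ctxs\n", hctx->queue_num, hctx->nr_ctx);
#endif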

static inline blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx,
		struct request *rq)
{
	if (rq->tag != -1)
		return rq->tag | (hctx->queue_num << BLK_QC_T_SHIFT);

	return rq->internal_tag | (hctx->queue_num << BLK_QC_T_SHIFT) |
			BLK_QC_T_INTERNAL;
}

static inline void blk_mq_cleanup_rq(struct request *rq)
{
	if (rq->q->mq_ops->cleanup_rq)
		rq->q->mq_ops->cleanup_rq(rq);
}

#endif
