root/block/blk-mq-tag.c

DEFINITIONS

This source file includes the following definitions.
  1. blk_mq_has_free_tags
  2. __blk_mq_tag_busy
  3. blk_mq_tag_wakeup_all
  4. __blk_mq_tag_idle
  5. hctx_may_queue
  6. __blk_mq_get_tag
  7. blk_mq_get_tag
  8. blk_mq_put_tag
  9. bt_iter
  10. bt_for_each
  11. bt_tags_iter
  12. bt_tags_for_each
  13. blk_mq_all_tag_busy_iter
  14. blk_mq_tagset_busy_iter
  15. blk_mq_tagset_count_completed_rqs
  16. blk_mq_tagset_wait_completed_request
  17. blk_mq_queue_tag_busy_iter
  18. bt_alloc
  19. blk_mq_init_bitmap_tags
  20. blk_mq_init_tags
  21. blk_mq_free_tags
  22. blk_mq_tag_update_depth
  23. blk_mq_unique_tag

// SPDX-License-Identifier: GPL-2.0
/*
 * Tag allocation using scalable bitmaps. Uses active queue tracking to support
 * fairer distribution of tags between multiple submitters when a shared tag map
 * is used.
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>

#include <linux/blk-mq.h>
#include <linux/delay.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
{
        if (!tags)
                return true;

        return sbitmap_any_bit_clear(&tags->bitmap_tags.sb);
}

/*
 * If a previously inactive queue goes active, bump the active user count.
 * We need to do this before trying to allocate a driver tag, so that even
 * if the first allocation attempt fails, the other shared-tag users can
 * still reserve budget for this queue.
 */
bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
        if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
            !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
                atomic_inc(&hctx->tags->active_queues);

        return true;
}
/*
 * Wake up all tasks potentially sleeping on tags
 */
void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
{
        sbitmap_queue_wake_all(&tags->bitmap_tags);
        if (include_reserve)
                sbitmap_queue_wake_all(&tags->breserved_tags);
}

/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
        struct blk_mq_tags *tags = hctx->tags;

        if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
                return;

        atomic_dec(&tags->active_queues);

        blk_mq_tag_wakeup_all(tags, false);
}

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
                                  struct sbitmap_queue *bt)
{
        unsigned int depth, users;

        if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
                return true;
        if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
                return true;

        /*
         * Don't try dividing an ant
         */
        if (bt->sb.depth == 1)
                return true;

        users = atomic_read(&hctx->tags->active_queues);
        if (!users)
                return true;

        /*
         * Allow at least some tags
         */
        depth = max((bt->sb.depth + users - 1) / users, 4U);
        return atomic_read(&hctx->nr_active) < depth;
}
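
/*
 * A worked example of the fair-share calculation above (illustrative,
 * not part of the original source): with a shared bitmap of depth 256
 * and three active queues, each hctx may use up to
 *
 *      depth = max((256 + 3 - 1) / 3, 4U) = max(86, 4) = 86
 *
 * tags. With depth 8 and four active queues the 4U floor wins:
 * max((8 + 4 - 1) / 4, 4U) = max(2, 4) = 4, so every active queue can
 * still make some forward progress.
 */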

static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
                            struct sbitmap_queue *bt)
{
        if (!(data->flags & BLK_MQ_REQ_INTERNAL) &&
            !hctx_may_queue(data->hctx, bt))
                return -1;
        if (data->shallow_depth)
                return __sbitmap_queue_get_shallow(bt, data->shallow_depth);
        else
                return __sbitmap_queue_get(bt);
}

unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
        struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
        struct sbitmap_queue *bt;
        struct sbq_wait_state *ws;
        DEFINE_SBQ_WAIT(wait);
        unsigned int tag_offset;
        int tag;

        if (data->flags & BLK_MQ_REQ_RESERVED) {
                if (unlikely(!tags->nr_reserved_tags)) {
                        WARN_ON_ONCE(1);
                        return BLK_MQ_TAG_FAIL;
                }
                bt = &tags->breserved_tags;
                tag_offset = 0;
        } else {
                bt = &tags->bitmap_tags;
                tag_offset = tags->nr_reserved_tags;
        }

        tag = __blk_mq_get_tag(data, bt);
        if (tag != -1)
                goto found_tag;

        if (data->flags & BLK_MQ_REQ_NOWAIT)
                return BLK_MQ_TAG_FAIL;

        ws = bt_wait_ptr(bt, data->hctx);
        do {
                struct sbitmap_queue *bt_prev;

                /*
                 * We're out of tags on this hardware queue, kick any
                 * pending IO submits before going to sleep waiting for
                 * some to complete.
                 */
                blk_mq_run_hw_queue(data->hctx, false);

                /*
                 * Retry tag allocation after running the hardware queue,
                 * as running the queue may also have found completions.
                 */
                tag = __blk_mq_get_tag(data, bt);
                if (tag != -1)
                        break;

                sbitmap_prepare_to_wait(bt, ws, &wait, TASK_UNINTERRUPTIBLE);

                tag = __blk_mq_get_tag(data, bt);
                if (tag != -1)
                        break;

                bt_prev = bt;
                io_schedule();

                sbitmap_finish_wait(bt, ws, &wait);

                data->ctx = blk_mq_get_ctx(data->q);
                data->hctx = blk_mq_map_queue(data->q, data->cmd_flags,
                                                data->ctx);
                tags = blk_mq_tags_from_data(data);
                if (data->flags & BLK_MQ_REQ_RESERVED)
                        bt = &tags->breserved_tags;
                else
                        bt = &tags->bitmap_tags;

                /*
                 * If the destination hw queue has changed, issue a fake
                 * wake up on the previous queue to compensate for the
                 * missed wake up, so other allocations on the previous
                 * queue won't be starved.
                 */
                if (bt != bt_prev)
                        sbitmap_queue_wake_up(bt_prev);

                ws = bt_wait_ptr(bt, data->hctx);
        } while (1);

        sbitmap_finish_wait(bt, ws, &wait);

found_tag:
        return tag + tag_offset;
}
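
/*
 * Caller-side sketch (illustrative, not part of the original file): the
 * blk-mq core fills in a blk_mq_alloc_data before calling in, roughly
 * along these lines:
 *
 *      struct blk_mq_alloc_data data = {
 *              .q     = q,
 *              .flags = BLK_MQ_REQ_NOWAIT,    // fail instead of sleeping
 *      };
 *      unsigned int tag = blk_mq_get_tag(&data);
 *
 *      if (tag == BLK_MQ_TAG_FAIL)
 *              // out of tags and we may not block
 */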

void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_tags *tags,
                    struct blk_mq_ctx *ctx, unsigned int tag)
{
        if (!blk_mq_tag_is_reserved(tags, tag)) {
                const int real_tag = tag - tags->nr_reserved_tags;

                BUG_ON(real_tag >= tags->nr_tags);
                sbitmap_queue_clear(&tags->bitmap_tags, real_tag, ctx->cpu);
        } else {
                BUG_ON(tag >= tags->nr_reserved_tags);
                sbitmap_queue_clear(&tags->breserved_tags, tag, ctx->cpu);
        }
}
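
/*
 * Example of the tag numbering used above (illustrative only): with
 * nr_reserved_tags == 2, tags 0 and 1 map directly onto bits 0 and 1 of
 * breserved_tags, while tag 5 maps onto bit 5 - 2 == 3 of bitmap_tags.
 * blk_mq_get_tag() applies the same offset in the opposite direction
 * through tag_offset.
 */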

struct bt_iter_data {
        struct blk_mq_hw_ctx *hctx;
        busy_iter_fn *fn;
        void *data;
        bool reserved;
};

static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
        struct bt_iter_data *iter_data = data;
        struct blk_mq_hw_ctx *hctx = iter_data->hctx;
        struct blk_mq_tags *tags = hctx->tags;
        bool reserved = iter_data->reserved;
        struct request *rq;

        if (!reserved)
                bitnr += tags->nr_reserved_tags;
        rq = tags->rqs[bitnr];

        /*
         * We can hit rq == NULL here, because the tagging functions
         * test and set the bit before assigning ->rqs[].
         */
        if (rq && rq->q == hctx->queue)
                return iter_data->fn(hctx, rq, iter_data->data, reserved);
        return true;
}

/**
 * bt_for_each - iterate over the requests associated with a hardware queue
 * @hctx:       Hardware queue to examine.
 * @bt:         sbitmap to examine. This is either the breserved_tags member
 *              or the bitmap_tags member of struct blk_mq_tags.
 * @fn:         Pointer to the function that will be called for each request
 *              associated with @hctx that has been assigned a driver tag.
 *              @fn will be called as follows: @fn(@hctx, rq, @data, @reserved)
 *              where rq is a pointer to a request. Return true to continue
 *              iterating tags, false to stop.
 * @data:       Will be passed as third argument to @fn.
 * @reserved:   Indicates whether @bt is the breserved_tags member or the
 *              bitmap_tags member of struct blk_mq_tags.
 */
static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt,
                        busy_iter_fn *fn, void *data, bool reserved)
{
        struct bt_iter_data iter_data = {
                .hctx = hctx,
                .fn = fn,
                .data = data,
                .reserved = reserved,
        };

        sbitmap_for_each_set(&bt->sb, bt_iter, &iter_data);
}

struct bt_tags_iter_data {
        struct blk_mq_tags *tags;
        busy_tag_iter_fn *fn;
        void *data;
        bool reserved;
};

static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
        struct bt_tags_iter_data *iter_data = data;
        struct blk_mq_tags *tags = iter_data->tags;
        bool reserved = iter_data->reserved;
        struct request *rq;

        if (!reserved)
                bitnr += tags->nr_reserved_tags;

        /*
         * We can hit rq == NULL here, because the tagging functions
         * test and set the bit before assigning ->rqs[].
         */
        rq = tags->rqs[bitnr];
        if (rq && blk_mq_request_started(rq))
                return iter_data->fn(rq, iter_data->data, reserved);

        return true;
}

/**
 * bt_tags_for_each - iterate over the requests in a tag map
 * @tags:       Tag map to iterate over.
 * @bt:         sbitmap to examine. This is either the breserved_tags member
 *              or the bitmap_tags member of struct blk_mq_tags.
 * @fn:         Pointer to the function that will be called for each started
 *              request. @fn will be called as follows: @fn(rq, @data,
 *              @reserved) where rq is a pointer to a request. Return true
 *              to continue iterating tags, false to stop.
 * @data:       Will be passed as second argument to @fn.
 * @reserved:   Indicates whether @bt is the breserved_tags member or the
 *              bitmap_tags member of struct blk_mq_tags.
 */
static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt,
                             busy_tag_iter_fn *fn, void *data, bool reserved)
{
        struct bt_tags_iter_data iter_data = {
                .tags = tags,
                .fn = fn,
                .data = data,
                .reserved = reserved,
        };

        if (tags->rqs)
                sbitmap_for_each_set(&bt->sb, bt_tags_iter, &iter_data);
}

/**
 * blk_mq_all_tag_busy_iter - iterate over all started requests in a tag map
 * @tags:       Tag map to iterate over.
 * @fn:         Pointer to the function that will be called for each started
 *              request. @fn will be called as follows: @fn(rq, @priv,
 *              reserved) where rq is a pointer to a request. 'reserved'
 *              indicates whether or not @rq is a reserved request. Return
 *              true to continue iterating tags, false to stop.
 * @priv:       Will be passed as second argument to @fn.
 */
static void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags,
                busy_tag_iter_fn *fn, void *priv)
{
        if (tags->nr_reserved_tags)
                bt_tags_for_each(tags, &tags->breserved_tags, fn, priv, true);
        bt_tags_for_each(tags, &tags->bitmap_tags, fn, priv, false);
}

/**
 * blk_mq_tagset_busy_iter - iterate over all started requests in a tag set
 * @tagset:     Tag set to iterate over.
 * @fn:         Pointer to the function that will be called for each started
 *              request. @fn will be called as follows: @fn(rq, @priv,
 *              reserved) where rq is a pointer to a request. 'reserved'
 *              indicates whether or not @rq is a reserved request. Return
 *              true to continue iterating tags, false to stop.
 * @priv:       Will be passed as second argument to @fn.
 */
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
                busy_tag_iter_fn *fn, void *priv)
{
        int i;

        for (i = 0; i < tagset->nr_hw_queues; i++) {
                if (tagset->tags && tagset->tags[i])
                        blk_mq_all_tag_busy_iter(tagset->tags[i], fn, priv);
        }
}
EXPORT_SYMBOL(blk_mq_tagset_busy_iter);
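
/*
 * Usage sketch (illustrative; my_cancel_rq and ctrl are hypothetical
 * names): a driver can cancel everything in flight during controller
 * teardown with a callback along these lines:
 *
 *      static bool my_cancel_rq(struct request *rq, void *data, bool reserved)
 *      {
 *              blk_mq_complete_request(rq);
 *              return true;    // keep iterating
 *      }
 *
 *      blk_mq_tagset_busy_iter(&ctrl->tag_set, my_cancel_rq, NULL);
 */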

static bool blk_mq_tagset_count_completed_rqs(struct request *rq,
                void *data, bool reserved)
{
        unsigned *count = data;

        if (blk_mq_request_completed(rq))
                (*count)++;
        return true;
}

/**
 * blk_mq_tagset_wait_completed_request - wait until the completion function
 * of every completed request has run
 * @tagset:     Tag set to drain completed requests from
 *
 * Note: This function has to be run after all IO queues are shut down.
 */
void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset)
{
        while (true) {
                unsigned count = 0;

                blk_mq_tagset_busy_iter(tagset,
                                blk_mq_tagset_count_completed_rqs, &count);
                if (!count)
                        break;
                msleep(5);
        }
}
EXPORT_SYMBOL(blk_mq_tagset_wait_completed_request);
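
/*
 * Teardown ordering sketch (illustrative; my_hw_shutdown, my_cancel_rq
 * and ctrl are hypothetical): stop the hardware first so no new
 * completions arrive, cancel what is still in flight, then wait for all
 * completion handlers to finish before freeing resources:
 *
 *      my_hw_shutdown(ctrl);
 *      blk_mq_tagset_busy_iter(&ctrl->tag_set, my_cancel_rq, NULL);
 *      blk_mq_tagset_wait_completed_request(&ctrl->tag_set);
 */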

/**
 * blk_mq_queue_tag_busy_iter - iterate over all requests with a driver tag
 * @q:          Request queue to examine.
 * @fn:         Pointer to the function that will be called for each request
 *              on @q. @fn will be called as follows: @fn(hctx, rq, @priv,
 *              reserved) where rq is a pointer to a request and hctx points
 *              to the hardware queue associated with the request. 'reserved'
 *              indicates whether or not @rq is a reserved request.
 * @priv:       Will be passed as third argument to @fn.
 *
 * Note: if @q->tag_set is shared with other request queues then @fn will be
 * called for all requests on all queues that share that tag set and not only
 * for requests associated with @q.
 */
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
                void *priv)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        /*
         * __blk_mq_update_nr_hw_queues() updates nr_hw_queues and queue_hw_ctx
         * while the queue is frozen. So we can use q_usage_counter to avoid
         * racing with it. __blk_mq_update_nr_hw_queues() uses
         * synchronize_rcu() to ensure this function has left the critical
         * section below.
         */
        if (!percpu_ref_tryget(&q->q_usage_counter))
                return;

        queue_for_each_hw_ctx(q, hctx, i) {
                struct blk_mq_tags *tags = hctx->tags;

                /*
                 * If no software queues are currently mapped to this
                 * hardware queue, there's nothing to check
                 */
                if (!blk_mq_hw_queue_mapped(hctx))
                        continue;

                if (tags->nr_reserved_tags)
                        bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
                bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
        }
        blk_queue_exit(q);
}

static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
                    bool round_robin, int node)
{
        return sbitmap_queue_init_node(bt, depth, -1, round_robin, GFP_KERNEL,
                                       node);
}

static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
                                                   int node, int alloc_policy)
{
        unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;
        bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR;

        if (bt_alloc(&tags->bitmap_tags, depth, round_robin, node))
                goto free_tags;
        if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, round_robin,
                     node))
                goto free_bitmap_tags;

        return tags;
free_bitmap_tags:
        sbitmap_queue_free(&tags->bitmap_tags);
free_tags:
        kfree(tags);
        return NULL;
}

struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
                                     unsigned int reserved_tags,
                                     int node, int alloc_policy)
{
        struct blk_mq_tags *tags;

        if (total_tags > BLK_MQ_TAG_MAX) {
                pr_err("blk-mq: tag depth too large\n");
                return NULL;
        }

        tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
        if (!tags)
                return NULL;

        tags->nr_tags = total_tags;
        tags->nr_reserved_tags = reserved_tags;

        return blk_mq_init_bitmap_tags(tags, node, alloc_policy);
}

void blk_mq_free_tags(struct blk_mq_tags *tags)
{
        sbitmap_queue_free(&tags->bitmap_tags);
        sbitmap_queue_free(&tags->breserved_tags);
        kfree(tags);
}

int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
                            struct blk_mq_tags **tagsptr, unsigned int tdepth,
                            bool can_grow)
{
        struct blk_mq_tags *tags = *tagsptr;

        if (tdepth <= tags->nr_reserved_tags)
                return -EINVAL;

        /*
         * If we are allowed to grow beyond the original size, allocate
         * a new set of tags before freeing the old one.
         */
        if (tdepth > tags->nr_tags) {
                struct blk_mq_tag_set *set = hctx->queue->tag_set;
                struct blk_mq_tags *new;
                bool ret;

                if (!can_grow)
                        return -EINVAL;

                /*
                 * We need some sort of upper limit, set it high enough that
                 * no valid use cases should require more.
                 */
                if (tdepth > 16 * BLKDEV_MAX_RQ)
                        return -EINVAL;

                new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth,
                                tags->nr_reserved_tags);
                if (!new)
                        return -ENOMEM;
                ret = blk_mq_alloc_rqs(set, new, hctx->queue_num, tdepth);
                if (ret) {
                        blk_mq_free_rq_map(new);
                        return -ENOMEM;
                }

                blk_mq_free_rqs(set, *tagsptr, hctx->queue_num);
                blk_mq_free_rq_map(*tagsptr);
                *tagsptr = new;
        } else {
                /*
                 * We don't need to (and can't) update reserved tags here;
                 * they remain static and should never need resizing.
                 */
                sbitmap_queue_resize(&tags->bitmap_tags,
                                tdepth - tags->nr_reserved_tags);
        }

        return 0;
}
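
/*
 * Example (illustrative only): for a tag map created with nr_tags == 64,
 * updating the depth to 32 just resizes bitmap_tags in place, while
 * updating it to 128 allocates a whole new rq map and frees the old one,
 * and succeeds only if can_grow is true; otherwise the grow attempt
 * fails with -EINVAL.
 */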

/**
 * blk_mq_unique_tag() - return a tag that is unique queue-wide
 * @rq: request for which to compute a unique tag
 *
 * The tag field in struct request is unique per hardware queue but not over
 * all hardware queues. Hence this function returns a tag with the
 * hardware context index in the upper bits and the per hardware queue tag in
 * the lower bits.
 *
 * Note: When called for a request that is queued on a non-multiqueue request
 * queue, the hardware context index is set to zero.
 */
u32 blk_mq_unique_tag(struct request *rq)
{
        return (rq->mq_hctx->queue_num << BLK_MQ_UNIQUE_TAG_BITS) |
                (rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
}
EXPORT_SYMBOL(blk_mq_unique_tag);
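
/*
 * Example of the encoding (illustrative only): with
 * BLK_MQ_UNIQUE_TAG_BITS == 16, tag 7 on hardware queue 2 yields
 * (2 << 16) | 7 == 0x20007. The two halves can be recovered with
 * blk_mq_unique_tag_to_hwq() and blk_mq_unique_tag_to_tag() from
 * <linux/blk-mq.h>.
 */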
