Lines matching refs:hctx (identifier cross-reference over block/blk-mq.c; the leading number on each entry is the line in that source file)

36 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);
41 static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx) in blk_mq_hctx_has_pending() argument
45 for (i = 0; i < hctx->ctx_map.size; i++) in blk_mq_hctx_has_pending()
46 if (hctx->ctx_map.map[i].word) in blk_mq_hctx_has_pending()
52 static inline struct blk_align_bitmap *get_bm(struct blk_mq_hw_ctx *hctx, in get_bm() argument
55 return &hctx->ctx_map.map[ctx->index_hw / hctx->ctx_map.bits_per_word]; in get_bm()
58 #define CTX_TO_BIT(hctx, ctx) \ argument
59 ((ctx)->index_hw & ((hctx)->ctx_map.bits_per_word - 1))
64 static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx, in blk_mq_hctx_mark_pending() argument
67 struct blk_align_bitmap *bm = get_bm(hctx, ctx); in blk_mq_hctx_mark_pending()
69 if (!test_bit(CTX_TO_BIT(hctx, ctx), &bm->word)) in blk_mq_hctx_mark_pending()
70 set_bit(CTX_TO_BIT(hctx, ctx), &bm->word); in blk_mq_hctx_mark_pending()
73 static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx, in blk_mq_hctx_clear_pending() argument
76 struct blk_align_bitmap *bm = get_bm(hctx, ctx); in blk_mq_hctx_clear_pending()
78 clear_bit(CTX_TO_BIT(hctx, ctx), &bm->word); in blk_mq_hctx_clear_pending()
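
The entries above (blk_mq_hctx_has_pending(), get_bm(), CTX_TO_BIT and the mark/clear helpers) all manipulate the per-hardware-queue pending bitmap: one bit per software context, packed into words of ctx_map.bits_per_word bits. The userspace sketch below models only that indexing scheme; the names (pending_map, mark_pending and friends) are invented for illustration, the word size is fixed, and plain C bit operations replace the kernel's atomic set_bit()/clear_bit()/test_bit().

    /* Simplified model of the per-hctx "pending ctx" bitmap; invented names,
     * non-atomic bit operations, fixed word size. */
    #include <limits.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define BITS_PER_WORD (sizeof(unsigned long) * CHAR_BIT)

    struct pending_map {
            unsigned long *words;    /* one bit per software context */
            unsigned int nr_words;
    };

    /* CTX_TO_BIT analogue: bit position of a context inside its word. */
    static unsigned int ctx_to_bit(unsigned int index_hw)
    {
            return index_hw & (BITS_PER_WORD - 1);
    }

    /* get_bm analogue: the word that holds this context's bit. */
    static unsigned long *get_word(struct pending_map *map, unsigned int index_hw)
    {
            return &map->words[index_hw / BITS_PER_WORD];
    }

    static void mark_pending(struct pending_map *map, unsigned int index_hw)
    {
            *get_word(map, index_hw) |= 1UL << ctx_to_bit(index_hw);
    }

    static void clear_pending(struct pending_map *map, unsigned int index_hw)
    {
            *get_word(map, index_hw) &= ~(1UL << ctx_to_bit(index_hw));
    }

    /* blk_mq_hctx_has_pending analogue: is any word non-zero? */
    static bool has_pending(const struct pending_map *map)
    {
            for (unsigned int i = 0; i < map->nr_words; i++)
                    if (map->words[i])
                            return true;
            return false;
    }

    int main(void)
    {
            unsigned long word = 0;
            struct pending_map map = { &word, 1 };

            mark_pending(&map, 3);
            printf("pending after mark: %d\n", has_pending(&map));
            clear_pending(&map, 3);
            printf("pending after clear: %d\n", has_pending(&map));
            return 0;
    }
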
140 struct blk_mq_hw_ctx *hctx; in blk_mq_wake_waiters() local
143 queue_for_each_hw_ctx(q, hctx, i) in blk_mq_wake_waiters()
144 if (blk_mq_hw_queue_mapped(hctx)) in blk_mq_wake_waiters()
145 blk_mq_tag_wakeup_all(hctx->tags, true); in blk_mq_wake_waiters()
155 bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx) in blk_mq_can_queue() argument
157 return blk_mq_has_free_tags(hctx->tags); in blk_mq_can_queue()
217 rq = data->hctx->tags->rqs[tag]; in __blk_mq_alloc_request()
219 if (blk_mq_tag_busy(data->hctx)) { in __blk_mq_alloc_request()
221 atomic_inc(&data->hctx->nr_active); in __blk_mq_alloc_request()
236 struct blk_mq_hw_ctx *hctx; in blk_mq_alloc_request() local
246 hctx = q->mq_ops->map_queue(q, ctx->cpu); in blk_mq_alloc_request()
248 reserved, ctx, hctx); in blk_mq_alloc_request()
252 __blk_mq_run_hw_queue(hctx); in blk_mq_alloc_request()
256 hctx = q->mq_ops->map_queue(q, ctx->cpu); in blk_mq_alloc_request()
258 hctx); in blk_mq_alloc_request()
271 static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx, in __blk_mq_free_request() argument
278 atomic_dec(&hctx->nr_active); in __blk_mq_free_request()
282 blk_mq_put_tag(hctx, tag, &ctx->last_tag); in __blk_mq_free_request()
286 void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *hctx, struct request *rq) in blk_mq_free_hctx_request() argument
291 __blk_mq_free_request(hctx, ctx, rq); in blk_mq_free_hctx_request()
298 struct blk_mq_hw_ctx *hctx; in blk_mq_free_request() local
301 hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu); in blk_mq_free_request()
302 blk_mq_free_hctx_request(hctx, rq); in blk_mq_free_request()
594 static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx, in blk_mq_check_expired() argument
635 struct blk_mq_hw_ctx *hctx; in blk_mq_rq_timer() local
637 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_rq_timer()
639 if (blk_mq_hw_queue_mapped(hctx)) in blk_mq_rq_timer()
640 blk_mq_tag_idle(hctx); in blk_mq_rq_timer()
688 static void flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list) in flush_busy_ctxs() argument
693 for (i = 0; i < hctx->ctx_map.size; i++) { in flush_busy_ctxs()
694 struct blk_align_bitmap *bm = &hctx->ctx_map.map[i]; in flush_busy_ctxs()
701 off = i * hctx->ctx_map.bits_per_word; in flush_busy_ctxs()
707 ctx = hctx->ctxs[bit + off]; in flush_busy_ctxs()
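
flush_busy_ctxs() (688-707 above) is the consumer of that bitmap: it walks it word by word, turns every set bit back into a flat software-context index (off = i * bits_per_word, then ctxs[bit + off]) and moves that context's queued requests onto a local dispatch list. The standalone walk below models only the bit-to-index translation; names are invented, requests and locking are left out, and GCC/Clang's __builtin_ctzl stands in for the kernel's bit-search helpers.

    /* Bitmap walk modelled on flush_busy_ctxs(): report and clear every set
     * bit, translating (word index, bit) back into a context index. */
    #include <limits.h>
    #include <stdio.h>

    #define BITS_PER_WORD ((unsigned int)(sizeof(unsigned long) * CHAR_BIT))

    static void flush_pending(unsigned long *words, unsigned int nr_words)
    {
            for (unsigned int i = 0; i < nr_words; i++) {
                    unsigned int off = i * BITS_PER_WORD;  /* first ctx index in this word */
                    unsigned long word = words[i];

                    while (word) {
                            unsigned int bit = (unsigned int)__builtin_ctzl(word);

                            printf("ctx %u has pending requests\n", off + bit);
                            word &= ~(1UL << bit);         /* consume this bit */
                    }
                    words[i] = 0;                          /* all contexts drained */
            }
    }

    int main(void)
    {
            /* ctx 0 and 2 pending in word 0; with 64-bit longs, ctx 64 in word 1 */
            unsigned long map[2] = { 0x5UL, 0x1UL };

            flush_pending(map, 2);
            return 0;
    }
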
724 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx) in __blk_mq_run_hw_queue() argument
726 struct request_queue *q = hctx->queue; in __blk_mq_run_hw_queue()
733 WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask)); in __blk_mq_run_hw_queue()
735 if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state))) in __blk_mq_run_hw_queue()
738 hctx->run++; in __blk_mq_run_hw_queue()
743 flush_busy_ctxs(hctx, &rq_list); in __blk_mq_run_hw_queue()
749 if (!list_empty_careful(&hctx->dispatch)) { in __blk_mq_run_hw_queue()
750 spin_lock(&hctx->lock); in __blk_mq_run_hw_queue()
751 if (!list_empty(&hctx->dispatch)) in __blk_mq_run_hw_queue()
752 list_splice_init(&hctx->dispatch, &rq_list); in __blk_mq_run_hw_queue()
753 spin_unlock(&hctx->lock); in __blk_mq_run_hw_queue()
777 ret = q->mq_ops->queue_rq(hctx, &bd); in __blk_mq_run_hw_queue()
806 hctx->dispatched[0]++; in __blk_mq_run_hw_queue()
808 hctx->dispatched[ilog2(queued) + 1]++; in __blk_mq_run_hw_queue()
815 spin_lock(&hctx->lock); in __blk_mq_run_hw_queue()
816 list_splice(&rq_list, &hctx->dispatch); in __blk_mq_run_hw_queue()
817 spin_unlock(&hctx->lock); in __blk_mq_run_hw_queue()
827 blk_mq_run_hw_queue(hctx, true); in __blk_mq_run_hw_queue()
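
The __blk_mq_run_hw_queue() entries (724-827) outline the dispatch loop: the requests collected by flush_busy_ctxs(), plus anything parked on hctx->dispatch, are handed to the driver one at a time through q->mq_ops->queue_rq(); when the driver reports it is busy, the leftovers are spliced back onto hctx->dispatch and the queue is re-run later. The single-threaded model below reproduces only that control flow; the types, the fake_queue_rq() callback and the array standing in for the request list are all inventions of this sketch.

    /* Control-flow model of the blk-mq dispatch loop: hand requests to a
     * driver callback until it reports "busy", then park the rest for a
     * later re-run. */
    #include <stdio.h>

    enum queue_rq_ret { RQ_OK, RQ_BUSY };

    /* Stand-in for q->mq_ops->queue_rq(): pretend the device fills up at 3. */
    static enum queue_rq_ret fake_queue_rq(int tag)
    {
            static int in_flight;

            if (in_flight >= 3)
                    return RQ_BUSY;
            in_flight++;
            printf("issued request tag %d\n", tag);
            return RQ_OK;
    }

    /* Returns how many requests could not be issued (they would be spliced
     * back onto hctx->dispatch in the kernel). */
    static int run_hw_queue(int *tags, int nr)
    {
            int i, queued = 0;

            for (i = 0; i < nr; i++) {
                    if (fake_queue_rq(tags[i]) == RQ_BUSY)
                            break;                 /* device busy: stop dispatching */
                    queued++;
            }
            if (i < nr)
                    printf("%d requests left over, re-run the queue later\n", nr - i);
            return nr - queued;
    }

    int main(void)
    {
            int tags[] = { 11, 12, 13, 14, 15 };

            run_hw_queue(tags, 5);
            return 0;
    }
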
837 static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx) in blk_mq_hctx_next_cpu() argument
839 if (hctx->queue->nr_hw_queues == 1) in blk_mq_hctx_next_cpu()
842 if (--hctx->next_cpu_batch <= 0) { in blk_mq_hctx_next_cpu()
843 int cpu = hctx->next_cpu, next_cpu; in blk_mq_hctx_next_cpu()
845 next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask); in blk_mq_hctx_next_cpu()
847 next_cpu = cpumask_first(hctx->cpumask); in blk_mq_hctx_next_cpu()
849 hctx->next_cpu = next_cpu; in blk_mq_hctx_next_cpu()
850 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH; in blk_mq_hctx_next_cpu()
855 return hctx->next_cpu; in blk_mq_hctx_next_cpu()
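
blk_mq_hctx_next_cpu() (837-855) chooses the CPU on which kblockd work for this hardware queue is scheduled: the same hctx->next_cpu is handed out for BLK_MQ_CPU_WORK_BATCH calls, after which the queue rotates to the next CPU in hctx->cpumask, wrapping around to the first. The sketch below models that batched round robin with a plain index array in place of a cpumask and omits the single-hardware-queue shortcut; all names are invented.

    /* Batched round-robin CPU selection modelled on blk_mq_hctx_next_cpu(). */
    #include <stdio.h>

    #define WORK_BATCH 8   /* stands in for BLK_MQ_CPU_WORK_BATCH */

    struct cpu_rotor {
            const int *cpus;   /* CPUs this hw queue may run on (cpumask stand-in) */
            int nr_cpus;
            int next;          /* index of the CPU currently handed out */
            int batch;         /* calls left before rotating */
    };

    static int next_cpu(struct cpu_rotor *r)
    {
            if (--r->batch <= 0) {
                    int cpu = r->cpus[r->next];            /* hand out the old CPU... */

                    r->next = (r->next + 1) % r->nr_cpus;  /* ...then rotate */
                    r->batch = WORK_BATCH;
                    return cpu;
            }
            return r->cpus[r->next];
    }

    int main(void)
    {
            static const int cpus[] = { 2, 3, 6, 7 };
            struct cpu_rotor r = { cpus, 4, 0, WORK_BATCH };

            for (int i = 0; i < 20; i++)
                    printf("work %d -> cpu %d\n", i, next_cpu(&r));
            return 0;
    }
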
858 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) in blk_mq_run_hw_queue() argument
860 if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state) || in blk_mq_run_hw_queue()
861 !blk_mq_hw_queue_mapped(hctx))) in blk_mq_run_hw_queue()
866 if (cpumask_test_cpu(cpu, hctx->cpumask)) { in blk_mq_run_hw_queue()
867 __blk_mq_run_hw_queue(hctx); in blk_mq_run_hw_queue()
875 kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx), in blk_mq_run_hw_queue()
876 &hctx->run_work, 0); in blk_mq_run_hw_queue()
881 struct blk_mq_hw_ctx *hctx; in blk_mq_run_hw_queues() local
884 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_run_hw_queues()
885 if ((!blk_mq_hctx_has_pending(hctx) && in blk_mq_run_hw_queues()
886 list_empty_careful(&hctx->dispatch)) || in blk_mq_run_hw_queues()
887 test_bit(BLK_MQ_S_STOPPED, &hctx->state)) in blk_mq_run_hw_queues()
890 blk_mq_run_hw_queue(hctx, async); in blk_mq_run_hw_queues()
895 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx) in blk_mq_stop_hw_queue() argument
897 cancel_delayed_work(&hctx->run_work); in blk_mq_stop_hw_queue()
898 cancel_delayed_work(&hctx->delay_work); in blk_mq_stop_hw_queue()
899 set_bit(BLK_MQ_S_STOPPED, &hctx->state); in blk_mq_stop_hw_queue()
905 struct blk_mq_hw_ctx *hctx; in blk_mq_stop_hw_queues() local
908 queue_for_each_hw_ctx(q, hctx, i) in blk_mq_stop_hw_queues()
909 blk_mq_stop_hw_queue(hctx); in blk_mq_stop_hw_queues()
913 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx) in blk_mq_start_hw_queue() argument
915 clear_bit(BLK_MQ_S_STOPPED, &hctx->state); in blk_mq_start_hw_queue()
917 blk_mq_run_hw_queue(hctx, false); in blk_mq_start_hw_queue()
923 struct blk_mq_hw_ctx *hctx; in blk_mq_start_hw_queues() local
926 queue_for_each_hw_ctx(q, hctx, i) in blk_mq_start_hw_queues()
927 blk_mq_start_hw_queue(hctx); in blk_mq_start_hw_queues()
933 struct blk_mq_hw_ctx *hctx; in blk_mq_start_stopped_hw_queues() local
936 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_start_stopped_hw_queues()
937 if (!test_bit(BLK_MQ_S_STOPPED, &hctx->state)) in blk_mq_start_stopped_hw_queues()
940 clear_bit(BLK_MQ_S_STOPPED, &hctx->state); in blk_mq_start_stopped_hw_queues()
941 blk_mq_run_hw_queue(hctx, async); in blk_mq_start_stopped_hw_queues()
948 struct blk_mq_hw_ctx *hctx; in blk_mq_run_work_fn() local
950 hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work); in blk_mq_run_work_fn()
952 __blk_mq_run_hw_queue(hctx); in blk_mq_run_work_fn()
957 struct blk_mq_hw_ctx *hctx; in blk_mq_delay_work_fn() local
959 hctx = container_of(work, struct blk_mq_hw_ctx, delay_work.work); in blk_mq_delay_work_fn()
961 if (test_and_clear_bit(BLK_MQ_S_STOPPED, &hctx->state)) in blk_mq_delay_work_fn()
962 __blk_mq_run_hw_queue(hctx); in blk_mq_delay_work_fn()
965 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs) in blk_mq_delay_queue() argument
967 if (unlikely(!blk_mq_hw_queue_mapped(hctx))) in blk_mq_delay_queue()
970 kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx), in blk_mq_delay_queue()
971 &hctx->delay_work, msecs_to_jiffies(msecs)); in blk_mq_delay_queue()
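
Lines 895-971 show the stop/start protocol built around the BLK_MQ_S_STOPPED bit: stopping a hardware queue cancels its pending run_work/delay_work and sets the bit, __blk_mq_run_hw_queue() refuses to dispatch while the bit is set, and starting clears the bit and re-runs the queue (blk_mq_delay_queue() instead schedules the re-run on a kblockd worker after a delay). A deliberately tiny model of that state machine, with a plain bool in place of the atomic state bit and direct calls in place of delayed work:

    /* STOPPED-bit protocol in miniature; invented names, no work queues. */
    #include <stdbool.h>
    #include <stdio.h>

    struct hw_queue_state {
            bool stopped;      /* stands in for BLK_MQ_S_STOPPED in hctx->state */
    };

    static void run_hw_queue(struct hw_queue_state *h)
    {
            if (h->stopped) {  /* __blk_mq_run_hw_queue() checks this first */
                    printf("queue stopped, not dispatching\n");
                    return;
            }
            printf("dispatching requests\n");
    }

    static void stop_hw_queue(struct hw_queue_state *h)
    {
            /* the kernel also cancels run_work/delay_work here */
            h->stopped = true;
    }

    static void start_hw_queue(struct hw_queue_state *h)
    {
            h->stopped = false;
            run_hw_queue(h);   /* blk_mq_start_hw_queue() re-runs the queue */
    }

    int main(void)
    {
            struct hw_queue_state h = { false };

            run_hw_queue(&h);
            stop_hw_queue(&h);
            run_hw_queue(&h);
            start_hw_queue(&h);
            return 0;
    }
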
975 static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx, in __blk_mq_insert_req_list() argument
980 trace_block_rq_insert(hctx->queue, rq); in __blk_mq_insert_req_list()
988 static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, in __blk_mq_insert_request() argument
993 __blk_mq_insert_req_list(hctx, ctx, rq, at_head); in __blk_mq_insert_request()
994 blk_mq_hctx_mark_pending(hctx, ctx); in __blk_mq_insert_request()
1001 struct blk_mq_hw_ctx *hctx; in blk_mq_insert_request() local
1008 hctx = q->mq_ops->map_queue(q, ctx->cpu); in blk_mq_insert_request()
1011 __blk_mq_insert_request(hctx, rq, at_head); in blk_mq_insert_request()
1015 blk_mq_run_hw_queue(hctx, async); in blk_mq_insert_request()
1027 struct blk_mq_hw_ctx *hctx; in blk_mq_insert_requests() local
1036 hctx = q->mq_ops->map_queue(q, ctx->cpu); in blk_mq_insert_requests()
1049 __blk_mq_insert_req_list(hctx, ctx, rq, false); in blk_mq_insert_requests()
1051 blk_mq_hctx_mark_pending(hctx, ctx); in blk_mq_insert_requests()
1054 blk_mq_run_hw_queue(hctx, from_schedule); in blk_mq_insert_requests()
1123 static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx) in hctx_allow_merges() argument
1125 return (hctx->flags & BLK_MQ_F_SHOULD_MERGE) && in hctx_allow_merges()
1126 !blk_queue_nomerges(hctx->queue); in hctx_allow_merges()
1129 static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx, in blk_mq_merge_queue_io() argument
1133 if (!hctx_allow_merges(hctx) || !bio_mergeable(bio)) { in blk_mq_merge_queue_io()
1137 __blk_mq_insert_request(hctx, rq, false); in blk_mq_merge_queue_io()
1141 struct request_queue *q = hctx->queue; in blk_mq_merge_queue_io()
1150 __blk_mq_free_request(hctx, ctx, rq); in blk_mq_merge_queue_io()
1156 struct blk_mq_hw_ctx *hctx; member
1164 struct blk_mq_hw_ctx *hctx; in blk_mq_map_request() local
1172 hctx = q->mq_ops->map_queue(q, ctx->cpu); in blk_mq_map_request()
1179 hctx); in blk_mq_map_request()
1182 __blk_mq_run_hw_queue(hctx); in blk_mq_map_request()
1187 hctx = q->mq_ops->map_queue(q, ctx->cpu); in blk_mq_map_request()
1189 __GFP_RECLAIM|__GFP_HIGH, false, ctx, hctx); in blk_mq_map_request()
1192 hctx = alloc_data.hctx; in blk_mq_map_request()
1195 hctx->queued++; in blk_mq_map_request()
1196 data->hctx = hctx; in blk_mq_map_request()
1205 struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, in blk_mq_direct_issue_request() local
1212 blk_qc_t new_cookie = blk_tag_to_qc_t(rq->tag, hctx->queue_num); in blk_mq_direct_issue_request()
1219 ret = q->mq_ops->queue_rq(hctx, &bd); in blk_mq_direct_issue_request()
1273 cookie = blk_tag_to_qc_t(rq->tag, data.hctx->queue_num); in blk_mq_make_request()
1288 !(data.hctx->flags & BLK_MQ_F_DEFER_ISSUE)) { in blk_mq_make_request()
1320 if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) { in blk_mq_make_request()
1328 blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua); in blk_mq_make_request()
1366 cookie = blk_tag_to_qc_t(rq->tag, data.hctx->queue_num); in blk_sq_make_request()
1396 if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) { in blk_sq_make_request()
1404 blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua); in blk_sq_make_request()
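
blk_mq_make_request() and blk_sq_make_request() (1273-1404) return a blk_qc_t cookie built by blk_tag_to_qc_t(rq->tag, hctx->queue_num), so that later polling can locate the hardware queue and tag again. In kernels of this vintage the cookie is simply the tag in the low bits with the queue number shifted above it; the 16-bit split used below is restated here as an assumption, and every fake_-prefixed name is an invention of this sketch.

    /* Pack/unpack model of the blk_qc_t cookie returned by the make_request
     * paths; assumes a 16-bit tag field. */
    #include <assert.h>
    #include <stdio.h>

    #define FAKE_QC_T_SHIFT 16   /* stands in for BLK_QC_T_SHIFT */

    static unsigned int fake_tag_to_cookie(unsigned int tag, unsigned int queue_num)
    {
            return tag | (queue_num << FAKE_QC_T_SHIFT);
    }

    static unsigned int fake_cookie_to_tag(unsigned int cookie)
    {
            return cookie & ((1U << FAKE_QC_T_SHIFT) - 1);
    }

    static unsigned int fake_cookie_to_queue_num(unsigned int cookie)
    {
            return cookie >> FAKE_QC_T_SHIFT;
    }

    int main(void)
    {
            unsigned int cookie = fake_tag_to_cookie(42, 3);

            assert(fake_cookie_to_tag(cookie) == 42);
            assert(fake_cookie_to_queue_num(cookie) == 3);
            printf("cookie 0x%x -> tag %u, hw queue %u\n", cookie,
                   fake_cookie_to_tag(cookie), fake_cookie_to_queue_num(cookie));
            return 0;
    }
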
1573 static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu) in blk_mq_hctx_cpu_offline() argument
1575 struct request_queue *q = hctx->queue; in blk_mq_hctx_cpu_offline()
1587 blk_mq_hctx_clear_pending(hctx, ctx); in blk_mq_hctx_cpu_offline()
1605 hctx = q->mq_ops->map_queue(q, ctx->cpu); in blk_mq_hctx_cpu_offline()
1606 blk_mq_hctx_mark_pending(hctx, ctx); in blk_mq_hctx_cpu_offline()
1610 blk_mq_run_hw_queue(hctx, true); in blk_mq_hctx_cpu_offline()
1618 struct blk_mq_hw_ctx *hctx = data; in blk_mq_hctx_notify() local
1621 return blk_mq_hctx_cpu_offline(hctx, cpu); in blk_mq_hctx_notify()
1634 struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) in blk_mq_exit_hctx() argument
1638 blk_mq_tag_idle(hctx); in blk_mq_exit_hctx()
1642 hctx->fq->flush_rq, hctx_idx, in blk_mq_exit_hctx()
1646 set->ops->exit_hctx(hctx, hctx_idx); in blk_mq_exit_hctx()
1648 blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier); in blk_mq_exit_hctx()
1649 blk_free_flush_queue(hctx->fq); in blk_mq_exit_hctx()
1650 blk_mq_free_bitmap(&hctx->ctx_map); in blk_mq_exit_hctx()
1656 struct blk_mq_hw_ctx *hctx; in blk_mq_exit_hw_queues() local
1659 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_exit_hw_queues()
1662 blk_mq_exit_hctx(q, set, hctx, i); in blk_mq_exit_hw_queues()
1669 struct blk_mq_hw_ctx *hctx; in blk_mq_free_hw_queues() local
1672 queue_for_each_hw_ctx(q, hctx, i) in blk_mq_free_hw_queues()
1673 free_cpumask_var(hctx->cpumask); in blk_mq_free_hw_queues()
1678 struct blk_mq_hw_ctx *hctx, unsigned hctx_idx) in blk_mq_init_hctx() argument
1683 node = hctx->numa_node; in blk_mq_init_hctx()
1685 node = hctx->numa_node = set->numa_node; in blk_mq_init_hctx()
1687 INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn); in blk_mq_init_hctx()
1688 INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn); in blk_mq_init_hctx()
1689 spin_lock_init(&hctx->lock); in blk_mq_init_hctx()
1690 INIT_LIST_HEAD(&hctx->dispatch); in blk_mq_init_hctx()
1691 hctx->queue = q; in blk_mq_init_hctx()
1692 hctx->queue_num = hctx_idx; in blk_mq_init_hctx()
1693 hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED; in blk_mq_init_hctx()
1695 blk_mq_init_cpu_notifier(&hctx->cpu_notifier, in blk_mq_init_hctx()
1696 blk_mq_hctx_notify, hctx); in blk_mq_init_hctx()
1697 blk_mq_register_cpu_notifier(&hctx->cpu_notifier); in blk_mq_init_hctx()
1699 hctx->tags = set->tags[hctx_idx]; in blk_mq_init_hctx()
1705 hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *), in blk_mq_init_hctx()
1707 if (!hctx->ctxs) in blk_mq_init_hctx()
1710 if (blk_mq_alloc_bitmap(&hctx->ctx_map, node)) in blk_mq_init_hctx()
1713 hctx->nr_ctx = 0; in blk_mq_init_hctx()
1716 set->ops->init_hctx(hctx, set->driver_data, hctx_idx)) in blk_mq_init_hctx()
1719 hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size); in blk_mq_init_hctx()
1720 if (!hctx->fq) in blk_mq_init_hctx()
1725 hctx->fq->flush_rq, hctx_idx, in blk_mq_init_hctx()
1732 kfree(hctx->fq); in blk_mq_init_hctx()
1735 set->ops->exit_hctx(hctx, hctx_idx); in blk_mq_init_hctx()
1737 blk_mq_free_bitmap(&hctx->ctx_map); in blk_mq_init_hctx()
1739 kfree(hctx->ctxs); in blk_mq_init_hctx()
1741 blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier); in blk_mq_init_hctx()
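
The tail of blk_mq_init_hctx() (1719-1741) follows the kernel's usual goto-based unwind: each resource acquired on the way down (ctxs array, ctx_map bitmap, driver init_hctx, flush queue) has a matching label that releases it, and a failure jumps to the label of the most recently acquired resource so everything earlier is torn down in reverse order. A generic, self-contained rendition of that pattern with made-up resources:

    /* goto-based error unwind in the style of blk_mq_init_hctx(); the three
     * malloc()s are stand-ins for the ctxs array, the ctx_map bitmap and the
     * flush queue. */
    #include <stdio.h>
    #include <stdlib.h>

    static int init_fake_hctx(void)
    {
            void *ctxs, *bitmap, *flushq;

            ctxs = malloc(64);
            if (!ctxs)
                    goto fail;

            bitmap = malloc(64);
            if (!bitmap)
                    goto free_ctxs;

            flushq = malloc(64);
            if (!flushq)
                    goto free_bitmap;

            printf("hctx initialised\n");
            /* Real code keeps these in the hctx; freed here only so the
             * sketch does not leak. */
            free(flushq);
            free(bitmap);
            free(ctxs);
            return 0;

    free_bitmap:
            free(bitmap);
    free_ctxs:
            free(ctxs);
    fail:
            return -1;
    }

    int main(void)
    {
            return init_fake_hctx() ? EXIT_FAILURE : EXIT_SUCCESS;
    }
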
1749 struct blk_mq_hw_ctx *hctx; in blk_mq_init_hw_queues() local
1755 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_init_hw_queues()
1756 if (blk_mq_init_hctx(q, set, hctx, i)) in blk_mq_init_hw_queues()
1778 struct blk_mq_hw_ctx *hctx; in blk_mq_init_cpu_queues() local
1790 hctx = q->mq_ops->map_queue(q, i); in blk_mq_init_cpu_queues()
1796 if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE) in blk_mq_init_cpu_queues()
1797 hctx->numa_node = cpu_to_node(i); in blk_mq_init_cpu_queues()
1805 struct blk_mq_hw_ctx *hctx; in blk_mq_map_swqueue() local
1814 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_map_swqueue()
1815 cpumask_clear(hctx->cpumask); in blk_mq_map_swqueue()
1816 hctx->nr_ctx = 0; in blk_mq_map_swqueue()
1827 hctx = q->mq_ops->map_queue(q, i); in blk_mq_map_swqueue()
1828 cpumask_set_cpu(i, hctx->cpumask); in blk_mq_map_swqueue()
1829 ctx->index_hw = hctx->nr_ctx; in blk_mq_map_swqueue()
1830 hctx->ctxs[hctx->nr_ctx++] = ctx; in blk_mq_map_swqueue()
1835 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_map_swqueue()
1836 struct blk_mq_ctxmap *map = &hctx->ctx_map; in blk_mq_map_swqueue()
1842 if (!hctx->nr_ctx) { in blk_mq_map_swqueue()
1847 hctx->tags = NULL; in blk_mq_map_swqueue()
1854 hctx->tags = set->tags[i]; in blk_mq_map_swqueue()
1855 WARN_ON(!hctx->tags); in blk_mq_map_swqueue()
1862 map->size = DIV_ROUND_UP(hctx->nr_ctx, map->bits_per_word); in blk_mq_map_swqueue()
1867 hctx->next_cpu = cpumask_first(hctx->cpumask); in blk_mq_map_swqueue()
1868 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH; in blk_mq_map_swqueue()
1875 hctx = q->mq_ops->map_queue(q, i); in blk_mq_map_swqueue()
1876 cpumask_set_cpu(i, hctx->tags->cpumask); in blk_mq_map_swqueue()
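
blk_mq_map_swqueue() (1805-1876) binds every per-CPU software context to one hardware context: the driver's map_queue() callback picks the hctx for each CPU, the context records its slot as index_hw, the hctx collects the context pointers in ctxs[] and counts them in nr_ctx, and the pending bitmap is then sized as DIV_ROUND_UP(nr_ctx, bits_per_word). The sketch below substitutes a simple cpu % nr_hw_queues rule for map_queue() and uses invented types throughout.

    /* Software-to-hardware queue mapping modelled on blk_mq_map_swqueue(). */
    #include <stdio.h>

    #define NR_CPUS       8
    #define NR_HW_QUEUES  3
    #define BITS_PER_WORD 64
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    struct fake_ctx  { int cpu; int index_hw; };
    struct fake_hctx { int nr_ctx; int map_words; struct fake_ctx *ctxs[NR_CPUS]; };

    int main(void)
    {
            static struct fake_ctx  ctx[NR_CPUS];
            static struct fake_hctx hctx[NR_HW_QUEUES];

            for (int cpu = 0; cpu < NR_CPUS; cpu++) {
                    /* cpu % NR_HW_QUEUES stands in for map_queue() */
                    struct fake_hctx *h = &hctx[cpu % NR_HW_QUEUES];

                    ctx[cpu].cpu = cpu;
                    ctx[cpu].index_hw = h->nr_ctx;   /* slot inside this hw queue */
                    h->ctxs[h->nr_ctx++] = &ctx[cpu];
            }

            for (int i = 0; i < NR_HW_QUEUES; i++) {
                    hctx[i].map_words = DIV_ROUND_UP(hctx[i].nr_ctx, BITS_PER_WORD);
                    printf("hw queue %d: %d software ctxs, %d bitmap word(s)\n",
                           i, hctx[i].nr_ctx, hctx[i].map_words);
            }
            return 0;
    }
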
1882 struct blk_mq_hw_ctx *hctx; in queue_set_hctx_shared() local
1885 queue_for_each_hw_ctx(q, hctx, i) { in queue_set_hctx_shared()
1887 hctx->flags |= BLK_MQ_F_TAG_SHARED; in queue_set_hctx_shared()
1889 hctx->flags &= ~BLK_MQ_F_TAG_SHARED; in queue_set_hctx_shared()
1947 struct blk_mq_hw_ctx *hctx; in blk_mq_release() local
1951 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_release()
1952 if (!hctx) in blk_mq_release()
1954 kfree(hctx->ctxs); in blk_mq_release()
1955 kfree(hctx); in blk_mq_release()
2331 struct blk_mq_hw_ctx *hctx; in blk_mq_update_nr_requests() local
2338 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_update_nr_requests()
2339 ret = blk_mq_tag_update_depth(hctx->tags, nr); in blk_mq_update_nr_requests()