Lines matching refs: q (struct request_queue sysfs show/store handlers and queue registration code)
43 static ssize_t queue_requests_show(struct request_queue *q, char *page) in queue_requests_show() argument
45 return queue_var_show(q->nr_requests, (page)); in queue_requests_show()
49 queue_requests_store(struct request_queue *q, const char *page, size_t count) in queue_requests_store() argument
54 if (!q->request_fn && !q->mq_ops) in queue_requests_store()
64 if (q->request_fn) in queue_requests_store()
65 err = blk_update_nr_requests(q, nr); in queue_requests_store()
67 err = blk_mq_update_nr_requests(q, nr); in queue_requests_store()
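Note: every show/store handler in this listing funnels through a pair of small helpers in the same file. They take no request_queue argument, so they do not appear in a search for q; roughly (exact bodies vary slightly by kernel version):

	static ssize_t queue_var_show(unsigned long var, char *page)
	{
		return sprintf(page, "%lu\n", var);
	}

	static ssize_t queue_var_store(unsigned long *var, const char *page,
				       size_t count)
	{
		unsigned long v;
		int err;

		err = kstrtoul(page, 10, &v);
		if (err || v > UINT_MAX)
			return -EINVAL;

		*var = v;
		return count;
	}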
75 static ssize_t queue_ra_show(struct request_queue *q, char *page) in queue_ra_show() argument
77 unsigned long ra_kb = q->backing_dev_info.ra_pages << in queue_ra_show()
84 queue_ra_store(struct request_queue *q, const char *page, size_t count) in queue_ra_store() argument
92 q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10); in queue_ra_store()
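read_ahead_kb is stored internally as a page count in backing_dev_info.ra_pages, so both handlers convert between pages and kilobytes with the PAGE_CACHE_SHIFT - 10 shift. A worked example with 4 KiB pages (illustrative values only):

	/* Illustrative: PAGE_CACHE_SHIFT == 12, so the shift amount is 2. */
	unsigned long ra_pages = 128;
	unsigned long ra_kb = ra_pages << (PAGE_CACHE_SHIFT - 10);	/* 512 KB */

	/* Writing "256" to read_ahead_kb goes the other way: */
	ra_pages = 256 >> (PAGE_CACHE_SHIFT - 10);			/* 64 pages */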
97 static ssize_t queue_max_sectors_show(struct request_queue *q, char *page) in queue_max_sectors_show() argument
99 int max_sectors_kb = queue_max_sectors(q) >> 1; in queue_max_sectors_show()
104 static ssize_t queue_max_segments_show(struct request_queue *q, char *page) in queue_max_segments_show() argument
106 return queue_var_show(queue_max_segments(q), (page)); in queue_max_segments_show()
109 static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page) in queue_max_integrity_segments_show() argument
111 return queue_var_show(q->limits.max_integrity_segments, (page)); in queue_max_integrity_segments_show()
114 static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page) in queue_max_segment_size_show() argument
116 if (blk_queue_cluster(q)) in queue_max_segment_size_show()
117 return queue_var_show(queue_max_segment_size(q), (page)); in queue_max_segment_size_show()
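The branch on blk_queue_cluster() has a fallback that does not reference q and is therefore elided above; when segment clustering is disabled the reported maximum segment size is a single page, roughly:

	if (blk_queue_cluster(q))
		return queue_var_show(queue_max_segment_size(q), (page));

	return queue_var_show(PAGE_CACHE_SIZE, (page));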
122 static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page) in queue_logical_block_size_show() argument
124 return queue_var_show(queue_logical_block_size(q), page); in queue_logical_block_size_show()
127 static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page) in queue_physical_block_size_show() argument
129 return queue_var_show(queue_physical_block_size(q), page); in queue_physical_block_size_show()
132 static ssize_t queue_io_min_show(struct request_queue *q, char *page) in queue_io_min_show() argument
134 return queue_var_show(queue_io_min(q), page); in queue_io_min_show()
137 static ssize_t queue_io_opt_show(struct request_queue *q, char *page) in queue_io_opt_show() argument
139 return queue_var_show(queue_io_opt(q), page); in queue_io_opt_show()
142 static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page) in queue_discard_granularity_show() argument
144 return queue_var_show(q->limits.discard_granularity, page); in queue_discard_granularity_show()
147 static ssize_t queue_discard_max_show(struct request_queue *q, char *page) in queue_discard_max_show() argument
150 (unsigned long long)q->limits.max_discard_sectors << 9); in queue_discard_max_show()
153 static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page) in queue_discard_zeroes_data_show() argument
155 return queue_var_show(queue_discard_zeroes_data(q), page); in queue_discard_zeroes_data_show()
158 static ssize_t queue_write_same_max_show(struct request_queue *q, char *page) in queue_write_same_max_show() argument
161 (unsigned long long)q->limits.max_write_same_sectors << 9); in queue_write_same_max_show()
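discard_max_bytes and write_same_max_bytes are reported in bytes while the limits are kept in 512-byte sectors, hence the << 9. The sprintf lines above are truncated by the q filter; the full shape of one of these handlers is roughly:

	static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
	{
		/* 512-byte sectors -> bytes */
		return sprintf(page, "%llu\n",
			       (unsigned long long)q->limits.max_discard_sectors << 9);
	}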
166 queue_max_sectors_store(struct request_queue *q, const char *page, size_t count) in queue_max_sectors_store() argument
169 max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1, in queue_max_sectors_store()
179 spin_lock_irq(q->queue_lock); in queue_max_sectors_store()
180 q->limits.max_sectors = max_sectors_kb << 1; in queue_max_sectors_store()
181 spin_unlock_irq(q->queue_lock); in queue_max_sectors_store()
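Only the q-referencing lines of queue_max_sectors_store() appear above. The elided logic clamps the new max_sectors_kb between one page and the hardware limit before applying it under queue_lock; a hedged reconstruction for kernels of this vintage:

	static ssize_t
	queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
	{
		unsigned long max_sectors_kb,
			max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
				page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
		ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

		if (ret < 0)
			return ret;

		if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
			return -EINVAL;

		spin_lock_irq(q->queue_lock);
		q->limits.max_sectors = max_sectors_kb << 1;	/* KB -> 512-byte sectors */
		spin_unlock_irq(q->queue_lock);

		return ret;
	}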
186 static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page) in queue_max_hw_sectors_show() argument
188 int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1; in queue_max_hw_sectors_show()
195 queue_show_##name(struct request_queue *q, char *page) \
198 bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags); \
202 queue_store_##name(struct request_queue *q, const char *page, size_t count) \
212 spin_lock_irq(q->queue_lock); \
214 queue_flag_set(QUEUE_FLAG_##flag, q); \
216 queue_flag_clear(QUEUE_FLAG_##flag, q); \
217 spin_unlock_irq(q->queue_lock); \
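QUEUE_SYSFS_BIT_FNS() stamps out a matched show/store pair for one queue flag; the neg parameter inverts the bit for attributes whose sysfs polarity is the opposite of the internal flag. In this file it is typically instantiated as follows (the exact attribute set depends on the kernel version):

	QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);		/* "rotational" = !NONROT */
	QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);	/* add_random */
	QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);	/* iostats */
	#undef QUEUE_SYSFS_BIT_FNS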
226 static ssize_t queue_nomerges_show(struct request_queue *q, char *page) in queue_nomerges_show() argument
228 return queue_var_show((blk_queue_nomerges(q) << 1) | in queue_nomerges_show()
229 blk_queue_noxmerges(q), page); in queue_nomerges_show()
232 static ssize_t queue_nomerges_store(struct request_queue *q, const char *page, in queue_nomerges_store() argument
241 spin_lock_irq(q->queue_lock); in queue_nomerges_store()
242 queue_flag_clear(QUEUE_FLAG_NOMERGES, q); in queue_nomerges_store()
243 queue_flag_clear(QUEUE_FLAG_NOXMERGES, q); in queue_nomerges_store()
245 queue_flag_set(QUEUE_FLAG_NOMERGES, q); in queue_nomerges_store()
247 queue_flag_set(QUEUE_FLAG_NOXMERGES, q); in queue_nomerges_store()
248 spin_unlock_irq(q->queue_lock); in queue_nomerges_store()
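The nomerges pair multiplexes one small integer over two flags: 0 leaves merging fully enabled, 1 sets only QUEUE_FLAG_NOXMERGES (skip the costlier extended merge lookups but still allow simple one-hit merges), and 2 sets QUEUE_FLAG_NOMERGES (no merging at all). The elided branches of the store path are roughly:

	if (nm == 2)
		queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else if (nm)
		queue_flag_set(QUEUE_FLAG_NOXMERGES, q);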
253 static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page) in queue_rq_affinity_show() argument
255 bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags); in queue_rq_affinity_show()
256 bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags); in queue_rq_affinity_show()
262 queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count) in queue_rq_affinity_store() argument
272 spin_lock_irq(q->queue_lock); in queue_rq_affinity_store()
274 queue_flag_set(QUEUE_FLAG_SAME_COMP, q); in queue_rq_affinity_store()
275 queue_flag_set(QUEUE_FLAG_SAME_FORCE, q); in queue_rq_affinity_store()
277 queue_flag_set(QUEUE_FLAG_SAME_COMP, q); in queue_rq_affinity_store()
278 queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q); in queue_rq_affinity_store()
280 queue_flag_clear(QUEUE_FLAG_SAME_COMP, q); in queue_rq_affinity_store()
281 queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q); in queue_rq_affinity_store()
283 spin_unlock_irq(q->queue_lock); in queue_rq_affinity_store()
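rq_affinity follows the same pattern with the completion-affinity flags: 0 clears both (complete on any CPU), 1 sets only QUEUE_FLAG_SAME_COMP (complete in the submitting CPU's cache-sharing group), and 2 additionally sets QUEUE_FLAG_SAME_FORCE (force completion on the exact submitting CPU); the show side encodes this back as set << force. The elided value checks in the store path are roughly:

	if (val == 2) {
		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 1) {
		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	} else {
		queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	}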
439 struct request_queue *q = in queue_attr_show() local
445 mutex_lock(&q->sysfs_lock); in queue_attr_show()
446 if (blk_queue_dying(q)) { in queue_attr_show()
447 mutex_unlock(&q->sysfs_lock); in queue_attr_show()
450 res = entry->show(q, page); in queue_attr_show()
451 mutex_unlock(&q->sysfs_lock); in queue_attr_show()
460 struct request_queue *q; in queue_attr_store() local
466 q = container_of(kobj, struct request_queue, kobj); in queue_attr_store()
467 mutex_lock(&q->sysfs_lock); in queue_attr_store()
468 if (blk_queue_dying(q)) { in queue_attr_store()
469 mutex_unlock(&q->sysfs_lock); in queue_attr_store()
472 res = entry->store(q, page, length); in queue_attr_store()
473 mutex_unlock(&q->sysfs_lock); in queue_attr_store()
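queue_attr_show() and queue_attr_store() are the generic dispatchers behind all of the attributes above: sysfs invokes them through the queue's kobj_type, they recover the request_queue from the embedded kobject, take sysfs_lock, return -ENOENT if the queue is dying, and otherwise call the per-attribute show/store method. The wiring looks roughly like this in the same file:

	static const struct sysfs_ops queue_sysfs_ops = {
		.show	= queue_attr_show,
		.store	= queue_attr_store,
	};

	struct kobj_type blk_queue_ktype = {
		.sysfs_ops	= &queue_sysfs_ops,
		.default_attrs	= default_attrs,
		.release	= blk_release_queue,
	};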
479 struct request_queue *q = container_of(rcu_head, struct request_queue, in blk_free_queue_rcu() local
481 kmem_cache_free(blk_requestq_cachep, q); in blk_free_queue_rcu()
501 struct request_queue *q = in blk_release_queue() local
504 blkcg_exit_queue(q); in blk_release_queue()
506 if (q->elevator) { in blk_release_queue()
507 spin_lock_irq(q->queue_lock); in blk_release_queue()
508 ioc_clear_queue(q); in blk_release_queue()
509 spin_unlock_irq(q->queue_lock); in blk_release_queue()
510 elevator_exit(q->elevator); in blk_release_queue()
513 blk_exit_rl(&q->root_rl); in blk_release_queue()
515 if (q->queue_tags) in blk_release_queue()
516 __blk_queue_free_tags(q); in blk_release_queue()
518 if (!q->mq_ops) in blk_release_queue()
519 blk_free_flush_queue(q->fq); in blk_release_queue()
521 blk_mq_release(q); in blk_release_queue()
523 blk_trace_shutdown(q); in blk_release_queue()
525 ida_simple_remove(&blk_queue_ida, q->id); in blk_release_queue()
526 call_rcu(&q->rcu_head, blk_free_queue_rcu); in blk_release_queue()
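blk_release_queue() is the kobj_type release hook, so it runs only after the last reference to q->kobj is dropped; it tears down blkcg state, the elevator, the request list, tag and flush machinery and blk-mq resources, then defers the final kmem_cache_free() to an RCU grace period via blk_free_queue_rcu(). Nothing calls it directly; the reference drop is the usual one from blk-core.c:

	void blk_put_queue(struct request_queue *q)
	{
		kobject_put(&q->kobj);
	}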
544 struct request_queue *q = disk->queue; in blk_register_queue() local
546 if (WARN_ON(!q)) in blk_register_queue()
558 if (!blk_queue_init_done(q)) { in blk_register_queue()
559 queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q); in blk_register_queue()
560 blk_queue_bypass_end(q); in blk_register_queue()
561 if (q->mq_ops) in blk_register_queue()
562 blk_mq_finish_init(q); in blk_register_queue()
569 ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue"); in blk_register_queue()
575 kobject_uevent(&q->kobj, KOBJ_ADD); in blk_register_queue()
577 if (q->mq_ops) in blk_register_queue()
580 if (!q->request_fn) in blk_register_queue()
583 ret = elv_register_queue(q); in blk_register_queue()
585 kobject_uevent(&q->kobj, KOBJ_REMOVE); in blk_register_queue()
586 kobject_del(&q->kobj); in blk_register_queue()
597 struct request_queue *q = disk->queue; in blk_unregister_queue() local
599 if (WARN_ON(!q)) in blk_unregister_queue()
602 if (q->mq_ops) in blk_unregister_queue()
605 if (q->request_fn) in blk_unregister_queue()
606 elv_unregister_queue(q); in blk_unregister_queue()
608 kobject_uevent(&q->kobj, KOBJ_REMOVE); in blk_unregister_queue()
609 kobject_del(&q->kobj); in blk_unregister_queue()
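blk_register_queue() and blk_unregister_queue() create and remove the /sys/block/<disk>/queue directory that exposes all of the attributes listed above, registering the blk-mq sysfs entries or the elevator directory depending on whether the queue is mq-based. Drivers do not call them directly; they are driven from the gendisk lifecycle, roughly:

	/* Simplified driver-side view (illustrative only). */
	disk->queue = q;
	add_disk(disk);		/* -> blk_register_queue(disk) */

	/* ... device lifetime ... */

	del_gendisk(disk);	/* -> blk_unregister_queue(disk) */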