Lines matching refs: q  (uses of the struct request_queue pointer in drivers/md/dm-table.c)

280 struct request_queue *q; in device_area_is_invalid() local
294 q = bdev_get_queue(bdev); in device_area_is_invalid()
295 if (!q || !q->make_request_fn) { in device_area_is_invalid()
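The hits at 280-295 show the guard device_area_is_invalid() applies before validating a data device's area: if bdev_get_queue() returns NULL, or the queue has no make_request function, the device cannot service bios at all. A minimal sketch of that guard, assuming the pre-multiqueue-only block layer in which request_queue still carries make_request_fn; the helper name queue_can_take_bios is invented here for illustration:

	#include <linux/blkdev.h>

	/* Sketch: a data device is only usable if its request queue exists
	 * and can actually accept bios via a make_request function. */
	static bool queue_can_take_bios(struct block_device *bdev)
	{
		struct request_queue *q = bdev_get_queue(bdev);

		return q && q->make_request_fn;
	}
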
424 struct request_queue *q = bdev_get_queue(bdev); in dm_set_device_limits() local
427 if (unlikely(!q)) { in dm_set_device_limits()
438 q->limits.physical_block_size, in dm_set_device_limits()
439 q->limits.logical_block_size, in dm_set_device_limits()
440 q->limits.alignment_offset, in dm_set_device_limits()
448 if (dm_queue_merge_is_compulsory(q) && !ti->type->merge) in dm_set_device_limits()
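The dm_set_device_limits() hits (424-448) are the limit-stacking step: each data device's queue limits are folded into the target's combined limits, and a warning quoting the device's physical/logical block size and alignment offset is printed when the result is misaligned. A loose sketch of that step; stack_device_limits is a name invented here and the warning text is abbreviated relative to the real message:

	#include <linux/blkdev.h>
	#include <linux/device-mapper.h>

	/* Sketch: fold one device's queue limits into the table-wide limits
	 * and complain if the stacking produces an alignment inconsistency. */
	static int stack_device_limits(struct dm_dev *dev, sector_t start,
				       struct queue_limits *limits)
	{
		struct block_device *bdev = dev->bdev;
		struct request_queue *q = bdev_get_queue(bdev);

		if (unlikely(!q))
			return -ENXIO;	/* device has no usable queue */

		if (bdev_stack_limits(limits, bdev, start) < 0)
			DMWARN("device alignment inconsistency: "
			       "physical_block_size=%u logical_block_size=%u "
			       "alignment_offset=%u",
			       q->limits.physical_block_size,
			       q->limits.logical_block_size,
			       q->limits.alignment_offset);

		return 0;
	}
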
889 struct request_queue *q = bdev_get_queue(dd->dm_dev->bdev); in dm_table_set_type() local
891 if (!blk_queue_stackable(q)) { in dm_table_set_type()
897 if (q->mq_ops) in dm_table_set_type()
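The dm_table_set_type() hits (889-897) feed the decision of whether the table can be request-based: every underlying queue must be request-stackable, and it is also noted whether a device is driven by blk-mq (q->mq_ops set). A hedged sketch of that per-device check; device_supports_rq_stacking is a hypothetical helper name, and the real code performs this inside a loop over the table's device list as at line 889:

	#include <linux/blkdev.h>

	/* Sketch: a table may only become request-based if the device's
	 * queue is request-stackable; record whether it is blk-mq. */
	static bool device_supports_rq_stacking(struct request_queue *q,
						bool *use_blk_mq)
	{
		if (!blk_queue_stackable(q))
			return false;	/* requests cannot be stacked onto this queue */

		if (q->mq_ops)
			*use_blk_mq = true;	/* steer toward the blk-mq request path */

		return true;
	}
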
1318 struct request_queue *q = bdev_get_queue(dev->bdev); in device_flush_capable() local
1320 return q && (q->flush_flags & flush); in device_flush_capable()
1370 struct request_queue *q = bdev_get_queue(dev->bdev); in device_is_nonrot() local
1372 return q && blk_queue_nonrot(q); in device_is_nonrot()
1378 struct request_queue *q = bdev_get_queue(dev->bdev); in device_is_not_random() local
1380 return q && !blk_queue_add_random(q); in device_is_not_random()
1386 struct request_queue *q = bdev_get_queue(dev->bdev); in queue_supports_sg_merge() local
1388 return q && !test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags); in queue_supports_sg_merge()
1394 struct request_queue *q = bdev_get_queue(dev->bdev); in queue_supports_sg_gaps() local
1396 return q && !test_bit(QUEUE_FLAG_SG_GAPS, &q->queue_flags); in queue_supports_sg_gaps()
1419 struct request_queue *q = bdev_get_queue(dev->bdev); in device_not_write_same_capable() local
1421 return q && !q->limits.max_write_same_sectors; in device_not_write_same_capable()
1446 struct request_queue *q = bdev_get_queue(dev->bdev); in device_discard_capable() local
1448 return q && blk_queue_discard(q); in device_discard_capable()
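The run of hits from device_flush_capable() through device_discard_capable() (1318-1448) all follow one pattern: each is a small iterate_devices callback that fetches the underlying queue with bdev_get_queue() and tests a single capability, treating a NULL queue as "not capable". A sketch of that pattern, using device_is_nonrot() as quoted at 1370-1372 and the iterate_devices_callout_fn signature from include/linux/device-mapper.h of this kernel vintage:

	#include <linux/blkdev.h>
	#include <linux/device-mapper.h>

	/* Callback handed to ti->type->iterate_devices(); returns non-zero
	 * when the underlying device's queue is flagged non-rotational. */
	static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
				    sector_t start, sector_t len, void *data)
	{
		struct request_queue *q = bdev_get_queue(dev->bdev);

		/* A missing queue counts as "rotational", i.e. not capable. */
		return q && blk_queue_nonrot(q);
	}
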
1480 void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, in dm_table_set_restrictions() argument
1488 q->limits = *limits; in dm_table_set_restrictions()
1491 queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q); in dm_table_set_restrictions()
1493 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q); in dm_table_set_restrictions()
1500 blk_queue_flush(q, flush); in dm_table_set_restrictions()
1503 q->limits.discard_zeroes_data = 0; in dm_table_set_restrictions()
1507 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q); in dm_table_set_restrictions()
1509 queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, q); in dm_table_set_restrictions()
1512 q->limits.max_write_same_sectors = 0; in dm_table_set_restrictions()
1515 queue_flag_clear_unlocked(QUEUE_FLAG_NO_SG_MERGE, q); in dm_table_set_restrictions()
1517 queue_flag_set_unlocked(QUEUE_FLAG_NO_SG_MERGE, q); in dm_table_set_restrictions()
1520 queue_flag_clear_unlocked(QUEUE_FLAG_SG_GAPS, q); in dm_table_set_restrictions()
1522 queue_flag_set_unlocked(QUEUE_FLAG_SG_GAPS, q); in dm_table_set_restrictions()
1532 if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random)) in dm_table_set_restrictions()
1533 queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q); in dm_table_set_restrictions()
1546 queue_flag_set_unlocked(QUEUE_FLAG_STACKABLE, q); in dm_table_set_restrictions()
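The dm_table_set_restrictions() hits (1480-1546) are the consumer side: the validated queue_limits are copied onto the mapped device's queue, and each QUEUE_FLAG_* bit is set or cleared depending on whether every underlying device passes the corresponding callback. The table-wide check named at 1532, dm_table_all_devices_attribute(), is essentially a loop over the table's targets; the body below is reconstructed from the usual device-mapper iteration idiom, so treat it as a sketch rather than the exact source:

	#include <linux/device-mapper.h>

	/* Sketch: return true only if every data device of every target
	 * satisfies the given iterate_devices callback. */
	static bool dm_table_all_devices_attribute(struct dm_table *t,
						   iterate_devices_callout_fn func)
	{
		struct dm_target *ti;
		unsigned i = 0;

		while (i < dm_table_get_num_targets(t)) {
			ti = dm_table_get_target(t, i++);

			if (!ti->type->iterate_devices ||
			    !ti->type->iterate_devices(ti, func, NULL))
				return false;
		}

		return true;
	}

With such a helper in place, the flag updates at 1507-1533 read naturally: set QUEUE_FLAG_NONROT when all devices are non-rotational, clear QUEUE_FLAG_ADD_RANDOM when no device contributes entropy, and so on.
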
1661 struct request_queue *q = bdev_get_queue(dd->dm_dev->bdev); in dm_table_any_congested() local
1664 if (likely(q)) in dm_table_any_congested()
1665 r |= bdi_congested(&q->backing_dev_info, bdi_bits); in dm_table_any_congested()
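
Finally, the dm_table_any_congested() hits (1661-1665) show congestion being aggregated across the table's devices: each device's backing_dev_info is polled and the results are OR'd together. A sketch of that loop, assuming the dm_dev_internal list element from drivers/md/dm.h; the warning the real code emits for a NULL queue is omitted here:

	#include <linux/blkdev.h>
	#include <linux/backing-dev.h>
	#include "dm.h"

	/* Sketch: the table is congested if any underlying device's
	 * backing device reports congestion for the requested bits. */
	int dm_table_any_congested(struct dm_table *t, int bdi_bits)
	{
		struct dm_dev_internal *dd;
		struct list_head *devices = dm_table_get_devices(t);
		int r = 0;

		list_for_each_entry(dd, devices, list) {
			struct request_queue *q = bdev_get_queue(dd->dm_dev->bdev);

			if (likely(q))
				r |= bdi_congested(&q->backing_dev_info, bdi_bits);
		}

		return r;
	}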