Lines matching references to "t" in block/blk-settings.c: the queue-limit stacking helpers used by layered block drivers, where t is the top (stacked) side and b the bottom (component) device.
491 void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b) in blk_queue_stack_limits() argument
493 blk_stack_limits(&t->limits, &b->limits, 0); in blk_queue_stack_limits()
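Note: blk_queue_stack_limits() is the whole-queue wrapper; it stacks b's limits into t's at offset 0. Most of the per-field stacking in blk_stack_limits() below uses min_not_zero(), where 0 means "no limit set". A sketch of that helper's semantics, written to match the include/linux/kernel.h macro of this era (reconstructed from memory, so verify against your tree):

    #define min_not_zero(x, y) ({                           \
            typeof(x) __x = (x);                            \
            typeof(y) __y = (y);                            \
            __x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); })

So min_not_zero(0, 65536) is 65536 and min_not_zero(512, 65536) is 512: an unset (zero) limit on either queue never masks a real limit on the other.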
518 int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, sector_t start) in blk_stack_limits() argument
523 t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors); in blk_stack_limits()
524 t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors); in blk_stack_limits()
525 t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors); in blk_stack_limits()
526 t->max_write_same_sectors = min(t->max_write_same_sectors, b->max_write_same_sectors); in blk_stack_limits()
528 t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn); in blk_stack_limits()
530 t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask, b->seg_boundary_mask); in blk_stack_limits()
532 t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask, b->virt_boundary_mask); in blk_stack_limits()
535 t->max_segments = min_not_zero(t->max_segments, b->max_segments); in blk_stack_limits()
536 t->max_integrity_segments = min_not_zero(t->max_integrity_segments, b->max_integrity_segments); in blk_stack_limits()
539 t->max_segment_size = min_not_zero(t->max_segment_size, b->max_segment_size); in blk_stack_limits()
542 t->misaligned |= b->misaligned; in blk_stack_limits()
549 if (t->alignment_offset != alignment) { in blk_stack_limits()
551 top = max(t->physical_block_size, t->io_min) in blk_stack_limits()
552 + t->alignment_offset; in blk_stack_limits()
557 t->misaligned = 1; in blk_stack_limits()
562 t->logical_block_size = max(t->logical_block_size, b->logical_block_size); in blk_stack_limits()
565 t->physical_block_size = max(t->physical_block_size, b->physical_block_size); in blk_stack_limits()
568 t->io_min = max(t->io_min, b->io_min); in blk_stack_limits()
569 t->io_opt = lcm_not_zero(t->io_opt, b->io_opt); in blk_stack_limits()
571 t->cluster &= b->cluster; in blk_stack_limits()
572 t->discard_zeroes_data &= b->discard_zeroes_data; in blk_stack_limits()
575 if (t->physical_block_size & (t->logical_block_size - 1)) { in blk_stack_limits()
576 t->physical_block_size = t->logical_block_size; in blk_stack_limits()
577 t->misaligned = 1; in blk_stack_limits()
582 if (t->io_min & (t->physical_block_size - 1)) { in blk_stack_limits()
583 t->io_min = t->physical_block_size; in blk_stack_limits()
584 t->misaligned = 1; in blk_stack_limits()
589 if (t->io_opt & (t->physical_block_size - 1)) { in blk_stack_limits()
590 t->io_opt = 0; in blk_stack_limits()
591 t->misaligned = 1; in blk_stack_limits()
595 t->raid_partial_stripes_expensive = in blk_stack_limits()
596 max(t->raid_partial_stripes_expensive, b->raid_partial_stripes_expensive); in blk_stack_limits()
600 t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment) in blk_stack_limits()
601 % max(t->physical_block_size, t->io_min); in blk_stack_limits()
604 if (t->alignment_offset & (t->logical_block_size - 1)) { in blk_stack_limits()
605 t->misaligned = 1; in blk_stack_limits()
613 if (t->discard_granularity != 0 && in blk_stack_limits()
614 t->discard_alignment != alignment) { in blk_stack_limits()
615 top = t->discard_granularity + t->discard_alignment; in blk_stack_limits()
620 t->discard_misaligned = 1; in blk_stack_limits()
623 t->max_discard_sectors = min_not_zero(t->max_discard_sectors, b->max_discard_sectors); in blk_stack_limits()
625 t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors, b->max_hw_discard_sectors); in blk_stack_limits()
627 t->discard_granularity = max(t->discard_granularity, b->discard_granularity); in blk_stack_limits()
629 t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) % in blk_stack_limits()
630 t->discard_granularity; in blk_stack_limits()
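Note: io_opt (line 569), alignment_offset (line 600) and discard_alignment (line 629) stack with lcm_not_zero() rather than min/max, because the combined value must suit both devices at once; the alignment results are then reduced modulo the granularity to yield an offset. A sketch of lcm_not_zero() consistent with lib/lcm.c of this era (reconstructed from memory):

    unsigned long lcm_not_zero(unsigned long a, unsigned long b)
    {
            unsigned long l = lcm(a, b);

            if (l)
                    return l;

            return (b ? : a);       /* one side unset: keep the other */
    }

Worked example for line 569: stacking io_opt = 65536 (64 KiB) over io_opt = 49152 (48 KiB) gives lcm = 196608 (192 KiB), the smallest size optimal for both devices. The checks of the form x & (size - 1) at lines 575, 582, 589 and 604 are multiple-of tests that rely on the logical/physical block sizes being powers of two.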
648 int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev, sector_t start) in bdev_stack_limits() argument
655 return blk_stack_limits(t, &bq->limits, start); in bdev_stack_limits()
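The ref listing shows only the lines of bdev_stack_limits() that mention t; for context, a sketch of the whole helper as blk-settings.c of this vintage reads (reconstructed from memory, treat as approximate):

    int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
                          sector_t start)
    {
            struct request_queue *bq = bdev_get_queue(bdev);

            /* Shift by the partition's start sector so alignment is
             * computed relative to the start of the whole disk. */
            start += get_start_sect(bdev);

            return blk_stack_limits(t, &bq->limits, start);
    }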
672 struct request_queue *t = disk->queue; in disk_stack_limits() local
674 if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) { in disk_stack_limits()
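disk_stack_limits() takes the offset in bytes, hence the offset >> 9 conversion to 512-byte sectors before stacking; when the stacking fails the elided body (lines 675 onward) prints a notice that the bottom device is misaligned. A hedged usage example in the style of a stacking driver such as md (field names shown for illustration only):

    /* rdev->data_offset is in 512-byte sectors; the API wants bytes. */
    disk_stack_limits(mddev->gendisk, rdev->bdev,
                      rdev->data_offset << 9);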