This source file includes the following definitions:
- blk_queue_rq_timeout
- blk_set_default_limits
- blk_set_stacking_limits
- blk_queue_make_request
- blk_queue_bounce_limit
- blk_queue_max_hw_sectors
- blk_queue_chunk_sectors
- blk_queue_max_discard_sectors
- blk_queue_max_write_same_sectors
- blk_queue_max_write_zeroes_sectors
- blk_queue_max_segments
- blk_queue_max_discard_segments
- blk_queue_max_segment_size
- blk_queue_logical_block_size
- blk_queue_physical_block_size
- blk_queue_alignment_offset
- blk_limits_io_min
- blk_queue_io_min
- blk_limits_io_opt
- blk_queue_io_opt
- blk_queue_stack_limits
- blk_stack_limits
- bdev_stack_limits
- disk_stack_limits
- blk_queue_update_dma_pad
- blk_queue_dma_drain
- blk_queue_segment_boundary
- blk_queue_virt_boundary
- blk_queue_dma_alignment
- blk_queue_update_dma_alignment
- blk_set_queue_depth
- blk_queue_write_cache
- blk_queue_required_elevator_features
- blk_queue_can_use_dma_map_merging
- blk_settings_init
// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/memblock.h>	/* for max_low_pfn/max_pfn */
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>
#include <linux/gfp.h>
#include <linux/dma-mapping.h>

#include "blk.h"
#include "blk-wbt.h"

unsigned long blk_max_low_pfn;
EXPORT_SYMBOL(blk_max_low_pfn);

unsigned long blk_max_pfn;

/* Set the timeout, in jiffies, after which a request on this queue expires. */
void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
	q->rq_timeout = timeout;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);

/**
 * blk_set_default_limits - reset queue limits to conservative defaults
 * @lim: the queue_limits structure to reset
 *
 * Restores @lim to safe default values; drivers are expected to override
 * the limits that their hardware can actually do better on.
 */
void blk_set_default_limits(struct queue_limits *lim)
{
	lim->max_segments = BLK_MAX_SEGMENTS;
	lim->max_discard_segments = 1;
	lim->max_integrity_segments = 0;
	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
	lim->virt_boundary_mask = 0;
	lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
	lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
	lim->max_dev_sectors = 0;
	lim->chunk_sectors = 0;
	lim->max_write_same_sectors = 0;
	lim->max_write_zeroes_sectors = 0;
	lim->max_discard_sectors = 0;
	lim->max_hw_discard_sectors = 0;
	lim->discard_granularity = 0;
	lim->discard_alignment = 0;
	lim->discard_misaligned = 0;
	lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
	lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
	lim->alignment_offset = 0;
	lim->io_opt = 0;
	lim->misaligned = 0;
	lim->zoned = BLK_ZONED_NONE;
}
EXPORT_SYMBOL(blk_set_default_limits);

/**
 * blk_set_stacking_limits - set default limits for stacking devices
 * @lim: the queue_limits structure to reset
 *
 * Stacking drivers (MD/DM) start from the defaults and then open every
 * limit up to the maximum, so that the effective limits are determined
 * purely by the underlying devices via blk_stack_limits().
 */
void blk_set_stacking_limits(struct queue_limits *lim)
{
	blk_set_default_limits(lim);

	/* Inherit limits from component devices */
	lim->max_segments = USHRT_MAX;
	lim->max_discard_segments = USHRT_MAX;
	lim->max_hw_sectors = UINT_MAX;
	lim->max_segment_size = UINT_MAX;
	lim->max_sectors = UINT_MAX;
	lim->max_dev_sectors = UINT_MAX;
	lim->max_write_same_sectors = UINT_MAX;
	lim->max_write_zeroes_sectors = UINT_MAX;
}
EXPORT_SYMBOL(blk_set_stacking_limits);
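
/*
 * Illustrative sketch, not part of blk-settings.c: because blk_stack_limits()
 * combines most limits with min_not_zero(), the UINT_MAX values set above act
 * as "no limit yet" and are narrowed by each component that is stacked in.
 * The limit structures below (sda_lim, sdb_lim) are hypothetical:
 *
 *	blk_set_stacking_limits(&lim);		(max_sectors == UINT_MAX)
 *	blk_stack_limits(&lim, &sda_lim, 0);	(max_sectors -> sda's value)
 *	blk_stack_limits(&lim, &sdb_lim, 0);	(max_sectors -> min of both)
 */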

/**
 * blk_queue_make_request - define an alternate make_request function for a device
 * @q:   the request queue for the device to be affected
 * @mfn: the alternate make_request function
 *
 * Bio-based drivers (for example stacking drivers such as MD and DM) use
 * this to install the function that will be called to submit bios to the
 * device.  The queue's request count, DMA alignment and limits are reset
 * to defaults; the driver should adjust the limits afterwards to match
 * its hardware.
 */
void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
{
	/*
	 * set defaults
	 */
	q->nr_requests = BLKDEV_MAX_RQ;

	q->make_request_fn = mfn;
	blk_queue_dma_alignment(q, 511);

	blk_set_default_limits(&q->limits);
}
EXPORT_SYMBOL(blk_queue_make_request);

/**
 * blk_queue_bounce_limit - set bounce buffer limit for queue
 * @q:        the request queue for the device
 * @max_addr: the maximum address the device can handle
 *
 * Pages above this physical address will be bounced into low memory
 * (or the ISA DMA zone) before being handed to the driver.
 */
void blk_queue_bounce_limit(struct request_queue *q, u64 max_addr)
{
	unsigned long b_pfn = max_addr >> PAGE_SHIFT;
	int dma = 0;

	q->bounce_gfp = GFP_NOIO;
#if BITS_PER_LONG == 64
	/*
	 * Assume anything <= 4GB can be handled by IOMMU.  Actually
	 * some IOMMUs can handle everything, but we don't know of a
	 * way to test this here.
	 */
	if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
		dma = 1;
	q->limits.bounce_pfn = max(max_low_pfn, b_pfn);
#else
	if (b_pfn < blk_max_low_pfn)
		dma = 1;
	q->limits.bounce_pfn = b_pfn;
#endif
	if (dma) {
		init_emergency_isa_pool();
		q->bounce_gfp = GFP_NOIO | GFP_DMA;
		q->limits.bounce_pfn = b_pfn;
	}
}
EXPORT_SYMBOL(blk_queue_bounce_limit);

/**
 * blk_queue_max_hw_sectors - set max sectors for a request for this queue
 * @q:              the request queue for the device
 * @max_hw_sectors: max hardware sectors in the usual 512b unit
 *
 * Sets the hardware limit on the size of a single request.  The soft
 * limit actually used for file system requests, max_sectors, is derived
 * from it, capped by max_dev_sectors and BLK_DEF_MAX_SECTORS.
 */
void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
{
	struct queue_limits *limits = &q->limits;
	unsigned int max_sectors;

	if ((max_hw_sectors << 9) < PAGE_SIZE) {
		max_hw_sectors = 1 << (PAGE_SHIFT - 9);
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_hw_sectors);
	}

	limits->max_hw_sectors = max_hw_sectors;
	max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);
	max_sectors = min_t(unsigned int, max_sectors, BLK_DEF_MAX_SECTORS);
	limits->max_sectors = max_sectors;
	q->backing_dev_info->io_pages = max_sectors >> (PAGE_SHIFT - 9);
}
EXPORT_SYMBOL(blk_queue_max_hw_sectors);
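
/*
 * Illustrative sketch, not part of blk-settings.c: a driver whose hardware
 * is limited to 1 MiB transfers and 128 scatter/gather entries might
 * configure its freshly allocated queue roughly like this (the function
 * name and the numbers are hypothetical):
 *
 *	static void example_configure_queue(struct request_queue *q)
 *	{
 *		blk_queue_max_hw_sectors(q, 2048);	(1 MiB in 512b sectors)
 *		blk_queue_max_segments(q, 128);
 *		blk_queue_max_segment_size(q, 1024 * 1024);
 *	}
 */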

/**
 * blk_queue_chunk_sectors - set size of the chunk for this queue
 * @q:             the request queue for the device
 * @chunk_sectors: chunk sectors in the usual 512b unit
 *
 * If set, no request may cross a chunk boundary; the block layer splits
 * bios accordingly.  Must be a power of two.
 */
void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors)
{
	BUG_ON(!is_power_of_2(chunk_sectors));
	q->limits.chunk_sectors = chunk_sectors;
}
EXPORT_SYMBOL(blk_queue_chunk_sectors);

/**
 * blk_queue_max_discard_sectors - set max sectors for a single discard
 * @q:                   the request queue for the device
 * @max_discard_sectors: maximum number of sectors to discard
 */
void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors)
{
	q->limits.max_hw_discard_sectors = max_discard_sectors;
	q->limits.max_discard_sectors = max_discard_sectors;
}
EXPORT_SYMBOL(blk_queue_max_discard_sectors);

/**
 * blk_queue_max_write_same_sectors - set max sectors for a single write same
 * @q:                      the request queue for the device
 * @max_write_same_sectors: maximum number of sectors to write per command
 */
void blk_queue_max_write_same_sectors(struct request_queue *q,
				      unsigned int max_write_same_sectors)
{
	q->limits.max_write_same_sectors = max_write_same_sectors;
}
EXPORT_SYMBOL(blk_queue_max_write_same_sectors);

/**
 * blk_queue_max_write_zeroes_sectors - set max sectors for a single write zeroes
 * @q:                        the request queue for the device
 * @max_write_zeroes_sectors: maximum number of sectors to write per command
 */
void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
		unsigned int max_write_zeroes_sectors)
{
	q->limits.max_write_zeroes_sectors = max_write_zeroes_sectors;
}
EXPORT_SYMBOL(blk_queue_max_write_zeroes_sectors);

/**
 * blk_queue_max_segments - set max hw segments for a request for this queue
 * @q:            the request queue for the device
 * @max_segments: max number of segments
 *
 * Enables a low level driver to set an upper limit on the number of
 * hw data segments in a request.
 */
void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
{
	if (!max_segments) {
		max_segments = 1;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_segments);
	}

	q->limits.max_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_segments);

/**
 * blk_queue_max_discard_segments - set max segments for discard requests
 * @q:            the request queue for the device
 * @max_segments: max number of segments
 *
 * Enables a low level driver to set an upper limit on the number of
 * segments in a discard request.
 */
void blk_queue_max_discard_segments(struct request_queue *q,
		unsigned short max_segments)
{
	q->limits.max_discard_segments = max_segments;
}
EXPORT_SYMBOL_GPL(blk_queue_max_discard_segments);

/**
 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
 * @q:        the request queue for the device
 * @max_size: max size of a segment, in bytes
 *
 * Enables a low level driver to set an upper limit on the size of a
 * coalesced segment.
 */
void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
	if (max_size < PAGE_SIZE) {
		max_size = PAGE_SIZE;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_size);
	}

	/* see blk_queue_virt_boundary() for the explanation */
	WARN_ON_ONCE(q->limits.virt_boundary_mask);

	q->limits.max_segment_size = max_size;
}
EXPORT_SYMBOL(blk_queue_max_segment_size);

/**
 * blk_queue_logical_block_size - set logical block size for the queue
 * @q:    the request queue for the device
 * @size: the logical block size, in bytes
 *
 * This should be set to the lowest possible block size that the storage
 * device can address.  The default of 512 covers most hardware.
 */
void blk_queue_logical_block_size(struct request_queue *q, unsigned int size)
{
	q->limits.logical_block_size = size;

	if (q->limits.physical_block_size < size)
		q->limits.physical_block_size = size;

	if (q->limits.io_min < q->limits.physical_block_size)
		q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_logical_block_size);

/**
 * blk_queue_physical_block_size - set physical block size for the queue
 * @q:    the request queue for the device
 * @size: the physical block size, in bytes
 *
 * This should be set to the lowest possible sector size that the
 * hardware can operate on without reverting to read-modify-write
 * operations.
 */
void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
{
	q->limits.physical_block_size = size;

	if (q->limits.physical_block_size < q->limits.logical_block_size)
		q->limits.physical_block_size = q->limits.logical_block_size;

	if (q->limits.io_min < q->limits.physical_block_size)
		q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_physical_block_size);
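
/*
 * Illustrative sketch, not part of blk-settings.c: a 512e drive (4 KiB
 * physical sectors exposed through 512-byte logical sectors) would be
 * described like this; the helper name is hypothetical:
 *
 *	static void example_setup_512e(struct request_queue *q)
 *	{
 *		blk_queue_logical_block_size(q, 512);
 *		blk_queue_physical_block_size(q, 4096);
 *	}
 */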

/**
 * blk_queue_alignment_offset - set physical block alignment offset
 * @q:      the request queue for the device
 * @offset: alignment offset in bytes
 *
 * Some devices are naturally misaligned to compensate for things like
 * the legacy DOS partition table 63-sector offset.  Low-level drivers
 * should use this to advertise how far the first naturally aligned
 * physical block is from the start of the device.
 */
void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
{
	q->limits.alignment_offset =
		offset & (q->limits.physical_block_size - 1);
	q->limits.misaligned = 0;
}
EXPORT_SYMBOL(blk_queue_alignment_offset);

/**
 * blk_limits_io_min - set minimum request size for a device
 * @limits: the queue limits
 * @min:    smallest I/O size, in bytes
 *
 * The minimum I/O size is never allowed to drop below the logical or
 * physical block size.
 */
void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
{
	limits->io_min = min;

	if (limits->io_min < limits->logical_block_size)
		limits->io_min = limits->logical_block_size;

	if (limits->io_min < limits->physical_block_size)
		limits->io_min = limits->physical_block_size;
}
EXPORT_SYMBOL(blk_limits_io_min);

/**
 * blk_queue_io_min - set minimum request size for the queue
 * @q:   the request queue for the device
 * @min: smallest I/O size, in bytes
 *
 * Storage devices may report a granularity or preferred minimum I/O
 * size, the smallest request the device can perform without incurring
 * a performance penalty (for RAID arrays this is often the chunk size).
 * Submitters should avoid sending requests smaller than this.
 */
void blk_queue_io_min(struct request_queue *q, unsigned int min)
{
	blk_limits_io_min(&q->limits, min);
}
EXPORT_SYMBOL(blk_queue_io_min);

/**
 * blk_limits_io_opt - set optimal request size for a device
 * @limits: the queue limits
 * @opt:    optimal request size, in bytes
 *
 * Storage devices may report an optimal I/O size, the device's preferred
 * unit for sustained I/O (for RAID arrays this is often the stripe
 * width).  Submitters should do their best to issue requests that are
 * multiples of this value.
 */
void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
{
	limits->io_opt = opt;
}
EXPORT_SYMBOL(blk_limits_io_opt);

/**
 * blk_queue_io_opt - set optimal request size for the queue
 * @q:   the request queue for the device
 * @opt: optimal request size, in bytes
 *
 * Wrapper around blk_limits_io_opt() for drivers that already have a
 * request queue.
 */
void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
{
	blk_limits_io_opt(&q->limits, opt);
}
EXPORT_SYMBOL(blk_queue_io_opt);
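
/*
 * Illustrative sketch, not part of blk-settings.c: a RAID5-style driver
 * with a 64 KiB chunk size across four data disks might export its
 * preferred I/O granularity as follows (names and geometry are made up):
 *
 *	static void example_export_raid_geometry(struct request_queue *q)
 *	{
 *		blk_queue_io_min(q, 64 * 1024);		(one chunk)
 *		blk_queue_io_opt(q, 4 * 64 * 1024);	(one full stripe)
 *	}
 */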

/**
 * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
 * @t: the stacking driver queue (top)
 * @b: the underlying device queue (bottom)
 */
void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
{
	blk_stack_limits(&t->limits, &b->limits, 0);
}
EXPORT_SYMBOL(blk_queue_stack_limits);

/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t:     the stacking driver limits (top device)
 * @b:     the underlying queue limits (bottom, component device)
 * @start: first data sector within the component device
 *
 * Stacking drivers like MD and DM call this iteratively for every
 * component (bottom) device so that the top device ends up with block
 * sizes, segment limits and alignments compatible with all components.
 *
 * Returns 0 if the top and bottom queue_limits are compatible.  If no
 * compatible sizes and alignments exist, -1 is returned and the top
 * limits are flagged as misaligned.
 */
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
		     sector_t start)
{
	unsigned int top, bottom, alignment, ret = 0;

	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
	t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
	t->max_write_same_sectors = min(t->max_write_same_sectors,
					b->max_write_same_sectors);
	t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
					  b->max_write_zeroes_sectors);
	t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);

	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
					    b->seg_boundary_mask);
	t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
					     b->virt_boundary_mask);

	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
	t->max_discard_segments = min_not_zero(t->max_discard_segments,
					       b->max_discard_segments);
	t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
						 b->max_integrity_segments);

	t->max_segment_size = min_not_zero(t->max_segment_size,
					   b->max_segment_size);

	t->misaligned |= b->misaligned;

	alignment = queue_limit_alignment_offset(b, start);

	/*
	 * Bottom device has different alignment.  Check that it is
	 * compatible with the current top alignment.
	 */
	if (t->alignment_offset != alignment) {

		top = max(t->physical_block_size, t->io_min)
			+ t->alignment_offset;
		bottom = max(b->physical_block_size, b->io_min) + alignment;

		/* Verify that top and bottom intervals line up */
		if (max(top, bottom) % min(top, bottom)) {
			t->misaligned = 1;
			ret = -1;
		}
	}

	t->logical_block_size = max(t->logical_block_size,
				    b->logical_block_size);

	t->physical_block_size = max(t->physical_block_size,
				     b->physical_block_size);

	t->io_min = max(t->io_min, b->io_min);
	t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);

	/* Physical block size a multiple of the logical block size? */
	if (t->physical_block_size & (t->logical_block_size - 1)) {
		t->physical_block_size = t->logical_block_size;
		t->misaligned = 1;
		ret = -1;
	}

	/* Minimum I/O a multiple of the physical block size? */
	if (t->io_min & (t->physical_block_size - 1)) {
		t->io_min = t->physical_block_size;
		t->misaligned = 1;
		ret = -1;
	}

	/* Optimal I/O a multiple of the physical block size? */
	if (t->io_opt & (t->physical_block_size - 1)) {
		t->io_opt = 0;
		t->misaligned = 1;
		ret = -1;
	}

	t->raid_partial_stripes_expensive =
		max(t->raid_partial_stripes_expensive,
		    b->raid_partial_stripes_expensive);

	/* Find lowest common alignment_offset */
	t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
		% max(t->physical_block_size, t->io_min);

	/* Verify that new alignment_offset is on a logical block boundary */
	if (t->alignment_offset & (t->logical_block_size - 1)) {
		t->misaligned = 1;
		ret = -1;
	}

	/* Discard alignment and granularity */
	if (b->discard_granularity) {
		alignment = queue_limit_discard_alignment(b, start);

		if (t->discard_granularity != 0 &&
		    t->discard_alignment != alignment) {
			top = t->discard_granularity + t->discard_alignment;
			bottom = b->discard_granularity + alignment;

			/* Verify that top and bottom intervals line up */
			if ((max(top, bottom) % min(top, bottom)) != 0)
				t->discard_misaligned = 1;
		}

		t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
						      b->max_discard_sectors);
		t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
							 b->max_hw_discard_sectors);
		t->discard_granularity = max(t->discard_granularity,
					     b->discard_granularity);
		t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
			t->discard_granularity;
	}

	if (b->chunk_sectors)
		t->chunk_sectors = min_not_zero(t->chunk_sectors,
						b->chunk_sectors);

	return ret;
}
EXPORT_SYMBOL(blk_stack_limits);
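
/*
 * Illustrative sketch, not part of blk-settings.c: a hypothetical
 * two-member mirror driver combining its components' limits.  A mismatch
 * only flags the result as misaligned; it does not fail setup.
 *
 *	static void example_mirror_limits(struct request_queue *q,
 *					  struct block_device *m0,
 *					  struct block_device *m1)
 *	{
 *		blk_set_stacking_limits(&q->limits);
 *		if (bdev_stack_limits(&q->limits, m0, 0) < 0 ||
 *		    bdev_stack_limits(&q->limits, m1, 0) < 0)
 *			pr_warn("example: mirror members are misaligned\n");
 *	}
 */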

/**
 * bdev_stack_limits - adjust queue limits for stacked drivers
 * @t:     the stacking driver limits (top device)
 * @bdev:  the component block_device (bottom)
 * @start: first data sector within the component device
 *
 * Merges the limits of the block device's queue into @t, offsetting
 * @start by the partition's start sector first.
 */
int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
		      sector_t start)
{
	struct request_queue *bq = bdev_get_queue(bdev);

	start += get_start_sect(bdev);

	return blk_stack_limits(t, &bq->limits, start);
}
EXPORT_SYMBOL(bdev_stack_limits);

/**
 * disk_stack_limits - adjust queue limits for stacked drivers
 * @disk:   MD/DM gendisk (top)
 * @bdev:   the underlying block device (bottom)
 * @offset: offset to the beginning of data within the component device
 *
 * Merges the limits for a top-level gendisk and a bottom-level
 * block_device, warning if the combination turns out to be misaligned.
 */
void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
		       sector_t offset)
{
	struct request_queue *t = disk->queue;

	if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) {
		char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];

		disk_name(disk, 0, top);
		bdevname(bdev, bottom);

		printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
		       top, bottom);
	}

	t->backing_dev_info->io_pages =
		t->limits.max_sectors >> (PAGE_SHIFT - 9);
}
EXPORT_SYMBOL(disk_stack_limits);

/**
 * blk_queue_update_dma_pad - update DMA pad mask
 * @q:    the request queue for the device
 * @mask: pad mask
 *
 * Updates the DMA pad mask, but only if the new mask is bigger than the
 * current one.  The pad mask is used to pad request data lengths up to
 * the alignment some hardware requires.
 */
void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
{
	if (mask > q->dma_pad_mask)
		q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_pad);

/**
 * blk_queue_dma_drain - set up a drain buffer for commands that need one
 * @q:                the request queue for the device
 * @dma_drain_needed: callback that decides whether a request needs draining
 * @buf:              physically contiguous drain buffer
 * @size:             size of the drain buffer, in bytes
 *
 * Some devices (notably some ATAPI devices) cannot terminate a transfer
 * cleanly and want extra scatter/gather space appended so the controller
 * has somewhere to put trailing data.  One segment is reserved for the
 * drain buffer, so the driver must account for that in its
 * queue_max_segments() setting.
 */
int blk_queue_dma_drain(struct request_queue *q,
			dma_drain_needed_fn *dma_drain_needed,
			void *buf, unsigned int size)
{
	if (queue_max_segments(q) < 2)
		return -EINVAL;

	/* make room for appending the drain */
	blk_queue_max_segments(q, queue_max_segments(q) - 1);
	q->dma_drain_needed = dma_drain_needed;
	q->dma_drain_buffer = buf;
	q->dma_drain_size = size;

	return 0;
}
EXPORT_SYMBOL_GPL(blk_queue_dma_drain);

/**
 * blk_queue_segment_boundary - set boundary rules for segment merging
 * @q:    the request queue for the device
 * @mask: the memory boundary mask
 */
void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
{
	if (mask < PAGE_SIZE - 1) {
		mask = PAGE_SIZE - 1;
		printk(KERN_INFO "%s: set to minimum %lx\n",
		       __func__, mask);
	}

	q->limits.seg_boundary_mask = mask;
}
EXPORT_SYMBOL(blk_queue_segment_boundary);

/**
 * blk_queue_virt_boundary - set boundary rules for bio merging
 * @q:    the request queue for the device
 * @mask: the memory boundary mask
 */
void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
{
	q->limits.virt_boundary_mask = mask;

	/*
	 * Devices that require a virtual boundary do not support
	 * scatter/gather I/O natively; they need a descriptor list entry
	 * for each boundary-sized chunk, so the usual notion of a maximum
	 * segment size does not apply to them.  Lift the segment size
	 * limit accordingly.
	 */
	if (mask)
		q->limits.max_segment_size = UINT_MAX;
}
EXPORT_SYMBOL(blk_queue_virt_boundary);

/**
 * blk_queue_dma_alignment - set dma length and memory alignment
 * @q:    the request queue for the device
 * @mask: alignment mask
 *
 * Set the required memory and length alignment for direct DMA
 * transactions.  The default of 511 suits devices that do 512-byte
 * sector addressing.
 */
void blk_queue_dma_alignment(struct request_queue *q, int mask)
{
	q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_dma_alignment);

/**
 * blk_queue_update_dma_alignment - update dma length and memory alignment
 * @q:    the request queue for the device
 * @mask: alignment mask
 *
 * Like blk_queue_dma_alignment(), but only ever grows the alignment
 * requirement, so multiple parties (driver, device, transport) can each
 * state their own alignment without relaxing a stricter one already set.
 */
void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
{
	BUG_ON(mask > PAGE_SIZE);

	if (mask > q->dma_alignment)
		q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_alignment);

/**
 * blk_set_queue_depth - tell the block layer about the device queue depth
 * @q:     the request queue for the device
 * @depth: queue depth
 */
void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
{
	q->queue_depth = depth;
	rq_qos_queue_depth_changed(q);
}
EXPORT_SYMBOL(blk_set_queue_depth);

/**
 * blk_queue_write_cache - configure the queue's write cache
 * @q:   the request queue for the device
 * @wc:  write back cache on or off
 * @fua: device supports FUA writes, if true
 *
 * Tell the block layer about the write cache of @q.
 */
void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
{
	if (wc)
		blk_queue_flag_set(QUEUE_FLAG_WC, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_WC, q);
	if (fua)
		blk_queue_flag_set(QUEUE_FLAG_FUA, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_FUA, q);

	wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
}
EXPORT_SYMBOL_GPL(blk_queue_write_cache);
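
/*
 * Illustrative sketch, not part of blk-settings.c: a driver with a volatile
 * write cache that also honours FUA would advertise both, so the block layer
 * issues flushes and FUA writes as needed; a cacheless device would pass
 * false for both and get neither (the helper name is hypothetical):
 *
 *	static void example_setup_cache(struct request_queue *q,
 *					bool has_cache, bool has_fua)
 *	{
 *		blk_queue_write_cache(q, has_cache, has_fua);
 *	}
 */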

/**
 * blk_queue_required_elevator_features - set required elevator features
 * @q:        the request queue for the device
 * @features: required elevator features bitmask (ELEVATOR_F_* flags)
 *
 * Only I/O schedulers providing all of these features can be used with
 * the queue, e.g. zoned block devices require a scheduler that preserves
 * sequential write ordering.
 */
void blk_queue_required_elevator_features(struct request_queue *q,
					  unsigned int features)
{
	q->required_elevator_features = features;
}
EXPORT_SYMBOL_GPL(blk_queue_required_elevator_features);

/**
 * blk_queue_can_use_dma_map_merging - configure queue for merging segments
 * @q:   the request queue for the device
 * @dev: the device pointer for dma
 *
 * Tell the block layer that the DMA mapping layer (e.g. an IOMMU) can
 * merge segments for @q.  Returns false if the device reports no merge
 * boundary.
 */
bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
				       struct device *dev)
{
	unsigned long boundary = dma_get_merge_boundary(dev);

	if (!boundary)
		return false;

	/* use the DMA merge boundary as the queue's virt boundary */
	blk_queue_virt_boundary(q, boundary);

	return true;
}
EXPORT_SYMBOL_GPL(blk_queue_can_use_dma_map_merging);

static int __init blk_settings_init(void)
{
	blk_max_low_pfn = max_low_pfn - 1;
	blk_max_pfn = max_pfn - 1;
	return 0;
}
subsys_initcall(blk_settings_init);