This source file includes following definitions.
- bfq_gt
- bfq_root_active_entity
- bfq_class_idx
- bfq_tot_busy_queues
- bfq_update_next_in_service
- bfq_bfqq_to_bfqg
- bfq_update_parent_budget
- bfq_no_longer_next_in_service
- bfq_bfqq_to_bfqg
- bfq_update_parent_budget
- bfq_no_longer_next_in_service
- bfq_entity_to_bfqq
- bfq_delta
- bfq_calc_finish
- bfq_entity_of
- bfq_extract
- bfq_idle_extract
- bfq_insert
- bfq_update_min
- bfq_update_active_node
- bfq_update_active_tree
- bfq_active_insert
- bfq_ioprio_to_weight
- bfq_weight_to_ioprio
- bfq_get_entity
- bfq_find_deepest
- bfq_active_extract
- bfq_idle_insert
- bfq_forget_entity
- bfq_put_idle_entity
- bfq_forget_idle
- bfq_entity_service_tree
- __bfq_entity_update_weight_prio
- bfq_bfqq_served
- bfq_bfqq_charge_time
- bfq_update_fin_time_enqueue
- __bfq_activate_entity
- __bfq_requeue_entity
- __bfq_activate_requeue_entity
- bfq_activate_requeue_entity
- __bfq_deactivate_entity
- bfq_deactivate_entity
- bfq_calc_vtime_jump
- bfq_update_vtime
- bfq_first_active_entity
- __bfq_lookup_next_entity
- bfq_lookup_next_entity
- next_queue_may_preempt
- bfq_get_next_queue
- __bfq_bfqd_reset_in_service
- bfq_deactivate_bfqq
- bfq_activate_bfqq
- bfq_requeue_bfqq
- bfq_del_bfqq_busy
- bfq_add_bfqq_busy
1
2
3
4
5
6
7
8
9 #include "bfq-iosched.h"
10
11
12
13
14
15
16
17
18 static int bfq_gt(u64 a, u64 b)
19 {
20 return (s64)(a - b) > 0;
21 }
22
23 static struct bfq_entity *bfq_root_active_entity(struct rb_root *tree)
24 {
25 struct rb_node *node = tree->rb_node;
26
27 return rb_entry(node, struct bfq_entity, rb_node);
28 }
29
30 static unsigned int bfq_class_idx(struct bfq_entity *entity)
31 {
32 struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
33
34 return bfqq ? bfqq->ioprio_class - 1 :
35 BFQ_DEFAULT_GRP_CLASS - 1;
36 }
37
38 unsigned int bfq_tot_busy_queues(struct bfq_data *bfqd)
39 {
40 return bfqd->busy_queues[0] + bfqd->busy_queues[1] +
41 bfqd->busy_queues[2];
42 }
43
44 static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd,
45 bool expiration);
46
47 static bool bfq_update_parent_budget(struct bfq_entity *next_in_service);
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74 static bool bfq_update_next_in_service(struct bfq_sched_data *sd,
75 struct bfq_entity *new_entity,
76 bool expiration)
77 {
78 struct bfq_entity *next_in_service = sd->next_in_service;
79 bool parent_sched_may_change = false;
80 bool change_without_lookup = false;
81
82
83
84
85
86
87
88
89
90
91
92
93 if (new_entity && new_entity != sd->next_in_service) {
94
95
96
97
98
99
100 change_without_lookup = true;
101
102
103
104
105
106
107 if (next_in_service) {
108 unsigned int new_entity_class_idx =
109 bfq_class_idx(new_entity);
110 struct bfq_service_tree *st =
111 sd->service_tree + new_entity_class_idx;
112
113 change_without_lookup =
114 (new_entity_class_idx ==
115 bfq_class_idx(next_in_service)
116 &&
117 !bfq_gt(new_entity->start, st->vtime)
118 &&
119 bfq_gt(next_in_service->finish,
120 new_entity->finish));
121 }
122
123 if (change_without_lookup)
124 next_in_service = new_entity;
125 }
126
127 if (!change_without_lookup)
128 next_in_service = bfq_lookup_next_entity(sd, expiration);
129
130 if (next_in_service) {
131 bool new_budget_triggers_change =
132 bfq_update_parent_budget(next_in_service);
133
134 parent_sched_may_change = !sd->next_in_service ||
135 new_budget_triggers_change;
136 }
137
138 sd->next_in_service = next_in_service;
139
140 if (!next_in_service)
141 return parent_sched_may_change;
142
143 return parent_sched_may_change;
144 }
145
146 #ifdef CONFIG_BFQ_GROUP_IOSCHED
147
148 struct bfq_group *bfq_bfqq_to_bfqg(struct bfq_queue *bfqq)
149 {
150 struct bfq_entity *group_entity = bfqq->entity.parent;
151
152 if (!group_entity)
153 group_entity = &bfqq->bfqd->root_group->entity;
154
155 return container_of(group_entity, struct bfq_group, entity);
156 }
157
158
159
160
161
162 static bool bfq_update_parent_budget(struct bfq_entity *next_in_service)
163 {
164 struct bfq_entity *bfqg_entity;
165 struct bfq_group *bfqg;
166 struct bfq_sched_data *group_sd;
167 bool ret = false;
168
169 group_sd = next_in_service->sched_data;
170
171 bfqg = container_of(group_sd, struct bfq_group, sched_data);
172
173
174
175
176
177 bfqg_entity = bfqg->my_entity;
178 if (bfqg_entity) {
179 if (bfqg_entity->budget > next_in_service->budget)
180 ret = true;
181 bfqg_entity->budget = next_in_service->budget;
182 }
183
184 return ret;
185 }
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207 static bool bfq_no_longer_next_in_service(struct bfq_entity *entity)
208 {
209 struct bfq_group *bfqg;
210
211 if (bfq_entity_to_bfqq(entity))
212 return true;
213
214 bfqg = container_of(entity, struct bfq_group, entity);
215
216
217
218
219
220
221
222
223
224
225
226
227
228 if (bfqg->active_entities == 1)
229 return true;
230
231 return false;
232 }
233
234 #else
235
/* Without group scheduling, every queue belongs to the root group. */
struct bfq_group *bfq_bfqq_to_bfqg(struct bfq_queue *bfqq)
{
	return bfqq->bfqd->root_group;
}
240
/* Without a group hierarchy there is no parent budget to propagate. */
static bool bfq_update_parent_budget(struct bfq_entity *next_in_service)
{
	return false;
}
245
/*
 * Without group scheduling every entity is a queue, which always stops
 * being next-in-service once selected for service.
 */
static bool bfq_no_longer_next_in_service(struct bfq_entity *entity)
{
	return true;
}
250
251 #endif
252
253
254
255
256
257
258
259
260 #define WFQ_SERVICE_SHIFT 22
261
262 struct bfq_queue *bfq_entity_to_bfqq(struct bfq_entity *entity)
263 {
264 struct bfq_queue *bfqq = NULL;
265
266 if (!entity->my_sched_data)
267 bfqq = container_of(entity, struct bfq_queue, entity);
268
269 return bfqq;
270 }
271
272
273
274
275
276
277
/*
 * Convert an amount of service into a virtual-time delta for an entity
 * of the given weight.  The service is shifted up before the division
 * to preserve precision; do_div() divides its first argument in place.
 */
static u64 bfq_delta(unsigned long service, unsigned long weight)
{
	u64 d = (u64)service << WFQ_SERVICE_SHIFT;

	do_div(d, weight);
	return d;
}
285
286
287
288
289
290
/*
 * bfq_calc_finish - assign the finish time of an entity.
 * @entity: the entity to act upon.
 * @service: the service to be charged to the entity.
 *
 * finish = start + service scaled by the entity's weight.
 */
static void bfq_calc_finish(struct bfq_entity *entity, unsigned long service)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

	entity->finish = entity->start +
		bfq_delta(service, entity->weight);

	if (bfqq) {
		bfq_log_bfqq(bfqq->bfqd, bfqq,
			"calc_finish: serv %lu, w %d",
			service, entity->weight);
		bfq_log_bfqq(bfqq->bfqd, bfqq,
			"calc_finish: start %llu, finish %llu, delta %llu",
			entity->start, entity->finish,
			bfq_delta(service, entity->weight));
	}
}
308
309
310
311
312
313
314
315
316
317
318 struct bfq_entity *bfq_entity_of(struct rb_node *node)
319 {
320 struct bfq_entity *entity = NULL;
321
322 if (node)
323 entity = rb_entry(node, struct bfq_entity, rb_node);
324
325 return entity;
326 }
327
328
329
330
331
332
333 static void bfq_extract(struct rb_root *root, struct bfq_entity *entity)
334 {
335 entity->tree = NULL;
336 rb_erase(&entity->rb_node, root);
337 }
338
339
340
341
342
343
/*
 * bfq_idle_extract - extract an entity from the idle tree.
 * @st: the service tree of the owning @entity.
 * @entity: the entity being removed.
 */
static void bfq_idle_extract(struct bfq_service_tree *st,
			     struct bfq_entity *entity)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
	struct rb_node *next;

	/*
	 * Fix up the cached first/last idle pointers before the erase,
	 * while rb_next()/rb_prev() on the entity are still valid.
	 */
	if (entity == st->first_idle) {
		next = rb_next(&entity->rb_node);
		st->first_idle = bfq_entity_of(next);
	}

	if (entity == st->last_idle) {
		next = rb_prev(&entity->rb_node);
		st->last_idle = bfq_entity_of(next);
	}

	bfq_extract(&st->idle, entity);

	if (bfqq)
		list_del(&bfqq->bfqq_list);
}
365
366
367
368
369
370
371
372
373
374 static void bfq_insert(struct rb_root *root, struct bfq_entity *entity)
375 {
376 struct bfq_entity *entry;
377 struct rb_node **node = &root->rb_node;
378 struct rb_node *parent = NULL;
379
380 while (*node) {
381 parent = *node;
382 entry = rb_entry(parent, struct bfq_entity, rb_node);
383
384 if (bfq_gt(entry->finish, entity->finish))
385 node = &parent->rb_left;
386 else
387 node = &parent->rb_right;
388 }
389
390 rb_link_node(&entity->rb_node, parent, node);
391 rb_insert_color(&entity->rb_node, root);
392
393 entity->tree = root;
394 }
395
396
397
398
399
400
401
402
403
404
405
406 static void bfq_update_min(struct bfq_entity *entity, struct rb_node *node)
407 {
408 struct bfq_entity *child;
409
410 if (node) {
411 child = rb_entry(node, struct bfq_entity, rb_node);
412 if (bfq_gt(entity->min_start, child->min_start))
413 entity->min_start = child->min_start;
414 }
415 }
416
417
418
419
420
421
422
423
424
/*
 * bfq_update_active_node - recalculate min_start for a node.
 * @node: the node to update.
 *
 * min_start caches the minimum start time over the node's subtree; it
 * is rebuilt from the node's own start and its children's min_start.
 */
static void bfq_update_active_node(struct rb_node *node)
{
	struct bfq_entity *entity = rb_entry(node, struct bfq_entity, rb_node);

	entity->min_start = entity->start;
	bfq_update_min(entity, node->rb_right);
	bfq_update_min(entity, node->rb_left);
}
433
434
435
436
437
438
439
440
441
442
443
444 static void bfq_update_active_tree(struct rb_node *node)
445 {
446 struct rb_node *parent;
447
448 up:
449 bfq_update_active_node(node);
450
451 parent = rb_parent(node);
452 if (!parent)
453 return;
454
455 if (node == parent->rb_left && parent->rb_right)
456 bfq_update_active_node(parent->rb_right);
457 else if (parent->rb_left)
458 bfq_update_active_node(parent->rb_left);
459
460 node = parent;
461 goto up;
462 }
463
464
465
466
467
468
469
470
471
472
473
474
/*
 * bfq_active_insert - insert an entity in the active tree of its
 *                     group/device.
 * @st: the service tree of the entity.
 * @entity: the entity being inserted.
 *
 * After the insertion, min_start is propagated upward from the deepest
 * node the insertion may have affected.  Queue entities are also put
 * on the device's active list, and non-root groups track how many of
 * their entities are active.
 */
static void bfq_active_insert(struct bfq_service_tree *st,
			      struct bfq_entity *entity)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
	struct rb_node *node = &entity->rb_node;
#ifdef CONFIG_BFQ_GROUP_IOSCHED
	struct bfq_sched_data *sd = NULL;
	struct bfq_group *bfqg = NULL;
	struct bfq_data *bfqd = NULL;
#endif

	bfq_insert(&st->active, entity);

	/* start the min_start repair from a child, if any */
	if (node->rb_left)
		node = node->rb_left;
	else if (node->rb_right)
		node = node->rb_right;

	bfq_update_active_tree(node);

#ifdef CONFIG_BFQ_GROUP_IOSCHED
	sd = entity->sched_data;
	bfqg = container_of(sd, struct bfq_group, sched_data);
	bfqd = (struct bfq_data *)bfqg->bfqd;
#endif
	if (bfqq)
		list_add(&bfqq->bfqq_list, &bfqq->bfqd->active_list);
#ifdef CONFIG_BFQ_GROUP_IOSCHED
	if (bfqg != bfqd->root_group)
		bfqg->active_entities++;
#endif
}
507
508
509
510
511
512 unsigned short bfq_ioprio_to_weight(int ioprio)
513 {
514 return (IOPRIO_BE_NR - ioprio) * BFQ_WEIGHT_CONVERSION_COEFF;
515 }
516
517
518
519
520
521
522
523
524
525 static unsigned short bfq_weight_to_ioprio(int weight)
526 {
527 return max_t(int, 0,
528 IOPRIO_BE_NR * BFQ_WEIGHT_CONVERSION_COEFF - weight);
529 }
530
/*
 * Take a reference on the scheduling object backing @entity: the
 * queue's refcount for a queue entity, the group/blkg references for a
 * group entity.
 */
static void bfq_get_entity(struct bfq_entity *entity)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

	if (bfqq) {
		bfqq->ref++;
		bfq_log_bfqq(bfqq->bfqd, bfqq, "get_entity: %p %d",
			     bfqq, bfqq->ref);
	} else
		bfqg_and_blkg_get(container_of(entity, struct bfq_group,
					       entity));
}
543
544
545
546
547
548
549
550
551
552
553 static struct rb_node *bfq_find_deepest(struct rb_node *node)
554 {
555 struct rb_node *deepest;
556
557 if (!node->rb_right && !node->rb_left)
558 deepest = rb_parent(node);
559 else if (!node->rb_right)
560 deepest = node->rb_left;
561 else if (!node->rb_left)
562 deepest = node->rb_right;
563 else {
564 deepest = rb_next(node);
565 if (deepest->rb_right)
566 deepest = deepest->rb_right;
567 else if (rb_parent(deepest) != node)
568 deepest = rb_parent(deepest);
569 }
570
571 return deepest;
572 }
573
574
575
576
577
578
/*
 * bfq_active_extract - remove an entity from the active tree.
 * @st: the service_tree containing the tree.
 * @entity: the entity being removed.
 *
 * Mirror of bfq_active_insert(): also repairs min_start, unlinks
 * queue entities from the device's active list, and updates the
 * active-entity count of non-root groups.
 */
static void bfq_active_extract(struct bfq_service_tree *st,
			       struct bfq_entity *entity)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
	struct rb_node *node;
#ifdef CONFIG_BFQ_GROUP_IOSCHED
	struct bfq_sched_data *sd = NULL;
	struct bfq_group *bfqg = NULL;
	struct bfq_data *bfqd = NULL;
#endif

	node = bfq_find_deepest(&entity->rb_node);
	bfq_extract(&st->active, entity);

	/* repair min_start from the deepest node the erase touched */
	if (node)
		bfq_update_active_tree(node);

#ifdef CONFIG_BFQ_GROUP_IOSCHED
	sd = entity->sched_data;
	bfqg = container_of(sd, struct bfq_group, sched_data);
	bfqd = (struct bfq_data *)bfqg->bfqd;
#endif
	if (bfqq)
		list_del(&bfqq->bfqq_list);
#ifdef CONFIG_BFQ_GROUP_IOSCHED
	if (bfqg != bfqd->root_group)
		bfqg->active_entities--;
#endif
}
608
609
610
611
612
613
614 static void bfq_idle_insert(struct bfq_service_tree *st,
615 struct bfq_entity *entity)
616 {
617 struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
618 struct bfq_entity *first_idle = st->first_idle;
619 struct bfq_entity *last_idle = st->last_idle;
620
621 if (!first_idle || bfq_gt(first_idle->finish, entity->finish))
622 st->first_idle = entity;
623 if (!last_idle || bfq_gt(entity->finish, last_idle->finish))
624 st->last_idle = entity;
625
626 bfq_insert(&st->idle, entity);
627
628 if (bfqq)
629 list_add(&bfqq->bfqq_list, &bfqq->bfqd->idle_list);
630 }
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647 static void bfq_forget_entity(struct bfq_service_tree *st,
648 struct bfq_entity *entity,
649 bool is_in_service)
650 {
651 struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
652
653 entity->on_st = false;
654 st->wsum -= entity->weight;
655 if (is_in_service)
656 return;
657
658 if (bfqq)
659 bfq_put_queue(bfqq);
660 else
661 bfqg_and_blkg_put(container_of(entity, struct bfq_group,
662 entity));
663 }
664
665
666
667
668
669
670 void bfq_put_idle_entity(struct bfq_service_tree *st, struct bfq_entity *entity)
671 {
672 bfq_idle_extract(st, entity);
673 bfq_forget_entity(st, entity,
674 entity == entity->sched_data->in_service_entity);
675 }
676
677
678
679
680
681
682
683
/*
 * bfq_forget_idle - update the idle tree if necessary.
 * @st: the service tree to act upon.
 *
 * Releases at most one idle entity per call, keeping each invocation
 * cheap; the idle tree is drained over successive calls.
 */
static void bfq_forget_idle(struct bfq_service_tree *st)
{
	struct bfq_entity *first_idle = st->first_idle;
	struct bfq_entity *last_idle = st->last_idle;

	if (RB_EMPTY_ROOT(&st->active) && last_idle &&
	    !bfq_gt(last_idle->finish, st->vtime)) {
		/*
		 * Active tree empty and every idle finish time already
		 * reached: pull vtime forward to the largest idle
		 * finish time, making all idle entities releasable.
		 */
		st->vtime = last_idle->finish;
	}

	if (first_idle && !bfq_gt(first_idle->finish, st->vtime))
		bfq_put_idle_entity(st, first_idle);
}
701
702 struct bfq_service_tree *bfq_entity_service_tree(struct bfq_entity *entity)
703 {
704 struct bfq_sched_data *sched_data = entity->sched_data;
705 unsigned int idx = bfq_class_idx(entity);
706
707 return sched_data->service_tree + idx;
708 }
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
/*
 * __bfq_entity_update_weight_prio - apply a pending weight/priority
 * change to an entity.
 * @old_st: the service tree the entity is currently accounted on.
 * @entity: the entity being updated.
 * @update_class_too: whether the ioprio class may be updated as well.
 *
 * If entity->prio_changed is set, move the entity's weight
 * contribution from @old_st to the tree matching its (possibly new)
 * class, clamp out-of-range weights, and keep the device's
 * queue-weights tree in sync.  Returns the (possibly new) service
 * tree the entity now belongs to.
 */
struct bfq_service_tree *
__bfq_entity_update_weight_prio(struct bfq_service_tree *old_st,
				struct bfq_entity *entity,
				bool update_class_too)
{
	struct bfq_service_tree *new_st = old_st;

	if (entity->prio_changed) {
		struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
		unsigned int prev_weight, new_weight;
		struct bfq_data *bfqd = NULL;
		struct rb_root_cached *root;
#ifdef CONFIG_BFQ_GROUP_IOSCHED
		struct bfq_sched_data *sd;
		struct bfq_group *bfqg;
#endif

		if (bfqq)
			bfqd = bfqq->bfqd;
#ifdef CONFIG_BFQ_GROUP_IOSCHED
		else {
			sd = entity->my_sched_data;
			bfqg = container_of(sd, struct bfq_group, sched_data);
			bfqd = (struct bfq_data *)bfqg->bfqd;
		}
#endif

		/*
		 * NOTE(review): presumably pairs with a write barrier
		 * on the side that updates new_weight — confirm.
		 */
		smp_rmb();
		old_st->wsum -= entity->weight;

		if (entity->new_weight != entity->orig_weight) {
			/* clamp out-of-range weights, loudly */
			if (entity->new_weight < BFQ_MIN_WEIGHT ||
			    entity->new_weight > BFQ_MAX_WEIGHT) {
				pr_crit("update_weight_prio: new_weight %d\n",
					entity->new_weight);
				if (entity->new_weight < BFQ_MIN_WEIGHT)
					entity->new_weight = BFQ_MIN_WEIGHT;
				else
					entity->new_weight = BFQ_MAX_WEIGHT;
			}
			entity->orig_weight = entity->new_weight;
			if (bfqq)
				bfqq->ioprio =
					bfq_weight_to_ioprio(entity->orig_weight);
		}

		if (bfqq && update_class_too)
			bfqq->ioprio_class = bfqq->new_ioprio_class;

		/*
		 * Keep prio_changed set if the class change has not
		 * been applied yet, so it happens on a later call.
		 */
		if (!bfqq || bfqq->ioprio_class == bfqq->new_ioprio_class)
			entity->prio_changed = 0;

		/* the class may have changed: pick the matching tree */
		new_st = bfq_entity_service_tree(entity);

		prev_weight = entity->weight;
		/* weight-raised queues use orig_weight * wr_coeff */
		new_weight = entity->orig_weight *
			(bfqq ? bfqq->wr_coeff : 1);

		/*
		 * On a weight change, take the queue out of the
		 * weights tree before re-inserting it with the new
		 * weight.
		 */
		if (prev_weight != new_weight && bfqq) {
			root = &bfqd->queue_weights_tree;
			__bfq_weights_tree_remove(bfqd, bfqq, root);
		}
		entity->weight = new_weight;

		/* only non-weight-raised queues are tracked in the tree */
		if (prev_weight != new_weight && bfqq && bfqq->wr_coeff == 1) {
			/* root was assigned above (same condition) */
			bfq_weights_tree_add(bfqd, bfqq, root);
		}

		new_st->wsum += entity->weight;

		if (new_st != old_st)
			entity->start = new_st->vtime;
	}

	return new_st;
}
824
825
826
827
828
829
830
831
832
833
834
/*
 * bfq_bfqq_served - update the scheduler state after @bfqq received
 * @served units of service.
 * @bfqq: the queue being served.
 * @served: the amount of service received.
 *
 * Charges the service to every entity on the path from the queue to
 * the root, advancing the vtime of each service tree accordingly.
 */
void bfq_bfqq_served(struct bfq_queue *bfqq, int served)
{
	struct bfq_entity *entity = &bfqq->entity;
	struct bfq_service_tree *st;

	/* first service since becoming backlogged: record the time */
	if (!bfqq->service_from_backlogged)
		bfqq->first_IO_time = jiffies;

	if (bfqq->wr_coeff > 1)
		bfqq->service_from_wr += served;

	bfqq->service_from_backlogged += served;
	for_each_entity(entity) {
		st = bfq_entity_service_tree(entity);

		entity->service += served;

		st->vtime += bfq_delta(served, st->wsum);
		bfq_forget_idle(st);
	}
	bfq_log_bfqq(bfqq->bfqd, bfqq, "bfqq_served %d secs", served);
}
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
/*
 * bfq_bfqq_charge_time - charge @bfqq an amount of service equivalent
 * to the time it has occupied the device.
 * @bfqd: the device data.
 * @bfqq: the queue to charge.
 * @time_ms: the service time, in milliseconds (capped at the
 *	     scheduler timeout).
 *
 * The charge is computed as the fraction of the maximum budget
 * corresponding to time_ms / timeout, and never less than the service
 * already accounted to the entity.
 */
void bfq_bfqq_charge_time(struct bfq_data *bfqd, struct bfq_queue *bfqq,
			  unsigned long time_ms)
{
	struct bfq_entity *entity = &bfqq->entity;
	unsigned long timeout_ms = jiffies_to_msecs(bfq_timeout);
	unsigned long bounded_time_ms = min(time_ms, timeout_ms);
	int serv_to_charge_for_time =
		(bfqd->bfq_max_budget * bounded_time_ms) / timeout_ms;
	int tot_serv_to_charge = max(serv_to_charge_for_time, entity->service);

	/* Increase budget, to avoid charging more than allotted. */
	if (tot_serv_to_charge > entity->budget)
		entity->budget = tot_serv_to_charge;

	bfq_bfqq_served(bfqq,
			max_t(int, 0, tot_serv_to_charge - entity->service));
}
902
/*
 * Apply any pending weight/priority update, compute the entity's
 * finish time, possibly shift back-shifted timestamps forward, and
 * insert the entity into the active tree.
 */
static void bfq_update_fin_time_enqueue(struct bfq_entity *entity,
					struct bfq_service_tree *st,
					bool backshifted)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

	/*
	 * Update the tree first: a priority change may have moved the
	 * entity to a different class, and its finish time must be
	 * computed against the tree it will actually be queued on.
	 */
	st = __bfq_entity_update_weight_prio(st, entity, true);
	bfq_calc_finish(entity, entity->budget);

	/*
	 * If the entity was back-shifted (its timestamps start in the
	 * past w.r.t. the tree's vtime), move them forward by the lag,
	 * scaled down by the weight-raising coefficient so that
	 * weight-raised queues retain more of their advantage.
	 */
	if (backshifted && bfq_gt(st->vtime, entity->finish)) {
		unsigned long delta = st->vtime - entity->finish;

		if (bfqq)
			delta /= bfqq->wr_coeff;

		entity->start += delta;
		entity->finish += delta;
	}

	bfq_active_insert(st, entity);
}
962
963
964
965
966
967
968
969
970
971
972
973
974
/*
 * __bfq_activate_entity - handle activation of an entity.
 * @entity: the entity being activated.
 * @non_blocking_wait_rq: true if entity was waiting for a request.
 *
 * Called for a "true" activation, i.e. when the entity is not active
 * and one of its children receives a new request.
 */
static void __bfq_activate_entity(struct bfq_entity *entity,
				  bool non_blocking_wait_rq)
{
	struct bfq_service_tree *st = bfq_entity_service_tree(entity);
	bool backshifted = false;
	unsigned long long min_vstart;

	/*
	 * If the entity was waiting for a request and the vtime has
	 * moved past its old finish time, allow its new start to be
	 * back-shifted to that finish time (compensated later in
	 * bfq_update_fin_time_enqueue()).
	 */
	if (non_blocking_wait_rq && bfq_gt(st->vtime, entity->finish)) {
		backshifted = true;
		min_vstart = entity->finish;
	} else
		min_vstart = st->vtime;

	if (entity->tree == &st->idle) {
		/*
		 * Coming back from the idle tree: the entity already
		 * holds a reference and contributes to wsum is false —
		 * NOTE(review): wsum was dropped in bfq_forget_entity
		 * only on forget; here only extraction is needed.
		 */
		bfq_idle_extract(st, entity);
		entity->start = bfq_gt(min_vstart, entity->finish) ?
			min_vstart : entity->finish;
	} else {
		/*
		 * Entity was fully forgotten: re-add its weight to the
		 * tree's weight sum and take a fresh reference.
		 */
		entity->start = min_vstart;
		st->wsum += entity->weight;
		/*
		 * entity is about to be inserted, so take a reference
		 * on the scheduling object backing it.
		 */
		bfq_get_entity(entity);

		entity->on_st = true;
	}

#ifdef CONFIG_BFQ_GROUP_IOSCHED
	if (!bfq_entity_to_bfqq(entity)) { /* bfq_group */
		struct bfq_group *bfqg =
			container_of(entity, struct bfq_group, entity);
		struct bfq_data *bfqd = bfqg->bfqd;

		/* count groups that have requests pending */
		if (!entity->in_groups_with_pending_reqs) {
			entity->in_groups_with_pending_reqs = true;
			bfqd->num_groups_with_pending_reqs++;
		}
	}
#endif

	bfq_update_fin_time_enqueue(entity, st, backshifted);
}
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
/*
 * __bfq_requeue_entity - handle requeueing or repositioning of an
 * entity that is already backlogged.
 * @entity: the entity being requeued or repositioned.
 *
 * Requeueing: the entity is in service; it gets new timestamps
 * starting from the finish time of its current (partially consumed)
 * allotment.  Repositioning: the entity is active but not in service,
 * and is being moved (e.g. after a class/weight change): it is
 * extracted and re-inserted with updated timestamps.
 */
static void __bfq_requeue_entity(struct bfq_entity *entity)
{
	struct bfq_sched_data *sd = entity->sched_data;
	struct bfq_service_tree *st = bfq_entity_service_tree(entity);

	if (entity == sd->in_service_entity) {
		/*
		 * Close the current allotment at the service actually
		 * received, and start the next one from there.
		 */
		bfq_calc_finish(entity, entity->service);
		entity->start = entity->finish;
		/*
		 * An in-service entity may still be on its active
		 * tree (see bfq_get_next_queue(): only entities that
		 * certainly stop being next-in-service are extracted
		 * when set in service).  Extract it now if so.
		 */
		if (entity->tree)
			bfq_active_extract(st, entity);
	} else {
		/*
		 * Not in service: simply extract it; fresh timestamps
		 * are assigned in bfq_update_fin_time_enqueue() below.
		 */
		bfq_active_extract(st, entity);
	}

	bfq_update_fin_time_enqueue(entity, st, false);
}
1112
1113 static void __bfq_activate_requeue_entity(struct bfq_entity *entity,
1114 struct bfq_sched_data *sd,
1115 bool non_blocking_wait_rq)
1116 {
1117 struct bfq_service_tree *st = bfq_entity_service_tree(entity);
1118
1119 if (sd->in_service_entity == entity || entity->tree == &st->active)
1120
1121
1122
1123
1124 __bfq_requeue_entity(entity);
1125 else
1126
1127
1128
1129
1130 __bfq_activate_entity(entity, non_blocking_wait_rq);
1131 }
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
/*
 * bfq_activate_requeue_entity - activate or requeue an entity, and all
 * the ancestors for which such an update becomes necessary.
 * @entity: the entity to activate or requeue.
 * @non_blocking_wait_rq: true if the entity was waiting for a request.
 * @requeue: true if ALL ancestors must be requeued as well (the queue
 *	     is being expired).
 * @expiration: true if invoked on the expiration path of the
 *	     in-service entity.
 */
static void bfq_activate_requeue_entity(struct bfq_entity *entity,
					bool non_blocking_wait_rq,
					bool requeue, bool expiration)
{
	struct bfq_sched_data *sd;

	for_each_entity(entity) {
		sd = entity->sched_data;
		__bfq_activate_requeue_entity(entity, sd, non_blocking_wait_rq);

		/*
		 * Stop climbing once next_in_service stops changing,
		 * unless every ancestor must be requeued anyway.
		 */
		if (!bfq_update_next_in_service(sd, entity, expiration) &&
		    !requeue)
			break;
	}
}
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
/*
 * __bfq_deactivate_entity - update sched_data and the service trees so
 * that @entity is represented as inactive.
 * @entity: the entity being deactivated.
 * @ins_into_idle_tree: if false, the entity is never put on the idle
 *			tree (it is forgotten instead).
 *
 * Returns true if the entity was actually deactivated, false if it was
 * not active in the first place.
 */
bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree)
{
	struct bfq_sched_data *sd = entity->sched_data;
	struct bfq_service_tree *st;
	bool is_in_service;

	if (!entity->on_st) /* not on any service tree: nothing to do */
		return false;

	st = bfq_entity_service_tree(entity);
	is_in_service = entity == sd->in_service_entity;

	/* close the current allotment at the service received */
	bfq_calc_finish(entity, entity->service);

	if (is_in_service)
		sd->in_service_entity = NULL;
	else
		/*
		 * Reset the service counter of non-in-service
		 * entities.  NOTE(review): the in-service entity's
		 * counter is presumably reset elsewhere, on
		 * expiration — confirm.
		 */
		entity->service = 0;

	if (entity->tree == &st->active)
		bfq_active_extract(st, entity);
	else if (!is_in_service && entity->tree == &st->idle)
		bfq_idle_extract(st, entity);

	/*
	 * Keep the entity on the idle tree only if requested and its
	 * finish time is still in the future w.r.t. the tree's vtime;
	 * otherwise drop it entirely.
	 */
	if (!ins_into_idle_tree || !bfq_gt(entity->finish, st->vtime))
		bfq_forget_entity(st, entity, is_in_service);
	else
		bfq_idle_insert(st, entity);

	return true;
}
1215
1216
1217
1218
1219
1220
1221
1222
/*
 * bfq_deactivate_entity - deactivate an entity representing a queue,
 * propagating the deactivation up the hierarchy as needed.
 * @entity: the entity to deactivate.
 * @ins_into_idle_tree: true if the entity may be put on the idle tree.
 * @expiration: true if invoked on the expiration path of the
 *		in-service entity.
 */
static void bfq_deactivate_entity(struct bfq_entity *entity,
				  bool ins_into_idle_tree,
				  bool expiration)
{
	struct bfq_sched_data *sd;
	struct bfq_entity *parent = NULL;

	for_each_entity_safe(entity, parent) {
		sd = entity->sched_data;

		if (!__bfq_deactivate_entity(entity, ins_into_idle_tree)) {
			/*
			 * Entity was not active: the deactivation is a
			 * no-op, and no ancestor needs updating.
			 */
			return;
		}

		if (sd->next_in_service == entity)
			/*
			 * The deactivated entity was next to serve:
			 * look up a replacement.
			 */
			bfq_update_next_in_service(sd, NULL, expiration);

		if (sd->next_in_service || sd->in_service_entity) {
			/*
			 * The parent is still backlogged (or in
			 * service): stop the deactivation walk here and
			 * fall through to the requeue loop below.
			 */
			break;
		}

		/*
		 * The parent has become idle: it must be deactivated
		 * too, and it can certainly go to the idle tree.
		 */
		ins_into_idle_tree = true;
	}

	/*
	 * Requeue/reposition the remaining ancestors (if any), whose
	 * timestamps may need updating after the deactivation below
	 * them.
	 */
	entity = parent;
	for_each_entity(entity) {
		/*
		 * Invoke __bfq_requeue_entity even on already-active
		 * ancestors, to refresh their position in the tree.
		 */
		__bfq_requeue_entity(entity);

		sd = entity->sched_data;
		if (!bfq_update_next_in_service(sd, entity, expiration) &&
		    !expiration)
			/*
			 * next_in_service unchanged and not expiring:
			 * no upper entity needs touching.
			 */
			break;
	}
}
1320
1321
1322
1323
1324
1325
1326
1327
1328 static u64 bfq_calc_vtime_jump(struct bfq_service_tree *st)
1329 {
1330 struct bfq_entity *root_entity = bfq_root_active_entity(&st->active);
1331
1332 if (bfq_gt(root_entity->min_start, st->vtime))
1333 return root_entity->min_start;
1334
1335 return st->vtime;
1336 }
1337
1338 static void bfq_update_vtime(struct bfq_service_tree *st, u64 new_value)
1339 {
1340 if (new_value > st->vtime) {
1341 st->vtime = new_value;
1342 bfq_forget_idle(st);
1343 }
1344 }
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
/*
 * bfq_first_active_entity - find the eligible entity with the smallest
 * finish time.
 * @st: the service tree to select from.
 * @vtime: the eligibility threshold.
 *
 * Searches the augmented active tree (min_start caches each subtree's
 * minimum start time) for the leftmost — i.e. smallest-finish-time —
 * entity whose start time is not after @vtime.
 */
static struct bfq_entity *bfq_first_active_entity(struct bfq_service_tree *st,
						  u64 vtime)
{
	struct bfq_entity *entry, *first = NULL;
	struct rb_node *node = st->active.rb_node;

	while (node) {
		entry = rb_entry(node, struct bfq_entity, rb_node);
left:
		/* eligible: remember it, then look for a smaller finish */
		if (!bfq_gt(entry->start, vtime))
			first = entry;

		if (node->rb_left) {
			entry = rb_entry(node->rb_left,
					 struct bfq_entity, rb_node);
			/* descend left only if it holds an eligible entity */
			if (!bfq_gt(entry->min_start, vtime)) {
				node = node->rb_left;
				goto left;
			}
		}
		if (first)
			break;
		node = node->rb_right;
	}

	return first;
}
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408 static struct bfq_entity *
1409 __bfq_lookup_next_entity(struct bfq_service_tree *st, bool in_service)
1410 {
1411 struct bfq_entity *entity;
1412 u64 new_vtime;
1413
1414 if (RB_EMPTY_ROOT(&st->active))
1415 return NULL;
1416
1417
1418
1419
1420
1421 new_vtime = bfq_calc_vtime_jump(st);
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433 if (!in_service)
1434 bfq_update_vtime(st, new_vtime);
1435
1436 entity = bfq_first_active_entity(st, new_vtime);
1437
1438 return entity;
1439 }
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450 static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd,
1451 bool expiration)
1452 {
1453 struct bfq_service_tree *st = sd->service_tree;
1454 struct bfq_service_tree *idle_class_st = st + (BFQ_IOPRIO_CLASSES - 1);
1455 struct bfq_entity *entity = NULL;
1456 int class_idx = 0;
1457
1458
1459
1460
1461
1462
1463
1464
1465 if (time_is_before_jiffies(sd->bfq_class_idle_last_service +
1466 BFQ_CL_IDLE_TIMEOUT)) {
1467 if (!RB_EMPTY_ROOT(&idle_class_st->active))
1468 class_idx = BFQ_IOPRIO_CLASSES - 1;
1469
1470 sd->bfq_class_idle_last_service = jiffies;
1471 }
1472
1473
1474
1475
1476
1477 for (; class_idx < BFQ_IOPRIO_CLASSES; class_idx++) {
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493 entity = __bfq_lookup_next_entity(st + class_idx,
1494 sd->in_service_entity &&
1495 !expiration);
1496
1497 if (entity)
1498 break;
1499 }
1500
1501 if (!entity)
1502 return NULL;
1503
1504 return entity;
1505 }
1506
/*
 * Tell whether the next entity to serve at the root differs from the
 * one currently in service, i.e. whether a preemption check is
 * warranted.
 */
bool next_queue_may_preempt(struct bfq_data *bfqd)
{
	struct bfq_sched_data *sd = &bfqd->root_group->sched_data;

	return sd->next_in_service != sd->in_service_entity;
}
1513
1514
1515
1516
1517 struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
1518 {
1519 struct bfq_entity *entity = NULL;
1520 struct bfq_sched_data *sd;
1521 struct bfq_queue *bfqq;
1522
1523 if (bfq_tot_busy_queues(bfqd) == 0)
1524 return NULL;
1525
1526
1527
1528
1529
1530
1531 sd = &bfqd->root_group->sched_data;
1532 for (; sd ; sd = entity->my_sched_data) {
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558 entity = sd->next_in_service;
1559 sd->in_service_entity = entity;
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569 if (bfq_no_longer_next_in_service(entity))
1570 bfq_active_extract(bfq_entity_service_tree(entity),
1571 entity);
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591 }
1592
1593 bfqq = bfq_entity_to_bfqq(entity);
1594
1595
1596
1597
1598
1599 for_each_entity(entity) {
1600 struct bfq_sched_data *sd = entity->sched_data;
1601
1602 if (!bfq_update_next_in_service(sd, NULL, false))
1603 break;
1604 }
1605
1606 return bfqq;
1607 }
1608
1609
/*
 * Reset the device's in-service state: clear the in-service queue and
 * the in_service_entity pointers along its path to the root.
 *
 * Returns true if the in-service queue got freed here (the scheduler's
 * reference was its last one), in which case the caller must not
 * dereference it any more.
 */
bool __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
{
	struct bfq_queue *in_serv_bfqq = bfqd->in_service_queue;
	struct bfq_entity *in_serv_entity = &in_serv_bfqq->entity;
	struct bfq_entity *entity = in_serv_entity;

	bfq_clear_bfqq_wait_request(in_serv_bfqq);
	hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
	bfqd->in_service_queue = NULL;

	/* clear in_service_entity along the path up to the root */
	for_each_entity(entity)
		entity->sched_data->in_service_entity = NULL;

	/*
	 * The entity is no longer in service; if it is on no service
	 * tree either, release the service reference taken in
	 * bfq_get_entity() (kept while in service, see
	 * bfq_forget_entity()).
	 */
	if (!in_serv_entity->on_st) {
		/*
		 * If this was the queue's last reference, the queue is
		 * freed by this bfq_put_queue() call.
		 */
		int ref = in_serv_bfqq->ref;
		bfq_put_queue(in_serv_bfqq);
		if (ref == 1)
			return true;
	}

	return false;
}
1649
1650 void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
1651 bool ins_into_idle_tree, bool expiration)
1652 {
1653 struct bfq_entity *entity = &bfqq->entity;
1654
1655 bfq_deactivate_entity(entity, ins_into_idle_tree, expiration);
1656 }
1657
1658 void bfq_activate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
1659 {
1660 struct bfq_entity *entity = &bfqq->entity;
1661
1662 bfq_activate_requeue_entity(entity, bfq_bfqq_non_blocking_wait_rq(bfqq),
1663 false, false);
1664 bfq_clear_bfqq_non_blocking_wait_rq(bfqq);
1665 }
1666
1667 void bfq_requeue_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
1668 bool expiration)
1669 {
1670 struct bfq_entity *entity = &bfqq->entity;
1671
1672 bfq_activate_requeue_entity(entity, false,
1673 bfqq == bfqd->in_service_queue, expiration);
1674 }
1675
1676
1677
1678
1679
1680
/*
 * Called when @bfqq no longer has requests pending: clear its busy
 * state, update the busy accounting and statistics, and deactivate
 * its entity.
 */
void bfq_del_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq,
		       bool expiration)
{
	bfq_log_bfqq(bfqd, bfqq, "del from busy");

	bfq_clear_bfqq_busy(bfqq);

	bfqd->busy_queues[bfqq->ioprio_class - 1]--;

	if (bfqq->wr_coeff > 1)
		bfqd->wr_busy_queues--;

	bfqg_stats_update_dequeue(bfqq_group(bfqq));

	/* the queue may go to the idle tree (ins_into_idle_tree=true) */
	bfq_deactivate_bfqq(bfqd, bfqq, true, expiration);

	if (!bfqq->dispatched)
		bfq_weights_tree_remove(bfqd, bfqq);
}
1700
1701
1702
1703
1704 void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq)
1705 {
1706 bfq_log_bfqq(bfqd, bfqq, "add to busy");
1707
1708 bfq_activate_bfqq(bfqd, bfqq);
1709
1710 bfq_mark_bfqq_busy(bfqq);
1711 bfqd->busy_queues[bfqq->ioprio_class - 1]++;
1712
1713 if (!bfqq->dispatched)
1714 if (bfqq->wr_coeff == 1)
1715 bfq_weights_tree_add(bfqd, bfqq,
1716 &bfqd->queue_weights_tree);
1717
1718 if (bfqq->wr_coeff > 1)
1719 bfqd->wr_busy_queues++;
1720 }