This source file includes the following definitions:
- tick_broadcast_setup_oneshot
- tick_broadcast_clear_oneshot
- tick_resume_broadcast_oneshot
- tick_broadcast_oneshot_offline
- tick_get_broadcast_device
- tick_get_broadcast_mask
- tick_broadcast_start_periodic
- tick_check_broadcast_device
- tick_install_broadcast_device
- tick_is_broadcast_device
- tick_broadcast_update_freq
- err_broadcast
- tick_device_setup_broadcast_func
- tick_device_uses_broadcast
- tick_receive_broadcast
- tick_do_broadcast
- tick_do_periodic_broadcast
- tick_handle_periodic_broadcast
- tick_broadcast_control
- tick_set_periodic_handler
- tick_shutdown_broadcast
- tick_broadcast_offline
- tick_suspend_broadcast
- tick_resume_check_broadcast
- tick_resume_broadcast
- tick_get_broadcast_oneshot_mask
- tick_check_broadcast_expired
- tick_broadcast_set_affinity
- tick_broadcast_set_event
- tick_resume_broadcast_oneshot
- tick_check_oneshot_broadcast_this_cpu
- tick_handle_oneshot_broadcast
- broadcast_needs_cpu
- broadcast_shutdown_local
- __tick_broadcast_oneshot_control
- tick_broadcast_clear_oneshot
- tick_broadcast_init_next_event
- tick_broadcast_setup_oneshot
- tick_broadcast_switch_to_oneshot
- hotplug_cpu__broadcast_tick_pull
- tick_broadcast_oneshot_offline
- tick_broadcast_oneshot_active
- tick_broadcast_oneshot_available
- __tick_broadcast_oneshot_control
- tick_broadcast_init
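
The list above is effectively the broadcast-tick API consumed by the clockevents core, the idle path and architecture code; the annotated listing follows below. As a rough, hypothetical sketch of how a device ends up being managed by this file: a driver registers an always-running timer (no CLOCK_EVT_FEAT_C3STOP), and the clockevents core may then offer it to tick_install_broadcast_device(). Only the clockevents/tick calls are real kernel APIs; the driver names, frequency and delta limits are invented for illustration.

#include <linux/clockchips.h>
#include <linux/cpumask.h>

/* Hypothetical always-on timer that can act as the broadcast device. */
static int my_timer_set_next_event(unsigned long delta,
				   struct clock_event_device *ced)
{
	/* program the hardware comparator 'delta' ticks ahead */
	return 0;
}

static struct clock_event_device my_timer_ced = {
	.name		= "my-always-on-timer",
	/*
	 * No CLOCK_EVT_FEAT_C3STOP and not per-CPU, so
	 * tick_check_broadcast_device() can accept it as broadcast device.
	 */
	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.rating		= 300,
	.set_next_event	= my_timer_set_next_event,
};

static void __init my_timer_init(void)
{
	my_timer_ced.cpumask = cpu_possible_mask;
	/*
	 * Registration flows through tick_check_new_device(); if the device
	 * is not picked as a per-CPU tick device it is offered to
	 * tick_install_broadcast_device(). Frequency and deltas are made up.
	 */
	clockevents_config_and_register(&my_timer_ced, 1000000, 2, 0x7fffffff);
}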
1 /*
2  * This file contains functions which emulate a local clock-event
3  * device via a broadcast event source, for CPUs whose per-CPU timer
4  * stops in deep power states.
5  */
6
7
8
9
10 #include <linux/cpu.h>
11 #include <linux/err.h>
12 #include <linux/hrtimer.h>
13 #include <linux/interrupt.h>
14 #include <linux/percpu.h>
15 #include <linux/profile.h>
16 #include <linux/sched.h>
17 #include <linux/smp.h>
18 #include <linux/module.h>
19
20 #include "tick-internal.h"
21
22 /*
23  * Broadcast support for broken x86 hardware, where the local apic
24  * timer stops in C3 state.
25  */
26
27 static struct tick_device tick_broadcast_device;
28 static cpumask_var_t tick_broadcast_mask __cpumask_var_read_mostly;
29 static cpumask_var_t tick_broadcast_on __cpumask_var_read_mostly;
30 static cpumask_var_t tmpmask __cpumask_var_read_mostly;
31 static int tick_broadcast_forced;
32
33 static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(tick_broadcast_lock);
34
35 #ifdef CONFIG_TICK_ONESHOT
36 static void tick_broadcast_setup_oneshot(struct clock_event_device *bc);
37 static void tick_broadcast_clear_oneshot(int cpu);
38 static void tick_resume_broadcast_oneshot(struct clock_event_device *bc);
39 # ifdef CONFIG_HOTPLUG_CPU
40 static void tick_broadcast_oneshot_offline(unsigned int cpu);
41 # endif
42 #else
43 static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) { BUG(); }
44 static inline void tick_broadcast_clear_oneshot(int cpu) { }
45 static inline void tick_resume_broadcast_oneshot(struct clock_event_device *bc) { }
46 # ifdef CONFIG_HOTPLUG_CPU
47 static inline void tick_broadcast_oneshot_offline(unsigned int cpu) { }
48 # endif
49 #endif
50
51 /*
52  * Debugging: see timer_list.c
53  */
54 struct tick_device *tick_get_broadcast_device(void)
55 {
56 return &tick_broadcast_device;
57 }
58
59 struct cpumask *tick_get_broadcast_mask(void)
60 {
61 return tick_broadcast_mask;
62 }
63
64 /*
65  * Start the device in periodic mode
66  */
67 static void tick_broadcast_start_periodic(struct clock_event_device *bc)
68 {
69 if (bc)
70 tick_setup_periodic(bc, 1);
71 }
72
73 /*
74  * Check, if the device can be utilized as broadcast device:
75  */
76 static bool tick_check_broadcast_device(struct clock_event_device *curdev,
77 struct clock_event_device *newdev)
78 {
79 if ((newdev->features & CLOCK_EVT_FEAT_DUMMY) ||
80 (newdev->features & CLOCK_EVT_FEAT_PERCPU) ||
81 (newdev->features & CLOCK_EVT_FEAT_C3STOP))
82 return false;
83
84 if (tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT &&
85 !(newdev->features & CLOCK_EVT_FEAT_ONESHOT))
86 return false;
87
88 return !curdev || newdev->rating > curdev->rating;
89 }
90
91 /*
92  * Conditionally install/replace broadcast device
93  */
94 void tick_install_broadcast_device(struct clock_event_device *dev)
95 {
96 struct clock_event_device *cur = tick_broadcast_device.evtdev;
97
98 if (!tick_check_broadcast_device(cur, dev))
99 return;
100
101 if (!try_module_get(dev->owner))
102 return;
103
104 clockevents_exchange_device(cur, dev);
105 if (cur)
106 cur->event_handler = clockevents_handle_noop;
107 tick_broadcast_device.evtdev = dev;
108 if (!cpumask_empty(tick_broadcast_mask))
109 tick_broadcast_start_periodic(dev);
110
111
112
113 /*
114  * If the new broadcast device is oneshot capable, notify the tick core
115  * so it can re-evaluate switching the system to oneshot (NOHZ/highres)
116  * mode.
117  */
118 if (dev->features & CLOCK_EVT_FEAT_ONESHOT)
119 tick_clock_notify();
120 }
121
122 /*
123  * Check, if the device is the broadcast device
124  */
125 int tick_is_broadcast_device(struct clock_event_device *dev)
126 {
127 return (dev && tick_broadcast_device.evtdev == dev);
128 }
129
130 int tick_broadcast_update_freq(struct clock_event_device *dev, u32 freq)
131 {
132 int ret = -ENODEV;
133
134 if (tick_is_broadcast_device(dev)) {
135 raw_spin_lock(&tick_broadcast_lock);
136 ret = __clockevents_update_freq(dev, freq);
137 raw_spin_unlock(&tick_broadcast_lock);
138 }
139 return ret;
140 }
141
142
143 static void err_broadcast(const struct cpumask *mask)
144 {
145 pr_crit_once("Failed to broadcast timer tick. Some CPUs may be unresponsive.\n");
146 }
147
148 static void tick_device_setup_broadcast_func(struct clock_event_device *dev)
149 {
150 if (!dev->broadcast)
151 dev->broadcast = tick_broadcast;
152 if (!dev->broadcast) {
153 pr_warn_once("%s depends on broadcast, but no broadcast function available\n",
154 dev->name);
155 dev->broadcast = err_broadcast;
156 }
157 }
158
159 /*
160  * Check, if the device is dysfunctional and a placeholder, which
161  * needs to be handled by the broadcast device.
162  */
163 int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
164 {
165 struct clock_event_device *bc = tick_broadcast_device.evtdev;
166 unsigned long flags;
167 int ret = 0;
168
169 raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
170
171 /*
172  * Devices might be registered with both periodic and oneshot
173  * mode disabled. This signals, that the device needs to be
174  * operated on the broadcast device and is a placeholder for
175  * the cpu local device.
176  */
177 if (!tick_device_is_functional(dev)) {
178 dev->event_handler = tick_handle_periodic;
179 tick_device_setup_broadcast_func(dev);
180 cpumask_set_cpu(cpu, tick_broadcast_mask);
181 if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
182 tick_broadcast_start_periodic(bc);
183 else
184 tick_broadcast_setup_oneshot(bc);
185 ret = 1;
186 } else {
187 /*
188  * Clear the broadcast bit for this cpu if the
189  * device is not power state affected.
190  */
191 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
192 cpumask_clear_cpu(cpu, tick_broadcast_mask);
193 else
194 tick_device_setup_broadcast_func(dev);
195
196 /*
197  * Clear the broadcast bit if the CPU is not in
198  * periodic broadcast on state.
199  */
200 if (!cpumask_test_cpu(cpu, tick_broadcast_on))
201 cpumask_clear_cpu(cpu, tick_broadcast_mask);
202
203 switch (tick_broadcast_device.mode) {
204 case TICKDEV_MODE_ONESHOT:
205 /*
206  * If the system is in oneshot mode we can
207  * unconditionally clear the oneshot mask bit,
208  * because the CPU is running and therefore
209  * not in an idle state which causes the power
210  * state affected device to stop. Let the
211  * caller initialize the device.
212  */
213 tick_broadcast_clear_oneshot(cpu);
214 ret = 0;
215 break;
216
217 case TICKDEV_MODE_PERIODIC:
218 /*
219  * If the system is in periodic mode, check
220  * whether the broadcast device can be
221  * switched off now.
222  */
223 if (cpumask_empty(tick_broadcast_mask) && bc)
224 clockevents_shutdown(bc);
225
226 /*
227  * If we kept the cpu in the broadcast mask,
228  * tell the caller to leave the per cpu device
229  * in shutdown state. The periodic interrupt
230  * is delivered by the broadcast device, if it
231  * exists and is not hrtimer based.
232  */
233 if (bc && !(bc->features & CLOCK_EVT_FEAT_HRTIMER))
234 ret = cpumask_test_cpu(cpu, tick_broadcast_mask);
235 break;
236 default:
237 break;
238 }
239 }
240 raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
241 return ret;
242 }
243
244 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
245 int tick_receive_broadcast(void)
246 {
247 struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
248 struct clock_event_device *evt = td->evtdev;
249
250 if (!evt)
251 return -ENODEV;
252
253 if (!evt->event_handler)
254 return -EINVAL;
255
256 evt->event_handler(evt);
257 return 0;
258 }
259 #endif
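
On the sending side, tick_do_broadcast() below invokes the ->broadcast() callback of a per-CPU device (typically an architecture's timer-IPI helper); tick_receive_broadcast() above is the receiving end. A minimal, hypothetical arch-side sketch, assuming an IRQ-style IPI handler: the handler name and IPI plumbing are invented, only tick_receive_broadcast() is the real hook.

#include <linux/interrupt.h>
#include <linux/tick.h>

/*
 * Hypothetical handler for the timer-broadcast IPI: let the tick core run
 * the CPU-local clock event handler on this CPU.
 */
static irqreturn_t my_timer_broadcast_ipi(int irq, void *dev_id)
{
	tick_receive_broadcast();
	return IRQ_HANDLED;
}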
260
261 /*
262  * Broadcast the event to the cpus, which are set in the mask (mangled).
263  */
264 static bool tick_do_broadcast(struct cpumask *mask)
265 {
266 int cpu = smp_processor_id();
267 struct tick_device *td;
268 bool local = false;
269
270 /*
271  * Check, if the current cpu is in the mask
272  */
273 if (cpumask_test_cpu(cpu, mask)) {
274 struct clock_event_device *bc = tick_broadcast_device.evtdev;
275
276 cpumask_clear_cpu(cpu, mask);
277 /*
278  * We only run the local handler, if the broadcast
279  * device is not hrtimer based. Otherwise we run into
280  * a hrtimer recursion.
281  *
282  * local timer_interrupt()
283  *   local_handler()
284  *     expire_hrtimers()
285  *       bc_handler()
286  *         local_handler()
287  *           expire_hrtimers()
288  */
289 local = !(bc->features & CLOCK_EVT_FEAT_HRTIMER);
290 }
291
292 if (!cpumask_empty(mask)) {
293 /*
294  * It might be necessary to actually check whether the
295  * devices have different broadcast functions. For now,
296  * just use the broadcast function of the first device
297  * in the mask.
298  */
299 td = &per_cpu(tick_cpu_device, cpumask_first(mask));
300 td->evtdev->broadcast(mask);
301 }
302 return local;
303 }
304
305 /*
306  * Periodic broadcast:
307  * - invoke the broadcast handlers
308  */
309 static bool tick_do_periodic_broadcast(void)
310 {
311 cpumask_and(tmpmask, cpu_online_mask, tick_broadcast_mask);
312 return tick_do_broadcast(tmpmask);
313 }
314
315 /*
316  * Event handler for periodic broadcast ticks
317  */
318 static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
319 {
320 struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
321 bool bc_local;
322
323 raw_spin_lock(&tick_broadcast_lock);
324
325 /* Handle spurious interrupts gracefully */
326 if (clockevent_state_shutdown(tick_broadcast_device.evtdev)) {
327 raw_spin_unlock(&tick_broadcast_lock);
328 return;
329 }
330
331 bc_local = tick_do_periodic_broadcast();
332
333 if (clockevent_state_oneshot(dev)) {
334 ktime_t next = ktime_add(dev->next_event, tick_period);
335
336 clockevents_program_event(dev, next, true);
337 }
338 raw_spin_unlock(&tick_broadcast_lock);
339
340 /*
341  * We run the handler of the local cpu after dropping
342  * tick_broadcast_lock because the handler might deadlock when
343  * trying to switch to oneshot mode.
344  */
345 if (bc_local)
346 td->evtdev->event_handler(td->evtdev);
347 }
348
349 /**
350  * tick_broadcast_control - Enable/disable or force broadcast mode
351  * @mode:	The selected broadcast mode
352  *
353  * Called when the system enters a state where affected tick devices
354  * might stop. Note: TICK_BROADCAST_FORCE cannot be undone.
355  */
356 void tick_broadcast_control(enum tick_broadcast_mode mode)
357 {
358 struct clock_event_device *bc, *dev;
359 struct tick_device *td;
360 int cpu, bc_stopped;
361 unsigned long flags;
362
363 /* Protects also the local clockevent device. */
364 raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
365 td = this_cpu_ptr(&tick_cpu_device);
366 dev = td->evtdev;
367
368 /*
369  * Is the device not affected by the powerstate ?
370  */
371 if (!dev || !(dev->features & CLOCK_EVT_FEAT_C3STOP))
372 goto out;
373
374 if (!tick_device_is_functional(dev))
375 goto out;
376
377 cpu = smp_processor_id();
378 bc = tick_broadcast_device.evtdev;
379 bc_stopped = cpumask_empty(tick_broadcast_mask);
380
381 switch (mode) {
382 case TICK_BROADCAST_FORCE:
383 tick_broadcast_forced = 1;
384 /* fall through */
385 case TICK_BROADCAST_ON:
386 cpumask_set_cpu(cpu, tick_broadcast_on);
387 if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_mask)) {
388 /*
389  * Only shutdown the cpu local device, if:
390  *
391  * - the broadcast device exists
392  * - the broadcast device is not a hrtimer based one
393  * - the broadcast device is in periodic mode to
394  *   avoid a hiccup during switch to oneshot mode
395  */
396 if (bc && !(bc->features & CLOCK_EVT_FEAT_HRTIMER) &&
397 tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
398 clockevents_shutdown(dev);
399 }
400 break;
401
402 case TICK_BROADCAST_OFF:
403 if (tick_broadcast_forced)
404 break;
405 cpumask_clear_cpu(cpu, tick_broadcast_on);
406 if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_mask)) {
407 if (tick_broadcast_device.mode ==
408 TICKDEV_MODE_PERIODIC)
409 tick_setup_periodic(dev, 0);
410 }
411 break;
412 }
413
414 if (bc) {
415 if (cpumask_empty(tick_broadcast_mask)) {
416 if (!bc_stopped)
417 clockevents_shutdown(bc);
418 } else if (bc_stopped) {
419 if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
420 tick_broadcast_start_periodic(bc);
421 else
422 tick_broadcast_setup_oneshot(bc);
423 }
424 }
425 out:
426 raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
427 }
428 EXPORT_SYMBOL_GPL(tick_broadcast_control);
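
tick_broadcast_control() is usually reached through the tick_broadcast_enable()/tick_broadcast_disable() wrappers in <linux/tick.h>, which pass TICK_BROADCAST_ON/OFF for the calling CPU. A hedged sketch of how an idle driver whose local timer stops in deep idle might use them from CPU hotplug callbacks; the my_idle_* names and the hotplug wiring are assumptions, not taken from this file.

#include <linux/cpuhotplug.h>
#include <linux/tick.h>

/* Runs on the CPU being brought online: ask for the broadcast tick. */
static int my_idle_cpu_online(unsigned int cpu)
{
	tick_broadcast_enable();	/* tick_broadcast_control(TICK_BROADCAST_ON) */
	return 0;
}

/* Runs on the CPU going down: give the broadcast tick up again. */
static int my_idle_cpu_offline(unsigned int cpu)
{
	tick_broadcast_disable();	/* tick_broadcast_control(TICK_BROADCAST_OFF) */
	return 0;
}

static int __init my_idle_driver_init(void)
{
	int ret;

	/* CPUHP_AP_ONLINE_DYN callbacks run on the affected CPU itself. */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "myidle:online",
				my_idle_cpu_online, my_idle_cpu_offline);
	return ret < 0 ? ret : 0;
}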
429
430 /*
431  * Set the periodic handler depending on broadcast on/off
432  */
433 void tick_set_periodic_handler(struct clock_event_device *dev, int broadcast)
434 {
435 if (!broadcast)
436 dev->event_handler = tick_handle_periodic;
437 else
438 dev->event_handler = tick_handle_periodic_broadcast;
439 }
440
441 #ifdef CONFIG_HOTPLUG_CPU
442 static void tick_shutdown_broadcast(void)
443 {
444 struct clock_event_device *bc = tick_broadcast_device.evtdev;
445
446 if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
447 if (bc && cpumask_empty(tick_broadcast_mask))
448 clockevents_shutdown(bc);
449 }
450 }
451
452 /*
453  * Called from the CPU hotplug code after a CPU is dead.
454  */
455 void tick_broadcast_offline(unsigned int cpu)
456 {
457 raw_spin_lock(&tick_broadcast_lock);
458 cpumask_clear_cpu(cpu, tick_broadcast_mask);
459 cpumask_clear_cpu(cpu, tick_broadcast_on);
460 tick_broadcast_oneshot_offline(cpu);
461 tick_shutdown_broadcast();
462 raw_spin_unlock(&tick_broadcast_lock);
463 }
464
465 #endif
466
467 void tick_suspend_broadcast(void)
468 {
469 struct clock_event_device *bc;
470 unsigned long flags;
471
472 raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
473
474 bc = tick_broadcast_device.evtdev;
475 if (bc)
476 clockevents_shutdown(bc);
477
478 raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
479 }
480
481 /*
482  * This is called from tick_resume_local() on a resuming CPU. That's
483  * called from the core resume function, tick_unfreeze() and the magic XEN
484  * resume hackery.
485  *
486  * In none of these cases the broadcast device mode can change and the
487  * bit of the resuming CPU in the broadcast mask is safe as well.
488  */
489 bool tick_resume_check_broadcast(void)
490 {
491 if (tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT)
492 return false;
493 else
494 return cpumask_test_cpu(smp_processor_id(), tick_broadcast_mask);
495 }
496
497 void tick_resume_broadcast(void)
498 {
499 struct clock_event_device *bc;
500 unsigned long flags;
501
502 raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
503
504 bc = tick_broadcast_device.evtdev;
505
506 if (bc) {
507 clockevents_tick_resume(bc);
508
509 switch (tick_broadcast_device.mode) {
510 case TICKDEV_MODE_PERIODIC:
511 if (!cpumask_empty(tick_broadcast_mask))
512 tick_broadcast_start_periodic(bc);
513 break;
514 case TICKDEV_MODE_ONESHOT:
515 if (!cpumask_empty(tick_broadcast_mask))
516 tick_resume_broadcast_oneshot(bc);
517 break;
518 }
519 }
520 raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
521 }
522
523 #ifdef CONFIG_TICK_ONESHOT
524
525 static cpumask_var_t tick_broadcast_oneshot_mask __cpumask_var_read_mostly;
526 static cpumask_var_t tick_broadcast_pending_mask __cpumask_var_read_mostly;
527 static cpumask_var_t tick_broadcast_force_mask __cpumask_var_read_mostly;
528
529 /*
530  * Exposed for debugging: see timer_list.c
531  */
532 struct cpumask *tick_get_broadcast_oneshot_mask(void)
533 {
534 return tick_broadcast_oneshot_mask;
535 }
536
537 /*
538  * Called before going idle with interrupts disabled. Checks whether a
539  * broadcast event from the other core is about to happen. We detected
540  * that in tick_broadcast_oneshot_control(). The callsite can use this
541  * to avoid a deep idle transition as we are about to get the
542  * broadcast IPI right away.
543  */
544 int tick_check_broadcast_expired(void)
545 {
546 return cpumask_test_cpu(smp_processor_id(), tick_broadcast_force_mask);
547 }
548
549 /*
550  * Set broadcast interrupt affinity
551  */
552 static void tick_broadcast_set_affinity(struct clock_event_device *bc,
553 const struct cpumask *cpumask)
554 {
555 if (!(bc->features & CLOCK_EVT_FEAT_DYNIRQ))
556 return;
557
558 if (cpumask_equal(bc->cpumask, cpumask))
559 return;
560
561 bc->cpumask = cpumask;
562 irq_set_affinity(bc->irq, bc->cpumask);
563 }
564
565 static void tick_broadcast_set_event(struct clock_event_device *bc, int cpu,
566 ktime_t expires)
567 {
568 if (!clockevent_state_oneshot(bc))
569 clockevents_switch_state(bc, CLOCK_EVT_STATE_ONESHOT);
570
571 clockevents_program_event(bc, expires, 1);
572 tick_broadcast_set_affinity(bc, cpumask_of(cpu));
573 }
574
575 static void tick_resume_broadcast_oneshot(struct clock_event_device *bc)
576 {
577 clockevents_switch_state(bc, CLOCK_EVT_STATE_ONESHOT);
578 }
579
580 /*
581  * Called from irq_enter() when idle was interrupted to reenable the
582  * per cpu device.
583  */
584 void tick_check_oneshot_broadcast_this_cpu(void)
585 {
586 if (cpumask_test_cpu(smp_processor_id(), tick_broadcast_oneshot_mask)) {
587 struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
588
589 /*
590  * We might be in the middle of switching over from
591  * periodic to oneshot. If the CPU has not yet
592  * switched over, leave the device alone.
593  */
594 if (td->mode == TICKDEV_MODE_ONESHOT) {
595 clockevents_switch_state(td->evtdev,
596 CLOCK_EVT_STATE_ONESHOT);
597 }
598 }
599 }
600
601 /*
602  * Handle oneshot mode broadcasting
603  */
604 static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
605 {
606 struct tick_device *td;
607 ktime_t now, next_event;
608 int cpu, next_cpu = 0;
609 bool bc_local;
610
611 raw_spin_lock(&tick_broadcast_lock);
612 dev->next_event = KTIME_MAX;
613 next_event = KTIME_MAX;
614 cpumask_clear(tmpmask);
615 now = ktime_get();
616
617 for_each_cpu(cpu, tick_broadcast_oneshot_mask) {
618 /*
619  * Required for !SMP because for_each_cpu() reports
620  * unconditionally CPU0 as set on UP kernels.
621  */
622 if (!IS_ENABLED(CONFIG_SMP) &&
623 cpumask_empty(tick_broadcast_oneshot_mask))
624 break;
625
626 td = &per_cpu(tick_cpu_device, cpu);
627 if (td->evtdev->next_event <= now) {
628 cpumask_set_cpu(cpu, tmpmask);
629 /*
630  * Mark the remote cpu in the pending mask, so
631  * it can avoid reprogramming the cpu local
632  * timer in tick_broadcast_oneshot_control().
633  */
634 cpumask_set_cpu(cpu, tick_broadcast_pending_mask);
635 } else if (td->evtdev->next_event < next_event) {
636 next_event = td->evtdev->next_event;
637 next_cpu = cpu;
638 }
639 }
640
641 /*
642  * Remove the current cpu from the pending mask. The event is
643  * delivered immediately in tick_do_broadcast() !
644  */
645 cpumask_clear_cpu(smp_processor_id(), tick_broadcast_pending_mask);
646
647 /* Take care of enforced broadcast requests */
648 cpumask_or(tmpmask, tmpmask, tick_broadcast_force_mask);
649 cpumask_clear(tick_broadcast_force_mask);
650
651 /*
652  * Sanity check: only online CPUs may be woken up. Restrict the mask
653  * if an offline CPU slipped in.
654  */
655 if (WARN_ON_ONCE(!cpumask_subset(tmpmask, cpu_online_mask)))
656 cpumask_and(tmpmask, tmpmask, cpu_online_mask);
657
658 /*
659  * Wakeup the cpus which have an expired event.
660  */
661 bc_local = tick_do_broadcast(tmpmask);
662
663 /*
664  * Two reasons for reprogram:
665  *
666  * - The global event did not expire any CPU local
667  * timers. This happens in dyntick mode, as the maximum PIT
668  * delta is quite small.
669  *
670  * - There are pending events on sleeping CPUs which were not
671  * in the event mask
672  */
673 if (next_event != KTIME_MAX)
674 tick_broadcast_set_event(dev, next_cpu, next_event);
675
676 raw_spin_unlock(&tick_broadcast_lock);
677
678 if (bc_local) {
679 td = this_cpu_ptr(&tick_cpu_device);
680 td->evtdev->event_handler(td->evtdev);
681 }
682 }
683
684 static int broadcast_needs_cpu(struct clock_event_device *bc, int cpu)
685 {
686 if (!(bc->features & CLOCK_EVT_FEAT_HRTIMER))
687 return 0;
688 if (bc->next_event == KTIME_MAX)
689 return 0;
690 return bc->bound_on == cpu ? -EBUSY : 0;
691 }
692
693 static void broadcast_shutdown_local(struct clock_event_device *bc,
694 struct clock_event_device *dev)
695 {
696 /*
697  * For hrtimer based broadcasting we cannot shutdown the cpu
698  * local device if our own event is the first one to expire or
699  * if we own the broadcast timer.
700  */
701 if (bc->features & CLOCK_EVT_FEAT_HRTIMER) {
702 if (broadcast_needs_cpu(bc, smp_processor_id()))
703 return;
704 if (dev->next_event < bc->next_event)
705 return;
706 }
707 clockevents_switch_state(dev, CLOCK_EVT_STATE_SHUTDOWN);
708 }
709
710 int __tick_broadcast_oneshot_control(enum tick_broadcast_state state)
711 {
712 struct clock_event_device *bc, *dev;
713 int cpu, ret = 0;
714 ktime_t now;
715
716 /*
717  * If there is no broadcast device, tell the caller not to go
718  * into deep idle.
719  */
720 if (!tick_broadcast_device.evtdev)
721 return -EBUSY;
722
723 dev = this_cpu_ptr(&tick_cpu_device)->evtdev;
724
725 raw_spin_lock(&tick_broadcast_lock);
726 bc = tick_broadcast_device.evtdev;
727 cpu = smp_processor_id();
728
729 if (state == TICK_BROADCAST_ENTER) {
730 /*
731  * If the current CPU owns the hrtimer broadcast
732  * mechanism, it cannot go deep idle and we do not add
733  * the CPU to the broadcast mask. We don't have to go
734  * through the EXIT path as the local timer is not
735  * shutdown.
736  */
737 ret = broadcast_needs_cpu(bc, cpu);
738 if (ret)
739 goto out;
740
741 /*
742  * If the broadcast device is in periodic mode, we
743  * return.
744  */
745 if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
746 /* If it is a hrtimer based broadcast, return busy */
747 if (bc->features & CLOCK_EVT_FEAT_HRTIMER)
748 ret = -EBUSY;
749 goto out;
750 }
751
752 if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) {
753 WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask));
754
755 /* Conditionally shut down the local timer. */
756 broadcast_shutdown_local(bc, dev);
757
758 /*
759  * We only reprogram the broadcast timer if we
760  * did not mark ourself in the force mask and
761  * if the cpu local event is earlier than the
762  * broadcast event. If the current CPU is in
763  * the force mask, then we are going to be
764  * woken by the IPI right away; we return
765  * busy, so the CPU does not try to go deep
766  * idle.
767  */
768 if (cpumask_test_cpu(cpu, tick_broadcast_force_mask)) {
769 ret = -EBUSY;
770 } else if (dev->next_event < bc->next_event) {
771 tick_broadcast_set_event(bc, cpu, dev->next_event);
772 /*
773  * In case of hrtimer broadcasts the
774  * programming might have moved the
775  * timer to this cpu. If yes, remove
776  * us from the broadcast mask and
777  * return busy.
778  */
779 ret = broadcast_needs_cpu(bc, cpu);
780 if (ret) {
781 cpumask_clear_cpu(cpu,
782 tick_broadcast_oneshot_mask);
783 }
784 }
785 }
786 } else {
787 if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) {
788 clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT);
789 /*
790  * The cpu which was handling the broadcast
791  * timer marked this cpu in the broadcast
792  * pending mask and fired the broadcast
793  * IPI. So we are going to handle the expired
794  * event anyway via the broadcast IPI
795  * handler. No need to reprogram the timer
796  * with an already expired event.
797  */
798 if (cpumask_test_and_clear_cpu(cpu,
799 tick_broadcast_pending_mask))
800 goto out;
801
802 /*
803  * Bail out if there is no next event.
804  */
805 if (dev->next_event == KTIME_MAX)
806 goto out;
807
808 /*
809  * If the pending bit is not set, then we are either the CPU
810  * handling the broadcast interrupt or we got woken by something
811  * else.
812  *
813  * We are no longer in the broadcast mask, so if the cpu local
814  * expiry time is already reached, we would reprogram the cpu
815  * local timer with an already expired event.
816  *
817  * This can lead to a ping-pong when we return to idle and
818  * therefore rearm the broadcast timer before the cpu local
819  * timer was able to fire. This happens because the forced
820  * reprogramming makes sure that the event will happen in the
821  * future and depending on the min_delta setting this might be
822  * far enough out that the ping-pong starts.
823  *
824  * If the cpu local next_event has expired then we know that the
825  * broadcast timer next_event has expired as well and broadcast
826  * is about to be handled. So we avoid reprogramming and enforce
827  * that the broadcast handler, which did not run yet, will invoke
828  * the cpu local handler.
829  *
830  * We cannot call the handler directly from here, because we
831  * might be in a NOHZ phase and we did not yet switch the
832  * broadcast device to oneshot mode.
833  */
834
835
836
837
838
839 now = ktime_get();
840 if (dev->next_event <= now) {
841 cpumask_set_cpu(cpu, tick_broadcast_force_mask);
842 goto out;
843 }
844
845 /*
846  * We got woken by something else. Reprogram the cpu local timer device.
847  */
848 tick_program_event(dev->next_event, 1);
849 }
850 }
851 out:
852 raw_spin_unlock(&tick_broadcast_lock);
853 return ret;
854 }
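
The ENTER/EXIT states handled above are normally reached via the tick_broadcast_enter()/tick_broadcast_exit() wrappers in <linux/tick.h>. Below is a minimal sketch of a deep-idle path using them, assuming hypothetical my_enter_*() helpers; the -EBUSY fallback mirrors the return value of __tick_broadcast_oneshot_control() when this CPU must keep its local timer running (e.g. because it owns the hrtimer-based broadcast).

#include <linux/tick.h>

static void my_enter_deep_idle(void)    { /* local timer may stop here */ }
static void my_enter_shallow_idle(void) { /* local timer keeps running */ }

static void my_idle_enter(void)
{
	/* Hand the wakeup over to the broadcast device before deep idle. */
	if (tick_broadcast_enter()) {
		/* -EBUSY: this CPU may not go deep, pick a shallower state. */
		my_enter_shallow_idle();
		return;
	}

	my_enter_deep_idle();

	/* Take the wakeup responsibility back and restore the local tick. */
	tick_broadcast_exit();
}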
855
856 /*
857  * Reset the one shot broadcast for a cpu
858  *
859  * Called with tick_broadcast_lock held
860  */
861 static void tick_broadcast_clear_oneshot(int cpu)
862 {
863 cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
864 cpumask_clear_cpu(cpu, tick_broadcast_pending_mask);
865 }
866
867 static void tick_broadcast_init_next_event(struct cpumask *mask,
868 ktime_t expires)
869 {
870 struct tick_device *td;
871 int cpu;
872
873 for_each_cpu(cpu, mask) {
874 td = &per_cpu(tick_cpu_device, cpu);
875 if (td->evtdev)
876 td->evtdev->next_event = expires;
877 }
878 }
879
880 /*
881  * tick_broadcast_setup_oneshot - setup the broadcast device
882  */
883 static void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
884 {
885 int cpu = smp_processor_id();
886
887 if (!bc)
888 return;
889
890 /* Set it up only once ! */
891 if (bc->event_handler != tick_handle_oneshot_broadcast) {
892 int was_periodic = clockevent_state_periodic(bc);
893
894 bc->event_handler = tick_handle_oneshot_broadcast;
895
896 /*
897  * We must be careful here. There might be other CPUs
898  * waiting for periodic broadcast. We need to set the
899  * oneshot_mask bits for those and program the
900  * broadcast device to fire.
901  */
902 cpumask_copy(tmpmask, tick_broadcast_mask);
903 cpumask_clear_cpu(cpu, tmpmask);
904 cpumask_or(tick_broadcast_oneshot_mask,
905 tick_broadcast_oneshot_mask, tmpmask);
906
907 if (was_periodic && !cpumask_empty(tmpmask)) {
908 clockevents_switch_state(bc, CLOCK_EVT_STATE_ONESHOT);
909 tick_broadcast_init_next_event(tmpmask,
910 tick_next_period);
911 tick_broadcast_set_event(bc, cpu, tick_next_period);
912 } else
913 bc->next_event = KTIME_MAX;
914 } else {
915 /*
916  * The first cpu which switches to oneshot mode sets
917  * the bit for all other cpus which are in the general
918  * (periodic) broadcast mask. So the bit is set and
919  * would prevent the first broadcast enter after this
920  * to program the bc device.
921  */
922 tick_broadcast_clear_oneshot(cpu);
923 }
924 }
925
926 /*
927  * Select oneshot operating mode for the broadcast device
928  */
929 void tick_broadcast_switch_to_oneshot(void)
930 {
931 struct clock_event_device *bc;
932 unsigned long flags;
933
934 raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
935
936 tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
937 bc = tick_broadcast_device.evtdev;
938 if (bc)
939 tick_broadcast_setup_oneshot(bc);
940
941 raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
942 }
943
944 #ifdef CONFIG_HOTPLUG_CPU
945 void hotplug_cpu__broadcast_tick_pull(int deadcpu)
946 {
947 struct clock_event_device *bc;
948 unsigned long flags;
949
950 raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
951 bc = tick_broadcast_device.evtdev;
952
953 if (bc && broadcast_needs_cpu(bc, deadcpu)) {
954 /* This moves the broadcast assignment to this CPU: */
955 clockevents_program_event(bc, bc->next_event, 1);
956 }
957 raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
958 }
959
960 /*
961  * Remove a dying CPU from broadcasting
962  */
963 static void tick_broadcast_oneshot_offline(unsigned int cpu)
964 {
965 /*
966  * Clear the broadcast masks for the dead cpu, but do not stop
967  * the broadcast device!
968  */
969 cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
970 cpumask_clear_cpu(cpu, tick_broadcast_pending_mask);
971 cpumask_clear_cpu(cpu, tick_broadcast_force_mask);
972 }
973 #endif
974
975 /*
976  * Check, whether the broadcast device is in oneshot mode
977  */
978 int tick_broadcast_oneshot_active(void)
979 {
980 return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT;
981 }
982
983 /*
984  * Check whether the broadcast device supports oneshot.
985  */
986 bool tick_broadcast_oneshot_available(void)
987 {
988 struct clock_event_device *bc = tick_broadcast_device.evtdev;
989
990 return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false;
991 }
992
993 #else
994 int __tick_broadcast_oneshot_control(enum tick_broadcast_state state)
995 {
996 struct clock_event_device *bc = tick_broadcast_device.evtdev;
997
998 if (!bc || (bc->features & CLOCK_EVT_FEAT_HRTIMER))
999 return -EBUSY;
1000
1001 return 0;
1002 }
1003 #endif
1004
1005 void __init tick_broadcast_init(void)
1006 {
1007 zalloc_cpumask_var(&tick_broadcast_mask, GFP_NOWAIT);
1008 zalloc_cpumask_var(&tick_broadcast_on, GFP_NOWAIT);
1009 zalloc_cpumask_var(&tmpmask, GFP_NOWAIT);
1010 #ifdef CONFIG_TICK_ONESHOT
1011 zalloc_cpumask_var(&tick_broadcast_oneshot_mask, GFP_NOWAIT);
1012 zalloc_cpumask_var(&tick_broadcast_pending_mask, GFP_NOWAIT);
1013 zalloc_cpumask_var(&tick_broadcast_force_mask, GFP_NOWAIT);
1014 #endif
1015 }