This source file includes the following definitions:
- timers_update_migration
- timers_update_migration
- timer_update_keys
- timers_update_nohz
- timer_migration_handler
- is_timers_nohz_active
- is_timers_nohz_active
- round_jiffies_common
- __round_jiffies
- __round_jiffies_relative
- round_jiffies
- round_jiffies_relative
- __round_jiffies_up
- __round_jiffies_up_relative
- round_jiffies_up
- round_jiffies_up_relative
- timer_get_idx
- timer_set_idx
- calc_index
- calc_wheel_index
- enqueue_timer
- __internal_add_timer
- trigger_dyntick_cpu
- internal_add_timer
- timer_debug_hint
- timer_is_static_object
- timer_fixup_init
- stub_timer
- timer_fixup_activate
- timer_fixup_free
- timer_fixup_assert_init
- debug_timer_init
- debug_timer_activate
- debug_timer_deactivate
- debug_timer_free
- debug_timer_assert_init
- init_timer_on_stack_key
- destroy_timer_on_stack
- debug_timer_init
- debug_timer_activate
- debug_timer_deactivate
- debug_timer_assert_init
- debug_init
- debug_deactivate
- debug_assert_init
- do_init_timer
- init_timer_key
- detach_timer
- detach_if_pending
- get_timer_cpu_base
- get_timer_this_cpu_base
- get_timer_base
- get_target_base
- forward_timer_base
- lock_timer_base
- __mod_timer
- mod_timer_pending
- mod_timer
- timer_reduce
- add_timer
- add_timer_on
- del_timer
- try_to_del_timer_sync
- timer_base_init_expiry_lock
- timer_base_lock_expiry
- timer_base_unlock_expiry
- timer_sync_wait_running
- del_timer_wait_running
- timer_base_init_expiry_lock
- timer_base_lock_expiry
- timer_base_unlock_expiry
- timer_sync_wait_running
- del_timer_wait_running
- del_timer_sync
- call_timer_fn
- expire_timers
- __collect_expired_timers
- next_pending_bucket
- __next_timer_interrupt
- cmp_next_hrtimer_event
- get_next_timer_interrupt
- timer_clear_idle
- collect_expired_timers
- collect_expired_timers
- update_process_times
- __run_timers
- run_timer_softirq
- run_local_timers
- process_timeout
- schedule_timeout
- schedule_timeout_interruptible
- schedule_timeout_killable
- schedule_timeout_uninterruptible
- schedule_timeout_idle
- migrate_timer_list
- timers_prepare_cpu
- timers_dead_cpu
- init_timer_cpu
- init_timer_cpus
- init_timers
- msleep
- msleep_interruptible
- usleep_range
21 #include <linux/kernel_stat.h>
22 #include <linux/export.h>
23 #include <linux/interrupt.h>
24 #include <linux/percpu.h>
25 #include <linux/init.h>
26 #include <linux/mm.h>
27 #include <linux/swap.h>
28 #include <linux/pid_namespace.h>
29 #include <linux/notifier.h>
30 #include <linux/thread_info.h>
31 #include <linux/time.h>
32 #include <linux/jiffies.h>
33 #include <linux/posix-timers.h>
34 #include <linux/cpu.h>
35 #include <linux/syscalls.h>
36 #include <linux/delay.h>
37 #include <linux/tick.h>
38 #include <linux/kallsyms.h>
39 #include <linux/irq_work.h>
40 #include <linux/sched/signal.h>
41 #include <linux/sched/sysctl.h>
42 #include <linux/sched/nohz.h>
43 #include <linux/sched/debug.h>
44 #include <linux/slab.h>
45 #include <linux/compat.h>
46
47 #include <linux/uaccess.h>
48 #include <asm/unistd.h>
49 #include <asm/div64.h>
50 #include <asm/timex.h>
51 #include <asm/io.h>
52
53 #include "tick-internal.h"
54
55 #define CREATE_TRACE_POINTS
56 #include <trace/events/timer.h>
57
58 __visible u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;
59
60 EXPORT_SYMBOL(jiffies_64);
61
/*
 * The timer wheel has LVL_DEPTH array levels. Each level provides an array
 * of LVL_SIZE buckets and is driven by its own clock, so each level has a
 * different granularity: level 0 operates in single jiffies, and every
 * further level is coarser by a factor of LVL_CLK_DIV (8).
 *
 * The array level of a newly armed timer depends on its relative expiry
 * time: the farther the expiry is out, the higher the level and therefore
 * the coarser the granularity. Unlike the original timer wheel, buckets are
 * never cascaded down to lower levels; the price is that timers queued in
 * the higher levels may fire somewhat later than requested, which is
 * acceptable because long timeouts are usually canceled or rearmed before
 * they expire.
 *
 * Relative timeouts at or beyond WHEEL_TIMEOUT_CUTOFF are clamped to
 * WHEEL_TIMEOUT_MAX and queued into the last bucket of the last level.
 */
151 #define LVL_CLK_SHIFT 3
152 #define LVL_CLK_DIV (1UL << LVL_CLK_SHIFT)
153 #define LVL_CLK_MASK (LVL_CLK_DIV - 1)
154 #define LVL_SHIFT(n) ((n) * LVL_CLK_SHIFT)
155 #define LVL_GRAN(n) (1UL << LVL_SHIFT(n))
156
157
158
159
160
161 #define LVL_START(n) ((LVL_SIZE - 1) << (((n) - 1) * LVL_CLK_SHIFT))
162
163
164 #define LVL_BITS 6
165 #define LVL_SIZE (1UL << LVL_BITS)
166 #define LVL_MASK (LVL_SIZE - 1)
167 #define LVL_OFFS(n) ((n) * LVL_SIZE)
168
169
170 #if HZ > 100
171 # define LVL_DEPTH 9
172 # else
173 # define LVL_DEPTH 8
174 #endif
175
176
177 #define WHEEL_TIMEOUT_CUTOFF (LVL_START(LVL_DEPTH))
178 #define WHEEL_TIMEOUT_MAX (WHEEL_TIMEOUT_CUTOFF - LVL_GRAN(LVL_DEPTH - 1))
179
180
181
182
183
184 #define WHEEL_SIZE (LVL_SIZE * LVL_DEPTH)
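For orientation, the level macros above give level n a granularity of 2^(3n) jiffies and 64 buckets, and a level only starts taking timers once the relative expiry reaches LVL_START(n). The sketch below is a hypothetical, standalone userspace program (not part of this file) that reproduces the arithmetic for the HZ > 100 case (LVL_DEPTH == 9) and prints each level's granularity and starting delta.

/*
 * Hypothetical standalone sketch (not kernel code): reproduce the LVL_*
 * arithmetic above for HZ > 100 (LVL_DEPTH == 9) and print each level's
 * granularity and the relative expiry at which it starts being used.
 */
#include <stdio.h>

#define LVL_CLK_SHIFT	3
#define LVL_SHIFT(n)	((n) * LVL_CLK_SHIFT)
#define LVL_GRAN(n)	(1UL << LVL_SHIFT(n))
#define LVL_BITS	6
#define LVL_SIZE	(1UL << LVL_BITS)
#define LVL_START(n)	((LVL_SIZE - 1) << (((n) - 1) * LVL_CLK_SHIFT))
#define LVL_DEPTH	9

int main(void)
{
	unsigned int lvl;

	for (lvl = 0; lvl < LVL_DEPTH; lvl++) {
		/* Level 0 takes everything below LVL_START(1). */
		unsigned long start = lvl ? LVL_START(lvl) : 0;

		printf("level %u: granularity %8lu jiffies, first delta %9lu\n",
		       lvl, LVL_GRAN(lvl), start);
	}
	printf("deltas >= %lu jiffies are clamped to the last level\n",
	       (unsigned long)LVL_START(LVL_DEPTH));
	return 0;
}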
185
186 #ifdef CONFIG_NO_HZ_COMMON
187 # define NR_BASES 2
188 # define BASE_STD 0
189 # define BASE_DEF 1
190 #else
191 # define NR_BASES 1
192 # define BASE_STD 0
193 # define BASE_DEF 0
194 #endif
195
196 struct timer_base {
197 raw_spinlock_t lock;
198 struct timer_list *running_timer;
199 #ifdef CONFIG_PREEMPT_RT
200 spinlock_t expiry_lock;
201 atomic_t timer_waiters;
202 #endif
203 unsigned long clk;
204 unsigned long next_expiry;
205 unsigned int cpu;
206 bool is_idle;
207 bool must_forward_clk;
208 DECLARE_BITMAP(pending_map, WHEEL_SIZE);
209 struct hlist_head vectors[WHEEL_SIZE];
210 } ____cacheline_aligned;
211
212 static DEFINE_PER_CPU(struct timer_base, timer_bases[NR_BASES]);
213
214 #ifdef CONFIG_NO_HZ_COMMON
215
216 static DEFINE_STATIC_KEY_FALSE(timers_nohz_active);
217 static DEFINE_MUTEX(timer_keys_mutex);
218
219 static void timer_update_keys(struct work_struct *work);
220 static DECLARE_WORK(timer_update_work, timer_update_keys);
221
222 #ifdef CONFIG_SMP
223 unsigned int sysctl_timer_migration = 1;
224
225 DEFINE_STATIC_KEY_FALSE(timers_migration_enabled);
226
227 static void timers_update_migration(void)
228 {
229 if (sysctl_timer_migration && tick_nohz_active)
230 static_branch_enable(&timers_migration_enabled);
231 else
232 static_branch_disable(&timers_migration_enabled);
233 }
234 #else
235 static inline void timers_update_migration(void) { }
236 #endif
237
238 static void timer_update_keys(struct work_struct *work)
239 {
240 mutex_lock(&timer_keys_mutex);
241 timers_update_migration();
242 static_branch_enable(&timers_nohz_active);
243 mutex_unlock(&timer_keys_mutex);
244 }
245
246 void timers_update_nohz(void)
247 {
248 schedule_work(&timer_update_work);
249 }
250
251 int timer_migration_handler(struct ctl_table *table, int write,
252 void __user *buffer, size_t *lenp,
253 loff_t *ppos)
254 {
255 int ret;
256
257 mutex_lock(&timer_keys_mutex);
258 ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
259 if (!ret && write)
260 timers_update_migration();
261 mutex_unlock(&timer_keys_mutex);
262 return ret;
263 }
264
265 static inline bool is_timers_nohz_active(void)
266 {
267 return static_branch_unlikely(&timers_nohz_active);
268 }
269 #else
270 static inline bool is_timers_nohz_active(void) { return false; }
271 #endif
272
273 static unsigned long round_jiffies_common(unsigned long j, int cpu,
274 bool force_up)
275 {
276 int rem;
277 unsigned long original = j;
278
279
280 /*
281 * We don't want all CPUs firing their timers at once, hitting the same
282 * lock or cachelines, so each extra CPU is skewed by 3 jiffies here;
283 * the skew is added before rounding and subtracted again below.
284 */
285
286
287 j += cpu * 3;
288
289 rem = j % HZ;
290
291
292
293
294
295
296
297
298 if (rem < HZ/4 && !force_up)
299 j = j - rem;
300 else
301 j = j - rem + HZ;
302
303
304 j -= cpu * 3;
305
306
307
308
309
310 return time_is_after_jiffies(j) ? j : original;
311 }
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333 unsigned long __round_jiffies(unsigned long j, int cpu)
334 {
335 return round_jiffies_common(j, cpu, false);
336 }
337 EXPORT_SYMBOL_GPL(__round_jiffies);
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359 unsigned long __round_jiffies_relative(unsigned long j, int cpu)
360 {
361 unsigned long j0 = jiffies;
362
363
364 return round_jiffies_common(j + j0, cpu, false) - j0;
365 }
366 EXPORT_SYMBOL_GPL(__round_jiffies_relative);
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383 unsigned long round_jiffies(unsigned long j)
384 {
385 return round_jiffies_common(j, raw_smp_processor_id(), false);
386 }
387 EXPORT_SYMBOL_GPL(round_jiffies);
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404 unsigned long round_jiffies_relative(unsigned long j)
405 {
406 return __round_jiffies_relative(j, raw_smp_processor_id());
407 }
408 EXPORT_SYMBOL_GPL(round_jiffies_relative);
409
410
411
412
413
414
415
416
417
418
419
420 unsigned long __round_jiffies_up(unsigned long j, int cpu)
421 {
422 return round_jiffies_common(j, cpu, true);
423 }
424 EXPORT_SYMBOL_GPL(__round_jiffies_up);
425
426
427
428
429
430
431
432
433
434
435
436 unsigned long __round_jiffies_up_relative(unsigned long j, int cpu)
437 {
438 unsigned long j0 = jiffies;
439
440
441 return round_jiffies_common(j + j0, cpu, true) - j0;
442 }
443 EXPORT_SYMBOL_GPL(__round_jiffies_up_relative);
444
445
446
447
448
449
450
451
452
453
454 unsigned long round_jiffies_up(unsigned long j)
455 {
456 return round_jiffies_common(j, raw_smp_processor_id(), true);
457 }
458 EXPORT_SYMBOL_GPL(round_jiffies_up);
459
460
461
462
463
464
465
466
467
468
469 unsigned long round_jiffies_up_relative(unsigned long j)
470 {
471 return __round_jiffies_up_relative(j, raw_smp_processor_id());
472 }
473 EXPORT_SYMBOL_GPL(round_jiffies_up_relative);
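A hedged usage sketch for the rounding helpers exported above: timers whose exact firing moment does not matter can be rounded to a whole second so that wakeups from different subsystems batch in the same tick. The mydrv_timer name and the five-second period below are illustrative assumptions, not anything defined in this file.

/* Illustrative fragment; mydrv_timer and the 5s period are invented. */
static struct timer_list mydrv_timer;

static void mydrv_housekeeping(struct timer_list *t)
{
	/* ...periodic, accuracy-insensitive work... */

	/* Round the absolute expiry to a whole second before re-arming. */
	mod_timer(&mydrv_timer, round_jiffies(jiffies + 5 * HZ));
}

For relative timeouts the same batching is achieved with, for example, schedule_delayed_work(&work, round_jiffies_relative(5 * HZ)).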
474
475
476 static inline unsigned int timer_get_idx(struct timer_list *timer)
477 {
478 return (timer->flags & TIMER_ARRAYMASK) >> TIMER_ARRAYSHIFT;
479 }
480
481 static inline void timer_set_idx(struct timer_list *timer, unsigned int idx)
482 {
483 timer->flags = (timer->flags & ~TIMER_ARRAYMASK) |
484 idx << TIMER_ARRAYSHIFT;
485 }
486
487
488
489
490
491 static inline unsigned calc_index(unsigned expires, unsigned lvl)
492 {
493 expires = (expires + LVL_GRAN(lvl)) >> LVL_SHIFT(lvl);
494 return LVL_OFFS(lvl) + (expires & LVL_MASK);
495 }
496
497 static int calc_wheel_index(unsigned long expires, unsigned long clk)
498 {
499 unsigned long delta = expires - clk;
500 unsigned int idx;
501
502 if (delta < LVL_START(1)) {
503 idx = calc_index(expires, 0);
504 } else if (delta < LVL_START(2)) {
505 idx = calc_index(expires, 1);
506 } else if (delta < LVL_START(3)) {
507 idx = calc_index(expires, 2);
508 } else if (delta < LVL_START(4)) {
509 idx = calc_index(expires, 3);
510 } else if (delta < LVL_START(5)) {
511 idx = calc_index(expires, 4);
512 } else if (delta < LVL_START(6)) {
513 idx = calc_index(expires, 5);
514 } else if (delta < LVL_START(7)) {
515 idx = calc_index(expires, 6);
516 } else if (LVL_DEPTH > 8 && delta < LVL_START(8)) {
517 idx = calc_index(expires, 7);
518 } else if ((long) delta < 0) {
519 idx = clk & LVL_MASK;
520 } else {
521
522
523
524
525 if (expires >= WHEEL_TIMEOUT_CUTOFF)
526 expires = WHEEL_TIMEOUT_MAX;
527
528 idx = calc_index(expires, LVL_DEPTH - 1);
529 }
530 return idx;
531 }
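As a worked example of calc_wheel_index() (assuming the HZ > 100 layout): with base->clk == 1000, a timer expiring at 1200 has delta == 200, which is at least LVL_START(1) == 63 but below LVL_START(2) == 504, so it is queued in level 1 with a granularity of 8 jiffies and may therefore fire up to 8 jiffies late. A hypothetical userspace spot check with the macros expanded:

/*
 * Hypothetical spot check of the level-1 case above, with the macros
 * expanded for HZ > 100: LVL_SHIFT(1) == 3, LVL_GRAN(1) == 8,
 * LVL_OFFS(1) == 64, LVL_MASK == 63. Illustrative only.
 */
#include <stdio.h>

int main(void)
{
	unsigned long clk = 1000, expires = clk + 200;	/* delta 200 < 504 */
	unsigned int idx = 64 + (((expires + 8) >> 3) & 63);

	printf("level 1 bucket: %u\n", idx);		/* prints 87 */
	return 0;
}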
532
533
534
535
536
537 static void enqueue_timer(struct timer_base *base, struct timer_list *timer,
538 unsigned int idx)
539 {
540 hlist_add_head(&timer->entry, base->vectors + idx);
541 __set_bit(idx, base->pending_map);
542 timer_set_idx(timer, idx);
543
544 trace_timer_start(timer, timer->expires, timer->flags);
545 }
546
547 static void
548 __internal_add_timer(struct timer_base *base, struct timer_list *timer)
549 {
550 unsigned int idx;
551
552 idx = calc_wheel_index(timer->expires, base->clk);
553 enqueue_timer(base, timer, idx);
554 }
555
556 static void
557 trigger_dyntick_cpu(struct timer_base *base, struct timer_list *timer)
558 {
559 if (!is_timers_nohz_active())
560 return;
561
562
563
564
565
566 if (timer->flags & TIMER_DEFERRABLE) {
567 if (tick_nohz_full_cpu(base->cpu))
568 wake_up_nohz_cpu(base->cpu);
569 return;
570 }
571
572
573
574
575
576
577 if (!base->is_idle)
578 return;
579
580
581 if (time_after_eq(timer->expires, base->next_expiry))
582 return;
583
584
585
586
587
588 base->next_expiry = timer->expires;
589 wake_up_nohz_cpu(base->cpu);
590 }
591
592 static void
593 internal_add_timer(struct timer_base *base, struct timer_list *timer)
594 {
595 __internal_add_timer(base, timer);
596 trigger_dyntick_cpu(base, timer);
597 }
598
599 #ifdef CONFIG_DEBUG_OBJECTS_TIMERS
600
601 static struct debug_obj_descr timer_debug_descr;
602
603 static void *timer_debug_hint(void *addr)
604 {
605 return ((struct timer_list *) addr)->function;
606 }
607
608 static bool timer_is_static_object(void *addr)
609 {
610 struct timer_list *timer = addr;
611
612 return (timer->entry.pprev == NULL &&
613 timer->entry.next == TIMER_ENTRY_STATIC);
614 }
615
616
617
618
619
620 static bool timer_fixup_init(void *addr, enum debug_obj_state state)
621 {
622 struct timer_list *timer = addr;
623
624 switch (state) {
625 case ODEBUG_STATE_ACTIVE:
626 del_timer_sync(timer);
627 debug_object_init(timer, &timer_debug_descr);
628 return true;
629 default:
630 return false;
631 }
632 }
633
634
635 static void stub_timer(struct timer_list *unused)
636 {
637 WARN_ON(1);
638 }
639
640
641
642
643
644
645 static bool timer_fixup_activate(void *addr, enum debug_obj_state state)
646 {
647 struct timer_list *timer = addr;
648
649 switch (state) {
650 case ODEBUG_STATE_NOTAVAILABLE:
651 timer_setup(timer, stub_timer, 0);
652 return true;
653
654 case ODEBUG_STATE_ACTIVE:
655 WARN_ON(1);
656
657 default:
658 return false;
659 }
660 }
661
662
663
664
665
666 static bool timer_fixup_free(void *addr, enum debug_obj_state state)
667 {
668 struct timer_list *timer = addr;
669
670 switch (state) {
671 case ODEBUG_STATE_ACTIVE:
672 del_timer_sync(timer);
673 debug_object_free(timer, &timer_debug_descr);
674 return true;
675 default:
676 return false;
677 }
678 }
679
680
681
682
683
684 static bool timer_fixup_assert_init(void *addr, enum debug_obj_state state)
685 {
686 struct timer_list *timer = addr;
687
688 switch (state) {
689 case ODEBUG_STATE_NOTAVAILABLE:
690 timer_setup(timer, stub_timer, 0);
691 return true;
692 default:
693 return false;
694 }
695 }
696
697 static struct debug_obj_descr timer_debug_descr = {
698 .name = "timer_list",
699 .debug_hint = timer_debug_hint,
700 .is_static_object = timer_is_static_object,
701 .fixup_init = timer_fixup_init,
702 .fixup_activate = timer_fixup_activate,
703 .fixup_free = timer_fixup_free,
704 .fixup_assert_init = timer_fixup_assert_init,
705 };
706
707 static inline void debug_timer_init(struct timer_list *timer)
708 {
709 debug_object_init(timer, &timer_debug_descr);
710 }
711
712 static inline void debug_timer_activate(struct timer_list *timer)
713 {
714 debug_object_activate(timer, &timer_debug_descr);
715 }
716
717 static inline void debug_timer_deactivate(struct timer_list *timer)
718 {
719 debug_object_deactivate(timer, &timer_debug_descr);
720 }
721
722 static inline void debug_timer_free(struct timer_list *timer)
723 {
724 debug_object_free(timer, &timer_debug_descr);
725 }
726
727 static inline void debug_timer_assert_init(struct timer_list *timer)
728 {
729 debug_object_assert_init(timer, &timer_debug_descr);
730 }
731
732 static void do_init_timer(struct timer_list *timer,
733 void (*func)(struct timer_list *),
734 unsigned int flags,
735 const char *name, struct lock_class_key *key);
736
737 void init_timer_on_stack_key(struct timer_list *timer,
738 void (*func)(struct timer_list *),
739 unsigned int flags,
740 const char *name, struct lock_class_key *key)
741 {
742 debug_object_init_on_stack(timer, &timer_debug_descr);
743 do_init_timer(timer, func, flags, name, key);
744 }
745 EXPORT_SYMBOL_GPL(init_timer_on_stack_key);
746
747 void destroy_timer_on_stack(struct timer_list *timer)
748 {
749 debug_object_free(timer, &timer_debug_descr);
750 }
751 EXPORT_SYMBOL_GPL(destroy_timer_on_stack);
752
753 #else
754 static inline void debug_timer_init(struct timer_list *timer) { }
755 static inline void debug_timer_activate(struct timer_list *timer) { }
756 static inline void debug_timer_deactivate(struct timer_list *timer) { }
757 static inline void debug_timer_assert_init(struct timer_list *timer) { }
758 #endif
759
760 static inline void debug_init(struct timer_list *timer)
761 {
762 debug_timer_init(timer);
763 trace_timer_init(timer);
764 }
765
766 static inline void debug_deactivate(struct timer_list *timer)
767 {
768 debug_timer_deactivate(timer);
769 trace_timer_cancel(timer);
770 }
771
772 static inline void debug_assert_init(struct timer_list *timer)
773 {
774 debug_timer_assert_init(timer);
775 }
776
777 static void do_init_timer(struct timer_list *timer,
778 void (*func)(struct timer_list *),
779 unsigned int flags,
780 const char *name, struct lock_class_key *key)
781 {
782 timer->entry.pprev = NULL;
783 timer->function = func;
784 timer->flags = flags | raw_smp_processor_id();
785 lockdep_init_map(&timer->lockdep_map, name, key, 0);
786 }
787
788
789
790
791
792
793
794
795
796
797
798
799
800 void init_timer_key(struct timer_list *timer,
801 void (*func)(struct timer_list *), unsigned int flags,
802 const char *name, struct lock_class_key *key)
803 {
804 debug_init(timer);
805 do_init_timer(timer, func, flags, name, key);
806 }
807 EXPORT_SYMBOL(init_timer_key);
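For reference, the canonical way this API is consumed: embed a struct timer_list in a private structure, initialize it with timer_setup() (which ends up in init_timer_key() above), and recover the container in the callback with from_timer(). Every "mydrv" name below is a made-up illustration, not something this file provides.

/* Illustrative driver fragment; all "mydrv" names are invented. */
struct mydrv_dev {
	struct timer_list watchdog;
	/* ... */
};

static void mydrv_watchdog_fn(struct timer_list *t)
{
	struct mydrv_dev *dev = from_timer(dev, t, watchdog);

	/* handle the timeout, then re-arm for one second later */
	mod_timer(&dev->watchdog, jiffies + HZ);
}

static void mydrv_start(struct mydrv_dev *dev)
{
	timer_setup(&dev->watchdog, mydrv_watchdog_fn, 0);
	mod_timer(&dev->watchdog, jiffies + HZ);
}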
808
809 static inline void detach_timer(struct timer_list *timer, bool clear_pending)
810 {
811 struct hlist_node *entry = &timer->entry;
812
813 debug_deactivate(timer);
814
815 __hlist_del(entry);
816 if (clear_pending)
817 entry->pprev = NULL;
818 entry->next = LIST_POISON2;
819 }
820
821 static int detach_if_pending(struct timer_list *timer, struct timer_base *base,
822 bool clear_pending)
823 {
824 unsigned idx = timer_get_idx(timer);
825
826 if (!timer_pending(timer))
827 return 0;
828
829 if (hlist_is_singular_node(&timer->entry, base->vectors + idx))
830 __clear_bit(idx, base->pending_map);
831
832 detach_timer(timer, clear_pending);
833 return 1;
834 }
835
836 static inline struct timer_base *get_timer_cpu_base(u32 tflags, u32 cpu)
837 {
838 struct timer_base *base = per_cpu_ptr(&timer_bases[BASE_STD], cpu);
839
840
841
842
843
844 if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && (tflags & TIMER_DEFERRABLE))
845 base = per_cpu_ptr(&timer_bases[BASE_DEF], cpu);
846 return base;
847 }
848
849 static inline struct timer_base *get_timer_this_cpu_base(u32 tflags)
850 {
851 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
852
853
854
855
856
857 if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && (tflags & TIMER_DEFERRABLE))
858 base = this_cpu_ptr(&timer_bases[BASE_DEF]);
859 return base;
860 }
861
862 static inline struct timer_base *get_timer_base(u32 tflags)
863 {
864 return get_timer_cpu_base(tflags, tflags & TIMER_CPUMASK);
865 }
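On NO_HZ_COMMON kernels the only thing that routes a timer into the BASE_DEF wheel selected above is the TIMER_DEFERRABLE flag given at setup time; such a timer does not force an idle CPU to wake up just to service it. A hedged fragment with hypothetical names:

/* Illustrative fragment; stats_timer and the 10s period are invented. */
static struct timer_list stats_timer;

static void stats_flush(struct timer_list *t)
{
	/* ...work that may safely slip while the CPU is idle... */
	mod_timer(&stats_timer, jiffies + 10 * HZ);
}

static void stats_init(void)
{
	/* TIMER_DEFERRABLE is what selects the BASE_DEF wheel above. */
	timer_setup(&stats_timer, stats_flush, TIMER_DEFERRABLE);
	mod_timer(&stats_timer, jiffies + 10 * HZ);
}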
866
867 static inline struct timer_base *
868 get_target_base(struct timer_base *base, unsigned tflags)
869 {
870 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
871 if (static_branch_likely(&timers_migration_enabled) &&
872 !(tflags & TIMER_PINNED))
873 return get_timer_cpu_base(tflags, get_nohz_timer_target());
874 #endif
875 return get_timer_this_cpu_base(tflags);
876 }
877
878 static inline void forward_timer_base(struct timer_base *base)
879 {
880 #ifdef CONFIG_NO_HZ_COMMON
881 unsigned long jnow;
882
883
884
885
886
887
888 if (likely(!base->must_forward_clk))
889 return;
890
891 jnow = READ_ONCE(jiffies);
892 base->must_forward_clk = base->is_idle;
893 if ((long)(jnow - base->clk) < 2)
894 return;
895
896
897
898
899
900 if (time_after(base->next_expiry, jnow))
901 base->clk = jnow;
902 else
903 base->clk = base->next_expiry;
904 #endif
905 }
906
907
908
909
910
911
912
913
914
915
916
917
918
919 static struct timer_base *lock_timer_base(struct timer_list *timer,
920 unsigned long *flags)
921 __acquires(timer->base->lock)
922 {
923 for (;;) {
924 struct timer_base *base;
925 u32 tf;
926
927
928 /*
929 * Use READ_ONCE() so the compiler cannot re-read @tf between the
930 * TIMER_MIGRATING check and the spin_lock() below.
931 */
932 tf = READ_ONCE(timer->flags);
933
934 if (!(tf & TIMER_MIGRATING)) {
935 base = get_timer_base(tf);
936 raw_spin_lock_irqsave(&base->lock, *flags);
937 if (timer->flags == tf)
938 return base;
939 raw_spin_unlock_irqrestore(&base->lock, *flags);
940 }
941 cpu_relax();
942 }
943 }
944
945 #define MOD_TIMER_PENDING_ONLY 0x01
946 #define MOD_TIMER_REDUCE 0x02
947
948 static inline int
949 __mod_timer(struct timer_list *timer, unsigned long expires, unsigned int options)
950 {
951 struct timer_base *base, *new_base;
952 unsigned int idx = UINT_MAX;
953 unsigned long clk = 0, flags;
954 int ret = 0;
955
956 BUG_ON(!timer->function);
957
958
959
960
961
962
963 if (timer_pending(timer)) {
964
965
966
967
968
969 long diff = timer->expires - expires;
970
971 if (!diff)
972 return 1;
973 if (options & MOD_TIMER_REDUCE && diff <= 0)
974 return 1;
975
976
977
978
979
980
981
982 base = lock_timer_base(timer, &flags);
983 forward_timer_base(base);
984
985 if (timer_pending(timer) && (options & MOD_TIMER_REDUCE) &&
986 time_before_eq(timer->expires, expires)) {
987 ret = 1;
988 goto out_unlock;
989 }
990
991 clk = base->clk;
992 idx = calc_wheel_index(expires, clk);
993
994
995
996
997
998
999 if (idx == timer_get_idx(timer)) {
1000 if (!(options & MOD_TIMER_REDUCE))
1001 timer->expires = expires;
1002 else if (time_after(timer->expires, expires))
1003 timer->expires = expires;
1004 ret = 1;
1005 goto out_unlock;
1006 }
1007 } else {
1008 base = lock_timer_base(timer, &flags);
1009 forward_timer_base(base);
1010 }
1011
1012 ret = detach_if_pending(timer, base, false);
1013 if (!ret && (options & MOD_TIMER_PENDING_ONLY))
1014 goto out_unlock;
1015
1016 new_base = get_target_base(base, timer->flags);
1017
1018 if (base != new_base) {
1019
1020
1021
1022
1023
1024
1025
1026 if (likely(base->running_timer != timer)) {
1027
1028 timer->flags |= TIMER_MIGRATING;
1029
1030 raw_spin_unlock(&base->lock);
1031 base = new_base;
1032 raw_spin_lock(&base->lock);
1033 WRITE_ONCE(timer->flags,
1034 (timer->flags & ~TIMER_BASEMASK) | base->cpu);
1035 forward_timer_base(base);
1036 }
1037 }
1038
1039 debug_timer_activate(timer);
1040
1041 timer->expires = expires;
1042
1043
1044
1045
1046
1047
1048
1049 if (idx != UINT_MAX && clk == base->clk) {
1050 enqueue_timer(base, timer, idx);
1051 trigger_dyntick_cpu(base, timer);
1052 } else {
1053 internal_add_timer(base, timer);
1054 }
1055
1056 out_unlock:
1057 raw_spin_unlock_irqrestore(&base->lock, flags);
1058
1059 return ret;
1060 }
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072 int mod_timer_pending(struct timer_list *timer, unsigned long expires)
1073 {
1074 return __mod_timer(timer, expires, MOD_TIMER_PENDING_ONLY);
1075 }
1076 EXPORT_SYMBOL(mod_timer_pending);
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098 int mod_timer(struct timer_list *timer, unsigned long expires)
1099 {
1100 return __mod_timer(timer, expires, 0);
1101 }
1102 EXPORT_SYMBOL(mod_timer);
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113 int timer_reduce(struct timer_list *timer, unsigned long expires)
1114 {
1115 return __mod_timer(timer, expires, MOD_TIMER_REDUCE);
1116 }
1117 EXPORT_SYMBOL(timer_reduce);
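The three modifiers above differ only in when they are willing to change the expiry: mod_timer() (re)arms unconditionally, mod_timer_pending() refuses to revive an already-inactive timer, and timer_reduce() only ever moves the expiry earlier. A hedged comparison, assuming t was previously set up with timer_setup():

/* Illustrative only; t is assumed to have been set up with timer_setup(). */
static struct timer_list t;

static void example_modifiers(void)
{
	mod_timer(&t, jiffies + 10 * HZ);	 /* (re)arm unconditionally */
	mod_timer_pending(&t, jiffies + 5 * HZ); /* no-op if t was inactive */
	timer_reduce(&t, jiffies + 2 * HZ);	 /* pulls the expiry earlier */
	timer_reduce(&t, jiffies + 20 * HZ);	 /* no-op: later than current */
}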
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133 void add_timer(struct timer_list *timer)
1134 {
1135 BUG_ON(timer_pending(timer));
1136 mod_timer(timer, timer->expires);
1137 }
1138 EXPORT_SYMBOL(add_timer);
1139
1140
1141
1142
1143
1144
1145
1146
1147 void add_timer_on(struct timer_list *timer, int cpu)
1148 {
1149 struct timer_base *new_base, *base;
1150 unsigned long flags;
1151
1152 BUG_ON(timer_pending(timer) || !timer->function);
1153
1154 new_base = get_timer_cpu_base(timer->flags, cpu);
1155
1156
1157
1158
1159
1160
1161 base = lock_timer_base(timer, &flags);
1162 if (base != new_base) {
1163 timer->flags |= TIMER_MIGRATING;
1164
1165 raw_spin_unlock(&base->lock);
1166 base = new_base;
1167 raw_spin_lock(&base->lock);
1168 WRITE_ONCE(timer->flags,
1169 (timer->flags & ~TIMER_BASEMASK) | cpu);
1170 }
1171 forward_timer_base(base);
1172
1173 debug_timer_activate(timer);
1174 internal_add_timer(base, timer);
1175 raw_spin_unlock_irqrestore(&base->lock, flags);
1176 }
1177 EXPORT_SYMBOL_GPL(add_timer_on);
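add_timer_on() queues a timer on a specific CPU's wheel; together with TIMER_PINNED it also stays there instead of being migrated by get_target_base(). A hedged per-CPU sketch (the heartbeat names are invented):

/* Illustrative per-CPU fragment; the heartbeat names are invented. */
static DEFINE_PER_CPU(struct timer_list, heartbeat);

static void heartbeat_fn(struct timer_list *t)
{
	/* Runs on the CPU the timer was queued on; re-arm it right there. */
	t->expires = jiffies + HZ;
	add_timer_on(t, smp_processor_id());
}

static void heartbeat_start(int cpu)
{
	struct timer_list *t = per_cpu_ptr(&heartbeat, cpu);

	timer_setup(t, heartbeat_fn, TIMER_PINNED);
	t->expires = jiffies + HZ;
	add_timer_on(t, cpu);
}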
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190 int del_timer(struct timer_list *timer)
1191 {
1192 struct timer_base *base;
1193 unsigned long flags;
1194 int ret = 0;
1195
1196 debug_assert_init(timer);
1197
1198 if (timer_pending(timer)) {
1199 base = lock_timer_base(timer, &flags);
1200 ret = detach_if_pending(timer, base, true);
1201 raw_spin_unlock_irqrestore(&base->lock, flags);
1202 }
1203
1204 return ret;
1205 }
1206 EXPORT_SYMBOL(del_timer);
1207
1208
1209
1210
1211
1212
1213
1214
1215 int try_to_del_timer_sync(struct timer_list *timer)
1216 {
1217 struct timer_base *base;
1218 unsigned long flags;
1219 int ret = -1;
1220
1221 debug_assert_init(timer);
1222
1223 base = lock_timer_base(timer, &flags);
1224
1225 if (base->running_timer != timer)
1226 ret = detach_if_pending(timer, base, true);
1227
1228 raw_spin_unlock_irqrestore(&base->lock, flags);
1229
1230 return ret;
1231 }
1232 EXPORT_SYMBOL(try_to_del_timer_sync);
1233
1234 #ifdef CONFIG_PREEMPT_RT
1235 static __init void timer_base_init_expiry_lock(struct timer_base *base)
1236 {
1237 spin_lock_init(&base->expiry_lock);
1238 }
1239
1240 static inline void timer_base_lock_expiry(struct timer_base *base)
1241 {
1242 spin_lock(&base->expiry_lock);
1243 }
1244
1245 static inline void timer_base_unlock_expiry(struct timer_base *base)
1246 {
1247 spin_unlock(&base->expiry_lock);
1248 }
1249
1250
1251
1252
1253
1254
1255
1256
1257 static void timer_sync_wait_running(struct timer_base *base)
1258 {
1259 if (atomic_read(&base->timer_waiters)) {
1260 spin_unlock(&base->expiry_lock);
1261 spin_lock(&base->expiry_lock);
1262 }
1263 }
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275 static void del_timer_wait_running(struct timer_list *timer)
1276 {
1277 u32 tf;
1278
1279 tf = READ_ONCE(timer->flags);
1280 if (!(tf & TIMER_MIGRATING)) {
1281 struct timer_base *base = get_timer_base(tf);
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291 atomic_inc(&base->timer_waiters);
1292 spin_lock_bh(&base->expiry_lock);
1293 atomic_dec(&base->timer_waiters);
1294 spin_unlock_bh(&base->expiry_lock);
1295 }
1296 }
1297 #else
1298 static inline void timer_base_init_expiry_lock(struct timer_base *base) { }
1299 static inline void timer_base_lock_expiry(struct timer_base *base) { }
1300 static inline void timer_base_unlock_expiry(struct timer_base *base) { }
1301 static inline void timer_sync_wait_running(struct timer_base *base) { }
1302 static inline void del_timer_wait_running(struct timer_list *timer) { }
1303 #endif
1304
1305 #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342 int del_timer_sync(struct timer_list *timer)
1343 {
1344 int ret;
1345
1346 #ifdef CONFIG_LOCKDEP
1347 unsigned long flags;
1348
1349
1350
1351
1352
1353 local_irq_save(flags);
1354 lock_map_acquire(&timer->lockdep_map);
1355 lock_map_release(&timer->lockdep_map);
1356 local_irq_restore(flags);
1357 #endif
1358
1359
1360
1361
1362 WARN_ON(in_irq() && !(timer->flags & TIMER_IRQSAFE));
1363
1364 do {
1365 ret = try_to_del_timer_sync(timer);
1366
1367 if (unlikely(ret < 0)) {
1368 del_timer_wait_running(timer);
1369 cpu_relax();
1370 }
1371 } while (ret < 0);
1372
1373 return ret;
1374 }
1375 EXPORT_SYMBOL(del_timer_sync);
1376 #endif
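The rule del_timer_sync() enforces, continuing the hypothetical mydrv example from earlier: it waits until a running callback has finished, so it must not be called while holding any lock the callback itself takes, and not from hard interrupt context unless the timer is TIMER_IRQSAFE. A hedged teardown fragment:

/* Illustrative teardown, reusing the invented mydrv structure. */
static void mydrv_shutdown(struct mydrv_dev *dev)
{
	/*
	 * Must not hold any lock that mydrv_watchdog_fn() takes:
	 * del_timer_sync() waits for a running callback and would deadlock.
	 */
	del_timer_sync(&dev->watchdog);

	/* From here on the callback is guaranteed not to be running. */
	kfree(dev);
}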
1377
1378 static void call_timer_fn(struct timer_list *timer,
1379 void (*fn)(struct timer_list *),
1380 unsigned long baseclk)
1381 {
1382 int count = preempt_count();
1383
1384 #ifdef CONFIG_LOCKDEP
1385
1386
1387
1388
1389
1390
1391
1392 struct lockdep_map lockdep_map;
1393
1394 lockdep_copy_map(&lockdep_map, &timer->lockdep_map);
1395 #endif
1396
1397
1398
1399
1400
1401 lock_map_acquire(&lockdep_map);
1402
1403 trace_timer_expire_entry(timer, baseclk);
1404 fn(timer);
1405 trace_timer_expire_exit(timer);
1406
1407 lock_map_release(&lockdep_map);
1408
1409 if (count != preempt_count()) {
1410 WARN_ONCE(1, "timer: %pS preempt leak: %08x -> %08x\n",
1411 fn, count, preempt_count());
1412
1413
1414
1415
1416
1417
1418 preempt_count_set(count);
1419 }
1420 }
1421
1422 static void expire_timers(struct timer_base *base, struct hlist_head *head)
1423 {
1424 /*
1425 * This value is required only for tracing: base->clk was incremented
1426 * directly before expire_timers() was called, but the expiry relates
1427 * to the old base->clk value.
1428 */
1429 unsigned long baseclk = base->clk - 1;
1430
1431 while (!hlist_empty(head)) {
1432 struct timer_list *timer;
1433 void (*fn)(struct timer_list *);
1434
1435 timer = hlist_entry(head->first, struct timer_list, entry);
1436
1437 base->running_timer = timer;
1438 detach_timer(timer, true);
1439
1440 fn = timer->function;
1441
1442 if (timer->flags & TIMER_IRQSAFE) {
1443 raw_spin_unlock(&base->lock);
1444 call_timer_fn(timer, fn, baseclk);
1445 base->running_timer = NULL;
1446 raw_spin_lock(&base->lock);
1447 } else {
1448 raw_spin_unlock_irq(&base->lock);
1449 call_timer_fn(timer, fn, baseclk);
1450 base->running_timer = NULL;
1451 timer_sync_wait_running(base);
1452 raw_spin_lock_irq(&base->lock);
1453 }
1454 }
1455 }
1456
1457 static int __collect_expired_timers(struct timer_base *base,
1458 struct hlist_head *heads)
1459 {
1460 unsigned long clk = base->clk;
1461 struct hlist_head *vec;
1462 int i, levels = 0;
1463 unsigned int idx;
1464
1465 for (i = 0; i < LVL_DEPTH; i++) {
1466 idx = (clk & LVL_MASK) + i * LVL_SIZE;
1467
1468 if (__test_and_clear_bit(idx, base->pending_map)) {
1469 vec = base->vectors + idx;
1470 hlist_move_list(vec, heads++);
1471 levels++;
1472 }
1473
1474 if (clk & LVL_CLK_MASK)
1475 break;
1476
1477 clk >>= LVL_CLK_SHIFT;
1478 }
1479 return levels;
1480 }
1481
1482 #ifdef CONFIG_NO_HZ_COMMON
1483
1484
1485
1486
1487
1488 static int next_pending_bucket(struct timer_base *base, unsigned offset,
1489 unsigned clk)
1490 {
1491 unsigned pos, start = offset + clk;
1492 unsigned end = offset + LVL_SIZE;
1493
1494 pos = find_next_bit(base->pending_map, end, start);
1495 if (pos < end)
1496 return pos - start;
1497
1498 pos = find_next_bit(base->pending_map, start, offset);
1499 return pos < start ? pos + LVL_SIZE - start : -1;
1500 }
1501
1502
1503
1504
1505
1506 static unsigned long __next_timer_interrupt(struct timer_base *base)
1507 {
1508 unsigned long clk, next, adj;
1509 unsigned lvl, offset = 0;
1510
1511 next = base->clk + NEXT_TIMER_MAX_DELTA;
1512 clk = base->clk;
1513 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) {
1514 int pos = next_pending_bucket(base, offset, clk & LVL_MASK);
1515
1516 if (pos >= 0) {
1517 unsigned long tmp = clk + (unsigned long) pos;
1518
1519 tmp <<= LVL_SHIFT(lvl);
1520 if (time_before(tmp, next))
1521 next = tmp;
1522 }
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559 adj = clk & LVL_CLK_MASK ? 1 : 0;
1560 clk >>= LVL_CLK_SHIFT;
1561 clk += adj;
1562 }
1563 return next;
1564 }
1565
1566
1567
1568
1569
1570 static u64 cmp_next_hrtimer_event(u64 basem, u64 expires)
1571 {
1572 u64 nextevt = hrtimer_get_next_event();
1573
1574
1575
1576
1577
1578 if (expires <= nextevt)
1579 return expires;
1580
1581
1582
1583
1584
1585 if (nextevt <= basem)
1586 return basem;
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596 return DIV_ROUND_UP_ULL(nextevt, TICK_NSEC) * TICK_NSEC;
1597 }
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607 u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
1608 {
1609 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
1610 u64 expires = KTIME_MAX;
1611 unsigned long nextevt;
1612 bool is_max_delta;
1613
1614
1615
1616
1617
1618 if (cpu_is_offline(smp_processor_id()))
1619 return expires;
1620
1621 raw_spin_lock(&base->lock);
1622 nextevt = __next_timer_interrupt(base);
1623 is_max_delta = (nextevt == base->clk + NEXT_TIMER_MAX_DELTA);
1624 base->next_expiry = nextevt;
1625
1626
1627
1628
1629
1630 if (time_after(basej, base->clk)) {
1631 if (time_after(nextevt, basej))
1632 base->clk = basej;
1633 else if (time_after(nextevt, base->clk))
1634 base->clk = nextevt;
1635 }
1636
1637 if (time_before_eq(nextevt, basej)) {
1638 expires = basem;
1639 base->is_idle = false;
1640 } else {
1641 if (!is_max_delta)
1642 expires = basem + (u64)(nextevt - basej) * TICK_NSEC;
1643
1644
1645
1646
1647
1648
1649
1650 if ((expires - basem) > TICK_NSEC) {
1651 base->must_forward_clk = true;
1652 base->is_idle = true;
1653 }
1654 }
1655 raw_spin_unlock(&base->lock);
1656
1657 return cmp_next_hrtimer_event(basem, expires);
1658 }
1659
1660
1661
1662
1663
1664
1665 void timer_clear_idle(void)
1666 {
1667 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
1668
1669
1670
1671
1672
1673
1674
1675 base->is_idle = false;
1676 }
1677
1678 static int collect_expired_timers(struct timer_base *base,
1679 struct hlist_head *heads)
1680 {
1681 unsigned long now = READ_ONCE(jiffies);
1682
1683
1684
1685
1686
1687
1688 if ((long)(now - base->clk) > 2) {
1689 unsigned long next = __next_timer_interrupt(base);
1690
1691
1692
1693
1694
1695 if (time_after(next, now)) {
1696
1697
1698
1699
1700 base->clk = now;
1701 return 0;
1702 }
1703 base->clk = next;
1704 }
1705 return __collect_expired_timers(base, heads);
1706 }
1707 #else
1708 static inline int collect_expired_timers(struct timer_base *base,
1709 struct hlist_head *heads)
1710 {
1711 return __collect_expired_timers(base, heads);
1712 }
1713 #endif
1714
1715
1716
1717
1718
1719 void update_process_times(int user_tick)
1720 {
1721 struct task_struct *p = current;
1722
1723
1724 account_process_tick(p, user_tick);
1725 run_local_timers();
1726 rcu_sched_clock_irq(user_tick);
1727 #ifdef CONFIG_IRQ_WORK
1728 if (in_irq())
1729 irq_work_tick();
1730 #endif
1731 scheduler_tick();
1732 if (IS_ENABLED(CONFIG_POSIX_TIMERS))
1733 run_posix_cpu_timers();
1734 }
1735
1736
1737
1738
1739
1740 static inline void __run_timers(struct timer_base *base)
1741 {
1742 struct hlist_head heads[LVL_DEPTH];
1743 int levels;
1744
1745 if (!time_after_eq(jiffies, base->clk))
1746 return;
1747
1748 timer_base_lock_expiry(base);
1749 raw_spin_lock_irq(&base->lock);
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765 base->must_forward_clk = false;
1766
1767 while (time_after_eq(jiffies, base->clk)) {
1768
1769 levels = collect_expired_timers(base, heads);
1770 base->clk++;
1771
1772 while (levels--)
1773 expire_timers(base, heads + levels);
1774 }
1775 raw_spin_unlock_irq(&base->lock);
1776 timer_base_unlock_expiry(base);
1777 }
1778
1779
1780
1781
1782 static __latent_entropy void run_timer_softirq(struct softirq_action *h)
1783 {
1784 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
1785
1786 __run_timers(base);
1787 if (IS_ENABLED(CONFIG_NO_HZ_COMMON))
1788 __run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
1789 }
1790
1791
1792
1793
1794 void run_local_timers(void)
1795 {
1796 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
1797
1798 hrtimer_run_queues();
1799
1800 if (time_before(jiffies, base->clk)) {
1801 if (!IS_ENABLED(CONFIG_NO_HZ_COMMON))
1802 return;
1803
1804 base++;
1805 if (time_before(jiffies, base->clk))
1806 return;
1807 }
1808 raise_softirq(TIMER_SOFTIRQ);
1809 }
1810
1811
1812
1813
1814
1815 struct process_timer {
1816 struct timer_list timer;
1817 struct task_struct *task;
1818 };
1819
1820 static void process_timeout(struct timer_list *t)
1821 {
1822 struct process_timer *timeout = from_timer(timeout, t, timer);
1823
1824 wake_up_process(timeout->task);
1825 }
1826
1827
1828
1829
1830
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856 signed long __sched schedule_timeout(signed long timeout)
1857 {
1858 struct process_timer timer;
1859 unsigned long expire;
1860
1861 switch (timeout)
1862 {
1863 case MAX_SCHEDULE_TIMEOUT:
1864
1865
1866
1867
1868
1869
1870
1871 schedule();
1872 goto out;
1873 default:
1874
1875
1876
1877
1878
1879
1880
1881 if (timeout < 0) {
1882 printk(KERN_ERR "schedule_timeout: wrong timeout "
1883 "value %lx\n", timeout);
1884 dump_stack();
1885 current->state = TASK_RUNNING;
1886 goto out;
1887 }
1888 }
1889
1890 expire = timeout + jiffies;
1891
1892 timer.task = current;
1893 timer_setup_on_stack(&timer.timer, process_timeout, 0);
1894 __mod_timer(&timer.timer, expire, 0);
1895 schedule();
1896 del_singleshot_timer_sync(&timer.timer);
1897
1898
1899 destroy_timer_on_stack(&timer.timer);
1900
1901 timeout = expire - jiffies;
1902
1903 out:
1904 return timeout < 0 ? 0 : timeout;
1905 }
1906 EXPORT_SYMBOL(schedule_timeout);
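schedule_timeout() only actually sleeps if the caller changed its task state first; in TASK_RUNNING it returns almost immediately. The canonical pattern, shown here as a hedged fragment:

/* Illustrative fragment: sleep for up to one second, interruptibly. */
static void example_wait(void)
{
	signed long remaining;

	set_current_state(TASK_INTERRUPTIBLE);
	remaining = schedule_timeout(HZ);   /* 0 on timeout, else jiffies left */

	pr_debug("woke with %ld jiffies remaining\n", remaining);
}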
1907
1908
1909
1910
1911
1912 signed long __sched schedule_timeout_interruptible(signed long timeout)
1913 {
1914 __set_current_state(TASK_INTERRUPTIBLE);
1915 return schedule_timeout(timeout);
1916 }
1917 EXPORT_SYMBOL(schedule_timeout_interruptible);
1918
1919 signed long __sched schedule_timeout_killable(signed long timeout)
1920 {
1921 __set_current_state(TASK_KILLABLE);
1922 return schedule_timeout(timeout);
1923 }
1924 EXPORT_SYMBOL(schedule_timeout_killable);
1925
1926 signed long __sched schedule_timeout_uninterruptible(signed long timeout)
1927 {
1928 __set_current_state(TASK_UNINTERRUPTIBLE);
1929 return schedule_timeout(timeout);
1930 }
1931 EXPORT_SYMBOL(schedule_timeout_uninterruptible);
1932
1933
1934
1935
1936
1937 signed long __sched schedule_timeout_idle(signed long timeout)
1938 {
1939 __set_current_state(TASK_IDLE);
1940 return schedule_timeout(timeout);
1941 }
1942 EXPORT_SYMBOL(schedule_timeout_idle);
1943
1944 #ifdef CONFIG_HOTPLUG_CPU
1945 static void migrate_timer_list(struct timer_base *new_base, struct hlist_head *head)
1946 {
1947 struct timer_list *timer;
1948 int cpu = new_base->cpu;
1949
1950 while (!hlist_empty(head)) {
1951 timer = hlist_entry(head->first, struct timer_list, entry);
1952 detach_timer(timer, false);
1953 timer->flags = (timer->flags & ~TIMER_BASEMASK) | cpu;
1954 internal_add_timer(new_base, timer);
1955 }
1956 }
1957
1958 int timers_prepare_cpu(unsigned int cpu)
1959 {
1960 struct timer_base *base;
1961 int b;
1962
1963 for (b = 0; b < NR_BASES; b++) {
1964 base = per_cpu_ptr(&timer_bases[b], cpu);
1965 base->clk = jiffies;
1966 base->next_expiry = base->clk + NEXT_TIMER_MAX_DELTA;
1967 base->is_idle = false;
1968 base->must_forward_clk = true;
1969 }
1970 return 0;
1971 }
1972
1973 int timers_dead_cpu(unsigned int cpu)
1974 {
1975 struct timer_base *old_base;
1976 struct timer_base *new_base;
1977 int b, i;
1978
1979 BUG_ON(cpu_online(cpu));
1980
1981 for (b = 0; b < NR_BASES; b++) {
1982 old_base = per_cpu_ptr(&timer_bases[b], cpu);
1983 new_base = get_cpu_ptr(&timer_bases[b]);
1984
1985
1986
1987
1988 raw_spin_lock_irq(&new_base->lock);
1989 raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
1990
1991
1992
1993
1994
1995 forward_timer_base(new_base);
1996
1997 BUG_ON(old_base->running_timer);
1998
1999 for (i = 0; i < WHEEL_SIZE; i++)
2000 migrate_timer_list(new_base, old_base->vectors + i);
2001
2002 raw_spin_unlock(&old_base->lock);
2003 raw_spin_unlock_irq(&new_base->lock);
2004 put_cpu_ptr(&timer_bases);
2005 }
2006 return 0;
2007 }
2008
2009 #endif
2010
2011 static void __init init_timer_cpu(int cpu)
2012 {
2013 struct timer_base *base;
2014 int i;
2015
2016 for (i = 0; i < NR_BASES; i++) {
2017 base = per_cpu_ptr(&timer_bases[i], cpu);
2018 base->cpu = cpu;
2019 raw_spin_lock_init(&base->lock);
2020 base->clk = jiffies;
2021 timer_base_init_expiry_lock(base);
2022 }
2023 }
2024
2025 static void __init init_timer_cpus(void)
2026 {
2027 int cpu;
2028
2029 for_each_possible_cpu(cpu)
2030 init_timer_cpu(cpu);
2031 }
2032
2033 void __init init_timers(void)
2034 {
2035 init_timer_cpus();
2036 open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
2037 }
2038
2039
2040
2041
2042
2043 void msleep(unsigned int msecs)
2044 {
2045 unsigned long timeout = msecs_to_jiffies(msecs) + 1;
2046
2047 while (timeout)
2048 timeout = schedule_timeout_uninterruptible(timeout);
2049 }
2050
2051 EXPORT_SYMBOL(msleep);
2052
2053
2054
2055
2056
2057 unsigned long msleep_interruptible(unsigned int msecs)
2058 {
2059 unsigned long timeout = msecs_to_jiffies(msecs) + 1;
2060
2061 while (timeout && !signal_pending(current))
2062 timeout = schedule_timeout_interruptible(timeout);
2063 return jiffies_to_msecs(timeout);
2064 }
2065
2066 EXPORT_SYMBOL(msleep_interruptible);
2067
2068
2069
2070
2071
2072
2073
2074
2075
2076
2077
2078
2079 void __sched usleep_range(unsigned long min, unsigned long max)
2080 {
2081 ktime_t exp = ktime_add_us(ktime_get(), min);
2082 u64 delta = (u64)(max - min) * NSEC_PER_USEC;
2083
2084 for (;;) {
2085 __set_current_state(TASK_UNINTERRUPTIBLE);
2086
2087 if (!schedule_hrtimeout_range(&exp, delta, HRTIMER_MODE_ABS))
2088 break;
2089 }
2090 }
2091 EXPORT_SYMBOL(usleep_range);
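The two sleep families above trade precision for overhead: msleep() is built on the jiffies wheel, so short sleeps can overshoot by a jiffy or more, while usleep_range() uses hrtimers and lets the kernel coalesce wakeups anywhere inside the [min, max] window. A hedged fragment illustrating the usual choice:

/* Illustrative fragment: pick the sleep primitive by required precision. */
static void example_delays(void)
{
	usleep_range(100, 200);	/* short delay, hrtimer based, coalescable */
	msleep(20);		/* longer delay, may overshoot by a jiffy+ */
}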