This source file includes the following definitions:
- dummy_clock_read
- tk_normalize_xtime
- tk_xtime
- tk_set_xtime
- tk_xtime_add
- tk_set_wall_to_mono
- tk_update_sleep_time
- tk_clock_read
- timekeeping_check_update
- timekeeping_get_delta
- timekeeping_check_update
- timekeeping_get_delta
- tk_setup_internals
- default_arch_gettimeoffset
- arch_gettimeoffset
- timekeeping_delta_to_ns
- timekeeping_get_ns
- timekeeping_cycles_to_ns
- update_fast_timekeeper
- __ktime_get_fast_ns
- ktime_get_mono_fast_ns
- ktime_get_raw_fast_ns
- ktime_get_boot_fast_ns
- __ktime_get_real_fast_ns
- ktime_get_real_fast_ns
- halt_fast_timekeeper
- update_pvclock_gtod
- pvclock_gtod_register_notifier
- pvclock_gtod_unregister_notifier
- tk_update_leap_state
- tk_update_ktime_data
- timekeeping_update
- timekeeping_forward_now
- ktime_get_real_ts64
- ktime_get
- ktime_get_resolution_ns
- ktime_get_with_offset
- ktime_get_coarse_with_offset
- ktime_mono_to_any
- ktime_get_raw
- ktime_get_ts64
- ktime_get_seconds
- ktime_get_real_seconds
- __ktime_get_real_seconds
- ktime_get_snapshot
- scale64_check_overflow
- adjust_historical_crosststamp
- cycle_between
- get_device_system_crosststamp
- do_settimeofday64
- timekeeping_inject_offset
- timekeeping_warp_clock
- __timekeeping_set_tai_offset
- change_clocksource
- timekeeping_notify
- ktime_get_raw_ts64
- timekeeping_valid_for_hres
- timekeeping_max_deferment
- read_persistent_clock64
- read_persistent_wall_and_boot_offset
- timekeeping_init
- __timekeeping_inject_sleeptime
- timekeeping_rtc_skipresume
- timekeeping_rtc_skipsuspend
- timekeeping_inject_sleeptime64
- timekeeping_resume
- timekeeping_suspend
- timekeeping_init_ops
- timekeeping_apply_adjustment
- timekeeping_adjust
- accumulate_nsecs_to_secs
- logarithmic_accumulation
- timekeeping_advance
- update_wall_time
- getboottime64
- ktime_get_coarse_real_ts64
- ktime_get_coarse_ts64
- do_timer
- ktime_get_update_offsets_now
- timekeeping_validate_timex
- do_adjtimex
- hardpps
- xtime_update
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  Kernel timekeeping code and accessor functions. Based on code from
4  *  timer.c, moved in commit 8524070b7982.
5  */
6 #include <linux/timekeeper_internal.h>
7 #include <linux/module.h>
8 #include <linux/interrupt.h>
9 #include <linux/percpu.h>
10 #include <linux/init.h>
11 #include <linux/mm.h>
12 #include <linux/nmi.h>
13 #include <linux/sched.h>
14 #include <linux/sched/loadavg.h>
15 #include <linux/sched/clock.h>
16 #include <linux/syscore_ops.h>
17 #include <linux/clocksource.h>
18 #include <linux/jiffies.h>
19 #include <linux/time.h>
20 #include <linux/tick.h>
21 #include <linux/stop_machine.h>
22 #include <linux/pvclock_gtod.h>
23 #include <linux/compiler.h>
24 #include <linux/audit.h>
25
26 #include "tick-internal.h"
27 #include "ntp_internal.h"
28 #include "timekeeping_internal.h"
29
30 #define TK_CLEAR_NTP (1 << 0)
31 #define TK_MIRROR (1 << 1)
32 #define TK_CLOCK_WAS_SET (1 << 2)
33
34 enum timekeeping_adv_mode {
35 /* Update timekeeper when a tick has passed */
36 TK_ADV_TICK,
37 
38 /* Update timekeeper on a direct frequency change */
39 TK_ADV_FREQ
40 };
41
42 /*
43  * The most important data for readout fits into a single 64 byte
44  * cache line.
45  */
46 static struct {
47 seqcount_t seq;
48 struct timekeeper timekeeper;
49 } tk_core ____cacheline_aligned = {
50 .seq = SEQCNT_ZERO(tk_core.seq),
51 };
52
53 static DEFINE_RAW_SPINLOCK(timekeeper_lock);
54 static struct timekeeper shadow_timekeeper;
55
56 /**
57  * struct tk_fast - NMI safe timekeeper
58  * @seq:	Sequence counter for protecting updates. The lowest bit
59  *		is the index for the tk_read_base array
60  * @base:	tk_read_base array. Access is indexed by the lowest bit of
61  *		@seq.
62  *
63  * See @update_fast_timekeeper() below.
64  */
65 struct tk_fast {
66 seqcount_t seq;
67 struct tk_read_base base[2];
68 };
69
70 /* Suspend-time cycles value for halted fast timekeeper */
71 static u64 cycles_at_suspend;
72
73 static u64 dummy_clock_read(struct clocksource *cs)
74 {
75 return cycles_at_suspend;
76 }
77
78 static struct clocksource dummy_clock = {
79 .read = dummy_clock_read,
80 };
81
82 static struct tk_fast tk_fast_mono ____cacheline_aligned = {
83 .base[0] = { .clock = &dummy_clock, },
84 .base[1] = { .clock = &dummy_clock, },
85 };
86
87 static struct tk_fast tk_fast_raw ____cacheline_aligned = {
88 .base[0] = { .clock = &dummy_clock, },
89 .base[1] = { .clock = &dummy_clock, },
90 };
91
92 /* Flag for whether timekeeping is suspended */
93 int __read_mostly timekeeping_suspended;
94
95 static inline void tk_normalize_xtime(struct timekeeper *tk)
96 {
97 while (tk->tkr_mono.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_mono.shift)) {
98 tk->tkr_mono.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
99 tk->xtime_sec++;
100 }
101 while (tk->tkr_raw.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_raw.shift)) {
102 tk->tkr_raw.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_raw.shift;
103 tk->raw_sec++;
104 }
105 }
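/*
 * [Editor's note] Worked example of the shifted-nanosecond representation
 * normalized above (a sketch, not part of the original file): with
 * shift = 8, one second is stored as NSEC_PER_SEC << 8 = 256,000,000,000
 * shifted nanoseconds. If xtime_nsec has accumulated 300,000,000,000,
 * one loop iteration subtracts 256,000,000,000 and increments xtime_sec,
 * leaving 44,000,000,000 shifted ns (44e9 >> 8 = 171,875,000 ns) behind.
 */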
106
107 static inline struct timespec64 tk_xtime(const struct timekeeper *tk)
108 {
109 struct timespec64 ts;
110
111 ts.tv_sec = tk->xtime_sec;
112 ts.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
113 return ts;
114 }
115
116 static void tk_set_xtime(struct timekeeper *tk, const struct timespec64 *ts)
117 {
118 tk->xtime_sec = ts->tv_sec;
119 tk->tkr_mono.xtime_nsec = (u64)ts->tv_nsec << tk->tkr_mono.shift;
120 }
121
122 static void tk_xtime_add(struct timekeeper *tk, const struct timespec64 *ts)
123 {
124 tk->xtime_sec += ts->tv_sec;
125 tk->tkr_mono.xtime_nsec += (u64)ts->tv_nsec << tk->tkr_mono.shift;
126 tk_normalize_xtime(tk);
127 }
128
129 static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec64 wtm)
130 {
131 struct timespec64 tmp;
132
133 /*
134  * Verify consistency of: offset_real = -wall_to_monotonic
135  * before modifying anything
136  */
137 set_normalized_timespec64(&tmp, -tk->wall_to_monotonic.tv_sec,
138 -tk->wall_to_monotonic.tv_nsec);
139 WARN_ON_ONCE(tk->offs_real != timespec64_to_ktime(tmp));
140 tk->wall_to_monotonic = wtm;
141 set_normalized_timespec64(&tmp, -wtm.tv_sec, -wtm.tv_nsec);
142 tk->offs_real = timespec64_to_ktime(tmp);
143 tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tk->tai_offset, 0));
144 }
145
146 static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
147 {
148 tk->offs_boot = ktime_add(tk->offs_boot, delta);
149 
150 /* Timespec representation of offs_boot, kept for the VDSO update
151  * to avoid a 64-bit division on every tick.
152  */
153 tk->monotonic_to_boot = ktime_to_timespec64(tk->offs_boot);
154 }
155
156 /*
157  * tk_clock_read - atomic clocksource read() helper
158  *
159  * This helper is necessary to use in the read paths because, while the
160  * seqcount ensures we don't return a bad value while structures are
161  * updated, it doesn't protect from potential crashes. There is the
162  * possibility that the tkr's clocksource may change between the read
163  * reference and the clock reference passed to the read function. This
164  * can cause crashes if the wrong clocksource is used.
165  * This isn't necessary when holding the timekeeper_lock or doing a read
166  * of the fast-timekeeper tkrs (which are protected by their own locking
167  * and update logic).
168  */
169 static inline u64 tk_clock_read(const struct tk_read_base *tkr)
170 {
171 struct clocksource *clock = READ_ONCE(tkr->clock);
172
173 return clock->read(clock);
174 }
175
176 #ifdef CONFIG_DEBUG_TIMEKEEPING
177 #define WARNING_FREQ (HZ*300)
178
179 static void timekeeping_check_update(struct timekeeper *tk, u64 offset)
180 {
181
182 u64 max_cycles = tk->tkr_mono.clock->max_cycles;
183 const char *name = tk->tkr_mono.clock->name;
184
185 if (offset > max_cycles) {
186 printk_deferred("WARNING: timekeeping: Cycle offset (%lld) is larger than allowed by the '%s' clock's max_cycles value (%lld): time overflow danger\n",
187 offset, name, max_cycles);
188 printk_deferred(" timekeeping: Your kernel is sick, but tries to cope by capping time updates\n");
189 } else {
190 if (offset > (max_cycles >> 1)) {
191 printk_deferred("INFO: timekeeping: Cycle offset (%lld) is larger than the '%s' clock's 50%% safety margin (%lld)\n",
192 offset, name, max_cycles >> 1);
193 printk_deferred(" timekeeping: Your kernel is still fine, but is feeling a bit nervous\n");
194 }
195 }
196
197 if (tk->underflow_seen) {
198 if (jiffies - tk->last_warning > WARNING_FREQ) {
199 printk_deferred("WARNING: Underflow in clocksource '%s' observed, time update ignored.\n", name);
200 printk_deferred(" Please report this, consider using a different clocksource, if possible.\n");
201 printk_deferred(" Your kernel is probably still fine.\n");
202 tk->last_warning = jiffies;
203 }
204 tk->underflow_seen = 0;
205 }
206
207 if (tk->overflow_seen) {
208 if (jiffies - tk->last_warning > WARNING_FREQ) {
209 printk_deferred("WARNING: Overflow in clocksource '%s' observed, time update capped.\n", name);
210 printk_deferred(" Please report this, consider using a different clocksource, if possible.\n");
211 printk_deferred(" Your kernel is probably still fine.\n");
212 tk->last_warning = jiffies;
213 }
214 tk->overflow_seen = 0;
215 }
216 }
217
218 static inline u64 timekeeping_get_delta(const struct tk_read_base *tkr)
219 {
220 struct timekeeper *tk = &tk_core.timekeeper;
221 u64 now, last, mask, max, delta;
222 unsigned int seq;
223 
224 /*
225  * Take a consistent snapshot of the clocksource state under
226  * tk_core.seq and retry if a timekeeper update races with us, so
227  * that now/last/mask/max all belong to the same update cycle.
228  */
229 
230 
231 do {
232 seq = read_seqcount_begin(&tk_core.seq);
233 now = tk_clock_read(tkr);
234 last = tkr->cycle_last;
235 mask = tkr->mask;
236 max = tkr->clock->max_cycles;
237 } while (read_seqcount_retry(&tk_core.seq, seq));
238
239 delta = clocksource_delta(now, last, mask);
240
241 /*
242  * Try to catch underflows by checking if we are seeing small
243  * negative values relative to the mask.
244  */
245 if (unlikely((~delta & mask) < (mask >> 3))) {
246 tk->underflow_seen = 1;
247 delta = 0;
248 }
249 
250 /* Cap delta value to the max_cycles value to avoid mult overflows */
251 if (unlikely(delta > max)) {
252 tk->overflow_seen = 1;
253 delta = tkr->clock->max_cycles;
254 }
255
256 return delta;
257 }
258 #else
259 static inline void timekeeping_check_update(struct timekeeper *tk, u64 offset)
260 {
261 }
262 static inline u64 timekeeping_get_delta(const struct tk_read_base *tkr)
263 {
264 u64 cycle_now, delta;
265 
266 /* read clocksource */
267 cycle_now = tk_clock_read(tkr);
268 
269 /* calculate the delta since the last update_wall_time */
270 delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask);
271
272 return delta;
273 }
274 #endif
275
276 /**
277  * tk_setup_internals - Set up internals to use clocksource clock.
278  *
279  * @tk:		The target timekeeper to setup.
280  * @clock:	Pointer to clocksource.
281  *
282  * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
283  * pair and interval request.
284  *
285  * Unless you're the timekeeping code, you should not be using this!
286  */
287 static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
288 {
289 u64 interval;
290 u64 tmp, ntpinterval;
291 struct clocksource *old_clock;
292
293 ++tk->cs_was_changed_seq;
294 old_clock = tk->tkr_mono.clock;
295 tk->tkr_mono.clock = clock;
296 tk->tkr_mono.mask = clock->mask;
297 tk->tkr_mono.cycle_last = tk_clock_read(&tk->tkr_mono);
298
299 tk->tkr_raw.clock = clock;
300 tk->tkr_raw.mask = clock->mask;
301 tk->tkr_raw.cycle_last = tk->tkr_mono.cycle_last;
302
303
304 tmp = NTP_INTERVAL_LENGTH;
305 tmp <<= clock->shift;
306 ntpinterval = tmp;
307 tmp += clock->mult/2;
308 do_div(tmp, clock->mult);
309 if (tmp == 0)
310 tmp = 1;
311
312 interval = (u64) tmp;
313 tk->cycle_interval = interval;
314
315
316 tk->xtime_interval = interval * clock->mult;
317 tk->xtime_remainder = ntpinterval - tk->xtime_interval;
318 tk->raw_interval = interval * clock->mult;
319
320
321 if (old_clock) {
322 int shift_change = clock->shift - old_clock->shift;
323 if (shift_change < 0) {
324 tk->tkr_mono.xtime_nsec >>= -shift_change;
325 tk->tkr_raw.xtime_nsec >>= -shift_change;
326 } else {
327 tk->tkr_mono.xtime_nsec <<= shift_change;
328 tk->tkr_raw.xtime_nsec <<= shift_change;
329 }
330 }
331
332 tk->tkr_mono.shift = clock->shift;
333 tk->tkr_raw.shift = clock->shift;
334
335 tk->ntp_error = 0;
336 tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;
337 tk->ntp_tick = ntpinterval << tk->ntp_error_shift;
338
339 /*
340  * The timekeeper keeps its own mult values for the currently
341  * active clocksource. These values will be adjusted via NTP
342  * to counteract clock drifting.
343  */
344 tk->tkr_mono.mult = clock->mult;
345 tk->tkr_raw.mult = clock->mult;
346 tk->ntp_err_mult = 0;
347 tk->skip_second_overflow = 0;
348 }
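/*
 * [Editor's note] Numeric sketch of the interval math above, with
 * illustrative values not taken from the original file: for a 10 MHz
 * clocksource with mult = 100 and shift = 0, and HZ = 250 so that
 * NTP_INTERVAL_LENGTH is roughly NSEC_PER_SEC / HZ = 4,000,000 ns,
 * tmp = (4,000,000 + 100/2) / 100 = 40,000 cycles per NTP interval.
 * Then xtime_interval = 40,000 * 100 = 4,000,000 shifted ns and
 * xtime_remainder = 0 for this exactly divisible case.
 */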
349
350
351
352 #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
353 static u32 default_arch_gettimeoffset(void) { return 0; }
354 u32 (*arch_gettimeoffset)(void) = default_arch_gettimeoffset;
355 #else
356 static inline u32 arch_gettimeoffset(void) { return 0; }
357 #endif
358
359 static inline u64 timekeeping_delta_to_ns(const struct tk_read_base *tkr, u64 delta)
360 {
361 u64 nsec;
362
363 nsec = delta * tkr->mult + tkr->xtime_nsec;
364 nsec >>= tkr->shift;
365
366
367 return nsec + arch_gettimeoffset();
368 }
369
370 static inline u64 timekeeping_get_ns(const struct tk_read_base *tkr)
371 {
372 u64 delta;
373
374 delta = timekeeping_get_delta(tkr);
375 return timekeeping_delta_to_ns(tkr, delta);
376 }
377
378 static inline u64 timekeeping_cycles_to_ns(const struct tk_read_base *tkr, u64 cycles)
379 {
380 u64 delta;
381
382
383 delta = clocksource_delta(cycles, tkr->cycle_last, tkr->mask);
384 return timekeeping_delta_to_ns(tkr, delta);
385 }
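/*
 * [Editor's note] Sketch of the conversion above, using assumed values:
 * with mult = 400 and shift = 4, a delta of 1000 cycles on top of
 * xtime_nsec = 160 yields (1000 * 400 + 160) >> 4 = 400,160 >> 4 =
 * 25,010 ns. The shifted xtime_nsec term carries the sub-nanosecond
 * remainder between updates, so repeated readouts do not lose
 * precision to truncation.
 */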
386
387 /**
388  * update_fast_timekeeper - Update the fast and NMI safe monotonic timekeeper.
389  * @tkr: Timekeeping readout base from which we take the update
390  * @tkf: Pointer to the fast timekeeper which gets updated
391  *
392  * We want to use this from any context including NMI and tracing /
393  * instrumenting the timekeeping code itself.
394  *
395  * Employs the latch technique; see @raw_write_seqcount_latch.
396  *
397  * So if an NMI hits the update of base[0] then it will use base[1]
398  * which is still consistent. In the worst case this can result in a
399  * slightly wrong timestamp (a few nanoseconds). See @ktime_get_mono_fast_ns.
400  */
401 static void update_fast_timekeeper(const struct tk_read_base *tkr,
402 struct tk_fast *tkf)
403 {
404 struct tk_read_base *base = tkf->base;
405
406
407 raw_write_seqcount_latch(&tkf->seq);
408
409
410 memcpy(base, tkr, sizeof(*base));
411
412
413 raw_write_seqcount_latch(&tkf->seq);
414
415
416 memcpy(base + 1, base, sizeof(*base));
417 }
418
419 /**
420  * ktime_get_mono_fast_ns - Fast NMI safe access to clock monotonic
421  *
422  * This timestamp is not guaranteed to be monotonic across an update.
423  * The timestamp is calculated by:
424  *
425  *	now = base_mono + clock_delta * slope
426  *
427  * So if the update lowers the slope, readers who are forced to the
428  * not yet updated second array are still using the old steeper slope.
429  *
430  * tmono
431  * ^
432  * |    o  n
433  * |   o n
434  * |  u
435  * | o
436  * |o
437  * |12345678---> reader order
438  *
439  * o = old slope
440  * u = update
441  * n = new slope
442  *
443  * So reader 6 will observe time going backwards versus reader 5.
444  *
445  * While other CPUs are likely to be able to observe that, the only way
446  * for a CPU local observation is when an NMI hits in the middle of
447  * the update. Timestamps taken from that NMI context might be ahead
448  * of the following timestamps. Callers need to be aware of that and
449  * deal with it.
450  */
451 static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
452 {
453 struct tk_read_base *tkr;
454 unsigned int seq;
455 u64 now;
456
457 do {
458 seq = raw_read_seqcount_latch(&tkf->seq);
459 tkr = tkf->base + (seq & 0x01);
460 now = ktime_to_ns(tkr->base);
461
462 now += timekeeping_delta_to_ns(tkr,
463 clocksource_delta(
464 tk_clock_read(tkr),
465 tkr->cycle_last,
466 tkr->mask));
467 } while (read_seqcount_retry(&tkf->seq, seq));
468
469 return now;
470 }
471
472 u64 ktime_get_mono_fast_ns(void)
473 {
474 return __ktime_get_fast_ns(&tk_fast_mono);
475 }
476 EXPORT_SYMBOL_GPL(ktime_get_mono_fast_ns);
477
478 u64 ktime_get_raw_fast_ns(void)
479 {
480 return __ktime_get_fast_ns(&tk_fast_raw);
481 }
482 EXPORT_SYMBOL_GPL(ktime_get_raw_fast_ns);
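/*
 * [Editor's note] Usage sketch (assumed caller, not part of this file):
 * the fast accessors are usable from NMI and tracing context, at the
 * cost of possible small non-monotonicity across a timekeeper update:
 *
 *	u64 t0 = ktime_get_mono_fast_ns();
 *	do_traced_work();		// hypothetical helper
 *	u64 elapsed = ktime_get_mono_fast_ns() - t0;
 *
 * Unlike ktime_get(), these never spin on tk_core.seq, so an NMI that
 * interrupts a timekeeper update on the same CPU cannot deadlock here.
 */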
483
484 /**
485  * ktime_get_boot_fast_ns - NMI safe and fast access to boot clock.
486  *
487  * To keep it NMI safe since we're accessing from tracing, we're not using a
488  * separate timekeeper with updates to monotonic clock and boot offset
489  * protected with seqcounts. This has the following minor side effects:
490  *
491  * (1) Its possible that a timestamp be taken after the boot offset is updated
492  * but before the timekeeper is updated. If this happens, the new boot offset
493  * is added to the old timekeeping making the clock appear to update slightly
494  * earlier:
495  *    CPU 0                                        CPU 1
496  *    timekeeping_inject_sleeptime64()
497  *    __timekeeping_inject_sleeptime(tk, delta);
498  *                                                 timestamp();
499  *    timekeeping_update(tk, TK_CLEAR_NTP...);
500  *
501  * (2) On 32-bit systems, the 64-bit boot offset (tk->offs_boot) may be
502  * partially updated. Since the tk->offs_boot update is a rare event, this
503  * should be a rare occurrence which postprocessing should be able to handle.
504  */
505 u64 notrace ktime_get_boot_fast_ns(void)
506 {
507 struct timekeeper *tk = &tk_core.timekeeper;
508
509 return (ktime_get_mono_fast_ns() + ktime_to_ns(tk->offs_boot));
510 }
511 EXPORT_SYMBOL_GPL(ktime_get_boot_fast_ns);
512
513 
514 /*
515  * See comment for __ktime_get_fast_ns() vs. timestamp ordering
516  */
517 static __always_inline u64 __ktime_get_real_fast_ns(struct tk_fast *tkf)
518 {
519 struct tk_read_base *tkr;
520 unsigned int seq;
521 u64 now;
522
523 do {
524 seq = raw_read_seqcount_latch(&tkf->seq);
525 tkr = tkf->base + (seq & 0x01);
526 now = ktime_to_ns(tkr->base_real);
527
528 now += timekeeping_delta_to_ns(tkr,
529 clocksource_delta(
530 tk_clock_read(tkr),
531 tkr->cycle_last,
532 tkr->mask));
533 } while (read_seqcount_retry(&tkf->seq, seq));
534
535 return now;
536 }
537
538 /**
539  * ktime_get_real_fast_ns - NMI safe and fast access to clock realtime.
540  */
541 u64 ktime_get_real_fast_ns(void)
542 {
543 return __ktime_get_real_fast_ns(&tk_fast_mono);
544 }
545 EXPORT_SYMBOL_GPL(ktime_get_real_fast_ns);
546
547 /**
548  * halt_fast_timekeeper - Prevent fast timekeeper from accessing clocksource.
549  * @tk: Timekeeper to snapshot.
550  *
551  * It generally is unsafe to access the clocksource after timekeeping has been
552  * suspended, so take a snapshot of the readout base of @tk and use it as the
553  * fast timekeeper's readout base while suspended.  It will return the same
554  * number of cycles every time until timekeeping is resumed at which time the
555  * proper readout base for the fast timekeeper will be restored automatically.
556  */
557 static void halt_fast_timekeeper(const struct timekeeper *tk)
558 {
559 static struct tk_read_base tkr_dummy;
560 const struct tk_read_base *tkr = &tk->tkr_mono;
561
562 memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
563 cycles_at_suspend = tk_clock_read(tkr);
564 tkr_dummy.clock = &dummy_clock;
565 tkr_dummy.base_real = tkr->base + tk->offs_real;
566 update_fast_timekeeper(&tkr_dummy, &tk_fast_mono);
567
568 tkr = &tk->tkr_raw;
569 memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
570 tkr_dummy.clock = &dummy_clock;
571 update_fast_timekeeper(&tkr_dummy, &tk_fast_raw);
572 }
573
574 static RAW_NOTIFIER_HEAD(pvclock_gtod_chain);
575
576 static void update_pvclock_gtod(struct timekeeper *tk, bool was_set)
577 {
578 raw_notifier_call_chain(&pvclock_gtod_chain, was_set, tk);
579 }
580
581 /**
582  * pvclock_gtod_register_notifier - register a pvclock timedata update listener
583  */
584 int pvclock_gtod_register_notifier(struct notifier_block *nb)
585 {
586 struct timekeeper *tk = &tk_core.timekeeper;
587 unsigned long flags;
588 int ret;
589
590 raw_spin_lock_irqsave(&timekeeper_lock, flags);
591 ret = raw_notifier_chain_register(&pvclock_gtod_chain, nb);
592 update_pvclock_gtod(tk, true);
593 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
594
595 return ret;
596 }
597 EXPORT_SYMBOL_GPL(pvclock_gtod_register_notifier);
598
599 /**
600  * pvclock_gtod_unregister_notifier - unregister a pvclock
601  * timedata update listener
602  */
603 int pvclock_gtod_unregister_notifier(struct notifier_block *nb)
604 {
605 unsigned long flags;
606 int ret;
607
608 raw_spin_lock_irqsave(&timekeeper_lock, flags);
609 ret = raw_notifier_chain_unregister(&pvclock_gtod_chain, nb);
610 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
611
612 return ret;
613 }
614 EXPORT_SYMBOL_GPL(pvclock_gtod_unregister_notifier);
615
616
617
618
619 static inline void tk_update_leap_state(struct timekeeper *tk)
620 {
621 tk->next_leap_ktime = ntp_get_next_leap();
622 if (tk->next_leap_ktime != KTIME_MAX)
623
624 tk->next_leap_ktime = ktime_sub(tk->next_leap_ktime, tk->offs_real);
625 }
626
627 /*
628  * Update the ktime_t based scalar nsec members of the timekeeper
629  */
630 static inline void tk_update_ktime_data(struct timekeeper *tk)
631 {
632 u64 seconds;
633 u32 nsec;
634
635 /*
636  * The xtime based monotonic readout is:
637  *	nsec = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec + now();
638  * The ktime based monotonic readout is:
639  *	nsec = base_mono + now();
640  * ==> base_mono = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec
641  */
642 seconds = (u64)(tk->xtime_sec + tk->wall_to_monotonic.tv_sec);
643 nsec = (u32) tk->wall_to_monotonic.tv_nsec;
644 tk->tkr_mono.base = ns_to_ktime(seconds * NSEC_PER_SEC + nsec);
645
646 /*
647  * The sum of the nanoseconds portions of xtime and
648  * wall_to_monotonic can be greater/equal one second. Take
649  * this into account before updating tk->ktime_sec.
650  */
651 nsec += (u32)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
652 if (nsec >= NSEC_PER_SEC)
653 seconds++;
654 tk->ktime_sec = seconds;
655
656
657 tk->tkr_raw.base = ns_to_ktime(tk->raw_sec * NSEC_PER_SEC);
658 }
659
660
661 static void timekeeping_update(struct timekeeper *tk, unsigned int action)
662 {
663 if (action & TK_CLEAR_NTP) {
664 tk->ntp_error = 0;
665 ntp_clear();
666 }
667
668 tk_update_leap_state(tk);
669 tk_update_ktime_data(tk);
670
671 update_vsyscall(tk);
672 update_pvclock_gtod(tk, action & TK_CLOCK_WAS_SET);
673
674 tk->tkr_mono.base_real = tk->tkr_mono.base + tk->offs_real;
675 update_fast_timekeeper(&tk->tkr_mono, &tk_fast_mono);
676 update_fast_timekeeper(&tk->tkr_raw, &tk_fast_raw);
677
678 if (action & TK_CLOCK_WAS_SET)
679 tk->clock_was_set_seq++;
680
681
682
683
684
685 if (action & TK_MIRROR)
686 memcpy(&shadow_timekeeper, &tk_core.timekeeper,
687 sizeof(tk_core.timekeeper));
688 }
689
690 /**
691  * timekeeping_forward_now - update clock to the current time
692  *
693  * Forward the current clock to update its state since the last call to
694  * update_wall_time(). This is useful before significant clock changes,
695  * as it avoids having to deal with this time offset explicitly.
696  */
697 static void timekeeping_forward_now(struct timekeeper *tk)
698 {
699 u64 cycle_now, delta;
700
701 cycle_now = tk_clock_read(&tk->tkr_mono);
702 delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
703 tk->tkr_mono.cycle_last = cycle_now;
704 tk->tkr_raw.cycle_last = cycle_now;
705
706 tk->tkr_mono.xtime_nsec += delta * tk->tkr_mono.mult;
707
708
709 tk->tkr_mono.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr_mono.shift;
710
711
712 tk->tkr_raw.xtime_nsec += delta * tk->tkr_raw.mult;
713
714
715 tk->tkr_raw.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr_raw.shift;
716
717 tk_normalize_xtime(tk);
718 }
719
720 /**
721  * ktime_get_real_ts64 - Returns the time of day in a timespec64.
722  * @ts:		pointer to the timespec to be set
723  *
724  * Returns the time of day in a timespec64 (WARN if suspended).
725  */
726 void ktime_get_real_ts64(struct timespec64 *ts)
727 {
728 struct timekeeper *tk = &tk_core.timekeeper;
729 unsigned int seq;
730 u64 nsecs;
731
732 WARN_ON(timekeeping_suspended);
733
734 do {
735 seq = read_seqcount_begin(&tk_core.seq);
736
737 ts->tv_sec = tk->xtime_sec;
738 nsecs = timekeeping_get_ns(&tk->tkr_mono);
739
740 } while (read_seqcount_retry(&tk_core.seq, seq));
741
742 ts->tv_nsec = 0;
743 timespec64_add_ns(ts, nsecs);
744 }
745 EXPORT_SYMBOL(ktime_get_real_ts64);
746
747 ktime_t ktime_get(void)
748 {
749 struct timekeeper *tk = &tk_core.timekeeper;
750 unsigned int seq;
751 ktime_t base;
752 u64 nsecs;
753
754 WARN_ON(timekeeping_suspended);
755
756 do {
757 seq = read_seqcount_begin(&tk_core.seq);
758 base = tk->tkr_mono.base;
759 nsecs = timekeeping_get_ns(&tk->tkr_mono);
760
761 } while (read_seqcount_retry(&tk_core.seq, seq));
762
763 return ktime_add_ns(base, nsecs);
764 }
765 EXPORT_SYMBOL_GPL(ktime_get);
766
767 u32 ktime_get_resolution_ns(void)
768 {
769 struct timekeeper *tk = &tk_core.timekeeper;
770 unsigned int seq;
771 u32 nsecs;
772
773 WARN_ON(timekeeping_suspended);
774
775 do {
776 seq = read_seqcount_begin(&tk_core.seq);
777 nsecs = tk->tkr_mono.mult >> tk->tkr_mono.shift;
778 } while (read_seqcount_retry(&tk_core.seq, seq));
779
780 return nsecs;
781 }
782 EXPORT_SYMBOL_GPL(ktime_get_resolution_ns);
783
784 static ktime_t *offsets[TK_OFFS_MAX] = {
785 [TK_OFFS_REAL] = &tk_core.timekeeper.offs_real,
786 [TK_OFFS_BOOT] = &tk_core.timekeeper.offs_boot,
787 [TK_OFFS_TAI] = &tk_core.timekeeper.offs_tai,
788 };
789
790 ktime_t ktime_get_with_offset(enum tk_offsets offs)
791 {
792 struct timekeeper *tk = &tk_core.timekeeper;
793 unsigned int seq;
794 ktime_t base, *offset = offsets[offs];
795 u64 nsecs;
796
797 WARN_ON(timekeeping_suspended);
798
799 do {
800 seq = read_seqcount_begin(&tk_core.seq);
801 base = ktime_add(tk->tkr_mono.base, *offset);
802 nsecs = timekeeping_get_ns(&tk->tkr_mono);
803
804 } while (read_seqcount_retry(&tk_core.seq, seq));
805
806 return ktime_add_ns(base, nsecs);
807
808 }
809 EXPORT_SYMBOL_GPL(ktime_get_with_offset);
810
811 ktime_t ktime_get_coarse_with_offset(enum tk_offsets offs)
812 {
813 struct timekeeper *tk = &tk_core.timekeeper;
814 unsigned int seq;
815 ktime_t base, *offset = offsets[offs];
816 u64 nsecs;
817
818 WARN_ON(timekeeping_suspended);
819
820 do {
821 seq = read_seqcount_begin(&tk_core.seq);
822 base = ktime_add(tk->tkr_mono.base, *offset);
823 nsecs = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;
824
825 } while (read_seqcount_retry(&tk_core.seq, seq));
826
827 return ktime_add_ns(base, nsecs);
828 }
829 EXPORT_SYMBOL_GPL(ktime_get_coarse_with_offset);
830
831
832
833
834
835
836 ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs)
837 {
838 ktime_t *offset = offsets[offs];
839 unsigned int seq;
840 ktime_t tconv;
841
842 do {
843 seq = read_seqcount_begin(&tk_core.seq);
844 tconv = ktime_add(tmono, *offset);
845 } while (read_seqcount_retry(&tk_core.seq, seq));
846
847 return tconv;
848 }
849 EXPORT_SYMBOL_GPL(ktime_mono_to_any);
850
851
852
853
854 ktime_t ktime_get_raw(void)
855 {
856 struct timekeeper *tk = &tk_core.timekeeper;
857 unsigned int seq;
858 ktime_t base;
859 u64 nsecs;
860
861 do {
862 seq = read_seqcount_begin(&tk_core.seq);
863 base = tk->tkr_raw.base;
864 nsecs = timekeeping_get_ns(&tk->tkr_raw);
865
866 } while (read_seqcount_retry(&tk_core.seq, seq));
867
868 return ktime_add_ns(base, nsecs);
869 }
870 EXPORT_SYMBOL_GPL(ktime_get_raw);
871
872 /**
873  * ktime_get_ts64 - get the monotonic clock in timespec64 format
874  * @ts:		pointer to timespec variable
875  *
876  * The function calculates the monotonic clock from the realtime
877  * clock and the wall_to_monotonic offset and stores the result
878  * in normalized timespec64 format in the variable pointed to by @ts.
879  */
880 void ktime_get_ts64(struct timespec64 *ts)
881 {
882 struct timekeeper *tk = &tk_core.timekeeper;
883 struct timespec64 tomono;
884 unsigned int seq;
885 u64 nsec;
886
887 WARN_ON(timekeeping_suspended);
888
889 do {
890 seq = read_seqcount_begin(&tk_core.seq);
891 ts->tv_sec = tk->xtime_sec;
892 nsec = timekeeping_get_ns(&tk->tkr_mono);
893 tomono = tk->wall_to_monotonic;
894
895 } while (read_seqcount_retry(&tk_core.seq, seq));
896
897 ts->tv_sec += tomono.tv_sec;
898 ts->tv_nsec = 0;
899 timespec64_add_ns(ts, nsec + tomono.tv_nsec);
900 }
901 EXPORT_SYMBOL_GPL(ktime_get_ts64);
902
903 /**
904  * ktime_get_seconds - Get the seconds portion of CLOCK_MONOTONIC
905  *
906  * Returns the seconds portion of CLOCK_MONOTONIC with a single
907  * non-serialized read of tk->ktime_sec, which is kept up to date
908  * by tk_update_ktime_data() on every timekeeper update; callers
909  * accept the small race inherent in an unserialized read.
910  */
911 
912 time64_t ktime_get_seconds(void)
913 {
914 struct timekeeper *tk = &tk_core.timekeeper;
915
916 WARN_ON(timekeeping_suspended);
917 return tk->ktime_sec;
918 }
919 EXPORT_SYMBOL_GPL(ktime_get_seconds);
920
921 /**
922  * ktime_get_real_seconds - Get the seconds portion of CLOCK_REALTIME
923  *
924  * Returns the wall clock seconds since 1970. This replaces the
925  * get_seconds() interface which is not y2038 safe on 32bit systems.
926  *
927  * For 64bit systems the fast access to tk->xtime_sec is preserved. On
928  * 32bit systems the access must be protected with the sequence
929  * counter to provide "atomic" access to the 64bit tk->xtime_sec
930  * value.
931  */
932 time64_t ktime_get_real_seconds(void)
933 {
934 struct timekeeper *tk = &tk_core.timekeeper;
935 time64_t seconds;
936 unsigned int seq;
937
938 if (IS_ENABLED(CONFIG_64BIT))
939 return tk->xtime_sec;
940
941 do {
942 seq = read_seqcount_begin(&tk_core.seq);
943 seconds = tk->xtime_sec;
944
945 } while (read_seqcount_retry(&tk_core.seq, seq));
946
947 return seconds;
948 }
949 EXPORT_SYMBOL_GPL(ktime_get_real_seconds);
950
951
952
953
954
955
956 time64_t __ktime_get_real_seconds(void)
957 {
958 struct timekeeper *tk = &tk_core.timekeeper;
959
960 return tk->xtime_sec;
961 }
962
963 /**
964  * ktime_get_snapshot - snapshots the realtime/monotonic raw clocks with counter
965  * @systime_snapshot:	pointer to struct receiving the system time snapshot
966  */
967 void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot)
968 {
969 struct timekeeper *tk = &tk_core.timekeeper;
970 unsigned int seq;
971 ktime_t base_raw;
972 ktime_t base_real;
973 u64 nsec_raw;
974 u64 nsec_real;
975 u64 now;
976
977 WARN_ON_ONCE(timekeeping_suspended);
978
979 do {
980 seq = read_seqcount_begin(&tk_core.seq);
981 now = tk_clock_read(&tk->tkr_mono);
982 systime_snapshot->cs_was_changed_seq = tk->cs_was_changed_seq;
983 systime_snapshot->clock_was_set_seq = tk->clock_was_set_seq;
984 base_real = ktime_add(tk->tkr_mono.base,
985 tk_core.timekeeper.offs_real);
986 base_raw = tk->tkr_raw.base;
987 nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono, now);
988 nsec_raw = timekeeping_cycles_to_ns(&tk->tkr_raw, now);
989 } while (read_seqcount_retry(&tk_core.seq, seq));
990
991 systime_snapshot->cycles = now;
992 systime_snapshot->real = ktime_add_ns(base_real, nsec_real);
993 systime_snapshot->raw = ktime_add_ns(base_raw, nsec_raw);
994 }
995 EXPORT_SYMBOL_GPL(ktime_get_snapshot);
996
997
998 static int scale64_check_overflow(u64 mult, u64 div, u64 *base)
999 {
1000 u64 tmp, rem;
1001
1002 tmp = div64_u64_rem(*base, div, &rem);
1003
1004 if (((int)sizeof(u64)*8 - fls64(mult) < fls64(tmp)) ||
1005 ((int)sizeof(u64)*8 - fls64(mult) < fls64(rem)))
1006 return -EOVERFLOW;
1007 tmp *= mult;
1008 rem *= mult;
1009
1010 do_div(rem, div);
1011 *base = tmp + rem;
1012 return 0;
1013 }
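/*
 * [Editor's note] Worked example of the scaling above, with illustrative
 * numbers: for *base = 1000, mult = 3, div = 7, div64_u64_rem() gives
 * tmp = 142 and rem = 6. Then tmp *= 3 -> 426, rem *= 3 -> 18,
 * 18 / 7 -> 2, so *base = 426 + 2 = 428, i.e. 1000 * 3 / 7 rounded
 * down, computed without overflowing the 64-bit intermediate.
 */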
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032 static int adjust_historical_crosststamp(struct system_time_snapshot *history,
1033 u64 partial_history_cycles,
1034 u64 total_history_cycles,
1035 bool discontinuity,
1036 struct system_device_crosststamp *ts)
1037 {
1038 struct timekeeper *tk = &tk_core.timekeeper;
1039 u64 corr_raw, corr_real;
1040 bool interp_forward;
1041 int ret;
1042
1043 if (total_history_cycles == 0 || partial_history_cycles == 0)
1044 return 0;
1045
1046
1047 interp_forward = partial_history_cycles > total_history_cycles / 2;
1048 partial_history_cycles = interp_forward ?
1049 total_history_cycles - partial_history_cycles :
1050 partial_history_cycles;
1051
1052
1053
1054
1055
1056 corr_raw = (u64)ktime_to_ns(
1057 ktime_sub(ts->sys_monoraw, history->raw));
1058 ret = scale64_check_overflow(partial_history_cycles,
1059 total_history_cycles, &corr_raw);
1060 if (ret)
1061 return ret;
1062
1063
1064
1065
1066
1067
1068
1069
1070 if (discontinuity) {
1071 corr_real = mul_u64_u32_div
1072 (corr_raw, tk->tkr_mono.mult, tk->tkr_raw.mult);
1073 } else {
1074 corr_real = (u64)ktime_to_ns(
1075 ktime_sub(ts->sys_realtime, history->real));
1076 ret = scale64_check_overflow(partial_history_cycles,
1077 total_history_cycles, &corr_real);
1078 if (ret)
1079 return ret;
1080 }
1081
1082
1083 if (interp_forward) {
1084 ts->sys_monoraw = ktime_add_ns(history->raw, corr_raw);
1085 ts->sys_realtime = ktime_add_ns(history->real, corr_real);
1086 } else {
1087 ts->sys_monoraw = ktime_sub_ns(ts->sys_monoraw, corr_raw);
1088 ts->sys_realtime = ktime_sub_ns(ts->sys_realtime, corr_real);
1089 }
1090
1091 return 0;
1092 }
1093
1094 /*
1095  * cycle_between - true if test occurs chronologically between before and after
1096  */
1097 static bool cycle_between(u64 before, u64 test, u64 after)
1098 {
1099 if (test > before && test < after)
1100 return true;
1101 if (test < before && before > after)
1102 return true;
1103 return false;
1104 }
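/*
 * [Editor's note] Example of the wraparound case handled by the second
 * test above: with before = 0xfffffff0, test = 0x00000005 and
 * after = 0x00000010 (the counter wrapped between 'before' and 'after'),
 * the first check fails, but test < before && before > after holds, so
 * 'test' is correctly reported as lying inside the interval.
 */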
1105
1106 /**
1107  * get_device_system_crosststamp - Synchronously capture system/device timestamp
1108  * @get_time_fn:	Callback to get simultaneous device time and
1109  *	system counter from the device driver
1110  * @ctx:		Context passed to get_time_fn()
1111  * @history_begin:	Historical reference point used to interpolate system
1112  *	time when counter provided by the driver is before the current interval
1113  * @xtstamp:		Receives simultaneously captured system and device time
1114  *
1115  * Reads a timestamp from a device and correlates it to system time
1116  */
1117 int get_device_system_crosststamp(int (*get_time_fn)
1118 (ktime_t *device_time,
1119 struct system_counterval_t *sys_counterval,
1120 void *ctx),
1121 void *ctx,
1122 struct system_time_snapshot *history_begin,
1123 struct system_device_crosststamp *xtstamp)
1124 {
1125 struct system_counterval_t system_counterval;
1126 struct timekeeper *tk = &tk_core.timekeeper;
1127 u64 cycles, now, interval_start;
1128 unsigned int clock_was_set_seq = 0;
1129 ktime_t base_real, base_raw;
1130 u64 nsec_real, nsec_raw;
1131 u8 cs_was_changed_seq;
1132 unsigned int seq;
1133 bool do_interp;
1134 int ret;
1135
1136 do {
1137 seq = read_seqcount_begin(&tk_core.seq);
1138
1139
1140
1141
1142 ret = get_time_fn(&xtstamp->device, &system_counterval, ctx);
1143 if (ret)
1144 return ret;
1145
1146
1147
1148
1149
1150
1151 if (tk->tkr_mono.clock != system_counterval.cs)
1152 return -ENODEV;
1153 cycles = system_counterval.cycles;
1154
1155
1156
1157
1158
1159 now = tk_clock_read(&tk->tkr_mono);
1160 interval_start = tk->tkr_mono.cycle_last;
1161 if (!cycle_between(interval_start, cycles, now)) {
1162 clock_was_set_seq = tk->clock_was_set_seq;
1163 cs_was_changed_seq = tk->cs_was_changed_seq;
1164 cycles = interval_start;
1165 do_interp = true;
1166 } else {
1167 do_interp = false;
1168 }
1169
1170 base_real = ktime_add(tk->tkr_mono.base,
1171 tk_core.timekeeper.offs_real);
1172 base_raw = tk->tkr_raw.base;
1173
1174 nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono,
1175 system_counterval.cycles);
1176 nsec_raw = timekeeping_cycles_to_ns(&tk->tkr_raw,
1177 system_counterval.cycles);
1178 } while (read_seqcount_retry(&tk_core.seq, seq));
1179
1180 xtstamp->sys_realtime = ktime_add_ns(base_real, nsec_real);
1181 xtstamp->sys_monoraw = ktime_add_ns(base_raw, nsec_raw);
1182
1183
1184
1185
1186
1187 if (do_interp) {
1188 u64 partial_history_cycles, total_history_cycles;
1189 bool discontinuity;
1190
1191
1192
1193
1194
1195
1196 if (!history_begin ||
1197 !cycle_between(history_begin->cycles,
1198 system_counterval.cycles, cycles) ||
1199 history_begin->cs_was_changed_seq != cs_was_changed_seq)
1200 return -EINVAL;
1201 partial_history_cycles = cycles - system_counterval.cycles;
1202 total_history_cycles = cycles - history_begin->cycles;
1203 discontinuity =
1204 history_begin->clock_was_set_seq != clock_was_set_seq;
1205
1206 ret = adjust_historical_crosststamp(history_begin,
1207 partial_history_cycles,
1208 total_history_cycles,
1209 discontinuity, xtstamp);
1210 if (ret)
1211 return ret;
1212 }
1213
1214 return 0;
1215 }
1216 EXPORT_SYMBOL_GPL(get_device_system_crosststamp);
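/*
 * [Editor's note] Caller sketch (hypothetical driver code, not from this
 * file): a PTP-capable driver supplies a callback that reads its device
 * clock together with the system clocksource, e.g.:
 *
 *	static int my_get_time(ktime_t *dev,
 *			       struct system_counterval_t *sys, void *ctx)
 *	{
 *		*dev = my_read_device_clock(ctx);	// assumed helper
 *		*sys = my_read_system_counter(ctx);	// fills .cs/.cycles
 *		return 0;
 *	}
 *
 *	struct system_device_crosststamp xt;
 *	int err = get_device_system_crosststamp(my_get_time, priv, NULL, &xt);
 *
 * Passing a history snapshot instead of NULL enables the interpolation
 * path implemented above.
 */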
1217
1218 /**
1219  * do_settimeofday64 - Sets the time of day.
1220  * @ts:     pointer to the timespec64 variable containing the new time
1221  *
1222  * Sets the time of day to the new time and update NTP and notify hrtimers
1223  */
1224 int do_settimeofday64(const struct timespec64 *ts)
1225 {
1226 struct timekeeper *tk = &tk_core.timekeeper;
1227 struct timespec64 ts_delta, xt;
1228 unsigned long flags;
1229 int ret = 0;
1230
1231 if (!timespec64_valid_settod(ts))
1232 return -EINVAL;
1233
1234 raw_spin_lock_irqsave(&timekeeper_lock, flags);
1235 write_seqcount_begin(&tk_core.seq);
1236
1237 timekeeping_forward_now(tk);
1238
1239 xt = tk_xtime(tk);
1240 ts_delta.tv_sec = ts->tv_sec - xt.tv_sec;
1241 ts_delta.tv_nsec = ts->tv_nsec - xt.tv_nsec;
1242
1243 if (timespec64_compare(&tk->wall_to_monotonic, &ts_delta) > 0) {
1244 ret = -EINVAL;
1245 goto out;
1246 }
1247
1248 tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, ts_delta));
1249
1250 tk_set_xtime(tk, ts);
1251 out:
1252 timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
1253
1254 write_seqcount_end(&tk_core.seq);
1255 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1256
1257
1258 clock_was_set();
1259
1260 if (!ret)
1261 audit_tk_injoffset(ts_delta);
1262
1263 return ret;
1264 }
1265 EXPORT_SYMBOL(do_settimeofday64);
1266
1267 /**
1268  * timekeeping_inject_offset - Adds or subtracts from the current time.
1269  * @ts:		Pointer to the timespec64 variable containing the offset
1270  *
1271  * Adds or subtracts an offset value from the current time.
1272  */
1273 static int timekeeping_inject_offset(const struct timespec64 *ts)
1274 {
1275 struct timekeeper *tk = &tk_core.timekeeper;
1276 unsigned long flags;
1277 struct timespec64 tmp;
1278 int ret = 0;
1279
1280 if (ts->tv_nsec < 0 || ts->tv_nsec >= NSEC_PER_SEC)
1281 return -EINVAL;
1282
1283 raw_spin_lock_irqsave(&timekeeper_lock, flags);
1284 write_seqcount_begin(&tk_core.seq);
1285
1286 timekeeping_forward_now(tk);
1287
1288
1289 tmp = timespec64_add(tk_xtime(tk), *ts);
1290 if (timespec64_compare(&tk->wall_to_monotonic, ts) > 0 ||
1291 !timespec64_valid_settod(&tmp)) {
1292 ret = -EINVAL;
1293 goto error;
1294 }
1295
1296 tk_xtime_add(tk, ts);
1297 tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, *ts));
1298
1299 error:
1300 timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
1301
1302 write_seqcount_end(&tk_core.seq);
1303 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1304
1305
1306 clock_was_set();
1307
1308 return ret;
1309 }
1310
1311 
1312 /* Indicates if there is an offset between the system clock and the
1313  * hardware clock/persistent clock/rtc.
1314  */
1315 int persistent_clock_is_local;
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333 void timekeeping_warp_clock(void)
1334 {
1335 if (sys_tz.tz_minuteswest != 0) {
1336 struct timespec64 adjust;
1337
1338 persistent_clock_is_local = 1;
1339 adjust.tv_sec = sys_tz.tz_minuteswest * 60;
1340 adjust.tv_nsec = 0;
1341 timekeeping_inject_offset(&adjust);
1342 }
1343 }
1344
1345 /*
1346  * Set the TAI offset (seconds of TAI ahead of UTC) and derive the
1347  * matching offs_tai from offs_real.
1348  */
1349 static void __timekeeping_set_tai_offset(struct timekeeper *tk, s32 tai_offset)
1350 {
1351 tk->tai_offset = tai_offset;
1352 tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tai_offset, 0));
1353 }
1354
1355 /*
1356  * change_clocksource - Swaps clocksources if a new one is available
1357  *
1358  * Accumulates current time interval and initializes new clocksource
1359  */
1360 static int change_clocksource(void *data)
1361 {
1362 struct timekeeper *tk = &tk_core.timekeeper;
1363 struct clocksource *new, *old;
1364 unsigned long flags;
1365
1366 new = (struct clocksource *) data;
1367
1368 raw_spin_lock_irqsave(&timekeeper_lock, flags);
1369 write_seqcount_begin(&tk_core.seq);
1370
1371 timekeeping_forward_now(tk);
1372
1373
1374
1375
1376 if (try_module_get(new->owner)) {
1377 if (!new->enable || new->enable(new) == 0) {
1378 old = tk->tkr_mono.clock;
1379 tk_setup_internals(tk, new);
1380 if (old->disable)
1381 old->disable(old);
1382 module_put(old->owner);
1383 } else {
1384 module_put(new->owner);
1385 }
1386 }
1387 timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
1388
1389 write_seqcount_end(&tk_core.seq);
1390 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1391
1392 return 0;
1393 }
1394
1395 /**
1396  * timekeeping_notify - Install a new clock source
1397  * @clock:		pointer to the clock source
1398  *
1399  * This function is called from clocksource.c after a new, better clock
1400  * source has been registered. The caller holds the clocksource_mutex.
1401  */
1402 int timekeeping_notify(struct clocksource *clock)
1403 {
1404 struct timekeeper *tk = &tk_core.timekeeper;
1405
1406 if (tk->tkr_mono.clock == clock)
1407 return 0;
1408 stop_machine(change_clocksource, clock, NULL);
1409 tick_clock_notify();
1410 return tk->tkr_mono.clock == clock ? 0 : -1;
1411 }
1412
1413 /**
1414  * ktime_get_raw_ts64 - Returns the raw monotonic time in a timespec
1415  * @ts:		pointer to the timespec64 to be set
1416  *
1417  * Returns the raw monotonic time (completely un-modified by ntp)
1418  */
1419 void ktime_get_raw_ts64(struct timespec64 *ts)
1420 {
1421 struct timekeeper *tk = &tk_core.timekeeper;
1422 unsigned int seq;
1423 u64 nsecs;
1424
1425 do {
1426 seq = read_seqcount_begin(&tk_core.seq);
1427 ts->tv_sec = tk->raw_sec;
1428 nsecs = timekeeping_get_ns(&tk->tkr_raw);
1429
1430 } while (read_seqcount_retry(&tk_core.seq, seq));
1431
1432 ts->tv_nsec = 0;
1433 timespec64_add_ns(ts, nsecs);
1434 }
1435 EXPORT_SYMBOL(ktime_get_raw_ts64);
1436
1437 
1438 /**
1439  * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
1440  */
1441 int timekeeping_valid_for_hres(void)
1442 {
1443 struct timekeeper *tk = &tk_core.timekeeper;
1444 unsigned int seq;
1445 int ret;
1446
1447 do {
1448 seq = read_seqcount_begin(&tk_core.seq);
1449
1450 ret = tk->tkr_mono.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
1451
1452 } while (read_seqcount_retry(&tk_core.seq, seq));
1453
1454 return ret;
1455 }
1456
1457 /**
1458  * timekeeping_max_deferment - Returns max time the clocksource can be deferred
1459  */
1460 u64 timekeeping_max_deferment(void)
1461 {
1462 struct timekeeper *tk = &tk_core.timekeeper;
1463 unsigned int seq;
1464 u64 ret;
1465
1466 do {
1467 seq = read_seqcount_begin(&tk_core.seq);
1468
1469 ret = tk->tkr_mono.clock->max_idle_ns;
1470
1471 } while (read_seqcount_retry(&tk_core.seq, seq));
1472
1473 return ret;
1474 }
1475
1476 /**
1477  * read_persistent_clock64 - Return time from the persistent clock.
1478  * @ts: Pointer to storage for the readout
1479  *
1480  * Weak dummy function for arches that do not yet support it.
1481  * Reads the time from the battery backed persistent clock.
1482  * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported,
1483  * to be overridden by architectures that implement it.
1484  */
1485 void __weak read_persistent_clock64(struct timespec64 *ts)
1486 {
1487 ts->tv_sec = 0;
1488 ts->tv_nsec = 0;
1489 }
1490
1491 /**
1492  * read_persistent_wall_and_boot_offset - Read persistent clock, and also offset
1493  *                                        from the boot.
1494  *
1495  * Weak dummy function for arches that do not yet support it.
1496  * wall_time	- current time as returned by persistent clock
1497  * boot_offset	- offset that is defined as wall_time - boot_time
1498  * The default function calculates offset based on the current value of
1499  * local_clock(). This way architectures that support sched_clock() but don't
1500  * support dedicated boot time clock will provide the best estimate of the
1501  * boot time.
1502  */
1503 void __weak __init
1504 read_persistent_wall_and_boot_offset(struct timespec64 *wall_time,
1505 struct timespec64 *boot_offset)
1506 {
1507 read_persistent_clock64(wall_time);
1508 *boot_offset = ns_to_timespec64(local_clock());
1509 }
1510
1511 
1512 
1513 /*
1514  * Flag reflecting whether timekeeping_resume() still needs suspend
1515  * timing to be injected from the RTC path.
1516  *
1517  * It is set when entering timekeeping_suspend() and cleared by
1518  * timekeeping_resume() or timekeeping_inject_sleeptime64() once the
1519  * slept interval has been accounted from a nonstop clocksource or
1520  * the persistent clock. If it stays set across resume, the RTC
1521  * resume code (see timekeeping_rtc_skipresume()) knows it must read
1522  * the RTC and inject the sleep time itself.
1523  */
1524 static bool suspend_timing_needed;
1525
1526 /* Flag for if there is a persistent clock on this platform */
1527 static bool persistent_clock_exists;
1528
1529 /*
1530  * timekeeping_init - Initializes the clocksource and common timekeeping values
1531  */
1532 void __init timekeeping_init(void)
1533 {
1534 struct timespec64 wall_time, boot_offset, wall_to_mono;
1535 struct timekeeper *tk = &tk_core.timekeeper;
1536 struct clocksource *clock;
1537 unsigned long flags;
1538
1539 read_persistent_wall_and_boot_offset(&wall_time, &boot_offset);
1540 if (timespec64_valid_settod(&wall_time) &&
1541 timespec64_to_ns(&wall_time) > 0) {
1542 persistent_clock_exists = true;
1543 } else if (timespec64_to_ns(&wall_time) != 0) {
1544 pr_warn("Persistent clock returned invalid value");
1545 wall_time = (struct timespec64){0};
1546 }
1547
1548 if (timespec64_compare(&wall_time, &boot_offset) < 0)
1549 boot_offset = (struct timespec64){0};
1550
1551
1552
1553
1554
1555 wall_to_mono = timespec64_sub(boot_offset, wall_time);
1556
1557 raw_spin_lock_irqsave(&timekeeper_lock, flags);
1558 write_seqcount_begin(&tk_core.seq);
1559 ntp_init();
1560
1561 clock = clocksource_default_clock();
1562 if (clock->enable)
1563 clock->enable(clock);
1564 tk_setup_internals(tk, clock);
1565
1566 tk_set_xtime(tk, &wall_time);
1567 tk->raw_sec = 0;
1568
1569 tk_set_wall_to_mono(tk, wall_to_mono);
1570
1571 timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
1572
1573 write_seqcount_end(&tk_core.seq);
1574 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1575 }
1576
1577
1578 static struct timespec64 timekeeping_suspend_time;
1579 /**
1580  * __timekeeping_inject_sleeptime - Internal function to add sleep interval
1581  * @tk:	Pointer to the timekeeper to be updated
1582  * @delta:	Pointer to the delta value in timespec64 format
1583  *
1584  * Takes a timespec offset measuring a suspend interval and properly
1585  * adds the sleep offset to the timekeeping variables.
1586  */
1587 static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
1588 const struct timespec64 *delta)
1589 {
1590 if (!timespec64_valid_strict(delta)) {
1591 printk_deferred(KERN_WARNING
1592 "__timekeeping_inject_sleeptime: Invalid "
1593 "sleep delta value!\n");
1594 return;
1595 }
1596 tk_xtime_add(tk, delta);
1597 tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, *delta));
1598 tk_update_sleep_time(tk, timespec64_to_ktime(*delta));
1599 tk_debug_account_sleep_time(delta);
1600 }
1601
1602 #if defined(CONFIG_PM_SLEEP) && defined(CONFIG_RTC_HCTOSYS_DEVICE)
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619 bool timekeeping_rtc_skipresume(void)
1620 {
1621 return !suspend_timing_needed;
1622 }
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633 bool timekeeping_rtc_skipsuspend(void)
1634 {
1635 return persistent_clock_exists;
1636 }
1637
1638 /**
1639  * timekeeping_inject_sleeptime64 - Adds suspend interval to timekeeping values
1640  * @delta: pointer to a timespec64 delta value
1641  *
1642  * This hook is for architectures that cannot support read_persistent_clock64
1643  * because their RTC/persistent clock is only accessible when irqs are enabled,
1644  * and also don't have an effective nonstop clocksource.
1645  *
1646  * This function should only be called by rtc_resume(), and allows
1647  * a suspend offset to be injected into the timekeeping values.
1648  */
1649 void timekeeping_inject_sleeptime64(const struct timespec64 *delta)
1650 {
1651 struct timekeeper *tk = &tk_core.timekeeper;
1652 unsigned long flags;
1653
1654 raw_spin_lock_irqsave(&timekeeper_lock, flags);
1655 write_seqcount_begin(&tk_core.seq);
1656
1657 suspend_timing_needed = false;
1658
1659 timekeeping_forward_now(tk);
1660
1661 __timekeeping_inject_sleeptime(tk, delta);
1662
1663 timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
1664
1665 write_seqcount_end(&tk_core.seq);
1666 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1667
1668
1669 clock_was_set();
1670 }
1671 #endif
1672
1673 /**
1674  * timekeeping_resume - Resumes the generic timekeeping subsystem.
1675  */
1676 void timekeeping_resume(void)
1677 {
1678 struct timekeeper *tk = &tk_core.timekeeper;
1679 struct clocksource *clock = tk->tkr_mono.clock;
1680 unsigned long flags;
1681 struct timespec64 ts_new, ts_delta;
1682 u64 cycle_now, nsec;
1683 bool inject_sleeptime = false;
1684
1685 read_persistent_clock64(&ts_new);
1686
1687 clockevents_resume();
1688 clocksource_resume();
1689
1690 raw_spin_lock_irqsave(&timekeeper_lock, flags);
1691 write_seqcount_begin(&tk_core.seq);
1692
1693 /*
1694  * After system resumes, we need to calculate the suspended time and
1695  * compensate it for the OS time. There are 3 sources that could be
1696  * used: Nonstop clocksource during suspend, persistent clock and rtc
1697  * device.
1698  *
1699  * One specific platform may have 1 or 2 or all of them, and the
1700  * preference will be:
1701  *	suspend-nonstop clocksource -> persistent clock -> rtc
1702  * The less preferred source will only be tried if there is no better
1703  * available source. The rtc part is handled separately in rtc core
1704  * code.
1705  */
1705 cycle_now = tk_clock_read(&tk->tkr_mono);
1706 nsec = clocksource_stop_suspend_timing(clock, cycle_now);
1707 if (nsec > 0) {
1708 ts_delta = ns_to_timespec64(nsec);
1709 inject_sleeptime = true;
1710 } else if (timespec64_compare(&ts_new, &timekeeping_suspend_time) > 0) {
1711 ts_delta = timespec64_sub(ts_new, timekeeping_suspend_time);
1712 inject_sleeptime = true;
1713 }
1714
1715 if (inject_sleeptime) {
1716 suspend_timing_needed = false;
1717 __timekeeping_inject_sleeptime(tk, &ts_delta);
1718 }
1719
1720
1721 tk->tkr_mono.cycle_last = cycle_now;
1722 tk->tkr_raw.cycle_last = cycle_now;
1723
1724 tk->ntp_error = 0;
1725 timekeeping_suspended = 0;
1726 timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
1727 write_seqcount_end(&tk_core.seq);
1728 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1729
1730 touch_softlockup_watchdog();
1731
1732 tick_resume();
1733 hrtimers_resume();
1734 }
1735
1736 int timekeeping_suspend(void)
1737 {
1738 struct timekeeper *tk = &tk_core.timekeeper;
1739 unsigned long flags;
1740 struct timespec64 delta, delta_delta;
1741 static struct timespec64 old_delta;
1742 struct clocksource *curr_clock;
1743 u64 cycle_now;
1744
1745 read_persistent_clock64(&timekeeping_suspend_time);
1746
1747 /*
1748  * On some systems the persistent_clock can not be detected at
1749  * timekeeping_init by its return value, so if we see a valid
1750  * value returned, update the persistent_clock_exists flag.
1751  */
1752 if (timekeeping_suspend_time.tv_sec || timekeeping_suspend_time.tv_nsec)
1753 persistent_clock_exists = true;
1754
1755 suspend_timing_needed = true;
1756
1757 raw_spin_lock_irqsave(&timekeeper_lock, flags);
1758 write_seqcount_begin(&tk_core.seq);
1759 timekeeping_forward_now(tk);
1760 timekeeping_suspended = 1;
1761
1762
1763
1764
1765
1766
1767 curr_clock = tk->tkr_mono.clock;
1768 cycle_now = tk->tkr_mono.cycle_last;
1769 clocksource_start_suspend_timing(curr_clock, cycle_now);
1770
1771 if (persistent_clock_exists) {
1772 /*
1773  * To avoid drift caused by repeated suspend/resumes,
1774  * which each can add ~1 second drift error,
1775  * try to compensate so the difference in system time
1776  * and persistent_clock time stays close to constant.
1777  */
1778 delta = timespec64_sub(tk_xtime(tk), timekeeping_suspend_time);
1779 delta_delta = timespec64_sub(delta, old_delta);
1780 if (abs(delta_delta.tv_sec) >= 2) {
1781 /*
1782  * if delta_delta is too big, assume time correction
1783  * has occurred and set old_delta to the current delta.
1784  */
1785 old_delta = delta;
1786 } else {
1787
1788 timekeeping_suspend_time =
1789 timespec64_add(timekeeping_suspend_time, delta_delta);
1790 }
1791 }
1792
1793 timekeeping_update(tk, TK_MIRROR);
1794 halt_fast_timekeeper(tk);
1795 write_seqcount_end(&tk_core.seq);
1796 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1797
1798 tick_suspend();
1799 clocksource_suspend();
1800 clockevents_suspend();
1801
1802 return 0;
1803 }
1804
1805
1806 static struct syscore_ops timekeeping_syscore_ops = {
1807 .resume = timekeeping_resume,
1808 .suspend = timekeeping_suspend,
1809 };
1810
1811 static int __init timekeeping_init_ops(void)
1812 {
1813 register_syscore_ops(&timekeeping_syscore_ops);
1814 return 0;
1815 }
1816 device_initcall(timekeeping_init_ops);
1817
1818 /*
1819  * Apply a multiplier adjustment to the timekeeper
1820  */
1821 static __always_inline void timekeeping_apply_adjustment(struct timekeeper *tk,
1822 s64 offset,
1823 s32 mult_adj)
1824 {
1825 s64 interval = tk->cycle_interval;
1826
1827 if (mult_adj == 0) {
1828 return;
1829 } else if (mult_adj == -1) {
1830 interval = -interval;
1831 offset = -offset;
1832 } else if (mult_adj != 1) {
1833 interval *= mult_adj;
1834 offset *= mult_adj;
1835 }
1836
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884 if ((mult_adj > 0) && (tk->tkr_mono.mult + mult_adj < mult_adj)) {
1885
1886 WARN_ON_ONCE(1);
1887 return;
1888 }
1889
1890 tk->tkr_mono.mult += mult_adj;
1891 tk->xtime_interval += interval;
1892 tk->tkr_mono.xtime_nsec -= offset;
1893 }
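/*
 * [Editor's note] Why the three updates above belong together, sketched
 * for mult_adj == 1: since xtime_interval = cycle_interval * mult,
 * bumping mult by one grows xtime_interval by exactly one
 * cycle_interval. The not-yet-accumulated 'offset' cycles were valued
 * at offset * mult shifted ns; after the bump they would be re-valued
 * at offset * (mult + 1), so offset is subtracted from xtime_nsec to
 * keep the current readout unchanged while future cycles tick faster.
 */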
1894
1895
1896
1897
1898
1899 static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
1900 {
1901 u32 mult;
1902
1903
1904
1905
1906
1907 if (likely(tk->ntp_tick == ntp_tick_length())) {
1908 mult = tk->tkr_mono.mult - tk->ntp_err_mult;
1909 } else {
1910 tk->ntp_tick = ntp_tick_length();
1911 mult = div64_u64((tk->ntp_tick >> tk->ntp_error_shift) -
1912 tk->xtime_remainder, tk->cycle_interval);
1913 }
1914
1915
1916
1917
1918
1919
1920
1921 tk->ntp_err_mult = tk->ntp_error > 0 ? 1 : 0;
1922 mult += tk->ntp_err_mult;
1923
1924 timekeeping_apply_adjustment(tk, offset, mult - tk->tkr_mono.mult);
1925
1926 if (unlikely(tk->tkr_mono.clock->maxadj &&
1927 (abs(tk->tkr_mono.mult - tk->tkr_mono.clock->mult)
1928 > tk->tkr_mono.clock->maxadj))) {
1929 printk_once(KERN_WARNING
1930 "Adjusting %s more than 11%% (%ld vs %ld)\n",
1931 tk->tkr_mono.clock->name, (long)tk->tkr_mono.mult,
1932 (long)tk->tkr_mono.clock->mult + tk->tkr_mono.clock->maxadj);
1933 }
1934
1935
1936
1937
1938
1939
1940
1941
1942
1943
1944
1945 if (unlikely((s64)tk->tkr_mono.xtime_nsec < 0)) {
1946 tk->tkr_mono.xtime_nsec += (u64)NSEC_PER_SEC <<
1947 tk->tkr_mono.shift;
1948 tk->xtime_sec--;
1949 tk->skip_second_overflow = 1;
1950 }
1951 }
1952
1953 /*
1954  * accumulate_nsecs_to_secs - Accumulates nsecs into secs
1955  *
1956  * Helper function that accumulates the nsecs greater than a second
1957  * from the xtime_nsec field to the xtime_secs field.
1958  * It also calls into the NTP code to handle leapsecond processing.
1959  */
1960 
1961 static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
1962 {
1963 u64 nsecps = (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
1964 unsigned int clock_set = 0;
1965
1966 while (tk->tkr_mono.xtime_nsec >= nsecps) {
1967 int leap;
1968
1969 tk->tkr_mono.xtime_nsec -= nsecps;
1970 tk->xtime_sec++;
1971
1972
1973
1974
1975
1976 if (unlikely(tk->skip_second_overflow)) {
1977 tk->skip_second_overflow = 0;
1978 continue;
1979 }
1980
1981
1982 leap = second_overflow(tk->xtime_sec);
1983 if (unlikely(leap)) {
1984 struct timespec64 ts;
1985
1986 tk->xtime_sec += leap;
1987
1988 ts.tv_sec = leap;
1989 ts.tv_nsec = 0;
1990 tk_set_wall_to_mono(tk,
1991 timespec64_sub(tk->wall_to_monotonic, ts));
1992
1993 __timekeeping_set_tai_offset(tk, tk->tai_offset - leap);
1994
1995 clock_set = TK_CLOCK_WAS_SET;
1996 }
1997 }
1998 return clock_set;
1999 }
2000
2001 /*
2002  * logarithmic_accumulation - shifted accumulation of cycles
2003  *
2004  * This function accumulates a shifted interval of cycles into
2005  * a shifted interval of nanoseconds. Allows for O(log) accumulation
2006  * loop.
2007  *
2008  * Returns the unconsumed cycles.
2009  */
2010 static u64 logarithmic_accumulation(struct timekeeper *tk, u64 offset,
2011 u32 shift, unsigned int *clock_set)
2012 {
2013 u64 interval = tk->cycle_interval << shift;
2014 u64 snsec_per_sec;
2015
2016
2017 if (offset < interval)
2018 return offset;
2019
2020
2021 offset -= interval;
2022 tk->tkr_mono.cycle_last += interval;
2023 tk->tkr_raw.cycle_last += interval;
2024
2025 tk->tkr_mono.xtime_nsec += tk->xtime_interval << shift;
2026 *clock_set |= accumulate_nsecs_to_secs(tk);
2027
2028
2029 tk->tkr_raw.xtime_nsec += tk->raw_interval << shift;
2030 snsec_per_sec = (u64)NSEC_PER_SEC << tk->tkr_raw.shift;
2031 while (tk->tkr_raw.xtime_nsec >= snsec_per_sec) {
2032 tk->tkr_raw.xtime_nsec -= snsec_per_sec;
2033 tk->raw_sec++;
2034 }
2035
2036
2037 tk->ntp_error += tk->ntp_tick << shift;
2038 tk->ntp_error -= (tk->xtime_interval + tk->xtime_remainder) <<
2039 (tk->ntp_error_shift + shift);
2040
2041 return offset;
2042 }
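/*
 * [Editor's note] Worked example of the logarithmic chunking, with
 * assumed numbers: for cycle_interval = 40,000 cycles and an offset of
 * 1,000,000 cycles, timekeeping_advance() starts at
 * shift = ilog2(1,000,000) - ilog2(40,000) = 19 - 15 = 4, so the first
 * call here consumes 40,000 << 4 = 640,000 cycles in a single step
 * instead of sixteen separate tick-sized accumulations.
 */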
2043
2044 /*
2045  * timekeeping_advance - Updates the timekeeper to the current time and
2046  * current NTP tick length
2047  */
2048 static void timekeeping_advance(enum timekeeping_adv_mode mode)
2049 {
2050 struct timekeeper *real_tk = &tk_core.timekeeper;
2051 struct timekeeper *tk = &shadow_timekeeper;
2052 u64 offset;
2053 int shift = 0, maxshift;
2054 unsigned int clock_set = 0;
2055 unsigned long flags;
2056
2057 raw_spin_lock_irqsave(&timekeeper_lock, flags);
2058
2059
2060 if (unlikely(timekeeping_suspended))
2061 goto out;
2062
2063 #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
2064 offset = real_tk->cycle_interval;
2065
2066 if (mode != TK_ADV_TICK)
2067 goto out;
2068 #else
2069 offset = clocksource_delta(tk_clock_read(&tk->tkr_mono),
2070 tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
2071
2072
2073 if (offset < real_tk->cycle_interval && mode == TK_ADV_TICK)
2074 goto out;
2075 #endif
2076
2077 /* Do some additional sanity checking */
2078 timekeeping_check_update(tk, offset);
2079 
2080 /*
2081  * With NO_HZ we may have to accumulate many cycle_intervals
2082  * (think "ticks") worth of time at once. To do this efficiently,
2083  * we calculate the largest doubling multiple of cycle_intervals
2084  * that is smaller than the offset.  We then accumulate that
2085  * chunk in one go, and then try to consume the next smaller
2086  * doubled multiple.
2087  */
2088 shift = ilog2(offset) - ilog2(tk->cycle_interval);
2089 shift = max(0, shift);
2090
2091 maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1;
2092 shift = min(shift, maxshift);
2093 while (offset >= tk->cycle_interval) {
2094 offset = logarithmic_accumulation(tk, offset, shift,
2095 &clock_set);
2096 if (offset < tk->cycle_interval<<shift)
2097 shift--;
2098 }
2099
2100
2101 timekeeping_adjust(tk, offset);
2102
2103
2104
2105
2106
2107 clock_set |= accumulate_nsecs_to_secs(tk);
2108
2109 write_seqcount_begin(&tk_core.seq);
2110 /*
2111  * Update the real timekeeper.
2112  *
2113  * We could avoid this memcpy by switching pointers, but that
2114  * requires changes to all other timekeeper usage sites as
2115  * well, i.e. move the timekeeper pointer getter into the
2116  * spinlocked/seqcount protected sections. And we trade this
2117  * memcpy under the tk_core.seq against one before we start
2118  * updating.
2119  */
2120 timekeeping_update(tk, clock_set);
2121 memcpy(real_tk, tk, sizeof(*tk));
2122
2123 write_seqcount_end(&tk_core.seq);
2124 out:
2125 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
2126 if (clock_set)
2127
2128 clock_was_set_delayed();
2129 }
2130
2131 /**
2132  * update_wall_time - Uses the current clocksource to increment
2133  * the wall time
2134  */
2135 void update_wall_time(void)
2136 {
2137 timekeeping_advance(TK_ADV_TICK);
2138 }
2139
2140 /**
2141  * getboottime64 - Return the real time of system boot.
2142  * @ts:		pointer to the timespec64 to be set
2143  *
2144  * Returns the wall-time of boot in a timespec64.
2145  *
2146  * This is based on the wall_to_monotonic offset and the total suspend
2147  * time. Calls to settimeofday will affect the value returned (which
2148  * basically means that however wrong your real time clock is at boot time,
2149  * you get the right time here).
2150  */
2151 void getboottime64(struct timespec64 *ts)
2152 {
2153 struct timekeeper *tk = &tk_core.timekeeper;
2154 ktime_t t = ktime_sub(tk->offs_real, tk->offs_boot);
2155
2156 *ts = ktime_to_timespec64(t);
2157 }
2158 EXPORT_SYMBOL_GPL(getboottime64);
2159
2160 void ktime_get_coarse_real_ts64(struct timespec64 *ts)
2161 {
2162 struct timekeeper *tk = &tk_core.timekeeper;
2163 unsigned int seq;
2164
2165 do {
2166 seq = read_seqcount_begin(&tk_core.seq);
2167
2168 *ts = tk_xtime(tk);
2169 } while (read_seqcount_retry(&tk_core.seq, seq));
2170 }
2171 EXPORT_SYMBOL(ktime_get_coarse_real_ts64);
2172
2173 void ktime_get_coarse_ts64(struct timespec64 *ts)
2174 {
2175 struct timekeeper *tk = &tk_core.timekeeper;
2176 struct timespec64 now, mono;
2177 unsigned int seq;
2178
2179 do {
2180 seq = read_seqcount_begin(&tk_core.seq);
2181
2182 now = tk_xtime(tk);
2183 mono = tk->wall_to_monotonic;
2184 } while (read_seqcount_retry(&tk_core.seq, seq));
2185
2186 set_normalized_timespec64(ts, now.tv_sec + mono.tv_sec,
2187 now.tv_nsec + mono.tv_nsec);
2188 }
2189 EXPORT_SYMBOL(ktime_get_coarse_ts64);
2190
2191 /*
2192  * Must hold jiffies_lock
2193  */
2194 void do_timer(unsigned long ticks)
2195 {
2196 jiffies_64 += ticks;
2197 calc_global_load(ticks);
2198 }
2199
2200 /**
2201  * ktime_get_update_offsets_now - hrtimer helper
2202  * @cwsseq:	pointer to check and store the clock was set sequence number
2203  * @offs_real:	pointer to storage for monotonic -> realtime offset
2204  * @offs_boot:	pointer to storage for monotonic -> boottime offset
2205  * @offs_tai:	pointer to storage for monotonic -> clock tai offset
2206  *
2207  * Returns current monotonic time and updates the offsets if the
2208  * sequence number in @cwsseq and timekeeper.clock_was_set_seq are
2209  * different.
2210  *
2211  * Called from hrtimer_interrupt() or retrigger_next_event()
2212  */
2213 ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq, ktime_t *offs_real,
2214 ktime_t *offs_boot, ktime_t *offs_tai)
2215 {
2216 struct timekeeper *tk = &tk_core.timekeeper;
2217 unsigned int seq;
2218 ktime_t base;
2219 u64 nsecs;
2220
2221 do {
2222 seq = read_seqcount_begin(&tk_core.seq);
2223
2224 base = tk->tkr_mono.base;
2225 nsecs = timekeeping_get_ns(&tk->tkr_mono);
2226 base = ktime_add_ns(base, nsecs);
2227
2228 if (*cwsseq != tk->clock_was_set_seq) {
2229 *cwsseq = tk->clock_was_set_seq;
2230 *offs_real = tk->offs_real;
2231 *offs_boot = tk->offs_boot;
2232 *offs_tai = tk->offs_tai;
2233 }
2234
2235
2236 if (unlikely(base >= tk->next_leap_ktime))
2237 *offs_real = ktime_sub(tk->offs_real, ktime_set(1, 0));
2238
2239 } while (read_seqcount_retry(&tk_core.seq, seq));
2240
2241 return base;
2242 }
2243
2244 /**
2245  * timekeeping_validate_timex - Ensures the timex is ok for use in do_adjtimex
2246  */
2247 static int timekeeping_validate_timex(const struct __kernel_timex *txc)
2248 {
2249 if (txc->modes & ADJ_ADJTIME) {
2250
2251 if (!(txc->modes & ADJ_OFFSET_SINGLESHOT))
2252 return -EINVAL;
2253 if (!(txc->modes & ADJ_OFFSET_READONLY) &&
2254 !capable(CAP_SYS_TIME))
2255 return -EPERM;
2256 } else {
2257
2258 if (txc->modes && !capable(CAP_SYS_TIME))
2259 return -EPERM;
2260
2261 /*
2262  * if the quartz is off by more than 10% then something is VERY wrong!
2263  */
2264 if (txc->modes & ADJ_TICK &&
2265 (txc->tick < 900000/USER_HZ ||
2266 txc->tick > 1100000/USER_HZ))
2267 return -EINVAL;
2268 }
2269
2270 if (txc->modes & ADJ_SETOFFSET) {
2271
2272 if (!capable(CAP_SYS_TIME))
2273 return -EPERM;
2274
2275 /*
2276  * Validate if a timespec/timeval used to inject a time
2277  * offset is valid.  Offsets can be positive or negative, so
2278  * we don't check tv_sec. The value of the timeval/timespec
2279  * is the sum of its fields, but *NOTE*:
2280  * The field tv_usec/tv_nsec must always be non-negative and
2281  * we can't have more nanoseconds/microseconds than a second.
2282  */
2283 if (txc->time.tv_usec < 0)
2284 return -EINVAL;
2285
2286 if (txc->modes & ADJ_NANO) {
2287 if (txc->time.tv_usec >= NSEC_PER_SEC)
2288 return -EINVAL;
2289 } else {
2290 if (txc->time.tv_usec >= USEC_PER_SEC)
2291 return -EINVAL;
2292 }
2293 }
2294
2295 /*
2296  * Check for potential multiplication overflows that can
2297  * only happen on 64-bit systems:
2298  */
2299 if ((txc->modes & ADJ_FREQUENCY) && (BITS_PER_LONG == 64)) {
2300 if (LLONG_MIN / PPM_SCALE > txc->freq)
2301 return -EINVAL;
2302 if (LLONG_MAX / PPM_SCALE < txc->freq)
2303 return -EINVAL;
2304 }
2305
2306 return 0;
2307 }
2308
2309 
2310 /**
2311  * do_adjtimex() - Accessor function to NTP __do_adjtimex function
2312  */
2313 int do_adjtimex(struct __kernel_timex *txc)
2314 {
2315 struct timekeeper *tk = &tk_core.timekeeper;
2316 struct audit_ntp_data ad;
2317 unsigned long flags;
2318 struct timespec64 ts;
2319 s32 orig_tai, tai;
2320 int ret;
2321
2322
2323 ret = timekeeping_validate_timex(txc);
2324 if (ret)
2325 return ret;
2326
2327 if (txc->modes & ADJ_SETOFFSET) {
2328 struct timespec64 delta;
2329 delta.tv_sec = txc->time.tv_sec;
2330 delta.tv_nsec = txc->time.tv_usec;
2331 if (!(txc->modes & ADJ_NANO))
2332 delta.tv_nsec *= 1000;
2333 ret = timekeeping_inject_offset(&delta);
2334 if (ret)
2335 return ret;
2336
2337 audit_tk_injoffset(delta);
2338 }
2339
2340 audit_ntp_init(&ad);
2341
2342 ktime_get_real_ts64(&ts);
2343
2344 raw_spin_lock_irqsave(&timekeeper_lock, flags);
2345 write_seqcount_begin(&tk_core.seq);
2346
2347 orig_tai = tai = tk->tai_offset;
2348 ret = __do_adjtimex(txc, &ts, &tai, &ad);
2349
2350 if (tai != orig_tai) {
2351 __timekeeping_set_tai_offset(tk, tai);
2352 timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
2353 }
2354 tk_update_leap_state(tk);
2355
2356 write_seqcount_end(&tk_core.seq);
2357 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
2358
2359 audit_ntp_log(&ad);
2360
2361
2362 if (txc->modes & (ADJ_FREQUENCY | ADJ_TICK))
2363 timekeeping_advance(TK_ADV_FREQ);
2364
2365 if (tai != orig_tai)
2366 clock_was_set();
2367
2368 ntp_notify_cmos_timer();
2369
2370 return ret;
2371 }
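/*
 * [Editor's note] Caller sketch (illustrative, not from this file):
 * this is the backend of the adjtimex(2)/clock_adjtime(2) syscalls.
 * A stepped clock offset request from userspace looks roughly like:
 *
 *	struct timex tx = {
 *		.modes = ADJ_SETOFFSET,
 *		.time  = { .tv_sec = 0, .tv_usec = 500000 },	// +0.5 s
 *	};
 *	adjtimex(&tx);		// requires CAP_SYS_TIME
 *
 * Kernel-side, that request passes timekeeping_validate_timex() and is
 * applied via timekeeping_inject_offset() above before NTP state is
 * touched.
 */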
2372
2373 #ifdef CONFIG_NTP_PPS
2374 /**
2375  * hardpps() - Accessor function to NTP __hardpps function
2376  */
2377 void hardpps(const struct timespec64 *phase_ts, const struct timespec64 *raw_ts)
2378 {
2379 unsigned long flags;
2380
2381 raw_spin_lock_irqsave(&timekeeper_lock, flags);
2382 write_seqcount_begin(&tk_core.seq);
2383
2384 __hardpps(phase_ts, raw_ts);
2385
2386 write_seqcount_end(&tk_core.seq);
2387 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
2388 }
2389 EXPORT_SYMBOL(hardpps);
2390 #endif
2391
2392 /**
2393  * xtime_update() - advances the timekeeping infrastructure
2394  * @ticks:	number of ticks that have elapsed since the last call.
2395  *
2396  * Must be called with interrupts disabled.
2397  */
2398 void xtime_update(unsigned long ticks)
2399 {
2400 write_seqlock(&jiffies_lock);
2401 do_timer(ticks);
2402 write_sequnlock(&jiffies_lock);
2403 update_wall_time();
2404 }