/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * linux/include/linux/timecounter.h
 *
 * based on code that migrated away from
 * linux/include/linux/clocksource.h
 */
#ifndef _LINUX_TIMECOUNTER_H
#define _LINUX_TIMECOUNTER_H

#include <linux/types.h>

/* simplify initialization of mask field */
#define CYCLECOUNTER_MASK(bits) (u64)((bits) < 64 ? ((1ULL<<(bits))-1) : -1)

/**
 * struct cyclecounter - hardware abstraction for a free running counter
 *	Provides completely state-free accessors to the underlying hardware.
 *	Depending on which hardware it reads, the cycle counter may wrap
 *	around quickly. Locking rules (if necessary) have to be defined
 *	by the implementor and user of specific instances of this API.
 *
 * @read:		returns the current cycle value
 * @mask:		bitmask for two's complement
 *			subtraction of non-64-bit counters,
 *			see CYCLECOUNTER_MASK() helper macro
 * @mult:		cycle to nanosecond multiplier
 * @shift:		cycle to nanosecond divisor (power of two)
 */
struct cyclecounter {
	u64 (*read)(const struct cyclecounter *cc);
	u64 mask;
	u32 mult;
	u32 shift;
};

/**
 * struct timecounter - layer above a &struct cyclecounter which counts nanoseconds
 *	Contains the state needed by timecounter_read() to detect
 *	cycle counter wrap around. Initialize with
 *	timecounter_init(). Also used to convert cycle counts into the
 *	corresponding nanosecond counts with timecounter_cyc2time(). Users
 *	of this code are responsible for initializing the underlying
 *	cycle counter hardware, locking issues and reading the time
 *	more often than the cycle counter wraps around. The nanosecond
 *	counter will only wrap around after ~585 years.
 *
 * @cc:			the cycle counter used by this instance
 * @cycle_last:		most recent cycle counter value seen by
 *			timecounter_read()
 * @nsec:		continuously increasing count
 * @mask:		bit mask for maintaining the 'frac' field
 * @frac:		accumulated fractional nanoseconds
 */
struct timecounter {
	const struct cyclecounter *cc;
	u64 cycle_last;
	u64 nsec;
	u64 mask;
	u64 frac;
};

/**
 * cyclecounter_cyc2ns - converts cycle counter cycles to nanoseconds
 * @cc:		Pointer to cycle counter.
 * @cycles:	Cycles to convert.
 * @mask:	bit mask for maintaining the 'frac' field
 * @frac:	pointer to storage for the fractional nanoseconds.
 */
static inline u64 cyclecounter_cyc2ns(const struct cyclecounter *cc,
				      u64 cycles, u64 mask, u64 *frac)
{
	u64 ns = (u64) cycles;

	ns = (ns * cc->mult) + *frac;
	*frac = ns & mask;
	return ns >> cc->shift;
}

/**
 * timecounter_adjtime - Shifts the time of the clock.
 * @tc:		The &struct timecounter to adjust.
 * @delta:	Desired change in nanoseconds.
 */
static inline void timecounter_adjtime(struct timecounter *tc, s64 delta)
{
	tc->nsec += delta;
}

/**
 * timecounter_init - initialize a time counter
 * @tc:			Pointer to time counter which is to be initialized/reset
 * @cc:			A cycle counter, ready to be used.
 * @start_tstamp:	Arbitrary initial time stamp.
 *
 * After this call the current cycle register (roughly) corresponds to
 * the initial time stamp. Every call to timecounter_read() increments
 * the time stamp counter by the number of elapsed nanoseconds.
 */
extern void timecounter_init(struct timecounter *tc,
			     const struct cyclecounter *cc,
			     u64 start_tstamp);
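
/*
 * Example (an illustrative sketch, not taken from an in-tree driver): a
 * hypothetical driver with a free-running 48-bit hardware counter could
 * wire this API up roughly as follows. The foo_* names are made up, and
 * the mult/shift pair would normally be derived from the counter
 * frequency, e.g. with clocks_calc_mult_shift() from
 * <linux/clocksource.h>; the maxsec value below is only an example.
 *
 *	static u64 foo_cc_read(const struct cyclecounter *cc)
 *	{
 *		return foo_read_hw_counter() & cc->mask;
 *	}
 *
 *	static struct cyclecounter foo_cc = {
 *		.read = foo_cc_read,
 *		.mask = CYCLECOUNTER_MASK(48),
 *	};
 *	static struct timecounter foo_tc;
 *
 * At probe time, with the counter ticking at foo_counter_hz:
 *
 *	clocks_calc_mult_shift(&foo_cc.mult, &foo_cc.shift,
 *			       foo_counter_hz, NSEC_PER_SEC, 10);
 *	timecounter_init(&foo_tc, &foo_cc, ktime_get_real_ns());
 */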

/**
 * timecounter_read - return nanoseconds elapsed since timecounter_init()
 *                    plus the initial time stamp
 * @tc:		Pointer to time counter.
 *
 * In other words, keeps track of time since the same epoch as
 * the function which generated the initial time stamp.
 */
extern u64 timecounter_read(struct timecounter *tc);

/**
 * timecounter_cyc2time - convert a cycle counter to same
 *                        time base as values returned by
 *                        timecounter_read()
 * @tc:			Pointer to time counter.
 * @cycle_tstamp:	a value returned by tc->cc->read()
 *
 * Cycle counts are converted correctly as long as they
 * fall into the interval [-1/2 max cycle count, +1/2 max cycle count],
 * with "max cycle count" == tc->cc->mask+1.
 *
 * This allows conversion of cycle counter values which were generated
 * in the past.
 */
extern u64 timecounter_cyc2time(struct timecounter *tc,
				u64 cycle_tstamp);

#endif
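
/*
 * Example (an illustrative sketch, continuing the hypothetical foo_*
 * driver above): converting a raw cycle value latched by hardware, such
 * as an RX timestamp captured by a NIC, into nanoseconds on the same
 * time base as timecounter_read(). The conversion is only correct while
 * the captured value lies within half a counter period of the last
 * update, so the driver is assumed to call timecounter_read()
 * periodically, e.g. from delayed work scheduled well inside one wrap
 * period.
 *
 *	u64 ns = timecounter_cyc2time(&foo_tc, hw_rx_cycles);
 *
 *	skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
 */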