root/kernel/time/sched_clock.c


DEFINITIONS

This source file includes the following definitions.
  1. jiffy_sched_clock_read
  2. cyc_to_ns
  3. sched_clock
  4. update_clock_read_data
  5. update_sched_clock
  6. sched_clock_poll
  7. sched_clock_register
  8. generic_sched_clock_init
  9. suspended_sched_clock_read
  10. sched_clock_suspend
  11. sched_clock_resume
  12. sched_clock_syscore_init

// SPDX-License-Identifier: GPL-2.0
/*
 * Generic sched_clock() support, to extend low level hardware time
 * counters to full 64-bit ns values.
 */
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/ktime.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/syscore_ops.h>
#include <linux/hrtimer.h>
#include <linux/sched_clock.h>
#include <linux/seqlock.h>
#include <linux/bitops.h>

#include "timekeeping.h"

/**
 * struct clock_read_data - data required to read from sched_clock()
 *
 * @epoch_ns:           sched_clock() value at last update
 * @epoch_cyc:          Clock cycle value at last update.
 * @sched_clock_mask:   Bitmask for two's complement subtraction of non-64-bit
 *                      clocks.
 * @read_sched_clock:   Current clock source (or dummy source when suspended).
 * @mult:               Multiplier for scaled math conversion.
 * @shift:              Shift value for scaled math conversion.
 *
 * Care must be taken when updating this structure; it is read by
 * some very hot code paths. It occupies <=40 bytes and, when combined
 * with the seqcount used to synchronize access, comfortably fits into
 * a 64 byte cache line.
 */
struct clock_read_data {
        u64 epoch_ns;
        u64 epoch_cyc;
        u64 sched_clock_mask;
        u64 (*read_sched_clock)(void);
        u32 mult;
        u32 shift;
};
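
/*
 * Illustrative size arithmetic (editorial note, not from the original
 * source): on a 64-bit kernel the layout above is 3 x u64 (24 bytes) +
 * one function pointer (8 bytes) + 2 x u32 (8 bytes) = 40 bytes; with
 * 32-bit pointers the function pointer shrinks to 4 bytes, hence the
 * "<=40 bytes" in the comment above.
 */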

/**
 * struct clock_data - all data needed for sched_clock() (including
 *                     registration of a new clock source)
 *
 * @seq:                Sequence counter for protecting updates. The lowest
 *                      bit is the index for @read_data.
 * @read_data:          Data required to read from sched_clock.
 * @wrap_kt:            Duration for which clock can run before wrapping.
 * @rate:               Tick rate of the registered clock.
 * @actual_read_sched_clock: Registered hardware level clock read function.
 *
 * The ordering of this structure has been chosen to optimize cache
 * performance. In particular 'seq' and 'read_data[0]' (combined) should fit
 * into a single 64-byte cache line.
 */
struct clock_data {
        seqcount_t              seq;
        struct clock_read_data  read_data[2];
        ktime_t                 wrap_kt;
        unsigned long           rate;

        u64 (*actual_read_sched_clock)(void);
};
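
/*
 * Illustrative cache-line arithmetic (editorial note): with lock
 * debugging disabled, seqcount_t boils down to a single unsigned int
 * (4 bytes). Padding for the u64 alignment of read_data places
 * read_data[0] at offset 8, so 'seq' plus the 40-byte read_data[0]
 * occupy the first 48 bytes and fit within one 64-byte cache line,
 * as the comment above intends.
 */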

static struct hrtimer sched_clock_timer;
static int irqtime = -1;

core_param(irqtime, irqtime, int, 0400);

static u64 notrace jiffy_sched_clock_read(void)
{
        /*
         * We don't need to use get_jiffies_64 on 32-bit arches here
         * because we register with BITS_PER_LONG
         */
        return (u64)(jiffies - INITIAL_JIFFIES);
}

static struct clock_data cd ____cacheline_aligned = {
        .read_data[0] = { .mult = NSEC_PER_SEC / HZ,
                          .read_sched_clock = jiffy_sched_clock_read, },
        .actual_read_sched_clock = jiffy_sched_clock_read,
};
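
/*
 * Worked example for the initializer above (editorial note, assuming
 * HZ == 100): one jiffy then lasts NSEC_PER_SEC / HZ = 10,000,000 ns,
 * and with the implicit .shift = 0 the conversion in cyc_to_ns()
 * becomes jiffies_delta * 10,000,000 ns exactly.
 */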

static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
{
        return (cyc * mult) >> shift;
}
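
/*
 * Worked example (editorial note): mult / 2^shift approximates the
 * number of nanoseconds per counter cycle. A 1 MHz counter can use
 * mult = 1000, shift = 0, since each cycle is exactly 1000 ns; for
 * rates that do not divide NSEC_PER_SEC evenly,
 * clocks_calc_mult_shift() picks a larger shift so the fixed-point
 * ratio stays accurate, e.g. a 3 MHz counter needs
 * mult / 2^shift ~= 333.33.
 */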

unsigned long long notrace sched_clock(void)
{
        u64 cyc, res;
        unsigned int seq;
        struct clock_read_data *rd;

        do {
                seq = raw_read_seqcount(&cd.seq);
                rd = cd.read_data + (seq & 1);

                cyc = (rd->read_sched_clock() - rd->epoch_cyc) &
                      rd->sched_clock_mask;
                res = rd->epoch_ns + cyc_to_ns(cyc, rd->mult, rd->shift);
        } while (read_seqcount_retry(&cd.seq, seq));

        return res;
}
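
/*
 * Note on the masked subtraction above (editorial note, illustrative
 * numbers): the two's complement subtraction ANDed with
 * sched_clock_mask keeps the delta correct across a counter wrap.
 * With a 32-bit counter, epoch_cyc == 0xfffffff0 and a current
 * reading of 0x10 give (0x10 - 0xfffffff0) & 0xffffffff == 0x20,
 * i.e. 32 cycles elapsed.
 */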

/*
 * Updating the data required to read the clock.
 *
 * sched_clock() will never observe mis-matched data even if called from
 * an NMI. We do this by maintaining an odd/even copy of the data and
 * steering sched_clock() to one or the other using a sequence counter.
 * In order to preserve the data cache profile of sched_clock() as much
 * as possible the system reverts back to the even copy when the update
 * completes; the odd copy is used *only* during an update.
 */
static void update_clock_read_data(struct clock_read_data *rd)
{
        /* update the backup (odd) copy with the new data */
        cd.read_data[1] = *rd;

        /* steer readers towards the odd copy */
        raw_write_seqcount_latch(&cd.seq);

        /* now it's safe for us to update the normal (even) copy */
        cd.read_data[0] = *rd;

        /* switch readers back to the even copy */
        raw_write_seqcount_latch(&cd.seq);
}
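
/*
 * Illustrative timeline for the latch above (editorial note): seq
 * starts even, so readers use read_data[0].
 *
 *   copy new data to read_data[1]; seq++  -> odd,  readers use [1]
 *   copy new data to read_data[0]; seq++  -> even, readers use [0]
 *
 * A reader that sampled seq before either increment fails the
 * read_seqcount_retry() check in sched_clock() and re-reads, so it
 * never observes a half-written copy, even from NMI context.
 */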

/*
 * Atomically update the sched_clock() epoch.
 */
static void update_sched_clock(void)
{
        u64 cyc;
        u64 ns;
        struct clock_read_data rd;

        rd = cd.read_data[0];

        cyc = cd.actual_read_sched_clock();
        ns = rd.epoch_ns + cyc_to_ns((cyc - rd.epoch_cyc) & rd.sched_clock_mask, rd.mult, rd.shift);

        rd.epoch_ns = ns;
        rd.epoch_cyc = cyc;

        update_clock_read_data(&rd);
}

static enum hrtimer_restart sched_clock_poll(struct hrtimer *hrt)
{
        update_sched_clock();
        hrtimer_forward_now(hrt, cd.wrap_kt);

        return HRTIMER_RESTART;
}
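
/*
 * Why the periodic poll matters (editorial note, illustrative
 * arithmetic): the masked subtraction in sched_clock() is only
 * unambiguous for deltas shorter than one full wrap of the counter.
 * A 32-bit counter at 1 GHz wraps every 2^32 ns ~= 4.29 s, so
 * cd.wrap_kt is of that order and the timer refreshes the epoch at
 * least once per wrap period.
 */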

void __init
sched_clock_register(u64 (*read)(void), int bits, unsigned long rate)
{
        u64 res, wrap, new_mask, new_epoch, cyc, ns;
        u32 new_mult, new_shift;
        unsigned long r;
        char r_unit;
        struct clock_read_data rd;

        if (cd.rate > rate)
                return;

        WARN_ON(!irqs_disabled());

        /* Calculate the mult/shift to convert counter ticks to ns. */
        clocks_calc_mult_shift(&new_mult, &new_shift, rate, NSEC_PER_SEC, 3600);

        new_mask = CLOCKSOURCE_MASK(bits);
        cd.rate = rate;

        /* Calculate how many nanosecs until we risk wrapping */
        wrap = clocks_calc_max_nsecs(new_mult, new_shift, 0, new_mask, NULL);
        cd.wrap_kt = ns_to_ktime(wrap);

        rd = cd.read_data[0];

        /* Update epoch for new counter and update 'epoch_ns' from old counter */
        new_epoch = read();
        cyc = cd.actual_read_sched_clock();
        ns = rd.epoch_ns + cyc_to_ns((cyc - rd.epoch_cyc) & rd.sched_clock_mask, rd.mult, rd.shift);
        cd.actual_read_sched_clock = read;

        rd.read_sched_clock     = read;
        rd.sched_clock_mask     = new_mask;
        rd.mult                 = new_mult;
        rd.shift                = new_shift;
        rd.epoch_cyc            = new_epoch;
        rd.epoch_ns             = ns;

        update_clock_read_data(&rd);

        if (sched_clock_timer.function != NULL) {
                /* update timeout for clock wrap */
                hrtimer_start(&sched_clock_timer, cd.wrap_kt,
                              HRTIMER_MODE_REL_HARD);
        }

        r = rate;
        if (r >= 4000000) {
                r /= 1000000;
                r_unit = 'M';
        } else {
                if (r >= 1000) {
                        r /= 1000;
                        r_unit = 'k';
                } else {
                        r_unit = ' ';
                }
        }

        /* Calculate the ns resolution of this counter */
        res = cyc_to_ns(1ULL, new_mult, new_shift);

        pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lluns\n",
                bits, r, r_unit, res, wrap);

        /* Enable IRQ time accounting if we have a fast enough sched_clock() */
        if (irqtime > 0 || (irqtime == -1 && rate >= 1000000))
                enable_sched_clock_irqtime();

        pr_debug("Registered %pS as sched_clock source\n", read);
}
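
/*
 * Hypothetical usage sketch (editorial note; the function name,
 * register base and rate below are made up for illustration):
 *
 *      static u64 notrace my_counter_read(void)
 *      {
 *              return readl_relaxed(my_counter_base); // 32-bit MMIO counter
 *      }
 *
 *      // early in the platform's time init code, with IRQs disabled:
 *      sched_clock_register(my_counter_read, 32, 24000000);
 *
 * For a 24 MHz counter this gives a resolution of roughly 41 ns and a
 * wrap interval on the order of 2^32 / 24e6 ~= 179 s.
 */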

void __init generic_sched_clock_init(void)
{
        /*
         * If no sched_clock() function has been provided at that point,
         * make it the final one.
         */
        if (cd.actual_read_sched_clock == jiffy_sched_clock_read)
                sched_clock_register(jiffy_sched_clock_read, BITS_PER_LONG, HZ);

        update_sched_clock();

        /*
         * Start the timer to keep sched_clock() properly updated and
         * set the initial epoch.
         */
        hrtimer_init(&sched_clock_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
        sched_clock_timer.function = sched_clock_poll;
        hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL_HARD);
}

/*
 * Clock read function for use when the clock is suspended.
 *
 * This function makes it appear to sched_clock() as if the clock
 * stopped counting at its last update.
 *
 * This function must only be called from the critical
 * section in sched_clock(). It relies on the read_seqcount_retry()
 * at the end of the critical section to be sure we observe the
 * correct copy of 'epoch_cyc'.
 */
static u64 notrace suspended_sched_clock_read(void)
{
        unsigned int seq = raw_read_seqcount(&cd.seq);

        return cd.read_data[seq & 1].epoch_cyc;
}
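
/*
 * Editorial note: with this dummy reader installed, the delta in
 * sched_clock() becomes (epoch_cyc - epoch_cyc) & mask == 0, so
 * sched_clock() keeps returning the frozen epoch_ns for the whole
 * time the system is suspended.
 */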

int sched_clock_suspend(void)
{
        struct clock_read_data *rd = &cd.read_data[0];

        update_sched_clock();
        hrtimer_cancel(&sched_clock_timer);
        rd->read_sched_clock = suspended_sched_clock_read;

        return 0;
}

void sched_clock_resume(void)
{
        struct clock_read_data *rd = &cd.read_data[0];

        rd->epoch_cyc = cd.actual_read_sched_clock();
        hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL_HARD);
        rd->read_sched_clock = cd.actual_read_sched_clock;
}
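
/*
 * Editorial note on the resume ordering: epoch_cyc is refreshed from
 * the hardware counter before the real read function is restored, so
 * the first post-resume sched_clock() call sees a near-zero cycle
 * delta; time that passed while suspended is not counted.
 */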

static struct syscore_ops sched_clock_ops = {
        .suspend        = sched_clock_suspend,
        .resume         = sched_clock_resume,
};

static int __init sched_clock_syscore_init(void)
{
        register_syscore_ops(&sched_clock_ops);

        return 0;
}
device_initcall(sched_clock_syscore_init);
