root/arch/parisc/lib/delay.c


DEFINITIONS

This source file includes the following definitions:
  1. __cr16_delay
  2. __udelay

// SPDX-License-Identifier: GPL-2.0
/*
 *      Precise Delay Loops for parisc
 *
 *      based on code by:
 *      Copyright (C) 1993 Linus Torvalds
 *      Copyright (C) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 *      Copyright (C) 2008 Jiri Hladky <hladky _dot_ jiri _at_ gmail _dot_ com>
 *
 *      parisc implementation:
 *      Copyright (C) 2013 Helge Deller <deller@gmx.de>
 */


#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/init.h>

#include <asm/delay.h>
#include <asm/special_insns.h>    /* for mfctl() */
#include <asm/processor.h> /* for boot_cpu_data */

/* CR16 based delay: */
static void __cr16_delay(unsigned long __loops)
{
        /*
         * Note: Due to unsigned math, cr16 rollovers shouldn't be
         * a problem here. However, on 32 bit, we need to make sure
         * we don't pass in too big a value. The current default
         * value of MAX_UDELAY_MS should help prevent this.
         */
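        /*
         * Wraparound sketch (illustrative values, not taken from real
         * hardware): with bclock == 0xfffffff0 and now == 0x00000010,
         * the u32 subtraction (now - bclock) wraps to 0x20, i.e. 32
         * elapsed ticks, so the comparison below stays correct across
         * a CR16 rollover.
         */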
        u32 bclock, now, loops = __loops;
        int cpu;

        preempt_disable();
        cpu = smp_processor_id();
        bclock = mfctl(16);
        for (;;) {
                now = mfctl(16);
                if ((now - bclock) >= loops)
                        break;

                /* Allow RT tasks to run */
                preempt_enable();
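                /*
                 * The nop gives the briefly opened preemption window a
                 * real instruction to land on; barrier() is a compiler
                 * barrier, so the loop is not reordered across it.
                 */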
                asm volatile("  nop\n");
                barrier();
                preempt_disable();

                /*
                 * It is possible that we moved to another CPU, and
                 * since CR16 is a per-cpu register we need to account
                 * for that. The delay must guarantee that we wait "at
                 * least" the requested amount of time. Being moved to
                 * another CPU could make the wait longer, but we just
                 * need to make sure we waited long enough. Rebalance
                 * the counter for this CPU.
                 */
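                /*
                 * Rebalance sketch (illustrative numbers): if loops was
                 * 1000 ticks and (now - bclock) == 300 when the
                 * migration is noticed, 700 ticks remain, now measured
                 * against a fresh bclock read from the new CPU's CR16.
                 */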
                if (unlikely(cpu != smp_processor_id())) {
                        loops -= (now - bclock);
                        cpu = smp_processor_id();
                        bclock = mfctl(16);
                }
        }
        preempt_enable();
}

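/*
 * Convert microseconds to CR16 ticks. The code below assumes CR16
 * advances at the CPU clock rate, so cpu_hz / 1000000 is the number
 * of ticks per microsecond.
 */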
void __udelay(unsigned long usecs)
{
        __cr16_delay(usecs * ((unsigned long)boot_cpu_data.cpu_hz / 1000000UL));
}
EXPORT_SYMBOL(__udelay);
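
A minimal usage sketch (hypothetical driver code, not part of this file): callers normally do not invoke __udelay() directly but go through the generic udelay() helper from <linux/delay.h>, which on parisc dispatches here. The function and register names below are illustrative only.

/* Hypothetical caller: let a device settle for ~10us after a kick. */
#include <linux/delay.h>
#include <linux/io.h>

static void example_hw_settle(void __iomem *ctrl_reg)
{
        writel(0x1, ctrl_reg);  /* start the (imaginary) operation */
        udelay(10);             /* busy-waits via __udelay() above */
}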
