/*
 *	Precise Delay Loops for parisc
 *
 *	based on code by:
 *	Copyright (C) 1993 Linus Torvalds
 *	Copyright (C) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 *	Copyright (C) 2008 Jiri Hladky <hladky _dot_ jiri _at_ gmail _dot_ com>
 *
 *	parisc implementation:
 *	Copyright (C) 2013 Helge Deller <deller@gmx.de>
 */


#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/init.h>

#include <asm/delay.h>

#include <asm/special_insns.h>    /* for mfctl() */
#include <asm/processor.h> /* for boot_cpu_data */

/* CR16 based delay: */
static void __cr16_delay(unsigned long __loops)
{
	/*
	 * Note: Due to unsigned math, cr16 rollovers shouldn't be
	 * a problem here. However, on 32 bit, we need to make sure
	 * we don't pass in too big a value. The current default
	 * value of MAX_UDELAY_MS should help prevent this.
	 */
	u32 bclock, now, loops = __loops;
	int cpu;

	preempt_disable();
	cpu = smp_processor_id();
	bclock = mfctl(16);
	for (;;) {
		now = mfctl(16);
		if ((now - bclock) >= loops)
			break;

		/* Allow RT tasks to run */
		preempt_enable();
		asm volatile("	nop\n");
		barrier();
		preempt_disable();

		/*
		 * It is possible that we moved to another CPU, and
		 * since CR16's are per-cpu we need to calculate
		 * that. The delay must guarantee that we wait "at
		 * least" the amount of time. Being moved to another
		 * CPU could make the wait longer but we just need to
		 * make sure we waited long enough. Rebalance the
		 * counter for this CPU.
		 */
		if (unlikely(cpu != smp_processor_id())) {
			loops -= (now - bclock);
			cpu = smp_processor_id();
			bclock = mfctl(16);
		}
	}
	preempt_enable();
}
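
/*
 * Worked example (illustrative, not part of the original source): the
 * "(now - bclock) >= loops" test above stays correct across a CR16
 * rollover because the subtraction is done in unsigned 32-bit math.
 *
 *	bclock = 0xfffffff0, loops = 0x40 (64 ticks requested)
 *	now    = 0x00000020 after the counter wraps:
 *		now - bclock = 0x30 (48 ticks elapsed) < 0x40, keep spinning
 *	now    = 0x00000040:
 *		now - bclock = 0x50 (80 ticks elapsed) >= 0x40, done
 *
 * Likewise, when the task migrates, the rebalance step subtracts the
 * ticks already waited on the old CPU from "loops" and restarts the
 * measurement against the new CPU's CR16, so the total wait is never
 * shorter than requested.
 */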

void __udelay(unsigned long usecs)
{
	__cr16_delay(usecs * ((unsigned long)boot_cpu_data.cpu_hz / 1000000UL));
}
EXPORT_SYMBOL(__udelay);
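
/*
 * Worked conversion example (illustrative; the 250 MHz clock rate is an
 * assumed value, real machines take boot_cpu_data.cpu_hz from firmware):
 * with cpu_hz = 250000000, cpu_hz / 1000000UL = 250 CR16 ticks per
 * microsecond, so __udelay(100) spins for at least 100 * 250 = 25000
 * ticks. Since the division happens before the multiplication, the
 * conversion uses cpu_hz rounded down to a whole number of MHz.
 */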