/*
 * linux/arch/ia64/kernel/time.c
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	Stephane Eranian <eranian@hpl.hp.com>
 *	David Mosberger <davidm@hpl.hp.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 * Copyright (C) 1999-2000 VA Linux Systems
 * Copyright (C) 1999-2000 Walt Drummond <drummond@valinux.com>
 */

#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/time.h>
#include <linux/interrupt.h>
#include <linux/efi.h>
#include <linux/timex.h>
#include <linux/timekeeper_internal.h>
#include <linux/platform_device.h>

#include <asm/machvec.h>
#include <asm/delay.h>
#include <asm/hw_irq.h>
#include <asm/paravirt.h>
#include <asm/ptrace.h>
#include <asm/sal.h>
#include <asm/sections.h>

#include "fsyscall_gtod_data.h"

static cycle_t itc_get_cycles(struct clocksource *cs);

struct fsyscall_gtod_data_t fsyscall_gtod_data;

struct itc_jitter_data_t itc_jitter_data;

volatile int time_keeper_id = 0; /* smp_processor_id() of time-keeper */

#ifdef CONFIG_IA64_DEBUG_IRQ

unsigned long last_cli_ip;
EXPORT_SYMBOL(last_cli_ip);

#endif

#ifdef CONFIG_PARAVIRT
/* We need to define a real function for sched_clock, to override the
   weak default version */
unsigned long long sched_clock(void)
{
	return paravirt_sched_clock();
}

static void
paravirt_clocksource_resume(struct clocksource *cs)
{
	if (pv_time_ops.clocksource_resume)
		pv_time_ops.clocksource_resume();
}
#endif

static struct clocksource clocksource_itc = {
	.name           = "itc",
	.rating         = 350,
	.read           = itc_get_cycles,
	.mask           = CLOCKSOURCE_MASK(64),
	.flags          = CLOCK_SOURCE_IS_CONTINUOUS,
#ifdef CONFIG_PARAVIRT
	.resume		= paravirt_clocksource_resume,
#endif
};
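
/*
 * On the clocksource rating scale described in <linux/clocksource.h>,
 * 300-399 means "desired: reasonably fast and accurate".  ia64_init_itm()
 * below drops this rating to 50 ("only useful for boot and testing") when
 * the platform reports a drifty ITC.
 */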

static struct clocksource *itc_clocksource;

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE

#include <linux/kernel_stat.h>

extern cputime_t cycle_to_cputime(u64 cyc);

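/*
 * ti->ac_utime is accumulated on each kernel entry by the low-level
 * assembly stubs (the ACCOUNT_* macros used from entry.S); all we do
 * here is fold the accumulated cycles into the scheduler's accounting
 * and reset the counter.
 */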
void vtime_account_user(struct task_struct *tsk)
{
	cputime_t delta_utime;
	struct thread_info *ti = task_thread_info(tsk);

	if (ti->ac_utime) {
		delta_utime = cycle_to_cputime(ti->ac_utime);
		account_user_time(tsk, delta_utime, delta_utime);
		ti->ac_utime = 0;
	}
}

/*
 * Called from the context switch with interrupts disabled, to charge all
 * accumulated times to the current process, and to prepare accounting on
 * the next process.
 */
void arch_vtime_task_switch(struct task_struct *prev)
{
	struct thread_info *pi = task_thread_info(prev);
	struct thread_info *ni = task_thread_info(current);

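	/*
	 * Hand the accounting stamp over: 'prev' stops accumulating at
	 * exactly the point where 'current' starts, so no cycles are
	 * counted twice or lost across the switch.
	 */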
	pi->ac_stamp = ni->ac_stamp;
	ni->ac_stime = ni->ac_utime = 0;
}

/*
 * Account time for a transition between system, hard irq or soft irq state.
 * Note that this function is called with interrupts disabled (see the
 * WARN_ON_ONCE below).
 */
static cputime_t vtime_delta(struct task_struct *tsk)
{
	struct thread_info *ti = task_thread_info(tsk);
	cputime_t delta_stime;
	__u64 now;

	WARN_ON_ONCE(!irqs_disabled());

	now = ia64_get_itc();

	delta_stime = cycle_to_cputime(ti->ac_stime + (now - ti->ac_stamp));
	ti->ac_stime = 0;
	ti->ac_stamp = now;

	return delta_stime;
}

void vtime_account_system(struct task_struct *tsk)
{
	cputime_t delta = vtime_delta(tsk);

	account_system_time(tsk, 0, delta, delta);
}
EXPORT_SYMBOL_GPL(vtime_account_system);

void vtime_account_idle(struct task_struct *tsk)
{
	account_idle_time(vtime_delta(tsk));
}

#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

static irqreturn_t
timer_interrupt (int irq, void *dev_id)
{
	unsigned long new_itm;

	if (cpu_is_offline(smp_processor_id()))
		return IRQ_HANDLED;

	platform_timer_interrupt(irq, dev_id);

	new_itm = local_cpu_data->itm_next;

	if (!time_after(ia64_get_itc(), new_itm))
		printk(KERN_ERR "Oops: timer tick before it's due (itc=%lx,itm=%lx)\n",
		       ia64_get_itc(), new_itm);

	profile_tick(CPU_PROFILING);

	if (paravirt_do_steal_accounting(&new_itm))
		goto skip_process_time_accounting;

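	/*
	 * Account for at least one tick, and keep going if we have
	 * fallen behind (i.e. the ITC is already past the next deadline).
	 */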
	while (1) {
		update_process_times(user_mode(get_irq_regs()));

		new_itm += local_cpu_data->itm_delta;

		if (smp_processor_id() == time_keeper_id)
			xtime_update(1);

		local_cpu_data->itm_next = new_itm;

		if (time_after(new_itm, ia64_get_itc()))
			break;

		/*
		 * Allow IPIs to interrupt the timer loop.
		 */
		local_irq_enable();
		local_irq_disable();
	}

skip_process_time_accounting:

	do {
		/*
		 * If we're too close to the next clock tick for
		 * comfort, we increase the safety margin by
		 * intentionally dropping the next tick(s).  We do NOT
		 * update itm.next because that would force us to call
		 * xtime_update() which in turn would let our clock run
		 * too fast (with the potentially devastating effect
		 * of losing the monotonicity of time).
		 */
		while (!time_after(new_itm, ia64_get_itc() + local_cpu_data->itm_delta/2))
			new_itm += local_cpu_data->itm_delta;
		ia64_set_itm(new_itm);
		/* double check, in case we got hit by a (slow) PMI: */
	} while (time_after_eq(ia64_get_itc(), new_itm));
	return IRQ_HANDLED;
}

/*
 * Encapsulate access to the itm structure for SMP.
 */
void
ia64_cpu_local_tick (void)
{
	int cpu = smp_processor_id();
	unsigned long shift = 0, delta;

	/* arrange for the cycle counter to generate a timer interrupt: */
	ia64_set_itv(IA64_TIMER_VECTOR);

	delta = local_cpu_data->itm_delta;
	/*
	 * Stagger the timer tick for each CPU so they don't occur all at (almost) the
	 * same time:
	 */
	if (cpu) {
		unsigned long hi = 1UL << ia64_fls(cpu);
		shift = (2*(cpu - hi) + 1) * delta/hi/2;
	}
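	/*
	 * Worked example (illustrative): with delta = D, CPU 1 fires D/2
	 * after CPU 0, CPU 2 at D/4, CPU 3 at 3D/4, CPU 4 at D/8, and so
	 * on: each power-of-two batch of CPUs lands on the odd multiples
	 * of D/(2*hi), recursively splitting the largest remaining gaps.
	 */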
	local_cpu_data->itm_next = ia64_get_itc() + delta + shift;
	ia64_set_itm(local_cpu_data->itm_next);
}

static int nojitter;

static int __init nojitter_setup(char *str)
{
	nojitter = 1;
	printk(KERN_INFO "Jitter checking for ITC timers disabled\n");
	return 1;
}

__setup("nojitter", nojitter_setup);
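
/*
 * Usage note: booting with "nojitter" on the kernel command line skips
 * the cmpxchg-based jitter compensation that ia64_init_itm() would
 * otherwise enable on SMP.
 */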

void ia64_init_itm(void)
{
	unsigned long platform_base_freq, itc_freq;
	struct pal_freq_ratio itc_ratio, proc_ratio;
	long status, platform_base_drift, itc_drift;

	/*
	 * According to SAL v2.6, we need to use a SAL call to determine the platform base
	 * frequency and then a PAL call to determine the frequency ratio between the ITC
	 * and the base frequency.
	 */
	status = ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM,
				    &platform_base_freq, &platform_base_drift);
	if (status != 0) {
		printk(KERN_ERR "SAL_FREQ_BASE_PLATFORM failed: %s\n", ia64_sal_strerror(status));
	} else {
		status = ia64_pal_freq_ratios(&proc_ratio, NULL, &itc_ratio);
		if (status != 0)
			printk(KERN_ERR "PAL_FREQ_RATIOS failed with status=%ld\n", status);
	}
	if (status != 0) {
		/* invent "random" values */
		printk(KERN_ERR
		       "SAL/PAL failed to obtain frequency info---inventing reasonable values\n");
		platform_base_freq = 100000000;
		platform_base_drift = -1;	/* no drift info */
		itc_ratio.num = 3;
		itc_ratio.den = 1;
	}
	if (platform_base_freq < 40000000) {
		printk(KERN_ERR "Platform base frequency %lu bogus---resetting to 75MHz!\n",
		       platform_base_freq);
		platform_base_freq = 75000000;
		platform_base_drift = -1;
	}
	if (!proc_ratio.den)
		proc_ratio.den = 1;	/* avoid division by zero */
	if (!itc_ratio.den)
		itc_ratio.den = 1;	/* avoid division by zero */

	itc_freq = (platform_base_freq*itc_ratio.num)/itc_ratio.den;

	local_cpu_data->itm_delta = (itc_freq + HZ/2) / HZ;
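	/*
	 * Worked example (numbers purely illustrative): a 200MHz platform
	 * base clock with an ITC ratio of 5/1 gives itc_freq = 1GHz; at
	 * HZ = 250 that makes itm_delta 4,000,000 ITC cycles per tick.
	 * The "+ HZ/2" rounds to the nearest cycle instead of truncating.
	 */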
	printk(KERN_DEBUG "CPU %d: base freq=%lu.%03luMHz, ITC ratio=%u/%u, "
	       "ITC freq=%lu.%03luMHz", smp_processor_id(),
	       platform_base_freq / 1000000, (platform_base_freq / 1000) % 1000,
	       itc_ratio.num, itc_ratio.den, itc_freq / 1000000, (itc_freq / 1000) % 1000);

	if (platform_base_drift != -1) {
		itc_drift = platform_base_drift*itc_ratio.num/itc_ratio.den;
		printk("+/-%ldppm\n", itc_drift);
	} else {
		itc_drift = -1;
		printk("\n");
	}

	local_cpu_data->proc_freq = (platform_base_freq*proc_ratio.num)/proc_ratio.den;
	local_cpu_data->itc_freq = itc_freq;
	local_cpu_data->cyc_per_usec = (itc_freq + USEC_PER_SEC/2) / USEC_PER_SEC;
	local_cpu_data->nsec_per_cyc = ((NSEC_PER_SEC<<IA64_NSEC_PER_CYC_SHIFT)
					+ itc_freq/2)/itc_freq;
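	/*
	 * nsec_per_cyc is a rounded fixed-point reciprocal; consumers
	 * compute ns = (cycles * nsec_per_cyc) >> IA64_NSEC_PER_CYC_SHIFT.
	 * E.g. (illustrative) at itc_freq = 1GHz it comes out as exactly
	 * 1 << IA64_NSEC_PER_CYC_SHIFT, i.e. 1.0ns per cycle.
	 */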

	if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
#ifdef CONFIG_SMP
		/* On IA64 in an SMP configuration ITCs are never accurately synchronized.
		 * Jitter compensation requires a cmpxchg which may limit
		 * the scalability of the syscalls for retrieving time.
		 * The ITC synchronization is usually successful to within a few
		 * ITC ticks but this is not a sure thing. If you need to improve
		 * timer performance in SMP situations then boot the kernel with the
		 * "nojitter" option. However, doing so may result in time fluctuating (maybe
		 * even going backward) if the ITC offsets between the individual CPUs
		 * are too large.
		 */
		if (!nojitter)
			itc_jitter_data.itc_jitter = 1;
#endif
	} else
		/*
		 * ITC is drifty and we have not synchronized the ITCs in smpboot.c.
		 * ITC values may fluctuate significantly between processors.
		 * Clock should not be used for hrtimers. Mark itc as only
		 * useful for boot and testing.
		 *
		 * Note that jitter compensation is off! There is no point in
		 * synchronizing ITCs since there may be large differentials
		 * that change over time.
		 *
		 * The only way to fix this would be to repeatedly sync the
		 * ITCs. Until that time we have to avoid ITC.
		 */
		clocksource_itc.rating = 50;

	paravirt_init_missing_ticks_accounting(smp_processor_id());

	/* avoid softlockup messages when a CPU is unplugged and plugged in again */
	touch_softlockup_watchdog();

	/* Setup the CPU local timer tick */
	ia64_cpu_local_tick();

	if (!itc_clocksource) {
		clocksource_register_hz(&clocksource_itc,
					local_cpu_data->itc_freq);
		itc_clocksource = &clocksource_itc;
	}
}

static cycle_t itc_get_cycles(struct clocksource *cs)
{
	unsigned long lcycle, now, ret;

	if (!itc_jitter_data.itc_jitter)
		return get_cycles();

	lcycle = itc_jitter_data.itc_lastcycle;
	now = get_cycles();
	if (lcycle && time_after(lcycle, now))
		return lcycle;

	/*
	 * Keep track of the last clock value returned.  In an SMP
	 * environment we may lose the cmpxchg race: in that case cmpxchg
	 * returns the value that the winning CPU stored, so return that
	 * value instead to keep the clock monotonic.
	 */
	ret = cmpxchg(&itc_jitter_data.itc_lastcycle, lcycle, now);
	if (unlikely(ret != lcycle))
		return ret;

	return now;
}

static struct irqaction timer_irqaction = {
	.handler =	timer_interrupt,
	.flags =	IRQF_IRQPOLL,
	.name =		"timer"
};

void read_persistent_clock(struct timespec *ts)
{
	efi_gettimeofday(ts);
}

void __init
time_init (void)
{
	register_percpu_irq(IA64_TIMER_VECTOR, &timer_irqaction);
	ia64_init_itm();
}

/*
 * Generic udelay assumes that, if preemption is allowed and the thread
 * migrates to another CPU, the ITC values are synchronized across all
 * CPUs.
 */
static void
ia64_itc_udelay (unsigned long usecs)
{
	unsigned long start = ia64_get_itc();
	unsigned long end = start + usecs*local_cpu_data->cyc_per_usec;

	while (time_before(ia64_get_itc(), end))
		cpu_relax();
}

void (*ia64_udelay)(unsigned long usecs) = &ia64_itc_udelay;
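
/*
 * Platforms whose ITCs are unsuitable for delay loops can install their
 * own implementation through this hook, e.g. (hypothetical):
 *
 *	ia64_udelay = &my_platform_udelay;
 */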

void
udelay (unsigned long usecs)
{
	(*ia64_udelay)(usecs);
}
EXPORT_SYMBOL(udelay);

/* IA64 doesn't cache the timezone */
void update_vsyscall_tz(void)
{
}

void update_vsyscall_old(struct timespec *wall, struct timespec *wtm,
			 struct clocksource *c, u32 mult, cycle_t cycle_last)
{
	write_seqcount_begin(&fsyscall_gtod_data.seq);

	/* copy fsyscall clock data */
	fsyscall_gtod_data.clk_mask = c->mask;
	fsyscall_gtod_data.clk_mult = mult;
	fsyscall_gtod_data.clk_shift = c->shift;
	fsyscall_gtod_data.clk_fsys_mmio = c->archdata.fsys_mmio;
	fsyscall_gtod_data.clk_cycle_last = cycle_last;

	/* copy kernel time structures */
	fsyscall_gtod_data.wall_time.tv_sec = wall->tv_sec;
	fsyscall_gtod_data.wall_time.tv_nsec = wall->tv_nsec;
	fsyscall_gtod_data.monotonic_time.tv_sec = wtm->tv_sec
							+ wall->tv_sec;
	fsyscall_gtod_data.monotonic_time.tv_nsec = wtm->tv_nsec
							+ wall->tv_nsec;

	/* normalize */
	while (fsyscall_gtod_data.monotonic_time.tv_nsec >= NSEC_PER_SEC) {
		fsyscall_gtod_data.monotonic_time.tv_nsec -= NSEC_PER_SEC;
		fsyscall_gtod_data.monotonic_time.tv_sec++;
	}

	write_seqcount_end(&fsyscall_gtod_data.seq);
}
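
/*
 * Reader side of the seqcount protocol, for illustration only (the real
 * consumer is the hand-coded fsyscall gettimeofday path, which open-codes
 * the equivalent in assembly):
 *
 *	unsigned int seq;
 *	do {
 *		seq = read_seqcount_begin(&fsyscall_gtod_data.seq);
 *		... copy out the clk_* fields and time stamps ...
 *	} while (read_seqcount_retry(&fsyscall_gtod_data.seq, seq));
 */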