/*
 * Based on arm clockevents implementation and old bfin time tick.
 *
 * Copyright 2008-2009 Analog Devices Inc.
 *                2008 GeoTechnologies
 *                     Vitja Makarov
 *
 * Licensed under the GPL-2
 */

#include <linux/module.h>
#include <linux/profile.h>
#include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/irq.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/cpufreq.h>

#include <asm/blackfin.h>
#include <asm/time.h>
#include <asm/gptimers.h>
#include <asm/nmi.h>


#if defined(CONFIG_CYCLES_CLOCKSOURCE)

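/*
 * The CYCLES counter runs at the core clock (CCLK), which changes under
 * cpufreq.  The Blackfin cpufreq code maintains __bfin_cycles_off and
 * __bfin_cycles_mod across frequency transitions so the value returned
 * here stays monotonic for the clocksource layer.
 */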
static notrace cycle_t bfin_read_cycles(struct clocksource *cs)
{
#ifdef CONFIG_CPU_FREQ
	return __bfin_cycles_off + (get_cycles() << __bfin_cycles_mod);
#else
	return get_cycles();
#endif
}

static struct clocksource bfin_cs_cycles = {
	.name		= "bfin_cs_cycles",
	.rating		= 400,
	.read		= bfin_read_cycles,
	.mask		= CLOCKSOURCE_MASK(64),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};

static inline unsigned long long bfin_cs_cycles_sched_clock(void)
{
	return clocksource_cyc2ns(bfin_read_cycles(&bfin_cs_cycles),
		bfin_cs_cycles.mult, bfin_cs_cycles.shift);
}

static int __init bfin_cs_cycles_init(void)
{
	if (clocksource_register_hz(&bfin_cs_cycles, get_cclk()))
		panic("failed to register clocksource");

	return 0;
}
#else
# define bfin_cs_cycles_init()
#endif

#ifdef CONFIG_GPTMR0_CLOCKSOURCE

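/*
 * Configure GPTIMER0 as a free-running counter clocked by SCLK: continuous
 * PWM mode with the output pad disabled, period set to the full 32-bit
 * range (-1 == 0xFFFFFFFF) and pulse width just below it, so that
 * TIMER0_COUNTER simply wraps and can be read as a clocksource.
 */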
void __init setup_gptimer0(void)
{
	disable_gptimers(TIMER0bit);

#ifdef CONFIG_BF60x
	bfin_write16(TIMER_DATA_IMSK, 0);
	set_gptimer_config(TIMER0_id, TIMER_OUT_DIS
		| TIMER_MODE_PWM_CONT | TIMER_PULSE_HI | TIMER_IRQ_PER);
#else
	set_gptimer_config(TIMER0_id,
		TIMER_OUT_DIS | TIMER_PERIOD_CNT | TIMER_MODE_PWM);
#endif
	set_gptimer_period(TIMER0_id, -1);
	set_gptimer_pwidth(TIMER0_id, -2);
	SSYNC();
	enable_gptimers(TIMER0bit);
}

static cycle_t bfin_read_gptimer0(struct clocksource *cs)
{
	return bfin_read_TIMER0_COUNTER();
}

static struct clocksource bfin_cs_gptimer0 = {
	.name		= "bfin_cs_gptimer0",
	.rating		= 350,
	.read		= bfin_read_gptimer0,
	.mask		= CLOCKSOURCE_MASK(32),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};

static inline unsigned long long bfin_cs_gptimer0_sched_clock(void)
{
	return clocksource_cyc2ns(bfin_read_TIMER0_COUNTER(),
		bfin_cs_gptimer0.mult, bfin_cs_gptimer0.shift);
}

static int __init bfin_cs_gptimer0_init(void)
{
	setup_gptimer0();

	if (clocksource_register_hz(&bfin_cs_gptimer0, get_sclk()))
		panic("failed to register clocksource");

	return 0;
}
#else
# define bfin_cs_gptimer0_init()
#endif

#if defined(CONFIG_GPTMR0_CLOCKSOURCE) || defined(CONFIG_CYCLES_CLOCKSOURCE)
/* prefer to use cycles since it has higher rating */
notrace unsigned long long sched_clock(void)
{
#if defined(CONFIG_CYCLES_CLOCKSOURCE)
	return bfin_cs_cycles_sched_clock();
#else
	return bfin_cs_gptimer0_sched_clock();
#endif
}
#endif

#if defined(CONFIG_TICKSOURCE_GPTMR0)
static int bfin_gptmr0_set_next_event(unsigned long cycles,
				     struct clock_event_device *evt)
{
	disable_gptimers(TIMER0bit);

	/* it starts counting three SCLK cycles after the TIMENx bit is set */
	set_gptimer_pwidth(TIMER0_id, cycles - 3);
	enable_gptimers(TIMER0bit);
	return 0;
}

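/*
 * PERIODIC mode programs the timer to reload every SCLK/HZ cycles and
 * interrupt once per period; ONESHOT mode leaves the period at 0 so the
 * timer fires a single time for the width programmed by set_next_event().
 */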
static void bfin_gptmr0_set_mode(enum clock_event_mode mode,
				struct clock_event_device *evt)
{
	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC: {
#ifndef CONFIG_BF60x
		set_gptimer_config(TIMER0_id,
			TIMER_OUT_DIS | TIMER_IRQ_ENA |
			TIMER_PERIOD_CNT | TIMER_MODE_PWM);
#else
		set_gptimer_config(TIMER0_id, TIMER_OUT_DIS
			| TIMER_MODE_PWM_CONT | TIMER_PULSE_HI | TIMER_IRQ_PER);
#endif

		set_gptimer_period(TIMER0_id, get_sclk() / HZ);
		set_gptimer_pwidth(TIMER0_id, get_sclk() / HZ - 1);
		enable_gptimers(TIMER0bit);
		break;
	}
	case CLOCK_EVT_MODE_ONESHOT:
		disable_gptimers(TIMER0bit);
#ifndef CONFIG_BF60x
		set_gptimer_config(TIMER0_id,
			TIMER_OUT_DIS | TIMER_IRQ_ENA | TIMER_MODE_PWM);
#else
		set_gptimer_config(TIMER0_id, TIMER_OUT_DIS | TIMER_MODE_PWM
			| TIMER_PULSE_HI | TIMER_IRQ_WID_DLY);
#endif

		set_gptimer_period(TIMER0_id, 0);
		break;
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		disable_gptimers(TIMER0bit);
		break;
	case CLOCK_EVT_MODE_RESUME:
		break;
	}
}

static void bfin_gptmr0_ack(void)
{
	clear_gptimer_intr(TIMER0_id);
}

static void __init bfin_gptmr0_init(void)
{
	disable_gptimers(TIMER0bit);
}

#ifdef CONFIG_CORE_TIMER_IRQ_L1
__attribute__((l1_text))
#endif
irqreturn_t bfin_gptmr0_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;
	smp_mb();
	/*
	 * We want to ACK before we handle so that we can handle smaller timer
	 * intervals.  This way if the timer expires again while we're handling
	 * things, we're more likely to see that 2nd int rather than swallowing
	 * it by ACKing the int at the end of this handler.
	 */
	bfin_gptmr0_ack();
	evt->event_handler(evt);
	return IRQ_HANDLED;
}

static struct irqaction gptmr0_irq = {
	.name		= "Blackfin GPTimer0",
	.flags		= IRQF_TIMER | IRQF_IRQPOLL | IRQF_PERCPU,
	.handler	= bfin_gptmr0_interrupt,
};

static struct clock_event_device clockevent_gptmr0 = {
	.name		= "bfin_gptimer0",
	.rating		= 300,
	.irq		= IRQ_TIMER0,
	.shift		= 32,
	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.set_next_event = bfin_gptmr0_set_next_event,
	.set_mode	= bfin_gptmr0_set_mode,
};

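/*
 * Convert the SCLK tick rate into the clock event mult/shift pair
 * (mult = rate << shift / NSEC_PER_SEC) and bound the programmable delta:
 * the full 32-bit timer range on the high end, 100 ticks on the low end.
 */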
static void __init bfin_gptmr0_clockevent_init(struct clock_event_device *evt)
{
	unsigned long clock_tick;

	clock_tick = get_sclk();
	evt->mult = div_sc(clock_tick, NSEC_PER_SEC, evt->shift);
	evt->max_delta_ns = clockevent_delta2ns(-1, evt);
	evt->min_delta_ns = clockevent_delta2ns(100, evt);

	evt->cpumask = cpumask_of(0);

	clockevents_register_device(evt);
}
#endif /* CONFIG_TICKSOURCE_GPTMR0 */

#if defined(CONFIG_TICKSOURCE_CORETMR)
/* per-cpu local core timer */
DEFINE_PER_CPU(struct clock_event_device, coretmr_events);

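/*
 * The core timer counts down from TCOUNT at CCLK / TIME_SCALE (TSCALE is
 * programmed in bfin_coretmr_init()/set_mode()) and raises its interrupt
 * when it reaches zero, so programming the next event is just a matter of
 * loading TCOUNT and re-enabling the timer.
 */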
static int bfin_coretmr_set_next_event(unsigned long cycles,
				struct clock_event_device *evt)
{
	bfin_write_TCNTL(TMPWR);
	CSYNC();
	bfin_write_TCOUNT(cycles);
	CSYNC();
	bfin_write_TCNTL(TMPWR | TMREN);
	return 0;
}

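/*
 * PERIODIC mode uses TAUTORLD so TCOUNT is reloaded from TPERIOD on every
 * expiry (one interrupt per 1/HZ); ONESHOT leaves auto-reload off and
 * TPERIOD at 0, relying on set_next_event() to load TCOUNT per event.
 */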
static void bfin_coretmr_set_mode(enum clock_event_mode mode,
				struct clock_event_device *evt)
{
	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC: {
		unsigned long tcount = ((get_cclk() / (HZ * TIME_SCALE)) - 1);
		bfin_write_TCNTL(TMPWR);
		CSYNC();
		bfin_write_TSCALE(TIME_SCALE - 1);
		bfin_write_TPERIOD(tcount);
		bfin_write_TCOUNT(tcount);
		CSYNC();
		bfin_write_TCNTL(TMPWR | TMREN | TAUTORLD);
		break;
	}
	case CLOCK_EVT_MODE_ONESHOT:
		bfin_write_TCNTL(TMPWR);
		CSYNC();
		bfin_write_TSCALE(TIME_SCALE - 1);
		bfin_write_TPERIOD(0);
		bfin_write_TCOUNT(0);
		break;
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		bfin_write_TCNTL(0);
		CSYNC();
		break;
	case CLOCK_EVT_MODE_RESUME:
		break;
	}
}

void bfin_coretmr_init(void)
{
	/* power up the timer, but don't enable it just yet */
	bfin_write_TCNTL(TMPWR);
	CSYNC();

	/* the TSCALE prescaler counter. */
	bfin_write_TSCALE(TIME_SCALE - 1);
	bfin_write_TPERIOD(0);
	bfin_write_TCOUNT(0);

	CSYNC();
}

#ifdef CONFIG_CORE_TIMER_IRQ_L1
__attribute__((l1_text))
#endif
irqreturn_t bfin_coretmr_interrupt(int irq, void *dev_id)
{
	int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(coretmr_events, cpu);

	smp_mb();
	evt->event_handler(evt);

	touch_nmi_watchdog();

	return IRQ_HANDLED;
}

static struct irqaction coretmr_irq = {
	.name		= "Blackfin CoreTimer",
	.flags		= IRQF_TIMER | IRQF_IRQPOLL | IRQF_PERCPU,
	.handler	= bfin_coretmr_interrupt,
};

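/*
 * Register the core timer clock event device for the calling CPU.  The tick
 * rate seen by the clock event layer is CCLK / TIME_SCALE, matching the
 * TSCALE prescaler programmed in bfin_coretmr_init().
 */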
void bfin_coretmr_clockevent_init(void)
{
	unsigned long clock_tick;
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(coretmr_events, cpu);

#ifdef CONFIG_SMP
	evt->broadcast = smp_timer_broadcast;
#endif

	evt->name = "bfin_core_timer";
	evt->rating = 350;
	evt->irq = -1;
	evt->shift = 32;
	evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
	evt->set_next_event = bfin_coretmr_set_next_event;
	evt->set_mode = bfin_coretmr_set_mode;

	clock_tick = get_cclk() / TIME_SCALE;
	evt->mult = div_sc(clock_tick, NSEC_PER_SEC, evt->shift);
	evt->max_delta_ns = clockevent_delta2ns(-1, evt);
	evt->min_delta_ns = clockevent_delta2ns(100, evt);

	evt->cpumask = cpumask_of(cpu);

	clockevents_register_device(evt);
}
#endif /* CONFIG_TICKSOURCE_CORETMR */


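/*
 * This hook does not consult the on-chip RTC; it simply reports a fixed
 * boot date of 1 Jan 2007: 37 years since the 1970 epoch, nine of them
 * leap years, hence (365 * 37 + 9) days.
 */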
void read_persistent_clock(struct timespec *ts)
{
	time_t secs_since_1970 = (365 * 37 + 9) * 24 * 60 * 60;	/* 1 Jan 2007 */
	ts->tv_sec = secs_since_1970;
	ts->tv_nsec = 0;
}

void __init time_init(void)
{

#ifdef CONFIG_RTC_DRV_BFIN
	/* [#2663] hack to filter junk RTC values that would cause
	 * userspace to have to deal with time values greater than
	 * 2^31 seconds (which uClibc cannot cope with yet)
	 */
	if ((bfin_read_RTC_STAT() & 0xC0000000) == 0xC0000000) {
		printk(KERN_NOTICE "bfin-rtc: invalid date; resetting\n");
		bfin_write_RTC_STAT(0);
	}
#endif

	bfin_cs_cycles_init();
	bfin_cs_gptimer0_init();

#if defined(CONFIG_TICKSOURCE_CORETMR)
	bfin_coretmr_init();
	setup_irq(IRQ_CORETMR, &coretmr_irq);
	bfin_coretmr_clockevent_init();
#endif

#if defined(CONFIG_TICKSOURCE_GPTMR0)
	bfin_gptmr0_init();
	setup_irq(IRQ_TIMER0, &gptmr0_irq);
	gptmr0_irq.dev_id = &clockevent_gptmr0;
	bfin_gptmr0_clockevent_init(&clockevent_gptmr0);
#endif

#if !defined(CONFIG_TICKSOURCE_CORETMR) && !defined(CONFIG_TICKSOURCE_GPTMR0)
# error at least one clock event device is required
#endif
}