#include <linux/init.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/irq.h>

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/atmel_tc.h>


/*
 * We're configured to use a specific TC block, one that's not hooked
 * up to external hardware, to provide a time solution:
 *
 *   - Two channels combine to create a free-running 32 bit counter
 *     with a base rate of 5+ MHz, packaged as a clocksource (with
 *     resolution better than 200 nsec).
 *   - Some chips support a 32 bit counter.  A single channel is used
 *     for this 32 bit free-running counter; the second channel is not
 *     used.
 *
 *   - The third channel may be used to provide a 16-bit clockevent
 *     source, used in either periodic or oneshot mode.  This runs
 *     at 32 KiHz, and can handle delays of up to two seconds.
 *
 * A boot clocksource and clockevent source are also currently needed,
 * unless the relevant platforms (ARM/AT91, AVR32/AT32) are changed so
 * this code can be used when init_timers() is called, well before most
 * devices are set up.  (Some low end AT91 parts, which can run uClinux,
 * have only the timers in one TC block... they currently don't support
 * the tclib code, because of that initialization issue.)
 *
 * REVISIT behavior during system suspend states... we should disable
 * all clocks and save the power.  Easily done for clockevent devices,
 * but clocksources won't necessarily get the needed notifications.
 * For deeper system sleep states, this will be mandatory...
 */
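/*
 * Rough numbers, assuming the "5+ MHz" divided rate chosen below: a
 * 32 bit counter at 5 MHz wraps about every 859 seconds with 200 nsec
 * resolution, and the 16-bit clockevent channel at 32768 Hz tops out
 * at 0xffff ticks, i.e. just under two seconds per event.
 */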

static void __iomem *tcaddr;

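/*
 * Read the chained 16+16 bit counter: sample the high half (channel 1),
 * then the low half (channel 0), and retry if the high half changed in
 * between, so a carry between the two reads can't produce a torn value.
 */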
static cycle_t tc_get_cycles(struct clocksource *cs)
{
	unsigned long	flags;
	u32		lower, upper;

	raw_local_irq_save(flags);
	do {
		upper = __raw_readl(tcaddr + ATMEL_TC_REG(1, CV));
		lower = __raw_readl(tcaddr + ATMEL_TC_REG(0, CV));
	} while (upper != __raw_readl(tcaddr + ATMEL_TC_REG(1, CV)));

	raw_local_irq_restore(flags);
	return (upper << 16) | lower;
}

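/* On chips with a true 32 bit channel, a single register read is enough. */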
static cycle_t tc_get_cycles32(struct clocksource *cs)
{
	return __raw_readl(tcaddr + ATMEL_TC_REG(0, CV));
}

static struct clocksource clksrc = {
	.name           = "tcb_clksrc",
	.rating         = 200,
	.read           = tc_get_cycles,
	.mask           = CLOCKSOURCE_MASK(32),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};

#ifdef CONFIG_GENERIC_CLOCKEVENTS

struct tc_clkevt_device {
	struct clock_event_device	clkevt;
	struct clk			*clk;
	void __iomem			*regs;
};

static struct tc_clkevt_device *to_tc_clkevt(struct clock_event_device *clkevt)
{
	return container_of(clkevt, struct tc_clkevt_device, clkevt);
}

/* For now, we always use the 32K clock ... this optimizes for NO_HZ,
 * because using one of the divided clocks would usually mean the
 * tick rate can never be less than several dozen Hz (vs 0.5 Hz).
 *
 * A divided clock could be good for high resolution timers, since
 * 30.5 usec resolution can seem "low".
 */
static u32 timer_clock;

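/*
 * Mode changes first shut down whatever was running on channel 2 (mask
 * its irqs, disable its clock), then program the new mode: periodic
 * mode auto-reloads on RC compare, while oneshot mode stops the clock
 * on RC compare and leaves (re)arming to tc_next_event().
 */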
static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
{
	struct tc_clkevt_device *tcd = to_tc_clkevt(d);
	void __iomem		*regs = tcd->regs;

	if (tcd->clkevt.mode == CLOCK_EVT_MODE_PERIODIC
			|| tcd->clkevt.mode == CLOCK_EVT_MODE_ONESHOT) {
		__raw_writel(0xff, regs + ATMEL_TC_REG(2, IDR));
		__raw_writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR));
		clk_disable(tcd->clk);
	}

	switch (m) {

	/* By not making the gentime core emulate periodic mode on top
	 * of oneshot, we get lower overhead and improved accuracy.
	 */
	case CLOCK_EVT_MODE_PERIODIC:
		clk_enable(tcd->clk);

		/* slow clock, count up to RC, then irq and restart */
		__raw_writel(timer_clock
				| ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
				regs + ATMEL_TC_REG(2, CMR));
		__raw_writel((32768 + HZ/2) / HZ, tcaddr + ATMEL_TC_REG(2, RC));

		/* Enable clock and interrupts on RC compare */
		__raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));

		/* go go gadget! */
		__raw_writel(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG,
				regs + ATMEL_TC_REG(2, CCR));
		break;

	case CLOCK_EVT_MODE_ONESHOT:
		clk_enable(tcd->clk);

		/* slow clock, count up to RC, then irq and stop */
		__raw_writel(timer_clock | ATMEL_TC_CPCSTOP
				| ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
				regs + ATMEL_TC_REG(2, CMR));
		__raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));

		/* set_next_event() configures and starts the timer */
		break;

	default:
		break;
	}
}

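/*
 * Oneshot programming: write the new RC compare value, then enable the
 * channel clock and software-trigger it so counting restarts from zero.
 */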
static int tc_next_event(unsigned long delta, struct clock_event_device *d)
{
	__raw_writel(delta, tcaddr + ATMEL_TC_REG(2, RC));

	/* go go gadget! */
	__raw_writel(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG,
			tcaddr + ATMEL_TC_REG(2, CCR));
	return 0;
}

static struct tc_clkevt_device clkevt = {
	.clkevt	= {
		.name		= "tc_clkevt",
		.features	= CLOCK_EVT_FEAT_PERIODIC
					| CLOCK_EVT_FEAT_ONESHOT,
		/* Should be lower than at91rm9200's system timer */
		.rating		= 125,
		.set_next_event	= tc_next_event,
		.set_mode	= tc_mode,
	},
};

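/*
 * Channel 2 interrupt: reading SR tells us (and clears) what fired; on
 * RC compare, hand the tick to the clockevent core's event handler.
 */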
static irqreturn_t ch2_irq(int irq, void *handle)
{
	struct tc_clkevt_device	*dev = handle;
	unsigned int		sr;

	sr = __raw_readl(dev->regs + ATMEL_TC_REG(2, SR));
	if (sr & ATMEL_TC_CPCS) {
		dev->clkevt.event_handler(&dev->clkevt);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

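/*
 * Register channel 2 as the clockevent device: briefly enable its clock
 * to catch problems before the first mode change, hook up the irq, then
 * register at 32768 Hz with a 16-bit (0xffff) maximum delta.
 */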
static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
{
	int ret;
	struct clk *t2_clk = tc->clk[2];
	int irq = tc->irq[2];

	/* try to enable t2 clk to avoid future errors in mode change */
	ret = clk_prepare_enable(t2_clk);
	if (ret)
		return ret;
	clk_disable(t2_clk);

	clkevt.regs = tc->regs;
	clkevt.clk = t2_clk;

	timer_clock = clk32k_divisor_idx;

	clkevt.clkevt.cpumask = cpumask_of(0);

	ret = request_irq(irq, ch2_irq, IRQF_TIMER, "tc_clkevt", &clkevt);
	if (ret) {
		clk_disable_unprepare(t2_clk);
		return ret;
	}

	clockevents_config_and_register(&clkevt.clkevt, 32768, 1, 0xffff);

	return ret;
}

#else /* !CONFIG_GENERIC_CLOCKEVENTS */

static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
{
	/* NOTHING */
	return 0;
}

#endif

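/*
 * Two-channel clocksource setup: channel 0 counts the divided master
 * clock and produces one TIOA0 period per 2^16 counts; channel 1 is
 * clocked by TIOA0 (routed in as XC1), so it supplies the high 16 bits
 * that tc_get_cycles() reads.
 */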
static void __init tcb_setup_dual_chan(struct atmel_tc *tc, int mck_divisor_idx)
{
	/* channel 0:  waveform mode, input mclk/8, clock TIOA0 on overflow */
	__raw_writel(mck_divisor_idx			/* likely divide-by-8 */
			| ATMEL_TC_WAVE
			| ATMEL_TC_WAVESEL_UP		/* free-run */
			| ATMEL_TC_ACPA_SET		/* TIOA0 rises at 0 */
			| ATMEL_TC_ACPC_CLEAR,		/* (duty cycle 50%) */
			tcaddr + ATMEL_TC_REG(0, CMR));
	__raw_writel(0x0000, tcaddr + ATMEL_TC_REG(0, RA));
	__raw_writel(0x8000, tcaddr + ATMEL_TC_REG(0, RC));
	__raw_writel(0xff, tcaddr + ATMEL_TC_REG(0, IDR));	/* no irqs */
	__raw_writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(0, CCR));

	/* channel 1:  waveform mode, input TIOA0 */
	__raw_writel(ATMEL_TC_XC1			/* input: TIOA0 */
			| ATMEL_TC_WAVE
			| ATMEL_TC_WAVESEL_UP,		/* free-run */
			tcaddr + ATMEL_TC_REG(1, CMR));
	__raw_writel(0xff, tcaddr + ATMEL_TC_REG(1, IDR));	/* no irqs */
	__raw_writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(1, CCR));

	/* chain channel 0 to channel 1 */
	__raw_writel(ATMEL_TC_TC1XC1S_TIOA0, tcaddr + ATMEL_TC_BMR);
	/* then reset all the timers */
	__raw_writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
}

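/*
 * Chips with a true 32 bit channel only need channel 0, free-running
 * on the divided master clock.
 */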
static void __init tcb_setup_single_chan(struct atmel_tc *tc, int mck_divisor_idx)
{
	/* channel 0:  waveform mode, input mclk/8 */
	__raw_writel(mck_divisor_idx			/* likely divide-by-8 */
			| ATMEL_TC_WAVE
			| ATMEL_TC_WAVESEL_UP,		/* free-run */
			tcaddr + ATMEL_TC_REG(0, CMR));
	__raw_writel(0xff, tcaddr + ATMEL_TC_REG(0, IDR));	/* no irqs */
	__raw_writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(0, CCR));

	/* then reset all the timers */
	__raw_writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
}

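/*
 * Driver init: claim a TC block, pick a divided master clock rate above
 * 5 MHz for the counter (noting the 32 KiHz slow clock slot for the
 * clockevent channel), program one or two channels depending on the
 * counter width, then register the clocksource and the channel 2
 * clockevent.
 */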
static int __init tcb_clksrc_init(void)
{
	static char bootinfo[] __initdata
		= KERN_DEBUG "%s: tc%d at %d.%03d MHz\n";

	struct platform_device *pdev;
	struct atmel_tc *tc;
	struct clk *t0_clk;
	u32 rate, divided_rate = 0;
	int best_divisor_idx = -1;
	int clk32k_divisor_idx = -1;
	int i;
	int ret;

	tc = atmel_tc_alloc(CONFIG_ATMEL_TCB_CLKSRC_BLOCK);
	if (!tc) {
		pr_debug("can't alloc TC for clocksource\n");
		return -ENODEV;
	}
	tcaddr = tc->regs;
	pdev = tc->pdev;

	t0_clk = tc->clk[0];
	ret = clk_prepare_enable(t0_clk);
	if (ret) {
		pr_debug("can't enable T0 clk\n");
		goto err_free_tc;
	}

	/* How fast will we be counting?  Pick something over 5 MHz.  */
	rate = (u32) clk_get_rate(t0_clk);
	for (i = 0; i < 5; i++) {
		unsigned divisor = atmel_tc_divisors[i];
		unsigned tmp;

		/* remember 32 KiHz clock for later */
		if (!divisor) {
			clk32k_divisor_idx = i;
			continue;
		}

		tmp = rate / divisor;
		pr_debug("TC: %u / %-3u [%d] --> %u\n", rate, divisor, i, tmp);
		if (best_divisor_idx > 0) {
			if (tmp < 5 * 1000 * 1000)
				continue;
		}
		divided_rate = tmp;
		best_divisor_idx = i;
	}


	printk(bootinfo, clksrc.name, CONFIG_ATMEL_TCB_CLKSRC_BLOCK,
			divided_rate / 1000000,
			((divided_rate + 500000) % 1000000) / 1000);

	if (tc->tcb_config && tc->tcb_config->counter_width == 32) {
		/* use appropriate function to read 32 bit counter */
		clksrc.read = tc_get_cycles32;
		/* set up only channel 0 */
		tcb_setup_single_chan(tc, best_divisor_idx);
	} else {
		/* tclib will give us three clocks no matter what the
		 * underlying platform supports.
		 */
		ret = clk_prepare_enable(tc->clk[1]);
		if (ret) {
			pr_debug("can't enable T1 clk\n");
			goto err_disable_t0;
		}
		/* set up both channels 0 & 1 */
		tcb_setup_dual_chan(tc, best_divisor_idx);
	}

	/* and away we go! */
	ret = clocksource_register_hz(&clksrc, divided_rate);
	if (ret)
		goto err_disable_t1;

	/* channel 2:  periodic and oneshot timer support */
	ret = setup_clkevents(tc, clk32k_divisor_idx);
	if (ret)
		goto err_unregister_clksrc;

	return 0;

err_unregister_clksrc:
	clocksource_unregister(&clksrc);

err_disable_t1:
	if (!tc->tcb_config || tc->tcb_config->counter_width != 32)
		clk_disable_unprepare(tc->clk[1]);

err_disable_t0:
	clk_disable_unprepare(t0_clk);

err_free_tc:
	atmel_tc_free(tc);
	return ret;
}
arch_initcall(tcb_clksrc_init);