/*
 * OMAP WakeupGen Source file
 *
 * OMAP WakeupGen is the interrupt controller extension used along
 * with the ARM GIC to wake the CPU out of low power states on
 * external interrupts. It is responsible for generating the wakeup
 * event from the incoming interrupts and enable bits. It is
 * implemented in the MPU always ON power domain. During normal operation,
 * WakeupGen delivers external interrupts directly to the GIC.
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 *	Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/cpu_pm.h>

#include "omap-wakeupgen.h"
#include "omap-secure.h"

#include "soc.h"
#include "omap4-sar-layout.h"
#include "common.h"
#include "pm.h"

#define AM43XX_NR_REG_BANKS	7
#define AM43XX_IRQS		224
#define MAX_NR_REG_BANKS	AM43XX_NR_REG_BANKS
#define MAX_IRQS		AM43XX_IRQS
#define DEFAULT_NR_REG_BANKS	5
#define DEFAULT_IRQS		160
#define WKG_MASK_ALL		0x00000000
#define WKG_UNMASK_ALL		0xffffffff
#define CPU_ENA_OFFSET		0x400
#define CPU0_ID			0x0
#define CPU1_ID			0x1
#define OMAP4_NR_BANKS		4
#define OMAP4_NR_IRQS		128

static void __iomem *wakeupgen_base;
static void __iomem *sar_base;
static DEFINE_RAW_SPINLOCK(wakeupgen_lock);
static unsigned int irq_target_cpu[MAX_IRQS];
static unsigned int irq_banks = DEFAULT_NR_REG_BANKS;
static unsigned int max_irqs = DEFAULT_IRQS;
static unsigned int omap_secure_apis;

/*
 * Static helper functions.
 */
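
/*
 * The WakeupGen enable bits are banked per CPU: each CPU's bank starts
 * at OMAP_WKG_ENB_A_0 + (cpu * CPU_ENA_OFFSET) and holds one 32-bit
 * enable register per group of 32 interrupts.
 */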
static inline u32 wakeupgen_readl(u8 idx, u32 cpu)
{
	return readl_relaxed(wakeupgen_base + OMAP_WKG_ENB_A_0 +
				(cpu * CPU_ENA_OFFSET) + (idx * 4));
}

static inline void wakeupgen_writel(u32 val, u8 idx, u32 cpu)
{
	writel_relaxed(val, wakeupgen_base + OMAP_WKG_ENB_A_0 +
				(cpu * CPU_ENA_OFFSET) + (idx * 4));
}

static inline void sar_writel(u32 val, u32 offset, u8 idx)
{
	writel_relaxed(val, sar_base + offset + (idx * 4));
}

static inline int _wakeupgen_get_irq_info(u32 irq, u32 *bit_posn, u8 *reg_index)
{
	/*
	 * Each WakeupGen register controls 32 interrupts,
	 * i.e. one bit per SPI IRQ.
	 */
	*reg_index = irq >> 5;
	*bit_posn = irq %= 32;

	return 0;
}

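/* Clear the WakeupGen enable bit for 'irq' on 'cpu', i.e. mask it there. */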
static void _wakeupgen_clear(unsigned int irq, unsigned int cpu)
{
	u32 val, bit_number;
	u8 i;

	if (_wakeupgen_get_irq_info(irq, &bit_number, &i))
		return;

	val = wakeupgen_readl(i, cpu);
	val &= ~BIT(bit_number);
	wakeupgen_writel(val, i, cpu);
}

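/* Set the WakeupGen enable bit for 'irq' on 'cpu', i.e. unmask it there. */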
static void _wakeupgen_set(unsigned int irq, unsigned int cpu)
{
	u32 val, bit_number;
	u8 i;

	if (_wakeupgen_get_irq_info(irq, &bit_number, &i))
		return;

	val = wakeupgen_readl(i, cpu);
	val |= BIT(bit_number);
	wakeupgen_writel(val, i, cpu);
}

/*
 * Architecture specific Mask extension
 */
static void wakeupgen_mask(struct irq_data *d)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&wakeupgen_lock, flags);
	_wakeupgen_clear(d->hwirq, irq_target_cpu[d->hwirq]);
	raw_spin_unlock_irqrestore(&wakeupgen_lock, flags);
	irq_chip_mask_parent(d);
}

/*
 * Architecture specific Unmask extension
 */
static void wakeupgen_unmask(struct irq_data *d)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&wakeupgen_lock, flags);
	_wakeupgen_set(d->hwirq, irq_target_cpu[d->hwirq]);
	raw_spin_unlock_irqrestore(&wakeupgen_lock, flags);
	irq_chip_unmask_parent(d);
}

#ifdef CONFIG_HOTPLUG_CPU
static DEFINE_PER_CPU(u32 [MAX_NR_REG_BANKS], irqmasks);

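/* Save the WakeupGen masks of 'cpu' into its per-CPU 'irqmasks' copy. */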
static void _wakeupgen_save_masks(unsigned int cpu)
{
	u8 i;

	for (i = 0; i < irq_banks; i++)
		per_cpu(irqmasks, cpu)[i] = wakeupgen_readl(i, cpu);
}

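/* Restore the previously saved WakeupGen masks of 'cpu'. */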
static void _wakeupgen_restore_masks(unsigned int cpu)
{
	u8 i;

	for (i = 0; i < irq_banks; i++)
		wakeupgen_writel(per_cpu(irqmasks, cpu)[i], i, cpu);
}

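/* Write 'reg' to every WakeupGen bank of 'cpu' to mask or unmask all IRQs. */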
static void _wakeupgen_set_all(unsigned int cpu, unsigned int reg)
{
	u8 i;

	for (i = 0; i < irq_banks; i++)
		wakeupgen_writel(reg, i, cpu);
}

/*
 * Mask or unmask all interrupts on given CPU.
 *	1 = Mask all interrupts on the 'cpu', saving the current masks
 *	0 = Unmask all interrupts on the 'cpu', restoring the saved masks
 * Ensure that the initial mask is maintained. This is faster than
 * iterating through GIC registers to arrive at the correct masks.
 */
static void wakeupgen_irqmask_all(unsigned int cpu, unsigned int set)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&wakeupgen_lock, flags);
	if (set) {
		_wakeupgen_save_masks(cpu);
		_wakeupgen_set_all(cpu, WKG_MASK_ALL);
	} else {
		_wakeupgen_set_all(cpu, WKG_UNMASK_ALL);
		_wakeupgen_restore_masks(cpu);
	}
	raw_spin_unlock_irqrestore(&wakeupgen_lock, flags);
}
#endif

#ifdef CONFIG_CPU_PM
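/*
 * Save the OMAP4 WakeupGen context into SAR RAM so that the ROM code
 * can restore it when the device comes back from a deep low power state.
 */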
static inline void omap4_irq_save_context(void)
{
	u32 i, val;

	if (omap_rev() == OMAP4430_REV_ES1_0)
		return;

	for (i = 0; i < irq_banks; i++) {
		/* Save the CPUx interrupt mask for IRQ 0 to 127 */
		val = wakeupgen_readl(i, 0);
		sar_writel(val, WAKEUPGENENB_OFFSET_CPU0, i);
		val = wakeupgen_readl(i, 1);
		sar_writel(val, WAKEUPGENENB_OFFSET_CPU1, i);

		/*
		 * Disable the secure interrupts for CPUx. The restore
		 * code blindly restores secure and non-secure interrupt
		 * masks from SAR RAM. Secure interrupts are not supposed
		 * to be enabled from the HLOS, so overwrite the SAR
		 * location so that the secure interrupts remain disabled.
		 */
		sar_writel(0x0, WAKEUPGENENB_SECURE_OFFSET_CPU0, i);
		sar_writel(0x0, WAKEUPGENENB_SECURE_OFFSET_CPU1, i);
	}

	/* Save AuxBoot* registers */
	val = readl_relaxed(wakeupgen_base + OMAP_AUX_CORE_BOOT_0);
	writel_relaxed(val, sar_base + AUXCOREBOOT0_OFFSET);
	val = readl_relaxed(wakeupgen_base + OMAP_AUX_CORE_BOOT_1);
	writel_relaxed(val, sar_base + AUXCOREBOOT1_OFFSET);

	/* Save SyncReq generation logic */
	val = readl_relaxed(wakeupgen_base + OMAP_PTMSYNCREQ_MASK);
	writel_relaxed(val, sar_base + PTMSYNCREQ_MASK_OFFSET);
	val = readl_relaxed(wakeupgen_base + OMAP_PTMSYNCREQ_EN);
	writel_relaxed(val, sar_base + PTMSYNCREQ_EN_OFFSET);

	/* Set the Backup Bit Mask status */
	val = readl_relaxed(sar_base + SAR_BACKUP_STATUS_OFFSET);
	val |= SAR_BACKUP_STATUS_WAKEUPGEN;
	writel_relaxed(val, sar_base + SAR_BACKUP_STATUS_OFFSET);
}

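/* Same as omap4_irq_save_context(), but using the OMAP5 SAR RAM layout. */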
static inline void omap5_irq_save_context(void)
{
	u32 i, val;

	for (i = 0; i < irq_banks; i++) {
		/* Save the CPUx interrupt mask for IRQ 0 to 159 */
		val = wakeupgen_readl(i, 0);
		sar_writel(val, OMAP5_WAKEUPGENENB_OFFSET_CPU0, i);
		val = wakeupgen_readl(i, 1);
		sar_writel(val, OMAP5_WAKEUPGENENB_OFFSET_CPU1, i);
		sar_writel(0x0, OMAP5_WAKEUPGENENB_SECURE_OFFSET_CPU0, i);
		sar_writel(0x0, OMAP5_WAKEUPGENENB_SECURE_OFFSET_CPU1, i);
	}

	/* Save AuxBoot* registers */
	val = readl_relaxed(wakeupgen_base + OMAP_AUX_CORE_BOOT_0);
	writel_relaxed(val, sar_base + OMAP5_AUXCOREBOOT0_OFFSET);
	val = readl_relaxed(wakeupgen_base + OMAP_AUX_CORE_BOOT_1);
	writel_relaxed(val, sar_base + OMAP5_AUXCOREBOOT1_OFFSET);

	/* Set the Backup Bit Mask status */
	val = readl_relaxed(sar_base + OMAP5_SAR_BACKUP_STATUS_OFFSET);
	val |= SAR_BACKUP_STATUS_WAKEUPGEN;
	writel_relaxed(val, sar_base + OMAP5_SAR_BACKUP_STATUS_OFFSET);
}

/*
 * Save the WakeupGen interrupt context in SAR BANK3. Restore is done by
 * the ROM code. The WakeupGen IP is integrated along with the GIC to
 * manage interrupt wakeups from CPU low power states. It manages
 * masking/unmasking of Shared Peripheral Interrupts (SPI), so the
 * interrupt enable/disable control must be kept in sync and consistent
 * between WakeupGen and the GIC so that no interrupts are lost.
 */
static void irq_save_context(void)
{
	if (!sar_base)
		sar_base = omap4_get_sar_ram_base();

	if (soc_is_omap54xx())
		omap5_irq_save_context();
	else
		omap4_irq_save_context();
}

/*
 * Clear WakeupGen SAR backup status.
 */
static void irq_sar_clear(void)
{
	u32 val;
	u32 offset = SAR_BACKUP_STATUS_OFFSET;

	if (soc_is_omap54xx())
		offset = OMAP5_SAR_BACKUP_STATUS_OFFSET;

	val = readl_relaxed(sar_base + offset);
	val &= ~SAR_BACKUP_STATUS_WAKEUPGEN;
	writel_relaxed(val, sar_base + offset);
}

/*
 * Save GIC and Wakeupgen interrupt context using secure API
 * for HS/EMU devices.
 */
static void irq_save_secure_context(void)
{
	u32 ret;

	ret = omap_secure_dispatcher(OMAP4_HAL_SAVEGIC_INDEX,
				FLAG_START_CRITICAL,
				0, 0, 0, 0, 0);
	if (ret != API_HAL_RET_VALUE_OK)
		pr_err("GIC and Wakeupgen context save failed\n");
}
#endif

#ifdef CONFIG_HOTPLUG_CPU
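/*
 * CPU hotplug notifier: restore the saved WakeupGen masks when a CPU
 * comes online and mask all of its WakeupGen inputs once it is dead.
 */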
static int irq_cpu_hotplug_notify(struct notifier_block *self,
				  unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned int)hcpu;

	switch (action) {
	case CPU_ONLINE:
		wakeupgen_irqmask_all(cpu, 0);
		break;
	case CPU_DEAD:
		wakeupgen_irqmask_all(cpu, 1);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __refdata irq_hotplug_notifier = {
	.notifier_call = irq_cpu_hotplug_notify,
};

static void __init irq_hotplug_init(void)
{
	register_hotcpu_notifier(&irq_hotplug_notifier);
}
#else
static void __init irq_hotplug_init(void)
{}
#endif

#ifdef CONFIG_CPU_PM
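/*
 * CPU PM notifier: on cluster low power entry, save the WakeupGen
 * context (or both GIC and WakeupGen context via the secure API on
 * HS/EMU devices); on cluster exit, clear the SAR backup status on
 * GP devices.
 */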
static int irq_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
	switch (cmd) {
	case CPU_CLUSTER_PM_ENTER:
		if (omap_type() == OMAP2_DEVICE_TYPE_GP)
			irq_save_context();
		else
			irq_save_secure_context();
		break;
	case CPU_CLUSTER_PM_EXIT:
		if (omap_type() == OMAP2_DEVICE_TYPE_GP)
			irq_sar_clear();
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block irq_notifier_block = {
	.notifier_call = irq_notifier,
};

static void __init irq_pm_init(void)
{
	/* FIXME: Remove this when MPU OSWR support is added */
	if (!IS_PM44XX_ERRATUM(PM_OMAP4_CPU_OSWR_DISABLE))
		cpu_pm_register_notifier(&irq_notifier_block);
}
#else
static void __init irq_pm_init(void)
{}
#endif

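/* Expose the ioremapped WakeupGen base address to other platform code. */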
void __iomem *omap_get_wakeupgen_base(void)
{
	return wakeupgen_base;
}

int omap_secure_apis_support(void)
{
	return omap_secure_apis;
}

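/*
 * Hierarchical irq_chip: mask/unmask touch the WakeupGen registers and
 * then the parent, every other operation is passed straight to the
 * parent GIC.
 */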
static struct irq_chip wakeupgen_chip = {
	.name			= "WUGEN",
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_mask		= wakeupgen_mask,
	.irq_unmask		= wakeupgen_unmask,
	.irq_retrigger		= irq_chip_retrigger_hierarchy,
	.irq_set_type		= irq_chip_set_type_parent,
	.flags			= IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND,
#ifdef CONFIG_SMP
	.irq_set_affinity	= irq_chip_set_affinity_parent,
#endif
};

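/*
 * Translate a three-cell GIC-style specifier (SPI/PPI, number, flags)
 * into a hwirq number and trigger type; only SPIs are accepted here.
 */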
static int wakeupgen_domain_xlate(struct irq_domain *domain,
				  struct device_node *controller,
				  const u32 *intspec,
				  unsigned int intsize,
				  unsigned long *out_hwirq,
				  unsigned int *out_type)
{
	if (domain->of_node != controller)
		return -EINVAL;	/* Shouldn't happen, really... */
	if (intsize != 3)
		return -EINVAL;	/* Not GIC compliant */
	if (intspec[0] != 0)
		return -EINVAL;	/* No PPI should point to this domain */

	*out_hwirq = intspec[1];
	*out_type = intspec[2];
	return 0;
}

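/*
 * Allocate interrupts in the WakeupGen domain and forward the same
 * GIC-style specifier to the parent domain.
 */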
static int wakeupgen_domain_alloc(struct irq_domain *domain,
				  unsigned int virq,
				  unsigned int nr_irqs, void *data)
{
	struct of_phandle_args *args = data;
	struct of_phandle_args parent_args;
	irq_hw_number_t hwirq;
	int i;

	if (args->args_count != 3)
		return -EINVAL;	/* Not GIC compliant */
	if (args->args[0] != 0)
		return -EINVAL;	/* No PPI should point to this domain */

	hwirq = args->args[1];
	if (hwirq >= MAX_IRQS)
		return -EINVAL;	/* Can't deal with this */

	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
					      &wakeupgen_chip, NULL);

	parent_args = *args;
	parent_args.np = domain->parent->of_node;
	return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &parent_args);
}

static struct irq_domain_ops wakeupgen_domain_ops = {
	.xlate	= wakeupgen_domain_xlate,
	.alloc	= wakeupgen_domain_alloc,
	.free	= irq_domain_free_irqs_common,
};

/*
 * Initialise the wakeupgen module.
 */
static int __init wakeupgen_init(struct device_node *node,
				 struct device_node *parent)
{
	struct irq_domain *parent_domain, *domain;
	int i;
	unsigned int boot_cpu = smp_processor_id();
	u32 val;

	if (!parent) {
		pr_err("%s: no parent, giving up\n", node->full_name);
		return -ENODEV;
	}

	parent_domain = irq_find_host(parent);
	if (!parent_domain) {
		pr_err("%s: unable to obtain parent domain\n", node->full_name);
		return -ENXIO;
	}
	/* Not supported on OMAP4 ES1.0 silicon */
	if (omap_rev() == OMAP4430_REV_ES1_0) {
		WARN(1, "WakeupGen: Not supported on OMAP4430 ES1.0\n");
		return -EPERM;
	}

	/* Static mapping, never released */
	wakeupgen_base = of_iomap(node, 0);
	if (WARN_ON(!wakeupgen_base))
		return -ENOMEM;

	if (cpu_is_omap44xx()) {
		irq_banks = OMAP4_NR_BANKS;
		max_irqs = OMAP4_NR_IRQS;
		omap_secure_apis = 1;
	} else if (soc_is_am43xx()) {
		irq_banks = AM43XX_NR_REG_BANKS;
		max_irqs = AM43XX_IRQS;
	}

	domain = irq_domain_add_hierarchy(parent_domain, 0, max_irqs,
					  node, &wakeupgen_domain_ops,
					  NULL);
	if (!domain) {
		iounmap(wakeupgen_base);
		return -ENOMEM;
	}

	/* Clear all IRQ bitmasks at WakeupGen level */
	for (i = 0; i < irq_banks; i++) {
		wakeupgen_writel(0, i, CPU0_ID);
		if (!soc_is_am43xx())
			wakeupgen_writel(0, i, CPU1_ID);
	}

	/*
	 * FIXME: Add support to set_smp_affinity() once the core
	 * GIC code has necessary hooks in place.
	 */

	/* Associate all the IRQs to boot CPU like GIC init does. */
	for (i = 0; i < max_irqs; i++)
		irq_target_cpu[i] = boot_cpu;

	/*
	 * Enable the OMAP5 ES2 PM mode using ES2_PM_MODE in AMBA_IF_MODE:
	 * 0x0: ES1 behavior, CPU cores enter and exit OFF mode together.
	 * 0x1: ES2 behavior, CPU cores are allowed to enter/exit OFF mode
	 * independently.
	 * This only needs to be set once since the register is in the
	 * always ON power domain.
	 *
	 * ES1 behavior is no longer supported. OMAP5 is assumed to be
	 * ES2.0, and the same applies to DRA7.
	 */
	if (soc_is_omap54xx() || soc_is_dra7xx()) {
		val = __raw_readl(wakeupgen_base + OMAP_AMBA_IF_MODE);
		val |= BIT(5);
		omap_smc1(OMAP5_MON_AMBA_IF_INDEX, val);
	}

	irq_hotplug_init();
	irq_pm_init();

	return 0;
}

/*
 * We cannot use the IRQCHIP_DECLARE macro that lives in
 * drivers/irqchip, so we're forced to roll our own. Not very nice.
 */
OF_DECLARE_2(irqchip, ti_wakeupgen, "ti,omap4-wugen-mpu", wakeupgen_init);