/*
 * linux/arch/arm/mach-omap2/irq.c
 *
 * Interrupt handler for OMAP2 boards.
 *
 * Copyright (C) 2005 Nokia Corporation
 * Author: Paul Mundt <paul.mundt@nokia.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>

#include <asm/exception.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

/* Define these here for now until we drop all board-files */
#define OMAP24XX_IC_BASE	0x480fe000
#define OMAP34XX_IC_BASE	0x48200000

/* selected INTC register offsets */

#define INTC_REVISION		0x0000
#define INTC_SYSCONFIG		0x0010
#define INTC_SYSSTATUS		0x0014
#define INTC_SIR		0x0040
#define INTC_CONTROL		0x0048
#define INTC_PROTECTION		0x004C
#define INTC_IDLE		0x0050
#define INTC_THRESHOLD		0x0068
#define INTC_MIR0		0x0084
#define INTC_MIR_CLEAR0		0x0088
#define INTC_MIR_SET0		0x008c
#define INTC_PENDING_IRQ0	0x0098
#define INTC_PENDING_IRQ1	0x00b8
#define INTC_PENDING_IRQ2	0x00d8
#define INTC_PENDING_IRQ3	0x00f8
#define INTC_ILR0		0x0100

#define ACTIVEIRQ_MASK		0x7f	/* omap2/3 active interrupt bits */
#define SPURIOUSIRQ_MASK	(0x1ffffff << 7)
#define INTCPS_NR_ILR_REGS	128
#define INTCPS_NR_MIR_REGS	4

#define INTC_IDLE_FUNCIDLE	(1 << 0)
#define INTC_IDLE_TURBO		(1 << 1)

#define INTC_PROTECTION_ENABLE	(1 << 0)

struct omap_intc_regs {
	u32 sysconfig;
	u32 protection;
	u32 idle;
	u32 threshold;
	u32 ilr[INTCPS_NR_ILR_REGS];
	u32 mir[INTCPS_NR_MIR_REGS];
};
static struct omap_intc_regs intc_context;

static struct irq_domain *domain;
static void __iomem *omap_irq_base;
static int omap_nr_pending = 3;
static int omap_nr_irqs = 96;

static void intc_writel(u32 reg, u32 val)
{
	writel_relaxed(val, omap_irq_base + reg);
}

static u32 intc_readl(u32 reg)
{
	return readl_relaxed(omap_irq_base + reg);
}

void omap_intc_save_context(void)
{
	int i;

	intc_context.sysconfig = intc_readl(INTC_SYSCONFIG);
	intc_context.protection = intc_readl(INTC_PROTECTION);
	intc_context.idle = intc_readl(INTC_IDLE);
	intc_context.threshold = intc_readl(INTC_THRESHOLD);

	for (i = 0; i < omap_nr_irqs; i++)
		intc_context.ilr[i] = intc_readl(INTC_ILR0 + 0x4 * i);
	for (i = 0; i < INTCPS_NR_MIR_REGS; i++)
		intc_context.mir[i] = intc_readl(INTC_MIR0 + 0x20 * i);
}

void omap_intc_restore_context(void)
{
	int i;

	intc_writel(INTC_SYSCONFIG, intc_context.sysconfig);
	intc_writel(INTC_PROTECTION, intc_context.protection);
	intc_writel(INTC_IDLE, intc_context.idle);
	intc_writel(INTC_THRESHOLD, intc_context.threshold);

	for (i = 0; i < omap_nr_irqs; i++)
		intc_writel(INTC_ILR0 + 0x4 * i, intc_context.ilr[i]);

	for (i = 0; i < INTCPS_NR_MIR_REGS; i++)
		intc_writel(INTC_MIR0 + 0x20 * i, intc_context.mir[i]);
	/* MIRs are saved and restored with the other PRCM registers */
}
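
/*
 * Helpers called around idle transitions: turn INTC autoidle off before
 * entering idle (see the errata note below) and restore it on resume.
 */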
void omap3_intc_prepare_idle(void)
{
	/*
	 * Disable autoidle as it can stall the interrupt controller,
	 * cf. errata ID i540 for 3430 (all revisions up to 3.1.x)
	 */
	intc_writel(INTC_SYSCONFIG, 0);
	intc_writel(INTC_IDLE, INTC_IDLE_TURBO);
}

void omap3_intc_resume_idle(void)
{
	/* Re-enable autoidle */
	intc_writel(INTC_SYSCONFIG, 1);
	intc_writel(INTC_IDLE, 0);
}

/* XXX: FIQ and additional INTC support (only MPU at the moment) */
static void omap_ack_irq(struct irq_data *d)
{
	intc_writel(INTC_CONTROL, 0x1);
}

static void omap_mask_ack_irq(struct irq_data *d)
{
	irq_gc_mask_disable_reg(d);
	omap_ack_irq(d);
}

static void __init omap_irq_soft_reset(void)
{
	unsigned long tmp;

	tmp = intc_readl(INTC_REVISION) & 0xff;

	pr_info("IRQ: Found an INTC at 0x%p (revision %ld.%ld) with %d interrupts\n",
		omap_irq_base, tmp >> 4, tmp & 0xf, omap_nr_irqs);

	tmp = intc_readl(INTC_SYSCONFIG);
	tmp |= 1 << 1;	/* soft reset */
	intc_writel(INTC_SYSCONFIG, tmp);

	while (!(intc_readl(INTC_SYSSTATUS) & 0x1))
		/* Wait for reset to complete */;

	/* Enable autoidle */
	intc_writel(INTC_SYSCONFIG, 1 << 0);
}

int omap_irq_pending(void)
{
	int i;

	for (i = 0; i < omap_nr_pending; i++)
		if (intc_readl(INTC_PENDING_IRQ0 + (0x20 * i)))
			return 1;
	return 0;
}

void omap3_intc_suspend(void)
{
	/* A pending interrupt would prevent OMAP from entering suspend */
	omap_ack_irq(NULL);
}

static int __init omap_alloc_gc_of(struct irq_domain *d, void __iomem *base)
{
	int ret;
	int i;

	ret = irq_alloc_domain_generic_chips(d, 32, 1, "INTC",
			handle_level_irq, IRQ_NOREQUEST | IRQ_NOPROBE,
			IRQ_LEVEL, 0);
	if (ret) {
		pr_warn("Failed to allocate irq chips\n");
		return ret;
	}

	for (i = 0; i < omap_nr_pending; i++) {
		struct irq_chip_generic *gc;
		struct irq_chip_type *ct;

		gc = irq_get_domain_generic_chip(d, 32 * i);
		gc->reg_base = base;
		ct = gc->chip_types;

		ct->type = IRQ_TYPE_LEVEL_MASK;
		ct->handler = handle_level_irq;

		ct->chip.irq_ack = omap_mask_ack_irq;
		ct->chip.irq_mask = irq_gc_mask_disable_reg;
		ct->chip.irq_unmask = irq_gc_unmask_enable_reg;

		ct->chip.flags |= IRQCHIP_SKIP_SET_WAKE;

		ct->regs.enable = INTC_MIR_CLEAR0 + 32 * i;
		ct->regs.disable = INTC_MIR_SET0 + 32 * i;
	}

	return 0;
}

static void __init omap_alloc_gc_legacy(void __iomem *base,
					unsigned int irq_start, unsigned int num)
{
	struct irq_chip_generic *gc;
	struct irq_chip_type *ct;

	gc = irq_alloc_generic_chip("INTC", 1, irq_start, base,
				    handle_level_irq);
	ct = gc->chip_types;
	ct->chip.irq_ack = omap_mask_ack_irq;
	ct->chip.irq_mask = irq_gc_mask_disable_reg;
	ct->chip.irq_unmask = irq_gc_unmask_enable_reg;
	ct->chip.flags |= IRQCHIP_SKIP_SET_WAKE;

	ct->regs.enable = INTC_MIR_CLEAR0;
	ct->regs.disable = INTC_MIR_SET0;
	irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE,
			       IRQ_NOREQUEST | IRQ_NOPROBE, 0);
}

static int __init omap_init_irq_of(struct device_node *node)
{
	int ret;

	omap_irq_base = of_iomap(node, 0);
	if (WARN_ON(!omap_irq_base))
		return -ENOMEM;

	domain = irq_domain_add_linear(node, omap_nr_irqs,
				       &irq_generic_chip_ops, NULL);

	omap_irq_soft_reset();

	ret = omap_alloc_gc_of(domain, omap_irq_base);
	if (ret < 0)
		irq_domain_remove(domain);

	return ret;
}
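
/*
 * Legacy setup path, also used by the OMAP2/3 DT workaround in
 * omap_init_irq(): map the INTC at the given physical base, preallocate a
 * contiguous range of IRQ descriptors and cover it with a legacy irq_domain
 * plus one generic chip per 32 interrupt lines.
 */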
static int __init omap_init_irq_legacy(u32 base, struct device_node *node)
{
	int j, irq_base;

	omap_irq_base = ioremap(base, SZ_4K);
	if (WARN_ON(!omap_irq_base))
		return -ENOMEM;

	irq_base = irq_alloc_descs(-1, 0, omap_nr_irqs, 0);
	if (irq_base < 0) {
		pr_warn("Couldn't allocate IRQ numbers\n");
		irq_base = 0;
	}

	domain = irq_domain_add_legacy(node, omap_nr_irqs, irq_base, 0,
				       &irq_domain_simple_ops, NULL);

	omap_irq_soft_reset();

	for (j = 0; j < omap_nr_irqs; j += 32)
		omap_alloc_gc_legacy(omap_irq_base + j, j + irq_base, 32);

	return 0;
}

static void __init omap_irq_enable_protection(void)
{
	u32 reg;

	reg = intc_readl(INTC_PROTECTION);
	reg |= INTC_PROTECTION_ENABLE;
	intc_writel(INTC_PROTECTION, reg);
}

static int __init omap_init_irq(u32 base, struct device_node *node)
{
	int ret;

	/*
	 * FIXME: the legacy OMAP DMA driver sitting under
	 * arch/arm/plat-omap/dma.c is still not ready for linear IRQ domains;
	 * because of that we need to temporarily "blacklist" OMAP2 and OMAP3
	 * devices from using the linear IRQ domain until that driver is
	 * finally fixed.
	 */
	if (of_device_is_compatible(node, "ti,omap2-intc") ||
	    of_device_is_compatible(node, "ti,omap3-intc")) {
		struct resource res;

		if (of_address_to_resource(node, 0, &res))
			return -ENOMEM;

		base = res.start;
		ret = omap_init_irq_legacy(base, node);
	} else if (node) {
		ret = omap_init_irq_of(node);
	} else {
		ret = omap_init_irq_legacy(base, NULL);
	}

	if (ret == 0)
		omap_irq_enable_protection();

	return ret;
}
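
/*
 * Low-level IRQ entry: read the number of the currently active interrupt
 * from INTC_SIR, drop spurious results (see the comment below) and hand the
 * interrupt to the generic IRQ layer via the irq domain.
 */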
static asmlinkage void __exception_irq_entry
omap_intc_handle_irq(struct pt_regs *regs)
{
	extern unsigned long irq_err_count;
	u32 irqnr;

	irqnr = intc_readl(INTC_SIR);

	/*
	 * A spurious IRQ can result if the interrupt that triggered the
	 * sorting is no longer active during the sorting (10 INTC
	 * functional clock cycles after interrupt assertion), or if a
	 * change in the interrupt mask affected the result during the
	 * sorting time. There is no special handling required except
	 * ignoring the SIR register value just read and retrying.
	 * See section 6.2.5 of the AM335x TRM, Literature Number: SPRUH73K.
	 *
	 * Many times, a spurious interrupt situation has been fixed by
	 * adding a flush for the posted write acking the IRQ in the
	 * device driver. Typically, this is going to be the device driver
	 * whose interrupt was handled just before the spurious IRQ
	 * occurred. Pay attention to those device drivers if you run
	 * into the spurious IRQ condition below.
	 */
	if (unlikely((irqnr & SPURIOUSIRQ_MASK) == SPURIOUSIRQ_MASK)) {
		pr_err_once("%s: spurious irq!\n", __func__);
		irq_err_count++;
		omap_ack_irq(NULL);
		return;
	}

	irqnr &= ACTIVEIRQ_MASK;
	handle_domain_irq(domain, irqnr, regs);
}

void __init omap3_init_irq(void)
{
	omap_nr_irqs = 96;
	omap_nr_pending = 3;
	omap_init_irq(OMAP34XX_IC_BASE, NULL);
	set_handle_irq(omap_intc_handle_irq);
}

static int __init intc_of_init(struct device_node *node,
			       struct device_node *parent)
{
	int ret;

	omap_nr_pending = 3;
	omap_nr_irqs = 96;

	if (WARN_ON(!node))
		return -ENODEV;

	if (of_device_is_compatible(node, "ti,dm814-intc") ||
	    of_device_is_compatible(node, "ti,dm816-intc") ||
	    of_device_is_compatible(node, "ti,am33xx-intc")) {
		omap_nr_irqs = 128;
		omap_nr_pending = 4;
	}

	ret = omap_init_irq(-1, of_node_get(node));
	if (ret < 0)
		return ret;

	set_handle_irq(omap_intc_handle_irq);

	return 0;
}

IRQCHIP_DECLARE(omap2_intc, "ti,omap2-intc", intc_of_init);
IRQCHIP_DECLARE(omap3_intc, "ti,omap3-intc", intc_of_init);
IRQCHIP_DECLARE(dm814x_intc, "ti,dm814-intc", intc_of_init);
IRQCHIP_DECLARE(dm816x_intc, "ti,dm816-intc", intc_of_init);
IRQCHIP_DECLARE(am33xx_intc, "ti,am33xx-intc", intc_of_init);