/* -*- linux-c -*-
 * linux/arch/blackfin/kernel/ipipe.c
 *
 * Copyright (C) 2005-2007 Philippe Gerum.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
 * USA; either version 2 of the License, or (at your option) any later
 * version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Architecture-dependent I-pipe support for the Blackfin.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/kthread.h>
#include <linux/unistd.h>
#include <linux/io.h>
#include <linux/atomic.h>
#include <asm/irq_handler.h>

DEFINE_PER_CPU(struct pt_regs, __ipipe_tick_regs);

asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs);

static void __ipipe_no_irqtail(void);

unsigned long __ipipe_irq_tail_hook = (unsigned long)&__ipipe_no_irqtail;
EXPORT_SYMBOL(__ipipe_irq_tail_hook);

unsigned long __ipipe_core_clock;
EXPORT_SYMBOL(__ipipe_core_clock);

unsigned long __ipipe_freq_scale;
EXPORT_SYMBOL(__ipipe_freq_scale);

atomic_t __ipipe_irq_lvdepth[IVG15 + 1];

unsigned long __ipipe_irq_lvmask = bfin_no_irqs;
EXPORT_SYMBOL(__ipipe_irq_lvmask);

static void __ipipe_ack_irq(unsigned irq, struct irq_desc *desc)
{
	desc->ipipe_ack(irq, desc);
}

/*
 * __ipipe_enable_pipeline() -- We are running on the boot CPU, hw
 * interrupts are off, and secondary CPUs are still lost in space.
 */
void __ipipe_enable_pipeline(void)
{
	unsigned irq;

	__ipipe_core_clock = get_cclk(); /* Fetch this once. */
	__ipipe_freq_scale = 1000000000UL / __ipipe_core_clock;

	for (irq = 0; irq < NR_IRQS; ++irq)
		ipipe_virtualize_irq(ipipe_root_domain,
				     irq,
				     (ipipe_irq_handler_t)&asm_do_IRQ,
				     NULL,
				     &__ipipe_ack_irq,
				     IPIPE_HANDLE_MASK | IPIPE_PASS_MASK);
}

/*
 * __ipipe_handle_irq() -- IPIPE's generic IRQ handler. An optimistic
 * interrupt protection log is maintained here for each domain. Hw
 * interrupts are masked on entry.
 */
void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
{
	struct ipipe_percpu_domain_data *p = ipipe_root_cpudom_ptr();
	struct ipipe_domain *this_domain, *next_domain;
	struct list_head *head, *pos;
	struct ipipe_irqdesc *idesc;
	int m_ack, s = -1;

	/*
	 * Software-triggered IRQs do not need any ack.  The contents
	 * of the register frame should only be used when processing
	 * the timer interrupt, but not for handling any other
	 * interrupt.
	 */
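	/*
	 * Note: a NULL register frame denotes an interrupt posted from
	 * software via ipipe_trigger_irq(), so there is no hardware
	 * source left to acknowledge in that case. The core and system
	 * timer interrupts are presumably acknowledged by their
	 * low-level handlers, hence they are treated as pre-acked too.
	 */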
	m_ack = (regs == NULL || irq == IRQ_SYSTMR || irq == IRQ_CORETMR);
	this_domain = __ipipe_current_domain;
	idesc = &this_domain->irqs[irq];

	if (unlikely(test_bit(IPIPE_STICKY_FLAG, &idesc->control)))
		head = &this_domain->p_link;
	else {
		head = __ipipe_pipeline.next;
		next_domain = list_entry(head, struct ipipe_domain, p_link);
		idesc = &next_domain->irqs[irq];
		if (likely(test_bit(IPIPE_WIRED_FLAG, &idesc->control))) {
			if (!m_ack && idesc->acknowledge != NULL)
				idesc->acknowledge(irq, irq_to_desc(irq));
			if (test_bit(IPIPE_SYNCDEFER_FLAG, &p->status))
				s = __test_and_set_bit(IPIPE_STALL_FLAG,
						       &p->status);
			__ipipe_dispatch_wired(next_domain, irq);
			goto out;
		}
	}

	/* Ack the interrupt. */

	pos = head;
	while (pos != &__ipipe_pipeline) {
		next_domain = list_entry(pos, struct ipipe_domain, p_link);
		idesc = &next_domain->irqs[irq];
		if (test_bit(IPIPE_HANDLE_FLAG, &idesc->control)) {
			__ipipe_set_irq_pending(next_domain, irq);
			if (!m_ack && idesc->acknowledge != NULL) {
				idesc->acknowledge(irq, irq_to_desc(irq));
				m_ack = 1;
			}
		}
		if (!test_bit(IPIPE_PASS_FLAG, &idesc->control))
			break;
		pos = next_domain->p_link.next;
	}

	/*
	 * Now walk the pipeline, yielding control to the highest
	 * priority domain that has pending interrupt(s) or
	 * immediately to the current domain if the interrupt has been
	 * marked as 'sticky'. This search does not go beyond the
	 * current domain in the pipeline. We also enforce the
	 * additional root stage lock (blackfin-specific).
	 */
	if (test_bit(IPIPE_SYNCDEFER_FLAG, &p->status))
		s = __test_and_set_bit(IPIPE_STALL_FLAG, &p->status);

	/*
	 * If the interrupt preempted the head domain, then do not
	 * even try to walk the pipeline, unless an interrupt is
	 * pending for it.
	 */
	if (test_bit(IPIPE_AHEAD_FLAG, &this_domain->flags) &&
	    !__ipipe_ipending_p(ipipe_head_cpudom_ptr()))
		goto out;

	__ipipe_walk_pipeline(head);
out:
	if (!s)
		__clear_bit(IPIPE_STALL_FLAG, &p->status);
}

void __ipipe_enable_irqdesc(struct ipipe_domain *ipd, unsigned irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int prio = __ipipe_get_irq_priority(irq);

	desc->depth = 0;
	if (ipd != &ipipe_root &&
	    atomic_inc_return(&__ipipe_irq_lvdepth[prio]) == 1)
		__set_bit(prio, &__ipipe_irq_lvmask);
}
EXPORT_SYMBOL(__ipipe_enable_irqdesc);

void __ipipe_disable_irqdesc(struct ipipe_domain *ipd, unsigned irq)
{
	int prio = __ipipe_get_irq_priority(irq);

	if (ipd != &ipipe_root &&
	    atomic_dec_and_test(&__ipipe_irq_lvdepth[prio]))
		__clear_bit(prio, &__ipipe_irq_lvmask);
}
EXPORT_SYMBOL(__ipipe_disable_irqdesc);

asmlinkage int __ipipe_syscall_root(struct pt_regs *regs)
{
	struct ipipe_percpu_domain_data *p;
	void (*hook)(void);
	int ret;

	WARN_ON_ONCE(irqs_disabled_hw());

	/*
	 * We need to run the IRQ tail hook each time we intercept a
	 * syscall, because we know that important operations might be
	 * pending there (e.g. Xenomai deferred rescheduling).
	 */
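	/*
	 * The tail hook defaults to __ipipe_no_irqtail() (a no-op, see
	 * below); a co-kernel such as Xenomai may install its own
	 * routine through __ipipe_irq_tail_hook.
	 */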
	hook = (__typeof__(hook))__ipipe_irq_tail_hook;
	hook();

	/*
	 * This routine returns:
	 *  0 -- if the syscall is to be passed to Linux;
	 * >0 -- if the syscall should not be passed to Linux, and no
	 *       tail work should be performed;
	 * <0 -- if the syscall should not be passed to Linux but the
	 *       tail work has to be performed (for handling signals etc).
	 */

	if (!__ipipe_syscall_watched_p(current, regs->orig_p0) ||
	    !__ipipe_event_monitored_p(IPIPE_EVENT_SYSCALL))
		return 0;

	ret = __ipipe_dispatch_event(IPIPE_EVENT_SYSCALL, regs);

	hard_local_irq_disable();

	/*
	 * This is the end of the syscall path, so we may
	 * safely assume a valid Linux task stack here.
	 */
	if (current->ipipe_flags & PF_EVTRET) {
		current->ipipe_flags &= ~PF_EVTRET;
		__ipipe_dispatch_event(IPIPE_EVENT_RETURN, regs);
	}

	if (!__ipipe_root_domain_p)
		ret = -1;
	else {
		p = ipipe_root_cpudom_ptr();
		if (__ipipe_ipending_p(p))
			__ipipe_sync_pipeline();
	}

	hard_local_irq_enable();

	return -ret;
}

static void __ipipe_no_irqtail(void)
{
}

int ipipe_get_sysinfo(struct ipipe_sysinfo *info)
{
	info->sys_nr_cpus = num_online_cpus();
	info->sys_cpu_freq = ipipe_cpu_freq();
	info->sys_hrtimer_irq = IPIPE_TIMER_IRQ;
	info->sys_hrtimer_freq = __ipipe_core_clock;
	info->sys_hrclock_freq = __ipipe_core_clock;

	return 0;
}

/*
 * ipipe_trigger_irq() -- Push the interrupt at the front of the
 * pipeline, just as if it had actually been received from a hw
 * source. Also works for virtual interrupts.
 */
int ipipe_trigger_irq(unsigned irq)
{
	unsigned long flags;

#ifdef CONFIG_IPIPE_DEBUG
	if (irq >= IPIPE_NR_IRQS ||
	    (ipipe_virtual_irq_p(irq)
	     && !test_bit(irq - IPIPE_VIRQ_BASE, &__ipipe_virtual_irq_map)))
		return -EINVAL;
#endif

	flags = hard_local_irq_save();
	__ipipe_handle_irq(irq, NULL);
	hard_local_irq_restore(flags);

	return 1;
}

asmlinkage void __ipipe_sync_root(void)
{
	void (*irq_tail_hook)(void) = (void (*)(void))__ipipe_irq_tail_hook;
	struct ipipe_percpu_domain_data *p;
	unsigned long flags;

	BUG_ON(irqs_disabled());

	flags = hard_local_irq_save();

	if (irq_tail_hook)
		irq_tail_hook();

	clear_thread_flag(TIF_IRQ_SYNC);

	p = ipipe_root_cpudom_ptr();
	if (__ipipe_ipending_p(p))
		__ipipe_sync_pipeline();

	hard_local_irq_restore(flags);
}

void ___ipipe_sync_pipeline(void)
{
	if (__ipipe_root_domain_p &&
	    test_bit(IPIPE_SYNCDEFER_FLAG, &ipipe_root_cpudom_var(status)))
		return;

	__ipipe_sync_stage();
}

void __ipipe_disable_root_irqs_hw(void)
{
	/*
	 * This code is called by the ins{bwl} routines (see
	 * arch/blackfin/lib/ins.S), which are heavily used by the
	 * network stack. It masks all interrupts but those handled by
	 * non-root domains, so that we keep decent network transfer
	 * rates for Linux without inducing pathological jitter for
	 * the real-time domain.
	 */
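	/*
	 * __ipipe_irq_lvmask is maintained by
	 * __ipipe_{enable,disable}_irqdesc(): a priority level bit is
	 * set while at least one non-root domain handles an IRQ at
	 * that level, so only those levels remain enabled here.
	 */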
	bfin_sti(__ipipe_irq_lvmask);
	__set_bit(IPIPE_STALL_FLAG, &ipipe_root_cpudom_var(status));
}

void __ipipe_enable_root_irqs_hw(void)
{
	__clear_bit(IPIPE_STALL_FLAG, &ipipe_root_cpudom_var(status));
	bfin_sti(bfin_irq_flags);
}

/*
 * We could use standard atomic bitops in the following root status
 * manipulation routines, but let's prepare for SMP support in the
 * same move, preventing CPU migration as required.
 */
void __ipipe_stall_root(void)
{
	unsigned long *p, flags;

	flags = hard_local_irq_save();
	p = &__ipipe_root_status;
	__set_bit(IPIPE_STALL_FLAG, p);
	hard_local_irq_restore(flags);
}
EXPORT_SYMBOL(__ipipe_stall_root);

unsigned long __ipipe_test_and_stall_root(void)
{
	unsigned long *p, flags;
	int x;

	flags = hard_local_irq_save();
	p = &__ipipe_root_status;
	x = __test_and_set_bit(IPIPE_STALL_FLAG, p);
	hard_local_irq_restore(flags);

	return x;
}
EXPORT_SYMBOL(__ipipe_test_and_stall_root);

unsigned long __ipipe_test_root(void)
{
	const unsigned long *p;
	unsigned long flags;
	int x;

	flags = hard_local_irq_save_smp();
	p = &__ipipe_root_status;
	x = test_bit(IPIPE_STALL_FLAG, p);
	hard_local_irq_restore_smp(flags);

	return x;
}
EXPORT_SYMBOL(__ipipe_test_root);

void __ipipe_lock_root(void)
{
	unsigned long *p, flags;

	flags = hard_local_irq_save();
	p = &__ipipe_root_status;
	__set_bit(IPIPE_SYNCDEFER_FLAG, p);
	hard_local_irq_restore(flags);
}
EXPORT_SYMBOL(__ipipe_lock_root);

void __ipipe_unlock_root(void)
{
	unsigned long *p, flags;

	flags = hard_local_irq_save();
	p = &__ipipe_root_status;
	__clear_bit(IPIPE_SYNCDEFER_FLAG, p);
	hard_local_irq_restore(flags);
}
EXPORT_SYMBOL(__ipipe_unlock_root);