/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2007 MIPS Technologies, Inc.
 * Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org>
 */
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/irq.h>

#include <asm/time.h>
#include <asm/cevt-r4k.h>

static int mips_next_event(unsigned long delta,
			   struct clock_event_device *evt)
{
	unsigned int cnt;
	int res;

	cnt = read_c0_count();
	cnt += delta;
	write_c0_compare(cnt);
	res = ((int)(read_c0_count() - cnt) >= 0) ? -ETIME : 0;
	return res;
}

DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
int cp0_timer_irq_installed;

/*
 * Possibly handle a performance counter interrupt.
 * Return true if the timer interrupt should not be checked.
 */
static inline int handle_perf_irq(int r2)
{
	/*
	 * The performance counter overflow interrupt may be shared with the
	 * timer interrupt (cp0_perfcount_irq < 0). If it is and a
	 * performance counter has overflowed (perf_irq() == IRQ_HANDLED)
	 * and we can't reliably determine if a counter interrupt has also
	 * happened (!r2) then don't check for a timer interrupt.
	 */
	return (cp0_perfcount_irq < 0) &&
		perf_irq() == IRQ_HANDLED &&
		!r2;
}

irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
{
	const int r2 = cpu_has_mips_r2_r6;
	struct clock_event_device *cd;
	int cpu = smp_processor_id();

	/*
	 * Suckage alert:
	 * Before R2 of the architecture there was no way to see if a
	 * performance counter interrupt was pending, so we have to run
	 * the performance counter interrupt handler anyway.
	 */
	if (handle_perf_irq(r2))
		return IRQ_HANDLED;

	/*
	 * The same applies to performance counter interrupts.  But with the
	 * above we now know that the reason we got here must be a timer
	 * interrupt.  Being the paranoiacs we are we check anyway.
	 */
	if (!r2 || (read_c0_cause() & CAUSEF_TI)) {
		/* Clear Count/Compare Interrupt */
		write_c0_compare(read_c0_compare());
		cd = &per_cpu(mips_clockevent_device, cpu);
		cd->event_handler(cd);

		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

struct irqaction c0_compare_irqaction = {
	.handler = c0_compare_interrupt,
	/*
	 * IRQF_SHARED: The timer interrupt may be shared with other
	 * interrupts such as perf counter and FDC interrupts.
	 */
	.flags = IRQF_PERCPU | IRQF_TIMER | IRQF_SHARED,
	.name = "timer",
};

void mips_event_handler(struct clock_event_device *dev)
{
}

/*
 * FIXME: This doesn't hold for the relocated E9000 compare interrupt.
 */
static int c0_compare_int_pending(void)
{
	/* When cpu_has_mips_r2, this checks Cause.TI instead of Cause.IP7 */
	return (read_c0_cause() >> cp0_compare_irq_shift) & (1ul << CAUSEB_IP);
}

/*
 * Compare interrupt can be routed and latched outside the core,
 * so wait up to worst case number of cycle counter ticks for timer interrupt
 * changes to propagate to the cause register.
 */
#define COMPARE_INT_SEEN_TICKS	50
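/*
 * Probe whether the Count/Compare interrupt is actually usable: first
 * ack and clear any interrupt that is already pending, then program
 * Compare with growing deltas until one lands in the future, and
 * finally verify that the interrupt both asserts once Count passes
 * Compare and deasserts again after the timer is acked by rewriting
 * Compare.
 */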
int c0_compare_int_usable(void)
{
	unsigned int delta;
	unsigned int cnt;

#ifdef CONFIG_KVM_GUEST
	return 1;
#endif

	/*
	 * IP7 already pending?  Try to clear it by acking the timer.
	 */
	if (c0_compare_int_pending()) {
		cnt = read_c0_count();
		write_c0_compare(cnt);
		back_to_back_c0_hazard();
		while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS))
			if (!c0_compare_int_pending())
				break;
		if (c0_compare_int_pending())
			return 0;
	}

	for (delta = 0x10; delta <= 0x400000; delta <<= 1) {
		cnt = read_c0_count();
		cnt += delta;
		write_c0_compare(cnt);
		back_to_back_c0_hazard();
		if ((int)(read_c0_count() - cnt) < 0)
			break;
		/* increase delta if the timer was already expired */
	}

	while ((int)(read_c0_count() - cnt) <= 0)
		;	/* Wait for expiry */

	while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS))
		if (c0_compare_int_pending())
			break;
	if (!c0_compare_int_pending())
		return 0;
	cnt = read_c0_count();
	write_c0_compare(cnt);
	back_to_back_c0_hazard();
	while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS))
		if (!c0_compare_int_pending())
			break;
	if (c0_compare_int_pending())
		return 0;

	/*
	 * Feels like a real count / compare timer.
	 */
	return 1;
}

unsigned int __weak get_c0_compare_int(void)
{
	return MIPS_CPU_IRQ_BASE + cp0_compare_irq;
}

int r4k_clockevent_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *cd;
	unsigned int irq;

	if (!cpu_has_counter || !mips_hpt_frequency)
		return -ENXIO;

	if (!c0_compare_int_usable())
		return -ENXIO;

	/*
	 * With vectored interrupts things are getting platform specific.
	 * get_c0_compare_int is a hook to allow a platform to return the
	 * interrupt number of its liking.
	 */
	irq = get_c0_compare_int();

	cd = &per_cpu(mips_clockevent_device, cpu);

	cd->name		= "MIPS";
	cd->features		= CLOCK_EVT_FEAT_ONESHOT |
				  CLOCK_EVT_FEAT_C3STOP |
				  CLOCK_EVT_FEAT_PERCPU;

	clockevent_set_clock(cd, mips_hpt_frequency);

	/* Calculate the min / max delta */
	cd->max_delta_ns	= clockevent_delta2ns(0x7fffffff, cd);
	cd->min_delta_ns	= clockevent_delta2ns(0x300, cd);

	cd->rating		= 300;
	cd->irq			= irq;
	cd->cpumask		= cpumask_of(cpu);
	cd->set_next_event	= mips_next_event;
	cd->event_handler	= mips_event_handler;

	clockevents_register_device(cd);

	if (cp0_timer_irq_installed)
		return 0;

	cp0_timer_irq_installed = 1;

	setup_irq(irq, &c0_compare_irqaction);

	return 0;
}
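
/*
 * A platform whose compare interrupt is routed somewhere other than
 * MIPS_CPU_IRQ_BASE + cp0_compare_irq can override the __weak
 * get_c0_compare_int() hook above.  A minimal sketch of such platform
 * code (hypothetical; MY_PLAT_TIMER_IRQ is not a real symbol):
 *
 *	unsigned int get_c0_compare_int(void)
 *	{
 *		return MY_PLAT_TIMER_IRQ;
 *	}
 */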