root/arch/mips/kernel/cevt-r4k.c


DEFINITIONS

This source file includes the following definitions.
  1. mips_next_event
  2. calculate_min_delta
  3. handle_perf_irq
  4. c0_compare_interrupt
  5. mips_event_handler
  6. c0_compare_int_pending
  7. c0_compare_int_usable
  8. get_c0_compare_int
  9. r4k_clockevent_init

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2007 MIPS Technologies, Inc.
 * Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org>
 */
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/irq.h>

#include <asm/time.h>
#include <asm/cevt-r4k.h>

static int mips_next_event(unsigned long delta,
                           struct clock_event_device *evt)
{
        unsigned int cnt;
        int res;

        cnt = read_c0_count();
        cnt += delta;
        write_c0_compare(cnt);
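        /*
         * If CP0_Count has already raced past the new CP0_Compare value,
         * this event may have been missed; return -ETIME so the
         * clockevents core can retry with a larger delta.
         */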
        res = ((int)(read_c0_count() - cnt) >= 0) ? -ETIME : 0;
        return res;
}

/**
 * calculate_min_delta() - Calculate a good minimum delta for mips_next_event().
 *
 * Running under virtualisation can introduce overhead into mips_next_event() in
 * the form of hypervisor emulation of CP0_Count/CP0_Compare registers,
 * potentially with an unnatural frequency, which makes a fixed min_delta_ns
 * value inappropriate as it may be too small.
 *
 * It can also introduce occasional latency from the guest being descheduled.
 *
 * This function calculates a good minimum delta based roughly on the 75th
 * percentile of the time taken to do the mips_next_event() sequence, in order
 * to handle potentially higher overhead while also eliminating outliers due to
 * unpredictable hypervisor latency (which can be handled by retries).
 *
 * Return:      An appropriate minimum delta for the clock event device.
 */
static unsigned int calculate_min_delta(void)
{
        unsigned int cnt, i, j, k, l;
        unsigned int buf1[4], buf2[3];
        unsigned int min_delta;

        /*
         * Calculate the median of 5 75th percentiles of 5 samples of how long
         * it takes to set CP0_Compare = CP0_Count + delta.
         */
        for (i = 0; i < 5; ++i) {
                for (j = 0; j < 5; ++j) {
                        /*
                         * This is like the code in mips_next_event(), and
                         * directly measures the borderline "safe" delta.
                         */
                        cnt = read_c0_count();
                        write_c0_compare(cnt);
                        cnt = read_c0_count() - cnt;

                        /* Sorted insert into buf1 */
                        for (k = 0; k < j; ++k) {
                                if (cnt < buf1[k]) {
                                        l = min_t(unsigned int,
                                                  j, ARRAY_SIZE(buf1) - 1);
                                        for (; l > k; --l)
                                                buf1[l] = buf1[l - 1];
                                        break;
                                }
                        }
                        if (k < ARRAY_SIZE(buf1))
                                buf1[k] = cnt;
                }

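                /*
                 * buf1[] now holds the four smallest of this pass's five
                 * samples in ascending order, so its last element is the
                 * fourth smallest of five, i.e. the 75th percentile sample.
                 */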
                /* Sorted insert of 75th percentile into buf2 */
                for (k = 0; k < i && k < ARRAY_SIZE(buf2); ++k) {
                        if (buf1[ARRAY_SIZE(buf1) - 1] < buf2[k]) {
                                l = min_t(unsigned int,
                                          i, ARRAY_SIZE(buf2) - 1);
                                for (; l > k; --l)
                                        buf2[l] = buf2[l - 1];
                                break;
                        }
                }
                if (k < ARRAY_SIZE(buf2))
                        buf2[k] = buf1[ARRAY_SIZE(buf1) - 1];
        }

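        /*
         * buf2[] now holds the three smallest of the five per-pass 75th
         * percentiles in ascending order, so its last element is their
         * median.
         */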
        /* Use 2 * median of 75th percentiles */
        min_delta = buf2[ARRAY_SIZE(buf2) - 1] * 2;

        /* Don't go too low */
        if (min_delta < 0x300)
                min_delta = 0x300;

        pr_debug("%s: median 75th percentile=%#x, min_delta=%#x\n",
                 __func__, buf2[ARRAY_SIZE(buf2) - 1], min_delta);
        return min_delta;
}

DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
int cp0_timer_irq_installed;

/*
 * Possibly handle a performance counter interrupt.
 * Return true if the timer interrupt should not be checked.
 */
static inline int handle_perf_irq(int r2)
{
        /*
         * The performance counter overflow interrupt may be shared with the
         * timer interrupt (cp0_perfcount_irq < 0). If it is and a
         * performance counter has overflowed (perf_irq() == IRQ_HANDLED)
         * and we can't reliably determine if a counter interrupt has also
         * happened (!r2) then don't check for a timer interrupt.
         */
        return (cp0_perfcount_irq < 0) &&
                perf_irq() == IRQ_HANDLED &&
                !r2;
}

irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
{
        const int r2 = cpu_has_mips_r2_r6;
        struct clock_event_device *cd;
        int cpu = smp_processor_id();

        /*
         * Suckage alert:
         * Before R2 of the architecture there was no way to see if a
         * performance counter interrupt was pending, so we have to run
         * the performance counter interrupt handler anyway.
         */
        if (handle_perf_irq(r2))
                return IRQ_HANDLED;

        /*
         * The same applies to performance counter interrupts.  But with the
         * above we now know that the reason we got here must be a timer
         * interrupt.  Being the paranoiacs we are we check anyway.
         */
        if (!r2 || (read_c0_cause() & CAUSEF_TI)) {
                /* Clear Count/Compare Interrupt */
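                /*
                 * Any write to CP0_Compare acknowledges the timer interrupt;
                 * rewriting the current value does so without disturbing the
                 * next programmed event.
                 */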
                write_c0_compare(read_c0_compare());
                cd = &per_cpu(mips_clockevent_device, cpu);
                cd->event_handler(cd);

                return IRQ_HANDLED;
        }

        return IRQ_NONE;
}

struct irqaction c0_compare_irqaction = {
        .handler = c0_compare_interrupt,
        /*
         * IRQF_SHARED: The timer interrupt may be shared with other interrupts
         * such as perf counter and FDC interrupts.
         */
        .flags = IRQF_PERCPU | IRQF_TIMER | IRQF_SHARED,
        .name = "timer",
};

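/*
 * Dummy event handler; the clockevents core installs the real handler
 * once the device is registered and put into use.
 */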
void mips_event_handler(struct clock_event_device *dev)
{
}

/*
 * FIXME: This doesn't hold for the relocated E9000 compare interrupt.
 */
static int c0_compare_int_pending(void)
{
        /* When cpu_has_mips_r2, this checks Cause.TI instead of Cause.IP7 */
        return (read_c0_cause() >> cp0_compare_irq_shift) & (1ul << CAUSEB_IP);
}

/*
 * Compare interrupt can be routed and latched outside the core,
 * so wait up to worst case number of cycle counter ticks for timer interrupt
 * changes to propagate to the cause register.
 */
#define COMPARE_INT_SEEN_TICKS 50

int c0_compare_int_usable(void)
{
        unsigned int delta;
        unsigned int cnt;

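        /*
         * Under CONFIG_KVM_GUEST the CP0 timer is emulated by the hypervisor,
         * so skip the probe below and assume the timer is usable.
         */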
#ifdef CONFIG_KVM_GUEST
        return 1;
#endif

        /*
         * IP7 already pending?  Try to clear it by acking the timer.
         */
        if (c0_compare_int_pending()) {
                cnt = read_c0_count();
                write_c0_compare(cnt);
                back_to_back_c0_hazard();
                while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS))
                        if (!c0_compare_int_pending())
                                break;
                if (c0_compare_int_pending())
                        return 0;
        }

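        /*
         * Program CP0_Compare in the future, starting with a small delta and
         * doubling it until the write is observed to land ahead of CP0_Count.
         */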
        for (delta = 0x10; delta <= 0x400000; delta <<= 1) {
                cnt = read_c0_count();
                cnt += delta;
                write_c0_compare(cnt);
                back_to_back_c0_hazard();
                if ((int)(read_c0_count() - cnt) < 0)
                        break;
                /* increase delta if the timer was already expired */
        }

        while ((int)(read_c0_count() - cnt) <= 0)
                ;       /* Wait for expiry */

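        /*
         * The programmed event has now expired; the compare interrupt should
         * become pending within COMPARE_INT_SEEN_TICKS counter ticks.
         */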
        while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS))
                if (c0_compare_int_pending())
                        break;
        if (!c0_compare_int_pending())
                return 0;
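        /*
         * Ack the interrupt by writing CP0_Compare and make sure it
         * deasserts within the same window.
         */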
        cnt = read_c0_count();
        write_c0_compare(cnt);
        back_to_back_c0_hazard();
        while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS))
                if (!c0_compare_int_pending())
                        break;
        if (c0_compare_int_pending())
                return 0;

        /*
         * Feels like a real count / compare timer.
         */
        return 1;
}

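/*
 * Platforms that route the timer interrupt differently can override this
 * __weak default from their own code.
 */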
unsigned int __weak get_c0_compare_int(void)
{
        return MIPS_CPU_IRQ_BASE + cp0_compare_irq;
}

int r4k_clockevent_init(void)
{
        unsigned int cpu = smp_processor_id();
        struct clock_event_device *cd;
        unsigned int irq, min_delta;

        if (!cpu_has_counter || !mips_hpt_frequency)
                return -ENXIO;

        if (!c0_compare_int_usable())
                return -ENXIO;

        /*
         * With vectored interrupts things are getting platform specific.
         * get_c0_compare_int is a hook to allow a platform to return the
         * interrupt number of its liking.
         */
        irq = get_c0_compare_int();

        cd = &per_cpu(mips_clockevent_device, cpu);

        cd->name                = "MIPS";
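        /*
         * CLOCK_EVT_FEAT_C3STOP tells the core that this timer may stop in
         * deep idle states, so a broadcast device can take over if needed.
         */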
        cd->features            = CLOCK_EVT_FEAT_ONESHOT |
                                  CLOCK_EVT_FEAT_C3STOP |
                                  CLOCK_EVT_FEAT_PERCPU;

        min_delta               = calculate_min_delta();

        cd->rating              = 300;
        cd->irq                 = irq;
        cd->cpumask             = cpumask_of(cpu);
        cd->set_next_event      = mips_next_event;
        cd->event_handler       = mips_event_handler;

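        /*
         * 0x7fffffff is the largest delta for which the signed 32-bit
         * Count/Compare comparison in mips_next_event() stays unambiguous.
         */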
        clockevents_config_and_register(cd, mips_hpt_frequency, min_delta, 0x7fffffff);

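        /*
         * c0_compare_irqaction is shared by all CPUs; only the first CPU to
         * get here needs to install it.
         */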
        if (cp0_timer_irq_installed)
                return 0;

        cp0_timer_irq_installed = 1;

        setup_irq(irq, &c0_compare_irqaction);

        return 0;
}

