arch/mips/oprofile/op_model_mipsxx.c

DEFINITIONS

This source file includes the following definitions:
  1. vpe_shift
  2. vpe_shift
  3. counters_total_to_per_cpu
  4. counters_per_cpu_to_total
  5. __define_perf_accessors
  6. mipsxx_reg_setup
  7. mipsxx_cpu_setup
  8. mipsxx_cpu_start
  9. mipsxx_cpu_stop
  10. mipsxx_perfcount_handler
  11. __n_counters
  12. n_counters
  13. reset_counters
  14. mipsxx_perfcount_int
  15. mipsxx_init
  16. mipsxx_exit

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004, 05, 06 by Ralf Baechle
 * Copyright (C) 2005 by MIPS Technologies, Inc.
 */
#include <linux/cpumask.h>
#include <linux/oprofile.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <asm/irq_regs.h>
#include <asm/time.h>

#include "op_impl.h"

#define M_PERFCTL_EVENT(event)          (((event) << MIPS_PERFCTRL_EVENT_S) & \
                                         MIPS_PERFCTRL_EVENT)
#define M_PERFCTL_VPEID(vpe)            ((vpe)    << MIPS_PERFCTRL_VPEID_S)

#define M_COUNTER_OVERFLOW              (1UL      << 31)

static int (*save_perf_irq)(void);
static int perfcount_irq;

/*
 * XLR has only one set of counters per core. Designate the
 * first hardware thread in the core for setup and init.
 * Skip CPUs with non-zero hardware thread id (4 hwt per core)
 */
#if defined(CONFIG_CPU_XLR) && defined(CONFIG_SMP)
#define oprofile_skip_cpu(c)    ((cpu_logical_map(c) & 0x3) != 0)
#else
#define oprofile_skip_cpu(c)    0
#endif
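/*
 * A small worked example of the test above, assuming the usual numbering of
 * four hardware threads per XLR core: cpu_logical_map() values 0, 4, 8, ...
 * have (id & 0x3) == 0 and perform the per-core setup, while values 1-3,
 * 5-7, ... are sibling hardware threads that share the same counter set and
 * are therefore skipped.
 */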

#ifdef CONFIG_MIPS_MT_SMP
#define WHAT            (MIPS_PERFCTRL_MT_EN_VPE | \
                         M_PERFCTL_VPEID(cpu_vpe_id(&current_cpu_data)))
#define vpe_id()        (cpu_has_mipsmt_pertccounters ? \
                        0 : cpu_vpe_id(&current_cpu_data))

/*
 * The number of bits to shift to convert between counters per core and
 * counters per VPE.  There is no reasonable interface at the moment to
 * obtain the number of VPEs used by Linux, and in the 34K this number is
 * fixed to two anyway, so we hardcode a few things here for the moment.
 * The way it's done here ensures that an oprofile VSMP kernel will also
 * run correctly on a lesser core such as a 24K, or with maxcpus=1.
 */
static inline unsigned int vpe_shift(void)
{
        if (num_possible_cpus() > 1)
                return 1;

        return 0;
}

#else

#define WHAT            0
#define vpe_id()        0

static inline unsigned int vpe_shift(void)
{
        return 0;
}

#endif

static inline unsigned int counters_total_to_per_cpu(unsigned int counters)
{
        return counters >> vpe_shift();
}

static inline unsigned int counters_per_cpu_to_total(unsigned int counters)
{
        return counters << vpe_shift();
}
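/*
 * Worked example of the conversion described above: a 34K running a VSMP
 * kernel with two VPEs and four core counters, without per-TC counters,
 * gives vpe_shift() == 1, so counters_total_to_per_cpu(4) == 2 and each VPE
 * programs two counters.  On a single-VPE configuration (e.g. a 24K, or
 * maxcpus=1) vpe_shift() is 0 and both conversions are identity operations.
 */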

#define __define_perf_accessors(r, n, np)                               \
                                                                        \
static inline unsigned int r_c0_ ## r ## n(void)                        \
{                                                                       \
        unsigned int cpu = vpe_id();                                    \
                                                                        \
        switch (cpu) {                                                  \
        case 0:                                                         \
                return read_c0_ ## r ## n();                            \
        case 1:                                                         \
                return read_c0_ ## r ## np();                           \
        default:                                                        \
                BUG();                                                  \
        }                                                               \
        return 0;                                                       \
}                                                                       \
                                                                        \
static inline void w_c0_ ## r ## n(unsigned int value)                  \
{                                                                       \
        unsigned int cpu = vpe_id();                                    \
                                                                        \
        switch (cpu) {                                                  \
        case 0:                                                         \
                write_c0_ ## r ## n(value);                             \
                return;                                                 \
        case 1:                                                         \
                write_c0_ ## r ## np(value);                            \
                return;                                                 \
        default:                                                        \
                BUG();                                                  \
        }                                                               \
        return;                                                         \
}                                                                       \

__define_perf_accessors(perfcntr, 0, 2)
__define_perf_accessors(perfcntr, 1, 3)
__define_perf_accessors(perfcntr, 2, 0)
__define_perf_accessors(perfcntr, 3, 1)

__define_perf_accessors(perfctrl, 0, 2)
__define_perf_accessors(perfctrl, 1, 3)
__define_perf_accessors(perfctrl, 2, 0)
__define_perf_accessors(perfctrl, 3, 1)
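/*
 * For reference, __define_perf_accessors(perfcntr, 0, 2) expands to roughly
 * the following reader (plus the matching w_c0_perfcntr0() writer):
 *
 *	static inline unsigned int r_c0_perfcntr0(void)
 *	{
 *		switch (vpe_id()) {
 *		case 0:
 *			return read_c0_perfcntr0();
 *		case 1:
 *			return read_c0_perfcntr2();
 *		default:
 *			BUG();
 *		}
 *		return 0;
 *	}
 *
 * Together with the (1, 3), (2, 0) and (3, 1) instantiations this means
 * that when each VPE is given the lower half of the counter numbers, VPE 1's
 * accesses are transparently redirected to the core's upper two counters
 * while VPE 0 keeps counters 0 and 1.
 */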

struct op_mips_model op_model_mipsxx_ops;

static struct mipsxx_register_config {
        unsigned int control[4];
        unsigned int counter[4];
} reg;

/* Compute all of the registers in preparation for enabling profiling.  */

static void mipsxx_reg_setup(struct op_counter_config *ctr)
{
        unsigned int counters = op_model_mipsxx_ops.num_counters;
        int i;

        /* Compute the performance counter control word.  */
        for (i = 0; i < counters; i++) {
                reg.control[i] = 0;
                reg.counter[i] = 0;

                if (!ctr[i].enabled)
                        continue;

                reg.control[i] = M_PERFCTL_EVENT(ctr[i].event) |
                                 MIPS_PERFCTRL_IE;
                if (ctr[i].kernel)
                        reg.control[i] |= MIPS_PERFCTRL_K;
                if (ctr[i].user)
                        reg.control[i] |= MIPS_PERFCTRL_U;
                if (ctr[i].exl)
                        reg.control[i] |= MIPS_PERFCTRL_EXL;
                if (boot_cpu_type() == CPU_XLR)
                        reg.control[i] |= XLR_PERFCTRL_ALLTHREADS;
                reg.counter[i] = 0x80000000 - ctr[i].count;
        }
}
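/*
 * The preload value computed above is what implements the sampling period:
 * the counter is a 32-bit up-counter and the interrupt handler below treats
 * a set bit 31 (M_COUNTER_OVERFLOW) as "period expired".  Starting the
 * counter at 0x80000000 - count therefore raises the overflow condition
 * after exactly `count' events.  For example, a sampling period of 100000
 * events gives a preload of 0x80000000 - 0x186a0 = 0x7ffe7960.
 */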

/* Program all of the registers in preparation for enabling profiling.  */

static void mipsxx_cpu_setup(void *args)
{
        unsigned int counters = op_model_mipsxx_ops.num_counters;

        if (oprofile_skip_cpu(smp_processor_id()))
                return;

        switch (counters) {
        case 4:
                w_c0_perfctrl3(0);
                w_c0_perfcntr3(reg.counter[3]);
                /* fall through */
        case 3:
                w_c0_perfctrl2(0);
                w_c0_perfcntr2(reg.counter[2]);
                /* fall through */
        case 2:
                w_c0_perfctrl1(0);
                w_c0_perfcntr1(reg.counter[1]);
                /* fall through */
        case 1:
                w_c0_perfctrl0(0);
                w_c0_perfcntr0(reg.counter[0]);
        }
}

/* Start all counters on current CPU */
static void mipsxx_cpu_start(void *args)
{
        unsigned int counters = op_model_mipsxx_ops.num_counters;

        if (oprofile_skip_cpu(smp_processor_id()))
                return;

        switch (counters) {
        case 4:
                w_c0_perfctrl3(WHAT | reg.control[3]);
                /* fall through */
        case 3:
                w_c0_perfctrl2(WHAT | reg.control[2]);
                /* fall through */
        case 2:
                w_c0_perfctrl1(WHAT | reg.control[1]);
                /* fall through */
        case 1:
                w_c0_perfctrl0(WHAT | reg.control[0]);
        }
}
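/*
 * On CONFIG_MIPS_MT_SMP kernels the WHAT bits OR'ed in above enable per-VPE
 * counting (MIPS_PERFCTRL_MT_EN_VPE) and tag the control word with the
 * current VPE id, so each counter only counts events raised by the VPE that
 * started it.  On all other configurations WHAT is 0 and the control word
 * computed by mipsxx_reg_setup() is used unmodified.
 */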

/* Stop all counters on current CPU */
static void mipsxx_cpu_stop(void *args)
{
        unsigned int counters = op_model_mipsxx_ops.num_counters;

        if (oprofile_skip_cpu(smp_processor_id()))
                return;

        switch (counters) {
        case 4:
                w_c0_perfctrl3(0);
                /* fall through */
        case 3:
                w_c0_perfctrl2(0);
                /* fall through */
        case 2:
                w_c0_perfctrl1(0);
                /* fall through */
        case 1:
                w_c0_perfctrl0(0);
        }
}

static int mipsxx_perfcount_handler(void)
{
        unsigned int counters = op_model_mipsxx_ops.num_counters;
        unsigned int control;
        unsigned int counter;
        int handled = IRQ_NONE;

        if (cpu_has_mips_r2 && !(read_c0_cause() & CAUSEF_PCI))
                return handled;

        switch (counters) {
#define HANDLE_COUNTER(n)                                               \
        /* fall through */                                              \
        case n + 1:                                                     \
                control = r_c0_perfctrl ## n();                         \
                counter = r_c0_perfcntr ## n();                         \
                if ((control & MIPS_PERFCTRL_IE) &&                     \
                    (counter & M_COUNTER_OVERFLOW)) {                   \
                        oprofile_add_sample(get_irq_regs(), n);         \
                        w_c0_perfcntr ## n(reg.counter[n]);             \
                        handled = IRQ_HANDLED;                          \
                }
        HANDLE_COUNTER(3)
        HANDLE_COUNTER(2)
        HANDLE_COUNTER(1)
        HANDLE_COUNTER(0)
        }

        return handled;
}
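/*
 * HANDLE_COUNTER(n) expands to "case n + 1:" followed by the overflow check
 * for counter n, so the switch above walks the counters from the highest
 * number down: e.g. with four counters execution enters at "case 4:"
 * (counter 3) and falls through to counters 2, 1 and 0.  Every counter whose
 * interrupt-enable bit is set and whose value has bit 31 set contributes a
 * sample and is reloaded with its preload value from reg.counter[].
 */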

static inline int __n_counters(void)
{
        if (!cpu_has_perf)
                return 0;
        if (!(read_c0_perfctrl0() & MIPS_PERFCTRL_M))
                return 1;
        if (!(read_c0_perfctrl1() & MIPS_PERFCTRL_M))
                return 2;
        if (!(read_c0_perfctrl2() & MIPS_PERFCTRL_M))
                return 3;

        return 4;
}
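/*
 * Each MIPS performance counter control register has an "M" (more) bit that
 * reads 1 when another counter/control pair follows, so the probe above
 * simply walks that chain.  For example, a core with two counters has the M
 * bit set in perfctrl0 and clear in perfctrl1, giving 2.
 */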

static inline int n_counters(void)
{
        int counters;

        switch (current_cpu_type()) {
        case CPU_R10000:
                counters = 2;
                break;

        case CPU_R12000:
        case CPU_R14000:
        case CPU_R16000:
                counters = 4;
                break;

        default:
                counters = __n_counters();
        }

        return counters;
}

static void reset_counters(void *arg)
{
        int counters = (int)(long)arg;

        switch (counters) {
        case 4:
                w_c0_perfctrl3(0);
                w_c0_perfcntr3(0);
                /* fall through */
        case 3:
                w_c0_perfctrl2(0);
                w_c0_perfcntr2(0);
                /* fall through */
        case 2:
                w_c0_perfctrl1(0);
                w_c0_perfcntr1(0);
                /* fall through */
        case 1:
                w_c0_perfctrl0(0);
                w_c0_perfcntr0(0);
        }
}

static irqreturn_t mipsxx_perfcount_int(int irq, void *dev_id)
{
        return mipsxx_perfcount_handler();
}

static int __init mipsxx_init(void)
{
        int counters;

        counters = n_counters();
        if (counters == 0) {
                printk(KERN_ERR "Oprofile: CPU has no performance counters\n");
                return -ENODEV;
        }

#ifdef CONFIG_MIPS_MT_SMP
        if (!cpu_has_mipsmt_pertccounters)
                counters = counters_total_to_per_cpu(counters);
#endif
        on_each_cpu(reset_counters, (void *)(long)counters, 1);

        op_model_mipsxx_ops.num_counters = counters;
        switch (current_cpu_type()) {
        case CPU_M14KC:
                op_model_mipsxx_ops.cpu_type = "mips/M14Kc";
                break;

        case CPU_M14KEC:
                op_model_mipsxx_ops.cpu_type = "mips/M14KEc";
                break;

        case CPU_20KC:
                op_model_mipsxx_ops.cpu_type = "mips/20K";
                break;

        case CPU_24K:
                op_model_mipsxx_ops.cpu_type = "mips/24K";
                break;

        case CPU_25KF:
                op_model_mipsxx_ops.cpu_type = "mips/25K";
                break;

        case CPU_1004K:
        case CPU_34K:
                op_model_mipsxx_ops.cpu_type = "mips/34K";
                break;

        case CPU_1074K:
        case CPU_74K:
                op_model_mipsxx_ops.cpu_type = "mips/74K";
                break;

        case CPU_INTERAPTIV:
                op_model_mipsxx_ops.cpu_type = "mips/interAptiv";
                break;

        case CPU_PROAPTIV:
                op_model_mipsxx_ops.cpu_type = "mips/proAptiv";
                break;

        case CPU_P5600:
                op_model_mipsxx_ops.cpu_type = "mips/P5600";
                break;

        case CPU_I6400:
                op_model_mipsxx_ops.cpu_type = "mips/I6400";
                break;

        case CPU_M5150:
                op_model_mipsxx_ops.cpu_type = "mips/M5150";
                break;

        case CPU_5KC:
                op_model_mipsxx_ops.cpu_type = "mips/5K";
                break;

        case CPU_R10000:
                if ((current_cpu_data.processor_id & 0xff) == 0x20)
                        op_model_mipsxx_ops.cpu_type = "mips/r10000-v2.x";
                else
                        op_model_mipsxx_ops.cpu_type = "mips/r10000";
                break;

        case CPU_R12000:
        case CPU_R14000:
                op_model_mipsxx_ops.cpu_type = "mips/r12000";
                break;

        case CPU_R16000:
                op_model_mipsxx_ops.cpu_type = "mips/r16000";
                break;

        case CPU_SB1:
        case CPU_SB1A:
                op_model_mipsxx_ops.cpu_type = "mips/sb1";
                break;

        case CPU_LOONGSON1:
                op_model_mipsxx_ops.cpu_type = "mips/loongson1";
                break;

        case CPU_XLR:
                op_model_mipsxx_ops.cpu_type = "mips/xlr";
                break;

        default:
                printk(KERN_ERR "Profiling unsupported for this CPU\n");

                return -ENODEV;
        }

        save_perf_irq = perf_irq;
        perf_irq = mipsxx_perfcount_handler;

        if (get_c0_perfcount_int)
                perfcount_irq = get_c0_perfcount_int();
        else if (cp0_perfcount_irq >= 0)
                perfcount_irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
        else
                perfcount_irq = -1;

        if (perfcount_irq >= 0)
                return request_irq(perfcount_irq, mipsxx_perfcount_int,
                                   IRQF_PERCPU | IRQF_NOBALANCING |
                                   IRQF_NO_THREAD | IRQF_NO_SUSPEND |
                                   IRQF_SHARED,
                                   "Perfcounter", save_perf_irq);

        return 0;
}

static void mipsxx_exit(void)
{
        int counters = op_model_mipsxx_ops.num_counters;

        if (perfcount_irq >= 0)
                free_irq(perfcount_irq, save_perf_irq);

        counters = counters_per_cpu_to_total(counters);
        on_each_cpu(reset_counters, (void *)(long)counters, 1);

        perf_irq = save_perf_irq;
}

struct op_mips_model op_model_mipsxx_ops = {
        .reg_setup      = mipsxx_reg_setup,
        .cpu_setup      = mipsxx_cpu_setup,
        .init           = mipsxx_init,
        .exit           = mipsxx_exit,
        .cpu_start      = mipsxx_cpu_start,
        .cpu_stop       = mipsxx_cpu_stop,
};
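/*
 * op_model_mipsxx_ops is consumed by the MIPS oprofile glue code rather than
 * called from this file.  As a rough, hypothetical sketch (the ordering
 * below is illustrative only, not the actual glue code), a caller would
 * drive the callbacks like this, with ctr[] filled in from the user's event
 * configuration:
 *
 *	struct op_counter_config ctr[4];
 *
 *	op_model_mipsxx_ops.init();
 *	op_model_mipsxx_ops.reg_setup(ctr);
 *	on_each_cpu(op_model_mipsxx_ops.cpu_setup, NULL, 1);
 *	on_each_cpu(op_model_mipsxx_ops.cpu_start, NULL, 1);
 *
 *	... profiling runs, samples arrive via mipsxx_perfcount_handler() ...
 *
 *	on_each_cpu(op_model_mipsxx_ops.cpu_stop, NULL, 1);
 *	op_model_mipsxx_ops.exit();
 */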
