root/arch/powerpc/oprofile/cell/spu_profiler.c


DEFINITIONS

This source file includes the following definitions.
  1. set_spu_profiling_frequency
  2. spu_pc_extract
  3. cell_spu_pc_collection
  4. profile_spus
  5. start_spu_profiling_cycles
  6. start_spu_profiling_events
  7. stop_spu_profiling_cycles
  8. stop_spu_profiling_events

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Cell Broadband Engine OProfile Support
 *
 * (C) Copyright IBM Corporation 2006
 *
 * Authors: Maynard Johnson <maynardj@us.ibm.com>
 *          Carl Love <carll@us.ibm.com>
 */

#include <linux/hrtimer.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <asm/cell-pmu.h>
#include <asm/time.h>
#include "pr_util.h"

#define SCALE_SHIFT 14

static u32 *samples;

/* spu_prof_running is a flag that indicates whether SPU profiling is
 * enabled.  It is set by the routines start_spu_profiling_cycles() and
 * start_spu_profiling_events() and cleared by stop_spu_profiling_cycles()
 * and stop_spu_profiling_events().  These routines are called via
 * global_start() and global_stop(), which are in turn called from
 * op_powerpc_start() and op_powerpc_stop().  They run once per system as
 * a result of the user starting/stopping oprofile, so only one CPU per
 * user at a time will be changing the value of spu_prof_running.  In
 * general, OProfile does not protect against multiple users trying to
 * run OProfile at the same time.
 */
int spu_prof_running;
static unsigned int profiling_interval;

#define NUM_SPU_BITS_TRBUF 16
#define SPUS_PER_TB_ENTRY   4

#define SPU_PC_MASK          0xFFFF

DEFINE_SPINLOCK(oprof_spu_smpl_arry_lck);
static unsigned long oprof_spu_smpl_arry_lck_flags;

void set_spu_profiling_frequency(unsigned int freq_khz, unsigned int cycles_reset)
{
        unsigned long ns_per_cyc;

        if (!freq_khz)
                freq_khz = ppc_proc_freq / 1000;

        /* To calculate a timeout in nanoseconds, the basic
         * formula is ns = cycles_reset * (NSEC_PER_SEC / cpu frequency).
         * To avoid floating point math, we use the scale math
         * technique as described in linux/jiffies.h.  We use
         * a scale factor of SCALE_SHIFT, which provides 4 decimal places
         * of precision.  This is close enough for the purpose at hand.
         *
         * The value of the timeout should be small enough that the hw
         * trace buffer will not get more than about 1/3 full at the
         * maximum user-specified hw sampling frequency (the LFSR value).
         * This ensures the trace buffer will never fill even if the
         * kernel thread scheduling varies under a heavy system load.
         */
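
        /* Worked example with illustrative (not source-specified) numbers:
         * for freq_khz = 3200000 (3.2 GHz) and cycles_reset = 100000,
         *   ns_per_cyc         = (1000000 << 14) / 3200000 = 5120
         *                        (0.3125 ns/cycle scaled by 2^SCALE_SHIFT)
         *   profiling_interval = (5120 * 100000) >> 14     = 31250 ns
         * which matches 100000 cycles at 3.2 GHz (31.25 us).  Note that
         * dividing USEC_PER_SEC by freq_khz below is the same as dividing
         * NSEC_PER_SEC by the frequency in Hz, since freq_khz is in kHz.
         */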

        ns_per_cyc = (USEC_PER_SEC << SCALE_SHIFT) / freq_khz;
        profiling_interval = (ns_per_cyc * cycles_reset) >> SCALE_SHIFT;
}

/*
 * Extract SPU PC from trace buffer entry
 */
static void spu_pc_extract(int cpu, int entry)
{
        /* the trace buffer is 128 bits */
        u64 trace_buffer[2];
        u64 spu_mask;
        int spu;

        spu_mask = SPU_PC_MASK;

        /* Each SPU PC is 16 bits; hence, four SPUs in each of
         * the two 64-bit buffer entries that make up the
         * 128-bit trace_buffer entry.  Process two 64-bit values
         * simultaneously.
         * trace[0] SPU PC contents are: 0 1 2 3
         * trace[1] SPU PC contents are: 4 5 6 7
         */
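
        /* Worked example with an illustrative value: if trace_buffer[0]
         * were 0x0001000200030004, the loop below would store
         * 0x0001 << 2 for SPU 0, 0x0002 << 2 for SPU 1, 0x0003 << 2 for
         * SPU 2 and 0x0004 << 2 for SPU 3; shifting left by 2 restores
         * the word-aligned 18-bit program counter from its upper 16 bits.
         */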

        cbe_read_trace_buffer(cpu, trace_buffer);

        for (spu = SPUS_PER_TB_ENTRY - 1; spu >= 0; spu--) {
                /* spu PC trace entry is upper 16 bits of the
                 * 18 bit SPU program counter
                 */
                samples[spu * TRACE_ARRAY_SIZE + entry]
                        = (spu_mask & trace_buffer[0]) << 2;
                samples[(spu + SPUS_PER_TB_ENTRY) * TRACE_ARRAY_SIZE + entry]
                        = (spu_mask & trace_buffer[1]) << 2;

                trace_buffer[0] = trace_buffer[0] >> NUM_SPU_BITS_TRBUF;
                trace_buffer[1] = trace_buffer[1] >> NUM_SPU_BITS_TRBUF;
        }
}
static int cell_spu_pc_collection(int cpu)
{
        u32 trace_addr;
        int entry;

        /* process the collected SPU PCs for the node */

        entry = 0;

        trace_addr = cbe_read_pm(cpu, trace_address);
        while (!(trace_addr & CBE_PM_TRACE_BUF_EMPTY)) {
                /* there is data in the trace buffer to process */
                spu_pc_extract(cpu, entry);

                entry++;

                if (entry >= TRACE_ARRAY_SIZE)
                        /* the samples array is full */
                        break;

                trace_addr = cbe_read_pm(cpu, trace_address);
        }

        return entry;
}


static enum hrtimer_restart profile_spus(struct hrtimer *timer)
{
        ktime_t kt;
        int cpu, node, k, num_samples, spu_num;

        if (!spu_prof_running)
                goto stop;

        for_each_online_cpu(cpu) {
                if (cbe_get_hw_thread_id(cpu))
                        continue;

                node = cbe_cpu_to_node(cpu);

                /* There should only be one kernel thread at a time
                 * processing the samples.  In the very unlikely case that
                 * the processing takes a very long time and multiple
                 * kernel threads end up processing the samples, make sure
                 * only one kernel thread works on the samples array at a
                 * time.  The sample array must be loaded and then
                 * processed for a given cpu; it is not per cpu.
                 */
                spin_lock_irqsave(&oprof_spu_smpl_arry_lck,
                                  oprof_spu_smpl_arry_lck_flags);
                num_samples = cell_spu_pc_collection(cpu);

                if (num_samples == 0) {
                        spin_unlock_irqrestore(&oprof_spu_smpl_arry_lck,
                                               oprof_spu_smpl_arry_lck_flags);
                        continue;
                }

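                /* Layout note (derived from spu_pc_extract() above):
                 * samples[] holds SPUS_PER_NODE consecutive slices of
                 * TRACE_ARRAY_SIZE entries, slice k containing the PCs of
                 * SPU k on this node, so each slice is handed to
                 * spu_sync_buffer() separately.
                 */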
                for (k = 0; k < SPUS_PER_NODE; k++) {
                        spu_num = k + (node * SPUS_PER_NODE);
                        spu_sync_buffer(spu_num,
                                        samples + (k * TRACE_ARRAY_SIZE),
                                        num_samples);
                }

                spin_unlock_irqrestore(&oprof_spu_smpl_arry_lck,
                                       oprof_spu_smpl_arry_lck_flags);

        }
        smp_wmb();      /* ensure spu event buffer updates are written */
                        /* don't want events intermingled... */

        kt = profiling_interval;
        if (!spu_prof_running)
                goto stop;
        hrtimer_forward(timer, timer->base->get_time(), kt);
        return HRTIMER_RESTART;

 stop:
        printk(KERN_INFO "SPU_PROF: spu-prof timer ending\n");
        return HRTIMER_NORESTART;
}

static struct hrtimer timer;

/*
 * Entry point for SPU cycle profiling.
 * NOTE:  SPU profiling is done system-wide, not per-CPU.
 *
 * cycles_reset is the count value specified by the user when
 * setting up OProfile to count SPU_CYCLES.
 */
int start_spu_profiling_cycles(unsigned int cycles_reset)
{
        ktime_t kt;

        pr_debug("timer resolution: %lu\n", TICK_NSEC);
        kt = profiling_interval;
        hrtimer_init(&timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        hrtimer_set_expires(&timer, kt);
        timer.function = profile_spus;

        /* Allocate arrays for collecting SPU PC samples */
        samples = kcalloc(SPUS_PER_NODE * TRACE_ARRAY_SIZE, sizeof(u32),
                          GFP_KERNEL);

        if (!samples)
                return -ENOMEM;

        spu_prof_running = 1;
        hrtimer_start(&timer, kt, HRTIMER_MODE_REL);
        schedule_delayed_work(&spu_work, DEFAULT_TIMER_EXPIRE);

        return 0;
}

/*
 * Entry point for SPU event profiling.
 * NOTE:  SPU profiling is done system-wide, not per-CPU.
 */
void start_spu_profiling_events(void)
{
        spu_prof_running = 1;
        schedule_delayed_work(&spu_work, DEFAULT_TIMER_EXPIRE);
}

void stop_spu_profiling_cycles(void)
{
        spu_prof_running = 0;
        hrtimer_cancel(&timer);
        kfree(samples);
        pr_debug("SPU_PROF: stop_spu_profiling_cycles issued\n");
}

void stop_spu_profiling_events(void)
{
        spu_prof_running = 0;
}
