root/drivers/hwtracing/coresight/coresight-etm3x.c

DEFINITIONS

This source file includes the following definitions:
  1. etm_os_unlock
  2. etm_set_pwrdwn
  3. etm_clr_pwrdwn
  4. etm_set_pwrup
  5. etm_clr_pwrup
  6. coresight_timeout_etm
  7. etm_set_prog
  8. etm_clr_prog
  9. etm_set_default
  10. etm_config_trace_mode
  11. etm_parse_event_config
  12. etm_enable_hw
  13. etm_enable_hw_smp_call
  14. etm_cpu_id
  15. etm_get_trace_id
  16. etm_trace_id
  17. etm_enable_perf
  18. etm_enable_sysfs
  19. etm_enable
  20. etm_disable_hw
  21. etm_disable_perf
  22. etm_disable_sysfs
  23. etm_disable
  24. etm_online_cpu
  25. etm_starting_cpu
  26. etm_dying_cpu
  27. etm_arch_supported
  28. etm_init_arch_data
  29. etm_init_trace_id
  30. etm_probe
  31. etm_runtime_suspend
  32. etm_runtime_resume

   1 // SPDX-License-Identifier: GPL-2.0
   2 /*
   3  * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
   4  *
   5  * Description: CoreSight Program Flow Trace driver
   6  */
   7 
   8 #include <linux/kernel.h>
   9 #include <linux/moduleparam.h>
  10 #include <linux/init.h>
  11 #include <linux/types.h>
  12 #include <linux/device.h>
  13 #include <linux/io.h>
  14 #include <linux/err.h>
  15 #include <linux/fs.h>
  16 #include <linux/slab.h>
  17 #include <linux/delay.h>
  18 #include <linux/smp.h>
  19 #include <linux/sysfs.h>
  20 #include <linux/stat.h>
  21 #include <linux/pm_runtime.h>
  22 #include <linux/cpu.h>
  23 #include <linux/of.h>
  24 #include <linux/coresight.h>
  25 #include <linux/coresight-pmu.h>
  26 #include <linux/amba/bus.h>
  27 #include <linux/seq_file.h>
  28 #include <linux/uaccess.h>
  29 #include <linux/clk.h>
  30 #include <linux/perf_event.h>
  31 #include <asm/sections.h>
  32 
  33 #include "coresight-etm.h"
  34 #include "coresight-etm-perf.h"
  35 
  36 /*
  37  * Not really modular but using module_param is the easiest way to
  38  * remain consistent with existing use cases for now.
  39  */
  40 static int boot_enable;
  41 module_param_named(boot_enable, boot_enable, int, S_IRUGO);
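     /*
      * Example (the exact parameter prefix is an assumption; it depends on the
      * module name used by the build): booting with something like
      * "coresight_etm3x.boot_enable=1" on the kernel command line makes
      * etm_probe() call coresight_enable() on each tracer as it is registered.
      */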
  42 
  43 /* The number of ETM/PTM currently registered */
  44 static int etm_count;
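     /* Per-CPU tracer handles, indexed by CPU number (set in etm_probe()) */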
  45 static struct etm_drvdata *etmdrvdata[NR_CPUS];
  46 
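     /* Dynamic CPU hotplug state for the "online" callback, set up in etm_probe() */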
  47 static enum cpuhp_state hp_online;
  48 
  49 /*
  50  * Memory mapped writes to clear the OS lock are not supported on some
  51  * processors, and the OS lock must be cleared before any memory mapped
  52  * access on such processors; otherwise such reads/writes are invalid.
  53  */
  54 static void etm_os_unlock(struct etm_drvdata *drvdata)
  55 {
  56         /* Writing any value to ETMOSLAR unlocks the trace registers */
  57         etm_writel(drvdata, 0x0, ETMOSLAR);
  58         drvdata->os_unlock = true;
  59         isb();
  60 }
  61 
  62 static void etm_set_pwrdwn(struct etm_drvdata *drvdata)
  63 {
  64         u32 etmcr;
  65 
  66         /* Ensure pending cp14 accesses complete before setting pwrdwn */
  67         mb();
  68         isb();
  69         etmcr = etm_readl(drvdata, ETMCR);
  70         etmcr |= ETMCR_PWD_DWN;
  71         etm_writel(drvdata, etmcr, ETMCR);
  72 }
  73 
  74 static void etm_clr_pwrdwn(struct etm_drvdata *drvdata)
  75 {
  76         u32 etmcr;
  77 
  78         etmcr = etm_readl(drvdata, ETMCR);
  79         etmcr &= ~ETMCR_PWD_DWN;
  80         etm_writel(drvdata, etmcr, ETMCR);
  81         /* Ensure pwrdwn clear completes before subsequent cp14 accesses */
  82         mb();
  83         isb();
  84 }
  85 
  86 static void etm_set_pwrup(struct etm_drvdata *drvdata)
  87 {
  88         u32 etmpdcr;
  89 
  90         etmpdcr = readl_relaxed(drvdata->base + ETMPDCR);
  91         etmpdcr |= ETMPDCR_PWD_UP;
  92         writel_relaxed(etmpdcr, drvdata->base + ETMPDCR);
  93         /* Ensure pwrup completes before subsequent cp14 accesses */
  94         mb();
  95         isb();
  96 }
  97 
  98 static void etm_clr_pwrup(struct etm_drvdata *drvdata)
  99 {
 100         u32 etmpdcr;
 101 
 102         /* Ensure pending cp14 accesses complete before clearing pwrup */
 103         mb();
 104         isb();
 105         etmpdcr = readl_relaxed(drvdata->base + ETMPDCR);
 106         etmpdcr &= ~ETMPDCR_PWD_UP;
 107         writel_relaxed(etmpdcr, drvdata->base + ETMPDCR);
 108 }
 109 
 110 /**
 111  * coresight_timeout_etm - loop until a bit has changed to a specific state.
 112  * @drvdata: etm's private data structure.
 113  * @offset: offset of the register to poll within the ETM register space.
 114  * @position: the position of the bit of interest.
 115  * @value: the value the bit should have.
 116  *
 117  * Basically the same as coresight_timeout() except for the register access
 118  * method, where we have to account for CP14 configurations.
 119  *
 120  * Return: 0 as soon as the bit has taken the desired state or -EAGAIN if
 121  * TIMEOUT_US has elapsed, whichever happens first.
 122  */
 123 
 124 static int coresight_timeout_etm(struct etm_drvdata *drvdata, u32 offset,
 125                                   int position, int value)
 126 {
 127         int i;
 128         u32 val;
 129 
 130         for (i = TIMEOUT_US; i > 0; i--) {
 131                 val = etm_readl(drvdata, offset);
 132                 /* Waiting on the bit to go from 0 to 1 */
 133                 if (value) {
 134                         if (val & BIT(position))
 135                                 return 0;
 136                 /* Waiting on the bit to go from 1 to 0 */
 137                 } else {
 138                         if (!(val & BIT(position)))
 139                                 return 0;
 140                 }
 141 
 142                 /*
 143                  * Delay is arbitrary - the specification doesn't say how long
 144                  * we are expected to wait.  Extra check required to make sure
 145                  * we don't wait needlessly on the last iteration.
 146                  */
 147                 if (i - 1)
 148                         udelay(1);
 149         }
 150 
 151         return -EAGAIN;
 152 }
 153 
 154 
 155 static void etm_set_prog(struct etm_drvdata *drvdata)
 156 {
 157         u32 etmcr;
 158 
 159         etmcr = etm_readl(drvdata, ETMCR);
 160         etmcr |= ETMCR_ETM_PRG;
 161         etm_writel(drvdata, etmcr, ETMCR);
 162         /*
 163          * Recommended by spec for cp14 accesses to ensure etmcr write is
 164          * complete before polling etmsr
 165          */
 166         isb();
 167         if (coresight_timeout_etm(drvdata, ETMSR, ETMSR_PROG_BIT, 1)) {
 168                 dev_err(&drvdata->csdev->dev,
 169                         "%s: timeout observed when probing at offset %#x\n",
 170                         __func__, ETMSR);
 171         }
 172 }
 173 
 174 static void etm_clr_prog(struct etm_drvdata *drvdata)
 175 {
 176         u32 etmcr;
 177 
 178         etmcr = etm_readl(drvdata, ETMCR);
 179         etmcr &= ~ETMCR_ETM_PRG;
 180         etm_writel(drvdata, etmcr, ETMCR);
 181         /*
 182          * Recommended by spec for cp14 accesses to ensure etmcr write is
 183          * complete before polling etmsr
 184          */
 185         isb();
 186         if (coresight_timeout_etm(drvdata, ETMSR, ETMSR_PROG_BIT, 0)) {
 187                 dev_err(&drvdata->csdev->dev,
 188                         "%s: timeout observed when probing at offset %#x\n",
 189                         __func__, ETMSR);
 190         }
 191 }
 192 
 193 void etm_set_default(struct etm_config *config)
 194 {
 195         int i;
 196 
 197         if (WARN_ON_ONCE(!config))
 198                 return;
 199 
 200         /*
 201          * Taken verbatim from the TRM:
 202          *
 203          * To trace all memory:
 204          *  set bit [24] in register 0x009, the ETMTECR1, to 1
 205          *  set all other bits in register 0x009, the ETMTECR1, to 0
 206          *  set all bits in register 0x007, the ETMTECR2, to 0
 207          *  set register 0x008, the ETMTEEVR, to 0x6F (TRUE).
 208          */
 209         config->enable_ctrl1 = BIT(24);
 210         config->enable_ctrl2 = 0x0;
 211         config->enable_event = ETM_HARD_WIRE_RES_A;
 212 
 213         config->trigger_event = ETM_DEFAULT_EVENT_VAL;
 214         config->enable_event = ETM_HARD_WIRE_RES_A;
 215 
 216         config->seq_12_event = ETM_DEFAULT_EVENT_VAL;
 217         config->seq_21_event = ETM_DEFAULT_EVENT_VAL;
 218         config->seq_23_event = ETM_DEFAULT_EVENT_VAL;
 219         config->seq_31_event = ETM_DEFAULT_EVENT_VAL;
 220         config->seq_32_event = ETM_DEFAULT_EVENT_VAL;
 221         config->seq_13_event = ETM_DEFAULT_EVENT_VAL;
 222         config->timestamp_event = ETM_DEFAULT_EVENT_VAL;
 223 
 224         for (i = 0; i < ETM_MAX_CNTR; i++) {
 225                 config->cntr_rld_val[i] = 0x0;
 226                 config->cntr_event[i] = ETM_DEFAULT_EVENT_VAL;
 227                 config->cntr_rld_event[i] = ETM_DEFAULT_EVENT_VAL;
 228                 config->cntr_val[i] = 0x0;
 229         }
 230 
 231         config->seq_curr_state = 0x0;
 232         config->ctxid_idx = 0x0;
 233         for (i = 0; i < ETM_MAX_CTXID_CMP; i++)
 234                 config->ctxid_pid[i] = 0x0;
 235 
 236         config->ctxid_mask = 0x0;
 237         /* Setting default to 1024 as per TRM recommendation */
 238         config->sync_freq = 0x400;
 239 }
 240 
 241 void etm_config_trace_mode(struct etm_config *config)
 242 {
 243         u32 flags, mode;
 244 
 245         mode = config->mode;
 246 
 247         mode &= (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER);
 248 
 249         /* excluding kernel AND user space doesn't make sense */
 250         if (mode == (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
 251                 return;
 252 
 253         /* nothing to do if neither flags are set */
 254         if (!(mode & ETM_MODE_EXCL_KERN) && !(mode & ETM_MODE_EXCL_USER))
 255                 return;
 256 
 257         flags = (1 << 0 |       /* instruction execute */
 258                  3 << 3 |       /* ARM instruction */
 259                  0 << 5 |       /* No data value comparison */
 260                  0 << 7 |       /* No exact match */
 261                  0 << 8);       /* Ignore context ID */
 262 
 263         /* No need to worry about single address comparators. */
 264         config->enable_ctrl2 = 0x0;
 265 
 266         /* Bit 0 is address range comparator 1 */
 267         config->enable_ctrl1 = ETMTECR1_ADDR_COMP_1;
 268 
 269         /*
 270          * On ETMv3.5:
 271          * ETMACTRn[13,11] == Non-secure state comparison control
 272          * ETMACTRn[12,10] == Secure state comparison control
 273          *
 274          * b00 == Match in all modes in this state
 275          * b01 == Do not match in any mode in this state
 276          * b10 == Match in all modes except user mode in this state
 277          * b11 == Match only in user mode in this state
 278          */
 279 
 280         /* Tracing in secure mode is not supported at this time */
 281         flags |= (0 << 12 | 1 << 10);
 282 
 283         if (mode & ETM_MODE_EXCL_USER) {
 284                 /* exclude user, match all modes except user mode */
 285                 flags |= (1 << 13 | 0 << 11);
 286         } else {
 287                 /* exclude kernel, match only in user mode */
 288                 flags |= (1 << 13 | 1 << 11);
 289         }
 290 
 291         /*
 292          * The ETMTEEVR register is already set to "hard wire A".  As such
 293          * all there is to do is setup an address comparator that spans
 294          * the entire address range and configure the state and mode bits.
 295          */
 296         config->addr_val[0] = (u32) 0x0;
 297         config->addr_val[1] = (u32) ~0x0;
 298         config->addr_acctype[0] = flags;
 299         config->addr_acctype[1] = flags;
 300         config->addr_type[0] = ETM_ADDR_TYPE_RANGE;
 301         config->addr_type[1] = ETM_ADDR_TYPE_RANGE;
 302 }
 303 
 304 #define ETM3X_SUPPORTED_OPTIONS (ETMCR_CYC_ACC | \
 305                                  ETMCR_TIMESTAMP_EN | \
 306                                  ETMCR_RETURN_STACK)
 307 
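     /*
      * Usage sketch (an assumption about the perf tooling side, which is not
      * defined in this file): a session such as
      *
      *   perf record -e cs_etm/cycacc,timestamp/u -- <workload>
      *
      * reaches etm_parse_event_config() with attr->exclude_kernel set and the
      * selected option bits in attr->config; anything outside
      * ETM3X_SUPPORTED_OPTIONS is rejected with -EINVAL.
      */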
 308 static int etm_parse_event_config(struct etm_drvdata *drvdata,
 309                                   struct perf_event *event)
 310 {
 311         struct etm_config *config = &drvdata->config;
 312         struct perf_event_attr *attr = &event->attr;
 313 
 314         if (!attr)
 315                 return -EINVAL;
 316 
 317         /* Clear configuration from previous run */
 318         memset(config, 0, sizeof(struct etm_config));
 319 
 320         if (attr->exclude_kernel)
 321                 config->mode = ETM_MODE_EXCL_KERN;
 322 
 323         if (attr->exclude_user)
 324                 config->mode = ETM_MODE_EXCL_USER;
 325 
 326         /* Always start from the default config */
 327         etm_set_default(config);
 328 
 329         /*
 330          * By default the tracers are configured to trace the whole address
 331          * range.  Narrow the field only if requested by user space.
 332          */
 333         if (config->mode)
 334                 etm_config_trace_mode(config);
 335 
 336         /*
 337          * At this time only cycle accurate, return stack and timestamp
 338          * options are available.
 339          */
 340         if (attr->config & ~ETM3X_SUPPORTED_OPTIONS)
 341                 return -EINVAL;
 342 
 343         config->ctrl = attr->config;
 344 
 345         /*
 346          * Possible to have cores with PTM (supports ret stack) and ETM
 347          * (never has ret stack) on the same SoC. So if we have a request
 348          * for return stack that can't be honoured on this core then
 349          * clear the bit - trace will still continue normally
 350          */
 351         if ((config->ctrl & ETMCR_RETURN_STACK) &&
 352             !(drvdata->etmccer & ETMCCER_RETSTACK))
 353                 config->ctrl &= ~ETMCR_RETURN_STACK;
 354 
 355         return 0;
 356 }
 357 
 358 static int etm_enable_hw(struct etm_drvdata *drvdata)
 359 {
 360         int i, rc;
 361         u32 etmcr;
 362         struct etm_config *config = &drvdata->config;
 363 
 364         CS_UNLOCK(drvdata->base);
 365 
 366         rc = coresight_claim_device_unlocked(drvdata->base);
 367         if (rc)
 368                 goto done;
 369 
 370         /* Turn engine on */
 371         etm_clr_pwrdwn(drvdata);
 372         /* Apply power to trace registers */
 373         etm_set_pwrup(drvdata);
 374         /* Make sure all registers are accessible */
 375         etm_os_unlock(drvdata);
 376 
 377         etm_set_prog(drvdata);
 378 
 379         etmcr = etm_readl(drvdata, ETMCR);
 380         /* Clear setting from a previous run if need be */
 381         etmcr &= ~ETM3X_SUPPORTED_OPTIONS;
 382         etmcr |= drvdata->port_size;
 383         etmcr |= ETMCR_ETM_EN;
 384         etm_writel(drvdata, config->ctrl | etmcr, ETMCR);
 385         etm_writel(drvdata, config->trigger_event, ETMTRIGGER);
 386         etm_writel(drvdata, config->startstop_ctrl, ETMTSSCR);
 387         etm_writel(drvdata, config->enable_event, ETMTEEVR);
 388         etm_writel(drvdata, config->enable_ctrl1, ETMTECR1);
 389         etm_writel(drvdata, config->fifofull_level, ETMFFLR);
 390         for (i = 0; i < drvdata->nr_addr_cmp; i++) {
 391                 etm_writel(drvdata, config->addr_val[i], ETMACVRn(i));
 392                 etm_writel(drvdata, config->addr_acctype[i], ETMACTRn(i));
 393         }
 394         for (i = 0; i < drvdata->nr_cntr; i++) {
 395                 etm_writel(drvdata, config->cntr_rld_val[i], ETMCNTRLDVRn(i));
 396                 etm_writel(drvdata, config->cntr_event[i], ETMCNTENRn(i));
 397                 etm_writel(drvdata, config->cntr_rld_event[i],
 398                            ETMCNTRLDEVRn(i));
 399                 etm_writel(drvdata, config->cntr_val[i], ETMCNTVRn(i));
 400         }
 401         etm_writel(drvdata, config->seq_12_event, ETMSQ12EVR);
 402         etm_writel(drvdata, config->seq_21_event, ETMSQ21EVR);
 403         etm_writel(drvdata, config->seq_23_event, ETMSQ23EVR);
 404         etm_writel(drvdata, config->seq_31_event, ETMSQ31EVR);
 405         etm_writel(drvdata, config->seq_32_event, ETMSQ32EVR);
 406         etm_writel(drvdata, config->seq_13_event, ETMSQ13EVR);
 407         etm_writel(drvdata, config->seq_curr_state, ETMSQR);
 408         for (i = 0; i < drvdata->nr_ext_out; i++)
 409                 etm_writel(drvdata, ETM_DEFAULT_EVENT_VAL, ETMEXTOUTEVRn(i));
 410         for (i = 0; i < drvdata->nr_ctxid_cmp; i++)
 411                 etm_writel(drvdata, config->ctxid_pid[i], ETMCIDCVRn(i));
 412         etm_writel(drvdata, config->ctxid_mask, ETMCIDCMR);
 413         etm_writel(drvdata, config->sync_freq, ETMSYNCFR);
 414         /* No external input selected */
 415         etm_writel(drvdata, 0x0, ETMEXTINSELR);
 416         etm_writel(drvdata, config->timestamp_event, ETMTSEVR);
 417         /* No auxiliary control selected */
 418         etm_writel(drvdata, 0x0, ETMAUXCR);
 419         etm_writel(drvdata, drvdata->traceid, ETMTRACEIDR);
 420         /* No VMID comparator value selected */
 421         etm_writel(drvdata, 0x0, ETMVMIDCVR);
 422 
 423         etm_clr_prog(drvdata);
 424 
 425 done:
 426         CS_LOCK(drvdata->base);
 427 
 428         dev_dbg(&drvdata->csdev->dev, "cpu: %d enable smp call done: %d\n",
 429                 drvdata->cpu, rc);
 430         return rc;
 431 }
 432 
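     /* Bundles the arguments for etm_enable_hw_smp_call(); rc carries the result back */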
 433 struct etm_enable_arg {
 434         struct etm_drvdata *drvdata;
 435         int rc;
 436 };
 437 
 438 static void etm_enable_hw_smp_call(void *info)
 439 {
 440         struct etm_enable_arg *arg = info;
 441 
 442         if (WARN_ON(!arg))
 443                 return;
 444         arg->rc = etm_enable_hw(arg->drvdata);
 445 }
 446 
 447 static int etm_cpu_id(struct coresight_device *csdev)
 448 {
 449         struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 450 
 451         return drvdata->cpu;
 452 }
 453 
 454 int etm_get_trace_id(struct etm_drvdata *drvdata)
 455 {
 456         unsigned long flags;
 457         int trace_id = -1;
 458         struct device *etm_dev;
 459 
 460         if (!drvdata)
 461                 goto out;
 462 
 463         etm_dev = drvdata->csdev->dev.parent;
 464         if (!local_read(&drvdata->mode))
 465                 return drvdata->traceid;
 466 
 467         pm_runtime_get_sync(etm_dev);
 468 
 469         spin_lock_irqsave(&drvdata->spinlock, flags);
 470 
 471         CS_UNLOCK(drvdata->base);
 472         trace_id = (etm_readl(drvdata, ETMTRACEIDR) & ETM_TRACEID_MASK);
 473         CS_LOCK(drvdata->base);
 474 
 475         spin_unlock_irqrestore(&drvdata->spinlock, flags);
 476         pm_runtime_put(etm_dev);
 477 
 478 out:
 479         return trace_id;
 480 
 481 }
 482 
 483 static int etm_trace_id(struct coresight_device *csdev)
 484 {
 485         struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 486 
 487         return etm_get_trace_id(drvdata);
 488 }
 489 
 490 static int etm_enable_perf(struct coresight_device *csdev,
 491                            struct perf_event *event)
 492 {
 493         struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 494 
 495         if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id()))
 496                 return -EINVAL;
 497 
 498         /* Configure the tracer based on the session's specifics */
 499         etm_parse_event_config(drvdata, event);
 500         /* And enable it */
 501         return etm_enable_hw(drvdata);
 502 }
 503 
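     /*
      * Usage sketch (paths are assumptions based on the "etm%d" device name
      * set in etm_probe()): with a sink already enabled, e.g.
      *
      *   echo 1 > /sys/bus/coresight/devices/<sink>/enable_sink
      *   echo 1 > /sys/bus/coresight/devices/etm0/enable_source
      *
      * the coresight core ends up in etm_enable_sysfs() for the etm0 tracer.
      */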
 504 static int etm_enable_sysfs(struct coresight_device *csdev)
 505 {
 506         struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 507         struct etm_enable_arg arg = { 0 };
 508         int ret;
 509 
 510         spin_lock(&drvdata->spinlock);
 511 
 512         /*
 513          * Configure the ETM only if the CPU is online.  If it isn't, the
 514          * hw configuration will take place on the local CPU during bring-up.
 515          */
 516         if (cpu_online(drvdata->cpu)) {
 517                 arg.drvdata = drvdata;
 518                 ret = smp_call_function_single(drvdata->cpu,
 519                                                etm_enable_hw_smp_call, &arg, 1);
 520                 if (!ret)
 521                         ret = arg.rc;
 522                 if (!ret)
 523                         drvdata->sticky_enable = true;
 524         } else {
 525                 ret = -ENODEV;
 526         }
 527 
 528         spin_unlock(&drvdata->spinlock);
 529 
 530         if (!ret)
 531                 dev_dbg(&csdev->dev, "ETM tracing enabled\n");
 532         return ret;
 533 }
 534 
 535 static int etm_enable(struct coresight_device *csdev,
 536                       struct perf_event *event, u32 mode)
 537 {
 538         int ret;
 539         u32 val;
 540         struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 541 
 542         val = local_cmpxchg(&drvdata->mode, CS_MODE_DISABLED, mode);
 543 
 544         /* Someone is already using the tracer */
 545         if (val)
 546                 return -EBUSY;
 547 
 548         switch (mode) {
 549         case CS_MODE_SYSFS:
 550                 ret = etm_enable_sysfs(csdev);
 551                 break;
 552         case CS_MODE_PERF:
 553                 ret = etm_enable_perf(csdev, event);
 554                 break;
 555         default:
 556                 ret = -EINVAL;
 557         }
 558 
 559         /* The tracer didn't start */
 560         if (ret)
 561                 local_set(&drvdata->mode, CS_MODE_DISABLED);
 562 
 563         return ret;
 564 }
 565 
 566 static void etm_disable_hw(void *info)
 567 {
 568         int i;
 569         struct etm_drvdata *drvdata = info;
 570         struct etm_config *config = &drvdata->config;
 571 
 572         CS_UNLOCK(drvdata->base);
 573         etm_set_prog(drvdata);
 574 
 575         /* Read back sequencer and counters for post trace analysis */
 576         config->seq_curr_state = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK);
 577 
 578         for (i = 0; i < drvdata->nr_cntr; i++)
 579                 config->cntr_val[i] = etm_readl(drvdata, ETMCNTVRn(i));
 580 
 581         etm_set_pwrdwn(drvdata);
 582         coresight_disclaim_device_unlocked(drvdata->base);
 583 
 584         CS_LOCK(drvdata->base);
 585 
 586         dev_dbg(&drvdata->csdev->dev,
 587                 "cpu: %d disable smp call done\n", drvdata->cpu);
 588 }
 589 
 590 static void etm_disable_perf(struct coresight_device *csdev)
 591 {
 592         struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 593 
 594         if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id()))
 595                 return;
 596 
 597         CS_UNLOCK(drvdata->base);
 598 
 599         /* Setting the prog bit disables tracing immediately */
 600         etm_set_prog(drvdata);
 601 
 602         /*
 603          * There is no way to know when the tracer will be used again so
 604          * power down the tracer.
 605          */
 606         etm_set_pwrdwn(drvdata);
 607         coresight_disclaim_device_unlocked(drvdata->base);
 608 
 609         CS_LOCK(drvdata->base);
 610 }
 611 
 612 static void etm_disable_sysfs(struct coresight_device *csdev)
 613 {
 614         struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 615 
 616         /*
 617          * Taking the hotplug lock here protects against clocks being disabled
 618          * while tracing is left on (a crash scenario) if a user disable occurs
 619          * after the cpu online mask indicates the cpu is offline but before
 620          * the DYING hotplug callback is serviced by the ETM driver.
 621          */
 622         cpus_read_lock();
 623         spin_lock(&drvdata->spinlock);
 624 
 625         /*
 626          * Executing etm_disable_hw on the cpu whose ETM is being disabled
 627          * ensures that register writes occur when cpu is powered.
 628          */
 629         smp_call_function_single(drvdata->cpu, etm_disable_hw, drvdata, 1);
 630 
 631         spin_unlock(&drvdata->spinlock);
 632         cpus_read_unlock();
 633 
 634         dev_dbg(&csdev->dev, "ETM tracing disabled\n");
 635 }
 636 
 637 static void etm_disable(struct coresight_device *csdev,
 638                         struct perf_event *event)
 639 {
 640         u32 mode;
 641         struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 642 
 643         /*
 644          * For as long as the tracer isn't disabled another entity can't
 645          * change its status.  As such we can read the status here without
 646          * fearing it will change under us.
 647          */
 648         mode = local_read(&drvdata->mode);
 649 
 650         switch (mode) {
 651         case CS_MODE_DISABLED:
 652                 break;
 653         case CS_MODE_SYSFS:
 654                 etm_disable_sysfs(csdev);
 655                 break;
 656         case CS_MODE_PERF:
 657                 etm_disable_perf(csdev);
 658                 break;
 659         default:
 660                 WARN_ON_ONCE(mode);
 661                 return;
 662         }
 663 
 664         if (mode)
 665                 local_set(&drvdata->mode, CS_MODE_DISABLED);
 666 }
 667 
 668 static const struct coresight_ops_source etm_source_ops = {
 669         .cpu_id         = etm_cpu_id,
 670         .trace_id       = etm_trace_id,
 671         .enable         = etm_enable,
 672         .disable        = etm_disable,
 673 };
 674 
 675 static const struct coresight_ops etm_cs_ops = {
 676         .source_ops     = &etm_source_ops,
 677 };
 678 
 679 static int etm_online_cpu(unsigned int cpu)
 680 {
 681         if (!etmdrvdata[cpu])
 682                 return 0;
 683 
 684         if (etmdrvdata[cpu]->boot_enable && !etmdrvdata[cpu]->sticky_enable)
 685                 coresight_enable(etmdrvdata[cpu]->csdev);
 686         return 0;
 687 }
 688 
 689 static int etm_starting_cpu(unsigned int cpu)
 690 {
 691         if (!etmdrvdata[cpu])
 692                 return 0;
 693 
 694         spin_lock(&etmdrvdata[cpu]->spinlock);
 695         if (!etmdrvdata[cpu]->os_unlock) {
 696                 etm_os_unlock(etmdrvdata[cpu]);
 697                 etmdrvdata[cpu]->os_unlock = true;
 698         }
 699 
 700         if (local_read(&etmdrvdata[cpu]->mode))
 701                 etm_enable_hw(etmdrvdata[cpu]);
 702         spin_unlock(&etmdrvdata[cpu]->spinlock);
 703         return 0;
 704 }
 705 
 706 static int etm_dying_cpu(unsigned int cpu)
 707 {
 708         if (!etmdrvdata[cpu])
 709                 return 0;
 710 
 711         spin_lock(&etmdrvdata[cpu]->spinlock);
 712         if (local_read(&etmdrvdata[cpu]->mode))
 713                 etm_disable_hw(etmdrvdata[cpu]);
 714         spin_unlock(&etmdrvdata[cpu]->spinlock);
 715         return 0;
 716 }
 717 
 718 static bool etm_arch_supported(u8 arch)
 719 {
 720         switch (arch) {
 721         case ETM_ARCH_V3_3:
 722                 break;
 723         case ETM_ARCH_V3_5:
 724                 break;
 725         case PFT_ARCH_V1_0:
 726                 break;
 727         case PFT_ARCH_V1_1:
 728                 break;
 729         default:
 730                 return false;
 731         }
 732         return true;
 733 }
 734 
 735 static void etm_init_arch_data(void *info)
 736 {
 737         u32 etmidr;
 738         u32 etmccr;
 739         struct etm_drvdata *drvdata = info;
 740 
 741         /* Make sure all registers are accessible */
 742         etm_os_unlock(drvdata);
 743 
 744         CS_UNLOCK(drvdata->base);
 745 
 746         /* First dummy read */
 747         (void)etm_readl(drvdata, ETMPDSR);
 748         /* Provide power to ETM: ETMPDCR[3] == 1 */
 749         etm_set_pwrup(drvdata);
 750         /*
 751          * Clear the power down bit, since writes to certain registers
 752          * might be ignored while this bit is set.
 753          */
 754         etm_clr_pwrdwn(drvdata);
 755         /*
 756          * Set prog bit. It will be set from reset but this is included to
 757          * ensure it is set
 758          */
 759         etm_set_prog(drvdata);
 760 
 761         /* Find all capabilities */
 762         etmidr = etm_readl(drvdata, ETMIDR);
 763         drvdata->arch = BMVAL(etmidr, 4, 11);
 764         drvdata->port_size = etm_readl(drvdata, ETMCR) & PORT_SIZE_MASK;
 765 
 766         drvdata->etmccer = etm_readl(drvdata, ETMCCER);
 767         etmccr = etm_readl(drvdata, ETMCCR);
 768         drvdata->etmccr = etmccr;
 769         drvdata->nr_addr_cmp = BMVAL(etmccr, 0, 3) * 2;
 770         drvdata->nr_cntr = BMVAL(etmccr, 13, 15);
 771         drvdata->nr_ext_inp = BMVAL(etmccr, 17, 19);
 772         drvdata->nr_ext_out = BMVAL(etmccr, 20, 22);
 773         drvdata->nr_ctxid_cmp = BMVAL(etmccr, 24, 25);
 774 
 775         etm_set_pwrdwn(drvdata);
 776         etm_clr_pwrup(drvdata);
 777         CS_LOCK(drvdata->base);
 778 }
 779 
 780 static void etm_init_trace_id(struct etm_drvdata *drvdata)
 781 {
 782         drvdata->traceid = coresight_get_trace_id(drvdata->cpu);
 783 }
 784 
 785 static int etm_probe(struct amba_device *adev, const struct amba_id *id)
 786 {
 787         int ret;
 788         void __iomem *base;
 789         struct device *dev = &adev->dev;
 790         struct coresight_platform_data *pdata = NULL;
 791         struct etm_drvdata *drvdata;
 792         struct resource *res = &adev->res;
 793         struct coresight_desc desc = { 0 };
 794 
 795         drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
 796         if (!drvdata)
 797                 return -ENOMEM;
 798 
 799         drvdata->use_cp14 = fwnode_property_read_bool(dev->fwnode, "arm,cp14");
 800         dev_set_drvdata(dev, drvdata);
 801 
 802         /* Validity for the resource is already checked by the AMBA core */
 803         base = devm_ioremap_resource(dev, res);
 804         if (IS_ERR(base))
 805                 return PTR_ERR(base);
 806 
 807         drvdata->base = base;
 808 
 809         spin_lock_init(&drvdata->spinlock);
 810 
 811         drvdata->atclk = devm_clk_get(&adev->dev, "atclk"); /* optional */
 812         if (!IS_ERR(drvdata->atclk)) {
 813                 ret = clk_prepare_enable(drvdata->atclk);
 814                 if (ret)
 815                         return ret;
 816         }
 817 
 818         drvdata->cpu = coresight_get_cpu(dev);
 819         if (drvdata->cpu < 0)
 820                 return drvdata->cpu;
 821 
 822         desc.name  = devm_kasprintf(dev, GFP_KERNEL, "etm%d", drvdata->cpu);
 823         if (!desc.name)
 824                 return -ENOMEM;
 825 
 826         cpus_read_lock();
 827         etmdrvdata[drvdata->cpu] = drvdata;
 828 
 829         if (smp_call_function_single(drvdata->cpu,
 830                                      etm_init_arch_data,  drvdata, 1))
 831                 dev_err(dev, "ETM arch init failed\n");
 832 
 833         if (!etm_count++) {
 834                 cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ARM_CORESIGHT_STARTING,
 835                                                      "arm/coresight:starting",
 836                                                      etm_starting_cpu, etm_dying_cpu);
 837                 ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN,
 838                                                            "arm/coresight:online",
 839                                                            etm_online_cpu, NULL);
 840                 if (ret < 0)
 841                         goto err_arch_supported;
 842                 hp_online = ret;
 843         }
 844         cpus_read_unlock();
 845 
 846         if (!etm_arch_supported(drvdata->arch)) {
 847                 ret = -EINVAL;
 848                 goto err_arch_supported;
 849         }
 850 
 851         etm_init_trace_id(drvdata);
 852         etm_set_default(&drvdata->config);
 853 
 854         pdata = coresight_get_platform_data(dev);
 855         if (IS_ERR(pdata)) {
 856                 ret = PTR_ERR(pdata);
 857                 goto err_arch_supported;
 858         }
 859         adev->dev.platform_data = pdata;
 860 
 861         desc.type = CORESIGHT_DEV_TYPE_SOURCE;
 862         desc.subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
 863         desc.ops = &etm_cs_ops;
 864         desc.pdata = pdata;
 865         desc.dev = dev;
 866         desc.groups = coresight_etm_groups;
 867         drvdata->csdev = coresight_register(&desc);
 868         if (IS_ERR(drvdata->csdev)) {
 869                 ret = PTR_ERR(drvdata->csdev);
 870                 goto err_arch_supported;
 871         }
 872 
 873         ret = etm_perf_symlink(drvdata->csdev, true);
 874         if (ret) {
 875                 coresight_unregister(drvdata->csdev);
 876                 goto err_arch_supported;
 877         }
 878 
 879         pm_runtime_put(&adev->dev);
 880         dev_info(&drvdata->csdev->dev,
 881                  "%s initialized\n", (char *)coresight_get_uci_data(id));
 882         if (boot_enable) {
 883                 coresight_enable(drvdata->csdev);
 884                 drvdata->boot_enable = true;
 885         }
 886 
 887         return 0;
 888 
 889 err_arch_supported:
 890         if (--etm_count == 0) {
 891                 cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING);
 892                 if (hp_online)
 893                         cpuhp_remove_state_nocalls(hp_online);
 894         }
 895         return ret;
 896 }
 897 
 898 #ifdef CONFIG_PM
 899 static int etm_runtime_suspend(struct device *dev)
 900 {
 901         struct etm_drvdata *drvdata = dev_get_drvdata(dev);
 902 
 903         if (drvdata && !IS_ERR(drvdata->atclk))
 904                 clk_disable_unprepare(drvdata->atclk);
 905 
 906         return 0;
 907 }
 908 
 909 static int etm_runtime_resume(struct device *dev)
 910 {
 911         struct etm_drvdata *drvdata = dev_get_drvdata(dev);
 912 
 913         if (drvdata && !IS_ERR(drvdata->atclk))
 914                 clk_prepare_enable(drvdata->atclk);
 915 
 916         return 0;
 917 }
 918 #endif
 919 
 920 static const struct dev_pm_ops etm_dev_pm_ops = {
 921         SET_RUNTIME_PM_OPS(etm_runtime_suspend, etm_runtime_resume, NULL)
 922 };
 923 
 924 static const struct amba_id etm_ids[] = {
 925         /* ETM 3.3 */
 926         CS_AMBA_ID_DATA(0x000bb921, "ETM 3.3"),
 927         /* ETM 3.5 - Cortex-A5 */
 928         CS_AMBA_ID_DATA(0x000bb955, "ETM 3.5"),
 929         /* ETM 3.5 */
 930         CS_AMBA_ID_DATA(0x000bb956, "ETM 3.5"),
 931         /* PTM 1.0 */
 932         CS_AMBA_ID_DATA(0x000bb950, "PTM 1.0"),
 933         /* PTM 1.1 */
 934         CS_AMBA_ID_DATA(0x000bb95f, "PTM 1.1"),
 935         /* PTM 1.1 Qualcomm */
 936         CS_AMBA_ID_DATA(0x000b006f, "PTM 1.1"),
 937         { 0, 0},
 938 };
 939 
 940 static struct amba_driver etm_driver = {
 941         .drv = {
 942                 .name   = "coresight-etm3x",
 943                 .owner  = THIS_MODULE,
 944                 .pm     = &etm_dev_pm_ops,
 945                 .suppress_bind_attrs = true,
 946         },
 947         .probe          = etm_probe,
 948         .id_table       = etm_ids,
 949 };
 950 builtin_amba_driver(etm_driver);
