root/drivers/hwtracing/coresight/coresight-etb10.c

DEFINITIONS

This source file includes the following definitions.
  1. etb_get_buffer_depth
  2. __etb_enable_hw
  3. etb_enable_hw
  4. etb_enable_sysfs
  5. etb_enable_perf
  6. etb_enable
  7. __etb_disable_hw
  8. etb_dump_hw
  9. etb_disable_hw
  10. etb_disable
  11. etb_alloc_buffer
  12. etb_free_buffer
  13. etb_set_buffer
  14. etb_update_buffer
  15. etb_dump
  16. etb_open
  17. etb_read
  18. etb_release
  19. trigger_cntr_show
  20. trigger_cntr_store
  21. etb_probe
  22. etb_runtime_suspend
  23. etb_runtime_resume

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
 *
 * Description: CoreSight Embedded Trace Buffer driver
 */

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
#include <linux/coresight.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
#include <linux/circ_buf.h>
#include <linux/mm.h>
#include <linux/perf_event.h>

#include "coresight-priv.h"
#include "coresight-etm-perf.h"

#define ETB_RAM_DEPTH_REG       0x004
#define ETB_STATUS_REG          0x00c
#define ETB_RAM_READ_DATA_REG   0x010
#define ETB_RAM_READ_POINTER    0x014
#define ETB_RAM_WRITE_POINTER   0x018
#define ETB_TRG                 0x01c
#define ETB_CTL_REG             0x020
#define ETB_RWD_REG             0x024
#define ETB_FFSR                0x300
#define ETB_FFCR                0x304
#define ETB_ITMISCOP0           0xee0
#define ETB_ITTRFLINACK         0xee4
#define ETB_ITTRFLIN            0xee8
#define ETB_ITATBDATA0          0xeeC
#define ETB_ITATBCTR2           0xef0
#define ETB_ITATBCTR1           0xef4
#define ETB_ITATBCTR0           0xef8

/* register description */
/* STS - 0x00C */
#define ETB_STATUS_RAM_FULL     BIT(0)
/* CTL - 0x020 */
#define ETB_CTL_CAPT_EN         BIT(0)
/* FFCR - 0x304 */
#define ETB_FFCR_EN_FTC         BIT(0)
#define ETB_FFCR_FON_MAN        BIT(6)
#define ETB_FFCR_STOP_FI        BIT(12)
#define ETB_FFCR_STOP_TRIGGER   BIT(13)

#define ETB_FFCR_BIT            6
#define ETB_FFSR_BIT            1
#define ETB_FRAME_SIZE_WORDS    4
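
/*
 * ETB_FFCR_BIT and ETB_FFSR_BIT are the bit positions polled through
 * coresight_timeout(): the manual flush bit (FFCR bit 6) must clear once
 * a flush completes, and the formatter-stopped bit (FFSR bit 1) must set
 * once the formatter has stopped.  ETB_FRAME_SIZE_WORDS is the CoreSight
 * formatter frame size of four 32-bit words (16 bytes).
 */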

DEFINE_CORESIGHT_DEVLIST(etb_devs, "etb");

/**
 * struct etb_drvdata - specifics associated to an ETB component
 * @base:       memory mapped base address for this component.
 * @atclk:      optional clock for the core parts of the ETB.
 * @csdev:      component vitals needed by the framework.
 * @miscdev:    specifics to handle "/dev/xyz.etb" entry.
 * @spinlock:   serialize access to this structure.
 * @reading:    synchronise user space access to etb buffer.
 * @pid:        Process ID of the process being monitored by the session
 *              that is using this component.
 * @buf:        area of memory where ETB buffer content gets sent.
 * @mode:       mode in which this ETB is being used (sysfs or perf).
 * @buffer_depth: size of @buf, in 32-bit words.
 * @trigger_cntr: number of words to store after a trigger.
 */
struct etb_drvdata {
        void __iomem            *base;
        struct clk              *atclk;
        struct coresight_device *csdev;
        struct miscdevice       miscdev;
        spinlock_t              spinlock;
        local_t                 reading;
        pid_t                   pid;
        u8                      *buf;
        u32                     mode;
        u32                     buffer_depth;
        u32                     trigger_cntr;
};

static int etb_set_buffer(struct coresight_device *csdev,
                          struct perf_output_handle *handle);

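/*
 * The RAM Depth Register reports the ETB RAM size in 32-bit words; the
 * probe path uses this value to size the local dump buffer
 * (buffer_depth * 4 bytes).
 */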
static inline unsigned int etb_get_buffer_depth(struct etb_drvdata *drvdata)
{
        return readl_relaxed(drvdata->base + ETB_RAM_DEPTH_REG);
}

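/*
 * Program the ETB for a new capture session: zero the trace RAM, reset
 * the read/write pointers, set the trigger counter and formatter flush
 * controls, then enable trace capture.
 */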
static void __etb_enable_hw(struct etb_drvdata *drvdata)
{
        int i;
        u32 depth;

        CS_UNLOCK(drvdata->base);

        depth = drvdata->buffer_depth;
        /* reset write RAM pointer address */
        writel_relaxed(0x0, drvdata->base + ETB_RAM_WRITE_POINTER);
        /* clear entire RAM buffer */
        for (i = 0; i < depth; i++)
                writel_relaxed(0x0, drvdata->base + ETB_RWD_REG);

        /* reset write RAM pointer address */
        writel_relaxed(0x0, drvdata->base + ETB_RAM_WRITE_POINTER);
        /* reset read RAM pointer address */
        writel_relaxed(0x0, drvdata->base + ETB_RAM_READ_POINTER);

        writel_relaxed(drvdata->trigger_cntr, drvdata->base + ETB_TRG);
        writel_relaxed(ETB_FFCR_EN_FTC | ETB_FFCR_STOP_TRIGGER,
                       drvdata->base + ETB_FFCR);
        /* ETB trace capture enable */
        writel_relaxed(ETB_CTL_CAPT_EN, drvdata->base + ETB_CTL_REG);

        CS_LOCK(drvdata->base);
}

static int etb_enable_hw(struct etb_drvdata *drvdata)
{
        int rc = coresight_claim_device(drvdata->base);

        if (rc)
                return rc;

        __etb_enable_hw(drvdata);
        return 0;
}

static int etb_enable_sysfs(struct coresight_device *csdev)
{
        int ret = 0;
        unsigned long flags;
        struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

        spin_lock_irqsave(&drvdata->spinlock, flags);

        /* Don't mess up perf sessions. */
        if (drvdata->mode == CS_MODE_PERF) {
                ret = -EBUSY;
                goto out;
        }

        if (drvdata->mode == CS_MODE_DISABLED) {
                ret = etb_enable_hw(drvdata);
                if (ret)
                        goto out;

                drvdata->mode = CS_MODE_SYSFS;
        }

        atomic_inc(csdev->refcnt);
out:
        spin_unlock_irqrestore(&drvdata->spinlock, flags);
        return ret;
}

static int etb_enable_perf(struct coresight_device *csdev, void *data)
{
        int ret = 0;
        pid_t pid;
        unsigned long flags;
        struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
        struct perf_output_handle *handle = data;

        spin_lock_irqsave(&drvdata->spinlock, flags);

        /* No need to continue if the component is already in use by sysfs. */
        if (drvdata->mode == CS_MODE_SYSFS) {
                ret = -EBUSY;
                goto out;
        }

        /* Get a handle on the pid of the process to monitor */
        pid = task_pid_nr(handle->event->owner);

        if (drvdata->pid != -1 && drvdata->pid != pid) {
                ret = -EBUSY;
                goto out;
        }

        /*
         * No HW configuration is needed if the sink is already in
         * use for this session.
         */
        if (drvdata->pid == pid) {
                atomic_inc(csdev->refcnt);
                goto out;
        }

        /*
         * We don't have an internal state to clean up if we fail to set up
         * the perf buffer. So we can perform the step before we turn the
         * ETB on and leave without cleaning up.
         */
        ret = etb_set_buffer(csdev, handle);
        if (ret)
                goto out;

        ret = etb_enable_hw(drvdata);
        if (!ret) {
                /* Associate with monitored process. */
                drvdata->pid = pid;
                drvdata->mode = CS_MODE_PERF;
                atomic_inc(csdev->refcnt);
        }

out:
        spin_unlock_irqrestore(&drvdata->spinlock, flags);
        return ret;
}

static int etb_enable(struct coresight_device *csdev, u32 mode, void *data)
{
        int ret;

        switch (mode) {
        case CS_MODE_SYSFS:
                ret = etb_enable_sysfs(csdev);
                break;
        case CS_MODE_PERF:
                ret = etb_enable_perf(csdev, data);
                break;
        default:
                ret = -EINVAL;
                break;
        }

        if (ret)
                return ret;

        dev_dbg(&csdev->dev, "ETB enabled\n");
        return 0;
}

static void __etb_disable_hw(struct etb_drvdata *drvdata)
{
        u32 ffcr;
        struct device *dev = &drvdata->csdev->dev;

        CS_UNLOCK(drvdata->base);

        ffcr = readl_relaxed(drvdata->base + ETB_FFCR);
        /* stop formatter when a stop has completed */
        ffcr |= ETB_FFCR_STOP_FI;
        writel_relaxed(ffcr, drvdata->base + ETB_FFCR);
        /* manually generate a flush of the system */
        ffcr |= ETB_FFCR_FON_MAN;
        writel_relaxed(ffcr, drvdata->base + ETB_FFCR);

        if (coresight_timeout(drvdata->base, ETB_FFCR, ETB_FFCR_BIT, 0)) {
                dev_err(dev,
                "timeout while waiting for completion of Manual Flush\n");
        }

        /* disable trace capture */
        writel_relaxed(0x0, drvdata->base + ETB_CTL_REG);

        if (coresight_timeout(drvdata->base, ETB_FFSR, ETB_FFSR_BIT, 1)) {
                dev_err(dev,
                        "timeout while waiting for Formatter to Stop\n");
        }

        CS_LOCK(drvdata->base);
}

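/*
 * Copy the ETB RAM contents into drvdata->buf so they can be handed to
 * user space through the misc device.  If the RAM wrapped around, start
 * reading from the write pointer and flag the loss with a barrier packet.
 */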
static void etb_dump_hw(struct etb_drvdata *drvdata)
{
        bool lost = false;
        int i;
        u8 *buf_ptr;
        u32 read_data, depth;
        u32 read_ptr, write_ptr;
        u32 frame_off, frame_endoff;
        struct device *dev = &drvdata->csdev->dev;

        CS_UNLOCK(drvdata->base);

        read_ptr = readl_relaxed(drvdata->base + ETB_RAM_READ_POINTER);
        write_ptr = readl_relaxed(drvdata->base + ETB_RAM_WRITE_POINTER);

        frame_off = write_ptr % ETB_FRAME_SIZE_WORDS;
        frame_endoff = ETB_FRAME_SIZE_WORDS - frame_off;
        if (frame_off) {
                dev_err(dev,
                        "write_ptr: %lu not aligned to formatter frame size\n",
                        (unsigned long)write_ptr);
                dev_err(dev, "frameoff: %lu, frame_endoff: %lu\n",
                        (unsigned long)frame_off, (unsigned long)frame_endoff);
                write_ptr += frame_endoff;
        }

        if ((readl_relaxed(drvdata->base + ETB_STATUS_REG)
                      & ETB_STATUS_RAM_FULL) == 0) {
                writel_relaxed(0x0, drvdata->base + ETB_RAM_READ_POINTER);
        } else {
                writel_relaxed(write_ptr, drvdata->base + ETB_RAM_READ_POINTER);
                lost = true;
        }

        depth = drvdata->buffer_depth;
        buf_ptr = drvdata->buf;
        for (i = 0; i < depth; i++) {
                read_data = readl_relaxed(drvdata->base +
                                          ETB_RAM_READ_DATA_REG);
                *(u32 *)buf_ptr = read_data;
                buf_ptr += 4;
        }

        if (lost)
                coresight_insert_barrier_packet(drvdata->buf);

        if (frame_off) {
                buf_ptr -= (frame_endoff * 4);
                for (i = 0; i < frame_endoff; i++) {
                        *buf_ptr++ = 0x0;
                        *buf_ptr++ = 0x0;
                        *buf_ptr++ = 0x0;
                        *buf_ptr++ = 0x0;
                }
        }

        writel_relaxed(read_ptr, drvdata->base + ETB_RAM_READ_POINTER);

        CS_LOCK(drvdata->base);
}

static void etb_disable_hw(struct etb_drvdata *drvdata)
{
        __etb_disable_hw(drvdata);
        etb_dump_hw(drvdata);
        coresight_disclaim_device(drvdata->base);
}

static int etb_disable(struct coresight_device *csdev)
{
        struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
        unsigned long flags;

        spin_lock_irqsave(&drvdata->spinlock, flags);

        if (atomic_dec_return(csdev->refcnt)) {
                spin_unlock_irqrestore(&drvdata->spinlock, flags);
                return -EBUSY;
        }

        /* Complain if we (somehow) got out of sync */
        WARN_ON_ONCE(drvdata->mode == CS_MODE_DISABLED);
        etb_disable_hw(drvdata);
        /* Dissociate from monitored process. */
        drvdata->pid = -1;
        drvdata->mode = CS_MODE_DISABLED;
        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        dev_dbg(&csdev->dev, "ETB disabled\n");
        return 0;
}

static void *etb_alloc_buffer(struct coresight_device *csdev,
                              struct perf_event *event, void **pages,
                              int nr_pages, bool overwrite)
{
        int node;
        struct cs_buffers *buf;

        node = (event->cpu == -1) ? NUMA_NO_NODE : cpu_to_node(event->cpu);

        buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
        if (!buf)
                return NULL;

        buf->snapshot = overwrite;
        buf->nr_pages = nr_pages;
        buf->data_pages = pages;

        return buf;
}

static void etb_free_buffer(void *config)
{
        struct cs_buffers *buf = config;

        kfree(buf);
}

static int etb_set_buffer(struct coresight_device *csdev,
                          struct perf_output_handle *handle)
{
        int ret = 0;
        unsigned long head;
        struct cs_buffers *buf = etm_perf_sink_config(handle);

        if (!buf)
                return -EINVAL;

        /* wrap head around to the amount of space we have */
        head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);

        /* find the page to write to */
        buf->cur = head / PAGE_SIZE;

        /* and offset within that page */
        buf->offset = head % PAGE_SIZE;

        local_set(&buf->data_size, 0);

        return ret;
}

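/*
 * Drain the ETB RAM into the perf AUX ring buffer described by @handle.
 * Capture is stopped while the data is copied and re-armed before
 * returning the number of bytes made available to perf.
 */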
static unsigned long etb_update_buffer(struct coresight_device *csdev,
                              struct perf_output_handle *handle,
                              void *sink_config)
{
        bool lost = false;
        int i, cur;
        u8 *buf_ptr;
        const u32 *barrier;
        u32 read_ptr, write_ptr, capacity;
        u32 status, read_data;
        unsigned long offset, to_read = 0, flags;
        struct cs_buffers *buf = sink_config;
        struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

        if (!buf)
                return 0;

        capacity = drvdata->buffer_depth * ETB_FRAME_SIZE_WORDS;

        spin_lock_irqsave(&drvdata->spinlock, flags);

        /* Don't do anything if another tracer is using this sink */
        if (atomic_read(csdev->refcnt) != 1)
                goto out;

        __etb_disable_hw(drvdata);
        CS_UNLOCK(drvdata->base);

        /* unit is in words, not bytes */
        read_ptr = readl_relaxed(drvdata->base + ETB_RAM_READ_POINTER);
        write_ptr = readl_relaxed(drvdata->base + ETB_RAM_WRITE_POINTER);

        /*
         * Entries should be aligned to the frame size.  If they are not,
         * go back to the last alignment point to give decoding tools a
         * chance to fix things.
         */
        if (write_ptr % ETB_FRAME_SIZE_WORDS) {
                dev_err(&csdev->dev,
                        "write_ptr: %lu not aligned to formatter frame size\n",
                        (unsigned long)write_ptr);

                write_ptr &= ~(ETB_FRAME_SIZE_WORDS - 1);
                lost = true;
        }

        /*
         * Get a hold of the status register and see if a wrap around
         * has occurred.  If so adjust things accordingly.  Otherwise
         * start at the beginning and go until the write pointer has
         * been reached.
         */
        status = readl_relaxed(drvdata->base + ETB_STATUS_REG);
        if (status & ETB_STATUS_RAM_FULL) {
                lost = true;
                to_read = capacity;
                read_ptr = write_ptr;
        } else {
                to_read = CIRC_CNT(write_ptr, read_ptr, drvdata->buffer_depth);
                to_read *= ETB_FRAME_SIZE_WORDS;
        }

        /*
         * Make sure we don't overwrite data that hasn't been consumed yet.
         * It is entirely possible that the HW buffer has more data than the
         * ring buffer can currently handle.  If so adjust the start address
         * to take only the last traces.
         *
         * In snapshot mode we are looking to get the latest traces only and as
         * such, we don't care about not overwriting data that hasn't been
         * processed by user space.
         */
        if (!buf->snapshot && to_read > handle->size) {
                u32 mask = ~(ETB_FRAME_SIZE_WORDS - 1);

                /* The new read pointer must be frame size aligned */
                to_read = handle->size & mask;
                /*
                 * Move the RAM read pointer up, keeping in mind that
                 * everything is in frame size units.
                 */
                read_ptr = (write_ptr + drvdata->buffer_depth) -
                                        to_read / ETB_FRAME_SIZE_WORDS;
                /* Wrap around if need be */
                if (read_ptr > (drvdata->buffer_depth - 1))
                        read_ptr -= drvdata->buffer_depth;
                /* let the decoder know we've skipped ahead */
                lost = true;
        }

        /*
         * Don't set the TRUNCATED flag in snapshot mode because 1) the
         * captured buffer is expected to be truncated and 2) a full buffer
         * prevents the event from being re-enabled by the perf core,
         * resulting in stale data being sent to user space.
         */
        if (!buf->snapshot && lost)
                perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);

        /* finally tell HW where we want to start reading from */
        writel_relaxed(read_ptr, drvdata->base + ETB_RAM_READ_POINTER);

        cur = buf->cur;
        offset = buf->offset;
        barrier = barrier_pkt;

        for (i = 0; i < to_read; i += 4) {
                buf_ptr = buf->data_pages[cur] + offset;
                read_data = readl_relaxed(drvdata->base +
                                          ETB_RAM_READ_DATA_REG);
                if (lost && i < CORESIGHT_BARRIER_PKT_SIZE) {
                        read_data = *barrier;
                        barrier++;
                }

                *(u32 *)buf_ptr = read_data;
                buf_ptr += 4;

                offset += 4;
                if (offset >= PAGE_SIZE) {
                        offset = 0;
                        cur++;
                        /* wrap around at the end of the buffer */
                        cur &= buf->nr_pages - 1;
                }
        }

        /* reset ETB buffer for next run */
        writel_relaxed(0x0, drvdata->base + ETB_RAM_READ_POINTER);
        writel_relaxed(0x0, drvdata->base + ETB_RAM_WRITE_POINTER);

        /*
         * In snapshot mode we simply increment the head by the number of
         * bytes that were written.  The user space function
         * cs_etm_find_snapshot() will figure out how many bytes to get from
         * the AUX buffer based on the position of the head.
         */
        if (buf->snapshot)
                handle->head += to_read;

        __etb_enable_hw(drvdata);
        CS_LOCK(drvdata->base);
out:
        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        return to_read;
}

static const struct coresight_ops_sink etb_sink_ops = {
        .enable         = etb_enable,
        .disable        = etb_disable,
        .alloc_buffer   = etb_alloc_buffer,
        .free_buffer    = etb_free_buffer,
        .update_buffer  = etb_update_buffer,
};

static const struct coresight_ops etb_cs_ops = {
        .sink_ops       = &etb_sink_ops,
};

static void etb_dump(struct etb_drvdata *drvdata)
{
        unsigned long flags;

        spin_lock_irqsave(&drvdata->spinlock, flags);
        if (drvdata->mode == CS_MODE_SYSFS) {
                __etb_disable_hw(drvdata);
                etb_dump_hw(drvdata);
                __etb_enable_hw(drvdata);
        }
        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        dev_dbg(&drvdata->csdev->dev, "ETB dumped\n");
}

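/*
 * File operations backing the /dev/<name> misc device registered at probe
 * time.  A typical way to retrieve a sysfs-mode trace is something like:
 *
 *   dd if=/dev/etb0 of=trace.bin
 *
 * The exact device node name is platform dependent; "etb0" above is only
 * illustrative.
 */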
static int etb_open(struct inode *inode, struct file *file)
{
        struct etb_drvdata *drvdata = container_of(file->private_data,
                                                   struct etb_drvdata, miscdev);

        if (local_cmpxchg(&drvdata->reading, 0, 1))
                return -EBUSY;

        dev_dbg(&drvdata->csdev->dev, "%s: successfully opened\n", __func__);
        return 0;
}

static ssize_t etb_read(struct file *file, char __user *data,
                                size_t len, loff_t *ppos)
{
        u32 depth;
        struct etb_drvdata *drvdata = container_of(file->private_data,
                                                   struct etb_drvdata, miscdev);
        struct device *dev = &drvdata->csdev->dev;

        etb_dump(drvdata);

        depth = drvdata->buffer_depth;
        if (*ppos + len > depth * 4)
                len = depth * 4 - *ppos;

        if (copy_to_user(data, drvdata->buf + *ppos, len)) {
                dev_dbg(dev,
                        "%s: copy_to_user failed\n", __func__);
                return -EFAULT;
        }

        *ppos += len;

        dev_dbg(dev, "%s: %zu bytes copied, %d bytes left\n",
                __func__, len, (int)(depth * 4 - *ppos));
        return len;
}

static int etb_release(struct inode *inode, struct file *file)
{
        struct etb_drvdata *drvdata = container_of(file->private_data,
                                                   struct etb_drvdata, miscdev);
        local_set(&drvdata->reading, 0);

        dev_dbg(&drvdata->csdev->dev, "%s: released\n", __func__);
        return 0;
}

static const struct file_operations etb_fops = {
        .owner          = THIS_MODULE,
        .open           = etb_open,
        .read           = etb_read,
        .release        = etb_release,
        .llseek         = no_llseek,
};

#define coresight_etb10_reg(name, offset)               \
        coresight_simple_reg32(struct etb_drvdata, name, offset)

coresight_etb10_reg(rdp, ETB_RAM_DEPTH_REG);
coresight_etb10_reg(sts, ETB_STATUS_REG);
coresight_etb10_reg(rrp, ETB_RAM_READ_POINTER);
coresight_etb10_reg(rwp, ETB_RAM_WRITE_POINTER);
coresight_etb10_reg(trg, ETB_TRG);
coresight_etb10_reg(ctl, ETB_CTL_REG);
coresight_etb10_reg(ffsr, ETB_FFSR);
coresight_etb10_reg(ffcr, ETB_FFCR);

static struct attribute *coresight_etb_mgmt_attrs[] = {
        &dev_attr_rdp.attr,
        &dev_attr_sts.attr,
        &dev_attr_rrp.attr,
        &dev_attr_rwp.attr,
        &dev_attr_trg.attr,
        &dev_attr_ctl.attr,
        &dev_attr_ffsr.attr,
        &dev_attr_ffcr.attr,
        NULL,
};

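/*
 * trigger_cntr: number of 32-bit words the ETB keeps capturing after a
 * trigger event before stopping (programmed into ETB_TRG when the sink
 * is enabled).  The value is read and written in hexadecimal through
 * sysfs, e.g. (path is illustrative):
 *
 *   echo 0x80 > /sys/bus/coresight/devices/etb0/trigger_cntr
 */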
static ssize_t trigger_cntr_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        struct etb_drvdata *drvdata = dev_get_drvdata(dev->parent);
        unsigned long val = drvdata->trigger_cntr;

        return sprintf(buf, "%#lx\n", val);
}

static ssize_t trigger_cntr_store(struct device *dev,
                             struct device_attribute *attr,
                             const char *buf, size_t size)
{
        int ret;
        unsigned long val;
        struct etb_drvdata *drvdata = dev_get_drvdata(dev->parent);

        ret = kstrtoul(buf, 16, &val);
        if (ret)
                return ret;

        drvdata->trigger_cntr = val;
        return size;
}
static DEVICE_ATTR_RW(trigger_cntr);

static struct attribute *coresight_etb_attrs[] = {
        &dev_attr_trigger_cntr.attr,
        NULL,
};

static const struct attribute_group coresight_etb_group = {
        .attrs = coresight_etb_attrs,
};

static const struct attribute_group coresight_etb_mgmt_group = {
        .attrs = coresight_etb_mgmt_attrs,
        .name = "mgmt",
};

const struct attribute_group *coresight_etb_groups[] = {
        &coresight_etb_group,
        &coresight_etb_mgmt_group,
        NULL,
};

static int etb_probe(struct amba_device *adev, const struct amba_id *id)
{
        int ret;
        void __iomem *base;
        struct device *dev = &adev->dev;
        struct coresight_platform_data *pdata = NULL;
        struct etb_drvdata *drvdata;
        struct resource *res = &adev->res;
        struct coresight_desc desc = { 0 };

        desc.name = coresight_alloc_device_name(&etb_devs, dev);
        if (!desc.name)
                return -ENOMEM;

        drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
        if (!drvdata)
                return -ENOMEM;

        drvdata->atclk = devm_clk_get(&adev->dev, "atclk"); /* optional */
        if (!IS_ERR(drvdata->atclk)) {
                ret = clk_prepare_enable(drvdata->atclk);
                if (ret)
                        return ret;
        }
        dev_set_drvdata(dev, drvdata);

        /* validity for the resource is already checked by the AMBA core */
        base = devm_ioremap_resource(dev, res);
        if (IS_ERR(base))
                return PTR_ERR(base);

        drvdata->base = base;

        spin_lock_init(&drvdata->spinlock);

        drvdata->buffer_depth = etb_get_buffer_depth(drvdata);

        if (drvdata->buffer_depth & 0x80000000)
                return -EINVAL;

        drvdata->buf = devm_kcalloc(dev,
                                    drvdata->buffer_depth, 4, GFP_KERNEL);
        if (!drvdata->buf)
                return -ENOMEM;

        /* This device is not associated with a session */
        drvdata->pid = -1;

        pdata = coresight_get_platform_data(dev);
        if (IS_ERR(pdata))
                return PTR_ERR(pdata);
        adev->dev.platform_data = pdata;

        desc.type = CORESIGHT_DEV_TYPE_SINK;
        desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
        desc.ops = &etb_cs_ops;
        desc.pdata = pdata;
        desc.dev = dev;
        desc.groups = coresight_etb_groups;
        drvdata->csdev = coresight_register(&desc);
        if (IS_ERR(drvdata->csdev))
                return PTR_ERR(drvdata->csdev);

        drvdata->miscdev.name = desc.name;
        drvdata->miscdev.minor = MISC_DYNAMIC_MINOR;
        drvdata->miscdev.fops = &etb_fops;
        ret = misc_register(&drvdata->miscdev);
        if (ret)
                goto err_misc_register;

        pm_runtime_put(&adev->dev);
        return 0;

err_misc_register:
        coresight_unregister(drvdata->csdev);
        return ret;
}

#ifdef CONFIG_PM
static int etb_runtime_suspend(struct device *dev)
{
        struct etb_drvdata *drvdata = dev_get_drvdata(dev);

        if (drvdata && !IS_ERR(drvdata->atclk))
                clk_disable_unprepare(drvdata->atclk);

        return 0;
}

static int etb_runtime_resume(struct device *dev)
{
        struct etb_drvdata *drvdata = dev_get_drvdata(dev);

        if (drvdata && !IS_ERR(drvdata->atclk))
                clk_prepare_enable(drvdata->atclk);

        return 0;
}
#endif

static const struct dev_pm_ops etb_dev_pm_ops = {
        SET_RUNTIME_PM_OPS(etb_runtime_suspend, etb_runtime_resume, NULL)
};

static const struct amba_id etb_ids[] = {
        {
                .id     = 0x000bb907,
                .mask   = 0x000fffff,
        },
        { 0, 0},
};

static struct amba_driver etb_driver = {
        .drv = {
                .name   = "coresight-etb10",
                .owner  = THIS_MODULE,
                .pm     = &etb_dev_pm_ops,
                .suppress_bind_attrs = true,
        },
        .probe          = etb_probe,
        .id_table       = etb_ids,
};
builtin_amba_driver(etb_driver);
