root/drivers/hwtracing/coresight/coresight-catu.c

DEFINITIONS

This source file includes the following definitions.
  1. catu_get_table
  2. catu_dump_table
  3. catu_dump_table
  4. catu_make_entry
  5. catu_populate_table
  6. catu_init_sg_table
  7. catu_free_etr_buf
  8. catu_get_data_etr_buf
  9. catu_sync_etr_buf
  10. catu_alloc_etr_buf
  11. catu_wait_for_ready
  12. catu_enable_hw
  13. catu_enable
  14. catu_disable_hw
  15. catu_disable
  16. catu_probe

   1 // SPDX-License-Identifier: GPL-2.0
   2 /*
   3  * Copyright (C) 2018 Arm Limited. All rights reserved.
   4  *
   5  * Coresight Address Translation Unit support
   6  *
   7  * Author: Suzuki K Poulose <suzuki.poulose@arm.com>
   8  */
   9 
  10 #include <linux/amba/bus.h>
  11 #include <linux/device.h>
  12 #include <linux/dma-mapping.h>
  13 #include <linux/io.h>
  14 #include <linux/kernel.h>
  15 #include <linux/slab.h>
  16 
  17 #include "coresight-catu.h"
  18 #include "coresight-priv.h"
  19 #include "coresight-tmc.h"
  20 
  21 #define csdev_to_catu_drvdata(csdev)    \
  22         dev_get_drvdata(csdev->dev.parent)
  23 
  24 /* Verbose output for CATU table contents */
  25 #ifdef CATU_DEBUG
  26 #define catu_dbg(x, ...) dev_dbg(x, __VA_ARGS__)
  27 #else
  28 #define catu_dbg(x, ...) do {} while (0)
  29 #endif
  30 
  31 DEFINE_CORESIGHT_DEVLIST(catu_devs, "catu");
  32 
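     /*
      * struct catu_etr_buf - CATU backed ETR buffer
      * @catu_table: SG table holding the CATU translation tables and the
      *              data pages of the trace buffer.
      * @sladdr:     Base DMA address of the first translation table, later
      *              programmed into CATU_SLADDR{LO,HI}.
      */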
  33 struct catu_etr_buf {
  34         struct tmc_sg_table *catu_table;
  35         dma_addr_t sladdr;
  36 };
  37 
  38 /*
  39  * CATU uses a page size of 4KB for page tables as well as data pages.
  40  * Each 64-bit entry in the table has the following format.
  41  *
  42  *      63                      12      1  0
  43  *      ------------------------------------
  44  *      |        Address [63-12] | SBZ  | V|
  45  *      ------------------------------------
  46  *
  47  * Where bit[0] (V) indicates whether the address is valid.
  48  * Each 4K table page holds up to 256 data page pointers, occupying up
  49  * to 2K. Two link pointers at the end of the 4K page (entries 510 and
  50  * 511) point to the previous and the next table pages respectively.
  51  *
  52  * E.g., a table of two pages could look like:
  53  *
  54  *                 Table Page 0               Table Page 1
  55  * SLADDR ===> x------------------x  x--> x-----------------x
  56  * INADDR    ->|  Page 0      | V |  |    | Page 256    | V | <- INADDR+1M
  57  *             |------------------|  |    |-----------------|
  58  * INADDR+4K ->|  Page 1      | V |  |    |                 |
  59  *             |------------------|  |    |-----------------|
  60  *             |  Page 2      | V |  |    |                 |
  61  *             |------------------|  |    |-----------------|
  62  *             |   ...        | V |  |    |    ...          |
  63  *             |------------------|  |    |-----------------|
  64  * INADDR+1020K|  Page 255    | V |  |    |   Page 511  | V |
  65  * SLADDR+2K==>|------------------|  |    |-----------------|
  66  *             |  UNUSED      |   |  |    |                 |
  67  *             |------------------|  |    |                 |
  68  *             |  UNUSED      |   |  |    |                 |
  69  *             |------------------|  |    |                 |
  70  *             |    ...       |   |  |    |                 |
  71  *             |------------------|  |    |-----------------|
  72  *             |   IGNORED    | 0 |  |    | Table Page 0| 1 |
  73  *             |------------------|  |    |-----------------|
  74  *             |  Table Page 1| 1 |--x    | IGNORED     | 0 |
  75  *             x------------------x       x-----------------x
  76  * SLADDR+4K==>
  77  *
  78  * The base input address (used by the ETR, programmed in INADDR_{LO,HI})
  79  * must be aligned to 1MB (the size addressable by a single page table).
  80  * The CATU maps INADDR_{LO,HI} to the first page in the table pointed
  81  * to by SLADDR_{LO,HI}, and so on.
  82  *
  83  */
  84 typedef u64 cate_t;
  85 
  86 #define CATU_PAGE_SHIFT         12
  87 #define CATU_PAGE_SIZE          (1UL << CATU_PAGE_SHIFT)
  88 #define CATU_PAGES_PER_SYSPAGE  (PAGE_SIZE / CATU_PAGE_SIZE)
  89 
  90 /* Page pointers are only allocated in the first 2K half */
  91 #define CATU_PTRS_PER_PAGE      ((CATU_PAGE_SIZE >> 1) / sizeof(cate_t))
  92 #define CATU_PTRS_PER_SYSPAGE   (CATU_PAGES_PER_SYSPAGE * CATU_PTRS_PER_PAGE)
  93 #define CATU_LINK_PREV          ((CATU_PAGE_SIZE / sizeof(cate_t)) - 2)
  94 #define CATU_LINK_NEXT          ((CATU_PAGE_SIZE / sizeof(cate_t)) - 1)
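
     /*
      * For example, with 4K kernel pages CATU_PAGES_PER_SYSPAGE is 1 and each
      * system page holds a single table addressing 1MB of trace data; with
      * 64K kernel pages a system page holds 16 such tables, addressing 16MB.
      */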
  95 
  96 #define CATU_ADDR_SHIFT         12
  97 #define CATU_ADDR_MASK          ~(((cate_t)1 << CATU_ADDR_SHIFT) - 1)
  98 #define CATU_ENTRY_VALID        ((cate_t)0x1)
  99 #define CATU_VALID_ENTRY(addr) \
 100         (((cate_t)(addr) & CATU_ADDR_MASK) | CATU_ENTRY_VALID)
 101 #define CATU_ENTRY_ADDR(entry)  ((cate_t)(entry) & ~((cate_t)CATU_ENTRY_VALID))
 102 
 103 /* CATU expects the INADDR to be aligned to 1M. */
 104 #define CATU_DEFAULT_INADDR     (1ULL << 20)
 105 
 106 /*
 107  * catu_get_table : Retrieve the table pointer for the given @offset
 108  * within the buffer. The @offset is wrapped around to a valid offset.
 109  *
 110  * Returns : The CPU virtual address of the beginning of the table
 111  * containing the data page pointer for @offset. If @daddrp is not NULL,
 112  * it is set to the DMA address of the beginning of the table.
 113  */
 114 static inline cate_t *catu_get_table(struct tmc_sg_table *catu_table,
 115                                      unsigned long offset,
 116                                      dma_addr_t *daddrp)
 117 {
 118         unsigned long buf_size = tmc_sg_table_buf_size(catu_table);
 119         unsigned int table_nr, pg_idx, pg_offset;
 120         struct tmc_pages *table_pages = &catu_table->table_pages;
 121         void *ptr;
 122 
 123         /* Make sure offset is within the range */
 124         offset %= buf_size;
 125 
 126         /*
 127          * Each table can address 1MB and a single kernel page can
 128          * contain "CATU_PAGES_PER_SYSPAGE" CATU tables.
 129          */
 130         table_nr = offset >> 20;
 131         /* Find the table page in which table_nr lies */
 132         pg_idx = table_nr / CATU_PAGES_PER_SYSPAGE;
 133         pg_offset = (table_nr % CATU_PAGES_PER_SYSPAGE) * CATU_PAGE_SIZE;
 134         if (daddrp)
 135                 *daddrp = table_pages->daddrs[pg_idx] + pg_offset;
 136         ptr = page_address(table_pages->pages[pg_idx]);
 137         return (cate_t *)((unsigned long)ptr + pg_offset);
 138 }
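
     /*
      * Worked example for catu_get_table(): with 4K kernel pages, an offset
      * of 5MB + 16KB gives table_nr = 5, pg_idx = 5 and pg_offset = 0, i.e.
      * the sixth table page; callers then index entry 4 within that table
      * (16KB / CATU_PAGE_SIZE).
      */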
 139 
 140 #ifdef CATU_DEBUG
 141 static void catu_dump_table(struct tmc_sg_table *catu_table)
 142 {
 143         int i;
 144         cate_t *table;
 145         unsigned long table_end, buf_size, offset = 0;
 146 
 147         buf_size = tmc_sg_table_buf_size(catu_table);
 148         dev_dbg(catu_table->dev,
 149                 "Dump table %p, tdaddr: %llx\n",
 150                 catu_table, catu_table->table_daddr);
 151 
 152         while (offset < buf_size) {
 153                 table_end = offset + SZ_1M < buf_size ?
 154                             offset + SZ_1M : buf_size;
 155                 table = catu_get_table(catu_table, offset, NULL);
 156                 for (i = 0; offset < table_end; i++, offset += CATU_PAGE_SIZE)
 157                         dev_dbg(catu_table->dev, "%d: %llx\n", i, table[i]);
 158                 dev_dbg(catu_table->dev, "Prev : %llx, Next: %llx\n",
 159                         table[CATU_LINK_PREV], table[CATU_LINK_NEXT]);
 160                 dev_dbg(catu_table->dev, "== End of sub-table ===");
 161         }
 162         dev_dbg(catu_table->dev, "== End of Table ===");
 163 }
 164 
 165 #else
 166 static inline void catu_dump_table(struct tmc_sg_table *catu_table)
 167 {
 168 }
 169 #endif
 170 
 171 static inline cate_t catu_make_entry(dma_addr_t addr)
 172 {
 173         return addr ? CATU_VALID_ENTRY(addr) : 0;
 174 }
 175 
 176 /*
 177  * catu_populate_table : Populate the given CATU table.
 178  * The table is always populated as a circular table.
 179  * i.e., the "prev" link of the "first" table points to the "last"
 180  * table and the "next" link of the "last" table points to the
 181  * "first" table. The buffer should be made linear by calling
 182  * catu_set_table().
 183  */
 184 static void
 185 catu_populate_table(struct tmc_sg_table *catu_table)
 186 {
 187         int i;
 188         int sys_pidx;   /* Index to current system data page */
 189         int catu_pidx;  /* Index of CATU page within the system data page */
 190         unsigned long offset, buf_size, table_end;
 191         dma_addr_t data_daddr;
 192         dma_addr_t prev_taddr, next_taddr, cur_taddr;
 193         cate_t *table_ptr, *next_table;
 194 
 195         buf_size = tmc_sg_table_buf_size(catu_table);
 196         sys_pidx = catu_pidx = 0;
 197         offset = 0;
 198 
 199         table_ptr = catu_get_table(catu_table, 0, &cur_taddr);
 200         prev_taddr = 0; /* Prev link for the first table */
 201 
 202         while (offset < buf_size) {
 203                 /*
 204                  * The @offset is always 1MB aligned here and we have an
 205                  * empty table @table_ptr to fill. Each table can address
 206                  * up to 1MB of the data buffer. The last table may have
 207                  * fewer entries if the buffer size is not 1MB aligned.
 208                  */
 209                 table_end = (offset + SZ_1M) < buf_size ?
 210                             (offset + SZ_1M) : buf_size;
 211                 for (i = 0; offset < table_end;
 212                      i++, offset += CATU_PAGE_SIZE) {
 213 
 214                         data_daddr = catu_table->data_pages.daddrs[sys_pidx] +
 215                                      catu_pidx * CATU_PAGE_SIZE;
 216                         catu_dbg(catu_table->dev,
 217                                 "[table %5ld:%03d] 0x%llx\n",
 218                                 (offset >> 20), i, data_daddr);
 219                         table_ptr[i] = catu_make_entry(data_daddr);
 220                         /* Move the pointers for data pages */
 221                         catu_pidx = (catu_pidx + 1) % CATU_PAGES_PER_SYSPAGE;
 222                         if (catu_pidx == 0)
 223                                 sys_pidx++;
 224                 }
 225 
 226                 /*
 227                  * If we have finished all the valid entries, fill the rest of
 228                  * the table (i.e., the last table page) with invalid entries
 229                  * so that lookups beyond the buffer fail.
 230                  */
 231                 if (offset == buf_size) {
 232                         memset(&table_ptr[i], 0,
 233                                sizeof(cate_t) * (CATU_PTRS_PER_PAGE - i));
 234                         next_taddr = 0;
 235                 } else {
 236                         next_table = catu_get_table(catu_table,
 237                                                     offset, &next_taddr);
 238                 }
 239 
 240                 table_ptr[CATU_LINK_PREV] = catu_make_entry(prev_taddr);
 241                 table_ptr[CATU_LINK_NEXT] = catu_make_entry(next_taddr);
 242 
 243                 catu_dbg(catu_table->dev,
 244                         "[table%5ld]: Cur: 0x%llx Prev: 0x%llx, Next: 0x%llx\n",
 245                         (offset >> 20) - 1,  cur_taddr, prev_taddr, next_taddr);
 246 
 247                 /* Update the prev/next addresses */
 248                 if (next_taddr) {
 249                         prev_taddr = cur_taddr;
 250                         cur_taddr = next_taddr;
 251                         table_ptr = next_table;
 252                 }
 253         }
 254 
 255         /* Sync the table for device */
 256         tmc_sg_table_sync_table(catu_table);
 257 }
 258 
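     /*
      * catu_init_sg_table : Allocate a tmc_sg_table with enough table pages
      * to map @size bytes of trace data and populate the CATU entries
      * pointing to the data pages.
      */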
 259 static struct tmc_sg_table *
 260 catu_init_sg_table(struct device *catu_dev, int node,
 261                    ssize_t size, void **pages)
 262 {
 263         int nr_tpages;
 264         struct tmc_sg_table *catu_table;
 265 
 266         /*
 267          * Each table can address up to 1MB and we can have
 268          * CATU_PAGES_PER_SYSPAGE tables in a system page.
 269          */
 270         nr_tpages = DIV_ROUND_UP(size, SZ_1M) / CATU_PAGES_PER_SYSPAGE;
 271         catu_table = tmc_alloc_sg_table(catu_dev, node, nr_tpages,
 272                                         size >> PAGE_SHIFT, pages);
 273         if (IS_ERR(catu_table))
 274                 return catu_table;
 275 
 276         catu_populate_table(catu_table);
 277         dev_dbg(catu_dev,
 278                 "Setup table %p, size %ldKB, %d table pages\n",
 279                 catu_table, (unsigned long)size >> 10,  nr_tpages);
 280         catu_dump_table(catu_table);
 281         return catu_table;
 282 }
 283 
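     /* Free the CATU tables and private data of a CATU backed ETR buffer */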
 284 static void catu_free_etr_buf(struct etr_buf *etr_buf)
 285 {
 286         struct catu_etr_buf *catu_buf;
 287 
 288         if (!etr_buf || etr_buf->mode != ETR_MODE_CATU || !etr_buf->private)
 289                 return;
 290 
 291         catu_buf = etr_buf->private;
 292         tmc_free_sg_table(catu_buf->catu_table);
 293         kfree(catu_buf);
 294 }
 295 
 296 static ssize_t catu_get_data_etr_buf(struct etr_buf *etr_buf, u64 offset,
 297                                      size_t len, char **bufpp)
 298 {
 299         struct catu_etr_buf *catu_buf = etr_buf->private;
 300 
 301         return tmc_sg_table_get_data(catu_buf->catu_table, offset, len, bufpp);
 302 }
 303 
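     /*
      * catu_sync_etr_buf : Sync the trace data written by the ETR for CPU
      * access. The RRP/RWP values are converted to offsets within the
      * buffer and the valid data length is computed, accounting for a
      * wrap-around when the write pointer is behind the read pointer.
      */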
 304 static void catu_sync_etr_buf(struct etr_buf *etr_buf, u64 rrp, u64 rwp)
 305 {
 306         struct catu_etr_buf *catu_buf = etr_buf->private;
 307         struct tmc_sg_table *catu_table = catu_buf->catu_table;
 308         u64 r_offset, w_offset;
 309 
 310         /*
 311          * ETR started off at etr_buf->hwaddr. Convert the RRP/RWP to
 312          * offsets within the trace buffer.
 313          */
 314         r_offset = rrp - etr_buf->hwaddr;
 315         w_offset = rwp - etr_buf->hwaddr;
 316 
 317         if (!etr_buf->full) {
 318                 etr_buf->len = w_offset - r_offset;
 319                 if (w_offset < r_offset)
 320                         etr_buf->len += etr_buf->size;
 321         } else {
 322                 etr_buf->len = etr_buf->size;
 323         }
 324 
 325         etr_buf->offset = r_offset;
 326         tmc_sg_table_sync_data_range(catu_table, r_offset, etr_buf->len);
 327 }
 328 
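     /*
      * catu_alloc_etr_buf : Set up an ETR buffer backed by CATU translation
      * tables. The ETR is given CATU_DEFAULT_INADDR as its buffer base,
      * while the CATU translates the accesses to the real data pages via
      * the SG table.
      */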
 329 static int catu_alloc_etr_buf(struct tmc_drvdata *tmc_drvdata,
 330                               struct etr_buf *etr_buf, int node, void **pages)
 331 {
 332         struct coresight_device *csdev;
 333         struct tmc_sg_table *catu_table;
 334         struct catu_etr_buf *catu_buf;
 335 
 336         csdev = tmc_etr_get_catu_device(tmc_drvdata);
 337         if (!csdev)
 338                 return -ENODEV;
 339         catu_buf = kzalloc(sizeof(*catu_buf), GFP_KERNEL);
 340         if (!catu_buf)
 341                 return -ENOMEM;
 342 
 343         catu_table = catu_init_sg_table(&csdev->dev, node,
 344                                         etr_buf->size, pages);
 345         if (IS_ERR(catu_table)) {
 346                 kfree(catu_buf);
 347                 return PTR_ERR(catu_table);
 348         }
 349 
 350         etr_buf->mode = ETR_MODE_CATU;
 351         etr_buf->private = catu_buf;
 352         etr_buf->hwaddr = CATU_DEFAULT_INADDR;
 353 
 354         catu_buf->catu_table = catu_table;
 355         /* Get the table base address */
 356         catu_buf->sladdr = catu_table->table_daddr;
 357 
 358         return 0;
 359 }
 360 
 361 const struct etr_buf_operations etr_catu_buf_ops = {
 362         .alloc = catu_alloc_etr_buf,
 363         .free = catu_free_etr_buf,
 364         .sync = catu_sync_etr_buf,
 365         .get_data = catu_get_data_etr_buf,
 366 };
 367 
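     /* Raw register dump attributes, exposed via the "mgmt" sysfs group below */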
 368 coresight_simple_reg32(struct catu_drvdata, devid, CORESIGHT_DEVID);
 369 coresight_simple_reg32(struct catu_drvdata, control, CATU_CONTROL);
 370 coresight_simple_reg32(struct catu_drvdata, status, CATU_STATUS);
 371 coresight_simple_reg32(struct catu_drvdata, mode, CATU_MODE);
 372 coresight_simple_reg32(struct catu_drvdata, axictrl, CATU_AXICTRL);
 373 coresight_simple_reg32(struct catu_drvdata, irqen, CATU_IRQEN);
 374 coresight_simple_reg64(struct catu_drvdata, sladdr,
 375                        CATU_SLADDRLO, CATU_SLADDRHI);
 376 coresight_simple_reg64(struct catu_drvdata, inaddr,
 377                        CATU_INADDRLO, CATU_INADDRHI);
 378 
 379 static struct attribute *catu_mgmt_attrs[] = {
 380         &dev_attr_devid.attr,
 381         &dev_attr_control.attr,
 382         &dev_attr_status.attr,
 383         &dev_attr_mode.attr,
 384         &dev_attr_axictrl.attr,
 385         &dev_attr_irqen.attr,
 386         &dev_attr_sladdr.attr,
 387         &dev_attr_inaddr.attr,
 388         NULL,
 389 };
 390 
 391 static const struct attribute_group catu_mgmt_group = {
 392         .attrs = catu_mgmt_attrs,
 393         .name = "mgmt",
 394 };
 395 
 396 static const struct attribute_group *catu_groups[] = {
 397         &catu_mgmt_group,
 398         NULL,
 399 };
 400 
 401 
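     /*
      * catu_wait_for_ready : Poll CATU_STATUS until the READY bit is set.
      * Returns 0 when the CATU is ready, or an error code on timeout.
      */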
 402 static inline int catu_wait_for_ready(struct catu_drvdata *drvdata)
 403 {
 404         return coresight_timeout(drvdata->base,
 405                                  CATU_STATUS, CATU_STATUS_READY, 1);
 406 }
 407 
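     /*
      * catu_enable_hw : Claim the device and program the CATU. If the ETR
      * buffer is backed by CATU tables, enable translate mode with SLADDR
      * and INADDR set up accordingly; otherwise enable pass-through mode.
      */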
 408 static int catu_enable_hw(struct catu_drvdata *drvdata, void *data)
 409 {
 410         int rc;
 411         u32 control, mode;
 412         struct etr_buf *etr_buf = data;
 413         struct device *dev = &drvdata->csdev->dev;
 414 
 415         if (catu_wait_for_ready(drvdata))
 416                 dev_warn(dev, "Timeout while waiting for READY\n");
 417 
 418         control = catu_read_control(drvdata);
 419         if (control & BIT(CATU_CONTROL_ENABLE)) {
 420                 dev_warn(dev, "CATU is already enabled\n");
 421                 return -EBUSY;
 422         }
 423 
 424         rc = coresight_claim_device_unlocked(drvdata->base);
 425         if (rc)
 426                 return rc;
 427 
 428         control |= BIT(CATU_CONTROL_ENABLE);
 429 
 430         if (etr_buf && etr_buf->mode == ETR_MODE_CATU) {
 431                 struct catu_etr_buf *catu_buf = etr_buf->private;
 432 
 433                 mode = CATU_MODE_TRANSLATE;
 434                 catu_write_axictrl(drvdata, CATU_OS_AXICTRL);
 435                 catu_write_sladdr(drvdata, catu_buf->sladdr);
 436                 catu_write_inaddr(drvdata, CATU_DEFAULT_INADDR);
 437         } else {
 438                 mode = CATU_MODE_PASS_THROUGH;
 439                 catu_write_sladdr(drvdata, 0);
 440                 catu_write_inaddr(drvdata, 0);
 441         }
 442 
 443         catu_write_irqen(drvdata, 0);
 444         catu_write_mode(drvdata, mode);
 445         catu_write_control(drvdata, control);
 446         dev_dbg(dev, "Enabled in %s mode\n",
 447                 (mode == CATU_MODE_PASS_THROUGH) ?
 448                 "Pass through" :
 449                 "Translate");
 450         return 0;
 451 }
 452 
 453 static int catu_enable(struct coresight_device *csdev, void *data)
 454 {
 455         int rc;
 456         struct catu_drvdata *catu_drvdata = csdev_to_catu_drvdata(csdev);
 457 
 458         CS_UNLOCK(catu_drvdata->base);
 459         rc = catu_enable_hw(catu_drvdata, data);
 460         CS_LOCK(catu_drvdata->base);
 461         return rc;
 462 }
 463 
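     /*
      * catu_disable_hw : Turn the CATU off, disclaim the device and wait
      * for it to report READY again.
      */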
 464 static int catu_disable_hw(struct catu_drvdata *drvdata)
 465 {
 466         int rc = 0;
 467         struct device *dev = &drvdata->csdev->dev;
 468 
 469         catu_write_control(drvdata, 0);
 470         coresight_disclaim_device_unlocked(drvdata->base);
 471         if (catu_wait_for_ready(drvdata)) {
 472                 dev_info(dev, "Timeout while waiting for READY\n");
 473                 rc = -EAGAIN;
 474         }
 475 
 476         dev_dbg(dev, "Disabled\n");
 477         return rc;
 478 }
 479 
 480 static int catu_disable(struct coresight_device *csdev, void *__unused)
 481 {
 482         int rc;
 483         struct catu_drvdata *catu_drvdata = csdev_to_catu_drvdata(csdev);
 484 
 485         CS_UNLOCK(catu_drvdata->base);
 486         rc = catu_disable_hw(catu_drvdata);
 487         CS_LOCK(catu_drvdata->base);
 488         return rc;
 489 }
 490 
 491 static const struct coresight_ops_helper catu_helper_ops = {
 492         .enable = catu_enable,
 493         .disable = catu_disable,
 494 };
 495 
 496 static const struct coresight_ops catu_ops = {
 497         .helper_ops = &catu_helper_ops,
 498 };
 499 
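     /*
      * catu_probe : Read the supported DMA address width from the DEVID
      * register, set the DMA mask accordingly and register the CATU as a
      * CoreSight helper device.
      */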
 500 static int catu_probe(struct amba_device *adev, const struct amba_id *id)
 501 {
 502         int ret = 0;
 503         u32 dma_mask;
 504         struct catu_drvdata *drvdata;
 505         struct coresight_desc catu_desc;
 506         struct coresight_platform_data *pdata = NULL;
 507         struct device *dev = &adev->dev;
 508         void __iomem *base;
 509 
 510         catu_desc.name = coresight_alloc_device_name(&catu_devs, dev);
 511         if (!catu_desc.name)
 512                 return -ENOMEM;
 513 
 514         drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
 515         if (!drvdata) {
 516                 ret = -ENOMEM;
 517                 goto out;
 518         }
 519 
 520         dev_set_drvdata(dev, drvdata);
 521         base = devm_ioremap_resource(dev, &adev->res);
 522         if (IS_ERR(base)) {
 523                 ret = PTR_ERR(base);
 524                 goto out;
 525         }
 526 
 527         /* Set up the DMA mask for the device */
 528         dma_mask = readl_relaxed(base + CORESIGHT_DEVID) & 0x3f;
 529         switch (dma_mask) {
 530         case 32:
 531         case 40:
 532         case 44:
 533         case 48:
 534         case 52:
 535         case 56:
 536         case 64:
 537                 break;
 538         default:
 539                 /* Default to 40 bits, as supported by the TMC-ETR */
 540                 dma_mask = 40;
 541         }
 542         ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(dma_mask));
 543         if (ret)
 544                 goto out;
 545 
 546         pdata = coresight_get_platform_data(dev);
 547         if (IS_ERR(pdata)) {
 548                 ret = PTR_ERR(pdata);
 549                 goto out;
 550         }
 551         dev->platform_data = pdata;
 552 
 553         drvdata->base = base;
 554         catu_desc.pdata = pdata;
 555         catu_desc.dev = dev;
 556         catu_desc.groups = catu_groups;
 557         catu_desc.type = CORESIGHT_DEV_TYPE_HELPER;
 558         catu_desc.subtype.helper_subtype = CORESIGHT_DEV_SUBTYPE_HELPER_CATU;
 559         catu_desc.ops = &catu_ops;
 560 
 561         drvdata->csdev = coresight_register(&catu_desc);
 562         if (IS_ERR(drvdata->csdev))
 563                 ret = PTR_ERR(drvdata->csdev);
 564         else
 565                 pm_runtime_put(&adev->dev);
 566 out:
 567         return ret;
 568 }
 569 
 570 static struct amba_id catu_ids[] = {
 571         {
 572                 .id     = 0x000bb9ee,
 573                 .mask   = 0x000fffff,
 574         },
 575         {},
 576 };
 577 
 578 static struct amba_driver catu_driver = {
 579         .drv = {
 580                 .name                   = "coresight-catu",
 581                 .owner                  = THIS_MODULE,
 582                 .suppress_bind_attrs    = true,
 583         },
 584         .probe                          = catu_probe,
 585         .id_table                       = catu_ids,
 586 };
 587 
 588 builtin_amba_driver(catu_driver);
