root/drivers/media/platform/vsp1/vsp1_dl.c

DEFINITIONS

This source file includes the following definitions.
  1. vsp1_dl_body_pool_create
  2. vsp1_dl_body_pool_destroy
  3. vsp1_dl_body_get
  4. vsp1_dl_body_put
  5. vsp1_dl_body_write
  6. vsp1_dl_cmd_pool_create
  7. vsp1_dl_ext_cmd_get
  8. vsp1_dl_ext_cmd_put
  9. vsp1_dl_ext_cmd_pool_destroy
  10. vsp1_dl_get_pre_cmd
  11. vsp1_dl_list_alloc
  12. vsp1_dl_list_bodies_put
  13. vsp1_dl_list_free
  14. vsp1_dl_list_get
  15. __vsp1_dl_list_put
  16. vsp1_dl_list_put
  17. vsp1_dl_list_get_body0
  18. vsp1_dl_list_add_body
  19. vsp1_dl_list_add_chain
  20. vsp1_dl_ext_cmd_fill_header
  21. vsp1_dl_list_fill_header
  22. vsp1_dl_list_hw_update_pending
  23. vsp1_dl_list_hw_enqueue
  24. vsp1_dl_list_commit_continuous
  25. vsp1_dl_list_commit_singleshot
  26. vsp1_dl_list_commit
  27. vsp1_dlm_irq_frame_end
  28. vsp1_dlm_setup
  29. vsp1_dlm_reset
  30. vsp1_dlm_dl_body_get
  31. vsp1_dlm_create
  32. vsp1_dlm_destroy

// SPDX-License-Identifier: GPL-2.0+
/*
 * vsp1_dl.c  --  R-Car VSP1 Display List
 *
 * Copyright (C) 2015 Renesas Corporation
 *
 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "vsp1.h"
#include "vsp1_dl.h"

#define VSP1_DL_NUM_ENTRIES             256

#define VSP1_DLH_INT_ENABLE             (1 << 1)
#define VSP1_DLH_AUTO_START             (1 << 0)

#define VSP1_DLH_EXT_PRE_CMD_EXEC       (1 << 9)
#define VSP1_DLH_EXT_POST_CMD_EXEC      (1 << 8)

struct vsp1_dl_header_list {
        u32 num_bytes;
        u32 addr;
} __packed;

struct vsp1_dl_header {
        u32 num_lists;
        struct vsp1_dl_header_list lists[8];
        u32 next_header;
        u32 flags;
} __packed;

/**
 * struct vsp1_dl_ext_header - Extended display list header
 * @padding: padding zero bytes for alignment
 * @pre_ext_dl_num_cmd: number of pre-extended command bodies to parse
 * @flags: enables or disables execution of the pre and post command
 * @pre_ext_dl_plist: start address of pre-extended display list bodies
 * @post_ext_dl_num_cmd: number of post-extended command bodies to parse
 * @post_ext_dl_plist: start address of post-extended display list bodies
 */
struct vsp1_dl_ext_header {
        u32 padding;

        /*
         * The datasheet represents flags as stored before pre_ext_dl_num_cmd,
         * expecting 32-bit accesses. The flags are appropriate to the whole
         * header, not just the pre_ext command, and thus warrant being
         * separated out. Due to byte ordering, and representing as 16 bit
         * values here, the flags must be positioned after the
         * pre_ext_dl_num_cmd.
         */
        u16 pre_ext_dl_num_cmd;
        u16 flags;
        u32 pre_ext_dl_plist;

        u32 post_ext_dl_num_cmd;
        u32 post_ext_dl_plist;
} __packed;

struct vsp1_dl_header_extended {
        struct vsp1_dl_header header;
        struct vsp1_dl_ext_header ext;
} __packed;

struct vsp1_dl_entry {
        u32 addr;
        u32 data;
} __packed;

/**
 * struct vsp1_pre_ext_dl_body - Pre Extended Display List Body
 * @opcode: Extended display list command operation code
 * @flags: Pre-extended command flags. These are specific to each command
 * @address_set: Source address set pointer. Must have 16-byte alignment
 * @reserved: Zero bits for alignment.
 */
struct vsp1_pre_ext_dl_body {
        u32 opcode;
        u32 flags;
        u32 address_set;
        u32 reserved;
} __packed;

/**
 * struct vsp1_dl_body - Display list body
 * @list: entry in the display list list of bodies
 * @free: entry in the pool free body list
 * @refcnt: reference tracking for the body
 * @pool: pool to which this body belongs
 * @entries: array of entries
 * @dma: DMA address of the entries
 * @size: size of the DMA memory in bytes
 * @num_entries: number of stored entries
 * @max_entries: number of entries available
 */
struct vsp1_dl_body {
        struct list_head list;
        struct list_head free;

        refcount_t refcnt;

        struct vsp1_dl_body_pool *pool;

        struct vsp1_dl_entry *entries;
        dma_addr_t dma;
        size_t size;

        unsigned int num_entries;
        unsigned int max_entries;
};

/**
 * struct vsp1_dl_body_pool - display list body pool
 * @dma: DMA address of the entries
 * @size: size of the full DMA memory pool in bytes
 * @mem: CPU memory pointer for the pool
 * @bodies: Array of DLB structures for the pool
 * @free: List of free DLB entries
 * @lock: Protects the free list
 * @vsp1: the VSP1 device
 */
struct vsp1_dl_body_pool {
        /* DMA allocation */
        dma_addr_t dma;
        size_t size;
        void *mem;

        /* Body management */
        struct vsp1_dl_body *bodies;
        struct list_head free;
        spinlock_t lock;

        struct vsp1_device *vsp1;
};

/**
 * struct vsp1_dl_cmd_pool - Display List commands pool
 * @dma: DMA address of the entries
 * @size: size of the full DMA memory pool in bytes
 * @mem: CPU memory pointer for the pool
 * @cmds: Array of command structures for the pool
 * @free: Free pool entries
 * @lock: Protects the free list
 * @vsp1: the VSP1 device
 */
struct vsp1_dl_cmd_pool {
        /* DMA allocation */
        dma_addr_t dma;
        size_t size;
        void *mem;

        struct vsp1_dl_ext_cmd *cmds;
        struct list_head free;

        spinlock_t lock;

        struct vsp1_device *vsp1;
};

/**
 * struct vsp1_dl_list - Display list
 * @list: entry in the display list manager lists
 * @dlm: the display list manager
 * @header: display list header
 * @extension: extended display list header. NULL for normal lists
 * @dma: DMA address for the header
 * @body0: first display list body
 * @bodies: list of extra display list bodies
 * @pre_cmd: pre command to be issued through extended dl header
 * @post_cmd: post command to be issued through extended dl header
 * @has_chain: if true, indicates that there's a partition chain
 * @chain: entry in the display list partition chain
 * @flags: display list flags, a combination of VSP1_DL_FRAME_END_*
 */
struct vsp1_dl_list {
        struct list_head list;
        struct vsp1_dl_manager *dlm;

        struct vsp1_dl_header *header;
        struct vsp1_dl_ext_header *extension;
        dma_addr_t dma;

        struct vsp1_dl_body *body0;
        struct list_head bodies;

        struct vsp1_dl_ext_cmd *pre_cmd;
        struct vsp1_dl_ext_cmd *post_cmd;

        bool has_chain;
        struct list_head chain;

        unsigned int flags;
};

/**
 * struct vsp1_dl_manager - Display List manager
 * @index: index of the related WPF
 * @singleshot: execute the display list in single-shot mode
 * @vsp1: the VSP1 device
 * @lock: protects the free, active, queued, and pending lists
 * @free: array of all free display lists
 * @active: list currently being processed (loaded) by hardware
 * @queued: list queued to the hardware (written to the DL registers)
 * @pending: list waiting to be queued to the hardware
 * @pool: body pool for the display list bodies
 * @cmdpool: commands pool for extended display list
 */
struct vsp1_dl_manager {
        unsigned int index;
        bool singleshot;
        struct vsp1_device *vsp1;

        spinlock_t lock;
        struct list_head free;
        struct vsp1_dl_list *active;
        struct vsp1_dl_list *queued;
        struct vsp1_dl_list *pending;

        struct vsp1_dl_body_pool *pool;
        struct vsp1_dl_cmd_pool *cmdpool;
};

/* -----------------------------------------------------------------------------
 * Display List Body Management
 */

/**
 * vsp1_dl_body_pool_create - Create a pool of bodies from a single allocation
 * @vsp1: The VSP1 device
 * @num_bodies: The number of bodies to allocate
 * @num_entries: The maximum number of entries that a body can contain
 * @extra_size: Extra allocation provided for the bodies
 *
 * Allocate a pool of display list bodies each with enough memory to contain the
 * requested number of entries plus the @extra_size.
 *
 * Return a pointer to a pool on success or NULL if memory can't be allocated.
 */
struct vsp1_dl_body_pool *
vsp1_dl_body_pool_create(struct vsp1_device *vsp1, unsigned int num_bodies,
                         unsigned int num_entries, size_t extra_size)
{
        struct vsp1_dl_body_pool *pool;
        size_t dlb_size;
        unsigned int i;

        pool = kzalloc(sizeof(*pool), GFP_KERNEL);
        if (!pool)
                return NULL;

        pool->vsp1 = vsp1;

        /*
         * TODO: 'extra_size' is only used by vsp1_dlm_create(), to allocate
         * extra memory for the display list header. We need only one header per
         * display list, not per display list body, thus this allocation is
         * extraneous and should be reworked in the future.
         */
        dlb_size = num_entries * sizeof(struct vsp1_dl_entry) + extra_size;
        pool->size = dlb_size * num_bodies;

        pool->bodies = kcalloc(num_bodies, sizeof(*pool->bodies), GFP_KERNEL);
        if (!pool->bodies) {
                kfree(pool);
                return NULL;
        }

        pool->mem = dma_alloc_wc(vsp1->bus_master, pool->size, &pool->dma,
                                 GFP_KERNEL);
        if (!pool->mem) {
                kfree(pool->bodies);
                kfree(pool);
                return NULL;
        }

        spin_lock_init(&pool->lock);
        INIT_LIST_HEAD(&pool->free);

        for (i = 0; i < num_bodies; ++i) {
                struct vsp1_dl_body *dlb = &pool->bodies[i];

                dlb->pool = pool;
                dlb->max_entries = num_entries;

                dlb->dma = pool->dma + i * dlb_size;
                dlb->entries = pool->mem + i * dlb_size;

                list_add_tail(&dlb->free, &pool->free);
        }

        return pool;
}

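/*
 * A minimal usage sketch, not taken from this driver; the pool size, the
 * extra_size of zero and the error handling are illustrative only.
 *
 *      struct vsp1_dl_body_pool *pool;
 *
 *      pool = vsp1_dl_body_pool_create(vsp1, 8, VSP1_DL_NUM_ENTRIES, 0);
 *      if (!pool)
 *              return -ENOMEM;
 *      ...
 *      vsp1_dl_body_pool_destroy(pool);
 */
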
/**
 * vsp1_dl_body_pool_destroy - Release a body pool
 * @pool: The body pool
 *
 * Release all components of a pool allocation.
 */
void vsp1_dl_body_pool_destroy(struct vsp1_dl_body_pool *pool)
{
        if (!pool)
                return;

        if (pool->mem)
                dma_free_wc(pool->vsp1->bus_master, pool->size, pool->mem,
                            pool->dma);

        kfree(pool->bodies);
        kfree(pool);
}

/**
 * vsp1_dl_body_get - Obtain a body from a pool
 * @pool: The body pool
 *
 * Obtain a body from the pool without blocking.
 *
 * Returns a display list body or NULL if there are none available.
 */
struct vsp1_dl_body *vsp1_dl_body_get(struct vsp1_dl_body_pool *pool)
{
        struct vsp1_dl_body *dlb = NULL;
        unsigned long flags;

        spin_lock_irqsave(&pool->lock, flags);

        if (!list_empty(&pool->free)) {
                dlb = list_first_entry(&pool->free, struct vsp1_dl_body, free);
                list_del(&dlb->free);
                refcount_set(&dlb->refcnt, 1);
        }

        spin_unlock_irqrestore(&pool->lock, flags);

        return dlb;
}

/**
 * vsp1_dl_body_put - Return a body back to its pool
 * @dlb: The display list body
 *
 * Return a body back to the pool, and reset the num_entries to clear the list.
 */
void vsp1_dl_body_put(struct vsp1_dl_body *dlb)
{
        unsigned long flags;

        if (!dlb)
                return;

        if (!refcount_dec_and_test(&dlb->refcnt))
                return;

        dlb->num_entries = 0;

        spin_lock_irqsave(&dlb->pool->lock, flags);
        list_add_tail(&dlb->free, &dlb->pool->free);
        spin_unlock_irqrestore(&dlb->pool->lock, flags);
}

/**
 * vsp1_dl_body_write - Write a register to a display list body
 * @dlb: The body
 * @reg: The register address
 * @data: The register value
 *
 * Write the given register and value to the display list body. The maximum
 * number of entries that can be written in a body is specified when the body's
 * pool is created by vsp1_dl_body_pool_create().
 */
void vsp1_dl_body_write(struct vsp1_dl_body *dlb, u32 reg, u32 data)
{
        if (WARN_ONCE(dlb->num_entries >= dlb->max_entries,
                      "DLB size exceeded (max %u)", dlb->max_entries))
                return;

        dlb->entries[dlb->num_entries].addr = reg;
        dlb->entries[dlb->num_entries].data = data;
        dlb->num_entries++;
}

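/*
 * A minimal usage sketch, not taken from this driver; 'pool', 'reg' and
 * 'value' are placeholders for a real body pool and register write.
 *
 *      struct vsp1_dl_body *dlb = vsp1_dl_body_get(pool);
 *
 *      if (!dlb)
 *              return -ENOMEM;
 *
 *      vsp1_dl_body_write(dlb, reg, value);
 *      vsp1_dl_body_put(dlb);
 */
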
/* -----------------------------------------------------------------------------
 * Display List Extended Command Management
 */

enum vsp1_extcmd_type {
        VSP1_EXTCMD_AUTODISP,
        VSP1_EXTCMD_AUTOFLD,
};

struct vsp1_extended_command_info {
        u16 opcode;
        size_t body_size;
};

static const struct vsp1_extended_command_info vsp1_extended_commands[] = {
        [VSP1_EXTCMD_AUTODISP] = { 0x02, 96 },
        [VSP1_EXTCMD_AUTOFLD]  = { 0x03, 160 },
};

/**
 * vsp1_dl_cmd_pool_create - Create a pool of commands from a single allocation
 * @vsp1: The VSP1 device
 * @type: The command pool type
 * @num_cmds: The number of commands to allocate
 *
 * Allocate a pool of commands each with enough memory to contain the private
 * data of each command. The allocation sizes are dependent upon the command
 * type.
 *
 * Return a pointer to the pool on success or NULL if memory can't be allocated.
 */
static struct vsp1_dl_cmd_pool *
vsp1_dl_cmd_pool_create(struct vsp1_device *vsp1, enum vsp1_extcmd_type type,
                        unsigned int num_cmds)
{
        struct vsp1_dl_cmd_pool *pool;
        unsigned int i;
        size_t cmd_size;

        pool = kzalloc(sizeof(*pool), GFP_KERNEL);
        if (!pool)
                return NULL;

        pool->vsp1 = vsp1;

        spin_lock_init(&pool->lock);
        INIT_LIST_HEAD(&pool->free);

        pool->cmds = kcalloc(num_cmds, sizeof(*pool->cmds), GFP_KERNEL);
        if (!pool->cmds) {
                kfree(pool);
                return NULL;
        }

        cmd_size = sizeof(struct vsp1_pre_ext_dl_body) +
                   vsp1_extended_commands[type].body_size;
        cmd_size = ALIGN(cmd_size, 16);

        pool->size = cmd_size * num_cmds;
        pool->mem = dma_alloc_wc(vsp1->bus_master, pool->size, &pool->dma,
                                 GFP_KERNEL);
        if (!pool->mem) {
                kfree(pool->cmds);
                kfree(pool);
                return NULL;
        }

        for (i = 0; i < num_cmds; ++i) {
                struct vsp1_dl_ext_cmd *cmd = &pool->cmds[i];
                size_t cmd_offset = i * cmd_size;
                /* data_offset must be 16 byte aligned for DMA. */
                size_t data_offset = sizeof(struct vsp1_pre_ext_dl_body) +
                                     cmd_offset;

                cmd->pool = pool;
                cmd->opcode = vsp1_extended_commands[type].opcode;

                /*
                 * TODO: Auto-disp can utilise more than one extended body
                 * command per cmd.
                 */
                cmd->num_cmds = 1;
                cmd->cmds = pool->mem + cmd_offset;
                cmd->cmd_dma = pool->dma + cmd_offset;

                cmd->data = pool->mem + data_offset;
                cmd->data_dma = pool->dma + data_offset;

                list_add_tail(&cmd->free, &pool->free);
        }

        return pool;
}

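/*
 * Worked example for the sizes above, derived from the structures in this
 * file: an AUTOFLD command occupies ALIGN(sizeof(struct vsp1_pre_ext_dl_body)
 * + 160, 16) = ALIGN(16 + 160, 16) = 176 bytes, so a pool of num_cmds
 * commands allocates num_cmds * 176 bytes of DMA memory.
 */
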
static
struct vsp1_dl_ext_cmd *vsp1_dl_ext_cmd_get(struct vsp1_dl_cmd_pool *pool)
{
        struct vsp1_dl_ext_cmd *cmd = NULL;
        unsigned long flags;

        spin_lock_irqsave(&pool->lock, flags);

        if (!list_empty(&pool->free)) {
                cmd = list_first_entry(&pool->free, struct vsp1_dl_ext_cmd,
                                       free);
                list_del(&cmd->free);
        }

        spin_unlock_irqrestore(&pool->lock, flags);

        return cmd;
}

static void vsp1_dl_ext_cmd_put(struct vsp1_dl_ext_cmd *cmd)
{
        unsigned long flags;

        if (!cmd)
                return;

        /* Reset flags, these mark data usage. */
        cmd->flags = 0;

        spin_lock_irqsave(&cmd->pool->lock, flags);
        list_add_tail(&cmd->free, &cmd->pool->free);
        spin_unlock_irqrestore(&cmd->pool->lock, flags);
}

static void vsp1_dl_ext_cmd_pool_destroy(struct vsp1_dl_cmd_pool *pool)
{
        if (!pool)
                return;

        if (pool->mem)
                dma_free_wc(pool->vsp1->bus_master, pool->size, pool->mem,
                            pool->dma);

        kfree(pool->cmds);
        kfree(pool);
}

struct vsp1_dl_ext_cmd *vsp1_dl_get_pre_cmd(struct vsp1_dl_list *dl)
{
        struct vsp1_dl_manager *dlm = dl->dlm;

        if (dl->pre_cmd)
                return dl->pre_cmd;

        dl->pre_cmd = vsp1_dl_ext_cmd_get(dlm->cmdpool);

        return dl->pre_cmd;
}

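/*
 * A minimal usage sketch, not taken from this driver; the flags value and
 * the payload layout are command specific and purely illustrative.
 *
 *      struct vsp1_dl_ext_cmd *cmd = vsp1_dl_get_pre_cmd(dl);
 *
 *      if (cmd) {
 *              cmd->flags = ...;                // command-specific flags
 *              memcpy(cmd->data, body, size);   // payload written via data
 *      }
 */
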
/* -----------------------------------------------------------------------------
 * Display List Transaction Management
 */

static struct vsp1_dl_list *vsp1_dl_list_alloc(struct vsp1_dl_manager *dlm)
{
        struct vsp1_dl_list *dl;
        size_t header_offset;

        dl = kzalloc(sizeof(*dl), GFP_KERNEL);
        if (!dl)
                return NULL;

        INIT_LIST_HEAD(&dl->bodies);
        dl->dlm = dlm;

        /* Get a default body for our list. */
        dl->body0 = vsp1_dl_body_get(dlm->pool);
        if (!dl->body0) {
                kfree(dl);
                return NULL;
        }

        header_offset = dl->body0->max_entries * sizeof(*dl->body0->entries);

        dl->header = ((void *)dl->body0->entries) + header_offset;
        dl->dma = dl->body0->dma + header_offset;

        memset(dl->header, 0, sizeof(*dl->header));
        dl->header->lists[0].addr = dl->body0->dma;

        return dl;
}

static void vsp1_dl_list_bodies_put(struct vsp1_dl_list *dl)
{
        struct vsp1_dl_body *dlb, *tmp;

        list_for_each_entry_safe(dlb, tmp, &dl->bodies, list) {
                list_del(&dlb->list);
                vsp1_dl_body_put(dlb);
        }
}

static void vsp1_dl_list_free(struct vsp1_dl_list *dl)
{
        vsp1_dl_body_put(dl->body0);
        vsp1_dl_list_bodies_put(dl);

        kfree(dl);
}

/**
 * vsp1_dl_list_get - Get a free display list
 * @dlm: The display list manager
 *
 * Get a display list from the pool of free lists and return it.
 *
 * This function must be called without the display list manager lock held.
 */
struct vsp1_dl_list *vsp1_dl_list_get(struct vsp1_dl_manager *dlm)
{
        struct vsp1_dl_list *dl = NULL;
        unsigned long flags;

        spin_lock_irqsave(&dlm->lock, flags);

        if (!list_empty(&dlm->free)) {
                dl = list_first_entry(&dlm->free, struct vsp1_dl_list, list);
                list_del(&dl->list);

                /*
                 * The display list chain must be initialised to ensure every
                 * display list can assert list_empty() if it is not in a chain.
                 */
                INIT_LIST_HEAD(&dl->chain);
        }

        spin_unlock_irqrestore(&dlm->lock, flags);

        return dl;
}

/* This function must be called with the display list manager lock held. */
static void __vsp1_dl_list_put(struct vsp1_dl_list *dl)
{
        struct vsp1_dl_list *dl_next;

        if (!dl)
                return;

        /*
         * Release any linked display-lists which were chained for a single
         * hardware operation.
         */
        if (dl->has_chain) {
                list_for_each_entry(dl_next, &dl->chain, chain)
                        __vsp1_dl_list_put(dl_next);
        }

        dl->has_chain = false;

        vsp1_dl_list_bodies_put(dl);

        vsp1_dl_ext_cmd_put(dl->pre_cmd);
        vsp1_dl_ext_cmd_put(dl->post_cmd);

        dl->pre_cmd = NULL;
        dl->post_cmd = NULL;

        /*
         * body0 is reused as an optimisation, as presently every display list
         * has at least one body; we thus reinitialise the entries list.
         */
        dl->body0->num_entries = 0;

        list_add_tail(&dl->list, &dl->dlm->free);
}

/**
 * vsp1_dl_list_put - Release a display list
 * @dl: The display list
 *
 * Release the display list and return it to the pool of free lists.
 *
 * Passing a NULL pointer to this function is safe, in that case no operation
 * will be performed.
 */
void vsp1_dl_list_put(struct vsp1_dl_list *dl)
{
        unsigned long flags;

        if (!dl)
                return;

        spin_lock_irqsave(&dl->dlm->lock, flags);
        __vsp1_dl_list_put(dl);
        spin_unlock_irqrestore(&dl->dlm->lock, flags);
}

/**
 * vsp1_dl_list_get_body0 - Obtain the default body for the display list
 * @dl: The display list
 *
 * Obtain a pointer to the internal display list body allowing this to be passed
 * directly to configure operations.
 */
struct vsp1_dl_body *vsp1_dl_list_get_body0(struct vsp1_dl_list *dl)
{
        return dl->body0;
}

/**
 * vsp1_dl_list_add_body - Add a body to the display list
 * @dl: The display list
 * @dlb: The body
 *
 * Add a display list body to a display list. Registers contained in bodies are
 * processed after registers contained in the main display list, in the order in
 * which bodies are added.
 *
 * Adding a body to a display list passes ownership of the body to the list. The
 * caller retains its reference to the body when adding it to the display list,
 * but is not allowed to add new entries to the body.
 *
 * The reference must be explicitly released by a call to vsp1_dl_body_put()
 * when the body isn't needed anymore.
 */
int vsp1_dl_list_add_body(struct vsp1_dl_list *dl, struct vsp1_dl_body *dlb)
{
        refcount_inc(&dlb->refcnt);

        list_add_tail(&dlb->list, &dl->bodies);

        return 0;
}

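/*
 * A minimal ownership sketch, not taken from this driver; 'pool', 'reg' and
 * 'value' are placeholders. The list takes its own reference, so the caller
 * releases its one when the cached configuration is no longer needed.
 *
 *      struct vsp1_dl_body *dlb = vsp1_dl_body_get(pool);
 *
 *      vsp1_dl_body_write(dlb, reg, value);
 *      vsp1_dl_list_add_body(dl, dlb);
 *      ...
 *      vsp1_dl_body_put(dlb);
 */
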
/**
 * vsp1_dl_list_add_chain - Add a display list to a chain
 * @head: The head display list
 * @dl: The new display list
 *
 * Add a display list to an existing display list chain. The chained lists
 * will be automatically processed by the hardware without intervention from
 * the CPU. A display list end interrupt will only complete after the last
 * display list in the chain has completed processing.
 *
 * Adding a display list to a chain passes ownership of the display list to
 * the head display list item. The chain is released when the head dl item is
 * put back with __vsp1_dl_list_put().
 */
int vsp1_dl_list_add_chain(struct vsp1_dl_list *head,
                           struct vsp1_dl_list *dl)
{
        head->has_chain = true;
        list_add_tail(&dl->chain, &head->chain);
        return 0;
}

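/*
 * A minimal chaining sketch, not taken from this driver: link two display
 * lists so the hardware processes them back to back, then commit the head.
 * Ownership of 'dl' passes to 'head' when it is added to the chain.
 *
 *      struct vsp1_dl_list *head = vsp1_dl_list_get(dlm);
 *      struct vsp1_dl_list *dl = vsp1_dl_list_get(dlm);
 *
 *      vsp1_dl_list_add_chain(head, dl);
 *      vsp1_dl_list_commit(head, 0);
 */
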
static void vsp1_dl_ext_cmd_fill_header(struct vsp1_dl_ext_cmd *cmd)
{
        cmd->cmds[0].opcode = cmd->opcode;
        cmd->cmds[0].flags = cmd->flags;
        cmd->cmds[0].address_set = cmd->data_dma;
        cmd->cmds[0].reserved = 0;
}

static void vsp1_dl_list_fill_header(struct vsp1_dl_list *dl, bool is_last)
{
        struct vsp1_dl_manager *dlm = dl->dlm;
        struct vsp1_dl_header_list *hdr = dl->header->lists;
        struct vsp1_dl_body *dlb;
        unsigned int num_lists = 0;

        /*
         * Fill the header with the addresses and sizes of the display list
         * bodies. The address of the first body has already been filled when
         * the display list was allocated.
         */

        hdr->num_bytes = dl->body0->num_entries
                       * sizeof(*dl->body0->entries);

        list_for_each_entry(dlb, &dl->bodies, list) {
                num_lists++;
                hdr++;

                hdr->addr = dlb->dma;
                hdr->num_bytes = dlb->num_entries
                               * sizeof(*dlb->entries);
        }

        dl->header->num_lists = num_lists;
        dl->header->flags = 0;

        /*
         * Enable the interrupt for the end of each frame. In continuous mode
         * chained lists are used with one list per frame, so enable the
         * interrupt for each list. In singleshot mode chained lists are used
         * to partition a single frame, so enable the interrupt for the last
         * list only.
         */
        if (!dlm->singleshot || is_last)
                dl->header->flags |= VSP1_DLH_INT_ENABLE;

        /*
         * In continuous mode enable auto-start for all lists, as the VSP must
         * loop on the same list until a new one is queued. In singleshot mode
         * enable auto-start for all lists but the last to chain processing of
         * partitions without software intervention.
         */
        if (!dlm->singleshot || !is_last)
                dl->header->flags |= VSP1_DLH_AUTO_START;

        if (!is_last) {
                /*
                 * If this is not the last display list in the chain, queue the
                 * next item for automatic processing by the hardware.
                 */
                struct vsp1_dl_list *next = list_next_entry(dl, chain);

                dl->header->next_header = next->dma;
        } else if (!dlm->singleshot) {
                /*
                 * If the display list manager works in continuous mode, the
                 * VSP should loop over the display list continuously until
                 * instructed to do otherwise.
                 */
                dl->header->next_header = dl->dma;
        }

        if (!dl->extension)
                return;

        dl->extension->flags = 0;

        if (dl->pre_cmd) {
                dl->extension->pre_ext_dl_plist = dl->pre_cmd->cmd_dma;
                dl->extension->pre_ext_dl_num_cmd = dl->pre_cmd->num_cmds;
                dl->extension->flags |= VSP1_DLH_EXT_PRE_CMD_EXEC;

                vsp1_dl_ext_cmd_fill_header(dl->pre_cmd);
        }

        if (dl->post_cmd) {
                dl->extension->post_ext_dl_plist = dl->post_cmd->cmd_dma;
                dl->extension->post_ext_dl_num_cmd = dl->post_cmd->num_cmds;
                dl->extension->flags |= VSP1_DLH_EXT_POST_CMD_EXEC;

                vsp1_dl_ext_cmd_fill_header(dl->post_cmd);
        }
}

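/*
 * Worked example of the header flags set above: in single-shot mode a chain
 * of three partition lists gets VSP1_DLH_AUTO_START on the first two headers
 * and VSP1_DLH_INT_ENABLE on the last one only. In continuous mode every
 * header carries both flags, and the last header points back to itself so
 * the hardware loops until a new list is queued.
 */
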
static bool vsp1_dl_list_hw_update_pending(struct vsp1_dl_manager *dlm)
{
        struct vsp1_device *vsp1 = dlm->vsp1;

        if (!dlm->queued)
                return false;

        /*
         * Check whether the VSP1 has taken the update. The hardware indicates
         * this by clearing the UPDHDR bit in the CMD register.
         */
        return !!(vsp1_read(vsp1, VI6_CMD(dlm->index)) & VI6_CMD_UPDHDR);
}

static void vsp1_dl_list_hw_enqueue(struct vsp1_dl_list *dl)
{
        struct vsp1_dl_manager *dlm = dl->dlm;
        struct vsp1_device *vsp1 = dlm->vsp1;

        /*
         * Program the display list header address. If the hardware is idle
         * (single-shot mode or first frame in continuous mode) it will then be
         * started independently. If the hardware is operating, the
         * VI6_DL_HDR_REF_ADDR register will be updated with the display list
         * address.
         */
        vsp1_write(vsp1, VI6_DL_HDR_ADDR(dlm->index), dl->dma);
}

static void vsp1_dl_list_commit_continuous(struct vsp1_dl_list *dl)
{
        struct vsp1_dl_manager *dlm = dl->dlm;

        /*
         * If a previous display list has been queued to the hardware but not
         * processed yet, the VSP can start processing it at any time. In that
         * case we can't replace the queued list by the new one, as we could
         * race with the hardware. We thus mark the update as pending, it will
         * be queued up to the hardware by the frame end interrupt handler.
         *
         * If a display list is already pending we simply drop it as the new
         * display list is assumed to contain a more recent configuration. It is
         * an error if the already pending list has the
         * VSP1_DL_FRAME_END_INTERNAL flag set, as there is then a process
         * waiting for that list to complete. This shouldn't happen as the
         * waiting process should perform proper locking, but warn just in
         * case.
         */
        if (vsp1_dl_list_hw_update_pending(dlm)) {
                WARN_ON(dlm->pending &&
                        (dlm->pending->flags & VSP1_DL_FRAME_END_INTERNAL));
                __vsp1_dl_list_put(dlm->pending);
                dlm->pending = dl;
                return;
        }

        /*
         * Pass the new display list to the hardware and mark it as queued. It
         * will become active when the hardware starts processing it.
         */
        vsp1_dl_list_hw_enqueue(dl);

        __vsp1_dl_list_put(dlm->queued);
        dlm->queued = dl;
}

static void vsp1_dl_list_commit_singleshot(struct vsp1_dl_list *dl)
{
        struct vsp1_dl_manager *dlm = dl->dlm;

        /*
         * When working in single-shot mode, the caller guarantees that the
         * hardware is idle at this point. Just commit the head display list
         * to hardware. Chained lists will be started automatically.
         */
        vsp1_dl_list_hw_enqueue(dl);

        dlm->active = dl;
}

void vsp1_dl_list_commit(struct vsp1_dl_list *dl, unsigned int dl_flags)
{
        struct vsp1_dl_manager *dlm = dl->dlm;
        struct vsp1_dl_list *dl_next;
        unsigned long flags;

        /* Fill the header for the head and chained display lists. */
        vsp1_dl_list_fill_header(dl, list_empty(&dl->chain));

        list_for_each_entry(dl_next, &dl->chain, chain) {
                bool last = list_is_last(&dl_next->chain, &dl->chain);

                vsp1_dl_list_fill_header(dl_next, last);
        }

        dl->flags = dl_flags & ~VSP1_DL_FRAME_END_COMPLETED;

        spin_lock_irqsave(&dlm->lock, flags);

        if (dlm->singleshot)
                vsp1_dl_list_commit_singleshot(dl);
        else
                vsp1_dl_list_commit_continuous(dl);

        spin_unlock_irqrestore(&dlm->lock, flags);
}

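/*
 * A minimal commit sketch, not taken from this driver; 'reg' and 'value' are
 * placeholders. Registers are written through the default body, then the
 * list is handed to the hardware with no completion flags requested.
 *
 *      struct vsp1_dl_list *dl = vsp1_dl_list_get(dlm);
 *      struct vsp1_dl_body *dlb = vsp1_dl_list_get_body0(dl);
 *
 *      vsp1_dl_body_write(dlb, reg, value);
 *      vsp1_dl_list_commit(dl, 0);
 */
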
/* -----------------------------------------------------------------------------
 * Display List Manager
 */

/**
 * vsp1_dlm_irq_frame_end - Display list handler for the frame end interrupt
 * @dlm: the display list manager
 *
 * Return a set of flags that indicates display list completion status.
 *
 * The VSP1_DL_FRAME_END_COMPLETED flag indicates that the previous display list
 * has completed at frame end. If the flag is not returned display list
 * completion has been delayed by one frame because the display list commit
 * raced with the frame end interrupt. The function always returns with the flag
 * set in single-shot mode as display list processing is then not continuous and
 * races never occur.
 *
 * The following flags are only supported for continuous mode.
 *
 * The VSP1_DL_FRAME_END_INTERNAL flag indicates that the display list that just
 * became active had been queued with the internal notification flag.
 *
 * The VSP1_DL_FRAME_END_WRITEBACK flag indicates that the previously active
 * display list had been queued with the writeback flag.
 */
unsigned int vsp1_dlm_irq_frame_end(struct vsp1_dl_manager *dlm)
{
        struct vsp1_device *vsp1 = dlm->vsp1;
        u32 status = vsp1_read(vsp1, VI6_STATUS);
        unsigned int flags = 0;

        spin_lock(&dlm->lock);

        /*
         * The mem-to-mem pipelines work in single-shot mode. No new display
         * list can be queued, we don't have to do anything.
         */
        if (dlm->singleshot) {
                __vsp1_dl_list_put(dlm->active);
                dlm->active = NULL;
                flags |= VSP1_DL_FRAME_END_COMPLETED;
                goto done;
        }

        /*
         * If the commit operation raced with the interrupt and occurred after
         * the frame end event but before interrupt processing, the hardware
         * hasn't taken the update into account yet. We have to skip one frame
         * and retry.
         */
        if (vsp1_dl_list_hw_update_pending(dlm))
                goto done;

        /*
         * Progressive streams report only TOP fields. If we have a BOTTOM
         * field, we are interlaced, and expect the frame to complete on the
         * next frame end interrupt.
         */
        if (status & VI6_STATUS_FLD_STD(dlm->index))
                goto done;

        /*
         * If the active display list has the writeback flag set, the frame
         * completion marks the end of the writeback capture. Return the
         * VSP1_DL_FRAME_END_WRITEBACK flag and reset the display list's
         * writeback flag.
         */
        if (dlm->active && (dlm->active->flags & VSP1_DL_FRAME_END_WRITEBACK)) {
                flags |= VSP1_DL_FRAME_END_WRITEBACK;
                dlm->active->flags &= ~VSP1_DL_FRAME_END_WRITEBACK;
        }

        /*
         * The device starts processing the queued display list right after the
         * frame end interrupt. The display list thus becomes active.
         */
        if (dlm->queued) {
                if (dlm->queued->flags & VSP1_DL_FRAME_END_INTERNAL)
                        flags |= VSP1_DL_FRAME_END_INTERNAL;
                dlm->queued->flags &= ~VSP1_DL_FRAME_END_INTERNAL;

                __vsp1_dl_list_put(dlm->active);
                dlm->active = dlm->queued;
                dlm->queued = NULL;
                flags |= VSP1_DL_FRAME_END_COMPLETED;
        }

        /*
         * Now that the VSP has started processing the queued display list, we
         * can queue the pending display list to the hardware if one has been
         * prepared.
         */
        if (dlm->pending) {
                vsp1_dl_list_hw_enqueue(dlm->pending);
                dlm->queued = dlm->pending;
                dlm->pending = NULL;
        }

done:
        spin_unlock(&dlm->lock);

        return flags;
}

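/*
 * Summary of the continuous-mode lifecycle implemented above: a commit lands
 * in 'pending' when the hardware has not yet taken the previous update, or
 * directly in 'queued' otherwise. Each frame end interrupt promotes 'queued'
 * to 'active', releases the previous active list, and hands 'pending' to the
 * hardware as the new 'queued' list.
 */
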
/* -----------------------------------------------------------------------------
 * Hardware Setup
 */

void vsp1_dlm_setup(struct vsp1_device *vsp1)
{
        unsigned int i;
        u32 ctrl = (256 << VI6_DL_CTRL_AR_WAIT_SHIFT)
                 | VI6_DL_CTRL_DC2 | VI6_DL_CTRL_DC1 | VI6_DL_CTRL_DC0
                 | VI6_DL_CTRL_DLE;
        u32 ext_dl = (0x02 << VI6_DL_EXT_CTRL_POLINT_SHIFT)
                   | VI6_DL_EXT_CTRL_DLPRI | VI6_DL_EXT_CTRL_EXT;

        if (vsp1_feature(vsp1, VSP1_HAS_EXT_DL)) {
                for (i = 0; i < vsp1->info->wpf_count; ++i)
                        vsp1_write(vsp1, VI6_DL_EXT_CTRL(i), ext_dl);
        }

        vsp1_write(vsp1, VI6_DL_CTRL, ctrl);
        vsp1_write(vsp1, VI6_DL_SWAP, VI6_DL_SWAP_LWS);
}

void vsp1_dlm_reset(struct vsp1_dl_manager *dlm)
{
        unsigned long flags;

        spin_lock_irqsave(&dlm->lock, flags);

        __vsp1_dl_list_put(dlm->active);
        __vsp1_dl_list_put(dlm->queued);
        __vsp1_dl_list_put(dlm->pending);

        spin_unlock_irqrestore(&dlm->lock, flags);

        dlm->active = NULL;
        dlm->queued = NULL;
        dlm->pending = NULL;
}

struct vsp1_dl_body *vsp1_dlm_dl_body_get(struct vsp1_dl_manager *dlm)
{
        return vsp1_dl_body_get(dlm->pool);
}

struct vsp1_dl_manager *vsp1_dlm_create(struct vsp1_device *vsp1,
                                        unsigned int index,
                                        unsigned int prealloc)
{
        struct vsp1_dl_manager *dlm;
        size_t header_size;
        unsigned int i;

        dlm = devm_kzalloc(vsp1->dev, sizeof(*dlm), GFP_KERNEL);
        if (!dlm)
                return NULL;

        dlm->index = index;
        dlm->singleshot = vsp1->info->uapi;
        dlm->vsp1 = vsp1;

        spin_lock_init(&dlm->lock);
        INIT_LIST_HEAD(&dlm->free);

        /*
         * Initialize the display list body and allocate DMA memory for the body
         * and the header. Both are allocated together to avoid memory
         * fragmentation, with the header located right after the body in
         * memory. An extra body is allocated on top of the prealloc to account
         * for the cached body used by the vsp1_pipeline object.
         */
        header_size = vsp1_feature(vsp1, VSP1_HAS_EXT_DL) ?
                        sizeof(struct vsp1_dl_header_extended) :
                        sizeof(struct vsp1_dl_header);

        header_size = ALIGN(header_size, 8);

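        /*
         * Worked example, derived from the structure sizes in this file: the
         * plain header is 76 bytes (80 after alignment) and the extended
         * header is 96 bytes, while each body provides 256 * 8 = 2048 bytes
         * of entries followed by the aligned header.
         */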
        dlm->pool = vsp1_dl_body_pool_create(vsp1, prealloc + 1,
                                             VSP1_DL_NUM_ENTRIES, header_size);
        if (!dlm->pool)
                return NULL;

        for (i = 0; i < prealloc; ++i) {
                struct vsp1_dl_list *dl;

                dl = vsp1_dl_list_alloc(dlm);
                if (!dl) {
                        vsp1_dlm_destroy(dlm);
                        return NULL;
                }

                /* The extended header immediately follows the header. */
                if (vsp1_feature(vsp1, VSP1_HAS_EXT_DL))
                        dl->extension = (void *)dl->header
                                      + sizeof(*dl->header);

                list_add_tail(&dl->list, &dlm->free);
        }

        if (vsp1_feature(vsp1, VSP1_HAS_EXT_DL)) {
                dlm->cmdpool = vsp1_dl_cmd_pool_create(vsp1,
                                        VSP1_EXTCMD_AUTOFLD, prealloc);
                if (!dlm->cmdpool) {
                        vsp1_dlm_destroy(dlm);
                        return NULL;
                }
        }

        return dlm;
}

void vsp1_dlm_destroy(struct vsp1_dl_manager *dlm)
{
        struct vsp1_dl_list *dl, *next;

        if (!dlm)
                return;

        list_for_each_entry_safe(dl, next, &dlm->free, list) {
                list_del(&dl->list);
                vsp1_dl_list_free(dl);
        }

        vsp1_dl_body_pool_destroy(dlm->pool);
        vsp1_dl_ext_cmd_pool_destroy(dlm->cmdpool);
}
