root/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c


DEFINITIONS

This source file includes the following definitions:
  1. dpu_rm_init_hw_iter
  2. _dpu_rm_get_hw_locked
  3. dpu_rm_get_hw
  4. _dpu_rm_hw_destroy
  5. dpu_rm_destroy
  6. _dpu_rm_hw_blk_create
  7. dpu_rm_init
  8. _dpu_rm_needs_split_display
  9. _dpu_rm_check_lm_and_get_connected_blks
  10. _dpu_rm_reserve_lms
  11. _dpu_rm_reserve_ctls
  12. _dpu_rm_reserve_intf
  13. _dpu_rm_reserve_intf_related_hw
  14. _dpu_rm_make_reservation
  15. _dpu_rm_populate_requirements
  16. _dpu_rm_release_reservation
  17. dpu_rm_release
  18. dpu_rm_reserve

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 */

#define pr_fmt(fmt)     "[drm:%s] " fmt, __func__
#include "dpu_kms.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_pingpong.h"
#include "dpu_hw_intf.h"
#include "dpu_encoder.h"
#include "dpu_trace.h"

#define RESERVED_BY_OTHER(h, r)  \
                ((h)->enc_id && (h)->enc_id != r)

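/*
 * Reservation model in brief: every tracked block carries an enc_id that
 * is 0 while the block is free and otherwise holds the DRM encoder object
 * id that owns it, so RESERVED_BY_OTHER(h, r) is true only when block h
 * is bound to an encoder other than r.
 */
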
/**
 * struct dpu_rm_requirements - Reservation requirements parameter bundle
 * @topology:  selected topology for the display
 * @hw_res:    Hardware resources required as reported by the encoders
 */
struct dpu_rm_requirements {
        struct msm_display_topology topology;
        struct dpu_encoder_hw_resources hw_res;
};

/**
 * struct dpu_rm_hw_blk - hardware block tracking list member
 * @list:       List head for list of all hardware blocks tracking items
 * @id:         Hardware ID number, within its own space, ie. LM_X
 * @enc_id:     Encoder id to which this blk is bound
 * @hw:         Pointer to the hardware register access object for this block
 */
struct dpu_rm_hw_blk {
        struct list_head list;
        uint32_t id;
        uint32_t enc_id;
        struct dpu_hw_blk *hw;
};

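/*
 * dpu_rm_init_hw_iter - set up an iterator for a new search: zero the
 * iterator, then record the encoder id to match (0 matches only
 * unreserved blocks) and the block type of interest.
 *
 * Typical usage (sketch): walk every block of a type held by an encoder:
 *
 *	struct dpu_rm_hw_iter iter;
 *
 *	dpu_rm_init_hw_iter(&iter, enc_id, DPU_HW_BLK_LM);
 *	while (dpu_rm_get_hw(rm, &iter))
 *		... use iter.hw / iter.blk ...
 */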
void dpu_rm_init_hw_iter(
                struct dpu_rm_hw_iter *iter,
                uint32_t enc_id,
                enum dpu_hw_blk_type type)
{
        memset(iter, 0, sizeof(*iter));
        iter->enc_id = enc_id;
        iter->type = type;
}

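/*
 * Resumable search, caller must hold rm_lock: continue from iter->blk if
 * a previous call already advanced the iterator, and return the next block
 * of iter->type whose enc_id matches iter->enc_id. Returns true and fills
 * iter->hw on a match, false once the list is exhausted.
 */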
static bool _dpu_rm_get_hw_locked(struct dpu_rm *rm, struct dpu_rm_hw_iter *i)
{
        struct list_head *blk_list;

        if (!rm || !i || i->type >= DPU_HW_BLK_MAX) {
                DPU_ERROR("invalid rm\n");
                return false;
        }

        i->hw = NULL;
        blk_list = &rm->hw_blks[i->type];

        if (i->blk && (&i->blk->list == blk_list)) {
                DPU_DEBUG("attempt to resume iteration past last\n");
                return false;
        }

        i->blk = list_prepare_entry(i->blk, blk_list, list);

        list_for_each_entry_continue(i->blk, blk_list, list) {
                if (i->enc_id == i->blk->enc_id) {
                        i->hw = i->blk->hw;
                        DPU_DEBUG("found type %d id %d for enc %d\n",
                                        i->type, i->blk->id, i->enc_id);
                        return true;
                }
        }

        DPU_DEBUG("no match, type %d for enc %d\n", i->type, i->enc_id);

        return false;
}

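/* Locked wrapper around _dpu_rm_get_hw_locked() for external callers. */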
bool dpu_rm_get_hw(struct dpu_rm *rm, struct dpu_rm_hw_iter *i)
{
        bool ret;

        mutex_lock(&rm->rm_lock);
        ret = _dpu_rm_get_hw_locked(rm, i);
        mutex_unlock(&rm->rm_lock);

        return ret;
}

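/*
 * Invoke the type-specific destructor for a hardware block created by
 * _dpu_rm_hw_blk_create(). SSPP and TOP never appear in the hw_blks
 * lists, so hitting them (or an unknown type) here is an error.
 */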
static void _dpu_rm_hw_destroy(enum dpu_hw_blk_type type, void *hw)
{
        switch (type) {
        case DPU_HW_BLK_LM:
                dpu_hw_lm_destroy(hw);
                break;
        case DPU_HW_BLK_CTL:
                dpu_hw_ctl_destroy(hw);
                break;
        case DPU_HW_BLK_PINGPONG:
                dpu_hw_pingpong_destroy(hw);
                break;
        case DPU_HW_BLK_INTF:
                dpu_hw_intf_destroy(hw);
                break;
        case DPU_HW_BLK_SSPP:
                /* SSPPs are not managed by the resource manager */
        case DPU_HW_BLK_TOP:
                /* Top is a singleton, not managed in hw_blks list */
        case DPU_HW_BLK_MAX:
        default:
                DPU_ERROR("unsupported block type %d\n", type);
                break;
        }
}

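/* Tear down every tracked block of every type, then destroy the rm mutex. */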
int dpu_rm_destroy(struct dpu_rm *rm)
{
        struct dpu_rm_hw_blk *hw_cur, *hw_nxt;
        enum dpu_hw_blk_type type;

        for (type = 0; type < DPU_HW_BLK_MAX; type++) {
                list_for_each_entry_safe(hw_cur, hw_nxt, &rm->hw_blks[type],
                                list) {
                        list_del(&hw_cur->list);
                        _dpu_rm_hw_destroy(type, hw_cur->hw);
                        kfree(hw_cur);
                }
        }

        mutex_destroy(&rm->rm_lock);

        return 0;
}

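/*
 * Allocate the hardware register-access object for one catalog entry and
 * add a tracking item for it to the rm list of its type, initially
 * unreserved (enc_id == 0).
 */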
static int _dpu_rm_hw_blk_create(
                struct dpu_rm *rm,
                struct dpu_mdss_cfg *cat,
                void __iomem *mmio,
                enum dpu_hw_blk_type type,
                uint32_t id,
                void *hw_catalog_info)
{
        struct dpu_rm_hw_blk *blk;
        void *hw;

        switch (type) {
        case DPU_HW_BLK_LM:
                hw = dpu_hw_lm_init(id, mmio, cat);
                break;
        case DPU_HW_BLK_CTL:
                hw = dpu_hw_ctl_init(id, mmio, cat);
                break;
        case DPU_HW_BLK_PINGPONG:
                hw = dpu_hw_pingpong_init(id, mmio, cat);
                break;
        case DPU_HW_BLK_INTF:
                hw = dpu_hw_intf_init(id, mmio, cat);
                break;
        case DPU_HW_BLK_SSPP:
                /* SSPPs are not managed by the resource manager */
        case DPU_HW_BLK_TOP:
                /* Top is a singleton, not managed in hw_blks list */
        case DPU_HW_BLK_MAX:
        default:
                DPU_ERROR("unsupported block type %d\n", type);
                return -EINVAL;
        }

        if (IS_ERR_OR_NULL(hw)) {
                DPU_ERROR("failed hw object creation: type %d, err %ld\n",
                                type, PTR_ERR(hw));
                return -EFAULT;
        }

        blk = kzalloc(sizeof(*blk), GFP_KERNEL);
        if (!blk) {
                _dpu_rm_hw_destroy(type, hw);
                return -ENOMEM;
        }

        blk->id = id;
        blk->hw = hw;
        blk->enc_id = 0;
        list_add_tail(&blk->list, &rm->hw_blks[type]);

        return 0;
}

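/*
 * Build the resource pool from the hardware catalog: create tracking items
 * for every usable LM (recording the minimum LM max-width along the way),
 * pingpong, interface and CTL block. Any failure unwinds through
 * dpu_rm_destroy().
 */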
int dpu_rm_init(struct dpu_rm *rm,
                struct dpu_mdss_cfg *cat,
                void __iomem *mmio)
{
        int rc, i;
        enum dpu_hw_blk_type type;

        if (!rm || !cat || !mmio) {
                DPU_ERROR("invalid kms\n");
                return -EINVAL;
        }

        /* Clear, then set up the per-type block lists */
        memset(rm, 0, sizeof(*rm));

        mutex_init(&rm->rm_lock);

        for (type = 0; type < DPU_HW_BLK_MAX; type++)
                INIT_LIST_HEAD(&rm->hw_blks[type]);

        /* Interrogate HW catalog and create tracking items for hw blocks */
        for (i = 0; i < cat->mixer_count; i++) {
                struct dpu_lm_cfg *lm = &cat->mixer[i];

                if (lm->pingpong == PINGPONG_MAX) {
                        DPU_DEBUG("skip mixer %d without pingpong\n", lm->id);
                        continue;
                }

                rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_LM,
                                cat->mixer[i].id, &cat->mixer[i]);
                if (rc) {
                        DPU_ERROR("failed: lm hw not available\n");
                        goto fail;
                }

                if (!rm->lm_max_width) {
                        rm->lm_max_width = lm->sblk->maxwidth;
                } else if (rm->lm_max_width != lm->sblk->maxwidth) {
                        /*
                         * Don't expect to have hw where lm max widths differ.
                         * If found, take the min.
                         */
                        DPU_ERROR("unsupported: lm maxwidth differs\n");
                        if (rm->lm_max_width > lm->sblk->maxwidth)
                                rm->lm_max_width = lm->sblk->maxwidth;
                }
        }

        for (i = 0; i < cat->pingpong_count; i++) {
                rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_PINGPONG,
                                cat->pingpong[i].id, &cat->pingpong[i]);
                if (rc) {
                        DPU_ERROR("failed: pp hw not available\n");
                        goto fail;
                }
        }

        for (i = 0; i < cat->intf_count; i++) {
                if (cat->intf[i].type == INTF_NONE) {
                        DPU_DEBUG("skip intf %d with type none\n", i);
                        continue;
                }

                rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_INTF,
                                cat->intf[i].id, &cat->intf[i]);
                if (rc) {
                        DPU_ERROR("failed: intf hw not available\n");
                        goto fail;
                }
        }

        for (i = 0; i < cat->ctl_count; i++) {
                rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_CTL,
                                cat->ctl[i].id, &cat->ctl[i]);
                if (rc) {
                        DPU_ERROR("failed: ctl hw not available\n");
                        goto fail;
                }
        }

        return 0;

fail:
        dpu_rm_destroy(rm);

        return rc;
}

static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top)
{
        return top->num_intf > 1;
}

/**
 * _dpu_rm_check_lm_and_get_connected_blks - check if the proposed layer
 *      mixer meets the proposed use case requirements, incl. hardwired
 *      dependent blocks like pingpong
 * @rm: dpu resource manager handle
 * @enc_id: encoder id requesting the allocation
 * @reqs: proposed use case requirements
 * @lm: proposed layer mixer; the function checks if lm, and all other
 *      hardwired blocks connected to the lm (pp), are available and
 *      appropriate
 * @pp: output parameter, pingpong block attached to the layer mixer.
 *      NULL if pp was not available, or not matching requirements.
 * @primary_lm: if non-null, this function also checks if lm is a compatible
 *      peer of primary_lm, in addition to satisfying all other requirements
 * Return: true if lm matches all requirements, false otherwise
 */
static bool _dpu_rm_check_lm_and_get_connected_blks(
                struct dpu_rm *rm,
                uint32_t enc_id,
                struct dpu_rm_requirements *reqs,
                struct dpu_rm_hw_blk *lm,
                struct dpu_rm_hw_blk **pp,
                struct dpu_rm_hw_blk *primary_lm)
{
        const struct dpu_lm_cfg *lm_cfg = to_dpu_hw_mixer(lm->hw)->cap;
        struct dpu_rm_hw_iter iter;

        *pp = NULL;

        DPU_DEBUG("check lm %d pp %d\n",
                        lm_cfg->id, lm_cfg->pingpong);

        /* Check if this layer mixer is a peer of the proposed primary LM */
        if (primary_lm) {
                const struct dpu_lm_cfg *prim_lm_cfg =
                                to_dpu_hw_mixer(primary_lm->hw)->cap;

                if (!test_bit(lm_cfg->id, &prim_lm_cfg->lm_pair_mask)) {
                        DPU_DEBUG("lm %d not peer of lm %d\n", lm_cfg->id,
                                        prim_lm_cfg->id);
                        return false;
                }
        }

        /* Already reserved? */
        if (RESERVED_BY_OTHER(lm, enc_id)) {
                DPU_DEBUG("lm %d already reserved\n", lm_cfg->id);
                return false;
        }

        dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_PINGPONG);
        while (_dpu_rm_get_hw_locked(rm, &iter)) {
                if (iter.blk->id == lm_cfg->pingpong) {
                        *pp = iter.blk;
                        break;
                }
        }

        if (!*pp) {
                DPU_ERROR("failed to get pp %d on lm %d\n",
                                lm_cfg->pingpong, lm_cfg->id);
                return false;
        }

        if (RESERVED_BY_OTHER(*pp, enc_id)) {
                DPU_DEBUG("lm %d pp %d already reserved\n", lm->id,
                                (*pp)->id);
                return false;
        }

        return true;
}

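/*
 * Reserve topology.num_lm mixers for the encoder. The outer loop proposes
 * each free mixer as the primary; the inner loop then tries to complete
 * the set with free peers of that primary (per the catalog's
 * lm_pair_mask). Each accepted mixer brings its hardwired pingpong along,
 * and both are tagged with enc_id on success.
 */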
static int _dpu_rm_reserve_lms(struct dpu_rm *rm, uint32_t enc_id,
                               struct dpu_rm_requirements *reqs)
{
        struct dpu_rm_hw_blk *lm[MAX_BLOCKS];
        struct dpu_rm_hw_blk *pp[MAX_BLOCKS];
        struct dpu_rm_hw_iter iter_i, iter_j;
        int lm_count = 0;
        int i, rc = 0;

        if (!reqs->topology.num_lm) {
                DPU_ERROR("invalid number of lm: %d\n", reqs->topology.num_lm);
                return -EINVAL;
        }

        /* Find a primary mixer */
        dpu_rm_init_hw_iter(&iter_i, 0, DPU_HW_BLK_LM);
        while (lm_count != reqs->topology.num_lm &&
                        _dpu_rm_get_hw_locked(rm, &iter_i)) {
                memset(&lm, 0, sizeof(lm));
                memset(&pp, 0, sizeof(pp));

                lm_count = 0;
                lm[lm_count] = iter_i.blk;

                if (!_dpu_rm_check_lm_and_get_connected_blks(
                                rm, enc_id, reqs, lm[lm_count],
                                &pp[lm_count], NULL))
                        continue;

                ++lm_count;

                /* Valid primary mixer found, find matching peers */
                dpu_rm_init_hw_iter(&iter_j, 0, DPU_HW_BLK_LM);

                while (lm_count != reqs->topology.num_lm &&
                                _dpu_rm_get_hw_locked(rm, &iter_j)) {
                        if (iter_i.blk == iter_j.blk)
                                continue;

                        if (!_dpu_rm_check_lm_and_get_connected_blks(
                                        rm, enc_id, reqs, iter_j.blk,
                                        &pp[lm_count], iter_i.blk))
                                continue;

                        lm[lm_count] = iter_j.blk;
                        ++lm_count;
                }
        }

        if (lm_count != reqs->topology.num_lm) {
                DPU_DEBUG("unable to find appropriate mixers\n");
                return -ENAVAIL;
        }

        for (i = 0; i < ARRAY_SIZE(lm); i++) {
                if (!lm[i])
                        break;

                lm[i]->enc_id = enc_id;
                pp[i]->enc_id = enc_id;

                trace_dpu_rm_reserve_lms(lm[i]->id, enc_id, pp[i]->id);
        }

        return rc;
}

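/*
 * Reserve one CTL per interface in the topology, accepting only CTLs whose
 * DPU_CTL_SPLIT_DISPLAY capability matches whether the topology drives
 * more than one interface.
 */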
static int _dpu_rm_reserve_ctls(
                struct dpu_rm *rm,
                uint32_t enc_id,
                const struct msm_display_topology *top)
{
        struct dpu_rm_hw_blk *ctls[MAX_BLOCKS];
        struct dpu_rm_hw_iter iter;
        int i = 0, num_ctls = 0;
        bool needs_split_display = false;

        memset(&ctls, 0, sizeof(ctls));

        /* each hw_intf needs its own hw_ctl to program its control path */
        num_ctls = top->num_intf;

        needs_split_display = _dpu_rm_needs_split_display(top);

        dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_CTL);
        while (_dpu_rm_get_hw_locked(rm, &iter)) {
                const struct dpu_hw_ctl *ctl = to_dpu_hw_ctl(iter.blk->hw);
                unsigned long features = ctl->caps->features;
                bool has_split_display;

                if (RESERVED_BY_OTHER(iter.blk, enc_id))
                        continue;

                has_split_display = BIT(DPU_CTL_SPLIT_DISPLAY) & features;

                DPU_DEBUG("ctl %d caps 0x%lX\n", iter.blk->id, features);

                if (needs_split_display != has_split_display)
                        continue;

                ctls[i] = iter.blk;
                DPU_DEBUG("ctl %d match\n", iter.blk->id);

                if (++i == num_ctls)
                        break;
        }

        if (i != num_ctls)
                return -ENAVAIL;

        for (i = 0; i < ARRAY_SIZE(ctls) && i < num_ctls; i++) {
                ctls[i]->enc_id = enc_id;
                trace_dpu_rm_reserve_ctls(ctls[i]->id, enc_id);
        }

        return 0;
}

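/*
 * Reserve the block of the given type with the given hardware id for the
 * encoder. Interfaces are fixed by the catalog, so a missing entry is a
 * programming error rather than resource contention.
 */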
static int _dpu_rm_reserve_intf(
                struct dpu_rm *rm,
                uint32_t enc_id,
                uint32_t id,
                enum dpu_hw_blk_type type)
{
        struct dpu_rm_hw_iter iter;
        int ret = 0;

        /* Find the block entry in the rm, and note the reservation */
        dpu_rm_init_hw_iter(&iter, 0, type);
        while (_dpu_rm_get_hw_locked(rm, &iter)) {
                if (iter.blk->id != id)
                        continue;

                if (RESERVED_BY_OTHER(iter.blk, enc_id)) {
                        DPU_ERROR("type %d id %d already reserved\n", type, id);
                        return -ENAVAIL;
                }

                iter.blk->enc_id = enc_id;
                trace_dpu_rm_reserve_intf(iter.blk->id, enc_id);
                break;
        }

        /* Shouldn't happen since intfs are fixed at probe */
        if (!iter.hw) {
                DPU_ERROR("couldn't find type %d id %d\n", type, id);
                return -EINVAL;
        }

        return ret;
}

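/*
 * Walk the encoder's reported interface usage and reserve an INTF block
 * for every slot that is not INTF_MODE_NONE.
 */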
static int _dpu_rm_reserve_intf_related_hw(
                struct dpu_rm *rm,
                uint32_t enc_id,
                struct dpu_encoder_hw_resources *hw_res)
{
        int i, ret = 0;
        u32 id;

        for (i = 0; i < ARRAY_SIZE(hw_res->intfs); i++) {
                if (hw_res->intfs[i] == INTF_MODE_NONE)
                        continue;
                id = i + INTF_0;
                ret = _dpu_rm_reserve_intf(rm, enc_id, id,
                                DPU_HW_BLK_INTF);
                if (ret)
                        return ret;
        }

        return ret;
}

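/*
 * Perform the full reservation sequence: mixers (with their pingpongs),
 * then CTLs, then interfaces. The caller releases any partial reservation
 * on failure.
 */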
static int _dpu_rm_make_reservation(
                struct dpu_rm *rm,
                struct drm_encoder *enc,
                struct drm_crtc_state *crtc_state,
                struct dpu_rm_requirements *reqs)
{
        int ret;

        ret = _dpu_rm_reserve_lms(rm, enc->base.id, reqs);
        if (ret) {
                DPU_ERROR("unable to find appropriate mixers\n");
                return ret;
        }

        ret = _dpu_rm_reserve_ctls(rm, enc->base.id, &reqs->topology);
        if (ret) {
                DPU_ERROR("unable to find appropriate CTL\n");
                return ret;
        }

        ret = _dpu_rm_reserve_intf_related_hw(rm, enc->base.id, &reqs->hw_res);
        if (ret)
                return ret;

        return ret;
}

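/*
 * Collect what the encoder needs for this modeset: its reported hardware
 * resources plus the topology chosen by the caller.
 */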
static int _dpu_rm_populate_requirements(
                struct dpu_rm *rm,
                struct drm_encoder *enc,
                struct drm_crtc_state *crtc_state,
                struct dpu_rm_requirements *reqs,
                struct msm_display_topology req_topology)
{
        dpu_encoder_get_hw_resources(enc, &reqs->hw_res);

        reqs->topology = req_topology;

        DRM_DEBUG_KMS("num_lm: %d num_enc: %d num_intf: %d\n",
                      reqs->topology.num_lm, reqs->topology.num_enc,
                      reqs->topology.num_intf);

        return 0;
}

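/*
 * Return every block bound to enc_id to the free pool by clearing its
 * enc_id. Caller must hold rm->rm_lock.
 */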
static void _dpu_rm_release_reservation(struct dpu_rm *rm, uint32_t enc_id)
{
        struct dpu_rm_hw_blk *blk;
        enum dpu_hw_blk_type type;

        for (type = 0; type < DPU_HW_BLK_MAX; type++) {
                list_for_each_entry(blk, &rm->hw_blks[type], list) {
                        if (blk->enc_id == enc_id) {
                                blk->enc_id = 0;
                                DPU_DEBUG("rel enc %d %d %d\n", enc_id,
                                          type, blk->id);
                        }
                }
        }
}

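/* Locked entry point for releasing an encoder's reservations. */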
void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc)
{
        mutex_lock(&rm->rm_lock);

        _dpu_rm_release_reservation(rm, enc->base.id);

        mutex_unlock(&rm->rm_lock);
}

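/*
 * Top-level reservation entry point. A commit that does not need a modeset
 * (e.g. a plain page-flip) keeps its existing reservation; otherwise the
 * requirements are gathered and reserved under rm_lock. With test_only
 * set, the reservation is exercised and immediately released, so only
 * availability is checked.
 */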
int dpu_rm_reserve(
                struct dpu_rm *rm,
                struct drm_encoder *enc,
                struct drm_crtc_state *crtc_state,
                struct msm_display_topology topology,
                bool test_only)
{
        struct dpu_rm_requirements reqs;
        int ret;

        /* Check if this is just a page-flip */
        if (!drm_atomic_crtc_needs_modeset(crtc_state))
                return 0;

        DRM_DEBUG_KMS("reserving hw for enc %d crtc %d test_only %d\n",
                      enc->base.id, crtc_state->crtc->base.id, test_only);

        mutex_lock(&rm->rm_lock);

        ret = _dpu_rm_populate_requirements(rm, enc, crtc_state, &reqs,
                                            topology);
        if (ret) {
                DPU_ERROR("failed to populate hw requirements\n");
                goto end;
        }

        ret = _dpu_rm_make_reservation(rm, enc, crtc_state, &reqs);
        if (ret) {
                DPU_ERROR("failed to reserve hw resources: %d\n", ret);
                _dpu_rm_release_reservation(rm, enc->base.id);
        } else if (test_only) {
                /* test_only: test the reservation and then undo */
                DPU_DEBUG("test_only: discard test [enc: %d]\n",
                                enc->base.id);
                _dpu_rm_release_reservation(rm, enc->base.id);
        }

end:
        mutex_unlock(&rm->rm_lock);

        return ret;
}
