root/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c

/* [<][>][^][v][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. get_kms
  2. request_pending
  3. request_pp_done_pending
  4. crtc_flush
  5. crtc_flush_all
  6. complete_flip
  7. unref_cursor_worker
  8. mdp5_crtc_destroy
  9. mdp5_lm_use_fg_alpha_mask
  10. blend_setup
  11. mdp5_crtc_mode_set_nofb
  12. mdp5_crtc_atomic_disable
  13. mdp5_crtc_vblank_on
  14. mdp5_crtc_atomic_enable
  15. mdp5_crtc_setup_pipeline
  16. pstate_cmp
  17. is_fullscreen
  18. get_start_stage
  19. mdp5_crtc_atomic_check
  20. mdp5_crtc_atomic_begin
  21. mdp5_crtc_atomic_flush
  22. get_roi
  23. mdp5_crtc_restore_cursor
  24. mdp5_crtc_cursor_set
  25. mdp5_crtc_cursor_move
  26. mdp5_crtc_atomic_print_state
  27. mdp5_crtc_duplicate_state
  28. mdp5_crtc_destroy_state
  29. mdp5_crtc_reset
  30. mdp5_crtc_vblank_irq
  31. mdp5_crtc_err_irq
  32. mdp5_crtc_pp_done_irq
  33. mdp5_crtc_wait_for_pp_done
  34. mdp5_crtc_wait_for_flush_done
  35. mdp5_crtc_vblank
  36. mdp5_crtc_set_pipeline
  37. mdp5_crtc_get_ctl
  38. mdp5_crtc_get_mixer
  39. mdp5_crtc_get_pipeline
  40. mdp5_crtc_wait_for_commit_done
  41. mdp5_crtc_init

   1 // SPDX-License-Identifier: GPL-2.0-only
   2 /*
   3  * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
   4  * Copyright (C) 2013 Red Hat
   5  * Author: Rob Clark <robdclark@gmail.com>
   6  */
   7 
   8 #include <linux/sort.h>
   9 
  10 #include <drm/drm_mode.h>
  11 #include <drm/drm_crtc.h>
  12 #include <drm/drm_flip_work.h>
  13 #include <drm/drm_fourcc.h>
  14 #include <drm/drm_probe_helper.h>
  15 #include <drm/drm_vblank.h>
  16 
  17 #include "mdp5_kms.h"
  18 
/* Max hardware cursor dimensions programmed into the LM cursor block. */
#define CURSOR_WIDTH	64
#define CURSOR_HEIGHT	64

/*
 * Per-CRTC driver state for an MDP5 layer-mixer based CRTC.
 * Embeds the drm_crtc and tracks irq registration, pending flip work
 * and the LM-based hardware cursor.
 */
struct mdp5_crtc {
	struct drm_crtc base;
	int id;
	bool enabled;

	spinlock_t lm_lock;	/* protect REG_MDP5_LM_* registers */

	/* if there is a pending flip, these will be non-null: */
	struct drm_pending_vblank_event *event;

	/* Bits have been flushed at the last commit,
	 * used to decide if a vsync has happened since last commit.
	 */
	u32 flushed_mask;

#define PENDING_CURSOR 0x1
#define PENDING_FLIP   0x2
	/* bitmask of PENDING_* work to complete on the next vblank */
	atomic_t pending;

	/* for unref'ing cursor bo's after scanout completes: */
	struct drm_flip_work unref_cursor_work;

	/* irq callbacks registered with the mdp irq dispatcher */
	struct mdp_irq vblank;
	struct mdp_irq err;
	struct mdp_irq pp_done;

	/* signalled by the PP_DONE irq in command mode */
	struct completion pp_completion;

	/* true when the LM (not SSPP) hardware cursor is in use */
	bool lm_cursor_enabled;

	struct {
		/* protect REG_MDP5_LM_CURSOR* registers and cursor scanout_bo*/
		spinlock_t lock;

		/* current cursor being scanned out: */
		struct drm_gem_object *scanout_bo;
		uint64_t iova;
		uint32_t width, height;
		int x, y;
	} cursor;
};
#define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base)

static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc);
  66 
  67 static struct mdp5_kms *get_kms(struct drm_crtc *crtc)
  68 {
  69         struct msm_drm_private *priv = crtc->dev->dev_private;
  70         return to_mdp5_kms(to_mdp_kms(priv->kms));
  71 }
  72 
  73 static void request_pending(struct drm_crtc *crtc, uint32_t pending)
  74 {
  75         struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
  76 
  77         atomic_or(pending, &mdp5_crtc->pending);
  78         mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank);
  79 }
  80 
  81 static void request_pp_done_pending(struct drm_crtc *crtc)
  82 {
  83         struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
  84         reinit_completion(&mdp5_crtc->pp_completion);
  85 }
  86 
  87 static u32 crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
  88 {
  89         struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
  90         struct mdp5_ctl *ctl = mdp5_cstate->ctl;
  91         struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
  92         bool start = !mdp5_cstate->defer_start;
  93 
  94         mdp5_cstate->defer_start = false;
  95 
  96         DBG("%s: flush=%08x", crtc->name, flush_mask);
  97 
  98         return mdp5_ctl_commit(ctl, pipeline, flush_mask, start);
  99 }
 100 
 101 /*
 102  * flush updates, to make sure hw is updated to new scanout fb,
 103  * so that we can safely queue unref to current fb (ie. next
 104  * vblank we know hw is done w/ previous scanout_fb).
 105  */
 106 static u32 crtc_flush_all(struct drm_crtc *crtc)
 107 {
 108         struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
 109         struct mdp5_hw_mixer *mixer, *r_mixer;
 110         struct drm_plane *plane;
 111         uint32_t flush_mask = 0;
 112 
 113         /* this should not happen: */
 114         if (WARN_ON(!mdp5_cstate->ctl))
 115                 return 0;
 116 
 117         drm_atomic_crtc_for_each_plane(plane, crtc) {
 118                 if (!plane->state->visible)
 119                         continue;
 120                 flush_mask |= mdp5_plane_get_flush(plane);
 121         }
 122 
 123         mixer = mdp5_cstate->pipeline.mixer;
 124         flush_mask |= mdp_ctl_flush_mask_lm(mixer->lm);
 125 
 126         r_mixer = mdp5_cstate->pipeline.r_mixer;
 127         if (r_mixer)
 128                 flush_mask |= mdp_ctl_flush_mask_lm(r_mixer->lm);
 129 
 130         return crtc_flush(crtc, flush_mask);
 131 }
 132 
/* if file!=NULL, this is preclose potential cancel-flip path
 * NOTE(review): @file is not referenced in the body below — presumably
 * kept for symmetry with other complete_flip implementations; confirm.
 */
static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_ctl *ctl = mdp5_cstate->ctl;
	struct drm_device *dev = crtc->dev;
	struct drm_pending_vblank_event *event;
	unsigned long flags;

	/* take and clear the pending event under event_lock, then send it */
	spin_lock_irqsave(&dev->event_lock, flags);
	event = mdp5_crtc->event;
	if (event) {
		mdp5_crtc->event = NULL;
		DBG("%s: send event: %p", crtc->name, event);
		drm_crtc_send_vblank_event(crtc, event);
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (ctl && !crtc->state->enable) {
		/* set STAGE_UNUSED for all layers */
		mdp5_ctl_blend(ctl, pipeline, NULL, NULL, 0, 0);
		/* XXX: What to do here? */
		/* mdp5_crtc->ctl = NULL; */
	}
}
 160 
 161 static void unref_cursor_worker(struct drm_flip_work *work, void *val)
 162 {
 163         struct mdp5_crtc *mdp5_crtc =
 164                 container_of(work, struct mdp5_crtc, unref_cursor_work);
 165         struct mdp5_kms *mdp5_kms = get_kms(&mdp5_crtc->base);
 166         struct msm_kms *kms = &mdp5_kms->base.base;
 167 
 168         msm_gem_unpin_iova(val, kms->aspace);
 169         drm_gem_object_put_unlocked(val);
 170 }
 171 
 172 static void mdp5_crtc_destroy(struct drm_crtc *crtc)
 173 {
 174         struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
 175 
 176         drm_crtc_cleanup(crtc);
 177         drm_flip_work_cleanup(&mdp5_crtc->unref_cursor_work);
 178 
 179         kfree(mdp5_crtc);
 180 }
 181 
 182 static inline u32 mdp5_lm_use_fg_alpha_mask(enum mdp_mixer_stage_id stage)
 183 {
 184         switch (stage) {
 185         case STAGE0: return MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA;
 186         case STAGE1: return MDP5_LM_BLEND_COLOR_OUT_STAGE1_FG_ALPHA;
 187         case STAGE2: return MDP5_LM_BLEND_COLOR_OUT_STAGE2_FG_ALPHA;
 188         case STAGE3: return MDP5_LM_BLEND_COLOR_OUT_STAGE3_FG_ALPHA;
 189         case STAGE4: return MDP5_LM_BLEND_COLOR_OUT_STAGE4_FG_ALPHA;
 190         case STAGE5: return MDP5_LM_BLEND_COLOR_OUT_STAGE5_FG_ALPHA;
 191         case STAGE6: return MDP5_LM_BLEND_COLOR_OUT_STAGE6_FG_ALPHA;
 192         default:
 193                 return 0;
 194         }
 195 }
 196 
 197 /*
 198  * left/right pipe offsets for the stage array used in blend_setup()
 199  */
 200 #define PIPE_LEFT       0
 201 #define PIPE_RIGHT      1
 202 
 203 /*
 204  * blend_setup() - blend all the planes of a CRTC
 205  *
 206  * If no base layer is available, border will be enabled as the base layer.
 207  * Otherwise all layers will be blended based on their stage calculated
 208  * in mdp5_crtc_atomic_check.
 209  */
 210 static void blend_setup(struct drm_crtc *crtc)
 211 {
 212         struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
 213         struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
 214         struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
 215         struct mdp5_kms *mdp5_kms = get_kms(crtc);
 216         struct drm_plane *plane;
 217         const struct mdp5_cfg_hw *hw_cfg;
 218         struct mdp5_plane_state *pstate, *pstates[STAGE_MAX + 1] = {NULL};
 219         const struct mdp_format *format;
 220         struct mdp5_hw_mixer *mixer = pipeline->mixer;
 221         uint32_t lm = mixer->lm;
 222         struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer;
 223         uint32_t r_lm = r_mixer ? r_mixer->lm : 0;
 224         struct mdp5_ctl *ctl = mdp5_cstate->ctl;
 225         uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0;
 226         unsigned long flags;
 227         enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
 228         enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
 229         int i, plane_cnt = 0;
 230         bool bg_alpha_enabled = false;
 231         u32 mixer_op_mode = 0;
 232         u32 val;
 233 #define blender(stage)  ((stage) - STAGE0)
 234 
 235         hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
 236 
 237         spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
 238 
 239         /* ctl could be released already when we are shutting down: */
 240         /* XXX: Can this happen now? */
 241         if (!ctl)
 242                 goto out;
 243 
 244         /* Collect all plane information */
 245         drm_atomic_crtc_for_each_plane(plane, crtc) {
 246                 enum mdp5_pipe right_pipe;
 247 
 248                 if (!plane->state->visible)
 249                         continue;
 250 
 251                 pstate = to_mdp5_plane_state(plane->state);
 252                 pstates[pstate->stage] = pstate;
 253                 stage[pstate->stage][PIPE_LEFT] = mdp5_plane_pipe(plane);
 254                 /*
 255                  * if we have a right mixer, stage the same pipe as we
 256                  * have on the left mixer
 257                  */
 258                 if (r_mixer)
 259                         r_stage[pstate->stage][PIPE_LEFT] =
 260                                                 mdp5_plane_pipe(plane);
 261                 /*
 262                  * if we have a right pipe (i.e, the plane comprises of 2
 263                  * hwpipes, then stage the right pipe on the right side of both
 264                  * the layer mixers
 265                  */
 266                 right_pipe = mdp5_plane_right_pipe(plane);
 267                 if (right_pipe) {
 268                         stage[pstate->stage][PIPE_RIGHT] = right_pipe;
 269                         r_stage[pstate->stage][PIPE_RIGHT] = right_pipe;
 270                 }
 271 
 272                 plane_cnt++;
 273         }
 274 
 275         if (!pstates[STAGE_BASE]) {
 276                 ctl_blend_flags |= MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT;
 277                 DBG("Border Color is enabled");
 278         } else if (plane_cnt) {
 279                 format = to_mdp_format(msm_framebuffer_format(pstates[STAGE_BASE]->base.fb));
 280 
 281                 if (format->alpha_enable)
 282                         bg_alpha_enabled = true;
 283         }
 284 
 285         /* The reset for blending */
 286         for (i = STAGE0; i <= STAGE_MAX; i++) {
 287                 if (!pstates[i])
 288                         continue;
 289 
 290                 format = to_mdp_format(
 291                         msm_framebuffer_format(pstates[i]->base.fb));
 292                 plane = pstates[i]->base.plane;
 293                 blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
 294                         MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST);
 295                 fg_alpha = pstates[i]->alpha;
 296                 bg_alpha = 0xFF - pstates[i]->alpha;
 297 
 298                 if (!format->alpha_enable && bg_alpha_enabled)
 299                         mixer_op_mode = 0;
 300                 else
 301                         mixer_op_mode |= mdp5_lm_use_fg_alpha_mask(i);
 302 
 303                 DBG("Stage %d fg_alpha %x bg_alpha %x", i, fg_alpha, bg_alpha);
 304 
 305                 if (format->alpha_enable && pstates[i]->premultiplied) {
 306                         blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
 307                                 MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL);
 308                         if (fg_alpha != 0xff) {
 309                                 bg_alpha = fg_alpha;
 310                                 blend_op |=
 311                                         MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA |
 312                                         MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA;
 313                         } else {
 314                                 blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA;
 315                         }
 316                 } else if (format->alpha_enable) {
 317                         blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_PIXEL) |
 318                                 MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL);
 319                         if (fg_alpha != 0xff) {
 320                                 bg_alpha = fg_alpha;
 321                                 blend_op |=
 322                                        MDP5_LM_BLEND_OP_MODE_FG_MOD_ALPHA |
 323                                        MDP5_LM_BLEND_OP_MODE_FG_INV_MOD_ALPHA |
 324                                        MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA |
 325                                        MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA;
 326                         } else {
 327                                 blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA;
 328                         }
 329                 }
 330 
 331                 mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(lm,
 332                                 blender(i)), blend_op);
 333                 mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(lm,
 334                                 blender(i)), fg_alpha);
 335                 mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(lm,
 336                                 blender(i)), bg_alpha);
 337                 if (r_mixer) {
 338                         mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(r_lm,
 339                                         blender(i)), blend_op);
 340                         mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(r_lm,
 341                                         blender(i)), fg_alpha);
 342                         mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(r_lm,
 343                                         blender(i)), bg_alpha);
 344                 }
 345         }
 346 
 347         val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm));
 348         mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm),
 349                    val | mixer_op_mode);
 350         if (r_mixer) {
 351                 val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm));
 352                 mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm),
 353                            val | mixer_op_mode);
 354         }
 355 
 356         mdp5_ctl_blend(ctl, pipeline, stage, r_stage, plane_cnt,
 357                        ctl_blend_flags);
 358 out:
 359         spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
 360 }
 361 
/*
 * Program the layer mixer output size(s) for the CRTC's adjusted mode.
 * In source split mode (r_mixer present) each LM drives half the width,
 * with the left/right assignment set in BLEND_COLOR_OUT.
 */
static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct mdp5_hw_mixer *mixer = mdp5_cstate->pipeline.mixer;
	struct mdp5_hw_mixer *r_mixer = mdp5_cstate->pipeline.r_mixer;
	uint32_t lm = mixer->lm;
	u32 mixer_width, val;
	unsigned long flags;
	struct drm_display_mode *mode;

	if (WARN_ON(!crtc->state))
		return;

	mode = &crtc->state->adjusted_mode;

	DBG("%s: set mode: " DRM_MODE_FMT, crtc->name, DRM_MODE_ARG(mode));

	/* each mixer covers half the display when two are in use */
	mixer_width = mode->hdisplay;
	if (r_mixer)
		mixer_width /= 2;

	spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
	mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(lm),
			MDP5_LM_OUT_SIZE_WIDTH(mixer_width) |
			MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));

	/* Assign mixer to LEFT side in source split mode */
	val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm));
	val &= ~MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT;
	mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm), val);

	if (r_mixer) {
		u32 r_lm = r_mixer->lm;

		mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(r_lm),
			   MDP5_LM_OUT_SIZE_WIDTH(mixer_width) |
			   MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));

		/* Assign mixer to RIGHT side in source split mode */
		val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm));
		val |= MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT;
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm), val);
	}

	spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}
 410 
/*
 * Atomic helper: disable the CRTC. Quiesces vblank handling and irqs
 * before dropping the runtime PM reference, then delivers any pending
 * event since no further vblank will come to do it.
 */
static void mdp5_crtc_atomic_disable(struct drm_crtc *crtc,
				     struct drm_crtc_state *old_state)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct device *dev = &mdp5_kms->pdev->dev;
	unsigned long flags;

	DBG("%s", crtc->name);

	if (WARN_ON(!mdp5_crtc->enabled))
		return;

	/* Disable/save vblank irq handling before power is disabled */
	drm_crtc_vblank_off(crtc);

	/* pp_done irq was only registered for command mode */
	if (mdp5_cstate->cmd_mode)
		mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->pp_done);

	mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
	pm_runtime_put_sync(dev);

	/* fake a vblank event for a disable-with-event commit */
	if (crtc->state->event && !crtc->state->active) {
		WARN_ON(mdp5_crtc->event);
		spin_lock_irqsave(&mdp5_kms->dev->event_lock, flags);
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&mdp5_kms->dev->event_lock, flags);
	}

	mdp5_crtc->enabled = false;
}
 444 
 445 static void mdp5_crtc_vblank_on(struct drm_crtc *crtc)
 446 {
 447         struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
 448         struct mdp5_interface *intf = mdp5_cstate->pipeline.intf;
 449         u32 count;
 450 
 451         count = intf->mode == MDP5_INTF_DSI_MODE_COMMAND ? 0 : 0xffffffff;
 452         drm_crtc_set_max_vblank_count(crtc, count);
 453 
 454         drm_crtc_vblank_on(crtc);
 455 }
 456 
/*
 * Atomic helper: enable the CRTC. Takes a runtime PM reference,
 * restores LM cursor state lost over suspend, re-enables vblank
 * handling and registers the error (and, for command mode, pp_done)
 * irqs.
 */
static void mdp5_crtc_atomic_enable(struct drm_crtc *crtc,
				    struct drm_crtc_state *old_state)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct device *dev = &mdp5_kms->pdev->dev;

	DBG("%s", crtc->name);

	if (WARN_ON(mdp5_crtc->enabled))
		return;

	pm_runtime_get_sync(dev);

	if (mdp5_crtc->lm_cursor_enabled) {
		/*
		 * Restore LM cursor state, as it might have been lost
		 * with suspend:
		 */
		if (mdp5_crtc->cursor.iova) {
			unsigned long flags;

			spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
			mdp5_crtc_restore_cursor(crtc);
			spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);

			mdp5_ctl_set_cursor(mdp5_cstate->ctl,
					    &mdp5_cstate->pipeline, 0, true);
		} else {
			/* no cursor bo pinned: make sure the cursor is off */
			mdp5_ctl_set_cursor(mdp5_cstate->ctl,
					    &mdp5_cstate->pipeline, 0, false);
		}
	}

	/* Restore vblank irq handling after power is enabled */
	mdp5_crtc_vblank_on(crtc);

	mdp5_crtc_mode_set_nofb(crtc);

	mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);

	if (mdp5_cstate->cmd_mode)
		mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->pp_done);

	mdp5_crtc->enabled = true;
}
 504 
/*
 * Assign hardware mixers to the CRTC's pipeline state and derive the
 * per-interface irq masks. Called from atomic_check; allocations go
 * through the atomic state so they roll back on failure.
 *
 * @need_right_mixer: true when a second (right) LM is required, either
 *                    for wide modes or for planes using two hwpipes.
 *
 * Returns 0 on success or a negative errno from mdp5_mixer_assign().
 */
int mdp5_crtc_setup_pipeline(struct drm_crtc *crtc,
			     struct drm_crtc_state *new_crtc_state,
			     bool need_right_mixer)
{
	struct mdp5_crtc_state *mdp5_cstate =
			to_mdp5_crtc_state(new_crtc_state);
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
	struct mdp5_interface *intf;
	bool new_mixer = false;

	/* need a (re)assignment if no mixer yet, or if the right-mixer
	 * requirement changed either way */
	new_mixer = !pipeline->mixer;

	if ((need_right_mixer && !pipeline->r_mixer) ||
	    (!need_right_mixer && pipeline->r_mixer))
		new_mixer = true;

	if (new_mixer) {
		struct mdp5_hw_mixer *old_mixer = pipeline->mixer;
		struct mdp5_hw_mixer *old_r_mixer = pipeline->r_mixer;
		u32 caps;
		int ret;

		caps = MDP_LM_CAP_DISPLAY;
		if (need_right_mixer)
			caps |= MDP_LM_CAP_PAIR;

		ret = mdp5_mixer_assign(new_crtc_state->state, crtc, caps,
					&pipeline->mixer, need_right_mixer ?
					&pipeline->r_mixer : NULL);
		if (ret)
			return ret;

		/* release the previously-held mixers back to the pool */
		mdp5_mixer_release(new_crtc_state->state, old_mixer);
		if (old_r_mixer) {
			mdp5_mixer_release(new_crtc_state->state, old_r_mixer);
			if (!need_right_mixer)
				pipeline->r_mixer = NULL;
		}
	}

	/*
	 * these should have been already set up in the encoder's atomic
	 * check (called by drm_atomic_helper_check_modeset)
	 */
	intf = pipeline->intf;

	mdp5_cstate->err_irqmask = intf2err(intf->num);
	mdp5_cstate->vblank_irqmask = intf2vblank(pipeline->mixer, intf);

	/* DSI command mode signals frame completion via ping-pong done */
	if ((intf->type == INTF_DSI) &&
	    (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)) {
		mdp5_cstate->pp_done_irqmask = lm2ppdone(pipeline->mixer);
		mdp5_cstate->cmd_mode = true;
	} else {
		mdp5_cstate->pp_done_irqmask = 0;
		mdp5_cstate->cmd_mode = false;
	}

	return 0;
}
 565 
/* temporary (plane, state) pair, sorted by zpos in atomic_check */
struct plane_state {
	struct drm_plane *plane;
	struct mdp5_plane_state *state;
};
 570 
 571 static int pstate_cmp(const void *a, const void *b)
 572 {
 573         struct plane_state *pa = (struct plane_state *)a;
 574         struct plane_state *pb = (struct plane_state *)b;
 575         return pa->state->zpos - pb->state->zpos;
 576 }
 577 
 578 /* is there a helper for this? */
 579 static bool is_fullscreen(struct drm_crtc_state *cstate,
 580                 struct drm_plane_state *pstate)
 581 {
 582         return (pstate->crtc_x <= 0) && (pstate->crtc_y <= 0) &&
 583                 ((pstate->crtc_x + pstate->crtc_w) >= cstate->mode.hdisplay) &&
 584                 ((pstate->crtc_y + pstate->crtc_h) >= cstate->mode.vdisplay);
 585 }
 586 
 587 static enum mdp_mixer_stage_id get_start_stage(struct drm_crtc *crtc,
 588                                         struct drm_crtc_state *new_crtc_state,
 589                                         struct drm_plane_state *bpstate)
 590 {
 591         struct mdp5_crtc_state *mdp5_cstate =
 592                         to_mdp5_crtc_state(new_crtc_state);
 593 
 594         /*
 595          * if we're in source split mode, it's mandatory to have
 596          * border out on the base stage
 597          */
 598         if (mdp5_cstate->pipeline.r_mixer)
 599                 return STAGE0;
 600 
 601         /* if the bottom-most layer is not fullscreen, we need to use
 602          * it for solid-color:
 603          */
 604         if (!is_fullscreen(new_crtc_state, bpstate))
 605                 return STAGE0;
 606 
 607         return STAGE_BASE;
 608 }
 609 
 610 static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
 611                 struct drm_crtc_state *state)
 612 {
 613         struct mdp5_kms *mdp5_kms = get_kms(crtc);
 614         struct drm_plane *plane;
 615         struct drm_device *dev = crtc->dev;
 616         struct plane_state pstates[STAGE_MAX + 1];
 617         const struct mdp5_cfg_hw *hw_cfg;
 618         const struct drm_plane_state *pstate;
 619         const struct drm_display_mode *mode = &state->adjusted_mode;
 620         bool cursor_plane = false;
 621         bool need_right_mixer = false;
 622         int cnt = 0, i;
 623         int ret;
 624         enum mdp_mixer_stage_id start;
 625 
 626         DBG("%s: check", crtc->name);
 627 
 628         drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
 629                 if (!pstate->visible)
 630                         continue;
 631 
 632                 pstates[cnt].plane = plane;
 633                 pstates[cnt].state = to_mdp5_plane_state(pstate);
 634 
 635                 /*
 636                  * if any plane on this crtc uses 2 hwpipes, then we need
 637                  * the crtc to have a right hwmixer.
 638                  */
 639                 if (pstates[cnt].state->r_hwpipe)
 640                         need_right_mixer = true;
 641                 cnt++;
 642 
 643                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
 644                         cursor_plane = true;
 645         }
 646 
 647         /* bail out early if there aren't any planes */
 648         if (!cnt)
 649                 return 0;
 650 
 651         hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
 652 
 653         /*
 654          * we need a right hwmixer if the mode's width is greater than a single
 655          * LM's max width
 656          */
 657         if (mode->hdisplay > hw_cfg->lm.max_width)
 658                 need_right_mixer = true;
 659 
 660         ret = mdp5_crtc_setup_pipeline(crtc, state, need_right_mixer);
 661         if (ret) {
 662                 DRM_DEV_ERROR(dev->dev, "couldn't assign mixers %d\n", ret);
 663                 return ret;
 664         }
 665 
 666         /* assign a stage based on sorted zpos property */
 667         sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);
 668 
 669         /* trigger a warning if cursor isn't the highest zorder */
 670         WARN_ON(cursor_plane &&
 671                 (pstates[cnt - 1].plane->type != DRM_PLANE_TYPE_CURSOR));
 672 
 673         start = get_start_stage(crtc, state, &pstates[0].state->base);
 674 
 675         /* verify that there are not too many planes attached to crtc
 676          * and that we don't have conflicting mixer stages:
 677          */
 678         if ((cnt + start - 1) >= hw_cfg->lm.nb_stages) {
 679                 DRM_DEV_ERROR(dev->dev, "too many planes! cnt=%d, start stage=%d\n",
 680                         cnt, start);
 681                 return -EINVAL;
 682         }
 683 
 684         for (i = 0; i < cnt; i++) {
 685                 if (cursor_plane && (i == (cnt - 1)))
 686                         pstates[i].state->stage = hw_cfg->lm.nb_stages;
 687                 else
 688                         pstates[i].state->stage = start + i;
 689                 DBG("%s: assign pipe %s on stage=%d", crtc->name,
 690                                 pstates[i].plane->name,
 691                                 pstates[i].state->stage);
 692         }
 693 
 694         return 0;
 695 }
 696 
/* Atomic helper: nothing to do before plane updates; kept for tracing. */
static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc,
				   struct drm_crtc_state *old_crtc_state)
{
	DBG("%s: begin", crtc->name);
}
 702 
/*
 * Atomic helper: push the new state to hardware. Stashes the pending
 * event, programs blending, flushes all planes/mixers through the CTL
 * and arms the vblank irq to complete the flip.
 */
static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc,
				   struct drm_crtc_state *old_crtc_state)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	DBG("%s: event: %p", crtc->name, crtc->state->event);

	WARN_ON(mdp5_crtc->event);

	/* hand the event over to the vblank path under event_lock */
	spin_lock_irqsave(&dev->event_lock, flags);
	mdp5_crtc->event = crtc->state->event;
	crtc->state->event = NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	/*
	 * If no CTL has been allocated in mdp5_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	/* XXX: Can this happen now ? */
	if (unlikely(!mdp5_cstate->ctl))
		return;

	blend_setup(crtc);

	/* PP_DONE irq is only used by command mode for now.
	 * It is better to request pending before FLUSH and START trigger
	 * to make sure no pp_done irq missed.
	 * This is safe because no pp_done will happen before SW trigger
	 * in command mode.
	 */
	if (mdp5_cstate->cmd_mode)
		request_pp_done_pending(crtc);

	/* remember what was flushed so wait_for_flush_done can poll it */
	mdp5_crtc->flushed_mask = crtc_flush_all(crtc);

	/* XXX are we leaking out state here? */
	mdp5_crtc->vblank.irqmask = mdp5_cstate->vblank_irqmask;
	mdp5_crtc->err.irqmask = mdp5_cstate->err_irqmask;
	mdp5_crtc->pp_done.irqmask = mdp5_cstate->pp_done_irqmask;

	request_pending(crtc, PENDING_FLIP);
}
 749 
/*
 * Compute the cursor Region Of Interest (visible width/height) for the
 * current cursor position stored in mdp5_crtc->cursor.
 * Caller is expected to have clamped cursor.x/y (see mdp5_crtc_cursor_move()).
 */
static void get_roi(struct drm_crtc *crtc, uint32_t *roi_w, uint32_t *roi_h)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	uint32_t xres = crtc->mode.hdisplay;
	uint32_t yres = crtc->mode.vdisplay;

	/*
	 * Cursor Region Of Interest (ROI) is a plane read from cursor
	 * buffer to render. The ROI region is determined by the visibility of
	 * the cursor point. In the default Cursor image the cursor point will
	 * be at the top left of the cursor image.
	 *
	 * Without rotation:
	 * If the cursor point reaches the right (xres - x < cursor.width) or
	 * bottom (yres - y < cursor.height) boundary of the screen, then ROI
	 * width and ROI height need to be evaluated to crop the cursor image
	 * accordingly.
	 * (xres-x) will be new cursor width when x > (xres - cursor.width)
	 * (yres-y) will be new cursor height when y > (yres - cursor.height)
	 *
	 * With rotation:
	 * We get negative x and/or y coordinates.
	 * (cursor.width - abs(x)) will be new cursor width when x < 0
	 * (cursor.height - abs(y)) will be new cursor height when y < 0
	 */
	if (mdp5_crtc->cursor.x >= 0)
		*roi_w = min(mdp5_crtc->cursor.width, xres -
			mdp5_crtc->cursor.x);
	else
		*roi_w = mdp5_crtc->cursor.width - abs(mdp5_crtc->cursor.x);
	if (mdp5_crtc->cursor.y >= 0)
		*roi_h = min(mdp5_crtc->cursor.height, yres -
			mdp5_crtc->cursor.y);
	else
		*roi_h = mdp5_crtc->cursor.height - abs(mdp5_crtc->cursor.y);
}
 786 
/*
 * Program the layer-mixer cursor registers from the cached cursor state
 * (position, size, iova). Must be called with mdp5_crtc->cursor.lock held,
 * since it reads several cursor.* fields that cursor_set/cursor_move update.
 */
static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc)
{
	const struct drm_format_info *info = drm_format_info(DRM_FORMAT_ARGB8888);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	const enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
	uint32_t blendcfg, stride;
	uint32_t x, y, src_x, src_y, width, height;
	uint32_t roi_w, roi_h;
	int lm;

	assert_spin_locked(&mdp5_crtc->cursor.lock);

	lm = mdp5_cstate->pipeline.mixer->lm;

	x = mdp5_crtc->cursor.x;
	y = mdp5_crtc->cursor.y;
	width = mdp5_crtc->cursor.width;
	height = mdp5_crtc->cursor.height;

	/* cursor buffer is always ARGB8888, so 4 bytes per pixel */
	stride = width * info->cpp[0];

	get_roi(crtc, &roi_w, &roi_h);

	/* If cursor buffer overlaps due to rotation on the
	 * upper or left screen border the pixel offset inside
	 * the cursor buffer of the ROI is the positive overlap
	 * distance.
	 */
	if (mdp5_crtc->cursor.x < 0) {
		src_x = abs(mdp5_crtc->cursor.x);
		x = 0;
	} else {
		src_x = 0;
	}
	if (mdp5_crtc->cursor.y < 0) {
		src_y = abs(mdp5_crtc->cursor.y);
		y = 0;
	} else {
		src_y = 0;
	}
	DBG("%s: x=%d, y=%d roi_w=%d roi_h=%d src_x=%d src_y=%d",
		crtc->name, x, y, roi_w, roi_h, src_x, src_y);

	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride);
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm),
			MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_IMG_SIZE(lm),
			MDP5_LM_CURSOR_IMG_SIZE_SRC_H(height) |
			MDP5_LM_CURSOR_IMG_SIZE_SRC_W(width));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
			MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
			MDP5_LM_CURSOR_SIZE_ROI_W(roi_w));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_START_XY(lm),
			MDP5_LM_CURSOR_START_XY_Y_START(y) |
			MDP5_LM_CURSOR_START_XY_X_START(x));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_XY(lm),
			MDP5_LM_CURSOR_XY_SRC_Y(src_y) |
			MDP5_LM_CURSOR_XY_SRC_X(src_x));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BASE_ADDR(lm),
			mdp5_crtc->cursor.iova);

	/* blend using the cursor buffer's own per-pixel alpha */
	blendcfg = MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN;
	blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(cur_alpha);
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BLEND_CONFIG(lm), blendcfg);
}
 854 
 855 static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
 856                 struct drm_file *file, uint32_t handle,
 857                 uint32_t width, uint32_t height)
 858 {
 859         struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
 860         struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
 861         struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
 862         struct drm_device *dev = crtc->dev;
 863         struct mdp5_kms *mdp5_kms = get_kms(crtc);
 864         struct platform_device *pdev = mdp5_kms->pdev;
 865         struct msm_kms *kms = &mdp5_kms->base.base;
 866         struct drm_gem_object *cursor_bo, *old_bo = NULL;
 867         struct mdp5_ctl *ctl;
 868         int ret;
 869         uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
 870         bool cursor_enable = true;
 871         unsigned long flags;
 872 
 873         if (!mdp5_crtc->lm_cursor_enabled) {
 874                 dev_warn(dev->dev,
 875                          "cursor_set is deprecated with cursor planes\n");
 876                 return -EINVAL;
 877         }
 878 
 879         if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
 880                 DRM_DEV_ERROR(dev->dev, "bad cursor size: %dx%d\n", width, height);
 881                 return -EINVAL;
 882         }
 883 
 884         ctl = mdp5_cstate->ctl;
 885         if (!ctl)
 886                 return -EINVAL;
 887 
 888         /* don't support LM cursors when we we have source split enabled */
 889         if (mdp5_cstate->pipeline.r_mixer)
 890                 return -EINVAL;
 891 
 892         if (!handle) {
 893                 DBG("Cursor off");
 894                 cursor_enable = false;
 895                 mdp5_crtc->cursor.iova = 0;
 896                 pm_runtime_get_sync(&pdev->dev);
 897                 goto set_cursor;
 898         }
 899 
 900         cursor_bo = drm_gem_object_lookup(file, handle);
 901         if (!cursor_bo)
 902                 return -ENOENT;
 903 
 904         ret = msm_gem_get_and_pin_iova(cursor_bo, kms->aspace,
 905                         &mdp5_crtc->cursor.iova);
 906         if (ret)
 907                 return -EINVAL;
 908 
 909         pm_runtime_get_sync(&pdev->dev);
 910 
 911         spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
 912         old_bo = mdp5_crtc->cursor.scanout_bo;
 913 
 914         mdp5_crtc->cursor.scanout_bo = cursor_bo;
 915         mdp5_crtc->cursor.width = width;
 916         mdp5_crtc->cursor.height = height;
 917 
 918         mdp5_crtc_restore_cursor(crtc);
 919 
 920         spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);
 921 
 922 set_cursor:
 923         ret = mdp5_ctl_set_cursor(ctl, pipeline, 0, cursor_enable);
 924         if (ret) {
 925                 DRM_DEV_ERROR(dev->dev, "failed to %sable cursor: %d\n",
 926                                 cursor_enable ? "en" : "dis", ret);
 927                 goto end;
 928         }
 929 
 930         crtc_flush(crtc, flush_mask);
 931 
 932 end:
 933         pm_runtime_put_sync(&pdev->dev);
 934         if (old_bo) {
 935                 drm_flip_work_queue(&mdp5_crtc->unref_cursor_work, old_bo);
 936                 /* enable vblank to complete cursor work: */
 937                 request_pending(crtc, PENDING_CURSOR);
 938         }
 939         return ret;
 940 }
 941 
/*
 * Legacy LM cursor position update: clamp the new coordinates, recompute
 * the visible ROI and reprogram the cursor registers.
 * Returns 0 on success or a negative errno.
 */
static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
	struct drm_device *dev = crtc->dev;
	uint32_t roi_w;
	uint32_t roi_h;
	unsigned long flags;

	if (!mdp5_crtc->lm_cursor_enabled) {
		dev_warn(dev->dev,
			 "cursor_move is deprecated with cursor planes\n");
		return -EINVAL;
	}

	/* don't support LM cursors when we have source split enabled */
	if (mdp5_cstate->pipeline.r_mixer)
		return -EINVAL;

	/* In case the CRTC is disabled, just drop the cursor update */
	if (unlikely(!crtc->state->enable))
		return 0;

	/* accept negative x/y coordinates up to maximum cursor overlap */
	mdp5_crtc->cursor.x = x = max(x, -(int)mdp5_crtc->cursor.width);
	mdp5_crtc->cursor.y = y = max(y, -(int)mdp5_crtc->cursor.height);

	get_roi(crtc, &roi_w, &roi_h);

	/* keep the device powered while touching cursor registers */
	pm_runtime_get_sync(&mdp5_kms->pdev->dev);

	spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
	mdp5_crtc_restore_cursor(crtc);
	spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);

	crtc_flush(crtc, flush_mask);

	pm_runtime_put_sync(&mdp5_kms->pdev->dev);

	return 0;
}
 985 
 986 static void
 987 mdp5_crtc_atomic_print_state(struct drm_printer *p,
 988                              const struct drm_crtc_state *state)
 989 {
 990         struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(state);
 991         struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
 992         struct mdp5_kms *mdp5_kms = get_kms(state->crtc);
 993 
 994         if (WARN_ON(!pipeline))
 995                 return;
 996 
 997         if (mdp5_cstate->ctl)
 998                 drm_printf(p, "\tctl=%d\n", mdp5_ctl_get_ctl_id(mdp5_cstate->ctl));
 999 
1000         drm_printf(p, "\thwmixer=%s\n", pipeline->mixer ?
1001                         pipeline->mixer->name : "(null)");
1002 
1003         if (mdp5_kms->caps & MDP_CAP_SRC_SPLIT)
1004                 drm_printf(p, "\tright hwmixer=%s\n", pipeline->r_mixer ?
1005                            pipeline->r_mixer->name : "(null)");
1006 
1007         drm_printf(p, "\tcmd_mode=%d\n", mdp5_cstate->cmd_mode);
1008 }
1009 
1010 static struct drm_crtc_state *
1011 mdp5_crtc_duplicate_state(struct drm_crtc *crtc)
1012 {
1013         struct mdp5_crtc_state *mdp5_cstate;
1014 
1015         if (WARN_ON(!crtc->state))
1016                 return NULL;
1017 
1018         mdp5_cstate = kmemdup(to_mdp5_crtc_state(crtc->state),
1019                               sizeof(*mdp5_cstate), GFP_KERNEL);
1020         if (!mdp5_cstate)
1021                 return NULL;
1022 
1023         __drm_atomic_helper_crtc_duplicate_state(crtc, &mdp5_cstate->base);
1024 
1025         return &mdp5_cstate->base;
1026 }
1027 
/* Release a CRTC state previously produced by reset/duplicate. */
static void mdp5_crtc_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *state)
{
	/* clean up the common state first, then free the container */
	__drm_atomic_helper_crtc_destroy_state(state);
	kfree(to_mdp5_crtc_state(state));
}
1036 
1037 static void mdp5_crtc_reset(struct drm_crtc *crtc)
1038 {
1039         struct mdp5_crtc_state *mdp5_cstate =
1040                 kzalloc(sizeof(*mdp5_cstate), GFP_KERNEL);
1041 
1042         if (crtc->state)
1043                 mdp5_crtc_destroy_state(crtc, crtc->state);
1044 
1045         __drm_atomic_helper_crtc_reset(crtc, &mdp5_cstate->base);
1046 
1047         drm_crtc_vblank_reset(crtc);
1048 }
1049 
/* CRTC vfuncs: atomic helpers plus the legacy LM-cursor entry points. */
static const struct drm_crtc_funcs mdp5_crtc_funcs = {
        .set_config = drm_atomic_helper_set_config,
        .destroy = mdp5_crtc_destroy,
        .page_flip = drm_atomic_helper_page_flip,
        .reset = mdp5_crtc_reset,
        .atomic_duplicate_state = mdp5_crtc_duplicate_state,
        .atomic_destroy_state = mdp5_crtc_destroy_state,
        .cursor_set = mdp5_crtc_cursor_set,
        .cursor_move = mdp5_crtc_cursor_move,
        .atomic_print_state = mdp5_crtc_atomic_print_state,
};
1061 
/* Atomic modeset helper hooks for this CRTC. */
static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
        .mode_set_nofb = mdp5_crtc_mode_set_nofb,
        .atomic_check = mdp5_crtc_atomic_check,
        .atomic_begin = mdp5_crtc_atomic_begin,
        .atomic_flush = mdp5_crtc_atomic_flush,
        .atomic_enable = mdp5_crtc_atomic_enable,
        .atomic_disable = mdp5_crtc_atomic_disable,
};
1070 
/*
 * Vblank interrupt: complete any pending pageflip and release retired
 * cursor BOs. The irq is one-shot — it is unregistered here and re-armed
 * by the next request_pending() call.
 */
static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, vblank);
	struct drm_crtc *crtc = &mdp5_crtc->base;
	struct msm_drm_private *priv = crtc->dev->dev_private;
	unsigned pending;

	mdp_irq_unregister(&get_kms(crtc)->base, &mdp5_crtc->vblank);

	/* consume the pending bits atomically; new requests re-arm the irq */
	pending = atomic_xchg(&mdp5_crtc->pending, 0);

	if (pending & PENDING_FLIP) {
		complete_flip(crtc, NULL);
	}

	if (pending & PENDING_CURSOR)
		drm_flip_work_commit(&mdp5_crtc->unref_cursor_work, priv->wq);
}
1089 
1090 static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
1091 {
1092         struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, err);
1093 
1094         DBG("%s: error: %08x", mdp5_crtc->base.name, irqstatus);
1095 }
1096 
1097 static void mdp5_crtc_pp_done_irq(struct mdp_irq *irq, uint32_t irqstatus)
1098 {
1099         struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc,
1100                                                                 pp_done);
1101 
1102         complete(&mdp5_crtc->pp_completion);
1103 }
1104 
1105 static void mdp5_crtc_wait_for_pp_done(struct drm_crtc *crtc)
1106 {
1107         struct drm_device *dev = crtc->dev;
1108         struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
1109         struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
1110         int ret;
1111 
1112         ret = wait_for_completion_timeout(&mdp5_crtc->pp_completion,
1113                                                 msecs_to_jiffies(50));
1114         if (ret == 0)
1115                 dev_warn_ratelimited(dev->dev, "pp done time out, lm=%d\n",
1116                                      mdp5_cstate->pipeline.mixer->lm);
1117 }
1118 
/*
 * Wait (up to 50ms) for the hardware to clear the flush bits latched in
 * mdp5_crtc->flushed_mask by mdp5_crtc_atomic_flush(). Uses the vblank
 * wait queue, so a vblank reference is held for the duration of the wait.
 */
static void mdp5_crtc_wait_for_flush_done(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_ctl *ctl = mdp5_cstate->ctl;
	int ret;

	/* Should not call this function if crtc is disabled. */
	if (!ctl)
		return;

	/* keep vblank irqs enabled so the wait queue gets woken */
	ret = drm_crtc_vblank_get(crtc);
	if (ret)
		return;

	ret = wait_event_timeout(dev->vblank[drm_crtc_index(crtc)].queue,
		((mdp5_ctl_get_commit_status(ctl) &
		mdp5_crtc->flushed_mask) == 0),
		msecs_to_jiffies(50));
	if (ret <= 0)
		dev_warn(dev->dev, "vblank time out, crtc=%d\n", mdp5_crtc->id);

	mdp5_crtc->flushed_mask = 0;

	drm_crtc_vblank_put(crtc);
}
1146 
1147 uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc)
1148 {
1149         struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
1150         return mdp5_crtc->vblank.irqmask;
1151 }
1152 
/*
 * Push the CRTC's current pipeline configuration (mixer assignment etc.)
 * down to its CTL block.
 */
void mdp5_crtc_set_pipeline(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);

	/* should this be done elsewhere ? */
	mdp_irq_update(&mdp5_kms->base);

	mdp5_ctl_set_pipeline(mdp5_cstate->ctl, &mdp5_cstate->pipeline);
}
1163 
1164 struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc)
1165 {
1166         struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
1167 
1168         return mdp5_cstate->ctl;
1169 }
1170 
1171 struct mdp5_hw_mixer *mdp5_crtc_get_mixer(struct drm_crtc *crtc)
1172 {
1173         struct mdp5_crtc_state *mdp5_cstate;
1174 
1175         if (WARN_ON(!crtc))
1176                 return ERR_PTR(-EINVAL);
1177 
1178         mdp5_cstate = to_mdp5_crtc_state(crtc->state);
1179 
1180         return WARN_ON(!mdp5_cstate->pipeline.mixer) ?
1181                 ERR_PTR(-EINVAL) : mdp5_cstate->pipeline.mixer;
1182 }
1183 
1184 struct mdp5_pipeline *mdp5_crtc_get_pipeline(struct drm_crtc *crtc)
1185 {
1186         struct mdp5_crtc_state *mdp5_cstate;
1187 
1188         if (WARN_ON(!crtc))
1189                 return ERR_PTR(-EINVAL);
1190 
1191         mdp5_cstate = to_mdp5_crtc_state(crtc->state);
1192 
1193         return &mdp5_cstate->pipeline;
1194 }
1195 
1196 void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc)
1197 {
1198         struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
1199 
1200         if (mdp5_cstate->cmd_mode)
1201                 mdp5_crtc_wait_for_pp_done(crtc);
1202         else
1203                 mdp5_crtc_wait_for_flush_done(crtc);
1204 }
1205 
1206 /* initialize crtc */
1207 struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
1208                                 struct drm_plane *plane,
1209                                 struct drm_plane *cursor_plane, int id)
1210 {
1211         struct drm_crtc *crtc = NULL;
1212         struct mdp5_crtc *mdp5_crtc;
1213 
1214         mdp5_crtc = kzalloc(sizeof(*mdp5_crtc), GFP_KERNEL);
1215         if (!mdp5_crtc)
1216                 return ERR_PTR(-ENOMEM);
1217 
1218         crtc = &mdp5_crtc->base;
1219 
1220         mdp5_crtc->id = id;
1221 
1222         spin_lock_init(&mdp5_crtc->lm_lock);
1223         spin_lock_init(&mdp5_crtc->cursor.lock);
1224         init_completion(&mdp5_crtc->pp_completion);
1225 
1226         mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
1227         mdp5_crtc->err.irq = mdp5_crtc_err_irq;
1228         mdp5_crtc->pp_done.irq = mdp5_crtc_pp_done_irq;
1229 
1230         mdp5_crtc->lm_cursor_enabled = cursor_plane ? false : true;
1231 
1232         drm_crtc_init_with_planes(dev, crtc, plane, cursor_plane,
1233                                   &mdp5_crtc_funcs, NULL);
1234 
1235         drm_flip_work_init(&mdp5_crtc->unref_cursor_work,
1236                         "unref cursor", unref_cursor_worker);
1237 
1238         drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);
1239 
1240         return crtc;
1241 }

/* [<][>][^][v][top][bottom][index][help] */