drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c


DEFINITIONS

This source file includes the following definitions:
  1. get_kms
  2. ctl_write
  3. ctl_read
  4. set_display_intf
  5. set_ctl_op
  6. mdp5_ctl_set_pipeline
  7. start_signal_needed
  8. send_start_signal
  9. mdp5_ctl_set_encoder_state
  10. mdp5_ctl_set_cursor
  11. mdp_ctl_blend_mask
  12. mdp_ctl_blend_ext_mask
  13. mdp5_ctl_reset_blend_regs
  14. mdp5_ctl_blend
  15. mdp_ctl_flush_mask_encoder
  16. mdp_ctl_flush_mask_cursor
  17. mdp_ctl_flush_mask_pipe
  18. mdp_ctl_flush_mask_lm
  19. fix_sw_flush
  20. fix_for_single_flush
  21. mdp5_ctl_commit
  22. mdp5_ctl_get_commit_status
  23. mdp5_ctl_get_ctl_id
  24. mdp5_ctl_pair
  25. mdp5_ctlm_request
  26. mdp5_ctlm_hw_reset
  27. mdp5_ctlm_destroy
  28. mdp5_ctlm_init

   1 // SPDX-License-Identifier: GPL-2.0-only
   2 /*
   3  * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
   4  */
   5 
   6 #include "mdp5_kms.h"
   7 #include "mdp5_ctl.h"
   8 
   9 /*
  10  * CTL - MDP Control Pool Manager
  11  *
  12  * Controls are shared between all display interfaces.
  13  *
  14  * They are intended to be used for data path configuration.
  15  * The top level register programming describes the complete data path for
  16  * a specific data path ID - REG_MDP5_CTL_*(<id>, ...)
  17  *
   18  * Hardware capabilities determine the number of concurrent data paths.
  19  *
  20  * In certain use cases (high-resolution dual pipe), one single CTL can be
  21  * shared across multiple CRTCs.
  22  */
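
/*
 * A minimal sketch of the typical call sequence (an illustration only, with
 * placeholder variables; it assumes a ctl_mgr created by mdp5_ctlm_init()
 * and a populated struct mdp5_pipeline):
 *
 *   struct mdp5_ctl *ctl = mdp5_ctlm_request(ctl_mgr, intf->num);
 *
 *   mdp5_ctl_set_pipeline(ctl, &pipeline);
 *   mdp5_ctl_set_encoder_state(ctl, &pipeline, true);
 *   mdp5_ctl_blend(ctl, &pipeline, stage, r_stage, stage_cnt, flags);
 *   mdp5_ctl_commit(ctl, &pipeline, flush_mask, true);
 */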
  23 
  24 #define CTL_STAT_BUSY           0x1
   25 #define CTL_STAT_BOOKED         0x2
  26 
  27 struct mdp5_ctl {
  28         struct mdp5_ctl_manager *ctlm;
  29 
  30         u32 id;
  31 
  32         /* CTL status bitmask */
  33         u32 status;
  34 
  35         bool encoder_enabled;
  36 
  37         /* pending flush_mask bits */
  38         u32 flush_mask;
  39 
  40         /* REG_MDP5_CTL_*(<id>) registers access info + lock: */
  41         spinlock_t hw_lock;
  42         u32 reg_offset;
  43 
  44         /* when do CTL registers need to be flushed? (mask of trigger bits) */
  45         u32 pending_ctl_trigger;
  46 
  47         bool cursor_on;
  48 
  49         /* True if the current CTL has FLUSH bits pending for single FLUSH. */
  50         bool flush_pending;
  51 
  52         struct mdp5_ctl *pair; /* Paired CTL to be flushed together */
  53 };
  54 
  55 struct mdp5_ctl_manager {
  56         struct drm_device *dev;
  57 
  58         /* number of CTL / Layer Mixers in this hw config: */
  59         u32 nlm;
  60         u32 nctl;
  61 
  62         /* to filter out non-present bits in the current hardware config */
  63         u32 flush_hw_mask;
  64 
  65         /* status for single FLUSH */
  66         bool single_flush_supported;
  67         u32 single_flush_pending_mask;
  68 
  69         /* pool of CTLs + lock to protect resource allocation (ctls[i].busy) */
  70         spinlock_t pool_lock;
  71         struct mdp5_ctl ctls[MAX_CTL];
  72 };
  73 
  74 static inline
  75 struct mdp5_kms *get_kms(struct mdp5_ctl_manager *ctl_mgr)
  76 {
  77         struct msm_drm_private *priv = ctl_mgr->dev->dev_private;
  78 
  79         return to_mdp5_kms(to_mdp_kms(priv->kms));
  80 }
  81 
  82 static inline
  83 void ctl_write(struct mdp5_ctl *ctl, u32 reg, u32 data)
  84 {
  85         struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);
  86 
  87         (void)ctl->reg_offset; /* TODO use this instead of mdp5_write */
  88         mdp5_write(mdp5_kms, reg, data);
  89 }
  90 
  91 static inline
  92 u32 ctl_read(struct mdp5_ctl *ctl, u32 reg)
  93 {
  94         struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);
  95 
   96         (void)ctl->reg_offset; /* TODO use this instead of mdp5_read */
  97         return mdp5_read(mdp5_kms, reg);
  98 }
  99 
 100 static void set_display_intf(struct mdp5_kms *mdp5_kms,
 101                 struct mdp5_interface *intf)
 102 {
 103         unsigned long flags;
 104         u32 intf_sel;
 105 
 106         spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
 107         intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL);
 108 
 109         switch (intf->num) {
 110         case 0:
 111                 intf_sel &= ~MDP5_DISP_INTF_SEL_INTF0__MASK;
 112                 intf_sel |= MDP5_DISP_INTF_SEL_INTF0(intf->type);
 113                 break;
 114         case 1:
 115                 intf_sel &= ~MDP5_DISP_INTF_SEL_INTF1__MASK;
 116                 intf_sel |= MDP5_DISP_INTF_SEL_INTF1(intf->type);
 117                 break;
 118         case 2:
 119                 intf_sel &= ~MDP5_DISP_INTF_SEL_INTF2__MASK;
 120                 intf_sel |= MDP5_DISP_INTF_SEL_INTF2(intf->type);
 121                 break;
 122         case 3:
 123                 intf_sel &= ~MDP5_DISP_INTF_SEL_INTF3__MASK;
 124                 intf_sel |= MDP5_DISP_INTF_SEL_INTF3(intf->type);
 125                 break;
 126         default:
 127                 BUG();
 128                 break;
 129         }
 130 
 131         mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel);
 132         spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
 133 }
 134 
 135 static void set_ctl_op(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline)
 136 {
 137         unsigned long flags;
 138         struct mdp5_interface *intf = pipeline->intf;
 139         u32 ctl_op = 0;
 140 
 141         if (!mdp5_cfg_intf_is_virtual(intf->type))
 142                 ctl_op |= MDP5_CTL_OP_INTF_NUM(INTF0 + intf->num);
 143 
 144         switch (intf->type) {
 145         case INTF_DSI:
 146                 if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
 147                         ctl_op |= MDP5_CTL_OP_CMD_MODE;
 148                 break;
 149 
 150         case INTF_WB:
 151                 if (intf->mode == MDP5_INTF_WB_MODE_LINE)
 152                         ctl_op |= MDP5_CTL_OP_MODE(MODE_WB_2_LINE);
 153                 break;
 154 
 155         default:
 156                 break;
 157         }
 158 
 159         if (pipeline->r_mixer)
 160                 ctl_op |= MDP5_CTL_OP_PACK_3D_ENABLE |
 161                           MDP5_CTL_OP_PACK_3D(1);
 162 
 163         spin_lock_irqsave(&ctl->hw_lock, flags);
 164         ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), ctl_op);
 165         spin_unlock_irqrestore(&ctl->hw_lock, flags);
 166 }
 167 
 168 int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline)
 169 {
 170         struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);
 171         struct mdp5_interface *intf = pipeline->intf;
 172 
 173         /* Virtual interfaces need not set a display intf (e.g.: Writeback) */
 174         if (!mdp5_cfg_intf_is_virtual(intf->type))
 175                 set_display_intf(mdp5_kms, intf);
 176 
 177         set_ctl_op(ctl, pipeline);
 178 
 179         return 0;
 180 }
 181 
 182 static bool start_signal_needed(struct mdp5_ctl *ctl,
 183                                 struct mdp5_pipeline *pipeline)
 184 {
 185         struct mdp5_interface *intf = pipeline->intf;
 186 
 187         if (!ctl->encoder_enabled)
 188                 return false;
 189 
 190         switch (intf->type) {
 191         case INTF_WB:
 192                 return true;
 193         case INTF_DSI:
 194                 return intf->mode == MDP5_INTF_DSI_MODE_COMMAND;
 195         default:
 196                 return false;
 197         }
 198 }
 199 
 200 /*
 201  * send_start_signal() - Overlay Processor Start Signal
 202  *
 203  * For a given control operation (display pipeline), a START signal needs to be
 204  * executed in order to kick off operation and activate all layers.
 205  * e.g.: DSI command mode, Writeback
 206  */
 207 static void send_start_signal(struct mdp5_ctl *ctl)
 208 {
 209         unsigned long flags;
 210 
 211         spin_lock_irqsave(&ctl->hw_lock, flags);
 212         ctl_write(ctl, REG_MDP5_CTL_START(ctl->id), 1);
 213         spin_unlock_irqrestore(&ctl->hw_lock, flags);
 214 }
 215 
 216 /**
 217  * mdp5_ctl_set_encoder_state() - set the encoder state
 218  *
  219  * @enabled: true, when encoder is ready for data streaming; false, otherwise.
 220  *
 221  * Note:
 222  * This encoder state is needed to trigger START signal (data path kickoff).
 223  */
 224 int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl,
 225                                struct mdp5_pipeline *pipeline,
 226                                bool enabled)
 227 {
 228         struct mdp5_interface *intf = pipeline->intf;
 229 
 230         if (WARN_ON(!ctl))
 231                 return -EINVAL;
 232 
 233         ctl->encoder_enabled = enabled;
 234         DBG("intf_%d: %s", intf->num, enabled ? "on" : "off");
 235 
 236         if (start_signal_needed(ctl, pipeline)) {
 237                 send_start_signal(ctl);
 238         }
 239 
 240         return 0;
 241 }
 242 
 243 /*
 244  * Note:
 245  * CTL registers need to be flushed after calling this function
 246  * (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask)
 247  */
 248 int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
 249                         int cursor_id, bool enable)
 250 {
 251         struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
 252         unsigned long flags;
 253         u32 blend_cfg;
 254         struct mdp5_hw_mixer *mixer = pipeline->mixer;
 255 
 256         if (WARN_ON(!mixer)) {
 257                 DRM_DEV_ERROR(ctl_mgr->dev->dev, "CTL %d cannot find LM",
 258                         ctl->id);
 259                 return -EINVAL;
 260         }
 261 
 262         if (pipeline->r_mixer) {
 263                 DRM_DEV_ERROR(ctl_mgr->dev->dev, "unsupported configuration");
 264                 return -EINVAL;
 265         }
 266 
 267         spin_lock_irqsave(&ctl->hw_lock, flags);
 268 
 269         blend_cfg = ctl_read(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm));
 270 
 271         if (enable)
 272                 blend_cfg |=  MDP5_CTL_LAYER_REG_CURSOR_OUT;
 273         else
 274                 blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;
 275 
 276         ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm), blend_cfg);
 277         ctl->cursor_on = enable;
 278 
 279         spin_unlock_irqrestore(&ctl->hw_lock, flags);
 280 
 281         ctl->pending_ctl_trigger = mdp_ctl_flush_mask_cursor(cursor_id);
 282 
 283         return 0;
 284 }
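
/*
 * A small sketch of the flush requirement noted above (placeholder variables,
 * assuming the caller already holds a CTL and a pipeline): enable cursor 0,
 * then commit with the matching bit from mdp_ctl_flush_mask_cursor():
 *
 *   mdp5_ctl_set_cursor(ctl, &pipeline, 0, true);
 *   mdp5_ctl_commit(ctl, &pipeline, mdp_ctl_flush_mask_cursor(0), true);
 */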
 285 
 286 static u32 mdp_ctl_blend_mask(enum mdp5_pipe pipe,
 287                 enum mdp_mixer_stage_id stage)
 288 {
 289         switch (pipe) {
 290         case SSPP_VIG0: return MDP5_CTL_LAYER_REG_VIG0(stage);
 291         case SSPP_VIG1: return MDP5_CTL_LAYER_REG_VIG1(stage);
 292         case SSPP_VIG2: return MDP5_CTL_LAYER_REG_VIG2(stage);
 293         case SSPP_RGB0: return MDP5_CTL_LAYER_REG_RGB0(stage);
 294         case SSPP_RGB1: return MDP5_CTL_LAYER_REG_RGB1(stage);
 295         case SSPP_RGB2: return MDP5_CTL_LAYER_REG_RGB2(stage);
 296         case SSPP_DMA0: return MDP5_CTL_LAYER_REG_DMA0(stage);
 297         case SSPP_DMA1: return MDP5_CTL_LAYER_REG_DMA1(stage);
 298         case SSPP_VIG3: return MDP5_CTL_LAYER_REG_VIG3(stage);
 299         case SSPP_RGB3: return MDP5_CTL_LAYER_REG_RGB3(stage);
 300         case SSPP_CURSOR0:
 301         case SSPP_CURSOR1:
 302         default:        return 0;
 303         }
 304 }
 305 
 306 static u32 mdp_ctl_blend_ext_mask(enum mdp5_pipe pipe,
 307                 enum mdp_mixer_stage_id stage)
 308 {
 309         if (stage < STAGE6 && (pipe != SSPP_CURSOR0 && pipe != SSPP_CURSOR1))
 310                 return 0;
 311 
 312         switch (pipe) {
 313         case SSPP_VIG0: return MDP5_CTL_LAYER_EXT_REG_VIG0_BIT3;
 314         case SSPP_VIG1: return MDP5_CTL_LAYER_EXT_REG_VIG1_BIT3;
 315         case SSPP_VIG2: return MDP5_CTL_LAYER_EXT_REG_VIG2_BIT3;
 316         case SSPP_RGB0: return MDP5_CTL_LAYER_EXT_REG_RGB0_BIT3;
 317         case SSPP_RGB1: return MDP5_CTL_LAYER_EXT_REG_RGB1_BIT3;
 318         case SSPP_RGB2: return MDP5_CTL_LAYER_EXT_REG_RGB2_BIT3;
 319         case SSPP_DMA0: return MDP5_CTL_LAYER_EXT_REG_DMA0_BIT3;
 320         case SSPP_DMA1: return MDP5_CTL_LAYER_EXT_REG_DMA1_BIT3;
 321         case SSPP_VIG3: return MDP5_CTL_LAYER_EXT_REG_VIG3_BIT3;
 322         case SSPP_RGB3: return MDP5_CTL_LAYER_EXT_REG_RGB3_BIT3;
 323         case SSPP_CURSOR0: return MDP5_CTL_LAYER_EXT_REG_CURSOR0(stage);
 324         case SSPP_CURSOR1: return MDP5_CTL_LAYER_EXT_REG_CURSOR1(stage);
 325         default:        return 0;
 326         }
 327 }
 328 
 329 static void mdp5_ctl_reset_blend_regs(struct mdp5_ctl *ctl)
 330 {
 331         unsigned long flags;
 332         struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
 333         int i;
 334 
 335         spin_lock_irqsave(&ctl->hw_lock, flags);
 336 
 337         for (i = 0; i < ctl_mgr->nlm; i++) {
 338                 ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, i), 0x0);
 339                 ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, i), 0x0);
 340         }
 341 
 342         spin_unlock_irqrestore(&ctl->hw_lock, flags);
 343 }
 344 
 345 #define PIPE_LEFT       0
 346 #define PIPE_RIGHT      1
 347 int mdp5_ctl_blend(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
 348                    enum mdp5_pipe stage[][MAX_PIPE_STAGE],
 349                    enum mdp5_pipe r_stage[][MAX_PIPE_STAGE],
 350                    u32 stage_cnt, u32 ctl_blend_op_flags)
 351 {
 352         struct mdp5_hw_mixer *mixer = pipeline->mixer;
 353         struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer;
 354         unsigned long flags;
 355         u32 blend_cfg = 0, blend_ext_cfg = 0;
 356         u32 r_blend_cfg = 0, r_blend_ext_cfg = 0;
 357         int i, start_stage;
 358 
 359         mdp5_ctl_reset_blend_regs(ctl);
 360 
 361         if (ctl_blend_op_flags & MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT) {
 362                 start_stage = STAGE0;
 363                 blend_cfg |= MDP5_CTL_LAYER_REG_BORDER_COLOR;
 364                 if (r_mixer)
 365                         r_blend_cfg |= MDP5_CTL_LAYER_REG_BORDER_COLOR;
 366         } else {
 367                 start_stage = STAGE_BASE;
 368         }
 369 
 370         for (i = start_stage; stage_cnt && i <= STAGE_MAX; i++) {
 371                 blend_cfg |=
 372                         mdp_ctl_blend_mask(stage[i][PIPE_LEFT], i) |
 373                         mdp_ctl_blend_mask(stage[i][PIPE_RIGHT], i);
 374                 blend_ext_cfg |=
 375                         mdp_ctl_blend_ext_mask(stage[i][PIPE_LEFT], i) |
 376                         mdp_ctl_blend_ext_mask(stage[i][PIPE_RIGHT], i);
 377                 if (r_mixer) {
 378                         r_blend_cfg |=
 379                                 mdp_ctl_blend_mask(r_stage[i][PIPE_LEFT], i) |
 380                                 mdp_ctl_blend_mask(r_stage[i][PIPE_RIGHT], i);
 381                         r_blend_ext_cfg |=
 382                              mdp_ctl_blend_ext_mask(r_stage[i][PIPE_LEFT], i) |
 383                              mdp_ctl_blend_ext_mask(r_stage[i][PIPE_RIGHT], i);
 384                 }
 385         }
 386 
 387         spin_lock_irqsave(&ctl->hw_lock, flags);
 388         if (ctl->cursor_on)
 389                 blend_cfg |=  MDP5_CTL_LAYER_REG_CURSOR_OUT;
 390 
 391         ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm), blend_cfg);
 392         ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, mixer->lm),
 393                   blend_ext_cfg);
 394         if (r_mixer) {
 395                 ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, r_mixer->lm),
 396                           r_blend_cfg);
 397                 ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, r_mixer->lm),
 398                           r_blend_ext_cfg);
 399         }
 400         spin_unlock_irqrestore(&ctl->hw_lock, flags);
 401 
 402         ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(mixer->lm);
 403         if (r_mixer)
 404                 ctl->pending_ctl_trigger |= mdp_ctl_flush_mask_lm(r_mixer->lm);
 405 
 406         DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x", mixer->lm,
 407                 blend_cfg, blend_ext_cfg);
 408         if (r_mixer)
 409                 DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x",
 410                     r_mixer->lm, r_blend_cfg, r_blend_ext_cfg);
 411 
 412         return 0;
 413 }
 414 
 415 u32 mdp_ctl_flush_mask_encoder(struct mdp5_interface *intf)
 416 {
 417         if (intf->type == INTF_WB)
 418                 return MDP5_CTL_FLUSH_WB;
 419 
 420         switch (intf->num) {
 421         case 0: return MDP5_CTL_FLUSH_TIMING_0;
 422         case 1: return MDP5_CTL_FLUSH_TIMING_1;
 423         case 2: return MDP5_CTL_FLUSH_TIMING_2;
 424         case 3: return MDP5_CTL_FLUSH_TIMING_3;
 425         default: return 0;
 426         }
 427 }
 428 
 429 u32 mdp_ctl_flush_mask_cursor(int cursor_id)
 430 {
 431         switch (cursor_id) {
 432         case 0: return MDP5_CTL_FLUSH_CURSOR_0;
 433         case 1: return MDP5_CTL_FLUSH_CURSOR_1;
 434         default: return 0;
 435         }
 436 }
 437 
 438 u32 mdp_ctl_flush_mask_pipe(enum mdp5_pipe pipe)
 439 {
 440         switch (pipe) {
 441         case SSPP_VIG0: return MDP5_CTL_FLUSH_VIG0;
 442         case SSPP_VIG1: return MDP5_CTL_FLUSH_VIG1;
 443         case SSPP_VIG2: return MDP5_CTL_FLUSH_VIG2;
 444         case SSPP_RGB0: return MDP5_CTL_FLUSH_RGB0;
 445         case SSPP_RGB1: return MDP5_CTL_FLUSH_RGB1;
 446         case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2;
 447         case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0;
 448         case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1;
 449         case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3;
 450         case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3;
 451         case SSPP_CURSOR0: return MDP5_CTL_FLUSH_CURSOR_0;
 452         case SSPP_CURSOR1: return MDP5_CTL_FLUSH_CURSOR_1;
 453         default:        return 0;
 454         }
 455 }
 456 
 457 u32 mdp_ctl_flush_mask_lm(int lm)
 458 {
 459         switch (lm) {
 460         case 0:  return MDP5_CTL_FLUSH_LM0;
 461         case 1:  return MDP5_CTL_FLUSH_LM1;
 462         case 2:  return MDP5_CTL_FLUSH_LM2;
 463         case 3:  return MDP5_CTL_FLUSH_LM3;
 464         case 4:  return MDP5_CTL_FLUSH_LM4;
 465         case 5:  return MDP5_CTL_FLUSH_LM5;
 466         default: return 0;
 467         }
 468 }
 469 
 470 static u32 fix_sw_flush(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
 471                         u32 flush_mask)
 472 {
 473         struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
 474         u32 sw_mask = 0;
 475 #define BIT_NEEDS_SW_FIX(bit) \
 476         (!(ctl_mgr->flush_hw_mask & bit) && (flush_mask & bit))
 477 
 478         /* for some targets, cursor bit is the same as LM bit */
 479         if (BIT_NEEDS_SW_FIX(MDP5_CTL_FLUSH_CURSOR_0))
 480                 sw_mask |= mdp_ctl_flush_mask_lm(pipeline->mixer->lm);
 481 
 482         return sw_mask;
 483 }
 484 
 485 static void fix_for_single_flush(struct mdp5_ctl *ctl, u32 *flush_mask,
 486                 u32 *flush_id)
 487 {
 488         struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
 489 
 490         if (ctl->pair) {
 491                 DBG("CTL %d FLUSH pending mask %x", ctl->id, *flush_mask);
 492                 ctl->flush_pending = true;
 493                 ctl_mgr->single_flush_pending_mask |= (*flush_mask);
 494                 *flush_mask = 0;
 495 
 496                 if (ctl->pair->flush_pending) {
 497                         *flush_id = min_t(u32, ctl->id, ctl->pair->id);
 498                         *flush_mask = ctl_mgr->single_flush_pending_mask;
 499 
 500                         ctl->flush_pending = false;
 501                         ctl->pair->flush_pending = false;
 502                         ctl_mgr->single_flush_pending_mask = 0;
 503 
 504                         DBG("Single FLUSH mask %x,ID %d", *flush_mask,
 505                                 *flush_id);
 506                 }
 507         }
 508 }
 509 
 510 /**
 511  * mdp5_ctl_commit() - Register Flush
 512  *
 513  * The flush register is used to indicate several registers are all
 514  * programmed, and are safe to update to the back copy of the double
 515  * buffered registers.
 516  *
  517  * Some registers' FLUSH bits are shared when the hardware does not have
 518  * dedicated bits for them; handling these is the job of fix_sw_flush().
 519  *
 520  * CTL registers need to be flushed in some circumstances; if that is the
 521  * case, some trigger bits will be present in both flush mask and
 522  * ctl->pending_ctl_trigger.
 523  *
 524  * Return H/W flushed bit mask.
 525  */
 526 u32 mdp5_ctl_commit(struct mdp5_ctl *ctl,
 527                     struct mdp5_pipeline *pipeline,
 528                     u32 flush_mask, bool start)
 529 {
 530         struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
 531         unsigned long flags;
 532         u32 flush_id = ctl->id;
 533         u32 curr_ctl_flush_mask;
 534 
 535         VERB("flush_mask=%x, trigger=%x", flush_mask, ctl->pending_ctl_trigger);
 536 
 537         if (ctl->pending_ctl_trigger & flush_mask) {
 538                 flush_mask |= MDP5_CTL_FLUSH_CTL;
 539                 ctl->pending_ctl_trigger = 0;
 540         }
 541 
 542         flush_mask |= fix_sw_flush(ctl, pipeline, flush_mask);
 543 
 544         flush_mask &= ctl_mgr->flush_hw_mask;
 545 
 546         curr_ctl_flush_mask = flush_mask;
 547 
 548         fix_for_single_flush(ctl, &flush_mask, &flush_id);
 549 
 550         if (!start) {
 551                 ctl->flush_mask |= flush_mask;
 552                 return curr_ctl_flush_mask;
 553         } else {
 554                 flush_mask |= ctl->flush_mask;
 555                 ctl->flush_mask = 0;
 556         }
 557 
 558         if (flush_mask) {
 559                 spin_lock_irqsave(&ctl->hw_lock, flags);
 560                 ctl_write(ctl, REG_MDP5_CTL_FLUSH(flush_id), flush_mask);
 561                 spin_unlock_irqrestore(&ctl->hw_lock, flags);
 562         }
 563 
 564         if (start_signal_needed(ctl, pipeline)) {
 565                 send_start_signal(ctl);
 566         }
 567 
 568         return curr_ctl_flush_mask;
 569 }
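
/*
 * Sketch of composing a flush mask from the helpers above (one possible
 * combination with placeholder variables; which bits a caller actually
 * needs depends on what it changed):
 *
 *   u32 flush = mdp_ctl_flush_mask_pipe(SSPP_VIG0) |
 *               mdp_ctl_flush_mask_lm(mixer->lm) |
 *               mdp_ctl_flush_mask_encoder(pipeline->intf);
 *
 *   mdp5_ctl_commit(ctl, &pipeline, flush, true);
 */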
 570 
 571 u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl)
 572 {
 573         return ctl_read(ctl, REG_MDP5_CTL_FLUSH(ctl->id));
 574 }
 575 
 576 int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl)
 577 {
 578         return WARN_ON(!ctl) ? -EINVAL : ctl->id;
 579 }
 580 
 581 /*
 582  * mdp5_ctl_pair() - Associate 2 booked CTLs for single FLUSH
 583  */
 584 int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable)
 585 {
 586         struct mdp5_ctl_manager *ctl_mgr = ctlx->ctlm;
 587         struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr);
 588 
 589         /* do nothing silently if hw doesn't support */
 590         if (!ctl_mgr->single_flush_supported)
 591                 return 0;
 592 
 593         if (!enable) {
 594                 ctlx->pair = NULL;
 595                 ctly->pair = NULL;
 596                 mdp5_write(mdp5_kms, REG_MDP5_SPARE_0, 0);
 597                 return 0;
 598         } else if ((ctlx->pair != NULL) || (ctly->pair != NULL)) {
 599                 DRM_DEV_ERROR(ctl_mgr->dev->dev, "CTLs already paired\n");
 600                 return -EINVAL;
 601         } else if (!(ctlx->status & ctly->status & CTL_STAT_BOOKED)) {
 602                 DRM_DEV_ERROR(ctl_mgr->dev->dev, "Only pair booked CTLs\n");
 603                 return -EINVAL;
 604         }
 605 
 606         ctlx->pair = ctly;
 607         ctly->pair = ctlx;
 608 
 609         mdp5_write(mdp5_kms, REG_MDP5_SPARE_0,
 610                    MDP5_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN);
 611 
 612         return 0;
 613 }
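
/*
 * Pairing sketch for the dual-DSI single FLUSH case (an illustration,
 * assuming a >= v3.0 target where mdp5_ctlm_init() booked CTL0/CTL1 for
 * INTF1/INTF2). Once paired, fix_for_single_flush() defers each CTL's
 * flush until its partner is also ready:
 *
 *   struct mdp5_ctl *ctl0 = mdp5_ctlm_request(ctl_mgr, 1);
 *   struct mdp5_ctl *ctl1 = mdp5_ctlm_request(ctl_mgr, 2);
 *
 *   mdp5_ctl_pair(ctl0, ctl1, true);
 */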
 614 
 615 /*
  616  * mdp5_ctlm_request() - CTL allocation
 617  *
  618  * Try to return a booked CTL if @intf_num is 1 or 2, otherwise an unbooked one.
  619  * If no CTL is available in the preferred category, allocate from the other one.
 620  *
 621  * @return fail if no CTL is available.
 622  */
 623 struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr,
 624                 int intf_num)
 625 {
 626         struct mdp5_ctl *ctl = NULL;
 627         const u32 checkm = CTL_STAT_BUSY | CTL_STAT_BOOKED;
 628         u32 match = ((intf_num == 1) || (intf_num == 2)) ? CTL_STAT_BOOKED : 0;
 629         unsigned long flags;
 630         int c;
 631 
 632         spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
 633 
 634         /* search the preferred */
 635         for (c = 0; c < ctl_mgr->nctl; c++)
 636                 if ((ctl_mgr->ctls[c].status & checkm) == match)
 637                         goto found;
 638 
 639         dev_warn(ctl_mgr->dev->dev,
 640                 "fall back to the other CTL category for INTF %d!\n", intf_num);
 641 
 642         match ^= CTL_STAT_BOOKED;
 643         for (c = 0; c < ctl_mgr->nctl; c++)
 644                 if ((ctl_mgr->ctls[c].status & checkm) == match)
 645                         goto found;
 646 
 647         DRM_DEV_ERROR(ctl_mgr->dev->dev, "No more CTL available!");
 648         goto unlock;
 649 
 650 found:
 651         ctl = &ctl_mgr->ctls[c];
 652         ctl->status |= CTL_STAT_BUSY;
 653         ctl->pending_ctl_trigger = 0;
 654         DBG("CTL %d allocated", ctl->id);
 655 
 656 unlock:
 657         spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
 658         return ctl;
 659 }
 660 
 661 void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctl_mgr)
 662 {
 663         unsigned long flags;
 664         int c;
 665 
 666         for (c = 0; c < ctl_mgr->nctl; c++) {
 667                 struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];
 668 
 669                 spin_lock_irqsave(&ctl->hw_lock, flags);
 670                 ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), 0);
 671                 spin_unlock_irqrestore(&ctl->hw_lock, flags);
 672         }
 673 }
 674 
 675 void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctl_mgr)
 676 {
 677         kfree(ctl_mgr);
 678 }
 679 
 680 struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
 681                 void __iomem *mmio_base, struct mdp5_cfg_handler *cfg_hnd)
 682 {
 683         struct mdp5_ctl_manager *ctl_mgr;
 684         const struct mdp5_cfg_hw *hw_cfg = mdp5_cfg_get_hw_config(cfg_hnd);
 685         int rev = mdp5_cfg_get_hw_rev(cfg_hnd);
 686         unsigned dsi_cnt = 0;
 687         const struct mdp5_ctl_block *ctl_cfg = &hw_cfg->ctl;
 688         unsigned long flags;
 689         int c, ret;
 690 
 691         ctl_mgr = kzalloc(sizeof(*ctl_mgr), GFP_KERNEL);
 692         if (!ctl_mgr) {
 693                 DRM_DEV_ERROR(dev->dev, "failed to allocate CTL manager\n");
 694                 ret = -ENOMEM;
 695                 goto fail;
 696         }
 697 
 698         if (WARN_ON(ctl_cfg->count > MAX_CTL)) {
 699                 DRM_DEV_ERROR(dev->dev, "Increase static pool size to at least %d\n",
 700                                 ctl_cfg->count);
 701                 ret = -ENOSPC;
 702                 goto fail;
 703         }
 704 
 705         /* initialize the CTL manager: */
 706         ctl_mgr->dev = dev;
 707         ctl_mgr->nlm = hw_cfg->lm.count;
 708         ctl_mgr->nctl = ctl_cfg->count;
 709         ctl_mgr->flush_hw_mask = ctl_cfg->flush_hw_mask;
 710         spin_lock_init(&ctl_mgr->pool_lock);
 711 
 712         /* initialize each CTL of the pool: */
 713         spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
 714         for (c = 0; c < ctl_mgr->nctl; c++) {
 715                 struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];
 716 
 717                 if (WARN_ON(!ctl_cfg->base[c])) {
 718                         DRM_DEV_ERROR(dev->dev, "CTL_%d: base is null!\n", c);
 719                         ret = -EINVAL;
 720                         spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
 721                         goto fail;
 722                 }
 723                 ctl->ctlm = ctl_mgr;
 724                 ctl->id = c;
 725                 ctl->reg_offset = ctl_cfg->base[c];
 726                 ctl->status = 0;
 727                 spin_lock_init(&ctl->hw_lock);
 728         }
 729 
 730         /*
  731          * In the dual-DSI case, CTL0 and CTL1 are always assigned to the two
  732          * DSI interfaces to support the single FLUSH feature (one write to
  733          * CTL0's FLUSH register flushes both CTLs), keeping both pipes in sync.
 734          * Single FLUSH is supported from hw rev v3.0.
 735          */
 736         for (c = 0; c < ARRAY_SIZE(hw_cfg->intf.connect); c++)
 737                 if (hw_cfg->intf.connect[c] == INTF_DSI)
 738                         dsi_cnt++;
 739         if ((rev >= 3) && (dsi_cnt > 1)) {
 740                 ctl_mgr->single_flush_supported = true;
 741                 /* Reserve CTL0/1 for INTF1/2 */
 742                 ctl_mgr->ctls[0].status |= CTL_STAT_BOOKED;
 743                 ctl_mgr->ctls[1].status |= CTL_STAT_BOOKED;
 744         }
 745         spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
 746         DBG("Pool of %d CTLs created.", ctl_mgr->nctl);
 747 
 748         return ctl_mgr;
 749 
 750 fail:
 751         if (ctl_mgr)
 752                 mdp5_ctlm_destroy(ctl_mgr);
 753 
 754         return ERR_PTR(ret);
 755 }
