Occurrences of ctl_mgr in drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c:

  75  struct mdp5_kms *get_kms(struct mdp5_ctl_manager *ctl_mgr)
  77  struct msm_drm_private *priv = ctl_mgr->dev->dev_private;
 251  struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
 257  DRM_DEV_ERROR(ctl_mgr->dev->dev, "CTL %d cannot find LM",
 263  DRM_DEV_ERROR(ctl_mgr->dev->dev, "unsupported configuration");
 332  struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
 337  for (i = 0; i < ctl_mgr->nlm; i++) {
 473  struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
 476  (!(ctl_mgr->flush_hw_mask & bit) && (flush_mask & bit))
 488  struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
 493  ctl_mgr->single_flush_pending_mask |= (*flush_mask);
 498  *flush_mask = ctl_mgr->single_flush_pending_mask;
 502  ctl_mgr->single_flush_pending_mask = 0;
 530  struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
 544  flush_mask &= ctl_mgr->flush_hw_mask;
 586  struct mdp5_ctl_manager *ctl_mgr = ctlx->ctlm;
 587  struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr);
 590  if (!ctl_mgr->single_flush_supported)
 599  DRM_DEV_ERROR(ctl_mgr->dev->dev, "CTLs already paired\n");
 602  DRM_DEV_ERROR(ctl_mgr->dev->dev, "Only pair booked CTLs\n");
 623  struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr,
 632  spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
 635  for (c = 0; c < ctl_mgr->nctl; c++)
 636  if ((ctl_mgr->ctls[c].status & checkm) == match)
 639  dev_warn(ctl_mgr->dev->dev,
 643  for (c = 0; c < ctl_mgr->nctl; c++)
 644  if ((ctl_mgr->ctls[c].status & checkm) == match)
 647  DRM_DEV_ERROR(ctl_mgr->dev->dev, "No more CTL available!");
 651  ctl = &ctl_mgr->ctls[c];
 657  spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
 661  void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctl_mgr)
 666  for (c = 0; c < ctl_mgr->nctl; c++) {
 667  struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];
 675  void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctl_mgr)
 677  kfree(ctl_mgr);
 683  struct mdp5_ctl_manager *ctl_mgr;
 691  ctl_mgr = kzalloc(sizeof(*ctl_mgr), GFP_KERNEL);
 692  if (!ctl_mgr) {
 706  ctl_mgr->dev = dev;
 707  ctl_mgr->nlm = hw_cfg->lm.count;
 708  ctl_mgr->nctl = ctl_cfg->count;
 709  ctl_mgr->flush_hw_mask = ctl_cfg->flush_hw_mask;
 710  spin_lock_init(&ctl_mgr->pool_lock);
 713  spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
 714  for (c = 0; c < ctl_mgr->nctl; c++) {
 715  struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];
 720  spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
 723  ctl->ctlm = ctl_mgr;
 740  ctl_mgr->single_flush_supported = true;
 742  ctl_mgr->ctls[0].status |= CTL_STAT_BOOKED;
 743  ctl_mgr->ctls[1].status |= CTL_STAT_BOOKED;
 745  spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
 746  DBG("Pool of %d CTLs created.", ctl_mgr->nctl);
 748  return ctl_mgr;
 751  if (ctl_mgr)
 752  mdp5_ctlm_destroy(ctl_mgr);
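The recurring lock/scan/flag snippets above (lines 623-657 for mdp5_ctlm_request, 742-743 for the booked CTLs) outline how the manager hands out control paths from a fixed pool: one lock guards an array of CTL slots, a first pass looks for an idle slot in the preferred category, and a second pass flips the BOOKED expectation as a fallback. Below is a minimal, self-contained userspace model of that pattern; struct ctl_manager, ctlm_request, and prefer_booked are hypothetical stand-ins, and a pthread mutex replaces the kernel's spin_lock_irqsave, so this is a sketch of the allocation logic visible in the listing, not the driver's actual implementation.

/* Userspace model of the CTL pool pattern seen in the listing above.
 * Status bits and the two-pass checkm/match scan mirror the kernel
 * snippets; everything else is a simplified stand-in. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define CTL_STAT_BUSY   0x1
#define CTL_STAT_BOOKED 0x2
#define NCTL            5

struct ctl {
	int id;
	unsigned int status;
};

struct ctl_manager {
	pthread_mutex_t pool_lock;
	int nctl;
	struct ctl ctls[NCTL];
};

static struct ctl *ctlm_request(struct ctl_manager *mgr, bool prefer_booked)
{
	const unsigned int checkm = CTL_STAT_BUSY | CTL_STAT_BOOKED;
	unsigned int match = prefer_booked ? CTL_STAT_BOOKED : 0;
	struct ctl *ctl = NULL;
	int c;

	pthread_mutex_lock(&mgr->pool_lock);

	/* First pass: an idle CTL in the preferred category. */
	for (c = 0; c < mgr->nctl; c++)
		if ((mgr->ctls[c].status & checkm) == match)
			goto found;

	fprintf(stderr, "falling back to the other CTL category\n");

	/* Second pass: flip the BOOKED expectation and retry. */
	match ^= CTL_STAT_BOOKED;
	for (c = 0; c < mgr->nctl; c++)
		if ((mgr->ctls[c].status & checkm) == match)
			goto found;

	fprintf(stderr, "no more CTL available!\n");
	goto unlock;

found:
	ctl = &mgr->ctls[c];
	ctl->status |= CTL_STAT_BUSY;
unlock:
	pthread_mutex_unlock(&mgr->pool_lock);
	return ctl;
}

int main(void)
{
	struct ctl_manager mgr = {
		.pool_lock = PTHREAD_MUTEX_INITIALIZER,
		.nctl = NCTL,
	};
	int c;

	for (c = 0; c < mgr.nctl; c++)
		mgr.ctls[c].id = c;

	/* As at lines 742-743 of the listing: reserve the first two
	 * slots, which the driver books for the single-flush case. */
	mgr.ctls[0].status |= CTL_STAT_BOOKED;
	mgr.ctls[1].status |= CTL_STAT_BOOKED;

	/* Drain the pool; the sixth request must fail. */
	for (c = 0; c < NCTL + 1; c++) {
		struct ctl *ctl = ctlm_request(&mgr, false);
		if (!ctl)
			break;
		printf("got CTL %d (status 0x%x)\n", ctl->id, ctl->status);
	}
	return 0;
}

The two-pass scan reproduces the fallback visible at listing lines 636-644: booked slots are kept in reserve for the paired/single-flush use case, but when the preferred category is exhausted the manager warns and hands one out anyway rather than failing outright.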