Lines Matching refs:ctl

97 void ctl_write(struct mdp5_ctl *ctl, u32 reg, u32 data)  in ctl_write()  argument
99 struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm); in ctl_write()
101 (void)ctl->reg_offset; /* TODO use this instead of mdp5_write */ in ctl_write()
106 u32 ctl_read(struct mdp5_ctl *ctl, u32 reg) in ctl_read() argument
108 struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm); in ctl_read()
110 (void)ctl->reg_offset; /* TODO use this instead of mdp5_read */ in ctl_read()
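
For context, the two accessors above wrap the device-wide register helpers; the (void) cast keeps the as-yet-unused per-CTL reg_offset referenced until the TODO is resolved. A sketch of the full bodies, assuming get_kms() and the mdp5_write()/mdp5_read() helpers behave as their names suggest:

static inline
void ctl_write(struct mdp5_ctl *ctl, u32 reg, u32 data)
{
	struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);

	/* reg is an absolute offset today; the TODO is to rebase it
	 * on ctl->reg_offset and do a CTL-relative write instead. */
	(void)ctl->reg_offset;
	mdp5_write(mdp5_kms, reg, data);
}

static inline
u32 ctl_read(struct mdp5_ctl *ctl, u32 reg)
{
	struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);

	(void)ctl->reg_offset;
	return mdp5_read(mdp5_kms, reg);
}
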
149 static void set_ctl_op(struct mdp5_ctl *ctl, struct mdp5_interface *intf) in set_ctl_op() argument
172 spin_lock_irqsave(&ctl->hw_lock, flags); in set_ctl_op()
173 ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), ctl_op); in set_ctl_op()
174 spin_unlock_irqrestore(&ctl->hw_lock, flags); in set_ctl_op()
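
Lines 149-174 program the CTL operation mode from the bound interface, using the file's standard pattern of one register write under the per-CTL hw_lock. A plausible reconstruction of the elided middle, assuming the MDP5_CTL_OP_* field macros and mdp5_cfg_intf_is_virtual() from the driver's generated headers:

static void set_ctl_op(struct mdp5_ctl *ctl, struct mdp5_interface *intf)
{
	unsigned long flags;
	u32 ctl_op = 0;

	/* virtual interfaces (e.g. writeback) have no INTF number */
	if (!mdp5_cfg_intf_is_virtual(intf->type))
		ctl_op |= MDP5_CTL_OP_INTF_NUM(INTF0 + intf->num);

	switch (intf->type) {
	case INTF_DSI:
		if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
			ctl_op |= MDP5_CTL_OP_CMD_MODE;
		break;
	case INTF_WB:
		if (intf->mode == MDP5_INTF_WB_MODE_LINE)
			ctl_op |= MDP5_CTL_OP_MODE(MODE_WB_2_LINE);
		break;
	default:
		break;
	}

	spin_lock_irqsave(&ctl->hw_lock, flags);
	ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), ctl_op);
	spin_unlock_irqrestore(&ctl->hw_lock, flags);
}
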
177 int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, in mdp5_ctl_set_pipeline() argument
180 struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; in mdp5_ctl_set_pipeline()
183 if (unlikely(WARN_ON(intf->num != ctl->pipeline.intf.num))) { in mdp5_ctl_set_pipeline()
186 ctl->id, ctl->pipeline.intf.num, intf->num); in mdp5_ctl_set_pipeline()
190 ctl->lm = lm; in mdp5_ctl_set_pipeline()
192 memcpy(&ctl->pipeline.intf, intf, sizeof(*intf)); in mdp5_ctl_set_pipeline()
194 ctl->pipeline.start_mask = mdp_ctl_flush_mask_lm(ctl->lm) | in mdp5_ctl_set_pipeline()
201 set_ctl_op(ctl, intf); in mdp5_ctl_set_pipeline()
206 static bool start_signal_needed(struct mdp5_ctl *ctl) in start_signal_needed() argument
208 struct op_mode *pipeline = &ctl->pipeline; in start_signal_needed()
230 static void send_start_signal(struct mdp5_ctl *ctl) in send_start_signal() argument
234 spin_lock_irqsave(&ctl->hw_lock, flags); in send_start_signal()
235 ctl_write(ctl, REG_MDP5_CTL_START(ctl->id), 1); in send_start_signal()
236 spin_unlock_irqrestore(&ctl->hw_lock, flags); in send_start_signal()
239 static void refill_start_mask(struct mdp5_ctl *ctl) in refill_start_mask() argument
241 struct op_mode *pipeline = &ctl->pipeline; in refill_start_mask()
242 struct mdp5_interface *intf = &ctl->pipeline.intf; in refill_start_mask()
244 pipeline->start_mask = mdp_ctl_flush_mask_lm(ctl->lm); in refill_start_mask()
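
The START signal machinery above only matters for interfaces without a free-running pixel clock: send_start_signal() kicks the hardware by writing 1 to the START register, and refill_start_mask() re-arms start_mask so the next frame waits for its flushes again. A plausible reconstruction of start_signal_needed(), assuming the INTF_* type and MDP5_INTF_DSI_MODE_COMMAND enums from the driver headers:

static bool start_signal_needed(struct mdp5_ctl *ctl)
{
	struct op_mode *pipeline = &ctl->pipeline;

	/* only kick once everything pending has been flushed */
	if (!pipeline->encoder_enabled || pipeline->start_mask != 0)
		return false;

	switch (pipeline->intf.type) {
	case INTF_WB:
		return true;
	case INTF_DSI:
		return pipeline->intf.mode == MDP5_INTF_DSI_MODE_COMMAND;
	default:
		return false;
	}
}
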
262 int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, bool enabled) in mdp5_ctl_set_encoder_state() argument
264 if (WARN_ON(!ctl)) in mdp5_ctl_set_encoder_state()
267 ctl->pipeline.encoder_enabled = enabled; in mdp5_ctl_set_encoder_state()
268 DBG("intf_%d: %s", ctl->pipeline.intf.num, enabled ? "on" : "off"); in mdp5_ctl_set_encoder_state()
270 if (start_signal_needed(ctl)) { in mdp5_ctl_set_encoder_state()
271 send_start_signal(ctl); in mdp5_ctl_set_encoder_state()
272 refill_start_mask(ctl); in mdp5_ctl_set_encoder_state()
283 int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, int cursor_id, bool enable) in mdp5_ctl_set_cursor() argument
285 struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; in mdp5_ctl_set_cursor()
288 int lm = ctl->lm; in mdp5_ctl_set_cursor()
292 ctl->id, lm); in mdp5_ctl_set_cursor()
296 spin_lock_irqsave(&ctl->hw_lock, flags); in mdp5_ctl_set_cursor()
298 blend_cfg = ctl_read(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm)); in mdp5_ctl_set_cursor()
305 ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg); in mdp5_ctl_set_cursor()
306 ctl->cursor_on = enable; in mdp5_ctl_set_cursor()
308 spin_unlock_irqrestore(&ctl->hw_lock, flags); in mdp5_ctl_set_cursor()
310 ctl->pending_ctl_trigger = mdp_ctl_flush_mask_cursor(cursor_id); in mdp5_ctl_set_cursor()
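
Between the read at line 298 and the write-back at line 305, the cursor output bit is toggled in the cached blend configuration; a sketch of the elided lines, assuming the MDP5_CTL_LAYER_REG_CURSOR_OUT field macro from the generated register headers:

	if (enable)
		blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT;
	else
		blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;

The cursor flush mask is then recorded in pending_ctl_trigger (line 310) so the next commit knows this change still needs a CTL trigger.
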
354 int mdp5_ctl_blend(struct mdp5_ctl *ctl, u8 *stage, u32 stage_cnt, in mdp5_ctl_blend() argument
373 spin_lock_irqsave(&ctl->hw_lock, flags); in mdp5_ctl_blend()
374 if (ctl->cursor_on) in mdp5_ctl_blend()
377 ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, ctl->lm), blend_cfg); in mdp5_ctl_blend()
378 ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, ctl->lm), blend_ext_cfg); in mdp5_ctl_blend()
379 spin_unlock_irqrestore(&ctl->hw_lock, flags); in mdp5_ctl_blend()
381 ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(ctl->lm); in mdp5_ctl_blend()
383 DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x", ctl->lm, in mdp5_ctl_blend()
440 static u32 fix_sw_flush(struct mdp5_ctl *ctl, u32 flush_mask) in fix_sw_flush() argument
442 struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; in fix_sw_flush()
449 sw_mask |= mdp_ctl_flush_mask_lm(ctl->lm); in fix_sw_flush()
454 static void fix_for_single_flush(struct mdp5_ctl *ctl, u32 *flush_mask, in fix_for_single_flush() argument
457 struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; in fix_for_single_flush()
459 if (ctl->pair) { in fix_for_single_flush()
460 DBG("CTL %d FLUSH pending mask %x", ctl->id, *flush_mask); in fix_for_single_flush()
461 ctl->flush_pending = true; in fix_for_single_flush()
465 if (ctl->pair->flush_pending) { in fix_for_single_flush()
466 *flush_id = min_t(u32, ctl->id, ctl->pair->id); in fix_for_single_flush()
469 ctl->flush_pending = false; in fix_for_single_flush()
470 ctl->pair->flush_pending = false; in fix_for_single_flush()
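
Split display drives one logical output from two paired CTLs, which must flush atomically. A plausible reconstruction of the elided lines, assuming a single_flush_pending_mask accumulator in the manager: the first CTL of a pair only stashes its mask and defers; once its partner also has a flush pending, both masks are merged and issued once, through the lower-numbered CTL's FLUSH register.

	if (ctl->pair) {
		DBG("CTL %d FLUSH pending mask %x", ctl->id, *flush_mask);
		ctl->flush_pending = true;
		ctl_mgr->single_flush_pending_mask |= (*flush_mask);
		*flush_mask = 0;	/* defer: partner not flushed yet */

		if (ctl->pair->flush_pending) {
			*flush_id = min_t(u32, ctl->id, ctl->pair->id);
			*flush_mask = ctl_mgr->single_flush_pending_mask;

			ctl->flush_pending = false;
			ctl->pair->flush_pending = false;
			ctl_mgr->single_flush_pending_mask = 0;
		}
	}
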
495 u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask) in mdp5_ctl_commit() argument
497 struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; in mdp5_ctl_commit()
498 struct op_mode *pipeline = &ctl->pipeline; in mdp5_ctl_commit()
500 u32 flush_id = ctl->id; in mdp5_ctl_commit()
506 pipeline->start_mask, ctl->pending_ctl_trigger); in mdp5_ctl_commit()
508 if (ctl->pending_ctl_trigger & flush_mask) { in mdp5_ctl_commit()
510 ctl->pending_ctl_trigger = 0; in mdp5_ctl_commit()
513 flush_mask |= fix_sw_flush(ctl, flush_mask); in mdp5_ctl_commit()
519 fix_for_single_flush(ctl, &flush_mask, &flush_id); in mdp5_ctl_commit()
522 spin_lock_irqsave(&ctl->hw_lock, flags); in mdp5_ctl_commit()
523 ctl_write(ctl, REG_MDP5_CTL_FLUSH(flush_id), flush_mask); in mdp5_ctl_commit()
524 spin_unlock_irqrestore(&ctl->hw_lock, flags); in mdp5_ctl_commit()
527 if (start_signal_needed(ctl)) { in mdp5_ctl_commit()
528 send_start_signal(ctl); in mdp5_ctl_commit()
529 refill_start_mask(ctl); in mdp5_ctl_commit()
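
A hypothetical caller-side sketch (lm_id is illustrative; mdp5_ctl_commit() and the mdp_ctl_flush_mask_* helpers are the driver's own): compose the flush mask from what changed, then commit it. On interfaces that need it, commit itself raises the START signal once the accumulated start_mask has fully flushed (lines 527-529).

	/* hypothetical usage: flush one layer mixer and one source pipe */
	u32 flush_mask = mdp_ctl_flush_mask_lm(lm_id) |
			 mdp_ctl_flush_mask_pipe(SSPP_VIG0);

	mdp5_ctl_commit(ctl, flush_mask);
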
535 u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl) in mdp5_ctl_get_commit_status() argument
537 return ctl_read(ctl, REG_MDP5_CTL_FLUSH(ctl->id)); in mdp5_ctl_get_commit_status()
540 int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl) in mdp5_ctl_get_ctl_id() argument
542 return WARN_ON(!ctl) ? -EINVAL : ctl->id; in mdp5_ctl_get_ctl_id()
590 struct mdp5_ctl *ctl = NULL; in mdp5_ctlm_request() local
615 ctl = &ctl_mgr->ctls[c]; in mdp5_ctlm_request()
616 ctl->pipeline.intf.num = intf_num; in mdp5_ctlm_request()
617 ctl->lm = -1; in mdp5_ctlm_request()
618 ctl->status |= CTL_STAT_BUSY; in mdp5_ctlm_request()
619 ctl->pending_ctl_trigger = 0; in mdp5_ctlm_request()
620 DBG("CTL %d allocated", ctl->id); in mdp5_ctlm_request()
624 return ctl; in mdp5_ctlm_request()
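
The elided lines around 590-624 scan the CTL pool for a free entry before the claim at lines 615-620. A simplified sketch of that scan (ignoring the booked/paired cases the full driver handles), assuming the manager's pool_lock and nctl fields:

	unsigned long flags;
	int c;

	spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
	for (c = 0; c < ctl_mgr->nctl; c++)
		if (!(ctl_mgr->ctls[c].status & CTL_STAT_BUSY))
			break;
	if (c == ctl_mgr->nctl) {
		dev_err(ctl_mgr->dev->dev, "no free CTL\n");
		spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
		return NULL;
	}
	/* claim it: lines 615-620 above run here */
	spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
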
633 struct mdp5_ctl *ctl = &ctl_mgr->ctls[c]; in mdp5_ctlm_hw_reset() local
635 spin_lock_irqsave(&ctl->hw_lock, flags); in mdp5_ctlm_hw_reset()
636 ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), 0); in mdp5_ctlm_hw_reset()
637 spin_unlock_irqrestore(&ctl->hw_lock, flags); in mdp5_ctlm_hw_reset()
652 const struct mdp5_ctl_block *ctl_cfg = &hw_cfg->ctl; in mdp5_ctlm_init()
680 struct mdp5_ctl *ctl = &ctl_mgr->ctls[c]; in mdp5_ctlm_init() local
688 ctl->ctlm = ctl_mgr; in mdp5_ctlm_init()
689 ctl->id = c; in mdp5_ctlm_init()
690 ctl->reg_offset = ctl_cfg->base[c]; in mdp5_ctlm_init()
691 ctl->status = 0; in mdp5_ctlm_init()
692 spin_lock_init(&ctl->hw_lock); in mdp5_ctlm_init()
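
Lines 680-692 run inside the manager's init loop over the hardware config's CTL block. A plausible reconstruction of that loop, assuming a base-address sanity check precedes the per-CTL assignments (ret and the fail label are the function's usual error path):

	for (c = 0; c < ctl_cfg->count; c++) {
		struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];

		if (WARN_ON(!ctl_cfg->base[c])) {
			dev_err(dev->dev, "CTL_%d: base is null!\n", c);
			ret = -EINVAL;
			goto fail;
		}
		ctl->ctlm = ctl_mgr;
		ctl->id = c;
		ctl->reg_offset = ctl_cfg->base[c];
		ctl->status = 0;
		spin_lock_init(&ctl->hw_lock);
	}
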