/*
 * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include "mdp5_kms.h"
#include "mdp5_ctl.h"

/*
 * CTL - MDP Control Pool Manager
 *
 * Controls are shared between all CRTCs.
 *
 * They are intended to be used for data path configuration.
 * The top level register programming describes the complete data path for
 * a specific data path ID - REG_MDP5_CTL_*(<id>, ...)
 *
 * Hardware capabilities determine the number of concurrent data paths.
 *
 * In certain use cases (high-resolution dual pipe), one single CTL can be
 * shared across multiple CRTCs.
 *
 * Because the number of CTLs can be less than the number of CRTCs,
 * CTLs are dynamically allocated from a pool of CTLs, only when a CRTC
 * is requested by the client (in mdp5_crtc_mode_set()).
 */

struct op_mode {
	struct mdp5_interface intf;

	bool encoder_enabled;
	uint32_t start_mask;
};

struct mdp5_ctl {
	struct mdp5_ctl_manager *ctlm;

	u32 id;
	int lm;

	/* whether this CTL has been allocated or not: */
	bool busy;

	/* Operation Mode Configuration for the Pipeline */
	struct op_mode pipeline;

	/* REG_MDP5_CTL_*(<id>) registers access info + lock: */
	spinlock_t hw_lock;
	u32 reg_offset;

	/* when do CTL registers need to be flushed? (mask of trigger bits) */
	u32 pending_ctl_trigger;

	bool cursor_on;

	struct drm_crtc *crtc;
};

struct mdp5_ctl_manager {
	struct drm_device *dev;

	/* number of CTL / Layer Mixers in this hw config: */
	u32 nlm;
	u32 nctl;

	/* to filter out non-present bits in the current hardware config */
	u32 flush_hw_mask;

	/* pool of CTLs + lock to protect resource allocation (ctls[i].busy) */
	spinlock_t pool_lock;
	struct mdp5_ctl ctls[MAX_CTL];
};

static inline
struct mdp5_kms *get_kms(struct mdp5_ctl_manager *ctl_mgr)
{
	struct msm_drm_private *priv = ctl_mgr->dev->dev_private;

	return to_mdp5_kms(to_mdp_kms(priv->kms));
}

static inline
void ctl_write(struct mdp5_ctl *ctl, u32 reg, u32 data)
{
	struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);

	(void)ctl->reg_offset; /* TODO use this instead of mdp5_write */
	mdp5_write(mdp5_kms, reg, data);
}

static inline
u32 ctl_read(struct mdp5_ctl *ctl, u32 reg)
{
	struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);

	(void)ctl->reg_offset; /* TODO use this instead of mdp5_read */
	return mdp5_read(mdp5_kms, reg);
}

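/*
 * set_display_intf() - program the global interface selection
 *
 * Routes the given physical interface (type + number) through the shared
 * MDP_DISP_INTF_SEL register; the register is read-modify-written under
 * the kms resource_lock since it is shared by all interfaces.
 */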
static void set_display_intf(struct mdp5_kms *mdp5_kms,
		struct mdp5_interface *intf)
{
	unsigned long flags;
	u32 intf_sel;

	spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
	intf_sel = mdp5_read(mdp5_kms, REG_MDP5_MDP_DISP_INTF_SEL(0));

	switch (intf->num) {
	case 0:
		intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF0__MASK;
		intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF0(intf->type);
		break;
	case 1:
		intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF1__MASK;
		intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF1(intf->type);
		break;
	case 2:
		intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF2__MASK;
		intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF2(intf->type);
		break;
	case 3:
		intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF3__MASK;
		intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF3(intf->type);
		break;
	default:
		BUG();
		break;
	}

	mdp5_write(mdp5_kms, REG_MDP5_MDP_DISP_INTF_SEL(0), intf_sel);
	spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
}

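/*
 * set_ctl_op() - program the CTL operation mode
 *
 * Selects the interface number for physical interfaces and, depending on
 * the interface type, enables DSI command mode or writeback line mode.
 */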
static void set_ctl_op(struct mdp5_ctl *ctl, struct mdp5_interface *intf)
{
	unsigned long flags;
	u32 ctl_op = 0;

	if (!mdp5_cfg_intf_is_virtual(intf->type))
		ctl_op |= MDP5_CTL_OP_INTF_NUM(INTF0 + intf->num);

	switch (intf->type) {
	case INTF_DSI:
		if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
			ctl_op |= MDP5_CTL_OP_CMD_MODE;
		break;

	case INTF_WB:
		if (intf->mode == MDP5_INTF_WB_MODE_LINE)
			ctl_op |= MDP5_CTL_OP_MODE(MODE_WB_2_LINE);
		break;

	default:
		break;
	}

	spin_lock_irqsave(&ctl->hw_lock, flags);
	ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), ctl_op);
	spin_unlock_irqrestore(&ctl->hw_lock, flags);
}

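/*
 * mdp5_ctl_set_intf() - bind an interface to this CTL's pipeline
 *
 * Caches the interface in the pipeline's op_mode, seeds start_mask with
 * the LM and encoder flush bits, and programs the interface selection
 * and CTL operation mode registers.
 */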
int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, struct mdp5_interface *intf)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr);

	memcpy(&ctl->pipeline.intf, intf, sizeof(*intf));

	ctl->pipeline.start_mask = mdp_ctl_flush_mask_lm(ctl->lm) |
				   mdp_ctl_flush_mask_encoder(intf);

	/* Virtual interfaces (e.g. writeback) do not need a display intf */
	if (!mdp5_cfg_intf_is_virtual(intf->type))
		set_display_intf(mdp5_kms, intf);

	set_ctl_op(ctl, intf);

	return 0;
}

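/*
 * A START trigger is only needed once the encoder is enabled and all
 * blocks in start_mask have been flushed, and only for writeback and
 * DSI command mode interfaces.
 */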
static bool start_signal_needed(struct mdp5_ctl *ctl)
{
	struct op_mode *pipeline = &ctl->pipeline;

	if (!pipeline->encoder_enabled || pipeline->start_mask != 0)
		return false;

	switch (pipeline->intf.type) {
	case INTF_WB:
		return true;
	case INTF_DSI:
		return pipeline->intf.mode == MDP5_INTF_DSI_MODE_COMMAND;
	default:
		return false;
	}
}

/*
 * send_start_signal() - Overlay Processor Start Signal
 *
 * For a given control operation (display pipeline), a START signal must be
 * issued to kick off operation and activate all layers.
 * e.g.: DSI command mode, Writeback
 */
static void send_start_signal(struct mdp5_ctl *ctl)
{
	unsigned long flags;

	spin_lock_irqsave(&ctl->hw_lock, flags);
	ctl_write(ctl, REG_MDP5_CTL_START(ctl->id), 1);
	spin_unlock_irqrestore(&ctl->hw_lock, flags);
}

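/*
 * refill_start_mask() - re-arm start_mask after a START has been sent
 *
 * The next START must again wait for the Layer Mixer flush (and, for
 * writeback, the encoder flush) before it can be issued.
 */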
static void refill_start_mask(struct mdp5_ctl *ctl)
{
	struct op_mode *pipeline = &ctl->pipeline;
	struct mdp5_interface *intf = &ctl->pipeline.intf;

	pipeline->start_mask = mdp_ctl_flush_mask_lm(ctl->lm);

	/*
	 * The writeback encoder needs to program and flush its
	 * address registers for each page flip.
	 */
	if (intf->type == INTF_WB)
		pipeline->start_mask |= mdp_ctl_flush_mask_encoder(intf);
}

/**
 * mdp5_ctl_set_encoder_state() - set the encoder state
 *
 * @ctl:     the CTL instance
 * @enabled: true, when encoder is ready for data streaming; false, otherwise.
 *
 * Note:
 * This encoder state is needed to trigger the START signal (data path kickoff).
 */
int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, bool enabled)
{
	if (WARN_ON(!ctl))
		return -EINVAL;

	ctl->pipeline.encoder_enabled = enabled;
	DBG("intf_%d: %s", ctl->pipeline.intf.num, enabled ? "on" : "off");

	if (start_signal_needed(ctl)) {
		send_start_signal(ctl);
		refill_start_mask(ctl);
	}

	return 0;
}

/*
 * Note:
 * CTL registers need to be flushed after calling this function
 * (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask)
 */
int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, int cursor_id, bool enable)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	unsigned long flags;
	u32 blend_cfg;
	int lm = ctl->lm;

	if (unlikely(WARN_ON(lm < 0))) {
		dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM: %d\n",
				ctl->id, lm);
		return -EINVAL;
	}

	spin_lock_irqsave(&ctl->hw_lock, flags);

	blend_cfg = ctl_read(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm));

	if (enable)
		blend_cfg |=  MDP5_CTL_LAYER_REG_CURSOR_OUT;
	else
		blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;

	ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg);

	spin_unlock_irqrestore(&ctl->hw_lock, flags);

	ctl->pending_ctl_trigger = mdp_ctl_flush_mask_cursor(cursor_id);
	ctl->cursor_on = enable;

	return 0;
}

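/*
 * Note:
 * CTL registers need to be flushed after calling this function
 * (call mdp5_ctl_commit() with mdp_ctl_flush_mask_lm() mask)
 */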
int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg)
{
	unsigned long flags;

	if (ctl->cursor_on)
		blend_cfg |=  MDP5_CTL_LAYER_REG_CURSOR_OUT;
	else
		blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;

	spin_lock_irqsave(&ctl->hw_lock, flags);
	ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg);
	spin_unlock_irqrestore(&ctl->hw_lock, flags);

	ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(lm);

	return 0;
}

u32 mdp_ctl_flush_mask_encoder(struct mdp5_interface *intf)
{
	if (intf->type == INTF_WB)
		return MDP5_CTL_FLUSH_WB;

	switch (intf->num) {
	case 0: return MDP5_CTL_FLUSH_TIMING_0;
	case 1: return MDP5_CTL_FLUSH_TIMING_1;
	case 2: return MDP5_CTL_FLUSH_TIMING_2;
	case 3: return MDP5_CTL_FLUSH_TIMING_3;
	default: return 0;
	}
}

u32 mdp_ctl_flush_mask_cursor(int cursor_id)
{
	switch (cursor_id) {
	case 0: return MDP5_CTL_FLUSH_CURSOR_0;
	case 1: return MDP5_CTL_FLUSH_CURSOR_1;
	default: return 0;
	}
}

u32 mdp_ctl_flush_mask_pipe(enum mdp5_pipe pipe)
{
	switch (pipe) {
	case SSPP_VIG0: return MDP5_CTL_FLUSH_VIG0;
	case SSPP_VIG1: return MDP5_CTL_FLUSH_VIG1;
	case SSPP_VIG2: return MDP5_CTL_FLUSH_VIG2;
	case SSPP_RGB0: return MDP5_CTL_FLUSH_RGB0;
	case SSPP_RGB1: return MDP5_CTL_FLUSH_RGB1;
	case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2;
	case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0;
	case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1;
	case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3;
	case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3;
	default:        return 0;
	}
}

u32 mdp_ctl_flush_mask_lm(int lm)
{
	switch (lm) {
	case 0:  return MDP5_CTL_FLUSH_LM0;
	case 1:  return MDP5_CTL_FLUSH_LM1;
	case 2:  return MDP5_CTL_FLUSH_LM2;
	case 5:  return MDP5_CTL_FLUSH_LM5;
	default: return 0;
	}
}

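/*
 * fix_sw_flush() - map flush bits absent from this hw onto existing ones
 *
 * Some flush requests have no dedicated bit in the current hardware
 * configuration; translate them into the bit(s) that achieve the same
 * effect (e.g. flushing the cursor via its Layer Mixer bit).
 */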
static u32 fix_sw_flush(struct mdp5_ctl *ctl, u32 flush_mask)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	u32 sw_mask = 0;
#define BIT_NEEDS_SW_FIX(bit) \
	(!(ctl_mgr->flush_hw_mask & (bit)) && (flush_mask & (bit)))

	/* for some targets, cursor bit is the same as LM bit */
	if (BIT_NEEDS_SW_FIX(MDP5_CTL_FLUSH_CURSOR_0))
		sw_mask |= mdp_ctl_flush_mask_lm(ctl->lm);

	return sw_mask;
}

/**
 * mdp5_ctl_commit() - Register Flush
 *
 * @ctl:        the CTL instance
 * @flush_mask: bitmask of hardware blocks whose registers need flushing
 *
 * The flush register is used to indicate that several registers are all
 * programmed, and are safe to update to the back copy of the double
 * buffered registers.
 *
 * Some registers' FLUSH bits are shared when the hardware does not have
 * dedicated bits for them; handling these is the job of fix_sw_flush().
 *
 * CTL registers need to be flushed in some circumstances; if that is the
 * case, some trigger bits will be present in both the flush mask and
 * ctl->pending_ctl_trigger.
 */
int mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	struct op_mode *pipeline = &ctl->pipeline;
	unsigned long flags;

	pipeline->start_mask &= ~flush_mask;

	VERB("flush_mask=%x, start_mask=%x, trigger=%x", flush_mask,
			pipeline->start_mask, ctl->pending_ctl_trigger);

	if (ctl->pending_ctl_trigger & flush_mask) {
		flush_mask |= MDP5_CTL_FLUSH_CTL;
		ctl->pending_ctl_trigger = 0;
	}

	flush_mask |= fix_sw_flush(ctl, flush_mask);

	flush_mask &= ctl_mgr->flush_hw_mask;

	if (flush_mask) {
		spin_lock_irqsave(&ctl->hw_lock, flags);
		ctl_write(ctl, REG_MDP5_CTL_FLUSH(ctl->id), flush_mask);
		spin_unlock_irqrestore(&ctl->hw_lock, flags);
	}

	if (start_signal_needed(ctl)) {
		send_start_signal(ctl);
		refill_start_mask(ctl);
	}

	return 0;
}

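/*
 * mdp5_ctl_release() - return a CTL to the pool
 *
 * Counterpart of mdp5_ctlm_request(): marks the CTL as no longer busy so
 * it can be handed out again.
 */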
void mdp5_ctl_release(struct mdp5_ctl *ctl)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	unsigned long flags;

	if (unlikely(WARN_ON(ctl->id >= MAX_CTL) || !ctl->busy)) {
		dev_err(ctl_mgr->dev->dev, "CTL %d in bad state (%d)\n",
				ctl->id, ctl->busy);
		return;
	}

	spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
	ctl->busy = false;
	spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);

	DBG("CTL %d released", ctl->id);
}

int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl)
{
	return WARN_ON(!ctl) ? -EINVAL : ctl->id;
}

/*
 * mdp5_ctlm_request() - CTL dynamic allocation
 *
 * Note: the current implementation assumes only one CRTC per CTL
 *
 * @return first free CTL
 */
struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr,
		struct drm_crtc *crtc)
{
	struct mdp5_ctl *ctl = NULL;
	unsigned long flags;
	int c;

	spin_lock_irqsave(&ctl_mgr->pool_lock, flags);

	for (c = 0; c < ctl_mgr->nctl; c++)
		if (!ctl_mgr->ctls[c].busy)
			break;

	if (unlikely(c >= ctl_mgr->nctl)) {
		dev_err(ctl_mgr->dev->dev, "No more CTL available!\n");
		goto unlock;
	}

	ctl = &ctl_mgr->ctls[c];

	ctl->lm = mdp5_crtc_get_lm(crtc);
	ctl->crtc = crtc;
	ctl->busy = true;
	ctl->pending_ctl_trigger = 0;
	DBG("CTL %d allocated", ctl->id);

unlock:
	spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
	return ctl;
}

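/*
 * mdp5_ctlm_hw_reset() - clear the operation mode of every CTL in the pool
 */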
void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctl_mgr)
{
	unsigned long flags;
	int c;

	for (c = 0; c < ctl_mgr->nctl; c++) {
		struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];

		spin_lock_irqsave(&ctl->hw_lock, flags);
		ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), 0);
		spin_unlock_irqrestore(&ctl->hw_lock, flags);
	}
}

void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctl_mgr)
{
	kfree(ctl_mgr);
}

struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
		void __iomem *mmio_base, const struct mdp5_cfg_hw *hw_cfg)
{
	struct mdp5_ctl_manager *ctl_mgr;
	const struct mdp5_ctl_block *ctl_cfg = &hw_cfg->ctl;
	unsigned long flags;
	int c, ret;

	ctl_mgr = kzalloc(sizeof(*ctl_mgr), GFP_KERNEL);
	if (!ctl_mgr) {
		dev_err(dev->dev, "failed to allocate CTL manager\n");
		ret = -ENOMEM;
		goto fail;
	}

	if (unlikely(WARN_ON(ctl_cfg->count > MAX_CTL))) {
		dev_err(dev->dev, "Increase static pool size to at least %d\n",
				ctl_cfg->count);
		ret = -ENOSPC;
		goto fail;
	}

	/* initialize the CTL manager: */
	ctl_mgr->dev = dev;
	ctl_mgr->nlm = hw_cfg->lm.count;
	ctl_mgr->nctl = ctl_cfg->count;
	ctl_mgr->flush_hw_mask = ctl_cfg->flush_hw_mask;
	spin_lock_init(&ctl_mgr->pool_lock);

	/* initialize each CTL of the pool: */
	spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
	for (c = 0; c < ctl_mgr->nctl; c++) {
		struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];

		if (WARN_ON(!ctl_cfg->base[c])) {
			dev_err(dev->dev, "CTL_%d: base is null!\n", c);
			ret = -EINVAL;
			/* do not leave the pool lock held on the error path */
			spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
			goto fail;
		}
		ctl->ctlm = ctl_mgr;
		ctl->id = c;
		ctl->reg_offset = ctl_cfg->base[c];
		ctl->busy = false;
		spin_lock_init(&ctl->hw_lock);
	}
	spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
	DBG("Pool of %d CTLs created.", ctl_mgr->nctl);

	return ctl_mgr;

fail:
	if (ctl_mgr)
		mdp5_ctlm_destroy(ctl_mgr);

	return ERR_PTR(ret);
}
