/*
 * Copyright © 2012-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *    Daniel Vetter <daniel.vetter@ffwll.ch>
 *
 */

#include <linux/pm_runtime.h>
#include <linux/vgaarb.h>

#include "i915_drv.h"
#include "intel_drv.h"

/**
 * DOC: runtime pm
 *
 * The i915 driver supports dynamic enabling and disabling of entire hardware
 * blocks at runtime. This is especially important on the display side where
 * software is supposed to control many power gates manually on recent hardware,
 * since on the GT side a lot of the power management is done by the hardware.
 * But even there some manual control at the device level is required.
 *
 * Since i915 supports a diverse set of platforms with a unified codebase and
 * hardware engineers just love to shuffle functionality around between power
 * domains there's a sizeable amount of indirection required. This file provides
 * generic functions to the driver for grabbing and releasing references for
 * abstract power domains. It then maps those to the actual power wells
 * present for a given platform.
 */
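
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * code that needs a hardware block powered grabs a reference to the
 * innermost domain it touches and releases it symmetrically afterwards:
 *
 *	intel_display_power_get(dev_priv, POWER_DOMAIN_VGA);
 *	... access the VGA hardware ...
 *	intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
 */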

#define GEN9_ENABLE_DC5(dev) 0
#define SKL_ENABLE_DC6(dev) IS_SKYLAKE(dev)

#define for_each_power_well(i, power_well, domain_mask, power_domains)	\
	for (i = 0;							\
	     i < (power_domains)->power_well_count &&			\
		 ((power_well) = &(power_domains)->power_wells[i]);	\
	     i++)							\
		if ((power_well)->domains & (domain_mask))

#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
	for (i = (power_domains)->power_well_count - 1;			 \
	     i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\
	     i--)							 \
		if ((power_well)->domains & (domain_mask))
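
/*
 * Illustrative use of the iterators above (a sketch; the real callers are
 * intel_display_power_get()/put() further down): visit every power well
 * that feeds a given domain, in power-on order:
 *
 *	for_each_power_well(i, power_well, BIT(domain), power_domains)
 *		intel_power_well_enable(dev_priv, power_well);
 */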

bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
				    int power_well_id);

static void intel_power_well_enable(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well)
{
	DRM_DEBUG_KMS("enabling %s\n", power_well->name);
	power_well->ops->enable(dev_priv, power_well);
	power_well->hw_enabled = true;
}

static void intel_power_well_disable(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	DRM_DEBUG_KMS("disabling %s\n", power_well->name);
	power_well->hw_enabled = false;
	power_well->ops->disable(dev_priv, power_well);
}

/*
 * We should only use the power well if we explicitly asked the hardware to
 * enable it, so check if it's enabled and also check if we've requested it to
 * be enabled.
 */
static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	return I915_READ(HSW_PWR_WELL_DRIVER) ==
		     (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
}

/**
 * __intel_display_power_is_enabled - unlocked check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This is the unlocked version of intel_display_power_is_enabled() and should
 * only be used from error capture and recovery code where deadlocks are
 * possible.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				      enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	bool is_enabled;
	int i;

	if (dev_priv->pm.suspended)
		return false;

	power_domains = &dev_priv->power_domains;

	is_enabled = true;

	for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
		if (power_well->always_on)
			continue;

		if (!power_well->hw_enabled) {
			is_enabled = false;
			break;
		}
	}

	return is_enabled;
}

/**
 * intel_display_power_is_enabled - check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This function can be used to check the hw power domain state. It is mostly
 * used in hardware state readout functions. Everywhere else code should rely
 * upon explicit power domain reference counting to ensure that the hardware
 * block is powered up before accessing it.
 *
 * Callers must hold the relevant modesetting locks to ensure that concurrent
 * threads can't disable the power well while the caller tries to read a few
 * registers.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				    enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	bool ret;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);
	ret = __intel_display_power_is_enabled(dev_priv, domain);
	mutex_unlock(&power_domains->lock);

	return ret;
}

/**
 * intel_display_set_init_power - set the initial power domain state
 * @dev_priv: i915 device instance
 * @enable: whether to enable or disable the initial power domain state
 *
 * For simplicity our driver load/unload and system suspend/resume code assumes
 * that all power domains are always enabled. This function controls the state
 * of this little hack. While the initial power domain state is enabled runtime
 * pm is effectively disabled.
 */
void intel_display_set_init_power(struct drm_i915_private *dev_priv,
				  bool enable)
{
	if (dev_priv->power_domains.init_power_on == enable)
		return;

	if (enable)
		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
	else
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

	dev_priv->power_domains.init_power_on = enable;
}
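
/*
 * Typical flow (a sketch, not lifted from this file): driver load calls
 * intel_display_set_init_power(dev_priv, true) so everything stays powered
 * while the driver takes over, and drops the hack again with
 * intel_display_set_init_power(dev_priv, false) once real power domain
 * reference counting can take over.
 */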

/*
 * Starting with Haswell, we have a "Power Down Well" that can be turned off
 * when not needed anymore. We have 4 registers that can request the power well
 * to be enabled, and it will only be disabled if none of the registers is
 * requesting it to be enabled.
 */
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So here we touch the VGA MSR register, making
	 * sure vgacon can keep working normally without triggering interrupts
	 * and error messages.
	 */
	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
	outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);

	if (IS_BROADWELL(dev))
		gen8_irq_power_well_post_enable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
}

static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	struct drm_device *dev = dev_priv->dev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So here we touch the VGA MSR register, making
	 * sure vgacon can keep working normally without triggering interrupts
	 * and error messages.
	 */
	if (power_well->data == SKL_DISP_PW_2) {
		vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
		outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
		vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);

		gen8_irq_power_well_post_enable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
	}

	if (power_well->data == SKL_DISP_PW_1) {
		if (!dev_priv->power_domains.initializing)
			intel_prepare_ddi(dev);
		gen8_irq_power_well_post_enable(dev_priv, 1 << PIPE_A);
	}
}

static void hsw_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	bool is_enabled, enable_requested;
	uint32_t tmp;

	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
	is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
	enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;

	if (enable) {
		if (!enable_requested)
			I915_WRITE(HSW_PWR_WELL_DRIVER,
				   HSW_PWR_WELL_ENABLE_REQUEST);

		if (!is_enabled) {
			DRM_DEBUG_KMS("Enabling power well\n");
			if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
				      HSW_PWR_WELL_STATE_ENABLED), 20))
				DRM_ERROR("Timeout enabling power well\n");
			hsw_power_well_post_enable(dev_priv);
		}

	} else {
		if (enable_requested) {
			I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
			POSTING_READ(HSW_PWR_WELL_DRIVER);
			DRM_DEBUG_KMS("Requesting to disable the power well\n");
		}
	}
}

#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT(POWER_DOMAIN_PIPE_B) |			\
	BIT(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT(POWER_DOMAIN_PIPE_C) |			\
	BIT(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_E_2_LANES) |		\
	BIT(POWER_DOMAIN_AUX_B) |			\
	BIT(POWER_DOMAIN_AUX_C) |			\
	BIT(POWER_DOMAIN_AUX_D) |			\
	BIT(POWER_DOMAIN_AUDIO) |			\
	BIT(POWER_DOMAIN_VGA) |				\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS (		\
	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT(POWER_DOMAIN_PLLS) |			\
	BIT(POWER_DOMAIN_PIPE_A) |			\
	BIT(POWER_DOMAIN_TRANSCODER_EDP) |		\
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |		\
	BIT(POWER_DOMAIN_AUX_A) |			\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_A_E_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_E_2_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_B_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_C_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_D_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_MISC_IO_POWER_DOMAINS (		\
	SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS |		\
	BIT(POWER_DOMAIN_PLLS) |			\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS (		\
	(POWER_DOMAIN_MASK & ~(SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS |	\
	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	SKL_DISPLAY_DDI_A_E_POWER_DOMAINS |		\
	SKL_DISPLAY_DDI_B_POWER_DOMAINS |		\
	SKL_DISPLAY_DDI_C_POWER_DOMAINS |		\
	SKL_DISPLAY_DDI_D_POWER_DOMAINS |		\
	SKL_DISPLAY_MISC_IO_POWER_DOMAINS)) |		\
	BIT(POWER_DOMAIN_INIT))

#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT(POWER_DOMAIN_PIPE_B) |			\
	BIT(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT(POWER_DOMAIN_PIPE_C) |			\
	BIT(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |		\
	BIT(POWER_DOMAIN_AUX_B) |			\
	BIT(POWER_DOMAIN_AUX_C) |			\
	BIT(POWER_DOMAIN_AUDIO) |			\
	BIT(POWER_DOMAIN_VGA) |				\
	BIT(POWER_DOMAIN_GMBUS) |			\
	BIT(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS (		\
	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT(POWER_DOMAIN_PIPE_A) |			\
	BIT(POWER_DOMAIN_TRANSCODER_EDP) |		\
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |		\
	BIT(POWER_DOMAIN_AUX_A) |			\
	BIT(POWER_DOMAIN_PLLS) |			\
	BIT(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_ALWAYS_ON_POWER_DOMAINS (		\
	(POWER_DOMAIN_MASK & ~(BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS |	\
	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS)) |	\
	BIT(POWER_DOMAIN_INIT))

static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	WARN(!IS_BROXTON(dev), "Platform doesn't support DC9.\n");
	WARN((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
		"DC9 already programmed to be enabled.\n");
	WARN(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		"DC5 still not disabled to enable DC9.\n");
	WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on.\n");
	WARN(intel_irqs_enabled(dev_priv), "Interrupts not disabled yet.\n");

	 /*
	  * TODO: check for the following to verify the conditions to enter DC9
	  * state are satisfied:
	  * 1] Check relevant display engine registers to verify if mode set
	  * disable sequence was followed.
	  * 2] Check if display uninitialize sequence is initialized.
	  */
}

static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
{
	WARN(intel_irqs_enabled(dev_priv), "Interrupts not disabled yet.\n");
	WARN(!(I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
		"DC9 already programmed to be disabled.\n");
	WARN(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		"DC5 still not disabled.\n");

	 /*
	  * TODO: check for the following to verify DC9 state was indeed
	  * entered before programming to disable it:
	  * 1] Check relevant display engine registers to verify if mode
	  *  set disable sequence was followed.
	  * 2] Check if display uninitialize sequence is initialized.
	  */
}

void bxt_enable_dc9(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	assert_can_enable_dc9(dev_priv);

	DRM_DEBUG_KMS("Enabling DC9\n");

	val = I915_READ(DC_STATE_EN);
	val |= DC_STATE_EN_DC9;
	I915_WRITE(DC_STATE_EN, val);
	POSTING_READ(DC_STATE_EN);
}

void bxt_disable_dc9(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	assert_can_disable_dc9(dev_priv);

	DRM_DEBUG_KMS("Disabling DC9\n");

	val = I915_READ(DC_STATE_EN);
	val &= ~DC_STATE_EN_DC9;
	I915_WRITE(DC_STATE_EN, val);
	POSTING_READ(DC_STATE_EN);
}

static void gen9_set_dc_state_debugmask_memory_up(
			struct drm_i915_private *dev_priv)
{
	uint32_t val;

	/* The below bit doesn't need to be cleared ever afterwards */
	val = I915_READ(DC_STATE_DEBUG);
	if (!(val & DC_STATE_DEBUG_MASK_MEMORY_UP)) {
		val |= DC_STATE_DEBUG_MASK_MEMORY_UP;
		I915_WRITE(DC_STATE_DEBUG, val);
		POSTING_READ(DC_STATE_DEBUG);
	}
}

static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
					SKL_DISP_PW_2);

	WARN_ONCE(!IS_SKYLAKE(dev), "Platform doesn't support DC5.\n");
	WARN_ONCE(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n");
	WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");

	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
		  "DC5 already programmed to be enabled.\n");
	WARN_ONCE(dev_priv->pm.suspended,
		  "DC5 cannot be enabled if the platform is runtime-suspended.\n");

	assert_csr_loaded(dev_priv);
}

static void assert_can_disable_dc5(struct drm_i915_private *dev_priv)
{
	bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
					SKL_DISP_PW_2);
	/*
	 * During initialization, the firmware may not be loaded yet.
	 * We still want to make sure that the DC enabling flag is cleared.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	WARN_ONCE(!pg2_enabled, "PG2 not enabled to disable DC5.\n");
	WARN_ONCE(dev_priv->pm.suspended,
		"Disabling of DC5 while platform is runtime-suspended should never happen.\n");
}

static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	assert_can_enable_dc5(dev_priv);

	DRM_DEBUG_KMS("Enabling DC5\n");

	gen9_set_dc_state_debugmask_memory_up(dev_priv);

	val = I915_READ(DC_STATE_EN);
	val &= ~DC_STATE_EN_UPTO_DC5_DC6_MASK;
	val |= DC_STATE_EN_UPTO_DC5;
	I915_WRITE(DC_STATE_EN, val);
	POSTING_READ(DC_STATE_EN);
}

static void gen9_disable_dc5(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	assert_can_disable_dc5(dev_priv);

	DRM_DEBUG_KMS("Disabling DC5\n");

	val = I915_READ(DC_STATE_EN);
	val &= ~DC_STATE_EN_UPTO_DC5;
	I915_WRITE(DC_STATE_EN, val);
	POSTING_READ(DC_STATE_EN);
}

static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	WARN_ONCE(!IS_SKYLAKE(dev), "Platform doesn't support DC6.\n");
	WARN_ONCE(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n");
	WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
		  "Backlight is not disabled.\n");
	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
		  "DC6 already programmed to be enabled.\n");

	assert_csr_loaded(dev_priv);
}

static void assert_can_disable_dc6(struct drm_i915_private *dev_priv)
{
	/*
	 * During initialization, the firmware may not be loaded yet.
	 * We still want to make sure that the DC enabling flag is cleared.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	assert_csr_loaded(dev_priv);
	WARN_ONCE(!(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
		  "DC6 already programmed to be disabled.\n");
}

static void skl_enable_dc6(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	assert_can_enable_dc6(dev_priv);

	DRM_DEBUG_KMS("Enabling DC6\n");

	gen9_set_dc_state_debugmask_memory_up(dev_priv);

	val = I915_READ(DC_STATE_EN);
	val &= ~DC_STATE_EN_UPTO_DC5_DC6_MASK;
	val |= DC_STATE_EN_UPTO_DC6;
	I915_WRITE(DC_STATE_EN, val);
	POSTING_READ(DC_STATE_EN);
}

static void skl_disable_dc6(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	assert_can_disable_dc6(dev_priv);

	DRM_DEBUG_KMS("Disabling DC6\n");

	val = I915_READ(DC_STATE_EN);
	val &= ~DC_STATE_EN_UPTO_DC6;
	I915_WRITE(DC_STATE_EN, val);
	POSTING_READ(DC_STATE_EN);
}
static void skl_set_power_well(struct drm_i915_private *dev_priv,
			struct i915_power_well *power_well, bool enable)
{
	struct drm_device *dev = dev_priv->dev;
	uint32_t tmp, fuse_status;
	uint32_t req_mask, state_mask;
	bool is_enabled, enable_requested, check_fuse_status = false;

	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
	fuse_status = I915_READ(SKL_FUSE_STATUS);

	switch (power_well->data) {
	case SKL_DISP_PW_1:
		if (wait_for((I915_READ(SKL_FUSE_STATUS) &
			SKL_FUSE_PG0_DIST_STATUS), 1)) {
			DRM_ERROR("PG0 not enabled\n");
			return;
		}
		break;
	case SKL_DISP_PW_2:
		if (!(fuse_status & SKL_FUSE_PG1_DIST_STATUS)) {
			DRM_ERROR("PG1 in disabled state\n");
			return;
		}
		break;
	case SKL_DISP_PW_DDI_A_E:
	case SKL_DISP_PW_DDI_B:
	case SKL_DISP_PW_DDI_C:
	case SKL_DISP_PW_DDI_D:
	case SKL_DISP_PW_MISC_IO:
		break;
	default:
		WARN(1, "Unknown power well %lu\n", power_well->data);
		return;
	}

	req_mask = SKL_POWER_WELL_REQ(power_well->data);
	enable_requested = tmp & req_mask;
	state_mask = SKL_POWER_WELL_STATE(power_well->data);
	is_enabled = tmp & state_mask;

	if (enable) {
		if (!enable_requested) {
			WARN((tmp & state_mask) &&
				!I915_READ(HSW_PWR_WELL_BIOS),
				"Invalid for power well status to be enabled, unless done by the BIOS, "
				"when request is to disable!\n");
			if ((GEN9_ENABLE_DC5(dev) || SKL_ENABLE_DC6(dev)) &&
				power_well->data == SKL_DISP_PW_2) {
				if (SKL_ENABLE_DC6(dev)) {
					skl_disable_dc6(dev_priv);
					/*
					 * DDI buffer programming unnecessary during driver-load/resume
					 * as it's already done during modeset initialization then.
					 * It's also invalid here as encoder list is still uninitialized.
					 */
					if (!dev_priv->power_domains.initializing)
						intel_prepare_ddi(dev);
				} else {
					gen9_disable_dc5(dev_priv);
				}
			}
			I915_WRITE(HSW_PWR_WELL_DRIVER, tmp | req_mask);
		}

		if (!is_enabled) {
			DRM_DEBUG_KMS("Enabling %s\n", power_well->name);
			if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
				state_mask), 1))
				DRM_ERROR("%s enable timeout\n",
					power_well->name);
			check_fuse_status = true;
		}
	} else {
		if (enable_requested) {
			if (IS_SKYLAKE(dev) &&
				(power_well->data == SKL_DISP_PW_1) &&
				(intel_csr_load_status_get(dev_priv) == FW_LOADED)) {
				DRM_DEBUG_KMS("Not disabling PW1, DMC will handle it\n");
			} else {
				I915_WRITE(HSW_PWR_WELL_DRIVER, tmp & ~req_mask);
				POSTING_READ(HSW_PWR_WELL_DRIVER);
				DRM_DEBUG_KMS("Disabling %s\n", power_well->name);
			}

			if ((GEN9_ENABLE_DC5(dev) || SKL_ENABLE_DC6(dev)) &&
				power_well->data == SKL_DISP_PW_2) {
				enum csr_state state;
				/* TODO: wait for a completion event or
				 * similar here instead of busy
				 * waiting using wait_for function.
				 */
				wait_for((state = intel_csr_load_status_get(dev_priv)) !=
						FW_UNINITIALIZED, 1000);
				if (state != FW_LOADED)
					DRM_DEBUG("CSR firmware not ready (%d)\n",
							state);
				else if (SKL_ENABLE_DC6(dev))
					skl_enable_dc6(dev_priv);
				else
					gen9_enable_dc5(dev_priv);
			}
		}
	}

	if (check_fuse_status) {
		if (power_well->data == SKL_DISP_PW_1) {
			if (wait_for((I915_READ(SKL_FUSE_STATUS) &
				SKL_FUSE_PG1_DIST_STATUS), 1))
				DRM_ERROR("PG1 distributing status timeout\n");
		} else if (power_well->data == SKL_DISP_PW_2) {
			if (wait_for((I915_READ(SKL_FUSE_STATUS) &
				SKL_FUSE_PG2_DIST_STATUS), 1))
				DRM_ERROR("PG2 distributing status timeout\n");
		}
	}

	if (enable && !is_enabled)
		skl_power_well_post_enable(dev_priv, power_well);
}

static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, power_well->count > 0);

	/*
	 * We're taking over the BIOS, so clear any requests made by it since
	 * the driver is in charge now.
	 */
	if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
		I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}

static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, true);
}

static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, false);
}

static bool skl_power_well_enabled(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	uint32_t mask = SKL_POWER_WELL_REQ(power_well->data) |
		SKL_POWER_WELL_STATE(power_well->data);

	return (I915_READ(HSW_PWR_WELL_DRIVER) & mask) == mask;
}

static void skl_power_well_sync_hw(struct drm_i915_private *dev_priv,
				struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, power_well->count > 0);

	/* Clear any request made by BIOS as driver is taking over */
	I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}

static void skl_power_well_enable(struct drm_i915_private *dev_priv,
				struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, true);
}

static void skl_power_well_disable(struct drm_i915_private *dev_priv,
				struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, false);
}

static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
}

static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
					     struct i915_power_well *power_well)
{
	return true;
}

static void vlv_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	enum punit_power_well power_well_id = power_well->data;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
			 PUNIT_PWRGT_PWR_GATE(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
	ctrl &= ~mask;
	ctrl |= state;
	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));

#undef COND

out:
	mutex_unlock(&dev_priv->rps.hw_lock);
}

static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
}

static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);
}

static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, false);
}

static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	int power_well_id = power_well->data;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
		state != PUNIT_PWRGT_PWR_GATE(power_well_id));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
	WARN_ON(ctrl != state);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return enabled;
}
static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection. Supposedly DSI also
	 * needs the ref clock up and running.
	 *
	 * CHV DPLL B/C have some issues if VGA mode is enabled.
	 */
	for_each_pipe(dev_priv, pipe) {
		u32 val = I915_READ(DPLL(pipe));

		val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
		if (pipe != PIPE_A)
			val |= DPLL_INTEGRATED_CRI_CLK_VLV;

		I915_WRITE(DPLL(pipe), val);
	}

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_enable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * During driver initialization/resume we can avoid restoring the
	 * part of the HW/SW state that will be inited anyway explicitly.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	intel_hpd_init(dev_priv);

	i915_redisable_vga_power_on(dev_priv->dev);
}

static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	vlv_power_sequencer_reset(dev_priv);
}

static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);

	vlv_set_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}

static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);

	vlv_display_power_well_deinit(dev_priv);

	vlv_set_power_well(dev_priv, power_well, false);
}

static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(dev_priv, power_well, true);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 *  6.	De-assert cmn_reset/side_reset. Same as VLV X0.
	 *   a.	GUnit 0x2110 bit[0] set to 1 (def 0)
	 *   b.	The other bits such as sfr settings / modesel may all
	 *	be set to 0.
	 *
	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
	 * synchronization.
	 */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
}

static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum pipe pipe;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);

	for_each_pipe(dev_priv, pipe)
		assert_pll_disabled(dev_priv, pipe);

	/* Assert common reset */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);

	vlv_set_power_well(dev_priv, power_well, false);
}

#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)

static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
						 int power_well_id)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;
	int i;

	for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
		if (power_well->data == power_well_id)
			return power_well;
	}

	return NULL;
}

#define BITS_SET(val, bits) (((val) & (bits)) == (bits))

static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);
	u32 phy_control = dev_priv->chv_phy_control;
	u32 phy_status = 0;
	u32 phy_status_mask = 0xffffffff;
	u32 tmp;
	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[DPIO_PHY0])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
				     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));

	if (!dev_priv->chv_phy_assert[DPIO_PHY1])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));

	if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY0);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);

		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);

		/* CL1 is on whenever anything is on in either channel */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);

		/*
		 * The DPLLB check accounts for the pipe B + port A usage
		 * with CL2 powered up but all the lanes in the second channel
		 * powered down.
		 */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
		    (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
	}

	if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY1);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
	}

	phy_status &= phy_status_mask;

	/*
	 * The PHY may be busy with some initial calibration and whatnot,
	 * so the power state can take a while to actually change.
	 */
	if (wait_for((tmp = I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask) == phy_status, 10))
		WARN(phy_status != tmp,
		     "Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
		     tmp, phy_status, dev_priv->chv_phy_control);
}

#undef BITS_SET

static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	enum dpio_phy phy;
	enum pipe pipe;
	uint32_t tmp;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);

	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		pipe = PIPE_A;
		phy = DPIO_PHY0;
	} else {
		pipe = PIPE_C;
		phy = DPIO_PHY1;
	}

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
	vlv_set_power_well(dev_priv, power_well, true);

	/* Poll for phypwrgood signal */
	if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1))
		DRM_ERROR("Display PHY %d did not power up\n", phy);

	mutex_lock(&dev_priv->sb_lock);

	/* Enable dynamic power down */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
	tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
		DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);

	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
		tmp |= DPIO_DYNPWRDOWNEN_CH1;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
	} else {
		/*
		 * Force the non-existing CL2 off. BXT does this
		 * too, so maybe it saves some power even though
		 * CL2 doesn't exist?
		 */
		tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
		tmp |= DPIO_CL2_LDOFUSE_PWRENB;
		vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
	}

	mutex_unlock(&dev_priv->sb_lock);

	dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);
}

static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum dpio_phy phy;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);

	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		phy = DPIO_PHY0;
		assert_pll_disabled(dev_priv, PIPE_A);
		assert_pll_disabled(dev_priv, PIPE_B);
	} else {
		phy = DPIO_PHY1;
		assert_pll_disabled(dev_priv, PIPE_C);
	}

	dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	vlv_set_power_well(dev_priv, power_well, false);

	DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	/* PHY is fully reset now, so we can enable the PHY state asserts */
	dev_priv->chv_phy_assert[phy] = true;

	assert_chv_phy_status(dev_priv);
}

static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
				     enum dpio_channel ch, bool override, unsigned int mask)
{
	enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
	u32 reg, val, expected, actual;

	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[phy])
		return;

	if (ch == DPIO_CH0)
		reg = _CHV_CMN_DW0_CH0;
	else
		reg = _CHV_CMN_DW6_CH1;

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_dpio_read(dev_priv, pipe, reg);
	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * This assumes !override is only used when the port is disabled.
	 * All lanes should power down even without the override when
	 * the port is disabled.
	 */
	if (!override || mask == 0xf) {
		expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
		/*
		 * If CH1 common lane is not active anymore
		 * (eg. for pipe B DPLL) the entire channel will
		 * shut down, which causes the common lane registers
		 * to read as 0. That means we can't actually check
		 * the lane power down status bits, but as the entire
		 * register reads as 0 it's a good indication that the
		 * channel is indeed entirely powered down.
		 */
		if (ch == DPIO_CH1 && val == 0)
			expected = 0;
	} else if (mask != 0x0) {
		expected = DPIO_ANYDL_POWERDOWN;
	} else {
		expected = 0;
	}

	if (ch == DPIO_CH0)
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
	else
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
	actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;

	WARN(actual != expected,
	     "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
	     !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
	     !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
	     reg, val);
}

bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
			  enum dpio_channel ch, bool override)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	bool was_override;

	mutex_lock(&power_domains->lock);

	was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	if (override == was_override)
		goto out;

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
		      phy, ch, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

out:
	mutex_unlock(&power_domains->lock);

	return was_override;
}
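
/*
 * The previous override state is returned so callers can use a
 * save/restore pattern (an illustrative sketch, not a caller from this
 * file):
 *
 *	bool was_override = chv_phy_powergate_ch(dev_priv, phy, ch, true);
 *	... reprogram the lanes ...
 *	chv_phy_powergate_ch(dev_priv, phy, ch, was_override);
 */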
1278
1279void chv_phy_powergate_lanes(struct intel_encoder *encoder,
1280			     bool override, unsigned int mask)
1281{
1282	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1283	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1284	enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base));
1285	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
1286
1287	mutex_lock(&power_domains->lock);
1288
1289	dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
1290	dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);
1291
1292	if (override)
1293		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1294	else
1295		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1296
1297	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
1298
1299	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
1300		      phy, ch, mask, dev_priv->chv_phy_control);
1301
1302	assert_chv_phy_status(dev_priv);
1303
1304	assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);
1305
1306	mutex_unlock(&power_domains->lock);
1307}
1308
1309static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
1310					struct i915_power_well *power_well)
1311{
1312	enum pipe pipe = power_well->data;
1313	bool enabled;
1314	u32 state, ctrl;
1315
1316	mutex_lock(&dev_priv->rps.hw_lock);
1317
1318	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
1319	/*
1320	 * We only ever set the power-on and power-gate states, anything
1321	 * else is unexpected.
1322	 */
1323	WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
1324	enabled = state == DP_SSS_PWR_ON(pipe);
1325
1326	/*
1327	 * A transient state at this point would mean some unexpected party
1328	 * is poking at the power controls too.
1329	 */
1330	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
1331	WARN_ON(ctrl << 16 != state);
1332
1333	mutex_unlock(&dev_priv->rps.hw_lock);
1334
1335	return enabled;
1336}
1337
1338static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
1339				    struct i915_power_well *power_well,
1340				    bool enable)
1341{
1342	enum pipe pipe = power_well->data;
1343	u32 state;
1344	u32 ctrl;
1345
1346	state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
1347
1348	mutex_lock(&dev_priv->rps.hw_lock);
1349
1350#define COND \
1351	((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)
1352
1353	if (COND)
1354		goto out;
1355
1356	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
1357	ctrl &= ~DP_SSC_MASK(pipe);
1358	ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
1359	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);
1360
1361	if (wait_for(COND, 100))
1362		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
1363			  state,
1364			  vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));
1365
1366#undef COND
1367
1368out:
1369	mutex_unlock(&dev_priv->rps.hw_lock);
1370}
1371
1372static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
1373					struct i915_power_well *power_well)
1374{
1375	WARN_ON_ONCE(power_well->data != PIPE_A);
1376
1377	chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
1378}
1379
1380static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
1381				       struct i915_power_well *power_well)
1382{
1383	WARN_ON_ONCE(power_well->data != PIPE_A);
1384
1385	chv_set_pipe_power_well(dev_priv, power_well, true);
1386
1387	vlv_display_power_well_init(dev_priv);
1388}
1389
1390static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
1391					struct i915_power_well *power_well)
1392{
1393	WARN_ON_ONCE(power_well->data != PIPE_A);
1394
1395	vlv_display_power_well_deinit(dev_priv);
1396
1397	chv_set_pipe_power_well(dev_priv, power_well, false);
1398}
1399
1400/**
1401 * intel_display_power_get - grab a power domain reference
1402 * @dev_priv: i915 device instance
1403 * @domain: power domain to reference
1404 *
1405 * This function grabs a power domain reference for @domain and ensures that the
1406 * power domain and all its parents are powered up. Therefore users should only
1407 * grab a reference to the innermost power domain they need.
1408 *
1409 * Any power domain reference obtained by this function must have a symmetric
1410 * call to intel_display_power_put() to release the reference again.
1411 */
1412void intel_display_power_get(struct drm_i915_private *dev_priv,
1413			     enum intel_display_power_domain domain)
1414{
1415	struct i915_power_domains *power_domains;
1416	struct i915_power_well *power_well;
1417	int i;
1418
1419	intel_runtime_pm_get(dev_priv);
1420
1421	power_domains = &dev_priv->power_domains;
1422
1423	mutex_lock(&power_domains->lock);
1424
1425	for_each_power_well(i, power_well, BIT(domain), power_domains) {
1426		if (!power_well->count++)
1427			intel_power_well_enable(dev_priv, power_well);
1428	}
1429
1430	power_domains->domain_use_count[domain]++;
1431
1432	mutex_unlock(&power_domains->lock);
1433}
1434
1435/**
1436 * intel_display_power_put - release a power domain reference
1437 * @dev_priv: i915 device instance
1438 * @domain: power domain to reference
1439 *
1440 * This function drops the power domain reference obtained by
1441 * intel_display_power_get() and might power down the corresponding hardware
1442 * block right away if this is the last reference.
1443 */
1444void intel_display_power_put(struct drm_i915_private *dev_priv,
1445			     enum intel_display_power_domain domain)
1446{
1447	struct i915_power_domains *power_domains;
1448	struct i915_power_well *power_well;
1449	int i;
1450
1451	power_domains = &dev_priv->power_domains;
1452
1453	mutex_lock(&power_domains->lock);
1454
1455	WARN_ON(!power_domains->domain_use_count[domain]);
1456	power_domains->domain_use_count[domain]--;
1457
1458	for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
1459		WARN_ON(!power_well->count);
1460
1461		if (!--power_well->count && i915.disable_power_well)
1462			intel_power_well_disable(dev_priv, power_well);
1463	}
1464
1465	mutex_unlock(&power_domains->lock);
1466
1467	intel_runtime_pm_put(dev_priv);
1468}
1469
1470#define HSW_ALWAYS_ON_POWER_DOMAINS (			\
1471	BIT(POWER_DOMAIN_PIPE_A) |			\
1472	BIT(POWER_DOMAIN_TRANSCODER_EDP) |		\
1473	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |		\
1474	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |		\
1475	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |		\
1476	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |		\
1477	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |		\
1478	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |		\
1479	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |		\
1480	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |		\
1481	BIT(POWER_DOMAIN_PORT_CRT) |			\
1482	BIT(POWER_DOMAIN_PLLS) |			\
1483	BIT(POWER_DOMAIN_AUX_A) |			\
1484	BIT(POWER_DOMAIN_AUX_B) |			\
1485	BIT(POWER_DOMAIN_AUX_C) |			\
1486	BIT(POWER_DOMAIN_AUX_D) |			\
1487	BIT(POWER_DOMAIN_GMBUS) |			\
1488	BIT(POWER_DOMAIN_INIT))
1489#define HSW_DISPLAY_POWER_DOMAINS (				\
1490	(POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) |	\
1491	BIT(POWER_DOMAIN_INIT))
1492
1493#define BDW_ALWAYS_ON_POWER_DOMAINS (			\
1494	HSW_ALWAYS_ON_POWER_DOMAINS |			\
1495	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
1496#define BDW_DISPLAY_POWER_DOMAINS (				\
1497	(POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) |	\
1498	BIT(POWER_DOMAIN_INIT))
1499
1500#define VLV_ALWAYS_ON_POWER_DOMAINS	BIT(POWER_DOMAIN_INIT)
1501#define VLV_DISPLAY_POWER_DOMAINS	POWER_DOMAIN_MASK
1502
1503#define VLV_DPIO_CMN_BC_POWER_DOMAINS (		\
1504	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
1505	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
1506	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
1507	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
1508	BIT(POWER_DOMAIN_PORT_CRT) |		\
1509	BIT(POWER_DOMAIN_AUX_B) |		\
1510	BIT(POWER_DOMAIN_AUX_C) |		\
1511	BIT(POWER_DOMAIN_INIT))
1512
1513#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (	\
1514	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
1515	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
1516	BIT(POWER_DOMAIN_AUX_B) |		\
1517	BIT(POWER_DOMAIN_INIT))
1518
1519#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (	\
1520	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
1521	BIT(POWER_DOMAIN_AUX_B) |		\
1522	BIT(POWER_DOMAIN_INIT))
1523
1524#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (	\
1525	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
1526	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
1527	BIT(POWER_DOMAIN_AUX_C) |		\
1528	BIT(POWER_DOMAIN_INIT))
1529
1530#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (	\
1531	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
1532	BIT(POWER_DOMAIN_AUX_C) |		\
1533	BIT(POWER_DOMAIN_INIT))
1534
1535#define CHV_DPIO_CMN_BC_POWER_DOMAINS (		\
1536	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
1537	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
1538	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
1539	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
1540	BIT(POWER_DOMAIN_AUX_B) |		\
1541	BIT(POWER_DOMAIN_AUX_C) |		\
1542	BIT(POWER_DOMAIN_INIT))
1543
1544#define CHV_DPIO_CMN_D_POWER_DOMAINS (		\
1545	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |	\
1546	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |	\
1547	BIT(POWER_DOMAIN_AUX_D) |		\
1548	BIT(POWER_DOMAIN_INIT))
1549
1550static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
1551	.sync_hw = i9xx_always_on_power_well_noop,
1552	.enable = i9xx_always_on_power_well_noop,
1553	.disable = i9xx_always_on_power_well_noop,
1554	.is_enabled = i9xx_always_on_power_well_enabled,
};

static const struct i915_power_well_ops chv_pipe_power_well_ops = {
	.sync_hw = chv_pipe_power_well_sync_hw,
	.enable = chv_pipe_power_well_enable,
	.disable = chv_pipe_power_well_disable,
	.is_enabled = chv_pipe_power_well_enabled,
};

static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = chv_dpio_cmn_power_well_enable,
	.disable = chv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static struct i915_power_well i9xx_always_on_power_well[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
};

static const struct i915_power_well_ops hsw_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = hsw_power_well_enable,
	.disable = hsw_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};

static const struct i915_power_well_ops skl_power_well_ops = {
	.sync_hw = skl_power_well_sync_hw,
	.enable = skl_power_well_enable,
	.disable = skl_power_well_disable,
	.is_enabled = skl_power_well_enabled,
};

static struct i915_power_well hsw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = HSW_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = HSW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};

static struct i915_power_well bdw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = BDW_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = BDW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};

static const struct i915_power_well_ops vlv_display_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_display_power_well_enable,
	.disable = vlv_display_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_dpio_cmn_power_well_enable,
	.disable = vlv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_power_well_enable,
	.disable = vlv_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static struct i915_power_well vlv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DISP2D,
		.ops = &vlv_display_power_well_ops,
	},
	{
		.name = "dpio-tx-b-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
	},
	{
		.name = "dpio-tx-b-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
	},
	{
		.name = "dpio-tx-c-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
	},
	{
		.name = "dpio-tx-c-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
	},
	{
		.name = "dpio-common",
		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &vlv_dpio_cmn_power_well_ops,
	},
};

static struct i915_power_well chv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		/*
		 * Pipe A power well is the new disp2d well. Pipe B and C
		 * power wells don't actually exist. Pipe A power well is
		 * required for any pipe to work.
		 */
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.data = PIPE_A,
		.ops = &chv_pipe_power_well_ops,
	},
	{
		.name = "dpio-common-bc",
		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
	{
		.name = "dpio-common-d",
		.domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_D,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
};

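/**
 * intel_display_power_well_is_enabled - check hw state of a given power well
 * @dev_priv: i915 device instance
 * @power_well_id: platform-specific id of the power well to check
 *
 * Descriptive note added for clarity: this helper looks up the power well by
 * its id and reports the current hardware state through the well's
 * ->is_enabled() hook. It takes no reference and no lock, so the result is
 * only a point-in-time snapshot.
 */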
bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
				    int power_well_id)
{
	struct i915_power_well *power_well;
	bool ret;

	power_well = lookup_power_well(dev_priv, power_well_id);
	ret = power_well->ops->is_enabled(dev_priv, power_well);

	return ret;
}

static struct i915_power_well skl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "power well 1",
		.domains = SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_1,
	},
	{
		.name = "MISC IO power well",
		.domains = SKL_DISPLAY_MISC_IO_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_MISC_IO,
	},
	{
		.name = "power well 2",
		.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_2,
	},
	{
		.name = "DDI A/E power well",
		.domains = SKL_DISPLAY_DDI_A_E_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_DDI_A_E,
	},
	{
		.name = "DDI B power well",
		.domains = SKL_DISPLAY_DDI_B_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_DDI_B,
	},
	{
		.name = "DDI C power well",
		.domains = SKL_DISPLAY_DDI_C_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_DDI_C,
	},
	{
		.name = "DDI D power well",
		.domains = SKL_DISPLAY_DDI_D_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_DDI_D,
	},
};

static struct i915_power_well bxt_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = BXT_DISPLAY_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "power well 1",
		.domains = BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_1,
	},
	{
		.name = "power well 2",
		.domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_2,
	}
};

static int
sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
				   int disable_power_well)
{
	if (disable_power_well >= 0)
		return !!disable_power_well;

	if (IS_SKYLAKE(dev_priv)) {
		DRM_DEBUG_KMS("Disabling display power well support\n");
		return 0;
	}

	return 1;
}
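
/*
 * Illustrative summary (derived from the code above): an explicit user value
 * of 0 or 1 for i915.disable_power_well is honored as a plain boolean, while
 * the "auto" value -1 selects a per-platform default -- currently 1 on every
 * platform except Skylake, where the option is forced to 0 so the display
 * power wells stay enabled.
 */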

#define set_power_wells(power_domains, __power_wells) ({		\
	(power_domains)->power_wells = (__power_wells);			\
	(power_domains)->power_well_count = ARRAY_SIZE(__power_wells);	\
})

/**
 * intel_power_domains_init - initializes the power domain structures
 * @dev_priv: i915 device instance
 *
 * Initializes the power domain structures for @dev_priv depending upon the
 * supported platform.
 */
int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	i915.disable_power_well = sanitize_disable_power_well_option(dev_priv,
						     i915.disable_power_well);

	BUILD_BUG_ON(POWER_DOMAIN_NUM > 31);

	mutex_init(&power_domains->lock);

	/*
	 * The enabling order will be from lower to higher indexed wells,
	 * the disabling order is reversed.
	 */
	if (IS_HASWELL(dev_priv->dev)) {
		set_power_wells(power_domains, hsw_power_wells);
	} else if (IS_BROADWELL(dev_priv->dev)) {
		set_power_wells(power_domains, bdw_power_wells);
	} else if (IS_SKYLAKE(dev_priv->dev)) {
		set_power_wells(power_domains, skl_power_wells);
	} else if (IS_BROXTON(dev_priv->dev)) {
		set_power_wells(power_domains, bxt_power_wells);
	} else if (IS_CHERRYVIEW(dev_priv->dev)) {
		set_power_wells(power_domains, chv_power_wells);
	} else if (IS_VALLEYVIEW(dev_priv->dev)) {
		set_power_wells(power_domains, vlv_power_wells);
	} else {
		set_power_wells(power_domains, i9xx_always_on_power_well);
	}

	return 0;
}
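
/*
 * Illustrative sketch (not taken from the original source): the ordering
 * contract documented above is what lets callers walk the well table
 * forwards when powering on and backwards when powering off, e.g.:
 *
 *	for_each_power_well(i, power_well, BIT(domain), power_domains)
 *		intel_power_well_enable(dev_priv, power_well);
 *
 *	for_each_power_well_rev(i, power_well, BIT(domain), power_domains)
 *		intel_power_well_disable(dev_priv, power_well);
 *
 * so a lower-indexed well such as "power well 1" is up before any
 * higher-indexed well that depends on it, and is torn down last.
 */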

static void intel_runtime_pm_disable(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	if (!intel_enable_rc6(dev))
		return;

	/* Make sure we're not suspended first. */
	pm_runtime_get_sync(device);
}

/**
 * intel_power_domains_fini - finalizes the power domain structures
 * @dev_priv: i915 device instance
 *
 * Finalizes the power domain structures for @dev_priv depending upon the
 * supported platform. This function also disables runtime pm and ensures that
 * the device stays powered up so that the driver can be reloaded.
 */
void intel_power_domains_fini(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_disable(dev_priv);

	/* The i915.ko module is still not prepared to be loaded when
	 * the power well is not enabled, so just enable it in case
	 * we're going to unload/reload. */
	intel_display_set_init_power(dev_priv, true);
}

static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;
	int i;

	mutex_lock(&power_domains->lock);
	for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
		power_well->ops->sync_hw(dev_priv, power_well);
		power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
								     power_well);
	}
	mutex_unlock(&power_domains->lock);
}

static void chv_phy_control_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);

	/*
	 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
	 * workaround never ever read DISPLAY_PHY_CONTROL, and
	 * instead maintain a shadow copy ourselves. Use the actual
	 * power well state and lane status to reconstruct the
	 * expected initial value.
	 */
	dev_priv->chv_phy_control =
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);

	/*
	 * If all lanes are disabled we leave the override disabled
	 * with all power down bits cleared to match the state we
	 * would use after disabling the port. Otherwise enable the
	 * override and set the lane powerdown bits according to the
	 * current lane status.
	 */
	if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
		uint32_t status = I915_READ(DPLL(PIPE_A));
		unsigned int mask;

		mask = status & DPLL_PORTB_READY_MASK;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);

		mask = (status & DPLL_PORTC_READY_MASK) >> 4;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);

		dev_priv->chv_phy_assert[DPIO_PHY0] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY0] = true;
	}

	if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
		uint32_t status = I915_READ(DPIO_PHY_STATUS);
		unsigned int mask;

		mask = status & DPLL_PORTD_READY_MASK;

		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);

		dev_priv->chv_phy_assert[DPIO_PHY1] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY1] = true;
	}

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n",
		      dev_priv->chv_phy_control);
}
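
/*
 * Illustrative sketch (not from the original source): because the register
 * must never be read back, any later update is expected to follow the same
 * read-modify-write pattern against the shadow copy only, e.g.:
 *
 *	dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
 *	dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);
 *	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
 */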

static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *disp2d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);

	/* If the display might already be active, skip this */
	if (cmn->ops->is_enabled(dev_priv, cmn) &&
	    disp2d->ops->is_enabled(dev_priv, disp2d) &&
	    I915_READ(DPIO_CTL) & DPIO_CMNRST)
		return;

	DRM_DEBUG_KMS("toggling display PHY side reset\n");

	/* cmnlane needs DPLL registers */
	disp2d->ops->enable(dev_priv, disp2d);

	/*
	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
	 * Need to assert and de-assert PHY SB reset by gating the
	 * common lane power, then un-gating it.
	 * Simply ungating isn't enough to reset the PHY enough to get
	 * ports and lanes running.
	 */
	cmn->ops->disable(dev_priv, cmn);
}

/**
 * intel_power_domains_init_hw - initialize hardware power domain state
 * @dev_priv: i915 device instance
 *
 * This function initializes the hardware power domain state and enables all
 * power domains using intel_display_set_init_power().
 */
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	power_domains->initializing = true;

	if (IS_CHERRYVIEW(dev)) {
		mutex_lock(&power_domains->lock);
		chv_phy_control_init(dev_priv);
		mutex_unlock(&power_domains->lock);
	} else if (IS_VALLEYVIEW(dev)) {
		mutex_lock(&power_domains->lock);
		vlv_cmnlane_wa(dev_priv);
		mutex_unlock(&power_domains->lock);
	}

	/* For now, we need the power well to be always enabled. */
	intel_display_set_init_power(dev_priv, true);
	intel_power_domains_resume(dev_priv);
	power_domains->initializing = false;
}

/**
 * intel_runtime_pm_get - grab a runtime pm reference
 * @dev_priv: i915 device instance
 *
 * This function grabs a device-level runtime pm reference (mostly used for GEM
 * code to ensure the GTT or GT is on) and ensures that it is powered up.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 */
void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	pm_runtime_get_sync(device);
	WARN(dev_priv->pm.suspended, "Device still suspended.\n");
}
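
/*
 * Illustrative usage (not from the original source): callers bracket any
 * access that needs the device awake, e.g.:
 *
 *	intel_runtime_pm_get(dev_priv);
 *	... access registers or the GTT ...
 *	intel_runtime_pm_put(dev_priv);
 *
 * Dropping the last reference arms the autosuspend timer set up in
 * intel_runtime_pm_enable() below.
 */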

/**
 * intel_runtime_pm_get_noresume - grab a runtime pm reference
 * @dev_priv: i915 device instance
 *
 * This function grabs a device-level runtime pm reference (mostly used for GEM
 * code to ensure the GTT or GT is on).
 *
 * It will _not_ power up the device but instead only check that it's powered
 * on.  Therefore it is only valid to call this function from contexts where
 * the device is known to be powered up and where trying to power it up would
 * result in hilarity and deadlocks. That pretty much means only the system
 * suspend/resume code where this is used to grab runtime pm references for
 * delayed setup down in work items.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 */
void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	WARN(dev_priv->pm.suspended, "Getting nosync-ref while suspended.\n");
	pm_runtime_get_noresume(device);
}
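
/*
 * Illustrative sketch (hypothetical caller, not from the original source):
 * resume code that defers setup to a work item might pin the device without
 * waking it, since the device is known to be powered at that point:
 *
 *	intel_runtime_pm_get_noresume(dev_priv);
 *	schedule_work(&dev_priv->some_resume_work);  (hypothetical work item)
 *
 * with the work item calling intel_runtime_pm_put() once it is done.
 */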

/**
 * intel_runtime_pm_put - release a runtime pm reference
 * @dev_priv: i915 device instance
 *
 * This function drops the device-level runtime pm reference obtained by
 * intel_runtime_pm_get() and might power down the corresponding
 * hardware block right away if this is the last reference.
 */
void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	pm_runtime_mark_last_busy(device);
	pm_runtime_put_autosuspend(device);
}

/**
 * intel_runtime_pm_enable - enable runtime pm
 * @dev_priv: i915 device instance
 *
 * This function enables runtime pm at the end of the driver load sequence.
 *
 * Note that this function currently does not enable runtime pm for the
 * subordinate display power domains. That is only done on the first modeset
 * using intel_display_set_init_power().
 */
void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	/*
	 * RPM depends on RC6 to save/restore the GT HW context, so make RC6 a
	 * requirement.
	 */
	if (!intel_enable_rc6(dev)) {
		DRM_INFO("RC6 disabled, disabling runtime PM support\n");
		return;
	}

	pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
	pm_runtime_mark_last_busy(device);
	pm_runtime_use_autosuspend(device);

	pm_runtime_put_autosuspend(device);
}
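
/*
 * Illustrative timeline (not from the original source): after the final
 * pm_runtime_put_autosuspend() the device stays up for the 10s autosuspend
 * delay armed above; only if no intel_runtime_pm_get() arrives within that
 * window does the PM core invoke the driver's runtime suspend hook.
 */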