/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "i915_drv.h"
#include "intel_drv.h"
#include "i915_vgpu.h"

#include <linux/pm_runtime.h>

#define FORCEWAKE_ACK_TIMEOUT_MS 2

#define __raw_i915_read8(dev_priv__, reg__) readb((dev_priv__)->regs + (reg__))
#define __raw_i915_write8(dev_priv__, reg__, val__) writeb(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
#define __raw_i915_write16(dev_priv__, reg__, val__) writew(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
#define __raw_i915_write32(dev_priv__, reg__, val__) writel(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read64(dev_priv__, reg__) readq((dev_priv__)->regs + (reg__))
#define __raw_i915_write64(dev_priv__, reg__, val__) writeq(val__, (dev_priv__)->regs + (reg__))

#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__)

static const char * const forcewake_domain_names[] = {
	"render",
	"blitter",
	"media",
};

const char *
intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
{
	BUILD_BUG_ON((sizeof(forcewake_domain_names)/sizeof(const char *)) !=
		     FW_DOMAIN_ID_COUNT);

	if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
		return forcewake_domain_names[id];

	WARN_ON(id);

	return "unknown";
}

static void
assert_device_not_suspended(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
		  "Device suspended\n");
}

static inline void
fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
{
	WARN_ON(d->reg_set == 0);
	__raw_i915_write32(d->i915, d->reg_set, d->val_reset);
}

static inline void
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
	mod_timer_pinned(&d->timer, jiffies + 1);
}

static inline void
fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
			     FORCEWAKE_KERNEL) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}

static inline void
fw_domain_get(const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(d->i915, d->reg_set, d->val_set);
}

static inline void
fw_domain_wait_ack(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
			     FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}

static inline void
fw_domain_put(const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(d->i915, d->reg_set, d->val_clear);
}

static inline void
fw_domain_posting_read(const struct intel_uncore_forcewake_domain *d)
{
	/* something from same cacheline, but not from the set register */
	if (d->reg_post)
		__raw_posting_read(d->i915, d->reg_post);
}

static void
fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	enum forcewake_domain_id id;

	for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
		fw_domain_wait_ack_clear(d);
		fw_domain_get(d);
		fw_domain_wait_ack(d);
	}
}

static void
fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	enum forcewake_domain_id id;

	for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
		fw_domain_put(d);
		fw_domain_posting_read(d);
	}
}

static void
fw_domains_posting_read(struct drm_i915_private *dev_priv)
{
	struct intel_uncore_forcewake_domain *d;
	enum forcewake_domain_id id;

	/* No need to do for all, just do for first found */
	for_each_fw_domain(d, dev_priv, id) {
		fw_domain_posting_read(d);
		break;
	}
}

static void
fw_domains_reset(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	enum forcewake_domain_id id;

	if (dev_priv->uncore.fw_domains == 0)
		return;

	for_each_fw_domain_mask(d, fw_domains, dev_priv, id)
		fw_domain_reset(d);

	fw_domains_posting_read(dev_priv);
}

static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
	/* w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
				GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
		DRM_ERROR("GT thread status wait timed out\n");
}

static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv,
					      enum forcewake_domains fw_domains)
{
	fw_domains_get(dev_priv, fw_domains);

	/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
	u32 gtfifodbg;

	gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
	if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
		__raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
}

static void fw_domains_put_with_fifo(struct drm_i915_private *dev_priv,
				     enum forcewake_domains fw_domains)
{
	fw_domains_put(dev_priv, fw_domains);
	gen6_gt_check_fifodbg(dev_priv);
}

static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv)
{
	u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL);

	return count & GT_FIFO_FREE_ENTRIES_MASK;
}

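/*
 * Wait for the GT wake FIFO to have more than the reserved number of free
 * entries before claiming one for an upcoming register write. Returns
 * non-zero if we gave up waiting; callers then check GTFIFODBG for errors.
 */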
static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	/* On VLV, FIFO will be shared by both SW and HW.
	 * So, we need to read the FREE_ENTRIES every time */
	if (IS_VALLEYVIEW(dev_priv->dev))
		dev_priv->uncore.fifo_count = fifo_free_entries(dev_priv);

	if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
		int loop = 500;
		u32 fifo = fifo_free_entries(dev_priv);

		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
			udelay(10);
			fifo = fifo_free_entries(dev_priv);
		}
		if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
			++ret;
		dev_priv->uncore.fifo_count = fifo;
	}
	dev_priv->uncore.fifo_count--;

	return ret;
}

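/*
 * Timer callback armed by fw_domain_arm_timer(): drops the implicit
 * forcewake reference taken for a register access and releases the
 * hardware forcewake once the domain's wake_count reaches zero.
 */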
static void intel_uncore_fw_release_timer(unsigned long arg)
{
	struct intel_uncore_forcewake_domain *domain = (void *)arg;
	unsigned long irqflags;

	assert_device_not_suspended(domain->i915);

	spin_lock_irqsave(&domain->i915->uncore.lock, irqflags);
	if (WARN_ON(domain->wake_count == 0))
		domain->wake_count++;

	if (--domain->wake_count == 0)
		domain->i915->uncore.funcs.force_wake_put(domain->i915,
							  1 << domain->id);

	spin_unlock_irqrestore(&domain->i915->uncore.lock, irqflags);
}

void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;
	struct intel_uncore_forcewake_domain *domain;
	int retry_count = 100;
	enum forcewake_domain_id id;
	enum forcewake_domains fw = 0, active_domains;

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly. Wait until all pending
	 * timers have run before taking the lock.
	 */
	while (1) {
		active_domains = 0;

		for_each_fw_domain(domain, dev_priv, id) {
			if (del_timer_sync(&domain->timer) == 0)
				continue;

			intel_uncore_fw_release_timer((unsigned long)domain);
		}

		spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

		for_each_fw_domain(domain, dev_priv, id) {
			if (timer_pending(&domain->timer))
				active_domains |= (1 << id);
		}

		if (active_domains == 0)
			break;

		if (--retry_count == 0) {
			DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
			break;
		}

		spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
		cond_resched();
	}

	WARN_ON(active_domains);

	for_each_fw_domain(domain, dev_priv, id)
		if (domain->wake_count)
			fw |= 1 << id;

	if (fw)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);

	fw_domains_reset(dev_priv, FORCEWAKE_ALL);

	if (restore) { /* If reset with a user forcewake, try to restore */
		if (fw)
			dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);

		if (IS_GEN6(dev) || IS_GEN7(dev))
			dev_priv->uncore.fifo_count =
				fifo_free_entries(dev_priv);
	}

	if (!restore)
		assert_forcewakes_inactive(dev_priv);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

static void intel_uncore_ellc_detect(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if ((IS_HASWELL(dev) || IS_BROADWELL(dev) ||
	     INTEL_INFO(dev)->gen >= 9) &&
	    (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) & EDRAM_ENABLED)) {
		/* The docs do not explain exactly how the calculation can be
		 * made. It is somewhat guessable, but for now, it's always
		 * 128MB.
		 * NB: We can't write IDICR yet because we do not have gt funcs
		 * set up */
		dev_priv->ellc_size = 128;
		DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
	}
}

static void __intel_uncore_early_sanitize(struct drm_device *dev,
					  bool restore_forcewake)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev))
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	/* clear out old GT FIFO errors */
	if (IS_GEN6(dev) || IS_GEN7(dev))
		__raw_i915_write32(dev_priv, GTFIFODBG,
				   __raw_i915_read32(dev_priv, GTFIFODBG));

	/* WaDisableShadowRegForCpd:chv */
	if (IS_CHERRYVIEW(dev)) {
		__raw_i915_write32(dev_priv, GTFIFOCTL,
				   __raw_i915_read32(dev_priv, GTFIFOCTL) |
				   GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
				   GT_FIFO_CTL_RC6_POLICY_STALL);
	}

	intel_uncore_forcewake_reset(dev, restore_forcewake);
}

void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
{
	__intel_uncore_early_sanitize(dev, restore_forcewake);
	i915_check_and_clear_faults(dev);
}

void intel_uncore_sanitize(struct drm_device *dev)
{
	/* BIOS often leaves RC6 enabled, but disable it for hw init */
	intel_disable_gt_powersave(dev);
}

/**
 * intel_uncore_forcewake_get - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * This function can be used to get GT's forcewake domain references.
 * Normal register access will handle the forcewake domains automatically.
 * However if some sequence requires the GT to not power down particular
 * forcewake domains, this function should be called at the beginning of the
 * sequence. And subsequently the reference should be dropped by a symmetric
 * call to intel_uncore_forcewake_put(). Usually the caller wants all domains
 * to be kept awake, so @fw_domains would then be FORCEWAKE_ALL.
 */
void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;
	struct intel_uncore_forcewake_domain *domain;
	enum forcewake_domain_id id;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	WARN_ON(dev_priv->pm.suspended);

	fw_domains &= dev_priv->uncore.fw_domains;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
		if (domain->wake_count++)
			fw_domains &= ~(1 << id);
	}

	if (fw_domains)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
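/*
 * A minimal usage sketch (illustrative only, not taken from a real caller):
 * a sequence that must not see the render well power down is bracketed as
 *
 *	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_RENDER);
 *	... register sequence that relies on the well staying awake ...
 *	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_RENDER);
 */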

/**
 * intel_uncore_forcewake_put - release a forcewake domain reference
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references on
 *
 * This function drops the device-level forcewake references for the
 * specified domains obtained by intel_uncore_forcewake_get().
 */
void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;
	struct intel_uncore_forcewake_domain *domain;
	enum forcewake_domain_id id;

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	fw_domains &= dev_priv->uncore.fw_domains;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
		if (WARN_ON(domain->wake_count == 0))
			continue;

		if (--domain->wake_count)
			continue;

		domain->wake_count++;
		fw_domain_arm_timer(domain);
	}

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
{
	struct intel_uncore_forcewake_domain *domain;
	enum forcewake_domain_id id;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	for_each_fw_domain(domain, dev_priv, id)
		WARN_ON(domain->wake_count);
}

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
	 ((reg) < 0x40000 && (reg) != FORCEWAKE)

#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))

#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x4000) || \
	 REG_RANGE((reg), 0x5000, 0x8000) || \
	 REG_RANGE((reg), 0xB000, 0x12000) || \
	 REG_RANGE((reg), 0x2E000, 0x30000))

#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x22000, 0x24000) || \
	 REG_RANGE((reg), 0x30000, 0x40000))

#define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x4000) || \
	 REG_RANGE((reg), 0x5200, 0x8000) || \
	 REG_RANGE((reg), 0x8300, 0x8500) || \
	 REG_RANGE((reg), 0xB000, 0xB480) || \
	 REG_RANGE((reg), 0xE000, 0xE800))

#define FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x8800, 0x8900) || \
	 REG_RANGE((reg), 0xD000, 0xD800) || \
	 REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x1A000, 0x1C000) || \
	 REG_RANGE((reg), 0x1E800, 0x1EA00) || \
	 REG_RANGE((reg), 0x30000, 0x38000))

#define FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x4000, 0x5000) || \
	 REG_RANGE((reg), 0x8000, 0x8300) || \
	 REG_RANGE((reg), 0x8500, 0x8600) || \
	 REG_RANGE((reg), 0x9000, 0xB000) || \
	 REG_RANGE((reg), 0xF000, 0x10000))

#define FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) \
	REG_RANGE((reg), 0xB00,  0x2000)

#define FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x2700) || \
	 REG_RANGE((reg), 0x3000, 0x4000) || \
	 REG_RANGE((reg), 0x5200, 0x8000) || \
	 REG_RANGE((reg), 0x8140, 0x8160) || \
	 REG_RANGE((reg), 0x8300, 0x8500) || \
	 REG_RANGE((reg), 0x8C00, 0x8D00) || \
	 REG_RANGE((reg), 0xB000, 0xB480) || \
	 REG_RANGE((reg), 0xE000, 0xE900) || \
	 REG_RANGE((reg), 0x24400, 0x24800))

#define FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x8130, 0x8140) || \
	 REG_RANGE((reg), 0x8800, 0x8A00) || \
	 REG_RANGE((reg), 0xD000, 0xD800) || \
	 REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x1A000, 0x1EA00) || \
	 REG_RANGE((reg), 0x30000, 0x40000))

#define FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg) \
	REG_RANGE((reg), 0x9400, 0x9800)

#define FORCEWAKE_GEN9_BLITTER_RANGE_OFFSET(reg) \
	((reg) < 0x40000 &&\
	 !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) && \
	 !FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) && \
	 !FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) && \
	 !FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg))

static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into. */
	__raw_i915_write32(dev_priv, MI_MODE, 0);
}

static void
hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv, u32 reg, bool read,
			bool before)
{
	const char *op = read ? "reading" : "writing to";
	const char *when = before ? "before" : "after";

	if (!i915.mmio_debug)
		return;

	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
		WARN(1, "Unclaimed register detected %s %s register 0x%x\n",
		     when, op, reg);
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
		i915.mmio_debug--; /* Only report the first N failures */
	}
}

static void
hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv)
{
	static bool mmio_debug_once = true;

	if (i915.mmio_debug || !mmio_debug_once)
		return;

	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
		DRM_DEBUG("Unclaimed register detected, "
			  "enabling oneshot unclaimed register reporting. "
			  "Please use i915.mmio_debug=N for more information.\n");
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
		i915.mmio_debug = mmio_debug_once--;
	}
}

#define GEN2_READ_HEADER(x) \
	u##x val = 0; \
	assert_device_not_suspended(dev_priv);

#define GEN2_READ_FOOTER \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __gen2_read(x) \
static u##x \
gen2_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN2_READ_FOOTER; \
}

#define __gen5_read(x) \
static u##x \
gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	ilk_dummy_write(dev_priv); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN2_READ_FOOTER; \
}

__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen2_read(8)
__gen2_read(16)
__gen2_read(32)
__gen2_read(64)

#undef __gen5_read
#undef __gen2_read

#undef GEN2_READ_FOOTER
#undef GEN2_READ_HEADER

#define GEN6_READ_HEADER(x) \
	unsigned long irqflags; \
	u##x val = 0; \
	assert_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define GEN6_READ_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

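/*
 * Take an implicit forcewake reference for the domains needed by a register
 * access: domains that already hold a reference are skipped, the others get
 * a reference, have their release timer armed and are woken through the
 * force_wake_get vfunc. Called with uncore.lock held by the gen6+ MMIO
 * accessors below.
 */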
static inline void __force_wake_get(struct drm_i915_private *dev_priv,
				    enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	enum forcewake_domain_id id;

	if (WARN_ON(!fw_domains))
		return;

	/* Ideally GCC would constant-fold and eliminate this loop */
	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
		if (domain->wake_count) {
			fw_domains &= ~(1 << id);
			continue;
		}

		domain->wake_count++;
		fw_domain_arm_timer(domain);
	}

	if (fw_domains)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}

#define __vgpu_read(x) \
static u##x \
vgpu_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	GEN6_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

#define __gen6_read(x) \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	GEN6_READ_HEADER(x); \
	hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) \
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
	val = __raw_i915_read##x(dev_priv, reg); \
	hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
	GEN6_READ_FOOTER; \
}

#define __vlv_read(x) \
static u##x \
vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	GEN6_READ_HEADER(x); \
	if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) \
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
	else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) \
		__force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

#define __chv_read(x) \
static u##x \
chv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	GEN6_READ_HEADER(x); \
	if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) \
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
	else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) \
		__force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
	else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) \
		__force_wake_get(dev_priv, \
				 FORCEWAKE_RENDER | FORCEWAKE_MEDIA); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

#define SKL_NEEDS_FORCE_WAKE(dev_priv, reg)	\
	 ((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))

#define __gen9_read(x) \
static u##x \
gen9_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg)))	\
		fw_engine = 0; \
	else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg))	\
		fw_engine = FORCEWAKE_RENDER; \
	else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) \
		fw_engine = FORCEWAKE_MEDIA; \
	else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) \
		fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
	else \
		fw_engine = FORCEWAKE_BLITTER; \
	if (fw_engine) \
		__force_wake_get(dev_priv, fw_engine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

__vgpu_read(8)
__vgpu_read(16)
__vgpu_read(32)
__vgpu_read(64)
__gen9_read(8)
__gen9_read(16)
__gen9_read(32)
__gen9_read(64)
__chv_read(8)
__chv_read(16)
__chv_read(32)
__chv_read(64)
__vlv_read(8)
__vlv_read(16)
__vlv_read(32)
__vlv_read(64)
__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)

#undef __gen9_read
#undef __chv_read
#undef __vlv_read
#undef __gen6_read
#undef __vgpu_read
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER

#define GEN2_WRITE_HEADER \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_device_not_suspended(dev_priv); \

#define GEN2_WRITE_FOOTER

#define __gen2_write(x) \
static void \
gen2_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN2_WRITE_FOOTER; \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	ilk_dummy_write(dev_priv); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN2_WRITE_FOOTER; \
}

__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen5_write(64)
__gen2_write(8)
__gen2_write(16)
__gen2_write(32)
__gen2_write(64)

#undef __gen5_write
#undef __gen2_write

#undef GEN2_WRITE_FOOTER
#undef GEN2_WRITE_HEADER

#define GEN6_WRITE_HEADER \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define GEN6_WRITE_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)

#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	GEN6_WRITE_FOOTER; \
}

#define __hsw_write(x) \
static void \
hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
	hsw_unclaimed_reg_detect(dev_priv); \
	GEN6_WRITE_FOOTER; \
}

#define __vgpu_write(x) \
static void vgpu_write##x(struct drm_i915_private *dev_priv, \
			  off_t reg, u##x val, bool trace) { \
	GEN6_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}

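/*
 * Writes to these registers are shadowed by the hardware, so the gen8+
 * write paths below skip the forcewake dance for them (see the
 * is_gen8_shadowed() / is_gen9_shadowed() checks).
 */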
static const u32 gen8_shadowed_regs[] = {
	FORCEWAKE_MT,
	GEN6_RPNSWREQ,
	GEN6_RC_VIDEO_FREQ,
	RING_TAIL(RENDER_RING_BASE),
	RING_TAIL(GEN6_BSD_RING_BASE),
	RING_TAIL(VEBOX_RING_BASE),
	RING_TAIL(BLT_RING_BASE),
	/* TODO: Other registers are not yet used */
};

static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
{
	int i;
	for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
		if (reg == gen8_shadowed_regs[i])
			return true;

	return false;
}

#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	GEN6_WRITE_HEADER; \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
	if (reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg)) \
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
	__raw_i915_write##x(dev_priv, reg, val); \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
	hsw_unclaimed_reg_detect(dev_priv); \
	GEN6_WRITE_FOOTER; \
}

#define __chv_write(x) \
static void \
chv_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	bool shadowed = is_gen8_shadowed(dev_priv, reg); \
	GEN6_WRITE_HEADER; \
	if (!shadowed) { \
		if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) \
			__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
		else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) \
			__force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
		else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) \
			__force_wake_get(dev_priv, FORCEWAKE_RENDER | FORCEWAKE_MEDIA); \
	} \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}

static const u32 gen9_shadowed_regs[] = {
	RING_TAIL(RENDER_RING_BASE),
	RING_TAIL(GEN6_BSD_RING_BASE),
	RING_TAIL(VEBOX_RING_BASE),
	RING_TAIL(BLT_RING_BASE),
	FORCEWAKE_BLITTER_GEN9,
	FORCEWAKE_RENDER_GEN9,
	FORCEWAKE_MEDIA_GEN9,
	GEN6_RPNSWREQ,
	GEN6_RC_VIDEO_FREQ,
	/* TODO: Other registers are not yet used */
};

static bool is_gen9_shadowed(struct drm_i915_private *dev_priv, u32 reg)
{
	int i;
	for (i = 0; i < ARRAY_SIZE(gen9_shadowed_regs); i++)
		if (reg == gen9_shadowed_regs[i])
			return true;

	return false;
}

#define __gen9_write(x) \
static void \
gen9_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, \
		bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg)) ||	\
	    is_gen9_shadowed(dev_priv, reg)) \
		fw_engine = 0; \
	else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) \
		fw_engine = FORCEWAKE_RENDER; \
	else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) \
		fw_engine = FORCEWAKE_MEDIA; \
	else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) \
		fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
	else \
		fw_engine = FORCEWAKE_BLITTER; \
	if (fw_engine) \
		__force_wake_get(dev_priv, fw_engine); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}

__gen9_write(8)
__gen9_write(16)
__gen9_write(32)
__gen9_write(64)
__chv_write(8)
__chv_write(16)
__chv_write(32)
__chv_write(64)
__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
__gen8_write(64)
__hsw_write(8)
__hsw_write(16)
__hsw_write(32)
__hsw_write(64)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)
__gen6_write(64)
__vgpu_write(8)
__vgpu_write(16)
__vgpu_write(32)
__vgpu_write(64)

#undef __gen9_write
#undef __chv_write
#undef __gen8_write
#undef __hsw_write
#undef __gen6_write
#undef __vgpu_write
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER

#define ASSIGN_WRITE_MMIO_VFUNCS(x) \
do { \
	dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
	dev_priv->uncore.funcs.mmio_writew = x##_write16; \
	dev_priv->uncore.funcs.mmio_writel = x##_write32; \
	dev_priv->uncore.funcs.mmio_writeq = x##_write64; \
} while (0)

#define ASSIGN_READ_MMIO_VFUNCS(x) \
do { \
	dev_priv->uncore.funcs.mmio_readb = x##_read8; \
	dev_priv->uncore.funcs.mmio_readw = x##_read16; \
	dev_priv->uncore.funcs.mmio_readl = x##_read32; \
	dev_priv->uncore.funcs.mmio_readq = x##_read64; \
} while (0)

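/*
 * Set up a single forcewake domain: record its set/ack registers, the
 * per-generation set/clear/reset values and the posting-read register,
 * initialise the release timer and mark the domain as present in
 * uncore.fw_domains.
 */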
static void fw_domain_init(struct drm_i915_private *dev_priv,
			   enum forcewake_domain_id domain_id,
			   u32 reg_set, u32 reg_ack)
{
	struct intel_uncore_forcewake_domain *d;

	if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
		return;

	d = &dev_priv->uncore.fw_domain[domain_id];

	WARN_ON(d->wake_count);

	d->wake_count = 0;
	d->reg_set = reg_set;
	d->reg_ack = reg_ack;

	if (IS_GEN6(dev_priv)) {
		d->val_reset = 0;
		d->val_set = FORCEWAKE_KERNEL;
		d->val_clear = 0;
	} else {
		/* WaRsClearFWBitsAtReset:bdw,skl */
		d->val_reset = _MASKED_BIT_DISABLE(0xffff);
		d->val_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
		d->val_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
	}

	if (IS_VALLEYVIEW(dev_priv))
		d->reg_post = FORCEWAKE_ACK_VLV;
	else if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv) || IS_GEN8(dev_priv))
		d->reg_post = ECOBUS;
	else
		d->reg_post = 0;

	d->i915 = dev_priv;
	d->id = domain_id;

	setup_timer(&d->timer, intel_uncore_fw_release_timer, (unsigned long)d);

	dev_priv->uncore.fw_domains |= (1 << domain_id);

	fw_domain_reset(d);
}

static void intel_uncore_fw_domains_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev_priv->dev)->gen <= 5)
		return;

	if (IS_GEN9(dev)) {
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_RENDER_GEN9,
			       FORCEWAKE_ACK_RENDER_GEN9);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
			       FORCEWAKE_BLITTER_GEN9,
			       FORCEWAKE_ACK_BLITTER_GEN9);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
	} else if (IS_VALLEYVIEW(dev)) {
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
		if (!IS_CHERRYVIEW(dev))
			dev_priv->uncore.funcs.force_wake_put =
				fw_domains_put_with_fifo;
		else
			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		if (IS_HASWELL(dev))
			dev_priv->uncore.funcs.force_wake_put =
				fw_domains_put_with_fifo;
		else
			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
	} else if (IS_IVYBRIDGE(dev)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put =
			fw_domains_put_with_fifo;

		/* We need to init first for ECOBUS access and then
		 * determine later if we want to reinit, in case MT access is
		 * not working. In this stage we don't know which flavour this
		 * ivb is, so it is better to reset also the gen6 fw registers
		 * before the ecobus check.
		 */

		__raw_i915_write32(dev_priv, FORCEWAKE, 0);
		__raw_posting_read(dev_priv, ECOBUS);

		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_MT_ACK);

		mutex_lock(&dev->struct_mutex);
		fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL);
		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
		fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL);
		mutex_unlock(&dev->struct_mutex);

		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
			DRM_INFO("when using vblank-synced partial screen updates.\n");
			fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE, FORCEWAKE_ACK);
		}
	} else if (IS_GEN6(dev)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put =
			fw_domains_put_with_fifo;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE, FORCEWAKE_ACK);
	}

	/* All future platforms are expected to require complex power gating */
	WARN_ON(dev_priv->uncore.fw_domains == 0);
}

void intel_uncore_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	i915_check_vgpu(dev);

	intel_uncore_ellc_detect(dev);
	intel_uncore_fw_domains_init(dev);
	__intel_uncore_early_sanitize(dev, false);

	switch (INTEL_INFO(dev)->gen) {
	default:
		MISSING_CASE(INTEL_INFO(dev)->gen);
		return;
	case 9:
		ASSIGN_WRITE_MMIO_VFUNCS(gen9);
		ASSIGN_READ_MMIO_VFUNCS(gen9);
		break;
	case 8:
		if (IS_CHERRYVIEW(dev)) {
			ASSIGN_WRITE_MMIO_VFUNCS(chv);
			ASSIGN_READ_MMIO_VFUNCS(chv);

		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(gen8);
			ASSIGN_READ_MMIO_VFUNCS(gen6);
		}
		break;
	case 7:
	case 6:
		if (IS_HASWELL(dev)) {
			ASSIGN_WRITE_MMIO_VFUNCS(hsw);
		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(gen6);
		}

		if (IS_VALLEYVIEW(dev)) {
			ASSIGN_READ_MMIO_VFUNCS(vlv);
		} else {
			ASSIGN_READ_MMIO_VFUNCS(gen6);
		}
		break;
	case 5:
		ASSIGN_WRITE_MMIO_VFUNCS(gen5);
		ASSIGN_READ_MMIO_VFUNCS(gen5);
		break;
	case 4:
	case 3:
	case 2:
		ASSIGN_WRITE_MMIO_VFUNCS(gen2);
		ASSIGN_READ_MMIO_VFUNCS(gen2);
		break;
	}

	if (intel_vgpu_active(dev)) {
		ASSIGN_WRITE_MMIO_VFUNCS(vgpu);
		ASSIGN_READ_MMIO_VFUNCS(vgpu);
	}

	i915_check_and_clear_faults(dev);
}
#undef ASSIGN_WRITE_MMIO_VFUNCS
#undef ASSIGN_READ_MMIO_VFUNCS

void intel_uncore_fini(struct drm_device *dev)
{
	/* Paranoia: make sure we have disabled everything before we exit. */
	intel_uncore_sanitize(dev);
	intel_uncore_forcewake_reset(dev, false);
}

#define GEN_RANGE(l, h) GENMASK(h, l)

static const struct register_whitelist {
	uint64_t offset;
	uint32_t size;
	/* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
	uint32_t gen_bitmask;
} whitelist[] = {
	{ RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 9) },
};

int i915_reg_read_ioctl(struct drm_device *dev,
			void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reg_read *reg = data;
	struct register_whitelist const *entry = whitelist;
	unsigned size;
	u64 offset;
	int i, ret = 0;

	for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
		if (entry->offset == (reg->offset & -entry->size) &&
		    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
			break;
	}

	if (i == ARRAY_SIZE(whitelist))
		return -EINVAL;

	/* We use the low bits to encode extra flags as the register should
	 * be naturally aligned (and those that are not so aligned merely
	 * limit the available flags for that register).
	 */
	offset = entry->offset;
	size = entry->size;
	size |= reg->offset ^ offset;

	intel_runtime_pm_get(dev_priv);

	switch (size) {
	case 8 | 1:
		reg->val = I915_READ64_2x32(offset, offset+4);
		break;
	case 8:
		reg->val = I915_READ64(offset);
		break;
	case 4:
		reg->val = I915_READ(offset);
		break;
	case 2:
		reg->val = I915_READ16(offset);
		break;
	case 1:
		reg->val = I915_READ8(offset);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}

int i915_get_reset_stats_ioctl(struct drm_device *dev,
			       void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reset_stats *args = data;
	struct i915_ctx_hang_stats *hs;
	struct intel_context *ctx;
	int ret;

	if (args->flags || args->pad)
		return -EINVAL;

	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file->driver_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}
	hs = &ctx->hang_stats;

	if (capable(CAP_SYS_ADMIN))
		args->reset_count = i915_reset_count(&dev_priv->gpu_error);
	else
		args->reset_count = 0;

	args->batch_active = hs->batch_active;
	args->batch_pending = hs->batch_pending;

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_reset_complete(struct drm_device *dev)
{
	u8 gdrst;
	pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_STATUS) == 0;
}

static int i915_do_reset(struct drm_device *dev)
{
	/* assert reset for at least 20 usec */
	pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	udelay(20);
	pci_write_config_byte(dev->pdev, I915_GDRST, 0);

	return wait_for(i915_reset_complete(dev), 500);
}

static int g4x_reset_complete(struct drm_device *dev)
{
	u8 gdrst;
	pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int g33_do_reset(struct drm_device *dev)
{
	pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	return wait_for(g4x_reset_complete(dev), 500);
}

static int g4x_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	pci_write_config_byte(dev->pdev, I915_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(g4x_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(dev->pdev, I915_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	ret = wait_for(g4x_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(dev->pdev, I915_GDRST, 0);

	return 0;
}

static int ironlake_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
	ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) &
			ILK_GRDOM_RESET_ENABLE) == 0, 500);
	if (ret)
		return ret;

	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
	ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) &
			ILK_GRDOM_RESET_ENABLE) == 0, 500);
	if (ret)
		return ret;

	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, 0);

	return 0;
}

static int gen6_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* Reset the chip */

	/* GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	__raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL);

	/* Spin waiting for the device to ack the reset request */
	ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);

	intel_uncore_forcewake_reset(dev, true);

	return ret;
}

int intel_gpu_reset(struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen >= 6)
		return gen6_do_reset(dev);
	else if (IS_GEN5(dev))
		return ironlake_do_reset(dev);
	else if (IS_G4X(dev))
		return g4x_do_reset(dev);
	else if (IS_G33(dev))
		return g33_do_reset(dev);
	else if (INTEL_INFO(dev)->gen >= 3)
		return i915_do_reset(dev);
	else
		return -ENODEV;
}

void intel_uncore_check_errors(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev) &&
	    (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
		DRM_ERROR("Unclaimed register before interrupt\n");
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}