/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "i915_drv.h"
#include "intel_drv.h"
#include "i915_vgpu.h"

#include <linux/pm_runtime.h>

#define FORCEWAKE_ACK_TIMEOUT_MS 50

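/*
 * The __raw_i915_read/write helpers below do plain MMIO on the mapped
 * register BAR: no forcewake handling, no runtime-PM checks and no tracing.
 * They are only safe for registers that are always powered, or when the
 * caller has already taken the relevant forcewake domain; the public
 * I915_READ/WRITE paths further down are built on top of them.
 */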
#define __raw_i915_read8(dev_priv__, reg__) readb((dev_priv__)->regs + (reg__))
#define __raw_i915_write8(dev_priv__, reg__, val__) writeb(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
#define __raw_i915_write16(dev_priv__, reg__, val__) writew(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
#define __raw_i915_write32(dev_priv__, reg__, val__) writel(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read64(dev_priv__, reg__) readq((dev_priv__)->regs + (reg__))
#define __raw_i915_write64(dev_priv__, reg__, val__) writeq(val__, (dev_priv__)->regs + (reg__))

#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__)

static const char * const forcewake_domain_names[] = {
	"render",
	"blitter",
	"media",
};

const char *
intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
{
	BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);

	if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
		return forcewake_domain_names[id];

	WARN_ON(id);

	return "unknown";
}

static void
assert_device_not_suspended(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
		  "Device suspended\n");
}

static inline void
fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
{
	WARN_ON(d->reg_set == 0);
	__raw_i915_write32(d->i915, d->reg_set, d->val_reset);
}

static inline void
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
	mod_timer_pinned(&d->timer, jiffies + 1);
}

static inline void
fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
			     FORCEWAKE_KERNEL) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}

static inline void
fw_domain_get(const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(d->i915, d->reg_set, d->val_set);
}

static inline void
fw_domain_wait_ack(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
			     FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}

static inline void
fw_domain_put(const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(d->i915, d->reg_set, d->val_clear);
}

static inline void
fw_domain_posting_read(const struct intel_uncore_forcewake_domain *d)
{
	/* something from the same cacheline, but not from the set register */
	if (d->reg_post)
		__raw_posting_read(d->i915, d->reg_post);
}
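
/*
 * Taking a domain is a three-step handshake with the hardware: wait for any
 * previous kernel request to be acked as cleared, write the "set" bit, then
 * wait for the ack to assert.  Releasing only writes the "clear" bit; the
 * posting read above is used to flush it out.
 */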

static void
fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	enum forcewake_domain_id id;

	for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
		fw_domain_wait_ack_clear(d);
		fw_domain_get(d);
		fw_domain_wait_ack(d);
	}
}

static void
fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	enum forcewake_domain_id id;

	for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
		fw_domain_put(d);
		fw_domain_posting_read(d);
	}
}

static void
fw_domains_posting_read(struct drm_i915_private *dev_priv)
{
	struct intel_uncore_forcewake_domain *d;
	enum forcewake_domain_id id;

	/* No need to do this for all domains, just the first one found */
	for_each_fw_domain(d, dev_priv, id) {
		fw_domain_posting_read(d);
		break;
	}
}

static void
fw_domains_reset(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	enum forcewake_domain_id id;

	if (dev_priv->uncore.fw_domains == 0)
		return;

	for_each_fw_domain_mask(d, fw_domains, dev_priv, id)
		fw_domain_reset(d);

	fw_domains_posting_read(dev_priv);
}

static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
	/* w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
				GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
		DRM_ERROR("GT thread status wait timed out\n");
}

static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv,
					      enum forcewake_domains fw_domains)
{
	fw_domains_get(dev_priv, fw_domains);

	/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
	u32 gtfifodbg;

	gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
	if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
		__raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
}

static void fw_domains_put_with_fifo(struct drm_i915_private *dev_priv,
				     enum forcewake_domains fw_domains)
{
	fw_domains_put(dev_priv, fw_domains);
	gen6_gt_check_fifodbg(dev_priv);
}

static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv)
{
	u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL);

	return count & GT_FIFO_FREE_ENTRIES_MASK;
}

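/*
 * On gen6/gen7 every MMIO write that goes through the GT wake FIFO consumes
 * one of a limited number of entries; overflowing it is flagged in GTFIFODBG
 * and can drop writes.  The driver therefore keeps a software count of free
 * entries (uncore.fifo_count) and, below a reserved threshold, busy-waits
 * for space before allowing the write to proceed.
 */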
static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	/* On VLV, the FIFO is shared by both SW and HW, so we need to
	 * read FREE_ENTRIES every time */
	if (IS_VALLEYVIEW(dev_priv->dev))
		dev_priv->uncore.fifo_count = fifo_free_entries(dev_priv);

	if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
		int loop = 500;
		u32 fifo = fifo_free_entries(dev_priv);

		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
			udelay(10);
			fifo = fifo_free_entries(dev_priv);
		}
		if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
			++ret;
		dev_priv->uncore.fifo_count = fifo;
	}
	dev_priv->uncore.fifo_count--;

	return ret;
}

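/*
 * Timer callback used to release a forcewake domain.  A put does not drop
 * the hardware wakeref immediately; instead fw_domain_arm_timer() schedules
 * this callback one jiffy later, so that rapid get/put sequences on the same
 * domain reuse the wake instead of bouncing the hardware handshake.
 */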
static void intel_uncore_fw_release_timer(unsigned long arg)
{
	struct intel_uncore_forcewake_domain *domain = (void *)arg;
	unsigned long irqflags;

	assert_device_not_suspended(domain->i915);

	spin_lock_irqsave(&domain->i915->uncore.lock, irqflags);
	if (WARN_ON(domain->wake_count == 0))
		domain->wake_count++;

	if (--domain->wake_count == 0)
		domain->i915->uncore.funcs.force_wake_put(domain->i915,
							  1 << domain->id);

	spin_unlock_irqrestore(&domain->i915->uncore.lock, irqflags);
}

void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;
	struct intel_uncore_forcewake_domain *domain;
	int retry_count = 100;
	enum forcewake_domain_id id;
	enum forcewake_domains fw = 0, active_domains;

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly. Wait until all pending
	 * timers are run before holding.
	 */
	while (1) {
		active_domains = 0;

		for_each_fw_domain(domain, dev_priv, id) {
			if (del_timer_sync(&domain->timer) == 0)
				continue;

			intel_uncore_fw_release_timer((unsigned long)domain);
		}

		spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

		for_each_fw_domain(domain, dev_priv, id) {
			if (timer_pending(&domain->timer))
				active_domains |= (1 << id);
		}

		if (active_domains == 0)
			break;

		if (--retry_count == 0) {
			DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
			break;
		}

		spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
		cond_resched();
	}

	WARN_ON(active_domains);

	for_each_fw_domain(domain, dev_priv, id)
		if (domain->wake_count)
			fw |= 1 << id;

	if (fw)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);

	fw_domains_reset(dev_priv, FORCEWAKE_ALL);

	if (restore) { /* If reset with a user forcewake, try to restore */
		if (fw)
			dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);

		if (IS_GEN6(dev) || IS_GEN7(dev))
			dev_priv->uncore.fifo_count =
				fifo_free_entries(dev_priv);
	}

	if (!restore)
		assert_forcewakes_inactive(dev_priv);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

static void intel_uncore_ellc_detect(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if ((IS_HASWELL(dev) || IS_BROADWELL(dev) ||
	     INTEL_INFO(dev)->gen >= 9) &&
	    (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) & EDRAM_ENABLED)) {
		/* The docs do not explain exactly how the calculation can be
		 * made. It is somewhat guessable, but for now, it's always
		 * 128MB.
		 * NB: We can't write IDICR yet because we do not have gt funcs
		 * set up */
		dev_priv->ellc_size = 128;
		DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
	}
}

static void __intel_uncore_early_sanitize(struct drm_device *dev,
					  bool restore_forcewake)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev))
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	/* clear out old GT FIFO errors */
	if (IS_GEN6(dev) || IS_GEN7(dev))
		__raw_i915_write32(dev_priv, GTFIFODBG,
				   __raw_i915_read32(dev_priv, GTFIFODBG));

	/* WaDisableShadowRegForCpd:chv */
	if (IS_CHERRYVIEW(dev)) {
		__raw_i915_write32(dev_priv, GTFIFOCTL,
				   __raw_i915_read32(dev_priv, GTFIFOCTL) |
				   GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
				   GT_FIFO_CTL_RC6_POLICY_STALL);
	}

	intel_uncore_forcewake_reset(dev, restore_forcewake);
}

void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
{
	__intel_uncore_early_sanitize(dev, restore_forcewake);
	i915_check_and_clear_faults(dev);
}

void intel_uncore_sanitize(struct drm_device *dev)
{
	/* BIOS often leaves RC6 enabled, but disable it for hw init */
	intel_disable_gt_powersave(dev);
}

static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	enum forcewake_domain_id id;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	fw_domains &= dev_priv->uncore.fw_domains;

	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
		if (domain->wake_count++)
			fw_domains &= ~(1 << id);
	}

	if (fw_domains)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}

/**
 * intel_uncore_forcewake_get - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * This function can be used to get the GT's forcewake domain references.
 * Normal register access will handle the forcewake domains automatically.
 * However, if some sequence requires the GT to not power down a particular
 * forcewake domain, this function should be called at the beginning of the
 * sequence, and the reference should subsequently be dropped by a symmetric
 * call to intel_uncore_forcewake_put(). Usually the caller wants all the
 * domains to be kept awake, in which case @fw_domains is FORCEWAKE_ALL.
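 *
 * An illustrative sketch of a caller (hypothetical, not taken from this file):
 *
 *	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 *	... issue the register accesses that must not race GT power-down ...
 *	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);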
 */
void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	WARN_ON(dev_priv->pm.suspended);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	__intel_uncore_forcewake_get(dev_priv, fw_domains);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/**
 * intel_uncore_forcewake_get__locked - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * See intel_uncore_forcewake_get(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	assert_spin_locked(&dev_priv->uncore.lock);

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	__intel_uncore_forcewake_get(dev_priv, fw_domains);
}

static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	enum forcewake_domain_id id;

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	fw_domains &= dev_priv->uncore.fw_domains;

	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
		if (WARN_ON(domain->wake_count == 0))
			continue;

		if (--domain->wake_count)
			continue;

		domain->wake_count++;
		fw_domain_arm_timer(domain);
	}
}

/**
 * intel_uncore_forcewake_put - release a forcewake domain reference
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references
 *
 * This function drops the device-level forcewakes for specified
 * domains obtained by intel_uncore_forcewake_get().
 */
void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	__intel_uncore_forcewake_put(dev_priv, fw_domains);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/**
 * intel_uncore_forcewake_put__locked - release forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references on
 *
 * See intel_uncore_forcewake_put(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	assert_spin_locked(&dev_priv->uncore.lock);

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	__intel_uncore_forcewake_put(dev_priv, fw_domains);
}

void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
{
	struct intel_uncore_forcewake_domain *domain;
	enum forcewake_domain_id id;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	for_each_fw_domain(domain, dev_priv, id)
		WARN_ON(domain->wake_count);
}

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(reg) \
	 ((reg) < 0x40000 && (reg) != FORCEWAKE)

#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))

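/*
 * The range tables below map MMIO offsets to the forcewake domain(s) that
 * have to be awake for that register to be accessible on a given platform.
 * They are used by the per-platform read/write handlers further down; any
 * GT register not covered by a more specific range falls back to that
 * platform's default domain (e.g. the blitter domain on gen9).
 */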
#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x4000) || \
	 REG_RANGE((reg), 0x5000, 0x8000) || \
	 REG_RANGE((reg), 0xB000, 0x12000) || \
	 REG_RANGE((reg), 0x2E000, 0x30000))

#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x22000, 0x24000) || \
	 REG_RANGE((reg), 0x30000, 0x40000))

#define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x4000) || \
	 REG_RANGE((reg), 0x5200, 0x8000) || \
	 REG_RANGE((reg), 0x8300, 0x8500) || \
	 REG_RANGE((reg), 0xB000, 0xB480) || \
	 REG_RANGE((reg), 0xE000, 0xE800))

#define FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x8800, 0x8900) || \
	 REG_RANGE((reg), 0xD000, 0xD800) || \
	 REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x1A000, 0x1C000) || \
	 REG_RANGE((reg), 0x1E800, 0x1EA00) || \
	 REG_RANGE((reg), 0x30000, 0x38000))

#define FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x4000, 0x5000) || \
	 REG_RANGE((reg), 0x8000, 0x8300) || \
	 REG_RANGE((reg), 0x8500, 0x8600) || \
	 REG_RANGE((reg), 0x9000, 0xB000) || \
	 REG_RANGE((reg), 0xF000, 0x10000))

#define FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) \
	REG_RANGE((reg), 0xB00,  0x2000)

#define FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x2700) || \
	 REG_RANGE((reg), 0x3000, 0x4000) || \
	 REG_RANGE((reg), 0x5200, 0x8000) || \
	 REG_RANGE((reg), 0x8140, 0x8160) || \
	 REG_RANGE((reg), 0x8300, 0x8500) || \
	 REG_RANGE((reg), 0x8C00, 0x8D00) || \
	 REG_RANGE((reg), 0xB000, 0xB480) || \
	 REG_RANGE((reg), 0xE000, 0xE900) || \
	 REG_RANGE((reg), 0x24400, 0x24800))

#define FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x8130, 0x8140) || \
	 REG_RANGE((reg), 0x8800, 0x8A00) || \
	 REG_RANGE((reg), 0xD000, 0xD800) || \
	 REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x1A000, 0x1EA00) || \
	 REG_RANGE((reg), 0x30000, 0x40000))

#define FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg) \
	REG_RANGE((reg), 0x9400, 0x9800)

#define FORCEWAKE_GEN9_BLITTER_RANGE_OFFSET(reg) \
	((reg) < 0x40000 && \
	 !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) && \
	 !FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) && \
	 !FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) && \
	 !FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg))

static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into. */
	__raw_i915_write32(dev_priv, MI_MODE, 0);
}

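/*
 * FPGA_DBG carries a sticky "unclaimed register" bit that the hardware sets
 * when an MMIO access is not claimed by any unit (typically a register in a
 * powered-down well).  With i915.mmio_debug set, the helper below checks and
 * clears that bit around every access so the offending register can be
 * reported; otherwise only a one-shot detection is done on the write path.
 */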
static void
hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv, u32 reg, bool read,
			bool before)
{
	const char *op = read ? "reading" : "writing to";
	const char *when = before ? "before" : "after";

	if (!i915.mmio_debug)
		return;

	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
		WARN(1, "Unclaimed register detected %s %s register 0x%x\n",
		     when, op, reg);
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
		i915.mmio_debug--; /* Only report the first N failures */
	}
}

static void
hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv)
{
	static bool mmio_debug_once = true;

	if (i915.mmio_debug || !mmio_debug_once)
		return;

	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
		DRM_DEBUG("Unclaimed register detected, "
			  "enabling oneshot unclaimed register reporting. "
			  "Please use i915.mmio_debug=N for more information.\n");
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
		i915.mmio_debug = mmio_debug_once--;
	}
}

#define GEN2_READ_HEADER(x) \
	u##x val = 0; \
	assert_device_not_suspended(dev_priv);

#define GEN2_READ_FOOTER \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __gen2_read(x) \
static u##x \
gen2_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN2_READ_FOOTER; \
}

#define __gen5_read(x) \
static u##x \
gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	ilk_dummy_write(dev_priv); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN2_READ_FOOTER; \
}

__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen2_read(8)
__gen2_read(16)
__gen2_read(32)
__gen2_read(64)

#undef __gen5_read
#undef __gen2_read

#undef GEN2_READ_FOOTER
#undef GEN2_READ_HEADER

#define GEN6_READ_HEADER(x) \
	unsigned long irqflags; \
	u##x val = 0; \
	assert_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define GEN6_READ_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

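/*
 * Helper for the gen6+ read/write handlers: take the requested forcewake
 * domains if they are not already held, bumping each domain's wake_count and
 * arming its release timer, so the wakeref persists briefly after the access
 * and back-to-back register accesses avoid repeating the hardware handshake.
 */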
static inline void __force_wake_get(struct drm_i915_private *dev_priv,
				    enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	enum forcewake_domain_id id;

	if (WARN_ON(!fw_domains))
		return;

	/* Ideally GCC would constant-fold and eliminate this loop */
	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
		if (domain->wake_count) {
			fw_domains &= ~(1 << id);
			continue;
		}

		domain->wake_count++;
		fw_domain_arm_timer(domain);
	}

	if (fw_domains)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}

#define __vgpu_read(x) \
static u##x \
vgpu_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	GEN6_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

#define __gen6_read(x) \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	GEN6_READ_HEADER(x); \
	hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
	if (NEEDS_FORCE_WAKE(reg)) \
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
	val = __raw_i915_read##x(dev_priv, reg); \
	hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
	GEN6_READ_FOOTER; \
}

#define __vlv_read(x) \
static u##x \
vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	GEN6_READ_HEADER(x); \
	if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) \
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
	else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) \
		__force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

#define __chv_read(x) \
static u##x \
chv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	GEN6_READ_HEADER(x); \
	if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) \
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
	else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) \
		__force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
	else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) \
		__force_wake_get(dev_priv, \
				 FORCEWAKE_RENDER | FORCEWAKE_MEDIA); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

#define SKL_NEEDS_FORCE_WAKE(reg) \
	 ((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))

#define __gen9_read(x) \
static u##x \
gen9_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
	if (!SKL_NEEDS_FORCE_WAKE(reg)) \
		fw_engine = 0; \
	else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) \
		fw_engine = FORCEWAKE_RENDER; \
	else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) \
		fw_engine = FORCEWAKE_MEDIA; \
	else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) \
		fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
	else \
		fw_engine = FORCEWAKE_BLITTER; \
	if (fw_engine) \
		__force_wake_get(dev_priv, fw_engine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
	GEN6_READ_FOOTER; \
}

__vgpu_read(8)
__vgpu_read(16)
__vgpu_read(32)
__vgpu_read(64)
__gen9_read(8)
__gen9_read(16)
__gen9_read(32)
__gen9_read(64)
__chv_read(8)
__chv_read(16)
__chv_read(32)
__chv_read(64)
__vlv_read(8)
__vlv_read(16)
__vlv_read(32)
__vlv_read(64)
__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)

#undef __gen9_read
#undef __chv_read
#undef __vlv_read
#undef __gen6_read
#undef __vgpu_read
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER

#define GEN2_WRITE_HEADER \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_device_not_suspended(dev_priv); \

#define GEN2_WRITE_FOOTER

#define __gen2_write(x) \
static void \
gen2_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN2_WRITE_FOOTER; \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	ilk_dummy_write(dev_priv); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN2_WRITE_FOOTER; \
}

__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen5_write(64)
__gen2_write(8)
__gen2_write(16)
__gen2_write(32)
__gen2_write(64)

#undef __gen5_write
#undef __gen2_write

#undef GEN2_WRITE_FOOTER
#undef GEN2_WRITE_HEADER

#define GEN6_WRITE_HEADER \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define GEN6_WRITE_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)

#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE(reg)) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	GEN6_WRITE_FOOTER; \
}

#define __hsw_write(x) \
static void \
hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE(reg)) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
	hsw_unclaimed_reg_detect(dev_priv); \
	GEN6_WRITE_FOOTER; \
}

#define __vgpu_write(x) \
static void vgpu_write##x(struct drm_i915_private *dev_priv, \
			  off_t reg, u##x val, bool trace) { \
	GEN6_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}

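/*
 * Writes to the registers listed below are shadowed by the hardware on
 * gen8+: the value is latched even while the GT is power gated, so the
 * write path can skip the forcewake dance for them.
 */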
static const u32 gen8_shadowed_regs[] = {
	FORCEWAKE_MT,
	GEN6_RPNSWREQ,
	GEN6_RC_VIDEO_FREQ,
	RING_TAIL(RENDER_RING_BASE),
	RING_TAIL(GEN6_BSD_RING_BASE),
	RING_TAIL(VEBOX_RING_BASE),
	RING_TAIL(BLT_RING_BASE),
	/* TODO: Other registers are not yet used */
};

static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
{
	int i;
	for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
		if (reg == gen8_shadowed_regs[i])
			return true;

	return false;
}

#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	GEN6_WRITE_HEADER; \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
	if (reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg)) \
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
	__raw_i915_write##x(dev_priv, reg, val); \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
	hsw_unclaimed_reg_detect(dev_priv); \
	GEN6_WRITE_FOOTER; \
}

#define __chv_write(x) \
static void \
chv_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	bool shadowed = is_gen8_shadowed(dev_priv, reg); \
	GEN6_WRITE_HEADER; \
	if (!shadowed) { \
		if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) \
			__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
		else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) \
			__force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
		else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) \
			__force_wake_get(dev_priv, FORCEWAKE_RENDER | FORCEWAKE_MEDIA); \
	} \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}

static const u32 gen9_shadowed_regs[] = {
	RING_TAIL(RENDER_RING_BASE),
	RING_TAIL(GEN6_BSD_RING_BASE),
	RING_TAIL(VEBOX_RING_BASE),
	RING_TAIL(BLT_RING_BASE),
	FORCEWAKE_BLITTER_GEN9,
	FORCEWAKE_RENDER_GEN9,
	FORCEWAKE_MEDIA_GEN9,
	GEN6_RPNSWREQ,
	GEN6_RC_VIDEO_FREQ,
	/* TODO: Other registers are not yet used */
};

static bool is_gen9_shadowed(struct drm_i915_private *dev_priv, u32 reg)
{
	int i;
	for (i = 0; i < ARRAY_SIZE(gen9_shadowed_regs); i++)
		if (reg == gen9_shadowed_regs[i])
			return true;

	return false;
}

#define __gen9_write(x) \
static void \
gen9_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, \
		bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
	if (!SKL_NEEDS_FORCE_WAKE(reg) || \
	    is_gen9_shadowed(dev_priv, reg)) \
		fw_engine = 0; \
	else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) \
		fw_engine = FORCEWAKE_RENDER; \
	else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) \
		fw_engine = FORCEWAKE_MEDIA; \
	else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) \
		fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
	else \
		fw_engine = FORCEWAKE_BLITTER; \
	if (fw_engine) \
		__force_wake_get(dev_priv, fw_engine); \
	__raw_i915_write##x(dev_priv, reg, val); \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
	hsw_unclaimed_reg_detect(dev_priv); \
	GEN6_WRITE_FOOTER; \
}

__gen9_write(8)
__gen9_write(16)
__gen9_write(32)
__gen9_write(64)
__chv_write(8)
__chv_write(16)
__chv_write(32)
__chv_write(64)
__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
__gen8_write(64)
__hsw_write(8)
__hsw_write(16)
__hsw_write(32)
__hsw_write(64)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)
__gen6_write(64)
__vgpu_write(8)
__vgpu_write(16)
__vgpu_write(32)
__vgpu_write(64)

#undef __gen9_write
#undef __chv_write
#undef __gen8_write
#undef __hsw_write
#undef __gen6_write
#undef __vgpu_write
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER

#define ASSIGN_WRITE_MMIO_VFUNCS(x) \
do { \
	dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
	dev_priv->uncore.funcs.mmio_writew = x##_write16; \
	dev_priv->uncore.funcs.mmio_writel = x##_write32; \
	dev_priv->uncore.funcs.mmio_writeq = x##_write64; \
} while (0)

#define ASSIGN_READ_MMIO_VFUNCS(x) \
do { \
	dev_priv->uncore.funcs.mmio_readb = x##_read8; \
	dev_priv->uncore.funcs.mmio_readw = x##_read16; \
	dev_priv->uncore.funcs.mmio_readl = x##_read32; \
	dev_priv->uncore.funcs.mmio_readq = x##_read64; \
} while (0)


static void fw_domain_init(struct drm_i915_private *dev_priv,
			   enum forcewake_domain_id domain_id,
			   u32 reg_set, u32 reg_ack)
{
	struct intel_uncore_forcewake_domain *d;

	if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
		return;

	d = &dev_priv->uncore.fw_domain[domain_id];

	WARN_ON(d->wake_count);

	d->wake_count = 0;
	d->reg_set = reg_set;
	d->reg_ack = reg_ack;

	if (IS_GEN6(dev_priv)) {
		d->val_reset = 0;
		d->val_set = FORCEWAKE_KERNEL;
		d->val_clear = 0;
	} else {
		/* WaRsClearFWBitsAtReset:bdw,skl */
		d->val_reset = _MASKED_BIT_DISABLE(0xffff);
		d->val_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
		d->val_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
	}

	if (IS_VALLEYVIEW(dev_priv))
		d->reg_post = FORCEWAKE_ACK_VLV;
	else if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv) || IS_GEN8(dev_priv))
		d->reg_post = ECOBUS;
	else
		d->reg_post = 0;

	d->i915 = dev_priv;
	d->id = domain_id;

	setup_timer(&d->timer, intel_uncore_fw_release_timer, (unsigned long)d);

	dev_priv->uncore.fw_domains |= (1 << domain_id);

	fw_domain_reset(d);
}

static void intel_uncore_fw_domains_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev_priv->dev)->gen <= 5)
		return;

	if (IS_GEN9(dev)) {
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_RENDER_GEN9,
			       FORCEWAKE_ACK_RENDER_GEN9);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
			       FORCEWAKE_BLITTER_GEN9,
			       FORCEWAKE_ACK_BLITTER_GEN9);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
	} else if (IS_VALLEYVIEW(dev)) {
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
		if (!IS_CHERRYVIEW(dev))
			dev_priv->uncore.funcs.force_wake_put =
				fw_domains_put_with_fifo;
		else
			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		if (IS_HASWELL(dev))
			dev_priv->uncore.funcs.force_wake_put =
				fw_domains_put_with_fifo;
		else
			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
	} else if (IS_IVYBRIDGE(dev)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put =
			fw_domains_put_with_fifo;

		/* We need to init first for ECOBUS access and then
		 * determine later if we want to reinit, in case MT access is
		 * not working. At this stage we don't know which flavour this
		 * ivb is, so it is better to also reset the gen6 fw registers
		 * before the ecobus check.
		 */

		__raw_i915_write32(dev_priv, FORCEWAKE, 0);
		__raw_posting_read(dev_priv, ECOBUS);

		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_MT_ACK);

		mutex_lock(&dev->struct_mutex);
		fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL);
		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
		fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL);
		mutex_unlock(&dev->struct_mutex);

		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
			DRM_INFO("when using vblank-synced partial screen updates.\n");
			fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE, FORCEWAKE_ACK);
		}
	} else if (IS_GEN6(dev)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put =
			fw_domains_put_with_fifo;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE, FORCEWAKE_ACK);
	}

	/* All future platforms are expected to require complex power gating */
	WARN_ON(dev_priv->uncore.fw_domains == 0);
}

void intel_uncore_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	i915_check_vgpu(dev);

	intel_uncore_ellc_detect(dev);
	intel_uncore_fw_domains_init(dev);
	__intel_uncore_early_sanitize(dev, false);

	switch (INTEL_INFO(dev)->gen) {
	default:
	case 9:
		ASSIGN_WRITE_MMIO_VFUNCS(gen9);
		ASSIGN_READ_MMIO_VFUNCS(gen9);
		break;
	case 8:
		if (IS_CHERRYVIEW(dev)) {
			ASSIGN_WRITE_MMIO_VFUNCS(chv);
			ASSIGN_READ_MMIO_VFUNCS(chv);

		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(gen8);
			ASSIGN_READ_MMIO_VFUNCS(gen6);
		}
		break;
	case 7:
	case 6:
		if (IS_HASWELL(dev)) {
			ASSIGN_WRITE_MMIO_VFUNCS(hsw);
		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(gen6);
		}

		if (IS_VALLEYVIEW(dev)) {
			ASSIGN_READ_MMIO_VFUNCS(vlv);
		} else {
			ASSIGN_READ_MMIO_VFUNCS(gen6);
		}
		break;
	case 5:
		ASSIGN_WRITE_MMIO_VFUNCS(gen5);
		ASSIGN_READ_MMIO_VFUNCS(gen5);
		break;
	case 4:
	case 3:
	case 2:
		ASSIGN_WRITE_MMIO_VFUNCS(gen2);
		ASSIGN_READ_MMIO_VFUNCS(gen2);
		break;
	}

	if (intel_vgpu_active(dev)) {
		ASSIGN_WRITE_MMIO_VFUNCS(vgpu);
		ASSIGN_READ_MMIO_VFUNCS(vgpu);
	}

	i915_check_and_clear_faults(dev);
}
#undef ASSIGN_WRITE_MMIO_VFUNCS
#undef ASSIGN_READ_MMIO_VFUNCS

void intel_uncore_fini(struct drm_device *dev)
{
	/* Paranoia: make sure we have disabled everything before we exit. */
	intel_uncore_sanitize(dev);
	intel_uncore_forcewake_reset(dev, false);
}

#define GEN_RANGE(l, h) GENMASK(h, l)

static const struct register_whitelist {
	uint64_t offset;
	uint32_t size;
	/* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
	uint32_t gen_bitmask;
} whitelist[] = {
	{ RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 9) },
};

int i915_reg_read_ioctl(struct drm_device *dev,
			void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reg_read *reg = data;
	struct register_whitelist const *entry = whitelist;
	unsigned size;
	u64 offset;
	int i, ret = 0;

	for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
		if (entry->offset == (reg->offset & -entry->size) &&
		    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
			break;
	}

	if (i == ARRAY_SIZE(whitelist))
		return -EINVAL;

	/* We use the low bits to encode extra flags as the register should
	 * be naturally aligned (and those that are not so aligned merely
	 * limit the available flags for that register).
	 */
	offset = entry->offset;
	size = entry->size;
	size |= reg->offset ^ offset;

	intel_runtime_pm_get(dev_priv);

	switch (size) {
	case 8 | 1:
		reg->val = I915_READ64_2x32(offset, offset+4);
		break;
	case 8:
		reg->val = I915_READ64(offset);
		break;
	case 4:
		reg->val = I915_READ(offset);
		break;
	case 2:
		reg->val = I915_READ16(offset);
		break;
	case 1:
		reg->val = I915_READ8(offset);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}

int i915_get_reset_stats_ioctl(struct drm_device *dev,
			       void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reset_stats *args = data;
	struct i915_ctx_hang_stats *hs;
	struct intel_context *ctx;
	int ret;

	if (args->flags || args->pad)
		return -EINVAL;

	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file->driver_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}
	hs = &ctx->hang_stats;

	if (capable(CAP_SYS_ADMIN))
		args->reset_count = i915_reset_count(&dev_priv->gpu_error);
	else
		args->reset_count = 0;

	args->batch_active = hs->batch_active;
	args->batch_pending = hs->batch_pending;

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_reset_complete(struct drm_device *dev)
{
	u8 gdrst;
	pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_STATUS) == 0;
}

static int i915_do_reset(struct drm_device *dev)
{
	/* assert reset for at least 20 usec */
	pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	udelay(20);
	pci_write_config_byte(dev->pdev, I915_GDRST, 0);

	return wait_for(i915_reset_complete(dev), 500);
}

static int g4x_reset_complete(struct drm_device *dev)
{
	u8 gdrst;
	pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int g33_do_reset(struct drm_device *dev)
{
	pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	return wait_for(g4x_reset_complete(dev), 500);
}

static int g4x_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	pci_write_config_byte(dev->pdev, I915_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(g4x_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(dev->pdev, I915_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	ret = wait_for(g4x_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(dev->pdev, I915_GDRST, 0);

	return 0;
}

static int ironlake_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	I915_WRITE(ILK_GDSR,
		   ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
	ret = wait_for((I915_READ(ILK_GDSR) &
			ILK_GRDOM_RESET_ENABLE) == 0, 500);
	if (ret)
		return ret;

	I915_WRITE(ILK_GDSR,
		   ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
	ret = wait_for((I915_READ(ILK_GDSR) &
			ILK_GRDOM_RESET_ENABLE) == 0, 500);
	if (ret)
		return ret;

	I915_WRITE(ILK_GDSR, 0);

	return 0;
}

static int gen6_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* Reset the chip */

	/* GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	__raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL);

	/* Spin waiting for the device to ack the reset request */
	ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);

	intel_uncore_forcewake_reset(dev, true);

	return ret;
}

static int wait_for_register(struct drm_i915_private *dev_priv,
			     const u32 reg,
			     const u32 mask,
			     const u32 value,
			     const unsigned long timeout_ms)
{
	return wait_for((I915_READ(reg) & mask) == value, timeout_ms);
}

static int gen8_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *engine;
	int i;

	for_each_ring(engine, dev_priv, i) {
		I915_WRITE(RING_RESET_CTL(engine->mmio_base),
			   _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));

		if (wait_for_register(dev_priv,
				      RING_RESET_CTL(engine->mmio_base),
				      RESET_CTL_READY_TO_RESET,
				      RESET_CTL_READY_TO_RESET,
				      700)) {
			DRM_ERROR("%s: reset request timeout\n", engine->name);
			goto not_ready;
		}
	}

	return gen6_do_reset(dev);

not_ready:
	for_each_ring(engine, dev_priv, i)
		I915_WRITE(RING_RESET_CTL(engine->mmio_base),
			   _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));

	return -EIO;
}

static int (*intel_get_gpu_reset(struct drm_device *dev))(struct drm_device *)
{
	if (!i915.reset)
		return NULL;

	if (INTEL_INFO(dev)->gen >= 8)
		return gen8_do_reset;
	else if (INTEL_INFO(dev)->gen >= 6)
		return gen6_do_reset;
	else if (IS_GEN5(dev))
		return ironlake_do_reset;
	else if (IS_G4X(dev))
		return g4x_do_reset;
	else if (IS_G33(dev))
		return g33_do_reset;
	else if (INTEL_INFO(dev)->gen >= 3)
		return i915_do_reset;
	else
		return NULL;
}

int intel_gpu_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int (*reset)(struct drm_device *);
	int ret;

	reset = intel_get_gpu_reset(dev);
	if (reset == NULL)
		return -ENODEV;

	/* If the power well sleeps during the reset, the reset
	 * request may be dropped and never completes (causing -EIO).
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
	ret = reset(dev);
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}

bool intel_has_gpu_reset(struct drm_device *dev)
{
	return intel_get_gpu_reset(dev) != NULL;
}

void intel_uncore_check_errors(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev) &&
	    (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
		DRM_ERROR("Unclaimed register before interrupt\n");
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}