This source file includes following definitions.
- fbc_supported
- no_fbc_on_multiple_pipes
- get_crtc_fence_y_offset
- intel_fbc_get_plane_source_size
- intel_fbc_calculate_cfb_size
- i8xx_fbc_deactivate
- i8xx_fbc_activate
- i8xx_fbc_is_active
- g4x_fbc_activate
- g4x_fbc_deactivate
- g4x_fbc_is_active
- intel_fbc_recompress
- ilk_fbc_activate
- ilk_fbc_deactivate
- ilk_fbc_is_active
- gen7_fbc_activate
- intel_fbc_hw_is_active
- intel_fbc_hw_activate
- intel_fbc_hw_deactivate
- intel_fbc_is_active
- intel_fbc_deactivate
- multiple_pipes_ok
- find_compression_threshold
- intel_fbc_alloc_cfb
- __intel_fbc_cleanup_cfb
- intel_fbc_cleanup_cfb
- stride_is_valid
- pixel_format_is_valid
- intel_fbc_hw_tracking_covers_screen
- intel_fbc_update_state_cache
- intel_fbc_can_activate
- intel_fbc_can_enable
- intel_fbc_get_reg_params
- intel_fbc_pre_update
- __intel_fbc_disable
- __intel_fbc_post_update
- intel_fbc_post_update
- intel_fbc_get_frontbuffer_bit
- intel_fbc_invalidate
- intel_fbc_flush
- intel_fbc_choose_crtc
- intel_fbc_enable
- intel_fbc_disable
- intel_fbc_global_disable
- intel_fbc_underrun_work_fn
- intel_fbc_reset_underrun
- intel_fbc_handle_fifo_underrun_irq
- intel_fbc_init_pipe_state
- intel_sanitize_fbc_option
- need_fbc_vtd_wa
- intel_fbc_init
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41 #include <drm/drm_fourcc.h>
42
43 #include "i915_drv.h"
44 #include "intel_display_types.h"
45 #include "intel_fbc.h"
46 #include "intel_frontbuffer.h"
47
48 static inline bool fbc_supported(struct drm_i915_private *dev_priv)
49 {
50 return HAS_FBC(dev_priv);
51 }
52
53 static inline bool no_fbc_on_multiple_pipes(struct drm_i915_private *dev_priv)
54 {
55 return INTEL_GEN(dev_priv) <= 3;
56 }
57
58
59
60
61
62
63
64
65
66 static unsigned int get_crtc_fence_y_offset(struct intel_fbc *fbc)
67 {
68 return fbc->state_cache.plane.y - fbc->state_cache.plane.adjusted_y;
69 }
70
71
72
73
74
75
76 static void intel_fbc_get_plane_source_size(struct intel_fbc_state_cache *cache,
77 int *width, int *height)
78 {
79 if (width)
80 *width = cache->plane.src_w;
81 if (height)
82 *height = cache->plane.src_h;
83 }
84
85 static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv,
86 struct intel_fbc_state_cache *cache)
87 {
88 int lines;
89
90 intel_fbc_get_plane_source_size(cache, NULL, &lines);
91 if (IS_GEN(dev_priv, 7))
92 lines = min(lines, 2048);
93 else if (INTEL_GEN(dev_priv) >= 8)
94 lines = min(lines, 2560);
95
96
97 return lines * cache->fb.stride;
98 }
99
/* Disable FBC on i8xx-style hardware and wait for compression to stop. */
static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
{
	u32 fbc_ctl;

	/* Nothing to do if FBC is already off. */
	fbc_ctl = I915_READ(FBC_CONTROL);
	if ((fbc_ctl & FBC_CTL_EN) == 0)
		return;

	fbc_ctl &= ~FBC_CTL_EN;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	/* Wait (up to 10ms) for any in-flight compression to finish. */
	if (intel_de_wait_for_clear(dev_priv, FBC_STATUS,
				    FBC_STAT_COMPRESSING, 10)) {
		DRM_DEBUG_KMS("FBC idle timed out\n");
		return;
	}
}
119
/*
 * Program and enable FBC on i8xx-style (gen2-4, non-GM45) hardware using
 * the parameters previously captured in dev_priv->fbc.params.
 */
static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
	int cfb_pitch;
	int i;
	u32 fbc_ctl;

	/* The CFB pitch cannot exceed the framebuffer stride. */
	cfb_pitch = params->cfb_size / FBC_LL_SIZE;
	if (params->fb.stride < cfb_pitch)
		cfb_pitch = params->fb.stride;

	/* The pitch field is in 32B (gen2) or 64B (gen3/4) units, minus one. */
	if (IS_GEN(dev_priv, 2))
		cfb_pitch = (cfb_pitch / 32) - 1;
	else
		cfb_pitch = (cfb_pitch / 64) - 1;

	/* Clear the old tag state. */
	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
		I915_WRITE(FBC_TAG(i), 0);

	if (IS_GEN(dev_priv, 4)) {
		u32 fbc_ctl2;

		/* Gen4 has an extra control register incl. the fence offset. */
		fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
		fbc_ctl2 |= FBC_CTL_PLANE(params->crtc.i9xx_plane);
		I915_WRITE(FBC_CONTROL2, fbc_ctl2);
		I915_WRITE(FBC_FENCE_OFF, params->crtc.fence_y_offset);
	}

	/* Enable it, preserving only the interval field of the old value. */
	fbc_ctl = I915_READ(FBC_CONTROL);
	fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
	fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
	if (IS_I945GM(dev_priv))
		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special idle handling */
	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
	fbc_ctl |= params->vma->fence->id;
	I915_WRITE(FBC_CONTROL, fbc_ctl);
}
162
163 static bool i8xx_fbc_is_active(struct drm_i915_private *dev_priv)
164 {
165 return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
166 }
167
/* Program and enable FBC on GM45/G4X hardware (DPFC register set). */
static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
	u32 dpfc_ctl;

	dpfc_ctl = DPFC_CTL_PLANE(params->crtc.i9xx_plane) | DPFC_SR_EN;
	/* 16bpp formats need a tighter compression limit. */
	if (params->fb.format->cpp[0] == 2)
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
	else
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;

	if (params->flags & PLANE_HAS_FENCE) {
		dpfc_ctl |= DPFC_CTL_FENCE_EN | params->vma->fence->id;
		I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
	} else {
		I915_WRITE(DPFC_FENCE_YOFF, 0);
	}

	/* Enable it... */
	I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
}
189
190 static void g4x_fbc_deactivate(struct drm_i915_private *dev_priv)
191 {
192 u32 dpfc_ctl;
193
194
195 dpfc_ctl = I915_READ(DPFC_CONTROL);
196 if (dpfc_ctl & DPFC_CTL_EN) {
197 dpfc_ctl &= ~DPFC_CTL_EN;
198 I915_WRITE(DPFC_CONTROL, dpfc_ctl);
199 }
200 }
201
202 static bool g4x_fbc_is_active(struct drm_i915_private *dev_priv)
203 {
204 return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
205 }
206
207
/*
 * Force the hardware to recompress ("nuke") the current framebuffer
 * contents. The posting read flushes the write before returning.
 */
static void intel_fbc_recompress(struct drm_i915_private *dev_priv)
{
	I915_WRITE(MSG_FBC_REND_STATE, FBC_REND_NUKE);
	POSTING_READ(MSG_FBC_REND_STATE);
}
213
/* Program and enable FBC on ILK/SNB (gen5/6) hardware. */
static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
	u32 dpfc_ctl;
	int threshold = dev_priv->fbc.threshold;

	dpfc_ctl = DPFC_CTL_PLANE(params->crtc.i9xx_plane);
	/* 16bpp needs one step tighter compression. */
	if (params->fb.format->cpp[0] == 2)
		threshold++;

	switch (threshold) {
	case 4:
	case 3:
		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
		break;
	case 2:
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
		break;
	case 1:
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
		break;
	}

	if (params->flags & PLANE_HAS_FENCE) {
		dpfc_ctl |= DPFC_CTL_FENCE_EN;
		if (IS_GEN(dev_priv, 5))
			dpfc_ctl |= params->vma->fence->id;
		if (IS_GEN(dev_priv, 6)) {
			/* SNB programs the CPU fence via a separate register. */
			I915_WRITE(SNB_DPFC_CTL_SA,
				   SNB_CPU_FENCE_ENABLE |
				   params->vma->fence->id);
			I915_WRITE(DPFC_CPU_FENCE_OFFSET,
				   params->crtc.fence_y_offset);
		}
	} else {
		if (IS_GEN(dev_priv, 6)) {
			I915_WRITE(SNB_DPFC_CTL_SA, 0);
			I915_WRITE(DPFC_CPU_FENCE_OFFSET, 0);
		}
	}

	I915_WRITE(ILK_DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
	I915_WRITE(ILK_FBC_RT_BASE,
		   i915_ggtt_offset(params->vma) | ILK_FBC_RT_VALID);

	/* Enable it... */
	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	intel_fbc_recompress(dev_priv);
}
263
264 static void ilk_fbc_deactivate(struct drm_i915_private *dev_priv)
265 {
266 u32 dpfc_ctl;
267
268
269 dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
270 if (dpfc_ctl & DPFC_CTL_EN) {
271 dpfc_ctl &= ~DPFC_CTL_EN;
272 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
273 }
274 }
275
276 static bool ilk_fbc_is_active(struct drm_i915_private *dev_priv)
277 {
278 return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
279 }
280
/* Program and enable FBC on gen7+ hardware, applying per-platform W/As. */
static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
	u32 dpfc_ctl;
	int threshold = dev_priv->fbc.threshold;

	/* gen9 (except GLK): override the FBC stride for non-X-tiled fbs. */
	if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv)) {
		u32 val = I915_READ(CHICKEN_MISC_4);

		val &= ~(FBC_STRIDE_OVERRIDE | FBC_STRIDE_MASK);

		if (i915_gem_object_get_tiling(params->vma->obj) !=
		    I915_TILING_X)
			val |= FBC_STRIDE_OVERRIDE | params->gen9_wa_cfb_stride;

		I915_WRITE(CHICKEN_MISC_4, val);
	}

	dpfc_ctl = 0;
	if (IS_IVYBRIDGE(dev_priv))
		dpfc_ctl |= IVB_DPFC_CTL_PLANE(params->crtc.i9xx_plane);

	/* 16bpp needs one step tighter compression. */
	if (params->fb.format->cpp[0] == 2)
		threshold++;

	switch (threshold) {
	case 4:
	case 3:
		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
		break;
	case 2:
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
		break;
	case 1:
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
		break;
	}

	if (params->flags & PLANE_HAS_FENCE) {
		dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
		I915_WRITE(SNB_DPFC_CTL_SA,
			   SNB_CPU_FENCE_ENABLE |
			   params->vma->fence->id);
		I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);
	} else {
		I915_WRITE(SNB_DPFC_CTL_SA,0);
		I915_WRITE(DPFC_CPU_FENCE_OFFSET, 0);
	}

	if (dev_priv->fbc.false_color)
		dpfc_ctl |= FBC_CTL_FALSE_COLOR;

	if (IS_IVYBRIDGE(dev_priv)) {
		/* IVB: disable the FBC queue (chicken bit workaround). */
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		/* HSW/BDW: same workaround via the per-pipe chicken register. */
		I915_WRITE(CHICKEN_PIPESL_1(params->crtc.pipe),
			   I915_READ(CHICKEN_PIPESL_1(params->crtc.pipe)) |
			   HSW_FBCQ_DIS);
	}

	if (IS_GEN(dev_priv, 11))
		/* gen11: compressed dummy pixel chicken bit. */
		I915_WRITE(ILK_DPFC_CHICKEN, ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL);

	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	intel_fbc_recompress(dev_priv);
}
354
355 static bool intel_fbc_hw_is_active(struct drm_i915_private *dev_priv)
356 {
357 if (INTEL_GEN(dev_priv) >= 5)
358 return ilk_fbc_is_active(dev_priv);
359 else if (IS_GM45(dev_priv))
360 return g4x_fbc_is_active(dev_priv);
361 else
362 return i8xx_fbc_is_active(dev_priv);
363 }
364
365 static void intel_fbc_hw_activate(struct drm_i915_private *dev_priv)
366 {
367 struct intel_fbc *fbc = &dev_priv->fbc;
368
369 fbc->active = true;
370
371 if (INTEL_GEN(dev_priv) >= 7)
372 gen7_fbc_activate(dev_priv);
373 else if (INTEL_GEN(dev_priv) >= 5)
374 ilk_fbc_activate(dev_priv);
375 else if (IS_GM45(dev_priv))
376 g4x_fbc_activate(dev_priv);
377 else
378 i8xx_fbc_activate(dev_priv);
379 }
380
381 static void intel_fbc_hw_deactivate(struct drm_i915_private *dev_priv)
382 {
383 struct intel_fbc *fbc = &dev_priv->fbc;
384
385 fbc->active = false;
386
387 if (INTEL_GEN(dev_priv) >= 5)
388 ilk_fbc_deactivate(dev_priv);
389 else if (IS_GM45(dev_priv))
390 g4x_fbc_deactivate(dev_priv);
391 else
392 i8xx_fbc_deactivate(dev_priv);
393 }
394
395
396
397
398
399
400
401
402
403
/**
 * intel_fbc_is_active - Is FBC active?
 * @dev_priv: i915 device instance
 *
 * Reports the software-tracked FBC state (dev_priv->fbc.active), not a
 * fresh read of the hardware registers.
 */
bool intel_fbc_is_active(struct drm_i915_private *dev_priv)
{
	return dev_priv->fbc.active;
}
408
/*
 * Deactivate FBC in hardware (if active) and record @reason as the
 * current no_fbc_reason. Caller must hold fbc->lock.
 */
static void intel_fbc_deactivate(struct drm_i915_private *dev_priv,
				 const char *reason)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	WARN_ON(!mutex_is_locked(&fbc->lock));

	if (fbc->active)
		intel_fbc_hw_deactivate(dev_priv);

	fbc->no_fbc_reason = reason;
}
421
422 static bool multiple_pipes_ok(struct intel_crtc *crtc,
423 struct intel_plane_state *plane_state)
424 {
425 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
426 struct intel_fbc *fbc = &dev_priv->fbc;
427 enum pipe pipe = crtc->pipe;
428
429
430 if (!no_fbc_on_multiple_pipes(dev_priv))
431 return true;
432
433 if (plane_state->base.visible)
434 fbc->visible_pipes_mask |= (1 << pipe);
435 else
436 fbc->visible_pipes_mask &= ~(1 << pipe);
437
438 return (fbc->visible_pipes_mask & ~(1 << pipe)) != 0;
439 }
440
/*
 * Reserve stolen memory for the CFB, retrying with progressively smaller
 * allocations.
 *
 * Returns the compression threshold the final allocation supports (1 =
 * full-size CFB; larger values mean the HW must compress harder), or 0 if
 * nothing usable could be allocated.
 *
 * Note: "size <<= 1" / "size >>= 1" intentionally modify the local @size
 * as they pass it -- the first attempt over-allocates 2x to reduce future
 * reallocations, later attempts halve the request each round.
 */
static int find_compression_threshold(struct drm_i915_private *dev_priv,
				      struct drm_mm_node *node,
				      int size,
				      int fb_cpp)
{
	int compression_threshold = 1;
	int ret;
	u64 end;

	/*
	 * Keep the allocation below the top of stolen on BDW / gen9 BC;
	 * NOTE(review): presumably the FBC HW cannot reach the reserved
	 * 8 MiB at the top of stolen on those platforms -- confirm in bspec.
	 */
	if (IS_BROADWELL(dev_priv) || IS_GEN9_BC(dev_priv))
		end = resource_size(&dev_priv->dsm) - 8 * 1024 * 1024;
	else
		end = U64_MAX;

	/* Try to over-allocate to reduce reallocations and fragmentation. */
	ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size <<= 1,
						   4096, 0, end);
	if (ret == 0)
		return compression_threshold;

again:
	/* HW's ability to limit the CFB is 1:4. */
	if (compression_threshold > 4 ||
	    (fb_cpp == 2 && compression_threshold == 2))
		return 0;

	ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size >>= 1,
						   4096, 0, end);
	if (ret && INTEL_GEN(dev_priv) <= 4) {
		return 0;
	} else if (ret) {
		compression_threshold <<= 1;
		goto again;
	} else {
		return compression_threshold;
	}
}
489
/*
 * Allocate the compressed framebuffer (and, on pre-GM45 hardware, the
 * line-length buffer) out of stolen memory and program its base
 * address(es). Returns 0 on success, -ENOSPC when stolen memory could not
 * accommodate the request.
 */
static int intel_fbc_alloc_cfb(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct drm_mm_node *uninitialized_var(compressed_llb);
	int size, fb_cpp, ret;

	WARN_ON(drm_mm_node_allocated(&fbc->compressed_fb));

	size = intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache);
	fb_cpp = fbc->state_cache.fb.format->cpp[0];

	ret = find_compression_threshold(dev_priv, &fbc->compressed_fb,
					 size, fb_cpp);
	if (!ret)
		goto err_llb;
	else if (ret > 1) {
		DRM_INFO_ONCE("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
	}

	fbc->threshold = ret;

	if (INTEL_GEN(dev_priv) >= 5)
		/* gen5+ takes an offset into stolen directly. */
		I915_WRITE(ILK_DPFC_CB_BASE, fbc->compressed_fb.start);
	else if (IS_GM45(dev_priv)) {
		I915_WRITE(DPFC_CB_BASE, fbc->compressed_fb.start);
	} else {
		/* i8xx also needs a separate line-length buffer in stolen. */
		compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
		if (!compressed_llb)
			goto err_fb;

		ret = i915_gem_stolen_insert_node(dev_priv, compressed_llb,
						  4096, 4096);
		if (ret)
			goto err_fb;

		fbc->compressed_llb = compressed_llb;

		/* Base registers are 32 bits; the sums must not overflow. */
		GEM_BUG_ON(range_overflows_t(u64, dev_priv->dsm.start,
					     fbc->compressed_fb.start,
					     U32_MAX));
		GEM_BUG_ON(range_overflows_t(u64, dev_priv->dsm.start,
					     fbc->compressed_llb->start,
					     U32_MAX));
		I915_WRITE(FBC_CFB_BASE,
			   dev_priv->dsm.start + fbc->compressed_fb.start);
		I915_WRITE(FBC_LL_BASE,
			   dev_priv->dsm.start + compressed_llb->start);
	}

	DRM_DEBUG_KMS("reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n",
		      fbc->compressed_fb.size, fbc->threshold);

	return 0;

err_fb:
	kfree(compressed_llb);
	i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);
err_llb:
	if (drm_mm_initialized(&dev_priv->mm.stolen))
		pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
	return -ENOSPC;
}
553
554 static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
555 {
556 struct intel_fbc *fbc = &dev_priv->fbc;
557
558 if (drm_mm_node_allocated(&fbc->compressed_fb))
559 i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);
560
561 if (fbc->compressed_llb) {
562 i915_gem_stolen_remove_node(dev_priv, fbc->compressed_llb);
563 kfree(fbc->compressed_llb);
564 }
565 }
566
/*
 * Locked wrapper around __intel_fbc_cleanup_cfb(); no-op when the
 * platform has no FBC hardware.
 */
void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))
		return;

	mutex_lock(&fbc->lock);
	__intel_fbc_cleanup_cfb(dev_priv);
	mutex_unlock(&fbc->lock);
}
578
579 static bool stride_is_valid(struct drm_i915_private *dev_priv,
580 unsigned int stride)
581 {
582
583 if (WARN_ON_ONCE((stride & (64 - 1)) != 0))
584 return false;
585
586
587 if (stride < 512)
588 return false;
589
590 if (IS_GEN(dev_priv, 2) || IS_GEN(dev_priv, 3))
591 return stride == 4096 || stride == 8192;
592
593 if (IS_GEN(dev_priv, 4) && !IS_G4X(dev_priv) && stride < 2048)
594 return false;
595
596 if (stride > 16384)
597 return false;
598
599 return true;
600 }
601
602 static bool pixel_format_is_valid(struct drm_i915_private *dev_priv,
603 u32 pixel_format)
604 {
605 switch (pixel_format) {
606 case DRM_FORMAT_XRGB8888:
607 case DRM_FORMAT_XBGR8888:
608 return true;
609 case DRM_FORMAT_XRGB1555:
610 case DRM_FORMAT_RGB565:
611
612 if (IS_GEN(dev_priv, 2))
613 return false;
614
615 if (IS_G4X(dev_priv))
616 return false;
617 return true;
618 default:
619 return false;
620 }
621 }
622
623
624
625
626
627
628
/*
 * Check whether the plane (including the adjusted x/y offsets within the
 * fb) fits inside the per-generation maximum surface size that FBC render
 * tracking can cover.
 */
static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	unsigned int effective_w, effective_h, max_w, max_h;

	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
		max_w = 5120;
		max_h = 4096;
	} else if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv)) {
		max_w = 4096;
		max_h = 4096;
	} else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		max_w = 4096;
		max_h = 2048;
	} else {
		max_w = 2048;
		max_h = 1536;
	}

	/*
	 * NOTE(review): effective_w/h are unsigned int while the helper
	 * takes int * -- verify this doesn't trip -Wincompatible-pointer-types.
	 */
	intel_fbc_get_plane_source_size(&fbc->state_cache, &effective_w,
					&effective_h);
	effective_w += fbc->state_cache.plane.adjusted_x;
	effective_h += fbc->state_cache.plane.adjusted_y;

	return effective_w <= max_w && effective_h <= max_h;
}
656
/*
 * Snapshot the plane/crtc/fb state FBC cares about into
 * fbc->state_cache, so later decisions don't have to chase the atomic
 * state (which may be gone by then).
 */
static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
					 struct intel_crtc_state *crtc_state,
					 struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_fbc_state_cache *cache = &fbc->state_cache;
	struct drm_framebuffer *fb = plane_state->base.fb;

	cache->vma = NULL;
	cache->flags = 0;

	cache->crtc.mode_flags = crtc_state->base.adjusted_mode.flags;
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		cache->crtc.hsw_bdw_pixel_rate = crtc_state->pixel_rate;

	cache->plane.rotation = plane_state->base.rotation;

	/*
	 * Src coordinates are already rotated by 270 degrees for the
	 * 90/270 degree plane rotation cases (src is in 16.16 fixed point).
	 */
	cache->plane.src_w = drm_rect_width(&plane_state->base.src) >> 16;
	cache->plane.src_h = drm_rect_height(&plane_state->base.src) >> 16;
	cache->plane.visible = plane_state->base.visible;
	cache->plane.adjusted_x = plane_state->color_plane[0].x;
	cache->plane.adjusted_y = plane_state->color_plane[0].y;
	cache->plane.y = plane_state->base.src.y1 >> 16;

	cache->plane.pixel_blend_mode = plane_state->base.pixel_blend_mode;

	/* The fb fields below are only meaningful for a visible plane. */
	if (!cache->plane.visible)
		return;

	cache->fb.format = fb->format;
	cache->fb.stride = fb->pitches[0];

	cache->vma = plane_state->vma;
	cache->flags = plane_state->flags;
	/* A fence flag without an actual fence would be a bug upstream. */
	if (WARN_ON(cache->flags & PLANE_HAS_FENCE && !cache->vma->fence))
		cache->flags &= ~PLANE_HAS_FENCE;
}
699
/*
 * Decide, from the cached state, whether FBC can actually be activated
 * now. On failure, fbc->no_fbc_reason records the first failing check
 * (so check order determines which reason is reported).
 */
static bool intel_fbc_can_activate(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_fbc_state_cache *cache = &fbc->state_cache;

	/* FIFO underruns keep FBC off until explicitly re-allowed. */
	if (fbc->underrun_detected) {
		fbc->no_fbc_reason = "underrun detected";
		return false;
	}

	/* cache->vma is only set when the plane is visible. */
	if (!cache->vma) {
		fbc->no_fbc_reason = "primary plane not visible";
		return false;
	}

	if (cache->crtc.mode_flags & DRM_MODE_FLAG_INTERLACE) {
		fbc->no_fbc_reason = "incompatible mode";
		return false;
	}

	if (!intel_fbc_hw_tracking_covers_screen(crtc)) {
		fbc->no_fbc_reason = "mode too large for compression";
		return false;
	}

	/*
	 * The hardware tracks modifications through the fence, so an
	 * unfenced (or untiled) framebuffer cannot be compressed.
	 */
	if (!(cache->flags & PLANE_HAS_FENCE)) {
		fbc->no_fbc_reason = "framebuffer not tiled or fenced";
		return false;
	}
	if (INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv) &&
	    cache->plane.rotation != DRM_MODE_ROTATE_0) {
		fbc->no_fbc_reason = "rotation unsupported";
		return false;
	}

	if (!stride_is_valid(dev_priv, cache->fb.stride)) {
		fbc->no_fbc_reason = "framebuffer stride not supported";
		return false;
	}

	if (!pixel_format_is_valid(dev_priv, cache->fb.format->format)) {
		fbc->no_fbc_reason = "pixel format is invalid";
		return false;
	}

	if (cache->plane.pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE &&
	    cache->fb.format->has_alpha) {
		fbc->no_fbc_reason = "per-pixel alpha blending is incompatible with FBC";
		return false;
	}

	/* HSW/BDW: FBC needs headroom between pixel rate and cdclk. */
	if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) &&
	    cache->crtc.hsw_bdw_pixel_rate >= dev_priv->cdclk.hw.cdclk * 95 / 100) {
		fbc->no_fbc_reason = "pixel rate is too big";
		return false;
	}

	/*
	 * The current CFB allocation (compressed_fb.size * threshold) must
	 * still be large enough for the cached state; the allocation is
	 * only made at enable time, so a later state change can outgrow it.
	 */
	if (intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache) >
	    fbc->compressed_fb.size * fbc->threshold) {
		fbc->no_fbc_reason = "CFB requirements changed";
		return false;
	}

	/*
	 * gen9/10: the plane Y offset must be 4-line aligned.
	 * NOTE(review): presumably a hardware restriction -- confirm in bspec.
	 */
	if (IS_GEN_RANGE(dev_priv, 9, 10) &&
	    (fbc->state_cache.plane.adjusted_y & 3)) {
		fbc->no_fbc_reason = "plane Y offset is misaligned";
		return false;
	}

	return true;
}
804
/*
 * Global (non-crtc-specific) checks for whether FBC may be enabled at
 * all; records the first failing check in fbc->no_fbc_reason.
 */
static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (intel_vgpu_active(dev_priv)) {
		fbc->no_fbc_reason = "VGPU is active";
		return false;
	}

	if (!i915_modparams.enable_fbc) {
		fbc->no_fbc_reason = "disabled per module param or by default";
		return false;
	}

	if (fbc->underrun_detected) {
		fbc->no_fbc_reason = "underrun detected";
		return false;
	}

	return true;
}
826
/*
 * Derive the register programming parameters from the state cache. The
 * params struct is what the per-generation activate functions consume.
 */
static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
				     struct intel_fbc_reg_params *params)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_fbc_state_cache *cache = &fbc->state_cache;

	/* Since all our fields are integer types, use memset here so the
	 * comparison function can rely on memcmp because the padding will be
	 * zero. */
	memset(params, 0, sizeof(*params));

	params->vma = cache->vma;
	params->flags = cache->flags;

	params->crtc.pipe = crtc->pipe;
	params->crtc.i9xx_plane = to_intel_plane(crtc->base.primary)->i9xx_plane;
	params->crtc.fence_y_offset = get_crtc_fence_y_offset(fbc);

	params->fb.format = cache->fb.format;
	params->fb.stride = cache->fb.stride;

	params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache);

	/* gen9 (except GLK) stride-override workaround value. */
	if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
		params->gen9_wa_cfb_stride = DIV_ROUND_UP(cache->plane.src_w,
						32 * fbc->threshold) * 8;
}
855
856 void intel_fbc_pre_update(struct intel_crtc *crtc,
857 struct intel_crtc_state *crtc_state,
858 struct intel_plane_state *plane_state)
859 {
860 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
861 struct intel_fbc *fbc = &dev_priv->fbc;
862 const char *reason = "update pending";
863
864 if (!fbc_supported(dev_priv))
865 return;
866
867 mutex_lock(&fbc->lock);
868
869 if (!multiple_pipes_ok(crtc, plane_state)) {
870 reason = "more than one pipe active";
871 goto deactivate;
872 }
873
874 if (!fbc->enabled || fbc->crtc != crtc)
875 goto unlock;
876
877 intel_fbc_update_state_cache(crtc, crtc_state, plane_state);
878 fbc->flip_pending = true;
879
880 deactivate:
881 intel_fbc_deactivate(dev_priv, reason);
882 unlock:
883 mutex_unlock(&fbc->lock);
884 }
885
886
887
888
889
890
891
892
/*
 * Disable FBC and release the CFB. Caller must hold fbc->lock, FBC must
 * be enabled but not active (i.e. already deactivated in hardware).
 */
static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_crtc *crtc = fbc->crtc;

	WARN_ON(!mutex_is_locked(&fbc->lock));
	WARN_ON(!fbc->enabled);
	WARN_ON(fbc->active);

	DRM_DEBUG_KMS("Disabling FBC on pipe %c\n", pipe_name(crtc->pipe));

	__intel_fbc_cleanup_cfb(dev_priv);

	fbc->enabled = false;
	fbc->crtc = NULL;
}
909
/*
 * Called after a plane update (with fbc->lock held): clear the pending
 * flip, rebuild the register params, and reactivate FBC in hardware if
 * all activation checks pass and no frontbuffer writes are outstanding.
 */
static void __intel_fbc_post_update(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;

	WARN_ON(!mutex_is_locked(&fbc->lock));

	if (!fbc->enabled || fbc->crtc != crtc)
		return;

	fbc->flip_pending = false;
	WARN_ON(fbc->active);

	/* The module param may have been flipped at runtime. */
	if (!i915_modparams.enable_fbc) {
		intel_fbc_deactivate(dev_priv, "disabled at runtime per module param");
		__intel_fbc_disable(dev_priv);

		return;
	}

	intel_fbc_get_reg_params(crtc, &fbc->params);

	if (!intel_fbc_can_activate(crtc))
		return;

	if (!fbc->busy_bits) {
		/* Clears any stale reason before activation. */
		intel_fbc_deactivate(dev_priv, "FBC enabled (active or scheduled)");
		intel_fbc_hw_activate(dev_priv);
	} else
		intel_fbc_deactivate(dev_priv, "frontbuffer write");
}
941
/* Locked wrapper around __intel_fbc_post_update(). */
void intel_fbc_post_update(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))
		return;

	mutex_lock(&fbc->lock);
	__intel_fbc_post_update(crtc);
	mutex_unlock(&fbc->lock);
}
954
955 static unsigned int intel_fbc_get_frontbuffer_bit(struct intel_fbc *fbc)
956 {
957 if (fbc->enabled)
958 return to_intel_plane(fbc->crtc->base.primary)->frontbuffer_bit;
959 else
960 return fbc->possible_framebuffer_bits;
961 }
962
/*
 * Frontbuffer invalidate hook: record pending CPU writes in busy_bits
 * and deactivate FBC while they are outstanding. GTT and flip origins
 * are tracked by the hardware itself, so they are ignored here.
 */
void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
			  unsigned int frontbuffer_bits,
			  enum fb_op_origin origin)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))
		return;

	if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
		return;

	mutex_lock(&fbc->lock);

	fbc->busy_bits |= intel_fbc_get_frontbuffer_bit(fbc) & frontbuffer_bits;

	if (fbc->enabled && fbc->busy_bits)
		intel_fbc_deactivate(dev_priv, "frontbuffer write");

	mutex_unlock(&fbc->lock);
}
984
/*
 * Frontbuffer flush hook: clear the flushed busy_bits and, when nothing
 * is outstanding on FBC's frontbuffer, either nuke the compressed data
 * (if active) or try to reactivate (if idle with no pending flip).
 */
void intel_fbc_flush(struct drm_i915_private *dev_priv,
		     unsigned int frontbuffer_bits, enum fb_op_origin origin)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))
		return;

	mutex_lock(&fbc->lock);

	fbc->busy_bits &= ~frontbuffer_bits;

	/* GTT/flip origins are tracked by the hardware itself. */
	if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
		goto out;

	if (!fbc->busy_bits && fbc->enabled &&
	    (frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) {
		if (fbc->active)
			intel_fbc_recompress(dev_priv);
		else if (!fbc->flip_pending)
			__intel_fbc_post_update(fbc->crtc);
	}

out:
	mutex_unlock(&fbc->lock);
}
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
/**
 * intel_fbc_choose_crtc - select a CRTC to enable FBC on
 * @dev_priv: i915 device instance
 * @state: the atomic state structure
 *
 * Marks at most one crtc_state in @state with enable_fbc = true (the
 * first plane in the state that supports FBC and is visible). If FBC is
 * already bound to a CRTC that isn't part of this commit, nothing is
 * chosen.
 */
void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
			   struct intel_atomic_state *state)
{
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_plane *plane;
	struct intel_plane_state *plane_state;
	bool crtc_chosen = false;
	int i;

	mutex_lock(&fbc->lock);

	/* Does this atomic commit involve the CRTC currently tied to FBC? */
	if (fbc->crtc &&
	    !intel_atomic_get_new_crtc_state(state, fbc->crtc))
		goto out;

	if (!intel_fbc_can_enable(dev_priv))
		goto out;

	/*
	 * Pick the first suitable plane; per-crtc suitability is checked
	 * later, at activation time.
	 */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct intel_crtc_state *crtc_state;
		struct intel_crtc *crtc = to_intel_crtc(plane_state->base.crtc);

		if (!plane->has_fbc)
			continue;

		if (!plane_state->base.visible)
			continue;

		crtc_state = intel_atomic_get_new_crtc_state(state, crtc);

		crtc_state->enable_fbc = true;
		crtc_chosen = true;
		break;
	}

	if (!crtc_chosen)
		fbc->no_fbc_reason = "no suitable CRTC for FBC";

out:
	mutex_unlock(&fbc->lock);
}
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082 void intel_fbc_enable(struct intel_crtc *crtc,
1083 struct intel_crtc_state *crtc_state,
1084 struct intel_plane_state *plane_state)
1085 {
1086 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1087 struct intel_fbc *fbc = &dev_priv->fbc;
1088
1089 if (!fbc_supported(dev_priv))
1090 return;
1091
1092 mutex_lock(&fbc->lock);
1093
1094 if (fbc->enabled) {
1095 WARN_ON(fbc->crtc == NULL);
1096 if (fbc->crtc == crtc) {
1097 WARN_ON(!crtc_state->enable_fbc);
1098 WARN_ON(fbc->active);
1099 }
1100 goto out;
1101 }
1102
1103 if (!crtc_state->enable_fbc)
1104 goto out;
1105
1106 WARN_ON(fbc->active);
1107 WARN_ON(fbc->crtc != NULL);
1108
1109 intel_fbc_update_state_cache(crtc, crtc_state, plane_state);
1110 if (intel_fbc_alloc_cfb(crtc)) {
1111 fbc->no_fbc_reason = "not enough stolen memory";
1112 goto out;
1113 }
1114
1115 DRM_DEBUG_KMS("Enabling FBC on pipe %c\n", pipe_name(crtc->pipe));
1116 fbc->no_fbc_reason = "FBC enabled but not active yet\n";
1117
1118 fbc->enabled = true;
1119 fbc->crtc = crtc;
1120 out:
1121 mutex_unlock(&fbc->lock);
1122 }
1123
1124
1125
1126
1127
1128
1129
/**
 * intel_fbc_disable - disable FBC if it's associated with crtc
 * @crtc: the CRTC
 *
 * No-op when FBC is bound to a different CRTC (or not enabled at all).
 */
void intel_fbc_disable(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))
		return;

	mutex_lock(&fbc->lock);
	if (fbc->crtc == crtc)
		__intel_fbc_disable(dev_priv);
	mutex_unlock(&fbc->lock);
}
1143
1144
1145
1146
1147
1148
1149
/**
 * intel_fbc_global_disable - globally disable FBC
 * @dev_priv: i915 device instance
 *
 * Disables FBC regardless of which CRTC it is associated with. The bound
 * CRTC is expected to be inactive at this point.
 */
void intel_fbc_global_disable(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))
		return;

	mutex_lock(&fbc->lock);
	if (fbc->enabled) {
		WARN_ON(fbc->crtc->active);
		__intel_fbc_disable(dev_priv);
	}
	mutex_unlock(&fbc->lock);
}
1164
/*
 * Deferred work scheduled from the FIFO-underrun IRQ path: latch the
 * underrun and deactivate FBC (it stays off until
 * intel_fbc_reset_underrun() clears the flag).
 */
static void intel_fbc_underrun_work_fn(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, fbc.underrun_work);
	struct intel_fbc *fbc = &dev_priv->fbc;

	mutex_lock(&fbc->lock);

	/* Maybe it was already handled, or FBC got disabled meanwhile. */
	if (fbc->underrun_detected || !fbc->enabled)
		goto out;

	DRM_DEBUG_KMS("Disabling FBC due to FIFO underrun.\n");
	fbc->underrun_detected = true;

	intel_fbc_deactivate(dev_priv, "FIFO underrun");
out:
	mutex_unlock(&fbc->lock);
}
1184
1185
1186
1187
1188
1189
1190
1191
/**
 * intel_fbc_reset_underrun - reset FBC fifo underrun status
 * @dev_priv: i915 device instance
 *
 * Clears the latched underrun state so FBC can be considered again.
 * Returns 0 on success, or a negative errno if the lock could not be
 * taken (interrupted).
 */
int intel_fbc_reset_underrun(struct drm_i915_private *dev_priv)
{
	int ret;

	/* Make sure no work is pending before we clear the flag below. */
	cancel_work_sync(&dev_priv->fbc.underrun_work);

	ret = mutex_lock_interruptible(&dev_priv->fbc.lock);
	if (ret)
		return ret;

	if (dev_priv->fbc.underrun_detected) {
		DRM_DEBUG_KMS("Re-allowing FBC after fifo underrun\n");
		dev_priv->fbc.no_fbc_reason = "FIFO underrun cleared";
	}

	dev_priv->fbc.underrun_detected = false;
	mutex_unlock(&dev_priv->fbc.lock);

	return 0;
}
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
/**
 * intel_fbc_handle_fifo_underrun_irq - disable FBC when a FIFO underrun occurs
 * @dev_priv: i915 device instance
 *
 * Called from (possibly hard) IRQ context, so the actual work is punted
 * to a workqueue; fbc->lock cannot be taken here.
 */
void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))
		return;

	/*
	 * Unlocked peek to avoid rescheduling work that would be a no-op;
	 * the work function re-checks under the lock.
	 */
	if (READ_ONCE(fbc->underrun_detected))
		return;

	schedule_work(&fbc->underrun_work);
}
1245
1246
1247
1248
1249
1250
1251
1252
1253
/**
 * intel_fbc_init_pipe_state - initialize FBC's pipe visibility tracking
 * @dev_priv: i915 device instance
 *
 * Seeds visible_pipes_mask from the current hardware/state, so the
 * multiple-pipes bookkeeping on gen2/3 starts out consistent.
 */
void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc;

	/* Tracking is only needed where multiple pipes forbid FBC. */
	if (!no_fbc_on_multiple_pipes(dev_priv))
		return;

	for_each_intel_crtc(&dev_priv->drm, crtc)
		if (intel_crtc_active(crtc) &&
		    crtc->base.primary->state->visible)
			dev_priv->fbc.visible_pipes_mask |= (1 << crtc->pipe);
}
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277 static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv)
1278 {
1279 if (i915_modparams.enable_fbc >= 0)
1280 return !!i915_modparams.enable_fbc;
1281
1282 if (!HAS_FBC(dev_priv))
1283 return 0;
1284
1285
1286 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
1287 return 0;
1288
1289 if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9)
1290 return 1;
1291
1292 return 0;
1293 }
1294
1295 static bool need_fbc_vtd_wa(struct drm_i915_private *dev_priv)
1296 {
1297
1298 if (intel_vtd_active() &&
1299 (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))) {
1300 DRM_INFO("Disabling framebuffer compression (FBC) to prevent screen flicker with VT-d enabled\n");
1301 return true;
1302 }
1303
1304 return false;
1305 }
1306
1307
1308
1309
1310
1311
1312
/**
 * intel_fbc_init - Initialize FBC
 * @dev_priv: the i915 device
 *
 * This function might be called during PM init process.
 */
void intel_fbc_init(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn);
	mutex_init(&fbc->lock);
	fbc->enabled = false;
	fbc->active = false;

	/* VT-d workaround: pretend the platform has no FBC at all. */
	if (need_fbc_vtd_wa(dev_priv))
		mkwrite_device_info(dev_priv)->display.has_fbc = false;

	i915_modparams.enable_fbc = intel_sanitize_fbc_option(dev_priv);
	DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n",
		      i915_modparams.enable_fbc);

	if (!HAS_FBC(dev_priv)) {
		fbc->no_fbc_reason = "unsupported by this chipset";
		return;
	}

	/* Seed the interval field used by the i8xx-style activate path. */
	if (INTEL_GEN(dev_priv) <= 4 && !IS_GM45(dev_priv))
		I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);

	/*
	 * The BIOS (or a previous driver instance) may have left FBC on;
	 * software state starts as disabled, so bring HW in line with it.
	 */
	if (intel_fbc_hw_is_active(dev_priv))
		intel_fbc_hw_deactivate(dev_priv);
}