This source file includes the following definitions:
- amdgpu_display_flip_callback
- amdgpu_display_flip_handle_fence
- amdgpu_display_flip_work_func
- amdgpu_display_unpin_work_func
- amdgpu_display_crtc_page_flip_target
- amdgpu_display_crtc_set_config
- amdgpu_display_print_display_setup
- amdgpu_display_ddc_probe
- amdgpu_display_supported_domains
- amdgpu_display_framebuffer_init
- amdgpu_display_user_framebuffer_create
- amdgpu_display_modeset_create_props
- amdgpu_display_update_priority
- amdgpu_display_is_hdtv_mode
- amdgpu_display_crtc_scaling_mode_fixup
- amdgpu_display_get_crtc_scanoutpos
- amdgpu_display_crtc_idx_to_irq_type
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_connectors.h"
#include "amdgpu_display.h"
#include <asm/div64.h>

#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_vblank.h>

static void amdgpu_display_flip_callback(struct dma_fence *f,
					 struct dma_fence_cb *cb)
{
	struct amdgpu_flip_work *work =
		container_of(cb, struct amdgpu_flip_work, cb);

	dma_fence_put(f);
	schedule_work(&work->flip_work.work);
}

static bool amdgpu_display_flip_handle_fence(struct amdgpu_flip_work *work,
					     struct dma_fence **f)
{
	struct dma_fence *fence = *f;

	if (fence == NULL)
		return false;

	*f = NULL;

	/* Queue the flip work once the fence signals; the callback
	 * consumes the fence reference.
	 */
	if (!dma_fence_add_callback(fence, &work->cb,
				    amdgpu_display_flip_callback))
		return true;

	/* Fence already signaled, drop our reference and carry on. */
	dma_fence_put(fence);
	return false;
}

static void amdgpu_display_flip_work_func(struct work_struct *__work)
{
	struct delayed_work *delayed_work =
		container_of(__work, struct delayed_work, work);
	struct amdgpu_flip_work *work =
		container_of(delayed_work, struct amdgpu_flip_work, flip_work);
	struct amdgpu_device *adev = work->adev;
	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[work->crtc_id];

	struct drm_crtc *crtc = &amdgpu_crtc->base;
	unsigned long flags;
	unsigned int i;
	int vpos, hpos;

	if (amdgpu_display_flip_handle_fence(work, &work->excl))
		return;

	for (i = 0; i < work->shared_count; ++i)
		if (amdgpu_display_flip_handle_fence(work, &work->shared[i]))
			return;

	/* Wait until we're out of the vertical blank period before the one
	 * targeted by the flip
	 */
	if (amdgpu_crtc->enabled &&
	    (amdgpu_display_get_crtc_scanoutpos(adev->ddev, work->crtc_id, 0,
						&vpos, &hpos, NULL, NULL,
						&crtc->hwmode)
	     & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
	    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
	    (int)(work->target_vblank -
		  amdgpu_get_vblank_counter_kms(adev->ddev, amdgpu_crtc->crtc_id)) > 0) {
		schedule_delayed_work(&work->flip_work, usecs_to_jiffies(1000));
		return;
	}

	/* We borrow the event spin lock for protecting flip_status */
	spin_lock_irqsave(&crtc->dev->event_lock, flags);

	/* Do the flip (mmio) */
	adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base, work->async);

	/* Set the flip status */
	amdgpu_crtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_SUBMITTED, work: %p,\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc, work);
}

/*
 * Handle unpin events outside the interrupt handler proper.
 */
static void amdgpu_display_unpin_work_func(struct work_struct *__work)
{
	struct amdgpu_flip_work *work =
		container_of(__work, struct amdgpu_flip_work, unpin_work);
	int r;

	/* unpin of the old buffer */
	r = amdgpu_bo_reserve(work->old_abo, true);
	if (likely(r == 0)) {
		r = amdgpu_bo_unpin(work->old_abo);
		if (unlikely(r != 0))
			DRM_ERROR("failed to unpin buffer after flip\n");
		amdgpu_bo_unreserve(work->old_abo);
	} else
		DRM_ERROR("failed to reserve buffer after flip\n");

	amdgpu_bo_unref(&work->old_abo);
	kfree(work->shared);
	kfree(work);
}

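/*
 * Implements the CRTC's page_flip_target hook: queue a flip of the
 * primary plane to @fb for vblank number @target. The new buffer is
 * pinned for scanout here, while the actual mmio flip is deferred to
 * amdgpu_display_flip_work_func() once all fences on it have signaled.
 */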
int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
				struct drm_framebuffer *fb,
				struct drm_pending_vblank_event *event,
				uint32_t page_flip_flags, uint32_t target,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_gem_object *obj;
	struct amdgpu_flip_work *work;
	struct amdgpu_bo *new_abo;
	unsigned long flags;
	u64 tiling_flags;
	int i, r;

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL)
		return -ENOMEM;

	INIT_DELAYED_WORK(&work->flip_work, amdgpu_display_flip_work_func);
	INIT_WORK(&work->unpin_work, amdgpu_display_unpin_work_func);

	work->event = event;
	work->adev = adev;
	work->crtc_id = amdgpu_crtc->crtc_id;
	work->async = (page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;

	/* schedule unpin of the old buffer */
	obj = crtc->primary->fb->obj[0];

	/* take a reference to the old object */
	work->old_abo = gem_to_amdgpu_bo(obj);
	amdgpu_bo_ref(work->old_abo);

	obj = fb->obj[0];
	new_abo = gem_to_amdgpu_bo(obj);

	/* pin the new buffer */
	r = amdgpu_bo_reserve(new_abo, false);
	if (unlikely(r != 0)) {
		DRM_ERROR("failed to reserve new abo buffer before flip\n");
		goto cleanup;
	}

	if (!adev->enable_virtual_display) {
		r = amdgpu_bo_pin(new_abo,
				  amdgpu_display_supported_domains(adev, new_abo->flags));
		if (unlikely(r != 0)) {
			DRM_ERROR("failed to pin new abo buffer before flip\n");
			goto unreserve;
		}
	}

	r = amdgpu_ttm_alloc_gart(&new_abo->tbo);
	if (unlikely(r != 0)) {
		DRM_ERROR("%p bind failed\n", new_abo);
		goto unpin;
	}

	r = dma_resv_get_fences_rcu(new_abo->tbo.base.resv, &work->excl,
				    &work->shared_count,
				    &work->shared);
	if (unlikely(r != 0)) {
		DRM_ERROR("failed to get fences for buffer\n");
		goto unpin;
	}

	amdgpu_bo_get_tiling_flags(new_abo, &tiling_flags);
	amdgpu_bo_unreserve(new_abo);

	if (!adev->enable_virtual_display)
		work->base = amdgpu_bo_gpu_offset(new_abo);
	work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
		amdgpu_get_vblank_counter_kms(dev, work->crtc_id);

	/* we borrow the event spin lock for protecting flip_work */
	spin_lock_irqsave(&crtc->dev->event_lock, flags);
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_NONE) {
		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
		r = -EBUSY;
		goto pflip_cleanup;
	}

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_PENDING;
	amdgpu_crtc->pflip_works = work;

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_PENDING, work: %p,\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc, work);

	/* update crtc fb */
	crtc->primary->fb = fb;
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
	amdgpu_display_flip_work_func(&work->flip_work.work);
	return 0;

pflip_cleanup:
	if (unlikely(amdgpu_bo_reserve(new_abo, false) != 0)) {
		DRM_ERROR("failed to reserve new abo in error path\n");
		goto cleanup;
	}
unpin:
	if (!adev->enable_virtual_display)
		if (unlikely(amdgpu_bo_unpin(new_abo) != 0))
			DRM_ERROR("failed to unpin new abo in error path\n");

unreserve:
	amdgpu_bo_unreserve(new_abo);

cleanup:
	amdgpu_bo_unref(&work->old_abo);
	dma_fence_put(work->excl);
	for (i = 0; i < work->shared_count; ++i)
		dma_fence_put(work->shared[i]);
	kfree(work->shared);
	kfree(work);

	return r;
}

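/*
 * Legacy (non-atomic) modeset entry point. Wraps
 * drm_crtc_helper_set_config() with runtime PM bookkeeping: a power
 * reference is held while any CRTC remains enabled and dropped once
 * the last one is turned off.
 */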
int amdgpu_display_crtc_set_config(struct drm_mode_set *set,
				   struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev;
	struct amdgpu_device *adev;
	struct drm_crtc *crtc;
	bool active = false;
	int ret;

	if (!set || !set->crtc)
		return -EINVAL;

	dev = set->crtc->dev;

	ret = pm_runtime_get_sync(dev->dev);
	if (ret < 0) {
		/* pm_runtime_get_sync() raises the usage count even on
		 * failure, so balance it before bailing out.
		 */
		pm_runtime_put_autosuspend(dev->dev);
		return ret;
	}

	ret = drm_crtc_helper_set_config(set, ctx);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
		if (crtc->enabled)
			active = true;

	pm_runtime_mark_last_busy(dev->dev);

	adev = dev->dev_private;
	/* if we have active crtcs and we don't have a power ref,
	 * take the current one
	 */
	if (active && !adev->have_disp_power_ref) {
		adev->have_disp_power_ref = true;
		return ret;
	}
	/* if we have no active crtcs, then drop the power ref
	 * we got before
	 */
	if (!active && adev->have_disp_power_ref) {
		pm_runtime_put_autosuspend(dev->dev);
		adev->have_disp_power_ref = false;
	}

	/* drop the power reference we got coming in here */
	pm_runtime_put_autosuspend(dev->dev);
	return ret;
}

static const char *encoder_names[41] = {
	"NONE",
	"INTERNAL_LVDS",
	"INTERNAL_TMDS1",
	"INTERNAL_TMDS2",
	"INTERNAL_DAC1",
	"INTERNAL_DAC2",
	"INTERNAL_SDVOA",
	"INTERNAL_SDVOB",
	"SI170B",
	"CH7303",
	"CH7301",
	"INTERNAL_DVO1",
	"EXTERNAL_SDVOA",
	"EXTERNAL_SDVOB",
	"TITFP513",
	"INTERNAL_LVTM1",
	"VT1623",
	"HDMI_SI1930",
	"HDMI_INTERNAL",
	"INTERNAL_KLDSCP_TMDS1",
	"INTERNAL_KLDSCP_DVO1",
	"INTERNAL_KLDSCP_DAC1",
	"INTERNAL_KLDSCP_DAC2",
	"SI178",
	"MVPU_FPGA",
	"INTERNAL_DDI",
	"VT1625",
	"HDMI_SI1932",
	"DP_AN9801",
	"DP_DP501",
	"INTERNAL_UNIPHY",
	"INTERNAL_KLDSCP_LVTMA",
	"INTERNAL_UNIPHY1",
	"INTERNAL_UNIPHY2",
	"NUTMEG",
	"TRAVIS",
	"INTERNAL_VCE",
	"INTERNAL_UNIPHY3",
	"HDMI_ANX9805",
	"INTERNAL_AMCLK",
	"VIRTUAL",
};

static const char *hpd_names[6] = {
	"HPD1",
	"HPD2",
	"HPD3",
	"HPD4",
	"HPD5",
	"HPD6",
};

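/*
 * Dump the connector/encoder topology (connector names, HPD pins,
 * DDC registers and per-device encoder assignments) to the kernel log.
 */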
void amdgpu_display_print_display_setup(struct drm_device *dev)
{
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector;
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;
	uint32_t devices;
	int i = 0;

	DRM_INFO("AMDGPU Display Connectors\n");
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		amdgpu_connector = to_amdgpu_connector(connector);
		DRM_INFO("Connector %d:\n", i);
		DRM_INFO("  %s\n", connector->name);
		if (amdgpu_connector->hpd.hpd != AMDGPU_HPD_NONE)
			DRM_INFO("  %s\n", hpd_names[amdgpu_connector->hpd.hpd]);
		if (amdgpu_connector->ddc_bus) {
			DRM_INFO("  DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
				 amdgpu_connector->ddc_bus->rec.mask_clk_reg,
				 amdgpu_connector->ddc_bus->rec.mask_data_reg,
				 amdgpu_connector->ddc_bus->rec.a_clk_reg,
				 amdgpu_connector->ddc_bus->rec.a_data_reg,
				 amdgpu_connector->ddc_bus->rec.en_clk_reg,
				 amdgpu_connector->ddc_bus->rec.en_data_reg,
				 amdgpu_connector->ddc_bus->rec.y_clk_reg,
				 amdgpu_connector->ddc_bus->rec.y_data_reg);
			if (amdgpu_connector->router.ddc_valid)
				DRM_INFO("  DDC Router 0x%x/0x%x\n",
					 amdgpu_connector->router.ddc_mux_control_pin,
					 amdgpu_connector->router.ddc_mux_state);
			if (amdgpu_connector->router.cd_valid)
				DRM_INFO("  Clock/Data Router 0x%x/0x%x\n",
					 amdgpu_connector->router.cd_mux_control_pin,
					 amdgpu_connector->router.cd_mux_state);
		} else {
			if (connector->connector_type == DRM_MODE_CONNECTOR_VGA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVID ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVIA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)
				DRM_INFO("  DDC: no ddc bus - possible BIOS bug - please report to xorg-driver-ati@lists.x.org\n");
		}
		DRM_INFO("  Encoders:\n");
		list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
			amdgpu_encoder = to_amdgpu_encoder(encoder);
			devices = amdgpu_encoder->devices & amdgpu_connector->devices;
			if (devices) {
				if (devices & ATOM_DEVICE_CRT1_SUPPORT)
					DRM_INFO("    CRT1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_CRT2_SUPPORT)
					DRM_INFO("    CRT2: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_LCD1_SUPPORT)
					DRM_INFO("    LCD1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP1_SUPPORT)
					DRM_INFO("    DFP1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP2_SUPPORT)
					DRM_INFO("    DFP2: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP3_SUPPORT)
					DRM_INFO("    DFP3: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP4_SUPPORT)
					DRM_INFO("    DFP4: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP5_SUPPORT)
					DRM_INFO("    DFP5: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP6_SUPPORT)
					DRM_INFO("    DFP6: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_TV1_SUPPORT)
					DRM_INFO("    TV1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_CV_SUPPORT)
					DRM_INFO("    CV: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
			}
		}
		i++;
	}
}

/*
 * amdgpu_display_ddc_probe
 *
 * Probe for a sink on the connector's DDC (or, with @use_aux, DP AUX)
 * bus by reading the start of the EDID and validating its header.
 */
bool amdgpu_display_ddc_probe(struct amdgpu_connector *amdgpu_connector,
			      bool use_aux)
{
	u8 out = 0x0;
	u8 buf[8];
	int ret;
	struct i2c_msg msgs[] = {
		{
			.addr = DDC_ADDR,
			.flags = 0,
			.len = 1,
			.buf = &out,
		},
		{
			.addr = DDC_ADDR,
			.flags = I2C_M_RD,
			.len = 8,
			.buf = buf,
		}
	};

	/* on hw with routers, select right port */
	if (amdgpu_connector->router.ddc_valid)
		amdgpu_i2c_router_select_ddc_port(amdgpu_connector);

	if (use_aux)
		ret = i2c_transfer(&amdgpu_connector->ddc_bus->aux.ddc, msgs, 2);
	else
		ret = i2c_transfer(&amdgpu_connector->ddc_bus->adapter, msgs, 2);

	if (ret != 2)
		/* Couldn't find an accessible DDC on this connector */
		return false;

	/* Probe also for valid EDID header
	 * EDID header starts with:
	 * 0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00.
	 * Only the first 6 bytes must be valid as
	 * drm_edid_header_is_valid() can start at byte 4.
	 */
	if (drm_edid_header_is_valid(buf) < 6) {
		/* Couldn't find an accessible EDID on this connector */
		return false;
	}
	return true;
}

static const struct drm_framebuffer_funcs amdgpu_fb_funcs = {
	.destroy	= drm_gem_fb_destroy,
	.create_handle	= drm_gem_fb_create_handle,
};

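/*
 * Return the set of memory domains (VRAM, and on some APUs also GTT)
 * that a buffer with the given creation flags may be pinned to for
 * scanout.
 */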
uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
					  uint64_t bo_flags)
{
	uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;

#if defined(CONFIG_DRM_AMD_DC)
	/*
	 * If amdgpu_bo_support_uswc returns false it means that USWC mappings
	 * are not supported for this board. But this mapping is required
	 * to avoid hangs caused by placement of a scanout BO in GTT on certain
	 * APUs, so force the BO placement to VRAM in case this architecture
	 * will not allow USWC mappings.
	 * Also, don't allow the GTT domain if the BO doesn't have the USWC
	 * flag set.
	 */
	if (adev->asic_type >= CHIP_CARRIZO &&
	    adev->asic_type < CHIP_RAVEN &&
	    (adev->flags & AMD_IS_APU) &&
	    (bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) &&
	    amdgpu_bo_support_uswc(bo_flags) &&
	    amdgpu_device_asic_has_dc_support(adev->asic_type))
		domain |= AMDGPU_GEM_DOMAIN_GTT;
#endif

	return domain;
}

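/*
 * Fill in an amdgpu_framebuffer from a mode_fb_cmd2 and its backing GEM
 * object, and register it with the DRM core.
 */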
int amdgpu_display_framebuffer_init(struct drm_device *dev,
				    struct amdgpu_framebuffer *rfb,
				    const struct drm_mode_fb_cmd2 *mode_cmd,
				    struct drm_gem_object *obj)
{
	int ret;

	rfb->base.obj[0] = obj;
	drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd);
	ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
	if (ret) {
		rfb->base.obj[0] = NULL;
		return ret;
	}
	return 0;
}

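/*
 * Implements the .fb_create mode-config hook: look up the GEM object
 * for the user-supplied handle and wrap it in an amdgpu_framebuffer.
 */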
struct drm_framebuffer *
amdgpu_display_user_framebuffer_create(struct drm_device *dev,
				       struct drm_file *file_priv,
				       const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_gem_object *obj;
	struct amdgpu_framebuffer *amdgpu_fb;
	int ret;

	obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
	if (obj == NULL) {
		dev_err(&dev->pdev->dev, "No GEM object associated to handle 0x%08X, "
			"can't create framebuffer\n", mode_cmd->handles[0]);
		return ERR_PTR(-ENOENT);
	}

	/* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */
	if (obj->import_attach) {
		DRM_DEBUG_KMS("Cannot create framebuffer from imported dma_buf\n");
		/* drop the lookup reference before erroring out */
		drm_gem_object_put_unlocked(obj);
		return ERR_PTR(-EINVAL);
	}

	amdgpu_fb = kzalloc(sizeof(*amdgpu_fb), GFP_KERNEL);
	if (amdgpu_fb == NULL) {
		drm_gem_object_put_unlocked(obj);
		return ERR_PTR(-ENOMEM);
	}

	ret = amdgpu_display_framebuffer_init(dev, amdgpu_fb, mode_cmd, obj);
	if (ret) {
		kfree(amdgpu_fb);
		drm_gem_object_put_unlocked(obj);
		return ERR_PTR(ret);
	}

	return &amdgpu_fb->base;
}

const struct drm_mode_config_funcs amdgpu_mode_funcs = {
	.fb_create = amdgpu_display_user_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
};

static const struct drm_prop_enum_list amdgpu_underscan_enum_list[] =
{	{ UNDERSCAN_OFF, "off" },
	{ UNDERSCAN_ON, "on" },
	{ UNDERSCAN_AUTO, "auto" },
};

static const struct drm_prop_enum_list amdgpu_audio_enum_list[] =
{	{ AMDGPU_AUDIO_DISABLE, "off" },
	{ AMDGPU_AUDIO_ENABLE, "on" },
	{ AMDGPU_AUDIO_AUTO, "auto" },
};

/* XXX support different dither options? */
static const struct drm_prop_enum_list amdgpu_dither_enum_list[] =
{	{ AMDGPU_FMT_DITHER_DISABLE, "off" },
	{ AMDGPU_FMT_DITHER_ENABLE, "on" },
};

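/*
 * Create the driver-wide KMS properties (coherent mode, load detection,
 * scaling, underscan borders, audio, dither, and with DC support the
 * abm level) that connectors attach later.
 */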
int amdgpu_display_modeset_create_props(struct amdgpu_device *adev)
{
	int sz;

	adev->mode_info.coherent_mode_property =
		drm_property_create_range(adev->ddev, 0, "coherent", 0, 1);
	if (!adev->mode_info.coherent_mode_property)
		return -ENOMEM;

	adev->mode_info.load_detect_property =
		drm_property_create_range(adev->ddev, 0, "load detection", 0, 1);
	if (!adev->mode_info.load_detect_property)
		return -ENOMEM;

	drm_mode_create_scaling_mode_property(adev->ddev);

	sz = ARRAY_SIZE(amdgpu_underscan_enum_list);
	adev->mode_info.underscan_property =
		drm_property_create_enum(adev->ddev, 0,
					 "underscan",
					 amdgpu_underscan_enum_list, sz);

	adev->mode_info.underscan_hborder_property =
		drm_property_create_range(adev->ddev, 0,
					  "underscan hborder", 0, 128);
	if (!adev->mode_info.underscan_hborder_property)
		return -ENOMEM;

	adev->mode_info.underscan_vborder_property =
		drm_property_create_range(adev->ddev, 0,
					  "underscan vborder", 0, 128);
	if (!adev->mode_info.underscan_vborder_property)
		return -ENOMEM;

	sz = ARRAY_SIZE(amdgpu_audio_enum_list);
	adev->mode_info.audio_property =
		drm_property_create_enum(adev->ddev, 0,
					 "audio",
					 amdgpu_audio_enum_list, sz);

	sz = ARRAY_SIZE(amdgpu_dither_enum_list);
	adev->mode_info.dither_property =
		drm_property_create_enum(adev->ddev, 0,
					 "dither",
					 amdgpu_dither_enum_list, sz);

	if (amdgpu_device_has_dc_support(adev)) {
		adev->mode_info.abm_level_property =
			drm_property_create_range(adev->ddev, 0,
						  "abm level", 0, 4);
		if (!adev->mode_info.abm_level_property)
			return -ENOMEM;
	}

	return 0;
}

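/*
 * Translate the amdgpu_disp_priority module parameter (0 = auto,
 * 1 = normal, 2 = high) into mode_info.disp_priority, falling back
 * to auto for out-of-range values.
 */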
void amdgpu_display_update_priority(struct amdgpu_device *adev)
{
	/* adjustment options for the display watermarks */
	if ((amdgpu_disp_priority == 0) || (amdgpu_disp_priority > 2))
		adev->mode_info.disp_priority = 0;
	else
		adev->mode_info.disp_priority = amdgpu_disp_priority;
}

static bool amdgpu_display_is_hdtv_mode(const struct drm_display_mode *mode)
{
	/* try and guess if this is a tv or a monitor */
	if ((mode->vdisplay == 480 && mode->hdisplay == 720) || /* 480p */
	    (mode->vdisplay == 576) || /* 576p */
	    (mode->vdisplay == 720) || /* 720p */
	    (mode->vdisplay == 1080)) /* 1080i/p */
		return true;
	else
		return false;
}

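/*
 * CRTC mode_fixup helper: derive the CRTC's RMX scaling type, native
 * mode and underscan borders from the attached encoder, then compute
 * the vertical/horizontal scaling ratios as 20.12 fixed-point values.
 */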
bool amdgpu_display_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
					    const struct drm_display_mode *mode,
					    struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = crtc->dev;
	struct drm_encoder *encoder;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct amdgpu_encoder *amdgpu_encoder;
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector;
	u32 src_v = 1, dst_v = 1;
	u32 src_h = 1, dst_h = 1;

	amdgpu_crtc->h_border = 0;
	amdgpu_crtc->v_border = 0;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc != crtc)
			continue;
		amdgpu_encoder = to_amdgpu_encoder(encoder);
		connector = amdgpu_get_connector_for_encoder(encoder);
		amdgpu_connector = to_amdgpu_connector(connector);

		/* set scaling */
		if (amdgpu_encoder->rmx_type == RMX_OFF)
			amdgpu_crtc->rmx_type = RMX_OFF;
		else if (mode->hdisplay < amdgpu_encoder->native_mode.hdisplay ||
			 mode->vdisplay < amdgpu_encoder->native_mode.vdisplay)
			amdgpu_crtc->rmx_type = amdgpu_encoder->rmx_type;
		else
			amdgpu_crtc->rmx_type = RMX_OFF;
		/* copy native mode */
		memcpy(&amdgpu_crtc->native_mode,
		       &amdgpu_encoder->native_mode,
		       sizeof(struct drm_display_mode));
		src_v = crtc->mode.vdisplay;
		dst_v = amdgpu_crtc->native_mode.vdisplay;
		src_h = crtc->mode.hdisplay;
		dst_h = amdgpu_crtc->native_mode.hdisplay;

		/* fix up for overscan on hdmi */
		if ((!(mode->flags & DRM_MODE_FLAG_INTERLACE)) &&
		    ((amdgpu_encoder->underscan_type == UNDERSCAN_ON) ||
		     ((amdgpu_encoder->underscan_type == UNDERSCAN_AUTO) &&
		      drm_detect_hdmi_monitor(amdgpu_connector_edid(connector)) &&
		      amdgpu_display_is_hdtv_mode(mode)))) {
			if (amdgpu_encoder->underscan_hborder != 0)
				amdgpu_crtc->h_border = amdgpu_encoder->underscan_hborder;
			else
				amdgpu_crtc->h_border = (mode->hdisplay >> 5) + 16;
			if (amdgpu_encoder->underscan_vborder != 0)
				amdgpu_crtc->v_border = amdgpu_encoder->underscan_vborder;
			else
				amdgpu_crtc->v_border = (mode->vdisplay >> 5) + 16;
			amdgpu_crtc->rmx_type = RMX_FULL;
			src_v = crtc->mode.vdisplay;
			dst_v = crtc->mode.vdisplay - (amdgpu_crtc->v_border * 2);
			src_h = crtc->mode.hdisplay;
			dst_h = crtc->mode.hdisplay - (amdgpu_crtc->h_border * 2);
		}
	}
	if (amdgpu_crtc->rmx_type != RMX_OFF) {
		fixed20_12 a, b;

		a.full = dfixed_const(src_v);
		b.full = dfixed_const(dst_v);
		amdgpu_crtc->vsc.full = dfixed_div(a, b);
		a.full = dfixed_const(src_h);
		b.full = dfixed_const(dst_h);
		amdgpu_crtc->hsc.full = dfixed_div(a, b);
	} else {
		amdgpu_crtc->vsc.full = dfixed_const(1);
		amdgpu_crtc->hsc.full = dfixed_const(1);
	}
	return true;
}

/*
 * Retrieve current video scanout position of crtc on a given gpu, and
 * an optional accurate timestamp of when the query happened.
 *
 * \param dev Device to query.
 * \param pipe Crtc to query.
 * \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0).
 *              For driver internal use only also supports these flags:
 *
 *              USE_REAL_VBLANKSTART to use the real start of vblank instead
 *              of a fudged earlier start of vblank.
 *
 *              GET_DISTANCE_TO_VBLANKSTART to return distance to the
 *              fudged earlier start of vblank in *vpos and the distance
 *              to true start of vblank in *hpos.
 *
 * \param *vpos Location where vertical scanout position should be stored.
 * \param *hpos Location where horizontal scanout position should go.
 * \param *stime Target location for timestamp taken immediately before
 *               scanout position query. Can be NULL to skip timestamp.
 * \param *etime Target location for timestamp taken immediately after
 *               scanout position query. Can be NULL to skip timestamp.
 *
 * Returns vpos as a positive number while in active scanout area.
 * Returns vpos as a negative number inside vblank, counting the number
 * of scanlines to go until end of vblank, e.g., -1 means "one scanline
 * until start of active scanout / end of vblank."
 *
 * \return Flags, or'ed together as follows:
 *
 * DRM_SCANOUTPOS_VALID = Query successful.
 * DRM_SCANOUTPOS_IN_VBLANK = Inside vblank.
 * DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of
 * this flag means that returned position may be offset by a constant
 * but unknown small number of scanlines wrt. real scanout position.
 */
int amdgpu_display_get_crtc_scanoutpos(struct drm_device *dev,
			unsigned int pipe, unsigned int flags, int *vpos,
			int *hpos, ktime_t *stime, ktime_t *etime,
			const struct drm_display_mode *mode)
{
	u32 vbl = 0, position = 0;
	int vbl_start, vbl_end, vtotal, ret = 0;
	bool in_vbl = true;

	struct amdgpu_device *adev = dev->dev_private;

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (amdgpu_display_page_flip_get_scanoutpos(adev, pipe, &vbl, &position) == 0)
		ret |= DRM_SCANOUTPOS_VALID;

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* Decode into vertical and horizontal scanout position. */
	*vpos = position & 0x1fff;
	*hpos = (position >> 16) & 0x1fff;

	/* Valid vblank area boundaries from gpu retrieved? */
	if (vbl > 0) {
		/* Yes: Decode. */
		ret |= DRM_SCANOUTPOS_ACCURATE;
		vbl_start = vbl & 0x1fff;
		vbl_end = (vbl >> 16) & 0x1fff;
	} else {
		/* No: Fake something reasonable which gives at least ok results. */
		vbl_start = mode->crtc_vdisplay;
		vbl_end = 0;
	}

	/* Called from driver internal vblank counter query code? */
	if (flags & GET_DISTANCE_TO_VBLANKSTART) {
		/* Caller wants distance from real vbl_start in *hpos */
		*hpos = *vpos - vbl_start;
	}

	/* Fudge vblank to start a few scanlines earlier to handle the
	 * problem that vblank irqs fire a few scanlines before start
	 * of vblank. Some driver internal callers need the true vblank
	 * start to be used and signal this via the USE_REAL_VBLANKSTART flag.
	 *
	 * The cause of the "early" vblank irq is that the irq is triggered
	 * by the line buffer logic when the line buffer read position enters
	 * the vblank, whereas our crtc scanout position naturally lags the
	 * line buffer read position.
	 */
	if (!(flags & USE_REAL_VBLANKSTART))
		vbl_start -= adev->mode_info.crtcs[pipe]->lb_vblank_lead_lines;

	/* Test scanout position against vblank region. */
	if ((*vpos < vbl_start) && (*vpos >= vbl_end))
		in_vbl = false;

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_IN_VBLANK;

	/* Called from driver internal vblank counter query code? */
	if (flags & GET_DISTANCE_TO_VBLANKSTART) {
		/* Caller wants distance from fudged earlier vbl_start */
		*vpos -= vbl_start;
		return ret;
	}

	/* Check if inside vblank area and apply corrective offsets:
	 * vpos will then be >= 0 in the video scanout area, but negative
	 * within the vblank area, counting down the number of lines until
	 * the start of scanout.
	 */

	/* Inside "upper part" of vblank area? Apply corrective offset if so: */
	if (in_vbl && (*vpos >= vbl_start)) {
		vtotal = mode->crtc_vtotal;

		/* With variable refresh rate displays the vpos can exceed
		 * the vtotal value. Clamp to 0 to return -vbl_end instead
		 * of guessing the remaining number of lines until scanout.
		 */
		*vpos = (*vpos < vtotal) ? (*vpos - vtotal) : 0;
	}

	/* Correct for shifted end of vblank at vbl_end. */
	*vpos = *vpos - vbl_end;

	return ret;
}

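/*
 * Map a CRTC index to its vblank interrupt source, or
 * AMDGPU_CRTC_IRQ_NONE if the index is out of range.
 */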
int amdgpu_display_crtc_idx_to_irq_type(struct amdgpu_device *adev, int crtc)
{
	if (crtc < 0 || crtc >= adev->mode_info.num_crtc)
		return AMDGPU_CRTC_IRQ_NONE;

	switch (crtc) {
	case 0:
		return AMDGPU_CRTC_IRQ_VBLANK1;
	case 1:
		return AMDGPU_CRTC_IRQ_VBLANK2;
	case 2:
		return AMDGPU_CRTC_IRQ_VBLANK3;
	case 3:
		return AMDGPU_CRTC_IRQ_VBLANK4;
	case 4:
		return AMDGPU_CRTC_IRQ_VBLANK5;
	case 5:
		return AMDGPU_CRTC_IRQ_VBLANK6;
	default:
		return AMDGPU_CRTC_IRQ_NONE;
	}
}