This source file includes following definitions.
- elevate_update_type
- destroy_links
- create_links
- dc_perf_trace_create
- dc_perf_trace_destroy
- dc_stream_adjust_vmin_vmax
- dc_stream_get_crtc_position
- dc_stream_configure_crc
- dc_stream_get_crc
- dc_stream_set_dither_option
- dc_stream_set_gamut_remap
- dc_stream_program_csc_matrix
- dc_stream_set_static_screen_events
- destruct
- construct
- disable_all_writeback_pipes_for_stream
- disable_dangling_plane
- dc_create
- dc_init_callbacks
- dc_destroy
- enable_timing_multisync
- program_timing_sync
- context_changed
- dc_validate_seamless_boot_timing
- dc_enable_stereo
- dc_commit_state_no_check
- dc_commit_state
- is_flip_pending_in_pipes
- dc_post_update_surfaces_to_stream
- dc_create_state
- dc_copy_state
- dc_retain_state
- dc_state_free
- dc_release_state
- dc_set_generic_gpio_for_stereo
- is_surface_in_context
- get_plane_info_update_type
- get_scaling_info_update_type
- det_surface_update
- check_update_surfaces_for_stream
- dc_check_update_surfaces_for_stream
- stream_get_status
- copy_surface_update_to_plane
- copy_stream_update_to_stream
- commit_planes_do_stream_update
- commit_planes_for_stream
- dc_commit_updates_for_stream
- dc_get_current_stream_count
- dc_get_stream_at_index
- dc_interrupt_to_irq_source
- dc_interrupt_set
- dc_interrupt_ack
- dc_set_power_state
- dc_resume
- dc_get_current_backlight_pwm
- dc_get_target_backlight_pwm
- dc_is_dmcu_initialized
- dc_submit_i2c
- link_add_remote_sink_helper
- dc_link_add_remote_sink
- dc_link_remove_remote_sink
- get_clock_requirements_for_state
- dc_set_clock
- dc_get_clock
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25 #include <linux/slab.h>
26 #include <linux/mm.h>
27
28 #include "dm_services.h"
29
30 #include "dc.h"
31
32 #include "core_status.h"
33 #include "core_types.h"
34 #include "hw_sequencer.h"
35 #include "dce/dce_hwseq.h"
36
37 #include "resource.h"
38
39 #include "clk_mgr.h"
40 #include "clock_source.h"
41 #include "dc_bios_types.h"
42
43 #include "bios_parser_interface.h"
44 #include "include/irq_service_interface.h"
45 #include "transform.h"
46 #include "dmcu.h"
47 #include "dpp.h"
48 #include "timing_generator.h"
49 #include "abm.h"
50 #include "virtual/virtual_link_encoder.h"
51
52 #include "link_hwss.h"
53 #include "link_encoder.h"
54
55 #include "dc_link_ddc.h"
56 #include "dm_helpers.h"
57 #include "mem_input.h"
58 #include "hubp.h"
59
60 #include "dc_link_dp.h"
61
62 #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
63 #include "dsc.h"
64 #endif
65
66 #ifdef CONFIG_DRM_AMD_DC_DCN2_0
67 #include "vm_helper.h"
68 #endif
69
70 #include "dce/dce_i2c.h"
71
72 #define DC_LOGGER \
73 dc->ctx->logger
74
75 const static char DC_BUILD_ID[] = "production-build";
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130 static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new)
131 {
132 if (new > *original)
133 *original = new;
134 }
135
136 static void destroy_links(struct dc *dc)
137 {
138 uint32_t i;
139
140 for (i = 0; i < dc->link_count; i++) {
141 if (NULL != dc->links[i])
142 link_destroy(&dc->links[i]);
143 }
144 }
145
/*
 * Discover the physical connectors reported by VBIOS and create one dc_link
 * per usable connector, then append @num_virtual_links virtual links backed
 * by a virtual link encoder.
 *
 * Returns false if the connector count exceeds ENUM_ID_COUNT or an
 * allocation fails; links created so far remain in dc->links for the caller
 * (destruct -> destroy_links) to clean up.
 */
static bool create_links(
		struct dc *dc,
		uint32_t num_virtual_links)
{
	int i;
	int connectors_num;
	struct dc_bios *bios = dc->ctx->dc_bios;

	dc->link_count = 0;

	connectors_num = bios->funcs->get_connectors_number(bios);

	if (connectors_num > ENUM_ID_COUNT) {
		dm_error(
			"DC: Number of connectors %d exceeds maximum of %d!\n",
			connectors_num,
			ENUM_ID_COUNT);
		return false;
	}

	dm_output_to_console(
		"DC: %s: connectors_num: physical:%d, virtual:%d\n",
		__func__,
		connectors_num,
		num_virtual_links);

	/* Physical links: one per connector the BIOS reports. */
	for (i = 0; i < connectors_num; i++) {
		struct link_init_data link_init_params = {0};
		struct dc_link *link;

		link_init_params.ctx = dc->ctx;
		/* next BIOS object table connector */
		link_init_params.connector_index = i;
		link_init_params.link_index = dc->link_count;
		link_init_params.dc = dc;
		link = link_create(&link_init_params);

		if (link) {
			bool should_destory_link = false;

			/* Optionally drop eDP links: either unconditionally
			 * (edp_not_connected config) or when a debug option
			 * asks to remove eDP with no panel detected.
			 */
			if (link->connector_signal == SIGNAL_TYPE_EDP) {
				if (dc->config.edp_not_connected)
					should_destory_link = true;
				else if (dc->debug.remove_disconnect_edp) {
					enum dc_connection_type type;
					dc_link_detect_sink(link, &type);
					if (type == dc_connection_none)
						should_destory_link = true;
				}
			}

			if (!should_destory_link) {
				dc->links[dc->link_count] = link;
				link->dc = dc;
				++dc->link_count;
			} else {
				link_destroy(&link);
			}
		}
	}

	/* Virtual links: used e.g. for headless/emulated display paths. */
	for (i = 0; i < num_virtual_links; i++) {
		struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL);
		struct encoder_init_data enc_init = {0};

		if (link == NULL) {
			BREAK_TO_DEBUGGER();
			goto failed_alloc;
		}

		link->link_index = dc->link_count;
		dc->links[dc->link_count] = link;
		dc->link_count++;

		link->ctx = dc->ctx;
		link->dc = dc;
		link->connector_signal = SIGNAL_TYPE_VIRTUAL;
		link->link_id.type = OBJECT_TYPE_CONNECTOR;
		link->link_id.id = CONNECTOR_ID_VIRTUAL;
		link->link_id.enum_id = ENUM_ID_1;
		link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);

		if (!link->link_enc) {
			BREAK_TO_DEBUGGER();
			goto failed_alloc;
		}

		link->link_status.dpcd_caps = &link->dpcd_caps;

		enc_init.ctx = dc->ctx;
		enc_init.channel = CHANNEL_ID_UNKNOWN;
		enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
		enc_init.transmitter = TRANSMITTER_UNKNOWN;
		enc_init.connector = link->link_id;
		enc_init.encoder.type = OBJECT_TYPE_ENCODER;
		enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
		enc_init.encoder.enum_id = ENUM_ID_1;
		virtual_link_encoder_construct(link->link_enc, &enc_init);
	}

	return true;

failed_alloc:
	return false;
}
251
252 static struct dc_perf_trace *dc_perf_trace_create(void)
253 {
254 return kzalloc(sizeof(struct dc_perf_trace), GFP_KERNEL);
255 }
256
257 static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace)
258 {
259 kfree(*perf_trace);
260 *perf_trace = NULL;
261 }
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279 bool dc_stream_adjust_vmin_vmax(struct dc *dc,
280 struct dc_stream_state *stream,
281 struct dc_crtc_timing_adjust *adjust)
282 {
283 int i = 0;
284 bool ret = false;
285
286 stream->adjust = *adjust;
287
288 for (i = 0; i < MAX_PIPES; i++) {
289 struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
290
291 if (pipe->stream == stream && pipe->stream_res.tg) {
292 pipe->stream->adjust = *adjust;
293 dc->hwss.set_drr(&pipe,
294 1,
295 adjust->v_total_min,
296 adjust->v_total_max,
297 adjust->v_total_mid,
298 adjust->v_total_mid_frame_num);
299
300 ret = true;
301 }
302 }
303 return ret;
304 }
305
306 bool dc_stream_get_crtc_position(struct dc *dc,
307 struct dc_stream_state **streams, int num_streams,
308 unsigned int *v_pos, unsigned int *nom_v_pos)
309 {
310
311 const struct dc_stream_state *stream = streams[0];
312 int i = 0;
313 bool ret = false;
314 struct crtc_position position;
315
316 for (i = 0; i < MAX_PIPES; i++) {
317 struct pipe_ctx *pipe =
318 &dc->current_state->res_ctx.pipe_ctx[i];
319
320 if (pipe->stream == stream && pipe->stream_res.stream_enc) {
321 dc->hwss.get_position(&pipe, 1, &position);
322
323 *v_pos = position.vertical_count;
324 *nom_v_pos = position.nominal_vcount;
325 ret = true;
326 }
327 }
328 return ret;
329 }
330
331
332
333
334
335
336
337
338
339
340
341
342 bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
343 bool enable, bool continuous)
344 {
345 int i;
346 struct pipe_ctx *pipe;
347 struct crc_params param;
348 struct timing_generator *tg;
349
350 for (i = 0; i < MAX_PIPES; i++) {
351 pipe = &dc->current_state->res_ctx.pipe_ctx[i];
352 if (pipe->stream == stream)
353 break;
354 }
355
356 if (i == MAX_PIPES)
357 return false;
358
359
360 param.windowa_x_start = 0;
361 param.windowa_y_start = 0;
362 param.windowa_x_end = pipe->stream->timing.h_addressable;
363 param.windowa_y_end = pipe->stream->timing.v_addressable;
364 param.windowb_x_start = 0;
365 param.windowb_y_start = 0;
366 param.windowb_x_end = pipe->stream->timing.h_addressable;
367 param.windowb_y_end = pipe->stream->timing.v_addressable;
368
369
370 param.selection = UNION_WINDOW_A_B;
371 param.continuous_mode = continuous;
372 param.enable = enable;
373
374 tg = pipe->stream_res.tg;
375
376
377 if (tg->funcs->configure_crc)
378 return tg->funcs->configure_crc(tg, ¶m);
379 DC_LOG_WARNING("CRC capture not supported.");
380 return false;
381 }
382
383
384
385
386
387
388
389
390
391
392 bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
393 uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
394 {
395 int i;
396 struct pipe_ctx *pipe;
397 struct timing_generator *tg;
398
399 for (i = 0; i < MAX_PIPES; i++) {
400 pipe = &dc->current_state->res_ctx.pipe_ctx[i];
401 if (pipe->stream == stream)
402 break;
403 }
404
405 if (i == MAX_PIPES)
406 return false;
407
408 tg = pipe->stream_res.tg;
409
410 if (tg->funcs->get_crc)
411 return tg->funcs->get_crc(tg, r_cr, g_y, b_cb);
412 DC_LOG_WARNING("CRC capture not supported.");
413 return false;
414 }
415
416 void dc_stream_set_dither_option(struct dc_stream_state *stream,
417 enum dc_dither_option option)
418 {
419 struct bit_depth_reduction_params params;
420 struct dc_link *link = stream->link;
421 struct pipe_ctx *pipes = NULL;
422 int i;
423
424 for (i = 0; i < MAX_PIPES; i++) {
425 if (link->dc->current_state->res_ctx.pipe_ctx[i].stream ==
426 stream) {
427 pipes = &link->dc->current_state->res_ctx.pipe_ctx[i];
428 break;
429 }
430 }
431
432 if (!pipes)
433 return;
434 if (option > DITHER_OPTION_MAX)
435 return;
436
437 stream->dither_option = option;
438
439 memset(¶ms, 0, sizeof(params));
440 resource_build_bit_depth_reduction_params(stream, ¶ms);
441 stream->bit_depth_params = params;
442
443 if (pipes->plane_res.xfm &&
444 pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth) {
445 pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth(
446 pipes->plane_res.xfm,
447 pipes->plane_res.scl_data.lb_params.depth,
448 &stream->bit_depth_params);
449 }
450
451 pipes->stream_res.opp->funcs->
452 opp_program_bit_depth_reduction(pipes->stream_res.opp, ¶ms);
453 }
454
455 bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream)
456 {
457 int i = 0;
458 bool ret = false;
459 struct pipe_ctx *pipes;
460
461 for (i = 0; i < MAX_PIPES; i++) {
462 if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
463 pipes = &dc->current_state->res_ctx.pipe_ctx[i];
464 dc->hwss.program_gamut_remap(pipes);
465 ret = true;
466 }
467 }
468
469 return ret;
470 }
471
472 bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
473 {
474 int i = 0;
475 bool ret = false;
476 struct pipe_ctx *pipes;
477
478 for (i = 0; i < MAX_PIPES; i++) {
479 if (dc->current_state->res_ctx.pipe_ctx[i].stream
480 == stream) {
481
482 pipes = &dc->current_state->res_ctx.pipe_ctx[i];
483 dc->hwss.program_output_csc(dc,
484 pipes,
485 stream->output_color_space,
486 stream->csc_color_matrix.matrix,
487 pipes->stream_res.opp->inst);
488 ret = true;
489 }
490 }
491
492 return ret;
493 }
494
495 void dc_stream_set_static_screen_events(struct dc *dc,
496 struct dc_stream_state **streams,
497 int num_streams,
498 const struct dc_static_screen_events *events)
499 {
500 int i = 0;
501 int j = 0;
502 struct pipe_ctx *pipes_affected[MAX_PIPES];
503 int num_pipes_affected = 0;
504
505 for (i = 0; i < num_streams; i++) {
506 struct dc_stream_state *stream = streams[i];
507
508 for (j = 0; j < MAX_PIPES; j++) {
509 if (dc->current_state->res_ctx.pipe_ctx[j].stream
510 == stream) {
511 pipes_affected[num_pipes_affected++] =
512 &dc->current_state->res_ctx.pipe_ctx[j];
513 }
514 }
515 }
516
517 dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, events);
518 }
519
520 static void destruct(struct dc *dc)
521 {
522 if (dc->current_state) {
523 dc_release_state(dc->current_state);
524 dc->current_state = NULL;
525 }
526
527 destroy_links(dc);
528
529 if (dc->clk_mgr) {
530 dc_destroy_clk_mgr(dc->clk_mgr);
531 dc->clk_mgr = NULL;
532 }
533
534 dc_destroy_resource_pool(dc);
535
536 if (dc->ctx->gpio_service)
537 dal_gpio_service_destroy(&dc->ctx->gpio_service);
538
539 if (dc->ctx->created_bios)
540 dal_bios_parser_destroy(&dc->ctx->dc_bios);
541
542 dc_perf_trace_destroy(&dc->ctx->perf_trace);
543
544 kfree(dc->ctx);
545 dc->ctx = NULL;
546
547 kfree(dc->bw_vbios);
548 dc->bw_vbios = NULL;
549
550 kfree(dc->bw_dceip);
551 dc->bw_dceip = NULL;
552
553 #ifdef CONFIG_DRM_AMD_DC_DCN1_0
554 kfree(dc->dcn_soc);
555 dc->dcn_soc = NULL;
556
557 kfree(dc->dcn_ip);
558 dc->dcn_ip = NULL;
559
560 #endif
561 #ifdef CONFIG_DRM_AMD_DC_DCN2_0
562 kfree(dc->vm_helper);
563 dc->vm_helper = NULL;
564
565 #endif
566 }
567
/*
 * One-time software initialization of a dc instance: allocates the
 * bandwidth-calculation structures, the dc_context (BIOS parser, perf
 * trace, GPIO service), the resource pool, the clock manager and the
 * initial current_state, then discovers links.  On any failure it unwinds
 * via destruct() and returns false.
 */
static bool construct(struct dc *dc,
		const struct dc_init_data *init_params)
{
	struct dc_context *dc_ctx;
	struct bw_calcs_dceip *dc_dceip;
	struct bw_calcs_vbios *dc_vbios;
#ifdef CONFIG_DRM_AMD_DC_DCN1_0
	struct dcn_soc_bounding_box *dcn_soc;
	struct dcn_ip_params *dcn_ip;
#endif

	enum dce_version dc_version = DCE_VERSION_UNKNOWN;
	dc->config = init_params->flags;

#ifdef CONFIG_DRM_AMD_DC_DCN2_0
	/* GPU VM bookkeeping used by DCN2 address translation. */
	dc->vm_helper = kzalloc(sizeof(struct vm_helper), GFP_KERNEL);
	if (!dc->vm_helper) {
		dm_error("%s: failed to create dc->vm_helper\n", __func__);
		goto fail;
	}

#endif
	memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides));

	/* IP-side parameters for the DCE bandwidth formula. */
	dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL);
	if (!dc_dceip) {
		dm_error("%s: failed to create dceip\n", __func__);
		goto fail;
	}

	dc->bw_dceip = dc_dceip;

	/* VBIOS-derived parameters for the DCE bandwidth formula. */
	dc_vbios = kzalloc(sizeof(*dc_vbios), GFP_KERNEL);
	if (!dc_vbios) {
		dm_error("%s: failed to create vbios\n", __func__);
		goto fail;
	}

	dc->bw_vbios = dc_vbios;
#ifdef CONFIG_DRM_AMD_DC_DCN1_0
	/* DCN1 SoC bounding box and IP parameters for DML. */
	dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL);
	if (!dcn_soc) {
		dm_error("%s: failed to create dcn_soc\n", __func__);
		goto fail;
	}

	dc->dcn_soc = dcn_soc;

	dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL);
	if (!dcn_ip) {
		dm_error("%s: failed to create dcn_ip\n", __func__);
		goto fail;
	}

	dc->dcn_ip = dcn_ip;
#ifdef CONFIG_DRM_AMD_DC_DCN2_0
	dc->soc_bounding_box = init_params->soc_bounding_box;
#endif
#endif

	/* Shared context handed to nearly every DC sub-component. */
	dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
	if (!dc_ctx) {
		dm_error("%s: failed to create ctx\n", __func__);
		goto fail;
	}

	dc_ctx->cgs_device = init_params->cgs_device;
	dc_ctx->driver_context = init_params->driver;
	dc_ctx->dc = dc;
	dc_ctx->asic_id = init_params->asic_id;
	dc_ctx->dc_sink_id_count = 0;
	dc_ctx->dc_stream_id_count = 0;
	dc->ctx = dc_ctx;

	dc_ctx->dce_environment = init_params->dce_environment;

	/* DCE/DCN generation is derived from the ASIC id. */
	dc_version = resource_parse_asic_id(init_params->asic_id);
	dc_ctx->dce_version = dc_version;

	/* Use the caller-supplied BIOS parser if provided; otherwise create
	 * one from the ATOM BIOS image and remember to destroy it later.
	 */
	if (init_params->vbios_override)
		dc_ctx->dc_bios = init_params->vbios_override;
	else {
		struct bp_init_data bp_init_data;

		bp_init_data.ctx = dc_ctx;
		bp_init_data.bios = init_params->asic_id.atombios_base_address;

		dc_ctx->dc_bios = dal_bios_parser_create(
				&bp_init_data, dc_version);

		if (!dc_ctx->dc_bios) {
			ASSERT_CRITICAL(false);
			goto fail;
		}

		dc_ctx->created_bios = true;
	}

	dc_ctx->perf_trace = dc_perf_trace_create();
	if (!dc_ctx->perf_trace) {
		ASSERT_CRITICAL(false);
		goto fail;
	}

	dc_ctx->gpio_service = dal_gpio_service_create(
			dc_version,
			dc_ctx->dce_environment,
			dc_ctx);

	if (!dc_ctx->gpio_service) {
		ASSERT_CRITICAL(false);
		goto fail;
	}

	dc->res_pool = dc_create_resource_pool(dc, init_params, dc_version);
	if (!dc->res_pool)
		goto fail;

	dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
	if (!dc->clk_mgr)
		goto fail;

#ifdef CONFIG_DRM_AMD_DC_DCN2_1
	/* Let the resource pool refine its bandwidth limits now that the
	 * clock manager knows the actual clock tables.
	 */
	if (dc->res_pool->funcs->update_bw_bounding_box)
		dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
#endif

	/* The initial current_state represents "nothing committed yet". */
	dc->current_state = dc_create_state(dc);

	if (!dc->current_state) {
		dm_error("%s: failed to create validate ctx\n", __func__);
		goto fail;
	}

	dc_resource_state_construct(dc, dc->current_state);

	if (!create_links(dc, init_params->num_virtual_links))
		goto fail;

	return true;

fail:
	/* destruct() tolerates a partially-initialized dc. */
	destruct(dc);
	return false;
}
727
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
/* Mark every writeback entry on @stream as disabled; always succeeds. */
static bool disable_all_writeback_pipes_for_stream(
		const struct dc *dc,
		struct dc_stream_state *stream,
		struct dc_state *context)
{
	int wb;

	for (wb = 0; wb < stream->num_wb_info; wb++)
		stream->writeback_info[wb].wb_enabled = false;

	return true;
}
#endif
742
/*
 * A stream that exists in the current state but not in the incoming
 * @context would otherwise keep its planes active with nothing left to
 * update them.  Build a transitional state from the current one, strip the
 * planes (and, on DCN2, writeback) from each such "dangling" stream, apply
 * that to hardware, and install it as the new current state.
 */
static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
{
	int i, j;
	struct dc_state *dangling_context = dc_create_state(dc);
	struct dc_state *current_ctx;

	if (dangling_context == NULL)
		return;

	/* Start from a copy of the current state. */
	dc_resource_state_copy_construct(dc->current_state, dangling_context);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct dc_stream_state *old_stream =
				dc->current_state->res_ctx.pipe_ctx[i].stream;
		bool should_disable = true;

		/* Keep the stream if the new context still carries it. */
		for (j = 0; j < context->stream_count; j++) {
			if (old_stream == context->streams[j]) {
				should_disable = false;
				break;
			}
		}
		if (should_disable && old_stream) {
			dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
			disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);
#endif
			/* plane_count 0: program the stream with no surfaces. */
			dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
		}
	}

	/* Swap in the transitional state and drop the old reference. */
	current_ctx = dc->current_state;
	dc->current_state = dangling_context;
	dc_release_state(current_ctx);
}
778
779
780
781
782
/*
 * Allocate and fully initialize a dc instance: construct() the software
 * state, bring up the hardware via init_hw, then derive the capability
 * fields exposed to the display manager.  Returns NULL on allocation or
 * construction failure.
 */
struct dc *dc_create(const struct dc_init_data *init_params)
{
	struct dc *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
	unsigned int full_pipe_count;

	if (NULL == dc)
		goto alloc_fail;

	if (false == construct(dc, init_params))
		goto construct_fail;

	dc->hwss.init_hw(dc);

	/* The underlay pipe (if present) cannot drive its own stream, so it
	 * does not count toward the stream limit.
	 */
	full_pipe_count = dc->res_pool->pipe_count;
	if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
		full_pipe_count--;
	dc->caps.max_streams = min(
			full_pipe_count,
			dc->res_pool->stream_enc_count);

	dc->caps.max_links = dc->link_count;
	dc->caps.max_audios = dc->res_pool->audio_count;
	dc->caps.linear_pitch_alignment = 64;

	dc->versions.dc_ver = DC_VER;

	if (dc->res_pool->dmcu != NULL)
		dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;

	dc->build_id = DC_BUILD_ID;

	DC_LOG_DC("Display Core initialized\n");

	return dc;

construct_fail:
	kfree(dc);

alloc_fail:
	return NULL;
}
828
/*
 * Stub: callback registration is not implemented in this build; the
 * parameters are accepted and ignored.
 */
void dc_init_callbacks(struct dc *dc,
		const struct dc_callback_init *init_params)
{
}
833
834 void dc_destroy(struct dc **dc)
835 {
836 destruct(*dc);
837 kfree(*dc);
838 *dc = NULL;
839 }
840
841 static void enable_timing_multisync(
842 struct dc *dc,
843 struct dc_state *ctx)
844 {
845 int i = 0, multisync_count = 0;
846 int pipe_count = dc->res_pool->pipe_count;
847 struct pipe_ctx *multisync_pipes[MAX_PIPES] = { NULL };
848
849 for (i = 0; i < pipe_count; i++) {
850 if (!ctx->res_ctx.pipe_ctx[i].stream ||
851 !ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.enabled)
852 continue;
853 if (ctx->res_ctx.pipe_ctx[i].stream == ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.event_source)
854 continue;
855 multisync_pipes[multisync_count] = &ctx->res_ctx.pipe_ctx[i];
856 multisync_count++;
857 }
858
859 if (multisync_count > 0) {
860 dc->hwss.enable_per_frame_crtc_position_reset(
861 dc, multisync_count, multisync_pipes);
862 }
863 }
864
/*
 * Group pipes whose streams have synchronizable timings and enable timing
 * synchronization on each group of two or more.  Only "top" pipes with a
 * stream are considered; blended (bottom) pipes follow their top pipe.
 */
static void program_timing_sync(
		struct dc *dc,
		struct dc_state *ctx)
{
	int i, j, k;
	int group_index = 0;
	int num_group = 0;
	int pipe_count = dc->res_pool->pipe_count;
	struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };

	/* Candidate set: every top pipe with a stream, indexed by pipe id. */
	for (i = 0; i < pipe_count; i++) {
		if (!ctx->res_ctx.pipe_ctx[i].stream || ctx->res_ctx.pipe_ctx[i].top_pipe)
			continue;

		unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
	}

	for (i = 0; i < pipe_count; i++) {
		int group_size = 1;
		struct pipe_ctx *pipe_set[MAX_PIPES];

		if (!unsynced_pipes[i])
			continue;

		/* Seed a new group with this pipe and remove it from the
		 * candidate set.
		 */
		pipe_set[0] = unsynced_pipes[i];
		unsynced_pipes[i] = NULL;

		/* Pull every remaining candidate whose timing can be synced
		 * with the seed into this group.
		 */
		for (j = i + 1; j < pipe_count; j++) {
			if (!unsynced_pipes[j])
				continue;

			if (resource_are_streams_timing_synchronizable(
					unsynced_pipes[j]->stream,
					pipe_set[0]->stream)) {
				pipe_set[group_size] = unsynced_pipes[j];
				unsynced_pipes[j] = NULL;
				group_size++;
			}
		}

		/* Move the first pipe that has a plane to slot 0 so it acts
		 * as the sync master.
		 */
		for (j = 0; j < group_size; j++) {
			struct pipe_ctx *temp;

			if (pipe_set[j]->plane_state) {
				if (j == 0)
					break;

				temp = pipe_set[0];
				pipe_set[0] = pipe_set[j];
				pipe_set[j] = temp;
				break;
			}
		}

		/* Record per-stream sync-group info; index 0 is the master. */
		for (k = 0; k < group_size; k++) {
			struct dc_stream_status *status = dc_stream_get_status_from_state(ctx, pipe_set[k]->stream);

			status->timing_sync_info.group_id = num_group;
			status->timing_sync_info.group_size = group_size;
			if (k == 0)
				status->timing_sync_info.master = true;
			else
				status->timing_sync_info.master = false;

		}

		/* NOTE(review): this loop deliberately reuses `j` from the
		 * master-search loop above, resuming at j + 1 so the chosen
		 * master is skipped while any OTHER pipe with a plane is
		 * dropped from the group (already synced).  The swap-with-
		 * last + j-- keeps the scan correct as group_size shrinks.
		 * Also note group_size recorded in timing_sync_info above is
		 * the pre-pruning size — confirm that is intended.
		 */
		for (j = j + 1; j < group_size; j++) {
			if (pipe_set[j]->plane_state) {
				group_size--;
				pipe_set[j] = pipe_set[group_size];
				j--;
			}
		}

		/* A group of one needs no synchronization hardware. */
		if (group_size > 1) {
			dc->hwss.enable_timing_synchronization(
				dc, group_index, group_size, pipe_set);
			group_index++;
		}
		num_group++;
	}
}
952
953 static bool context_changed(
954 struct dc *dc,
955 struct dc_state *context)
956 {
957 uint8_t i;
958
959 if (context->stream_count != dc->current_state->stream_count)
960 return true;
961
962 for (i = 0; i < dc->current_state->stream_count; i++) {
963 if (dc->current_state->streams[i] != context->streams[i])
964 return true;
965 }
966
967 return false;
968 }
969
/*
 * Check whether the timing the firmware/VBIOS is already driving on
 * @sink's link matches @crtc_timing closely enough to be adopted without a
 * full mode set (seamless boot).  For DP links the pixel clock must also
 * match exactly.
 */
bool dc_validate_seamless_boot_timing(const struct dc *dc,
				const struct dc_sink *sink,
				struct dc_crtc_timing *crtc_timing)
{
	struct timing_generator *tg;
	struct dc_link *link = sink->link;
	unsigned int enc_inst, tg_inst;

	/* Nothing to reuse if the DIG link encoder is not already enabled. */
	if (!link->link_enc->funcs->is_dig_enabled(link->link_enc))
		return false;

	/* Identify the stream encoder (DIG frontend) feeding this link. */
	enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);

	/* Sanity-check the instance against both pipe and encoder counts. */
	if (enc_inst >= dc->res_pool->pipe_count)
		return false;

	if (enc_inst >= dc->res_pool->stream_enc_count)
		return false;

	/* Map the stream encoder back to its source timing generator. */
	tg_inst = dc->res_pool->stream_enc[enc_inst]->funcs->dig_source_otg(
		dc->res_pool->stream_enc[enc_inst]);

	if (tg_inst >= dc->res_pool->timing_generator_count)
		return false;

	tg = dc->res_pool->timing_generators[tg_inst];

	if (!tg->funcs->is_matching_timing)
		return false;

	if (!tg->funcs->is_matching_timing(tg, crtc_timing))
		return false;

	/* DP additionally requires an exact pixel clock match. */
	if (dc_is_dp_signal(link->connector_signal)) {
		unsigned int pix_clk_100hz;

		dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
			dc->res_pool->dp_clock_source,
			tg_inst, &pix_clk_100hz);

		if (crtc_timing->pix_clk_100hz != pix_clk_100hz)
			return false;

	}

	return true;
}
1025
1026 bool dc_enable_stereo(
1027 struct dc *dc,
1028 struct dc_state *context,
1029 struct dc_stream_state *streams[],
1030 uint8_t stream_count)
1031 {
1032 bool ret = true;
1033 int i, j;
1034 struct pipe_ctx *pipe;
1035
1036 for (i = 0; i < MAX_PIPES; i++) {
1037 if (context != NULL)
1038 pipe = &context->res_ctx.pipe_ctx[i];
1039 else
1040 pipe = &dc->current_state->res_ctx.pipe_ctx[i];
1041 for (j = 0 ; pipe && j < stream_count; j++) {
1042 if (streams[j] && streams[j] == pipe->stream &&
1043 dc->hwss.setup_stereo)
1044 dc->hwss.setup_stereo(pipe, dc);
1045 }
1046 }
1047
1048 return ret;
1049 }
1050
1051
1052
1053
1054
/*
 * Apply @context to hardware unconditionally (no context_changed() check).
 * The programming order is deliberate: dangling planes are disabled first,
 * unchanged streams are programmed before the full hw apply, and streams
 * whose mode changed are (re)programmed after it.  On success @context
 * becomes the new current state (reference transferred + retained).
 */
static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context)
{
	struct dc_bios *dcb = dc->ctx->dc_bios;
	enum dc_status result = DC_ERROR_UNEXPECTED;
	struct pipe_ctx *pipe;
	int i, k, l;
	struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};

	disable_dangling_plane(dc, context);

	for (i = 0; i < context->stream_count; i++)
		dc_streams[i] = context->streams[i];

	/* Leave VBIOS/VGA mode before any real programming. */
	if (!dcb->funcs->is_accelerated_mode(dcb))
		dc->hwss.enable_accelerated_mode(dc, context);

	/* Any stream flagged for seamless boot defers bandwidth work. */
	for (i = 0; i < context->stream_count; i++) {
		if (context->streams[i]->apply_seamless_boot_optimization)
			dc->optimize_seamless_boot = true;
	}

	if (!dc->optimize_seamless_boot)
		dc->hwss.prepare_bandwidth(dc, context);

	/* First pass: program surfaces only for streams whose mode did NOT
	 * change; changed streams are handled after apply_ctx_to_hw below.
	 */
	for (i = 0; i < context->stream_count; i++) {
		if (context->streams[i]->mode_changed)
			continue;

		dc->hwss.apply_ctx_for_surface(
			dc, context->streams[i],
			context->stream_status[i].plane_count,
			context);
	}

	/* Ensure all MPCC disconnects from the pass above have landed. */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];
		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
	}

	result = dc->hwss.apply_ctx_to_hw(dc, context);

	if (result != DC_OK)
		return result;

	if (context->stream_count > 1 && !dc->debug.disable_timing_sync) {
		enable_timing_multisync(dc, context);
		program_timing_sync(dc, context);
	}

	/* Second pass: program surfaces for streams whose mode DID change,
	 * then re-run stereo setup on their pipes.
	 */
	for (i = 0; i < context->stream_count; i++) {
		const struct dc_link *link = context->streams[i]->link;

		if (!context->streams[i]->mode_changed)
			continue;

		dc->hwss.apply_ctx_for_surface(
				dc, context->streams[i],
				context->stream_status[i].plane_count,
				context);

		for (k = 0; k < MAX_PIPES; k++) {
			pipe = &context->res_ctx.pipe_ctx[k];

			for (l = 0 ; pipe && l < context->stream_count; l++) {
				if (context->streams[l] &&
					context->streams[l] == pipe->stream &&
					dc->hwss.setup_stereo)
					dc->hwss.setup_stereo(pipe, dc);
			}
		}

		CONN_MSG_MODE(link, "{%dx%d, %dx%d@%dKhz}",
				context->streams[i]->timing.h_addressable,
				context->streams[i]->timing.v_addressable,
				context->streams[i]->timing.h_total,
				context->streams[i]->timing.v_total,
				context->streams[i]->timing.pix_clk_100hz / 10);
	}

	dc_enable_stereo(dc, context, dc_streams, context->stream_count);

	if (!dc->optimize_seamless_boot)
		/* Drop clocks back down now that programming is done. */
		dc->hwss.optimize_bandwidth(dc, context);

	for (i = 0; i < context->stream_count; i++)
		context->streams[i]->mode_changed = false;

	memset(&context->commit_hints, 0, sizeof(context->commit_hints));

	/* Swap @context in as the current state. */
	dc_release_state(dc->current_state);

	dc->current_state = context;

	dc_retain_state(dc->current_state);

	return result;
}
1162
1163 bool dc_commit_state(struct dc *dc, struct dc_state *context)
1164 {
1165 enum dc_status result = DC_ERROR_UNEXPECTED;
1166 int i;
1167
1168 if (false == context_changed(dc, context))
1169 return DC_OK;
1170
1171 DC_LOG_DC("%s: %d streams\n",
1172 __func__, context->stream_count);
1173
1174 for (i = 0; i < context->stream_count; i++) {
1175 struct dc_stream_state *stream = context->streams[i];
1176
1177 dc_stream_log(dc, stream);
1178 }
1179
1180 result = dc_commit_state_no_check(dc, context);
1181
1182 return (result == DC_OK);
1183 }
1184
1185 static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context)
1186 {
1187 int i;
1188 struct pipe_ctx *pipe;
1189
1190 for (i = 0; i < MAX_PIPES; i++) {
1191 pipe = &context->res_ctx.pipe_ctx[i];
1192
1193 if (!pipe->plane_state)
1194 continue;
1195
1196
1197 pipe->plane_state->status.is_flip_pending = false;
1198 dc->hwss.update_pending_status(pipe);
1199 if (pipe->plane_state->status.is_flip_pending)
1200 return true;
1201 }
1202 return false;
1203 }
1204
1205 bool dc_post_update_surfaces_to_stream(struct dc *dc)
1206 {
1207 int i;
1208 struct dc_state *context = dc->current_state;
1209
1210 if (!dc->optimized_required || dc->optimize_seamless_boot)
1211 return true;
1212
1213 post_surface_trace(dc);
1214
1215 if (is_flip_pending_in_pipes(dc, context))
1216 return true;
1217
1218 for (i = 0; i < dc->res_pool->pipe_count; i++)
1219 if (context->res_ctx.pipe_ctx[i].stream == NULL ||
1220 context->res_ctx.pipe_ctx[i].plane_state == NULL) {
1221 context->res_ctx.pipe_ctx[i].pipe_idx = i;
1222 dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
1223 }
1224
1225 dc->optimized_required = false;
1226
1227 dc->hwss.optimize_bandwidth(dc, context);
1228 return true;
1229 }
1230
1231 struct dc_state *dc_create_state(struct dc *dc)
1232 {
1233 struct dc_state *context = kvzalloc(sizeof(struct dc_state),
1234 GFP_KERNEL);
1235
1236 if (!context)
1237 return NULL;
1238
1239
1240
1241
1242 #ifdef CONFIG_DRM_AMD_DC_DCN1_0
1243 memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib));
1244 #endif
1245
1246 kref_init(&context->refcount);
1247
1248 return context;
1249 }
1250
1251 struct dc_state *dc_copy_state(struct dc_state *src_ctx)
1252 {
1253 int i, j;
1254 struct dc_state *new_ctx = kvmalloc(sizeof(struct dc_state), GFP_KERNEL);
1255
1256 if (!new_ctx)
1257 return NULL;
1258 memcpy(new_ctx, src_ctx, sizeof(struct dc_state));
1259
1260 for (i = 0; i < MAX_PIPES; i++) {
1261 struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i];
1262
1263 if (cur_pipe->top_pipe)
1264 cur_pipe->top_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx];
1265
1266 if (cur_pipe->bottom_pipe)
1267 cur_pipe->bottom_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx];
1268
1269 if (cur_pipe->prev_odm_pipe)
1270 cur_pipe->prev_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->prev_odm_pipe->pipe_idx];
1271
1272 if (cur_pipe->next_odm_pipe)
1273 cur_pipe->next_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->next_odm_pipe->pipe_idx];
1274
1275 }
1276
1277 for (i = 0; i < new_ctx->stream_count; i++) {
1278 dc_stream_retain(new_ctx->streams[i]);
1279 for (j = 0; j < new_ctx->stream_status[i].plane_count; j++)
1280 dc_plane_state_retain(
1281 new_ctx->stream_status[i].plane_states[j]);
1282 }
1283
1284 kref_init(&new_ctx->refcount);
1285
1286 return new_ctx;
1287 }
1288
1289 void dc_retain_state(struct dc_state *context)
1290 {
1291 kref_get(&context->refcount);
1292 }
1293
1294 static void dc_state_free(struct kref *kref)
1295 {
1296 struct dc_state *context = container_of(kref, struct dc_state, refcount);
1297 dc_resource_state_destruct(context);
1298 kvfree(context);
1299 }
1300
1301 void dc_release_state(struct dc_state *context)
1302 {
1303 kref_put(&context->refcount, dc_state_free);
1304 }
1305
1306 bool dc_set_generic_gpio_for_stereo(bool enable,
1307 struct gpio_service *gpio_service)
1308 {
1309 enum gpio_result gpio_result = GPIO_RESULT_NON_SPECIFIC_ERROR;
1310 struct gpio_pin_info pin_info;
1311 struct gpio *generic;
1312 struct gpio_generic_mux_config *config = kzalloc(sizeof(struct gpio_generic_mux_config),
1313 GFP_KERNEL);
1314
1315 if (!config)
1316 return false;
1317 pin_info = dal_gpio_get_generic_pin_info(gpio_service, GPIO_ID_GENERIC, 0);
1318
1319 if (pin_info.mask == 0xFFFFFFFF || pin_info.offset == 0xFFFFFFFF) {
1320 kfree(config);
1321 return false;
1322 } else {
1323 generic = dal_gpio_service_create_generic_mux(
1324 gpio_service,
1325 pin_info.offset,
1326 pin_info.mask);
1327 }
1328
1329 if (!generic) {
1330 kfree(config);
1331 return false;
1332 }
1333
1334 gpio_result = dal_gpio_open(generic, GPIO_MODE_OUTPUT);
1335
1336 config->enable_output_from_mux = enable;
1337 config->mux_select = GPIO_SIGNAL_SOURCE_PASS_THROUGH_STEREO_SYNC;
1338
1339 if (gpio_result == GPIO_RESULT_OK)
1340 gpio_result = dal_mux_setup_config(generic, config);
1341
1342 if (gpio_result == GPIO_RESULT_OK) {
1343 dal_gpio_close(generic);
1344 dal_gpio_destroy_generic_mux(&generic);
1345 kfree(config);
1346 return true;
1347 } else {
1348 dal_gpio_close(generic);
1349 dal_gpio_destroy_generic_mux(&generic);
1350 kfree(config);
1351 return false;
1352 }
1353 }
1354
1355 static bool is_surface_in_context(
1356 const struct dc_state *context,
1357 const struct dc_plane_state *plane_state)
1358 {
1359 int j;
1360
1361 for (j = 0; j < MAX_PIPES; j++) {
1362 const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1363
1364 if (plane_state == pipe_ctx->plane_state) {
1365 return true;
1366 }
1367 }
1368
1369 return false;
1370 }
1371
1372 static enum surface_update_type get_plane_info_update_type(const struct dc_surface_update *u)
1373 {
1374 union surface_update_flags *update_flags = &u->surface->update_flags;
1375 enum surface_update_type update_type = UPDATE_TYPE_FAST;
1376
1377 if (!u->plane_info)
1378 return UPDATE_TYPE_FAST;
1379
1380 if (u->plane_info->color_space != u->surface->color_space) {
1381 update_flags->bits.color_space_change = 1;
1382 elevate_update_type(&update_type, UPDATE_TYPE_MED);
1383 }
1384
1385 if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror) {
1386 update_flags->bits.horizontal_mirror_change = 1;
1387 elevate_update_type(&update_type, UPDATE_TYPE_MED);
1388 }
1389
1390 if (u->plane_info->rotation != u->surface->rotation) {
1391 update_flags->bits.rotation_change = 1;
1392 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
1393 }
1394
1395 if (u->plane_info->format != u->surface->format) {
1396 update_flags->bits.pixel_format_change = 1;
1397 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
1398 }
1399
1400 if (u->plane_info->stereo_format != u->surface->stereo_format) {
1401 update_flags->bits.stereo_format_change = 1;
1402 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
1403 }
1404
1405 if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha) {
1406 update_flags->bits.per_pixel_alpha_change = 1;
1407 elevate_update_type(&update_type, UPDATE_TYPE_MED);
1408 }
1409
1410 if (u->plane_info->global_alpha_value != u->surface->global_alpha_value) {
1411 update_flags->bits.global_alpha_change = 1;
1412 elevate_update_type(&update_type, UPDATE_TYPE_MED);
1413 }
1414
1415 if (u->plane_info->sdr_white_level != u->surface->sdr_white_level) {
1416 update_flags->bits.sdr_white_level = 1;
1417 elevate_update_type(&update_type, UPDATE_TYPE_MED);
1418 }
1419
1420 if (u->plane_info->dcc.enable != u->surface->dcc.enable
1421 || u->plane_info->dcc.independent_64b_blks != u->surface->dcc.independent_64b_blks
1422 || u->plane_info->dcc.meta_pitch != u->surface->dcc.meta_pitch) {
1423 update_flags->bits.dcc_change = 1;
1424 elevate_update_type(&update_type, UPDATE_TYPE_MED);
1425 }
1426
1427 if (resource_pixel_format_to_bpp(u->plane_info->format) !=
1428 resource_pixel_format_to_bpp(u->surface->format)) {
1429
1430
1431
1432 update_flags->bits.bpp_change = 1;
1433 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
1434 }
1435
1436 if (u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch
1437 || u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch
1438 || u->plane_info->plane_size.chroma_pitch != u->surface->plane_size.chroma_pitch) {
1439 update_flags->bits.plane_size_change = 1;
1440 elevate_update_type(&update_type, UPDATE_TYPE_MED);
1441 }
1442
1443
1444 if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
1445 sizeof(union dc_tiling_info)) != 0) {
1446 update_flags->bits.swizzle_change = 1;
1447 elevate_update_type(&update_type, UPDATE_TYPE_MED);
1448
1449
1450
1451
1452 if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR) {
1453
1454
1455
1456 update_flags->bits.bandwidth_change = 1;
1457 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
1458 }
1459 }
1460
1461
1462 return update_type;
1463 }
1464
/*
 * get_scaling_info_update_type - Classify how disruptive a scaling update is.
 *
 * Compares incoming src/dst/clip rects against the surface's current rects,
 * sets matching update_flags bits, and maps them to an update type:
 * bandwidth/clock changes need FULL, scaling/position changes need MED,
 * otherwise FAST.  No scaling_info supplied means FAST.
 */
static enum surface_update_type get_scaling_info_update_type(
		const struct dc_surface_update *u)
{
	union surface_update_flags *update_flags = &u->surface->update_flags;

	if (!u->scaling_info)
		return UPDATE_TYPE_FAST;

	if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width
			|| u->scaling_info->clip_rect.height != u->surface->clip_rect.height
			|| u->scaling_info->dst_rect.width != u->surface->dst_rect.width
			|| u->scaling_info->dst_rect.height != u->surface->dst_rect.height) {
		update_flags->bits.scaling_change = 1;

		/*
		 * Destination shrinking while still smaller than the source
		 * means more downscaling, which raises bandwidth demand.
		 */
		if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width
			|| u->scaling_info->dst_rect.height < u->surface->dst_rect.height)
				&& (u->scaling_info->dst_rect.width < u->surface->src_rect.width
					|| u->scaling_info->dst_rect.height < u->surface->src_rect.height))

			update_flags->bits.bandwidth_change = 1;
	}

	if (u->scaling_info->src_rect.width != u->surface->src_rect.width
		|| u->scaling_info->src_rect.height != u->surface->src_rect.height) {

		update_flags->bits.scaling_change = 1;
		/* A growing source viewport may require a DPP clock bump. */
		if (u->scaling_info->src_rect.width > u->surface->src_rect.width
				&& u->scaling_info->src_rect.height > u->surface->src_rect.height)

			update_flags->bits.clock_change = 1;
	}

	if (u->scaling_info->src_rect.x != u->surface->src_rect.x
			|| u->scaling_info->src_rect.y != u->surface->src_rect.y
			|| u->scaling_info->clip_rect.x != u->surface->clip_rect.x
			|| u->scaling_info->clip_rect.y != u->surface->clip_rect.y
			|| u->scaling_info->dst_rect.x != u->surface->dst_rect.x
			|| u->scaling_info->dst_rect.y != u->surface->dst_rect.y)
		update_flags->bits.position_change = 1;

	if (update_flags->bits.clock_change
			|| update_flags->bits.bandwidth_change)
		return UPDATE_TYPE_FULL;

	if (update_flags->bits.scaling_change
			|| update_flags->bits.position_change)
		return UPDATE_TYPE_MED;

	return UPDATE_TYPE_FAST;
}
1515
/*
 * det_surface_update - Determine the overall update type for one surface.
 *
 * Resets the surface's update_flags, short-circuits to FULL for brand-new
 * planes or a forced full update, then folds in the plane-info and scaling
 * classifications plus the simpler flag-only checks (flip address, transfer
 * func, CSC, gamma, coefficient reduction).
 */
static enum surface_update_type det_surface_update(const struct dc *dc,
		const struct dc_surface_update *u)
{
	const struct dc_state *context = dc->current_state;
	enum surface_update_type type;
	enum surface_update_type overall_type = UPDATE_TYPE_FAST;
	union surface_update_flags *update_flags = &u->surface->update_flags;

	update_flags->raw = 0; /* Reset all flags */

	if (u->flip_addr)
		update_flags->bits.addr_update = 1;

	/* A plane not yet in the current context must be fully programmed. */
	if (!is_surface_in_context(context, u->surface)) {
		update_flags->bits.new_plane = 1;
		return UPDATE_TYPE_FULL;
	}

	if (u->surface->force_full_update) {
		update_flags->bits.full_update = 1;
		return UPDATE_TYPE_FULL;
	}

	type = get_plane_info_update_type(u);
	elevate_update_type(&overall_type, type);

	type = get_scaling_info_update_type(u);
	elevate_update_type(&overall_type, type);

	if (u->flip_addr)
		update_flags->bits.addr_update = 1;

	if (u->in_transfer_func)
		update_flags->bits.in_transfer_func_change = 1;

	if (u->input_csc_color_matrix)
		update_flags->bits.input_csc_change = 1;

	if (u->coeff_reduction_factor)
		update_flags->bits.coeff_reduction_change = 1;

	if (u->gamma) {
		enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN;

		/*
		 * NOTE(review): u->surface was already dereferenced above, so
		 * the "else if (u->surface)" guard can never be false here.
		 */
		if (u->plane_info)
			format = u->plane_info->format;
		else if (u->surface)
			format = u->surface->format;

		/* Only LUT-based formats make a gamma update significant. */
		if (dce_use_lut(format))
			update_flags->bits.gamma_change = 1;
	}

	if (update_flags->bits.in_transfer_func_change) {
		type = UPDATE_TYPE_MED;
		elevate_update_type(&overall_type, type);
	}

	if (update_flags->bits.input_csc_change
			|| update_flags->bits.coeff_reduction_change
			|| update_flags->bits.gamma_change) {
		type = UPDATE_TYPE_FULL;
		elevate_update_type(&overall_type, type);
	}

	return overall_type;
}
1583
/*
 * check_update_surfaces_for_stream - Overall update type for a stream update.
 *
 * Any stream-level change that affects timing, bandwidth, or power (src/dst
 * rect, output transfer func, ABM, DPMS, writeback) forces FULL immediately;
 * otherwise the result is the maximum over all per-surface classifications.
 * A plane-count mismatch with the current status also forces FULL.
 */
static enum surface_update_type check_update_surfaces_for_stream(
		struct dc *dc,
		struct dc_surface_update *updates,
		int surface_count,
		struct dc_stream_update *stream_update,
		const struct dc_stream_status *stream_status)
{
	int i;
	enum surface_update_type overall_type = UPDATE_TYPE_FAST;

	if (stream_status == NULL || stream_status->plane_count != surface_count)
		return UPDATE_TYPE_FULL;

	/* some stream updates require passive update */
	if (stream_update) {
		/* Both dimensions must be set for the rect to count as a change. */
		if ((stream_update->src.height != 0) &&
				(stream_update->src.width != 0))
			return UPDATE_TYPE_FULL;

		if ((stream_update->dst.height != 0) &&
				(stream_update->dst.width != 0))
			return UPDATE_TYPE_FULL;

		if (stream_update->out_transfer_func)
			return UPDATE_TYPE_FULL;

		if (stream_update->abm_level)
			return UPDATE_TYPE_FULL;

		if (stream_update->dpms_off)
			return UPDATE_TYPE_FULL;

#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
		if (stream_update->wb_update)
			return UPDATE_TYPE_FULL;
#endif
	}

	for (i = 0 ; i < surface_count; i++) {
		enum surface_update_type type =
				det_surface_update(dc, &updates[i]);

		if (type == UPDATE_TYPE_FULL)
			return type;

		elevate_update_type(&overall_type, type);
	}

	return overall_type;
}
1634
1635
1636
1637
1638
1639
/*
 * dc_check_update_surfaces_for_stream - Public entry for update classification.
 *
 * Clears each surface's update_flags, runs the internal classification, and
 * on FULL sets all flag bits so downstream programming treats everything as
 * dirty.  A FAST update while the current clocks differ from the validated
 * ones marks the dc as needing a post-update optimize pass.
 */
enum surface_update_type dc_check_update_surfaces_for_stream(
		struct dc *dc,
		struct dc_surface_update *updates,
		int surface_count,
		struct dc_stream_update *stream_update,
		const struct dc_stream_status *stream_status)
{
	int i;
	enum surface_update_type type;

	for (i = 0; i < surface_count; i++)
		updates[i].surface->update_flags.raw = 0;

	type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
	if (type == UPDATE_TYPE_FULL)
		for (i = 0; i < surface_count; i++)
			updates[i].surface->update_flags.raw = 0xFFFFFFFF;

	/* Compare clocks only up to (not including) prev_p_state_change_support. */
	if (type == UPDATE_TYPE_FAST && memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0)
		dc->optimized_required = true;

	return type;
}
1663
1664 static struct dc_stream_status *stream_get_status(
1665 struct dc_state *ctx,
1666 struct dc_stream_state *stream)
1667 {
1668 uint8_t i;
1669
1670 for (i = 0; i < ctx->stream_count; i++) {
1671 if (stream == ctx->streams[i]) {
1672 return &ctx->stream_status[i];
1673 }
1674 }
1675
1676 return NULL;
1677 }
1678
1679 static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;
1680
/*
 * copy_surface_update_to_plane - Apply a dc_surface_update onto its plane.
 *
 * Each optional section of the update (flip address, scaling, plane info,
 * gamma, transfer functions, CSC, coefficient reduction) is copied into the
 * plane state only when present.  Flip timestamps are also folded into the
 * plane's flip-time ring buffer for latency statistics.
 */
static void copy_surface_update_to_plane(
		struct dc_plane_state *surface,
		struct dc_surface_update *srf_update)
{
	if (srf_update->flip_addr) {
		surface->address = srf_update->flip_addr->address;
		surface->flip_immediate =
			srf_update->flip_addr->flip_immediate;
		/* Record the inter-flip interval in the stats ring buffer. */
		surface->time.time_elapsed_in_us[surface->time.index] =
			srf_update->flip_addr->flip_timestamp_in_us -
			surface->time.prev_update_time_in_us;
		surface->time.prev_update_time_in_us =
			srf_update->flip_addr->flip_timestamp_in_us;
		surface->time.index++;
		if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX)
			surface->time.index = 0;
	}

	if (srf_update->scaling_info) {
		surface->scaling_quality =
				srf_update->scaling_info->scaling_quality;
		surface->dst_rect =
				srf_update->scaling_info->dst_rect;
		surface->src_rect =
				srf_update->scaling_info->src_rect;
		surface->clip_rect =
				srf_update->scaling_info->clip_rect;
	}

	if (srf_update->plane_info) {
		surface->color_space =
				srf_update->plane_info->color_space;
		surface->format =
				srf_update->plane_info->format;
		surface->plane_size =
				srf_update->plane_info->plane_size;
		surface->rotation =
				srf_update->plane_info->rotation;
		surface->horizontal_mirror =
				srf_update->plane_info->horizontal_mirror;
		surface->stereo_format =
				srf_update->plane_info->stereo_format;
		surface->tiling_info =
				srf_update->plane_info->tiling_info;
		surface->visible =
				srf_update->plane_info->visible;
		surface->per_pixel_alpha =
				srf_update->plane_info->per_pixel_alpha;
		surface->global_alpha =
				srf_update->plane_info->global_alpha;
		surface->global_alpha_value =
				srf_update->plane_info->global_alpha_value;
		surface->dcc =
				srf_update->plane_info->dcc;
		surface->sdr_white_level =
				srf_update->plane_info->sdr_white_level;
		surface->layer_index =
				srf_update->plane_info->layer_index;
	}

	/* Skip the copy when update and plane already share the same object. */
	if (srf_update->gamma &&
			(surface->gamma_correction !=
					srf_update->gamma)) {
		memcpy(&surface->gamma_correction->entries,
			&srf_update->gamma->entries,
			sizeof(struct dc_gamma_entries));
		surface->gamma_correction->is_identity =
			srf_update->gamma->is_identity;
		surface->gamma_correction->num_entries =
			srf_update->gamma->num_entries;
		surface->gamma_correction->type =
			srf_update->gamma->type;
	}

	if (srf_update->in_transfer_func &&
			(surface->in_transfer_func !=
				srf_update->in_transfer_func)) {
		surface->in_transfer_func->sdr_ref_white_level =
			srf_update->in_transfer_func->sdr_ref_white_level;
		surface->in_transfer_func->tf =
			srf_update->in_transfer_func->tf;
		surface->in_transfer_func->type =
			srf_update->in_transfer_func->type;
		memcpy(&surface->in_transfer_func->tf_pts,
			&srf_update->in_transfer_func->tf_pts,
			sizeof(struct dc_transfer_func_distributed_points));
	}

#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
	if (srf_update->func_shaper &&
			(surface->in_shaper_func !=
			srf_update->func_shaper))
		memcpy(surface->in_shaper_func, srf_update->func_shaper,
		sizeof(*surface->in_shaper_func));

	if (srf_update->lut3d_func &&
			(surface->lut3d_func !=
			srf_update->lut3d_func))
		memcpy(surface->lut3d_func, srf_update->lut3d_func,
		sizeof(*surface->lut3d_func));

	if (srf_update->blend_tf &&
			(surface->blend_tf !=
			srf_update->blend_tf))
		memcpy(surface->blend_tf, srf_update->blend_tf,
		sizeof(*surface->blend_tf));

#endif
	if (srf_update->input_csc_color_matrix)
		surface->input_csc_color_matrix =
			*srf_update->input_csc_color_matrix;

	if (srf_update->coeff_reduction_factor)
		surface->coeff_reduction_factor =
			*srf_update->coeff_reduction_factor;
}
1797
/*
 * copy_stream_update_to_stream - Apply a dc_stream_update onto its stream.
 *
 * Every non-NULL field of the update is copied into the stream state.
 * For DSC, the new config is validated against bandwidth first and rolled
 * back to the previous config if validation fails.
 */
static void copy_stream_update_to_stream(struct dc *dc,
					 struct dc_state *context,
					 struct dc_stream_state *stream,
					 const struct dc_stream_update *update)
{
	if (update == NULL || stream == NULL)
		return;

	/* Rects only apply when both dimensions are non-zero. */
	if (update->src.height && update->src.width)
		stream->src = update->src;

	if (update->dst.height && update->dst.width)
		stream->dst = update->dst;

	if (update->out_transfer_func &&
	    stream->out_transfer_func != update->out_transfer_func) {
		stream->out_transfer_func->sdr_ref_white_level =
			update->out_transfer_func->sdr_ref_white_level;
		stream->out_transfer_func->tf = update->out_transfer_func->tf;
		stream->out_transfer_func->type =
			update->out_transfer_func->type;
		memcpy(&stream->out_transfer_func->tf_pts,
		       &update->out_transfer_func->tf_pts,
		       sizeof(struct dc_transfer_func_distributed_points));
	}

	if (update->hdr_static_metadata)
		stream->hdr_static_metadata = *update->hdr_static_metadata;

	if (update->abm_level)
		stream->abm_level = *update->abm_level;

	if (update->periodic_interrupt0)
		stream->periodic_interrupt0 = *update->periodic_interrupt0;

	if (update->periodic_interrupt1)
		stream->periodic_interrupt1 = *update->periodic_interrupt1;

	if (update->gamut_remap)
		stream->gamut_remap_matrix = *update->gamut_remap;

	/* update sampling rate in DML to rebuild watermarks */

	if (update->output_color_space)
		stream->output_color_space = *update->output_color_space;

	if (update->output_csc_transform)
		stream->csc_color_matrix = *update->output_csc_transform;

	if (update->vrr_infopacket)
		stream->vrr_infopacket = *update->vrr_infopacket;

	if (update->dpms_off)
		stream->dpms_off = *update->dpms_off;

	if (update->vsc_infopacket)
		stream->vsc_infopacket = *update->vsc_infopacket;

	if (update->vsp_infopacket)
		stream->vsp_infopacket = *update->vsp_infopacket;

	if (update->dither_option)
		stream->dither_option = *update->dither_option;
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
	/* update current stream with writeback info */
	if (update->wb_update) {
		int i;

		stream->num_wb_info = update->wb_update->num_wb_info;
		ASSERT(stream->num_wb_info <= MAX_DWB_PIPES);
		for (i = 0; i < stream->num_wb_info; i++)
			stream->writeback_info[i] =
				update->wb_update->writeback_info[i];
	}
#endif
#if defined(CONFIG_DRM_AMD_DC_DSC_SUPPORT)
	if (update->dsc_config) {
		/* Keep the old config so a failed validation can be undone. */
		struct dc_dsc_config old_dsc_cfg = stream->timing.dsc_cfg;
		uint32_t old_dsc_enabled = stream->timing.flags.DSC;
		uint32_t enable_dsc = (update->dsc_config->num_slices_h != 0 &&
				       update->dsc_config->num_slices_v != 0);

		stream->timing.dsc_cfg = *update->dsc_config;
		stream->timing.flags.DSC = enable_dsc;
		if (!dc->res_pool->funcs->validate_bandwidth(dc, context,
							     true)) {
			/* Roll back: the new DSC config does not fit. */
			stream->timing.dsc_cfg = old_dsc_cfg;
			stream->timing.flags.DSC = old_dsc_enabled;
		}
	}
#endif
}
1892
/*
 * commit_planes_do_stream_update - Program stream-level changes to HW.
 *
 * Walks all pipes and, on each top-level pipe driving @stream, applies the
 * requested stream updates: periodic interrupts, info frames, gamut remap,
 * output CSC, dither/bit-depth, and DSC.  Power-affecting pieces (DPMS
 * on/off, ABM level) are only programmed on FULL updates — FAST updates
 * skip them via the continue below.
 */
static void commit_planes_do_stream_update(struct dc *dc,
		struct dc_stream_state *stream,
		struct dc_stream_update *stream_update,
		enum surface_update_type update_type,
		struct dc_state *context)
{
	int j;

	// Stream updates
	for (j = 0; j < dc->res_pool->pipe_count; j++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

		/* Only program through the head pipe of each tree. */
		if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->stream == stream) {

			if (stream_update->periodic_interrupt0 &&
					dc->hwss.setup_periodic_interrupt)
				dc->hwss.setup_periodic_interrupt(pipe_ctx, VLINE0);

			if (stream_update->periodic_interrupt1 &&
					dc->hwss.setup_periodic_interrupt)
				dc->hwss.setup_periodic_interrupt(pipe_ctx, VLINE1);

			if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) ||
					stream_update->vrr_infopacket ||
					stream_update->vsc_infopacket ||
					stream_update->vsp_infopacket) {
				resource_build_info_frame(pipe_ctx);
				dc->hwss.update_info_frame(pipe_ctx);
			}

			if (stream_update->gamut_remap)
				dc_stream_set_gamut_remap(dc, stream);

			if (stream_update->output_csc_transform)
				dc_stream_program_csc_matrix(dc, stream);

			if (stream_update->dither_option) {
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
				struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;
#endif
				resource_build_bit_depth_reduction_params(pipe_ctx->stream,
									&pipe_ctx->stream->bit_depth_params);
				pipe_ctx->stream_res.opp->funcs->opp_program_fmt(pipe_ctx->stream_res.opp,
						&stream->bit_depth_params,
						&stream->clamping);
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
				/* ODM-split pipes share the same fmt settings. */
				while (odm_pipe) {
					odm_pipe->stream_res.opp->funcs->opp_program_fmt(odm_pipe->stream_res.opp,
							&stream->bit_depth_params,
							&stream->clamping);
					odm_pipe = odm_pipe->next_odm_pipe;
				}
#endif
			}

#if defined(CONFIG_DRM_AMD_DC_DSC_SUPPORT)
			if (stream_update->dsc_config && dc->hwss.pipe_control_lock_global) {
				dc->hwss.pipe_control_lock_global(dc, pipe_ctx, true);
				dp_update_dsc_config(pipe_ctx);
				dc->hwss.pipe_control_lock_global(dc, pipe_ctx, false);
			}
#endif

			/* Full fed update */
			if (update_type == UPDATE_TYPE_FAST)
				continue;

			if (stream_update->dpms_off) {
				dc->hwss.pipe_control_lock(dc, pipe_ctx, true);

				if (*stream_update->dpms_off) {
					core_link_disable_stream(pipe_ctx);
					/* for dpms, keep acquired resources*/
					if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only)
						pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);

					dc->hwss.optimize_bandwidth(dc, dc->current_state);
				} else {
					if (!dc->optimize_seamless_boot)
						dc->hwss.prepare_bandwidth(dc, dc->current_state);

					core_link_enable_stream(dc->current_state, pipe_ctx);
				}

				dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
			}

			if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
				if (pipe_ctx->stream_res.tg->funcs->is_blanked) {
					// if otg funcs defined check if blanked before programming
					if (!pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg))
						pipe_ctx->stream_res.abm->funcs->set_abm_level(
							pipe_ctx->stream_res.abm, stream->abm_level);
				} else
					pipe_ctx->stream_res.abm->funcs->set_abm_level(
						pipe_ctx->stream_res.abm, stream->abm_level);
			}
		}
	}
}
1992
1993 static void commit_planes_for_stream(struct dc *dc,
1994 struct dc_surface_update *srf_updates,
1995 int surface_count,
1996 struct dc_stream_state *stream,
1997 struct dc_stream_update *stream_update,
1998 enum surface_update_type update_type,
1999 struct dc_state *context)
2000 {
2001 int i, j;
2002 struct pipe_ctx *top_pipe_to_program = NULL;
2003
2004 if (dc->optimize_seamless_boot && surface_count > 0) {
2005
2006
2007
2008
2009
2010
2011 if (stream->apply_seamless_boot_optimization) {
2012 stream->apply_seamless_boot_optimization = false;
2013 dc->optimize_seamless_boot = false;
2014 dc->optimized_required = true;
2015 }
2016 }
2017
2018 if (update_type == UPDATE_TYPE_FULL && !dc->optimize_seamless_boot) {
2019 dc->hwss.prepare_bandwidth(dc, context);
2020 context_clock_trace(dc, context);
2021 }
2022
2023
2024 if (stream_update)
2025 commit_planes_do_stream_update(dc, stream, stream_update, update_type, context);
2026
2027 if (surface_count == 0) {
2028
2029
2030
2031
2032 dc->hwss.apply_ctx_for_surface(dc, stream, 0, context);
2033 return;
2034 }
2035
2036 #if defined(CONFIG_DRM_AMD_DC_DCN2_0)
2037 if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
2038 for (i = 0; i < surface_count; i++) {
2039 struct dc_plane_state *plane_state = srf_updates[i].surface;
2040
2041 for (j = 0; j < dc->res_pool->pipe_count; j++) {
2042 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2043 if (!pipe_ctx->plane_state)
2044 continue;
2045 if (pipe_ctx->plane_state != plane_state)
2046 continue;
2047 plane_state->triplebuffer_flips = false;
2048 if (update_type == UPDATE_TYPE_FAST &&
2049 dc->hwss.program_triplebuffer != NULL &&
2050 !plane_state->flip_immediate &&
2051 !dc->debug.disable_tri_buf) {
2052
2053 plane_state->triplebuffer_flips = true;
2054 }
2055 }
2056 }
2057 }
2058 #endif
2059
2060
2061 for (j = 0; j < dc->res_pool->pipe_count; j++) {
2062 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2063
2064 if (!pipe_ctx->top_pipe &&
2065 !pipe_ctx->prev_odm_pipe &&
2066 pipe_ctx->stream &&
2067 pipe_ctx->stream == stream) {
2068 struct dc_stream_status *stream_status = NULL;
2069
2070 top_pipe_to_program = pipe_ctx;
2071
2072 if (!pipe_ctx->plane_state)
2073 continue;
2074
2075
2076 if (update_type == UPDATE_TYPE_FAST)
2077 continue;
2078
2079 #if defined(CONFIG_DRM_AMD_DC_DCN2_0)
2080 ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
2081
2082 if (dc->hwss.program_triplebuffer != NULL &&
2083 !dc->debug.disable_tri_buf) {
2084
2085 dc->hwss.program_triplebuffer(
2086 dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
2087 }
2088 #endif
2089 stream_status =
2090 stream_get_status(context, pipe_ctx->stream);
2091
2092 dc->hwss.apply_ctx_for_surface(
2093 dc, pipe_ctx->stream, stream_status->plane_count, context);
2094 }
2095 }
2096
2097
2098 if (update_type == UPDATE_TYPE_FAST) {
2099
2100
2101
2102
2103 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);
2104
2105 #if defined(CONFIG_DRM_AMD_DC_DCN2_0)
2106 if (dc->hwss.set_flip_control_gsl)
2107 for (i = 0; i < surface_count; i++) {
2108 struct dc_plane_state *plane_state = srf_updates[i].surface;
2109
2110 for (j = 0; j < dc->res_pool->pipe_count; j++) {
2111 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2112
2113 if (pipe_ctx->stream != stream)
2114 continue;
2115
2116 if (pipe_ctx->plane_state != plane_state)
2117 continue;
2118
2119
2120 dc->hwss.set_flip_control_gsl(pipe_ctx,
2121 plane_state->flip_immediate);
2122 }
2123 }
2124 #endif
2125
2126 for (i = 0; i < surface_count; i++) {
2127 struct dc_plane_state *plane_state = srf_updates[i].surface;
2128
2129 for (j = 0; j < dc->res_pool->pipe_count; j++) {
2130 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2131
2132 if (pipe_ctx->stream != stream)
2133 continue;
2134
2135 if (pipe_ctx->plane_state != plane_state)
2136 continue;
2137 #if defined(CONFIG_DRM_AMD_DC_DCN2_0)
2138
2139 if (dc->hwss.program_triplebuffer != NULL &&
2140 !dc->debug.disable_tri_buf) {
2141
2142 dc->hwss.program_triplebuffer(
2143 dc, pipe_ctx, plane_state->triplebuffer_flips);
2144 }
2145 #endif
2146 if (srf_updates[i].flip_addr)
2147 dc->hwss.update_plane_addr(dc, pipe_ctx);
2148 }
2149 }
2150
2151 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
2152 }
2153
2154
2155 for (j = 0; j < dc->res_pool->pipe_count; j++) {
2156 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2157
2158 if (pipe_ctx->bottom_pipe ||
2159 !pipe_ctx->stream ||
2160 pipe_ctx->stream != stream ||
2161 !pipe_ctx->plane_state->update_flags.bits.addr_update)
2162 continue;
2163
2164 if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger)
2165 pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg);
2166 }
2167 }
2168
/*
 * dc_commit_updates_for_stream - Validate and commit stream/surface updates.
 *
 * Classifies the update; on FULL it builds a new validation context copied
 * from @state and forces full programming on any plane that changed pipes.
 * Updates are then copied onto the plane and stream states, scaling params
 * rebuilt where needed, and the result committed to hardware.  If a new
 * context was created it replaces dc->current_state (old one released).
 */
void dc_commit_updates_for_stream(struct dc *dc,
		struct dc_surface_update *srf_updates,
		int surface_count,
		struct dc_stream_state *stream,
		struct dc_stream_update *stream_update,
		struct dc_state *state)
{
	const struct dc_stream_status *stream_status;
	enum surface_update_type update_type;
	struct dc_state *context;
	struct dc_context *dc_ctx = dc->ctx;
	int i, j;

	stream_status = dc_stream_get_status(stream);
	context = dc->current_state;

	update_type = dc_check_update_surfaces_for_stream(
				dc,	srf_updates, surface_count, stream_update, stream_status);

	if (update_type >= update_surface_trace_level)
		update_surface_trace(dc, srf_updates, surface_count);


	if (update_type >= UPDATE_TYPE_FULL) {

		/* initialize scratch memory for building context */
		context = dc_create_state(dc);
		if (context == NULL) {
			DC_ERROR("Failed to allocate new validate context!\n");
			return;
		}

		dc_resource_state_copy_construct(state, context);

		/* A plane newly assigned to a pipe must be fully reprogrammed. */
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
			struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];

			if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
				new_pipe->plane_state->force_full_update = true;
		}
	}


	/* Copy updates onto plane states and rebuild scaling if needed. */
	for (i = 0; i < surface_count; i++) {
		struct dc_plane_state *surface = srf_updates[i].surface;

		copy_surface_update_to_plane(surface, &srf_updates[i]);

		if (update_type >= UPDATE_TYPE_MED) {
			for (j = 0; j < dc->res_pool->pipe_count; j++) {
				struct pipe_ctx *pipe_ctx =
					&context->res_ctx.pipe_ctx[j];

				if (pipe_ctx->plane_state != surface)
					continue;

				resource_build_scaling_params(pipe_ctx);
			}
		}
	}

	copy_stream_update_to_stream(dc, context, stream, stream_update);

	commit_planes_for_stream(
				dc,
				srf_updates,
				surface_count,
				stream,
				stream_update,
				update_type,
				context);

	/* On FULL updates, swap in the new context and release the old one. */
	if (dc->current_state != context) {

		struct dc_state *old = dc->current_state;

		dc->current_state = context;
		dc_release_state(old);

		/* One-shot force_full_update flags have been consumed. */
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

			if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
				pipe_ctx->plane_state->force_full_update = false;
		}
	}
	/*let's use current_state to update watermark etc*/
	if (update_type >= UPDATE_TYPE_FULL)
		dc_post_update_surfaces_to_stream(dc);

	return;

}
2263
/* Number of streams in the currently committed state. */
uint8_t dc_get_current_stream_count(struct dc *dc)
{
	return dc->current_state->stream_count;
}
2268
2269 struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i)
2270 {
2271 if (i < dc->current_state->stream_count)
2272 return dc->current_state->streams[i];
2273 return NULL;
2274 }
2275
/* Translate a raw HW src_id/ext_id pair into a dc_irq_source enum. */
enum dc_irq_source dc_interrupt_to_irq_source(
		struct dc *dc,
		uint32_t src_id,
		uint32_t ext_id)
{
	return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
}
2283
2284
2285
2286
2287 bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
2288 {
2289
2290 if (dc == NULL)
2291 return false;
2292
2293 return dal_irq_service_set(dc->res_pool->irqs, src, enable);
2294 }
2295
/* Acknowledge (clear) a pending interrupt source. */
void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
{
	dal_irq_service_ack(dc->res_pool->irqs, src);
}
2300
/*
 * dc_set_power_state - Handle D0 entry and power-down transitions.
 *
 * D0: rebuild the resource state and reinitialize hardware (plus system
 * context for DCN2 when a valid VM config exists).
 *
 * Any other state: wipe current_state down to zeros while preserving the
 * two members that must survive the memset — the kref (so outstanding
 * references stay valid) and the DML instance (staged through a temporary
 * heap copy because it is too large for the stack).
 */
void dc_set_power_state(
	struct dc *dc,
	enum dc_acpi_cm_power_state power_state)
{
	struct kref refcount;
	struct display_mode_lib *dml;

	switch (power_state) {
	case DC_ACPI_CM_POWER_STATE_D0:
		dc_resource_state_construct(dc, dc->current_state);

		dc->hwss.init_hw(dc);

#ifdef CONFIG_DRM_AMD_DC_DCN2_0
		if (dc->hwss.init_sys_ctx != NULL &&
			dc->vm_pa_config.valid) {
			dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config);
		}
#endif

		break;
	default:
		ASSERT(dc->current_state->stream_count == 0);
		/* Zero out the current context so that on resume we start with
		 * clean state, and dc hw programming optimizations will not
		 * cause any trouble.
		 */
		dml = kzalloc(sizeof(struct display_mode_lib),
				GFP_KERNEL);

		ASSERT(dml);
		if (!dml)
			return;

		/* Preserve refcount */
		refcount = dc->current_state->refcount;
		/* Preserve display mode lib */
		memcpy(dml, &dc->current_state->bw_ctx.dml, sizeof(struct display_mode_lib));

		dc_resource_state_destruct(dc->current_state);
		memset(dc->current_state, 0,
				sizeof(*dc->current_state));

		dc->current_state->refcount = refcount;
		dc->current_state->bw_ctx.dml = *dml;

		kfree(dml);

		break;
	}
}
2352
2353 void dc_resume(struct dc *dc)
2354 {
2355
2356 uint32_t i;
2357
2358 for (i = 0; i < dc->link_count; i++)
2359 core_link_resume(dc->links[i]);
2360 }
2361
2362 unsigned int dc_get_current_backlight_pwm(struct dc *dc)
2363 {
2364 struct abm *abm = dc->res_pool->abm;
2365
2366 if (abm)
2367 return abm->funcs->get_current_backlight(abm);
2368
2369 return 0;
2370 }
2371
2372 unsigned int dc_get_target_backlight_pwm(struct dc *dc)
2373 {
2374 struct abm *abm = dc->res_pool->abm;
2375
2376 if (abm)
2377 return abm->funcs->get_target_backlight(abm);
2378
2379 return 0;
2380 }
2381
2382 bool dc_is_dmcu_initialized(struct dc *dc)
2383 {
2384 struct dmcu *dmcu = dc->res_pool->dmcu;
2385
2386 if (dmcu)
2387 return dmcu->funcs->is_dmcu_initialized(dmcu);
2388 return false;
2389 }
2390
2391 bool dc_submit_i2c(
2392 struct dc *dc,
2393 uint32_t link_index,
2394 struct i2c_command *cmd)
2395 {
2396
2397 struct dc_link *link = dc->links[link_index];
2398 struct ddc_service *ddc = link->ddc;
2399 return dce_i2c_submit_command(
2400 dc->res_pool,
2401 ddc->ddc_pin,
2402 cmd);
2403 }
2404
2405 static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink *sink)
2406 {
2407 if (dc_link->sink_count >= MAX_SINKS_PER_LINK) {
2408 BREAK_TO_DEBUGGER();
2409 return false;
2410 }
2411
2412 dc_sink_retain(sink);
2413
2414 dc_link->remote_sinks[dc_link->sink_count] = sink;
2415 dc_link->sink_count++;
2416
2417 return true;
2418 }
2419
2420
2421
2422
2423
2424
2425 struct dc_sink *dc_link_add_remote_sink(
2426 struct dc_link *link,
2427 const uint8_t *edid,
2428 int len,
2429 struct dc_sink_init_data *init_data)
2430 {
2431 struct dc_sink *dc_sink;
2432 enum dc_edid_status edid_status;
2433
2434 if (len > DC_MAX_EDID_BUFFER_SIZE) {
2435 dm_error("Max EDID buffer size breached!\n");
2436 return NULL;
2437 }
2438
2439 if (!init_data) {
2440 BREAK_TO_DEBUGGER();
2441 return NULL;
2442 }
2443
2444 if (!init_data->link) {
2445 BREAK_TO_DEBUGGER();
2446 return NULL;
2447 }
2448
2449 dc_sink = dc_sink_create(init_data);
2450
2451 if (!dc_sink)
2452 return NULL;
2453
2454 memmove(dc_sink->dc_edid.raw_edid, edid, len);
2455 dc_sink->dc_edid.length = len;
2456
2457 if (!link_add_remote_sink_helper(
2458 link,
2459 dc_sink))
2460 goto fail_add_sink;
2461
2462 edid_status = dm_helpers_parse_edid_caps(
2463 link->ctx,
2464 &dc_sink->dc_edid,
2465 &dc_sink->edid_caps);
2466
2467
2468
2469
2470
2471 if (edid_status != EDID_OK) {
2472 dc_sink->dc_edid.length = 0;
2473 dm_error("Bad EDID, status%d!\n", edid_status);
2474 }
2475
2476 return dc_sink;
2477
2478 fail_add_sink:
2479 dc_sink_release(dc_sink);
2480 return NULL;
2481 }
2482
2483
2484
2485
2486
2487
2488
2489 void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)
2490 {
2491 int i;
2492
2493 if (!link->sink_count) {
2494 BREAK_TO_DEBUGGER();
2495 return;
2496 }
2497
2498 for (i = 0; i < link->sink_count; i++) {
2499 if (link->remote_sinks[i] == sink) {
2500 dc_sink_release(sink);
2501 link->remote_sinks[i] = NULL;
2502
2503
2504 while (i < link->sink_count - 1) {
2505 link->remote_sinks[i] = link->remote_sinks[i+1];
2506 i++;
2507 }
2508 link->remote_sinks[i] = NULL;
2509 link->sink_count--;
2510 return;
2511 }
2512 }
2513 }
2514
/*
 * get_clock_requirements_for_state() - Export a state's DCN clock
 * requirements into an AsicStateEx report structure.
 * @state: validated dc_state whose bandwidth context holds the clocks
 * @info: output structure; each field is the corresponding clock in kHz
 *        (field names suggest kHz via the *_khz sources — values are copied
 *        verbatim, no unit conversion is performed here)
 */
void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info)
{
	info->displayClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dispclk_khz;
	info->engineClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_khz;
	info->memoryClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dramclk_khz;
	info->maxSupportedDppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz;
	info->dppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dppclk_khz;
	info->socClock = (unsigned int)state->bw_ctx.bw.dcn.clk.socclk_khz;
	info->dcfClockDeepSleep = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz;
	info->fClock = (unsigned int)state->bw_ctx.bw.dcn.clk.fclk_khz;
	info->phyClock = (unsigned int)state->bw_ctx.bw.dcn.clk.phyclk_khz;
}
2527 enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping)
2528 {
2529 if (dc->hwss.set_clock)
2530 return dc->hwss.set_clock(dc, clock_type, clk_khz, stepping);
2531 return DC_ERROR_UNEXPECTED;
2532 }
2533 void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg)
2534 {
2535 if (dc->hwss.get_clock)
2536 dc->hwss.get_clock(dc, clock_type, clock_cfg);
2537 }