This source file includes the following definitions:
- phm_setup_asic
- phm_power_down_asic
- phm_set_power_state
- phm_enable_dynamic_state_management
- phm_disable_dynamic_state_management
- phm_force_dpm_levels
- phm_apply_state_adjust_rules
- phm_apply_clock_adjust_rules
- phm_powerdown_uvd
- phm_disable_clock_power_gatings
- phm_pre_display_configuration_changed
- phm_display_configuration_changed
- phm_notify_smc_display_config_after_ps_adjustment
- phm_stop_thermal_controller
- phm_register_irq_handlers
- phm_start_thermal_controller
- phm_check_smc_update_required_for_display_configuration
- phm_check_states_equal
- phm_store_dal_configuration_data
- phm_get_dal_power_level
- phm_set_cpu_power_state
- phm_get_performance_level
- phm_get_clock_info
- phm_get_current_shallow_sleep_clocks
- phm_get_clock_by_type
- phm_get_clock_by_type_with_latency
- phm_get_clock_by_type_with_voltage
- phm_set_watermarks_for_clocks_ranges
- phm_display_clock_voltage_request
- phm_get_max_high_clocks
- phm_disable_smc_firmware_ctf
- phm_set_active_display_count
- phm_set_min_deep_sleep_dcefclk
- phm_set_hard_min_dcefclk_by_freq
- phm_set_hard_min_fclk_by_freq
#include "pp_debug.h"
#include <linux/errno.h>
#include "hwmgr.h"
#include "hardwaremanager.h"
#include "power_state.h"

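/* Default thermal trip limits, in millidegrees Celsius (0 to 80 C). */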
#define TEMP_RANGE_MIN (0)
#define TEMP_RANGE_MAX (80 * 1000)

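/* Fail fast with -EINVAL when the hwmgr handle or its callback table is missing. */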
#define PHM_FUNC_CHECK(hw) \
        do { \
                if ((hw) == NULL || (hw)->hwmgr_func == NULL) \
                        return -EINVAL; \
        } while (0)

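/*
 * The phm_*() helpers below are thin dispatchers: each validates the hwmgr
 * handle with PHM_FUNC_CHECK() and then forwards to the ASIC-specific
 * callback registered in hwmgr->hwmgr_func.  A NULL callback is usually
 * treated as "nothing to do" and the helper returns 0; helpers whose result
 * is required (e.g. stop_thermal_controller, check_states_equal) return
 * -EINVAL instead.
 */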
int phm_setup_asic(struct pp_hwmgr *hwmgr)
{
        PHM_FUNC_CHECK(hwmgr);

        if (NULL != hwmgr->hwmgr_func->asic_setup)
                return hwmgr->hwmgr_func->asic_setup(hwmgr);

        return 0;
}

int phm_power_down_asic(struct pp_hwmgr *hwmgr)
{
        PHM_FUNC_CHECK(hwmgr);

        if (NULL != hwmgr->hwmgr_func->power_off_asic)
                return hwmgr->hwmgr_func->power_off_asic(hwmgr);

        return 0;
}

int phm_set_power_state(struct pp_hwmgr *hwmgr,
                const struct pp_hw_power_state *pcurrent_state,
                const struct pp_hw_power_state *pnew_power_state)
{
        struct phm_set_power_state_input states;

        PHM_FUNC_CHECK(hwmgr);

        states.pcurrent_state = pcurrent_state;
        states.pnew_state = pnew_power_state;

        if (NULL != hwmgr->hwmgr_func->power_state_set)
                return hwmgr->hwmgr_func->power_state_set(hwmgr, &states);

        return 0;
}

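/*
 * Skip re-enabling DPM when resuming from suspend on bare metal if the SMU
 * reports that DPM is still running; otherwise defer to the ASIC backend.
 */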
int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr)
{
        struct amdgpu_device *adev = NULL;
        int ret = -EINVAL;

        PHM_FUNC_CHECK(hwmgr);
        adev = hwmgr->adev;

        if (smum_is_dpm_running(hwmgr) && !amdgpu_passthrough(adev)
                        && adev->in_suspend) {
                pr_info("dpm has been enabled\n");
                return 0;
        }

        if (NULL != hwmgr->hwmgr_func->dynamic_state_management_enable)
                ret = hwmgr->hwmgr_func->dynamic_state_management_enable(hwmgr);

        return ret;
}

int phm_disable_dynamic_state_management(struct pp_hwmgr *hwmgr)
{
        int ret = -EINVAL;

        PHM_FUNC_CHECK(hwmgr);

        if (!smum_is_dpm_running(hwmgr)) {
                pr_info("dpm has been disabled\n");
                return 0;
        }

        if (hwmgr->hwmgr_func->dynamic_state_management_disable)
                ret = hwmgr->hwmgr_func->dynamic_state_management_disable(hwmgr);

        return ret;
}

int phm_force_dpm_levels(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level)
{
        int ret = 0;

        PHM_FUNC_CHECK(hwmgr);

        if (hwmgr->hwmgr_func->force_dpm_level != NULL)
                ret = hwmgr->hwmgr_func->force_dpm_level(hwmgr, level);

        return ret;
}

int phm_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
                struct pp_power_state *adjusted_ps,
                const struct pp_power_state *current_ps)
{
        PHM_FUNC_CHECK(hwmgr);

        if (hwmgr->hwmgr_func->apply_state_adjust_rules != NULL)
                return hwmgr->hwmgr_func->apply_state_adjust_rules(hwmgr,
                                adjusted_ps, current_ps);
        return 0;
}

int phm_apply_clock_adjust_rules(struct pp_hwmgr *hwmgr)
{
        PHM_FUNC_CHECK(hwmgr);

        if (hwmgr->hwmgr_func->apply_clocks_adjust_rules != NULL)
                return hwmgr->hwmgr_func->apply_clocks_adjust_rules(hwmgr);
        return 0;
}

int phm_powerdown_uvd(struct pp_hwmgr *hwmgr)
{
        PHM_FUNC_CHECK(hwmgr);

        if (hwmgr->hwmgr_func->powerdown_uvd != NULL)
                return hwmgr->hwmgr_func->powerdown_uvd(hwmgr);
        return 0;
}

int phm_disable_clock_power_gatings(struct pp_hwmgr *hwmgr)
{
        PHM_FUNC_CHECK(hwmgr);

        if (NULL != hwmgr->hwmgr_func->disable_clock_power_gating)
                return hwmgr->hwmgr_func->disable_clock_power_gating(hwmgr);

        return 0;
}

int phm_pre_display_configuration_changed(struct pp_hwmgr *hwmgr)
{
        PHM_FUNC_CHECK(hwmgr);

        if (NULL != hwmgr->hwmgr_func->pre_display_config_changed)
                hwmgr->hwmgr_func->pre_display_config_changed(hwmgr);

        return 0;
}

int phm_display_configuration_changed(struct pp_hwmgr *hwmgr)
{
        PHM_FUNC_CHECK(hwmgr);

        if (NULL != hwmgr->hwmgr_func->display_config_changed)
                hwmgr->hwmgr_func->display_config_changed(hwmgr);

        return 0;
}

int phm_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
{
        PHM_FUNC_CHECK(hwmgr);

        if (NULL != hwmgr->hwmgr_func->notify_smc_display_config_after_ps_adjustment)
                hwmgr->hwmgr_func->notify_smc_display_config_after_ps_adjustment(hwmgr);

        return 0;
}

int phm_stop_thermal_controller(struct pp_hwmgr *hwmgr)
{
        PHM_FUNC_CHECK(hwmgr);

        if (hwmgr->hwmgr_func->stop_thermal_controller == NULL)
                return -EINVAL;

        return hwmgr->hwmgr_func->stop_thermal_controller(hwmgr);
}

int phm_register_irq_handlers(struct pp_hwmgr *hwmgr)
{
        PHM_FUNC_CHECK(hwmgr);

        if (hwmgr->hwmgr_func->register_irq_handlers != NULL)
                return hwmgr->hwmgr_func->register_irq_handlers(hwmgr);

        return 0;
}

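/*
 * Seed the thermal range with conservative defaults (0 to 80 C for the edge,
 * hotspot and memory sensors, following the field order of
 * struct PP_TemperatureRange), let the ASIC backend override them, then
 * publish the resulting limits to adev->pm.dpm.thermal.
 */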
int phm_start_thermal_controller(struct pp_hwmgr *hwmgr)
{
        int ret = 0;
        struct PP_TemperatureRange range = {
                TEMP_RANGE_MIN,
                TEMP_RANGE_MAX,
                TEMP_RANGE_MAX,
                TEMP_RANGE_MIN,
                TEMP_RANGE_MAX,
                TEMP_RANGE_MAX,
                TEMP_RANGE_MIN,
                TEMP_RANGE_MAX,
                TEMP_RANGE_MAX};
        struct amdgpu_device *adev = hwmgr->adev;

        if (hwmgr->hwmgr_func->get_thermal_temperature_range)
                hwmgr->hwmgr_func->get_thermal_temperature_range(hwmgr, &range);

        if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
                        PHM_PlatformCaps_ThermalController)
                        && hwmgr->hwmgr_func->start_thermal_controller != NULL)
                ret = hwmgr->hwmgr_func->start_thermal_controller(hwmgr, &range);

        adev->pm.dpm.thermal.min_temp = range.min;
        adev->pm.dpm.thermal.max_temp = range.max;
        adev->pm.dpm.thermal.max_edge_emergency_temp = range.edge_emergency_max;
        adev->pm.dpm.thermal.min_hotspot_temp = range.hotspot_min;
        adev->pm.dpm.thermal.max_hotspot_crit_temp = range.hotspot_crit_max;
        adev->pm.dpm.thermal.max_hotspot_emergency_temp = range.hotspot_emergency_max;
        adev->pm.dpm.thermal.min_mem_temp = range.mem_min;
        adev->pm.dpm.thermal.max_mem_crit_temp = range.mem_crit_max;
        adev->pm.dpm.thermal.max_mem_emergency_temp = range.mem_emergency_max;

        return ret;
}

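/*
 * Note: this helper returns bool, so the -EINVAL produced by PHM_FUNC_CHECK()
 * is reported to the caller as "true" (update required) if hwmgr is invalid.
 */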
bool phm_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
{
        PHM_FUNC_CHECK(hwmgr);

        if (hwmgr->hwmgr_func->check_smc_update_required_for_display_configuration == NULL)
                return false;

        return hwmgr->hwmgr_func->check_smc_update_required_for_display_configuration(hwmgr);
}

int phm_check_states_equal(struct pp_hwmgr *hwmgr,
                const struct pp_hw_power_state *pstate1,
                const struct pp_hw_power_state *pstate2,
                bool *equal)
{
        PHM_FUNC_CHECK(hwmgr);

        if (hwmgr->hwmgr_func->check_states_equal == NULL)
                return -EINVAL;

        return hwmgr->hwmgr_func->check_states_equal(hwmgr, pstate1, pstate2, equal);
}

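/*
 * Forward the DAL display configuration to the backend: the deep-sleep
 * DCEFCLK floor, the number of paths with an active controller, and the
 * CC6/NB p-state settings.
 */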
int phm_store_dal_configuration_data(struct pp_hwmgr *hwmgr,
                const struct amd_pp_display_configuration *display_config)
{
        int index = 0;
        int number_of_active_display = 0;

        PHM_FUNC_CHECK(hwmgr);

        if (display_config == NULL)
                return -EINVAL;

        if (NULL != hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk)
                hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk(hwmgr, display_config->min_dcef_deep_sleep_set_clk);

        for (index = 0; index < display_config->num_path_including_non_display; index++) {
                if (display_config->displays[index].controller_id != 0)
                        number_of_active_display++;
        }

        if (NULL != hwmgr->hwmgr_func->set_active_display_count)
                hwmgr->hwmgr_func->set_active_display_count(hwmgr, number_of_active_display);

        if (hwmgr->hwmgr_func->store_cc6_data == NULL)
                return -EINVAL;

        if (hwmgr->hwmgr_func->store_cc6_data)
                hwmgr->hwmgr_func->store_cc6_data(hwmgr,
                                display_config->cpu_pstate_separation_time,
                                display_config->cpu_cc6_disable,
                                display_config->cpu_pstate_disable,
                                display_config->nb_pstate_switch_disable);

        return 0;
}

int phm_get_dal_power_level(struct pp_hwmgr *hwmgr,
                struct amd_pp_simple_clock_info *info)
{
        PHM_FUNC_CHECK(hwmgr);

        if (info == NULL || hwmgr->hwmgr_func->get_dal_power_level == NULL)
                return -EINVAL;
        return hwmgr->hwmgr_func->get_dal_power_level(hwmgr, info);
}

int phm_set_cpu_power_state(struct pp_hwmgr *hwmgr)
{
        PHM_FUNC_CHECK(hwmgr);

        if (hwmgr->hwmgr_func->set_cpu_power_state != NULL)
                return hwmgr->hwmgr_func->set_cpu_power_state(hwmgr);

        return 0;
}

int phm_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
                PHM_PerformanceLevelDesignation designation, uint32_t index,
                PHM_PerformanceLevel *level)
{
        PHM_FUNC_CHECK(hwmgr);

        if (hwmgr->hwmgr_func->get_performance_level == NULL)
                return -EINVAL;

        return hwmgr->hwmgr_func->get_performance_level(hwmgr, state, designation, index, level);
}

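/*
 * Derive the clock envelope of a power state: the minimum clocks come from
 * performance level 0 (activity designation), the maximum clocks from the
 * highest hardware activity performance level.
 */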
int phm_get_clock_info(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
                struct pp_clock_info *pclock_info,
                PHM_PerformanceLevelDesignation designation)
{
        int result;
        PHM_PerformanceLevel performance_level = {0};

        PHM_FUNC_CHECK(hwmgr);

        PP_ASSERT_WITH_CODE((NULL != state), "Invalid Input!", return -EINVAL);
        PP_ASSERT_WITH_CODE((NULL != pclock_info), "Invalid Input!", return -EINVAL);

        result = phm_get_performance_level(hwmgr, state, PHM_PerformanceLevelDesignation_Activity, 0, &performance_level);

        PP_ASSERT_WITH_CODE((0 == result), "Failed to retrieve minimum clocks.", return result);

        pclock_info->min_mem_clk = performance_level.memory_clock;
        pclock_info->min_eng_clk = performance_level.coreClock;
        pclock_info->min_bus_bandwidth = performance_level.nonLocalMemoryFreq * performance_level.nonLocalMemoryWidth;

        result = phm_get_performance_level(hwmgr, state, designation,
                        (hwmgr->platform_descriptor.hardwareActivityPerformanceLevels - 1), &performance_level);

        PP_ASSERT_WITH_CODE((0 == result), "Failed to retrieve maximum clocks.", return result);

        pclock_info->max_mem_clk = performance_level.memory_clock;
        pclock_info->max_eng_clk = performance_level.coreClock;
        pclock_info->max_bus_bandwidth = performance_level.nonLocalMemoryFreq * performance_level.nonLocalMemoryWidth;

        return 0;
}

int phm_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr,
                const struct pp_hw_power_state *state, struct pp_clock_info *clock_info)
{
        PHM_FUNC_CHECK(hwmgr);

        if (hwmgr->hwmgr_func->get_current_shallow_sleep_clocks == NULL)
                return -EINVAL;

        return hwmgr->hwmgr_func->get_current_shallow_sleep_clocks(hwmgr, state, clock_info);
}

int phm_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks)
{
        PHM_FUNC_CHECK(hwmgr);

        if (hwmgr->hwmgr_func->get_clock_by_type == NULL)
                return -EINVAL;

        return hwmgr->hwmgr_func->get_clock_by_type(hwmgr, type, clocks);
}

int phm_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
                enum amd_pp_clock_type type,
                struct pp_clock_levels_with_latency *clocks)
{
        PHM_FUNC_CHECK(hwmgr);

        if (hwmgr->hwmgr_func->get_clock_by_type_with_latency == NULL)
                return -EINVAL;

        return hwmgr->hwmgr_func->get_clock_by_type_with_latency(hwmgr, type, clocks);
}

int phm_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
                enum amd_pp_clock_type type,
                struct pp_clock_levels_with_voltage *clocks)
{
        PHM_FUNC_CHECK(hwmgr);

        if (hwmgr->hwmgr_func->get_clock_by_type_with_voltage == NULL)
                return -EINVAL;

        return hwmgr->hwmgr_func->get_clock_by_type_with_voltage(hwmgr, type, clocks);
}

int phm_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
                void *clock_ranges)
{
        PHM_FUNC_CHECK(hwmgr);

        if (!hwmgr->hwmgr_func->set_watermarks_for_clocks_ranges)
                return -EINVAL;

        return hwmgr->hwmgr_func->set_watermarks_for_clocks_ranges(hwmgr,
                        clock_ranges);
}

int phm_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
                struct pp_display_clock_request *clock)
{
        PHM_FUNC_CHECK(hwmgr);

        if (!hwmgr->hwmgr_func->display_clock_voltage_request)
                return -EINVAL;

        return hwmgr->hwmgr_func->display_clock_voltage_request(hwmgr, clock);
}

int phm_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks)
{
        PHM_FUNC_CHECK(hwmgr);

        if (hwmgr->hwmgr_func->get_max_high_clocks == NULL)
                return -EINVAL;

        return hwmgr->hwmgr_func->get_max_high_clocks(hwmgr, clocks);
}

int phm_disable_smc_firmware_ctf(struct pp_hwmgr *hwmgr)
{
        PHM_FUNC_CHECK(hwmgr);

        if (hwmgr->hwmgr_func->disable_smc_firmware_ctf == NULL)
                return -EINVAL;

        return hwmgr->hwmgr_func->disable_smc_firmware_ctf(hwmgr);
}

int phm_set_active_display_count(struct pp_hwmgr *hwmgr, uint32_t count)
{
        PHM_FUNC_CHECK(hwmgr);

        if (!hwmgr->hwmgr_func->set_active_display_count)
                return -EINVAL;

        return hwmgr->hwmgr_func->set_active_display_count(hwmgr, count);
}

int phm_set_min_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clock)
{
        PHM_FUNC_CHECK(hwmgr);

        if (!hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk)
                return -EINVAL;

        return hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk(hwmgr, clock);
}

int phm_set_hard_min_dcefclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t clock)
{
        PHM_FUNC_CHECK(hwmgr);

        if (!hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq)
                return -EINVAL;

        return hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq(hwmgr, clock);
}

int phm_set_hard_min_fclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t clock)
{
        PHM_FUNC_CHECK(hwmgr);

        if (!hwmgr->hwmgr_func->set_hard_min_fclk_by_freq)
                return -EINVAL;

        return hwmgr->hwmgr_func->set_hard_min_fclk_by_freq(hwmgr, clock);
}