This source file includes the following definitions:
- amdgpu_pm_acpi_event_handler
- amdgpu_dpm_read_sensor
- amdgpu_get_dpm_state
- amdgpu_set_dpm_state
- amdgpu_get_dpm_forced_performance_level
- amdgpu_set_dpm_forced_performance_level
- amdgpu_get_pp_num_states
- amdgpu_get_pp_cur_state
- amdgpu_get_pp_force_state
- amdgpu_set_pp_force_state
- amdgpu_get_pp_table
- amdgpu_set_pp_table
- amdgpu_set_pp_od_clk_voltage
- amdgpu_get_pp_od_clk_voltage
- amdgpu_set_pp_feature_status
- amdgpu_get_pp_feature_status
- amdgpu_get_pp_dpm_sclk
- amdgpu_read_mask
- amdgpu_set_pp_dpm_sclk
- amdgpu_get_pp_dpm_mclk
- amdgpu_set_pp_dpm_mclk
- amdgpu_get_pp_dpm_socclk
- amdgpu_set_pp_dpm_socclk
- amdgpu_get_pp_dpm_fclk
- amdgpu_set_pp_dpm_fclk
- amdgpu_get_pp_dpm_dcefclk
- amdgpu_set_pp_dpm_dcefclk
- amdgpu_get_pp_dpm_pcie
- amdgpu_set_pp_dpm_pcie
- amdgpu_get_pp_sclk_od
- amdgpu_set_pp_sclk_od
- amdgpu_get_pp_mclk_od
- amdgpu_set_pp_mclk_od
- amdgpu_get_pp_power_profile_mode
- amdgpu_set_pp_power_profile_mode
- amdgpu_get_busy_percent
- amdgpu_get_memory_busy_percent
- amdgpu_get_pcie_bw
- amdgpu_get_unique_id
- amdgpu_hwmon_show_temp
- amdgpu_hwmon_show_temp_thresh
- amdgpu_hwmon_show_hotspot_temp_thresh
- amdgpu_hwmon_show_mem_temp_thresh
- amdgpu_hwmon_show_temp_label
- amdgpu_hwmon_show_temp_emergency
- amdgpu_hwmon_get_pwm1_enable
- amdgpu_hwmon_set_pwm1_enable
- amdgpu_hwmon_get_pwm1_min
- amdgpu_hwmon_get_pwm1_max
- amdgpu_hwmon_set_pwm1
- amdgpu_hwmon_get_pwm1
- amdgpu_hwmon_get_fan1_input
- amdgpu_hwmon_get_fan1_min
- amdgpu_hwmon_get_fan1_max
- amdgpu_hwmon_get_fan1_target
- amdgpu_hwmon_set_fan1_target
- amdgpu_hwmon_get_fan1_enable
- amdgpu_hwmon_set_fan1_enable
- amdgpu_hwmon_show_vddgfx
- amdgpu_hwmon_show_vddgfx_label
- amdgpu_hwmon_show_vddnb
- amdgpu_hwmon_show_vddnb_label
- amdgpu_hwmon_show_power_avg
- amdgpu_hwmon_show_power_cap_min
- amdgpu_hwmon_show_power_cap_max
- amdgpu_hwmon_show_power_cap
- amdgpu_hwmon_set_power_cap
- amdgpu_hwmon_show_sclk
- amdgpu_hwmon_show_sclk_label
- amdgpu_hwmon_show_mclk
- amdgpu_hwmon_show_mclk_label
- hwmon_attributes_visible
- amdgpu_dpm_thermal_work_handler
- amdgpu_dpm_pick_power_state
- amdgpu_dpm_change_power_state_locked
- amdgpu_dpm_enable_uvd
- amdgpu_dpm_enable_vce
- amdgpu_pm_print_power_states
- amdgpu_pm_virt_sysfs_init
- amdgpu_pm_virt_sysfs_fini
- amdgpu_pm_load_smu_firmware
- amdgpu_pm_sysfs_init
- amdgpu_pm_sysfs_fini
- amdgpu_pm_compute_clocks
- amdgpu_debugfs_pm_info_pp
- amdgpu_parse_cg_state
- amdgpu_debugfs_pm_info
- amdgpu_debugfs_pm_init
#include <drm/drm_debugfs.h>

#include "amdgpu.h"
#include "amdgpu_drv.h"
#include "amdgpu_pm.h"
#include "amdgpu_dpm.h"
#include "amdgpu_display.h"
#include "amdgpu_smu.h"
#include "atom.h"
#include <linux/power_supply.h>
#include <linux/pci.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/nospec.h>
#include "hwmgr.h"
#define WIDTH_4K 3840

static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev);

static const struct cg_flag_name clocks[] = {
        {AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"},
        {AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"},
        {AMD_CG_SUPPORT_GFX_CGCG, "Graphics Coarse Grain Clock Gating"},
        {AMD_CG_SUPPORT_GFX_CGLS, "Graphics Coarse Grain memory Light Sleep"},
        {AMD_CG_SUPPORT_GFX_CGTS, "Graphics Coarse Grain Tree Shader Clock Gating"},
        {AMD_CG_SUPPORT_GFX_CGTS_LS, "Graphics Coarse Grain Tree Shader Light Sleep"},
        {AMD_CG_SUPPORT_GFX_CP_LS, "Graphics Command Processor Light Sleep"},
        {AMD_CG_SUPPORT_GFX_RLC_LS, "Graphics Run List Controller Light Sleep"},
        {AMD_CG_SUPPORT_GFX_3D_CGCG, "Graphics 3D Coarse Grain Clock Gating"},
        {AMD_CG_SUPPORT_GFX_3D_CGLS, "Graphics 3D Coarse Grain memory Light Sleep"},
        {AMD_CG_SUPPORT_MC_LS, "Memory Controller Light Sleep"},
        {AMD_CG_SUPPORT_MC_MGCG, "Memory Controller Medium Grain Clock Gating"},
        {AMD_CG_SUPPORT_SDMA_LS, "System Direct Memory Access Light Sleep"},
        {AMD_CG_SUPPORT_SDMA_MGCG, "System Direct Memory Access Medium Grain Clock Gating"},
        {AMD_CG_SUPPORT_BIF_MGCG, "Bus Interface Medium Grain Clock Gating"},
        {AMD_CG_SUPPORT_BIF_LS, "Bus Interface Light Sleep"},
        {AMD_CG_SUPPORT_UVD_MGCG, "Unified Video Decoder Medium Grain Clock Gating"},
        {AMD_CG_SUPPORT_VCE_MGCG, "Video Compression Engine Medium Grain Clock Gating"},
        {AMD_CG_SUPPORT_HDP_LS, "Host Data Path Light Sleep"},
        {AMD_CG_SUPPORT_HDP_MGCG, "Host Data Path Medium Grain Clock Gating"},
        {AMD_CG_SUPPORT_DRM_MGCG, "Digital Right Management Medium Grain Clock Gating"},
        {AMD_CG_SUPPORT_DRM_LS, "Digital Right Management Light Sleep"},
        {AMD_CG_SUPPORT_ROM_MGCG, "Rom Medium Grain Clock Gating"},
        {AMD_CG_SUPPORT_DF_MGCG, "Data Fabric Medium Grain Clock Gating"},

        {AMD_CG_SUPPORT_ATHUB_MGCG, "Address Translation Hub Medium Grain Clock Gating"},
        {AMD_CG_SUPPORT_ATHUB_LS, "Address Translation Hub Light Sleep"},
        {0, NULL},
};

static const struct hwmon_temp_label {
        enum PP_HWMON_TEMP channel;
        const char *label;
} temp_label[] = {
        {PP_TEMP_EDGE, "edge"},
        {PP_TEMP_JUNCTION, "junction"},
        {PP_TEMP_MEM, "mem"},
};

void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
{
        if (adev->pm.dpm_enabled) {
                mutex_lock(&adev->pm.mutex);
                if (power_supply_is_system_supplied() > 0)
                        adev->pm.ac_power = true;
                else
                        adev->pm.ac_power = false;
                if (adev->powerplay.pp_funcs &&
                    adev->powerplay.pp_funcs->enable_bapm)
                        amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);
                mutex_unlock(&adev->pm.mutex);
        }
}

int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
                           void *data, uint32_t *size)
{
        int ret = 0;

        if (!data || !size)
                return -EINVAL;

        if (is_support_sw_smu(adev))
                ret = smu_read_sensor(&adev->smu, sensor, data, size);
        else {
                if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
                        ret = adev->powerplay.pp_funcs->read_sensor((adev)->powerplay.pp_handle,
                                                                    sensor, data, size);
                else
                        ret = -EINVAL;
        }

        return ret;
}
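
/**
 * DOC: power_dpm_state
 *
 * power_dpm_state is a legacy interface for adjusting the dynamic power
 * management policy.  Writing "battery", "balanced" or "performance"
 * requests the corresponding power state; reading returns the policy
 * currently in effect.
 */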
static ssize_t amdgpu_get_dpm_state(struct device *dev,
                                    struct device_attribute *attr,
                                    char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        enum amd_pm_state_type pm;

        if (is_support_sw_smu(adev)) {
                if (adev->smu.ppt_funcs->get_current_power_state)
                        pm = amdgpu_smu_get_current_power_state(adev);
                else
                        pm = adev->pm.dpm.user_state;
        } else if (adev->powerplay.pp_funcs->get_current_power_state) {
                pm = amdgpu_dpm_get_current_power_state(adev);
        } else {
                pm = adev->pm.dpm.user_state;
        }

        return snprintf(buf, PAGE_SIZE, "%s\n",
                        (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
                        (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
}

static ssize_t amdgpu_set_dpm_state(struct device *dev,
                                    struct device_attribute *attr,
                                    const char *buf,
                                    size_t count)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        enum amd_pm_state_type state;

        if (strncmp("battery", buf, strlen("battery")) == 0)
                state = POWER_STATE_TYPE_BATTERY;
        else if (strncmp("balanced", buf, strlen("balanced")) == 0)
                state = POWER_STATE_TYPE_BALANCED;
        else if (strncmp("performance", buf, strlen("performance")) == 0)
                state = POWER_STATE_TYPE_PERFORMANCE;
        else {
                count = -EINVAL;
                goto fail;
        }

        if (is_support_sw_smu(adev)) {
                mutex_lock(&adev->pm.mutex);
                adev->pm.dpm.user_state = state;
                mutex_unlock(&adev->pm.mutex);
        } else if (adev->powerplay.pp_funcs->dispatch_tasks) {
                amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state);
        } else {
                mutex_lock(&adev->pm.mutex);
                adev->pm.dpm.user_state = state;
                mutex_unlock(&adev->pm.mutex);

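                /* Can't set dpm state when the card is off */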
                if (!(adev->flags & AMD_IS_PX) ||
                    (ddev->switch_power_state == DRM_SWITCH_POWER_ON))
                        amdgpu_pm_compute_clocks(adev);
        }
fail:
        return count;
}
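
/**
 * DOC: power_dpm_force_performance_level
 *
 * power_dpm_force_performance_level overrides the heuristic power state
 * selection.  Valid writes are "auto", "low", "high", "manual",
 * "profile_standard", "profile_min_sclk", "profile_min_mclk",
 * "profile_peak" and "profile_exit".  "manual" hands clock level control
 * to the pp_dpm_* and pp_power_profile_mode files, while the profile_*
 * modes pin clocks to fixed levels for profiling.  Reading returns the
 * currently forced level.
 */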
static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
                                                       struct device_attribute *attr,
                                                       char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        enum amd_dpm_forced_level level = 0xff;

        if (amdgpu_sriov_vf(adev))
                return 0;

        if ((adev->flags & AMD_IS_PX) &&
            (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
                return snprintf(buf, PAGE_SIZE, "off\n");

        if (is_support_sw_smu(adev))
                level = smu_get_performance_level(&adev->smu);
        else if (adev->powerplay.pp_funcs->get_performance_level)
                level = amdgpu_dpm_get_performance_level(adev);
        else
                level = adev->pm.dpm.forced_level;

        return snprintf(buf, PAGE_SIZE, "%s\n",
                        (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
                        (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
                        (level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
                        (level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" :
                        (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" :
                        (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
                        (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
                        (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" :
                        "unknown");
}

static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
                                                       struct device_attribute *attr,
                                                       const char *buf,
                                                       size_t count)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        enum amd_dpm_forced_level level;
        enum amd_dpm_forced_level current_level = 0xff;
        int ret = 0;

        if ((adev->flags & AMD_IS_PX) &&
            (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
                return -EINVAL;

        if (strncmp("low", buf, strlen("low")) == 0) {
                level = AMD_DPM_FORCED_LEVEL_LOW;
        } else if (strncmp("high", buf, strlen("high")) == 0) {
                level = AMD_DPM_FORCED_LEVEL_HIGH;
        } else if (strncmp("auto", buf, strlen("auto")) == 0) {
                level = AMD_DPM_FORCED_LEVEL_AUTO;
        } else if (strncmp("manual", buf, strlen("manual")) == 0) {
                level = AMD_DPM_FORCED_LEVEL_MANUAL;
        } else if (strncmp("profile_exit", buf, strlen("profile_exit")) == 0) {
                level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT;
        } else if (strncmp("profile_standard", buf, strlen("profile_standard")) == 0) {
                level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD;
        } else if (strncmp("profile_min_sclk", buf, strlen("profile_min_sclk")) == 0) {
                level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK;
        } else if (strncmp("profile_min_mclk", buf, strlen("profile_min_mclk")) == 0) {
                level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK;
        } else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) {
                level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
        } else {
                count = -EINVAL;
                goto fail;
        }

        if (amdgpu_sriov_vf(adev)) {
                if (amdgim_is_hwperf(adev) &&
                    adev->virt.ops->force_dpm_level) {
                        mutex_lock(&adev->pm.mutex);
                        adev->virt.ops->force_dpm_level(adev, level);
                        mutex_unlock(&adev->pm.mutex);
                        return count;
                } else {
                        return -EINVAL;
                }
        }

        if (is_support_sw_smu(adev))
                current_level = smu_get_performance_level(&adev->smu);
        else if (adev->powerplay.pp_funcs->get_performance_level)
                current_level = amdgpu_dpm_get_performance_level(adev);

        if (current_level == level)
                return count;

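        /* profile_exit setting is valid only when current mode is in profile mode */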
        if (!(current_level & (AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
                               AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
                               AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
                               AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)) &&
            (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)) {
                pr_err("Currently not in any profile mode!\n");
                return -EINVAL;
        }

        if (is_support_sw_smu(adev)) {
                ret = smu_force_performance_level(&adev->smu, level);
                if (ret)
                        count = -EINVAL;
        } else if (adev->powerplay.pp_funcs->force_performance_level) {
                mutex_lock(&adev->pm.mutex);
                if (adev->pm.dpm.thermal_active) {
                        count = -EINVAL;
                        mutex_unlock(&adev->pm.mutex);
                        goto fail;
                }
                ret = amdgpu_dpm_force_performance_level(adev, level);
                if (ret)
                        count = -EINVAL;
                else
                        adev->pm.dpm.forced_level = level;
                mutex_unlock(&adev->pm.mutex);
        }

fail:
        return count;
}

static ssize_t amdgpu_get_pp_num_states(struct device *dev,
                                        struct device_attribute *attr,
                                        char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        struct pp_states_info data;
        int i, buf_len, ret;

        if (is_support_sw_smu(adev)) {
                ret = smu_get_power_num_states(&adev->smu, &data);
                if (ret)
                        return ret;
        } else if (adev->powerplay.pp_funcs->get_pp_num_states)
                amdgpu_dpm_get_pp_num_states(adev, &data);

        buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums);
        for (i = 0; i < data.nums; i++)
                buf_len += snprintf(buf + buf_len, PAGE_SIZE, "%d %s\n", i,
                                (data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" :
                                (data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" :
                                (data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" :
                                (data.states[i] == POWER_STATE_TYPE_PERFORMANCE) ? "performance" : "default");

        return buf_len;
}

static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
                                       struct device_attribute *attr,
                                       char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        struct pp_states_info data;
        struct smu_context *smu = &adev->smu;
        enum amd_pm_state_type pm = 0;
        int i = 0, ret = 0;

        if (is_support_sw_smu(adev)) {
                pm = smu_get_current_power_state(smu);
                ret = smu_get_power_num_states(smu, &data);
                if (ret)
                        return ret;
        } else if (adev->powerplay.pp_funcs->get_current_power_state
                 && adev->powerplay.pp_funcs->get_pp_num_states) {
                pm = amdgpu_dpm_get_current_power_state(adev);
                amdgpu_dpm_get_pp_num_states(adev, &data);
        }

        for (i = 0; i < data.nums; i++) {
                if (pm == data.states[i])
                        break;
        }

        if (i == data.nums)
                i = -EINVAL;

        return snprintf(buf, PAGE_SIZE, "%d\n", i);
}

static ssize_t amdgpu_get_pp_force_state(struct device *dev,
                                         struct device_attribute *attr,
                                         char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;

        if (adev->pp_force_state_enabled)
                return amdgpu_get_pp_cur_state(dev, attr, buf);
        else
                return snprintf(buf, PAGE_SIZE, "\n");
}

static ssize_t amdgpu_set_pp_force_state(struct device *dev,
                                         struct device_attribute *attr,
                                         const char *buf,
                                         size_t count)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        enum amd_pm_state_type state = 0;
        unsigned long idx;
        int ret;

        if (strlen(buf) == 1)
                adev->pp_force_state_enabled = false;
        else if (is_support_sw_smu(adev))
                adev->pp_force_state_enabled = false;
        else if (adev->powerplay.pp_funcs->dispatch_tasks &&
                 adev->powerplay.pp_funcs->get_pp_num_states) {
                struct pp_states_info data;

                ret = kstrtoul(buf, 0, &idx);
                if (ret || idx >= ARRAY_SIZE(data.states)) {
                        count = -EINVAL;
                        goto fail;
                }
                idx = array_index_nospec(idx, ARRAY_SIZE(data.states));

                amdgpu_dpm_get_pp_num_states(adev, &data);
                state = data.states[idx];

                if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
                    state != POWER_STATE_TYPE_DEFAULT) {
                        amdgpu_dpm_dispatch_task(adev,
                                        AMD_PP_TASK_ENABLE_USER_STATE, &state);
                        adev->pp_force_state_enabled = true;
                }
        }
fail:
        return count;
}
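
/**
 * DOC: pp_table
 *
 * pp_table exposes the current powerplay table in binary form.  Reading
 * returns the table (clamped to one page); writing uploads a new one.
 * This is a debugging and advanced-tuning interface; an invalid table
 * can destabilize the hardware.
 */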
static ssize_t amdgpu_get_pp_table(struct device *dev,
                                   struct device_attribute *attr,
                                   char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        char *table = NULL;
        int size;

        if (is_support_sw_smu(adev)) {
                size = smu_sys_get_pp_table(&adev->smu, (void **)&table);
                if (size < 0)
                        return size;
        } else if (adev->powerplay.pp_funcs->get_pp_table)
                size = amdgpu_dpm_get_pp_table(adev, &table);
        else
                return 0;

        if (size >= PAGE_SIZE)
                size = PAGE_SIZE - 1;

        memcpy(buf, table, size);

        return size;
}

static ssize_t amdgpu_set_pp_table(struct device *dev,
                                   struct device_attribute *attr,
                                   const char *buf,
                                   size_t count)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        int ret = 0;

        if (is_support_sw_smu(adev)) {
                ret = smu_sys_set_pp_table(&adev->smu, (void *)buf, count);
                if (ret)
                        return ret;
        } else if (adev->powerplay.pp_funcs->set_pp_table)
                amdgpu_dpm_set_pp_table(adev, buf, count);

        return count;
}
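
/**
 * DOC: pp_od_clk_voltage
 *
 * pp_od_clk_voltage is the overdrive interface for manually adjusting
 * clocks and voltages.  Writes consist of a command letter plus
 * parameters: "s <level> <clock> <voltage>" edits the sclk/vddc table,
 * "m <level> <clock> <voltage>" edits the mclk/vddc table,
 * "vc <point> <clock> <voltage>" edits the vddc curve on ASICs that use
 * one, "r" restores the default tables and "c" commits the pending
 * edits.  Reading prints the current OD_SCLK, OD_MCLK, OD_VDDC_CURVE
 * and OD_RANGE tables.
 */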
static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
                                            struct device_attribute *attr,
                                            const char *buf,
                                            size_t count)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        int ret;
        uint32_t parameter_size = 0;
        long parameter[64];
        char buf_cpy[128];
        char *tmp_str;
        char *sub_str;
        const char delimiter[3] = {' ', '\n', '\0'};
        uint32_t type;

        if (count > 127)
                return -EINVAL;

        if (*buf == 's')
                type = PP_OD_EDIT_SCLK_VDDC_TABLE;
        else if (*buf == 'm')
                type = PP_OD_EDIT_MCLK_VDDC_TABLE;
        else if (*buf == 'r')
                type = PP_OD_RESTORE_DEFAULT_TABLE;
        else if (*buf == 'c')
                type = PP_OD_COMMIT_DPM_TABLE;
        else if (!strncmp(buf, "vc", 2))
                type = PP_OD_EDIT_VDDC_CURVE;
        else
                return -EINVAL;

        memcpy(buf_cpy, buf, count+1);

        tmp_str = buf_cpy;

        if (type == PP_OD_EDIT_VDDC_CURVE)
                tmp_str++;
        while (isspace(*++tmp_str));

        while (tmp_str[0]) {
                sub_str = strsep(&tmp_str, delimiter);
                ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
                if (ret)
                        return -EINVAL;
                parameter_size++;

                while (isspace(*tmp_str))
                        tmp_str++;
        }

        if (is_support_sw_smu(adev)) {
                ret = smu_od_edit_dpm_table(&adev->smu, type,
                                            parameter, parameter_size);

                if (ret)
                        return -EINVAL;
        } else {
                if (adev->powerplay.pp_funcs->odn_edit_dpm_table) {
                        ret = amdgpu_dpm_odn_edit_dpm_table(adev, type,
                                                            parameter, parameter_size);
                        if (ret)
                                return -EINVAL;
                }

                if (type == PP_OD_COMMIT_DPM_TABLE) {
                        if (adev->powerplay.pp_funcs->dispatch_tasks) {
                                amdgpu_dpm_dispatch_task(adev,
                                                AMD_PP_TASK_READJUST_POWER_STATE,
                                                NULL);
                                return count;
                        } else {
                                return -EINVAL;
                        }
                }
        }

        return count;
}

static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
                                            struct device_attribute *attr,
                                            char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        uint32_t size = 0;

        if (is_support_sw_smu(adev)) {
                size = smu_print_clk_levels(&adev->smu, SMU_OD_SCLK, buf);
                size += smu_print_clk_levels(&adev->smu, SMU_OD_MCLK, buf+size);
                size += smu_print_clk_levels(&adev->smu, SMU_OD_VDDC_CURVE, buf+size);
                size += smu_print_clk_levels(&adev->smu, SMU_OD_RANGE, buf+size);
                return size;
        } else if (adev->powerplay.pp_funcs->print_clock_levels) {
                size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
                size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size);
                size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf+size);
                size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf+size);
                return size;
        } else {
                return snprintf(buf, PAGE_SIZE, "\n");
        }
}
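
/**
 * DOC: pp_features
 *
 * pp_features controls the powerplay feature mask.  Writing a 64-bit
 * mask enables exactly the features whose bits are set; reading lists
 * the supported features and their current state.
 */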
static ssize_t amdgpu_set_pp_feature_status(struct device *dev,
                                            struct device_attribute *attr,
                                            const char *buf,
                                            size_t count)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        uint64_t featuremask;
        int ret;

        ret = kstrtou64(buf, 0, &featuremask);
        if (ret)
                return -EINVAL;

        pr_debug("featuremask = 0x%llx\n", featuremask);

        if (is_support_sw_smu(adev)) {
                ret = smu_sys_set_pp_feature_mask(&adev->smu, featuremask);
                if (ret)
                        return -EINVAL;
        } else if (adev->powerplay.pp_funcs->set_ppfeature_status) {
                ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);
                if (ret)
                        return -EINVAL;
        }

        return count;
}

static ssize_t amdgpu_get_pp_feature_status(struct device *dev,
                                            struct device_attribute *attr,
                                            char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;

        if (is_support_sw_smu(adev)) {
                return smu_sys_get_pp_feature_mask(&adev->smu, buf);
        } else if (adev->powerplay.pp_funcs->get_ppfeature_status)
                return amdgpu_dpm_get_ppfeature_status(adev, buf);

        return snprintf(buf, PAGE_SIZE, "\n");
}
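
/**
 * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk pp_dpm_pcie
 *
 * These files expose the available power levels for each clock domain
 * (and the PCIe link).  Reading prints the levels, marking the active
 * one.  Writing a space-separated list of level indices restricts the
 * domain to those levels; this takes effect when
 * power_dpm_force_performance_level is set to "manual".
 */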
static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
                                      struct device_attribute *attr,
                                      char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;

        if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) &&
            adev->virt.ops->get_pp_clk)
                return adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf);

        if (is_support_sw_smu(adev))
                return smu_print_clk_levels(&adev->smu, SMU_SCLK, buf);
        else if (adev->powerplay.pp_funcs->print_clock_levels)
                return amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);
        else
                return snprintf(buf, PAGE_SIZE, "\n");
}
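
/*
 * Worst case: 32 bits individually specified, in octal at 12 characters
 * per line (+1 for \n).
 */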
#define AMDGPU_MASK_BUF_MAX (32 * 13)

static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
{
        int ret;
        long level;
        char *sub_str = NULL;
        char *tmp;
        char buf_cpy[AMDGPU_MASK_BUF_MAX + 1];
        const char delimiter[3] = {' ', '\n', '\0'};
        size_t bytes;

        *mask = 0;

        bytes = min(count, sizeof(buf_cpy) - 1);
        memcpy(buf_cpy, buf, bytes);
        buf_cpy[bytes] = '\0';
        tmp = buf_cpy;
        while (tmp[0]) {
                sub_str = strsep(&tmp, delimiter);
                if (strlen(sub_str)) {
                        ret = kstrtol(sub_str, 0, &level);
                        if (ret)
                                return -EINVAL;
                        *mask |= 1 << level;
                } else
                        break;
        }

        return 0;
}

static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
                                      struct device_attribute *attr,
                                      const char *buf,
                                      size_t count)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        int ret;
        uint32_t mask = 0;

        if (amdgpu_sriov_vf(adev))
                return 0;

        ret = amdgpu_read_mask(buf, count, &mask);
        if (ret)
                return ret;

        if (is_support_sw_smu(adev))
                ret = smu_force_clk_levels(&adev->smu, SMU_SCLK, mask);
        else if (adev->powerplay.pp_funcs->force_clock_level)
                ret = amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);

        if (ret)
                return -EINVAL;

        return count;
}

static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
                                      struct device_attribute *attr,
                                      char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;

        if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) &&
            adev->virt.ops->get_pp_clk)
                return adev->virt.ops->get_pp_clk(adev, PP_MCLK, buf);

        if (is_support_sw_smu(adev))
                return smu_print_clk_levels(&adev->smu, SMU_MCLK, buf);
        else if (adev->powerplay.pp_funcs->print_clock_levels)
                return amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf);
        else
                return snprintf(buf, PAGE_SIZE, "\n");
}

static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
                                      struct device_attribute *attr,
                                      const char *buf,
                                      size_t count)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        int ret;
        uint32_t mask = 0;

        if (amdgpu_sriov_vf(adev))
                return 0;

        ret = amdgpu_read_mask(buf, count, &mask);
        if (ret)
                return ret;

        if (is_support_sw_smu(adev))
                ret = smu_force_clk_levels(&adev->smu, SMU_MCLK, mask);
        else if (adev->powerplay.pp_funcs->force_clock_level)
                ret = amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);

        if (ret)
                return -EINVAL;

        return count;
}

static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
                                        struct device_attribute *attr,
                                        char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;

        if (is_support_sw_smu(adev))
                return smu_print_clk_levels(&adev->smu, SMU_SOCCLK, buf);
        else if (adev->powerplay.pp_funcs->print_clock_levels)
                return amdgpu_dpm_print_clock_levels(adev, PP_SOCCLK, buf);
        else
                return snprintf(buf, PAGE_SIZE, "\n");
}

static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
                                        struct device_attribute *attr,
                                        const char *buf,
                                        size_t count)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        int ret;
        uint32_t mask = 0;

        ret = amdgpu_read_mask(buf, count, &mask);
        if (ret)
                return ret;

        if (is_support_sw_smu(adev))
                ret = smu_force_clk_levels(&adev->smu, SMU_SOCCLK, mask);
        else if (adev->powerplay.pp_funcs->force_clock_level)
                ret = amdgpu_dpm_force_clock_level(adev, PP_SOCCLK, mask);

        if (ret)
                return -EINVAL;

        return count;
}

static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
                                      struct device_attribute *attr,
                                      char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;

        if (is_support_sw_smu(adev))
                return smu_print_clk_levels(&adev->smu, SMU_FCLK, buf);
        else if (adev->powerplay.pp_funcs->print_clock_levels)
                return amdgpu_dpm_print_clock_levels(adev, PP_FCLK, buf);
        else
                return snprintf(buf, PAGE_SIZE, "\n");
}

static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
                                      struct device_attribute *attr,
                                      const char *buf,
                                      size_t count)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        int ret;
        uint32_t mask = 0;

        ret = amdgpu_read_mask(buf, count, &mask);
        if (ret)
                return ret;

        if (is_support_sw_smu(adev))
                ret = smu_force_clk_levels(&adev->smu, SMU_FCLK, mask);
        else if (adev->powerplay.pp_funcs->force_clock_level)
                ret = amdgpu_dpm_force_clock_level(adev, PP_FCLK, mask);

        if (ret)
                return -EINVAL;

        return count;
}

static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
                                         struct device_attribute *attr,
                                         char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;

        if (is_support_sw_smu(adev))
                return smu_print_clk_levels(&adev->smu, SMU_DCEFCLK, buf);
        else if (adev->powerplay.pp_funcs->print_clock_levels)
                return amdgpu_dpm_print_clock_levels(adev, PP_DCEFCLK, buf);
        else
                return snprintf(buf, PAGE_SIZE, "\n");
}

static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
                                         struct device_attribute *attr,
                                         const char *buf,
                                         size_t count)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        int ret;
        uint32_t mask = 0;

        ret = amdgpu_read_mask(buf, count, &mask);
        if (ret)
                return ret;

        if (is_support_sw_smu(adev))
                ret = smu_force_clk_levels(&adev->smu, SMU_DCEFCLK, mask);
        else if (adev->powerplay.pp_funcs->force_clock_level)
                ret = amdgpu_dpm_force_clock_level(adev, PP_DCEFCLK, mask);

        if (ret)
                return -EINVAL;

        return count;
}

static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
                                      struct device_attribute *attr,
                                      char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;

        if (is_support_sw_smu(adev))
                return smu_print_clk_levels(&adev->smu, SMU_PCIE, buf);
        else if (adev->powerplay.pp_funcs->print_clock_levels)
                return amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);
        else
                return snprintf(buf, PAGE_SIZE, "\n");
}

static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
                                      struct device_attribute *attr,
                                      const char *buf,
                                      size_t count)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        int ret;
        uint32_t mask = 0;

        ret = amdgpu_read_mask(buf, count, &mask);
        if (ret)
                return ret;

        if (is_support_sw_smu(adev))
                ret = smu_force_clk_levels(&adev->smu, SMU_PCIE, mask);
        else if (adev->powerplay.pp_funcs->force_clock_level)
                ret = amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);

        if (ret)
                return -EINVAL;

        return count;
}

static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
                                     struct device_attribute *attr,
                                     char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        uint32_t value = 0;

        if (is_support_sw_smu(adev))
                value = smu_get_od_percentage(&(adev->smu), SMU_OD_SCLK);
        else if (adev->powerplay.pp_funcs->get_sclk_od)
                value = amdgpu_dpm_get_sclk_od(adev);

        return snprintf(buf, PAGE_SIZE, "%d\n", value);
}

static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
                                     struct device_attribute *attr,
                                     const char *buf,
                                     size_t count)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        int ret;
        long int value;

        ret = kstrtol(buf, 0, &value);

        if (ret) {
                count = -EINVAL;
                goto fail;
        }

        if (is_support_sw_smu(adev)) {
                value = smu_set_od_percentage(&(adev->smu), SMU_OD_SCLK, (uint32_t)value);
        } else {
                if (adev->powerplay.pp_funcs->set_sclk_od)
                        amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);

                if (adev->powerplay.pp_funcs->dispatch_tasks) {
                        amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
                } else {
                        adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
                        amdgpu_pm_compute_clocks(adev);
                }
        }

fail:
        return count;
}

static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
                                     struct device_attribute *attr,
                                     char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        uint32_t value = 0;

        if (is_support_sw_smu(adev))
                value = smu_get_od_percentage(&(adev->smu), SMU_OD_MCLK);
        else if (adev->powerplay.pp_funcs->get_mclk_od)
                value = amdgpu_dpm_get_mclk_od(adev);

        return snprintf(buf, PAGE_SIZE, "%d\n", value);
}

static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
                                     struct device_attribute *attr,
                                     const char *buf,
                                     size_t count)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        int ret;
        long int value;

        ret = kstrtol(buf, 0, &value);

        if (ret) {
                count = -EINVAL;
                goto fail;
        }

        if (is_support_sw_smu(adev)) {
                value = smu_set_od_percentage(&(adev->smu), SMU_OD_MCLK, (uint32_t)value);
        } else {
                if (adev->powerplay.pp_funcs->set_mclk_od)
                        amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);

                if (adev->powerplay.pp_funcs->dispatch_tasks) {
                        amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
                } else {
                        adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
                        amdgpu_pm_compute_clocks(adev);
                }
        }

fail:
        return count;
}
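
/**
 * DOC: pp_power_profile_mode
 *
 * pp_power_profile_mode selects the heuristics used to tune clocks and
 * voltages for a workload.  Reading lists the available profiles and
 * their parameters.  Writing a profile number selects it; for the
 * CUSTOM profile the number is followed by the profile parameters.
 */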
static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
                                                struct device_attribute *attr,
                                                char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;

        if (is_support_sw_smu(adev))
                return smu_get_power_profile_mode(&adev->smu, buf);
        else if (adev->powerplay.pp_funcs->get_power_profile_mode)
                return amdgpu_dpm_get_power_profile_mode(adev, buf);

        return snprintf(buf, PAGE_SIZE, "\n");
}

static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
                                                struct device_attribute *attr,
                                                const char *buf,
                                                size_t count)
{
        int ret = 0xff;
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        uint32_t parameter_size = 0;
        long parameter[64];
        char *sub_str, buf_cpy[128];
        char *tmp_str;
        uint32_t i = 0;
        char tmp[2];
        long int profile_mode = 0;
        const char delimiter[3] = {' ', '\n', '\0'};

        tmp[0] = *(buf);
        tmp[1] = '\0';
        ret = kstrtol(tmp, 0, &profile_mode);
        if (ret)
                goto fail;

        if (profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
                if (count < 2 || count > 127)
                        return -EINVAL;
                while (isspace(*++buf))
                        i++;
                memcpy(buf_cpy, buf, count-i);
                tmp_str = buf_cpy;
                while (tmp_str[0]) {
                        sub_str = strsep(&tmp_str, delimiter);
                        ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
                        if (ret) {
                                count = -EINVAL;
                                goto fail;
                        }
                        parameter_size++;
                        while (isspace(*tmp_str))
                                tmp_str++;
                }
        }
        parameter[parameter_size] = profile_mode;
        if (is_support_sw_smu(adev))
                ret = smu_set_power_profile_mode(&adev->smu, parameter, parameter_size);
        else if (adev->powerplay.pp_funcs->set_power_profile_mode)
                ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);
        if (!ret)
                return count;
fail:
        return -EINVAL;
}
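
/**
 * DOC: gpu_busy_percent
 *
 * gpu_busy_percent returns how busy the GPU is, as a percentage, read
 * from the GPU_LOAD sensor.
 */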
static ssize_t amdgpu_get_busy_percent(struct device *dev,
                                       struct device_attribute *attr,
                                       char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        int r, value, size = sizeof(value);

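        /* read the IP busy sensor */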
        r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD,
                                   (void *)&value, &size);

        if (r)
                return r;

        return snprintf(buf, PAGE_SIZE, "%d\n", value);
}
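
/**
 * DOC: mem_busy_percent
 *
 * mem_busy_percent returns how busy VRAM is, as a percentage, read from
 * the MEM_LOAD sensor.
 */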
static ssize_t amdgpu_get_memory_busy_percent(struct device *dev,
                                              struct device_attribute *attr,
                                              char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        int r, value, size = sizeof(value);

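        /* read the IP busy sensor */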
        r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD,
                                   (void *)&value, &size);

        if (r)
                return r;

        return snprintf(buf, PAGE_SIZE, "%d\n", value);
}
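
/**
 * DOC: pcie_bw
 *
 * pcie_bw estimates PCIe bandwidth usage as three values: the number of
 * packets received, the number of packets sent and the maximum payload
 * size in bytes, from which approximate throughput can be derived.
 */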
static ssize_t amdgpu_get_pcie_bw(struct device *dev,
                                  struct device_attribute *attr,
                                  char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        uint64_t count0, count1;

        amdgpu_asic_get_pcie_usage(adev, &count0, &count1);
        return snprintf(buf, PAGE_SIZE, "%llu %llu %i\n",
                        count0, count1, pcie_get_mps(adev->pdev));
}
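
/**
 * DOC: unique_id
 *
 * unique_id returns a 64-bit value that is unique per GPU, on ASICs
 * that expose one; reads return nothing otherwise.
 */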
static ssize_t amdgpu_get_unique_id(struct device *dev,
                                    struct device_attribute *attr,
                                    char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;

        if (adev->unique_id)
                return snprintf(buf, PAGE_SIZE, "%016llx\n", adev->unique_id);

        return 0;
}

static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, amdgpu_get_dpm_state, amdgpu_set_dpm_state);
static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
                   amdgpu_get_dpm_forced_performance_level,
                   amdgpu_set_dpm_forced_performance_level);
static DEVICE_ATTR(pp_num_states, S_IRUGO, amdgpu_get_pp_num_states, NULL);
static DEVICE_ATTR(pp_cur_state, S_IRUGO, amdgpu_get_pp_cur_state, NULL);
static DEVICE_ATTR(pp_force_state, S_IRUGO | S_IWUSR,
                   amdgpu_get_pp_force_state,
                   amdgpu_set_pp_force_state);
static DEVICE_ATTR(pp_table, S_IRUGO | S_IWUSR,
                   amdgpu_get_pp_table,
                   amdgpu_set_pp_table);
static DEVICE_ATTR(pp_dpm_sclk, S_IRUGO | S_IWUSR,
                   amdgpu_get_pp_dpm_sclk,
                   amdgpu_set_pp_dpm_sclk);
static DEVICE_ATTR(pp_dpm_mclk, S_IRUGO | S_IWUSR,
                   amdgpu_get_pp_dpm_mclk,
                   amdgpu_set_pp_dpm_mclk);
static DEVICE_ATTR(pp_dpm_socclk, S_IRUGO | S_IWUSR,
                   amdgpu_get_pp_dpm_socclk,
                   amdgpu_set_pp_dpm_socclk);
static DEVICE_ATTR(pp_dpm_fclk, S_IRUGO | S_IWUSR,
                   amdgpu_get_pp_dpm_fclk,
                   amdgpu_set_pp_dpm_fclk);
static DEVICE_ATTR(pp_dpm_dcefclk, S_IRUGO | S_IWUSR,
                   amdgpu_get_pp_dpm_dcefclk,
                   amdgpu_set_pp_dpm_dcefclk);
static DEVICE_ATTR(pp_dpm_pcie, S_IRUGO | S_IWUSR,
                   amdgpu_get_pp_dpm_pcie,
                   amdgpu_set_pp_dpm_pcie);
static DEVICE_ATTR(pp_sclk_od, S_IRUGO | S_IWUSR,
                   amdgpu_get_pp_sclk_od,
                   amdgpu_set_pp_sclk_od);
static DEVICE_ATTR(pp_mclk_od, S_IRUGO | S_IWUSR,
                   amdgpu_get_pp_mclk_od,
                   amdgpu_set_pp_mclk_od);
static DEVICE_ATTR(pp_power_profile_mode, S_IRUGO | S_IWUSR,
                   amdgpu_get_pp_power_profile_mode,
                   amdgpu_set_pp_power_profile_mode);
static DEVICE_ATTR(pp_od_clk_voltage, S_IRUGO | S_IWUSR,
                   amdgpu_get_pp_od_clk_voltage,
                   amdgpu_set_pp_od_clk_voltage);
static DEVICE_ATTR(gpu_busy_percent, S_IRUGO,
                   amdgpu_get_busy_percent, NULL);
static DEVICE_ATTR(mem_busy_percent, S_IRUGO,
                   amdgpu_get_memory_busy_percent, NULL);
static DEVICE_ATTR(pcie_bw, S_IRUGO, amdgpu_get_pcie_bw, NULL);
static DEVICE_ATTR(pp_features, S_IRUGO | S_IWUSR,
                   amdgpu_get_pp_feature_status,
                   amdgpu_set_pp_feature_status);
static DEVICE_ATTR(unique_id, S_IRUGO, amdgpu_get_unique_id, NULL);

static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
                                      struct device_attribute *attr,
                                      char *buf)
{
        struct amdgpu_device *adev = dev_get_drvdata(dev);
        struct drm_device *ddev = adev->ddev;
        int channel = to_sensor_dev_attr(attr)->index;
        int r, temp = 0, size = sizeof(temp);

        if ((adev->flags & AMD_IS_PX) &&
            (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
                return -EINVAL;

        if (channel >= PP_TEMP_MAX)
                return -EINVAL;

        switch (channel) {
        case PP_TEMP_JUNCTION:
                r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
                                           (void *)&temp, &size);
                if (r)
                        return r;
                break;
        case PP_TEMP_EDGE:
                r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_EDGE_TEMP,
                                           (void *)&temp, &size);
                if (r)
                        return r;
                break;
        case PP_TEMP_MEM:
                r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_TEMP,
                                           (void *)&temp, &size);
                if (r)
                        return r;
                break;
        }

        return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}

static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
                                             struct device_attribute *attr,
                                             char *buf)
{
        struct amdgpu_device *adev = dev_get_drvdata(dev);
        int hyst = to_sensor_dev_attr(attr)->index;
        int temp;

        if (hyst)
                temp = adev->pm.dpm.thermal.min_temp;
        else
                temp = adev->pm.dpm.thermal.max_temp;

        return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}

static ssize_t amdgpu_hwmon_show_hotspot_temp_thresh(struct device *dev,
                                                     struct device_attribute *attr,
                                                     char *buf)
{
        struct amdgpu_device *adev = dev_get_drvdata(dev);
        int hyst = to_sensor_dev_attr(attr)->index;
        int temp;

        if (hyst)
                temp = adev->pm.dpm.thermal.min_hotspot_temp;
        else
                temp = adev->pm.dpm.thermal.max_hotspot_crit_temp;

        return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}

static ssize_t amdgpu_hwmon_show_mem_temp_thresh(struct device *dev,
                                                 struct device_attribute *attr,
                                                 char *buf)
{
        struct amdgpu_device *adev = dev_get_drvdata(dev);
        int hyst = to_sensor_dev_attr(attr)->index;
        int temp;

        if (hyst)
                temp = adev->pm.dpm.thermal.min_mem_temp;
        else
                temp = adev->pm.dpm.thermal.max_mem_crit_temp;

        return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}

static ssize_t amdgpu_hwmon_show_temp_label(struct device *dev,
                                            struct device_attribute *attr,
                                            char *buf)
{
        int channel = to_sensor_dev_attr(attr)->index;

        if (channel >= PP_TEMP_MAX)
                return -EINVAL;

        return snprintf(buf, PAGE_SIZE, "%s\n", temp_label[channel].label);
}

static ssize_t amdgpu_hwmon_show_temp_emergency(struct device *dev,
                                                struct device_attribute *attr,
                                                char *buf)
{
        struct amdgpu_device *adev = dev_get_drvdata(dev);
        int channel = to_sensor_dev_attr(attr)->index;
        int temp = 0;

        if (channel >= PP_TEMP_MAX)
                return -EINVAL;

        switch (channel) {
        case PP_TEMP_JUNCTION:
                temp = adev->pm.dpm.thermal.max_hotspot_emergency_temp;
                break;
        case PP_TEMP_EDGE:
                temp = adev->pm.dpm.thermal.max_edge_emergency_temp;
                break;
        case PP_TEMP_MEM:
                temp = adev->pm.dpm.thermal.max_mem_emergency_temp;
                break;
        }

        return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}

static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
                                            struct device_attribute *attr,
                                            char *buf)
{
        struct amdgpu_device *adev = dev_get_drvdata(dev);
        u32 pwm_mode = 0;

        if (is_support_sw_smu(adev)) {
                pwm_mode = smu_get_fan_control_mode(&adev->smu);
        } else {
                if (!adev->powerplay.pp_funcs->get_fan_control_mode)
                        return -EINVAL;

                pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
        }

        return sprintf(buf, "%i\n", pwm_mode);
}

static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
                                            struct device_attribute *attr,
                                            const char *buf,
                                            size_t count)
{
        struct amdgpu_device *adev = dev_get_drvdata(dev);
        int err;
        int value;

        if ((adev->flags & AMD_IS_PX) &&
            (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
                return -EINVAL;

        err = kstrtoint(buf, 10, &value);
        if (err)
                return err;

        if (is_support_sw_smu(adev)) {
                smu_set_fan_control_mode(&adev->smu, value);
        } else {
                if (!adev->powerplay.pp_funcs->set_fan_control_mode)
                        return -EINVAL;

                amdgpu_dpm_set_fan_control_mode(adev, value);
        }

        return count;
}

static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev,
                                         struct device_attribute *attr,
                                         char *buf)
{
        return sprintf(buf, "%i\n", 0);
}

static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev,
                                         struct device_attribute *attr,
                                         char *buf)
{
        return sprintf(buf, "%i\n", 255);
}

static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count)
{
        struct amdgpu_device *adev = dev_get_drvdata(dev);
        int err;
        u32 value;
        u32 pwm_mode;

        if ((adev->flags & AMD_IS_PX) &&
            (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
                return -EINVAL;

        if (is_support_sw_smu(adev))
                pwm_mode = smu_get_fan_control_mode(&adev->smu);
        else
                pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);

        if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
                pr_info("manual fan speed control should be enabled first\n");
                return -EINVAL;
        }

        err = kstrtou32(buf, 10, &value);
        if (err)
                return err;

        value = (value * 100) / 255;

        if (is_support_sw_smu(adev)) {
                err = smu_set_fan_speed_percent(&adev->smu, value);
                if (err)
                        return err;
        } else if (adev->powerplay.pp_funcs->set_fan_speed_percent) {
                err = amdgpu_dpm_set_fan_speed_percent(adev, value);
                if (err)
                        return err;
        }

        return count;
}

static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
                                     struct device_attribute *attr,
                                     char *buf)
{
        struct amdgpu_device *adev = dev_get_drvdata(dev);
        int err;
        u32 speed = 0;

        if ((adev->flags & AMD_IS_PX) &&
            (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
                return -EINVAL;

        if (is_support_sw_smu(adev)) {
                err = smu_get_fan_speed_percent(&adev->smu, &speed);
                if (err)
                        return err;
        } else if (adev->powerplay.pp_funcs->get_fan_speed_percent) {
                err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
                if (err)
                        return err;
        }

        speed = (speed * 255) / 100;

        return sprintf(buf, "%i\n", speed);
}

static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
                                           struct device_attribute *attr,
                                           char *buf)
{
        struct amdgpu_device *adev = dev_get_drvdata(dev);
        int err;
        u32 speed = 0;

        if ((adev->flags & AMD_IS_PX) &&
            (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
                return -EINVAL;

        if (is_support_sw_smu(adev)) {
                err = smu_get_fan_speed_rpm(&adev->smu, &speed);
                if (err)
                        return err;
        } else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) {
                err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
                if (err)
                        return err;
        }

        return sprintf(buf, "%i\n", speed);
}

static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
                                         struct device_attribute *attr,
                                         char *buf)
{
        struct amdgpu_device *adev = dev_get_drvdata(dev);
        u32 min_rpm = 0;
        u32 size = sizeof(min_rpm);
        int r;

        r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
                                   (void *)&min_rpm, &size);
        if (r)
                return r;

        return snprintf(buf, PAGE_SIZE, "%d\n", min_rpm);
}

static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
                                         struct device_attribute *attr,
                                         char *buf)
{
        struct amdgpu_device *adev = dev_get_drvdata(dev);
        u32 max_rpm = 0;
        u32 size = sizeof(max_rpm);
        int r;

        r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM,
                                   (void *)&max_rpm, &size);
        if (r)
                return r;

        return snprintf(buf, PAGE_SIZE, "%d\n", max_rpm);
}

static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
                                            struct device_attribute *attr,
                                            char *buf)
{
        struct amdgpu_device *adev = dev_get_drvdata(dev);
        int err;
        u32 rpm = 0;

        if ((adev->flags & AMD_IS_PX) &&
            (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
                return -EINVAL;

        if (is_support_sw_smu(adev)) {
                err = smu_get_fan_speed_rpm(&adev->smu, &rpm);
                if (err)
                        return err;
        } else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) {
                err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
                if (err)
                        return err;
        }

        return sprintf(buf, "%i\n", rpm);
}

static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
                                            struct device_attribute *attr,
                                            const char *buf, size_t count)
{
        struct amdgpu_device *adev = dev_get_drvdata(dev);
        int err;
        u32 value;
        u32 pwm_mode;

        if (is_support_sw_smu(adev))
                pwm_mode = smu_get_fan_control_mode(&adev->smu);
        else
                pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);

        if (pwm_mode != AMD_FAN_CTRL_MANUAL)
                return -ENODATA;

        if ((adev->flags & AMD_IS_PX) &&
            (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
                return -EINVAL;

        err = kstrtou32(buf, 10, &value);
        if (err)
                return err;

        if (is_support_sw_smu(adev)) {
                err = smu_set_fan_speed_rpm(&adev->smu, value);
                if (err)
                        return err;
        } else if (adev->powerplay.pp_funcs->set_fan_speed_rpm) {
                err = amdgpu_dpm_set_fan_speed_rpm(adev, value);
                if (err)
                        return err;
        }

        return count;
}

static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
                                            struct device_attribute *attr,
                                            char *buf)
{
        struct amdgpu_device *adev = dev_get_drvdata(dev);
        u32 pwm_mode = 0;

        if (is_support_sw_smu(adev)) {
                pwm_mode = smu_get_fan_control_mode(&adev->smu);
        } else {
                if (!adev->powerplay.pp_funcs->get_fan_control_mode)
                        return -EINVAL;

                pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
        }

        return sprintf(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1);
}

static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
                                            struct device_attribute *attr,
                                            const char *buf,
                                            size_t count)
{
        struct amdgpu_device *adev = dev_get_drvdata(dev);
        int err;
        int value;
        u32 pwm_mode;

        if ((adev->flags & AMD_IS_PX) &&
            (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
                return -EINVAL;

        err = kstrtoint(buf, 10, &value);
        if (err)
                return err;

        if (value == 0)
                pwm_mode = AMD_FAN_CTRL_AUTO;
        else if (value == 1)
                pwm_mode = AMD_FAN_CTRL_MANUAL;
        else
                return -EINVAL;

        if (is_support_sw_smu(adev)) {
                smu_set_fan_control_mode(&adev->smu, pwm_mode);
        } else {
                if (!adev->powerplay.pp_funcs->set_fan_control_mode)
                        return -EINVAL;

                amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
        }

        return count;
}

static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
                                        struct device_attribute *attr,
                                        char *buf)
{
        struct amdgpu_device *adev = dev_get_drvdata(dev);
        struct drm_device *ddev = adev->ddev;
        u32 vddgfx;
        int r, size = sizeof(vddgfx);

        if ((adev->flags & AMD_IS_PX) &&
            (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
                return -EINVAL;

        r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX,
                                   (void *)&vddgfx, &size);
        if (r)
                return r;

        return snprintf(buf, PAGE_SIZE, "%d\n", vddgfx);
}

static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev,
                                              struct device_attribute *attr,
                                              char *buf)
{
        return snprintf(buf, PAGE_SIZE, "vddgfx\n");
}

static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
                                       struct device_attribute *attr,
                                       char *buf)
{
        struct amdgpu_device *adev = dev_get_drvdata(dev);
        struct drm_device *ddev = adev->ddev;
        u32 vddnb;
        int r, size = sizeof(vddnb);

        if (!(adev->flags & AMD_IS_APU))
                return -EINVAL;

        if ((adev->flags & AMD_IS_PX) &&
            (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
                return -EINVAL;

        r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB,
                                   (void *)&vddnb, &size);
        if (r)
                return r;

        return snprintf(buf, PAGE_SIZE, "%d\n", vddnb);
}

static ssize_t amdgpu_hwmon_show_vddnb_label(struct device *dev,
                                             struct device_attribute *attr,
                                             char *buf)
{
        return snprintf(buf, PAGE_SIZE, "vddnb\n");
}

static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
                                           struct device_attribute *attr,
                                           char *buf)
{
        struct amdgpu_device *adev = dev_get_drvdata(dev);
        struct drm_device *ddev = adev->ddev;
        u32 query = 0;
        int r, size = sizeof(u32);
        unsigned uw;

        if ((adev->flags & AMD_IS_PX) &&
            (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
                return -EINVAL;

        r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER,
                                   (void *)&query, &size);
        if (r)
                return r;

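        /* convert to microwatts */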
1994 uw = (query >> 8) * 1000000 + (query & 0xff) * 1000;
1995
1996 return snprintf(buf, PAGE_SIZE, "%u\n", uw);
1997 }
1998
1999 static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev,
2000 struct device_attribute *attr,
2001 char *buf)
2002 {
2003 return sprintf(buf, "%i\n", 0);
2004 }
2005
2006 static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
2007 struct device_attribute *attr,
2008 char *buf)
2009 {
2010 struct amdgpu_device *adev = dev_get_drvdata(dev);
2011 uint32_t limit = 0;
2012
2013 if (is_support_sw_smu(adev)) {
2014 smu_get_power_limit(&adev->smu, &limit, true);
2015 return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
2016 } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
2017 adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, true);
2018 return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
2019 } else {
2020 return snprintf(buf, PAGE_SIZE, "\n");
2021 }
2022 }
2023
2024 static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
2025 struct device_attribute *attr,
2026 char *buf)
2027 {
2028 struct amdgpu_device *adev = dev_get_drvdata(dev);
2029 uint32_t limit = 0;
2030
2031 if (is_support_sw_smu(adev)) {
2032 smu_get_power_limit(&adev->smu, &limit, false);
2033 return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
2034 } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
2035 adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, false);
2036 return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
2037 } else {
2038 return snprintf(buf, PAGE_SIZE, "\n");
2039 }
2040 }
2041
2042
2043 static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
2044 struct device_attribute *attr,
2045 const char *buf,
2046 size_t count)
2047 {
2048 struct amdgpu_device *adev = dev_get_drvdata(dev);
2049 int err;
2050 u32 value;
2051
2052 err = kstrtou32(buf, 10, &value);
2053 if (err)
2054 return err;
2055
2056 value = value / 1000000;
2057
2058 if (is_support_sw_smu(adev)) {
2059 err = smu_set_power_limit(&adev->smu, value);
2060 } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_power_limit) {
2061 err = adev->powerplay.pp_funcs->set_power_limit(adev->powerplay.pp_handle, value);
2062 } else {
2063 err = -EINVAL;
2064 }
2065
2066 if (err)
2067 return err;
2068
2069 return count;
2070 }
2071
2072 static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
2073 struct device_attribute *attr,
2074 char *buf)
2075 {
2076 struct amdgpu_device *adev = dev_get_drvdata(dev);
2077 struct drm_device *ddev = adev->ddev;
2078 uint32_t sclk;
2079 int r, size = sizeof(sclk);
2080
2081
2082 if ((adev->flags & AMD_IS_PX) &&
2083 (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
2084 return -EINVAL;
2085
2086
2087 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
2088 (void *)&sclk, &size);
2089 if (r)
2090 return r;
2091
2092 return snprintf(buf, PAGE_SIZE, "%d\n", sclk * 10 * 1000);
2093 }
2094
2095 static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev,
2096 struct device_attribute *attr,
2097 char *buf)
2098 {
2099 return snprintf(buf, PAGE_SIZE, "sclk\n");
2100 }
2101
2102 static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
2103 struct device_attribute *attr,
2104 char *buf)
2105 {
2106 struct amdgpu_device *adev = dev_get_drvdata(dev);
2107 struct drm_device *ddev = adev->ddev;
2108 uint32_t mclk;
2109 int r, size = sizeof(mclk);
2110
2111 /* the clock can't be read while a PX device is powered off */
2112 if ((adev->flags & AMD_IS_PX) &&
2113 (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
2114 return -EINVAL;
2115
2116 /* get the mclk */
2117 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK,
2118 (void *)&mclk, &size);
2119 if (r)
2120 return r;
2121
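/* as with freq1: the sensor reports 10 kHz units, hwmon wants Hz */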
2122 return snprintf(buf, PAGE_SIZE, "%d\n", mclk * 10 * 1000);
2123 }
2124
2125 static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
2126 struct device_attribute *attr,
2127 char *buf)
2128 {
2129 return snprintf(buf, PAGE_SIZE, "mclk\n");
2130 }
2131
/**
 * DOC: hwmon
 *
 * The amdgpu driver exposes the following sensor interfaces through hwmon,
 * backed by the attributes defined below:
 *
 * - temp[1-3]_input: edge, junction and memory temperatures in millidegrees
 *   Celsius (junction and memory sensors exist on SOC15 dGPUs only)
 *
 * - temp[1-3]_crit, temp[1-3]_crit_hyst, temp[1-3]_emergency: temperature
 *   limits in millidegrees Celsius
 *
 * - temp[1-3]_label: sensor names (edge, junction, mem)
 *
 * - in0_input: gfx voltage in millivolts (not on SI/KV)
 *
 * - in1_input: northbridge voltage in millivolts (APUs only)
 *
 * - power1_average: average power draw in microwatts
 *
 * - power1_cap, power1_cap_min, power1_cap_max: power cap in microwatts
 *
 * - pwm1, pwm1_enable, pwm1_min, pwm1_max: fan level (0-255) and control mode
 *
 * - fan1_input, fan1_min, fan1_max, fan1_target, fan1_enable: fan speed in RPM
 *
 * - freq1_input, freq2_input: gfx and memory clock in hertz (memory clock on
 *   dGPUs only)
 *
 * Example: cap the board power at 120 W (the value is in microwatts and X
 * varies per system):
 *
 *   echo 120000000 > /sys/class/hwmon/hwmonX/power1_cap
 */
2214 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_EDGE);
2215 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
2216 static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
2217 static SENSOR_DEVICE_ATTR(temp1_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_EDGE);
2218 static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_JUNCTION);
2219 static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 0);
2220 static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 1);
2221 static SENSOR_DEVICE_ATTR(temp2_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_JUNCTION);
2222 static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_MEM);
2223 static SENSOR_DEVICE_ATTR(temp3_crit, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 0);
2224 static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 1);
2225 static SENSOR_DEVICE_ATTR(temp3_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_MEM);
2226 static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_EDGE);
2227 static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_JUNCTION);
2228 static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_MEM);
2229 static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0);
2230 static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
2231 static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
2232 static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);
2233 static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0);
2234 static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO, amdgpu_hwmon_get_fan1_min, NULL, 0);
2235 static SENSOR_DEVICE_ATTR(fan1_max, S_IRUGO, amdgpu_hwmon_get_fan1_max, NULL, 0);
2236 static SENSOR_DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_target, amdgpu_hwmon_set_fan1_target, 0);
2237 static SENSOR_DEVICE_ATTR(fan1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_enable, amdgpu_hwmon_set_fan1_enable, 0);
2238 static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, amdgpu_hwmon_show_vddgfx, NULL, 0);
2239 static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, amdgpu_hwmon_show_vddgfx_label, NULL, 0);
2240 static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, amdgpu_hwmon_show_vddnb, NULL, 0);
2241 static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, amdgpu_hwmon_show_vddnb_label, NULL, 0);
2242 static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 0);
2243 static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0);
2244 static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 0);
2245 static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0);
2246 static SENSOR_DEVICE_ATTR(freq1_input, S_IRUGO, amdgpu_hwmon_show_sclk, NULL, 0);
2247 static SENSOR_DEVICE_ATTR(freq1_label, S_IRUGO, amdgpu_hwmon_show_sclk_label, NULL, 0);
2248 static SENSOR_DEVICE_ATTR(freq2_input, S_IRUGO, amdgpu_hwmon_show_mclk, NULL, 0);
2249 static SENSOR_DEVICE_ATTR(freq2_label, S_IRUGO, amdgpu_hwmon_show_mclk_label, NULL, 0);
2250
2251 static struct attribute *hwmon_attributes[] = {
2252 &sensor_dev_attr_temp1_input.dev_attr.attr,
2253 &sensor_dev_attr_temp1_crit.dev_attr.attr,
2254 &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
2255 &sensor_dev_attr_temp2_input.dev_attr.attr,
2256 &sensor_dev_attr_temp2_crit.dev_attr.attr,
2257 &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr,
2258 &sensor_dev_attr_temp3_input.dev_attr.attr,
2259 &sensor_dev_attr_temp3_crit.dev_attr.attr,
2260 &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr,
2261 &sensor_dev_attr_temp1_emergency.dev_attr.attr,
2262 &sensor_dev_attr_temp2_emergency.dev_attr.attr,
2263 &sensor_dev_attr_temp3_emergency.dev_attr.attr,
2264 &sensor_dev_attr_temp1_label.dev_attr.attr,
2265 &sensor_dev_attr_temp2_label.dev_attr.attr,
2266 &sensor_dev_attr_temp3_label.dev_attr.attr,
2267 &sensor_dev_attr_pwm1.dev_attr.attr,
2268 &sensor_dev_attr_pwm1_enable.dev_attr.attr,
2269 &sensor_dev_attr_pwm1_min.dev_attr.attr,
2270 &sensor_dev_attr_pwm1_max.dev_attr.attr,
2271 &sensor_dev_attr_fan1_input.dev_attr.attr,
2272 &sensor_dev_attr_fan1_min.dev_attr.attr,
2273 &sensor_dev_attr_fan1_max.dev_attr.attr,
2274 &sensor_dev_attr_fan1_target.dev_attr.attr,
2275 &sensor_dev_attr_fan1_enable.dev_attr.attr,
2276 &sensor_dev_attr_in0_input.dev_attr.attr,
2277 &sensor_dev_attr_in0_label.dev_attr.attr,
2278 &sensor_dev_attr_in1_input.dev_attr.attr,
2279 &sensor_dev_attr_in1_label.dev_attr.attr,
2280 &sensor_dev_attr_power1_average.dev_attr.attr,
2281 &sensor_dev_attr_power1_cap_max.dev_attr.attr,
2282 &sensor_dev_attr_power1_cap_min.dev_attr.attr,
2283 &sensor_dev_attr_power1_cap.dev_attr.attr,
2284 &sensor_dev_attr_freq1_input.dev_attr.attr,
2285 &sensor_dev_attr_freq1_label.dev_attr.attr,
2286 &sensor_dev_attr_freq2_input.dev_attr.attr,
2287 &sensor_dev_attr_freq2_label.dev_attr.attr,
2288 NULL
2289 };
2290
2291 static umode_t hwmon_attributes_visible(struct kobject *kobj,
2292 struct attribute *attr, int index)
2293 {
2294 struct device *dev = kobj_to_dev(kobj);
2295 struct amdgpu_device *adev = dev_get_drvdata(dev);
2296 umode_t effective_mode = attr->mode;
2297
2298 /* skip fan attributes if fan is not present */
2299 if (adev->pm.no_fan && (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
2300 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
2301 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
2302 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
2303 attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
2304 attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
2305 attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
2306 attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
2307 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
2308 return 0;
2309
2310 /* skip fan attributes on APUs */
2311 if ((adev->flags & AMD_IS_APU) &&
2312 (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
2313 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
2314 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
2315 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
2316 attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
2317 attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
2318 attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
2319 attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
2320 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
2321 return 0;
2322
2323 /* skip limit and fan attributes if DPM is not enabled */
2324 if (!adev->pm.dpm_enabled &&
2325 (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
2326 attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
2327 attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
2328 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
2329 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
2330 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
2331 attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
2332 attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
2333 attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
2334 attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
2335 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
2336 return 0;
2337
2338 if (!is_support_sw_smu(adev)) {
2339 /* mask fan attributes if we have no bindings for this asic to expose */
2340 if ((!adev->powerplay.pp_funcs->get_fan_speed_percent &&
2341 attr == &sensor_dev_attr_pwm1.dev_attr.attr) ||
2342 (!adev->powerplay.pp_funcs->get_fan_control_mode &&
2343 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr))
2344 effective_mode &= ~S_IRUGO;
2345
2346 if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
2347 attr == &sensor_dev_attr_pwm1.dev_attr.attr) ||
2348 (!adev->powerplay.pp_funcs->set_fan_control_mode &&
2349 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr))
2350 effective_mode &= ~S_IWUSR;
2351 }
2352
2353 if (((adev->flags & AMD_IS_APU) ||
2354 adev->family == AMDGPU_FAMILY_SI ||
2355 adev->family == AMDGPU_FAMILY_KV) &&
2356 (attr == &sensor_dev_attr_power1_average.dev_attr.attr ||
2357 attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
2358 attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr||
2359 attr == &sensor_dev_attr_power1_cap.dev_attr.attr))
2360 return 0;
2361
2362 if (!is_support_sw_smu(adev)) {
2363 /* hide max/min values if we can't both query and manage the fan */
2364 if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
2365 !adev->powerplay.pp_funcs->get_fan_speed_percent) &&
2366 (!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
2367 !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
2368 (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
2369 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
2370 return 0;
2371
2372 if ((!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
2373 !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
2374 (attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
2375 attr == &sensor_dev_attr_fan1_min.dev_attr.attr))
2376 return 0;
2377 }
2378
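/* in0 (vddgfx) is not implemented on SI/KV, so hide it there */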
2379 if ((adev->family == AMDGPU_FAMILY_SI ||
2380 adev->family == AMDGPU_FAMILY_KV) &&
2381 (attr == &sensor_dev_attr_in0_input.dev_attr.attr ||
2382 attr == &sensor_dev_attr_in0_label.dev_attr.attr))
2383 return 0;
2384
2385 /* only APUs have vddnb */
2386 if (!(adev->flags & AMD_IS_APU) &&
2387 (attr == &sensor_dev_attr_in1_input.dev_attr.attr ||
2388 attr == &sensor_dev_attr_in1_label.dev_attr.attr))
2389 return 0;
2390
2391 /* no mclk on APUs */
2392 if ((adev->flags & AMD_IS_APU) &&
2393 (attr == &sensor_dev_attr_freq2_input.dev_attr.attr ||
2394 attr == &sensor_dev_attr_freq2_label.dev_attr.attr))
2395 return 0;
2396
2397 /* only SOC15 dGPUs support hotspot and mem temperatures */
2398 if (((adev->flags & AMD_IS_APU) ||
2399 adev->asic_type < CHIP_VEGA10) &&
2400 (attr == &sensor_dev_attr_temp2_crit.dev_attr.attr ||
2401 attr == &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr ||
2402 attr == &sensor_dev_attr_temp3_crit.dev_attr.attr ||
2403 attr == &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr ||
2404 attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr ||
2405 attr == &sensor_dev_attr_temp2_emergency.dev_attr.attr ||
2406 attr == &sensor_dev_attr_temp3_emergency.dev_attr.attr ||
2407 attr == &sensor_dev_attr_temp2_input.dev_attr.attr ||
2408 attr == &sensor_dev_attr_temp3_input.dev_attr.attr ||
2409 attr == &sensor_dev_attr_temp2_label.dev_attr.attr ||
2410 attr == &sensor_dev_attr_temp3_label.dev_attr.attr))
2411 return 0;
2412
2413 return effective_mode;
2414 }
2415
2416 static const struct attribute_group hwmon_attrgroup = {
2417 .attrs = hwmon_attributes,
2418 .is_visible = hwmon_attributes_visible,
2419 };
2420
2421 static const struct attribute_group *hwmon_groups[] = {
2422 &hwmon_attrgroup,
2423 NULL
2424 };
2425
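/*
 * Thermal interrupt worker: stay in the internal thermal power state while
 * the temperature is out of range and restore the user's state once it
 * recovers.
 */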
2426 void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
2427 {
2428 struct amdgpu_device *adev =
2429 container_of(work, struct amdgpu_device,
2430 pm.dpm.thermal.work);
2431 /* switch to the thermal state */
2432 enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
2433 int temp, size = sizeof(temp);
2434
2435 if (!adev->pm.dpm_enabled)
2436 return;
2437
2438 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
2439 (void *)&temp, &size)) {
2440 if (temp < adev->pm.dpm.thermal.min_temp)
2441 /* switch back the user state */
2442 dpm_state = adev->pm.dpm.user_state;
2443 } else {
2444 if (adev->pm.dpm.thermal.high_to_low)
2445 /* switch back the user state */
2446 dpm_state = adev->pm.dpm.user_state;
2447 }
2448 mutex_lock(&adev->pm.mutex);
2449 if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
2450 adev->pm.dpm.thermal_active = true;
2451 else
2452 adev->pm.dpm.thermal_active = false;
2453 adev->pm.dpm.state = dpm_state;
2454 mutex_unlock(&adev->pm.mutex);
2455
2456 amdgpu_pm_compute_clocks(adev);
2457 }
2458
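/*
 * Pick the best power state for the requested dpm state; if nothing
 * matches, fall back through progressively more generic states via the
 * switch after the search loop.
 */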
2459 static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
2460 enum amd_pm_state_type dpm_state)
2461 {
2462 int i;
2463 struct amdgpu_ps *ps;
2464 u32 ui_class;
2465 bool single_display = (adev->pm.dpm.new_active_crtc_count < 2);
2467
2468 /* check if the vblank period is too short to adjust the mclk */
2469 if (single_display && adev->powerplay.pp_funcs->vblank_too_short) {
2470 if (amdgpu_dpm_vblank_too_short(adev))
2471 single_display = false;
2472 }
2473
2474 /* certain older asics have a separate 3D performance state,
2475  * so try that first if the user requested performance
2476  */
2477 if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
2478 dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
2479 /* balanced states don't exist at the moment */
2480 if (dpm_state == POWER_STATE_TYPE_BALANCED)
2481 dpm_state = POWER_STATE_TYPE_PERFORMANCE;
2482
2483 restart_search:
2484 /* pick the best power state based on current conditions */
2485 for (i = 0; i < adev->pm.dpm.num_ps; i++) {
2486 ps = &adev->pm.dpm.ps[i];
2487 ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
2488 switch (dpm_state) {
2489 /* user states */
2490 case POWER_STATE_TYPE_BATTERY:
2491 if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
2492 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
2493 if (single_display)
2494 return ps;
2495 } else
2496 return ps;
2497 }
2498 break;
2499 case POWER_STATE_TYPE_BALANCED:
2500 if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
2501 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
2502 if (single_display)
2503 return ps;
2504 } else
2505 return ps;
2506 }
2507 break;
2508 case POWER_STATE_TYPE_PERFORMANCE:
2509 if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
2510 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
2511 if (single_display)
2512 return ps;
2513 } else
2514 return ps;
2515 }
2516 break;
2517 /* internal states */
2518 case POWER_STATE_TYPE_INTERNAL_UVD:
2519 if (adev->pm.dpm.uvd_ps)
2520 return adev->pm.dpm.uvd_ps;
2521 else
2522 break;
2523 case POWER_STATE_TYPE_INTERNAL_UVD_SD:
2524 if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
2525 return ps;
2526 break;
2527 case POWER_STATE_TYPE_INTERNAL_UVD_HD:
2528 if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
2529 return ps;
2530 break;
2531 case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
2532 if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
2533 return ps;
2534 break;
2535 case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
2536 if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
2537 return ps;
2538 break;
2539 case POWER_STATE_TYPE_INTERNAL_BOOT:
2540 return adev->pm.dpm.boot_ps;
2541 case POWER_STATE_TYPE_INTERNAL_THERMAL:
2542 if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
2543 return ps;
2544 break;
2545 case POWER_STATE_TYPE_INTERNAL_ACPI:
2546 if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
2547 return ps;
2548 break;
2549 case POWER_STATE_TYPE_INTERNAL_ULV:
2550 if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
2551 return ps;
2552 break;
2553 case POWER_STATE_TYPE_INTERNAL_3DPERF:
2554 if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
2555 return ps;
2556 break;
2557 default:
2558 break;
2559 }
2560 }
2561 /* use a fallback state if we didn't match */
2562 switch (dpm_state) {
2563 case POWER_STATE_TYPE_INTERNAL_UVD_SD:
2564 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
2565 goto restart_search;
2566 case POWER_STATE_TYPE_INTERNAL_UVD_HD:
2567 case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
2568 case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
2569 if (adev->pm.dpm.uvd_ps) {
2570 return adev->pm.dpm.uvd_ps;
2571 } else {
2572 dpm_state = POWER_STATE_TYPE_PERFORMANCE;
2573 goto restart_search;
2574 }
2575 case POWER_STATE_TYPE_INTERNAL_THERMAL:
2576 dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
2577 goto restart_search;
2578 case POWER_STATE_TYPE_INTERNAL_ACPI:
2579 dpm_state = POWER_STATE_TYPE_BATTERY;
2580 goto restart_search;
2581 case POWER_STATE_TYPE_BATTERY:
2582 case POWER_STATE_TYPE_BALANCED:
2583 case POWER_STATE_TYPE_INTERNAL_3DPERF:
2584 dpm_state = POWER_STATE_TYPE_PERFORMANCE;
2585 goto restart_search;
2586 default:
2587 break;
2588 }
2589
2590 return NULL;
2591 }
2592
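/* called with adev->pm.mutex held (see amdgpu_pm_compute_clocks) */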
2593 static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
2594 {
2595 struct amdgpu_ps *ps;
2596 enum amd_pm_state_type dpm_state;
2597 int ret;
2598 bool equal = false;
2599
2600 /* if dpm init failed */
2601 if (!adev->pm.dpm_enabled)
2602 return;
2603
2604 if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
2605 /* add other state override checks here */
2606 if ((!adev->pm.dpm.thermal_active) &&
2607 (!adev->pm.dpm.uvd_active))
2608 adev->pm.dpm.state = adev->pm.dpm.user_state;
2609 }
2610 dpm_state = adev->pm.dpm.state;
2611
2612 ps = amdgpu_dpm_pick_power_state(adev, dpm_state);
2613 if (ps)
2614 adev->pm.dpm.requested_ps = ps;
2615 else
2616 return;
2617
2618 if (amdgpu_dpm == 1 && adev->powerplay.pp_funcs->print_power_state) {
2619 printk("switching from power state:\n");
2620 amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
2621 printk("switching to power state:\n");
2622 amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
2623 }
2624
2625 /* update whether vce is active */
2626 ps->vce_active = adev->pm.dpm.vce_active;
2627 if (adev->powerplay.pp_funcs->display_configuration_changed)
2628 amdgpu_dpm_display_configuration_changed(adev);
2629
2630 ret = amdgpu_dpm_pre_set_power_state(adev);
2631 if (ret)
2632 return;
2633
2634 if (adev->powerplay.pp_funcs->check_state_equal) {
2635 if (0 != amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal))
2636 equal = false;
2637 }
2638
2639 if (equal)
2640 return;
2641
2642 amdgpu_dpm_set_power_state(adev);
2643 amdgpu_dpm_post_set_power_state(adev);
2644
2645 adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
2646 adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
2647
2648 if (adev->powerplay.pp_funcs->force_performance_level) {
2649 if (adev->pm.dpm.thermal_active) {
2650 enum amd_dpm_forced_level level = adev->pm.dpm.forced_level;
2651 /* force low perf level for thermal */
2652 amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW);
2653 /* save the user's level */
2654 adev->pm.dpm.forced_level = level;
2655 } else {
2656 /* otherwise, user selected level */
2657 amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level);
2658 }
2659 }
2660 }
2661
2662 void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
2663 {
2664 int ret = 0;
2665 if (is_support_sw_smu(adev)) {
2666 ret = smu_dpm_set_power_gate(&adev->smu, AMD_IP_BLOCK_TYPE_UVD, enable);
2667 if (ret)
2668 DRM_ERROR("[SW SMU]: dpm enable uvd failed, state = %s, ret = %d\n",
2669 enable ? "true" : "false", ret);
2670 } else if (adev->powerplay.pp_funcs->set_powergating_by_smu) {
2671 /* the powerplay path powergates the block, hence the inverted flag */
2672 mutex_lock(&adev->pm.mutex);
2673 amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
2674 mutex_unlock(&adev->pm.mutex);
2675 }
2676 /* enable/disable Low Memory PState for UVD (4k videos) */
2677 if (adev->asic_type == CHIP_STONEY &&
2678 adev->uvd.decode_image_width >= WIDTH_4K) {
2679 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
2680
2681 if (hwmgr && hwmgr->hwmgr_func &&
2682 hwmgr->hwmgr_func->update_nbdpm_pstate)
2683 hwmgr->hwmgr_func->update_nbdpm_pstate(hwmgr,
2684 !enable,
2685 true);
2686 }
2687 }
2688
2689 void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
2690 {
2691 int ret = 0;
2692 if (is_support_sw_smu(adev)) {
2693 ret = smu_dpm_set_power_gate(&adev->smu, AMD_IP_BLOCK_TYPE_VCE, enable);
2694 if (ret)
2695 DRM_ERROR("[SW SMU]: dpm enable vce failed, state = %s, ret = %d\n",
2696 enable ? "true" : "false", ret);
2697 } else if (adev->powerplay.pp_funcs->set_powergating_by_smu) {
2698 /* the powerplay path powergates the block, hence the inverted flag */
2699 mutex_lock(&adev->pm.mutex);
2700 amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
2701 mutex_unlock(&adev->pm.mutex);
2702 }
2703 }
2704
2705 void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
2706 {
2707 int i;
2708
2709 if (adev->powerplay.pp_funcs->print_power_state == NULL)
2710 return;
2711
2712 for (i = 0; i < adev->pm.dpm.num_ps; i++)
2713 amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]);
2714
2715 }
2716
2717 int amdgpu_pm_virt_sysfs_init(struct amdgpu_device *adev)
2718 {
2719 int ret = 0;
2720
2721 if (!(amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev)))
2722 return ret;
2723
2724 ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk);
2725 if (ret) {
2726 DRM_ERROR("failed to create device file pp_dpm_sclk\n");
2727 return ret;
2728 }
2729
2730 ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk);
2731 if (ret) {
2732 DRM_ERROR("failed to create device file pp_dpm_mclk\n");
2733 return ret;
2734 }
2735
2736 ret = device_create_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
2737 if (ret) {
2738 DRM_ERROR("failed to create device file for dpm state\n");
2739 return ret;
2740 }
2741
2742 return ret;
2743 }
2744
2745 void amdgpu_pm_virt_sysfs_fini(struct amdgpu_device *adev)
2746 {
2747 if (!(amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev)))
2748 return;
2749
2750 device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
2751 device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk);
2752 device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk);
2753 }
2754
2755 int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
2756 {
2757 int r;
2758
2759 if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->load_firmware) {
2760 r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle);
2761 if (r) {
2762 pr_err("smu firmware loading failed\n");
2763 return r;
2764 }
2765 *smu_version = adev->pm.fw_version;
2766 }
2767 return 0;
2768 }
2769
2770 int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
2771 {
2772 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
2773 int ret;
2774
2775 if (adev->pm.sysfs_initialized)
2776 return 0;
2777
2778 if (adev->pm.dpm_enabled == 0)
2779 return 0;
2780
2781 adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
2782 DRIVER_NAME, adev,
2783 hwmon_groups);
2784 if (IS_ERR(adev->pm.int_hwmon_dev)) {
2785 ret = PTR_ERR(adev->pm.int_hwmon_dev);
2786 dev_err(adev->dev,
2787 "Unable to register hwmon device: %d\n", ret);
2788 return ret;
2789 }
2790
2791 ret = device_create_file(adev->dev, &dev_attr_power_dpm_state);
2792 if (ret) {
2793 DRM_ERROR("failed to create device file for dpm state\n");
2794 return ret;
2795 }
2796 ret = device_create_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
2797 if (ret) {
2798 DRM_ERROR("failed to create device file for dpm state\n");
2799 return ret;
2800 }
2801
2802
2803 ret = device_create_file(adev->dev, &dev_attr_pp_num_states);
2804 if (ret) {
2805 DRM_ERROR("failed to create device file pp_num_states\n");
2806 return ret;
2807 }
2808 ret = device_create_file(adev->dev, &dev_attr_pp_cur_state);
2809 if (ret) {
2810 DRM_ERROR("failed to create device file pp_cur_state\n");
2811 return ret;
2812 }
2813 ret = device_create_file(adev->dev, &dev_attr_pp_force_state);
2814 if (ret) {
2815 DRM_ERROR("failed to create device file pp_force_state\n");
2816 return ret;
2817 }
2818 ret = device_create_file(adev->dev, &dev_attr_pp_table);
2819 if (ret) {
2820 DRM_ERROR("failed to create device file pp_table\n");
2821 return ret;
2822 }
2823
2824 ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk);
2825 if (ret) {
2826 DRM_ERROR("failed to create device file pp_dpm_sclk\n");
2827 return ret;
2828 }
2829 ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk);
2830 if (ret) {
2831 DRM_ERROR("failed to create device file pp_dpm_mclk\n");
2832 return ret;
2833 }
2834 if (adev->asic_type >= CHIP_VEGA10) {
2835 ret = device_create_file(adev->dev, &dev_attr_pp_dpm_socclk);
2836 if (ret) {
2837 DRM_ERROR("failed to create device file pp_dpm_socclk\n");
2838 return ret;
2839 }
2840 if (adev->asic_type != CHIP_ARCTURUS) {
2841 ret = device_create_file(adev->dev, &dev_attr_pp_dpm_dcefclk);
2842 if (ret) {
2843 DRM_ERROR("failed to create device file pp_dpm_dcefclk\n");
2844 return ret;
2845 }
2846 }
2847 }
2848 if (adev->asic_type >= CHIP_VEGA20) {
2849 ret = device_create_file(adev->dev, &dev_attr_pp_dpm_fclk);
2850 if (ret) {
2851 DRM_ERROR("failed to create device file pp_dpm_fclk\n");
2852 return ret;
2853 }
2854 }
2855 if (adev->asic_type != CHIP_ARCTURUS) {
2856 ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie);
2857 if (ret) {
2858 DRM_ERROR("failed to create device file pp_dpm_pcie\n");
2859 return ret;
2860 }
2861 }
2862 ret = device_create_file(adev->dev, &dev_attr_pp_sclk_od);
2863 if (ret) {
2864 DRM_ERROR("failed to create device file pp_sclk_od\n");
2865 return ret;
2866 }
2867 ret = device_create_file(adev->dev, &dev_attr_pp_mclk_od);
2868 if (ret) {
2869 DRM_ERROR("failed to create device file pp_mclk_od\n");
2870 return ret;
2871 }
2872 ret = device_create_file(adev->dev,
2873 &dev_attr_pp_power_profile_mode);
2874 if (ret) {
2875 DRM_ERROR("failed to create device file "
2876 "pp_power_profile_mode\n");
2877 return ret;
2878 }
2879 if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
2880 (!is_support_sw_smu(adev) && hwmgr->od_enabled)) {
2881 ret = device_create_file(adev->dev,
2882 &dev_attr_pp_od_clk_voltage);
2883 if (ret) {
2884 DRM_ERROR("failed to create device file "
2885 "pp_od_clk_voltage\n");
2886 return ret;
2887 }
2888 }
2889 ret = device_create_file(adev->dev,
2890 &dev_attr_gpu_busy_percent);
2891 if (ret) {
2892 DRM_ERROR("failed to create device file "
2893 "gpu_busy_level\n");
2894 return ret;
2895 }
2896 /* APU does not have its own dedicated memory */
2897 if (!(adev->flags & AMD_IS_APU) &&
2898 (adev->asic_type != CHIP_VEGA10)) {
2899 ret = device_create_file(adev->dev,
2900 &dev_attr_mem_busy_percent);
2901 if (ret) {
2902 DRM_ERROR("failed to create device file "
2903 "mem_busy_percent\n");
2904 return ret;
2905 }
2906 }
2907 /* PCIe Perf counters won't work on APU nodes */
2908 if (!(adev->flags & AMD_IS_APU)) {
2909 ret = device_create_file(adev->dev, &dev_attr_pcie_bw);
2910 if (ret) {
2911 DRM_ERROR("failed to create device file pcie_bw\n");
2912 return ret;
2913 }
2914 }
2915 if (adev->unique_id) {
2916 ret = device_create_file(adev->dev, &dev_attr_unique_id);
2917 if (ret) {
2918 DRM_ERROR("failed to create device file unique_id\n");
2919 return ret;
2920 }
}
2921 ret = amdgpu_debugfs_pm_init(adev);
2922 if (ret) {
2923 DRM_ERROR("Failed to register debugfs file for dpm!\n");
2924 return ret;
2925 }
2926
2927 if ((adev->asic_type >= CHIP_VEGA10) &&
2928 !(adev->flags & AMD_IS_APU)) {
2929 ret = device_create_file(adev->dev,
2930 &dev_attr_pp_features);
2931 if (ret) {
2932 DRM_ERROR("failed to create device file "
2933 "pp_features\n");
2934 return ret;
2935 }
2936 }
2937
2938 adev->pm.sysfs_initialized = true;
2939
2940 return 0;
2941 }
2942
2943 void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
2944 {
2945 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
2946
2947 if (adev->pm.dpm_enabled == 0)
2948 return;
2949
2950 if (adev->pm.int_hwmon_dev)
2951 hwmon_device_unregister(adev->pm.int_hwmon_dev);
2952 device_remove_file(adev->dev, &dev_attr_power_dpm_state);
2953 device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
2954
2955 device_remove_file(adev->dev, &dev_attr_pp_num_states);
2956 device_remove_file(adev->dev, &dev_attr_pp_cur_state);
2957 device_remove_file(adev->dev, &dev_attr_pp_force_state);
2958 device_remove_file(adev->dev, &dev_attr_pp_table);
2959
2960 device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk);
2961 device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk);
2962 if (adev->asic_type >= CHIP_VEGA10) {
2963 device_remove_file(adev->dev, &dev_attr_pp_dpm_socclk);
2964 if (adev->asic_type != CHIP_ARCTURUS)
2965 device_remove_file(adev->dev, &dev_attr_pp_dpm_dcefclk);
2966 }
2967 if (adev->asic_type != CHIP_ARCTURUS)
2968 device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie);
2969 if (adev->asic_type >= CHIP_VEGA20)
2970 device_remove_file(adev->dev, &dev_attr_pp_dpm_fclk);
2971 device_remove_file(adev->dev, &dev_attr_pp_sclk_od);
2972 device_remove_file(adev->dev, &dev_attr_pp_mclk_od);
2973 device_remove_file(adev->dev,
2974 &dev_attr_pp_power_profile_mode);
2975 if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
2976 (!is_support_sw_smu(adev) && hwmgr->od_enabled))
2977 device_remove_file(adev->dev,
2978 &dev_attr_pp_od_clk_voltage);
2979 device_remove_file(adev->dev, &dev_attr_gpu_busy_percent);
2980 if (!(adev->flags & AMD_IS_APU) &&
2981 (adev->asic_type != CHIP_VEGA10))
2982 device_remove_file(adev->dev, &dev_attr_mem_busy_percent);
2983 if (!(adev->flags & AMD_IS_APU))
2984 device_remove_file(adev->dev, &dev_attr_pcie_bw);
2985 if (adev->unique_id)
2986 device_remove_file(adev->dev, &dev_attr_unique_id);
2987 if ((adev->asic_type >= CHIP_VEGA10) &&
2988 !(adev->flags & AMD_IS_APU))
2989 device_remove_file(adev->dev, &dev_attr_pp_features);
2990 }
2991
2992 void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
2993 {
2994 int i = 0;
2995
2996 if (!adev->pm.dpm_enabled)
2997 return;
2998
2999 if (adev->mode_info.num_crtc)
3000 amdgpu_display_bandwidth_update(adev);
3001
3002 for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
3003 struct amdgpu_ring *ring = adev->rings[i];
3004 if (ring && ring->sched.ready)
3005 amdgpu_fence_wait_empty(ring);
3006 }
3007
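/* let the power management back end react to the new display configuration */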
3008 if (is_support_sw_smu(adev)) {
3009 struct smu_dpm_context *smu_dpm = &adev->smu.smu_dpm;
3010 smu_handle_task(&adev->smu,
3011 smu_dpm->dpm_level,
3012 AMD_PP_TASK_DISPLAY_CONFIG_CHANGE);
3013 } else {
3014 if (adev->powerplay.pp_funcs->dispatch_tasks) {
3015 if (!amdgpu_device_has_dc_support(adev)) {
3016 mutex_lock(&adev->pm.mutex);
3017 amdgpu_dpm_get_active_displays(adev);
3018 adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
3019 adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
3020 adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
3021 /* we have issues with mclk switching with refresh rates over 120 hz on the non-DC code */
3022 if (adev->pm.pm_display_cfg.vrefresh > 120)
3023 adev->pm.pm_display_cfg.min_vblank_time = 0;
3024 if (adev->powerplay.pp_funcs->display_configuration_change)
3025 adev->powerplay.pp_funcs->display_configuration_change(
3026 adev->powerplay.pp_handle,
3027 &adev->pm.pm_display_cfg);
3028 mutex_unlock(&adev->pm.mutex);
3029 }
3030 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL);
3031 } else {
3032 mutex_lock(&adev->pm.mutex);
3033 amdgpu_dpm_get_active_displays(adev);
3034 amdgpu_dpm_change_power_state_locked(adev);
3035 mutex_unlock(&adev->pm.mutex);
3036 }
3037 }
3038 }
3039
3040 /*
3041  * Debugfs info
3042  */
3043 #if defined(CONFIG_DEBUG_FS)
3044
3045 static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
3046 {
3047 uint32_t value;
3048 uint64_t value64;
3049 uint32_t query = 0;
3050 int size;
3051
3052 /* GPU Clocks */
3053 size = sizeof(value);
3054 seq_printf(m, "GFX Clocks and Power:\n");
3055 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, (void *)&value, &size))
3056 seq_printf(m, "\t%u MHz (MCLK)\n", value/100);
3057 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, (void *)&value, &size))
3058 seq_printf(m, "\t%u MHz (SCLK)\n", value/100);
3059 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK, (void *)&value, &size))
3060 seq_printf(m, "\t%u MHz (PSTATE_SCLK)\n", value/100);
3061 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK, (void *)&value, &size))
3062 seq_printf(m, "\t%u MHz (PSTATE_MCLK)\n", value/100);
3063 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, (void *)&value, &size))
3064 seq_printf(m, "\t%u mV (VDDGFX)\n", value);
3065 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size))
3066 seq_printf(m, "\t%u mV (VDDNB)\n", value);
3067 size = sizeof(uint32_t);
3068 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER, (void *)&query, &size))
3069 seq_printf(m, "\t%u.%u W (average GPU)\n", query >> 8, query & 0xff);
3070 size = sizeof(value);
3071 seq_printf(m, "\n");
3072
3073 /* GPU Temp */
3074 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, (void *)&value, &size))
3075 seq_printf(m, "GPU Temperature: %u C\n", value/1000);
3076
3077 /* GPU Load */
3078 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, (void *)&value, &size))
3079 seq_printf(m, "GPU Load: %u %%\n", value);
3080
3081 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD, (void *)&value, &size))
3082 seq_printf(m, "MEM Load: %u %%\n", value);
3083
3084 seq_printf(m, "\n");
3085
3086 /* SMC feature mask */
3087 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, (void *)&value64, &size))
3088 seq_printf(m, "SMC Feature Mask: 0x%016llx\n", value64);
3089
3090 if (adev->asic_type > CHIP_VEGA20) {
3091 /* VCN clocks */
3092 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_POWER_STATE, (void *)&value, &size)) {
3093 if (!value) {
3094 seq_printf(m, "VCN: Disabled\n");
3095 } else {
3096 seq_printf(m, "VCN: Enabled\n");
3097 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
3098 seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
3099 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
3100 seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
3101 }
3102 }
3103 seq_printf(m, "\n");
3104 } else {
3105 /* UVD clocks */
3106 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
3107 if (!value) {
3108 seq_printf(m, "UVD: Disabled\n");
3109 } else {
3110 seq_printf(m, "UVD: Enabled\n");
3111 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
3112 seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
3113 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
3114 seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
3115 }
3116 }
3117 seq_printf(m, "\n");
3118
3119 /* VCE clocks */
3120 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) {
3121 if (!value) {
3122 seq_printf(m, "VCE: Disabled\n");
3123 } else {
3124 seq_printf(m, "VCE: Enabled\n");
3125 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size))
3126 seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
3127 }
3128 }
3129 }
3130
3131 return 0;
3132 }
3133
3134 static void amdgpu_parse_cg_state(struct seq_file *m, u32 flags)
3135 {
3136 int i;
3137
3138 for (i = 0; clocks[i].flag; i++)
3139 seq_printf(m, "\t%s: %s\n", clocks[i].name,
3140 (flags & clocks[i].flag) ? "On" : "Off");
3141 }
3142
3143 static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
3144 {
3145 struct drm_info_node *node = (struct drm_info_node *) m->private;
3146 struct drm_device *dev = node->minor->dev;
3147 struct amdgpu_device *adev = dev->dev_private;
3148 struct drm_device *ddev = adev->ddev;
3149 u32 flags = 0;
3150
3151 amdgpu_device_ip_get_clockgating_state(adev, &flags);
3152 seq_printf(m, "Clock Gating Flags Mask: 0x%x\n", flags);
3153 amdgpu_parse_cg_state(m, flags);
3154 seq_printf(m, "\n");
3155
3156 if (!adev->pm.dpm_enabled) {
3157 seq_printf(m, "dpm not enabled\n");
3158 return 0;
3159 }
3160 if ((adev->flags & AMD_IS_PX) &&
3161 (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
3162 seq_printf(m, "PX asic powered off\n");
3163 } else if (!is_support_sw_smu(adev) && adev->powerplay.pp_funcs->debugfs_print_current_performance_level) {
3164 mutex_lock(&adev->pm.mutex);
3166 adev->powerplay.pp_funcs->debugfs_print_current_performance_level(adev, m);
3169 mutex_unlock(&adev->pm.mutex);
3170 } else {
3171 return amdgpu_debugfs_pm_info_pp(m, adev);
3172 }
3173
3174 return 0;
3175 }
3176
3177 static const struct drm_info_list amdgpu_pm_info_list[] = {
3178 {"amdgpu_pm_info", amdgpu_debugfs_pm_info, 0, NULL},
3179 };
3180 #endif
3181
3182 static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
3183 {
3184 #if defined(CONFIG_DEBUG_FS)
3185 return amdgpu_debugfs_add_files(adev, amdgpu_pm_info_list, ARRAY_SIZE(amdgpu_pm_info_list));
3186 #else
3187 return 0;
3188 #endif
3189 }