This source file includes the following definitions:
- amdgpu_dpm_print_class_info
- amdgpu_dpm_print_cap_info
- amdgpu_dpm_print_ps_status
- amdgpu_dpm_get_active_displays
- amdgpu_dpm_get_vblank_time
- amdgpu_dpm_get_vrefresh
- amdgpu_is_internal_thermal_sensor
- amdgpu_parse_clk_voltage_dep_table
- amdgpu_get_platform_caps
- amdgpu_parse_extended_power_table
- amdgpu_free_extended_power_table
- amdgpu_add_thermal_controller
- amdgpu_get_pcie_gen_support
- amdgpu_get_vce_clock_state
- amdgpu_dpm_get_sclk
- amdgpu_dpm_get_mclk
- amdgpu_dpm_set_powergating_by_smu
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_i2c.h"
#include "amdgpu_dpm.h"
#include "atom.h"
#include "amd_pcie.h"

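/* Decode the UI and internal classification flags of an ATOM power state
 * and print them to the kernel log.
 */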
void amdgpu_dpm_print_class_info(u32 class, u32 class2)
{
	const char *s;

	switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
	case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
	default:
		s = "none";
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
		s = "battery";
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
		s = "balanced";
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
		s = "performance";
		break;
	}
	printk("\tui class: %s\n", s);
	printk("\tinternal class:");
	if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
	    (class2 == 0))
		pr_cont(" none");
	else {
		if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
			pr_cont(" boot");
		if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
			pr_cont(" thermal");
		if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
			pr_cont(" limited_pwr");
		if (class & ATOM_PPLIB_CLASSIFICATION_REST)
			pr_cont(" rest");
		if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
			pr_cont(" forced");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
			pr_cont(" 3d_perf");
		if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
			pr_cont(" ovrdrv");
		if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
			pr_cont(" uvd");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
			pr_cont(" 3d_low");
		if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
			pr_cont(" acpi");
		if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
			pr_cont(" uvd_hd2");
		if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
			pr_cont(" uvd_hd");
		if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
			pr_cont(" uvd_sd");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
			pr_cont(" limited_pwr2");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
			pr_cont(" ulv");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
			pr_cont(" uvd_mvc");
	}
	pr_cont("\n");
}

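/* Print the power state capability flags (single display only, video
 * playback, disallowed on DC power) to the kernel log.
 */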
void amdgpu_dpm_print_cap_info(u32 caps)
{
	printk("\tcaps:");
	if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
		pr_cont(" single_disp");
	if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
		pr_cont(" video");
	if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
		pr_cont(" no_dc");
	pr_cont("\n");
}

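/* Print whether the given power state is the current (c), requested (r)
 * or boot (b) state.
 */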
void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
				struct amdgpu_ps *rps)
{
	printk("\tstatus:");
	if (rps == adev->pm.dpm.current_ps)
		pr_cont(" c");
	if (rps == adev->pm.dpm.requested_ps)
		pr_cont(" r");
	if (rps == adev->pm.dpm.boot_ps)
		pr_cont(" b");
	pr_cont("\n");
}

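/* Walk the CRTC list and record a bitmask and count of the enabled CRTCs
 * so DPM can size its display-dependent limits.
 */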
void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	adev->pm.dpm.new_active_crtcs = 0;
	adev->pm.dpm.new_active_crtc_count = 0;
	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc,
				    &ddev->mode_config.crtc_list, head) {
			amdgpu_crtc = to_amdgpu_crtc(crtc);
			if (amdgpu_crtc->enabled) {
				adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
				adev->pm.dpm.new_active_crtc_count++;
			}
		}
	}
}

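/* Return the vertical blanking interval of the first active CRTC in
 * microseconds, or 0xffffffff if no display is active.
 */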
u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;
	u32 vblank_in_pixels;
	u32 vblank_time_us = 0xffffffff;

	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			amdgpu_crtc = to_amdgpu_crtc(crtc);
			if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
				vblank_in_pixels =
					amdgpu_crtc->hw_mode.crtc_htotal *
					(amdgpu_crtc->hw_mode.crtc_vblank_end -
					 amdgpu_crtc->hw_mode.crtc_vdisplay +
					 (amdgpu_crtc->v_border * 2));

				vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock;
				break;
			}
		}
	}

	return vblank_time_us;
}

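/* Return the vertical refresh rate of the first active CRTC, in Hz. */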
u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;
	u32 vrefresh = 0;

	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			amdgpu_crtc = to_amdgpu_crtc(crtc);
			if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
				vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
				break;
			}
		}
	}

	return vrefresh;
}

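/* Report whether the thermal sensor is integrated in the ASIC (true) or
 * provided by an external chip or GPIO (false).
 */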
bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor)
{
	switch (sensor) {
	case THERMAL_TYPE_RV6XX:
	case THERMAL_TYPE_RV770:
	case THERMAL_TYPE_EVERGREEN:
	case THERMAL_TYPE_SUMO:
	case THERMAL_TYPE_NI:
	case THERMAL_TYPE_SI:
	case THERMAL_TYPE_CI:
	case THERMAL_TYPE_KV:
		return true;
	case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
		return false;
	case THERMAL_TYPE_NONE:
	case THERMAL_TYPE_EXTERNAL:
	case THERMAL_TYPE_EXTERNAL_GPIO:
	default:
		return false;
	}
}

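/* Overlay unions covering the different PowerPlay and fan table revisions
 * found in the VBIOS.
 */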
union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
	struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
	struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
};

union fan_info {
	struct _ATOM_PPLIB_FANTABLE fan;
	struct _ATOM_PPLIB_FANTABLE2 fan2;
	struct _ATOM_PPLIB_FANTABLE3 fan3;
};

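/* Copy an ATOM clock/voltage dependency table into the driver's native
 * representation, reassembling each 24-bit clock from its low/high parts.
 */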
static int amdgpu_parse_clk_voltage_dep_table(struct amdgpu_clock_voltage_dependency_table *amdgpu_table,
					      ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
{
	u32 size = atom_table->ucNumEntries *
		sizeof(struct amdgpu_clock_voltage_dependency_entry);
	int i;
	ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;

	amdgpu_table->entries = kzalloc(size, GFP_KERNEL);
	if (!amdgpu_table->entries)
		return -ENOMEM;

	entry = &atom_table->entries[0];
	for (i = 0; i < atom_table->ucNumEntries; i++) {
		amdgpu_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
			(entry->ucClockHigh << 16);
		amdgpu_table->entries[i].v = le16_to_cpu(entry->usVoltage);
		entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
			((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
	}
	amdgpu_table->count = atom_table->ucNumEntries;

	return 0;
}

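/* Read the platform capability flags and voltage/backbias response times
 * from the PowerPlayInfo table header.
 */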
int amdgpu_get_platform_caps(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	adev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
	adev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
	adev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);

	return 0;
}

#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8 24
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V9 26

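/* Parse the optional PowerPlay tables (fan, clock/voltage dependencies,
 * CAC data and the extended header tables) into adev->pm.dpm.
 */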
int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	union power_info *power_info;
	union fan_info *fan_info;
	ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	int ret, i;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

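	/* fan table */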
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		if (power_info->pplib3.usFanTableOffset) {
			fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset +
						      le16_to_cpu(power_info->pplib3.usFanTableOffset));
			adev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
			adev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
			adev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
			adev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
			adev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
			adev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
			adev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
			if (fan_info->fan.ucFanTableFormat >= 2)
				adev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
			else
				adev->pm.dpm.fan.t_max = 10900;
			adev->pm.dpm.fan.cycle_delay = 100000;
			if (fan_info->fan.ucFanTableFormat >= 3) {
				adev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode;
				adev->pm.dpm.fan.default_max_fan_pwm =
					le16_to_cpu(fan_info->fan3.usFanPWMMax);
				adev->pm.dpm.fan.default_fan_output_sensitivity = 4836;
				adev->pm.dpm.fan.fan_output_sensitivity =
					le16_to_cpu(fan_info->fan3.usFanOutputSensitivity);
			}
			adev->pm.dpm.fan.ucode_fan_control = true;
		}
	}

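	/* clock dependency tables, shedding tables */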
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
		if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
			ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
				(ATOM_PPLIB_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
			if (clk_v->ucNumEntries) {
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
					le16_to_cpu(clk_v->entries[0].usSclkLow) |
					(clk_v->entries[0].ucSclkHigh << 16);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
					le16_to_cpu(clk_v->entries[0].usMclkLow) |
					(clk_v->entries[0].ucMclkHigh << 16);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
					le16_to_cpu(clk_v->entries[0].usVddc);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
					le16_to_cpu(clk_v->entries[0].usVddci);
			}
		}
		if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
			ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
				(ATOM_PPLIB_PhaseSheddingLimits_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
			ATOM_PPLIB_PhaseSheddingLimits_Record *entry;

			adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
				kcalloc(psl->ucNumEntries,
					sizeof(struct amdgpu_phase_shedding_limits_entry),
					GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}

			entry = &psl->entries[0];
			for (i = 0; i < psl->ucNumEntries; i++) {
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
					le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
					le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
			}
			adev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
				psl->ucNumEntries;
		}
	}

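	/* cac data */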
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
		adev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
		adev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
		adev->pm.dpm.near_tdp_limit_adjusted = adev->pm.dpm.near_tdp_limit;
		adev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
		if (adev->pm.dpm.tdp_od_limit)
			adev->pm.dpm.power_control = true;
		else
			adev->pm.dpm.power_control = false;
		adev->pm.dpm.tdp_adjustment = 0;
		adev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
		adev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
		adev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
		if (power_info->pplib5.usCACLeakageTableOffset) {
			ATOM_PPLIB_CAC_Leakage_Table *cac_table =
				(ATOM_PPLIB_CAC_Leakage_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
			ATOM_PPLIB_CAC_Leakage_Record *entry;
			u32 size = cac_table->ucNumEntries * sizeof(struct amdgpu_cac_leakage_table);

			adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			entry = &cac_table->entries[0];
			for (i = 0; i < cac_table->ucNumEntries; i++) {
				if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
						le16_to_cpu(entry->usVddc1);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
						le16_to_cpu(entry->usVddc2);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
						le16_to_cpu(entry->usVddc3);
				} else {
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
						le16_to_cpu(entry->usVddc);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
						le32_to_cpu(entry->ulLeakageValue);
				}
				entry = (ATOM_PPLIB_CAC_Leakage_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
			}
			adev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
		}
	}

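	/* extension tables */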
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
			(mode_info->atom_context->bios + data_offset +
			 le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
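		/* VCE clock/voltage limits and VCE states */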
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
		    ext_hdr->usVCETableOffset) {
			VCEClockInfoArray *array = (VCEClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + array->ucNumEntries * sizeof(VCEClockInfo));
			ATOM_PPLIB_VCE_State_Table *states =
				(ATOM_PPLIB_VCE_State_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof(VCEClockInfo)) +
				 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)));
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
			ATOM_PPLIB_VCE_State_Record *state_entry;
			VCEClockInfo *vce_clk;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_vce_clock_voltage_dependency_entry);

			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			state_entry = &states->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
			}
			adev->pm.dpm.num_of_vce_states =
				states->numEntries > AMD_MAX_VCE_LEVELS ?
				AMD_MAX_VCE_LEVELS : states->numEntries;
			for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				adev->pm.dpm.vce_states[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				adev->pm.dpm.vce_states[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				adev->pm.dpm.vce_states[i].clk_idx =
					state_entry->ucClockInfoIndex & 0x3f;
				adev->pm.dpm.vce_states[i].pstate =
					(state_entry->ucClockInfoIndex & 0xc0) >> 6;
				state_entry = (ATOM_PPLIB_VCE_State_Record *)
					((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record));
			}
		}
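		/* UVD clock/voltage limits */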
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
		    ext_hdr->usUVDTableOffset) {
			UVDClockInfoArray *array = (UVDClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof(UVDClockInfo)));
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry);

			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				UVDClockInfo *uvd_clk = (UVDClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
					le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
					le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
			}
		}
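		/* SAMU clock/voltage limits */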
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
		    ext_hdr->usSAMUTableOffset) {
			ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
			ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_clock_voltage_dependency_entry);

			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
			}
		}
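		/* PPM (platform power management) table */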
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
		    ext_hdr->usPPMTableOffset) {
			ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usPPMTableOffset));

			adev->pm.dpm.dyn_state.ppm_table =
				kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.ppm_table) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
			adev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
				le16_to_cpu(ppm->usCpuCoreNumber);
			adev->pm.dpm.dyn_state.ppm_table->platform_tdp =
				le32_to_cpu(ppm->ulPlatformTDP);
			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
				le32_to_cpu(ppm->ulSmallACPlatformTDP);
			adev->pm.dpm.dyn_state.ppm_table->platform_tdc =
				le32_to_cpu(ppm->ulPlatformTDC);
			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
				le32_to_cpu(ppm->ulSmallACPlatformTDC);
			adev->pm.dpm.dyn_state.ppm_table->apu_tdp =
				le32_to_cpu(ppm->ulApuTDP);
			adev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
				le32_to_cpu(ppm->ulDGpuTDP);
			adev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
				le32_to_cpu(ppm->ulDGpuUlvPower);
			adev->pm.dpm.dyn_state.ppm_table->tj_max =
				le32_to_cpu(ppm->ulTjmax);
		}
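		/* ACP clock/voltage limits */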
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
		    ext_hdr->usACPTableOffset) {
			ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
			ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_clock_voltage_dependency_entry);

			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
			}
		}
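		/* PowerTune (CAC/TDP) table */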
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
		    ext_hdr->usPowerTuneTableOffset) {
			u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
			ATOM_PowerTune_Table *pt;

			adev->pm.dpm.dyn_state.cac_tdp_table =
				kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.cac_tdp_table) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			if (rev > 0) {
				ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
					le16_to_cpu(ppt->usMaximumPowerDeliveryLimit);
				pt = &ppt->power_tune_table;
			} else {
				ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
				pt = &ppt->power_tune_table;
			}
			adev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
			adev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
				le16_to_cpu(pt->usConfigurableTDP);
			adev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
			adev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
				le16_to_cpu(pt->usBatteryPowerLimit);
			adev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
				le16_to_cpu(pt->usSmallPowerLimit);
			adev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
				le16_to_cpu(pt->usLowCACLeakage);
			adev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
				le16_to_cpu(pt->usHighCACLeakage);
		}
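		/* vddgfx-on-sclk dependency table */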
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8) &&
		    ext_hdr->usSclkVddgfxTableOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usSclkVddgfxTableOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(
					&adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk,
					dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
	}

	return 0;
}

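/* Free every dynamic-state table allocated by
 * amdgpu_parse_extended_power_table().
 */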
void amdgpu_free_extended_power_table(struct amdgpu_device *adev)
{
	struct amdgpu_dpm_dynamic_state *dyn_state = &adev->pm.dpm.dyn_state;

	kfree(dyn_state->vddc_dependency_on_sclk.entries);
	kfree(dyn_state->vddci_dependency_on_mclk.entries);
	kfree(dyn_state->vddc_dependency_on_mclk.entries);
	kfree(dyn_state->mvdd_dependency_on_mclk.entries);
	kfree(dyn_state->cac_leakage_table.entries);
	kfree(dyn_state->phase_shedding_limits_table.entries);
	kfree(dyn_state->ppm_table);
	kfree(dyn_state->cac_tdp_table);
	kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
	kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
	kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
	kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
	kfree(dyn_state->vddgfx_dependency_on_sclk.entries);
}

static const char *pp_lib_thermal_controller_names[] = {
	"NONE",
	"lm63",
	"adm1032",
	"adm1030",
	"max6649",
	"lm64",
	"f75375",
	"RV6xx",
	"RV770",
	"adt7473",
	"NONE",
	"External GPIO",
	"Evergreen",
	"emc2103",
	"Sumo",
	"Northern Islands",
	"Southern Islands",
	"lm96163",
	"Sea Islands",
	"Kaveri/Kabini",
};

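/* Detect the board's thermal controller from the PowerPlay table and, for
 * external I2C chips, register the matching i2c device on the fan i2c bus.
 */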
void amdgpu_add_thermal_controller(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	ATOM_PPLIB_POWERPLAYTABLE *power_table;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	ATOM_PPLIB_THERMALCONTROLLER *controller;
	struct amdgpu_i2c_bus_rec i2c_bus;
	u16 data_offset;
	u8 frev, crev;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					   &frev, &crev, &data_offset))
		return;
	power_table = (ATOM_PPLIB_POWERPLAYTABLE *)
		(mode_info->atom_context->bios + data_offset);
	controller = &power_table->sThermalController;

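	/* add the i2c bus for thermal/fan chip */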
	if (controller->ucType > 0) {
		if (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN)
			adev->pm.no_fan = true;
		adev->pm.fan_pulses_per_revolution =
			controller->ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
		if (adev->pm.fan_pulses_per_revolution) {
			adev->pm.fan_min_rpm = controller->ucFanMinRPM;
			adev->pm.fan_max_rpm = controller->ucFanMaxRPM;
		}
		if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_RV770;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_NI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_SI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_CI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_KV;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
			DRM_INFO("External GPIO thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
		} else if (controller->ucType ==
			   ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
			DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
		} else if (controller->ucType ==
			   ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
			DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
		} else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
			DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
				 pp_lib_thermal_controller_names[controller->ucType],
				 controller->ucI2cAddress >> 1,
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
			i2c_bus = amdgpu_atombios_lookup_i2c_gpio(adev, controller->ucI2cLine);
			adev->pm.i2c_bus = amdgpu_i2c_lookup(adev, &i2c_bus);
			if (adev->pm.i2c_bus) {
				struct i2c_board_info info = { };
				const char *name = pp_lib_thermal_controller_names[controller->ucType];

				info.addr = controller->ucI2cAddress >> 1;
				strlcpy(info.type, name, sizeof(info.type));
				i2c_new_device(&adev->pm.i2c_bus->adapter, &info);
			}
		} else {
			DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
				 controller->ucType,
				 controller->ucI2cAddress >> 1,
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
		}
	}
}

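/* Clamp the requested PCIe gen against what both the ASIC and the system
 * (CAIL link-speed mask) support.
 */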
enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
						 u32 sys_mask,
						 enum amdgpu_pcie_gen asic_gen,
						 enum amdgpu_pcie_gen default_gen)
{
	switch (asic_gen) {
	case AMDGPU_PCIE_GEN1:
		return AMDGPU_PCIE_GEN1;
	case AMDGPU_PCIE_GEN2:
		return AMDGPU_PCIE_GEN2;
	case AMDGPU_PCIE_GEN3:
		return AMDGPU_PCIE_GEN3;
	default:
		if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) &&
		    (default_gen == AMDGPU_PCIE_GEN3))
			return AMDGPU_PCIE_GEN3;
		else if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) &&
			 (default_gen == AMDGPU_PCIE_GEN2))
			return AMDGPU_PCIE_GEN2;
		else
			return AMDGPU_PCIE_GEN1;
	}
	return AMDGPU_PCIE_GEN1;
}

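/* Look up a VCE clock state by index; returns NULL if the index is out of
 * range.
 */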
struct amd_vce_state *
amdgpu_get_vce_clock_state(void *handle, u32 idx)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (idx < adev->pm.dpm.num_of_vce_states)
		return &adev->pm.dpm.vce_states[idx];

	return NULL;
}

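/* Return the current GFX clock in 10 kHz units; with the software SMU this
 * queries the min (low) or max (!low) end of the GFXCLK DPM range, otherwise
 * it defers to the powerplay get_sclk callback.
 */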
int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
{
	uint32_t clk_freq;
	int ret = 0;

	if (is_support_sw_smu(adev)) {
		ret = smu_get_dpm_freq_range(&adev->smu, SMU_GFXCLK,
					     low ? &clk_freq : NULL,
					     !low ? &clk_freq : NULL);
		if (ret)
			return 0;
		return clk_freq * 100;
	} else {
		return adev->powerplay.pp_funcs->get_sclk(adev->powerplay.pp_handle, low);
	}
}

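/* Return the current memory clock in 10 kHz units; the software SMU path
 * queries the UCLK DPM range instead of the powerplay get_mclk callback.
 */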
int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
{
	uint32_t clk_freq;
	int ret = 0;

	if (is_support_sw_smu(adev)) {
		ret = smu_get_dpm_freq_range(&adev->smu, SMU_UCLK,
					     low ? &clk_freq : NULL,
					     !low ? &clk_freq : NULL);
		if (ret)
			return 0;
		return clk_freq * 100;
	} else {
		return adev->powerplay.pp_funcs->get_mclk(adev->powerplay.pp_handle, low);
	}
}

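/* Gate or ungate an IP block's power via the SMU. GFX/UVD/VCN/VCE/SDMA can
 * go through either the software SMU or powerplay; GMC and ACP only have a
 * powerplay path.
 */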
int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate)
{
	int ret = 0;
	bool swsmu = is_support_sw_smu(adev);

	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_GFX:
	case AMD_IP_BLOCK_TYPE_UVD:
	case AMD_IP_BLOCK_TYPE_VCN:
	case AMD_IP_BLOCK_TYPE_VCE:
	case AMD_IP_BLOCK_TYPE_SDMA:
		if (swsmu)
			ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate);
		else
			ret = adev->powerplay.pp_funcs->set_powergating_by_smu(
				adev->powerplay.pp_handle, block_type, gate);
		break;
	case AMD_IP_BLOCK_TYPE_GMC:
	case AMD_IP_BLOCK_TYPE_ACP:
		ret = adev->powerplay.pp_funcs->set_powergating_by_smu(
			adev->powerplay.pp_handle, block_type, gate);
		break;
	default:
		break;
	}

	return ret;
}