/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_i2c.h"
#include "amdgpu_dpm.h"
#include "atom.h"

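/*
 * Decode the ATOM_PPLIB classification flags of a power state and print
 * them in human readable form, for debug output of the state tables.
 */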
void amdgpu_dpm_print_class_info(u32 class, u32 class2)
{
	printk("\tui class: ");
	switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
	case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
	default:
		printk("none\n");
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
		printk("battery\n");
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
		printk("balanced\n");
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
		printk("performance\n");
		break;
	}
	printk("\tinternal class: ");
	if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
	    (class2 == 0))
		printk("none");
	else {
		if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
			printk("boot ");
		if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
			printk("thermal ");
		if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
			printk("limited_pwr ");
		if (class & ATOM_PPLIB_CLASSIFICATION_REST)
			printk("rest ");
		if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
			printk("forced ");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
			printk("3d_perf ");
		if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
			printk("ovrdrv ");
		if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
			printk("uvd ");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
			printk("3d_low ");
		if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
			printk("acpi ");
		if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
			printk("uvd_hd2 ");
		if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
			printk("uvd_hd ");
		if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
			printk("uvd_sd ");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
			printk("limited_pwr2 ");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
			printk("ulv ");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
			printk("uvd_mvc ");
	}
	printk("\n");
}

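/*
 * Decode and print the per-state capability flags (single display only,
 * video playback, disallowed on DC power).
 */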
void amdgpu_dpm_print_cap_info(u32 caps)
{
	printk("\tcaps: ");
	if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
		printk("single_disp ");
	if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
		printk("video ");
	if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
		printk("no_dc ");
	printk("\n");
}

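/*
 * Print whether the given power state is the current (c), requested (r)
 * and/or boot (b) state.
 */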
void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
				struct amdgpu_ps *rps)
{
	printk("\tstatus: ");
	if (rps == adev->pm.dpm.current_ps)
		printk("c ");
	if (rps == adev->pm.dpm.requested_ps)
		printk("r ");
	if (rps == adev->pm.dpm.boot_ps)
		printk("b ");
	printk("\n");
}

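/*
 * Return the vblank duration of the first enabled CRTC in microseconds,
 * or 0xffffffff ("infinite") when no display is active.  DPM uses this
 * to judge whether memory clock switching fits inside the vblank period.
 */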
u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;
	u32 line_time_us, vblank_lines;
	u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */

	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			amdgpu_crtc = to_amdgpu_crtc(crtc);
			if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
				line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) /
					amdgpu_crtc->hw_mode.clock;
				vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end -
					amdgpu_crtc->hw_mode.crtc_vdisplay +
					(amdgpu_crtc->v_border * 2);
				vblank_time_us = vblank_lines * line_time_us;
				break;
			}
		}
	}

	return vblank_time_us;
}

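/*
 * Return the vertical refresh rate of the first enabled CRTC in Hz,
 * or 0 when no display is active.
 */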
u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;
	u32 vrefresh = 0;

	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			amdgpu_crtc = to_amdgpu_crtc(crtc);
			if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
				vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
				break;
			}
		}
	}

	return vrefresh;
}

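/*
 * Split the scaled value i_c = (i * r_c) / 100 into a pair (p, u) such
 * that p * 4^u approximates i_c with p limited to roughly p_b bits,
 * i.e. a mantissa/exponent style split used when deriving certain SMC
 * timing parameters.
 */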
void amdgpu_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
			      u32 *p, u32 *u)
{
	u32 b_c = 0;
	u32 i_c;
	u32 tmp;

	i_c = (i * r_c) / 100;
	tmp = i_c >> p_b;

	while (tmp) {
		b_c++;
		tmp >>= 1;
	}

	*u = (b_c + 1) / 2;
	*p = i_c / (1 << (2 * (*u)));
}

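/*
 * Derive the low/high activity thresholds (tl, th) around the target
 * activity t, with hysteresis h, for a pair of performance levels
 * running at fl and fh (fh >= fl).  Returns -EINVAL for invalid clocks.
 */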
int amdgpu_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th)
{
	u32 k, a, ah, al;
	u32 t1;

	if ((fl == 0) || (fh == 0) || (fl > fh))
		return -EINVAL;

	k = (100 * fh) / fl;
	t1 = (t * (k - 100));
	a = (1000 * (100 * h + t1)) / (10000 + (t1 / 100));
	a = (a + 5) / 10;
	ah = ((a * t) + 5000) / 10000;
	al = a - ah;

	*th = t - ah;
	*tl = t + al;

	return 0;
}

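/*
 * Return true if the classification flags mark this power state as a
 * UVD (video decode) state.
 */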
bool amdgpu_is_uvd_state(u32 class, u32 class2)
{
	if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
		return true;
	if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
		return true;
	return false;
}

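/*
 * Return true if the sensor type is handled entirely by the GPU's
 * internal thermal controller; combo chips like the ADT7473/EMC2103
 * variants need special handling, and purely external sensors are not
 * handled here at all.
 */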
bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor)
{
	switch (sensor) {
	case THERMAL_TYPE_RV6XX:
	case THERMAL_TYPE_RV770:
	case THERMAL_TYPE_EVERGREEN:
	case THERMAL_TYPE_SUMO:
	case THERMAL_TYPE_NI:
	case THERMAL_TYPE_SI:
	case THERMAL_TYPE_CI:
	case THERMAL_TYPE_KV:
		return true;
	case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
		return false; /* need special handling */
	case THERMAL_TYPE_NONE:
	case THERMAL_TYPE_EXTERNAL:
	case THERMAL_TYPE_EXTERNAL_GPIO:
	default:
		return false;
	}
}

union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
	struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
	struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
};

union fan_info {
	struct _ATOM_PPLIB_FANTABLE fan;
	struct _ATOM_PPLIB_FANTABLE2 fan2;
	struct _ATOM_PPLIB_FANTABLE3 fan3;
};

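/*
 * Copy a clock/voltage dependency table from the ATOM powerplay data
 * into the driver's representation.  The allocated entries array is
 * released by amdgpu_free_extended_power_table().
 */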
static int amdgpu_parse_clk_voltage_dep_table(struct amdgpu_clock_voltage_dependency_table *amdgpu_table,
					      ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
{
	u32 size = atom_table->ucNumEntries *
		sizeof(struct amdgpu_clock_voltage_dependency_entry);
	int i;
	ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;

	amdgpu_table->entries = kzalloc(size, GFP_KERNEL);
	if (!amdgpu_table->entries)
		return -ENOMEM;

	entry = &atom_table->entries[0];
	for (i = 0; i < atom_table->ucNumEntries; i++) {
		amdgpu_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
			(entry->ucClockHigh << 16);
		amdgpu_table->entries[i].v = le16_to_cpu(entry->usVoltage);
		entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
			((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
	}
	amdgpu_table->count = atom_table->ucNumEntries;

	return 0;
}

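/*
 * Cache the platform capability flags and the back bias/voltage
 * response times from the PowerPlayInfo table in the VBIOS.
 */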
int amdgpu_get_platform_caps(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	adev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
	adev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
	adev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);

	return 0;
}

/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8 24
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V9 26

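/*
 * Parse the optional tables that newer PowerPlay revisions append to
 * the base table: fan control, clock/voltage dependencies, phase
 * shedding, CAC leakage, and the extended header tables (VCE, UVD,
 * SAMU, PPM, ACP, PowerTune, vddgfx).  Allocations made here are
 * released by amdgpu_free_extended_power_table().
 */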
int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	union power_info *power_info;
	union fan_info *fan_info;
	ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	int ret, i;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	/* fan table */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		if (power_info->pplib3.usFanTableOffset) {
			fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset +
						      le16_to_cpu(power_info->pplib3.usFanTableOffset));
			adev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
			adev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
			adev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
			adev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
			adev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
			adev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
			adev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
			if (fan_info->fan.ucFanTableFormat >= 2)
				adev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
			else
				adev->pm.dpm.fan.t_max = 10900;
			adev->pm.dpm.fan.cycle_delay = 100000;
			if (fan_info->fan.ucFanTableFormat >= 3) {
				adev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode;
				adev->pm.dpm.fan.default_max_fan_pwm =
					le16_to_cpu(fan_info->fan3.usFanPWMMax);
				adev->pm.dpm.fan.default_fan_output_sensitivity = 4836;
				adev->pm.dpm.fan.fan_output_sensitivity =
					le16_to_cpu(fan_info->fan3.usFanOutputSensitivity);
			}
			adev->pm.dpm.fan.ucode_fan_control = true;
		}
	}

	/* clock dependency tables, shedding tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
		if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
			ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
				(ATOM_PPLIB_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
			if (clk_v->ucNumEntries) {
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
					le16_to_cpu(clk_v->entries[0].usSclkLow) |
					(clk_v->entries[0].ucSclkHigh << 16);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
					le16_to_cpu(clk_v->entries[0].usMclkLow) |
					(clk_v->entries[0].ucMclkHigh << 16);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
					le16_to_cpu(clk_v->entries[0].usVddc);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
					le16_to_cpu(clk_v->entries[0].usVddci);
			}
		}
		if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
			ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
				(ATOM_PPLIB_PhaseSheddingLimits_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
			ATOM_PPLIB_PhaseSheddingLimits_Record *entry;

			adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
				kzalloc(psl->ucNumEntries *
					sizeof(struct amdgpu_phase_shedding_limits_entry),
					GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}

			entry = &psl->entries[0];
			for (i = 0; i < psl->ucNumEntries; i++) {
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
					le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
					le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
			}
			adev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
				psl->ucNumEntries;
		}
	}

	/* cac data */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
		adev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
		adev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
		adev->pm.dpm.near_tdp_limit_adjusted = adev->pm.dpm.near_tdp_limit;
		adev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
		if (adev->pm.dpm.tdp_od_limit)
			adev->pm.dpm.power_control = true;
		else
			adev->pm.dpm.power_control = false;
		adev->pm.dpm.tdp_adjustment = 0;
		adev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
		adev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
		adev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
		if (power_info->pplib5.usCACLeakageTableOffset) {
			ATOM_PPLIB_CAC_Leakage_Table *cac_table =
				(ATOM_PPLIB_CAC_Leakage_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
			ATOM_PPLIB_CAC_Leakage_Record *entry;
			u32 size = cac_table->ucNumEntries * sizeof(struct amdgpu_cac_leakage_table);
			adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			entry = &cac_table->entries[0];
			for (i = 0; i < cac_table->ucNumEntries; i++) {
				if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
						le16_to_cpu(entry->usVddc1);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
						le16_to_cpu(entry->usVddc2);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
						le16_to_cpu(entry->usVddc3);
				} else {
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
						le16_to_cpu(entry->usVddc);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
						le32_to_cpu(entry->ulLeakageValue);
				}
				entry = (ATOM_PPLIB_CAC_Leakage_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
			}
			adev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
		}
	}

	/* ext tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
			(mode_info->atom_context->bios + data_offset +
			 le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
			ext_hdr->usVCETableOffset) {
			VCEClockInfoArray *array = (VCEClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + array->ucNumEntries * sizeof(VCEClockInfo));
			ATOM_PPLIB_VCE_State_Table *states =
				(ATOM_PPLIB_VCE_State_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof(VCEClockInfo)) +
				 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)));
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
			ATOM_PPLIB_VCE_State_Record *state_entry;
			VCEClockInfo *vce_clk;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_vce_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			state_entry = &states->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
			}
			for (i = 0; i < states->numEntries; i++) {
				if (i >= AMDGPU_MAX_VCE_LEVELS)
					break;
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				adev->pm.dpm.vce_states[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				adev->pm.dpm.vce_states[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				adev->pm.dpm.vce_states[i].clk_idx =
					state_entry->ucClockInfoIndex & 0x3f;
				adev->pm.dpm.vce_states[i].pstate =
					(state_entry->ucClockInfoIndex & 0xc0) >> 6;
				state_entry = (ATOM_PPLIB_VCE_State_Record *)
					((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
			ext_hdr->usUVDTableOffset) {
			UVDClockInfoArray *array = (UVDClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof(UVDClockInfo)));
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				UVDClockInfo *uvd_clk = (UVDClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
					le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
					le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
			ext_hdr->usSAMUTableOffset) {
			ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
			ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
		    ext_hdr->usPPMTableOffset) {
			ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usPPMTableOffset));
			adev->pm.dpm.dyn_state.ppm_table =
				kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.ppm_table) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
			adev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
				le16_to_cpu(ppm->usCpuCoreNumber);
			adev->pm.dpm.dyn_state.ppm_table->platform_tdp =
				le32_to_cpu(ppm->ulPlatformTDP);
			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
				le32_to_cpu(ppm->ulSmallACPlatformTDP);
			adev->pm.dpm.dyn_state.ppm_table->platform_tdc =
				le32_to_cpu(ppm->ulPlatformTDC);
			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
				le32_to_cpu(ppm->ulSmallACPlatformTDC);
			adev->pm.dpm.dyn_state.ppm_table->apu_tdp =
				le32_to_cpu(ppm->ulApuTDP);
			adev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
				le32_to_cpu(ppm->ulDGpuTDP);
			adev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
				le32_to_cpu(ppm->ulDGpuUlvPower);
			adev->pm.dpm.dyn_state.ppm_table->tj_max =
				le32_to_cpu(ppm->ulTjmax);
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
			ext_hdr->usACPTableOffset) {
			ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
			ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
			ext_hdr->usPowerTuneTableOffset) {
			u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
			ATOM_PowerTune_Table *pt;
			adev->pm.dpm.dyn_state.cac_tdp_table =
				kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.cac_tdp_table) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			if (rev > 0) {
				ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
					le16_to_cpu(ppt->usMaximumPowerDeliveryLimit);
				pt = &ppt->power_tune_table;
			} else {
				ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
				pt = &ppt->power_tune_table;
			}
			adev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
			adev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
				le16_to_cpu(pt->usConfigurableTDP);
			adev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
			adev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
				le16_to_cpu(pt->usBatteryPowerLimit);
			adev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
				le16_to_cpu(pt->usSmallPowerLimit);
			adev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
				le16_to_cpu(pt->usLowCACLeakage);
			adev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
				le16_to_cpu(pt->usHighCACLeakage);
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8) &&
				ext_hdr->usSclkVddgfxTableOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usSclkVddgfxTableOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(
					&adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk,
					dep_table);
			if (ret) {
				kfree(adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk.entries);
				return ret;
			}
		}
	}

	return 0;
}

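/*
 * Free everything allocated while parsing the power tables.  kfree(NULL)
 * is a no-op, so this is safe to call on a partially parsed table.
 */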
void amdgpu_free_extended_power_table(struct amdgpu_device *adev)
{
	struct amdgpu_dpm_dynamic_state *dyn_state = &adev->pm.dpm.dyn_state;

	kfree(dyn_state->vddc_dependency_on_sclk.entries);
	kfree(dyn_state->vddci_dependency_on_mclk.entries);
	kfree(dyn_state->vddc_dependency_on_mclk.entries);
	kfree(dyn_state->mvdd_dependency_on_mclk.entries);
	kfree(dyn_state->cac_leakage_table.entries);
	kfree(dyn_state->phase_shedding_limits_table.entries);
	kfree(dyn_state->ppm_table);
	kfree(dyn_state->cac_tdp_table);
	kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
	kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
	kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
	kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
	kfree(dyn_state->vddgfx_dependency_on_sclk.entries);
}

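/* Human readable names, indexed by the ATOM_PP_THERMALCONTROLLER_* type */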
static const char *pp_lib_thermal_controller_names[] = {
	"NONE",
	"lm63",
	"adm1032",
	"adm1030",
	"max6649",
	"lm64",
	"f75375",
	"RV6xx",
	"RV770",
	"adt7473",
	"NONE",
	"External GPIO",
	"Evergreen",
	"emc2103",
	"Sumo",
	"Northern Islands",
	"Southern Islands",
	"lm96163",
	"Sea Islands",
	"Kaveri/Kabini",
};

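/*
 * Identify the thermal controller described in the PowerPlay table and
 * record its type and fan parameters.  External I2C fan/thermal chips
 * are additionally instantiated on the I2C bus given in the table.
 */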
void amdgpu_add_thermal_controller(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	ATOM_PPLIB_POWERPLAYTABLE *power_table;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	ATOM_PPLIB_THERMALCONTROLLER *controller;
	struct amdgpu_i2c_bus_rec i2c_bus;
	u16 data_offset;
	u8 frev, crev;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset))
		return;
	power_table = (ATOM_PPLIB_POWERPLAYTABLE *)
		(mode_info->atom_context->bios + data_offset);
	controller = &power_table->sThermalController;

	/* add the i2c bus for thermal/fan chip */
	if (controller->ucType > 0) {
		if (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN)
			adev->pm.no_fan = true;
		adev->pm.fan_pulses_per_revolution =
			controller->ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
		if (adev->pm.fan_pulses_per_revolution) {
			adev->pm.fan_min_rpm = controller->ucFanMinRPM;
			adev->pm.fan_max_rpm = controller->ucFanMaxRPM;
		}
		if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_RV770;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_NI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_SI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_CI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_KV;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
			DRM_INFO("External GPIO thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
		} else if (controller->ucType ==
			   ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
			DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
		} else if (controller->ucType ==
			   ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
			DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
		} else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
			DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
				 pp_lib_thermal_controller_names[controller->ucType],
				 controller->ucI2cAddress >> 1,
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
			i2c_bus = amdgpu_atombios_lookup_i2c_gpio(adev, controller->ucI2cLine);
			adev->pm.i2c_bus = amdgpu_i2c_lookup(adev, &i2c_bus);
			if (adev->pm.i2c_bus) {
				struct i2c_board_info info = { };
				const char *name = pp_lib_thermal_controller_names[controller->ucType];
				info.addr = controller->ucI2cAddress >> 1;
				strlcpy(info.type, name, sizeof(info.type));
				i2c_new_device(&adev->pm.i2c_bus->adapter, &info);
			}
		} else {
			DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
				 controller->ucType,
				 controller->ucI2cAddress >> 1,
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
		}
	}
}

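/*
 * Resolve the PCIe gen to use: an explicit asic_gen is returned as-is;
 * otherwise default_gen is used when sys_mask shows the platform link
 * supports that speed, with gen1 as the fallback.
 */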
enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
						 u32 sys_mask,
						 enum amdgpu_pcie_gen asic_gen,
						 enum amdgpu_pcie_gen default_gen)
{
	switch (asic_gen) {
	case AMDGPU_PCIE_GEN1:
		return AMDGPU_PCIE_GEN1;
	case AMDGPU_PCIE_GEN2:
		return AMDGPU_PCIE_GEN2;
	case AMDGPU_PCIE_GEN3:
		return AMDGPU_PCIE_GEN3;
	default:
		if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == AMDGPU_PCIE_GEN3))
			return AMDGPU_PCIE_GEN3;
		else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == AMDGPU_PCIE_GEN2))
			return AMDGPU_PCIE_GEN2;
		else
			return AMDGPU_PCIE_GEN1;
	}
	return AMDGPU_PCIE_GEN1;
}

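/*
 * Return asic_lanes if it is a valid PCIe lane width (1, 2, 4, 8, 12 or
 * 16), otherwise fall back to default_lanes.
 */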
u16 amdgpu_get_pcie_lane_support(struct amdgpu_device *adev,
				 u16 asic_lanes,
				 u16 default_lanes)
{
	switch (asic_lanes) {
	case 0:
	default:
		return default_lanes;
	case 1:
		return 1;
	case 2:
		return 2;
	case 4:
		return 4;
	case 8:
		return 8;
	case 12:
		return 12;
	case 16:
		return 16;
	}
}

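/*
 * Encode a PCIe lane count into the bitfield encoding used by the ATOM
 * tables (1/2/4/8/12/16 lanes map to 1..6, anything else to 0).
 */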
u8 amdgpu_encode_pci_lane_width(u32 lanes)
{
	static const u8 encoded_lanes[] = {
		0, 1, 2, 0, 3, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0, 0, 6
	};

	if (lanes > 16)
		return 0;

	return encoded_lanes[lanes];
}