This source file includes the following definitions:
- smu_v11_0_send_msg_without_waiting
- smu_v11_0_read_arg
- smu_v11_0_wait_for_response
- smu_v11_0_send_msg
- smu_v11_0_send_msg_with_param
- smu_v11_0_init_microcode
- smu_v11_0_load_microcode
- smu_v11_0_check_fw_status
- smu_v11_0_check_fw_version
- smu_v11_0_set_pptable_v2_0
- smu_v11_0_set_pptable_v2_1
- smu_v11_0_setup_pptable
- smu_v11_0_init_dpm_context
- smu_v11_0_fini_dpm_context
- smu_v11_0_init_smc_tables
- smu_v11_0_fini_smc_tables
- smu_v11_0_init_power
- smu_v11_0_fini_power
- smu_v11_0_get_vbios_bootup_values
- smu_v11_0_get_clk_info_from_vbios
- smu_v11_0_notify_memory_pool_location
- smu_v11_0_check_pptable
- smu_v11_0_parse_pptable
- smu_v11_0_populate_smc_pptable
- smu_v11_0_write_pptable
- smu_v11_0_write_watermarks_table
- smu_v11_0_set_deep_sleep_dcefclk
- smu_v11_0_set_min_dcef_deep_sleep
- smu_v11_0_set_tool_table_location
- smu_v11_0_init_display_count
- smu_v11_0_set_allowed_mask
- smu_v11_0_get_enabled_mask
- smu_v11_0_system_features_control
- smu_v11_0_notify_display_change
- smu_v11_0_get_max_sustainable_clock
- smu_v11_0_init_max_sustainable_clocks
- smu_v11_0_set_power_limit
- smu_v11_0_get_current_clk_freq
- smu_v11_0_set_thermal_range
- smu_v11_0_enable_thermal_alert
- smu_v11_0_start_thermal_control
- convert_to_vddc
- smu_v11_0_get_gfx_vdd
- smu_v11_0_read_sensor
- smu_v11_0_display_clock_voltage_request
- smu_v11_0_set_watermarks_for_clock_ranges
- smu_v11_0_gfx_off_control
- smu_v11_0_get_fan_control_mode
- smu_v11_0_auto_fan_control
- smu_v11_0_set_fan_static_mode
- smu_v11_0_set_fan_speed_percent
- smu_v11_0_set_fan_control_mode
- smu_v11_0_set_fan_speed_rpm
- smu_v11_0_set_xgmi_pstate
- smu_v11_0_irq_process
- smu_v11_0_register_irq_handler
- smu_v11_0_get_max_sustainable_clocks_by_dc
- smu_v11_0_set_azalia_d3_pme
- smu_v11_0_baco_set_armd3_sequence
- smu_v11_0_baco_is_support
- smu_v11_0_baco_get_state
- smu_v11_0_baco_set_state
- smu_v11_0_baco_reset
- smu_v11_0_get_dpm_ultimate_freq
- smu_v11_0_set_smu_funcs

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "pp_debug.h"
#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "smu_v11_0.h"
#include "soc15_common.h"
#include "atom.h"
#include "vega20_ppt.h"
#include "arcturus_ppt.h"
#include "navi10_ppt.h"

#include "asic_reg/thm/thm_11_0_2_offset.h"
#include "asic_reg/thm/thm_11_0_2_sh_mask.h"
#include "asic_reg/mp/mp_11_0_offset.h"
#include "asic_reg/mp/mp_11_0_sh_mask.h"
#include "asic_reg/nbio/nbio_7_4_offset.h"
#include "asic_reg/nbio/nbio_7_4_sh_mask.h"
#include "asic_reg/smuio/smuio_11_0_0_offset.h"
#include "asic_reg/smuio/smuio_11_0_0_sh_mask.h"

MODULE_FIRMWARE("amdgpu/vega20_smc.bin");
MODULE_FIRMWARE("amdgpu/arcturus_smc.bin");
MODULE_FIRMWARE("amdgpu/navi10_smc.bin");
MODULE_FIRMWARE("amdgpu/navi14_smc.bin");
MODULE_FIRMWARE("amdgpu/navi12_smc.bin");

#define SMU11_VOLTAGE_SCALE 4

static int smu_v11_0_send_msg_without_waiting(struct smu_context *smu,
					      uint16_t msg)
{
	struct amdgpu_device *adev = smu->adev;

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
	return 0;
}

static int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg)
{
	struct amdgpu_device *adev = smu->adev;

	*arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
	return 0;
}

static int smu_v11_0_wait_for_response(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t cur_value, i, timeout = adev->usec_timeout * 10;

	for (i = 0; i < timeout; i++) {
		cur_value = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
		if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0)
			break;
		udelay(1);
	}

	if (i == timeout)
		return -ETIME;

	return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 0 : -EIO;
}

static int smu_v11_0_send_msg(struct smu_context *smu, uint16_t msg)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0, index = 0;

	index = smu_msg_get_index(smu, msg);
	if (index < 0)
		return index;

	smu_v11_0_wait_for_response(smu);

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);

	smu_v11_0_send_msg_without_waiting(smu, (uint16_t)index);

	ret = smu_v11_0_wait_for_response(smu);
	if (ret)
		pr_err("failed to send message: %10s (%d), response %#x\n",
		       smu_get_message_name(smu, msg), index, ret);

	return ret;
}

static int
smu_v11_0_send_msg_with_param(struct smu_context *smu, uint16_t msg,
			      uint32_t param)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0, index = 0;

	index = smu_msg_get_index(smu, msg);
	if (index < 0)
		return index;

	/* flag a still-pending previous command, then proceed anyway */
	ret = smu_v11_0_wait_for_response(smu);
	if (ret)
		pr_err("failed to send message: %10s (%d) \tparam: 0x%08x, response %#x\n",
		       smu_get_message_name(smu, msg), index, param, ret);

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, param);

	smu_v11_0_send_msg_without_waiting(smu, (uint16_t)index);

	ret = smu_v11_0_wait_for_response(smu);
	if (ret)
		pr_err("failed to send message: %10s (%d) \tparam: 0x%08x, response %#x\n",
		       smu_get_message_name(smu, msg), index, param, ret);

	return ret;
}
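/*
 * Note on the mailbox protocol above (a summary of this file's own code,
 * with a hypothetical caller for illustration): the driver clears the
 * response register C2PMSG_90, writes the optional argument to C2PMSG_82
 * and the message index to C2PMSG_66, then polls C2PMSG_90 until the
 * firmware posts a status (0x1 on success).  A caller that needs a reply
 * reads it back from C2PMSG_82 via smu_v11_0_read_arg(), e.g.:
 *
 *	ret = smu_v11_0_send_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq,
 *					    clk_id << 16);
 *	if (!ret)
 *		ret = smu_v11_0_read_arg(smu, &freq);
 */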

static int smu_v11_0_init_microcode(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	const char *chip_name;
	char fw_name[30];
	int err = 0;
	const struct smc_firmware_header_v1_0 *hdr;
	const struct common_firmware_header *header;
	struct amdgpu_firmware_info *ucode = NULL;

	switch (adev->asic_type) {
	case CHIP_VEGA20:
		chip_name = "vega20";
		break;
	case CHIP_ARCTURUS:
		chip_name = "arcturus";
		break;
	case CHIP_NAVI10:
		chip_name = "navi10";
		break;
	case CHIP_NAVI14:
		chip_name = "navi14";
		break;
	case CHIP_NAVI12:
		chip_name = "navi12";
		break;
	default:
		BUG();
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name);

	err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->pm.fw);
	if (err)
		goto out;

	hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
	amdgpu_ucode_print_smc_hdr(&hdr->header);
	adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
		ucode->ucode_id = AMDGPU_UCODE_ID_SMC;
		ucode->fw = adev->pm.fw;
		header = (const struct common_firmware_header *)ucode->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
	}

out:
	if (err) {
		DRM_ERROR("smu_v11_0: Failed to load firmware \"%s\"\n",
			  fw_name);
		release_firmware(adev->pm.fw);
		adev->pm.fw = NULL;
	}
	return err;
}

static int smu_v11_0_load_microcode(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	const uint32_t *src;
	const struct smc_firmware_header_v1_0 *hdr;
	uint32_t addr_start = MP1_SRAM;
	uint32_t i;
	uint32_t mp1_fw_flags;

	hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
	src = (const uint32_t *)(adev->pm.fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	for (i = 1; i < MP1_SMC_SIZE/4 - 1; i++) {
		WREG32_PCIE(addr_start, src[i]);
		addr_start += 4;
	}

	WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff),
		    1 & MP1_SMN_PUB_CTRL__RESET_MASK);
	WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff),
		    1 & ~MP1_SMN_PUB_CTRL__RESET_MASK);

	for (i = 0; i < adev->usec_timeout; i++) {
		mp1_fw_flags = RREG32_PCIE(MP1_Public |
					   (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
		if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
		    MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
			break;
		udelay(1);
	}

	if (i == adev->usec_timeout)
		return -ETIME;

	return 0;
}

static int smu_v11_0_check_fw_status(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t mp1_fw_flags;

	mp1_fw_flags = RREG32_PCIE(MP1_Public |
				   (smnMP1_FIRMWARE_FLAGS & 0xffffffff));

	if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
	    MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
		return 0;

	return -EIO;
}

static int smu_v11_0_check_fw_version(struct smu_context *smu)
{
	uint32_t if_version = 0xff, smu_version = 0xff;
	uint16_t smu_major;
	uint8_t smu_minor, smu_debug;
	int ret = 0;

	ret = smu_get_smc_version(smu, &if_version, &smu_version);
	if (ret)
		return ret;

	smu_major = (smu_version >> 16) & 0xffff;
	smu_minor = (smu_version >> 8) & 0xff;
	smu_debug = (smu_version >> 0) & 0xff;

	switch (smu->adev->asic_type) {
	case CHIP_VEGA20:
		smu->smc_if_version = SMU11_DRIVER_IF_VERSION_VG20;
		break;
	case CHIP_ARCTURUS:
		smu->smc_if_version = SMU11_DRIVER_IF_VERSION_ARCT;
		break;
	case CHIP_NAVI10:
		smu->smc_if_version = SMU11_DRIVER_IF_VERSION_NV10;
		break;
	case CHIP_NAVI14:
		smu->smc_if_version = SMU11_DRIVER_IF_VERSION_NV14;
		break;
	default:
		pr_err("smu unsupported asic type: %d.\n", smu->adev->asic_type);
		smu->smc_if_version = SMU11_DRIVER_IF_VERSION_INV;
		break;
	}

	/*
	 * A mismatched interface version is reported but not treated as
	 * fatal: the driver still attempts to run against the firmware.
	 */
	if (if_version != smu->smc_if_version) {
		pr_info("smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
			"smu fw version = 0x%08x (%d.%d.%d)\n",
			smu->smc_if_version, if_version,
			smu_version, smu_major, smu_minor, smu_debug);
		pr_warn("SMU driver if version not matched\n");
	}

	return ret;
}
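/*
 * Worked example for the version unpacking above (illustrative values,
 * not from any particular firmware): smu_version 0x002A3200 yields
 * smu_major = 0x002A = 42, smu_minor = 0x32 = 50, smu_debug = 0x00,
 * printed as "42.50.0".
 */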

static int smu_v11_0_set_pptable_v2_0(struct smu_context *smu, void **table, uint32_t *size)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t ppt_offset_bytes;
	const struct smc_firmware_header_v2_0 *v2;

	v2 = (const struct smc_firmware_header_v2_0 *) adev->pm.fw->data;

	ppt_offset_bytes = le32_to_cpu(v2->ppt_offset_bytes);
	*size = le32_to_cpu(v2->ppt_size_bytes);
	*table = (uint8_t *)v2 + ppt_offset_bytes;

	return 0;
}

static int smu_v11_0_set_pptable_v2_1(struct smu_context *smu, void **table,
				      uint32_t *size, uint32_t pptable_id)
{
	struct amdgpu_device *adev = smu->adev;
	const struct smc_firmware_header_v2_1 *v2_1;
	struct smc_soft_pptable_entry *entries;
	uint32_t pptable_count = 0;
	int i = 0;

	v2_1 = (const struct smc_firmware_header_v2_1 *) adev->pm.fw->data;
	entries = (struct smc_soft_pptable_entry *)
		((uint8_t *)v2_1 + le32_to_cpu(v2_1->pptable_entry_offset));
	pptable_count = le32_to_cpu(v2_1->pptable_count);
	for (i = 0; i < pptable_count; i++) {
		if (le32_to_cpu(entries[i].id) == pptable_id) {
			*table = ((uint8_t *)v2_1 + le32_to_cpu(entries[i].ppt_offset_bytes));
			*size = le32_to_cpu(entries[i].ppt_size_bytes);
			break;
		}
	}

	if (i == pptable_count)
		return -EINVAL;

	return 0;
}
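/*
 * Layout assumed by the two helpers above (as read from the header
 * structures used here, not an official spec drawing): a v2.0 SMC image
 * carries a single soft pptable at ppt_offset_bytes, while a v2.1 image
 * carries an array of smc_soft_pptable_entry records at
 * pptable_entry_offset, each naming one pptable by id with its own
 * offset and size.  smu_v11_0_setup_pptable() below picks the entry
 * whose id matches the VBIOS-provided boot_values.pp_table_id.
 */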

static int smu_v11_0_setup_pptable(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	const struct smc_firmware_header_v1_0 *hdr;
	int ret, index;
	uint32_t size = 0;
	uint16_t atom_table_size;
	uint8_t frev, crev;
	void *table;
	uint16_t version_major, version_minor;

	hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
	version_major = le16_to_cpu(hdr->header.header_version_major);
	version_minor = le16_to_cpu(hdr->header.header_version_minor);
	if (version_major == 2 && smu->smu_table.boot_values.pp_table_id > 0) {
		switch (version_minor) {
		case 0:
			ret = smu_v11_0_set_pptable_v2_0(smu, &table, &size);
			break;
		case 1:
			ret = smu_v11_0_set_pptable_v2_1(smu, &table, &size,
							 smu->smu_table.boot_values.pp_table_id);
			break;
		default:
			ret = -EINVAL;
			break;
		}
		if (ret)
			return ret;
	} else {
		index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						    powerplayinfo);

		ret = smu_get_atom_data_table(smu, index, &atom_table_size, &frev, &crev,
					      (uint8_t **)&table);
		if (ret)
			return ret;
		size = atom_table_size;
	}

	if (!smu->smu_table.power_play_table)
		smu->smu_table.power_play_table = table;
	if (!smu->smu_table.power_play_table_size)
		smu->smu_table.power_play_table_size = size;

	return 0;
}

static int smu_v11_0_init_dpm_context(struct smu_context *smu)
{
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;

	if (smu_dpm->dpm_context || smu_dpm->dpm_context_size != 0)
		return -EINVAL;

	return smu_alloc_dpm_context(smu);
}

static int smu_v11_0_fini_dpm_context(struct smu_context *smu)
{
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;

	if (!smu_dpm->dpm_context || smu_dpm->dpm_context_size == 0)
		return -EINVAL;

	kfree(smu_dpm->dpm_context);
	kfree(smu_dpm->golden_dpm_context);
	kfree(smu_dpm->dpm_current_power_state);
	kfree(smu_dpm->dpm_request_power_state);
	smu_dpm->dpm_context = NULL;
	smu_dpm->golden_dpm_context = NULL;
	smu_dpm->dpm_context_size = 0;
	smu_dpm->dpm_current_power_state = NULL;
	smu_dpm->dpm_request_power_state = NULL;

	return 0;
}

static int smu_v11_0_init_smc_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = NULL;
	int ret = 0;

	if (smu_table->tables || smu_table->table_count == 0)
		return -EINVAL;

	tables = kcalloc(SMU_TABLE_COUNT, sizeof(struct smu_table),
			 GFP_KERNEL);
	if (!tables)
		return -ENOMEM;

	smu_table->tables = tables;

	ret = smu_tables_init(smu, tables);
	if (ret)
		return ret;

	ret = smu_v11_0_init_dpm_context(smu);
	if (ret)
		return ret;

	return 0;
}

static int smu_v11_0_fini_smc_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	int ret = 0;

	if (!smu_table->tables || smu_table->table_count == 0)
		return -EINVAL;

	kfree(smu_table->tables);
	kfree(smu_table->metrics_table);
	smu_table->tables = NULL;
	smu_table->table_count = 0;
	smu_table->metrics_table = NULL;
	smu_table->metrics_time = 0;

	ret = smu_v11_0_fini_dpm_context(smu);
	if (ret)
		return ret;
	return 0;
}

static int smu_v11_0_init_power(struct smu_context *smu)
{
	struct smu_power_context *smu_power = &smu->smu_power;

	if (!smu->pm_enabled)
		return 0;
	if (smu_power->power_context || smu_power->power_context_size != 0)
		return -EINVAL;

	smu_power->power_context = kzalloc(sizeof(struct smu_11_0_dpm_context),
					   GFP_KERNEL);
	if (!smu_power->power_context)
		return -ENOMEM;
	smu_power->power_context_size = sizeof(struct smu_11_0_dpm_context);

	return 0;
}

static int smu_v11_0_fini_power(struct smu_context *smu)
{
	struct smu_power_context *smu_power = &smu->smu_power;

	if (!smu->pm_enabled)
		return 0;
	if (!smu_power->power_context || smu_power->power_context_size == 0)
		return -EINVAL;

	kfree(smu_power->power_context);
	smu_power->power_context = NULL;
	smu_power->power_context_size = 0;

	return 0;
}

int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu)
{
	int ret, index;
	uint16_t size;
	uint8_t frev, crev;
	struct atom_common_table_header *header;
	struct atom_firmware_info_v3_3 *v_3_3;
	struct atom_firmware_info_v3_1 *v_3_1;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);

	ret = smu_get_atom_data_table(smu, index, &size, &frev, &crev,
				      (uint8_t **)&header);
	if (ret)
		return ret;

	if (header->format_revision != 3) {
		pr_err("unknown atom_firmware_info version for smu11!\n");
		return -EINVAL;
	}

	switch (header->content_revision) {
	case 0:
	case 1:
	case 2:
		v_3_1 = (struct atom_firmware_info_v3_1 *)header;
		smu->smu_table.boot_values.revision = v_3_1->firmware_revision;
		smu->smu_table.boot_values.gfxclk = v_3_1->bootup_sclk_in10khz;
		smu->smu_table.boot_values.uclk = v_3_1->bootup_mclk_in10khz;
		smu->smu_table.boot_values.socclk = 0;
		smu->smu_table.boot_values.dcefclk = 0;
		smu->smu_table.boot_values.vddc = v_3_1->bootup_vddc_mv;
		smu->smu_table.boot_values.vddci = v_3_1->bootup_vddci_mv;
		smu->smu_table.boot_values.mvddc = v_3_1->bootup_mvddc_mv;
		smu->smu_table.boot_values.vdd_gfx = v_3_1->bootup_vddgfx_mv;
		smu->smu_table.boot_values.cooling_id = v_3_1->coolingsolution_id;
		smu->smu_table.boot_values.pp_table_id = 0;
		break;
	case 3:
	default:
		v_3_3 = (struct atom_firmware_info_v3_3 *)header;
		smu->smu_table.boot_values.revision = v_3_3->firmware_revision;
		smu->smu_table.boot_values.gfxclk = v_3_3->bootup_sclk_in10khz;
		smu->smu_table.boot_values.uclk = v_3_3->bootup_mclk_in10khz;
		smu->smu_table.boot_values.socclk = 0;
		smu->smu_table.boot_values.dcefclk = 0;
		smu->smu_table.boot_values.vddc = v_3_3->bootup_vddc_mv;
		smu->smu_table.boot_values.vddci = v_3_3->bootup_vddci_mv;
		smu->smu_table.boot_values.mvddc = v_3_3->bootup_mvddc_mv;
		smu->smu_table.boot_values.vdd_gfx = v_3_3->bootup_vddgfx_mv;
		smu->smu_table.boot_values.cooling_id = v_3_3->coolingsolution_id;
		smu->smu_table.boot_values.pp_table_id = v_3_3->pplib_pptable_id;
		break;
	}

	smu->smu_table.boot_values.format_revision = header->format_revision;
	smu->smu_table.boot_values.content_revision = header->content_revision;

	return 0;
}

static int smu_v11_0_get_clk_info_from_vbios(struct smu_context *smu)
{
	int ret, index;
	struct amdgpu_device *adev = smu->adev;
	struct atom_get_smu_clock_info_parameters_v3_1 input = {0};
	struct atom_get_smu_clock_info_output_parameters_v3_1 *output;

	input.clk_id = SMU11_SYSPLL0_SOCCLK_ID;
	input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
	index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
					    getsmuclockinfo);

	ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
					(uint32_t *)&input);
	if (ret)
		return -EINVAL;

	output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
	smu->smu_table.boot_values.socclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;

	memset(&input, 0, sizeof(input));
	input.clk_id = SMU11_SYSPLL0_DCEFCLK_ID;
	input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
	index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
					    getsmuclockinfo);

	ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
					(uint32_t *)&input);
	if (ret)
		return -EINVAL;

	output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
	smu->smu_table.boot_values.dcefclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;

	memset(&input, 0, sizeof(input));
	input.clk_id = SMU11_SYSPLL0_ECLK_ID;
	input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
	index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
					    getsmuclockinfo);

	ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
					(uint32_t *)&input);
	if (ret)
		return -EINVAL;

	output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
	smu->smu_table.boot_values.eclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;

	memset(&input, 0, sizeof(input));
	input.clk_id = SMU11_SYSPLL0_VCLK_ID;
	input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
	index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
					    getsmuclockinfo);

	ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
					(uint32_t *)&input);
	if (ret)
		return -EINVAL;

	output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
	smu->smu_table.boot_values.vclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;

	memset(&input, 0, sizeof(input));
	input.clk_id = SMU11_SYSPLL0_DCLK_ID;
	input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
	index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
					    getsmuclockinfo);

	ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
					(uint32_t *)&input);
	if (ret)
		return -EINVAL;

	output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
	smu->smu_table.boot_values.dclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;

	if ((smu->smu_table.boot_values.format_revision == 3) &&
	    (smu->smu_table.boot_values.content_revision >= 2)) {
		memset(&input, 0, sizeof(input));
		input.clk_id = SMU11_SYSPLL1_0_FCLK_ID;
		input.syspll_id = SMU11_SYSPLL1_2_ID;
		input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
		index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
						    getsmuclockinfo);

		ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
						(uint32_t *)&input);
		if (ret)
			return -EINVAL;

		output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
		smu->smu_table.boot_values.fclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
	}

	return 0;
}
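/*
 * Unit note for the conversions above (derived from this code, with an
 * illustrative number): smu_clock_freq_hz is in Hz while the
 * boot_values.* clocks are kept in 10 kHz units, hence the "/ 10000".
 * For example, a SOCCLK of 700 MHz comes back as 700000000 Hz and is
 * stored as 70000.
 */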

static int smu_v11_0_notify_memory_pool_location(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;
	int ret = 0;
	uint64_t address;
	uint32_t address_low, address_high;

	if (memory_pool->size == 0 || memory_pool->cpu_addr == NULL)
		return ret;

	address = (uintptr_t)memory_pool->cpu_addr;
	address_high = (uint32_t)upper_32_bits(address);
	address_low = (uint32_t)lower_32_bits(address);

	ret = smu_send_smc_msg_with_param(smu,
					  SMU_MSG_SetSystemVirtualDramAddrHigh,
					  address_high);
	if (ret)
		return ret;
	ret = smu_send_smc_msg_with_param(smu,
					  SMU_MSG_SetSystemVirtualDramAddrLow,
					  address_low);
	if (ret)
		return ret;

	address = memory_pool->mc_address;
	address_high = (uint32_t)upper_32_bits(address);
	address_low = (uint32_t)lower_32_bits(address);

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh,
					  address_high);
	if (ret)
		return ret;
	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow,
					  address_low);
	if (ret)
		return ret;
	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize,
					  (uint32_t)memory_pool->size);
	if (ret)
		return ret;

	return ret;
}
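/*
 * Example of the 64-bit address split used above (values are made up):
 * address 0x0000000FE0000000 is sent as address_high = 0x0000000F via
 * SMU_MSG_SetSystemVirtualDramAddrHigh and address_low = 0xE0000000 via
 * SMU_MSG_SetSystemVirtualDramAddrLow, since each SMU message carries
 * only a single 32-bit parameter.
 */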

static int smu_v11_0_check_pptable(struct smu_context *smu)
{
	int ret;

	ret = smu_check_powerplay_table(smu);
	return ret;
}

static int smu_v11_0_parse_pptable(struct smu_context *smu)
{
	int ret;

	struct smu_table_context *table_context = &smu->smu_table;
	struct smu_table *table = &table_context->tables[SMU_TABLE_PPTABLE];

	if (table_context->driver_pptable)
		return -EINVAL;

	table_context->driver_pptable = kzalloc(table->size, GFP_KERNEL);
	if (!table_context->driver_pptable)
		return -ENOMEM;

	ret = smu_store_powerplay_table(smu);
	if (ret)
		return -EINVAL;

	ret = smu_append_powerplay_table(smu);

	return ret;
}

static int smu_v11_0_populate_smc_pptable(struct smu_context *smu)
{
	int ret;

	ret = smu_set_default_dpm_table(smu);

	return ret;
}

static int smu_v11_0_write_pptable(struct smu_context *smu)
{
	struct smu_table_context *table_context = &smu->smu_table;
	int ret = 0;

	ret = smu_update_table(smu, SMU_TABLE_PPTABLE, 0,
			       table_context->driver_pptable, true);

	return ret;
}

static int smu_v11_0_write_watermarks_table(struct smu_context *smu)
{
	int ret = 0;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *table = NULL;

	table = &smu_table->tables[SMU_TABLE_WATERMARKS];
	if (!table->cpu_addr)
		return -EINVAL;

	ret = smu_update_table(smu, SMU_TABLE_WATERMARKS, 0, table->cpu_addr,
			       true);

	return ret;
}

static int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk)
{
	int ret;

	ret = smu_send_smc_msg_with_param(smu,
					  SMU_MSG_SetMinDeepSleepDcefclk, clk);
	if (ret)
		pr_err("SMU11 attempt to set divider for DCEFCLK failed!");

	return ret;
}

static int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu)
{
	struct smu_table_context *table_context = &smu->smu_table;

	if (!smu->pm_enabled)
		return 0;
	if (!table_context)
		return -EINVAL;

	return smu_set_deep_sleep_dcefclk(smu,
					  table_context->boot_values.dcefclk / 100);
}

static int smu_v11_0_set_tool_table_location(struct smu_context *smu)
{
	int ret = 0;
	struct smu_table *tool_table = &smu->smu_table.tables[SMU_TABLE_PMSTATUSLOG];

	if (tool_table->mc_address) {
		ret = smu_send_smc_msg_with_param(smu,
						  SMU_MSG_SetToolsDramAddrHigh,
						  upper_32_bits(tool_table->mc_address));
		if (!ret)
			ret = smu_send_smc_msg_with_param(smu,
							  SMU_MSG_SetToolsDramAddrLow,
							  lower_32_bits(tool_table->mc_address));
	}

	return ret;
}

static int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count)
{
	int ret = 0;

	if (!smu->pm_enabled)
		return ret;

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, count);
	return ret;
}

static int smu_v11_0_set_allowed_mask(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;
	uint32_t feature_mask[2];

	mutex_lock(&feature->mutex);
	if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) || feature->feature_num < 64) {
		/* an empty or undersized allowed bitmap is an error, not a no-op */
		ret = -EINVAL;
		goto failed;
	}

	bitmap_copy((unsigned long *)feature_mask, feature->allowed, 64);

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
					  feature_mask[1]);
	if (ret)
		goto failed;

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskLow,
					  feature_mask[0]);
	if (ret)
		goto failed;

failed:
	mutex_unlock(&feature->mutex);
	return ret;
}
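/*
 * Illustration of the split above (hypothetical mask): the 64 allowed
 * feature bits are copied into feature_mask[2]; a bitmap with only
 * bit 35 set gives feature_mask[1] = 0x00000008 (sent via
 * SMU_MSG_SetAllowedFeaturesMaskHigh) and feature_mask[0] = 0x00000000
 * (sent via SMU_MSG_SetAllowedFeaturesMaskLow).
 */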

static int smu_v11_0_get_enabled_mask(struct smu_context *smu,
				      uint32_t *feature_mask, uint32_t num)
{
	uint32_t feature_mask_high = 0, feature_mask_low = 0;
	int ret = 0;

	if (!feature_mask || num < 2)
		return -EINVAL;

	ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh);
	if (ret)
		return ret;
	ret = smu_read_smc_arg(smu, &feature_mask_high);
	if (ret)
		return ret;

	ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow);
	if (ret)
		return ret;
	ret = smu_read_smc_arg(smu, &feature_mask_low);
	if (ret)
		return ret;

	feature_mask[0] = feature_mask_low;
	feature_mask[1] = feature_mask_high;

	return ret;
}

static int smu_v11_0_system_features_control(struct smu_context *smu,
					     bool en)
{
	struct smu_feature *feature = &smu->smu_feature;
	uint32_t feature_mask[2];
	int ret = 0;

	if (smu->pm_enabled) {
		ret = smu_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
					     SMU_MSG_DisableAllSmuFeatures));
		if (ret)
			return ret;
	}

	ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
	if (ret)
		return ret;

	bitmap_copy(feature->enabled, (unsigned long *)&feature_mask,
		    feature->feature_num);
	bitmap_copy(feature->supported, (unsigned long *)&feature_mask,
		    feature->feature_num);

	return ret;
}

static int smu_v11_0_notify_display_change(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled)
		return ret;
	if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
	    smu->adev->gmc.vram_type == AMDGPU_VRAM_TYPE_HBM)
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1);

	return ret;
}

static int
smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
				    enum smu_clk_type clock_select)
{
	int ret = 0;
	int clk_id;

	if (!smu->pm_enabled)
		return ret;

	if ((smu_msg_get_index(smu, SMU_MSG_GetDcModeMaxDpmFreq) < 0) ||
	    (smu_msg_get_index(smu, SMU_MSG_GetMaxDpmFreq) < 0))
		return 0;

	clk_id = smu_clk_get_index(smu, clock_select);
	if (clk_id < 0)
		return -EINVAL;

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDcModeMaxDpmFreq,
					  clk_id << 16);
	if (ret) {
		pr_err("[GetMaxSustainableClock] Failed to get max DC clock from SMC!");
		return ret;
	}

	ret = smu_read_smc_arg(smu, clock);
	if (ret)
		return ret;

	if (*clock != 0)
		return 0;

	/* if the DC limit is zero, fall back to the AC limit */
	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq,
					  clk_id << 16);
	if (ret) {
		pr_err("[GetMaxSustainableClock] failed to get max AC clock from SMC!");
		return ret;
	}

	ret = smu_read_smc_arg(smu, clock);

	return ret;
}

static int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
{
	struct smu_11_0_max_sustainable_clocks *max_sustainable_clocks;
	int ret = 0;

	if (!smu->smu_table.max_sustainable_clocks)
		max_sustainable_clocks = kzalloc(sizeof(struct smu_11_0_max_sustainable_clocks),
						 GFP_KERNEL);
	else
		max_sustainable_clocks = smu->smu_table.max_sustainable_clocks;

	if (!max_sustainable_clocks)
		return -ENOMEM;

	smu->smu_table.max_sustainable_clocks = (void *)max_sustainable_clocks;

	max_sustainable_clocks->uclock = smu->smu_table.boot_values.uclk / 100;
	max_sustainable_clocks->soc_clock = smu->smu_table.boot_values.socclk / 100;
	max_sustainable_clocks->dcef_clock = smu->smu_table.boot_values.dcefclk / 100;
	max_sustainable_clocks->display_clock = 0xFFFFFFFF;
	max_sustainable_clocks->phy_clock = 0xFFFFFFFF;
	max_sustainable_clocks->pixel_clock = 0xFFFFFFFF;

	if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->uclock),
							  SMU_UCLK);
		if (ret) {
			pr_err("[%s] failed to get max UCLK from SMC!",
			       __func__);
			return ret;
		}
	}

	if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->soc_clock),
							  SMU_SOCCLK);
		if (ret) {
			pr_err("[%s] failed to get max SOCCLK from SMC!",
			       __func__);
			return ret;
		}
	}

	if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->dcef_clock),
							  SMU_DCEFCLK);
		if (ret) {
			pr_err("[%s] failed to get max DCEFCLK from SMC!",
			       __func__);
			return ret;
		}

		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->display_clock),
							  SMU_DISPCLK);
		if (ret) {
			pr_err("[%s] failed to get max DISPCLK from SMC!",
			       __func__);
			return ret;
		}
		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->phy_clock),
							  SMU_PHYCLK);
		if (ret) {
			pr_err("[%s] failed to get max PHYCLK from SMC!",
			       __func__);
			return ret;
		}
		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->pixel_clock),
							  SMU_PIXCLK);
		if (ret) {
			pr_err("[%s] failed to get max PIXCLK from SMC!",
			       __func__);
			return ret;
		}
	}

	if (max_sustainable_clocks->soc_clock < max_sustainable_clocks->uclock)
		max_sustainable_clocks->uclock = max_sustainable_clocks->soc_clock;

	return 0;
}

static int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
{
	int ret = 0;

	if (n > smu->default_power_limit) {
		pr_err("New power limit is over the max allowed %d\n",
		       smu->default_power_limit);
		return -EINVAL;
	}

	if (n == 0)
		n = smu->default_power_limit;

	if (!smu_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
		pr_err("Setting new power limit is not supported!\n");
		return -EOPNOTSUPP;
	}

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n);
	if (ret) {
		pr_err("[%s] Set power limit Failed!\n", __func__);
		return ret;
	}
	smu->power_limit = n;

	return 0;
}

static int smu_v11_0_get_current_clk_freq(struct smu_context *smu,
					  enum smu_clk_type clk_id,
					  uint32_t *value)
{
	int ret = 0;
	uint32_t freq = 0;
	int asic_clk_id;

	if (clk_id >= SMU_CLK_COUNT || !value)
		return -EINVAL;

	asic_clk_id = smu_clk_get_index(smu, clk_id);
	if (asic_clk_id < 0)
		return -EINVAL;

	if (smu_msg_get_index(smu, SMU_MSG_GetDpmClockFreq) < 0) {
		ret = smu_get_current_clk_freq_by_table(smu, clk_id, &freq);
	} else {
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmClockFreq,
						  (asic_clk_id << 16));
		if (ret)
			return ret;

		ret = smu_read_smc_arg(smu, &freq);
		if (ret)
			return ret;
	}

	freq *= 100;
	*value = freq;

	return ret;
}

static int smu_v11_0_set_thermal_range(struct smu_context *smu,
				       struct smu_temperature_range range)
{
	struct amdgpu_device *adev = smu->adev;
	int low = SMU_THERMAL_MINIMUM_ALERT_TEMP;
	int high = SMU_THERMAL_MAXIMUM_ALERT_TEMP;
	uint32_t val;

	low = max(SMU_THERMAL_MINIMUM_ALERT_TEMP,
		  range.min / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES);
	high = min(SMU_THERMAL_MAXIMUM_ALERT_TEMP,
		   range.max / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES);

	if (low > high)
		return -EINVAL;

	val = RREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL);
	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 0);
	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 0);
	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high & 0xff));
	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low & 0xff));
	val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);

	WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);

	return 0;
}

static int smu_v11_0_enable_thermal_alert(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t val = 0;

	val |= (1 << THM_THERMAL_INT_ENA__THERM_INTH_CLR__SHIFT);
	val |= (1 << THM_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT);
	val |= (1 << THM_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT);

	WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, val);

	return 0;
}

static int smu_v11_0_start_thermal_control(struct smu_context *smu)
{
	int ret = 0;
	struct smu_temperature_range range;
	struct amdgpu_device *adev = smu->adev;

	if (!smu->pm_enabled)
		return ret;

	memcpy(&range, &smu11_thermal_policy[0], sizeof(struct smu_temperature_range));

	ret = smu_get_thermal_temperature_range(smu, &range);
	if (ret)
		return ret;

	if (smu->smu_table.thermal_controller_type) {
		ret = smu_v11_0_set_thermal_range(smu, range);
		if (ret)
			return ret;

		ret = smu_v11_0_enable_thermal_alert(smu);
		if (ret)
			return ret;

		ret = smu_set_thermal_fan_table(smu);
		if (ret)
			return ret;
	}

	adev->pm.dpm.thermal.min_temp = range.min;
	adev->pm.dpm.thermal.max_temp = range.max;
	adev->pm.dpm.thermal.max_edge_emergency_temp = range.edge_emergency_max;
	adev->pm.dpm.thermal.min_hotspot_temp = range.hotspot_min;
	adev->pm.dpm.thermal.max_hotspot_crit_temp = range.hotspot_crit_max;
	adev->pm.dpm.thermal.max_hotspot_emergency_temp = range.hotspot_emergency_max;
	adev->pm.dpm.thermal.min_mem_temp = range.mem_min;
	adev->pm.dpm.thermal.max_mem_crit_temp = range.mem_crit_max;
	adev->pm.dpm.thermal.max_mem_emergency_temp = range.mem_emergency_max;

	return ret;
}

static uint16_t convert_to_vddc(uint8_t vid)
{
	return (uint16_t) ((6200 - (vid * 25)) / SMU11_VOLTAGE_SCALE);
}
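/*
 * Worked example for convert_to_vddc() (an SVI2-style telemetry encoding,
 * as implied by the constants above): the raw VID counts 6.25 mV steps
 * below 1.55 V, and 6200 is 1.55 V expressed in 0.25 mV units, so with
 * SMU11_VOLTAGE_SCALE == 4 the result is in mV.  vid = 0 gives
 * (6200 - 0) / 4 = 1550 mV; vid = 40 gives (6200 - 1000) / 4 = 1300 mV.
 */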

static int smu_v11_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t vdd = 0, val_vid = 0;

	if (!value)
		return -EINVAL;

	val_vid = (RREG32_SOC15(SMUIO, 0, mmSMUSVI0_TEL_PLANE0) &
		   SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR_MASK) >>
		  SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR__SHIFT;

	vdd = (uint32_t)convert_to_vddc((uint8_t)val_vid);

	*value = vdd;

	return 0;
}

static int smu_v11_0_read_sensor(struct smu_context *smu,
				 enum amd_pp_sensors sensor,
				 void *data, uint32_t *size)
{
	int ret = 0;

	if (!data || !size)
		return -EINVAL;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_GFX_MCLK:
		ret = smu_get_current_clk_freq(smu, SMU_UCLK, (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		ret = smu_get_current_clk_freq(smu, SMU_GFXCLK, (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VDDGFX:
		ret = smu_v11_0_get_gfx_vdd(smu, (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
		*(uint32_t *)data = 0;
		*size = 4;
		break;
	default:
		ret = smu_common_read_sensor(smu, sensor, data, size);
		break;
	}

	if (ret)
		*size = 0;

	return ret;
}

static int
smu_v11_0_display_clock_voltage_request(struct smu_context *smu,
					struct pp_display_clock_request
					*clock_req)
{
	enum amd_pp_clock_type clk_type = clock_req->clock_type;
	int ret = 0;
	enum smu_clk_type clk_select = 0;
	uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;

	if (!smu->pm_enabled)
		return -EINVAL;

	if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) ||
	    smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
		switch (clk_type) {
		case amd_pp_dcef_clock:
			clk_select = SMU_DCEFCLK;
			break;
		case amd_pp_disp_clock:
			clk_select = SMU_DISPCLK;
			break;
		case amd_pp_pixel_clock:
			clk_select = SMU_PIXCLK;
			break;
		case amd_pp_phy_clock:
			clk_select = SMU_PHYCLK;
			break;
		case amd_pp_mem_clock:
			clk_select = SMU_UCLK;
			break;
		default:
			pr_info("[%s] Invalid Clock Type!", __func__);
			ret = -EINVAL;
			break;
		}

		if (ret)
			goto failed;

		if (clk_select == SMU_UCLK && smu->disable_uclk_switch)
			return 0;

		mutex_lock(&smu->mutex);
		ret = smu_set_hard_freq_range(smu, clk_select, clk_freq, 0);
		mutex_unlock(&smu->mutex);

		if (clk_select == SMU_UCLK)
			smu->hard_min_uclk_req_from_dal = clk_freq;
	}

failed:
	return ret;
}

static int
smu_v11_0_set_watermarks_for_clock_ranges(struct smu_context *smu, struct
					  dm_pp_wm_sets_with_clock_ranges_soc15
					  *clock_ranges)
{
	int ret = 0;
	struct smu_table *watermarks = &smu->smu_table.tables[SMU_TABLE_WATERMARKS];
	void *table = watermarks->cpu_addr;

	if (!smu->disable_watermark &&
	    smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
	    smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
		smu_set_watermarks_table(smu, table, clock_ranges);
		smu->watermarks_bitmap |= WATERMARKS_EXIST;
		smu->watermarks_bitmap &= ~WATERMARKS_LOADED;
	}

	return ret;
}

static int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
{
	int ret = 0;
	struct amdgpu_device *adev = smu->adev;

	switch (adev->asic_type) {
	case CHIP_VEGA20:
		break;
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
			return 0;
		mutex_lock(&smu->mutex);
		if (enable)
			ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff);
		else
			ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff);
		mutex_unlock(&smu->mutex);
		break;
	default:
		break;
	}

	return ret;
}

static uint32_t
smu_v11_0_get_fan_control_mode(struct smu_context *smu)
{
	if (!smu_feature_is_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT))
		return AMD_FAN_CTRL_MANUAL;
	else
		return AMD_FAN_CTRL_AUTO;
}

static int
smu_v11_0_auto_fan_control(struct smu_context *smu, bool auto_fan_control)
{
	int ret = 0;

	if (!smu_feature_is_supported(smu, SMU_FEATURE_FAN_CONTROL_BIT))
		return 0;

	ret = smu_feature_set_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT, auto_fan_control);
	if (ret)
		pr_err("[%s]%s smc FAN CONTROL feature failed!",
		       __func__, (auto_fan_control ? "Start" : "Stop"));

	return ret;
}

static int
smu_v11_0_set_fan_static_mode(struct smu_context *smu, uint32_t mode)
{
	struct amdgpu_device *adev = smu->adev;

	WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
				   CG_FDO_CTRL2, TMIN, 0));
	WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
				   CG_FDO_CTRL2, FDO_PWM_MODE, mode));

	return 0;
}

static int
smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t duty100, duty;
	uint64_t tmp64;

	if (speed > 100)
		speed = 100;

	if (smu_v11_0_auto_fan_control(smu, 0))
		return -EINVAL;

	duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1),
				CG_FDO_CTRL1, FMAX_DUTY100);
	if (!duty100)
		return -EINVAL;

	tmp64 = (uint64_t)speed * duty100;
	do_div(tmp64, 100);
	duty = (uint32_t)tmp64;

	WREG32_SOC15(THM, 0, mmCG_FDO_CTRL0,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL0),
				   CG_FDO_CTRL0, FDO_STATIC_DUTY, duty));

	return smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC);
}
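/*
 * Duty-cycle arithmetic above, with illustrative values: if the fan
 * controller reports FMAX_DUTY100 (duty100) as 255, a request of
 * speed = 50 percent programs duty = 50 * 255 / 100 = 127 into
 * FDO_STATIC_DUTY; the 64-bit intermediate avoids overflow before the
 * do_div().
 */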

static int
smu_v11_0_set_fan_control_mode(struct smu_context *smu,
			       uint32_t mode)
{
	int ret = 0;

	switch (mode) {
	case AMD_FAN_CTRL_NONE:
		ret = smu_v11_0_set_fan_speed_percent(smu, 100);
		break;
	case AMD_FAN_CTRL_MANUAL:
		ret = smu_v11_0_auto_fan_control(smu, 0);
		break;
	case AMD_FAN_CTRL_AUTO:
		ret = smu_v11_0_auto_fan_control(smu, 1);
		break;
	default:
		break;
	}

	if (ret) {
		pr_err("[%s]Set fan control mode failed!", __func__);
		return -EINVAL;
	}

	return ret;
}

static int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
				       uint32_t speed)
{
	struct amdgpu_device *adev = smu->adev;
	int ret;
	uint32_t tach_period, crystal_clock_freq;

	if (!speed)
		return -EINVAL;

	mutex_lock(&(smu->mutex));
	ret = smu_v11_0_auto_fan_control(smu, 0);
	if (ret)
		goto set_fan_speed_rpm_failed;

	crystal_clock_freq = amdgpu_asic_get_xclk(adev);
	tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
	WREG32_SOC15(THM, 0, mmCG_TACH_CTRL,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL),
				   CG_TACH_CTRL, TARGET_PERIOD,
				   tach_period));

	ret = smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC_RPM);

set_fan_speed_rpm_failed:
	mutex_unlock(&(smu->mutex));
	return ret;
}
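/*
 * Tach-period arithmetic above, with illustrative numbers (assuming
 * amdgpu_asic_get_xclk() reports the reference clock in 10 kHz units,
 * so "* 10000" converts to Hz, and the factor of 8 matches the tach
 * controller's edges-per-revolution scaling): for a 25 MHz reference
 * (crystal_clock_freq = 2500) and a target of 3000 RPM, tach_period =
 * 60 * 2500 * 10000 / (8 * 3000) = 62500 reference-clock ticks.  Note
 * the 32-bit intermediate product can overflow for reference clocks
 * above roughly 71 MHz.
 */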

#define XGMI_STATE_D0 1
#define XGMI_STATE_D3 0

static int smu_v11_0_set_xgmi_pstate(struct smu_context *smu,
				     uint32_t pstate)
{
	int ret = 0;

	mutex_lock(&(smu->mutex));
	ret = smu_send_smc_msg_with_param(smu,
					  SMU_MSG_SetXgmiMode,
					  pstate ? XGMI_STATE_D0 : XGMI_STATE_D3);
	mutex_unlock(&(smu->mutex));
	return ret;
}

#define THM_11_0__SRCID__THM_DIG_THERM_L2H	0
#define THM_11_0__SRCID__THM_DIG_THERM_H2L	1

static int smu_v11_0_irq_process(struct amdgpu_device *adev,
				 struct amdgpu_irq_src *source,
				 struct amdgpu_iv_entry *entry)
{
	uint32_t client_id = entry->client_id;
	uint32_t src_id = entry->src_id;

	if (client_id == SOC15_IH_CLIENTID_THM) {
		switch (src_id) {
		case THM_11_0__SRCID__THM_DIG_THERM_L2H:
			pr_warn("GPU over temperature range detected on PCIe %d:%d.%d!\n",
				PCI_BUS_NUM(adev->pdev->devfn),
				PCI_SLOT(adev->pdev->devfn),
				PCI_FUNC(adev->pdev->devfn));
			break;
		case THM_11_0__SRCID__THM_DIG_THERM_H2L:
			pr_warn("GPU under temperature range detected on PCIe %d:%d.%d!\n",
				PCI_BUS_NUM(adev->pdev->devfn),
				PCI_SLOT(adev->pdev->devfn),
				PCI_FUNC(adev->pdev->devfn));
			break;
		default:
			pr_warn("GPU thermal interrupt with unknown src id (%d), detected on PCIe %d:%d.%d!\n",
				src_id,
				PCI_BUS_NUM(adev->pdev->devfn),
				PCI_SLOT(adev->pdev->devfn),
				PCI_FUNC(adev->pdev->devfn));
			break;
		}
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs smu_v11_0_irq_funcs =
{
	.process = smu_v11_0_irq_process,
};

static int smu_v11_0_register_irq_handler(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct amdgpu_irq_src *irq_src = smu->irq_source;
	int ret = 0;

	if (irq_src)
		return 0;

	irq_src = kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL);
	if (!irq_src)
		return -ENOMEM;
	smu->irq_source = irq_src;

	irq_src->funcs = &smu_v11_0_irq_funcs;

	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM,
				THM_11_0__SRCID__THM_DIG_THERM_L2H,
				irq_src);
	if (ret)
		return ret;

	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM,
				THM_11_0__SRCID__THM_DIG_THERM_H2L,
				irq_src);
	if (ret)
		return ret;

	return ret;
}

static int smu_v11_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
						      struct pp_smu_nv_clock_table *max_clocks)
{
	struct smu_table_context *table_context = &smu->smu_table;
	struct smu_11_0_max_sustainable_clocks *sustainable_clocks = NULL;

	if (!max_clocks || !table_context->max_sustainable_clocks)
		return -EINVAL;

	sustainable_clocks = table_context->max_sustainable_clocks;

	max_clocks->dcfClockInKhz =
		(unsigned int) sustainable_clocks->dcef_clock * 1000;
	max_clocks->displayClockInKhz =
		(unsigned int) sustainable_clocks->display_clock * 1000;
	max_clocks->phyClockInKhz =
		(unsigned int) sustainable_clocks->phy_clock * 1000;
	max_clocks->pixelClockInKhz =
		(unsigned int) sustainable_clocks->pixel_clock * 1000;
	max_clocks->uClockInKhz =
		(unsigned int) sustainable_clocks->uclock * 1000;
	max_clocks->socClockInKhz =
		(unsigned int) sustainable_clocks->soc_clock * 1000;
	max_clocks->dscClockInKhz = 0;
	max_clocks->dppClockInKhz = 0;
	max_clocks->fabricClockInKhz = 0;

	return 0;
}

static int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu)
{
	int ret = 0;

	mutex_lock(&smu->mutex);
	ret = smu_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME);
	mutex_unlock(&smu->mutex);

	return ret;
}

static int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu, enum smu_v11_0_baco_seq baco_seq)
{
	return smu_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq);
}

static bool smu_v11_0_baco_is_support(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_baco_context *smu_baco = &smu->smu_baco;
	uint32_t val;
	bool baco_support;

	mutex_lock(&smu_baco->mutex);
	baco_support = smu_baco->platform_support;
	mutex_unlock(&smu_baco->mutex);

	if (!baco_support)
		return false;

	if (!smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
		return false;

	val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0);
	if (val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK)
		return true;

	return false;
}

static enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu)
{
	struct smu_baco_context *smu_baco = &smu->smu_baco;
	enum smu_baco_state baco_state;

	mutex_lock(&smu_baco->mutex);
	baco_state = smu_baco->state;
	mutex_unlock(&smu_baco->mutex);

	return baco_state;
}

static int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
{
	struct smu_baco_context *smu_baco = &smu->smu_baco;
	int ret = 0;

	if (smu_v11_0_baco_get_state(smu) == state)
		return 0;

	mutex_lock(&smu_baco->mutex);

	if (state == SMU_BACO_STATE_ENTER)
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, BACO_SEQ_BACO);
	else
		ret = smu_send_smc_msg(smu, SMU_MSG_ExitBaco);
	if (ret)
		goto out;

	smu_baco->state = state;
out:
	mutex_unlock(&smu_baco->mutex);
	return ret;
}

static int smu_v11_0_baco_reset(struct smu_context *smu)
{
	int ret = 0;

	ret = smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO);
	if (ret)
		return ret;

	ret = smu_v11_0_baco_set_state(smu, SMU_BACO_STATE_ENTER);
	if (ret)
		return ret;

	msleep(10);

	ret = smu_v11_0_baco_set_state(smu, SMU_BACO_STATE_EXIT);
	if (ret)
		return ret;

	return ret;
}

static int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
					   uint32_t *min, uint32_t *max)
{
	int ret = 0, clk_id = 0;
	uint32_t param = 0;

	mutex_lock(&smu->mutex);
	clk_id = smu_clk_get_index(smu, clk_type);
	if (clk_id < 0) {
		ret = -EINVAL;
		goto failed;
	}
	param = (clk_id & 0xffff) << 16;

	if (max) {
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, param);
		if (ret)
			goto failed;
		ret = smu_read_smc_arg(smu, max);
		if (ret)
			goto failed;
	}

	if (min) {
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param);
		if (ret)
			goto failed;
		ret = smu_read_smc_arg(smu, min);
		if (ret)
			goto failed;
	}

failed:
	mutex_unlock(&smu->mutex);
	return ret;
}
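/*
 * Usage sketch for the helper above (hypothetical caller, for
 * illustration only): a ppt backend wanting the full GFXCLK DPM range
 * would do
 *
 *	uint32_t min_freq, max_freq;
 *	ret = smu_v11_0_get_dpm_ultimate_freq(smu, SMU_GFXCLK,
 *					      &min_freq, &max_freq);
 *
 * Either pointer may be NULL to skip that bound, since the min and max
 * queries are two separate SMU messages.
 */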

static const struct smu_funcs smu_v11_0_funcs = {
	.init_microcode = smu_v11_0_init_microcode,
	.load_microcode = smu_v11_0_load_microcode,
	.check_fw_status = smu_v11_0_check_fw_status,
	.check_fw_version = smu_v11_0_check_fw_version,
	.send_smc_msg = smu_v11_0_send_msg,
	.send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
	.read_smc_arg = smu_v11_0_read_arg,
	.setup_pptable = smu_v11_0_setup_pptable,
	.init_smc_tables = smu_v11_0_init_smc_tables,
	.fini_smc_tables = smu_v11_0_fini_smc_tables,
	.init_power = smu_v11_0_init_power,
	.fini_power = smu_v11_0_fini_power,
	.get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
	.get_clk_info_from_vbios = smu_v11_0_get_clk_info_from_vbios,
	.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
	.check_pptable = smu_v11_0_check_pptable,
	.parse_pptable = smu_v11_0_parse_pptable,
	.populate_smc_tables = smu_v11_0_populate_smc_pptable,
	.write_pptable = smu_v11_0_write_pptable,
	.write_watermarks_table = smu_v11_0_write_watermarks_table,
	.set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep,
	.set_tool_table_location = smu_v11_0_set_tool_table_location,
	.init_display_count = smu_v11_0_init_display_count,
	.set_allowed_mask = smu_v11_0_set_allowed_mask,
	.get_enabled_mask = smu_v11_0_get_enabled_mask,
	.system_features_control = smu_v11_0_system_features_control,
	.notify_display_change = smu_v11_0_notify_display_change,
	.set_power_limit = smu_v11_0_set_power_limit,
	.get_current_clk_freq = smu_v11_0_get_current_clk_freq,
	.init_max_sustainable_clocks = smu_v11_0_init_max_sustainable_clocks,
	.start_thermal_control = smu_v11_0_start_thermal_control,
	.read_sensor = smu_v11_0_read_sensor,
	.set_deep_sleep_dcefclk = smu_v11_0_set_deep_sleep_dcefclk,
	.display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
	.set_watermarks_for_clock_ranges = smu_v11_0_set_watermarks_for_clock_ranges,
	.get_fan_control_mode = smu_v11_0_get_fan_control_mode,
	.set_fan_control_mode = smu_v11_0_set_fan_control_mode,
	.set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
	.set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
	.set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
	.gfx_off_control = smu_v11_0_gfx_off_control,
	.register_irq_handler = smu_v11_0_register_irq_handler,
	.set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme,
	.get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc,
	.baco_is_support = smu_v11_0_baco_is_support,
	.baco_get_state = smu_v11_0_baco_get_state,
	.baco_set_state = smu_v11_0_baco_set_state,
	.baco_reset = smu_v11_0_baco_reset,
	.get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq,
};

void smu_v11_0_set_smu_funcs(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	smu->funcs = &smu_v11_0_funcs;
	switch (adev->asic_type) {
	case CHIP_VEGA20:
		vega20_set_ppt_funcs(smu);
		break;
	case CHIP_ARCTURUS:
		arcturus_set_ppt_funcs(smu);
		break;
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		navi10_set_ppt_funcs(smu);
		break;
	default:
		pr_warn("Unknown asic for smu11\n");
	}
}