root/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c

DEFINITIONS

This source file includes the following definitions.
  1. smu_get_message_name
  2. smu_get_feature_name
  3. smu_sys_get_pp_feature_mask
  4. smu_sys_set_pp_feature_mask
  5. smu_get_smc_version
  6. smu_set_soft_freq_range
  7. smu_set_hard_freq_range
  8. smu_get_dpm_freq_range
  9. smu_get_dpm_freq_by_index
  10. smu_get_dpm_level_count
  11. smu_clk_dpm_is_enabled
  12. smu_dpm_set_power_gate
  13. smu_get_current_power_state
  14. smu_get_power_num_states
  15. smu_common_read_sensor
  16. smu_update_table
  17. is_support_sw_smu
  18. is_support_sw_smu_xgmi
  19. smu_sys_get_pp_table
  20. smu_sys_set_pp_table
  21. smu_feature_init_dpm
  22. smu_feature_update_enable_state
  23. smu_feature_is_enabled
  24. smu_feature_set_enabled
  25. smu_feature_is_supported
  26. smu_feature_set_supported
  27. smu_set_funcs
  28. smu_early_init
  29. smu_late_init
  30. smu_get_atom_data_table
  31. smu_initialize_pptable
  32. smu_smc_table_sw_init
  33. smu_smc_table_sw_fini
  34. smu_sw_init
  35. smu_sw_fini
  36. smu_init_fb_allocations
  37. smu_fini_fb_allocations
  38. smu_override_pcie_parameters
  39. smu_smc_table_hw_init
  40. smu_alloc_memory_pool
  41. smu_free_memory_pool
  42. smu_hw_init
  43. smu_hw_fini
  44. smu_reset
  45. smu_suspend
  46. smu_resume
  47. smu_display_configuration_change
  48. smu_get_clock_info
  49. smu_get_current_clocks
  50. smu_set_clockgating_state
  51. smu_set_powergating_state
  52. smu_enable_umd_pstate
  53. smu_default_set_performance_level
  54. smu_adjust_power_state_dynamic
  55. smu_handle_task
  56. smu_switch_power_profile
  57. smu_get_performance_level
  58. smu_force_performance_level
  59. smu_set_display_count

   1 /*
   2  * Copyright 2019 Advanced Micro Devices, Inc.
   3  *
   4  * Permission is hereby granted, free of charge, to any person obtaining a
   5  * copy of this software and associated documentation files (the "Software"),
   6  * to deal in the Software without restriction, including without limitation
   7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8  * and/or sell copies of the Software, and to permit persons to whom the
   9  * Software is furnished to do so, subject to the following conditions:
  10  *
  11  * The above copyright notice and this permission notice shall be included in
  12  * all copies or substantial portions of the Software.
  13  *
  14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20  * OTHER DEALINGS IN THE SOFTWARE.
  21  */
  22 
  23 #include <linux/firmware.h>
  24 
  25 #include "pp_debug.h"
  26 #include "amdgpu.h"
  27 #include "amdgpu_smu.h"
  28 #include "soc15_common.h"
  29 #include "smu_v11_0.h"
  30 #include "smu_v12_0.h"
  31 #include "atom.h"
  32 #include "amd_pcie.h"
  33 
  34 #undef __SMU_DUMMY_MAP
  35 #define __SMU_DUMMY_MAP(type)   #type
   36 static const char *__smu_message_names[] = {
  37         SMU_MESSAGE_TYPES
  38 };
  39 
  40 const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type type)
  41 {
  42         if (type < 0 || type >= SMU_MSG_MAX_COUNT)
  43                 return "unknown smu message";
  44         return __smu_message_names[type];
  45 }
  46 
  47 #undef __SMU_DUMMY_MAP
  48 #define __SMU_DUMMY_MAP(fea)    #fea
   49 static const char *__smu_feature_names[] = {
  50         SMU_FEATURE_MASKS
  51 };
  52 
  53 const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask feature)
  54 {
  55         if (feature < 0 || feature >= SMU_FEATURE_COUNT)
  56                 return "unknown smu feature";
  57         return __smu_feature_names[feature];
  58 }
  59 
  60 size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
  61 {
  62         size_t size = 0;
  63         int ret = 0, i = 0;
  64         uint32_t feature_mask[2] = { 0 };
  65         int32_t feature_index = 0;
  66         uint32_t count = 0;
  67         uint32_t sort_feature[SMU_FEATURE_COUNT];
  68         uint64_t hw_feature_count = 0;
  69 
  70         ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
  71         if (ret)
  72                 goto failed;
  73 
   74         size = sprintf(buf + size, "features high: 0x%08x low: 0x%08x\n",
  75                         feature_mask[1], feature_mask[0]);
  76 
  77         for (i = 0; i < SMU_FEATURE_COUNT; i++) {
  78                 feature_index = smu_feature_get_index(smu, i);
  79                 if (feature_index < 0)
  80                         continue;
  81                 sort_feature[feature_index] = i;
  82                 hw_feature_count++;
  83         }
  84 
  85         for (i = 0; i < hw_feature_count; i++) {
  86                 size += sprintf(buf + size, "%02d. %-20s (%2d) : %s\n",
  87                                count++,
  88                                smu_get_feature_name(smu, sort_feature[i]),
  89                                i,
   90                                smu_feature_is_enabled(smu, sort_feature[i]) ?
  91                                "enabled" : "disabled");
  92         }
  93 
  94 failed:
  95         return size;
  96 }
  97 
  98 int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
  99 {
 100         int ret = 0;
 101         uint32_t feature_mask[2] = { 0 };
 102         uint64_t feature_2_enabled = 0;
 103         uint64_t feature_2_disabled = 0;
 104         uint64_t feature_enables = 0;
 105 
 106         ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
 107         if (ret)
 108                 return ret;
 109 
 110         feature_enables = ((uint64_t)feature_mask[1] << 32 | (uint64_t)feature_mask[0]);
 111 
 112         feature_2_enabled  = ~feature_enables & new_mask;
 113         feature_2_disabled = feature_enables & ~new_mask;
 114 
 115         if (feature_2_enabled) {
 116                 ret = smu_feature_update_enable_state(smu, feature_2_enabled, true);
 117                 if (ret)
 118                         return ret;
 119         }
 120         if (feature_2_disabled) {
 121                 ret = smu_feature_update_enable_state(smu, feature_2_disabled, false);
 122                 if (ret)
 123                         return ret;
 124         }
 125 
 126         return ret;
 127 }
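/*
 * Editor's illustration (not part of the original driver): a minimal,
 * standalone sketch of the delta computation used above. Bits set only in
 * the requested mask get enabled; bits set only in the current mask get
 * disabled. The helper name is hypothetical.
 */
static inline void example_feature_delta(uint64_t enabled, uint64_t requested,
					 uint64_t *to_enable, uint64_t *to_disable)
{
	*to_enable  = ~enabled &  requested; /* requested but not yet enabled */
	*to_disable =  enabled & ~requested; /* enabled but no longer requested */
}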
 128 
 129 int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t *smu_version)
 130 {
 131         int ret = 0;
 132 
 133         if (!if_version && !smu_version)
 134                 return -EINVAL;
 135 
 136         if (if_version) {
 137                 ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion);
 138                 if (ret)
 139                         return ret;
 140 
 141                 ret = smu_read_smc_arg(smu, if_version);
 142                 if (ret)
 143                         return ret;
 144         }
 145 
 146         if (smu_version) {
 147                 ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion);
 148                 if (ret)
 149                         return ret;
 150 
 151                 ret = smu_read_smc_arg(smu, smu_version);
 152                 if (ret)
 153                         return ret;
 154         }
 155 
 156         return ret;
 157 }
 158 
 159 int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
 160                             uint32_t min, uint32_t max)
 161 {
 162         int ret = 0, clk_id = 0;
 163         uint32_t param;
 164 
  165         if (!min && !max)
 166                 return -EINVAL;
 167 
 168         if (!smu_clk_dpm_is_enabled(smu, clk_type))
 169                 return 0;
 170 
 171         clk_id = smu_clk_get_index(smu, clk_type);
 172         if (clk_id < 0)
 173                 return clk_id;
 174 
 175         if (max > 0) {
 176                 param = (uint32_t)((clk_id << 16) | (max & 0xffff));
 177                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
 178                                                   param);
 179                 if (ret)
 180                         return ret;
 181         }
 182 
 183         if (min > 0) {
 184                 param = (uint32_t)((clk_id << 16) | (min & 0xffff));
 185                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
 186                                                   param);
 187                 if (ret)
 188                         return ret;
 189         }
 190 
 191 
 192         return ret;
 193 }
 194 
 195 int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
 196                             uint32_t min, uint32_t max)
 197 {
 198         int ret = 0, clk_id = 0;
 199         uint32_t param;
 200 
  201         if (!min && !max)
 202                 return -EINVAL;
 203 
 204         if (!smu_clk_dpm_is_enabled(smu, clk_type))
 205                 return 0;
 206 
 207         clk_id = smu_clk_get_index(smu, clk_type);
 208         if (clk_id < 0)
 209                 return clk_id;
 210 
 211         if (max > 0) {
 212                 param = (uint32_t)((clk_id << 16) | (max & 0xffff));
 213                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
 214                                                   param);
 215                 if (ret)
 216                         return ret;
 217         }
 218 
 219         if (min > 0) {
 220                 param = (uint32_t)((clk_id << 16) | (min & 0xffff));
 221                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
 222                                                   param);
 223                 if (ret)
 224                         return ret;
 225         }
 226 
 227 
 228         return ret;
 229 }
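/*
 * Editor's illustration (hypothetical helper, not in the driver): both
 * frequency-range functions above encode their message argument with the
 * clock ID in the upper 16 bits and the frequency (in MHz, truncated to
 * 16 bits) in the lower 16 bits.
 */
static inline uint32_t example_pack_clk_freq(int clk_id, uint32_t freq_mhz)
{
	return (uint32_t)((clk_id << 16) | (freq_mhz & 0xffff));
}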
 230 
 231 int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
 232                            uint32_t *min, uint32_t *max)
 233 {
 234         uint32_t clock_limit;
 235         int ret = 0;
 236 
 237         if (!min && !max)
 238                 return -EINVAL;
 239 
 240         if (!smu_clk_dpm_is_enabled(smu, clk_type)) {
 241                 switch (clk_type) {
 242                 case SMU_MCLK:
 243                 case SMU_UCLK:
 244                         clock_limit = smu->smu_table.boot_values.uclk;
 245                         break;
 246                 case SMU_GFXCLK:
 247                 case SMU_SCLK:
 248                         clock_limit = smu->smu_table.boot_values.gfxclk;
 249                         break;
 250                 case SMU_SOCCLK:
 251                         clock_limit = smu->smu_table.boot_values.socclk;
 252                         break;
 253                 default:
 254                         clock_limit = 0;
 255                         break;
 256                 }
 257 
  258                 /* boot clock values are in 10 kHz; convert to MHz */
 259                 if (min)
 260                         *min = clock_limit / 100;
 261                 if (max)
 262                         *max = clock_limit / 100;
 263 
 264                 return 0;
 265         }
 266         /*
  267          * TODO: Have each ASIC (ASIC_ppt funcs) control the callbacks exposed to
  268          * the core driver, with helpers for what is common (SMU_v11_x | SMU_v12_x funcs).
 269          */
 270         ret = smu_get_dpm_ultimate_freq(smu, clk_type, min, max);
 271         return ret;
 272 }
 273 
 274 int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_type,
 275                               uint16_t level, uint32_t *value)
 276 {
 277         int ret = 0, clk_id = 0;
 278         uint32_t param;
 279 
 280         if (!value)
 281                 return -EINVAL;
 282 
 283         if (!smu_clk_dpm_is_enabled(smu, clk_type))
 284                 return 0;
 285 
 286         clk_id = smu_clk_get_index(smu, clk_type);
 287         if (clk_id < 0)
 288                 return clk_id;
 289 
 290         param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));
 291 
  292         ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex,
 293                                           param);
 294         if (ret)
 295                 return ret;
 296 
 297         ret = smu_read_smc_arg(smu, &param);
 298         if (ret)
 299                 return ret;
 300 
  301         /* BIT31:  0 - Fine grained DPM, 1 - Discrete DPM
  302          * (discrete DPM is currently unsupported, so the flag is masked off) */
 303         *value = param & 0x7fffffff;
 304 
 305         return ret;
 306 }
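/*
 * Editor's illustration: the raw GetDpmFreqByIndex result carries a DPM
 * granularity flag in bit 31 (0 = fine grained, 1 = discrete), which the
 * function above masks off. Hypothetical decode helpers:
 */
static inline bool example_is_discrete_dpm(uint32_t raw)
{
	return raw & 0x80000000; /* bit 31: granularity flag */
}

static inline uint32_t example_dpm_value(uint32_t raw)
{
	return raw & 0x7fffffff; /* bits 30:0: frequency (or level count) */
}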
 307 
 308 int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type,
 309                             uint32_t *value)
 310 {
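        /* Editor's note: index 0xff is a special argument; the firmware is
         * expected to return the number of DPM levels for the clock rather
         * than a frequency, which is how the level count is queried here. */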
 311         return smu_get_dpm_freq_by_index(smu, clk_type, 0xff, value);
 312 }
 313 
 314 bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type)
 315 {
 316         enum smu_feature_mask feature_id = 0;
 317 
 318         switch (clk_type) {
 319         case SMU_MCLK:
 320         case SMU_UCLK:
 321                 feature_id = SMU_FEATURE_DPM_UCLK_BIT;
 322                 break;
 323         case SMU_GFXCLK:
 324         case SMU_SCLK:
 325                 feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
 326                 break;
 327         case SMU_SOCCLK:
 328                 feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
 329                 break;
 330         default:
 331                 return true;
 332         }
 333 
  334         if (!smu_feature_is_enabled(smu, feature_id)) {
 335                 return false;
 336         }
 337 
 338         return true;
 339 }
 340 
 341 
 342 int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
 343                            bool gate)
 344 {
 345         int ret = 0;
 346 
 347         switch (block_type) {
 348         case AMD_IP_BLOCK_TYPE_UVD:
 349                 ret = smu_dpm_set_uvd_enable(smu, gate);
 350                 break;
 351         case AMD_IP_BLOCK_TYPE_VCE:
 352                 ret = smu_dpm_set_vce_enable(smu, gate);
 353                 break;
 354         case AMD_IP_BLOCK_TYPE_GFX:
 355                 ret = smu_gfx_off_control(smu, gate);
 356                 break;
 357         case AMD_IP_BLOCK_TYPE_SDMA:
 358                 ret = smu_powergate_sdma(smu, gate);
 359                 break;
 360         default:
 361                 break;
 362         }
 363 
 364         return ret;
 365 }
 366 
 367 enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
 368 {
  369         /* power states are not supported */
 370         return POWER_STATE_TYPE_DEFAULT;
 371 }
 372 
 373 int smu_get_power_num_states(struct smu_context *smu,
 374                              struct pp_states_info *state_info)
 375 {
 376         if (!state_info)
 377                 return -EINVAL;
 378 
  379         /* power states are not supported */
 380         memset(state_info, 0, sizeof(struct pp_states_info));
 381         state_info->nums = 1;
 382         state_info->states[0] = POWER_STATE_TYPE_DEFAULT;
 383 
 384         return 0;
 385 }
 386 
 387 int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
 388                            void *data, uint32_t *size)
 389 {
 390         struct smu_power_context *smu_power = &smu->smu_power;
 391         struct smu_power_gate *power_gate = &smu_power->power_gate;
 392         int ret = 0;
 393 
  394         if (!data || !size)
 395                 return -EINVAL;
 396 
 397         switch (sensor) {
 398         case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
 399                 *((uint32_t *)data) = smu->pstate_sclk;
 400                 *size = 4;
 401                 break;
 402         case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
 403                 *((uint32_t *)data) = smu->pstate_mclk;
 404                 *size = 4;
 405                 break;
 406         case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
 407                 ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2);
 408                 *size = 8;
 409                 break;
 410         case AMDGPU_PP_SENSOR_UVD_POWER:
 411                 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
 412                 *size = 4;
 413                 break;
 414         case AMDGPU_PP_SENSOR_VCE_POWER:
 415                 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
 416                 *size = 4;
 417                 break;
 418         case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
 419                 *(uint32_t *)data = power_gate->vcn_gated ? 0 : 1;
 420                 *size = 4;
 421                 break;
 422         default:
 423                 ret = -EINVAL;
 424                 break;
 425         }
 426 
 427         if (ret)
 428                 *size = 0;
 429 
 430         return ret;
 431 }
 432 
 433 int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int argument,
 434                      void *table_data, bool drv2smu)
 435 {
 436         struct smu_table_context *smu_table = &smu->smu_table;
 437         struct amdgpu_device *adev = smu->adev;
 438         struct smu_table *table = NULL;
 439         int ret = 0;
 440         int table_id = smu_table_get_index(smu, table_index);
 441 
 442         if (!table_data || table_id >= smu_table->table_count || table_id < 0)
 443                 return -EINVAL;
 444 
 445         table = &smu_table->tables[table_index];
 446 
 447         if (drv2smu)
 448                 memcpy(table->cpu_addr, table_data, table->size);
 449 
 450         ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrHigh,
 451                                           upper_32_bits(table->mc_address));
 452         if (ret)
 453                 return ret;
 454         ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrLow,
 455                                           lower_32_bits(table->mc_address));
 456         if (ret)
 457                 return ret;
 458         ret = smu_send_smc_msg_with_param(smu, drv2smu ?
 459                                           SMU_MSG_TransferTableDram2Smu :
 460                                           SMU_MSG_TransferTableSmu2Dram,
 461                                           table_id | ((argument & 0xFFFF) << 16));
 462         if (ret)
 463                 return ret;
 464 
 465         /* flush hdp cache */
 466         adev->nbio_funcs->hdp_flush(adev, NULL);
 467 
 468         if (!drv2smu)
 469                 memcpy(table_data, table->cpu_addr, table->size);
 470 
 471         return ret;
 472 }
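/*
 * Editor's illustration: table transfers hand the 64-bit MC address of the
 * staging buffer to the firmware as two 32-bit message arguments, matching
 * the upper_32_bits()/lower_32_bits() split above. Hypothetical standalone
 * equivalent:
 */
static inline void example_split_dram_addr(uint64_t mc_address,
					   uint32_t *hi, uint32_t *lo)
{
	*hi = (uint32_t)(mc_address >> 32);
	*lo = (uint32_t)(mc_address & 0xffffffff);
}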
 473 
 474 bool is_support_sw_smu(struct amdgpu_device *adev)
 475 {
 476         if (adev->asic_type == CHIP_VEGA20)
  477                 return amdgpu_dpm == 2;
 478         else if (adev->asic_type >= CHIP_ARCTURUS)
 479                 return true;
 480         else
 481                 return false;
 482 }
 483 
 484 bool is_support_sw_smu_xgmi(struct amdgpu_device *adev)
 485 {
 486         if (amdgpu_dpm != 1)
 487                 return false;
 488 
 489         if (adev->asic_type == CHIP_VEGA20)
 490                 return true;
 491 
 492         return false;
 493 }
 494 
 495 int smu_sys_get_pp_table(struct smu_context *smu, void **table)
 496 {
 497         struct smu_table_context *smu_table = &smu->smu_table;
 498 
 499         if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
 500                 return -EINVAL;
 501 
 502         if (smu_table->hardcode_pptable)
 503                 *table = smu_table->hardcode_pptable;
 504         else
 505                 *table = smu_table->power_play_table;
 506 
 507         return smu_table->power_play_table_size;
 508 }
 509 
 510 int smu_sys_set_pp_table(struct smu_context *smu,  void *buf, size_t size)
 511 {
 512         struct smu_table_context *smu_table = &smu->smu_table;
 513         ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
 514         int ret = 0;
 515 
 516         if (!smu->pm_enabled)
 517                 return -EINVAL;
 518         if (header->usStructureSize != size) {
  519                 pr_err("pp table size mismatch!\n");
 520                 return -EIO;
 521         }
 522 
 523         mutex_lock(&smu->mutex);
 524         if (!smu_table->hardcode_pptable)
 525                 smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
 526         if (!smu_table->hardcode_pptable) {
 527                 ret = -ENOMEM;
 528                 goto failed;
 529         }
 530 
 531         memcpy(smu_table->hardcode_pptable, buf, size);
 532         smu_table->power_play_table = smu_table->hardcode_pptable;
 533         smu_table->power_play_table_size = size;
 534         mutex_unlock(&smu->mutex);
 535 
 536         ret = smu_reset(smu);
 537         if (ret)
 538                 pr_info("smu reset failed, ret = %d\n", ret);
 539 
 540         return ret;
 541 
 542 failed:
 543         mutex_unlock(&smu->mutex);
 544         return ret;
 545 }
 546 
 547 int smu_feature_init_dpm(struct smu_context *smu)
 548 {
 549         struct smu_feature *feature = &smu->smu_feature;
 550         int ret = 0;
 551         uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];
 552 
 553         if (!smu->pm_enabled)
 554                 return ret;
 555         mutex_lock(&feature->mutex);
 556         bitmap_zero(feature->allowed, SMU_FEATURE_MAX);
 557         mutex_unlock(&feature->mutex);
 558 
 559         ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
 560                                              SMU_FEATURE_MAX/32);
 561         if (ret)
 562                 return ret;
 563 
 564         mutex_lock(&feature->mutex);
 565         bitmap_or(feature->allowed, feature->allowed,
 566                       (unsigned long *)allowed_feature_mask,
 567                       feature->feature_num);
 568         mutex_unlock(&feature->mutex);
 569 
 570         return ret;
 571 }
 572 int smu_feature_update_enable_state(struct smu_context *smu, uint64_t feature_mask, bool enabled)
 573 {
 574         uint32_t feature_low = 0, feature_high = 0;
 575         int ret = 0;
 576 
 577         if (!smu->pm_enabled)
 578                 return ret;
 579 
 580         feature_low = (feature_mask >> 0 ) & 0xffffffff;
 581         feature_high = (feature_mask >> 32) & 0xffffffff;
 582 
 583         if (enabled) {
 584                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow,
 585                                                   feature_low);
 586                 if (ret)
 587                         return ret;
 588                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh,
 589                                                   feature_high);
 590                 if (ret)
 591                         return ret;
 592 
 593         } else {
 594                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow,
 595                                                   feature_low);
 596                 if (ret)
 597                         return ret;
 598                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh,
 599                                                   feature_high);
 600                 if (ret)
 601                         return ret;
 602 
 603         }
 604 
 605         return ret;
 606 }
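/*
 * Editor's illustration: the 64-bit feature mask is split into two 32-bit
 * halves above because each Enable/DisableSmuFeatures message carries a
 * single 32-bit argument. Hypothetical standalone equivalent:
 */
static inline void example_split_feature_mask(uint64_t mask,
					      uint32_t *low, uint32_t *high)
{
	*low  = mask & 0xffffffff;         /* features 0..31 */
	*high = (mask >> 32) & 0xffffffff; /* features 32..63 */
}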
 607 
 608 int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask)
 609 {
 610         struct amdgpu_device *adev = smu->adev;
 611         struct smu_feature *feature = &smu->smu_feature;
 612         int feature_id;
 613         int ret = 0;
 614 
 615         if (adev->flags & AMD_IS_APU)
 616                 return 1;
 617 
 618         feature_id = smu_feature_get_index(smu, mask);
 619         if (feature_id < 0)
 620                 return 0;
 621 
  622         WARN_ON(feature_id >= feature->feature_num);
 623 
 624         mutex_lock(&feature->mutex);
 625         ret = test_bit(feature_id, feature->enabled);
 626         mutex_unlock(&feature->mutex);
 627 
 628         return ret;
 629 }
 630 
 631 int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask,
 632                             bool enable)
 633 {
 634         struct smu_feature *feature = &smu->smu_feature;
 635         int feature_id;
 636         uint64_t feature_mask = 0;
 637         int ret = 0;
 638 
 639         feature_id = smu_feature_get_index(smu, mask);
 640         if (feature_id < 0)
 641                 return -EINVAL;
 642 
  643         WARN_ON(feature_id >= feature->feature_num);
 644 
 645         feature_mask = 1ULL << feature_id;
 646 
 647         mutex_lock(&feature->mutex);
 648         ret = smu_feature_update_enable_state(smu, feature_mask, enable);
 649         if (ret)
 650                 goto failed;
 651 
 652         if (enable)
 653                 test_and_set_bit(feature_id, feature->enabled);
 654         else
 655                 test_and_clear_bit(feature_id, feature->enabled);
 656 
 657 failed:
 658         mutex_unlock(&feature->mutex);
 659 
 660         return ret;
 661 }
 662 
 663 int smu_feature_is_supported(struct smu_context *smu, enum smu_feature_mask mask)
 664 {
 665         struct smu_feature *feature = &smu->smu_feature;
 666         int feature_id;
 667         int ret = 0;
 668 
 669         feature_id = smu_feature_get_index(smu, mask);
 670         if (feature_id < 0)
 671                 return 0;
 672 
  673         WARN_ON(feature_id >= feature->feature_num);
 674 
 675         mutex_lock(&feature->mutex);
 676         ret = test_bit(feature_id, feature->supported);
 677         mutex_unlock(&feature->mutex);
 678 
 679         return ret;
 680 }
 681 
 682 int smu_feature_set_supported(struct smu_context *smu,
 683                               enum smu_feature_mask mask,
 684                               bool enable)
 685 {
 686         struct smu_feature *feature = &smu->smu_feature;
 687         int feature_id;
 688         int ret = 0;
 689 
 690         feature_id = smu_feature_get_index(smu, mask);
 691         if (feature_id < 0)
 692                 return -EINVAL;
 693 
  694         WARN_ON(feature_id >= feature->feature_num);
 695 
 696         mutex_lock(&feature->mutex);
 697         if (enable)
 698                 test_and_set_bit(feature_id, feature->supported);
 699         else
 700                 test_and_clear_bit(feature_id, feature->supported);
 701         mutex_unlock(&feature->mutex);
 702 
 703         return ret;
 704 }
 705 
 706 static int smu_set_funcs(struct amdgpu_device *adev)
 707 {
 708         struct smu_context *smu = &adev->smu;
 709 
 710         switch (adev->asic_type) {
 711         case CHIP_VEGA20:
 712         case CHIP_NAVI10:
 713         case CHIP_NAVI14:
 714         case CHIP_NAVI12:
 715         case CHIP_ARCTURUS:
 716                 if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
 717                         smu->od_enabled = true;
 718                 smu_v11_0_set_smu_funcs(smu);
 719                 break;
 720         case CHIP_RENOIR:
 721                 if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
 722                         smu->od_enabled = true;
 723                 smu_v12_0_set_smu_funcs(smu);
 724                 break;
 725         default:
 726                 return -EINVAL;
 727         }
 728 
 729         return 0;
 730 }
 731 
 732 static int smu_early_init(void *handle)
 733 {
 734         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 735         struct smu_context *smu = &adev->smu;
 736 
 737         smu->adev = adev;
 738         smu->pm_enabled = !!amdgpu_dpm;
 739         mutex_init(&smu->mutex);
 740 
 741         return smu_set_funcs(adev);
 742 }
 743 
 744 static int smu_late_init(void *handle)
 745 {
 746         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 747         struct smu_context *smu = &adev->smu;
 748 
 749         if (!smu->pm_enabled)
 750                 return 0;
 751 
 752         mutex_lock(&smu->mutex);
 753         smu_handle_task(&adev->smu,
 754                         smu->smu_dpm.dpm_level,
 755                         AMD_PP_TASK_COMPLETE_INIT);
 756         mutex_unlock(&smu->mutex);
 757 
 758         return 0;
 759 }
 760 
 761 int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
 762                             uint16_t *size, uint8_t *frev, uint8_t *crev,
 763                             uint8_t **addr)
 764 {
 765         struct amdgpu_device *adev = smu->adev;
 766         uint16_t data_start;
 767 
 768         if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, table,
 769                                            size, frev, crev, &data_start))
 770                 return -EINVAL;
 771 
 772         *addr = (uint8_t *)adev->mode_info.atom_context->bios + data_start;
 773 
 774         return 0;
 775 }
 776 
 777 static int smu_initialize_pptable(struct smu_context *smu)
 778 {
 779         /* TODO */
 780         return 0;
 781 }
 782 
 783 static int smu_smc_table_sw_init(struct smu_context *smu)
 784 {
 785         int ret;
 786 
 787         ret = smu_initialize_pptable(smu);
 788         if (ret) {
  789                 pr_err("Failed to initialize pptable!\n");
 790                 return ret;
 791         }
 792 
 793         /**
  794          * Create the smu_table structure, and init smc tables such as
  795          * TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, etc.
 796          */
 797         ret = smu_init_smc_tables(smu);
 798         if (ret) {
 799                 pr_err("Failed to init smc tables!\n");
 800                 return ret;
 801         }
 802 
 803         /**
  804          * Create the smu_power_context structure, and allocate the smu_dpm_context
  805          * and context data needed to fill smu_power_context.
 806          */
 807         ret = smu_init_power(smu);
 808         if (ret) {
  809                 pr_err("Failed to init smu power!\n");
 810                 return ret;
 811         }
 812 
 813         return 0;
 814 }
 815 
 816 static int smu_smc_table_sw_fini(struct smu_context *smu)
 817 {
 818         int ret;
 819 
 820         ret = smu_fini_smc_tables(smu);
 821         if (ret) {
  822                 pr_err("Failed to fini smc tables!\n");
 823                 return ret;
 824         }
 825 
 826         return 0;
 827 }
 828 
 829 static int smu_sw_init(void *handle)
 830 {
 831         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 832         struct smu_context *smu = &adev->smu;
 833         int ret;
 834 
 835         smu->pool_size = adev->pm.smu_prv_buffer_size;
 836         smu->smu_feature.feature_num = SMU_FEATURE_MAX;
 837         mutex_init(&smu->smu_feature.mutex);
 838         bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
 839         bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
 840         bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);
 841 
 842         mutex_init(&smu->smu_baco.mutex);
 843         smu->smu_baco.state = SMU_BACO_STATE_EXIT;
 844         smu->smu_baco.platform_support = false;
 845 
 846         mutex_init(&smu->sensor_lock);
 847         mutex_init(&smu->metrics_lock);
 848 
 849         smu->watermarks_bitmap = 0;
 850         smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
 851         smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
 852 
  853         smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
  854         smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
  855         smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
  856         smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
  857         smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
  858         smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
  859         smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;
  860         smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
 861 
 862         smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
 863         smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
 864         smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
 865         smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
 866         smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
 867         smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
 868         smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
 869         smu->display_config = &adev->pm.pm_display_cfg;
 870 
 871         smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
 872         smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
 873         ret = smu_init_microcode(smu);
 874         if (ret) {
 875                 pr_err("Failed to load smu firmware!\n");
 876                 return ret;
 877         }
 878 
 879         ret = smu_smc_table_sw_init(smu);
 880         if (ret) {
 881                 pr_err("Failed to sw init smc table!\n");
 882                 return ret;
 883         }
 884 
 885         ret = smu_register_irq_handler(smu);
 886         if (ret) {
 887                 pr_err("Failed to register smc irq handler!\n");
 888                 return ret;
 889         }
 890 
 891         return 0;
 892 }
 893 
 894 static int smu_sw_fini(void *handle)
 895 {
 896         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 897         struct smu_context *smu = &adev->smu;
 898         int ret;
 899 
 900         kfree(smu->irq_source);
 901         smu->irq_source = NULL;
 902 
 903         ret = smu_smc_table_sw_fini(smu);
 904         if (ret) {
 905                 pr_err("Failed to sw fini smc table!\n");
 906                 return ret;
 907         }
 908 
 909         ret = smu_fini_power(smu);
 910         if (ret) {
  911                 pr_err("Failed to fini smu power!\n");
 912                 return ret;
 913         }
 914 
 915         return 0;
 916 }
 917 
 918 static int smu_init_fb_allocations(struct smu_context *smu)
 919 {
 920         struct amdgpu_device *adev = smu->adev;
 921         struct smu_table_context *smu_table = &smu->smu_table;
 922         struct smu_table *tables = smu_table->tables;
 923         uint32_t table_count = smu_table->table_count;
 924         uint32_t i = 0;
 925         int32_t ret = 0;
 926 
 927         if (table_count <= 0)
 928                 return -EINVAL;
 929 
 930         for (i = 0 ; i < table_count; i++) {
 931                 if (tables[i].size == 0)
 932                         continue;
 933                 ret = amdgpu_bo_create_kernel(adev,
 934                                               tables[i].size,
 935                                               tables[i].align,
 936                                               tables[i].domain,
 937                                               &tables[i].bo,
 938                                               &tables[i].mc_address,
 939                                               &tables[i].cpu_addr);
 940                 if (ret)
 941                         goto failed;
 942         }
 943 
 944         return 0;
 945 failed:
  946         while (i--) {
 947                 if (tables[i].size == 0)
 948                         continue;
 949                 amdgpu_bo_free_kernel(&tables[i].bo,
 950                                       &tables[i].mc_address,
 951                                       &tables[i].cpu_addr);
 952 
 953         }
 954         return ret;
 955 }
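/*
 * Editor's note: the error path above unwinds with "while (i--)" so that
 * only the BOs created before the failure are freed (the failing index i
 * itself is skipped, and index 0 is included). A minimal sketch of the
 * idiom, assuming kzalloc/kfree from linux/slab.h; names are hypothetical:
 */
static int example_alloc_n(void **objs, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		objs[i] = kzalloc(64, GFP_KERNEL);
		if (!objs[i])
			goto unwind;
	}
	return 0;

unwind:
	while (i--) /* frees objs[i-1] .. objs[0] */
		kfree(objs[i]);
	return -ENOMEM;
}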
 956 
 957 static int smu_fini_fb_allocations(struct smu_context *smu)
 958 {
 959         struct smu_table_context *smu_table = &smu->smu_table;
 960         struct smu_table *tables = smu_table->tables;
 961         uint32_t table_count = smu_table->table_count;
 962         uint32_t i = 0;
 963 
 964         if (table_count == 0 || tables == NULL)
 965                 return 0;
 966 
 967         for (i = 0 ; i < table_count; i++) {
 968                 if (tables[i].size == 0)
 969                         continue;
 970                 amdgpu_bo_free_kernel(&tables[i].bo,
 971                                       &tables[i].mc_address,
 972                                       &tables[i].cpu_addr);
 973         }
 974 
 975         return 0;
 976 }
 977 
 978 static int smu_override_pcie_parameters(struct smu_context *smu)
 979 {
 980         struct amdgpu_device *adev = smu->adev;
 981         uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg;
 982         int ret;
 983 
 984         if (adev->flags & AMD_IS_APU)
 985                 return 0;
 986 
 987         if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
 988                 pcie_gen = 3;
 989         else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
 990                 pcie_gen = 2;
 991         else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
 992                 pcie_gen = 1;
 993         else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
 994                 pcie_gen = 0;
 995 
  996         /* Bits 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
  997          * Bits 15:8:  PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
  998          * Bits 7:0:   PCIE lane width, 1 to 7 corresponds to x1 to x32
 999          */
1000         if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
1001                 pcie_width = 6;
1002         else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
1003                 pcie_width = 5;
1004         else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
1005                 pcie_width = 4;
1006         else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
1007                 pcie_width = 3;
1008         else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
1009                 pcie_width = 2;
1010         else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
1011                 pcie_width = 1;
1012 
1013         smu_pcie_arg = (1 << 16) | (pcie_gen << 8) | pcie_width;
1014         ret = smu_send_smc_msg_with_param(smu,
1015                                           SMU_MSG_OverridePcieParameters,
1016                                           smu_pcie_arg);
1017         if (ret)
1018                 pr_err("[%s] Attempt to override pcie params failed!\n", __func__);
1019         return ret;
1020 }
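/*
 * Editor's illustration of the argument layout documented above (bits 31:16
 * LCLK DPM level, bits 15:8 PCIe gen where 0 means GEN1, bits 7:0 lane-width
 * code). The helper and the example values are hypothetical:
 */
static inline uint32_t example_pack_pcie_arg(uint32_t lclk_dpm_level,
					     uint32_t gen, uint32_t width)
{
	return (lclk_dpm_level << 16) | (gen << 8) | width;
}

/* e.g. DPM1, Gen4, x16: example_pack_pcie_arg(1, 3, 6) == 0x10306 */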
1021 
1022 static int smu_smc_table_hw_init(struct smu_context *smu,
1023                                  bool initialize)
1024 {
1025         struct amdgpu_device *adev = smu->adev;
1026         int ret;
1027 
1028         if (smu_is_dpm_running(smu) && adev->in_suspend) {
1029                 pr_info("dpm has been enabled\n");
1030                 return 0;
1031         }
1032 
1033         if (adev->asic_type != CHIP_ARCTURUS) {
1034                 ret = smu_init_display_count(smu, 0);
1035                 if (ret)
1036                         return ret;
1037         }
1038 
1039         if (initialize) {
 1040                 /* get boot_values from vbios to set revision, gfxclk, etc. */
1041                 ret = smu_get_vbios_bootup_values(smu);
1042                 if (ret)
1043                         return ret;
1044 
1045                 ret = smu_setup_pptable(smu);
1046                 if (ret)
1047                         return ret;
1048 
1049                 ret = smu_get_clk_info_from_vbios(smu);
1050                 if (ret)
1051                         return ret;
1052 
1053                 /*
 1054                  * check that the format_revision in the vbios matches the pptable
 1055                  * header version, and that the structure size is not 0.
1056                  */
1057                 ret = smu_check_pptable(smu);
1058                 if (ret)
1059                         return ret;
1060 
1061                 /*
1062                  * allocate vram bos to store smc table contents.
1063                  */
1064                 ret = smu_init_fb_allocations(smu);
1065                 if (ret)
1066                         return ret;
1067 
1068                 /*
 1069                  * Parse the pptable format and fill the PPTable_t smc_pptable in the
 1070                  * smu_table_context structure. Then read the smc_dpm_table from the
 1071                  * vbios and fill it into smc_pptable.
1072                  */
1073                 ret = smu_parse_pptable(smu);
1074                 if (ret)
1075                         return ret;
1076 
1077                 /*
 1078                  * Send the GetDriverIfVersion message and check that the returned
 1079                  * value matches the DRIVER_IF_VERSION in the smc header.
1080                  */
1081                 ret = smu_check_fw_version(smu);
1082                 if (ret)
1083                         return ret;
1084         }
1085 
1086         /* smu_dump_pptable(smu); */
1087 
1088         /*
1089          * Copy pptable bo in the vram to smc with SMU MSGs such as
1090          * SetDriverDramAddr and TransferTableDram2Smu.
1091          */
1092         ret = smu_write_pptable(smu);
1093         if (ret)
1094                 return ret;
1095 
1096         /* issue RunAfllBtc msg */
1097         ret = smu_run_afll_btc(smu);
1098         if (ret)
1099                 return ret;
1100 
1101         ret = smu_feature_set_allowed_mask(smu);
1102         if (ret)
1103                 return ret;
1104 
1105         ret = smu_system_features_control(smu, true);
1106         if (ret)
1107                 return ret;
1108 
1109         if (adev->asic_type != CHIP_ARCTURUS) {
1110                 ret = smu_override_pcie_parameters(smu);
1111                 if (ret)
1112                         return ret;
1113 
1114                 ret = smu_notify_display_change(smu);
1115                 if (ret)
1116                         return ret;
1117 
1118                 /*
 1119                  * Set the min deep sleep dcefclk to the bootup value from the vbios
 1120                  * via the SetMinDeepSleepDcefclk message.
1121                  */
1122                 ret = smu_set_min_dcef_deep_sleep(smu);
1123                 if (ret)
1124                         return ret;
1125         }
1126 
1127         /*
 1128          * Write the initial values (from the vbios) into the dpm tables context,
 1129          * such as gfxclk, memclk, dcefclk, etc., and enable the DPM feature for
 1130          * each type of clock.
1131          */
1132         if (initialize) {
1133                 ret = smu_populate_smc_tables(smu);
1134                 if (ret)
1135                         return ret;
1136 
1137                 ret = smu_init_max_sustainable_clocks(smu);
1138                 if (ret)
1139                         return ret;
1140         }
1141 
1142         ret = smu_set_default_od_settings(smu, initialize);
1143         if (ret)
1144                 return ret;
1145 
1146         if (initialize) {
1147                 ret = smu_populate_umd_state_clk(smu);
1148                 if (ret)
1149                         return ret;
1150 
1151                 ret = smu_get_power_limit(smu, &smu->default_power_limit, true);
1152                 if (ret)
1153                         return ret;
1154         }
1155 
1156         /*
1157          * Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools.
1158          */
1159         ret = smu_set_tool_table_location(smu);
1160 
1161         if (!smu_is_dpm_running(smu))
1162                 pr_info("dpm has been disabled\n");
1163 
1164         return ret;
1165 }
1166 
1167 /**
1168  * smu_alloc_memory_pool - allocate memory pool in the system memory
1169  *
 1170  * @smu: smu_context pointer
1171  *
 1172  * This memory pool is reserved for SMC use; its location is passed to the
 1173  * firmware via the SetSystemVirtualDramAddr and DramLogSetDramAddr messages.
1174  *
1175  * Returns 0 on success, error on failure.
1176  */
1177 static int smu_alloc_memory_pool(struct smu_context *smu)
1178 {
1179         struct amdgpu_device *adev = smu->adev;
1180         struct smu_table_context *smu_table = &smu->smu_table;
1181         struct smu_table *memory_pool = &smu_table->memory_pool;
1182         uint64_t pool_size = smu->pool_size;
1183         int ret = 0;
1184 
1185         if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
1186                 return ret;
1187 
1188         memory_pool->size = pool_size;
1189         memory_pool->align = PAGE_SIZE;
1190         memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;
1191 
1192         switch (pool_size) {
1193         case SMU_MEMORY_POOL_SIZE_256_MB:
1194         case SMU_MEMORY_POOL_SIZE_512_MB:
1195         case SMU_MEMORY_POOL_SIZE_1_GB:
1196         case SMU_MEMORY_POOL_SIZE_2_GB:
1197                 ret = amdgpu_bo_create_kernel(adev,
1198                                               memory_pool->size,
1199                                               memory_pool->align,
1200                                               memory_pool->domain,
1201                                               &memory_pool->bo,
1202                                               &memory_pool->mc_address,
1203                                               &memory_pool->cpu_addr);
1204                 break;
1205         default:
1206                 break;
1207         }
1208 
1209         return ret;
1210 }
1211 
1212 static int smu_free_memory_pool(struct smu_context *smu)
1213 {
1214         struct smu_table_context *smu_table = &smu->smu_table;
1215         struct smu_table *memory_pool = &smu_table->memory_pool;
1216         int ret = 0;
1217 
1218         if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
1219                 return ret;
1220 
1221         amdgpu_bo_free_kernel(&memory_pool->bo,
1222                               &memory_pool->mc_address,
1223                               &memory_pool->cpu_addr);
1224 
1225         memset(memory_pool, 0, sizeof(struct smu_table));
1226 
1227         return ret;
1228 }
1229 
1230 static int smu_hw_init(void *handle)
1231 {
1232         int ret;
1233         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1234         struct smu_context *smu = &adev->smu;
1235 
1236         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1237                 if (adev->asic_type < CHIP_NAVI10) {
1238                         ret = smu_load_microcode(smu);
1239                         if (ret)
1240                                 return ret;
1241                 }
1242         }
1243 
1244         ret = smu_check_fw_status(smu);
1245         if (ret) {
1246                 pr_err("SMC firmware status is not correct\n");
1247                 return ret;
1248         }
1249 
1250         if (adev->flags & AMD_IS_APU) {
1251                 smu_powergate_sdma(&adev->smu, false);
1252                 smu_powergate_vcn(&adev->smu, false);
1253         }
1254 
1255         if (!smu->pm_enabled)
1256                 return 0;
1257 
1258         ret = smu_feature_init_dpm(smu);
1259         if (ret)
1260                 goto failed;
1261 
1262         ret = smu_smc_table_hw_init(smu, true);
1263         if (ret)
1264                 goto failed;
1265 
1266         ret = smu_alloc_memory_pool(smu);
1267         if (ret)
1268                 goto failed;
1269 
1270         /*
 1271          * Notify the firmware of the memory pool location via the
 1272          * SetSystemVirtualDramAddr and DramLogSetDramAddr messages.
1273          */
1274         ret = smu_notify_memory_pool_location(smu);
1275         if (ret)
1276                 goto failed;
1277 
1278         ret = smu_start_thermal_control(smu);
1279         if (ret)
1280                 goto failed;
1281 
1282         if (!smu->pm_enabled)
1283                 adev->pm.dpm_enabled = false;
1284         else
1285                 adev->pm.dpm_enabled = true;    /* TODO: will set dpm_enabled flag while VCN and DAL DPM is workable */
1286 
1287         pr_info("SMU is initialized successfully!\n");
1288 
1289         return 0;
1290 
1291 failed:
1292         return ret;
1293 }
1294 
1295 static int smu_hw_fini(void *handle)
1296 {
1297         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1298         struct smu_context *smu = &adev->smu;
1299         struct smu_table_context *table_context = &smu->smu_table;
1300         int ret = 0;
1301 
1302         if (adev->flags & AMD_IS_APU) {
1303                 smu_powergate_sdma(&adev->smu, true);
1304                 smu_powergate_vcn(&adev->smu, true);
1305         }
1306 
1307         kfree(table_context->driver_pptable);
1308         table_context->driver_pptable = NULL;
1309 
1310         kfree(table_context->max_sustainable_clocks);
1311         table_context->max_sustainable_clocks = NULL;
1312 
1313         kfree(table_context->overdrive_table);
1314         table_context->overdrive_table = NULL;
1315 
1316         ret = smu_fini_fb_allocations(smu);
1317         if (ret)
1318                 return ret;
1319 
1320         ret = smu_free_memory_pool(smu);
1321         if (ret)
1322                 return ret;
1323 
1324         return 0;
1325 }
1326 
1327 int smu_reset(struct smu_context *smu)
1328 {
1329         struct amdgpu_device *adev = smu->adev;
1330         int ret = 0;
1331 
1332         ret = smu_hw_fini(adev);
1333         if (ret)
1334                 return ret;
1335 
1336         ret = smu_hw_init(adev);
1337         if (ret)
1338                 return ret;
1339 
1340         return ret;
1341 }
1342 
1343 static int smu_suspend(void *handle)
1344 {
1345         int ret;
1346         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1347         struct smu_context *smu = &adev->smu;
1348         bool baco_feature_is_enabled = false;
1349 
 1350         if (!(adev->flags & AMD_IS_APU))
1351                 baco_feature_is_enabled = smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT);
1352 
1353         ret = smu_system_features_control(smu, false);
1354         if (ret)
1355                 return ret;
1356 
1357         if (adev->in_gpu_reset && baco_feature_is_enabled) {
1358                 ret = smu_feature_set_enabled(smu, SMU_FEATURE_BACO_BIT, true);
1359                 if (ret) {
 1360                         pr_warn("failed to enable BACO feature, return %d\n", ret);
1361                         return ret;
1362                 }
1363         }
1364 
1365         smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
1366 
1367         if (adev->asic_type >= CHIP_NAVI10 &&
1368             adev->gfx.rlc.funcs->stop)
1369                 adev->gfx.rlc.funcs->stop(adev);
1370 
1371         return 0;
1372 }
1373 
1374 static int smu_resume(void *handle)
1375 {
1376         int ret;
1377         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1378         struct smu_context *smu = &adev->smu;
1379 
1380         pr_info("SMU is resuming...\n");
1381 
1382         mutex_lock(&smu->mutex);
1383 
1384         ret = smu_smc_table_hw_init(smu, false);
1385         if (ret)
1386                 goto failed;
1387 
1388         ret = smu_start_thermal_control(smu);
1389         if (ret)
1390                 goto failed;
1391 
1392         mutex_unlock(&smu->mutex);
1393 
1394         pr_info("SMU is resumed successfully!\n");
1395 
1396         return 0;
1397 failed:
1398         mutex_unlock(&smu->mutex);
1399         return ret;
1400 }
1401 
1402 int smu_display_configuration_change(struct smu_context *smu,
1403                                      const struct amd_pp_display_configuration *display_config)
1404 {
1405         int index = 0;
1406         int num_of_active_display = 0;
1407 
1408         if (!smu->pm_enabled || !is_support_sw_smu(smu->adev))
1409                 return -EINVAL;
1410 
1411         if (!display_config)
1412                 return -EINVAL;
1413 
1414         mutex_lock(&smu->mutex);
1415 
1416         smu_set_deep_sleep_dcefclk(smu,
1417                                    display_config->min_dcef_deep_sleep_set_clk / 100);
1418 
1419         for (index = 0; index < display_config->num_path_including_non_display; index++) {
1420                 if (display_config->displays[index].controller_id != 0)
1421                         num_of_active_display++;
1422         }
1423 
1424         smu_set_active_display_count(smu, num_of_active_display);
1425 
1426         smu_store_cc6_data(smu, display_config->cpu_pstate_separation_time,
1427                            display_config->cpu_cc6_disable,
1428                            display_config->cpu_pstate_disable,
1429                            display_config->nb_pstate_switch_disable);
1430 
1431         mutex_unlock(&smu->mutex);
1432 
1433         return 0;
1434 }
1435 
1436 static int smu_get_clock_info(struct smu_context *smu,
1437                               struct smu_clock_info *clk_info,
1438                               enum smu_perf_level_designation designation)
1439 {
1440         int ret;
1441         struct smu_performance_level level = {0};
1442 
1443         if (!clk_info)
1444                 return -EINVAL;
1445 
1446         ret = smu_get_perf_level(smu, PERF_LEVEL_ACTIVITY, &level);
1447         if (ret)
1448                 return -EINVAL;
1449 
1450         clk_info->min_mem_clk = level.memory_clock;
1451         clk_info->min_eng_clk = level.core_clock;
1452         clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;
1453 
1454         ret = smu_get_perf_level(smu, designation, &level);
1455         if (ret)
1456                 return -EINVAL;
1457 
1458         clk_info->min_mem_clk = level.memory_clock;
1459         clk_info->min_eng_clk = level.core_clock;
1460         clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;
1461 
1462         return 0;
1463 }
1464 
1465 int smu_get_current_clocks(struct smu_context *smu,
1466                            struct amd_pp_clock_info *clocks)
1467 {
1468         struct amd_pp_simple_clock_info simple_clocks = {0};
1469         struct smu_clock_info hw_clocks;
1470         int ret = 0;
1471 
1472         if (!is_support_sw_smu(smu->adev))
1473                 return -EINVAL;
1474 
1475         mutex_lock(&smu->mutex);
1476 
1477         smu_get_dal_power_level(smu, &simple_clocks);
1478 
1479         if (smu->support_power_containment)
1480                 ret = smu_get_clock_info(smu, &hw_clocks,
1481                                          PERF_LEVEL_POWER_CONTAINMENT);
1482         else
1483                 ret = smu_get_clock_info(smu, &hw_clocks, PERF_LEVEL_ACTIVITY);
1484 
1485         if (ret) {
1486                 pr_err("Error in smu_get_clock_info\n");
1487                 goto failed;
1488         }
1489 
1490         clocks->min_engine_clock = hw_clocks.min_eng_clk;
1491         clocks->max_engine_clock = hw_clocks.max_eng_clk;
1492         clocks->min_memory_clock = hw_clocks.min_mem_clk;
1493         clocks->max_memory_clock = hw_clocks.max_mem_clk;
1494         clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
1495         clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;
1496         clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
1497         clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
1498 
1499         if (simple_clocks.level == 0)
1500                 clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
1501         else
1502                 clocks->max_clocks_state = simple_clocks.level;
1503 
1504         if (!smu_get_current_shallow_sleep_clocks(smu, &hw_clocks)) {
1505                 clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
1506                 clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
1507         }
1508 
1509 failed:
1510         mutex_unlock(&smu->mutex);
1511         return ret;
1512 }
1513 
1514 static int smu_set_clockgating_state(void *handle,
1515                                      enum amd_clockgating_state state)
1516 {
1517         return 0;
1518 }
1519 
1520 static int smu_set_powergating_state(void *handle,
1521                                      enum amd_powergating_state state)
1522 {
1523         return 0;
1524 }
1525 
static int smu_enable_umd_pstate(void *handle,
                                 enum amd_dpm_forced_level *level)
{
        uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
                                        AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
                                        AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
                                        AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
        struct smu_context *smu = (struct smu_context*)(handle);
        struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

        if (!smu->pm_enabled || !smu_dpm_ctx->dpm_context)
                return -EINVAL;

        if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
                /* enter umd pstate, save current level, disable gfx cg */
                if (*level & profile_mode_mask) {
                        smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
                        smu_dpm_ctx->enable_umd_pstate = true;
                        amdgpu_device_ip_set_powergating_state(smu->adev,
                                                               AMD_IP_BLOCK_TYPE_GFX,
                                                               AMD_PG_STATE_UNGATE);
                        amdgpu_device_ip_set_clockgating_state(smu->adev,
                                                               AMD_IP_BLOCK_TYPE_GFX,
                                                               AMD_CG_STATE_UNGATE);
                }
        } else {
                /* exit umd pstate, restore level, enable gfx cg */
                if (!(*level & profile_mode_mask)) {
                        if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
                                *level = smu_dpm_ctx->saved_dpm_level;
                        smu_dpm_ctx->enable_umd_pstate = false;
                        amdgpu_device_ip_set_clockgating_state(smu->adev,
                                                               AMD_IP_BLOCK_TYPE_GFX,
                                                               AMD_CG_STATE_GATE);
                        amdgpu_device_ip_set_powergating_state(smu->adev,
                                                               AMD_IP_BLOCK_TYPE_GFX,
                                                               AMD_PG_STATE_GATE);
                }
        }

        return 0;
}

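/*
 * Common fallback used when the ASIC-specific set_performance_level
 * callback fails or is not implemented: translate the forced level into
 * the generic DPM limit/mask operations.
 */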
static int smu_default_set_performance_level(struct smu_context *smu,
                                             enum amd_dpm_forced_level level)
{
        int ret = 0;
        uint32_t sclk_mask, mclk_mask, soc_mask;

        switch (level) {
        case AMD_DPM_FORCED_LEVEL_HIGH:
                ret = smu_force_dpm_limit_value(smu, true);
                break;
        case AMD_DPM_FORCED_LEVEL_LOW:
                ret = smu_force_dpm_limit_value(smu, false);
                break;
        case AMD_DPM_FORCED_LEVEL_AUTO:
        case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
                ret = smu_unforce_dpm_levels(smu);
                break;
        case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
        case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
        case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
                ret = smu_get_profiling_clk_mask(smu, level,
                                                 &sclk_mask,
                                                 &mclk_mask,
                                                 &soc_mask);
                if (ret)
                        return ret;
                smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask);
                smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask);
                smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask);
                break;
        case AMD_DPM_FORCED_LEVEL_MANUAL:
        case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
        default:
                break;
        }

        return ret;
}

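/*
 * Central power-state worker: propagate display configuration changes,
 * apply the clock adjustment rules, program the requested performance
 * level, and re-apply the highest-priority workload profile.
 */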
int smu_adjust_power_state_dynamic(struct smu_context *smu,
                                   enum amd_dpm_forced_level level,
                                   bool skip_display_settings)
{
        int ret = 0;
        int index = 0;
        long workload;
        struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

        if (!smu->pm_enabled)
                return -EINVAL;

        if (!skip_display_settings) {
                ret = smu_display_config_changed(smu);
                if (ret) {
                        pr_err("Failed to change display config!\n");
                        return ret;
                }
        }

        ret = smu_apply_clocks_adjust_rules(smu);
        if (ret) {
                pr_err("Failed to apply clocks adjust rules!\n");
                return ret;
        }

        if (!skip_display_settings) {
                ret = smu_notify_smc_dispaly_config(smu);
                if (ret) {
                        pr_err("Failed to notify smc display config!\n");
                        return ret;
                }
        }


        if (smu_dpm_ctx->dpm_level != level) {
                ret = smu_asic_set_performance_level(smu, level);
                if (ret) {
                        /* fall back to the common implementation */
                        ret = smu_default_set_performance_level(smu, level);
                        if (ret) {
                                pr_err("Failed to set performance level!\n");
                                return ret;
                        }
                }

                /* update the saved copy */
                smu_dpm_ctx->dpm_level = level;
        }

        if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
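                /*
                 * workload_mask carries one bit per enabled profile,
                 * ordered by priority; fls() picks the highest-priority
                 * profile that is still set.
                 */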
                index = fls(smu->workload_mask);
                index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
                workload = smu->workload_setting[index];

                if (smu->power_profile_mode != workload)
                        smu_set_power_profile_mode(smu, &workload, 0);
        }

        return ret;
}

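/*
 * Dispatch a power-management task: display configuration changes run
 * the pre-change hooks first, while (re)init and readjust requests skip
 * the display settings path.
 */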
int smu_handle_task(struct smu_context *smu,
                    enum amd_dpm_forced_level level,
                    enum amd_pp_task task_id)
{
        int ret = 0;

        switch (task_id) {
        case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
                ret = smu_pre_display_config_changed(smu);
                if (ret)
                        return ret;
                ret = smu_set_cpu_power_state(smu);
                if (ret)
                        return ret;
                ret = smu_adjust_power_state_dynamic(smu, level, false);
                break;
        case AMD_PP_TASK_COMPLETE_INIT:
        case AMD_PP_TASK_READJUST_POWER_STATE:
                ret = smu_adjust_power_state_dynamic(smu, level, true);
                break;
        default:
                break;
        }

        return ret;
}

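/*
 * Enable or disable one power profile. The active profile is re-derived
 * from workload_mask so that disabling a profile falls back to the next
 * highest-priority profile that is still enabled.
 */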
int smu_switch_power_profile(struct smu_context *smu,
                             enum PP_SMC_POWER_PROFILE type,
                             bool en)
{
        struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
        long workload;
        uint32_t index;

        if (!smu->pm_enabled)
                return -EINVAL;

        if (type >= PP_SMC_POWER_PROFILE_CUSTOM)
                return -EINVAL;

        mutex_lock(&smu->mutex);

        if (!en) {
                smu->workload_mask &= ~(1 << smu->workload_prority[type]);
                index = fls(smu->workload_mask);
                index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
                workload = smu->workload_setting[index];
        } else {
                smu->workload_mask |= (1 << smu->workload_prority[type]);
                index = fls(smu->workload_mask);
                index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
                workload = smu->workload_setting[index];
        }

        if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
                smu_set_power_profile_mode(smu, &workload, 0);

        mutex_unlock(&smu->mutex);

        return 0;
}

enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
{
        struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
        enum amd_dpm_forced_level level;

        if (!smu_dpm_ctx->dpm_context)
                return -EINVAL;

        mutex_lock(&smu->mutex);
        level = smu_dpm_ctx->dpm_level;
        mutex_unlock(&smu->mutex);

        return level;
}

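/*
 * Force a new DPM level: update the UMD pstate bookkeeping first, then
 * readjust the power state so the level takes effect.
 */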
int smu_force_performance_level(struct smu_context *smu,
                                enum amd_dpm_forced_level level)
{
        struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
        int ret = 0;

        if (!smu_dpm_ctx->dpm_context)
                return -EINVAL;

        ret = smu_enable_umd_pstate(smu, &level);
        if (ret)
                return ret;

        ret = smu_handle_task(smu, level,
                              AMD_PP_TASK_READJUST_POWER_STATE);

        return ret;
}

int smu_set_display_count(struct smu_context *smu, uint32_t count)
{
        int ret = 0;

        mutex_lock(&smu->mutex);
        ret = smu_init_display_count(smu, count);
        mutex_unlock(&smu->mutex);

        return ret;
}

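/* Common IP-block callbacks shared by the SMU v11 and v12 blocks below. */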
const struct amd_ip_funcs smu_ip_funcs = {
        .name = "smu",
        .early_init = smu_early_init,
        .late_init = smu_late_init,
        .sw_init = smu_sw_init,
        .sw_fini = smu_sw_fini,
        .hw_init = smu_hw_init,
        .hw_fini = smu_hw_fini,
        .suspend = smu_suspend,
        .resume = smu_resume,
        .is_idle = NULL,
        .check_soft_reset = NULL,
        .wait_for_idle = NULL,
        .soft_reset = NULL,
        .set_clockgating_state = smu_set_clockgating_state,
        .set_powergating_state = smu_set_powergating_state,
        .enable_umd_pstate = smu_enable_umd_pstate,
};

const struct amdgpu_ip_block_version smu_v11_0_ip_block = {
        .type = AMD_IP_BLOCK_TYPE_SMC,
        .major = 11,
        .minor = 0,
        .rev = 0,
        .funcs = &smu_ip_funcs,
};

const struct amdgpu_ip_block_version smu_v12_0_ip_block = {
        .type = AMD_IP_BLOCK_TYPE_SMC,
        .major = 12,
        .minor = 0,
        .rev = 0,
        .funcs = &smu_ip_funcs,
};
