drivers/gpu/drm/amd/amdgpu/df_v3_6.c


DEFINITIONS

This source file includes the following definitions:
  1. df_v3_6_get_fica
  2. df_v3_6_set_fica
  3. df_v3_6_perfmon_rreg
  4. df_v3_6_perfmon_wreg
  5. df_v3_6_get_df_cntr_avail
  6. df_v3_6_sw_init
  7. df_v3_6_enable_broadcast_mode
  8. df_v3_6_get_fb_channel_number
  9. df_v3_6_get_hbm_channel_number
  10. df_v3_6_update_medium_grain_clock_gating
  11. df_v3_6_get_clockgating_state
  12. df_v3_6_pmc_config_2_cntr
  13. df_v3_6_pmc_get_addr
  14. df_v3_6_pmc_get_read_settings
  15. df_v3_6_pmc_get_ctrl_settings
  16. df_v3_6_pmc_add_cntr
  17. df_v3_6_pmc_release_cntr
  18. df_v3_6_reset_perfmon_cntr
  19. df_v3_6_pmc_start
  20. df_v3_6_pmc_stop
  21. df_v3_6_pmc_get_count

/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "df_v3_6.h"

#include "df/df_3_6_default.h"
#include "df/df_3_6_offset.h"
#include "df/df_3_6_sh_mask.h"

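/*
 * Number of memory channels for each encoding of the IntLvNumChan field in
 * DF_CS_UMC_AON0_DramBaseAddress0 (see df_v3_6_get_fb_channel_number());
 * zero entries are encodings this table does not map.
 */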
static u32 df_v3_6_channel_number[] = {1, 2, 0, 4, 0, 8, 0,
                                       16, 32, 0, 0, 0, 2, 4, 8};

/* init df format attrs */
AMDGPU_PMU_ATTR(event,          "config:0-7");
AMDGPU_PMU_ATTR(instance,       "config:8-15");
AMDGPU_PMU_ATTR(umask,          "config:16-23");

/* df format attributes */
static struct attribute *df_v3_6_format_attrs[] = {
        &pmu_attr_event.attr,
        &pmu_attr_instance.attr,
        &pmu_attr_umask.attr,
        NULL
};

/* df format attribute group */
static struct attribute_group df_v3_6_format_attr_group = {
        .name = "format",
        .attrs = df_v3_6_format_attrs,
};

/* df event attrs */
AMDGPU_PMU_ATTR(cake0_pcsout_txdata,
                      "event=0x7,instance=0x46,umask=0x2");
AMDGPU_PMU_ATTR(cake1_pcsout_txdata,
                      "event=0x7,instance=0x47,umask=0x2");
AMDGPU_PMU_ATTR(cake0_pcsout_txmeta,
                      "event=0x7,instance=0x46,umask=0x4");
AMDGPU_PMU_ATTR(cake1_pcsout_txmeta,
                      "event=0x7,instance=0x47,umask=0x4");
AMDGPU_PMU_ATTR(cake0_ftiinstat_reqalloc,
                      "event=0xb,instance=0x46,umask=0x4");
AMDGPU_PMU_ATTR(cake1_ftiinstat_reqalloc,
                      "event=0xb,instance=0x47,umask=0x4");
AMDGPU_PMU_ATTR(cake0_ftiinstat_rspalloc,
                      "event=0xb,instance=0x46,umask=0x8");
AMDGPU_PMU_ATTR(cake1_ftiinstat_rspalloc,
                      "event=0xb,instance=0x47,umask=0x8");

/* df event attributes */
static struct attribute *df_v3_6_event_attrs[] = {
        &pmu_attr_cake0_pcsout_txdata.attr,
        &pmu_attr_cake1_pcsout_txdata.attr,
        &pmu_attr_cake0_pcsout_txmeta.attr,
        &pmu_attr_cake1_pcsout_txmeta.attr,
        &pmu_attr_cake0_ftiinstat_reqalloc.attr,
        &pmu_attr_cake1_ftiinstat_reqalloc.attr,
        &pmu_attr_cake0_ftiinstat_rspalloc.attr,
        &pmu_attr_cake1_ftiinstat_rspalloc.attr,
        NULL
};

/* df event attribute group */
static struct attribute_group df_v3_6_event_attr_group = {
        .name = "events",
        .attrs = df_v3_6_event_attrs
};

/* df event attr groups */
const struct attribute_group *df_v3_6_attr_groups[] = {
                &df_v3_6_format_attr_group,
                &df_v3_6_event_attr_group,
                NULL
};

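/*
 * df_v3_6_get_fica - read a data fabric register through fabric indirect
 * config access: the FabricIndirectConfigAccessAddress3 (FICAA) register
 * selects the target, and the 64-bit result is returned via the
 * DataLo3/DataHi3 pair.  The accesses go through the PCIe index/data
 * registers, so the whole sequence is serialized by pcie_idx_lock.
 */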
static uint64_t df_v3_6_get_fica(struct amdgpu_device *adev,
                                 uint32_t ficaa_val)
{
        unsigned long flags, address, data;
        uint32_t ficadl_val, ficadh_val;

        address = adev->nbio_funcs->get_pcie_index_offset(adev);
        data = adev->nbio_funcs->get_pcie_data_offset(adev);

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessAddress3);
        WREG32(data, ficaa_val);

        WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessDataLo3);
        ficadl_val = RREG32(data);

        WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessDataHi3);
        ficadh_val = RREG32(data);

        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

        return (((ficadh_val & 0xFFFFFFFFFFFFFFFF) << 32) | ficadl_val);
}

static void df_v3_6_set_fica(struct amdgpu_device *adev, uint32_t ficaa_val,
                             uint32_t ficadl_val, uint32_t ficadh_val)
{
        unsigned long flags, address, data;

        address = adev->nbio_funcs->get_pcie_index_offset(adev);
        data = adev->nbio_funcs->get_pcie_data_offset(adev);

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessAddress3);
        WREG32(data, ficaa_val);

        WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessDataLo3);
        WREG32(data, ficadl_val);

        WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessDataHi3);
        WREG32(data, ficadh_val);

        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

/*
 * df_v3_6_perfmon_rreg - read perfmon lo and hi
 *
 * Must be atomic: there is no single mmio access for both halves, so the
 * back-to-back lo and hi reads have to happen without any interleaved
 * accesses that would disturb the DF finite state machine.
 */
static void df_v3_6_perfmon_rreg(struct amdgpu_device *adev,
                            uint32_t lo_addr, uint32_t *lo_val,
                            uint32_t hi_addr, uint32_t *hi_val)
{
        unsigned long flags, address, data;

        address = adev->nbio_funcs->get_pcie_index_offset(adev);
        data = adev->nbio_funcs->get_pcie_data_offset(adev);

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        WREG32(address, lo_addr);
        *lo_val = RREG32(data);
        WREG32(address, hi_addr);
        *hi_val = RREG32(data);
        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

/*
 * df_v3_6_perfmon_wreg - write perfmon lo and hi
 *
 * Must be atomic: there is no single mmio access for both halves, and no
 * reads may be interleaved with the data writes, in order to preserve the
 * data fabric's finite state machine.
 */
static void df_v3_6_perfmon_wreg(struct amdgpu_device *adev, uint32_t lo_addr,
                            uint32_t lo_val, uint32_t hi_addr, uint32_t hi_val)
{
        unsigned long flags, address, data;

        address = adev->nbio_funcs->get_pcie_index_offset(adev);
        data = adev->nbio_funcs->get_pcie_data_offset(adev);

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        WREG32(address, lo_addr);
        WREG32(data, lo_val);
        WREG32(address, hi_addr);
        WREG32(data, hi_val);
        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

/* get the number of df counters available */
static ssize_t df_v3_6_get_df_cntr_avail(struct device *dev,
                struct device_attribute *attr,
                char *buf)
{
        struct amdgpu_device *adev;
        struct drm_device *ddev;
        int i, count;

        ddev = dev_get_drvdata(dev);
        adev = ddev->dev_private;
        count = 0;

        for (i = 0; i < DF_V3_6_MAX_COUNTERS; i++) {
                if (adev->df_perfmon_config_assign_mask[i] == 0)
                        count++;
        }

        return snprintf(buf, PAGE_SIZE, "%i\n", count);
}

/* device attr for available perfmon counters */
static DEVICE_ATTR(df_cntr_avail, S_IRUGO, df_v3_6_get_df_cntr_avail, NULL);

/* init perfmons */
static void df_v3_6_sw_init(struct amdgpu_device *adev)
{
        int i, ret;

        ret = device_create_file(adev->dev, &dev_attr_df_cntr_avail);
        if (ret)
                DRM_ERROR("failed to create file for available df counters\n");

        for (i = 0; i < AMDGPU_MAX_DF_PERFMONS; i++)
                adev->df_perfmon_config_assign_mask[i] = 0;
}

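/*
 * Broadcast mode: clearing CfgRegInstAccEn in FabricConfigAccessControl makes
 * subsequent DF register accesses apply across fabric instances rather than a
 * single selected instance (as the register naming suggests; the precise
 * hardware behavior is not documented here).  Writing the register default
 * restores per-instance access.
 */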
static void df_v3_6_enable_broadcast_mode(struct amdgpu_device *adev,
                                          bool enable)
{
        u32 tmp;

        if (enable) {
                tmp = RREG32_SOC15(DF, 0, mmFabricConfigAccessControl);
                tmp &= ~FabricConfigAccessControl__CfgRegInstAccEn_MASK;
                WREG32_SOC15(DF, 0, mmFabricConfigAccessControl, tmp);
        } else
                WREG32_SOC15(DF, 0, mmFabricConfigAccessControl,
                             mmFabricConfigAccessControl_DEFAULT);
}

static u32 df_v3_6_get_fb_channel_number(struct amdgpu_device *adev)
{
        u32 tmp;

        tmp = RREG32_SOC15(DF, 0, mmDF_CS_UMC_AON0_DramBaseAddress0);
        tmp &= DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan_MASK;
        tmp >>= DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;

        return tmp;
}

static u32 df_v3_6_get_hbm_channel_number(struct amdgpu_device *adev)
{
        int fb_channel_number;

        fb_channel_number = adev->df_funcs->get_fb_channel_number(adev);
        if (fb_channel_number >= ARRAY_SIZE(df_v3_6_channel_number))
                fb_channel_number = 0;

        return df_v3_6_channel_number[fb_channel_number];
}

static void df_v3_6_update_medium_grain_clock_gating(struct amdgpu_device *adev,
                                                     bool enable)
{
        u32 tmp;

        if (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG) {
                /* Put DF on broadcast mode */
                adev->df_funcs->enable_broadcast_mode(adev, true);

                if (enable) {
                        tmp = RREG32_SOC15(DF, 0,
                                        mmDF_PIE_AON0_DfGlobalClkGater);
                        tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
                        tmp |= DF_V3_6_MGCG_ENABLE_15_CYCLE_DELAY;
                        WREG32_SOC15(DF, 0,
                                        mmDF_PIE_AON0_DfGlobalClkGater, tmp);
                } else {
                        tmp = RREG32_SOC15(DF, 0,
                                        mmDF_PIE_AON0_DfGlobalClkGater);
                        tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
                        tmp |= DF_V3_6_MGCG_DISABLE;
                        WREG32_SOC15(DF, 0,
                                        mmDF_PIE_AON0_DfGlobalClkGater, tmp);
                }

                /* Exit broadcast mode */
                adev->df_funcs->enable_broadcast_mode(adev, false);
        }
}

static void df_v3_6_get_clockgating_state(struct amdgpu_device *adev,
                                          u32 *flags)
{
        u32 tmp;

        /* AMD_CG_SUPPORT_DF_MGCG */
        tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater);
        if (tmp & DF_V3_6_MGCG_ENABLE_15_CYCLE_DELAY)
                *flags |= AMD_CG_SUPPORT_DF_MGCG;
}

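/*
 * A perfmon config is packed as described by the format attrs above:
 * bits 0-7 event, bits 8-15 instance, bits 16-23 unitmask.  Only these low
 * 24 bits are stored in df_perfmon_config_assign_mask[] when a counter is
 * claimed, and they are what the lookups below compare against.
 */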
/* get assigned df perfmon ctr as int */
static int df_v3_6_pmc_config_2_cntr(struct amdgpu_device *adev,
                                      uint64_t config)
{
        int i;

        for (i = 0; i < DF_V3_6_MAX_COUNTERS; i++) {
                if ((config & 0x0FFFFFFUL) ==
                                        adev->df_perfmon_config_assign_mask[i])
                        return i;
        }

        return -EINVAL;
}

/* get address based on counter assignment */
static void df_v3_6_pmc_get_addr(struct amdgpu_device *adev,
                                 uint64_t config,
                                 int is_ctrl,
                                 uint32_t *lo_base_addr,
                                 uint32_t *hi_base_addr)
{
        int target_cntr = df_v3_6_pmc_config_2_cntr(adev, config);

        if (target_cntr < 0)
                return;

        switch (target_cntr) {
        case 0:
                *lo_base_addr = is_ctrl ? smnPerfMonCtlLo0 : smnPerfMonCtrLo0;
                *hi_base_addr = is_ctrl ? smnPerfMonCtlHi0 : smnPerfMonCtrHi0;
                break;
        case 1:
                *lo_base_addr = is_ctrl ? smnPerfMonCtlLo1 : smnPerfMonCtrLo1;
                *hi_base_addr = is_ctrl ? smnPerfMonCtlHi1 : smnPerfMonCtrHi1;
                break;
        case 2:
                *lo_base_addr = is_ctrl ? smnPerfMonCtlLo2 : smnPerfMonCtrLo2;
                *hi_base_addr = is_ctrl ? smnPerfMonCtlHi2 : smnPerfMonCtrHi2;
                break;
        case 3:
                *lo_base_addr = is_ctrl ? smnPerfMonCtlLo3 : smnPerfMonCtrLo3;
                *hi_base_addr = is_ctrl ? smnPerfMonCtlHi3 : smnPerfMonCtrHi3;
                break;
        }
}

/* get read counter address */
static void df_v3_6_pmc_get_read_settings(struct amdgpu_device *adev,
                                          uint64_t config,
                                          uint32_t *lo_base_addr,
                                          uint32_t *hi_base_addr)
{
        df_v3_6_pmc_get_addr(adev, config, 0, lo_base_addr, hi_base_addr);
}

/* get control counter settings i.e. address and values to set */
static int df_v3_6_pmc_get_ctrl_settings(struct amdgpu_device *adev,
                                          uint64_t config,
                                          uint32_t *lo_base_addr,
                                          uint32_t *hi_base_addr,
                                          uint32_t *lo_val,
                                          uint32_t *hi_val)
{
        uint32_t eventsel, instance, unitmask;
        uint32_t instance_10, instance_5432, instance_76;

        df_v3_6_pmc_get_addr(adev, config, 1, lo_base_addr, hi_base_addr);

        if ((*lo_base_addr == 0) || (*hi_base_addr == 0)) {
                DRM_ERROR("[DF PMC] addressing not retrieved! Lo: %x, Hi: %x",
                                *lo_base_addr, *hi_base_addr);
                return -ENXIO;
        }

        eventsel = DF_V3_6_GET_EVENT(config) & 0x3f;
        unitmask = DF_V3_6_GET_UNITMASK(config) & 0xf;
        instance = DF_V3_6_GET_INSTANCE(config);

        instance_10 = instance & 0x3;
        instance_5432 = (instance >> 2) & 0xf;
        instance_76 = (instance >> 6) & 0x3;

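        /*
         * Pack the control register pair: PerfMonCtlLo carries the event
         * select in bits 5-0, instance bits [1:0] in bits 7-6, the unit mask
         * in bits 11-8, and bit 22 set (the counter-enable bit, judging by
         * how pmc_start/pmc_stop use these values); PerfMonCtlHi carries
         * instance bits [5:2] in its low bits and bits [7:6] at bits 30-29.
         */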
        *lo_val = (unitmask << 8) | (instance_10 << 6) | eventsel | (1 << 22);
        *hi_val = (instance_76 << 29) | instance_5432;

        DRM_DEBUG_DRIVER("config=%llx addr=%08x:%08x val=%08x:%08x",
                config, *lo_base_addr, *hi_base_addr, *lo_val, *hi_val);

        return 0;
}

/* add df performance counters for read */
static int df_v3_6_pmc_add_cntr(struct amdgpu_device *adev,
                                   uint64_t config)
{
        int i, target_cntr;

        target_cntr = df_v3_6_pmc_config_2_cntr(adev, config);

        if (target_cntr >= 0)
                return 0;

        for (i = 0; i < DF_V3_6_MAX_COUNTERS; i++) {
                if (adev->df_perfmon_config_assign_mask[i] == 0U) {
                        adev->df_perfmon_config_assign_mask[i] =
                                                        config & 0x0FFFFFFUL;
                        return 0;
                }
        }

        return -ENOSPC;
}

/* release performance counter */
static void df_v3_6_pmc_release_cntr(struct amdgpu_device *adev,
                                     uint64_t config)
{
        int target_cntr = df_v3_6_pmc_config_2_cntr(adev, config);

        if (target_cntr >= 0)
                adev->df_perfmon_config_assign_mask[target_cntr] = 0ULL;
}

static void df_v3_6_reset_perfmon_cntr(struct amdgpu_device *adev,
                                         uint64_t config)
{
        uint32_t lo_base_addr, hi_base_addr;

        df_v3_6_pmc_get_read_settings(adev, config, &lo_base_addr,
                                      &hi_base_addr);

        if ((lo_base_addr == 0) || (hi_base_addr == 0))
                return;

        df_v3_6_perfmon_wreg(adev, lo_base_addr, 0, hi_base_addr, 0);
}

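/*
 * df_v3_6_pmc_start - with is_enable set, only reserve a counter slot for
 * this config (df_v3_6_pmc_add_cntr); with is_enable clear, program the
 * assigned PerfMonCtl pair with the packed control values, which starts
 * counting.  Any previously assigned counter is zeroed first via
 * df_v3_6_reset_perfmon_cntr().
 */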
static int df_v3_6_pmc_start(struct amdgpu_device *adev, uint64_t config,
                             int is_enable)
{
        uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val;
        int ret = 0;

        switch (adev->asic_type) {
        case CHIP_VEGA20:
                df_v3_6_reset_perfmon_cntr(adev, config);

                if (is_enable) {
                        ret = df_v3_6_pmc_add_cntr(adev, config);
                } else {
                        ret = df_v3_6_pmc_get_ctrl_settings(adev,
                                        config,
                                        &lo_base_addr,
                                        &hi_base_addr,
                                        &lo_val,
                                        &hi_val);

                        if (ret)
                                return ret;

                        df_v3_6_perfmon_wreg(adev, lo_base_addr, lo_val,
                                        hi_base_addr, hi_val);
                }

                break;
        default:
                break;
        }

        return ret;
}

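/*
 * df_v3_6_pmc_stop - stop counting by clearing the assigned PerfMonCtl pair;
 * if is_disable is set, also release the counter slot so it can be reused.
 */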
static int df_v3_6_pmc_stop(struct amdgpu_device *adev, uint64_t config,
                            int is_disable)
{
        uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val;
        int ret = 0;

        switch (adev->asic_type) {
        case CHIP_VEGA20:
                ret = df_v3_6_pmc_get_ctrl_settings(adev,
                        config,
                        &lo_base_addr,
                        &hi_base_addr,
                        &lo_val,
                        &hi_val);

                if (ret)
                        return ret;

                df_v3_6_perfmon_wreg(adev, lo_base_addr, 0, hi_base_addr, 0);

                if (is_disable)
                        df_v3_6_pmc_release_cntr(adev, config);

                break;
        default:
                break;
        }

        return ret;
}

static void df_v3_6_pmc_get_count(struct amdgpu_device *adev,
                                  uint64_t config,
                                  uint64_t *count)
{
        uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val;

        *count = 0;

        switch (adev->asic_type) {
        case CHIP_VEGA20:
                df_v3_6_pmc_get_read_settings(adev, config, &lo_base_addr,
                                      &hi_base_addr);

                if ((lo_base_addr == 0) || (hi_base_addr == 0))
                        return;

                df_v3_6_perfmon_rreg(adev, lo_base_addr, &lo_val,
                                hi_base_addr, &hi_val);

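                /*
                 * Assemble the 64-bit count from the lo/hi halves; a value
                 * at or above DF_V3_6_PERFMON_OVERFLOW is treated as a wrap
                 * and reported as zero.
                 */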
                *count = ((hi_val | 0ULL) << 32) | (lo_val | 0ULL);

                if (*count >= DF_V3_6_PERFMON_OVERFLOW)
                        *count = 0;

                DRM_DEBUG_DRIVER("config=%llx addr=%08x:%08x val=%08x:%08x",
                         config, lo_base_addr, hi_base_addr, lo_val, hi_val);

                break;
        default:
                break;
        }
}

const struct amdgpu_df_funcs df_v3_6_funcs = {
        .sw_init = df_v3_6_sw_init,
        .enable_broadcast_mode = df_v3_6_enable_broadcast_mode,
        .get_fb_channel_number = df_v3_6_get_fb_channel_number,
        .get_hbm_channel_number = df_v3_6_get_hbm_channel_number,
        .update_medium_grain_clock_gating =
                        df_v3_6_update_medium_grain_clock_gating,
        .get_clockgating_state = df_v3_6_get_clockgating_state,
        .pmc_start = df_v3_6_pmc_start,
        .pmc_stop = df_v3_6_pmc_stop,
        .pmc_get_count = df_v3_6_pmc_get_count,
        .get_fica = df_v3_6_get_fica,
        .set_fica = df_v3_6_set_fica
};
