root/drivers/gpu/drm/etnaviv/etnaviv_gpu.c


DEFINITIONS

This source file includes the following definitions:
  1. etnaviv_gpu_get_param
  2. etnaviv_hw_specs
  3. etnaviv_hw_identify
  4. etnaviv_gpu_load_clock
  5. etnaviv_gpu_update_clock
  6. etnaviv_hw_reset
  7. etnaviv_gpu_enable_mlcg
  8. etnaviv_gpu_start_fe
  9. etnaviv_gpu_start_fe_idleloop
  10. etnaviv_gpu_setup_pulse_eater
  11. etnaviv_gpu_hw_init
  12. etnaviv_gpu_init
  13. verify_dma
  14. etnaviv_gpu_debugfs
  15. etnaviv_gpu_recover_hang
  16. to_etnaviv_fence
  17. etnaviv_fence_get_driver_name
  18. etnaviv_fence_get_timeline_name
  19. etnaviv_fence_signaled
  20. etnaviv_fence_release
  21. etnaviv_gpu_fence_alloc
  22. fence_after
  23. event_alloc
  24. event_free
  25. etnaviv_gpu_wait_fence_interruptible
  26. etnaviv_gpu_wait_obj_inactive
  27. sync_point_perfmon_sample
  28. sync_point_perfmon_sample_pre
  29. sync_point_perfmon_sample_post
  30. etnaviv_gpu_submit
  31. sync_point_worker
  32. dump_mmu_fault
  33. irq_handler
  34. etnaviv_gpu_clk_enable
  35. etnaviv_gpu_clk_disable
  36. etnaviv_gpu_wait_idle
  37. etnaviv_gpu_hw_suspend
  38. etnaviv_gpu_hw_resume
  39. etnaviv_gpu_cooling_get_max_state
  40. etnaviv_gpu_cooling_get_cur_state
  41. etnaviv_gpu_cooling_set_cur_state
  42. etnaviv_gpu_bind
  43. etnaviv_gpu_unbind
  44. etnaviv_gpu_platform_probe
  45. etnaviv_gpu_platform_remove
  46. etnaviv_gpu_rpm_suspend
  47. etnaviv_gpu_rpm_resume

   1 // SPDX-License-Identifier: GPL-2.0
   2 /*
   3  * Copyright (C) 2015-2018 Etnaviv Project
   4  */
   5 
   6 #include <linux/clk.h>
   7 #include <linux/component.h>
   8 #include <linux/delay.h>
   9 #include <linux/dma-fence.h>
  10 #include <linux/dma-mapping.h>
  11 #include <linux/module.h>
  12 #include <linux/of_device.h>
  13 #include <linux/platform_device.h>
  14 #include <linux/pm_runtime.h>
  15 #include <linux/regulator/consumer.h>
  16 #include <linux/thermal.h>
  17 
  18 #include "etnaviv_cmdbuf.h"
  19 #include "etnaviv_dump.h"
  20 #include "etnaviv_gpu.h"
  21 #include "etnaviv_gem.h"
  22 #include "etnaviv_mmu.h"
  23 #include "etnaviv_perfmon.h"
  24 #include "etnaviv_sched.h"
  25 #include "common.xml.h"
  26 #include "state.xml.h"
  27 #include "state_hi.xml.h"
  28 #include "cmdstream.xml.h"
  29 
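     /*
      * PHYS_OFFSET is the base address of physical RAM on architectures that
      * define it (e.g. arm); fall back to 0 elsewhere so the linear window
      * placement in etnaviv_gpu_init() still computes a usable memory_base.
      */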
  30 #ifndef PHYS_OFFSET
  31 #define PHYS_OFFSET 0
  32 #endif
  33 
  34 static const struct platform_device_id gpu_ids[] = {
  35         { .name = "etnaviv-gpu,2d" },
  36         { },
  37 };
  38 
  39 /*
  40  * Driver functions:
  41  */
  42 
  43 int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
  44 {
  45         struct etnaviv_drm_private *priv = gpu->drm->dev_private;
  46 
  47         switch (param) {
  48         case ETNAVIV_PARAM_GPU_MODEL:
  49                 *value = gpu->identity.model;
  50                 break;
  51 
  52         case ETNAVIV_PARAM_GPU_REVISION:
  53                 *value = gpu->identity.revision;
  54                 break;
  55 
  56         case ETNAVIV_PARAM_GPU_FEATURES_0:
  57                 *value = gpu->identity.features;
  58                 break;
  59 
  60         case ETNAVIV_PARAM_GPU_FEATURES_1:
  61                 *value = gpu->identity.minor_features0;
  62                 break;
  63 
  64         case ETNAVIV_PARAM_GPU_FEATURES_2:
  65                 *value = gpu->identity.minor_features1;
  66                 break;
  67 
  68         case ETNAVIV_PARAM_GPU_FEATURES_3:
  69                 *value = gpu->identity.minor_features2;
  70                 break;
  71 
  72         case ETNAVIV_PARAM_GPU_FEATURES_4:
  73                 *value = gpu->identity.minor_features3;
  74                 break;
  75 
  76         case ETNAVIV_PARAM_GPU_FEATURES_5:
  77                 *value = gpu->identity.minor_features4;
  78                 break;
  79 
  80         case ETNAVIV_PARAM_GPU_FEATURES_6:
  81                 *value = gpu->identity.minor_features5;
  82                 break;
  83 
  84         case ETNAVIV_PARAM_GPU_FEATURES_7:
  85                 *value = gpu->identity.minor_features6;
  86                 break;
  87 
  88         case ETNAVIV_PARAM_GPU_FEATURES_8:
  89                 *value = gpu->identity.minor_features7;
  90                 break;
  91 
  92         case ETNAVIV_PARAM_GPU_FEATURES_9:
  93                 *value = gpu->identity.minor_features8;
  94                 break;
  95 
  96         case ETNAVIV_PARAM_GPU_FEATURES_10:
  97                 *value = gpu->identity.minor_features9;
  98                 break;
  99 
 100         case ETNAVIV_PARAM_GPU_FEATURES_11:
 101                 *value = gpu->identity.minor_features10;
 102                 break;
 103 
 104         case ETNAVIV_PARAM_GPU_FEATURES_12:
 105                 *value = gpu->identity.minor_features11;
 106                 break;
 107 
 108         case ETNAVIV_PARAM_GPU_STREAM_COUNT:
 109                 *value = gpu->identity.stream_count;
 110                 break;
 111 
 112         case ETNAVIV_PARAM_GPU_REGISTER_MAX:
 113                 *value = gpu->identity.register_max;
 114                 break;
 115 
 116         case ETNAVIV_PARAM_GPU_THREAD_COUNT:
 117                 *value = gpu->identity.thread_count;
 118                 break;
 119 
 120         case ETNAVIV_PARAM_GPU_VERTEX_CACHE_SIZE:
 121                 *value = gpu->identity.vertex_cache_size;
 122                 break;
 123 
 124         case ETNAVIV_PARAM_GPU_SHADER_CORE_COUNT:
 125                 *value = gpu->identity.shader_core_count;
 126                 break;
 127 
 128         case ETNAVIV_PARAM_GPU_PIXEL_PIPES:
 129                 *value = gpu->identity.pixel_pipes;
 130                 break;
 131 
 132         case ETNAVIV_PARAM_GPU_VERTEX_OUTPUT_BUFFER_SIZE:
 133                 *value = gpu->identity.vertex_output_buffer_size;
 134                 break;
 135 
 136         case ETNAVIV_PARAM_GPU_BUFFER_SIZE:
 137                 *value = gpu->identity.buffer_size;
 138                 break;
 139 
 140         case ETNAVIV_PARAM_GPU_INSTRUCTION_COUNT:
 141                 *value = gpu->identity.instruction_count;
 142                 break;
 143 
 144         case ETNAVIV_PARAM_GPU_NUM_CONSTANTS:
 145                 *value = gpu->identity.num_constants;
 146                 break;
 147 
 148         case ETNAVIV_PARAM_GPU_NUM_VARYINGS:
 149                 *value = gpu->identity.varyings_count;
 150                 break;
 151 
 152         case ETNAVIV_PARAM_SOFTPIN_START_ADDR:
 153                 if (priv->mmu_global->version == ETNAVIV_IOMMU_V2)
 154                         *value = ETNAVIV_SOFTPIN_START_ADDRESS;
 155                 else
 156                         *value = ~0ULL;
 157                 break;
 158 
 159         default:
 160                 DBG("%s: invalid param: %u", dev_name(gpu->dev), param);
 161                 return -EINVAL;
 162         }
 163 
 164         return 0;
 165 }
 166 
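     /*
      * This backs the ETNAVIV_GET_PARAM ioctl.  Userspace probes each
      * ETNAVIV_PARAM_* in turn to build its picture of the core; through
      * libdrm this looks roughly like:
      *
      *     struct drm_etnaviv_param req = {
      *             .pipe  = pipe,
      *             .param = ETNAVIV_PARAM_GPU_MODEL,
      *     };
      *     ret = drmCommandWriteRead(fd, DRM_ETNAVIV_GET_PARAM,
      *                               &req, sizeof(req));
      *     (on success, req.value holds the model number)
      */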
 167 
 168 #define etnaviv_is_model_rev(gpu, mod, rev) \
 169         ((gpu)->identity.model == chipModel_##mod && \
 170          (gpu)->identity.revision == rev)
 171 #define etnaviv_field(val, field) \
 172         (((val) & field##__MASK) >> field##__SHIFT)
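     /*
      * The *__MASK/*__SHIFT pairs consumed by etnaviv_field() come from the
      * generated register headers included above (state.xml.h and friends),
      * e.g. etnaviv_field(specs[0], VIVS_HI_CHIP_SPECS_STREAM_COUNT)
      * extracts the stream count bitfield from the raw CHIP_SPECS word.
      */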
 173 
 174 static void etnaviv_hw_specs(struct etnaviv_gpu *gpu)
 175 {
 176         if (gpu->identity.minor_features0 &
 177             chipMinorFeatures0_MORE_MINOR_FEATURES) {
 178                 u32 specs[4];
 179                 unsigned int streams;
 180 
 181                 specs[0] = gpu_read(gpu, VIVS_HI_CHIP_SPECS);
 182                 specs[1] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_2);
 183                 specs[2] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_3);
 184                 specs[3] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_4);
 185 
 186                 gpu->identity.stream_count = etnaviv_field(specs[0],
 187                                         VIVS_HI_CHIP_SPECS_STREAM_COUNT);
 188                 gpu->identity.register_max = etnaviv_field(specs[0],
 189                                         VIVS_HI_CHIP_SPECS_REGISTER_MAX);
 190                 gpu->identity.thread_count = etnaviv_field(specs[0],
 191                                         VIVS_HI_CHIP_SPECS_THREAD_COUNT);
 192                 gpu->identity.vertex_cache_size = etnaviv_field(specs[0],
 193                                         VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE);
 194                 gpu->identity.shader_core_count = etnaviv_field(specs[0],
 195                                         VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT);
 196                 gpu->identity.pixel_pipes = etnaviv_field(specs[0],
 197                                         VIVS_HI_CHIP_SPECS_PIXEL_PIPES);
 198                 gpu->identity.vertex_output_buffer_size =
 199                         etnaviv_field(specs[0],
 200                                 VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE);
 201 
 202                 gpu->identity.buffer_size = etnaviv_field(specs[1],
 203                                         VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE);
 204                 gpu->identity.instruction_count = etnaviv_field(specs[1],
 205                                         VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT);
 206                 gpu->identity.num_constants = etnaviv_field(specs[1],
 207                                         VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS);
 208 
 209                 gpu->identity.varyings_count = etnaviv_field(specs[2],
 210                                         VIVS_HI_CHIP_SPECS_3_VARYINGS_COUNT);
 211 
 212                 /* This overrides the value from the older register if non-zero */
 213                 streams = etnaviv_field(specs[3],
 214                                         VIVS_HI_CHIP_SPECS_4_STREAM_COUNT);
 215                 if (streams)
 216                         gpu->identity.stream_count = streams;
 217         }
 218 
 219         /* Fill in the stream count if not specified */
 220         if (gpu->identity.stream_count == 0) {
 221                 if (gpu->identity.model >= 0x1000)
 222                         gpu->identity.stream_count = 4;
 223                 else
 224                         gpu->identity.stream_count = 1;
 225         }
 226 
 227         /* Convert the register max value (the raw field is a log2 encoding) */
 228         if (gpu->identity.register_max)
 229                 gpu->identity.register_max = 1 << gpu->identity.register_max;
 230         else if (gpu->identity.model == chipModel_GC400)
 231                 gpu->identity.register_max = 32;
 232         else
 233                 gpu->identity.register_max = 64;
 234 
 235         /* Convert thread count */
 236         if (gpu->identity.thread_count)
 237                 gpu->identity.thread_count = 1 << gpu->identity.thread_count;
 238         else if (gpu->identity.model == chipModel_GC400)
 239                 gpu->identity.thread_count = 64;
 240         else if (gpu->identity.model == chipModel_GC500 ||
 241                  gpu->identity.model == chipModel_GC530)
 242                 gpu->identity.thread_count = 128;
 243         else
 244                 gpu->identity.thread_count = 256;
 245 
 246         if (gpu->identity.vertex_cache_size == 0)
 247                 gpu->identity.vertex_cache_size = 8;
 248 
 249         if (gpu->identity.shader_core_count == 0) {
 250                 if (gpu->identity.model >= 0x1000)
 251                         gpu->identity.shader_core_count = 2;
 252                 else
 253                         gpu->identity.shader_core_count = 1;
 254         }
 255 
 256         if (gpu->identity.pixel_pipes == 0)
 257                 gpu->identity.pixel_pipes = 1;
 258 
 259         /* Convert vertex buffer size */
 260         if (gpu->identity.vertex_output_buffer_size) {
 261                 gpu->identity.vertex_output_buffer_size =
 262                         1 << gpu->identity.vertex_output_buffer_size;
 263         } else if (gpu->identity.model == chipModel_GC400) {
 264                 if (gpu->identity.revision < 0x4000)
 265                         gpu->identity.vertex_output_buffer_size = 512;
 266                 else if (gpu->identity.revision < 0x4200)
 267                         gpu->identity.vertex_output_buffer_size = 256;
 268                 else
 269                         gpu->identity.vertex_output_buffer_size = 128;
 270         } else {
 271                 gpu->identity.vertex_output_buffer_size = 512;
 272         }
 273 
 274         switch (gpu->identity.instruction_count) {
 275         case 0:
 276                 if (etnaviv_is_model_rev(gpu, GC2000, 0x5108) ||
 277                     gpu->identity.model == chipModel_GC880)
 278                         gpu->identity.instruction_count = 512;
 279                 else
 280                         gpu->identity.instruction_count = 256;
 281                 break;
 282 
 283         case 1:
 284                 gpu->identity.instruction_count = 1024;
 285                 break;
 286 
 287         case 2:
 288                 gpu->identity.instruction_count = 2048;
 289                 break;
 290 
 291         default:
 292                 gpu->identity.instruction_count = 256;
 293                 break;
 294         }
 295 
 296         if (gpu->identity.num_constants == 0)
 297                 gpu->identity.num_constants = 168;
 298 
 299         if (gpu->identity.varyings_count == 0) {
 300                 if (gpu->identity.minor_features1 & chipMinorFeatures1_HALTI0)
 301                         gpu->identity.varyings_count = 12;
 302                 else
 303                         gpu->identity.varyings_count = 8;
 304         }
 305 
 306         /*
 307          * For some cores, two varyings are consumed for position, so the
 308          * maximum varying count needs to be reduced by one.
 309          */
 310         if (etnaviv_is_model_rev(gpu, GC5000, 0x5434) ||
 311             etnaviv_is_model_rev(gpu, GC4000, 0x5222) ||
 312             etnaviv_is_model_rev(gpu, GC4000, 0x5245) ||
 313             etnaviv_is_model_rev(gpu, GC4000, 0x5208) ||
 314             etnaviv_is_model_rev(gpu, GC3000, 0x5435) ||
 315             etnaviv_is_model_rev(gpu, GC2200, 0x5244) ||
 316             etnaviv_is_model_rev(gpu, GC2100, 0x5108) ||
 317             etnaviv_is_model_rev(gpu, GC2000, 0x5108) ||
 318             etnaviv_is_model_rev(gpu, GC1500, 0x5246) ||
 319             etnaviv_is_model_rev(gpu, GC880, 0x5107) ||
 320             etnaviv_is_model_rev(gpu, GC880, 0x5106))
 321                 gpu->identity.varyings_count -= 1;
 322 }
 323 
 324 static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
 325 {
 326         u32 chipIdentity;
 327 
 328         chipIdentity = gpu_read(gpu, VIVS_HI_CHIP_IDENTITY);
 329 
 330         /* Special case for older graphics cores. */
 331         if (etnaviv_field(chipIdentity, VIVS_HI_CHIP_IDENTITY_FAMILY) == 0x01) {
 332                 gpu->identity.model    = chipModel_GC500;
 333                 gpu->identity.revision = etnaviv_field(chipIdentity,
 334                                          VIVS_HI_CHIP_IDENTITY_REVISION);
 335         } else {
 336 
 337                 gpu->identity.model = gpu_read(gpu, VIVS_HI_CHIP_MODEL);
 338                 gpu->identity.revision = gpu_read(gpu, VIVS_HI_CHIP_REV);
 339 
 340                 /*
 341                  * !!!! HACK ALERT !!!!
 342                  * Because people change device IDs without letting software
 343                  * know about it - here is the hack to make it all look the
 344                  * same.  Only for GC400 family.
 345                  */
 346                 if ((gpu->identity.model & 0xff00) == 0x0400 &&
 347                     gpu->identity.model != chipModel_GC420) {
 348                         gpu->identity.model = gpu->identity.model & 0x0400;
 349                 }
 350 
 351                 /* Another special case */
 352                 if (etnaviv_is_model_rev(gpu, GC300, 0x2201)) {
 353                         u32 chipDate = gpu_read(gpu, VIVS_HI_CHIP_DATE);
 354                         u32 chipTime = gpu_read(gpu, VIVS_HI_CHIP_TIME);
 355 
 356                         if (chipDate == 0x20080814 && chipTime == 0x12051100) {
 357                                 /*
 358                                  * This IP has an ECO; put the correct
 359                                  * revision in it.
 360                                  */
 361                                 gpu->identity.revision = 0x1051;
 362                         }
 363                 }
 364 
 365                 /*
 366                  * NXP likes to call the GPU on the i.MX6QP GC2000+, but in
 367                  * reality it's just a re-branded GC3000. We can identify this
 368                  * core by the upper half of the revision register being all 1.
 369                  * Fix model/rev here, so all other places can refer to this
 370                  * core by its real identity.
 371                  */
 372                 if (etnaviv_is_model_rev(gpu, GC2000, 0xffff5450)) {
 373                         gpu->identity.model = chipModel_GC3000;
 374                         gpu->identity.revision &= 0xffff;
 375                 }
 376         }
 377 
 378         dev_info(gpu->dev, "model: GC%x, revision: %x\n",
 379                  gpu->identity.model, gpu->identity.revision);
 380 
 381         gpu->idle_mask = ~VIVS_HI_IDLE_STATE_AXI_LP;
 382         /*
 383          * If there is a match in the HWDB, we aren't interested in the
 384          * remaining register values, as they might be wrong.
 385          */
 386         if (etnaviv_fill_identity_from_hwdb(gpu))
 387                 return;
 388 
 389         gpu->identity.features = gpu_read(gpu, VIVS_HI_CHIP_FEATURE);
 390 
 391         /* Disable fast clear on GC700. */
 392         if (gpu->identity.model == chipModel_GC700)
 393                 gpu->identity.features &= ~chipFeatures_FAST_CLEAR;
 394 
 395         if ((gpu->identity.model == chipModel_GC500 &&
 396              gpu->identity.revision < 2) ||
 397             (gpu->identity.model == chipModel_GC300 &&
 398              gpu->identity.revision < 0x2000)) {
 399 
 400                 /*
 401                  * GC500 rev 1.x and GC300 rev < 2.0 don't have these
 402                  * registers.
 403                  */
 404                 gpu->identity.minor_features0 = 0;
 405                 gpu->identity.minor_features1 = 0;
 406                 gpu->identity.minor_features2 = 0;
 407                 gpu->identity.minor_features3 = 0;
 408                 gpu->identity.minor_features4 = 0;
 409                 gpu->identity.minor_features5 = 0;
 410         } else
 411                 gpu->identity.minor_features0 =
 412                                 gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_0);
 413 
 414         if (gpu->identity.minor_features0 &
 415             chipMinorFeatures0_MORE_MINOR_FEATURES) {
 416                 gpu->identity.minor_features1 =
 417                                 gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_1);
 418                 gpu->identity.minor_features2 =
 419                                 gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_2);
 420                 gpu->identity.minor_features3 =
 421                                 gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_3);
 422                 gpu->identity.minor_features4 =
 423                                 gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_4);
 424                 gpu->identity.minor_features5 =
 425                                 gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_5);
 426         }
 427 
 428         /* GC600 idle register reports zero bits where modules aren't present */
 429         if (gpu->identity.model == chipModel_GC600)
 430                 gpu->idle_mask = VIVS_HI_IDLE_STATE_TX |
 431                                  VIVS_HI_IDLE_STATE_RA |
 432                                  VIVS_HI_IDLE_STATE_SE |
 433                                  VIVS_HI_IDLE_STATE_PA |
 434                                  VIVS_HI_IDLE_STATE_SH |
 435                                  VIVS_HI_IDLE_STATE_PE |
 436                                  VIVS_HI_IDLE_STATE_DE |
 437                                  VIVS_HI_IDLE_STATE_FE;
 438 
 439         etnaviv_hw_specs(gpu);
 440 }
 441 
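     /*
      * A new FSCALE value has to be strobed into the divider: write the
      * clock control word once with the CMD_LOAD bit set, then again with
      * the bit cleared, keeping the rest of the register contents.
      */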
 442 static void etnaviv_gpu_load_clock(struct etnaviv_gpu *gpu, u32 clock)
 443 {
 444         gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock |
 445                   VIVS_HI_CLOCK_CONTROL_FSCALE_CMD_LOAD);
 446         gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock);
 447 }
 448 
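     /*
      * Two scaling paths: cores with dynamic frequency scaling are scaled
      * through the clock framework, halving the base rate per freq_scale
      * step; all others go through the internal FSCALE divider, where a
      * value of 64 means full speed.
      */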
 449 static void etnaviv_gpu_update_clock(struct etnaviv_gpu *gpu)
 450 {
 451         if (gpu->identity.minor_features2 &
 452             chipMinorFeatures2_DYNAMIC_FREQUENCY_SCALING) {
 453                 clk_set_rate(gpu->clk_core,
 454                              gpu->base_rate_core >> gpu->freq_scale);
 455                 clk_set_rate(gpu->clk_shader,
 456                              gpu->base_rate_shader >> gpu->freq_scale);
 457         } else {
 458                 unsigned int fscale = 1 << (6 - gpu->freq_scale);
 459                 u32 clock = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
 460 
 461                 clock &= ~VIVS_HI_CLOCK_CONTROL_FSCALE_VAL__MASK;
 462                 clock |= VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(fscale);
 463                 etnaviv_gpu_load_clock(gpu, clock);
 464         }
 465 }
 466 
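     /*
      * Reset sequence: force the clock on, isolate the GPU from the bus,
      * pulse soft reset (or the MMU AHB reset when the kernel is the
      * security master), de-isolate again, then check that the FE and both
      * pipes report idle.  This is retried for up to a second before
      * giving up.
      */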
 467 static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
 468 {
 469         u32 control, idle;
 470         unsigned long timeout;
 471         bool failed = true;
 472 
 473         /* We hope that the GPU resets in under one second */
 474         timeout = jiffies + msecs_to_jiffies(1000);
 475 
 476         while (time_is_after_jiffies(timeout)) {
 477                 /* enable clock */
 478                 unsigned int fscale = 1 << (6 - gpu->freq_scale);
 479                 control = VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(fscale);
 480                 etnaviv_gpu_load_clock(gpu, control);
 481 
 482                 /* isolate the GPU. */
 483                 control |= VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
 484                 gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
 485 
 486                 if (gpu->sec_mode == ETNA_SEC_KERNEL) {
 487                         gpu_write(gpu, VIVS_MMUv2_AHB_CONTROL,
 488                                   VIVS_MMUv2_AHB_CONTROL_RESET);
 489                 } else {
 490                         /* set soft reset. */
 491                         control |= VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
 492                         gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
 493                 }
 494 
 495                 /* wait for reset. */
 496                 usleep_range(10, 20);
 497 
 498                 /* reset soft reset bit. */
 499                 control &= ~VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
 500                 gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
 501 
 502                 /* reset GPU isolation. */
 503                 control &= ~VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
 504                 gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
 505 
 506                 /* read idle register. */
 507                 idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
 508 
 509                 /* try resetting again if FE is not idle */
 510                 if ((idle & VIVS_HI_IDLE_STATE_FE) == 0) {
 511                         dev_dbg(gpu->dev, "FE is not idle\n");
 512                         continue;
 513                 }
 514 
 515                 /* read reset register. */
 516                 control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
 517 
 518                 /* is the GPU idle? */
 519                 if (((control & VIVS_HI_CLOCK_CONTROL_IDLE_3D) == 0) ||
 520                     ((control & VIVS_HI_CLOCK_CONTROL_IDLE_2D) == 0)) {
 521                         dev_dbg(gpu->dev, "GPU is not idle\n");
 522                         continue;
 523                 }
 524 
 525                 /* disable debug registers, as they are not normally needed */
 526                 control |= VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS;
 527                 gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
 528 
 529                 failed = false;
 530                 break;
 531         }
 532 
 533         if (failed) {
 534                 idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
 535                 control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
 536 
 537                 dev_err(gpu->dev, "GPU failed to reset: FE %sidle, 3D %sidle, 2D %sidle\n",
 538                         idle & VIVS_HI_IDLE_STATE_FE ? "" : "not ",
 539                         control & VIVS_HI_CLOCK_CONTROL_IDLE_3D ? "" : "not ",
 540                         control & VIVS_HI_CLOCK_CONTROL_IDLE_2D ? "" : "not ");
 541 
 542                 return -EBUSY;
 543         }
 544 
 545         /* We rely on the GPU running, so program the clock */
 546         etnaviv_gpu_update_clock(gpu);
 547 
 548         return 0;
 549 }
 550 
 551 static void etnaviv_gpu_enable_mlcg(struct etnaviv_gpu *gpu)
 552 {
 553         u32 pmc, ppc;
 554 
 555         /* enable clock gating */
 556         ppc = gpu_read(gpu, VIVS_PM_POWER_CONTROLS);
 557         ppc |= VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;
 558 
 559         /* Disable stall module clock gating for 4.3.0.1 and 4.3.0.2 revs */
 560         if (gpu->identity.revision == 0x4301 ||
 561             gpu->identity.revision == 0x4302)
 562                 ppc |= VIVS_PM_POWER_CONTROLS_DISABLE_STALL_MODULE_CLOCK_GATING;
 563 
 564         gpu_write(gpu, VIVS_PM_POWER_CONTROLS, ppc);
 565 
 566         pmc = gpu_read(gpu, VIVS_PM_MODULE_CONTROLS);
 567 
 568         /* Disable PA clock gating for GC400+ without bugfix except for GC420 */
 569         if (gpu->identity.model >= chipModel_GC400 &&
 570             gpu->identity.model != chipModel_GC420 &&
 571             !(gpu->identity.minor_features3 & chipMinorFeatures3_BUG_FIXES12))
 572                 pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PA;
 573 
 574         /*
 575          * Disable PE clock gating on revs < 5.0.0.0 when HZ is
 576          * present without a bug fix.
 577          */
 578         if (gpu->identity.revision < 0x5000 &&
 579             gpu->identity.minor_features0 & chipMinorFeatures0_HZ &&
 580             !(gpu->identity.minor_features1 &
 581               chipMinorFeatures1_DISABLE_PE_GATING))
 582                 pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PE;
 583 
 584         if (gpu->identity.revision < 0x5422)
 585                 pmc |= BIT(15); /* Unknown bit */
 586 
 587         /* Disable TX clock gating on affected core revisions. */
 588         if (etnaviv_is_model_rev(gpu, GC4000, 0x5222) ||
 589             etnaviv_is_model_rev(gpu, GC2000, 0x5108))
 590                 pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_TX;
 591 
 592         pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_HZ;
 593         pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_EZ;
 594 
 595         gpu_write(gpu, VIVS_PM_MODULE_CONTROLS, pmc);
 596 }
 597 
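     /*
      * Point the FE DMA engine at a command buffer and kick it; prefetch is
      * the number of 64-bit command words to fetch.  In kernel security
      * mode the kick must additionally go through the secure command
      * control register.
      */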
 598 void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch)
 599 {
 600         gpu_write(gpu, VIVS_FE_COMMAND_ADDRESS, address);
 601         gpu_write(gpu, VIVS_FE_COMMAND_CONTROL,
 602                   VIVS_FE_COMMAND_CONTROL_ENABLE |
 603                   VIVS_FE_COMMAND_CONTROL_PREFETCH(prefetch));
 604 
 605         if (gpu->sec_mode == ETNA_SEC_KERNEL) {
 606                 gpu_write(gpu, VIVS_MMUv2_SEC_COMMAND_CONTROL,
 607                           VIVS_MMUv2_SEC_COMMAND_CONTROL_ENABLE |
 608                           VIVS_MMUv2_SEC_COMMAND_CONTROL_PREFETCH(prefetch));
 609         }
 610 }
 611 
 612 static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu)
 613 {
 614         u32 address = etnaviv_cmdbuf_get_va(&gpu->buffer,
 615                                 &gpu->mmu_context->cmdbuf_mapping);
 616         u16 prefetch;
 617 
 618         /* setup the MMU */
 619         etnaviv_iommu_restore(gpu, gpu->mmu_context);
 620 
 621         /* Start command processor */
 622         prefetch = etnaviv_buffer_init(gpu);
 623 
 624         etnaviv_gpu_start_fe(gpu, address, prefetch);
 625 }
 626 
 627 static void etnaviv_gpu_setup_pulse_eater(struct etnaviv_gpu *gpu)
 628 {
 629         /*
 630          * Base value for VIVS_PM_PULSE_EATER register on models where it
 631          * cannot be read, extracted from the Vivante kernel driver.
 632          */
 633         u32 pulse_eater = 0x01590880;
 634 
 635         if (etnaviv_is_model_rev(gpu, GC4000, 0x5208) ||
 636             etnaviv_is_model_rev(gpu, GC4000, 0x5222)) {
 637                 pulse_eater |= BIT(23);
 638 
 639         }
 640 
 641         if (etnaviv_is_model_rev(gpu, GC1000, 0x5039) ||
 642             etnaviv_is_model_rev(gpu, GC1000, 0x5040)) {
 643                 pulse_eater &= ~BIT(16);
 644                 pulse_eater |= BIT(17);
 645         }
 646 
 647         if ((gpu->identity.revision > 0x5420) &&
 648             (gpu->identity.features & chipFeatures_PIPE_3D))
 649         {
 650                 /* Performance fix: disable internal DFS */
 651                 pulse_eater = gpu_read(gpu, VIVS_PM_PULSE_EATER);
 652                 pulse_eater |= BIT(18);
 653         }
 654 
 655         gpu_write(gpu, VIVS_PM_PULSE_EATER, pulse_eater);
 656 }
 657 
 658 static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
 659 {
 660         if ((etnaviv_is_model_rev(gpu, GC320, 0x5007) ||
 661              etnaviv_is_model_rev(gpu, GC320, 0x5220)) &&
 662             gpu_read(gpu, VIVS_HI_CHIP_TIME) != 0x2062400) {
 663                 u32 mc_memory_debug;
 664 
 665                 mc_memory_debug = gpu_read(gpu, VIVS_MC_DEBUG_MEMORY) & ~0xff;
 666 
 667                 if (gpu->identity.revision == 0x5007)
 668                         mc_memory_debug |= 0x0c;
 669                 else
 670                         mc_memory_debug |= 0x08;
 671 
 672                 gpu_write(gpu, VIVS_MC_DEBUG_MEMORY, mc_memory_debug);
 673         }
 674 
 675         /* enable module-level clock gating */
 676         etnaviv_gpu_enable_mlcg(gpu);
 677 
 678         /*
 679          * Update the GPU AXI cache attribute to "cacheable, no allocate".
 680          * This is necessary to prevent the iMX6 SoC locking up.
 681          */
 682         gpu_write(gpu, VIVS_HI_AXI_CONFIG,
 683                   VIVS_HI_AXI_CONFIG_AWCACHE(2) |
 684                   VIVS_HI_AXI_CONFIG_ARCACHE(2));
 685 
 686         /* GC2000 rev 5108 needs a special bus config */
 687         if (etnaviv_is_model_rev(gpu, GC2000, 0x5108)) {
 688                 u32 bus_config = gpu_read(gpu, VIVS_MC_BUS_CONFIG);
 689                 bus_config &= ~(VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__MASK |
 690                                 VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__MASK);
 691                 bus_config |= VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG(1) |
 692                               VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG(0);
 693                 gpu_write(gpu, VIVS_MC_BUS_CONFIG, bus_config);
 694         }
 695 
 696         if (gpu->sec_mode == ETNA_SEC_KERNEL) {
 697                 u32 val = gpu_read(gpu, VIVS_MMUv2_AHB_CONTROL);
 698                 val |= VIVS_MMUv2_AHB_CONTROL_NONSEC_ACCESS;
 699                 gpu_write(gpu, VIVS_MMUv2_AHB_CONTROL, val);
 700         }
 701 
 702         /* setup the pulse eater */
 703         etnaviv_gpu_setup_pulse_eater(gpu);
 704 
 705         gpu_write(gpu, VIVS_HI_INTR_ENBL, ~0U);
 706 }
 707 
 708 int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
 709 {
 710         struct etnaviv_drm_private *priv = gpu->drm->dev_private;
 711         int ret, i;
 712 
 713         ret = pm_runtime_get_sync(gpu->dev);
 714         if (ret < 0) {
 715                 dev_err(gpu->dev, "Failed to enable GPU power domain\n");
 716                 return ret;
 717         }
 718 
 719         etnaviv_hw_identify(gpu);
 720 
 721         if (gpu->identity.model == 0) {
 722                 dev_err(gpu->dev, "Unknown GPU model\n");
 723                 ret = -ENXIO;
 724                 goto fail;
 725         }
 726 
 727         /* Exclude VG cores with FE2.0 */
 728         if (gpu->identity.features & chipFeatures_PIPE_VG &&
 729             gpu->identity.features & chipFeatures_FE20) {
 730                 dev_info(gpu->dev, "Ignoring GPU with VG and FE2.0\n");
 731                 ret = -ENXIO;
 732                 goto fail;
 733         }
 734 
 735         /*
 736          * On cores that support security features, we claim control over
 737          * the security state.
 738          */
 739         if ((gpu->identity.minor_features7 & chipMinorFeatures7_BIT_SECURITY) &&
 740             (gpu->identity.minor_features10 & chipMinorFeatures10_SECURITY_AHB))
 741                 gpu->sec_mode = ETNA_SEC_KERNEL;
 742 
 743         ret = etnaviv_hw_reset(gpu);
 744         if (ret) {
 745                 dev_err(gpu->dev, "GPU reset failed\n");
 746                 goto fail;
 747         }
 748 
 749         ret = etnaviv_iommu_global_init(gpu);
 750         if (ret)
 751                 goto fail;
 752 
 753         /*
 754          * Set the GPU linear window to be at the end of the DMA window, where
 755          * the CMA area is likely to reside. This ensures that we are able to
 756          * map the command buffers while having the linear window overlap as
 757          * much RAM as possible, so we can optimize mappings for other buffers.
 758          *
 759          * For 3D cores only do this if MC2.0 is present, as with MC1.0 it leads
 760          * to different views of the memory on the individual engines.
 761          */
 762         if (!(gpu->identity.features & chipFeatures_PIPE_3D) ||
 763             (gpu->identity.minor_features0 & chipMinorFeatures0_MC20)) {
 764                 u32 dma_mask = (u32)dma_get_required_mask(gpu->dev);
 765                 if (dma_mask < PHYS_OFFSET + SZ_2G)
 766                         priv->mmu_global->memory_base = PHYS_OFFSET;
 767                 else
 768                         priv->mmu_global->memory_base = dma_mask - SZ_2G + 1;
 769         } else if (PHYS_OFFSET >= SZ_2G) {
 770                 dev_info(gpu->dev, "Need to move linear window on MC1.0, disabling TS\n");
 771                 priv->mmu_global->memory_base = PHYS_OFFSET;
 772                 gpu->identity.features &= ~chipFeatures_FAST_CLEAR;
 773         }
 774 
 775         /* Create buffer: */
 776         ret = etnaviv_cmdbuf_init(priv->cmdbuf_suballoc, &gpu->buffer,
 777                                   PAGE_SIZE);
 778         if (ret) {
 779                 dev_err(gpu->dev, "could not create command buffer\n");
 780                 goto fail;
 781         }
 782 
 783         /* Setup event management */
 784         spin_lock_init(&gpu->event_spinlock);
 785         init_completion(&gpu->event_free);
 786         bitmap_zero(gpu->event_bitmap, ETNA_NR_EVENTS);
 787         for (i = 0; i < ARRAY_SIZE(gpu->event); i++)
 788                 complete(&gpu->event_free);
 789 
 790         /* Now program the hardware */
 791         mutex_lock(&gpu->lock);
 792         etnaviv_gpu_hw_init(gpu);
 793         gpu->exec_state = -1;
 794         mutex_unlock(&gpu->lock);
 795 
 796         pm_runtime_mark_last_busy(gpu->dev);
 797         pm_runtime_put_autosuspend(gpu->dev);
 798 
 799         gpu->initialized = true;
 800 
 801         return 0;
 802 
 803 fail:
 804         pm_runtime_mark_last_busy(gpu->dev);
 805         pm_runtime_put_autosuspend(gpu->dev);
 806 
 807         return ret;
 808 }
 809 
 810 #ifdef CONFIG_DEBUG_FS
 811 struct dma_debug {
 812         u32 address[2];
 813         u32 state[2];
 814 };
 815 
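     /*
      * Capture the FE DMA address/state, then re-read them up to 500 times;
      * if neither ever changes, the debugfs dump below reports the front
      * end as stuck rather than running.
      */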
 816 static void verify_dma(struct etnaviv_gpu *gpu, struct dma_debug *debug)
 817 {
 818         u32 i;
 819 
 820         debug->address[0] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
 821         debug->state[0]   = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE);
 822 
 823         for (i = 0; i < 500; i++) {
 824                 debug->address[1] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
 825                 debug->state[1]   = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE);
 826 
 827                 if (debug->address[0] != debug->address[1])
 828                         break;
 829 
 830                 if (debug->state[0] != debug->state[1])
 831                         break;
 832         }
 833 }
 834 
 835 int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
 836 {
 837         struct dma_debug debug;
 838         u32 dma_lo, dma_hi, axi, idle;
 839         int ret;
 840 
 841         seq_printf(m, "%s Status:\n", dev_name(gpu->dev));
 842 
 843         ret = pm_runtime_get_sync(gpu->dev);
 844         if (ret < 0)
 845                 return ret;
 846 
 847         dma_lo = gpu_read(gpu, VIVS_FE_DMA_LOW);
 848         dma_hi = gpu_read(gpu, VIVS_FE_DMA_HIGH);
 849         axi = gpu_read(gpu, VIVS_HI_AXI_STATUS);
 850         idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
 851 
 852         verify_dma(gpu, &debug);
 853 
 854         seq_puts(m, "\tfeatures\n");
 855         seq_printf(m, "\t major_features: 0x%08x\n",
 856                    gpu->identity.features);
 857         seq_printf(m, "\t minor_features0: 0x%08x\n",
 858                    gpu->identity.minor_features0);
 859         seq_printf(m, "\t minor_features1: 0x%08x\n",
 860                    gpu->identity.minor_features1);
 861         seq_printf(m, "\t minor_features2: 0x%08x\n",
 862                    gpu->identity.minor_features2);
 863         seq_printf(m, "\t minor_features3: 0x%08x\n",
 864                    gpu->identity.minor_features3);
 865         seq_printf(m, "\t minor_features4: 0x%08x\n",
 866                    gpu->identity.minor_features4);
 867         seq_printf(m, "\t minor_features5: 0x%08x\n",
 868                    gpu->identity.minor_features5);
 869         seq_printf(m, "\t minor_features6: 0x%08x\n",
 870                    gpu->identity.minor_features6);
 871         seq_printf(m, "\t minor_features7: 0x%08x\n",
 872                    gpu->identity.minor_features7);
 873         seq_printf(m, "\t minor_features8: 0x%08x\n",
 874                    gpu->identity.minor_features8);
 875         seq_printf(m, "\t minor_features9: 0x%08x\n",
 876                    gpu->identity.minor_features9);
 877         seq_printf(m, "\t minor_features10: 0x%08x\n",
 878                    gpu->identity.minor_features10);
 879         seq_printf(m, "\t minor_features11: 0x%08x\n",
 880                    gpu->identity.minor_features11);
 881 
 882         seq_puts(m, "\tspecs\n");
 883         seq_printf(m, "\t stream_count:  %d\n",
 884                         gpu->identity.stream_count);
 885         seq_printf(m, "\t register_max: %d\n",
 886                         gpu->identity.register_max);
 887         seq_printf(m, "\t thread_count: %d\n",
 888                         gpu->identity.thread_count);
 889         seq_printf(m, "\t vertex_cache_size: %d\n",
 890                         gpu->identity.vertex_cache_size);
 891         seq_printf(m, "\t shader_core_count: %d\n",
 892                         gpu->identity.shader_core_count);
 893         seq_printf(m, "\t pixel_pipes: %d\n",
 894                         gpu->identity.pixel_pipes);
 895         seq_printf(m, "\t vertex_output_buffer_size: %d\n",
 896                         gpu->identity.vertex_output_buffer_size);
 897         seq_printf(m, "\t buffer_size: %d\n",
 898                         gpu->identity.buffer_size);
 899         seq_printf(m, "\t instruction_count: %d\n",
 900                         gpu->identity.instruction_count);
 901         seq_printf(m, "\t num_constants: %d\n",
 902                         gpu->identity.num_constants);
 903         seq_printf(m, "\t varyings_count: %d\n",
 904                         gpu->identity.varyings_count);
 905 
 906         seq_printf(m, "\taxi: 0x%08x\n", axi);
 907         seq_printf(m, "\tidle: 0x%08x\n", idle);
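             /* Treat modules that are not present on this core as idle, so
              * the checks below don't report them as busy. */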
 908         idle |= ~gpu->idle_mask & ~VIVS_HI_IDLE_STATE_AXI_LP;
 909         if ((idle & VIVS_HI_IDLE_STATE_FE) == 0)
 910                 seq_puts(m, "\t FE is not idle\n");
 911         if ((idle & VIVS_HI_IDLE_STATE_DE) == 0)
 912                 seq_puts(m, "\t DE is not idle\n");
 913         if ((idle & VIVS_HI_IDLE_STATE_PE) == 0)
 914                 seq_puts(m, "\t PE is not idle\n");
 915         if ((idle & VIVS_HI_IDLE_STATE_SH) == 0)
 916                 seq_puts(m, "\t SH is not idle\n");
 917         if ((idle & VIVS_HI_IDLE_STATE_PA) == 0)
 918                 seq_puts(m, "\t PA is not idle\n");
 919         if ((idle & VIVS_HI_IDLE_STATE_SE) == 0)
 920                 seq_puts(m, "\t SE is not idle\n");
 921         if ((idle & VIVS_HI_IDLE_STATE_RA) == 0)
 922                 seq_puts(m, "\t RA is not idle\n");
 923         if ((idle & VIVS_HI_IDLE_STATE_TX) == 0)
 924                 seq_puts(m, "\t TX is not idle\n");
 925         if ((idle & VIVS_HI_IDLE_STATE_VG) == 0)
 926                 seq_puts(m, "\t VG is not idle\n");
 927         if ((idle & VIVS_HI_IDLE_STATE_IM) == 0)
 928                 seq_puts(m, "\t IM is not idle\n");
 929         if ((idle & VIVS_HI_IDLE_STATE_FP) == 0)
 930                 seq_puts(m, "\t FP is not idle\n");
 931         if ((idle & VIVS_HI_IDLE_STATE_TS) == 0)
 932                 seq_puts(m, "\t TS is not idle\n");
 933         if (idle & VIVS_HI_IDLE_STATE_AXI_LP)
 934                 seq_puts(m, "\t AXI low power mode\n");
 935 
 936         if (gpu->identity.features & chipFeatures_DEBUG_MODE) {
 937                 u32 read0 = gpu_read(gpu, VIVS_MC_DEBUG_READ0);
 938                 u32 read1 = gpu_read(gpu, VIVS_MC_DEBUG_READ1);
 939                 u32 write = gpu_read(gpu, VIVS_MC_DEBUG_WRITE);
 940 
 941                 seq_puts(m, "\tMC\n");
 942                 seq_printf(m, "\t read0: 0x%08x\n", read0);
 943                 seq_printf(m, "\t read1: 0x%08x\n", read1);
 944                 seq_printf(m, "\t write: 0x%08x\n", write);
 945         }
 946 
 947         seq_puts(m, "\tDMA ");
 948 
 949         if (debug.address[0] == debug.address[1] &&
 950             debug.state[0] == debug.state[1]) {
 951                 seq_puts(m, "seems to be stuck\n");
 952         } else if (debug.address[0] == debug.address[1]) {
 953                 seq_puts(m, "address is constant\n");
 954         } else {
 955                 seq_puts(m, "is running\n");
 956         }
 957 
 958         seq_printf(m, "\t address 0: 0x%08x\n", debug.address[0]);
 959         seq_printf(m, "\t address 1: 0x%08x\n", debug.address[1]);
 960         seq_printf(m, "\t state 0: 0x%08x\n", debug.state[0]);
 961         seq_printf(m, "\t state 1: 0x%08x\n", debug.state[1]);
 962         seq_printf(m, "\t last fetch 64 bit word: 0x%08x 0x%08x\n",
 963                    dma_lo, dma_hi);
 964 
 965         ret = 0;
 966 
 967         pm_runtime_mark_last_busy(gpu->dev);
 968         pm_runtime_put_autosuspend(gpu->dev);
 969 
 970         return ret;
 971 }
 972 #endif
 973 
 974 void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu)
 975 {
 976         unsigned int i = 0;
 977 
 978         dev_err(gpu->dev, "recover hung GPU!\n");
 979 
 980         if (pm_runtime_get_sync(gpu->dev) < 0)
 981                 return;
 982 
 983         mutex_lock(&gpu->lock);
 984 
 985         etnaviv_hw_reset(gpu);
 986 
 987         /* complete all events, the GPU won't do it after the reset */
 988         spin_lock(&gpu->event_spinlock);
 989         for_each_set_bit_from(i, gpu->event_bitmap, ETNA_NR_EVENTS)
 990                 complete(&gpu->event_free);
 991         bitmap_zero(gpu->event_bitmap, ETNA_NR_EVENTS);
 992         spin_unlock(&gpu->event_spinlock);
 993 
 994         etnaviv_gpu_hw_init(gpu);
 995         gpu->exec_state = -1;
 996         gpu->mmu_context = NULL;
 997 
 998         mutex_unlock(&gpu->lock);
 999         pm_runtime_mark_last_busy(gpu->dev);
1000         pm_runtime_put_autosuspend(gpu->dev);
1001 }
1002 
1003 /* fence object management */
1004 struct etnaviv_fence {
1005         struct etnaviv_gpu *gpu;
1006         struct dma_fence base;
1007 };
1008 
1009 static inline struct etnaviv_fence *to_etnaviv_fence(struct dma_fence *fence)
1010 {
1011         return container_of(fence, struct etnaviv_fence, base);
1012 }
1013 
1014 static const char *etnaviv_fence_get_driver_name(struct dma_fence *fence)
1015 {
1016         return "etnaviv";
1017 }
1018 
1019 static const char *etnaviv_fence_get_timeline_name(struct dma_fence *fence)
1020 {
1021         struct etnaviv_fence *f = to_etnaviv_fence(fence);
1022 
1023         return dev_name(f->gpu->dev);
1024 }
1025 
1026 static bool etnaviv_fence_signaled(struct dma_fence *fence)
1027 {
1028         struct etnaviv_fence *f = to_etnaviv_fence(fence);
1029 
1030         return (s32)(f->gpu->completed_fence - f->base.seqno) >= 0;
1031 }
1032 
1033 static void etnaviv_fence_release(struct dma_fence *fence)
1034 {
1035         struct etnaviv_fence *f = to_etnaviv_fence(fence);
1036 
1037         kfree_rcu(f, base.rcu);
1038 }
1039 
1040 static const struct dma_fence_ops etnaviv_fence_ops = {
1041         .get_driver_name = etnaviv_fence_get_driver_name,
1042         .get_timeline_name = etnaviv_fence_get_timeline_name,
1043         .signaled = etnaviv_fence_signaled,
1044         .release = etnaviv_fence_release,
1045 };
1046 
1047 static struct dma_fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
1048 {
1049         struct etnaviv_fence *f;
1050 
1051         /*
1052          * GPU lock must already be held, otherwise fence completion order might
1053          * not match the seqno order assigned here.
1054          */
1055         lockdep_assert_held(&gpu->lock);
1056 
1057         f = kzalloc(sizeof(*f), GFP_KERNEL);
1058         if (!f)
1059                 return NULL;
1060 
1061         f->gpu = gpu;
1062 
1063         dma_fence_init(&f->base, &etnaviv_fence_ops, &gpu->fence_spinlock,
1064                        gpu->fence_context, ++gpu->next_fence);
1065 
1066         return &f->base;
1067 }
1068 
1069 /* returns true if fence a comes after fence b */
1070 static inline bool fence_after(u32 a, u32 b)
1071 {
1072         return (s32)(a - b) > 0;
1073 }
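     /*
      * The unsigned subtraction with a signed comparison keeps this correct
      * across seqno wrap-around: e.g. fence_after(1, 0xffffffff) is true,
      * since (s32)(1 - 0xffffffff) == 2 > 0.
      */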
1074 
1075 /*
1076  * event management:
1077  */
1078 
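     /*
      * gpu->event_free acts as a counting semaphore: it is completed once
      * per free event slot, so every successful wait in event_alloc()
      * reserves one slot before the bitmap search under the spinlock picks
      * the actual slot number.  The total wait budget is about 100 seconds,
      * with the remaining time carried over from one event to the next.
      */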
1079 static int event_alloc(struct etnaviv_gpu *gpu, unsigned nr_events,
1080         unsigned int *events)
1081 {
1082         unsigned long timeout = msecs_to_jiffies(10 * 10000);
1083         unsigned i, acquired = 0;
1084 
1085         for (i = 0; i < nr_events; i++) {
1086                 unsigned long ret;
1087 
1088                 ret = wait_for_completion_timeout(&gpu->event_free, timeout);
1089 
1090                 if (!ret) {
1091                         dev_err(gpu->dev, "wait_for_completion_timeout failed\n");
1092                         goto out;
1093                 }
1094 
1095                 acquired++;
1096                 timeout = ret;
1097         }
1098 
1099         spin_lock(&gpu->event_spinlock);
1100 
1101         for (i = 0; i < nr_events; i++) {
1102                 int event = find_first_zero_bit(gpu->event_bitmap, ETNA_NR_EVENTS);
1103 
1104                 events[i] = event;
1105                 memset(&gpu->event[event], 0, sizeof(struct etnaviv_event));
1106                 set_bit(event, gpu->event_bitmap);
1107         }
1108 
1109         spin_unlock(&gpu->event_spinlock);
1110 
1111         return 0;
1112 
1113 out:
1114         for (i = 0; i < acquired; i++)
1115                 complete(&gpu->event_free);
1116 
1117         return -EBUSY;
1118 }
1119 
1120 static void event_free(struct etnaviv_gpu *gpu, unsigned int event)
1121 {
1122         if (!test_bit(event, gpu->event_bitmap)) {
1123                 dev_warn(gpu->dev, "event %u is already marked as free\n",
1124                          event);
1125         } else {
1126                 clear_bit(event, gpu->event_bitmap);
1127                 complete(&gpu->event_free);
1128         }
1129 }
1130 
1131 /*
1132  * Cmdstream submission/retirement:
1133  */
1134 int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
1135         u32 id, struct timespec *timeout)
1136 {
1137         struct dma_fence *fence;
1138         int ret;
1139 
1140         /*
1141          * Look up the fence and take a reference. We might still find a fence
1142          * whose refcount has already dropped to zero. dma_fence_get_rcu
1143          * pretends we didn't find a fence in that case.
1144          */
1145         rcu_read_lock();
1146         fence = idr_find(&gpu->fence_idr, id);
1147         if (fence)
1148                 fence = dma_fence_get_rcu(fence);
1149         rcu_read_unlock();
1150 
1151         if (!fence)
1152                 return 0;
1153 
1154         if (!timeout) {
1155                 /* No timeout was requested: just test for completion */
1156                 ret = dma_fence_is_signaled(fence) ? 0 : -EBUSY;
1157         } else {
1158                 unsigned long remaining = etnaviv_timeout_to_jiffies(timeout);
1159 
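                     /*
                      * dma_fence_wait_timeout() returns the remaining
                      * jiffies on success, 0 on timeout and -ERESTARTSYS
                      * when interrupted; fold the success case back to 0.
                      */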
1160                 ret = dma_fence_wait_timeout(fence, true, remaining);
1161                 if (ret == 0)
1162                         ret = -ETIMEDOUT;
1163                 else if (ret != -ERESTARTSYS)
1164                         ret = 0;
1165 
1166         }
1167 
1168         dma_fence_put(fence);
1169         return ret;
1170 }
1171 
1172 /*
1173  * Wait for an object to become inactive.  This, on its own, is not race
1174  * free: the object is moved by the scheduler off the active list, and
1175  * then the iova is put.  Moreover, the object could be re-submitted just
1176  * after we notice that it's become inactive.
1177  *
1178  * Although the retirement happens under the gpu lock, we don't want to hold
1179  * that lock in this function while waiting.
1180  */
1181 int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
1182         struct etnaviv_gem_object *etnaviv_obj, struct timespec *timeout)
1183 {
1184         unsigned long remaining;
1185         long ret;
1186 
1187         if (!timeout)
1188                 return !is_active(etnaviv_obj) ? 0 : -EBUSY;
1189 
1190         remaining = etnaviv_timeout_to_jiffies(timeout);
1191 
1192         ret = wait_event_interruptible_timeout(gpu->fence_event,
1193                                                !is_active(etnaviv_obj),
1194                                                remaining);
1195         if (ret > 0)
1196                 return 0;
1197         else if (ret == -ERESTARTSYS)
1198                 return -ERESTARTSYS;
1199         else
1200                 return -ETIMEDOUT;
1201 }
1202 
1203 static void sync_point_perfmon_sample(struct etnaviv_gpu *gpu,
1204         struct etnaviv_event *event, unsigned int flags)
1205 {
1206         const struct etnaviv_gem_submit *submit = event->submit;
1207         unsigned int i;
1208 
1209         for (i = 0; i < submit->nr_pmrs; i++) {
1210                 const struct etnaviv_perfmon_request *pmr = submit->pmrs + i;
1211 
1212                 if (pmr->flags == flags)
1213                         etnaviv_perfmon_process(gpu, pmr, submit->exec_state);
1214         }
1215 }
1216 
1217 static void sync_point_perfmon_sample_pre(struct etnaviv_gpu *gpu,
1218         struct etnaviv_event *event)
1219 {
1220         u32 val;
1221 
1222         /* disable clock gating */
1223         val = gpu_read(gpu, VIVS_PM_POWER_CONTROLS);
1224         val &= ~VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;
1225         gpu_write(gpu, VIVS_PM_POWER_CONTROLS, val);
1226 
1227         /* enable debug register */
1228         val = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
1229         val &= ~VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS;
1230         gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, val);
1231 
1232         sync_point_perfmon_sample(gpu, event, ETNA_PM_PROCESS_PRE);
1233 }
1234 
1235 static void sync_point_perfmon_sample_post(struct etnaviv_gpu *gpu,
1236         struct etnaviv_event *event)
1237 {
1238         const struct etnaviv_gem_submit *submit = event->submit;
1239         unsigned int i;
1240         u32 val;
1241 
1242         sync_point_perfmon_sample(gpu, event, ETNA_PM_PROCESS_POST);
1243 
1244         for (i = 0; i < submit->nr_pmrs; i++) {
1245                 const struct etnaviv_perfmon_request *pmr = submit->pmrs + i;
1246 
1247                 *pmr->bo_vma = pmr->sequence;
1248         }
1249 
1250         /* disable debug register */
1251         val = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
1252         val |= VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS;
1253         gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, val);
1254 
1255         /* enable clock gating */
1256         val = gpu_read(gpu, VIVS_PM_POWER_CONTROLS);
1257         val |= VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;
1258         gpu_write(gpu, VIVS_PM_POWER_CONTROLS, val);
1259 }
1260 
1261 
1262 /* add bo's to gpu's ring, and kick gpu: */
1263 struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit)
1264 {
1265         struct etnaviv_gpu *gpu = submit->gpu;
1266         struct dma_fence *gpu_fence;
1267         unsigned int i, nr_events = 1, event[3];
1268         int ret;
1269 
1270         if (!submit->runtime_resumed) {
1271                 ret = pm_runtime_get_sync(gpu->dev);
1272                 if (ret < 0)
1273                         return NULL;
1274                 submit->runtime_resumed = true;
1275         }
1276 
1277         /*
1278          * if there are performance monitor requests we need to have
1279          * - a sync point to re-configure gpu and process ETNA_PM_PROCESS_PRE
1280          *   requests.
1281          * - a sync point to re-configure gpu, process ETNA_PM_PROCESS_POST requests
1282          *   and update the sequence number for userspace.
1283          */
1284         if (submit->nr_pmrs)
1285                 nr_events = 3;
1286 
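             /*
              * Event slot usage: event[0] signals the GPU fence, while
              * event[1] and event[2] carry the pre/post perfmon sample sync
              * points queued below.
              */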
1287         ret = event_alloc(gpu, nr_events, event);
1288         if (ret) {
1289                 DRM_ERROR("no free events\n");
1290                 return NULL;
1291         }
1292 
1293         mutex_lock(&gpu->lock);
1294 
1295         gpu_fence = etnaviv_gpu_fence_alloc(gpu);
1296         if (!gpu_fence) {
1297                 for (i = 0; i < nr_events; i++)
1298                         event_free(gpu, event[i]);
1299 
1300                 goto out_unlock;
1301         }
1302 
1303         if (!gpu->mmu_context) {
1304                 etnaviv_iommu_context_get(submit->mmu_context);
1305                 gpu->mmu_context = submit->mmu_context;
1306                 etnaviv_gpu_start_fe_idleloop(gpu);
1307         } else {
1308                 etnaviv_iommu_context_get(gpu->mmu_context);
1309                 submit->prev_mmu_context = gpu->mmu_context;
1310         }
1311 
1312         if (submit->nr_pmrs) {
1313                 gpu->event[event[1]].sync_point = &sync_point_perfmon_sample_pre;
1314                 kref_get(&submit->refcount);
1315                 gpu->event[event[1]].submit = submit;
1316                 etnaviv_sync_point_queue(gpu, event[1]);
1317         }
1318 
1319         gpu->event[event[0]].fence = gpu_fence;
1320         submit->cmdbuf.user_size = submit->cmdbuf.size - 8;
1321         etnaviv_buffer_queue(gpu, submit->exec_state, submit->mmu_context,
1322                              event[0], &submit->cmdbuf);
1323 
1324         if (submit->nr_pmrs) {
1325                 gpu->event[event[2]].sync_point = &sync_point_perfmon_sample_post;
1326                 kref_get(&submit->refcount);
1327                 gpu->event[event[2]].submit = submit;
1328                 etnaviv_sync_point_queue(gpu, event[2]);
1329         }
1330 
1331 out_unlock:
1332         mutex_unlock(&gpu->lock);
1333 
1334         return gpu_fence;
1335 }
1336 
1337 static void sync_point_worker(struct work_struct *work)
1338 {
1339         struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
1340                                                sync_point_work);
1341         struct etnaviv_event *event = &gpu->event[gpu->sync_point_event];
1342         u32 addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
1343 
1344         event->sync_point(gpu, event);
1345         etnaviv_submit_put(event->submit);
1346         event_free(gpu, gpu->sync_point_event);
1347 
1348         /* restart FE last to avoid GPU and IRQ racing against this worker */
1349         etnaviv_gpu_start_fe(gpu, addr + 2, 2);
1350 }
1351 
1352 static void dump_mmu_fault(struct etnaviv_gpu *gpu)
1353 {
1354         u32 status_reg, status;
1355         int i;
1356 
1357         if (gpu->sec_mode == ETNA_SEC_NONE)
1358                 status_reg = VIVS_MMUv2_STATUS;
1359         else
1360                 status_reg = VIVS_MMUv2_SEC_STATUS;
1361 
1362         status = gpu_read(gpu, status_reg);
1363         dev_err_ratelimited(gpu->dev, "MMU fault status 0x%08x\n", status);
1364 
1365         for (i = 0; i < 4; i++) {
1366                 u32 address_reg;
1367 
1368                 if (!(status & (VIVS_MMUv2_STATUS_EXCEPTION0__MASK << (i * 4))))
1369                         continue;
1370 
1371                 if (gpu->sec_mode == ETNA_SEC_NONE)
1372                         address_reg = VIVS_MMUv2_EXCEPTION_ADDR(i);
1373                 else
1374                         address_reg = VIVS_MMUv2_SEC_EXCEPTION_ADDR;
1375 
1376                 dev_err_ratelimited(gpu->dev, "MMU %d fault addr 0x%08x\n", i,
1377                                     gpu_read(gpu, address_reg));
1378         }
1379 }
1380 
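     /*
      * Main interrupt handler.  The read of VIVS_HI_INTR_ACKNOWLEDGE is
      * what acknowledges the pending bits (no separate ack write is
      * done).  AXI bus errors and MMU exceptions are logged, then every
      * completed event is retired: sync-point events are handed off to
      * the worker, fence events update the completed seqno and are
      * signalled directly.
      */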
1381 static irqreturn_t irq_handler(int irq, void *data)
1382 {
1383         struct etnaviv_gpu *gpu = data;
1384         irqreturn_t ret = IRQ_NONE;
1385 
1386         u32 intr = gpu_read(gpu, VIVS_HI_INTR_ACKNOWLEDGE);
1387 
1388         if (intr != 0) {
1389                 int event;
1390 
1391                 pm_runtime_mark_last_busy(gpu->dev);
1392 
1393                 dev_dbg(gpu->dev, "intr 0x%08x\n", intr);
1394 
1395                 if (intr & VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR) {
1396                         dev_err(gpu->dev, "AXI bus error\n");
1397                         intr &= ~VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR;
1398                 }
1399 
1400                 if (intr & VIVS_HI_INTR_ACKNOWLEDGE_MMU_EXCEPTION) {
1401                         dump_mmu_fault(gpu);
1402                         intr &= ~VIVS_HI_INTR_ACKNOWLEDGE_MMU_EXCEPTION;
1403                 }
1404 
1405                 while ((event = ffs(intr)) != 0) {
1406                         struct dma_fence *fence;
1407 
1408                         event -= 1;
1409 
1410                         intr &= ~(1 << event);
1411 
1412                         dev_dbg(gpu->dev, "event %u\n", event);
1413 
1414                         if (gpu->event[event].sync_point) {
1415                                 gpu->sync_point_event = event;
1416                                 queue_work(gpu->wq, &gpu->sync_point_work);
1417                         }
1418 
1419                         fence = gpu->event[event].fence;
1420                         if (!fence)
1421                                 continue;
1422 
1423                         gpu->event[event].fence = NULL;
1424 
1425                         /*
1426                          * Events can be processed out of order.  Eg,
1427                          * - allocate and queue event 0
1428                          * - allocate event 1
1429                          * - event 0 completes, we process it
1430                          * - allocate and queue event 0
1431                          * - event 1 and event 0 complete
1432                          * we can end up processing event 0 first, then 1.
1433                          */
1434                         if (fence_after(fence->seqno, gpu->completed_fence))
1435                                 gpu->completed_fence = fence->seqno;
1436                         dma_fence_signal(fence);
1437 
1438                         event_free(gpu, event);
1439                 }
1440 
1441                 ret = IRQ_HANDLED;
1442         }
1443 
1444         return ret;
1445 }
1446 
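     /*
      * Enable the optional register, bus, core and shader clocks in that
      * order, unwinding the already-enabled clocks if a later one fails.
      */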
1447 static int etnaviv_gpu_clk_enable(struct etnaviv_gpu *gpu)
1448 {
1449         int ret;
1450 
1451         if (gpu->clk_reg) {
1452                 ret = clk_prepare_enable(gpu->clk_reg);
1453                 if (ret)
1454                         return ret;
1455         }
1456 
1457         if (gpu->clk_bus) {
1458                 ret = clk_prepare_enable(gpu->clk_bus);
1459                 if (ret)
1460                         goto disable_clk_reg;
1461         }
1462 
1463         if (gpu->clk_core) {
1464                 ret = clk_prepare_enable(gpu->clk_core);
1465                 if (ret)
1466                         goto disable_clk_bus;
1467         }
1468 
1469         if (gpu->clk_shader) {
1470                 ret = clk_prepare_enable(gpu->clk_shader);
1471                 if (ret)
1472                         goto disable_clk_core;
1473         }
1474 
1475         return 0;
1476 
1477 disable_clk_core:
1478         if (gpu->clk_core)
1479                 clk_disable_unprepare(gpu->clk_core);
1480 disable_clk_bus:
1481         if (gpu->clk_bus)
1482                 clk_disable_unprepare(gpu->clk_bus);
     disable_clk_reg:
             if (gpu->clk_reg)
                     clk_disable_unprepare(gpu->clk_reg);
1483 
1484         return ret;
1485 }
1486 
1487 static int etnaviv_gpu_clk_disable(struct etnaviv_gpu *gpu)
1488 {
1489         if (gpu->clk_shader)
1490                 clk_disable_unprepare(gpu->clk_shader);
1491         if (gpu->clk_core)
1492                 clk_disable_unprepare(gpu->clk_core);
1493         if (gpu->clk_bus)
1494                 clk_disable_unprepare(gpu->clk_bus);
1495         if (gpu->clk_reg)
1496                 clk_disable_unprepare(gpu->clk_reg);
1497 
1498         return 0;
1499 }
1500 
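     /*
      * Poll the idle state register every 5us until all units in
      * gpu->idle_mask report idle, or until timeout_ms milliseconds
      * have passed.
      */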
1501 int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms)
1502 {
1503         unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
1504 
1505         do {
1506                 u32 idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
1507 
1508                 if ((idle & gpu->idle_mask) == gpu->idle_mask)
1509                         return 0;
1510 
1511                 if (time_is_before_jiffies(timeout)) {
1512                         dev_warn(gpu->dev,
1513                                  "timed out waiting for idle: idle=0x%x\n",
1514                                  idle);
1515                         return -ETIMEDOUT;
1516                 }
1517 
1518                 udelay(5);
1519         } while (1);
1520 }
1521 
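     /*
      * Park the hardware: replace the FE's WAIT command with END so the
      * front end drains, give it a short grace period to go idle, then
      * drop the MMU context and gate the clocks.
      */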
1522 static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
1523 {
1524         if (gpu->initialized && gpu->mmu_context) {
1525                 /* Replace the last WAIT with END */
1526                 mutex_lock(&gpu->lock);
1527                 etnaviv_buffer_end(gpu);
1528                 mutex_unlock(&gpu->lock);
1529 
1530                 /*
1531                  * We know that only the FE is busy here, this should
1532                  * happen quickly (as the WAIT is only 200 cycles).  If
1533                  * we fail, just warn and continue.
1534                  */
1535                 etnaviv_gpu_wait_idle(gpu, 100);
1536 
1537                 etnaviv_iommu_context_put(gpu->mmu_context);
1538                 gpu->mmu_context = NULL;
1539         }
1540 
1541         gpu->exec_state = -1;
1542 
1543         return etnaviv_gpu_clk_disable(gpu);
1544 }
1545 
1546 #ifdef CONFIG_PM
1547 static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
1548 {
1549         int ret;
1550 
1551         ret = mutex_lock_killable(&gpu->lock);
1552         if (ret)
1553                 return ret;
1554 
1555         etnaviv_gpu_update_clock(gpu);
1556         etnaviv_gpu_hw_init(gpu);
1557 
1558         mutex_unlock(&gpu->lock);
1559 
1560         return 0;
1561 }
1562 #endif
1563 
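     /*
      * Thermal cooling device callbacks.  The cooling state is stored
      * directly in gpu->freq_scale, which etnaviv_gpu_update_clock()
      * uses to scale the core clock; state 0 is full speed and higher
      * states throttle harder, up to the maximum of 6.
      */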
1564 static int
1565 etnaviv_gpu_cooling_get_max_state(struct thermal_cooling_device *cdev,
1566                                   unsigned long *state)
1567 {
1568         *state = 6;
1569 
1570         return 0;
1571 }
1572 
1573 static int
1574 etnaviv_gpu_cooling_get_cur_state(struct thermal_cooling_device *cdev,
1575                                   unsigned long *state)
1576 {
1577         struct etnaviv_gpu *gpu = cdev->devdata;
1578 
1579         *state = gpu->freq_scale;
1580 
1581         return 0;
1582 }
1583 
1584 static int
1585 etnaviv_gpu_cooling_set_cur_state(struct thermal_cooling_device *cdev,
1586                                   unsigned long state)
1587 {
1588         struct etnaviv_gpu *gpu = cdev->devdata;
1589 
1590         mutex_lock(&gpu->lock);
1591         gpu->freq_scale = state;
1592         if (!pm_runtime_suspended(gpu->dev))
1593                 etnaviv_gpu_update_clock(gpu);
1594         mutex_unlock(&gpu->lock);
1595 
1596         return 0;
1597 }
1598 
1599 static const struct thermal_cooling_device_ops cooling_ops = {
1600         .get_max_state = etnaviv_gpu_cooling_get_max_state,
1601         .get_cur_state = etnaviv_gpu_cooling_get_cur_state,
1602         .set_cur_state = etnaviv_gpu_cooling_set_cur_state,
1603 };
1604 
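     /*
      * Component bind: register the thermal cooling device, create the
      * ordered workqueue used for sync-point work, set up the scheduler
      * and the fence bookkeeping, and publish the GPU in the DRM device
      * private data.  The device is held resumed during the setup and
      * handed back to runtime PM autosuspend at the end.
      */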
1605 static int etnaviv_gpu_bind(struct device *dev, struct device *master,
1606         void *data)
1607 {
1608         struct drm_device *drm = data;
1609         struct etnaviv_drm_private *priv = drm->dev_private;
1610         struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
1611         int ret;
1612 
1613         if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL)) {
1614                 gpu->cooling = thermal_of_cooling_device_register(dev->of_node,
1615                                 (char *)dev_name(dev), gpu, &cooling_ops);
1616                 if (IS_ERR(gpu->cooling))
1617                         return PTR_ERR(gpu->cooling);
1618         }
1619 
1620         gpu->wq = alloc_ordered_workqueue(dev_name(dev), 0);
1621         if (!gpu->wq) {
1622                 ret = -ENOMEM;
1623                 goto out_thermal;
1624         }
1625 
1626         ret = etnaviv_sched_init(gpu);
1627         if (ret)
1628                 goto out_workqueue;
1629 
1630 #ifdef CONFIG_PM
1631         ret = pm_runtime_get_sync(gpu->dev);
             if (ret < 0) {
                     /* the usage count is raised even on failure */
                     pm_runtime_put_noidle(gpu->dev);
                     goto out_sched;
             }
1632 #else
1633         ret = etnaviv_gpu_clk_enable(gpu);
1634 #endif
1635         if (ret < 0)
1636                 goto out_sched;
1637 
1639         gpu->drm = drm;
1640         gpu->fence_context = dma_fence_context_alloc(1);
1641         idr_init(&gpu->fence_idr);
1642         spin_lock_init(&gpu->fence_spinlock);
1643 
1644         INIT_WORK(&gpu->sync_point_work, sync_point_worker);
1645         init_waitqueue_head(&gpu->fence_event);
1646 
1647         priv->gpu[priv->num_gpus++] = gpu;
1648 
1649         pm_runtime_mark_last_busy(gpu->dev);
1650         pm_runtime_put_autosuspend(gpu->dev);
1651 
1652         return 0;
1653 
1654 out_sched:
1655         etnaviv_sched_fini(gpu);
1656 
1657 out_workqueue:
1658         destroy_workqueue(gpu->wq);
1659 
1660 out_thermal:
1661         if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL))
1662                 thermal_cooling_device_unregister(gpu->cooling);
1663 
1664         return ret;
1665 }
1666 
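     /*
      * Component unbind: tear down in roughly the reverse order of bind:
      * drain and destroy the workqueue, stop the scheduler, force a
      * hardware suspend, free the kernel command buffer and the global
      * MMU state, and finally unregister the cooling device.
      */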
1667 static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
1668         void *data)
1669 {
1670         struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
1671 
1672         DBG("%s", dev_name(gpu->dev));
1673 
1674         flush_workqueue(gpu->wq);
1675         destroy_workqueue(gpu->wq);
1676 
1677         etnaviv_sched_fini(gpu);
1678 
1679 #ifdef CONFIG_PM
1680         pm_runtime_get_sync(gpu->dev);
1681         pm_runtime_put_sync_suspend(gpu->dev);
1682 #else
1683         etnaviv_gpu_hw_suspend(gpu);
1684 #endif
1685 
1686         if (gpu->initialized) {
1687                 etnaviv_cmdbuf_free(&gpu->buffer);
1688                 etnaviv_iommu_global_fini(gpu);
1689                 gpu->initialized = false;
1690         }
1691 
1692         gpu->drm = NULL;
1693         idr_destroy(&gpu->fence_idr);
1694 
1695         if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL))
1696                 thermal_cooling_device_unregister(gpu->cooling);
1697         gpu->cooling = NULL;
1698 }
1699 
1700 static const struct component_ops gpu_ops = {
1701         .bind = etnaviv_gpu_bind,
1702         .unbind = etnaviv_gpu_unbind,
1703 };
1704 
1705 static const struct of_device_id etnaviv_gpu_match[] = {
1706         {
1707                 .compatible = "vivante,gc"
1708         },
1709         { /* sentinel */ }
1710 };
1711 MODULE_DEVICE_TABLE(of, etnaviv_gpu_match);
1712 
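     /*
      * Platform probe: map the MMIO registers, request the IRQ and look
      * up the clocks, all of which are treated as optional (lookup
      * errors, including probe deferral, are silently ignored).  Runtime
      * PM is enabled with a somewhat arbitrary 200ms autosuspend delay
      * before the device registers with the component framework.
      */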
1713 static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
1714 {
1715         struct device *dev = &pdev->dev;
1716         struct etnaviv_gpu *gpu;
1717         int err;
1718 
1719         gpu = devm_kzalloc(dev, sizeof(*gpu), GFP_KERNEL);
1720         if (!gpu)
1721                 return -ENOMEM;
1722 
1723         gpu->dev = &pdev->dev;
1724         mutex_init(&gpu->lock);
1725         mutex_init(&gpu->fence_lock);
1726 
1727         /* Map registers: */
1728         gpu->mmio = devm_platform_ioremap_resource(pdev, 0);
1729         if (IS_ERR(gpu->mmio))
1730                 return PTR_ERR(gpu->mmio);
1731 
1732         /* Get Interrupt: */
1733         gpu->irq = platform_get_irq(pdev, 0);
1734         if (gpu->irq < 0) {
1735                 dev_err(dev, "failed to get irq: %d\n", gpu->irq);
1736                 return gpu->irq;
1737         }
1738 
1739         err = devm_request_irq(&pdev->dev, gpu->irq, irq_handler, 0,
1740                                dev_name(gpu->dev), gpu);
1741         if (err) {
1742                 dev_err(dev, "failed to request IRQ%u: %d\n", gpu->irq, err);
1743                 return err;
1744         }
1745 
1746         /* Get Clocks: */
1747         gpu->clk_reg = devm_clk_get(&pdev->dev, "reg");
1748         DBG("clk_reg: %p", gpu->clk_reg);
1749         if (IS_ERR(gpu->clk_reg))
1750                 gpu->clk_reg = NULL;
1751 
1752         gpu->clk_bus = devm_clk_get(&pdev->dev, "bus");
1753         DBG("clk_bus: %p", gpu->clk_bus);
1754         if (IS_ERR(gpu->clk_bus))
1755                 gpu->clk_bus = NULL;
1756 
1757         gpu->clk_core = devm_clk_get(&pdev->dev, "core");
1758         DBG("clk_core: %p", gpu->clk_core);
1759         if (IS_ERR(gpu->clk_core))
1760                 gpu->clk_core = NULL;
1761         gpu->base_rate_core = clk_get_rate(gpu->clk_core);
1762 
1763         gpu->clk_shader = devm_clk_get(&pdev->dev, "shader");
1764         DBG("clk_shader: %p", gpu->clk_shader);
1765         if (IS_ERR(gpu->clk_shader))
1766                 gpu->clk_shader = NULL;
1767         gpu->base_rate_shader = clk_get_rate(gpu->clk_shader);
1768 
1769         /* TODO: figure out max mapped size */
1770         dev_set_drvdata(dev, gpu);
1771 
1772         /*
1773          * We treat the device as initially suspended.  The runtime PM
1774          * autosuspend delay is rather arbitrary: no measurements have
1775          * yet been performed to determine an appropriate value.
1776          */
1777         pm_runtime_use_autosuspend(gpu->dev);
1778         pm_runtime_set_autosuspend_delay(gpu->dev, 200);
1779         pm_runtime_enable(gpu->dev);
1780 
1781         err = component_add(&pdev->dev, &gpu_ops);
1782         if (err < 0) {
1783                 dev_err(&pdev->dev, "failed to register component: %d\n", err);
1784                 return err;
1785         }
1786 
1787         return 0;
1788 }
1789 
1790 static int etnaviv_gpu_platform_remove(struct platform_device *pdev)
1791 {
1792         component_del(&pdev->dev, &gpu_ops);
1793         pm_runtime_disable(&pdev->dev);
1794         return 0;
1795 }
1796 
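     /*
      * Runtime PM callbacks: suspend is refused while jobs sit in the
      * scheduler hardware queue or while any unit other than the FE is
      * busy (the FE legitimately stays busy executing its idle loop).
      * Resume re-enables the clocks and re-initializes the hardware
      * state if the GPU was previously brought up.
      */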
1797 #ifdef CONFIG_PM
1798 static int etnaviv_gpu_rpm_suspend(struct device *dev)
1799 {
1800         struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
1801         u32 idle, mask;
1802 
1803         /* If there are any jobs in the HW queue, we're not idle */
1804         if (atomic_read(&gpu->sched.hw_rq_count))
1805                 return -EBUSY;
1806 
1807         /* Check whether the hardware (except FE) is idle */
1808         mask = gpu->idle_mask & ~VIVS_HI_IDLE_STATE_FE;
1809         idle = gpu_read(gpu, VIVS_HI_IDLE_STATE) & mask;
1810         if (idle != mask)
1811                 return -EBUSY;
1812 
1813         return etnaviv_gpu_hw_suspend(gpu);
1814 }
1815 
1816 static int etnaviv_gpu_rpm_resume(struct device *dev)
1817 {
1818         struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
1819         int ret;
1820 
1821         ret = etnaviv_gpu_clk_enable(gpu);
1822         if (ret)
1823                 return ret;
1824 
1825         /* Re-initialise the basic hardware state */
1826         if (gpu->drm && gpu->initialized) {
1827                 ret = etnaviv_gpu_hw_resume(gpu);
1828                 if (ret) {
1829                         etnaviv_gpu_clk_disable(gpu);
1830                         return ret;
1831                 }
1832         }
1833 
1834         return 0;
1835 }
1836 #endif
1837 
1838 static const struct dev_pm_ops etnaviv_gpu_pm_ops = {
1839         SET_RUNTIME_PM_OPS(etnaviv_gpu_rpm_suspend, etnaviv_gpu_rpm_resume,
1840                            NULL)
1841 };
1842 
1843 struct platform_driver etnaviv_gpu_driver = {
1844         .driver = {
1845                 .name = "etnaviv-gpu",
1846                 .owner = THIS_MODULE,
1847                 .pm = &etnaviv_gpu_pm_ops,
1848                 .of_match_table = etnaviv_gpu_match,
1849         },
1850         .probe = etnaviv_gpu_platform_probe,
1851         .remove = etnaviv_gpu_platform_remove,
1852         .id_table = gpu_ids,
1853 };
