This source file includes following definitions.
- init_handler_common_data
- dm_irq_work_func
- remove_irq_handler
- validate_irq_registration_params
- validate_irq_unregistration_params
- amdgpu_dm_irq_register_interrupt
- amdgpu_dm_irq_unregister_interrupt
- amdgpu_dm_irq_init
- amdgpu_dm_irq_fini
- amdgpu_dm_irq_suspend
- amdgpu_dm_irq_resume_early
- amdgpu_dm_irq_resume_late
- amdgpu_dm_irq_schedule_work
- amdgpu_dm_irq_immediate_work
- amdgpu_dm_irq_handler
- amdgpu_dm_hpd_to_dal_irq_source
- amdgpu_dm_set_hpd_irq_state
- dm_irq_state
- amdgpu_dm_set_pflip_irq_state
- amdgpu_dm_set_crtc_irq_state
- amdgpu_dm_set_vupdate_irq_state
- amdgpu_dm_set_irq_funcs
- amdgpu_dm_hpd_init
- amdgpu_dm_hpd_fini
   1 
   2 
   3 
   4 
   5 
   6 
   7 
   8 
   9 
  10 
  11 
  12 
  13 
  14 
  15 
  16 
  17 
  18 
  19 
  20 
  21 
  22 
  23 
  24 
  25 
  26 #include "dm_services_types.h"
  27 #include "dc.h"
  28 
  29 #include "amdgpu.h"
  30 #include "amdgpu_dm.h"
  31 #include "amdgpu_dm_irq.h"
  32 
  33 
  34 
  35 
  36 
  37 
  38 
  39 
  40 
  41 
  42 
  43 
  44 
  45 
  46 
  47 
  48 
  49 
  50 
  51 
  52 
  53 
  54 
  55 
  56 
  57 
  58 
  59 
  60 
  61 
  62 
  63 
  64 
  65 
  66 
  67 
  68 
  69 
  70 
  71 
  72 
  73 
  74 
  75 
  76 
/*
 * One registered interrupt handler. An instance is allocated per
 * registration in amdgpu_dm_irq_register_interrupt() and linked into the
 * per-source high- or low-context handler table; the pointer to this
 * struct is what register returns and what unregister matches against.
 */
struct amdgpu_dm_irq_handler_data {
        struct list_head list;          /* link in irq_handler_list_{high,low}_tab */
        interrupt_handler handler;      /* callback invoked when the IRQ fires */
        void *handler_arg;              /* opaque argument passed to @handler */

        struct amdgpu_display_manager *dm;      /* owning display manager */
        /* DAL IRQ source this handler was registered for (debug/trace use). */
        enum dc_irq_source irq_source;
};
  86 
/*
 * Guard the IRQ handler tables. IRQ-save variants are required because the
 * tables are also accessed from interrupt context (amdgpu_dm_irq_handler ->
 * amdgpu_dm_irq_immediate_work).
 */
#define DM_IRQ_TABLE_LOCK(adev, flags) \
        spin_lock_irqsave(&adev->dm.irq_handler_list_table_lock, flags)

#define DM_IRQ_TABLE_UNLOCK(adev, flags) \
        spin_unlock_irqrestore(&adev->dm.irq_handler_list_table_lock, flags)
  92 
  93 
  94 
  95 
  96 
  97 static void init_handler_common_data(struct amdgpu_dm_irq_handler_data *hcd,
  98                                      void (*ih)(void *),
  99                                      void *args,
 100                                      struct amdgpu_display_manager *dm)
 101 {
 102         hcd->handler = ih;
 103         hcd->handler_arg = args;
 104         hcd->dm = dm;
 105 }
 106 
 107 
 108 
 109 
 110 
 111 
 112 static void dm_irq_work_func(struct work_struct *work)
 113 {
 114         struct list_head *entry;
 115         struct irq_list_head *irq_list_head =
 116                 container_of(work, struct irq_list_head, work);
 117         struct list_head *handler_list = &irq_list_head->head;
 118         struct amdgpu_dm_irq_handler_data *handler_data;
 119 
 120         list_for_each(entry, handler_list) {
 121                 handler_data = list_entry(entry,
 122                                           struct amdgpu_dm_irq_handler_data,
 123                                           list);
 124 
 125                 DRM_DEBUG_KMS("DM_IRQ: work_func: for dal_src=%d\n",
 126                                 handler_data->irq_source);
 127 
 128                 DRM_DEBUG_KMS("DM_IRQ: schedule_work: for dal_src=%d\n",
 129                         handler_data->irq_source);
 130 
 131                 handler_data->handler(handler_data->handler_arg);
 132         }
 133 
 134         
 135 
 136 
 137 }
 138 
 139 
 140 
 141 
 142 
/**
 * remove_irq_handler - unlink and free one registered handler.
 * @adev: amdgpu device owning the handler tables.
 * @ih: the handle returned by amdgpu_dm_irq_register_interrupt(), i.e. a
 *      pointer to the struct amdgpu_dm_irq_handler_data to remove.
 * @int_params: selects the irq_source and context (high/low) table to search.
 *
 * Return: the list head the handler was removed from, or NULL when @ih was
 * not found in the selected table.
 */
static struct list_head *remove_irq_handler(struct amdgpu_device *adev,
                                            void *ih,
                                            const struct dc_interrupt_params *int_params)
{
        struct list_head *hnd_list;
        struct list_head *entry, *tmp;
        struct amdgpu_dm_irq_handler_data *handler;
        unsigned long irq_table_flags;
        bool handler_removed = false;
        enum dc_irq_source irq_source;

        DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

        irq_source = int_params->irq_source;

        /* Pick the table matching the registration context. */
        switch (int_params->int_context) {
        case INTERRUPT_HIGH_IRQ_CONTEXT:
                hnd_list = &adev->dm.irq_handler_list_high_tab[irq_source];
                break;
        case INTERRUPT_LOW_IRQ_CONTEXT:
        default:
                hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source].head;
                break;
        }

        list_for_each_safe(entry, tmp, hnd_list) {

                handler = list_entry(entry, struct amdgpu_dm_irq_handler_data,
                                     list);

                /* @ih is the handler_data pointer itself (the registration
                 * handle), so pointer identity is the match criterion. */
                if (ih == handler) {
                        
                        list_del(&handler->list);
                        handler_removed = true;
                        break;
                }
        }

        DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

        if (handler_removed == false) {
                /* Not an error here: the caller probes every context and
                 * reports failure only if no table contained the handler. */

                return NULL;
        }

        /* Safe to free outside the lock: the entry is already unlinked. */
        kfree(handler);

        DRM_DEBUG_KMS(
        "DM_IRQ: removed irq handler: %p for: dal_src=%d, irq context=%d\n",
                ih, int_params->irq_source, int_params->int_context);

        return hnd_list;
}
 197 
 198 static bool
 199 validate_irq_registration_params(struct dc_interrupt_params *int_params,
 200                                  void (*ih)(void *))
 201 {
 202         if (NULL == int_params || NULL == ih) {
 203                 DRM_ERROR("DM_IRQ: invalid input!\n");
 204                 return false;
 205         }
 206 
 207         if (int_params->int_context >= INTERRUPT_CONTEXT_NUMBER) {
 208                 DRM_ERROR("DM_IRQ: invalid context: %d!\n",
 209                                 int_params->int_context);
 210                 return false;
 211         }
 212 
 213         if (!DAL_VALID_IRQ_SRC_NUM(int_params->irq_source)) {
 214                 DRM_ERROR("DM_IRQ: invalid irq_source: %d!\n",
 215                                 int_params->irq_source);
 216                 return false;
 217         }
 218 
 219         return true;
 220 }
 221 
 222 static bool validate_irq_unregistration_params(enum dc_irq_source irq_source,
 223                                                irq_handler_idx handler_idx)
 224 {
 225         if (DAL_INVALID_IRQ_HANDLER_IDX == handler_idx) {
 226                 DRM_ERROR("DM_IRQ: invalid handler_idx==NULL!\n");
 227                 return false;
 228         }
 229 
 230         if (!DAL_VALID_IRQ_SRC_NUM(irq_source)) {
 231                 DRM_ERROR("DM_IRQ: invalid irq_source:%d!\n", irq_source);
 232                 return false;
 233         }
 234 
 235         return true;
 236 }
 237 
 238 
 239 
 240 
 241 
 242 
 243 
 244 
 245 
 246 
 247 
 248 
 249 
 250 
 251 
 252 
 253 
 254 
 255 
 256 
 257 
 258 
 259 
 260 
/**
 * amdgpu_dm_irq_register_interrupt - register a handler for a DAL IRQ source.
 * @adev: amdgpu device.
 * @int_params: irq_source + context (high = called from IRQ context,
 *              low = deferred to a work queue).
 * @ih: handler callback.
 * @handler_args: opaque argument passed back to @ih.
 *
 * Return: an opaque handle (the allocated handler_data) to pass to
 * amdgpu_dm_irq_unregister_interrupt(), or DAL_INVALID_IRQ_HANDLER_IDX on
 * invalid parameters or allocation failure.
 */
void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev,
                                       struct dc_interrupt_params *int_params,
                                       void (*ih)(void *),
                                       void *handler_args)
{
        struct list_head *hnd_list;
        struct amdgpu_dm_irq_handler_data *handler_data;
        unsigned long irq_table_flags;
        enum dc_irq_source irq_source;

        if (false == validate_irq_registration_params(int_params, ih))
                return DAL_INVALID_IRQ_HANDLER_IDX;

        /* Allocate before taking the lock: GFP_KERNEL may sleep. */
        handler_data = kzalloc(sizeof(*handler_data), GFP_KERNEL);
        if (!handler_data) {
                DRM_ERROR("DM_IRQ: failed to allocate irq handler!\n");
                return DAL_INVALID_IRQ_HANDLER_IDX;
        }

        init_handler_common_data(handler_data, ih, handler_args, &adev->dm);

        irq_source = int_params->irq_source;

        handler_data->irq_source = irq_source;

        /* Link into the table matching the requested context. */
        DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

        switch (int_params->int_context) {
        case INTERRUPT_HIGH_IRQ_CONTEXT:
                hnd_list = &adev->dm.irq_handler_list_high_tab[irq_source];
                break;
        case INTERRUPT_LOW_IRQ_CONTEXT:
        default:
                hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source].head;
                break;
        }

        list_add_tail(&handler_data->list, hnd_list);

        DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

        

        DRM_DEBUG_KMS(
                "DM_IRQ: added irq handler: %p for: dal_src=%d, irq context=%d\n",
                handler_data,
                irq_source,
                int_params->int_context);

        return handler_data;
}
 316 
 317 
 318 
 319 
 320 
 321 
 322 
 323 
 324 
 325 
/**
 * amdgpu_dm_irq_unregister_interrupt - remove a previously registered handler.
 * @adev: amdgpu device.
 * @irq_source: source the handler was registered for.
 * @ih: the handle returned by amdgpu_dm_irq_register_interrupt().
 *
 * The registration context is not stored with the handle, so every context's
 * table is probed until the handler is found; an error is logged if none
 * contained it.
 */
void amdgpu_dm_irq_unregister_interrupt(struct amdgpu_device *adev,
                                        enum dc_irq_source irq_source,
                                        void *ih)
{
        struct list_head *handler_list;
        struct dc_interrupt_params int_params;
        int i;

        /* NOTE(review): @ih is passed where irq_handler_idx is expected --
         * presumably irq_handler_idx is pointer-compatible; confirm typedef. */
        if (false == validate_irq_unregistration_params(irq_source, ih))
                return;

        memset(&int_params, 0, sizeof(int_params));

        int_params.irq_source = irq_source;

        for (i = 0; i < INTERRUPT_CONTEXT_NUMBER; i++) {

                int_params.int_context = i;

                handler_list = remove_irq_handler(adev, ih, &int_params);

                /* Non-NULL means the handler was found and freed. */
                if (handler_list != NULL)
                        break;
        }

        if (handler_list == NULL) {
                /* Handler was not registered in any context. */

                DRM_ERROR(
                "DM_IRQ: failed to find irq handler:%p for irq_source:%d!\n",
                        ih, irq_source);
        }
}
 359 
 360 
 361 
 362 
 363 
 364 
 365 
 366 
 367 
 368 
 369 
 370 
 371 
 372 
 373 
 374 int amdgpu_dm_irq_init(struct amdgpu_device *adev)
 375 {
 376         int src;
 377         struct irq_list_head *lh;
 378 
 379         DRM_DEBUG_KMS("DM_IRQ\n");
 380 
 381         spin_lock_init(&adev->dm.irq_handler_list_table_lock);
 382 
 383         for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
 384                 
 385                 lh = &adev->dm.irq_handler_list_low_tab[src];
 386                 INIT_LIST_HEAD(&lh->head);
 387                 INIT_WORK(&lh->work, dm_irq_work_func);
 388 
 389                 
 390                 INIT_LIST_HEAD(&adev->dm.irq_handler_list_high_tab[src]);
 391         }
 392 
 393         return 0;
 394 }
 395 
 396 
 397 
 398 
 399 
 400 
 401 
/**
 * amdgpu_dm_irq_fini - drain deferred IRQ work at teardown.
 * @adev: amdgpu device.
 *
 * Flushes the low-context work item of every source so no handler runs
 * after this returns. flush_work() is called outside the table lock because
 * it may sleep.
 *
 * NOTE(review): nothing in this visible body frees remaining handler_data
 * entries; presumably all handlers were unregistered before fini -- confirm,
 * otherwise this leaks.
 */
void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
{
        int src;
        struct irq_list_head *lh;
        unsigned long irq_table_flags;
        DRM_DEBUG_KMS("DM_IRQ: releasing resources.\n");
        for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
                DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
                /* Snapshot the list head under the lock, then drop it
                 * before the (possibly sleeping) flush. */

                lh = &adev->dm.irq_handler_list_low_tab[src];
                DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
                flush_work(&lh->work);
        }
}
 418 
/**
 * amdgpu_dm_irq_suspend - disable hot-plug interrupt sources for suspend.
 * @adev: amdgpu device.
 *
 * Disables every HPD/HPD-RX source that still has handlers registered and
 * flushes any pending deferred work for it. The table lock is dropped
 * around flush_work() because flushing may sleep, then re-taken for the
 * next iteration.
 *
 * Return: always 0.
 */
int amdgpu_dm_irq_suspend(struct amdgpu_device *adev)
{
        int src;
        struct list_head *hnd_list_h;
        struct list_head *hnd_list_l;
        unsigned long irq_table_flags;

        DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

        DRM_DEBUG_KMS("DM_IRQ: suspend\n");

        /* Walk the full HPD range (HPD1 .. HPD6RX); only sources with at
         * least one registered handler are actually disabled. */

        for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6RX; src++) {
                hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
                hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
                if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
                        dc_interrupt_set(adev->dm.dc, src, false);

                /* flush_work() may sleep -- must not hold the spinlock. */
                DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
                flush_work(&adev->dm.irq_handler_list_low_tab[src].work);

                DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
        }

        DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
        return 0;
}
 449 
 450 int amdgpu_dm_irq_resume_early(struct amdgpu_device *adev)
 451 {
 452         int src;
 453         struct list_head *hnd_list_h, *hnd_list_l;
 454         unsigned long irq_table_flags;
 455 
 456         DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
 457 
 458         DRM_DEBUG_KMS("DM_IRQ: early resume\n");
 459 
 460         
 461         for (src = DC_IRQ_SOURCE_HPD1RX; src <= DC_IRQ_SOURCE_HPD6RX; src++) {
 462                 hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
 463                 hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
 464                 if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
 465                         dc_interrupt_set(adev->dm.dc, src, true);
 466         }
 467 
 468         DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
 469 
 470         return 0;
 471 }
 472 
 473 int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev)
 474 {
 475         int src;
 476         struct list_head *hnd_list_h, *hnd_list_l;
 477         unsigned long irq_table_flags;
 478 
 479         DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
 480 
 481         DRM_DEBUG_KMS("DM_IRQ: resume\n");
 482 
 483         
 484 
 485 
 486 
 487         for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6; src++) {
 488                 hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
 489                 hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
 490                 if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
 491                         dc_interrupt_set(adev->dm.dc, src, true);
 492         }
 493 
 494         DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
 495         return 0;
 496 }
 497 
 498 
 499 
 500 
 501 
 502 static void amdgpu_dm_irq_schedule_work(struct amdgpu_device *adev,
 503                                         enum dc_irq_source irq_source)
 504 {
 505         unsigned long irq_table_flags;
 506         struct work_struct *work = NULL;
 507 
 508         DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
 509 
 510         if (!list_empty(&adev->dm.irq_handler_list_low_tab[irq_source].head))
 511                 work = &adev->dm.irq_handler_list_low_tab[irq_source].work;
 512 
 513         DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
 514 
 515         if (work) {
 516                 if (!schedule_work(work))
 517                         DRM_INFO("amdgpu_dm_irq_schedule_work FAILED src %d\n",
 518                                                 irq_source);
 519         }
 520 
 521 }
 522 
 523 
 524 
 525 
 526 
 527 static void amdgpu_dm_irq_immediate_work(struct amdgpu_device *adev,
 528                                          enum dc_irq_source irq_source)
 529 {
 530         struct amdgpu_dm_irq_handler_data *handler_data;
 531         struct list_head *entry;
 532         unsigned long irq_table_flags;
 533 
 534         DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
 535 
 536         list_for_each(
 537                 entry,
 538                 &adev->dm.irq_handler_list_high_tab[irq_source]) {
 539 
 540                 handler_data = list_entry(entry,
 541                                           struct amdgpu_dm_irq_handler_data,
 542                                           list);
 543 
 544                 
 545 
 546                 handler_data->handler(handler_data->handler_arg);
 547         }
 548 
 549         DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
 550 }
 551 
 552 
 553 
 554 
 555 
 556 
 557 
 558 
 559 
 560 
/**
 * amdgpu_dm_irq_handler - common .process callback for all DM IRQ sources.
 * @adev: amdgpu device.
 * @source: amdgpu IRQ source (unused; the DAL source is derived from @entry).
 * @entry: interrupt vector entry carrying src_id/src_data.
 *
 * Translates the IV entry to a DAL IRQ source, acks it, runs the
 * high-context handlers immediately and queues the low-context ones.
 *
 * Return: always 0.
 */
static int amdgpu_dm_irq_handler(struct amdgpu_device *adev,
                                 struct amdgpu_irq_src *source,
                                 struct amdgpu_iv_entry *entry)
{

        enum dc_irq_source src =
                dc_interrupt_to_irq_source(
                        adev->dm.dc,
                        entry->src_id,
                        entry->src_data[0]);

        dc_interrupt_ack(adev->dm.dc, src);

        /* Handlers registered for IRQ context run right here. */
        amdgpu_dm_irq_immediate_work(adev, src);
        /* The rest are deferred to the work queue. */
        amdgpu_dm_irq_schedule_work(adev, src);

        return 0;
}
 581 
 582 static enum dc_irq_source amdgpu_dm_hpd_to_dal_irq_source(unsigned type)
 583 {
 584         switch (type) {
 585         case AMDGPU_HPD_1:
 586                 return DC_IRQ_SOURCE_HPD1;
 587         case AMDGPU_HPD_2:
 588                 return DC_IRQ_SOURCE_HPD2;
 589         case AMDGPU_HPD_3:
 590                 return DC_IRQ_SOURCE_HPD3;
 591         case AMDGPU_HPD_4:
 592                 return DC_IRQ_SOURCE_HPD4;
 593         case AMDGPU_HPD_5:
 594                 return DC_IRQ_SOURCE_HPD5;
 595         case AMDGPU_HPD_6:
 596                 return DC_IRQ_SOURCE_HPD6;
 597         default:
 598                 return DC_IRQ_SOURCE_INVALID;
 599         }
 600 }
 601 
 602 static int amdgpu_dm_set_hpd_irq_state(struct amdgpu_device *adev,
 603                                        struct amdgpu_irq_src *source,
 604                                        unsigned type,
 605                                        enum amdgpu_interrupt_state state)
 606 {
 607         enum dc_irq_source src = amdgpu_dm_hpd_to_dal_irq_source(type);
 608         bool st = (state == AMDGPU_IRQ_STATE_ENABLE);
 609 
 610         dc_interrupt_set(adev->dm.dc, src, st);
 611         return 0;
 612 }
 613 
 614 static inline int dm_irq_state(struct amdgpu_device *adev,
 615                                struct amdgpu_irq_src *source,
 616                                unsigned crtc_id,
 617                                enum amdgpu_interrupt_state state,
 618                                const enum irq_type dal_irq_type,
 619                                const char *func)
 620 {
 621         bool st;
 622         enum dc_irq_source irq_source;
 623 
 624         struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc_id];
 625 
 626         if (!acrtc) {
 627                 DRM_ERROR(
 628                         "%s: crtc is NULL at id :%d\n",
 629                         func,
 630                         crtc_id);
 631                 return 0;
 632         }
 633 
 634         if (acrtc->otg_inst == -1)
 635                 return 0;
 636 
 637         irq_source = dal_irq_type + acrtc->otg_inst;
 638 
 639         st = (state == AMDGPU_IRQ_STATE_ENABLE);
 640 
 641         dc_interrupt_set(adev->dm.dc, irq_source, st);
 642         return 0;
 643 }
 644 
 645 static int amdgpu_dm_set_pflip_irq_state(struct amdgpu_device *adev,
 646                                          struct amdgpu_irq_src *source,
 647                                          unsigned crtc_id,
 648                                          enum amdgpu_interrupt_state state)
 649 {
 650         return dm_irq_state(
 651                 adev,
 652                 source,
 653                 crtc_id,
 654                 state,
 655                 IRQ_TYPE_PFLIP,
 656                 __func__);
 657 }
 658 
 659 static int amdgpu_dm_set_crtc_irq_state(struct amdgpu_device *adev,
 660                                         struct amdgpu_irq_src *source,
 661                                         unsigned crtc_id,
 662                                         enum amdgpu_interrupt_state state)
 663 {
 664         return dm_irq_state(
 665                 adev,
 666                 source,
 667                 crtc_id,
 668                 state,
 669                 IRQ_TYPE_VBLANK,
 670                 __func__);
 671 }
 672 
 673 static int amdgpu_dm_set_vupdate_irq_state(struct amdgpu_device *adev,
 674                                            struct amdgpu_irq_src *source,
 675                                            unsigned int crtc_id,
 676                                            enum amdgpu_interrupt_state state)
 677 {
 678         return dm_irq_state(
 679                 adev,
 680                 source,
 681                 crtc_id,
 682                 state,
 683                 IRQ_TYPE_VUPDATE,
 684                 __func__);
 685 }
 686 
/* All four source types share the same .process entry point; only the
 * enable/disable (.set) callback differs per source. */

static const struct amdgpu_irq_src_funcs dm_crtc_irq_funcs = {
        .set = amdgpu_dm_set_crtc_irq_state,
        .process = amdgpu_dm_irq_handler,
};

static const struct amdgpu_irq_src_funcs dm_vupdate_irq_funcs = {
        .set = amdgpu_dm_set_vupdate_irq_state,
        .process = amdgpu_dm_irq_handler,
};

static const struct amdgpu_irq_src_funcs dm_pageflip_irq_funcs = {
        .set = amdgpu_dm_set_pflip_irq_state,
        .process = amdgpu_dm_irq_handler,
};

static const struct amdgpu_irq_src_funcs dm_hpd_irq_funcs = {
        .set = amdgpu_dm_set_hpd_irq_state,
        .process = amdgpu_dm_irq_handler,
};
 706 
/**
 * amdgpu_dm_set_irq_funcs - attach DM's IRQ callbacks to the amdgpu
 * interrupt sources.
 * @adev: amdgpu device.
 *
 * CRTC-based sources get one type per CRTC; HPD gets one type per HPD pin.
 */
void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev)
{

        adev->crtc_irq.num_types = adev->mode_info.num_crtc;
        adev->crtc_irq.funcs = &dm_crtc_irq_funcs;

        adev->vupdate_irq.num_types = adev->mode_info.num_crtc;
        adev->vupdate_irq.funcs = &dm_vupdate_irq_funcs;

        adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
        adev->pageflip_irq.funcs = &dm_pageflip_irq_funcs;

        adev->hpd_irq.num_types = adev->mode_info.num_hpd;
        adev->hpd_irq.funcs = &dm_hpd_irq_funcs;
}
 722 
 723 
 724 
 725 
 726 
 727 
 728 
 729 
 730 
 731 void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
 732 {
 733         struct drm_device *dev = adev->ddev;
 734         struct drm_connector *connector;
 735 
 736         list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 737                 struct amdgpu_dm_connector *amdgpu_dm_connector =
 738                                 to_amdgpu_dm_connector(connector);
 739 
 740                 const struct dc_link *dc_link = amdgpu_dm_connector->dc_link;
 741 
 742                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
 743                         dc_interrupt_set(adev->dm.dc,
 744                                         dc_link->irq_source_hpd,
 745                                         true);
 746                 }
 747 
 748                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
 749                         dc_interrupt_set(adev->dm.dc,
 750                                         dc_link->irq_source_hpd_rx,
 751                                         true);
 752                 }
 753         }
 754 }
 755 
 756 
 757 
 758 
 759 
 760 
 761 
 762 
 763 
 764 void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
 765 {
 766         struct drm_device *dev = adev->ddev;
 767         struct drm_connector *connector;
 768 
 769         list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 770                 struct amdgpu_dm_connector *amdgpu_dm_connector =
 771                                 to_amdgpu_dm_connector(connector);
 772                 const struct dc_link *dc_link = amdgpu_dm_connector->dc_link;
 773 
 774                 dc_interrupt_set(adev->dm.dc, dc_link->irq_source_hpd, false);
 775 
 776                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
 777                         dc_interrupt_set(adev->dm.dc,
 778                                         dc_link->irq_source_hpd_rx,
 779                                         false);
 780                 }
 781         }
 782 }