root/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c


DEFINITIONS

This source file includes the following definitions:
  1. init_handler_common_data
  2. dm_irq_work_func
  3. remove_irq_handler
  4. validate_irq_registration_params
  5. validate_irq_unregistration_params
  6. amdgpu_dm_irq_register_interrupt
  7. amdgpu_dm_irq_unregister_interrupt
  8. amdgpu_dm_irq_init
  9. amdgpu_dm_irq_fini
  10. amdgpu_dm_irq_suspend
  11. amdgpu_dm_irq_resume_early
  12. amdgpu_dm_irq_resume_late
  13. amdgpu_dm_irq_schedule_work
  14. amdgpu_dm_irq_immediate_work
  15. amdgpu_dm_irq_handler
  16. amdgpu_dm_hpd_to_dal_irq_source
  17. amdgpu_dm_set_hpd_irq_state
  18. dm_irq_state
  19. amdgpu_dm_set_pflip_irq_state
  20. amdgpu_dm_set_crtc_irq_state
  21. amdgpu_dm_set_vupdate_irq_state
  22. amdgpu_dm_set_irq_funcs
  23. amdgpu_dm_hpd_init
  24. amdgpu_dm_hpd_fini

/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "dm_services_types.h"
#include "dc.h"

#include "amdgpu.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_irq.h"

/**
 * DOC: overview
 *
 * DM provides another layer of IRQ management on top of what the base driver
 * already provides. This is something that could be cleaned up, and is a
 * future TODO item.
 *
 * The base driver provides IRQ source registration with DRM, handler
 * registration into the base driver's IRQ table, and a handler callback
 * amdgpu_irq_handler(), which DRM calls on interrupts. This generic handler
 * looks up the IRQ table, and calls the respective
 * &amdgpu_irq_src_funcs.process hookups.
 *
 * What DM provides on top are two IRQ tables specifically for top-half and
 * bottom-half IRQ handling, with the bottom-half implementing workqueues:
 *
 * - &amdgpu_display_manager.irq_handler_list_high_tab
 * - &amdgpu_display_manager.irq_handler_list_low_tab
 *
 * They override the base driver's IRQ table, and the effect can be seen
 * in the hooks that DM provides for &amdgpu_irq_src_funcs.process. They
 * are all set to the DM generic handler amdgpu_dm_irq_handler(), which looks
 * up DM's IRQ tables. However, in order for the base driver to recognize this
 * hook, DM still needs to register the IRQ with the base driver. See
 * dce110_register_irq_handlers() and dcn10_register_irq_handlers().
 *
 * To expose DC's hardware interrupt toggle to the base driver, DM implements
 * &amdgpu_irq_src_funcs.set hooks. The base driver calls them through
 * amdgpu_irq_update() to enable or disable the interrupt.
 */
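
/*
 * Illustrative usage sketch (not part of the driver): one plausible way a DM
 * subcomponent could use the registration API in this file. The handler name
 * example_hpd_handler and its argument choice are hypothetical; the calls,
 * struct fields and enum values are the ones used below. Low context handlers
 * run from a workqueue (see dm_irq_work_func()) and may therefore sleep.
 *
 *	static void example_hpd_handler(void *arg)
 *	{
 *		struct amdgpu_display_manager *dm = arg;
 *
 *		// deferred hot-plug handling, safe to sleep here
 *	}
 *
 *	struct dc_interrupt_params int_params = {
 *		.irq_source  = DC_IRQ_SOURCE_HPD1,
 *		.int_context = INTERRUPT_LOW_IRQ_CONTEXT,
 *	};
 *	void *cookie = amdgpu_dm_irq_register_interrupt(adev, &int_params,
 *							example_hpd_handler,
 *							&adev->dm);
 *
 *	// ... and later, to unregister, pass back the returned pointer:
 *	amdgpu_dm_irq_unregister_interrupt(adev, DC_IRQ_SOURCE_HPD1, cookie);
 */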

/******************************************************************************
 * Private declarations.
 *****************************************************************************/

/**
 * struct amdgpu_dm_irq_handler_data - Data for DM interrupt handlers.
 *
 * @list: Linked list entry referencing the next/previous handler
 * @handler: Handler function
 * @handler_arg: Argument passed to the handler when triggered
 * @dm: DM which this handler belongs to
 * @irq_source: DC interrupt source that this handler is registered for
 */
struct amdgpu_dm_irq_handler_data {
	struct list_head list;
	interrupt_handler handler;
	void *handler_arg;

	struct amdgpu_display_manager *dm;
	/* DAL irq source which registered for this interrupt. */
	enum dc_irq_source irq_source;
};

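/*
 * Both handler tables are protected by the single
 * irq_handler_list_table_lock spinlock; take it (IRQ-safe) around any
 * traversal or modification of the handler lists.
 */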
#define DM_IRQ_TABLE_LOCK(adev, flags) \
	spin_lock_irqsave(&adev->dm.irq_handler_list_table_lock, flags)

#define DM_IRQ_TABLE_UNLOCK(adev, flags) \
	spin_unlock_irqrestore(&adev->dm.irq_handler_list_table_lock, flags)

/******************************************************************************
 * Private functions.
 *****************************************************************************/

static void init_handler_common_data(struct amdgpu_dm_irq_handler_data *hcd,
				     void (*ih)(void *),
				     void *args,
				     struct amdgpu_display_manager *dm)
{
	hcd->handler = ih;
	hcd->handler_arg = args;
	hcd->dm = dm;
}

/**
 * dm_irq_work_func() - Handle an IRQ outside of the interrupt handler proper.
 *
 * @work: work struct embedded in &struct irq_list_head
 */
static void dm_irq_work_func(struct work_struct *work)
{
	struct list_head *entry;
	struct irq_list_head *irq_list_head =
		container_of(work, struct irq_list_head, work);
	struct list_head *handler_list = &irq_list_head->head;
	struct amdgpu_dm_irq_handler_data *handler_data;

	/* Call each DAL subcomponent which registered for interrupt
	 * notification at INTERRUPT_LOW_IRQ_CONTEXT.
	 * (The most common use is HPD interrupt.) */
	list_for_each(entry, handler_list) {
		handler_data = list_entry(entry,
					  struct amdgpu_dm_irq_handler_data,
					  list);

		DRM_DEBUG_KMS("DM_IRQ: work_func: for dal_src=%d\n",
			      handler_data->irq_source);

		handler_data->handler(handler_data->handler_arg);
	}
}

/*
 * Remove a handler and return a pointer to the handler list from which the
 * handler was removed.
 */
static struct list_head *remove_irq_handler(struct amdgpu_device *adev,
					    void *ih,
					    const struct dc_interrupt_params *int_params)
{
	struct list_head *hnd_list;
	struct list_head *entry, *tmp;
	struct amdgpu_dm_irq_handler_data *handler;
	unsigned long irq_table_flags;
	bool handler_removed = false;
	enum dc_irq_source irq_source;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	irq_source = int_params->irq_source;

	switch (int_params->int_context) {
	case INTERRUPT_HIGH_IRQ_CONTEXT:
		hnd_list = &adev->dm.irq_handler_list_high_tab[irq_source];
		break;
	case INTERRUPT_LOW_IRQ_CONTEXT:
	default:
		hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source].head;
		break;
	}

	list_for_each_safe(entry, tmp, hnd_list) {
		handler = list_entry(entry, struct amdgpu_dm_irq_handler_data,
				     list);

		if (ih == handler) {
			/* Found our handler. Remove it from the list. */
			list_del(&handler->list);
			handler_removed = true;
			break;
		}
	}

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

	if (!handler_removed) {
		/* Not necessarily an error - the caller may not
		 * know the context. */
		return NULL;
	}

	kfree(handler);

	DRM_DEBUG_KMS("DM_IRQ: removed irq handler: %p for: dal_src=%d, irq context=%d\n",
		      ih, int_params->irq_source, int_params->int_context);

	return hnd_list;
}

static bool
validate_irq_registration_params(struct dc_interrupt_params *int_params,
				 void (*ih)(void *))
{
	if (!int_params || !ih) {
		DRM_ERROR("DM_IRQ: invalid input!\n");
		return false;
	}

	if (int_params->int_context >= INTERRUPT_CONTEXT_NUMBER) {
		DRM_ERROR("DM_IRQ: invalid context: %d!\n",
			  int_params->int_context);
		return false;
	}

	if (!DAL_VALID_IRQ_SRC_NUM(int_params->irq_source)) {
		DRM_ERROR("DM_IRQ: invalid irq_source: %d!\n",
			  int_params->irq_source);
		return false;
	}

	return true;
}

static bool validate_irq_unregistration_params(enum dc_irq_source irq_source,
					       irq_handler_idx handler_idx)
{
	if (handler_idx == DAL_INVALID_IRQ_HANDLER_IDX) {
		DRM_ERROR("DM_IRQ: invalid handler_idx!\n");
		return false;
	}

	if (!DAL_VALID_IRQ_SRC_NUM(irq_source)) {
		DRM_ERROR("DM_IRQ: invalid irq_source: %d!\n", irq_source);
		return false;
	}

	return true;
}

/******************************************************************************
 * Public functions.
 *
 * Note: caller is responsible for input validation.
 *****************************************************************************/

/**
 * amdgpu_dm_irq_register_interrupt() - Register a handler within DM.
 * @adev: The base driver device containing the DM device.
 * @int_params: Interrupt parameters containing the source, and handler context
 * @ih: Function pointer to the interrupt handler to register
 * @handler_args: Arguments passed to the handler when the interrupt occurs
 *
 * Register an interrupt handler for the given IRQ source, under the given
 * context. The context can either be high or low. High context handlers are
 * executed directly within ISR context, while low context handlers are
 * executed within a workqueue, thereby allowing operations that sleep.
 *
 * Registered handlers are called in a FIFO manner, i.e. the earliest
 * registered handler will be called first.
 *
 * Return: Handler data &struct amdgpu_dm_irq_handler_data containing the IRQ
 *         source, handler function, and args, or DAL_INVALID_IRQ_HANDLER_IDX
 *         on failure
 */
void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev,
				       struct dc_interrupt_params *int_params,
				       void (*ih)(void *),
				       void *handler_args)
{
	struct list_head *hnd_list;
	struct amdgpu_dm_irq_handler_data *handler_data;
	unsigned long irq_table_flags;
	enum dc_irq_source irq_source;

	if (!validate_irq_registration_params(int_params, ih))
		return DAL_INVALID_IRQ_HANDLER_IDX;

	handler_data = kzalloc(sizeof(*handler_data), GFP_KERNEL);
	if (!handler_data) {
		DRM_ERROR("DM_IRQ: failed to allocate irq handler!\n");
		return DAL_INVALID_IRQ_HANDLER_IDX;
	}

	init_handler_common_data(handler_data, ih, handler_args, &adev->dm);

	irq_source = int_params->irq_source;

	handler_data->irq_source = irq_source;

	/* Lock the list, add the handler. */
	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	switch (int_params->int_context) {
	case INTERRUPT_HIGH_IRQ_CONTEXT:
		hnd_list = &adev->dm.irq_handler_list_high_tab[irq_source];
		break;
	case INTERRUPT_LOW_IRQ_CONTEXT:
	default:
		hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source].head;
		break;
	}

	list_add_tail(&handler_data->list, hnd_list);

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

	/* This pointer will be stored by the code that requested the
	 * interrupt registration.
	 * The same pointer will be needed in order to unregister the
	 * interrupt. */

	DRM_DEBUG_KMS("DM_IRQ: added irq handler: %p for: dal_src=%d, irq context=%d\n",
		      handler_data,
		      irq_source,
		      int_params->int_context);

	return handler_data;
}

/**
 * amdgpu_dm_irq_unregister_interrupt() - Remove a handler from the DM IRQ table
 * @adev: The base driver device containing the DM device
 * @irq_source: IRQ source to remove the given handler from
 * @ih: Handler pointer returned by amdgpu_dm_irq_register_interrupt(),
 *      identifying the handler to remove
 *
 * Go through both low and high context IRQ tables, and find the given handler
 * for the given irq source. If found, remove it. Otherwise, do nothing.
 */
void amdgpu_dm_irq_unregister_interrupt(struct amdgpu_device *adev,
					enum dc_irq_source irq_source,
					void *ih)
{
	struct list_head *handler_list;
	struct dc_interrupt_params int_params;
	int i;

	if (!validate_irq_unregistration_params(irq_source, ih))
		return;

	memset(&int_params, 0, sizeof(int_params));

	int_params.irq_source = irq_source;

	for (i = 0; i < INTERRUPT_CONTEXT_NUMBER; i++) {
		int_params.int_context = i;

		handler_list = remove_irq_handler(adev, ih, &int_params);

		if (handler_list != NULL)
			break;
	}

	if (handler_list == NULL) {
		/* If we got here, it means we searched all irq contexts
		 * for this irq source, but the handler was not found. */
		DRM_ERROR("DM_IRQ: failed to find irq handler: %p for irq_source: %d!\n",
			  ih, irq_source);
	}
}

/**
 * amdgpu_dm_irq_init() - Initialize DM IRQ management
 * @adev: The base driver device containing the DM device
 *
 * Initialize DM's high and low context IRQ tables.
 *
 * The N by M table contains N IRQ sources, with M
 * &struct amdgpu_dm_irq_handler_data hooked together in a linked list. The
 * list_heads are initialized here. When an interrupt n is triggered, all m
 * handlers are called in sequence, FIFO according to registration order.
 *
 * The low context table requires special steps to initialize, since handlers
 * will be deferred to a workqueue. See &struct irq_list_head.
 */
int amdgpu_dm_irq_init(struct amdgpu_device *adev)
{
	int src;
	struct irq_list_head *lh;

	DRM_DEBUG_KMS("DM_IRQ\n");

	spin_lock_init(&adev->dm.irq_handler_list_table_lock);

	for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
		/* low context handler list init */
		lh = &adev->dm.irq_handler_list_low_tab[src];
		INIT_LIST_HEAD(&lh->head);
		INIT_WORK(&lh->work, dm_irq_work_func);

		/* high context handler init */
		INIT_LIST_HEAD(&adev->dm.irq_handler_list_high_tab[src]);
	}

	return 0;
}

/**
 * amdgpu_dm_irq_fini() - Tear down DM IRQ management
 * @adev: The base driver device containing the DM device
 *
 * Flush all work within the low context IRQ table.
 */
void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
{
	int src;
	struct irq_list_head *lh;
	unsigned long irq_table_flags;

	DRM_DEBUG_KMS("DM_IRQ: releasing resources.\n");

	for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
		DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
		/* The handler was removed from the table, which means it
		 * is safe to flush all the 'work' (because no code can
		 * schedule a new one). */
		lh = &adev->dm.irq_handler_list_low_tab[src];
		DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
		flush_work(&lh->work);
	}
}

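/**
 * amdgpu_dm_irq_suspend() - Disable ASIC interrupt sources for suspend
 * @adev: The base driver device containing the DM device
 *
 * Disable the HW interrupt for each HPD and HPD RX source that still has
 * a handler registered, then flush any of their pending low context work.
 */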
int amdgpu_dm_irq_suspend(struct amdgpu_device *adev)
{
	int src;
	struct list_head *hnd_list_h;
	struct list_head *hnd_list_l;
	unsigned long irq_table_flags;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	DRM_DEBUG_KMS("DM_IRQ: suspend\n");

	/* Disable HW interrupts for HPD and HPD RX only, since FLIP and
	 * VBLANK interrupts will be disabled from manage_dm_interrupts()
	 * when a CRTC is disabled.
	 */
	for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6RX; src++) {
		hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
		hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
		if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
			dc_interrupt_set(adev->dm.dc, src, false);

		DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
		flush_work(&adev->dm.irq_handler_list_low_tab[src].work);

		DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
	}

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
	return 0;
}

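/**
 * amdgpu_dm_irq_resume_early() - Re-enable HPD RX interrupt sources
 * @adev: The base driver device containing the DM device
 *
 * Re-enable the HW interrupt for each HPD RX (short pulse) source that
 * has a handler registered.
 */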
int amdgpu_dm_irq_resume_early(struct amdgpu_device *adev)
{
	int src;
	struct list_head *hnd_list_h, *hnd_list_l;
	unsigned long irq_table_flags;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	DRM_DEBUG_KMS("DM_IRQ: early resume\n");

	/* Re-enable the HW interrupt for the HPD RX (short pulse) sources. */
	for (src = DC_IRQ_SOURCE_HPD1RX; src <= DC_IRQ_SOURCE_HPD6RX; src++) {
		hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
		hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
		if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
			dc_interrupt_set(adev->dm.dc, src, true);
	}

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

	return 0;
}

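/**
 * amdgpu_dm_irq_resume_late() - Re-enable HPD interrupt sources
 * @adev: The base driver device containing the DM device
 *
 * Re-enable the HW interrupt for each HPD source that has a handler
 * registered. FLIP and VBLANK interrupts are re-enabled separately, when
 * the CRTCs are enabled.
 */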
int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev)
{
	int src;
	struct list_head *hnd_list_h, *hnd_list_l;
	unsigned long irq_table_flags;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	DRM_DEBUG_KMS("DM_IRQ: resume\n");

	/* Re-enable HW interrupts for HPD only, since FLIP and VBLANK
	 * interrupts will be enabled from manage_dm_interrupts() when a
	 * CRTC is enabled.
	 */
	for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6; src++) {
		hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
		hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
		if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
			dc_interrupt_set(adev->dm.dc, src, true);
	}

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
	return 0;
}

/*
 * amdgpu_dm_irq_schedule_work - schedule all work items registered for the
 * "irq_source".
 */
static void amdgpu_dm_irq_schedule_work(struct amdgpu_device *adev,
					enum dc_irq_source irq_source)
{
	unsigned long irq_table_flags;
	struct work_struct *work = NULL;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	if (!list_empty(&adev->dm.irq_handler_list_low_tab[irq_source].head))
		work = &adev->dm.irq_handler_list_low_tab[irq_source].work;

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

	if (work) {
		if (!schedule_work(work))
			DRM_INFO("amdgpu_dm_irq_schedule_work FAILED src %d\n",
				 irq_source);
	}
}

/*
 * amdgpu_dm_irq_immediate_work
 * Callback high irq work immediately, don't send to work queue
 */
static void amdgpu_dm_irq_immediate_work(struct amdgpu_device *adev,
					 enum dc_irq_source irq_source)
{
	struct amdgpu_dm_irq_handler_data *handler_data;
	struct list_head *entry;
	unsigned long irq_table_flags;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	list_for_each(entry, &adev->dm.irq_handler_list_high_tab[irq_source]) {
		handler_data = list_entry(entry,
					  struct amdgpu_dm_irq_handler_data,
					  list);

		/* Call a subcomponent which registered for immediate
		 * interrupt notification */
		handler_data->handler(handler_data->handler_arg);
	}

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
}

/**
 * amdgpu_dm_irq_handler() - Generic DM IRQ handler
 * @adev: amdgpu base driver device containing the DM device
 * @source: Unused
 * @entry: Data about the triggered interrupt
 *
 * Calls all registered high irq work immediately, and schedules work for low
 * irq. The DM IRQ table is used to find the corresponding handlers.
 */
static int amdgpu_dm_irq_handler(struct amdgpu_device *adev,
				 struct amdgpu_irq_src *source,
				 struct amdgpu_iv_entry *entry)
{
	enum dc_irq_source src =
		dc_interrupt_to_irq_source(
			adev->dm.dc,
			entry->src_id,
			entry->src_data[0]);

	dc_interrupt_ack(adev->dm.dc, src);

	/* Call high irq work immediately */
	amdgpu_dm_irq_immediate_work(adev, src);
	/* Schedule low irq work */
	amdgpu_dm_irq_schedule_work(adev, src);

	return 0;
}

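/* Map an AMDGPU_HPD_* pin number to the corresponding DC HPD interrupt
 * source. */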
static enum dc_irq_source amdgpu_dm_hpd_to_dal_irq_source(unsigned int type)
{
	switch (type) {
	case AMDGPU_HPD_1:
		return DC_IRQ_SOURCE_HPD1;
	case AMDGPU_HPD_2:
		return DC_IRQ_SOURCE_HPD2;
	case AMDGPU_HPD_3:
		return DC_IRQ_SOURCE_HPD3;
	case AMDGPU_HPD_4:
		return DC_IRQ_SOURCE_HPD4;
	case AMDGPU_HPD_5:
		return DC_IRQ_SOURCE_HPD5;
	case AMDGPU_HPD_6:
		return DC_IRQ_SOURCE_HPD6;
	default:
		return DC_IRQ_SOURCE_INVALID;
	}
}

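/**
 * amdgpu_dm_set_hpd_irq_state() - Toggle an HPD interrupt source
 * @adev: The base driver device containing the DM device
 * @source: Unused
 * @type: AMDGPU_HPD_* pin identifying the source to toggle
 * @state: Enable or disable
 *
 * &amdgpu_irq_src_funcs.set hook for the HPD sources; translates the pin
 * number to a DC IRQ source and toggles it in hardware.
 */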
static int amdgpu_dm_set_hpd_irq_state(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       unsigned int type,
				       enum amdgpu_interrupt_state state)
{
	enum dc_irq_source src = amdgpu_dm_hpd_to_dal_irq_source(type);
	bool st = (state == AMDGPU_IRQ_STATE_ENABLE);

	dc_interrupt_set(adev->dm.dc, src, st);
	return 0;
}

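/**
 * dm_irq_state() - Generic CRTC interrupt toggle helper
 * @adev: The base driver device containing the DM device
 * @source: Unused
 * @crtc_id: Index of the CRTC whose interrupt is being toggled
 * @state: Enable or disable
 * @dal_irq_type: Base DC IRQ type (pflip, vblank or vupdate)
 * @func: Name of the caller, for error reporting
 *
 * The DC IRQ source for a given CRTC is the base type offset by the
 * CRTC's OTG instance: irq_source = dal_irq_type + acrtc->otg_inst.
 */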
static inline int dm_irq_state(struct amdgpu_device *adev,
			       struct amdgpu_irq_src *source,
			       unsigned int crtc_id,
			       enum amdgpu_interrupt_state state,
			       const enum irq_type dal_irq_type,
			       const char *func)
{
	bool st;
	enum dc_irq_source irq_source;

	struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc_id];

	if (!acrtc) {
		DRM_ERROR("%s: crtc is NULL at id: %d\n", func, crtc_id);
		return 0;
	}

	if (acrtc->otg_inst == -1)
		return 0;

	irq_source = dal_irq_type + acrtc->otg_inst;

	st = (state == AMDGPU_IRQ_STATE_ENABLE);

	dc_interrupt_set(adev->dm.dc, irq_source, st);
	return 0;
}

static int amdgpu_dm_set_pflip_irq_state(struct amdgpu_device *adev,
					 struct amdgpu_irq_src *source,
					 unsigned int crtc_id,
					 enum amdgpu_interrupt_state state)
{
	return dm_irq_state(adev, source, crtc_id, state,
			    IRQ_TYPE_PFLIP, __func__);
}

static int amdgpu_dm_set_crtc_irq_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned int crtc_id,
					enum amdgpu_interrupt_state state)
{
	return dm_irq_state(adev, source, crtc_id, state,
			    IRQ_TYPE_VBLANK, __func__);
}

static int amdgpu_dm_set_vupdate_irq_state(struct amdgpu_device *adev,
					   struct amdgpu_irq_src *source,
					   unsigned int crtc_id,
					   enum amdgpu_interrupt_state state)
{
	return dm_irq_state(adev, source, crtc_id, state,
			    IRQ_TYPE_VUPDATE, __func__);
}

static const struct amdgpu_irq_src_funcs dm_crtc_irq_funcs = {
	.set = amdgpu_dm_set_crtc_irq_state,
	.process = amdgpu_dm_irq_handler,
};

static const struct amdgpu_irq_src_funcs dm_vupdate_irq_funcs = {
	.set = amdgpu_dm_set_vupdate_irq_state,
	.process = amdgpu_dm_irq_handler,
};

static const struct amdgpu_irq_src_funcs dm_pageflip_irq_funcs = {
	.set = amdgpu_dm_set_pflip_irq_state,
	.process = amdgpu_dm_irq_handler,
};

static const struct amdgpu_irq_src_funcs dm_hpd_irq_funcs = {
	.set = amdgpu_dm_set_hpd_irq_state,
	.process = amdgpu_dm_irq_handler,
};

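/**
 * amdgpu_dm_set_irq_funcs() - Hook DM's IRQ handling into the base driver
 * @adev: The base driver device containing the DM device
 *
 * Point the base driver's CRTC, vupdate, pageflip and HPD interrupt
 * sources at the DM implementations above, so that amdgpu_irq dispatches
 * into DM's IRQ tables.
 */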
void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->crtc_irq.num_types = adev->mode_info.num_crtc;
	adev->crtc_irq.funcs = &dm_crtc_irq_funcs;

	adev->vupdate_irq.num_types = adev->mode_info.num_crtc;
	adev->vupdate_irq.funcs = &dm_vupdate_irq_funcs;

	adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
	adev->pageflip_irq.funcs = &dm_pageflip_irq_funcs;

	adev->hpd_irq.num_types = adev->mode_info.num_hpd;
	adev->hpd_irq.funcs = &dm_hpd_irq_funcs;
}

/**
 * amdgpu_dm_hpd_init - hpd setup callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Setup the hpd pins used by the card (evergreen+).
 * Enable the pin, set the polarity, and enable the hpd interrupts.
 */
void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct amdgpu_dm_connector *amdgpu_dm_connector =
				to_amdgpu_dm_connector(connector);

		const struct dc_link *dc_link = amdgpu_dm_connector->dc_link;

		if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
			dc_interrupt_set(adev->dm.dc,
					 dc_link->irq_source_hpd,
					 true);
		}

		if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
			dc_interrupt_set(adev->dm.dc,
					 dc_link->irq_source_hpd_rx,
					 true);
		}
	}
}

/**
 * amdgpu_dm_hpd_fini - hpd tear down callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the hpd pins used by the card (evergreen+).
 * Disable the hpd interrupts.
 */
void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct amdgpu_dm_connector *amdgpu_dm_connector =
				to_amdgpu_dm_connector(connector);
		const struct dc_link *dc_link = amdgpu_dm_connector->dc_link;

		/* Mirror the validity check done in amdgpu_dm_hpd_init(). */
		if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
			dc_interrupt_set(adev->dm.dc,
					 dc_link->irq_source_hpd,
					 false);
		}

		if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
			dc_interrupt_set(adev->dm.dc,
					 dc_link->irq_source_hpd_rx,
					 false);
		}
	}
}
