root/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c


DEFINITIONS

This source file includes the following definitions:
  1. get_mqd
  2. get_sdma_mqd
  3. update_cu_mask
  4. set_priority
  5. allocate_mqd
  6. init_mqd
  7. load_mqd
  8. update_mqd
  9. destroy_mqd
  10. free_mqd
  11. is_occupied
  12. get_wave_state
  13. init_mqd_hiq
  14. update_mqd_hiq
  15. init_mqd_sdma
  16. load_mqd_sdma
  17. update_mqd_sdma
  18. destroy_mqd_sdma
  19. is_occupied_sdma
  20. debugfs_show_mqd
  21. debugfs_show_mqd_sdma
  22. mqd_manager_init_v9

   1 /*
   2  * Copyright 2016-2018 Advanced Micro Devices, Inc.
   3  *
   4  * Permission is hereby granted, free of charge, to any person obtaining a
   5  * copy of this software and associated documentation files (the "Software"),
   6  * to deal in the Software without restriction, including without limitation
   7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8  * and/or sell copies of the Software, and to permit persons to whom the
   9  * Software is furnished to do so, subject to the following conditions:
  10  *
  11  * The above copyright notice and this permission notice shall be included in
  12  * all copies or substantial portions of the Software.
  13  *
  14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20  * OTHER DEALINGS IN THE SOFTWARE.
  21  *
  22  */
  23 
  24 #include <linux/printk.h>
  25 #include <linux/slab.h>
  26 #include <linux/uaccess.h>
  27 #include "kfd_priv.h"
  28 #include "kfd_mqd_manager.h"
  29 #include "v9_structs.h"
  30 #include "gc/gc_9_0_offset.h"
  31 #include "gc/gc_9_0_sh_mask.h"
  32 #include "sdma0/sdma0_4_0_sh_mask.h"
  33 #include "amdgpu_amdkfd.h"
  34 
  35 static inline struct v9_mqd *get_mqd(void *mqd)
  36 {
  37         return (struct v9_mqd *)mqd;
  38 }
  39 
  40 static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd)
  41 {
  42         return (struct v9_sdma_mqd *)mqd;
  43 }
  44 
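      /*
       * Split the queue's CU mask into per-shader-engine masks and program
       * the compute_static_thread_mgmt_se* fields. A cu_mask_count of 0
       * means no mask was supplied, so the defaults from init_mqd (all CUs
       * enabled) are left untouched.
       */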
  45 static void update_cu_mask(struct mqd_manager *mm, void *mqd,
  46                         struct queue_properties *q)
  47 {
  48         struct v9_mqd *m;
  49         uint32_t se_mask[KFD_MAX_NUM_SE] = {0};
  50 
  51         if (q->cu_mask_count == 0)
  52                 return;
  53 
  54         mqd_symmetrically_map_cu_mask(mm,
  55                 q->cu_mask, q->cu_mask_count, se_mask);
  56 
  57         m = get_mqd(mqd);
  58         m->compute_static_thread_mgmt_se0 = se_mask[0];
  59         m->compute_static_thread_mgmt_se1 = se_mask[1];
  60         m->compute_static_thread_mgmt_se2 = se_mask[2];
  61         m->compute_static_thread_mgmt_se3 = se_mask[3];
  62         m->compute_static_thread_mgmt_se4 = se_mask[4];
  63         m->compute_static_thread_mgmt_se5 = se_mask[5];
  64         m->compute_static_thread_mgmt_se6 = se_mask[6];
  65         m->compute_static_thread_mgmt_se7 = se_mask[7];
  66 
  67         pr_debug("update cu mask to %#x %#x %#x %#x %#x %#x %#x %#x\n",
  68                 m->compute_static_thread_mgmt_se0,
  69                 m->compute_static_thread_mgmt_se1,
  70                 m->compute_static_thread_mgmt_se2,
  71                 m->compute_static_thread_mgmt_se3,
  72                 m->compute_static_thread_mgmt_se4,
  73                 m->compute_static_thread_mgmt_se5,
  74                 m->compute_static_thread_mgmt_se6,
  75                 m->compute_static_thread_mgmt_se7);
  76 }
  77 
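      /*
       * Translate the KFD queue priority into the HQD pipe and queue
       * priority fields; the pipe priority comes from the shared
       * pipe_priority_map lookup table.
       */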
  78 static void set_priority(struct v9_mqd *m, struct queue_properties *q)
  79 {
  80         m->cp_hqd_pipe_priority = pipe_priority_map[q->priority];
  81         m->cp_hqd_queue_priority = q->priority;
  82 }
  83 
  84 static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd,
  85                 struct queue_properties *q)
  86 {
  87         int retval;
  88         struct kfd_mem_obj *mqd_mem_obj = NULL;
  89 
   90         /* From V9, for CWSR, the control stack is located on the next page
   91          * boundary after the MQD, so use the GTT allocation function
   92          * instead of the sub-allocation function.
   93          */
  94         if (kfd->cwsr_enabled && (q->type == KFD_QUEUE_TYPE_COMPUTE)) {
  95                 mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_NOIO);
  96                 if (!mqd_mem_obj)
  97                         return NULL;
  98                 retval = amdgpu_amdkfd_alloc_gtt_mem(kfd->kgd,
  99                         ALIGN(q->ctl_stack_size, PAGE_SIZE) +
 100                                 ALIGN(sizeof(struct v9_mqd), PAGE_SIZE),
 101                         &(mqd_mem_obj->gtt_mem),
 102                         &(mqd_mem_obj->gpu_addr),
 103                         (void *)&(mqd_mem_obj->cpu_ptr), true);
 104         } else {
 105                 retval = kfd_gtt_sa_allocate(kfd, sizeof(struct v9_mqd),
 106                                 &mqd_mem_obj);
 107         }
 108 
 109         if (retval) {
 110                 kfree(mqd_mem_obj);
 111                 return NULL;
 112         }
 113 
 114         return mqd_mem_obj;
 115 
 116 }
 117 
 118 static void init_mqd(struct mqd_manager *mm, void **mqd,
 119                         struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
 120                         struct queue_properties *q)
 121 {
 122         uint64_t addr;
 123         struct v9_mqd *m;
 124 
 125         m = (struct v9_mqd *) mqd_mem_obj->cpu_ptr;
 126         addr = mqd_mem_obj->gpu_addr;
 127 
 128         memset(m, 0, sizeof(struct v9_mqd));
 129 
 130         m->header = 0xC0310800;
 131         m->compute_pipelinestat_enable = 1;
 132         m->compute_static_thread_mgmt_se0 = 0xFFFFFFFF;
 133         m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF;
 134         m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF;
 135         m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF;
 136         m->compute_static_thread_mgmt_se4 = 0xFFFFFFFF;
 137         m->compute_static_thread_mgmt_se5 = 0xFFFFFFFF;
 138         m->compute_static_thread_mgmt_se6 = 0xFFFFFFFF;
 139         m->compute_static_thread_mgmt_se7 = 0xFFFFFFFF;
 140 
 141         m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
 142                         0x53 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;
 143 
 144         m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT;
 145 
 146         m->cp_mqd_base_addr_lo        = lower_32_bits(addr);
 147         m->cp_mqd_base_addr_hi        = upper_32_bits(addr);
 148 
 149         m->cp_hqd_quantum = 1 << CP_HQD_QUANTUM__QUANTUM_EN__SHIFT |
 150                         1 << CP_HQD_QUANTUM__QUANTUM_SCALE__SHIFT |
 151                         10 << CP_HQD_QUANTUM__QUANTUM_DURATION__SHIFT;
 152 
 153         if (q->format == KFD_QUEUE_FORMAT_AQL) {
 154                 m->cp_hqd_aql_control =
 155                         1 << CP_HQD_AQL_CONTROL__CONTROL0__SHIFT;
 156         }
 157 
 158         if (q->tba_addr) {
 159                 m->compute_pgm_rsrc2 |=
 160                         (1 << COMPUTE_PGM_RSRC2__TRAP_PRESENT__SHIFT);
 161         }
 162 
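              /*
               * For CWSR the context save area starts with the control stack
               * (ctl_stack_size bytes), with the wave/workgroup state placed
               * after it. Setting the stack offset equal to its size marks
               * the control stack as empty, which matches get_wave_state()
               * computing the used size as size - offset.
               */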
 163         if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address) {
 164                 m->cp_hqd_persistent_state |=
 165                         (1 << CP_HQD_PERSISTENT_STATE__QSWITCH_MODE__SHIFT);
 166                 m->cp_hqd_ctx_save_base_addr_lo =
 167                         lower_32_bits(q->ctx_save_restore_area_address);
 168                 m->cp_hqd_ctx_save_base_addr_hi =
 169                         upper_32_bits(q->ctx_save_restore_area_address);
 170                 m->cp_hqd_ctx_save_size = q->ctx_save_restore_area_size;
 171                 m->cp_hqd_cntl_stack_size = q->ctl_stack_size;
 172                 m->cp_hqd_cntl_stack_offset = q->ctl_stack_size;
 173                 m->cp_hqd_wg_state_offset = q->ctl_stack_size;
 174         }
 175 
 176         *mqd = m;
 177         if (gart_addr)
 178                 *gart_addr = addr;
 179         mm->update_mqd(mm, m, q);
 180 }
 181 
 182 static int load_mqd(struct mqd_manager *mm, void *mqd,
 183                         uint32_t pipe_id, uint32_t queue_id,
 184                         struct queue_properties *p, struct mm_struct *mms)
 185 {
 186         /* AQL write pointer counts in 64B packets, PM4/CP counts in dwords. */
 187         uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 4 : 0);
 188 
 189         return mm->dev->kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id,
 190                                           (uint32_t __user *)p->write_ptr,
 191                                           wptr_shift, 0, mms);
 192 }
 193 
 194 static void update_mqd(struct mqd_manager *mm, void *mqd,
 195                       struct queue_properties *q)
 196 {
 197         struct v9_mqd *m;
 198 
 199         m = get_mqd(mqd);
 200 
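              /*
               * q->queue_size is in bytes; the RB size field of
               * cp_hqd_pq_control encodes log2(size in dwords) - 1.
               */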
 201         m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
 202         m->cp_hqd_pq_control |= order_base_2(q->queue_size / 4) - 1;
 203         pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
 204 
 205         m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
 206         m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);
 207 
 208         m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
 209         m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
 210         m->cp_hqd_pq_wptr_poll_addr_lo = lower_32_bits((uint64_t)q->write_ptr);
 211         m->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits((uint64_t)q->write_ptr);
 212 
 213         m->cp_hqd_pq_doorbell_control =
 214                 q->doorbell_off <<
 215                         CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
 216         pr_debug("cp_hqd_pq_doorbell_control 0x%x\n",
 217                         m->cp_hqd_pq_doorbell_control);
 218 
 219         m->cp_hqd_ib_control =
 220                 3 << CP_HQD_IB_CONTROL__MIN_IB_AVAIL_SIZE__SHIFT |
 221                 1 << CP_HQD_IB_CONTROL__IB_EXE_DISABLE__SHIFT;
 222 
 223         /*
 224          * HW does not clamp this field correctly. Maximum EOP queue size
 225          * is constrained by per-SE EOP done signal count, which is 8-bit.
 226          * Limit is 0xFF EOP entries (= 0x7F8 dwords). CP will not submit
 227          * more than (EOP entry count - 1) so a queue size of 0x800 dwords
 228          * is safe, giving a maximum field value of 0xA.
 229          */
 230         m->cp_hqd_eop_control = min(0xA,
 231                 order_base_2(q->eop_ring_buffer_size / 4) - 1);
 232         m->cp_hqd_eop_base_addr_lo =
 233                         lower_32_bits(q->eop_ring_buffer_address >> 8);
 234         m->cp_hqd_eop_base_addr_hi =
 235                         upper_32_bits(q->eop_ring_buffer_address >> 8);
 236 
 237         m->cp_hqd_iq_timer = 0;
 238 
 239         m->cp_hqd_vmid = q->vmid;
 240 
 241         if (q->format == KFD_QUEUE_FORMAT_AQL) {
 242                 m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK |
 243                                 2 << CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR__SHIFT |
 244                                 1 << CP_HQD_PQ_CONTROL__QUEUE_FULL_EN__SHIFT |
 245                                 1 << CP_HQD_PQ_CONTROL__WPP_CLAMP_EN__SHIFT;
 246                 m->cp_hqd_pq_doorbell_control |= 1 <<
 247                         CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_BIF_DROP__SHIFT;
 248         }
 249         if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address)
 250                 m->cp_hqd_ctx_save_control = 0;
 251 
 252         update_cu_mask(mm, mqd, q);
 253         set_priority(m, q);
 254 
 255         q->is_active = QUEUE_IS_ACTIVE(*q);
 256 }
 257 
 258 
 259 static int destroy_mqd(struct mqd_manager *mm, void *mqd,
 260                         enum kfd_preempt_type type,
 261                         unsigned int timeout, uint32_t pipe_id,
 262                         uint32_t queue_id)
 263 {
 264         return mm->dev->kfd2kgd->hqd_destroy
 265                 (mm->dev->kgd, mqd, type, timeout,
 266                 pipe_id, queue_id);
 267 }
 268 
 269 static void free_mqd(struct mqd_manager *mm, void *mqd,
 270                         struct kfd_mem_obj *mqd_mem_obj)
 271 {
 272         struct kfd_dev *kfd = mm->dev;
 273 
 274         if (mqd_mem_obj->gtt_mem) {
 275                 amdgpu_amdkfd_free_gtt_mem(kfd->kgd, mqd_mem_obj->gtt_mem);
 276                 kfree(mqd_mem_obj);
 277         } else {
 278                 kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
 279         }
 280 }
 281 
 282 static bool is_occupied(struct mqd_manager *mm, void *mqd,
 283                         uint64_t queue_address, uint32_t pipe_id,
 284                         uint32_t queue_id)
 285 {
 286         return mm->dev->kfd2kgd->hqd_is_occupied(
 287                 mm->dev->kgd, queue_address,
 288                 pipe_id, queue_id);
 289 }
 290 
 291 static int get_wave_state(struct mqd_manager *mm, void *mqd,
 292                           void __user *ctl_stack,
 293                           u32 *ctl_stack_used_size,
 294                           u32 *save_area_used_size)
 295 {
 296         struct v9_mqd *m;
 297 
 298         /* Control stack is located one page after MQD. */
 299         void *mqd_ctl_stack = (void *)((uintptr_t)mqd + PAGE_SIZE);
 300 
 301         m = get_mqd(mqd);
 302 
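              /*
               * cp_hqd_cntl_stack_offset starts out equal to the control
               * stack size (see init_mqd), so the difference below is the
               * number of bytes the firmware has consumed so far.
               */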
 303         *ctl_stack_used_size = m->cp_hqd_cntl_stack_size -
 304                 m->cp_hqd_cntl_stack_offset;
 305         *save_area_used_size = m->cp_hqd_wg_state_offset;
 306 
 307         if (copy_to_user(ctl_stack, mqd_ctl_stack, m->cp_hqd_cntl_stack_size))
 308                 return -EFAULT;
 309 
 310         return 0;
 311 }
 312 
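      /*
       * HIQ and DIQ MQDs reuse the compute MQD layout; the extra PQ control
       * bits set below mark the queue as privileged and kernel-managed.
       */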
 313 static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
 314                         struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
 315                         struct queue_properties *q)
 316 {
 317         struct v9_mqd *m;
 318 
 319         init_mqd(mm, mqd, mqd_mem_obj, gart_addr, q);
 320 
 321         m = get_mqd(*mqd);
 322 
 323         m->cp_hqd_pq_control |= 1 << CP_HQD_PQ_CONTROL__PRIV_STATE__SHIFT |
 324                         1 << CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT;
 325 }
 326 
 327 static void update_mqd_hiq(struct mqd_manager *mm, void *mqd,
 328                         struct queue_properties *q)
 329 {
 330         struct v9_mqd *m;
 331 
 332         update_mqd(mm, mqd, q);
 333 
 334         /* TODO: what's the point? update_mqd already does this. */
 335         m = get_mqd(mqd);
 336         m->cp_hqd_vmid = q->vmid;
 337 }
 338 
 339 static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
 340                 struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
 341                 struct queue_properties *q)
 342 {
 343         struct v9_sdma_mqd *m;
 344 
 345         m = (struct v9_sdma_mqd *) mqd_mem_obj->cpu_ptr;
 346 
 347         memset(m, 0, sizeof(struct v9_sdma_mqd));
 348 
 349         *mqd = m;
 350         if (gart_addr)
 351                 *gart_addr = mqd_mem_obj->gpu_addr;
 352 
 353         mm->update_mqd(mm, m, q);
 354 }
 355 
 356 static int load_mqd_sdma(struct mqd_manager *mm, void *mqd,
 357                 uint32_t pipe_id, uint32_t queue_id,
 358                 struct queue_properties *p, struct mm_struct *mms)
 359 {
 360         return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->kgd, mqd,
 361                                                (uint32_t __user *)p->write_ptr,
 362                                                mms);
 363 }
 364 
 365 #define SDMA_RLC_DUMMY_DEFAULT 0xf
 366 
 367 static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
 368                 struct queue_properties *q)
 369 {
 370         struct v9_sdma_mqd *m;
 371 
 372         m = get_sdma_mqd(mqd);
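              /*
               * For the SDMA RLC ring, RB_SIZE is log2 of the ring size in
               * dwords (note: no "- 1", unlike the CP PQ encoding above).
               */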
 373         m->sdmax_rlcx_rb_cntl = order_base_2(q->queue_size / 4)
 374                 << SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
 375                 q->vmid << SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT |
 376                 1 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
 377                 6 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT;
 378 
 379         m->sdmax_rlcx_rb_base = lower_32_bits(q->queue_address >> 8);
 380         m->sdmax_rlcx_rb_base_hi = upper_32_bits(q->queue_address >> 8);
 381         m->sdmax_rlcx_rb_rptr_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
 382         m->sdmax_rlcx_rb_rptr_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
 383         m->sdmax_rlcx_doorbell_offset =
 384                 q->doorbell_off << SDMA0_RLC0_DOORBELL_OFFSET__OFFSET__SHIFT;
 385 
 386         m->sdma_engine_id = q->sdma_engine_id;
 387         m->sdma_queue_id = q->sdma_queue_id;
 388         m->sdmax_rlcx_dummy_reg = SDMA_RLC_DUMMY_DEFAULT;
 389 
 390         q->is_active = QUEUE_IS_ACTIVE(*q);
 391 }
 392 
  393 /*
  394  * The preempt type here is ignored because there is only one way
  395  * to preempt an SDMA queue.
  396  */
 397 static int destroy_mqd_sdma(struct mqd_manager *mm, void *mqd,
 398                 enum kfd_preempt_type type,
 399                 unsigned int timeout, uint32_t pipe_id,
 400                 uint32_t queue_id)
 401 {
 402         return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->kgd, mqd, timeout);
 403 }
 404 
 405 static bool is_occupied_sdma(struct mqd_manager *mm, void *mqd,
 406                 uint64_t queue_address, uint32_t pipe_id,
 407                 uint32_t queue_id)
 408 {
 409         return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->kgd, mqd);
 410 }
 411 
 412 #if defined(CONFIG_DEBUG_FS)
 413 
 414 static int debugfs_show_mqd(struct seq_file *m, void *data)
 415 {
 416         seq_hex_dump(m, "    ", DUMP_PREFIX_OFFSET, 32, 4,
 417                      data, sizeof(struct v9_mqd), false);
 418         return 0;
 419 }
 420 
 421 static int debugfs_show_mqd_sdma(struct seq_file *m, void *data)
 422 {
 423         seq_hex_dump(m, "    ", DUMP_PREFIX_OFFSET, 32, 4,
 424                      data, sizeof(struct v9_sdma_mqd), false);
 425         return 0;
 426 }
 427 
 428 #endif
 429 
 430 struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
 431                 struct kfd_dev *dev)
 432 {
 433         struct mqd_manager *mqd;
 434 
 435         if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
 436                 return NULL;
 437 
 438         mqd = kzalloc(sizeof(*mqd), GFP_KERNEL);
 439         if (!mqd)
 440                 return NULL;
 441 
 442         mqd->dev = dev;
 443 
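              /*
               * Wire up the type-specific callbacks. Compute, HIQ and DIQ
               * queues share the v9 compute MQD (HIQ/DIQ are initialized as
               * kernel queues via init_mqd_hiq); SDMA queues use the separate
               * SDMA MQD and the hqd_sdma_* kfd2kgd callbacks.
               */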
 444         switch (type) {
 445         case KFD_MQD_TYPE_CP:
 446         case KFD_MQD_TYPE_COMPUTE:
 447                 mqd->allocate_mqd = allocate_mqd;
 448                 mqd->init_mqd = init_mqd;
 449                 mqd->free_mqd = free_mqd;
 450                 mqd->load_mqd = load_mqd;
 451                 mqd->update_mqd = update_mqd;
 452                 mqd->destroy_mqd = destroy_mqd;
 453                 mqd->is_occupied = is_occupied;
 454                 mqd->get_wave_state = get_wave_state;
 455                 mqd->mqd_size = sizeof(struct v9_mqd);
 456 #if defined(CONFIG_DEBUG_FS)
 457                 mqd->debugfs_show_mqd = debugfs_show_mqd;
 458 #endif
 459                 break;
 460         case KFD_MQD_TYPE_HIQ:
 461                 mqd->allocate_mqd = allocate_hiq_mqd;
 462                 mqd->init_mqd = init_mqd_hiq;
 463                 mqd->free_mqd = free_mqd_hiq_sdma;
 464                 mqd->load_mqd = load_mqd;
 465                 mqd->update_mqd = update_mqd_hiq;
 466                 mqd->destroy_mqd = destroy_mqd;
 467                 mqd->is_occupied = is_occupied;
 468                 mqd->mqd_size = sizeof(struct v9_mqd);
 469 #if defined(CONFIG_DEBUG_FS)
 470                 mqd->debugfs_show_mqd = debugfs_show_mqd;
 471 #endif
 472                 break;
 473         case KFD_MQD_TYPE_DIQ:
 474                 mqd->allocate_mqd = allocate_hiq_mqd;
 475                 mqd->init_mqd = init_mqd_hiq;
 476                 mqd->free_mqd = free_mqd;
 477                 mqd->load_mqd = load_mqd;
 478                 mqd->update_mqd = update_mqd_hiq;
 479                 mqd->destroy_mqd = destroy_mqd;
 480                 mqd->is_occupied = is_occupied;
 481                 mqd->mqd_size = sizeof(struct v9_mqd);
 482 #if defined(CONFIG_DEBUG_FS)
 483                 mqd->debugfs_show_mqd = debugfs_show_mqd;
 484 #endif
 485                 break;
 486         case KFD_MQD_TYPE_SDMA:
 487                 mqd->allocate_mqd = allocate_sdma_mqd;
 488                 mqd->init_mqd = init_mqd_sdma;
 489                 mqd->free_mqd = free_mqd_hiq_sdma;
 490                 mqd->load_mqd = load_mqd_sdma;
 491                 mqd->update_mqd = update_mqd_sdma;
 492                 mqd->destroy_mqd = destroy_mqd_sdma;
 493                 mqd->is_occupied = is_occupied_sdma;
 494                 mqd->mqd_size = sizeof(struct v9_sdma_mqd);
 495 #if defined(CONFIG_DEBUG_FS)
 496                 mqd->debugfs_show_mqd = debugfs_show_mqd_sdma;
 497 #endif
 498                 break;
 499         default:
 500                 kfree(mqd);
 501                 return NULL;
 502         }
 503 
 504         return mqd;
 505 }
