root/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c

DEFINITIONS

This source file includes the following definitions.
  1. get_mqd
  2. get_sdma_mqd
  3. update_cu_mask
  4. allocate_mqd
  5. init_mqd
  6. load_mqd
  7. update_mqd
  8. destroy_mqd
  9. free_mqd
  10. is_occupied
  11. get_wave_state
  12. init_mqd_hiq
  13. update_mqd_hiq
  14. init_mqd_sdma
  15. load_mqd_sdma
  16. update_mqd_sdma
  17. destroy_mqd_sdma
  18. is_occupied_sdma
  19. debugfs_show_mqd
  20. debugfs_show_mqd_sdma
  21. mqd_manager_init_v10

/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include "kfd_priv.h"
#include "kfd_mqd_manager.h"
#include "v10_structs.h"
#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "amdgpu_amdkfd.h"

static inline struct v10_compute_mqd *get_mqd(void *mqd)
{
        return (struct v10_compute_mqd *)mqd;
}

static inline struct v10_sdma_mqd *get_sdma_mqd(void *mqd)
{
        return (struct v10_sdma_mqd *)mqd;
}

static void update_cu_mask(struct mqd_manager *mm, void *mqd,
                           struct queue_properties *q)
{
        struct v10_compute_mqd *m;
        uint32_t se_mask[4] = {0}; /* 4 is the max # of SEs */

        if (q->cu_mask_count == 0)
                return;

        mqd_symmetrically_map_cu_mask(mm,
                q->cu_mask, q->cu_mask_count, se_mask);

        m = get_mqd(mqd);
        m->compute_static_thread_mgmt_se0 = se_mask[0];
        m->compute_static_thread_mgmt_se1 = se_mask[1];
        m->compute_static_thread_mgmt_se2 = se_mask[2];
        m->compute_static_thread_mgmt_se3 = se_mask[3];

        pr_debug("update cu mask to %#x %#x %#x %#x\n",
                m->compute_static_thread_mgmt_se0,
                m->compute_static_thread_mgmt_se1,
                m->compute_static_thread_mgmt_se2,
                m->compute_static_thread_mgmt_se3);
}

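/*
 * Illustrative note (not from the original source): on an ASIC with two
 * shader engines, mqd_symmetrically_map_cu_mask() above spreads the flat
 * CU mask across se_mask[0] and se_mask[1] so that enabled CUs are
 * balanced between SEs. The exact interleaving depends on the SE/SH
 * topology reported by the KGD, so treat this as a sketch of the intent.
 */
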
static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd,
                struct queue_properties *q)
{
        int retval;
        struct kfd_mem_obj *mqd_mem_obj = NULL;

        /* From v9 onward, for CWSR, the control stack is located on the next
         * page boundary after the MQD, so use the GTT allocation function
         * instead of the sub-allocator.
         */
        if (kfd->cwsr_enabled && (q->type == KFD_QUEUE_TYPE_COMPUTE)) {
                mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_NOIO);
                if (!mqd_mem_obj)
                        return NULL;
                retval = amdgpu_amdkfd_alloc_gtt_mem(kfd->kgd,
                        ALIGN(q->ctl_stack_size, PAGE_SIZE) +
                                ALIGN(sizeof(struct v10_compute_mqd), PAGE_SIZE),
                        &(mqd_mem_obj->gtt_mem),
                        &(mqd_mem_obj->gpu_addr),
                        (void *)&(mqd_mem_obj->cpu_ptr), true);
        } else {
                retval = kfd_gtt_sa_allocate(kfd, sizeof(struct v10_compute_mqd),
                                &mqd_mem_obj);
        }

        if (retval) {
                kfree(mqd_mem_obj);
                return NULL;
        }

        return mqd_mem_obj;
}

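/*
 * Resulting layout of the CWSR allocation above, assuming the v10 MQD
 * fits in a single page (sizeof(struct v10_compute_mqd) <= PAGE_SIZE,
 * which holds for the v10 MQD struct):
 *
 *   gpu_addr + 0         : MQD
 *   gpu_addr + PAGE_SIZE : control stack, ALIGN(q->ctl_stack_size, PAGE_SIZE)
 *
 * get_wave_state() below relies on this one-page offset.
 */
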
static void init_mqd(struct mqd_manager *mm, void **mqd,
                        struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
                        struct queue_properties *q)
{
        uint64_t addr;
        struct v10_compute_mqd *m;

        m = (struct v10_compute_mqd *) mqd_mem_obj->cpu_ptr;
        addr = mqd_mem_obj->gpu_addr;

        memset(m, 0, sizeof(struct v10_compute_mqd));

        m->header = 0xC0310800;
        m->compute_pipelinestat_enable = 1;
        m->compute_static_thread_mgmt_se0 = 0xFFFFFFFF;
        m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF;
        m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF;
        m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF;

        m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
                        0x53 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;

        m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT;

        m->cp_mqd_base_addr_lo        = lower_32_bits(addr);
        m->cp_mqd_base_addr_hi        = upper_32_bits(addr);

        m->cp_hqd_quantum = 1 << CP_HQD_QUANTUM__QUANTUM_EN__SHIFT |
                        1 << CP_HQD_QUANTUM__QUANTUM_SCALE__SHIFT |
                        10 << CP_HQD_QUANTUM__QUANTUM_DURATION__SHIFT;

        m->cp_hqd_pipe_priority = 1;
        m->cp_hqd_queue_priority = 15;

        if (q->format == KFD_QUEUE_FORMAT_AQL) {
                m->cp_hqd_aql_control =
                        1 << CP_HQD_AQL_CONTROL__CONTROL0__SHIFT;
        }

        if (mm->dev->cwsr_enabled) {
                m->cp_hqd_persistent_state |=
                        (1 << CP_HQD_PERSISTENT_STATE__QSWITCH_MODE__SHIFT);
                m->cp_hqd_ctx_save_base_addr_lo =
                        lower_32_bits(q->ctx_save_restore_area_address);
                m->cp_hqd_ctx_save_base_addr_hi =
                        upper_32_bits(q->ctx_save_restore_area_address);
                m->cp_hqd_ctx_save_size = q->ctx_save_restore_area_size;
                m->cp_hqd_cntl_stack_size = q->ctl_stack_size;
                m->cp_hqd_cntl_stack_offset = q->ctl_stack_size;
                m->cp_hqd_wg_state_offset = q->ctl_stack_size;
        }

        *mqd = m;
        if (gart_addr)
                *gart_addr = addr;
        mm->update_mqd(mm, m, q);
}

static int load_mqd(struct mqd_manager *mm, void *mqd,
                        uint32_t pipe_id, uint32_t queue_id,
                        struct queue_properties *p, struct mm_struct *mms)
{
        int r = 0;
        /* AQL write pointer counts in 64B packets, PM4/CP counts in dwords. */
        uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 4 : 0);

        r = mm->dev->kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id,
                                          (uint32_t __user *)p->write_ptr,
                                          wptr_shift, 0, mms);
        return r;
}

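/*
 * Worked example for wptr_shift above: an AQL packet is 64 bytes, i.e.
 * 16 dwords, so shifting a packet-granular write pointer left by 4
 * (x16) converts it to the dword units the CP expects; PM4 queues
 * already count in dwords, hence shift 0.
 */
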
static void update_mqd(struct mqd_manager *mm, void *mqd,
                      struct queue_properties *q)
{
        struct v10_compute_mqd *m;

        m = get_mqd(mqd);

        m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
        m->cp_hqd_pq_control |=
                        ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
        pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);

        m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
        m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);

        m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
        m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
        m->cp_hqd_pq_wptr_poll_addr_lo = lower_32_bits((uint64_t)q->write_ptr);
        m->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits((uint64_t)q->write_ptr);

        m->cp_hqd_pq_doorbell_control =
                q->doorbell_off <<
                        CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
        pr_debug("cp_hqd_pq_doorbell_control 0x%x\n",
                        m->cp_hqd_pq_doorbell_control);

        m->cp_hqd_ib_control = 3 << CP_HQD_IB_CONTROL__MIN_IB_AVAIL_SIZE__SHIFT;

        /*
         * HW does not clamp this field correctly. Maximum EOP queue size
         * is constrained by per-SE EOP done signal count, which is 8-bit.
         * Limit is 0xFF EOP entries (= 0x7F8 dwords). CP will not submit
         * more than (EOP entry count - 1) so a queue size of 0x800 dwords
         * is safe, giving a maximum field value of 0xA.
         */
        m->cp_hqd_eop_control = min(0xA,
                ffs(q->eop_ring_buffer_size / sizeof(unsigned int)) - 1 - 1);
        m->cp_hqd_eop_base_addr_lo =
                        lower_32_bits(q->eop_ring_buffer_address >> 8);
        m->cp_hqd_eop_base_addr_hi =
                        upper_32_bits(q->eop_ring_buffer_address >> 8);

        m->cp_hqd_iq_timer = 0;

        m->cp_hqd_vmid = q->vmid;

        if (q->format == KFD_QUEUE_FORMAT_AQL) {
                /* GC 10 removed WPP_CLAMP from PQ Control */
                m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK |
                                2 << CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR__SHIFT |
                                1 << CP_HQD_PQ_CONTROL__QUEUE_FULL_EN__SHIFT;
                m->cp_hqd_pq_doorbell_control |=
                        1 << CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_BIF_DROP__SHIFT;
        }
        if (mm->dev->cwsr_enabled)
                m->cp_hqd_ctx_save_control = 0;

        update_cu_mask(mm, mqd, q);

        q->is_active = (q->queue_size > 0 &&
                        q->queue_address != 0 &&
                        q->queue_percent > 0 &&
                        !q->is_evicted);
}

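/*
 * Worked example for the ring-size encoding in update_mqd() above,
 * assuming the usual 2^(QUEUE_SIZE+1)-dword interpretation of the
 * CP_HQD_PQ_CONTROL queue-size field: a 1 MiB ring is 2^18 dwords;
 * ffs(2^18) - 1 = 18, and the second "- 1" yields a field value of 17.
 */
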
static int destroy_mqd(struct mqd_manager *mm, void *mqd,
                       enum kfd_preempt_type type,
                       unsigned int timeout, uint32_t pipe_id,
                       uint32_t queue_id)
{
        return mm->dev->kfd2kgd->hqd_destroy
                (mm->dev->kgd, mqd, type, timeout,
                 pipe_id, queue_id);
}

static void free_mqd(struct mqd_manager *mm, void *mqd,
                        struct kfd_mem_obj *mqd_mem_obj)
{
        struct kfd_dev *kfd = mm->dev;

        if (mqd_mem_obj->gtt_mem) {
                amdgpu_amdkfd_free_gtt_mem(kfd->kgd, mqd_mem_obj->gtt_mem);
                kfree(mqd_mem_obj);
        } else {
                kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
        }
}

static bool is_occupied(struct mqd_manager *mm, void *mqd,
                        uint64_t queue_address, uint32_t pipe_id,
                        uint32_t queue_id)
{
        return mm->dev->kfd2kgd->hqd_is_occupied(
                mm->dev->kgd, queue_address,
                pipe_id, queue_id);
}

static int get_wave_state(struct mqd_manager *mm, void *mqd,
                          void __user *ctl_stack,
                          u32 *ctl_stack_used_size,
                          u32 *save_area_used_size)
{
        struct v10_compute_mqd *m;

        /* Control stack is located one page after MQD. */
        void *mqd_ctl_stack = (void *)((uintptr_t)mqd + PAGE_SIZE);

        m = get_mqd(mqd);

        *ctl_stack_used_size = m->cp_hqd_cntl_stack_size -
                m->cp_hqd_cntl_stack_offset;
        *save_area_used_size = m->cp_hqd_wg_state_offset -
                m->cp_hqd_cntl_stack_size;

        if (copy_to_user(ctl_stack, mqd_ctl_stack, m->cp_hqd_cntl_stack_size))
                return -EFAULT;

        return 0;
}

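/*
 * Hedged note on the arithmetic in get_wave_state() above: the firmware
 * appears to fill the control stack from the top of the buffer down,
 * decrementing cp_hqd_cntl_stack_offset as it goes, so "size - offset"
 * is the number of bytes in use; the save area begins right after the
 * control stack, so "wg_state_offset - cntl_stack_size" is likewise
 * the save-area usage. Treat this as an interpretation, not a spec.
 */
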
static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
                        struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
                        struct queue_properties *q)
{
        struct v10_compute_mqd *m;

        init_mqd(mm, mqd, mqd_mem_obj, gart_addr, q);

        m = get_mqd(*mqd);

        m->cp_hqd_pq_control |= 1 << CP_HQD_PQ_CONTROL__PRIV_STATE__SHIFT |
                        1 << CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT;
}

static void update_mqd_hiq(struct mqd_manager *mm, void *mqd,
                        struct queue_properties *q)
{
        struct v10_compute_mqd *m;

        update_mqd(mm, mqd, q);

        /* TODO: what's the point? update_mqd already does this. */
        m = get_mqd(mqd);
        m->cp_hqd_vmid = q->vmid;
}

static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
                struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
                struct queue_properties *q)
{
        struct v10_sdma_mqd *m;

        m = (struct v10_sdma_mqd *) mqd_mem_obj->cpu_ptr;

        memset(m, 0, sizeof(struct v10_sdma_mqd));

        *mqd = m;
        if (gart_addr)
                *gart_addr = mqd_mem_obj->gpu_addr;

        mm->update_mqd(mm, m, q);
}

static int load_mqd_sdma(struct mqd_manager *mm, void *mqd,
                uint32_t pipe_id, uint32_t queue_id,
                struct queue_properties *p, struct mm_struct *mms)
{
        return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->kgd, mqd,
                                               (uint32_t __user *)p->write_ptr,
                                               mms);
}

#define SDMA_RLC_DUMMY_DEFAULT 0xf

static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
                struct queue_properties *q)
{
        struct v10_sdma_mqd *m;

        m = get_sdma_mqd(mqd);
        m->sdmax_rlcx_rb_cntl = (ffs(q->queue_size / sizeof(unsigned int)) - 1)
                << SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
                q->vmid << SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT |
                1 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
                6 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT;

        m->sdmax_rlcx_rb_base = lower_32_bits(q->queue_address >> 8);
        m->sdmax_rlcx_rb_base_hi = upper_32_bits(q->queue_address >> 8);
        m->sdmax_rlcx_rb_rptr_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
        m->sdmax_rlcx_rb_rptr_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
        m->sdmax_rlcx_doorbell_offset =
                q->doorbell_off << SDMA0_RLC0_DOORBELL_OFFSET__OFFSET__SHIFT;

        m->sdma_engine_id = q->sdma_engine_id;
        m->sdma_queue_id = q->sdma_queue_id;
        m->sdmax_rlcx_dummy_reg = SDMA_RLC_DUMMY_DEFAULT;

        q->is_active = (q->queue_size > 0 &&
                        q->queue_address != 0 &&
                        q->queue_percent > 0 &&
                        !q->is_evicted);
}

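/*
 * Worked example for the RB_SIZE encoding in update_mqd_sdma() above:
 * the SDMA ring size is programmed as log2 of the size in dwords, so a
 * 256 KiB ring (2^16 dwords) gives ffs(2^16) - 1 = 16. Note there is
 * no extra "- 1" here, unlike the CP queue-size field in update_mqd().
 */
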
/*
 * The preempt type is ignored here because there is only one way to
 * preempt an SDMA queue.
 */
static int destroy_mqd_sdma(struct mqd_manager *mm, void *mqd,
                enum kfd_preempt_type type,
                unsigned int timeout, uint32_t pipe_id,
                uint32_t queue_id)
{
        return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->kgd, mqd, timeout);
}

static bool is_occupied_sdma(struct mqd_manager *mm, void *mqd,
                uint64_t queue_address, uint32_t pipe_id,
                uint32_t queue_id)
{
        return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->kgd, mqd);
}

#if defined(CONFIG_DEBUG_FS)

static int debugfs_show_mqd(struct seq_file *m, void *data)
{
        seq_hex_dump(m, "    ", DUMP_PREFIX_OFFSET, 32, 4,
                     data, sizeof(struct v10_compute_mqd), false);
        return 0;
}

static int debugfs_show_mqd_sdma(struct seq_file *m, void *data)
{
        seq_hex_dump(m, "    ", DUMP_PREFIX_OFFSET, 32, 4,
                     data, sizeof(struct v10_sdma_mqd), false);
        return 0;
}

#endif

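/*
 * These hooks back the KFD debugfs MQD dump (conventionally exposed
 * under /sys/kernel/debug/kfd/; the exact file layout is defined in
 * kfd_debugfs.c, not here, so treat the path as an assumption).
 */
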
struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type,
                struct kfd_dev *dev)
{
        struct mqd_manager *mqd;

        if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
                return NULL;

        mqd = kzalloc(sizeof(*mqd), GFP_NOIO);
        if (!mqd)
                return NULL;

        mqd->dev = dev;

        switch (type) {
        case KFD_MQD_TYPE_CP:
        case KFD_MQD_TYPE_COMPUTE:
                pr_debug("%s@%i\n", __func__, __LINE__);
                mqd->allocate_mqd = allocate_mqd;
                mqd->init_mqd = init_mqd;
                mqd->free_mqd = free_mqd;
                mqd->load_mqd = load_mqd;
                mqd->update_mqd = update_mqd;
                mqd->destroy_mqd = destroy_mqd;
                mqd->is_occupied = is_occupied;
                mqd->mqd_size = sizeof(struct v10_compute_mqd);
                mqd->get_wave_state = get_wave_state;
#if defined(CONFIG_DEBUG_FS)
                mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
                pr_debug("%s@%i\n", __func__, __LINE__);
                break;
        case KFD_MQD_TYPE_HIQ:
                pr_debug("%s@%i\n", __func__, __LINE__);
                mqd->allocate_mqd = allocate_hiq_mqd;
                mqd->init_mqd = init_mqd_hiq;
                mqd->free_mqd = free_mqd_hiq_sdma;
                mqd->load_mqd = load_mqd;
                mqd->update_mqd = update_mqd_hiq;
                mqd->destroy_mqd = destroy_mqd;
                mqd->is_occupied = is_occupied;
                mqd->mqd_size = sizeof(struct v10_compute_mqd);
#if defined(CONFIG_DEBUG_FS)
                mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
                pr_debug("%s@%i\n", __func__, __LINE__);
                break;
        case KFD_MQD_TYPE_DIQ:
                mqd->allocate_mqd = allocate_hiq_mqd;
                mqd->init_mqd = init_mqd_hiq;
                mqd->free_mqd = free_mqd;
                mqd->load_mqd = load_mqd;
                mqd->update_mqd = update_mqd_hiq;
                mqd->destroy_mqd = destroy_mqd;
                mqd->is_occupied = is_occupied;
                mqd->mqd_size = sizeof(struct v10_compute_mqd);
#if defined(CONFIG_DEBUG_FS)
                mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
                break;
        case KFD_MQD_TYPE_SDMA:
                pr_debug("%s@%i\n", __func__, __LINE__);
                mqd->allocate_mqd = allocate_sdma_mqd;
                mqd->init_mqd = init_mqd_sdma;
                mqd->free_mqd = free_mqd_hiq_sdma;
                mqd->load_mqd = load_mqd_sdma;
                mqd->update_mqd = update_mqd_sdma;
                mqd->destroy_mqd = destroy_mqd_sdma;
                mqd->is_occupied = is_occupied_sdma;
                mqd->mqd_size = sizeof(struct v10_sdma_mqd);
#if defined(CONFIG_DEBUG_FS)
                mqd->debugfs_show_mqd = debugfs_show_mqd_sdma;
#endif
                pr_debug("%s@%i\n", __func__, __LINE__);
                break;
        default:
                kfree(mqd);
                return NULL;
        }

        return mqd;
}
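
/*
 * Illustrative call site (a sketch, not the actual wiring; the real
 * initialization lives in the KFD device/queue-manager code, and
 * "mqd_mgrs" here is an assumed field name):
 *
 *   for (i = 0; i < KFD_MQD_TYPE_MAX; i++)
 *           dqm->mqd_mgrs[i] = mqd_manager_init_v10(i, dev);
 */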
