root/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c


DEFINITIONS

This source file defines the following functions:
  1. kernel_queue_init_v9
  2. initialize_v9
  3. uninitialize_v9
  4. submit_packet_v9
  5. pm_map_process_v9
  6. pm_runlist_v9
  7. pm_set_resources_v9
  8. pm_map_queues_v9
  9. pm_unmap_queues_v9
  10. pm_query_status_v9
  11. pm_release_mem_v9

/*
 * Copyright 2016-2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "kfd_kernel_queue.h"
#include "kfd_device_queue_manager.h"
#include "kfd_pm4_headers_ai.h"
#include "kfd_pm4_opcodes.h"

static bool initialize_v9(struct kernel_queue *kq, struct kfd_dev *dev,
                        enum kfd_queue_type type, unsigned int queue_size);
static void uninitialize_v9(struct kernel_queue *kq);
static void submit_packet_v9(struct kernel_queue *kq);

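/* Wire up the GFXv9 implementations of the kernel queue operations. */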
void kernel_queue_init_v9(struct kernel_queue_ops *ops)
{
        ops->initialize = initialize_v9;
        ops->uninitialize = uninitialize_v9;
        ops->submit_packet = submit_packet_v9;
}

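/*
 * Allocate and zero one page of GTT memory for the queue's end-of-pipe
 * (EOP) buffer. Returns false if the allocation fails.
 */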
static bool initialize_v9(struct kernel_queue *kq, struct kfd_dev *dev,
                        enum kfd_queue_type type, unsigned int queue_size)
{
        int retval;

        retval = kfd_gtt_sa_allocate(dev, PAGE_SIZE, &kq->eop_mem);
        if (retval)
                return false;

        kq->eop_gpu_addr = kq->eop_mem->gpu_addr;
        kq->eop_kernel_addr = kq->eop_mem->cpu_ptr;

        memset(kq->eop_kernel_addr, 0, PAGE_SIZE);

        return true;
}

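/* Free the EOP buffer allocated in initialize_v9(). */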
static void uninitialize_v9(struct kernel_queue *kq)
{
        kfd_gtt_sa_free(kq->dev, kq->eop_mem);
}

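/*
 * Publish the pending 64-bit write pointer and ring the queue's doorbell
 * so the hardware picks up the packets just written to the ring.
 */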
static void submit_packet_v9(struct kernel_queue *kq)
{
        *kq->wptr64_kernel = kq->pending_wptr64;
        write_kernel_doorbell64(kq->queue->properties.doorbell_ptr,
                                kq->pending_wptr64);
}

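/*
 * Build a PM4 MAP_PROCESS packet in @buffer. It describes one process to
 * the hardware scheduler: PASID, debug/DIQ state, GDS/GWS/OAC allocation,
 * shader memory apertures, trap handler (TBA/TMA) addresses and the VM
 * page table base.
 */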
static int pm_map_process_v9(struct packet_manager *pm,
                uint32_t *buffer, struct qcm_process_device *qpd)
{
        struct pm4_mes_map_process *packet;
        uint64_t vm_page_table_base_addr = qpd->page_table_base;

        packet = (struct pm4_mes_map_process *)buffer;
        memset(buffer, 0, sizeof(struct pm4_mes_map_process));

        packet->header.u32All = pm_build_pm4_header(IT_MAP_PROCESS,
                                        sizeof(struct pm4_mes_map_process));
        packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
        packet->bitfields2.process_quantum = 1;
        packet->bitfields2.pasid = qpd->pqm->process->pasid;
        packet->bitfields14.gds_size = qpd->gds_size & 0x3F;
        packet->bitfields14.gds_size_hi = (qpd->gds_size >> 6) & 0xF;
        packet->bitfields14.num_gws = qpd->num_gws;
        packet->bitfields14.num_oac = qpd->num_oac;
        packet->bitfields14.sdma_enable = 1;
        packet->bitfields14.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count;

        packet->sh_mem_config = qpd->sh_mem_config;
        packet->sh_mem_bases = qpd->sh_mem_bases;
        packet->sq_shader_tba_lo = lower_32_bits(qpd->tba_addr >> 8);
        packet->sq_shader_tba_hi = upper_32_bits(qpd->tba_addr >> 8);
        packet->sq_shader_tma_lo = lower_32_bits(qpd->tma_addr >> 8);
        packet->sq_shader_tma_hi = upper_32_bits(qpd->tma_addr >> 8);

        packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area);
        packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area);

        packet->vm_context_page_table_base_addr_lo32 =
                        lower_32_bits(vm_page_table_base_addr);
        packet->vm_context_page_table_base_addr_hi32 =
                        upper_32_bits(vm_page_table_base_addr);

        return 0;
}

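/*
 * Build a PM4 RUN_LIST packet in @buffer pointing the scheduler at the
 * runlist indirect buffer at @ib (@ib_size_in_dwords dwords long). @chain
 * indicates that this runlist chains to another one.
 */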
static int pm_runlist_v9(struct packet_manager *pm, uint32_t *buffer,
                        uint64_t ib, size_t ib_size_in_dwords, bool chain)
{
        struct pm4_mes_runlist *packet;

        int concurrent_proc_cnt = 0;
        struct kfd_dev *kfd = pm->dqm->dev;

        /* Determine the number of processes to map together to HW:
         * it cannot exceed the number of VMIDs available to the
         * scheduler, and it is determined by the smaller of the number
         * of processes in the runlist and the kfd module parameter
         * hws_max_conc_proc.
         * Note: the arbitration between the number of VMIDs and
         * hws_max_conc_proc has been done in
         * kgd2kfd_device_init().
         */
        concurrent_proc_cnt = min(pm->dqm->processes_count,
                        kfd->max_proc_per_quantum);

        packet = (struct pm4_mes_runlist *)buffer;

        memset(buffer, 0, sizeof(struct pm4_mes_runlist));
        packet->header.u32All = pm_build_pm4_header(IT_RUN_LIST,
                                                sizeof(struct pm4_mes_runlist));

        packet->bitfields4.ib_size = ib_size_in_dwords;
        packet->bitfields4.chain = chain ? 1 : 0;
        packet->bitfields4.offload_polling = 0;
        packet->bitfields4.chained_runlist_idle_disable = chain ? 1 : 0;
        packet->bitfields4.valid = 1;
        packet->bitfields4.process_cnt = concurrent_proc_cnt;
        packet->ordinal2 = lower_32_bits(ib);
        packet->ib_base_hi = upper_32_bits(ib);

        return 0;
}

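/*
 * Build a PM4 SET_RESOURCES packet in @buffer telling the scheduler (via
 * the HIQ) which VMIDs, queues, GWS, OAC and GDS heap it may use.
 */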
static int pm_set_resources_v9(struct packet_manager *pm, uint32_t *buffer,
                                struct scheduling_resources *res)
{
        struct pm4_mes_set_resources *packet;

        packet = (struct pm4_mes_set_resources *)buffer;
        memset(buffer, 0, sizeof(struct pm4_mes_set_resources));

        packet->header.u32All = pm_build_pm4_header(IT_SET_RESOURCES,
                                        sizeof(struct pm4_mes_set_resources));

        packet->bitfields2.queue_type =
                        queue_type__mes_set_resources__hsa_interface_queue_hiq;
        packet->bitfields2.vmid_mask = res->vmid_mask;
        packet->bitfields2.unmap_latency = KFD_UNMAP_LATENCY_MS / 100;
        packet->bitfields7.oac_mask = res->oac_mask;
        packet->bitfields8.gds_heap_base = res->gds_heap_base;
        packet->bitfields8.gds_heap_size = res->gds_heap_size;

        packet->gws_mask_lo = lower_32_bits(res->gws_mask);
        packet->gws_mask_hi = upper_32_bits(res->gws_mask);

        packet->queue_mask_lo = lower_32_bits(res->queue_mask);
        packet->queue_mask_hi = upper_32_bits(res->queue_mask);

        return 0;
}

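/*
 * Build a PM4 MAP_QUEUES packet in @buffer for a single queue. Selects the
 * engine (compute, SDMA 0-1 via the legacy engine_sel field, SDMA 2-7 via
 * extended_engine_sel) and queue type, and passes the MQD GART address,
 * doorbell offset and write-pointer address to the scheduler.
 */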
static int pm_map_queues_v9(struct packet_manager *pm, uint32_t *buffer,
                struct queue *q, bool is_static)
{
        struct pm4_mes_map_queues *packet;
        bool use_static = is_static;

        packet = (struct pm4_mes_map_queues *)buffer;
        memset(buffer, 0, sizeof(struct pm4_mes_map_queues));

        packet->header.u32All = pm_build_pm4_header(IT_MAP_QUEUES,
                                        sizeof(struct pm4_mes_map_queues));
        packet->bitfields2.num_queues = 1;
        packet->bitfields2.queue_sel =
                queue_sel__mes_map_queues__map_to_hws_determined_queue_slots_vi;

        packet->bitfields2.engine_sel =
                engine_sel__mes_map_queues__compute_vi;
        packet->bitfields2.gws_control_queue = q->gws ? 1 : 0;
        packet->bitfields2.extended_engine_sel =
                extended_engine_sel__mes_map_queues__legacy_engine_sel;
        packet->bitfields2.queue_type =
                queue_type__mes_map_queues__normal_compute_vi;

        switch (q->properties.type) {
        case KFD_QUEUE_TYPE_COMPUTE:
                if (use_static)
                        packet->bitfields2.queue_type =
                queue_type__mes_map_queues__normal_latency_static_queue_vi;
                break;
        case KFD_QUEUE_TYPE_DIQ:
                packet->bitfields2.queue_type =
                        queue_type__mes_map_queues__debug_interface_queue_vi;
                break;
        case KFD_QUEUE_TYPE_SDMA:
        case KFD_QUEUE_TYPE_SDMA_XGMI:
                use_static = false; /* no static queues under SDMA */
                if (q->properties.sdma_engine_id < 2) {
                        packet->bitfields2.engine_sel = q->properties.sdma_engine_id +
                                engine_sel__mes_map_queues__sdma0_vi;
                } else {
                        packet->bitfields2.extended_engine_sel =
                                extended_engine_sel__mes_map_queues__sdma0_to_7_sel;
                        packet->bitfields2.engine_sel = q->properties.sdma_engine_id;
                }
                break;
        default:
                WARN(1, "queue type %d", q->properties.type);
                return -EINVAL;
        }
        packet->bitfields3.doorbell_offset =
                        q->properties.doorbell_off;

        packet->mqd_addr_lo =
                        lower_32_bits(q->gart_mqd_addr);

        packet->mqd_addr_hi =
                        upper_32_bits(q->gart_mqd_addr);

        packet->wptr_addr_lo =
                        lower_32_bits((uint64_t)q->properties.write_ptr);

        packet->wptr_addr_hi =
                        upper_32_bits((uint64_t)q->properties.write_ptr);

        return 0;
}

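/*
 * Build a PM4 UNMAP_QUEUES packet in @buffer. Preempts (or, when @reset is
 * set, resets) queues on the selected engine, matching either one queue by
 * doorbell offset, all queues of one PASID, all queues, or all dynamic
 * (non-static) queues, depending on @filter.
 */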
static int pm_unmap_queues_v9(struct packet_manager *pm, uint32_t *buffer,
                        enum kfd_queue_type type,
                        enum kfd_unmap_queues_filter filter,
                        uint32_t filter_param, bool reset,
                        unsigned int sdma_engine)
{
        struct pm4_mes_unmap_queues *packet;

        packet = (struct pm4_mes_unmap_queues *)buffer;
        memset(buffer, 0, sizeof(struct pm4_mes_unmap_queues));

        packet->header.u32All = pm_build_pm4_header(IT_UNMAP_QUEUES,
                                        sizeof(struct pm4_mes_unmap_queues));
        switch (type) {
        case KFD_QUEUE_TYPE_COMPUTE:
        case KFD_QUEUE_TYPE_DIQ:
                packet->bitfields2.extended_engine_sel =
                        extended_engine_sel__mes_unmap_queues__legacy_engine_sel;
                packet->bitfields2.engine_sel =
                        engine_sel__mes_unmap_queues__compute;
                break;
        case KFD_QUEUE_TYPE_SDMA:
        case KFD_QUEUE_TYPE_SDMA_XGMI:
                if (sdma_engine < 2) {
                        packet->bitfields2.extended_engine_sel =
                                extended_engine_sel__mes_unmap_queues__legacy_engine_sel;
                        packet->bitfields2.engine_sel =
                                engine_sel__mes_unmap_queues__sdma0 + sdma_engine;
                } else {
                        packet->bitfields2.extended_engine_sel =
                                extended_engine_sel__mes_unmap_queues__sdma0_to_7_sel;
                        packet->bitfields2.engine_sel = sdma_engine;
                }
                break;
        default:
                WARN(1, "queue type %d", type);
                return -EINVAL;
        }

        if (reset)
                packet->bitfields2.action =
                        action__mes_unmap_queues__reset_queues;
        else
                packet->bitfields2.action =
                        action__mes_unmap_queues__preempt_queues;

        switch (filter) {
        case KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE:
                packet->bitfields2.queue_sel =
                        queue_sel__mes_unmap_queues__perform_request_on_specified_queues;
                packet->bitfields2.num_queues = 1;
                packet->bitfields3b.doorbell_offset0 = filter_param;
                break;
        case KFD_UNMAP_QUEUES_FILTER_BY_PASID:
                packet->bitfields2.queue_sel =
                        queue_sel__mes_unmap_queues__perform_request_on_pasid_queues;
                packet->bitfields3a.pasid = filter_param;
                break;
        case KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES:
                packet->bitfields2.queue_sel =
                        queue_sel__mes_unmap_queues__unmap_all_queues;
                break;
        case KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES:
                /* in this case, we do not preempt static queues */
                packet->bitfields2.queue_sel =
                        queue_sel__mes_unmap_queues__unmap_all_non_static_queues;
                break;
        default:
                WARN(1, "filter %d", filter);
                return -EINVAL;
        }

        return 0;
}

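/*
 * Build a PM4 QUERY_STATUS packet in @buffer asking the scheduler to write
 * @fence_value to @fence_address once the preceding packets have completed.
 */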
static int pm_query_status_v9(struct packet_manager *pm, uint32_t *buffer,
                        uint64_t fence_address, uint32_t fence_value)
{
        struct pm4_mes_query_status *packet;

        packet = (struct pm4_mes_query_status *)buffer;
        memset(buffer, 0, sizeof(struct pm4_mes_query_status));

        packet->header.u32All = pm_build_pm4_header(IT_QUERY_STATUS,
                                        sizeof(struct pm4_mes_query_status));

        packet->bitfields2.context_id = 0;
        packet->bitfields2.interrupt_sel =
                        interrupt_sel__mes_query_status__completion_status;
        packet->bitfields2.command =
                        command__mes_query_status__fence_only_after_write_ack;

        packet->addr_hi = upper_32_bits((uint64_t)fence_address);
        packet->addr_lo = lower_32_bits((uint64_t)fence_address);
        packet->data_hi = upper_32_bits((uint64_t)fence_value);
        packet->data_lo = lower_32_bits((uint64_t)fence_value);

        return 0;
}

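/*
 * Build a MEC RELEASE_MEM packet in @buffer: a cache-flush-and-invalidate
 * end-of-pipe event that writes a 32-bit zero to @gpu_addr and raises an
 * interrupt after the write is confirmed. The low address bits are stored
 * shifted right by two because the destination must be dword aligned.
 */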
static int pm_release_mem_v9(uint64_t gpu_addr, uint32_t *buffer)
{
        struct pm4_mec_release_mem *packet;

        packet = (struct pm4_mec_release_mem *)buffer;
        memset(buffer, 0, sizeof(struct pm4_mec_release_mem));

        packet->header.u32All = pm_build_pm4_header(IT_RELEASE_MEM,
                                        sizeof(struct pm4_mec_release_mem));

        packet->bitfields2.event_type = CACHE_FLUSH_AND_INV_TS_EVENT;
        packet->bitfields2.event_index = event_index__mec_release_mem__end_of_pipe;
        packet->bitfields2.tcl1_action_ena = 1;
        packet->bitfields2.tc_action_ena = 1;
        packet->bitfields2.cache_policy = cache_policy__mec_release_mem__lru;

        packet->bitfields3.data_sel = data_sel__mec_release_mem__send_32_bit_low;
        packet->bitfields3.int_sel =
                int_sel__mec_release_mem__send_interrupt_after_write_confirm;

        packet->bitfields4.address_lo_32b = (gpu_addr & 0xffffffff) >> 2;
        packet->address_hi = upper_32_bits(gpu_addr);

        packet->data_lo = 0;

        return 0;
}

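/*
 * GFXv9 packet manager backend: the packet-build callbacks above plus the
 * size of each packet, which the packet manager uses to reserve ring space.
 */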
const struct packet_manager_funcs kfd_v9_pm_funcs = {
        .map_process            = pm_map_process_v9,
        .runlist                = pm_runlist_v9,
        .set_resources          = pm_set_resources_v9,
        .map_queues             = pm_map_queues_v9,
        .unmap_queues           = pm_unmap_queues_v9,
        .query_status           = pm_query_status_v9,
        .release_mem            = pm_release_mem_v9,
        .map_process_size       = sizeof(struct pm4_mes_map_process),
        .runlist_size           = sizeof(struct pm4_mes_runlist),
        .set_resources_size     = sizeof(struct pm4_mes_set_resources),
        .map_queues_size        = sizeof(struct pm4_mes_map_queues),
        .unmap_queues_size      = sizeof(struct pm4_mes_unmap_queues),
        .query_status_size      = sizeof(struct pm4_mes_query_status),
        .release_mem_size       = sizeof(struct pm4_mec_release_mem)
};
