This source file includes the following definitions:
- get_mqd_type_from_queue_type
- is_pipe_enabled
- get_queues_num
- get_queues_per_pipe
- get_pipes_per_mec
- get_num_sdma_engines
- get_num_xgmi_sdma_engines
- get_num_sdma_queues
- get_num_xgmi_sdma_queues
- program_sh_mem_settings
- allocate_doorbell
- deallocate_doorbell
- allocate_vmid
- flush_texture_cache_nocpsch
- deallocate_vmid
- create_queue_nocpsch
- allocate_hqd
- deallocate_hqd
- destroy_queue_nocpsch_locked
- destroy_queue_nocpsch
- update_queue
- evict_process_queues_nocpsch
- evict_process_queues_cpsch
- restore_process_queues_nocpsch
- restore_process_queues_cpsch
- register_process
- unregister_process
- set_pasid_vmid_mapping
- init_interrupts
- initialize_nocpsch
- uninitialize
- start_nocpsch
- stop_nocpsch
- allocate_sdma_queue
- deallocate_sdma_queue
- set_sched_resources
- initialize_cpsch
- start_cpsch
- stop_cpsch
- create_kernel_queue_cpsch
- destroy_kernel_queue_cpsch
- create_queue_cpsch
- amdkfd_fence_wait_timeout
- unmap_sdma_queues
- map_queues_cpsch
- unmap_queues_cpsch
- execute_queues_cpsch
- destroy_queue_cpsch
- set_cache_memory_policy
- set_trap_handler
- process_termination_nocpsch
- get_wave_state
- process_termination_cpsch
- init_mqd_managers
- allocate_hiq_sdma_mqd
- device_queue_manager_init
- deallocate_hiq_sdma_mqd
- device_queue_manager_uninit
- kfd_process_vm_fault
- kfd_process_hw_exception
- seq_reg_dump
- dqm_debugfs_hqds
- dqm_debugfs_execute_queues
24 #include <linux/ratelimit.h>
25 #include <linux/printk.h>
26 #include <linux/slab.h>
27 #include <linux/list.h>
28 #include <linux/types.h>
29 #include <linux/bitops.h>
30 #include <linux/sched.h>
31 #include "kfd_priv.h"
32 #include "kfd_device_queue_manager.h"
33 #include "kfd_mqd_manager.h"
34 #include "cik_regs.h"
35 #include "kfd_kernel_queue.h"
36 #include "amdgpu_amdkfd.h"
37
38
39 #define CIK_HPD_EOP_BYTES_LOG2 11
40 #define CIK_HPD_EOP_BYTES (1U << CIK_HPD_EOP_BYTES_LOG2)
41
42 static int set_pasid_vmid_mapping(struct device_queue_manager *dqm,
43 unsigned int pasid, unsigned int vmid);
44
45 static int execute_queues_cpsch(struct device_queue_manager *dqm,
46 enum kfd_unmap_queues_filter filter,
47 uint32_t filter_param);
48 static int unmap_queues_cpsch(struct device_queue_manager *dqm,
49 enum kfd_unmap_queues_filter filter,
50 uint32_t filter_param);
51
52 static int map_queues_cpsch(struct device_queue_manager *dqm);
53
54 static void deallocate_sdma_queue(struct device_queue_manager *dqm,
55 struct queue *q);
56
57 static inline void deallocate_hqd(struct device_queue_manager *dqm,
58 struct queue *q);
59 static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q);
60 static int allocate_sdma_queue(struct device_queue_manager *dqm,
61 struct queue *q);
62 static void kfd_process_hw_exception(struct work_struct *work);
63
64 static inline
65 enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
66 {
67 if (type == KFD_QUEUE_TYPE_SDMA || type == KFD_QUEUE_TYPE_SDMA_XGMI)
68 return KFD_MQD_TYPE_SDMA;
69 return KFD_MQD_TYPE_CP;
70 }
71
72 static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe)
73 {
74 int i;
75 int pipe_offset = mec * dqm->dev->shared_resources.num_pipe_per_mec
76 + pipe * dqm->dev->shared_resources.num_queue_per_pipe;
77
78
79 for (i = 0; i < dqm->dev->shared_resources.num_queue_per_pipe; ++i)
80 if (test_bit(pipe_offset + i,
81 dqm->dev->shared_resources.queue_bitmap))
82 return true;
83 return false;
84 }
85
86 unsigned int get_queues_num(struct device_queue_manager *dqm)
87 {
88 return bitmap_weight(dqm->dev->shared_resources.queue_bitmap,
89 KGD_MAX_QUEUES);
90 }
91
92 unsigned int get_queues_per_pipe(struct device_queue_manager *dqm)
93 {
94 return dqm->dev->shared_resources.num_queue_per_pipe;
95 }
96
97 unsigned int get_pipes_per_mec(struct device_queue_manager *dqm)
98 {
99 return dqm->dev->shared_resources.num_pipe_per_mec;
100 }
101
102 static unsigned int get_num_sdma_engines(struct device_queue_manager *dqm)
103 {
104 return dqm->dev->device_info->num_sdma_engines;
105 }
106
107 static unsigned int get_num_xgmi_sdma_engines(struct device_queue_manager *dqm)
108 {
109 return dqm->dev->device_info->num_xgmi_sdma_engines;
110 }
111
112 unsigned int get_num_sdma_queues(struct device_queue_manager *dqm)
113 {
114 return dqm->dev->device_info->num_sdma_engines
115 * dqm->dev->device_info->num_sdma_queues_per_engine;
116 }
117
118 unsigned int get_num_xgmi_sdma_queues(struct device_queue_manager *dqm)
119 {
120 return dqm->dev->device_info->num_xgmi_sdma_engines
121 * dqm->dev->device_info->num_sdma_queues_per_engine;
122 }
123
124 void program_sh_mem_settings(struct device_queue_manager *dqm,
125 struct qcm_process_device *qpd)
126 {
127 return dqm->dev->kfd2kgd->program_sh_mem_settings(
128 dqm->dev->kgd, qpd->vmid,
129 qpd->sh_mem_config,
130 qpd->sh_mem_ape1_base,
131 qpd->sh_mem_ape1_limit,
132 qpd->sh_mem_bases);
133 }
134
135 static int allocate_doorbell(struct qcm_process_device *qpd, struct queue *q)
136 {
137 struct kfd_dev *dev = qpd->dqm->dev;
138
139 if (!KFD_IS_SOC15(dev->device_info->asic_family)) {
140 /* On pre-SOC15 ASICs the doorbell index is simply the queue ID,
141  * preserving the 1:1 queue-to-doorbell mapping that user mode expects.
142  */
143 q->doorbell_id = q->properties.queue_id;
144 } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
145 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
146 /* For SDMA queues on SOC15, doorbells are statically assigned:
147  * start from the per-engine doorbell index provided by amdgpu
148  * (sdma_doorbell_idx), with even and odd SDMA queue IDs split
149  * across two ranges KFD_QUEUE_DOORBELL_MIRROR_OFFSET apart.
150  */
151 uint32_t *idx_offset =
152 dev->shared_resources.sdma_doorbell_idx;
153
154 q->doorbell_id = idx_offset[q->properties.sdma_engine_id]
155 + (q->properties.sdma_queue_id & 1)
156 * KFD_QUEUE_DOORBELL_MIRROR_OFFSET
157 + (q->properties.sdma_queue_id >> 1);
158 } else {
159 /* For CP queues on SOC15, reserve a free doorbell ID from the per-process bitmap */
160 unsigned int found;
161
162 found = find_first_zero_bit(qpd->doorbell_bitmap,
163 KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
164 if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
165 pr_debug("No doorbells available");
166 return -EBUSY;
167 }
168 set_bit(found, qpd->doorbell_bitmap);
169 q->doorbell_id = found;
170 }
171
172 q->properties.doorbell_off =
173 kfd_doorbell_id_to_offset(dev, q->process,
174 q->doorbell_id);
175
176 return 0;
177 }
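/* Worked example of the SOC15 SDMA doorbell assignment above, using
 * assumed values for illustration (the real idx_offset table comes from
 * amdgpu via shared_resources.sdma_doorbell_idx): with
 * idx_offset[1] == 0x18, sdma_engine_id == 1 and sdma_queue_id == 3,
 *
 *   doorbell_id = 0x18
 *               + (3 & 1) * KFD_QUEUE_DOORBELL_MIRROR_OFFSET
 *               + (3 >> 1)
 *
 * i.e. odd queue IDs land in the mirrored doorbell range and the
 * remaining bits index consecutive doorbells within that range.
 */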
178
179 static void deallocate_doorbell(struct qcm_process_device *qpd,
180 struct queue *q)
181 {
182 unsigned int old;
183 struct kfd_dev *dev = qpd->dqm->dev;
184
185 if (!KFD_IS_SOC15(dev->device_info->asic_family) ||
186 q->properties.type == KFD_QUEUE_TYPE_SDMA ||
187 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
188 return;
189
190 old = test_and_clear_bit(q->doorbell_id, qpd->doorbell_bitmap);
191 WARN_ON(!old);
192 }
193
194 static int allocate_vmid(struct device_queue_manager *dqm,
195 struct qcm_process_device *qpd,
196 struct queue *q)
197 {
198 int bit, allocated_vmid;
199
200 if (dqm->vmid_bitmap == 0)
201 return -ENOMEM;
202
203 bit = ffs(dqm->vmid_bitmap) - 1;
204 dqm->vmid_bitmap &= ~(1 << bit);
205
206 allocated_vmid = bit + dqm->dev->vm_info.first_vmid_kfd;
207 pr_debug("vmid allocation %d\n", allocated_vmid);
208 qpd->vmid = allocated_vmid;
209 q->properties.vmid = allocated_vmid;
210
211 set_pasid_vmid_mapping(dqm, q->process->pasid, q->properties.vmid);
212 program_sh_mem_settings(dqm, qpd);
213
214 /* qpd->page_table_base is set earlier when register_process()
215  * runs, i.e. when the first queue is created; program it here.
216  */
217 dqm->dev->kfd2kgd->set_vm_context_page_table_base(dqm->dev->kgd,
218 qpd->vmid,
219 qpd->page_table_base);
220
221 kfd_flush_tlb(qpd_to_pdd(qpd));
222
223 dqm->dev->kfd2kgd->set_scratch_backing_va(
224 dqm->dev->kgd, qpd->sh_hidden_private_base, qpd->vmid);
225
226 return 0;
227 }
228
229 static int flush_texture_cache_nocpsch(struct kfd_dev *kdev,
230 struct qcm_process_device *qpd)
231 {
232 const struct packet_manager_funcs *pmf = qpd->dqm->packets.pmf;
233 int ret;
234
235 if (!qpd->ib_kaddr)
236 return -ENOMEM;
237
238 ret = pmf->release_mem(qpd->ib_base, (uint32_t *)qpd->ib_kaddr);
239 if (ret)
240 return ret;
241
242 return amdgpu_amdkfd_submit_ib(kdev->kgd, KGD_ENGINE_MEC1, qpd->vmid,
243 qpd->ib_base, (uint32_t *)qpd->ib_kaddr,
244 pmf->release_mem_size / sizeof(uint32_t));
245 }
246
247 static void deallocate_vmid(struct device_queue_manager *dqm,
248 struct qcm_process_device *qpd,
249 struct queue *q)
250 {
251 int bit = qpd->vmid - dqm->dev->vm_info.first_vmid_kfd;
252
253 /* On GFX v7 the CP does not flush the TC at dequeue, so do it here */
254 if (q->device->device_info->asic_family == CHIP_HAWAII)
255 if (flush_texture_cache_nocpsch(q->device, qpd))
256 pr_err("Failed to flush TC\n");
257
258 kfd_flush_tlb(qpd_to_pdd(qpd));
259
260 /* Release the vmid mapping */
261 set_pasid_vmid_mapping(dqm, 0, qpd->vmid);
262
263 dqm->vmid_bitmap |= (1 << bit);
264 qpd->vmid = 0;
265 q->properties.vmid = 0;
266 }
267
268 static int create_queue_nocpsch(struct device_queue_manager *dqm,
269 struct queue *q,
270 struct qcm_process_device *qpd)
271 {
272 struct mqd_manager *mqd_mgr;
273 int retval;
274
275 print_queue(q);
276
277 dqm_lock(dqm);
278
279 if (dqm->total_queue_count >= max_num_of_queues_per_device) {
280 pr_warn("Can't create new usermode queue because %d queues were already created\n",
281 dqm->total_queue_count);
282 retval = -EPERM;
283 goto out_unlock;
284 }
285
286 if (list_empty(&qpd->queues_list)) {
287 retval = allocate_vmid(dqm, qpd, q);
288 if (retval)
289 goto out_unlock;
290 }
291 q->properties.vmid = qpd->vmid;
292
293 /*
294  * Eviction state logic: a queue created while the process is
295  * evicted must itself start in the evicted (inactive) state.
296  */
297 q->properties.is_evicted = !!qpd->evicted;
298
299 q->properties.tba_addr = qpd->tba_addr;
300 q->properties.tma_addr = qpd->tma_addr;
301
302 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
303 q->properties.type)];
304 if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
305 retval = allocate_hqd(dqm, q);
306 if (retval)
307 goto deallocate_vmid;
308 pr_debug("Loading mqd to hqd on pipe %d, queue %d\n",
309 q->pipe, q->queue);
310 } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
311 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
312 retval = allocate_sdma_queue(dqm, q);
313 if (retval)
314 goto deallocate_vmid;
315 dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
316 }
317
318 retval = allocate_doorbell(qpd, q);
319 if (retval)
320 goto out_deallocate_hqd;
321
322 /* Temporarily release dqm lock to avoid a circular lock dependency */
323 dqm_unlock(dqm);
324 q->mqd_mem_obj = mqd_mgr->allocate_mqd(mqd_mgr->dev, &q->properties);
325 dqm_lock(dqm);
326
327 if (!q->mqd_mem_obj) {
328 retval = -ENOMEM;
329 goto out_deallocate_doorbell;
330 }
331 mqd_mgr->init_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj,
332 &q->gart_mqd_addr, &q->properties);
333 if (q->properties.is_active) {
334
335 if (WARN(q->process->mm != current->mm,
336 "should only run in user thread"))
337 retval = -EFAULT;
338 else
339 retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
340 q->queue, &q->properties, current->mm);
341 if (retval)
342 goto out_free_mqd;
343 }
344
345 list_add(&q->list, &qpd->queues_list);
346 qpd->queue_count++;
347 if (q->properties.is_active)
348 dqm->queue_count++;
349
350 if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
351 dqm->sdma_queue_count++;
352 else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
353 dqm->xgmi_sdma_queue_count++;
354
355 /*
356  * Unconditionally increment this counter, regardless of the queue's
357  * type or whether the queue is active.
358  */
359 dqm->total_queue_count++;
360 pr_debug("Total of %d queues are accountable so far\n",
361 dqm->total_queue_count);
362 goto out_unlock;
363
364 out_free_mqd:
365 mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
366 out_deallocate_doorbell:
367 deallocate_doorbell(qpd, q);
368 out_deallocate_hqd:
369 if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
370 deallocate_hqd(dqm, q);
371 else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
372 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
373 deallocate_sdma_queue(dqm, q);
374 deallocate_vmid:
375 if (list_empty(&qpd->queues_list))
376 deallocate_vmid(dqm, qpd, q);
377 out_unlock:
378 dqm_unlock(dqm);
379 return retval;
380 }
381
382 static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
383 {
384 bool set;
385 int pipe, bit, i;
386
387 set = false;
388
389 for (pipe = dqm->next_pipe_to_allocate, i = 0;
390 i < get_pipes_per_mec(dqm);
391 pipe = ((pipe + 1) % get_pipes_per_mec(dqm)), ++i) {
392
393 if (!is_pipe_enabled(dqm, 0, pipe))
394 continue;
395
396 if (dqm->allocated_queues[pipe] != 0) {
397 bit = ffs(dqm->allocated_queues[pipe]) - 1;
398 dqm->allocated_queues[pipe] &= ~(1 << bit);
399 q->pipe = pipe;
400 q->queue = bit;
401 set = true;
402 break;
403 }
404 }
405
406 if (!set)
407 return -EBUSY;
408
409 pr_debug("hqd slot - pipe %d, queue %d\n", q->pipe, q->queue);
410
411 dqm->next_pipe_to_allocate = (pipe + 1) % get_pipes_per_mec(dqm);
412
413 return 0;
414 }
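/* Summary of the HQD allocation strategy above: pipes are scanned
 * round-robin starting at next_pipe_to_allocate, skipping pipes with no
 * usable queues; the lowest free queue bit on the first pipe that has
 * one is claimed, and next_pipe_to_allocate then advances past the pipe
 * that was used, spreading queues across pipes over time.
 */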
415
416 static inline void deallocate_hqd(struct device_queue_manager *dqm,
417 struct queue *q)
418 {
419 dqm->allocated_queues[q->pipe] |= (1 << q->queue);
420 }
421
422 /* Access to DQM has to be locked before calling
423  * destroy_queue_nocpsch_locked
424  */
425 static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
426 struct qcm_process_device *qpd,
427 struct queue *q)
428 {
429 int retval;
430 struct mqd_manager *mqd_mgr;
431
432 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
433 q->properties.type)];
434
435 if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
436 deallocate_hqd(dqm, q);
437 } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
438 dqm->sdma_queue_count--;
439 deallocate_sdma_queue(dqm, q);
440 } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
441 dqm->xgmi_sdma_queue_count--;
442 deallocate_sdma_queue(dqm, q);
443 } else {
444 pr_debug("q->properties.type %d is invalid\n",
445 q->properties.type);
446 return -EINVAL;
447 }
448 dqm->total_queue_count--;
449
450 deallocate_doorbell(qpd, q);
451
452 retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
453 KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
454 KFD_UNMAP_LATENCY_MS,
455 q->pipe, q->queue);
456 if (retval == -ETIME)
457 qpd->reset_wavefronts = true;
458
459 mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
460
461 list_del(&q->list);
462 if (list_empty(&qpd->queues_list)) {
463 if (qpd->reset_wavefronts) {
464 pr_warn("Resetting wave fronts (nocpsch) on dev %p\n",
465 dqm->dev);
466 /* dbgdev_wave_reset_wavefronts has to be called before
467  * deallocating the vmid
468  */
469 dbgdev_wave_reset_wavefronts(dqm->dev,
470 qpd->pqm->process);
471 qpd->reset_wavefronts = false;
472 }
473
474 deallocate_vmid(dqm, qpd, q);
475 }
476 qpd->queue_count--;
477 if (q->properties.is_active)
478 dqm->queue_count--;
479
480 return retval;
481 }
482
483 static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
484 struct qcm_process_device *qpd,
485 struct queue *q)
486 {
487 int retval;
488
489 dqm_lock(dqm);
490 retval = destroy_queue_nocpsch_locked(dqm, qpd, q);
491 dqm_unlock(dqm);
492
493 return retval;
494 }
495
496 static int update_queue(struct device_queue_manager *dqm, struct queue *q)
497 {
498 int retval = 0;
499 struct mqd_manager *mqd_mgr;
500 struct kfd_process_device *pdd;
501 bool prev_active = false;
502
503 dqm_lock(dqm);
504 pdd = kfd_get_process_device_data(q->device, q->process);
505 if (!pdd) {
506 retval = -ENODEV;
507 goto out_unlock;
508 }
509 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
510 q->properties.type)];
511
512 /* Save previous activity state for counters */
513 prev_active = q->properties.is_active;
514
515 /* Make sure the queue is unmapped before updating the MQD */
516 if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) {
517 retval = unmap_queues_cpsch(dqm,
518 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
519 if (retval) {
520 pr_err("unmap queue failed\n");
521 goto out_unlock;
522 }
523 } else if (prev_active &&
524 (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
525 q->properties.type == KFD_QUEUE_TYPE_SDMA ||
526 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
527 retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
528 KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
529 KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
530 if (retval) {
531 pr_err("destroy mqd failed\n");
532 goto out_unlock;
533 }
534 }
535
536 mqd_mgr->update_mqd(mqd_mgr, q->mqd, &q->properties);
537
538 /*
539  * check active state vs. the previous state and modify the
540  * counter accordingly. map_queues_cpsch uses
541  * dqm->queue_count to determine whether a new runlist must be
542  * uploaded.
543  */
544 if (q->properties.is_active && !prev_active)
545 dqm->queue_count++;
546 else if (!q->properties.is_active && prev_active)
547 dqm->queue_count--;
548
549 if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS)
550 retval = map_queues_cpsch(dqm);
551 else if (q->properties.is_active &&
552 (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
553 q->properties.type == KFD_QUEUE_TYPE_SDMA ||
554 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
555 if (WARN(q->process->mm != current->mm,
556 "should only run in user thread"))
557 retval = -EFAULT;
558 else
559 retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd,
560 q->pipe, q->queue,
561 &q->properties, current->mm);
562 }
563
564 out_unlock:
565 dqm_unlock(dqm);
566 return retval;
567 }
568
569 static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
570 struct qcm_process_device *qpd)
571 {
572 struct queue *q;
573 struct mqd_manager *mqd_mgr;
574 struct kfd_process_device *pdd;
575 int retval, ret = 0;
576
577 dqm_lock(dqm);
578 if (qpd->evicted++ > 0)
579 goto out;
580
581 pdd = qpd_to_pdd(qpd);
582 pr_info_ratelimited("Evicting PASID %u queues\n",
583 pdd->process->pasid);
584
585 /* Mark all queues as evicted. Deactivate all active queues on
586  * the qpd.
587  */
588 list_for_each_entry(q, &qpd->queues_list, list) {
589 q->properties.is_evicted = true;
590 if (!q->properties.is_active)
591 continue;
592
593 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
594 q->properties.type)];
595 q->properties.is_active = false;
596 retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
597 KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
598 KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
599 if (retval && !ret)
600 /* Record the first error, but keep going so that all
601  * queues are deactivated and the eviction state stays consistent
602  */
603 ret = retval;
604 dqm->queue_count--;
605 }
606
607 out:
608 dqm_unlock(dqm);
609 return ret;
610 }
611
612 static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
613 struct qcm_process_device *qpd)
614 {
615 struct queue *q;
616 struct kfd_process_device *pdd;
617 int retval = 0;
618
619 dqm_lock(dqm);
620 if (qpd->evicted++ > 0)
621 goto out;
622
623 pdd = qpd_to_pdd(qpd);
624 pr_info_ratelimited("Evicting PASID %u queues\n",
625 pdd->process->pasid);
626
627 /* Mark all queues as evicted. Deactivate all active queues on
628  * the qpd.
629  */
630 list_for_each_entry(q, &qpd->queues_list, list) {
631 q->properties.is_evicted = true;
632 if (!q->properties.is_active)
633 continue;
634
635 q->properties.is_active = false;
636 dqm->queue_count--;
637 }
638 retval = execute_queues_cpsch(dqm,
639 qpd->is_debug ?
640 KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES :
641 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
642
643 out:
644 dqm_unlock(dqm);
645 return retval;
646 }
647
648 static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
649 struct qcm_process_device *qpd)
650 {
651 struct mm_struct *mm = NULL;
652 struct queue *q;
653 struct mqd_manager *mqd_mgr;
654 struct kfd_process_device *pdd;
655 uint64_t pd_base;
656 int retval, ret = 0;
657
658 pdd = qpd_to_pdd(qpd);
659
660 pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->vm);
661
662 dqm_lock(dqm);
663 if (WARN_ON_ONCE(!qpd->evicted))
664 goto out;
665 if (qpd->evicted > 1) {
666 qpd->evicted--;
667 goto out;
668 }
669
670 pr_info_ratelimited("Restoring PASID %u queues\n",
671 pdd->process->pasid);
672
673 /* Update the page directory base in the QPD */
674 qpd->page_table_base = pd_base;
675 pr_debug("Updated PD address to 0x%llx\n", pd_base);
676
677 if (!list_empty(&qpd->queues_list)) {
678 dqm->dev->kfd2kgd->set_vm_context_page_table_base(
679 dqm->dev->kgd,
680 qpd->vmid,
681 qpd->page_table_base);
682 kfd_flush_tlb(pdd);
683 }
684
685 /* Take a safe reference to the mm_struct, which may otherwise
686  * disappear even while the kfd_process is still referenced.
687  */
688 mm = get_task_mm(pdd->process->lead_thread);
689 if (!mm) {
690 ret = -EFAULT;
691 goto out;
692 }
693
694 /* Remove the eviction flags and activate the queues that are
695  * not inactive for other reasons.
696  */
697 list_for_each_entry(q, &qpd->queues_list, list) {
698 q->properties.is_evicted = false;
699 if (!QUEUE_IS_ACTIVE(q->properties))
700 continue;
701
702 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
703 q->properties.type)];
704 q->properties.is_active = true;
705 retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
706 q->queue, &q->properties, mm);
707 if (retval && !ret)
708 /* Record the first error, but keep going so that all
709  * queues are restored and the eviction state stays consistent
710  */
711 ret = retval;
712 dqm->queue_count++;
713 }
714 qpd->evicted = 0;
715 out:
716 if (mm)
717 mmput(mm);
718 dqm_unlock(dqm);
719 return ret;
720 }
721
722 static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
723 struct qcm_process_device *qpd)
724 {
725 struct queue *q;
726 struct kfd_process_device *pdd;
727 uint64_t pd_base;
728 int retval = 0;
729
730 pdd = qpd_to_pdd(qpd);
731
732 pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->vm);
733
734 dqm_lock(dqm);
735 if (WARN_ON_ONCE(!qpd->evicted))
736 goto out;
737 if (qpd->evicted > 1) {
738 qpd->evicted--;
739 goto out;
740 }
741
742 pr_info_ratelimited("Restoring PASID %u queues\n",
743 pdd->process->pasid);
744
745 /* Update the page directory base in the QPD */
746 qpd->page_table_base = pd_base;
747 pr_debug("Updated PD address to 0x%llx\n", pd_base);
748
749 /* activate all active queues on the qpd */
750 list_for_each_entry(q, &qpd->queues_list, list) {
751 q->properties.is_evicted = false;
752 if (!QUEUE_IS_ACTIVE(q->properties))
753 continue;
754
755 q->properties.is_active = true;
756 dqm->queue_count++;
757 }
758 retval = execute_queues_cpsch(dqm,
759 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
760 qpd->evicted = 0;
761 out:
762 dqm_unlock(dqm);
763 return retval;
764 }
765
766 static int register_process(struct device_queue_manager *dqm,
767 struct qcm_process_device *qpd)
768 {
769 struct device_process_node *n;
770 struct kfd_process_device *pdd;
771 uint64_t pd_base;
772 int retval;
773
774 n = kzalloc(sizeof(*n), GFP_KERNEL);
775 if (!n)
776 return -ENOMEM;
777
778 n->qpd = qpd;
779
780 pdd = qpd_to_pdd(qpd);
781
782 pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->vm);
783
784 dqm_lock(dqm);
785 list_add(&n->list, &dqm->queues);
786
787 /* Update the page directory base in the QPD */
788 qpd->page_table_base = pd_base;
789 pr_debug("Updated PD address to 0x%llx\n", pd_base);
790
791 retval = dqm->asic_ops.update_qpd(dqm, qpd);
792
793 dqm->processes_count++;
794
795 dqm_unlock(dqm);
796
797
798
799
800 kfd_inc_compute_active(dqm->dev);
801
802 return retval;
803 }
804
805 static int unregister_process(struct device_queue_manager *dqm,
806 struct qcm_process_device *qpd)
807 {
808 int retval;
809 struct device_process_node *cur, *next;
810
811 pr_debug("qpd->queues_list is %s\n",
812 list_empty(&qpd->queues_list) ? "empty" : "not empty");
813
814 retval = 0;
815 dqm_lock(dqm);
816
817 list_for_each_entry_safe(cur, next, &dqm->queues, list) {
818 if (qpd == cur->qpd) {
819 list_del(&cur->list);
820 kfree(cur);
821 dqm->processes_count--;
822 goto out;
823 }
824 }
825
826 retval = 1;
827 out:
828 dqm_unlock(dqm);
829
830
831
832
833 if (!retval)
834 kfd_dec_compute_active(dqm->dev);
835
836 return retval;
837 }
838
839 static int
840 set_pasid_vmid_mapping(struct device_queue_manager *dqm, unsigned int pasid,
841 unsigned int vmid)
842 {
843 return dqm->dev->kfd2kgd->set_pasid_vmid_mapping(
844 dqm->dev->kgd, pasid, vmid);
845 }
846
847 static void init_interrupts(struct device_queue_manager *dqm)
848 {
849 unsigned int i;
850
851 for (i = 0 ; i < get_pipes_per_mec(dqm) ; i++)
852 if (is_pipe_enabled(dqm, 0, i))
853 dqm->dev->kfd2kgd->init_interrupts(dqm->dev->kgd, i);
854 }
855
856 static int initialize_nocpsch(struct device_queue_manager *dqm)
857 {
858 int pipe, queue;
859
860 pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));
861
862 dqm->allocated_queues = kcalloc(get_pipes_per_mec(dqm),
863 sizeof(unsigned int), GFP_KERNEL);
864 if (!dqm->allocated_queues)
865 return -ENOMEM;
866
867 mutex_init(&dqm->lock_hidden);
868 INIT_LIST_HEAD(&dqm->queues);
869 dqm->queue_count = dqm->next_pipe_to_allocate = 0;
870 dqm->sdma_queue_count = 0;
871 dqm->xgmi_sdma_queue_count = 0;
872
873 for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
874 int pipe_offset = pipe * get_queues_per_pipe(dqm);
875
876 for (queue = 0; queue < get_queues_per_pipe(dqm); queue++)
877 if (test_bit(pipe_offset + queue,
878 dqm->dev->shared_resources.queue_bitmap))
879 dqm->allocated_queues[pipe] |= 1 << queue;
880 }
881
882 dqm->vmid_bitmap = (1 << dqm->dev->vm_info.vmid_num_kfd) - 1;
883 dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm));
884 dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm));
885
886 return 0;
887 }
888
889 static void uninitialize(struct device_queue_manager *dqm)
890 {
891 int i;
892
893 WARN_ON(dqm->queue_count > 0 || dqm->processes_count > 0);
894
895 kfree(dqm->allocated_queues);
896 for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
897 kfree(dqm->mqd_mgrs[i]);
898 mutex_destroy(&dqm->lock_hidden);
899 kfd_gtt_sa_free(dqm->dev, dqm->pipeline_mem);
900 }
901
902 static int start_nocpsch(struct device_queue_manager *dqm)
903 {
904 init_interrupts(dqm);
905 return pm_init(&dqm->packets, dqm);
906 }
907
908 static int stop_nocpsch(struct device_queue_manager *dqm)
909 {
910 pm_uninit(&dqm->packets);
911 return 0;
912 }
913
914 static int allocate_sdma_queue(struct device_queue_manager *dqm,
915 struct queue *q)
916 {
917 int bit;
918
919 if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
920 if (dqm->sdma_bitmap == 0)
921 return -ENOMEM;
922 bit = __ffs64(dqm->sdma_bitmap);
923 dqm->sdma_bitmap &= ~(1ULL << bit);
924 q->sdma_id = bit;
925 q->properties.sdma_engine_id = q->sdma_id %
926 get_num_sdma_engines(dqm);
927 q->properties.sdma_queue_id = q->sdma_id /
928 get_num_sdma_engines(dqm);
929 } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
930 if (dqm->xgmi_sdma_bitmap == 0)
931 return -ENOMEM;
932 bit = __ffs64(dqm->xgmi_sdma_bitmap);
933 dqm->xgmi_sdma_bitmap &= ~(1ULL << bit);
934 q->sdma_id = bit;
935 /*
936  * sdma_engine_id covers both PCIe-optimized and XGMI-optimized
937  * SDMA engines. The calculation below assumes the first
938  * get_num_sdma_engines() engines are the PCIe-optimized ones,
939  * so XGMI-optimized engines are numbered after them.
940  */
941 q->properties.sdma_engine_id = get_num_sdma_engines(dqm) +
942 q->sdma_id % get_num_xgmi_sdma_engines(dqm);
943 q->properties.sdma_queue_id = q->sdma_id /
944 get_num_xgmi_sdma_engines(dqm);
945 }
946
947 pr_debug("SDMA engine id: %d\n", q->properties.sdma_engine_id);
948 pr_debug("SDMA queue id: %d\n", q->properties.sdma_queue_id);
949
950 return 0;
951 }
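/* Illustrative example for the mapping above, assuming a device with
 * two PCIe-optimized SDMA engines (num_sdma_engines == 2): a plain SDMA
 * queue with sdma_id == 5 gets sdma_engine_id = 5 % 2 = 1 and
 * sdma_queue_id = 5 / 2 = 2. XGMI SDMA queues use the same split but
 * with engine IDs offset by get_num_sdma_engines().
 */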
952
953 static void deallocate_sdma_queue(struct device_queue_manager *dqm,
954 struct queue *q)
955 {
956 if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
957 if (q->sdma_id >= get_num_sdma_queues(dqm))
958 return;
959 dqm->sdma_bitmap |= (1ULL << q->sdma_id);
960 } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
961 if (q->sdma_id >= get_num_xgmi_sdma_queues(dqm))
962 return;
963 dqm->xgmi_sdma_bitmap |= (1ULL << q->sdma_id);
964 }
965 }
966
967 /*
968  * Device Queue Manager implementation for cp scheduler
969  */
970
971 static int set_sched_resources(struct device_queue_manager *dqm)
972 {
973 int i, mec;
974 struct scheduling_resources res;
975
976 res.vmid_mask = dqm->dev->shared_resources.compute_vmid_bitmap;
977
978 res.queue_mask = 0;
979 for (i = 0; i < KGD_MAX_QUEUES; ++i) {
980 mec = (i / dqm->dev->shared_resources.num_queue_per_pipe)
981 / dqm->dev->shared_resources.num_pipe_per_mec;
982
983 if (!test_bit(i, dqm->dev->shared_resources.queue_bitmap))
984 continue;
985
986 /* only acquire queues from the first MEC */
987 if (mec > 0)
988 continue;
989
990 /* This situation may be hit in the future if a new HW
991  * generation exposes more than 64 queues. If so, the
992  * definition of res.queue_mask needs updating.
993  */
994 if (WARN_ON(i >= (sizeof(res.queue_mask)*8))) {
995 pr_err("Invalid queue enabled by amdgpu: %d\n", i);
996 break;
997 }
998
999 res.queue_mask |= (1ull << i);
1000 }
1001 res.gws_mask = ~0ull;
1002 res.oac_mask = res.gds_heap_base = res.gds_heap_size = 0;
1003
1004 pr_debug("Scheduling resources:\n"
1005 "vmid mask: 0x%8X\n"
1006 "queue mask: 0x%8llX\n",
1007 res.vmid_mask, res.queue_mask);
1008
1009 return pm_send_set_resources(&dqm->packets, &res);
1010 }
1011
1012 static int initialize_cpsch(struct device_queue_manager *dqm)
1013 {
1014 pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));
1015
1016 mutex_init(&dqm->lock_hidden);
1017 INIT_LIST_HEAD(&dqm->queues);
1018 dqm->queue_count = dqm->processes_count = 0;
1019 dqm->sdma_queue_count = 0;
1020 dqm->xgmi_sdma_queue_count = 0;
1021 dqm->active_runlist = false;
1022 dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm));
1023 dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm));
1024
1025 INIT_WORK(&dqm->hw_exception_work, kfd_process_hw_exception);
1026
1027 return 0;
1028 }
1029
1030 static int start_cpsch(struct device_queue_manager *dqm)
1031 {
1032 int retval;
1033
1034 retval = 0;
1035
1036 retval = pm_init(&dqm->packets, dqm);
1037 if (retval)
1038 goto fail_packet_manager_init;
1039
1040 retval = set_sched_resources(dqm);
1041 if (retval)
1042 goto fail_set_sched_resources;
1043
1044 pr_debug("Allocating fence memory\n");
1045
1046 /* allocate fence memory on the gart */
1047 retval = kfd_gtt_sa_allocate(dqm->dev, sizeof(*dqm->fence_addr),
1048 &dqm->fence_mem);
1049
1050 if (retval)
1051 goto fail_allocate_vidmem;
1052
1053 dqm->fence_addr = dqm->fence_mem->cpu_ptr;
1054 dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr;
1055
1056 init_interrupts(dqm);
1057
1058 dqm_lock(dqm);
1059
1060 dqm->is_hws_hang = false;
1061 execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
1062 dqm_unlock(dqm);
1063
1064 return 0;
1065 fail_allocate_vidmem:
1066 fail_set_sched_resources:
1067 pm_uninit(&dqm->packets);
1068 fail_packet_manager_init:
1069 return retval;
1070 }
1071
1072 static int stop_cpsch(struct device_queue_manager *dqm)
1073 {
1074 dqm_lock(dqm);
1075 unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
1076 dqm_unlock(dqm);
1077
1078 kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
1079 pm_uninit(&dqm->packets);
1080
1081 return 0;
1082 }
1083
1084 static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
1085 struct kernel_queue *kq,
1086 struct qcm_process_device *qpd)
1087 {
1088 dqm_lock(dqm);
1089 if (dqm->total_queue_count >= max_num_of_queues_per_device) {
1090 pr_warn("Can't create new kernel queue because %d queues were already created\n",
1091 dqm->total_queue_count);
1092 dqm_unlock(dqm);
1093 return -EPERM;
1094 }
1095
1096 /*
1097  * Unconditionally increment this counter, regardless of the queue's
1098  * type or whether the queue is active.
1099  */
1100 dqm->total_queue_count++;
1101 pr_debug("Total of %d queues are accountable so far\n",
1102 dqm->total_queue_count);
1103
1104 list_add(&kq->list, &qpd->priv_queue_list);
1105 dqm->queue_count++;
1106 qpd->is_debug = true;
1107 execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
1108 dqm_unlock(dqm);
1109
1110 return 0;
1111 }
1112
1113 static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
1114 struct kernel_queue *kq,
1115 struct qcm_process_device *qpd)
1116 {
1117 dqm_lock(dqm);
1118 list_del(&kq->list);
1119 dqm->queue_count--;
1120 qpd->is_debug = false;
1121 execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
1122 /*
1123  * Unconditionally decrement this counter, regardless of the queue's
1124  * type.
1125  */
1126 dqm->total_queue_count--;
1127 pr_debug("Total of %d queues are accountable so far\n",
1128 dqm->total_queue_count);
1129 dqm_unlock(dqm);
1130 }
1131
1132 static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
1133 struct qcm_process_device *qpd)
1134 {
1135 int retval;
1136 struct mqd_manager *mqd_mgr;
1137
1138 if (dqm->total_queue_count >= max_num_of_queues_per_device) {
1139 pr_warn("Can't create new usermode queue because %d queues were already created\n",
1140 dqm->total_queue_count);
1141 retval = -EPERM;
1142 goto out;
1143 }
1144
1145 if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
1146 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
1147 dqm_lock(dqm);
1148 retval = allocate_sdma_queue(dqm, q);
1149 dqm_unlock(dqm);
1150 if (retval)
1151 goto out;
1152 }
1153
1154 retval = allocate_doorbell(qpd, q);
1155 if (retval)
1156 goto out_deallocate_sdma_queue;
1157
1158 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
1159 q->properties.type)];
1160
1161 if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
1162 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
1163 dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
1164 q->properties.tba_addr = qpd->tba_addr;
1165 q->properties.tma_addr = qpd->tma_addr;
1166 q->mqd_mem_obj = mqd_mgr->allocate_mqd(mqd_mgr->dev, &q->properties);
1167 if (!q->mqd_mem_obj) {
1168 retval = -ENOMEM;
1169 goto out_deallocate_doorbell;
1170 }
1171
1172 dqm_lock(dqm);
1173 /*
1174  * Eviction state logic: a queue created while its process is
1175  * evicted must start in the evicted state so it is only
1176  * activated when the process is restored.
1177  */
1178 q->properties.is_evicted = !!qpd->evicted;
1179 mqd_mgr->init_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj,
1180 &q->gart_mqd_addr, &q->properties);
1181
1182 list_add(&q->list, &qpd->queues_list);
1183 qpd->queue_count++;
1184
1185 if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
1186 dqm->sdma_queue_count++;
1187 else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
1188 dqm->xgmi_sdma_queue_count++;
1189
1190 if (q->properties.is_active) {
1191 dqm->queue_count++;
1192 retval = execute_queues_cpsch(dqm,
1193 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
1194 }
1195
1196 /*
1197  * Unconditionally increment this counter, regardless of the queue's
1198  * type or whether the queue is active.
1199  */
1200 dqm->total_queue_count++;
1201
1202 pr_debug("Total of %d queues are accountable so far\n",
1203 dqm->total_queue_count);
1204
1205 dqm_unlock(dqm);
1206 return retval;
1207
1208 out_deallocate_doorbell:
1209 deallocate_doorbell(qpd, q);
1210 out_deallocate_sdma_queue:
1211 if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
1212 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
1213 dqm_lock(dqm);
1214 deallocate_sdma_queue(dqm, q);
1215 dqm_unlock(dqm);
1216 }
1217 out:
1218 return retval;
1219 }
1220
1221 int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
1222 unsigned int fence_value,
1223 unsigned int timeout_ms)
1224 {
1225 unsigned long end_jiffies = msecs_to_jiffies(timeout_ms) + jiffies;
1226
1227 while (*fence_addr != fence_value) {
1228 if (time_after(jiffies, end_jiffies)) {
1229 pr_err("qcm fence wait loop timeout expired\n");
1230 /* If the halt_if_hws_hang debug option is set, spin here
1231  * instead of returning a timeout, so the hardware state is
1232  * preserved for inspection.
1233  */
1234 while (halt_if_hws_hang)
1235 schedule();
1236
1237 return -ETIME;
1238 }
1239 schedule();
1240 }
1241
1242 return 0;
1243 }
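/* Typical usage, as in unmap_queues_cpsch() below: the driver writes
 * KFD_FENCE_INIT to the fence slot, asks the scheduler firmware to
 * write KFD_FENCE_COMPLETED to the same address once preemption is
 * done (pm_send_query_status), and then polls here:
 *
 *   *dqm->fence_addr = KFD_FENCE_INIT;
 *   pm_send_query_status(&dqm->packets, dqm->fence_gpu_addr,
 *                        KFD_FENCE_COMPLETED);
 *   r = amdkfd_fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
 *                                 queue_preemption_timeout_ms);
 */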
1244
1245 static int unmap_sdma_queues(struct device_queue_manager *dqm)
1246 {
1247 int i, retval = 0;
1248
1249 for (i = 0; i < dqm->dev->device_info->num_sdma_engines +
1250 dqm->dev->device_info->num_xgmi_sdma_engines; i++) {
1251 retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_SDMA,
1252 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, false, i);
1253 if (retval)
1254 return retval;
1255 }
1256 return retval;
1257 }
1258
1259 /* dqm->lock mutex has to be locked before calling this function */
1260 static int map_queues_cpsch(struct device_queue_manager *dqm)
1261 {
1262 int retval;
1263
1264 if (dqm->queue_count <= 0 || dqm->processes_count <= 0)
1265 return 0;
1266
1267 if (dqm->active_runlist)
1268 return 0;
1269
1270 retval = pm_send_runlist(&dqm->packets, &dqm->queues);
1271 pr_debug("%s sent runlist\n", __func__);
1272 if (retval) {
1273 pr_err("failed to execute runlist\n");
1274 return retval;
1275 }
1276 dqm->active_runlist = true;
1277
1278 return retval;
1279 }
1280
1281 /* dqm->lock mutex has to be locked before calling this function */
1282 static int unmap_queues_cpsch(struct device_queue_manager *dqm,
1283 enum kfd_unmap_queues_filter filter,
1284 uint32_t filter_param)
1285 {
1286 int retval = 0;
1287
1288 if (dqm->is_hws_hang)
1289 return -EIO;
1290 if (!dqm->active_runlist)
1291 return retval;
1292
1293 pr_debug("Before destroying queues, sdma queue count is : %u, xgmi sdma queue count is : %u\n",
1294 dqm->sdma_queue_count, dqm->xgmi_sdma_queue_count);
1295
1296 if (dqm->sdma_queue_count > 0 || dqm->xgmi_sdma_queue_count)
1297 unmap_sdma_queues(dqm);
1298
1299 retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
1300 filter, filter_param, false, 0);
1301 if (retval)
1302 return retval;
1303
1304 *dqm->fence_addr = KFD_FENCE_INIT;
1305 pm_send_query_status(&dqm->packets, dqm->fence_gpu_addr,
1306 KFD_FENCE_COMPLETED);
1307
1308 retval = amdkfd_fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
1309 queue_preemption_timeout_ms);
1310 if (retval)
1311 return retval;
1312
1313 pm_release_ib(&dqm->packets);
1314 dqm->active_runlist = false;
1315
1316 return retval;
1317 }
1318
1319 /* dqm->lock mutex has to be locked before calling this function */
1320 static int execute_queues_cpsch(struct device_queue_manager *dqm,
1321 enum kfd_unmap_queues_filter filter,
1322 uint32_t filter_param)
1323 {
1324 int retval;
1325
1326 if (dqm->is_hws_hang)
1327 return -EIO;
1328 retval = unmap_queues_cpsch(dqm, filter, filter_param);
1329 if (retval) {
1330 pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption\n");
1331 dqm->is_hws_hang = true;
1332 schedule_work(&dqm->hw_exception_work);
1333 return retval;
1334 }
1335
1336 return map_queues_cpsch(dqm);
1337 }
1338
1339 static int destroy_queue_cpsch(struct device_queue_manager *dqm,
1340 struct qcm_process_device *qpd,
1341 struct queue *q)
1342 {
1343 int retval;
1344 struct mqd_manager *mqd_mgr;
1345
1346 retval = 0;
1347
1348 /* remove queue from list to prevent rescheduling after preemption */
1349 dqm_lock(dqm);
1350
1351 if (qpd->is_debug) {
1352 /*
1353  * error, currently we do not allow to destroy a queue
1354  * of a currently debugged process
1355  */
1356 retval = -EBUSY;
1357 goto failed_try_destroy_debugged_queue;
1358
1359 }
1360
1361 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
1362 q->properties.type)];
1363
1364 deallocate_doorbell(qpd, q);
1365
1366 if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
1367 dqm->sdma_queue_count--;
1368 deallocate_sdma_queue(dqm, q);
1369 } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
1370 dqm->xgmi_sdma_queue_count--;
1371 deallocate_sdma_queue(dqm, q);
1372 }
1373
1374 list_del(&q->list);
1375 qpd->queue_count--;
1376 if (q->properties.is_active) {
1377 dqm->queue_count--;
1378 retval = execute_queues_cpsch(dqm,
1379 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
1380 if (retval == -ETIME)
1381 qpd->reset_wavefronts = true;
1382 }
1383
1384 /*
1385  * Unconditionally decrement this counter, regardless of the queue's
1386  * type.
1387  */
1388 dqm->total_queue_count--;
1389 pr_debug("Total of %d queues are accountable so far\n",
1390 dqm->total_queue_count);
1391
1392 dqm_unlock(dqm);
1393
1394 /* Do free_mqd after dqm_unlock(dqm) to avoid circular locking */
1395 mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
1396
1397 return retval;
1398
1399 failed_try_destroy_debugged_queue:
1400
1401 dqm_unlock(dqm);
1402 return retval;
1403 }
1404
1405 /*
1406  * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to
1407  * stay in user mode.
1408  */
1409 #define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL
1410 /* APE1 limit is inclusive and 64K aligned. */
1411 #define APE1_LIMIT_ALIGNMENT 0xFFFF
1412
1413 static bool set_cache_memory_policy(struct device_queue_manager *dqm,
1414 struct qcm_process_device *qpd,
1415 enum cache_policy default_policy,
1416 enum cache_policy alternate_policy,
1417 void __user *alternate_aperture_base,
1418 uint64_t alternate_aperture_size)
1419 {
1420 bool retval = true;
1421
1422 if (!dqm->asic_ops.set_cache_memory_policy)
1423 return retval;
1424
1425 dqm_lock(dqm);
1426
1427 if (alternate_aperture_size == 0) {
1428 /* base > limit disables APE1 */
1429 qpd->sh_mem_ape1_base = 1;
1430 qpd->sh_mem_ape1_limit = 0;
1431 } else {
1432 /*
1433  * SH_MEM_APE1_BASE/LIMIT hold the upper bits of the aperture
1434  * addresses: the value programmed is addr >> 16, with the low
1435  * 16 bits implied as 0x0000 for the base and 0xFFFF for the
1436  * limit. Verify that the requested base and size can be
1437  * represented in this format (64K aligned, high bits clear,
1438  * i.e. user-mode addresses) and convert them.
1439  */
1440
1441
1442 uint64_t base = (uintptr_t)alternate_aperture_base;
1443 uint64_t limit = base + alternate_aperture_size - 1;
1444
1445 if (limit <= base || (base & APE1_FIXED_BITS_MASK) != 0 ||
1446 (limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT) {
1447 retval = false;
1448 goto out;
1449 }
1450
1451 qpd->sh_mem_ape1_base = base >> 16;
1452 qpd->sh_mem_ape1_limit = limit >> 16;
1453 }
1454
1455 retval = dqm->asic_ops.set_cache_memory_policy(
1456 dqm,
1457 qpd,
1458 default_policy,
1459 alternate_policy,
1460 alternate_aperture_base,
1461 alternate_aperture_size);
1462
1463 if ((dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0))
1464 program_sh_mem_settings(dqm, qpd);
1465
1466 pr_debug("sh_mem_config: 0x%x, ape1_base: 0x%x, ape1_limit: 0x%x\n",
1467 qpd->sh_mem_config, qpd->sh_mem_ape1_base,
1468 qpd->sh_mem_ape1_limit);
1469
1470 out:
1471 dqm_unlock(dqm);
1472 return retval;
1473 }
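/* Worked example for the APE1 conversion above, with assumed values:
 * alternate_aperture_base = 0x2_0000_0000 (8 GiB, 64K aligned) and
 * alternate_aperture_size = 0x1_0000_0000 give
 * limit = 0x2_FFFF_FFFF, which passes the APE1_FIXED_BITS_MASK and
 * APE1_LIMIT_ALIGNMENT checks, so
 *   sh_mem_ape1_base  = 0x2_0000_0000 >> 16 = 0x20000
 *   sh_mem_ape1_limit = 0x2_FFFF_FFFF >> 16 = 0x2FFFF
 */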
1474
1475 static int set_trap_handler(struct device_queue_manager *dqm,
1476 struct qcm_process_device *qpd,
1477 uint64_t tba_addr,
1478 uint64_t tma_addr)
1479 {
1480 uint64_t *tma;
1481
1482 if (dqm->dev->cwsr_enabled) {
1483 /* Jump from CWSR trap handler to user trap */
1484 tma = (uint64_t *)(qpd->cwsr_kaddr + KFD_CWSR_TMA_OFFSET);
1485 tma[0] = tba_addr;
1486 tma[1] = tma_addr;
1487 } else {
1488 qpd->tba_addr = tba_addr;
1489 qpd->tma_addr = tma_addr;
1490 }
1491
1492 return 0;
1493 }
1494
1495 static int process_termination_nocpsch(struct device_queue_manager *dqm,
1496 struct qcm_process_device *qpd)
1497 {
1498 struct queue *q, *next;
1499 struct device_process_node *cur, *next_dpn;
1500 int retval = 0;
1501 bool found = false;
1502
1503 dqm_lock(dqm);
1504
1505 /* Clear all user mode queues */
1506 list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
1507 int ret;
1508
1509 ret = destroy_queue_nocpsch_locked(dqm, qpd, q);
1510 if (ret)
1511 retval = ret;
1512 }
1513
1514 /* Unregister process */
1515 list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) {
1516 if (qpd == cur->qpd) {
1517 list_del(&cur->list);
1518 kfree(cur);
1519 dqm->processes_count--;
1520 found = true;
1521 break;
1522 }
1523 }
1524
1525 dqm_unlock(dqm);
1526
1527
1528
1529
1530 if (found)
1531 kfd_dec_compute_active(dqm->dev);
1532
1533 return retval;
1534 }
1535
1536 static int get_wave_state(struct device_queue_manager *dqm,
1537 struct queue *q,
1538 void __user *ctl_stack,
1539 u32 *ctl_stack_used_size,
1540 u32 *save_area_used_size)
1541 {
1542 struct mqd_manager *mqd_mgr;
1543 int r;
1544
1545 dqm_lock(dqm);
1546
1547 if (q->properties.type != KFD_QUEUE_TYPE_COMPUTE ||
1548 q->properties.is_active || !q->device->cwsr_enabled) {
1549 r = -EINVAL;
1550 goto dqm_unlock;
1551 }
1552
1553 mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_COMPUTE];
1554
1555 if (!mqd_mgr->get_wave_state) {
1556 r = -EINVAL;
1557 goto dqm_unlock;
1558 }
1559
1560 r = mqd_mgr->get_wave_state(mqd_mgr, q->mqd, ctl_stack,
1561 ctl_stack_used_size, save_area_used_size);
1562
1563 dqm_unlock:
1564 dqm_unlock(dqm);
1565 return r;
1566 }
1567
1568 static int process_termination_cpsch(struct device_queue_manager *dqm,
1569 struct qcm_process_device *qpd)
1570 {
1571 int retval;
1572 struct queue *q, *next;
1573 struct kernel_queue *kq, *kq_next;
1574 struct mqd_manager *mqd_mgr;
1575 struct device_process_node *cur, *next_dpn;
1576 enum kfd_unmap_queues_filter filter =
1577 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES;
1578 bool found = false;
1579
1580 retval = 0;
1581
1582 dqm_lock(dqm);
1583
1584 /* Clean all kernel queues */
1585 list_for_each_entry_safe(kq, kq_next, &qpd->priv_queue_list, list) {
1586 list_del(&kq->list);
1587 dqm->queue_count--;
1588 qpd->is_debug = false;
1589 dqm->total_queue_count--;
1590 filter = KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES;
1591 }
1592
1593 /* Undo queue accounting and SDMA allocations for all user mode queues */
1594 list_for_each_entry(q, &qpd->queues_list, list) {
1595 if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
1596 dqm->sdma_queue_count--;
1597 deallocate_sdma_queue(dqm, q);
1598 } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
1599 dqm->xgmi_sdma_queue_count--;
1600 deallocate_sdma_queue(dqm, q);
1601 }
1602
1603 if (q->properties.is_active)
1604 dqm->queue_count--;
1605
1606 dqm->total_queue_count--;
1607 }
1608
1609 /* Unregister process */
1610 list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) {
1611 if (qpd == cur->qpd) {
1612 list_del(&cur->list);
1613 kfree(cur);
1614 dqm->processes_count--;
1615 found = true;
1616 break;
1617 }
1618 }
1619
1620 retval = execute_queues_cpsch(dqm, filter, 0);
1621 if ((!dqm->is_hws_hang) && (retval || qpd->reset_wavefronts)) {
1622 pr_warn("Resetting wave fronts (cpsch) on dev %p\n", dqm->dev);
1623 dbgdev_wave_reset_wavefronts(dqm->dev, qpd->pqm->process);
1624 qpd->reset_wavefronts = false;
1625 }
1626
1627 dqm_unlock(dqm);
1628
1629
1630
1631
1632 if (found)
1633 kfd_dec_compute_active(dqm->dev);
1634
1635 /* Lastly, free mqd resources.
1636  * Do free_mqd() after dqm_unlock to avoid circular locking.
1637  */
1638 list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
1639 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
1640 q->properties.type)];
1641 list_del(&q->list);
1642 qpd->queue_count--;
1643 mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
1644 }
1645
1646 return retval;
1647 }
1648
1649 static int init_mqd_managers(struct device_queue_manager *dqm)
1650 {
1651 int i, j;
1652 struct mqd_manager *mqd_mgr;
1653
1654 for (i = 0; i < KFD_MQD_TYPE_MAX; i++) {
1655 mqd_mgr = dqm->asic_ops.mqd_manager_init(i, dqm->dev);
1656 if (!mqd_mgr) {
1657 pr_err("mqd manager [%d] initialization failed\n", i);
1658 goto out_free;
1659 }
1660 dqm->mqd_mgrs[i] = mqd_mgr;
1661 }
1662
1663 return 0;
1664
1665 out_free:
1666 for (j = 0; j < i; j++) {
1667 kfree(dqm->mqd_mgrs[j]);
1668 dqm->mqd_mgrs[j] = NULL;
1669 }
1670
1671 return -ENOMEM;
1672 }
1673
1674
1675 static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm)
1676 {
1677 int retval;
1678 struct kfd_dev *dev = dqm->dev;
1679 struct kfd_mem_obj *mem_obj = &dqm->hiq_sdma_mqd;
1680 uint32_t size = dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size *
1681 (dev->device_info->num_sdma_engines +
1682 dev->device_info->num_xgmi_sdma_engines) *
1683 dev->device_info->num_sdma_queues_per_engine +
1684 dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size;
1685
1686 retval = amdgpu_amdkfd_alloc_gtt_mem(dev->kgd, size,
1687 &(mem_obj->gtt_mem), &(mem_obj->gpu_addr),
1688 (void *)&(mem_obj->cpu_ptr), true);
1689
1690 return retval;
1691 }
1692
1693 struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
1694 {
1695 struct device_queue_manager *dqm;
1696
1697 pr_debug("Loading device queue manager\n");
1698
1699 dqm = kzalloc(sizeof(*dqm), GFP_KERNEL);
1700 if (!dqm)
1701 return NULL;
1702
1703 switch (dev->device_info->asic_family) {
1704 /* HWS is not available on Hawaii. */
1705 case CHIP_HAWAII:
1706 /* HWS depends on CWSR for timely dequeue. CWSR is not
1707  * available on Tonga.
1708  *
1709  * FIXME: This argument also applies to Kaveri.
1710  */
1711 case CHIP_TONGA:
1712 dqm->sched_policy = KFD_SCHED_POLICY_NO_HWS;
1713 break;
1714 default:
1715 dqm->sched_policy = sched_policy;
1716 break;
1717 }
1718
1719 dqm->dev = dev;
1720 switch (dqm->sched_policy) {
1721 case KFD_SCHED_POLICY_HWS:
1722 case KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION:
1723 /* initialize dqm for cp scheduling */
1724 dqm->ops.create_queue = create_queue_cpsch;
1725 dqm->ops.initialize = initialize_cpsch;
1726 dqm->ops.start = start_cpsch;
1727 dqm->ops.stop = stop_cpsch;
1728 dqm->ops.destroy_queue = destroy_queue_cpsch;
1729 dqm->ops.update_queue = update_queue;
1730 dqm->ops.register_process = register_process;
1731 dqm->ops.unregister_process = unregister_process;
1732 dqm->ops.uninitialize = uninitialize;
1733 dqm->ops.create_kernel_queue = create_kernel_queue_cpsch;
1734 dqm->ops.destroy_kernel_queue = destroy_kernel_queue_cpsch;
1735 dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
1736 dqm->ops.set_trap_handler = set_trap_handler;
1737 dqm->ops.process_termination = process_termination_cpsch;
1738 dqm->ops.evict_process_queues = evict_process_queues_cpsch;
1739 dqm->ops.restore_process_queues = restore_process_queues_cpsch;
1740 dqm->ops.get_wave_state = get_wave_state;
1741 break;
1742 case KFD_SCHED_POLICY_NO_HWS:
1743 /* initialize dqm for no cp scheduling */
1744 dqm->ops.start = start_nocpsch;
1745 dqm->ops.stop = stop_nocpsch;
1746 dqm->ops.create_queue = create_queue_nocpsch;
1747 dqm->ops.destroy_queue = destroy_queue_nocpsch;
1748 dqm->ops.update_queue = update_queue;
1749 dqm->ops.register_process = register_process;
1750 dqm->ops.unregister_process = unregister_process;
1751 dqm->ops.initialize = initialize_nocpsch;
1752 dqm->ops.uninitialize = uninitialize;
1753 dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
1754 dqm->ops.set_trap_handler = set_trap_handler;
1755 dqm->ops.process_termination = process_termination_nocpsch;
1756 dqm->ops.evict_process_queues = evict_process_queues_nocpsch;
1757 dqm->ops.restore_process_queues =
1758 restore_process_queues_nocpsch;
1759 dqm->ops.get_wave_state = get_wave_state;
1760 break;
1761 default:
1762 pr_err("Invalid scheduling policy %d\n", dqm->sched_policy);
1763 goto out_free;
1764 }
1765
1766 switch (dev->device_info->asic_family) {
1767 case CHIP_CARRIZO:
1768 device_queue_manager_init_vi(&dqm->asic_ops);
1769 break;
1770
1771 case CHIP_KAVERI:
1772 device_queue_manager_init_cik(&dqm->asic_ops);
1773 break;
1774
1775 case CHIP_HAWAII:
1776 device_queue_manager_init_cik_hawaii(&dqm->asic_ops);
1777 break;
1778
1779 case CHIP_TONGA:
1780 case CHIP_FIJI:
1781 case CHIP_POLARIS10:
1782 case CHIP_POLARIS11:
1783 case CHIP_POLARIS12:
1784 case CHIP_VEGAM:
1785 device_queue_manager_init_vi_tonga(&dqm->asic_ops);
1786 break;
1787
1788 case CHIP_VEGA10:
1789 case CHIP_VEGA12:
1790 case CHIP_VEGA20:
1791 case CHIP_RAVEN:
1792 case CHIP_ARCTURUS:
1793 device_queue_manager_init_v9(&dqm->asic_ops);
1794 break;
1795 case CHIP_NAVI10:
1796 device_queue_manager_init_v10_navi10(&dqm->asic_ops);
1797 break;
1798 default:
1799 WARN(1, "Unexpected ASIC family %u",
1800 dev->device_info->asic_family);
1801 goto out_free;
1802 }
1803
1804 if (init_mqd_managers(dqm))
1805 goto out_free;
1806
1807 if (allocate_hiq_sdma_mqd(dqm)) {
1808 pr_err("Failed to allocate hiq sdma mqd trunk buffer\n");
1809 goto out_free;
1810 }
1811
1812 if (!dqm->ops.initialize(dqm))
1813 return dqm;
1814
1815 out_free:
1816 kfree(dqm);
1817 return NULL;
1818 }
1819
1820 static void deallocate_hiq_sdma_mqd(struct kfd_dev *dev,
1821 struct kfd_mem_obj *mqd)
1822 {
1823 WARN(!mqd, "No hiq sdma mqd trunk to free");
1824
1825 amdgpu_amdkfd_free_gtt_mem(dev->kgd, mqd->gtt_mem);
1826 }
1827
1828 void device_queue_manager_uninit(struct device_queue_manager *dqm)
1829 {
1830 dqm->ops.uninitialize(dqm);
1831 deallocate_hiq_sdma_mqd(dqm->dev, &dqm->hiq_sdma_mqd);
1832 kfree(dqm);
1833 }
1834
1835 int kfd_process_vm_fault(struct device_queue_manager *dqm,
1836 unsigned int pasid)
1837 {
1838 struct kfd_process_device *pdd;
1839 struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
1840 int ret = 0;
1841
1842 if (!p)
1843 return -EINVAL;
1844 pdd = kfd_get_process_device_data(dqm->dev, p);
1845 if (pdd)
1846 ret = dqm->ops.evict_process_queues(dqm, &pdd->qpd);
1847 kfd_unref_process(p);
1848
1849 return ret;
1850 }
1851
1852 static void kfd_process_hw_exception(struct work_struct *work)
1853 {
1854 struct device_queue_manager *dqm = container_of(work,
1855 struct device_queue_manager, hw_exception_work);
1856 amdgpu_amdkfd_gpu_reset(dqm->dev->kgd);
1857 }
1858
1859 #if defined(CONFIG_DEBUG_FS)
1860
1861 static void seq_reg_dump(struct seq_file *m,
1862 uint32_t (*dump)[2], uint32_t n_regs)
1863 {
1864 uint32_t i, count;
1865
1866 for (i = 0, count = 0; i < n_regs; i++) {
1867 if (count == 0 ||
1868 dump[i-1][0] + sizeof(uint32_t) != dump[i][0]) {
1869 seq_printf(m, "%s %08x: %08x",
1870 i ? "\n" : "",
1871 dump[i][0], dump[i][1]);
1872 count = 7;
1873 } else {
1874 seq_printf(m, " %08x", dump[i][1]);
1875 count--;
1876 }
1877 }
1878
1879 seq_puts(m, "\n");
1880 }
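/* seq_reg_dump() prints register dumps as address-prefixed rows: a new
 * row starts whenever the register offset is not contiguous with the
 * previous one, and at most eight values (the initial one plus count=7
 * more) are printed per row.
 */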
1881
1882 int dqm_debugfs_hqds(struct seq_file *m, void *data)
1883 {
1884 struct device_queue_manager *dqm = data;
1885 uint32_t (*dump)[2], n_regs;
1886 int pipe, queue;
1887 int r = 0;
1888
1889 r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->kgd,
1890 KFD_CIK_HIQ_PIPE, KFD_CIK_HIQ_QUEUE,
1891 &dump, &n_regs);
1892 if (!r) {
1893 seq_printf(m, " HIQ on MEC %d Pipe %d Queue %d\n",
1894 KFD_CIK_HIQ_PIPE/get_pipes_per_mec(dqm)+1,
1895 KFD_CIK_HIQ_PIPE%get_pipes_per_mec(dqm),
1896 KFD_CIK_HIQ_QUEUE);
1897 seq_reg_dump(m, dump, n_regs);
1898
1899 kfree(dump);
1900 }
1901
1902 for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
1903 int pipe_offset = pipe * get_queues_per_pipe(dqm);
1904
1905 for (queue = 0; queue < get_queues_per_pipe(dqm); queue++) {
1906 if (!test_bit(pipe_offset + queue,
1907 dqm->dev->shared_resources.queue_bitmap))
1908 continue;
1909
1910 r = dqm->dev->kfd2kgd->hqd_dump(
1911 dqm->dev->kgd, pipe, queue, &dump, &n_regs);
1912 if (r)
1913 break;
1914
1915 seq_printf(m, " CP Pipe %d, Queue %d\n",
1916 pipe, queue);
1917 seq_reg_dump(m, dump, n_regs);
1918
1919 kfree(dump);
1920 }
1921 }
1922
1923 for (pipe = 0; pipe < get_num_sdma_engines(dqm); pipe++) {
1924 for (queue = 0;
1925 queue < dqm->dev->device_info->num_sdma_queues_per_engine;
1926 queue++) {
1927 r = dqm->dev->kfd2kgd->hqd_sdma_dump(
1928 dqm->dev->kgd, pipe, queue, &dump, &n_regs);
1929 if (r)
1930 break;
1931
1932 seq_printf(m, " SDMA Engine %d, RLC %d\n",
1933 pipe, queue);
1934 seq_reg_dump(m, dump, n_regs);
1935
1936 kfree(dump);
1937 }
1938 }
1939
1940 return r;
1941 }
1942
1943 int dqm_debugfs_execute_queues(struct device_queue_manager *dqm)
1944 {
1945 int r = 0;
1946
1947 dqm_lock(dqm);
1948 dqm->active_runlist = true;
1949 r = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
1950 dqm_unlock(dqm);
1951
1952 return r;
1953 }
1954
1955 #endif