root/drivers/gpu/drm/amd/amdkfd/kfd_process.c

DEFINITIONS

This source file includes the following definitions:
  1. kfd_procfs_show
  2. kfd_procfs_kobj_release
  3. kfd_procfs_init
  4. kfd_procfs_shutdown
  5. kfd_process_create_wq
  6. kfd_process_destroy_wq
  7. kfd_process_free_gpuvm
  8. kfd_process_alloc_gpuvm
  9. kfd_process_device_reserve_ib_mem
  10. kfd_create_process
  11. kfd_get_process
  12. find_process_by_mm
  13. find_process
  14. kfd_unref_process
  15. kfd_process_device_free_bos
  16. kfd_process_free_outstanding_kfd_bos
  17. kfd_process_destroy_pdds
  18. kfd_process_wq_release
  19. kfd_process_ref_release
  20. kfd_process_free_notifier
  21. kfd_process_notifier_release
  22. kfd_process_init_cwsr_apu
  23. kfd_process_device_init_cwsr_dgpu
  24. create_process
  25. init_doorbell_bitmap
  26. kfd_get_process_device_data
  27. kfd_create_process_device_data
  28. kfd_process_device_init_vm
  29. kfd_bind_process_to_device
  30. kfd_get_first_process_device_data
  31. kfd_get_next_process_device_data
  32. kfd_has_process_device_data
  33. kfd_process_device_create_obj_handle
  34. kfd_process_device_translate_handle
  35. kfd_process_device_remove_obj_handle
  36. kfd_lookup_process_by_pasid
  37. kfd_lookup_process_by_mm
  38. kfd_process_evict_queues
  39. kfd_process_restore_queues
  40. evict_process_worker
  41. restore_process_worker
  42. kfd_suspend_all_processes
  43. kfd_resume_all_processes
  44. kfd_reserved_mem_mmap
  45. kfd_flush_tlb
  46. kfd_debugfs_mqds_by_process

   1 /*
   2  * Copyright 2014 Advanced Micro Devices, Inc.
   3  *
   4  * Permission is hereby granted, free of charge, to any person obtaining a
   5  * copy of this software and associated documentation files (the "Software"),
   6  * to deal in the Software without restriction, including without limitation
   7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8  * and/or sell copies of the Software, and to permit persons to whom the
   9  * Software is furnished to do so, subject to the following conditions:
  10  *
  11  * The above copyright notice and this permission notice shall be included in
  12  * all copies or substantial portions of the Software.
  13  *
  14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20  * OTHER DEALINGS IN THE SOFTWARE.
  21  */
  22 
  23 #include <linux/mutex.h>
  24 #include <linux/log2.h>
  25 #include <linux/sched.h>
  26 #include <linux/sched/mm.h>
  27 #include <linux/sched/task.h>
  28 #include <linux/slab.h>
  29 #include <linux/amd-iommu.h>
  30 #include <linux/notifier.h>
  31 #include <linux/compat.h>
  32 #include <linux/mman.h>
  33 #include <linux/file.h>
  34 #include "amdgpu_amdkfd.h"
  35 
  36 struct mm_struct;
  37 
  38 #include "kfd_priv.h"
  39 #include "kfd_device_queue_manager.h"
  40 #include "kfd_dbgmgr.h"
  41 #include "kfd_iommu.h"
  42 
  43 /*
  44  * List of struct kfd_process (field kfd_process).
  45  * Unique/indexed by mm_struct*
  46  */
  47 DEFINE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
  48 static DEFINE_MUTEX(kfd_processes_mutex);
  49 
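      /*
       * SRCU rather than plain RCU: readers such as
       * kfd_suspend_all_processes() sleep inside the read-side critical
       * section, which plain RCU forbids. synchronize_srcu() in
       * kfd_process_notifier_release() then makes hash_del_rcu() safe
       * against those sleeping readers.
       */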
  50 DEFINE_SRCU(kfd_processes_srcu);
  51 
  52 /* For process termination handling */
  53 static struct workqueue_struct *kfd_process_wq;
  54 
  55 /* Ordered, single-threaded workqueue for restoring evicted
  56  * processes. Restoring multiple processes concurrently under memory
  57  * pressure can lead to processes blocking each other from validating
  58  * their BOs and result in a live-lock situation where processes
  59  * remain evicted indefinitely.
  60  */
  61 static struct workqueue_struct *kfd_restore_wq;
  62 
  63 static struct kfd_process *find_process(const struct task_struct *thread);
  64 static void kfd_process_ref_release(struct kref *ref);
  65 static struct kfd_process *create_process(const struct task_struct *thread);
  66 static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep);
  67 
  68 static void evict_process_worker(struct work_struct *work);
  69 static void restore_process_worker(struct work_struct *work);
  70 
  71 struct kfd_procfs_tree {
  72         struct kobject *kobj;
  73 };
  74 
  75 static struct kfd_procfs_tree procfs;
  76 
  77 static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr,
  78                                char *buffer)
  79 {
  80         int val = 0;
  81 
  82         if (strcmp(attr->name, "pasid") == 0) {
  83                 struct kfd_process *p = container_of(attr, struct kfd_process,
  84                                                      attr_pasid);
  85                 val = p->pasid;
  86         } else {
  87                 pr_err("Invalid attribute\n");
  88                 return -EINVAL;
  89         }
  90 
  91         return snprintf(buffer, PAGE_SIZE, "%d\n", val);
  92 }
  93 
  94 static void kfd_procfs_kobj_release(struct kobject *kobj)
  95 {
  96         kfree(kobj);
  97 }
  98 
  99 static const struct sysfs_ops kfd_procfs_ops = {
 100         .show = kfd_procfs_show,
 101 };
 102 
 103 static struct kobj_type procfs_type = {
 104         .release = kfd_procfs_kobj_release,
 105         .sysfs_ops = &kfd_procfs_ops,
 106 };
 107 
 108 void kfd_procfs_init(void)
 109 {
 110         int ret = 0;
 111 
 112         procfs.kobj = kfd_alloc_struct(procfs.kobj);
 113         if (!procfs.kobj)
 114                 return;
 115 
 116         ret = kobject_init_and_add(procfs.kobj, &procfs_type,
 117                                    &kfd_device->kobj, "proc");
 118         if (ret) {
  119                 pr_warn("Could not create procfs proc folder\n");
 120                 /* If we fail to create the procfs, clean up */
 121                 kfd_procfs_shutdown();
 122         }
 123 }
 124 
 125 void kfd_procfs_shutdown(void)
 126 {
 127         if (procfs.kobj) {
 128                 kobject_del(procfs.kobj);
 129                 kobject_put(procfs.kobj);
 130                 procfs.kobj = NULL;
 131         }
 132 }
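
      /*
       * With the procfs tree in place, each process's PASID is exposed to
       * user space as a read-only sysfs file. Assuming the usual kfd class
       * device registration, it shows up roughly as:
       *
       *	$ cat /sys/class/kfd/kfd/proc/<pid>/pasid
       *	32770
       */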
 133 
 134 int kfd_process_create_wq(void)
 135 {
 136         if (!kfd_process_wq)
 137                 kfd_process_wq = alloc_workqueue("kfd_process_wq", 0, 0);
 138         if (!kfd_restore_wq)
 139                 kfd_restore_wq = alloc_ordered_workqueue("kfd_restore_wq", 0);
 140 
 141         if (!kfd_process_wq || !kfd_restore_wq) {
 142                 kfd_process_destroy_wq();
 143                 return -ENOMEM;
 144         }
 145 
 146         return 0;
 147 }
 148 
 149 void kfd_process_destroy_wq(void)
 150 {
 151         if (kfd_process_wq) {
 152                 destroy_workqueue(kfd_process_wq);
 153                 kfd_process_wq = NULL;
 154         }
 155         if (kfd_restore_wq) {
 156                 destroy_workqueue(kfd_restore_wq);
 157                 kfd_restore_wq = NULL;
 158         }
 159 }
 160 
 161 static void kfd_process_free_gpuvm(struct kgd_mem *mem,
 162                         struct kfd_process_device *pdd)
 163 {
 164         struct kfd_dev *dev = pdd->dev;
 165 
 166         amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->kgd, mem, pdd->vm);
 167         amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, mem);
 168 }
 169 
  170 /* kfd_process_alloc_gpuvm - Allocate GPU memory and map it in the
  171  *      process's GPU VM. This function should only be called right
  172  *      after the process is created and while kfd_processes_mutex is
  173  *      still held, to avoid concurrency. Because of that exclusiveness,
  174  *      we do not need to take p->mutex.
  175  */
 176 static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
 177                                    uint64_t gpu_va, uint32_t size,
 178                                    uint32_t flags, void **kptr)
 179 {
 180         struct kfd_dev *kdev = pdd->dev;
 181         struct kgd_mem *mem = NULL;
 182         int handle;
 183         int err;
 184 
 185         err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kdev->kgd, gpu_va, size,
 186                                                  pdd->vm, &mem, NULL, flags);
 187         if (err)
 188                 goto err_alloc_mem;
 189 
 190         err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->kgd, mem, pdd->vm);
 191         if (err)
 192                 goto err_map_mem;
 193 
 194         err = amdgpu_amdkfd_gpuvm_sync_memory(kdev->kgd, mem, true);
 195         if (err) {
 196                 pr_debug("Sync memory failed, wait interrupted by user signal\n");
 197                 goto sync_memory_failed;
 198         }
 199 
 200         /* Create an obj handle so kfd_process_device_remove_obj_handle
 201          * will take care of the bo removal when the process finishes.
 202          * We do not need to take p->mutex, because the process is just
 203          * created and the ioctls have not had the chance to run.
 204          */
 205         handle = kfd_process_device_create_obj_handle(pdd, mem);
 206 
 207         if (handle < 0) {
 208                 err = handle;
 209                 goto free_gpuvm;
 210         }
 211 
 212         if (kptr) {
 213                 err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kdev->kgd,
 214                                 (struct kgd_mem *)mem, kptr, NULL);
 215                 if (err) {
 216                         pr_debug("Map GTT BO to kernel failed\n");
 217                         goto free_obj_handle;
 218                 }
 219         }
 220 
 221         return err;
 222 
 223 free_obj_handle:
 224         kfd_process_device_remove_obj_handle(pdd, handle);
 225 free_gpuvm:
 226 sync_memory_failed:
 227         kfd_process_free_gpuvm(mem, pdd);
 228         return err;
 229 
 230 err_map_mem:
 231         amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->kgd, mem);
 232 err_alloc_mem:
  233         if (kptr) *kptr = NULL; /* guard: kptr may be NULL */
 234         return err;
 235 }
 236 
  237 /* kfd_process_device_reserve_ib_mem - Reserve memory inside the
  238  *      process for IB usage. The memory reserved is for KFD to submit
  239  *      IBs to AMDGPU from the kernel. If the memory is reserved
  240  *      successfully, ib_kaddr will hold the CPU/kernel address. Check
  241  *      ib_kaddr before accessing the memory.
  242  */
 243 static int kfd_process_device_reserve_ib_mem(struct kfd_process_device *pdd)
 244 {
 245         struct qcm_process_device *qpd = &pdd->qpd;
 246         uint32_t flags = ALLOC_MEM_FLAGS_GTT |
 247                          ALLOC_MEM_FLAGS_NO_SUBSTITUTE |
 248                          ALLOC_MEM_FLAGS_WRITABLE |
 249                          ALLOC_MEM_FLAGS_EXECUTABLE;
 250         void *kaddr;
 251         int ret;
 252 
 253         if (qpd->ib_kaddr || !qpd->ib_base)
 254                 return 0;
 255 
 256         /* ib_base is only set for dGPU */
 257         ret = kfd_process_alloc_gpuvm(pdd, qpd->ib_base, PAGE_SIZE, flags,
 258                                       &kaddr);
 259         if (ret)
 260                 return ret;
 261 
 262         qpd->ib_kaddr = kaddr;
 263 
 264         return 0;
 265 }
 266 
 267 struct kfd_process *kfd_create_process(struct file *filep)
 268 {
 269         struct kfd_process *process;
 270         struct task_struct *thread = current;
 271         int ret;
 272 
 273         if (!thread->mm)
 274                 return ERR_PTR(-EINVAL);
 275 
 276         /* Only the pthreads threading model is supported. */
 277         if (thread->group_leader->mm != thread->mm)
 278                 return ERR_PTR(-EINVAL);
 279 
  280         /*
  281          * Take the kfd processes mutex before starting process creation
  282          * so that two threads of the same process cannot create two
  283          * kfd_process structures.
  284          */
 285         mutex_lock(&kfd_processes_mutex);
 286 
 287         /* A prior open of /dev/kfd could have already created the process. */
 288         process = find_process(thread);
 289         if (process) {
 290                 pr_debug("Process already found\n");
 291         } else {
 292                 process = create_process(thread);
 293                 if (IS_ERR(process))
 294                         goto out;
 295 
 296                 ret = kfd_process_init_cwsr_apu(process, filep);
 297                 if (ret) {
 298                         process = ERR_PTR(ret);
 299                         goto out;
 300                 }
 301 
 302                 if (!procfs.kobj)
 303                         goto out;
 304 
 305                 process->kobj = kfd_alloc_struct(process->kobj);
 306                 if (!process->kobj) {
  307                         pr_warn("Creating procfs kobject failed\n");
 308                         goto out;
 309                 }
 310                 ret = kobject_init_and_add(process->kobj, &procfs_type,
 311                                            procfs.kobj, "%d",
 312                                            (int)process->lead_thread->pid);
 313                 if (ret) {
  314                         pr_warn("Creating procfs pid directory failed\n");
 315                         goto out;
 316                 }
 317 
 318                 process->attr_pasid.name = "pasid";
 319                 process->attr_pasid.mode = KFD_SYSFS_FILE_MODE;
 320                 sysfs_attr_init(&process->attr_pasid);
 321                 ret = sysfs_create_file(process->kobj, &process->attr_pasid);
 322                 if (ret)
  323                         pr_warn("Creating pasid for pid %d failed\n",
 324                                         (int)process->lead_thread->pid);
 325         }
 326 out:
 327         mutex_unlock(&kfd_processes_mutex);
 328 
 329         return process;
 330 }
 331 
 332 struct kfd_process *kfd_get_process(const struct task_struct *thread)
 333 {
 334         struct kfd_process *process;
 335 
 336         if (!thread->mm)
 337                 return ERR_PTR(-EINVAL);
 338 
 339         /* Only the pthreads threading model is supported. */
 340         if (thread->group_leader->mm != thread->mm)
 341                 return ERR_PTR(-EINVAL);
 342 
 343         process = find_process(thread);
 344         if (!process)
 345                 return ERR_PTR(-EINVAL);
 346 
 347         return process;
 348 }
 349 
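      /* Must be called with kfd_processes_mutex held or from within an
       * SRCU read-side critical section on kfd_processes_srcu.
       */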
 350 static struct kfd_process *find_process_by_mm(const struct mm_struct *mm)
 351 {
 352         struct kfd_process *process;
 353 
 354         hash_for_each_possible_rcu(kfd_processes_table, process,
 355                                         kfd_processes, (uintptr_t)mm)
 356                 if (process->mm == mm)
 357                         return process;
 358 
 359         return NULL;
 360 }
 361 
 362 static struct kfd_process *find_process(const struct task_struct *thread)
 363 {
 364         struct kfd_process *p;
 365         int idx;
 366 
 367         idx = srcu_read_lock(&kfd_processes_srcu);
 368         p = find_process_by_mm(thread->mm);
 369         srcu_read_unlock(&kfd_processes_srcu, idx);
 370 
 371         return p;
 372 }
 373 
 374 void kfd_unref_process(struct kfd_process *p)
 375 {
 376         kref_put(&p->ref, kfd_process_ref_release);
 377 }
 378 
 379 static void kfd_process_device_free_bos(struct kfd_process_device *pdd)
 380 {
 381         struct kfd_process *p = pdd->process;
 382         void *mem;
 383         int id;
 384 
 385         /*
 386          * Remove all handles from idr and release appropriate
 387          * local memory object
 388          */
 389         idr_for_each_entry(&pdd->alloc_idr, mem, id) {
 390                 struct kfd_process_device *peer_pdd;
 391 
 392                 list_for_each_entry(peer_pdd, &p->per_device_data,
 393                                     per_device_list) {
 394                         if (!peer_pdd->vm)
 395                                 continue;
 396                         amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
 397                                 peer_pdd->dev->kgd, mem, peer_pdd->vm);
 398                 }
 399 
 400                 amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->kgd, mem);
 401                 kfd_process_device_remove_obj_handle(pdd, id);
 402         }
 403 }
 404 
 405 static void kfd_process_free_outstanding_kfd_bos(struct kfd_process *p)
 406 {
 407         struct kfd_process_device *pdd;
 408 
 409         list_for_each_entry(pdd, &p->per_device_data, per_device_list)
 410                 kfd_process_device_free_bos(pdd);
 411 }
 412 
 413 static void kfd_process_destroy_pdds(struct kfd_process *p)
 414 {
 415         struct kfd_process_device *pdd, *temp;
 416 
 417         list_for_each_entry_safe(pdd, temp, &p->per_device_data,
 418                                  per_device_list) {
 419                 pr_debug("Releasing pdd (topology id %d) for process (pasid %d)\n",
 420                                 pdd->dev->id, p->pasid);
 421 
 422                 if (pdd->drm_file) {
 423                         amdgpu_amdkfd_gpuvm_release_process_vm(
 424                                         pdd->dev->kgd, pdd->vm);
 425                         fput(pdd->drm_file);
 426                 }
 427                 else if (pdd->vm)
 428                         amdgpu_amdkfd_gpuvm_destroy_process_vm(
 429                                 pdd->dev->kgd, pdd->vm);
 430 
 431                 list_del(&pdd->per_device_list);
 432 
 433                 if (pdd->qpd.cwsr_kaddr && !pdd->qpd.cwsr_base)
 434                         free_pages((unsigned long)pdd->qpd.cwsr_kaddr,
 435                                 get_order(KFD_CWSR_TBA_TMA_SIZE));
 436 
 437                 kfree(pdd->qpd.doorbell_bitmap);
 438                 idr_destroy(&pdd->alloc_idr);
 439 
 440                 kfree(pdd);
 441         }
 442 }
 443 
 444 /* No process locking is needed in this function, because the process
 445  * is not findable any more. We must assume that no other thread is
 446  * using it any more, otherwise we couldn't safely free the process
 447  * structure in the end.
 448  */
 449 static void kfd_process_wq_release(struct work_struct *work)
 450 {
 451         struct kfd_process *p = container_of(work, struct kfd_process,
 452                                              release_work);
 453 
 454         /* Remove the procfs files */
 455         if (p->kobj) {
 456                 sysfs_remove_file(p->kobj, &p->attr_pasid);
 457                 kobject_del(p->kobj);
 458                 kobject_put(p->kobj);
 459                 p->kobj = NULL;
 460         }
 461 
 462         kfd_iommu_unbind_process(p);
 463 
 464         kfd_process_free_outstanding_kfd_bos(p);
 465 
 466         kfd_process_destroy_pdds(p);
 467         dma_fence_put(p->ef);
 468 
 469         kfd_event_free_process(p);
 470 
 471         kfd_pasid_free(p->pasid);
 472         kfd_free_process_doorbells(p);
 473 
 474         mutex_destroy(&p->mutex);
 475 
 476         put_task_struct(p->lead_thread);
 477 
 478         kfree(p);
 479 }
 480 
 481 static void kfd_process_ref_release(struct kref *ref)
 482 {
 483         struct kfd_process *p = container_of(ref, struct kfd_process, ref);
 484 
 485         INIT_WORK(&p->release_work, kfd_process_wq_release);
 486         queue_work(kfd_process_wq, &p->release_work);
 487 }
 488 
 489 static void kfd_process_free_notifier(struct mmu_notifier *mn)
 490 {
 491         kfd_unref_process(container_of(mn, struct kfd_process, mmu_notifier));
 492 }
 493 
 494 static void kfd_process_notifier_release(struct mmu_notifier *mn,
 495                                         struct mm_struct *mm)
 496 {
 497         struct kfd_process *p;
 498         struct kfd_process_device *pdd = NULL;
 499 
  500         /*
  501          * The kfd_process structure cannot be freed because the
  502          * mmu_notifier SRCU is read-locked
  503          */
 504         p = container_of(mn, struct kfd_process, mmu_notifier);
 505         if (WARN_ON(p->mm != mm))
 506                 return;
 507 
 508         mutex_lock(&kfd_processes_mutex);
 509         hash_del_rcu(&p->kfd_processes);
 510         mutex_unlock(&kfd_processes_mutex);
 511         synchronize_srcu(&kfd_processes_srcu);
 512 
 513         cancel_delayed_work_sync(&p->eviction_work);
 514         cancel_delayed_work_sync(&p->restore_work);
 515 
 516         mutex_lock(&p->mutex);
 517 
  518         /* Iterate over all process device data structures; if a pdd is
  519          * in debug mode, force unregistration first so that the queues
  520          * can be destroyed afterwards.
  521          */
 522         list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
 523                 struct kfd_dev *dev = pdd->dev;
 524 
 525                 mutex_lock(kfd_get_dbgmgr_mutex());
 526                 if (dev && dev->dbgmgr && dev->dbgmgr->pasid == p->pasid) {
 527                         if (!kfd_dbgmgr_unregister(dev->dbgmgr, p)) {
 528                                 kfd_dbgmgr_destroy(dev->dbgmgr);
 529                                 dev->dbgmgr = NULL;
 530                         }
 531                 }
 532                 mutex_unlock(kfd_get_dbgmgr_mutex());
 533         }
 534 
 535         kfd_process_dequeue_from_all_devices(p);
 536         pqm_uninit(&p->pqm);
 537 
 538         /* Indicate to other users that MM is no longer valid */
 539         p->mm = NULL;
 540 
 541         mutex_unlock(&p->mutex);
 542 
 543         mmu_notifier_put(&p->mmu_notifier);
 544 }
 545 
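      /*
       * Teardown happens in two stages: .release tears down KFD state
       * while the mm is still alive; .free_notifier then drops the
       * process reference after an SRCU grace period, which lets
       * kfd_process_ref_release() free the structure once the last
       * reference is gone.
       */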
 546 static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
 547         .release = kfd_process_notifier_release,
 548         .free_notifier = kfd_process_free_notifier,
 549 };
 550 
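      /*
       * CWSR (compute wave save/restore) lets the hardware preempt compute
       * waves mid-execution. The trap handler code lives at the TBA (trap
       * base address) and its scratch area at the TMA (trap memory
       * address). On APUs the buffer is system memory mapped through
       * /dev/kfd below; on dGPUs it is GTT memory allocated in the
       * per-process GPU VM (see kfd_process_device_init_cwsr_dgpu).
       */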
 551 static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
 552 {
  553         unsigned long offset;
 554         struct kfd_process_device *pdd;
 555 
 556         list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
 557                 struct kfd_dev *dev = pdd->dev;
 558                 struct qcm_process_device *qpd = &pdd->qpd;
 559 
 560                 if (!dev->cwsr_enabled || qpd->cwsr_kaddr || qpd->cwsr_base)
 561                         continue;
 562 
 563                 offset = (KFD_MMAP_TYPE_RESERVED_MEM | KFD_MMAP_GPU_ID(dev->id))
 564                         << PAGE_SHIFT;
 565                 qpd->tba_addr = (int64_t)vm_mmap(filep, 0,
 566                         KFD_CWSR_TBA_TMA_SIZE, PROT_READ | PROT_EXEC,
 567                         MAP_SHARED, offset);
 568 
 569                 if (IS_ERR_VALUE(qpd->tba_addr)) {
 570                         int err = qpd->tba_addr;
 571 
  572                         pr_err("Failure to set tba address, error %d\n", err);
 573                         qpd->tba_addr = 0;
 574                         qpd->cwsr_kaddr = NULL;
 575                         return err;
 576                 }
 577 
 578                 memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size);
 579 
 580                 qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
  581                 pr_debug("set tba: 0x%llx, tma: 0x%llx, cwsr_kaddr: %p for pqm.\n",
 582                         qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);
 583         }
 584 
 585         return 0;
 586 }
 587 
 588 static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd)
 589 {
 590         struct kfd_dev *dev = pdd->dev;
 591         struct qcm_process_device *qpd = &pdd->qpd;
 592         uint32_t flags = ALLOC_MEM_FLAGS_GTT |
 593                 ALLOC_MEM_FLAGS_NO_SUBSTITUTE | ALLOC_MEM_FLAGS_EXECUTABLE;
 594         void *kaddr;
 595         int ret;
 596 
 597         if (!dev->cwsr_enabled || qpd->cwsr_kaddr || !qpd->cwsr_base)
 598                 return 0;
 599 
 600         /* cwsr_base is only set for dGPU */
 601         ret = kfd_process_alloc_gpuvm(pdd, qpd->cwsr_base,
 602                                       KFD_CWSR_TBA_TMA_SIZE, flags, &kaddr);
 603         if (ret)
 604                 return ret;
 605 
 606         qpd->cwsr_kaddr = kaddr;
 607         qpd->tba_addr = qpd->cwsr_base;
 608 
 609         memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size);
 610 
 611         qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
  612         pr_debug("set tba: 0x%llx, tma: 0x%llx, cwsr_kaddr: %p for pqm.\n",
 613                  qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);
 614 
 615         return 0;
 616 }
 617 
 618 /*
 619  * On return the kfd_process is fully operational and will be freed when the
 620  * mm is released
 621  */
 622 static struct kfd_process *create_process(const struct task_struct *thread)
 623 {
 624         struct kfd_process *process;
 625         int err = -ENOMEM;
 626 
 627         process = kzalloc(sizeof(*process), GFP_KERNEL);
 628         if (!process)
 629                 goto err_alloc_process;
 630 
 631         kref_init(&process->ref);
 632         mutex_init(&process->mutex);
 633         process->mm = thread->mm;
 634         process->lead_thread = thread->group_leader;
 635         INIT_LIST_HEAD(&process->per_device_data);
 636         INIT_DELAYED_WORK(&process->eviction_work, evict_process_worker);
 637         INIT_DELAYED_WORK(&process->restore_work, restore_process_worker);
 638         process->last_restore_timestamp = get_jiffies_64();
 639         kfd_event_init_process(process);
 640         process->is_32bit_user_mode = in_compat_syscall();
 641 
 642         process->pasid = kfd_pasid_alloc();
 643         if (process->pasid == 0)
 644                 goto err_alloc_pasid;
 645 
 646         if (kfd_alloc_process_doorbells(process) < 0)
 647                 goto err_alloc_doorbells;
 648 
 649         err = pqm_init(&process->pqm, process);
 650         if (err != 0)
 651                 goto err_process_pqm_init;
 652 
  653         /* init process apertures */
 654         err = kfd_init_apertures(process);
 655         if (err != 0)
 656                 goto err_init_apertures;
 657 
 658         /* Must be last, have to use release destruction after this */
 659         process->mmu_notifier.ops = &kfd_process_mmu_notifier_ops;
 660         err = mmu_notifier_register(&process->mmu_notifier, process->mm);
 661         if (err)
 662                 goto err_register_notifier;
 663 
 664         get_task_struct(process->lead_thread);
 665         hash_add_rcu(kfd_processes_table, &process->kfd_processes,
 666                         (uintptr_t)process->mm);
 667 
 668         return process;
 669 
 670 err_register_notifier:
 671         kfd_process_free_outstanding_kfd_bos(process);
 672         kfd_process_destroy_pdds(process);
 673 err_init_apertures:
 674         pqm_uninit(&process->pqm);
 675 err_process_pqm_init:
 676         kfd_free_process_doorbells(process);
 677 err_alloc_doorbells:
 678         kfd_pasid_free(process->pasid);
 679 err_alloc_pasid:
 680         mutex_destroy(&process->mutex);
 681         kfree(process);
 682 err_alloc_process:
 683         return ERR_PTR(err);
 684 }
 685 
 686 static int init_doorbell_bitmap(struct qcm_process_device *qpd,
 687                         struct kfd_dev *dev)
 688 {
 689         unsigned int i;
 690 
 691         if (!KFD_IS_SOC15(dev->device_info->asic_family))
 692                 return 0;
 693 
 694         qpd->doorbell_bitmap =
 695                 kzalloc(DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
 696                                      BITS_PER_BYTE), GFP_KERNEL);
 697         if (!qpd->doorbell_bitmap)
 698                 return -ENOMEM;
 699 
 700         /* Mask out doorbells reserved for SDMA, IH, and VCN on SOC15. */
 701         for (i = 0; i < KFD_MAX_NUM_OF_QUEUES_PER_PROCESS / 2; i++) {
 702                 if (i >= dev->shared_resources.non_cp_doorbells_start
 703                         && i <= dev->shared_resources.non_cp_doorbells_end) {
 704                         set_bit(i, qpd->doorbell_bitmap);
 705                         set_bit(i + KFD_QUEUE_DOORBELL_MIRROR_OFFSET,
 706                                 qpd->doorbell_bitmap);
 707                         pr_debug("reserved doorbell 0x%03x and 0x%03x\n", i,
 708                                 i + KFD_QUEUE_DOORBELL_MIRROR_OFFSET);
 709                 }
 710         }
 711 
 712         return 0;
 713 }
 714 
 715 struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
 716                                                         struct kfd_process *p)
 717 {
 718         struct kfd_process_device *pdd = NULL;
 719 
 720         list_for_each_entry(pdd, &p->per_device_data, per_device_list)
 721                 if (pdd->dev == dev)
 722                         return pdd;
 723 
 724         return NULL;
 725 }
 726 
 727 struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
 728                                                         struct kfd_process *p)
 729 {
 730         struct kfd_process_device *pdd = NULL;
 731 
 732         pdd = kzalloc(sizeof(*pdd), GFP_KERNEL);
 733         if (!pdd)
 734                 return NULL;
 735 
 736         if (init_doorbell_bitmap(&pdd->qpd, dev)) {
 737                 pr_err("Failed to init doorbell for process\n");
 738                 kfree(pdd);
 739                 return NULL;
 740         }
 741 
 742         pdd->dev = dev;
 743         INIT_LIST_HEAD(&pdd->qpd.queues_list);
 744         INIT_LIST_HEAD(&pdd->qpd.priv_queue_list);
 745         pdd->qpd.dqm = dev->dqm;
 746         pdd->qpd.pqm = &p->pqm;
 747         pdd->qpd.evicted = 0;
 748         pdd->process = p;
 749         pdd->bound = PDD_UNBOUND;
 750         pdd->already_dequeued = false;
 751         list_add(&pdd->per_device_list, &p->per_device_data);
 752 
 753         /* Init idr used for memory handle translation */
 754         idr_init(&pdd->alloc_idr);
 755 
 756         return pdd;
 757 }
 758 
 759 /**
 760  * kfd_process_device_init_vm - Initialize a VM for a process-device
 761  *
 762  * @pdd: The process-device
 763  * @drm_file: Optional pointer to a DRM file descriptor
 764  *
 765  * If @drm_file is specified, it will be used to acquire the VM from
 766  * that file descriptor. If successful, the @pdd takes ownership of
 767  * the file descriptor.
 768  *
 769  * If @drm_file is NULL, a new VM is created.
 770  *
 771  * Returns 0 on success, -errno on failure.
 772  */
 773 int kfd_process_device_init_vm(struct kfd_process_device *pdd,
 774                                struct file *drm_file)
 775 {
 776         struct kfd_process *p;
 777         struct kfd_dev *dev;
 778         int ret;
 779 
 780         if (pdd->vm)
 781                 return drm_file ? -EBUSY : 0;
 782 
 783         p = pdd->process;
 784         dev = pdd->dev;
 785 
 786         if (drm_file)
 787                 ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(
 788                         dev->kgd, drm_file, p->pasid,
 789                         &pdd->vm, &p->kgd_process_info, &p->ef);
 790         else
 791                 ret = amdgpu_amdkfd_gpuvm_create_process_vm(dev->kgd, p->pasid,
 792                         &pdd->vm, &p->kgd_process_info, &p->ef);
 793         if (ret) {
 794                 pr_err("Failed to create process VM object\n");
 795                 return ret;
 796         }
 797 
 798         amdgpu_vm_set_task_info(pdd->vm);
 799 
 800         ret = kfd_process_device_reserve_ib_mem(pdd);
 801         if (ret)
 802                 goto err_reserve_ib_mem;
 803         ret = kfd_process_device_init_cwsr_dgpu(pdd);
 804         if (ret)
 805                 goto err_init_cwsr;
 806 
 807         pdd->drm_file = drm_file;
 808 
 809         return 0;
 810 
 811 err_init_cwsr:
 812 err_reserve_ib_mem:
 813         kfd_process_device_free_bos(pdd);
 814         if (!drm_file)
 815                 amdgpu_amdkfd_gpuvm_destroy_process_vm(dev->kgd, pdd->vm);
 816         pdd->vm = NULL;
 817 
 818         return ret;
 819 }
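
      /*
       * A sketch of the two call sites (names from elsewhere in the
       * driver): the AMDKFD_IOC_ACQUIRE_VM ioctl passes a DRM render-node
       * file so the VM is shared with a DRM context, while
       * kfd_bind_process_to_device() below passes NULL to create a
       * standalone VM:
       *
       *	ret = kfd_process_device_init_vm(pdd, drm_file);  // acquire
       *	ret = kfd_process_device_init_vm(pdd, NULL);      // create
       */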
 820 
 821 /*
 822  * Direct the IOMMU to bind the process (specifically the pasid->mm)
 823  * to the device.
 824  * Unbinding occurs when the process dies or the device is removed.
 825  *
 826  * Assumes that the process lock is held.
 827  */
 828 struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
 829                                                         struct kfd_process *p)
 830 {
 831         struct kfd_process_device *pdd;
 832         int err;
 833 
 834         pdd = kfd_get_process_device_data(dev, p);
 835         if (!pdd) {
 836                 pr_err("Process device data doesn't exist\n");
 837                 return ERR_PTR(-ENOMEM);
 838         }
 839 
 840         err = kfd_iommu_bind_process_to_device(pdd);
 841         if (err)
 842                 return ERR_PTR(err);
 843 
 844         err = kfd_process_device_init_vm(pdd, NULL);
 845         if (err)
 846                 return ERR_PTR(err);
 847 
 848         return pdd;
 849 }
 850 
 851 struct kfd_process_device *kfd_get_first_process_device_data(
 852                                                 struct kfd_process *p)
 853 {
 854         return list_first_entry(&p->per_device_data,
 855                                 struct kfd_process_device,
 856                                 per_device_list);
 857 }
 858 
 859 struct kfd_process_device *kfd_get_next_process_device_data(
 860                                                 struct kfd_process *p,
 861                                                 struct kfd_process_device *pdd)
 862 {
 863         if (list_is_last(&pdd->per_device_list, &p->per_device_data))
 864                 return NULL;
 865         return list_next_entry(pdd, per_device_list);
 866 }
 867 
 868 bool kfd_has_process_device_data(struct kfd_process *p)
 869 {
  870         return !list_empty(&p->per_device_data);
 871 }
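
      /*
       * Example (hypothetical caller): the accessors above form a simple
       * iteration protocol over a process's per-device data. Note that
       * kfd_get_first_process_device_data() assumes a non-empty list, so
       * check kfd_has_process_device_data() first:
       *
       *	struct kfd_process_device *pdd;
       *
       *	if (kfd_has_process_device_data(p))
       *		for (pdd = kfd_get_first_process_device_data(p); pdd;
       *		     pdd = kfd_get_next_process_device_data(p, pdd))
       *			use_pdd(pdd);
       */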
 872 
 873 /* Create specific handle mapped to mem from process local memory idr
 874  * Assumes that the process lock is held.
 875  */
 876 int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
 877                                         void *mem)
 878 {
 879         return idr_alloc(&pdd->alloc_idr, mem, 0, 0, GFP_KERNEL);
 880 }
 881 
 882 /* Translate specific handle from process local memory idr
 883  * Assumes that the process lock is held.
 884  */
 885 void *kfd_process_device_translate_handle(struct kfd_process_device *pdd,
 886                                         int handle)
 887 {
 888         if (handle < 0)
 889                 return NULL;
 890 
 891         return idr_find(&pdd->alloc_idr, handle);
 892 }
 893 
 894 /* Remove specific handle from process local memory idr
 895  * Assumes that the process lock is held.
 896  */
 897 void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd,
 898                                         int handle)
 899 {
 900         if (handle >= 0)
 901                 idr_remove(&pdd->alloc_idr, handle);
 902 }
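
      /*
       * A sketch of the handle lifecycle implemented by the three helpers
       * above (hypothetical caller; the process lock is held throughout):
       *
       *	int handle = kfd_process_device_create_obj_handle(pdd, mem);
       *
       *	if (handle < 0)
       *		return handle;
       *	mem = kfd_process_device_translate_handle(pdd, handle);
       *	...
       *	kfd_process_device_remove_obj_handle(pdd, handle);
       */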
 903 
 904 /* This increments the process->ref counter. */
 905 struct kfd_process *kfd_lookup_process_by_pasid(unsigned int pasid)
 906 {
 907         struct kfd_process *p, *ret_p = NULL;
 908         unsigned int temp;
 909 
 910         int idx = srcu_read_lock(&kfd_processes_srcu);
 911 
 912         hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
 913                 if (p->pasid == pasid) {
 914                         kref_get(&p->ref);
 915                         ret_p = p;
 916                         break;
 917                 }
 918         }
 919 
 920         srcu_read_unlock(&kfd_processes_srcu, idx);
 921 
 922         return ret_p;
 923 }
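
      /*
       * Example (hypothetical caller): because the lookup takes a
       * reference, every successful call must be paired with
       * kfd_unref_process(). The same applies to kfd_lookup_process_by_mm()
       * below:
       *
       *	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
       *
       *	if (p) {
       *		... use p ...
       *		kfd_unref_process(p);
       *	}
       */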
 924 
 925 /* This increments the process->ref counter. */
 926 struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm)
 927 {
 928         struct kfd_process *p;
 929 
 930         int idx = srcu_read_lock(&kfd_processes_srcu);
 931 
 932         p = find_process_by_mm(mm);
 933         if (p)
 934                 kref_get(&p->ref);
 935 
 936         srcu_read_unlock(&kfd_processes_srcu, idx);
 937 
 938         return p;
 939 }
 940 
  941 /* kfd_process_evict_queues - Evict all user queues of a process
 942  *
 943  * Eviction is reference-counted per process-device. This means multiple
 944  * evictions from different sources can be nested safely.
 945  */
 946 int kfd_process_evict_queues(struct kfd_process *p)
 947 {
 948         struct kfd_process_device *pdd;
 949         int r = 0;
 950         unsigned int n_evicted = 0;
 951 
 952         list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
 953                 r = pdd->dev->dqm->ops.evict_process_queues(pdd->dev->dqm,
 954                                                             &pdd->qpd);
 955                 if (r) {
 956                         pr_err("Failed to evict process queues\n");
 957                         goto fail;
 958                 }
 959                 n_evicted++;
 960         }
 961 
 962         return r;
 963 
 964 fail:
 965         /* To keep state consistent, roll back partial eviction by
 966          * restoring queues
 967          */
 968         list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
 969                 if (n_evicted == 0)
 970                         break;
 971                 if (pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
 972                                                               &pdd->qpd))
 973                         pr_err("Failed to restore queues\n");
 974 
 975                 n_evicted--;
 976         }
 977 
 978         return r;
 979 }
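
      /*
       * Eviction nesting sketch (hypothetical callers): eviction is
       * reference-counted per process-device (qpd.evicted in the DQM), so
       * the queues only run again after the last matching restore:
       *
       *	kfd_process_evict_queues(p);	// source A: queues stop
       *	kfd_process_evict_queues(p);	// source B: still stopped
       *	kfd_process_restore_queues(p);	// A done: still stopped
       *	kfd_process_restore_queues(p);	// B done: queues run again
       */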
 980 
  981 /* kfd_process_restore_queues - Restore all user queues of a process */
 982 int kfd_process_restore_queues(struct kfd_process *p)
 983 {
 984         struct kfd_process_device *pdd;
 985         int r, ret = 0;
 986 
 987         list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
 988                 r = pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
 989                                                               &pdd->qpd);
 990                 if (r) {
 991                         pr_err("Failed to restore process queues\n");
 992                         if (!ret)
 993                                 ret = r;
 994                 }
 995         }
 996 
 997         return ret;
 998 }
 999 
1000 static void evict_process_worker(struct work_struct *work)
1001 {
1002         int ret;
1003         struct kfd_process *p;
1004         struct delayed_work *dwork;
1005 
1006         dwork = to_delayed_work(work);
1007 
 1008         /* Process termination cancels this work item. So during the
 1009          * lifetime of this work item, kfd_process p will be valid
 1010          */
1011         p = container_of(dwork, struct kfd_process, eviction_work);
1012         WARN_ONCE(p->last_eviction_seqno != p->ef->seqno,
1013                   "Eviction fence mismatch\n");
1014 
 1015         /* A narrow window of overlap between the restore and evict work
 1016          * items is possible. Once amdgpu_amdkfd_gpuvm_restore_process_bos
 1017          * unreserves the KFD BOs, the process can be evicted again, but
 1018          * the restore work still has a few steps left to finish. So wait
 1019          * for any previous restore work to complete.
 1020          */
1021         flush_delayed_work(&p->restore_work);
1022 
1023         pr_debug("Started evicting pasid %d\n", p->pasid);
1024         ret = kfd_process_evict_queues(p);
1025         if (!ret) {
1026                 dma_fence_signal(p->ef);
1027                 dma_fence_put(p->ef);
1028                 p->ef = NULL;
1029                 queue_delayed_work(kfd_restore_wq, &p->restore_work,
1030                                 msecs_to_jiffies(PROCESS_RESTORE_TIME_MS));
1031 
1032                 pr_debug("Finished evicting pasid %d\n", p->pasid);
1033         } else
1034                 pr_err("Failed to evict queues of pasid %d\n", p->pasid);
1035 }
1036 
1037 static void restore_process_worker(struct work_struct *work)
1038 {
1039         struct delayed_work *dwork;
1040         struct kfd_process *p;
1041         int ret = 0;
1042 
1043         dwork = to_delayed_work(work);
1044 
 1045         /* Process termination cancels this work item. So during the
 1046          * lifetime of this work item, kfd_process p will be valid
 1047          */
1048         p = container_of(dwork, struct kfd_process, restore_work);
1049         pr_debug("Started restoring pasid %d\n", p->pasid);
1050 
 1051         /* Set last_restore_timestamp before the restoration has actually
 1052          * succeeded. Otherwise it would have to be set by KGD
 1053          * (restore_process_bos) before the KFD BOs are unreserved; if not,
 1054          * the process could be evicted again before the timestamp is set.
 1055          * If the restore fails, the timestamp will be set again on the
 1056          * next attempt. This means the minimum GPU quantum is
 1057          * PROCESS_ACTIVE_TIME_MS minus the time it takes to execute the
 1058          * following two functions.
 1059          */
1060 
1061         p->last_restore_timestamp = get_jiffies_64();
1062         ret = amdgpu_amdkfd_gpuvm_restore_process_bos(p->kgd_process_info,
1063                                                      &p->ef);
1064         if (ret) {
1065                 pr_debug("Failed to restore BOs of pasid %d, retry after %d ms\n",
1066                          p->pasid, PROCESS_BACK_OFF_TIME_MS);
1067                 ret = queue_delayed_work(kfd_restore_wq, &p->restore_work,
1068                                 msecs_to_jiffies(PROCESS_BACK_OFF_TIME_MS));
1069                 WARN(!ret, "reschedule restore work failed\n");
1070                 return;
1071         }
1072 
1073         ret = kfd_process_restore_queues(p);
1074         if (!ret)
1075                 pr_debug("Finished restoring pasid %d\n", p->pasid);
1076         else
1077                 pr_err("Failed to restore queues of pasid %d\n", p->pasid);
1078 }
1079 
1080 void kfd_suspend_all_processes(void)
1081 {
1082         struct kfd_process *p;
1083         unsigned int temp;
1084         int idx = srcu_read_lock(&kfd_processes_srcu);
1085 
1086         hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
1087                 cancel_delayed_work_sync(&p->eviction_work);
1088                 cancel_delayed_work_sync(&p->restore_work);
1089 
1090                 if (kfd_process_evict_queues(p))
1091                         pr_err("Failed to suspend process %d\n", p->pasid);
1092                 dma_fence_signal(p->ef);
1093                 dma_fence_put(p->ef);
1094                 p->ef = NULL;
1095         }
1096         srcu_read_unlock(&kfd_processes_srcu, idx);
1097 }
1098 
1099 int kfd_resume_all_processes(void)
1100 {
1101         struct kfd_process *p;
1102         unsigned int temp;
1103         int ret = 0, idx = srcu_read_lock(&kfd_processes_srcu);
1104 
1105         hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
1106                 if (!queue_delayed_work(kfd_restore_wq, &p->restore_work, 0)) {
1107                         pr_err("Restore process %d failed during resume\n",
1108                                p->pasid);
1109                         ret = -EFAULT;
1110                 }
1111         }
1112         srcu_read_unlock(&kfd_processes_srcu, idx);
1113         return ret;
1114 }
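
      /*
       * kfd_suspend_all_processes() and kfd_resume_all_processes() are
       * used as a pair around events that take all GPUs away, e.g. device
       * suspend or GPU reset (a summary; the callers live in kfd_device.c).
       */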
1115 
1116 int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process,
1117                           struct vm_area_struct *vma)
1118 {
1119         struct kfd_process_device *pdd;
1120         struct qcm_process_device *qpd;
1121 
1122         if ((vma->vm_end - vma->vm_start) != KFD_CWSR_TBA_TMA_SIZE) {
1123                 pr_err("Incorrect CWSR mapping size.\n");
1124                 return -EINVAL;
1125         }
1126 
1127         pdd = kfd_get_process_device_data(dev, process);
1128         if (!pdd)
1129                 return -EINVAL;
1130         qpd = &pdd->qpd;
1131 
1132         qpd->cwsr_kaddr = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
1133                                         get_order(KFD_CWSR_TBA_TMA_SIZE));
1134         if (!qpd->cwsr_kaddr) {
1135                 pr_err("Error allocating per process CWSR buffer.\n");
1136                 return -ENOMEM;
1137         }
1138 
1139         vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND
1140                 | VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP;
1141         /* Mapping pages to user process */
1142         return remap_pfn_range(vma, vma->vm_start,
1143                                PFN_DOWN(__pa(qpd->cwsr_kaddr)),
1144                                KFD_CWSR_TBA_TMA_SIZE, vma->vm_page_prot);
1145 }
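
      /*
       * Reached from the driver's mmap handler for
       * KFD_MMAP_TYPE_RESERVED_MEM offsets (see the offset encoding in
       * kfd_process_init_cwsr_apu above); this is the APU path where the
       * CWSR buffer is plain system memory.
       */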
1146 
1147 void kfd_flush_tlb(struct kfd_process_device *pdd)
1148 {
1149         struct kfd_dev *dev = pdd->dev;
1150         const struct kfd2kgd_calls *f2g = dev->kfd2kgd;
1151 
1152         if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
1153                 /* Nothing to flush until a VMID is assigned, which
1154                  * only happens when the first queue is created.
1155                  */
1156                 if (pdd->qpd.vmid)
1157                         f2g->invalidate_tlbs_vmid(dev->kgd, pdd->qpd.vmid);
1158         } else {
1159                 f2g->invalidate_tlbs(dev->kgd, pdd->process->pasid);
1160         }
1161 }
1162 
1163 #if defined(CONFIG_DEBUG_FS)
1164 
1165 int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data)
1166 {
1167         struct kfd_process *p;
1168         unsigned int temp;
1169         int r = 0;
1170 
1171         int idx = srcu_read_lock(&kfd_processes_srcu);
1172 
1173         hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
1174                 seq_printf(m, "Process %d PASID %d:\n",
1175                            p->lead_thread->tgid, p->pasid);
1176 
1177                 mutex_lock(&p->mutex);
1178                 r = pqm_debugfs_mqds(m, &p->pqm);
1179                 mutex_unlock(&p->mutex);
1180 
1181                 if (r)
1182                         break;
1183         }
1184 
1185         srcu_read_unlock(&kfd_processes_srcu, idx);
1186 
1187         return r;
1188 }
1189 
1190 #endif
1191 
