Lines matching refs:kfd

91 static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
93 static void kfd_gtt_sa_fini(struct kfd_dev *kfd);
112 struct kfd_dev *kfd; in kgd2kfd_probe() local
120 kfd = kzalloc(sizeof(*kfd), GFP_KERNEL); in kgd2kfd_probe()
121 if (!kfd) in kgd2kfd_probe()
124 kfd->kgd = kgd; in kgd2kfd_probe()
125 kfd->device_info = device_info; in kgd2kfd_probe()
126 kfd->pdev = pdev; in kgd2kfd_probe()
127 kfd->init_complete = false; in kgd2kfd_probe()
128 kfd->kfd2kgd = f2g; in kgd2kfd_probe()
130 mutex_init(&kfd->doorbell_mutex); in kgd2kfd_probe()
131 memset(&kfd->doorbell_available_index, 0, in kgd2kfd_probe()
132 sizeof(kfd->doorbell_available_index)); in kgd2kfd_probe()
134 return kfd; in kgd2kfd_probe()
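The probe path above is the usual allocate-and-populate pattern: kzalloc() a zeroed struct kfd_dev, record the kgd handle, device_info, pdev and the kfd2kgd callback table, initialise the doorbell mutex and bookkeeping, and return the struct with init_complete still false. Below is a minimal standalone sketch of that pattern (userspace, calloc standing in for kzalloc, a pthread mutex for the kernel mutex; the struct fields' types and the array size are illustrative, not taken from the driver headers):

    #include <stdlib.h>
    #include <string.h>
    #include <pthread.h>

    struct kfd_dev_sketch {
        const void *kgd;                 /* opaque handle from the graphics driver */
        const void *device_info;         /* per-ASIC parameters */
        const void *pdev;                /* PCI device */
        const void *kfd2kgd;             /* callback table into the graphics driver */
        int init_complete;               /* set only once kgd2kfd_device_init() succeeds */
        pthread_mutex_t doorbell_mutex;
        unsigned long doorbell_available_index[4]; /* size illustrative */
    };

    static struct kfd_dev_sketch *probe_sketch(const void *kgd, const void *pdev,
                                               const void *device_info, const void *f2g)
    {
        /* calloc stands in for kzalloc(..., GFP_KERNEL): zeroed memory or NULL */
        struct kfd_dev_sketch *kfd = calloc(1, sizeof(*kfd));

        if (!kfd)
            return NULL;

        kfd->kgd = kgd;
        kfd->device_info = device_info;
        kfd->pdev = pdev;
        kfd->init_complete = 0;
        kfd->kfd2kgd = f2g;

        pthread_mutex_init(&kfd->doorbell_mutex, NULL);
        memset(kfd->doorbell_available_index, 0,
               sizeof(kfd->doorbell_available_index));

        return kfd;
    }

    int main(void)
    {
        struct kfd_dev_sketch *kfd = probe_sketch(NULL, NULL, NULL, NULL);
        int ok = kfd != NULL;

        if (kfd)
            pthread_mutex_destroy(&kfd->doorbell_mutex);
        free(kfd);
        return ok ? 0 : 1;
    }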
137 static bool device_iommu_pasid_init(struct kfd_dev *kfd) in device_iommu_pasid_init() argument
147 err = amd_iommu_device_info(kfd->pdev, &iommu_info); in device_iommu_pasid_init()
163 (unsigned int)1 << kfd->device_info->max_pasid_bits, in device_iommu_pasid_init()
171 kfd->doorbell_process_limit - 1); in device_iommu_pasid_init()
173 err = amd_iommu_init_device(kfd->pdev, pasid_limit); in device_iommu_pasid_init()
181 amd_iommu_free_device(kfd->pdev); in device_iommu_pasid_init()
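device_iommu_pasid_init() queries the AMD IOMMU driver with amd_iommu_device_info() and then clamps the PASID space to the most restrictive limit involved: the IOMMU's reported maximum, 1 << max_pasid_bits from the device info (fragment at 163), and doorbell_process_limit - 1 (fragment at 171), before handing the result to amd_iommu_init_device(). A standalone sketch of that clamping arithmetic follows; the numeric values and the min_u() helper are illustrative, not taken from the driver:

    #include <stdio.h>

    static unsigned int min_u(unsigned int a, unsigned int b)
    {
        return a < b ? a : b;
    }

    int main(void)
    {
        unsigned int iommu_max_pasids = 65536;     /* reported by the IOMMU driver (illustrative) */
        unsigned int max_pasid_bits = 16;          /* from kfd->device_info (illustrative) */
        unsigned int doorbell_process_limit = 512; /* from doorbell aperture sizing (illustrative) */

        unsigned int pasid_limit = min_u(1u << max_pasid_bits, iommu_max_pasids);

        /* further clamp to doorbell_process_limit - 1, as in the fragment at line 171 */
        pasid_limit = min_u(pasid_limit, doorbell_process_limit - 1);

        /* the real code then calls amd_iommu_init_device(kfd->pdev, pasid_limit) */
        printf("pasid_limit = %u\n", pasid_limit); /* 511 with the numbers above */
        return 0;
    }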
222 bool kgd2kfd_device_init(struct kfd_dev *kfd, in kgd2kfd_device_init() argument
227 kfd->shared_resources = *gpu_resources; in kgd2kfd_device_init()
231 kfd->device_info->mqd_size_aligned; in kgd2kfd_device_init()
247 if (kfd->kfd2kgd->init_gtt_mem_allocation( in kgd2kfd_device_init()
248 kfd->kgd, size, &kfd->gtt_mem, in kgd2kfd_device_init()
249 &kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr)){ in kgd2kfd_device_init()
252 size, kfd->pdev->vendor, kfd->pdev->device); in kgd2kfd_device_init()
258 size, kfd->pdev->vendor, kfd->pdev->device); in kgd2kfd_device_init()
261 if (kfd_gtt_sa_init(kfd, size, 512) != 0) { in kgd2kfd_device_init()
267 kfd_doorbell_init(kfd); in kgd2kfd_device_init()
269 if (kfd_topology_add_device(kfd) != 0) { in kgd2kfd_device_init()
272 kfd->pdev->vendor, kfd->pdev->device); in kgd2kfd_device_init()
276 if (kfd_interrupt_init(kfd)) { in kgd2kfd_device_init()
279 kfd->pdev->vendor, kfd->pdev->device); in kgd2kfd_device_init()
283 if (!device_iommu_pasid_init(kfd)) { in kgd2kfd_device_init()
286 kfd->pdev->vendor, kfd->pdev->device); in kgd2kfd_device_init()
289 amd_iommu_set_invalidate_ctx_cb(kfd->pdev, in kgd2kfd_device_init()
291 amd_iommu_set_invalid_ppr_cb(kfd->pdev, iommu_invalid_ppr_cb); in kgd2kfd_device_init()
293 kfd->dqm = device_queue_manager_init(kfd); in kgd2kfd_device_init()
294 if (!kfd->dqm) { in kgd2kfd_device_init()
297 kfd->pdev->vendor, kfd->pdev->device); in kgd2kfd_device_init()
301 if (kfd->dqm->ops.start(kfd->dqm) != 0) { in kgd2kfd_device_init()
304 kfd->pdev->vendor, kfd->pdev->device); in kgd2kfd_device_init()
308 kfd->dbgmgr = NULL; in kgd2kfd_device_init()
310 kfd->init_complete = true; in kgd2kfd_device_init()
311 dev_info(kfd_device, "added device (%x:%x)\n", kfd->pdev->vendor, in kgd2kfd_device_init()
312 kfd->pdev->device); in kgd2kfd_device_init()
320 device_queue_manager_uninit(kfd->dqm); in kgd2kfd_device_init()
322 amd_iommu_free_device(kfd->pdev); in kgd2kfd_device_init()
324 kfd_interrupt_exit(kfd); in kgd2kfd_device_init()
326 kfd_topology_remove_device(kfd); in kgd2kfd_device_init()
328 kfd_gtt_sa_fini(kfd); in kgd2kfd_device_init()
330 kfd->kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem); in kgd2kfd_device_init()
333 kfd->pdev->vendor, kfd->pdev->device); in kgd2kfd_device_init()
335 return kfd->init_complete; in kgd2kfd_device_init()
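The tail of kgd2kfd_device_init() (fragments 320-335) is the classic kernel goto-unwind error path: each failed setup step jumps to a label that tears down everything already initialised, in reverse order, and the function finally returns kfd->init_complete. Below is a compact standalone illustration of the same unwinding structure; step_a/step_b/step_c are hypothetical stand-ins for the GTT, topology, interrupt, IOMMU and device-queue-manager steps:

    #include <stdbool.h>
    #include <stdio.h>

    /* stand-ins for kfd_gtt_sa_init(), kfd_interrupt_init(), dqm start, ... */
    static int  step_a_init(void) { puts("a init"); return 0; }
    static void step_a_fini(void) { puts("a fini"); }
    static int  step_b_init(void) { puts("b init"); return 0; }
    static void step_b_fini(void) { puts("b fini"); }
    static int  step_c_init(void) { puts("c init"); return -1; } /* force a failure */

    static bool device_init_sketch(void)
    {
        bool init_complete = false;

        if (step_a_init())
            goto out;
        if (step_b_init())
            goto a_error;
        if (step_c_init())
            goto b_error;

        init_complete = true;
        goto out;                 /* success: skip the unwind labels */

    b_error:
        step_b_fini();            /* labels run in reverse order of setup */
    a_error:
        step_a_fini();
    out:
        return init_complete;
    }

    int main(void)
    {
        printf("init_complete = %d\n", device_init_sketch());
        return 0;
    }

The same teardown calls reappear in kgd2kfd_device_exit() (fragments 340-346), which is why the exit path reads like the error labels run from the top.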
338 void kgd2kfd_device_exit(struct kfd_dev *kfd) in kgd2kfd_device_exit() argument
340 if (kfd->init_complete) { in kgd2kfd_device_exit()
341 device_queue_manager_uninit(kfd->dqm); in kgd2kfd_device_exit()
342 amd_iommu_free_device(kfd->pdev); in kgd2kfd_device_exit()
343 kfd_interrupt_exit(kfd); in kgd2kfd_device_exit()
344 kfd_topology_remove_device(kfd); in kgd2kfd_device_exit()
345 kfd_gtt_sa_fini(kfd); in kgd2kfd_device_exit()
346 kfd->kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem); in kgd2kfd_device_exit()
349 kfree(kfd); in kgd2kfd_device_exit()
352 void kgd2kfd_suspend(struct kfd_dev *kfd) in kgd2kfd_suspend() argument
354 BUG_ON(kfd == NULL); in kgd2kfd_suspend()
356 if (kfd->init_complete) { in kgd2kfd_suspend()
357 kfd->dqm->ops.stop(kfd->dqm); in kgd2kfd_suspend()
358 amd_iommu_set_invalidate_ctx_cb(kfd->pdev, NULL); in kgd2kfd_suspend()
359 amd_iommu_set_invalid_ppr_cb(kfd->pdev, NULL); in kgd2kfd_suspend()
360 amd_iommu_free_device(kfd->pdev); in kgd2kfd_suspend()
364 int kgd2kfd_resume(struct kfd_dev *kfd) in kgd2kfd_resume() argument
369 BUG_ON(kfd == NULL); in kgd2kfd_resume()
373 if (kfd->init_complete) { in kgd2kfd_resume()
374 err = amd_iommu_init_device(kfd->pdev, pasid_limit); in kgd2kfd_resume()
377 amd_iommu_set_invalidate_ctx_cb(kfd->pdev, in kgd2kfd_resume()
379 amd_iommu_set_invalid_ppr_cb(kfd->pdev, iommu_invalid_ppr_cb); in kgd2kfd_resume()
380 kfd->dqm->ops.start(kfd->dqm); in kgd2kfd_resume()
387 void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry) in kgd2kfd_interrupt() argument
389 if (!kfd->init_complete) in kgd2kfd_interrupt()
392 spin_lock(&kfd->interrupt_lock); in kgd2kfd_interrupt()
394 if (kfd->interrupts_active in kgd2kfd_interrupt()
395 && interrupt_is_wanted(kfd, ih_ring_entry) in kgd2kfd_interrupt()
396 && enqueue_ih_ring_entry(kfd, ih_ring_entry)) in kgd2kfd_interrupt()
397 schedule_work(&kfd->interrupt_work); in kgd2kfd_interrupt()
399 spin_unlock(&kfd->interrupt_lock); in kgd2kfd_interrupt()
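kgd2kfd_interrupt() is the top half of interrupt handling: under kfd->interrupt_lock it checks that interrupts are active and wanted, copies the IH ring entry into a software ring with enqueue_ih_ring_entry(), and only then schedules kfd->interrupt_work for deferred processing. A standalone sketch of that filter-enqueue-defer shape follows (pthread mutex in place of the spinlock, a flag and a printout in place of schedule_work(); the entry size and the trivial interrupt_is_wanted() filter are assumptions, since the listing does not show them):

    #include <pthread.h>
    #include <stdbool.h>
    #include <string.h>
    #include <stdio.h>

    #define RING_ENTRIES  16
    #define ENTRY_SIZE    16                /* bytes per IH entry; illustrative */

    static pthread_mutex_t interrupt_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned char ring[RING_ENTRIES][ENTRY_SIZE];
    static unsigned int head, tail;         /* consumer / producer indices */
    static bool interrupts_active = true;

    /* Stand-in for interrupt_is_wanted(): keep only entries the bottom half cares about. */
    static bool interrupt_is_wanted(const void *entry) { (void)entry; return true; }

    /* Stand-in for enqueue_ih_ring_entry(): copy into the ring, fail if full. */
    static bool enqueue_ih_ring_entry(const void *entry)
    {
        unsigned int next = (tail + 1) % RING_ENTRIES;

        if (next == head)
            return false;                   /* ring full: drop the entry */
        memcpy(ring[tail], entry, ENTRY_SIZE);
        tail = next;
        return true;
    }

    /* Top half: filter, enqueue, then kick the deferred worker. */
    static void interrupt_sketch(const void *ih_ring_entry)
    {
        bool kick_worker = false;

        pthread_mutex_lock(&interrupt_lock);
        if (interrupts_active
            && interrupt_is_wanted(ih_ring_entry)
            && enqueue_ih_ring_entry(ih_ring_entry))
            kick_worker = true;             /* real code: schedule_work(&kfd->interrupt_work) */
        pthread_mutex_unlock(&interrupt_lock);

        if (kick_worker)
            puts("worker scheduled");
    }

    int main(void)
    {
        unsigned char entry[ENTRY_SIZE] = { 0 };

        interrupt_sketch(entry);
        return 0;
    }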
402 static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size, in kfd_gtt_sa_init() argument
407 BUG_ON(!kfd); in kfd_gtt_sa_init()
408 BUG_ON(!kfd->gtt_mem); in kfd_gtt_sa_init()
413 kfd->gtt_sa_chunk_size = chunk_size; in kfd_gtt_sa_init()
414 kfd->gtt_sa_num_of_chunks = buf_size / chunk_size; in kfd_gtt_sa_init()
416 num_of_bits = kfd->gtt_sa_num_of_chunks / BITS_PER_BYTE; in kfd_gtt_sa_init()
419 kfd->gtt_sa_bitmap = kzalloc(num_of_bits, GFP_KERNEL); in kfd_gtt_sa_init()
421 if (!kfd->gtt_sa_bitmap) in kfd_gtt_sa_init()
425 kfd->gtt_sa_num_of_chunks, kfd->gtt_sa_bitmap); in kfd_gtt_sa_init()
427 mutex_init(&kfd->gtt_sa_lock); in kfd_gtt_sa_init()
433 static void kfd_gtt_sa_fini(struct kfd_dev *kfd) in kfd_gtt_sa_fini() argument
435 mutex_destroy(&kfd->gtt_sa_lock); in kfd_gtt_sa_fini()
436 kfree(kfd->gtt_sa_bitmap); in kfd_gtt_sa_fini()
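kfd_gtt_sa_init() carves the GTT buffer allocated earlier into fixed-size chunks (512 bytes in the call at line 261) and tracks them with one bit per chunk: gtt_sa_num_of_chunks = buf_size / chunk_size, and the bitmap itself occupies num_of_chunks / BITS_PER_BYTE bytes. A standalone sketch of that sizing, with calloc standing in for kzalloc and an illustrative buffer size:

    #include <stdlib.h>
    #include <stdio.h>

    #define BITS_PER_BYTE 8

    int main(void)
    {
        unsigned int buf_size = 4 * 1024 * 1024;   /* illustrative GTT buffer size */
        unsigned int chunk_size = 512;             /* matches kfd_gtt_sa_init(kfd, size, 512) */
        unsigned int num_of_chunks = buf_size / chunk_size;

        /* one bit per chunk, so the bitmap needs num_of_chunks / BITS_PER_BYTE bytes */
        size_t bitmap_bytes = num_of_chunks / BITS_PER_BYTE;
        unsigned char *bitmap = calloc(bitmap_bytes, 1);   /* zeroed, like kzalloc */

        if (!bitmap)
            return 1;

        printf("%u chunks tracked by a %zu-byte bitmap\n", num_of_chunks, bitmap_bytes);
        free(bitmap);
        return 0;
    }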
453 int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size, in kfd_gtt_sa_allocate() argument
458 BUG_ON(!kfd); in kfd_gtt_sa_allocate()
463 if (size > kfd->gtt_sa_num_of_chunks * kfd->gtt_sa_chunk_size) in kfd_gtt_sa_allocate()
474 mutex_lock(&kfd->gtt_sa_lock); in kfd_gtt_sa_allocate()
478 found = find_next_zero_bit(kfd->gtt_sa_bitmap, in kfd_gtt_sa_allocate()
479 kfd->gtt_sa_num_of_chunks, in kfd_gtt_sa_allocate()
485 if (found == kfd->gtt_sa_num_of_chunks) in kfd_gtt_sa_allocate()
492 kfd->gtt_start_gpu_addr, in kfd_gtt_sa_allocate()
494 kfd->gtt_sa_chunk_size); in kfd_gtt_sa_allocate()
496 kfd->gtt_start_cpu_ptr, in kfd_gtt_sa_allocate()
498 kfd->gtt_sa_chunk_size); in kfd_gtt_sa_allocate()
504 if (size <= kfd->gtt_sa_chunk_size) { in kfd_gtt_sa_allocate()
506 set_bit(found, kfd->gtt_sa_bitmap); in kfd_gtt_sa_allocate()
511 cur_size = size - kfd->gtt_sa_chunk_size; in kfd_gtt_sa_allocate()
514 find_next_zero_bit(kfd->gtt_sa_bitmap, in kfd_gtt_sa_allocate()
515 kfd->gtt_sa_num_of_chunks, ++found); in kfd_gtt_sa_allocate()
529 if (found == kfd->gtt_sa_num_of_chunks) in kfd_gtt_sa_allocate()
533 if (cur_size <= kfd->gtt_sa_chunk_size) in kfd_gtt_sa_allocate()
536 cur_size -= kfd->gtt_sa_chunk_size; in kfd_gtt_sa_allocate()
547 set_bit(found, kfd->gtt_sa_bitmap); in kfd_gtt_sa_allocate()
550 mutex_unlock(&kfd->gtt_sa_lock); in kfd_gtt_sa_allocate()
555 mutex_unlock(&kfd->gtt_sa_lock); in kfd_gtt_sa_allocate()
560 int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj) in kfd_gtt_sa_free() argument
564 BUG_ON(!kfd); in kfd_gtt_sa_free()
573 mutex_lock(&kfd->gtt_sa_lock); in kfd_gtt_sa_free()
579 clear_bit(bit, kfd->gtt_sa_bitmap); in kfd_gtt_sa_free()
581 mutex_unlock(&kfd->gtt_sa_lock); in kfd_gtt_sa_free()
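kfd_gtt_sa_allocate() is a first-fit sub-allocator over that bitmap: under gtt_sa_lock it searches for a free chunk with find_next_zero_bit(), derives the GPU address and CPU pointer from the chunk index times gtt_sa_chunk_size (fragments 492-498), and for requests larger than one chunk keeps extending the run of free chunks before marking them all used; kfd_gtt_sa_free() simply clears the same bits under the lock. Below is a simplified, standalone version of the same first-fit run search and free; it uses a byte-per-chunk map and hypothetical sa_allocate()/sa_free() names instead of the kernel bitmap helpers, so it illustrates the technique rather than reproducing the driver code:

    #include <pthread.h>
    #include <stdio.h>
    #include <string.h>

    #define CHUNK_SIZE  512
    #define NUM_CHUNKS  64

    static unsigned char chunk_used[NUM_CHUNKS];   /* byte per chunk, instead of bit ops */
    static pthread_mutex_t sa_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Returns the first chunk of the allocation, or -1 if no contiguous run fits. */
    static int sa_allocate(unsigned int size, unsigned long *gpu_off)
    {
        unsigned int needed = (size + CHUNK_SIZE - 1) / CHUNK_SIZE;
        unsigned int run = 0;
        int start = -1;

        if (needed == 0 || needed > NUM_CHUNKS)
            return -1;

        pthread_mutex_lock(&sa_lock);
        for (int i = 0; i < NUM_CHUNKS; i++) {
            if (chunk_used[i]) {
                run = 0;                    /* run broken: restart the search */
                continue;
            }
            if (run == 0)
                start = i;
            if (++run == needed) {
                memset(&chunk_used[start], 1, needed);  /* mark the whole run used */
                pthread_mutex_unlock(&sa_lock);
                /* real code: gpu_addr = gtt_start_gpu_addr + start * gtt_sa_chunk_size */
                *gpu_off = (unsigned long)start * CHUNK_SIZE;
                return start;
            }
        }
        pthread_mutex_unlock(&sa_lock);
        return -1;
    }

    /* Clears the chunks again, like clear_bit() under gtt_sa_lock in kfd_gtt_sa_free(). */
    static void sa_free(int start, unsigned int size)
    {
        unsigned int needed = (size + CHUNK_SIZE - 1) / CHUNK_SIZE;

        pthread_mutex_lock(&sa_lock);
        memset(&chunk_used[start], 0, needed);
        pthread_mutex_unlock(&sa_lock);
    }

    int main(void)
    {
        unsigned long off;
        int a = sa_allocate(3 * CHUNK_SIZE, &off);   /* needs three contiguous chunks */

        printf("allocated chunks %d..%d at offset %lu\n", a, a + 2, off);
        sa_free(a, 3 * CHUNK_SIZE);
        return 0;
    }

The kernel version differs mainly in operating on a real bitmap with find_next_zero_bit()/set_bit()/clear_bit() and in returning both the GPU address and the CPU pointer derived from gtt_start_gpu_addr and gtt_start_cpu_ptr.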