Lines Matching refs:uvd
100 INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler); in amdgpu_uvd_sw_init()
136 r = request_firmware(&adev->uvd.fw, fw_name, adev->dev); in amdgpu_uvd_sw_init()
143 r = amdgpu_ucode_validate(adev->uvd.fw); in amdgpu_uvd_sw_init()
147 release_firmware(adev->uvd.fw); in amdgpu_uvd_sw_init()
148 adev->uvd.fw = NULL; in amdgpu_uvd_sw_init()
152 hdr = (const struct common_firmware_header *)adev->uvd.fw->data; in amdgpu_uvd_sw_init()
159 adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) | in amdgpu_uvd_sw_init()
167 NULL, NULL, &adev->uvd.vcpu_bo); in amdgpu_uvd_sw_init()
173 r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false); in amdgpu_uvd_sw_init()
175 amdgpu_bo_unref(&adev->uvd.vcpu_bo); in amdgpu_uvd_sw_init()
180 r = amdgpu_bo_pin(adev->uvd.vcpu_bo, AMDGPU_GEM_DOMAIN_VRAM, in amdgpu_uvd_sw_init()
181 &adev->uvd.gpu_addr); in amdgpu_uvd_sw_init()
183 amdgpu_bo_unreserve(adev->uvd.vcpu_bo); in amdgpu_uvd_sw_init()
184 amdgpu_bo_unref(&adev->uvd.vcpu_bo); in amdgpu_uvd_sw_init()
189 r = amdgpu_bo_kmap(adev->uvd.vcpu_bo, &adev->uvd.cpu_addr); in amdgpu_uvd_sw_init()
195 amdgpu_bo_unreserve(adev->uvd.vcpu_bo); in amdgpu_uvd_sw_init()
198 atomic_set(&adev->uvd.handles[i], 0); in amdgpu_uvd_sw_init()
199 adev->uvd.filp[i] = NULL; in amdgpu_uvd_sw_init()
204 adev->uvd.address_64_bit = true; in amdgpu_uvd_sw_init()
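The amdgpu_uvd_sw_init() hits above (from the amdgpu driver's UVD helper code, amdgpu_uvd.c) trace the bring-up order: request and validate the UVD firmware, derive a packed version word from the firmware header, create, pin, and kmap the VCPU buffer object in VRAM, then clear the per-session handle table. Below is a minimal, runnable sketch of the version packing on line 159; the major/minor shift positions come from the hit itself, while the meaning of the low bits (a family id in full kernel sources) is an assumption here, not something the listing confirms:

#include <stdint.h>
#include <stdio.h>

/* Sketch of the fw_version packing at line 159:
 * (version_major << 24) | (version_minor << 16) | ...
 * The low bits carrying a family id is an assumption. */
static uint32_t uvd_pack_fw_version(uint32_t major, uint32_t minor,
                                    uint32_t family_id)
{
        return (major << 24) | (minor << 16) | family_id;
}

int main(void)
{
        uint32_t v = uvd_pack_fw_version(1, 64, 8);
        printf("fw_version=0x%08x (major=%u minor=%u)\n",
               v, v >> 24, (v >> 16) & 0xff);
        return 0;
}

Packing all fields into one word lets the driver hand userspace a single comparable fw_version value.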
213 if (adev->uvd.vcpu_bo == NULL) in amdgpu_uvd_sw_fini()
216 r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false); in amdgpu_uvd_sw_fini()
218 amdgpu_bo_kunmap(adev->uvd.vcpu_bo); in amdgpu_uvd_sw_fini()
219 amdgpu_bo_unpin(adev->uvd.vcpu_bo); in amdgpu_uvd_sw_fini()
220 amdgpu_bo_unreserve(adev->uvd.vcpu_bo); in amdgpu_uvd_sw_fini()
223 amdgpu_bo_unref(&adev->uvd.vcpu_bo); in amdgpu_uvd_sw_fini()
225 amdgpu_ring_fini(&adev->uvd.ring); in amdgpu_uvd_sw_fini()
227 release_firmware(adev->uvd.fw); in amdgpu_uvd_sw_fini()
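amdgpu_uvd_sw_fini() undoes the init path in strict reverse order: kunmap before unpin (lines 218-219), unpin before the final unref (line 223), then the ring and firmware are released. A userspace C analogue of that unwind follows; fake_bo and its helpers are purely hypothetical stand-ins for the kernel BO API, kept only to show the ordering:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for a pinned, mapped buffer object. */
struct fake_bo { void *mem; int mapped, pinned; };

static void bo_kunmap(struct fake_bo *bo) { bo->mapped = 0; }
static void bo_unpin(struct fake_bo *bo)  { bo->pinned = 0; }
static void bo_unref(struct fake_bo **bo)
{
        free((*bo)->mem);
        free(*bo);
        *bo = NULL;
}

int main(void)
{
        struct fake_bo *bo = calloc(1, sizeof(*bo));
        bo->mem = malloc(4096);
        bo->mapped = bo->pinned = 1;

        bo_kunmap(bo);  /* undo the mapping first (cf. line 218) */
        bo_unpin(bo);   /* then drop the pin (cf. line 219)      */
        bo_unref(&bo);  /* last reference goes (cf. line 223)    */
        printf("teardown done, bo=%p\n", (void *)bo);
        return 0;
}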
234 struct amdgpu_ring *ring = &adev->uvd.ring; in amdgpu_uvd_suspend()
237 if (adev->uvd.vcpu_bo == NULL) in amdgpu_uvd_suspend()
241 uint32_t handle = atomic_read(&adev->uvd.handles[i]); in amdgpu_uvd_suspend()
256 adev->uvd.filp[i] = NULL; in amdgpu_uvd_suspend()
257 atomic_set(&adev->uvd.handles[i], 0); in amdgpu_uvd_suspend()
271 if (adev->uvd.vcpu_bo == NULL) in amdgpu_uvd_resume()
274 hdr = (const struct common_firmware_header *)adev->uvd.fw->data; in amdgpu_uvd_resume()
276 memcpy(adev->uvd.cpu_addr, (adev->uvd.fw->data) + offset, in amdgpu_uvd_resume()
277 (adev->uvd.fw->size) - offset); in amdgpu_uvd_resume()
279 cancel_delayed_work_sync(&adev->uvd.idle_work); in amdgpu_uvd_resume()
281 size = amdgpu_bo_size(adev->uvd.vcpu_bo); in amdgpu_uvd_resume()
283 ptr = adev->uvd.cpu_addr; in amdgpu_uvd_resume()
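amdgpu_uvd_resume() re-uploads the firmware image into the kmapped VCPU buffer rather than trusting VRAM contents to survive suspend: the copy source is fw->data plus a header-derived offset and the length is fw->size minus that offset (lines 276-277). A runnable sketch of that offset copy, with a hypothetical header layout standing in for the kernel's common_firmware_header:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for the firmware blob: the only property used
 * is that the payload starts at a header-declared byte offset. */
struct fw_blob {
        uint32_t ucode_array_offset_bytes;
        uint8_t  payload[60];
};

int main(void)
{
        struct fw_blob blob = { offsetof(struct fw_blob, payload), {0} };
        uint8_t vcpu[sizeof(blob)];

        memset(blob.payload, 0xab, sizeof(blob.payload));

        /* Mirror of lines 276-277: copy everything past the header. */
        size_t offset = blob.ucode_array_offset_bytes;
        memcpy(vcpu, (const uint8_t *)&blob + offset, sizeof(blob) - offset);

        printf("copied %zu payload bytes, first=0x%02x\n",
               sizeof(blob) - offset, vcpu[0]);
        return 0;
}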
293 struct amdgpu_ring *ring = &adev->uvd.ring; in amdgpu_uvd_free_handles()
297 uint32_t handle = atomic_read(&adev->uvd.handles[i]); in amdgpu_uvd_free_handles()
298 if (handle != 0 && adev->uvd.filp[i] == filp) { in amdgpu_uvd_free_handles()
312 adev->uvd.filp[i] = NULL; in amdgpu_uvd_free_handles()
313 atomic_set(&adev->uvd.handles[i], 0); in amdgpu_uvd_free_handles()
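The loops in amdgpu_uvd_suspend() (lines 241-257) and amdgpu_uvd_free_handles() (lines 297-313) share one pattern: walk the handle table, read each slot atomically, and for live handles owned by the departing file clear the owner and reset the slot to 0 (the session-destroy step between read and reset is not visible in these hits). A C11 userspace model of that cleanup walk, with illustrative names:

#include <stdatomic.h>
#include <stdio.h>

#define MAX_HANDLES 16

static atomic_uint handles[MAX_HANDLES];
static void *filps[MAX_HANDLES];

/* Model of the free_handles() loop: any live handle owned by the
 * departing file gets its slot cleared. */
static void free_handles_for(void *filp)
{
        for (int i = 0; i < MAX_HANDLES; ++i) {
                unsigned int handle = atomic_load(&handles[i]);
                if (handle != 0 && filps[i] == filp) {
                        filps[i] = NULL;
                        atomic_store(&handles[i], 0);
                        printf("dropped handle %u in slot %d\n", handle, i);
                }
        }
}

int main(void)
{
        int me;
        atomic_store(&handles[3], 42);
        filps[3] = &me;
        free_handles_for(&me);
        return 0;
}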
353 if (!ctx->parser->adev->uvd.address_64_bit) { in amdgpu_uvd_cs_pass1()
563 if (atomic_read(&adev->uvd.handles[i]) == handle) { in amdgpu_uvd_cs_msg()
568 if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) { in amdgpu_uvd_cs_msg()
569 adev->uvd.filp[i] = ctx->parser->filp; in amdgpu_uvd_cs_msg()
586 if (atomic_read(&adev->uvd.handles[i]) == handle) { in amdgpu_uvd_cs_msg()
587 if (adev->uvd.filp[i] != ctx->parser->filp) { in amdgpu_uvd_cs_msg()
601 atomic_cmpxchg(&adev->uvd.handles[i], handle, 0); in amdgpu_uvd_cs_msg()
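The amdgpu_uvd_cs_msg() hits show lock-free session bookkeeping: a create message claims a free slot with atomic_cmpxchg(&handles[i], 0, handle), which in kernel semantics returns the slot's old value, so the ! test on line 568 succeeds for exactly one caller per slot, and the owning filp is recorded right after (line 569); a destroy message uses the same primitive to drop the slot race-safely (line 601). A userspace C11 model of the claim, using atomic_compare_exchange_strong in place of the kernel primitive:

#include <stdatomic.h>
#include <stdio.h>

#define MAX_HANDLES 16

static atomic_uint handles[MAX_HANDLES];

/* Model of the slot claim at line 568: the compare-and-swap from 0 to
 * the new handle succeeds for exactly one caller per slot. */
static int claim_slot(unsigned int handle)
{
        for (int i = 0; i < MAX_HANDLES; ++i) {
                unsigned int expected = 0;
                if (atomic_compare_exchange_strong(&handles[i], &expected,
                                                   handle))
                        return i;       /* this caller owns slot i */
        }
        return -1;                      /* table full */
}

int main(void)
{
        printf("handle 7 -> slot %d\n", claim_slot(7));
        printf("handle 9 -> slot %d\n", claim_slot(9));
        return 0;
}

Using compare-and-swap instead of a lock keeps the command-stream parser from serializing independent submissions on a mutex.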
671 if (!ctx->parser->adev->uvd.address_64_bit) { in amdgpu_uvd_cs_pass2()
679 (start >> 28) != (ctx->parser->adev->uvd.gpu_addr >> 28)) { in amdgpu_uvd_cs_pass2()
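Line 679 encodes an addressing constraint: when the UVD block lacks 64-bit addressing (the address_64_bit checks at lines 353, 671, and 862), every buffer it touches must sit in the same 256MB window as the firmware, which is exactly what comparing the two addresses shifted right by 28 bits checks (1 << 28 bytes = 256MB). A tiny runnable version of that check:

#include <stdint.h>
#include <stdio.h>

/* Mirror of the check at line 679: two addresses share a 256MB window
 * iff their bits above bit 27 agree. */
static int same_256mb_segment(uint64_t a, uint64_t b)
{
        return (a >> 28) == (b >> 28);
}

int main(void)
{
        uint64_t fw = 0x0000000010000000ull;    /* hypothetical gpu_addr */
        printf("%d %d\n",
               same_256mb_segment(fw, fw + 0x00100000),  /* same window */
               same_256mb_segment(fw, fw + 0x10000000)); /* next window */
        return 0;
}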
862 if (!bo->adev->uvd.address_64_bit) { in amdgpu_uvd_send_msg()
1015 container_of(work, struct amdgpu_device, uvd.idle_work.work); in amdgpu_uvd_idle_work_handler()
1018 fences = amdgpu_fence_count_emitted(&adev->uvd.ring); in amdgpu_uvd_idle_work_handler()
1021 if (atomic_read(&adev->uvd.handles[i])) in amdgpu_uvd_idle_work_handler()
1031 schedule_delayed_work(&adev->uvd.idle_work, in amdgpu_uvd_idle_work_handler()
1038 bool set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work); in amdgpu_uvd_note_usage()
1039 set_clocks &= schedule_delayed_work(&adev->uvd.idle_work, in amdgpu_uvd_note_usage()
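The last four hits form one power-management loop. The idle worker counts fences still emitted on the UVD ring and scans for open handles (lines 1018-1021); only when both are zero does it gate clocks, otherwise it re-arms itself (line 1031). amdgpu_uvd_note_usage() then cancels and re-schedules that worker on every submission: cancel_delayed_work_sync() returns true when the work was still pending, so set_clocks on line 1038 is true exactly when the worker was not pending, i.e. when it may already have run and gated the clocks. A userspace model of that bookkeeping, with 'pending' as a hypothetical stand-in for the kernel work-queue state:

#include <stdbool.h>
#include <stdio.h>

static bool pending;    /* stand-in for the queued idle_work item */

static bool cancel_delayed_work_sync_model(void)
{
        bool was_pending = pending;
        pending = false;
        return was_pending;     /* true if work had been queued */
}

static bool schedule_delayed_work_model(void)
{
        bool newly_queued = !pending;
        pending = true;
        return newly_queued;    /* true if this call queued the work */
}

static void note_usage(void)
{
        /* Lines 1038-1039: raise clocks only if the idle worker was not
         * pending and the re-schedule actually queued new work. */
        bool set_clocks = !cancel_delayed_work_sync_model();
        set_clocks &= schedule_delayed_work_model();
        printf("set_clocks=%d\n", set_clocks);
}

int main(void)
{
        note_usage();   /* first use after idle: prints set_clocks=1 */
        note_usage();   /* back-to-back use: worker pending, prints 0 */
        return 0;
}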