Lines matching refs: adev (drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c)
Every line below references the amdgpu_device pointer adev; lines that do not match the filter are elided, so the source line numbers are not contiguous.

89 static void amdgpu_uvd_note_usage(struct amdgpu_device *adev);
92 int amdgpu_uvd_sw_init(struct amdgpu_device *adev) in amdgpu_uvd_sw_init() argument
100 INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler); in amdgpu_uvd_sw_init()
102 switch (adev->asic_type) { in amdgpu_uvd_sw_init()
136 r = request_firmware(&adev->uvd.fw, fw_name, adev->dev); in amdgpu_uvd_sw_init()
138 dev_err(adev->dev, "amdgpu_uvd: Can't load firmware \"%s\"\n", in amdgpu_uvd_sw_init()
143 r = amdgpu_ucode_validate(adev->uvd.fw); in amdgpu_uvd_sw_init()
145 dev_err(adev->dev, "amdgpu_uvd: Can't validate firmware \"%s\"\n", in amdgpu_uvd_sw_init()
147 release_firmware(adev->uvd.fw); in amdgpu_uvd_sw_init()
148 adev->uvd.fw = NULL; in amdgpu_uvd_sw_init()
152 hdr = (const struct common_firmware_header *)adev->uvd.fw->data; in amdgpu_uvd_sw_init()
159 adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) | in amdgpu_uvd_sw_init()
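Line 159 is cut off by the ref filter. A minimal sketch of the packing it performs, assuming the low byte group carries the family id as in the surrounding kernel code of this vintage (family_id itself is not visible in this listing):

    /* Pack the decoded firmware version into one 32-bit value:
     * major in bits 31..24, minor in bits 23..16, family id in 15..8. */
    adev->uvd.fw_version = (version_major << 24) |
                           (version_minor << 16) |
                           (family_id << 8);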
164 r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true, in amdgpu_uvd_sw_init()
167 NULL, NULL, &adev->uvd.vcpu_bo); in amdgpu_uvd_sw_init()
169 dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r); in amdgpu_uvd_sw_init()
173 r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false); in amdgpu_uvd_sw_init()
175 amdgpu_bo_unref(&adev->uvd.vcpu_bo); in amdgpu_uvd_sw_init()
176 dev_err(adev->dev, "(%d) failed to reserve UVD bo\n", r); in amdgpu_uvd_sw_init()
180 r = amdgpu_bo_pin(adev->uvd.vcpu_bo, AMDGPU_GEM_DOMAIN_VRAM, in amdgpu_uvd_sw_init()
181 &adev->uvd.gpu_addr); in amdgpu_uvd_sw_init()
183 amdgpu_bo_unreserve(adev->uvd.vcpu_bo); in amdgpu_uvd_sw_init()
184 amdgpu_bo_unref(&adev->uvd.vcpu_bo); in amdgpu_uvd_sw_init()
185 dev_err(adev->dev, "(%d) UVD bo pin failed\n", r); in amdgpu_uvd_sw_init()
189 r = amdgpu_bo_kmap(adev->uvd.vcpu_bo, &adev->uvd.cpu_addr); in amdgpu_uvd_sw_init()
191 dev_err(adev->dev, "(%d) UVD map failed\n", r); in amdgpu_uvd_sw_init()
195 amdgpu_bo_unreserve(adev->uvd.vcpu_bo); in amdgpu_uvd_sw_init()
198 atomic_set(&adev->uvd.handles[i], 0); in amdgpu_uvd_sw_init()
199 adev->uvd.filp[i] = NULL; in amdgpu_uvd_sw_init()
203 if (!amdgpu_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0)) in amdgpu_uvd_sw_init()
204 adev->uvd.address_64_bit = true; in amdgpu_uvd_sw_init()
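Lines 164-195 are the standard amdgpu sequence for a kernel-owned, pinned, CPU-mapped buffer: create, reserve, pin, kmap, unreserve. Lines 203-204 then record whether the hardware can take 64-bit addresses (amdgpu_ip_block_version_cmp() returns 0 when the UVD block is at least version 5.0). A condensed happy-path sketch; the domain and flags arguments to amdgpu_bo_create() are elided in the listing, so VRAM with no special flags is an assumption here:

    struct amdgpu_bo *bo;
    uint64_t gpu_addr;
    void *cpu_addr;
    int r;

    r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true,
                         AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, NULL, &bo);
    if (r)
        return r;
    r = amdgpu_bo_reserve(bo, false);      /* lock before pin/kmap */
    if (!r)
        r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM, &gpu_addr);
    if (!r)
        r = amdgpu_bo_kmap(bo, &cpu_addr); /* CPU-visible mapping  */
    amdgpu_bo_unreserve(bo);               /* unlock; stays pinned */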
209 int amdgpu_uvd_sw_fini(struct amdgpu_device *adev) in amdgpu_uvd_sw_fini() argument
213 if (adev->uvd.vcpu_bo == NULL) in amdgpu_uvd_sw_fini()
216 r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false); in amdgpu_uvd_sw_fini()
218 amdgpu_bo_kunmap(adev->uvd.vcpu_bo); in amdgpu_uvd_sw_fini()
219 amdgpu_bo_unpin(adev->uvd.vcpu_bo); in amdgpu_uvd_sw_fini()
220 amdgpu_bo_unreserve(adev->uvd.vcpu_bo); in amdgpu_uvd_sw_fini()
223 amdgpu_bo_unref(&adev->uvd.vcpu_bo); in amdgpu_uvd_sw_fini()
225 amdgpu_ring_fini(&adev->uvd.ring); in amdgpu_uvd_sw_fini()
227 release_firmware(adev->uvd.fw); in amdgpu_uvd_sw_fini()
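Teardown (lines 216-223) mirrors init in reverse, and the kunmap/unpin pair must run under the same reservation lock that guarded pin/kmap:

    r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false);
    if (!r) {
        amdgpu_bo_kunmap(adev->uvd.vcpu_bo);    /* undo kmap */
        amdgpu_bo_unpin(adev->uvd.vcpu_bo);     /* undo pin  */
        amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
    }
    amdgpu_bo_unref(&adev->uvd.vcpu_bo);        /* drop the last ref */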
232 int amdgpu_uvd_suspend(struct amdgpu_device *adev) in amdgpu_uvd_suspend() argument
234 struct amdgpu_ring *ring = &adev->uvd.ring; in amdgpu_uvd_suspend()
237 if (adev->uvd.vcpu_bo == NULL) in amdgpu_uvd_suspend()
241 uint32_t handle = atomic_read(&adev->uvd.handles[i]); in amdgpu_uvd_suspend()
245 amdgpu_uvd_note_usage(adev); in amdgpu_uvd_suspend()
256 adev->uvd.filp[i] = NULL; in amdgpu_uvd_suspend()
257 atomic_set(&adev->uvd.handles[i], 0); in amdgpu_uvd_suspend()
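The filter hides lines 246-255, where suspend presumably closes each still-open session before power-off by sending a destroy message down the ring and waiting on its fence. A hedged sketch of that elided body (struct fence and the helper signature are assumptions based on kernels of this period):

    struct fence *fence;

    r = amdgpu_uvd_get_destroy_msg(ring, handle, &fence);
    if (r) {
        DRM_ERROR("Error destroying UVD (%d)!\n", r);
        continue;
    }
    fence_wait(fence, false);  /* block until the destroy lands */
    fence_put(fence);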
264 int amdgpu_uvd_resume(struct amdgpu_device *adev) in amdgpu_uvd_resume() argument
271 if (adev->uvd.vcpu_bo == NULL) in amdgpu_uvd_resume()
274 hdr = (const struct common_firmware_header *)adev->uvd.fw->data; in amdgpu_uvd_resume()
276 memcpy(adev->uvd.cpu_addr, (adev->uvd.fw->data) + offset, in amdgpu_uvd_resume()
277 (adev->uvd.fw->size) - offset); in amdgpu_uvd_resume()
279 cancel_delayed_work_sync(&adev->uvd.idle_work); in amdgpu_uvd_resume()
281 size = amdgpu_bo_size(adev->uvd.vcpu_bo); in amdgpu_uvd_resume()
283 ptr = adev->uvd.cpu_addr; in amdgpu_uvd_resume()
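Resume copies the firmware image back into the pinned VCPU buffer, skipping the ucode header. The offset computation on line 275 is filtered out; assuming the usual common_firmware_header field, it amounts to:

    offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
    memcpy(adev->uvd.cpu_addr, adev->uvd.fw->data + offset,
           adev->uvd.fw->size - offset);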
291 void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp) in amdgpu_uvd_free_handles() argument
293 struct amdgpu_ring *ring = &adev->uvd.ring; in amdgpu_uvd_free_handles()
297 uint32_t handle = atomic_read(&adev->uvd.handles[i]); in amdgpu_uvd_free_handles()
298 if (handle != 0 && adev->uvd.filp[i] == filp) { in amdgpu_uvd_free_handles()
301 amdgpu_uvd_note_usage(adev); in amdgpu_uvd_free_handles()
312 adev->uvd.filp[i] = NULL; in amdgpu_uvd_free_handles()
313 atomic_set(&adev->uvd.handles[i], 0); in amdgpu_uvd_free_handles()
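amdgpu_uvd_free_handles() runs when a DRM file closes and is the per-client variant of the suspend loop: handles are device-global, but the filp array records which file opened each one, so only matching slots are torn down (the destroy-and-wait body at lines 302-311 is elided here and presumably matches the suspend sketch above):

    if (handle != 0 && adev->uvd.filp[i] == filp) {
        /* ... send destroy msg and wait, as in suspend ... */
        adev->uvd.filp[i] = NULL;              /* forget the owner */
        atomic_set(&adev->uvd.handles[i], 0);  /* free the slot    */
    }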
353 if (!ctx->parser->adev->uvd.address_64_bit) { in amdgpu_uvd_cs_pass1()
522 struct amdgpu_device *adev = ctx->parser->adev; in amdgpu_uvd_cs_msg() local
563 if (atomic_read(&adev->uvd.handles[i]) == handle) { in amdgpu_uvd_cs_msg()
568 if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) { in amdgpu_uvd_cs_msg()
569 adev->uvd.filp[i] = ctx->parser->filp; in amdgpu_uvd_cs_msg()
586 if (atomic_read(&adev->uvd.handles[i]) == handle) { in amdgpu_uvd_cs_msg()
587 if (adev->uvd.filp[i] != ctx->parser->filp) { in amdgpu_uvd_cs_msg()
601 atomic_cmpxchg(&adev->uvd.handles[i], handle, 0); in amdgpu_uvd_cs_msg()
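Lines 563-601 implement lock-free session bookkeeping over a fixed handle array. atomic_cmpxchg() returns the slot's old value, so a return of 0 means a create message claimed a free slot; a destroy message must come from the owning file and releases the slot the same way. A sketch of both halves, assuming the AMDGPU_MAX_UVD_HANDLES bound used by the other loops in this file:

    /* create: claim the first free slot */
    for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
        if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) {
            adev->uvd.filp[i] = ctx->parser->filp; /* record owner */
            return 0;
        }
    }

    /* destroy: verify ownership, then release the slot */
    for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
        if (atomic_read(&adev->uvd.handles[i]) == handle) {
            if (adev->uvd.filp[i] != ctx->parser->filp)
                return -EINVAL;                    /* not the owner */
            atomic_cmpxchg(&adev->uvd.handles[i], handle, 0);
            return 0;
        }
    }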
671 if (!ctx->parser->adev->uvd.address_64_bit) { in amdgpu_uvd_cs_pass2()
679 (start >> 28) != (ctx->parser->adev->uvd.gpu_addr >> 28)) { in amdgpu_uvd_cs_pass2()
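Lines 671-679 enforce the limit flagged at line 204: without 64-bit addressing, message and feedback buffers must sit in the same 256 MB window (2^28 bytes) as the firmware, so only the address bits above bit 28 are compared. Sketch (error string illustrative):

    /* buffer must share the firmware's 256MB segment */
    if ((start >> 28) != (ctx->parser->adev->uvd.gpu_addr >> 28)) {
        DRM_ERROR("msg/fb buffer out of 256MB segment!\n");
        return -EINVAL;
    }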
826 amdgpu_uvd_note_usage(ctx.parser->adev); in amdgpu_uvd_ring_parse_cs()
834 amdgpu_ib_free(job->adev, job->ibs); in amdgpu_uvd_free_job()
848 struct amdgpu_device *adev = ring->adev; in amdgpu_uvd_send_msg() local
862 if (!bo->adev->uvd.address_64_bit) { in amdgpu_uvd_send_msg()
890 r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1, in amdgpu_uvd_send_msg()
906 amdgpu_ib_free(ring->adev, ib); in amdgpu_uvd_send_msg()
910 amdgpu_ib_free(ring->adev, ib); in amdgpu_uvd_send_msg()
924 struct amdgpu_device *adev = ring->adev; in amdgpu_uvd_get_create_msg() local
929 r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true, in amdgpu_uvd_get_create_msg()
973 struct amdgpu_device *adev = ring->adev; in amdgpu_uvd_get_destroy_msg() local
978 r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true, in amdgpu_uvd_get_destroy_msg()
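Both message helpers allocate a small 1024-byte buffer, fill in a UVD command message, and hand it to amdgpu_uvd_send_msg(). For reference, a destroy message is only four words plus zero padding; the exact values below follow the long-standing radeon/amdgpu layout and should be read as an assumption, not documentation:

    /* stitch together a UVD destroy msg (layout assumed) */
    msg[0] = cpu_to_le32(0x00000de4);  /* message size          */
    msg[1] = cpu_to_le32(0x00000002);  /* message type: destroy */
    msg[2] = cpu_to_le32(handle);      /* session handle        */
    msg[3] = cpu_to_le32(0x00000000);
    for (i = 4; i < 1024 / 4; ++i)
        msg[i] = cpu_to_le32(0x0);     /* zero the rest         */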
1014 struct amdgpu_device *adev = in amdgpu_uvd_idle_work_handler() local
1018 fences = amdgpu_fence_count_emitted(&adev->uvd.ring); in amdgpu_uvd_idle_work_handler()
1021 if (atomic_read(&adev->uvd.handles[i])) in amdgpu_uvd_idle_work_handler()
1025 if (adev->pm.dpm_enabled) { in amdgpu_uvd_idle_work_handler()
1026 amdgpu_dpm_enable_uvd(adev, false); in amdgpu_uvd_idle_work_handler()
1028 amdgpu_asic_set_uvd_clocks(adev, 0, 0); in amdgpu_uvd_idle_work_handler()
1031 schedule_delayed_work(&adev->uvd.idle_work, in amdgpu_uvd_idle_work_handler()
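The idle handler polls rather than tracks activity: if the ring has no fences outstanding and no session handles remain open, the block is powered down, via DPM when available, otherwise by zeroing the UVD clocks; if not, the work re-arms itself. Condensed sketch (the timeout constant name is assumed):

    if (fences == 0 && handles == 0) {
        if (adev->pm.dpm_enabled)
            amdgpu_dpm_enable_uvd(adev, false);  /* power-gate */
        else
            amdgpu_asic_set_uvd_clocks(adev, 0, 0);
    } else {
        schedule_delayed_work(&adev->uvd.idle_work,
                              msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
    }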
1036 static void amdgpu_uvd_note_usage(struct amdgpu_device *adev) in amdgpu_uvd_note_usage() argument
1038 bool set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work); in amdgpu_uvd_note_usage()
1039 set_clocks &= schedule_delayed_work(&adev->uvd.idle_work, in amdgpu_uvd_note_usage()
1043 if (adev->pm.dpm_enabled) { in amdgpu_uvd_note_usage()
1044 amdgpu_dpm_enable_uvd(adev, true); in amdgpu_uvd_note_usage()
1046 amdgpu_asic_set_uvd_clocks(adev, 53300, 40000); in amdgpu_uvd_note_usage()
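note_usage() is the counterpart invoked on every submission (lines 245, 301, 826). cancel_delayed_work_sync() returns false when no idle work was pending, i.e. the handler may already have powered the block down, so the clocks are raised again; 53300/40000 are presumably 533 MHz vclk and 400 MHz dclk in the 10 kHz units these interfaces use. Full-helper sketch (timeout constant assumed as above):

    bool set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);
    set_clocks &= schedule_delayed_work(&adev->uvd.idle_work,
                      msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
    if (set_clocks) {
        if (adev->pm.dpm_enabled)
            amdgpu_dpm_enable_uvd(adev, true);   /* power up */
        else
            amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
    }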