s_job             979 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 	struct drm_sched_job *s_job;
s_job             983 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 	list_for_each_entry(s_job, &sched->ring_mirror_list, node) {
s_job             984 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 		fence = sched->ops->run_job(s_job);
s_job             993 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 	struct drm_sched_job *s_job;
s_job            1011 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 	list_for_each_entry(s_job, &sched->ring_mirror_list, node) {
s_job            1012 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 		job = to_amdgpu_job(s_job);
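
The two amdgpu_debugfs.c call sites above walk the scheduler's ring_mirror_list directly: each drm_sched_job still on the list is either handed back to the run_job() backend op or downcast with to_amdgpu_job() for driver-side bookkeeping. Below is a minimal sketch of that iteration pattern; the helper name resubmit_pending_jobs() is hypothetical, and the real debugfs code does more with the returned fence than this sketch does.

#include <drm/gpu_scheduler.h>
#include <linux/dma-fence.h>

/*
 * Hypothetical helper mirroring the amdgpu_debugfs.c hits above:
 * re-issue every job still tracked on the scheduler's mirror list.
 * Assumes the scheduler thread is parked and the list is stable,
 * as it is in the real preemption/recovery debugfs path.
 */
static void resubmit_pending_jobs(struct drm_gpu_scheduler *sched)
{
        struct drm_sched_job *s_job;
        struct dma_fence *fence;

        list_for_each_entry(s_job, &sched->ring_mirror_list, node) {
                /* Ask the driver backend to put the job on the ring again. */
                fence = sched->ops->run_job(s_job);

                /* This sketch simply drops the new hardware fence; the
                 * real code keeps it so completion is still observed. */
                dma_fence_put(fence);
        }
}
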
s_job              31 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c static void amdgpu_job_timedout(struct drm_sched_job *s_job)
s_job              33 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c 	struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
s_job              34 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c 	struct amdgpu_job *job = to_amdgpu_job(s_job);
s_job              39 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c 	if (amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
s_job              41 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c 			  s_job->sched->name);
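
The amdgpu_job_timedout() hits show the usual shape of a timedout_job backend op: recover the ring from s_job->sched, the driver job from the s_job, and the hardware fence from s_job->s_fence->parent, then try a cheap per-job recovery before escalating. The sketch below follows that shape for a hypothetical driver; my_job, my_soft_recover() and the log text are placeholders, not amdgpu symbols.

#include <drm/gpu_scheduler.h>
#include <drm/drm_print.h>

struct my_job {
        struct drm_sched_job base;      /* embedded scheduler job */
        unsigned int vmid;              /* driver-side context id */
};

/* Placeholder for a driver-specific lightweight recovery attempt. */
static bool my_soft_recover(struct drm_gpu_scheduler *sched,
                            unsigned int vmid, struct dma_fence *fence)
{
        return false;   /* pretend soft recovery is not available */
}

static void my_job_timedout(struct drm_sched_job *s_job)
{
        struct my_job *job = container_of(s_job, struct my_job, base);

        /* First try to kill only the hanging work ... */
        if (my_soft_recover(s_job->sched, job->vmid,
                            s_job->s_fence->parent)) {
                DRM_ERROR("ring %s timeout, but soft recovered\n",
                          s_job->sched->name);
                return;
        }

        /* ... otherwise escalate to a full scheduler/device reset
         * (drm_sched_stop/resubmit/start, sketched further below). */
}
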
s_job             118 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
s_job             120 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c 	struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
s_job             121 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c 	struct amdgpu_job *job = to_amdgpu_job(s_job);
s_job             123 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c 	drm_sched_job_cleanup(s_job);
s_job             125 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c 	amdgpu_ring_priority_put(ring, s_job->s_priority);
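
amdgpu_job_free_cb() shows the matching free_job op: call drm_sched_job_cleanup() on the embedded s_job to release the scheduler's fences and entity reference, then tear down whatever the driver attached to the job (in amdgpu's case, the ring priority boost recorded in s_job->s_priority). A pared-down sketch, reusing the hypothetical my_job type from the previous block:

#include <linux/slab.h>

static void my_job_free_cb(struct drm_sched_job *s_job)
{
        struct my_job *job = container_of(s_job, struct my_job, base);

        /* Release scheduler-side state first (scheduled/finished
         * fences, entity reference) ... */
        drm_sched_job_cleanup(s_job);

        /* ... then free the driver wrapper itself. */
        kfree(job);
}
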
s_job             269 drivers/gpu/drm/scheduler/sched_main.c static void drm_sched_job_begin(struct drm_sched_job *s_job)
s_job             271 drivers/gpu/drm/scheduler/sched_main.c 	struct drm_gpu_scheduler *sched = s_job->sched;
s_job             275 drivers/gpu/drm/scheduler/sched_main.c 	list_add_tail(&s_job->node, &sched->ring_mirror_list);
s_job             367 drivers/gpu/drm/scheduler/sched_main.c 	struct drm_sched_job *s_job, *tmp;
s_job             378 drivers/gpu/drm/scheduler/sched_main.c 	list_for_each_entry_safe_reverse(s_job, tmp, &sched->ring_mirror_list, node) {
s_job             379 drivers/gpu/drm/scheduler/sched_main.c 		if (s_job->s_fence->parent &&
s_job             380 drivers/gpu/drm/scheduler/sched_main.c 		    dma_fence_remove_callback(s_job->s_fence->parent,
s_job             381 drivers/gpu/drm/scheduler/sched_main.c 					      &s_job->cb)) {
s_job             389 drivers/gpu/drm/scheduler/sched_main.c 			list_del_init(&s_job->node);
s_job             398 drivers/gpu/drm/scheduler/sched_main.c 			dma_fence_wait(&s_job->s_fence->finished, false);
s_job             405 drivers/gpu/drm/scheduler/sched_main.c 			if (bad != s_job)
s_job             406 drivers/gpu/drm/scheduler/sched_main.c 				sched->ops->free_job(s_job);
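
Taken together, the drm_sched_stop() fragments above implement a "detach or reap" pass over ring_mirror_list, walked newest to oldest: if the job's hardware fence (s_fence->parent) is still pending and its completion callback can be removed, the job stays on the list for later resubmission; otherwise the job already finished (or never reached the hardware), so it is unlinked, waited on, and freed, except for the bad job the caller still needs. The sketch below is a condensed restatement of those fragments and omits the locking, hw_rq_count accounting and timeout handling of the real function.

static void detach_or_reap(struct drm_gpu_scheduler *sched,
                           struct drm_sched_job *bad)
{
        struct drm_sched_job *s_job, *tmp;

        list_for_each_entry_safe_reverse(s_job, tmp,
                                         &sched->ring_mirror_list, node) {
                if (s_job->s_fence->parent &&
                    dma_fence_remove_callback(s_job->s_fence->parent,
                                              &s_job->cb)) {
                        /* Still on the hardware: keep it on the mirror
                         * list so it can be resubmitted after reset. */
                        continue;
                }

                /* Already completed (or never started): drop it now. */
                list_del_init(&s_job->node);
                dma_fence_wait(&s_job->s_fence->finished, false);

                /* The guilty job stays alive for the caller's recovery. */
                if (bad != s_job)
                        sched->ops->free_job(s_job);
        }
}
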
s_job             432 drivers/gpu/drm/scheduler/sched_main.c 	struct drm_sched_job *s_job, *tmp;
s_job             441 drivers/gpu/drm/scheduler/sched_main.c 	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
s_job             442 drivers/gpu/drm/scheduler/sched_main.c 		struct dma_fence *fence = s_job->s_fence->parent;
s_job             450 drivers/gpu/drm/scheduler/sched_main.c 			r = dma_fence_add_callback(fence, &s_job->cb,
s_job             453 drivers/gpu/drm/scheduler/sched_main.c 				drm_sched_process_job(fence, &s_job->cb);
s_job             458 drivers/gpu/drm/scheduler/sched_main.c 			drm_sched_process_job(NULL, &s_job->cb);
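
drm_sched_start() does the inverse: for every job left on the mirror list it re-installs a completion callback on the hardware fence, and when the fence is already signalled (dma_fence_add_callback() returns -ENOENT) or there is no hardware fence at all, it completes the job by calling the callback directly. drm_sched_process_job() is static to sched_main.c, so this sketch uses a hypothetical my_job_done_cb in its place, defined after the drm_sched_process_job hits below.

static void my_job_done_cb(struct dma_fence *f, struct dma_fence_cb *cb);

static void rearm_completions(struct drm_gpu_scheduler *sched)
{
        struct drm_sched_job *s_job, *tmp;
        int r;

        list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
                struct dma_fence *fence = s_job->s_fence->parent;

                if (fence) {
                        r = dma_fence_add_callback(fence, &s_job->cb,
                                                   my_job_done_cb);
                        if (r == -ENOENT)
                                /* Already signalled: run the completion
                                 * path by hand. */
                                my_job_done_cb(fence, &s_job->cb);
                } else {
                        /* No hardware fence (job was never re-run):
                         * treat it as completed. */
                        my_job_done_cb(NULL, &s_job->cb);
                }
        }
}
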
s_job             479 drivers/gpu/drm/scheduler/sched_main.c 	struct drm_sched_job *s_job, *tmp;
s_job             484 drivers/gpu/drm/scheduler/sched_main.c 	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
s_job             485 drivers/gpu/drm/scheduler/sched_main.c 		struct drm_sched_fence *s_fence = s_job->s_fence;
s_job             487 drivers/gpu/drm/scheduler/sched_main.c 		if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
s_job             489 drivers/gpu/drm/scheduler/sched_main.c 			guilty_context = s_job->s_fence->scheduled.context;
s_job             492 drivers/gpu/drm/scheduler/sched_main.c 		if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
s_job             495 drivers/gpu/drm/scheduler/sched_main.c 		dma_fence_put(s_job->s_fence->parent);
s_job             496 drivers/gpu/drm/scheduler/sched_main.c 		fence = sched->ops->run_job(s_job);
s_job             499 drivers/gpu/drm/scheduler/sched_main.c 			s_job->s_fence->parent = NULL;
s_job             502 drivers/gpu/drm/scheduler/sched_main.c 			s_job->s_fence->parent = fence;
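
drm_sched_resubmit_jobs() closes the loop: it scans the mirror list for a job whose karma has crossed sched->hang_limit, remembers that job's context as guilty (jobs from that context get their finished fence flagged with -ECANCELED rather than being re-run), drops the stale s_fence->parent, and asks run_job() for a fresh hardware fence. From the driver's side these scheduler calls are typically used together in the reset path; a hedged sketch of that sequence follows, in which my_gpu_reset() and the hardware reset step are placeholders.

static void my_gpu_reset(struct drm_gpu_scheduler *sched,
                         struct drm_sched_job *bad)
{
        /* Park the scheduler and detach/reap in-flight jobs; the bad
         * job is kept alive for inspection. */
        drm_sched_stop(sched, bad);

        /* ... reset the hardware engine here (driver specific) ... */

        /* Re-run everything still on the mirror list, skipping work
         * from the guilty context. */
        drm_sched_resubmit_jobs(sched);

        /* Re-arm completion callbacks and unpark the scheduler. */
        drm_sched_start(sched, true);
}
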
s_job             621 drivers/gpu/drm/scheduler/sched_main.c 	struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb);
s_job             622 drivers/gpu/drm/scheduler/sched_main.c 	struct drm_sched_fence *s_fence = s_job->s_fence;
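
drm_sched_process_job() is the completion callback installed on the hardware fence; it recovers the owning s_job from the embedded dma_fence_cb with container_of() and then signals s_fence->finished. The sketch below performs only the container_of step and supplies the my_job_done_cb referenced from the restart sketch above; the actual signalling and accounting are left as a comment.

static void my_job_done_cb(struct dma_fence *f, struct dma_fence_cb *cb)
{
        struct drm_sched_job *s_job =
                container_of(cb, struct drm_sched_job, cb);
        struct drm_sched_fence *s_fence = s_job->s_fence;

        /* The real drm_sched_process_job() signals s_fence->finished
         * here (and updates the scheduler's in-flight accounting),
         * which is what eventually triggers the free_job op. */
        (void)s_fence;
}
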
s_job             196 include/drm/gpu_scheduler.h static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
s_job             199 include/drm/gpu_scheduler.h 	return (s_job && atomic_inc_return(&s_job->karma) > threshold);
s_job             304 include/drm/gpu_scheduler.h void drm_sched_job_kickout(struct drm_sched_job *s_job);
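
The drm_sched_invalidate_job() helper in the gpu_scheduler.h hits above is the karma primitive used by the recovery machinery: it atomically bumps s_job->karma and reports whether the job has now exceeded the given threshold, typically sched->hang_limit. A small usage sketch inside a hypothetical timeout path:

static bool my_job_is_guilty(struct drm_sched_job *s_job)
{
        /* One more strike against this job; true once it has timed out
         * more often than the scheduler's configured hang limit. */
        return drm_sched_invalidate_job(s_job, s_job->sched->hang_limit);
}
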