sched              90 arch/powerpc/include/asm/book3s/32/kup.h #include <linux/sched.h>
sched             720 arch/x86/events/core.c static void perf_sched_init(struct perf_sched *sched, struct event_constraint **constraints,
sched             725 arch/x86/events/core.c 	memset(sched, 0, sizeof(*sched));
sched             726 arch/x86/events/core.c 	sched->max_events	= num;
sched             727 arch/x86/events/core.c 	sched->max_weight	= wmax;
sched             728 arch/x86/events/core.c 	sched->max_gp		= gpmax;
sched             729 arch/x86/events/core.c 	sched->constraints	= constraints;
sched             736 arch/x86/events/core.c 	sched->state.event	= idx;		/* start with min weight */
sched             737 arch/x86/events/core.c 	sched->state.weight	= wmin;
sched             738 arch/x86/events/core.c 	sched->state.unassigned	= num;
sched             741 arch/x86/events/core.c static void perf_sched_save_state(struct perf_sched *sched)
sched             743 arch/x86/events/core.c 	if (WARN_ON_ONCE(sched->saved_states >= SCHED_STATES_MAX))
sched             746 arch/x86/events/core.c 	sched->saved[sched->saved_states] = sched->state;
sched             747 arch/x86/events/core.c 	sched->saved_states++;
sched             750 arch/x86/events/core.c static bool perf_sched_restore_state(struct perf_sched *sched)
sched             752 arch/x86/events/core.c 	if (!sched->saved_states)
sched             755 arch/x86/events/core.c 	sched->saved_states--;
sched             756 arch/x86/events/core.c 	sched->state = sched->saved[sched->saved_states];
sched             759 arch/x86/events/core.c 	clear_bit(sched->state.counter++, sched->state.used);
sched             768 arch/x86/events/core.c static bool __perf_sched_find_counter(struct perf_sched *sched)
sched             773 arch/x86/events/core.c 	if (!sched->state.unassigned)
sched             776 arch/x86/events/core.c 	if (sched->state.event >= sched->max_events)
sched             779 arch/x86/events/core.c 	c = sched->constraints[sched->state.event];
sched             784 arch/x86/events/core.c 			if (!__test_and_set_bit(idx, sched->state.used))
sched             790 arch/x86/events/core.c 	idx = sched->state.counter;
sched             792 arch/x86/events/core.c 		if (!__test_and_set_bit(idx, sched->state.used)) {
sched             793 arch/x86/events/core.c 			if (sched->state.nr_gp++ >= sched->max_gp)
sched             803 arch/x86/events/core.c 	sched->state.counter = idx;
sched             806 arch/x86/events/core.c 		perf_sched_save_state(sched);
sched             811 arch/x86/events/core.c static bool perf_sched_find_counter(struct perf_sched *sched)
sched             813 arch/x86/events/core.c 	while (!__perf_sched_find_counter(sched)) {
sched             814 arch/x86/events/core.c 		if (!perf_sched_restore_state(sched))
sched             825 arch/x86/events/core.c static bool perf_sched_next_event(struct perf_sched *sched)
sched             829 arch/x86/events/core.c 	if (!sched->state.unassigned || !--sched->state.unassigned)
sched             834 arch/x86/events/core.c 		sched->state.event++;
sched             835 arch/x86/events/core.c 		if (sched->state.event >= sched->max_events) {
sched             837 arch/x86/events/core.c 			sched->state.event = 0;
sched             838 arch/x86/events/core.c 			sched->state.weight++;
sched             839 arch/x86/events/core.c 			if (sched->state.weight > sched->max_weight)
sched             842 arch/x86/events/core.c 		c = sched->constraints[sched->state.event];
sched             843 arch/x86/events/core.c 	} while (c->weight != sched->state.weight);
sched             845 arch/x86/events/core.c 	sched->state.counter = 0;	/* start with first counter */
sched             856 arch/x86/events/core.c 	struct perf_sched sched;
sched             858 arch/x86/events/core.c 	perf_sched_init(&sched, constraints, n, wmin, wmax, gpmax);
sched             861 arch/x86/events/core.c 		if (!perf_sched_find_counter(&sched))
sched             864 arch/x86/events/core.c 			assign[sched.state.event] = sched.state.counter;
sched             865 arch/x86/events/core.c 	} while (perf_sched_next_event(&sched));
sched             867 arch/x86/events/core.c 	return sched.state.unassigned;
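The arch/x86/events/core.c hits above come from the perf counter scheduler: events are walked in order of increasing constraint weight, each is placed on the first free counter its constraint mask allows, and perf_sched_save_state()/perf_sched_restore_state() back out of dead ends. Below is a stand-alone user-space sketch of that idea; the struct, function and variable names are illustrative only, and it uses plain recursion where the kernel keeps an explicit, bounded stack of saved states.

/*
 * Illustrative sketch only, not kernel code: greedy assignment of events
 * to counters, most-constrained (lowest weight) events first, with
 * backtracking when a later event cannot be placed.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define NUM_COUNTERS 4

struct event {
	unsigned int allowed;	/* bitmask of counters this event may use */
	int assigned;		/* counter index, or -1 if unassigned */
};

static int weight(unsigned int mask)
{
	return __builtin_popcount(mask);
}

static int cmp_weight(const void *a, const void *b)
{
	return weight(((const struct event *)a)->allowed) -
	       weight(((const struct event *)b)->allowed);
}

/* Try to place events[idx..n-1] given the set of counters already in use. */
static bool place(struct event *ev, int n, int idx, unsigned int used)
{
	int c;

	if (idx == n)
		return true;

	for (c = 0; c < NUM_COUNTERS; c++) {
		if (!(ev[idx].allowed & (1u << c)) || (used & (1u << c)))
			continue;
		ev[idx].assigned = c;
		if (place(ev, n, idx + 1, used | (1u << c)))
			return true;
		ev[idx].assigned = -1;	/* backtrack, like perf_sched_restore_state() */
	}
	return false;
}

int main(void)
{
	struct event ev[] = {
		{ .allowed = 0x3, .assigned = -1 },	/* counters 0-1 only */
		{ .allowed = 0x1, .assigned = -1 },	/* counter 0 only    */
		{ .allowed = 0xf, .assigned = -1 },	/* any counter       */
	};
	int n = sizeof(ev) / sizeof(ev[0]), i;

	/* most constrained (lowest weight) events are scheduled first */
	qsort(ev, n, sizeof(ev[0]), cmp_weight);

	if (place(ev, n, 0, 0))
		for (i = 0; i < n; i++)
			printf("event with mask 0x%x -> counter %d\n",
			       ev[i].allowed, ev[i].assigned);
	else
		printf("no valid assignment\n");
	return 0;
}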
sched              54 crypto/fcrypt.c 	__be32 sched[ROUNDS];
sched             226 crypto/fcrypt.c #define F_ENCRYPT(R, L, sched)						\
sched             229 crypto/fcrypt.c 	u.l = sched ^ R;						\
sched             245 crypto/fcrypt.c 	F_ENCRYPT(X.r, X.l, ctx->sched[0x0]);
sched             246 crypto/fcrypt.c 	F_ENCRYPT(X.l, X.r, ctx->sched[0x1]);
sched             247 crypto/fcrypt.c 	F_ENCRYPT(X.r, X.l, ctx->sched[0x2]);
sched             248 crypto/fcrypt.c 	F_ENCRYPT(X.l, X.r, ctx->sched[0x3]);
sched             249 crypto/fcrypt.c 	F_ENCRYPT(X.r, X.l, ctx->sched[0x4]);
sched             250 crypto/fcrypt.c 	F_ENCRYPT(X.l, X.r, ctx->sched[0x5]);
sched             251 crypto/fcrypt.c 	F_ENCRYPT(X.r, X.l, ctx->sched[0x6]);
sched             252 crypto/fcrypt.c 	F_ENCRYPT(X.l, X.r, ctx->sched[0x7]);
sched             253 crypto/fcrypt.c 	F_ENCRYPT(X.r, X.l, ctx->sched[0x8]);
sched             254 crypto/fcrypt.c 	F_ENCRYPT(X.l, X.r, ctx->sched[0x9]);
sched             255 crypto/fcrypt.c 	F_ENCRYPT(X.r, X.l, ctx->sched[0xa]);
sched             256 crypto/fcrypt.c 	F_ENCRYPT(X.l, X.r, ctx->sched[0xb]);
sched             257 crypto/fcrypt.c 	F_ENCRYPT(X.r, X.l, ctx->sched[0xc]);
sched             258 crypto/fcrypt.c 	F_ENCRYPT(X.l, X.r, ctx->sched[0xd]);
sched             259 crypto/fcrypt.c 	F_ENCRYPT(X.r, X.l, ctx->sched[0xe]);
sched             260 crypto/fcrypt.c 	F_ENCRYPT(X.l, X.r, ctx->sched[0xf]);
sched             277 crypto/fcrypt.c 	F_ENCRYPT(X.l, X.r, ctx->sched[0xf]);
sched             278 crypto/fcrypt.c 	F_ENCRYPT(X.r, X.l, ctx->sched[0xe]);
sched             279 crypto/fcrypt.c 	F_ENCRYPT(X.l, X.r, ctx->sched[0xd]);
sched             280 crypto/fcrypt.c 	F_ENCRYPT(X.r, X.l, ctx->sched[0xc]);
sched             281 crypto/fcrypt.c 	F_ENCRYPT(X.l, X.r, ctx->sched[0xb]);
sched             282 crypto/fcrypt.c 	F_ENCRYPT(X.r, X.l, ctx->sched[0xa]);
sched             283 crypto/fcrypt.c 	F_ENCRYPT(X.l, X.r, ctx->sched[0x9]);
sched             284 crypto/fcrypt.c 	F_ENCRYPT(X.r, X.l, ctx->sched[0x8]);
sched             285 crypto/fcrypt.c 	F_ENCRYPT(X.l, X.r, ctx->sched[0x7]);
sched             286 crypto/fcrypt.c 	F_ENCRYPT(X.r, X.l, ctx->sched[0x6]);
sched             287 crypto/fcrypt.c 	F_ENCRYPT(X.l, X.r, ctx->sched[0x5]);
sched             288 crypto/fcrypt.c 	F_ENCRYPT(X.r, X.l, ctx->sched[0x4]);
sched             289 crypto/fcrypt.c 	F_ENCRYPT(X.l, X.r, ctx->sched[0x3]);
sched             290 crypto/fcrypt.c 	F_ENCRYPT(X.r, X.l, ctx->sched[0x2]);
sched             291 crypto/fcrypt.c 	F_ENCRYPT(X.l, X.r, ctx->sched[0x1]);
sched             292 crypto/fcrypt.c 	F_ENCRYPT(X.r, X.l, ctx->sched[0x0]);
sched             331 crypto/fcrypt.c 	ctx->sched[0x0] = cpu_to_be32(k); ror56_64(k, 11);
sched             332 crypto/fcrypt.c 	ctx->sched[0x1] = cpu_to_be32(k); ror56_64(k, 11);
sched             333 crypto/fcrypt.c 	ctx->sched[0x2] = cpu_to_be32(k); ror56_64(k, 11);
sched             334 crypto/fcrypt.c 	ctx->sched[0x3] = cpu_to_be32(k); ror56_64(k, 11);
sched             335 crypto/fcrypt.c 	ctx->sched[0x4] = cpu_to_be32(k); ror56_64(k, 11);
sched             336 crypto/fcrypt.c 	ctx->sched[0x5] = cpu_to_be32(k); ror56_64(k, 11);
sched             337 crypto/fcrypt.c 	ctx->sched[0x6] = cpu_to_be32(k); ror56_64(k, 11);
sched             338 crypto/fcrypt.c 	ctx->sched[0x7] = cpu_to_be32(k); ror56_64(k, 11);
sched             339 crypto/fcrypt.c 	ctx->sched[0x8] = cpu_to_be32(k); ror56_64(k, 11);
sched             340 crypto/fcrypt.c 	ctx->sched[0x9] = cpu_to_be32(k); ror56_64(k, 11);
sched             341 crypto/fcrypt.c 	ctx->sched[0xa] = cpu_to_be32(k); ror56_64(k, 11);
sched             342 crypto/fcrypt.c 	ctx->sched[0xb] = cpu_to_be32(k); ror56_64(k, 11);
sched             343 crypto/fcrypt.c 	ctx->sched[0xc] = cpu_to_be32(k); ror56_64(k, 11);
sched             344 crypto/fcrypt.c 	ctx->sched[0xd] = cpu_to_be32(k); ror56_64(k, 11);
sched             345 crypto/fcrypt.c 	ctx->sched[0xe] = cpu_to_be32(k); ror56_64(k, 11);
sched             346 crypto/fcrypt.c 	ctx->sched[0xf] = cpu_to_be32(k);
sched             372 crypto/fcrypt.c 	ctx->sched[0x0] = cpu_to_be32(lo); ror56(hi, lo, 11);
sched             373 crypto/fcrypt.c 	ctx->sched[0x1] = cpu_to_be32(lo); ror56(hi, lo, 11);
sched             374 crypto/fcrypt.c 	ctx->sched[0x2] = cpu_to_be32(lo); ror56(hi, lo, 11);
sched             375 crypto/fcrypt.c 	ctx->sched[0x3] = cpu_to_be32(lo); ror56(hi, lo, 11);
sched             376 crypto/fcrypt.c 	ctx->sched[0x4] = cpu_to_be32(lo); ror56(hi, lo, 11);
sched             377 crypto/fcrypt.c 	ctx->sched[0x5] = cpu_to_be32(lo); ror56(hi, lo, 11);
sched             378 crypto/fcrypt.c 	ctx->sched[0x6] = cpu_to_be32(lo); ror56(hi, lo, 11);
sched             379 crypto/fcrypt.c 	ctx->sched[0x7] = cpu_to_be32(lo); ror56(hi, lo, 11);
sched             380 crypto/fcrypt.c 	ctx->sched[0x8] = cpu_to_be32(lo); ror56(hi, lo, 11);
sched             381 crypto/fcrypt.c 	ctx->sched[0x9] = cpu_to_be32(lo); ror56(hi, lo, 11);
sched             382 crypto/fcrypt.c 	ctx->sched[0xa] = cpu_to_be32(lo); ror56(hi, lo, 11);
sched             383 crypto/fcrypt.c 	ctx->sched[0xb] = cpu_to_be32(lo); ror56(hi, lo, 11);
sched             384 crypto/fcrypt.c 	ctx->sched[0xc] = cpu_to_be32(lo); ror56(hi, lo, 11);
sched             385 crypto/fcrypt.c 	ctx->sched[0xd] = cpu_to_be32(lo); ror56(hi, lo, 11);
sched             386 crypto/fcrypt.c 	ctx->sched[0xe] = cpu_to_be32(lo); ror56(hi, lo, 11);
sched             387 crypto/fcrypt.c 	ctx->sched[0xf] = cpu_to_be32(lo);
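The crypto/fcrypt.c hits show the cipher's key schedule: the 56 non-parity key bits are rotated by 11 positions between rounds and the low 32 bits become each of the 16 subkeys (stored big-endian via cpu_to_be32() in the kernel). A minimal user-space sketch of that pattern follows, assuming ror56_64() rotates a 56-bit value right by n bits as its name and usage suggest; the helper is written here as a function (the kernel uses a macro) and the endianness conversion is omitted.

/* Stand-alone sketch of the fcrypt-style key schedule; values are examples. */
#include <stdint.h>
#include <stdio.h>

static void ror56_64(uint64_t *k, int n)
{
	/* rotate a 56-bit value kept in the low bits of a 64-bit word */
	*k = ((*k >> n) | (*k << (56 - n))) & ((1ULL << 56) - 1);
}

int main(void)
{
	uint64_t k = 0x0123456789abcdULL;	/* example 56-bit key */
	uint32_t sched[16];
	int i;

	for (i = 0; i < 16; i++) {
		sched[i] = (uint32_t)k;		/* low 32 bits become subkey i */
		ror56_64(&k, 11);		/* rotate for the next round */
	}

	for (i = 0; i < 16; i++)
		printf("sched[0x%x] = 0x%08x\n", i, sched[i]);
	return 0;
}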
sched             169 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c 		if (adev->gfx.kiq.ring.sched.ready)
sched             835 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c 	if (amdgpu_emu_mode == 0 && ring->sched.ready)
sched             683 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c 	if (ring->sched.ready)
sched             786 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
sched             991 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		ring = to_amdgpu_ring(entity->rq->sched);
sched            1007 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	ring = to_amdgpu_ring(parser->entity->rq->sched);
sched            1331 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	ring = to_amdgpu_ring(entity->rq->sched);
sched             184 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c 			rqs[num_rqs++] = &rings[j]->sched.sched_rq[priority];
sched             869 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 		if (!ring || !ring->sched.thread)
sched             871 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 		kthread_park(ring->sched.thread);
sched             885 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 		if (!ring || !ring->sched.thread)
sched             887 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 		kthread_unpark(ring->sched.thread);
sched             977 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c static void amdgpu_ib_preempt_job_recovery(struct drm_gpu_scheduler *sched)
sched             982 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 	spin_lock(&sched->job_list_lock);
sched             983 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 	list_for_each_entry(s_job, &sched->ring_mirror_list, node) {
sched             984 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 		fence = sched->ops->run_job(s_job);
sched             987 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 	spin_unlock(&sched->job_list_lock);
sched             997 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 	struct drm_gpu_scheduler *sched = &ring->sched;
sched            1010 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 	spin_lock(&sched->job_list_lock);
sched            1011 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 	list_for_each_entry(s_job, &sched->ring_mirror_list, node) {
sched            1017 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 	spin_unlock(&sched->job_list_lock);
sched            1032 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 	if (!ring || !ring->funcs->preempt_ib || !ring->sched.thread)
sched            1048 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 	kthread_park(ring->sched.thread);
sched            1073 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 		amdgpu_ib_preempt_job_recovery(&ring->sched);
sched            1084 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 	kthread_unpark(ring->sched.thread);
sched            3562 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 		if (!ring || !ring->sched.thread)
sched            3847 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 			if (!ring || !ring->sched.thread)
sched            3850 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 			drm_sched_stop(&ring->sched, job ? &job->base : NULL);
sched            3920 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 			if (!ring || !ring->sched.thread)
sched            3925 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 				drm_sched_resubmit_jobs(&ring->sched);
sched            3927 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 			drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
sched             486 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 		r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
sched             544 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 		drm_sched_fini(&ring->sched);
sched             153 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c 	if (!ring->sched.ready) {
sched             364 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c 		if (!ring->sched.ready || !ring->funcs->test_ib)
sched             385 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c 		ring->sched.ready = false;
sched              33 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c 	struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
sched              41 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c 			  s_job->sched->name);
sched              47 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c 		  job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
sched              55 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c 		drm_sched_suspend_timeout(&ring->sched);
sched              76 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c 	(*job)->base.sched = &adev->rings[0]->sched;
sched             107 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c 	struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
sched             120 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c 	struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
sched             162 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c 	ring = to_amdgpu_ring(entity->rq->sched);
sched             173 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c 	job->base.sched = &ring->sched;
sched             186 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c 	struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->rq->sched);
sched             218 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c 	struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
sched             319 drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c 			if (adev->gfx.gfx_ring[i].sched.ready)
sched             327 drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c 			if (adev->gfx.compute_ring[i].sched.ready)
sched             335 drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c 			if (adev->sdma.instance[i].ring.sched.ready)
sched             346 drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c 			if (adev->uvd.inst[i].ring.sched.ready)
sched             355 drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c 			if (adev->vce.ring[i].sched.ready)
sched             367 drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c 				if (adev->uvd.inst[i].ring_enc[j].sched.ready)
sched             379 drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c 			if (adev->vcn.inst[i].ring_dec.sched.ready)
sched             392 drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c 				if (adev->vcn.inst[i].ring_enc[j].sched.ready)
sched             404 drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c 			if (adev->vcn.inst[i].ring_jpeg.sched.ready)
sched            3004 drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c 		if (ring && ring->sched.ready)
sched             354 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	ring->sched.ready = false;
sched             539 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	ring->sched.ready = !r;
sched              47 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h #define to_amdgpu_ring(s) container_of((s), struct amdgpu_ring, sched)
sched             182 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h 	struct drm_gpu_scheduler	sched;
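The two amdgpu_ring.h lines above are the key to most amdgpu hits in this listing: the drm_gpu_scheduler is embedded in struct amdgpu_ring, so to_amdgpu_ring() can recover the ring from a scheduler pointer via container_of(). A reduced, compilable illustration of that pattern, with the structures trimmed to the fields needed here and a user-space stand-in for the kernel's container_of() macro:

#include <stddef.h>

/* user-space stand-in for the kernel macro */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct drm_gpu_scheduler { int ready; };

struct amdgpu_ring {
	int idx;
	struct drm_gpu_scheduler sched;		/* embedded, not a pointer */
};

#define to_amdgpu_ring(s) container_of((s), struct amdgpu_ring, sched)

int ring_index_from_sched(struct drm_gpu_scheduler *sched)
{
	return to_amdgpu_ring(sched)->idx;	/* recover the enclosing ring */
}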
sched              72 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c 		ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
sched             279 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c 			if (s_fence->sched == &ring->sched) {
sched             154 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h 			   __entry->ring = to_amdgpu_ring(p->entity->rq->sched)->idx;
sched             157 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h 				to_amdgpu_ring(p->entity->rq->sched));
sched             173 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h 			     __string(ring, to_amdgpu_ring(job->base.sched)->name)
sched             182 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h 			   __assign_str(ring, to_amdgpu_ring(job->base.sched)->name)
sched             198 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h 			     __string(ring, to_amdgpu_ring(job->base.sched)->name)
sched             207 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h 			   __assign_str(ring, to_amdgpu_ring(job->base.sched)->name)
sched             471 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h 			     __string(ring, sched_job->base.sched->name);
sched             479 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h 			   __assign_str(ring, sched_job->base.sched->name)
sched            1864 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 		rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
sched            1977 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	if (direct_submit && !ring->sched.ready) {
sched             334 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c 	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
sched             241 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
sched             102 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c 	ring = container_of(p->vm->entity.rq->sched, struct amdgpu_ring, sched);
sched             323 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 	sdma0->sched.ready = false;
sched             324 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 	sdma1->sched.ready = false;
sched             501 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 		ring->sched.ready = true;
sched            1206 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 	drm_sched_fault(&adev->sdma.instance[instance_id].ring.sched);
sched            1375 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 	struct drm_gpu_scheduler *sched;
sched            1380 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 		sched = &adev->sdma.instance[i].ring.sched;
sched            1382 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 			&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
sched            2444 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 			adev->gfx.gfx_ring[i].sched.ready = false;
sched            2911 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		ring->sched.ready = true;
sched            2928 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 			adev->gfx.compute_ring[i].sched.ready = false;
sched            2929 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		adev->gfx.kiq.ring.sched.ready = false;
sched            3215 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		kiq_ring->sched.ready = false;
sched            3254 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		ring->sched.ready = true;
sched            3586 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	ring->sched.ready = true;
sched            3662 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 			ring->sched.ready = false;
sched            3669 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		ring->sched.ready = true;
sched            3674 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 			ring->sched.ready = false;
sched            5079 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 				drm_sched_fault(&ring->sched);
sched            5088 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 				drm_sched_fault(&ring->sched);
sched            1963 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 			adev->gfx.gfx_ring[i].sched.ready = false;
sched            1965 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 			adev->gfx.compute_ring[i].sched.ready = false;
sched            3402 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	drm_sched_fault(&ring->sched);
sched            2441 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 			adev->gfx.gfx_ring[i].sched.ready = false;
sched            2710 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 			adev->gfx.compute_ring[i].sched.ready = false;
sched            4901 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 		drm_sched_fault(&adev->gfx.gfx_ring[0].sched);
sched            4908 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 				drm_sched_fault(&ring->sched);
sched            1562 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	if (!ring->sched.ready)
sched            4159 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 			adev->gfx.gfx_ring[i].sched.ready = false;
sched            4340 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	ring->sched.ready = true;
sched            4354 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 			adev->gfx.compute_ring[i].sched.ready = false;
sched            4355 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 		adev->gfx.kiq.ring.sched.ready = false;
sched            4736 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	ring->sched.ready = true;
sched            6772 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 		drm_sched_fault(&adev->gfx.gfx_ring[0].sched);
sched            6780 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 				drm_sched_fault(&ring->sched);
sched            3074 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 			adev->gfx.gfx_ring[i].sched.ready = false;
sched            3264 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	ring->sched.ready = true;
sched            3279 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 			adev->gfx.compute_ring[i].sched.ready = false;
sched            3280 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		adev->gfx.kiq.ring.sched.ready = false;
sched            3789 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	ring->sched.ready = true;
sched            4277 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	if (!ring->sched.ready)
sched            5707 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		drm_sched_fault(&adev->gfx.gfx_ring[0].sched);
sched            5715 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 				drm_sched_fault(&ring->sched);
sched             501 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c 	if (adev->gfx.kiq.ring.sched.ready &&
sched             358 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 	sdma0->sched.ready = false;
sched             359 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 	sdma1->sched.ready = false;
sched             480 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 		ring->sched.ready = true;
sched            1101 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 		drm_sched_fault(&adev->sdma.instance[instance_id].ring.sched);
sched            1263 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 	struct drm_gpu_scheduler *sched;
sched            1268 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 		sched = &adev->sdma.instance[i].ring.sched;
sched            1270 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 			&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
sched             532 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 	sdma0->sched.ready = false;
sched             533 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 	sdma1->sched.ready = false;
sched             748 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 		ring->sched.ready = true;
sched            1435 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 		drm_sched_fault(&adev->sdma.instance[instance_id].ring.sched);
sched            1701 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 	struct drm_gpu_scheduler *sched;
sched            1706 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 		sched = &adev->sdma.instance[i].ring.sched;
sched            1708 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 			&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
sched             827 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 		sdma[i]->sched.ready = false;
sched             875 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 		sdma[i]->sched.ready = false;
sched            1061 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	ring->sched.ready = true;
sched            1152 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	ring->sched.ready = true;
sched            2088 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 		drm_sched_fault(&adev->sdma.instance[instance].ring.sched);
sched            2526 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	struct drm_gpu_scheduler *sched;
sched            2532 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 			sched = &adev->sdma.instance[i].page.sched;
sched            2534 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 			sched = &adev->sdma.instance[i].ring.sched;
sched            2536 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 			&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
sched             518 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	sdma0->sched.ready = false;
sched             519 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	sdma1->sched.ready = false;
sched             754 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 		ring->sched.ready = true;
sched             763 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 			ring->sched.ready = false;
sched            1743 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	struct drm_gpu_scheduler *sched;
sched            1749 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 			sched = &adev->sdma.instance[i].ring.sched;
sched            1751 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 				&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
sched             127 drivers/gpu/drm/amd/amdgpu/si_dma.c 		ring->sched.ready = false;
sched             180 drivers/gpu/drm/amd/amdgpu/si_dma.c 		ring->sched.ready = true;
sched             837 drivers/gpu/drm/amd/amdgpu/si_dma.c 	struct drm_gpu_scheduler *sched;
sched             842 drivers/gpu/drm/amd/amdgpu/si_dma.c 		sched = &adev->sdma.instance[i].ring.sched;
sched             844 drivers/gpu/drm/amd/amdgpu/si_dma.c 			&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
sched             218 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c 	ring->sched.ready = false;
sched             216 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c 	ring->sched.ready = false;
sched             543 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	ring->sched.ready = false;
sched             613 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 		adev->uvd.inst[i].ring.sched.ready = false;
sched            1263 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
sched             553 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 		adev->vce.ring[i].sched.ready = false;
sched             205 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 		ring->sched.ready = true;
sched             240 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	ring->sched.ready = false;
sched             250 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	ring->sched.ready = true;
sched             253 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 		ring->sched.ready = false;
sched             259 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 		ring->sched.ready = true;
sched             262 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 			ring->sched.ready = false;
sched             268 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	ring->sched.ready = true;
sched             271 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 		ring->sched.ready = false;
sched             301 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	ring->sched.ready = false;
sched             305 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 		ring->sched.ready = false;
sched             309 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	ring->sched.ready = false;
sched             263 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 			ring->sched.ready = false;
sched             269 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 			ring->sched.ready = false;
sched             273 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 				ring->sched.ready = false;
sched             281 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 			ring->sched.ready = false;
sched             313 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 		ring->sched.ready = false;
sched             317 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 			ring->sched.ready = false;
sched             321 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 		ring->sched.ready = false;
sched              71 drivers/gpu/drm/etnaviv/etnaviv_drv.c 			rq = &gpu->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
sched            1804 drivers/gpu/drm/etnaviv/etnaviv_gpu.c 	if (atomic_read(&gpu->sched.hw_rq_count))
sched             102 drivers/gpu/drm/etnaviv/etnaviv_gpu.h 	struct drm_gpu_scheduler sched;
sched             112 drivers/gpu/drm/etnaviv/etnaviv_sched.c 	drm_sched_stop(&gpu->sched, sched_job);
sched             121 drivers/gpu/drm/etnaviv/etnaviv_sched.c 	drm_sched_resubmit_jobs(&gpu->sched);
sched             124 drivers/gpu/drm/etnaviv/etnaviv_sched.c 	drm_sched_start(&gpu->sched, true);
sched             185 drivers/gpu/drm/etnaviv/etnaviv_sched.c 	ret = drm_sched_init(&gpu->sched, &etnaviv_sched_ops,
sched             196 drivers/gpu/drm/etnaviv/etnaviv_sched.c 	drm_sched_fini(&gpu->sched);
sched             421 drivers/gpu/drm/i915/gem/i915_gem_context.c 	ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
sched             603 drivers/gpu/drm/i915/gem/i915_gem_context.c 	ctx->sched.priority = I915_USER_PRIORITY(prio);
sched            1806 drivers/gpu/drm/i915/gem/i915_gem_context.c 				ctx->sched.priority =
sched            1920 drivers/gpu/drm/i915/gem/i915_gem_context.c 	dst->sched = src->sched;
sched            2258 drivers/gpu/drm/i915/gem/i915_gem_context.c 		args->value = ctx->sched.priority >> I915_USER_PRIORITY_SHIFT;
sched             170 drivers/gpu/drm/i915/gem/i915_gem_context_types.h 	struct i915_sched_attr sched;
sched            1149 drivers/gpu/drm/i915/gt/intel_engine_cs.c 	x = print_sched_attr(rq->i915, &rq->sched.attr, buf, x, sizeof(buf));
sched            1556 drivers/gpu/drm/i915/gt/intel_engine_cs.c 	list_for_each_entry(request, &engine->active.requests, sched.link) {
sched             113 drivers/gpu/drm/i915/gt/intel_engine_pm.c 	rq->sched.attr.priority = I915_PRIORITY_UNPREEMPTABLE;
sched              94 drivers/gpu/drm/i915/gt/intel_engine_user.c 		u8 sched;
sched             119 drivers/gpu/drm/i915/gt/intel_engine_user.c 				enabled |= BIT(map[i].sched);
sched             121 drivers/gpu/drm/i915/gt/intel_engine_user.c 				disabled |= BIT(map[i].sched);
sched             271 drivers/gpu/drm/i915/gt/intel_lrc.c 	return rq->sched.attr.priority;
sched             350 drivers/gpu/drm/i915/gt/intel_lrc.c 	if (!list_is_last(&rq->sched.link, &engine->active.requests) &&
sched             351 drivers/gpu/drm/i915/gt/intel_lrc.c 	    rq_prio(list_next_entry(rq, sched.link)) > last_prio)
sched             485 drivers/gpu/drm/i915/gt/intel_lrc.c 					 sched.link) {
sched             509 drivers/gpu/drm/i915/gt/intel_lrc.c 			list_move(&rq->sched.link, pl);
sched             938 drivers/gpu/drm/i915/gt/intel_lrc.c 				     &(rq__)->sched.waiters_list, \
sched             956 drivers/gpu/drm/i915/gt/intel_lrc.c 		list_move_tail(&rq->sched.link, pl);
sched             960 drivers/gpu/drm/i915/gt/intel_lrc.c 				container_of(p->waiter, typeof(*w), sched);
sched             971 drivers/gpu/drm/i915/gt/intel_lrc.c 			if (list_empty(&w->sched.link))
sched             978 drivers/gpu/drm/i915/gt/intel_lrc.c 			list_move_tail(&w->sched.link, &list);
sched             981 drivers/gpu/drm/i915/gt/intel_lrc.c 		rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
sched            1004 drivers/gpu/drm/i915/gt/intel_lrc.c 	if (list_is_last(&rq->sched.link, &engine->active.requests))
sched            1007 drivers/gpu/drm/i915/gt/intel_lrc.c 	hint = max(rq_prio(list_next_entry(rq, sched.link)),
sched            1016 drivers/gpu/drm/i915/gt/intel_lrc.c 	if (list_is_last(&rq->sched.link, &engine->active.requests))
sched            1019 drivers/gpu/drm/i915/gt/intel_lrc.c 	return rq_prio(list_next_entry(rq, sched.link));
sched            1104 drivers/gpu/drm/i915/gt/intel_lrc.c 				  last->sched.attr.priority,
sched            1131 drivers/gpu/drm/i915/gt/intel_lrc.c 				  last->sched.attr.priority,
sched            1161 drivers/gpu/drm/i915/gt/intel_lrc.c 			if (!list_is_last(&last->sched.link,
sched            1695 drivers/gpu/drm/i915/gt/intel_lrc.c 	queue_request(engine, &request->sched, rq_prio(request));
sched            1698 drivers/gpu/drm/i915/gt/intel_lrc.c 	GEM_BUG_ON(list_empty(&request->sched.link));
sched            2595 drivers/gpu/drm/i915/gt/intel_lrc.c 	list_for_each_entry(rq, &engine->active.requests, sched.link)
sched            3662 drivers/gpu/drm/i915/gt/intel_lrc.c 		list_move_tail(&rq->sched.link, virtual_queue(ve));
sched            3931 drivers/gpu/drm/i915/gt/intel_lrc.c 	list_for_each_entry(rq, &engine->active.requests, sched.link) {
sched              49 drivers/gpu/drm/i915/gt/intel_reset.c 	list_for_each_entry_continue(rq, &engine->active.requests, sched.link)
sched             790 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	list_for_each_entry(pos, &engine->active.requests, sched.link) {
sched             918 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	list_for_each_entry(request, &engine->active.requests, sched.link) {
sched             226 drivers/gpu/drm/i915/gt/mock_engine.c 	list_for_each_entry(request, &engine->active.requests, sched.link) {
sched             753 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 			ctx[idx]->sched.priority =
sched             805 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 			h.ctx->sched.priority = 1024;
sched             325 drivers/gpu/drm/i915/gt/selftest_lrc.c 	ctx_hi->sched.priority =
sched             331 drivers/gpu/drm/i915/gt/selftest_lrc.c 	ctx_lo->sched.priority =
sched             530 drivers/gpu/drm/i915/gt/selftest_lrc.c 	ctx_hi->sched.priority =
sched             536 drivers/gpu/drm/i915/gt/selftest_lrc.c 	ctx_lo->sched.priority =
sched             640 drivers/gpu/drm/i915/gt/selftest_lrc.c 	ctx_lo->sched.priority = I915_USER_PRIORITY(1);
sched             773 drivers/gpu/drm/i915/gt/selftest_lrc.c 	b.ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
sched             990 drivers/gpu/drm/i915/gt/selftest_lrc.c 	i915_sched_node_init(&rq->sched);
sched            1016 drivers/gpu/drm/i915/gt/selftest_lrc.c 	i915_sched_node_fini(&dummy->sched);
sched            1318 drivers/gpu/drm/i915/gt/selftest_lrc.c 	ctx_hi->sched.priority =
sched            1324 drivers/gpu/drm/i915/gt/selftest_lrc.c 	ctx_lo->sched.priority =
sched            1452 drivers/gpu/drm/i915/gt/selftest_lrc.c 	ctx->sched.priority = prio;
sched             511 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 	return rq->sched.attr.priority | __NO_PREEMPTION;
sched             577 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 			list_del_init(&rq->sched.link);
sched             708 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 	list_for_each_entry(rq, &engine->active.requests, sched.link) {
sched             721 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 			list_del_init(&rq->sched.link);
sched            1177 drivers/gpu/drm/i915/i915_gpu_error.c 	erq->sched_attr = request->sched.attr;
sched            1197 drivers/gpu/drm/i915/i915_gpu_error.c 	list_for_each_entry_from(request, &engine->active.requests, sched.link)
sched            1211 drivers/gpu/drm/i915/i915_gpu_error.c 				 &engine->active.requests, sched.link) {
sched            1267 drivers/gpu/drm/i915/i915_gpu_error.c 	e->sched_attr = ctx->sched;
sched             214 drivers/gpu/drm/i915/i915_request.c 	list_del(&rq->sched.link);
sched             311 drivers/gpu/drm/i915/i915_request.c 	i915_sched_node_fini(&rq->sched);
sched             431 drivers/gpu/drm/i915/i915_request.c 	if (request->sched.semaphores &&
sched             433 drivers/gpu/drm/i915/i915_request.c 		engine->saturated |= request->sched.semaphores;
sched             446 drivers/gpu/drm/i915/i915_request.c 		list_move_tail(&request->sched.link, &engine->active.requests);
sched             502 drivers/gpu/drm/i915/i915_request.c 	if (request->sched.semaphores && i915_request_started(request)) {
sched             503 drivers/gpu/drm/i915/i915_request.c 		request->sched.attr.priority |= I915_PRIORITY_NOSEMAPHORE;
sched             504 drivers/gpu/drm/i915/i915_request.c 		request->sched.semaphores = 0;
sched             579 drivers/gpu/drm/i915/i915_request.c 		if (!(READ_ONCE(rq->sched.attr.priority) & I915_PRIORITY_NOSEMAPHORE)) {
sched             708 drivers/gpu/drm/i915/i915_request.c 	i915_sched_node_init(&rq->sched);
sched             757 drivers/gpu/drm/i915/i915_request.c 	GEM_BUG_ON(!list_empty(&rq->sched.signalers_list));
sched             758 drivers/gpu/drm/i915/i915_request.c 	GEM_BUG_ON(!list_empty(&rq->sched.waiters_list));
sched             828 drivers/gpu/drm/i915/i915_request.c 	return rq->sched.semaphores | rq->engine->saturated;
sched             884 drivers/gpu/drm/i915/i915_request.c 	to->sched.semaphores |= from->engine->mask;
sched             885 drivers/gpu/drm/i915/i915_request.c 	to->sched.flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;
sched             903 drivers/gpu/drm/i915/i915_request.c 		ret = i915_sched_node_add_dependency(&to->sched, &from->sched);
sched             913 drivers/gpu/drm/i915/i915_request.c 		   to->gem_context->sched.priority >= I915_PRIORITY_NORMAL) {
sched             923 drivers/gpu/drm/i915/i915_request.c 	if (to->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN) {
sched            1163 drivers/gpu/drm/i915/i915_request.c 			__i915_sched_node_add_dependency(&rq->sched,
sched            1164 drivers/gpu/drm/i915/i915_request.c 							 &prev->sched,
sched            1240 drivers/gpu/drm/i915/i915_request.c 	struct i915_sched_attr attr = rq->gem_context->sched;
sched            1263 drivers/gpu/drm/i915/i915_request.c 	if (!(rq->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN))
sched            1272 drivers/gpu/drm/i915/i915_request.c 	if (list_empty(&rq->sched.signalers_list))
sched             162 drivers/gpu/drm/i915/i915_request.h 	struct i915_sched_node sched;
sched              25 drivers/gpu/drm/i915/i915_scheduler.c 	return container_of(node, const struct i915_request, sched);
sched             177 drivers/gpu/drm/i915/i915_scheduler.c 	return rq->sched.attr.priority | __NO_PREEMPTION;
sched             363 drivers/gpu/drm/i915/i915_scheduler.c 	__i915_schedule(&rq->sched, attr);
sched             380 drivers/gpu/drm/i915/i915_scheduler.c 	if (READ_ONCE(rq->sched.attr.priority) & bump)
sched             384 drivers/gpu/drm/i915/i915_scheduler.c 	__bump_priority(&rq->sched, bump);
sched              18 drivers/gpu/drm/i915/i915_scheduler.h 		list_for_each_entry(it, &(plist)->requests[idx], sched.link)
sched              26 drivers/gpu/drm/i915/i915_scheduler.h 					 sched.link)
sched             757 drivers/gpu/drm/i915/i915_trace.h 			   __entry->prio = rq->sched.attr.priority;
sched             103 drivers/gpu/drm/lima/lima_sched.c static inline struct lima_sched_pipe *to_lima_pipe(struct drm_gpu_scheduler *sched)
sched             105 drivers/gpu/drm/lima/lima_sched.c 	return container_of(sched, struct lima_sched_pipe, base);
sched             196 drivers/gpu/drm/lima/lima_sched.c 	struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
sched             289 drivers/gpu/drm/lima/lima_sched.c 	struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
sched             300 drivers/gpu/drm/lima/lima_sched.c 	struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
sched              27 drivers/gpu/drm/panfrost/panfrost_job.c 	struct drm_gpu_scheduler sched;
sched             404 drivers/gpu/drm/panfrost/panfrost_job.c 		struct drm_gpu_scheduler *sched = &pfdev->js->queue[i].sched;
sched             406 drivers/gpu/drm/panfrost/panfrost_job.c 		drm_sched_stop(sched, sched_job);
sched             409 drivers/gpu/drm/panfrost/panfrost_job.c 			cancel_delayed_work_sync(&sched->work_tdr);
sched             429 drivers/gpu/drm/panfrost/panfrost_job.c 		drm_sched_resubmit_jobs(&pfdev->js->queue[i].sched);
sched             433 drivers/gpu/drm/panfrost/panfrost_job.c 		drm_sched_start(&pfdev->js->queue[i].sched, true);
sched             475 drivers/gpu/drm/panfrost/panfrost_job.c 			drm_sched_fault(&pfdev->js->queue[j].sched);
sched             527 drivers/gpu/drm/panfrost/panfrost_job.c 		ret = drm_sched_init(&js->queue[j].sched,
sched             543 drivers/gpu/drm/panfrost/panfrost_job.c 		drm_sched_fini(&js->queue[j].sched);
sched             556 drivers/gpu/drm/panfrost/panfrost_job.c 		drm_sched_fini(&js->queue[j].sched);
sched             568 drivers/gpu/drm/panfrost/panfrost_job.c 		rq = &js->queue[i].sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
sched             591 drivers/gpu/drm/panfrost/panfrost_job.c 		if (atomic_read(&js->queue[i].sched.hw_rq_count))
sched              51 drivers/gpu/drm/scheduler/gpu_scheduler_trace.h 			   __entry->name = sched_job->sched->name;
sched              54 drivers/gpu/drm/scheduler/gpu_scheduler_trace.h 				   &sched_job->sched->hw_rq_count);
sched              87 drivers/gpu/drm/scheduler/gpu_scheduler_trace.h 			   __entry->name = sched_job->sched->name;
sched             140 drivers/gpu/drm/scheduler/sched_entity.c 		struct drm_gpu_scheduler *sched = entity->rq_list[i]->sched;
sched             142 drivers/gpu/drm/scheduler/sched_entity.c 		if (!entity->rq_list[i]->sched->ready) {
sched             143 drivers/gpu/drm/scheduler/sched_entity.c 			DRM_WARN("sched%s is not ready, skipping", sched->name);
sched             147 drivers/gpu/drm/scheduler/sched_entity.c 		num_jobs = atomic_read(&sched->num_jobs);
sched             171 drivers/gpu/drm/scheduler/sched_entity.c 	struct drm_gpu_scheduler *sched;
sched             178 drivers/gpu/drm/scheduler/sched_entity.c 	sched = entity->rq->sched;
sched             186 drivers/gpu/drm/scheduler/sched_entity.c 					sched->job_scheduled,
sched             190 drivers/gpu/drm/scheduler/sched_entity.c 		wait_event_killable(sched->job_scheduled,
sched             224 drivers/gpu/drm/scheduler/sched_entity.c 	job->sched->ops->free_job(job);
sched             277 drivers/gpu/drm/scheduler/sched_entity.c 	struct drm_gpu_scheduler *sched = NULL;
sched             280 drivers/gpu/drm/scheduler/sched_entity.c 		sched = entity->rq->sched;
sched             288 drivers/gpu/drm/scheduler/sched_entity.c 		if (sched) {
sched             292 drivers/gpu/drm/scheduler/sched_entity.c 			kthread_park(sched->thread);
sched             293 drivers/gpu/drm/scheduler/sched_entity.c 			kthread_unpark(sched->thread);
sched             349 drivers/gpu/drm/scheduler/sched_entity.c 	drm_sched_wakeup(entity->rq->sched);
sched             358 drivers/gpu/drm/scheduler/sched_entity.c 	*rq = &(*rq)->sched->sched_rq[priority];
sched             399 drivers/gpu/drm/scheduler/sched_entity.c 	struct drm_gpu_scheduler *sched = entity->rq->sched;
sched             415 drivers/gpu/drm/scheduler/sched_entity.c 	if (s_fence && s_fence->sched == sched) {
sched             450 drivers/gpu/drm/scheduler/sched_entity.c 	struct drm_gpu_scheduler *sched = entity->rq->sched;
sched             458 drivers/gpu/drm/scheduler/sched_entity.c 			sched->ops->dependency(sched_job, entity))) {
sched             524 drivers/gpu/drm/scheduler/sched_entity.c 	atomic_inc(&entity->rq->sched->num_jobs);
sched             540 drivers/gpu/drm/scheduler/sched_entity.c 		drm_sched_wakeup(entity->rq->sched);
sched              83 drivers/gpu/drm/scheduler/sched_fence.c 	return (const char *)fence->sched->name;
sched             166 drivers/gpu/drm/scheduler/sched_fence.c 	fence->sched = entity->rq->sched;
sched              71 drivers/gpu/drm/scheduler/sched_main.c static void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
sched              77 drivers/gpu/drm/scheduler/sched_main.c 	rq->sched = sched;
sched             171 drivers/gpu/drm/scheduler/sched_main.c 	struct drm_gpu_scheduler *sched = entity->rq->sched;
sched             179 drivers/gpu/drm/scheduler/sched_main.c 	if (s_fence && s_fence->sched == sched)
sched             193 drivers/gpu/drm/scheduler/sched_main.c static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
sched             195 drivers/gpu/drm/scheduler/sched_main.c 	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
sched             196 drivers/gpu/drm/scheduler/sched_main.c 	    !list_empty(&sched->ring_mirror_list))
sched             197 drivers/gpu/drm/scheduler/sched_main.c 		schedule_delayed_work(&sched->work_tdr, sched->timeout);
sched             207 drivers/gpu/drm/scheduler/sched_main.c void drm_sched_fault(struct drm_gpu_scheduler *sched)
sched             209 drivers/gpu/drm/scheduler/sched_main.c 	mod_delayed_work(system_wq, &sched->work_tdr, 0);
sched             226 drivers/gpu/drm/scheduler/sched_main.c unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched)
sched             230 drivers/gpu/drm/scheduler/sched_main.c 	sched_timeout = sched->work_tdr.timer.expires;
sched             236 drivers/gpu/drm/scheduler/sched_main.c 	if (mod_delayed_work(system_wq, &sched->work_tdr, MAX_SCHEDULE_TIMEOUT)
sched             240 drivers/gpu/drm/scheduler/sched_main.c 		return sched->timeout;
sched             253 drivers/gpu/drm/scheduler/sched_main.c void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
sched             258 drivers/gpu/drm/scheduler/sched_main.c 	spin_lock_irqsave(&sched->job_list_lock, flags);
sched             260 drivers/gpu/drm/scheduler/sched_main.c 	if (list_empty(&sched->ring_mirror_list))
sched             261 drivers/gpu/drm/scheduler/sched_main.c 		cancel_delayed_work(&sched->work_tdr);
sched             263 drivers/gpu/drm/scheduler/sched_main.c 		mod_delayed_work(system_wq, &sched->work_tdr, remaining);
sched             265 drivers/gpu/drm/scheduler/sched_main.c 	spin_unlock_irqrestore(&sched->job_list_lock, flags);
sched             271 drivers/gpu/drm/scheduler/sched_main.c 	struct drm_gpu_scheduler *sched = s_job->sched;
sched             274 drivers/gpu/drm/scheduler/sched_main.c 	spin_lock_irqsave(&sched->job_list_lock, flags);
sched             275 drivers/gpu/drm/scheduler/sched_main.c 	list_add_tail(&s_job->node, &sched->ring_mirror_list);
sched             276 drivers/gpu/drm/scheduler/sched_main.c 	drm_sched_start_timeout(sched);
sched             277 drivers/gpu/drm/scheduler/sched_main.c 	spin_unlock_irqrestore(&sched->job_list_lock, flags);
sched             282 drivers/gpu/drm/scheduler/sched_main.c 	struct drm_gpu_scheduler *sched;
sched             286 drivers/gpu/drm/scheduler/sched_main.c 	sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
sched             287 drivers/gpu/drm/scheduler/sched_main.c 	job = list_first_entry_or_null(&sched->ring_mirror_list,
sched             291 drivers/gpu/drm/scheduler/sched_main.c 		job->sched->ops->timedout_job(job);
sched             297 drivers/gpu/drm/scheduler/sched_main.c 		if (sched->free_guilty) {
sched             298 drivers/gpu/drm/scheduler/sched_main.c 			job->sched->ops->free_job(job);
sched             299 drivers/gpu/drm/scheduler/sched_main.c 			sched->free_guilty = false;
sched             303 drivers/gpu/drm/scheduler/sched_main.c 	spin_lock_irqsave(&sched->job_list_lock, flags);
sched             304 drivers/gpu/drm/scheduler/sched_main.c 	drm_sched_start_timeout(sched);
sched             305 drivers/gpu/drm/scheduler/sched_main.c 	spin_unlock_irqrestore(&sched->job_list_lock, flags);
sched             322 drivers/gpu/drm/scheduler/sched_main.c 	struct drm_gpu_scheduler *sched = bad->sched;
sched             332 drivers/gpu/drm/scheduler/sched_main.c 			struct drm_sched_rq *rq = &sched->sched_rq[i];
sched             339 drivers/gpu/drm/scheduler/sched_main.c 					    bad->sched->hang_limit)
sched             365 drivers/gpu/drm/scheduler/sched_main.c void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
sched             370 drivers/gpu/drm/scheduler/sched_main.c 	kthread_park(sched->thread);
sched             378 drivers/gpu/drm/scheduler/sched_main.c 	list_for_each_entry_safe_reverse(s_job, tmp, &sched->ring_mirror_list, node) {
sched             382 drivers/gpu/drm/scheduler/sched_main.c 			atomic_dec(&sched->hw_rq_count);
sched             388 drivers/gpu/drm/scheduler/sched_main.c 			spin_lock_irqsave(&sched->job_list_lock, flags);
sched             390 drivers/gpu/drm/scheduler/sched_main.c 			spin_unlock_irqrestore(&sched->job_list_lock, flags);
sched             406 drivers/gpu/drm/scheduler/sched_main.c 				sched->ops->free_job(s_job);
sched             408 drivers/gpu/drm/scheduler/sched_main.c 				sched->free_guilty = true;
sched             418 drivers/gpu/drm/scheduler/sched_main.c 	cancel_delayed_work(&sched->work_tdr);
sched             430 drivers/gpu/drm/scheduler/sched_main.c void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
sched             441 drivers/gpu/drm/scheduler/sched_main.c 	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
sched             444 drivers/gpu/drm/scheduler/sched_main.c 		atomic_inc(&sched->hw_rq_count);
sched             462 drivers/gpu/drm/scheduler/sched_main.c 		spin_lock_irqsave(&sched->job_list_lock, flags);
sched             463 drivers/gpu/drm/scheduler/sched_main.c 		drm_sched_start_timeout(sched);
sched             464 drivers/gpu/drm/scheduler/sched_main.c 		spin_unlock_irqrestore(&sched->job_list_lock, flags);
sched             467 drivers/gpu/drm/scheduler/sched_main.c 	kthread_unpark(sched->thread);
sched             477 drivers/gpu/drm/scheduler/sched_main.c void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
sched             484 drivers/gpu/drm/scheduler/sched_main.c 	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
sched             487 drivers/gpu/drm/scheduler/sched_main.c 		if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
sched             496 drivers/gpu/drm/scheduler/sched_main.c 		fence = sched->ops->run_job(s_job);
sched             526 drivers/gpu/drm/scheduler/sched_main.c 	struct drm_gpu_scheduler *sched;
sched             532 drivers/gpu/drm/scheduler/sched_main.c 	sched = entity->rq->sched;
sched             534 drivers/gpu/drm/scheduler/sched_main.c 	job->sched = sched;
sched             536 drivers/gpu/drm/scheduler/sched_main.c 	job->s_priority = entity->rq - sched->sched_rq;
sched             540 drivers/gpu/drm/scheduler/sched_main.c 	job->id = atomic64_inc_return(&sched->job_id_count);
sched             567 drivers/gpu/drm/scheduler/sched_main.c static bool drm_sched_ready(struct drm_gpu_scheduler *sched)
sched             569 drivers/gpu/drm/scheduler/sched_main.c 	return atomic_read(&sched->hw_rq_count) <
sched             570 drivers/gpu/drm/scheduler/sched_main.c 		sched->hw_submission_limit;
sched             579 drivers/gpu/drm/scheduler/sched_main.c void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
sched             581 drivers/gpu/drm/scheduler/sched_main.c 	if (drm_sched_ready(sched))
sched             582 drivers/gpu/drm/scheduler/sched_main.c 		wake_up_interruptible(&sched->wake_up_worker);
sched             593 drivers/gpu/drm/scheduler/sched_main.c drm_sched_select_entity(struct drm_gpu_scheduler *sched)
sched             598 drivers/gpu/drm/scheduler/sched_main.c 	if (!drm_sched_ready(sched))
sched             603 drivers/gpu/drm/scheduler/sched_main.c 		entity = drm_sched_rq_select_entity(&sched->sched_rq[i]);
sched             623 drivers/gpu/drm/scheduler/sched_main.c 	struct drm_gpu_scheduler *sched = s_fence->sched;
sched             625 drivers/gpu/drm/scheduler/sched_main.c 	atomic_dec(&sched->hw_rq_count);
sched             626 drivers/gpu/drm/scheduler/sched_main.c 	atomic_dec(&sched->num_jobs);
sched             633 drivers/gpu/drm/scheduler/sched_main.c 	wake_up_interruptible(&sched->wake_up_worker);
sched             645 drivers/gpu/drm/scheduler/sched_main.c drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
sched             651 drivers/gpu/drm/scheduler/sched_main.c 	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
sched             652 drivers/gpu/drm/scheduler/sched_main.c 	    !cancel_delayed_work(&sched->work_tdr))
sched             655 drivers/gpu/drm/scheduler/sched_main.c 	spin_lock_irqsave(&sched->job_list_lock, flags);
sched             657 drivers/gpu/drm/scheduler/sched_main.c 	job = list_first_entry_or_null(&sched->ring_mirror_list,
sched             666 drivers/gpu/drm/scheduler/sched_main.c 		drm_sched_start_timeout(sched);
sched             669 drivers/gpu/drm/scheduler/sched_main.c 	spin_unlock_irqrestore(&sched->job_list_lock, flags);
sched             681 drivers/gpu/drm/scheduler/sched_main.c static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
sched             701 drivers/gpu/drm/scheduler/sched_main.c 	struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param;
sched             713 drivers/gpu/drm/scheduler/sched_main.c 		wait_event_interruptible(sched->wake_up_worker,
sched             714 drivers/gpu/drm/scheduler/sched_main.c 					 (cleanup_job = drm_sched_get_cleanup_job(sched)) ||
sched             715 drivers/gpu/drm/scheduler/sched_main.c 					 (!drm_sched_blocked(sched) &&
sched             716 drivers/gpu/drm/scheduler/sched_main.c 					  (entity = drm_sched_select_entity(sched))) ||
sched             720 drivers/gpu/drm/scheduler/sched_main.c 			sched->ops->free_job(cleanup_job);
sched             722 drivers/gpu/drm/scheduler/sched_main.c 			drm_sched_start_timeout(sched);
sched             734 drivers/gpu/drm/scheduler/sched_main.c 		atomic_inc(&sched->hw_rq_count);
sched             737 drivers/gpu/drm/scheduler/sched_main.c 		fence = sched->ops->run_job(sched_job);
sched             756 drivers/gpu/drm/scheduler/sched_main.c 		wake_up(&sched->job_scheduled);
sched             773 drivers/gpu/drm/scheduler/sched_main.c int drm_sched_init(struct drm_gpu_scheduler *sched,
sched             781 drivers/gpu/drm/scheduler/sched_main.c 	sched->ops = ops;
sched             782 drivers/gpu/drm/scheduler/sched_main.c 	sched->hw_submission_limit = hw_submission;
sched             783 drivers/gpu/drm/scheduler/sched_main.c 	sched->name = name;
sched             784 drivers/gpu/drm/scheduler/sched_main.c 	sched->timeout = timeout;
sched             785 drivers/gpu/drm/scheduler/sched_main.c 	sched->hang_limit = hang_limit;
sched             787 drivers/gpu/drm/scheduler/sched_main.c 		drm_sched_rq_init(sched, &sched->sched_rq[i]);
sched             789 drivers/gpu/drm/scheduler/sched_main.c 	init_waitqueue_head(&sched->wake_up_worker);
sched             790 drivers/gpu/drm/scheduler/sched_main.c 	init_waitqueue_head(&sched->job_scheduled);
sched             791 drivers/gpu/drm/scheduler/sched_main.c 	INIT_LIST_HEAD(&sched->ring_mirror_list);
sched             792 drivers/gpu/drm/scheduler/sched_main.c 	spin_lock_init(&sched->job_list_lock);
sched             793 drivers/gpu/drm/scheduler/sched_main.c 	atomic_set(&sched->hw_rq_count, 0);
sched             794 drivers/gpu/drm/scheduler/sched_main.c 	INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
sched             795 drivers/gpu/drm/scheduler/sched_main.c 	atomic_set(&sched->num_jobs, 0);
sched             796 drivers/gpu/drm/scheduler/sched_main.c 	atomic64_set(&sched->job_id_count, 0);
sched             799 drivers/gpu/drm/scheduler/sched_main.c 	sched->thread = kthread_run(drm_sched_main, sched, sched->name);
sched             800 drivers/gpu/drm/scheduler/sched_main.c 	if (IS_ERR(sched->thread)) {
sched             801 drivers/gpu/drm/scheduler/sched_main.c 		ret = PTR_ERR(sched->thread);
sched             802 drivers/gpu/drm/scheduler/sched_main.c 		sched->thread = NULL;
sched             807 drivers/gpu/drm/scheduler/sched_main.c 	sched->ready = true;
sched             819 drivers/gpu/drm/scheduler/sched_main.c void drm_sched_fini(struct drm_gpu_scheduler *sched)
sched             821 drivers/gpu/drm/scheduler/sched_main.c 	if (sched->thread)
sched             822 drivers/gpu/drm/scheduler/sched_main.c 		kthread_stop(sched->thread);
sched             824 drivers/gpu/drm/scheduler/sched_main.c 	sched->ready = false;
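Taken together, the sched_main.c and sched_entity.c hits outline the driver-facing lifecycle: a driver embeds a drm_gpu_scheduler, fills in drm_sched_backend_ops (dependency/run_job/timedout_job/free_job), and brackets it with drm_sched_init()/drm_sched_fini(), as the etnaviv, panfrost and v3d callers in this listing do. A hedged sketch of that wiring against this kernel's scheduler API follows; the my_* names are hypothetical placeholders, and the callback bodies are stubs rather than any driver's real implementation.

#include <drm/gpu_scheduler.h>
#include <linux/jiffies.h>

struct my_gpu {
	struct drm_gpu_scheduler sched;		/* one scheduler per ring/queue */
};

static struct dma_fence *my_job_dependency(struct drm_sched_job *sched_job,
					   struct drm_sched_entity *s_entity)
{
	return NULL;				/* no extra dependencies */
}

static struct dma_fence *my_job_run(struct drm_sched_job *sched_job)
{
	/* submit the job to hardware and return its hardware fence */
	return NULL;				/* placeholder */
}

static void my_job_timedout(struct drm_sched_job *sched_job)
{
	/*
	 * Typically: drm_sched_stop(), reset the hardware,
	 * drm_sched_resubmit_jobs(), drm_sched_start(), as in the
	 * amdgpu/etnaviv/panfrost/v3d hits in this listing.
	 */
}

static void my_job_free(struct drm_sched_job *sched_job)
{
	drm_sched_job_cleanup(sched_job);
}

static const struct drm_sched_backend_ops my_sched_ops = {
	.dependency	= my_job_dependency,
	.run_job	= my_job_run,
	.timedout_job	= my_job_timedout,
	.free_job	= my_job_free,
};

static int my_sched_setup(struct my_gpu *gpu)
{
	/* 4 in-flight jobs, no hang-limit retries, 500 ms job timeout */
	return drm_sched_init(&gpu->sched, &my_sched_ops,
			      4, 0, msecs_to_jiffies(500), "my_ring");
}

static void my_sched_teardown(struct my_gpu *gpu)
{
	drm_sched_fini(&gpu->sched);
}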
sched             150 drivers/gpu/drm/v3d/v3d_drv.c 		rq = &v3d->queue[i].sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
sched              35 drivers/gpu/drm/v3d/v3d_drv.h 	struct drm_gpu_scheduler sched;
sched             271 drivers/gpu/drm/v3d/v3d_sched.c 		drm_sched_stop(&v3d->queue[q].sched, sched_job);
sched             280 drivers/gpu/drm/v3d/v3d_sched.c 		drm_sched_resubmit_jobs(&v3d->queue[q].sched);
sched             284 drivers/gpu/drm/v3d/v3d_sched.c 		drm_sched_start(&v3d->queue[q].sched, true);
sched             400 drivers/gpu/drm/v3d/v3d_sched.c 	ret = drm_sched_init(&v3d->queue[V3D_BIN].sched,
sched             410 drivers/gpu/drm/v3d/v3d_sched.c 	ret = drm_sched_init(&v3d->queue[V3D_RENDER].sched,
sched             422 drivers/gpu/drm/v3d/v3d_sched.c 	ret = drm_sched_init(&v3d->queue[V3D_TFU].sched,
sched             435 drivers/gpu/drm/v3d/v3d_sched.c 		ret = drm_sched_init(&v3d->queue[V3D_CSD].sched,
sched             447 drivers/gpu/drm/v3d/v3d_sched.c 		ret = drm_sched_init(&v3d->queue[V3D_CACHE_CLEAN].sched,
sched             469 drivers/gpu/drm/v3d/v3d_sched.c 		if (v3d->queue[q].sched.ready)
sched             470 drivers/gpu/drm/v3d/v3d_sched.c 			drm_sched_fini(&v3d->queue[q].sched);
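
The drm_sched_init()/drm_sched_fini() hits above (sched_main.c plus the per-queue v3d callers) show the one-scheduler-per-ring pattern. Below is a minimal sketch of a driver wiring that up, assuming the six-argument drm_sched_init() of this kernel generation; the my_* names and the 64-job / 500 ms numbers are illustrative, not taken from the listing.

        #include <drm/gpu_scheduler.h>
        #include <linux/jiffies.h>

        static struct dma_fence *my_run_job(struct drm_sched_job *job);   /* push job to the HW ring */
        static void my_timedout_job(struct drm_sched_job *job);           /* TDR entry point */
        static void my_free_job(struct drm_sched_job *job);               /* reclaim the finished job */

        static const struct drm_sched_backend_ops my_sched_ops = {
                .run_job      = my_run_job,
                .timedout_job = my_timedout_job,
                .free_job     = my_free_job,
        };

        static int my_ring_sched_init(struct drm_gpu_scheduler *sched)
        {
                /* 64 jobs in flight, no extra hang retries, 500 ms TDR timeout */
                return drm_sched_init(sched, &my_sched_ops, 64, 0,
                                      msecs_to_jiffies(500), "my_ring");
        }

        static void my_ring_sched_fini(struct drm_gpu_scheduler *sched)
        {
                if (sched->ready)       /* same guard the v3d teardown uses above */
                        drm_sched_fini(sched);
        }
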
sched            2225 drivers/infiniband/hw/cxgb3/iwch_cm.c 	[CPL_ACT_ESTABLISH]	= sched,
sched            2226 drivers/infiniband/hw/cxgb3/iwch_cm.c 	[CPL_ACT_OPEN_RPL]	= sched,
sched            2227 drivers/infiniband/hw/cxgb3/iwch_cm.c 	[CPL_RX_DATA]		= sched,
sched            2228 drivers/infiniband/hw/cxgb3/iwch_cm.c 	[CPL_TX_DMA_ACK]	= sched,
sched            2229 drivers/infiniband/hw/cxgb3/iwch_cm.c 	[CPL_ABORT_RPL_RSS]	= sched,
sched            2230 drivers/infiniband/hw/cxgb3/iwch_cm.c 	[CPL_ABORT_RPL]		= sched,
sched            2231 drivers/infiniband/hw/cxgb3/iwch_cm.c 	[CPL_PASS_OPEN_RPL]	= sched,
sched            2232 drivers/infiniband/hw/cxgb3/iwch_cm.c 	[CPL_CLOSE_LISTSRV_RPL]	= sched,
sched            2233 drivers/infiniband/hw/cxgb3/iwch_cm.c 	[CPL_PASS_ACCEPT_REQ]	= sched,
sched            2234 drivers/infiniband/hw/cxgb3/iwch_cm.c 	[CPL_PASS_ESTABLISH]	= sched,
sched            2235 drivers/infiniband/hw/cxgb3/iwch_cm.c 	[CPL_PEER_CLOSE]	= sched,
sched            2236 drivers/infiniband/hw/cxgb3/iwch_cm.c 	[CPL_CLOSE_CON_RPL]	= sched,
sched            2237 drivers/infiniband/hw/cxgb3/iwch_cm.c 	[CPL_ABORT_REQ_RSS]	= sched,
sched            2238 drivers/infiniband/hw/cxgb3/iwch_cm.c 	[CPL_RDMA_TERMINATE]	= sched,
sched            2239 drivers/infiniband/hw/cxgb3/iwch_cm.c 	[CPL_RDMA_EC_STATUS]	= sched,
sched             145 drivers/infiniband/hw/cxgb4/cm.c static int sched(struct c4iw_dev *dev, struct sk_buff *skb);
sched             530 drivers/infiniband/hw/cxgb4/cm.c 	sched(ep->com.dev, skb);
sched            4389 drivers/infiniband/hw/cxgb4/cm.c 		sched(dev, skb);
sched            4423 drivers/infiniband/hw/cxgb4/cm.c 	sched(dev, skb);
sched            4432 drivers/infiniband/hw/cxgb4/cm.c 	[CPL_ACT_ESTABLISH] = sched,
sched            4433 drivers/infiniband/hw/cxgb4/cm.c 	[CPL_ACT_OPEN_RPL] = sched,
sched            4434 drivers/infiniband/hw/cxgb4/cm.c 	[CPL_RX_DATA] = sched,
sched            4435 drivers/infiniband/hw/cxgb4/cm.c 	[CPL_ABORT_RPL_RSS] = sched,
sched            4436 drivers/infiniband/hw/cxgb4/cm.c 	[CPL_ABORT_RPL] = sched,
sched            4437 drivers/infiniband/hw/cxgb4/cm.c 	[CPL_PASS_OPEN_RPL] = sched,
sched            4438 drivers/infiniband/hw/cxgb4/cm.c 	[CPL_CLOSE_LISTSRV_RPL] = sched,
sched            4439 drivers/infiniband/hw/cxgb4/cm.c 	[CPL_PASS_ACCEPT_REQ] = sched,
sched            4440 drivers/infiniband/hw/cxgb4/cm.c 	[CPL_PASS_ESTABLISH] = sched,
sched            4441 drivers/infiniband/hw/cxgb4/cm.c 	[CPL_PEER_CLOSE] = sched,
sched            4442 drivers/infiniband/hw/cxgb4/cm.c 	[CPL_CLOSE_CON_RPL] = sched,
sched            4444 drivers/infiniband/hw/cxgb4/cm.c 	[CPL_RDMA_TERMINATE] = sched,
sched            4445 drivers/infiniband/hw/cxgb4/cm.c 	[CPL_FW4_ACK] = sched,
sched            4447 drivers/infiniband/hw/cxgb4/cm.c 	[CPL_GET_TCB_RPL] = sched,
sched            4449 drivers/infiniband/hw/cxgb4/cm.c 	[CPL_RX_PKT] = sched
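
The iwch_cm.c and cm.c hits above are entries in per-opcode handler tables: nearly every CPL message is routed to the same sched() function, which defers the skb to process context. A hypothetical miniature of that designated-initializer dispatch pattern follows; the enum values and NR_CPL bound are made up for the sketch.

        #include <linux/skbuff.h>
        #include <linux/errno.h>

        struct c4iw_dev;                                        /* opaque for the sketch */

        enum { CPL_ACT_ESTABLISH, CPL_RX_DATA, NR_CPL };        /* made-up opcode values */

        typedef int (*cpl_handler_t)(struct c4iw_dev *dev, struct sk_buff *skb);

        static int sched(struct c4iw_dev *dev, struct sk_buff *skb);    /* queue skb for a worker */

        /* every deferred opcode points at the same handler */
        static cpl_handler_t work_handlers[NR_CPL] = {
                [CPL_ACT_ESTABLISH]     = sched,
                [CPL_RX_DATA]           = sched,
        };

        static int dispatch_cpl(struct c4iw_dev *dev, struct sk_buff *skb, unsigned int op)
        {
                if (op < NR_CPL && work_handlers[op])
                        return work_handlers[op](dev, skb);
                return -EOPNOTSUPP;     /* opcode with no handler registered */
        }
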
sched             154 drivers/infiniband/sw/rxe/rxe_task.c void rxe_run_task(struct rxe_task *task, int sched)
sched             159 drivers/infiniband/sw/rxe/rxe_task.c 	if (sched)
sched              88 drivers/infiniband/sw/rxe/rxe_task.h void rxe_run_task(struct rxe_task *task, int sched);
sched             122 drivers/input/joystick/sidewinder.c 	int timeout, bitout, sched, i, kick, start, strobe;
sched             132 drivers/input/joystick/sidewinder.c 	sched = 0;
sched             152 drivers/input/joystick/sidewinder.c 		sched--;
sched             165 drivers/input/joystick/sidewinder.c 			sched = kick;				/* Schedule second trigger */
sched             170 drivers/input/joystick/sidewinder.c 		if (pending && sched < 0 && (i > -SW_END)) {	/* Second trigger time */
sched             735 drivers/input/serio/hil_mlc.c 			goto sched;
sched             738 drivers/input/serio/hil_mlc.c 			 goto sched;
sched             741 drivers/input/serio/hil_mlc.c 	sched:
sched             271 drivers/net/ethernet/chelsio/cxgb/sge.c 	struct sched	*tx_sched;
sched             284 drivers/net/ethernet/chelsio/cxgb/sge.c 	struct sched *s = sge->tx_sched;
sched             300 drivers/net/ethernet/chelsio/cxgb/sge.c 	struct sched *s = sge->tx_sched;
sched             345 drivers/net/ethernet/chelsio/cxgb/sge.c 	struct sched *s = sge->tx_sched;
sched             360 drivers/net/ethernet/chelsio/cxgb/sge.c 	struct sched *s = sge->tx_sched;
sched             373 drivers/net/ethernet/chelsio/cxgb/sge.c 	struct sched *s;
sched             376 drivers/net/ethernet/chelsio/cxgb/sge.c 	s = kzalloc(sizeof (struct sched), GFP_KERNEL);
sched             399 drivers/net/ethernet/chelsio/cxgb/sge.c 	struct sched *s = sge->tx_sched;
sched             434 drivers/net/ethernet/chelsio/cxgb/sge.c 	struct sched *s = sge->tx_sched;
sched             732 drivers/net/ethernet/chelsio/cxgb3/common.h int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched);
sched             101 drivers/net/ethernet/chelsio/cxgb3/cxgb3_ioctl.h 	uint8_t sched;
sched             801 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 			    char *buf, int sched)
sched             808 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
sched             812 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	if (sched & 1)
sched             827 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 			     const char *buf, size_t len, int sched)
sched             844 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	ret = t3_config_sched(adap, val, sched);
sched             851 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c #define TM_ATTR(name, sched) \
sched             855 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	return tm_attr_show(d, buf, sched); \
sched             860 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	return tm_attr_store(d, buf, len, sched); \
sched             942 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
sched             958 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	req->sched = sched;
sched             889 drivers/net/ethernet/chelsio/cxgb3/t3_cpl.h 	__u8 sched;
sched            3036 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched)
sched            3062 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 		     A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
sched            3064 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 	if (sched & 1)
sched            1860 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h void t4_get_tx_sched(struct adapter *adap, unsigned int sched,
sched            10260 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c void t4_get_tx_sched(struct adapter *adap, unsigned int sched,
sched            10266 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 		addr = TP_TX_MOD_Q1_Q0_RATE_LIMIT_A - sched / 2;
sched            10268 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 		if (sched & 1)
sched            10280 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 		addr = TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR_A - sched / 2;
sched            10282 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 		if (sched & 1)
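
The tm_attr_show()/t3_config_sched()/t4_get_tx_sched() hits above all rely on the same addressing trick: each 32-bit TP register carries the 16-bit fields of two TX schedulers, so the register index is BASE - sched / 2 and odd-numbered schedulers live in the upper half-word. A small sketch of that unpacking, assuming one 16-bit field per scheduler; read_reg() stands in for the adapter-specific register read.

        /* Two schedulers share one 32-bit register: sched 0/1 in the first register,
         * sched 2/3 in the next, and so on.  Odd schedulers use bits 31:16. */
        static u16 sched_field(u32 base, unsigned int sched)
        {
                u32 v = read_reg(base - sched / 2);     /* placeholder register read */

                if (sched & 1)
                        v >>= 16;
                return v & 0xffff;
        }
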
sched            1141 drivers/net/ethernet/intel/ice/ice_sched.c 	vsi_node = vsi_ctx->sched.vsi_node[tc];
sched            1435 drivers/net/ethernet/intel/ice/ice_sched.c 	prev_numqs = vsi_ctx->sched.max_lanq[tc];
sched            1456 drivers/net/ethernet/intel/ice/ice_sched.c 	vsi_ctx->sched.max_lanq[tc] = new_numqs;
sched            1515 drivers/net/ethernet/intel/ice/ice_sched.c 		vsi_ctx->sched.vsi_node[tc] = vsi_node;
sched            1521 drivers/net/ethernet/intel/ice/ice_sched.c 		vsi_ctx->sched.max_lanq[tc] = 0;
sched            1646 drivers/net/ethernet/intel/ice/ice_sched.c 			vsi_ctx->sched.vsi_node[i] = NULL;
sched            1652 drivers/net/ethernet/intel/ice/ice_sched.c 			vsi_ctx->sched.max_lanq[i] = 0;
sched              29 drivers/net/ethernet/intel/ice/ice_switch.h 	struct ice_sched_vsi_info sched;
sched             694 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c 	u8 sched = *(u8 *)(inbox->buf + 64);
sched             700 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c 	port = (sched >> 6 & 1) + 1;
sched            3807 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c 	u8 sched = *(u8 *)(inbox->buf + 64);
sched            3810 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c 	port = (sched >> 6 & 1) + 1;
sched            1008 drivers/net/wireless/ath/ath9k/ath9k.h 	struct ath_chanctx_sched sched;
sched             261 drivers/net/wireless/ath/ath9k/channel.c 		if (likely(sc->sched.channel_switch_time))
sched             263 drivers/net/wireless/ath/ath9k/channel.c 				usecs_to_jiffies(sc->sched.channel_switch_time);
sched             311 drivers/net/wireless/ath/ath9k/channel.c 	ictx->flush_timeout = usecs_to_jiffies(sc->sched.channel_switch_time);
sched             382 drivers/net/wireless/ath/ath9k/channel.c 	mod_timer(&sc->sched.timer, jiffies + timeout);
sched             399 drivers/net/wireless/ath/ath9k/channel.c 	if (ctx->active && sc->sched.extend_absence) {
sched             401 drivers/net/wireless/ath/ath9k/channel.c 		sc->sched.extend_absence = false;
sched             408 drivers/net/wireless/ath/ath9k/channel.c 	if (ctx->active && sc->sched.beacon_miss >= 2) {
sched             410 drivers/net/wireless/ath/ath9k/channel.c 		sc->sched.extend_absence = true;
sched             423 drivers/net/wireless/ath/ath9k/channel.c 	avp->offchannel_duration = sc->sched.offchannel_duration;
sched             451 drivers/net/wireless/ath/ath9k/channel.c 	if (sc->sched.extend_absence)
sched             453 drivers/net/wireless/ath/ath9k/channel.c 			sc->sched.channel_switch_time;
sched             457 drivers/net/wireless/ath/ath9k/channel.c 			sc->sched.channel_switch_time;
sched             460 drivers/net/wireless/ath/ath9k/channel.c 	    sc->sched.extend_absence)
sched             484 drivers/net/wireless/ath/ath9k/channel.c 	avp->noa_duration = duration + sc->sched.channel_switch_time;
sched             513 drivers/net/wireless/ath/ath9k/channel.c 		chanctx_state_string(sc->sched.state),
sched             535 drivers/net/wireless/ath/ath9k/channel.c 		if (sc->sched.offchannel_pending && !sc->sched.wait_switch) {
sched             536 drivers/net/wireless/ath/ath9k/channel.c 			sc->sched.offchannel_pending = false;
sched             538 drivers/net/wireless/ath/ath9k/channel.c 			sc->sched.state = ATH_CHANCTX_STATE_WAIT_FOR_BEACON;
sched             544 drivers/net/wireless/ath/ath9k/channel.c 		if (ctx->active && sc->sched.state == ATH_CHANCTX_STATE_IDLE) {
sched             546 drivers/net/wireless/ath/ath9k/channel.c 			sc->sched.state = ATH_CHANCTX_STATE_WAIT_FOR_BEACON;
sched             552 drivers/net/wireless/ath/ath9k/channel.c 		if (sc->sched.state == ATH_CHANCTX_STATE_WAIT_FOR_TIMER) {
sched             553 drivers/net/wireless/ath/ath9k/channel.c 			sc->sched.state = ATH_CHANCTX_STATE_WAIT_FOR_BEACON;
sched             558 drivers/net/wireless/ath/ath9k/channel.c 		if (sc->sched.mgd_prepare_tx)
sched             559 drivers/net/wireless/ath/ath9k/channel.c 			sc->sched.state = ATH_CHANCTX_STATE_WAIT_FOR_BEACON;
sched             568 drivers/net/wireless/ath/ath9k/channel.c 		    sc->sched.state != ATH_CHANCTX_STATE_WAIT_FOR_BEACON) {
sched             576 drivers/net/wireless/ath/ath9k/channel.c 		if (sc->sched.state != ATH_CHANCTX_STATE_WAIT_FOR_BEACON)
sched             581 drivers/net/wireless/ath/ath9k/channel.c 		sc->sched.beacon_pending = true;
sched             582 drivers/net/wireless/ath/ath9k/channel.c 		sc->sched.next_tbtt = REG_READ(ah, AR_NEXT_TBTT_TIMER);
sched             588 drivers/net/wireless/ath/ath9k/channel.c 		tsf_time = sc->sched.next_tbtt + beacon_int / 4;
sched             589 drivers/net/wireless/ath/ath9k/channel.c 		sc->sched.switch_start_time = tsf_time;
sched             590 drivers/net/wireless/ath/ath9k/channel.c 		sc->cur_chan->last_beacon = sc->sched.next_tbtt;
sched             610 drivers/net/wireless/ath/ath9k/channel.c 		if (sc->sched.mgd_prepare_tx) {
sched             626 drivers/net/wireless/ath/ath9k/channel.c 		    (!avp->noa_duration || sc->sched.force_noa_update))
sched             630 drivers/net/wireless/ath/ath9k/channel.c 		if (ctx->active && sc->sched.force_noa_update)
sched             631 drivers/net/wireless/ath/ath9k/channel.c 			sc->sched.force_noa_update = false;
sched             635 drivers/net/wireless/ath/ath9k/channel.c 		if (!sc->sched.beacon_pending) {
sched             641 drivers/net/wireless/ath/ath9k/channel.c 		sc->sched.beacon_pending = false;
sched             643 drivers/net/wireless/ath/ath9k/channel.c 		if (sc->sched.mgd_prepare_tx) {
sched             644 drivers/net/wireless/ath/ath9k/channel.c 			sc->sched.mgd_prepare_tx = false;
sched             651 drivers/net/wireless/ath/ath9k/channel.c 		if (sc->sched.state != ATH_CHANCTX_STATE_WAIT_FOR_BEACON)
sched             657 drivers/net/wireless/ath/ath9k/channel.c 		sc->sched.state = ATH_CHANCTX_STATE_WAIT_FOR_TIMER;
sched             658 drivers/net/wireless/ath/ath9k/channel.c 		ath_chanctx_setup_timer(sc, sc->sched.switch_start_time);
sched             661 drivers/net/wireless/ath/ath9k/channel.c 		if (sc->sched.state != ATH_CHANCTX_STATE_WAIT_FOR_TIMER)
sched             665 drivers/net/wireless/ath/ath9k/channel.c 		    sc->sched.beacon_pending)
sched             666 drivers/net/wireless/ath/ath9k/channel.c 			sc->sched.beacon_miss++;
sched             671 drivers/net/wireless/ath/ath9k/channel.c 		sc->sched.state = ATH_CHANCTX_STATE_SWITCH;
sched             679 drivers/net/wireless/ath/ath9k/channel.c 		sc->sched.beacon_pending = false;
sched             680 drivers/net/wireless/ath/ath9k/channel.c 		sc->sched.beacon_miss = 0;
sched             682 drivers/net/wireless/ath/ath9k/channel.c 		if (sc->sched.state == ATH_CHANCTX_STATE_FORCE_ACTIVE ||
sched             683 drivers/net/wireless/ath/ath9k/channel.c 		    !sc->sched.beacon_adjust ||
sched             692 drivers/net/wireless/ath/ath9k/channel.c 		tsf_time = sc->sched.switch_start_time;
sched             697 drivers/net/wireless/ath/ath9k/channel.c 		sc->sched.beacon_adjust = false;
sched             701 drivers/net/wireless/ath/ath9k/channel.c 		if (sc->sched.state != ATH_CHANCTX_STATE_FORCE_ACTIVE ||
sched             708 drivers/net/wireless/ath/ath9k/channel.c 		sc->sched.state = ATH_CHANCTX_STATE_IDLE;
sched             712 drivers/net/wireless/ath/ath9k/channel.c 		    sc->sched.state == ATH_CHANCTX_STATE_FORCE_ACTIVE ||
sched             726 drivers/net/wireless/ath/ath9k/channel.c 		sc->sched.state = ATH_CHANCTX_STATE_WAIT_FOR_TIMER;
sched             727 drivers/net/wireless/ath/ath9k/channel.c 		sc->sched.wait_switch = false;
sched             731 drivers/net/wireless/ath/ath9k/channel.c 		if (sc->sched.extend_absence) {
sched             732 drivers/net/wireless/ath/ath9k/channel.c 			sc->sched.beacon_miss = 0;
sched             736 drivers/net/wireless/ath/ath9k/channel.c 		tsf_time -= sc->sched.channel_switch_time;
sched             738 drivers/net/wireless/ath/ath9k/channel.c 		sc->sched.switch_start_time = tsf_time;
sched             741 drivers/net/wireless/ath/ath9k/channel.c 		sc->sched.beacon_pending = true;
sched             742 drivers/net/wireless/ath/ath9k/channel.c 		sc->sched.beacon_adjust = true;
sched             756 drivers/net/wireless/ath/ath9k/channel.c 				sc->sched.state = ATH_CHANCTX_STATE_IDLE;
sched             761 drivers/net/wireless/ath/ath9k/channel.c 		sc->sched.state = ATH_CHANCTX_STATE_IDLE;
sched             780 drivers/net/wireless/ath/ath9k/channel.c 	if (sc->sched.beacon_pending)
sched             813 drivers/net/wireless/ath/ath9k/channel.c 		sc->sched.offchannel_pending = true;
sched             814 drivers/net/wireless/ath/ath9k/channel.c 		sc->sched.wait_switch = true;
sched             815 drivers/net/wireless/ath/ath9k/channel.c 		sc->sched.offchannel_duration =
sched             817 drivers/net/wireless/ath/ath9k/channel.c 			sc->sched.channel_switch_time;
sched             833 drivers/net/wireless/ath/ath9k/channel.c 		sc->sched.offchannel_duration =
sched             835 drivers/net/wireless/ath/ath9k/channel.c 			sc->sched.channel_switch_time;
sched             841 drivers/net/wireless/ath/ath9k/channel.c 				sc->sched.offchannel_duration);
sched             927 drivers/net/wireless/ath/ath9k/channel.c 		sc->sched.offchannel_pending = false;
sched             928 drivers/net/wireless/ath/ath9k/channel.c 		sc->sched.wait_switch = false;
sched             983 drivers/net/wireless/ath/ath9k/channel.c 		sc->sched.force_noa_update = true;
sched            1048 drivers/net/wireless/ath/ath9k/channel.c 	struct ath_softc *sc = from_timer(sc, t, sched.timer);
sched            1170 drivers/net/wireless/ath/ath9k/channel.c 	switch (sc->sched.state) {
sched            1180 drivers/net/wireless/ath/ath9k/channel.c 		sc->sched.state = ATH_CHANCTX_STATE_WAIT_FOR_BEACON;
sched            1289 drivers/net/wireless/ath/ath9k/channel.c 	if (!sc->sched.offchannel_pending)
sched            1290 drivers/net/wireless/ath/ath9k/channel.c 		sc->sched.offchannel_duration = 0;
sched            1292 drivers/net/wireless/ath/ath9k/channel.c 	if (sc->sched.state != ATH_CHANCTX_STATE_FORCE_ACTIVE)
sched            1293 drivers/net/wireless/ath/ath9k/channel.c 		sc->sched.state = ATH_CHANCTX_STATE_IDLE;
sched            1305 drivers/net/wireless/ath/ath9k/channel.c 			sc->sched.channel_switch_time =
sched            1366 drivers/net/wireless/ath/ath9k/channel.c 	timer_setup(&sc->sched.timer, ath_chanctx_timer, 0);
sched            1564 drivers/net/wireless/ath/ath9k/channel.c 	del_timer_sync(&sc->sched.timer);
sched             504 drivers/net/wireless/ath/ath9k/main.c 	bool sched = false;
sched             544 drivers/net/wireless/ath/ath9k/main.c 		sched = true;
sched             585 drivers/net/wireless/ath/ath9k/main.c 	if (sched) {
sched            2589 drivers/net/wireless/ath/ath9k/main.c 		sc->sched.mgd_prepare_tx = true;
sched            2605 drivers/net/wireless/ath/ath9k/main.c 			sc->sched.mgd_prepare_tx = false;
sched            2618 drivers/net/wireless/ath/ath9k/main.c 	sc->sched.state = ATH_CHANCTX_STATE_FORCE_ACTIVE;
sched             842 drivers/s390/cio/qdio_main.c 		goto sched;
sched             860 drivers/s390/cio/qdio_main.c sched:
sched             270 drivers/slimbus/core.c 	mutex_init(&ctrl->sched.m_reconf);
sched             271 drivers/slimbus/core.c 	init_completion(&ctrl->sched.pause_comp);
sched             500 drivers/slimbus/core.c 	if (ctrl->sched.clk_state != SLIM_CLK_ACTIVE) {
sched             502 drivers/slimbus/core.c 				    ctrl->sched.clk_state, ret);
sched             120 drivers/slimbus/messaging.c 	if (ctrl->sched.clk_state == SLIM_CLK_ENTERING_PAUSE &&
sched             128 drivers/slimbus/messaging.c 		if (ctrl->sched.clk_state != SLIM_CLK_ACTIVE) {
sched             130 drivers/slimbus/messaging.c 				ctrl->sched.clk_state, ret);
sched              29 drivers/slimbus/sched.c 	struct slim_sched *sched = &ctrl->sched;
sched              38 drivers/slimbus/sched.c 	mutex_lock(&sched->m_reconf);
sched              40 drivers/slimbus/sched.c 		if (sched->clk_state == SLIM_CLK_ACTIVE) {
sched              41 drivers/slimbus/sched.c 			mutex_unlock(&sched->m_reconf);
sched              49 drivers/slimbus/sched.c 		ret = wait_for_completion_timeout(&sched->pause_comp,
sched              52 drivers/slimbus/sched.c 			mutex_unlock(&sched->m_reconf);
sched              63 drivers/slimbus/sched.c 		if (sched->clk_state == SLIM_CLK_PAUSED && ctrl->wakeup)
sched              66 drivers/slimbus/sched.c 			sched->clk_state = SLIM_CLK_ACTIVE;
sched              67 drivers/slimbus/sched.c 		mutex_unlock(&sched->m_reconf);
sched              73 drivers/slimbus/sched.c 	if (ctrl->sched.clk_state == SLIM_CLK_PAUSED) {
sched              74 drivers/slimbus/sched.c 		mutex_unlock(&sched->m_reconf);
sched              83 drivers/slimbus/sched.c 			mutex_unlock(&sched->m_reconf);
sched              89 drivers/slimbus/sched.c 	sched->clk_state = SLIM_CLK_ENTERING_PAUSE;
sched             112 drivers/slimbus/sched.c 		sched->clk_state = SLIM_CLK_ACTIVE;
sched             114 drivers/slimbus/sched.c 		sched->clk_state = SLIM_CLK_PAUSED;
sched             115 drivers/slimbus/sched.c 		complete(&sched->pause_comp);
sched             117 drivers/slimbus/sched.c 	mutex_unlock(&sched->m_reconf);
sched             413 drivers/slimbus/slimbus.h 	struct slim_sched	sched;
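
The slimbus hits above implement clock pause as a small state machine guarded by sched.m_reconf: the pause path moves clk_state to SLIM_CLK_ENTERING_PAUSE and, on success, to SLIM_CLK_PAUSED while signalling pause_comp; the unpause path waits on that completion and then calls the controller's wakeup op. A condensed sketch of the unpause side, assuming the driver-internal slimbus.h types visible above (this is not the real slim_ctrl_clk_pause(), just its locking/completion shape).

        static int clk_unpause(struct slim_controller *ctrl)
        {
                struct slim_sched *sched = &ctrl->sched;
                int ret = 0;

                mutex_lock(&sched->m_reconf);
                if (sched->clk_state == SLIM_CLK_ACTIVE) {
                        mutex_unlock(&sched->m_reconf);
                        return 0;                       /* nothing to undo */
                }

                /* a pause may still be in flight; let it settle first */
                if (!wait_for_completion_timeout(&sched->pause_comp,
                                                 msecs_to_jiffies(100))) {
                        mutex_unlock(&sched->m_reconf);
                        return -ETIMEDOUT;
                }

                if (sched->clk_state == SLIM_CLK_PAUSED && ctrl->wakeup)
                        ret = ctrl->wakeup(ctrl);       /* controller-specific wake */
                if (!ret)
                        sched->clk_state = SLIM_CLK_ACTIVE;

                mutex_unlock(&sched->m_reconf);
                return ret;
        }
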
sched            2584 drivers/staging/wlan-ng/hfa384x_usb.c 		int sched;
sched            2586 drivers/staging/wlan-ng/hfa384x_usb.c 		sched = !test_and_set_bit(WORK_TX_HALT, &hw->usb_flags);
sched            2587 drivers/staging/wlan-ng/hfa384x_usb.c 		sched |= !test_and_set_bit(WORK_RX_HALT, &hw->usb_flags);
sched            2588 drivers/staging/wlan-ng/hfa384x_usb.c 		if (sched)
sched             996 drivers/usb/gadget/function/u_serial.c 			goto sched;
sched            1024 drivers/usb/gadget/function/u_serial.c sched:
sched            1259 drivers/usb/host/ehci-sched.c 	struct ehci_iso_sched	*sched;
sched            1262 drivers/usb/host/ehci-sched.c 	sched = iso_sched_alloc(urb->number_of_packets, mem_flags);
sched            1263 drivers/usb/host/ehci-sched.c 	if (unlikely(sched == NULL))
sched            1266 drivers/usb/host/ehci-sched.c 	itd_sched_init(ehci, sched, stream, urb);
sched            1269 drivers/usb/host/ehci-sched.c 		num_itds = 1 + (sched->span + 7) / 8;
sched            1295 drivers/usb/host/ehci-sched.c 				iso_sched_free(stream, sched);
sched            1304 drivers/usb/host/ehci-sched.c 		list_add(&itd->itd_list, &sched->td_list);
sched            1309 drivers/usb/host/ehci-sched.c 	urb->hcpriv = sched;
sched            1397 drivers/usb/host/ehci-sched.c 	struct ehci_iso_sched	*sched,
sched            1490 drivers/usb/host/ehci-sched.c 	struct ehci_iso_sched	*sched = urb->hcpriv;
sched            1495 drivers/usb/host/ehci-sched.c 	span = sched->span;
sched            1533 drivers/usb/host/ehci-sched.c 							sched, tt))
sched            1634 drivers/usb/host/ehci-sched.c 			iso_sched_free(stream, sched);
sched            1635 drivers/usb/host/ehci-sched.c 			sched = NULL;
sched            1639 drivers/usb/host/ehci-sched.c 	if (sched)
sched            1640 drivers/usb/host/ehci-sched.c 		sched->first_packet = urb->error_count;
sched            1666 drivers/usb/host/ehci-sched.c 	iso_sched_free(stream, sched);
sched            2160 drivers/usb/host/ehci-sched.c 	struct ehci_iso_sched	*sched = urb->hcpriv;
sched            2178 drivers/usb/host/ehci-sched.c 	for (packet = sched->first_packet, sitd = NULL;
sched            2183 drivers/usb/host/ehci-sched.c 		BUG_ON(list_empty(&sched->td_list));
sched            2187 drivers/usb/host/ehci-sched.c 		sitd = list_entry(sched->td_list.next,
sched            2193 drivers/usb/host/ehci-sched.c 		sitd_patch(ehci, stream, sitd, sched, packet);
sched            2202 drivers/usb/host/ehci-sched.c 	iso_sched_free(stream, sched);
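
The ehci-sched.c hits above size the iTD pool from sched->span: each iTD covers one frame (8 microframes), plus one extra because the schedule need not start on a frame boundary, hence num_itds = 1 + (span + 7) / 8. A tiny worked sketch of that arithmetic (span in microframes, as in the listing):

        /* e.g. an isochronous URB spanning 20 microframes:
         *   num_itds = 1 + (20 + 7) / 8 = 1 + 3 = 4
         * one iTD per frame touched, plus one for the unaligned start. */
        static unsigned int itds_needed(unsigned int span_uframes)
        {
                return 1 + (span_uframes + 7) / 8;
        }
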
sched            4084 drivers/usb/host/fotg210-hcd.c 	struct fotg210_iso_sched *sched;
sched            4087 drivers/usb/host/fotg210-hcd.c 	sched = iso_sched_alloc(urb->number_of_packets, mem_flags);
sched            4088 drivers/usb/host/fotg210-hcd.c 	if (unlikely(sched == NULL))
sched            4091 drivers/usb/host/fotg210-hcd.c 	itd_sched_init(fotg210, sched, stream, urb);
sched            4094 drivers/usb/host/fotg210-hcd.c 		num_itds = 1 + (sched->span + 7) / 8;
sched            4120 drivers/usb/host/fotg210-hcd.c 				iso_sched_free(stream, sched);
sched            4127 drivers/usb/host/fotg210-hcd.c 		list_add(&itd->itd_list, &sched->td_list);
sched            4132 drivers/usb/host/fotg210-hcd.c 	urb->hcpriv = sched;
sched            4171 drivers/usb/host/fotg210-hcd.c 	struct fotg210_iso_sched *sched = urb->hcpriv;
sched            4174 drivers/usb/host/fotg210-hcd.c 	span = sched->span;
sched            4281 drivers/usb/host/fotg210-hcd.c 	iso_sched_free(stream, sched);
sched             533 include/asm-generic/vmlinux.lds.h 		*(.sched.text)						\
sched             113 include/drm/gpu_scheduler.h 	struct drm_gpu_scheduler	*sched;
sched             150 include/drm/gpu_scheduler.h 	struct drm_gpu_scheduler	*sched;
sched             185 include/drm/gpu_scheduler.h 	struct drm_gpu_scheduler	*sched;
sched             286 include/drm/gpu_scheduler.h int drm_sched_init(struct drm_gpu_scheduler *sched,
sched             291 include/drm/gpu_scheduler.h void drm_sched_fini(struct drm_gpu_scheduler *sched);
sched             296 include/drm/gpu_scheduler.h void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
sched             297 include/drm/gpu_scheduler.h void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
sched             298 include/drm/gpu_scheduler.h void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery);
sched             299 include/drm/gpu_scheduler.h void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
sched             303 include/drm/gpu_scheduler.h void drm_sched_fault(struct drm_gpu_scheduler *sched);
sched             331 include/drm/gpu_scheduler.h unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
sched             332 include/drm/gpu_scheduler.h void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
sched            1386 include/net/ip_vs.h 			    struct ip_vs_scheduler *sched);
sched              45 include/net/sctp/stream_sched.h 			 enum sctp_sched_type sched);
sched              57 include/net/sctp/stream_sched.h void sctp_sched_ops_register(enum sctp_sched_type sched,
sched            1063 include/net/sctp/structs.h 	struct sctp_sched_ops *sched;
sched               3 include/trace/events/sched.h #define TRACE_SYSTEM sched
sched             193 include/trace/events/xdp.h 		 int sched),
sched             195 include/trace/events/xdp.h 	TP_ARGS(map_id, processed, drops, sched),
sched             203 include/trace/events/xdp.h 		__field(int, sched)
sched             212 include/trace/events/xdp.h 		__entry->sched	= sched;
sched             222 include/trace/events/xdp.h 		  __entry->sched)
sched             261 kernel/bpf/cpumap.c 		unsigned int drops = 0, sched = 0;
sched             273 kernel/bpf/cpumap.c 				sched = 1;
sched             278 kernel/bpf/cpumap.c 			sched = cond_resched();
sched             324 kernel/bpf/cpumap.c 		trace_xdp_cpumap_kthread(rcpu->map_id, n, drops, sched);
sched             152 net/ipv6/ip6_flowlabel.c 	unsigned long sched = 0;
sched             174 net/ipv6/ip6_flowlabel.c 				if (!sched || time_before(ttd, sched))
sched             175 net/ipv6/ip6_flowlabel.c 					sched = ttd;
sched             180 net/ipv6/ip6_flowlabel.c 	if (!sched && atomic_read(&fl_size))
sched             181 net/ipv6/ip6_flowlabel.c 		sched = now + FL_MAX_LINGER;
sched             182 net/ipv6/ip6_flowlabel.c 	if (sched) {
sched             183 net/ipv6/ip6_flowlabel.c 		mod_timer(&ip6_fl_gc_timer, sched);
sched             354 net/netfilter/ipvs/ip_vs_core.c 		struct ip_vs_scheduler *sched;
sched             361 net/netfilter/ipvs/ip_vs_core.c 		sched = rcu_dereference(svc->scheduler);
sched             362 net/netfilter/ipvs/ip_vs_core.c 		if (sched) {
sched             365 net/netfilter/ipvs/ip_vs_core.c 			dest = sched->schedule(svc, skb, iph);
sched             459 net/netfilter/ipvs/ip_vs_core.c 	struct ip_vs_scheduler *sched;
sched             536 net/netfilter/ipvs/ip_vs_core.c 	sched = rcu_dereference(svc->scheduler);
sched             537 net/netfilter/ipvs/ip_vs_core.c 	if (sched) {
sched             540 net/netfilter/ipvs/ip_vs_core.c 		dest = sched->schedule(svc, skb, iph);
sched             863 net/netfilter/ipvs/ip_vs_ctl.c 	struct ip_vs_scheduler *sched;
sched             935 net/netfilter/ipvs/ip_vs_ctl.c 		sched = rcu_dereference_protected(svc->scheduler, 1);
sched             936 net/netfilter/ipvs/ip_vs_ctl.c 		if (sched && sched->add_dest)
sched             937 net/netfilter/ipvs/ip_vs_ctl.c 			sched->add_dest(svc, dest);
sched             939 net/netfilter/ipvs/ip_vs_ctl.c 		sched = rcu_dereference_protected(svc->scheduler, 1);
sched             940 net/netfilter/ipvs/ip_vs_ctl.c 		if (sched && sched->upd_dest)
sched             941 net/netfilter/ipvs/ip_vs_ctl.c 			sched->upd_dest(svc, dest);
sched            1188 net/netfilter/ipvs/ip_vs_ctl.c 		struct ip_vs_scheduler *sched;
sched            1190 net/netfilter/ipvs/ip_vs_ctl.c 		sched = rcu_dereference_protected(svc->scheduler, 1);
sched            1191 net/netfilter/ipvs/ip_vs_ctl.c 		if (sched && sched->del_dest)
sched            1192 net/netfilter/ipvs/ip_vs_ctl.c 			sched->del_dest(svc, dest);
sched            1272 net/netfilter/ipvs/ip_vs_ctl.c 	struct ip_vs_scheduler *sched = NULL;
sched            1282 net/netfilter/ipvs/ip_vs_ctl.c 		sched = ip_vs_scheduler_get(u->sched_name);
sched            1283 net/netfilter/ipvs/ip_vs_ctl.c 		if (!sched) {
sched            1353 net/netfilter/ipvs/ip_vs_ctl.c 	if (sched) {
sched            1354 net/netfilter/ipvs/ip_vs_ctl.c 		ret = ip_vs_bind_scheduler(svc, sched);
sched            1357 net/netfilter/ipvs/ip_vs_ctl.c 		sched = NULL;
sched            1389 net/netfilter/ipvs/ip_vs_ctl.c 		ip_vs_unbind_scheduler(svc, sched);
sched            1392 net/netfilter/ipvs/ip_vs_ctl.c 	ip_vs_scheduler_put(sched);
sched            1408 net/netfilter/ipvs/ip_vs_ctl.c 	struct ip_vs_scheduler *sched = NULL, *old_sched;
sched            1417 net/netfilter/ipvs/ip_vs_ctl.c 		sched = ip_vs_scheduler_get(u->sched_name);
sched            1418 net/netfilter/ipvs/ip_vs_ctl.c 		if (!sched) {
sched            1424 net/netfilter/ipvs/ip_vs_ctl.c 	old_sched = sched;
sched            1449 net/netfilter/ipvs/ip_vs_ctl.c 	if (sched != old_sched) {
sched            1457 net/netfilter/ipvs/ip_vs_ctl.c 		if (sched) {
sched            1458 net/netfilter/ipvs/ip_vs_ctl.c 			ret = ip_vs_bind_scheduler(svc, sched);
sched            1460 net/netfilter/ipvs/ip_vs_ctl.c 				ip_vs_scheduler_put(sched);
sched            2132 net/netfilter/ipvs/ip_vs_ctl.c 		struct ip_vs_scheduler *sched = rcu_dereference(svc->scheduler);
sched            2133 net/netfilter/ipvs/ip_vs_ctl.c 		char *sched_name = sched ? sched->name : "none";
sched            2558 net/netfilter/ipvs/ip_vs_ctl.c 	struct ip_vs_scheduler *sched;
sched            2562 net/netfilter/ipvs/ip_vs_ctl.c 	sched = rcu_dereference_protected(src->scheduler, 1);
sched            2563 net/netfilter/ipvs/ip_vs_ctl.c 	sched_name = sched ? sched->name : "none";
sched            3032 net/netfilter/ipvs/ip_vs_ctl.c 	struct ip_vs_scheduler *sched;
sched            3056 net/netfilter/ipvs/ip_vs_ctl.c 	sched = rcu_dereference_protected(svc->scheduler, 1);
sched            3057 net/netfilter/ipvs/ip_vs_ctl.c 	sched_name = sched ? sched->name : "none";
sched              61 net/netfilter/ipvs/ip_vs_sched.c 			    struct ip_vs_scheduler *sched)
sched              70 net/netfilter/ipvs/ip_vs_sched.c 	if (sched->done_service)
sched              71 net/netfilter/ipvs/ip_vs_sched.c 		sched->done_service(svc);
sched              81 net/netfilter/ipvs/ip_vs_sched.c 	struct ip_vs_scheduler *sched;
sched              87 net/netfilter/ipvs/ip_vs_sched.c 	list_for_each_entry(sched, &ip_vs_schedulers, n_list) {
sched              91 net/netfilter/ipvs/ip_vs_sched.c 		if (sched->module && !try_module_get(sched->module)) {
sched              97 net/netfilter/ipvs/ip_vs_sched.c 		if (strcmp(sched_name, sched->name)==0) {
sched             100 net/netfilter/ipvs/ip_vs_sched.c 			return sched;
sched             102 net/netfilter/ipvs/ip_vs_sched.c 		module_put(sched->module);
sched             115 net/netfilter/ipvs/ip_vs_sched.c 	struct ip_vs_scheduler *sched;
sched             120 net/netfilter/ipvs/ip_vs_sched.c 	sched = ip_vs_sched_getbyname(sched_name);
sched             125 net/netfilter/ipvs/ip_vs_sched.c 	if (sched == NULL) {
sched             127 net/netfilter/ipvs/ip_vs_sched.c 		sched = ip_vs_sched_getbyname(sched_name);
sched             130 net/netfilter/ipvs/ip_vs_sched.c 	return sched;
sched             145 net/netfilter/ipvs/ip_vs_sched.c 	struct ip_vs_scheduler *sched = rcu_dereference(svc->scheduler);
sched             146 net/netfilter/ipvs/ip_vs_sched.c 	char *sched_name = sched ? sched->name : "none";
sched             169 net/netfilter/ipvs/ip_vs_sched.c 	struct ip_vs_scheduler *sched;
sched             199 net/netfilter/ipvs/ip_vs_sched.c 	list_for_each_entry(sched, &ip_vs_schedulers, n_list) {
sched             200 net/netfilter/ipvs/ip_vs_sched.c 		if (strcmp(scheduler->name, sched->name) == 0) {
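
The ip_vs_core.c and ip_vs_sched.c hits above show the two halves of the IPVS scheduler contract: the packet path reads svc->scheduler under RCU and calls sched->schedule() to pick a real server, while the control path looks schedulers up by name with a module reference held and binds or unbinds them. A stripped-down sketch of the packet-path half, using only the fields visible above (the explicit rcu_read_lock() is for the standalone sketch; in the real hooks the read side is already held).

        static struct ip_vs_dest *pick_dest(struct ip_vs_service *svc,
                                            struct sk_buff *skb,
                                            struct ip_vs_iphdr *iph)
        {
                struct ip_vs_scheduler *sched;
                struct ip_vs_dest *dest = NULL;

                rcu_read_lock();
                sched = rcu_dereference(svc->scheduler);        /* may be NULL while rebinding */
                if (sched)
                        dest = sched->schedule(svc, skb, iph);  /* e.g. rr, wlc, sh ... */
                rcu_read_unlock();

                return dest;
        }
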
sched             449 net/sched/sch_cbq.c 	psched_time_t sched = now;
sched             472 net/sched/sch_cbq.c 		} else if (sched - cl->penalized > 0)
sched             473 net/sched/sch_cbq.c 			sched = cl->penalized;
sched             476 net/sched/sch_cbq.c 	return sched - now;
sched             122 net/sched/sch_hfsc.c 	struct hfsc_sched *sched;	/* scheduler data */
sched             188 net/sched/sch_hfsc.c 	struct rb_node **p = &cl->sched->eligible.rb_node;
sched             201 net/sched/sch_hfsc.c 	rb_insert_color(&cl->el_node, &cl->sched->eligible);
sched             207 net/sched/sch_hfsc.c 	rb_erase(&cl->el_node, &cl->sched->eligible);
sched            1054 net/sched/sch_hfsc.c 	cl->sched     = q;
sched            1409 net/sched/sch_hfsc.c 	q->root.sched   = q;
sched              89 net/sched/sch_taprio.c static ktime_t sched_base_time(const struct sched_gate_list *sched)
sched              91 net/sched/sch_taprio.c 	if (!sched)
sched              94 net/sched/sch_taprio.c 	return ns_to_ktime(sched->base_time);
sched             113 net/sched/sch_taprio.c 	struct sched_gate_list *sched = container_of(head, struct sched_gate_list, rcu);
sched             116 net/sched/sch_taprio.c 	if (!sched)
sched             119 net/sched/sch_taprio.c 	list_for_each_entry_safe(entry, n, &sched->entries, list) {
sched             124 net/sched/sch_taprio.c 	kfree(sched);
sched             142 net/sched/sch_taprio.c static s32 get_cycle_time_elapsed(struct sched_gate_list *sched, ktime_t time)
sched             147 net/sched/sch_taprio.c 	time_since_sched_start = ktime_sub(time, sched->base_time);
sched             148 net/sched/sch_taprio.c 	div_s64_rem(time_since_sched_start, sched->cycle_time, &time_elapsed);
sched             153 net/sched/sch_taprio.c static ktime_t get_interval_end_time(struct sched_gate_list *sched,
sched             158 net/sched/sch_taprio.c 	s32 cycle_elapsed = get_cycle_time_elapsed(sched, intv_start);
sched             161 net/sched/sch_taprio.c 	cycle_end = ktime_add_ns(intv_start, sched->cycle_time - cycle_elapsed);
sched             163 net/sched/sch_taprio.c 	cycle_ext_end = ktime_add(cycle_end, sched->cycle_time_extension);
sched             167 net/sched/sch_taprio.c 	else if (admin && admin != sched &&
sched             186 net/sched/sch_taprio.c 						  struct sched_gate_list *sched,
sched             208 net/sched/sch_taprio.c 	if (!sched)
sched             211 net/sched/sch_taprio.c 	cycle = sched->cycle_time;
sched             212 net/sched/sch_taprio.c 	cycle_elapsed = get_cycle_time_elapsed(sched, time);
sched             216 net/sched/sch_taprio.c 	list_for_each_entry(entry, &sched->entries, list) {
sched             218 net/sched/sch_taprio.c 		curr_intv_end = get_interval_end_time(sched, admin, entry,
sched             264 net/sched/sch_taprio.c 	struct sched_gate_list *sched, *admin;
sched             269 net/sched/sch_taprio.c 	sched = rcu_dereference(q->oper_sched);
sched             272 net/sched/sch_taprio.c 	entry = find_entry_to_transmit(skb, sch, sched, admin, skb->tstamp,
sched             346 net/sched/sch_taprio.c 	struct sched_gate_list *sched, *admin;
sched             360 net/sched/sch_taprio.c 	sched = rcu_dereference(q->oper_sched);
sched             362 net/sched/sch_taprio.c 		switch_schedules(q, &admin, &sched);
sched             365 net/sched/sch_taprio.c 	if (!sched || ktime_before(minimum_time, sched->base_time)) {
sched             376 net/sched/sch_taprio.c 		entry = find_entry_to_transmit(skb, sch, sched, admin,
sched             389 net/sched/sch_taprio.c 		if (admin && admin != sched &&
sched             391 net/sched/sch_taprio.c 			sched = admin;
sched             403 net/sched/sch_taprio.c 			entry->next_txtime = ktime_add(interval_start, sched->cycle_time);
sched             826 net/sched/sch_taprio.c 			    struct sched_gate_list *sched,
sched             856 net/sched/sch_taprio.c 		list_add_tail(&entry->list, &sched->entries);
sched             860 net/sched/sch_taprio.c 	sched->num_entries = i;
sched             970 net/sched/sch_taprio.c 				 struct sched_gate_list *sched,
sched             977 net/sched/sch_taprio.c 	base = sched_base_time(sched);
sched             985 net/sched/sch_taprio.c 	cycle = sched->cycle_time;
sched            1004 net/sched/sch_taprio.c 				   struct sched_gate_list *sched, ktime_t base)
sched            1009 net/sched/sch_taprio.c 	first = list_first_entry(&sched->entries,
sched            1012 net/sched/sch_taprio.c 	cycle = sched->cycle_time;
sched            1015 net/sched/sch_taprio.c 	sched->cycle_close_time = ktime_add_ns(base, cycle);
sched            1098 net/sched/sch_taprio.c 			 struct sched_gate_list *sched, ktime_t base)
sched            1103 net/sched/sch_taprio.c 	list_for_each_entry(entry, &sched->entries, list) {
sched            1181 net/sched/sch_taprio.c 				    struct sched_gate_list *sched,
sched            1188 net/sched/sch_taprio.c 	offload->base_time = sched->base_time;
sched            1189 net/sched/sch_taprio.c 	offload->cycle_time = sched->cycle_time;
sched            1190 net/sched/sch_taprio.c 	offload->cycle_time_extension = sched->cycle_time_extension;
sched            1192 net/sched/sch_taprio.c 	list_for_each_entry(entry, &sched->entries, list) {
sched            1207 net/sched/sch_taprio.c 				 struct sched_gate_list *sched,
sched            1220 net/sched/sch_taprio.c 	offload = taprio_offload_alloc(sched->num_entries);
sched            1227 net/sched/sch_taprio.c 	taprio_sched_to_offload(q, sched, mqprio, offload);
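
The taprio hits above keep all interval math relative to the schedule's base_time: get_cycle_time_elapsed() reduces an absolute timestamp to an offset inside the running cycle, which the entry walk then compares against each gate interval. The arithmetic, as a small worked sketch (times in nanoseconds, as in sch_taprio.c):

        #include <linux/ktime.h>
        #include <linux/math64.h>

        /* offset of 'now' inside the running cycle:
         *   elapsed = (now - base_time) mod cycle_time
         * e.g. base_time = 1,000,000 ns, cycle_time = 500,000 ns, now = 2,300,000 ns
         *   -> since start = 1,300,000 ns -> elapsed = 300,000 ns into the cycle */
        static s32 cycle_time_elapsed(ktime_t base_time, s64 cycle_time, ktime_t now)
        {
                s32 elapsed;

                div_s64_rem(ktime_sub(now, base_time), cycle_time, &elapsed);
                return elapsed;
        }
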
sched              75 net/sctp/outqueue.c 	return q->sched->dequeue(q);
sched             387 net/sctp/outqueue.c 	q->sched->unsched_all(&asoc->stream);
sched             413 net/sctp/outqueue.c 	q->sched->sched_all(&asoc->stream);
sched            1101 net/sctp/sm_sideeffect.c 	asoc->outqueue.sched->enqueue(&asoc->outqueue, msg);
sched             113 net/sctp/stream.c 	struct sctp_sched_ops *sched = sctp_sched_ops_from_stream(stream);
sched             125 net/sctp/stream.c 	sched->unsched_all(stream);
sched             127 net/sctp/stream.c 	sched->sched_all(stream);
sched             148 net/sctp/stream.c 	sched->free(stream);
sched             178 net/sctp/stream.c 	struct sctp_sched_ops *sched = sctp_sched_ops_from_stream(stream);
sched             181 net/sctp/stream.c 	sched->free(stream);
sched             203 net/sctp/stream.c 	struct sctp_sched_ops *sched = sctp_sched_ops_from_stream(stream);
sched             205 net/sctp/stream.c 	sched->unsched_all(stream);
sched             214 net/sctp/stream.c 	sched->sched_all(stream);
sched             116 net/sctp/stream_sched.c void sctp_sched_ops_register(enum sctp_sched_type sched,
sched             119 net/sctp/stream_sched.c 	sctp_sched_ops[sched] = sched_ops;
sched             130 net/sctp/stream_sched.c 			 enum sctp_sched_type sched)
sched             132 net/sctp/stream_sched.c 	struct sctp_sched_ops *n = sctp_sched_ops[sched];
sched             133 net/sctp/stream_sched.c 	struct sctp_sched_ops *old = asoc->outqueue.sched;
sched             141 net/sctp/stream_sched.c 	if (sched > SCTP_SS_MAX)
sched             160 net/sctp/stream_sched.c 	asoc->outqueue.sched = n;
sched             183 net/sctp/stream_sched.c 	asoc->outqueue.sched = &sctp_sched_fcfs; /* Always safe */
sched             193 net/sctp/stream_sched.c 		if (asoc->outqueue.sched == sctp_sched_ops[i])
sched             213 net/sctp/stream_sched.c 	return asoc->outqueue.sched->set(&asoc->stream, sid, value, gfp);
sched             225 net/sctp/stream_sched.c 	return asoc->outqueue.sched->get(&asoc->stream, sid, value);
sched             246 net/sctp/stream_sched.c 	q->sched->dequeue_done(q, ch);
sched             259 net/sctp/stream_sched.c 	struct sctp_sched_ops *sched = sctp_sched_ops_from_stream(stream);
sched             263 net/sctp/stream_sched.c 	return sched->init_sid(stream, sid, gfp);
sched             272 net/sctp/stream_sched.c 	return asoc->outqueue.sched;
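
The stream_sched.c hits above show how pluggable SCTP stream schedulers are wired in: each scheduler registers a sctp_sched_ops table into an array indexed by enum sctp_sched_type, and switching an association swaps the outqueue's ops after unscheduling everything under the old one. A hypothetical miniature of the registration/lookup half (the ops struct is reduced to a name; real callbacks are elided):

        /* made-up miniature of the ops registry keyed by enum sctp_sched_type */
        enum sctp_sched_type { SCTP_SS_FCFS, SCTP_SS_PRIO, SCTP_SS_RR, SCTP_SS_MAX = SCTP_SS_RR };

        struct sctp_sched_ops {
                const char *name;       /* enqueue/dequeue/... callbacks elided for the sketch */
        };

        static struct sctp_sched_ops *sctp_sched_ops[SCTP_SS_MAX + 1];

        static void sched_ops_register(enum sctp_sched_type sched,
                                       struct sctp_sched_ops *ops)
        {
                sctp_sched_ops[sched] = ops;            /* one slot per scheduler type */
        }

        static struct sctp_sched_ops *sched_ops_get(enum sctp_sched_type sched)
        {
                return sched > SCTP_SS_MAX ? NULL : sctp_sched_ops[sched];
        }
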
sched            1194 net/unix/af_unix.c 	int sched;
sched            1199 net/unix/af_unix.c 	sched = !sock_flag(other, SOCK_DEAD) &&
sched            1205 net/unix/af_unix.c 	if (sched)
sched             191 samples/bpf/xdp_monitor_kern.c 	int sched;		//	offset:28; size:4; signed:1;
sched             207 samples/bpf/xdp_monitor_kern.c 	if (ctx->sched)
sched             701 samples/bpf/xdp_redirect_cpu_kern.c 	int sched;		//	offset:28; size:4; signed:1;
sched             717 samples/bpf/xdp_redirect_cpu_kern.c 	if (ctx->sched)
sched             142 tools/perf/builtin-sched.c 	int (*switch_event)(struct perf_sched *sched, struct evsel *evsel,
sched             145 tools/perf/builtin-sched.c 	int (*runtime_event)(struct perf_sched *sched, struct evsel *evsel,
sched             148 tools/perf/builtin-sched.c 	int (*wakeup_event)(struct perf_sched *sched, struct evsel *evsel,
sched             152 tools/perf/builtin-sched.c 	int (*fork_event)(struct perf_sched *sched, union perf_event *event,
sched             155 tools/perf/builtin-sched.c 	int (*migrate_task_event)(struct perf_sched *sched,
sched             300 tools/perf/builtin-sched.c static void burn_nsecs(struct perf_sched *sched, u64 nsecs)
sched             306 tools/perf/builtin-sched.c 	} while (T1 + sched->run_measurement_overhead < T0 + nsecs);
sched             319 tools/perf/builtin-sched.c static void calibrate_run_measurement_overhead(struct perf_sched *sched)
sched             326 tools/perf/builtin-sched.c 		burn_nsecs(sched, 0);
sched             331 tools/perf/builtin-sched.c 	sched->run_measurement_overhead = min_delta;
sched             336 tools/perf/builtin-sched.c static void calibrate_sleep_measurement_overhead(struct perf_sched *sched)
sched             349 tools/perf/builtin-sched.c 	sched->sleep_measurement_overhead = min_delta;
sched             382 tools/perf/builtin-sched.c static void add_sched_event_run(struct perf_sched *sched, struct task_desc *task,
sched             392 tools/perf/builtin-sched.c 		sched->nr_run_events_optimized++;
sched             402 tools/perf/builtin-sched.c 	sched->nr_run_events++;
sched             405 tools/perf/builtin-sched.c static void add_sched_event_wakeup(struct perf_sched *sched, struct task_desc *task,
sched             416 tools/perf/builtin-sched.c 		sched->targetless_wakeups++;
sched             420 tools/perf/builtin-sched.c 		sched->multitarget_wakeups++;
sched             429 tools/perf/builtin-sched.c 	sched->nr_wakeup_events++;
sched             432 tools/perf/builtin-sched.c static void add_sched_event_sleep(struct perf_sched *sched, struct task_desc *task,
sched             439 tools/perf/builtin-sched.c 	sched->nr_sleep_events++;
sched             442 tools/perf/builtin-sched.c static struct task_desc *register_pid(struct perf_sched *sched,
sched             448 tools/perf/builtin-sched.c 	if (sched->pid_to_task == NULL) {
sched             451 tools/perf/builtin-sched.c 		BUG_ON((sched->pid_to_task = calloc(pid_max, sizeof(struct task_desc *))) == NULL);
sched             454 tools/perf/builtin-sched.c 		BUG_ON((sched->pid_to_task = realloc(sched->pid_to_task, (pid + 1) *
sched             457 tools/perf/builtin-sched.c 			sched->pid_to_task[pid_max++] = NULL;
sched             460 tools/perf/builtin-sched.c 	task = sched->pid_to_task[pid];
sched             467 tools/perf/builtin-sched.c 	task->nr = sched->nr_tasks;
sched             473 tools/perf/builtin-sched.c 	add_sched_event_sleep(sched, task, 0, 0);
sched             475 tools/perf/builtin-sched.c 	sched->pid_to_task[pid] = task;
sched             476 tools/perf/builtin-sched.c 	sched->nr_tasks++;
sched             477 tools/perf/builtin-sched.c 	sched->tasks = realloc(sched->tasks, sched->nr_tasks * sizeof(struct task_desc *));
sched             478 tools/perf/builtin-sched.c 	BUG_ON(!sched->tasks);
sched             479 tools/perf/builtin-sched.c 	sched->tasks[task->nr] = task;
sched             482 tools/perf/builtin-sched.c 		printf("registered task #%ld, PID %ld (%s)\n", sched->nr_tasks, pid, comm);
sched             488 tools/perf/builtin-sched.c static void print_task_traces(struct perf_sched *sched)
sched             493 tools/perf/builtin-sched.c 	for (i = 0; i < sched->nr_tasks; i++) {
sched             494 tools/perf/builtin-sched.c 		task = sched->tasks[i];
sched             500 tools/perf/builtin-sched.c static void add_cross_task_wakeups(struct perf_sched *sched)
sched             505 tools/perf/builtin-sched.c 	for (i = 0; i < sched->nr_tasks; i++) {
sched             506 tools/perf/builtin-sched.c 		task1 = sched->tasks[i];
sched             508 tools/perf/builtin-sched.c 		if (j == sched->nr_tasks)
sched             510 tools/perf/builtin-sched.c 		task2 = sched->tasks[j];
sched             511 tools/perf/builtin-sched.c 		add_sched_event_wakeup(sched, task1, 0, task2);
sched             515 tools/perf/builtin-sched.c static void perf_sched__process_event(struct perf_sched *sched,
sched             522 tools/perf/builtin-sched.c 			burn_nsecs(sched, atom->duration);
sched             556 tools/perf/builtin-sched.c static int self_open_counters(struct perf_sched *sched, unsigned long cur_task)
sched             575 tools/perf/builtin-sched.c 			if (sched->force) {
sched             577 tools/perf/builtin-sched.c 				limit.rlim_cur += sched->nr_tasks - cur_task;
sched             611 tools/perf/builtin-sched.c 	struct perf_sched *sched;
sched             619 tools/perf/builtin-sched.c 	struct perf_sched *sched = parms->sched;
sched             634 tools/perf/builtin-sched.c 	ret = pthread_mutex_lock(&sched->start_work_mutex);
sched             636 tools/perf/builtin-sched.c 	ret = pthread_mutex_unlock(&sched->start_work_mutex);
sched             643 tools/perf/builtin-sched.c 		perf_sched__process_event(sched, this_task->atoms[i]);
sched             651 tools/perf/builtin-sched.c 	ret = pthread_mutex_lock(&sched->work_done_wait_mutex);
sched             653 tools/perf/builtin-sched.c 	ret = pthread_mutex_unlock(&sched->work_done_wait_mutex);
sched             659 tools/perf/builtin-sched.c static void create_tasks(struct perf_sched *sched)
sched             671 tools/perf/builtin-sched.c 	err = pthread_mutex_lock(&sched->start_work_mutex);
sched             673 tools/perf/builtin-sched.c 	err = pthread_mutex_lock(&sched->work_done_wait_mutex);
sched             675 tools/perf/builtin-sched.c 	for (i = 0; i < sched->nr_tasks; i++) {
sched             678 tools/perf/builtin-sched.c 		parms->task = task = sched->tasks[i];
sched             679 tools/perf/builtin-sched.c 		parms->sched = sched;
sched             680 tools/perf/builtin-sched.c 		parms->fd = self_open_counters(sched, i);
sched             690 tools/perf/builtin-sched.c static void wait_for_tasks(struct perf_sched *sched)
sched             696 tools/perf/builtin-sched.c 	sched->start_time = get_nsecs();
sched             697 tools/perf/builtin-sched.c 	sched->cpu_usage = 0;
sched             698 tools/perf/builtin-sched.c 	pthread_mutex_unlock(&sched->work_done_wait_mutex);
sched             700 tools/perf/builtin-sched.c 	for (i = 0; i < sched->nr_tasks; i++) {
sched             701 tools/perf/builtin-sched.c 		task = sched->tasks[i];
sched             706 tools/perf/builtin-sched.c 	ret = pthread_mutex_lock(&sched->work_done_wait_mutex);
sched             711 tools/perf/builtin-sched.c 	pthread_mutex_unlock(&sched->start_work_mutex);
sched             713 tools/perf/builtin-sched.c 	for (i = 0; i < sched->nr_tasks; i++) {
sched             714 tools/perf/builtin-sched.c 		task = sched->tasks[i];
sched             718 tools/perf/builtin-sched.c 		sched->cpu_usage += task->cpu_usage;
sched             723 tools/perf/builtin-sched.c 	if (!sched->runavg_cpu_usage)
sched             724 tools/perf/builtin-sched.c 		sched->runavg_cpu_usage = sched->cpu_usage;
sched             725 tools/perf/builtin-sched.c 	sched->runavg_cpu_usage = (sched->runavg_cpu_usage * (sched->replay_repeat - 1) + sched->cpu_usage) / sched->replay_repeat;
sched             727 tools/perf/builtin-sched.c 	sched->parent_cpu_usage = cpu_usage_1 - cpu_usage_0;
sched             728 tools/perf/builtin-sched.c 	if (!sched->runavg_parent_cpu_usage)
sched             729 tools/perf/builtin-sched.c 		sched->runavg_parent_cpu_usage = sched->parent_cpu_usage;
sched             730 tools/perf/builtin-sched.c 	sched->runavg_parent_cpu_usage = (sched->runavg_parent_cpu_usage * (sched->replay_repeat - 1) +
sched             731 tools/perf/builtin-sched.c 					 sched->parent_cpu_usage)/sched->replay_repeat;
sched             733 tools/perf/builtin-sched.c 	ret = pthread_mutex_lock(&sched->start_work_mutex);
sched             736 tools/perf/builtin-sched.c 	for (i = 0; i < sched->nr_tasks; i++) {
sched             737 tools/perf/builtin-sched.c 		task = sched->tasks[i];
sched             743 tools/perf/builtin-sched.c static void run_one_test(struct perf_sched *sched)
sched             748 tools/perf/builtin-sched.c 	wait_for_tasks(sched);
sched             752 tools/perf/builtin-sched.c 	sched->sum_runtime += delta;
sched             753 tools/perf/builtin-sched.c 	sched->nr_runs++;
sched             755 tools/perf/builtin-sched.c 	avg_delta = sched->sum_runtime / sched->nr_runs;
sched             760 tools/perf/builtin-sched.c 	sched->sum_fluct += fluct;
sched             761 tools/perf/builtin-sched.c 	if (!sched->run_avg)
sched             762 tools/perf/builtin-sched.c 		sched->run_avg = delta;
sched             763 tools/perf/builtin-sched.c 	sched->run_avg = (sched->run_avg * (sched->replay_repeat - 1) + delta) / sched->replay_repeat;
sched             765 tools/perf/builtin-sched.c 	printf("#%-3ld: %0.3f, ", sched->nr_runs, (double)delta / NSEC_PER_MSEC);
sched             767 tools/perf/builtin-sched.c 	printf("ravg: %0.2f, ", (double)sched->run_avg / NSEC_PER_MSEC);
sched             770 tools/perf/builtin-sched.c 		(double)sched->cpu_usage / NSEC_PER_MSEC, (double)sched->runavg_cpu_usage / NSEC_PER_MSEC);
sched             778 tools/perf/builtin-sched.c 		(double)sched->parent_cpu_usage / NSEC_PER_MSEC,
sched             779 tools/perf/builtin-sched.c 		(double)sched->runavg_parent_cpu_usage / NSEC_PER_MSEC);
sched             784 tools/perf/builtin-sched.c 	if (sched->nr_sleep_corrections)
sched             785 tools/perf/builtin-sched.c 		printf(" (%ld sleep corrections)\n", sched->nr_sleep_corrections);
sched             786 tools/perf/builtin-sched.c 	sched->nr_sleep_corrections = 0;
sched             789 tools/perf/builtin-sched.c static void test_calibrations(struct perf_sched *sched)
sched             794 tools/perf/builtin-sched.c 	burn_nsecs(sched, NSEC_PER_MSEC);
sched             807 tools/perf/builtin-sched.c replay_wakeup_event(struct perf_sched *sched,
sched             821 tools/perf/builtin-sched.c 	waker = register_pid(sched, sample->tid, "<unknown>");
sched             822 tools/perf/builtin-sched.c 	wakee = register_pid(sched, pid, comm);
sched             824 tools/perf/builtin-sched.c 	add_sched_event_wakeup(sched, waker, sample->time, wakee);
sched             828 tools/perf/builtin-sched.c static int replay_switch_event(struct perf_sched *sched,
sched             849 tools/perf/builtin-sched.c 	timestamp0 = sched->cpu_last_switched[cpu];
sched             863 tools/perf/builtin-sched.c 	prev = register_pid(sched, prev_pid, prev_comm);
sched             864 tools/perf/builtin-sched.c 	next = register_pid(sched, next_pid, next_comm);
sched             866 tools/perf/builtin-sched.c 	sched->cpu_last_switched[cpu] = timestamp;
sched             868 tools/perf/builtin-sched.c 	add_sched_event_run(sched, prev, timestamp, delta);
sched             869 tools/perf/builtin-sched.c 	add_sched_event_sleep(sched, prev, timestamp, prev_state);
sched             874 tools/perf/builtin-sched.c static int replay_fork_event(struct perf_sched *sched,
sched             897 tools/perf/builtin-sched.c 	register_pid(sched, parent->tid, thread__comm_str(parent));
sched             898 tools/perf/builtin-sched.c 	register_pid(sched, child->tid, thread__comm_str(child));
sched            1013 tools/perf/builtin-sched.c static int thread_atoms_insert(struct perf_sched *sched, struct thread *thread)
sched            1023 tools/perf/builtin-sched.c 	__thread_latency_insert(&sched->atom_root, atoms, &sched->cmp_pid);
sched            1101 tools/perf/builtin-sched.c static int latency_switch_event(struct perf_sched *sched,
sched            1117 tools/perf/builtin-sched.c 	timestamp0 = sched->cpu_last_switched[cpu];
sched            1118 tools/perf/builtin-sched.c 	sched->cpu_last_switched[cpu] = timestamp;
sched            1134 tools/perf/builtin-sched.c 	out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
sched            1136 tools/perf/builtin-sched.c 		if (thread_atoms_insert(sched, sched_out))
sched            1138 tools/perf/builtin-sched.c 		out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
sched            1147 tools/perf/builtin-sched.c 	in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
sched            1149 tools/perf/builtin-sched.c 		if (thread_atoms_insert(sched, sched_in))
sched            1151 tools/perf/builtin-sched.c 		in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
sched            1171 tools/perf/builtin-sched.c static int latency_runtime_event(struct perf_sched *sched,
sched            1179 tools/perf/builtin-sched.c 	struct work_atoms *atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
sched            1188 tools/perf/builtin-sched.c 		if (thread_atoms_insert(sched, thread))
sched            1190 tools/perf/builtin-sched.c 		atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
sched            1206 tools/perf/builtin-sched.c static int latency_wakeup_event(struct perf_sched *sched,
sched            1221 tools/perf/builtin-sched.c 	atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
sched            1223 tools/perf/builtin-sched.c 		if (thread_atoms_insert(sched, wakee))
sched            1225 tools/perf/builtin-sched.c 		atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
sched            1249 tools/perf/builtin-sched.c 	if (sched->profile_cpu == -1 && atom->state != THREAD_SLEEPING)
sched            1252 tools/perf/builtin-sched.c 	sched->nr_timestamps++;
sched            1254 tools/perf/builtin-sched.c 		sched->nr_unordered_timestamps++;
sched            1267 tools/perf/builtin-sched.c static int latency_migrate_task_event(struct perf_sched *sched,
sched            1282 tools/perf/builtin-sched.c 	if (sched->profile_cpu == -1)
sched            1288 tools/perf/builtin-sched.c 	atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
sched            1290 tools/perf/builtin-sched.c 		if (thread_atoms_insert(sched, migrant))
sched            1292 tools/perf/builtin-sched.c 		register_pid(sched, migrant->tid, thread__comm_str(migrant));
sched            1293 tools/perf/builtin-sched.c 		atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
sched            1307 tools/perf/builtin-sched.c 	sched->nr_timestamps++;
sched            1310 tools/perf/builtin-sched.c 		sched->nr_unordered_timestamps++;
sched            1317 tools/perf/builtin-sched.c static void output_lat_thread(struct perf_sched *sched, struct work_atoms *work_list)
sched            1332 tools/perf/builtin-sched.c 	sched->all_runtime += work_list->total_runtime;
sched            1333 tools/perf/builtin-sched.c 	sched->all_count   += work_list->nb_atoms;
sched            1457 tools/perf/builtin-sched.c static void perf_sched__sort_lat(struct perf_sched *sched)
sched            1460 tools/perf/builtin-sched.c 	struct rb_root_cached *root = &sched->atom_root;
sched            1470 tools/perf/builtin-sched.c 		__thread_latency_insert(&sched->sorted_atom_root, data, &sched->sort_list);
sched            1472 tools/perf/builtin-sched.c 	if (root == &sched->atom_root) {
sched            1473 tools/perf/builtin-sched.c 		root = &sched->merged_atom_root;
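
perf_sched__sort_lat drains the pid-keyed tree(s) and re-inserts every entry into sorted_atom_root ordered by the user's sort keys. As a loose, simplified analogue (not perf's code), the same effect over an array with qsort, assuming a single "max latency" key:

    #include <stdio.h>
    #include <stdlib.h>

    struct work {
        int tid;
        double max_lat_ms;
    };

    /* Sort descending by maximum latency, roughly what "-s max" asks for. */
    static int cmp_max_lat(const void *pa, const void *pb)
    {
        const struct work *a = pa, *b = pb;

        if (a->max_lat_ms < b->max_lat_ms)
            return 1;
        if (a->max_lat_ms > b->max_lat_ms)
            return -1;
        return 0;
    }

    int main(void)
    {
        struct work w[] = { { 10, 0.3 }, { 11, 7.1 }, { 12, 1.9 } };

        qsort(w, 3, sizeof(w[0]), cmp_max_lat);
        for (int i = 0; i < 3; i++)
            printf("tid %d  max %.1f ms\n", w[i].tid, w[i].max_lat_ms);
        return 0;
    }
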
sched            1483 tools/perf/builtin-sched.c 	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
sched            1485 tools/perf/builtin-sched.c 	if (sched->tp_handler->wakeup_event)
sched            1486 tools/perf/builtin-sched.c 		return sched->tp_handler->wakeup_event(sched, evsel, sample, machine);
sched            1506 tools/perf/builtin-sched.c map__findnew_thread(struct perf_sched *sched, struct machine *machine, pid_t pid, pid_t tid)
sched            1513 tools/perf/builtin-sched.c 	if (!sched->map.color_pids || !thread || thread__priv(thread))
sched            1516 tools/perf/builtin-sched.c 	if (thread_map__has(sched->map.color_pids, tid))
sched            1523 tools/perf/builtin-sched.c static int map_switch_event(struct perf_sched *sched, struct evsel *evsel,
sched            1540 tools/perf/builtin-sched.c 	if (this_cpu > sched->max_cpu)
sched            1541 tools/perf/builtin-sched.c 		sched->max_cpu = this_cpu;
sched            1543 tools/perf/builtin-sched.c 	if (sched->map.comp) {
sched            1544 tools/perf/builtin-sched.c 		cpus_nr = bitmap_weight(sched->map.comp_cpus_mask, MAX_CPUS);
sched            1545 tools/perf/builtin-sched.c 		if (!test_and_set_bit(this_cpu, sched->map.comp_cpus_mask)) {
sched            1546 tools/perf/builtin-sched.c 			sched->map.comp_cpus[cpus_nr++] = this_cpu;
sched            1550 tools/perf/builtin-sched.c 		cpus_nr = sched->max_cpu;
sched            1552 tools/perf/builtin-sched.c 	timestamp0 = sched->cpu_last_switched[this_cpu];
sched            1553 tools/perf/builtin-sched.c 	sched->cpu_last_switched[this_cpu] = timestamp;
sched            1564 tools/perf/builtin-sched.c 	sched_in = map__findnew_thread(sched, machine, -1, next_pid);
sched            1574 tools/perf/builtin-sched.c 	sched->curr_thread[this_cpu] = thread__get(sched_in);
sched            1588 tools/perf/builtin-sched.c 			tr->shortname[0] = sched->next_shortname1;
sched            1589 tools/perf/builtin-sched.c 			tr->shortname[1] = sched->next_shortname2;
sched            1591 tools/perf/builtin-sched.c 			if (sched->next_shortname1 < 'Z') {
sched            1592 tools/perf/builtin-sched.c 				sched->next_shortname1++;
sched            1594 tools/perf/builtin-sched.c 				sched->next_shortname1 = 'A';
sched            1595 tools/perf/builtin-sched.c 				if (sched->next_shortname2 < '9')
sched            1596 tools/perf/builtin-sched.c 					sched->next_shortname2++;
sched            1598 tools/perf/builtin-sched.c 					sched->next_shortname2 = '0';
sched            1605 tools/perf/builtin-sched.c 		int cpu = sched->map.comp ? sched->map.comp_cpus[i] : i;
sched            1606 tools/perf/builtin-sched.c 		struct thread *curr_thread = sched->curr_thread[cpu];
sched            1614 tools/perf/builtin-sched.c 		if (sched->map.cpus && !cpu_map__has(sched->map.cpus, cpu))
sched            1617 tools/perf/builtin-sched.c 		if (sched->map.color_cpus && cpu_map__has(sched->map.color_cpus, cpu))
sched            1625 tools/perf/builtin-sched.c 		if (sched->curr_thread[cpu]) {
sched            1626 tools/perf/builtin-sched.c 			curr_tr = thread__get_runtime(sched->curr_thread[cpu]);
sched            1636 tools/perf/builtin-sched.c 	if (sched->map.cpus && !cpu_map__has(sched->map.cpus, this_cpu))
sched            1652 tools/perf/builtin-sched.c 	if (sched->map.comp && new_cpu)
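
In map_switch_event, each newly seen task is assigned a two-character shortname by cycling the first character through 'A'..'Z' and then advancing the second through '0'..'9'. A self-contained sketch of that generator (variable names simplified from next_shortname1/next_shortname2):

    #include <stdio.h>

    static char next1 = 'A', next2 = '0';

    /* Produce shortnames in the order A0, B0, ... Z0, A1, B1, ... */
    static void next_shortname(char out[3])
    {
        out[0] = next1;
        out[1] = next2;
        out[2] = '\0';

        if (next1 < 'Z') {
            next1++;
        } else {
            next1 = 'A';
            if (next2 < '9')
                next2++;
            else
                next2 = '0';
        }
    }

    int main(void)
    {
        char name[3];

        for (int i = 0; i < 30; i++) {
            next_shortname(name);
            printf("%s ", name);
        }
        printf("\n");
        return 0;
    }
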
sched            1668 tools/perf/builtin-sched.c 	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
sched            1673 tools/perf/builtin-sched.c 	if (sched->curr_pid[this_cpu] != (u32)-1) {
sched            1678 tools/perf/builtin-sched.c 		if (sched->curr_pid[this_cpu] != prev_pid)
sched            1679 tools/perf/builtin-sched.c 			sched->nr_context_switch_bugs++;
sched            1682 tools/perf/builtin-sched.c 	if (sched->tp_handler->switch_event)
sched            1683 tools/perf/builtin-sched.c 		err = sched->tp_handler->switch_event(sched, evsel, sample, machine);
sched            1685 tools/perf/builtin-sched.c 	sched->curr_pid[this_cpu] = next_pid;
sched            1694 tools/perf/builtin-sched.c 	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
sched            1696 tools/perf/builtin-sched.c 	if (sched->tp_handler->runtime_event)
sched            1697 tools/perf/builtin-sched.c 		return sched->tp_handler->runtime_event(sched, evsel, sample, machine);
sched            1707 tools/perf/builtin-sched.c 	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
sched            1713 tools/perf/builtin-sched.c 	if (sched->tp_handler->fork_event)
sched            1714 tools/perf/builtin-sched.c 		return sched->tp_handler->fork_event(sched, event, machine);
sched            1724 tools/perf/builtin-sched.c 	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
sched            1726 tools/perf/builtin-sched.c 	if (sched->tp_handler->migrate_task_event)
sched            1727 tools/perf/builtin-sched.c 		return sched->tp_handler->migrate_task_event(sched, evsel, sample, machine);
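
The process_sched_*_event callbacks above all recover the enclosing struct perf_sched from the embedded tool via container_of() and then dispatch through the selected tp_handler. A minimal illustration of that idiom, with placeholder struct and handler names:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct sched_ctx;

    struct trace_handlers {
        int (*switch_event)(struct sched_ctx *ctx, int sample);
    };

    struct tool {
        int (*sample)(struct tool *tool, int sample);
    };

    struct sched_ctx {
        struct tool tool;                     /* embedded, like tool in struct perf_sched */
        const struct trace_handlers *tp_handler;
        unsigned long nr_events;
    };

    static int map_switch(struct sched_ctx *ctx, int sample)
    {
        ctx->nr_events++;
        printf("switch sample %d (total %lu)\n", sample, ctx->nr_events);
        return 0;
    }

    static const struct trace_handlers map_ops = { .switch_event = map_switch };

    /* Generic callback: recover the outer object, then dispatch through tp_handler. */
    static int process_switch(struct tool *tool, int sample)
    {
        struct sched_ctx *ctx = container_of(tool, struct sched_ctx, tool);

        return ctx->tp_handler->switch_event ?
               ctx->tp_handler->switch_event(ctx, sample) : 0;
    }

    int main(void)
    {
        struct sched_ctx sched = {
            .tool = { .sample = process_switch },
            .tp_handler = &map_ops,
        };

        return sched.tool.sample(&sched.tool, 7);
    }
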
sched            1784 tools/perf/builtin-sched.c static int perf_sched__read_events(struct perf_sched *sched)
sched            1797 tools/perf/builtin-sched.c 		.force = sched->force,
sched            1801 tools/perf/builtin-sched.c 	session = perf_session__new(&data, false, &sched->tool);
sched            1819 tools/perf/builtin-sched.c 		sched->nr_events      = session->evlist->stats.nr_events[0];
sched            1820 tools/perf/builtin-sched.c 		sched->nr_lost_events = session->evlist->stats.total_lost;
sched            1821 tools/perf/builtin-sched.c 		sched->nr_lost_chunks = session->evlist->stats.nr_events[PERF_RECORD_LOST];
sched            1925 tools/perf/builtin-sched.c static void timehist_header(struct perf_sched *sched)
sched            1927 tools/perf/builtin-sched.c 	u32 ncpus = sched->max_cpu + 1;
sched            1932 tools/perf/builtin-sched.c 	if (sched->show_cpu_visual) {
sched            1945 tools/perf/builtin-sched.c 	if (sched->show_state)
sched            1955 tools/perf/builtin-sched.c 	if (sched->show_cpu_visual)
sched            1961 tools/perf/builtin-sched.c 	if (sched->show_state)
sched            1971 tools/perf/builtin-sched.c 	if (sched->show_cpu_visual)
sched            1978 tools/perf/builtin-sched.c 	if (sched->show_state)
sched            1996 tools/perf/builtin-sched.c static void timehist_print_sample(struct perf_sched *sched,
sched            2006 tools/perf/builtin-sched.c 	u32 max_cpus = sched->max_cpu + 1;
sched            2014 tools/perf/builtin-sched.c 	if (sched->show_cpu_visual) {
sched            2038 tools/perf/builtin-sched.c 	if (sched->show_state)
sched            2041 tools/perf/builtin-sched.c 	if (sched->show_next) {
sched            2046 tools/perf/builtin-sched.c 	if (sched->show_wakeups && !sched->show_next)
sched            2052 tools/perf/builtin-sched.c 	if (sched->show_callchain)
sched            2139 tools/perf/builtin-sched.c static void save_task_callchain(struct perf_sched *sched,
sched            2154 tools/perf/builtin-sched.c 	if (!sched->show_callchain || sample->callchain == NULL)
sched            2158 tools/perf/builtin-sched.c 				      NULL, NULL, sched->max_stack + 2) != 0) {
sched            2282 tools/perf/builtin-sched.c static void save_idle_callchain(struct perf_sched *sched,
sched            2286 tools/perf/builtin-sched.c 	if (!sched->show_callchain || sample->callchain == NULL)
sched            2292 tools/perf/builtin-sched.c static struct thread *timehist_get_thread(struct perf_sched *sched,
sched            2313 tools/perf/builtin-sched.c 		save_task_callchain(sched, sample, evsel, machine);
sched            2314 tools/perf/builtin-sched.c 		if (sched->idle_hist) {
sched            2332 tools/perf/builtin-sched.c 				save_idle_callchain(sched, itr, sample);
sched            2339 tools/perf/builtin-sched.c static bool timehist_skip_sample(struct perf_sched *sched,
sched            2348 tools/perf/builtin-sched.c 		sched->skipped_samples++;
sched            2351 tools/perf/builtin-sched.c 	if (sched->idle_hist) {
sched            2362 tools/perf/builtin-sched.c static void timehist_print_wakeup_event(struct perf_sched *sched,
sched            2376 tools/perf/builtin-sched.c 	if (timehist_skip_sample(sched, thread, evsel, sample) &&
sched            2377 tools/perf/builtin-sched.c 	    timehist_skip_sample(sched, awakened, evsel, sample)) {
sched            2383 tools/perf/builtin-sched.c 	if (sched->show_cpu_visual)
sched            2384 tools/perf/builtin-sched.c 		printf(" %*s ", sched->max_cpu + 1, "");
sched            2402 tools/perf/builtin-sched.c 	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
sched            2420 tools/perf/builtin-sched.c 	if (sched->show_wakeups &&
sched            2421 tools/perf/builtin-sched.c 	    !perf_time__skip_sample(&sched->ptime, sample->time))
sched            2422 tools/perf/builtin-sched.c 		timehist_print_wakeup_event(sched, evsel, sample, machine, thread);
sched            2427 tools/perf/builtin-sched.c static void timehist_print_migration_event(struct perf_sched *sched,
sched            2435 tools/perf/builtin-sched.c 	u32 max_cpus = sched->max_cpu + 1;
sched            2438 tools/perf/builtin-sched.c 	if (sched->summary_only)
sched            2441 tools/perf/builtin-sched.c 	max_cpus = sched->max_cpu + 1;
sched            2449 tools/perf/builtin-sched.c 	if (timehist_skip_sample(sched, thread, evsel, sample) &&
sched            2450 tools/perf/builtin-sched.c 	    timehist_skip_sample(sched, migrated, evsel, sample)) {
sched            2457 tools/perf/builtin-sched.c 	if (sched->show_cpu_visual) {
sched            2486 tools/perf/builtin-sched.c 	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
sched            2503 tools/perf/builtin-sched.c 	timehist_print_migration_event(sched, evsel, sample, machine, thread);
sched            2514 tools/perf/builtin-sched.c 	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
sched            2515 tools/perf/builtin-sched.c 	struct perf_time_interval *ptime = &sched->ptime;
sched            2531 tools/perf/builtin-sched.c 	thread = timehist_get_thread(sched, sample, machine, evsel);
sched            2537 tools/perf/builtin-sched.c 	if (timehist_skip_sample(sched, thread, evsel, sample))
sched            2573 tools/perf/builtin-sched.c 	if (!sched->idle_hist || thread->tid == 0) {
sched            2576 tools/perf/builtin-sched.c 		if (sched->idle_hist) {
sched            2609 tools/perf/builtin-sched.c 	if (!sched->summary_only)
sched            2610 tools/perf/builtin-sched.c 		timehist_print_sample(sched, evsel, sample, &al, thread, t, state);
sched            2613 tools/perf/builtin-sched.c 	if (sched->hist_time.start == 0 && t >= ptime->start)
sched            2614 tools/perf/builtin-sched.c 		sched->hist_time.start = t;
sched            2616 tools/perf/builtin-sched.c 		sched->hist_time.end = t;
sched            2700 tools/perf/builtin-sched.c 	struct perf_sched *sched;
sched            2720 tools/perf/builtin-sched.c 		if (stats->sched->show_state)
sched            2799 tools/perf/builtin-sched.c static void timehist_print_summary(struct perf_sched *sched,
sched            2808 tools/perf/builtin-sched.c 	u64 hist_time = sched->hist_time.end - sched->hist_time.start;
sched            2811 tools/perf/builtin-sched.c 	totals.sched = sched;
sched            2813 tools/perf/builtin-sched.c 	if (sched->idle_hist) {
sched            2817 tools/perf/builtin-sched.c 	} else if (sched->show_state) {
sched            2828 tools/perf/builtin-sched.c 	       sched->show_state ? "(msec)" : "%");
sched            2842 tools/perf/builtin-sched.c 	if (sched->skipped_samples && !sched->idle_hist)
sched            2861 tools/perf/builtin-sched.c 	if (sched->idle_hist && sched->show_callchain) {
sched            2901 tools/perf/builtin-sched.c 	printf(" (x %d)\n", sched->max_cpu);
sched            2916 tools/perf/builtin-sched.c 	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
sched            2920 tools/perf/builtin-sched.c 	if (this_cpu > sched->max_cpu)
sched            2921 tools/perf/builtin-sched.c 		sched->max_cpu = this_cpu;
sched            2932 tools/perf/builtin-sched.c static int timehist_check_attr(struct perf_sched *sched,
sched            2945 tools/perf/builtin-sched.c 		if (sched->show_callchain && !evsel__has_callchain(evsel)) {
sched            2947 tools/perf/builtin-sched.c 			sched->show_callchain = 0;
sched            2955 tools/perf/builtin-sched.c static int perf_sched__timehist(struct perf_sched *sched)
sched            2968 tools/perf/builtin-sched.c 		.force = sched->force,
sched            2978 tools/perf/builtin-sched.c 	sched->tool.sample	 = perf_timehist__process_sample;
sched            2979 tools/perf/builtin-sched.c 	sched->tool.mmap	 = perf_event__process_mmap;
sched            2980 tools/perf/builtin-sched.c 	sched->tool.comm	 = perf_event__process_comm;
sched            2981 tools/perf/builtin-sched.c 	sched->tool.exit	 = perf_event__process_exit;
sched            2982 tools/perf/builtin-sched.c 	sched->tool.fork	 = perf_event__process_fork;
sched            2983 tools/perf/builtin-sched.c 	sched->tool.lost	 = process_lost;
sched            2984 tools/perf/builtin-sched.c 	sched->tool.attr	 = perf_event__process_attr;
sched            2985 tools/perf/builtin-sched.c 	sched->tool.tracing_data = perf_event__process_tracing_data;
sched            2986 tools/perf/builtin-sched.c 	sched->tool.build_id	 = perf_event__process_build_id;
sched            2988 tools/perf/builtin-sched.c 	sched->tool.ordered_events = true;
sched            2989 tools/perf/builtin-sched.c 	sched->tool.ordering_requires_timestamps = true;
sched            2991 tools/perf/builtin-sched.c 	symbol_conf.use_callchain = sched->show_callchain;
sched            2993 tools/perf/builtin-sched.c 	session = perf_session__new(&data, false, &sched->tool);
sched            3001 tools/perf/builtin-sched.c 	if (perf_time__parse_str(&sched->ptime, sched->time_str) != 0) {
sched            3006 tools/perf/builtin-sched.c 	if (timehist_check_attr(sched, evlist) != 0)
sched            3022 tools/perf/builtin-sched.c 	if (sched->show_migrations &&
sched            3027 tools/perf/builtin-sched.c 	sched->max_cpu = session->header.env.nr_cpus_online;
sched            3028 tools/perf/builtin-sched.c 	if (sched->max_cpu == 0)
sched            3029 tools/perf/builtin-sched.c 		sched->max_cpu = 4;
sched            3030 tools/perf/builtin-sched.c 	if (init_idle_threads(sched->max_cpu))
sched            3034 tools/perf/builtin-sched.c 	if (sched->summary_only)
sched            3035 tools/perf/builtin-sched.c 		sched->summary = sched->summary_only;
sched            3037 tools/perf/builtin-sched.c 	if (!sched->summary_only)
sched            3038 tools/perf/builtin-sched.c 		timehist_header(sched);
sched            3046 tools/perf/builtin-sched.c 	sched->nr_events      = evlist->stats.nr_events[0];
sched            3047 tools/perf/builtin-sched.c 	sched->nr_lost_events = evlist->stats.total_lost;
sched            3048 tools/perf/builtin-sched.c 	sched->nr_lost_chunks = evlist->stats.nr_events[PERF_RECORD_LOST];
sched            3050 tools/perf/builtin-sched.c 	if (sched->summary)
sched            3051 tools/perf/builtin-sched.c 		timehist_print_summary(sched, session);
sched            3061 tools/perf/builtin-sched.c static void print_bad_events(struct perf_sched *sched)
sched            3063 tools/perf/builtin-sched.c 	if (sched->nr_unordered_timestamps && sched->nr_timestamps) {
sched            3065 tools/perf/builtin-sched.c 			(double)sched->nr_unordered_timestamps/(double)sched->nr_timestamps*100.0,
sched            3066 tools/perf/builtin-sched.c 			sched->nr_unordered_timestamps, sched->nr_timestamps);
sched            3068 tools/perf/builtin-sched.c 	if (sched->nr_lost_events && sched->nr_events) {
sched            3070 tools/perf/builtin-sched.c 			(double)sched->nr_lost_events/(double)sched->nr_events * 100.0,
sched            3071 tools/perf/builtin-sched.c 			sched->nr_lost_events, sched->nr_events, sched->nr_lost_chunks);
sched            3073 tools/perf/builtin-sched.c 	if (sched->nr_context_switch_bugs && sched->nr_timestamps) {
sched            3075 tools/perf/builtin-sched.c 			(double)sched->nr_context_switch_bugs/(double)sched->nr_timestamps*100.0,
sched            3076 tools/perf/builtin-sched.c 			sched->nr_context_switch_bugs, sched->nr_timestamps);
sched            3077 tools/perf/builtin-sched.c 		if (sched->nr_lost_events)
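
print_bad_events only prints a ratio when both the counter and its denominator are non-zero, avoiding division by zero for unordered timestamps, lost events and suspected context-switch bugs. A trimmed sketch of the same guard:

    #include <stdio.h>

    static void report_ratio(const char *what, unsigned long bad, unsigned long total)
    {
        if (bad && total)
            printf("  INFO: %.3f%% %s (%lu out of %lu)\n",
                   (double)bad / (double)total * 100.0, what, bad, total);
    }

    int main(void)
    {
        report_ratio("unordered timestamps", 3, 12000);
        report_ratio("lost events", 0, 12000);   /* silent when nothing was lost */
        return 0;
    }
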
sched            3123 tools/perf/builtin-sched.c static void perf_sched__merge_lat(struct perf_sched *sched)
sched            3128 tools/perf/builtin-sched.c 	if (sched->skip_merge)
sched            3131 tools/perf/builtin-sched.c 	while ((node = rb_first_cached(&sched->atom_root))) {
sched            3132 tools/perf/builtin-sched.c 		rb_erase_cached(node, &sched->atom_root);
sched            3134 tools/perf/builtin-sched.c 		__merge_work_atoms(&sched->merged_atom_root, data);
sched            3138 tools/perf/builtin-sched.c static int perf_sched__lat(struct perf_sched *sched)
sched            3144 tools/perf/builtin-sched.c 	if (perf_sched__read_events(sched))
sched            3147 tools/perf/builtin-sched.c 	perf_sched__merge_lat(sched);
sched            3148 tools/perf/builtin-sched.c 	perf_sched__sort_lat(sched);
sched            3154 tools/perf/builtin-sched.c 	next = rb_first_cached(&sched->sorted_atom_root);
sched            3160 tools/perf/builtin-sched.c 		output_lat_thread(sched, work_list);
sched            3167 tools/perf/builtin-sched.c 		(double)sched->all_runtime / NSEC_PER_MSEC, sched->all_count);
sched            3171 tools/perf/builtin-sched.c 	print_bad_events(sched);
sched            3177 tools/perf/builtin-sched.c static int setup_map_cpus(struct perf_sched *sched)
sched            3181 tools/perf/builtin-sched.c 	sched->max_cpu  = sysconf(_SC_NPROCESSORS_CONF);
sched            3183 tools/perf/builtin-sched.c 	if (sched->map.comp) {
sched            3184 tools/perf/builtin-sched.c 		sched->map.comp_cpus = zalloc(sched->max_cpu * sizeof(int));
sched            3185 tools/perf/builtin-sched.c 		if (!sched->map.comp_cpus)
sched            3189 tools/perf/builtin-sched.c 	if (!sched->map.cpus_str)
sched            3192 tools/perf/builtin-sched.c 	map = perf_cpu_map__new(sched->map.cpus_str);
sched            3194 tools/perf/builtin-sched.c 		pr_err("failed to get cpus map from %s\n", sched->map.cpus_str);
sched            3198 tools/perf/builtin-sched.c 	sched->map.cpus = map;
sched            3202 tools/perf/builtin-sched.c static int setup_color_pids(struct perf_sched *sched)
sched            3206 tools/perf/builtin-sched.c 	if (!sched->map.color_pids_str)
sched            3209 tools/perf/builtin-sched.c 	map = thread_map__new_by_tid_str(sched->map.color_pids_str);
sched            3211 tools/perf/builtin-sched.c 		pr_err("failed to get thread map from %s\n", sched->map.color_pids_str);
sched            3215 tools/perf/builtin-sched.c 	sched->map.color_pids = map;
sched            3219 tools/perf/builtin-sched.c static int setup_color_cpus(struct perf_sched *sched)
sched            3223 tools/perf/builtin-sched.c 	if (!sched->map.color_cpus_str)
sched            3226 tools/perf/builtin-sched.c 	map = perf_cpu_map__new(sched->map.color_cpus_str);
sched            3228 tools/perf/builtin-sched.c 		pr_err("failed to get thread map from %s\n", sched->map.color_cpus_str);
sched            3232 tools/perf/builtin-sched.c 	sched->map.color_cpus = map;
sched            3236 tools/perf/builtin-sched.c static int perf_sched__map(struct perf_sched *sched)
sched            3238 tools/perf/builtin-sched.c 	if (setup_map_cpus(sched))
sched            3241 tools/perf/builtin-sched.c 	if (setup_color_pids(sched))
sched            3244 tools/perf/builtin-sched.c 	if (setup_color_cpus(sched))
sched            3248 tools/perf/builtin-sched.c 	if (perf_sched__read_events(sched))
sched            3250 tools/perf/builtin-sched.c 	print_bad_events(sched);
sched            3254 tools/perf/builtin-sched.c static int perf_sched__replay(struct perf_sched *sched)
sched            3258 tools/perf/builtin-sched.c 	calibrate_run_measurement_overhead(sched);
sched            3259 tools/perf/builtin-sched.c 	calibrate_sleep_measurement_overhead(sched);
sched            3261 tools/perf/builtin-sched.c 	test_calibrations(sched);
sched            3263 tools/perf/builtin-sched.c 	if (perf_sched__read_events(sched))
sched            3266 tools/perf/builtin-sched.c 	printf("nr_run_events:        %ld\n", sched->nr_run_events);
sched            3267 tools/perf/builtin-sched.c 	printf("nr_sleep_events:      %ld\n", sched->nr_sleep_events);
sched            3268 tools/perf/builtin-sched.c 	printf("nr_wakeup_events:     %ld\n", sched->nr_wakeup_events);
sched            3270 tools/perf/builtin-sched.c 	if (sched->targetless_wakeups)
sched            3271 tools/perf/builtin-sched.c 		printf("target-less wakeups:  %ld\n", sched->targetless_wakeups);
sched            3272 tools/perf/builtin-sched.c 	if (sched->multitarget_wakeups)
sched            3273 tools/perf/builtin-sched.c 		printf("multi-target wakeups: %ld\n", sched->multitarget_wakeups);
sched            3274 tools/perf/builtin-sched.c 	if (sched->nr_run_events_optimized)
sched            3276 tools/perf/builtin-sched.c 			sched->nr_run_events_optimized);
sched            3278 tools/perf/builtin-sched.c 	print_task_traces(sched);
sched            3279 tools/perf/builtin-sched.c 	add_cross_task_wakeups(sched);
sched            3281 tools/perf/builtin-sched.c 	create_tasks(sched);
sched            3283 tools/perf/builtin-sched.c 	for (i = 0; i < sched->replay_repeat; i++)
sched            3284 tools/perf/builtin-sched.c 		run_one_test(sched);
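
Before replaying, perf sched replay calibrates how much the timing calls and sleeps themselves cost, so that overhead can be discounted from the replayed runtimes. A rough standalone sketch of that calibration idea; the iteration count and the 10 usec sleep are arbitrary choices here, not perf's values:

    #include <stdio.h>
    #include <stdint.h>
    #include <time.h>

    static uint64_t now_ns(void)
    {
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
    }

    /* Minimum observed cost of taking two back-to-back timestamps. */
    static uint64_t calibrate_run_overhead(void)
    {
        uint64_t min = UINT64_MAX;

        for (int i = 0; i < 10; i++) {
            uint64_t t0 = now_ns();
            uint64_t dt = now_ns() - t0;

            if (dt < min)
                min = dt;
        }
        return min;
    }

    /* Minimum observed cost of a 10 usec nanosleep, beyond the 10 usec requested. */
    static uint64_t calibrate_sleep_overhead(void)
    {
        struct timespec req = { 0, 10000 };
        uint64_t min = UINT64_MAX;

        for (int i = 0; i < 10; i++) {
            uint64_t t0 = now_ns();
            uint64_t dt;

            nanosleep(&req, NULL);
            dt = now_ns() - t0;
            if (dt < min)
                min = dt;
        }
        return min - 10000;
    }

    int main(void)
    {
        printf("run overhead:   %llu ns\n", (unsigned long long)calibrate_run_overhead());
        printf("sleep overhead: %llu ns\n", (unsigned long long)calibrate_sleep_overhead());
        return 0;
    }
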
sched            3289 tools/perf/builtin-sched.c static void setup_sorting(struct perf_sched *sched, const struct option *options,
sched            3292 tools/perf/builtin-sched.c 	char *tmp, *tok, *str = strdup(sched->sort_order);
sched            3296 tools/perf/builtin-sched.c 		if (sort_dimension__add(tok, &sched->sort_list) < 0) {
sched            3304 tools/perf/builtin-sched.c 	sort_dimension__add("pid", &sched->cmp_pid);
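
setup_sorting duplicates the --sort string and walks the comma-separated keys, registering each one, and always adds a fixed "pid" dimension for the internal cmp_pid list. A minimal parse loop of the same shape, with a hypothetical register_key() standing in for sort_dimension__add():

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Stand-in for sort_dimension__add(); just echoes the key. */
    static int register_key(const char *key)
    {
        printf("sort key: %s\n", key);
        return 0;
    }

    static int parse_sort_order(const char *sort_order)
    {
        char *tmp, *tok, *str = strdup(sort_order);

        if (!str)
            return -1;
        for (tok = strtok_r(str, ", ", &tmp); tok;
             tok = strtok_r(NULL, ", ", &tmp)) {
            if (register_key(tok) < 0) {
                free(str);
                return -1;
            }
        }
        free(str);
        return 0;
    }

    int main(void)
    {
        return parse_sort_order("avg, max, switch, runtime");
    }
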
sched            3348 tools/perf/builtin-sched.c 	struct perf_sched sched = {
sched            3357 tools/perf/builtin-sched.c 		.cmp_pid	      = LIST_HEAD_INIT(sched.cmp_pid),
sched            3358 tools/perf/builtin-sched.c 		.sort_list	      = LIST_HEAD_INIT(sched.sort_list),
sched            3377 tools/perf/builtin-sched.c 	OPT_BOOLEAN('f', "force", &sched.force, "don't complain, do it"),
sched            3381 tools/perf/builtin-sched.c 	OPT_STRING('s', "sort", &sched.sort_order, "key[,key2...]",
sched            3383 tools/perf/builtin-sched.c 	OPT_INTEGER('C', "CPU", &sched.profile_cpu,
sched            3385 tools/perf/builtin-sched.c 	OPT_BOOLEAN('p', "pids", &sched.skip_merge,
sched            3390 tools/perf/builtin-sched.c 	OPT_UINTEGER('r', "repeat", &sched.replay_repeat,
sched            3395 tools/perf/builtin-sched.c 	OPT_BOOLEAN(0, "compact", &sched.map.comp,
sched            3397 tools/perf/builtin-sched.c 	OPT_STRING(0, "color-pids", &sched.map.color_pids_str, "pids",
sched            3399 tools/perf/builtin-sched.c 	OPT_STRING(0, "color-cpus", &sched.map.color_cpus_str, "cpus",
sched            3401 tools/perf/builtin-sched.c 	OPT_STRING(0, "cpus", &sched.map.cpus_str, "cpus",
sched            3410 tools/perf/builtin-sched.c 	OPT_BOOLEAN('g', "call-graph", &sched.show_callchain,
sched            3412 tools/perf/builtin-sched.c 	OPT_UINTEGER(0, "max-stack", &sched.max_stack,
sched            3416 tools/perf/builtin-sched.c 	OPT_BOOLEAN('s', "summary", &sched.summary_only,
sched            3418 tools/perf/builtin-sched.c 	OPT_BOOLEAN('S', "with-summary", &sched.summary,
sched            3420 tools/perf/builtin-sched.c 	OPT_BOOLEAN('w', "wakeups", &sched.show_wakeups, "Show wakeup events"),
sched            3421 tools/perf/builtin-sched.c 	OPT_BOOLEAN('n', "next", &sched.show_next, "Show next task"),
sched            3422 tools/perf/builtin-sched.c 	OPT_BOOLEAN('M', "migrations", &sched.show_migrations, "Show migration events"),
sched            3423 tools/perf/builtin-sched.c 	OPT_BOOLEAN('V', "cpu-visual", &sched.show_cpu_visual, "Add CPU visual"),
sched            3424 tools/perf/builtin-sched.c 	OPT_BOOLEAN('I', "idle-hist", &sched.idle_hist, "Show idle events only"),
sched            3425 tools/perf/builtin-sched.c 	OPT_STRING(0, "time", &sched.time_str, "str",
sched            3427 tools/perf/builtin-sched.c 	OPT_BOOLEAN(0, "state", &sched.show_state, "Show task state when sched-out"),
sched            3474 tools/perf/builtin-sched.c 	for (i = 0; i < ARRAY_SIZE(sched.curr_pid); i++)
sched            3475 tools/perf/builtin-sched.c 		sched.curr_pid[i] = -1;
sched            3491 tools/perf/builtin-sched.c 		sched.tp_handler = &lat_ops;
sched            3497 tools/perf/builtin-sched.c 		setup_sorting(&sched, latency_options, latency_usage);
sched            3498 tools/perf/builtin-sched.c 		return perf_sched__lat(&sched);
sched            3505 tools/perf/builtin-sched.c 		sched.tp_handler = &map_ops;
sched            3506 tools/perf/builtin-sched.c 		setup_sorting(&sched, latency_options, latency_usage);
sched            3507 tools/perf/builtin-sched.c 		return perf_sched__map(&sched);
sched            3509 tools/perf/builtin-sched.c 		sched.tp_handler = &replay_ops;
sched            3515 tools/perf/builtin-sched.c 		return perf_sched__replay(&sched);
sched            3523 tools/perf/builtin-sched.c 		if ((sched.show_wakeups || sched.show_next) &&
sched            3524 tools/perf/builtin-sched.c 		    sched.summary_only) {
sched            3527 tools/perf/builtin-sched.c 			if (sched.show_wakeups)
sched            3529 tools/perf/builtin-sched.c 			if (sched.show_next)
sched            3534 tools/perf/builtin-sched.c 		return perf_sched__timehist(&sched);
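
cmd_sched then selects a tp_handler and entry point (latency, map, replay, timehist) from the subcommand word, after initializing every curr_pid slot to -1. A compact sketch of such a dispatch table; the exact-match lookup here is simpler than perf's prefix matching, and the handler names are placeholders:

    #include <stdio.h>
    #include <string.h>

    static int do_latency(void) { printf("latency\n"); return 0; }
    static int do_map(void)     { printf("map\n");     return 0; }
    static int do_replay(void)  { printf("replay\n");  return 0; }

    static const struct {
        const char *name;
        int (*run)(void);
    } subcmds[] = {
        { "latency", do_latency },
        { "lat",     do_latency },
        { "map",     do_map },
        { "replay",  do_replay },
    };

    int main(int argc, char **argv)
    {
        if (argc < 2)
            return 1;
        for (size_t i = 0; i < sizeof(subcmds) / sizeof(subcmds[0]); i++) {
            if (!strcmp(argv[1], subcmds[i].name))
                return subcmds[i].run();
        }
        fprintf(stderr, "unknown subcommand '%s'\n", argv[1]);
        return 1;
    }
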
sched             146 tools/perf/builtin-trace.c 	bool			sched;
sched            3301 tools/perf/builtin-trace.c 	if (trace->sched &&
sched            3742 tools/perf/builtin-trace.c 	if (trace->sched)
sched            4136 tools/perf/builtin-trace.c 	OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"),
sched             531 tools/testing/selftests/x86/fsgsbase.c 	for (int sched = 0; sched < 2; sched++) {
sched             532 tools/testing/selftests/x86/fsgsbase.c 		mov_0_gs(0, !!sched);
sched             533 tools/testing/selftests/x86/fsgsbase.c 		mov_0_gs(1, !!sched);
sched             534 tools/testing/selftests/x86/fsgsbase.c 		mov_0_gs(0x200000000, !!sched);