scheduler        1797 drivers/gpu/drm/i915/gem/i915_gem_context.c 			else if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
scheduler         125 drivers/gpu/drm/i915/gt/intel_engine_user.c 	i915->caps.scheduler = enabled & ~disabled;
scheduler         126 drivers/gpu/drm/i915/gt/intel_engine_user.c 	if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_ENABLED))
scheduler         127 drivers/gpu/drm/i915/gt/intel_engine_user.c 		i915->caps.scheduler = 0;
scheduler        1025 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 			if (!(gt->i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
scheduler         515 drivers/gpu/drm/i915/gt/selftest_lrc.c 	if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
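
The i915 hits above all consult one capability bitmask: intel_engine_user.c computes caps.scheduler as enabled & ~disabled and zeroes the whole mask unless I915_SCHEDULER_CAP_ENABLED survived, so later consumers (gem_context and the hangcheck/LRC selftests) need only a bare bit test. A minimal sketch of that consumer idiom; can_set_priority() is a hypothetical helper, not an in-tree function:

	/* Sketch: the mask is all-or-nothing on CAP_ENABLED, so a dependent
	 * capability needs only a single bit test.
	 * can_set_priority() is a hypothetical helper. */
	static bool can_set_priority(const struct drm_i915_private *i915)
	{
		return i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY;
	}
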
scheduler          99 drivers/gpu/drm/i915/gvt/debugfs.c 	spin_lock_bh(&gvt->scheduler.mmio_context_lock);
scheduler         106 drivers/gpu/drm/i915/gvt/debugfs.c 	spin_unlock_bh(&gvt->scheduler.mmio_context_lock);
scheduler         319 drivers/gpu/drm/i915/gvt/gvt.c 	spin_lock_init(&gvt->scheduler.mmio_context_lock);
scheduler         319 drivers/gpu/drm/i915/gvt/gvt.h 	struct intel_gvt_workload_scheduler scheduler;
scheduler        1653 drivers/gpu/drm/i915/gvt/handlers.c 	if (ring_id < 0 || vgpu == gvt->scheduler.engine_owner[ring_id] ||
scheduler         134 drivers/gpu/drm/i915/gvt/sched_policy.c 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
scheduler         143 drivers/gpu/drm/i915/gvt/sched_policy.c 	if (scheduler->next_vgpu == scheduler->current_vgpu) {
scheduler         144 drivers/gpu/drm/i915/gvt/sched_policy.c 		scheduler->next_vgpu = NULL;
scheduler         152 drivers/gpu/drm/i915/gvt/sched_policy.c 	scheduler->need_reschedule = true;
scheduler         156 drivers/gpu/drm/i915/gvt/sched_policy.c 		if (scheduler->current_workload[i])
scheduler         161 drivers/gpu/drm/i915/gvt/sched_policy.c 	vgpu_update_timeslice(scheduler->current_vgpu, cur_time);
scheduler         162 drivers/gpu/drm/i915/gvt/sched_policy.c 	vgpu_data = scheduler->next_vgpu->sched_data;
scheduler         166 drivers/gpu/drm/i915/gvt/sched_policy.c 	scheduler->current_vgpu = scheduler->next_vgpu;
scheduler         167 drivers/gpu/drm/i915/gvt/sched_policy.c 	scheduler->next_vgpu = NULL;
scheduler         169 drivers/gpu/drm/i915/gvt/sched_policy.c 	scheduler->need_reschedule = false;
scheduler         173 drivers/gpu/drm/i915/gvt/sched_policy.c 		wake_up(&scheduler->waitq[i]);
scheduler         214 drivers/gpu/drm/i915/gvt/sched_policy.c 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
scheduler         219 drivers/gpu/drm/i915/gvt/sched_policy.c 	if (list_empty(&sched_data->lru_runq_head) || scheduler->next_vgpu)
scheduler         224 drivers/gpu/drm/i915/gvt/sched_policy.c 		scheduler->next_vgpu = vgpu;
scheduler         233 drivers/gpu/drm/i915/gvt/sched_policy.c 		scheduler->next_vgpu = gvt->idle_vgpu;
scheduler         236 drivers/gpu/drm/i915/gvt/sched_policy.c 	if (scheduler->next_vgpu)
scheduler         242 drivers/gpu/drm/i915/gvt/sched_policy.c 	struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;
scheduler         258 drivers/gpu/drm/i915/gvt/sched_policy.c 	vgpu_update_timeslice(gvt->scheduler.current_vgpu, cur_time);
scheduler         279 drivers/gpu/drm/i915/gvt/sched_policy.c 	struct intel_gvt_workload_scheduler *scheduler =
scheduler         280 drivers/gpu/drm/i915/gvt/sched_policy.c 		&gvt->scheduler;
scheduler         294 drivers/gpu/drm/i915/gvt/sched_policy.c 	scheduler->sched_data = data;
scheduler         301 drivers/gpu/drm/i915/gvt/sched_policy.c 	struct intel_gvt_workload_scheduler *scheduler =
scheduler         302 drivers/gpu/drm/i915/gvt/sched_policy.c 		&gvt->scheduler;
scheduler         303 drivers/gpu/drm/i915/gvt/sched_policy.c 	struct gvt_sched_data *data = scheduler->sched_data;
scheduler         308 drivers/gpu/drm/i915/gvt/sched_policy.c 	scheduler->sched_data = NULL;
scheduler         331 drivers/gpu/drm/i915/gvt/sched_policy.c 	struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;
scheduler         343 drivers/gpu/drm/i915/gvt/sched_policy.c 	struct gvt_sched_data *sched_data = vgpu->gvt->scheduler.sched_data;
scheduler         385 drivers/gpu/drm/i915/gvt/sched_policy.c 	gvt->scheduler.sched_ops = &tbs_schedule_ops;
scheduler         386 drivers/gpu/drm/i915/gvt/sched_policy.c 	ret = gvt->scheduler.sched_ops->init(gvt);
scheduler         395 drivers/gpu/drm/i915/gvt/sched_policy.c 	gvt->scheduler.sched_ops->clean(gvt);
scheduler         411 drivers/gpu/drm/i915/gvt/sched_policy.c 	ret = vgpu->gvt->scheduler.sched_ops->init_vgpu(vgpu);
scheduler         420 drivers/gpu/drm/i915/gvt/sched_policy.c 	vgpu->gvt->scheduler.sched_ops->clean_vgpu(vgpu);
scheduler         431 drivers/gpu/drm/i915/gvt/sched_policy.c 		vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu);
scheduler         445 drivers/gpu/drm/i915/gvt/sched_policy.c 	struct intel_gvt_workload_scheduler *scheduler =
scheduler         446 drivers/gpu/drm/i915/gvt/sched_policy.c 		&vgpu->gvt->scheduler;
scheduler         457 drivers/gpu/drm/i915/gvt/sched_policy.c 	scheduler->sched_ops->stop_schedule(vgpu);
scheduler         459 drivers/gpu/drm/i915/gvt/sched_policy.c 	if (scheduler->next_vgpu == vgpu)
scheduler         460 drivers/gpu/drm/i915/gvt/sched_policy.c 		scheduler->next_vgpu = NULL;
scheduler         462 drivers/gpu/drm/i915/gvt/sched_policy.c 	if (scheduler->current_vgpu == vgpu) {
scheduler         464 drivers/gpu/drm/i915/gvt/sched_policy.c 		scheduler->need_reschedule = true;
scheduler         465 drivers/gpu/drm/i915/gvt/sched_policy.c 		scheduler->current_vgpu = NULL;
scheduler         469 drivers/gpu/drm/i915/gvt/sched_policy.c 	spin_lock_bh(&scheduler->mmio_context_lock);
scheduler         471 drivers/gpu/drm/i915/gvt/sched_policy.c 		if (scheduler->engine_owner[ring_id] == vgpu) {
scheduler         473 drivers/gpu/drm/i915/gvt/sched_policy.c 			scheduler->engine_owner[ring_id] = NULL;
scheduler         476 drivers/gpu/drm/i915/gvt/sched_policy.c 	spin_unlock_bh(&scheduler->mmio_context_lock);
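
The sched_ops calls in the sched_policy.c hits go through a pluggable ops table bound once in intel_gvt_init_sched_policy(), and vGPU teardown unwinds through the same table, which is why the time-based policy (tbs_schedule_ops) can be swapped without touching the workload engine. A condensed sketch of the vtable shape, assuming the member names declared in gvt/sched_policy.h:

	/* Sketch of the policy vtable; member list per gvt/sched_policy.h. */
	struct intel_gvt_sched_policy_ops {
		int  (*init)(struct intel_gvt *gvt);
		void (*clean)(struct intel_gvt *gvt);
		int  (*init_vgpu)(struct intel_vgpu *vgpu);
		void (*clean_vgpu)(struct intel_vgpu *vgpu);
		void (*start_schedule)(struct intel_vgpu *vgpu);
		void (*stop_schedule)(struct intel_vgpu *vgpu);
	};
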
scheduler         232 drivers/gpu/drm/i915/gvt/scheduler.c 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
scheduler         238 drivers/gpu/drm/i915/gvt/scheduler.c 		spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
scheduler         240 drivers/gpu/drm/i915/gvt/scheduler.c 		    scheduler->engine_owner[ring_id]) {
scheduler         242 drivers/gpu/drm/i915/gvt/scheduler.c 			intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
scheduler         244 drivers/gpu/drm/i915/gvt/scheduler.c 			scheduler->engine_owner[ring_id] = NULL;
scheduler         246 drivers/gpu/drm/i915/gvt/scheduler.c 		spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
scheduler         251 drivers/gpu/drm/i915/gvt/scheduler.c 	workload = scheduler->current_workload[ring_id];
scheduler         257 drivers/gpu/drm/i915/gvt/scheduler.c 		spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
scheduler         258 drivers/gpu/drm/i915/gvt/scheduler.c 		if (workload->vgpu != scheduler->engine_owner[ring_id]) {
scheduler         260 drivers/gpu/drm/i915/gvt/scheduler.c 			intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
scheduler         262 drivers/gpu/drm/i915/gvt/scheduler.c 			scheduler->engine_owner[ring_id] = workload->vgpu;
scheduler         266 drivers/gpu/drm/i915/gvt/scheduler.c 		spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
scheduler         744 drivers/gpu/drm/i915/gvt/scheduler.c 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
scheduler         753 drivers/gpu/drm/i915/gvt/scheduler.c 	if (!scheduler->current_vgpu) {
scheduler         758 drivers/gpu/drm/i915/gvt/scheduler.c 	if (scheduler->need_reschedule) {
scheduler         763 drivers/gpu/drm/i915/gvt/scheduler.c 	if (!scheduler->current_vgpu->active ||
scheduler         764 drivers/gpu/drm/i915/gvt/scheduler.c 	    list_empty(workload_q_head(scheduler->current_vgpu, ring_id)))
scheduler         771 drivers/gpu/drm/i915/gvt/scheduler.c 	if (scheduler->current_workload[ring_id]) {
scheduler         772 drivers/gpu/drm/i915/gvt/scheduler.c 		workload = scheduler->current_workload[ring_id];
scheduler         784 drivers/gpu/drm/i915/gvt/scheduler.c 	scheduler->current_workload[ring_id] = container_of(
scheduler         785 drivers/gpu/drm/i915/gvt/scheduler.c 			workload_q_head(scheduler->current_vgpu, ring_id)->next,
scheduler         788 drivers/gpu/drm/i915/gvt/scheduler.c 	workload = scheduler->current_workload[ring_id];
scheduler         906 drivers/gpu/drm/i915/gvt/scheduler.c 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
scheduler         908 drivers/gpu/drm/i915/gvt/scheduler.c 		scheduler->current_workload[ring_id];
scheduler         952 drivers/gpu/drm/i915/gvt/scheduler.c 	scheduler->current_workload[ring_id] = NULL;
scheduler         976 drivers/gpu/drm/i915/gvt/scheduler.c 	wake_up(&scheduler->workload_complete_wq);
scheduler         978 drivers/gpu/drm/i915/gvt/scheduler.c 	if (gvt->scheduler.need_reschedule)
scheduler         995 drivers/gpu/drm/i915/gvt/scheduler.c 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
scheduler        1008 drivers/gpu/drm/i915/gvt/scheduler.c 		add_wait_queue(&scheduler->waitq[ring_id], &wait);
scheduler        1016 drivers/gpu/drm/i915/gvt/scheduler.c 		remove_wait_queue(&scheduler->waitq[ring_id], &wait);
scheduler        1074 drivers/gpu/drm/i915/gvt/scheduler.c 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
scheduler        1079 drivers/gpu/drm/i915/gvt/scheduler.c 		wait_event(scheduler->workload_complete_wq,
scheduler        1086 drivers/gpu/drm/i915/gvt/scheduler.c 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
scheduler        1096 drivers/gpu/drm/i915/gvt/scheduler.c 		kthread_stop(scheduler->thread[i]);
scheduler        1102 drivers/gpu/drm/i915/gvt/scheduler.c 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
scheduler        1110 drivers/gpu/drm/i915/gvt/scheduler.c 	init_waitqueue_head(&scheduler->workload_complete_wq);
scheduler        1113 drivers/gpu/drm/i915/gvt/scheduler.c 		init_waitqueue_head(&scheduler->waitq[i]);
scheduler        1124 drivers/gpu/drm/i915/gvt/scheduler.c 		scheduler->thread[i] = kthread_run(workload_thread, param,
scheduler        1126 drivers/gpu/drm/i915/gvt/scheduler.c 		if (IS_ERR(scheduler->thread[i])) {
scheduler        1128 drivers/gpu/drm/i915/gvt/scheduler.c 			ret = PTR_ERR(scheduler->thread[i]);
scheduler        1629 drivers/gpu/drm/i915/gvt/scheduler.c 	wake_up(&workload->vgpu->gvt->scheduler.waitq[workload->ring_id]);
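
Taken together, the gvt/scheduler.c hits form a classic waitqueue handshake: one kthread per ring parks on scheduler->waitq[ring_id] until a workload is queued (the final wake_up above), while completion paths wake workload_complete_wq for anyone draining. A hedged sketch of the consumer loop, assuming a wait_woken()-style wait as used by modern kernel threads:

	/* Sketch: per-ring consumer. pick_next_workload() returns NULL while
	 * the current vGPU has nothing queued or a reschedule is pending. */
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	while (!kthread_should_stop()) {
		struct intel_vgpu_workload *workload = NULL;

		add_wait_queue(&scheduler->waitq[ring_id], &wait);
		do {
			workload = pick_next_workload(gvt, ring_id);
			if (workload)
				break;
			wait_woken(&wait, TASK_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
		} while (!kthread_should_stop());
		remove_wait_queue(&scheduler->waitq[ring_id], &wait);

		if (!workload)
			break;
		/* ... dispatch, wait for the request, then the completion
		 * path wakes scheduler->workload_complete_wq ... */
	}
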
scheduler         536 drivers/gpu/drm/i915/gvt/vgpu.c 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
scheduler         550 drivers/gpu/drm/i915/gvt/vgpu.c 	if (scheduler->current_vgpu == NULL) {
scheduler          62 drivers/gpu/drm/i915/i915_getparam.c 		value = !!(i915->caps.scheduler & I915_SCHEDULER_CAP_SEMAPHORES);
scheduler         108 drivers/gpu/drm/i915/i915_getparam.c 		value = i915->caps.scheduler;
scheduler         110 drivers/gpu/drm/i915/i915_pmu.c 	else if (i915->caps.scheduler & I915_SCHEDULER_CAP_ENGINE_BUSY_STATS)
scheduler         985 drivers/gpu/drm/i915/intel_device_info.c 	drm_printf(p, "scheduler: %x\n", caps->scheduler);
scheduler         219 drivers/gpu/drm/i915/intel_device_info.h 	unsigned int scheduler;
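
i915_getparam.c exports the same mask to userspace: I915_PARAM_HAS_SCHEDULER returns the raw caps.scheduler bitmask, and the semaphore query is just one bit of it. A minimal userspace sketch (error handling trimmed; ioctl failure is treated as "no scheduler"):

	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>

	static int i915_scheduler_caps(int drm_fd)
	{
		int caps = 0;
		struct drm_i915_getparam gp = {
			.param = I915_PARAM_HAS_SCHEDULER,
			.value = &caps,
		};

		if (ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp) < 0)
			return 0;	/* no scheduler info available */

		return caps;	/* I915_SCHEDULER_CAP_* bitmask, 0 if disabled */
	}
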
scheduler         103 drivers/gpu/drm/scheduler/gpu_scheduler_trace.h #define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/scheduler
scheduler         168 drivers/usb/host/u132-hcd.c 	struct delayed_work scheduler;
scheduler         176 drivers/usb/host/u132-hcd.c 	struct delayed_work scheduler;
scheduler         309 drivers/usb/host/u132-hcd.c 		if (queue_delayed_work(workqueue, &ring->scheduler, delta))
scheduler         311 drivers/usb/host/u132-hcd.c 	} else if (queue_delayed_work(workqueue, &ring->scheduler, 0))
scheduler         325 drivers/usb/host/u132-hcd.c 	if (cancel_delayed_work(&ring->scheduler))
scheduler         388 drivers/usb/host/u132-hcd.c 	if (queue_delayed_work(workqueue, &endp->scheduler, delta))
scheduler         394 drivers/usb/host/u132-hcd.c 	if (cancel_delayed_work(&endp->scheduler))
scheduler        1300 drivers/usb/host/u132-hcd.c 		container_of(work, struct u132_ring, scheduler.work);
scheduler        1359 drivers/usb/host/u132-hcd.c 		container_of(work, struct u132_endp, scheduler.work);
scheduler        1875 drivers/usb/host/u132-hcd.c 	INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
scheduler        1974 drivers/usb/host/u132-hcd.c 	INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
scheduler        2070 drivers/usb/host/u132-hcd.c 	INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
scheduler        3022 drivers/usb/host/u132-hcd.c 		INIT_DELAYED_WORK(&ring->scheduler,
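
The u132-hcd hits embed a delayed_work named scheduler in both struct u132_ring and struct u132_endp; each handler recovers its container via container_of() on the embedded .work member and may re-arm itself. A sketch of the idiom, where struct u132_ring_like and the 10 ms period are hypothetical stand-ins and system_wq replaces the driver's private workqueue:

	#include <linux/workqueue.h>

	struct u132_ring_like {			/* hypothetical stand-in */
		struct delayed_work scheduler;
	};

	static void ring_work_scheduler(struct work_struct *work)
	{
		struct u132_ring_like *ring = container_of(work,
				struct u132_ring_like, scheduler.work);

		/* ... service the ring, then optionally re-arm ... */
		queue_delayed_work(system_wq, &ring->scheduler,
				   msecs_to_jiffies(10));
	}

	/* Setup mirrors the INIT_DELAYED_WORK() calls above:
	 *	INIT_DELAYED_WORK(&ring->scheduler, ring_work_scheduler);
	 */
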
scheduler         634 include/net/ip_vs.h 	struct ip_vs_scheduler __rcu *scheduler; /* bound scheduler object */
scheduler        1381 include/net/ip_vs.h int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler);
scheduler        1382 include/net/ip_vs.h int unregister_ip_vs_scheduler(struct ip_vs_scheduler *scheduler);
scheduler        1384 include/net/ip_vs.h 			 struct ip_vs_scheduler *scheduler);
scheduler        1388 include/net/ip_vs.h void ip_vs_scheduler_put(struct ip_vs_scheduler *scheduler);
scheduler         361 net/netfilter/ipvs/ip_vs_core.c 		sched = rcu_dereference(svc->scheduler);
scheduler         536 net/netfilter/ipvs/ip_vs_core.c 	sched = rcu_dereference(svc->scheduler);
scheduler         935 net/netfilter/ipvs/ip_vs_ctl.c 		sched = rcu_dereference_protected(svc->scheduler, 1);
scheduler         939 net/netfilter/ipvs/ip_vs_ctl.c 		sched = rcu_dereference_protected(svc->scheduler, 1);
scheduler        1190 net/netfilter/ipvs/ip_vs_ctl.c 		sched = rcu_dereference_protected(svc->scheduler, 1);
scheduler        1448 net/netfilter/ipvs/ip_vs_ctl.c 	old_sched = rcu_dereference_protected(svc->scheduler, 1);
scheduler        1452 net/netfilter/ipvs/ip_vs_ctl.c 			RCU_INIT_POINTER(svc->scheduler, NULL);
scheduler        1510 net/netfilter/ipvs/ip_vs_ctl.c 	old_sched = rcu_dereference_protected(svc->scheduler, 1);
scheduler        2132 net/netfilter/ipvs/ip_vs_ctl.c 		struct ip_vs_scheduler *sched = rcu_dereference(svc->scheduler);
scheduler        2562 net/netfilter/ipvs/ip_vs_ctl.c 	sched = rcu_dereference_protected(src->scheduler, 1);
scheduler        3056 net/netfilter/ipvs/ip_vs_ctl.c 	sched = rcu_dereference_protected(svc->scheduler, 1);
scheduler          41 net/netfilter/ipvs/ip_vs_sched.c 			 struct ip_vs_scheduler *scheduler)
scheduler          45 net/netfilter/ipvs/ip_vs_sched.c 	if (scheduler->init_service) {
scheduler          46 net/netfilter/ipvs/ip_vs_sched.c 		ret = scheduler->init_service(svc);
scheduler          52 net/netfilter/ipvs/ip_vs_sched.c 	rcu_assign_pointer(svc->scheduler, scheduler);
scheduler          65 net/netfilter/ipvs/ip_vs_sched.c 	cur_sched = rcu_dereference_protected(svc->scheduler, 1);
scheduler         133 net/netfilter/ipvs/ip_vs_sched.c void ip_vs_scheduler_put(struct ip_vs_scheduler *scheduler)
scheduler         135 net/netfilter/ipvs/ip_vs_sched.c 	if (scheduler)
scheduler         136 net/netfilter/ipvs/ip_vs_sched.c 		module_put(scheduler->module);
scheduler         145 net/netfilter/ipvs/ip_vs_sched.c 	struct ip_vs_scheduler *sched = rcu_dereference(svc->scheduler);
scheduler         167 net/netfilter/ipvs/ip_vs_sched.c int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler)
scheduler         171 net/netfilter/ipvs/ip_vs_sched.c 	if (!scheduler) {
scheduler         176 net/netfilter/ipvs/ip_vs_sched.c 	if (!scheduler->name) {
scheduler         187 net/netfilter/ipvs/ip_vs_sched.c 	if (!list_empty(&scheduler->n_list)) {
scheduler         191 net/netfilter/ipvs/ip_vs_sched.c 		       __func__, scheduler->name);
scheduler         200 net/netfilter/ipvs/ip_vs_sched.c 		if (strcmp(scheduler->name, sched->name) == 0) {
scheduler         204 net/netfilter/ipvs/ip_vs_sched.c 			       "in the system\n", __func__, scheduler->name);
scheduler         211 net/netfilter/ipvs/ip_vs_sched.c 	list_add(&scheduler->n_list, &ip_vs_schedulers);
scheduler         214 net/netfilter/ipvs/ip_vs_sched.c 	pr_info("[%s] scheduler registered.\n", scheduler->name);
scheduler         223 net/netfilter/ipvs/ip_vs_sched.c int unregister_ip_vs_scheduler(struct ip_vs_scheduler *scheduler)
scheduler         225 net/netfilter/ipvs/ip_vs_sched.c 	if (!scheduler) {
scheduler         231 net/netfilter/ipvs/ip_vs_sched.c 	if (list_empty(&scheduler->n_list)) {
scheduler         234 net/netfilter/ipvs/ip_vs_sched.c 		       __func__, scheduler->name);
scheduler         241 net/netfilter/ipvs/ip_vs_sched.c 	list_del(&scheduler->n_list);
scheduler         247 net/netfilter/ipvs/ip_vs_sched.c 	pr_info("[%s] scheduler unregistered.\n", scheduler->name);
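
ip_vs_sched.c implements the registration protocol the ip_vs.h declarations promise: a scheduler must arrive with a name, an unlinked n_list, and a module owner, is rejected if a same-named policy is already present, and is reached later through rcu_dereference(svc->scheduler) on the packet path. A hedged sketch of a client module, modeled on the in-tree round-robin scheduler; my_schedule, its pick-first policy, and the "mysched" name are hypothetical:

	#include <linux/module.h>
	#include <net/ip_vs.h>

	static struct ip_vs_dest *my_schedule(struct ip_vs_service *svc,
					      const struct sk_buff *skb,
					      struct ip_vs_iphdr *iph)
	{
		struct ip_vs_dest *dest;

		/* hypothetical policy: first destination that is not
		 * overloaded, ignoring weights entirely */
		list_for_each_entry_rcu(dest, &svc->destinations, n_list)
			if (!(dest->flags & IP_VS_DEST_F_OVERLOAD))
				return dest;
		return NULL;
	}

	static struct ip_vs_scheduler my_scheduler = {
		.name		= "mysched",	/* hypothetical */
		.refcnt		= ATOMIC_INIT(0),
		.module		= THIS_MODULE,
		.n_list		= LIST_HEAD_INIT(my_scheduler.n_list),
		.schedule	= my_schedule,
	};

	static int __init my_sched_init(void)
	{
		return register_ip_vs_scheduler(&my_scheduler);
	}

	static void __exit my_sched_exit(void)
	{
		unregister_ip_vs_scheduler(&my_scheduler);
	}

	module_init(my_sched_init);
	module_exit(my_sched_exit);
	MODULE_LICENSE("GPL");
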
scheduler         217 sound/pci/mixart/mixart_core.h 	u64                 scheduler;
scheduler         230 sound/pci/mixart/mixart_core.h 	u64           scheduler;
scheduler         239 sound/pci/mixart/mixart_core.h 	u64           scheduler;
scheduler         380 sound/pci/mixart/mixart_core.h 	u64 scheduler;
scheduler         431 sound/pci/mixart/mixart_core.h 	u64 scheduler;
scheduler         491 sound/pci/mixart/mixart_core.h 	u64 scheduler;
scheduler         536 sound/pci/mixart/mixart_core.h 	u64 scheduler;