sched_data        429 block/bfq-cgroup.c 	entity->sched_data = &bfqg->sched_data;
sched_data        532 block/bfq-cgroup.c 	entity->my_sched_data = &bfqg->sched_data;
sched_data        564 block/bfq-cgroup.c 	entity->sched_data = &parent->sched_data;
sched_data        651 block/bfq-cgroup.c 	entity->sched_data = &bfqg->sched_data;
sched_data        698 block/bfq-cgroup.c 		if (entity->sched_data != &bfqg->sched_data) {
sched_data        706 block/bfq-cgroup.c 		if (entity->sched_data != &bfqg->sched_data)
sched_data        844 block/bfq-cgroup.c 	if (bfqg->sched_data.in_service_entity)
sched_data        846 block/bfq-cgroup.c 					 bfqg->sched_data.in_service_entity,
sched_data        877 block/bfq-cgroup.c 		st = bfqg->sched_data.service_tree + i;
sched_data       1389 block/bfq-cgroup.c 	entity->sched_data = &bfqg->sched_data;
sched_data       1423 block/bfq-cgroup.c 		bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;
sched_data       6419 block/bfq-iosched.c 		root_group->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;
sched_data       6420 block/bfq-iosched.c 	root_group->sched_data.bfq_class_idle_last_service = jiffies;
sched_data        191 block/bfq-iosched.h 	struct bfq_sched_data *sched_data;
sched_data        901 block/bfq-iosched.h 	struct bfq_sched_data sched_data;
sched_data        920 block/bfq-iosched.h 	struct bfq_sched_data sched_data;
sched_data        169 block/bfq-wf2q.c 	group_sd = next_in_service->sched_data;
sched_data        171 block/bfq-wf2q.c 	bfqg = container_of(group_sd, struct bfq_group, sched_data);
sched_data        496 block/bfq-wf2q.c 	sd = entity->sched_data;
sched_data        497 block/bfq-wf2q.c 	bfqg = container_of(sd, struct bfq_group, sched_data);
sched_data        597 block/bfq-wf2q.c 	sd = entity->sched_data;
sched_data        598 block/bfq-wf2q.c 	bfqg = container_of(sd, struct bfq_group, sched_data);
sched_data        674 block/bfq-wf2q.c 			  entity == entity->sched_data->in_service_entity);
sched_data        704 block/bfq-wf2q.c 	struct bfq_sched_data *sched_data = entity->sched_data;
sched_data        707 block/bfq-wf2q.c 	return sched_data->service_tree + idx;
sched_data        750 block/bfq-wf2q.c 			bfqg = container_of(sd, struct bfq_group, sched_data);
sched_data       1049 block/bfq-wf2q.c 	struct bfq_sched_data *sd = entity->sched_data;
sched_data       1154 block/bfq-wf2q.c 		sd = entity->sched_data;
sched_data       1175 block/bfq-wf2q.c 	struct bfq_sched_data *sd = entity->sched_data;
sched_data       1231 block/bfq-wf2q.c 		sd = entity->sched_data;
sched_data       1308 block/bfq-wf2q.c 		sd = entity->sched_data;
sched_data       1509 block/bfq-wf2q.c 	struct bfq_sched_data *sd = &bfqd->root_group->sched_data;
sched_data       1531 block/bfq-wf2q.c 	sd = &bfqd->root_group->sched_data;
sched_data       1600 block/bfq-wf2q.c 		struct bfq_sched_data *sd = entity->sched_data;
sched_data       1627 block/bfq-wf2q.c 		entity->sched_data->in_service_entity = NULL;
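
Taken together, the BFQ hits above show one pattern: each bfq_entity's sched_data pointer aims at the bfq_sched_data embedded in its parent bfq_group (bfq-cgroup.c lines 429 and 564), a group entity additionally exposes its own child scheduler through my_sched_data (line 532), and because the struct is embedded rather than pointed to, bfq-wf2q.c can recover the owning group with container_of (lines 169-171, 496-497). A minimal sketch of that embedding, with the struct bodies pared down to the fields visible in this listing and the helper names invented for illustration:

#include <linux/kernel.h>	/* container_of() */

struct bfq_entity;

/* Pared-down stand-ins for the real BFQ structures. */
struct bfq_sched_data {
	struct bfq_entity *in_service_entity;	/* entity being served */
};

struct bfq_entity {
	struct bfq_sched_data *sched_data;	/* parent group's scheduler */
	struct bfq_sched_data *my_sched_data;	/* own scheduler, groups only */
};

struct bfq_group {
	struct bfq_entity entity;		/* the group as an entity */
	struct bfq_sched_data sched_data;	/* scheduler for its children */
};

/* bfq-wf2q.c pattern: an embedded sched_data identifies its group. */
static struct bfq_group *demo_group_of(struct bfq_sched_data *sd)
{
	return container_of(sd, struct bfq_group, sched_data);
}

/* bfq-cgroup.c pattern: hook a child entity into its parent group. */
static void demo_link_to_parent(struct bfq_entity *entity,
				struct bfq_group *parent)
{
	entity->sched_data = &parent->sched_data;
}
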
sched_data         27 block/blk-mq-sched.c 		if (exit && hctx->sched_data)
sched_data         29 block/blk-mq-sched.c 		kfree(hctx->sched_data);
sched_data         30 block/blk-mq-sched.c 		hctx->sched_data = NULL;
sched_data        601 block/blk-mq-sched.c 		if (e->type->ops.exit_hctx && hctx->sched_data) {
sched_data        603 block/blk-mq-sched.c 			hctx->sched_data = NULL;
sched_data        504 block/kyber-iosched.c 	hctx->sched_data = khd;
sched_data        519 block/kyber-iosched.c 	struct kyber_hctx_data *khd = hctx->sched_data;
sched_data        525 block/kyber-iosched.c 	kfree(hctx->sched_data);
sched_data        568 block/kyber-iosched.c 	struct kyber_hctx_data *khd = hctx->sched_data;
sched_data        590 block/kyber-iosched.c 	struct kyber_hctx_data *khd = hctx->sched_data;
sched_data        802 block/kyber-iosched.c 	struct kyber_hctx_data *khd = hctx->sched_data;
sched_data        847 block/kyber-iosched.c 	struct kyber_hctx_data *khd = hctx->sched_data;
sched_data        910 block/kyber-iosched.c 	struct kyber_hctx_data *khd = hctx->sched_data;			\
sched_data        920 block/kyber-iosched.c 	struct kyber_hctx_data *khd = hctx->sched_data;			\
sched_data        929 block/kyber-iosched.c 	struct kyber_hctx_data *khd = hctx->sched_data;			\
sched_data        944 block/kyber-iosched.c 	struct kyber_hctx_data *khd = hctx->sched_data;			\
sched_data        968 block/kyber-iosched.c 	struct kyber_hctx_data *khd = hctx->sched_data;
sched_data        977 block/kyber-iosched.c 	struct kyber_hctx_data *khd = hctx->sched_data;
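
blk-mq-sched.c and kyber-iosched.c exercise the other block-layer meaning of the name: struct blk_mq_hw_ctx (include/linux/blk-mq.h, line 29 below) carries an opaque void *sched_data slot that an elevator fills with per-hardware-queue state in its init_hctx hook and that gets kfree()d and NULLed on teardown (blk-mq-sched.c lines 27-30, kyber-iosched.c lines 504/525). A hedged sketch of that lifecycle; the payload struct and function names here are illustrative, only the hctx field and the alloc/free calls are taken from the kernel:

#include <linux/blk-mq.h>
#include <linux/slab.h>

/* Illustrative per-hctx payload; the real kyber_hctx_data is richer. */
struct demo_hctx_data {
	unsigned int cur_domain;
};

/* Mirrors kyber_init_hctx(): allocate and park in hctx->sched_data. */
static int demo_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	struct demo_hctx_data *dhd;

	dhd = kzalloc_node(sizeof(*dhd), GFP_KERNEL, hctx->numa_node);
	if (!dhd)
		return -ENOMEM;

	hctx->sched_data = dhd;
	return 0;
}

/* Mirrors the exit/teardown paths above: free and clear the slot so a
 * later teardown pass cannot double-free it. */
static void demo_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	kfree(hctx->sched_data);
	hctx->sched_data = NULL;
}
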
sched_data        182 drivers/gpu/drm/i915/gvt/gvt.h 	void *sched_data;
sched_data         83 drivers/gpu/drm/i915/gvt/sched_policy.c 	vgpu_data = vgpu->sched_data;
sched_data         93 drivers/gpu/drm/i915/gvt/sched_policy.c static void gvt_balance_timeslice(struct gvt_sched_data *sched_data)
sched_data        107 drivers/gpu/drm/i915/gvt/sched_policy.c 		list_for_each(pos, &sched_data->lru_runq_head) {
sched_data        112 drivers/gpu/drm/i915/gvt/sched_policy.c 		list_for_each(pos, &sched_data->lru_runq_head) {
sched_data        121 drivers/gpu/drm/i915/gvt/sched_policy.c 		list_for_each(pos, &sched_data->lru_runq_head) {
sched_data        162 drivers/gpu/drm/i915/gvt/sched_policy.c 	vgpu_data = scheduler->next_vgpu->sched_data;
sched_data        176 drivers/gpu/drm/i915/gvt/sched_policy.c static struct intel_vgpu *find_busy_vgpu(struct gvt_sched_data *sched_data)
sched_data        180 drivers/gpu/drm/i915/gvt/sched_policy.c 	struct list_head *head = &sched_data->lru_runq_head;
sched_data        211 drivers/gpu/drm/i915/gvt/sched_policy.c static void tbs_sched_func(struct gvt_sched_data *sched_data)
sched_data        213 drivers/gpu/drm/i915/gvt/sched_policy.c 	struct intel_gvt *gvt = sched_data->gvt;
sched_data        219 drivers/gpu/drm/i915/gvt/sched_policy.c 	if (list_empty(&sched_data->lru_runq_head) || scheduler->next_vgpu)
sched_data        222 drivers/gpu/drm/i915/gvt/sched_policy.c 	vgpu = find_busy_vgpu(sched_data);
sched_data        225 drivers/gpu/drm/i915/gvt/sched_policy.c 		vgpu_data = vgpu->sched_data;
sched_data        230 drivers/gpu/drm/i915/gvt/sched_policy.c 				      &sched_data->lru_runq_head);
sched_data        242 drivers/gpu/drm/i915/gvt/sched_policy.c 	struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;
sched_data        250 drivers/gpu/drm/i915/gvt/sched_policy.c 		if (cur_time >= sched_data->expire_time) {
sched_data        251 drivers/gpu/drm/i915/gvt/sched_policy.c 			gvt_balance_timeslice(sched_data);
sched_data        252 drivers/gpu/drm/i915/gvt/sched_policy.c 			sched_data->expire_time = ktime_add_ms(
sched_data        259 drivers/gpu/drm/i915/gvt/sched_policy.c 	tbs_sched_func(sched_data);
sched_data        294 drivers/gpu/drm/i915/gvt/sched_policy.c 	scheduler->sched_data = data;
sched_data        303 drivers/gpu/drm/i915/gvt/sched_policy.c 	struct gvt_sched_data *data = scheduler->sched_data;
sched_data        308 drivers/gpu/drm/i915/gvt/sched_policy.c 	scheduler->sched_data = NULL;
sched_data        323 drivers/gpu/drm/i915/gvt/sched_policy.c 	vgpu->sched_data = data;
sched_data        331 drivers/gpu/drm/i915/gvt/sched_policy.c 	struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;
sched_data        333 drivers/gpu/drm/i915/gvt/sched_policy.c 	kfree(vgpu->sched_data);
sched_data        334 drivers/gpu/drm/i915/gvt/sched_policy.c 	vgpu->sched_data = NULL;
sched_data        338 drivers/gpu/drm/i915/gvt/sched_policy.c 		hrtimer_cancel(&sched_data->timer);
sched_data        343 drivers/gpu/drm/i915/gvt/sched_policy.c 	struct gvt_sched_data *sched_data = vgpu->gvt->scheduler.sched_data;
sched_data        344 drivers/gpu/drm/i915/gvt/sched_policy.c 	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
sched_data        355 drivers/gpu/drm/i915/gvt/sched_policy.c 	list_add(&vgpu_data->lru_list, &sched_data->lru_runq_head);
sched_data        357 drivers/gpu/drm/i915/gvt/sched_policy.c 	if (!hrtimer_active(&sched_data->timer))
sched_data        358 drivers/gpu/drm/i915/gvt/sched_policy.c 		hrtimer_start(&sched_data->timer, ktime_add_ns(ktime_get(),
sched_data        359 drivers/gpu/drm/i915/gvt/sched_policy.c 			sched_data->period), HRTIMER_MODE_ABS);
sched_data        365 drivers/gpu/drm/i915/gvt/sched_policy.c 	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
sched_data        426 drivers/gpu/drm/i915/gvt/sched_policy.c 	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
sched_data        448 drivers/gpu/drm/i915/gvt/sched_policy.c 	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
sched_data         53 drivers/gpu/drm/i915/gvt/scheduler.h 	void *sched_data;
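
In GVT, sched_data appears twice as an opaque void * (gvt.h line 182, scheduler.h line 53): the time-based-scheduling policy keeps its global state, an hrtimer plus an LRU runqueue of vGPUs, behind gvt->scheduler.sched_data, and per-vGPU accounting behind vgpu->sched_data. A sketch of the start-schedule step from sched_policy.c lines 343-359, with both payload structs reduced to the fields the listing shows (everything else is assumed for illustration):

#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/list.h>

/* Reduced stand-ins for the two payloads behind the void * fields. */
struct gvt_sched_data {
	struct hrtimer timer;		/* periodic scheduling tick */
	u64 period;			/* tick period, in ns */
	struct list_head lru_runq_head;	/* runnable vGPUs, LRU order */
};

struct vgpu_sched_data {
	struct list_head lru_list;	/* node on lru_runq_head */
};

/* Mirrors tbs_sched_start_schedule(): queue the vGPU on the runqueue
 * and make sure the policy timer is ticking. */
static void demo_start_schedule(struct gvt_sched_data *sched_data,
				struct vgpu_sched_data *vgpu_data)
{
	list_add(&vgpu_data->lru_list, &sched_data->lru_runq_head);

	if (!hrtimer_active(&sched_data->timer))
		hrtimer_start(&sched_data->timer,
			      ktime_add_ns(ktime_get(), sched_data->period),
			      HRTIMER_MODE_ABS);
}
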
sched_data         29 include/linux/blk-mq.h 	void			*sched_data;
sched_data        636 include/net/ip_vs.h 	void			*sched_data;   /* scheduler application data */
sched_data        161 net/netfilter/ipvs/ip_vs_dh.c 	svc->sched_data = s;
sched_data        175 net/netfilter/ipvs/ip_vs_dh.c 	struct ip_vs_dh_state *s = svc->sched_data;
sched_data        190 net/netfilter/ipvs/ip_vs_dh.c 	struct ip_vs_dh_state *s = svc->sched_data;
sched_data        221 net/netfilter/ipvs/ip_vs_dh.c 	s = (struct ip_vs_dh_state *) svc->sched_data;
sched_data        231 net/netfilter/ipvs/ip_vs_lblc.c 	struct ip_vs_lblc_table *tbl = svc->sched_data;
sched_data        258 net/netfilter/ipvs/ip_vs_lblc.c 	struct ip_vs_lblc_table *tbl = svc->sched_data;
sched_data        355 net/netfilter/ipvs/ip_vs_lblc.c 	svc->sched_data = tbl;
sched_data        384 net/netfilter/ipvs/ip_vs_lblc.c 	struct ip_vs_lblc_table *tbl = svc->sched_data;
sched_data        484 net/netfilter/ipvs/ip_vs_lblc.c 	struct ip_vs_lblc_table *tbl = svc->sched_data;
sched_data        397 net/netfilter/ipvs/ip_vs_lblcr.c 	struct ip_vs_lblcr_table *tbl = svc->sched_data;
sched_data        423 net/netfilter/ipvs/ip_vs_lblcr.c 	struct ip_vs_lblcr_table *tbl = svc->sched_data;
sched_data        518 net/netfilter/ipvs/ip_vs_lblcr.c 	svc->sched_data = tbl;
sched_data        547 net/netfilter/ipvs/ip_vs_lblcr.c 	struct ip_vs_lblcr_table *tbl = svc->sched_data;
sched_data        648 net/netfilter/ipvs/ip_vs_lblcr.c 	struct ip_vs_lblcr_table *tbl = svc->sched_data;
sched_data        416 net/netfilter/ipvs/ip_vs_mh.c 	svc->sched_data = s;
sched_data        422 net/netfilter/ipvs/ip_vs_mh.c 	struct ip_vs_mh_state *s = svc->sched_data;
sched_data        435 net/netfilter/ipvs/ip_vs_mh.c 	struct ip_vs_mh_state *s = svc->sched_data;
sched_data        490 net/netfilter/ipvs/ip_vs_mh.c 	s = (struct ip_vs_mh_state *)svc->sched_data;
sched_data         28 net/netfilter/ipvs/ip_vs_rr.c 	svc->sched_data = &svc->destinations;
sched_data         38 net/netfilter/ipvs/ip_vs_rr.c 	p = (struct list_head *) svc->sched_data;
sched_data         43 net/netfilter/ipvs/ip_vs_rr.c 		svc->sched_data = p->next->prev;
sched_data         63 net/netfilter/ipvs/ip_vs_rr.c 	p = (struct list_head *) svc->sched_data;
sched_data         89 net/netfilter/ipvs/ip_vs_rr.c 	svc->sched_data = &dest->n_list;
sched_data        237 net/netfilter/ipvs/ip_vs_sh.c 	svc->sched_data = s;
sched_data        251 net/netfilter/ipvs/ip_vs_sh.c 	struct ip_vs_sh_state *s = svc->sched_data;
sched_data        266 net/netfilter/ipvs/ip_vs_sh.c 	struct ip_vs_sh_state *s = svc->sched_data;
sched_data        324 net/netfilter/ipvs/ip_vs_sh.c 	s = (struct ip_vs_sh_state *) svc->sched_data;
sched_data        121 net/netfilter/ipvs/ip_vs_wrr.c 	svc->sched_data = mark;
sched_data        129 net/netfilter/ipvs/ip_vs_wrr.c 	struct ip_vs_wrr_mark *mark = svc->sched_data;
sched_data        141 net/netfilter/ipvs/ip_vs_wrr.c 	struct ip_vs_wrr_mark *mark = svc->sched_data;
sched_data        164 net/netfilter/ipvs/ip_vs_wrr.c 	struct ip_vs_wrr_mark *mark = svc->sched_data;
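
The IPVS schedulers all treat svc->sched_data (ip_vs.h line 636 above) the same way: each module hangs its private state off the virtual service, a hash table for dh/sh/lblc/lblcr/mh, a weighted-round-robin mark for wrr, or, in the simplest case, a bare list cursor for rr. A simplified sketch of the round-robin cursor from ip_vs_rr.c, omitting the RCU list walk, locking, and dest-availability checks of the real code; the demo_ struct and names stand in for the much larger ip_vs_service:

#include <linux/list.h>

/* Heavily reduced stand-in for ip_vs_service. */
struct demo_service {
	struct list_head destinations;	/* real servers for this service */
	void *sched_data;		/* scheduler-private state */
};

/* Mirrors ip_vs_rr_init_svc() (line 28): round-robin's private state
 * is nothing but a cursor into the destination list. */
static int demo_rr_init_svc(struct demo_service *svc)
{
	svc->sched_data = &svc->destinations;
	return 0;
}

/* Mirrors the core of ip_vs_rr_schedule() (lines 63-89): advance the
 * cursor past the list head and remember where to resume next time. */
static struct list_head *demo_rr_next(struct demo_service *svc)
{
	struct list_head *p = svc->sched_data;

	if (list_empty(&svc->destinations))
		return NULL;

	p = p->next;
	if (p == &svc->destinations)	/* wrapped: skip the list head */
		p = p->next;

	svc->sched_data = p;		/* resume here on the next call */
	return p;
}
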