kqd               209 block/kyber-iosched.c static void flush_latency_buckets(struct kyber_queue_data *kqd,
kqd               213 block/kyber-iosched.c 	unsigned int *buckets = kqd->latency_buckets[sched_domain][type];
kqd               225 block/kyber-iosched.c static int calculate_percentile(struct kyber_queue_data *kqd,
kqd               229 block/kyber-iosched.c 	unsigned int *buckets = kqd->latency_buckets[sched_domain][type];
kqd               242 block/kyber-iosched.c 	if (!kqd->latency_timeout[sched_domain])
kqd               243 block/kyber-iosched.c 		kqd->latency_timeout[sched_domain] = max(jiffies + HZ, 1UL);
kqd               245 block/kyber-iosched.c 	    time_is_after_jiffies(kqd->latency_timeout[sched_domain])) {
kqd               248 block/kyber-iosched.c 	kqd->latency_timeout[sched_domain] = 0;
kqd               256 block/kyber-iosched.c 	memset(buckets, 0, sizeof(kqd->latency_buckets[sched_domain][type]));
kqd               258 block/kyber-iosched.c 	trace_kyber_latency(kqd->q, kyber_domain_names[sched_domain],
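
The hits above span flush_latency_buckets() and calculate_percentile(): completion counts are drained from the per-CPU buckets into kqd->latency_buckets, and a percentile is read off the histogram by walking buckets until the cumulative count crosses the requested fraction (the source waits for 500 samples, or up to one second via latency_timeout, before committing to an answer, then zeroes the buckets and emits trace_kyber_latency). A minimal userspace model of that bucket walk; the eight-bucket layout and the 500-sample threshold mirror the source, everything else is illustrative:

#include <stdio.h>

#define NBUCKETS 8	/* KYBER_LATENCY_BUCKETS in the source */

/*
 * Return the index of the bucket holding the p-th percentile sample,
 * or -1 when there is not enough data yet (the kernel additionally
 * allows a one-second timeout before insisting on 500 samples).
 */
static int calc_percentile(const unsigned int *buckets, unsigned int p)
{
	unsigned int samples = 0, wanted, bucket;

	for (bucket = 0; bucket < NBUCKETS; bucket++)
		samples += buckets[bucket];
	if (samples < 500)
		return -1;

	wanted = (samples * p + 99) / 100;	/* DIV_ROUND_UP(samples * p, 100) */
	for (bucket = 0; bucket < NBUCKETS - 1; bucket++) {
		if (buckets[bucket] >= wanted)
			break;
		wanted -= buckets[bucket];
	}
	return (int)bucket;
}

int main(void)
{
	unsigned int buckets[NBUCKETS] = { 300, 150, 40, 6, 3, 1, 0, 0 };

	printf("p90 lands in bucket %d\n", calc_percentile(buckets, 90));
	return 0;
}
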
kqd               265 block/kyber-iosched.c static void kyber_resize_domain(struct kyber_queue_data *kqd,
kqd               269 block/kyber-iosched.c 	if (depth != kqd->domain_tokens[sched_domain].sb.depth) {
kqd               270 block/kyber-iosched.c 		sbitmap_queue_resize(&kqd->domain_tokens[sched_domain], depth);
kqd               271 block/kyber-iosched.c 		trace_kyber_adjust(kqd->q, kyber_domain_names[sched_domain],
kqd               278 block/kyber-iosched.c 	struct kyber_queue_data *kqd = from_timer(kqd, t, timer);
kqd               287 block/kyber-iosched.c 		cpu_latency = per_cpu_ptr(kqd->cpu_latency, cpu);
kqd               289 block/kyber-iosched.c 			flush_latency_buckets(kqd, cpu_latency, sched_domain,
kqd               291 block/kyber-iosched.c 			flush_latency_buckets(kqd, cpu_latency, sched_domain,
kqd               304 block/kyber-iosched.c 		p90 = calculate_percentile(kqd, sched_domain, KYBER_IO_LATENCY,
kqd               319 block/kyber-iosched.c 		p99 = calculate_percentile(kqd, sched_domain,
kqd               331 block/kyber-iosched.c 				p99 = kqd->domain_p99[sched_domain];
kqd               332 block/kyber-iosched.c 			kqd->domain_p99[sched_domain] = -1;
kqd               334 block/kyber-iosched.c 			kqd->domain_p99[sched_domain] = p99;
kqd               349 block/kyber-iosched.c 			orig_depth = kqd->domain_tokens[sched_domain].sb.depth;
kqd               351 block/kyber-iosched.c 			kyber_resize_domain(kqd, sched_domain, depth);
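
kyber_resize_domain() and kyber_timer_fn() form the feedback loop: the timer drains every CPU's buckets, flags congestion when a domain's p90 lands past the "good" buckets, remembers each domain's p99 across windows (domains don't always have enough samples in the same window), and on congestion rescales each domain's token depth by its p99 bucket. Resizing only happens when the depth actually changes, so sbitmap_queue_resize() and trace_kyber_adjust aren't spammed. A sketch of the scaling step; the constants follow the kernel's KYBER_LATENCY_SHIFT = 2 bucket layout, but the formula is my reading of the source, so treat it as approximate:

#include <stdio.h>

#define KYBER_LATENCY_SHIFT	2
/* Buckets 0..(1 << KYBER_LATENCY_SHIFT) - 1 are within the target. */
#define KYBER_GOOD_BUCKETS	(1 << KYBER_LATENCY_SHIFT)

/*
 * Shrink a congested domain's depth in proportion to its p99 bucket:
 * a p99 in bucket 3 (just inside the target) keeps the full depth,
 * a p99 in bucket 0 quarters it.
 */
static unsigned int scale_depth(unsigned int depth, int p99)
{
	unsigned int new_depth;

	new_depth = (depth * (unsigned int)(p99 + 1)) >> KYBER_LATENCY_SHIFT;
	return new_depth ? new_depth : 1;	/* never resize to zero */
}

int main(void)
{
	printf("depth 64, p99 bucket 1 -> %u\n", scale_depth(64, 1));
	printf("depth 64, p99 bucket 3 -> %u\n", scale_depth(64, 3));
	return 0;
}
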
kqd               367 block/kyber-iosched.c 	struct kyber_queue_data *kqd;
kqd               372 block/kyber-iosched.c 	kqd = kzalloc_node(sizeof(*kqd), GFP_KERNEL, q->node);
kqd               373 block/kyber-iosched.c 	if (!kqd)
kqd               376 block/kyber-iosched.c 	kqd->q = q;
kqd               378 block/kyber-iosched.c 	kqd->cpu_latency = alloc_percpu_gfp(struct kyber_cpu_latency,
kqd               380 block/kyber-iosched.c 	if (!kqd->cpu_latency)
kqd               383 block/kyber-iosched.c 	timer_setup(&kqd->timer, kyber_timer_fn, 0);
kqd               388 block/kyber-iosched.c 		ret = sbitmap_queue_init_node(&kqd->domain_tokens[i],
kqd               393 block/kyber-iosched.c 				sbitmap_queue_free(&kqd->domain_tokens[i]);
kqd               399 block/kyber-iosched.c 		kqd->domain_p99[i] = -1;
kqd               400 block/kyber-iosched.c 		kqd->latency_targets[i] = kyber_latency_targets[i];
kqd               404 block/kyber-iosched.c 	kqd->async_depth = (1U << shift) * KYBER_ASYNC_PERCENT / 100U;
kqd               406 block/kyber-iosched.c 	return kqd;
kqd               409 block/kyber-iosched.c 	free_percpu(kqd->cpu_latency);
kqd               411 block/kyber-iosched.c 	kfree(kqd);
kqd               418 block/kyber-iosched.c 	struct kyber_queue_data *kqd;
kqd               425 block/kyber-iosched.c 	kqd = kyber_queue_data_alloc(q);
kqd               426 block/kyber-iosched.c 	if (IS_ERR(kqd)) {
kqd               428 block/kyber-iosched.c 		return PTR_ERR(kqd);
kqd               433 block/kyber-iosched.c 	eq->elevator_data = kqd;
kqd               441 block/kyber-iosched.c 	struct kyber_queue_data *kqd = e->elevator_data;
kqd               444 block/kyber-iosched.c 	del_timer_sync(&kqd->timer);
kqd               447 block/kyber-iosched.c 		sbitmap_queue_free(&kqd->domain_tokens[i]);
kqd               448 block/kyber-iosched.c 	free_percpu(kqd->cpu_latency);
kqd               449 block/kyber-iosched.c 	kfree(kqd);
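
kyber_init_sched() consumes the allocator through the kernel's ERR_PTR convention, and kyber_exit_sched() tears down in reverse order, crucially running del_timer_sync() before freeing anything the timer callback might still touch. For readers unfamiliar with the IS_ERR()/PTR_ERR() pattern, here is a self-contained userspace rendition of the idiom; MAX_ERRNO and the helper names match the kernel's, the failing allocator is made up:

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO	4095

/*
 * The kernel packs small negative errnos into the top of the pointer
 * range so a single return value can carry either a valid object or
 * an error code.
 */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Hypothetical allocator standing in for kyber_queue_data_alloc(). */
static void *queue_data_alloc(void)
{
	return ERR_PTR(-ENOMEM);
}

int main(void)
{
	void *kqd = queue_data_alloc();

	if (IS_ERR(kqd)) {
		printf("init failed: %ld\n", PTR_ERR(kqd));
		return 1;
	}
	return 0;
}
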
kqd               463 block/kyber-iosched.c 	struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data;
kqd               506 block/kyber-iosched.c 					kqd->async_depth);
kqd               538 block/kyber-iosched.c static void rq_clear_domain_token(struct kyber_queue_data *kqd,
kqd               547 block/kyber-iosched.c 		sbitmap_queue_clear(&kqd->domain_tokens[sched_domain], nr,
kqd               559 block/kyber-iosched.c 		struct kyber_queue_data *kqd = data->q->elevator->elevator_data;
kqd               561 block/kyber-iosched.c 		data->shallow_depth = kqd->async_depth;
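
Two of these hits throttle asynchronous I/O: the hctx setup path registers kqd->async_depth as the minimum shallow depth via sbitmap_queue_min_shallow_depth(), and kyber_limit_depth() caps non-sync tag allocations at that depth so reads and other synchronous requests always have tags left. A toy model of the limiting decision, assuming a plain boolean instead of op_is_sync() and a hypothetical alloc_data struct:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for struct blk_mq_alloc_data. */
struct alloc_data {
	unsigned int shallow_depth;	/* 0 means no limit */
};

/*
 * Sync requests may use the whole tag space; async requests are
 * confined to the first async_depth tags, as kyber_limit_depth() does.
 */
static void limit_depth(bool is_sync, struct alloc_data *data,
			unsigned int async_depth)
{
	if (!is_sync)
		data->shallow_depth = async_depth;
}

int main(void)
{
	struct alloc_data d = { 0 };

	limit_depth(false, &d, 48);
	printf("async allocation limited to depth %u\n", d.shallow_depth);
	return 0;
}
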
kqd               612 block/kyber-iosched.c 	struct kyber_queue_data *kqd = rq->q->elevator->elevator_data;
kqd               614 block/kyber-iosched.c 	rq_clear_domain_token(kqd, rq);
kqd               637 block/kyber-iosched.c 	struct kyber_queue_data *kqd = rq->q->elevator->elevator_data;
kqd               646 block/kyber-iosched.c 	cpu_latency = get_cpu_ptr(kqd->cpu_latency);
kqd               647 block/kyber-iosched.c 	target = kqd->latency_targets[sched_domain];
kqd               652 block/kyber-iosched.c 	put_cpu_ptr(kqd->cpu_latency);
kqd               654 block/kyber-iosched.c 	timer_reduce(&kqd->timer, jiffies + HZ / 10);
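
kyber_completed_request() is the sampling side of the loop: it looks up the domain's latency target, files the observed latency into a per-CPU bucket, and calls timer_reduce() so the adjustment timer fires within the next 100ms (jiffies + HZ / 10) without ever pushing an earlier deadline back. A sketch of the bucketing under the kernel's layout of eight buckets, each a quarter of the target wide; the 2ms figure matches the default read target:

#include <stdint.h>
#include <stdio.h>

#define KYBER_LATENCY_SHIFT	2
#define KYBER_LATENCY_BUCKETS	(2 << KYBER_LATENCY_SHIFT)	/* 8 */

/*
 * Buckets are target/4 wide: indexes 0..3 mean the sample met the
 * target, 4..7 mean it blew past it.
 */
static unsigned int latency_bucket(uint64_t target, uint64_t latency)
{
	uint64_t divisor = target >> KYBER_LATENCY_SHIFT;
	uint64_t bucket;

	if (!latency)
		return 0;
	if (!divisor)
		divisor = 1;
	bucket = (latency - 1) / divisor;
	if (bucket > KYBER_LATENCY_BUCKETS - 1)
		bucket = KYBER_LATENCY_BUCKETS - 1;
	return (unsigned int)bucket;
}

int main(void)
{
	/* 2ms target (the default read target): 1.5ms is good, 5ms is not */
	printf("1.5ms -> bucket %u\n", latency_bucket(2000000, 1500000));
	printf("5ms   -> bucket %u\n", latency_bucket(2000000, 5000000));
	return 0;
}
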
kqd               702 block/kyber-iosched.c static int kyber_get_domain_token(struct kyber_queue_data *kqd,
kqd               707 block/kyber-iosched.c 	struct sbitmap_queue *domain_tokens = &kqd->domain_tokens[sched_domain];
kqd               750 block/kyber-iosched.c kyber_dispatch_cur_domain(struct kyber_queue_data *kqd,
kqd               770 block/kyber-iosched.c 		nr = kyber_get_domain_token(kqd, khd, hctx);
kqd               777 block/kyber-iosched.c 			trace_kyber_throttled(kqd->q,
kqd               781 block/kyber-iosched.c 		nr = kyber_get_domain_token(kqd, khd, hctx);
kqd               790 block/kyber-iosched.c 			trace_kyber_throttled(kqd->q,
kqd               801 block/kyber-iosched.c 	struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data;
kqd               813 block/kyber-iosched.c 		rq = kyber_dispatch_cur_domain(kqd, khd, hctx);
kqd               834 block/kyber-iosched.c 		rq = kyber_dispatch_cur_domain(kqd, khd, hctx);
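
The dispatch path ties the token pool together: kyber_dispatch_cur_domain() must win a token from kyber_get_domain_token() before a request leaves the scheduler; on failure the hctx is parked on the domain's wait queue and the grab is retried once to close the race with a concurrent release, with trace_kyber_throttled firing when it still loses, and rq_clear_domain_token() returns the token at completion. A toy single-word stand-in for the sbitmap grab/release pair, names illustrative:

#include <stdio.h>

/* Toy stand-in for a per-domain sbitmap_queue: one word of tokens. */
struct token_pool {
	unsigned int bits;	/* bit set = token in use */
	unsigned int depth;
};

/* Return a free token number, or -1 when the domain is throttled. */
static int get_token(struct token_pool *pool)
{
	unsigned int i;

	for (i = 0; i < pool->depth; i++) {
		if (!(pool->bits & (1U << i))) {
			pool->bits |= 1U << i;
			return (int)i;
		}
	}
	return -1;
}

/* Completion path: hand the token back, as rq_clear_domain_token() does. */
static void put_token(struct token_pool *pool, int nr)
{
	pool->bits &= ~(1U << nr);
}

int main(void)
{
	struct token_pool pool = { .bits = 0, .depth = 2 };
	int a = get_token(&pool);
	int b = get_token(&pool);

	printf("got %d and %d, throttled try: %d\n", a, b, get_token(&pool));
	put_token(&pool, a);
	printf("after one release: %d\n", get_token(&pool));
	return 0;
}
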
kqd               863 block/kyber-iosched.c 	struct kyber_queue_data *kqd = e->elevator_data;		\
kqd               865 block/kyber-iosched.c 	return sprintf(page, "%llu\n", kqd->latency_targets[domain]);	\
kqd               871 block/kyber-iosched.c 	struct kyber_queue_data *kqd = e->elevator_data;		\
kqd               879 block/kyber-iosched.c 	kqd->latency_targets[domain] = nsec;				\
kqd               900 block/kyber-iosched.c 	struct kyber_queue_data *kqd = q->elevator->elevator_data;	\
kqd               902 block/kyber-iosched.c 	sbitmap_queue_show(&kqd->domain_tokens[domain], m);		\
kqd               959 block/kyber-iosched.c 	struct kyber_queue_data *kqd = q->elevator->elevator_data;
kqd               961 block/kyber-iosched.c 	seq_printf(m, "%u\n", kqd->async_depth);
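
The tail of the listing is observability: a macro-generated sysfs show/store pair per domain for kqd->latency_targets (parsed and stored in nanoseconds), plus debugfs hooks that dump each domain's token sbitmap via sbitmap_queue_show() and print async_depth with seq_printf(). A userspace sketch of the token-pasting trick the show/store macros rely on; the names and the two-domain array are hypothetical:

#include <stdio.h>
#include <stdlib.h>

static unsigned long long latency_targets[2] = { 2000000ULL, 10000000ULL };

/* Stamp out a show/store pair per domain, kernel-attribute style. */
#define LAT_ATTR(name, domain)						\
static int name##_lat_show(char *page, size_t len)			\
{									\
	return snprintf(page, len, "%llu\n", latency_targets[domain]);	\
}									\
static void name##_lat_store(const char *page)				\
{									\
	latency_targets[domain] = strtoull(page, NULL, 10);		\
}

LAT_ATTR(read, 0)
LAT_ATTR(write, 1)

int main(void)
{
	char buf[32];

	read_lat_store("4000000");
	read_lat_show(buf, sizeof(buf));
	printf("read target: %s", buf);
	write_lat_store("20000000");
	write_lat_show(buf, sizeof(buf));
	printf("write target: %s", buf);
	return 0;
}
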