util             1209 drivers/net/wireless/ath/ath6kl/debug.c 				 info->util, info->bias);
util             1912 drivers/net/wireless/ath/ath6kl/wmi.h 	s8 util;
util              648 drivers/s390/scsi/zfcp_sysfs.c 	u64 util;
util              651 drivers/s390/scsi/zfcp_sysfs.c 	util = qdio->req_q_util;
util              655 drivers/s390/scsi/zfcp_sysfs.c 		       (unsigned long long)util);
util               27 include/linux/sched/cpufreq.h static inline unsigned long map_util_freq(unsigned long util,
util               30 include/linux/sched/cpufreq.h 	return (freq + (freq >> 2)) * util / cap;
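
The two cpufreq.h hits above are the whole of map_util_freq(): schedutil asks for roughly 1.25 * max_freq * util / capacity so a fully utilized CPU still gets ~25% headroom. A minimal userspace sketch of that arithmetic follows; the frequency and capacity values are made up, not taken from a real platform.

#include <stdio.h>

/* Mirrors the formula from include/linux/sched/cpufreq.h shown above. */
static unsigned long map_util_freq(unsigned long util,
				   unsigned long freq, unsigned long cap)
{
	/* freq >> 2 adds the ~25% headroom on top of the raw util/cap ratio */
	return (freq + (freq >> 2)) * util / cap;
}

int main(void)
{
	unsigned long max_freq = 2000000;	/* kHz, hypothetical policy maximum */
	unsigned long cap = 1024;		/* SCHED_CAPACITY_SCALE */

	/* 50% utilization maps to 62.5% of max frequency because of the margin */
	printf("%lu\n", map_util_freq(512, max_freq, cap));	/* prints 1250000 */
	return 0;
}
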
util              150 include/uapi/linux/caif/caif_socket.h 		} util;				/* CAIFPROTO_UTIL */
util             7240 kernel/sched/core.c 	u64 util;
util             7249 kernel/sched/core.c 		.util = SCHED_CAPACITY_SCALE,
util             7264 kernel/sched/core.c 		req.util = req.percent << SCHED_CAPACITY_SHIFT;
util             7265 kernel/sched/core.c 		req.util = DIV_ROUND_CLOSEST_ULL(req.util, UCLAMP_PERCENT_SCALE);
util             7286 kernel/sched/core.c 	if (tg->uclamp_req[clamp_id].value != req.util)
util             7287 kernel/sched/core.c 		uclamp_se_set(&tg->uclamp_req[clamp_id], req.util, false);
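
The kernel/sched/core.c hits above convert a cgroup's cpu.uclamp.{min,max} percentage into utilization units. Below is a userspace sketch of that conversion, assuming the kernel's usual constants (SCHED_CAPACITY_SHIFT = 10, UCLAMP_PERCENT_SCALE = 10000, i.e. percentages carry two decimal places); treat those values as assumptions for other kernel versions.

#include <stdio.h>

#define SCHED_CAPACITY_SHIFT	10
#define UCLAMP_PERCENT_SCALE	10000		/* "12.34" is parsed as 1234 */
#define DIV_ROUND_CLOSEST_ULL(x, d)	(((x) + (d) / 2) / (d))

/* Same two steps as the cpu_uclamp_write() lines in the listing above. */
static unsigned long long percent_to_util(unsigned long long percent_x100)
{
	unsigned long long util = percent_x100 << SCHED_CAPACITY_SHIFT;

	return DIV_ROUND_CLOSEST_ULL(util, UCLAMP_PERCENT_SCALE);
}

int main(void)
{
	printf("%llu\n", percent_to_util(10000));	/* 100.00% -> 1024 */
	printf("%llu\n", percent_to_util(5000));	/*  50.00% ->  512 */
	printf("%llu\n", percent_to_util(1234));	/*  12.34% ->  126 */
	return 0;
}
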
util              170 kernel/sched/cpufreq_schedutil.c 				  unsigned long util, unsigned long max)
util              176 kernel/sched/cpufreq_schedutil.c 	freq = map_util_freq(util, freq, max);
util              210 kernel/sched/cpufreq_schedutil.c 	unsigned long dl_util, util, irq;
util              239 kernel/sched/cpufreq_schedutil.c 	util = util_cfs + cpu_util_rt(rq);
util              241 kernel/sched/cpufreq_schedutil.c 		util = uclamp_util_with(rq, util, p);
util              254 kernel/sched/cpufreq_schedutil.c 	if (util + dl_util >= max)
util              262 kernel/sched/cpufreq_schedutil.c 		util += dl_util;
util              273 kernel/sched/cpufreq_schedutil.c 	util = scale_irq_capacity(util, irq, max);
util              274 kernel/sched/cpufreq_schedutil.c 	util += irq;
util              287 kernel/sched/cpufreq_schedutil.c 		util += cpu_bw_dl(rq);
util              289 kernel/sched/cpufreq_schedutil.c 	return min(max, util);
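
The schedutil_cpu_util() hits above show the aggregation order: CFS and RT utilization are summed (and uclamp-ed when a task is supplied), the function bails out at saturation before adding DL, the sum is scaled by the non-IRQ capacity, then IRQ time and the DL bandwidth are added back. A self-contained userspace sketch of that aggregation follows; the name cpu_util_freq() and the sample inputs are mine, and the early irq >= max check comes from the surrounding kernel function rather than the lines listed here.

#include <stdio.h>

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

/* Same formula as scale_irq_capacity() in kernel/sched/sched.h (also listed below). */
static unsigned long scale_irq_capacity(unsigned long util,
					unsigned long irq, unsigned long max)
{
	return util * (max - irq) / max;
}

static unsigned long cpu_util_freq(unsigned long util_cfs, unsigned long util_rt,
				   unsigned long util_dl, unsigned long bw_dl,
				   unsigned long irq, unsigned long max)
{
	unsigned long util;

	if (irq >= max)			/* CPU fully consumed by IRQ/steal time */
		return max;

	util = util_cfs + util_rt;	/* uclamp would clamp this sum in the kernel */
	if (util + util_dl >= max)	/* already saturated: ask for max right away */
		return max;

	util += util_dl;
	util = scale_irq_capacity(util, irq, max);	/* util * (max - irq) / max */
	util += irq;
	util += bw_dl;			/* reserve the DL bandwidth as extra headroom */

	return min_ul(max, util);
}

int main(void)
{
	/* cfs=300 rt=50 dl=0 dl_bw=100 irq=64 on a 1024-capacity CPU -> 492 */
	printf("%lu\n", cpu_util_freq(300, 50, 0, 100, 64, 1024));
	return 0;
}
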
util              295 kernel/sched/cpufreq_schedutil.c 	unsigned long util = cpu_util_cfs(rq);
util              301 kernel/sched/cpufreq_schedutil.c 	return schedutil_cpu_util(sg_cpu->cpu, util, max, FREQUENCY_UTIL, NULL);
util              394 kernel/sched/cpufreq_schedutil.c 					unsigned long util, unsigned long max)
util              400 kernel/sched/cpufreq_schedutil.c 		return util;
util              404 kernel/sched/cpufreq_schedutil.c 		return util;
util              413 kernel/sched/cpufreq_schedutil.c 			return util;
util              424 kernel/sched/cpufreq_schedutil.c 	return max(boost, util);
util              455 kernel/sched/cpufreq_schedutil.c 	unsigned long util, max;
util              470 kernel/sched/cpufreq_schedutil.c 	util = sugov_get_util(sg_cpu);
util              472 kernel/sched/cpufreq_schedutil.c 	util = sugov_iowait_apply(sg_cpu, time, util, max);
util              473 kernel/sched/cpufreq_schedutil.c 	next_f = get_next_freq(sg_policy, util, max);
util              503 kernel/sched/cpufreq_schedutil.c 	unsigned long util = 0, max = 1;
util              514 kernel/sched/cpufreq_schedutil.c 		if (j_util * max > j_max * util) {
util              515 kernel/sched/cpufreq_schedutil.c 			util = j_util;
util              520 kernel/sched/cpufreq_schedutil.c 	return get_next_freq(sg_policy, util, max);
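
The sugov_next_freq_shared() hits above show how the shared-policy path picks the busiest CPU: util/max ratios are compared by cross-multiplication (j_util * max > j_max * util) so no division is needed inside the loop. A small userspace sketch with invented per-CPU numbers:

#include <stdio.h>

int main(void)
{
	unsigned long utils[] = { 200, 700, 100 };	/* per-CPU utilization  */
	unsigned long maxes[] = { 1024, 1024, 512 };	/* per-CPU max capacity */
	unsigned long util = 0, max = 1;
	int i;

	for (i = 0; i < 3; i++) {
		unsigned long j_util = utils[i], j_max = maxes[i];

		/* j_util / j_max > util / max, written without a division */
		if (j_util * max > j_max * util) {
			util = j_util;
			max = j_max;
		}
	}
	/* CPU 1 wins: 700/1024 beats 200/1024 and 100/512 */
	printf("util=%lu max=%lu\n", util, max);
	return 0;
}
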
util             6104 kernel/sched/fair.c 	unsigned int util;
util             6107 kernel/sched/fair.c 	util = READ_ONCE(cfs_rq->avg.util_avg);
util             6110 kernel/sched/fair.c 		util = max(util, READ_ONCE(cfs_rq->avg.util_est.enqueued));
util             6112 kernel/sched/fair.c 	return min_t(unsigned long, util, capacity_orig_of(cpu));
util             6131 kernel/sched/fair.c 	unsigned int util;
util             6138 kernel/sched/fair.c 	util = READ_ONCE(cfs_rq->avg.util_avg);
util             6141 kernel/sched/fair.c 	lsub_positive(&util, task_util(p));
util             6193 kernel/sched/fair.c 		util = max(util, estimated);
util             6201 kernel/sched/fair.c 	return min_t(unsigned long, util, capacity_orig_of(cpu));
util             6238 kernel/sched/fair.c 	unsigned long util_est, util = READ_ONCE(cfs_rq->avg.util_avg);
util             6247 kernel/sched/fair.c 		sub_positive(&util, task_util(p));
util             6249 kernel/sched/fair.c 		util += task_util(p);
util             6263 kernel/sched/fair.c 		util = max(util, util_est);
util             6266 kernel/sched/fair.c 	return min(util, capacity_orig_of(cpu));
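
The fair.c hits above (cpu_util(), cpu_util_without(), cpu_util_next()) share one pattern: start from util_avg, optionally take the max with the util_est enqueued estimate, subtract a task's own contribution without underflowing, and clamp to the CPU's original capacity. The sketch below mirrors only that pattern; the real cpu_util_without() also adjusts the estimate when the task is still enqueued, which is left out here.

#include <stdio.h>

/* Subtract only down to zero, like the kernel's lsub_positive() helper. */
static void lsub_positive(unsigned long *val, unsigned long sub)
{
	*val = (*val > sub) ? *val - sub : 0;
}

static unsigned long cpu_util_without(unsigned long util_avg,
				      unsigned long util_est_enqueued,
				      unsigned long task_util,
				      unsigned long capacity_orig)
{
	unsigned long util = util_avg;

	lsub_positive(&util, task_util);	/* discount the waking task */

	if (util_est_enqueued > util)		/* UTIL_EST: prefer the estimate */
		util = util_est_enqueued;

	return util < capacity_orig ? util : capacity_orig;
}

int main(void)
{
	/* util_avg=600, util_est=500, the task contributes 200, capacity 1024 */
	printf("%lu\n", cpu_util_without(600, 500, 200, 1024));	/* prints 500 */
	return 0;
}
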
util             6364 kernel/sched/fair.c 	unsigned long cpu_cap, util, base_energy = 0;
util             6402 kernel/sched/fair.c 			util = cpu_util_next(cpu, p, cpu);
util             6404 kernel/sched/fair.c 			if (!fits_capacity(util, cpu_cap))
util             6418 kernel/sched/fair.c 			spare_cap = cpu_cap - util;
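
find_energy_efficient_cpu() (the fair.c hits just above) only considers a CPU if the task's projected utilization fits its capacity with some headroom, then tracks the remaining spare capacity. The sketch below uses the 1280/1024 (~20% headroom) definition of fits_capacity() that I believe matches this kernel; treat the constant as an assumption for other versions.

#include <stdio.h>

#define fits_capacity(cap, max)	((cap) * 1280 < (max) * 1024)

int main(void)
{
	unsigned long cpu_cap = 512;	/* a small CPU on a big.LITTLE system */

	printf("%d\n", fits_capacity(450UL, cpu_cap));	/* 0: not enough headroom */
	printf("%d\n", fits_capacity(300UL, cpu_cap));	/* 1: fits with margin    */

	/* spare_cap = cpu_cap - util, as in the listing above */
	printf("spare=%lu\n", cpu_cap - 300UL);		/* 212 */
	return 0;
}
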
util             2321 kernel/sched/sched.h unsigned int uclamp_util_with(struct rq *rq, unsigned int util,
util             2340 kernel/sched/sched.h 	return clamp(util, min_util, max_util);
util             2343 kernel/sched/sched.h static inline unsigned int uclamp_util(struct rq *rq, unsigned int util)
util             2345 kernel/sched/sched.h 	return uclamp_util_with(rq, util, NULL);
util             2348 kernel/sched/sched.h static inline unsigned int uclamp_util_with(struct rq *rq, unsigned int util,
util             2351 kernel/sched/sched.h 	return util;
util             2353 kernel/sched/sched.h static inline unsigned int uclamp_util(struct rq *rq, unsigned int util)
util             2355 kernel/sched/sched.h 	return util;
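
The sched.h hits above are the uclamp_util()/uclamp_util_with() pair: with CONFIG_UCLAMP_TASK enabled, the utilization is clamped between the rq's aggregated UCLAMP_MIN and UCLAMP_MAX values; without it, the stubs return util unchanged. A userspace sketch of the enabled path, with min_util/max_util passed in directly instead of being read from rq->uclamp[]:

#include <stdio.h>

/* Same clamp as the CONFIG_UCLAMP_TASK version of uclamp_util_with() above. */
static unsigned int uclamp_util(unsigned int util,
				unsigned int min_util, unsigned int max_util)
{
	if (util < min_util)
		return min_util;
	if (util > max_util)
		return max_util;
	return util;
}

int main(void)
{
	/* A background task's 300 units of util, capped at 256 */
	printf("%u\n", uclamp_util(300, 0, 256));	/* prints 256 */
	/* A latency-sensitive task boosted to at least 512 */
	printf("%u\n", uclamp_util(100, 512, 1024));	/* prints 512 */
	return 0;
}
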
util             2407 kernel/sched/sched.h 	unsigned long util = READ_ONCE(rq->cfs.avg.util_avg);
util             2410 kernel/sched/sched.h 		util = max_t(unsigned long, util,
util             2414 kernel/sched/sched.h 	return util;
util             2437 kernel/sched/sched.h unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max)
util             2439 kernel/sched/sched.h 	util *= (max - irq);
util             2440 kernel/sched/sched.h 	util /= max;
util             2442 kernel/sched/sched.h 	return util;
util             2452 kernel/sched/sched.h unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max)
util             2454 kernel/sched/sched.h 	return util;
util              278 net/caif/cfcnfg.c 		strlcpy(l->u.utility.name, s->sockaddr.u.util.service,
util               29 net/caif/cfutill.c 	struct cfsrvl *util = kzalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
util               30 net/caif/cfutill.c 	if (!util)
util               33 net/caif/cfutill.c 	cfsrvl_init(util, channel_id, dev_info, true);
util               34 net/caif/cfutill.c 	util->layer.receive = cfutill_receive;
util               35 net/caif/cfutill.c 	util->layer.transmit = cfutill_transmit;
util               36 net/caif/cfutill.c 	snprintf(util->layer.name, CAIF_LAYER_NAME_SZ, "util1");
util               37 net/caif/cfutill.c 	return &util->layer;