sg_cpu            507 drivers/mmc/host/dw_mmc.c 		for (i = 0, p = host->sg_cpu; i < host->ring_size - 1;
sg_cpu            535 drivers/mmc/host/dw_mmc.c 		for (i = 0, p = host->sg_cpu;
sg_cpu            583 drivers/mmc/host/dw_mmc.c 	desc_first = desc_last = desc = host->sg_cpu;
sg_cpu            640 drivers/mmc/host/dw_mmc.c 	memset(host->sg_cpu, 0, DESC_RING_BUF_SZ);
sg_cpu            655 drivers/mmc/host/dw_mmc.c 	desc_first = desc_last = desc = host->sg_cpu;
sg_cpu            714 drivers/mmc/host/dw_mmc.c 	memset(host->sg_cpu, 0, DESC_RING_BUF_SZ);
sg_cpu           1097 drivers/mmc/host/dw_mmc.c 			 (unsigned long)host->sg_cpu,
sg_cpu           2927 drivers/mmc/host/dw_mmc.c 		host->sg_cpu = dmam_alloc_coherent(host->dev,
sg_cpu           2930 drivers/mmc/host/dw_mmc.c 		if (!host->sg_cpu) {
sg_cpu            179 drivers/mmc/host/dw_mmc.h 	void			*sg_cpu;
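
The dw_mmc hits above follow a common MMC-host pattern: sg_cpu is the kernel virtual address of an IDMAC descriptor ring, and sg_dma (its companion field, not matched by this search) is the bus address, both produced by a single dmam_alloc_coherent() call. A sketch of the allocation reconstructed around the matches at 2927-2930; the allocation is device-managed, so no explicit free path is needed:

	/* one coherent buffer, seen by the CPU via sg_cpu and by the IDMAC via sg_dma */
	host->sg_cpu = dmam_alloc_coherent(host->dev, DESC_RING_BUF_SZ,
					   &host->sg_dma, GFP_KERNEL);
	if (!host->sg_cpu) {
		dev_err(host->dev, "could not alloc DMA memory\n");
		goto no_dma;
	}
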
sg_cpu             25 drivers/mmc/host/mmci_stm32_sdmmc.c 	void *sg_cpu;
sg_cpu            104 drivers/mmc/host/mmci_stm32_sdmmc.c 		idma->sg_cpu = dmam_alloc_coherent(mmc_dev(host->mmc),
sg_cpu            107 drivers/mmc/host/mmci_stm32_sdmmc.c 		if (!idma->sg_cpu) {
sg_cpu            127 drivers/mmc/host/mmci_stm32_sdmmc.c 	struct sdmmc_lli_desc *desc = (struct sdmmc_lli_desc *)idma->sg_cpu;
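
In mmci_stm32_sdmmc.c the same kind of coherent buffer is reinterpreted as an array of hardware link-list items, one per scatterlist segment (the cast at 127). A sketch of the fill loop, assuming struct sdmmc_lli_desc carries idmalar/idmabase/idmasize fields as in the driver; the link-enable flag bits are elided:

	struct sdmmc_lli_desc *desc = (struct sdmmc_lli_desc *)idma->sg_cpu;
	struct scatterlist *sg;
	int i;

	for_each_sg(data->sg, sg, data->sg_len, i) {
		/* each item links to the bus address of the next one */
		desc[i].idmalar = (u32)idma->sg_dma +
				  (i + 1) * sizeof(struct sdmmc_lli_desc);
		desc[i].idmabase = sg_dma_address(sg);
		desc[i].idmasize = sg_dma_len(sg);
	}
	/* the last item must have its link flag cleared to end the list */
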
sg_cpu            290 drivers/mmc/host/sunxi-mmc.c 	void		*sg_cpu;
sg_cpu            360 drivers/mmc/host/sunxi-mmc.c 	struct sunxi_idma_des *pdes = (struct sunxi_idma_des *)host->sg_cpu;
sg_cpu           1352 drivers/mmc/host/sunxi-mmc.c 	host->sg_cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
sg_cpu           1354 drivers/mmc/host/sunxi-mmc.c 	if (!host->sg_cpu) {
sg_cpu           1447 drivers/mmc/host/sunxi-mmc.c 	dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
sg_cpu           1462 drivers/mmc/host/sunxi-mmc.c 	dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
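
Unlike the two drivers above, sunxi-mmc.c uses the unmanaged dma_alloc_coherent(), so the buffer must be released explicitly: the dma_free_coherent() hits at 1447 and 1462 are the probe error path and the remove path. A sketch of that pairing:

	/* probe: one page of IDMA descriptors, CPU view plus bus address */
	host->sg_cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
					  &host->sg_dma, GFP_KERNEL);
	if (!host->sg_cpu)
		return -ENOMEM;

	/* error path and remove must both undo the allocation */
	dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
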
sg_cpu            292 kernel/sched/cpufreq_schedutil.c static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu)
sg_cpu            294 kernel/sched/cpufreq_schedutil.c 	struct rq *rq = cpu_rq(sg_cpu->cpu);
sg_cpu            296 kernel/sched/cpufreq_schedutil.c 	unsigned long max = arch_scale_cpu_capacity(sg_cpu->cpu);
sg_cpu            298 kernel/sched/cpufreq_schedutil.c 	sg_cpu->max = max;
sg_cpu            299 kernel/sched/cpufreq_schedutil.c 	sg_cpu->bw_dl = cpu_bw_dl(rq);
sg_cpu            301 kernel/sched/cpufreq_schedutil.c 	return schedutil_cpu_util(sg_cpu->cpu, util, max, FREQUENCY_UTIL, NULL);
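
The schedutil hits are unrelated to DMA: there sg_cpu is a struct sugov_cpu, the governor's per-CPU state. sugov_get_util() caches the CPU's capacity (max) and deadline bandwidth (bw_dl), then has schedutil_cpu_util() aggregate utilization across the scheduling classes. The frequency request derived from that value (in get_next_freq(), not matched here) applies a 25% headroom via map_util_freq(); from memory of the mainline code of this era:

	/* next_freq = 1.25 * freq * util / max */
	static inline unsigned long map_util_freq(unsigned long util,
						  unsigned long freq,
						  unsigned long max)
	{
		return (freq + (freq >> 2)) * util / max;
	}
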
sg_cpu            315 kernel/sched/cpufreq_schedutil.c static bool sugov_iowait_reset(struct sugov_cpu *sg_cpu, u64 time,
sg_cpu            318 kernel/sched/cpufreq_schedutil.c 	s64 delta_ns = time - sg_cpu->last_update;
sg_cpu            324 kernel/sched/cpufreq_schedutil.c 	sg_cpu->iowait_boost = set_iowait_boost ? IOWAIT_BOOST_MIN : 0;
sg_cpu            325 kernel/sched/cpufreq_schedutil.c 	sg_cpu->iowait_boost_pending = set_iowait_boost;
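
The match at 315-325 drops the lines that do not contain sg_cpu; the elided logic is a staleness check, reconstructed here with reasonable confidence from the mainline code of this era:

	static bool sugov_iowait_reset(struct sugov_cpu *sg_cpu, u64 time,
				       bool set_iowait_boost)
	{
		s64 delta_ns = time - sg_cpu->last_update;

		/* reset only if at least a tick has passed since the last update */
		if (delta_ns <= TICK_NSEC)
			return false;

		sg_cpu->iowait_boost = set_iowait_boost ? IOWAIT_BOOST_MIN : 0;
		sg_cpu->iowait_boost_pending = set_iowait_boost;

		return true;
	}
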
sg_cpu            344 kernel/sched/cpufreq_schedutil.c static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
sg_cpu            350 kernel/sched/cpufreq_schedutil.c 	if (sg_cpu->iowait_boost &&
sg_cpu            351 kernel/sched/cpufreq_schedutil.c 	    sugov_iowait_reset(sg_cpu, time, set_iowait_boost))
sg_cpu            359 kernel/sched/cpufreq_schedutil.c 	if (sg_cpu->iowait_boost_pending)
sg_cpu            361 kernel/sched/cpufreq_schedutil.c 	sg_cpu->iowait_boost_pending = true;
sg_cpu            364 kernel/sched/cpufreq_schedutil.c 	if (sg_cpu->iowait_boost) {
sg_cpu            365 kernel/sched/cpufreq_schedutil.c 		sg_cpu->iowait_boost =
sg_cpu            366 kernel/sched/cpufreq_schedutil.c 			min_t(unsigned int, sg_cpu->iowait_boost << 1, SCHED_CAPACITY_SCALE);
sg_cpu            371 kernel/sched/cpufreq_schedutil.c 	sg_cpu->iowait_boost = IOWAIT_BOOST_MIN;
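
Lines 359-371 implement the ramp-up: iowait_boost_pending suppresses double-counting when several iowait wakeups land between two frequency updates, and each effective wakeup doubles the boost, so it climbs IOWAIT_BOOST_MIN (SCHED_CAPACITY_SCALE/8 = 128) -> 256 -> 512 -> 1024 = SCHED_CAPACITY_SCALE in three steps, starting from IOWAIT_BOOST_MIN on the first wakeup (line 371).
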
sg_cpu            393 kernel/sched/cpufreq_schedutil.c static unsigned long sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time,
sg_cpu            399 kernel/sched/cpufreq_schedutil.c 	if (!sg_cpu->iowait_boost)
sg_cpu            403 kernel/sched/cpufreq_schedutil.c 	if (sugov_iowait_reset(sg_cpu, time, false))
sg_cpu            406 kernel/sched/cpufreq_schedutil.c 	if (!sg_cpu->iowait_boost_pending) {
sg_cpu            410 kernel/sched/cpufreq_schedutil.c 		sg_cpu->iowait_boost >>= 1;
sg_cpu            411 kernel/sched/cpufreq_schedutil.c 		if (sg_cpu->iowait_boost < IOWAIT_BOOST_MIN) {
sg_cpu            412 kernel/sched/cpufreq_schedutil.c 			sg_cpu->iowait_boost = 0;
sg_cpu            417 kernel/sched/cpufreq_schedutil.c 	sg_cpu->iowait_boost_pending = false;
sg_cpu            423 kernel/sched/cpufreq_schedutil.c 	boost = (sg_cpu->iowait_boost * max) >> SCHED_CAPACITY_SHIFT;
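
sugov_iowait_apply() is the other half: with no fresh wakeup the boost halves per update until it falls below IOWAIT_BOOST_MIN and is dropped (lines 410-412), and the surviving boost is scaled to capacity units at line 423. A self-contained user-space sketch of that lifecycle; the constants mirror the kernel's, the harness itself is hypothetical:

	#include <stdio.h>

	#define SCHED_CAPACITY_SHIFT	10
	#define SCHED_CAPACITY_SCALE	(1 << SCHED_CAPACITY_SHIFT)	/* 1024 */
	#define IOWAIT_BOOST_MIN	(SCHED_CAPACITY_SCALE / 8)	/* 128 */

	static unsigned int boost;

	static void io_wakeup(void)		/* a SCHED_CPUFREQ_IOWAIT update */
	{
		if (!boost)
			boost = IOWAIT_BOOST_MIN;
		else if ((boost << 1) <= SCHED_CAPACITY_SCALE)
			boost <<= 1;
		else
			boost = SCHED_CAPACITY_SCALE;
	}

	static void decay(void)			/* an update with no iowait flag */
	{
		boost >>= 1;
		if (boost < IOWAIT_BOOST_MIN)
			boost = 0;
	}

	int main(void)
	{
		unsigned long max = 1024;	/* arch_scale_cpu_capacity() */
		int i;

		for (i = 0; i < 4; i++) {
			io_wakeup();
			printf("wakeup %d: boost=%4u, util floor=%4lu\n", i,
			       boost, ((unsigned long)boost * max) >> SCHED_CAPACITY_SHIFT);
		}
		while (boost) {
			decay();
			printf("decay:    boost=%4u\n", boost);
		}
		return 0;
	}
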
sg_cpu            428 kernel/sched/cpufreq_schedutil.c static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu)
sg_cpu            430 kernel/sched/cpufreq_schedutil.c 	unsigned long idle_calls = tick_nohz_get_idle_calls_cpu(sg_cpu->cpu);
sg_cpu            431 kernel/sched/cpufreq_schedutil.c 	bool ret = idle_calls == sg_cpu->saved_idle_calls;
sg_cpu            433 kernel/sched/cpufreq_schedutil.c 	sg_cpu->saved_idle_calls = idle_calls;
sg_cpu            437 kernel/sched/cpufreq_schedutil.c static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
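
sugov_cpu_is_busy() piggybacks on the nohz bookkeeping: if the idle-call counter has not advanced since the previous update, the CPU never entered idle in between and is treated as busy; the stub at 437 is the !CONFIG_NO_HZ_COMMON fallback that simply reports "not busy".
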
sg_cpu            444 kernel/sched/cpufreq_schedutil.c static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu, struct sugov_policy *sg_policy)
sg_cpu            446 kernel/sched/cpufreq_schedutil.c 	if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)
sg_cpu            453 kernel/sched/cpufreq_schedutil.c 	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
sg_cpu            454 kernel/sched/cpufreq_schedutil.c 	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
sg_cpu            459 kernel/sched/cpufreq_schedutil.c 	sugov_iowait_boost(sg_cpu, time, flags);
sg_cpu            460 kernel/sched/cpufreq_schedutil.c 	sg_cpu->last_update = time;
sg_cpu            462 kernel/sched/cpufreq_schedutil.c 	ignore_dl_rate_limit(sg_cpu, sg_policy);
sg_cpu            468 kernel/sched/cpufreq_schedutil.c 	busy = !sg_policy->need_freq_update && sugov_cpu_is_busy(sg_cpu);
sg_cpu            470 kernel/sched/cpufreq_schedutil.c 	util = sugov_get_util(sg_cpu);
sg_cpu            471 kernel/sched/cpufreq_schedutil.c 	max = sg_cpu->max;
sg_cpu            472 kernel/sched/cpufreq_schedutil.c 	util = sugov_iowait_apply(sg_cpu, time, util, max);
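
The busy flag computed at 468 guards against premature slowdowns in lines not matched by this search; reconstructed, with reasonable confidence, from mainline of this era:

	/*
	 * Do not reduce the frequency if the CPU has not been idle
	 * recently, as the reduction is likely to be premature.
	 */
	if (busy && next_f < sg_policy->next_freq) {
		next_f = sg_policy->next_freq;

		/* Reset cached freq as next_freq has changed */
		sg_policy->cached_raw_freq = 0;
	}
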
sg_cpu            499 kernel/sched/cpufreq_schedutil.c static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
sg_cpu            501 kernel/sched/cpufreq_schedutil.c 	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
sg_cpu            526 kernel/sched/cpufreq_schedutil.c 	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
sg_cpu            527 kernel/sched/cpufreq_schedutil.c 	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
sg_cpu            532 kernel/sched/cpufreq_schedutil.c 	sugov_iowait_boost(sg_cpu, time, flags);
sg_cpu            533 kernel/sched/cpufreq_schedutil.c 	sg_cpu->last_update = time;
sg_cpu            535 kernel/sched/cpufreq_schedutil.c 	ignore_dl_rate_limit(sg_cpu, sg_policy);
sg_cpu            538 kernel/sched/cpufreq_schedutil.c 		next_f = sugov_next_freq_shared(sg_cpu, time);
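
For a policy spanning several CPUs, sugov_next_freq_shared() (signature at 499) takes the highest boosted utilization ratio across the policy's CPUs; a sketch of its loop, reconstructed from mainline of this era:

	for_each_cpu(j, policy->cpus) {
		struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
		unsigned long j_util, j_max;

		j_util = sugov_get_util(j_sg_cpu);
		j_max = j_sg_cpu->max;
		j_util = sugov_iowait_apply(j_sg_cpu, time, j_util, j_max);

		/* keep the CPU with the largest util/max ratio */
		if (j_util * max > util * j_max) {
			util = j_util;
			max = j_max;
		}
	}
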
sg_cpu            847 kernel/sched/cpufreq_schedutil.c 		struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
sg_cpu            849 kernel/sched/cpufreq_schedutil.c 		memset(sg_cpu, 0, sizeof(*sg_cpu));
sg_cpu            850 kernel/sched/cpufreq_schedutil.c 		sg_cpu->cpu			= cpu;
sg_cpu            851 kernel/sched/cpufreq_schedutil.c 		sg_cpu->sg_policy		= sg_policy;
sg_cpu            855 kernel/sched/cpufreq_schedutil.c 		struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
sg_cpu            857 kernel/sched/cpufreq_schedutil.c 		cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
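
The last group is sugov_start(): each per-CPU sugov_cpu is zeroed and bound to its CPU and policy (847-851), then its update_util hook is registered (855-857) with the callback chosen by the policy topology. The call at 857 is truncated by the match; its tail, reconstructed from mainline of this era:

	cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
				     policy_is_shared(policy) ?
						sugov_update_shared :
						sugov_update_single);
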