new_bw            190 drivers/edac/amd64_edac.c static int __set_scrub_rate(struct amd64_pvt *pvt, u32 new_bw, u32 min_rate)
new_bw            212 drivers/edac/amd64_edac.c 		if (scrubrates[i].bandwidth <= new_bw)
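
The two amd64_edac.c hits above are a descending table scan: the driver walks a scrubrates[] table ordered from highest bandwidth to lowest and stops at the first entry whose rate does not exceed the request. A minimal, self-contained sketch of that scan; the table contents and the pick_scrub_value() helper are illustrative, not the driver's actual data:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct scrubrate {
    uint32_t scrubval;   /* encoding written to the scrub-rate register */
    uint32_t bandwidth;  /* bytes/sec the hardware will scrub at */
};

/* Sorted highest bandwidth first, with "off" as the final fallback. */
static const struct scrubrate scrubrates[] = {
    { 0x01, 1600000000u },
    { 0x02,  800000000u },
    { 0x03,  400000000u },
    { 0x04,  200000000u },
    { 0x00,           0u },   /* scrubbing disabled */
};

static uint32_t pick_scrub_value(uint32_t new_bw, uint32_t min_rate)
{
    size_t i;

    for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) {
        if (scrubrates[i].scrubval < min_rate)
            continue;   /* skip rates below the recommended floor */
        if (scrubrates[i].bandwidth <= new_bw)
            break;      /* first (i.e. fastest) rate not above the request */
    }
    return scrubrates[i].scrubval;
}

int main(void)
{
    printf("0x%02x\n", pick_scrub_value(500000000u, 0));  /* -> 0x03 */
    return 0;
}
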
new_bw            989 drivers/edac/e752x_edac.c static int set_sdram_scrub_rate(struct mem_ctl_info *mci, u32 new_bw)
new_bw           1006 drivers/edac/e752x_edac.c 		if (scrubrates[i].bandwidth >= new_bw)
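
e752x_edac.c walks the opposite direction: the `>=` test implies a table ordered from lowest bandwidth to highest, where the first entry that covers the request wins, i.e. the slowest rate that still satisfies it. A sketch of just that comparison, assuming such an ascending layout:

#include <stddef.h>
#include <stdint.h>

struct scrub_entry {
    uint32_t bandwidth;  /* bytes/sec */
    uint32_t scrubval;   /* register encoding */
};

/* Ascending table: pick the slowest rate that still covers the request,
 * falling back to the fastest (last) entry. */
static uint32_t pick_scrub_value_ascending(const struct scrub_entry *tbl,
                                           size_t n, uint32_t new_bw)
{
    size_t i;

    for (i = 0; i < n - 1; i++)
        if (tbl[i].bandwidth >= new_bw)
            break;
    return tbl[i].scrubval;
}
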
new_bw            717 drivers/edac/edac_mc_sysfs.c 	int new_bw = 0;
new_bw            722 drivers/edac/edac_mc_sysfs.c 	new_bw = mci->set_sdram_scrub_rate(mci, bandwidth);
new_bw            723 drivers/edac/edac_mc_sysfs.c 	if (new_bw < 0) {
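
The edac_mc_sysfs.c lines are the glue between the two drivers above and userspace: the sysfs store handler delegates to whichever set_sdram_scrub_rate() callback the driver installed and treats a negative return as failure. A reduced model, with struct mem_ctl_info cut down to the single member used here and placeholder return values where the kernel uses errnos:

#include <stdint.h>
#include <stdio.h>

struct mem_ctl_info {
    int (*set_sdram_scrub_rate)(struct mem_ctl_info *mci, uint32_t new_bw);
};

/* 0 on success, negative on failure (the kernel returns -ENODEV/-EINVAL
 * and logs via edac_printk rather than stderr). */
static int store_scrub_rate(struct mem_ctl_info *mci, uint32_t bandwidth)
{
    int new_bw;

    if (!mci->set_sdram_scrub_rate)
        return -1;              /* no driver callback installed */

    new_bw = mci->set_sdram_scrub_rate(mci, bandwidth);
    if (new_bw < 0) {
        fprintf(stderr, "Error setting scrub rate to: %u\n", bandwidth);
        return -1;
    }
    return 0;                   /* the sysfs store returns count here */
}
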
new_bw           1957 drivers/edac/i7core_edac.c static int set_sdram_scrub_rate(struct mem_ctl_info *mci, u32 new_bw)
new_bw           1971 drivers/edac/i7core_edac.c 	if (new_bw == 0) {
new_bw           1992 drivers/edac/i7core_edac.c 		do_div(scrub_interval, new_bw);
new_bw           2011 drivers/edac/i7core_edac.c 	return new_bw;
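
i7core_edac.c does arithmetic rather than a table lookup: new_bw == 0 disables scrubbing, otherwise the driver converts bytes/sec into a per-cache-line scrub interval measured in DCLK cycles before programming the hardware, and returns new_bw on success. A userspace model of that conversion; the constants are illustrative and do_div() becomes plain 64-bit division:

#include <stdint.h>
#include <stdio.h>

static uint64_t scrub_interval_cycles(uint32_t new_bw)
{
    const uint64_t freq_dclk_mhz = 800;   /* DCLK frequency, MHz (example) */
    const uint64_t cache_line_size = 64;  /* bytes scrubbed per operation */
    uint64_t scrub_interval;

    if (new_bw == 0)
        return 0;   /* the driver disables scrubbing in this branch */

    /* cycles/sec * bytes/op / bytes/sec = cycles between operations */
    scrub_interval = freq_dclk_mhz * 1000000 * cache_line_size;
    scrub_interval /= new_bw;   /* stands in for do_div(scrub_interval, new_bw) */
    return scrub_interval;
}

int main(void)
{
    /* 1 MB/s -> one 64-byte line every 51200 DCLK cycles at 800 MHz */
    printf("%llu\n", (unsigned long long)scrub_interval_cycles(1000000));
    return 0;
}
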
new_bw            156 kernel/sched/deadline.c void dl_change_utilization(struct task_struct *p, u64 new_bw)
new_bw            180 kernel/sched/deadline.c 	__add_rq_bw(new_bw, &rq->dl);
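
Switching subsystems: in the deadline scheduler, new_bw is a fixed-point CPU-bandwidth reservation, not a byte rate. dl_change_utilization() retires the task's old contribution from its runqueue's accounting and adds the new one; a stripped-down model of the listed __add_rq_bw() step and its __sub_rq_bw() counterpart (the real function also handles running_bw and the non-contending state, omitted here):

#include <stdint.h>

struct dl_rq {
    uint64_t this_bw;  /* sum of reservations of tasks on this runqueue */
};

static void change_utilization(struct dl_rq *dl, uint64_t old_bw,
                               uint64_t new_bw)
{
    dl->this_bw -= old_bw;  /* models __sub_rq_bw(p->dl.dl_bw, &rq->dl) */
    dl->this_bw += new_bw;  /* models __add_rq_bw(new_bw, &rq->dl) */
}
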
new_bw           2470 kernel/sched/deadline.c 	u64 new_bw = to_ratio(period, runtime);
new_bw           2489 kernel/sched/deadline.c 		if (new_bw < dl_b->total_bw)
new_bw           2517 kernel/sched/deadline.c 	u64 new_bw = -1;
new_bw           2526 kernel/sched/deadline.c 		new_bw = to_ratio(global_rt_period(), global_rt_runtime());
new_bw           2536 kernel/sched/deadline.c 		dl_b->bw = new_bw;
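
The next two clusters are the sched_rt_period/sched_rt_runtime sysctl plumbing: validation refuses a new global cap smaller than bandwidth already reserved on a root domain, and applying it writes the recomputed per-CPU cap everywhere, with (u64)-1 standing for "unlimited" when the RT runtime is infinite. A condensed, single-domain model with simplified names (the kernel loops over every CPU's root domain and uses div64_u64 in to_ratio()):

#include <stdint.h>

#define BW_SHIFT 20  /* kernel fixed point: 1.0 CPU == 1 << 20 */

struct dl_bw {
    uint64_t bw;        /* per-CPU cap; (uint64_t)-1 means no limit */
    uint64_t total_bw;  /* bandwidth already admitted on this domain */
};

static uint64_t to_ratio(uint64_t period, uint64_t runtime)
{
    return (runtime << BW_SHIFT) / period;
}

/* Refuse a cap the domain has already outgrown (-EBUSY upstream). */
static int validate_global_cap(const struct dl_bw *dl_b, uint64_t new_bw)
{
    if (new_bw < dl_b->total_bw)
        return -1;
    return 0;
}

/* Apply the cap; infinite RT runtime means "unlimited". */
static void apply_global_cap(struct dl_bw *dl_b, int runtime_is_inf,
                             uint64_t period, uint64_t runtime)
{
    uint64_t new_bw = (uint64_t)-1;

    if (!runtime_is_inf)
        new_bw = to_ratio(period, runtime);
    dl_b->bw = new_bw;
}
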
new_bw           2558 kernel/sched/deadline.c 	u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
new_bw           2565 kernel/sched/deadline.c 	if (new_bw == p->dl.dl_bw && task_has_dl_policy(p))
new_bw           2576 kernel/sched/deadline.c 	    !__dl_overflow(dl_b, cpus, 0, new_bw)) {
new_bw           2579 kernel/sched/deadline.c 		__dl_add(dl_b, new_bw, cpus);
new_bw           2582 kernel/sched/deadline.c 		   !__dl_overflow(dl_b, cpus, p->dl.dl_bw, new_bw)) {
new_bw           2591 kernel/sched/deadline.c 		__dl_add(dl_b, new_bw, cpus);
new_bw           2592 kernel/sched/deadline.c 		dl_change_utilization(p, new_bw);
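
The cluster above is per-task admission control: nothing to do when the reservation is unchanged, a task entering SCHED_DEADLINE is tested with old_bw == 0, and a parameter change is tested against (and then swapped with) the current reservation, after which the runqueue accounting is refreshed via dl_change_utilization(). A condensed model; the kernel's __dl_add()/__dl_sub() also take the CPU count for per-runqueue GRUB bookkeeping, reduced here to the total:

#include <stdbool.h>
#include <stdint.h>

struct dl_bw {
    uint64_t bw;        /* per-CPU cap; (uint64_t)-1 means no limit */
    uint64_t total_bw;  /* admitted bandwidth on this root domain */
};

static bool dl_overflow(const struct dl_bw *b, int cpus,
                        uint64_t old_bw, uint64_t new_bw)
{
    return b->bw != (uint64_t)-1 &&
           b->bw * cpus < b->total_bw - old_bw + new_bw;
}

/* 0 = admitted, -1 = would overflow the domain's capacity. */
static int dl_admit(struct dl_bw *b, int cpus, bool has_dl_policy,
                    uint64_t cur_bw, uint64_t new_bw)
{
    if (new_bw == cur_bw && has_dl_policy)
        return 0;                        /* reservation unchanged */

    if (!has_dl_policy && !dl_overflow(b, cpus, 0, new_bw)) {
        b->total_bw += new_bw;           /* entering SCHED_DEADLINE */
        return 0;
    }
    if (has_dl_policy && !dl_overflow(b, cpus, cur_bw, new_bw)) {
        b->total_bw -= cur_bw;           /* swap old reservation ... */
        b->total_bw += new_bw;           /* ... for the new one */
        return 0;                        /* then dl_change_utilization() */
    }
    return -1;
}
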
new_bw            308 kernel/sched/sched.h bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
new_bw            311 kernel/sched/sched.h 	       dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
new_bw            314 kernel/sched/sched.h extern void dl_change_utilization(struct task_struct *p, u64 new_bw);
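
The sched.h definition is the inequality everything above leans on: replacing old_bw with new_bw overflows when the new total would exceed the per-CPU cap times the CPU count, with a cap of -1 meaning "never overflow". A worked numeric check using the well-known defaults (95% RT runtime, BW_SHIFT = 20 fixed point); plain numbers, not kernel API:

#include <stdint.h>
#include <stdio.h>

#define BW_SHIFT 20   /* 1.0 CPU == 1 << 20 */

static uint64_t to_ratio(uint64_t period, uint64_t runtime)
{
    return (runtime << BW_SHIFT) / period;   /* div64_u64() upstream */
}

int main(void)
{
    uint64_t cap      = to_ratio(1000000, 950000);  /* default 95% cap */
    uint64_t new_bw   = to_ratio(100000, 50000);    /* 50ms every 100ms */
    uint64_t total_bw = 3000000;                    /* already admitted */
    int      cpus     = 4;

    /* Same inequality as __dl_overflow(), with old_bw == 0 (new task) */
    int overflow = cap != (uint64_t)-1 &&
                   cap * cpus < total_bw - 0 + new_bw;

    printf("cap=%llu new_bw=%llu overflow=%d\n",
           (unsigned long long)cap, (unsigned long long)new_bw, overflow);
    return 0;  /* prints cap=996147 new_bw=524288 overflow=0 */
}
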
new_bw           3121 net/mac80211/rx.c 			enum ieee80211_sta_rx_bandwidth max_bw, new_bw;
new_bw           3136 net/mac80211/rx.c 			new_bw = ieee80211_sta_cur_vht_bw(rx->sta);
new_bw           3138 net/mac80211/rx.c 			if (rx->sta->sta.bandwidth == new_bw)
new_bw           3141 net/mac80211/rx.c 			rx->sta->sta.bandwidth = new_bw;
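
Over in mac80211, new_bw is an enum ieee80211_sta_rx_bandwidth rather than a rate. The rx.c hits recompute a station's usable VHT bandwidth after an operating-mode notification and touch state (and rate control) only when it actually changed. A self-contained model of that compare-and-set pattern; current_bw() is a stand-in for ieee80211_sta_cur_vht_bw() and the struct and flag names are illustrative:

#include <stdint.h>

enum rx_bandwidth { BW_20, BW_40, BW_80, BW_160 };

struct sta {
    enum rx_bandwidth bandwidth;  /* width currently used toward the peer */
    uint32_t rc_changed;          /* flags handed to rate control */
};

#define RC_BW_CHANGED 0x1u

/* Stand-in for ieee80211_sta_cur_vht_bw(): the minimum of our own and
 * the peer's advertised width would be computed here. */
static enum rx_bandwidth current_bw(const struct sta *sta)
{
    (void)sta;
    return BW_80;
}

static void sta_bw_update(struct sta *sta)
{
    enum rx_bandwidth new_bw = current_bw(sta);

    if (sta->bandwidth == new_bw)
        return;                        /* no change, no rate-control churn */

    sta->bandwidth = new_bw;
    sta->rc_changed |= RC_BW_CHANGED;  /* make rate control re-evaluate */
}
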
new_bw            504 net/mac80211/vht.c 	enum ieee80211_sta_rx_bandwidth new_bw;
new_bw            539 net/mac80211/vht.c 	new_bw = ieee80211_sta_cur_vht_bw(sta);
new_bw            540 net/mac80211/vht.c 	if (new_bw != sta->sta.bandwidth) {
new_bw            541 net/mac80211/vht.c 		sta->sta.bandwidth = new_bw;
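
The vht.c hits show the same compare-and-set from the opposite entry point: after the peer updates its operating mode, the station's usable width is recomputed and only a real change is written back, presumably accumulated into the changed mask returned to the caller so one rate-control update covers NSS and width changes together. The sketch after the rx.c hits above models this shared step.
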