Searched for refs:busiest (results 1 – 3 of 3), sorted by relevance

/linux-4.1.27/kernel/sched/
fair.c
    5961  struct sched_group *busiest; /* Busiest group in this sd */  member
    5980  .busiest = NULL,  in init_sd_lb_stats()
    6327  struct sg_lb_stats *busiest = &sds->busiest_stat;  in update_sd_pick_busiest() local
    6329  if (sgs->group_type > busiest->group_type)  in update_sd_pick_busiest()
    6332  if (sgs->group_type < busiest->group_type)  in update_sd_pick_busiest()
    6335  if (sgs->avg_load <= busiest->avg_load)  in update_sd_pick_busiest()
    6348  if (!sds->busiest)  in update_sd_pick_busiest()
    6351  if (group_first_cpu(sds->busiest) > group_first_cpu(sg))  in update_sd_pick_busiest()
    6444  sds->busiest = sg;  in update_sd_lb_stats()
    6497  if (!sds->busiest)  in check_asym_packing()
    [all …]
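
Note on the fair.c hits: lines 6329-6335 are the heart of update_sd_pick_busiest(). A candidate group replaces the current busiest one if its group_type classification is more severe; only on a group_type tie does avg_load decide, and then only a strictly higher load wins (line 6335 rejects the candidate on <=). Below is a minimal, self-contained sketch of just that comparison; the types are simplified stand-ins, not the kernel's full sg_lb_stats definition.

    /*
     * Sketch of the busiest-group comparison seen in
     * update_sd_pick_busiest() above. Types are illustrative
     * stand-ins for the kernel's sg_lb_stats and group_type.
     */
    #include <stdbool.h>
    #include <stdio.h>

    enum group_type { group_other, group_imbalanced, group_overloaded };

    struct sg_stats {
        enum group_type group_type;
        unsigned long avg_load;
    };

    /* Should @candidate replace @busiest as the busiest group? */
    static bool pick_busier(const struct sg_stats *candidate,
                            const struct sg_stats *busiest)
    {
        /* A more severely classified group always wins. */
        if (candidate->group_type > busiest->group_type)
            return true;
        if (candidate->group_type < busiest->group_type)
            return false;
        /* Same classification: only strictly higher load wins. */
        return candidate->avg_load > busiest->avg_load;
    }

    int main(void)
    {
        struct sg_stats busiest   = { group_other,      900 };
        struct sg_stats candidate = { group_overloaded, 400 };

        /* candidate wins despite lower load: group_type dominates. */
        printf("replace: %d\n", pick_busier(&candidate, &busiest));
        return 0;
    }
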
sched.h
    1501  static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)  in _double_lock_balance() argument
    1503  __acquires(busiest->lock)  in _double_lock_balance()
    1507  double_rq_lock(this_rq, busiest);  in _double_lock_balance()
    1520  static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)  in _double_lock_balance() argument
    1522  __acquires(busiest->lock)  in _double_lock_balance()
    1527  if (unlikely(!raw_spin_trylock(&busiest->lock))) {  in _double_lock_balance()
    1528  if (busiest < this_rq) {  in _double_lock_balance()
    1530  raw_spin_lock(&busiest->lock);  in _double_lock_balance()
    1535  raw_spin_lock_nested(&busiest->lock,  in _double_lock_balance()
    1546  static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)  in double_lock_balance() argument
    [all …]
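
Note on the sched.h hits: the kernel keeps two build-dependent variants of _double_lock_balance(). One takes both runqueue locks outright via double_rq_lock() (line 1507); the other tries busiest->lock without blocking (line 1527) and, if that fails while busiest has the lower address (line 1528), drops this_rq->lock and retakes both locks lower-address-first. Since every CPU then acquires any pair of runqueue locks in the same global order, two CPUs can never deadlock waiting on each other. Here is a user-space sketch of that ordering pattern, assuming pthread mutexes as a stand-in for the kernel's raw spinlocks:

    #include <pthread.h>

    struct rq {
        pthread_mutex_t lock;
        /* per-CPU runqueue state would live here */
    };

    /*
     * Caller already holds this_rq->lock; on return both locks are held.
     * Returns 1 if this_rq->lock was dropped and retaken, so any state
     * the caller observed under it must be revalidated; 0 otherwise.
     */
    static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
    {
        int dropped = 0;

        if (pthread_mutex_trylock(&busiest->lock) != 0) {
            if (busiest < this_rq) {
                /* Wrong order: back off, retake lower address first. */
                pthread_mutex_unlock(&this_rq->lock);
                pthread_mutex_lock(&busiest->lock);
                pthread_mutex_lock(&this_rq->lock);
                dropped = 1;
            } else {
                /* Already in address order: safe to block. */
                pthread_mutex_lock(&busiest->lock);
            }
        }
        return dropped;
    }

    int main(void)
    {
        struct rq a, b;

        pthread_mutex_init(&a.lock, NULL);
        pthread_mutex_init(&b.lock, NULL);

        pthread_mutex_lock(&a.lock);   /* caller's precondition */
        double_lock_balance(&a, &b);   /* both locks now held */
        pthread_mutex_unlock(&b.lock);
        pthread_mutex_unlock(&a.lock);
        return 0;
    }

Comparing the two struct pointers mirrors the kernel's busiest < this_rq test: per-runqueue addresses are fixed, which is what makes them usable as a global lock order.
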
/linux-4.1.27/Documentation/scheduler/
sched-domains.txt
    42  Initially, load_balance() finds the busiest group in the current sched domain.
    43  If it succeeds, it looks for the busiest runqueue of all the CPUs' runqueues in
    44  that group. If it manages to find such a runqueue, it locks both our initial
    45  CPU's runqueue and the newly found busiest one and starts moving tasks from it
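
Note: the documentation lines summarize the whole algorithm: find the busiest group in the current domain, find the busiest runqueue within it, lock both runqueues, and pull tasks toward the current CPU. The structural sketch below follows that flow; every helper is an illustrative stub, not the kernel's actual API (the real load_balance() takes many more parameters and handles far more corner cases).

    #include <stdio.h>

    struct sched_group { int id; };
    struct rq { int nr_running; };

    /* Illustrative stubs standing in for the kernel's real helpers. */
    static struct sched_group *find_busiest_group(void)
    {
        static struct sched_group g = { 1 };
        return &g;
    }

    static struct rq *find_busiest_queue(struct sched_group *group)
    {
        static struct rq busy = { 8 };
        (void)group;
        return &busy;
    }

    static void double_lock(struct rq *a, struct rq *b)   { (void)a; (void)b; }
    static void double_unlock(struct rq *a, struct rq *b) { (void)a; (void)b; }

    static int move_tasks(struct rq *dst, struct rq *src)
    {
        int n = src->nr_running / 2;   /* pretend half is the imbalance */
        src->nr_running -= n;
        dst->nr_running += n;
        return n;
    }

    static int load_balance_sketch(struct rq *this_rq)
    {
        struct sched_group *group;
        struct rq *busiest;
        int moved;

        /* 1. Find the busiest group in the current sched domain. */
        group = find_busiest_group();
        if (!group)
            return 0;

        /* 2. Within that group, find the busiest CPU's runqueue. */
        busiest = find_busiest_queue(group);
        if (!busiest || busiest == this_rq)
            return 0;

        /* 3. Lock both runqueues and pull tasks toward this CPU. */
        double_lock(this_rq, busiest);
        moved = move_tasks(this_rq, busiest);
        double_unlock(this_rq, busiest);

        return moved;
    }

    int main(void)
    {
        struct rq this_rq = { 1 };

        printf("moved %d tasks\n", load_balance_sketch(&this_rq));
        return 0;
    }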