sched/fair: let scheduler skip util checking if cpu is idle

Current cpu util includes the util of runnable tasks plus the recent
utilization of currently non-runnable tasks, so it may be non-zero even
when no task is running on a cpu. When the scheduler selects a cpu for
a task, it checks whether the cpu util exceeds the cpu's capacity, so
it may skip a cpu even though that cpu is idle. Let the scheduler skip
the util check if the task prefers an idle cpu and the cpu is idle.

Bug: 133284637
Test: cpu selected as expected
Change-Id: I2c15d6b79b1cc83c72e84add70962a8e74c178b8
Signed-off-by: Rick Yiu <rickyiu@google.com>
Signed-off-by: Alexander Winkowski <dereference23@outlook.com>
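
The gist of the change, as a standalone sketch (illustrative only:
candidate_before()/candidate_after() and their parameters are stand-ins
for values the kernel computes, not the kernel's API):

#include <stdbool.h>

/* Before: residual util of blocked tasks can disqualify an idle cpu. */
static bool candidate_before(unsigned long new_util,
			     unsigned long capacity_orig)
{
	return new_util <= capacity_orig;
}

/* After: a prefer-idle task may take an idle cpu regardless of util. */
static bool candidate_after(unsigned long new_util,
			    unsigned long capacity_orig,
			    bool prefer_idle, bool is_idle)
{
	if (prefer_idle && is_idle)
		return true;	/* skip the util vs. capacity check */
	return new_util <= capacity_orig;
}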
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c

@@ -5274,6 +5274,8 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	struct cfs_rq *cfs_rq;
 	struct sched_entity *se = &p->se;
 	int task_new = !(flags & ENQUEUE_WAKEUP);
+	bool prefer_idle = sched_feat(EAS_PREFER_IDLE) ?
+			(schedtune_prefer_idle(p) > 0) : 0;
 
 #ifdef CONFIG_SCHED_WALT
 	p->misfit = !task_fits_max(p, rq->cpu);
@@ -5347,7 +5349,14 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	if (!se) {
 		add_nr_running(rq, 1);
 		inc_rq_walt_stats(rq, p);
-		if (!task_new)
+		/*
+		 * If the task prefers idle cpu, and it also is the first
+		 * task enqueued in this runqueue, then we don't check
+		 * overutilized. Hopefully the cpu util will be back to
+		 * normal before next overutilized check.
+		 */
+		if (!task_new &&
+		    !(prefer_idle && rq->nr_running == 1))
 			update_overutilized_status(rq);
 	}
 
@@ -7478,6 +7487,7 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
 	unsigned long best_active_util = ULONG_MAX;
 	unsigned long best_active_cuml_util = ULONG_MAX;
 	unsigned long best_idle_cuml_util = ULONG_MAX;
+	unsigned long best_idle_util = ULONG_MAX;
 	int best_idle_cstate = INT_MAX;
 	struct sched_domain *sd;
 	struct sched_group *sg;
@@ -7611,9 +7621,12 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
 			 * Ensure minimum capacity to grant the required boost.
 			 * The target CPU can be already at a capacity level higher
 			 * than the one required to boost the task.
+			 * However, if the task prefers idle cpu and that
+			 * cpu is idle, skip this check.
 			 */
 			new_util = max(min_util, new_util);
-			if (new_util > capacity_orig)
+			if (!(prefer_idle && idle_cpu(i))
+			    && new_util > capacity_orig)
 				continue;
 
 			/*
@@ -7673,12 +7686,22 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
 				    capacity_orig > target_capacity)
 					continue;
 				if (capacity_orig == target_capacity &&
-				    sysctl_sched_cstate_aware &&
-				    best_idle_cstate <= idle_idx)
-					continue;
+				    sysctl_sched_cstate_aware) {
+					if (best_idle_cstate < idle_idx)
+						continue;
+					/*
+					 * If idle state of cpu is the
+					 * same, select least utilized.
+					 */
+					else if (best_idle_cstate ==
+						 idle_idx &&
+						 best_idle_util <= new_util)
+						continue;
+				}
 
 				target_capacity = capacity_orig;
 				best_idle_cstate = idle_idx;
+				best_idle_util = new_util;
 				best_idle_cpu = i;
 				continue;
 			}
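
The reworked idle-cpu tie-break in the last hunk can be restated outside
the kernel as a comparator; this is a sketch under the same rules the
hunk encodes (the struct and function names here are hypothetical, not
the kernel's):

#include <stdbool.h>

/* Stand-ins for the best_idle_* bookkeeping in find_best_target(). */
struct idle_pick {
	int cstate;		/* idle state index; lower exits faster */
	unsigned long util;	/* projected util with the task placed */
};

/*
 * Among idle cpus of equal capacity: a shallower idle state wins;
 * at equal idle state the less utilized cpu wins (the second rule
 * is what this patch adds via best_idle_util).
 */
static bool cand_beats_best(const struct idle_pick *cand,
			    const struct idle_pick *best)
{
	if (best->cstate < cand->cstate)
		return false;	/* current best exits idle faster */
	if (best->cstate == cand->cstate && best->util <= cand->util)
		return false;	/* same state, best is no more loaded */
	return true;
}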
