From 30b1a73858e9ef10dffc37a5fb71e7b7be37e40b Mon Sep 17 00:00:00 2001
From: Rick Yiu
Date: Mon, 3 Jun 2019 19:17:17 +0800
Subject: [PATCH] sched/fair: let scheduler skip util checking if cpu is idle

The current cpu util includes the util of runnable tasks plus the
recent utilization of currently non-runnable tasks, so it may return a
non-zero value even when there is no task running on a cpu. When the
scheduler is selecting a cpu for a task, it checks whether the cpu util
exceeds the cpu capacity, so it could skip a cpu even though that cpu
is idle. Let the scheduler skip the util check if the task prefers an
idle cpu and the cpu is idle.

Bug: 133284637
Test: cpu selected as expected
Change-Id: I2c15d6b79b1cc83c72e84add70962a8e74c178b8
Signed-off-by: Rick Yiu
Signed-off-by: Alexander Winkowski
---
 kernel/sched/fair.c | 33 ++++++++++++++++++++++++++++-----
 1 file changed, 28 insertions(+), 5 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e9611ae53914..ef23942cf634 100755
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5274,6 +5274,8 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	struct cfs_rq *cfs_rq;
 	struct sched_entity *se = &p->se;
 	int task_new = !(flags & ENQUEUE_WAKEUP);
+	bool prefer_idle = sched_feat(EAS_PREFER_IDLE) ?
+			(schedtune_prefer_idle(p) > 0) : 0;
 
 #ifdef CONFIG_SCHED_WALT
 	p->misfit = !task_fits_max(p, rq->cpu);
@@ -5347,7 +5349,14 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	if (!se) {
 		add_nr_running(rq, 1);
 		inc_rq_walt_stats(rq, p);
-		if (!task_new)
+		/*
+		 * If the task prefers idle cpu, and it also is the first
+		 * task enqueued in this runqueue, then we don't check
+		 * overutilized. Hopefully the cpu util will be back to
+		 * normal before next overutilized check.
+		 */
+		if (!task_new &&
+		    !(prefer_idle && rq->nr_running == 1))
 			update_overutilized_status(rq);
 	}
 
@@ -7478,6 +7487,7 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
 	unsigned long best_active_util = ULONG_MAX;
 	unsigned long best_active_cuml_util = ULONG_MAX;
 	unsigned long best_idle_cuml_util = ULONG_MAX;
+	unsigned long best_idle_util = ULONG_MAX;
 	int best_idle_cstate = INT_MAX;
 	struct sched_domain *sd;
 	struct sched_group *sg;
@@ -7611,9 +7621,12 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
		 * Ensure minimum capacity to grant the required boost.
		 * The target CPU can be already at a capacity level higher
		 * than the one required to boost the task.
+		 * However, if the task prefers idle cpu and that
+		 * cpu is idle, skip this check.
		 */
		new_util = max(min_util, new_util);
-		if (new_util > capacity_orig)
+		if (!(prefer_idle && idle_cpu(i))
+		    && new_util > capacity_orig)
			continue;
 
		/*
@@ -7673,12 +7686,22 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
			    capacity_orig > target_capacity)
				continue;
			if (capacity_orig == target_capacity &&
-			    sysctl_sched_cstate_aware &&
-			    best_idle_cstate <= idle_idx)
-				continue;
+			    sysctl_sched_cstate_aware) {
+				if (best_idle_cstate < idle_idx)
+					continue;
+				/*
+				 * If idle state of cpu is the
+				 * same, select least utilized.
+				 */
+				else if (best_idle_cstate ==
+					 idle_idx &&
+					 best_idle_util <= new_util)
+					continue;
+			}
 
			target_capacity = capacity_orig;
			best_idle_cstate = idle_idx;
+			best_idle_util = new_util;
			best_idle_cpu = i;
			continue;
		}
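
A minimal userspace sketch of the placement rule the patch introduces, for
illustration only; it is not the kernel code. The struct, skip_for_capacity()
and the example numbers are made-up stand-ins for the real rq/util plumbing,
idle_cpu() and schedtune_prefer_idle():

	/*
	 * Illustrative sketch only -- NOT the kernel implementation.
	 * It mimics the capacity check in find_best_target(): a cpu whose
	 * estimated util would exceed its original capacity is normally
	 * skipped, but with this patch the check is bypassed when the task
	 * prefers an idle cpu and the candidate cpu is idle.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	struct cpu_candidate {
		unsigned long new_util;       /* util of the cpu if the task is placed there */
		unsigned long capacity_orig;  /* original capacity of the cpu */
		bool idle;                    /* stand-in for idle_cpu(i) */
	};

	/* stand-in for the prefer_idle / idle_cpu() bypass added by the patch */
	static bool skip_for_capacity(const struct cpu_candidate *c, bool prefer_idle)
	{
		if (prefer_idle && c->idle)
			return false;	/* task wants an idle cpu and this one is idle: keep it */
		return c->new_util > c->capacity_orig;
	}

	int main(void)
	{
		/* an idle cpu whose leftover (blocked) util still looks "full" */
		struct cpu_candidate idle_cpu_example = {
			.new_util = 180, .capacity_orig = 160, .idle = true,
		};

		printf("prefer_idle=0: skip=%d\n",
		       skip_for_capacity(&idle_cpu_example, false)); /* 1: cpu skipped */
		printf("prefer_idle=1: skip=%d\n",
		       skip_for_capacity(&idle_cpu_example, true));  /* 0: cpu kept    */
		return 0;
	}

Built with any C99 compiler, the first line prints skip=1 (the idle cpu is
rejected on leftover util alone, the behaviour the patch complains about) and
the second prints skip=0 (the cpu is kept once prefer_idle is set).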