From 9b49c18f8910d80548a53f20cabc95a531c4bbd5 Mon Sep 17 00:00:00 2001
From: Miguel de Dios
Date: Tue, 15 Jan 2019 12:02:07 -0800
Subject: [PATCH] sched: fair: avoid little cpus due to sync, prev bias

Important threads can get forced to little CPUs when the sync or
prev_bias hints are followed blindly. This patch adds a check to see
whether those paths are forcing the task onto a CPU that has less
capacity than other CPUs available to the task. If so, we ignore the
sync and prev_bias hints and allow the scheduler to make a free
decision.

Bug: 117438867
Change-Id: Ie5a99f9a8b65ba9382a8d0de2ae0aad843e558d1
Signed-off-by: Miguel de Dios
Signed-off-by: Alexander Winkowski
---
 kernel/sched/fair.c | 20 +++++++++++++++++++-
 1 file changed, 19 insertions(+), 1 deletion(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 0a23e23c4e62..3f836ecd457a 100755
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -76,6 +76,8 @@ walt_dec_cfs_rq_stats(struct cfs_rq *cfs_rq, struct task_struct *p) {}
 
 #endif
 
+static inline bool cpu_is_in_target_set(struct task_struct *p, int cpu);
+
 /*
  * Targeted preemption latency for CPU-bound tasks:
  *
@@ -5806,7 +5808,8 @@ static inline bool bias_to_this_cpu(struct task_struct *p, int cpu,
 				    struct cpumask *rtg_target)
 {
 	bool base_test = cpumask_test_cpu(cpu, &p->cpus_allowed) &&
-			 cpu_active(cpu) && task_fits_max(p, cpu);
+			 cpu_active(cpu) && task_fits_max(p, cpu) &&
+			 cpu_is_in_target_set(p, cpu);
 	bool rtg_test = rtg_target && cpumask_test_cpu(cpu, rtg_target);
 
 	return base_test && (!rtg_target || rtg_test);
@@ -7442,6 +7445,21 @@ enum fastpaths {
 	MANY_WAKEUP,
 };
 
+/*
+ * Check whether cpu is in the fastest set of cpu's that p should run on.
+ * If p is boosted, prefer that p runs on a faster cpu; otherwise, allow p
+ * to run on any cpu.
+ */
+static inline bool
+cpu_is_in_target_set(struct task_struct *p, int cpu)
+{
+	struct root_domain *rd = cpu_rq(cpu)->rd;
+	int first_cpu = (schedtune_task_boost(p)) ?
+		rd->mid_cap_orig_cpu : rd->min_cap_orig_cpu;
+	int next_usable_cpu = cpumask_next(first_cpu - 1, &p->cpus_allowed);
+	return cpu >= next_usable_cpu || next_usable_cpu >= nr_cpu_ids;
+}
+
 static inline int find_best_target(struct task_struct *p, int *backup_cpu,
 				   bool boosted, bool prefer_idle,
 				   struct find_best_target_env *fbt_env)
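
Note on the cpumask idiom in the new helper: cpumask_next(first_cpu - 1,
&p->cpus_allowed) returns the first CPU in p's affinity mask at or above
first_cpu, or nr_cpu_ids when the mask has no CPU in that range. The
standalone userspace sketch below (not part of the patch) models that
behaviour on a hypothetical 8-CPU big.LITTLE topology where CPUs 0-3 are
little and CPU 4 is the first mid/big CPU; the bitmask, the cluster
boundaries, and find_next_set() are illustrative stand-ins for the
kernel's cpumask and root_domain state.

/* cpu_in_target_set_demo.c: standalone model of the check above. */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS			8	/* hypothetical topology      */
#define FIRST_MID_CPU		4	/* CPUs 0-3 little, 4-7 faster */
#define FIRST_LITTLE_CPU	0

/* Stand-in for cpumask_next(start - 1, mask): lowest set bit at or
 * above 'start', or NR_CPUS when the mask has no CPU in that range. */
static int find_next_set(unsigned int mask, int start)
{
	for (int cpu = start; cpu < NR_CPUS; cpu++)
		if (mask & (1u << cpu))
			return cpu;
	return NR_CPUS;
}

/* Mirrors cpu_is_in_target_set(): a boosted task should not land below
 * the mid cluster; an unboosted task may land anywhere. If the affinity
 * mask has no CPU at or above the cluster floor, any CPU passes. */
static bool cpu_is_in_target_set(unsigned int cpus_allowed, bool boosted,
				 int cpu)
{
	int first_cpu = boosted ? FIRST_MID_CPU : FIRST_LITTLE_CPU;
	int next_usable_cpu = find_next_set(cpus_allowed, first_cpu);

	return cpu >= next_usable_cpu || next_usable_cpu >= NR_CPUS;
}

int main(void)
{
	unsigned int any_cpu = 0xff;	 /* affined to CPUs 0-7 */
	unsigned int little_only = 0x0f; /* affined to CPUs 0-3 */

	/* Sync/prev hint points a boosted task at little CPU 1: reject. */
	printf("%d\n", cpu_is_in_target_set(any_cpu, true, 1));     /* 0 */
	/* Hint points a boosted task at big CPU 7: accept. */
	printf("%d\n", cpu_is_in_target_set(any_cpu, true, 7));     /* 1 */
	/* Unboosted task on little CPU 1: accept. */
	printf("%d\n", cpu_is_in_target_set(any_cpu, false, 1));    /* 1 */
	/* Boosted but affined to littles only: no faster CPU is usable,
	 * so CPU 1 passes rather than leaving the task nowhere to go. */
	printf("%d\n", cpu_is_in_target_set(little_only, true, 1)); /* 1 */
	return 0;
}

The last case shows why the helper accepts any CPU once
next_usable_cpu runs past the end of the mask: rejecting the sync/prev
hint there would reject every CPU the task is allowed to use, so the
bias is only ignored when a genuinely faster alternative exists.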