diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 366ac2d783ed..99761dd2f21c 100755
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -164,8 +164,8 @@ static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
 		return;
 
 	if (sugov_up_down_rate_limit(sg_policy, time, next_freq)) {
-		/* Restore cached freq as next_freq is not changed */
-		sg_policy->cached_raw_freq = sg_policy->prev_cached_raw_freq;
+		/* Don't cache a raw freq that didn't become next_freq */
+		sg_policy->cached_raw_freq = 0;
 		return;
 	}
 
@@ -334,8 +334,9 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
 
 	busy = use_pelt() && sugov_cpu_is_busy(sg_cpu);
 
-	if (0) {
-		sg_policy->cached_raw_freq = sg_policy->prev_cached_raw_freq;
+	if (flags & SCHED_CPUFREQ_RT_DL) {
+		/* clear cache when it's bypassed */
+		sg_policy->cached_raw_freq = 0;
 		next_f = policy->cpuinfo.max_freq;
 	} else {
 		sugov_get_util(&util, &max, sg_cpu->cpu);
@@ -382,8 +383,9 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
 			j_sg_cpu->iowait_boost_pending = false;
 			continue;
 		}
-		if (0) {
-			sg_policy->cached_raw_freq = sg_policy->prev_cached_raw_freq;
+		if (j_sg_cpu->flags & SCHED_CPUFREQ_RT_DL) {
+			/* clear cache when it's bypassed */
+			sg_policy->cached_raw_freq = 0;
 			return policy->cpuinfo.max_freq;
 		}
 
@@ -424,9 +426,10 @@ static void sugov_update_shared(struct update_util_data *hook, u64 time,
 
 	if (sugov_should_update_freq(sg_policy, time) &&
 	    !(flags & SCHED_CPUFREQ_CONTINUE)) {
-		if (0) {
+		if (flags & SCHED_CPUFREQ_RT_DL) {
 			next_f = sg_policy->policy->cpuinfo.max_freq;
-			sg_policy->cached_raw_freq = sg_policy->prev_cached_raw_freq;
+			/* clear cache when it's bypassed */
+			sg_policy->cached_raw_freq = 0;
 		} else {
 			next_f = sugov_next_freq_shared(sg_cpu, time);
 		}
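
Note on why the cache is cleared rather than restored: in the schedutil of this
era, get_next_freq() short-circuits when the freshly computed raw frequency
matches cached_raw_freq, returning the previously resolved next_freq. Once a
commit is rate-limited or the RT/DL max-freq path is taken, cached_raw_freq no
longer corresponds to next_freq, so restoring prev_cached_raw_freq could let a
later update take the shortcut and return a stale next_freq. Writing 0, which
can never match a computed raw freq, forces a full re-resolution instead. A
minimal sketch of that shortcut, based on the mainline 4.14 get_next_freq()
(the exact body in this tree may differ):

static unsigned int get_next_freq(struct sugov_policy *sg_policy,
				  unsigned long util, unsigned long max)
{
	struct cpufreq_policy *policy = sg_policy->policy;
	unsigned int freq = arch_scale_freq_invariant() ?
				policy->cpuinfo.max_freq : policy->cur;

	freq = (freq + (freq >> 2)) * util / max;

	/*
	 * Cache hit: the raw freq is unchanged since the last evaluation, so
	 * reuse the already-resolved next_freq and skip the driver lookup.
	 * A cached_raw_freq of 0 never matches, so the hunks above force
	 * this test to fail after a bypassed or rate-limited update.
	 */
	if (freq == sg_policy->cached_raw_freq && sg_policy->next_freq != UINT_MAX)
		return sg_policy->next_freq;

	sg_policy->cached_raw_freq = freq;
	return cpufreq_driver_resolve_freq(policy, freq);
}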