@@ -5274,6 +5274,8 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	struct cfs_rq *cfs_rq;
 	struct sched_entity *se = &p->se;
 	int task_new = !(flags & ENQUEUE_WAKEUP);
+	bool prefer_idle = sched_feat(EAS_PREFER_IDLE) ?
+				(schedtune_prefer_idle(p) > 0) : 0;
 #ifdef CONFIG_SCHED_WALT
 	p->misfit = !task_fits_max(p, rq->cpu);
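The new prefer_idle local is true only when the EAS_PREFER_IDLE scheduler feature is enabled and schedtune flags the task as preferring idle CPUs. A minimal userspace sketch of that gating, with both helpers stubbed out purely for illustration (the stub bodies are assumptions, not the kernel implementations):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for sched_feat(EAS_PREFER_IDLE) and
 * schedtune_prefer_idle(p); the real kernel helpers consult the
 * scheduler feature bitmask and the task's schedtune boost group. */
static bool eas_prefer_idle_enabled(void) { return true; }
static int task_prefer_idle_hint(void) { return 1; }

int main(void)
{
	bool prefer_idle = eas_prefer_idle_enabled() ?
			   (task_prefer_idle_hint() > 0) : false;
	printf("prefer_idle = %d\n", prefer_idle);
	return 0;
}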
@@ -5347,7 +5349,14 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	if (!se) {
 		add_nr_running(rq, 1);
 		inc_rq_walt_stats(rq, p);
-		if (!task_new)
+		/*
+		 * If the task prefers an idle cpu and it is also the first
+		 * task enqueued on this runqueue, don't check
+		 * overutilized. Hopefully the cpu util will be back to
+		 * normal before the next overutilized check.
+		 */
+		if (!task_new &&
+		    !(prefer_idle && rq->nr_running == 1))
 			update_overutilized_status(rq);
 	}
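In effect, the overutilized flag is now refreshed at enqueue only for wakeups, and not when a prefer_idle task lands as the sole runnable task on the CPU. A self-contained sketch of that decision (should_update_overutilized() is a hypothetical helper used only to illustrate the condition above):

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the condition added above: skip the overutilized update for
 * newly forked tasks, and for prefer_idle wakeups that are the only
 * task on the runqueue. */
static bool should_update_overutilized(bool task_new, bool prefer_idle,
				       unsigned int nr_running)
{
	return !task_new && !(prefer_idle && nr_running == 1);
}

int main(void)
{
	/* prefer_idle wakeup on an otherwise empty runqueue: skipped */
	printf("%d\n", should_update_overutilized(false, true, 1));
	/* ordinary wakeup on a busy runqueue: status updated */
	printf("%d\n", should_update_overutilized(false, false, 3));
	return 0;
}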
@@ -7478,6 +7487,7 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
 	unsigned long best_active_util = ULONG_MAX;
 	unsigned long best_active_cuml_util = ULONG_MAX;
 	unsigned long best_idle_cuml_util = ULONG_MAX;
+	unsigned long best_idle_util = ULONG_MAX;
 	int best_idle_cstate = INT_MAX;
 	struct sched_domain *sd;
 	struct sched_group *sg;
@@ -7611,9 +7621,12 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
 		 * Ensure minimum capacity to grant the required boost.
 		 * The target CPU can be already at a capacity level higher
 		 * than the one required to boost the task.
+		 * However, if the task prefers idle cpu and that
+		 * cpu is idle, skip this check.
 		 */
 		new_util = max(min_util, new_util);
-		if (new_util > capacity_orig)
+		if (!(prefer_idle && idle_cpu(i))
+		    && new_util > capacity_orig)
 			continue;

 		/*
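With this change, a candidate CPU whose original capacity cannot accommodate the boosted utilization is still considered when the task prefers idle CPUs and that candidate is currently idle. A small sketch of the filter (skip_candidate() and the sample capacity numbers are illustrative assumptions, not kernel code):

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the modified capacity check: normally skip a CPU whose
 * capacity_orig is below the boosted utilization, but keep it when
 * the task prefers idle CPUs and this CPU is idle. */
static bool skip_candidate(bool prefer_idle, bool cpu_is_idle,
			   unsigned long new_util, unsigned long capacity_orig)
{
	return !(prefer_idle && cpu_is_idle) && new_util > capacity_orig;
}

int main(void)
{
	/* prefer_idle task, idle little CPU: kept despite the overflow */
	printf("%d\n", skip_candidate(true, true, 600, 512));
	/* ordinary task, same CPU: filtered out */
	printf("%d\n", skip_candidate(false, true, 600, 512));
	return 0;
}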
@@ -7673,12 +7686,22 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
 			    capacity_orig > target_capacity)
 				continue;
 			if (capacity_orig == target_capacity &&
-			    sysctl_sched_cstate_aware &&
-			    best_idle_cstate <= idle_idx)
-				continue;
+			    sysctl_sched_cstate_aware) {
+				if (best_idle_cstate < idle_idx)
+					continue;
+				/*
+				 * If the idle state of the cpu is the
+				 * same, select the least utilized.
+				 */
+				else if (best_idle_cstate ==
+						idle_idx &&
+					 best_idle_util <= new_util)
+					continue;
+			}

 			target_capacity = capacity_orig;
 			best_idle_cstate = idle_idx;
+			best_idle_util = new_util;
 			best_idle_cpu = i;
 			continue;
 		}
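Together with the new best_idle_util tracking, the idle-CPU selection now breaks ties by utilization: among idle candidates of the chosen capacity a shallower C-state still wins, but CPUs in the same C-state are compared by how loaded they already are. A compact sketch of that comparison (reject_idle_candidate() is a hypothetical helper summarizing the conditions above):

#include <stdbool.h>
#include <stdio.h>

/* Returns true when the candidate idle CPU should be rejected in
 * favour of the current best: the best is in a shallower C-state, or
 * it is in the same C-state and already no more utilized. */
static bool reject_idle_candidate(bool cstate_aware,
				  int best_idle_cstate, int idle_idx,
				  unsigned long best_idle_util,
				  unsigned long new_util)
{
	if (!cstate_aware)
		return false;
	if (best_idle_cstate < idle_idx)
		return true;
	if (best_idle_cstate == idle_idx && best_idle_util <= new_util)
		return true;
	return false;
}

int main(void)
{
	/* candidate sleeps deeper than the current best: rejected */
	printf("%d\n", reject_idle_candidate(true, 1, 2, 100, 50));
	/* same C-state but less utilized than the best: accepted */
	printf("%d\n", reject_idle_candidate(true, 1, 1, 100, 50));
	return 0;
}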