@@ -76,8 +76,6 @@ walt_dec_cfs_rq_stats(struct cfs_rq *cfs_rq, struct task_struct *p) {}
 #endif
 
-static inline bool cpu_is_in_target_set(struct task_struct *p, int cpu);
-
 /*
  * Targeted preemption latency for CPU-bound tasks:
  *
@@ -5804,6 +5802,21 @@ static unsigned long __cpu_norm_util(unsigned long util, unsigned long capacity)
 	return (util << SCHED_CAPACITY_SHIFT) / capacity;
 }
 
+/*
+ * Check whether cpu is in the fastest set of CPUs that p should run on.
+ * If p is boosted, prefer that p runs on a faster cpu; otherwise, allow p
+ * to run on any cpu.
+ */
+static inline bool
+cpu_is_in_target_set(struct task_struct *p, int cpu)
+{
+	struct root_domain *rd = cpu_rq(cpu)->rd;
+	int first_cpu = (schedtune_task_boost(p)) ?
+				rd->mid_cap_orig_cpu : rd->min_cap_orig_cpu;
+	int next_usable_cpu = cpumask_next(first_cpu - 1, &p->cpus_allowed);
+	return cpu >= next_usable_cpu || next_usable_cpu >= nr_cpu_ids;
+}
+
 static inline bool
 bias_to_this_cpu(struct task_struct *p, int cpu, struct cpumask *rtg_target)
 {
@@ -7445,21 +7458,6 @@ enum fastpaths {
 	MANY_WAKEUP,
 };
 
-/*
- * Check whether cpu is in the fastest set of CPUs that p should run on.
- * If p is boosted, prefer that p runs on a faster cpu; otherwise, allow p
- * to run on any cpu.
- */
-static inline bool
-cpu_is_in_target_set(struct task_struct *p, int cpu)
-{
-	struct root_domain *rd = cpu_rq(cpu)->rd;
-	int first_cpu = (schedtune_task_boost(p)) ?
-				rd->mid_cap_orig_cpu : rd->min_cap_orig_cpu;
-	int next_usable_cpu = cpumask_next(first_cpu - 1, &p->cpus_allowed);
-	return cpu >= next_usable_cpu || next_usable_cpu >= nr_cpu_ids;
-}
-
 static inline int find_best_target(struct task_struct *p, int *backup_cpu,
 				   bool boosted, bool prefer_idle,
 				   struct find_best_target_env *fbt_env)
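
For intuition, the following is a minimal userspace sketch of the predicate that
cpu_is_in_target_set() implements. The 8-CPU system, the plain bitmask standing
in for p->cpus_allowed, and the mask_next()/in_target_set() helpers are all
illustrative assumptions, not kernel APIs; mask_next() only mimics the kernel's
cpumask_next(), including its convention of returning a value >= nr_cpu_ids when
no further bit is set.

/* Hypothetical userspace model of cpu_is_in_target_set(); not kernel code. */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 8	/* assumed 8-CPU system; plays the role of nr_cpu_ids */

/* Stand-in for cpumask_next(): lowest set bit in @mask strictly above @n. */
static int mask_next(int n, unsigned int mask)
{
	int cpu;

	for (cpu = n + 1; cpu < NR_CPUS; cpu++)
		if (mask & (1u << cpu))
			return cpu;
	return NR_CPUS;	/* no usable CPU: mirrors returning >= nr_cpu_ids */
}

/*
 * Same check as the patch: @cpu qualifies if it sits at or above the first
 * allowed CPU that is >= @first_cpu, or if no such allowed CPU exists.
 */
static bool in_target_set(int cpu, int first_cpu, unsigned int allowed)
{
	int next_usable_cpu = mask_next(first_cpu - 1, allowed);

	return cpu >= next_usable_cpu || next_usable_cpu >= NR_CPUS;
}

int main(void)
{
	unsigned int allowed = 0xf0;	/* task may run on CPUs 4-7 */
	int first_cpu = 4;	/* boosted task targets the mid-capacity CPU */

	printf("cpu 2 -> %d\n", in_target_set(2, first_cpu, allowed));	/* 0 */
	printf("cpu 5 -> %d\n", in_target_set(5, first_cpu, allowed));	/* 1 */
	printf("empty mask -> %d\n", in_target_set(2, first_cpu, 0));	/* 1 */
	return 0;
}

Passing first_cpu - 1 to cpumask_next() makes the search include first_cpu
itself, and the next_usable_cpu >= nr_cpu_ids clause keeps the check permissive
when the task's affinity mask contains no CPU at or above the target cluster.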