diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index b65c6ba6baf3..d6f389a7116e 100755
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6639,8 +6639,8 @@ static int wake_wide(struct task_struct *p, int sibling_count_hint)
  * soonest. For the purpose of speed we only consider the waking and previous
  * CPU.
  *
- * wake_affine_idle() - only considers 'now', it check if the waking CPU is (or
- * will be) idle.
+ * wake_affine_idle() - only considers 'now', it checks if the waking CPU is
+ * cache-affine and is (or will be) idle.
  *
  * wake_affine_weight() - considers the weight to reflect the average
  *			  scheduling latency of the CPUs. This seems to work
@@ -6650,7 +6650,13 @@ static int wake_wide(struct task_struct *p, int sibling_count_hint)
 static int
 wake_affine_idle(int this_cpu, int prev_cpu, int sync)
 {
-	if (idle_cpu(this_cpu))
+	/*
+	 * If this_cpu is idle, it implies the wakeup is from interrupt
+	 * context. Only allow the move if cache is shared. Otherwise an
+	 * interrupt intensive workload could force all tasks onto one
+	 * node depending on the IO topology or IRQ affinity settings.
+	 */
+	if (idle_cpu(this_cpu) && cpus_share_cache(this_cpu, prev_cpu))
 		return this_cpu;
 
 	if (sync && cpu_rq(this_cpu)->nr_running == 1)
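
To make the behavioural change concrete, here is a minimal userspace sketch of
the decision the patched hunk implements. It is an illustration only, not
kernel code: cpu_is_idle(), caches_shared(), struct cpu_state and the fallback
to prev_cpu are made-up stand-ins for idle_cpu(), cpus_share_cache(), the
runqueue state and the tail of wake_affine_idle(), which is not visible in
this hunk.

/* wake_pick.c - userspace illustration of the wake_affine_idle() decision.
 *
 * cpu_is_idle(), caches_shared() and struct cpu_state are stand-ins for
 * the kernel's idle_cpu(), cpus_share_cache() and runqueue state; the
 * fallback to prev_cpu is an assumption made for illustration only.
 */
#include <stdbool.h>
#include <stdio.h>

struct cpu_state {
	bool idle;		/* CPU has nothing runnable */
	int nr_running;		/* tasks currently on its runqueue */
	int llc_id;		/* last-level-cache domain of the CPU */
};

static struct cpu_state cpus[] = {
	[0] = { .idle = true,  .nr_running = 0, .llc_id = 0 },	/* waking CPU   */
	[4] = { .idle = false, .nr_running = 3, .llc_id = 1 },	/* previous CPU */
};

static bool cpu_is_idle(int cpu)
{
	return cpus[cpu].idle;
}

static bool caches_shared(int a, int b)
{
	return cpus[a].llc_id == cpus[b].llc_id;
}

/*
 * Mirror of the patched condition: an idle waker (most likely an interrupt
 * wakeup) only pulls the task over if doing so does not cross an LLC
 * boundary.
 */
static int pick_wake_cpu(int this_cpu, int prev_cpu, int sync)
{
	if (cpu_is_idle(this_cpu) && caches_shared(this_cpu, prev_cpu))
		return this_cpu;

	/* Synchronous wakeup and the waker is about to go to sleep. */
	if (sync && cpus[this_cpu].nr_running == 1)
		return this_cpu;

	return prev_cpu;	/* assumed fallback: keep the task where it ran */
}

int main(void)
{
	/*
	 * CPU 0 is idle but sits on a different LLC than CPU 4, so the
	 * wakeup stays on CPU 4 with the patched check in place.
	 */
	printf("wake target: CPU %d\n", pick_wake_cpu(0, 4, 0));
	return 0;
}

With the sample topology above, the idle check alone would have pulled the
task onto CPU 0 across the cache boundary; the added cpus_share_cache() test
keeps it on its previous CPU instead, which is the interrupt-heavy scenario
the in-code comment describes.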