wake_affine will need the per-CPU cluster domain and a cpus_share_cluster() helper to select the proper idle sibling within the cluster. Add per-CPU sd_cluster and sd_cluster_id, set up from the new SD_SHARE_CLS_RESOURCES topology flag, and implement cpus_share_cluster() on top of sd_cluster_id.
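For context only (not part of this patch): a later wake-up-path change could consult cpus_share_cluster() much as wake_affine_idle() consults cpus_share_cache() today. The sketch below is illustrative; the helper name wake_affine_cluster() is hypothetical.

static int wake_affine_cluster(int this_cpu, int prev_cpu)
{
	/*
	 * Prefer the waking CPU when it is idle and shares the cluster
	 * (e.g. L2 or LLC tags) with the task's previous CPU.
	 */
	if (available_idle_cpu(this_cpu) && cpus_share_cluster(this_cpu, prev_cpu))
		return this_cpu;

	return prev_cpu;
}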
Signed-off-by: Barry Song <song.bao.hua@hisilicon.com>
---
 include/linux/sched/sd_flags.h | 9 +++++++++
 include/linux/sched/topology.h | 8 +++++++-
 kernel/sched/core.c            | 5 +++++
 kernel/sched/sched.h           | 2 ++
 kernel/sched/topology.c        | 11 +++++++++++
 5 files changed, 34 insertions(+), 1 deletion(-)
diff --git a/include/linux/sched/sd_flags.h b/include/linux/sched/sd_flags.h
index 34b21e9..e4e651e 100644
--- a/include/linux/sched/sd_flags.h
+++ b/include/linux/sched/sd_flags.h
@@ -100,6 +100,15 @@ SD_FLAG(SD_SHARE_CPUCAPACITY, SDF_SHARED_CHILD | SDF_NEEDS_GROUPS)
 /*
+ * Domain members share CPU cluster resources (i.e. llc cache tags or l2)
+ *
+ * SHARED_CHILD: Set from the base domain up until spanned CPUs no longer share
+ *               the cluster resources (such as llc tags or l2)
+ * NEEDS_GROUPS: Caches are shared between groups.
+ */
+SD_FLAG(SD_SHARE_CLS_RESOURCES, SDF_SHARED_CHILD | SDF_NEEDS_GROUPS)
+
+/*
  * Domain members share CPU package resources (i.e. caches)
  *
  * SHARED_CHILD: Set from the base domain up until spanned CPUs no longer share
diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index 2f9166f..feb6339 100644
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -45,7 +45,7 @@ static inline int cpu_smt_flags(void)
 #ifdef CONFIG_SCHED_CLUSTER
 static inline int cpu_cluster_flags(void)
 {
-	return SD_SHARE_PKG_RESOURCES;
+	return SD_SHARE_CLS_RESOURCES | SD_SHARE_PKG_RESOURCES;
 }
 #endif
@@ -177,6 +177,7 @@ extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
 void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
 
 bool cpus_share_cache(int this_cpu, int that_cpu);
+bool cpus_share_cluster(int this_cpu, int that_cpu);
 
 typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
 typedef int (*sched_domain_flags_f)(void);
@@ -230,6 +231,11 @@ static inline bool cpus_share_cache(int this_cpu, int that_cpu)
 	return true;
 }
 
+static inline bool cpus_share_cluster(int this_cpu, int that_cpu)
+{
+	return true;
+}
+
 #endif /* !CONFIG_SMP */
 
 #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index dd5984d..47a4c82 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3502,6 +3502,11 @@ bool cpus_share_cache(int this_cpu, int that_cpu)
 	return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
 }
 
+bool cpus_share_cluster(int this_cpu, int that_cpu)
+{
+	return per_cpu(sd_cluster_id, this_cpu) == per_cpu(sd_cluster_id, that_cpu);
+}
+
 static inline bool ttwu_queue_cond(int cpu, int wake_flags)
 {
 	/*
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 0f8f610..193e70d 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1765,6 +1765,8 @@ static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
 DECLARE_PER_CPU(int, sd_llc_size);
 DECLARE_PER_CPU(int, sd_llc_id);
 DECLARE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared);
+DECLARE_PER_CPU(struct sched_domain __rcu *, sd_cluster);
+DECLARE_PER_CPU(int, sd_cluster_id);
 DECLARE_PER_CPU(struct sched_domain __rcu *, sd_numa);
 DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing);
 DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity);
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 2b1bc26..63e17fe 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -644,6 +644,8 @@ static void destroy_sched_domains(struct sched_domain *sd)
 DEFINE_PER_CPU(struct sched_domain __rcu *, sd_llc);
 DEFINE_PER_CPU(int, sd_llc_size);
 DEFINE_PER_CPU(int, sd_llc_id);
+DEFINE_PER_CPU(struct sched_domain __rcu *, sd_cluster);
+DEFINE_PER_CPU(int, sd_cluster_id);
 DEFINE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared);
 DEFINE_PER_CPU(struct sched_domain __rcu *, sd_numa);
 DEFINE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing);
@@ -657,6 +659,14 @@ static void update_top_cache_domain(int cpu)
 	int id = cpu;
 	int size = 1;
 
+	sd = highest_flag_domain(cpu, SD_SHARE_CLS_RESOURCES);
+	if (sd) {
+		id = cpumask_first(sched_domain_span(sd));
+		size = cpumask_weight(sched_domain_span(sd));
+	}
+	rcu_assign_pointer(per_cpu(sd_cluster, cpu), sd);
+	per_cpu(sd_cluster_id, cpu) = id;
+
 	sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
 	if (sd) {
 		id = cpumask_first(sched_domain_span(sd));
@@ -1392,6 +1402,7 @@ static void claim_allocations(int cpu, struct sched_domain *sd)
  */
 #define TOPOLOGY_SD_FLAGS		\
 	(SD_SHARE_CPUCAPACITY	|	\
+	 SD_SHARE_CLS_RESOURCES	|	\
 	 SD_SHARE_PKG_RESOURCES |	\
 	 SD_NUMA		|	\
 	 SD_ASYM_PACKING)
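
A rough sketch (illustrative only, not part of this patch; the helper name is hypothetical) of how a follow-up could use the new sd_cluster pointer to scan the cluster span for an idle CPU before falling back to the wider LLC scan:

static int select_idle_cluster(struct task_struct *p, int target)
{
	struct sched_domain *sd;
	int cpu;

	/* sd_cluster is set up by update_top_cache_domain() above. */
	sd = rcu_dereference(per_cpu(sd_cluster, target));
	if (!sd)
		return -1;

	/* Prefer an idle CPU sharing the cluster (L2/LLC tags) with target. */
	for_each_cpu_wrap(cpu, sched_domain_span(sd), target) {
		if (!cpumask_test_cpu(cpu, p->cpus_ptr))
			continue;
		if (available_idle_cpu(cpu))
			return cpu;
	}

	return -1;
}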