Use a sched_cluster_present static key to describe whether the cluster
scheduler is really working, and add a sched_cluster_active() API to
report that status. Only when a CPU's cluster mask is strictly larger
than its SMT mask and strictly smaller than its coregroup mask can we
be sure the cluster scheduler level is actually in use.
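For example, with hypothetical mask weights of 1 for SMT, 4 for the
cluster and 24 for the coregroup, the condition (4 > 1 && 4 < 24) holds
and the key is enabled; if the cluster mask matched either neighbouring
level, the key would stay disabled.

A sketch of the intended usage (illustrative only, not part of this
patch; the guarded body is a placeholder):

	/* Skip cluster-aware work unless the cluster level is in use. */
	if (sched_cluster_active()) {
		/* e.g. scan cpu_cluster_mask(target) before the LLC */
	}

Since sched_cluster_present is a static key, static_branch_likely()
compiles the disabled case down to a branch patched at runtime, so
systems without a distinct cluster level pay essentially no cost.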
Signed-off-by: Barry Song <song.bao.hua@hisilicon.com>
---
 include/linux/sched/cluster.h | 19 +++++++++++++++++++
 kernel/sched/core.c           | 20 ++++++++++++++++++++
 kernel/sched/fair.c           |  4 ++++
 kernel/sched/sched.h          |  1 +
 4 files changed, 44 insertions(+)
 create mode 100644 include/linux/sched/cluster.h
diff --git a/include/linux/sched/cluster.h b/include/linux/sched/cluster.h
new file mode 100644
index 0000000..ea6c475
--- /dev/null
+++ b/include/linux/sched/cluster.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_SCHED_CLUSTER_H
+#define _LINUX_SCHED_CLUSTER_H
+
+#include <linux/static_key.h>
+
+#ifdef CONFIG_SCHED_CLUSTER
+extern struct static_key_false sched_cluster_present;
+
+static __always_inline bool sched_cluster_active(void)
+{
+	return static_branch_likely(&sched_cluster_present);
+}
+#else
+static inline bool sched_cluster_active(void) { return false; }
+
+#endif
+
+#endif
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 9e9a5be..dd5984d 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -8655,6 +8655,17 @@ int sched_cpu_activate(unsigned int cpu)
 	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
 		static_branch_inc_cpuslocked(&sched_smt_present);
 #endif
+
+#ifdef CONFIG_SCHED_CLUSTER
+	/*
+	 * When going up, increment the number of cluster cpus with
+	 * cluster present.
+	 */
+	if (cpumask_weight(cpu_cluster_mask(cpu)) > cpumask_weight(cpu_smt_mask(cpu)) &&
+	    cpumask_weight(cpu_cluster_mask(cpu)) < cpumask_weight(cpu_coregroup_mask(cpu)))
+		static_branch_inc_cpuslocked(&sched_cluster_present);
+#endif
+
 	set_cpu_active(cpu, true);
 
 	if (sched_smp_initialized) {
@@ -8731,6 +8742,15 @@ int sched_cpu_deactivate(unsigned int cpu)
 	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
 		static_branch_dec_cpuslocked(&sched_smt_present);
 #endif
 
+#ifdef CONFIG_SCHED_CLUSTER
+	/*
+	 * When going down, decrement the number of cpus with cluster present.
+	 */
+	if (cpumask_weight(cpu_cluster_mask(cpu)) > cpumask_weight(cpu_smt_mask(cpu)) &&
+	    cpumask_weight(cpu_cluster_mask(cpu)) < cpumask_weight(cpu_coregroup_mask(cpu)))
+		static_branch_dec_cpuslocked(&sched_cluster_present);
+#endif
+
 	if (!sched_smp_initialized)
 		return 0;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index ce625bf..8578cb1 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5997,6 +5997,10 @@ static inline int __select_idle_cpu(int cpu, struct task_struct *p)
 	return -1;
 }
 
+#ifdef CONFIG_SCHED_CLUSTER
+DEFINE_STATIC_KEY_FALSE(sched_cluster_present);
+#endif
+
 #ifdef CONFIG_SCHED_SMT
 DEFINE_STATIC_KEY_FALSE(sched_smt_present);
 EXPORT_SYMBOL_GPL(sched_smt_present);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 8f0194c..0f8f610 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -6,6 +6,7 @@
 #include <linux/sched/autogroup.h>
 #include <linux/sched/clock.h>
+#include <linux/sched/cluster.h>
 #include <linux/sched/coredump.h>
 #include <linux/sched/cpufreq.h>
 #include <linux/sched/cputime.h>