From 9a05300da09dcfc406ba1523c600f47bf61de049 Mon Sep 17 00:00:00 2001
From: Patrick Bellasi
Date: Thu, 4 Apr 2019 10:24:43 +0100
Subject: [PATCH] ANDROID: sched/tune: Move SchedTune cpu API into UtilClamp
 wrappers

The SchedTune CPU boosting API is currently used from sugov_get_util()
to get the boosted utilization and to pass it into schedutil_cpu_util().
When UtilClamp is in use instead, we call schedutil_cpu_util() passing
in just the CFS utilization, and the clamping is done internally on the
aggregated CFS+RT utilization for FREQUENCY_UTIL calls.

This asymmetry is not required; moreover, the schedutil code is
polluted by non-mainline SchedTune code. Wrap the SchedTune API call
related to CPU utilization boosting with a more generic, mainline-style
UtilClamp call:

 - uclamp_rq_util_with(cpu, util, p) <= boosted_cpu_util(cpu)

This new API is already used in schedutil_cpu_util() to clamp the
aggregated RT+CFS utilization on FREQUENCY_UTIL calls. Move the CPU
boosting into uclamp_rq_util_with() so that we remove any
SchedTune-specific bits from kernel/sched/cpufreq_schedutil.c.

Get rid of the no-longer-required boosted_cpu_util(cpu) method and
replace it with stune_util(cpu, util), whose signature is better
aligned with its uclamp_rq_util_with(cpu, util, p) counterpart.

Bug: 120440300
Signed-off-by: Patrick Bellasi
Signed-off-by: Qais Yousef
Change-Id: I45b0f0f54123fe0a2515fa9f1683842e6b99234f
[Removed superfluous __maybe_unused for capacity_orig_of]
Signed-off-by: Quentin Perret
---
 Documentation/scheduler/sched-tune.txt |  4 ++--
 kernel/sched/cpufreq_schedutil.c       | 10 ++++++++--
 kernel/sched/fair.c                    |  2 +-
 kernel/sched/sched.h                   |  8 ++++++++
 kernel/sched/tune.h                    |  4 +---
 5 files changed, 20 insertions(+), 8 deletions(-)

diff --git a/Documentation/scheduler/sched-tune.txt b/Documentation/scheduler/sched-tune.txt
index 1a103715f7bd..be728705fe25 100644
--- a/Documentation/scheduler/sched-tune.txt
+++ b/Documentation/scheduler/sched-tune.txt
@@ -233,9 +233,9 @@ Thus, with the sched_cfs_boost enabled we have the following main functions to
 get the current utilization of a CPU:
 
   cpu_util()
-  boosted_cpu_util()
+  stune_util()
 
-The new boosted_cpu_util() is similar to the first but returns a boosted
+The new stune_util() is similar to the first but returns a boosted
 utilization signal which is a function of the sched_cfs_boost value.
 
 This function is used in the CFS scheduler code paths where schedutil needs to
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 27e43e93dde3..d96a55ba7402 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -326,13 +326,19 @@ unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs,
 static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu)
 {
 	struct rq *rq = cpu_rq(sg_cpu->cpu);
-	unsigned long util = boosted_cpu_util(sg_cpu->cpu, cpu_util_rt(rq));
+#ifdef CONFIG_SCHED_TUNE
+	unsigned long util = stune_util(sg_cpu->cpu, cpu_util_rt(rq));
+#else
+	unsigned long util = cpu_util_freq(sg_cpu->cpu);
+#endif
+	unsigned long util_cfs = util - cpu_util_rt(rq);
 	unsigned long max = arch_scale_cpu_capacity(NULL, sg_cpu->cpu);
 
 	sg_cpu->max = max;
 	sg_cpu->bw_dl = cpu_bw_dl(rq);
 
-	return schedutil_cpu_util(sg_cpu->cpu, util, max, FREQUENCY_UTIL, NULL);
+	return schedutil_cpu_util(sg_cpu->cpu, util_cfs, max,
+				  FREQUENCY_UTIL, NULL);
 }
 
 /**
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index d076ff907e23..5e445aeeb74b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5868,7 +5868,7 @@ schedtune_task_margin(struct task_struct *task)
 }
 
 unsigned long
-boosted_cpu_util(int cpu, unsigned long other_util)
+stune_util(int cpu, unsigned long other_util)
 {
 	unsigned long util = min_t(unsigned long, SCHED_CAPACITY_SCALE,
 				   cpu_util_cfs(cpu_rq(cpu)) + other_util);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c152d03b5b21..233aeff00051 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2416,6 +2416,14 @@ static inline unsigned long cpu_util_rt(struct rq *rq)
 {
 	return READ_ONCE(rq->avg_rt.util_avg);
 }
+
+static inline unsigned long cpu_util_freq(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+
+	return min(cpu_util_cfs(rq) + cpu_util_rt(rq), capacity_orig_of(cpu));
+}
+
 #else /* CONFIG_CPU_FREQ_GOV_SCHEDUTIL */
 static inline unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs,
 				unsigned long max, enum schedutil_type type,
diff --git a/kernel/sched/tune.h b/kernel/sched/tune.h
index 821f026b510f..ef63866e3dbf 100644
--- a/kernel/sched/tune.h
+++ b/kernel/sched/tune.h
@@ -20,7 +20,7 @@ int schedtune_prefer_idle(struct task_struct *tsk);
 void schedtune_enqueue_task(struct task_struct *p, int cpu);
 void schedtune_dequeue_task(struct task_struct *p, int cpu);
 
-unsigned long boosted_cpu_util(int cpu, unsigned long other_util);
+unsigned long stune_util(int cpu, unsigned long other_util);
 
 #else /* CONFIG_SCHED_TUNE */
 
@@ -32,6 +32,4 @@ unsigned long boosted_cpu_util(int cpu, unsigned long other_util);
 #define schedtune_enqueue_task(task, cpu) do { } while (0)
 #define schedtune_dequeue_task(task, cpu) do { } while (0)
 
-#define boosted_cpu_util(cpu, other_util) cpu_util_cfs(cpu_rq(cpu))
-
 #endif /* CONFIG_SCHED_TUNE */
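
Editor's note: for readers who want to see the boost-then-clamp shape this
patch folds into uclamp_rq_util_with(), below is a minimal, self-contained
userspace C sketch. It is NOT the kernel implementation: the rq_stub type,
its field names (uclamp_min, uclamp_max, boost_margin), and the example
values are all hypothetical stand-ins; only the ordering (add a
SchedTune-style margin, then clamp into the uclamp window, then cap at the
capacity scale) mirrors what the patch describes.

#include <stdio.h>

#define SCHED_CAPACITY_SCALE 1024UL

/* Hypothetical stand-in for a runqueue's clamp state. */
struct rq_stub {
	unsigned long uclamp_min;   /* lowest utilization to report  */
	unsigned long uclamp_max;   /* highest utilization to report */
	unsigned long boost_margin; /* SchedTune-style boost margin  */
};

/*
 * Boost-then-clamp sketch: add the boost margin (the role stune_util()
 * plays in the patch), clamp into [uclamp_min, uclamp_max], then cap
 * at SCHED_CAPACITY_SCALE, as uclamp_rq_util_with() conceptually does.
 */
static unsigned long uclamp_util_sketch(const struct rq_stub *rq,
					unsigned long util)
{
	util += rq->boost_margin;
	if (util < rq->uclamp_min)
		util = rq->uclamp_min;
	if (util > rq->uclamp_max)
		util = rq->uclamp_max;
	return util < SCHED_CAPACITY_SCALE ? util : SCHED_CAPACITY_SCALE;
}

int main(void)
{
	struct rq_stub rq = {
		.uclamp_min   = 128,
		.uclamp_max   = 896,
		.boost_margin = 100,
	};

	/* 50 is boosted to 150, which already lies within [128, 896]. */
	printf("clamped util: %lu\n", uclamp_util_sketch(&rq, 50));
	/* 900 is boosted to 1000, then clamped down to uclamp_max (896). */
	printf("clamped util: %lu\n", uclamp_util_sketch(&rq, 900));
	return 0;
}

The same ordering explains why sugov_get_util() above can subtract
cpu_util_rt() back out after calling stune_util(): the boost is applied to
the aggregated CFS+RT signal, while schedutil_cpu_util() wants the CFS part
alone and redoes the clamped aggregation for FREQUENCY_UTIL internally.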