From 07d1bac4216939d24e98025411588f6e257bda6d Mon Sep 17 00:00:00 2001
From: Patrick Bellasi
Date: Fri, 15 Feb 2019 11:49:36 +0000
Subject: [PATCH] ANDROID: sched/fair: Don't double account RT util in boosted_cpu_util()

WALT is a cross-class utilization tracking signal, which accounts not
only for the utilization of FAIR tasks but also for that of RT and DL
tasks. PELT instead provides per-class utilization signals, which is
why boosted_cpu_util() currently aggregates the FAIR utilization with
the one generated by RT tasks.

However, the aggregation performed in boosted_cpu_util() is not correct
when WALT is in use, since we end up double accounting the utilization
generated by RT tasks. Even worse, we add a PELT signal to a WALT
signal.

Since commit 63d1657d00e0 ("ANDROID: sched/rt: fix the problem that
rt_rq's util is always zero.") the rt_rq utilization signal is
correctly updated. However, that fix did not remove the unconditional
aggregation of the PELT signal when WALT is in use.

Fix this by aggregating RT and FAIR utilization only when PELT is in
use. Do that by refactoring boosted_cpu_util() to consume just one,
properly aggregated, signal and by making sure that the correct
aggregation is performed in cpu_util_freq(), which already provides the
WALT/PELT switching logic.

Change-Id: Ifd738102e9102b210b7be2c2565ab796e3b57061
Fixes: 63d1657d00e0 ("ANDROID: sched/rt: fix the problem that rt_rq's util is always zero.")
Reported-by: Ke Wang
Signed-off-by: Patrick Bellasi
---
 kernel/sched/cpufreq_schedutil.c |  8 +++-----
 kernel/sched/fair.c              | 29 ++++++++++++++++-------------
 2 files changed, 19 insertions(+), 18 deletions(-)

diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index d70794a6ee83f9..029bfcf00ad69e 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -19,7 +19,7 @@
 
 #include "sched.h"
 
-unsigned long boosted_cpu_util(int cpu, unsigned long other_util);
+unsigned long boosted_cpu_util(int cpu);
 
 #define SUGOV_KTHREAD_PRIORITY	50
 
@@ -210,13 +210,11 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy,
 
 static void sugov_get_util(unsigned long *util, unsigned long *max, int cpu)
 {
-	unsigned long max_cap, rt;
+	unsigned long max_cap;
 
 	max_cap = arch_scale_cpu_capacity(NULL, cpu);
 
-	rt = sched_get_rt_rq_util(cpu);
-
-	*util = boosted_cpu_util(cpu, rt);
+	*util = boosted_cpu_util(cpu);
 	*util = min(*util, max_cap);
 	*max = max_cap;
 }
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 94bcb9dbae4f9c..7fb7ec0ef868e1 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3474,12 +3474,6 @@ int update_rt_rq_load_avg(u64 now, int cpu, struct rt_rq *rt_rq, int running)
 	return ret;
 }
 
-unsigned long sched_get_rt_rq_util(int cpu)
-{
-	struct rt_rq *rt_rq = &(cpu_rq(cpu)->rt);
-	return rt_rq->avg.util_avg;
-}
-
 /*
  * Optional action to be done while updating the load average
  */
@@ -5189,11 +5183,11 @@ static inline void update_overutilized_status(struct rq *rq)
 	rcu_read_unlock();
 }
 
-unsigned long boosted_cpu_util(int cpu, unsigned long other_util);
+unsigned long boosted_cpu_util(int cpu);
 #else
 
 #define update_overutilized_status(rq) do {} while (0)
-#define boosted_cpu_util(cpu, other_util) cpu_util_freq(cpu)
+#define boosted_cpu_util(cpu) cpu_util_freq(cpu)
 
 #endif /* CONFIG_SMP */
 
@@ -5924,13 +5918,22 @@ static inline unsigned long cpu_util(int cpu)
 	return min_t(unsigned long, util, capacity_orig_of(cpu));
 }
 
+static inline unsigned long cpu_util_rt(int cpu)
+{
+	struct rt_rq *rt_rq = &(cpu_rq(cpu)->rt);
+
+	return rt_rq->avg.util_avg;
+}
+
 static inline unsigned long cpu_util_freq(int cpu)
 {
 #ifdef CONFIG_SCHED_WALT
 	u64 walt_cpu_util;
 
-	if (unlikely(walt_disabled || !sysctl_sched_use_walt_cpu_util))
-		return cpu_util(cpu);
+	if (unlikely(walt_disabled || !sysctl_sched_use_walt_cpu_util)) {
+		return min(cpu_util(cpu) + cpu_util_rt(cpu),
+			   capacity_orig_of(cpu));
+	}
 
 	walt_cpu_util = cpu_rq(cpu)->prev_runnable_sum;
 	walt_cpu_util <<= SCHED_CAPACITY_SHIFT;
@@ -5938,7 +5941,7 @@ static inline unsigned long cpu_util_freq(int cpu)
 	return min_t(unsigned long, walt_cpu_util,
 		     capacity_orig_of(cpu));
 #else
-	return cpu_util(cpu);
+	return min(cpu_util(cpu) + cpu_util_rt(cpu), capacity_orig_of(cpu));
 #endif
 }
 
@@ -6723,9 +6726,9 @@ schedtune_task_margin(struct task_struct *task)
 #endif /* CONFIG_SCHED_TUNE */
 
 unsigned long
-boosted_cpu_util(int cpu, unsigned long other_util)
+boosted_cpu_util(int cpu)
 {
-	unsigned long util = cpu_util_freq(cpu) + other_util;
+	unsigned long util = cpu_util_freq(cpu);
 	long margin = schedtune_cpu_margin(util, cpu);
 
 	trace_sched_boost_cpu(cpu, util, margin);