ANDROID: sched/fair: Don't double account RT util in boosted_cpu_util()
WALT is a cross-class utilization tracking signal which accounts not
only for the utilization of FAIR tasks but also for that of RT and DL
tasks. PELT instead provides per-class utilization signals, which is
why boosted_cpu_util() currently aggregates the FAIR utilization with
the utilization generated by RT tasks.

However, the aggregation performed in boosted_cpu_util() is not
correct when WALT is in use, since we end up double accounting the
utilization generated by RT tasks. Even worse, we add a PELT signal
to a WALT signal.

Since:
   commit 63d1657 ("ANDROID: sched/rt: fix the problem that rt_rq's util is always zero.")
the RT rq signal is correctly updated. However, that fix did not
detect and remove the unconditional aggregation of the PELT signal
when WALT is in use.

Fix this by aggregating RT and FAIR utilization only when PELT is in
use. Do that by refactoring boosted_cpu_util() to return a single,
properly aggregated signal and by making sure that the correct
aggregation is performed in cpu_util_freq(), which already provides
the WALT-PELT switching logic.

Change-Id: Ifd738102e9102b210b7be2c2565ab796e3b57061
Fixes: 63d1657 ("ANDROID: sched/rt: fix the problem that rt_rq's util is always zero.")
Reported-by: Ke Wang <[email protected]>
Signed-off-by: Patrick Bellasi <[email protected]>
derkling authored and toddkjos committed Feb 25, 2019
1 parent 6bdf39b commit 07d1bac
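
For reference, a minimal user-space sketch of the aggregation rule this
change establishes (not the kernel code itself, which follows in the
diff below): under PELT, the FAIR and RT utilization are summed and
clamped to the CPU's original capacity, while under WALT the
cross-class signal is used on its own. The capacity constant, helper
names and sample values here are hypothetical.

#include <stdio.h>

/* Illustrative stand-in for the CPU's original capacity (hypothetical value). */
#define CAPACITY_ORIG 1024UL

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

/* PELT case: FAIR + RT utilization, clamped to the original capacity. */
static unsigned long util_freq_pelt(unsigned long util_fair, unsigned long util_rt)
{
	return min_ul(util_fair + util_rt, CAPACITY_ORIG);
}

/* WALT case: the cross-class WALT signal is used alone, clamped the same way. */
static unsigned long util_freq_walt(unsigned long walt_util)
{
	return min_ul(walt_util, CAPACITY_ORIG);
}

int main(void)
{
	/* Hypothetical samples, purely for demonstration. */
	printf("PELT: %lu\n", util_freq_pelt(300UL, 200UL)); /* 500: FAIR + RT */
	printf("WALT: %lu\n", util_freq_walt(700UL));        /* 700: RT is not added again */
	return 0;
}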
Showing 2 changed files with 19 additions and 18 deletions.
8 changes: 3 additions & 5 deletions kernel/sched/cpufreq_schedutil.c
@@ -19,7 +19,7 @@
 
 #include "sched.h"
 
-unsigned long boosted_cpu_util(int cpu, unsigned long other_util);
+unsigned long boosted_cpu_util(int cpu);
 
 #define SUGOV_KTHREAD_PRIORITY 50
 
@@ -210,13 +210,11 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy,
 
 static void sugov_get_util(unsigned long *util, unsigned long *max, int cpu)
 {
-	unsigned long max_cap, rt;
+	unsigned long max_cap;
 
 	max_cap = arch_scale_cpu_capacity(NULL, cpu);
 
-	rt = sched_get_rt_rq_util(cpu);
-
-	*util = boosted_cpu_util(cpu, rt);
+	*util = boosted_cpu_util(cpu);
 	*util = min(*util, max_cap);
 	*max = max_cap;
 }
29 changes: 16 additions & 13 deletions kernel/sched/fair.c
@@ -3474,12 +3474,6 @@ int update_rt_rq_load_avg(u64 now, int cpu, struct rt_rq *rt_rq, int running)
 	return ret;
 }
 
-unsigned long sched_get_rt_rq_util(int cpu)
-{
-	struct rt_rq *rt_rq = &(cpu_rq(cpu)->rt);
-	return rt_rq->avg.util_avg;
-}
-
 /*
  * Optional action to be done while updating the load average
  */
@@ -5189,11 +5183,11 @@ static inline void update_overutilized_status(struct rq *rq)
 	rcu_read_unlock();
 }
 
-unsigned long boosted_cpu_util(int cpu, unsigned long other_util);
+unsigned long boosted_cpu_util(int cpu);
 #else
 
 #define update_overutilized_status(rq) do {} while (0)
-#define boosted_cpu_util(cpu, other_util) cpu_util_freq(cpu)
+#define boosted_cpu_util(cpu) cpu_util_freq(cpu)
 
 #endif /* CONFIG_SMP */
 
@@ -5924,21 +5918,30 @@ static inline unsigned long cpu_util(int cpu)
 	return min_t(unsigned long, util, capacity_orig_of(cpu));
 }
 
+static inline unsigned long cpu_util_rt(int cpu)
+{
+	struct rt_rq *rt_rq = &(cpu_rq(cpu)->rt);
+
+	return rt_rq->avg.util_avg;
+}
+
 static inline unsigned long cpu_util_freq(int cpu)
 {
 #ifdef CONFIG_SCHED_WALT
 	u64 walt_cpu_util;
 
-	if (unlikely(walt_disabled || !sysctl_sched_use_walt_cpu_util))
-		return cpu_util(cpu);
+	if (unlikely(walt_disabled || !sysctl_sched_use_walt_cpu_util)) {
+		return min(cpu_util(cpu) + cpu_util_rt(cpu),
+			   capacity_orig_of(cpu));
+	}
 
 	walt_cpu_util = cpu_rq(cpu)->prev_runnable_sum;
 	walt_cpu_util <<= SCHED_CAPACITY_SHIFT;
 	do_div(walt_cpu_util, walt_ravg_window);
 
 	return min_t(unsigned long, walt_cpu_util, capacity_orig_of(cpu));
 #else
-	return cpu_util(cpu);
+	return min(cpu_util(cpu) + cpu_util_rt(cpu), capacity_orig_of(cpu));
 #endif
 }
 
@@ -6723,9 +6726,9 @@ schedtune_task_margin(struct task_struct *task)
 #endif /* CONFIG_SCHED_TUNE */
 
 unsigned long
-boosted_cpu_util(int cpu, unsigned long other_util)
+boosted_cpu_util(int cpu)
 {
-	unsigned long util = cpu_util_freq(cpu) + other_util;
+	unsigned long util = cpu_util_freq(cpu);
 	long margin = schedtune_cpu_margin(util, cpu);
 
 	trace_sched_boost_cpu(cpu, util, margin);
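
A side note on the WALT branch of cpu_util_freq() kept above:
prev_runnable_sum is a time quantity which the existing code scales by
SCHED_CAPACITY_SHIFT (a factor of 1024) and divides by the WALT window
to obtain a capacity-relative utilization. A worked example, assuming a
hypothetical 20 ms window in which the CPU was busy for 10 ms:

/* Hypothetical values in nanoseconds; real ones come from WALT accounting. */
unsigned long long walt_ravg_window = 20000000ULL;  /* assumed 20 ms window */
unsigned long long prev_runnable_sum = 10000000ULL; /* 10 ms of busy time */
unsigned long long util = (prev_runnable_sum << 10) / walt_ravg_window; /* SCHED_CAPACITY_SHIFT == 10 */
/* util == 512, i.e. half of the 1024 capacity scale */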
