commit: 3840a675683c2df1aea2f9efed23617ce7eb9e01
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Oct 3 19:14:20 2021 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Oct 3 19:14:20 2021 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=3840a675

Upgrade BMQ and PDS io scheduler to version v5.14-r3

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

0000_README | 2 +-
...=> 5020_BMQ-and-PDS-io-scheduler-v5.14-r3.patch | 284 ++++++++++-----------
2 files changed, 142 insertions(+), 144 deletions(-)

diff --git a/0000_README b/0000_README
index 21444f8..2d15afd 100644
--- a/0000_README
+++ b/0000_README
@@ -115,7 +115,7 @@ Patch: 5010_enable-cpu-optimizations-universal.patch
From: https://github.com/graysky2/kernel_compiler_patch
Desc: Kernel >= 5.8 patch enables gcc = v9+ optimizations for additional CPUs.

-Patch: 5020_BMQ-and-PDS-io-scheduler-v5.14-r1.patch
+Patch: 5020_BMQ-and-PDS-io-scheduler-v5.14-r3.patch
From: https://gitlab.com/alfredchen/linux-prjc
Desc: BMQ(BitMap Queue) Scheduler. A new CPU scheduler developed from PDS(incld). Inspired by the scheduler in zircon.

diff --git a/5020_BMQ-and-PDS-io-scheduler-v5.14-r1.patch b/5020_BMQ-and-PDS-io-scheduler-v5.14-r3.patch
similarity index 98%
rename from 5020_BMQ-and-PDS-io-scheduler-v5.14-r1.patch
rename to 5020_BMQ-and-PDS-io-scheduler-v5.14-r3.patch
index 4c6f75c..99adff7 100644
--- a/5020_BMQ-and-PDS-io-scheduler-v5.14-r1.patch
+++ b/5020_BMQ-and-PDS-io-scheduler-v5.14-r3.patch
@@ -341,6 +341,20 @@ index e5af028c08b4..0a7565d0d3cf 100644
return false;
}

+diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
+index 8f0f778b7c91..991f2280475b 100644
+--- a/include/linux/sched/topology.h
++++ b/include/linux/sched/topology.h
+@@ -225,7 +225,8 @@ static inline bool cpus_share_cache(int this_cpu, int that_cpu)
+
+ #endif /* !CONFIG_SMP */
+
+-#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
++#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) && \
++ !defined(CONFIG_SCHED_ALT)
+ extern void rebuild_sched_domains_energy(void);
+ #else
+ static inline void rebuild_sched_domains_energy(void)
diff --git a/init/Kconfig b/init/Kconfig
index 55f9f7738ebb..9a9b244d3ca3 100644
--- a/init/Kconfig
@@ -659,10 +673,10 @@ index 978fcfca5871..0425ee149b4d 100644
obj-$(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) += cpufreq_schedutil.o
diff --git a/kernel/sched/alt_core.c b/kernel/sched/alt_core.c
new file mode 100644
-index 000000000000..900889c838ea
+index 000000000000..56aed2b1e42c
--- /dev/null
+++ b/kernel/sched/alt_core.c
-@@ -0,0 +1,7248 @@
+@@ -0,0 +1,7341 @@
+/*
+ * kernel/sched/alt_core.c
+ *
@@ -732,7 +746,7 @@ index 000000000000..900889c838ea
+#define sched_feat(x) (0)
+#endif /* CONFIG_SCHED_DEBUG */
+
-+#define ALT_SCHED_VERSION "v5.14-r1"
++#define ALT_SCHED_VERSION "v5.14-r3"
+
+/* rt_prio(prio) defined in include/linux/sched/rt.h */
+#define rt_task(p) rt_prio((p)->prio)
@@ -1249,6 +1263,101 @@ index 000000000000..900889c838ea
+ update_rq_clock_task(rq, delta);
+}
+
++/*
++ * RQ Load update routine
++ */
++#define RQ_LOAD_HISTORY_BITS (sizeof(s32) * 8ULL)
++#define RQ_UTIL_SHIFT (8)
++#define RQ_LOAD_HISTORY_TO_UTIL(l) (((l) >> (RQ_LOAD_HISTORY_BITS - 1 - RQ_UTIL_SHIFT)) & 0xff)
++
++#define LOAD_BLOCK(t) ((t) >> 17)
++#define LOAD_HALF_BLOCK(t) ((t) >> 16)
++#define BLOCK_MASK(t) ((t) & ((0x01 << 18) - 1))
++#define LOAD_BLOCK_BIT(b) (1UL << (RQ_LOAD_HISTORY_BITS - 1 - (b)))
++#define CURRENT_LOAD_BIT LOAD_BLOCK_BIT(0)
++
++static inline void rq_load_update(struct rq *rq)
++{
++ u64 time = rq->clock;
++ u64 delta = min(LOAD_BLOCK(time) - LOAD_BLOCK(rq->load_stamp),
++ RQ_LOAD_HISTORY_BITS - 1);
++ u64 prev = !!(rq->load_history & CURRENT_LOAD_BIT);
++ u64 curr = !!cpu_rq(rq->cpu)->nr_running;
++
++ if (delta) {
++ rq->load_history = rq->load_history >> delta;
++
++ if (delta < RQ_UTIL_SHIFT) {
++ rq->load_block += (~BLOCK_MASK(rq->load_stamp)) * prev;
++ if (!!LOAD_HALF_BLOCK(rq->load_block) ^ curr)
++ rq->load_history ^= LOAD_BLOCK_BIT(delta);
++ }
++
++ rq->load_block = BLOCK_MASK(time) * prev;
++ } else {
++ rq->load_block += (time - rq->load_stamp) * prev;
++ }
++ if (prev ^ curr)
++ rq->load_history ^= CURRENT_LOAD_BIT;
++ rq->load_stamp = time;
++}
++
++unsigned long rq_load_util(struct rq *rq, unsigned long max)
++{
++ return RQ_LOAD_HISTORY_TO_UTIL(rq->load_history) * (max >> RQ_UTIL_SHIFT);
++}
++
++#ifdef CONFIG_SMP
++unsigned long sched_cpu_util(int cpu, unsigned long max)
++{
++ return rq_load_util(cpu_rq(cpu), max);
++}
++#endif /* CONFIG_SMP */
++
++#ifdef CONFIG_CPU_FREQ
++/**
++ * cpufreq_update_util - Take a note about CPU utilization changes.
++ * @rq: Runqueue to carry out the update for.
++ * @flags: Update reason flags.
++ *
++ * This function is called by the scheduler on the CPU whose utilization is
++ * being updated.
++ *
++ * It can only be called from RCU-sched read-side critical sections.
++ *
++ * The way cpufreq is currently arranged requires it to evaluate the CPU
++ * performance state (frequency/voltage) on a regular basis to prevent it from
++ * being stuck in a completely inadequate performance level for too long.
++ * That is not guaranteed to happen if the updates are only triggered from CFS
++ * and DL, though, because they may not be coming in if only RT tasks are
++ * active all the time (or there are RT tasks only).
++ *
++ * As a workaround for that issue, this function is called periodically by the
++ * RT sched class to trigger extra cpufreq updates to prevent it from stalling,
++ * but that really is a band-aid. Going forward it should be replaced with
++ * solutions targeted more specifically at RT tasks.
++ */
++static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
++{
++ struct update_util_data *data;
++
++#ifdef CONFIG_SMP
++ rq_load_update(rq);
++#endif
++ data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data,
++ cpu_of(rq)));
++ if (data)
++ data->func(data, rq_clock(rq), flags);
++}
++#else
++static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
++{
++#ifdef CONFIG_SMP
++ rq_load_update(rq);
++#endif
++}
++#endif /* CONFIG_CPU_FREQ */
++
+#ifdef CONFIG_NO_HZ_FULL
+/*
+ * Tick may be needed by tasks in the runqueue depending on their policy and
@@ -4038,6 +4147,7 @@ index 000000000000..900889c838ea
+ s64 ns = rq->clock_task - p->last_ran;
+
+ p->sched_time += ns;
++ cgroup_account_cputime(p, ns);
+ account_group_exec_runtime(p, ns);
+
+ p->time_slice -= ns;
@@ -4600,6 +4710,7 @@ index 000000000000..900889c838ea
+ if (cpumask_test_cpu(dest_cpu, p->cpus_ptr)) {
+ __SCHED_DEQUEUE_TASK(p, rq, 0, );
+ set_task_cpu(p, dest_cpu);
++ sched_task_sanity_check(p, dest_rq);
+ __SCHED_ENQUEUE_TASK(p, dest_rq, 0);
+ nr_migrated++;
+ }
@@ -5753,11 +5864,7 @@ index 000000000000..900889c838ea
+ * the runqueue. This will be done when the task deboost
+ * itself.
+ */
-+ if (rt_effective_prio(p, newprio) == p->prio) {
-+ __setscheduler_params(p, attr);
-+ retval = 0;
-+ goto unlock;
-+ }
++ newprio = rt_effective_prio(p, newprio);
+ }
+
+ if (!(attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)) {
@@ -6969,7 +7076,6 @@ index 000000000000..900889c838ea
+ struct task_struct *push_task = rq->curr;
+
+ lockdep_assert_held(&rq->lock);
-+ SCHED_WARN_ON(rq->cpu != smp_processor_id());
+
+ /*
+ * Ensure the thing is persistent until balance_push_set(.on = false);
@@ -6977,9 +7083,10 @@ index 000000000000..900889c838ea
+ rq->balance_callback = &balance_push_callback;
+
+ /*
-+ * Only active while going offline.
++ * Only active while going offline and when invoked on the outgoing
++ * CPU.
+ */
-+ if (!cpu_dying(rq->cpu))
++ if (!cpu_dying(rq->cpu) || rq != this_rq())
+ return;
+
+ /*
@@ -7950,10 +8057,10 @@ index 000000000000..1212a031700e
+{}
diff --git a/kernel/sched/alt_sched.h b/kernel/sched/alt_sched.h
new file mode 100644
-index 000000000000..f03af9ab9123
+index 000000000000..289058a09bd5
--- /dev/null
+++ b/kernel/sched/alt_sched.h
-@@ -0,0 +1,692 @@
+@@ -0,0 +1,666 @@
+#ifndef ALT_SCHED_H
+#define ALT_SCHED_H
+
@@ -8153,6 +8260,7 @@ index 000000000000..f03af9ab9123
+ struct rcuwait hotplug_wait;
+#endif
+ unsigned int nr_pinned;
++
+#endif /* CONFIG_SMP */
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+ u64 prev_irq_time;
@@ -8164,6 +8272,11 @@ index 000000000000..f03af9ab9123
+ u64 prev_steal_time_rq;
+#endif /* CONFIG_PARAVIRT_TIME_ACCOUNTING */
+
++ /* For genenal cpu load util */
++ s32 load_history;
++ u64 load_block;
++ u64 load_stamp;
++
+ /* calc_load related fields */
+ unsigned long calc_load_update;
+ long calc_load_active;
@@ -8216,6 +8329,8 @@ index 000000000000..f03af9ab9123
+#endif /* CONFIG_NO_HZ_COMMON */
+};
+
++extern unsigned long rq_load_util(struct rq *rq, unsigned long max);
++
+extern unsigned long calc_load_update;
+extern atomic_long_t calc_load_tasks;
+
@@ -8528,40 +8643,6 @@ index 000000000000..f03af9ab9123
+
+#ifdef CONFIG_CPU_FREQ
+DECLARE_PER_CPU(struct update_util_data __rcu *, cpufreq_update_util_data);
-+
-+/**
-+ * cpufreq_update_util - Take a note about CPU utilization changes.
-+ * @rq: Runqueue to carry out the update for.
-+ * @flags: Update reason flags.
-+ *
-+ * This function is called by the scheduler on the CPU whose utilization is
-+ * being updated.
-+ *
-+ * It can only be called from RCU-sched read-side critical sections.
-+ *
-+ * The way cpufreq is currently arranged requires it to evaluate the CPU
-+ * performance state (frequency/voltage) on a regular basis to prevent it from
-+ * being stuck in a completely inadequate performance level for too long.
-+ * That is not guaranteed to happen if the updates are only triggered from CFS
-+ * and DL, though, because they may not be coming in if only RT tasks are
-+ * active all the time (or there are RT tasks only).
-+ *
-+ * As a workaround for that issue, this function is called periodically by the
-+ * RT sched class to trigger extra cpufreq updates to prevent it from stalling,
-+ * but that really is a band-aid. Going forward it should be replaced with
-+ * solutions targeted more specifically at RT tasks.
-+ */
-+static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
-+{
-+ struct update_util_data *data;
-+
-+ data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data,
-+ cpu_of(rq)));
-+ if (data)
-+ data->func(data, rq_clock(rq), flags);
-+}
-+#else
-+static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
+#endif /* CONFIG_CPU_FREQ */
+
+#ifdef CONFIG_NO_HZ_FULL
@@ -8764,88 +8845,25 @@ index 000000000000..be3ee4a553ca
+
+static inline void update_rq_time_edge(struct rq *rq) {}
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
-index 57124614363d..4057e51cef45 100644
+index 57124614363d..f0e9c7543542 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
-@@ -57,6 +57,13 @@ struct sugov_cpu {
- unsigned long bw_dl;
- unsigned long max;
-
-+#ifdef CONFIG_SCHED_ALT
-+ /* For genenal cpu load util */
-+ s32 load_history;
-+ u64 load_block;
-+ u64 load_stamp;
-+#endif
-+
- /* The field below is for single-CPU policies only: */
- #ifdef CONFIG_NO_HZ_COMMON
- unsigned long saved_idle_calls;
-@@ -161,6 +168,7 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy,
- return cpufreq_driver_resolve_freq(policy, freq);
- }
+@@ -167,9 +167,14 @@ static void sugov_get_util(struct sugov_cpu *sg_cpu)
+ unsigned long max = arch_scale_cpu_capacity(sg_cpu->cpu);

+ sg_cpu->max = max;
+#ifndef CONFIG_SCHED_ALT
- static void sugov_get_util(struct sugov_cpu *sg_cpu)
- {
- struct rq *rq = cpu_rq(sg_cpu->cpu);
-@@ -172,6 +180,55 @@ static void sugov_get_util(struct sugov_cpu *sg_cpu)
+ sg_cpu->bw_dl = cpu_bw_dl(rq);
+ sg_cpu->util = effective_cpu_util(sg_cpu->cpu, cpu_util_cfs(rq), max,
FREQUENCY_UTIL, NULL);
- }
-
-+#else /* CONFIG_SCHED_ALT */
-+
-+#define SG_CPU_LOAD_HISTORY_BITS (sizeof(s32) * 8ULL)
-+#define SG_CPU_UTIL_SHIFT (8)
-+#define SG_CPU_LOAD_HISTORY_SHIFT (SG_CPU_LOAD_HISTORY_BITS - 1 - SG_CPU_UTIL_SHIFT)
-+#define SG_CPU_LOAD_HISTORY_TO_UTIL(l) (((l) >> SG_CPU_LOAD_HISTORY_SHIFT) & 0xff)
-+
-+#define LOAD_BLOCK(t) ((t) >> 17)
-+#define LOAD_HALF_BLOCK(t) ((t) >> 16)
-+#define BLOCK_MASK(t) ((t) & ((0x01 << 18) - 1))
-+#define LOAD_BLOCK_BIT(b) (1UL << (SG_CPU_LOAD_HISTORY_BITS - 1 - (b)))
-+#define CURRENT_LOAD_BIT LOAD_BLOCK_BIT(0)
-+
-+static void sugov_get_util(struct sugov_cpu *sg_cpu)
-+{
-+ unsigned long max = arch_scale_cpu_capacity(sg_cpu->cpu);
-+
-+ sg_cpu->max = max;
++#else
+ sg_cpu->bw_dl = 0;
-+ sg_cpu->util = SG_CPU_LOAD_HISTORY_TO_UTIL(sg_cpu->load_history) *
-+ (max >> SG_CPU_UTIL_SHIFT);
-+}
-+
-+static inline void sugov_cpu_load_update(struct sugov_cpu *sg_cpu, u64 time)
-+{
-+ u64 delta = min(LOAD_BLOCK(time) - LOAD_BLOCK(sg_cpu->load_stamp),
-+ SG_CPU_LOAD_HISTORY_BITS - 1);
-+ u64 prev = !!(sg_cpu->load_history & CURRENT_LOAD_BIT);
-+ u64 curr = !!cpu_rq(sg_cpu->cpu)->nr_running;
-+
-+ if (delta) {
-+ sg_cpu->load_history = sg_cpu->load_history >> delta;
-+
-+ if (delta <= SG_CPU_UTIL_SHIFT) {
-+ sg_cpu->load_block += (~BLOCK_MASK(sg_cpu->load_stamp)) * prev;
-+ if (!!LOAD_HALF_BLOCK(sg_cpu->load_block) ^ curr)
-+ sg_cpu->load_history ^= LOAD_BLOCK_BIT(delta);
-+ }
-+
-+ sg_cpu->load_block = BLOCK_MASK(time) * prev;
-+ } else {
-+ sg_cpu->load_block += (time - sg_cpu->load_stamp) * prev;
-+ }
-+ if (prev ^ curr)
-+ sg_cpu->load_history ^= CURRENT_LOAD_BIT;
-+ sg_cpu->load_stamp = time;
-+}
++ sg_cpu->util = rq_load_util(rq, max);
+#endif /* CONFIG_SCHED_ALT */
-+
+ }
+
/**
- * sugov_iowait_reset() - Reset the IO boost status of a CPU.
- * @sg_cpu: the sugov data for the CPU to boost
-@@ -312,13 +369,19 @@ static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
+@@ -312,8 +317,10 @@ static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
*/
static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu)
{
@@ -8856,27 +8874,7 @@ index 57124614363d..4057e51cef45 100644
}

static inline bool sugov_update_single_common(struct sugov_cpu *sg_cpu,
- u64 time, unsigned int flags)
- {
-+#ifdef CONFIG_SCHED_ALT
-+ sugov_cpu_load_update(sg_cpu, time);
-+#endif /* CONFIG_SCHED_ALT */
-+
- sugov_iowait_boost(sg_cpu, time, flags);
- sg_cpu->last_update = time;
-
-@@ -439,6 +502,10 @@ sugov_update_shared(struct update_util_data *hook, u64 time, unsigned int flags)
-
- raw_spin_lock(&sg_policy->update_lock);
-
-+#ifdef CONFIG_SCHED_ALT
-+ sugov_cpu_load_update(sg_cpu, time);
-+#endif /* CONFIG_SCHED_ALT */
-+
- sugov_iowait_boost(sg_cpu, time, flags);
- sg_cpu->last_update = time;
-
-@@ -599,6 +666,7 @@ static int sugov_kthread_create(struct sugov_policy *sg_policy)
+@@ -599,6 +606,7 @@ static int sugov_kthread_create(struct sugov_policy *sg_policy)
}

ret = sched_setattr_nocheck(thread, &attr);
@@ -8884,7 +8882,7 @@ index 57124614363d..4057e51cef45 100644
if (ret) {
kthread_stop(thread);
pr_warn("%s: failed to set SCHED_DEADLINE\n", __func__);
-@@ -833,7 +901,9 @@ cpufreq_governor_init(schedutil_gov);
+@@ -833,7 +841,9 @@ cpufreq_governor_init(schedutil_gov);
#ifdef CONFIG_ENERGY_MODEL
static void rebuild_sd_workfn(struct work_struct *work)
{
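
For context on the load accounting this revision adds to alt_core.c: each runqueue keeps a 32-bit history in which every bit records whether the queue was busy during one clock block (LOAD_BLOCK() divides the nanosecond runqueue clock by 2^17, so a block is roughly 131 microseconds), and rq_load_util() maps the eight most recently completed blocks to a 0-255 value scaled by CPU capacity, which sugov_get_util() reports to schedutil when CONFIG_SCHED_ALT is set. The stand-alone C sketch below is an editor's illustration of that mapping only, not part of the commit: the macro values mirror the patch, while the helper's simplified signature, the sample history bit patterns, and the capacity of 1024 are made up for the example.

/*
 * Editor's illustration (not part of the patch): how the 32-bit load
 * history maps to a utilization value.  Unlike the kernel helper, this
 * version takes the history word directly instead of a struct rq.
 */
#include <stdint.h>
#include <stdio.h>

#define RQ_LOAD_HISTORY_BITS	(sizeof(int32_t) * 8)	/* 32 one-bit samples */
#define RQ_UTIL_SHIFT		(8)
/* Keep the eight completed sample bits (30..23); newer bits weigh more. */
#define RQ_LOAD_HISTORY_TO_UTIL(l) \
	(((l) >> (RQ_LOAD_HISTORY_BITS - 1 - RQ_UTIL_SHIFT)) & 0xff)

static unsigned long rq_load_util(uint32_t load_history, unsigned long max)
{
	/* 0..255 recent-load value scaled against the CPU capacity "max". */
	return RQ_LOAD_HISTORY_TO_UTIL(load_history) * (max >> RQ_UTIL_SHIFT);
}

int main(void)
{
	unsigned long max = 1024;	/* stand-in for arch_scale_cpu_capacity() */
	uint32_t busy = 0x7fffffffu;	/* every sampled block busy */
	uint32_t alt  = 0x55000000u;	/* busy/idle alternating in the sampled window */

	printf("fully busy : %lu of %lu\n", rq_load_util(busy, max), max);
	printf("alternating: %lu of %lu\n", rq_load_util(alt, max), max);
	return 0;
}

Compiled with any C compiler, this prints 1020 and 680 for the two sample histories: the mapping reads the sampled bits as a weighted binary value, so recent blocks count exponentially more than older ones rather than being simply tallied.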