commit: 9ae3c38079c69dc3335f4e20816987575a5ea5c7
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Jun 26 21:51:26 2022 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Jun 26 21:51:26 2022 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=9ae3c380

Updated BMQ Scheduler patch to r2

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                        |  2 +-
 ...=> 5020_BMQ-and-PDS-io-scheduler-v5.18-r2.patch | 55 +++++++---------
 2 files changed, 19 insertions(+), 38 deletions(-)

diff --git a/0000_README b/0000_README
index 17ef0755..728697d0 100644
--- a/0000_README
+++ b/0000_README
@@ -111,7 +111,7 @@ Patch: 5010_enable-cpu-optimizations-universal.patch
From: https://github.com/graysky2/kernel_compiler_patch
Desc: Kernel >= 5.15 patch enables gcc = v11.1+ optimizations for additional CPUs.

-Patch: 5020_BMQ-and-PDS-io-scheduler-v5.18-r1.patch
+Patch: 5020_BMQ-and-PDS-io-scheduler-v5.18-r2.patch
From: https://gitlab.com/alfredchen/linux-prjc
Desc: BMQ(BitMap Queue) Scheduler. A new CPU scheduler developed from PDS(incld). Inspired by the scheduler in zircon.


diff --git a/5020_BMQ-and-PDS-io-scheduler-v5.18-r1.patch b/5020_BMQ-and-PDS-io-scheduler-v5.18-r2.patch
similarity index 99%
rename from 5020_BMQ-and-PDS-io-scheduler-v5.18-r1.patch
rename to 5020_BMQ-and-PDS-io-scheduler-v5.18-r2.patch
index a130157e..cf13d856 100644
--- a/5020_BMQ-and-PDS-io-scheduler-v5.18-r1.patch
+++ b/5020_BMQ-and-PDS-io-scheduler-v5.18-r2.patch
@@ -632,10 +632,10 @@ index 976092b7bd45..31d587c16ec1 100644
obj-y += build_utility.o
diff --git a/kernel/sched/alt_core.c b/kernel/sched/alt_core.c
new file mode 100644
-index 000000000000..189332cd6f99
+index 000000000000..b8e67d568e17
--- /dev/null
+++ b/kernel/sched/alt_core.c
-@@ -0,0 +1,7768 @@
+@@ -0,0 +1,7750 @@
+/*
+ * kernel/sched/alt_core.c
+ *
@@ -705,7 +705,7 @@ index 000000000000..189332cd6f99
+#define sched_feat(x) (0)
+#endif /* CONFIG_SCHED_DEBUG */
+
-+#define ALT_SCHED_VERSION "v5.18-r1"
++#define ALT_SCHED_VERSION "v5.18-r2"
+
+/* rt_prio(prio) defined in include/linux/sched/rt.h */
+#define rt_task(p) rt_prio((p)->prio)
@@ -785,14 +785,14 @@ index 000000000000..189332cd6f99
+#ifdef CONFIG_SCHED_SMT
+static cpumask_t sched_sg_idle_mask ____cacheline_aligned_in_smp;
+#endif
-+static cpumask_t sched_rq_watermark[SCHED_BITS] ____cacheline_aligned_in_smp;
++static cpumask_t sched_rq_watermark[SCHED_QUEUE_BITS] ____cacheline_aligned_in_smp;
+
+/* sched_queue related functions */
+static inline void sched_queue_init(struct sched_queue *q)
+{
+ int i;
+
-+ bitmap_zero(q->bitmap, SCHED_BITS);
++ bitmap_zero(q->bitmap, SCHED_QUEUE_BITS);
+ for(i = 0; i < SCHED_BITS; i++)
+ INIT_LIST_HEAD(&q->heads[i]);
+}
@@ -824,7 +824,7 @@ index 000000000000..189332cd6f99
+ cpu = cpu_of(rq);
+ if (watermark < last_wm) {
+ for (i = last_wm; i > watermark; i--)
-+ cpumask_clear_cpu(cpu, sched_rq_watermark + SCHED_BITS - 1 - i);
++ cpumask_clear_cpu(cpu, sched_rq_watermark + SCHED_QUEUE_BITS - i);
+#ifdef CONFIG_SCHED_SMT
+ if (static_branch_likely(&sched_smt_present) &&
+ IDLE_TASK_SCHED_PRIO == last_wm)
@@ -835,7 +835,7 @@ index 000000000000..189332cd6f99
+ }
+ /* last_wm < watermark */
+ for (i = watermark; i > last_wm; i--)
-+ cpumask_set_cpu(cpu, sched_rq_watermark + SCHED_BITS - 1 - i);
++ cpumask_set_cpu(cpu, sched_rq_watermark + SCHED_QUEUE_BITS - i);
+#ifdef CONFIG_SCHED_SMT
+ if (static_branch_likely(&sched_smt_present) &&
+ IDLE_TASK_SCHED_PRIO == watermark) {
@@ -2543,7 +2543,7 @@ index 000000000000..189332cd6f99
+#endif
+ cpumask_and(&tmp, &chk_mask, sched_rq_watermark) ||
+ cpumask_and(&tmp, &chk_mask,
-+ sched_rq_watermark + SCHED_BITS - task_sched_prio(p)))
++ sched_rq_watermark + SCHED_QUEUE_BITS - 1 - task_sched_prio(p)))
+ return best_mask_cpu(task_cpu(p), &tmp);
+
+ return best_mask_cpu(task_cpu(p), &chk_mask);
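
[Editor's note] The watermark hunks above are one logical change: the sched_rq_watermark array shrinks from SCHED_BITS to SCHED_QUEUE_BITS entries, and every index expression is rewritten against the new size while keeping the reverse convention that lower slots correspond to higher watermark levels. Below is a minimal stand-alone sketch of such a reverse-indexed table; it is an illustration only, and the names WM_SLOTS, wm_table and wm_update are hypothetical, not taken from the patch.

/*
 * Sketch only: models the reverse "slot = size - level" indexing used by
 * update_sched_rq_watermark() above. Not kernel code; plain bitmasks
 * stand in for cpumask_t.
 */
#include <stdio.h>

#define WM_SLOTS 8      /* hypothetical stand-in for SCHED_QUEUE_BITS */

/* Bit c of wm_table[WM_SLOTS - i] set == CPU c sits at watermark i or
 * above; callers keep 0 < i < WM_SLOTS, so indices stay in range. */
static unsigned long wm_table[WM_SLOTS];

static void wm_update(int cpu, int last_wm, int watermark)
{
        int i;

        if (watermark < last_wm) {      /* watermark dropped: clear levels */
                for (i = last_wm; i > watermark; i--)
                        wm_table[WM_SLOTS - i] &= ~(1UL << cpu);
                return;
        }
        for (i = watermark; i > last_wm; i--)   /* watermark rose: set levels */
                wm_table[WM_SLOTS - i] |= 1UL << cpu;
}

int main(void)
{
        int i;

        wm_update(2, 0, 5);     /* CPU 2 rises from level 0 to 5 */
        wm_update(2, 5, 1);     /* ...then falls back to level 1 */
        for (i = 1; i < WM_SLOTS; i++)
                printf("slot %d: %#lx\n", i, wm_table[i]);
        return 0;
}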
@@ -4334,24 +4334,6 @@ index 000000000000..189332cd6f99
+ */
+void sched_exec(void)
+{
-+ struct task_struct *p = current;
-+ unsigned long flags;
-+ int dest_cpu;
-+
-+ raw_spin_lock_irqsave(&p->pi_lock, flags);
-+ dest_cpu = cpumask_any(p->cpus_ptr);
-+ if (dest_cpu == smp_processor_id())
-+ goto unlock;
-+
-+ if (likely(cpu_active(dest_cpu))) {
-+ struct migration_arg arg = { p, dest_cpu };
-+
-+ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-+ stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
-+ return;
-+ }
-+unlock:
-+ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+}
+
+#endif
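
[Editor's note] The hunk above deletes the entire body of sched_exec(), so r2 no longer attempts an exec-time migration off the current CPU. Reading the surviving + lines, the function reduces to an empty stub:

/* As it reads after this hunk (reconstructed from the remaining + lines). */
void sched_exec(void)
{
}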
@@ -4519,7 +4501,7 @@ index 000000000000..189332cd6f99
+}
+
+#ifdef CONFIG_SCHED_SMT
-+static inline int active_load_balance_cpu_stop(void *data)
++static inline int sg_balance_cpu_stop(void *data)
+{
+ struct rq *rq = this_rq();
+ struct task_struct *p = data;
@@ -4570,15 +4552,15 @@ index 000000000000..189332cd6f99
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
+
+ if (res)
-+ stop_one_cpu_nowait(cpu, active_load_balance_cpu_stop,
-+ curr, &rq->active_balance_work);
++ stop_one_cpu_nowait(cpu, sg_balance_cpu_stop, curr,
++ &rq->active_balance_work);
+ return res;
+}
+
+/*
-+ * sg_balance_check - slibing group balance check for run queue @rq
++ * sg_balance - slibing group balance check for run queue @rq
+ */
-+static inline void sg_balance_check(struct rq *rq)
++static inline void sg_balance(struct rq *rq)
+{
+ cpumask_t chk;
+ int cpu = cpu_of(rq);
@@ -5243,7 +5225,7 @@ index 000000000000..189332cd6f99
+ }
+
+#ifdef CONFIG_SCHED_SMT
-+ sg_balance_check(rq);
++ sg_balance(rq);
+#endif
+}
+
@@ -7884,7 +7866,7 @@ index 000000000000..189332cd6f99
+ wait_bit_init();
+
+#ifdef CONFIG_SMP
-+ for (i = 0; i < SCHED_BITS; i++)
++ for (i = 0; i < SCHED_QUEUE_BITS; i++)
+ cpumask_copy(sched_rq_watermark + i, cpu_present_mask);
+#endif
+
@@ -9094,10 +9076,10 @@ index 000000000000..611424bbfa9b
+#endif /* ALT_SCHED_H */
diff --git a/kernel/sched/bmq.h b/kernel/sched/bmq.h
new file mode 100644
-index 000000000000..bf7ac80ec242
+index 000000000000..66b77291b9d0
--- /dev/null
+++ b/kernel/sched/bmq.h
-@@ -0,0 +1,111 @@
+@@ -0,0 +1,110 @@
+#define ALT_SCHED_VERSION_MSG "sched/bmq: BMQ CPU Scheduler "ALT_SCHED_VERSION" by Alfred Chen.\n"
+
+/*
@@ -9185,8 +9167,7 @@ index 000000000000..bf7ac80ec242
+
+static void sched_task_fork(struct task_struct *p, struct rq *rq)
+{
-+ p->boost_prio = (p->boost_prio < 0) ?
-+ p->boost_prio + MAX_PRIORITY_ADJ : MAX_PRIORITY_ADJ;
++ p->boost_prio = MAX_PRIORITY_ADJ;
+}
+
+static inline void do_sched_yield_type_1(struct task_struct *p, struct rq *rq)
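
[Editor's note] The final hunk changes BMQ's fork path: r1 carried part of a boosted (negative) parent boost_prio into the child, while r2 always starts the child at MAX_PRIORITY_ADJ. A stand-alone before/after model follows; the value of MAX_PRIORITY_ADJ here is illustrative, and the reading that a lower boost_prio means a stronger boost is an assumption from BMQ's usual convention, not stated in this diff.

/* Sketch only: user-space model of the r1 vs r2 sched_task_fork() logic. */
#include <stdio.h>

#define MAX_PRIORITY_ADJ 4      /* illustrative stand-in value */

static int fork_boost_r1(int parent_boost)
{
        /* r1: a boosted (negative) value is partially inherited */
        return (parent_boost < 0) ? parent_boost + MAX_PRIORITY_ADJ
                                  : MAX_PRIORITY_ADJ;
}

static int fork_boost_r2(int parent_boost)
{
        (void)parent_boost;     /* r2: the child always starts unboosted */
        return MAX_PRIORITY_ADJ;
}

int main(void)
{
        int b;

        for (b = -4; b <= 4; b += 2)
                printf("parent %+d -> r1 child %+d, r2 child %+d\n",
                       b, fork_boost_r1(b), fork_boost_r2(b));
        return 0;
}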