commit: 1056ff1007ac01709819b73a4d2f3e1e7b7d6ba8
Author: Mike Pagano <mpagano@gentoo.org>
AuthorDate: Thu Oct 1 12:49:47 2020 +0000
Commit: Mike Pagano <mpagano@gentoo.org>
CommitDate: Thu Oct 1 12:49:47 2020 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=1056ff10

Linux patch 5.4.69

Signed-off-by: Mike Pagano <mpagano@gentoo.org>

 0000_README             |     4 +
 1068_linux-5.4.69.patch | 18773 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 18777 insertions(+)
diff --git a/0000_README b/0000_README
index 83f469d..dd45626 100644
--- a/0000_README
+++ b/0000_README
@@ -315,6 +315,10 @@ Patch: 1067_linux-5.4.68.patch
 From: http://www.kernel.org
 Desc: Linux 5.4.68
 
+Patch: 1068_linux-5.4.69.patch
+From: http://www.kernel.org
+Desc: Linux 5.4.69
+
 Patch: 1500_XATTR_USER_PREFIX.patch
 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc: Support for namespace user.pax.* on tmpfs.
 
diff --git a/1068_linux-5.4.69.patch b/1068_linux-5.4.69.patch
new file mode 100644
index 0000000..c9154d1
--- /dev/null
+++ b/1068_linux-5.4.69.patch
@@ -0,0 +1,18773 @@
+diff --git a/Documentation/devicetree/bindings/sound/wm8994.txt b/Documentation/devicetree/bindings/sound/wm8994.txt
+index 68cccc4653ba3..367b58ce1bb92 100644
+--- a/Documentation/devicetree/bindings/sound/wm8994.txt
++++ b/Documentation/devicetree/bindings/sound/wm8994.txt
+@@ -14,9 +14,15 @@ Required properties:
+ - #gpio-cells : Must be 2. The first cell is the pin number and the
+ second cell is used to specify optional parameters (currently unused).
+
+- - AVDD2-supply, DBVDD1-supply, DBVDD2-supply, DBVDD3-supply, CPVDD-supply,
+- SPKVDD1-supply, SPKVDD2-supply : power supplies for the device, as covered
+- in Documentation/devicetree/bindings/regulator/regulator.txt
++ - power supplies for the device, as covered in
++ Documentation/devicetree/bindings/regulator/regulator.txt, depending
++ on compatible:
++ - for wlf,wm1811 and wlf,wm8958:
++ AVDD1-supply, AVDD2-supply, DBVDD1-supply, DBVDD2-supply, DBVDD3-supply,
++ DCVDD-supply, CPVDD-supply, SPKVDD1-supply, SPKVDD2-supply
++ - for wlf,wm8994:
++ AVDD1-supply, AVDD2-supply, DBVDD-supply, DCVDD-supply, CPVDD-supply,
++ SPKVDD1-supply, SPKVDD2-supply
+
+ Optional properties:
+
+@@ -73,11 +79,11 @@ wm8994: codec@1a {
+
+ lineout1-se;
+
++ AVDD1-supply = <&regulator>;
+ AVDD2-supply = <&regulator>;
+ CPVDD-supply = <&regulator>;
+- DBVDD1-supply = <&regulator>;
+- DBVDD2-supply = <&regulator>;
+- DBVDD3-supply = <&regulator>;
++ DBVDD-supply = <&regulator>;
++ DCVDD-supply = <&regulator>;
+ SPKVDD1-supply = <&regulator>;
+ SPKVDD2-supply = <&regulator>;
+ };
+diff --git a/Documentation/driver-api/libata.rst b/Documentation/driver-api/libata.rst
+index 70e180e6b93dc..9f3e5dc311840 100644
+--- a/Documentation/driver-api/libata.rst
++++ b/Documentation/driver-api/libata.rst
+@@ -250,7 +250,7 @@ High-level taskfile hooks
+
+ ::
+
+- void (*qc_prep) (struct ata_queued_cmd *qc);
++ enum ata_completion_errors (*qc_prep) (struct ata_queued_cmd *qc);
+ int (*qc_issue) (struct ata_queued_cmd *qc);
+
+
+diff --git a/Makefile b/Makefile
+index acb2499d9b053..adf3847106775 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 4
+-SUBLEVEL = 68
++SUBLEVEL = 69
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+
+diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
+index cbde9fa15792b..c1747fcb86d36 100644
+--- a/arch/arm/include/asm/kvm_emulate.h
++++ b/arch/arm/include/asm/kvm_emulate.h
+@@ -204,7 +204,7 @@ static inline int kvm_vcpu_dabt_get_rd(struct kvm_vcpu *vcpu)
+ return (kvm_vcpu_get_hsr(vcpu) & HSR_SRT_MASK) >> HSR_SRT_SHIFT;
+ }
+
+-static inline bool kvm_vcpu_dabt_iss1tw(struct kvm_vcpu *vcpu)
++static inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu)
+ {
+ return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_S1PTW;
+ }
+@@ -236,16 +236,21 @@ static inline bool kvm_vcpu_trap_il_is32bit(struct kvm_vcpu *vcpu)
+ return kvm_vcpu_get_hsr(vcpu) & HSR_IL;
+ }
+
+-static inline u8 kvm_vcpu_trap_get_class(struct kvm_vcpu *vcpu)
++static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
+ {
+ return kvm_vcpu_get_hsr(vcpu) >> HSR_EC_SHIFT;
+ }
+
+-static inline bool kvm_vcpu_trap_is_iabt(struct kvm_vcpu *vcpu)
++static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
+ {
+ return kvm_vcpu_trap_get_class(vcpu) == HSR_EC_IABT;
+ }
+
++static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu)
++{
++ return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu);
++}
++
+ static inline u8 kvm_vcpu_trap_get_fault(struct kvm_vcpu *vcpu)
+ {
+ return kvm_vcpu_get_hsr(vcpu) & HSR_FSC;
+diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c
+index a082f6e4f0f4a..76ea4178a55cb 100644
+--- a/arch/arm/kernel/stacktrace.c
++++ b/arch/arm/kernel/stacktrace.c
+@@ -116,6 +116,8 @@ static int save_trace(struct stackframe *frame, void *d)
+ return 0;
+
+ regs = (struct pt_regs *)frame->sp;
++ if ((unsigned long)&regs[1] > ALIGN(frame->sp, THREAD_SIZE))
++ return 0;
+
+ trace->entries[trace->nr_entries++] = regs->ARM_pc;
+
+diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
+index c053abd1fb539..97a512551b217 100644
+--- a/arch/arm/kernel/traps.c
++++ b/arch/arm/kernel/traps.c
+@@ -64,14 +64,16 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
+
+ void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
+ {
++ unsigned long end = frame + 4 + sizeof(struct pt_regs);
++
+ #ifdef CONFIG_KALLSYMS
+ printk("[<%08lx>] (%ps) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
+ #else
+ printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
+ #endif
+
+- if (in_entry_text(from))
+- dump_mem("", "Exception stack", frame + 4, frame + 4 + sizeof(struct pt_regs));
++ if (in_entry_text(from) && end <= ALIGN(frame, THREAD_SIZE))
++ dump_mem("", "Exception stack", frame + 4, end);
+ }
+
+ void dump_backtrace_stm(u32 *stack, u32 instruction)
+diff --git a/arch/arm/mach-omap2/cpuidle34xx.c b/arch/arm/mach-omap2/cpuidle34xx.c
+index 532a3e4b98c6f..090a8aafb25e1 100644
+--- a/arch/arm/mach-omap2/cpuidle34xx.c
++++ b/arch/arm/mach-omap2/cpuidle34xx.c
+@@ -109,6 +109,7 @@ static int omap3_enter_idle(struct cpuidle_device *dev,
+ int index)
+ {
+ struct omap3_idle_statedata *cx = &omap3_idle_data[index];
++ int error;
+
+ if (omap_irq_pending() || need_resched())
+ goto return_sleep_time;
+@@ -125,8 +126,11 @@ static int omap3_enter_idle(struct cpuidle_device *dev,
+ * Call idle CPU PM enter notifier chain so that
+ * VFP context is saved.
+ */
+- if (cx->mpu_state == PWRDM_POWER_OFF)
+- cpu_pm_enter();
++ if (cx->mpu_state == PWRDM_POWER_OFF) {
++ error = cpu_pm_enter();
++ if (error)
++ goto out_clkdm_set;
++ }
+
+ /* Execute ARM wfi */
+ omap_sram_idle();
+@@ -139,6 +143,7 @@ static int omap3_enter_idle(struct cpuidle_device *dev,
+ pwrdm_read_prev_pwrst(mpu_pd) == PWRDM_POWER_OFF)
+ cpu_pm_exit();
+
++out_clkdm_set:
+ /* Re-allow idle for C1 */
+ if (cx->flags & OMAP_CPUIDLE_CX_NO_CLKDM_IDLE)
+ clkdm_allow_idle(mpu_pd->pwrdm_clkdms[0]);
+diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
+index fe75d4fa60738..6f5f89711f256 100644
+--- a/arch/arm/mach-omap2/cpuidle44xx.c
++++ b/arch/arm/mach-omap2/cpuidle44xx.c
+@@ -122,6 +122,7 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
+ {
+ struct idle_statedata *cx = state_ptr + index;
+ u32 mpuss_can_lose_context = 0;
++ int error;
+
+ /*
+ * CPU0 has to wait and stay ON until CPU1 is OFF state.
+@@ -159,7 +160,9 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
+ * Call idle CPU PM enter notifier chain so that
+ * VFP and per CPU interrupt context is saved.
+ */
+- cpu_pm_enter();
++ error = cpu_pm_enter();
++ if (error)
++ goto cpu_pm_out;
+
+ if (dev->cpu == 0) {
+ pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
+@@ -169,13 +172,17 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
+ * Call idle CPU cluster PM enter notifier chain
+ * to save GIC and wakeupgen context.
+ */
+- if (mpuss_can_lose_context)
+- cpu_cluster_pm_enter();
++ if (mpuss_can_lose_context) {
++ error = cpu_cluster_pm_enter();
++ if (error)
++ goto cpu_cluster_pm_out;
++ }
+ }
+
+ omap4_enter_lowpower(dev->cpu, cx->cpu_state);
+ cpu_done[dev->cpu] = true;
+
++cpu_cluster_pm_out:
+ /* Wakeup CPU1 only if it is not offlined */
+ if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {
+
+@@ -197,12 +204,6 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
+ }
+ }
+
+- /*
+- * Call idle CPU PM exit notifier chain to restore
+- * VFP and per CPU IRQ context.
+- */
+- cpu_pm_exit();
+-
+ /*
+ * Call idle CPU cluster PM exit notifier chain
+ * to restore GIC and wakeupgen context.
+@@ -210,6 +211,13 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
+ if (dev->cpu == 0 && mpuss_can_lose_context)
+ cpu_cluster_pm_exit();
+
++ /*
++ * Call idle CPU PM exit notifier chain to restore
++ * VFP and per CPU IRQ context.
++ */
++ cpu_pm_exit();
++
++cpu_pm_out:
+ tick_broadcast_exit();
+
+ fail:
+diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
+index 54254fc92c2ed..fa66534a7ae22 100644
+--- a/arch/arm/mach-omap2/pm34xx.c
++++ b/arch/arm/mach-omap2/pm34xx.c
+@@ -194,6 +194,7 @@ void omap_sram_idle(void)
+ int per_next_state = PWRDM_POWER_ON;
+ int core_next_state = PWRDM_POWER_ON;
+ u32 sdrc_pwr = 0;
++ int error;
+
+ mpu_next_state = pwrdm_read_next_pwrst(mpu_pwrdm);
+ switch (mpu_next_state) {
+@@ -222,8 +223,11 @@ void omap_sram_idle(void)
+ pwrdm_pre_transition(NULL);
+
+ /* PER */
+- if (per_next_state == PWRDM_POWER_OFF)
+- cpu_cluster_pm_enter();
++ if (per_next_state == PWRDM_POWER_OFF) {
++ error = cpu_cluster_pm_enter();
++ if (error)
++ return;
++ }
+
+ /* CORE */
+ if (core_next_state < PWRDM_POWER_ON) {
+diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
+index f47081b40523e..f65ff6b90f4a9 100644
+--- a/arch/arm64/include/asm/kvm_emulate.h
++++ b/arch/arm64/include/asm/kvm_emulate.h
+@@ -299,7 +299,7 @@ static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
+ return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
+ }
+
+-static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
++static __always_inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu)
+ {
+ return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
+ }
+@@ -307,7 +307,7 @@ static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
+ static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
+ {
+ return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR) ||
+- kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
++ kvm_vcpu_abt_iss1tw(vcpu); /* AF/DBM update */
+ }
+
+ static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
+@@ -336,6 +336,11 @@ static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
+ return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
+ }
+
++static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu)
++{
++ return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu);
++}
++
+ static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
+ {
+ return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC;
+@@ -373,6 +378,9 @@ static inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
+
+ static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
+ {
++ if (kvm_vcpu_abt_iss1tw(vcpu))
++ return true;
++
+ if (kvm_vcpu_trap_is_iabt(vcpu))
+ return false;
+
+diff --git a/arch/arm64/include/asm/sections.h b/arch/arm64/include/asm/sections.h
+index 788ae971f11c1..25a73aab438f9 100644
+--- a/arch/arm64/include/asm/sections.h
++++ b/arch/arm64/include/asm/sections.h
+@@ -15,6 +15,7 @@ extern char __hyp_text_start[], __hyp_text_end[];
+ extern char __idmap_text_start[], __idmap_text_end[];
+ extern char __initdata_begin[], __initdata_end[];
+ extern char __inittext_begin[], __inittext_end[];
++extern char __exittext_begin[], __exittext_end[];
+ extern char __irqentry_text_start[], __irqentry_text_end[];
+ extern char __mmuoff_data_start[], __mmuoff_data_end[];
+ extern char __entry_tramp_text_start[], __entry_tramp_text_end[];
+diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c
+index a100483b47c42..46ec402e97edc 100644
+--- a/arch/arm64/kernel/acpi.c
++++ b/arch/arm64/kernel/acpi.c
+@@ -19,6 +19,7 @@
+ #include <linux/init.h>
+ #include <linux/irq.h>
+ #include <linux/irqdomain.h>
++#include <linux/irq_work.h>
+ #include <linux/memblock.h>
+ #include <linux/of_fdt.h>
+ #include <linux/smp.h>
+@@ -269,6 +270,7 @@ pgprot_t __acpi_get_mem_attribute(phys_addr_t addr)
+ int apei_claim_sea(struct pt_regs *regs)
+ {
+ int err = -ENOENT;
++ bool return_to_irqs_enabled;
+ unsigned long current_flags;
+
+ if (!IS_ENABLED(CONFIG_ACPI_APEI_GHES))
+@@ -276,6 +278,12 @@ int apei_claim_sea(struct pt_regs *regs)
+
+ current_flags = local_daif_save_flags();
+
++ /* current_flags isn't useful here as daif doesn't tell us about pNMI */
++ return_to_irqs_enabled = !irqs_disabled_flags(arch_local_save_flags());
++
++ if (regs)
++ return_to_irqs_enabled = interrupts_enabled(regs);
++
+ /*
+ * SEA can interrupt SError, mask it and describe this as an NMI so
+ * that APEI defers the handling.
+@@ -284,6 +292,23 @@ int apei_claim_sea(struct pt_regs *regs)
+ nmi_enter();
+ err = ghes_notify_sea();
+ nmi_exit();
++
++ /*
++ * APEI NMI-like notifications are deferred to irq_work. Unless
++ * we interrupted irqs-masked code, we can do that now.
++ */
++ if (!err) {
++ if (return_to_irqs_enabled) {
++ local_daif_restore(DAIF_PROCCTX_NOIRQ);
++ __irq_enter();
++ irq_work_run();
++ __irq_exit();
++ } else {
++ pr_warn_ratelimited("APEI work queued but not completed");
++ err = -EINPROGRESS;
++ }
++ }
++
+ local_daif_restore(current_flags);
+
+ return err;
+diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
+index f400cb29b811a..f2ec845404149 100644
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -160,11 +160,10 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_GIC_SHIFT, 4, 0),
+ S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
+ S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
+- /* Linux doesn't care about the EL3 */
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL3_SHIFT, 4, 0),
+- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL2_SHIFT, 4, 0),
+- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY),
+- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY),
++ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL2_SHIFT, 4, 0),
++ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY),
++ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY),
+ ARM64_FTR_END,
+ };
+
+@@ -320,7 +319,7 @@ static const struct arm64_ftr_bits ftr_id_pfr0[] = {
+ };
+
+ static const struct arm64_ftr_bits ftr_id_dfr0[] = {
+- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0),
++ /* [31:28] TraceFilt */
+ S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0xf), /* PerfMon */
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),
+@@ -719,9 +718,6 @@ void update_cpu_features(int cpu,
+ taint |= check_update_ftr_reg(SYS_ID_AA64MMFR2_EL1, cpu,
+ info->reg_id_aa64mmfr2, boot->reg_id_aa64mmfr2);
+
+- /*
+- * EL3 is not our concern.
+- */
+ taint |= check_update_ftr_reg(SYS_ID_AA64PFR0_EL1, cpu,
+ info->reg_id_aa64pfr0, boot->reg_id_aa64pfr0);
+ taint |= check_update_ftr_reg(SYS_ID_AA64PFR1_EL1, cpu,
+diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
+index a612da533ea20..53bcf5386907f 100644
+--- a/arch/arm64/kernel/insn.c
++++ b/arch/arm64/kernel/insn.c
+@@ -21,6 +21,7 @@
+ #include <asm/fixmap.h>
+ #include <asm/insn.h>
+ #include <asm/kprobes.h>
++#include <asm/sections.h>
+
+ #define AARCH64_INSN_SF_BIT BIT(31)
+ #define AARCH64_INSN_N_BIT BIT(22)
+@@ -78,16 +79,29 @@ bool aarch64_insn_is_branch_imm(u32 insn)
+
+ static DEFINE_RAW_SPINLOCK(patch_lock);
+
++static bool is_exit_text(unsigned long addr)
++{
++ /* discarded with init text/data */
++ return system_state < SYSTEM_RUNNING &&
++ addr >= (unsigned long)__exittext_begin &&
++ addr < (unsigned long)__exittext_end;
++}
++
++static bool is_image_text(unsigned long addr)
++{
++ return core_kernel_text(addr) || is_exit_text(addr);
++}
++
+ static void __kprobes *patch_map(void *addr, int fixmap)
+ {
+ unsigned long uintaddr = (uintptr_t) addr;
+- bool module = !core_kernel_text(uintaddr);
++ bool image = is_image_text(uintaddr);
+ struct page *page;
+
+- if (module && IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
+- page = vmalloc_to_page(addr);
+- else if (!module)
++ if (image)
+ page = phys_to_page(__pa_symbol(addr));
++ else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
++ page = vmalloc_to_page(addr);
+ else
+ return addr;
+
+diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
+index 4f77de8ce1384..0bab37b1acbe9 100644
+--- a/arch/arm64/kernel/vmlinux.lds.S
++++ b/arch/arm64/kernel/vmlinux.lds.S
+@@ -170,9 +170,12 @@ SECTIONS
+ __inittext_begin = .;
+
+ INIT_TEXT_SECTION(8)
++
++ __exittext_begin = .;
+ .exit.text : {
+ ARM_EXIT_KEEP(EXIT_TEXT)
+ }
++ __exittext_end = .;
+
+ . = ALIGN(4);
+ .altinstructions : {
+diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
+index 65660b6144740..84964983198e2 100644
+--- a/arch/arm64/kvm/hyp/switch.c
++++ b/arch/arm64/kvm/hyp/switch.c
+@@ -496,7 +496,7 @@ static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
+ kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT &&
+ kvm_vcpu_dabt_isvalid(vcpu) &&
+ !kvm_vcpu_dabt_isextabt(vcpu) &&
+- !kvm_vcpu_dabt_iss1tw(vcpu);
++ !kvm_vcpu_abt_iss1tw(vcpu);
+
+ if (valid) {
+ int ret = __vgic_v2_perform_cpuif_access(vcpu);
+diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
+index d26e6cd289539..2a7339aeb1ad4 100644
+--- a/arch/arm64/mm/fault.c
++++ b/arch/arm64/mm/fault.c
+@@ -654,11 +654,13 @@ static int do_sea(unsigned long addr, unsigned int esr, struct pt_regs *regs)
+
+ inf = esr_to_fault_info(esr);
+
+- /*
+- * Return value ignored as we rely on signal merging.
+- * Future patches will make this more robust.
+- */
+- apei_claim_sea(regs);
++ if (user_mode(regs) && apei_claim_sea(regs) == 0) {
++ /*
++ * APEI claimed this as a firmware-first notification.
++ * Some processing deferred to task_work before ret_to_user().
++ */
++ return 0;
++ }
+
+ if (esr & ESR_ELx_FnV)
+ siaddr = NULL;
+diff --git a/arch/m68k/q40/config.c b/arch/m68k/q40/config.c
+index e63eb5f069995..f31890078197e 100644
+--- a/arch/m68k/q40/config.c
++++ b/arch/m68k/q40/config.c
+@@ -264,6 +264,7 @@ static int q40_get_rtc_pll(struct rtc_pll_info *pll)
+ {
+ int tmp = Q40_RTC_CTRL;
+
++ pll->pll_ctrl = 0;
+ pll->pll_value = tmp & Q40_RTC_PLL_MASK;
+ if (tmp & Q40_RTC_PLL_SIGN)
+ pll->pll_value = -pll->pll_value;
+diff --git a/arch/mips/include/asm/cpu-type.h b/arch/mips/include/asm/cpu-type.h
+index 7bbb66760a07c..1809c408736b0 100644
+--- a/arch/mips/include/asm/cpu-type.h
++++ b/arch/mips/include/asm/cpu-type.h
+@@ -47,6 +47,7 @@ static inline int __pure __get_cpu_type(const int cpu_type)
+ case CPU_34K:
+ case CPU_1004K:
+ case CPU_74K:
++ case CPU_1074K:
+ case CPU_M14KC:
+ case CPU_M14KEC:
+ case CPU_INTERAPTIV:
+diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
+index 635fb154b33f9..a3633560493be 100644
+--- a/arch/powerpc/include/asm/kvm_asm.h
++++ b/arch/powerpc/include/asm/kvm_asm.h
+@@ -150,4 +150,7 @@
+
+ #define KVM_INST_FETCH_FAILED -1
+
++/* Extract PO and XOP opcode fields */
++#define PO_XOP_OPCODE_MASK 0xfc0007fe
++
+ #endif /* __POWERPC_KVM_ASM_H__ */
+diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
+index dc0780f930d5b..59260eb962916 100644
+--- a/arch/powerpc/kernel/Makefile
++++ b/arch/powerpc/kernel/Makefile
+@@ -19,6 +19,7 @@ CFLAGS_btext.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
+ CFLAGS_prom.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
+
+ CFLAGS_prom_init.o += $(call cc-option, -fno-stack-protector)
++CFLAGS_prom_init.o += -DDISABLE_BRANCH_PROFILING
+
+ ifdef CONFIG_FUNCTION_TRACER
+ # Do not trace early boot code
+@@ -36,7 +37,6 @@ KASAN_SANITIZE_btext.o := n
+ ifdef CONFIG_KASAN
+ CFLAGS_early_32.o += -DDISABLE_BRANCH_PROFILING
+ CFLAGS_cputable.o += -DDISABLE_BRANCH_PROFILING
+-CFLAGS_prom_init.o += -DDISABLE_BRANCH_PROFILING
+ CFLAGS_btext.o += -DDISABLE_BRANCH_PROFILING
+ endif
+
+diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
+index bc8a551013be9..c35069294ecfb 100644
+--- a/arch/powerpc/kernel/eeh.c
++++ b/arch/powerpc/kernel/eeh.c
+@@ -503,7 +503,7 @@ int eeh_dev_check_failure(struct eeh_dev *edev)
+ rc = 1;
+ if (pe->state & EEH_PE_ISOLATED) {
+ pe->check_count++;
+- if (pe->check_count % EEH_MAX_FAILS == 0) {
++ if (pe->check_count == EEH_MAX_FAILS) {
+ dn = pci_device_to_OF_node(dev);
+ if (dn)
+ location = of_get_property(dn, "ibm,loc-code",
+diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
+index 014ff0701f245..9432fc6af28a5 100644
+--- a/arch/powerpc/kernel/traps.c
++++ b/arch/powerpc/kernel/traps.c
+@@ -510,11 +510,11 @@ out:
+ #ifdef CONFIG_PPC_BOOK3S_64
+ BUG_ON(get_paca()->in_nmi == 0);
+ if (get_paca()->in_nmi > 1)
+- nmi_panic(regs, "Unrecoverable nested System Reset");
++ die("Unrecoverable nested System Reset", regs, SIGABRT);
+ #endif
+ /* Must die if the interrupt is not recoverable */
+ if (!(regs->msr & MSR_RI))
+- nmi_panic(regs, "Unrecoverable System Reset");
++ die("Unrecoverable System Reset", regs, SIGABRT);
+
+ if (saved_hsrrs) {
+ mtspr(SPRN_HSRR0, hsrr0);
+@@ -858,7 +858,7 @@ void machine_check_exception(struct pt_regs *regs)
+
+ /* Must die if the interrupt is not recoverable */
+ if (!(regs->msr & MSR_RI))
+- nmi_panic(regs, "Unrecoverable Machine check");
++ die("Unrecoverable Machine check", regs, SIGBUS);
+
+ return;
+
+diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
+index da8375437d161..9d73448354698 100644
+--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
++++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
+@@ -1104,6 +1104,11 @@ void kvmppc_radix_flush_memslot(struct kvm *kvm,
+ kvm->arch.lpid);
+ gpa += PAGE_SIZE;
+ }
++ /*
++ * Increase the mmu notifier sequence number to prevent any page
++ * fault that read the memslot earlier from writing a PTE.
++ */
++ kvm->mmu_notifier_seq++;
+ spin_unlock(&kvm->mmu_lock);
+ }
+
+diff --git a/arch/powerpc/kvm/book3s_hv_tm.c b/arch/powerpc/kvm/book3s_hv_tm.c
+index 0db9374971697..cc90b8b823291 100644
+--- a/arch/powerpc/kvm/book3s_hv_tm.c
++++ b/arch/powerpc/kvm/book3s_hv_tm.c
+@@ -3,6 +3,8 @@
+ * Copyright 2017 Paul Mackerras, IBM Corp. <paulus@×××××××.com>
+ */
+
++#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
++
+ #include <linux/kvm_host.h>
+
+ #include <asm/kvm_ppc.h>
+@@ -44,7 +46,18 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
+ u64 newmsr, bescr;
+ int ra, rs;
+
+- switch (instr & 0xfc0007ff) {
++ /*
++ * rfid, rfebb, and mtmsrd encode bit 31 = 0 since it's a reserved bit
++ * in these instructions, so masking bit 31 out doesn't change these
++ * instructions. For treclaim., tsr., and trechkpt. instructions if bit
++ * 31 = 0 then they are per ISA invalid forms, however P9 UM, in section
++ * 4.6.10 Book II Invalid Forms, informs specifically that ignoring bit
++ * 31 is an acceptable way to handle these invalid forms that have
++ * bit 31 = 0. Moreover, for emulation purposes both forms (w/ and wo/
++ * bit 31 set) can generate a softpatch interrupt. Hence both forms
++ * are handled below for these instructions so they behave the same way.
++ */
++ switch (instr & PO_XOP_OPCODE_MASK) {
+ case PPC_INST_RFID:
+ /* XXX do we need to check for PR=0 here? */
+ newmsr = vcpu->arch.shregs.srr1;
+@@ -105,7 +118,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
+ vcpu->arch.shregs.msr = newmsr;
+ return RESUME_GUEST;
+
+- case PPC_INST_TSR:
++ /* ignore bit 31, see comment above */
++ case (PPC_INST_TSR & PO_XOP_OPCODE_MASK):
+ /* check for PR=1 and arch 2.06 bit set in PCR */
+ if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206)) {
+ /* generate an illegal instruction interrupt */
+@@ -140,7 +154,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
+ vcpu->arch.shregs.msr = msr;
+ return RESUME_GUEST;
+
+- case PPC_INST_TRECLAIM:
++ /* ignore bit 31, see comment above */
++ case (PPC_INST_TRECLAIM & PO_XOP_OPCODE_MASK):
+ /* check for TM disabled in the HFSCR or MSR */
+ if (!(vcpu->arch.hfscr & HFSCR_TM)) {
+ /* generate an illegal instruction interrupt */
+@@ -176,7 +191,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
+ vcpu->arch.shregs.msr &= ~MSR_TS_MASK;
+ return RESUME_GUEST;
+
+- case PPC_INST_TRECHKPT:
++ /* ignore bit 31, see comment above */
++ case (PPC_INST_TRECHKPT & PO_XOP_OPCODE_MASK):
+ /* XXX do we need to check for PR=0 here? */
+ /* check for TM disabled in the HFSCR or MSR */
+ if (!(vcpu->arch.hfscr & HFSCR_TM)) {
+@@ -208,6 +224,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
+ }
+
+ /* What should we do here? We didn't recognize the instruction */
+- WARN_ON_ONCE(1);
++ kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
++ pr_warn_ratelimited("Unrecognized TM-related instruction %#x for emulation", instr);
++
+ return RESUME_GUEST;
+ }
+diff --git a/arch/powerpc/kvm/book3s_hv_tm_builtin.c b/arch/powerpc/kvm/book3s_hv_tm_builtin.c
+index 217246279dfae..fad931f224efd 100644
+--- a/arch/powerpc/kvm/book3s_hv_tm_builtin.c
++++ b/arch/powerpc/kvm/book3s_hv_tm_builtin.c
+@@ -23,7 +23,18 @@ int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu)
+ u64 newmsr, msr, bescr;
+ int rs;
+
+- switch (instr & 0xfc0007ff) {
++ /*
++ * rfid, rfebb, and mtmsrd encode bit 31 = 0 since it's a reserved bit
++ * in these instructions, so masking bit 31 out doesn't change these
++ * instructions. For the tsr. instruction if bit 31 = 0 then it is per
++ * ISA an invalid form, however P9 UM, in section 4.6.10 Book II Invalid
++ * Forms, informs specifically that ignoring bit 31 is an acceptable way
++ * to handle TM-related invalid forms that have bit 31 = 0. Moreover,
++ * for emulation purposes both forms (w/ and wo/ bit 31 set) can
++ * generate a softpatch interrupt. Hence both forms are handled below
++ * for tsr. to make them behave the same way.
++ */
++ switch (instr & PO_XOP_OPCODE_MASK) {
+ case PPC_INST_RFID:
+ /* XXX do we need to check for PR=0 here? */
+ newmsr = vcpu->arch.shregs.srr1;
+@@ -73,7 +84,8 @@ int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu)
+ vcpu->arch.shregs.msr = newmsr;
+ return 1;
+
+- case PPC_INST_TSR:
++ /* ignore bit 31, see comment above */
++ case (PPC_INST_TSR & PO_XOP_OPCODE_MASK):
+ /* we know the MSR has the TS field = S (0b01) here */
+ msr = vcpu->arch.shregs.msr;
+ /* check for PR=1 and arch 2.06 bit set in PCR */
+diff --git a/arch/powerpc/mm/book3s64/iommu_api.c b/arch/powerpc/mm/book3s64/iommu_api.c
+index 56cc845205779..ef164851738b8 100644
+--- a/arch/powerpc/mm/book3s64/iommu_api.c
++++ b/arch/powerpc/mm/book3s64/iommu_api.c
+@@ -121,24 +121,6 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
+ goto free_exit;
+ }
+
+- pageshift = PAGE_SHIFT;
+- for (i = 0; i < entries; ++i) {
+- struct page *page = mem->hpages[i];
+-
+- /*
+- * Allow to use larger than 64k IOMMU pages. Only do that
+- * if we are backed by hugetlb.
+- */
+- if ((mem->pageshift > PAGE_SHIFT) && PageHuge(page))
+- pageshift = page_shift(compound_head(page));
+- mem->pageshift = min(mem->pageshift, pageshift);
+- /*
+- * We don't need struct page reference any more, switch
+- * to physical address.
+- */
+- mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT;
+- }
+-
+ good_exit:
+ atomic64_set(&mem->mapped, 1);
+ mem->used = 1;
+@@ -158,6 +140,27 @@ good_exit:
+ }
+ }
+
++ if (mem->dev_hpa == MM_IOMMU_TABLE_INVALID_HPA) {
++ /*
++ * Allow to use larger than 64k IOMMU pages. Only do that
++ * if we are backed by hugetlb. Skip device memory as it is not
++ * backed with page structs.
++ */
++ pageshift = PAGE_SHIFT;
++ for (i = 0; i < entries; ++i) {
++ struct page *page = mem->hpages[i];
++
++ if ((mem->pageshift > PAGE_SHIFT) && PageHuge(page))
++ pageshift = page_shift(compound_head(page));
++ mem->pageshift = min(mem->pageshift, pageshift);
++ /*
++ * We don't need struct page reference any more, switch
++ * to physical address.
++ */
++ mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT;
++ }
++ }
++
+ list_add_rcu(&mem->next, &mm->context.iommu_group_mem_list);
+
+ mutex_unlock(&mem_list_mutex);
+diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
+index cb50a9e1fd2d7..eb82dda884e51 100644
+--- a/arch/powerpc/perf/imc-pmu.c
++++ b/arch/powerpc/perf/imc-pmu.c
+@@ -44,6 +44,16 @@ static DEFINE_PER_CPU(u64 *, trace_imc_mem);
+ static struct imc_pmu_ref *trace_imc_refc;
+ static int trace_imc_mem_size;
+
++/*
++ * Global data structure used to avoid races between thread,
++ * core and trace-imc
++ */
++static struct imc_pmu_ref imc_global_refc = {
++ .lock = __MUTEX_INITIALIZER(imc_global_refc.lock),
++ .id = 0,
++ .refc = 0,
++};
++
+ static struct imc_pmu *imc_event_to_pmu(struct perf_event *event)
+ {
+ return container_of(event->pmu, struct imc_pmu, pmu);
+@@ -698,6 +708,16 @@ static int ppc_core_imc_cpu_offline(unsigned int cpu)
+ return -EINVAL;
+
+ ref->refc = 0;
++ /*
++ * Reduce the global reference count, if this is the
++ * last cpu in this core and core-imc event running
++ * in this cpu.
++ */
++ mutex_lock(&imc_global_refc.lock);
++ if (imc_global_refc.id == IMC_DOMAIN_CORE)
++ imc_global_refc.refc--;
++
++ mutex_unlock(&imc_global_refc.lock);
+ }
+ return 0;
+ }
+@@ -710,6 +730,23 @@ static int core_imc_pmu_cpumask_init(void)
+ ppc_core_imc_cpu_offline);
+ }
+
++static void reset_global_refc(struct perf_event *event)
++{
++ mutex_lock(&imc_global_refc.lock);
++ imc_global_refc.refc--;
++
++ /*
++ * If no other thread is running any
++ * event for this domain(thread/core/trace),
++ * set the global id to zero.
++ */
++ if (imc_global_refc.refc <= 0) {
++ imc_global_refc.refc = 0;
++ imc_global_refc.id = 0;
++ }
++ mutex_unlock(&imc_global_refc.lock);
++}
++
+ static void core_imc_counters_release(struct perf_event *event)
+ {
+ int rc, core_id;
+@@ -759,6 +796,8 @@ static void core_imc_counters_release(struct perf_event *event)
+ ref->refc = 0;
+ }
+ mutex_unlock(&ref->lock);
++
++ reset_global_refc(event);
+ }
+
+ static int core_imc_event_init(struct perf_event *event)
+@@ -819,6 +858,29 @@ static int core_imc_event_init(struct perf_event *event)
+ ++ref->refc;
+ mutex_unlock(&ref->lock);
+
++ /*
++ * Since the system can run either in accumulation or trace-mode
++ * of IMC at a time, core-imc events are allowed only if no other
++ * trace/thread imc events are enabled/monitored.
++ *
++ * Take the global lock, and check the refc.id
++ * to know whether any other trace/thread imc
++ * events are running.
++ */
++ mutex_lock(&imc_global_refc.lock);
++ if (imc_global_refc.id == 0 || imc_global_refc.id == IMC_DOMAIN_CORE) {
++ /*
++ * No other trace/thread imc events are running in
++ * the system, so set the refc.id to core-imc.
++ */
++ imc_global_refc.id = IMC_DOMAIN_CORE;
++ imc_global_refc.refc++;
++ } else {
++ mutex_unlock(&imc_global_refc.lock);
++ return -EBUSY;
++ }
++ mutex_unlock(&imc_global_refc.lock);
++
+ event->hw.event_base = (u64)pcmi->vbase + (config & IMC_EVENT_OFFSET_MASK);
+ event->destroy = core_imc_counters_release;
+ return 0;
+@@ -877,7 +939,23 @@ static int ppc_thread_imc_cpu_online(unsigned int cpu)
+
+ static int ppc_thread_imc_cpu_offline(unsigned int cpu)
+ {
+- mtspr(SPRN_LDBAR, 0);
++ /*
++ * Set the bit 0 of LDBAR to zero.
++ *
++ * If bit 0 of LDBAR is unset, it will stop posting
++ * the counter data to memory.
++ * For thread-imc, bit 0 of LDBAR will be set to 1 in the
++ * event_add function. So reset this bit here, to stop the updates
++ * to memory in the cpu_offline path.
++ */
++ mtspr(SPRN_LDBAR, (mfspr(SPRN_LDBAR) & (~(1UL << 63))));
++
++ /* Reduce the refc if thread-imc event running on this cpu */
++ mutex_lock(&imc_global_refc.lock);
++ if (imc_global_refc.id == IMC_DOMAIN_THREAD)
++ imc_global_refc.refc--;
++ mutex_unlock(&imc_global_refc.lock);
++
+ return 0;
+ }
+
+@@ -916,7 +994,22 @@ static int thread_imc_event_init(struct perf_event *event)
+ if (!target)
+ return -EINVAL;
+
++ mutex_lock(&imc_global_refc.lock);
++ /*
++ * Check if any other trace/core imc events are running in the
++ * system, if not set the global id to thread-imc.
++ */
++ if (imc_global_refc.id == 0 || imc_global_refc.id == IMC_DOMAIN_THREAD) {
++ imc_global_refc.id = IMC_DOMAIN_THREAD;
++ imc_global_refc.refc++;
++ } else {
++ mutex_unlock(&imc_global_refc.lock);
++ return -EBUSY;
++ }
++ mutex_unlock(&imc_global_refc.lock);
++
+ event->pmu->task_ctx_nr = perf_sw_context;
++ event->destroy = reset_global_refc;
+ return 0;
+ }
+
+@@ -1063,10 +1156,12 @@ static void thread_imc_event_del(struct perf_event *event, int flags)
+ int core_id;
+ struct imc_pmu_ref *ref;
+
+- mtspr(SPRN_LDBAR, 0);
+-
+ core_id = smp_processor_id() / threads_per_core;
+ ref = &core_imc_refc[core_id];
++ if (!ref) {
++ pr_debug("imc: Failed to get event reference count\n");
++ return;
++ }
+
+ mutex_lock(&ref->lock);
+ ref->refc--;
+@@ -1082,6 +1177,10 @@ static void thread_imc_event_del(struct perf_event *event, int flags)
+ ref->refc = 0;
+ }
+ mutex_unlock(&ref->lock);
++
++ /* Set bit 0 of LDBAR to zero, to stop posting updates to memory */
++ mtspr(SPRN_LDBAR, (mfspr(SPRN_LDBAR) & (~(1UL << 63))));
++
+ /*
+ * Take a snapshot and calculate the delta and update
+ * the event counter values.
+@@ -1133,7 +1232,18 @@ static int ppc_trace_imc_cpu_online(unsigned int cpu)
+
+ static int ppc_trace_imc_cpu_offline(unsigned int cpu)
+ {
+- mtspr(SPRN_LDBAR, 0);
++ /*
++ * No need to set bit 0 of LDBAR to zero, as
++ * it is set to zero for imc trace-mode
++ *
++ * Reduce the refc if any trace-imc event running
++ * on this cpu.
++ */
++ mutex_lock(&imc_global_refc.lock);
++ if (imc_global_refc.id == IMC_DOMAIN_TRACE)
++ imc_global_refc.refc--;
++ mutex_unlock(&imc_global_refc.lock);
++
+ return 0;
+ }
+
+@@ -1226,15 +1336,14 @@ static int trace_imc_event_add(struct perf_event *event, int flags)
+ local_mem = get_trace_imc_event_base_addr();
+ ldbar_value = ((u64)local_mem & THREAD_IMC_LDBAR_MASK) | TRACE_IMC_ENABLE;
+
+- if (core_imc_refc)
+- ref = &core_imc_refc[core_id];
++ /* trace-imc reference count */
++ if (trace_imc_refc)
++ ref = &trace_imc_refc[core_id];
+ if (!ref) {
+- /* If core-imc is not enabled, use trace-imc reference count */
+- if (trace_imc_refc)
+- ref = &trace_imc_refc[core_id];
+- if (!ref)
+- return -EINVAL;
++ pr_debug("imc: Failed to get the event reference count\n");
++ return -EINVAL;
+ }
++
+ mtspr(SPRN_LDBAR, ldbar_value);
+ mutex_lock(&ref->lock);
+ if (ref->refc == 0) {
+@@ -1242,13 +1351,11 @@ static int trace_imc_event_add(struct perf_event *event, int flags)
+ get_hard_smp_processor_id(smp_processor_id()))) {
+ mutex_unlock(&ref->lock);
+ pr_err("trace-imc: Unable to start the counters for core %d\n", core_id);
+- mtspr(SPRN_LDBAR, 0);
+ return -EINVAL;
+ }
+ }
+ ++ref->refc;
+ mutex_unlock(&ref->lock);
+-
+ return 0;
+ }
+
+@@ -1274,16 +1381,13 @@ static void trace_imc_event_del(struct perf_event *event, int flags)
+ int core_id = smp_processor_id() / threads_per_core;
+ struct imc_pmu_ref *ref = NULL;
+
+- if (core_imc_refc)
+- ref = &core_imc_refc[core_id];
++ if (trace_imc_refc)
++ ref = &trace_imc_refc[core_id];
+ if (!ref) {
+- /* If core-imc is not enabled, use trace-imc reference count */
+- if (trace_imc_refc)
+- ref = &trace_imc_refc[core_id];
+- if (!ref)
+- return;
++ pr_debug("imc: Failed to get event reference count\n");
++ return;
+ }
+- mtspr(SPRN_LDBAR, 0);
++
+ mutex_lock(&ref->lock);
+ ref->refc--;
+ if (ref->refc == 0) {
+@@ -1297,6 +1401,7 @@ static void trace_imc_event_del(struct perf_event *event, int flags)
+ ref->refc = 0;
+ }
+ mutex_unlock(&ref->lock);
++
+ trace_imc_event_stop(event, flags);
+ }
+
+@@ -1314,10 +1419,30 @@ static int trace_imc_event_init(struct perf_event *event)
+ if (event->attr.sample_period == 0)
+ return -ENOENT;
+
++ /*
++ * Take the global lock, and make sure
++ * no other thread is running any core/thread imc
++ * events
++ */
++ mutex_lock(&imc_global_refc.lock);
++ if (imc_global_refc.id == 0 || imc_global_refc.id == IMC_DOMAIN_TRACE) {
++ /*
++ * No core/thread imc events are running in the
++ * system, so set the refc.id to trace-imc.
++ */
++ imc_global_refc.id = IMC_DOMAIN_TRACE;
++ imc_global_refc.refc++;
++ } else {
++ mutex_unlock(&imc_global_refc.lock);
++ return -EBUSY;
++ }
++ mutex_unlock(&imc_global_refc.lock);
++
+ event->hw.idx = -1;
+ target = event->hw.target;
+
+ event->pmu->task_ctx_nr = perf_hw_context;
++ event->destroy = reset_global_refc;
+ return 0;
+ }
+
+@@ -1429,10 +1554,10 @@ static void cleanup_all_core_imc_memory(void)
+ static void thread_imc_ldbar_disable(void *dummy)
+ {
+ /*
+- * By Zeroing LDBAR, we disable thread-imc
+- * updates.
++ * By setting 0th bit of LDBAR to zero, we disable thread-imc
++ * updates to memory.
+ */
+- mtspr(SPRN_LDBAR, 0);
++ mtspr(SPRN_LDBAR, (mfspr(SPRN_LDBAR) & (~(1UL << 63))));
+ }
+
+ void thread_imc_disable(void)
+diff --git a/arch/riscv/include/asm/ftrace.h b/arch/riscv/include/asm/ftrace.h
+index c6dcc5291f972..02fbc175142e2 100644
+--- a/arch/riscv/include/asm/ftrace.h
++++ b/arch/riscv/include/asm/ftrace.h
+@@ -63,4 +63,11 @@ do { \
+ * Let auipc+jalr be the basic *mcount unit*, so we make it 8 bytes here.
+ */
+ #define MCOUNT_INSN_SIZE 8
++
++#ifndef __ASSEMBLY__
++struct dyn_ftrace;
++int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);
++#define ftrace_init_nop ftrace_init_nop
++#endif
++
+ #endif
+diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c
+index c40fdcdeb950a..291c579e12457 100644
+--- a/arch/riscv/kernel/ftrace.c
++++ b/arch/riscv/kernel/ftrace.c
+@@ -88,6 +88,25 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
+ return __ftrace_modify_call(rec->ip, addr, false);
+ }
+
++
++/*
++ * This is called early on, and isn't wrapped by
++ * ftrace_arch_code_modify_{prepare,post_process}() and therefor doesn't hold
++ * text_mutex, which triggers a lockdep failure. SMP isn't running so we could
++ * just directly poke the text, but it's simpler to just take the lock
++ * ourselves.
++ */
++int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
++{
++ int out;
++
++ ftrace_arch_code_modify_prepare();
++ out = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
++ ftrace_arch_code_modify_post_process();
++
++ return out;
++}
++
+ int ftrace_update_ftrace_func(ftrace_func_t func)
+ {
+ int ret = __ftrace_modify_call((unsigned long)&ftrace_call,
+diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
+index 34a655ad71234..5ce586948d926 100644
+--- a/arch/s390/include/asm/pgtable.h
++++ b/arch/s390/include/asm/pgtable.h
+@@ -1247,26 +1247,46 @@ static inline pgd_t *pgd_offset_raw(pgd_t *pgd, unsigned long address)
+ #define pgd_offset(mm, address) pgd_offset_raw(READ_ONCE((mm)->pgd), address)
+ #define pgd_offset_k(address) pgd_offset(&init_mm, address)
+
+-static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
++static inline p4d_t *p4d_offset_lockless(pgd_t *pgdp, pgd_t pgd, unsigned long address)
+ {
+- if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R1)
+- return (p4d_t *) pgd_deref(*pgd) + p4d_index(address);
+- return (p4d_t *) pgd;
++ if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R1)
++ return (p4d_t *) pgd_deref(pgd) + p4d_index(address);
++ return (p4d_t *) pgdp;
+ }
++#define p4d_offset_lockless p4d_offset_lockless
+
+-static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
++static inline p4d_t *p4d_offset(pgd_t *pgdp, unsigned long address)
+ {
+- if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R2)
+- return (pud_t *) p4d_deref(*p4d) + pud_index(address);
+- return (pud_t *) p4d;
++ return p4d_offset_lockless(pgdp, *pgdp, address);
+ }
+
+-static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
++static inline pud_t *pud_offset_lockless(p4d_t *p4dp, p4d_t p4d, unsigned long address)
+ {
+- if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R3)
+- return (pmd_t *) pud_deref(*pud) + pmd_index(address);
+- return (pmd_t *) pud;
++ if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R2)
++ return (pud_t *) p4d_deref(p4d) + pud_index(address);
++ return (pud_t *) p4dp;
+ }
++#define pud_offset_lockless pud_offset_lockless
++
++static inline pud_t *pud_offset(p4d_t *p4dp, unsigned long address)
++{
++ return pud_offset_lockless(p4dp, *p4dp, address);
++}
++#define pud_offset pud_offset
++
++static inline pmd_t *pmd_offset_lockless(pud_t *pudp, pud_t pud, unsigned long address)
++{
++ if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R3)
++ return (pmd_t *) pud_deref(pud) + pmd_index(address);
++ return (pmd_t *) pudp;
++}
++#define pmd_offset_lockless pmd_offset_lockless
++
++static inline pmd_t *pmd_offset(pud_t *pudp, unsigned long address)
++{
++ return pmd_offset_lockless(pudp, *pudp, address);
++}
++#define pmd_offset pmd_offset
+
+ static inline pte_t *pte_offset(pmd_t *pmd, unsigned long address)
+ {
+diff --git a/arch/s390/include/asm/stacktrace.h b/arch/s390/include/asm/stacktrace.h
+index 0ae4bbf7779c8..3679d224fd3c5 100644
+--- a/arch/s390/include/asm/stacktrace.h
++++ b/arch/s390/include/asm/stacktrace.h
+@@ -111,4 +111,15 @@ struct stack_frame {
+ r2; \
+ })
+
++#define CALL_ON_STACK_NORETURN(fn, stack) \
++({ \
++ asm volatile( \
++ " la 15,0(%[_stack])\n" \
++ " xc %[_bc](8,15),%[_bc](15)\n" \
++ " brasl 14,%[_fn]\n" \
++ ::[_bc] "i" (offsetof(struct stack_frame, back_chain)), \
++ [_stack] "a" (stack), [_fn] "X" (fn)); \
++ BUG(); \
++})
++
+ #endif /* _ASM_S390_STACKTRACE_H */
+diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
+index 8371855042dc2..da550cb8b31bd 100644
+--- a/arch/s390/kernel/irq.c
++++ b/arch/s390/kernel/irq.c
+@@ -294,11 +294,6 @@ static irqreturn_t do_ext_interrupt(int irq, void *dummy)
+ return IRQ_HANDLED;
+ }
+
+-static struct irqaction external_interrupt = {
+- .name = "EXT",
+- .handler = do_ext_interrupt,
+-};
+-
+ void __init init_ext_interrupts(void)
+ {
+ int idx;
+@@ -308,7 +303,8 @@ void __init init_ext_interrupts(void)
+
+ irq_set_chip_and_handler(EXT_INTERRUPT,
+ &dummy_irq_chip, handle_percpu_irq);
+- setup_irq(EXT_INTERRUPT, &external_interrupt);
++ if (request_irq(EXT_INTERRUPT, do_ext_interrupt, 0, "EXT", NULL))
++ panic("Failed to register EXT interrupt\n");
+ }
+
+ static DEFINE_SPINLOCK(irq_subclass_lock);
+diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
+index 229e1e2f8253a..996e447ead3a6 100644
+--- a/arch/s390/kernel/perf_cpum_sf.c
++++ b/arch/s390/kernel/perf_cpum_sf.c
+@@ -1429,8 +1429,8 @@ static int aux_output_begin(struct perf_output_handle *handle,
+ idx = aux->empty_mark + 1;
+ for (i = 0; i < range_scan; i++, idx++) {
+ te = aux_sdb_trailer(aux, idx);
+- te->flags = te->flags & ~SDB_TE_BUFFER_FULL_MASK;
+- te->flags = te->flags & ~SDB_TE_ALERT_REQ_MASK;
++ te->flags &= ~(SDB_TE_BUFFER_FULL_MASK |
++ SDB_TE_ALERT_REQ_MASK);
+ te->overflow = 0;
+ }
+ /* Save the position of empty SDBs */
+@@ -1477,8 +1477,7 @@ static bool aux_set_alert(struct aux_buffer *aux, unsigned long alert_index,
+ te = aux_sdb_trailer(aux, alert_index);
+ do {
+ orig_flags = te->flags;
+- orig_overflow = te->overflow;
+- *overflow = orig_overflow;
++ *overflow = orig_overflow = te->overflow;
+ if (orig_flags & SDB_TE_BUFFER_FULL_MASK) {
+ /*
+ * SDB is already set by hardware.
+@@ -1712,7 +1711,7 @@ static void *aux_buffer_setup(struct perf_event *event, void **pages,
+ }
+
+ /* Allocate aux_buffer struct for the event */
+- aux = kmalloc(sizeof(struct aux_buffer), GFP_KERNEL);
++ aux = kzalloc(sizeof(struct aux_buffer), GFP_KERNEL);
+ if (!aux)
+ goto no_aux;
+ sfb = &aux->sfb;
+diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
+index 07b2b61a0289f..61c02a162d378 100644
+--- a/arch/s390/kernel/setup.c
++++ b/arch/s390/kernel/setup.c
+@@ -356,7 +356,6 @@ early_initcall(async_stack_realloc);
+
+ void __init arch_call_rest_init(void)
+ {
+- struct stack_frame *frame;
+ unsigned long stack;
+
+ stack = stack_alloc();
+@@ -369,13 +368,7 @@ void __init arch_call_rest_init(void)
+ set_task_stack_end_magic(current);
+ stack += STACK_INIT_OFFSET;
+ S390_lowcore.kernel_stack = stack;
+- frame = (struct stack_frame *) stack;
+- memset(frame, 0, sizeof(*frame));
+- /* Branch to rest_init on the new stack, never returns */
+- asm volatile(
+- " la 15,0(%[_frame])\n"
+- " jg rest_init\n"
+- : : [_frame] "a" (frame));
++ CALL_ON_STACK_NORETURN(rest_init, stack);
+ }
+
+ static void __init setup_lowcore_dat_off(void)
+@@ -634,7 +627,7 @@ static struct notifier_block kdump_mem_nb = {
+ /*
+ * Make sure that the area behind memory_end is protected
+ */
+-static void reserve_memory_end(void)
++static void __init reserve_memory_end(void)
+ {
+ if (memory_end_set)
+ memblock_reserve(memory_end, ULONG_MAX);
+@@ -643,7 +636,7 @@ static void reserve_memory_end(void)
+ /*
+ * Make sure that oldmem, where the dump is stored, is protected
+ */
+-static void reserve_oldmem(void)
++static void __init reserve_oldmem(void)
+ {
+ #ifdef CONFIG_CRASH_DUMP
+ if (OLDMEM_BASE)
+@@ -655,7 +648,7 @@ static void reserve_oldmem(void)
+ /*
+ * Make sure that oldmem, where the dump is stored, is protected
+ */
+-static void remove_oldmem(void)
++static void __init remove_oldmem(void)
+ {
+ #ifdef CONFIG_CRASH_DUMP
+ if (OLDMEM_BASE)
+diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
+index 66bf050d785cf..ad426cc656e56 100644
+--- a/arch/s390/kernel/smp.c
++++ b/arch/s390/kernel/smp.c
+@@ -878,7 +878,7 @@ static void __no_sanitize_address smp_start_secondary(void *cpuvoid)
+ S390_lowcore.restart_source = -1UL;
+ __ctl_load(S390_lowcore.cregs_save_area, 0, 15);
+ __load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
+- CALL_ON_STACK(smp_init_secondary, S390_lowcore.kernel_stack, 0);
++ CALL_ON_STACK_NORETURN(smp_init_secondary, S390_lowcore.kernel_stack);
+ }
+
+ /* Upping and downing of CPUs */
+diff --git a/arch/x86/include/asm/crash.h b/arch/x86/include/asm/crash.h
+index ef5638f641f2b..88eadd08ad708 100644
+--- a/arch/x86/include/asm/crash.h
++++ b/arch/x86/include/asm/crash.h
+@@ -10,4 +10,10 @@ int crash_setup_memmap_entries(struct kimage *image,
+ struct boot_params *params);
+ void crash_smp_send_stop(void);
+
++#ifdef CONFIG_KEXEC_CORE
++void __init crash_reserve_low_1M(void);
++#else
++static inline void __init crash_reserve_low_1M(void) { }
++#endif
++
+ #endif /* _ASM_X86_CRASH_H */
+diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
+index 5c24a7b351665..b222a35959467 100644
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -320,7 +320,7 @@ DECLARE_STATIC_KEY_FALSE(mds_idle_clear);
+ * combination with microcode which triggers a CPU buffer flush when the
+ * instruction is executed.
+ */
+-static inline void mds_clear_cpu_buffers(void)
++static __always_inline void mds_clear_cpu_buffers(void)
+ {
+ static const u16 ds = __KERNEL_DS;
+
+@@ -341,7 +341,7 @@ static inline void mds_clear_cpu_buffers(void)
+ *
+ * Clear CPU buffers if the corresponding static key is enabled
+ */
+-static inline void mds_user_clear_cpu_buffers(void)
++static __always_inline void mds_user_clear_cpu_buffers(void)
+ {
+ if (static_branch_likely(&mds_user_clear))
+ mds_clear_cpu_buffers();
+diff --git a/arch/x86/include/asm/pkeys.h b/arch/x86/include/asm/pkeys.h
+index 19b137f1b3beb..2ff9b98812b76 100644
+--- a/arch/x86/include/asm/pkeys.h
++++ b/arch/x86/include/asm/pkeys.h
+@@ -4,6 +4,11 @@
+
+ #define ARCH_DEFAULT_PKEY 0
+
1438 |
++/* |
1439 |
++ * If more than 16 keys are ever supported, a thorough audit |
1440 |
++ * will be necessary to ensure that the types that store key |
1441 |
++ * numbers and masks have sufficient capacity. |
1442 |
++ */ |
1443 |
+ #define arch_max_pkey() (boot_cpu_has(X86_FEATURE_OSPKE) ? 16 : 1) |
1444 |
+ |
1445 |
+ extern int arch_set_user_pkey_access(struct task_struct *tsk, int pkey, |
1446 |
+diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c |
1447 |
+index ea6d9da9b0941..4b6301946f455 100644 |
1448 |
+--- a/arch/x86/kernel/apic/io_apic.c |
1449 |
++++ b/arch/x86/kernel/apic/io_apic.c |
1450 |
+@@ -2256,6 +2256,7 @@ static inline void __init check_timer(void) |
1451 |
+ legacy_pic->init(0); |
1452 |
+ legacy_pic->make_irq(0); |
1453 |
+ apic_write(APIC_LVT0, APIC_DM_EXTINT); |
1454 |
++ legacy_pic->unmask(0); |
1455 |
+ |
1456 |
+ unlock_ExtINT_logic(); |
1457 |
+ |
1458 |
+diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c |
1459 |
+index eb651fbde92ac..ff25a2ea271cf 100644 |
1460 |
+--- a/arch/x86/kernel/crash.c |
1461 |
++++ b/arch/x86/kernel/crash.c |
1462 |
+@@ -24,6 +24,7 @@ |
1463 |
+ #include <linux/export.h> |
1464 |
+ #include <linux/slab.h> |
1465 |
+ #include <linux/vmalloc.h> |
1466 |
++#include <linux/memblock.h> |
1467 |
+ |
1468 |
+ #include <asm/processor.h> |
1469 |
+ #include <asm/hardirq.h> |
1470 |
+@@ -39,6 +40,7 @@ |
1471 |
+ #include <asm/virtext.h> |
1472 |
+ #include <asm/intel_pt.h> |
1473 |
+ #include <asm/crash.h> |
1474 |
++#include <asm/cmdline.h> |
1475 |
+ |
1476 |
+ /* Used while preparing memory map entries for second kernel */ |
1477 |
+ struct crash_memmap_data { |
1478 |
+@@ -68,6 +70,19 @@ static inline void cpu_crash_vmclear_loaded_vmcss(void) |
1479 |
+ rcu_read_unlock(); |
1480 |
+ } |
1481 |
+ |
1482 |
++/* |
1483 |
++ * When the crashkernel option is specified, only use the low |
1484 |
++ * 1M for the real mode trampoline. |
1485 |
++ */ |
1486 |
++void __init crash_reserve_low_1M(void) |
1487 |
++{ |
1488 |
++ if (cmdline_find_option(boot_command_line, "crashkernel", NULL, 0) < 0) |
1489 |
++ return; |
1490 |
++ |
1491 |
++ memblock_reserve(0, 1<<20); |
1492 |
++ pr_info("Reserving the low 1M of memory for crashkernel\n"); |
1493 |
++} |
1494 |
++ |
1495 |
+ #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC) |
1496 |
+ |
1497 |
+ static void kdump_nmi_callback(int cpu, struct pt_regs *regs) |
1498 |
+diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c |
1499 |
+index 755eb26cbec04..735d1f1bbabc7 100644 |
1500 |
+--- a/arch/x86/kernel/fpu/xstate.c |
1501 |
++++ b/arch/x86/kernel/fpu/xstate.c |
1502 |
+@@ -895,8 +895,6 @@ const void *get_xsave_field_ptr(int xfeature_nr) |
1503 |
+ |
1504 |
+ #ifdef CONFIG_ARCH_HAS_PKEYS |
1505 |
+ |
1506 |
+-#define NR_VALID_PKRU_BITS (CONFIG_NR_PROTECTION_KEYS * 2) |
1507 |
+-#define PKRU_VALID_MASK (NR_VALID_PKRU_BITS - 1) |
1508 |
+ /* |
1509 |
+ * This will go out and modify PKRU register to set the access |
1510 |
+ * rights for @pkey to @init_val. |
1511 |
+@@ -915,6 +913,13 @@ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey, |
1512 |
+ if (!boot_cpu_has(X86_FEATURE_OSPKE)) |
1513 |
+ return -EINVAL; |
1514 |
+ |
1515 |
++ /* |
1516 |
++ * This code should only be called with valid 'pkey' |
1517 |
++ * values originating from in-kernel users. Complain |
1518 |
++ * if a bad value is observed. |
1519 |
++ */ |
1520 |
++ WARN_ON_ONCE(pkey >= arch_max_pkey()); |
1521 |
++ |
1522 |
+ /* Set the bits we need in PKRU: */ |
1523 |
+ if (init_val & PKEY_DISABLE_ACCESS) |
1524 |
+ new_pkru_bits |= PKRU_AD_BIT; |
1525 |
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c |
1526 |
+index 128d3ad46e965..cc7823e7ef96c 100644 |
1527 |
+--- a/arch/x86/kvm/emulate.c |
1528 |
++++ b/arch/x86/kvm/emulate.c |
1529 |
+@@ -5836,6 +5836,8 @@ writeback: |
1530 |
+ } |
1531 |
+ |
1532 |
+ ctxt->eip = ctxt->_eip; |
1533 |
++ if (ctxt->mode != X86EMUL_MODE_PROT64) |
1534 |
++ ctxt->eip = (u32)ctxt->_eip; |
1535 |
+ |
1536 |
+ done: |
1537 |
+ if (rc == X86EMUL_PROPAGATE_FAULT) { |
1538 |
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c |
1539 |
+index 6920f1d3b66f5..9f793c9649cdf 100644 |
1540 |
+--- a/arch/x86/kvm/lapic.c |
1541 |
++++ b/arch/x86/kvm/lapic.c |
1542 |
+@@ -1684,7 +1684,7 @@ static void start_sw_period(struct kvm_lapic *apic) |
1543 |
+ |
1544 |
+ hrtimer_start(&apic->lapic_timer.timer, |
1545 |
+ apic->lapic_timer.target_expiration, |
1546 |
+- HRTIMER_MODE_ABS); |
1547 |
++ HRTIMER_MODE_ABS_HARD); |
1548 |
+ } |
1549 |
+ |
1550 |
+ bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu) |
1551 |
+diff --git a/arch/x86/kvm/mmutrace.h b/arch/x86/kvm/mmutrace.h |
1552 |
+index 3c6522b84ff11..ffcd96fc02d0a 100644 |
1553 |
+--- a/arch/x86/kvm/mmutrace.h |
1554 |
++++ b/arch/x86/kvm/mmutrace.h |
1555 |
+@@ -339,7 +339,7 @@ TRACE_EVENT( |
1556 |
+ /* These depend on page entry type, so compute them now. */ |
1557 |
+ __field(bool, r) |
1558 |
+ __field(bool, x) |
1559 |
+- __field(u8, u) |
1560 |
++ __field(signed char, u) |
1561 |
+ ), |
1562 |
+ |
1563 |
+ TP_fast_assign( |
1564 |
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c |
1565 |
+index 3243a80ea32c0..b58495fde2e89 100644 |
1566 |
+--- a/arch/x86/kvm/svm.c |
1567 |
++++ b/arch/x86/kvm/svm.c |
1568 |
+@@ -787,9 +787,6 @@ static int skip_emulated_instruction(struct kvm_vcpu *vcpu) |
1569 |
+ if (!kvm_emulate_instruction(vcpu, EMULTYPE_SKIP)) |
1570 |
+ return 0; |
1571 |
+ } else { |
1572 |
+- if (svm->next_rip - kvm_rip_read(vcpu) > MAX_INST_SIZE) |
1573 |
+- pr_err("%s: ip 0x%lx next 0x%llx\n", |
1574 |
+- __func__, kvm_rip_read(vcpu), svm->next_rip); |
1575 |
+ kvm_rip_write(vcpu, svm->next_rip); |
1576 |
+ } |
1577 |
+ svm_set_interrupt_shadow(vcpu, 0); |
1578 |
+@@ -3970,6 +3967,12 @@ static int iret_interception(struct vcpu_svm *svm) |
1579 |
+ return 1; |
1580 |
+ } |
1581 |
+ |
1582 |
++static int invd_interception(struct vcpu_svm *svm) |
1583 |
++{ |
1584 |
++ /* Treat an INVD instruction as a NOP and just skip it. */ |
1585 |
++ return kvm_skip_emulated_instruction(&svm->vcpu); |
1586 |
++} |
1587 |
++ |
1588 |
+ static int invlpg_interception(struct vcpu_svm *svm) |
1589 |
+ { |
1590 |
+ if (!static_cpu_has(X86_FEATURE_DECODEASSISTS)) |
1591 |
+@@ -4822,7 +4825,7 @@ static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = { |
1592 |
+ [SVM_EXIT_RDPMC] = rdpmc_interception, |
1593 |
+ [SVM_EXIT_CPUID] = cpuid_interception, |
1594 |
+ [SVM_EXIT_IRET] = iret_interception, |
1595 |
+- [SVM_EXIT_INVD] = emulate_on_interception, |
1596 |
++ [SVM_EXIT_INVD] = invd_interception, |
1597 |
+ [SVM_EXIT_PAUSE] = pause_interception, |
1598 |
+ [SVM_EXIT_HLT] = halt_interception, |
1599 |
+ [SVM_EXIT_INVLPG] = invlpg_interception, |
1600 |
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c |
1601 |
+index a1e62dda56074..2a1ed3aae100e 100644 |
1602 |
+--- a/arch/x86/kvm/vmx/vmx.c |
1603 |
++++ b/arch/x86/kvm/vmx/vmx.c |
1604 |
+@@ -1130,6 +1130,10 @@ void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu) |
1605 |
+ vmx->guest_msrs[i].mask); |
1606 |
+ |
1607 |
+ } |
1608 |
++ |
1609 |
++ if (vmx->nested.need_vmcs12_to_shadow_sync) |
1610 |
++ nested_sync_vmcs12_to_shadow(vcpu); |
1611 |
++ |
1612 |
+ if (vmx->guest_state_loaded) |
1613 |
+ return; |
1614 |
+ |
1615 |
+@@ -1537,7 +1541,7 @@ static int vmx_rtit_ctl_check(struct kvm_vcpu *vcpu, u64 data) |
1616 |
+ |
1617 |
+ static int skip_emulated_instruction(struct kvm_vcpu *vcpu) |
1618 |
+ { |
1619 |
+- unsigned long rip; |
1620 |
++ unsigned long rip, orig_rip; |
1621 |
+ |
1622 |
+ /* |
1623 |
+ * Using VMCS.VM_EXIT_INSTRUCTION_LEN on EPT misconfig depends on |
1624 |
+@@ -1549,8 +1553,17 @@ static int skip_emulated_instruction(struct kvm_vcpu *vcpu) |
1625 |
+ */ |
1626 |
+ if (!static_cpu_has(X86_FEATURE_HYPERVISOR) || |
1627 |
+ to_vmx(vcpu)->exit_reason != EXIT_REASON_EPT_MISCONFIG) { |
1628 |
+- rip = kvm_rip_read(vcpu); |
1629 |
+- rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN); |
1630 |
++ orig_rip = kvm_rip_read(vcpu); |
1631 |
++ rip = orig_rip + vmcs_read32(VM_EXIT_INSTRUCTION_LEN); |
1632 |
++#ifdef CONFIG_X86_64 |
1633 |
++ /* |
1634 |
++ * We need to mask out the high 32 bits of RIP if not in 64-bit |
1635 |
++ * mode, but just finding out that we are in 64-bit mode is |
1636 |
++ * quite expensive. Only do it if there was a carry. |
1637 |
++ */ |
1638 |
++ if (unlikely(((rip ^ orig_rip) >> 31) == 3) && !is_64_bit_mode(vcpu)) |
1639 |
++ rip = (u32)rip; |
1640 |
++#endif |
1641 |
+ kvm_rip_write(vcpu, rip); |
1642 |
+ } else { |
1643 |
+ if (!kvm_emulate_instruction(vcpu, EMULTYPE_SKIP)) |
1644 |
+@@ -6486,8 +6499,11 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu) |
1645 |
+ vmcs_write32(PLE_WINDOW, vmx->ple_window); |
1646 |
+ } |
1647 |
+ |
1648 |
+- if (vmx->nested.need_vmcs12_to_shadow_sync) |
1649 |
+- nested_sync_vmcs12_to_shadow(vcpu); |
1650 |
++ /* |
1651 |
++ * We did this in prepare_switch_to_guest, because it needs to |
1652 |
++ * be within srcu_read_lock. |
1653 |
++ */ |
1654 |
++ WARN_ON_ONCE(vmx->nested.need_vmcs12_to_shadow_sync); |
1655 |
+ |
1656 |
+ if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty)) |
1657 |
+ vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]); |
1658 |
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c |
1659 |
+index 8920ee7b28811..12e83297ea020 100644 |
1660 |
+--- a/arch/x86/kvm/x86.c |
1661 |
++++ b/arch/x86/kvm/x86.c |
1662 |
+@@ -973,6 +973,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) |
1663 |
+ unsigned long old_cr4 = kvm_read_cr4(vcpu); |
1664 |
+ unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE | |
1665 |
+ X86_CR4_SMEP; |
1666 |
++ unsigned long mmu_role_bits = pdptr_bits | X86_CR4_SMAP | X86_CR4_PKE; |
1667 |
+ |
1668 |
+ if (kvm_valid_cr4(vcpu, cr4)) |
1669 |
+ return 1; |
1670 |
+@@ -1000,7 +1001,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) |
1671 |
+ if (kvm_x86_ops->set_cr4(vcpu, cr4)) |
1672 |
+ return 1; |
1673 |
+ |
1674 |
+- if (((cr4 ^ old_cr4) & pdptr_bits) || |
1675 |
++ if (((cr4 ^ old_cr4) & mmu_role_bits) || |
1676 |
+ (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE))) |
1677 |
+ kvm_mmu_reset_context(vcpu); |
1678 |
+ |
1679 |
+@@ -5050,10 +5051,13 @@ set_identity_unlock: |
1680 |
+ r = -EFAULT; |
1681 |
+ if (copy_from_user(&u.ps, argp, sizeof(u.ps))) |
1682 |
+ goto out; |
1683 |
++ mutex_lock(&kvm->lock); |
1684 |
+ r = -ENXIO; |
1685 |
+ if (!kvm->arch.vpit) |
1686 |
+- goto out; |
1687 |
++ goto set_pit_out; |
1688 |
+ r = kvm_vm_ioctl_set_pit(kvm, &u.ps); |
1689 |
++set_pit_out: |
1690 |
++ mutex_unlock(&kvm->lock); |
1691 |
+ break; |
1692 |
+ } |
1693 |
+ case KVM_GET_PIT2: { |
1694 |
+@@ -5073,10 +5077,13 @@ set_identity_unlock: |
1695 |
+ r = -EFAULT; |
1696 |
+ if (copy_from_user(&u.ps2, argp, sizeof(u.ps2))) |
1697 |
+ goto out; |
1698 |
++ mutex_lock(&kvm->lock); |
1699 |
+ r = -ENXIO; |
1700 |
+ if (!kvm->arch.vpit) |
1701 |
+- goto out; |
1702 |
++ goto set_pit2_out; |
1703 |
+ r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2); |
1704 |
++set_pit2_out: |
1705 |
++ mutex_unlock(&kvm->lock); |
1706 |
+ break; |
1707 |
+ } |
1708 |
+ case KVM_REINJECT_CONTROL: { |
1709 |
+diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c |
1710 |
+index b0dfac3d3df71..1847e993ac63a 100644 |
1711 |
+--- a/arch/x86/lib/usercopy_64.c |
1712 |
++++ b/arch/x86/lib/usercopy_64.c |
1713 |
+@@ -120,7 +120,7 @@ long __copy_user_flushcache(void *dst, const void __user *src, unsigned size) |
1714 |
+ */ |
1715 |
+ if (size < 8) { |
1716 |
+ if (!IS_ALIGNED(dest, 4) || size != 4) |
1717 |
+- clean_cache_range(dst, 1); |
1718 |
++ clean_cache_range(dst, size); |
1719 |
+ } else { |
1720 |
+ if (!IS_ALIGNED(dest, 8)) { |
1721 |
+ dest = ALIGN(dest, boot_cpu_data.x86_clflush_size); |
1722 |
+diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c |
1723 |
+index 7dce39c8c034a..262f83cad3551 100644 |
1724 |
+--- a/arch/x86/realmode/init.c |
1725 |
++++ b/arch/x86/realmode/init.c |
1726 |
+@@ -8,6 +8,7 @@ |
1727 |
+ #include <asm/pgtable.h> |
1728 |
+ #include <asm/realmode.h> |
1729 |
+ #include <asm/tlbflush.h> |
1730 |
++#include <asm/crash.h> |
1731 |
+ |
1732 |
+ struct real_mode_header *real_mode_header; |
1733 |
+ u32 *trampoline_cr4_features; |
1734 |
+@@ -34,6 +35,7 @@ void __init reserve_real_mode(void) |
1735 |
+ |
1736 |
+ memblock_reserve(mem, size); |
1737 |
+ set_real_mode_mem(mem); |
1738 |
++ crash_reserve_low_1M(); |
1739 |
+ } |
1740 |
+ |
1741 |
+ static void __init setup_real_mode(void) |
1742 |
+diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S |
1743 |
+index 59671603c9c62..1f07876ea2ed7 100644 |
1744 |
+--- a/arch/xtensa/kernel/entry.S |
1745 |
++++ b/arch/xtensa/kernel/entry.S |
1746 |
+@@ -1897,6 +1897,7 @@ ENTRY(system_call) |
1747 |
+ |
1748 |
+ mov a6, a2 |
1749 |
+ call4 do_syscall_trace_enter |
1750 |
++ beqz a6, .Lsyscall_exit |
1751 |
+ l32i a7, a2, PT_SYSCALL |
1752 |
+ |
1753 |
+ 1: |
1754 |
+@@ -1911,8 +1912,6 @@ ENTRY(system_call) |
1755 |
+ |
1756 |
+ addx4 a4, a7, a4 |
1757 |
+ l32i a4, a4, 0 |
1758 |
+- movi a5, sys_ni_syscall; |
1759 |
+- beq a4, a5, 1f |
1760 |
+ |
1761 |
+ /* Load args: arg0 - arg5 are passed via regs. */ |
1762 |
+ |
1763 |
+@@ -1932,6 +1931,7 @@ ENTRY(system_call) |
1764 |
+ |
1765 |
+ s32i a6, a2, PT_AREG2 |
1766 |
+ bnez a3, 1f |
1767 |
++.Lsyscall_exit: |
1768 |
+ abi_ret(4) |
1769 |
+ |
1770 |
+ 1: |
1771 |
+diff --git a/arch/xtensa/kernel/ptrace.c b/arch/xtensa/kernel/ptrace.c |
1772 |
+index b964f0b2d8864..145742d70a9f2 100644 |
1773 |
+--- a/arch/xtensa/kernel/ptrace.c |
1774 |
++++ b/arch/xtensa/kernel/ptrace.c |
1775 |
+@@ -542,14 +542,28 @@ long arch_ptrace(struct task_struct *child, long request, |
1776 |
+ return ret; |
1777 |
+ } |
1778 |
+ |
1779 |
+-void do_syscall_trace_enter(struct pt_regs *regs) |
1780 |
++void do_syscall_trace_leave(struct pt_regs *regs); |
1781 |
++int do_syscall_trace_enter(struct pt_regs *regs) |
1782 |
+ { |
1783 |
++ if (regs->syscall == NO_SYSCALL) |
1784 |
++ regs->areg[2] = -ENOSYS; |
1785 |
++ |
1786 |
+ if (test_thread_flag(TIF_SYSCALL_TRACE) && |
1787 |
+- tracehook_report_syscall_entry(regs)) |
1788 |
++ tracehook_report_syscall_entry(regs)) { |
1789 |
++ regs->areg[2] = -ENOSYS; |
1790 |
+ regs->syscall = NO_SYSCALL; |
1791 |
++ return 0; |
1792 |
++ } |
1793 |
++ |
1794 |
++ if (regs->syscall == NO_SYSCALL) { |
1795 |
++ do_syscall_trace_leave(regs); |
1796 |
++ return 0; |
1797 |
++ } |
1798 |
+ |
1799 |
+ if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) |
1800 |
+ trace_sys_enter(regs, syscall_get_nr(current, regs)); |
1801 |
++ |
1802 |
++ return 1; |
1803 |
+ } |
1804 |
+ |
1805 |
+ void do_syscall_trace_leave(struct pt_regs *regs) |
1806 |
+diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c |
1807 |
+index 57eacdcbf8208..1ec55345252b6 100644 |
1808 |
+--- a/drivers/acpi/ec.c |
1809 |
++++ b/drivers/acpi/ec.c |
1810 |
+@@ -1043,29 +1043,21 @@ void acpi_ec_unblock_transactions(void) |
1811 |
+ /* -------------------------------------------------------------------------- |
1812 |
+ Event Management |
1813 |
+ -------------------------------------------------------------------------- */ |
1814 |
+-static struct acpi_ec_query_handler * |
1815 |
+-acpi_ec_get_query_handler(struct acpi_ec_query_handler *handler) |
1816 |
+-{ |
1817 |
+- if (handler) |
1818 |
+- kref_get(&handler->kref); |
1819 |
+- return handler; |
1820 |
+-} |
1821 |
+- |
1822 |
+ static struct acpi_ec_query_handler * |
1823 |
+ acpi_ec_get_query_handler_by_value(struct acpi_ec *ec, u8 value) |
1824 |
+ { |
1825 |
+ struct acpi_ec_query_handler *handler; |
1826 |
+- bool found = false; |
1827 |
+ |
1828 |
+ mutex_lock(&ec->mutex); |
1829 |
+ list_for_each_entry(handler, &ec->list, node) { |
1830 |
+ if (value == handler->query_bit) { |
1831 |
+- found = true; |
1832 |
+- break; |
1833 |
++ kref_get(&handler->kref); |
1834 |
++ mutex_unlock(&ec->mutex); |
1835 |
++ return handler; |
1836 |
+ } |
1837 |
+ } |
1838 |
+ mutex_unlock(&ec->mutex); |
1839 |
+- return found ? acpi_ec_get_query_handler(handler) : NULL; |
1840 |
++ return NULL; |
1841 |
+ } |
1842 |
+ |
1843 |
+ static void acpi_ec_query_handler_release(struct kref *kref) |
1844 |
+diff --git a/drivers/ata/acard-ahci.c b/drivers/ata/acard-ahci.c |
1845 |
+index 753985c015177..46dc54d18f0b7 100644 |
1846 |
+--- a/drivers/ata/acard-ahci.c |
1847 |
++++ b/drivers/ata/acard-ahci.c |
1848 |
+@@ -56,7 +56,7 @@ struct acard_sg { |
1849 |
+ __le32 size; /* bit 31 (EOT) max==0x10000 (64k) */ |
1850 |
+ }; |
1851 |
+ |
1852 |
+-static void acard_ahci_qc_prep(struct ata_queued_cmd *qc); |
1853 |
++static enum ata_completion_errors acard_ahci_qc_prep(struct ata_queued_cmd *qc); |
1854 |
+ static bool acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc); |
1855 |
+ static int acard_ahci_port_start(struct ata_port *ap); |
1856 |
+ static int acard_ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); |
1857 |
+@@ -210,7 +210,7 @@ static unsigned int acard_ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl) |
1858 |
+ return si; |
1859 |
+ } |
1860 |
+ |
1861 |
+-static void acard_ahci_qc_prep(struct ata_queued_cmd *qc) |
1862 |
++static enum ata_completion_errors acard_ahci_qc_prep(struct ata_queued_cmd *qc) |
1863 |
+ { |
1864 |
+ struct ata_port *ap = qc->ap; |
1865 |
+ struct ahci_port_priv *pp = ap->private_data; |
1866 |
+@@ -248,6 +248,8 @@ static void acard_ahci_qc_prep(struct ata_queued_cmd *qc) |
1867 |
+ opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH; |
1868 |
+ |
1869 |
+ ahci_fill_cmd_slot(pp, qc->hw_tag, opts); |
1870 |
++ |
1871 |
++ return AC_ERR_OK; |
1872 |
+ } |
1873 |
+ |
1874 |
+ static bool acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc) |
1875 |
+diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c |
1876 |
+index bff369d9a1a78..ea5bf5f4cbed5 100644 |
1877 |
+--- a/drivers/ata/libahci.c |
1878 |
++++ b/drivers/ata/libahci.c |
1879 |
+@@ -57,7 +57,7 @@ static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val); |
1880 |
+ static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc); |
1881 |
+ static int ahci_port_start(struct ata_port *ap); |
1882 |
+ static void ahci_port_stop(struct ata_port *ap); |
1883 |
+-static void ahci_qc_prep(struct ata_queued_cmd *qc); |
1884 |
++static enum ata_completion_errors ahci_qc_prep(struct ata_queued_cmd *qc); |
1885 |
+ static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc); |
1886 |
+ static void ahci_freeze(struct ata_port *ap); |
1887 |
+ static void ahci_thaw(struct ata_port *ap); |
1888 |
+@@ -1624,7 +1624,7 @@ static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc) |
1889 |
+ return sata_pmp_qc_defer_cmd_switch(qc); |
1890 |
+ } |
1891 |
+ |
1892 |
+-static void ahci_qc_prep(struct ata_queued_cmd *qc) |
1893 |
++static enum ata_completion_errors ahci_qc_prep(struct ata_queued_cmd *qc) |
1894 |
+ { |
1895 |
+ struct ata_port *ap = qc->ap; |
1896 |
+ struct ahci_port_priv *pp = ap->private_data; |
1897 |
+@@ -1660,6 +1660,8 @@ static void ahci_qc_prep(struct ata_queued_cmd *qc) |
1898 |
+ opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH; |
1899 |
+ |
1900 |
+ ahci_fill_cmd_slot(pp, qc->hw_tag, opts); |
1901 |
++ |
1902 |
++ return AC_ERR_OK; |
1903 |
+ } |
1904 |
+ |
1905 |
+ static void ahci_fbs_dec_intr(struct ata_port *ap) |
1906 |
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c |
1907 |
+index 066b37963ad5f..f67b3fb33d579 100644 |
1908 |
+--- a/drivers/ata/libata-core.c |
1909 |
++++ b/drivers/ata/libata-core.c |
1910 |
+@@ -4978,7 +4978,10 @@ int ata_std_qc_defer(struct ata_queued_cmd *qc) |
1911 |
+ return ATA_DEFER_LINK; |
1912 |
+ } |
1913 |
+ |
1914 |
+-void ata_noop_qc_prep(struct ata_queued_cmd *qc) { } |
1915 |
++enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc) |
1916 |
++{ |
1917 |
++ return AC_ERR_OK; |
1918 |
++} |
1919 |
+ |
1920 |
+ /** |
1921 |
+ * ata_sg_init - Associate command with scatter-gather table. |
1922 |
+@@ -5465,7 +5468,9 @@ void ata_qc_issue(struct ata_queued_cmd *qc) |
1923 |
+ return; |
1924 |
+ } |
1925 |
+ |
1926 |
+- ap->ops->qc_prep(qc); |
1927 |
++ qc->err_mask |= ap->ops->qc_prep(qc); |
1928 |
++ if (unlikely(qc->err_mask)) |
1929 |
++ goto err; |
1930 |
+ trace_ata_qc_issue(qc); |
1931 |
+ qc->err_mask |= ap->ops->qc_issue(qc); |
1932 |
+ if (unlikely(qc->err_mask)) |
1933 |
+diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c |
1934 |
+index 4ed682da52ae9..038db94216a91 100644 |
1935 |
+--- a/drivers/ata/libata-sff.c |
1936 |
++++ b/drivers/ata/libata-sff.c |
1937 |
+@@ -2679,12 +2679,14 @@ static void ata_bmdma_fill_sg_dumb(struct ata_queued_cmd *qc) |
1938 |
+ * LOCKING: |
1939 |
+ * spin_lock_irqsave(host lock) |
1940 |
+ */ |
1941 |
+-void ata_bmdma_qc_prep(struct ata_queued_cmd *qc) |
1942 |
++enum ata_completion_errors ata_bmdma_qc_prep(struct ata_queued_cmd *qc) |
1943 |
+ { |
1944 |
+ if (!(qc->flags & ATA_QCFLAG_DMAMAP)) |
1945 |
+- return; |
1946 |
++ return AC_ERR_OK; |
1947 |
+ |
1948 |
+ ata_bmdma_fill_sg(qc); |
1949 |
++ |
1950 |
++ return AC_ERR_OK; |
1951 |
+ } |
1952 |
+ EXPORT_SYMBOL_GPL(ata_bmdma_qc_prep); |
1953 |
+ |
1954 |
+@@ -2697,12 +2699,14 @@ EXPORT_SYMBOL_GPL(ata_bmdma_qc_prep); |
1955 |
+ * LOCKING: |
1956 |
+ * spin_lock_irqsave(host lock) |
1957 |
+ */ |
1958 |
+-void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc) |
1959 |
++enum ata_completion_errors ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc) |
1960 |
+ { |
1961 |
+ if (!(qc->flags & ATA_QCFLAG_DMAMAP)) |
1962 |
+- return; |
1963 |
++ return AC_ERR_OK; |
1964 |
+ |
1965 |
+ ata_bmdma_fill_sg_dumb(qc); |
1966 |
++ |
1967 |
++ return AC_ERR_OK; |
1968 |
+ } |
1969 |
+ EXPORT_SYMBOL_GPL(ata_bmdma_dumb_qc_prep); |
1970 |
+ |
1971 |
+diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c |
1972 |
+index 57f2ec71cfc34..1bfd0154dad5d 100644 |
1973 |
+--- a/drivers/ata/pata_macio.c |
1974 |
++++ b/drivers/ata/pata_macio.c |
1975 |
+@@ -510,7 +510,7 @@ static int pata_macio_cable_detect(struct ata_port *ap) |
1976 |
+ return ATA_CBL_PATA40; |
1977 |
+ } |
1978 |
+ |
1979 |
+-static void pata_macio_qc_prep(struct ata_queued_cmd *qc) |
1980 |
++static enum ata_completion_errors pata_macio_qc_prep(struct ata_queued_cmd *qc) |
1981 |
+ { |
1982 |
+ unsigned int write = (qc->tf.flags & ATA_TFLAG_WRITE); |
1983 |
+ struct ata_port *ap = qc->ap; |
1984 |
+@@ -523,7 +523,7 @@ static void pata_macio_qc_prep(struct ata_queued_cmd *qc) |
1985 |
+ __func__, qc, qc->flags, write, qc->dev->devno); |
1986 |
+ |
1987 |
+ if (!(qc->flags & ATA_QCFLAG_DMAMAP)) |
1988 |
+- return; |
1989 |
++ return AC_ERR_OK; |
1990 |
+ |
1991 |
+ table = (struct dbdma_cmd *) priv->dma_table_cpu; |
1992 |
+ |
1993 |
+@@ -568,6 +568,8 @@ static void pata_macio_qc_prep(struct ata_queued_cmd *qc) |
1994 |
+ table->command = cpu_to_le16(DBDMA_STOP); |
1995 |
+ |
1996 |
+ dev_dbgdma(priv->dev, "%s: %d DMA list entries\n", __func__, pi); |
1997 |
++ |
1998 |
++ return AC_ERR_OK; |
1999 |
+ } |
2000 |
+ |
2001 |
+ |
2002 |
+diff --git a/drivers/ata/pata_pxa.c b/drivers/ata/pata_pxa.c |
2003 |
+index 4afcb8e63e211..41430f79663c1 100644 |
2004 |
+--- a/drivers/ata/pata_pxa.c |
2005 |
++++ b/drivers/ata/pata_pxa.c |
2006 |
+@@ -44,25 +44,27 @@ static void pxa_ata_dma_irq(void *d) |
2007 |
+ /* |
2008 |
+ * Prepare taskfile for submission. |
2009 |
+ */ |
2010 |
+-static void pxa_qc_prep(struct ata_queued_cmd *qc) |
2011 |
++static enum ata_completion_errors pxa_qc_prep(struct ata_queued_cmd *qc) |
2012 |
+ { |
2013 |
+ struct pata_pxa_data *pd = qc->ap->private_data; |
2014 |
+ struct dma_async_tx_descriptor *tx; |
2015 |
+ enum dma_transfer_direction dir; |
2016 |
+ |
2017 |
+ if (!(qc->flags & ATA_QCFLAG_DMAMAP)) |
2018 |
+- return; |
2019 |
++ return AC_ERR_OK; |
2020 |
+ |
2021 |
+ dir = (qc->dma_dir == DMA_TO_DEVICE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM); |
2022 |
+ tx = dmaengine_prep_slave_sg(pd->dma_chan, qc->sg, qc->n_elem, dir, |
2023 |
+ DMA_PREP_INTERRUPT); |
2024 |
+ if (!tx) { |
2025 |
+ ata_dev_err(qc->dev, "prep_slave_sg() failed\n"); |
2026 |
+- return; |
2027 |
++ return AC_ERR_OK; |
2028 |
+ } |
2029 |
+ tx->callback = pxa_ata_dma_irq; |
2030 |
+ tx->callback_param = pd; |
2031 |
+ pd->dma_cookie = dmaengine_submit(tx); |
2032 |
++ |
2033 |
++ return AC_ERR_OK; |
2034 |
+ } |
2035 |
+ |
2036 |
+ /* |
2037 |
+diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c |
2038 |
+index cb490531b62ec..5db55e1e2a61d 100644 |
2039 |
+--- a/drivers/ata/pdc_adma.c |
2040 |
++++ b/drivers/ata/pdc_adma.c |
2041 |
+@@ -116,7 +116,7 @@ static int adma_ata_init_one(struct pci_dev *pdev, |
2042 |
+ const struct pci_device_id *ent); |
2043 |
+ static int adma_port_start(struct ata_port *ap); |
2044 |
+ static void adma_port_stop(struct ata_port *ap); |
2045 |
+-static void adma_qc_prep(struct ata_queued_cmd *qc); |
2046 |
++static enum ata_completion_errors adma_qc_prep(struct ata_queued_cmd *qc); |
2047 |
+ static unsigned int adma_qc_issue(struct ata_queued_cmd *qc); |
2048 |
+ static int adma_check_atapi_dma(struct ata_queued_cmd *qc); |
2049 |
+ static void adma_freeze(struct ata_port *ap); |
2050 |
+@@ -295,7 +295,7 @@ static int adma_fill_sg(struct ata_queued_cmd *qc) |
2051 |
+ return i; |
2052 |
+ } |
2053 |
+ |
2054 |
+-static void adma_qc_prep(struct ata_queued_cmd *qc) |
2055 |
++static enum ata_completion_errors adma_qc_prep(struct ata_queued_cmd *qc) |
2056 |
+ { |
2057 |
+ struct adma_port_priv *pp = qc->ap->private_data; |
2058 |
+ u8 *buf = pp->pkt; |
2059 |
+@@ -306,7 +306,7 @@ static void adma_qc_prep(struct ata_queued_cmd *qc) |
2060 |
+ |
2061 |
+ adma_enter_reg_mode(qc->ap); |
2062 |
+ if (qc->tf.protocol != ATA_PROT_DMA) |
2063 |
+- return; |
2064 |
++ return AC_ERR_OK; |
2065 |
+ |
2066 |
+ buf[i++] = 0; /* Response flags */ |
2067 |
+ buf[i++] = 0; /* reserved */ |
2068 |
+@@ -371,6 +371,7 @@ static void adma_qc_prep(struct ata_queued_cmd *qc) |
2069 |
+ printk("%s\n", obuf); |
2070 |
+ } |
2071 |
+ #endif |
2072 |
++ return AC_ERR_OK; |
2073 |
+ } |
2074 |
+ |
2075 |
+ static inline void adma_packet_start(struct ata_queued_cmd *qc) |
2076 |
+diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c |
2077 |
+index ca6c706e9c256..d55ee244d6931 100644 |
2078 |
+--- a/drivers/ata/sata_fsl.c |
2079 |
++++ b/drivers/ata/sata_fsl.c |
2080 |
+@@ -502,7 +502,7 @@ static unsigned int sata_fsl_fill_sg(struct ata_queued_cmd *qc, void *cmd_desc, |
2081 |
+ return num_prde; |
2082 |
+ } |
2083 |
+ |
2084 |
+-static void sata_fsl_qc_prep(struct ata_queued_cmd *qc) |
2085 |
++static enum ata_completion_errors sata_fsl_qc_prep(struct ata_queued_cmd *qc) |
2086 |
+ { |
2087 |
+ struct ata_port *ap = qc->ap; |
2088 |
+ struct sata_fsl_port_priv *pp = ap->private_data; |
2089 |
+@@ -548,6 +548,8 @@ static void sata_fsl_qc_prep(struct ata_queued_cmd *qc) |
2090 |
+ |
2091 |
+ VPRINTK("SATA FSL : xx_qc_prep, di = 0x%x, ttl = %d, num_prde = %d\n", |
2092 |
+ desc_info, ttl_dwords, num_prde); |
2093 |
++ |
2094 |
++ return AC_ERR_OK; |
2095 |
+ } |
2096 |
+ |
2097 |
+ static unsigned int sata_fsl_qc_issue(struct ata_queued_cmd *qc) |
2098 |
+diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c |
2099 |
+index 7f99e23bff88c..a6b76cc12a661 100644 |
2100 |
+--- a/drivers/ata/sata_inic162x.c |
2101 |
++++ b/drivers/ata/sata_inic162x.c |
2102 |
+@@ -478,7 +478,7 @@ static void inic_fill_sg(struct inic_prd *prd, struct ata_queued_cmd *qc) |
2103 |
+ prd[-1].flags |= PRD_END; |
2104 |
+ } |
2105 |
+ |
2106 |
+-static void inic_qc_prep(struct ata_queued_cmd *qc) |
2107 |
++static enum ata_completion_errors inic_qc_prep(struct ata_queued_cmd *qc) |
2108 |
+ { |
2109 |
+ struct inic_port_priv *pp = qc->ap->private_data; |
2110 |
+ struct inic_pkt *pkt = pp->pkt; |
2111 |
+@@ -538,6 +538,8 @@ static void inic_qc_prep(struct ata_queued_cmd *qc) |
2112 |
+ inic_fill_sg(prd, qc); |
2113 |
+ |
2114 |
+ pp->cpb_tbl[0] = pp->pkt_dma; |
2115 |
++ |
2116 |
++ return AC_ERR_OK; |
2117 |
+ } |
2118 |
+ |
2119 |
+ static unsigned int inic_qc_issue(struct ata_queued_cmd *qc) |
2120 |
+diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c |
2121 |
+index bde695a320973..d7228f8e9297c 100644 |
2122 |
+--- a/drivers/ata/sata_mv.c |
2123 |
++++ b/drivers/ata/sata_mv.c |
2124 |
+@@ -592,8 +592,8 @@ static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val) |
2125 |
+ static int mv_port_start(struct ata_port *ap); |
2126 |
+ static void mv_port_stop(struct ata_port *ap); |
2127 |
+ static int mv_qc_defer(struct ata_queued_cmd *qc); |
2128 |
+-static void mv_qc_prep(struct ata_queued_cmd *qc); |
2129 |
+-static void mv_qc_prep_iie(struct ata_queued_cmd *qc); |
2130 |
++static enum ata_completion_errors mv_qc_prep(struct ata_queued_cmd *qc); |
2131 |
++static enum ata_completion_errors mv_qc_prep_iie(struct ata_queued_cmd *qc); |
2132 |
+ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc); |
2133 |
+ static int mv_hardreset(struct ata_link *link, unsigned int *class, |
2134 |
+ unsigned long deadline); |
2135 |
+@@ -2031,7 +2031,7 @@ static void mv_rw_multi_errata_sata24(struct ata_queued_cmd *qc) |
2136 |
+ * LOCKING: |
2137 |
+ * Inherited from caller. |
2138 |
+ */ |
2139 |
+-static void mv_qc_prep(struct ata_queued_cmd *qc) |
2140 |
++static enum ata_completion_errors mv_qc_prep(struct ata_queued_cmd *qc) |
2141 |
+ { |
2142 |
+ struct ata_port *ap = qc->ap; |
2143 |
+ struct mv_port_priv *pp = ap->private_data; |
2144 |
+@@ -2043,15 +2043,15 @@ static void mv_qc_prep(struct ata_queued_cmd *qc) |
2145 |
+ switch (tf->protocol) { |
2146 |
+ case ATA_PROT_DMA: |
2147 |
+ if (tf->command == ATA_CMD_DSM) |
2148 |
+- return; |
2149 |
++ return AC_ERR_OK; |
2150 |
+ /* fall-thru */ |
2151 |
+ case ATA_PROT_NCQ: |
2152 |
+ break; /* continue below */ |
2153 |
+ case ATA_PROT_PIO: |
2154 |
+ mv_rw_multi_errata_sata24(qc); |
2155 |
+- return; |
2156 |
++ return AC_ERR_OK; |
2157 |
+ default: |
2158 |
+- return; |
2159 |
++ return AC_ERR_OK; |
2160 |
+ } |
2161 |
+ |
2162 |
+ /* Fill in command request block |
2163 |
+@@ -2098,12 +2098,10 @@ static void mv_qc_prep(struct ata_queued_cmd *qc) |
2164 |
+ * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none |
2165 |
+ * of which are defined/used by Linux. If we get here, this |
2166 |
+ * driver needs work. |
2167 |
+- * |
2168 |
+- * FIXME: modify libata to give qc_prep a return value and |
2169 |
+- * return error here. |
2170 |
+ */ |
2171 |
+- BUG_ON(tf->command); |
2172 |
+- break; |
2173 |
++ ata_port_err(ap, "%s: unsupported command: %.2x\n", __func__, |
2174 |
++ tf->command); |
2175 |
++ return AC_ERR_INVALID; |
2176 |
+ } |
2177 |
+ mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0); |
2178 |
+ mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0); |
2179 |
+@@ -2116,8 +2114,10 @@ static void mv_qc_prep(struct ata_queued_cmd *qc) |
2180 |
+ mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */ |
2181 |
+ |
2182 |
+ if (!(qc->flags & ATA_QCFLAG_DMAMAP)) |
2183 |
+- return; |
2184 |
++ return AC_ERR_OK; |
2185 |
+ mv_fill_sg(qc); |
2186 |
++ |
2187 |
++ return AC_ERR_OK; |
2188 |
+ } |
2189 |
+ |
2190 |
+ /** |
2191 |
+@@ -2132,7 +2132,7 @@ static void mv_qc_prep(struct ata_queued_cmd *qc) |
2192 |
+ * LOCKING: |
2193 |
+ * Inherited from caller. |
2194 |
+ */ |
2195 |
+-static void mv_qc_prep_iie(struct ata_queued_cmd *qc) |
2196 |
++static enum ata_completion_errors mv_qc_prep_iie(struct ata_queued_cmd *qc) |
2197 |
+ { |
2198 |
+ struct ata_port *ap = qc->ap; |
2199 |
+ struct mv_port_priv *pp = ap->private_data; |
2200 |
+@@ -2143,9 +2143,9 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc) |
2201 |
+ |
2202 |
+ if ((tf->protocol != ATA_PROT_DMA) && |
2203 |
+ (tf->protocol != ATA_PROT_NCQ)) |
2204 |
+- return; |
2205 |
++ return AC_ERR_OK; |
2206 |
+ if (tf->command == ATA_CMD_DSM) |
2207 |
+- return; /* use bmdma for this */ |
2208 |
++ return AC_ERR_OK; /* use bmdma for this */ |
2209 |
+ |
2210 |
+ /* Fill in Gen IIE command request block */ |
2211 |
+ if (!(tf->flags & ATA_TFLAG_WRITE)) |
2212 |
+@@ -2186,8 +2186,10 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc) |
2213 |
+ ); |
2214 |
+ |
2215 |
+ if (!(qc->flags & ATA_QCFLAG_DMAMAP)) |
2216 |
+- return; |
2217 |
++ return AC_ERR_OK; |
2218 |
+ mv_fill_sg(qc); |
2219 |
++ |
2220 |
++ return AC_ERR_OK; |
2221 |
+ } |
2222 |
+ |
2223 |
+ /** |
2224 |
+diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c |
2225 |
+index 7510303111fa0..18b147c182b96 100644 |
2226 |
+--- a/drivers/ata/sata_nv.c |
2227 |
++++ b/drivers/ata/sata_nv.c |
2228 |
+@@ -297,7 +297,7 @@ static void nv_ck804_freeze(struct ata_port *ap); |
2229 |
+ static void nv_ck804_thaw(struct ata_port *ap); |
2230 |
+ static int nv_adma_slave_config(struct scsi_device *sdev); |
2231 |
+ static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc); |
2232 |
+-static void nv_adma_qc_prep(struct ata_queued_cmd *qc); |
2233 |
++static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc); |
2234 |
+ static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc); |
2235 |
+ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance); |
2236 |
+ static void nv_adma_irq_clear(struct ata_port *ap); |
2237 |
+@@ -319,7 +319,7 @@ static void nv_mcp55_freeze(struct ata_port *ap); |
2238 |
+ static void nv_swncq_error_handler(struct ata_port *ap); |
2239 |
+ static int nv_swncq_slave_config(struct scsi_device *sdev); |
2240 |
+ static int nv_swncq_port_start(struct ata_port *ap); |
2241 |
+-static void nv_swncq_qc_prep(struct ata_queued_cmd *qc); |
2242 |
++static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc); |
2243 |
+ static void nv_swncq_fill_sg(struct ata_queued_cmd *qc); |
2244 |
+ static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc); |
2245 |
+ static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis); |
2246 |
+@@ -1344,7 +1344,7 @@ static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc) |
2247 |
+ return 1; |
2248 |
+ } |
2249 |
+ |
2250 |
+-static void nv_adma_qc_prep(struct ata_queued_cmd *qc) |
2251 |
++static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc) |
2252 |
+ { |
2253 |
+ struct nv_adma_port_priv *pp = qc->ap->private_data; |
2254 |
+ struct nv_adma_cpb *cpb = &pp->cpb[qc->hw_tag]; |
2255 |
+@@ -1356,7 +1356,7 @@ static void nv_adma_qc_prep(struct ata_queued_cmd *qc) |
2256 |
+ (qc->flags & ATA_QCFLAG_DMAMAP)); |
2257 |
+ nv_adma_register_mode(qc->ap); |
2258 |
+ ata_bmdma_qc_prep(qc); |
2259 |
+- return; |
2260 |
++ return AC_ERR_OK; |
2261 |
+ } |
2262 |
+ |
2263 |
+ cpb->resp_flags = NV_CPB_RESP_DONE; |
2264 |
+@@ -1388,6 +1388,8 @@ static void nv_adma_qc_prep(struct ata_queued_cmd *qc) |
2265 |
+ cpb->ctl_flags = ctl_flags; |
2266 |
+ wmb(); |
2267 |
+ cpb->resp_flags = 0; |
2268 |
++ |
2269 |
++ return AC_ERR_OK; |
2270 |
+ } |
2271 |
+ |
2272 |
+ static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc) |
2273 |
+@@ -1950,17 +1952,19 @@ static int nv_swncq_port_start(struct ata_port *ap) |
2274 |
+ return 0; |
2275 |
+ } |
2276 |
+ |
2277 |
+-static void nv_swncq_qc_prep(struct ata_queued_cmd *qc) |
2278 |
++static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc) |
2279 |
+ { |
2280 |
+ if (qc->tf.protocol != ATA_PROT_NCQ) { |
2281 |
+ ata_bmdma_qc_prep(qc); |
2282 |
+- return; |
2283 |
++ return AC_ERR_OK; |
2284 |
+ } |
2285 |
+ |
2286 |
+ if (!(qc->flags & ATA_QCFLAG_DMAMAP)) |
2287 |
+- return; |
2288 |
++ return AC_ERR_OK; |
2289 |
+ |
2290 |
+ nv_swncq_fill_sg(qc); |
2291 |
++ |
2292 |
++ return AC_ERR_OK; |
2293 |
+ } |
2294 |
+ |
2295 |
+ static void nv_swncq_fill_sg(struct ata_queued_cmd *qc) |
2296 |
+diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c |
2297 |
+index 5fd464765ddcb..c451d7d1c817a 100644 |
2298 |
+--- a/drivers/ata/sata_promise.c |
2299 |
++++ b/drivers/ata/sata_promise.c |
2300 |
+@@ -139,7 +139,7 @@ static int pdc_sata_scr_write(struct ata_link *link, unsigned int sc_reg, u32 va |
2301 |
+ static int pdc_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); |
2302 |
+ static int pdc_common_port_start(struct ata_port *ap); |
2303 |
+ static int pdc_sata_port_start(struct ata_port *ap); |
2304 |
+-static void pdc_qc_prep(struct ata_queued_cmd *qc); |
2305 |
++static enum ata_completion_errors pdc_qc_prep(struct ata_queued_cmd *qc); |
2306 |
+ static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf); |
2307 |
+ static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf); |
2308 |
+ static int pdc_check_atapi_dma(struct ata_queued_cmd *qc); |
2309 |
+@@ -633,7 +633,7 @@ static void pdc_fill_sg(struct ata_queued_cmd *qc) |
2310 |
+ prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT); |
2311 |
+ } |
2312 |
+ |
2313 |
+-static void pdc_qc_prep(struct ata_queued_cmd *qc) |
2314 |
++static enum ata_completion_errors pdc_qc_prep(struct ata_queued_cmd *qc) |
2315 |
+ { |
2316 |
+ struct pdc_port_priv *pp = qc->ap->private_data; |
2317 |
+ unsigned int i; |
2318 |
+@@ -665,6 +665,8 @@ static void pdc_qc_prep(struct ata_queued_cmd *qc) |
2319 |
+ default: |
2320 |
+ break; |
2321 |
+ } |
2322 |
++ |
2323 |
++ return AC_ERR_OK; |
2324 |
+ } |
2325 |
+ |
2326 |
+ static int pdc_is_sataii_tx4(unsigned long flags) |
2327 |
+diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c |
2328 |
+index c53c5a47204db..ef00ab644afb7 100644 |
2329 |
+--- a/drivers/ata/sata_qstor.c |
2330 |
++++ b/drivers/ata/sata_qstor.c |
2331 |
+@@ -100,7 +100,7 @@ static int qs_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val); |
2332 |
+ static int qs_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); |
2333 |
+ static int qs_port_start(struct ata_port *ap); |
2334 |
+ static void qs_host_stop(struct ata_host *host); |
2335 |
+-static void qs_qc_prep(struct ata_queued_cmd *qc); |
2336 |
++static enum ata_completion_errors qs_qc_prep(struct ata_queued_cmd *qc); |
2337 |
+ static unsigned int qs_qc_issue(struct ata_queued_cmd *qc); |
2338 |
+ static int qs_check_atapi_dma(struct ata_queued_cmd *qc); |
2339 |
+ static void qs_freeze(struct ata_port *ap); |
2340 |
+@@ -260,7 +260,7 @@ static unsigned int qs_fill_sg(struct ata_queued_cmd *qc) |
2341 |
+ return si; |
2342 |
+ } |
2343 |
+ |
2344 |
+-static void qs_qc_prep(struct ata_queued_cmd *qc) |
2345 |
++static enum ata_completion_errors qs_qc_prep(struct ata_queued_cmd *qc) |
2346 |
+ { |
2347 |
+ struct qs_port_priv *pp = qc->ap->private_data; |
2348 |
+ u8 dflags = QS_DF_PORD, *buf = pp->pkt; |
2349 |
+@@ -272,7 +272,7 @@ static void qs_qc_prep(struct ata_queued_cmd *qc) |
2350 |
+ |
2351 |
+ qs_enter_reg_mode(qc->ap); |
2352 |
+ if (qc->tf.protocol != ATA_PROT_DMA) |
2353 |
+- return; |
2354 |
++ return AC_ERR_OK; |
2355 |
+ |
2356 |
+ nelem = qs_fill_sg(qc); |
2357 |
+ |
2358 |
+@@ -295,6 +295,8 @@ static void qs_qc_prep(struct ata_queued_cmd *qc) |
2359 |
+ |
2360 |
+ /* frame information structure (FIS) */ |
2361 |
+ ata_tf_to_fis(&qc->tf, 0, 1, &buf[32]); |
2362 |
++ |
2363 |
++ return AC_ERR_OK; |
2364 |
+ } |
2365 |
+ |
2366 |
+ static inline void qs_packet_start(struct ata_queued_cmd *qc) |
2367 |
+diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c |
2368 |
+index c35b7b993133e..141ac600b64c8 100644 |
2369 |
+--- a/drivers/ata/sata_rcar.c |
2370 |
++++ b/drivers/ata/sata_rcar.c |
2371 |
+@@ -550,12 +550,14 @@ static void sata_rcar_bmdma_fill_sg(struct ata_queued_cmd *qc) |
2372 |
+ prd[si - 1].addr |= cpu_to_le32(SATA_RCAR_DTEND); |
2373 |
+ } |
2374 |
+ |
2375 |
+-static void sata_rcar_qc_prep(struct ata_queued_cmd *qc) |
2376 |
++static enum ata_completion_errors sata_rcar_qc_prep(struct ata_queued_cmd *qc) |
2377 |
+ { |
2378 |
+ if (!(qc->flags & ATA_QCFLAG_DMAMAP)) |
2379 |
+- return; |
2380 |
++ return AC_ERR_OK; |
2381 |
+ |
2382 |
+ sata_rcar_bmdma_fill_sg(qc); |
2383 |
++ |
2384 |
++ return AC_ERR_OK; |
2385 |
+ } |
2386 |
+ |
2387 |
+ static void sata_rcar_bmdma_setup(struct ata_queued_cmd *qc) |
2388 |
+diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c |
2389 |
+index e6fbae2f645ac..75321f1ceba52 100644 |
2390 |
+--- a/drivers/ata/sata_sil.c |
2391 |
++++ b/drivers/ata/sata_sil.c |
2392 |
+@@ -103,7 +103,7 @@ static void sil_dev_config(struct ata_device *dev); |
2393 |
+ static int sil_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val); |
2394 |
+ static int sil_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val); |
2395 |
+ static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed); |
2396 |
+-static void sil_qc_prep(struct ata_queued_cmd *qc); |
2397 |
++static enum ata_completion_errors sil_qc_prep(struct ata_queued_cmd *qc); |
2398 |
+ static void sil_bmdma_setup(struct ata_queued_cmd *qc); |
2399 |
+ static void sil_bmdma_start(struct ata_queued_cmd *qc); |
2400 |
+ static void sil_bmdma_stop(struct ata_queued_cmd *qc); |
2401 |
+@@ -317,12 +317,14 @@ static void sil_fill_sg(struct ata_queued_cmd *qc) |
2402 |
+ last_prd->flags_len |= cpu_to_le32(ATA_PRD_EOT); |
2403 |
+ } |
2404 |
+ |
2405 |
+-static void sil_qc_prep(struct ata_queued_cmd *qc) |
2406 |
++static enum ata_completion_errors sil_qc_prep(struct ata_queued_cmd *qc) |
2407 |
+ { |
2408 |
+ if (!(qc->flags & ATA_QCFLAG_DMAMAP)) |
2409 |
+- return; |
2410 |
++ return AC_ERR_OK; |
2411 |
+ |
2412 |
+ sil_fill_sg(qc); |
2413 |
++ |
2414 |
++ return AC_ERR_OK; |
2415 |
+ } |
2416 |
+ |
2417 |
+ static unsigned char sil_get_device_cache_line(struct pci_dev *pdev) |
2418 |
+diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c |
2419 |
+index 7bef82de53ca9..560070d4f1d09 100644 |
2420 |
+--- a/drivers/ata/sata_sil24.c |
2421 |
++++ b/drivers/ata/sata_sil24.c |
2422 |
+@@ -326,7 +326,7 @@ static void sil24_dev_config(struct ata_device *dev); |
2423 |
+ static int sil24_scr_read(struct ata_link *link, unsigned sc_reg, u32 *val); |
2424 |
+ static int sil24_scr_write(struct ata_link *link, unsigned sc_reg, u32 val); |
2425 |
+ static int sil24_qc_defer(struct ata_queued_cmd *qc); |
2426 |
+-static void sil24_qc_prep(struct ata_queued_cmd *qc); |
2427 |
++static enum ata_completion_errors sil24_qc_prep(struct ata_queued_cmd *qc); |
2428 |
+ static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc); |
2429 |
+ static bool sil24_qc_fill_rtf(struct ata_queued_cmd *qc); |
2430 |
+ static void sil24_pmp_attach(struct ata_port *ap); |
2431 |
+@@ -830,7 +830,7 @@ static int sil24_qc_defer(struct ata_queued_cmd *qc) |
2432 |
+ return ata_std_qc_defer(qc); |
2433 |
+ } |
2434 |
+ |
2435 |
+-static void sil24_qc_prep(struct ata_queued_cmd *qc) |
2436 |
++static enum ata_completion_errors sil24_qc_prep(struct ata_queued_cmd *qc) |
2437 |
+ { |
2438 |
+ struct ata_port *ap = qc->ap; |
2439 |
+ struct sil24_port_priv *pp = ap->private_data; |
2440 |
+@@ -874,6 +874,8 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc) |
2441 |
+ |
2442 |
+ if (qc->flags & ATA_QCFLAG_DMAMAP) |
2443 |
+ sil24_fill_sg(qc, sge); |
2444 |
++ |
2445 |
++ return AC_ERR_OK; |
2446 |
+ } |
2447 |
+ |
2448 |
+ static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc) |
2449 |
+diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c |
2450 |
+index 2277ba0c9c7f4..2c7b30c5ea3dd 100644 |
2451 |
+--- a/drivers/ata/sata_sx4.c |
2452 |
++++ b/drivers/ata/sata_sx4.c |
2453 |
+@@ -202,7 +202,7 @@ static void pdc_error_handler(struct ata_port *ap); |
2454 |
+ static void pdc_freeze(struct ata_port *ap); |
2455 |
+ static void pdc_thaw(struct ata_port *ap); |
2456 |
+ static int pdc_port_start(struct ata_port *ap); |
2457 |
+-static void pdc20621_qc_prep(struct ata_queued_cmd *qc); |
2458 |
++static enum ata_completion_errors pdc20621_qc_prep(struct ata_queued_cmd *qc); |
2459 |
+ static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf); |
2460 |
+ static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf); |
2461 |
+ static unsigned int pdc20621_dimm_init(struct ata_host *host); |
2462 |
+@@ -530,7 +530,7 @@ static void pdc20621_nodata_prep(struct ata_queued_cmd *qc) |
2463 |
+ VPRINTK("ata pkt buf ofs %u, mmio copied\n", i); |
2464 |
+ } |
2465 |
+ |
2466 |
+-static void pdc20621_qc_prep(struct ata_queued_cmd *qc) |
2467 |
++static enum ata_completion_errors pdc20621_qc_prep(struct ata_queued_cmd *qc) |
2468 |
+ { |
2469 |
+ switch (qc->tf.protocol) { |
2470 |
+ case ATA_PROT_DMA: |
2471 |
+@@ -542,6 +542,8 @@ static void pdc20621_qc_prep(struct ata_queued_cmd *qc) |
2472 |
+ default: |
2473 |
+ break; |
2474 |
+ } |
2475 |
++ |
2476 |
++ return AC_ERR_OK; |
2477 |
+ } |
2478 |
+ |
2479 |
+ static void __pdc20621_push_hdma(struct ata_queued_cmd *qc, |
2480 |
+diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c |
2481 |
+index 9d0d65efcd94e..bedaebd5a4956 100644 |
2482 |
+--- a/drivers/atm/eni.c |
2483 |
++++ b/drivers/atm/eni.c |
2484 |
+@@ -2245,7 +2245,7 @@ static int eni_init_one(struct pci_dev *pci_dev, |
2485 |
+ |
2486 |
+ rc = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32)); |
2487 |
+ if (rc < 0) |
2488 |
+- goto out; |
2489 |
++ goto err_disable; |
2490 |
+ |
2491 |
+ rc = -ENOMEM; |
2492 |
+ eni_dev = kmalloc(sizeof(struct eni_dev), GFP_KERNEL); |
2493 |
+diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c |
2494 |
+index 1eb81f113786f..83e26fd188cc9 100644 |
2495 |
+--- a/drivers/base/arch_topology.c |
2496 |
++++ b/drivers/base/arch_topology.c |
2497 |
+@@ -270,7 +270,7 @@ static int __init get_cpu_for_node(struct device_node *node) |
2498 |
+ static int __init parse_core(struct device_node *core, int package_id, |
2499 |
+ int core_id) |
2500 |
+ { |
2501 |
+- char name[10]; |
2502 |
++ char name[20]; |
2503 |
+ bool leaf = true; |
2504 |
+ int i = 0; |
2505 |
+ int cpu; |
2506 |
+@@ -317,7 +317,7 @@ static int __init parse_core(struct device_node *core, int package_id, |
2507 |
+ |
2508 |
+ static int __init parse_cluster(struct device_node *cluster, int depth) |
2509 |
+ { |
2510 |
+- char name[10]; |
2511 |
++ char name[20]; |
2512 |
+ bool leaf = true; |
2513 |
+ bool has_cores = false; |
2514 |
+ struct device_node *c; |
2515 |
+diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h |
2516 |
+index 3d80c4b43f720..d7c01b70e43db 100644 |
2517 |
+--- a/drivers/base/regmap/internal.h |
2518 |
++++ b/drivers/base/regmap/internal.h |
2519 |
+@@ -259,7 +259,7 @@ bool regcache_set_val(struct regmap *map, void *base, unsigned int idx, |
2520 |
+ int regcache_lookup_reg(struct regmap *map, unsigned int reg); |
2521 |
+ |
2522 |
+ int _regmap_raw_write(struct regmap *map, unsigned int reg, |
2523 |
+- const void *val, size_t val_len); |
2524 |
++ const void *val, size_t val_len, bool noinc); |
2525 |
+ |
2526 |
+ void regmap_async_complete_cb(struct regmap_async *async, int ret); |
2527 |
+ |
2528 |
+diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c |
2529 |
+index a93cafd7be4f2..7f4b3b62492ca 100644 |
2530 |
+--- a/drivers/base/regmap/regcache.c |
2531 |
++++ b/drivers/base/regmap/regcache.c |
2532 |
+@@ -717,7 +717,7 @@ static int regcache_sync_block_raw_flush(struct regmap *map, const void **data, |
2533 |
+ |
2534 |
+ map->cache_bypass = true; |
2535 |
+ |
2536 |
+- ret = _regmap_raw_write(map, base, *data, count * val_bytes); |
2537 |
++ ret = _regmap_raw_write(map, base, *data, count * val_bytes, false); |
2538 |
+ if (ret) |
2539 |
+ dev_err(map->dev, "Unable to sync registers %#x-%#x. %d\n", |
2540 |
+ base, cur - map->reg_stride, ret); |
2541 |
+diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c |
2542 |
+index 927ebde1607be..e0893f1b14522 100644 |
2543 |
+--- a/drivers/base/regmap/regmap.c |
2544 |
++++ b/drivers/base/regmap/regmap.c |
2545 |
+@@ -1468,7 +1468,7 @@ static void regmap_set_work_buf_flag_mask(struct regmap *map, int max_bytes, |
2546 |
+ } |
2547 |
+ |
2548 |
+ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg, |
2549 |
+- const void *val, size_t val_len) |
2550 |
++ const void *val, size_t val_len, bool noinc) |
2551 |
+ { |
2552 |
+ struct regmap_range_node *range; |
2553 |
+ unsigned long flags; |
2554 |
+@@ -1527,7 +1527,7 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg, |
2555 |
+ win_residue, val_len / map->format.val_bytes); |
2556 |
+ ret = _regmap_raw_write_impl(map, reg, val, |
2557 |
+ win_residue * |
2558 |
+- map->format.val_bytes); |
2559 |
++ map->format.val_bytes, noinc); |
2560 |
+ if (ret != 0) |
2561 |
+ return ret; |
2562 |
+ |
2563 |
+@@ -1541,7 +1541,7 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg, |
2564 |
+ win_residue = range->window_len - win_offset; |
2565 |
+ } |
2566 |
+ |
2567 |
+- ret = _regmap_select_page(map, ®, range, val_num); |
2568 |
++ ret = _regmap_select_page(map, ®, range, noinc ? 1 : val_num); |
2569 |
+ if (ret != 0) |
2570 |
+ return ret; |
2571 |
+ } |
2572 |
+@@ -1749,7 +1749,8 @@ static int _regmap_bus_raw_write(void *context, unsigned int reg, |
2573 |
+ map->work_buf + |
2574 |
+ map->format.reg_bytes + |
2575 |
+ map->format.pad_bytes, |
2576 |
+- map->format.val_bytes); |
2577 |
++ map->format.val_bytes, |
2578 |
++ false); |
2579 |
+ } |
2580 |
+ |
2581 |
+ static inline void *_regmap_map_get_context(struct regmap *map) |
2582 |
+@@ -1843,7 +1844,7 @@ int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val) |
2583 |
+ EXPORT_SYMBOL_GPL(regmap_write_async); |
2584 |
+ |
2585 |
+ int _regmap_raw_write(struct regmap *map, unsigned int reg, |
2586 |
+- const void *val, size_t val_len) |
2587 |
++ const void *val, size_t val_len, bool noinc)
+ {
+ size_t val_bytes = map->format.val_bytes;
+ size_t val_count = val_len / val_bytes;
+@@ -1864,7 +1865,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
+
+ /* Write as many bytes as possible with chunk_size */
+ for (i = 0; i < chunk_count; i++) {
+- ret = _regmap_raw_write_impl(map, reg, val, chunk_bytes);
++ ret = _regmap_raw_write_impl(map, reg, val, chunk_bytes, noinc);
+ if (ret)
+ return ret;
+
+@@ -1875,7 +1876,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
+
+ /* Write remaining bytes */
+ if (val_len)
+- ret = _regmap_raw_write_impl(map, reg, val, val_len);
++ ret = _regmap_raw_write_impl(map, reg, val, val_len, noinc);
+
+ return ret;
+ }
+@@ -1908,7 +1909,7 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
+
+ map->lock(map->lock_arg);
+
+- ret = _regmap_raw_write(map, reg, val, val_len);
++ ret = _regmap_raw_write(map, reg, val, val_len, false);
+
+ map->unlock(map->lock_arg);
+
+@@ -1966,7 +1967,7 @@ int regmap_noinc_write(struct regmap *map, unsigned int reg,
+ write_len = map->max_raw_write;
+ else
+ write_len = val_len;
+- ret = _regmap_raw_write(map, reg, val, write_len);
++ ret = _regmap_raw_write(map, reg, val, write_len, true);
+ if (ret)
+ goto out_unlock;
+ val = ((u8 *)val) + write_len;
+@@ -2443,7 +2444,7 @@ int regmap_raw_write_async(struct regmap *map, unsigned int reg,
+
+ map->async = true;
+
+- ret = _regmap_raw_write(map, reg, val, val_len);
++ ret = _regmap_raw_write(map, reg, val, val_len, false);
+
+ map->async = false;
+
+@@ -2454,7 +2455,7 @@ int regmap_raw_write_async(struct regmap *map, unsigned int reg,
+ EXPORT_SYMBOL_GPL(regmap_raw_write_async);
+
+ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
+- unsigned int val_len)
++ unsigned int val_len, bool noinc)
+ {
+ struct regmap_range_node *range;
+ int ret;
+@@ -2467,7 +2468,7 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
+ range = _regmap_range_lookup(map, reg);
+ if (range) {
+ ret = _regmap_select_page(map, &reg, range,
+- val_len / map->format.val_bytes);
++ noinc ? 1 : val_len / map->format.val_bytes);
+ if (ret != 0)
+ return ret;
+ }
+@@ -2505,7 +2506,7 @@ static int _regmap_bus_read(void *context, unsigned int reg,
+ if (!map->format.parse_val)
+ return -EINVAL;
+
+- ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes);
++ ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes, false);
+ if (ret == 0)
+ *val = map->format.parse_val(work_val);
+
+@@ -2621,7 +2622,7 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
+
+ /* Read bytes that fit into whole chunks */
+ for (i = 0; i < chunk_count; i++) {
+- ret = _regmap_raw_read(map, reg, val, chunk_bytes);
++ ret = _regmap_raw_read(map, reg, val, chunk_bytes, false);
+ if (ret != 0)
+ goto out;
+
+@@ -2632,7 +2633,7 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
+
+ /* Read remaining bytes */
+ if (val_len) {
+- ret = _regmap_raw_read(map, reg, val, val_len);
++ ret = _regmap_raw_read(map, reg, val, val_len, false);
+ if (ret != 0)
+ goto out;
+ }
+@@ -2707,7 +2708,7 @@ int regmap_noinc_read(struct regmap *map, unsigned int reg,
+ read_len = map->max_raw_read;
+ else
+ read_len = val_len;
+- ret = _regmap_raw_read(map, reg, val, read_len);
++ ret = _regmap_raw_read(map, reg, val, read_len, true);
+ if (ret)
+ goto out_unlock;
+ val = ((u8 *)val) + read_len;
+diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
+index bf3c02be69305..0dfaf90a31b06 100644
+--- a/drivers/bluetooth/btrtl.c
++++ b/drivers/bluetooth/btrtl.c
+@@ -370,11 +370,11 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev,
+ * the end.
+ */
+ len = patch_length;
+- buf = kmemdup(btrtl_dev->fw_data + patch_offset, patch_length,
+- GFP_KERNEL);
++ buf = kvmalloc(patch_length, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
++ memcpy(buf, btrtl_dev->fw_data + patch_offset, patch_length - 4);
+ memcpy(buf + patch_length - 4, &epatch_info->fw_version, 4);
+
+ *_buf = buf;
+@@ -460,8 +460,10 @@ static int rtl_load_file(struct hci_dev *hdev, const char *name, u8 **buff)
+ if (ret < 0)
+ return ret;
+ ret = fw->size;
+- *buff = kmemdup(fw->data, ret, GFP_KERNEL);
+- if (!*buff)
++ *buff = kvmalloc(fw->size, GFP_KERNEL);
++ if (*buff)
++ memcpy(*buff, fw->data, ret);
++ else
+ ret = -ENOMEM;
+
+ release_firmware(fw);
+@@ -499,14 +501,14 @@ static int btrtl_setup_rtl8723b(struct hci_dev *hdev,
+ goto out;
+
+ if (btrtl_dev->cfg_len > 0) {
+- tbuff = kzalloc(ret + btrtl_dev->cfg_len, GFP_KERNEL);
++ tbuff = kvzalloc(ret + btrtl_dev->cfg_len, GFP_KERNEL);
+ if (!tbuff) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ memcpy(tbuff, fw_data, ret);
+- kfree(fw_data);
++ kvfree(fw_data);
+
+ memcpy(tbuff + ret, btrtl_dev->cfg_data, btrtl_dev->cfg_len);
+ ret += btrtl_dev->cfg_len;
+@@ -519,14 +521,14 @@ static int btrtl_setup_rtl8723b(struct hci_dev *hdev,
+ ret = rtl_download_firmware(hdev, fw_data, ret);
+
+ out:
+- kfree(fw_data);
++ kvfree(fw_data);
+ return ret;
+ }
+
+ void btrtl_free(struct btrtl_device_info *btrtl_dev)
+ {
+- kfree(btrtl_dev->fw_data);
+- kfree(btrtl_dev->cfg_data);
++ kvfree(btrtl_dev->fw_data);
++ kvfree(btrtl_dev->cfg_data);
+ kfree(btrtl_dev);
+ }
+ EXPORT_SYMBOL_GPL(btrtl_free);
+diff --git a/drivers/bus/hisi_lpc.c b/drivers/bus/hisi_lpc.c
+index 20c957185af20..2e9252d37a18f 100644
+--- a/drivers/bus/hisi_lpc.c
++++ b/drivers/bus/hisi_lpc.c
+@@ -358,6 +358,26 @@ static int hisi_lpc_acpi_xlat_io_res(struct acpi_device *adev,
+ return 0;
+ }
+
++/*
++ * Released firmware describes the IO port max address as 0x3fff, which is
++ * the max host bus address. Fixup to a proper range. This will probably
++ * never be fixed in firmware.
++ */
++static void hisi_lpc_acpi_fixup_child_resource(struct device *hostdev,
++ struct resource *r)
++{
++ if (r->end != 0x3fff)
++ return;
++
++ if (r->start == 0xe4)
++ r->end = 0xe4 + 0x04 - 1;
++ else if (r->start == 0x2f8)
++ r->end = 0x2f8 + 0x08 - 1;
++ else
++ dev_warn(hostdev, "unrecognised resource %pR to fixup, ignoring\n",
++ r);
++}
++
+ /*
+ * hisi_lpc_acpi_set_io_res - set the resources for a child
+ * @child: the device node to be updated the I/O resource
+@@ -419,8 +439,11 @@ static int hisi_lpc_acpi_set_io_res(struct device *child,
+ return -ENOMEM;
+ }
+ count = 0;
+- list_for_each_entry(rentry, &resource_list, node)
+- resources[count++] = *rentry->res;
++ list_for_each_entry(rentry, &resource_list, node) {
++ resources[count] = *rentry->res;
++ hisi_lpc_acpi_fixup_child_resource(hostdev, &resources[count]);
++ count++;
++ }
+
+ acpi_dev_free_resource_list(&resource_list);
+
+diff --git a/drivers/char/ipmi/bt-bmc.c b/drivers/char/ipmi/bt-bmc.c
+index 40b9927c072c9..89a8faa9b6cfa 100644
+--- a/drivers/char/ipmi/bt-bmc.c
++++ b/drivers/char/ipmi/bt-bmc.c
+@@ -399,15 +399,15 @@ static int bt_bmc_config_irq(struct bt_bmc *bt_bmc,
+ struct device *dev = &pdev->dev;
+ int rc;
+
+- bt_bmc->irq = platform_get_irq(pdev, 0);
+- if (!bt_bmc->irq)
+- return -ENODEV;
++ bt_bmc->irq = platform_get_irq_optional(pdev, 0);
++ if (bt_bmc->irq < 0)
++ return bt_bmc->irq;
+
+ rc = devm_request_irq(dev, bt_bmc->irq, bt_bmc_irq, IRQF_SHARED,
+ DEVICE_NAME, bt_bmc);
+ if (rc < 0) {
+ dev_warn(dev, "Unable to request IRQ %d\n", bt_bmc->irq);
+- bt_bmc->irq = 0;
++ bt_bmc->irq = rc;
+ return rc;
+ }
+
+@@ -479,7 +479,7 @@ static int bt_bmc_probe(struct platform_device *pdev)
+
+ bt_bmc_config_irq(bt_bmc, pdev);
+
+- if (bt_bmc->irq) {
++ if (bt_bmc->irq >= 0) {
+ dev_info(dev, "Using IRQ %d\n", bt_bmc->irq);
+ } else {
+ dev_info(dev, "No IRQ; using timer\n");
+@@ -505,7 +505,7 @@ static int bt_bmc_remove(struct platform_device *pdev)
+ struct bt_bmc *bt_bmc = dev_get_drvdata(&pdev->dev);
+
+ misc_deregister(&bt_bmc->miscdev);
+- if (!bt_bmc->irq)
++ if (bt_bmc->irq < 0)
+ del_timer_sync(&bt_bmc->poll_timer);
+ return 0;
+ }
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index e877c20e0ee02..75a8f7f572697 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -1223,14 +1223,14 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
+ * We take into account the first, second and third-order deltas
+ * in order to make our estimate.
+ */
+- delta = sample.jiffies - state->last_time;
+- state->last_time = sample.jiffies;
++ delta = sample.jiffies - READ_ONCE(state->last_time);
++ WRITE_ONCE(state->last_time, sample.jiffies);
+
+- delta2 = delta - state->last_delta;
+- state->last_delta = delta;
++ delta2 = delta - READ_ONCE(state->last_delta);
++ WRITE_ONCE(state->last_delta, delta);
+
+- delta3 = delta2 - state->last_delta2;
+- state->last_delta2 = delta2;
++ delta3 = delta2 - READ_ONCE(state->last_delta2);
++ WRITE_ONCE(state->last_delta2, delta2);
+
+ if (delta < 0)
+ delta = -delta;
+diff --git a/drivers/char/tlclk.c b/drivers/char/tlclk.c
+index 6d81bb3bb503f..896a3550fba9f 100644
+--- a/drivers/char/tlclk.c
++++ b/drivers/char/tlclk.c
+@@ -777,17 +777,21 @@ static int __init tlclk_init(void)
+ {
+ int ret;
+
++ telclk_interrupt = (inb(TLCLK_REG7) & 0x0f);
++
++ alarm_events = kzalloc( sizeof(struct tlclk_alarms), GFP_KERNEL);
++ if (!alarm_events) {
++ ret = -ENOMEM;
++ goto out1;
++ }
++
+ ret = register_chrdev(tlclk_major, "telco_clock", &tlclk_fops);
+ if (ret < 0) {
+ printk(KERN_ERR "tlclk: can't get major %d.\n", tlclk_major);
++ kfree(alarm_events);
+ return ret;
+ }
+ tlclk_major = ret;
+- alarm_events = kzalloc( sizeof(struct tlclk_alarms), GFP_KERNEL);
+- if (!alarm_events) {
+- ret = -ENOMEM;
+- goto out1;
+- }
+
+ /* Read telecom clock IRQ number (Set by BIOS) */
+ if (!request_region(TLCLK_BASE, 8, "telco_clock")) {
+@@ -796,7 +800,6 @@ static int __init tlclk_init(void)
+ ret = -EBUSY;
+ goto out2;
+ }
+- telclk_interrupt = (inb(TLCLK_REG7) & 0x0f);
+
+ if (0x0F == telclk_interrupt ) { /* not MCPBL0010 ? */
+ printk(KERN_ERR "telclk_interrupt = 0x%x non-mcpbl0010 hw.\n",
+@@ -837,8 +840,8 @@ out3:
+ release_region(TLCLK_BASE, 8);
+ out2:
+ kfree(alarm_events);
+-out1:
+ unregister_chrdev(tlclk_major, "telco_clock");
++out1:
+ return ret;
+ }
+
+diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
+index e59f1f91d7f3e..a9dcf31eadd21 100644
+--- a/drivers/char/tpm/tpm_crb.c
++++ b/drivers/char/tpm/tpm_crb.c
+@@ -22,6 +22,7 @@
+ #include "tpm.h"
+
+ #define ACPI_SIG_TPM2 "TPM2"
++#define TPM_CRB_MAX_RESOURCES 3
+
+ static const guid_t crb_acpi_start_guid =
+ GUID_INIT(0x6BBF6CAB, 0x5463, 0x4714,
+@@ -91,7 +92,6 @@ enum crb_status {
+ struct crb_priv {
+ u32 sm;
+ const char *hid;
+- void __iomem *iobase;
+ struct crb_regs_head __iomem *regs_h;
+ struct crb_regs_tail __iomem *regs_t;
+ u8 __iomem *cmd;
+@@ -434,21 +434,27 @@ static const struct tpm_class_ops tpm_crb = {
+
+ static int crb_check_resource(struct acpi_resource *ares, void *data)
+ {
+- struct resource *io_res = data;
++ struct resource *iores_array = data;
+ struct resource_win win;
+ struct resource *res = &(win.res);
++ int i;
+
+ if (acpi_dev_resource_memory(ares, res) ||
+ acpi_dev_resource_address_space(ares, &win)) {
+- *io_res = *res;
+- io_res->name = NULL;
++ for (i = 0; i < TPM_CRB_MAX_RESOURCES + 1; ++i) {
++ if (resource_type(iores_array + i) != IORESOURCE_MEM) {
++ iores_array[i] = *res;
++ iores_array[i].name = NULL;
++ break;
++ }
++ }
+ }
+
+ return 1;
+ }
+
+-static void __iomem *crb_map_res(struct device *dev, struct crb_priv *priv,
+- struct resource *io_res, u64 start, u32 size)
++static void __iomem *crb_map_res(struct device *dev, struct resource *iores,
++ void __iomem **iobase_ptr, u64 start, u32 size)
+ {
+ struct resource new_res = {
+ .start = start,
+@@ -460,10 +466,16 @@ static void __iomem *crb_map_res(struct device *dev, struct crb_priv *priv,
+ if (start != new_res.start)
+ return (void __iomem *) ERR_PTR(-EINVAL);
+
+- if (!resource_contains(io_res, &new_res))
++ if (!iores)
+ return devm_ioremap_resource(dev, &new_res);
+
+- return priv->iobase + (new_res.start - io_res->start);
++ if (!*iobase_ptr) {
++ *iobase_ptr = devm_ioremap_resource(dev, iores);
++ if (IS_ERR(*iobase_ptr))
++ return *iobase_ptr;
++ }
++
++ return *iobase_ptr + (new_res.start - iores->start);
+ }
+
+ /*
+@@ -490,9 +502,13 @@ static u64 crb_fixup_cmd_size(struct device *dev, struct resource *io_res,
+ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
+ struct acpi_table_tpm2 *buf)
+ {
+- struct list_head resources;
+- struct resource io_res;
++ struct list_head acpi_resource_list;
++ struct resource iores_array[TPM_CRB_MAX_RESOURCES + 1] = { {0} };
++ void __iomem *iobase_array[TPM_CRB_MAX_RESOURCES] = {NULL};
+ struct device *dev = &device->dev;
++ struct resource *iores;
++ void __iomem **iobase_ptr;
++ int i;
+ u32 pa_high, pa_low;
+ u64 cmd_pa;
+ u32 cmd_size;
+@@ -501,21 +517,41 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
+ u32 rsp_size;
+ int ret;
+
+- INIT_LIST_HEAD(&resources);
+- ret = acpi_dev_get_resources(device, &resources, crb_check_resource,
+- &io_res);
++ INIT_LIST_HEAD(&acpi_resource_list);
++ ret = acpi_dev_get_resources(device, &acpi_resource_list,
++ crb_check_resource, iores_array);
+ if (ret < 0)
+ return ret;
+- acpi_dev_free_resource_list(&resources);
++ acpi_dev_free_resource_list(&acpi_resource_list);
+
+- if (resource_type(&io_res) != IORESOURCE_MEM) {
++ if (resource_type(iores_array) != IORESOURCE_MEM) {
+ dev_err(dev, FW_BUG "TPM2 ACPI table does not define a memory resource\n");
+ return -EINVAL;
++ } else if (resource_type(iores_array + TPM_CRB_MAX_RESOURCES) ==
++ IORESOURCE_MEM) {
++ dev_warn(dev, "TPM2 ACPI table defines too many memory resources\n");
++ memset(iores_array + TPM_CRB_MAX_RESOURCES,
++ 0, sizeof(*iores_array));
++ iores_array[TPM_CRB_MAX_RESOURCES].flags = 0;
+ }
+
+- priv->iobase = devm_ioremap_resource(dev, &io_res);
+- if (IS_ERR(priv->iobase))
+- return PTR_ERR(priv->iobase);
++ iores = NULL;
++ iobase_ptr = NULL;
++ for (i = 0; resource_type(iores_array + i) == IORESOURCE_MEM; ++i) {
++ if (buf->control_address >= iores_array[i].start &&
++ buf->control_address + sizeof(struct crb_regs_tail) - 1 <=
++ iores_array[i].end) {
++ iores = iores_array + i;
++ iobase_ptr = iobase_array + i;
++ break;
++ }
++ }
++
++ priv->regs_t = crb_map_res(dev, iores, iobase_ptr, buf->control_address,
++ sizeof(struct crb_regs_tail));
++
++ if (IS_ERR(priv->regs_t))
++ return PTR_ERR(priv->regs_t);
+
+ /* The ACPI IO region starts at the head area and continues to include
+ * the control area, as one nice sane region except for some older
+@@ -523,9 +559,10 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
+ */
+ if ((priv->sm == ACPI_TPM2_COMMAND_BUFFER) ||
+ (priv->sm == ACPI_TPM2_MEMORY_MAPPED)) {
+- if (buf->control_address == io_res.start +
++ if (iores &&
++ buf->control_address == iores->start +
+ sizeof(*priv->regs_h))
+- priv->regs_h = priv->iobase;
++ priv->regs_h = *iobase_ptr;
+ else
+ dev_warn(dev, FW_BUG "Bad ACPI memory layout");
+ }
+@@ -534,13 +571,6 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
+ if (ret)
+ return ret;
+
+- priv->regs_t = crb_map_res(dev, priv, &io_res, buf->control_address,
+- sizeof(struct crb_regs_tail));
+- if (IS_ERR(priv->regs_t)) {
+- ret = PTR_ERR(priv->regs_t);
+- goto out_relinquish_locality;
+- }
+-
+ /*
+ * PTT HW bug w/a: wake up the device to access
+ * possibly not retained registers.
+@@ -552,13 +582,26 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
+ pa_high = ioread32(&priv->regs_t->ctrl_cmd_pa_high);
+ pa_low = ioread32(&priv->regs_t->ctrl_cmd_pa_low);
+ cmd_pa = ((u64)pa_high << 32) | pa_low;
+- cmd_size = crb_fixup_cmd_size(dev, &io_res, cmd_pa,
+- ioread32(&priv->regs_t->ctrl_cmd_size));
++ cmd_size = ioread32(&priv->regs_t->ctrl_cmd_size);
++
++ iores = NULL;
++ iobase_ptr = NULL;
++ for (i = 0; iores_array[i].end; ++i) {
++ if (cmd_pa >= iores_array[i].start &&
++ cmd_pa <= iores_array[i].end) {
++ iores = iores_array + i;
++ iobase_ptr = iobase_array + i;
++ break;
++ }
++ }
++
++ if (iores)
++ cmd_size = crb_fixup_cmd_size(dev, iores, cmd_pa, cmd_size);
+
+ dev_dbg(dev, "cmd_hi = %X cmd_low = %X cmd_size %X\n",
+ pa_high, pa_low, cmd_size);
+
+- priv->cmd = crb_map_res(dev, priv, &io_res, cmd_pa, cmd_size);
++ priv->cmd = crb_map_res(dev, iores, iobase_ptr, cmd_pa, cmd_size);
+ if (IS_ERR(priv->cmd)) {
+ ret = PTR_ERR(priv->cmd);
+ goto out;
+@@ -566,11 +609,25 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
+
+ memcpy_fromio(&__rsp_pa, &priv->regs_t->ctrl_rsp_pa, 8);
+ rsp_pa = le64_to_cpu(__rsp_pa);
+- rsp_size = crb_fixup_cmd_size(dev, &io_res, rsp_pa,
+- ioread32(&priv->regs_t->ctrl_rsp_size));
++ rsp_size = ioread32(&priv->regs_t->ctrl_rsp_size);
++
++ iores = NULL;
++ iobase_ptr = NULL;
++ for (i = 0; resource_type(iores_array + i) == IORESOURCE_MEM; ++i) {
++ if (rsp_pa >= iores_array[i].start &&
++ rsp_pa <= iores_array[i].end) {
++ iores = iores_array + i;
++ iobase_ptr = iobase_array + i;
++ break;
++ }
++ }
++
++ if (iores)
++ rsp_size = crb_fixup_cmd_size(dev, iores, rsp_pa, rsp_size);
+
+ if (cmd_pa != rsp_pa) {
+- priv->rsp = crb_map_res(dev, priv, &io_res, rsp_pa, rsp_size);
++ priv->rsp = crb_map_res(dev, iores, iobase_ptr,
++ rsp_pa, rsp_size);
+ ret = PTR_ERR_OR_ZERO(priv->rsp);
+ goto out;
+ }
+diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
+index e82013d587b46..64428dbed9928 100644
+--- a/drivers/char/tpm/tpm_ibmvtpm.c
++++ b/drivers/char/tpm/tpm_ibmvtpm.c
+@@ -581,6 +581,7 @@ static irqreturn_t ibmvtpm_interrupt(int irq, void *vtpm_instance)
+ */
+ while ((crq = ibmvtpm_crq_get_next(ibmvtpm)) != NULL) {
+ ibmvtpm_crq_process(crq, ibmvtpm);
++ wake_up_interruptible(&ibmvtpm->crq_queue.wq);
+ crq->valid = 0;
+ smp_wmb();
+ }
+@@ -628,6 +629,7 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
+ }
+
+ crq_q->num_entry = CRQ_RES_BUF_SIZE / sizeof(*crq_q->crq_addr);
++ init_waitqueue_head(&crq_q->wq);
+ ibmvtpm->crq_dma_handle = dma_map_single(dev, crq_q->crq_addr,
+ CRQ_RES_BUF_SIZE,
+ DMA_BIDIRECTIONAL);
+@@ -680,6 +682,13 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
+ if (rc)
+ goto init_irq_cleanup;
+
++ if (!wait_event_timeout(ibmvtpm->crq_queue.wq,
++ ibmvtpm->rtce_buf != NULL,
++ HZ)) {
++ dev_err(dev, "CRQ response timed out\n");
++ goto init_irq_cleanup;
++ }
++
+ return tpm_chip_register(chip);
+ init_irq_cleanup:
+ do {
+diff --git a/drivers/char/tpm/tpm_ibmvtpm.h b/drivers/char/tpm/tpm_ibmvtpm.h
+index 7983f1a33267e..b92aa7d3e93e7 100644
+--- a/drivers/char/tpm/tpm_ibmvtpm.h
++++ b/drivers/char/tpm/tpm_ibmvtpm.h
+@@ -26,6 +26,7 @@ struct ibmvtpm_crq_queue {
+ struct ibmvtpm_crq *crq_addr;
+ u32 index;
+ u32 num_entry;
++ wait_queue_head_t wq;
+ };
+
+ struct ibmvtpm_dev {
+diff --git a/drivers/clk/imx/clk-pfdv2.c b/drivers/clk/imx/clk-pfdv2.c
+index a03bbed662c6b..2a46b9b61b466 100644
+--- a/drivers/clk/imx/clk-pfdv2.c
++++ b/drivers/clk/imx/clk-pfdv2.c
+@@ -139,6 +139,12 @@ static int clk_pfdv2_set_rate(struct clk_hw *hw, unsigned long rate,
+ u32 val;
+ u8 frac;
+
++ if (!rate)
++ return -EINVAL;
++
++ /* PFD can NOT change rate without gating */
++ WARN_ON(clk_pfdv2_is_enabled(hw));
++
+ tmp = tmp * 18 + rate / 2;
+ do_div(tmp, rate);
+ frac = tmp;
+diff --git a/drivers/clk/socfpga/clk-pll-s10.c b/drivers/clk/socfpga/clk-pll-s10.c
+index 4705eb544f01b..8d7b1d0c46643 100644
+--- a/drivers/clk/socfpga/clk-pll-s10.c
++++ b/drivers/clk/socfpga/clk-pll-s10.c
+@@ -39,7 +39,9 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk,
+ /* read VCO1 reg for numerator and denominator */
+ reg = readl(socfpgaclk->hw.reg);
+ refdiv = (reg & SOCFPGA_PLL_REFDIV_MASK) >> SOCFPGA_PLL_REFDIV_SHIFT;
+- vco_freq = (unsigned long long)parent_rate / refdiv;
++
++ vco_freq = parent_rate;
++ do_div(vco_freq, refdiv);
+
+ /* Read mdiv and fdiv from the fdbck register */
+ reg = readl(socfpgaclk->hw.reg + 0x4);
+diff --git a/drivers/clk/ti/adpll.c b/drivers/clk/ti/adpll.c
+index fdfb90058504c..bb2f2836dab22 100644
+--- a/drivers/clk/ti/adpll.c
++++ b/drivers/clk/ti/adpll.c
+@@ -194,15 +194,8 @@ static const char *ti_adpll_clk_get_name(struct ti_adpll_data *d,
+ if (err)
+ return NULL;
+ } else {
+- const char *base_name = "adpll";
+- char *buf;
+-
+- buf = devm_kzalloc(d->dev, 8 + 1 + strlen(base_name) + 1 +
+- strlen(postfix), GFP_KERNEL);
+- if (!buf)
+- return NULL;
+- sprintf(buf, "%08lx.%s.%s", d->pa, base_name, postfix);
+- name = buf;
++ name = devm_kasprintf(d->dev, GFP_KERNEL, "%08lx.adpll.%s",
++ d->pa, postfix);
+ }
+
+ return name;
+diff --git a/drivers/clocksource/h8300_timer8.c b/drivers/clocksource/h8300_timer8.c
+index 1d740a8c42ab3..47114c2a7cb54 100644
+--- a/drivers/clocksource/h8300_timer8.c
++++ b/drivers/clocksource/h8300_timer8.c
+@@ -169,7 +169,7 @@ static int __init h8300_8timer_init(struct device_node *node)
+ return PTR_ERR(clk);
+ }
+
+- ret = ENXIO;
++ ret = -ENXIO;
+ base = of_iomap(node, 0);
+ if (!base) {
+ pr_err("failed to map registers for clockevent\n");
+diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
+index 1806b1da43665..3a2f022f6bde2 100644
+--- a/drivers/cpufreq/powernv-cpufreq.c
++++ b/drivers/cpufreq/powernv-cpufreq.c
+@@ -902,6 +902,7 @@ static struct notifier_block powernv_cpufreq_reboot_nb = {
+ void powernv_cpufreq_work_fn(struct work_struct *work)
+ {
+ struct chip *chip = container_of(work, struct chip, throttle);
++ struct cpufreq_policy *policy;
+ unsigned int cpu;
+ cpumask_t mask;
+
+@@ -916,12 +917,14 @@ void powernv_cpufreq_work_fn(struct work_struct *work)
+ chip->restore = false;
+ for_each_cpu(cpu, &mask) {
+ int index;
+- struct cpufreq_policy policy;
+
+- cpufreq_get_policy(&policy, cpu);
+- index = cpufreq_table_find_index_c(&policy, policy.cur);
+- powernv_cpufreq_target_index(&policy, index);
+- cpumask_andnot(&mask, &mask, policy.cpus);
++ policy = cpufreq_cpu_get(cpu);
++ if (!policy)
++ continue;
++ index = cpufreq_table_find_index_c(policy, policy->cur);
++ powernv_cpufreq_target_index(policy, index);
++ cpumask_andnot(&mask, &mask, policy->cpus);
++ cpufreq_cpu_put(policy);
+ }
+ out:
+ put_online_cpus();
+diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
+index fe2eadc0ce83d..2d30ed5a2674b 100644
+--- a/drivers/crypto/chelsio/chcr_algo.c
++++ b/drivers/crypto/chelsio/chcr_algo.c
+@@ -2480,8 +2480,9 @@ int chcr_aead_dma_map(struct device *dev,
+ else
+ reqctx->b0_dma = 0;
+ if (req->src == req->dst) {
+- error = dma_map_sg(dev, req->src, sg_nents(req->src),
+- DMA_BIDIRECTIONAL);
++ error = dma_map_sg(dev, req->src,
++ sg_nents_for_len(req->src, dst_size),
++ DMA_BIDIRECTIONAL);
+ if (!error)
+ goto err;
+ } else {
+diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c
+index ce1f1d5d7cd5a..c403d6b64e087 100644
+--- a/drivers/crypto/chelsio/chtls/chtls_io.c
++++ b/drivers/crypto/chelsio/chtls/chtls_io.c
+@@ -1437,7 +1437,7 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ csk->wr_max_credits))
+ sk->sk_write_space(sk);
+
+- if (copied >= target && !sk->sk_backlog.tail)
++ if (copied >= target && !READ_ONCE(sk->sk_backlog.tail))
+ break;
+
+ if (copied) {
+@@ -1470,7 +1470,7 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ break;
+ }
+ }
+- if (sk->sk_backlog.tail) {
++ if (READ_ONCE(sk->sk_backlog.tail)) {
+ release_sock(sk);
+ lock_sock(sk);
+ chtls_cleanup_rbuf(sk, copied);
+@@ -1615,7 +1615,7 @@ static int peekmsg(struct sock *sk, struct msghdr *msg,
+ break;
+ }
+
+- if (sk->sk_backlog.tail) {
++ if (READ_ONCE(sk->sk_backlog.tail)) {
+ /* Do not sleep, just process backlog. */
+ release_sock(sk);
+ lock_sock(sk);
+@@ -1743,7 +1743,7 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ csk->wr_max_credits))
+ sk->sk_write_space(sk);
+
+- if (copied >= target && !sk->sk_backlog.tail)
++ if (copied >= target && !READ_ONCE(sk->sk_backlog.tail))
+ break;
+
+ if (copied) {
+@@ -1774,7 +1774,7 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ }
+ }
+
+- if (sk->sk_backlog.tail) {
++ if (READ_ONCE(sk->sk_backlog.tail)) {
+ release_sock(sk);
+ lock_sock(sk);
+ chtls_cleanup_rbuf(sk, copied);
+diff --git a/drivers/dax/bus.c b/drivers/dax/bus.c
+index 8fafbeab510a8..eccdda1f7b71b 100644
+--- a/drivers/dax/bus.c
++++ b/drivers/dax/bus.c
+@@ -227,7 +227,7 @@ static void dax_region_unregister(void *region)
+
+ struct dax_region *alloc_dax_region(struct device *parent, int region_id,
+ struct resource *res, int target_node, unsigned int align,
+- unsigned long pfn_flags)
++ unsigned long long pfn_flags)
+ {
+ struct dax_region *dax_region;
+
+diff --git a/drivers/dax/bus.h b/drivers/dax/bus.h
+index 8619e32999436..9e4eba67e8b98 100644
+--- a/drivers/dax/bus.h
++++ b/drivers/dax/bus.h
+@@ -11,7 +11,7 @@ struct dax_region;
+ void dax_region_put(struct dax_region *dax_region);
+ struct dax_region *alloc_dax_region(struct device *parent, int region_id,
+ struct resource *res, int target_node, unsigned int align,
+- unsigned long flags);
++ unsigned long long flags);
+
+ enum dev_dax_subsys {
+ DEV_DAX_BUS,
+diff --git a/drivers/dax/dax-private.h b/drivers/dax/dax-private.h
+index 6ccca3b890d6f..3107ce80e8090 100644
+--- a/drivers/dax/dax-private.h
++++ b/drivers/dax/dax-private.h
+@@ -32,7 +32,7 @@ struct dax_region {
+ struct device *dev;
+ unsigned int align;
+ struct resource res;
+- unsigned long pfn_flags;
++ unsigned long long pfn_flags;
+ };
+
+ /**
+diff --git a/drivers/devfreq/tegra30-devfreq.c b/drivers/devfreq/tegra30-devfreq.c
+index a6ba75f4106d8..e273011c83fbd 100644
+--- a/drivers/devfreq/tegra30-devfreq.c
++++ b/drivers/devfreq/tegra30-devfreq.c
+@@ -68,6 +68,8 @@
+
+ #define KHZ 1000
+
++#define KHZ_MAX (ULONG_MAX / KHZ)
++
+ /* Assume that the bus is saturated if the utilization is 25% */
+ #define BUS_SATURATION_RATIO 25
+
+@@ -169,7 +171,7 @@ struct tegra_actmon_emc_ratio {
+ };
+
+ static struct tegra_actmon_emc_ratio actmon_emc_ratios[] = {
+- { 1400000, ULONG_MAX },
++ { 1400000, KHZ_MAX },
+ { 1200000, 750000 },
+ { 1100000, 600000 },
+ { 1000000, 500000 },
+diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
+index eba7e3fe769cf..f6df6ef1b0fbe 100644
+--- a/drivers/dma-buf/dma-buf.c
++++ b/drivers/dma-buf/dma-buf.c
+@@ -59,6 +59,8 @@ static void dma_buf_release(struct dentry *dentry)
+ struct dma_buf *dmabuf;
+
+ dmabuf = dentry->d_fsdata;
++ if (unlikely(!dmabuf))
++ return;
+
+ BUG_ON(dmabuf->vmapping_counter);
+
+diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
+index 2c136aee3e794..052a41e2451c1 100644
+--- a/drivers/dma-buf/dma-fence.c
++++ b/drivers/dma-buf/dma-fence.c
+@@ -273,6 +273,30 @@ void dma_fence_free(struct dma_fence *fence)
+ }
+ EXPORT_SYMBOL(dma_fence_free);
+
++static bool __dma_fence_enable_signaling(struct dma_fence *fence)
++{
++ bool was_set;
++
++ lockdep_assert_held(fence->lock);
++
++ was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
++ &fence->flags);
++
++ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
++ return false;
++
++ if (!was_set && fence->ops->enable_signaling) {
++ trace_dma_fence_enable_signal(fence);
++
++ if (!fence->ops->enable_signaling(fence)) {
++ dma_fence_signal_locked(fence);
++ return false;
++ }
++ }
++
++ return true;
++}
++
+ /**
+ * dma_fence_enable_sw_signaling - enable signaling on fence
+ * @fence: the fence to enable
+@@ -285,19 +309,12 @@ void dma_fence_enable_sw_signaling(struct dma_fence *fence)
+ {
+ unsigned long flags;
+
+- if (!test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+- &fence->flags) &&
+- !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) &&
+- fence->ops->enable_signaling) {
+- trace_dma_fence_enable_signal(fence);
+-
+- spin_lock_irqsave(fence->lock, flags);
+-
+- if (!fence->ops->enable_signaling(fence))
+- dma_fence_signal_locked(fence);
++ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
++ return;
+
+- spin_unlock_irqrestore(fence->lock, flags);
+- }
++ spin_lock_irqsave(fence->lock, flags);
++ __dma_fence_enable_signaling(fence);
++ spin_unlock_irqrestore(fence->lock, flags);
+ }
+ EXPORT_SYMBOL(dma_fence_enable_sw_signaling);
+
+@@ -331,7 +348,6 @@ int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
+ {
+ unsigned long flags;
+ int ret = 0;
+- bool was_set;
+
+ if (WARN_ON(!fence || !func))
+ return -EINVAL;
+@@ -343,25 +359,14 @@ int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
+
+ spin_lock_irqsave(fence->lock, flags);
+
+- was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+- &fence->flags);
+-
+- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+- ret = -ENOENT;
+- else if (!was_set && fence->ops->enable_signaling) {
+- trace_dma_fence_enable_signal(fence);
+-
+- if (!fence->ops->enable_signaling(fence)) {
+- dma_fence_signal_locked(fence);
+- ret = -ENOENT;
+- }
+- }
+-
+- if (!ret) {
++ if (__dma_fence_enable_signaling(fence)) {
+ cb->func = func;
+ list_add_tail(&cb->node, &fence->cb_list);
+- } else
++ } else {
+ INIT_LIST_HEAD(&cb->node);
++ ret = -ENOENT;
++ }
++
+ spin_unlock_irqrestore(fence->lock, flags);
+
+ return ret;
+@@ -461,7 +466,6 @@ dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
+ struct default_wait_cb cb;
+ unsigned long flags;
+ signed long ret = timeout ? timeout : 1;
+- bool was_set;
+
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ return ret;
+@@ -473,21 +477,9 @@ dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
+ goto out;
+ }
+
+- was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+- &fence->flags);
+-
+- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
++ if (!__dma_fence_enable_signaling(fence))
+ goto out;
+
+- if (!was_set && fence->ops->enable_signaling) {
+- trace_dma_fence_enable_signal(fence);
+-
+- if (!fence->ops->enable_signaling(fence)) {
+- dma_fence_signal_locked(fence);
+- goto out;
+- }
+- }
+-
+ if (!timeout) {
+ ret = 0;
+ goto out;
+diff --git a/drivers/dma/mediatek/mtk-hsdma.c b/drivers/dma/mediatek/mtk-hsdma.c
+index 1a2028e1c29e9..4c58da7421432 100644
+--- a/drivers/dma/mediatek/mtk-hsdma.c
++++ b/drivers/dma/mediatek/mtk-hsdma.c
+@@ -997,7 +997,7 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
+ if (err) {
+ dev_err(&pdev->dev,
+ "request_irq failed with err %d\n", err);
+- goto err_unregister;
++ goto err_free;
+ }
+
+ platform_set_drvdata(pdev, hsdma);
+@@ -1006,6 +1006,8 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
+
+ return 0;
+
++err_free:
++ of_dma_controller_free(pdev->dev.of_node);
+ err_unregister:
+ dma_async_device_unregister(dd);
+
+diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
+index 5989b08935211..6c5771de32c67 100644
+--- a/drivers/dma/stm32-dma.c
++++ b/drivers/dma/stm32-dma.c
+@@ -488,8 +488,10 @@ static int stm32_dma_terminate_all(struct dma_chan *c)
+
+ spin_lock_irqsave(&chan->vchan.lock, flags);
+
+- if (chan->busy) {
+- stm32_dma_stop(chan);
++ if (chan->desc) {
++ vchan_terminate_vdesc(&chan->desc->vdesc);
++ if (chan->busy)
++ stm32_dma_stop(chan);
+ chan->desc = NULL;
+ }
+
+@@ -545,6 +547,8 @@ static void stm32_dma_start_transfer(struct stm32_dma_chan *chan)
+ if (!vdesc)
+ return;
+
++ list_del(&vdesc->node);
++
+ chan->desc = to_stm32_dma_desc(vdesc);
+ chan->next_sg = 0;
+ }
+@@ -622,7 +626,6 @@ static void stm32_dma_handle_chan_done(struct stm32_dma_chan *chan)
+ } else {
+ chan->busy = false;
+ if (chan->next_sg == chan->desc->num_sgs) {
+- list_del(&chan->desc->vdesc.node);
+ vchan_cookie_complete(&chan->desc->vdesc);
+ chan->desc = NULL;
+ }
+diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
+index 5838311cf9900..ee1cbf3be75d5 100644
+--- a/drivers/dma/stm32-mdma.c
++++ b/drivers/dma/stm32-mdma.c
+@@ -1127,6 +1127,8 @@ static void stm32_mdma_start_transfer(struct stm32_mdma_chan *chan)
+ return;
+ }
+
++ list_del(&vdesc->node);
++
+ chan->desc = to_stm32_mdma_desc(vdesc);
+ hwdesc = chan->desc->node[0].hwdesc;
+ chan->curr_hwdesc = 0;
+@@ -1242,8 +1244,10 @@ static int stm32_mdma_terminate_all(struct dma_chan *c)
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&chan->vchan.lock, flags);
+- if (chan->busy) {
+- stm32_mdma_stop(chan);
++ if (chan->desc) {
++ vchan_terminate_vdesc(&chan->desc->vdesc);
++ if (chan->busy)
++ stm32_mdma_stop(chan);
+ chan->desc = NULL;
+ }
+ vchan_get_all_descriptors(&chan->vchan, &head);
+@@ -1331,7 +1335,6 @@ static enum dma_status stm32_mdma_tx_status(struct dma_chan *c,
+
+ static void stm32_mdma_xfer_end(struct stm32_mdma_chan *chan)
+ {
+- list_del(&chan->desc->vdesc.node);
+ vchan_cookie_complete(&chan->desc->vdesc);
+ chan->desc = NULL;
+ chan->busy = false;
+diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
+index 4a750e29bfb53..3fe27dbde5b2b 100644
+--- a/drivers/dma/tegra20-apb-dma.c
++++ b/drivers/dma/tegra20-apb-dma.c
+@@ -1287,8 +1287,7 @@ static void tegra_dma_free_chan_resources(struct dma_chan *dc)
+
+ dev_dbg(tdc2dev(tdc), "Freeing channel %d\n", tdc->id);
+
+- if (tdc->busy)
+- tegra_dma_terminate_all(dc);
++ tegra_dma_terminate_all(dc);
+
+ spin_lock_irqsave(&tdc->lock, flags);
+ list_splice_init(&tdc->pending_sg_req, &sg_req_list);
+diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
+index 9c845c07b107c..d47749a35863f 100644
+--- a/drivers/dma/xilinx/zynqmp_dma.c
++++ b/drivers/dma/xilinx/zynqmp_dma.c
+@@ -123,10 +123,12 @@
+ /* Max transfer size per descriptor */
+ #define ZYNQMP_DMA_MAX_TRANS_LEN 0x40000000
+
++/* Max burst lengths */
++#define ZYNQMP_DMA_MAX_DST_BURST_LEN 32768U
++#define ZYNQMP_DMA_MAX_SRC_BURST_LEN 32768U
++
+ /* Reset values for data attributes */
+ #define ZYNQMP_DMA_AXCACHE_VAL 0xF
+-#define ZYNQMP_DMA_ARLEN_RST_VAL 0xF
+-#define ZYNQMP_DMA_AWLEN_RST_VAL 0xF
+
+ #define ZYNQMP_DMA_SRC_ISSUE_RST_VAL 0x1F
+
+@@ -534,17 +536,19 @@ static void zynqmp_dma_handle_ovfl_int(struct zynqmp_dma_chan *chan, u32 status)
+
+ static void zynqmp_dma_config(struct zynqmp_dma_chan *chan)
+ {
+- u32 val;
++ u32 val, burst_val;
+
+ val = readl(chan->regs + ZYNQMP_DMA_CTRL0);
+ val |= ZYNQMP_DMA_POINT_TYPE_SG;
+ writel(val, chan->regs + ZYNQMP_DMA_CTRL0);
+
+ val = readl(chan->regs + ZYNQMP_DMA_DATA_ATTR);
++ burst_val = __ilog2_u32(chan->src_burst_len);
+ val = (val & ~ZYNQMP_DMA_ARLEN) |
+- (chan->src_burst_len << ZYNQMP_DMA_ARLEN_OFST);
++ ((burst_val << ZYNQMP_DMA_ARLEN_OFST) & ZYNQMP_DMA_ARLEN);
++ burst_val = __ilog2_u32(chan->dst_burst_len);
+ val = (val & ~ZYNQMP_DMA_AWLEN) |
+- (chan->dst_burst_len << ZYNQMP_DMA_AWLEN_OFST);
++ ((burst_val << ZYNQMP_DMA_AWLEN_OFST) & ZYNQMP_DMA_AWLEN);
+ writel(val, chan->regs + ZYNQMP_DMA_DATA_ATTR);
+ }
+
+@@ -560,8 +564,10 @@ static int zynqmp_dma_device_config(struct dma_chan *dchan,
+ {
+ struct zynqmp_dma_chan *chan = to_chan(dchan);
+
+- chan->src_burst_len = config->src_maxburst;
+- chan->dst_burst_len = config->dst_maxburst;
++ chan->src_burst_len = clamp(config->src_maxburst, 1U,
++ ZYNQMP_DMA_MAX_SRC_BURST_LEN);
++ chan->dst_burst_len = clamp(config->dst_maxburst, 1U,
++ ZYNQMP_DMA_MAX_DST_BURST_LEN);
+
+ return 0;
+ }
+@@ -887,8 +893,8 @@ static int zynqmp_dma_chan_probe(struct zynqmp_dma_device *zdev,
+ return PTR_ERR(chan->regs);
+
+ chan->bus_width = ZYNQMP_DMA_BUS_WIDTH_64;
+- chan->dst_burst_len = ZYNQMP_DMA_AWLEN_RST_VAL;
+- chan->src_burst_len = ZYNQMP_DMA_ARLEN_RST_VAL;
++ chan->dst_burst_len = ZYNQMP_DMA_MAX_DST_BURST_LEN;
++ chan->src_burst_len = ZYNQMP_DMA_MAX_SRC_BURST_LEN;
+ err = of_property_read_u32(node, "xlnx,bus-width", &chan->bus_width);
+ if (err < 0) {
+ dev_err(&pdev->dev, "missing xlnx,bus-width property\n");
+diff --git a/drivers/edac/ghes_edac.c b/drivers/edac/ghes_edac.c
+index 523dd56a798c9..0031819402d0c 100644
+--- a/drivers/edac/ghes_edac.c
++++ b/drivers/edac/ghes_edac.c
+@@ -488,6 +488,7 @@ int ghes_edac_register(struct ghes *ghes, struct device *dev)
+ if (!force_load && idx < 0)
+ return -ENODEV;
+ } else {
++ force_load = true;
+ idx = 0;
+ }
+
+@@ -586,6 +587,9 @@ void ghes_edac_unregister(struct ghes *ghes)
+ struct mem_ctl_info *mci;
+ unsigned long flags;
+
++ if (!force_load)
++ return;
++
+ mutex_lock(&ghes_reg_mutex);
+
+ if (!refcount_dec_and_test(&ghes_refcount))
+diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c
+index eb2df89d4924f..e497785cd99fe 100644
+--- a/drivers/firmware/arm_sdei.c
++++ b/drivers/firmware/arm_sdei.c
+@@ -412,14 +412,19 @@ int sdei_event_enable(u32 event_num)
+ return -ENOENT;
+ }
+
+- spin_lock(&sdei_list_lock);
+- event->reenable = true;
+- spin_unlock(&sdei_list_lock);
+
++ cpus_read_lock();
+ if (event->type == SDEI_EVENT_TYPE_SHARED)
+ err = sdei_api_event_enable(event->event_num);
+ else
+ err = sdei_do_cross_call(_local_event_enable, event);
++
++ if (!err) {
++ spin_lock(&sdei_list_lock);
++ event->reenable = true;
++ spin_unlock(&sdei_list_lock);
++ }
++ cpus_read_unlock();
+ mutex_unlock(&sdei_events_lock);
+
+ return err;
+@@ -621,21 +626,18 @@ int sdei_event_register(u32 event_num, sdei_event_callback *cb, void *arg)
+ break;
+ }
+
+- spin_lock(&sdei_list_lock);
+- event->reregister = true;
+- spin_unlock(&sdei_list_lock);
+-
++ cpus_read_lock();
+ err = _sdei_event_register(event);
+ if (err) {
+- spin_lock(&sdei_list_lock);
+- event->reregister = false;
+- event->reenable = false;
+- spin_unlock(&sdei_list_lock);
+-
+ sdei_event_destroy(event);
+ pr_warn("Failed to register event %u: %d\n", event_num,
+ err);
++ } else {
++ spin_lock(&sdei_list_lock);
++ event->reregister = true;
++ spin_unlock(&sdei_list_lock);
+ }
++ cpus_read_unlock();
+ } while (0);
+ mutex_unlock(&sdei_events_lock);
+
+diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
+index 187984d26f47a..f0b6c68e848e3 100644
+--- a/drivers/gpio/gpio-rcar.c
++++ b/drivers/gpio/gpio-rcar.c
+@@ -250,8 +250,10 @@ static int gpio_rcar_request(struct gpio_chip *chip, unsigned offset)
+ int error;
+
+ error = pm_runtime_get_sync(p->dev);
+- if (error < 0)
++ if (error < 0) {
++ pm_runtime_put(p->dev);
+ return error;
++ }
+
+ error = pinctrl_gpio_request(chip->base + offset);
+ if (error)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
+index d10f483f5e273..ce30d4e8bf25f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
+@@ -644,6 +644,9 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
+ uint32_t temp;
+ struct v10_compute_mqd *m = get_mqd(mqd);
+
++ if (amdgpu_sriov_vf(adev) && adev->in_gpu_reset)
++ return 0;
++
+ #if 0
+ unsigned long flags;
+ int retry;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+index e262f2ac07a35..92754cfb98086 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+@@ -540,6 +540,9 @@ int kgd_gfx_v9_hqd_destroy(struct kgd_dev *kgd, void *mqd,
+ uint32_t temp;
+ struct v9_mqd *m = get_mqd(mqd);
+
++ if (amdgpu_sriov_vf(adev) && adev->in_gpu_reset)
++ return 0;
++
+ if (adev->in_gpu_reset)
+ return -EIO;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+index edb561baf8b90..f3fa271e3394c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+@@ -1247,15 +1247,15 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
+ * be freed anyway
+ */
+
+- /* No more MMU notifiers */
+- amdgpu_mn_unregister(mem->bo);
+-
+ /* Make sure restore workers don't access the BO any more */
+ bo_list_entry = &mem->validate_list;
+ mutex_lock(&process_info->lock);
+ list_del(&bo_list_entry->head);
+ mutex_unlock(&process_info->lock);
+
++ /* No more MMU notifiers */
++ amdgpu_mn_unregister(mem->bo);
++
+ ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
+ if (unlikely(ret))
+ return ret;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+index 50dff69a0f6e3..b1172d93c99c3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+@@ -192,30 +192,35 @@ static bool amdgpu_read_bios_from_rom(struct amdgpu_device *adev)
+
+ static bool amdgpu_read_platform_bios(struct amdgpu_device *adev)
+ {
+- uint8_t __iomem *bios;
+- size_t size;
++ phys_addr_t rom = adev->pdev->rom;
++ size_t romlen = adev->pdev->romlen;
++ void __iomem *bios;
+
+ adev->bios = NULL;
+
+- bios = pci_platform_rom(adev->pdev, &size);
+- if (!bios) {
++ if (!rom || romlen == 0)
+ return false;
+- }
+
+- adev->bios = kzalloc(size, GFP_KERNEL);
+- if (adev->bios == NULL)
++ adev->bios = kzalloc(romlen, GFP_KERNEL);
++ if (!adev->bios)
+ return false;
+
+- memcpy_fromio(adev->bios, bios, size);
++ bios = ioremap(rom, romlen);
++ if (!bios)
++ goto free_bios;
+
+- if (!check_atom_bios(adev->bios, size)) {
+- kfree(adev->bios);
+- return false;
+- }
++ memcpy_fromio(adev->bios, bios, romlen);
++ iounmap(bios);
+
+- adev->bios_size = size;
++ if (!check_atom_bios(adev->bios, romlen))
++ goto free_bios;
++
++ adev->bios_size = romlen;
+
+ return true;
++free_bios:
++ kfree(adev->bios);
++ return false;
+ }
+
+ #ifdef CONFIG_ACPI
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 5e1dce4241547..4105fbf571674 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -3466,6 +3466,8 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
+ if (r)
+ return r;
+
++ amdgpu_amdkfd_pre_reset(adev);
++
+ /* Resume IP prior to SMC */
+ r = amdgpu_device_ip_reinit_early_sriov(adev);
+ if (r)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
+index c8793e6cc3c5d..6373bfb47d55d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
+@@ -124,13 +124,12 @@ int amdgpu_gfx_rlc_init_sr(struct amdgpu_device *adev, u32 dws)
+ */
+ int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev)
+ {
+- volatile u32 *dst_ptr;
+ u32 dws;
+ int r;
+
+ /* allocate clear state block */
+ adev->gfx.rlc.clear_state_size = dws = adev->gfx.rlc.funcs->get_csb_size(adev);
+- r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
++ r = amdgpu_bo_create_kernel(adev, dws * 4, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->gfx.rlc.clear_state_obj,
+ &adev->gfx.rlc.clear_state_gpu_addr,
+@@ -141,13 +140,6 @@ int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev)
+ return r;
+ }
+
+- /* set up the cs buffer */
+- dst_ptr = adev->gfx.rlc.cs_ptr;
+- adev->gfx.rlc.funcs->get_csb_buffer(adev, dst_ptr);
+- amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
+- amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
+- amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+-
+ return 0;
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c
+index dd30f4e61a8cd..cae426c7c0863 100644
+--- a/drivers/gpu/drm/amd/amdgpu/atom.c
++++ b/drivers/gpu/drm/amd/amdgpu/atom.c
+@@ -744,8 +744,8 @@ static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
+ cjiffies = jiffies;
+ if (time_after(cjiffies, ctx->last_jump_jiffies)) {
+ cjiffies -= ctx->last_jump_jiffies;
+- if ((jiffies_to_msecs(cjiffies) > 5000)) {
+- DRM_ERROR("atombios stuck in loop for more than 5secs aborting\n");
++ if ((jiffies_to_msecs(cjiffies) > 10000)) {
++ DRM_ERROR("atombios stuck in loop for more than 10secs aborting\n");
+ ctx->abort = true;
+ }
+ } else {
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c |
3984 |
+index 19876c90be0e1..d17edc850427a 100644 |
3985 |
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c |
3986 |
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c |
3987 |
+@@ -993,39 +993,6 @@ static int gfx_v10_0_rlc_init(struct amdgpu_device *adev) |
3988 |
+ return 0; |
3989 |
+ } |
3990 |
+ |
3991 |
+-static int gfx_v10_0_csb_vram_pin(struct amdgpu_device *adev) |
3992 |
+-{ |
3993 |
+- int r; |
3994 |
+- |
3995 |
+- r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false); |
+-	if (unlikely(r != 0))
+-		return r;
+-
+-	r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj,
+-			AMDGPU_GEM_DOMAIN_VRAM);
+-	if (!r)
+-		adev->gfx.rlc.clear_state_gpu_addr =
+-			amdgpu_bo_gpu_offset(adev->gfx.rlc.clear_state_obj);
+-
+-	amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+-
+-	return r;
+-}
+-
+-static void gfx_v10_0_csb_vram_unpin(struct amdgpu_device *adev)
+-{
+-	int r;
+-
+-	if (!adev->gfx.rlc.clear_state_obj)
+-		return;
+-
+-	r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
+-	if (likely(r == 0)) {
+-		amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
+-		amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+-	}
+-}
+-
+ static void gfx_v10_0_mec_fini(struct amdgpu_device *adev)
+ {
+ 	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
+@@ -1787,25 +1754,7 @@ static void gfx_v10_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
+ 
+ static int gfx_v10_0_init_csb(struct amdgpu_device *adev)
+ {
+-	int r;
+-
+-	if (adev->in_gpu_reset) {
+-		r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
+-		if (r)
+-			return r;
+-
+-		r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj,
+-				   (void **)&adev->gfx.rlc.cs_ptr);
+-		if (!r) {
+-			adev->gfx.rlc.funcs->get_csb_buffer(adev,
+-					adev->gfx.rlc.cs_ptr);
+-			amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
+-		}
+-
+-		amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+-		if (r)
+-			return r;
+-	}
++	adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
+ 
+ 	/* csib */
+ 	WREG32_SOC15(GC, 0, mmRLC_CSIB_ADDR_HI,
+@@ -3774,10 +3723,6 @@ static int gfx_v10_0_hw_init(void *handle)
+ 	int r;
+ 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ 
+-	r = gfx_v10_0_csb_vram_pin(adev);
+-	if (r)
+-		return r;
+-
+ 	if (!amdgpu_emu_mode)
+ 		gfx_v10_0_init_golden_registers(adev);
+ 
+@@ -3865,7 +3810,6 @@ static int gfx_v10_0_hw_fini(void *handle)
+ 	}
+ 	gfx_v10_0_cp_enable(adev, false);
+ 	gfx_v10_0_enable_gui_idle_interrupt(adev, false);
+-	gfx_v10_0_csb_vram_unpin(adev);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+index 791ba398f007e..d92e92e5d50b7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+@@ -4554,6 +4554,8 @@ static int gfx_v7_0_hw_init(void *handle)
+ 
+ 	gfx_v7_0_constants_init(adev);
+ 
++	/* init CSB */
++	adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
+ 	/* init rlc */
+ 	r = adev->gfx.rlc.funcs->resume(adev);
+ 	if (r)
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+index cc88ba76a8d4a..467ed7fca884d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+@@ -1321,39 +1321,6 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
+ 	return 0;
+ }
+ 
+-static int gfx_v8_0_csb_vram_pin(struct amdgpu_device *adev)
+-{
+-	int r;
+-
+-	r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
+-	if (unlikely(r != 0))
+-		return r;
+-
+-	r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj,
+-			AMDGPU_GEM_DOMAIN_VRAM);
+-	if (!r)
+-		adev->gfx.rlc.clear_state_gpu_addr =
+-			amdgpu_bo_gpu_offset(adev->gfx.rlc.clear_state_obj);
+-
+-	amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+-
+-	return r;
+-}
+-
+-static void gfx_v8_0_csb_vram_unpin(struct amdgpu_device *adev)
+-{
+-	int r;
+-
+-	if (!adev->gfx.rlc.clear_state_obj)
+-		return;
+-
+-	r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
+-	if (likely(r == 0)) {
+-		amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
+-		amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+-	}
+-}
+-
+ static void gfx_v8_0_mec_fini(struct amdgpu_device *adev)
+ {
+ 	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
+@@ -3917,6 +3884,7 @@ static void gfx_v8_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
+ 
+ static void gfx_v8_0_init_csb(struct amdgpu_device *adev)
+ {
++	adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
+ 	/* csib */
+ 	WREG32(mmRLC_CSIB_ADDR_HI,
+ 			adev->gfx.rlc.clear_state_gpu_addr >> 32);
+@@ -4837,10 +4805,6 @@ static int gfx_v8_0_hw_init(void *handle)
+ 	gfx_v8_0_init_golden_registers(adev);
+ 	gfx_v8_0_constants_init(adev);
+ 
+-	r = gfx_v8_0_csb_vram_pin(adev);
+-	if (r)
+-		return r;
+-
+ 	r = adev->gfx.rlc.funcs->resume(adev);
+ 	if (r)
+ 		return r;
+@@ -4958,8 +4922,6 @@ static int gfx_v8_0_hw_fini(void *handle)
+ 		pr_err("rlc is busy, skip halt rlc\n");
+ 	amdgpu_gfx_rlc_exit_safe_mode(adev);
+ 
+-	gfx_v8_0_csb_vram_unpin(adev);
+-
+ 	return 0;
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index 6004fdacc8663..90dcc7afc9c43 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -1675,39 +1675,6 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
+ 	return 0;
+ }
+ 
+-static int gfx_v9_0_csb_vram_pin(struct amdgpu_device *adev)
+-{
+-	int r;
+-
+-	r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
+-	if (unlikely(r != 0))
+-		return r;
+-
+-	r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj,
+-			AMDGPU_GEM_DOMAIN_VRAM);
+-	if (!r)
+-		adev->gfx.rlc.clear_state_gpu_addr =
+-			amdgpu_bo_gpu_offset(adev->gfx.rlc.clear_state_obj);
+-
+-	amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+-
+-	return r;
+-}
+-
+-static void gfx_v9_0_csb_vram_unpin(struct amdgpu_device *adev)
+-{
+-	int r;
+-
+-	if (!adev->gfx.rlc.clear_state_obj)
+-		return;
+-
+-	r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
+-	if (likely(r == 0)) {
+-		amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
+-		amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+-	}
+-}
+-
+ static void gfx_v9_0_mec_fini(struct amdgpu_device *adev)
+ {
+ 	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
+@@ -2596,6 +2563,7 @@ static void gfx_v9_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
+ 
+ static void gfx_v9_0_init_csb(struct amdgpu_device *adev)
+ {
++	adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
+ 	/* csib */
+ 	WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_HI),
+ 			adev->gfx.rlc.clear_state_gpu_addr >> 32);
+@@ -3888,10 +3856,6 @@ static int gfx_v9_0_hw_init(void *handle)
+ 
+ 	gfx_v9_0_constants_init(adev);
+ 
+-	r = gfx_v9_0_csb_vram_pin(adev);
+-	if (r)
+-		return r;
+-
+ 	r = adev->gfx.rlc.funcs->resume(adev);
+ 	if (r)
+ 		return r;
+@@ -3977,8 +3941,6 @@ static int gfx_v9_0_hw_fini(void *handle)
+ 	gfx_v9_0_cp_enable(adev, false);
+ 	adev->gfx.rlc.funcs->stop(adev);
+ 
+-	gfx_v9_0_csb_vram_unpin(adev);
+-
+ 	return 0;
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+index 36ad0c0e8efbc..cd2cbe760e883 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+@@ -1026,6 +1026,10 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
+ 	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
+ 	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);
+ 
++	/* Stall DPG before WPTR/RPTR reset */
++	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
++		UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
++		~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
+ 	/* set the write pointer delay */
+ 	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0);
+ 
+@@ -1048,6 +1052,9 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
+ 	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
+ 		lower_32_bits(ring->wptr));
+ 
++	/* Unstall DPG */
++	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
++		0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
+ 	return 0;
+ }
+ 
+@@ -1357,8 +1364,13 @@ static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
+ 				   UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
+ 				   UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, ret_code);
+ 
++			/* Stall DPG before WPTR/RPTR reset */
++			WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
++				UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
++				~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
+ 			/* Restore */
+ 			ring = &adev->vcn.inst->ring_enc[0];
++			ring->wptr = 0;
+ 			WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
+ 			WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+ 			WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
+@@ -1366,6 +1378,7 @@ static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
+ 			WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
+ 
+ 			ring = &adev->vcn.inst->ring_enc[1];
++			ring->wptr = 0;
+ 			WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
+ 			WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
+ 			WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
+@@ -1374,6 +1387,9 @@ static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
+ 
+ 			WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
+ 				   RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
++			/* Unstall DPG */
++			WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
++				0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
+ 
+ 			SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
+ 				   UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
+diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
+index 901fe35901656..d3400da6ab643 100644
+--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
++++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
+@@ -905,7 +905,7 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
+ 	0x7a5d0000, 0x807c817c,
+ 	0x807aff7a, 0x00000080,
+ 	0xbf0a717c, 0xbf85fff8,
+-	0xbf820141, 0xbef4037e,
++	0xbf820142, 0xbef4037e,
+ 	0x8775ff7f, 0x0000ffff,
+ 	0x8875ff75, 0x00040000,
+ 	0xbef60380, 0xbef703ff,
+@@ -967,7 +967,7 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
+ 	0x725d0000, 0xe0304080,
+ 	0x725d0100, 0xe0304100,
+ 	0x725d0200, 0xe0304180,
+-	0x725d0300, 0xbf820031,
++	0x725d0300, 0xbf820032,
+ 	0xbef603ff, 0x01000000,
+ 	0xbef20378, 0x8078ff78,
+ 	0x00000400, 0xbefc0384,
+@@ -992,83 +992,84 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
+ 	0x725d0000, 0xe0304100,
+ 	0x725d0100, 0xe0304200,
+ 	0x725d0200, 0xe0304300,
+-	0x725d0300, 0xb9782a05,
+-	0x80788178, 0x907c9973,
+-	0x877c817c, 0xbf06817c,
+-	0xbf850002, 0x8f788978,
+-	0xbf820001, 0x8f788a78,
+-	0xb9721e06, 0x8f728a72,
+-	0x80787278, 0x8078ff78,
+-	0x00000200, 0x80f8ff78,
+-	0x00000050, 0xbef603ff,
+-	0x01000000, 0xbefc03ff,
+-	0x0000006c, 0x80f89078,
+-	0xf429003a, 0xf0000000,
+-	0xbf8cc07f, 0x80fc847c,
+-	0xbf800000, 0xbe803100,
+-	0xbe823102, 0x80f8a078,
+-	0xf42d003a, 0xf0000000,
+-	0xbf8cc07f, 0x80fc887c,
+-	0xbf800000, 0xbe803100,
+-	0xbe823102, 0xbe843104,
+-	0xbe863106, 0x80f8c078,
+-	0xf431003a, 0xf0000000,
+-	0xbf8cc07f, 0x80fc907c,
+-	0xbf800000, 0xbe803100,
+-	0xbe823102, 0xbe843104,
+-	0xbe863106, 0xbe883108,
+-	0xbe8a310a, 0xbe8c310c,
+-	0xbe8e310e, 0xbf06807c,
+-	0xbf84fff0, 0xb9782a05,
+-	0x80788178, 0x907c9973,
+-	0x877c817c, 0xbf06817c,
+-	0xbf850002, 0x8f788978,
+-	0xbf820001, 0x8f788a78,
+-	0xb9721e06, 0x8f728a72,
+-	0x80787278, 0x8078ff78,
+-	0x00000200, 0xbef603ff,
+-	0x01000000, 0xf4211bfa,
++	0x725d0300, 0xbf8c3f70,
++	0xb9782a05, 0x80788178,
++	0x907c9973, 0x877c817c,
++	0xbf06817c, 0xbf850002,
++	0x8f788978, 0xbf820001,
++	0x8f788a78, 0xb9721e06,
++	0x8f728a72, 0x80787278,
++	0x8078ff78, 0x00000200,
++	0x80f8ff78, 0x00000050,
++	0xbef603ff, 0x01000000,
++	0xbefc03ff, 0x0000006c,
++	0x80f89078, 0xf429003a,
++	0xf0000000, 0xbf8cc07f,
++	0x80fc847c, 0xbf800000,
++	0xbe803100, 0xbe823102,
++	0x80f8a078, 0xf42d003a,
++	0xf0000000, 0xbf8cc07f,
++	0x80fc887c, 0xbf800000,
++	0xbe803100, 0xbe823102,
++	0xbe843104, 0xbe863106,
++	0x80f8c078, 0xf431003a,
++	0xf0000000, 0xbf8cc07f,
++	0x80fc907c, 0xbf800000,
++	0xbe803100, 0xbe823102,
++	0xbe843104, 0xbe863106,
++	0xbe883108, 0xbe8a310a,
++	0xbe8c310c, 0xbe8e310e,
++	0xbf06807c, 0xbf84fff0,
++	0xb9782a05, 0x80788178,
++	0x907c9973, 0x877c817c,
++	0xbf06817c, 0xbf850002,
++	0x8f788978, 0xbf820001,
++	0x8f788a78, 0xb9721e06,
++	0x8f728a72, 0x80787278,
++	0x8078ff78, 0x00000200,
++	0xbef603ff, 0x01000000,
++	0xf4211bfa, 0xf0000000,
++	0x80788478, 0xf4211b3a,
+ 	0xf0000000, 0x80788478,
+-	0xf4211b3a, 0xf0000000,
+-	0x80788478, 0xf4211b7a,
++	0xf4211b7a, 0xf0000000,
++	0x80788478, 0xf4211eba,
+ 	0xf0000000, 0x80788478,
+-	0xf4211eba, 0xf0000000,
+-	0x80788478, 0xf4211efa,
++	0xf4211efa, 0xf0000000,
++	0x80788478, 0xf4211c3a,
+ 	0xf0000000, 0x80788478,
+-	0xf4211c3a, 0xf0000000,
+-	0x80788478, 0xf4211c7a,
++	0xf4211c7a, 0xf0000000,
++	0x80788478, 0xf4211e7a,
+ 	0xf0000000, 0x80788478,
+-	0xf4211e7a, 0xf0000000,
+-	0x80788478, 0xf4211cfa,
++	0xf4211cfa, 0xf0000000,
++	0x80788478, 0xf4211bba,
+ 	0xf0000000, 0x80788478,
++	0xbf8cc07f, 0xb9eef814,
+ 	0xf4211bba, 0xf0000000,
+ 	0x80788478, 0xbf8cc07f,
+-	0xb9eef814, 0xf4211bba,
+-	0xf0000000, 0x80788478,
+-	0xbf8cc07f, 0xb9eef815,
+-	0xbef2036d, 0x876dff72,
+-	0x0000ffff, 0xbefc036f,
+-	0xbefe037a, 0xbeff037b,
+-	0x876f71ff, 0x000003ff,
+-	0xb9ef4803, 0xb9f9f816,
+-	0x876f71ff, 0xfffff800,
+-	0x906f8b6f, 0xb9efa2c3,
+-	0xb9f3f801, 0x876fff72,
+-	0xfc000000, 0x906f9a6f,
+-	0x8f6f906f, 0xbef30380,
++	0xb9eef815, 0xbef2036d,
++	0x876dff72, 0x0000ffff,
++	0xbefc036f, 0xbefe037a,
++	0xbeff037b, 0x876f71ff,
++	0x000003ff, 0xb9ef4803,
++	0xb9f9f816, 0x876f71ff,
++	0xfffff800, 0x906f8b6f,
++	0xb9efa2c3, 0xb9f3f801,
++	0x876fff72, 0xfc000000,
++	0x906f9a6f, 0x8f6f906f,
++	0xbef30380, 0x88736f73,
++	0x876fff72, 0x02000000,
++	0x906f996f, 0x8f6f8f6f,
+ 	0x88736f73, 0x876fff72,
+-	0x02000000, 0x906f996f,
+-	0x8f6f8f6f, 0x88736f73,
+-	0x876fff72, 0x01000000,
+-	0x906f986f, 0x8f6f996f,
+-	0x88736f73, 0x876fff70,
+-	0x00800000, 0x906f976f,
+-	0xb9f3f807, 0x87fe7e7e,
+-	0x87ea6a6a, 0xb9f0f802,
+-	0xbf8a0000, 0xbe80226c,
+-	0xbf810000, 0xbf9f0000,
++	0x01000000, 0x906f986f,
++	0x8f6f996f, 0x88736f73,
++	0x876fff70, 0x00800000,
++	0x906f976f, 0xb9f3f807,
++	0x87fe7e7e, 0x87ea6a6a,
++	0xb9f0f802, 0xbf8a0000,
++	0xbe80226c, 0xbf810000,
+ 	0xbf9f0000, 0xbf9f0000,
+ 	0xbf9f0000, 0xbf9f0000,
++	0xbf9f0000, 0x00000000,
+ };
+ static const uint32_t cwsr_trap_arcturus_hex[] = {
+ 	0xbf820001, 0xbf8202c4,
+diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
+index cdaa523ce6bee..4433bda2ce25e 100644
+--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
++++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
+@@ -758,6 +758,7 @@ L_RESTORE_V0:
+ 	buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256
+ 	buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256*2
+ 	buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256*3
++	s_waitcnt vmcnt(0)
+ 
+ 	/* restore SGPRs */
+ 	//will be 2+8+16*6
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+index a2ed9c257cb0d..e9a2784400792 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+@@ -1075,6 +1075,8 @@ static int stop_cpsch(struct device_queue_manager *dqm)
+ 	unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
+ 	dqm_unlock(dqm);
+ 
++	pm_release_ib(&dqm->packets);
++
+ 	kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
+ 	pm_uninit(&dqm->packets);
+ 
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 60e50181f6d39..2384aa018993d 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -4299,19 +4299,6 @@ static void dm_crtc_helper_disable(struct drm_crtc *crtc)
+ {
+ }
+ 
+-static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
+-{
+-	struct drm_device *dev = new_crtc_state->crtc->dev;
+-	struct drm_plane *plane;
+-
+-	drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
+-		if (plane->type == DRM_PLANE_TYPE_CURSOR)
+-			return true;
+-	}
+-
+-	return false;
+-}
+-
+ static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
+ {
+ 	struct drm_atomic_state *state = new_crtc_state->state;
+@@ -4391,19 +4378,20 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
+ 		return ret;
+ 	}
+ 
+-	/* In some use cases, like reset, no stream is attached */
+-	if (!dm_crtc_state->stream)
+-		return 0;
+-
+ 	/*
+-	 * We want at least one hardware plane enabled to use
+-	 * the stream with a cursor enabled.
++	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
++	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
++	 * planes are disabled, which is not supported by the hardware. And there is legacy
++	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
+ 	 */
+-	if (state->enable && state->active &&
+-	    does_crtc_have_active_cursor(state) &&
+-	    dm_crtc_state->active_planes == 0)
++	if (state->enable &&
++	    !(state->plane_mask & drm_plane_mask(crtc->primary)))
+ 		return -EINVAL;
+ 
++	/* In some use cases, like reset, no stream is attached */
++	if (!dm_crtc_state->stream)
++		return 0;
++
+ 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
+ 		return 0;
+ 
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+index b43bb7f90e4e9..2233d293a707a 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+@@ -210,6 +210,8 @@ static int __set_legacy_tf(struct dc_transfer_func *func,
+ 	res = mod_color_calculate_regamma_params(func, gamma, true, has_rom,
+ 						 NULL);
+ 
++	dc_gamma_release(&gamma);
++
+ 	return res ? 0 : -ENOMEM;
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+index 5bf12a446e952..3efee7b3378a3 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+@@ -1733,8 +1733,7 @@ static void write_i2c_retimer_setting(
+ 				slave_address, buffer[0], buffer[1], i2c_success?1:0);
+ 
+ 		if (!i2c_success)
+-			/* Write failure */
+-			ASSERT(i2c_success);
++			goto i2c_write_fail;
+ 
+ 		/* Based on DP159 specs, APPLY_RX_TX_CHANGE bit in 0x0A
+ 		 * needs to be set to 1 on every 0xA-0xC write.
+@@ -1752,8 +1751,7 @@ static void write_i2c_retimer_setting(
+ 						pipe_ctx->stream->link->ddc,
+ 						slave_address, &offset, 1, &value, 1);
+ 				if (!i2c_success)
+-					/* Write failure */
+-					ASSERT(i2c_success);
++					goto i2c_write_fail;
+ 			}
+ 
+ 			buffer[0] = offset;
+@@ -1765,8 +1763,7 @@ static void write_i2c_retimer_setting(
+ 				offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
+ 				slave_address, buffer[0], buffer[1], i2c_success?1:0);
+ 			if (!i2c_success)
+-				/* Write failure */
+-				ASSERT(i2c_success);
++				goto i2c_write_fail;
+ 		}
+ 	}
+ 	}
+@@ -1786,8 +1783,7 @@ static void write_i2c_retimer_setting(
+ 					slave_address, buffer[0], buffer[1], i2c_success?1:0);
+ 
+ 			if (!i2c_success)
+-				/* Write failure */
+-				ASSERT(i2c_success);
++				goto i2c_write_fail;
+ 
+ 			/* Based on DP159 specs, APPLY_RX_TX_CHANGE bit in 0x0A
+ 			 * needs to be set to 1 on every 0xA-0xC write.
+@@ -1805,8 +1801,7 @@ static void write_i2c_retimer_setting(
+ 							pipe_ctx->stream->link->ddc,
+ 							slave_address, &offset, 1, &value, 1);
+ 					if (!i2c_success)
+-						/* Write failure */
+-						ASSERT(i2c_success);
++						goto i2c_write_fail;
+ 				}
+ 
+ 				buffer[0] = offset;
+@@ -1818,8 +1813,7 @@ static void write_i2c_retimer_setting(
+ 					offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
+ 					slave_address, buffer[0], buffer[1], i2c_success?1:0);
+ 				if (!i2c_success)
+-					/* Write failure */
+-					ASSERT(i2c_success);
++					goto i2c_write_fail;
+ 			}
+ 		}
+ 	}
+@@ -1837,8 +1831,7 @@ static void write_i2c_retimer_setting(
+ 				offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
+ 				slave_address, buffer[0], buffer[1], i2c_success?1:0);
+ 		if (!i2c_success)
+-			/* Write failure */
+-			ASSERT(i2c_success);
++			goto i2c_write_fail;
+ 
+ 		/* Write offset 0x00 to 0x23 */
+ 		buffer[0] = 0x00;
+@@ -1849,8 +1842,7 @@ static void write_i2c_retimer_setting(
+ 			offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
+ 			slave_address, buffer[0], buffer[1], i2c_success?1:0);
+ 		if (!i2c_success)
+-			/* Write failure */
+-			ASSERT(i2c_success);
++			goto i2c_write_fail;
+ 
+ 		/* Write offset 0xff to 0x00 */
+ 		buffer[0] = 0xff;
+@@ -1861,10 +1853,14 @@ static void write_i2c_retimer_setting(
+ 			offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
+ 			slave_address, buffer[0], buffer[1], i2c_success?1:0);
+ 		if (!i2c_success)
+-			/* Write failure */
+-			ASSERT(i2c_success);
++			goto i2c_write_fail;
+ 
+ 	}
++
++	return;
++
++i2c_write_fail:
++	DC_LOG_DEBUG("Set retimer failed");
+ }
+ 
+ static void write_i2c_default_retimer_setting(
+@@ -1889,8 +1885,7 @@ static void write_i2c_default_retimer_setting(
+ 		offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
+ 		slave_address, buffer[0], buffer[1], i2c_success?1:0);
+ 	if (!i2c_success)
+-		/* Write failure */
+-		ASSERT(i2c_success);
++		goto i2c_write_fail;
+ 
+ 	/* Write offset 0x0A to 0x17 */
+ 	buffer[0] = 0x0A;
+@@ -1901,8 +1896,7 @@ static void write_i2c_default_retimer_setting(
+ 		offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
+ 		slave_address, buffer[0], buffer[1], i2c_success?1:0);
+ 	if (!i2c_success)
+-		/* Write failure */
+-		ASSERT(i2c_success);
++		goto i2c_write_fail;
+ 
+ 	/* Write offset 0x0B to 0xDA or 0xD8 */
+ 	buffer[0] = 0x0B;
+@@ -1913,8 +1907,7 @@ static void write_i2c_default_retimer_setting(
+ 		offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
+ 		slave_address, buffer[0], buffer[1], i2c_success?1:0);
+ 	if (!i2c_success)
+-		/* Write failure */
+-		ASSERT(i2c_success);
++		goto i2c_write_fail;
+ 
+ 	/* Write offset 0x0A to 0x17 */
+ 	buffer[0] = 0x0A;
+@@ -1925,8 +1918,7 @@ static void write_i2c_default_retimer_setting(
+ 		offset = 0x%x, reg_val= 0x%x, i2c_success = %d\n",
+ 		slave_address, buffer[0], buffer[1], i2c_success?1:0);
+ 	if (!i2c_success)
+-		/* Write failure */
+-		ASSERT(i2c_success);
++		goto i2c_write_fail;
+ 
+ 	/* Write offset 0x0C to 0x1D or 0x91 */
+ 	buffer[0] = 0x0C;
+@@ -1937,8 +1929,7 @@ static void write_i2c_default_retimer_setting(
+ 		offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
+ 		slave_address, buffer[0], buffer[1], i2c_success?1:0);
+ 	if (!i2c_success)
+-		/* Write failure */
+-		ASSERT(i2c_success);
++		goto i2c_write_fail;
+ 
+ 	/* Write offset 0x0A to 0x17 */
+ 	buffer[0] = 0x0A;
+@@ -1949,8 +1940,7 @@ static void write_i2c_default_retimer_setting(
+ 		offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
+ 		slave_address, buffer[0], buffer[1], i2c_success?1:0);
+ 	if (!i2c_success)
+-		/* Write failure */
+-		ASSERT(i2c_success);
++		goto i2c_write_fail;
+ 
+ 
+ 	if (is_vga_mode) {
+@@ -1965,8 +1955,7 @@ static void write_i2c_default_retimer_setting(
+ 			offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
+ 			slave_address, buffer[0], buffer[1], i2c_success?1:0);
+ 		if (!i2c_success)
+-			/* Write failure */
+-			ASSERT(i2c_success);
++			goto i2c_write_fail;
+ 
+ 		/* Write offset 0x00 to 0x23 */
+ 		buffer[0] = 0x00;
+@@ -1977,8 +1966,7 @@ static void write_i2c_default_retimer_setting(
+ 			offset = 0x%x, reg_val= 0x%x, i2c_success = %d\n",
+ 			slave_address, buffer[0], buffer[1], i2c_success?1:0);
+ 		if (!i2c_success)
+-			/* Write failure */
+-			ASSERT(i2c_success);
++			goto i2c_write_fail;
+ 
+ 		/* Write offset 0xff to 0x00 */
+ 		buffer[0] = 0xff;
+@@ -1989,9 +1977,13 @@ static void write_i2c_default_retimer_setting(
+ 			offset = 0x%x, reg_val= 0x%x, i2c_success = %d end here\n",
+ 			slave_address, buffer[0], buffer[1], i2c_success?1:0);
+ 		if (!i2c_success)
+-			/* Write failure */
+-			ASSERT(i2c_success);
++			goto i2c_write_fail;
+ 	}
++
++	return;
++
++i2c_write_fail:
++	DC_LOG_DEBUG("Set default retimer failed");
+ }
+ 
+ static void write_i2c_redriver_setting(
+@@ -2020,8 +2012,7 @@ static void write_i2c_redriver_setting(
+ 		slave_address, buffer[3], buffer[4], buffer[5], buffer[6], i2c_success?1:0);
+ 
+ 	if (!i2c_success)
+-		/* Write failure */
+-		ASSERT(i2c_success);
++		DC_LOG_DEBUG("Set redriver failed");
+ }
+ 
+ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+index 51991bf26a93c..4c90d68db2307 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+@@ -126,22 +126,16 @@ struct aux_payloads {
+ 	struct vector payloads;
+ };
+ 
+-static struct i2c_payloads *dal_ddc_i2c_payloads_create(struct dc_context *ctx, uint32_t count)
++static bool dal_ddc_i2c_payloads_create(
++		struct dc_context *ctx,
++		struct i2c_payloads *payloads,
++		uint32_t count)
+ {
+-	struct i2c_payloads *payloads;
+-
+-	payloads = kzalloc(sizeof(struct i2c_payloads), GFP_KERNEL);
+-
+-	if (!payloads)
+-		return NULL;
+-
+ 	if (dal_vector_construct(
+ 		&payloads->payloads, ctx, count, sizeof(struct i2c_payload)))
+-		return payloads;
+-
+-	kfree(payloads);
+-	return NULL;
++		return true;
+ 
++	return false;
+ }
+ 
+ static struct i2c_payload *dal_ddc_i2c_payloads_get(struct i2c_payloads *p)
+@@ -154,14 +148,12 @@ static uint32_t dal_ddc_i2c_payloads_get_count(struct i2c_payloads *p)
+ 	return p->payloads.count;
+ }
+ 
+-static void dal_ddc_i2c_payloads_destroy(struct i2c_payloads **p)
++static void dal_ddc_i2c_payloads_destroy(struct i2c_payloads *p)
+ {
+-	if (!p || !*p)
++	if (!p)
+ 		return;
+-	dal_vector_destruct(&(*p)->payloads);
+-	kfree(*p);
+-	*p = NULL;
+ 
++	dal_vector_destruct(&p->payloads);
+ }
+ 
+ #define DDC_MIN(a, b) (((a) < (b)) ? (a) : (b))
+@@ -521,9 +513,13 @@ bool dal_ddc_service_query_ddc_data(
+ 
+ 	uint32_t payloads_num = write_payloads + read_payloads;
+ 
++
+ 	if (write_size > EDID_SEGMENT_SIZE || read_size > EDID_SEGMENT_SIZE)
+ 		return false;
+ 
++	if (!payloads_num)
++		return false;
++
+ 	/*TODO: len of payload data for i2c and aux is uint8!!!!,
+ 	 * but we want to read 256 over i2c!!!!*/
+ 	if (dal_ddc_service_is_in_aux_transaction_mode(ddc)) {
+@@ -556,23 +552,25 @@ bool dal_ddc_service_query_ddc_data(
+ 
+ 		ret = dc_link_aux_transfer_with_retries(ddc, &read_payload);
+ 	} else {
+-		struct i2c_payloads *payloads =
+-			dal_ddc_i2c_payloads_create(ddc->ctx, payloads_num);
++		struct i2c_command command = {0};
++		struct i2c_payloads payloads;
++
++		if (!dal_ddc_i2c_payloads_create(ddc->ctx, &payloads, payloads_num))
++			return false;
+ 
+-		struct i2c_command command = {
+-			.payloads = dal_ddc_i2c_payloads_get(payloads),
+-			.number_of_payloads = 0,
+-			.engine = DDC_I2C_COMMAND_ENGINE,
+-			.speed = ddc->ctx->dc->caps.i2c_speed_in_khz };
++		command.payloads = dal_ddc_i2c_payloads_get(&payloads);
++		command.number_of_payloads = 0;
++		command.engine = DDC_I2C_COMMAND_ENGINE;
++		command.speed = ddc->ctx->dc->caps.i2c_speed_in_khz;
+ 
+ 		dal_ddc_i2c_payloads_add(
+-			payloads, address, write_size, write_buf, true);
++			&payloads, address, write_size, write_buf, true);
+ 
+ 		dal_ddc_i2c_payloads_add(
+-			payloads, address, read_size, read_buf, false);
++			&payloads, address, read_size, read_buf, false);
+ 
+ 		command.number_of_payloads =
+-			dal_ddc_i2c_payloads_get_count(payloads);
++			dal_ddc_i2c_payloads_get_count(&payloads);
+ 
+ 		ret = dm_helpers_submit_i2c(
+ 				ddc->ctx,
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
+index a519dbc5ecb65..5641a9477d291 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
+@@ -400,6 +400,7 @@ void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
+ 		dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom;
+ 		dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
+ 		dsc_cfg.color_depth = stream->timing.display_color_depth;
++		dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false;
+ 		dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
+ 		ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0);
+ 		dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
+@@ -496,11 +497,15 @@ bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable)
+ 		struct dsc_config dsc_cfg;
+ 		uint8_t dsc_packed_pps[128];
+ 
++		memset(&dsc_cfg, 0, sizeof(dsc_cfg));
++		memset(dsc_packed_pps, 0, 128);
++
+ 		/* Enable DSC hw block */
+ 		dsc_cfg.pic_width = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right;
+ 		dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom;
+ 		dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
+ 		dsc_cfg.color_depth = stream->timing.display_color_depth;
++		dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false;
+ 		dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
+ 
+ 		DC_LOG_DSC(" ");
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
+index 16476ed255363..2064366322755 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
+@@ -119,32 +119,6 @@ void dccg2_get_dccg_ref_freq(struct dccg *dccg,
+ 
+ void dccg2_init(struct dccg *dccg)
+ {
+-	struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+-
+-	// Fallthrough intentional to program all available dpp_dto's
+-	switch (dccg_dcn->base.ctx->dc->res_pool->pipe_count) {
+-	case 6:
+-		REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[5], 1);
+-		/* Fall through */
+-	case 5:
+-		REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[4], 1);
+-		/* Fall through */
+-	case 4:
+-		REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[3], 1);
+-		/* Fall through */
+-	case 3:
+-		REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[2], 1);
+-		/* Fall through */
+-	case 2:
+-		REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[1], 1);
+-		/* Fall through */
+-	case 1:
+-		REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[0], 1);
+-		break;
+-	default:
+-		ASSERT(false);
+-		break;
+-	}
+ }
+ 
+ static const struct dccg_funcs dccg2_funcs = {
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
+index 1b419407af942..5c45c39662fbb 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
+@@ -207,6 +207,9 @@ static bool dsc2_get_packed_pps(struct display_stream_compressor *dsc, const str
+ 	struct dsc_reg_values dsc_reg_vals;
+ 	struct dsc_optc_config dsc_optc_cfg;
+ 
++	memset(&dsc_reg_vals, 0, sizeof(dsc_reg_vals));
++	memset(&dsc_optc_cfg, 0, sizeof(dsc_optc_cfg));
++
+ 	DC_LOG_DSC("Getting packed DSC PPS for DSC Config:");
+ 	dsc_config_log(dsc, dsc_cfg);
+ 	DC_LOG_DSC("DSC Picture Parameter Set (PPS):");
+@@ -348,6 +351,7 @@ static bool dsc_prepare_config(const struct dsc_config *dsc_cfg, struct dsc_reg_
+ 	dsc_reg_vals->pps.block_pred_enable = dsc_cfg->dc_dsc_cfg.block_pred_enable;
+ 	dsc_reg_vals->pps.line_buf_depth = dsc_cfg->dc_dsc_cfg.linebuf_depth;
+ 	dsc_reg_vals->alternate_ich_encoding_en = dsc_reg_vals->pps.dsc_version_minor == 1 ? 0 : 1;
++	dsc_reg_vals->ich_reset_at_eol = (dsc_cfg->is_odm || dsc_reg_vals->num_slices_h > 1) ? 0xF : 0;
+ 
+ 	// TODO: in addition to validating slice height (pic height must be divisible by slice height),
+ 	// see what happens when the same condition doesn't apply for slice_width/pic_width.
+@@ -510,7 +514,6 @@ static void dsc_update_from_dsc_parameters(struct dsc_reg_values *reg_vals, cons
+ 		reg_vals->pps.rc_buf_thresh[i] = reg_vals->pps.rc_buf_thresh[i] >> 6;
+ 
+ 	reg_vals->rc_buffer_model_size = dsc_params->rc_buffer_model_size;
+-	reg_vals->ich_reset_at_eol = reg_vals->num_slices_h == 1 ? 0 : 0xf;
+ }
+ 
+ static void dsc_write_to_registers(struct display_stream_compressor *dsc, const struct dsc_reg_values *reg_vals)
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+index 05b98eadc2899..08062de3fbebd 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+@@ -340,8 +340,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv14_soc = {
+ 		},
+ 	},
+ 	.num_states = 5,
+-	.sr_exit_time_us = 8.6,
+-	.sr_enter_plus_exit_time_us = 10.9,
++	.sr_exit_time_us = 11.6,
++	.sr_enter_plus_exit_time_us = 13.9,
+ 	.urgent_latency_us = 4.0,
+ 	.urgent_latency_pixel_data_only_us = 4.0,
+ 	.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
+@@ -2275,6 +2275,7 @@ static bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx)
+ 				+ stream->timing.v_border_bottom;
+ 		dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
+ 		dsc_cfg.color_depth = stream->timing.display_color_depth;
++		dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false;
+ 		dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
+ 		dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
+index a00af513aa2b0..c8f77bd0ce8a6 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
+@@ -73,32 +73,47 @@ void apply_DEDCN21_142_wa_for_hostvm_deadline(
+ 		struct _vcs_dpi_display_dlg_regs_st *dlg_attr)
+ {
+ 	struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp);
+-	uint32_t cur_value;
++	uint32_t refcyc_per_vm_group_vblank;
++	uint32_t refcyc_per_vm_req_vblank;
++	uint32_t refcyc_per_vm_group_flip;
++	uint32_t refcyc_per_vm_req_flip;
++	const uint32_t uninitialized_hw_default = 0;
+ 
+-	REG_GET(VBLANK_PARAMETERS_5, REFCYC_PER_VM_GROUP_VBLANK, &cur_value);
+-	if (cur_value > dlg_attr->refcyc_per_vm_group_vblank)
++	REG_GET(VBLANK_PARAMETERS_5,
++			REFCYC_PER_VM_GROUP_VBLANK, &refcyc_per_vm_group_vblank);
++
++	if (refcyc_per_vm_group_vblank == uninitialized_hw_default ||
++			refcyc_per_vm_group_vblank > dlg_attr->refcyc_per_vm_group_vblank)
+ 		REG_SET(VBLANK_PARAMETERS_5, 0,
+ 				REFCYC_PER_VM_GROUP_VBLANK, dlg_attr->refcyc_per_vm_group_vblank);
+ 
+ 	REG_GET(VBLANK_PARAMETERS_6,
+-			REFCYC_PER_VM_REQ_VBLANK,
+-			&cur_value);
+-	if (cur_value > dlg_attr->refcyc_per_vm_req_vblank)
++			REFCYC_PER_VM_REQ_VBLANK, &refcyc_per_vm_req_vblank);
++
++	if (refcyc_per_vm_req_vblank == uninitialized_hw_default ||
++			refcyc_per_vm_req_vblank > dlg_attr->refcyc_per_vm_req_vblank)
+ 		REG_SET(VBLANK_PARAMETERS_6, 0,
+ 				REFCYC_PER_VM_REQ_VBLANK, dlg_attr->refcyc_per_vm_req_vblank);
+ 
+-	REG_GET(FLIP_PARAMETERS_3, REFCYC_PER_VM_GROUP_FLIP, &cur_value);
+-	if (cur_value > dlg_attr->refcyc_per_vm_group_flip)
++	REG_GET(FLIP_PARAMETERS_3,
++			REFCYC_PER_VM_GROUP_FLIP, &refcyc_per_vm_group_flip);
++
++	if (refcyc_per_vm_group_flip == uninitialized_hw_default ||
++			refcyc_per_vm_group_flip > dlg_attr->refcyc_per_vm_group_flip)
+ 		REG_SET(FLIP_PARAMETERS_3, 0,
+ 				REFCYC_PER_VM_GROUP_FLIP, dlg_attr->refcyc_per_vm_group_flip);
+ 
+-	REG_GET(FLIP_PARAMETERS_4, REFCYC_PER_VM_REQ_FLIP, &cur_value);
+-	if (cur_value > dlg_attr->refcyc_per_vm_req_flip)
++	REG_GET(FLIP_PARAMETERS_4,
++			REFCYC_PER_VM_REQ_FLIP, &refcyc_per_vm_req_flip);
++
++	if (refcyc_per_vm_req_flip == uninitialized_hw_default ||
++			refcyc_per_vm_req_flip > dlg_attr->refcyc_per_vm_req_flip)
+ 		REG_SET(FLIP_PARAMETERS_4, 0,
+ 				REFCYC_PER_VM_REQ_FLIP, dlg_attr->refcyc_per_vm_req_flip);
+ 
+ 	REG_SET(FLIP_PARAMETERS_5, 0,
+ 			REFCYC_PER_PTE_GROUP_FLIP_C, dlg_attr->refcyc_per_pte_group_flip_c);
++
+ 	REG_SET(FLIP_PARAMETERS_6, 0,
+ 			REFCYC_PER_META_CHUNK_FLIP_C, dlg_attr->refcyc_per_meta_chunk_flip_c);
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h
+index 1ddb1c6fa1493..75ecfdc5d5cd2 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h
+@@ -36,6 +36,7 @@ struct dsc_config {
+ 	uint32_t pic_height;
+ 	enum dc_pixel_encoding pixel_encoding;
+ 	enum dc_color_depth color_depth;  /* Bits per component */
++	bool is_odm;
+ 	struct dc_dsc_config dc_dsc_cfg;
+ };
+ 
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+index 3a2a1dc9a786a..1b55f037ba4a7 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+@@ -3987,6 +3987,13 @@ static int smu7_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
+ 			"Failed to populate and upload SCLK MCLK DPM levels!",
+ 			result = tmp_result);
+ 
++	/*
++	 * If a custom pp table is loaded, set DPMTABLE_OD_UPDATE_VDDC flag.
++	 * That effectively disables AVFS feature.
++	 */
++	if (hwmgr->hardcode_pp_table != NULL)
++		data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC;
++
+ 	tmp_result = smu7_update_avfs(hwmgr);
+ 	PP_ASSERT_WITH_CODE((0 == tmp_result),
+ 			"Failed to update avfs voltages!",
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+index beacfffbdc3eb..ecbc9daea57e0 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+@@ -3691,6 +3691,13 @@ static int vega10_set_power_state_tasks(struct pp_hwmgr *hwmgr,
+ 	PP_ASSERT_WITH_CODE(!result,
+ 			"Failed to upload PPtable!", return result);
+ 
++	/*
++	 * If a custom pp table is loaded, set DPMTABLE_OD_UPDATE_VDDC flag.
++	 * That effectively disables AVFS feature.
++	 */
++	if(hwmgr->hardcode_pp_table != NULL)
++		data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC;
++
+ 	vega10_update_avfs(hwmgr);
+ 
+ 	/*
+diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+index 8ed94c9948008..b83acd696774b 100644
+--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
++++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+@@ -1741,10 +1741,6 @@ static int exynos_dsi_probe(struct platform_device *pdev)
+ 	dsi->dev = dev;
+ 	dsi->driver_data = of_device_get_match_data(dev);
+ 
+-	ret = exynos_dsi_parse_dt(dsi);
+-	if (ret)
+-		return ret;
+-
+ 	dsi->supplies[0].supply = "vddcore";
+ 	dsi->supplies[1].supply = "vddio";
+ 	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(dsi->supplies),
+@@ -1805,11 +1801,25 @@ static int exynos_dsi_probe(struct platform_device *pdev)
+ 		return ret;
+ 	}
+ 
++	ret = exynos_dsi_parse_dt(dsi);
++	if (ret)
++		return ret;
++
+ 	platform_set_drvdata(pdev, &dsi->encoder);
+ 
+ 	pm_runtime_enable(dev);
+ 
+-	return component_add(dev, &exynos_dsi_component_ops);
++	ret = component_add(dev, &exynos_dsi_component_ops);
++	if (ret)
++		goto err_disable_runtime;
++
++	return 0;
++
++err_disable_runtime:
++	pm_runtime_disable(dev);
++	of_node_put(dsi->in_bridge_node);
++
++	return ret;
+ }
+ 
+ static int exynos_dsi_remove(struct platform_device *pdev)
+diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
+index f56852a503e8d..8b784947ed3b9 100644
+--- a/drivers/gpu/drm/gma500/cdv_intel_display.c
++++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
+@@ -405,6 +405,8 @@ static bool cdv_intel_find_dp_pll(const struct gma_limit_t *limit,
+ 	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+ 	struct gma_clock_t clock;
+ 
++	memset(&clock, 0, sizeof(clock));
++
+ 	switch (refclk) {
+ 	case 27000:
+ 		if (target < 200000) {
+diff --git a/drivers/gpu/drm/mcde/mcde_display.c b/drivers/gpu/drm/mcde/mcde_display.c
+index 751454ae3cd10..28ed506285018 100644
+--- a/drivers/gpu/drm/mcde/mcde_display.c
++++ b/drivers/gpu/drm/mcde/mcde_display.c
+@@ -946,6 +946,7 @@ static void mcde_display_disable(struct drm_simple_display_pipe *pipe)
+ 	struct drm_crtc *crtc = &pipe->crtc;
+ 	struct drm_device *drm = crtc->dev;
+ 	struct mcde *mcde = drm->dev_private;
++	struct drm_pending_vblank_event *event;
+ 
+ 	if (mcde->te_sync)
+ 		drm_crtc_vblank_off(crtc);
+@@ -953,6 +954,15 @@ static void mcde_display_disable(struct drm_simple_display_pipe *pipe)
+ 	/* Disable FIFO A flow */
+ 	mcde_disable_fifo(mcde, MCDE_FIFO_A, true);
+ 
++	event = crtc->state->event;
++	if (event) {
++		crtc->state->event = NULL;
++
++		spin_lock_irq(&crtc->dev->event_lock);
++		drm_crtc_send_vblank_event(crtc, event);
++		spin_unlock_irq(&crtc->dev->event_lock);
++	}
++
+ 	dev_info(drm->dev, "MCDE display is disabled\n");
+ }
+ 
+diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+index 24b55103bfe00..c8fb21cc0d6ff 100644
+--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+@@ -1414,18 +1414,31 @@ static const struct adreno_gpu_funcs funcs = {
+ static void check_speed_bin(struct device *dev)
+ {
+ 	struct nvmem_cell *cell;
+-	u32 bin, val;
++	u32 val;
++
++	/*
++	 * If the OPP table specifies a opp-supported-hw property then we have
++	 * to set something with dev_pm_opp_set_supported_hw() or the table
++	 * doesn't get populated so pick an arbitrary value that should
++	 * ensure the default frequencies are selected but not conflict with any
++	 * actual bins
++	 */
++	val = 0x80;
+ 
+ 	cell = nvmem_cell_get(dev, "speed_bin");
+ 
+-	/* If a nvmem cell isn't defined, nothing to do */
+-	if (IS_ERR(cell))
+-		return;
++	if (!IS_ERR(cell)) {
++		void *buf = nvmem_cell_read(cell, NULL);
++
++		if (!IS_ERR(buf)) {
++			u8 bin = *((u8 *) buf);
+ 
+-	bin = *((u32 *) nvmem_cell_read(cell, NULL));
+-	nvmem_cell_put(cell);
++			val = (1 << bin);
++			kfree(buf);
++		}
+ 
+-	val = (1 << bin);
++		nvmem_cell_put(cell);
++	}
+ 
+ 	dev_pm_opp_set_supported_hw(dev, &val, 1);
+ }
+diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
+index 4558d66761b3c..108632a1f2438 100644
+--- a/drivers/gpu/drm/msm/msm_drv.c
++++ b/drivers/gpu/drm/msm/msm_drv.c
+@@ -444,8 +444,10 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
+ 	if (!dev->dma_parms) {
+ 		dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
+ 					      GFP_KERNEL);
+-		if (!dev->dma_parms)
+-			return -ENOMEM;
++		if (!dev->dma_parms) {
++			ret = -ENOMEM;
++			goto err_msm_uninit;
++		}
+ 	}
+ 	dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
+ 
+diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
+index 419a02260bfa7..ee2b1e1199e09 100644
+--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
++++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
+@@ -1032,8 +1032,10 @@ nv50_mstc_detect(struct drm_connector *connector, bool force)
+ 		return connector_status_disconnected;
+ 
+ 	ret = pm_runtime_get_sync(connector->dev->dev);
+-	if (ret < 0 && ret != -EACCES)
++	if (ret < 0 && ret != -EACCES) {
++		pm_runtime_put_autosuspend(connector->dev->dev);
+ 		return connector_status_disconnected;
++	}
+ 
+ 	conn_status = drm_dp_mst_detect_port(connector, mstc->port->mgr,
+ 					     mstc->port);
+diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+index 5c314f135dd10..3b13feca970f7 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
++++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+@@ -183,8 +183,11 @@ nouveau_debugfs_pstate_set(struct file *file, const char __user *ubuf,
+ 	}
+ 
+ 	ret = pm_runtime_get_sync(drm->dev);
+-	if (ret < 0 && ret != -EACCES)
++	if (ret < 0 && ret != -EACCES) {
++		pm_runtime_put_autosuspend(drm->dev);
+ 		return ret;
++	}
++
+ 	ret = nvif_mthd(ctrl, NVIF_CONTROL_PSTATE_USER, &args, sizeof(args));
+ 	pm_runtime_put_autosuspend(drm->dev);
+ 	if (ret < 0)
+diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
+index fbfe254227740..7d39d4949ee77 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
++++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
+@@ -78,8 +78,10 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
+ 		return ret;
+ 
+ 	ret = pm_runtime_get_sync(dev);
+-	if (ret < 0 && ret != -EACCES)
++	if (ret < 0 && ret != -EACCES) {
++		pm_runtime_put_autosuspend(dev);
+ 		goto out;
++	}
+ 
+ 	ret = nouveau_vma_new(nvbo, vmm, &vma);
+ 	pm_runtime_mark_last_busy(dev);
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c
+index 9b91da09dc5f8..8d9812a51ef63 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c
+@@ -101,9 +101,13 @@ platform_init(struct nvkm_bios *bios, const char *name)
+ 	else
+ 		return ERR_PTR(-ENODEV);
+ 
++	if (!pdev->rom || pdev->romlen == 0)
++		return ERR_PTR(-ENODEV);
++
+ 	if ((priv = kmalloc(sizeof(*priv), GFP_KERNEL))) {
++		priv->size = pdev->romlen;
+ 		if (ret = -ENODEV,
+-		    (priv->rom = pci_platform_rom(pdev, &priv->size)))
++		    (priv->rom = ioremap(pdev->rom, pdev->romlen)))
+ 			return priv;
+ 		kfree(priv);
+ 	}
+@@ -111,11 +115,20 @@ platform_init(struct nvkm_bios *bios, const char *name)
+ 	return ERR_PTR(ret);
+ }
+ 
++static void
++platform_fini(void *data)
++{
++	struct priv *priv = data;
++
++	iounmap(priv->rom);
++	kfree(priv);
++}
++
+ const struct nvbios_source
+ nvbios_platform = {
+ 	.name = "PLATFORM",
+ 	.init = platform_init,
+-	.fini = (void(*)(void *))kfree,
++	.fini = platform_fini,
+ 	.read = pcirom_read,
+ 	.rw = true,
+ };
+diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c |
5323 |
+index 4bdd63b571002..ac93dae2a9c84 100644 |
5324 |
+--- a/drivers/gpu/drm/omapdrm/dss/dss.c |
5325 |
++++ b/drivers/gpu/drm/omapdrm/dss/dss.c |
5326 |
+@@ -1151,46 +1151,38 @@ static const struct dss_features dra7xx_dss_feats = { |
5327 |
+ .has_lcd_clk_src = true, |
5328 |
+ }; |
5329 |
+ |
5330 |
+-static int dss_init_ports(struct dss_device *dss) |
5331 |
++static void __dss_uninit_ports(struct dss_device *dss, unsigned int num_ports) |
5332 |
+ { |
5333 |
+ struct platform_device *pdev = dss->pdev; |
5334 |
+ struct device_node *parent = pdev->dev.of_node; |
5335 |
+ struct device_node *port; |
5336 |
+ unsigned int i; |
5337 |
+- int r; |
5338 |
+ |
5339 |
+- for (i = 0; i < dss->feat->num_ports; i++) { |
5340 |
++ for (i = 0; i < num_ports; i++) { |
5341 |
+ port = of_graph_get_port_by_id(parent, i); |
5342 |
+ if (!port) |
5343 |
+ continue; |
5344 |
+ |
5345 |
+ switch (dss->feat->ports[i]) { |
5346 |
+ case OMAP_DISPLAY_TYPE_DPI: |
5347 |
+- r = dpi_init_port(dss, pdev, port, dss->feat->model); |
5348 |
+- if (r) |
5349 |
+- return r; |
5350 |
++ dpi_uninit_port(port); |
5351 |
+ break; |
5352 |
+- |
5353 |
+ case OMAP_DISPLAY_TYPE_SDI: |
5354 |
+- r = sdi_init_port(dss, pdev, port); |
5355 |
+- if (r) |
5356 |
+- return r; |
5357 |
++ sdi_uninit_port(port); |
5358 |
+ break; |
5359 |
+- |
5360 |
+ default: |
5361 |
+ break; |
5362 |
+ } |
5363 |
+ } |
5364 |
+- |
5365 |
+- return 0; |
5366 |
+ } |
5367 |
+ |
5368 |
+-static void dss_uninit_ports(struct dss_device *dss) |
5369 |
++static int dss_init_ports(struct dss_device *dss) |
5370 |
+ { |
5371 |
+ struct platform_device *pdev = dss->pdev; |
5372 |
+ struct device_node *parent = pdev->dev.of_node; |
5373 |
+ struct device_node *port; |
5374 |
+- int i; |
5375 |
++ unsigned int i; |
5376 |
++ int r; |
5377 |
+ |
5378 |
+ for (i = 0; i < dss->feat->num_ports; i++) { |
5379 |
+ port = of_graph_get_port_by_id(parent, i); |
5380 |
+@@ -1199,15 +1191,32 @@ static void dss_uninit_ports(struct dss_device *dss) |
5381 |
+ |
5382 |
+ switch (dss->feat->ports[i]) { |
5383 |
+ case OMAP_DISPLAY_TYPE_DPI: |
5384 |
+- dpi_uninit_port(port); |
5385 |
++ r = dpi_init_port(dss, pdev, port, dss->feat->model); |
5386 |
++ if (r) |
5387 |
++ goto error; |
5388 |
+ break; |
5389 |
++ |
5390 |
+ case OMAP_DISPLAY_TYPE_SDI: |
5391 |
+- sdi_uninit_port(port); |
5392 |
++ r = sdi_init_port(dss, pdev, port); |
5393 |
++ if (r) |
5394 |
++ goto error; |
5395 |
+ break; |
5396 |
++ |
5397 |
+ default: |
5398 |
+ break; |
5399 |
+ } |
5400 |
+ } |
5401 |
++ |
5402 |
++ return 0; |
5403 |
++ |
5404 |
++error: |
5405 |
++ __dss_uninit_ports(dss, i); |
5406 |
++ return r; |
5407 |
++} |
5408 |
++ |
5409 |
++static void dss_uninit_ports(struct dss_device *dss) |
5410 |
++{ |
5411 |
++ __dss_uninit_ports(dss, dss->feat->num_ports); |
5412 |
+ } |
5413 |
+ |
5414 |
+ static int dss_video_pll_probe(struct dss_device *dss) |
5415 |
+diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c |
5416 |
+index 31502857f013d..ce67891eedd46 100644 |
5417 |
+--- a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c |
5418 |
++++ b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c |
5419 |
+@@ -192,7 +192,7 @@ static int __init omapdss_boot_init(void) |
5420 |
+ dss = of_find_matching_node(NULL, omapdss_of_match); |
5421 |
+ |
5422 |
+ if (dss == NULL || !of_device_is_available(dss)) |
5423 |
+- return 0; |
5424 |
++ goto put_node; |
5425 |
+ |
5426 |
+ omapdss_walk_device(dss, true); |
5427 |
+ |
5428 |
+@@ -217,6 +217,8 @@ static int __init omapdss_boot_init(void) |
5429 |
+ kfree(n); |
5430 |
+ } |
5431 |
+ |
5432 |
++put_node: |
5433 |
++ of_node_put(dss); |
5434 |
+ return 0; |
5435 |
+ } |
5436 |
+ |
5437 |
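The omapdss-boot-init.c hunk fixes a device-node refcount leak: of_find_matching_node() returns its node with an elevated refcount, and the early "not available" return used to skip the put. Routing both exits through one label works because of_node_put() accepts NULL. A kernel-context sketch of the pattern (example_match_table is a hypothetical name, not from the patch):

#include <linux/of.h>

static int __init example_boot_init(void)
{
	struct device_node *np;

	/* of_find_matching_node() returns np with its refcount raised. */
	np = of_find_matching_node(NULL, example_match_table);
	if (!np || !of_device_is_available(np))
		goto put_node;

	/* ... walk and use np ... */

put_node:
	of_node_put(np);	/* of_node_put(NULL) is a harmless no-op */
	return 0;
}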
+diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c |
5438 |
+index 4d1490fbb0750..756a50e8aff20 100644 |
5439 |
+--- a/drivers/gpu/drm/radeon/radeon_bios.c |
5440 |
++++ b/drivers/gpu/drm/radeon/radeon_bios.c |
5441 |
+@@ -108,25 +108,33 @@ static bool radeon_read_bios(struct radeon_device *rdev) |
5442 |
+ |
5443 |
+ static bool radeon_read_platform_bios(struct radeon_device *rdev) |
5444 |
+ { |
5445 |
+- uint8_t __iomem *bios; |
5446 |
+- size_t size; |
5447 |
++ phys_addr_t rom = rdev->pdev->rom; |
5448 |
++ size_t romlen = rdev->pdev->romlen; |
5449 |
++ void __iomem *bios; |
5450 |
+ |
5451 |
+ rdev->bios = NULL; |
5452 |
+ |
5453 |
+- bios = pci_platform_rom(rdev->pdev, &size); |
5454 |
+- if (!bios) { |
5455 |
++ if (!rom || romlen == 0) |
5456 |
+ return false; |
5457 |
+- } |
5458 |
+ |
5459 |
+- if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) { |
5460 |
++ rdev->bios = kzalloc(romlen, GFP_KERNEL); |
5461 |
++ if (!rdev->bios) |
5462 |
+ return false; |
5463 |
+- } |
5464 |
+- rdev->bios = kmemdup(bios, size, GFP_KERNEL); |
5465 |
+- if (rdev->bios == NULL) { |
5466 |
+- return false; |
5467 |
+- } |
5468 |
++ |
5469 |
++ bios = ioremap(rom, romlen); |
5470 |
++ if (!bios) |
5471 |
++ goto free_bios; |
5472 |
++ |
5473 |
++ memcpy_fromio(rdev->bios, bios, romlen); |
5474 |
++ iounmap(bios); |
5475 |
++ |
5476 |
++ if (rdev->bios[0] != 0x55 || rdev->bios[1] != 0xaa) |
5477 |
++ goto free_bios; |
5478 |
+ |
5479 |
+ return true; |
5480 |
++free_bios: |
5481 |
++ kfree(rdev->bios); |
5482 |
++ return false; |
5483 |
+ } |
5484 |
+ |
5485 |
+ #ifdef CONFIG_ACPI |
5486 |
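The radeon change stops dereferencing the platform ROM as if it were ordinary memory: the physical window advertised in pdev->rom/romlen is mapped with ioremap(), copied out with memcpy_fromio(), unmapped, and only then checked for the 0x55AA option-ROM signature, with a single free_bios unwind label. A kernel-context sketch of that copy-out pattern, assuming a caller that already knows the ROM's physical address and length:

#include <linux/io.h>
#include <linux/slab.h>

static u8 *copy_option_rom(phys_addr_t rom, size_t romlen)
{
	void __iomem *map;
	u8 *buf;

	if (!rom || !romlen)
		return NULL;

	buf = kzalloc(romlen, GFP_KERNEL);
	if (!buf)
		return NULL;

	map = ioremap(rom, romlen);
	if (!map)
		goto free_buf;

	memcpy_fromio(buf, map, romlen);	/* never deref __iomem directly */
	iounmap(map);

	if (buf[0] != 0x55 || buf[1] != 0xaa)	/* PCI option ROM signature */
		goto free_buf;

	return buf;

free_buf:
	kfree(buf);
	return NULL;
}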
+diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c |
5487 |
+index 30c5ddd6d081c..134e9106ebac1 100644 |
5488 |
+--- a/drivers/gpu/drm/scheduler/sched_main.c |
5489 |
++++ b/drivers/gpu/drm/scheduler/sched_main.c |
5490 |
+@@ -284,10 +284,21 @@ static void drm_sched_job_timedout(struct work_struct *work) |
5491 |
+ unsigned long flags; |
5492 |
+ |
5493 |
+ sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work); |
5494 |
++ |
5495 |
++ /* Protects against concurrent deletion in drm_sched_get_cleanup_job */ |
5496 |
++ spin_lock_irqsave(&sched->job_list_lock, flags); |
5497 |
+ job = list_first_entry_or_null(&sched->ring_mirror_list, |
5498 |
+ struct drm_sched_job, node); |
5499 |
+ |
5500 |
+ if (job) { |
5501 |
++ /* |
5502 |
++ * Remove the bad job so it cannot be freed by concurrent |
5503 |
++ * drm_sched_cleanup_jobs. It will be reinserted back after sched->thread |
5504 |
++ * is parked at which point it's safe. |
5505 |
++ */ |
5506 |
++ list_del_init(&job->node); |
5507 |
++ spin_unlock_irqrestore(&sched->job_list_lock, flags); |
5508 |
++ |
5509 |
+ job->sched->ops->timedout_job(job); |
5510 |
+ |
5511 |
+ /* |
5512 |
+@@ -298,6 +309,8 @@ static void drm_sched_job_timedout(struct work_struct *work) |
5513 |
+ job->sched->ops->free_job(job); |
5514 |
+ sched->free_guilty = false; |
5515 |
+ } |
5516 |
++ } else { |
5517 |
++ spin_unlock_irqrestore(&sched->job_list_lock, flags); |
5518 |
+ } |
5519 |
+ |
5520 |
+ spin_lock_irqsave(&sched->job_list_lock, flags); |
5521 |
+@@ -369,6 +382,20 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad) |
5522 |
+ |
5523 |
+ kthread_park(sched->thread); |
5524 |
+ |
5525 |
++ /* |
5526 |
++ * Reinsert back the bad job here - now it's safe as |
5527 |
++ * drm_sched_get_cleanup_job cannot race against us and release the |
5528 |
++ * bad job at this point - we parked (waited for) any in progress |
5529 |
++ * (earlier) cleanups and drm_sched_get_cleanup_job will not be called |
5530 |
++ * now until the scheduler thread is unparked. |
5531 |
++ */ |
5532 |
++ if (bad && bad->sched == sched) |
5533 |
++ /* |
5534 |
++ * Add at the head of the queue to reflect it was the earliest |
5535 |
++ * job extracted. |
5536 |
++ */ |
5537 |
++ list_add(&bad->node, &sched->ring_mirror_list); |
5538 |
++ |
5539 |
+ /* |
5540 |
+ * Iterate the job list from later to earlier one and either deactive |
5541 |
+ * their HW callbacks or remove them from mirror list if they already |
5542 |
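The scheduler fix closes a race between timeout handling and job cleanup: the head job is peeked and detached from ring_mirror_list while job_list_lock is held, so a concurrent drm_sched_get_cleanup_job() can no longer free it mid-timeout, and drm_sched_stop() re-inserts it at the head once kthread_park() has quiesced the scheduler thread. A simplified kernel-context sketch of the detach-under-lock idiom (generic struct and helper names, and it re-inserts immediately rather than waiting for a park, so it is illustrative only):

static void timeout_worker(struct example_queue *q)
{
	struct example_job *job;
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	job = list_first_entry_or_null(&q->pending,
				       struct example_job, node);
	if (job)
		list_del_init(&job->node);	/* cleanup can't see it now */
	spin_unlock_irqrestore(&q->lock, flags);

	if (!job)
		return;

	handle_timeout(job);		/* safe to touch: we own it */

	spin_lock_irqsave(&q->lock, flags);
	list_add(&job->node, &q->pending);	/* head: it was the oldest */
	spin_unlock_irqrestore(&q->lock, flags);
}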
+diff --git a/drivers/gpu/drm/sun4i/sun8i_csc.h b/drivers/gpu/drm/sun4i/sun8i_csc.h |
5543 |
+index f42441b1b14dd..a55a38ad849c1 100644 |
5544 |
+--- a/drivers/gpu/drm/sun4i/sun8i_csc.h |
5545 |
++++ b/drivers/gpu/drm/sun4i/sun8i_csc.h |
5546 |
+@@ -12,7 +12,7 @@ struct sun8i_mixer; |
5547 |
+ |
5548 |
+ /* VI channel CSC units offsets */ |
5549 |
+ #define CCSC00_OFFSET 0xAA050 |
5550 |
+-#define CCSC01_OFFSET 0xFA000 |
5551 |
++#define CCSC01_OFFSET 0xFA050 |
5552 |
+ #define CCSC10_OFFSET 0xA0000 |
5553 |
+ #define CCSC11_OFFSET 0xF0000 |
5554 |
+ |
5555 |
+diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c |
5556 |
+index d5f5ba4105241..54435b72b7611 100644 |
5557 |
+--- a/drivers/gpu/drm/vc4/vc4_hdmi.c |
5558 |
++++ b/drivers/gpu/drm/vc4/vc4_hdmi.c |
5559 |
+@@ -1125,6 +1125,7 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *hdmi) |
5560 |
+ card->num_links = 1; |
5561 |
+ card->name = "vc4-hdmi"; |
5562 |
+ card->dev = dev; |
5563 |
++ card->owner = THIS_MODULE; |
5564 |
+ |
5565 |
+ /* |
5566 |
+ * Be careful, snd_soc_register_card() calls dev_set_drvdata() and |
5567 |
+diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c |
5568 |
+index a128b5063f46c..83dccdeef9069 100644 |
5569 |
+--- a/drivers/hwtracing/coresight/coresight-etm4x.c |
5570 |
++++ b/drivers/hwtracing/coresight/coresight-etm4x.c |
5571 |
+@@ -1184,6 +1184,7 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id) |
5572 |
+ return 0; |
5573 |
+ |
5574 |
+ err_arch_supported: |
5575 |
++ etmdrvdata[drvdata->cpu] = NULL; |
5576 |
+ if (--etm4_count == 0) { |
5577 |
+ cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING); |
5578 |
+ if (hp_online) |
5579 |
+diff --git a/drivers/hwtracing/intel_th/intel_th.h b/drivers/hwtracing/intel_th/intel_th.h |
5580 |
+index 6f4f5486fe6dc..5fe694708b7a3 100644 |
5581 |
+--- a/drivers/hwtracing/intel_th/intel_th.h |
5582 |
++++ b/drivers/hwtracing/intel_th/intel_th.h |
5583 |
+@@ -47,11 +47,13 @@ struct intel_th_output { |
5584 |
+ /** |
5585 |
+ * struct intel_th_drvdata - describes hardware capabilities and quirks |
5586 |
+ * @tscu_enable: device needs SW to enable time stamping unit |
5587 |
++ * @multi_is_broken: device has multiblock mode is broken |
5588 |
+ * @has_mintctl: device has interrupt control (MINTCTL) register |
5589 |
+ * @host_mode_only: device can only operate in 'host debugger' mode |
5590 |
+ */ |
5591 |
+ struct intel_th_drvdata { |
5592 |
+ unsigned int tscu_enable : 1, |
5593 |
++ multi_is_broken : 1, |
5594 |
+ has_mintctl : 1, |
5595 |
+ host_mode_only : 1; |
5596 |
+ }; |
5597 |
+diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c |
5598 |
+index 255f8f41c8ff7..3cd2489d398c5 100644 |
5599 |
+--- a/drivers/hwtracing/intel_th/msu.c |
5600 |
++++ b/drivers/hwtracing/intel_th/msu.c |
5601 |
+@@ -157,7 +157,8 @@ struct msc { |
5602 |
+ /* config */ |
5603 |
+ unsigned int enabled : 1, |
5604 |
+ wrap : 1, |
5605 |
+- do_irq : 1; |
5606 |
++ do_irq : 1, |
5607 |
++ multi_is_broken : 1; |
5608 |
+ unsigned int mode; |
5609 |
+ unsigned int burst_len; |
5610 |
+ unsigned int index; |
5611 |
+@@ -1665,7 +1666,7 @@ static int intel_th_msc_init(struct msc *msc) |
5612 |
+ { |
5613 |
+ atomic_set(&msc->user_count, -1); |
5614 |
+ |
5615 |
+- msc->mode = MSC_MODE_MULTI; |
5616 |
++ msc->mode = msc->multi_is_broken ? MSC_MODE_SINGLE : MSC_MODE_MULTI; |
5617 |
+ mutex_init(&msc->buf_mutex); |
5618 |
+ INIT_LIST_HEAD(&msc->win_list); |
5619 |
+ INIT_LIST_HEAD(&msc->iter_list); |
5620 |
+@@ -1877,6 +1878,9 @@ mode_store(struct device *dev, struct device_attribute *attr, const char *buf, |
5621 |
+ return -EINVAL; |
5622 |
+ |
5623 |
+ found: |
5624 |
++ if (i == MSC_MODE_MULTI && msc->multi_is_broken) |
5625 |
++ return -EOPNOTSUPP; |
5626 |
++ |
5627 |
+ mutex_lock(&msc->buf_mutex); |
5628 |
+ ret = 0; |
5629 |
+ |
5630 |
+@@ -2083,6 +2087,9 @@ static int intel_th_msc_probe(struct intel_th_device *thdev) |
5631 |
+ if (!res) |
5632 |
+ msc->do_irq = 1; |
5633 |
+ |
5634 |
++ if (INTEL_TH_CAP(to_intel_th(thdev), multi_is_broken)) |
5635 |
++ msc->multi_is_broken = 1; |
5636 |
++ |
5637 |
+ msc->index = thdev->id; |
5638 |
+ |
5639 |
+ msc->thdev = thdev; |
5640 |
+diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c |
5641 |
+index 0d26484d67955..21fdf0b935166 100644 |
5642 |
+--- a/drivers/hwtracing/intel_th/pci.c |
5643 |
++++ b/drivers/hwtracing/intel_th/pci.c |
5644 |
+@@ -120,6 +120,10 @@ static void intel_th_pci_remove(struct pci_dev *pdev) |
5645 |
+ pci_free_irq_vectors(pdev); |
5646 |
+ } |
5647 |
+ |
5648 |
++static const struct intel_th_drvdata intel_th_1x_multi_is_broken = { |
5649 |
++ .multi_is_broken = 1, |
5650 |
++}; |
5651 |
++ |
5652 |
+ static const struct intel_th_drvdata intel_th_2x = { |
5653 |
+ .tscu_enable = 1, |
5654 |
+ .has_mintctl = 1, |
5655 |
+@@ -152,7 +156,7 @@ static const struct pci_device_id intel_th_pci_id_table[] = { |
5656 |
+ { |
5657 |
+ /* Kaby Lake PCH-H */ |
5658 |
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa2a6), |
5659 |
+- .driver_data = (kernel_ulong_t)0, |
5660 |
++ .driver_data = (kernel_ulong_t)&intel_th_1x_multi_is_broken, |
5661 |
+ }, |
5662 |
+ { |
5663 |
+ /* Denverton */ |
5664 |
+@@ -207,7 +211,7 @@ static const struct pci_device_id intel_th_pci_id_table[] = { |
5665 |
+ { |
5666 |
+ /* Comet Lake PCH-V */ |
5667 |
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa3a6), |
5668 |
+- .driver_data = (kernel_ulong_t)&intel_th_2x, |
5669 |
++ .driver_data = (kernel_ulong_t)&intel_th_1x_multi_is_broken, |
5670 |
+ }, |
5671 |
+ { |
5672 |
+ /* Ice Lake NNPI */ |
5673 |
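The three intel_th hunks plumb a single quirk bit end to end: a drvdata flag declared in intel_th.h, latched into the msc at probe, used both to default to MSC_MODE_SINGLE and to reject sysfs requests for the broken multiblock mode with -EOPNOTSUPP, and attached to the affected PCI IDs via .driver_data. A kernel-context sketch of the per-device quirk-table idiom (struct my_drvdata and my_probe() are hypothetical names):

#include <linux/pci.h>

struct my_drvdata {
	unsigned int multi_is_broken : 1;
};

static const struct my_drvdata quirk_multi_is_broken = {
	.multi_is_broken = 1,
};

static const struct pci_device_id my_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa2a6),	/* Kaby Lake PCH-H */
	  .driver_data = (kernel_ulong_t)&quirk_multi_is_broken, },
	{ },
};

static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	const struct my_drvdata *drvdata = (const void *)id->driver_data;
	bool multi_ok = !(drvdata && drvdata->multi_is_broken);

	dev_info(&pdev->dev, "multiblock mode %s\n",
		 multi_ok ? "enabled" : "disabled (quirk)");
	return 0;
}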
+diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c |
5674 |
+index dad6e432de89f..bdcc3c9d0abe5 100644 |
5675 |
+--- a/drivers/i2c/busses/i2c-aspeed.c |
5676 |
++++ b/drivers/i2c/busses/i2c-aspeed.c |
5677 |
+@@ -69,6 +69,7 @@ |
5678 |
+ * These share bit definitions, so use the same values for the enable & |
5679 |
+ * status bits. |
5680 |
+ */ |
5681 |
++#define ASPEED_I2CD_INTR_RECV_MASK 0xf000ffff |
5682 |
+ #define ASPEED_I2CD_INTR_SDA_DL_TIMEOUT BIT(14) |
5683 |
+ #define ASPEED_I2CD_INTR_BUS_RECOVER_DONE BIT(13) |
5684 |
+ #define ASPEED_I2CD_INTR_SLAVE_MATCH BIT(7) |
5685 |
+@@ -604,6 +605,7 @@ static irqreturn_t aspeed_i2c_bus_irq(int irq, void *dev_id) |
5686 |
+ writel(irq_received & ~ASPEED_I2CD_INTR_RX_DONE, |
5687 |
+ bus->base + ASPEED_I2C_INTR_STS_REG); |
5688 |
+ readl(bus->base + ASPEED_I2C_INTR_STS_REG); |
5689 |
++ irq_received &= ASPEED_I2CD_INTR_RECV_MASK; |
5690 |
+ irq_remaining = irq_received; |
5691 |
+ |
5692 |
+ #if IS_ENABLED(CONFIG_I2C_SLAVE) |
5693 |
+diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c |
5694 |
+index 331f7cca9babe..db94e96aed77e 100644 |
5695 |
+--- a/drivers/i2c/busses/i2c-tegra.c |
5696 |
++++ b/drivers/i2c/busses/i2c-tegra.c |
5697 |
+@@ -16,6 +16,7 @@ |
5698 |
+ #include <linux/interrupt.h> |
5699 |
+ #include <linux/io.h> |
5700 |
+ #include <linux/iopoll.h> |
5701 |
++#include <linux/irq.h> |
5702 |
+ #include <linux/kernel.h> |
5703 |
+ #include <linux/module.h> |
5704 |
+ #include <linux/of_device.h> |
5705 |
+@@ -230,7 +231,6 @@ struct tegra_i2c_hw_feature { |
5706 |
+ * @base_phys: physical base address of the I2C controller |
5707 |
+ * @cont_id: I2C controller ID, used for packet header |
5708 |
+ * @irq: IRQ number of transfer complete interrupt |
5709 |
+- * @irq_disabled: used to track whether or not the interrupt is enabled |
5710 |
+ * @is_dvc: identifies the DVC I2C controller, has a different register layout |
5711 |
+ * @msg_complete: transfer completion notifier |
5712 |
+ * @msg_err: error code for completed message |
5713 |
+@@ -240,7 +240,6 @@ struct tegra_i2c_hw_feature { |
5714 |
+ * @bus_clk_rate: current I2C bus clock rate |
5715 |
+ * @clk_divisor_non_hs_mode: clock divider for non-high-speed modes |
5716 |
+ * @is_multimaster_mode: track if I2C controller is in multi-master mode |
5717 |
+- * @xfer_lock: lock to serialize transfer submission and processing |
5718 |
+ * @tx_dma_chan: DMA transmit channel |
5719 |
+ * @rx_dma_chan: DMA receive channel |
5720 |
+ * @dma_phys: handle to DMA resources |
5721 |
+@@ -260,7 +259,6 @@ struct tegra_i2c_dev { |
5722 |
+ phys_addr_t base_phys; |
5723 |
+ int cont_id; |
5724 |
+ int irq; |
5725 |
+- bool irq_disabled; |
5726 |
+ int is_dvc; |
5727 |
+ struct completion msg_complete; |
5728 |
+ int msg_err; |
5729 |
+@@ -270,8 +268,6 @@ struct tegra_i2c_dev { |
5730 |
+ u32 bus_clk_rate; |
5731 |
+ u16 clk_divisor_non_hs_mode; |
5732 |
+ bool is_multimaster_mode; |
5733 |
+- /* xfer_lock: lock to serialize transfer submission and processing */ |
5734 |
+- spinlock_t xfer_lock; |
5735 |
+ struct dma_chan *tx_dma_chan; |
5736 |
+ struct dma_chan *rx_dma_chan; |
5737 |
+ dma_addr_t dma_phys; |
5738 |
+@@ -790,11 +786,6 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev, bool clk_reinit) |
5739 |
+ if (err) |
5740 |
+ return err; |
5741 |
+ |
5742 |
+- if (i2c_dev->irq_disabled) { |
5743 |
+- i2c_dev->irq_disabled = false; |
5744 |
+- enable_irq(i2c_dev->irq); |
5745 |
+- } |
5746 |
+- |
5747 |
+ return 0; |
5748 |
+ } |
5749 |
+ |
5750 |
+@@ -825,18 +816,12 @@ static irqreturn_t tegra_i2c_isr(int irq, void *dev_id) |
5751 |
+ |
5752 |
+ status = i2c_readl(i2c_dev, I2C_INT_STATUS); |
5753 |
+ |
5754 |
+- spin_lock(&i2c_dev->xfer_lock); |
5755 |
+ if (status == 0) { |
5756 |
+ dev_warn(i2c_dev->dev, "irq status 0 %08x %08x %08x\n", |
5757 |
+ i2c_readl(i2c_dev, I2C_PACKET_TRANSFER_STATUS), |
5758 |
+ i2c_readl(i2c_dev, I2C_STATUS), |
5759 |
+ i2c_readl(i2c_dev, I2C_CNFG)); |
5760 |
+ i2c_dev->msg_err |= I2C_ERR_UNKNOWN_INTERRUPT; |
5761 |
+- |
5762 |
+- if (!i2c_dev->irq_disabled) { |
5763 |
+- disable_irq_nosync(i2c_dev->irq); |
5764 |
+- i2c_dev->irq_disabled = true; |
5765 |
+- } |
5766 |
+ goto err; |
5767 |
+ } |
5768 |
+ |
5769 |
+@@ -925,7 +910,6 @@ err: |
5770 |
+ |
5771 |
+ complete(&i2c_dev->msg_complete); |
5772 |
+ done: |
5773 |
+- spin_unlock(&i2c_dev->xfer_lock); |
5774 |
+ return IRQ_HANDLED; |
5775 |
+ } |
5776 |
+ |
5777 |
+@@ -999,6 +983,30 @@ out: |
5778 |
+ i2c_writel(i2c_dev, val, reg); |
5779 |
+ } |
5780 |
+ |
5781 |
++static unsigned long |
5782 |
++tegra_i2c_wait_completion_timeout(struct tegra_i2c_dev *i2c_dev, |
5783 |
++ struct completion *complete, |
5784 |
++ unsigned int timeout_ms) |
5785 |
++{ |
5786 |
++ unsigned long ret; |
5787 |
++ |
5788 |
++ enable_irq(i2c_dev->irq); |
5789 |
++ ret = wait_for_completion_timeout(complete, |
5790 |
++ msecs_to_jiffies(timeout_ms)); |
5791 |
++ disable_irq(i2c_dev->irq); |
5792 |
++ |
5793 |
++ /* |
5794 |
++ * There is a chance that completion may happen after IRQ |
5795 |
++ * synchronization, which is done by disable_irq(). |
5796 |
++ */ |
5797 |
++ if (ret == 0 && completion_done(complete)) { |
5798 |
++ dev_warn(i2c_dev->dev, "completion done after timeout\n"); |
5799 |
++ ret = 1; |
5800 |
++ } |
5801 |
++ |
5802 |
++ return ret; |
5803 |
++} |
5804 |
++ |
5805 |
+ static int tegra_i2c_issue_bus_clear(struct i2c_adapter *adap) |
5806 |
+ { |
5807 |
+ struct tegra_i2c_dev *i2c_dev = i2c_get_adapdata(adap); |
5808 |
+@@ -1020,8 +1028,8 @@ static int tegra_i2c_issue_bus_clear(struct i2c_adapter *adap) |
5809 |
+ i2c_writel(i2c_dev, reg, I2C_BUS_CLEAR_CNFG); |
5810 |
+ tegra_i2c_unmask_irq(i2c_dev, I2C_INT_BUS_CLR_DONE); |
5811 |
+ |
5812 |
+- time_left = wait_for_completion_timeout(&i2c_dev->msg_complete, |
5813 |
+- msecs_to_jiffies(50)); |
5814 |
++ time_left = tegra_i2c_wait_completion_timeout( |
5815 |
++ i2c_dev, &i2c_dev->msg_complete, 50); |
5816 |
+ if (time_left == 0) { |
5817 |
+ dev_err(i2c_dev->dev, "timed out for bus clear\n"); |
5818 |
+ return -ETIMEDOUT; |
5819 |
+@@ -1044,7 +1052,6 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev, |
5820 |
+ u32 packet_header; |
5821 |
+ u32 int_mask; |
5822 |
+ unsigned long time_left; |
5823 |
+- unsigned long flags; |
5824 |
+ size_t xfer_size; |
5825 |
+ u32 *buffer = NULL; |
5826 |
+ int err = 0; |
5827 |
+@@ -1075,7 +1082,6 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev, |
5828 |
+ */ |
5829 |
+ xfer_time += DIV_ROUND_CLOSEST(((xfer_size * 9) + 2) * MSEC_PER_SEC, |
5830 |
+ i2c_dev->bus_clk_rate); |
5831 |
+- spin_lock_irqsave(&i2c_dev->xfer_lock, flags); |
5832 |
+ |
5833 |
+ int_mask = I2C_INT_NO_ACK | I2C_INT_ARBITRATION_LOST; |
5834 |
+ tegra_i2c_unmask_irq(i2c_dev, int_mask); |
5835 |
+@@ -1090,7 +1096,7 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev, |
5836 |
+ dev_err(i2c_dev->dev, |
5837 |
+ "starting RX DMA failed, err %d\n", |
5838 |
+ err); |
5839 |
+- goto unlock; |
5840 |
++ return err; |
5841 |
+ } |
5842 |
+ |
5843 |
+ } else { |
5844 |
+@@ -1149,7 +1155,7 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev, |
5845 |
+ dev_err(i2c_dev->dev, |
5846 |
+ "starting TX DMA failed, err %d\n", |
5847 |
+ err); |
5848 |
+- goto unlock; |
5849 |
++ return err; |
5850 |
+ } |
5851 |
+ } else { |
5852 |
+ tegra_i2c_fill_tx_fifo(i2c_dev); |
5853 |
+@@ -1169,15 +1175,10 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev, |
5854 |
+ dev_dbg(i2c_dev->dev, "unmasked irq: %02x\n", |
5855 |
+ i2c_readl(i2c_dev, I2C_INT_MASK)); |
5856 |
+ |
5857 |
+-unlock: |
5858 |
+- spin_unlock_irqrestore(&i2c_dev->xfer_lock, flags); |
5859 |
+- |
5860 |
+ if (dma) { |
5861 |
+- if (err) |
5862 |
+- return err; |
5863 |
++ time_left = tegra_i2c_wait_completion_timeout( |
5864 |
++ i2c_dev, &i2c_dev->dma_complete, xfer_time); |
5865 |
+ |
5866 |
+- time_left = wait_for_completion_timeout(&i2c_dev->dma_complete, |
5867 |
+- msecs_to_jiffies(xfer_time)); |
5868 |
+ if (time_left == 0) { |
5869 |
+ dev_err(i2c_dev->dev, "DMA transfer timeout\n"); |
5870 |
+ dmaengine_terminate_sync(i2c_dev->msg_read ? |
5871 |
+@@ -1202,13 +1203,13 @@ unlock: |
5872 |
+ i2c_dev->tx_dma_chan); |
5873 |
+ } |
5874 |
+ |
5875 |
+- time_left = wait_for_completion_timeout(&i2c_dev->msg_complete, |
5876 |
+- msecs_to_jiffies(xfer_time)); |
5877 |
++ time_left = tegra_i2c_wait_completion_timeout( |
5878 |
++ i2c_dev, &i2c_dev->msg_complete, xfer_time); |
5879 |
++ |
5880 |
+ tegra_i2c_mask_irq(i2c_dev, int_mask); |
5881 |
+ |
5882 |
+ if (time_left == 0) { |
5883 |
+ dev_err(i2c_dev->dev, "i2c transfer timed out\n"); |
5884 |
+- |
5885 |
+ tegra_i2c_init(i2c_dev, true); |
5886 |
+ return -ETIMEDOUT; |
5887 |
+ } |
5888 |
+@@ -1568,7 +1569,6 @@ static int tegra_i2c_probe(struct platform_device *pdev) |
5889 |
+ I2C_PACKET_HEADER_SIZE; |
5890 |
+ init_completion(&i2c_dev->msg_complete); |
5891 |
+ init_completion(&i2c_dev->dma_complete); |
5892 |
+- spin_lock_init(&i2c_dev->xfer_lock); |
5893 |
+ |
5894 |
+ if (!i2c_dev->hw->has_single_clk_source) { |
5895 |
+ fast_clk = devm_clk_get(&pdev->dev, "fast-clk"); |
5896 |
+@@ -1644,6 +1644,8 @@ static int tegra_i2c_probe(struct platform_device *pdev) |
5897 |
+ goto release_dma; |
5898 |
+ } |
5899 |
+ |
5900 |
++ irq_set_status_flags(i2c_dev->irq, IRQ_NOAUTOEN); |
5901 |
++ |
5902 |
+ ret = devm_request_irq(&pdev->dev, i2c_dev->irq, |
5903 |
+ tegra_i2c_isr, 0, dev_name(&pdev->dev), i2c_dev); |
5904 |
+ if (ret) { |
5905 |
+@@ -1719,10 +1721,14 @@ static int tegra_i2c_remove(struct platform_device *pdev) |
5906 |
+ static int __maybe_unused tegra_i2c_suspend(struct device *dev) |
5907 |
+ { |
5908 |
+ struct tegra_i2c_dev *i2c_dev = dev_get_drvdata(dev); |
5909 |
++ int err = 0; |
5910 |
+ |
5911 |
+ i2c_mark_adapter_suspended(&i2c_dev->adapter); |
5912 |
+ |
5913 |
+- return 0; |
5914 |
++ if (!pm_runtime_status_suspended(dev)) |
5915 |
++ err = tegra_i2c_runtime_suspend(dev); |
5916 |
++ |
5917 |
++ return err; |
5918 |
+ } |
5919 |
+ |
5920 |
+ static int __maybe_unused tegra_i2c_resume(struct device *dev) |
5921 |
+@@ -1730,6 +1736,10 @@ static int __maybe_unused tegra_i2c_resume(struct device *dev) |
5922 |
+ struct tegra_i2c_dev *i2c_dev = dev_get_drvdata(dev); |
5923 |
+ int err; |
5924 |
+ |
5925 |
++ /* |
5926 |
++ * We need to ensure that clocks are enabled so that registers can be |
5927 |
++ * restored in tegra_i2c_init(). |
5928 |
++ */ |
5929 |
+ err = tegra_i2c_runtime_resume(dev); |
5930 |
+ if (err) |
5931 |
+ return err; |
5932 |
+@@ -1738,9 +1748,16 @@ static int __maybe_unused tegra_i2c_resume(struct device *dev) |
5933 |
+ if (err) |
5934 |
+ return err; |
5935 |
+ |
5936 |
+- err = tegra_i2c_runtime_suspend(dev); |
5937 |
+- if (err) |
5938 |
+- return err; |
5939 |
++ /* |
5940 |
++ * In case we are runtime suspended, disable clocks again so that we |
5941 |
++ * don't unbalance the clock reference counts during the next runtime |
5942 |
++ * resume transition. |
5943 |
++ */ |
5944 |
++ if (pm_runtime_status_suspended(dev)) { |
5945 |
++ err = tegra_i2c_runtime_suspend(dev); |
5946 |
++ if (err) |
5947 |
++ return err; |
5948 |
++ } |
5949 |
+ |
5950 |
+ i2c_mark_adapter_resumed(&i2c_dev->adapter); |
5951 |
+ |
5952 |
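With IRQ_NOAUTOEN set at probe, the Tegra interrupt now stays masked except across an actual wait, which is what lets the patch drop the old xfer_lock and irq_disabled bookkeeping. The new helper also closes a subtle window: the ISR can fire and complete the transfer between the timeout expiring and disable_irq() synchronising the handler, so a zero return is re-checked with completion_done(). A kernel-context sketch of that wait helper:

#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>

static unsigned long wait_with_irq(unsigned int irq, struct completion *done,
				   unsigned int timeout_ms)
{
	unsigned long ret;

	enable_irq(irq);
	ret = wait_for_completion_timeout(done, msecs_to_jiffies(timeout_ms));
	disable_irq(irq);	/* also synchronizes a still-running handler */

	/* The handler may have completed us between timeout and sync. */
	if (ret == 0 && completion_done(done))
		ret = 1;

	return ret;
}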
+diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c |
5953 |
+index def62d5b42ca7..2dfe2ffcf8825 100644 |
5954 |
+--- a/drivers/i2c/i2c-core-base.c |
5955 |
++++ b/drivers/i2c/i2c-core-base.c |
5956 |
+@@ -1385,8 +1385,8 @@ static int i2c_register_adapter(struct i2c_adapter *adap) |
5957 |
+ |
5958 |
+ /* create pre-declared device nodes */ |
5959 |
+ of_i2c_register_devices(adap); |
5960 |
+- i2c_acpi_register_devices(adap); |
5961 |
+ i2c_acpi_install_space_handler(adap); |
5962 |
++ i2c_acpi_register_devices(adap); |
5963 |
+ |
5964 |
+ if (adap->nr < __i2c_first_dynamic_bus_num) |
5965 |
+ i2c_scan_static_board_info(adap); |
5966 |
+diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c |
5967 |
+index 09af96ec41dd6..c1d6a068f50fe 100644 |
5968 |
+--- a/drivers/infiniband/core/cm.c |
5969 |
++++ b/drivers/infiniband/core/cm.c |
5970 |
+@@ -1092,14 +1092,22 @@ retest: |
5971 |
+ break; |
5972 |
+ } |
5973 |
+ |
5974 |
+- spin_lock_irq(&cm.lock); |
5975 |
++ spin_lock_irq(&cm_id_priv->lock); |
5976 |
++ spin_lock(&cm.lock); |
5977 |
++ /* Required for cleanup paths related cm_req_handler() */ |
5978 |
++ if (cm_id_priv->timewait_info) { |
5979 |
++ cm_cleanup_timewait(cm_id_priv->timewait_info); |
5980 |
++ kfree(cm_id_priv->timewait_info); |
5981 |
++ cm_id_priv->timewait_info = NULL; |
5982 |
++ } |
5983 |
+ if (!list_empty(&cm_id_priv->altr_list) && |
5984 |
+ (!cm_id_priv->altr_send_port_not_ready)) |
5985 |
+ list_del(&cm_id_priv->altr_list); |
5986 |
+ if (!list_empty(&cm_id_priv->prim_list) && |
5987 |
+ (!cm_id_priv->prim_send_port_not_ready)) |
5988 |
+ list_del(&cm_id_priv->prim_list); |
5989 |
+- spin_unlock_irq(&cm.lock); |
5990 |
++ spin_unlock(&cm.lock); |
5991 |
++ spin_unlock_irq(&cm_id_priv->lock); |
5992 |
+ |
5993 |
+ cm_free_id(cm_id->local_id); |
5994 |
+ cm_deref_id(cm_id_priv); |
5995 |
+@@ -1416,7 +1424,7 @@ int ib_send_cm_req(struct ib_cm_id *cm_id, |
5996 |
+ /* Verify that we're not in timewait. */ |
5997 |
+ cm_id_priv = container_of(cm_id, struct cm_id_private, id); |
5998 |
+ spin_lock_irqsave(&cm_id_priv->lock, flags); |
5999 |
+- if (cm_id->state != IB_CM_IDLE) { |
6000 |
++ if (cm_id->state != IB_CM_IDLE || WARN_ON(cm_id_priv->timewait_info)) { |
6001 |
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags); |
6002 |
+ ret = -EINVAL; |
6003 |
+ goto out; |
6004 |
+@@ -1434,12 +1442,12 @@ int ib_send_cm_req(struct ib_cm_id *cm_id, |
6005 |
+ param->ppath_sgid_attr, &cm_id_priv->av, |
6006 |
+ cm_id_priv); |
6007 |
+ if (ret) |
6008 |
+- goto error1; |
6009 |
++ goto out; |
6010 |
+ if (param->alternate_path) { |
6011 |
+ ret = cm_init_av_by_path(param->alternate_path, NULL, |
6012 |
+ &cm_id_priv->alt_av, cm_id_priv); |
6013 |
+ if (ret) |
6014 |
+- goto error1; |
6015 |
++ goto out; |
6016 |
+ } |
6017 |
+ cm_id->service_id = param->service_id; |
6018 |
+ cm_id->service_mask = ~cpu_to_be64(0); |
6019 |
+@@ -1457,7 +1465,7 @@ int ib_send_cm_req(struct ib_cm_id *cm_id, |
6020 |
+ |
6021 |
+ ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg); |
6022 |
+ if (ret) |
6023 |
+- goto error1; |
6024 |
++ goto out; |
6025 |
+ |
6026 |
+ req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad; |
6027 |
+ cm_format_req(req_msg, cm_id_priv, param); |
6028 |
+@@ -1480,7 +1488,6 @@ int ib_send_cm_req(struct ib_cm_id *cm_id, |
6029 |
+ return 0; |
6030 |
+ |
6031 |
+ error2: cm_free_msg(cm_id_priv->msg); |
6032 |
+-error1: kfree(cm_id_priv->timewait_info); |
6033 |
+ out: return ret; |
6034 |
+ } |
6035 |
+ EXPORT_SYMBOL(ib_send_cm_req); |
6036 |
+@@ -1965,7 +1972,7 @@ static int cm_req_handler(struct cm_work *work) |
6037 |
+ pr_debug("%s: local_id %d, no listen_cm_id_priv\n", __func__, |
6038 |
+ be32_to_cpu(cm_id->local_id)); |
6039 |
+ ret = -EINVAL; |
6040 |
+- goto free_timeinfo; |
6041 |
++ goto destroy; |
6042 |
+ } |
6043 |
+ |
6044 |
+ cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler; |
6045 |
+@@ -2050,8 +2057,6 @@ static int cm_req_handler(struct cm_work *work) |
6046 |
+ rejected: |
6047 |
+ atomic_dec(&cm_id_priv->refcount); |
6048 |
+ cm_deref_id(listen_cm_id_priv); |
6049 |
+-free_timeinfo: |
6050 |
+- kfree(cm_id_priv->timewait_info); |
6051 |
+ destroy: |
6052 |
+ ib_destroy_cm_id(cm_id); |
6053 |
+ return ret; |
6054 |
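The ib_cm change frees timewait_info exactly once, on the destroy path, with cm_id_priv->lock taken before the global cm.lock (per-object, then global) and the pointer NULLed afterwards, so ib_send_cm_req() can WARN_ON() a stale one instead of every error path guessing who frees it. A kernel-context sketch of the free-once-and-NULL idiom; the struct and helper names here are hypothetical:

static void destroy_id(struct example_priv *priv)
{
	spin_lock_irq(&priv->lock);		/* per-object lock first */
	spin_lock(&example_global_lock);	/* then the global lock */
	if (priv->timewait_info) {
		cleanup_timewait(priv->timewait_info);
		kfree(priv->timewait_info);
		priv->timewait_info = NULL;	/* double-free now impossible;
						 * later paths can WARN_ON() */
	}
	spin_unlock(&example_global_lock);
	spin_unlock_irq(&priv->lock);
}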
+diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c |
6055 |
+index 6b4e7235d2f56..30e08bcc9afb5 100644 |
6056 |
+--- a/drivers/infiniband/hw/cxgb4/cm.c |
6057 |
++++ b/drivers/infiniband/hw/cxgb4/cm.c |
6058 |
+@@ -3382,7 +3382,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) |
6059 |
+ if (raddr->sin_addr.s_addr == htonl(INADDR_ANY)) { |
6060 |
+ err = pick_local_ipaddrs(dev, cm_id); |
6061 |
+ if (err) |
6062 |
+- goto fail2; |
6063 |
++ goto fail3; |
6064 |
+ } |
6065 |
+ |
6066 |
+ /* find a route */ |
6067 |
+@@ -3404,7 +3404,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) |
6068 |
+ if (ipv6_addr_type(&raddr6->sin6_addr) == IPV6_ADDR_ANY) { |
6069 |
+ err = pick_local_ip6addrs(dev, cm_id); |
6070 |
+ if (err) |
6071 |
+- goto fail2; |
6072 |
++ goto fail3; |
6073 |
+ } |
6074 |
+ |
6075 |
+ /* find a route */ |
6076 |
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c |
6077 |
+index b1df93b69df44..fa7a5ff498c73 100644 |
6078 |
+--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c |
6079 |
++++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c |
6080 |
+@@ -2074,9 +2074,9 @@ static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev, |
6081 |
+ dst = i40iw_get_dst_ipv6(&src_addr, &dst_addr); |
6082 |
+ if (!dst || dst->error) { |
6083 |
+ if (dst) { |
6084 |
+- dst_release(dst); |
6085 |
+ i40iw_pr_err("ip6_route_output returned dst->error = %d\n", |
6086 |
+ dst->error); |
6087 |
++ dst_release(dst); |
6088 |
+ } |
6089 |
+ return rc; |
6090 |
+ } |
6091 |
+diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c |
6092 |
+index a7a926b7b5628..6dea49e11f5f0 100644 |
6093 |
+--- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c |
6094 |
++++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c |
6095 |
+@@ -490,10 +490,10 @@ qedr_addr6_resolve(struct qedr_dev *dev, |
6096 |
+ |
6097 |
+ if ((!dst) || dst->error) { |
6098 |
+ if (dst) { |
6099 |
+- dst_release(dst); |
6100 |
+ DP_ERR(dev, |
6101 |
+ "ip6_route_output returned dst->error = %d\n", |
6102 |
+ dst->error); |
6103 |
++ dst_release(dst); |
6104 |
+ } |
6105 |
+ return -EINVAL; |
6106 |
+ } |
6107 |
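The i40iw and qedr hunks fix the same use-after-free ordering bug: dst_release() may free the routing entry, so it must come after the last read of dst->error, not before the error is logged. A kernel-context sketch of the corrected ordering:

	dst = ip6_route_output(net, NULL, &fl6);
	if (!dst || dst->error) {
		if (dst) {
			pr_err("ip6_route_output: dst->error = %d\n",
			       dst->error);
			dst_release(dst);	/* only after the last read */
		}
		return -EINVAL;
	}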
+diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c |
6108 |
+index 70c4ea438664d..de5f3efe9fcb4 100644 |
6109 |
+--- a/drivers/infiniband/sw/rxe/rxe.c |
6110 |
++++ b/drivers/infiniband/sw/rxe/rxe.c |
6111 |
+@@ -118,6 +118,8 @@ static void rxe_init_device_param(struct rxe_dev *rxe) |
6112 |
+ rxe->attr.max_fast_reg_page_list_len = RXE_MAX_FMR_PAGE_LIST_LEN; |
6113 |
+ rxe->attr.max_pkeys = RXE_MAX_PKEYS; |
6114 |
+ rxe->attr.local_ca_ack_delay = RXE_LOCAL_CA_ACK_DELAY; |
6115 |
++ addrconf_addr_eui48((unsigned char *)&rxe->attr.sys_image_guid, |
6116 |
++ rxe->ndev->dev_addr); |
6117 |
+ |
6118 |
+ rxe->max_ucontext = RXE_MAX_UCONTEXT; |
6119 |
+ } |
6120 |
+diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c |
6121 |
+index e2c6d1cedf416..f85273883794b 100644 |
6122 |
+--- a/drivers/infiniband/sw/rxe/rxe_qp.c |
6123 |
++++ b/drivers/infiniband/sw/rxe/rxe_qp.c |
6124 |
+@@ -592,15 +592,16 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask, |
6125 |
+ int err; |
6126 |
+ |
6127 |
+ if (mask & IB_QP_MAX_QP_RD_ATOMIC) { |
6128 |
+- int max_rd_atomic = __roundup_pow_of_two(attr->max_rd_atomic); |
6129 |
++ int max_rd_atomic = attr->max_rd_atomic ? |
6130 |
++ roundup_pow_of_two(attr->max_rd_atomic) : 0; |
6131 |
+ |
6132 |
+ qp->attr.max_rd_atomic = max_rd_atomic; |
6133 |
+ atomic_set(&qp->req.rd_atomic, max_rd_atomic); |
6134 |
+ } |
6135 |
+ |
6136 |
+ if (mask & IB_QP_MAX_DEST_RD_ATOMIC) { |
6137 |
+- int max_dest_rd_atomic = |
6138 |
+- __roundup_pow_of_two(attr->max_dest_rd_atomic); |
6139 |
++ int max_dest_rd_atomic = attr->max_dest_rd_atomic ? |
6140 |
++ roundup_pow_of_two(attr->max_dest_rd_atomic) : 0; |
6141 |
+ |
6142 |
+ qp->attr.max_dest_rd_atomic = max_dest_rd_atomic; |
6143 |
+ |
6144 |
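The rxe fix guards roundup_pow_of_two(), which is undefined for an input of 0 (the kernel's non-constant path computes 1UL << fls_long(n - 1), and n - 1 underflows), while 0 is a legal "no resources requested" value for max_rd_atomic. A standalone sketch with a loop-based stand-in for the kernel helper:

#include <stdio.h>

/*
 * Loop-based stand-in; the real kernel roundup_pow_of_two() is
 * documented as undefined for n == 0, so callers must guard it.
 */
static unsigned long roundup_pow_of_two_ul(unsigned long n)
{
	unsigned long p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

/* Mirror the fix: treat 0 as "none requested" instead of rounding it. */
static unsigned long safe_round(unsigned long n)
{
	return n ? roundup_pow_of_two_ul(n) : 0;
}

int main(void)
{
	printf("%lu %lu %lu\n", safe_round(0), safe_round(3), safe_round(8));
	return 0;	/* prints: 0 4 8 */
}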
+diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c |
6145 |
+index 2cc89a9b9e9bb..ea8e611397a3b 100644 |
6146 |
+--- a/drivers/infiniband/ulp/iser/iser_memory.c |
6147 |
++++ b/drivers/infiniband/ulp/iser/iser_memory.c |
6148 |
+@@ -292,12 +292,27 @@ void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task, |
6149 |
+ { |
6150 |
+ struct iser_device *device = iser_task->iser_conn->ib_conn.device; |
6151 |
+ struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir]; |
6152 |
++ struct iser_fr_desc *desc; |
6153 |
++ struct ib_mr_status mr_status; |
6154 |
+ |
6155 |
+- if (!reg->mem_h) |
6156 |
++ desc = reg->mem_h; |
6157 |
++ if (!desc) |
6158 |
+ return; |
6159 |
+ |
6160 |
+- device->reg_ops->reg_desc_put(&iser_task->iser_conn->ib_conn, |
6161 |
+- reg->mem_h); |
6162 |
++ /* |
6163 |
++ * The signature MR cannot be invalidated and reused without checking. |
6164 |
++ * libiscsi calls the check_protection transport handler only if |
6165 |
++ * SCSI-Response is received. And the signature MR is not checked if |
6166 |
++ * the task is completed for some other reason like a timeout or error |
6167 |
++ * handling. That's why we must check the signature MR here before |
6168 |
++ * putting it to the free pool. |
6169 |
++ */ |
6170 |
++ if (unlikely(desc->sig_protected)) { |
6171 |
++ desc->sig_protected = false; |
6172 |
++ ib_check_mr_status(desc->rsc.sig_mr, IB_MR_CHECK_SIG_STATUS, |
6173 |
++ &mr_status); |
6174 |
++ } |
6175 |
++ device->reg_ops->reg_desc_put(&iser_task->iser_conn->ib_conn, desc); |
6176 |
+ reg->mem_h = NULL; |
6177 |
+ } |
6178 |
+ |
6179 |
+diff --git a/drivers/leds/leds-mlxreg.c b/drivers/leds/leds-mlxreg.c |
6180 |
+index cabe379071a7c..82aea1cd0c125 100644 |
6181 |
+--- a/drivers/leds/leds-mlxreg.c |
6182 |
++++ b/drivers/leds/leds-mlxreg.c |
6183 |
+@@ -228,8 +228,8 @@ static int mlxreg_led_config(struct mlxreg_led_priv_data *priv) |
6184 |
+ brightness = LED_OFF; |
6185 |
+ led_data->base_color = MLXREG_LED_GREEN_SOLID; |
6186 |
+ } |
6187 |
+- sprintf(led_data->led_cdev_name, "%s:%s", "mlxreg", |
6188 |
+- data->label); |
6189 |
++ snprintf(led_data->led_cdev_name, sizeof(led_data->led_cdev_name), |
6190 |
++ "mlxreg:%s", data->label); |
6191 |
+ led_cdev->name = led_data->led_cdev_name; |
6192 |
+ led_cdev->brightness = brightness; |
6193 |
+ led_cdev->max_brightness = LED_ON; |
6194 |
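The LED fix is a textbook sprintf-to-snprintf hardening: led_cdev_name is a fixed-size buffer and data->label comes from platform data, so the unbounded sprintf() could overflow it, while snprintf() bounds the write and always NUL-terminates. A standalone sketch (NAME_LEN and the label value are made up for illustration):

#include <stdio.h>

#define NAME_LEN 24

int main(void)
{
	char name[NAME_LEN];
	const char *label = "fan1:green:status";	/* may exceed the buffer */

	/*
	 * snprintf() never writes more than sizeof(name) bytes and always
	 * NUL-terminates; an oversized label is truncated, not overflowed.
	 */
	snprintf(name, sizeof(name), "mlxreg:%s", label);
	printf("%s\n", name);
	return 0;
}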
+diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h |
6195 |
+index 217c838a1b405..859567ad3db4e 100644 |
6196 |
+--- a/drivers/md/bcache/bcache.h |
6197 |
++++ b/drivers/md/bcache/bcache.h |
6198 |
+@@ -585,6 +585,7 @@ struct cache_set { |
6199 |
+ */ |
6200 |
+ wait_queue_head_t btree_cache_wait; |
6201 |
+ struct task_struct *btree_cache_alloc_lock; |
6202 |
++ spinlock_t btree_cannibalize_lock; |
6203 |
+ |
6204 |
+ /* |
6205 |
+ * When we free a btree node, we increment the gen of the bucket the |
6206 |
+diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c |
6207 |
+index 46556bde032e2..8d06105fc9ff5 100644 |
6208 |
+--- a/drivers/md/bcache/btree.c |
6209 |
++++ b/drivers/md/bcache/btree.c |
6210 |
+@@ -886,15 +886,17 @@ out: |
6211 |
+ |
6212 |
+ static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op) |
6213 |
+ { |
6214 |
+- struct task_struct *old; |
6215 |
+- |
6216 |
+- old = cmpxchg(&c->btree_cache_alloc_lock, NULL, current); |
6217 |
+- if (old && old != current) { |
6218 |
++ spin_lock(&c->btree_cannibalize_lock); |
6219 |
++ if (likely(c->btree_cache_alloc_lock == NULL)) { |
6220 |
++ c->btree_cache_alloc_lock = current; |
6221 |
++ } else if (c->btree_cache_alloc_lock != current) { |
6222 |
+ if (op) |
6223 |
+ prepare_to_wait(&c->btree_cache_wait, &op->wait, |
6224 |
+ TASK_UNINTERRUPTIBLE); |
6225 |
++ spin_unlock(&c->btree_cannibalize_lock); |
6226 |
+ return -EINTR; |
6227 |
+ } |
6228 |
++ spin_unlock(&c->btree_cannibalize_lock); |
6229 |
+ |
6230 |
+ return 0; |
6231 |
+ } |
6232 |
+@@ -929,10 +931,12 @@ static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op, |
6233 |
+ */ |
6234 |
+ static void bch_cannibalize_unlock(struct cache_set *c) |
6235 |
+ { |
6236 |
++ spin_lock(&c->btree_cannibalize_lock); |
6237 |
+ if (c->btree_cache_alloc_lock == current) { |
6238 |
+ c->btree_cache_alloc_lock = NULL; |
6239 |
+ wake_up(&c->btree_cache_wait); |
6240 |
+ } |
6241 |
++ spin_unlock(&c->btree_cannibalize_lock); |
6242 |
+ } |
6243 |
+ |
6244 |
+ static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op, |
6245 |
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c |
6246 |
+index 2cbfcd99b7ee7..63f5ce18311bb 100644 |
6247 |
+--- a/drivers/md/bcache/super.c |
6248 |
++++ b/drivers/md/bcache/super.c |
6249 |
+@@ -1798,6 +1798,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb) |
6250 |
+ sema_init(&c->sb_write_mutex, 1); |
6251 |
+ mutex_init(&c->bucket_lock); |
6252 |
+ init_waitqueue_head(&c->btree_cache_wait); |
6253 |
++ spin_lock_init(&c->btree_cannibalize_lock); |
6254 |
+ init_waitqueue_head(&c->bucket_wait); |
6255 |
+ init_waitqueue_head(&c->gc_wait); |
6256 |
+ sema_init(&c->uuid_write_mutex, 1); |
6257 |
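The bcache hunks replace a bare cmpxchg() baton with a spinlock-protected one: the old release path's read-then-write of btree_cache_alloc_lock was not atomic with respect to a concurrent taker, so ownership changes and wakeups could race. Serialising both paths under btree_cannibalize_lock (initialised in bch_cache_set_alloc()) makes the check, the ownership change and the wake_up() a single critical section. A kernel-context sketch using the patch's own field names:

static int try_take_baton(struct cache_set *c)
{
	int ret = 0;

	spin_lock(&c->btree_cannibalize_lock);
	if (!c->btree_cache_alloc_lock)
		c->btree_cache_alloc_lock = current;	/* we own it now */
	else if (c->btree_cache_alloc_lock != current)
		ret = -EINTR;		/* held elsewhere; caller must wait */
	spin_unlock(&c->btree_cannibalize_lock);
	return ret;
}

static void release_baton(struct cache_set *c)
{
	spin_lock(&c->btree_cannibalize_lock);
	if (c->btree_cache_alloc_lock == current) {
		c->btree_cache_alloc_lock = NULL;
		wake_up(&c->btree_cache_wait);	/* atomic with the release */
	}
	spin_unlock(&c->btree_cannibalize_lock);
}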
+diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c |
6258 |
+index edc3660759131..13ad791126618 100644 |
6259 |
+--- a/drivers/md/dm-table.c |
6260 |
++++ b/drivers/md/dm-table.c |
6261 |
+@@ -922,21 +922,15 @@ bool dm_table_supports_dax(struct dm_table *t, |
6262 |
+ |
6263 |
+ static bool dm_table_does_not_support_partial_completion(struct dm_table *t); |
6264 |
+ |
6265 |
+-struct verify_rq_based_data { |
6266 |
+- unsigned sq_count; |
6267 |
+- unsigned mq_count; |
6268 |
+-}; |
6269 |
+- |
6270 |
+-static int device_is_rq_based(struct dm_target *ti, struct dm_dev *dev, |
6271 |
+- sector_t start, sector_t len, void *data) |
6272 |
++static int device_is_rq_stackable(struct dm_target *ti, struct dm_dev *dev, |
6273 |
++ sector_t start, sector_t len, void *data) |
6274 |
+ { |
6275 |
+- struct request_queue *q = bdev_get_queue(dev->bdev); |
6276 |
+- struct verify_rq_based_data *v = data; |
6277 |
++ struct block_device *bdev = dev->bdev; |
6278 |
++ struct request_queue *q = bdev_get_queue(bdev); |
6279 |
+ |
6280 |
+- if (queue_is_mq(q)) |
6281 |
+- v->mq_count++; |
6282 |
+- else |
6283 |
+- v->sq_count++; |
6284 |
++ /* request-based cannot stack on partitions! */ |
6285 |
++ if (bdev != bdev->bd_contains) |
6286 |
++ return false; |
6287 |
+ |
6288 |
+ return queue_is_mq(q); |
6289 |
+ } |
6290 |
+@@ -945,7 +939,6 @@ static int dm_table_determine_type(struct dm_table *t) |
6291 |
+ { |
6292 |
+ unsigned i; |
6293 |
+ unsigned bio_based = 0, request_based = 0, hybrid = 0; |
6294 |
+- struct verify_rq_based_data v = {.sq_count = 0, .mq_count = 0}; |
6295 |
+ struct dm_target *tgt; |
6296 |
+ struct list_head *devices = dm_table_get_devices(t); |
6297 |
+ enum dm_queue_mode live_md_type = dm_get_md_type(t->md); |
6298 |
+@@ -1049,14 +1042,10 @@ verify_rq_based: |
6299 |
+ |
6300 |
+ /* Non-request-stackable devices can't be used for request-based dm */ |
6301 |
+ if (!tgt->type->iterate_devices || |
6302 |
+- !tgt->type->iterate_devices(tgt, device_is_rq_based, &v)) { |
6303 |
++ !tgt->type->iterate_devices(tgt, device_is_rq_stackable, NULL)) { |
6304 |
+ DMERR("table load rejected: including non-request-stackable devices"); |
6305 |
+ return -EINVAL; |
6306 |
+ } |
6307 |
+- if (v.sq_count > 0) { |
6308 |
+- DMERR("table load rejected: not all devices are blk-mq request-stackable"); |
6309 |
+- return -EINVAL; |
6310 |
+- } |
6311 |
+ |
6312 |
+ return 0; |
6313 |
+ } |
6314 |
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c |
6315 |
+index 63d59e2ed1582..667db23f10ee1 100644 |
6316 |
+--- a/drivers/md/dm.c |
6317 |
++++ b/drivers/md/dm.c |
6318 |
+@@ -1720,23 +1720,6 @@ out: |
6319 |
+ return ret; |
6320 |
+ } |
6321 |
+ |
6322 |
+-static void dm_queue_split(struct mapped_device *md, struct dm_target *ti, struct bio **bio) |
6323 |
+-{ |
6324 |
+- unsigned len, sector_count; |
6325 |
+- |
6326 |
+- sector_count = bio_sectors(*bio); |
6327 |
+- len = min_t(sector_t, max_io_len((*bio)->bi_iter.bi_sector, ti), sector_count); |
6328 |
+- |
6329 |
+- if (sector_count > len) { |
6330 |
+- struct bio *split = bio_split(*bio, len, GFP_NOIO, &md->queue->bio_split); |
6331 |
+- |
6332 |
+- bio_chain(split, *bio); |
6333 |
+- trace_block_split(md->queue, split, (*bio)->bi_iter.bi_sector); |
6334 |
+- generic_make_request(*bio); |
6335 |
+- *bio = split; |
6336 |
+- } |
6337 |
+-} |
6338 |
+- |
6339 |
+ static blk_qc_t dm_process_bio(struct mapped_device *md, |
6340 |
+ struct dm_table *map, struct bio *bio) |
6341 |
+ { |
6342 |
+@@ -1764,14 +1747,12 @@ static blk_qc_t dm_process_bio(struct mapped_device *md, |
6343 |
+ if (current->bio_list) { |
6344 |
+ if (is_abnormal_io(bio)) |
6345 |
+ blk_queue_split(md->queue, &bio); |
6346 |
+- else |
6347 |
+- dm_queue_split(md, ti, &bio); |
6348 |
++ /* regular IO is split by __split_and_process_bio */ |
6349 |
+ } |
6350 |
+ |
6351 |
+ if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED) |
6352 |
+ return __process_bio(md, map, bio, ti); |
6353 |
+- else |
6354 |
+- return __split_and_process_bio(md, map, bio); |
6355 |
++ return __split_and_process_bio(md, map, bio); |
6356 |
+ } |
6357 |
+ |
6358 |
+ static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio) |
6359 |
+diff --git a/drivers/media/dvb-frontends/tda10071.c b/drivers/media/dvb-frontends/tda10071.c |
6360 |
+index 1953b00b3e487..685c0ac71819e 100644 |
6361 |
+--- a/drivers/media/dvb-frontends/tda10071.c |
6362 |
++++ b/drivers/media/dvb-frontends/tda10071.c |
6363 |
+@@ -470,10 +470,11 @@ static int tda10071_read_status(struct dvb_frontend *fe, enum fe_status *status) |
6364 |
+ goto error; |
6365 |
+ |
6366 |
+ if (dev->delivery_system == SYS_DVBS) { |
6367 |
+- dev->dvbv3_ber = buf[0] << 24 | buf[1] << 16 | |
6368 |
+- buf[2] << 8 | buf[3] << 0; |
6369 |
+- dev->post_bit_error += buf[0] << 24 | buf[1] << 16 | |
6370 |
+- buf[2] << 8 | buf[3] << 0; |
6371 |
++ u32 bit_error = buf[0] << 24 | buf[1] << 16 | |
6372 |
++ buf[2] << 8 | buf[3] << 0; |
6373 |
++ |
6374 |
++ dev->dvbv3_ber = bit_error; |
6375 |
++ dev->post_bit_error += bit_error; |
6376 |
+ c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER; |
6377 |
+ c->post_bit_error.stat[0].uvalue = dev->post_bit_error; |
6378 |
+ dev->block_error += buf[4] << 8 | buf[5] << 0; |
6379 |
+diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c |
6380 |
+index 42805dfbffeb9..06edbe8749c64 100644 |
6381 |
+--- a/drivers/media/i2c/smiapp/smiapp-core.c |
6382 |
++++ b/drivers/media/i2c/smiapp/smiapp-core.c |
6383 |
+@@ -2327,11 +2327,12 @@ smiapp_sysfs_nvm_read(struct device *dev, struct device_attribute *attr, |
6384 |
+ if (rval < 0) { |
6385 |
+ if (rval != -EBUSY && rval != -EAGAIN) |
6386 |
+ pm_runtime_set_active(&client->dev); |
6387 |
+- pm_runtime_put(&client->dev); |
6388 |
++ pm_runtime_put_noidle(&client->dev); |
6389 |
+ return -ENODEV; |
6390 |
+ } |
6391 |
+ |
6392 |
+ if (smiapp_read_nvm(sensor, sensor->nvm)) { |
6393 |
++ pm_runtime_put(&client->dev); |
6394 |
+ dev_err(&client->dev, "nvm read failed\n"); |
6395 |
+ return -ENODEV; |
6396 |
+ } |
6397 |
+diff --git a/drivers/media/mc/mc-device.c b/drivers/media/mc/mc-device.c |
6398 |
+index e19df5165e78c..da80883511352 100644 |
6399 |
+--- a/drivers/media/mc/mc-device.c |
6400 |
++++ b/drivers/media/mc/mc-device.c |
6401 |
+@@ -575,6 +575,38 @@ static void media_device_release(struct media_devnode *devnode) |
6402 |
+ dev_dbg(devnode->parent, "Media device released\n"); |
6403 |
+ } |
6404 |
+ |
6405 |
++static void __media_device_unregister_entity(struct media_entity *entity) |
6406 |
++{ |
6407 |
++ struct media_device *mdev = entity->graph_obj.mdev; |
6408 |
++ struct media_link *link, *tmp; |
6409 |
++ struct media_interface *intf; |
6410 |
++ unsigned int i; |
6411 |
++ |
6412 |
++ ida_free(&mdev->entity_internal_idx, entity->internal_idx); |
6413 |
++ |
6414 |
++ /* Remove all interface links pointing to this entity */ |
6415 |
++ list_for_each_entry(intf, &mdev->interfaces, graph_obj.list) { |
6416 |
++ list_for_each_entry_safe(link, tmp, &intf->links, list) { |
6417 |
++ if (link->entity == entity) |
6418 |
++ __media_remove_intf_link(link); |
6419 |
++ } |
6420 |
++ } |
6421 |
++ |
6422 |
++ /* Remove all data links that belong to this entity */ |
6423 |
++ __media_entity_remove_links(entity); |
6424 |
++ |
6425 |
++ /* Remove all pads that belong to this entity */ |
6426 |
++ for (i = 0; i < entity->num_pads; i++) |
6427 |
++ media_gobj_destroy(&entity->pads[i].graph_obj); |
6428 |
++ |
6429 |
++ /* Remove the entity */ |
6430 |
++ media_gobj_destroy(&entity->graph_obj); |
6431 |
++ |
6432 |
++ /* invoke entity_notify callbacks to handle entity removal?? */ |
6433 |
++ |
6434 |
++ entity->graph_obj.mdev = NULL; |
6435 |
++} |
6436 |
++ |
6437 |
+ /** |
6438 |
+ * media_device_register_entity - Register an entity with a media device |
6439 |
+ * @mdev: The media device |
6440 |
+@@ -632,6 +664,7 @@ int __must_check media_device_register_entity(struct media_device *mdev, |
6441 |
+ */ |
6442 |
+ ret = media_graph_walk_init(&new, mdev); |
6443 |
+ if (ret) { |
6444 |
++ __media_device_unregister_entity(entity); |
6445 |
+ mutex_unlock(&mdev->graph_mutex); |
6446 |
+ return ret; |
6447 |
+ } |
6448 |
+@@ -644,38 +677,6 @@ int __must_check media_device_register_entity(struct media_device *mdev, |
6449 |
+ } |
6450 |
+ EXPORT_SYMBOL_GPL(media_device_register_entity); |
6451 |
+ |
6452 |
+-static void __media_device_unregister_entity(struct media_entity *entity) |
6453 |
+-{ |
6454 |
+- struct media_device *mdev = entity->graph_obj.mdev; |
6455 |
+- struct media_link *link, *tmp; |
6456 |
+- struct media_interface *intf; |
6457 |
+- unsigned int i; |
6458 |
+- |
6459 |
+- ida_free(&mdev->entity_internal_idx, entity->internal_idx); |
6460 |
+- |
6461 |
+- /* Remove all interface links pointing to this entity */ |
6462 |
+- list_for_each_entry(intf, &mdev->interfaces, graph_obj.list) { |
6463 |
+- list_for_each_entry_safe(link, tmp, &intf->links, list) { |
6464 |
+- if (link->entity == entity) |
6465 |
+- __media_remove_intf_link(link); |
6466 |
+- } |
6467 |
+- } |
6468 |
+- |
6469 |
+- /* Remove all data links that belong to this entity */ |
6470 |
+- __media_entity_remove_links(entity); |
6471 |
+- |
6472 |
+- /* Remove all pads that belong to this entity */ |
6473 |
+- for (i = 0; i < entity->num_pads; i++) |
6474 |
+- media_gobj_destroy(&entity->pads[i].graph_obj); |
6475 |
+- |
6476 |
+- /* Remove the entity */ |
6477 |
+- media_gobj_destroy(&entity->graph_obj); |
6478 |
+- |
6479 |
+- /* invoke entity_notify callbacks to handle entity removal?? */ |
6480 |
+- |
6481 |
+- entity->graph_obj.mdev = NULL; |
6482 |
+-} |
6483 |
+- |
6484 |
+ void media_device_unregister_entity(struct media_entity *entity) |
6485 |
+ { |
6486 |
+ struct media_device *mdev = entity->graph_obj.mdev; |
6487 |
+diff --git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c |
6488 |
+index 59ae7a1e63bc2..05b80a66e80ed 100644 |
6489 |
+--- a/drivers/media/platform/qcom/venus/vdec.c |
6490 |
++++ b/drivers/media/platform/qcom/venus/vdec.c |
6491 |
+@@ -987,13 +987,14 @@ static int vdec_stop_capture(struct venus_inst *inst) |
6492 |
+ ret = hfi_session_flush(inst, HFI_FLUSH_OUTPUT); |
6493 |
+ vdec_cancel_dst_buffers(inst); |
6494 |
+ inst->codec_state = VENUS_DEC_STATE_CAPTURE_SETUP; |
6495 |
+- INIT_LIST_HEAD(&inst->registeredbufs); |
6496 |
+ venus_helper_free_dpb_bufs(inst); |
6497 |
+ break; |
6498 |
+ default: |
6499 |
+- return 0; |
6500 |
++ break; |
6501 |
+ } |
6502 |
+ |
6503 |
++ INIT_LIST_HEAD(&inst->registeredbufs); |
6504 |
++ |
6505 |
+ return ret; |
6506 |
+ } |
6507 |
+ |
6508 |
+diff --git a/drivers/media/platform/ti-vpe/cal.c b/drivers/media/platform/ti-vpe/cal.c |
6509 |
+index 955a49b8e9c08..f06408009a9c2 100644 |
6510 |
+--- a/drivers/media/platform/ti-vpe/cal.c |
6511 |
++++ b/drivers/media/platform/ti-vpe/cal.c |
6512 |
+@@ -678,12 +678,13 @@ static void pix_proc_config(struct cal_ctx *ctx) |
6513 |
+ } |
6514 |
+ |
6515 |
+ static void cal_wr_dma_config(struct cal_ctx *ctx, |
6516 |
+- unsigned int width) |
6517 |
++ unsigned int width, unsigned int height) |
6518 |
+ { |
6519 |
+ u32 val; |
6520 |
+ |
6521 |
+ val = reg_read(ctx->dev, CAL_WR_DMA_CTRL(ctx->csi2_port)); |
6522 |
+ set_field(&val, ctx->csi2_port, CAL_WR_DMA_CTRL_CPORT_MASK); |
6523 |
++ set_field(&val, height, CAL_WR_DMA_CTRL_YSIZE_MASK); |
6524 |
+ set_field(&val, CAL_WR_DMA_CTRL_DTAG_PIX_DAT, |
6525 |
+ CAL_WR_DMA_CTRL_DTAG_MASK); |
6526 |
+ set_field(&val, CAL_WR_DMA_CTRL_MODE_CONST, |
6527 |
+@@ -1306,7 +1307,8 @@ static int cal_start_streaming(struct vb2_queue *vq, unsigned int count) |
6528 |
+ csi2_lane_config(ctx); |
6529 |
+ csi2_ctx_config(ctx); |
6530 |
+ pix_proc_config(ctx); |
6531 |
+- cal_wr_dma_config(ctx, ctx->v_fmt.fmt.pix.bytesperline); |
6532 |
++ cal_wr_dma_config(ctx, ctx->v_fmt.fmt.pix.bytesperline, |
6533 |
++ ctx->v_fmt.fmt.pix.height); |
6534 |
+ cal_wr_dma_addr(ctx, addr); |
6535 |
+ csi2_ppi_enable(ctx); |
6536 |
+ |
6537 |
+diff --git a/drivers/media/usb/go7007/go7007-usb.c b/drivers/media/usb/go7007/go7007-usb.c |
6538 |
+index ff2aa057c1fbc..f889c9d740cd1 100644 |
6539 |
+--- a/drivers/media/usb/go7007/go7007-usb.c |
6540 |
++++ b/drivers/media/usb/go7007/go7007-usb.c |
6541 |
+@@ -1044,6 +1044,7 @@ static int go7007_usb_probe(struct usb_interface *intf, |
6542 |
+ struct go7007_usb *usb; |
6543 |
+ const struct go7007_usb_board *board; |
6544 |
+ struct usb_device *usbdev = interface_to_usbdev(intf); |
6545 |
++ struct usb_host_endpoint *ep; |
6546 |
+ unsigned num_i2c_devs; |
6547 |
+ char *name; |
6548 |
+ int video_pipe, i, v_urb_len; |
6549 |
+@@ -1140,7 +1141,8 @@ static int go7007_usb_probe(struct usb_interface *intf, |
6550 |
+ if (usb->intr_urb->transfer_buffer == NULL) |
6551 |
+ goto allocfail; |
6552 |
+ |
6553 |
+- if (go->board_id == GO7007_BOARDID_SENSORAY_2250) |
6554 |
++ ep = usb->usbdev->ep_in[4]; |
6555 |
++ if (usb_endpoint_type(&ep->desc) == USB_ENDPOINT_XFER_BULK) |
6556 |
+ usb_fill_bulk_urb(usb->intr_urb, usb->usbdev, |
6557 |
+ usb_rcvbulkpipe(usb->usbdev, 4), |
6558 |
+ usb->intr_urb->transfer_buffer, 2*sizeof(u16), |
6559 |
+diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c |
6560 |
+index 23276a80e3b48..96d02b6f06fd8 100644 |
6561 |
+--- a/drivers/mfd/mfd-core.c |
6562 |
++++ b/drivers/mfd/mfd-core.c |
6563 |
+@@ -28,6 +28,11 @@ int mfd_cell_enable(struct platform_device *pdev) |
6564 |
+ const struct mfd_cell *cell = mfd_get_cell(pdev); |
6565 |
+ int err = 0; |
6566 |
+ |
6567 |
++ if (!cell->enable) { |
6568 |
++ dev_dbg(&pdev->dev, "No .enable() call-back registered\n"); |
6569 |
++ return 0; |
6570 |
++ } |
6571 |
++ |
6572 |
+ /* only call enable hook if the cell wasn't previously enabled */ |
6573 |
+ if (atomic_inc_return(cell->usage_count) == 1) |
6574 |
+ err = cell->enable(pdev); |
6575 |
+@@ -45,6 +50,11 @@ int mfd_cell_disable(struct platform_device *pdev) |
6576 |
+ const struct mfd_cell *cell = mfd_get_cell(pdev); |
6577 |
+ int err = 0; |
6578 |
+ |
6579 |
++ if (!cell->disable) { |
6580 |
++ dev_dbg(&pdev->dev, "No .disable() call-back registered\n"); |
6581 |
++ return 0; |
6582 |
++ } |
6583 |
++ |
6584 |
+ /* only disable if no other clients are using it */ |
6585 |
+ if (atomic_dec_return(cell->usage_count) == 0) |
6586 |
+ err = cell->disable(pdev); |
6587 |
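The mfd-core guards make the enable/disable hooks optional: a cell that registers neither callback now gets a dev_dbg() note and a successful early return instead of a NULL function-pointer dereference, and since both paths bail out before touching usage_count, the reference counting stays balanced. A kernel-context sketch of the optional-callback guard (cell passed in directly for brevity):

static int cell_enable(struct platform_device *pdev,
		       const struct mfd_cell *cell)
{
	if (!cell->enable) {
		dev_dbg(&pdev->dev, "no .enable() callback registered\n");
		return 0;	/* optional hook: absence is not an error */
	}
	return cell->enable(pdev);
}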
+diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c |
6588 |
+index b7159e243323b..de14b5845f525 100644 |
6589 |
+--- a/drivers/mmc/core/mmc.c |
6590 |
++++ b/drivers/mmc/core/mmc.c |
6591 |
+@@ -297,7 +297,7 @@ static void mmc_manage_enhanced_area(struct mmc_card *card, u8 *ext_csd) |
6592 |
+ } |
6593 |
+ } |
6594 |
+ |
6595 |
+-static void mmc_part_add(struct mmc_card *card, unsigned int size, |
6596 |
++static void mmc_part_add(struct mmc_card *card, u64 size, |
6597 |
+ unsigned int part_cfg, char *name, int idx, bool ro, |
6598 |
+ int area_type) |
6599 |
+ { |
6600 |
+@@ -313,7 +313,7 @@ static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd) |
6601 |
+ { |
6602 |
+ int idx; |
6603 |
+ u8 hc_erase_grp_sz, hc_wp_grp_sz; |
6604 |
+- unsigned int part_size; |
6605 |
++ u64 part_size; |
6606 |
+ |
6607 |
+ /* |
6608 |
+ * General purpose partition feature support -- |
6609 |
+@@ -343,8 +343,7 @@ static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd) |
6610 |
+ (ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1] |
6611 |
+ << 8) + |
6612 |
+ ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3]; |
6613 |
+- part_size *= (size_t)(hc_erase_grp_sz * |
6614 |
+- hc_wp_grp_sz); |
6615 |
++ part_size *= (hc_erase_grp_sz * hc_wp_grp_sz); |
6616 |
+ mmc_part_add(card, part_size << 19, |
6617 |
+ EXT_CSD_PART_CONFIG_ACC_GP0 + idx, |
6618 |
+ "gp%d", idx, false, |
6619 |
+@@ -362,7 +361,7 @@ static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd) |
6620 |
+ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd) |
6621 |
+ { |
6622 |
+ int err = 0, idx; |
6623 |
+- unsigned int part_size; |
6624 |
++ u64 part_size; |
6625 |
+ struct device_node *np; |
6626 |
+ bool broken_hpi = false; |
6627 |
+ |
6628 |
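The mmc widening matters because a GP partition size is size_mult * hc_erase_grp_sz * hc_wp_grp_sz group units, later shifted left by 19 to get bytes; on large eMMC parts that product overflows 32-bit arithmetic, so part_size becomes u64 and the (size_t) cast (itself 32-bit on 32-bit kernels) is dropped. A standalone sketch of the overflow, with hypothetical register values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t mult = 0x2000;	/* hypothetical EXT_CSD_GP_SIZE_MULT value */
	uint32_t grp = 64 * 16;	/* hc_erase_grp_sz * hc_wp_grp_sz */

	/* 32-bit math: 8192 * 1024 = 2^23, then << 19 wraps to 0. */
	uint32_t narrow = (mult * grp) << 19;
	/* Widen first, as the fix does with u64 part_size: exact 4 TiB. */
	uint64_t wide = ((uint64_t)mult * grp) << 19;

	printf("narrow=%lu wide=%llu\n", (unsigned long)narrow,
	       (unsigned long long)wide);
	return 0;
}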
+diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c |
6629 |
+index a4f2d8cdca120..c8b9ab40a1027 100644 |
6630 |
+--- a/drivers/mtd/chips/cfi_cmdset_0002.c |
6631 |
++++ b/drivers/mtd/chips/cfi_cmdset_0002.c |
6632 |
+@@ -794,7 +794,6 @@ static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd) |
6633 |
+ kfree(mtd->eraseregions); |
6634 |
+ kfree(mtd); |
6635 |
+ kfree(cfi->cmdset_priv); |
6636 |
+- kfree(cfi->cfiq); |
6637 |
+ return NULL; |
6638 |
+ } |
6639 |
+ |
6640 |
+diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c |
6641 |
+index b9d5d55a5edb9..ef89947ee3191 100644 |
6642 |
+--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c |
6643 |
++++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c |
6644 |
+@@ -540,8 +540,10 @@ static int bch_set_geometry(struct gpmi_nand_data *this) |
6645 |
+ return ret; |
6646 |
+ |
6647 |
+ ret = pm_runtime_get_sync(this->dev); |
6648 |
+- if (ret < 0) |
6649 |
++ if (ret < 0) { |
6650 |
++ pm_runtime_put_autosuspend(this->dev); |
6651 |
+ return ret; |
6652 |
++ } |
6653 |
+ |
6654 |
+ /* |
6655 |
+ * Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this |
6656 |
+diff --git a/drivers/mtd/nand/raw/omap_elm.c b/drivers/mtd/nand/raw/omap_elm.c |
6657 |
+index 5502ffbdd1e6d..6e0e31eab7cce 100644 |
6658 |
+--- a/drivers/mtd/nand/raw/omap_elm.c |
6659 |
++++ b/drivers/mtd/nand/raw/omap_elm.c |
6660 |
+@@ -411,6 +411,7 @@ static int elm_probe(struct platform_device *pdev) |
6661 |
+ pm_runtime_enable(&pdev->dev); |
6662 |
+ if (pm_runtime_get_sync(&pdev->dev) < 0) { |
6663 |
+ ret = -EINVAL; |
6664 |
++ pm_runtime_put_sync(&pdev->dev); |
6665 |
+ pm_runtime_disable(&pdev->dev); |
6666 |
+ dev_err(&pdev->dev, "can't enable clock\n"); |
6667 |
+ return ret; |
6668 |
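pm_runtime_get_sync() raises the device's usage counter even when it returns an error, so a failing call must still be paired with a put or the device can never runtime-suspend again; the smiapp, gpmi-nand and omap_elm hunks in this patch all fix that same leak (with put_noidle, put_autosuspend and put_sync respectively, matching each driver's idle policy). A kernel-context sketch of the balanced pattern:

#include <linux/pm_runtime.h>

static int do_io(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* undo the counted get */
		return ret;
	}

	/* ... device is powered; do the work ... */

	pm_runtime_put_autosuspend(dev);
	return 0;
}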
+diff --git a/drivers/mtd/parsers/cmdlinepart.c b/drivers/mtd/parsers/cmdlinepart.c |
6669 |
+index c86f2db8c882d..0625b25620ca7 100644 |
6670 |
+--- a/drivers/mtd/parsers/cmdlinepart.c |
6671 |
++++ b/drivers/mtd/parsers/cmdlinepart.c |
6672 |
+@@ -218,12 +218,29 @@ static int mtdpart_setup_real(char *s) |
6673 |
+ struct cmdline_mtd_partition *this_mtd; |
6674 |
+ struct mtd_partition *parts; |
6675 |
+ int mtd_id_len, num_parts; |
6676 |
+- char *p, *mtd_id; |
6677 |
++ char *p, *mtd_id, *semicol; |
6678 |
++ |
6679 |
++ /* |
6680 |
++ * Replace the first ';' by a NULL char so strrchr can work |
6681 |
++ * properly. |
6682 |
++ */ |
6683 |
++ semicol = strchr(s, ';'); |
6684 |
++ if (semicol) |
6685 |
++ *semicol = '\0'; |
6686 |
+ |
6687 |
+ mtd_id = s; |
6688 |
+ |
6689 |
+- /* fetch <mtd-id> */ |
6690 |
+- p = strchr(s, ':'); |
6691 |
++ /* |
6692 |
++ * fetch <mtd-id>. We use strrchr to ignore all ':' that could |
6693 |
++ * be present in the MTD name, only the last one is interpreted |
6694 |
++ * as an <mtd-id>/<part-definition> separator. |
6695 |
++ */ |
6696 |
++ p = strrchr(s, ':'); |
6697 |
++ |
6698 |
++ /* Restore the ';' now. */ |
6699 |
++ if (semicol) |
6700 |
++ *semicol = ';'; |
6701 |
++ |
6702 |
+ if (!p) { |
6703 |
+ pr_err("no mtd-id\n"); |
6704 |
+ return -EINVAL; |
6705 |
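The cmdlinepart fix lets an <mtd-id> itself contain ':' characters: the parser temporarily NUL-terminates at the first ';' so the search cannot run into the next device's definition, takes the last ':' in that window with strrchr() as the id/partition separator, then restores the ';'. A standalone sketch of the same scoped-strrchr trick on a made-up command line:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char spec[] = "nand0:my:odd:name:2M(kernel),-(rootfs);nor0:boot,env";
	char *semicol, *sep;

	semicol = strchr(spec, ';');	/* end of this device's definition */
	if (semicol)
		*semicol = '\0';	/* limit strrchr to this device only */

	sep = strrchr(spec, ':');	/* last ':' splits id from partitions */
	if (sep)
		printf("mtd-id: %.*s\nparts : %s\n",
		       (int)(sep - spec), spec, sep + 1);

	if (semicol)
		*semicol = ';';		/* restore the shared command line */
	return 0;
}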
+diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c
+index c44c8470247e1..b486250923c5a 100644
+--- a/drivers/mtd/ubi/fastmap-wl.c
++++ b/drivers/mtd/ubi/fastmap-wl.c
+@@ -39,6 +39,13 @@ static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root)
+ 	return victim;
+ }
+
++static inline void return_unused_peb(struct ubi_device *ubi,
++				     struct ubi_wl_entry *e)
++{
++	wl_tree_add(e, &ubi->free);
++	ubi->free_count++;
++}
++
+ /**
+  * return_unused_pool_pebs - returns unused PEB to the free tree.
+  * @ubi: UBI device description object
+@@ -52,23 +59,10 @@ static void return_unused_pool_pebs(struct ubi_device *ubi,
+
+ 	for (i = pool->used; i < pool->size; i++) {
+ 		e = ubi->lookuptbl[pool->pebs[i]];
+-		wl_tree_add(e, &ubi->free);
+-		ubi->free_count++;
++		return_unused_peb(ubi, e);
+ 	}
+ }
+
+-static int anchor_pebs_available(struct rb_root *root)
+-{
+-	struct rb_node *p;
+-	struct ubi_wl_entry *e;
+-
+-	ubi_rb_for_each_entry(p, e, root, u.rb)
+-		if (e->pnum < UBI_FM_MAX_START)
+-			return 1;
+-
+-	return 0;
+-}
+-
+ /**
+  * ubi_wl_get_fm_peb - find a physical erase block with a given maximal number.
+  * @ubi: UBI device description object
+@@ -277,8 +271,26 @@ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
+ int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
+ {
+ 	struct ubi_work *wrk;
++	struct ubi_wl_entry *anchor;
+
+ 	spin_lock(&ubi->wl_lock);
++
++	/* Do we already have an anchor? */
++	if (ubi->fm_anchor) {
++		spin_unlock(&ubi->wl_lock);
++		return 0;
++	}
++
++	/* See if we can find an anchor PEB on the list of free PEBs */
++	anchor = ubi_wl_get_fm_peb(ubi, 1);
++	if (anchor) {
++		ubi->fm_anchor = anchor;
++		spin_unlock(&ubi->wl_lock);
++		return 0;
++	}
++
++	/* No luck, trigger wear leveling to produce a new anchor PEB */
++	ubi->fm_do_produce_anchor = 1;
+ 	if (ubi->wl_scheduled) {
+ 		spin_unlock(&ubi->wl_lock);
+ 		return 0;
+@@ -294,7 +306,6 @@ int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
+ 		return -ENOMEM;
+ 	}
+
+-	wrk->anchor = 1;
+ 	wrk->func = &wear_leveling_worker;
+ 	__schedule_ubi_work(ubi, wrk);
+ 	return 0;
+@@ -356,6 +367,11 @@ static void ubi_fastmap_close(struct ubi_device *ubi)
+ 	return_unused_pool_pebs(ubi, &ubi->fm_pool);
+ 	return_unused_pool_pebs(ubi, &ubi->fm_wl_pool);
+
++	if (ubi->fm_anchor) {
++		return_unused_peb(ubi, ubi->fm_anchor);
++		ubi->fm_anchor = NULL;
++	}
++
+ 	if (ubi->fm) {
+ 		for (i = 0; i < ubi->fm->used_blocks; i++)
+ 			kfree(ubi->fm->e[i]);
+diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
+index 604772fc4a965..53f448e7433a9 100644
+--- a/drivers/mtd/ubi/fastmap.c
++++ b/drivers/mtd/ubi/fastmap.c
+@@ -1543,14 +1543,6 @@ int ubi_update_fastmap(struct ubi_device *ubi)
+ 		return 0;
+ 	}
+
+-	ret = ubi_ensure_anchor_pebs(ubi);
+-	if (ret) {
+-		up_write(&ubi->fm_eba_sem);
+-		up_write(&ubi->work_sem);
+-		up_write(&ubi->fm_protect);
+-		return ret;
+-	}
+-
+ 	new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
+ 	if (!new_fm) {
+ 		up_write(&ubi->fm_eba_sem);
+@@ -1621,7 +1613,8 @@ int ubi_update_fastmap(struct ubi_device *ubi)
+ 	}
+
+ 	spin_lock(&ubi->wl_lock);
+-	tmp_e = ubi_wl_get_fm_peb(ubi, 1);
++	tmp_e = ubi->fm_anchor;
++	ubi->fm_anchor = NULL;
+ 	spin_unlock(&ubi->wl_lock);
+
+ 	if (old_fm) {
+@@ -1673,6 +1666,9 @@ out_unlock:
+ 	up_write(&ubi->work_sem);
+ 	up_write(&ubi->fm_protect);
+ 	kfree(old_fm);
++
++	ubi_ensure_anchor_pebs(ubi);
++
+ 	return ret;
+
+ err:
+diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
+index 721b6aa7936cf..a173eb707bddb 100644
+--- a/drivers/mtd/ubi/ubi.h
++++ b/drivers/mtd/ubi/ubi.h
+@@ -491,6 +491,8 @@ struct ubi_debug_info {
+  * @fm_work: fastmap work queue
+  * @fm_work_scheduled: non-zero if fastmap work was scheduled
+  * @fast_attach: non-zero if UBI was attached by fastmap
++ * @fm_anchor: The next anchor PEB to use for fastmap
++ * @fm_do_produce_anchor: If true produce an anchor PEB in wl
+  *
+  * @used: RB-tree of used physical eraseblocks
+  * @erroneous: RB-tree of erroneous used physical eraseblocks
+@@ -599,6 +601,8 @@ struct ubi_device {
+ 	struct work_struct fm_work;
+ 	int fm_work_scheduled;
+ 	int fast_attach;
++	struct ubi_wl_entry *fm_anchor;
++	int fm_do_produce_anchor;
+
+ 	/* Wear-leveling sub-system's stuff */
+ 	struct rb_root used;
+@@ -789,7 +793,6 @@ struct ubi_attach_info {
+  * @vol_id: the volume ID on which this erasure is being performed
+  * @lnum: the logical eraseblock number
+  * @torture: if the physical eraseblock has to be tortured
+- * @anchor: produce a anchor PEB to by used by fastmap
+  *
+  * The @func pointer points to the worker function. If the @shutdown argument is
+  * not zero, the worker has to free the resources and exit immediately as the
+@@ -805,7 +808,6 @@ struct ubi_work {
+ 	int vol_id;
+ 	int lnum;
+ 	int torture;
+-	int anchor;
+ };
+
+ #include "debug.h"
+diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
+index 3fcdefe2714d0..5d77a38dba542 100644
+--- a/drivers/mtd/ubi/wl.c
++++ b/drivers/mtd/ubi/wl.c
+@@ -339,13 +339,6 @@ static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi,
+ 		}
+ 	}
+
+-	/* If no fastmap has been written and this WL entry can be used
+-	 * as anchor PEB, hold it back and return the second best WL entry
+-	 * such that fastmap can use the anchor PEB later. */
+-	if (prev_e && !ubi->fm_disabled &&
+-	    !ubi->fm && e->pnum < UBI_FM_MAX_START)
+-		return prev_e;
+-
+ 	return e;
+ }
+
+@@ -656,9 +649,6 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
+ {
+ 	int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
+ 	int erase = 0, keep = 0, vol_id = -1, lnum = -1;
+-#ifdef CONFIG_MTD_UBI_FASTMAP
+-	int anchor = wrk->anchor;
+-#endif
+ 	struct ubi_wl_entry *e1, *e2;
+ 	struct ubi_vid_io_buf *vidb;
+ 	struct ubi_vid_hdr *vid_hdr;
+@@ -698,11 +688,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
+ 	}
+
+ #ifdef CONFIG_MTD_UBI_FASTMAP
+-	/* Check whether we need to produce an anchor PEB */
+-	if (!anchor)
+-		anchor = !anchor_pebs_available(&ubi->free);
+-
+-	if (anchor) {
++	if (ubi->fm_do_produce_anchor) {
+ 		e1 = find_anchor_wl_entry(&ubi->used);
+ 		if (!e1)
+ 			goto out_cancel;
+@@ -719,6 +705,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
+ 		self_check_in_wl_tree(ubi, e1, &ubi->used);
+ 		rb_erase(&e1->u.rb, &ubi->used);
+ 		dbg_wl("anchor-move PEB %d to PEB %d", e1->pnum, e2->pnum);
++		ubi->fm_do_produce_anchor = 0;
+ 	} else if (!ubi->scrub.rb_node) {
+ #else
+ 	if (!ubi->scrub.rb_node) {
+@@ -1051,7 +1038,6 @@ static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
+ 		goto out_cancel;
+ 	}
+
+-	wrk->anchor = 0;
+ 	wrk->func = &wear_leveling_worker;
+ 	if (nested)
+ 		__schedule_ubi_work(ubi, wrk);
+@@ -1093,8 +1079,15 @@ static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
+ 	err = sync_erase(ubi, e, wl_wrk->torture);
+ 	if (!err) {
+ 		spin_lock(&ubi->wl_lock);
+-		wl_tree_add(e, &ubi->free);
+-		ubi->free_count++;
++
++		if (!ubi->fm_anchor && e->pnum < UBI_FM_MAX_START) {
++			ubi->fm_anchor = e;
++			ubi->fm_do_produce_anchor = 0;
++		} else {
++			wl_tree_add(e, &ubi->free);
++			ubi->free_count++;
++		}
++
+ 		spin_unlock(&ubi->wl_lock);
+
+ 		/*
+@@ -1882,6 +1875,9 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
+ 	if (err)
+ 		goto out_free;
+
++#ifdef CONFIG_MTD_UBI_FASTMAP
++	ubi_ensure_anchor_pebs(ubi);
++#endif
+ 	return 0;
+
+ out_free:
+diff --git a/drivers/mtd/ubi/wl.h b/drivers/mtd/ubi/wl.h
+index a9e2d669acd81..c93a532937863 100644
+--- a/drivers/mtd/ubi/wl.h
++++ b/drivers/mtd/ubi/wl.h
+@@ -2,7 +2,6 @@
+ #ifndef UBI_WL_H
+ #define UBI_WL_H
+ #ifdef CONFIG_MTD_UBI_FASTMAP
+-static int anchor_pebs_available(struct rb_root *root);
+ static void update_fastmap_work_fn(struct work_struct *wrk);
+ static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root);
+ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi);
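
The UBI changes above replace the on-demand anchor search with a cached ubi->fm_anchor that ubi_update_fastmap() consumes and then replenishes. A rough userspace sketch of that cache-then-replenish pattern (toy types and pool; the real code holds ubi->wl_lock around these steps):

#include <stdio.h>

/* Toy model of the fastmap anchor cache: one special block is kept
 * aside (fm_anchor) so the fastmap writer never has to search for it. */
struct peb { int pnum; int free; };

static struct peb pool[4] = { {0, 1}, {1, 1}, {7, 1}, {9, 1} };
#define FM_MAX_START 2		/* stand-in for UBI_FM_MAX_START */

static struct peb *fm_anchor;
static int fm_do_produce_anchor;

static struct peb *find_free_anchor(void)
{
	for (int i = 0; i < 4; i++)
		if (pool[i].free && pool[i].pnum < FM_MAX_START)
			return &pool[i];
	return NULL;
}

static void ensure_anchor(void)
{
	if (fm_anchor)
		return;				/* cache already filled */
	fm_anchor = find_free_anchor();		/* cheap path: take a free one */
	if (fm_anchor)
		fm_anchor->free = 0;
	else
		fm_do_produce_anchor = 1;	/* the worker must produce one */
}

int main(void)
{
	ensure_anchor();
	if (fm_anchor)
		printf("anchor = PEB %d\n", fm_anchor->pnum);

	/* a fastmap update consumes the cached anchor, then replenishes */
	fm_anchor = NULL;
	ensure_anchor();
	if (fm_anchor)
		printf("next anchor = PEB %d\n", fm_anchor->pnum);
	return 0;
}
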
+diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+index 7a248cc1055a3..7af7cc7c8669a 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+@@ -2654,8 +2654,10 @@ static int setup_dpni(struct fsl_mc_device *ls_dev)
+
+ 	priv->cls_rules = devm_kzalloc(dev, sizeof(struct dpaa2_eth_cls_rule) *
+ 				       dpaa2_eth_fs_count(priv), GFP_KERNEL);
+-	if (!priv->cls_rules)
++	if (!priv->cls_rules) {
++		err = -ENOMEM;
+ 		goto close;
++	}
+
+ 	return 0;
+
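
The dpaa2 hunk fixes an error path that jumped to the cleanup label without setting an error code, so the function could report success after a failed allocation. A generic sketch of the pattern (plain C, illustrative names):

#include <stdlib.h>
#include <errno.h>

/* Every failure path must set 'err' before jumping to the cleanup
 * label; 'goto close' with a stale err == 0 silently reports success. */
static int setup(void)
{
	int err = 0;
	void *rules, *handle = malloc(64);

	if (!handle)
		return -ENOMEM;

	rules = malloc(128);
	if (!rules) {
		err = -ENOMEM;	/* the fix: record the failure... */
		goto close;	/* ...before taking the cleanup path */
	}

	free(rules);
	free(handle);
	return 0;

close:
	free(handle);
	return err;
}

int main(void) { return setup() ? 1 : 0; }
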
+diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
+index f93ed70709c65..a2ee28e487a6f 100644
+--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
++++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
+@@ -542,8 +542,13 @@ void e1000_reinit_locked(struct e1000_adapter *adapter)
+ 	WARN_ON(in_interrupt());
+ 	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
+ 		msleep(1);
+-	e1000_down(adapter);
+-	e1000_up(adapter);
++
++	/* only run the task if not already down */
++	if (!test_bit(__E1000_DOWN, &adapter->flags)) {
++		e1000_down(adapter);
++		e1000_up(adapter);
++	}
++
+ 	clear_bit(__E1000_RESETTING, &adapter->flags);
+ }
+
+@@ -1433,10 +1438,15 @@ int e1000_close(struct net_device *netdev)
+ 	struct e1000_hw *hw = &adapter->hw;
+ 	int count = E1000_CHECK_RESET_COUNT;
+
+-	while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
++	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags) && count--)
+ 		usleep_range(10000, 20000);
+
+-	WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
++	WARN_ON(count < 0);
++
++	/* signal that we're down so that the reset task will no longer run */
++	set_bit(__E1000_DOWN, &adapter->flags);
++	clear_bit(__E1000_RESETTING, &adapter->flags);
++
+ 	e1000_down(adapter);
+ 	e1000_power_down_phy(adapter);
+ 	e1000_free_irq(adapter);
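
e1000_close() now uses test_and_set_bit() so that waiting for a concurrent reset and claiming the __E1000_RESETTING bit happen as one atomic step, and __E1000_DOWN is set before the bit is released so the deferred reset task bails out. A minimal C11 analogue of that claim-then-mark ordering (illustrative only; the kernel helpers operate on a shared flags word):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum { F_RESETTING = 1u << 0, F_DOWN = 1u << 1 };
static _Atomic unsigned flags;

/* test_and_set_bit(): atomically set the bit, report its old value */
static bool test_and_set(unsigned bit)
{
	return atomic_fetch_or(&flags, bit) & bit;
}

static void dev_close(void)
{
	int count = 100;

	/* wait for an in-flight reset AND claim the bit in one step */
	while (test_and_set(F_RESETTING) && count--)
		;				/* usleep_range() in the driver */

	/* mark the device down before dropping RESETTING, so a reset
	 * task that wakes up now sees F_DOWN and does nothing */
	atomic_fetch_or(&flags, F_DOWN);
	atomic_fetch_and(&flags, ~(unsigned)F_RESETTING);
}

int main(void)
{
	dev_close();
	printf("flags = %#x\n", atomic_load(&flags));
	return 0;
}
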
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
+index 34124c213d27c..222ae76809aa1 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
+@@ -3077,9 +3077,6 @@ static int iavf_delete_clsflower(struct iavf_adapter *adapter,
+ static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter,
+ 				    struct flow_cls_offload *cls_flower)
+ {
+-	if (cls_flower->common.chain_index)
+-		return -EOPNOTSUPP;
+-
+ 	switch (cls_flower->command) {
+ 	case FLOW_CLS_REPLACE:
+ 		return iavf_configure_clsflower(adapter, cls_flower);
+@@ -3103,6 +3100,11 @@ static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter,
+ static int iavf_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+ 				  void *cb_priv)
+ {
++	struct iavf_adapter *adapter = cb_priv;
++
++	if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data))
++		return -EOPNOTSUPP;
++
+ 	switch (type) {
+ 	case TC_SETUP_CLSFLOWER:
+ 		return iavf_setup_tc_cls_flower(cb_priv, type_data);
+diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
+index 62673e27af0e8..fc9ff985a62bd 100644
+--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
++++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
+@@ -2635,14 +2635,14 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
+ 	netdev_info(netdev, "Changing Tx descriptor count from %d to %d\n",
+ 		    vsi->tx_rings[0]->count, new_tx_cnt);
+
+-	tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq,
++	tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->num_txq,
+ 				sizeof(*tx_rings), GFP_KERNEL);
+ 	if (!tx_rings) {
+ 		err = -ENOMEM;
+ 		goto done;
+ 	}
+
+-	for (i = 0; i < vsi->alloc_txq; i++) {
++	ice_for_each_txq(vsi, i) {
+ 		/* clone ring and setup updated count */
+ 		tx_rings[i] = *vsi->tx_rings[i];
+ 		tx_rings[i].count = new_tx_cnt;
+@@ -2667,14 +2667,14 @@ process_rx:
+ 	netdev_info(netdev, "Changing Rx descriptor count from %d to %d\n",
+ 		    vsi->rx_rings[0]->count, new_rx_cnt);
+
+-	rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq,
++	rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->num_rxq,
+ 				sizeof(*rx_rings), GFP_KERNEL);
+ 	if (!rx_rings) {
+ 		err = -ENOMEM;
+ 		goto done;
+ 	}
+
+-	for (i = 0; i < vsi->alloc_rxq; i++) {
++	ice_for_each_rxq(vsi, i) {
+ 		/* clone ring and setup updated count */
+ 		rx_rings[i] = *vsi->rx_rings[i];
+ 		rx_rings[i].count = new_rx_cnt;
+@@ -2712,7 +2712,7 @@ process_link:
+ 		ice_down(vsi);
+
+ 		if (tx_rings) {
+-			for (i = 0; i < vsi->alloc_txq; i++) {
++			ice_for_each_txq(vsi, i) {
+ 				ice_free_tx_ring(vsi->tx_rings[i]);
+ 				*vsi->tx_rings[i] = tx_rings[i];
+ 			}
+@@ -2720,7 +2720,7 @@ process_link:
+ 		}
+
+ 		if (rx_rings) {
+-			for (i = 0; i < vsi->alloc_rxq; i++) {
++			ice_for_each_rxq(vsi, i) {
+ 				ice_free_rx_ring(vsi->rx_rings[i]);
+ 				/* copy the real tail offset */
+ 				rx_rings[i].tail = vsi->rx_rings[i]->tail;
+@@ -2744,7 +2744,7 @@ process_link:
+ free_tx:
+ 	/* error cleanup if the Rx allocations failed after getting Tx */
+ 	if (tx_rings) {
+-		for (i = 0; i < vsi->alloc_txq; i++)
++		ice_for_each_txq(vsi, i)
+ 			ice_free_tx_ring(&tx_rings[i]);
+ 		devm_kfree(&pf->pdev->dev, tx_rings);
+ 	}
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
+index 4456ce5325a74..a923c65532702 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
+@@ -4142,7 +4142,8 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+ 		cdev->mf_bits = BIT(QED_MF_LLH_MAC_CLSS) |
+ 				BIT(QED_MF_LLH_PROTO_CLSS) |
+ 				BIT(QED_MF_LL2_NON_UNICAST) |
+-				BIT(QED_MF_INTER_PF_SWITCH);
++				BIT(QED_MF_INTER_PF_SWITCH) |
++				BIT(QED_MF_DISABLE_ARFS);
+ 		break;
+ 	case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
+ 		cdev->mf_bits = BIT(QED_MF_LLH_MAC_CLSS) |
+@@ -4155,6 +4156,14 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+
+ 		DP_INFO(p_hwfn, "Multi function mode is 0x%lx\n",
+ 			cdev->mf_bits);
++
++		/* In CMT the PF is unknown when the GFS block processes the
++		 * packet. Therefore cannot use searcher as it has a per PF
++		 * database, and thus ARFS must be disabled.
++		 *
++		 */
++		if (QED_IS_CMT(cdev))
++			cdev->mf_bits |= BIT(QED_MF_DISABLE_ARFS);
+ 	}
+
+ 	DP_INFO(p_hwfn, "Multi function mode is 0x%lx\n",
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
+index 1a5fc2ae351c4..8a73482cb7a88 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
+@@ -2001,6 +2001,9 @@ void qed_arfs_mode_configure(struct qed_hwfn *p_hwfn,
+ 			     struct qed_ptt *p_ptt,
+ 			     struct qed_arfs_config_params *p_cfg_params)
+ {
++	if (test_bit(QED_MF_DISABLE_ARFS, &p_hwfn->cdev->mf_bits))
++		return;
++
+ 	if (p_cfg_params->mode != QED_FILTER_CONFIG_MODE_DISABLE) {
+ 		qed_gft_config(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
+ 			       p_cfg_params->tcp,
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
+index e72f9f1d2e94d..bc1f5b36b5bf2 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
+@@ -280,6 +280,8 @@ int qed_fill_dev_info(struct qed_dev *cdev,
+ 	dev_info->fw_eng = FW_ENGINEERING_VERSION;
+ 	dev_info->b_inter_pf_switch = test_bit(QED_MF_INTER_PF_SWITCH,
+ 					       &cdev->mf_bits);
++	if (!test_bit(QED_MF_DISABLE_ARFS, &cdev->mf_bits))
++		dev_info->b_arfs_capable = true;
+ 	dev_info->tx_switching = true;
+
+ 	if (hw_info->b_wol_support == QED_WOL_SUPPORT_PME)
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+index dcb5c917f3733..fb9c3ca5d36cc 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+@@ -96,6 +96,7 @@ static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf)
+ 		p_ramrod->personality = PERSONALITY_ETH;
+ 		break;
+ 	case QED_PCI_ETH_ROCE:
++	case QED_PCI_ETH_IWARP:
+ 		p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
+ 		break;
+ 	default:
+diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
+index c8bdbf057d5a2..5041994bf03fb 100644
+--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
++++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
+@@ -336,6 +336,9 @@ int qede_alloc_arfs(struct qede_dev *edev)
+ {
+ 	int i;
+
++	if (!edev->dev_info.common.b_arfs_capable)
++		return -EINVAL;
++
+ 	edev->arfs = vzalloc(sizeof(*edev->arfs));
+ 	if (!edev->arfs)
+ 		return -ENOMEM;
+diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
+index 2c3d654c84543..ce3e62e73e4cd 100644
+--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
++++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
+@@ -770,7 +770,7 @@ static void qede_init_ndev(struct qede_dev *edev)
+ 		      NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ 		      NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_TC;
+
+-	if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1)
++	if (edev->dev_info.common.b_arfs_capable)
+ 		hw_features |= NETIF_F_NTUPLE;
+
+ 	if (edev->dev_info.common.vxlan_enable ||
+@@ -2211,7 +2211,7 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode,
+ 	qede_vlan_mark_nonconfigured(edev);
+ 	edev->ops->fastpath_stop(edev->cdev);
+
+-	if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1) {
++	if (edev->dev_info.common.b_arfs_capable) {
+ 		qede_poll_for_freeing_arfs_filters(edev);
+ 		qede_free_arfs(edev);
+ 	}
+@@ -2278,10 +2278,9 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
+ 	if (rc)
+ 		goto err2;
+
+-	if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1) {
+-		rc = qede_alloc_arfs(edev);
+-		if (rc)
+-			DP_NOTICE(edev, "aRFS memory allocation failed\n");
++	if (qede_alloc_arfs(edev)) {
++		edev->ndev->features &= ~NETIF_F_NTUPLE;
++		edev->dev_info.common.b_arfs_capable = false;
+ 	}
+
+ 	qede_napi_add_enable(edev);
+diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
+index 6fa9852e3f97f..903212ad9bb2f 100644
+--- a/drivers/net/ethernet/realtek/r8169_main.c
++++ b/drivers/net/ethernet/realtek/r8169_main.c
+@@ -6256,8 +6256,7 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
+ 	if (unlikely(status & RxFIFOOver &&
+ 	    tp->mac_version == RTL_GIGA_MAC_VER_11)) {
+ 		netif_stop_queue(tp->dev);
+-		/* XXX - Hack alert. See rtl_task(). */
+-		set_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags);
++		rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
+ 	}
+
+ 	rtl_irq_disable(tp);
+diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
+index 2dacfc85b3baa..04e51af32178c 100644
+--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
++++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
+@@ -435,7 +435,7 @@ struct axienet_local {
+ 	void __iomem *regs;
+ 	void __iomem *dma_regs;
+
+-	struct tasklet_struct dma_err_tasklet;
++	struct work_struct dma_err_task;
+
+ 	int tx_irq;
+ 	int rx_irq;
+diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+index 479325eeaf8a0..bb6e52f3bdf9b 100644
+--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
++++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+@@ -437,9 +437,10 @@ static void axienet_setoptions(struct net_device *ndev, u32 options)
+ 	lp->options |= options;
+ }
+
+-static void __axienet_device_reset(struct axienet_local *lp)
++static int __axienet_device_reset(struct axienet_local *lp)
+ {
+ 	u32 timeout;
++
+ 	/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
+ 	 * process of Axi DMA takes a while to complete as all pending
+ 	 * commands/transfers will be flushed or completed during this
+@@ -455,9 +456,11 @@ static void __axienet_device_reset(struct axienet_local *lp)
+ 		if (--timeout == 0) {
+ 			netdev_err(lp->ndev, "%s: DMA reset timeout!\n",
+ 				   __func__);
+-			break;
++			return -ETIMEDOUT;
+ 		}
+ 	}
++
++	return 0;
+ }
+
+ /**
+@@ -470,13 +473,17 @@ static void __axienet_device_reset(struct axienet_local *lp)
+  * areconnected to Axi Ethernet reset lines, this in turn resets the Axi
+  * Ethernet core. No separate hardware reset is done for the Axi Ethernet
+  * core.
++ * Returns 0 on success or a negative error number otherwise.
+  */
+-static void axienet_device_reset(struct net_device *ndev)
++static int axienet_device_reset(struct net_device *ndev)
+ {
+ 	u32 axienet_status;
+ 	struct axienet_local *lp = netdev_priv(ndev);
++	int ret;
+
+-	__axienet_device_reset(lp);
++	ret = __axienet_device_reset(lp);
++	if (ret)
++		return ret;
+
+ 	lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
+ 	lp->options |= XAE_OPTION_VLAN;
+@@ -491,9 +498,11 @@ static void axienet_device_reset(struct net_device *ndev)
+ 		lp->options |= XAE_OPTION_JUMBO;
+ 	}
+
+-	if (axienet_dma_bd_init(ndev)) {
++	ret = axienet_dma_bd_init(ndev);
++	if (ret) {
+ 		netdev_err(ndev, "%s: descriptor allocation failed\n",
+ 			   __func__);
++		return ret;
+ 	}
+
+ 	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
+@@ -518,6 +527,8 @@ static void axienet_device_reset(struct net_device *ndev)
+ 	axienet_setoptions(ndev, lp->options);
+
+ 	netif_trans_update(ndev);
++
++	return 0;
+ }
+
+ /**
+@@ -806,7 +817,7 @@ static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
+ 		/* Write to the Rx channel control register */
+ 		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
+
+-		tasklet_schedule(&lp->dma_err_tasklet);
++		schedule_work(&lp->dma_err_task);
+ 		axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
+ 	}
+ out:
+@@ -855,7 +866,7 @@ static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
+ 		/* write to the Rx channel control register */
+ 		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
+
+-		tasklet_schedule(&lp->dma_err_tasklet);
++		schedule_work(&lp->dma_err_task);
+ 		axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
+ 	}
+ out:
+@@ -891,7 +902,7 @@ static irqreturn_t axienet_eth_irq(int irq, void *_ndev)
+ 	return IRQ_HANDLED;
+ }
+
+-static void axienet_dma_err_handler(unsigned long data);
++static void axienet_dma_err_handler(struct work_struct *work);
+
+ /**
+  * axienet_open - Driver open routine.
+@@ -921,8 +932,9 @@ static int axienet_open(struct net_device *ndev)
+ 	 */
+ 	mutex_lock(&lp->mii_bus->mdio_lock);
+ 	axienet_mdio_disable(lp);
+-	axienet_device_reset(ndev);
+-	ret = axienet_mdio_enable(lp);
++	ret = axienet_device_reset(ndev);
++	if (ret == 0)
++		ret = axienet_mdio_enable(lp);
+ 	mutex_unlock(&lp->mii_bus->mdio_lock);
+ 	if (ret < 0)
+ 		return ret;
+@@ -935,9 +947,8 @@ static int axienet_open(struct net_device *ndev)
+
+ 	phylink_start(lp->phylink);
+
+-	/* Enable tasklets for Axi DMA error handling */
+-	tasklet_init(&lp->dma_err_tasklet, axienet_dma_err_handler,
+-		     (unsigned long) lp);
++	/* Enable worker thread for Axi DMA error handling */
++	INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler);
+
+ 	/* Enable interrupts for Axi DMA Tx */
+ 	ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED,
+@@ -966,7 +977,7 @@ err_rx_irq:
+ err_tx_irq:
+ 	phylink_stop(lp->phylink);
+ 	phylink_disconnect_phy(lp->phylink);
+-	tasklet_kill(&lp->dma_err_tasklet);
++	cancel_work_sync(&lp->dma_err_task);
+ 	dev_err(lp->dev, "request_irq() failed\n");
+ 	return ret;
+ }
+@@ -1025,7 +1036,7 @@ static int axienet_stop(struct net_device *ndev)
+ 	axienet_mdio_enable(lp);
+ 	mutex_unlock(&lp->mii_bus->mdio_lock);
+
+-	tasklet_kill(&lp->dma_err_tasklet);
++	cancel_work_sync(&lp->dma_err_task);
+
+ 	if (lp->eth_irq > 0)
+ 		free_irq(lp->eth_irq, ndev);
+@@ -1505,17 +1516,18 @@ static const struct phylink_mac_ops axienet_phylink_ops = {
+ };
+
+ /**
+- * axienet_dma_err_handler - Tasklet handler for Axi DMA Error
+- * @data:	Data passed
++ * axienet_dma_err_handler - Work queue task for Axi DMA Error
++ * @work:	pointer to work_struct
+  *
+  * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the
+  * Tx/Rx BDs.
+  */
+-static void axienet_dma_err_handler(unsigned long data)
++static void axienet_dma_err_handler(struct work_struct *work)
+ {
+ 	u32 axienet_status;
+ 	u32 cr, i;
+-	struct axienet_local *lp = (struct axienet_local *) data;
++	struct axienet_local *lp = container_of(work, struct axienet_local,
++						dma_err_task);
+ 	struct net_device *ndev = lp->ndev;
+ 	struct axidma_bd *cur_p;
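
The axienet hunks convert the DMA error handler from a tasklet taking an opaque unsigned long into a work item, recovering the driver context with container_of() instead of a cast. A tiny standalone C illustration of that recovery step (the work_struct and container_of shapes are simplified stand-ins):

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel types */
struct work_struct { void (*func)(struct work_struct *w); };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct axienet_local {
	int tx_irq;
	struct work_struct dma_err_task;	/* embedded work item */
};

/* The handler receives the embedded member and walks back to the
 * containing structure; no opaque unsigned long cast is needed. */
static void dma_err_handler(struct work_struct *work)
{
	struct axienet_local *lp =
		container_of(work, struct axienet_local, dma_err_task);

	printf("recovered lp, tx_irq = %d\n", lp->tx_irq);
}

int main(void)
{
	struct axienet_local lp = { .tx_irq = 42 };

	lp.dma_err_task.func = dma_err_handler;
	lp.dma_err_task.func(&lp.dma_err_task);	/* what the workqueue does */
	return 0;
}
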
+diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c
+index 8dbccec6ac866..5945ac5f38eea 100644
+--- a/drivers/net/ieee802154/adf7242.c
++++ b/drivers/net/ieee802154/adf7242.c
+@@ -882,7 +882,9 @@ static int adf7242_rx(struct adf7242_local *lp)
+ 	int ret;
+ 	u8 lqi, len_u8, *data;
+
+-	adf7242_read_reg(lp, 0, &len_u8);
++	ret = adf7242_read_reg(lp, 0, &len_u8);
++	if (ret)
++		return ret;
+
+ 	len = len_u8;
+
+diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
+index 430c937861534..25dbea302fb6d 100644
+--- a/drivers/net/ieee802154/ca8210.c
++++ b/drivers/net/ieee802154/ca8210.c
+@@ -2924,6 +2924,7 @@ static int ca8210_dev_com_init(struct ca8210_priv *priv)
+ 	);
+ 	if (!priv->irq_workqueue) {
+ 		dev_crit(&priv->spi->dev, "alloc of irq_workqueue failed!\n");
++		destroy_workqueue(priv->mlme_workqueue);
+ 		return -ENOMEM;
+ 	}
+
+diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
+index da2d179430ca5..4c57e79e5779a 100644
+--- a/drivers/net/wireless/ath/ar5523/ar5523.c
++++ b/drivers/net/wireless/ath/ar5523/ar5523.c
+@@ -1770,6 +1770,8 @@ static const struct usb_device_id ar5523_id_table[] = {
+ 	AR5523_DEVICE_UX(0x0846, 0x4300),	/* Netgear / WG111U */
+ 	AR5523_DEVICE_UG(0x0846, 0x4250),	/* Netgear / WG111T */
+ 	AR5523_DEVICE_UG(0x0846, 0x5f00),	/* Netgear / WPN111 */
++	AR5523_DEVICE_UG(0x083a, 0x4506),	/* SMC / EZ Connect
++						   SMCWUSBT-G2 */
+ 	AR5523_DEVICE_UG(0x157e, 0x3006),	/* Umedia / AR5523_1 */
+ 	AR5523_DEVICE_UX(0x157e, 0x3205),	/* Umedia / AR5523_2 */
+ 	AR5523_DEVICE_UG(0x157e, 0x3006),	/* Umedia / TEW444UBEU */
+diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
+index bd2b5628f850b..04c50a26a4f47 100644
+--- a/drivers/net/wireless/ath/ath10k/debug.c
++++ b/drivers/net/wireless/ath/ath10k/debug.c
+@@ -1516,7 +1516,7 @@ static void ath10k_tpc_stats_print(struct ath10k_tpc_stats *tpc_stats,
+ 	*len += scnprintf(buf + *len, buf_len - *len,
+ 			  "No.  Preamble Rate_code ");
+
+-	for (i = 0; i < WMI_TPC_TX_N_CHAIN; i++)
++	for (i = 0; i < tpc_stats->num_tx_chain; i++)
+ 		*len += scnprintf(buf + *len, buf_len - *len,
+ 				  "tpc_value%d ", i);
+
+@@ -2532,6 +2532,7 @@ void ath10k_debug_destroy(struct ath10k *ar)
+ 	ath10k_debug_fw_stats_reset(ar);
+
+ 	kfree(ar->debug.tpc_stats);
++	kfree(ar->debug.tpc_stats_final);
+ }
+
+ int ath10k_debug_register(struct ath10k *ar)
+diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c
+index 9870d2d095c87..8fe626deadeb0 100644
+--- a/drivers/net/wireless/ath/ath10k/sdio.c
++++ b/drivers/net/wireless/ath/ath10k/sdio.c
+@@ -1582,23 +1582,33 @@ static int ath10k_sdio_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
+ 				     size_t buf_len)
+ {
+ 	int ret;
++	void *mem;
++
++	mem = kzalloc(buf_len, GFP_KERNEL);
++	if (!mem)
++		return -ENOMEM;
+
+ 	/* set window register to start read cycle */
+ 	ret = ath10k_sdio_write32(ar, MBOX_WINDOW_READ_ADDR_ADDRESS, address);
+ 	if (ret) {
+ 		ath10k_warn(ar, "failed to set mbox window read address: %d", ret);
+-		return ret;
++		goto out;
+ 	}
+
+ 	/* read the data */
+-	ret = ath10k_sdio_read(ar, MBOX_WINDOW_DATA_ADDRESS, buf, buf_len);
++	ret = ath10k_sdio_read(ar, MBOX_WINDOW_DATA_ADDRESS, mem, buf_len);
+ 	if (ret) {
+ 		ath10k_warn(ar, "failed to read from mbox window data address: %d\n",
+ 			    ret);
+-		return ret;
++		goto out;
+ 	}
+
+-	return 0;
++	memcpy(buf, mem, buf_len);
++
++out:
++	kfree(mem);
++
++	return ret;
+ }
+
+ static int ath10k_sdio_hif_diag_read32(struct ath10k *ar, u32 address,
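
The sdio.c hunk reads into a freshly kzalloc'd bounce buffer and memcpy()s to the caller's buffer only on full success, so the SDIO layer never DMAs into memory the driver does not control, and every path funnels through one cleanup label. A plain-C sketch of the same shape (malloc stands in for kzalloc, and the two I/O steps are stubbed):

#include <stdlib.h>
#include <string.h>
#include <errno.h>

static int set_window(unsigned addr) { (void)addr; return 0; }	/* stub */
static int read_window(void *dst, size_t len)			/* stub */
{
	memset(dst, 0xab, len);
	return 0;
}

/* Read via a private bounce buffer; copy out only after both I/O
 * steps succeed, and free the buffer on every exit path. */
static int diag_read(unsigned addr, void *buf, size_t buf_len)
{
	int ret;
	void *mem = calloc(1, buf_len);		/* kzalloc() in the driver */

	if (!mem)
		return -ENOMEM;

	ret = set_window(addr);
	if (ret)
		goto out;

	ret = read_window(mem, buf_len);	/* device writes mem, not buf */
	if (ret)
		goto out;

	memcpy(buf, mem, buf_len);		/* hand the data to the caller */
out:
	free(mem);
	return ret;
}

int main(void)
{
	char buf[16];
	return diag_read(0x1000, buf, sizeof(buf));
}
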
+diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
+index 90f1197a6ad84..2675174cc4fec 100644
+--- a/drivers/net/wireless/ath/ath10k/wmi.c
++++ b/drivers/net/wireless/ath/ath10k/wmi.c
+@@ -4668,16 +4668,13 @@ static void ath10k_tpc_config_disp_tables(struct ath10k *ar,
+ 	}
+
+ 	pream_idx = 0;
+-	for (i = 0; i < __le32_to_cpu(ev->rate_max); i++) {
++	for (i = 0; i < tpc_stats->rate_max; i++) {
+ 		memset(tpc_value, 0, sizeof(tpc_value));
+ 		memset(buff, 0, sizeof(buff));
+ 		if (i == pream_table[pream_idx])
+ 			pream_idx++;
+
+-		for (j = 0; j < WMI_TPC_TX_N_CHAIN; j++) {
+-			if (j >= __le32_to_cpu(ev->num_tx_chain))
+-				break;
+-
++		for (j = 0; j < tpc_stats->num_tx_chain; j++) {
+ 			tpc[j] = ath10k_tpc_config_get_rate(ar, ev, i, j + 1,
+ 							    rate_code[i],
+ 							    type);
+@@ -4790,7 +4787,7 @@ void ath10k_wmi_tpc_config_get_rate_code(u8 *rate_code, u16 *pream_table,
+
+ void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb)
+ {
+-	u32 num_tx_chain;
++	u32 num_tx_chain, rate_max;
+ 	u8 rate_code[WMI_TPC_RATE_MAX];
+ 	u16 pream_table[WMI_TPC_PREAM_TABLE_MAX];
+ 	struct wmi_pdev_tpc_config_event *ev;
+@@ -4806,6 +4803,13 @@ void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb)
+ 		return;
+ 	}
+
++	rate_max = __le32_to_cpu(ev->rate_max);
++	if (rate_max > WMI_TPC_RATE_MAX) {
++		ath10k_warn(ar, "number of rate is %d greater than TPC configured rate %d\n",
++			    rate_max, WMI_TPC_RATE_MAX);
++		rate_max = WMI_TPC_RATE_MAX;
++	}
++
+ 	tpc_stats = kzalloc(sizeof(*tpc_stats), GFP_ATOMIC);
+ 	if (!tpc_stats)
+ 		return;
+@@ -4822,8 +4826,8 @@ void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb)
+ 		__le32_to_cpu(ev->twice_antenna_reduction);
+ 	tpc_stats->power_limit = __le32_to_cpu(ev->power_limit);
+ 	tpc_stats->twice_max_rd_power = __le32_to_cpu(ev->twice_max_rd_power);
+-	tpc_stats->num_tx_chain = __le32_to_cpu(ev->num_tx_chain);
+-	tpc_stats->rate_max = __le32_to_cpu(ev->rate_max);
++	tpc_stats->num_tx_chain = num_tx_chain;
++	tpc_stats->rate_max = rate_max;
+
+ 	ath10k_tpc_config_disp_tables(ar, ev, tpc_stats,
+ 				      rate_code, pream_table,
+@@ -5018,16 +5022,13 @@ ath10k_wmi_tpc_stats_final_disp_tables(struct ath10k *ar,
+ 	}
+
+ 	pream_idx = 0;
+-	for (i = 0; i < __le32_to_cpu(ev->rate_max); i++) {
++	for (i = 0; i < tpc_stats->rate_max; i++) {
+ 		memset(tpc_value, 0, sizeof(tpc_value));
+ 		memset(buff, 0, sizeof(buff));
+ 		if (i == pream_table[pream_idx])
+ 			pream_idx++;
+
+-		for (j = 0; j < WMI_TPC_TX_N_CHAIN; j++) {
+-			if (j >= __le32_to_cpu(ev->num_tx_chain))
+-				break;
+-
++		for (j = 0; j < tpc_stats->num_tx_chain; j++) {
+ 			tpc[j] = ath10k_wmi_tpc_final_get_rate(ar, ev, i, j + 1,
+ 							       rate_code[i],
+ 							       type, pream_idx);
+@@ -5043,7 +5044,7 @@ ath10k_wmi_tpc_stats_final_disp_tables(struct ath10k *ar,
+
+ void ath10k_wmi_event_tpc_final_table(struct ath10k *ar, struct sk_buff *skb)
+ {
+-	u32 num_tx_chain;
++	u32 num_tx_chain, rate_max;
+ 	u8 rate_code[WMI_TPC_FINAL_RATE_MAX];
+ 	u16 pream_table[WMI_TPC_PREAM_TABLE_MAX];
+ 	struct wmi_pdev_tpc_final_table_event *ev;
+@@ -5051,12 +5052,24 @@ void ath10k_wmi_event_tpc_final_table(struct ath10k *ar, struct sk_buff *skb)
+
+ 	ev = (struct wmi_pdev_tpc_final_table_event *)skb->data;
+
++	num_tx_chain = __le32_to_cpu(ev->num_tx_chain);
++	if (num_tx_chain > WMI_TPC_TX_N_CHAIN) {
++		ath10k_warn(ar, "number of tx chain is %d greater than TPC final configured tx chain %d\n",
++			    num_tx_chain, WMI_TPC_TX_N_CHAIN);
++		return;
++	}
++
++	rate_max = __le32_to_cpu(ev->rate_max);
++	if (rate_max > WMI_TPC_FINAL_RATE_MAX) {
++		ath10k_warn(ar, "number of rate is %d greater than TPC final configured rate %d\n",
++			    rate_max, WMI_TPC_FINAL_RATE_MAX);
++		rate_max = WMI_TPC_FINAL_RATE_MAX;
++	}
++
+ 	tpc_stats = kzalloc(sizeof(*tpc_stats), GFP_ATOMIC);
+ 	if (!tpc_stats)
+ 		return;
+
+-	num_tx_chain = __le32_to_cpu(ev->num_tx_chain);
+-
+ 	ath10k_wmi_tpc_config_get_rate_code(rate_code, pream_table,
+ 					    num_tx_chain);
+
+@@ -5069,8 +5082,8 @@ void ath10k_wmi_event_tpc_final_table(struct ath10k *ar, struct sk_buff *skb)
+ 		__le32_to_cpu(ev->twice_antenna_reduction);
+ 	tpc_stats->power_limit = __le32_to_cpu(ev->power_limit);
+ 	tpc_stats->twice_max_rd_power = __le32_to_cpu(ev->twice_max_rd_power);
+-	tpc_stats->num_tx_chain = __le32_to_cpu(ev->num_tx_chain);
+-	tpc_stats->rate_max = __le32_to_cpu(ev->rate_max);
++	tpc_stats->num_tx_chain = num_tx_chain;
++	tpc_stats->rate_max = rate_max;
+
+ 	ath10k_wmi_tpc_stats_final_disp_tables(ar, ev, tpc_stats,
+ 					       rate_code, pream_table,
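
The wmi.c hunks validate num_tx_chain and rate_max once, when the firmware event arrives, clamp or reject out-of-range values, and index only with the sanitized copies, keeping the display loops from overrunning the fixed-size tables. A condensed C sketch of validate-once-then-trust (hypothetical names):

#include <stdio.h>

#define RATE_MAX 10	/* capacity of the fixed table, like WMI_TPC_RATE_MAX */

struct event { unsigned rate_max; };		/* device-supplied, untrusted */
struct stats { unsigned rate_max; int tpc[RATE_MAX]; };

static void handle_event(const struct event *ev, struct stats *st)
{
	unsigned rate_max = ev->rate_max;

	if (rate_max > RATE_MAX) {		/* sanitize at the boundary */
		fprintf(stderr, "clamping rate_max %u to %d\n",
			rate_max, RATE_MAX);
		rate_max = RATE_MAX;
	}
	st->rate_max = rate_max;		/* loops use the clamped copy */

	for (unsigned i = 0; i < st->rate_max; i++)
		st->tpc[i] = 0;			/* provably in bounds */
}

int main(void)
{
	struct event ev = { .rate_max = 1000 };	/* hostile/buggy firmware */
	struct stats st;

	handle_event(&ev, &st);
	return 0;
}
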
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
+index eadc64454839d..3d36b6ee158bb 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
+@@ -2149,8 +2149,7 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
+ 		brcmf_fws_enq(fws, BRCMF_FWS_SKBSTATE_DELAYED, fifo, skb);
+ 		brcmf_fws_schedule_deq(fws);
+ 	} else {
+-		bphy_err(drvr, "drop skb: no hanger slot\n");
+-		brcmf_txfinalize(ifp, skb, false);
++		bphy_err(drvr, "no hanger slot available\n");
+ 		rc = -ENOMEM;
+ 	}
+ 	brcmf_fws_unlock(fws);
+diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h
+index 1fb76d2f5d3fd..8b9d0809daf62 100644
+--- a/drivers/net/wireless/marvell/mwifiex/fw.h
++++ b/drivers/net/wireless/marvell/mwifiex/fw.h
+@@ -953,7 +953,7 @@ struct mwifiex_tkip_param {
+ struct mwifiex_aes_param {
+ 	u8 pn[WPA_PN_SIZE];
+ 	__le16 key_len;
+-	u8 key[WLAN_KEY_LEN_CCMP];
++	u8 key[WLAN_KEY_LEN_CCMP_256];
+ } __packed;
+
+ struct mwifiex_wapi_param {
+diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
+index 7ae2c34f65db2..4eaa493e33253 100644
+--- a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
++++ b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
+@@ -619,7 +619,7 @@ static int mwifiex_ret_802_11_key_material_v2(struct mwifiex_private *priv,
+ 	key_v2 = &resp->params.key_material_v2;
+
+ 	len = le16_to_cpu(key_v2->key_param_set.key_params.aes.key_len);
+-	if (len > WLAN_KEY_LEN_CCMP)
++	if (len > sizeof(key_v2->key_param_set.key_params.aes.key))
+ 		return -EINVAL;
+
+ 	if (le16_to_cpu(key_v2->action) == HostCmd_ACT_GEN_SET) {
+@@ -635,7 +635,7 @@ static int mwifiex_ret_802_11_key_material_v2(struct mwifiex_private *priv,
+ 		return 0;
+
+ 	memset(priv->aes_key_v2.key_param_set.key_params.aes.key, 0,
+-	       WLAN_KEY_LEN_CCMP);
++	       sizeof(key_v2->key_param_set.key_params.aes.key));
+ 	priv->aes_key_v2.key_param_set.key_params.aes.key_len =
+ 			cpu_to_le16(len);
+ 	memcpy(priv->aes_key_v2.key_param_set.key_params.aes.key,
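
After fw.h widens the AES key buffer to WLAN_KEY_LEN_CCMP_256, sta_cmdresp.c stops comparing the incoming length against a constant and checks it against sizeof() of the destination field itself, so the check can never drift out of sync with the buffer it protects. A small C illustration of that idiom:

#include <stdio.h>
#include <string.h>
#include <errno.h>

struct aes_param {
	unsigned short key_len;
	unsigned char key[32];	/* widened buffer (CCMP-256 sized) */
};

static struct aes_param stored;

/* Validate against sizeof(stored.key): resizing 'key' above automatically
 * resizes the check, unlike a hard-coded WLAN_KEY_LEN_* constant. */
static int store_key(const unsigned char *src, size_t len)
{
	if (len > sizeof(stored.key))
		return -EINVAL;

	memset(stored.key, 0, sizeof(stored.key));
	memcpy(stored.key, src, len);
	stored.key_len = (unsigned short)len;
	return 0;
}

int main(void)
{
	unsigned char key[64] = { 0 };

	printf("store 32: %d\n", store_key(key, 32));	/* accepted now */
	printf("store 64: %d\n", store_key(key, 64));	/* rejected */
	return 0;
}
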
+diff --git a/drivers/net/wireless/mediatek/mt76/agg-rx.c b/drivers/net/wireless/mediatek/mt76/agg-rx.c
+index cbff0dfc96311..f8441fd65400c 100644
+--- a/drivers/net/wireless/mediatek/mt76/agg-rx.c
++++ b/drivers/net/wireless/mediatek/mt76/agg-rx.c
+@@ -268,6 +268,7 @@ static void mt76_rx_aggr_shutdown(struct mt76_dev *dev, struct mt76_rx_tid *tid)
+ 		if (!skb)
+ 			continue;
+
++		tid->reorder_buf[i] = NULL;
+ 		tid->nframes--;
+ 		dev_kfree_skb(skb);
+ 	}
+diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
+index 6249a46c19762..026d996612fbe 100644
+--- a/drivers/net/wireless/mediatek/mt76/dma.c
++++ b/drivers/net/wireless/mediatek/mt76/dma.c
+@@ -261,10 +261,13 @@ mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid,
+ 	struct mt76_queue_buf buf;
+ 	dma_addr_t addr;
+
++	if (q->queued + 1 >= q->ndesc - 1)
++		goto error;
++
+ 	addr = dma_map_single(dev->dev, skb->data, skb->len,
+ 			      DMA_TO_DEVICE);
+ 	if (unlikely(dma_mapping_error(dev->dev, addr)))
+-		return -ENOMEM;
++		goto error;
+
+ 	buf.addr = addr;
+ 	buf.len = skb->len;
+@@ -275,6 +278,10 @@ mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid,
+ 	spin_unlock_bh(&q->lock);
+
+ 	return 0;
++
++error:
++	dev_kfree_skb(skb);
++	return -ENOMEM;
+ }
+
+ static int
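
The dma.c hunk refuses to enqueue when the descriptor ring is nearly full and funnels both that case and a mapping failure through one error label that frees the skb, so the buffer is never leaked. A toy C model of the occupancy check on a fixed ring:

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

#define NDESC 8

struct ring { int queued; void *desc[NDESC]; };

/* Reject before touching the ring when fewer than two slots remain
 * (one slot is kept unused to tell "full" from "empty"). */
static int enqueue(struct ring *q, void *pkt)
{
	if (q->queued + 1 >= NDESC - 1)
		goto error;

	q->desc[q->queued++] = pkt;
	return 0;

error:
	free(pkt);	/* single owner: drop it here, never leak it */
	return -ENOMEM;
}

int main(void)
{
	struct ring q = { 0 };

	for (int i = 0; i < NDESC; i++)
		if (enqueue(&q, malloc(16)))
			printf("ring full at packet %d\n", i);

	while (q.queued)
		free(q.desc[--q.queued]);
	return 0;
}
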
+diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
+index 1a2c143b34d01..8bd191347b9fb 100644
+--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
++++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
+@@ -105,7 +105,15 @@ static int mt76_led_init(struct mt76_dev *dev)
+ 		dev->led_al = of_property_read_bool(np, "led-active-low");
+ 	}
+
+-	return devm_led_classdev_register(dev->dev, &dev->led_cdev);
++	return led_classdev_register(dev->dev, &dev->led_cdev);
++}
++
++static void mt76_led_cleanup(struct mt76_dev *dev)
++{
++	if (!dev->led_cdev.brightness_set && !dev->led_cdev.blink_set)
++		return;
++
++	led_classdev_unregister(&dev->led_cdev);
+ }
+
+ static void mt76_init_stream_cap(struct mt76_dev *dev,
+@@ -360,6 +368,8 @@ void mt76_unregister_device(struct mt76_dev *dev)
+ {
+ 	struct ieee80211_hw *hw = dev->hw;
+
++	if (IS_ENABLED(CONFIG_MT76_LEDS))
++		mt76_led_cleanup(dev);
+ 	mt76_tx_status_check(dev, NULL, true);
+ 	ieee80211_unregister_hw(hw);
+ }
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
+index 25d5b1608bc91..0a5695c3d9241 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
+@@ -561,6 +561,7 @@ mt7603_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+
+ 	mtxq = (struct mt76_txq *)txq->drv_priv;
+
++	mutex_lock(&dev->mt76.mutex);
+ 	switch (action) {
+ 	case IEEE80211_AMPDU_RX_START:
+ 		mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, ssn,
+@@ -590,6 +591,7 @@ mt7603_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ 		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ 		break;
+ 	}
++	mutex_unlock(&dev->mt76.mutex);
+
+ 	return 0;
+ }
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+index 87c748715b5d7..38183aef0eb92 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+@@ -455,6 +455,7 @@ mt7615_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+
+ 	mtxq = (struct mt76_txq *)txq->drv_priv;
+
++	mutex_lock(&dev->mt76.mutex);
+ 	switch (action) {
+ 	case IEEE80211_AMPDU_RX_START:
+ 		mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, ssn,
+@@ -485,6 +486,7 @@ mt7615_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ 		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ 		break;
+ 	}
++	mutex_unlock(&dev->mt76.mutex);
+
+ 	return 0;
+ }
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
+index aec73a0295e86..de0d6f21c621c 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
+@@ -371,6 +371,7 @@ int mt76x02_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+
+ 	mtxq = (struct mt76_txq *)txq->drv_priv;
+
++	mutex_lock(&dev->mt76.mutex);
+ 	switch (action) {
+ 	case IEEE80211_AMPDU_RX_START:
+ 		mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid,
+@@ -400,6 +401,7 @@ int mt76x02_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ 		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ 		break;
+ 	}
++	mutex_unlock(&dev->mt76.mutex);
+
+ 	return 0;
+ }
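
The three ampdu_action hunks wrap the whole action switch in dev->mt76.mutex so concurrent start/stop requests for aggregation sessions are serialized rather than interleaving their state updates. A pthread-based miniature of the same discipline (build with -lpthread; the session counter is a stand-in for the real aggregation state):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int sessions;		/* aggregation state, guarded by lock */

enum action { START, STOP };

/* The entire transition runs under one critical section, like the
 * switch (action) block in the mt76 ampdu_action handlers. */
static int ampdu_action(enum action a)
{
	pthread_mutex_lock(&lock);
	switch (a) {
	case START:
		sessions++;
		break;
	case STOP:
		if (sessions > 0)
			sessions--;
		break;
	}
	pthread_mutex_unlock(&lock);
	return 0;
}

static void *worker(void *arg)
{
	for (int i = 0; i < 10000; i++)
		ampdu_action((enum action)(i & 1));
	return arg;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, worker, NULL);
	pthread_create(&b, NULL, worker, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("sessions = %d\n", sessions);	/* deterministic: 0 */
	return 0;
}
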
+diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
+index 547ad538d8b66..5f74cf821068d 100644
+--- a/drivers/net/wireless/ti/wlcore/main.c
++++ b/drivers/net/wireless/ti/wlcore/main.c
+@@ -3658,8 +3658,10 @@ void wlcore_regdomain_config(struct wl1271 *wl)
+ 		goto out;
+
+ 	ret = pm_runtime_get_sync(wl->dev);
+-	if (ret < 0)
++	if (ret < 0) {
++		pm_runtime_put_autosuspend(wl->dev);
+ 		goto out;
++	}
+
+ 	ret = wlcore_cmd_regdomain_config_locked(wl);
+ 	if (ret < 0) {
+diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c
+index 90e56d4c3df3b..e20e18cd04aed 100644
+--- a/drivers/net/wireless/ti/wlcore/tx.c
++++ b/drivers/net/wireless/ti/wlcore/tx.c
+@@ -863,6 +863,7 @@ void wl1271_tx_work(struct work_struct *work)
+
+ 	ret = wlcore_tx_work_locked(wl);
+ 	if (ret < 0) {
++		pm_runtime_put_noidle(wl->dev);
+ 		wl12xx_queue_recovery_work(wl);
+ 		goto out;
+ 	}
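
Both wlcore hunks repair reference-count imbalances: pm_runtime_get_sync() takes a usage reference even when it fails, so every error path still owes a matching put. A generic C model of the invariant (the counter stands in for the runtime-PM usage count):

#include <stdio.h>

static int usage;		/* device usage count */

/* Like pm_runtime_get_sync(): the count is bumped even on failure. */
static int get_sync(int simulate_error)
{
	usage++;
	return simulate_error ? -1 : 0;
}

static void put(void) { usage--; }

static int do_config(int fail_resume)
{
	int ret = get_sync(fail_resume);

	if (ret < 0) {
		put();		/* the fix: error paths drop the ref too */
		return ret;
	}
	/* ... talk to the device ... */
	put();
	return 0;
}

int main(void)
{
	do_config(1);
	do_config(0);
	printf("usage = %d (must be 0)\n", usage);
	return 0;
}
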
+diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
+index 2b36f052bfb91..7b3f6555e67ba 100644
+--- a/drivers/nvme/host/Kconfig
++++ b/drivers/nvme/host/Kconfig
+@@ -64,6 +64,7 @@ config NVME_TCP
+ 	depends on INET
+ 	depends on BLK_DEV_NVME
+ 	select NVME_FABRICS
++	select CRYPTO
+ 	select CRYPTO_CRC32C
+ 	help
+ 	  This provides support for the NVMe over Fabrics protocol using
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 2d2673d360ff2..2cd32901d95c7 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -288,11 +288,8 @@ void nvme_complete_rq(struct request *req)
+ 		nvme_req(req)->ctrl->comp_seen = true;
+
+ 	if (unlikely(status != BLK_STS_OK && nvme_req_needs_retry(req))) {
+-		if ((req->cmd_flags & REQ_NVME_MPATH) &&
+-		    blk_path_error(status)) {
+-			nvme_failover_req(req);
++		if ((req->cmd_flags & REQ_NVME_MPATH) && nvme_failover_req(req))
+ 			return;
+-		}
+
+ 		if (!blk_queue_dying(req->q)) {
+ 			nvme_retry_req(req);
+@@ -1867,7 +1864,7 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
+ 	if (ns->head->disk) {
+ 		nvme_update_disk_info(ns->head->disk, ns, id);
+ 		blk_queue_stack_limits(ns->head->disk->queue, ns->queue);
+-		revalidate_disk(ns->head->disk);
++		nvme_mpath_update_disk_size(ns->head->disk);
+ 	}
+ #endif
+ }
+@@ -3200,6 +3197,10 @@ static ssize_t nvme_sysfs_delete(struct device *dev,
+ {
+ 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
++	/* Can't delete non-created controllers */
++	if (!ctrl->created)
++		return -EBUSY;
++
+ 	if (device_remove_file_self(dev, attr))
+ 		nvme_delete_ctrl_sync(ctrl);
+ 	return count;
+@@ -3995,6 +3996,7 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl)
+ 		nvme_queue_scan(ctrl);
+ 		nvme_start_queues(ctrl);
+ 	}
++	ctrl->created = true;
+ }
+ EXPORT_SYMBOL_GPL(nvme_start_ctrl);
+
+@@ -4085,6 +4087,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
+ 	if (ret)
+ 		goto out_release_instance;
+
++	nvme_get_ctrl(ctrl);
+ 	cdev_init(&ctrl->cdev, &nvme_dev_fops);
+ 	ctrl->cdev.owner = ops->module;
+ 	ret = cdev_device_add(&ctrl->cdev, ctrl->device);
+@@ -4103,6 +4106,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
+
+ 	return 0;
+ out_free_name:
++	nvme_put_ctrl(ctrl);
+ 	kfree_const(ctrl->device->kobj.name);
+ out_release_instance:
+ 	ida_simple_remove(&nvme_instance_ida, ctrl->instance);
+diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
+index dae050d1f814d..da801a14cd13d 100644
+--- a/drivers/nvme/host/fc.c
++++ b/drivers/nvme/host/fc.c
+@@ -3171,10 +3171,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
+ 		goto fail_ctrl;
+ 	}
+
+-	nvme_get_ctrl(&ctrl->ctrl);
+-
+ 	if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) {
+-		nvme_put_ctrl(&ctrl->ctrl);
+ 		dev_err(ctrl->ctrl.device,
+ 			"NVME-FC{%d}: failed to schedule initial connect\n",
+ 			ctrl->cnum);
+@@ -3199,6 +3196,7 @@ fail_ctrl:
+
+ 	/* initiate nvme ctrl ref counting teardown */
+ 	nvme_uninit_ctrl(&ctrl->ctrl);
++	nvme_put_ctrl(&ctrl->ctrl);
+
+ 	/* Remove core ctrl ref. */
+ 	nvme_put_ctrl(&ctrl->ctrl);
+diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
+index 0a458f7880887..3968f89f7855a 100644
+--- a/drivers/nvme/host/multipath.c
++++ b/drivers/nvme/host/multipath.c
+@@ -65,17 +65,12 @@ void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
+ 	}
+ }
+
+-void nvme_failover_req(struct request *req)
++bool nvme_failover_req(struct request *req)
+ {
+ 	struct nvme_ns *ns = req->q->queuedata;
+ 	u16 status = nvme_req(req)->status;
+ 	unsigned long flags;
+
+-	spin_lock_irqsave(&ns->head->requeue_lock, flags);
+-	blk_steal_bios(&ns->head->requeue_list, req);
+-	spin_unlock_irqrestore(&ns->head->requeue_lock, flags);
+-	blk_mq_end_request(req, 0);
+-
+ 	switch (status & 0x7ff) {
+ 	case NVME_SC_ANA_TRANSITION:
+ 	case NVME_SC_ANA_INACCESSIBLE:
+@@ -104,15 +99,17 @@ void nvme_failover_req(struct request *req)
+ 		nvme_mpath_clear_current_path(ns);
+ 		break;
+ 	default:
+-		/*
+-		 * Reset the controller for any non-ANA error as we don't know
+-		 * what caused the error.
+-		 */
+-		nvme_reset_ctrl(ns->ctrl);
+-		break;
++		/* This was a non-ANA error so follow the normal error path. */
++		return false;
+ 	}
+
++	spin_lock_irqsave(&ns->head->requeue_lock, flags);
++	blk_steal_bios(&ns->head->requeue_list, req);
++	spin_unlock_irqrestore(&ns->head->requeue_lock, flags);
++	blk_mq_end_request(req, 0);
++
+ 	kblockd_schedule_work(&ns->head->requeue_work);
++	return true;
+ }
+
+ void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
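
nvme_failover_req() now classifies the status first and only steals the bios and completes the request once failover is actually chosen, returning false so the caller can take the normal retry path for non-ANA errors. A compact C sketch of moving the side effects behind the decision (illustrative status values):

#include <stdbool.h>
#include <stdio.h>

enum status { OK, ANA_TRANSITION, PATH_ERROR, MEDIA_ERROR };

/* Decide first; commit (requeue + complete) only on a failover case.
 * Returning false leaves the request untouched for the normal path. */
static bool failover_req(enum status st)
{
	switch (st) {
	case ANA_TRANSITION:
	case PATH_ERROR:
		break;			/* failover cases fall through */
	default:
		return false;		/* no side effects performed */
	}

	printf("requeue bios + end request\n");	/* the committed side effects */
	return true;
}

int main(void)
{
	if (!failover_req(MEDIA_ERROR))
		printf("normal retry path\n");
	failover_req(PATH_ERROR);
	return 0;
}
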
+diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h |
8001 |
+index 2bd9f7c3084f2..b7117fb09dd0f 100644 |
8002 |
+--- a/drivers/nvme/host/nvme.h |
8003 |
++++ b/drivers/nvme/host/nvme.h |
8004 |
+@@ -253,6 +253,7 @@ struct nvme_ctrl { |
8005 |
+ struct nvme_command ka_cmd; |
8006 |
+ struct work_struct fw_act_work; |
8007 |
+ unsigned long events; |
8008 |
++ bool created; |
8009 |
+ |
8010 |
+ #ifdef CONFIG_NVME_MULTIPATH |
8011 |
+ /* asymmetric namespace access: */ |
8012 |
+@@ -530,7 +531,7 @@ void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys); |
8013 |
+ void nvme_mpath_start_freeze(struct nvme_subsystem *subsys); |
8014 |
+ void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns, |
8015 |
+ struct nvme_ctrl *ctrl, int *flags); |
8016 |
+-void nvme_failover_req(struct request *req); |
8017 |
++bool nvme_failover_req(struct request *req); |
8018 |
+ void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl); |
8019 |
+ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head); |
8020 |
+ void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id); |
8021 |
+@@ -560,6 +561,16 @@ static inline void nvme_trace_bio_complete(struct request *req, |
8022 |
+ req->bio, status); |
8023 |
+ } |
8024 |
+ |
8025 |
++static inline void nvme_mpath_update_disk_size(struct gendisk *disk) |
8026 |
++{ |
8027 |
++ struct block_device *bdev = bdget_disk(disk, 0); |
8028 |
++ |
8029 |
++ if (bdev) { |
8030 |
++ bd_set_size(bdev, get_capacity(disk) << SECTOR_SHIFT); |
8031 |
++ bdput(bdev); |
8032 |
++ } |
8033 |
++} |
8034 |
++ |
8035 |
+ extern struct device_attribute dev_attr_ana_grpid; |
8036 |
+ extern struct device_attribute dev_attr_ana_state; |
8037 |
+ extern struct device_attribute subsys_attr_iopolicy; |
8038 |
+@@ -579,8 +590,9 @@ static inline void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns, |
8039 |
+ sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance); |
8040 |
+ } |
8041 |
+ |
8042 |
+-static inline void nvme_failover_req(struct request *req) |
8043 |
++static inline bool nvme_failover_req(struct request *req) |
8044 |
+ { |
8045 |
++ return false; |
8046 |
+ } |
8047 |
+ static inline void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl) |
8048 |
+ { |
8049 |
+@@ -634,6 +646,9 @@ static inline void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys) |
8050 |
+ static inline void nvme_mpath_start_freeze(struct nvme_subsystem *subsys) |
8051 |
+ { |
8052 |
+ } |
8053 |
++static inline void nvme_mpath_update_disk_size(struct gendisk *disk) |
8054 |
++{ |
8055 |
++} |
8056 |
+ #endif /* CONFIG_NVME_MULTIPATH */ |
8057 |
+ |
8058 |
+ #ifdef CONFIG_NVM |
8059 |
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c |
8060 |
+index a91433bdf5de4..75f26d2ec6429 100644 |
8061 |
+--- a/drivers/nvme/host/pci.c |
8062 |
++++ b/drivers/nvme/host/pci.c |
8063 |
+@@ -2850,7 +2850,6 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) |
8064 |
+ dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev)); |
8065 |
+ |
8066 |
+ nvme_reset_ctrl(&dev->ctrl); |
8067 |
+- nvme_get_ctrl(&dev->ctrl); |
8068 |
+ async_schedule(nvme_async_probe, dev); |
8069 |
+ |
8070 |
+ return 0; |
8071 |
+diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c |
8072 |
+index f9444272f861e..abe4fe496d05c 100644 |
8073 |
+--- a/drivers/nvme/host/rdma.c |
8074 |
++++ b/drivers/nvme/host/rdma.c |
8075 |
+@@ -2088,8 +2088,6 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev, |
8076 |
+ dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISpcs\n", |
8077 |
+ ctrl->ctrl.opts->subsysnqn, &ctrl->addr); |
8078 |
+ |
8079 |
+- nvme_get_ctrl(&ctrl->ctrl); |
8080 |
+- |
8081 |
+ mutex_lock(&nvme_rdma_ctrl_mutex); |
8082 |
+ list_add_tail(&ctrl->list, &nvme_rdma_ctrl_list); |
8083 |
+ mutex_unlock(&nvme_rdma_ctrl_mutex); |
8084 |
+@@ -2099,6 +2097,7 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev, |
8085 |
+ out_uninit_ctrl: |
8086 |
+ nvme_uninit_ctrl(&ctrl->ctrl); |
8087 |
+ nvme_put_ctrl(&ctrl->ctrl); |
8088 |
++ nvme_put_ctrl(&ctrl->ctrl); |
8089 |
+ if (ret > 0) |
8090 |
+ ret = -EIO; |
8091 |
+ return ERR_PTR(ret); |
8092 |
+diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c |
8093 |
+index c782005ee99f9..6d7a813e7183a 100644 |
8094 |
+--- a/drivers/nvme/host/tcp.c |
8095 |
++++ b/drivers/nvme/host/tcp.c |
8096 |
+@@ -2404,8 +2404,6 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev, |
8097 |
+ dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n", |
8098 |
+ ctrl->ctrl.opts->subsysnqn, &ctrl->addr); |
8099 |
+ |
8100 |
+- nvme_get_ctrl(&ctrl->ctrl); |
8101 |
+- |
8102 |
+ mutex_lock(&nvme_tcp_ctrl_mutex); |
8103 |
+ list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list); |
8104 |
+ mutex_unlock(&nvme_tcp_ctrl_mutex); |
8105 |
+@@ -2415,6 +2413,7 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev, |
8106 |
+ out_uninit_ctrl: |
8107 |
+ nvme_uninit_ctrl(&ctrl->ctrl); |
8108 |
+ nvme_put_ctrl(&ctrl->ctrl); |
8109 |
++ nvme_put_ctrl(&ctrl->ctrl); |
8110 |
+ if (ret > 0) |
8111 |
+ ret = -EIO; |
8112 |
+ return ERR_PTR(ret); |
8113 |
+diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c |
8114 |
+index 11f5aea97d1b1..82b87a4c50f63 100644 |
8115 |
+--- a/drivers/nvme/target/loop.c |
8116 |
++++ b/drivers/nvme/target/loop.c |
8117 |
+@@ -619,8 +619,6 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev, |
8118 |
+ dev_info(ctrl->ctrl.device, |
8119 |
+ "new ctrl: \"%s\"\n", ctrl->ctrl.opts->subsysnqn); |
8120 |
+ |
8121 |
+- nvme_get_ctrl(&ctrl->ctrl); |
8122 |
+- |
8123 |
+ changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); |
8124 |
+ WARN_ON_ONCE(!changed); |
8125 |
+ |
8126 |
+@@ -638,6 +636,7 @@ out_free_queues: |
8127 |
+ kfree(ctrl->queues); |
8128 |
+ out_uninit_ctrl: |
8129 |
+ nvme_uninit_ctrl(&ctrl->ctrl); |
8130 |
++ nvme_put_ctrl(&ctrl->ctrl); |
8131 |
+ out_put_ctrl: |
8132 |
+ nvme_put_ctrl(&ctrl->ctrl); |
8133 |
+ if (ret > 0) |
8134 |
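The three create_ctrl() changes above (rdma, tcp and loop) drop the extra nvme_get_ctrl() taken after initialization and instead add a second nvme_put_ctrl() on the unwind path, consistent with initialization itself now owning a reference. A minimal sketch of that two-owner refcount pattern, with hypothetical names rather than the nvme code itself:

    #include <linux/errno.h>
    #include <linux/kref.h>
    #include <linux/slab.h>

    struct ctrl {
        struct kref ref;
    };

    static void ctrl_release(struct kref *ref)
    {
        kfree(container_of(ref, struct ctrl, ref));
    }

    static int ctrl_create(bool setup_fails)
    {
        struct ctrl *c = kzalloc(sizeof(*c), GFP_KERNEL);

        if (!c)
            return -ENOMEM;
        kref_init(&c->ref);     /* reference 1: held by the core */
        kref_get(&c->ref);      /* reference 2: held by the transport */

        if (setup_fails) {
            kref_put(&c->ref, ctrl_release);    /* drop transport ref */
            kref_put(&c->ref, ctrl_release);    /* drop core ref */
            return -EIO;
        }
        return 0;
    }

Both owners must drop exactly one reference on failure, which is why the error labels above now call nvme_put_ctrl() twice.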
+diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c |
8135 |
+index 36d906a7f70d3..b5314164479e9 100644 |
8136 |
+--- a/drivers/nvme/target/rdma.c |
8137 |
++++ b/drivers/nvme/target/rdma.c |
8138 |
+@@ -75,6 +75,7 @@ enum nvmet_rdma_queue_state { |
8139 |
+ |
8140 |
+ struct nvmet_rdma_queue { |
8141 |
+ struct rdma_cm_id *cm_id; |
8142 |
++ struct ib_qp *qp; |
8143 |
+ struct nvmet_port *port; |
8144 |
+ struct ib_cq *cq; |
8145 |
+ atomic_t sq_wr_avail; |
8146 |
+@@ -464,7 +465,7 @@ static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev, |
8147 |
+ if (ndev->srq) |
8148 |
+ ret = ib_post_srq_recv(ndev->srq, &cmd->wr, NULL); |
8149 |
+ else |
8150 |
+- ret = ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, NULL); |
8151 |
++ ret = ib_post_recv(cmd->queue->qp, &cmd->wr, NULL); |
8152 |
+ |
8153 |
+ if (unlikely(ret)) |
8154 |
+ pr_err("post_recv cmd failed\n"); |
8155 |
+@@ -503,7 +504,7 @@ static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp) |
8156 |
+ atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail); |
8157 |
+ |
8158 |
+ if (rsp->n_rdma) { |
8159 |
+- rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp, |
8160 |
++ rdma_rw_ctx_destroy(&rsp->rw, queue->qp, |
8161 |
+ queue->cm_id->port_num, rsp->req.sg, |
8162 |
+ rsp->req.sg_cnt, nvmet_data_dir(&rsp->req)); |
8163 |
+ } |
8164 |
+@@ -587,7 +588,7 @@ static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc) |
8165 |
+ |
8166 |
+ WARN_ON(rsp->n_rdma <= 0); |
8167 |
+ atomic_add(rsp->n_rdma, &queue->sq_wr_avail); |
8168 |
+- rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp, |
8169 |
++ rdma_rw_ctx_destroy(&rsp->rw, queue->qp, |
8170 |
+ queue->cm_id->port_num, rsp->req.sg, |
8171 |
+ rsp->req.sg_cnt, nvmet_data_dir(&rsp->req)); |
8172 |
+ rsp->n_rdma = 0; |
8173 |
+@@ -742,7 +743,7 @@ static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp) |
8174 |
+ } |
8175 |
+ |
8176 |
+ if (nvmet_rdma_need_data_in(rsp)) { |
8177 |
+- if (rdma_rw_ctx_post(&rsp->rw, queue->cm_id->qp, |
8178 |
++ if (rdma_rw_ctx_post(&rsp->rw, queue->qp, |
8179 |
+ queue->cm_id->port_num, &rsp->read_cqe, NULL)) |
8180 |
+ nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR); |
8181 |
+ } else { |
8182 |
+@@ -1025,6 +1026,7 @@ static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue) |
8183 |
+ pr_err("failed to create_qp ret= %d\n", ret); |
8184 |
+ goto err_destroy_cq; |
8185 |
+ } |
8186 |
++ queue->qp = queue->cm_id->qp; |
8187 |
+ |
8188 |
+ atomic_set(&queue->sq_wr_avail, qp_attr.cap.max_send_wr); |
8189 |
+ |
8190 |
+@@ -1053,11 +1055,10 @@ err_destroy_cq: |
8191 |
+ |
8192 |
+ static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue) |
8193 |
+ { |
8194 |
+- struct ib_qp *qp = queue->cm_id->qp; |
8195 |
+- |
8196 |
+- ib_drain_qp(qp); |
8197 |
+- rdma_destroy_id(queue->cm_id); |
8198 |
+- ib_destroy_qp(qp); |
8199 |
++ ib_drain_qp(queue->qp); |
8200 |
++ if (queue->cm_id) |
8201 |
++ rdma_destroy_id(queue->cm_id); |
8202 |
++ ib_destroy_qp(queue->qp); |
8203 |
+ ib_free_cq(queue->cq); |
8204 |
+ } |
8205 |
+ |
8206 |
+@@ -1291,9 +1292,12 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id, |
8207 |
+ |
8208 |
+ ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn); |
8209 |
+ if (ret) { |
8210 |
+- schedule_work(&queue->release_work); |
8211 |
+- /* Destroying rdma_cm id is not needed here */ |
8212 |
+- return 0; |
8213 |
++ /* |
8214 |
++ * Don't destroy the cm_id in free path, as we implicitly |
8215 |
++ * destroy the cm_id here with non-zero ret code. |
8216 |
++ */ |
8217 |
++ queue->cm_id = NULL; |
8218 |
++ goto free_queue; |
8219 |
+ } |
8220 |
+ |
8221 |
+ mutex_lock(&nvmet_rdma_queue_mutex); |
8222 |
+@@ -1302,6 +1306,8 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id, |
8223 |
+ |
8224 |
+ return 0; |
8225 |
+ |
8226 |
++free_queue: |
8227 |
++ nvmet_rdma_free_queue(queue); |
8228 |
+ put_device: |
8229 |
+ kref_put(&ndev->ref, nvmet_rdma_free_dev); |
8230 |
+ |
8231 |
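The nvmet-rdma hunks above cache the qp pointer in the queue and allow cm_id to be NULL, so teardown no longer reaches through a cm_id that the RDMA CM core may already have destroyed after a failed accept. Reduced to its shape (a sketch over the real verbs API, not the driver itself):

    #include <rdma/ib_verbs.h>
    #include <rdma/rdma_cm.h>

    struct queue {
        struct rdma_cm_id *cm_id;   /* NULLed when the CM core frees it */
        struct ib_qp *qp;           /* cached once at create time */
    };

    static void queue_destroy_ib(struct queue *q)
    {
        ib_drain_qp(q->qp);         /* works without the cm_id */
        if (q->cm_id)
            rdma_destroy_id(q->cm_id);
        ib_destroy_qp(q->qp);
    }

Caching queue->qp also lets the I/O paths (ib_post_recv, rdma_rw_ctx_post/destroy) stop depending on cm_id lifetime.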
+diff --git a/drivers/opp/core.c b/drivers/opp/core.c |
8232 |
+index 7b057c32e11b1..29dfaa591f8b0 100644 |
8233 |
+--- a/drivers/opp/core.c |
8234 |
++++ b/drivers/opp/core.c |
8235 |
+@@ -990,7 +990,6 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index) |
8236 |
+ BLOCKING_INIT_NOTIFIER_HEAD(&opp_table->head); |
8237 |
+ INIT_LIST_HEAD(&opp_table->opp_list); |
8238 |
+ kref_init(&opp_table->kref); |
8239 |
+- kref_init(&opp_table->list_kref); |
8240 |
+ |
8241 |
+ /* Secure the device table modification */ |
8242 |
+ list_add(&opp_table->node, &opp_tables); |
8243 |
+@@ -1074,33 +1073,6 @@ static void _opp_table_kref_release(struct kref *kref) |
8244 |
+ mutex_unlock(&opp_table_lock); |
8245 |
+ } |
8246 |
+ |
8247 |
+-void _opp_remove_all_static(struct opp_table *opp_table) |
8248 |
+-{ |
8249 |
+- struct dev_pm_opp *opp, *tmp; |
8250 |
+- |
8251 |
+- list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) { |
8252 |
+- if (!opp->dynamic) |
8253 |
+- dev_pm_opp_put(opp); |
8254 |
+- } |
8255 |
+- |
8256 |
+- opp_table->parsed_static_opps = false; |
8257 |
+-} |
8258 |
+- |
8259 |
+-static void _opp_table_list_kref_release(struct kref *kref) |
8260 |
+-{ |
8261 |
+- struct opp_table *opp_table = container_of(kref, struct opp_table, |
8262 |
+- list_kref); |
8263 |
+- |
8264 |
+- _opp_remove_all_static(opp_table); |
8265 |
+- mutex_unlock(&opp_table_lock); |
8266 |
+-} |
8267 |
+- |
8268 |
+-void _put_opp_list_kref(struct opp_table *opp_table) |
8269 |
+-{ |
8270 |
+- kref_put_mutex(&opp_table->list_kref, _opp_table_list_kref_release, |
8271 |
+- &opp_table_lock); |
8272 |
+-} |
8273 |
+- |
8274 |
+ void dev_pm_opp_put_opp_table(struct opp_table *opp_table) |
8275 |
+ { |
8276 |
+ kref_put_mutex(&opp_table->kref, _opp_table_kref_release, |
8277 |
+@@ -1204,6 +1176,24 @@ void dev_pm_opp_remove(struct device *dev, unsigned long freq) |
8278 |
+ } |
8279 |
+ EXPORT_SYMBOL_GPL(dev_pm_opp_remove); |
8280 |
+ |
8281 |
++void _opp_remove_all_static(struct opp_table *opp_table) |
8282 |
++{ |
8283 |
++ struct dev_pm_opp *opp, *tmp; |
8284 |
++ |
8285 |
++ mutex_lock(&opp_table->lock); |
8286 |
++ |
8287 |
++ if (!opp_table->parsed_static_opps || --opp_table->parsed_static_opps) |
8288 |
++ goto unlock; |
8289 |
++ |
8290 |
++ list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) { |
8291 |
++ if (!opp->dynamic) |
8292 |
++ dev_pm_opp_put_unlocked(opp); |
8293 |
++ } |
8294 |
++ |
8295 |
++unlock: |
8296 |
++ mutex_unlock(&opp_table->lock); |
8297 |
++} |
8298 |
++ |
8299 |
+ /** |
8300 |
+ * dev_pm_opp_remove_all_dynamic() - Remove all dynamically created OPPs |
8301 |
+ * @dev: device for which we do this operation |
8302 |
+@@ -2209,7 +2199,7 @@ void _dev_pm_opp_find_and_remove_table(struct device *dev) |
8303 |
+ return; |
8304 |
+ } |
8305 |
+ |
8306 |
+- _put_opp_list_kref(opp_table); |
8307 |
++ _opp_remove_all_static(opp_table); |
8308 |
+ |
8309 |
+ /* Drop reference taken by _find_opp_table() */ |
8310 |
+ dev_pm_opp_put_opp_table(opp_table); |
8311 |
+diff --git a/drivers/opp/of.c b/drivers/opp/of.c |
8312 |
+index 1e5fcdee043c4..249738e1e0b7a 100644 |
8313 |
+--- a/drivers/opp/of.c |
8314 |
++++ b/drivers/opp/of.c |
8315 |
+@@ -658,17 +658,15 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table) |
8316 |
+ struct dev_pm_opp *opp; |
8317 |
+ |
8318 |
+ /* OPP table is already initialized for the device */ |
8319 |
++ mutex_lock(&opp_table->lock); |
8320 |
+ if (opp_table->parsed_static_opps) { |
8321 |
+- kref_get(&opp_table->list_kref); |
8322 |
++ opp_table->parsed_static_opps++; |
8323 |
++ mutex_unlock(&opp_table->lock); |
8324 |
+ return 0; |
8325 |
+ } |
8326 |
+ |
8327 |
+- /* |
8328 |
+- * Re-initialize list_kref every time we add static OPPs to the OPP |
8329 |
+- * table as the reference count may be 0 after the last tie static OPPs |
8330 |
+- * were removed. |
8331 |
+- */ |
8332 |
+- kref_init(&opp_table->list_kref); |
8333 |
++ opp_table->parsed_static_opps = 1; |
8334 |
++ mutex_unlock(&opp_table->lock); |
8335 |
+ |
8336 |
+ /* We have opp-table node now, iterate over it and add OPPs */ |
8337 |
+ for_each_available_child_of_node(opp_table->np, np) { |
8338 |
+@@ -678,7 +676,7 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table) |
8339 |
+ dev_err(dev, "%s: Failed to add OPP, %d\n", __func__, |
8340 |
+ ret); |
8341 |
+ of_node_put(np); |
8342 |
+- goto put_list_kref; |
8343 |
++ goto remove_static_opp; |
8344 |
+ } else if (opp) { |
8345 |
+ count++; |
8346 |
+ } |
8347 |
+@@ -687,7 +685,7 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table) |
8348 |
+ /* There should be one of more OPP defined */ |
8349 |
+ if (WARN_ON(!count)) { |
8350 |
+ ret = -ENOENT; |
8351 |
+- goto put_list_kref; |
8352 |
++ goto remove_static_opp; |
8353 |
+ } |
8354 |
+ |
8355 |
+ list_for_each_entry(opp, &opp_table->opp_list, node) |
8356 |
+@@ -698,18 +696,16 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table) |
8357 |
+ dev_err(dev, "Not all nodes have performance state set (%d: %d)\n", |
8358 |
+ count, pstate_count); |
8359 |
+ ret = -ENOENT; |
8360 |
+- goto put_list_kref; |
8361 |
++ goto remove_static_opp; |
8362 |
+ } |
8363 |
+ |
8364 |
+ if (pstate_count) |
8365 |
+ opp_table->genpd_performance_state = true; |
8366 |
+ |
8367 |
+- opp_table->parsed_static_opps = true; |
8368 |
+- |
8369 |
+ return 0; |
8370 |
+ |
8371 |
+-put_list_kref: |
8372 |
+- _put_opp_list_kref(opp_table); |
8373 |
++remove_static_opp: |
8374 |
++ _opp_remove_all_static(opp_table); |
8375 |
+ |
8376 |
+ return ret; |
8377 |
+ } |
8378 |
+@@ -737,6 +733,10 @@ static int _of_add_opp_table_v1(struct device *dev, struct opp_table *opp_table) |
8379 |
+ return -EINVAL; |
8380 |
+ } |
8381 |
+ |
8382 |
++ mutex_lock(&opp_table->lock); |
8383 |
++ opp_table->parsed_static_opps = 1; |
8384 |
++ mutex_unlock(&opp_table->lock); |
8385 |
++ |
8386 |
+ val = prop->value; |
8387 |
+ while (nr) { |
8388 |
+ unsigned long freq = be32_to_cpup(val++) * 1000; |
8389 |
+@@ -746,7 +746,7 @@ static int _of_add_opp_table_v1(struct device *dev, struct opp_table *opp_table) |
8390 |
+ if (ret) { |
8391 |
+ dev_err(dev, "%s: Failed to add OPP %ld (%d)\n", |
8392 |
+ __func__, freq, ret); |
8393 |
+- _put_opp_list_kref(opp_table); |
8394 |
++ _opp_remove_all_static(opp_table); |
8395 |
+ return ret; |
8396 |
+ } |
8397 |
+ nr -= 2; |
8398 |
+diff --git a/drivers/opp/opp.h b/drivers/opp/opp.h |
8399 |
+index 01a500e2c40a1..d14e27102730c 100644 |
8400 |
+--- a/drivers/opp/opp.h |
8401 |
++++ b/drivers/opp/opp.h |
8402 |
+@@ -127,11 +127,10 @@ enum opp_table_access { |
8403 |
+ * @dev_list: list of devices that share these OPPs |
8404 |
+ * @opp_list: table of opps |
8405 |
+ * @kref: for reference count of the table. |
8406 |
+- * @list_kref: for reference count of the OPP list. |
8407 |
+ * @lock: mutex protecting the opp_list and dev_list. |
8408 |
+ * @np: struct device_node pointer for opp's DT node. |
8409 |
+ * @clock_latency_ns_max: Max clock latency in nanoseconds. |
8410 |
+- * @parsed_static_opps: True if OPPs are initialized from DT. |
8411 |
++ * @parsed_static_opps: Count of devices for which OPPs are initialized from DT. |
8412 |
+ * @shared_opp: OPP is shared between multiple devices. |
8413 |
+ * @suspend_opp: Pointer to OPP to be used during device suspend. |
8414 |
+ * @genpd_virt_dev_lock: Mutex protecting the genpd virtual device pointers. |
8415 |
+@@ -167,7 +166,6 @@ struct opp_table { |
8416 |
+ struct list_head dev_list; |
8417 |
+ struct list_head opp_list; |
8418 |
+ struct kref kref; |
8419 |
+- struct kref list_kref; |
8420 |
+ struct mutex lock; |
8421 |
+ |
8422 |
+ struct device_node *np; |
8423 |
+@@ -176,7 +174,7 @@ struct opp_table { |
8424 |
+ /* For backward compatibility with v1 bindings */ |
8425 |
+ unsigned int voltage_tolerance_v1; |
8426 |
+ |
8427 |
+- bool parsed_static_opps; |
8428 |
++ unsigned int parsed_static_opps; |
8429 |
+ enum opp_table_access shared_opp; |
8430 |
+ struct dev_pm_opp *suspend_opp; |
8431 |
+ |
8432 |
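The opp changes above replace the list_kref with a plain user count: parsed_static_opps becomes an unsigned int guarded by opp_table->lock, each successful parse increments it, and only the caller that drops it to zero removes the static OPPs. The counting pattern in a minimal hypothetical form:

    #include <linux/mutex.h>

    static DEFINE_MUTEX(table_lock);
    static unsigned int static_users;   /* plays the role of parsed_static_opps */

    static void get_static_opps(void)
    {
        mutex_lock(&table_lock);
        if (static_users++ == 0) {
            /* first user: parse the static OPPs here */
        }
        mutex_unlock(&table_lock);
    }

    static void put_static_opps(void)
    {
        mutex_lock(&table_lock);
        /* same guard as the patch: bail unless we are the last user */
        if (static_users && --static_users == 0) {
            /* last user: free the static OPPs here */
        }
        mutex_unlock(&table_lock);
    }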
+diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c |
8433 |
+index f89f5acee72d4..c06b05ab9f787 100644 |
8434 |
+--- a/drivers/pci/controller/dwc/pcie-tegra194.c |
8435 |
++++ b/drivers/pci/controller/dwc/pcie-tegra194.c |
8436 |
+@@ -1395,7 +1395,7 @@ static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie) |
8437 |
+ ret = pinctrl_pm_select_default_state(dev); |
8438 |
+ if (ret < 0) { |
8439 |
+ dev_err(dev, "Failed to configure sideband pins: %d\n", ret); |
8440 |
+- goto fail_pinctrl; |
8441 |
++ goto fail_pm_get_sync; |
8442 |
+ } |
8443 |
+ |
8444 |
+ tegra_pcie_init_controller(pcie); |
8445 |
+@@ -1422,9 +1422,8 @@ static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie) |
8446 |
+ |
8447 |
+ fail_host_init: |
8448 |
+ tegra_pcie_deinit_controller(pcie); |
8449 |
+-fail_pinctrl: |
8450 |
+- pm_runtime_put_sync(dev); |
8451 |
+ fail_pm_get_sync: |
8452 |
++ pm_runtime_put_sync(dev); |
8453 |
+ pm_runtime_disable(dev); |
8454 |
+ return ret; |
8455 |
+ } |
8456 |
+diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c |
8457 |
+index b71e753419c2d..cfa3c83d6cc74 100644 |
8458 |
+--- a/drivers/pci/controller/pci-tegra.c |
8459 |
++++ b/drivers/pci/controller/pci-tegra.c |
8460 |
+@@ -2768,7 +2768,7 @@ static int tegra_pcie_probe(struct platform_device *pdev) |
8461 |
+ err = pm_runtime_get_sync(pcie->dev); |
8462 |
+ if (err < 0) { |
8463 |
+ dev_err(dev, "fail to enable pcie controller: %d\n", err); |
8464 |
+- goto teardown_msi; |
8465 |
++ goto pm_runtime_put; |
8466 |
+ } |
8467 |
+ |
8468 |
+ err = tegra_pcie_request_resources(pcie); |
8469 |
+@@ -2808,7 +2808,6 @@ free_resources: |
8470 |
+ pm_runtime_put: |
8471 |
+ pm_runtime_put_sync(pcie->dev); |
8472 |
+ pm_runtime_disable(pcie->dev); |
8473 |
+-teardown_msi: |
8474 |
+ tegra_pcie_msi_teardown(pcie); |
8475 |
+ put_resources: |
8476 |
+ tegra_pcie_put_resources(pcie); |
8477 |
+diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c |
8478 |
+index 356786a3b7f4b..88b996764ff95 100644 |
8479 |
+--- a/drivers/pci/hotplug/pciehp_hpc.c |
8480 |
++++ b/drivers/pci/hotplug/pciehp_hpc.c |
8481 |
+@@ -529,7 +529,7 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id) |
8482 |
+ struct controller *ctrl = (struct controller *)dev_id; |
8483 |
+ struct pci_dev *pdev = ctrl_dev(ctrl); |
8484 |
+ struct device *parent = pdev->dev.parent; |
8485 |
+- u16 status, events; |
8486 |
++ u16 status, events = 0; |
8487 |
+ |
8488 |
+ /* |
8489 |
+ * Interrupts only occur in D3hot or shallower and only if enabled |
8490 |
+@@ -554,6 +554,7 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id) |
8491 |
+ } |
8492 |
+ } |
8493 |
+ |
8494 |
++read_status: |
8495 |
+ pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &status); |
8496 |
+ if (status == (u16) ~0) { |
8497 |
+ ctrl_info(ctrl, "%s: no response from device\n", __func__); |
8498 |
+@@ -566,24 +567,37 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id) |
8499 |
+ * Slot Status contains plain status bits as well as event |
8500 |
+ * notification bits; right now we only want the event bits. |
8501 |
+ */ |
8502 |
+- events = status & (PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD | |
8503 |
+- PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_CC | |
8504 |
+- PCI_EXP_SLTSTA_DLLSC); |
8505 |
++ status &= PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD | |
8506 |
++ PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_CC | |
8507 |
++ PCI_EXP_SLTSTA_DLLSC; |
8508 |
+ |
8509 |
+ /* |
8510 |
+ * If we've already reported a power fault, don't report it again |
8511 |
+ * until we've done something to handle it. |
8512 |
+ */ |
8513 |
+ if (ctrl->power_fault_detected) |
8514 |
+- events &= ~PCI_EXP_SLTSTA_PFD; |
8515 |
++ status &= ~PCI_EXP_SLTSTA_PFD; |
8516 |
+ |
8517 |
++ events |= status; |
8518 |
+ if (!events) { |
8519 |
+ if (parent) |
8520 |
+ pm_runtime_put(parent); |
8521 |
+ return IRQ_NONE; |
8522 |
+ } |
8523 |
+ |
8524 |
+- pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, events); |
8525 |
++ if (status) { |
8526 |
++ pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, events); |
8527 |
++ |
8528 |
++ /* |
8529 |
++ * In MSI mode, all event bits must be zero before the port |
8530 |
++ * will send a new interrupt (PCIe Base Spec r5.0 sec 6.7.3.4). |
8531 |
++ * So re-read the Slot Status register in case a bit was set |
8532 |
++ * between read and write. |
8533 |
++ */ |
8534 |
++ if (pci_dev_msi_enabled(pdev) && !pciehp_poll_mode) |
8535 |
++ goto read_status; |
8536 |
++ } |
8537 |
++ |
8538 |
+ ctrl_dbg(ctrl, "pending interrupts %#06x from Slot Status\n", events); |
8539 |
+ if (parent) |
8540 |
+ pm_runtime_put(parent); |
8541 |
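The pciehp hunk above implements what its in-code comment describes: MSI behaves edge-triggered, and the port only signals again once every Slot Status event bit is clear, so after the write-to-clear the handler re-reads in case another bit latched in between. The control flow, condensed with hypothetical register accessors:

    u16 events = 0, status;

    do {
        status = read_slot_status() & EVENT_BITS;   /* hypothetical */
        events |= status;               /* accumulate across passes */
        if (status)
            write_slot_status(status);  /* RW1C: write 1s to clear */
    } while (status && msi_enabled);    /* loop until a pass finds nothing new */

    if (events)
        handle(events);                 /* act on everything collected */

Without the loop, an event arriving between the read and the write-back would stay set in Slot Status and suppress any further MSI.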
+diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c |
8542 |
+index deec9f9e0b616..9c116cbaa95d8 100644 |
8543 |
+--- a/drivers/pci/iov.c |
8544 |
++++ b/drivers/pci/iov.c |
8545 |
+@@ -253,8 +253,14 @@ static ssize_t sriov_numvfs_show(struct device *dev, |
8546 |
+ char *buf) |
8547 |
+ { |
8548 |
+ struct pci_dev *pdev = to_pci_dev(dev); |
8549 |
++ u16 num_vfs; |
8550 |
++ |
8551 |
++ /* Serialize vs sriov_numvfs_store() so readers see valid num_VFs */ |
8552 |
++ device_lock(&pdev->dev); |
8553 |
++ num_vfs = pdev->sriov->num_VFs; |
8554 |
++ device_unlock(&pdev->dev); |
8555 |
+ |
8556 |
+- return sprintf(buf, "%u\n", pdev->sriov->num_VFs); |
8557 |
++ return sprintf(buf, "%u\n", num_vfs); |
8558 |
+ } |
8559 |
+ |
8560 |
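The iov.c change makes the sysfs reader take the same device_lock() that sriov_numvfs_store() holds while updating num_VFs, so a read cannot observe the value mid-update. The show-side pattern, with a stand-in field:

    #include <linux/device.h>

    static u16 shared_value;    /* stand-in for pdev->sriov->num_VFs */

    static ssize_t value_show(struct device *dev,
                              struct device_attribute *attr, char *buf)
    {
        u16 v;

        device_lock(dev);       /* pairs with the lock the _store() side holds */
        v = shared_value;
        device_unlock(dev);

        return sprintf(buf, "%u\n", v);
    }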
+ /* |
8561 |
+diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c |
8562 |
+index 137bf0cee897c..8fc9a4e911e3a 100644 |
8563 |
+--- a/drivers/pci/rom.c |
8564 |
++++ b/drivers/pci/rom.c |
8565 |
+@@ -195,20 +195,3 @@ void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom) |
8566 |
+ pci_disable_rom(pdev); |
8567 |
+ } |
8568 |
+ EXPORT_SYMBOL(pci_unmap_rom); |
8569 |
+- |
8570 |
+-/** |
8571 |
+- * pci_platform_rom - provides a pointer to any ROM image provided by the |
8572 |
+- * platform |
8573 |
+- * @pdev: pointer to pci device struct |
8574 |
+- * @size: pointer to receive size of pci window over ROM |
8575 |
+- */ |
8576 |
+-void __iomem *pci_platform_rom(struct pci_dev *pdev, size_t *size) |
8577 |
+-{ |
8578 |
+- if (pdev->rom && pdev->romlen) { |
8579 |
+- *size = pdev->romlen; |
8580 |
+- return phys_to_virt((phys_addr_t)pdev->rom); |
8581 |
+- } |
8582 |
+- |
8583 |
+- return NULL; |
8584 |
+-} |
8585 |
+-EXPORT_SYMBOL(pci_platform_rom); |
8586 |
+diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c |
8587 |
+index 5356630e0e483..44f4866d95d8c 100644 |
8588 |
+--- a/drivers/pci/setup-bus.c |
8589 |
++++ b/drivers/pci/setup-bus.c |
8590 |
+@@ -752,24 +752,32 @@ static void pci_bridge_check_ranges(struct pci_bus *bus) |
8591 |
+ } |
8592 |
+ |
8593 |
+ /* |
8594 |
+- * Helper function for sizing routines: find first available bus resource |
8595 |
+- * of a given type. Note: we intentionally skip the bus resources which |
8596 |
+- * have already been assigned (that is, have non-NULL parent resource). |
8597 |
++ * Helper function for sizing routines. Assigned resources have non-NULL |
8598 |
++ * parent resource. |
8599 |
++ * |
8600 |
++ * Return first unassigned resource of the correct type. If there is none, |
8601 |
++ * return first assigned resource of the correct type. If none of the |
8602 |
++ * above, return NULL. |
8603 |
++ * |
8604 |
++ * Returning an assigned resource of the correct type allows the caller to |
8605 |
++ * distinguish between already assigned and no resource of the correct type. |
8606 |
+ */ |
8607 |
+-static struct resource *find_free_bus_resource(struct pci_bus *bus, |
8608 |
+- unsigned long type_mask, |
8609 |
+- unsigned long type) |
8610 |
++static struct resource *find_bus_resource_of_type(struct pci_bus *bus, |
8611 |
++ unsigned long type_mask, |
8612 |
++ unsigned long type) |
8613 |
+ { |
8614 |
++ struct resource *r, *r_assigned = NULL; |
8615 |
+ int i; |
8616 |
+- struct resource *r; |
8617 |
+ |
8618 |
+ pci_bus_for_each_resource(bus, r, i) { |
8619 |
+ if (r == &ioport_resource || r == &iomem_resource) |
8620 |
+ continue; |
8621 |
+ if (r && (r->flags & type_mask) == type && !r->parent) |
8622 |
+ return r; |
8623 |
++ if (r && (r->flags & type_mask) == type && !r_assigned) |
8624 |
++ r_assigned = r; |
8625 |
+ } |
8626 |
+- return NULL; |
8627 |
++ return r_assigned; |
8628 |
+ } |
8629 |
+ |
8630 |
+ static resource_size_t calculate_iosize(resource_size_t size, |
8631 |
+@@ -866,8 +874,8 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size, |
8632 |
+ struct list_head *realloc_head) |
8633 |
+ { |
8634 |
+ struct pci_dev *dev; |
8635 |
+- struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO, |
8636 |
+- IORESOURCE_IO); |
8637 |
++ struct resource *b_res = find_bus_resource_of_type(bus, IORESOURCE_IO, |
8638 |
++ IORESOURCE_IO); |
8639 |
+ resource_size_t size = 0, size0 = 0, size1 = 0; |
8640 |
+ resource_size_t children_add_size = 0; |
8641 |
+ resource_size_t min_align, align; |
8642 |
+@@ -875,6 +883,10 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size, |
8643 |
+ if (!b_res) |
8644 |
+ return; |
8645 |
+ |
8646 |
++ /* If resource is already assigned, nothing more to do */ |
8647 |
++ if (b_res->parent) |
8648 |
++ return; |
8649 |
++ |
8650 |
+ min_align = window_alignment(bus, IORESOURCE_IO); |
8651 |
+ list_for_each_entry(dev, &bus->devices, bus_list) { |
8652 |
+ int i; |
8653 |
+@@ -978,7 +990,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, |
8654 |
+ resource_size_t min_align, align, size, size0, size1; |
8655 |
+ resource_size_t aligns[18]; /* Alignments from 1MB to 128GB */ |
8656 |
+ int order, max_order; |
8657 |
+- struct resource *b_res = find_free_bus_resource(bus, |
8658 |
++ struct resource *b_res = find_bus_resource_of_type(bus, |
8659 |
+ mask | IORESOURCE_PREFETCH, type); |
8660 |
+ resource_size_t children_add_size = 0; |
8661 |
+ resource_size_t children_add_align = 0; |
8662 |
+@@ -987,6 +999,10 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, |
8663 |
+ if (!b_res) |
8664 |
+ return -ENOSPC; |
8665 |
+ |
8666 |
++ /* If resource is already assigned, nothing more to do */ |
8667 |
++ if (b_res->parent) |
8668 |
++ return 0; |
8669 |
++ |
8670 |
+ memset(aligns, 0, sizeof(aligns)); |
8671 |
+ max_order = 0; |
8672 |
+ size = 0; |
8673 |
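The setup-bus.c rename is behavioral: find_bus_resource_of_type() now falls back to the first already-assigned window of the requested type instead of returning NULL, and pbus_size_io()/pbus_size_mem() treat an assigned window as nothing-to-do rather than as -ENOSPC. The lookup rule, sketched over struct resource:

    #include <linux/ioport.h>

    /* First unassigned window of the right type; failing that, the first
     * assigned one; else NULL.  r->parent being set is what marks a
     * resource as assigned, so the caller can tell the two cases apart. */
    static struct resource *find_of_type(struct resource **win, int n,
                                         unsigned long mask, unsigned long type)
    {
        struct resource *assigned = NULL;
        int i;

        for (i = 0; i < n; i++) {
            struct resource *r = win[i];

            if (!r || (r->flags & mask) != type)
                continue;
            if (!r->parent)
                return r;       /* unassigned: still needs sizing */
            if (!assigned)
                assigned = r;   /* remember the fallback */
        }
        return assigned;
    }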
+diff --git a/drivers/phy/samsung/phy-s5pv210-usb2.c b/drivers/phy/samsung/phy-s5pv210-usb2.c |
8674 |
+index 56a5083fe6f94..32be62e498047 100644 |
8675 |
+--- a/drivers/phy/samsung/phy-s5pv210-usb2.c |
8676 |
++++ b/drivers/phy/samsung/phy-s5pv210-usb2.c |
8677 |
+@@ -139,6 +139,10 @@ static void s5pv210_phy_pwr(struct samsung_usb2_phy_instance *inst, bool on) |
8678 |
+ udelay(10); |
8679 |
+ rst &= ~rstbits; |
8680 |
+ writel(rst, drv->reg_phy + S5PV210_UPHYRST); |
8681 |
++ /* The following delay is necessary for the reset sequence to be |
8682 |
++ * completed |
8683 |
++ */ |
8684 |
++ udelay(80); |
8685 |
+ } else { |
8686 |
+ pwr = readl(drv->reg_phy + S5PV210_UPHYPWR); |
8687 |
+ pwr |= phypwr; |
8688 |
+diff --git a/drivers/power/supply/max17040_battery.c b/drivers/power/supply/max17040_battery.c |
8689 |
+index 62499018e68bf..2e845045a3fc0 100644 |
8690 |
+--- a/drivers/power/supply/max17040_battery.c |
8691 |
++++ b/drivers/power/supply/max17040_battery.c |
8692 |
+@@ -105,7 +105,7 @@ static void max17040_get_vcell(struct i2c_client *client) |
8693 |
+ |
8694 |
+ vcell = max17040_read_reg(client, MAX17040_VCELL); |
8695 |
+ |
8696 |
+- chip->vcell = vcell; |
8697 |
++ chip->vcell = (vcell >> 4) * 1250; |
8698 |
+ } |
8699 |
+ |
8700 |
+ static void max17040_get_soc(struct i2c_client *client) |
8701 |
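The max17040 fix stores microvolts instead of the raw VCELL word: the 12-bit reading sits in the upper bits of the 16-bit register, so shifting right by 4 yields ADC counts at 1.25 mV (1250 uV) per LSB, hence (vcell >> 4) * 1250. Worked through for an example raw value, assuming that layout:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t raw = 0xAEC0;          /* example VCELL register read */
        uint32_t counts = raw >> 4;     /* 12-bit value: 0x0AEC = 2796 */
        uint32_t uv = counts * 1250;    /* 1.25 mV per count */

        printf("%u uV (%.3f V)\n", uv, uv / 1e6);   /* 3495000 uV (3.495 V) */
        return 0;
    }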
+diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c |
8702 |
+index 10af330153b5e..0b85a80ae7ef6 100644 |
8703 |
+--- a/drivers/rapidio/devices/rio_mport_cdev.c |
8704 |
++++ b/drivers/rapidio/devices/rio_mport_cdev.c |
8705 |
+@@ -2384,13 +2384,6 @@ static struct mport_dev *mport_cdev_add(struct rio_mport *mport) |
8706 |
+ cdev_init(&md->cdev, &mport_fops); |
8707 |
+ md->cdev.owner = THIS_MODULE; |
8708 |
+ |
8709 |
+- ret = cdev_device_add(&md->cdev, &md->dev); |
8710 |
+- if (ret) { |
8711 |
+- rmcd_error("Failed to register mport %d (err=%d)", |
8712 |
+- mport->id, ret); |
8713 |
+- goto err_cdev; |
8714 |
+- } |
8715 |
+- |
8716 |
+ INIT_LIST_HEAD(&md->doorbells); |
8717 |
+ spin_lock_init(&md->db_lock); |
8718 |
+ INIT_LIST_HEAD(&md->portwrites); |
8719 |
+@@ -2410,6 +2403,13 @@ static struct mport_dev *mport_cdev_add(struct rio_mport *mport) |
8720 |
+ #else |
8721 |
+ md->properties.transfer_mode |= RIO_TRANSFER_MODE_TRANSFER; |
8722 |
+ #endif |
8723 |
++ |
8724 |
++ ret = cdev_device_add(&md->cdev, &md->dev); |
8725 |
++ if (ret) { |
8726 |
++ rmcd_error("Failed to register mport %d (err=%d)", |
8727 |
++ mport->id, ret); |
8728 |
++ goto err_cdev; |
8729 |
++ } |
8730 |
+ ret = rio_query_mport(mport, &attr); |
8731 |
+ if (!ret) { |
8732 |
+ md->properties.flags = attr.flags; |
8733 |
+diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c |
8734 |
+index 16f0c85700360..7075f42b9fcf6 100644 |
8735 |
+--- a/drivers/regulator/axp20x-regulator.c |
8736 |
++++ b/drivers/regulator/axp20x-regulator.c |
8737 |
+@@ -42,8 +42,9 @@ |
8738 |
+ |
8739 |
+ #define AXP20X_DCDC2_V_OUT_MASK GENMASK(5, 0) |
8740 |
+ #define AXP20X_DCDC3_V_OUT_MASK GENMASK(7, 0) |
8741 |
+-#define AXP20X_LDO24_V_OUT_MASK GENMASK(7, 4) |
8742 |
++#define AXP20X_LDO2_V_OUT_MASK GENMASK(7, 4) |
8743 |
+ #define AXP20X_LDO3_V_OUT_MASK GENMASK(6, 0) |
8744 |
++#define AXP20X_LDO4_V_OUT_MASK GENMASK(3, 0) |
8745 |
+ #define AXP20X_LDO5_V_OUT_MASK GENMASK(7, 4) |
8746 |
+ |
8747 |
+ #define AXP20X_PWR_OUT_EXTEN_MASK BIT_MASK(0) |
8748 |
+@@ -544,14 +545,14 @@ static const struct regulator_desc axp20x_regulators[] = { |
8749 |
+ AXP20X_PWR_OUT_CTRL, AXP20X_PWR_OUT_DCDC3_MASK), |
8750 |
+ AXP_DESC_FIXED(AXP20X, LDO1, "ldo1", "acin", 1300), |
8751 |
+ AXP_DESC(AXP20X, LDO2, "ldo2", "ldo24in", 1800, 3300, 100, |
8752 |
+- AXP20X_LDO24_V_OUT, AXP20X_LDO24_V_OUT_MASK, |
8753 |
++ AXP20X_LDO24_V_OUT, AXP20X_LDO2_V_OUT_MASK, |
8754 |
+ AXP20X_PWR_OUT_CTRL, AXP20X_PWR_OUT_LDO2_MASK), |
8755 |
+ AXP_DESC(AXP20X, LDO3, "ldo3", "ldo3in", 700, 3500, 25, |
8756 |
+ AXP20X_LDO3_V_OUT, AXP20X_LDO3_V_OUT_MASK, |
8757 |
+ AXP20X_PWR_OUT_CTRL, AXP20X_PWR_OUT_LDO3_MASK), |
8758 |
+ AXP_DESC_RANGES(AXP20X, LDO4, "ldo4", "ldo24in", |
8759 |
+ axp20x_ldo4_ranges, AXP20X_LDO4_V_OUT_NUM_VOLTAGES, |
8760 |
+- AXP20X_LDO24_V_OUT, AXP20X_LDO24_V_OUT_MASK, |
8761 |
++ AXP20X_LDO24_V_OUT, AXP20X_LDO4_V_OUT_MASK, |
8762 |
+ AXP20X_PWR_OUT_CTRL, AXP20X_PWR_OUT_LDO4_MASK), |
8763 |
+ AXP_DESC_IO(AXP20X, LDO5, "ldo5", "ldo5in", 1800, 3300, 100, |
8764 |
+ AXP20X_LDO5_V_OUT, AXP20X_LDO5_V_OUT_MASK, |
8765 |
+diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c |
8766 |
+index 367497914c100..28eb96cbaf98b 100644 |
8767 |
+--- a/drivers/rtc/rtc-ds1374.c |
8768 |
++++ b/drivers/rtc/rtc-ds1374.c |
8769 |
+@@ -620,6 +620,10 @@ static int ds1374_probe(struct i2c_client *client, |
8770 |
+ if (!ds1374) |
8771 |
+ return -ENOMEM; |
8772 |
+ |
8773 |
++ ds1374->rtc = devm_rtc_allocate_device(&client->dev); |
8774 |
++ if (IS_ERR(ds1374->rtc)) |
8775 |
++ return PTR_ERR(ds1374->rtc); |
8776 |
++ |
8777 |
+ ds1374->client = client; |
8778 |
+ i2c_set_clientdata(client, ds1374); |
8779 |
+ |
8780 |
+@@ -641,12 +645,11 @@ static int ds1374_probe(struct i2c_client *client, |
8781 |
+ device_set_wakeup_capable(&client->dev, 1); |
8782 |
+ } |
8783 |
+ |
8784 |
+- ds1374->rtc = devm_rtc_device_register(&client->dev, client->name, |
8785 |
+- &ds1374_rtc_ops, THIS_MODULE); |
8786 |
+- if (IS_ERR(ds1374->rtc)) { |
8787 |
+- dev_err(&client->dev, "unable to register the class device\n"); |
8788 |
+- return PTR_ERR(ds1374->rtc); |
8789 |
+- } |
8790 |
++ ds1374->rtc->ops = &ds1374_rtc_ops; |
8791 |
++ |
8792 |
++ ret = rtc_register_device(ds1374->rtc); |
8793 |
++ if (ret) |
8794 |
++ return ret; |
8795 |
+ |
8796 |
+ #ifdef CONFIG_RTC_DRV_DS1374_WDT |
8797 |
+ save_client = client; |
8798 |
+diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c |
8799 |
+index 86fa723b3b762..795273269d58e 100644 |
8800 |
+--- a/drivers/rtc/rtc-sa1100.c |
8801 |
++++ b/drivers/rtc/rtc-sa1100.c |
8802 |
+@@ -182,7 +182,6 @@ static const struct rtc_class_ops sa1100_rtc_ops = { |
8803 |
+ |
8804 |
+ int sa1100_rtc_init(struct platform_device *pdev, struct sa1100_rtc *info) |
8805 |
+ { |
8806 |
+- struct rtc_device *rtc; |
8807 |
+ int ret; |
8808 |
+ |
8809 |
+ spin_lock_init(&info->lock); |
8810 |
+@@ -211,15 +210,14 @@ int sa1100_rtc_init(struct platform_device *pdev, struct sa1100_rtc *info) |
8811 |
+ writel_relaxed(0, info->rcnr); |
8812 |
+ } |
8813 |
+ |
8814 |
+- rtc = devm_rtc_device_register(&pdev->dev, pdev->name, &sa1100_rtc_ops, |
8815 |
+- THIS_MODULE); |
8816 |
+- if (IS_ERR(rtc)) { |
8817 |
++ info->rtc->ops = &sa1100_rtc_ops; |
8818 |
++ info->rtc->max_user_freq = RTC_FREQ; |
8819 |
++ |
8820 |
++ ret = rtc_register_device(info->rtc); |
8821 |
++ if (ret) { |
8822 |
+ clk_disable_unprepare(info->clk); |
8823 |
+- return PTR_ERR(rtc); |
8824 |
++ return ret; |
8825 |
+ } |
8826 |
+- info->rtc = rtc; |
8827 |
+- |
8828 |
+- rtc->max_user_freq = RTC_FREQ; |
8829 |
+ |
8830 |
+ /* Fix for a nasty initialization problem the in SA11xx RTSR register. |
8831 |
+ * See also the comments in sa1100_rtc_interrupt(). |
8832 |
+@@ -268,6 +266,10 @@ static int sa1100_rtc_probe(struct platform_device *pdev) |
8833 |
+ info->irq_1hz = irq_1hz; |
8834 |
+ info->irq_alarm = irq_alarm; |
8835 |
+ |
8836 |
++ info->rtc = devm_rtc_allocate_device(&pdev->dev); |
8837 |
++ if (IS_ERR(info->rtc)) |
8838 |
++ return PTR_ERR(info->rtc); |
8839 |
++ |
8840 |
+ ret = devm_request_irq(&pdev->dev, irq_1hz, sa1100_rtc_interrupt, 0, |
8841 |
+ "rtc 1Hz", &pdev->dev); |
8842 |
+ if (ret) { |
8843 |
+diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c |
8844 |
+index cbb770824226f..1a44e321b54e1 100644 |
8845 |
+--- a/drivers/s390/block/dasd_fba.c |
8846 |
++++ b/drivers/s390/block/dasd_fba.c |
8847 |
+@@ -40,6 +40,7 @@ |
8848 |
+ MODULE_LICENSE("GPL"); |
8849 |
+ |
8850 |
+ static struct dasd_discipline dasd_fba_discipline; |
8851 |
++static void *dasd_fba_zero_page; |
8852 |
+ |
8853 |
+ struct dasd_fba_private { |
8854 |
+ struct dasd_fba_characteristics rdc_data; |
8855 |
+@@ -270,7 +271,7 @@ static void ccw_write_zero(struct ccw1 *ccw, int count) |
8856 |
+ ccw->cmd_code = DASD_FBA_CCW_WRITE; |
8857 |
+ ccw->flags |= CCW_FLAG_SLI; |
8858 |
+ ccw->count = count; |
8859 |
+- ccw->cda = (__u32) (addr_t) page_to_phys(ZERO_PAGE(0)); |
8860 |
++ ccw->cda = (__u32) (addr_t) dasd_fba_zero_page; |
8861 |
+ } |
8862 |
+ |
8863 |
+ /* |
8864 |
+@@ -830,6 +831,11 @@ dasd_fba_init(void) |
8865 |
+ int ret; |
8866 |
+ |
8867 |
+ ASCEBC(dasd_fba_discipline.ebcname, 4); |
8868 |
++ |
8869 |
++ dasd_fba_zero_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); |
8870 |
++ if (!dasd_fba_zero_page) |
8871 |
++ return -ENOMEM; |
8872 |
++ |
8873 |
+ ret = ccw_driver_register(&dasd_fba_driver); |
8874 |
+ if (!ret) |
8875 |
+ wait_for_device_probe(); |
8876 |
+@@ -841,6 +847,7 @@ static void __exit |
8877 |
+ dasd_fba_cleanup(void) |
8878 |
+ { |
8879 |
+ ccw_driver_unregister(&dasd_fba_driver); |
8880 |
++ free_page((unsigned long)dasd_fba_zero_page); |
8881 |
+ } |
8882 |
+ |
8883 |
+ module_init(dasd_fba_init); |
8884 |
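The dasd_fba change stops pointing ccw->cda at the globally shared ZERO_PAGE and allocates a driver-owned zeroed page with GFP_DMA, presumably so the source of the zero-write is guaranteed to sit in low memory and fit the 31-bit CCW data address. The module-lifetime allocation pattern it uses:

    #include <linux/gfp.h>
    #include <linux/module.h>

    static void *zero_page;

    static int __init my_init(void)
    {
        /* GFP_DMA keeps the page in low memory; it stays zeroed because
         * it is only ever a read source for the channel program */
        zero_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!zero_page)
            return -ENOMEM;
        return 0;
    }

    static void __exit my_exit(void)
    {
        free_page((unsigned long)zero_page);
    }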
+diff --git a/drivers/s390/cio/airq.c b/drivers/s390/cio/airq.c |
8885 |
+index 427b2e24a8cea..cb466ed7eb5ef 100644 |
8886 |
+--- a/drivers/s390/cio/airq.c |
8887 |
++++ b/drivers/s390/cio/airq.c |
8888 |
+@@ -105,16 +105,12 @@ static irqreturn_t do_airq_interrupt(int irq, void *dummy) |
8889 |
+ return IRQ_HANDLED; |
8890 |
+ } |
8891 |
+ |
8892 |
+-static struct irqaction airq_interrupt = { |
8893 |
+- .name = "AIO", |
8894 |
+- .handler = do_airq_interrupt, |
8895 |
+-}; |
8896 |
+- |
8897 |
+ void __init init_airq_interrupts(void) |
8898 |
+ { |
8899 |
+ irq_set_chip_and_handler(THIN_INTERRUPT, |
8900 |
+ &dummy_irq_chip, handle_percpu_irq); |
8901 |
+- setup_irq(THIN_INTERRUPT, &airq_interrupt); |
8902 |
++ if (request_irq(THIN_INTERRUPT, do_airq_interrupt, 0, "AIO", NULL)) |
8903 |
++ panic("Failed to register AIO interrupt\n"); |
8904 |
+ } |
8905 |
+ |
8906 |
+ static inline unsigned long iv_size(unsigned long bits) |
8907 |
+diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c |
8908 |
+index 18f5458f90e8f..6d716db2a46ab 100644 |
8909 |
+--- a/drivers/s390/cio/cio.c |
8910 |
++++ b/drivers/s390/cio/cio.c |
8911 |
+@@ -563,16 +563,12 @@ static irqreturn_t do_cio_interrupt(int irq, void *dummy) |
8912 |
+ return IRQ_HANDLED; |
8913 |
+ } |
8914 |
+ |
8915 |
+-static struct irqaction io_interrupt = { |
8916 |
+- .name = "I/O", |
8917 |
+- .handler = do_cio_interrupt, |
8918 |
+-}; |
8919 |
+- |
8920 |
+ void __init init_cio_interrupts(void) |
8921 |
+ { |
8922 |
+ irq_set_chip_and_handler(IO_INTERRUPT, |
8923 |
+ &dummy_irq_chip, handle_percpu_irq); |
8924 |
+- setup_irq(IO_INTERRUPT, &io_interrupt); |
8925 |
++ if (request_irq(IO_INTERRUPT, do_cio_interrupt, 0, "I/O", NULL)) |
8926 |
++ panic("Failed to register I/O interrupt\n"); |
8927 |
+ } |
8928 |
+ |
8929 |
+ #ifdef CONFIG_CCW_CONSOLE |
8930 |
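Both cio hunks above are the mechanical setup_irq() to request_irq() conversion (the static struct irqaction interface was removed upstream); these interrupts are essential and their handlers are never freed, so a registration failure panics and dev_id is NULL. The same conversion for a hypothetical driver:

    #include <linux/interrupt.h>

    static irqreturn_t my_handler(int irq, void *dev_id)
    {
        return IRQ_HANDLED;
    }

    static int __init my_irq_init(unsigned int irq)
    {
        /* flags = 0 and dev_id = NULL: permanent handler, no context */
        return request_irq(irq, my_handler, 0, "my-irq", NULL);
    }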
+diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c |
8931 |
+index 7fa0262e91af0..ec41a8a76398c 100644 |
8932 |
+--- a/drivers/s390/crypto/zcrypt_api.c |
8933 |
++++ b/drivers/s390/crypto/zcrypt_api.c |
8934 |
+@@ -1419,7 +1419,8 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd, |
8935 |
+ if (!reqcnt) |
8936 |
+ return -ENOMEM; |
8937 |
+ zcrypt_perdev_reqcnt(reqcnt, AP_DEVICES); |
8938 |
+- if (copy_to_user((int __user *) arg, reqcnt, sizeof(reqcnt))) |
8939 |
++ if (copy_to_user((int __user *) arg, reqcnt, |
8940 |
++ sizeof(u32) * AP_DEVICES)) |
8941 |
+ rc = -EFAULT; |
8942 |
+ kfree(reqcnt); |
8943 |
+ return rc; |
8944 |
+diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c |
8945 |
+index 0ed3f806ace54..2388143d59f5d 100644 |
8946 |
+--- a/drivers/scsi/aacraid/aachba.c |
8947 |
++++ b/drivers/scsi/aacraid/aachba.c |
8948 |
+@@ -2467,13 +2467,13 @@ static int aac_read(struct scsi_cmnd * scsicmd) |
8949 |
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | |
8950 |
+ SAM_STAT_CHECK_CONDITION; |
8951 |
+ set_sense(&dev->fsa_dev[cid].sense_data, |
8952 |
+- HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE, |
8953 |
++ ILLEGAL_REQUEST, SENCODE_LBA_OUT_OF_RANGE, |
8954 |
+ ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0); |
8955 |
+ memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, |
8956 |
+ min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data), |
8957 |
+ SCSI_SENSE_BUFFERSIZE)); |
8958 |
+ scsicmd->scsi_done(scsicmd); |
8959 |
+- return 1; |
8960 |
++ return 0; |
8961 |
+ } |
8962 |
+ |
8963 |
+ dprintk((KERN_DEBUG "aac_read[cpu %d]: lba = %llu, t = %ld.\n", |
8964 |
+@@ -2559,13 +2559,13 @@ static int aac_write(struct scsi_cmnd * scsicmd) |
8965 |
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | |
8966 |
+ SAM_STAT_CHECK_CONDITION; |
8967 |
+ set_sense(&dev->fsa_dev[cid].sense_data, |
8968 |
+- HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE, |
8969 |
++ ILLEGAL_REQUEST, SENCODE_LBA_OUT_OF_RANGE, |
8970 |
+ ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0); |
8971 |
+ memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, |
8972 |
+ min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data), |
8973 |
+ SCSI_SENSE_BUFFERSIZE)); |
8974 |
+ scsicmd->scsi_done(scsicmd); |
8975 |
+- return 1; |
8976 |
++ return 0; |
8977 |
+ } |
8978 |
+ |
8979 |
+ dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %llu, t = %ld.\n", |
8980 |
+diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c |
8981 |
+index 2142a649e865b..90fb17c5dd69c 100644 |
8982 |
+--- a/drivers/scsi/aacraid/commsup.c |
8983 |
++++ b/drivers/scsi/aacraid/commsup.c |
8984 |
+@@ -728,7 +728,7 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback, |
8985 |
+ hbacmd->request_id = |
8986 |
+ cpu_to_le32((((u32)(fibptr - dev->fibs)) << 2) + 1); |
8987 |
+ fibptr->flags |= FIB_CONTEXT_FLAG_SCSI_CMD; |
8988 |
+- } else if (command != HBA_IU_TYPE_SCSI_TM_REQ) |
8989 |
++ } else |
8990 |
+ return -EINVAL; |
8991 |
+ |
8992 |
+ |
8993 |
+diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c |
8994 |
+index 4a858789e6c5e..1035f947f1bcf 100644 |
8995 |
+--- a/drivers/scsi/aacraid/linit.c |
8996 |
++++ b/drivers/scsi/aacraid/linit.c |
8997 |
+@@ -723,7 +723,11 @@ static int aac_eh_abort(struct scsi_cmnd* cmd) |
8998 |
+ status = aac_hba_send(HBA_IU_TYPE_SCSI_TM_REQ, fib, |
8999 |
+ (fib_callback) aac_hba_callback, |
9000 |
+ (void *) cmd); |
9001 |
+- |
9002 |
++ if (status != -EINPROGRESS) { |
9003 |
++ aac_fib_complete(fib); |
9004 |
++ aac_fib_free(fib); |
9005 |
++ return ret; |
9006 |
++ } |
9007 |
+ /* Wait up to 15 secs for completion */ |
9008 |
+ for (count = 0; count < 15; ++count) { |
9009 |
+ if (cmd->SCp.sent_command) { |
9010 |
+@@ -902,11 +906,11 @@ static int aac_eh_dev_reset(struct scsi_cmnd *cmd) |
9011 |
+ |
9012 |
+ info = &aac->hba_map[bus][cid]; |
9013 |
+ |
9014 |
+- if (info->devtype != AAC_DEVTYPE_NATIVE_RAW && |
9015 |
+- info->reset_state > 0) |
9016 |
++ if (!(info->devtype == AAC_DEVTYPE_NATIVE_RAW && |
9017 |
++ !(info->reset_state > 0))) |
9018 |
+ return FAILED; |
9019 |
+ |
9020 |
+- pr_err("%s: Host adapter reset request. SCSI hang ?\n", |
9021 |
++ pr_err("%s: Host device reset request. SCSI hang ?\n", |
9022 |
+ AAC_DRIVERNAME); |
9023 |
+ |
9024 |
+ fib = aac_fib_alloc(aac); |
9025 |
+@@ -921,7 +925,12 @@ static int aac_eh_dev_reset(struct scsi_cmnd *cmd) |
9026 |
+ status = aac_hba_send(command, fib, |
9027 |
+ (fib_callback) aac_tmf_callback, |
9028 |
+ (void *) info); |
9029 |
+- |
9030 |
++ if (status != -EINPROGRESS) { |
9031 |
++ info->reset_state = 0; |
9032 |
++ aac_fib_complete(fib); |
9033 |
++ aac_fib_free(fib); |
9034 |
++ return ret; |
9035 |
++ } |
9036 |
+ /* Wait up to 15 seconds for completion */ |
9037 |
+ for (count = 0; count < 15; ++count) { |
9038 |
+ if (info->reset_state == 0) { |
9039 |
+@@ -960,11 +969,11 @@ static int aac_eh_target_reset(struct scsi_cmnd *cmd) |
9040 |
+ |
9041 |
+ info = &aac->hba_map[bus][cid]; |
9042 |
+ |
9043 |
+- if (info->devtype != AAC_DEVTYPE_NATIVE_RAW && |
9044 |
+- info->reset_state > 0) |
9045 |
++ if (!(info->devtype == AAC_DEVTYPE_NATIVE_RAW && |
9046 |
++ !(info->reset_state > 0))) |
9047 |
+ return FAILED; |
9048 |
+ |
9049 |
+- pr_err("%s: Host adapter reset request. SCSI hang ?\n", |
9050 |
++ pr_err("%s: Host target reset request. SCSI hang ?\n", |
9051 |
+ AAC_DRIVERNAME); |
9052 |
+ |
9053 |
+ fib = aac_fib_alloc(aac); |
9054 |
+@@ -981,6 +990,13 @@ static int aac_eh_target_reset(struct scsi_cmnd *cmd) |
9055 |
+ (fib_callback) aac_tmf_callback, |
9056 |
+ (void *) info); |
9057 |
+ |
9058 |
++ if (status != -EINPROGRESS) { |
9059 |
++ info->reset_state = 0; |
9060 |
++ aac_fib_complete(fib); |
9061 |
++ aac_fib_free(fib); |
9062 |
++ return ret; |
9063 |
++ } |
9064 |
++ |
9065 |
+ /* Wait up to 15 seconds for completion */ |
9066 |
+ for (count = 0; count < 15; ++count) { |
9067 |
+ if (info->reset_state <= 0) { |
9068 |
+@@ -1033,7 +1049,7 @@ static int aac_eh_bus_reset(struct scsi_cmnd* cmd) |
9069 |
+ } |
9070 |
+ } |
9071 |
+ |
9072 |
+- pr_err("%s: Host adapter reset request. SCSI hang ?\n", AAC_DRIVERNAME); |
9073 |
++ pr_err("%s: Host bus reset request. SCSI hang ?\n", AAC_DRIVERNAME); |
9074 |
+ |
9075 |
+ /* |
9076 |
+ * Check the health of the controller |
9077 |
+@@ -1591,7 +1607,7 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) |
9078 |
+ struct Scsi_Host *shost; |
9079 |
+ struct aac_dev *aac; |
9080 |
+ struct list_head *insert = &aac_devices; |
9081 |
+- int error = -ENODEV; |
9082 |
++ int error; |
9083 |
+ int unique_id = 0; |
9084 |
+ u64 dmamask; |
9085 |
+ int mask_bits = 0; |
9086 |
+@@ -1616,7 +1632,6 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) |
9087 |
+ error = pci_enable_device(pdev); |
9088 |
+ if (error) |
9089 |
+ goto out; |
9090 |
+- error = -ENODEV; |
9091 |
+ |
9092 |
+ if (!(aac_drivers[index].quirks & AAC_QUIRK_SRC)) { |
9093 |
+ error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); |
9094 |
+@@ -1648,8 +1663,10 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) |
9095 |
+ pci_set_master(pdev); |
9096 |
+ |
9097 |
+ shost = scsi_host_alloc(&aac_driver_template, sizeof(struct aac_dev)); |
9098 |
+- if (!shost) |
9099 |
++ if (!shost) { |
9100 |
++ error = -ENOMEM; |
9101 |
+ goto out_disable_pdev; |
9102 |
++ } |
9103 |
+ |
9104 |
+ shost->irq = pdev->irq; |
9105 |
+ shost->unique_id = unique_id; |
9106 |
+@@ -1674,8 +1691,11 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) |
9107 |
+ aac->fibs = kcalloc(shost->can_queue + AAC_NUM_MGT_FIB, |
9108 |
+ sizeof(struct fib), |
9109 |
+ GFP_KERNEL); |
9110 |
+- if (!aac->fibs) |
9111 |
++ if (!aac->fibs) { |
9112 |
++ error = -ENOMEM; |
9113 |
+ goto out_free_host; |
9114 |
++ } |
9115 |
++ |
9116 |
+ spin_lock_init(&aac->fib_lock); |
9117 |
+ |
9118 |
+ mutex_init(&aac->ioctl_mutex); |
9119 |
+diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c |
9120 |
+index 93ef97af22df4..67d681c53c295 100644 |
9121 |
+--- a/drivers/scsi/cxlflash/main.c |
9122 |
++++ b/drivers/scsi/cxlflash/main.c |
9123 |
+@@ -3746,6 +3746,7 @@ static int cxlflash_probe(struct pci_dev *pdev, |
9124 |
+ cfg->afu_cookie = cfg->ops->create_afu(pdev); |
9125 |
+ if (unlikely(!cfg->afu_cookie)) { |
9126 |
+ dev_err(dev, "%s: create_afu failed\n", __func__); |
9127 |
++ rc = -ENOMEM; |
9128 |
+ goto out_remove; |
9129 |
+ } |
9130 |
+ |
9131 |
+diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c |
9132 |
+index e3f5c91d5e4fe..b60795893994c 100644 |
9133 |
+--- a/drivers/scsi/fnic/fnic_scsi.c |
9134 |
++++ b/drivers/scsi/fnic/fnic_scsi.c |
9135 |
+@@ -1027,7 +1027,8 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, |
9136 |
+ atomic64_inc(&fnic_stats->io_stats.io_completions); |
9137 |
+ |
9138 |
+ |
9139 |
+- io_duration_time = jiffies_to_msecs(jiffies) - jiffies_to_msecs(io_req->start_time); |
9140 |
++ io_duration_time = jiffies_to_msecs(jiffies) - |
9141 |
++ jiffies_to_msecs(start_time); |
9142 |
+ |
9143 |
+ if(io_duration_time <= 10) |
9144 |
+ atomic64_inc(&fnic_stats->io_stats.io_btw_0_to_10_msec); |
9145 |
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c |
9146 |
+index 216e557f703e6..e67cb4561aace 100644 |
9147 |
+--- a/drivers/scsi/hpsa.c |
9148 |
++++ b/drivers/scsi/hpsa.c |
9149 |
+@@ -504,6 +504,12 @@ static ssize_t host_store_rescan(struct device *dev, |
9150 |
+ return count; |
9151 |
+ } |
9152 |
+ |
9153 |
++static void hpsa_turn_off_ioaccel_for_device(struct hpsa_scsi_dev_t *device) |
9154 |
++{ |
9155 |
++ device->offload_enabled = 0; |
9156 |
++ device->offload_to_be_enabled = 0; |
9157 |
++} |
9158 |
++ |
9159 |
+ static ssize_t host_show_firmware_revision(struct device *dev, |
9160 |
+ struct device_attribute *attr, char *buf) |
9161 |
+ { |
9162 |
+@@ -1738,8 +1744,7 @@ static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h, |
9163 |
+ __func__, |
9164 |
+ h->scsi_host->host_no, logical_drive->bus, |
9165 |
+ logical_drive->target, logical_drive->lun); |
9166 |
+- logical_drive->offload_enabled = 0; |
9167 |
+- logical_drive->offload_to_be_enabled = 0; |
9168 |
++ hpsa_turn_off_ioaccel_for_device(logical_drive); |
9169 |
+ logical_drive->queue_depth = 8; |
9170 |
+ } |
9171 |
+ } |
9172 |
+@@ -2499,8 +2504,7 @@ static void process_ioaccel2_completion(struct ctlr_info *h, |
9173 |
+ IOACCEL2_SERV_RESPONSE_FAILURE) { |
9174 |
+ if (c2->error_data.status == |
9175 |
+ IOACCEL2_STATUS_SR_IOACCEL_DISABLED) { |
9176 |
+- dev->offload_enabled = 0; |
9177 |
+- dev->offload_to_be_enabled = 0; |
9178 |
++ hpsa_turn_off_ioaccel_for_device(dev); |
9179 |
+ } |
9180 |
+ |
9181 |
+ if (dev->in_reset) { |
9182 |
+@@ -3670,10 +3674,17 @@ static void hpsa_get_ioaccel_status(struct ctlr_info *h, |
9183 |
+ this_device->offload_config = |
9184 |
+ !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT); |
9185 |
+ if (this_device->offload_config) { |
9186 |
+- this_device->offload_to_be_enabled = |
9187 |
++ bool offload_enabled = |
9188 |
+ !!(ioaccel_status & OFFLOAD_ENABLED_BIT); |
9189 |
+- if (hpsa_get_raid_map(h, scsi3addr, this_device)) |
9190 |
+- this_device->offload_to_be_enabled = 0; |
9191 |
++ /* |
9192 |
++ * Check to see if offload can be enabled. |
9193 |
++ */ |
9194 |
++ if (offload_enabled) { |
9195 |
++ rc = hpsa_get_raid_map(h, scsi3addr, this_device); |
9196 |
++ if (rc) /* could not load raid_map */ |
9197 |
++ goto out; |
9198 |
++ this_device->offload_to_be_enabled = 1; |
9199 |
++ } |
9200 |
+ } |
9201 |
+ |
9202 |
+ out: |
9203 |
+@@ -3996,8 +4007,7 @@ static int hpsa_update_device_info(struct ctlr_info *h, |
9204 |
+ } else { |
9205 |
+ this_device->raid_level = RAID_UNKNOWN; |
9206 |
+ this_device->offload_config = 0; |
9207 |
+- this_device->offload_enabled = 0; |
9208 |
+- this_device->offload_to_be_enabled = 0; |
9209 |
++ hpsa_turn_off_ioaccel_for_device(this_device); |
9210 |
+ this_device->hba_ioaccel_enabled = 0; |
9211 |
+ this_device->volume_offline = 0; |
9212 |
+ this_device->queue_depth = h->nr_cmds; |
9213 |
+@@ -5230,8 +5240,12 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, |
9214 |
+ /* Handles load balance across RAID 1 members. |
9215 |
+ * (2-drive R1 and R10 with even # of drives.) |
9216 |
+ * Appropriate for SSDs, not optimal for HDDs |
9217 |
++ * Ensure we have the correct raid_map. |
9218 |
+ */ |
9219 |
+- BUG_ON(le16_to_cpu(map->layout_map_count) != 2); |
9220 |
++ if (le16_to_cpu(map->layout_map_count) != 2) { |
9221 |
++ hpsa_turn_off_ioaccel_for_device(dev); |
9222 |
++ return IO_ACCEL_INELIGIBLE; |
9223 |
++ } |
9224 |
+ if (dev->offload_to_mirror) |
9225 |
+ map_index += le16_to_cpu(map->data_disks_per_row); |
9226 |
+ dev->offload_to_mirror = !dev->offload_to_mirror; |
9227 |
+@@ -5239,8 +5253,12 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, |
9228 |
+ case HPSA_RAID_ADM: |
9229 |
+ /* Handles N-way mirrors (R1-ADM) |
9230 |
+ * and R10 with # of drives divisible by 3.) |
9231 |
++ * Ensure we have the correct raid_map. |
9232 |
+ */ |
9233 |
+- BUG_ON(le16_to_cpu(map->layout_map_count) != 3); |
9234 |
++ if (le16_to_cpu(map->layout_map_count) != 3) { |
9235 |
++ hpsa_turn_off_ioaccel_for_device(dev); |
9236 |
++ return IO_ACCEL_INELIGIBLE; |
9237 |
++ } |
9238 |
+ |
9239 |
+ offload_to_mirror = dev->offload_to_mirror; |
9240 |
+ raid_map_helper(map, offload_to_mirror, |
9241 |
+@@ -5265,7 +5283,10 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, |
9242 |
+ r5or6_blocks_per_row = |
9243 |
+ le16_to_cpu(map->strip_size) * |
9244 |
+ le16_to_cpu(map->data_disks_per_row); |
9245 |
+- BUG_ON(r5or6_blocks_per_row == 0); |
9246 |
++ if (r5or6_blocks_per_row == 0) { |
9247 |
++ hpsa_turn_off_ioaccel_for_device(dev); |
9248 |
++ return IO_ACCEL_INELIGIBLE; |
9249 |
++ } |
9250 |
+ stripesize = r5or6_blocks_per_row * |
9251 |
+ le16_to_cpu(map->layout_map_count); |
9252 |
+ #if BITS_PER_LONG == 32 |
9253 |
+@@ -8285,7 +8306,7 @@ static int detect_controller_lockup(struct ctlr_info *h) |
9254 |
+ * |
9255 |
+ * Called from monitor controller worker (hpsa_event_monitor_worker) |
9256 |
+ * |
9257 |
+- * A Volume (or Volumes that comprise an Array set may be undergoing a |
9258 |
++ * A Volume (or Volumes that comprise an Array set) may be undergoing a |
9259 |
+ * transformation, so we will be turning off ioaccel for all volumes that |
9260 |
+ * make up the Array. |
9261 |
+ */ |
9262 |
+@@ -8308,6 +8329,9 @@ static void hpsa_set_ioaccel_status(struct ctlr_info *h) |
9263 |
+ * Run through current device list used during I/O requests. |
9264 |
+ */ |
9265 |
+ for (i = 0; i < h->ndevices; i++) { |
9266 |
++ int offload_to_be_enabled = 0; |
9267 |
++ int offload_config = 0; |
9268 |
++ |
9269 |
+ device = h->dev[i]; |
9270 |
+ |
9271 |
+ if (!device) |
9272 |
+@@ -8325,25 +8349,35 @@ static void hpsa_set_ioaccel_status(struct ctlr_info *h) |
9273 |
+ continue; |
9274 |
+ |
9275 |
+ ioaccel_status = buf[IOACCEL_STATUS_BYTE]; |
9276 |
+- device->offload_config = |
9277 |
++ |
9278 |
++ /* |
9279 |
++ * Check if offload is still configured on |
9280 |
++ */ |
9281 |
++ offload_config = |
9282 |
+ !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT); |
9283 |
+- if (device->offload_config) |
9284 |
+- device->offload_to_be_enabled = |
9285 |
++ /* |
9286 |
++ * If offload is configured on, check to see if ioaccel |
9287 |
++ * needs to be enabled. |
9288 |
++ */ |
9289 |
++ if (offload_config) |
9290 |
++ offload_to_be_enabled = |
9291 |
+ !!(ioaccel_status & OFFLOAD_ENABLED_BIT); |
9292 |
+ |
9293 |
++ /* |
9294 |
++ * If ioaccel is to be re-enabled, re-enable later during the |
9295 |
++ * scan operation so the driver can get a fresh raidmap |
9296 |
++ * before turning ioaccel back on. |
9297 |
++ */ |
9298 |
++ if (offload_to_be_enabled) |
9299 |
++ continue; |
9300 |
++ |
9301 |
+ /* |
9302 |
+ * Immediately turn off ioaccel for any volume the |
9303 |
+ * controller tells us to. Some of the reasons could be: |
9304 |
+ * transformation - change to the LVs of an Array. |
9305 |
+ * degraded volume - component failure |
9306 |
+- * |
9307 |
+- * If ioaccel is to be re-enabled, re-enable later during the |
9308 |
+- * scan operation so the driver can get a fresh raidmap |
9309 |
+- * before turning ioaccel back on. |
9310 |
+- * |
9311 |
+ */ |
9312 |
+- if (!device->offload_to_be_enabled) |
9313 |
+- device->offload_enabled = 0; |
9314 |
++ hpsa_turn_off_ioaccel_for_device(device); |
9315 |
+ } |
9316 |
+ |
9317 |
+ kfree(buf); |
9318 |
+diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c |
9319 |
+index 6bb8917b99a19..64500417c22ea 100644 |
9320 |
+--- a/drivers/scsi/libfc/fc_rport.c |
9321 |
++++ b/drivers/scsi/libfc/fc_rport.c |
9322 |
+@@ -133,8 +133,10 @@ struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, u32 port_id) |
9323 |
+ lockdep_assert_held(&lport->disc.disc_mutex); |
9324 |
+ |
9325 |
+ rdata = fc_rport_lookup(lport, port_id); |
9326 |
+- if (rdata) |
9327 |
++ if (rdata) { |
9328 |
++ kref_put(&rdata->kref, fc_rport_destroy); |
9329 |
+ return rdata; |
9330 |
++ } |
9331 |
+ |
9332 |
+ if (lport->rport_priv_size > 0) |
9333 |
+ rport_priv_size = lport->rport_priv_size; |
9334 |
+@@ -481,10 +483,11 @@ static void fc_rport_enter_delete(struct fc_rport_priv *rdata, |
9335 |
+ |
9336 |
+ fc_rport_state_enter(rdata, RPORT_ST_DELETE); |
9337 |
+ |
9338 |
+- kref_get(&rdata->kref); |
9339 |
+- if (rdata->event == RPORT_EV_NONE && |
9340 |
+- !queue_work(rport_event_queue, &rdata->event_work)) |
9341 |
+- kref_put(&rdata->kref, fc_rport_destroy); |
9342 |
++ if (rdata->event == RPORT_EV_NONE) { |
9343 |
++ kref_get(&rdata->kref); |
9344 |
++ if (!queue_work(rport_event_queue, &rdata->event_work)) |
9345 |
++ kref_put(&rdata->kref, fc_rport_destroy); |
9346 |
++ } |
9347 |
+ |
9348 |
+ rdata->event = event; |
9349 |
+ } |
9350 |
+diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c |
9351 |
+index 25aa7a53d255e..bb973901b672d 100644 |
9352 |
+--- a/drivers/scsi/lpfc/lpfc_attr.c |
9353 |
++++ b/drivers/scsi/lpfc/lpfc_attr.c |
9354 |
+@@ -176,7 +176,6 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, |
9355 |
+ int i; |
9356 |
+ int len = 0; |
9357 |
+ char tmp[LPFC_MAX_NVME_INFO_TMP_LEN] = {0}; |
9358 |
+- unsigned long iflags = 0; |
9359 |
+ |
9360 |
+ if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) { |
9361 |
+ len = scnprintf(buf, PAGE_SIZE, "NVME Disabled\n"); |
9362 |
+@@ -347,7 +346,6 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, |
9363 |
+ if (strlcat(buf, "\nNVME Initiator Enabled\n", PAGE_SIZE) >= PAGE_SIZE) |
9364 |
+ goto buffer_done; |
9365 |
+ |
9366 |
+- rcu_read_lock(); |
9367 |
+ scnprintf(tmp, sizeof(tmp), |
9368 |
+ "XRI Dist lpfc%d Total %d IO %d ELS %d\n", |
9369 |
+ phba->brd_no, |
9370 |
+@@ -355,7 +353,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, |
9371 |
+ phba->sli4_hba.io_xri_max, |
9372 |
+ lpfc_sli4_get_els_iocb_cnt(phba)); |
9373 |
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) |
9374 |
+- goto rcu_unlock_buf_done; |
9375 |
++ goto buffer_done; |
9376 |
+ |
9377 |
+ /* Port state is only one of two values for now. */ |
9378 |
+ if (localport->port_id) |
9379 |
+@@ -371,15 +369,17 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, |
9380 |
+ wwn_to_u64(vport->fc_nodename.u.wwn), |
9381 |
+ localport->port_id, statep); |
9382 |
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) |
9383 |
+- goto rcu_unlock_buf_done; |
9384 |
++ goto buffer_done; |
9385 |
++ |
9386 |
++ spin_lock_irq(shost->host_lock); |
9387 |
+ |
9388 |
+ list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { |
9389 |
+ nrport = NULL; |
9390 |
+- spin_lock_irqsave(&vport->phba->hbalock, iflags); |
9391 |
++ spin_lock(&vport->phba->hbalock); |
9392 |
+ rport = lpfc_ndlp_get_nrport(ndlp); |
9393 |
+ if (rport) |
9394 |
+ nrport = rport->remoteport; |
9395 |
+- spin_unlock_irqrestore(&vport->phba->hbalock, iflags); |
9396 |
++ spin_unlock(&vport->phba->hbalock); |
9397 |
+ if (!nrport) |
9398 |
+ continue; |
9399 |
+ |
9400 |
+@@ -398,39 +398,39 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, |
9401 |
+ |
9402 |
+ /* Tab in to show lport ownership. */ |
9403 |
+ if (strlcat(buf, "NVME RPORT ", PAGE_SIZE) >= PAGE_SIZE) |
9404 |
+- goto rcu_unlock_buf_done; |
9405 |
++ goto unlock_buf_done; |
9406 |
+ if (phba->brd_no >= 10) { |
9407 |
+ if (strlcat(buf, " ", PAGE_SIZE) >= PAGE_SIZE) |
9408 |
+- goto rcu_unlock_buf_done; |
9409 |
++ goto unlock_buf_done; |
9410 |
+ } |
9411 |
+ |
9412 |
+ scnprintf(tmp, sizeof(tmp), "WWPN x%llx ", |
9413 |
+ nrport->port_name); |
9414 |
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) |
9415 |
+- goto rcu_unlock_buf_done; |
9416 |
++ goto unlock_buf_done; |
9417 |
+ |
9418 |
+ scnprintf(tmp, sizeof(tmp), "WWNN x%llx ", |
9419 |
+ nrport->node_name); |
9420 |
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) |
9421 |
+- goto rcu_unlock_buf_done; |
9422 |
++ goto unlock_buf_done; |
9423 |
+ |
9424 |
+ scnprintf(tmp, sizeof(tmp), "DID x%06x ", |
9425 |
+ nrport->port_id); |
9426 |
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) |
9427 |
+- goto rcu_unlock_buf_done; |
9428 |
++ goto unlock_buf_done; |
9429 |
+ |
9430 |
+ /* An NVME rport can have multiple roles. */ |
9431 |
+ if (nrport->port_role & FC_PORT_ROLE_NVME_INITIATOR) { |
9432 |
+ if (strlcat(buf, "INITIATOR ", PAGE_SIZE) >= PAGE_SIZE) |
9433 |
+- goto rcu_unlock_buf_done; |
9434 |
++ goto unlock_buf_done; |
9435 |
+ } |
9436 |
+ if (nrport->port_role & FC_PORT_ROLE_NVME_TARGET) { |
9437 |
+ if (strlcat(buf, "TARGET ", PAGE_SIZE) >= PAGE_SIZE) |
9438 |
+- goto rcu_unlock_buf_done; |
9439 |
++ goto unlock_buf_done; |
9440 |
+ } |
9441 |
+ if (nrport->port_role & FC_PORT_ROLE_NVME_DISCOVERY) { |
9442 |
+ if (strlcat(buf, "DISCSRVC ", PAGE_SIZE) >= PAGE_SIZE) |
9443 |
+- goto rcu_unlock_buf_done; |
9444 |
++ goto unlock_buf_done; |
9445 |
+ } |
9446 |
+ if (nrport->port_role & ~(FC_PORT_ROLE_NVME_INITIATOR | |
9447 |
+ FC_PORT_ROLE_NVME_TARGET | |
9448 |
+@@ -438,14 +438,14 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, |
9449 |
+ scnprintf(tmp, sizeof(tmp), "UNKNOWN ROLE x%x", |
9450 |
+ nrport->port_role); |
9451 |
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) |
9452 |
+- goto rcu_unlock_buf_done; |
9453 |
++ goto unlock_buf_done; |
9454 |
+ } |
9455 |
+ |
9456 |
+ scnprintf(tmp, sizeof(tmp), "%s\n", statep); |
9457 |
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) |
9458 |
+- goto rcu_unlock_buf_done; |
9459 |
++ goto unlock_buf_done; |
9460 |
+ } |
9461 |
+- rcu_read_unlock(); |
9462 |
++ spin_unlock_irq(shost->host_lock); |
9463 |
+ |
9464 |
+ if (!lport) |
9465 |
+ goto buffer_done; |
9466 |
+@@ -505,11 +505,11 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, |
9467 |
+ atomic_read(&lport->cmpl_fcp_err)); |
9468 |
+ strlcat(buf, tmp, PAGE_SIZE); |
9469 |
+ |
9470 |
+- /* RCU is already unlocked. */ |
9471 |
++ /* host_lock is already unlocked. */ |
9472 |
+ goto buffer_done; |
9473 |
+ |
9474 |
+- rcu_unlock_buf_done: |
9475 |
+- rcu_read_unlock(); |
9476 |
++ unlock_buf_done: |
9477 |
++ spin_unlock_irq(shost->host_lock); |
9478 |
+ |
9479 |
+ buffer_done: |
9480 |
+ len = strnlen(buf, PAGE_SIZE); |
9481 |
+diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c |
9482 |
+index 4a09f21cb235f..e672fa9e842c9 100644 |
9483 |
+--- a/drivers/scsi/lpfc/lpfc_ct.c |
9484 |
++++ b/drivers/scsi/lpfc/lpfc_ct.c |
9485 |
+@@ -2056,8 +2056,8 @@ lpfc_fdmi_hba_attr_wwnn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) |
9486 |
+ struct lpfc_fdmi_attr_entry *ae; |
9487 |
+ uint32_t size; |
9488 |
+ |
9489 |
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
9490 |
+- memset(ae, 0, sizeof(struct lpfc_name)); |
9491 |
++ ae = &ad->AttrValue; |
9492 |
++ memset(ae, 0, sizeof(*ae)); |
9493 |
+ |
9494 |
+ memcpy(&ae->un.AttrWWN, &vport->fc_sparam.nodeName, |
9495 |
+ sizeof(struct lpfc_name)); |
9496 |
+@@ -2073,8 +2073,8 @@ lpfc_fdmi_hba_attr_manufacturer(struct lpfc_vport *vport, |
9497 |
+ struct lpfc_fdmi_attr_entry *ae; |
9498 |
+ uint32_t len, size; |
9499 |
+ |
9500 |
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
9501 |
+- memset(ae, 0, 256); |
9502 |
++ ae = &ad->AttrValue; |
9503 |
++ memset(ae, 0, sizeof(*ae)); |
9504 |
+ |
9505 |
+ /* This string MUST be consistent with other FC platforms |
9506 |
+ * supported by Broadcom. |
9507 |
+@@ -2098,8 +2098,8 @@ lpfc_fdmi_hba_attr_sn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) |
9508 |
+ struct lpfc_fdmi_attr_entry *ae; |
9509 |
+ uint32_t len, size; |
9510 |
+ |
9511 |
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
9512 |
+- memset(ae, 0, 256); |
9513 |
++ ae = &ad->AttrValue; |
9514 |
++ memset(ae, 0, sizeof(*ae)); |
9515 |
+ |
9516 |
+ strncpy(ae->un.AttrString, phba->SerialNumber, |
9517 |
+ sizeof(ae->un.AttrString)); |
9518 |
+@@ -2120,8 +2120,8 @@ lpfc_fdmi_hba_attr_model(struct lpfc_vport *vport, |
9519 |
+ struct lpfc_fdmi_attr_entry *ae; |
9520 |
+ uint32_t len, size; |
9521 |
+ |
9522 |
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
9523 |
+- memset(ae, 0, 256); |
9524 |
++ ae = &ad->AttrValue; |
9525 |
++ memset(ae, 0, sizeof(*ae)); |
9526 |
+ |
9527 |
+ strncpy(ae->un.AttrString, phba->ModelName, |
9528 |
+ sizeof(ae->un.AttrString)); |
9529 |
+@@ -2141,8 +2141,8 @@ lpfc_fdmi_hba_attr_description(struct lpfc_vport *vport, |
9530 |
+ struct lpfc_fdmi_attr_entry *ae; |
9531 |
+ uint32_t len, size; |
9532 |
+ |
9533 |
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
9534 |
+- memset(ae, 0, 256); |
9535 |
++ ae = &ad->AttrValue; |
9536 |
++ memset(ae, 0, sizeof(*ae)); |
9537 |
+ |
9538 |
+ strncpy(ae->un.AttrString, phba->ModelDesc, |
9539 |
+ sizeof(ae->un.AttrString)); |
9540 |
+@@ -2164,8 +2164,8 @@ lpfc_fdmi_hba_attr_hdw_ver(struct lpfc_vport *vport, |
9541 |
+ struct lpfc_fdmi_attr_entry *ae; |
9542 |
+ uint32_t i, j, incr, size; |
9543 |
+ |
9544 |
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
9545 |
+- memset(ae, 0, 256); |
9546 |
++ ae = &ad->AttrValue; |
9547 |
++ memset(ae, 0, sizeof(*ae)); |
9548 |
+ |
9549 |
+ /* Convert JEDEC ID to ascii for hardware version */ |
9550 |
+ incr = vp->rev.biuRev; |
9551 |
+@@ -2194,8 +2194,8 @@ lpfc_fdmi_hba_attr_drvr_ver(struct lpfc_vport *vport, |
9552 |
+ struct lpfc_fdmi_attr_entry *ae; |
9553 |
+ uint32_t len, size; |
9554 |
+ |
9555 |
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
9556 |
+- memset(ae, 0, 256); |
9557 |
++ ae = &ad->AttrValue; |
9558 |
++ memset(ae, 0, sizeof(*ae)); |
9559 |
+ |
9560 |
+ strncpy(ae->un.AttrString, lpfc_release_version, |
9561 |
+ sizeof(ae->un.AttrString)); |
9562 |
+@@ -2216,8 +2216,8 @@ lpfc_fdmi_hba_attr_rom_ver(struct lpfc_vport *vport, |
9563 |
+ struct lpfc_fdmi_attr_entry *ae; |
9564 |
+ uint32_t len, size; |
9565 |
+ |
9566 |
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
9567 |
+- memset(ae, 0, 256); |
9568 |
++ ae = &ad->AttrValue; |
9569 |
++ memset(ae, 0, sizeof(*ae)); |
9570 |
+ |
9571 |
+ if (phba->sli_rev == LPFC_SLI_REV4) |
9572 |
+ lpfc_decode_firmware_rev(phba, ae->un.AttrString, 1); |
9573 |
+@@ -2241,8 +2241,8 @@ lpfc_fdmi_hba_attr_fmw_ver(struct lpfc_vport *vport, |
9574 |
+ struct lpfc_fdmi_attr_entry *ae; |
9575 |
+ uint32_t len, size; |
9576 |
+ |
9577 |
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
9578 |
+- memset(ae, 0, 256); |
9579 |
++ ae = &ad->AttrValue; |
9580 |
++ memset(ae, 0, sizeof(*ae)); |
9581 |
+ |
9582 |
+ lpfc_decode_firmware_rev(phba, ae->un.AttrString, 1); |
9583 |
+ len = strnlen(ae->un.AttrString, |
9584 |
+@@ -2261,8 +2261,8 @@ lpfc_fdmi_hba_attr_os_ver(struct lpfc_vport *vport, |
9585 |
+ struct lpfc_fdmi_attr_entry *ae; |
9586 |
+ uint32_t len, size; |
9587 |
+ |
9588 |
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
9589 |
+- memset(ae, 0, 256); |
9590 |
++ ae = &ad->AttrValue; |
9591 |
++ memset(ae, 0, sizeof(*ae)); |
9592 |
+ |
9593 |
+ snprintf(ae->un.AttrString, sizeof(ae->un.AttrString), "%s %s %s", |
9594 |
+ init_utsname()->sysname, |
9595 |
+@@ -2284,7 +2284,7 @@ lpfc_fdmi_hba_attr_ct_len(struct lpfc_vport *vport, |
9596 |
+ struct lpfc_fdmi_attr_entry *ae; |
9597 |
+ uint32_t size; |
9598 |
+ |
9599 |
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
9600 |
++ ae = &ad->AttrValue; |
9601 |
+ |
9602 |
+ ae->un.AttrInt = cpu_to_be32(LPFC_MAX_CT_SIZE); |
9603 |
+ size = FOURBYTES + sizeof(uint32_t); |
9604 |
+@@ -2300,8 +2300,8 @@ lpfc_fdmi_hba_attr_symbolic_name(struct lpfc_vport *vport, |
9605 |
+ struct lpfc_fdmi_attr_entry *ae; |
9606 |
+ uint32_t len, size; |
9607 |
+ |
9608 |
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
9609 |
+- memset(ae, 0, 256); |
9610 |
++ ae = &ad->AttrValue; |
9611 |
++ memset(ae, 0, sizeof(*ae)); |
9612 |
+ |
9613 |
+ len = lpfc_vport_symbolic_node_name(vport, |
9614 |
+ ae->un.AttrString, 256); |
9615 |
+@@ -2319,7 +2319,7 @@ lpfc_fdmi_hba_attr_vendor_info(struct lpfc_vport *vport, |
9616 |
+ struct lpfc_fdmi_attr_entry *ae; |
9617 |
+ uint32_t size; |
9618 |
+ |
9619 |
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
9620 |
++ ae = &ad->AttrValue; |
9621 |
+ |
9622 |
+ /* Nothing is defined for this currently */ |
9623 |
+ ae->un.AttrInt = cpu_to_be32(0); |
9624 |
+@@ -2336,7 +2336,7 @@ lpfc_fdmi_hba_attr_num_ports(struct lpfc_vport *vport, |
9625 |
+ struct lpfc_fdmi_attr_entry *ae; |
9626 |
+ uint32_t size; |
9627 |
+ |
9628 |
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
9629 |
++ ae = &ad->AttrValue; |
9630 |
+ |
9631 |
+ /* Each driver instance corresponds to a single port */ |
9632 |
+ ae->un.AttrInt = cpu_to_be32(1); |
9633 |
+@@ -2353,8 +2353,8 @@ lpfc_fdmi_hba_attr_fabric_wwnn(struct lpfc_vport *vport, |
9634 |
+ struct lpfc_fdmi_attr_entry *ae; |
9635 |
+ uint32_t size; |
9636 |
+ |
9637 |
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
9638 |
+- memset(ae, 0, sizeof(struct lpfc_name)); |
9639 |
++ ae = &ad->AttrValue; |
9640 |
++ memset(ae, 0, sizeof(*ae)); |
9641 |
+ |
9642 |
+ memcpy(&ae->un.AttrWWN, &vport->fabric_nodename, |
9643 |
+ sizeof(struct lpfc_name)); |
9644 |
+@@ -2372,8 +2372,8 @@ lpfc_fdmi_hba_attr_bios_ver(struct lpfc_vport *vport, |
9645 |
+ struct lpfc_fdmi_attr_entry *ae; |
9646 |
+ uint32_t len, size; |
9647 |
+ |
9648 |
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
9649 |
+- memset(ae, 0, 256); |
9650 |
++ ae = &ad->AttrValue; |
9651 |
++ memset(ae, 0, sizeof(*ae)); |
9652 |
+ |
9653 |
+ strlcat(ae->un.AttrString, phba->BIOSVersion, |
9654 |
+ sizeof(ae->un.AttrString)); |
9655 |
+@@ -2393,7 +2393,7 @@ lpfc_fdmi_hba_attr_bios_state(struct lpfc_vport *vport, |
9656 |
+ struct lpfc_fdmi_attr_entry *ae; |
9657 |
+ uint32_t size; |
9658 |
+ |
9659 |
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
9660 |
++ ae = &ad->AttrValue; |
9661 |
+ |
9662 |
+ /* Driver doesn't have access to this information */ |
9663 |
+ ae->un.AttrInt = cpu_to_be32(0); |
9664 |
+@@ -2410,8 +2410,8 @@ lpfc_fdmi_hba_attr_vendor_id(struct lpfc_vport *vport, |
9665 |
+ struct lpfc_fdmi_attr_entry *ae; |
9666 |
+ uint32_t len, size; |
9667 |
+ |
9668 |
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
9669 |
+- memset(ae, 0, 256); |
9670 |
++ ae = &ad->AttrValue; |
9671 |
++ memset(ae, 0, sizeof(*ae)); |
9672 |
+ |
9673 |
+ strncpy(ae->un.AttrString, "EMULEX", |
9674 |
+ sizeof(ae->un.AttrString)); |
9675 |
+@@ -2433,8 +2433,8 @@ lpfc_fdmi_port_attr_fc4type(struct lpfc_vport *vport, |
9676 |
+ struct lpfc_fdmi_attr_entry *ae; |
9677 |
+ uint32_t size; |
9678 |
+ |
9679 |
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
9680 |
+- memset(ae, 0, 32); |
9681 |
++ ae = &ad->AttrValue; |
9682 |
++ memset(ae, 0, sizeof(*ae)); |
9683 |
+ |
9684 |
+ ae->un.AttrTypes[3] = 0x02; /* Type 0x1 - ELS */ |
9685 |
+ ae->un.AttrTypes[2] = 0x01; /* Type 0x8 - FCP */ |
9686 |
+@@ -2459,7 +2459,7 @@ lpfc_fdmi_port_attr_support_speed(struct lpfc_vport *vport, |
9687 |
+ struct lpfc_fdmi_attr_entry *ae; |
9688 |
+ uint32_t size; |
9689 |
+ |
9690 |
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
9691 |
++ ae = &ad->AttrValue; |
9692 |
+ |
9693 |
+ ae->un.AttrInt = 0; |
9694 |
+ if (!(phba->hba_flag & HBA_FCOE_MODE)) { |
9695 |
+@@ -2513,7 +2513,7 @@ lpfc_fdmi_port_attr_speed(struct lpfc_vport *vport, |
9696 |
+ struct lpfc_fdmi_attr_entry *ae; |
9697 |
+ uint32_t size; |
9698 |
+ |
9699 |
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
9700 |
++ ae = &ad->AttrValue; |
9701 |
+ |
9702 |
+ if (!(phba->hba_flag & HBA_FCOE_MODE)) { |
9703 |
+ switch (phba->fc_linkspeed) { |
9704 |
+@@ -2583,7 +2583,7 @@ lpfc_fdmi_port_attr_max_frame(struct lpfc_vport *vport, |
9705 |
+ struct lpfc_fdmi_attr_entry *ae; |
9706 |
+ uint32_t size; |
9707 |
+ |
9708 |
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
9709 |
++ ae = &ad->AttrValue; |
9710 |
+ |
9711 |
+ hsp = (struct serv_parm *)&vport->fc_sparam; |
9712 |
+ ae->un.AttrInt = (((uint32_t) hsp->cmn.bbRcvSizeMsb & 0x0F) << 8) | |
9713 |
+@@ -2603,8 +2603,8 @@ lpfc_fdmi_port_attr_os_devname(struct lpfc_vport *vport, |
9714 |
+ struct lpfc_fdmi_attr_entry *ae; |
9715 |
+ uint32_t len, size; |
9716 |
+ |
9717 |
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
9718 |
+- memset(ae, 0, 256); |
9719 |
++ ae = &ad->AttrValue; |
9720 |
++ memset(ae, 0, sizeof(*ae)); |
9721 |
+ |
9722 |
+ snprintf(ae->un.AttrString, sizeof(ae->un.AttrString), |
9723 |
+ "/sys/class/scsi_host/host%d", shost->host_no); |
9724 |
+@@ -2624,8 +2624,8 @@ lpfc_fdmi_port_attr_host_name(struct lpfc_vport *vport, |
9725 |
+ struct lpfc_fdmi_attr_entry *ae; |
9726 |
+ uint32_t len, size; |
9727 |
+ |
9728 |
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
9729 |
+- memset(ae, 0, 256); |
9730 |
++ ae = &ad->AttrValue; |
9731 |
++ memset(ae, 0, sizeof(*ae)); |
9732 |
+ |
9733 |
+ scnprintf(ae->un.AttrString, sizeof(ae->un.AttrString), "%s", |
9734 |
+ vport->phba->os_host_name); |
9735 |
+@@ -2645,8 +2645,8 @@ lpfc_fdmi_port_attr_wwnn(struct lpfc_vport *vport, |
9736 |
+ struct lpfc_fdmi_attr_entry *ae; |
9737 |
+ uint32_t size; |
9738 |
+ |
9739 |
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
9740 |
+- memset(ae, 0, sizeof(struct lpfc_name)); |
9741 |
++ ae = &ad->AttrValue; |
9742 |
++ memset(ae, 0, sizeof(*ae)); |
9743 |
+ |
9744 |
+ memcpy(&ae->un.AttrWWN, &vport->fc_sparam.nodeName, |
9745 |
+ sizeof(struct lpfc_name)); |
9746 |
+@@ -2663,8 +2663,8 @@ lpfc_fdmi_port_attr_wwpn(struct lpfc_vport *vport, |
9747 |
+ struct lpfc_fdmi_attr_entry *ae; |
9748 |
+ uint32_t size; |
9749 |
+ |
9750 |
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
9751 |
+- memset(ae, 0, sizeof(struct lpfc_name)); |
9752 |
++ ae = &ad->AttrValue; |
9753 |
++ memset(ae, 0, sizeof(*ae)); |
9754 |
+ |
9755 |
+ memcpy(&ae->un.AttrWWN, &vport->fc_sparam.portName, |
9756 |
+ sizeof(struct lpfc_name)); |
9757 |
+@@ -2681,8 +2681,8 @@ lpfc_fdmi_port_attr_symbolic_name(struct lpfc_vport *vport, |
9758 |
+ struct lpfc_fdmi_attr_entry *ae; |
9759 |
+ uint32_t len, size; |
9760 |
+ |
9761 |
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
9762 |
+- memset(ae, 0, 256); |
9763 |
++ ae = &ad->AttrValue; |
9764 |
++ memset(ae, 0, sizeof(*ae)); |
9765 |
+ |
9766 |
+ len = lpfc_vport_symbolic_port_name(vport, ae->un.AttrString, 256); |
9767 |
+ len += (len & 3) ? (4 - (len & 3)) : 4; |
9768 |
+@@ -2700,7 +2700,7 @@ lpfc_fdmi_port_attr_port_type(struct lpfc_vport *vport, |
9769 |
+ struct lpfc_fdmi_attr_entry *ae; |
9770 |
+ uint32_t size; |
9771 |
+ |
9772 |
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
9773 |
++ ae = &ad->AttrValue; |
9774 |
+ if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) |
9775 |
+ ae->un.AttrInt = cpu_to_be32(LPFC_FDMI_PORTTYPE_NLPORT); |
9776 |
+ else |
9777 |
+@@ -2718,7 +2718,7 @@ lpfc_fdmi_port_attr_class(struct lpfc_vport *vport, |
9778 |
+ struct lpfc_fdmi_attr_entry *ae; |
9779 |
+ uint32_t size; |
9780 |
+ |
9781 |
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
9782 |
++ ae = &ad->AttrValue; |
9783 |
+ ae->un.AttrInt = cpu_to_be32(FC_COS_CLASS2 | FC_COS_CLASS3); |
9784 |
+ size = FOURBYTES + sizeof(uint32_t); |
9785 |
+ ad->AttrLen = cpu_to_be16(size); |
9786 |
+@@ -2733,8 +2733,8 @@ lpfc_fdmi_port_attr_fabric_wwpn(struct lpfc_vport *vport, |
9787 |
+ struct lpfc_fdmi_attr_entry *ae; |
9788 |
+ uint32_t size; |
9789 |
+ |
9790 |
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
9791 |
+- memset(ae, 0, sizeof(struct lpfc_name)); |
9792 |
++ ae = &ad->AttrValue; |
9793 |
++ memset(ae, 0, sizeof(*ae)); |
9794 |
+ |
9795 |
+ memcpy(&ae->un.AttrWWN, &vport->fabric_portname, |
9796 |
+ sizeof(struct lpfc_name)); |
9797 |
+@@ -2751,8 +2751,8 @@ lpfc_fdmi_port_attr_active_fc4type(struct lpfc_vport *vport, |
9798 |
+ struct lpfc_fdmi_attr_entry *ae; |
9799 |
+ uint32_t size; |
9800 |
+ |
9801 |
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
9802 |
+- memset(ae, 0, 32); |
9803 |
++ ae = &ad->AttrValue; |
9804 |
++ memset(ae, 0, sizeof(*ae)); |
9805 |
+ |
9806 |
+ ae->un.AttrTypes[3] = 0x02; /* Type 0x1 - ELS */ |
9807 |
+ ae->un.AttrTypes[2] = 0x01; /* Type 0x8 - FCP */ |
9808 |
+@@ -2775,7 +2775,7 @@ lpfc_fdmi_port_attr_port_state(struct lpfc_vport *vport, |
9809 |
+ struct lpfc_fdmi_attr_entry *ae; |
9810 |
+ uint32_t size; |
9811 |
+ |
9812 |
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
9813 |
++ ae = &ad->AttrValue; |
9814 |
+ /* Link Up - operational */ |
9815 |
+ ae->un.AttrInt = cpu_to_be32(LPFC_FDMI_PORTSTATE_ONLINE); |
9816 |
+ size = FOURBYTES + sizeof(uint32_t); |
9817 |
+@@ -2791,7 +2791,7 @@ lpfc_fdmi_port_attr_num_disc(struct lpfc_vport *vport, |
9818 |
+ struct lpfc_fdmi_attr_entry *ae; |
9819 |
+ uint32_t size; |
9820 |
+ |
9821 |
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
9822 |
++ ae = &ad->AttrValue; |
9823 |
+ vport->fdmi_num_disc = lpfc_find_map_node(vport); |
9824 |
+ ae->un.AttrInt = cpu_to_be32(vport->fdmi_num_disc); |
9825 |
+ size = FOURBYTES + sizeof(uint32_t); |
9826 |
+@@ -2807,7 +2807,7 @@ lpfc_fdmi_port_attr_nportid(struct lpfc_vport *vport, |
9827 |
+ struct lpfc_fdmi_attr_entry *ae; |
9828 |
+ uint32_t size; |
9829 |
+ |
9830 |
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
9831 |
++ ae = &ad->AttrValue; |
9832 |
+ ae->un.AttrInt = cpu_to_be32(vport->fc_myDID); |
9833 |
+ size = FOURBYTES + sizeof(uint32_t); |
9834 |
+ ad->AttrLen = cpu_to_be16(size); |
9835 |
+@@ -2822,8 +2822,8 @@ lpfc_fdmi_smart_attr_service(struct lpfc_vport *vport, |
9836 |
+ struct lpfc_fdmi_attr_entry *ae; |
9837 |
+ uint32_t len, size; |
9838 |
+ |
9839 |
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
9840 |
+- memset(ae, 0, 256); |
9841 |
++ ae = &ad->AttrValue; |
9842 |
++ memset(ae, 0, sizeof(*ae)); |
9843 |
+ |
9844 |
+ strncpy(ae->un.AttrString, "Smart SAN Initiator", |
9845 |
+ sizeof(ae->un.AttrString)); |
9846 |
+@@ -2843,8 +2843,8 @@ lpfc_fdmi_smart_attr_guid(struct lpfc_vport *vport, |
9847 |
+ struct lpfc_fdmi_attr_entry *ae; |
9848 |
+ uint32_t size; |
9849 |
+ |
9850 |
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
9851 |
+- memset(ae, 0, 256); |
9852 |
++ ae = &ad->AttrValue; |
9853 |
++ memset(ae, 0, sizeof(*ae)); |
9854 |
+ |
9855 |
+ memcpy(&ae->un.AttrString, &vport->fc_sparam.nodeName, |
9856 |
+ sizeof(struct lpfc_name)); |
9857 |
+@@ -2864,8 +2864,8 @@ lpfc_fdmi_smart_attr_version(struct lpfc_vport *vport, |
9858 |
+ struct lpfc_fdmi_attr_entry *ae; |
9859 |
+ uint32_t len, size; |
9860 |
+ |
9861 |
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
9862 |
+- memset(ae, 0, 256); |
9863 |
++ ae = &ad->AttrValue; |
9864 |
++ memset(ae, 0, sizeof(*ae)); |
9865 |
+ |
9866 |
+ strncpy(ae->un.AttrString, "Smart SAN Version 2.0", |
9867 |
+ sizeof(ae->un.AttrString)); |
9868 |
+@@ -2886,8 +2886,8 @@ lpfc_fdmi_smart_attr_model(struct lpfc_vport *vport, |
9869 |
+ struct lpfc_fdmi_attr_entry *ae; |
9870 |
+ uint32_t len, size; |
9871 |
+ |
9872 |
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
9873 |
+- memset(ae, 0, 256); |
9874 |
++ ae = &ad->AttrValue; |
9875 |
++ memset(ae, 0, sizeof(*ae)); |
9876 |
+ |
9877 |
+ strncpy(ae->un.AttrString, phba->ModelName, |
9878 |
+ sizeof(ae->un.AttrString)); |
9879 |
+@@ -2906,7 +2906,7 @@ lpfc_fdmi_smart_attr_port_info(struct lpfc_vport *vport, |
9880 |
+ struct lpfc_fdmi_attr_entry *ae; |
9881 |
+ uint32_t size; |
9882 |
+ |
9883 |
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
9884 |
++ ae = &ad->AttrValue; |
9885 |
+ |
9886 |
+ /* SRIOV (type 3) is not supported */ |
9887 |
+ if (vport->vpi) |
9888 |
+@@ -2926,7 +2926,7 @@ lpfc_fdmi_smart_attr_qos(struct lpfc_vport *vport, |
9889 |
+ struct lpfc_fdmi_attr_entry *ae; |
9890 |
+ uint32_t size; |
9891 |
+ |
9892 |
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
9893 |
++ ae = &ad->AttrValue; |
9894 |
+ ae->un.AttrInt = cpu_to_be32(0); |
9895 |
+ size = FOURBYTES + sizeof(uint32_t); |
9896 |
+ ad->AttrLen = cpu_to_be16(size); |
9897 |
+@@ -2941,7 +2941,7 @@ lpfc_fdmi_smart_attr_security(struct lpfc_vport *vport, |
9898 |
+ struct lpfc_fdmi_attr_entry *ae; |
9899 |
+ uint32_t size; |
9900 |
+ |
9901 |
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
9902 |
++ ae = &ad->AttrValue; |
9903 |
+ ae->un.AttrInt = cpu_to_be32(1); |
9904 |
+ size = FOURBYTES + sizeof(uint32_t); |
9905 |
+ ad->AttrLen = cpu_to_be16(size); |
9906 |
+@@ -3089,7 +3089,8 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, |
9907 |
+ /* Registered Port List */ |
9908 |
+ /* One entry (port) per adapter */ |
9909 |
+ rh->rpl.EntryCnt = cpu_to_be32(1); |
9910 |
+- memcpy(&rh->rpl.pe, &phba->pport->fc_sparam.portName, |
9911 |
++ memcpy(&rh->rpl.pe.PortName, |
9912 |
++ &phba->pport->fc_sparam.portName, |
9913 |
+ sizeof(struct lpfc_name)); |
9914 |
+ |
9915 |
+ /* point to the HBA attribute block */ |
9916 |
+diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c |
9917 |
+index 799db8a785c21..0dc1d56ff4709 100644 |
9918 |
+--- a/drivers/scsi/lpfc/lpfc_hbadisc.c |
9919 |
++++ b/drivers/scsi/lpfc/lpfc_hbadisc.c |
9920 |
+@@ -73,6 +73,7 @@ static void lpfc_disc_timeout_handler(struct lpfc_vport *); |
9921 |
+ static void lpfc_disc_flush_list(struct lpfc_vport *vport); |
9922 |
+ static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); |
9923 |
+ static int lpfc_fcf_inuse(struct lpfc_hba *); |
9924 |
++static void lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *); |
9925 |
+ |
9926 |
+ void |
9927 |
+ lpfc_terminate_rport_io(struct fc_rport *rport) |
9928 |
+@@ -1134,11 +1135,13 @@ out: |
9929 |
+ return; |
9930 |
+ } |
9931 |
+ |
9932 |
+- |
9933 |
+ void |
9934 |
+ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) |
9935 |
+ { |
9936 |
+ struct lpfc_vport *vport = pmb->vport; |
9937 |
++ LPFC_MBOXQ_t *sparam_mb; |
9938 |
++ struct lpfc_dmabuf *sparam_mp; |
9939 |
++ int rc; |
9940 |
+ |
9941 |
+ if (pmb->u.mb.mbxStatus) |
9942 |
+ goto out; |
9943 |
+@@ -1163,12 +1166,42 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) |
9944 |
+ } |
9945 |
+ |
9946 |
+ /* Start discovery by sending a FLOGI. port_state is identically |
9947 |
+- * LPFC_FLOGI while waiting for FLOGI cmpl. Check if sending |
9948 |
+- * the FLOGI is being deferred till after MBX_READ_SPARAM completes. |
9949 |
++ * LPFC_FLOGI while waiting for FLOGI cmpl. |
9950 |
+ */ |
9951 |
+ if (vport->port_state != LPFC_FLOGI) { |
9952 |
+- if (!(phba->hba_flag & HBA_DEFER_FLOGI)) |
9953 |
++ /* Issue MBX_READ_SPARAM to update CSPs before FLOGI if |
9954 |
++ * bb-credit recovery is in place. |
9955 |
++ */ |
9956 |
++ if (phba->bbcredit_support && phba->cfg_enable_bbcr && |
9957 |
++ !(phba->link_flag & LS_LOOPBACK_MODE)) { |
9958 |
++ sparam_mb = mempool_alloc(phba->mbox_mem_pool, |
9959 |
++ GFP_KERNEL); |
9960 |
++ if (!sparam_mb) |
9961 |
++ goto sparam_out; |
9962 |
++ |
9963 |
++ rc = lpfc_read_sparam(phba, sparam_mb, 0); |
9964 |
++ if (rc) { |
9965 |
++ mempool_free(sparam_mb, phba->mbox_mem_pool); |
9966 |
++ goto sparam_out; |
9967 |
++ } |
9968 |
++ sparam_mb->vport = vport; |
9969 |
++ sparam_mb->mbox_cmpl = lpfc_mbx_cmpl_read_sparam; |
9970 |
++ rc = lpfc_sli_issue_mbox(phba, sparam_mb, MBX_NOWAIT); |
9971 |
++ if (rc == MBX_NOT_FINISHED) { |
9972 |
++ sparam_mp = (struct lpfc_dmabuf *) |
9973 |
++ sparam_mb->ctx_buf; |
9974 |
++ lpfc_mbuf_free(phba, sparam_mp->virt, |
9975 |
++ sparam_mp->phys); |
9976 |
++ kfree(sparam_mp); |
9977 |
++ sparam_mb->ctx_buf = NULL; |
9978 |
++ mempool_free(sparam_mb, phba->mbox_mem_pool); |
9979 |
++ goto sparam_out; |
9980 |
++ } |
9981 |
++ |
9982 |
++ phba->hba_flag |= HBA_DEFER_FLOGI; |
9983 |
++ } else { |
9984 |
+ lpfc_initial_flogi(vport); |
9985 |
++ } |
9986 |
+ } else { |
9987 |
+ if (vport->fc_flag & FC_PT2PT) |
9988 |
+ lpfc_disc_start(vport); |
9989 |
+@@ -1180,6 +1213,7 @@ out: |
9990 |
+ "0306 CONFIG_LINK mbxStatus error x%x " |
9991 |
+ "HBA state x%x\n", |
9992 |
+ pmb->u.mb.mbxStatus, vport->port_state); |
9993 |
++sparam_out: |
9994 |
+ mempool_free(pmb, phba->mbox_mem_pool); |
9995 |
+ |
9996 |
+ lpfc_linkdown(phba); |
9997 |
+@@ -3237,21 +3271,6 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la) |
9998 |
+ lpfc_linkup(phba); |
9999 |
+ sparam_mbox = NULL; |
10000 |
+ |
10001 |
+- if (!(phba->hba_flag & HBA_FCOE_MODE)) { |
10002 |
+- cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
10003 |
+- if (!cfglink_mbox) |
10004 |
+- goto out; |
10005 |
+- vport->port_state = LPFC_LOCAL_CFG_LINK; |
10006 |
+- lpfc_config_link(phba, cfglink_mbox); |
10007 |
+- cfglink_mbox->vport = vport; |
10008 |
+- cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link; |
10009 |
+- rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT); |
10010 |
+- if (rc == MBX_NOT_FINISHED) { |
10011 |
+- mempool_free(cfglink_mbox, phba->mbox_mem_pool); |
10012 |
+- goto out; |
10013 |
+- } |
10014 |
+- } |
10015 |
+- |
10016 |
+ sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
10017 |
+ if (!sparam_mbox) |
10018 |
+ goto out; |
10019 |
+@@ -3272,7 +3291,20 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la) |
10020 |
+ goto out; |
10021 |
+ } |
10022 |
+ |
10023 |
+- if (phba->hba_flag & HBA_FCOE_MODE) { |
10024 |
++ if (!(phba->hba_flag & HBA_FCOE_MODE)) { |
10025 |
++ cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
10026 |
++ if (!cfglink_mbox) |
10027 |
++ goto out; |
10028 |
++ vport->port_state = LPFC_LOCAL_CFG_LINK; |
10029 |
++ lpfc_config_link(phba, cfglink_mbox); |
10030 |
++ cfglink_mbox->vport = vport; |
10031 |
++ cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link; |
10032 |
++ rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT); |
10033 |
++ if (rc == MBX_NOT_FINISHED) { |
10034 |
++ mempool_free(cfglink_mbox, phba->mbox_mem_pool); |
10035 |
++ goto out; |
10036 |
++ } |
10037 |
++ } else { |
10038 |
+ vport->port_state = LPFC_VPORT_UNKNOWN; |
10039 |
+ /* |
10040 |
+ * Add the driver's default FCF record at FCF index 0 now. This |
10041 |
+@@ -3329,10 +3361,6 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la) |
10042 |
+ } |
10043 |
+ /* Reset FCF roundrobin bmask for new discovery */ |
10044 |
+ lpfc_sli4_clear_fcf_rr_bmask(phba); |
10045 |
+- } else { |
10046 |
+- if (phba->bbcredit_support && phba->cfg_enable_bbcr && |
10047 |
+- !(phba->link_flag & LS_LOOPBACK_MODE)) |
10048 |
+- phba->hba_flag |= HBA_DEFER_FLOGI; |
10049 |
+ } |
10050 |
+ |
10051 |
+ /* Prepare for LINK up registrations */ |
10052 |
+diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h |
10053 |
+index 436cdc8c5ef46..b5642c8725938 100644 |
10054 |
+--- a/drivers/scsi/lpfc/lpfc_hw.h |
10055 |
++++ b/drivers/scsi/lpfc/lpfc_hw.h |
10056 |
+@@ -1340,25 +1340,8 @@ struct fc_rdp_res_frame { |
10057 |
+ /* lpfc_sli_ct_request defines the CT_IU preamble for FDMI commands */ |
10058 |
+ #define SLI_CT_FDMI_Subtypes 0x10 /* Management Service Subtype */ |
10059 |
+ |
10060 |
+-/* |
10061 |
+- * Registered Port List Format |
10062 |
+- */ |
10063 |
+-struct lpfc_fdmi_reg_port_list { |
10064 |
+- uint32_t EntryCnt; |
10065 |
+- uint32_t pe; /* Variable-length array */ |
10066 |
+-}; |
10067 |
+- |
10068 |
+- |
10069 |
+ /* Definitions for HBA / Port attribute entries */ |
10070 |
+ |
10071 |
+-struct lpfc_fdmi_attr_def { /* Defined in TLV format */ |
10072 |
+- /* Structure is in Big Endian format */ |
10073 |
+- uint32_t AttrType:16; |
10074 |
+- uint32_t AttrLen:16; |
10075 |
+- uint32_t AttrValue; /* Marks start of Value (ATTRIBUTE_ENTRY) */ |
10076 |
+-}; |
10077 |
+- |
10078 |
+- |
10079 |
+ /* Attribute Entry */ |
10080 |
+ struct lpfc_fdmi_attr_entry { |
10081 |
+ union { |
10082 |
+@@ -1369,7 +1352,13 @@ struct lpfc_fdmi_attr_entry { |
10083 |
+ } un; |
10084 |
+ }; |
10085 |
+ |
10086 |
+-#define LPFC_FDMI_MAX_AE_SIZE sizeof(struct lpfc_fdmi_attr_entry) |
10087 |
++struct lpfc_fdmi_attr_def { /* Defined in TLV format */ |
10088 |
++ /* Structure is in Big Endian format */ |
10089 |
++ uint32_t AttrType:16; |
10090 |
++ uint32_t AttrLen:16; |
10091 |
++ /* Marks start of Value (ATTRIBUTE_ENTRY) */ |
10092 |
++ struct lpfc_fdmi_attr_entry AttrValue; |
10093 |
++} __packed; |
10094 |
+ |
10095 |
+ /* |
10096 |
+ * HBA Attribute Block |
10097 |
+@@ -1393,13 +1382,20 @@ struct lpfc_fdmi_hba_ident { |
10098 |
+ struct lpfc_name PortName; |
10099 |
+ }; |
10100 |
+ |
10101 |
++/* |
10102 |
++ * Registered Port List Format |
10103 |
++ */ |
10104 |
++struct lpfc_fdmi_reg_port_list { |
10105 |
++ uint32_t EntryCnt; |
10106 |
++ struct lpfc_fdmi_port_entry pe; |
10107 |
++} __packed; |
10108 |
++ |
10109 |
+ /* |
10110 |
+ * Register HBA(RHBA) |
10111 |
+ */ |
10112 |
+ struct lpfc_fdmi_reg_hba { |
10113 |
+ struct lpfc_fdmi_hba_ident hi; |
10114 |
+- struct lpfc_fdmi_reg_port_list rpl; /* variable-length array */ |
10115 |
+-/* struct lpfc_fdmi_attr_block ab; */ |
10116 |
++ struct lpfc_fdmi_reg_port_list rpl; |
10117 |
+ }; |
10118 |
+ |
10119 |
+ /* |
10120 |
+diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c |
10121 |
+index 95abffd9ad100..d4c83eca0ad2c 100644 |
10122 |
+--- a/drivers/scsi/lpfc/lpfc_init.c |
10123 |
++++ b/drivers/scsi/lpfc/lpfc_init.c |
10124 |
+@@ -9124,6 +9124,7 @@ lpfc_sli4_release_hdwq(struct lpfc_hba *phba) |
10125 |
+ /* Free the CQ/WQ corresponding to the Hardware Queue */ |
10126 |
+ lpfc_sli4_queue_free(hdwq[idx].io_cq); |
10127 |
+ lpfc_sli4_queue_free(hdwq[idx].io_wq); |
10128 |
++ hdwq[idx].hba_eq = NULL; |
10129 |
+ hdwq[idx].io_cq = NULL; |
10130 |
+ hdwq[idx].io_wq = NULL; |
10131 |
+ if (phba->cfg_xpsgl && !phba->nvmet_support) |
10132 |
+diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c |
10133 |
+index 6961713825585..590a49e847626 100644 |
10134 |
+--- a/drivers/scsi/lpfc/lpfc_nportdisc.c |
10135 |
++++ b/drivers/scsi/lpfc/lpfc_nportdisc.c |
10136 |
+@@ -279,6 +279,109 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) |
10137 |
+ lpfc_cancel_retry_delay_tmo(phba->pport, ndlp); |
10138 |
+ } |
10139 |
+ |
10140 |
++/* lpfc_defer_pt2pt_acc - Complete SLI3 pt2pt processing on link up |
10141 |
++ * @phba: pointer to lpfc hba data structure. |
10142 |
++ * @link_mbox: pointer to CONFIG_LINK mailbox object |
10143 |
++ * |
10144 |
++ * This routine is only called if we are SLI3, direct connect pt2pt |
10145 |
++ * mode and the remote NPort issues the PLOGI after link up. |
10146 |
++ */ |
10147 |
++void |
10148 |
++lpfc_defer_pt2pt_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *link_mbox) |
10149 |
++{ |
10150 |
++ LPFC_MBOXQ_t *login_mbox; |
10151 |
++ MAILBOX_t *mb = &link_mbox->u.mb; |
10152 |
++ struct lpfc_iocbq *save_iocb; |
10153 |
++ struct lpfc_nodelist *ndlp; |
10154 |
++ int rc; |
10155 |
++ |
10156 |
++ ndlp = link_mbox->ctx_ndlp; |
10157 |
++ login_mbox = link_mbox->context3; |
10158 |
++ save_iocb = login_mbox->context3; |
10159 |
++ link_mbox->context3 = NULL; |
10160 |
++ login_mbox->context3 = NULL; |
10161 |
++ |
10162 |
++ /* Check for CONFIG_LINK error */ |
10163 |
++ if (mb->mbxStatus) { |
10164 |
++ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, |
10165 |
++ "4575 CONFIG_LINK fails pt2pt discovery: %x\n", |
10166 |
++ mb->mbxStatus); |
10167 |
++ mempool_free(login_mbox, phba->mbox_mem_pool); |
10168 |
++ mempool_free(link_mbox, phba->mbox_mem_pool); |
10169 |
++ kfree(save_iocb); |
10170 |
++ return; |
10171 |
++ } |
10172 |
++ |
10173 |
++ /* Now that CONFIG_LINK completed, and our SID is configured, |
10174 |
++ * we can now proceed with sending the PLOGI ACC. |
10175 |
++ */ |
10176 |
++ rc = lpfc_els_rsp_acc(link_mbox->vport, ELS_CMD_PLOGI, |
10177 |
++ save_iocb, ndlp, login_mbox); |
10178 |
++ if (rc) { |
10179 |
++ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, |
10180 |
++ "4576 PLOGI ACC fails pt2pt discovery: %x\n", |
10181 |
++ rc); |
10182 |
++ mempool_free(login_mbox, phba->mbox_mem_pool); |
10183 |
++ } |
10184 |
++ |
10185 |
++ mempool_free(link_mbox, phba->mbox_mem_pool); |
10186 |
++ kfree(save_iocb); |
10187 |
++} |
10188 |
++ |
10189 |
++/** |
10190 |
++ * lpfc_defer_acc_rsp - Progress SLI4 target rcv PLOGI handler
10191 |
++ * @phba: Pointer to HBA context object. |
10192 |
++ * @pmb: Pointer to mailbox object. |
10193 |
++ * |
10194 |
++ * This function provides the unreg rpi mailbox completion handler for a tgt. |
10195 |
++ * The routine frees the memory resources associated with the completed |
10196 |
++ * mailbox command and transmits the ELS ACC. |
10197 |
++ * |
10198 |
++ * This routine is only called if we are SLI4, acting in target |
10199 |
++ * mode and the remote NPort issues the PLOGI after link up. |
10200 |
++ **/ |
10201 |
++void |
10202 |
++lpfc_defer_acc_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) |
10203 |
++{ |
10204 |
++ struct lpfc_vport *vport = pmb->vport; |
10205 |
++ struct lpfc_nodelist *ndlp = pmb->ctx_ndlp; |
10206 |
++ LPFC_MBOXQ_t *mbox = pmb->context3; |
10207 |
++ struct lpfc_iocbq *piocb = NULL; |
10208 |
++ int rc; |
10209 |
++ |
10210 |
++ if (mbox) { |
10211 |
++ pmb->context3 = NULL; |
10212 |
++ piocb = mbox->context3; |
10213 |
++ mbox->context3 = NULL; |
10214 |
++ } |
10215 |
++ |
10216 |
++ /* |
10217 |
++ * Complete the unreg rpi mbx request, and update flags. |
10218 |
++ * This will also restart any deferred events. |
10219 |
++ */ |
10220 |
++ lpfc_nlp_get(ndlp); |
10221 |
++ lpfc_sli4_unreg_rpi_cmpl_clr(phba, pmb); |
10222 |
++ |
10223 |
++ if (!piocb) { |
10224 |
++ lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY | LOG_ELS, |
10225 |
++ "4578 PLOGI ACC fail\n"); |
10226 |
++ if (mbox) |
10227 |
++ mempool_free(mbox, phba->mbox_mem_pool); |
10228 |
++ goto out; |
10229 |
++ } |
10230 |
++ |
10231 |
++ rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, piocb, ndlp, mbox); |
10232 |
++ if (rc) { |
10233 |
++ lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY | LOG_ELS, |
10234 |
++ "4579 PLOGI ACC fail %x\n", rc); |
10235 |
++ if (mbox) |
10236 |
++ mempool_free(mbox, phba->mbox_mem_pool); |
10237 |
++ } |
10238 |
++ kfree(piocb); |
10239 |
++out: |
10240 |
++ lpfc_nlp_put(ndlp); |
10241 |
++} |
10242 |
++ |
10243 |
+ static int |
10244 |
+ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, |
10245 |
+ struct lpfc_iocbq *cmdiocb) |
10246 |
+@@ -291,10 +394,13 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, |
10247 |
+ IOCB_t *icmd; |
10248 |
+ struct serv_parm *sp; |
10249 |
+ uint32_t ed_tov; |
10250 |
+- LPFC_MBOXQ_t *mbox; |
10251 |
++ LPFC_MBOXQ_t *link_mbox; |
10252 |
++ LPFC_MBOXQ_t *login_mbox; |
10253 |
++ struct lpfc_iocbq *save_iocb; |
10254 |
+ struct ls_rjt stat; |
10255 |
+ uint32_t vid, flag; |
10256 |
+- int rc; |
10257 |
++ u16 rpi; |
10258 |
++ int rc, defer_acc; |
10259 |
+ |
10260 |
+ memset(&stat, 0, sizeof (struct ls_rjt)); |
10261 |
+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; |
10262 |
+@@ -343,6 +449,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, |
10263 |
+ else |
10264 |
+ ndlp->nlp_fcp_info |= CLASS3; |
10265 |
+ |
10266 |
++ defer_acc = 0; |
10267 |
+ ndlp->nlp_class_sup = 0; |
10268 |
+ if (sp->cls1.classValid) |
10269 |
+ ndlp->nlp_class_sup |= FC_COS_CLASS1; |
10270 |
+@@ -354,7 +461,6 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, |
10271 |
+ ndlp->nlp_class_sup |= FC_COS_CLASS4; |
10272 |
+ ndlp->nlp_maxframe = |
10273 |
+ ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb; |
10274 |
+- |
10275 |
+ /* if already logged in, do implicit logout */ |
10276 |
+ switch (ndlp->nlp_state) { |
10277 |
+ case NLP_STE_NPR_NODE: |
10278 |
+@@ -396,6 +502,10 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, |
10279 |
+ ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; |
10280 |
+ ndlp->nlp_flag &= ~NLP_FIRSTBURST; |
10281 |
+ |
10282 |
++ login_mbox = NULL; |
10283 |
++ link_mbox = NULL; |
10284 |
++ save_iocb = NULL; |
10285 |
++ |
10286 |
+ /* Check for Nport to NPort pt2pt protocol */ |
10287 |
+ if ((vport->fc_flag & FC_PT2PT) && |
10288 |
+ !(vport->fc_flag & FC_PT2PT_PLOGI)) { |
10289 |
+@@ -423,17 +533,22 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, |
10290 |
+ if (phba->sli_rev == LPFC_SLI_REV4) |
10291 |
+ lpfc_issue_reg_vfi(vport); |
10292 |
+ else { |
10293 |
+- mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
10294 |
+- if (mbox == NULL) |
10295 |
++ defer_acc = 1; |
10296 |
++ link_mbox = mempool_alloc(phba->mbox_mem_pool, |
10297 |
++ GFP_KERNEL); |
10298 |
++ if (!link_mbox) |
10299 |
+ goto out; |
10300 |
+- lpfc_config_link(phba, mbox); |
10301 |
+- mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; |
10302 |
+- mbox->vport = vport; |
10303 |
+- rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); |
10304 |
+- if (rc == MBX_NOT_FINISHED) { |
10305 |
+- mempool_free(mbox, phba->mbox_mem_pool); |
10306 |
++ lpfc_config_link(phba, link_mbox); |
10307 |
++ link_mbox->mbox_cmpl = lpfc_defer_pt2pt_acc; |
10308 |
++ link_mbox->vport = vport; |
10309 |
++ link_mbox->ctx_ndlp = ndlp; |
10310 |
++ |
10311 |
++ save_iocb = kzalloc(sizeof(*save_iocb), GFP_KERNEL); |
10312 |
++ if (!save_iocb) |
10313 |
+ goto out; |
10314 |
+- } |
10315 |
++ /* Save info from cmd IOCB used in rsp */ |
10316 |
++ memcpy((uint8_t *)save_iocb, (uint8_t *)cmdiocb, |
10317 |
++ sizeof(struct lpfc_iocbq)); |
10318 |
+ } |
10319 |
+ |
10320 |
+ lpfc_can_disctmo(vport); |
10321 |
+@@ -448,30 +563,57 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, |
10322 |
+ ndlp->nlp_flag |= NLP_SUPPRESS_RSP; |
10323 |
+ } |
10324 |
+ |
10325 |
+- mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
10326 |
+- if (!mbox) |
10327 |
++ login_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
10328 |
++ if (!login_mbox) |
10329 |
+ goto out; |
10330 |
+ |
10331 |
+ /* Registering an existing RPI behaves differently for SLI3 vs SLI4 */ |
10332 |
+- if (phba->sli_rev == LPFC_SLI_REV4) |
10333 |
++ if (phba->nvmet_support && !defer_acc) { |
10334 |
++ link_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
10335 |
++ if (!link_mbox) |
10336 |
++ goto out; |
10337 |
++ |
10338 |
++ /* As unique identifiers such as iotag would be overwritten |
10339 |
++ * with those from the cmdiocb, allocate separate temporary |
10340 |
++ * storage for the copy. |
10341 |
++ */ |
10342 |
++ save_iocb = kzalloc(sizeof(*save_iocb), GFP_KERNEL); |
10343 |
++ if (!save_iocb) |
10344 |
++ goto out; |
10345 |
++ |
10346 |
++ /* Unreg RPI is required for SLI4. */ |
10347 |
++ rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]; |
10348 |
++ lpfc_unreg_login(phba, vport->vpi, rpi, link_mbox); |
10349 |
++ link_mbox->vport = vport; |
10350 |
++ link_mbox->ctx_ndlp = ndlp; |
10351 |
++ link_mbox->mbox_cmpl = lpfc_defer_acc_rsp; |
10352 |
++ |
10353 |
++ if (((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) && |
10354 |
++ (!(vport->fc_flag & FC_OFFLINE_MODE))) |
10355 |
++ ndlp->nlp_flag |= NLP_UNREG_INP; |
10356 |
++ |
10357 |
++ /* Save info from cmd IOCB used in rsp */ |
10358 |
++ memcpy(save_iocb, cmdiocb, sizeof(*save_iocb)); |
10359 |
++ |
10360 |
++ /* Delay sending ACC till unreg RPI completes. */ |
10361 |
++ defer_acc = 1; |
10362 |
++ } else if (phba->sli_rev == LPFC_SLI_REV4) |
10363 |
+ lpfc_unreg_rpi(vport, ndlp); |
10364 |
+ |
10365 |
+ rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID, |
10366 |
+- (uint8_t *) sp, mbox, ndlp->nlp_rpi); |
10367 |
+- if (rc) { |
10368 |
+- mempool_free(mbox, phba->mbox_mem_pool); |
10369 |
++ (uint8_t *)sp, login_mbox, ndlp->nlp_rpi); |
10370 |
++ if (rc) |
10371 |
+ goto out; |
10372 |
+- } |
10373 |
+ |
10374 |
+ /* ACC PLOGI rsp command needs to execute first, |
10375 |
+- * queue this mbox command to be processed later. |
10376 |
++ * queue this login_mbox command to be processed later. |
10377 |
+ */ |
10378 |
+- mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login; |
10379 |
++ login_mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login; |
10380 |
+ /* |
10381 |
+- * mbox->ctx_ndlp = lpfc_nlp_get(ndlp) deferred until mailbox |
10382 |
++ * login_mbox->ctx_ndlp = lpfc_nlp_get(ndlp) deferred until mailbox |
10383 |
+ * command issued in lpfc_cmpl_els_acc(). |
10384 |
+ */ |
10385 |
+- mbox->vport = vport; |
10386 |
++ login_mbox->vport = vport; |
10387 |
+ spin_lock_irq(shost->host_lock); |
10388 |
+ ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI); |
10389 |
+ spin_unlock_irq(shost->host_lock); |
10390 |
+@@ -495,6 +637,9 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, |
10391 |
+ if ((vport->port_type == LPFC_NPIV_PORT && |
10392 |
+ vport->cfg_restrict_login)) { |
10393 |
+ |
10394 |
++ /* no deferred ACC */ |
10395 |
++ kfree(save_iocb); |
10396 |
++ |
10397 |
+ /* In order to preserve RPIs, we want to cleanup |
10398 |
+ * the default RPI the firmware created to rcv |
10399 |
+ * this ELS request. The only way to do this is |
10400 |
+@@ -506,16 +651,50 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, |
10401 |
+ stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD; |
10402 |
+ stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; |
10403 |
+ rc = lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, |
10404 |
+- ndlp, mbox); |
10405 |
++ ndlp, login_mbox); |
10406 |
+ if (rc) |
10407 |
+- mempool_free(mbox, phba->mbox_mem_pool); |
10408 |
++ mempool_free(login_mbox, phba->mbox_mem_pool); |
10409 |
+ return 1; |
10410 |
+ } |
10411 |
+- rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox); |
10412 |
++ if (defer_acc) { |
10413 |
++ /* So the order here should be: |
10414 |
++ * SLI3 pt2pt |
10415 |
++ * Issue CONFIG_LINK mbox |
10416 |
++ * CONFIG_LINK cmpl |
10417 |
++ * SLI4 tgt |
10418 |
++ * Issue UNREG RPI mbx |
10419 |
++ * UNREG RPI cmpl |
10420 |
++ * Issue PLOGI ACC |
10421 |
++ * PLOGI ACC cmpl |
10422 |
++ * Issue REG_LOGIN mbox |
10423 |
++ */ |
10424 |
++ |
10425 |
++ /* Save the REG_LOGIN mbox and rcv IOCB copy for later */
10426 |
++ link_mbox->context3 = login_mbox; |
10427 |
++ login_mbox->context3 = save_iocb; |
10428 |
++ |
10429 |
++ /* Start the ball rolling by issuing CONFIG_LINK here */ |
10430 |
++ rc = lpfc_sli_issue_mbox(phba, link_mbox, MBX_NOWAIT); |
10431 |
++ if (rc == MBX_NOT_FINISHED) |
10432 |
++ goto out; |
10433 |
++ return 1; |
10434 |
++ } |
10435 |
++ |
10436 |
++ rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, login_mbox); |
10437 |
+ if (rc) |
10438 |
+- mempool_free(mbox, phba->mbox_mem_pool); |
10439 |
++ mempool_free(login_mbox, phba->mbox_mem_pool); |
10440 |
+ return 1; |
10441 |
+ out: |
10442 |
++ if (defer_acc) |
10443 |
++ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, |
10444 |
++ "4577 discovery failure: %p %p %p\n", |
10445 |
++ save_iocb, link_mbox, login_mbox); |
10446 |
++ kfree(save_iocb); |
10447 |
++ if (link_mbox) |
10448 |
++ mempool_free(link_mbox, phba->mbox_mem_pool); |
10449 |
++ if (login_mbox) |
10450 |
++ mempool_free(login_mbox, phba->mbox_mem_pool); |
10451 |
++ |
10452 |
+ stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; |
10453 |
+ stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE; |
10454 |
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); |
10455 |
+diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c |
10456 |
+index a951e1c8165ed..e2877d2b3cc0d 100644 |
10457 |
+--- a/drivers/scsi/lpfc/lpfc_sli.c |
10458 |
++++ b/drivers/scsi/lpfc/lpfc_sli.c |
10459 |
+@@ -17866,6 +17866,10 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) |
10460 |
+ list_add_tail(&iocbq->list, &first_iocbq->list); |
10461 |
+ } |
10462 |
+ } |
10463 |
++ /* Free the sequence's header buffer */ |
10464 |
++ if (!first_iocbq) |
10465 |
++ lpfc_in_buf_free(vport->phba, &seq_dmabuf->dbuf); |
10466 |
++ |
10467 |
+ return first_iocbq; |
10468 |
+ } |
10469 |
+ |
10470 |
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c |
10471 |
+index d5a62fea8fe3e..bae7cf70ee177 100644 |
10472 |
+--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c |
10473 |
++++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c |
10474 |
+@@ -3717,12 +3717,6 @@ mpt3sas_ctl_exit(ushort hbas_to_enumerate) |
10475 |
+ for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) { |
10476 |
+ if (!ioc->diag_buffer[i]) |
10477 |
+ continue; |
10478 |
+- if (!(ioc->diag_buffer_status[i] & |
10479 |
+- MPT3_DIAG_BUFFER_IS_REGISTERED)) |
10480 |
+- continue; |
10481 |
+- if ((ioc->diag_buffer_status[i] & |
10482 |
+- MPT3_DIAG_BUFFER_IS_RELEASED)) |
10483 |
+- continue; |
10484 |
+ dma_free_coherent(&ioc->pdev->dev, |
10485 |
+ ioc->diag_buffer_sz[i], |
10486 |
+ ioc->diag_buffer[i], |
10487 |
+diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c |
10488 |
+index 027bf5b2981b9..36f5bab09f73e 100644 |
10489 |
+--- a/drivers/scsi/pm8001/pm8001_sas.c |
10490 |
++++ b/drivers/scsi/pm8001/pm8001_sas.c |
10491 |
+@@ -1202,8 +1202,8 @@ int pm8001_abort_task(struct sas_task *task) |
10492 |
+ pm8001_dev = dev->lldd_dev; |
10493 |
+ pm8001_ha = pm8001_find_ha_by_dev(dev); |
10494 |
+ phy_id = pm8001_dev->attached_phy; |
10495 |
+- rc = pm8001_find_tag(task, &tag); |
10496 |
+- if (rc == 0) { |
10497 |
++ ret = pm8001_find_tag(task, &tag); |
10498 |
++ if (ret == 0) { |
10499 |
+ pm8001_printk("no tag for task:%p\n", task); |
10500 |
+ return TMF_RESP_FUNC_FAILED; |
10501 |
+ } |
10502 |
+@@ -1241,26 +1241,50 @@ int pm8001_abort_task(struct sas_task *task) |
10503 |
+ |
10504 |
+ /* 2. Send Phy Control Hard Reset */ |
10505 |
+ reinit_completion(&completion); |
10506 |
++ phy->port_reset_status = PORT_RESET_TMO; |
10507 |
+ phy->reset_success = false; |
10508 |
+ phy->enable_completion = &completion; |
10509 |
+ phy->reset_completion = &completion_reset; |
10510 |
+ ret = PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id, |
10511 |
+ PHY_HARD_RESET); |
10512 |
+- if (ret) |
10513 |
+- goto out; |
10514 |
+- PM8001_MSG_DBG(pm8001_ha, |
10515 |
+- pm8001_printk("Waiting for local phy ctl\n")); |
10516 |
+- wait_for_completion(&completion); |
10517 |
+- if (!phy->reset_success) |
10518 |
++ if (ret) { |
10519 |
++ phy->enable_completion = NULL; |
10520 |
++ phy->reset_completion = NULL; |
10521 |
+ goto out; |
10522 |
++ } |
10523 |
+ |
10524 |
+- /* 3. Wait for Port Reset complete / Port reset TMO */ |
10525 |
++ /* In the case of the reset timeout/fail we still |
10526 |
++ * abort the command at the firmware. The assumption |
10527 |
++ * here is that the drive is off doing something so |
10528 |
++ * that it's not processing requests, and we want to |
10529 |
++ * avoid getting a completion for this and either |
10530 |
++ * leaking the task in libsas or losing the race and |
10531 |
++ * getting a double free. |
10532 |
++ */ |
10533 |
+ PM8001_MSG_DBG(pm8001_ha, |
10534 |
++ pm8001_printk("Waiting for local phy ctl\n")); |
10535 |
++ ret = wait_for_completion_timeout(&completion, |
10536 |
++ PM8001_TASK_TIMEOUT * HZ); |
10537 |
++ if (!ret || !phy->reset_success) { |
10538 |
++ phy->enable_completion = NULL; |
10539 |
++ phy->reset_completion = NULL; |
10540 |
++ } else { |
10541 |
++ /* 3. Wait for Port Reset complete or |
10542 |
++ * Port reset TMO |
10543 |
++ */ |
10544 |
++ PM8001_MSG_DBG(pm8001_ha, |
10545 |
+ pm8001_printk("Waiting for Port reset\n")); |
10546 |
+- wait_for_completion(&completion_reset); |
10547 |
+- if (phy->port_reset_status) { |
10548 |
+- pm8001_dev_gone_notify(dev); |
10549 |
+- goto out; |
10550 |
++ ret = wait_for_completion_timeout( |
10551 |
++ &completion_reset, |
10552 |
++ PM8001_TASK_TIMEOUT * HZ); |
10553 |
++ if (!ret) |
10554 |
++ phy->reset_completion = NULL; |
10555 |
++ WARN_ON(phy->port_reset_status == |
10556 |
++ PORT_RESET_TMO); |
10557 |
++ if (phy->port_reset_status == PORT_RESET_TMO) { |
10558 |
++ pm8001_dev_gone_notify(dev); |
10559 |
++ goto out; |
10560 |
++ } |
10561 |
+ } |
10562 |
+ |
10563 |
+ /* |
10564 |
+diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c |
10565 |
+index 0f57c80734061..0f2622a48311c 100644 |
10566 |
+--- a/drivers/scsi/qedi/qedi_iscsi.c |
10567 |
++++ b/drivers/scsi/qedi/qedi_iscsi.c |
10568 |
+@@ -1062,6 +1062,9 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep) |
10569 |
+ break; |
10570 |
+ } |
10571 |
+ |
10572 |
++ if (!abrt_conn) |
10573 |
++ wait_delay += qedi->pf_params.iscsi_pf_params.two_msl_timer; |
10574 |
++ |
10575 |
+ qedi_ep->state = EP_STATE_DISCONN_START; |
10576 |
+ ret = qedi_ops->destroy_conn(qedi->cdev, qedi_ep->handle, abrt_conn); |
10577 |
+ if (ret) { |
10578 |
+diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c |
10579 |
+index ac4c47fc5f4c1..62d2ee825c97a 100644 |
10580 |
+--- a/drivers/scsi/qla2xxx/qla_init.c |
10581 |
++++ b/drivers/scsi/qla2xxx/qla_init.c |
10582 |
+@@ -1002,7 +1002,7 @@ static void qla24xx_async_gnl_sp_done(srb_t *sp, int res) |
10583 |
+ set_bit(loop_id, vha->hw->loop_id_map); |
10584 |
+ wwn = wwn_to_u64(e->port_name); |
10585 |
+ |
10586 |
+- ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x20e8, |
10587 |
++ ql_dbg(ql_dbg_disc, vha, 0x20e8, |
10588 |
+ "%s %8phC %02x:%02x:%02x CLS %x/%x lid %x \n", |
10589 |
+ __func__, (void *)&wwn, e->port_id[2], e->port_id[1], |
10590 |
+ e->port_id[0], e->current_login_state, e->last_login_state, |
10591 |
+@@ -1061,6 +1061,16 @@ static void qla24xx_async_gnl_sp_done(srb_t *sp, int res) |
10592 |
+ |
10593 |
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); |
10594 |
+ vha->gnl.sent = 0; |
10595 |
++ if (!list_empty(&vha->gnl.fcports)) { |
10596 |
++ /* retrigger gnl */ |
10597 |
++ list_for_each_entry_safe(fcport, tf, &vha->gnl.fcports, |
10598 |
++ gnl_entry) { |
10599 |
++ list_del_init(&fcport->gnl_entry); |
10600 |
++ fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); |
10601 |
++ if (qla24xx_post_gnl_work(vha, fcport) == QLA_SUCCESS) |
10602 |
++ break; |
10603 |
++ } |
10604 |
++ } |
10605 |
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); |
10606 |
+ |
10607 |
+ sp->free(sp); |
10608 |
+@@ -1901,33 +1911,13 @@ qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea) |
10609 |
+ "%s %d %8phC post fc4 prli\n", |
10610 |
+ __func__, __LINE__, ea->fcport->port_name); |
10611 |
+ ea->fcport->fc4f_nvme = 0; |
10612 |
+- qla24xx_post_prli_work(vha, ea->fcport); |
10613 |
+ return; |
10614 |
+ } |
10615 |
+ |
10616 |
+- /* at this point both PRLI NVME & PRLI FCP failed */ |
10617 |
+- if (N2N_TOPO(vha->hw)) { |
10618 |
+- if (ea->fcport->n2n_link_reset_cnt < 3) { |
10619 |
+- ea->fcport->n2n_link_reset_cnt++; |
10620 |
+- /* |
10621 |
+- * remote port is not sending Plogi. Reset |
10622 |
+- * link to kick start his state machine |
10623 |
+- */ |
10624 |
+- set_bit(N2N_LINK_RESET, &vha->dpc_flags); |
10625 |
+- } else { |
10626 |
+- ql_log(ql_log_warn, vha, 0x2119, |
10627 |
+- "%s %d %8phC Unable to reconnect\n", |
10628 |
+- __func__, __LINE__, ea->fcport->port_name); |
10629 |
+- } |
10630 |
+- } else { |
10631 |
+- /* |
10632 |
+- * switch connect. login failed. Take connection |
10633 |
+- * down and allow relogin to retrigger |
10634 |
+- */ |
10635 |
+- ea->fcport->flags &= ~FCF_ASYNC_SENT; |
10636 |
+- ea->fcport->keep_nport_handle = 0; |
10637 |
+- qlt_schedule_sess_for_deletion(ea->fcport); |
10638 |
+- } |
10639 |
++ ea->fcport->flags &= ~FCF_ASYNC_SENT; |
10640 |
++ ea->fcport->keep_nport_handle = 0; |
10641 |
++ ea->fcport->logout_on_delete = 1; |
10642 |
++ qlt_schedule_sess_for_deletion(ea->fcport); |
10643 |
+ break; |
10644 |
+ } |
10645 |
+ } |
10646 |
+@@ -1995,7 +1985,7 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea) |
10647 |
+ qla24xx_post_prli_work(vha, ea->fcport); |
10648 |
+ } else { |
10649 |
+ ql_dbg(ql_dbg_disc, vha, 0x20ea, |
10650 |
+- "%s %d %8phC LoopID 0x%x in use with %06x. post gnl\n", |
10651 |
++ "%s %d %8phC LoopID 0x%x in use with %06x. post gpdb\n", |
10652 |
+ __func__, __LINE__, ea->fcport->port_name, |
10653 |
+ ea->fcport->loop_id, ea->fcport->d_id.b24); |
10654 |
+ |
10655 |
+@@ -2066,6 +2056,7 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea) |
10656 |
+ set_bit(lid, vha->hw->loop_id_map); |
10657 |
+ ea->fcport->loop_id = lid; |
10658 |
+ ea->fcport->keep_nport_handle = 0; |
10659 |
++ ea->fcport->logout_on_delete = 1; |
10660 |
+ qlt_schedule_sess_for_deletion(ea->fcport); |
10661 |
+ } |
10662 |
+ break; |
10663 |
+diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c |
10664 |
+index bdf1994251b9b..aed4ce66e6cf9 100644 |
10665 |
+--- a/drivers/scsi/qla2xxx/qla_iocb.c |
10666 |
++++ b/drivers/scsi/qla2xxx/qla_iocb.c |
10667 |
+@@ -2749,6 +2749,10 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res) |
10668 |
+ struct scsi_qla_host *vha = sp->vha; |
10669 |
+ struct event_arg ea; |
10670 |
+ struct qla_work_evt *e; |
10671 |
++ struct fc_port *conflict_fcport; |
10672 |
++ port_id_t cid; /* conflict Nport id */ |
10673 |
++ u32 *fw_status = sp->u.iocb_cmd.u.els_plogi.fw_status; |
10674 |
++ u16 lid; |
10675 |
+ |
10676 |
+ ql_dbg(ql_dbg_disc, vha, 0x3072, |
10677 |
+ "%s ELS done rc %d hdl=%x, portid=%06x %8phC\n", |
10678 |
+@@ -2760,14 +2764,102 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res) |
10679 |
+ if (sp->flags & SRB_WAKEUP_ON_COMP) |
10680 |
+ complete(&lio->u.els_plogi.comp); |
10681 |
+ else { |
10682 |
+- if (res) { |
10683 |
+- set_bit(RELOGIN_NEEDED, &vha->dpc_flags); |
10684 |
+- } else { |
10685 |
++ switch (fw_status[0]) { |
10686 |
++ case CS_DATA_UNDERRUN: |
10687 |
++ case CS_COMPLETE: |
10688 |
+ memset(&ea, 0, sizeof(ea)); |
10689 |
+ ea.fcport = fcport; |
10690 |
+ ea.data[0] = MBS_COMMAND_COMPLETE; |
10691 |
+ ea.sp = sp; |
10692 |
+ qla24xx_handle_plogi_done_event(vha, &ea); |
10693 |
++ break; |
10694 |
++ |
10695 |
++ case CS_IOCB_ERROR: |
10696 |
++ switch (fw_status[1]) { |
10697 |
++ case LSC_SCODE_PORTID_USED: |
10698 |
++ lid = fw_status[2] & 0xffff; |
10699 |
++ qlt_find_sess_invalidate_other(vha, |
10700 |
++ wwn_to_u64(fcport->port_name), |
10701 |
++ fcport->d_id, lid, &conflict_fcport); |
10702 |
++ if (conflict_fcport) { |
10703 |
++ /* |
10704 |
++ * Another fcport shares the same |
10705 |
++ * loop_id & nport id; conflict |
10706 |
++ * fcport needs to finish cleanup |
10707 |
++ * before this fcport can proceed |
10708 |
++ * to login. |
10709 |
++ */ |
10710 |
++ conflict_fcport->conflict = fcport; |
10711 |
++ fcport->login_pause = 1; |
10712 |
++ ql_dbg(ql_dbg_disc, vha, 0x20ed, |
10713 |
++ "%s %d %8phC pid %06x inuse with lid %#x post gidpn\n", |
10714 |
++ __func__, __LINE__, |
10715 |
++ fcport->port_name, |
10716 |
++ fcport->d_id.b24, lid); |
10717 |
++ } else { |
10718 |
++ ql_dbg(ql_dbg_disc, vha, 0x20ed, |
10719 |
++ "%s %d %8phC pid %06x inuse with lid %#x sched del\n", |
10720 |
++ __func__, __LINE__, |
10721 |
++ fcport->port_name, |
10722 |
++ fcport->d_id.b24, lid); |
10723 |
++ qla2x00_clear_loop_id(fcport); |
10724 |
++ set_bit(lid, vha->hw->loop_id_map); |
10725 |
++ fcport->loop_id = lid; |
10726 |
++ fcport->keep_nport_handle = 0; |
10727 |
++ qlt_schedule_sess_for_deletion(fcport); |
10728 |
++ } |
10729 |
++ break; |
10730 |
++ |
10731 |
++ case LSC_SCODE_NPORT_USED: |
10732 |
++ cid.b.domain = (fw_status[2] >> 16) & 0xff; |
10733 |
++ cid.b.area = (fw_status[2] >> 8) & 0xff; |
10734 |
++ cid.b.al_pa = fw_status[2] & 0xff; |
10735 |
++ cid.b.rsvd_1 = 0; |
10736 |
++ |
10737 |
++ ql_dbg(ql_dbg_disc, vha, 0x20ec, |
10738 |
++ "%s %d %8phC lid %#x in use with pid %06x post gnl\n", |
10739 |
++ __func__, __LINE__, fcport->port_name, |
10740 |
++ fcport->loop_id, cid.b24); |
10741 |
++ set_bit(fcport->loop_id, |
10742 |
++ vha->hw->loop_id_map); |
10743 |
++ fcport->loop_id = FC_NO_LOOP_ID; |
10744 |
++ qla24xx_post_gnl_work(vha, fcport); |
10745 |
++ break; |
10746 |
++ |
10747 |
++ case LSC_SCODE_NOXCB: |
10748 |
++ vha->hw->exch_starvation++; |
10749 |
++ if (vha->hw->exch_starvation > 5) { |
10750 |
++ ql_log(ql_log_warn, vha, 0xd046, |
10751 |
++ "Exchange starvation. Resetting RISC\n"); |
10752 |
++ vha->hw->exch_starvation = 0; |
10753 |
++ set_bit(ISP_ABORT_NEEDED, |
10754 |
++ &vha->dpc_flags); |
10755 |
++ qla2xxx_wake_dpc(vha); |
10756 |
++ } |
10757 |
++ /* fall through */ |
10758 |
++ default: |
10759 |
++ ql_dbg(ql_dbg_disc, vha, 0x20eb, |
10760 |
++ "%s %8phC cmd error fw_status 0x%x 0x%x 0x%x\n", |
10761 |
++ __func__, sp->fcport->port_name, |
10762 |
++ fw_status[0], fw_status[1], fw_status[2]); |
10763 |
++ |
10764 |
++ fcport->flags &= ~FCF_ASYNC_SENT; |
10765 |
++ fcport->disc_state = DSC_LOGIN_FAILED; |
10766 |
++ set_bit(RELOGIN_NEEDED, &vha->dpc_flags); |
10767 |
++ break; |
10768 |
++ } |
10769 |
++ break; |
10770 |
++ |
10771 |
++ default: |
10772 |
++ ql_dbg(ql_dbg_disc, vha, 0x20eb, |
10773 |
++ "%s %8phC cmd error 2 fw_status 0x%x 0x%x 0x%x\n", |
10774 |
++ __func__, sp->fcport->port_name, |
10775 |
++ fw_status[0], fw_status[1], fw_status[2]); |
10776 |
++ |
10777 |
++ sp->fcport->flags &= ~FCF_ASYNC_SENT; |
10778 |
++ sp->fcport->disc_state = DSC_LOGIN_FAILED; |
10779 |
++ set_bit(RELOGIN_NEEDED, &vha->dpc_flags); |
10780 |
++ break; |
10781 |
+ } |
10782 |
+ |
10783 |
+ e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP); |
10784 |
+@@ -2801,11 +2893,12 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode, |
10785 |
+ return -ENOMEM; |
10786 |
+ } |
10787 |
+ |
10788 |
++ fcport->flags |= FCF_ASYNC_SENT; |
10789 |
++ fcport->disc_state = DSC_LOGIN_PEND; |
10790 |
+ elsio = &sp->u.iocb_cmd; |
10791 |
+ ql_dbg(ql_dbg_io, vha, 0x3073, |
10792 |
+ "Enter: PLOGI portid=%06x\n", fcport->d_id.b24); |
10793 |
+ |
10794 |
+- fcport->flags |= FCF_ASYNC_SENT; |
10795 |
+ sp->type = SRB_ELS_DCMD; |
10796 |
+ sp->name = "ELS_DCMD"; |
10797 |
+ sp->fcport = fcport; |
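Annotation (not part of the patch): the LSC_SCODE_NPORT_USED branch above unpacks a 24-bit FC N_Port ID (domain/area/AL_PA) from the third firmware status word. A minimal standalone sketch of that unpacking, using invented names rather than the driver's port_id_t:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for the driver's port_id_t layout. */
    struct fc_portid {
            uint8_t domain; /* bits 23..16 */
            uint8_t area;   /* bits 15..8  */
            uint8_t al_pa;  /* bits 7..0   */
    };

    static struct fc_portid unpack_portid(uint32_t fw_word)
    {
            struct fc_portid id = {
                    .domain = (fw_word >> 16) & 0xff,
                    .area   = (fw_word >> 8) & 0xff,
                    .al_pa  = fw_word & 0xff,
            };
            return id;
    }

    int main(void)
    {
            struct fc_portid id = unpack_portid(0x0a1b2c);

            printf("%02x:%02x:%02x\n", id.domain, id.area, id.al_pa); /* 0a:1b:2c */
            return 0;
    }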
10798 |
+diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c |
10799 |
+index b75e6e4d58c06..a7acc266cec06 100644 |
10800 |
+--- a/drivers/scsi/qla2xxx/qla_target.c |
10801 |
++++ b/drivers/scsi/qla2xxx/qla_target.c |
10802 |
+@@ -957,7 +957,7 @@ void qlt_free_session_done(struct work_struct *work) |
10803 |
+ struct qlt_plogi_ack_t *own = |
10804 |
+ sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN]; |
10805 |
+ |
10806 |
+- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf084, |
10807 |
++ ql_dbg(ql_dbg_disc, vha, 0xf084, |
10808 |
+ "%s: se_sess %p / sess %p from port %8phC loop_id %#04x" |
10809 |
+ " s_id %02x:%02x:%02x logout %d keep %d els_logo %d\n", |
10810 |
+ __func__, sess->se_sess, sess, sess->port_name, sess->loop_id, |
10811 |
+@@ -1024,7 +1024,7 @@ void qlt_free_session_done(struct work_struct *work) |
10812 |
+ |
10813 |
+ while (!READ_ONCE(sess->logout_completed)) { |
10814 |
+ if (!traced) { |
10815 |
+- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf086, |
10816 |
++ ql_dbg(ql_dbg_disc, vha, 0xf086, |
10817 |
+ "%s: waiting for sess %p logout\n", |
10818 |
+ __func__, sess); |
10819 |
+ traced = true; |
10820 |
+@@ -1045,6 +1045,10 @@ void qlt_free_session_done(struct work_struct *work) |
10821 |
+ (struct imm_ntfy_from_isp *)sess->iocb, SRB_NACK_LOGO); |
10822 |
+ } |
10823 |
+ |
10824 |
++ spin_lock_irqsave(&vha->work_lock, flags); |
10825 |
++ sess->flags &= ~FCF_ASYNC_SENT; |
10826 |
++ spin_unlock_irqrestore(&vha->work_lock, flags); |
10827 |
++ |
10828 |
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags); |
10829 |
+ if (sess->se_sess) { |
10830 |
+ sess->se_sess = NULL; |
10831 |
+@@ -1108,7 +1112,7 @@ void qlt_free_session_done(struct work_struct *work) |
10832 |
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); |
10833 |
+ sess->free_pending = 0; |
10834 |
+ |
10835 |
+- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001, |
10836 |
++ ql_dbg(ql_dbg_disc, vha, 0xf001, |
10837 |
+ "Unregistration of sess %p %8phC finished fcp_cnt %d\n", |
10838 |
+ sess, sess->port_name, vha->fcport_count); |
10839 |
+ |
10840 |
+@@ -1151,6 +1155,11 @@ void qlt_unreg_sess(struct fc_port *sess) |
10841 |
+ return; |
10842 |
+ } |
10843 |
+ sess->free_pending = 1; |
10844 |
++ /* |
10845 |
++ * Use FCF_ASYNC_SENT flag to block other cmds used in sess |
10846 |
++ * management from being sent. |
10847 |
++ */ |
10848 |
++ sess->flags |= FCF_ASYNC_SENT; |
10849 |
+ spin_unlock_irqrestore(&sess->vha->work_lock, flags); |
10850 |
+ |
10851 |
+ if (sess->se_sess) |
10852 |
+@@ -4580,7 +4589,7 @@ qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn, |
10853 |
+ /* find other sess with nport_id collision */ |
10854 |
+ if (port_id.b24 == other_sess->d_id.b24) { |
10855 |
+ if (loop_id != other_sess->loop_id) { |
10856 |
+- ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000c, |
10857 |
++ ql_dbg(ql_dbg_disc, vha, 0x1000c, |
10858 |
+ "Invalidating sess %p loop_id %d wwn %llx.\n", |
10859 |
+ other_sess, other_sess->loop_id, other_wwn); |
10860 |
+ |
10861 |
+@@ -4596,7 +4605,7 @@ qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn, |
10862 |
+ * Another wwn used to have our s_id/loop_id |
10863 |
+ * kill the session, but don't free the loop_id |
10864 |
+ */ |
10865 |
+- ql_dbg(ql_dbg_tgt_tmr, vha, 0xf01b, |
10866 |
++ ql_dbg(ql_dbg_disc, vha, 0xf01b, |
10867 |
+ "Invalidating sess %p loop_id %d wwn %llx.\n", |
10868 |
+ other_sess, other_sess->loop_id, other_wwn); |
10869 |
+ |
10870 |
+@@ -4611,7 +4620,7 @@ qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn, |
10871 |
+ /* find other sess with nport handle collision */ |
10872 |
+ if ((loop_id == other_sess->loop_id) && |
10873 |
+ (loop_id != FC_NO_LOOP_ID)) { |
10874 |
+- ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000d, |
10875 |
++ ql_dbg(ql_dbg_disc, vha, 0x1000d, |
10876 |
+ "Invalidating sess %p loop_id %d wwn %llx.\n", |
10877 |
+ other_sess, other_sess->loop_id, other_wwn); |
10878 |
+ |
10879 |
+diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c |
10880 |
+index 5e502e1605549..d538b3d4f74a5 100644 |
10881 |
+--- a/drivers/scsi/ufs/ufshcd.c |
10882 |
++++ b/drivers/scsi/ufs/ufshcd.c |
10883 |
+@@ -334,27 +334,27 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba, |
10884 |
+ u8 opcode = 0; |
10885 |
+ u32 intr, doorbell; |
10886 |
+ struct ufshcd_lrb *lrbp = &hba->lrb[tag]; |
10887 |
++ struct scsi_cmnd *cmd = lrbp->cmd; |
10888 |
+ int transfer_len = -1; |
10889 |
+ |
10890 |
+ if (!trace_ufshcd_command_enabled()) { |
10891 |
+ /* trace UPIU W/O tracing command */ |
10892 |
+- if (lrbp->cmd) |
10893 |
++ if (cmd) |
10894 |
+ ufshcd_add_cmd_upiu_trace(hba, tag, str); |
10895 |
+ return; |
10896 |
+ } |
10897 |
+ |
10898 |
+- if (lrbp->cmd) { /* data phase exists */ |
10899 |
++ if (cmd) { /* data phase exists */ |
10900 |
+ /* trace UPIU also */ |
10901 |
+ ufshcd_add_cmd_upiu_trace(hba, tag, str); |
10902 |
+- opcode = (u8)(*lrbp->cmd->cmnd); |
10903 |
++ opcode = cmd->cmnd[0]; |
10904 |
+ if ((opcode == READ_10) || (opcode == WRITE_10)) { |
10905 |
+ /* |
10906 |
+ * Currently we only fully trace read(10) and write(10) |
10907 |
+ * commands |
10908 |
+ */ |
10909 |
+- if (lrbp->cmd->request && lrbp->cmd->request->bio) |
10910 |
+- lba = |
10911 |
+- lrbp->cmd->request->bio->bi_iter.bi_sector; |
10912 |
++ if (cmd->request && cmd->request->bio) |
10913 |
++ lba = cmd->request->bio->bi_iter.bi_sector; |
10914 |
+ transfer_len = be32_to_cpu( |
10915 |
+ lrbp->ucd_req_ptr->sc.exp_data_transfer_len); |
10916 |
+ } |
10917 |
+@@ -1888,12 +1888,12 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag) |
10918 |
+ { |
10919 |
+ hba->lrb[task_tag].issue_time_stamp = ktime_get(); |
10920 |
+ hba->lrb[task_tag].compl_time_stamp = ktime_set(0, 0); |
10921 |
++ ufshcd_add_command_trace(hba, task_tag, "send"); |
10922 |
+ ufshcd_clk_scaling_start_busy(hba); |
10923 |
+ __set_bit(task_tag, &hba->outstanding_reqs); |
10924 |
+ ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL); |
10925 |
+ /* Make sure that doorbell is committed immediately */ |
10926 |
+ wmb(); |
10927 |
+- ufshcd_add_command_trace(hba, task_tag, "send"); |
10928 |
+ } |
10929 |
+ |
10930 |
+ /** |
10931 |
+diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c |
10932 |
+index fc53dbe57f854..a90963812357c 100644 |
10933 |
+--- a/drivers/soundwire/bus.c |
10934 |
++++ b/drivers/soundwire/bus.c |
10935 |
+@@ -113,6 +113,8 @@ static int sdw_delete_slave(struct device *dev, void *data) |
10936 |
+ struct sdw_slave *slave = dev_to_sdw_dev(dev); |
10937 |
+ struct sdw_bus *bus = slave->bus; |
10938 |
+ |
10939 |
++ pm_runtime_disable(dev); |
10940 |
++ |
10941 |
+ sdw_slave_debugfs_exit(slave); |
10942 |
+ |
10943 |
+ mutex_lock(&bus->bus_lock); |
10944 |
+diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c |
10945 |
+index 502ed4ec8f070..e3d06330d1258 100644 |
10946 |
+--- a/drivers/soundwire/cadence_master.c |
10947 |
++++ b/drivers/soundwire/cadence_master.c |
10948 |
+@@ -231,6 +231,22 @@ static int cdns_clear_bit(struct sdw_cdns *cdns, int offset, u32 value) |
10949 |
+ return -EAGAIN; |
10950 |
+ } |
10951 |
+ |
10952 |
++/* |
10953 |
++ * all changes to the MCP_CONFIG, MCP_CONTROL, MCP_CMDCTRL and MCP_PHYCTRL |
10954 |
++ * need to be confirmed with a write to MCP_CONFIG_UPDATE |
10955 |
++ */ |
10956 |
++static int cdns_update_config(struct sdw_cdns *cdns) |
10957 |
++{ |
10958 |
++ int ret; |
10959 |
++ |
10960 |
++ ret = cdns_clear_bit(cdns, CDNS_MCP_CONFIG_UPDATE, |
10961 |
++ CDNS_MCP_CONFIG_UPDATE_BIT); |
10962 |
++ if (ret < 0) |
10963 |
++ dev_err(cdns->dev, "Config update timed out\n"); |
10964 |
++ |
10965 |
++ return ret; |
10966 |
++} |
10967 |
++ |
10968 |
+ /* |
10969 |
+ * debugfs |
10970 |
+ */ |
10971 |
+@@ -752,7 +768,38 @@ EXPORT_SYMBOL(sdw_cdns_thread); |
10972 |
+ /* |
10973 |
+ * init routines |
10974 |
+ */ |
10975 |
+-static int _cdns_enable_interrupt(struct sdw_cdns *cdns) |
10976 |
++ |
10977 |
++/** |
10978 |
++ * sdw_cdns_exit_reset() - Program reset parameters and start bus operations |
10979 |
++ * @cdns: Cadence instance |
10980 |
++ */ |
10981 |
++int sdw_cdns_exit_reset(struct sdw_cdns *cdns) |
10982 |
++{ |
10983 |
++ /* program maximum length reset to be safe */ |
10984 |
++ cdns_updatel(cdns, CDNS_MCP_CONTROL, |
10985 |
++ CDNS_MCP_CONTROL_RST_DELAY, |
10986 |
++ CDNS_MCP_CONTROL_RST_DELAY); |
10987 |
++ |
10988 |
++ /* use hardware generated reset */ |
10989 |
++ cdns_updatel(cdns, CDNS_MCP_CONTROL, |
10990 |
++ CDNS_MCP_CONTROL_HW_RST, |
10991 |
++ CDNS_MCP_CONTROL_HW_RST); |
10992 |
++ |
10993 |
++ /* enable bus operations with clock and data */ |
10994 |
++ cdns_updatel(cdns, CDNS_MCP_CONFIG, |
10995 |
++ CDNS_MCP_CONFIG_OP, |
10996 |
++ CDNS_MCP_CONFIG_OP_NORMAL); |
10997 |
++ |
10998 |
++ /* commit changes */ |
10999 |
++ return cdns_update_config(cdns); |
11000 |
++} |
11001 |
++EXPORT_SYMBOL(sdw_cdns_exit_reset); |
11002 |
++ |
11003 |
++/** |
11004 |
++ * sdw_cdns_enable_interrupt() - Enable SDW interrupts and update config |
11005 |
++ * @cdns: Cadence instance |
11006 |
++ */ |
11007 |
++int sdw_cdns_enable_interrupt(struct sdw_cdns *cdns) |
11008 |
+ { |
11009 |
+ u32 mask; |
11010 |
+ |
11011 |
+@@ -784,24 +831,8 @@ static int _cdns_enable_interrupt(struct sdw_cdns *cdns) |
11012 |
+ |
11013 |
+ cdns_writel(cdns, CDNS_MCP_INTMASK, mask); |
11014 |
+ |
11015 |
+- return 0; |
11016 |
+-} |
11017 |
+- |
11018 |
+-/** |
11019 |
+- * sdw_cdns_enable_interrupt() - Enable SDW interrupts and update config |
11020 |
+- * @cdns: Cadence instance |
11021 |
+- */ |
11022 |
+-int sdw_cdns_enable_interrupt(struct sdw_cdns *cdns) |
11023 |
+-{ |
11024 |
+- int ret; |
11025 |
+- |
11026 |
+- _cdns_enable_interrupt(cdns); |
11027 |
+- ret = cdns_clear_bit(cdns, CDNS_MCP_CONFIG_UPDATE, |
11028 |
+- CDNS_MCP_CONFIG_UPDATE_BIT); |
11029 |
+- if (ret < 0) |
11030 |
+- dev_err(cdns->dev, "Config update timedout\n"); |
11031 |
+- |
11032 |
+- return ret; |
11033 |
++ /* commit changes */ |
11034 |
++ return cdns_update_config(cdns); |
11035 |
+ } |
11036 |
+ EXPORT_SYMBOL(sdw_cdns_enable_interrupt); |
11037 |
+ |
11038 |
+@@ -975,6 +1006,10 @@ int sdw_cdns_init(struct sdw_cdns *cdns) |
11039 |
+ cdns_writel(cdns, CDNS_MCP_SSP_CTRL0, CDNS_DEFAULT_SSP_INTERVAL); |
11040 |
+ cdns_writel(cdns, CDNS_MCP_SSP_CTRL1, CDNS_DEFAULT_SSP_INTERVAL); |
11041 |
+ |
11042 |
++ /* flush command FIFOs */ |
11043 |
++ cdns_updatel(cdns, CDNS_MCP_CONTROL, CDNS_MCP_CONTROL_CMD_RST, |
11044 |
++ CDNS_MCP_CONTROL_CMD_RST); |
11045 |
++ |
11046 |
+ /* Set cmd accept mode */ |
11047 |
+ cdns_updatel(cdns, CDNS_MCP_CONTROL, CDNS_MCP_CONTROL_CMD_ACCEPT, |
11048 |
+ CDNS_MCP_CONTROL_CMD_ACCEPT); |
11049 |
+@@ -997,13 +1032,10 @@ int sdw_cdns_init(struct sdw_cdns *cdns) |
11050 |
+ /* Set cmd mode for Tx and Rx cmds */ |
11051 |
+ val &= ~CDNS_MCP_CONFIG_CMD; |
11052 |
+ |
11053 |
+- /* Set operation to normal */ |
11054 |
+- val &= ~CDNS_MCP_CONFIG_OP; |
11055 |
+- val |= CDNS_MCP_CONFIG_OP_NORMAL; |
11056 |
+- |
11057 |
+ cdns_writel(cdns, CDNS_MCP_CONFIG, val); |
11058 |
+ |
11059 |
+- return 0; |
11060 |
++ /* commit changes */ |
11061 |
++ return cdns_update_config(cdns); |
11062 |
+ } |
11063 |
+ EXPORT_SYMBOL(sdw_cdns_init); |
11064 |
+ |
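Annotation (not part of the patch): the refactor above funnels every MCP register change through cdns_update_config(), which sets a self-clearing update bit and polls until the hardware consumes it. A minimal sketch of that commit pattern, with a simulated register in place of real MMIO; the register name, retry count, and the fake latch model are all assumptions:

    #include <stdint.h>
    #include <stdio.h>

    #define CFG_UPDATE_BIT 0x1u

    static uint32_t cfg_update_reg;  /* simulated MMIO register */
    static int hw_busy_reads = 3;    /* fake hw: latches after 3 polls */

    static uint32_t reg_read(void)
    {
            /* Simulated hardware clears the bit once the update latches. */
            if ((cfg_update_reg & CFG_UPDATE_BIT) && --hw_busy_reads <= 0)
                    cfg_update_reg &= ~CFG_UPDATE_BIT;
            return cfg_update_reg;
    }

    static int commit_config(void)
    {
            int retries = 10;

            cfg_update_reg |= CFG_UPDATE_BIT;  /* kick the update */
            while (retries--)
                    if (!(reg_read() & CFG_UPDATE_BIT))
                            return 0;          /* hardware consumed it */
            return -1;                         /* timed out */
    }

    int main(void)
    {
            printf("commit_config() -> %d\n", commit_config()); /* -> 0 */
            return 0;
    }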
11065 |
+diff --git a/drivers/soundwire/cadence_master.h b/drivers/soundwire/cadence_master.h |
11066 |
+index 0b72b70947352..1a67728c5000f 100644 |
11067 |
+--- a/drivers/soundwire/cadence_master.h |
11068 |
++++ b/drivers/soundwire/cadence_master.h |
11069 |
+@@ -161,6 +161,7 @@ irqreturn_t sdw_cdns_thread(int irq, void *dev_id); |
11070 |
+ int sdw_cdns_init(struct sdw_cdns *cdns); |
11071 |
+ int sdw_cdns_pdi_init(struct sdw_cdns *cdns, |
11072 |
+ struct sdw_cdns_stream_config config); |
11073 |
++int sdw_cdns_exit_reset(struct sdw_cdns *cdns); |
11074 |
+ int sdw_cdns_enable_interrupt(struct sdw_cdns *cdns); |
11075 |
+ |
11076 |
+ #ifdef CONFIG_DEBUG_FS |
11077 |
+diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c |
11078 |
+index 243af8198d1c6..a2da04946f0b4 100644 |
11079 |
+--- a/drivers/soundwire/intel.c |
11080 |
++++ b/drivers/soundwire/intel.c |
11081 |
+@@ -1050,8 +1050,6 @@ static int intel_probe(struct platform_device *pdev) |
11082 |
+ if (ret) |
11083 |
+ goto err_init; |
11084 |
+ |
11085 |
+- ret = sdw_cdns_enable_interrupt(&sdw->cdns); |
11086 |
+- |
11087 |
+ /* Read the PDI config and initialize cadence PDI */ |
11088 |
+ intel_pdi_init(sdw, &config); |
11089 |
+ ret = sdw_cdns_pdi_init(&sdw->cdns, config); |
11090 |
+@@ -1069,6 +1067,18 @@ static int intel_probe(struct platform_device *pdev) |
11091 |
+ goto err_init; |
11092 |
+ } |
11093 |
+ |
11094 |
++ ret = sdw_cdns_enable_interrupt(&sdw->cdns); |
11095 |
++ if (ret < 0) { |
11096 |
++ dev_err(sdw->cdns.dev, "cannot enable interrupts\n"); |
11097 |
++ goto err_init; |
11098 |
++ } |
11099 |
++ |
11100 |
++ ret = sdw_cdns_exit_reset(&sdw->cdns); |
11101 |
++ if (ret < 0) { |
11102 |
++ dev_err(sdw->cdns.dev, "unable to exit bus reset sequence\n"); |
11103 |
++ goto err_init; |
11104 |
++ } |
11105 |
++ |
11106 |
+ /* Register DAIs */ |
11107 |
+ ret = intel_register_dai(sdw); |
11108 |
+ if (ret) { |
11109 |
+diff --git a/drivers/staging/media/imx/imx-media-capture.c b/drivers/staging/media/imx/imx-media-capture.c |
11110 |
+index 46576e32581f0..d151cd6d31884 100644 |
11111 |
+--- a/drivers/staging/media/imx/imx-media-capture.c |
11112 |
++++ b/drivers/staging/media/imx/imx-media-capture.c |
11113 |
+@@ -785,7 +785,7 @@ int imx_media_capture_device_register(struct imx_media_video_dev *vdev) |
11114 |
+ /* setup default format */ |
11115 |
+ fmt_src.pad = priv->src_sd_pad; |
11116 |
+ fmt_src.which = V4L2_SUBDEV_FORMAT_ACTIVE; |
11117 |
+- v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt_src); |
11118 |
++ ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt_src); |
11119 |
+ if (ret) { |
11120 |
+ v4l2_err(sd, "failed to get src_sd format\n"); |
11121 |
+ goto unreg; |
11122 |
+diff --git a/drivers/staging/rtl8188eu/core/rtw_recv.c b/drivers/staging/rtl8188eu/core/rtw_recv.c |
11123 |
+index d4278361e0028..a036ef104198e 100644 |
11124 |
+--- a/drivers/staging/rtl8188eu/core/rtw_recv.c |
11125 |
++++ b/drivers/staging/rtl8188eu/core/rtw_recv.c |
11126 |
+@@ -1525,21 +1525,14 @@ static int amsdu_to_msdu(struct adapter *padapter, struct recv_frame *prframe) |
11127 |
+ |
11128 |
+ /* Allocate new skb for releasing to upper layer */ |
11129 |
+ sub_skb = dev_alloc_skb(nSubframe_Length + 12); |
11130 |
+- if (sub_skb) { |
11131 |
+- skb_reserve(sub_skb, 12); |
11132 |
+- skb_put_data(sub_skb, pdata, nSubframe_Length); |
11133 |
+- } else { |
11134 |
+- sub_skb = skb_clone(prframe->pkt, GFP_ATOMIC); |
11135 |
+- if (sub_skb) { |
11136 |
+- sub_skb->data = pdata; |
11137 |
+- sub_skb->len = nSubframe_Length; |
11138 |
+- skb_set_tail_pointer(sub_skb, nSubframe_Length); |
11139 |
+- } else { |
11140 |
+- DBG_88E("skb_clone() Fail!!! , nr_subframes=%d\n", nr_subframes); |
11141 |
+- break; |
11142 |
+- } |
11143 |
++ if (!sub_skb) { |
11144 |
++ DBG_88E("dev_alloc_skb() Fail!!! , nr_subframes=%d\n", nr_subframes); |
11145 |
++ break; |
11146 |
+ } |
11147 |
+ |
11148 |
++ skb_reserve(sub_skb, 12); |
11149 |
++ skb_put_data(sub_skb, pdata, nSubframe_Length); |
11150 |
++ |
11151 |
+ subframes[nr_subframes++] = sub_skb; |
11152 |
+ |
11153 |
+ if (nr_subframes >= MAX_SUBFRAME_COUNT) { |
11154 |
+diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c |
11155 |
+index d0873de718da9..43f0cd2bd0ae6 100644 |
11156 |
+--- a/drivers/thermal/rcar_thermal.c |
11157 |
++++ b/drivers/thermal/rcar_thermal.c |
11158 |
+@@ -526,8 +526,10 @@ static int rcar_thermal_probe(struct platform_device *pdev) |
11159 |
+ res = platform_get_resource(pdev, IORESOURCE_MEM, |
11160 |
+ mres++); |
11161 |
+ common->base = devm_ioremap_resource(dev, res); |
11162 |
+- if (IS_ERR(common->base)) |
11163 |
+- return PTR_ERR(common->base); |
11164 |
++ if (IS_ERR(common->base)) { |
11165 |
++ ret = PTR_ERR(common->base); |
11166 |
++ goto error_unregister; |
11167 |
++ } |
11168 |
+ |
11169 |
+ idle = 0; /* polling delay is not needed */ |
11170 |
+ } |
11171 |
+diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c |
11172 |
+index 836e736ae188b..f2c6d9d3bb28f 100644 |
11173 |
+--- a/drivers/tty/serial/8250/8250_omap.c |
11174 |
++++ b/drivers/tty/serial/8250/8250_omap.c |
11175 |
+@@ -790,7 +790,10 @@ static void __dma_rx_do_complete(struct uart_8250_port *p) |
11176 |
+ dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state); |
11177 |
+ |
11178 |
+ count = dma->rx_size - state.residue; |
11179 |
+- |
11180 |
++ if (count < dma->rx_size) |
11181 |
++ dmaengine_terminate_async(dma->rxchan); |
11182 |
++ if (!count) |
11183 |
++ goto unlock; |
11184 |
+ ret = tty_insert_flip_string(tty_port, dma->rx_buf, count); |
11185 |
+ |
11186 |
+ p->port.icount.rx += ret; |
11187 |
+@@ -852,7 +855,6 @@ static void omap_8250_rx_dma_flush(struct uart_8250_port *p) |
11188 |
+ spin_unlock_irqrestore(&priv->rx_dma_lock, flags); |
11189 |
+ |
11190 |
+ __dma_rx_do_complete(p); |
11191 |
+- dmaengine_terminate_all(dma->rxchan); |
11192 |
+ } |
11193 |
+ |
11194 |
+ static int omap_8250_rx_dma(struct uart_8250_port *p) |
11195 |
+@@ -1234,6 +1236,7 @@ static int omap8250_probe(struct platform_device *pdev) |
11196 |
+ spin_lock_init(&priv->rx_dma_lock); |
11197 |
+ |
11198 |
+ device_init_wakeup(&pdev->dev, true); |
11199 |
++ pm_runtime_enable(&pdev->dev); |
11200 |
+ pm_runtime_use_autosuspend(&pdev->dev); |
11201 |
+ |
11202 |
+ /* |
11203 |
+@@ -1247,7 +1250,6 @@ static int omap8250_probe(struct platform_device *pdev) |
11204 |
+ pm_runtime_set_autosuspend_delay(&pdev->dev, -1); |
11205 |
+ |
11206 |
+ pm_runtime_irq_safe(&pdev->dev); |
11207 |
+- pm_runtime_enable(&pdev->dev); |
11208 |
+ |
11209 |
+ pm_runtime_get_sync(&pdev->dev); |
11210 |
+ |
11211 |
+diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c |
11212 |
+index 90f09ed6e5ad3..5b673077639ba 100644 |
11213 |
+--- a/drivers/tty/serial/8250/8250_port.c |
11214 |
++++ b/drivers/tty/serial/8250/8250_port.c |
11215 |
+@@ -1816,6 +1816,7 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir) |
11216 |
+ unsigned char status; |
11217 |
+ unsigned long flags; |
11218 |
+ struct uart_8250_port *up = up_to_u8250p(port); |
11219 |
++ bool skip_rx = false; |
11220 |
+ |
11221 |
+ if (iir & UART_IIR_NO_INT) |
11222 |
+ return 0; |
11223 |
+@@ -1824,7 +1825,20 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir) |
11224 |
+ |
11225 |
+ status = serial_port_in(port, UART_LSR); |
11226 |
+ |
11227 |
+- if (status & (UART_LSR_DR | UART_LSR_BI)) { |
11228 |
++ /* |
11229 |
++ * If port is stopped and there are no error conditions in the |
11230 |
++ * FIFO, then don't drain the FIFO, as this may lead to TTY buffer |
11231 |
++ * overflow. Not servicing, RX FIFO would trigger auto HW flow |
11232 |
++ * control when FIFO occupancy reaches preset threshold, thus |
11233 |
++ * halting RX. This only works when auto HW flow control is |
11234 |
++ * available. |
11235 |
++ */ |
11236 |
++ if (!(status & (UART_LSR_FIFOE | UART_LSR_BRK_ERROR_BITS)) && |
11237 |
++ (port->status & (UPSTAT_AUTOCTS | UPSTAT_AUTORTS)) && |
11238 |
++ !(port->read_status_mask & UART_LSR_DR)) |
11239 |
++ skip_rx = true; |
11240 |
++ |
11241 |
++ if (status & (UART_LSR_DR | UART_LSR_BI) && !skip_rx) { |
11242 |
+ if (!up->dma || handle_rx_dma(up, iir)) |
11243 |
+ status = serial8250_rx_chars(up, status); |
11244 |
+ } |
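Annotation (not part of the patch): the skip_rx test added above only suppresses FIFO draining when three things hold at once: no error bits latched in the LSR, auto hardware flow control active, and data-ready masked out because the port is stopped. Roughly, as a standalone predicate with invented flag values in place of the UART_LSR_*/UPSTAT_* constants:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Invented flag values standing in for UART_LSR_* / UPSTAT_* bits. */
    #define LSR_ERROR_BITS 0x9e  /* FIFO/break/framing/parity/overrun */
    #define LSR_DATA_READY 0x01
    #define STAT_AUTOFLOW  0x03  /* AUTOCTS | AUTORTS */

    static bool should_skip_rx(uint8_t lsr, unsigned int port_status,
                               unsigned int read_status_mask)
    {
            return !(lsr & LSR_ERROR_BITS) &&            /* FIFO is clean */
                   (port_status & STAT_AUTOFLOW) &&      /* hw flow control on */
                   !(read_status_mask & LSR_DATA_READY); /* reader stopped */
    }

    int main(void)
    {
            /* Stopped port, clean FIFO, autoflow on: leave data to hw flow control. */
            printf("%d\n", should_skip_rx(LSR_DATA_READY, STAT_AUTOFLOW, 0));    /* 1 */
            /* Reader active (DR unmasked): drain the FIFO as usual. */
            printf("%d\n", should_skip_rx(LSR_DATA_READY, STAT_AUTOFLOW, 0x01)); /* 0 */
            return 0;
    }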
11245 |
+diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c |
11246 |
+index 71f99e9217592..c7683beb3412a 100644 |
11247 |
+--- a/drivers/tty/serial/samsung.c |
11248 |
++++ b/drivers/tty/serial/samsung.c |
11249 |
+@@ -1187,14 +1187,14 @@ static unsigned int s3c24xx_serial_getclk(struct s3c24xx_uart_port *ourport, |
11250 |
+ struct s3c24xx_uart_info *info = ourport->info; |
11251 |
+ struct clk *clk; |
11252 |
+ unsigned long rate; |
11253 |
+- unsigned int cnt, baud, quot, clk_sel, best_quot = 0; |
11254 |
++ unsigned int cnt, baud, quot, best_quot = 0; |
11255 |
+ char clkname[MAX_CLK_NAME_LENGTH]; |
11256 |
+ int calc_deviation, deviation = (1 << 30) - 1; |
11257 |
+ |
11258 |
+- clk_sel = (ourport->cfg->clk_sel) ? ourport->cfg->clk_sel : |
11259 |
+- ourport->info->def_clk_sel; |
11260 |
+ for (cnt = 0; cnt < info->num_clks; cnt++) { |
11261 |
+- if (!(clk_sel & (1 << cnt))) |
11262 |
++ /* Keep selected clock if provided */ |
11263 |
++ if (ourport->cfg->clk_sel && |
11264 |
++ !(ourport->cfg->clk_sel & (1 << cnt))) |
11265 |
+ continue; |
11266 |
+ |
11267 |
+ sprintf(clkname, "clk_uart_baud%d", cnt); |
11268 |
+diff --git a/drivers/tty/serial/sifive.c b/drivers/tty/serial/sifive.c |
11269 |
+index 38133eba83a87..b4343c6aa6512 100644 |
11270 |
+--- a/drivers/tty/serial/sifive.c |
11271 |
++++ b/drivers/tty/serial/sifive.c |
11272 |
+@@ -618,10 +618,10 @@ static void sifive_serial_shutdown(struct uart_port *port) |
11273 |
+ * |
11274 |
+ * On the V0 SoC, the UART IP block is derived from the CPU clock source |
11275 |
+ * after a synchronous divide-by-two divider, so any CPU clock rate change |
11276 |
+- * requires the UART baud rate to be updated. This presumably could corrupt any |
11277 |
+- * serial word currently being transmitted or received. It would probably |
11278 |
+- * be better to stop receives and transmits, then complete the baud rate |
11279 |
+- * change, then re-enable them. |
11280 |
++ * requires the UART baud rate to be updated. This presumably corrupts any |
11281 |
++ * serial word currently being transmitted or received. In order to avoid |
11282 |
++ * corrupting the output data stream, we drain the transmit queue before |
11283 |
++ * allowing the clock's rate to be changed. |
11284 |
+ */ |
11285 |
+ static int sifive_serial_clk_notifier(struct notifier_block *nb, |
11286 |
+ unsigned long event, void *data) |
11287 |
+@@ -629,6 +629,26 @@ static int sifive_serial_clk_notifier(struct notifier_block *nb, |
11288 |
+ struct clk_notifier_data *cnd = data; |
11289 |
+ struct sifive_serial_port *ssp = notifier_to_sifive_serial_port(nb); |
11290 |
+ |
11291 |
++ if (event == PRE_RATE_CHANGE) { |
11292 |
++ /* |
11293 |
++ * The TX watermark is always set to 1 by this driver, which |
11294 |
++ * means that the TX busy bit will lower when there are 0 bytes |
11295 |
++ * left in the TX queue -- in other words, when the TX FIFO is |
11296 |
++ * empty. |
11297 |
++ */ |
11298 |
++ __ssp_wait_for_xmitr(ssp); |
11299 |
++ /* |
11300 |
++ * On the cycle the TX FIFO goes empty there is still a full |
11301 |
++ * UART frame left to be transmitted in the shift register. |
11302 |
++ * The UART provides no way for software to directly determine |
11303 |
++ * when that last frame has been transmitted, so we just sleep |
11304 |
++ * here instead. As we're not tracking the number of stop bits, |
11305 |
++ * they're just worst-cased here. The rest of the serial |
11306 |
++ * framing parameters aren't configurable by software. |
11307 |
++ */ |
11308 |
++ udelay(DIV_ROUND_UP(12 * 1000 * 1000, ssp->baud_rate)); |
11309 |
++ } |
11310 |
++ |
11311 |
+ if (event == POST_RATE_CHANGE && ssp->clkin_rate != cnd->new_rate) { |
11312 |
+ ssp->clkin_rate = cnd->new_rate; |
11313 |
+ __ssp_update_div(ssp); |
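Annotation (not part of the patch): the udelay() above budgets 12 bit times per frame (start bit plus 8 data bits plus worst-case parity and stop bits), converted to microseconds and rounded up. A quick check of the arithmetic at a common baud rate:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            /* 12 bit times at 115200 baud, rounded up to whole microseconds. */
            printf("%u us\n", DIV_ROUND_UP(12u * 1000 * 1000, 115200u)); /* 105 us */
            return 0;
    }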
11314 |
+diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c |
11315 |
+index 8948970f795e6..9359c80fbb9f5 100644 |
11316 |
+--- a/drivers/tty/serial/xilinx_uartps.c |
11317 |
++++ b/drivers/tty/serial/xilinx_uartps.c |
11318 |
+@@ -1248,6 +1248,7 @@ static int cdns_uart_console_setup(struct console *co, char *options) |
11319 |
+ int bits = 8; |
11320 |
+ int parity = 'n'; |
11321 |
+ int flow = 'n'; |
11322 |
++ unsigned long time_out; |
11323 |
+ |
11324 |
+ if (!port->membase) { |
11325 |
+ pr_debug("console on " CDNS_UART_TTY_NAME "%i not present\n", |
11326 |
+@@ -1258,6 +1259,13 @@ static int cdns_uart_console_setup(struct console *co, char *options) |
11327 |
+ if (options) |
11328 |
+ uart_parse_options(options, &baud, &parity, &bits, &flow); |
11329 |
+ |
11330 |
++ /* Wait for tx_empty before setting up the console */ |
11331 |
++ time_out = jiffies + usecs_to_jiffies(TX_TIMEOUT); |
11332 |
++ |
11333 |
++ while (time_before(jiffies, time_out) && |
11334 |
++ cdns_uart_tx_empty(port) != TIOCSER_TEMT) |
11335 |
++ cpu_relax(); |
11336 |
++ |
11337 |
+ return uart_set_options(port, co, baud, parity, bits, flow); |
11338 |
+ } |
11339 |
+ |
11340 |
+diff --git a/drivers/tty/vcc.c b/drivers/tty/vcc.c |
11341 |
+index d2a1e1228c82d..9ffd42e333b83 100644 |
11342 |
+--- a/drivers/tty/vcc.c |
11343 |
++++ b/drivers/tty/vcc.c |
11344 |
+@@ -605,6 +605,7 @@ static int vcc_probe(struct vio_dev *vdev, const struct vio_device_id *id) |
11345 |
+ port->index = vcc_table_add(port); |
11346 |
+ if (port->index == -1) { |
11347 |
+ pr_err("VCC: no more TTY indices left for allocation\n"); |
11348 |
++ rv = -ENOMEM; |
11349 |
+ goto free_ldc; |
11350 |
+ } |
11351 |
+ |
11352 |
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c |
11353 |
+index 4225544342519..809103254fc64 100644 |
11354 |
+--- a/drivers/usb/dwc3/gadget.c |
11355 |
++++ b/drivers/usb/dwc3/gadget.c |
11356 |
+@@ -270,7 +270,7 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd, |
11357 |
+ { |
11358 |
+ const struct usb_endpoint_descriptor *desc = dep->endpoint.desc; |
11359 |
+ struct dwc3 *dwc = dep->dwc; |
11360 |
+- u32 timeout = 1000; |
11361 |
++ u32 timeout = 5000; |
11362 |
+ u32 saved_config = 0; |
11363 |
+ u32 reg; |
11364 |
+ |
11365 |
+diff --git a/drivers/usb/host/ehci-mv.c b/drivers/usb/host/ehci-mv.c |
11366 |
+index 66ec1fdf9fe7d..b6f196f5e252e 100644 |
11367 |
+--- a/drivers/usb/host/ehci-mv.c |
11368 |
++++ b/drivers/usb/host/ehci-mv.c |
11369 |
+@@ -156,12 +156,10 @@ static int mv_ehci_probe(struct platform_device *pdev) |
11370 |
+ hcd->rsrc_len = resource_size(r); |
11371 |
+ hcd->regs = ehci_mv->op_regs; |
11372 |
+ |
11373 |
+- hcd->irq = platform_get_irq(pdev, 0); |
11374 |
+- if (!hcd->irq) { |
11375 |
+- dev_err(&pdev->dev, "Cannot get irq."); |
11376 |
+- retval = -ENODEV; |
11377 |
++ retval = platform_get_irq(pdev, 0); |
11378 |
++ if (retval < 0) |
11379 |
+ goto err_disable_clk; |
11380 |
+- } |
11381 |
++ hcd->irq = retval; |
11382 |
+ |
11383 |
+ ehci = hcd_to_ehci(hcd); |
11384 |
+ ehci->caps = (struct ehci_caps *) ehci_mv->cap_regs; |
11385 |
+diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c |
11386 |
+index 0d16f9806655f..a72fd5309b09f 100644 |
11387 |
+--- a/drivers/vfio/pci/vfio_pci.c |
11388 |
++++ b/drivers/vfio/pci/vfio_pci.c |
11389 |
+@@ -474,6 +474,19 @@ static void vfio_pci_release(void *device_data) |
11390 |
+ if (!(--vdev->refcnt)) { |
11391 |
+ vfio_spapr_pci_eeh_release(vdev->pdev); |
11392 |
+ vfio_pci_disable(vdev); |
11393 |
++ mutex_lock(&vdev->igate); |
11394 |
++ if (vdev->err_trigger) { |
11395 |
++ eventfd_ctx_put(vdev->err_trigger); |
11396 |
++ vdev->err_trigger = NULL; |
11397 |
++ } |
11398 |
++ mutex_unlock(&vdev->igate); |
11399 |
++ |
11400 |
++ mutex_lock(&vdev->igate); |
11401 |
++ if (vdev->req_trigger) { |
11402 |
++ eventfd_ctx_put(vdev->req_trigger); |
11403 |
++ vdev->req_trigger = NULL; |
11404 |
++ } |
11405 |
++ mutex_unlock(&vdev->igate); |
11406 |
+ } |
11407 |
+ |
11408 |
+ mutex_unlock(&vdev->reflck->lock); |
11409 |
+diff --git a/fs/block_dev.c b/fs/block_dev.c |
11410 |
+index 2dc9c73a4cb29..79272cdbe8277 100644 |
11411 |
+--- a/fs/block_dev.c |
11412 |
++++ b/fs/block_dev.c |
11413 |
+@@ -1857,6 +1857,16 @@ static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part) |
11414 |
+ struct gendisk *disk = bdev->bd_disk; |
11415 |
+ struct block_device *victim = NULL; |
11416 |
+ |
11417 |
++ /* |
11418 |
++ * Sync early if it looks like we're the last one. If someone else |
11419 |
++ * opens the block device between now and the decrement of bd_openers |
11420 |
++ * then we did a sync that we didn't need to, but that's not the end |
11421 |
++ * of the world and we want to avoid long (could be several minute) |
11422 |
++ * syncs while holding the mutex. |
11423 |
++ */ |
11424 |
++ if (bdev->bd_openers == 1) |
11425 |
++ sync_blockdev(bdev); |
11426 |
++ |
11427 |
+ mutex_lock_nested(&bdev->bd_mutex, for_part); |
11428 |
+ if (for_part) |
11429 |
+ bdev->bd_part_count--; |
11430 |
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c |
11431 |
+index dd6fb2ee80409..cd65ef7c7c3f2 100644 |
11432 |
+--- a/fs/btrfs/disk-io.c |
11433 |
++++ b/fs/btrfs/disk-io.c |
11434 |
+@@ -649,16 +649,15 @@ static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio, |
11435 |
+ goto err; |
11436 |
+ |
11437 |
+ if (memcmp_extent_buffer(eb, result, 0, csum_size)) { |
11438 |
+- u32 val; |
11439 |
+- u32 found = 0; |
11440 |
+- |
11441 |
+- memcpy(&found, result, csum_size); |
11442 |
++ u8 val[BTRFS_CSUM_SIZE] = { 0 }; |
11443 |
+ |
11444 |
+ read_extent_buffer(eb, &val, 0, csum_size); |
11445 |
+ btrfs_warn_rl(fs_info, |
11446 |
+- "%s checksum verify failed on %llu wanted %x found %x level %d", |
11447 |
++ "%s checksum verify failed on %llu wanted " CSUM_FMT " found " CSUM_FMT " level %d", |
11448 |
+ fs_info->sb->s_id, eb->start, |
11449 |
+- val, found, btrfs_header_level(eb)); |
11450 |
++ CSUM_FMT_VALUE(csum_size, val), |
11451 |
++ CSUM_FMT_VALUE(csum_size, result), |
11452 |
++ btrfs_header_level(eb)); |
11453 |
+ ret = -EUCLEAN; |
11454 |
+ goto err; |
11455 |
+ } |
11456 |
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c |
11457 |
+index 31c1ed554d26d..7658f3193175b 100644 |
11458 |
+--- a/fs/btrfs/extent-tree.c |
11459 |
++++ b/fs/btrfs/extent-tree.c |
11460 |
+@@ -5428,8 +5428,6 @@ out: |
11461 |
+ */ |
11462 |
+ if (!for_reloc && !root_dropped) |
11463 |
+ btrfs_add_dead_root(root); |
11464 |
+- if (err && err != -EAGAIN) |
11465 |
+- btrfs_handle_fs_error(fs_info, err, NULL); |
11466 |
+ return err; |
11467 |
+ } |
11468 |
+ |
11469 |
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c |
11470 |
+index 9ac40991a6405..182e93a5b11d5 100644 |
11471 |
+--- a/fs/btrfs/inode.c |
11472 |
++++ b/fs/btrfs/inode.c |
11473 |
+@@ -8586,14 +8586,64 @@ err: |
11474 |
+ return ret; |
11475 |
+ } |
11476 |
+ |
11477 |
+-static int btrfs_submit_direct_hook(struct btrfs_dio_private *dip) |
11478 |
++/* |
11479 |
++ * If this succeeds, the btrfs_dio_private is responsible for cleaning up locked |
11480 |
++ * or ordered extents whether or not we submit any bios. |
11481 |
++ */ |
11482 |
++static struct btrfs_dio_private *btrfs_create_dio_private(struct bio *dio_bio, |
11483 |
++ struct inode *inode, |
11484 |
++ loff_t file_offset) |
11485 |
+ { |
11486 |
+- struct inode *inode = dip->inode; |
11487 |
++ const bool write = (bio_op(dio_bio) == REQ_OP_WRITE); |
11488 |
++ struct btrfs_dio_private *dip; |
11489 |
++ struct bio *bio; |
11490 |
++ |
11491 |
++ dip = kzalloc(sizeof(*dip), GFP_NOFS); |
11492 |
++ if (!dip) |
11493 |
++ return NULL; |
11494 |
++ |
11495 |
++ bio = btrfs_bio_clone(dio_bio); |
11496 |
++ bio->bi_private = dip; |
11497 |
++ btrfs_io_bio(bio)->logical = file_offset; |
11498 |
++ |
11499 |
++ dip->private = dio_bio->bi_private; |
11500 |
++ dip->inode = inode; |
11501 |
++ dip->logical_offset = file_offset; |
11502 |
++ dip->bytes = dio_bio->bi_iter.bi_size; |
11503 |
++ dip->disk_bytenr = (u64)dio_bio->bi_iter.bi_sector << 9; |
11504 |
++ dip->orig_bio = bio; |
11505 |
++ dip->dio_bio = dio_bio; |
11506 |
++ atomic_set(&dip->pending_bios, 1); |
11507 |
++ |
11508 |
++ if (write) { |
11509 |
++ struct btrfs_dio_data *dio_data = current->journal_info; |
11510 |
++ |
11511 |
++ /* |
11512 |
++ * Setting range start and end to the same value means that |
11513 |
++ * no cleanup will happen in btrfs_direct_IO |
11514 |
++ */ |
11515 |
++ dio_data->unsubmitted_oe_range_end = dip->logical_offset + |
11516 |
++ dip->bytes; |
11517 |
++ dio_data->unsubmitted_oe_range_start = |
11518 |
++ dio_data->unsubmitted_oe_range_end; |
11519 |
++ |
11520 |
++ bio->bi_end_io = btrfs_endio_direct_write; |
11521 |
++ } else { |
11522 |
++ bio->bi_end_io = btrfs_endio_direct_read; |
11523 |
++ dip->subio_endio = btrfs_subio_endio_read; |
11524 |
++ } |
11525 |
++ return dip; |
11526 |
++} |
11527 |
++ |
11528 |
++static void btrfs_submit_direct(struct bio *dio_bio, struct inode *inode, |
11529 |
++ loff_t file_offset) |
11530 |
++{ |
11531 |
++ const bool write = (bio_op(dio_bio) == REQ_OP_WRITE); |
11532 |
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); |
11533 |
++ struct btrfs_dio_private *dip; |
11534 |
+ struct bio *bio; |
11535 |
+- struct bio *orig_bio = dip->orig_bio; |
11536 |
+- u64 start_sector = orig_bio->bi_iter.bi_sector; |
11537 |
+- u64 file_offset = dip->logical_offset; |
11538 |
++ struct bio *orig_bio; |
11539 |
++ u64 start_sector; |
11540 |
+ int async_submit = 0; |
11541 |
+ u64 submit_len; |
11542 |
+ int clone_offset = 0; |
11543 |
+@@ -8602,11 +8652,24 @@ static int btrfs_submit_direct_hook(struct btrfs_dio_private *dip) |
11544 |
+ blk_status_t status; |
11545 |
+ struct btrfs_io_geometry geom; |
11546 |
+ |
11547 |
++ dip = btrfs_create_dio_private(dio_bio, inode, file_offset); |
11548 |
++ if (!dip) { |
11549 |
++ if (!write) { |
11550 |
++ unlock_extent(&BTRFS_I(inode)->io_tree, file_offset, |
11551 |
++ file_offset + dio_bio->bi_iter.bi_size - 1); |
11552 |
++ } |
11553 |
++ dio_bio->bi_status = BLK_STS_RESOURCE; |
11554 |
++ dio_end_io(dio_bio); |
11555 |
++ return; |
11556 |
++ } |
11557 |
++ |
11558 |
++ orig_bio = dip->orig_bio; |
11559 |
++ start_sector = orig_bio->bi_iter.bi_sector; |
11560 |
+ submit_len = orig_bio->bi_iter.bi_size; |
11561 |
+ ret = btrfs_get_io_geometry(fs_info, btrfs_op(orig_bio), |
11562 |
+ start_sector << 9, submit_len, &geom); |
11563 |
+ if (ret) |
11564 |
+- return -EIO; |
11565 |
++ goto out_err; |
11566 |
+ |
11567 |
+ if (geom.len >= submit_len) { |
11568 |
+ bio = orig_bio; |
11569 |
+@@ -8669,7 +8732,7 @@ static int btrfs_submit_direct_hook(struct btrfs_dio_private *dip) |
11570 |
+ submit: |
11571 |
+ status = btrfs_submit_dio_bio(bio, inode, file_offset, async_submit); |
11572 |
+ if (!status) |
11573 |
+- return 0; |
11574 |
++ return; |
11575 |
+ |
11576 |
+ if (bio != orig_bio) |
11577 |
+ bio_put(bio); |
11578 |
+@@ -8683,107 +8746,6 @@ out_err: |
11579 |
+ */ |
11580 |
+ if (atomic_dec_and_test(&dip->pending_bios)) |
11581 |
+ bio_io_error(dip->orig_bio); |
11582 |
+- |
11583 |
+- /* bio_end_io() will handle error, so we needn't return it */ |
11584 |
+- return 0; |
11585 |
+-} |
11586 |
+- |
11587 |
+-static void btrfs_submit_direct(struct bio *dio_bio, struct inode *inode, |
11588 |
+- loff_t file_offset) |
11589 |
+-{ |
11590 |
+- struct btrfs_dio_private *dip = NULL; |
11591 |
+- struct bio *bio = NULL; |
11592 |
+- struct btrfs_io_bio *io_bio; |
11593 |
+- bool write = (bio_op(dio_bio) == REQ_OP_WRITE); |
11594 |
+- int ret = 0; |
11595 |
+- |
11596 |
+- bio = btrfs_bio_clone(dio_bio); |
11597 |
+- |
11598 |
+- dip = kzalloc(sizeof(*dip), GFP_NOFS); |
11599 |
+- if (!dip) { |
11600 |
+- ret = -ENOMEM; |
11601 |
+- goto free_ordered; |
11602 |
+- } |
11603 |
+- |
11604 |
+- dip->private = dio_bio->bi_private; |
11605 |
+- dip->inode = inode; |
11606 |
+- dip->logical_offset = file_offset; |
11607 |
+- dip->bytes = dio_bio->bi_iter.bi_size; |
11608 |
+- dip->disk_bytenr = (u64)dio_bio->bi_iter.bi_sector << 9; |
11609 |
+- bio->bi_private = dip; |
11610 |
+- dip->orig_bio = bio; |
11611 |
+- dip->dio_bio = dio_bio; |
11612 |
+- atomic_set(&dip->pending_bios, 1); |
11613 |
+- io_bio = btrfs_io_bio(bio); |
11614 |
+- io_bio->logical = file_offset; |
11615 |
+- |
11616 |
+- if (write) { |
11617 |
+- bio->bi_end_io = btrfs_endio_direct_write; |
11618 |
+- } else { |
11619 |
+- bio->bi_end_io = btrfs_endio_direct_read; |
11620 |
+- dip->subio_endio = btrfs_subio_endio_read; |
11621 |
+- } |
11622 |
+- |
11623 |
+- /* |
11624 |
+- * Reset the range for unsubmitted ordered extents (to a 0 length range) |
11625 |
+- * even if we fail to submit a bio, because in such case we do the |
11626 |
+- * corresponding error handling below and it must not be done a second |
11627 |
+- * time by btrfs_direct_IO(). |
11628 |
+- */ |
11629 |
+- if (write) { |
11630 |
+- struct btrfs_dio_data *dio_data = current->journal_info; |
11631 |
+- |
11632 |
+- dio_data->unsubmitted_oe_range_end = dip->logical_offset + |
11633 |
+- dip->bytes; |
11634 |
+- dio_data->unsubmitted_oe_range_start = |
11635 |
+- dio_data->unsubmitted_oe_range_end; |
11636 |
+- } |
11637 |
+- |
11638 |
+- ret = btrfs_submit_direct_hook(dip); |
11639 |
+- if (!ret) |
11640 |
+- return; |
11641 |
+- |
11642 |
+- btrfs_io_bio_free_csum(io_bio); |
11643 |
+- |
11644 |
+-free_ordered: |
11645 |
+- /* |
11646 |
+- * If we arrived here it means either we failed to submit the dip |
11647 |
+- * or we either failed to clone the dio_bio or failed to allocate the |
11648 |
+- * dip. If we cloned the dio_bio and allocated the dip, we can just |
11649 |
+- * call bio_endio against our io_bio so that we get proper resource |
11650 |
+- * cleanup if we fail to submit the dip, otherwise, we must do the |
11651 |
+- * same as btrfs_endio_direct_[write|read] because we can't call these |
11652 |
+- * callbacks - they require an allocated dip and a clone of dio_bio. |
11653 |
+- */ |
11654 |
+- if (bio && dip) { |
11655 |
+- bio_io_error(bio); |
11656 |
+- /* |
11657 |
+- * The end io callbacks free our dip, do the final put on bio |
11658 |
+- * and all the cleanup and final put for dio_bio (through |
11659 |
+- * dio_end_io()). |
11660 |
+- */ |
11661 |
+- dip = NULL; |
11662 |
+- bio = NULL; |
11663 |
+- } else { |
11664 |
+- if (write) |
11665 |
+- __endio_write_update_ordered(inode, |
11666 |
+- file_offset, |
11667 |
+- dio_bio->bi_iter.bi_size, |
11668 |
+- false); |
11669 |
+- else |
11670 |
+- unlock_extent(&BTRFS_I(inode)->io_tree, file_offset, |
11671 |
+- file_offset + dio_bio->bi_iter.bi_size - 1); |
11672 |
+- |
11673 |
+- dio_bio->bi_status = BLK_STS_IOERR; |
11674 |
+- /* |
11675 |
+- * Releases and cleans up our dio_bio, no need to bio_put() |
11676 |
+- * nor bio_endio()/bio_io_error() against dio_bio. |
11677 |
+- */ |
11678 |
+- dio_end_io(dio_bio); |
11679 |
+- } |
11680 |
+- if (bio) |
11681 |
+- bio_put(bio); |
11682 |
+- kfree(dip); |
11683 |
+ } |
11684 |
+ |
11685 |
+ static ssize_t check_direct_IO(struct btrfs_fs_info *fs_info, |
11686 |
+@@ -9082,20 +9044,17 @@ again: |
11687 |
+ /* |
11688 |
+ * Qgroup reserved space handler |
11689 |
+ * Page here will be either |
11690 |
+- * 1) Already written to disk |
11691 |
+- * In this case, its reserved space is released from data rsv map |
11692 |
+- * and will be freed by delayed_ref handler finally. |
11693 |
+- * So even we call qgroup_free_data(), it won't decrease reserved |
11694 |
+- * space. |
11695 |
+- * 2) Not written to disk |
11696 |
+- * This means the reserved space should be freed here. However, |
11697 |
+- * if a truncate invalidates the page (by clearing PageDirty) |
11698 |
+- * and the page is accounted for while allocating extent |
11699 |
+- * in btrfs_check_data_free_space() we let delayed_ref to |
11700 |
+- * free the entire extent. |
11701 |
++ * 1) Already written to disk or ordered extent already submitted |
11702 |
++ * Then its QGROUP_RESERVED bit in io_tree is already cleaned. |
11703 |
++ * Qgroup will be handled by its qgroup_record then. |
11704 |
++ * btrfs_qgroup_free_data() call will do nothing here. |
11705 |
++ * |
11706 |
++ * 2) Not written to disk yet |
11707 |
++ * Then btrfs_qgroup_free_data() call will clear the QGROUP_RESERVED |
11708 |
++ * bit of its io_tree, and free the qgroup reserved data space, |
11709 |
++ * since the IO will never happen for this page. |
11710 |
+ */ |
11711 |
+- if (PageDirty(page)) |
11712 |
+- btrfs_qgroup_free_data(inode, NULL, page_start, PAGE_SIZE); |
11713 |
++ btrfs_qgroup_free_data(inode, NULL, page_start, PAGE_SIZE); |
11714 |
+ if (!inode_evicting) { |
11715 |
+ clear_extent_bit(tree, page_start, page_end, EXTENT_LOCKED | |
11716 |
+ EXTENT_DELALLOC | EXTENT_DELALLOC_NEW | |
11717 |
+diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c |
11718 |
+index af3605a0bf2e0..1bc57f7b91cfa 100644 |
11719 |
+--- a/fs/btrfs/relocation.c |
11720 |
++++ b/fs/btrfs/relocation.c |
11721 |
+@@ -1468,6 +1468,9 @@ int btrfs_init_reloc_root(struct btrfs_trans_handle *trans, |
11722 |
+ int clear_rsv = 0; |
11723 |
+ int ret; |
11724 |
+ |
11725 |
++ if (!rc) |
11726 |
++ return 0; |
11727 |
++ |
11728 |
+ /* |
11729 |
+ * The subvolume has reloc tree but the swap is finished, no need to |
11730 |
+ * create/update the dead reloc tree |
11731 |
+@@ -1475,13 +1478,25 @@ int btrfs_init_reloc_root(struct btrfs_trans_handle *trans, |
11732 |
+ if (reloc_root_is_dead(root)) |
11733 |
+ return 0; |
11734 |
+ |
11735 |
++ /* |
11736 |
++ * This is subtle but important. We do not do |
11737 |
++ * record_root_in_transaction for reloc roots, instead we record their |
11738 |
++ * corresponding fs root, and then here we update the last trans for the |
11739 |
++ * reloc root. This means that we have to do this for the entire life |
11740 |
++ * of the reloc root, regardless of which stage of the relocation we are |
11741 |
++ * in. |
11742 |
++ */ |
11743 |
+ if (root->reloc_root) { |
11744 |
+ reloc_root = root->reloc_root; |
11745 |
+ reloc_root->last_trans = trans->transid; |
11746 |
+ return 0; |
11747 |
+ } |
11748 |
+ |
11749 |
+- if (!rc || !rc->create_reloc_tree || |
11750 |
++ /* |
11751 |
++ * We are merging reloc roots, we do not need new reloc trees. Also |
11752 |
++ * reloc trees never need their own reloc tree. |
11753 |
++ */ |
11754 |
++ if (!rc->create_reloc_tree || |
11755 |
+ root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) |
11756 |
+ return 0; |
11757 |
+ |
11758 |
+@@ -2336,6 +2351,18 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc, |
11759 |
+ trans = NULL; |
11760 |
+ goto out; |
11761 |
+ } |
11762 |
++ |
11763 |
++ /* |
11764 |
++ * At this point we no longer have a reloc_control, so we can't |
11765 |
++ * depend on btrfs_init_reloc_root to update our last_trans. |
11766 |
++ * |
11767 |
++ * But that's ok, we started the trans handle on our |
11768 |
++ * corresponding fs_root, which means it's been added to the |
11769 |
++ * dirty list. At commit time we'll still call |
11770 |
++ * btrfs_update_reloc_root() and update our root item |
11771 |
++ * appropriately. |
11772 |
++ */ |
11773 |
++ reloc_root->last_trans = trans->transid; |
11774 |
+ trans->block_rsv = rc->block_rsv; |
11775 |
+ |
11776 |
+ replaced = 0; |
11777 |
+@@ -4342,6 +4369,18 @@ static struct reloc_control *alloc_reloc_control(struct btrfs_fs_info *fs_info) |
11778 |
+ return rc; |
11779 |
+ } |
11780 |
+ |
11781 |
++static void free_reloc_control(struct reloc_control *rc) |
11782 |
++{ |
11783 |
++ struct mapping_node *node, *tmp; |
11784 |
++ |
11785 |
++ free_reloc_roots(&rc->reloc_roots); |
11786 |
++ rbtree_postorder_for_each_entry_safe(node, tmp, |
11787 |
++ &rc->reloc_root_tree.rb_root, rb_node) |
11788 |
++ kfree(node); |
11789 |
++ |
11790 |
++ kfree(rc); |
11791 |
++} |
11792 |
++ |
11793 |
+ /* |
11794 |
+ * Print the block group being relocated |
11795 |
+ */ |
11796 |
+@@ -4474,7 +4513,7 @@ out: |
11797 |
+ btrfs_dec_block_group_ro(rc->block_group); |
11798 |
+ iput(rc->data_inode); |
11799 |
+ btrfs_put_block_group(rc->block_group); |
11800 |
+- kfree(rc); |
11801 |
++ free_reloc_control(rc); |
11802 |
+ return err; |
11803 |
+ } |
11804 |
+ |
11805 |
+@@ -4647,7 +4686,7 @@ out_clean: |
11806 |
+ err = ret; |
11807 |
+ out_unset: |
11808 |
+ unset_reloc_control(rc); |
11809 |
+- kfree(rc); |
11810 |
++ free_reloc_control(rc); |
11811 |
+ out: |
11812 |
+ if (!list_empty(&reloc_roots)) |
11813 |
+ free_reloc_roots(&reloc_roots); |
11814 |
+diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c |
11815 |
+index 91ea38506fbb7..84b8d6ebf98f3 100644 |
11816 |
+--- a/fs/btrfs/tree-checker.c |
11817 |
++++ b/fs/btrfs/tree-checker.c |
11818 |
+@@ -674,6 +674,44 @@ int btrfs_check_chunk_valid(struct extent_buffer *leaf, |
11819 |
+ return 0; |
11820 |
+ } |
11821 |
+ |
11822 |
++/* |
11823 |
++ * Enhanced version of chunk item checker. |
11824 |
++ * |
11825 |
++ * The common btrfs_check_chunk_valid() doesn't check item size since it needs |
11826 |
++ * to work on super block sys_chunk_array which doesn't have full item ptr. |
11827 |
++ */ |
11828 |
++static int check_leaf_chunk_item(struct extent_buffer *leaf, |
11829 |
++ struct btrfs_chunk *chunk, |
11830 |
++ struct btrfs_key *key, int slot) |
11831 |
++{ |
11832 |
++ int num_stripes; |
11833 |
++ |
11834 |
++ if (btrfs_item_size_nr(leaf, slot) < sizeof(struct btrfs_chunk)) { |
11835 |
++ chunk_err(leaf, chunk, key->offset, |
11836 |
++ "invalid chunk item size: have %u expect [%zu, %u)", |
11837 |
++ btrfs_item_size_nr(leaf, slot), |
11838 |
++ sizeof(struct btrfs_chunk), |
11839 |
++ BTRFS_LEAF_DATA_SIZE(leaf->fs_info)); |
11840 |
++ return -EUCLEAN; |
11841 |
++ } |
11842 |
++ |
11843 |
++ num_stripes = btrfs_chunk_num_stripes(leaf, chunk); |
11844 |
++ /* Let btrfs_check_chunk_valid() handle this error type */ |
11845 |
++ if (num_stripes == 0) |
11846 |
++ goto out; |
11847 |
++ |
11848 |
++ if (btrfs_chunk_item_size(num_stripes) != |
11849 |
++ btrfs_item_size_nr(leaf, slot)) { |
11850 |
++ chunk_err(leaf, chunk, key->offset, |
11851 |
++ "invalid chunk item size: have %u expect %lu", |
11852 |
++ btrfs_item_size_nr(leaf, slot), |
11853 |
++ btrfs_chunk_item_size(num_stripes)); |
11854 |
++ return -EUCLEAN; |
11855 |
++ } |
11856 |
++out: |
11857 |
++ return btrfs_check_chunk_valid(leaf, chunk, key->offset); |
11858 |
++} |
11859 |
++ |
11860 |
+ __printf(3, 4) |
11861 |
+ __cold |
11862 |
+ static void dev_item_err(const struct extent_buffer *eb, int slot, |
11863 |
+@@ -1265,7 +1303,7 @@ static int check_leaf_item(struct extent_buffer *leaf, |
11864 |
+ break; |
11865 |
+ case BTRFS_CHUNK_ITEM_KEY: |
11866 |
+ chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); |
11867 |
+- ret = btrfs_check_chunk_valid(leaf, chunk, key->offset); |
11868 |
++ ret = check_leaf_chunk_item(leaf, chunk, key, slot); |
11869 |
+ break; |
11870 |
+ case BTRFS_DEV_ITEM_KEY: |
11871 |
+ ret = check_dev_item(leaf, key, slot); |
11872 |
+diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c |
11873 |
+index b2695919435e8..af563d73d252c 100644 |
11874 |
+--- a/fs/ceph/caps.c |
11875 |
++++ b/fs/ceph/caps.c |
11876 |
+@@ -2013,12 +2013,24 @@ ack: |
11877 |
+ if (mutex_trylock(&session->s_mutex) == 0) { |
11878 |
+ dout("inverting session/ino locks on %p\n", |
11879 |
+ session); |
11880 |
++ session = ceph_get_mds_session(session); |
11881 |
+ spin_unlock(&ci->i_ceph_lock); |
11882 |
+ if (took_snap_rwsem) { |
11883 |
+ up_read(&mdsc->snap_rwsem); |
11884 |
+ took_snap_rwsem = 0; |
11885 |
+ } |
11886 |
+- mutex_lock(&session->s_mutex); |
11887 |
++ if (session) { |
11888 |
++ mutex_lock(&session->s_mutex); |
11889 |
++ ceph_put_mds_session(session); |
11890 |
++ } else { |
11891 |
++ /* |
11892 |
++ * Because we take the reference while |
11893 |
++ * holding the i_ceph_lock, it should |
11894 |
++ * never be NULL. Throw a warning if it |
11895 |
++ * ever is. |
11896 |
++ */ |
11897 |
++ WARN_ON_ONCE(true); |
11898 |
++ } |
11899 |
+ goto retry; |
11900 |
+ } |
11901 |
+ } |
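Annotation (not part of the patch): the caps.c change above is an instance of a general locking rule: before dropping lock A to take lock B, pin any object found under A with a reference, or it can be freed in the window between the two locks. A generic sketch of the refcount half of that pattern; the types and lock placement are illustrative, not the ceph ones:

    #include <stdio.h>
    #include <stdlib.h>

    struct session {
            int refcount;
    };

    static struct session *session_get(struct session *s)
    {
            if (s)
                    s->refcount++;
            return s;
    }

    static void session_put(struct session *s)
    {
            if (s && --s->refcount == 0)
                    free(s);
    }

    int main(void)
    {
            struct session *s = calloc(1, sizeof(*s));

            if (!s)
                    return 1;
            s->refcount = 1;
            /* Holding lock A: pin the session before dropping the lock. */
            session_get(s);
            /* Lock A dropped here; the extra reference keeps s alive. */
            printf("refcount = %d\n", s->refcount); /* 2 */
            /* Lock B taken, session used, then both references released. */
            session_put(s);
            session_put(s); /* final put frees the object */
            return 0;
    }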
11902 |
+diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c |
11903 |
+index c07407586ce87..660a878e20ef2 100644 |
11904 |
+--- a/fs/ceph/inode.c |
11905 |
++++ b/fs/ceph/inode.c |
11906 |
+@@ -754,8 +754,11 @@ static int fill_inode(struct inode *inode, struct page *locked_page, |
11907 |
+ info_caps = le32_to_cpu(info->cap.caps); |
11908 |
+ |
11909 |
+ /* prealloc new cap struct */ |
11910 |
+- if (info_caps && ceph_snap(inode) == CEPH_NOSNAP) |
11911 |
++ if (info_caps && ceph_snap(inode) == CEPH_NOSNAP) { |
11912 |
+ new_cap = ceph_get_cap(mdsc, caps_reservation); |
11913 |
++ if (!new_cap) |
11914 |
++ return -ENOMEM; |
11915 |
++ } |
11916 |
+ |
11917 |
+ /* |
11918 |
+ * prealloc xattr data, if it looks like we'll need it. only |
11919 |
+diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h |
11920 |
+index f9cbdfc1591b1..b16c994414ab0 100644 |
11921 |
+--- a/fs/cifs/cifsglob.h |
11922 |
++++ b/fs/cifs/cifsglob.h |
11923 |
+@@ -268,8 +268,9 @@ struct smb_version_operations { |
11924 |
+ int (*check_message)(char *, unsigned int, struct TCP_Server_Info *); |
11925 |
+ bool (*is_oplock_break)(char *, struct TCP_Server_Info *); |
11926 |
+ int (*handle_cancelled_mid)(char *, struct TCP_Server_Info *); |
11927 |
+- void (*downgrade_oplock)(struct TCP_Server_Info *, |
11928 |
+- struct cifsInodeInfo *, bool); |
11929 |
++ void (*downgrade_oplock)(struct TCP_Server_Info *server, |
11930 |
++ struct cifsInodeInfo *cinode, __u32 oplock, |
11931 |
++ unsigned int epoch, bool *purge_cache); |
11932 |
+ /* process transaction2 response */ |
11933 |
+ bool (*check_trans2)(struct mid_q_entry *, struct TCP_Server_Info *, |
11934 |
+ char *, int); |
11935 |
+@@ -1261,6 +1262,8 @@ struct cifsFileInfo { |
11936 |
+ unsigned int f_flags; |
11937 |
+ bool invalidHandle:1; /* file closed via session abend */ |
11938 |
+ bool oplock_break_cancelled:1; |
11939 |
++ unsigned int oplock_epoch; /* epoch from the lease break */ |
11940 |
++ __u32 oplock_level; /* oplock/lease level from the lease break */ |
11941 |
+ int count; |
11942 |
+ spinlock_t file_info_lock; /* protects four flag/count fields above */ |
11943 |
+ struct mutex fh_mutex; /* prevents reopen race after dead ses*/ |
11944 |
+@@ -1408,7 +1411,7 @@ struct cifsInodeInfo { |
11945 |
+ unsigned int epoch; /* used to track lease state changes */ |
11946 |
+ #define CIFS_INODE_PENDING_OPLOCK_BREAK (0) /* oplock break in progress */ |
11947 |
+ #define CIFS_INODE_PENDING_WRITERS (1) /* Writes in progress */ |
11948 |
+-#define CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2 (2) /* Downgrade oplock to L2 */ |
11949 |
++#define CIFS_INODE_FLAG_UNUSED (2) /* Unused flag */ |
11950 |
+ #define CIFS_INO_DELETE_PENDING (3) /* delete pending on server */ |
11951 |
+ #define CIFS_INO_INVALID_MAPPING (4) /* pagecache is invalid */ |
11952 |
+ #define CIFS_INO_LOCK (5) /* lock bit for synchronization */ |
11953 |
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c |
11954 |
+index 4959dbe740f71..31d578739341b 100644 |
11955 |
+--- a/fs/cifs/file.c |
11956 |
++++ b/fs/cifs/file.c |
11957 |
+@@ -4269,7 +4269,8 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list, |
11958 |
+ break; |
11959 |
+ |
11960 |
+ __SetPageLocked(page); |
11961 |
+- if (add_to_page_cache_locked(page, mapping, page->index, gfp)) { |
11962 |
++ rc = add_to_page_cache_locked(page, mapping, page->index, gfp); |
11963 |
++ if (rc) { |
11964 |
+ __ClearPageLocked(page); |
11965 |
+ break; |
11966 |
+ } |
11967 |
+@@ -4285,6 +4286,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping, |
11968 |
+ struct list_head *page_list, unsigned num_pages) |
11969 |
+ { |
11970 |
+ int rc; |
11971 |
++ int err = 0; |
11972 |
+ struct list_head tmplist; |
11973 |
+ struct cifsFileInfo *open_file = file->private_data; |
11974 |
+ struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file); |
11975 |
+@@ -4329,7 +4331,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping, |
11976 |
+ * the order of declining indexes. When we put the pages in |
11977 |
+ * the rdata->pages, then we want them in increasing order. |
11978 |
+ */ |
11979 |
+- while (!list_empty(page_list)) { |
11980 |
++ while (!list_empty(page_list) && !err) { |
11981 |
+ unsigned int i, nr_pages, bytes, rsize; |
11982 |
+ loff_t offset; |
11983 |
+ struct page *page, *tpage; |
11984 |
+@@ -4362,9 +4364,10 @@ static int cifs_readpages(struct file *file, struct address_space *mapping, |
11985 |
+ return 0; |
11986 |
+ } |
11987 |
+ |
11988 |
+- rc = readpages_get_pages(mapping, page_list, rsize, &tmplist, |
11989 |
++ nr_pages = 0; |
11990 |
++ err = readpages_get_pages(mapping, page_list, rsize, &tmplist, |
11991 |
+ &nr_pages, &offset, &bytes); |
11992 |
+- if (rc) { |
11993 |
++ if (!nr_pages) { |
11994 |
+ add_credits_and_wake_if(server, credits, 0); |
11995 |
+ break; |
11996 |
+ } |
11997 |
+@@ -4675,12 +4678,13 @@ void cifs_oplock_break(struct work_struct *work) |
11998 |
+ struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); |
11999 |
+ struct TCP_Server_Info *server = tcon->ses->server; |
12000 |
+ int rc = 0; |
12001 |
++ bool purge_cache = false; |
12002 |
+ |
12003 |
+ wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS, |
12004 |
+ TASK_UNINTERRUPTIBLE); |
12005 |
+ |
12006 |
+- server->ops->downgrade_oplock(server, cinode, |
12007 |
+- test_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, &cinode->flags)); |
12008 |
++ server->ops->downgrade_oplock(server, cinode, cfile->oplock_level, |
12009 |
++ cfile->oplock_epoch, &purge_cache); |
12010 |
+ |
12011 |
+ if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) && |
12012 |
+ cifs_has_mand_locks(cinode)) { |
12013 |
+@@ -4695,18 +4699,21 @@ void cifs_oplock_break(struct work_struct *work) |
12014 |
+ else |
12015 |
+ break_lease(inode, O_WRONLY); |
12016 |
+ rc = filemap_fdatawrite(inode->i_mapping); |
12017 |
+- if (!CIFS_CACHE_READ(cinode)) { |
12018 |
++ if (!CIFS_CACHE_READ(cinode) || purge_cache) { |
12019 |
+ rc = filemap_fdatawait(inode->i_mapping); |
12020 |
+ mapping_set_error(inode->i_mapping, rc); |
12021 |
+ cifs_zap_mapping(inode); |
12022 |
+ } |
12023 |
+ cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc); |
12024 |
++ if (CIFS_CACHE_WRITE(cinode)) |
12025 |
++ goto oplock_break_ack; |
12026 |
+ } |
12027 |
+ |
12028 |
+ rc = cifs_push_locks(cfile); |
12029 |
+ if (rc) |
12030 |
+ cifs_dbg(VFS, "Push locks rc = %d\n", rc); |
12031 |
+ |
12032 |
++oplock_break_ack: |
12033 |
+ /* |
12034 |
+ * releasing stale oplock after recent reconnect of smb session using |
12035 |
+ * a now incorrect file handle is not a data integrity issue but do |
12036 |
+diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
+index 5ad83bdb9bea3..40ca394fd5de9 100644
+--- a/fs/cifs/misc.c
++++ b/fs/cifs/misc.c
+@@ -488,21 +488,10 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
+ set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK,
+ &pCifsInode->flags);
+
+- /*
+- * Set flag if the server downgrades the oplock
+- * to L2 else clear.
+- */
+- if (pSMB->OplockLevel)
+- set_bit(
+- CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
+- &pCifsInode->flags);
+- else
+- clear_bit(
+- CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
+- &pCifsInode->flags);
+-
+- cifs_queue_oplock_break(netfile);
++ netfile->oplock_epoch = 0;
++ netfile->oplock_level = pSMB->OplockLevel;
+ netfile->oplock_break_cancelled = false;
++ cifs_queue_oplock_break(netfile);
+
+ spin_unlock(&tcon->open_file_lock);
+ spin_unlock(&cifs_tcp_ses_lock);
+diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
+index 195766221a7a8..e523c05a44876 100644
+--- a/fs/cifs/smb1ops.c
++++ b/fs/cifs/smb1ops.c
+@@ -369,12 +369,10 @@ coalesce_t2(char *second_buf, struct smb_hdr *target_hdr)
+
+ static void
+ cifs_downgrade_oplock(struct TCP_Server_Info *server,
+- struct cifsInodeInfo *cinode, bool set_level2)
++ struct cifsInodeInfo *cinode, __u32 oplock,
++ unsigned int epoch, bool *purge_cache)
+ {
+- if (set_level2)
+- cifs_set_oplock_level(cinode, OPLOCK_READ);
+- else
+- cifs_set_oplock_level(cinode, 0);
++ cifs_set_oplock_level(cinode, oplock);
+ }
+
+ static bool
+diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
+index 2fc96f7923ee5..7d875a47d0226 100644
+--- a/fs/cifs/smb2misc.c
++++ b/fs/cifs/smb2misc.c
+@@ -550,7 +550,7 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp)
+
+ cifs_dbg(FYI, "found in the open list\n");
+ cifs_dbg(FYI, "lease key match, lease break 0x%x\n",
+- le32_to_cpu(rsp->NewLeaseState));
++ lease_state);
+
+ if (ack_req)
+ cfile->oplock_break_cancelled = false;
+@@ -559,17 +559,8 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp)
+
+ set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags);
+
+- /*
+- * Set or clear flags depending on the lease state being READ.
+- * HANDLE caching flag should be added when the client starts
+- * to defer closing remote file handles with HANDLE leases.
+- */
+- if (lease_state & SMB2_LEASE_READ_CACHING_HE)
+- set_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
+- &cinode->flags);
+- else
+- clear_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
+- &cinode->flags);
++ cfile->oplock_epoch = le16_to_cpu(rsp->Epoch);
++ cfile->oplock_level = lease_state;
+
+ cifs_queue_oplock_break(cfile);
+ return true;
+@@ -599,7 +590,7 @@ smb2_tcon_find_pending_open_lease(struct cifs_tcon *tcon,
+
+ cifs_dbg(FYI, "found in the pending open list\n");
+ cifs_dbg(FYI, "lease key match, lease break 0x%x\n",
+- le32_to_cpu(rsp->NewLeaseState));
++ lease_state);
+
+ open->oplock = lease_state;
+ }
+@@ -732,18 +723,9 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
+ set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK,
+ &cinode->flags);
+
+- /*
+- * Set flag if the server downgrades the oplock
+- * to L2 else clear.
+- */
+- if (rsp->OplockLevel)
+- set_bit(
+- CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
+- &cinode->flags);
+- else
+- clear_bit(
+- CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
+- &cinode->flags);
++ cfile->oplock_epoch = 0;
++ cfile->oplock_level = rsp->OplockLevel;
++
+ spin_unlock(&cfile->file_info_lock);
+
+ cifs_queue_oplock_break(cfile);
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index 7ccbfc6564787..64ad466695c55 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -1565,35 +1565,32 @@ smb2_ioctl_query_info(const unsigned int xid,
+ if (le32_to_cpu(io_rsp->OutputCount) < qi.input_buffer_length)
+ qi.input_buffer_length = le32_to_cpu(io_rsp->OutputCount);
+ if (qi.input_buffer_length > 0 &&
+- le32_to_cpu(io_rsp->OutputOffset) + qi.input_buffer_length > rsp_iov[1].iov_len) {
+- rc = -EFAULT;
+- goto iqinf_exit;
+- }
+- if (copy_to_user(&pqi->input_buffer_length, &qi.input_buffer_length,
+- sizeof(qi.input_buffer_length))) {
+- rc = -EFAULT;
+- goto iqinf_exit;
+- }
++ le32_to_cpu(io_rsp->OutputOffset) + qi.input_buffer_length
++ > rsp_iov[1].iov_len)
++ goto e_fault;
++
++ if (copy_to_user(&pqi->input_buffer_length,
++ &qi.input_buffer_length,
++ sizeof(qi.input_buffer_length)))
++ goto e_fault;
++
+ if (copy_to_user((void __user *)pqi + sizeof(struct smb_query_info),
+ (const void *)io_rsp + le32_to_cpu(io_rsp->OutputOffset),
+- qi.input_buffer_length)) {
+- rc = -EFAULT;
+- goto iqinf_exit;
+- }
++ qi.input_buffer_length))
++ goto e_fault;
+ } else {
+ pqi = (struct smb_query_info __user *)arg;
+ qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
+ if (le32_to_cpu(qi_rsp->OutputBufferLength) < qi.input_buffer_length)
+ qi.input_buffer_length = le32_to_cpu(qi_rsp->OutputBufferLength);
+- if (copy_to_user(&pqi->input_buffer_length, &qi.input_buffer_length,
+- sizeof(qi.input_buffer_length))) {
+- rc = -EFAULT;
+- goto iqinf_exit;
+- }
+- if (copy_to_user(pqi + 1, qi_rsp->Buffer, qi.input_buffer_length)) {
+- rc = -EFAULT;
+- goto iqinf_exit;
+- }
++ if (copy_to_user(&pqi->input_buffer_length,
++ &qi.input_buffer_length,
++ sizeof(qi.input_buffer_length)))
++ goto e_fault;
++
++ if (copy_to_user(pqi + 1, qi_rsp->Buffer,
++ qi.input_buffer_length))
++ goto e_fault;
+ }
+
+ iqinf_exit:
+@@ -1609,6 +1606,10 @@ smb2_ioctl_query_info(const unsigned int xid,
+ free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
+ free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
+ return rc;
++
++e_fault:
++ rc = -EFAULT;
++ goto iqinf_exit;
+ }
+
+ static ssize_t
+@@ -3331,22 +3332,38 @@ static long smb3_fallocate(struct file *file, struct cifs_tcon *tcon, int mode,
+
+ static void
+ smb2_downgrade_oplock(struct TCP_Server_Info *server,
+- struct cifsInodeInfo *cinode, bool set_level2)
++ struct cifsInodeInfo *cinode, __u32 oplock,
++ unsigned int epoch, bool *purge_cache)
+ {
+- if (set_level2)
+- server->ops->set_oplock_level(cinode, SMB2_OPLOCK_LEVEL_II,
+- 0, NULL);
+- else
+- server->ops->set_oplock_level(cinode, 0, 0, NULL);
++ server->ops->set_oplock_level(cinode, oplock, 0, NULL);
+ }
+
+ static void
+-smb21_downgrade_oplock(struct TCP_Server_Info *server,
+- struct cifsInodeInfo *cinode, bool set_level2)
++smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
++ unsigned int epoch, bool *purge_cache);
++
++static void
++smb3_downgrade_oplock(struct TCP_Server_Info *server,
++ struct cifsInodeInfo *cinode, __u32 oplock,
++ unsigned int epoch, bool *purge_cache)
+ {
+- server->ops->set_oplock_level(cinode,
+- set_level2 ? SMB2_LEASE_READ_CACHING_HE :
+- 0, 0, NULL);
++ unsigned int old_state = cinode->oplock;
++ unsigned int old_epoch = cinode->epoch;
++ unsigned int new_state;
++
++ if (epoch > old_epoch) {
++ smb21_set_oplock_level(cinode, oplock, 0, NULL);
++ cinode->epoch = epoch;
++ }
++
++ new_state = cinode->oplock;
++ *purge_cache = false;
++
++ if ((old_state & CIFS_CACHE_READ_FLG) != 0 &&
++ (new_state & CIFS_CACHE_READ_FLG) == 0)
++ *purge_cache = true;
++ else if (old_state == new_state && (epoch - old_epoch > 1))
++ *purge_cache = true;
+ }
+
+ static void
+@@ -4606,7 +4623,7 @@ struct smb_version_operations smb21_operations = {
+ .print_stats = smb2_print_stats,
+ .is_oplock_break = smb2_is_valid_oplock_break,
+ .handle_cancelled_mid = smb2_handle_cancelled_mid,
+- .downgrade_oplock = smb21_downgrade_oplock,
++ .downgrade_oplock = smb2_downgrade_oplock,
+ .need_neg = smb2_need_neg,
+ .negotiate = smb2_negotiate,
+ .negotiate_wsize = smb2_negotiate_wsize,
+@@ -4706,7 +4723,7 @@ struct smb_version_operations smb30_operations = {
+ .dump_share_caps = smb2_dump_share_caps,
+ .is_oplock_break = smb2_is_valid_oplock_break,
+ .handle_cancelled_mid = smb2_handle_cancelled_mid,
+- .downgrade_oplock = smb21_downgrade_oplock,
++ .downgrade_oplock = smb3_downgrade_oplock,
+ .need_neg = smb2_need_neg,
+ .negotiate = smb2_negotiate,
+ .negotiate_wsize = smb3_negotiate_wsize,
+@@ -4814,7 +4831,7 @@ struct smb_version_operations smb311_operations = {
+ .dump_share_caps = smb2_dump_share_caps,
+ .is_oplock_break = smb2_is_valid_oplock_break,
+ .handle_cancelled_mid = smb2_handle_cancelled_mid,
+- .downgrade_oplock = smb21_downgrade_oplock,
++ .downgrade_oplock = smb3_downgrade_oplock,
+ .need_neg = smb2_need_neg,
+ .negotiate = smb2_negotiate,
+ .negotiate_wsize = smb3_negotiate_wsize,
+diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
+index 0abfde6d0b051..f264e1d36fe16 100644
+--- a/fs/cifs/smb2pdu.h
++++ b/fs/cifs/smb2pdu.h
+@@ -1386,7 +1386,7 @@ struct smb2_oplock_break {
+ struct smb2_lease_break {
+ struct smb2_sync_hdr sync_hdr;
+ __le16 StructureSize; /* Must be 44 */
+- __le16 Reserved;
++ __le16 Epoch;
+ __le32 Flags;
+ __u8 LeaseKey[16];
+ __le32 CurrentLeaseState;
+diff --git a/fs/dcache.c b/fs/dcache.c
+index e88cf0554e659..b2a7f1765f0b1 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -903,17 +903,19 @@ struct dentry *dget_parent(struct dentry *dentry)
+ {
+ int gotref;
+ struct dentry *ret;
++ unsigned seq;
+
+ /*
+ * Do optimistic parent lookup without any
+ * locking.
+ */
+ rcu_read_lock();
++ seq = raw_seqcount_begin(&dentry->d_seq);
+ ret = READ_ONCE(dentry->d_parent);
+ gotref = lockref_get_not_zero(&ret->d_lockref);
+ rcu_read_unlock();
+ if (likely(gotref)) {
+- if (likely(ret == READ_ONCE(dentry->d_parent)))
++ if (!read_seqcount_retry(&dentry->d_seq, seq))
+ return ret;
+ dput(ret);
+ }
+diff --git a/fs/exec.c b/fs/exec.c
+index d62cd1d71098f..de833553ae27d 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -1007,16 +1007,26 @@ ssize_t read_code(struct file *file, unsigned long addr, loff_t pos, size_t len)
+ }
+ EXPORT_SYMBOL(read_code);
+
++/*
++ * Maps the mm_struct mm into the current task struct.
++ * On success, this function returns with the mutex
++ * exec_update_mutex locked.
++ */
+ static int exec_mmap(struct mm_struct *mm)
+ {
+ struct task_struct *tsk;
+ struct mm_struct *old_mm, *active_mm;
++ int ret;
+
+ /* Notify parent that we're no longer interested in the old VM */
+ tsk = current;
+ old_mm = current->mm;
+ exec_mm_release(tsk, old_mm);
+
++ ret = mutex_lock_killable(&tsk->signal->exec_update_mutex);
++ if (ret)
++ return ret;
++
+ if (old_mm) {
+ sync_mm_rss(old_mm);
+ /*
+@@ -1028,9 +1038,11 @@ static int exec_mmap(struct mm_struct *mm)
+ down_read(&old_mm->mmap_sem);
+ if (unlikely(old_mm->core_state)) {
+ up_read(&old_mm->mmap_sem);
++ mutex_unlock(&tsk->signal->exec_update_mutex);
+ return -EINTR;
+ }
+ }
++
+ task_lock(tsk);
+ active_mm = tsk->active_mm;
+ membarrier_exec_mmap(mm);
+@@ -1285,11 +1297,12 @@ int flush_old_exec(struct linux_binprm * bprm)
+ goto out;
+
+ /*
+- * After clearing bprm->mm (to mark that current is using the
+- * prepared mm now), we have nothing left of the original
++ * After setting bprm->called_exec_mmap (to mark that current is
++ * using the prepared mm now), we have nothing left of the original
+ * process. If anything from here on returns an error, the check
+ * in search_binary_handler() will SEGV current.
+ */
++ bprm->called_exec_mmap = 1;
+ bprm->mm = NULL;
+
+ set_fs(USER_DS);
+@@ -1423,6 +1436,8 @@ static void free_bprm(struct linux_binprm *bprm)
+ {
+ free_arg_pages(bprm);
+ if (bprm->cred) {
++ if (bprm->called_exec_mmap)
++ mutex_unlock(&current->signal->exec_update_mutex);
+ mutex_unlock(&current->signal->cred_guard_mutex);
+ abort_creds(bprm->cred);
+ }
+@@ -1472,6 +1487,7 @@ void install_exec_creds(struct linux_binprm *bprm)
+ * credentials; any time after this it may be unlocked.
+ */
+ security_bprm_committed_creds(bprm);
++ mutex_unlock(&current->signal->exec_update_mutex);
+ mutex_unlock(&current->signal->cred_guard_mutex);
+ }
+ EXPORT_SYMBOL(install_exec_creds);
+@@ -1663,7 +1679,7 @@ int search_binary_handler(struct linux_binprm *bprm)
+
+ read_lock(&binfmt_lock);
+ put_binfmt(fmt);
+- if (retval < 0 && !bprm->mm) {
++ if (retval < 0 && bprm->called_exec_mmap) {
+ /* we got to flush_old_exec() and failed after it */
+ read_unlock(&binfmt_lock);
+ force_sigsegv(SIGSEGV);
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index a284d99a1ee57..95a8a04c77dd3 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -5315,7 +5315,7 @@ static int ext4_do_update_inode(handle_t *handle,
+ raw_inode->i_file_acl_high =
+ cpu_to_le16(ei->i_file_acl >> 32);
+ raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
+- if (ei->i_disksize != ext4_isize(inode->i_sb, raw_inode)) {
++ if (READ_ONCE(ei->i_disksize) != ext4_isize(inode->i_sb, raw_inode)) {
+ ext4_isize_set(raw_inode, ei->i_disksize);
+ need_datasync = 1;
+ }
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index e1782b2e2e2dd..e5d43d2ee474d 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -1901,8 +1901,15 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
+ BUG_ON(buddy == NULL);
+
+ k = mb_find_next_zero_bit(buddy, max, 0);
+- BUG_ON(k >= max);
+-
++ if (k >= max) {
++ ext4_grp_locked_error(ac->ac_sb, e4b->bd_group, 0, 0,
++ "%d free clusters of order %d. But found 0",
++ grp->bb_counters[i], i);
++ ext4_mark_group_bitmap_corrupted(ac->ac_sb,
++ e4b->bd_group,
++ EXT4_GROUP_INFO_BBITMAP_CORRUPT);
++ break;
++ }
+ ac->ac_found++;
+
+ ac->ac_b_ex.fe_len = 1 << i;
+diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
+index e611d768efde3..a78aa5480454f 100644
+--- a/fs/f2fs/gc.c
++++ b/fs/f2fs/gc.c
+@@ -1012,8 +1012,14 @@ next_step:
+ block_t start_bidx;
+ nid_t nid = le32_to_cpu(entry->nid);
+
+- /* stop BG_GC if there is not enough free sections. */
+- if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
++ /*
++ * stop BG_GC if there is not enough free sections.
++ * Or, stop GC if the segment becomes fully valid caused by
++ * race condition along with SSR block allocation.
++ */
++ if ((gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) ||
++ get_valid_blocks(sbi, segno, false) ==
++ sbi->blocks_per_seg)
+ return submitted;
+
+ if (check_valid_map(sbi, segno, off) == 0)
+diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
+index 8a67b933ccd42..ed12e96681842 100644
+--- a/fs/f2fs/node.c
++++ b/fs/f2fs/node.c
+@@ -2353,7 +2353,6 @@ static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
+
+ if (ret) {
+ up_read(&nm_i->nat_tree_lock);
+- f2fs_bug_on(sbi, !mount);
+ f2fs_err(sbi, "NAT is corrupt, run fsck to fix it");
+ return ret;
+ }
+diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
+index 06dd38e76c62a..f9022b7028754 100644
+--- a/fs/fuse/dev.c
++++ b/fs/fuse/dev.c
+@@ -764,7 +764,6 @@ static int fuse_check_page(struct page *page)
+ {
+ if (page_mapcount(page) ||
+ page->mapping != NULL ||
+- page_count(page) != 1 ||
+ (page->flags & PAGE_FLAGS_CHECK_AT_PREP &
+ ~(1 << PG_locked |
+ 1 << PG_referenced |
+diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
+index 5dca643a257c9..f58ab84b09fb3 100644
+--- a/fs/fuse/inode.c
++++ b/fs/fuse/inode.c
+@@ -323,6 +323,8 @@ struct inode *fuse_iget(struct super_block *sb, u64 nodeid,
+ int fuse_reverse_inval_inode(struct super_block *sb, u64 nodeid,
+ loff_t offset, loff_t len)
+ {
++ struct fuse_conn *fc = get_fuse_conn_super(sb);
++ struct fuse_inode *fi;
+ struct inode *inode;
+ pgoff_t pg_start;
+ pgoff_t pg_end;
+@@ -331,6 +333,11 @@ int fuse_reverse_inval_inode(struct super_block *sb, u64 nodeid,
+ if (!inode)
+ return -ENOENT;
+
++ fi = get_fuse_inode(inode);
++ spin_lock(&fi->lock);
++ fi->attr_version = atomic64_inc_return(&fc->attr_version);
++ spin_unlock(&fi->lock);
++
+ fuse_invalidate_attr(inode);
+ forget_all_cached_acls(inode);
+ if (offset >= 0) {
+diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
+index 8466166f22e3d..988bb7b17ed8f 100644
+--- a/fs/gfs2/inode.c
++++ b/fs/gfs2/inode.c
+@@ -712,7 +712,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
+
+ error = gfs2_trans_begin(sdp, blocks, 0);
+ if (error)
+- goto fail_gunlock2;
++ goto fail_free_inode;
+
+ if (blocks > 1) {
+ ip->i_eattr = ip->i_no_addr + 1;
+@@ -723,7 +723,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
+
+ error = gfs2_glock_get(sdp, ip->i_no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
+ if (error)
+- goto fail_gunlock2;
++ goto fail_free_inode;
+
+ BUG_ON(test_and_set_bit(GLF_INODE_CREATING, &io_gl->gl_flags));
+
+@@ -732,7 +732,6 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
+ goto fail_gunlock2;
+
+ glock_set_object(ip->i_iopen_gh.gh_gl, ip);
+- gfs2_glock_put(io_gl);
+ gfs2_set_iop(inode);
+ insert_inode_hash(inode);
+
+@@ -765,6 +764,8 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
+
+ mark_inode_dirty(inode);
+ d_instantiate(dentry, inode);
++ /* After instantiate, errors should result in evict which will destroy
++ * both inode and iopen glocks properly. */
+ if (file) {
+ file->f_mode |= FMODE_CREATED;
+ error = finish_open(file, dentry, gfs2_open_common);
+@@ -772,15 +773,15 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
+ gfs2_glock_dq_uninit(ghs);
+ gfs2_glock_dq_uninit(ghs + 1);
+ clear_bit(GLF_INODE_CREATING, &io_gl->gl_flags);
++ gfs2_glock_put(io_gl);
+ return error;
+
+ fail_gunlock3:
+ glock_clear_object(io_gl, ip);
+ gfs2_glock_dq_uninit(&ip->i_iopen_gh);
+- gfs2_glock_put(io_gl);
+ fail_gunlock2:
+- if (io_gl)
+- clear_bit(GLF_INODE_CREATING, &io_gl->gl_flags);
++ clear_bit(GLF_INODE_CREATING, &io_gl->gl_flags);
++ gfs2_glock_put(io_gl);
+ fail_free_inode:
+ if (ip->i_gl) {
+ glock_clear_object(ip->i_gl, ip);
+diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
+index e25901ae3ff44..a30ea7ecb790a 100644
+--- a/fs/iomap/buffered-io.c
++++ b/fs/iomap/buffered-io.c
+@@ -1040,20 +1040,19 @@ vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
+
+ lock_page(page);
+ size = i_size_read(inode);
+- if ((page->mapping != inode->i_mapping) ||
+- (page_offset(page) > size)) {
++ offset = page_offset(page);
++ if (page->mapping != inode->i_mapping || offset > size) {
+ /* We overload EFAULT to mean page got truncated */
+ ret = -EFAULT;
+ goto out_unlock;
+ }
+
+ /* page is wholly or partially inside EOF */
+- if (((page->index + 1) << PAGE_SHIFT) > size)
++ if (offset > size - PAGE_SIZE)
+ length = offset_in_page(size);
+ else
+ length = PAGE_SIZE;
+
+- offset = page_offset(page);
+ while (length > 0) {
+ ret = iomap_apply(inode, offset, length,
+ IOMAP_WRITE | IOMAP_FAULT, ops, page,
+diff --git a/fs/nfs/nfstrace.h b/fs/nfs/nfstrace.h
+index 361cc10d6f95d..c8081d2b4166a 100644
+--- a/fs/nfs/nfstrace.h
++++ b/fs/nfs/nfstrace.h
+@@ -1147,7 +1147,12 @@ TRACE_EVENT(nfs_xdr_status,
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(u32, xid)
++ __field(int, version)
+ __field(unsigned long, error)
++ __string(program,
++ xdr->rqst->rq_task->tk_client->cl_program->name)
++ __string(procedure,
++ xdr->rqst->rq_task->tk_msg.rpc_proc->p_name)
+ ),
+
+ TP_fast_assign(
+@@ -1157,13 +1162,19 @@ TRACE_EVENT(nfs_xdr_status,
+ __entry->task_id = task->tk_pid;
+ __entry->client_id = task->tk_client->cl_clid;
+ __entry->xid = be32_to_cpu(rqstp->rq_xid);
++ __entry->version = task->tk_client->cl_vers;
+ __entry->error = error;
++ __assign_str(program,
++ task->tk_client->cl_program->name)
++ __assign_str(procedure, task->tk_msg.rpc_proc->p_name)
+ ),
+
+ TP_printk(
+- "task:%u@%d xid=0x%08x error=%ld (%s)",
++ "task:%u@%d xid=0x%08x %sv%d %s error=%ld (%s)",
+ __entry->task_id, __entry->client_id, __entry->xid,
+- -__entry->error, nfs_show_status(__entry->error)
++ __get_str(program), __entry->version,
++ __get_str(procedure), -__entry->error,
++ nfs_show_status(__entry->error)
+ )
+ );
+
+diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
+index b736912098eee..f4407dd426bf0 100644
+--- a/fs/nfs/pagelist.c
++++ b/fs/nfs/pagelist.c
+@@ -133,47 +133,70 @@ nfs_async_iocounter_wait(struct rpc_task *task, struct nfs_lock_context *l_ctx)
+ EXPORT_SYMBOL_GPL(nfs_async_iocounter_wait);
+
+ /*
+- * nfs_page_group_lock - lock the head of the page group
+- * @req - request in group that is to be locked
++ * nfs_page_set_headlock - set the request PG_HEADLOCK
++ * @req: request that is to be locked
+ *
+- * this lock must be held when traversing or modifying the page
+- * group list
++ * this lock must be held when modifying req->wb_head
+ *
+ * return 0 on success, < 0 on error
+ */
+ int
+-nfs_page_group_lock(struct nfs_page *req)
++nfs_page_set_headlock(struct nfs_page *req)
+ {
+- struct nfs_page *head = req->wb_head;
+-
+- WARN_ON_ONCE(head != head->wb_head);
+-
+- if (!test_and_set_bit(PG_HEADLOCK, &head->wb_flags))
++ if (!test_and_set_bit(PG_HEADLOCK, &req->wb_flags))
+ return 0;
+
+- set_bit(PG_CONTENDED1, &head->wb_flags);
++ set_bit(PG_CONTENDED1, &req->wb_flags);
+ smp_mb__after_atomic();
+- return wait_on_bit_lock(&head->wb_flags, PG_HEADLOCK,
++ return wait_on_bit_lock(&req->wb_flags, PG_HEADLOCK,
+ TASK_UNINTERRUPTIBLE);
+ }
+
+ /*
+- * nfs_page_group_unlock - unlock the head of the page group
+- * @req - request in group that is to be unlocked
++ * nfs_page_clear_headlock - clear the request PG_HEADLOCK
++ * @req: request that is to be locked
+ */
+ void
+-nfs_page_group_unlock(struct nfs_page *req)
++nfs_page_clear_headlock(struct nfs_page *req)
+ {
+- struct nfs_page *head = req->wb_head;
+-
+- WARN_ON_ONCE(head != head->wb_head);
+-
+ smp_mb__before_atomic();
+- clear_bit(PG_HEADLOCK, &head->wb_flags);
++ clear_bit(PG_HEADLOCK, &req->wb_flags);
+ smp_mb__after_atomic();
+- if (!test_bit(PG_CONTENDED1, &head->wb_flags))
++ if (!test_bit(PG_CONTENDED1, &req->wb_flags))
+ return;
+- wake_up_bit(&head->wb_flags, PG_HEADLOCK);
++ wake_up_bit(&req->wb_flags, PG_HEADLOCK);
++}
++
++/*
++ * nfs_page_group_lock - lock the head of the page group
++ * @req: request in group that is to be locked
++ *
++ * this lock must be held when traversing or modifying the page
++ * group list
++ *
++ * return 0 on success, < 0 on error
++ */
++int
++nfs_page_group_lock(struct nfs_page *req)
++{
++ int ret;
++
++ ret = nfs_page_set_headlock(req);
++ if (ret || req->wb_head == req)
++ return ret;
++ return nfs_page_set_headlock(req->wb_head);
++}
++
++/*
++ * nfs_page_group_unlock - unlock the head of the page group
++ * @req: request in group that is to be unlocked
++ */
++void
++nfs_page_group_unlock(struct nfs_page *req)
++{
++ if (req != req->wb_head)
++ nfs_page_clear_headlock(req->wb_head);
++ nfs_page_clear_headlock(req);
+ }
+
+ /*
+diff --git a/fs/nfs/write.c b/fs/nfs/write.c
+index 58c8317dd7d88..613c3ef23e07b 100644
+--- a/fs/nfs/write.c
++++ b/fs/nfs/write.c
+@@ -425,22 +425,28 @@ nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list,
+ destroy_list = (subreq->wb_this_page == old_head) ?
+ NULL : subreq->wb_this_page;
+
++ /* Note: lock subreq in order to change subreq->wb_head */
++ nfs_page_set_headlock(subreq);
+ WARN_ON_ONCE(old_head != subreq->wb_head);
+
+ /* make sure old group is not used */
+ subreq->wb_this_page = subreq;
++ subreq->wb_head = subreq;
+
+ clear_bit(PG_REMOVE, &subreq->wb_flags);
+
+ /* Note: races with nfs_page_group_destroy() */
+ if (!kref_read(&subreq->wb_kref)) {
+ /* Check if we raced with nfs_page_group_destroy() */
+- if (test_and_clear_bit(PG_TEARDOWN, &subreq->wb_flags))
++ if (test_and_clear_bit(PG_TEARDOWN, &subreq->wb_flags)) {
++ nfs_page_clear_headlock(subreq);
+ nfs_free_request(subreq);
++ } else
++ nfs_page_clear_headlock(subreq);
+ continue;
+ }
++ nfs_page_clear_headlock(subreq);
+
+- subreq->wb_head = subreq;
+ nfs_release_request(old_head);
+
+ if (test_and_clear_bit(PG_INODE_REF, &subreq->wb_flags)) {
+diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c
+index 3007b8945d388..51c08ae79063c 100644
+--- a/fs/nfsd/filecache.c
++++ b/fs/nfsd/filecache.c
+@@ -133,9 +133,13 @@ nfsd_file_mark_find_or_create(struct nfsd_file *nf)
+ struct nfsd_file_mark,
+ nfm_mark));
+ mutex_unlock(&nfsd_file_fsnotify_group->mark_mutex);
+- fsnotify_put_mark(mark);
+- if (likely(nfm))
++ if (nfm) {
++ fsnotify_put_mark(mark);
+ break;
++ }
++ /* Avoid soft lockup race with nfsd_file_mark_put() */
++ fsnotify_destroy_mark(mark, nfsd_file_fsnotify_group);
++ fsnotify_put_mark(mark);
+ } else
+ mutex_unlock(&nfsd_file_fsnotify_group->mark_mutex);
+
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 68cf116607645..8cb2f744dde6b 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -495,6 +495,8 @@ find_any_file(struct nfs4_file *f)
+ {
+ struct nfsd_file *ret;
+
++ if (!f)
++ return NULL;
+ spin_lock(&f->fi_lock);
+ ret = __nfs4_get_fd(f, O_RDWR);
+ if (!ret) {
+@@ -1273,6 +1275,12 @@ static void nfs4_put_stateowner(struct nfs4_stateowner *sop)
+ nfs4_free_stateowner(sop);
+ }
+
++static bool
++nfs4_ol_stateid_unhashed(const struct nfs4_ol_stateid *stp)
++{
++ return list_empty(&stp->st_perfile);
++}
++
+ static bool unhash_ol_stateid(struct nfs4_ol_stateid *stp)
+ {
+ struct nfs4_file *fp = stp->st_stid.sc_file;
+@@ -1343,9 +1351,11 @@ static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
+ {
+ lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
+
++ if (!unhash_ol_stateid(stp))
++ return false;
+ list_del_init(&stp->st_locks);
+ nfs4_unhash_stid(&stp->st_stid);
+- return unhash_ol_stateid(stp);
++ return true;
+ }
+
+ static void release_lock_stateid(struct nfs4_ol_stateid *stp)
+@@ -1410,13 +1420,12 @@ static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp,
+ static bool unhash_open_stateid(struct nfs4_ol_stateid *stp,
+ struct list_head *reaplist)
+ {
+- bool unhashed;
+-
+ lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
+
+- unhashed = unhash_ol_stateid(stp);
++ if (!unhash_ol_stateid(stp))
++ return false;
+ release_open_stateid_locks(stp, reaplist);
+- return unhashed;
++ return true;
+ }
+
+ static void release_open_stateid(struct nfs4_ol_stateid *stp)
+@@ -6267,21 +6276,21 @@ alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp,
+ }
+
+ static struct nfs4_ol_stateid *
+-find_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp)
++find_lock_stateid(const struct nfs4_lockowner *lo,
++ const struct nfs4_ol_stateid *ost)
+ {
+ struct nfs4_ol_stateid *lst;
+- struct nfs4_client *clp = lo->lo_owner.so_client;
+
+- lockdep_assert_held(&clp->cl_lock);
++ lockdep_assert_held(&ost->st_stid.sc_client->cl_lock);
+
+- list_for_each_entry(lst, &lo->lo_owner.so_stateids, st_perstateowner) {
+- if (lst->st_stid.sc_type != NFS4_LOCK_STID)
+- continue;
+- if (lst->st_stid.sc_file == fp) {
+- refcount_inc(&lst->st_stid.sc_count);
+- return lst;
++ /* If ost is not hashed, ost->st_locks will not be valid */
++ if (!nfs4_ol_stateid_unhashed(ost))
++ list_for_each_entry(lst, &ost->st_locks, st_locks) {
++ if (lst->st_stateowner == &lo->lo_owner) {
++ refcount_inc(&lst->st_stid.sc_count);
++ return lst;
++ }
+ }
+- }
+ return NULL;
+ }
+
+@@ -6297,11 +6306,11 @@ init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
+ mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);
+ retry:
+ spin_lock(&clp->cl_lock);
+- spin_lock(&fp->fi_lock);
+- retstp = find_lock_stateid(lo, fp);
++ if (nfs4_ol_stateid_unhashed(open_stp))
++ goto out_close;
++ retstp = find_lock_stateid(lo, open_stp);
+ if (retstp)
+- goto out_unlock;
+-
++ goto out_found;
+ refcount_inc(&stp->st_stid.sc_count);
+ stp->st_stid.sc_type = NFS4_LOCK_STID;
+ stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner);
+@@ -6310,22 +6319,26 @@ retry:
+ stp->st_access_bmap = 0;
+ stp->st_deny_bmap = open_stp->st_deny_bmap;
+ stp->st_openstp = open_stp;
++ spin_lock(&fp->fi_lock);
+ list_add(&stp->st_locks, &open_stp->st_locks);
+ list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
+ list_add(&stp->st_perfile, &fp->fi_stateids);
+-out_unlock:
+ spin_unlock(&fp->fi_lock);
+ spin_unlock(&clp->cl_lock);
+- if (retstp) {
+- if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
+- nfs4_put_stid(&retstp->st_stid);
+- goto retry;
+- }
+- /* To keep mutex tracking happy */
+- mutex_unlock(&stp->st_mutex);
+- stp = retstp;
+- }
+ return stp;
++out_found:
++ spin_unlock(&clp->cl_lock);
++ if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
++ nfs4_put_stid(&retstp->st_stid);
++ goto retry;
++ }
++ /* To keep mutex tracking happy */
++ mutex_unlock(&stp->st_mutex);
++ return retstp;
++out_close:
++ spin_unlock(&clp->cl_lock);
++ mutex_unlock(&stp->st_mutex);
++ return NULL;
+ }
+
+ static struct nfs4_ol_stateid *
+@@ -6340,7 +6353,7 @@ find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
+
+ *new = false;
+ spin_lock(&clp->cl_lock);
+- lst = find_lock_stateid(lo, fi);
++ lst = find_lock_stateid(lo, ost);
+ spin_unlock(&clp->cl_lock);
+ if (lst != NULL) {
+ if (nfsd4_lock_ol_stateid(lst) == nfs_ok)
+diff --git a/fs/nfsd/trace.h b/fs/nfsd/trace.h
+index ffc78a0e28b24..b073bdc2e6e89 100644
+--- a/fs/nfsd/trace.h
++++ b/fs/nfsd/trace.h
+@@ -228,7 +228,7 @@ TRACE_EVENT(nfsd_file_acquire,
+ TP_ARGS(rqstp, hash, inode, may_flags, nf, status),
+
+ TP_STRUCT__entry(
+- __field(__be32, xid)
++ __field(u32, xid)
+ __field(unsigned int, hash)
+ __field(void *, inode)
+ __field(unsigned int, may_flags)
+@@ -236,11 +236,11 @@ TRACE_EVENT(nfsd_file_acquire,
+ __field(unsigned long, nf_flags)
+ __field(unsigned char, nf_may)
+ __field(struct file *, nf_file)
+- __field(__be32, status)
++ __field(u32, status)
+ ),
+
+ TP_fast_assign(
+- __entry->xid = rqstp->rq_xid;
++ __entry->xid = be32_to_cpu(rqstp->rq_xid);
+ __entry->hash = hash;
+ __entry->inode = inode;
+ __entry->may_flags = may_flags;
+@@ -248,15 +248,15 @@ TRACE_EVENT(nfsd_file_acquire,
+ __entry->nf_flags = nf ? nf->nf_flags : 0;
+ __entry->nf_may = nf ? nf->nf_may : 0;
+ __entry->nf_file = nf ? nf->nf_file : NULL;
+- __entry->status = status;
++ __entry->status = be32_to_cpu(status);
+ ),
+
+ TP_printk("xid=0x%x hash=0x%x inode=0x%p may_flags=%s ref=%d nf_flags=%s nf_may=%s nf_file=0x%p status=%u",
+- be32_to_cpu(__entry->xid), __entry->hash, __entry->inode,
++ __entry->xid, __entry->hash, __entry->inode,
+ show_nf_may(__entry->may_flags), __entry->nf_ref,
+ show_nf_flags(__entry->nf_flags),
+ show_nf_may(__entry->nf_may), __entry->nf_file,
+- be32_to_cpu(__entry->status))
++ __entry->status)
+ );
+
+ DECLARE_EVENT_CLASS(nfsd_file_search_class,
+diff --git a/fs/proc/base.c b/fs/proc/base.c
+index ebea9501afb84..529d0c6ec6f9c 100644
+--- a/fs/proc/base.c
++++ b/fs/proc/base.c
+@@ -403,11 +403,11 @@ print0:
+
+ static int lock_trace(struct task_struct *task)
+ {
+- int err = mutex_lock_killable(&task->signal->cred_guard_mutex);
++ int err = mutex_lock_killable(&task->signal->exec_update_mutex);
+ if (err)
+ return err;
+ if (!ptrace_may_access(task, PTRACE_MODE_ATTACH_FSCREDS)) {
+- mutex_unlock(&task->signal->cred_guard_mutex);
++ mutex_unlock(&task->signal->exec_update_mutex);
+ return -EPERM;
+ }
+ return 0;
+@@ -415,7 +415,7 @@ static int lock_trace(struct task_struct *task)
+
+ static void unlock_trace(struct task_struct *task)
+ {
+- mutex_unlock(&task->signal->cred_guard_mutex);
++ mutex_unlock(&task->signal->exec_update_mutex);
+ }
+
+ #ifdef CONFIG_STACKTRACE
+@@ -2770,7 +2770,7 @@ static int do_io_accounting(struct task_struct *task, struct seq_file *m, int wh
+ unsigned long flags;
+ int result;
+
+- result = mutex_lock_killable(&task->signal->cred_guard_mutex);
++ result = mutex_lock_killable(&task->signal->exec_update_mutex);
+ if (result)
+ return result;
+
+@@ -2806,7 +2806,7 @@ static int do_io_accounting(struct task_struct *task, struct seq_file *m, int wh
+ result = 0;
+
+ out_unlock:
+- mutex_unlock(&task->signal->cred_guard_mutex);
++ mutex_unlock(&task->signal->exec_update_mutex);
+ return result;
+ }
+
+diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
+index 8ceb51478800b..7e4bfaf2871fa 100644
+--- a/fs/ubifs/io.c
++++ b/fs/ubifs/io.c
+@@ -225,7 +225,7 @@ int ubifs_is_mapped(const struct ubifs_info *c, int lnum)
+ int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum,
+ int offs, int quiet, int must_chk_crc)
+ {
+- int err = -EINVAL, type, node_len;
++ int err = -EINVAL, type, node_len, dump_node = 1;
+ uint32_t crc, node_crc, magic;
+ const struct ubifs_ch *ch = buf;
+
+@@ -278,10 +278,22 @@ int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum,
+ out_len:
+ if (!quiet)
+ ubifs_err(c, "bad node length %d", node_len);
++ if (type == UBIFS_DATA_NODE && node_len > UBIFS_DATA_NODE_SZ)
++ dump_node = 0;
+ out:
+ if (!quiet) {
+ ubifs_err(c, "bad node at LEB %d:%d", lnum, offs);
+- ubifs_dump_node(c, buf);
++ if (dump_node) {
++ ubifs_dump_node(c, buf);
++ } else {
++ int safe_len = min3(node_len, c->leb_size - offs,
++ (int)UBIFS_MAX_DATA_NODE_SZ);
++ pr_err("\tprevent out-of-bounds memory access\n");
++ pr_err("\ttruncated data node length %d\n", safe_len);
++ pr_err("\tcorrupted data node:\n");
++ print_hex_dump(KERN_ERR, "\t", DUMP_PREFIX_OFFSET, 32, 1,
++ buf, safe_len, 0);
++ }
+ dump_stack();
+ }
+ return err;
+diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
+index a6ae2428e4c96..5f2ac5ef0891e 100644
+--- a/fs/ubifs/journal.c
++++ b/fs/ubifs/journal.c
+@@ -906,6 +906,7 @@ int ubifs_jnl_write_inode(struct ubifs_info *c, const struct inode *inode)
+ ubifs_err(c, "dead directory entry '%s', error %d",
+ xent->name, err);
+ ubifs_ro_mode(c, err);
++ kfree(xent);
+ goto out_release;
+ }
+ ubifs_assert(c, ubifs_inode(xino)->xattr);
+diff --git a/fs/ubifs/orphan.c b/fs/ubifs/orphan.c
+index 7dd740e3692da..283f9eb48410d 100644
+--- a/fs/ubifs/orphan.c
++++ b/fs/ubifs/orphan.c
+@@ -157,7 +157,7 @@ int ubifs_add_orphan(struct ubifs_info *c, ino_t inum)
+ int err = 0;
+ ino_t xattr_inum;
+ union ubifs_key key;
+- struct ubifs_dent_node *xent;
++ struct ubifs_dent_node *xent, *pxent = NULL;
+ struct fscrypt_name nm = {0};
+ struct ubifs_orphan *xattr_orphan;
+ struct ubifs_orphan *orphan;
+@@ -181,11 +181,16 @@ int ubifs_add_orphan(struct ubifs_info *c, ino_t inum)
+ xattr_inum = le64_to_cpu(xent->inum);
+
+ xattr_orphan = orphan_add(c, xattr_inum, orphan);
+- if (IS_ERR(xattr_orphan))
++ if (IS_ERR(xattr_orphan)) {
++ kfree(xent);
+ return PTR_ERR(xattr_orphan);
++ }
+
++ kfree(pxent);
++ pxent = xent;
+ key_read(c, &xent->key, &key);
+ }
++ kfree(pxent);
+
+ return 0;
+ }
+diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c
+index 5472ed3ce6943..de33efc9b4f94 100644
+--- a/fs/xfs/libxfs/xfs_attr_leaf.c
++++ b/fs/xfs/libxfs/xfs_attr_leaf.c
+@@ -453,13 +453,15 @@ xfs_attr_copy_value(
+ * special case for dev/uuid inodes, they have fixed size data forks.
+ */
+ int
+-xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes)
++xfs_attr_shortform_bytesfit(
++ struct xfs_inode *dp,
++ int bytes)
+ {
+- int offset;
+- int minforkoff; /* lower limit on valid forkoff locations */
+- int maxforkoff; /* upper limit on valid forkoff locations */
+- int dsize;
+- xfs_mount_t *mp = dp->i_mount;
++ struct xfs_mount *mp = dp->i_mount;
++ int64_t dsize;
++ int minforkoff;
++ int maxforkoff;
++ int offset;
+
+ /* rounded down */
+ offset = (XFS_LITINO(mp, dp->i_d.di_version) - bytes) >> 3;
+@@ -525,7 +527,7 @@ xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes)
+ * A data fork btree root must have space for at least
+ * MINDBTPTRS key/ptr pairs if the data fork is small or empty.
+ */
+- minforkoff = max(dsize, XFS_BMDR_SPACE_CALC(MINDBTPTRS));
++ minforkoff = max_t(int64_t, dsize, XFS_BMDR_SPACE_CALC(MINDBTPTRS));
+ minforkoff = roundup(minforkoff, 8) >> 3;
+
+ /* attr fork btree root can have at least this many key/ptr pairs */
+@@ -924,7 +926,7 @@ xfs_attr_shortform_verify(
+ char *endp;
+ struct xfs_ifork *ifp;
+ int i;
+- int size;
++ int64_t size;
+
+ ASSERT(ip->i_d.di_aformat == XFS_DINODE_FMT_LOCAL);
+ ifp = XFS_IFORK_PTR(ip, XFS_ATTR_FORK);
+@@ -1449,7 +1451,9 @@ xfs_attr3_leaf_add_work(
+ for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
+ if (ichdr->freemap[i].base == tmp) {
+ ichdr->freemap[i].base += sizeof(xfs_attr_leaf_entry_t);
+- ichdr->freemap[i].size -= sizeof(xfs_attr_leaf_entry_t);
++ ichdr->freemap[i].size -=
++ min_t(uint16_t, ichdr->freemap[i].size,
++ sizeof(xfs_attr_leaf_entry_t));
+ }
+ }
+ ichdr->usedbytes += xfs_attr_leaf_entsize(leaf, args->index);
+diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
+index 19a600443b9ee..f8db3fe616df9 100644
+--- a/fs/xfs/libxfs/xfs_bmap.c
++++ b/fs/xfs/libxfs/xfs_bmap.c
+@@ -5376,16 +5376,17 @@ __xfs_bunmapi(
+ }
+ div_u64_rem(del.br_startblock, mp->m_sb.sb_rextsize, &mod);
+ if (mod) {
++ xfs_extlen_t off = mp->m_sb.sb_rextsize - mod;
++
+ /*
+ * Realtime extent is lined up at the end but not
+ * at the front. We'll get rid of full extents if
+ * we can.
+ */
+- mod = mp->m_sb.sb_rextsize - mod;
+- if (del.br_blockcount > mod) {
+- del.br_blockcount -= mod;
+- del.br_startoff += mod;
+- del.br_startblock += mod;
++ if (del.br_blockcount > off) {
++ del.br_blockcount -= off;
++ del.br_startoff += off;
++ del.br_startblock += off;
+ } else if (del.br_startoff == start &&
+ (del.br_state == XFS_EXT_UNWRITTEN ||
+ tp->t_blk_res == 0)) {
+@@ -5403,6 +5404,7 @@ __xfs_bunmapi(
+ continue;
+ } else if (del.br_state == XFS_EXT_UNWRITTEN) {
+ struct xfs_bmbt_irec prev;
++ xfs_fileoff_t unwrite_start;
+
+ /*
+ * This one is already unwritten.
+@@ -5416,12 +5418,13 @@ __xfs_bunmapi(
+ ASSERT(!isnullstartblock(prev.br_startblock));
+ ASSERT(del.br_startblock ==
+ prev.br_startblock + prev.br_blockcount);
+- if (prev.br_startoff < start) {
+- mod = start - prev.br_startoff;
+- prev.br_blockcount -= mod;
+- prev.br_startblock += mod;
+- prev.br_startoff = start;
+- }
++ unwrite_start = max3(start,
++ del.br_startoff - mod,
++ prev.br_startoff);
++ mod = unwrite_start - prev.br_startoff;
++ prev.br_startoff = unwrite_start;
++ prev.br_startblock += mod;
++ prev.br_blockcount -= mod;
+ prev.br_state = XFS_EXT_UNWRITTEN;
+ error = xfs_bmap_add_extent_unwritten_real(tp,
+ ip, whichfork, &icur, &cur,
+diff --git a/fs/xfs/libxfs/xfs_dir2_node.c b/fs/xfs/libxfs/xfs_dir2_node.c
+index 705c4f5627582..99d5b2ed67f2e 100644
+--- a/fs/xfs/libxfs/xfs_dir2_node.c
++++ b/fs/xfs/libxfs/xfs_dir2_node.c
+@@ -210,6 +210,7 @@ __xfs_dir3_free_read(
+ if (fa) {
+ xfs_verifier_error(*bpp, -EFSCORRUPTED, fa);
+ xfs_trans_brelse(tp, *bpp);
++ *bpp = NULL;
+ return -EFSCORRUPTED;
+ }
+
+diff --git a/fs/xfs/libxfs/xfs_dir2_sf.c b/fs/xfs/libxfs/xfs_dir2_sf.c
+index 85f14fc2a8da9..ae16ca7c422a9 100644
+--- a/fs/xfs/libxfs/xfs_dir2_sf.c
++++ b/fs/xfs/libxfs/xfs_dir2_sf.c
+@@ -628,7 +628,7 @@ xfs_dir2_sf_verify(
+ int i;
+ int i8count;
+ int offset;
+- int size;
++ int64_t size;
+ int error;
+ uint8_t filetype;
+
+diff --git a/fs/xfs/libxfs/xfs_iext_tree.c b/fs/xfs/libxfs/xfs_iext_tree.c
+index 7bc87408f1a0a..52451809c4786 100644
+--- a/fs/xfs/libxfs/xfs_iext_tree.c
++++ b/fs/xfs/libxfs/xfs_iext_tree.c
+@@ -596,7 +596,7 @@ xfs_iext_realloc_root(
+ struct xfs_ifork *ifp,
+ struct xfs_iext_cursor *cur)
+ {
+- size_t new_size = ifp->if_bytes + sizeof(struct xfs_iext_rec);
++ int64_t new_size = ifp->if_bytes + sizeof(struct xfs_iext_rec);
+ void *new;
+
+ /* account for the prev/next pointers */
+diff --git a/fs/xfs/libxfs/xfs_inode_fork.c b/fs/xfs/libxfs/xfs_inode_fork.c
+index c643beeb5a248..8fdd0424070e0 100644
+--- a/fs/xfs/libxfs/xfs_inode_fork.c
++++ b/fs/xfs/libxfs/xfs_inode_fork.c
+@@ -129,7 +129,7 @@ xfs_init_local_fork(
+ struct xfs_inode *ip,
+ int whichfork,
+ const void *data,
+- int size)
++ int64_t size)
+ {
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
+ int mem_size = size, real_size = 0;
+@@ -467,11 +467,11 @@ xfs_iroot_realloc(
+ void
+ xfs_idata_realloc(
+ struct xfs_inode *ip,
+- int byte_diff,
++ int64_t byte_diff,
+ int whichfork)
+ {
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
+- int new_size = (int)ifp->if_bytes + byte_diff;
++ int64_t new_size = ifp->if_bytes + byte_diff;
+
+ ASSERT(new_size >= 0);
+ ASSERT(new_size <= XFS_IFORK_SIZE(ip, whichfork));
+@@ -552,7 +552,7 @@ xfs_iextents_copy(
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
+ struct xfs_iext_cursor icur;
+ struct xfs_bmbt_irec rec;
+- int copied = 0;
++ int64_t copied = 0;
+
+ ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL | XFS_ILOCK_SHARED));
+ ASSERT(ifp->if_bytes > 0);
+diff --git a/fs/xfs/libxfs/xfs_inode_fork.h b/fs/xfs/libxfs/xfs_inode_fork.h
+index 00c62ce170d0e..7b845c052fb45 100644
+--- a/fs/xfs/libxfs/xfs_inode_fork.h
++++ b/fs/xfs/libxfs/xfs_inode_fork.h
+@@ -13,16 +13,16 @@ struct xfs_dinode;
+ * File incore extent information, present for each of data & attr forks.
+ */
+ struct xfs_ifork {
+- int if_bytes; /* bytes in if_u1 */
+- unsigned int if_seq; /* fork mod counter */
++ int64_t if_bytes; /* bytes in if_u1 */
+ struct xfs_btree_block *if_broot; /* file's incore btree root */
+- short if_broot_bytes; /* bytes allocated for root */
+- unsigned char if_flags; /* per-fork flags */
++ unsigned int if_seq; /* fork mod counter */
+ int if_height; /* height of the extent tree */
+ union {
+ void *if_root; /* extent tree root */
+ char *if_data; /* inline file data */
+ } if_u1;
++ short if_broot_bytes; /* bytes allocated for root */
++ unsigned char if_flags; /* per-fork flags */
+ };
+
+ /*
+@@ -93,12 +93,14 @@ int xfs_iformat_fork(struct xfs_inode *, struct xfs_dinode *);
+ void xfs_iflush_fork(struct xfs_inode *, struct xfs_dinode *,
+ struct xfs_inode_log_item *, int);
+ void xfs_idestroy_fork(struct xfs_inode *, int);
+-void xfs_idata_realloc(struct xfs_inode *, int, int);
++void xfs_idata_realloc(struct xfs_inode *ip, int64_t byte_diff,
++ int whichfork);
+ void xfs_iroot_realloc(struct xfs_inode *, int, int);
+ int xfs_iread_extents(struct xfs_trans *, struct xfs_inode *, int);
+ int xfs_iextents_copy(struct xfs_inode *, struct xfs_bmbt_rec *,
+ int);
+-void xfs_init_local_fork(struct xfs_inode *, int, const void *, int);
++void xfs_init_local_fork(struct xfs_inode *ip, int whichfork,
++ const void *data, int64_t size);
+
+ xfs_extnum_t xfs_iext_count(struct xfs_ifork *ifp);
+ void xfs_iext_insert(struct xfs_inode *, struct xfs_iext_cursor *cur,
+diff --git a/fs/xfs/libxfs/xfs_trans_resv.c b/fs/xfs/libxfs/xfs_trans_resv.c
+index d12bbd526e7c0..b3584cd2cc164 100644
+--- a/fs/xfs/libxfs/xfs_trans_resv.c
++++ b/fs/xfs/libxfs/xfs_trans_resv.c
+@@ -196,6 +196,24 @@ xfs_calc_inode_chunk_res(
+ return res;
+ }
+
++/*
++ * Per-extent log reservation for the btree changes involved in freeing or
++ * allocating a realtime extent. We have to be able to log as many rtbitmap
++ * blocks as needed to mark inuse MAXEXTLEN blocks' worth of realtime extents,
++ * as well as the realtime summary block.
++ */
++unsigned int
++xfs_rtalloc_log_count(
++ struct xfs_mount *mp,
++ unsigned int num_ops)
++{
++ unsigned int blksz = XFS_FSB_TO_B(mp, 1);
++ unsigned int rtbmp_bytes;
++
++ rtbmp_bytes = (MAXEXTLEN / mp->m_sb.sb_rextsize) / NBBY;
++ return (howmany(rtbmp_bytes, blksz) + 1) * num_ops;
++}
++
+ /*
+ * Various log reservation values.
+ *
+@@ -218,13 +236,21 @@ xfs_calc_inode_chunk_res(
+
+ /*
+ * In a write transaction we can allocate a maximum of 2
+- * extents. This gives:
++ * extents. This gives (t1):
+ * the inode getting the new extents: inode size
+ * the inode's bmap btree: max depth * block size
+ * the agfs of the ags from which the extents are allocated: 2 * sector
+ * the superblock free block counter: sector size
+ * the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size
+- * And the bmap_finish transaction can free bmap blocks in a join:
++ * Or, if we're writing to a realtime file (t2):
++ * the inode getting the new extents: inode size
++ * the inode's bmap btree: max depth * block size
++ * the agfs of the ags from which the extents are allocated: 2 * sector
++ * the superblock free block counter: sector size
++ * the realtime bitmap: ((MAXEXTLEN / rtextsize) / NBBY) bytes
++ * the realtime summary: 1 block
++ * the allocation btrees: 2 trees * (2 * max depth - 1) * block size
++ * And the bmap_finish transaction can free bmap blocks in a join (t3):
+ * the agfs of the ags containing the blocks: 2 * sector size
+ * the agfls of the ags containing the blocks: 2 * sector size
+ * the super block free block counter: sector size
+@@ -234,40 +260,72 @@ STATIC uint
+ xfs_calc_write_reservation(
+ struct xfs_mount *mp)
+ {
+- return XFS_DQUOT_LOGRES(mp) +
+- max((xfs_calc_inode_res(mp, 1) +
++ unsigned int t1, t2, t3;
++ unsigned int blksz = XFS_FSB_TO_B(mp, 1);
++
++ t1 = xfs_calc_inode_res(mp, 1) +
++ xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK), blksz) +
++ xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) +
++ xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2), blksz);
++
++ if (xfs_sb_version_hasrealtime(&mp->m_sb)) {
++ t2 = xfs_calc_inode_res(mp, 1) +
+ xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK),
+- XFS_FSB_TO_B(mp, 1)) +
++ blksz) +
+ xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) +
+- xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2),
+- XFS_FSB_TO_B(mp, 1))),
+- (xfs_calc_buf_res(5, mp->m_sb.sb_sectsize) +
+- xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2),
+- XFS_FSB_TO_B(mp, 1))));
++ xfs_calc_buf_res(xfs_rtalloc_log_count(mp, 1), blksz) +
++ xfs_calc_buf_res(xfs_allocfree_log_count(mp, 1), blksz);
++ } else {
++ t2 = 0;
++ }
++
++ t3 = xfs_calc_buf_res(5, mp->m_sb.sb_sectsize) +
++ xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2), blksz);
++
++ return XFS_DQUOT_LOGRES(mp) + max3(t1, t2, t3);
+ }
+
+ /*
+- * In truncating a file we free up to two extents at once. We can modify (t1):
+ * the inode being truncated: inode size
+ * the inode's bmap btree: (max depth + 1) * block size
+- * And the bmap_finish transaction can free the blocks and bmap blocks (t2):
+ * the agf for each of the ags: 4 * sector size
+ * the agfl for each of the ags: 4 * sector size
+ * the super block to reflect the freed blocks: sector size
+ * worst case split in allocation btrees per extent assuming 4 extents:
+ * 4 exts * 2 trees * (2 * max depth - 1) * block size
++ * Or, if it's a realtime file (t3):
++ * the agf for each of the ags: 2 * sector size
++ * the agfl for each of the ags: 2 * sector size
++ * the super block to reflect the freed blocks: sector size
++ * the realtime bitmap: 2 exts * ((MAXEXTLEN / rtextsize) / NBBY) bytes
++ * the realtime summary: 2 exts * 1 block
++ * worst case split in allocation btrees per extent assuming 2 extents:
++ * 2 exts * 2 trees * (2 * max depth - 1) * block size
+ */
+ STATIC uint
+ xfs_calc_itruncate_reservation(
+ struct xfs_mount *mp)
+ {
+- return XFS_DQUOT_LOGRES(mp) +
+- max((xfs_calc_inode_res(mp, 1) +
+- xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) + 1,
+- XFS_FSB_TO_B(mp, 1))),
+- (xfs_calc_buf_res(9, mp->m_sb.sb_sectsize) +
+- xfs_calc_buf_res(xfs_allocfree_log_count(mp, 4),
+- XFS_FSB_TO_B(mp, 1))));
++ unsigned int t1, t2, t3;
++ unsigned int blksz = XFS_FSB_TO_B(mp, 1);
++
++ t1 = xfs_calc_inode_res(mp, 1) +
++ xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) + 1, blksz);
++
++ t2 = xfs_calc_buf_res(9, mp->m_sb.sb_sectsize) +
++ xfs_calc_buf_res(xfs_allocfree_log_count(mp, 4), blksz);
++
++ if (xfs_sb_version_hasrealtime(&mp->m_sb)) {
++ t3 = xfs_calc_buf_res(5, mp->m_sb.sb_sectsize) +
13480 |
++ xfs_calc_buf_res(xfs_rtalloc_log_count(mp, 2), blksz) + |
13481 |
++ xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2), blksz); |
13482 |
++ } else { |
13483 |
++ t3 = 0; |
13484 |
++ } |
13485 |
++ |
13486 |
++ return XFS_DQUOT_LOGRES(mp) + max3(t1, t2, t3); |
13487 |
+ } |
13488 |
+ |
13489 |
+ /* |
13490 |
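As a rough check on the reservation arithmetic above, the sketch below plugs illustrative values into xfs_rtalloc_log_count(); the concrete MAXEXTLEN, sb_rextsize and block-size numbers are assumptions for the example, not values taken from this patch.

    /* Assumed for the example: MAXEXTLEN = 0x1fffff blocks,
     * sb_rextsize = 1, NBBY = 8, 4096-byte filesystem blocks.
     */
    unsigned int rtbmp_bytes = (0x1fffff / 1) / 8;          /* 262143 bytes */
    unsigned int per_op = howmany(262143, 4096) + 1;        /* 64 bitmap + 1 summary = 65 */

    /* xfs_calc_write_reservation() feeds this count into
     * xfs_calc_buf_res() for the realtime leg (t2) and returns
     * XFS_DQUOT_LOGRES(mp) + max3(t1, t2, t3) instead of the old
     * two-way max().
     */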
+diff --git a/fs/xfs/scrub/dir.c b/fs/xfs/scrub/dir.c |
13491 |
+index 1e2e11721eb99..20eca2d8e7c77 100644 |
13492 |
+--- a/fs/xfs/scrub/dir.c |
13493 |
++++ b/fs/xfs/scrub/dir.c |
13494 |
+@@ -152,6 +152,9 @@ xchk_dir_actor( |
13495 |
+ xname.type = XFS_DIR3_FT_UNKNOWN; |
13496 |
+ |
13497 |
+ error = xfs_dir_lookup(sdc->sc->tp, ip, &xname, &lookup_ino, NULL); |
13498 |
++ /* ENOENT means the hash lookup failed and the dir is corrupt */ |
13499 |
++ if (error == -ENOENT) |
13500 |
++ error = -EFSCORRUPTED; |
13501 |
+ if (!xchk_fblock_process_error(sdc->sc, XFS_DATA_FORK, offset, |
13502 |
+ &error)) |
13503 |
+ goto out; |
13504 |
+diff --git a/fs/xfs/scrub/scrub.c b/fs/xfs/scrub/scrub.c |
13505 |
+index 15c8c5f3f688d..720bef5779989 100644 |
13506 |
+--- a/fs/xfs/scrub/scrub.c |
13507 |
++++ b/fs/xfs/scrub/scrub.c |
13508 |
+@@ -167,6 +167,7 @@ xchk_teardown( |
13509 |
+ xfs_irele(sc->ip); |
13510 |
+ sc->ip = NULL; |
13511 |
+ } |
13512 |
++ sb_end_write(sc->mp->m_super); |
13513 |
+ if (sc->flags & XCHK_REAPING_DISABLED) |
13514 |
+ xchk_start_reaping(sc); |
13515 |
+ if (sc->flags & XCHK_HAS_QUOTAOFFLOCK) { |
13516 |
+@@ -489,6 +490,14 @@ xfs_scrub_metadata( |
13517 |
+ sc.ops = &meta_scrub_ops[sm->sm_type]; |
13518 |
+ sc.sick_mask = xchk_health_mask_for_scrub_type(sm->sm_type); |
13519 |
+ retry_op: |
13520 |
++ /* |
13521 |
++ * If freeze runs concurrently with a scrub, the freeze can be delayed |
13522 |
++ * indefinitely as we walk the filesystem and iterate over metadata |
13523 |
++ * buffers. Freeze quiesces the log (which waits for the buffer LRU to |
13524 |
++ * be emptied) and that won't happen while checking is running. |
13525 |
++ */ |
13526 |
++ sb_start_write(mp->m_super); |
13527 |
++ |
13528 |
+ /* Set up for the operation. */ |
13529 |
+ error = sc.ops->setup(&sc, ip); |
13530 |
+ if (error) |
13531 |
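The sb_start_write()/sb_end_write() pair added here (and again in xfs_getfsmap() further down) is the standard VFS freeze-protection bracket. A minimal sketch of the pattern, where my_long_metadata_walk() is a hypothetical stand-in for the scrub or fsmap work:

    int my_ioctl_body(struct xfs_mount *mp)
    {
            int error;

            sb_start_write(mp->m_super);            /* hold off freezers */
            error = my_long_metadata_walk(mp);      /* hypothetical helper */
            sb_end_write(mp->m_super);              /* allow freeze again */
            return error;
    }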
+diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c |
13532 |
+index 0c71acc1b8317..d6d78e1276254 100644 |
13533 |
+--- a/fs/xfs/xfs_bmap_util.c |
13534 |
++++ b/fs/xfs/xfs_bmap_util.c |
13535 |
+@@ -1039,6 +1039,7 @@ out_trans_cancel: |
13536 |
+ goto out_unlock; |
13537 |
+ } |
13538 |
+ |
13539 |
++/* Caller must first wait for the completion of any pending DIOs if required. */ |
13540 |
+ int |
13541 |
+ xfs_flush_unmap_range( |
13542 |
+ struct xfs_inode *ip, |
13543 |
+@@ -1050,9 +1051,6 @@ xfs_flush_unmap_range( |
13544 |
+ xfs_off_t rounding, start, end; |
13545 |
+ int error; |
13546 |
+ |
13547 |
+- /* wait for the completion of any pending DIOs */ |
13548 |
+- inode_dio_wait(inode); |
13549 |
+- |
13550 |
+ rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_SIZE); |
13551 |
+ start = round_down(offset, rounding); |
13552 |
+ end = round_up(offset + len, rounding) - 1; |
13553 |
+@@ -1084,10 +1082,6 @@ xfs_free_file_space( |
13554 |
+ if (len <= 0) /* if nothing being freed */ |
13555 |
+ return 0; |
13556 |
+ |
13557 |
+- error = xfs_flush_unmap_range(ip, offset, len); |
13558 |
+- if (error) |
13559 |
+- return error; |
13560 |
+- |
13561 |
+ startoffset_fsb = XFS_B_TO_FSB(mp, offset); |
13562 |
+ endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len); |
13563 |
+ |
13564 |
+diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c |
13565 |
+index 1e2176190c86f..203065a647652 100644 |
13566 |
+--- a/fs/xfs/xfs_file.c |
13567 |
++++ b/fs/xfs/xfs_file.c |
13568 |
+@@ -818,6 +818,36 @@ xfs_file_fallocate( |
13569 |
+ if (error) |
13570 |
+ goto out_unlock; |
13571 |
+ |
13572 |
++ /* |
13573 |
++ * Must wait for all AIO to complete before we continue as AIO can |
13574 |
++ * change the file size on completion without holding any locks we |
13575 |
++ * currently hold. We must do this first because AIO can update both |
13576 |
++ * the on disk and in memory inode sizes, and the operations that follow |
13577 |
++ * require the in-memory size to be fully up-to-date. |
13578 |
++ */ |
13579 |
++ inode_dio_wait(inode); |
13580 |
++ |
13581 |
++ /* |
13582 |
++ * Now that AIO and DIO have drained, we flush and (if necessary) invalidate |
13583 |
++ * the cached range over the first operation we are about to run. |
13584 |
++ * |
13585 |
++ * We care about zero and collapse here because they both run a hole |
13586 |
++ * punch over the range first. Because that can zero data, and the range |
13587 |
++ * of invalidation for the shift operations is much larger, we still do |
13588 |
++ * the required flush for collapse in xfs_prepare_shift(). |
13589 |
++ * |
13590 |
++ * Insert has the same range requirements as collapse, and we extend the |
13591 |
++ * file first which can zero data. Hence insert has the same |
13592 |
++ * flush/invalidate requirements as collapse and so they are both |
13593 |
++ * handled at the right time by xfs_prepare_shift(). |
13594 |
++ */ |
13595 |
++ if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE | |
13596 |
++ FALLOC_FL_COLLAPSE_RANGE)) { |
13597 |
++ error = xfs_flush_unmap_range(ip, offset, len); |
13598 |
++ if (error) |
13599 |
++ goto out_unlock; |
13600 |
++ } |
13601 |
++ |
13602 |
+ if (mode & FALLOC_FL_PUNCH_HOLE) { |
13603 |
+ error = xfs_free_file_space(ip, offset, len); |
13604 |
+ if (error) |
13605 |
+diff --git a/fs/xfs/xfs_fsmap.c b/fs/xfs/xfs_fsmap.c |
13606 |
+index d082143feb5ab..c13754e119be1 100644 |
13607 |
+--- a/fs/xfs/xfs_fsmap.c |
13608 |
++++ b/fs/xfs/xfs_fsmap.c |
13609 |
+@@ -895,6 +895,14 @@ xfs_getfsmap( |
13610 |
+ info.format_arg = arg; |
13611 |
+ info.head = head; |
13612 |
+ |
13613 |
++ /* |
13614 |
++ * If fsmap runs concurrently with a freeze, the freeze can be delayed |
13615 |
++ * indefinitely as we walk the rmapbt and iterate over metadata |
13616 |
++ * buffers. Freeze quiesces the log (which waits for the buffer LRU to |
13617 |
++ * be emptied) and that won't happen while we're reading buffers. |
13618 |
++ */ |
13619 |
++ sb_start_write(mp->m_super); |
13620 |
++ |
13621 |
+ /* For each device we support... */ |
13622 |
+ for (i = 0; i < XFS_GETFSMAP_DEVS; i++) { |
13623 |
+ /* Is this device within the range the user asked for? */ |
13624 |
+@@ -934,6 +942,7 @@ xfs_getfsmap( |
13625 |
+ |
13626 |
+ if (tp) |
13627 |
+ xfs_trans_cancel(tp); |
13628 |
++ sb_end_write(mp->m_super); |
13629 |
+ head->fmh_oflags = FMH_OF_DEV_T; |
13630 |
+ return error; |
13631 |
+ } |
13632 |
+diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c |
13633 |
+index c93c4b7328ef7..60c4526312771 100644 |
13634 |
+--- a/fs/xfs/xfs_ioctl.c |
13635 |
++++ b/fs/xfs/xfs_ioctl.c |
13636 |
+@@ -622,6 +622,7 @@ xfs_ioc_space( |
13637 |
+ error = xfs_break_layouts(inode, &iolock, BREAK_UNMAP); |
13638 |
+ if (error) |
13639 |
+ goto out_unlock; |
13640 |
++ inode_dio_wait(inode); |
13641 |
+ |
13642 |
+ switch (bf->l_whence) { |
13643 |
+ case 0: /*SEEK_SET*/ |
13644 |
+diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c |
13645 |
+index f4795fdb7389c..b32a66452d441 100644 |
13646 |
+--- a/fs/xfs/xfs_trans.c |
13647 |
++++ b/fs/xfs/xfs_trans.c |
13648 |
+@@ -306,6 +306,11 @@ xfs_trans_alloc( |
13649 |
+ * |
13650 |
+ * Note the zero-length reservation; this transaction MUST be cancelled |
13651 |
+ * without any dirty data. |
13652 |
++ * |
13653 |
++ * Callers should obtain freeze protection to avoid two conflicts with fs |
13654 |
++ * freezing: (1) having active transactions trip the m_active_trans ASSERTs; |
13655 |
++ * and (2) grabbing buffers at the same time that freeze is trying to drain |
13656 |
++ * the buffer LRU list. |
13657 |
+ */ |
13658 |
+ int |
13659 |
+ xfs_trans_alloc_empty( |
13660 |
+diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h |
13661 |
+index 818691846c909..6fd08cf04add7 100644 |
13662 |
+--- a/include/asm-generic/pgtable.h |
13663 |
++++ b/include/asm-generic/pgtable.h |
13664 |
+@@ -1171,6 +1171,16 @@ static inline bool arch_has_pfn_modify_check(void) |
13665 |
+ #endif |
13666 |
+ #endif |
13667 |
+ |
13668 |
++#ifndef p4d_offset_lockless |
13669 |
++#define p4d_offset_lockless(pgdp, pgd, address) p4d_offset(&(pgd), address) |
13670 |
++#endif |
13671 |
++#ifndef pud_offset_lockless |
13672 |
++#define pud_offset_lockless(p4dp, p4d, address) pud_offset(&(p4d), address) |
13673 |
++#endif |
13674 |
++#ifndef pmd_offset_lockless |
13675 |
++#define pmd_offset_lockless(pudp, pud, address) pmd_offset(&(pud), address) |
13676 |
++#endif |
13677 |
++ |
13678 |
+ /* |
13679 |
+ * On some architectures it depends on the mm if the p4d/pud or pmd |
13680 |
+ * layer of the page table hierarchy is folded or not. |
13681 |
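These *_offset_lockless() fallbacks let a lockless walker (gup_fast-style) pass down the entry value it already loaded instead of re-reading through a pointer that a concurrent page-table free could invalidate. A simplified sketch of the intended call pattern (present/none checks omitted):

    pgd_t pgd = READ_ONCE(*pgdp);                   /* load the entry once */
    p4d_t *p4dp = p4d_offset_lockless(pgdp, pgd, addr);
    p4d_t p4d = READ_ONCE(*p4dp);
    pud_t *pudp = pud_offset_lockless(p4dp, p4d, addr);
    pud_t pud = READ_ONCE(*pudp);
    pmd_t *pmdp = pmd_offset_lockless(pudp, pud, addr);
    /* each level keys off the value read above it, never the live entry */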
+diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h |
13682 |
+index b40fc633f3be6..a345d9fed3d8d 100644 |
13683 |
+--- a/include/linux/binfmts.h |
13684 |
++++ b/include/linux/binfmts.h |
13685 |
+@@ -44,7 +44,13 @@ struct linux_binprm { |
13686 |
+ * exec has happened. Used to sanitize execution environment |
13687 |
+ * and to set AT_SECURE auxv for glibc. |
13688 |
+ */ |
13689 |
+- secureexec:1; |
13690 |
++ secureexec:1, |
13691 |
++ /* |
13692 |
++ * Set by flush_old_exec, when exec_mmap has been called. |
13693 |
++ * This is past the point of no return, when the |
13694 |
++ * exec_update_mutex has been taken. |
13695 |
++ */ |
13696 |
++ called_exec_mmap:1; |
13697 |
+ #ifdef __alpha__ |
13698 |
+ unsigned int taso:1; |
13699 |
+ #endif |
13700 |
+diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h |
13701 |
+index 58424eb3b3291..798f0b9b43aee 100644 |
13702 |
+--- a/include/linux/debugfs.h |
13703 |
++++ b/include/linux/debugfs.h |
13704 |
+@@ -54,6 +54,8 @@ static const struct file_operations __fops = { \ |
13705 |
+ .llseek = no_llseek, \ |
13706 |
+ } |
13707 |
+ |
13708 |
++typedef struct vfsmount *(*debugfs_automount_t)(struct dentry *, void *); |
13709 |
++ |
13710 |
+ #if defined(CONFIG_DEBUG_FS) |
13711 |
+ |
13712 |
+ struct dentry *debugfs_lookup(const char *name, struct dentry *parent); |
13713 |
+@@ -75,7 +77,6 @@ struct dentry *debugfs_create_dir(const char *name, struct dentry *parent); |
13714 |
+ struct dentry *debugfs_create_symlink(const char *name, struct dentry *parent, |
13715 |
+ const char *dest); |
13716 |
+ |
13717 |
+-typedef struct vfsmount *(*debugfs_automount_t)(struct dentry *, void *); |
13718 |
+ struct dentry *debugfs_create_automount(const char *name, |
13719 |
+ struct dentry *parent, |
13720 |
+ debugfs_automount_t f, |
13721 |
+@@ -203,7 +204,7 @@ static inline struct dentry *debugfs_create_symlink(const char *name, |
13722 |
+ |
13723 |
+ static inline struct dentry *debugfs_create_automount(const char *name, |
13724 |
+ struct dentry *parent, |
13725 |
+- struct vfsmount *(*f)(void *), |
13726 |
++ debugfs_automount_t f, |
13727 |
+ void *data) |
13728 |
+ { |
13729 |
+ return ERR_PTR(-ENODEV); |
13730 |
+diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h |
13731 |
+index 645fd401c856d..a60488867dd06 100644 |
13732 |
+--- a/include/linux/kprobes.h |
13733 |
++++ b/include/linux/kprobes.h |
13734 |
+@@ -369,6 +369,8 @@ void unregister_kretprobes(struct kretprobe **rps, int num); |
13735 |
+ void kprobe_flush_task(struct task_struct *tk); |
13736 |
+ void recycle_rp_inst(struct kretprobe_instance *ri, struct hlist_head *head); |
13737 |
+ |
13738 |
++void kprobe_free_init_mem(void); |
13739 |
++ |
13740 |
+ int disable_kprobe(struct kprobe *kp); |
13741 |
+ int enable_kprobe(struct kprobe *kp); |
13742 |
+ |
13743 |
+@@ -426,6 +428,9 @@ static inline void unregister_kretprobes(struct kretprobe **rps, int num) |
13744 |
+ static inline void kprobe_flush_task(struct task_struct *tk) |
13745 |
+ { |
13746 |
+ } |
13747 |
++static inline void kprobe_free_init_mem(void) |
13748 |
++{ |
13749 |
++} |
13750 |
+ static inline int disable_kprobe(struct kprobe *kp) |
13751 |
+ { |
13752 |
+ return -ENOSYS; |
13753 |
+diff --git a/include/linux/libata.h b/include/linux/libata.h |
13754 |
+index e752368ea3516..3c3d8d6b16183 100644 |
13755 |
+--- a/include/linux/libata.h |
13756 |
++++ b/include/linux/libata.h |
13757 |
+@@ -486,6 +486,7 @@ enum hsm_task_states { |
13758 |
+ }; |
13759 |
+ |
13760 |
+ enum ata_completion_errors { |
13761 |
++ AC_ERR_OK = 0, /* no error */ |
13762 |
+ AC_ERR_DEV = (1 << 0), /* device reported error */ |
13763 |
+ AC_ERR_HSM = (1 << 1), /* host state machine violation */ |
13764 |
+ AC_ERR_TIMEOUT = (1 << 2), /* timeout */ |
13765 |
+@@ -895,9 +896,9 @@ struct ata_port_operations { |
13766 |
+ /* |
13767 |
+ * Command execution |
13768 |
+ */ |
13769 |
+- int (*qc_defer)(struct ata_queued_cmd *qc); |
13770 |
+- int (*check_atapi_dma)(struct ata_queued_cmd *qc); |
13771 |
+- void (*qc_prep)(struct ata_queued_cmd *qc); |
13772 |
++ int (*qc_defer)(struct ata_queued_cmd *qc); |
13773 |
++ int (*check_atapi_dma)(struct ata_queued_cmd *qc); |
13774 |
++ enum ata_completion_errors (*qc_prep)(struct ata_queued_cmd *qc); |
13775 |
+ unsigned int (*qc_issue)(struct ata_queued_cmd *qc); |
13776 |
+ bool (*qc_fill_rtf)(struct ata_queued_cmd *qc); |
13777 |
+ |
13778 |
+@@ -1165,7 +1166,7 @@ extern int ata_xfer_mode2shift(unsigned long xfer_mode); |
13779 |
+ extern const char *ata_mode_string(unsigned long xfer_mask); |
13780 |
+ extern unsigned long ata_id_xfermask(const u16 *id); |
13781 |
+ extern int ata_std_qc_defer(struct ata_queued_cmd *qc); |
13782 |
+-extern void ata_noop_qc_prep(struct ata_queued_cmd *qc); |
13783 |
++extern enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc); |
13784 |
+ extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg, |
13785 |
+ unsigned int n_elem); |
13786 |
+ extern unsigned int ata_dev_classify(const struct ata_taskfile *tf); |
13787 |
+@@ -1899,9 +1900,9 @@ extern const struct ata_port_operations ata_bmdma_port_ops; |
13788 |
+ .sg_tablesize = LIBATA_MAX_PRD, \ |
13789 |
+ .dma_boundary = ATA_DMA_BOUNDARY |
13790 |
+ |
13791 |
+-extern void ata_bmdma_qc_prep(struct ata_queued_cmd *qc); |
13792 |
++extern enum ata_completion_errors ata_bmdma_qc_prep(struct ata_queued_cmd *qc); |
13793 |
+ extern unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc); |
13794 |
+-extern void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc); |
13795 |
++extern enum ata_completion_errors ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc); |
13796 |
+ extern unsigned int ata_bmdma_port_intr(struct ata_port *ap, |
13797 |
+ struct ata_queued_cmd *qc); |
13798 |
+ extern irqreturn_t ata_bmdma_interrupt(int irq, void *dev_instance); |
13799 |
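With ->qc_prep() now returning enum ata_completion_errors, a driver can report a prep failure instead of having to BUG() or silently issue a bad command. A hedged sketch of a converted hook; my_fill_sg() is a hypothetical helper, not part of libata:

    static enum ata_completion_errors my_qc_prep(struct ata_queued_cmd *qc)
    {
            if (!(qc->flags & ATA_QCFLAG_DMAMAP))
                    return AC_ERR_OK;

            if (my_fill_sg(qc))             /* hypothetical S/G table setup */
                    return AC_ERR_INVALID;  /* now propagated to the caller */

            return AC_ERR_OK;
    }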
+diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h |
13800 |
+index e459b38ef33cc..cf3780a6ccc4b 100644 |
13801 |
+--- a/include/linux/mmc/card.h |
13802 |
++++ b/include/linux/mmc/card.h |
13803 |
+@@ -226,7 +226,7 @@ struct mmc_queue_req; |
13804 |
+ * MMC Physical partitions |
13805 |
+ */ |
13806 |
+ struct mmc_part { |
13807 |
+- unsigned int size; /* partition size (in bytes) */ |
13808 |
++ u64 size; /* partition size (in bytes) */ |
13809 |
+ unsigned int part_cfg; /* partition type */ |
13810 |
+ char name[MAX_MMC_PART_NAME_LEN]; |
13811 |
+ bool force_ro; /* to make boot parts RO by default */ |
13812 |
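Widening mmc_part.size matters as soon as a physical partition reaches 4 GiB, since the byte count no longer fits in 32 bits; a quick arithmetic check with illustrative values:

    unsigned int old_size = (unsigned int)(4ULL << 30);    /* truncates to 0 */
    u64 new_size = 4ULL << 30;                             /* 4294967296, correct */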
+diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h |
13813 |
+index 0bbd587fac6a9..7e9419d74b86b 100644 |
13814 |
+--- a/include/linux/nfs_page.h |
13815 |
++++ b/include/linux/nfs_page.h |
13816 |
+@@ -142,6 +142,8 @@ extern void nfs_unlock_and_release_request(struct nfs_page *); |
13817 |
+ extern int nfs_page_group_lock(struct nfs_page *); |
13818 |
+ extern void nfs_page_group_unlock(struct nfs_page *); |
13819 |
+ extern bool nfs_page_group_sync_on_bit(struct nfs_page *, unsigned int); |
13820 |
++extern int nfs_page_set_headlock(struct nfs_page *req); |
13821 |
++extern void nfs_page_clear_headlock(struct nfs_page *req); |
13822 |
+ extern bool nfs_async_iocounter_wait(struct rpc_task *, struct nfs_lock_context *); |
13823 |
+ |
13824 |
+ /* |
13825 |
+diff --git a/include/linux/pci.h b/include/linux/pci.h |
13826 |
+index f39f22f9ee474..e92bd9b32f369 100644 |
13827 |
+--- a/include/linux/pci.h |
13828 |
++++ b/include/linux/pci.h |
13829 |
+@@ -1216,7 +1216,6 @@ int pci_enable_rom(struct pci_dev *pdev); |
13830 |
+ void pci_disable_rom(struct pci_dev *pdev); |
13831 |
+ void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size); |
13832 |
+ void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom); |
13833 |
+-void __iomem __must_check *pci_platform_rom(struct pci_dev *pdev, size_t *size); |
13834 |
+ |
13835 |
+ /* Power management related routines */ |
13836 |
+ int pci_save_state(struct pci_dev *dev); |
13837 |
+diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h |
13838 |
+index b5db1ee96d789..65a7355ed07b3 100644 |
13839 |
+--- a/include/linux/qed/qed_if.h |
13840 |
++++ b/include/linux/qed/qed_if.h |
13841 |
+@@ -637,6 +637,7 @@ struct qed_dev_info { |
13842 |
+ #define QED_MFW_VERSION_3_OFFSET 24 |
13843 |
+ |
13844 |
+ u32 flash_size; |
13845 |
++ bool b_arfs_capable; |
13846 |
+ bool b_inter_pf_switch; |
13847 |
+ bool tx_switching; |
13848 |
+ bool rdma_supported; |
13849 |
+diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h |
13850 |
+index 88050259c466e..a29df79540ce6 100644 |
13851 |
+--- a/include/linux/sched/signal.h |
13852 |
++++ b/include/linux/sched/signal.h |
13853 |
+@@ -224,7 +224,14 @@ struct signal_struct { |
13854 |
+ |
13855 |
+ struct mutex cred_guard_mutex; /* guard against foreign influences on |
13856 |
+ * credential calculations |
13857 |
+- * (notably. ptrace) */ |
13858 |
++ * (notably ptrace) |
13859 |
++ * Deprecated do not use in new code. |
13860 |
++ * Use exec_update_mutex instead. |
13861 |
++ */ |
13862 |
++ struct mutex exec_update_mutex; /* Held while task_struct is being |
13863 |
++ * updated during exec, and may have |
13864 |
++ * inconsistent permissions. |
13865 |
++ */ |
13866 |
+ } __randomize_layout; |
13867 |
+ |
13868 |
+ /* |
13869 |
+diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h |
13870 |
+index bcf4cf26b8c89..a42a29952889c 100644 |
13871 |
+--- a/include/linux/seqlock.h |
13872 |
++++ b/include/linux/seqlock.h |
13873 |
+@@ -243,6 +243,13 @@ static inline void raw_write_seqcount_end(seqcount_t *s) |
13874 |
+ * usual consistency guarantee. It is one wmb cheaper, because we can |
13875 |
+ * collapse the two back-to-back wmb()s. |
13876 |
+ * |
13877 |
++ * Note that writes surrounding the barrier should be declared atomic (e.g. |
13878 |
++ * via WRITE_ONCE): a) to ensure the writes become visible to other threads |
13879 |
++ * atomically, avoiding compiler optimizations; b) to document which writes are |
13880 |
++ * meant to propagate to the reader critical section. This is necessary because |
13881 |
++ * neither the writes before nor those after the barrier are enclosed in a seq-writer |
13882 |
++ * critical section that would ensure readers are aware of ongoing writes. |
13883 |
++ * |
13884 |
+ * seqcount_t seq; |
13885 |
+ * bool X = true, Y = false; |
13886 |
+ * |
13887 |
+@@ -262,11 +269,11 @@ static inline void raw_write_seqcount_end(seqcount_t *s) |
13888 |
+ * |
13889 |
+ * void write(void) |
13890 |
+ * { |
13891 |
+- * Y = true; |
13892 |
++ * WRITE_ONCE(Y, true); |
13893 |
+ * |
13894 |
+ * raw_write_seqcount_barrier(seq); |
13895 |
+ * |
13896 |
+- * X = false; |
13897 |
++ * WRITE_ONCE(X, false); |
13898 |
+ * } |
13899 |
+ */ |
13900 |
+ static inline void raw_write_seqcount_barrier(seqcount_t *s) |
13901 |
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h |
13902 |
+index a62889c8bed7a..68139cc2f3ca3 100644 |
13903 |
+--- a/include/linux/skbuff.h |
13904 |
++++ b/include/linux/skbuff.h |
13905 |
+@@ -1816,6 +1816,18 @@ static inline __u32 skb_queue_len(const struct sk_buff_head *list_) |
13906 |
+ return list_->qlen; |
13907 |
+ } |
13908 |
+ |
13909 |
++/** |
13910 |
++ * skb_queue_len_lockless - get queue length |
13911 |
++ * @list_: list to measure |
13912 |
++ * |
13913 |
++ * Return the length of an &sk_buff queue. |
13914 |
++ * This variant can be used in lockless contexts. |
13915 |
++ */ |
13916 |
++static inline __u32 skb_queue_len_lockless(const struct sk_buff_head *list_) |
13917 |
++{ |
13918 |
++ return READ_ONCE(list_->qlen); |
13919 |
++} |
13920 |
++ |
13921 |
+ /** |
13922 |
+ * __skb_queue_head_init - initialize non-spinlock portions of sk_buff_head |
13923 |
+ * @list: queue to initialize |
13924 |
+@@ -2021,7 +2033,7 @@ static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) |
13925 |
+ { |
13926 |
+ struct sk_buff *next, *prev; |
13927 |
+ |
13928 |
+- list->qlen--; |
13929 |
++ WRITE_ONCE(list->qlen, list->qlen - 1); |
13930 |
+ next = skb->next; |
13931 |
+ prev = skb->prev; |
13932 |
+ skb->next = skb->prev = NULL; |
13933 |
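skb_queue_len_lockless() pairs with the WRITE_ONCE() added to __skb_unlink(): the writer publishes qlen atomically and a lockless reader consumes it with READ_ONCE(). A sketch of the kind of racy-but-safe fullness check this enables (names are illustrative, modelled on the af_unix user of this helper):

    static bool my_recvq_full_lockless(const struct sock *sk)
    {
            /* called without sk_receive_queue.lock; a stale answer is fine */
            return skb_queue_len_lockless(&sk->sk_receive_queue) >
                   READ_ONCE(sk->sk_max_ack_backlog);
    }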
+diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h |
13934 |
+index 26f282e5e0822..77589ed787f5c 100644 |
13935 |
+--- a/include/linux/sunrpc/svc_rdma.h |
13936 |
++++ b/include/linux/sunrpc/svc_rdma.h |
13937 |
+@@ -154,9 +154,8 @@ struct svc_rdma_send_ctxt { |
13938 |
+ }; |
13939 |
+ |
13940 |
+ /* svc_rdma_backchannel.c */ |
13941 |
+-extern int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt, |
13942 |
+- __be32 *rdma_resp, |
13943 |
+- struct xdr_buf *rcvbuf); |
13944 |
++extern void svc_rdma_handle_bc_reply(struct svc_rqst *rqstp, |
13945 |
++ struct svc_rdma_recv_ctxt *rctxt); |
13946 |
+ |
13947 |
+ /* svc_rdma_recvfrom.c */ |
13948 |
+ extern void svc_rdma_recv_ctxts_destroy(struct svcxprt_rdma *rdma); |
13949 |
+diff --git a/include/net/sock.h b/include/net/sock.h |
13950 |
+index 6d9c1131fe5c8..e6a48ebb22aa4 100644 |
13951 |
+--- a/include/net/sock.h |
13952 |
++++ b/include/net/sock.h |
13953 |
+@@ -909,11 +909,11 @@ static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb) |
13954 |
+ skb_dst_force(skb); |
13955 |
+ |
13956 |
+ if (!sk->sk_backlog.tail) |
13957 |
+- sk->sk_backlog.head = skb; |
13958 |
++ WRITE_ONCE(sk->sk_backlog.head, skb); |
13959 |
+ else |
13960 |
+ sk->sk_backlog.tail->next = skb; |
13961 |
+ |
13962 |
+- sk->sk_backlog.tail = skb; |
13963 |
++ WRITE_ONCE(sk->sk_backlog.tail, skb); |
13964 |
+ skb->next = NULL; |
13965 |
+ } |
13966 |
+ |
13967 |
+diff --git a/include/sound/hda_codec.h b/include/sound/hda_codec.h |
13968 |
+index 9a0393cf024c2..65c056ce91128 100644 |
13969 |
+--- a/include/sound/hda_codec.h |
13970 |
++++ b/include/sound/hda_codec.h |
13971 |
+@@ -494,6 +494,11 @@ void snd_hda_update_power_acct(struct hda_codec *codec); |
13972 |
+ static inline void snd_hda_set_power_save(struct hda_bus *bus, int delay) {} |
13973 |
+ #endif |
13974 |
+ |
13975 |
++static inline bool hda_codec_need_resume(struct hda_codec *codec) |
13976 |
++{ |
13977 |
++ return !codec->relaxed_resume && codec->jacktbl.used; |
13978 |
++} |
13979 |
++ |
13980 |
+ #ifdef CONFIG_SND_HDA_PATCH_LOADER |
13981 |
+ /* |
13982 |
+ * patch firmware |
13983 |
+diff --git a/include/trace/events/sctp.h b/include/trace/events/sctp.h |
13984 |
+index 7475c7be165aa..d4aac34365955 100644 |
13985 |
+--- a/include/trace/events/sctp.h |
13986 |
++++ b/include/trace/events/sctp.h |
13987 |
+@@ -75,15 +75,6 @@ TRACE_EVENT(sctp_probe, |
13988 |
+ __entry->pathmtu = asoc->pathmtu; |
13989 |
+ __entry->rwnd = asoc->peer.rwnd; |
13990 |
+ __entry->unack_data = asoc->unack_data; |
13991 |
+- |
13992 |
+- if (trace_sctp_probe_path_enabled()) { |
13993 |
+- struct sctp_transport *sp; |
13994 |
+- |
13995 |
+- list_for_each_entry(sp, &asoc->peer.transport_addr_list, |
13996 |
+- transports) { |
13997 |
+- trace_sctp_probe_path(sp, asoc); |
13998 |
+- } |
13999 |
+- } |
14000 |
+ ), |
14001 |
+ |
14002 |
+ TP_printk("asoc=%#llx mark=%#x bind_port=%d peer_port=%d pathmtu=%d " |
14003 |
+diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h |
14004 |
+index ffa3c51dbb1a0..28df77a948e56 100644 |
14005 |
+--- a/include/trace/events/sunrpc.h |
14006 |
++++ b/include/trace/events/sunrpc.h |
14007 |
+@@ -165,6 +165,7 @@ DECLARE_EVENT_CLASS(rpc_task_running, |
14008 |
+ DEFINE_RPC_RUNNING_EVENT(begin); |
14009 |
+ DEFINE_RPC_RUNNING_EVENT(run_action); |
14010 |
+ DEFINE_RPC_RUNNING_EVENT(complete); |
14011 |
++DEFINE_RPC_RUNNING_EVENT(end); |
14012 |
+ |
14013 |
+ DECLARE_EVENT_CLASS(rpc_task_queued, |
14014 |
+ |
14015 |
+diff --git a/init/init_task.c b/init/init_task.c |
14016 |
+index 9e5cbe5eab7b1..bd403ed3e4184 100644 |
14017 |
+--- a/init/init_task.c |
14018 |
++++ b/init/init_task.c |
14019 |
+@@ -26,6 +26,7 @@ static struct signal_struct init_signals = { |
14020 |
+ .multiprocess = HLIST_HEAD_INIT, |
14021 |
+ .rlim = INIT_RLIMITS, |
14022 |
+ .cred_guard_mutex = __MUTEX_INITIALIZER(init_signals.cred_guard_mutex), |
14023 |
++ .exec_update_mutex = __MUTEX_INITIALIZER(init_signals.exec_update_mutex), |
14024 |
+ #ifdef CONFIG_POSIX_TIMERS |
14025 |
+ .posix_timers = LIST_HEAD_INIT(init_signals.posix_timers), |
14026 |
+ .cputimer = { |
14027 |
+diff --git a/init/main.c b/init/main.c |
14028 |
+index 8c7d6b8ee6bd6..fef9e610b74b7 100644 |
14029 |
+--- a/init/main.c |
14030 |
++++ b/init/main.c |
14031 |
+@@ -32,6 +32,7 @@ |
14032 |
+ #include <linux/nmi.h> |
14033 |
+ #include <linux/percpu.h> |
14034 |
+ #include <linux/kmod.h> |
14035 |
++#include <linux/kprobes.h> |
14036 |
+ #include <linux/vmalloc.h> |
14037 |
+ #include <linux/kernel_stat.h> |
14038 |
+ #include <linux/start_kernel.h> |
14039 |
+@@ -1111,6 +1112,7 @@ static int __ref kernel_init(void *unused) |
14040 |
+ kernel_init_freeable(); |
14041 |
+ /* need to finish all async __init code before freeing the memory */ |
14042 |
+ async_synchronize_full(); |
14043 |
++ kprobe_free_init_mem(); |
14044 |
+ ftrace_free_init_mem(); |
14045 |
+ free_initmem(); |
14046 |
+ mark_readonly(); |
14047 |
+diff --git a/kernel/Makefile b/kernel/Makefile |
14048 |
+index 42557f251fea6..f2cc0d118a0bc 100644 |
14049 |
+--- a/kernel/Makefile |
14050 |
++++ b/kernel/Makefile |
14051 |
+@@ -115,6 +115,8 @@ obj-$(CONFIG_TORTURE_TEST) += torture.o |
14052 |
+ obj-$(CONFIG_HAS_IOMEM) += iomem.o |
14053 |
+ obj-$(CONFIG_RSEQ) += rseq.o |
14054 |
+ |
14055 |
++obj-$(CONFIG_SYSCTL_KUNIT_TEST) += sysctl-test.o |
14056 |
++ |
14057 |
+ obj-$(CONFIG_GCC_PLUGIN_STACKLEAK) += stackleak.o |
14058 |
+ KASAN_SANITIZE_stackleak.o := n |
14059 |
+ KCOV_INSTRUMENT_stackleak.o := n |
14060 |
+diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c |
14061 |
+index 4508d5e0cf696..8a8fd732ff6d0 100644 |
14062 |
+--- a/kernel/audit_watch.c |
14063 |
++++ b/kernel/audit_watch.c |
14064 |
+@@ -302,8 +302,6 @@ static void audit_update_watch(struct audit_parent *parent, |
14065 |
+ if (oentry->rule.exe) |
14066 |
+ audit_remove_mark(oentry->rule.exe); |
14067 |
+ |
14068 |
+- audit_watch_log_rule_change(r, owatch, "updated_rules"); |
14069 |
+- |
14070 |
+ call_rcu(&oentry->rcu, audit_free_rule_rcu); |
14071 |
+ } |
14072 |
+ |
14073 |
+diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c |
14074 |
+index 039d64b1bfb7d..728ffec52cf36 100644 |
14075 |
+--- a/kernel/bpf/hashtab.c |
14076 |
++++ b/kernel/bpf/hashtab.c |
14077 |
+@@ -664,15 +664,7 @@ static void htab_elem_free_rcu(struct rcu_head *head) |
14078 |
+ struct htab_elem *l = container_of(head, struct htab_elem, rcu); |
14079 |
+ struct bpf_htab *htab = l->htab; |
14080 |
+ |
14081 |
+- /* must increment bpf_prog_active to avoid kprobe+bpf triggering while |
14082 |
+- * we're calling kfree, otherwise deadlock is possible if kprobes |
14083 |
+- * are placed somewhere inside of slub |
14084 |
+- */ |
14085 |
+- preempt_disable(); |
14086 |
+- __this_cpu_inc(bpf_prog_active); |
14087 |
+ htab_elem_free(htab, l); |
14088 |
+- __this_cpu_dec(bpf_prog_active); |
14089 |
+- preempt_enable(); |
14090 |
+ } |
14091 |
+ |
14092 |
+ static void htab_put_fd_value(struct bpf_htab *htab, struct htab_elem *l) |
14093 |
+diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c |
14094 |
+index 218c09ff6a273..375d93eb71c71 100644 |
14095 |
+--- a/kernel/bpf/inode.c |
14096 |
++++ b/kernel/bpf/inode.c |
14097 |
+@@ -205,10 +205,12 @@ static void *map_seq_next(struct seq_file *m, void *v, loff_t *pos) |
14098 |
+ else |
14099 |
+ prev_key = key; |
14100 |
+ |
14101 |
++ rcu_read_lock(); |
14102 |
+ if (map->ops->map_get_next_key(map, prev_key, key)) { |
14103 |
+ map_iter(m)->done = true; |
14104 |
+- return NULL; |
14105 |
++ key = NULL; |
14106 |
+ } |
14107 |
++ rcu_read_unlock(); |
14108 |
+ return key; |
14109 |
+ } |
14110 |
+ |
14111 |
+diff --git a/kernel/events/core.c b/kernel/events/core.c |
14112 |
+index db1f5aa755f22..47646050efa0c 100644 |
14113 |
+--- a/kernel/events/core.c |
14114 |
++++ b/kernel/events/core.c |
14115 |
+@@ -1253,7 +1253,7 @@ static void put_ctx(struct perf_event_context *ctx) |
14116 |
+ * function. |
14117 |
+ * |
14118 |
+ * Lock order: |
14119 |
+- * cred_guard_mutex |
14120 |
++ * exec_update_mutex |
14121 |
+ * task_struct::perf_event_mutex |
14122 |
+ * perf_event_context::mutex |
14123 |
+ * perf_event::child_mutex; |
14124 |
+@@ -11002,14 +11002,14 @@ SYSCALL_DEFINE5(perf_event_open, |
14125 |
+ } |
14126 |
+ |
14127 |
+ if (task) { |
14128 |
+- err = mutex_lock_interruptible(&task->signal->cred_guard_mutex); |
14129 |
++ err = mutex_lock_interruptible(&task->signal->exec_update_mutex); |
14130 |
+ if (err) |
14131 |
+ goto err_task; |
14132 |
+ |
14133 |
+ /* |
14134 |
+ * Reuse ptrace permission checks for now. |
14135 |
+ * |
14136 |
+- * We must hold cred_guard_mutex across this and any potential |
14137 |
++ * We must hold exec_update_mutex across this and any potential |
14138 |
+ * perf_install_in_context() call for this new event to |
14139 |
+ * serialize against exec() altering our credentials (and the |
14140 |
+ * perf_event_exit_task() that could imply). |
14141 |
+@@ -11298,7 +11298,7 @@ SYSCALL_DEFINE5(perf_event_open, |
14142 |
+ mutex_unlock(&ctx->mutex); |
14143 |
+ |
14144 |
+ if (task) { |
14145 |
+- mutex_unlock(&task->signal->cred_guard_mutex); |
14146 |
++ mutex_unlock(&task->signal->exec_update_mutex); |
14147 |
+ put_task_struct(task); |
14148 |
+ } |
14149 |
+ |
14150 |
+@@ -11334,7 +11334,7 @@ err_alloc: |
14151 |
+ free_event(event); |
14152 |
+ err_cred: |
14153 |
+ if (task) |
14154 |
+- mutex_unlock(&task->signal->cred_guard_mutex); |
14155 |
++ mutex_unlock(&task->signal->exec_update_mutex); |
14156 |
+ err_task: |
14157 |
+ if (task) |
14158 |
+ put_task_struct(task); |
14159 |
+@@ -11639,7 +11639,7 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn) |
14160 |
+ /* |
14161 |
+ * When a child task exits, feed back event values to parent events. |
14162 |
+ * |
14163 |
+- * Can be called with cred_guard_mutex held when called from |
14164 |
++ * Can be called with exec_update_mutex held when called from |
14165 |
+ * install_exec_creds(). |
14166 |
+ */ |
14167 |
+ void perf_event_exit_task(struct task_struct *child) |
14168 |
+diff --git a/kernel/fork.c b/kernel/fork.c |
14169 |
+index 9180f4416dbab..594272569a80f 100644 |
14170 |
+--- a/kernel/fork.c |
14171 |
++++ b/kernel/fork.c |
14172 |
+@@ -1221,7 +1221,7 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode) |
14173 |
+ struct mm_struct *mm; |
14174 |
+ int err; |
14175 |
+ |
14176 |
+- err = mutex_lock_killable(&task->signal->cred_guard_mutex); |
14177 |
++ err = mutex_lock_killable(&task->signal->exec_update_mutex); |
14178 |
+ if (err) |
14179 |
+ return ERR_PTR(err); |
14180 |
+ |
14181 |
+@@ -1231,7 +1231,7 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode) |
14182 |
+ mmput(mm); |
14183 |
+ mm = ERR_PTR(-EACCES); |
14184 |
+ } |
14185 |
+- mutex_unlock(&task->signal->cred_guard_mutex); |
14186 |
++ mutex_unlock(&task->signal->exec_update_mutex); |
14187 |
+ |
14188 |
+ return mm; |
14189 |
+ } |
14190 |
+@@ -1586,6 +1586,7 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) |
14191 |
+ sig->oom_score_adj_min = current->signal->oom_score_adj_min; |
14192 |
+ |
14193 |
+ mutex_init(&sig->cred_guard_mutex); |
14194 |
++ mutex_init(&sig->exec_update_mutex); |
14195 |
+ |
14196 |
+ return 0; |
14197 |
+ } |
14198 |
+diff --git a/kernel/kcmp.c b/kernel/kcmp.c |
14199 |
+index a0e3d7a0e8b81..b3ff9288c6cc9 100644 |
14200 |
+--- a/kernel/kcmp.c |
14201 |
++++ b/kernel/kcmp.c |
14202 |
+@@ -173,8 +173,8 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type, |
14203 |
+ /* |
14204 |
+ * One should have enough rights to inspect task details. |
14205 |
+ */ |
14206 |
+- ret = kcmp_lock(&task1->signal->cred_guard_mutex, |
14207 |
+- &task2->signal->cred_guard_mutex); |
14208 |
++ ret = kcmp_lock(&task1->signal->exec_update_mutex, |
14209 |
++ &task2->signal->exec_update_mutex); |
14210 |
+ if (ret) |
14211 |
+ goto err; |
14212 |
+ if (!ptrace_may_access(task1, PTRACE_MODE_READ_REALCREDS) || |
14213 |
+@@ -229,8 +229,8 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type, |
14214 |
+ } |
14215 |
+ |
14216 |
+ err_unlock: |
14217 |
+- kcmp_unlock(&task1->signal->cred_guard_mutex, |
14218 |
+- &task2->signal->cred_guard_mutex); |
14219 |
++ kcmp_unlock(&task1->signal->exec_update_mutex, |
14220 |
++ &task2->signal->exec_update_mutex); |
14221 |
+ err: |
14222 |
+ put_task_struct(task1); |
14223 |
+ put_task_struct(task2); |
14224 |
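Taking two tasks' exec_update_mutex at once is deadlock-safe here because kcmp_lock() (already present in this file, unchanged by the patch) orders the two mutexes by address before locking. Roughly, the pattern is:

    static int kcmp_lock(struct mutex *m1, struct mutex *m2)
    {
            int err;

            if (m2 > m1)            /* always take the lower address first */
                    swap(m1, m2);

            err = mutex_lock_killable(m1);
            if (!err && likely(m1 != m2)) {
                    err = mutex_lock_killable_nested(m2, SINGLE_DEPTH_NESTING);
                    if (err)
                            mutex_unlock(m1);
            }
            return err;
    }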
+diff --git a/kernel/kprobes.c b/kernel/kprobes.c |
14225 |
+index 5646f291eb705..283c8b01ce789 100644 |
14226 |
+--- a/kernel/kprobes.c |
14227 |
++++ b/kernel/kprobes.c |
14228 |
+@@ -1076,9 +1076,20 @@ static int disarm_kprobe_ftrace(struct kprobe *p) |
14229 |
+ ipmodify ? &kprobe_ipmodify_enabled : &kprobe_ftrace_enabled); |
14230 |
+ } |
14231 |
+ #else /* !CONFIG_KPROBES_ON_FTRACE */ |
14232 |
+-#define prepare_kprobe(p) arch_prepare_kprobe(p) |
14233 |
+-#define arm_kprobe_ftrace(p) (-ENODEV) |
14234 |
+-#define disarm_kprobe_ftrace(p) (-ENODEV) |
14235 |
++static inline int prepare_kprobe(struct kprobe *p) |
14236 |
++{ |
14237 |
++ return arch_prepare_kprobe(p); |
14238 |
++} |
14239 |
++ |
14240 |
++static inline int arm_kprobe_ftrace(struct kprobe *p) |
14241 |
++{ |
14242 |
++ return -ENODEV; |
14243 |
++} |
14244 |
++ |
14245 |
++static inline int disarm_kprobe_ftrace(struct kprobe *p) |
14246 |
++{ |
14247 |
++ return -ENODEV; |
14248 |
++} |
14249 |
+ #endif |
14250 |
+ |
14251 |
+ /* Arm a kprobe with text_mutex */ |
14252 |
+@@ -2110,9 +2121,10 @@ static void kill_kprobe(struct kprobe *p) |
14253 |
+ |
14254 |
+ /* |
14255 |
+ * The module is going away. We should disarm the kprobe which |
14256 |
+- * is using ftrace. |
14257 |
++ * is using ftrace, because the ftrace framework is still available at |
14258 |
++ * MODULE_STATE_GOING notification. |
14259 |
+ */ |
14260 |
+- if (kprobe_ftrace(p)) |
14261 |
++ if (kprobe_ftrace(p) && !kprobe_disabled(p) && !kprobes_all_disarmed) |
14262 |
+ disarm_kprobe_ftrace(p); |
14263 |
+ } |
14264 |
+ |
14265 |
+@@ -2308,6 +2320,28 @@ static struct notifier_block kprobe_module_nb = { |
14266 |
+ extern unsigned long __start_kprobe_blacklist[]; |
14267 |
+ extern unsigned long __stop_kprobe_blacklist[]; |
14268 |
+ |
14269 |
++void kprobe_free_init_mem(void) |
14270 |
++{ |
14271 |
++ void *start = (void *)(&__init_begin); |
14272 |
++ void *end = (void *)(&__init_end); |
14273 |
++ struct hlist_head *head; |
14274 |
++ struct kprobe *p; |
14275 |
++ int i; |
14276 |
++ |
14277 |
++ mutex_lock(&kprobe_mutex); |
14278 |
++ |
14279 |
++ /* Kill all kprobes on initmem */ |
14280 |
++ for (i = 0; i < KPROBE_TABLE_SIZE; i++) { |
14281 |
++ head = &kprobe_table[i]; |
14282 |
++ hlist_for_each_entry(p, head, hlist) { |
14283 |
++ if (start <= (void *)p->addr && (void *)p->addr < end) |
14284 |
++ kill_kprobe(p); |
14285 |
++ } |
14286 |
++ } |
14287 |
++ |
14288 |
++ mutex_unlock(&kprobe_mutex); |
14289 |
++} |
14290 |
++ |
14291 |
+ static int __init init_kprobes(void) |
14292 |
+ { |
14293 |
+ int i, err = 0; |
14294 |
+diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c |
14295 |
+index 9ab1a965c3b92..bca0f7f71cde4 100644 |
14296 |
+--- a/kernel/locking/lockdep.c |
14297 |
++++ b/kernel/locking/lockdep.c |
14298 |
+@@ -2302,18 +2302,6 @@ static int check_irq_usage(struct task_struct *curr, struct held_lock *prev, |
14299 |
+ return 0; |
14300 |
+ } |
14301 |
+ |
14302 |
+-static void inc_chains(void) |
14303 |
+-{ |
14304 |
+- if (current->hardirq_context) |
14305 |
+- nr_hardirq_chains++; |
14306 |
+- else { |
14307 |
+- if (current->softirq_context) |
14308 |
+- nr_softirq_chains++; |
14309 |
+- else |
14310 |
+- nr_process_chains++; |
14311 |
+- } |
14312 |
+-} |
14313 |
+- |
14314 |
+ #else |
14315 |
+ |
14316 |
+ static inline int check_irq_usage(struct task_struct *curr, |
14317 |
+@@ -2321,13 +2309,27 @@ static inline int check_irq_usage(struct task_struct *curr, |
14318 |
+ { |
14319 |
+ return 1; |
14320 |
+ } |
14321 |
++#endif /* CONFIG_TRACE_IRQFLAGS */ |
14322 |
+ |
14323 |
+-static inline void inc_chains(void) |
14324 |
++static void inc_chains(int irq_context) |
14325 |
+ { |
14326 |
+- nr_process_chains++; |
14327 |
++ if (irq_context & LOCK_CHAIN_HARDIRQ_CONTEXT) |
14328 |
++ nr_hardirq_chains++; |
14329 |
++ else if (irq_context & LOCK_CHAIN_SOFTIRQ_CONTEXT) |
14330 |
++ nr_softirq_chains++; |
14331 |
++ else |
14332 |
++ nr_process_chains++; |
14333 |
+ } |
14334 |
+ |
14335 |
+-#endif /* CONFIG_TRACE_IRQFLAGS */ |
14336 |
++static void dec_chains(int irq_context) |
14337 |
++{ |
14338 |
++ if (irq_context & LOCK_CHAIN_HARDIRQ_CONTEXT) |
14339 |
++ nr_hardirq_chains--; |
14340 |
++ else if (irq_context & LOCK_CHAIN_SOFTIRQ_CONTEXT) |
14341 |
++ nr_softirq_chains--; |
14342 |
++ else |
14343 |
++ nr_process_chains--; |
14344 |
++} |
14345 |
+ |
14346 |
+ static void |
14347 |
+ print_deadlock_scenario(struct held_lock *nxt, struct held_lock *prv) |
14348 |
+@@ -2847,7 +2849,7 @@ static inline int add_chain_cache(struct task_struct *curr, |
14349 |
+ |
14350 |
+ hlist_add_head_rcu(&chain->entry, hash_head); |
14351 |
+ debug_atomic_inc(chain_lookup_misses); |
14352 |
+- inc_chains(); |
14353 |
++ inc_chains(chain->irq_context); |
14354 |
+ |
14355 |
+ return 1; |
14356 |
+ } |
14357 |
+@@ -3600,7 +3602,8 @@ lock_used: |
14358 |
+ |
14359 |
+ static inline unsigned int task_irq_context(struct task_struct *task) |
14360 |
+ { |
14361 |
+- return 2 * !!task->hardirq_context + !!task->softirq_context; |
14362 |
++ return LOCK_CHAIN_HARDIRQ_CONTEXT * !!task->hardirq_context + |
14363 |
++ LOCK_CHAIN_SOFTIRQ_CONTEXT * !!task->softirq_context; |
14364 |
+ } |
14365 |
+ |
14366 |
+ static int separate_irq_context(struct task_struct *curr, |
14367 |
+@@ -4805,6 +4808,8 @@ recalc: |
14368 |
+ return; |
14369 |
+ /* Overwrite the chain key for concurrent RCU readers. */ |
14370 |
+ WRITE_ONCE(chain->chain_key, chain_key); |
14371 |
++ dec_chains(chain->irq_context); |
14372 |
++ |
14373 |
+ /* |
14374 |
+ * Note: calling hlist_del_rcu() from inside a |
14375 |
+ * hlist_for_each_entry_rcu() loop is safe. |
14376 |
+@@ -4826,6 +4831,7 @@ recalc: |
14377 |
+ } |
14378 |
+ *new_chain = *chain; |
14379 |
+ hlist_add_head_rcu(&new_chain->entry, chainhashentry(chain_key)); |
14380 |
++ inc_chains(new_chain->irq_context); |
14381 |
+ #endif |
14382 |
+ } |
14383 |
+ |
14384 |
+diff --git a/kernel/locking/lockdep_internals.h b/kernel/locking/lockdep_internals.h |
14385 |
+index 18d85aebbb57f..a525368b8cf61 100644 |
14386 |
+--- a/kernel/locking/lockdep_internals.h |
14387 |
++++ b/kernel/locking/lockdep_internals.h |
14388 |
+@@ -106,6 +106,12 @@ static const unsigned long LOCKF_USED_IN_IRQ_READ = |
14389 |
+ #define STACK_TRACE_HASH_SIZE 16384 |
14390 |
+ #endif |
14391 |
+ |
14392 |
++/* |
14393 |
++ * Bit definitions for lock_chain.irq_context |
14394 |
++ */ |
14395 |
++#define LOCK_CHAIN_SOFTIRQ_CONTEXT (1 << 0) |
14396 |
++#define LOCK_CHAIN_HARDIRQ_CONTEXT (1 << 1) |
14397 |
++ |
14398 |
+ #define MAX_LOCKDEP_CHAINS (1UL << MAX_LOCKDEP_CHAINS_BITS) |
14399 |
+ |
14400 |
+ #define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5) |
14401 |
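Storing the irq context in the chain (via the two LOCK_CHAIN_* bits above) is what makes the new inc_chains()/dec_chains() symmetric: the counters are keyed off the chain itself, not off whatever context the CPU happens to be in when the chain is later recycled. A small illustration:

    /* chain created while in hardirq, not softirq */
    unsigned int ctx = LOCK_CHAIN_HARDIRQ_CONTEXT * 1 +
                       LOCK_CHAIN_SOFTIRQ_CONTEXT * 0;

    inc_chains(ctx);    /* bumps nr_hardirq_chains */
    /* ... chain recycled later, possibly from process context ... */
    dec_chains(ctx);    /* still decrements nr_hardirq_chains */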
+diff --git a/kernel/notifier.c b/kernel/notifier.c |
14402 |
+index 157d7c29f7207..f6d5ffe4e72ec 100644 |
14403 |
+--- a/kernel/notifier.c |
14404 |
++++ b/kernel/notifier.c |
14405 |
+@@ -23,7 +23,10 @@ static int notifier_chain_register(struct notifier_block **nl, |
14406 |
+ struct notifier_block *n) |
14407 |
+ { |
14408 |
+ while ((*nl) != NULL) { |
14409 |
+- WARN_ONCE(((*nl) == n), "double register detected"); |
14410 |
++ if (unlikely((*nl) == n)) { |
14411 |
++ WARN(1, "double register detected"); |
14412 |
++ return 0; |
14413 |
++ } |
14414 |
+ if (n->priority > (*nl)->priority) |
14415 |
+ break; |
14416 |
+ nl = &((*nl)->next); |
14417 |
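The notifier change converts a double registration from a WARN_ONCE followed by list corruption (the block ends up linked after itself, so the chain can loop back on itself) into a warn-and-return. A hedged usage sketch with a hypothetical blocking chain:

    static int my_cb(struct notifier_block *nb, unsigned long ev, void *data)
    {
            return NOTIFY_OK;
    }

    static struct notifier_block my_nb = { .notifier_call = my_cb };
    static BLOCKING_NOTIFIER_HEAD(my_chain);

    void my_init(void)
    {
            blocking_notifier_chain_register(&my_chain, &my_nb);
            /* The second call now WARNs and returns 0 instead of
             * corrupting the chain. */
            blocking_notifier_chain_register(&my_chain, &my_nb);
    }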
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c |
14418 |
+index 971197f5d8ee5..5569ef6bc1839 100644 |
14419 |
+--- a/kernel/printk/printk.c |
14420 |
++++ b/kernel/printk/printk.c |
14421 |
+@@ -2193,6 +2193,9 @@ static int __init console_setup(char *str) |
14422 |
+ char *s, *options, *brl_options = NULL; |
14423 |
+ int idx; |
14424 |
+ |
14425 |
++ if (str[0] == 0) |
14426 |
++ return 1; |
14427 |
++ |
14428 |
+ if (_braille_console_setup(&str, &brl_options)) |
14429 |
+ return 1; |
14430 |
+ |
14431 |
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c |
14432 |
+index 352239c411a44..79ce22de44095 100644 |
14433 |
+--- a/kernel/sched/core.c |
14434 |
++++ b/kernel/sched/core.c |
14435 |
+@@ -4199,7 +4199,8 @@ static inline void sched_submit_work(struct task_struct *tsk) |
14436 |
+ * it wants to wake up a task to maintain concurrency. |
14437 |
+ * As this function is called inside the schedule() context, |
14438 |
+ * we disable preemption to avoid it calling schedule() again |
14439 |
+- * in the possible wakeup of a kworker. |
14440 |
++ * in the possible wakeup of a kworker and because wq_worker_sleeping() |
14441 |
++ * requires it. |
14442 |
+ */ |
14443 |
+ if (tsk->flags & PF_WQ_WORKER) { |
14444 |
+ preempt_disable(); |
14445 |
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c |
14446 |
+index 20bf1f66733ac..b02a83ff40687 100644 |
14447 |
+--- a/kernel/sched/fair.c |
14448 |
++++ b/kernel/sched/fair.c |
14449 |
+@@ -4383,16 +4383,16 @@ static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg) |
14450 |
+ } |
14451 |
+ |
14452 |
+ /* returns 0 on failure to allocate runtime */ |
14453 |
+-static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq) |
14454 |
++static int __assign_cfs_rq_runtime(struct cfs_bandwidth *cfs_b, |
14455 |
++ struct cfs_rq *cfs_rq, u64 target_runtime) |
14456 |
+ { |
14457 |
+- struct task_group *tg = cfs_rq->tg; |
14458 |
+- struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg); |
14459 |
+- u64 amount = 0, min_amount; |
14460 |
++ u64 min_amount, amount = 0; |
14461 |
++ |
14462 |
++ lockdep_assert_held(&cfs_b->lock); |
14463 |
+ |
14464 |
+ /* note: this is a positive sum as runtime_remaining <= 0 */ |
14465 |
+- min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining; |
14466 |
++ min_amount = target_runtime - cfs_rq->runtime_remaining; |
14467 |
+ |
14468 |
+- raw_spin_lock(&cfs_b->lock); |
14469 |
+ if (cfs_b->quota == RUNTIME_INF) |
14470 |
+ amount = min_amount; |
14471 |
+ else { |
14472 |
+@@ -4404,13 +4404,25 @@ static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq) |
14473 |
+ cfs_b->idle = 0; |
14474 |
+ } |
14475 |
+ } |
14476 |
+- raw_spin_unlock(&cfs_b->lock); |
14477 |
+ |
14478 |
+ cfs_rq->runtime_remaining += amount; |
14479 |
+ |
14480 |
+ return cfs_rq->runtime_remaining > 0; |
14481 |
+ } |
14482 |
+ |
14483 |
++/* returns 0 on failure to allocate runtime */ |
14484 |
++static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq) |
14485 |
++{ |
14486 |
++ struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); |
14487 |
++ int ret; |
14488 |
++ |
14489 |
++ raw_spin_lock(&cfs_b->lock); |
14490 |
++ ret = __assign_cfs_rq_runtime(cfs_b, cfs_rq, sched_cfs_bandwidth_slice()); |
14491 |
++ raw_spin_unlock(&cfs_b->lock); |
14492 |
++ |
14493 |
++ return ret; |
14494 |
++} |
14495 |
++ |
14496 |
+ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) |
14497 |
+ { |
14498 |
+ /* dock delta_exec before expiring quota (as it could span periods) */ |
14499 |
+@@ -4499,13 +4511,33 @@ static int tg_throttle_down(struct task_group *tg, void *data) |
14500 |
+ return 0; |
14501 |
+ } |
14502 |
+ |
14503 |
+-static void throttle_cfs_rq(struct cfs_rq *cfs_rq) |
14504 |
++static bool throttle_cfs_rq(struct cfs_rq *cfs_rq) |
14505 |
+ { |
14506 |
+ struct rq *rq = rq_of(cfs_rq); |
14507 |
+ struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); |
14508 |
+ struct sched_entity *se; |
14509 |
+ long task_delta, idle_task_delta, dequeue = 1; |
14510 |
+- bool empty; |
14511 |
++ |
14512 |
++ raw_spin_lock(&cfs_b->lock); |
14513 |
++ /* This will start the period timer if necessary */ |
14514 |
++ if (__assign_cfs_rq_runtime(cfs_b, cfs_rq, 1)) { |
14515 |
++ /* |
14516 |
++ * We have raced with bandwidth becoming available, and if we |
14517 |
++ * actually throttled the timer might not unthrottle us for an |
14518 |
++ * entire period. We additionally needed to make sure that any |
14519 |
++ * subsequent check_cfs_rq_runtime calls agree not to throttle |
14520 |
++ * us, as we may commit to do cfs put_prev+pick_next, so we ask |
14521 |
++ * for 1ns of runtime rather than just check cfs_b. |
14522 |
++ */ |
14523 |
++ dequeue = 0; |
14524 |
++ } else { |
14525 |
++ list_add_tail_rcu(&cfs_rq->throttled_list, |
14526 |
++ &cfs_b->throttled_cfs_rq); |
14527 |
++ } |
14528 |
++ raw_spin_unlock(&cfs_b->lock); |
14529 |
++ |
14530 |
++ if (!dequeue) |
14531 |
++ return false; /* Throttle no longer required. */ |
14532 |
+ |
14533 |
+ se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))]; |
14534 |
+ |
14535 |
+@@ -4534,29 +4566,13 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq) |
14536 |
+ if (!se) |
14537 |
+ sub_nr_running(rq, task_delta); |
14538 |
+ |
14539 |
+- cfs_rq->throttled = 1; |
14540 |
+- cfs_rq->throttled_clock = rq_clock(rq); |
14541 |
+- raw_spin_lock(&cfs_b->lock); |
14542 |
+- empty = list_empty(&cfs_b->throttled_cfs_rq); |
14543 |
+- |
14544 |
+- /* |
14545 |
+- * Add to the _head_ of the list, so that an already-started |
14546 |
+- * distribute_cfs_runtime will not see us. If disribute_cfs_runtime is |
14547 |
+- * not running add to the tail so that later runqueues don't get starved. |
14548 |
+- */ |
14549 |
+- if (cfs_b->distribute_running) |
14550 |
+- list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq); |
14551 |
+- else |
14552 |
+- list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq); |
14553 |
+- |
14554 |
+ /* |
14555 |
+- * If we're the first throttled task, make sure the bandwidth |
14556 |
+- * timer is running. |
14557 |
++ * Note: distribution will already see us throttled via the |
14558 |
++ * throttled-list. rq->lock protects completion. |
14559 |
+ */ |
14560 |
+- if (empty) |
14561 |
+- start_cfs_bandwidth(cfs_b); |
14562 |
+- |
14563 |
+- raw_spin_unlock(&cfs_b->lock); |
14564 |
++ cfs_rq->throttled = 1; |
14565 |
++ cfs_rq->throttled_clock = rq_clock(rq); |
14566 |
++ return true; |
14567 |
+ } |
14568 |
+ |
14569 |
+ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) |
14570 |
+@@ -4915,8 +4931,7 @@ static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) |
14571 |
+ if (cfs_rq_throttled(cfs_rq)) |
14572 |
+ return true; |
14573 |
+ |
14574 |
+- throttle_cfs_rq(cfs_rq); |
14575 |
+- return true; |
14576 |
++ return throttle_cfs_rq(cfs_rq); |
14577 |
+ } |
14578 |
+ |
14579 |
+ static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer) |
14580 |
+diff --git a/kernel/sys.c b/kernel/sys.c |
14581 |
+index a611d1d58c7d0..3459a5ce0da01 100644 |
14582 |
+--- a/kernel/sys.c |
14583 |
++++ b/kernel/sys.c |
14584 |
+@@ -1279,11 +1279,13 @@ SYSCALL_DEFINE1(uname, struct old_utsname __user *, name) |
14585 |
+ |
14586 |
+ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name) |
14587 |
+ { |
14588 |
+- struct oldold_utsname tmp = {}; |
14589 |
++ struct oldold_utsname tmp; |
14590 |
+ |
14591 |
+ if (!name) |
14592 |
+ return -EFAULT; |
14593 |
+ |
14594 |
++ memset(&tmp, 0, sizeof(tmp)); |
14595 |
++ |
14596 |
+ down_read(&uts_sem); |
14597 |
+ memcpy(&tmp.sysname, &utsname()->sysname, __OLD_UTS_LEN); |
14598 |
+ memcpy(&tmp.nodename, &utsname()->nodename, __OLD_UTS_LEN); |
14599 |
+diff --git a/kernel/sysctl-test.c b/kernel/sysctl-test.c |
14600 |
+new file mode 100644 |
14601 |
+index 0000000000000..2a63241a8453b |
14602 |
+--- /dev/null |
14603 |
++++ b/kernel/sysctl-test.c |
14604 |
+@@ -0,0 +1,392 @@ |
14605 |
++// SPDX-License-Identifier: GPL-2.0 |
14606 |
++/* |
14607 |
++ * KUnit test of proc sysctl. |
14608 |
++ */ |
14609 |
++ |
14610 |
++#include <kunit/test.h> |
14611 |
++#include <linux/sysctl.h> |
14612 |
++ |
14613 |
++#define KUNIT_PROC_READ 0 |
14614 |
++#define KUNIT_PROC_WRITE 1 |
14615 |
++ |
14616 |
++static int i_zero; |
14617 |
++static int i_one_hundred = 100; |
14618 |
++ |
14619 |
++/* |
14620 |
++ * Test that proc_dointvec will not try to use a NULL .data field even when the |
14621 |
++ * length is non-zero. |
14622 |
++ */ |
14623 |
++static void sysctl_test_api_dointvec_null_tbl_data(struct kunit *test) |
14624 |
++{ |
14625 |
++ struct ctl_table null_data_table = { |
14626 |
++ .procname = "foo", |
14627 |
++ /* |
14628 |
++ * Here we are testing that proc_dointvec behaves correctly when |
14629 |
++ * we give it a NULL .data field. Normally this would point to a |
14630 |
++ * piece of memory where the value would be stored. |
14631 |
++ */ |
14632 |
++ .data = NULL, |
14633 |
++ .maxlen = sizeof(int), |
14634 |
++ .mode = 0644, |
14635 |
++ .proc_handler = proc_dointvec, |
14636 |
++ .extra1 = &i_zero, |
14637 |
++ .extra2 = &i_one_hundred, |
14638 |
++ }; |
14639 |
++ /* |
14640 |
++ * proc_dointvec expects a buffer in user space, so we allocate one. We |
14641 |
++ * also need to cast it to __user so sparse doesn't get mad. |
14642 |
++ */ |
14643 |
++ void __user *buffer = (void __user *)kunit_kzalloc(test, sizeof(int), |
14644 |
++ GFP_USER); |
14645 |
++ size_t len; |
14646 |
++ loff_t pos; |
14647 |
++ |
14648 |
++ /* |
14649 |
++ * We don't care what the starting length is since proc_dointvec should |
14650 |
++ * not try to read because .data is NULL. |
14651 |
++ */ |
14652 |
++ len = 1234; |
14653 |
++ KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&null_data_table, |
14654 |
++ KUNIT_PROC_READ, buffer, &len, |
14655 |
++ &pos)); |
14656 |
++ KUNIT_EXPECT_EQ(test, (size_t)0, len); |
14657 |
++ |
14658 |
++ /* |
14659 |
++ * See above. |
14660 |
++ */ |
14661 |
++ len = 1234; |
14662 |
++ KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&null_data_table, |
14663 |
++ KUNIT_PROC_WRITE, buffer, &len, |
14664 |
++ &pos)); |
14665 |
++ KUNIT_EXPECT_EQ(test, (size_t)0, len); |
14666 |
++} |
14667 |
++ |
14668 |
++/* |
14669 |
++ * Similar to the previous test, we create a struct ctl_table that has a .data |
14670 |
++ * field that proc_dointvec cannot do anything with; however, this time it is |
14671 |
++ * because we tell proc_dointvec that the size is 0. |
14672 |
++ */ |
++static void sysctl_test_api_dointvec_table_maxlen_unset(struct kunit *test)
++{
++ int data = 0;
++ struct ctl_table data_maxlen_unset_table = {
++ .procname = "foo",
++ .data = &data,
++ /*
++ * So .data is no longer NULL, but we tell proc_dointvec its
++ * length is 0, so it still shouldn't try to use it.
++ */
++ .maxlen = 0,
++ .mode = 0644,
++ .proc_handler = proc_dointvec,
++ .extra1 = &i_zero,
++ .extra2 = &i_one_hundred,
++ };
++ void __user *buffer = (void __user *)kunit_kzalloc(test, sizeof(int),
++ GFP_USER);
++ size_t len;
++ loff_t pos;
++
++ /*
++ * As before, we don't care what buffer length is because proc_dointvec
++ * cannot do anything because its internal .data buffer has zero length.
++ */
++ len = 1234;
++ KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&data_maxlen_unset_table,
++ KUNIT_PROC_READ, buffer, &len,
++ &pos));
++ KUNIT_EXPECT_EQ(test, (size_t)0, len);
++
++ /*
++ * See previous comment.
++ */
++ len = 1234;
++ KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&data_maxlen_unset_table,
++ KUNIT_PROC_WRITE, buffer, &len,
++ &pos));
++ KUNIT_EXPECT_EQ(test, (size_t)0, len);
++}
++
++/*
++ * Here we provide a valid struct ctl_table, but we try to read and write from
++ * it using a buffer of zero length, so it should still fail in a similar way as
++ * before.
++ */
++static void sysctl_test_api_dointvec_table_len_is_zero(struct kunit *test)
++{
++ int data = 0;
++ /* Good table. */
++ struct ctl_table table = {
++ .procname = "foo",
++ .data = &data,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = proc_dointvec,
++ .extra1 = &i_zero,
++ .extra2 = &i_one_hundred,
++ };
++ void __user *buffer = (void __user *)kunit_kzalloc(test, sizeof(int),
++ GFP_USER);
++ /*
++ * However, now our read/write buffer has zero length.
++ */
++ size_t len = 0;
++ loff_t pos;
++
++ KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&table, KUNIT_PROC_READ, buffer,
++ &len, &pos));
++ KUNIT_EXPECT_EQ(test, (size_t)0, len);
++
++ KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&table, KUNIT_PROC_WRITE, buffer,
++ &len, &pos));
++ KUNIT_EXPECT_EQ(test, (size_t)0, len);
++}
++
++/*
++ * Test that proc_dointvec refuses to read when the file position is non-zero.
++ */
++static void sysctl_test_api_dointvec_table_read_but_position_set(
++ struct kunit *test)
++{
++ int data = 0;
++ /* Good table. */
++ struct ctl_table table = {
++ .procname = "foo",
++ .data = &data,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = proc_dointvec,
++ .extra1 = &i_zero,
++ .extra2 = &i_one_hundred,
++ };
++ void __user *buffer = (void __user *)kunit_kzalloc(test, sizeof(int),
++ GFP_USER);
++ /*
++ * We don't care about our buffer length because we start off with a
++ * non-zero file position.
++ */
++ size_t len = 1234;
++ /*
++ * proc_dointvec should refuse to read into the buffer since the file
++ * pos is non-zero.
++ */
++ loff_t pos = 1;
++
++ KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&table, KUNIT_PROC_READ, buffer,
++ &len, &pos));
++ KUNIT_EXPECT_EQ(test, (size_t)0, len);
++}
++
++/*
++ * Test that we can read a two digit number in a sufficiently sized buffer.
++ * Nothing fancy.
++ */
++static void sysctl_test_dointvec_read_happy_single_positive(struct kunit *test)
++{
++ int data = 0;
++ /* Good table. */
++ struct ctl_table table = {
++ .procname = "foo",
++ .data = &data,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = proc_dointvec,
++ .extra1 = &i_zero,
++ .extra2 = &i_one_hundred,
++ };
++ size_t len = 4;
++ loff_t pos = 0;
++ char *buffer = kunit_kzalloc(test, len, GFP_USER);
++ char __user *user_buffer = (char __user *)buffer;
++ /* Store 13 in the data field. */
++ *((int *)table.data) = 13;
++
++ KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&table, KUNIT_PROC_READ,
++ user_buffer, &len, &pos));
++ KUNIT_ASSERT_EQ(test, (size_t)3, len);
++ buffer[len] = '\0';
++ /* And we read 13 back out. */
++ KUNIT_EXPECT_STREQ(test, "13\n", buffer);
++}
++
++/*
++ * Same as previous test, just now with negative numbers.
++ */
++static void sysctl_test_dointvec_read_happy_single_negative(struct kunit *test)
++{
++ int data = 0;
++ /* Good table. */
++ struct ctl_table table = {
++ .procname = "foo",
++ .data = &data,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = proc_dointvec,
++ .extra1 = &i_zero,
++ .extra2 = &i_one_hundred,
++ };
++ size_t len = 5;
++ loff_t pos = 0;
++ char *buffer = kunit_kzalloc(test, len, GFP_USER);
++ char __user *user_buffer = (char __user *)buffer;
++ *((int *)table.data) = -16;
++
++ KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&table, KUNIT_PROC_READ,
++ user_buffer, &len, &pos));
++ KUNIT_ASSERT_EQ(test, (size_t)4, len);
++ buffer[len] = '\0';
++ KUNIT_EXPECT_STREQ(test, "-16\n", (char *)buffer);
++}
++
++/*
++ * Test that a simple positive write works.
++ */
++static void sysctl_test_dointvec_write_happy_single_positive(struct kunit *test)
++{
++ int data = 0;
++ /* Good table. */
++ struct ctl_table table = {
++ .procname = "foo",
++ .data = &data,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = proc_dointvec,
++ .extra1 = &i_zero,
++ .extra2 = &i_one_hundred,
++ };
++ char input[] = "9";
++ size_t len = sizeof(input) - 1;
++ loff_t pos = 0;
++ char *buffer = kunit_kzalloc(test, len, GFP_USER);
++ char __user *user_buffer = (char __user *)buffer;
++
++ memcpy(buffer, input, len);
++
++ KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&table, KUNIT_PROC_WRITE,
++ user_buffer, &len, &pos));
++ KUNIT_EXPECT_EQ(test, sizeof(input) - 1, len);
++ KUNIT_EXPECT_EQ(test, sizeof(input) - 1, (size_t)pos);
++ KUNIT_EXPECT_EQ(test, 9, *((int *)table.data));
++}
++
++/*
++ * Same as previous test, but now with negative numbers.
++ */
++static void sysctl_test_dointvec_write_happy_single_negative(struct kunit *test)
++{
++ int data = 0;
++ struct ctl_table table = {
++ .procname = "foo",
++ .data = &data,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = proc_dointvec,
++ .extra1 = &i_zero,
++ .extra2 = &i_one_hundred,
++ };
++ char input[] = "-9";
++ size_t len = sizeof(input) - 1;
++ loff_t pos = 0;
++ char *buffer = kunit_kzalloc(test, len, GFP_USER);
++ char __user *user_buffer = (char __user *)buffer;
++
++ memcpy(buffer, input, len);
++
++ KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&table, KUNIT_PROC_WRITE,
++ user_buffer, &len, &pos));
++ KUNIT_EXPECT_EQ(test, sizeof(input) - 1, len);
++ KUNIT_EXPECT_EQ(test, sizeof(input) - 1, (size_t)pos);
++ KUNIT_EXPECT_EQ(test, -9, *((int *)table.data));
++}
++
++/*
++ * Test that writing a value smaller than the minimum possible value is not
++ * allowed.
++ */
++static void sysctl_test_api_dointvec_write_single_less_int_min(
++ struct kunit *test)
++{
++ int data = 0;
++ struct ctl_table table = {
++ .procname = "foo",
++ .data = &data,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = proc_dointvec,
++ .extra1 = &i_zero,
++ .extra2 = &i_one_hundred,
++ };
++ size_t max_len = 32, len = max_len;
++ loff_t pos = 0;
++ char *buffer = kunit_kzalloc(test, max_len, GFP_USER);
++ char __user *user_buffer = (char __user *)buffer;
++ unsigned long abs_of_less_than_min = (unsigned long)INT_MAX
++ - (INT_MAX + INT_MIN) + 1;
++
++ /*
++ * We use this rigmarole to create a string that contains a value one
++ * less than the minimum accepted value.
++ */
++ KUNIT_ASSERT_LT(test,
++ (size_t)snprintf(buffer, max_len, "-%lu",
++ abs_of_less_than_min),
++ max_len);
++
++ KUNIT_EXPECT_EQ(test, -EINVAL, proc_dointvec(&table, KUNIT_PROC_WRITE,
++ user_buffer, &len, &pos));
++ KUNIT_EXPECT_EQ(test, max_len, len);
++ KUNIT_EXPECT_EQ(test, 0, *((int *)table.data));
++}
++
++/*
++ * Test that writing a value greater than the maximum possible value is not
++ * allowed.
++ */
++static void sysctl_test_api_dointvec_write_single_greater_int_max(
++ struct kunit *test)
++{
++ int data = 0;
++ struct ctl_table table = {
++ .procname = "foo",
++ .data = &data,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = proc_dointvec,
++ .extra1 = &i_zero,
++ .extra2 = &i_one_hundred,
++ };
++ size_t max_len = 32, len = max_len;
++ loff_t pos = 0;
++ char *buffer = kunit_kzalloc(test, max_len, GFP_USER);
++ char __user *user_buffer = (char __user *)buffer;
++ unsigned long greater_than_max = (unsigned long)INT_MAX + 1;
++
++ KUNIT_ASSERT_GT(test, greater_than_max, (unsigned long)INT_MAX);
++ KUNIT_ASSERT_LT(test, (size_t)snprintf(buffer, max_len, "%lu",
++ greater_than_max),
++ max_len);
++ KUNIT_EXPECT_EQ(test, -EINVAL, proc_dointvec(&table, KUNIT_PROC_WRITE,
++ user_buffer, &len, &pos));
++ KUNIT_ASSERT_EQ(test, max_len, len);
++ KUNIT_EXPECT_EQ(test, 0, *((int *)table.data));
++}
++
++static struct kunit_case sysctl_test_cases[] = {
++ KUNIT_CASE(sysctl_test_api_dointvec_null_tbl_data),
++ KUNIT_CASE(sysctl_test_api_dointvec_table_maxlen_unset),
++ KUNIT_CASE(sysctl_test_api_dointvec_table_len_is_zero),
++ KUNIT_CASE(sysctl_test_api_dointvec_table_read_but_position_set),
++ KUNIT_CASE(sysctl_test_dointvec_read_happy_single_positive),
++ KUNIT_CASE(sysctl_test_dointvec_read_happy_single_negative),
++ KUNIT_CASE(sysctl_test_dointvec_write_happy_single_positive),
++ KUNIT_CASE(sysctl_test_dointvec_write_happy_single_negative),
++ KUNIT_CASE(sysctl_test_api_dointvec_write_single_less_int_min),
++ KUNIT_CASE(sysctl_test_api_dointvec_write_single_greater_int_max),
++ {}
++};
++
++static struct kunit_suite sysctl_test_suite = {
++ .name = "sysctl_test",
++ .test_cases = sysctl_test_cases,
++};
++
++kunit_test_suite(sysctl_test_suite);
+diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
+index ca69290bee2a3..4fc2af4367a7b 100644
+--- a/kernel/time/timekeeping.c
++++ b/kernel/time/timekeeping.c
+@@ -1005,9 +1005,8 @@ static int scale64_check_overflow(u64 mult, u64 div, u64 *base)
+ ((int)sizeof(u64)*8 - fls64(mult) < fls64(rem)))
+ return -EOVERFLOW;
+ tmp *= mult;
+- rem *= mult;
+
+- do_div(rem, div);
++ rem = div64_u64(rem * mult, div);
+ *base = tmp + rem;
+ return 0;
+ }
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index f9c2bdbbd8936..db8162b34ef64 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -3233,6 +3233,9 @@ int trace_array_printk(struct trace_array *tr,
+ if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
+ return 0;
+
++ if (!tr)
++ return -ENOENT;
++
+ va_start(ap, fmt);
+ ret = trace_array_vprintk(tr, ip, fmt, ap);
+ va_end(ap);
+@@ -8502,17 +8505,26 @@ static int __remove_instance(struct trace_array *tr)
+ return 0;
+ }
+
+-int trace_array_destroy(struct trace_array *tr)
++int trace_array_destroy(struct trace_array *this_tr)
+ {
++ struct trace_array *tr;
+ int ret;
+
+- if (!tr)
++ if (!this_tr)
+ return -EINVAL;
+
+ mutex_lock(&event_mutex);
+ mutex_lock(&trace_types_lock);
+
+- ret = __remove_instance(tr);
++ ret = -ENODEV;
++
++ /* Making sure trace array exists before destroying it. */
++ list_for_each_entry(tr, &ftrace_trace_arrays, list) {
++ if (tr == this_tr) {
++ ret = __remove_instance(tr);
++ break;
++ }
++ }
+
+ mutex_unlock(&trace_types_lock);
+ mutex_unlock(&event_mutex);
+@@ -9134,7 +9146,7 @@ __init static int tracer_alloc_buffers(void)
+ goto out_free_buffer_mask;
+
+ /* Only allocate trace_printk buffers if a trace_printk exists */
+- if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
++ if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
+ /* Must be called before global_trace.buffer is allocated */
+ trace_printk_init_buffers();
+
+diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
+index fc8e97328e540..78c146efb8623 100644
+--- a/kernel/trace/trace_entries.h
++++ b/kernel/trace/trace_entries.h
+@@ -174,7 +174,7 @@ FTRACE_ENTRY(kernel_stack, stack_entry,
+
+ F_STRUCT(
+ __field( int, size )
+- __dynamic_array(unsigned long, caller )
++ __array( unsigned long, caller, FTRACE_STACK_ENTRIES )
+ ),
+
+ F_printk("\t=> %ps\n\t=> %ps\n\t=> %ps\n"
+diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
+index ed9eb97b64b47..309b2b3c5349e 100644
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -793,6 +793,8 @@ int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
+ char *event = NULL, *sub = NULL, *match;
+ int ret;
+
++ if (!tr)
++ return -ENOENT;
+ /*
+ * The buf format can be <subsystem>:<event-name>
+ * *:<event-name> means any event by that name.
+diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
+index 8107574e8af9d..a616b314fb7ab 100644
+--- a/kernel/trace/trace_events_hist.c
++++ b/kernel/trace/trace_events_hist.c
+@@ -4770,7 +4770,6 @@ static int parse_var_defs(struct hist_trigger_data *hist_data)
+
+ s = kstrdup(field_str, GFP_KERNEL);
+ if (!s) {
+- kfree(hist_data->attrs->var_defs.name[n_vars]);
+ ret = -ENOMEM;
+ goto free;
+ }
+diff --git a/kernel/trace/trace_preemptirq.c b/kernel/trace/trace_preemptirq.c
+index 4d8e99fdbbbee..26b06b09c9f68 100644
+--- a/kernel/trace/trace_preemptirq.c
++++ b/kernel/trace/trace_preemptirq.c
+@@ -63,14 +63,14 @@ NOKPROBE_SYMBOL(trace_hardirqs_on_caller);
+
+ __visible void trace_hardirqs_off_caller(unsigned long caller_addr)
+ {
++ lockdep_hardirqs_off(CALLER_ADDR0);
++
+ if (!this_cpu_read(tracing_irq_cpu)) {
+ this_cpu_write(tracing_irq_cpu, 1);
+ tracer_hardirqs_off(CALLER_ADDR0, caller_addr);
+ if (!in_nmi())
+ trace_irq_disable_rcuidle(CALLER_ADDR0, caller_addr);
+ }
+-
+- lockdep_hardirqs_off(CALLER_ADDR0);
+ }
+ EXPORT_SYMBOL(trace_hardirqs_off_caller);
+ NOKPROBE_SYMBOL(trace_hardirqs_off_caller);
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index 1a0c224af6fb3..4aa268582a225 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -864,7 +864,8 @@ void wq_worker_running(struct task_struct *task)
+ * @task: task going to sleep
+ *
+ * This function is called from schedule() when a busy worker is
+- * going to sleep.
++ * going to sleep. Preemption needs to be disabled to protect ->sleeping
++ * assignment.
+ */
+ void wq_worker_sleeping(struct task_struct *task)
+ {
+@@ -881,7 +882,8 @@ void wq_worker_sleeping(struct task_struct *task)
+
+ pool = worker->pool;
+
+- if (WARN_ON_ONCE(worker->sleeping))
++ /* Return if preempted before wq_worker_running() was reached */
++ if (worker->sleeping)
+ return;
+
+ worker->sleeping = 1;
+diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
+index 6118d99117daa..ee00c6c8a373e 100644
+--- a/lib/Kconfig.debug
++++ b/lib/Kconfig.debug
+@@ -1939,6 +1939,17 @@ config TEST_SYSCTL
+
+ If unsure, say N.
+
++config SYSCTL_KUNIT_TEST
++ bool "KUnit test for sysctl"
++ depends on KUNIT
++ help
++ This builds the proc sysctl unit test, which runs on boot.
++ Tests the API contract and implementation correctness of sysctl.
++ For more information on KUnit and unit tests in general please refer
++ to the KUnit documentation in Documentation/dev-tools/kunit/.
++
++ If unsure, say N.
++
+ config TEST_UDELAY
+ tristate "udelay test driver"
+ help
+diff --git a/lib/string.c b/lib/string.c
+index 08ec58cc673b5..abfaa05181e2c 100644
+--- a/lib/string.c
++++ b/lib/string.c
+@@ -272,6 +272,30 @@ ssize_t strscpy_pad(char *dest, const char *src, size_t count)
+ }
+ EXPORT_SYMBOL(strscpy_pad);
+
++/**
++ * stpcpy - copy a string from src to dest returning a pointer to the new end
++ * of dest, including src's %NUL-terminator. May overrun dest.
++ * @dest: pointer to end of string being copied into. Must be large enough
++ * to receive copy.
++ * @src: pointer to the beginning of string being copied from. Must not overlap
++ * dest.
++ *
++ * stpcpy differs from strcpy in a key way: the return value is a pointer
++ * to the new %NUL-terminating character in @dest. (For strcpy, the return
++ * value is a pointer to the start of @dest). This interface is considered
++ * unsafe as it doesn't perform bounds checking of the inputs. As such it's
++ * not recommended for usage. Instead, its definition is provided in case
++ * the compiler lowers other libcalls to stpcpy.
++ */
++char *stpcpy(char *__restrict__ dest, const char *__restrict__ src);
++char *stpcpy(char *__restrict__ dest, const char *__restrict__ src)
++{
++ while ((*dest++ = *src++) != '\0')
++ /* nothing */;
++ return --dest;
++}
++EXPORT_SYMBOL(stpcpy);
++
+ #ifndef __HAVE_ARCH_STRCAT
+ /**
+ * strcat - Append one %NUL-terminated string to another
+diff --git a/mm/filemap.c b/mm/filemap.c
+index 18c1f58300742..51b2cb5aa5030 100644
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -2845,6 +2845,14 @@ filler:
+ unlock_page(page);
+ goto out;
+ }
++
++ /*
++ * A previous I/O error may have been due to temporary
++ * failures.
++ * Clear page error before actual read, PG_error will be
++ * set again if read page fails.
++ */
++ ClearPageError(page);
+ goto filler;
+
+ out:
+diff --git a/mm/gup.c b/mm/gup.c
+index 4a8e969a6e594..3ef769529548a 100644
+--- a/mm/gup.c
++++ b/mm/gup.c
+@@ -2184,13 +2184,13 @@ static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
+ return 1;
+ }
+
+-static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
++static int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr, unsigned long end,
+ unsigned int flags, struct page **pages, int *nr)
+ {
+ unsigned long next;
+ pmd_t *pmdp;
+
+- pmdp = pmd_offset(&pud, addr);
++ pmdp = pmd_offset_lockless(pudp, pud, addr);
+ do {
+ pmd_t pmd = READ_ONCE(*pmdp);
+
+@@ -2227,13 +2227,13 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
+ return 1;
+ }
+
+-static int gup_pud_range(p4d_t p4d, unsigned long addr, unsigned long end,
++static int gup_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr, unsigned long end,
+ unsigned int flags, struct page **pages, int *nr)
+ {
+ unsigned long next;
+ pud_t *pudp;
+
+- pudp = pud_offset(&p4d, addr);
++ pudp = pud_offset_lockless(p4dp, p4d, addr);
+ do {
+ pud_t pud = READ_ONCE(*pudp);
+
+@@ -2248,20 +2248,20 @@ static int gup_pud_range(p4d_t p4d, unsigned long addr, unsigned long end,
+ if (!gup_huge_pd(__hugepd(pud_val(pud)), addr,
+ PUD_SHIFT, next, flags, pages, nr))
+ return 0;
+- } else if (!gup_pmd_range(pud, addr, next, flags, pages, nr))
++ } else if (!gup_pmd_range(pudp, pud, addr, next, flags, pages, nr))
+ return 0;
+ } while (pudp++, addr = next, addr != end);
+
+ return 1;
+ }
+
+-static int gup_p4d_range(pgd_t pgd, unsigned long addr, unsigned long end,
++static int gup_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr, unsigned long end,
+ unsigned int flags, struct page **pages, int *nr)
+ {
+ unsigned long next;
+ p4d_t *p4dp;
+
+- p4dp = p4d_offset(&pgd, addr);
++ p4dp = p4d_offset_lockless(pgdp, pgd, addr);
+ do {
+ p4d_t p4d = READ_ONCE(*p4dp);
+
+@@ -2273,7 +2273,7 @@ static int gup_p4d_range(pgd_t pgd, unsigned long addr, unsigned long end,
+ if (!gup_huge_pd(__hugepd(p4d_val(p4d)), addr,
+ P4D_SHIFT, next, flags, pages, nr))
+ return 0;
+- } else if (!gup_pud_range(p4d, addr, next, flags, pages, nr))
++ } else if (!gup_pud_range(p4dp, p4d, addr, next, flags, pages, nr))
+ return 0;
+ } while (p4dp++, addr = next, addr != end);
+
+@@ -2301,7 +2301,7 @@ static void gup_pgd_range(unsigned long addr, unsigned long end,
+ if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
+ PGDIR_SHIFT, next, flags, pages, nr))
+ return;
+- } else if (!gup_p4d_range(pgd, addr, next, flags, pages, nr))
++ } else if (!gup_p4d_range(pgdp, pgd, addr, next, flags, pages, nr))
+ return;
+ } while (pgdp++, addr = next, addr != end);
+ }
+diff --git a/mm/kmemleak.c b/mm/kmemleak.c
+index 2446076633631..312942d784058 100644
+--- a/mm/kmemleak.c
++++ b/mm/kmemleak.c
+@@ -1947,7 +1947,7 @@ void __init kmemleak_init(void)
+ create_object((unsigned long)__bss_start, __bss_stop - __bss_start,
+ KMEMLEAK_GREY, GFP_ATOMIC);
+ /* only register .data..ro_after_init if not within .data */
+- if (__start_ro_after_init < _sdata || __end_ro_after_init > _edata)
++ if (&__start_ro_after_init < &_sdata || &__end_ro_after_init > &_edata)
+ create_object((unsigned long)__start_ro_after_init,
+ __end_ro_after_init - __start_ro_after_init,
+ KMEMLEAK_GREY, GFP_ATOMIC);
+diff --git a/mm/madvise.c b/mm/madvise.c
+index 26f7954865ed9..1107e99e498b2 100644
+--- a/mm/madvise.c
++++ b/mm/madvise.c
+@@ -380,9 +380,9 @@ huge_unlock:
+ return 0;
+ }
+
++regular_page:
+ if (pmd_trans_unstable(pmd))
+ return 0;
+-regular_page:
+ #endif
+ tlb_change_page_size(tlb, PAGE_SIZE);
+ orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 402c8bc65e08d..ca1632850fb76 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -5489,7 +5489,6 @@ static int mem_cgroup_move_account(struct page *page,
+ {
+ struct lruvec *from_vec, *to_vec;
+ struct pglist_data *pgdat;
+- unsigned long flags;
+ unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
+ int ret;
+ bool anon;
+@@ -5516,18 +5515,13 @@ static int mem_cgroup_move_account(struct page *page,
+ from_vec = mem_cgroup_lruvec(pgdat, from);
+ to_vec = mem_cgroup_lruvec(pgdat, to);
+
+- spin_lock_irqsave(&from->move_lock, flags);
++ lock_page_memcg(page);
+
+ if (!anon && page_mapped(page)) {
+ __mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages);
+ __mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages);
+ }
+
+- /*
+- * move_lock grabbed above and caller set from->moving_account, so
+- * mod_memcg_page_state will serialize updates to PageDirty.
+- * So mapping should be stable for dirty pages.
+- */
+ if (!anon && PageDirty(page)) {
+ struct address_space *mapping = page_mapping(page);
+
+@@ -5543,15 +5537,23 @@ static int mem_cgroup_move_account(struct page *page,
+ }
+
+ /*
++ * All state has been migrated, let's switch to the new memcg.
++ *
+ * It is safe to change page->mem_cgroup here because the page
+- * is referenced, charged, and isolated - we can't race with
+- * uncharging, charging, migration, or LRU putback.
++ * is referenced, charged, isolated, and locked: we can't race
++ * with (un)charging, migration, LRU putback, or anything else
++ * that would rely on a stable page->mem_cgroup.
++ *
++ * Note that lock_page_memcg is a memcg lock, not a page lock,
++ * to save space. As soon as we switch page->mem_cgroup to a
++ * new memcg that isn't locked, the above state can change
++ * concurrently again. Make sure we're truly done with it.
+ */
++ smp_mb();
+
+- /* caller should have done css_get */
+- page->mem_cgroup = to;
++ page->mem_cgroup = to; /* caller should have done css_get */
+
+- spin_unlock_irqrestore(&from->move_lock, flags);
++ __unlock_page_memcg(from);
+
+ ret = 0;
+
+diff --git a/mm/memory.c b/mm/memory.c
+index cb7c940cf800c..2157bb28117ac 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -118,6 +118,18 @@ int randomize_va_space __read_mostly =
+ 2;
+ #endif
+
++#ifndef arch_faults_on_old_pte
++static inline bool arch_faults_on_old_pte(void)
++{
++ /*
++ * Those arches which don't have hw access flag feature need to
++ * implement their own helper. By default, "true" means pagefault
++ * will be hit on old pte.
++ */
++ return true;
++}
++#endif
++
+ static int __init disable_randmaps(char *s)
+ {
+ randomize_va_space = 0;
+@@ -2145,32 +2157,101 @@ static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
+ return same;
+ }
+
+-static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma)
++static inline bool cow_user_page(struct page *dst, struct page *src,
++ struct vm_fault *vmf)
+ {
++ bool ret;
++ void *kaddr;
++ void __user *uaddr;
++ bool locked = false;
++ struct vm_area_struct *vma = vmf->vma;
++ struct mm_struct *mm = vma->vm_mm;
++ unsigned long addr = vmf->address;
++
+ debug_dma_assert_idle(src);
+
++ if (likely(src)) {
++ copy_user_highpage(dst, src, addr, vma);
++ return true;
++ }
++
+ /*
+ * If the source page was a PFN mapping, we don't have
+ * a "struct page" for it. We do a best-effort copy by
+ * just copying from the original user address. If that
+ * fails, we just zero-fill it. Live with it.
+ */
+- if (unlikely(!src)) {
+- void *kaddr = kmap_atomic(dst);
+- void __user *uaddr = (void __user *)(va & PAGE_MASK);
++ kaddr = kmap_atomic(dst);
++ uaddr = (void __user *)(addr & PAGE_MASK);
++
++ /*
++ * On architectures with software "accessed" bits, we would
++ * take a double page fault, so mark it accessed here.
++ */
++ if (arch_faults_on_old_pte() && !pte_young(vmf->orig_pte)) {
++ pte_t entry;
++
++ vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
++ locked = true;
++ if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
++ /*
++ * Other thread has already handled the fault
++ * and we don't need to do anything. If it's
++ * not the case, the fault will be triggered
++ * again on the same address.
++ */
++ ret = false;
++ goto pte_unlock;
++ }
++
++ entry = pte_mkyoung(vmf->orig_pte);
++ if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0))
++ update_mmu_cache(vma, addr, vmf->pte);
++ }
++
++ /*
++ * This really shouldn't fail, because the page is there
++ * in the page tables. But it might just be unreadable,
++ * in which case we just give up and fill the result with
++ * zeroes.
++ */
++ if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
++ if (locked)
++ goto warn;
++
++ /* Re-validate under PTL if the page is still mapped */
++ vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
++ locked = true;
++ if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
++ /* The PTE changed under us. Retry page fault. */
++ ret = false;
++ goto pte_unlock;
++ }
+
+ /*
+- * This really shouldn't fail, because the page is there
+- * in the page tables. But it might just be unreadable,
+- * in which case we just give up and fill the result with
+- * zeroes.
++ * The same page can be mapped back since last copy attempt.
++ * Try to copy again under PTL.
+ */
+- if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE))
++ if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
++ /*
++ * Give a warn in case there can be some obscure
++ * use-case
++ */
++warn:
++ WARN_ON_ONCE(1);
+ clear_page(kaddr);
+- kunmap_atomic(kaddr);
+- flush_dcache_page(dst);
+- } else
+- copy_user_highpage(dst, src, va, vma);
++ }
++ }
++
++ ret = true;
++
++pte_unlock:
++ if (locked)
++ pte_unmap_unlock(vmf->pte, vmf->ptl);
++ kunmap_atomic(kaddr);
++ flush_dcache_page(dst);
++
++ return ret;
+ }
+
+ static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma)
+@@ -2342,7 +2423,19 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
+ vmf->address);
+ if (!new_page)
+ goto oom;
+- cow_user_page(new_page, old_page, vmf->address, vma);
++
++ if (!cow_user_page(new_page, old_page, vmf)) {
++ /*
++ * COW failed, if the fault was solved by other,
++ * it's fine. If not, userspace would re-fault on
++ * the same address and we will handle the fault
++ * from the second attempt.
++ */
++ put_page(new_page);
++ if (old_page)
++ put_page(old_page);
++ return 0;
++ }
+ }
+
+ if (mem_cgroup_try_charge_delay(new_page, mm, GFP_KERNEL, &memcg, false))
+diff --git a/mm/mmap.c b/mm/mmap.c
+index a3584a90c55c2..ba78f1f1b1bd1 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -2126,6 +2126,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ info.low_limit = mm->mmap_base;
+ info.high_limit = mmap_end;
+ info.align_mask = 0;
++ info.align_offset = 0;
+ return vm_unmapped_area(&info);
+ }
+ #endif
+@@ -2167,6 +2168,7 @@ arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
+ info.low_limit = max(PAGE_SIZE, mmap_min_addr);
+ info.high_limit = arch_get_mmap_base(addr, mm->mmap_base);
+ info.align_mask = 0;
++ info.align_offset = 0;
+ addr = vm_unmapped_area(&info);
+
+ /*
+diff --git a/mm/pagewalk.c b/mm/pagewalk.c
+index d48c2a986ea3f..4eb09e0898817 100644
+--- a/mm/pagewalk.c
++++ b/mm/pagewalk.c
+@@ -16,9 +16,9 @@ static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
+ err = ops->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
+ if (err)
+ break;
+- addr += PAGE_SIZE;
+- if (addr == end)
++ if (addr >= end - PAGE_SIZE)
+ break;
++ addr += PAGE_SIZE;
+ pte++;
+ }
+
+diff --git a/mm/slub.c b/mm/slub.c
+index 822ba07245291..d69934eac9e94 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -533,15 +533,32 @@ static void print_section(char *level, char *text, u8 *addr,
+ metadata_access_disable();
+ }
+
++/*
++ * See comment in calculate_sizes().
++ */
++static inline bool freeptr_outside_object(struct kmem_cache *s)
++{
++ return s->offset >= s->inuse;
++}
++
++/*
++ * Return offset of the end of info block which is inuse + free pointer if
++ * not overlapping with object.
++ */
++static inline unsigned int get_info_end(struct kmem_cache *s)
++{
++ if (freeptr_outside_object(s))
++ return s->inuse + sizeof(void *);
++ else
++ return s->inuse;
++}
++
+ static struct track *get_track(struct kmem_cache *s, void *object,
+ enum track_item alloc)
+ {
+ struct track *p;
+
+- if (s->offset)
+- p = object + s->offset + sizeof(void *);
+- else
+- p = object + s->inuse;
++ p = object + get_info_end(s);
+
+ return p + alloc;
+ }
+@@ -682,10 +699,7 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
+ print_section(KERN_ERR, "Redzone ", p + s->object_size,
+ s->inuse - s->object_size);
+
+- if (s->offset)
+- off = s->offset + sizeof(void *);
+- else
+- off = s->inuse;
++ off = get_info_end(s);
+
+ if (s->flags & SLAB_STORE_USER)
+ off += 2 * sizeof(struct track);
+@@ -776,7 +790,7 @@ static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
+ * object address
+ * Bytes of the object to be managed.
+ * If the freepointer may overlay the object then the free
+- * pointer is the first word of the object.
++ * pointer is at the middle of the object.
+ *
+ * Poisoning uses 0x6b (POISON_FREE) and the last byte is
+ * 0xa5 (POISON_END)
+@@ -810,11 +824,7 @@ static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
+
+ static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
+ {
+- unsigned long off = s->inuse; /* The end of info */
+-
+- if (s->offset)
+- /* Freepointer is placed after the object. */
+- off += sizeof(void *);
++ unsigned long off = get_info_end(s); /* The end of info */
+
+ if (s->flags & SLAB_STORE_USER)
+ /* We also have user information there */
+@@ -900,7 +910,7 @@ static int check_object(struct kmem_cache *s, struct page *page,
+ check_pad_bytes(s, page, p);
+ }
+
+- if (!s->offset && val == SLUB_RED_ACTIVE)
++ if (!freeptr_outside_object(s) && val == SLUB_RED_ACTIVE)
+ /*
+ * Object and freepointer overlap. Cannot check
+ * freepointer while object is allocated.
+@@ -3585,6 +3595,11 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
+ *
+ * This is the case if we do RCU, have a constructor or
+ * destructor or are poisoning the objects.
++ *
++ * The assumption that s->offset >= s->inuse means free
++ * pointer is outside of the object is used in the
++ * freeptr_outside_object() function. If that is no
++ * longer true, the function needs to be modified.
+ */
+ s->offset = size;
+ size += sizeof(void *);
+diff --git a/mm/swap_state.c b/mm/swap_state.c
+index 4ce014dc4571a..7c434fcfff0dd 100644
+--- a/mm/swap_state.c
++++ b/mm/swap_state.c
+@@ -511,10 +511,11 @@ static unsigned long swapin_nr_pages(unsigned long offset)
+ return 1;
+
+ hits = atomic_xchg(&swapin_readahead_hits, 0);
+- pages = __swapin_nr_pages(prev_offset, offset, hits, max_pages,
++ pages = __swapin_nr_pages(READ_ONCE(prev_offset), offset, hits,
++ max_pages,
+ atomic_read(&last_readahead_pages));
+ if (!hits)
+- prev_offset = offset;
++ WRITE_ONCE(prev_offset, offset);
+ atomic_set(&last_readahead_pages, pages);
+
+ return pages;
+diff --git a/mm/swapfile.c b/mm/swapfile.c
+index 891a3ef486511..cf62bdb7b3045 100644
+--- a/mm/swapfile.c
++++ b/mm/swapfile.c
+@@ -1038,7 +1038,7 @@ start_over:
+ goto nextsi;
+ }
+ if (size == SWAPFILE_CLUSTER) {
+- if (!(si->flags & SWP_FS))
++ if (si->flags & SWP_BLKDEV)
+ n_ret = swap_alloc_cluster(si, swp_entries);
+ } else
+ n_ret = scan_swap_map_slots(si, SWAP_HAS_CACHE,
+@@ -2132,7 +2132,7 @@ int try_to_unuse(unsigned int type, bool frontswap,
+ swp_entry_t entry;
+ unsigned int i;
+
+- if (!si->inuse_pages)
++ if (!READ_ONCE(si->inuse_pages))
+ return 0;
+
+ if (!frontswap)
+@@ -2148,7 +2148,7 @@ retry:
+
+ spin_lock(&mmlist_lock);
+ p = &init_mm.mmlist;
+- while (si->inuse_pages &&
++ while (READ_ONCE(si->inuse_pages) &&
+ !signal_pending(current) &&
+ (p = p->next) != &init_mm.mmlist) {
+
+@@ -2177,7 +2177,7 @@ retry:
+ mmput(prev_mm);
+
+ i = 0;
+- while (si->inuse_pages &&
++ while (READ_ONCE(si->inuse_pages) &&
+ !signal_pending(current) &&
+ (i = find_next_to_unuse(si, i, frontswap)) != 0) {
+
+@@ -2219,7 +2219,7 @@ retry:
+ * been preempted after get_swap_page(), temporarily hiding that swap.
+ * It's easy and robust (though cpu-intensive) just to keep retrying.
+ */
+- if (si->inuse_pages) {
++ if (READ_ONCE(si->inuse_pages)) {
+ if (!signal_pending(current))
+ goto retry;
+ retval = -EINTR;
+@@ -2737,10 +2737,10 @@ static void *swap_next(struct seq_file *swap, void *v, loff_t *pos)
+ else
+ type = si->type + 1;
+
++ ++(*pos);
+ for (; (si = swap_type_to_swap_info(type)); type++) {
+ if (!(si->flags & SWP_USED) || !si->swap_map)
+ continue;
+- ++*pos;
+ return si;
+ }
+
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index 6db9176d8c63e..10feb872d9a4f 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -3168,8 +3168,9 @@ static bool allow_direct_reclaim(pg_data_t *pgdat)
+
+ /* kswapd must be awake if processes are being throttled */
+ if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) {
+- pgdat->kswapd_classzone_idx = min(pgdat->kswapd_classzone_idx,
+- (enum zone_type)ZONE_NORMAL);
++ if (READ_ONCE(pgdat->kswapd_classzone_idx) > ZONE_NORMAL)
++ WRITE_ONCE(pgdat->kswapd_classzone_idx, ZONE_NORMAL);
++
+ wake_up_interruptible(&pgdat->kswapd_wait);
+ }
+
+@@ -3801,9 +3802,9 @@ out:
+ static enum zone_type kswapd_classzone_idx(pg_data_t *pgdat,
+ enum zone_type prev_classzone_idx)
+ {
+- if (pgdat->kswapd_classzone_idx == MAX_NR_ZONES)
+- return prev_classzone_idx;
+- return pgdat->kswapd_classzone_idx;
++ enum zone_type curr_idx = READ_ONCE(pgdat->kswapd_classzone_idx);
++
++ return curr_idx == MAX_NR_ZONES ? prev_classzone_idx : curr_idx;
+ }
+
+ static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_order,
+@@ -3847,8 +3848,11 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_o
+ * the previous request that slept prematurely.
+ */
+ if (remaining) {
+- pgdat->kswapd_classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx);
+- pgdat->kswapd_order = max(pgdat->kswapd_order, reclaim_order);
++ WRITE_ONCE(pgdat->kswapd_classzone_idx,
++ kswapd_classzone_idx(pgdat, classzone_idx));
++
++ if (READ_ONCE(pgdat->kswapd_order) < reclaim_order)
++ WRITE_ONCE(pgdat->kswapd_order, reclaim_order);
+ }
+
+ finish_wait(&pgdat->kswapd_wait, &wait);
+@@ -3925,12 +3929,12 @@ static int kswapd(void *p)
+ tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
+ set_freezable();
+
+- pgdat->kswapd_order = 0;
+- pgdat->kswapd_classzone_idx = MAX_NR_ZONES;
++ WRITE_ONCE(pgdat->kswapd_order, 0);
++ WRITE_ONCE(pgdat->kswapd_classzone_idx, MAX_NR_ZONES);
+ for ( ; ; ) {
+ bool ret;
+
+- alloc_order = reclaim_order = pgdat->kswapd_order;
++ alloc_order = reclaim_order = READ_ONCE(pgdat->kswapd_order);
+ classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx);
+
+ kswapd_try_sleep:
+@@ -3938,10 +3942,10 @@ kswapd_try_sleep:
+ classzone_idx);
+
+ /* Read the new order and classzone_idx */
+- alloc_order = reclaim_order = pgdat->kswapd_order;
++ alloc_order = reclaim_order = READ_ONCE(pgdat->kswapd_order);
+ classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx);
+- pgdat->kswapd_order = 0;
+- pgdat->kswapd_classzone_idx = MAX_NR_ZONES;
++ WRITE_ONCE(pgdat->kswapd_order, 0);
++ WRITE_ONCE(pgdat->kswapd_classzone_idx, MAX_NR_ZONES);
+
+ ret = try_to_freeze();
+ if (kthread_should_stop())
+@@ -3985,20 +3989,23 @@ void wakeup_kswapd(struct zone *zone, gfp_t gfp_flags, int order,
+ enum zone_type classzone_idx)
+ {
+ pg_data_t *pgdat;
++ enum zone_type curr_idx;
+
+ if (!managed_zone(zone))
+ return;
+
+ if (!cpuset_zone_allowed(zone, gfp_flags))
+ return;
++
+ pgdat = zone->zone_pgdat;
++ curr_idx = READ_ONCE(pgdat->kswapd_classzone_idx);
++
++ if (curr_idx == MAX_NR_ZONES || curr_idx < classzone_idx)
++ WRITE_ONCE(pgdat->kswapd_classzone_idx, classzone_idx);
++
++ if (READ_ONCE(pgdat->kswapd_order) < order)
++ WRITE_ONCE(pgdat->kswapd_order, order);
+
+- if (pgdat->kswapd_classzone_idx == MAX_NR_ZONES)
+- pgdat->kswapd_classzone_idx = classzone_idx;
+- else
+- pgdat->kswapd_classzone_idx = max(pgdat->kswapd_classzone_idx,
+- classzone_idx);
+- pgdat->kswapd_order = max(pgdat->kswapd_order, order);
+ if (!waitqueue_active(&pgdat->kswapd_wait))
+ return;
+
+diff --git a/net/atm/lec.c b/net/atm/lec.c
+index 5a77c235a212f..3625a04a6c701 100644
+--- a/net/atm/lec.c
++++ b/net/atm/lec.c
+@@ -1269,6 +1269,12 @@ static void lec_arp_clear_vccs(struct lec_arp_table *entry)
+ entry->vcc = NULL;
+ }
+ if (entry->recv_vcc) {
++ struct atm_vcc *vcc = entry->recv_vcc;
++ struct lec_vcc_priv *vpriv = LEC_VCC_PRIV(vcc);
++
++ kfree(vpriv);
++ vcc->user_back = NULL;
++
+ entry->recv_vcc->push = entry->old_recv_push;
+ vcc_release_async(entry->recv_vcc, -EPIPE);
+ entry->recv_vcc = NULL;
+diff --git a/net/atm/proc.c b/net/atm/proc.c
+index d79221fd4dae2..c318967073139 100644
+--- a/net/atm/proc.c
++++ b/net/atm/proc.c
+@@ -134,8 +134,7 @@ static void vcc_seq_stop(struct seq_file *seq, void *v)
+ static void *vcc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+ {
+ v = vcc_walk(seq, 1);
+- if (v)
+- (*pos)++;
++ (*pos)++;
+ return v;
+ }
+
+diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
+index 5f6309ade1ea1..a6b26ca5c6973 100644
+--- a/net/batman-adv/bridge_loop_avoidance.c
++++ b/net/batman-adv/bridge_loop_avoidance.c
+@@ -25,6 +25,7 @@
+ #include <linux/lockdep.h>
+ #include <linux/netdevice.h>
+ #include <linux/netlink.h>
++#include <linux/preempt.h>
+ #include <linux/rculist.h>
+ #include <linux/rcupdate.h>
+ #include <linux/seq_file.h>
+@@ -83,11 +84,12 @@ static inline u32 batadv_choose_claim(const void *data, u32 size)
+ */
+ static inline u32 batadv_choose_backbone_gw(const void *data, u32 size)
+ {
+- const struct batadv_bla_claim *claim = (struct batadv_bla_claim *)data;
++ const struct batadv_bla_backbone_gw *gw;
+ u32 hash = 0;
+
+- hash = jhash(&claim->addr, sizeof(claim->addr), hash);
+- hash = jhash(&claim->vid, sizeof(claim->vid), hash);
++ gw = (struct batadv_bla_backbone_gw *)data;
++ hash = jhash(&gw->orig, sizeof(gw->orig), hash);
++ hash = jhash(&gw->vid, sizeof(gw->vid), hash);
+
+ return hash % size;
+ }
+@@ -1579,13 +1581,16 @@ int batadv_bla_init(struct batadv_priv *bat_priv)
+ }
+
+ /**
+- * batadv_bla_check_bcast_duplist() - Check if a frame is in the broadcast dup.
++ * batadv_bla_check_duplist() - Check if a frame is in the broadcast dup.
+ * @bat_priv: the bat priv with all the soft interface information
+- * @skb: contains the bcast_packet to be checked
++ * @skb: contains the multicast packet to be checked
++ * @payload_ptr: pointer to position inside the head buffer of the skb
++ * marking the start of the data to be CRC'ed
++ * @orig: originator mac address, NULL if unknown
+ *
+- * check if it is on our broadcast list. Another gateway might
+- * have sent the same packet because it is connected to the same backbone,
+- * so we have to remove this duplicate.
++ * Check if it is on our broadcast list. Another gateway might have sent the
++ * same packet because it is connected to the same backbone, so we have to
++ * remove this duplicate.
+ *
+ * This is performed by checking the CRC, which will tell us
+ * with a good chance that it is the same packet. If it is furthermore
+@@ -1594,19 +1599,17 @@ int batadv_bla_init(struct batadv_priv *bat_priv)
+ *
+ * Return: true if a packet is in the duplicate list, false otherwise.
+ */
+-bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
+- struct sk_buff *skb)
++static bool batadv_bla_check_duplist(struct batadv_priv *bat_priv,
++ struct sk_buff *skb, u8 *payload_ptr,
++ const u8 *orig)
+ {
+- int i, curr;
+- __be32 crc;
+- struct batadv_bcast_packet *bcast_packet;
+ struct batadv_bcast_duplist_entry *entry;
+ bool ret = false;
+-
+- bcast_packet = (struct batadv_bcast_packet *)skb->data;
++ int i, curr;
++ __be32 crc;
+
+ /* calculate the crc ... */
+- crc = batadv_skb_crc32(skb, (u8 *)(bcast_packet + 1));
++ crc = batadv_skb_crc32(skb, payload_ptr);
+
+ spin_lock_bh(&bat_priv->bla.bcast_duplist_lock);
+
+@@ -1625,8 +1628,21 @@ bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
+ if (entry->crc != crc)
+ continue;
+
+- if (batadv_compare_eth(entry->orig, bcast_packet->orig))
+- continue;
++ /* are the originators both known and not anonymous? */
++ if (orig && !is_zero_ether_addr(orig) &&
++ !is_zero_ether_addr(entry->orig)) {
++ /* If known, check if the new frame came from
++ * the same originator:
++ * We are safe to take identical frames from the
++ * same orig, if known, as multiplications in
++ * the mesh are detected via the (orig, seqno) pair.
++ * So we can be a bit more liberal here and allow
++ * identical frames from the same orig which the source
++ * host might have sent multiple times on purpose.
++ */
++ if (batadv_compare_eth(entry->orig, orig))
++ continue;
++ }
+
+ /* this entry seems to match: same crc, not too old,
+ * and from another gw. therefore return true to forbid it.
+@@ -1642,7 +1658,14 @@ bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
+ entry = &bat_priv->bla.bcast_duplist[curr];
+ entry->crc = crc;
+ entry->entrytime = jiffies;
+- ether_addr_copy(entry->orig, bcast_packet->orig);
++
++ /* known originator */
++ if (orig)
++ ether_addr_copy(entry->orig, orig);
++ /* anonymous originator */
++ else
++ eth_zero_addr(entry->orig);
++
+ bat_priv->bla.bcast_duplist_curr = curr;
+
+ out:
+@@ -1651,6 +1674,48 @@ out:
+ return ret;
+ }
+
++/**
++ * batadv_bla_check_ucast_duplist() - Check if a frame is in the broadcast dup.
++ * @bat_priv: the bat priv with all the soft interface information
++ * @skb: contains the multicast packet to be checked, decapsulated from a
++ * unicast_packet
++ *
++ * Check if it is on our broadcast list. Another gateway might have sent the
++ * same packet because it is connected to the same backbone, so we have to
++ * remove this duplicate.
++ *
++ * Return: true if a packet is in the duplicate list, false otherwise.
++ */
++static bool batadv_bla_check_ucast_duplist(struct batadv_priv *bat_priv,
++ struct sk_buff *skb)
++{
++ return batadv_bla_check_duplist(bat_priv, skb, (u8 *)skb->data, NULL);
++}
++
++/**
++ * batadv_bla_check_bcast_duplist() - Check if a frame is in the broadcast dup.
++ * @bat_priv: the bat priv with all the soft interface information
++ * @skb: contains the bcast_packet to be checked
++ *
++ * Check if it is on our broadcast list. Another gateway might have sent the
++ * same packet because it is connected to the same backbone, so we have to
++ * remove this duplicate.
++ *
++ * Return: true if a packet is in the duplicate list, false otherwise.
++ */
++bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
++ struct sk_buff *skb)
++{
++ struct batadv_bcast_packet *bcast_packet;
++ u8 *payload_ptr;
++
++ bcast_packet = (struct batadv_bcast_packet *)skb->data;
++ payload_ptr = (u8 *)(bcast_packet + 1);
++
++ return batadv_bla_check_duplist(bat_priv, skb, payload_ptr,
++ bcast_packet->orig);
++}
++
+ /**
+ * batadv_bla_is_backbone_gw_orig() - Check if the originator is a gateway for
+ * the VLAN identified by vid.
+@@ -1812,7 +1877,7 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: the frame to be checked
+ * @vid: the VLAN ID of the frame
+- * @is_bcast: the packet came in a broadcast packet type.
++ * @packet_type: the batman packet type this frame came in
+ *
+ * batadv_bla_rx avoidance checks if:
+ * * we have to race for a claim
+@@ -1824,7 +1889,7 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
+ * further process the skb.
+ */
+ bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
+- unsigned short vid, bool is_bcast)
++ unsigned short vid, int packet_type)
+ {
+ struct batadv_bla_backbone_gw *backbone_gw;
+ struct ethhdr *ethhdr;
+@@ -1846,9 +1911,32 @@ bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
+ goto handled;
+
+ if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
+- /* don't allow broadcasts while requests are in flight */
+- if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast)
+- goto handled;
++ /* don't allow multicast packets while requests are in flight */
++ if (is_multicast_ether_addr(ethhdr->h_dest))
++ /* Both broadcast flooding or multicast-via-unicasts
++ * delivery might send to multiple backbone gateways
++ * sharing the same LAN and therefore need to coordinate
++ * which backbone gateway forwards into the LAN,
++ * by claiming the payload source address.
++ *
++ * Broadcast flooding and multicast-via-unicasts
++ * delivery use the following two batman packet types.
++ * Note: explicitly exclude BATADV_UNICAST_4ADDR,
++ * as the DHCP gateway feature will send explicitly
++ * to only one BLA gateway, so the claiming process
++ * should be avoided there.
++ */
++ if (packet_type == BATADV_BCAST ||
++ packet_type == BATADV_UNICAST)
++ goto handled;
++
++ /* potential duplicates from foreign BLA backbone gateways via
++ * multicast-in-unicast packets
++ */
++ if (is_multicast_ether_addr(ethhdr->h_dest) &&
++ packet_type == BATADV_UNICAST &&
++ batadv_bla_check_ucast_duplist(bat_priv, skb))
++ goto handled;
+
+ ether_addr_copy(search_claim.addr, ethhdr->h_source);
+ search_claim.vid = vid;
+@@ -1883,13 +1971,14 @@ bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
+ goto allow;
+ }
+
+- /* if it is a broadcast ... */
+- if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast) {
++ /* if it is a multicast ... */
++ if (is_multicast_ether_addr(ethhdr->h_dest) &&
++ (packet_type == BATADV_BCAST || packet_type == BATADV_UNICAST)) {
+ /* ... drop it. the responsible gateway is in charge.
+ *
+- * We need to check is_bcast because with the gateway
++ * We need to check packet type because with the gateway
+ * feature, broadcasts (like DHCP requests) may be sent
+- * using a unicast packet type.
++ * using a unicast 4 address packet type. See comment above.
+ */
+ goto handled;
+ } else {
+diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h
+index 02b24a861a854..9370be0158130 100644
+--- a/net/batman-adv/bridge_loop_avoidance.h
++++ b/net/batman-adv/bridge_loop_avoidance.h
+@@ -35,7 +35,7 @@ static inline bool batadv_bla_is_loopdetect_mac(const uint8_t *mac)
+
+ #ifdef CONFIG_BATMAN_ADV_BLA
+ bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
+- unsigned short vid, bool is_bcast);
++ unsigned short vid, int packet_type);
+ bool batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
+ unsigned short vid);
+ bool batadv_bla_is_backbone_gw(struct sk_buff *skb,
+@@ -66,7 +66,7 @@ bool batadv_bla_check_claim(struct batadv_priv *bat_priv, u8 *addr,
+
+ static inline bool batadv_bla_rx(struct batadv_priv *bat_priv,
+ struct sk_buff *skb, unsigned short vid,
+- bool is_bcast)
++ int packet_type)
+ {
+ return false;
+ }
16155 |
+diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
+index 1d5bdf3a4b655..f5bf931252c4b 100644
+--- a/net/batman-adv/multicast.c
++++ b/net/batman-adv/multicast.c
+@@ -51,6 +51,7 @@
+ #include <uapi/linux/batadv_packet.h>
+ #include <uapi/linux/batman_adv.h>
+
++#include "bridge_loop_avoidance.h"
+ #include "hard-interface.h"
+ #include "hash.h"
+ #include "log.h"
+@@ -1434,6 +1435,35 @@ batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
+ return BATADV_FORW_ALL;
+ }
+
++/**
++ * batadv_mcast_forw_send_orig() - send a multicast packet to an originator
++ * @bat_priv: the bat priv with all the soft interface information
++ * @skb: the multicast packet to send
++ * @vid: the vlan identifier
++ * @orig_node: the originator to send the packet to
++ *
++ * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
++ */
++int batadv_mcast_forw_send_orig(struct batadv_priv *bat_priv,
++ struct sk_buff *skb,
++ unsigned short vid,
++ struct batadv_orig_node *orig_node)
++{
++ /* Avoid sending multicast-in-unicast packets to other BLA
++ * gateways - they already got the frame from the LAN side
++ * we share with them.
++ * TODO: Refactor to take BLA into account earlier, to avoid
++ * reducing the mcast_fanout count.
++ */
++ if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig, vid)) {
++ dev_kfree_skb(skb);
++ return NET_XMIT_SUCCESS;
++ }
++
++ return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST, 0,
++ orig_node, vid);
++}
++
+ /**
+ * batadv_mcast_forw_tt() - forwards a packet to multicast listeners
+ * @bat_priv: the bat priv with all the soft interface information
+@@ -1471,8 +1501,8 @@ batadv_mcast_forw_tt(struct batadv_priv *bat_priv, struct sk_buff *skb,
+ break;
+ }
+
+- batadv_send_skb_unicast(bat_priv, newskb, BATADV_UNICAST, 0,
+- orig_entry->orig_node, vid);
++ batadv_mcast_forw_send_orig(bat_priv, newskb, vid,
++ orig_entry->orig_node);
+ }
+ rcu_read_unlock();
+
+@@ -1513,8 +1543,7 @@ batadv_mcast_forw_want_all_ipv4(struct batadv_priv *bat_priv,
+ break;
+ }
+
+- batadv_send_skb_unicast(bat_priv, newskb, BATADV_UNICAST, 0,
+- orig_node, vid);
++ batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node);
+ }
+ rcu_read_unlock();
+ return ret;
+@@ -1551,8 +1580,7 @@ batadv_mcast_forw_want_all_ipv6(struct batadv_priv *bat_priv,
+ break;
+ }
+
+- batadv_send_skb_unicast(bat_priv, newskb, BATADV_UNICAST, 0,
+- orig_node, vid);
++ batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node);
+ }
+ rcu_read_unlock();
+ return ret;
+@@ -1618,8 +1646,7 @@ batadv_mcast_forw_want_all_rtr4(struct batadv_priv *bat_priv,
+ break;
+ }
+
+- batadv_send_skb_unicast(bat_priv, newskb, BATADV_UNICAST, 0,
+- orig_node, vid);
++ batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node);
+ }
+ rcu_read_unlock();
+ return ret;
+@@ -1656,8 +1683,7 @@ batadv_mcast_forw_want_all_rtr6(struct batadv_priv *bat_priv,
+ break;
+ }
+
+- batadv_send_skb_unicast(bat_priv, newskb, BATADV_UNICAST, 0,
+- orig_node, vid);
++ batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node);
+ }
+ rcu_read_unlock();
+ return ret;
+diff --git a/net/batman-adv/multicast.h b/net/batman-adv/multicast.h
+index 5d9e2bb29c971..403929013ac47 100644
+--- a/net/batman-adv/multicast.h
++++ b/net/batman-adv/multicast.h
+@@ -46,6 +46,11 @@ enum batadv_forw_mode
+ batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
+ struct batadv_orig_node **mcast_single_orig);
+
++int batadv_mcast_forw_send_orig(struct batadv_priv *bat_priv,
++ struct sk_buff *skb,
++ unsigned short vid,
++ struct batadv_orig_node *orig_node);
++
+ int batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb,
+ unsigned short vid);
+
+@@ -71,6 +76,16 @@ batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
+ return BATADV_FORW_ALL;
+ }
+
++static inline int
++batadv_mcast_forw_send_orig(struct batadv_priv *bat_priv,
++ struct sk_buff *skb,
++ unsigned short vid,
++ struct batadv_orig_node *orig_node)
++{
++ kfree_skb(skb);
++ return NET_XMIT_DROP;
++}
++
+ static inline int
+ batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb,
+ unsigned short vid)
+diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
+index f0f864820dead..708e90cb18a6e 100644
+--- a/net/batman-adv/routing.c
++++ b/net/batman-adv/routing.c
+@@ -826,6 +826,10 @@ static bool batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
+ vid = batadv_get_vid(skb, hdr_len);
+ ethhdr = (struct ethhdr *)(skb->data + hdr_len);
+
++ /* do not reroute multicast frames in a unicast header */
++ if (is_multicast_ether_addr(ethhdr->h_dest))
++ return true;
++
+ /* check if the destination client was served by this node and it is now
+ * roaming. In this case, it means that the node has got a ROAM_ADV
+ * message and that it knows the new destination in the mesh to re-route
+diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
+index 5ee8e9a100f90..7f209390069ea 100644
+--- a/net/batman-adv/soft-interface.c
++++ b/net/batman-adv/soft-interface.c
+@@ -364,9 +364,8 @@ send:
+ goto dropped;
+ ret = batadv_send_skb_via_gw(bat_priv, skb, vid);
+ } else if (mcast_single_orig) {
+- ret = batadv_send_skb_unicast(bat_priv, skb,
+- BATADV_UNICAST, 0,
+- mcast_single_orig, vid);
++ ret = batadv_mcast_forw_send_orig(bat_priv, skb, vid,
++ mcast_single_orig);
+ } else if (forw_mode == BATADV_FORW_SOME) {
+ ret = batadv_mcast_forw_send(bat_priv, skb, vid);
+ } else {
+@@ -425,10 +424,10 @@ void batadv_interface_rx(struct net_device *soft_iface,
+ struct vlan_ethhdr *vhdr;
+ struct ethhdr *ethhdr;
+ unsigned short vid;
+- bool is_bcast;
++ int packet_type;
+
+ batadv_bcast_packet = (struct batadv_bcast_packet *)skb->data;
+- is_bcast = (batadv_bcast_packet->packet_type == BATADV_BCAST);
++ packet_type = batadv_bcast_packet->packet_type;
+
+ skb_pull_rcsum(skb, hdr_size);
+ skb_reset_mac_header(skb);
+@@ -471,7 +470,7 @@ void batadv_interface_rx(struct net_device *soft_iface,
+ /* Let the bridge loop avoidance check the packet. If will
+ * not handle it, we can safely push it up.
+ */
+- if (batadv_bla_rx(bat_priv, skb, vid, is_bcast))
++ if (batadv_bla_rx(bat_priv, skb, vid, packet_type))
+ goto out;
+
+ if (orig_node)
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index 7bf6860fed783..fd436e5d7b542 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -41,12 +41,27 @@
+
+ /* Handle HCI Event packets */
+
+-static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
++static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb,
++ u8 *new_status)
+ {
+ __u8 status = *((__u8 *) skb->data);
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
++ /* It is possible that we receive Inquiry Complete event right
++ * before we receive Inquiry Cancel Command Complete event, in
++ * which case the latter event should have status of Command
++ * Disallowed (0x0c). This should not be treated as error, since
++ * we actually achieve what Inquiry Cancel wants to achieve,
++ * which is to end the last Inquiry session.
++ */
++ if (status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
++ bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
++ status = 0x00;
++ }
++
++ *new_status = status;
++
+ if (status)
+ return;
+
+@@ -3142,7 +3157,7 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
+
+ switch (*opcode) {
+ case HCI_OP_INQUIRY_CANCEL:
+- hci_cc_inquiry_cancel(hdev, skb);
++ hci_cc_inquiry_cancel(hdev, skb, status);
+ break;
+
+ case HCI_OP_PERIODIC_INQ:
+@@ -5853,6 +5868,11 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
+ u8 status = 0, event = hdr->evt, req_evt = 0;
+ u16 opcode = HCI_OP_NOP;
+
++ if (!event) {
++ bt_dev_warn(hdev, "Received unexpected HCI Event 00000000");
++ goto done;
++ }
++
+ if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) {
+ struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
+ opcode = __le16_to_cpu(cmd_hdr->opcode);
+@@ -6064,6 +6084,7 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
+ req_complete_skb(hdev, status, opcode, orig_skb);
+ }
+
++done:
+ kfree_skb(orig_skb);
+ kfree_skb(skb);
+ hdev->stat.evt_rx++;
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index a845786258a0b..12a50e5a9f452 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -419,6 +419,9 @@ static void l2cap_chan_timeout(struct work_struct *work)
+ BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
+
+ mutex_lock(&conn->chan_lock);
++ /* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
++ * this work. No need to call l2cap_chan_hold(chan) here again.
++ */
+ l2cap_chan_lock(chan);
+
+ if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
+@@ -431,12 +434,12 @@ static void l2cap_chan_timeout(struct work_struct *work)
+
+ l2cap_chan_close(chan, reason);
+
+- l2cap_chan_unlock(chan);
+-
+ chan->ops->close(chan);
+- mutex_unlock(&conn->chan_lock);
+
++ l2cap_chan_unlock(chan);
+ l2cap_chan_put(chan);
++
++ mutex_unlock(&conn->chan_lock);
+ }
+
+ struct l2cap_chan *l2cap_chan_create(void)
+@@ -1734,9 +1737,9 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
+
+ l2cap_chan_del(chan, err);
+
+- l2cap_chan_unlock(chan);
+-
+ chan->ops->close(chan);
++
++ l2cap_chan_unlock(chan);
+ l2cap_chan_put(chan);
+ }
+
+@@ -4131,7 +4134,8 @@ static inline int l2cap_config_req(struct l2cap_conn *conn,
+ return 0;
+ }
+
+- if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
++ if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
++ chan->state != BT_CONNECTED) {
+ cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
+ chan->dcid);
+ goto unlock;
+@@ -4355,6 +4359,7 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
+ return 0;
+ }
+
++ l2cap_chan_hold(chan);
+ l2cap_chan_lock(chan);
+
+ rsp.dcid = cpu_to_le16(chan->scid);
+@@ -4363,12 +4368,11 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
+
+ chan->ops->set_shutdown(chan);
+
+- l2cap_chan_hold(chan);
+ l2cap_chan_del(chan, ECONNRESET);
+
+- l2cap_chan_unlock(chan);
+-
+ chan->ops->close(chan);
++
++ l2cap_chan_unlock(chan);
+ l2cap_chan_put(chan);
+
+ mutex_unlock(&conn->chan_lock);
+@@ -4400,20 +4404,21 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
+ return 0;
+ }
+
++ l2cap_chan_hold(chan);
+ l2cap_chan_lock(chan);
+
+ if (chan->state != BT_DISCONN) {
+ l2cap_chan_unlock(chan);
++ l2cap_chan_put(chan);
+ mutex_unlock(&conn->chan_lock);
+ return 0;
+ }
+
+- l2cap_chan_hold(chan);
+ l2cap_chan_del(chan, 0);
+
+- l2cap_chan_unlock(chan);
+-
+ chan->ops->close(chan);
++
++ l2cap_chan_unlock(chan);
+ l2cap_chan_put(chan);
+
+ mutex_unlock(&conn->chan_lock);
+diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
+index a7be8b59b3c28..390a9afab6473 100644
+--- a/net/bluetooth/l2cap_sock.c
++++ b/net/bluetooth/l2cap_sock.c
+@@ -1042,7 +1042,7 @@ done:
+ }
+
+ /* Kill socket (only if zapped and orphan)
+- * Must be called on unlocked socket.
++ * Must be called on unlocked socket, with l2cap channel lock.
+ */
+ static void l2cap_sock_kill(struct sock *sk)
+ {
+@@ -1193,6 +1193,7 @@ static int l2cap_sock_release(struct socket *sock)
+ {
+ struct sock *sk = sock->sk;
+ int err;
++ struct l2cap_chan *chan;
+
+ BT_DBG("sock %p, sk %p", sock, sk);
+
+@@ -1202,9 +1203,17 @@ static int l2cap_sock_release(struct socket *sock)
+ bt_sock_unlink(&l2cap_sk_list, sk);
+
+ err = l2cap_sock_shutdown(sock, 2);
++ chan = l2cap_pi(sk)->chan;
++
++ l2cap_chan_hold(chan);
++ l2cap_chan_lock(chan);
+
+ sock_orphan(sk);
+ l2cap_sock_kill(sk);
++
++ l2cap_chan_unlock(chan);
++ l2cap_chan_put(chan);
++
+ return err;
+ }
+
+@@ -1222,12 +1231,15 @@ static void l2cap_sock_cleanup_listen(struct sock *parent)
+ BT_DBG("child chan %p state %s", chan,
+ state_to_string(chan->state));
+
++ l2cap_chan_hold(chan);
+ l2cap_chan_lock(chan);
++
+ __clear_chan_timer(chan);
+ l2cap_chan_close(chan, ECONNRESET);
+- l2cap_chan_unlock(chan);
+-
+ l2cap_sock_kill(sk);
++
++ l2cap_chan_unlock(chan);
++ l2cap_chan_put(chan);
+ }
+ }
+
+diff --git a/net/core/devlink.c b/net/core/devlink.c
+index 5667cae57072f..26c8993a17ae0 100644
+--- a/net/core/devlink.c
++++ b/net/core/devlink.c
+@@ -4823,6 +4823,7 @@ int devlink_health_report(struct devlink_health_reporter *reporter,
+ {
+ enum devlink_health_reporter_state prev_health_state;
+ struct devlink *devlink = reporter->devlink;
++ unsigned long recover_ts_threshold;
+
+ /* write a log message of the current error */
+ WARN_ON(!msg);
+@@ -4832,10 +4833,12 @@ int devlink_health_report(struct devlink_health_reporter *reporter,
+ reporter->health_state = DEVLINK_HEALTH_REPORTER_STATE_ERROR;
+
+ /* abort if the previous error wasn't recovered */
++ recover_ts_threshold = reporter->last_recovery_ts +
++ msecs_to_jiffies(reporter->graceful_period);
+ if (reporter->auto_recover &&
+ (prev_health_state != DEVLINK_HEALTH_REPORTER_STATE_HEALTHY ||
+- jiffies - reporter->last_recovery_ts <
+- msecs_to_jiffies(reporter->graceful_period))) {
++ (reporter->last_recovery_ts && reporter->recovery_count &&
++ time_is_after_jiffies(recover_ts_threshold)))) {
+ trace_devlink_health_recover_aborted(devlink,
+ reporter->ops->name,
+ reporter->health_state,
+diff --git a/net/core/filter.c b/net/core/filter.c
+index cf2a68513bfd5..c441f9961e917 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -6791,8 +6791,6 @@ static int bpf_gen_ld_abs(const struct bpf_insn *orig,
+ bool indirect = BPF_MODE(orig->code) == BPF_IND;
+ struct bpf_insn *insn = insn_buf;
+
+- /* We're guaranteed here that CTX is in R6. */
+- *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_CTX);
+ if (!indirect) {
+ *insn++ = BPF_MOV64_IMM(BPF_REG_2, orig->imm);
+ } else {
+@@ -6800,6 +6798,8 @@ static int bpf_gen_ld_abs(const struct bpf_insn *orig,
+ if (orig->imm)
+ *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, orig->imm);
+ }
++ /* We're guaranteed here that CTX is in R6. */
++ *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_CTX);
+
+ switch (BPF_SIZE(orig->code)) {
+ case BPF_B:
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c
+index 7b40d12f0c229..04953e5f25302 100644
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -3290,6 +3290,7 @@ static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+ *pos = cpu+1;
+ return per_cpu_ptr(tbl->stats, cpu);
+ }
++ (*pos)++;
+ return NULL;
+ }
+
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index aa77f989ba817..7a5f64cf1fdd2 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -271,6 +271,7 @@ static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+ *pos = cpu+1;
+ return &per_cpu(rt_cache_stat, cpu);
+ }
++ (*pos)++;
+ return NULL;
+
+ }
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 01ddfb4156e4a..2ffa33b5ef404 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -2053,7 +2053,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
+
+ /* Well, if we have backlog, try to process it now yet. */
+
+- if (copied >= target && !sk->sk_backlog.tail)
++ if (copied >= target && !READ_ONCE(sk->sk_backlog.tail))
+ break;
+
+ if (copied) {
+diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
+index 96d80e50bf35b..9ca6c32065ec6 100644
+--- a/net/ipv6/ip6_fib.c
++++ b/net/ipv6/ip6_fib.c
+@@ -2479,14 +2479,13 @@ static void *ipv6_route_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+ struct net *net = seq_file_net(seq);
+ struct ipv6_route_iter *iter = seq->private;
+
++ ++(*pos);
+ if (!v)
+ goto iter_table;
+
+ n = rcu_dereference_bh(((struct fib6_info *)v)->fib6_next);
+- if (n) {
+- ++*pos;
++ if (n)
+ return n;
+- }
+
+ iter_table:
+ ipv6_route_check_sernum(iter);
+@@ -2494,8 +2493,6 @@ iter_table:
+ r = fib6_walk_continue(&iter->w);
+ spin_unlock_bh(&iter->tbl->tb6_lock);
+ if (r > 0) {
+- if (v)
+- ++*pos;
+ return iter->w.leaf;
+ } else if (r < 0) {
+ fib6_walker_unlink(net, &iter->w);
+diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
+index 5abb7f9b7ee5f..fa0f3c1543ba5 100644
+--- a/net/llc/af_llc.c
++++ b/net/llc/af_llc.c
+@@ -784,7 +784,7 @@ static int llc_ui_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ }
+ /* Well, if we have backlog, try to process it now yet. */
+
+- if (copied >= target && !sk->sk_backlog.tail)
++ if (copied >= target && !READ_ONCE(sk->sk_backlog.tail))
+ break;
+
+ if (copied) {
+diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
+index 30201aeb426cf..f029e75ec815a 100644
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -3913,6 +3913,9 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb,
+ skb->prev = NULL;
+ skb->next = NULL;
+
++ if (skb->protocol == sdata->control_port_protocol)
++ ctrl_flags |= IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP;
++
+ skb = ieee80211_build_hdr(sdata, skb, info_flags,
+ sta, ctrl_flags);
+ if (IS_ERR(skb))
+@@ -5096,7 +5099,8 @@ int ieee80211_tx_control_port(struct wiphy *wiphy, struct net_device *dev,
+ return -EINVAL;
+
+ if (proto == sdata->control_port_protocol)
+- ctrl_flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO;
++ ctrl_flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO |
++ IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP;
+
+ if (unencrypted)
+ flags = IEEE80211_TX_INTFL_DONT_ENCRYPT;
+diff --git a/net/mac802154/tx.c b/net/mac802154/tx.c
+index ab52811523e99..c829e4a753256 100644
+--- a/net/mac802154/tx.c
++++ b/net/mac802154/tx.c
+@@ -34,11 +34,11 @@ void ieee802154_xmit_worker(struct work_struct *work)
+ if (res)
+ goto err_tx;
+
+- ieee802154_xmit_complete(&local->hw, skb, false);
+-
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+
++ ieee802154_xmit_complete(&local->hw, skb, false);
++
+ return;
+
+ err_tx:
+@@ -78,6 +78,8 @@ ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb)
+
+ /* async is priority, otherwise sync is fallback */
+ if (local->ops->xmit_async) {
++ unsigned int len = skb->len;
++
+ ret = drv_xmit_async(local, skb);
+ if (ret) {
+ ieee802154_wake_queue(&local->hw);
+@@ -85,7 +87,7 @@ ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb)
+ }
+
+ dev->stats.tx_packets++;
+- dev->stats.tx_bytes += skb->len;
++ dev->stats.tx_bytes += len;
+ } else {
+ local->tx_skb = skb;
+ queue_work(local->workqueue, &local->tx_work);
+diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
+index a0560d175a7ff..aaf4293ddd459 100644
+--- a/net/netfilter/nf_conntrack_proto.c
++++ b/net/netfilter/nf_conntrack_proto.c
+@@ -565,6 +565,7 @@ static int nf_ct_netns_inet_get(struct net *net)
+ int err;
+
+ err = nf_ct_netns_do_get(net, NFPROTO_IPV4);
++#if IS_ENABLED(CONFIG_IPV6)
+ if (err < 0)
+ goto err1;
+ err = nf_ct_netns_do_get(net, NFPROTO_IPV6);
+@@ -575,6 +576,7 @@ static int nf_ct_netns_inet_get(struct net *net)
+ err2:
+ nf_ct_netns_put(net, NFPROTO_IPV4);
+ err1:
++#endif
+ return err;
+ }
+
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 2023650c27249..ff2d2b514506e 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -456,7 +456,8 @@ static struct nft_table *nft_table_lookup(const struct net *net,
+ if (nla == NULL)
+ return ERR_PTR(-EINVAL);
+
+- list_for_each_entry_rcu(table, &net->nft.tables, list) {
++ list_for_each_entry_rcu(table, &net->nft.tables, list,
++ lockdep_is_held(&net->nft.commit_mutex)) {
+ if (!nla_strcmp(nla, table->name) &&
+ table->family == family &&
+ nft_active_genmask(table, genmask))
+diff --git a/net/openvswitch/meter.c b/net/openvswitch/meter.c
+index 3323b79ff548d..541eea74ef7a6 100644
+--- a/net/openvswitch/meter.c
++++ b/net/openvswitch/meter.c
+@@ -251,8 +251,8 @@ static struct dp_meter *dp_meter_create(struct nlattr **a)
+ *
+ * Start with a full bucket.
+ */
+- band->bucket = (band->burst_size + band->rate) * 1000;
+- band_max_delta_t = band->bucket / band->rate;
++ band->bucket = (band->burst_size + band->rate) * 1000ULL;
++ band_max_delta_t = div_u64(band->bucket, band->rate);
+ if (band_max_delta_t > meter->max_delta_t)
+ meter->max_delta_t = band_max_delta_t;
+ band++;
+diff --git a/net/openvswitch/meter.h b/net/openvswitch/meter.h
+index f645913870bd2..2e3fd6f1d7ebe 100644
+--- a/net/openvswitch/meter.h
++++ b/net/openvswitch/meter.h
+@@ -23,7 +23,7 @@ struct dp_meter_band {
+ u32 type;
+ u32 rate;
+ u32 burst_size;
+- u32 bucket; /* 1/1000 packets, or in bits */
++ u64 bucket; /* 1/1000 packets, or in bits */
+ struct ovs_flow_stats stats;
+ };
+
+diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
+index 0dab62b67b9a4..adceb226ffab3 100644
+--- a/net/sctp/outqueue.c
++++ b/net/sctp/outqueue.c
+@@ -36,6 +36,7 @@
+ #include <net/sctp/sctp.h>
+ #include <net/sctp/sm.h>
+ #include <net/sctp/stream_sched.h>
++#include <trace/events/sctp.h>
+
+ /* Declare internal functions here. */
+ static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn);
+@@ -1238,6 +1239,11 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_chunk *chunk)
+ /* Grab the association's destination address list. */
+ transport_list = &asoc->peer.transport_addr_list;
+
++ /* SCTP path tracepoint for congestion control debugging. */
++ list_for_each_entry(transport, transport_list, transports) {
++ trace_sctp_probe_path(transport, asoc);
++ }
++
+ sack_ctsn = ntohl(sack->cum_tsn_ack);
+ gap_ack_blocks = ntohs(sack->num_gap_ack_blocks);
+ asoc->stats.gapcnt += gap_ack_blocks;
+diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
+index 987c4b1f0b174..53d8b82eda006 100644
+--- a/net/sunrpc/sched.c
++++ b/net/sunrpc/sched.c
+@@ -204,10 +204,6 @@ static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
+ struct rpc_task *task,
+ unsigned char queue_priority)
+ {
+- WARN_ON_ONCE(RPC_IS_QUEUED(task));
+- if (RPC_IS_QUEUED(task))
+- return;
+-
+ INIT_LIST_HEAD(&task->u.tk_wait.timer_list);
+ if (RPC_IS_PRIORITY(queue))
+ __rpc_add_wait_queue_priority(queue, task, queue_priority);
+@@ -382,7 +378,7 @@ static void rpc_make_runnable(struct workqueue_struct *wq,
+ * NB: An RPC task will only receive interrupt-driven events as long
+ * as it's on a wait queue.
+ */
+-static void __rpc_sleep_on_priority(struct rpc_wait_queue *q,
++static void __rpc_do_sleep_on_priority(struct rpc_wait_queue *q,
+ struct rpc_task *task,
+ unsigned char queue_priority)
+ {
+@@ -395,12 +391,23 @@ static void __rpc_sleep_on_priority(struct rpc_wait_queue *q,
+
+ }
+
++static void __rpc_sleep_on_priority(struct rpc_wait_queue *q,
++ struct rpc_task *task,
++ unsigned char queue_priority)
++{
++ if (WARN_ON_ONCE(RPC_IS_QUEUED(task)))
++ return;
++ __rpc_do_sleep_on_priority(q, task, queue_priority);
++}
++
+ static void __rpc_sleep_on_priority_timeout(struct rpc_wait_queue *q,
+ struct rpc_task *task, unsigned long timeout,
+ unsigned char queue_priority)
+ {
++ if (WARN_ON_ONCE(RPC_IS_QUEUED(task)))
++ return;
+ if (time_is_after_jiffies(timeout)) {
+- __rpc_sleep_on_priority(q, task, queue_priority);
++ __rpc_do_sleep_on_priority(q, task, queue_priority);
+ __rpc_add_timer(q, task, timeout);
+ } else
+ task->tk_status = -ETIMEDOUT;
+@@ -824,6 +831,7 @@ rpc_reset_task_statistics(struct rpc_task *task)
+ */
+ void rpc_exit_task(struct rpc_task *task)
+ {
++ trace_rpc_task_end(task, task->tk_action);
+ task->tk_action = NULL;
+ if (task->tk_ops->rpc_count_stats)
+ task->tk_ops->rpc_count_stats(task, task->tk_calldata);
+diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
+index dc74519286be5..fe4cd0b4c4127 100644
+--- a/net/sunrpc/svc_xprt.c
++++ b/net/sunrpc/svc_xprt.c
+@@ -104,8 +104,17 @@ void svc_unreg_xprt_class(struct svc_xprt_class *xcl)
+ }
+ EXPORT_SYMBOL_GPL(svc_unreg_xprt_class);
+
+-/*
+- * Format the transport list for printing
++/**
++ * svc_print_xprts - Format the transport list for printing
++ * @buf: target buffer for formatted address
++ * @maxlen: length of target buffer
++ *
++ * Fills in @buf with a string containing a list of transport names, each name
++ * terminated with '\n'. If the buffer is too small, some entries may be
++ * missing, but it is guaranteed that all lines in the output buffer are
++ * complete.
++ *
++ * Returns positive length of the filled-in string.
+ */
+ int svc_print_xprts(char *buf, int maxlen)
+ {
+@@ -118,9 +127,9 @@ int svc_print_xprts(char *buf, int maxlen)
+ list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) {
+ int slen;
+
+- sprintf(tmpstr, "%s %d\n", xcl->xcl_name, xcl->xcl_max_payload);
+- slen = strlen(tmpstr);
+- if (len + slen > maxlen)
++ slen = snprintf(tmpstr, sizeof(tmpstr), "%s %d\n",
++ xcl->xcl_name, xcl->xcl_max_payload);
++ if (slen >= sizeof(tmpstr) || len + slen >= maxlen)
+ break;
+ len += slen;
+ strcat(buf, tmpstr);
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
+index cf80394b2db33..68d2dcf0a1be1 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
+@@ -15,26 +15,25 @@
+ #undef SVCRDMA_BACKCHANNEL_DEBUG
+
+ /**
+- * svc_rdma_handle_bc_reply - Process incoming backchannel reply
+- * @xprt: controlling backchannel transport
+- * @rdma_resp: pointer to incoming transport header
+- * @rcvbuf: XDR buffer into which to decode the reply
++ * svc_rdma_handle_bc_reply - Process incoming backchannel Reply
++ * @rqstp: resources for handling the Reply
++ * @rctxt: Received message
+ *
+- * Returns:
+- * %0 if @rcvbuf is filled in, xprt_complete_rqst called,
+- * %-EAGAIN if server should call ->recvfrom again.
+ */
+-int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt, __be32 *rdma_resp,
+- struct xdr_buf *rcvbuf)
++void svc_rdma_handle_bc_reply(struct svc_rqst *rqstp,
++ struct svc_rdma_recv_ctxt *rctxt)
+ {
++ struct svc_xprt *sxprt = rqstp->rq_xprt;
++ struct rpc_xprt *xprt = sxprt->xpt_bc_xprt;
+ struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
++ struct xdr_buf *rcvbuf = &rqstp->rq_arg;
+ struct kvec *dst, *src = &rcvbuf->head[0];
++ __be32 *rdma_resp = rctxt->rc_recv_buf;
+ struct rpc_rqst *req;
+ u32 credits;
+ size_t len;
+ __be32 xid;
+ __be32 *p;
+- int ret;
+
+ p = (__be32 *)src->iov_base;
+ len = src->iov_len;
+@@ -49,14 +48,10 @@ int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt, __be32 *rdma_resp,
+ __func__, (int)len, p);
+ #endif
+
+- ret = -EAGAIN;
+- if (src->iov_len < 24)
+- goto out_shortreply;
+-
+ spin_lock(&xprt->queue_lock);
+ req = xprt_lookup_rqst(xprt, xid);
+ if (!req)
+- goto out_notfound;
++ goto out_unlock;
+
+ dst = &req->rq_private_buf.head[0];
+ memcpy(&req->rq_private_buf, &req->rq_rcv_buf, sizeof(struct xdr_buf));
+@@ -77,25 +72,12 @@ int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt, __be32 *rdma_resp,
+ spin_unlock(&xprt->transport_lock);
+
+ spin_lock(&xprt->queue_lock);
+- ret = 0;
+ xprt_complete_rqst(req->rq_task, rcvbuf->len);
+ xprt_unpin_rqst(req);
+ rcvbuf->len = 0;
+
+ out_unlock:
+ spin_unlock(&xprt->queue_lock);
+-out:
+- return ret;
+-
+-out_shortreply:
+- dprintk("svcrdma: short bc reply: xprt=%p, len=%zu\n",
+- xprt, src->iov_len);
+- goto out;
+-
+-out_notfound:
+- dprintk("svcrdma: unrecognized bc reply: xprt=%p, xid=%08x\n",
+- xprt, be32_to_cpu(xid));
+- goto out_unlock;
+ }
+
+ /* Send a backwards direction RPC call.
+@@ -252,6 +234,7 @@ xprt_rdma_bc_put(struct rpc_xprt *xprt)
+ {
+ dprintk("svcrdma: %s: xprt %p\n", __func__, xprt);
+
++ xprt_rdma_free_addresses(xprt);
+ xprt_free(xprt);
+ }
+
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+index d803d814a03ad..fd5c1f1bb9885 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+@@ -817,12 +817,9 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
+ goto out_drop;
+ rqstp->rq_xprt_hlen = ret;
+
+- if (svc_rdma_is_backchannel_reply(xprt, p)) {
+- ret = svc_rdma_handle_bc_reply(xprt->xpt_bc_xprt, p,
+- &rqstp->rq_arg);
+- svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
+- return ret;
+- }
++ if (svc_rdma_is_backchannel_reply(xprt, p))
++ goto out_backchannel;
++
+ svc_rdma_get_inv_rkey(rdma_xprt, ctxt);
+
+ p += rpcrdma_fixed_maxsz;
+@@ -852,6 +849,8 @@ out_postfail:
+ svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
+ return ret;
+
++out_backchannel:
++ svc_rdma_handle_bc_reply(rqstp, ctxt);
+ out_drop:
+ svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
+ return 0;
+diff --git a/net/tipc/socket.c b/net/tipc/socket.c
+index 959155c3a1608..66e8f89bce534 100644
+--- a/net/tipc/socket.c
++++ b/net/tipc/socket.c
+@@ -260,12 +260,12 @@ static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err)
+ *
+ * Caller must hold socket lock
+ */
+-static void tsk_rej_rx_queue(struct sock *sk)
++static void tsk_rej_rx_queue(struct sock *sk, int error)
+ {
+ struct sk_buff *skb;
+
+ while ((skb = __skb_dequeue(&sk->sk_receive_queue)))
+- tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT);
++ tipc_sk_respond(sk, skb, error);
+ }
+
+ static bool tipc_sk_connected(struct sock *sk)
+@@ -515,34 +515,45 @@ static void __tipc_shutdown(struct socket *sock, int error)
+ /* Remove any pending SYN message */
+ __skb_queue_purge(&sk->sk_write_queue);
+
+- /* Reject all unreceived messages, except on an active connection
+- * (which disconnects locally & sends a 'FIN+' to peer).
+- */
+- while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
+- if (TIPC_SKB_CB(skb)->bytes_read) {
+- kfree_skb(skb);
+- continue;
+- }
+- if (!tipc_sk_type_connectionless(sk) &&
+- sk->sk_state != TIPC_DISCONNECTING) {
+- tipc_set_sk_state(sk, TIPC_DISCONNECTING);
+- tipc_node_remove_conn(net, dnode, tsk->portid);
+- }
+- tipc_sk_respond(sk, skb, error);
++ /* Remove partially received buffer if any */
++ skb = skb_peek(&sk->sk_receive_queue);
++ if (skb && TIPC_SKB_CB(skb)->bytes_read) {
++ __skb_unlink(skb, &sk->sk_receive_queue);
++ kfree_skb(skb);
+ }
+
+- if (tipc_sk_type_connectionless(sk))
++ /* Reject all unreceived messages if connectionless */
++ if (tipc_sk_type_connectionless(sk)) {
++ tsk_rej_rx_queue(sk, error);
+ return;
++ }
+
+- if (sk->sk_state != TIPC_DISCONNECTING) {
++ switch (sk->sk_state) {
++ case TIPC_CONNECTING:
++ case TIPC_ESTABLISHED:
++ tipc_set_sk_state(sk, TIPC_DISCONNECTING);
++ tipc_node_remove_conn(net, dnode, tsk->portid);
++ /* Send a FIN+/- to its peer */
++ skb = __skb_dequeue(&sk->sk_receive_queue);
++ if (skb) {
++ __skb_queue_purge(&sk->sk_receive_queue);
++ tipc_sk_respond(sk, skb, error);
++ break;
++ }
+ skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
+ TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode,
+ tsk_own_node(tsk), tsk_peer_port(tsk),
+ tsk->portid, error);
+ if (skb)
+ tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
+- tipc_node_remove_conn(net, dnode, tsk->portid);
+- tipc_set_sk_state(sk, TIPC_DISCONNECTING);
++ break;
++ case TIPC_LISTEN:
++ /* Reject all SYN messages */
++ tsk_rej_rx_queue(sk, error);
++ break;
++ default:
++ __skb_queue_purge(&sk->sk_receive_queue);
++ break;
+ }
+ }
+
+@@ -2564,7 +2575,7 @@ static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
+ * Reject any stray messages received by new socket
+ * before the socket lock was taken (very, very unlikely)
+ */
+- tsk_rej_rx_queue(new_sk);
++ tsk_rej_rx_queue(new_sk, TIPC_ERR_NO_PORT);
+
+ /* Connect new socket to it's peer */
+ tipc_sk_finish_conn(new_tsock, msg_origport(msg), msg_orignode(msg));
+diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c
+index 73dbed0c4b6b8..931c426673c02 100644
+--- a/net/tipc/topsrv.c
++++ b/net/tipc/topsrv.c
+@@ -400,7 +400,9 @@ static int tipc_conn_rcv_from_sock(struct tipc_conn *con)
+ return -EWOULDBLOCK;
+ if (ret == sizeof(s)) {
+ read_lock_bh(&sk->sk_callback_lock);
+- ret = tipc_conn_rcv_sub(srv, con, &s);
++ /* RACE: the connection can be closed in the meantime */
++ if (likely(connected(con)))
++ ret = tipc_conn_rcv_sub(srv, con, &s);
+ read_unlock_bh(&sk->sk_callback_lock);
+ if (!ret)
+ return 0;
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index b3369d678f1af..ecadd9e482c46 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -189,11 +189,17 @@ static inline int unix_may_send(struct sock *sk, struct sock *osk)
+ return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
+ }
+
+-static inline int unix_recvq_full(struct sock const *sk)
++static inline int unix_recvq_full(const struct sock *sk)
+ {
+ return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
+ }
+
++static inline int unix_recvq_full_lockless(const struct sock *sk)
++{
++ return skb_queue_len_lockless(&sk->sk_receive_queue) >
++ READ_ONCE(sk->sk_max_ack_backlog);
++}
++
+ struct sock *unix_peer_get(struct sock *s)
+ {
+ struct sock *peer;
+@@ -1724,7 +1730,8 @@ restart_locked:
+ * - unix_peer(sk) == sk by time of get but disconnected before lock
+ */
+ if (other != sk &&
+- unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
++ unlikely(unix_peer(other) != sk &&
++ unix_recvq_full_lockless(other))) {
+ if (timeo) {
+ timeo = unix_wait_for_peer(other, timeo);
+
+diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
+index 63cf7131f601c..211007c091d59 100644
+--- a/net/wireless/Kconfig
++++ b/net/wireless/Kconfig
+@@ -217,6 +217,7 @@ config LIB80211_CRYPT_WEP
+
+ config LIB80211_CRYPT_CCMP
+ tristate
++ select CRYPTO
+ select CRYPTO_AES
+ select CRYPTO_CCM
+
+diff --git a/security/device_cgroup.c b/security/device_cgroup.c
+index 725674f3276d3..5d7bb91c64876 100644
+--- a/security/device_cgroup.c
++++ b/security/device_cgroup.c
+@@ -352,7 +352,8 @@ static bool match_exception_partial(struct list_head *exceptions, short type,
+ {
+ struct dev_exception_item *ex;
+
+- list_for_each_entry_rcu(ex, exceptions, list) {
++ list_for_each_entry_rcu(ex, exceptions, list,
++ lockdep_is_held(&devcgroup_mutex)) {
+ if ((type & DEVCG_DEV_BLOCK) && !(ex->type & DEVCG_DEV_BLOCK))
+ continue;
+ if ((type & DEVCG_DEV_CHAR) && !(ex->type & DEVCG_DEV_CHAR))
+diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
+index 552e73d90fd25..212f48025db81 100644
+--- a/security/selinux/hooks.c
++++ b/security/selinux/hooks.c
+@@ -3156,6 +3156,9 @@ static int selinux_inode_setxattr(struct dentry *dentry, const char *name,
+ return dentry_has_perm(current_cred(), dentry, FILE__SETATTR);
+ }
+
++ if (!selinux_state.initialized)
++ return (inode_owner_or_capable(inode) ? 0 : -EPERM);
++
+ sbsec = inode->i_sb->s_security;
+ if (!(sbsec->flags & SBLABEL_MNT))
+ return -EOPNOTSUPP;
+@@ -3239,6 +3242,15 @@ static void selinux_inode_post_setxattr(struct dentry *dentry, const char *name,
+ return;
+ }
+
++ if (!selinux_state.initialized) {
++ /* If we haven't even been initialized, then we can't validate
++ * against a policy, so leave the label as invalid. It may
++ * resolve to a valid label on the next revalidation try if
++ * we've since initialized.
++ */
++ return;
++ }
++
+ rc = security_context_to_sid_force(&selinux_state, value, size,
+ &newsid);
+ if (rc) {
+diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
+index e6c7643c3fc08..e9eaff90cbccd 100644
+--- a/security/selinux/selinuxfs.c
++++ b/security/selinux/selinuxfs.c
+@@ -1508,6 +1508,7 @@ static struct avc_cache_stats *sel_avc_get_stat_idx(loff_t *idx)
+ *idx = cpu + 1;
+ return &per_cpu(avc_cache_stats, cpu);
+ }
++ (*idx)++;
+ return NULL;
+ }
+
+diff --git a/sound/hda/hdac_bus.c b/sound/hda/hdac_bus.c
+index 8f19876244ebe..53be2cac98e7c 100644
+--- a/sound/hda/hdac_bus.c
++++ b/sound/hda/hdac_bus.c
+@@ -158,6 +158,7 @@ static void snd_hdac_bus_process_unsol_events(struct work_struct *work)
+ struct hdac_driver *drv;
+ unsigned int rp, caddr, res;
+
++ spin_lock_irq(&bus->reg_lock);
+ while (bus->unsol_rp != bus->unsol_wp) {
+ rp = (bus->unsol_rp + 1) % HDA_UNSOL_QUEUE_SIZE;
+ bus->unsol_rp = rp;
+@@ -169,10 +170,13 @@ static void snd_hdac_bus_process_unsol_events(struct work_struct *work)
+ codec = bus->caddr_tbl[caddr & 0x0f];
+ if (!codec || !codec->dev.driver)
+ continue;
++ spin_unlock_irq(&bus->reg_lock);
+ drv = drv_to_hdac_driver(codec->dev.driver);
+ if (drv->unsol_event)
+ drv->unsol_event(codec, res);
++ spin_lock_irq(&bus->reg_lock);
+ }
++ spin_unlock_irq(&bus->reg_lock);
+ }
+
+ /**
+diff --git a/sound/hda/hdac_regmap.c b/sound/hda/hdac_regmap.c
+index 2596a881186fa..49780399c2849 100644
+--- a/sound/hda/hdac_regmap.c
++++ b/sound/hda/hdac_regmap.c
+@@ -363,7 +363,6 @@ static const struct regmap_config hda_regmap_cfg = {
+ .reg_write = hda_reg_write,
+ .use_single_read = true,
+ .use_single_write = true,
+- .disable_locking = true,
+ };
+
+ /**
+diff --git a/sound/pci/asihpi/hpioctl.c b/sound/pci/asihpi/hpioctl.c
+index 496dcde9715d6..9790f5108a166 100644
+--- a/sound/pci/asihpi/hpioctl.c
++++ b/sound/pci/asihpi/hpioctl.c
+@@ -343,7 +343,7 @@ int asihpi_adapter_probe(struct pci_dev *pci_dev,
+ struct hpi_message hm;
+ struct hpi_response hr;
+ struct hpi_adapter adapter;
+- struct hpi_pci pci;
++ struct hpi_pci pci = { 0 };
+
+ memset(&adapter, 0, sizeof(adapter));
+
+@@ -499,7 +499,7 @@ int asihpi_adapter_probe(struct pci_dev *pci_dev,
+ return 0;
+
+ err:
+- for (idx = 0; idx < HPI_MAX_ADAPTER_MEM_SPACES; idx++) {
++ while (--idx >= 0) {
+ if (pci.ap_mem_base[idx]) {
+ iounmap(pci.ap_mem_base[idx]);
+ pci.ap_mem_base[idx] = NULL;
+diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c |
17316 |
+index 103011e7285a3..6da296def283e 100644 |
17317 |
+--- a/sound/pci/hda/hda_codec.c |
17318 |
++++ b/sound/pci/hda/hda_codec.c |
17319 |
+@@ -641,8 +641,18 @@ static void hda_jackpoll_work(struct work_struct *work) |
17320 |
+ struct hda_codec *codec = |
17321 |
+ container_of(work, struct hda_codec, jackpoll_work.work); |
17322 |
+ |
17323 |
+- snd_hda_jack_set_dirty_all(codec); |
17324 |
+- snd_hda_jack_poll_all(codec); |
17325 |
++ /* for non-polling trigger: we need nothing if already powered on */ |
17326 |
++ if (!codec->jackpoll_interval && snd_hdac_is_power_on(&codec->core)) |
17327 |
++ return; |
17328 |
++ |
17329 |
++ /* the power-up/down sequence triggers the runtime resume */ |
17330 |
++ snd_hda_power_up_pm(codec); |
17331 |
++ /* update jacks manually if polling is required, too */ |
17332 |
++ if (codec->jackpoll_interval) { |
17333 |
++ snd_hda_jack_set_dirty_all(codec); |
17334 |
++ snd_hda_jack_poll_all(codec); |
17335 |
++ } |
17336 |
++ snd_hda_power_down_pm(codec); |
17337 |
+ |
17338 |
+ if (!codec->jackpoll_interval) |
17339 |
+ return; |
17340 |
+@@ -2958,18 +2968,14 @@ static int hda_codec_runtime_resume(struct device *dev) |
17341 |
+ static int hda_codec_force_resume(struct device *dev) |
17342 |
+ { |
17343 |
+ struct hda_codec *codec = dev_to_hda_codec(dev); |
17344 |
+- bool forced_resume = !codec->relaxed_resume && codec->jacktbl.used; |
17345 |
+ int ret; |
17346 |
+ |
17347 |
+- /* The get/put pair below enforces the runtime resume even if the |
17348 |
+- * device hasn't been used at suspend time. This trick is needed to |
17349 |
+- * update the jack state change during the sleep. |
17350 |
+- */ |
17351 |
+- if (forced_resume) |
17352 |
+- pm_runtime_get_noresume(dev); |
17353 |
+ ret = pm_runtime_force_resume(dev); |
17354 |
+- if (forced_resume) |
17355 |
+- pm_runtime_put(dev); |
17356 |
++ /* schedule jackpoll work for jack detection update */ |
17357 |
++ if (codec->jackpoll_interval || |
17358 |
++ (pm_runtime_suspended(dev) && hda_codec_need_resume(codec))) |
17359 |
++ schedule_delayed_work(&codec->jackpoll_work, |
17360 |
++ codec->jackpoll_interval); |
17361 |
+ return ret; |
17362 |
+ } |
17363 |
+ |
17364 |
+diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c |
17365 |
+index 76b507058cb4d..5e6081750bd9b 100644 |
17366 |
+--- a/sound/pci/hda/hda_controller.c |
17367 |
++++ b/sound/pci/hda/hda_controller.c |
17368 |
+@@ -1159,16 +1159,23 @@ irqreturn_t azx_interrupt(int irq, void *dev_id) |
17369 |
+ if (snd_hdac_bus_handle_stream_irq(bus, status, stream_update)) |
17370 |
+ active = true; |
17371 |
+ |
17372 |
+- /* clear rirb int */ |
17373 |
+ status = azx_readb(chip, RIRBSTS); |
17374 |
+ if (status & RIRB_INT_MASK) { |
17375 |
++ /* |
17376 |
++ * Clearing the interrupt status here ensures that no |
17377 |
++ * interrupt gets masked after the RIRB wp is read in |
17378 |
++ * snd_hdac_bus_update_rirb. This avoids a possible |
17379 |
++ * race condition where codec response in RIRB may |
17380 |
++ * remain unserviced by IRQ, eventually falling back |
17381 |
++ * to polling mode in azx_rirb_get_response. |
17382 |
++ */ |
17383 |
++ azx_writeb(chip, RIRBSTS, RIRB_INT_MASK); |
17384 |
+ active = true; |
17385 |
+ if (status & RIRB_INT_RESPONSE) { |
17386 |
+ if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) |
17387 |
+ udelay(80); |
17388 |
+ snd_hdac_bus_update_rirb(bus); |
17389 |
+ } |
17390 |
+- azx_writeb(chip, RIRBSTS, RIRB_INT_MASK); |
17391 |
+ } |
17392 |
+ } while (active && ++repeat < 10); |
17393 |
+ |
17394 |
+diff --git a/sound/pci/hda/hda_controller.h b/sound/pci/hda/hda_controller.h
+index 82e26442724ba..a356fb0e57738 100644
+--- a/sound/pci/hda/hda_controller.h
++++ b/sound/pci/hda/hda_controller.h
+@@ -41,7 +41,7 @@
+ /* 24 unused */
+ #define AZX_DCAPS_COUNT_LPIB_DELAY (1 << 25) /* Take LPIB as delay */
+ #define AZX_DCAPS_PM_RUNTIME (1 << 26) /* runtime PM support */
+-/* 27 unused */
++#define AZX_DCAPS_SUSPEND_SPURIOUS_WAKEUP (1 << 27) /* Workaround for spurious wakeups after suspend */
+ #define AZX_DCAPS_CORBRP_SELF_CLEAR (1 << 28) /* CORBRP clears itself after reset */
+ #define AZX_DCAPS_NO_MSI64 (1 << 29) /* Stick to 32-bit MSIs */
+ #define AZX_DCAPS_SEPARATE_STREAM_TAG (1 << 30) /* capture and playback use separate stream tag */
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 7353d2ec359ae..590ea262f2e20 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -295,7 +295,8 @@ enum {
+ /* PCH for HSW/BDW; with runtime PM */
+ /* no i915 binding for this as HSW/BDW has another controller for HDMI */
+ #define AZX_DCAPS_INTEL_PCH \
+- (AZX_DCAPS_INTEL_PCH_BASE | AZX_DCAPS_PM_RUNTIME)
++ (AZX_DCAPS_INTEL_PCH_BASE | AZX_DCAPS_PM_RUNTIME |\
++ AZX_DCAPS_SUSPEND_SPURIOUS_WAKEUP)
+
+ /* HSW HDMI */
+ #define AZX_DCAPS_INTEL_HASWELL \
+@@ -1002,7 +1003,8 @@ static void __azx_runtime_resume(struct azx *chip, bool from_rt)
+
+ if (status && from_rt) {
+ list_for_each_codec(codec, &chip->bus)
+- if (status & (1 << codec->addr))
++ if (!codec->relaxed_resume &&
++ (status & (1 << codec->addr)))
+ schedule_delayed_work(&codec->jackpoll_work,
+ codec->jackpoll_interval);
+ }
+@@ -1025,7 +1027,14 @@ static int azx_suspend(struct device *dev)
+ chip = card->private_data;
+ bus = azx_bus(chip);
+ snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
+- __azx_runtime_suspend(chip);
++ /* An ugly workaround: direct call of __azx_runtime_suspend() and
++ * __azx_runtime_resume() for old Intel platforms that suffer from
++ * spurious wakeups after S3 suspend
++ */
++ if (chip->driver_caps & AZX_DCAPS_SUSPEND_SPURIOUS_WAKEUP)
++ __azx_runtime_suspend(chip);
++ else
++ pm_runtime_force_suspend(dev);
+ if (bus->irq >= 0) {
+ free_irq(bus->irq, chip);
+ bus->irq = -1;
+@@ -1052,7 +1061,11 @@ static int azx_resume(struct device *dev)
+ chip->msi = 0;
+ if (azx_acquire_irq(chip, 1) < 0)
+ return -EIO;
+- __azx_runtime_resume(chip, false);
++
++ if (chip->driver_caps & AZX_DCAPS_SUSPEND_SPURIOUS_WAKEUP)
++ __azx_runtime_resume(chip, false);
++ else
++ pm_runtime_force_resume(dev);
+ snd_power_change_state(card, SNDRV_CTL_POWER_D0);
+
+ trace_azx_resume(chip);
+@@ -1099,12 +1112,12 @@ static int azx_runtime_suspend(struct device *dev)
+ if (!azx_is_pm_ready(card))
+ return 0;
+ chip = card->private_data;
+- if (!azx_has_pm_runtime(chip))
+- return 0;
+
+ /* enable controller wake up event */
+- azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) |
+- STATESTS_INT_MASK);
++ if (snd_power_get_state(card) == SNDRV_CTL_POWER_D0) {
++ azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) |
++ STATESTS_INT_MASK);
++ }
+
+ __azx_runtime_suspend(chip);
+ trace_azx_runtime_suspend(chip);
+@@ -1115,17 +1128,18 @@ static int azx_runtime_resume(struct device *dev)
+ {
+ struct snd_card *card = dev_get_drvdata(dev);
+ struct azx *chip;
++ bool from_rt = snd_power_get_state(card) == SNDRV_CTL_POWER_D0;
+
+ if (!azx_is_pm_ready(card))
+ return 0;
+ chip = card->private_data;
+- if (!azx_has_pm_runtime(chip))
+- return 0;
+- __azx_runtime_resume(chip, true);
++ __azx_runtime_resume(chip, from_rt);
+
+ /* disable controller Wake Up event*/
+- azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) &
+- ~STATESTS_INT_MASK);
++ if (from_rt) {
++ azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) &
++ ~STATESTS_INT_MASK);
++ }
+
+ trace_azx_runtime_resume(chip);
+ return 0;
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 54346ae47d112..4dfd714f718b8 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -3418,7 +3418,11 @@ static void alc256_shutup(struct hda_codec *codec)
+
+ /* 3k pull low control for Headset jack. */
+ /* NOTE: call this before clearing the pin, otherwise codec stalls */
+- alc_update_coef_idx(codec, 0x46, 0, 3 << 12);
++ /* If disable 3k pulldown control for alc257, the Mic detection will not work correctly
++ * when booting with headset plugged. So skip setting it for the codec alc257
++ */
++ if (codec->core.vendor_id != 0x10ec0257)
++ alc_update_coef_idx(codec, 0x46, 0, 3 << 12);
+
+ if (!spec->no_shutup_pins)
+ snd_hda_codec_write(codec, hp_pin, 0,
+@@ -6032,6 +6036,7 @@ static void alc_fixup_thinkpad_acpi(struct hda_codec *codec,
+ #include "hp_x360_helper.c"
+
+ enum {
++ ALC269_FIXUP_GPIO2,
+ ALC269_FIXUP_SONY_VAIO,
+ ALC275_FIXUP_SONY_VAIO_GPIO2,
+ ALC269_FIXUP_DELL_M101Z,
+@@ -6213,6 +6218,10 @@ enum {
+ };
+
+ static const struct hda_fixup alc269_fixups[] = {
++ [ALC269_FIXUP_GPIO2] = {
++ .type = HDA_FIXUP_FUNC,
++ .v.func = alc_fixup_gpio2,
++ },
+ [ALC269_FIXUP_SONY_VAIO] = {
+ .type = HDA_FIXUP_PINCTLS,
+ .v.pins = (const struct hda_pintbl[]) {
+@@ -7032,6 +7041,8 @@ static const struct hda_fixup alc269_fixups[] = {
+ [ALC233_FIXUP_LENOVO_MULTI_CODECS] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc233_alc662_fixup_lenovo_dual_codecs,
++ .chained = true,
++ .chain_id = ALC269_FIXUP_GPIO2
+ },
+ [ALC233_FIXUP_ACER_HEADSET_MIC] = {
+ .type = HDA_FIXUP_VERBS,
+diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c
+index 45da2b51543e7..6b9d326e11b07 100644
+--- a/sound/soc/codecs/max98090.c
++++ b/sound/soc/codecs/max98090.c
+@@ -2112,10 +2112,16 @@ static void max98090_pll_work(struct max98090_priv *max98090)
+
+ dev_info_ratelimited(component->dev, "PLL unlocked\n");
+
++ /*
++ * As the datasheet suggested, the maximum PLL lock time should be
++ * 7 msec. The workaround resets the codec softly by toggling SHDN
++ * off and on if PLL failed to lock for 10 msec. Notably, there is
++ * no suggested hold time for SHDN off.
++ */
++
+ /* Toggle shutdown OFF then ON */
+ snd_soc_component_update_bits(component, M98090_REG_DEVICE_SHUTDOWN,
+ M98090_SHDNN_MASK, 0);
+- msleep(10);
+ snd_soc_component_update_bits(component, M98090_REG_DEVICE_SHUTDOWN,
+ M98090_SHDNN_MASK, M98090_SHDNN_MASK);
+
+diff --git a/sound/soc/codecs/pcm3168a.c b/sound/soc/codecs/pcm3168a.c
+index 88b75695fbf7f..b37e5fbbd301a 100644
+--- a/sound/soc/codecs/pcm3168a.c
++++ b/sound/soc/codecs/pcm3168a.c
+@@ -302,6 +302,13 @@ static int pcm3168a_set_dai_sysclk(struct snd_soc_dai *dai,
+ struct pcm3168a_priv *pcm3168a = snd_soc_component_get_drvdata(dai->component);
+ int ret;
+
++ /*
++ * Some sound card sets 0 Hz as reset,
++ * but it is impossible to set. Ignore it here
++ */
++ if (freq == 0)
++ return 0;
++
+ if (freq > PCM3168A_MAX_SYSCLK)
+ return -EINVAL;
+
+diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
+index d5fb7f5dd551c..6dbab3fc6537e 100644
+--- a/sound/soc/codecs/wm8994.c
++++ b/sound/soc/codecs/wm8994.c
+@@ -3372,6 +3372,8 @@ int wm8994_mic_detect(struct snd_soc_component *component, struct snd_soc_jack *
+ return -EINVAL;
+ }
+
++ pm_runtime_get_sync(component->dev);
++
+ switch (micbias) {
+ case 1:
+ micdet = &wm8994->micdet[0];
+@@ -3419,6 +3421,8 @@ int wm8994_mic_detect(struct snd_soc_component *component, struct snd_soc_jack *
+
+ snd_soc_dapm_sync(dapm);
+
++ pm_runtime_put(component->dev);
++
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(wm8994_mic_detect);
+@@ -3786,6 +3790,8 @@ int wm8958_mic_detect(struct snd_soc_component *component, struct snd_soc_jack *
+ return -EINVAL;
+ }
+
++ pm_runtime_get_sync(component->dev);
++
+ if (jack) {
+ snd_soc_dapm_force_enable_pin(dapm, "CLK_SYS");
+ snd_soc_dapm_sync(dapm);
+@@ -3854,6 +3860,8 @@ int wm8958_mic_detect(struct snd_soc_component *component, struct snd_soc_jack *
+ snd_soc_dapm_sync(dapm);
+ }
+
++ pm_runtime_put(component->dev);
++
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(wm8958_mic_detect);
+@@ -4047,11 +4055,13 @@ static int wm8994_component_probe(struct snd_soc_component *component)
+ wm8994->hubs.dcs_readback_mode = 2;
+ break;
+ }
++ wm8994->hubs.micd_scthr = true;
+ break;
+
+ case WM8958:
+ wm8994->hubs.dcs_readback_mode = 1;
+ wm8994->hubs.hp_startup_mode = 1;
++ wm8994->hubs.micd_scthr = true;
+
+ switch (control->revision) {
+ case 0:
+diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c
+index e93af7edd8f75..dd421e2fe7b21 100644
+--- a/sound/soc/codecs/wm_hubs.c
++++ b/sound/soc/codecs/wm_hubs.c
+@@ -1223,6 +1223,9 @@ int wm_hubs_handle_analogue_pdata(struct snd_soc_component *component,
+ snd_soc_component_update_bits(component, WM8993_ADDITIONAL_CONTROL,
+ WM8993_LINEOUT2_FB, WM8993_LINEOUT2_FB);
+
++ if (!hubs->micd_scthr)
++ return 0;
++
+ snd_soc_component_update_bits(component, WM8993_MICBIAS,
+ WM8993_JD_SCTHR_MASK | WM8993_JD_THR_MASK |
+ WM8993_MICB1_LVL | WM8993_MICB2_LVL,
+diff --git a/sound/soc/codecs/wm_hubs.h b/sound/soc/codecs/wm_hubs.h
+index 4b8e5f0d6e32d..988b29e630607 100644
+--- a/sound/soc/codecs/wm_hubs.h
++++ b/sound/soc/codecs/wm_hubs.h
+@@ -27,6 +27,7 @@ struct wm_hubs_data {
+ int hp_startup_mode;
+ int series_startup;
+ int no_series_update;
++ bool micd_scthr;
+
+ bool no_cache_dac_hp_direct;
+ struct list_head dcs_cache;
+diff --git a/sound/soc/img/img-i2s-out.c b/sound/soc/img/img-i2s-out.c
+index 4b18534096336..9c4212f2f7269 100644
+--- a/sound/soc/img/img-i2s-out.c
++++ b/sound/soc/img/img-i2s-out.c
+@@ -347,8 +347,10 @@ static int img_i2s_out_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+ chan_control_mask = IMG_I2S_OUT_CHAN_CTL_CLKT_MASK;
+
+ ret = pm_runtime_get_sync(i2s->dev);
+- if (ret < 0)
++ if (ret < 0) {
++ pm_runtime_put_noidle(i2s->dev);
+ return ret;
++ }
+
+ img_i2s_out_disable(i2s);
+
+@@ -488,8 +490,10 @@ static int img_i2s_out_probe(struct platform_device *pdev)
+ goto err_pm_disable;
+ }
+ ret = pm_runtime_get_sync(&pdev->dev);
+- if (ret < 0)
++ if (ret < 0) {
++ pm_runtime_put_noidle(&pdev->dev);
+ goto err_suspend;
++ }
+
+ reg = IMG_I2S_OUT_CTL_FRM_SIZE_MASK;
+ img_i2s_out_writel(i2s, reg, IMG_I2S_OUT_CTL);
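Note on the img-i2s-out.c hunks above: both apply the canonical pm_runtime_get_sync() error idiom. The call increments the device usage counter even when the resume fails, so a failing path must drop that reference or the device can never runtime-suspend again. Sketched against a generic struct device *dev:

    ret = pm_runtime_get_sync(dev);
    if (ret < 0) {
        /* the usage count was bumped even though resume failed */
        pm_runtime_put_noidle(dev);
        return ret;
    }
    /* ... access the hardware ... */
    pm_runtime_put(dev);

Later kernels wrap the first half of this pattern in pm_runtime_resume_and_get().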
+diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
+index f7964d1ec486f..6012367f6fe48 100644
+--- a/sound/soc/intel/boards/bytcr_rt5640.c
++++ b/sound/soc/intel/boards/bytcr_rt5640.c
+@@ -591,6 +591,16 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
+ BYT_RT5640_SSP0_AIF1 |
+ BYT_RT5640_MCLK_EN),
+ },
++ { /* MPMAN Converter 9, similar hw as the I.T.Works TW891 2-in-1 */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "MPMAN"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Converter9"),
++ },
++ .driver_data = (void *)(BYTCR_INPUT_DEFAULTS |
++ BYT_RT5640_MONO_SPEAKER |
++ BYT_RT5640_SSP0_AIF1 |
++ BYT_RT5640_MCLK_EN),
++ },
+ {
+ /* MPMAN MPWIN895CL */
+ .matches = {
+diff --git a/sound/soc/kirkwood/kirkwood-dma.c b/sound/soc/kirkwood/kirkwood-dma.c
+index 6f69f314f2c2a..d2d5c25bf5502 100644
+--- a/sound/soc/kirkwood/kirkwood-dma.c
++++ b/sound/soc/kirkwood/kirkwood-dma.c
+@@ -132,7 +132,7 @@ static int kirkwood_dma_open(struct snd_pcm_substream *substream)
+ err = request_irq(priv->irq, kirkwood_dma_irq, IRQF_SHARED,
+ "kirkwood-i2s", priv);
+ if (err)
+- return -EBUSY;
++ return err;
+
+ /*
+ * Enable Error interrupts. We're only ack'ing them but
+diff --git a/sound/soc/sof/ipc.c b/sound/soc/sof/ipc.c
+index e7b1a80e2a14c..f38f651da2246 100644
+--- a/sound/soc/sof/ipc.c
++++ b/sound/soc/sof/ipc.c
+@@ -215,15 +215,17 @@ static int tx_wait_done(struct snd_sof_ipc *ipc, struct snd_sof_ipc_msg *msg,
+ snd_sof_trace_notify_for_error(ipc->sdev);
+ ret = -ETIMEDOUT;
+ } else {
+- /* copy the data returned from DSP */
+ ret = msg->reply_error;
+- if (msg->reply_size)
+- memcpy(reply_data, msg->reply_data, msg->reply_size);
+- if (ret < 0)
++ if (ret < 0) {
+ dev_err(sdev->dev, "error: ipc error for 0x%x size %zu\n",
+ hdr->cmd, msg->reply_size);
+- else
++ } else {
+ ipc_log_header(sdev->dev, "ipc tx succeeded", hdr->cmd);
++ if (msg->reply_size)
++ /* copy the data returned from DSP */
++ memcpy(reply_data, msg->reply_data,
++ msg->reply_size);
++ }
+ }
+
+ return ret;
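Note on the sof/ipc.c hunk above: it enforces copy-on-success, that is, never hand payload back from a transaction whose status is an error, since the reply buffer may be stale or only partially written. Reduced to its shape (field names taken from the hunk):

    ret = msg->reply_error;
    if (ret < 0)
        dev_err(dev, "ipc error %d\n", ret);  /* no memcpy on error */
    else if (msg->reply_size)
        memcpy(reply_data, msg->reply_data, msg->reply_size);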
+diff --git a/sound/usb/midi.c b/sound/usb/midi.c
+index 0cb4142b05f64..bc9068b616bb9 100644
+--- a/sound/usb/midi.c
++++ b/sound/usb/midi.c
+@@ -1827,6 +1827,28 @@ static int snd_usbmidi_create_endpoints(struct snd_usb_midi *umidi,
+ return 0;
+ }
+
++static struct usb_ms_endpoint_descriptor *find_usb_ms_endpoint_descriptor(
++ struct usb_host_endpoint *hostep)
++{
++ unsigned char *extra = hostep->extra;
++ int extralen = hostep->extralen;
++
++ while (extralen > 3) {
++ struct usb_ms_endpoint_descriptor *ms_ep =
++ (struct usb_ms_endpoint_descriptor *)extra;
++
++ if (ms_ep->bLength > 3 &&
++ ms_ep->bDescriptorType == USB_DT_CS_ENDPOINT &&
++ ms_ep->bDescriptorSubtype == UAC_MS_GENERAL)
++ return ms_ep;
++ if (!extra[0])
++ break;
++ extralen -= extra[0];
++ extra += extra[0];
++ }
++ return NULL;
++}
++
+ /*
+ * Returns MIDIStreaming device capabilities.
+ */
+@@ -1864,11 +1886,8 @@ static int snd_usbmidi_get_ms_info(struct snd_usb_midi *umidi,
+ ep = get_ep_desc(hostep);
+ if (!usb_endpoint_xfer_bulk(ep) && !usb_endpoint_xfer_int(ep))
+ continue;
+- ms_ep = (struct usb_ms_endpoint_descriptor *)hostep->extra;
+- if (hostep->extralen < 4 ||
+- ms_ep->bLength < 4 ||
+- ms_ep->bDescriptorType != USB_DT_CS_ENDPOINT ||
+- ms_ep->bDescriptorSubtype != UAC_MS_GENERAL)
++ ms_ep = find_usb_ms_endpoint_descriptor(hostep);
++ if (!ms_ep)
+ continue;
+ if (usb_endpoint_dir_out(ep)) {
+ if (endpoints[epidx].out_ep) {
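Note on the usb/midi.c hunk above: the old code cast hostep->extra to a single descriptor and gave up if the first record did not match; the new helper walks the whole blob of length-prefixed class-specific descriptors. The same walk in generic form, with a hypothetical desc_hdr standing in for the USB descriptor header:

    struct desc_hdr { unsigned char bLength, bDescriptorType; };

    static const struct desc_hdr *find_desc(const unsigned char *buf,
                                            int len, unsigned char wanted)
    {
        while (len > (int)sizeof(struct desc_hdr)) {
            const struct desc_hdr *d = (const struct desc_hdr *)buf;

            if (d->bLength == 0 || d->bLength > len)
                break;              /* malformed blob: stop, don't spin */
            if (d->bDescriptorType == wanted)
                return d;
            len -= d->bLength;
            buf += d->bLength;
        }
        return NULL;                /* not present on this endpoint */
    }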
+diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
+index 9079c380228fc..8aa96ed0b1b56 100644
+--- a/sound/usb/mixer.c
++++ b/sound/usb/mixer.c
+@@ -1684,6 +1684,16 @@ static void __build_feature_ctl(struct usb_mixer_interface *mixer,
+ /* get min/max values */
+ get_min_max_with_quirks(cval, 0, kctl);
+
++ /* skip a bogus volume range */
++ if (cval->max <= cval->min) {
++ usb_audio_dbg(mixer->chip,
++ "[%d] FU [%s] skipped due to invalid volume\n",
++ cval->head.id, kctl->id.name);
++ snd_ctl_free_one(kctl);
++ return;
++ }
++
++
+ if (control == UAC_FU_VOLUME) {
+ check_mapped_dB(map, cval);
+ if (cval->dBmin < cval->dBmax || !cval->initialized) {
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index a756f50d9f078..cc75d9749e9fa 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -1604,12 +1604,13 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe,
+ && (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
+ msleep(20);
+
+- /* Zoom R16/24, Logitech H650e, Jabra 550a, Kingston HyperX needs a tiny
+- * delay here, otherwise requests like get/set frequency return as
+- * failed despite actually succeeding.
++ /* Zoom R16/24, Logitech H650e/H570e, Jabra 550a, Kingston HyperX
++ * needs a tiny delay here, otherwise requests like get/set
++ * frequency return as failed despite actually succeeding.
+ */
+ if ((chip->usb_id == USB_ID(0x1686, 0x00dd) ||
+ chip->usb_id == USB_ID(0x046d, 0x0a46) ||
++ chip->usb_id == USB_ID(0x046d, 0x0a56) ||
+ chip->usb_id == USB_ID(0x0b0e, 0x0349) ||
+ chip->usb_id == USB_ID(0x0951, 0x16ad)) &&
+ (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
+diff --git a/tools/gpio/gpio-hammer.c b/tools/gpio/gpio-hammer.c
+index 0e0060a6eb346..083399d276e4e 100644
+--- a/tools/gpio/gpio-hammer.c
++++ b/tools/gpio/gpio-hammer.c
+@@ -135,7 +135,14 @@ int main(int argc, char **argv)
+ device_name = optarg;
+ break;
+ case 'o':
+- lines[i] = strtoul(optarg, NULL, 10);
++ /*
++ * Avoid overflow. Do not immediately error, we want to
++ * be able to accurately report on the amount of times
++ * '-o' was given to give an accurate error message
++ */
++ if (i < GPIOHANDLES_MAX)
++ lines[i] = strtoul(optarg, NULL, 10);
++
+ i++;
+ break;
+ case '?':
+@@ -143,6 +150,14 @@ int main(int argc, char **argv)
+ return -1;
+ }
+ }
++
++ if (i >= GPIOHANDLES_MAX) {
++ fprintf(stderr,
++ "Only %d occurences of '-o' are allowed, %d were found\n",
++ GPIOHANDLES_MAX, i + 1);
++ return -1;
++ }
++
+ nlines = i;
+
+ if (!device_name || !nlines) {
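Note on the gpio-hammer.c hunk above: it separates counting from storing, so the write into the array is clamped while the counter keeps running, and the final bounds check can still report how many '-o' options were really passed. The general shape:

    if (n < MAX_ITEMS)              /* clamp the write ... */
        items[n] = parse(optarg);
    n++;                            /* ... but keep counting */

    /* after option parsing */
    if (n > MAX_ITEMS) {
        fprintf(stderr, "too many items: %d (max %d)\n", n, MAX_ITEMS);
        return -1;
    }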
+diff --git a/tools/objtool/check.c b/tools/objtool/check.c
+index 48b234d8f251e..1b7e748170e54 100644
+--- a/tools/objtool/check.c
++++ b/tools/objtool/check.c
+@@ -556,7 +556,7 @@ static int add_jump_destinations(struct objtool_file *file)
+ insn->type != INSN_JUMP_UNCONDITIONAL)
+ continue;
+
+- if (insn->ignore || insn->offset == FAKE_JUMP_OFFSET)
++ if (insn->offset == FAKE_JUMP_OFFSET)
+ continue;
+
+ rela = find_rela_by_dest_range(insn->sec, insn->offset,
+diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
+index 468fc49420ce1..ac2feddc75fdd 100644
+--- a/tools/perf/builtin-stat.c
++++ b/tools/perf/builtin-stat.c
+@@ -351,7 +351,7 @@ static void process_interval(void)
+ }
+
+ init_stats(&walltime_nsecs_stats);
+- update_stats(&walltime_nsecs_stats, stat_config.interval * 1000000);
++ update_stats(&walltime_nsecs_stats, stat_config.interval * 1000000ULL);
+ print_counters(&rs, 0, NULL);
+ }
+
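Note on the builtin-stat.c hunk above: this is a plain C integer-promotion bug. With a 32-bit interval, interval * 1000000 is computed in 32-bit arithmetic and wraps for intervals above roughly 4294 ms, even though the product is then stored into a 64-bit statistic; the ULL suffix forces the multiplication itself into 64 bits. Reduced to its essence:

    unsigned int interval_ms = 5000;
    unsigned long long ns;

    ns = interval_ms * 1000000;     /* 32-bit multiply: wraps at 2^32 */
    ns = interval_ms * 1000000ULL;  /* 64-bit multiply: 5000000000 */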
+diff --git a/tools/perf/pmu-events/jevents.c b/tools/perf/pmu-events/jevents.c
+index d36ae65ae3330..f4a0d72246cb7 100644
+--- a/tools/perf/pmu-events/jevents.c
++++ b/tools/perf/pmu-events/jevents.c
+@@ -1068,10 +1068,9 @@ static int process_one_file(const char *fpath, const struct stat *sb,
+ */
+ int main(int argc, char *argv[])
+ {
+- int rc;
++ int rc, ret = 0;
+ int maxfds;
+ char ldirname[PATH_MAX];
+-
+ const char *arch;
+ const char *output_file;
+ const char *start_dirname;
+@@ -1142,7 +1141,8 @@ int main(int argc, char *argv[])
+ /* Make build fail */
+ fclose(eventsfp);
+ free_arch_std_events();
+- return 1;
++ ret = 1;
++ goto out_free_mapfile;
+ } else if (rc) {
+ goto empty_map;
+ }
+@@ -1160,14 +1160,17 @@ int main(int argc, char *argv[])
+ /* Make build fail */
+ fclose(eventsfp);
+ free_arch_std_events();
+- return 1;
++ ret = 1;
+ }
+
+- return 0;
++
++ goto out_free_mapfile;
+
+ empty_map:
+ fclose(eventsfp);
+ create_empty_mapping(output_file);
+ free_arch_std_events();
+- return 0;
++out_free_mapfile:
++ free(mapfile);
++ return ret;
+ }
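Note on the jevents.c hunk above: it converts scattered early returns into the single-exit goto cleanup style so that mapfile is freed on every path. The idiom in miniature, with step1() as a stand-in for real work:

    #include <stdlib.h>
    #include <string.h>

    static int step1(void) { return -1; }    /* pretend this can fail */

    int main(void)
    {
        int ret = 0;
        char *buf = strdup("resource");      /* needs cleanup on all paths */

        if (step1() < 0) {
            ret = 1;
            goto out_free;                   /* error path reuses cleanup */
        }
    out_free:
        free(buf);                           /* single cleanup point */
        return ret;
    }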
+diff --git a/tools/perf/tests/shell/lib/probe_vfs_getname.sh b/tools/perf/tests/shell/lib/probe_vfs_getname.sh
+index 7cb99b433888b..c2cc42daf9242 100644
+--- a/tools/perf/tests/shell/lib/probe_vfs_getname.sh
++++ b/tools/perf/tests/shell/lib/probe_vfs_getname.sh
+@@ -14,7 +14,7 @@ add_probe_vfs_getname() {
+ if [ $had_vfs_getname -eq 1 ] ; then
+ line=$(perf probe -L getname_flags 2>&1 | egrep 'result.*=.*filename;' | sed -r 's/[[:space:]]+([[:digit:]]+)[[:space:]]+result->uptr.*/\1/')
+ perf probe -q "vfs_getname=getname_flags:${line} pathname=result->name:string" || \
+- perf probe $verbose "vfs_getname=getname_flags:${line} pathname=filename:string"
++ perf probe $verbose "vfs_getname=getname_flags:${line} pathname=filename:ustring"
+ fi
+ }
+
+diff --git a/tools/perf/tests/shell/record+zstd_comp_decomp.sh b/tools/perf/tests/shell/record+zstd_comp_decomp.sh
+index 63a91ec473bb5..045723b3d9928 100755
+--- a/tools/perf/tests/shell/record+zstd_comp_decomp.sh
++++ b/tools/perf/tests/shell/record+zstd_comp_decomp.sh
+@@ -12,7 +12,8 @@ skip_if_no_z_record() {
+
+ collect_z_record() {
+ echo "Collecting compressed record file:"
+- $perf_tool record -o $trace_file -g -z -F 5000 -- \
++ [[ "$(uname -m)" != s390x ]] && gflag='-g'
++ $perf_tool record -o $trace_file $gflag -z -F 5000 -- \
+ dd count=500 if=/dev/urandom of=/dev/null
+ }
+
+diff --git a/tools/perf/trace/beauty/arch_errno_names.sh b/tools/perf/trace/beauty/arch_errno_names.sh
+index 22c9fc900c847..f8c44a85650be 100755
+--- a/tools/perf/trace/beauty/arch_errno_names.sh
++++ b/tools/perf/trace/beauty/arch_errno_names.sh
+@@ -91,7 +91,7 @@ EoHEADER
+ # in tools/perf/arch
+ archlist=""
+ for arch in $(find $toolsdir/arch -maxdepth 1 -mindepth 1 -type d -printf "%f\n" | grep -v x86 | sort); do
+- test -d arch/$arch && archlist="$archlist $arch"
++ test -d $toolsdir/perf/arch/$arch && archlist="$archlist $arch"
+ done
+
+ for arch in x86 $archlist generic; do
+diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c
+index a22c1114e880d..324ec0456c83f 100644
+--- a/tools/perf/util/cpumap.c
++++ b/tools/perf/util/cpumap.c
+@@ -299,7 +299,7 @@ static void set_max_cpu_num(void)
+
+ /* get the highest possible cpu number for a sparse allocation */
+ ret = snprintf(path, PATH_MAX, "%s/devices/system/cpu/possible", mnt);
+- if (ret == PATH_MAX) {
++ if (ret >= PATH_MAX) {
+ pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
+ goto out;
+ }
+@@ -310,7 +310,7 @@ static void set_max_cpu_num(void)
+
+ /* get the highest present cpu number for a sparse allocation */
+ ret = snprintf(path, PATH_MAX, "%s/devices/system/cpu/present", mnt);
+- if (ret == PATH_MAX) {
++ if (ret >= PATH_MAX) {
+ pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
+ goto out;
+ }
+@@ -338,7 +338,7 @@ static void set_max_node_num(void)
+
+ /* get the highest possible cpu number for a sparse allocation */
+ ret = snprintf(path, PATH_MAX, "%s/devices/system/node/possible", mnt);
+- if (ret == PATH_MAX) {
++ if (ret >= PATH_MAX) {
+ pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
+ goto out;
+ }
+@@ -423,7 +423,7 @@ int cpu__setup_cpunode_map(void)
+ return 0;
+
+ n = snprintf(path, PATH_MAX, "%s/devices/system/node", mnt);
+- if (n == PATH_MAX) {
++ if (n >= PATH_MAX) {
+ pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
+ return -1;
+ }
+@@ -438,7 +438,7 @@ int cpu__setup_cpunode_map(void)
+ continue;
+
+ n = snprintf(buf, PATH_MAX, "%s/%s", path, dent1->d_name);
+- if (n == PATH_MAX) {
++ if (n >= PATH_MAX) {
+ pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
+ continue;
+ }
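Note on the cpumap.c hunks above: snprintf() returns the length the formatted string would have had without truncation, so on overflow the return value can be anything greater than or equal to the buffer size; testing == PATH_MAX catches only one special case. A self-contained illustration:

    #include <stdio.h>

    static int build_path(char *path, size_t sz, const char *mnt)
    {
        int n = snprintf(path, sz, "%s/devices/system/cpu/possible", mnt);

        /* n is the untruncated length; output was truncated iff n >= sz */
        if (n < 0 || (size_t)n >= sz)
            return -1;
        return 0;
    }

With a 16-byte buffer and a long mount point, n might be 40; the == test would never fire.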
+diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c
+index f5f855fff412e..451eee24165ee 100644
+--- a/tools/perf/util/cs-etm.c
++++ b/tools/perf/util/cs-etm.c
+@@ -363,6 +363,23 @@ struct cs_etm_packet_queue
+ return NULL;
+ }
+
++static void cs_etm__packet_swap(struct cs_etm_auxtrace *etm,
++ struct cs_etm_traceid_queue *tidq)
++{
++ struct cs_etm_packet *tmp;
++
++ if (etm->sample_branches || etm->synth_opts.last_branch ||
++ etm->sample_instructions) {
++ /*
++ * Swap PACKET with PREV_PACKET: PACKET becomes PREV_PACKET for
++ * the next incoming packet.
++ */
++ tmp = tidq->packet;
++ tidq->packet = tidq->prev_packet;
++ tidq->prev_packet = tmp;
++ }
++}
++
+ static void cs_etm__packet_dump(const char *pkt_string)
+ {
+ const char *color = PERF_COLOR_BLUE;
+@@ -1340,12 +1357,14 @@ static int cs_etm__sample(struct cs_etm_queue *etmq,
+ struct cs_etm_traceid_queue *tidq)
+ {
+ struct cs_etm_auxtrace *etm = etmq->etm;
+- struct cs_etm_packet *tmp;
+ int ret;
+ u8 trace_chan_id = tidq->trace_chan_id;
+- u64 instrs_executed = tidq->packet->instr_count;
++ u64 instrs_prev;
++
++ /* Get instructions remainder from previous packet */
++ instrs_prev = tidq->period_instructions;
+
+- tidq->period_instructions += instrs_executed;
++ tidq->period_instructions += tidq->packet->instr_count;
+
+ /*
+ * Record a branch when the last instruction in
+@@ -1363,26 +1382,76 @@ static int cs_etm__sample(struct cs_etm_queue *etmq,
+ * TODO: allow period to be defined in cycles and clock time
+ */
+
+- /* Get number of instructions executed after the sample point */
+- u64 instrs_over = tidq->period_instructions -
+- etm->instructions_sample_period;
++ /*
++ * Below diagram demonstrates the instruction samples
++ * generation flows:
++ *
++ * Instrs Instrs Instrs Instrs
++ * Sample(n) Sample(n+1) Sample(n+2) Sample(n+3)
++ * | | | |
++ * V V V V
++ * --------------------------------------------------
++ * ^ ^
++ * | |
++ * Period Period
++ * instructions(Pi) instructions(Pi')
++ *
++ * | |
++ * \---------------- -----------------/
++ * V
++ * tidq->packet->instr_count
++ *
++ * Instrs Sample(n...) are the synthesised samples occurring
++ * every etm->instructions_sample_period instructions - as
++ * defined on the perf command line. Sample(n) is being the
++ * last sample before the current etm packet, n+1 to n+3
++ * samples are generated from the current etm packet.
++ *
++ * tidq->packet->instr_count represents the number of
++ * instructions in the current etm packet.
++ *
++ * Period instructions (Pi) contains the the number of
++ * instructions executed after the sample point(n) from the
++ * previous etm packet. This will always be less than
++ * etm->instructions_sample_period.
++ *
++ * When generate new samples, it combines with two parts
++ * instructions, one is the tail of the old packet and another
++ * is the head of the new coming packet, to generate
++ * sample(n+1); sample(n+2) and sample(n+3) consume the
++ * instructions with sample period. After sample(n+3), the rest
++ * instructions will be used by later packet and it is assigned
++ * to tidq->period_instructions for next round calculation.
++ */
+
+ /*
+- * Calculate the address of the sampled instruction (-1 as
+- * sample is reported as though instruction has just been
+- * executed, but PC has not advanced to next instruction)
++ * Get the initial offset into the current packet instructions;
++ * entry conditions ensure that instrs_prev is less than
++ * etm->instructions_sample_period.
+ */
+- u64 offset = (instrs_executed - instrs_over - 1);
+- u64 addr = cs_etm__instr_addr(etmq, trace_chan_id,
+- tidq->packet, offset);
++ u64 offset = etm->instructions_sample_period - instrs_prev;
++ u64 addr;
+
+- ret = cs_etm__synth_instruction_sample(
+- etmq, tidq, addr, etm->instructions_sample_period);
+- if (ret)
+- return ret;
++ while (tidq->period_instructions >=
++ etm->instructions_sample_period) {
++ /*
++ * Calculate the address of the sampled instruction (-1
++ * as sample is reported as though instruction has just
++ * been executed, but PC has not advanced to next
++ * instruction)
++ */
++ addr = cs_etm__instr_addr(etmq, trace_chan_id,
++ tidq->packet, offset - 1);
++ ret = cs_etm__synth_instruction_sample(
++ etmq, tidq, addr,
++ etm->instructions_sample_period);
++ if (ret)
++ return ret;
+
+- /* Carry remaining instructions into next sample period */
+- tidq->period_instructions = instrs_over;
++ offset += etm->instructions_sample_period;
++ tidq->period_instructions -=
++ etm->instructions_sample_period;
++ }
+ }
+
+ if (etm->sample_branches) {
+@@ -1404,15 +1473,7 @@ static int cs_etm__sample(struct cs_etm_queue *etmq,
+ }
+ }
+
+- if (etm->sample_branches || etm->synth_opts.last_branch) {
+- /*
+- * Swap PACKET with PREV_PACKET: PACKET becomes PREV_PACKET for
+- * the next incoming packet.
+- */
+- tmp = tidq->packet;
+- tidq->packet = tidq->prev_packet;
+- tidq->prev_packet = tmp;
+- }
++ cs_etm__packet_swap(etm, tidq);
+
+ return 0;
+ }
+@@ -1441,7 +1502,6 @@ static int cs_etm__flush(struct cs_etm_queue *etmq,
+ {
+ int err = 0;
+ struct cs_etm_auxtrace *etm = etmq->etm;
+- struct cs_etm_packet *tmp;
+
+ /* Handle start tracing packet */
+ if (tidq->prev_packet->sample_type == CS_ETM_EMPTY)
+@@ -1476,15 +1536,7 @@ static int cs_etm__flush(struct cs_etm_queue *etmq,
+ }
+ }
+
+ swap_packet:
+- if (etm->sample_branches || etm->synth_opts.last_branch) {
+- /*
+- * Swap PACKET with PREV_PACKET: PACKET becomes PREV_PACKET for
+- * the next incoming packet.
+- */
+- tmp = tidq->packet;
+- tidq->packet = tidq->prev_packet;
+- tidq->prev_packet = tmp;
+- }
++ cs_etm__packet_swap(etm, tidq);
+
+ return err;
+ }
+diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
+index a844715a352d8..9dd9e3f4ef591 100644
+--- a/tools/perf/util/evsel.c
++++ b/tools/perf/util/evsel.c
+@@ -1254,6 +1254,9 @@ void perf_evsel__exit(struct evsel *evsel)
+ perf_thread_map__put(evsel->core.threads);
+ zfree(&evsel->group_name);
+ zfree(&evsel->name);
++ zfree(&evsel->pmu_name);
++ zfree(&evsel->per_pkg_mask);
++ zfree(&evsel->metric_events);
+ perf_evsel__object.fini(evsel);
+ }
+
+@@ -2357,6 +2360,10 @@ bool perf_evsel__fallback(struct evsel *evsel, int err,
+ char *new_name;
+ const char *sep = ":";
+
++ /* If event has exclude user then don't exclude kernel. */
++ if (evsel->core.attr.exclude_user)
++ return false;
++
+ /* Is there already the separator in the name. */
+ if (strchr(name, '/') ||
+ strchr(name, ':'))
+diff --git a/tools/perf/util/mem2node.c b/tools/perf/util/mem2node.c
+index 797d86a1ab095..c84f5841c7abd 100644
+--- a/tools/perf/util/mem2node.c
++++ b/tools/perf/util/mem2node.c
+@@ -1,5 +1,6 @@
+ #include <errno.h>
+ #include <inttypes.h>
++#include <asm/bug.h>
+ #include <linux/bitmap.h>
+ #include <linux/kernel.h>
+ #include <linux/zalloc.h>
+@@ -95,7 +96,7 @@ int mem2node__init(struct mem2node *map, struct perf_env *env)
+
+ /* Cut unused entries, due to merging. */
+ tmp_entries = realloc(entries, sizeof(*entries) * j);
+- if (tmp_entries)
++ if (tmp_entries || WARN_ON_ONCE(j == 0))
+ entries = tmp_entries;
+
+ for (i = 0; i < j; i++) {
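Note on the mem2node.c hunk above: realloc(p, 0) is permitted to free p and return NULL, so a bare "NULL means allocation failed, keep the old pointer" rule can leave a dangling pointer when the merged count is zero; the WARN_ON_ONCE(j == 0) arm accepts the NULL in that case. The corner in isolation:

    #include <stdlib.h>

    static void *shrink(void *p, size_t n, size_t elem_size)
    {
        void *tmp = realloc(p, n * elem_size);

        /* For n == 0, NULL is a legitimate result and p may already
         * have been freed, so it must not be kept as the old block. */
        if (tmp || n == 0)
            return tmp;
        return p;       /* genuine failure: the old block is still valid */
    }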
+diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c
+index 940a6e7a68549..7753c3091478a 100644
+--- a/tools/perf/util/metricgroup.c
++++ b/tools/perf/util/metricgroup.c
+@@ -174,6 +174,7 @@ static int metricgroup__setup_events(struct list_head *groups,
+ if (!evsel) {
+ pr_debug("Cannot resolve %s: %s\n",
+ eg->metric_name, eg->metric_expr);
++ free(metric_events);
+ continue;
+ }
+ for (i = 0; i < eg->idnum; i++)
+@@ -181,11 +182,13 @@ static int metricgroup__setup_events(struct list_head *groups,
+ me = metricgroup__lookup(metric_events_list, evsel, true);
+ if (!me) {
+ ret = -ENOMEM;
++ free(metric_events);
+ break;
+ }
+ expr = malloc(sizeof(struct metric_expr));
+ if (!expr) {
+ ret = -ENOMEM;
++ free(metric_events);
+ break;
+ }
+ expr->metric_expr = eg->metric_expr;
+diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
+index 759a99f723fc3..2d651c93b476f 100644
+--- a/tools/perf/util/parse-events.c
++++ b/tools/perf/util/parse-events.c
+@@ -1344,7 +1344,7 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
+ evsel = __add_event(list, &parse_state->idx, &attr, NULL, pmu, NULL,
+ auto_merge_stats, NULL);
+ if (evsel) {
+- evsel->pmu_name = name;
++ evsel->pmu_name = name ? strdup(name) : NULL;
+ evsel->use_uncore_alias = use_uncore_alias;
+ return 0;
+ } else {
+@@ -1385,7 +1385,7 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
+ evsel->snapshot = info.snapshot;
+ evsel->metric_expr = info.metric_expr;
+ evsel->metric_name = info.metric_name;
+- evsel->pmu_name = name;
++ evsel->pmu_name = name ? strdup(name) : NULL;
+ evsel->use_uncore_alias = use_uncore_alias;
+ evsel->percore = config_term_percore(&evsel->config_terms);
+ }
+@@ -1505,12 +1505,11 @@ parse_events__set_leader_for_uncore_aliase(char *name, struct list_head *list,
+ * event. That can be used to distinguish the leader from
+ * other members, even they have the same event name.
+ */
+- if ((leader != evsel) && (leader->pmu_name == evsel->pmu_name)) {
++ if ((leader != evsel) &&
++ !strcmp(leader->pmu_name, evsel->pmu_name)) {
+ is_leader = false;
+ continue;
+ }
+- /* The name is always alias name */
+- WARN_ON(strcmp(leader->name, evsel->name));
+
+ /* Store the leader event for each PMU */
+ leaders[nr_pmu++] = (uintptr_t) evsel;
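Note on the parse-events.c hunks above: two related ownership fixes. evsel->pmu_name now holds its own strdup() copy, matching the zfree() added to perf_evsel__exit() earlier in this patch, and the leader check compares string contents with strcmp() rather than pointers, which only told us whether two fields aliased the same allocation. In miniature:

    #include <stdlib.h>
    #include <string.h>

    struct evsel { char *pmu_name; };

    static void set_pmu_name(struct evsel *e, const char *name)
    {
        e->pmu_name = name ? strdup(name) : NULL;   /* own the copy */
    }

    static int same_pmu(const struct evsel *a, const struct evsel *b)
    {
        /* a->pmu_name == b->pmu_name would compare addresses only */
        return a->pmu_name && b->pmu_name &&
               !strcmp(a->pmu_name, b->pmu_name);
    }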
+diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
+index 43d1d410854a3..4027906fd3e38 100644
+--- a/tools/perf/util/sort.c
++++ b/tools/perf/util/sort.c
+@@ -2788,7 +2788,7 @@ static char *prefix_if_not_in(const char *pre, char *str)
+ return str;
+
+ if (asprintf(&n, "%s,%s", pre, str) < 0)
+- return NULL;
++ n = NULL;
+
+ free(str);
+ return n;
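Note on the sort.c hunk above: on failure asprintf() leaves the output pointer indeterminate, and the old early return also leaked str; assigning n = NULL and falling through both frees str on every path and hands the caller a well-defined NULL. The resulting shape (the real function also checks whether pre is already present in str):

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <stdlib.h>

    static char *prefix_join(const char *pre, char *str)
    {
        char *n;

        if (asprintf(&n, "%s,%s", pre, str) < 0)
            n = NULL;   /* asprintf leaves n indeterminate on failure */

        free(str);      /* str is consumed on every path: no leak */
        return n;
    }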
+diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c |
18343 |
+index 66f4be1df573e..2ec0a32da5793 100644 |
18344 |
+--- a/tools/perf/util/symbol-elf.c |
18345 |
++++ b/tools/perf/util/symbol-elf.c |
18346 |
+@@ -1449,6 +1449,7 @@ struct kcore_copy_info { |
18347 |
+ u64 first_symbol; |
18348 |
+ u64 last_symbol; |
18349 |
+ u64 first_module; |
18350 |
++ u64 first_module_symbol; |
18351 |
+ u64 last_module_symbol; |
18352 |
+ size_t phnum; |
18353 |
+ struct list_head phdrs; |
18354 |
+@@ -1525,6 +1526,8 @@ static int kcore_copy__process_kallsyms(void *arg, const char *name, char type, |
18355 |
+ return 0; |
18356 |
+ |
18357 |
+ if (strchr(name, '[')) { |
18358 |
++ if (!kci->first_module_symbol || start < kci->first_module_symbol) |
18359 |
++ kci->first_module_symbol = start; |
18360 |
+ if (start > kci->last_module_symbol) |
18361 |
+ kci->last_module_symbol = start; |
18362 |
+ return 0; |
18363 |
+@@ -1722,6 +1725,10 @@ static int kcore_copy__calc_maps(struct kcore_copy_info *kci, const char *dir, |
18364 |
+ kci->etext += page_size; |
18365 |
+ } |
18366 |
+ |
18367 |
++ if (kci->first_module_symbol && |
18368 |
++ (!kci->first_module || kci->first_module_symbol < kci->first_module)) |
18369 |
++ kci->first_module = kci->first_module_symbol; |
18370 |
++ |
18371 |
+ kci->first_module = round_down(kci->first_module, page_size); |
18372 |
+ |
18373 |
+ if (kci->last_module_symbol) { |
18374 |
+diff --git a/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py b/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py |
18375 |
+index 2d6d342b148f1..1351975d07699 100755 |
18376 |
+--- a/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py |
18377 |
++++ b/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py |
18378 |
+@@ -11,11 +11,11 @@ then this utility enables and collects trace data for a user specified interval |
18379 |
+ and generates performance plots. |
18380 |
+ |
18381 |
+ Prerequisites: |
18382 |
+- Python version 2.7.x |
18383 |
++ Python version 2.7.x or higher |
18384 |
+ gnuplot 5.0 or higher |
18385 |
+- gnuplot-py 1.8 |
18386 |
++ gnuplot-py 1.8 or higher |
18387 |
+ (Most of the distributions have these required packages. They may be called |
18388 |
+- gnuplot-py, phython-gnuplot. ) |
18389 |
++ gnuplot-py, phython-gnuplot or phython3-gnuplot, gnuplot-nox, ... ) |
18390 |
+ |
18391 |
+ HWP (Hardware P-States are disabled) |
18392 |
+ Kernel config for Linux trace is enabled |
18393 |
+@@ -181,7 +181,7 @@ def plot_pstate_cpu_with_sample(): |
18394 |
+ g_plot('set xlabel "Samples"') |
18395 |
+ g_plot('set ylabel "P-State"') |
18396 |
+ g_plot('set title "{} : cpu pstate vs. sample : {:%F %H:%M}"'.format(testname, datetime.now())) |
18397 |
+- title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ') |
18398 |
++ title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ') |
18399 |
+ plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_SAMPLE, C_TO) |
18400 |
+ g_plot('title_list = "{}"'.format(title_list)) |
18401 |
+ g_plot(plot_str) |
18402 |
+@@ -198,7 +198,7 @@ def plot_pstate_cpu(): |
18403 |
+ # the following command is really cool, but doesn't work with the CPU masking option because it aborts on the first missing file. |
18404 |
+ # plot_str = 'plot for [i=0:*] file=sprintf("cpu%03d.csv",i) title_s=sprintf("cpu%03d",i) file using 16:7 pt 7 ps 1 title title_s' |
18405 |
+ # |
18406 |
+- title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ') |
18407 |
++ title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ') |
18408 |
+ plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_TO) |
18409 |
+ g_plot('title_list = "{}"'.format(title_list)) |
18410 |
+ g_plot(plot_str) |
18411 |
+@@ -212,7 +212,7 @@ def plot_load_cpu(): |
18412 |
+ g_plot('set ylabel "CPU load (percent)"') |
18413 |
+ g_plot('set title "{} : cpu loads : {:%F %H:%M}"'.format(testname, datetime.now())) |
18414 |
+ |
18415 |
+- title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ') |
18416 |
++ title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ') |
18417 |
+ plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_LOAD) |
18418 |
+ g_plot('title_list = "{}"'.format(title_list)) |
18419 |
+ g_plot(plot_str) |
18420 |
+@@ -226,7 +226,7 @@ def plot_frequency_cpu(): |
18421 |
+ g_plot('set ylabel "CPU Frequency (GHz)"') |
18422 |
+ g_plot('set title "{} : cpu frequencies : {:%F %H:%M}"'.format(testname, datetime.now())) |
18423 |
+ |
18424 |
+- title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ') |
18425 |
++ title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ') |
18426 |
+ plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_FREQ) |
18427 |
+ g_plot('title_list = "{}"'.format(title_list)) |
18428 |
+ g_plot(plot_str) |
18429 |
+@@ -241,7 +241,7 @@ def plot_duration_cpu(): |
18430 |
+ g_plot('set ylabel "Timer Duration (MilliSeconds)"') |
18431 |
+ g_plot('set title "{} : cpu durations : {:%F %H:%M}"'.format(testname, datetime.now())) |
18432 |
+ |
18433 |
+- title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ') |
18434 |
++ title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ') |
18435 |
+ plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_DURATION) |
18436 |
+ g_plot('title_list = "{}"'.format(title_list)) |
18437 |
+ g_plot(plot_str) |
18438 |
+@@ -255,7 +255,7 @@ def plot_scaled_cpu(): |
18439 |
+ g_plot('set ylabel "Scaled Busy (Unitless)"') |
18440 |
+ g_plot('set title "{} : cpu scaled busy : {:%F %H:%M}"'.format(testname, datetime.now())) |
18441 |
+ |
18442 |
+- title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ') |
18443 |
++ title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ') |
18444 |
+ plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_SCALED) |
18445 |
+ g_plot('title_list = "{}"'.format(title_list)) |
18446 |
+ g_plot(plot_str) |
18447 |
+@@ -269,7 +269,7 @@ def plot_boost_cpu(): |
18448 |
+ g_plot('set ylabel "CPU IO Boost (percent)"') |
18449 |
+ g_plot('set title "{} : cpu io boost : {:%F %H:%M}"'.format(testname, datetime.now())) |
18450 |
+ |
18451 |
+- title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ') |
18452 |
++ title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ') |
18453 |
+ plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_BOOST) |
18454 |
+ g_plot('title_list = "{}"'.format(title_list)) |
18455 |
+ g_plot(plot_str) |
18456 |
+@@ -283,7 +283,7 @@ def plot_ghz_cpu(): |
18457 |
+ g_plot('set ylabel "TSC Frequency (GHz)"') |
18458 |
+ g_plot('set title "{} : cpu TSC Frequencies (Sanity check calculation) : {:%F %H:%M}"'.format(testname, datetime.now())) |
18459 |
+ |
18460 |
+- title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ') |
18461 |
++ title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ') |
18462 |
+ plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_GHZ) |
18463 |
+ g_plot('title_list = "{}"'.format(title_list)) |
18464 |
+ g_plot(plot_str) |
18465 |
+diff --git a/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c b/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c |
18466 |
+index 2e233613d1fc0..7fa4595d2b66b 100644 |
18467 |
+--- a/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c |
18468 |
++++ b/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c |
18469 |
+@@ -131,6 +131,7 @@ int bpf_testcb(struct bpf_sock_ops *skops) |
18470 |
+ g.bytes_received = skops->bytes_received; |
18471 |
+ g.bytes_acked = skops->bytes_acked; |
18472 |
+ } |
18473 |
++ g.num_close_events++; |
18474 |
+ bpf_map_update_elem(&global_map, &key, &g, |
18475 |
+ BPF_ANY); |
18476 |
+ } |
18477 |
+diff --git a/tools/testing/selftests/bpf/test_tcpbpf.h b/tools/testing/selftests/bpf/test_tcpbpf.h |
18478 |
+index 7bcfa62070056..6220b95cbd02c 100644 |
18479 |
+--- a/tools/testing/selftests/bpf/test_tcpbpf.h |
18480 |
++++ b/tools/testing/selftests/bpf/test_tcpbpf.h |
18481 |
+@@ -13,5 +13,6 @@ struct tcpbpf_globals { |
18482 |
+ __u64 bytes_received; |
18483 |
+ __u64 bytes_acked; |
18484 |
+ __u32 num_listen; |
18485 |
++ __u32 num_close_events; |
18486 |
+ }; |
18487 |
+ #endif |
18488 |
+diff --git a/tools/testing/selftests/bpf/test_tcpbpf_user.c b/tools/testing/selftests/bpf/test_tcpbpf_user.c |
18489 |
+index 716b4e3be5813..3ae127620463d 100644 |
18490 |
+--- a/tools/testing/selftests/bpf/test_tcpbpf_user.c |
18491 |
++++ b/tools/testing/selftests/bpf/test_tcpbpf_user.c |
18492 |
+@@ -16,6 +16,9 @@ |
18493 |
+ |
18494 |
+ #include "test_tcpbpf.h" |
18495 |
+ |
18496 |
++/* 3 comes from one listening socket + both ends of the connection */ |
18497 |
++#define EXPECTED_CLOSE_EVENTS 3 |
18498 |
++ |
18499 |
+ #define EXPECT_EQ(expected, actual, fmt) \ |
18500 |
+ do { \ |
18501 |
+ if ((expected) != (actual)) { \ |
18502 |
+@@ -23,13 +26,14 @@ |
18503 |
+ " Actual: %" fmt "\n" \ |
18504 |
+ " Expected: %" fmt "\n", \ |
18505 |
+ (actual), (expected)); \ |
18506 |
+- goto err; \ |
18507 |
++ ret--; \ |
18508 |
+ } \ |
18509 |
+ } while (0) |
18510 |
+ |
18511 |
+ int verify_result(const struct tcpbpf_globals *result) |
18512 |
+ { |
18513 |
+ __u32 expected_events; |
18514 |
++ int ret = 0; |
18515 |
+ |
18516 |
+ expected_events = ((1 << BPF_SOCK_OPS_TIMEOUT_INIT) | |
18517 |
+ (1 << BPF_SOCK_OPS_RWND_INIT) | |
18518 |
+@@ -48,15 +52,15 @@ int verify_result(const struct tcpbpf_globals *result) |
18519 |
+ EXPECT_EQ(0x80, result->bad_cb_test_rv, PRIu32); |
18520 |
+ EXPECT_EQ(0, result->good_cb_test_rv, PRIu32); |
18521 |
+ EXPECT_EQ(1, result->num_listen, PRIu32); |
18522 |
++ EXPECT_EQ(EXPECTED_CLOSE_EVENTS, result->num_close_events, PRIu32); |
18523 |
+ |
18524 |
+- return 0; |
18525 |
+-err: |
18526 |
+- return -1; |
18527 |
++ return ret; |
18528 |
+ } |
18529 |
+ |
18530 |
+ int verify_sockopt_result(int sock_map_fd) |
18531 |
+ { |
18532 |
+ __u32 key = 0; |
18533 |
++ int ret = 0; |
18534 |
+ int res; |
18535 |
+ int rv; |
18536 |
+ |
18537 |
+@@ -69,9 +73,7 @@ int verify_sockopt_result(int sock_map_fd) |
18538 |
+ rv = bpf_map_lookup_elem(sock_map_fd, &key, &res); |
18539 |
+ EXPECT_EQ(0, rv, "d"); |
18540 |
+ EXPECT_EQ(1, res, "d"); |
18541 |
+- return 0; |
18542 |
+-err: |
18543 |
+- return -1; |
18544 |
++ return ret; |
18545 |
+ } |
18546 |
+ |
18547 |
+ static int bpf_find_map(const char *test, struct bpf_object *obj, |
18548 |
+@@ -96,6 +98,7 @@ int main(int argc, char **argv) |
18549 |
+ int error = EXIT_FAILURE; |
18550 |
+ struct bpf_object *obj; |
18551 |
+ int cg_fd = -1; |
18552 |
++ int retry = 10; |
18553 |
+ __u32 key = 0; |
18554 |
+ int rv; |
18555 |
+ |
18556 |
+@@ -134,12 +137,20 @@ int main(int argc, char **argv) |
18557 |
+ if (sock_map_fd < 0) |
18558 |
+ goto err; |
18559 |
+ |
18560 |
++retry_lookup: |
18561 |
+ rv = bpf_map_lookup_elem(map_fd, &key, &g); |
18562 |
+ if (rv != 0) { |
18563 |
+ printf("FAILED: bpf_map_lookup_elem returns %d\n", rv); |
18564 |
+ goto err; |
18565 |
+ } |
18566 |
+ |
18567 |
++ if (g.num_close_events != EXPECTED_CLOSE_EVENTS && retry--) { |
18568 |
++ printf("Unexpected number of close events (%d), retrying!\n", |
18569 |
++ g.num_close_events); |
18570 |
++ usleep(100); |
18571 |
++ goto retry_lookup; |
18572 |
++ } |
18573 |
++ |
18574 |
+ if (verify_result(&g)) { |
18575 |
+ printf("FAILED: Wrong stats\n"); |
18576 |
+ goto err; |
18577 |
+diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc |
18578 |
+index 27a54a17da65d..f4e92afab14b2 100644 |
18579 |
+--- a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc |
18580 |
++++ b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc |
18581 |
+@@ -30,7 +30,7 @@ ftrace_filter_check '*schedule*' '^.*schedule.*$' |
18582 |
+ ftrace_filter_check 'schedule*' '^schedule.*$' |
18583 |
+ |
18584 |
+ # filter by *mid*end |
18585 |
+-ftrace_filter_check '*aw*lock' '.*aw.*lock$' |
18586 |
++ftrace_filter_check '*pin*lock' '.*pin.*lock$' |
18587 |
+ |
18588 |
+ # filter by start*mid* |
18589 |
+ ftrace_filter_check 'mutex*try*' '^mutex.*try.*' |
18590 |
+diff --git a/tools/testing/selftests/ptrace/Makefile b/tools/testing/selftests/ptrace/Makefile |
18591 |
+index c0b7f89f09300..2f1f532c39dbc 100644 |
18592 |
+--- a/tools/testing/selftests/ptrace/Makefile |
18593 |
++++ b/tools/testing/selftests/ptrace/Makefile |
18594 |
+@@ -1,6 +1,6 @@ |
18595 |
+ # SPDX-License-Identifier: GPL-2.0-only |
18596 |
+-CFLAGS += -iquote../../../../include/uapi -Wall |
18597 |
++CFLAGS += -std=c99 -pthread -iquote../../../../include/uapi -Wall |
18598 |
+ |
18599 |
+-TEST_GEN_PROGS := get_syscall_info peeksiginfo |
18600 |
++TEST_GEN_PROGS := get_syscall_info peeksiginfo vmaccess |
18601 |
+ |
18602 |
+ include ../lib.mk |
18603 |
+diff --git a/tools/testing/selftests/ptrace/vmaccess.c b/tools/testing/selftests/ptrace/vmaccess.c
+new file mode 100644
+index 0000000000000..4db327b445862
+--- /dev/null
++++ b/tools/testing/selftests/ptrace/vmaccess.c
+@@ -0,0 +1,86 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ * Copyright (c) 2020 Bernd Edlinger <bernd.edlinger@×××××××.de>
++ * All rights reserved.
++ *
++ * Check whether /proc/$pid/mem can be accessed without causing deadlocks
++ * when de_thread is blocked with ->cred_guard_mutex held.
++ */
++
++#include "../kselftest_harness.h"
++#include <stdio.h>
++#include <fcntl.h>
++#include <pthread.h>
++#include <signal.h>
++#include <unistd.h>
++#include <sys/ptrace.h>
++
++static void *thread(void *arg)
++{
++ ptrace(PTRACE_TRACEME, 0, 0L, 0L);
++ return NULL;
++}
++
++TEST(vmaccess)
++{
++ int f, pid = fork();
++ char mm[64];
++
++ if (!pid) {
++ pthread_t pt;
++
++ pthread_create(&pt, NULL, thread, NULL);
++ pthread_join(pt, NULL);
++ execlp("true", "true", NULL);
++ }
++
++ sleep(1);
++ sprintf(mm, "/proc/%d/mem", pid);
++ f = open(mm, O_RDONLY);
++ ASSERT_GE(f, 0);
++ close(f);
++ f = kill(pid, SIGCONT);
++ ASSERT_EQ(f, 0);
++}
++
++TEST(attach)
++{
++ int s, k, pid = fork();
++
++ if (!pid) {
++ pthread_t pt;
++
++ pthread_create(&pt, NULL, thread, NULL);
++ pthread_join(pt, NULL);
++ execlp("sleep", "sleep", "2", NULL);
++ }
++
++ sleep(1);
++ k = ptrace(PTRACE_ATTACH, pid, 0L, 0L);
++ ASSERT_EQ(errno, EAGAIN);
++ ASSERT_EQ(k, -1);
++ k = waitpid(-1, &s, WNOHANG);
++ ASSERT_NE(k, -1);
++ ASSERT_NE(k, 0);
++ ASSERT_NE(k, pid);
++ ASSERT_EQ(WIFEXITED(s), 1);
++ ASSERT_EQ(WEXITSTATUS(s), 0);
++ sleep(1);
++ k = ptrace(PTRACE_ATTACH, pid, 0L, 0L);
++ ASSERT_EQ(k, 0);
++ k = waitpid(-1, &s, 0);
++ ASSERT_EQ(k, pid);
++ ASSERT_EQ(WIFSTOPPED(s), 1);
++ ASSERT_EQ(WSTOPSIG(s), SIGSTOP);
++ k = ptrace(PTRACE_DETACH, pid, 0L, 0L);
++ ASSERT_EQ(k, 0);
++ k = waitpid(-1, &s, 0);
++ ASSERT_EQ(k, pid);
++ ASSERT_EQ(WIFEXITED(s), 1);
++ ASSERT_EQ(WEXITSTATUS(s), 0);
++ k = waitpid(-1, NULL, 0);
++ ASSERT_EQ(k, -1);
++ ASSERT_EQ(errno, ECHILD);
++}
++
++TEST_HARNESS_MAIN
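The vmaccess test above only asserts that open() on /proc/$pid/mem completes while the traced child is stuck in de_thread(); it never reads through the descriptor. For reference, actually reading a peer's memory through that file looks roughly like the sketch below (illustrative only; the read_mem() helper is invented here, and the open() is gated by the same ptrace-style access checks the test exercises). The pseudo-file is addressed by the target's virtual addresses, so the pread() offset is the address to read:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/types.h>
    #include <unistd.h>

    /* Sketch: read len bytes at virtual address addr of process pid. */
    static int read_mem(pid_t pid, unsigned long addr, void *buf, size_t len)
    {
            char path[64];
            ssize_t n;
            int fd;

            snprintf(path, sizeof(path), "/proc/%d/mem", pid);
            fd = open(path, O_RDONLY);      /* ptrace access check happens here */
            if (fd < 0)
                    return -1;
            n = pread(fd, buf, len, addr);  /* offset is the target VA */
            close(fd);
            return n == (ssize_t)len ? 0 : -1;
    }

This is also why the Makefile hunk above adds -pthread: the child spawns a thread that calls PTRACE_TRACEME and then exits, so the child's execve() blocks in de_thread() on ->cred_guard_mutex until the tracer reaps the traced thread, which is exactly the window the test probes.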
+diff --git a/tools/testing/selftests/x86/syscall_nt.c b/tools/testing/selftests/x86/syscall_nt.c
+index 02309a1950413..a765f62ee7668 100644
+--- a/tools/testing/selftests/x86/syscall_nt.c
++++ b/tools/testing/selftests/x86/syscall_nt.c
+@@ -59,6 +59,7 @@ static void do_it(unsigned long extraflags)
+ set_eflags(get_eflags() | extraflags);
+ syscall(SYS_getpid);
+ flags = get_eflags();
++ set_eflags(X86_EFLAGS_IF | X86_EFLAGS_FIXED);
+ if ((flags & extraflags) == extraflags) {
+ printf("[OK]\tThe syscall worked and flags are still set\n");
+ } else {
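The added set_eflags() call rewrites EFLAGS to just X86_EFLAGS_IF (interrupts enabled) plus X86_EFLAGS_FIXED (the always-one bit 1) right after the flags have been sampled, so a trap flag left set by one test case cannot single-step everything that follows. The accessors it relies on amount to a push/pop of the flags register; a minimal sketch, which may differ in detail from the in-tree helpers:

    /* Sketch of the EFLAGS accessors via pushf/popf. */
    static unsigned long get_eflags(void)
    {
            unsigned long eflags;

            asm volatile ("pushf ; pop %0" : "=rm" (eflags));
            return eflags;
    }

    static void set_eflags(unsigned long eflags)
    {
            asm volatile ("push %0 ; popf" : : "rm" (eflags) : "cc");
    }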
+diff --git a/virt/kvm/arm/mmio.c b/virt/kvm/arm/mmio.c
+index f274fabb4301f..1e9ec878d56d8 100644
+--- a/virt/kvm/arm/mmio.c
++++ b/virt/kvm/arm/mmio.c
+@@ -130,7 +130,7 @@ static int decode_hsr(struct kvm_vcpu *vcpu, bool *is_write, int *len)
+ bool sign_extend;
+ bool sixty_four;
+
+- if (kvm_vcpu_dabt_iss1tw(vcpu)) {
++ if (kvm_vcpu_abt_iss1tw(vcpu)) {
+ /* page table accesses IO mem: tell guest to fix its TTBR */
+ kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
+ return 1;
+diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
+index 1e1c4f6a85c7d..8700402f3000d 100644
+--- a/virt/kvm/arm/mmu.c
++++ b/virt/kvm/arm/mmu.c
+@@ -1690,7 +1690,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
+ unsigned long vma_pagesize, flags = 0;
+
+ write_fault = kvm_is_write_fault(vcpu);
+- exec_fault = kvm_vcpu_trap_is_iabt(vcpu);
++ exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu);
+ VM_BUG_ON(write_fault && exec_fault);
+
+ if (fault_status == FSC_PERM && !write_fault && !exec_fault) {
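Both renames serve the same fix: a fault taken during a stage-1 page-table walk can be reported as an instruction abort, yet it must not be treated as an execution fault when deciding how to map the page. Earlier in this patch the kvm_emulate.h headers gain a helper combining the two checks; it reads along these lines (reconstructed here, so treat it as a sketch rather than a verbatim quote):

    static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu)
    {
            return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu);
    }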
+diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
+index 6f50c429196de..6899101538890 100644
+--- a/virt/kvm/arm/vgic/vgic-init.c
++++ b/virt/kvm/arm/vgic/vgic-init.c
+@@ -177,6 +177,7 @@ static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis)
+ break;
+ default:
+ kfree(dist->spis);
++ dist->spis = NULL;
+ return -EINVAL;
+ }
+ }
+@@ -357,6 +358,12 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
+ {
+ struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+
++ /*
++ * Retire all pending LPIs on this vcpu anyway as we're
++ * going to destroy it.
++ */
++ vgic_flush_pending_lpis(vcpu);
++
+ INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
+ }
+
+@@ -368,10 +375,10 @@ static void __kvm_vgic_destroy(struct kvm *kvm)
+
+ vgic_debug_destroy(kvm);
+
+- kvm_vgic_dist_destroy(kvm);
+-
+ kvm_for_each_vcpu(i, vcpu, kvm)
+ kvm_vgic_vcpu_destroy(vcpu);
++
++ kvm_vgic_dist_destroy(kvm);
+ }
+
+ void kvm_vgic_destroy(struct kvm *kvm)
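Two separate hardenings here: kvm_vgic_dist_init() now clears dist->spis after freeing it, so a later teardown or retry sees a NULL pointer instead of a dangling one, and __kvm_vgic_destroy() destroys the per-vcpu state, which now flushes pending LPIs that still reference distributor data, before tearing down the distributor itself. The free-then-clear idiom in isolation, as a hypothetical kernel-style sketch (struct ctx and validate() are invented for illustration):

    struct ctx {
            u32 *entries;
    };

    static bool validate(struct ctx *c);

    static int ctx_init(struct ctx *c, unsigned int n)
    {
            c->entries = kcalloc(n, sizeof(*c->entries), GFP_KERNEL);
            if (!c->entries)
                    return -ENOMEM;

            if (!validate(c)) {
                    kfree(c->entries);
                    /* kfree(NULL) is a no-op, so a later destroy
                     * path cannot double-free this member. */
                    c->entries = NULL;
                    return -EINVAL;
            }
            return 0;
    }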
+diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
+index f8ad7096555d7..35be0e2a46393 100644
+--- a/virt/kvm/arm/vgic/vgic-its.c
++++ b/virt/kvm/arm/vgic/vgic-its.c
+@@ -96,14 +96,21 @@ out_unlock:
+ * We "cache" the configuration table entries in our struct vgic_irq's.
+ * However we only have those structs for mapped IRQs, so we read in
+ * the respective config data from memory here upon mapping the LPI.
++ *
++ * Should any of these fail, behave as if we couldn't create the LPI
++ * by dropping the refcount and returning the error.
+ */
+ ret = update_lpi_config(kvm, irq, NULL, false);
+- if (ret)
++ if (ret) {
++ vgic_put_irq(kvm, irq);
+ return ERR_PTR(ret);
++ }
+
+ ret = vgic_v3_lpi_sync_pending_status(kvm, irq);
+- if (ret)
++ if (ret) {
++ vgic_put_irq(kvm, irq);
+ return ERR_PTR(ret);
++ }
+
+ return irq;
+ }
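vgic_add_lpi(), which these hunks patch, takes a reference on the freshly allocated vgic_irq before reaching this point, so every failure from here on must drop that reference before propagating the error, or the IRQ stays on the LPI list forever. The general shape of the rule, with hypothetical names for illustration:

    struct obj *obj_create(void)
    {
            struct obj *o = obj_alloc_ref();        /* refcount == 1 */
            int ret;

            if (!o)
                    return ERR_PTR(-ENOMEM);

            ret = obj_setup(o);
            if (ret) {
                    obj_put(o);     /* balance the creation reference */
                    return ERR_PTR(ret);
            }
            return o;
    }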
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 4eed7fd8db939..1dfa49d26de91 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -185,6 +185,7 @@ bool kvm_is_reserved_pfn(kvm_pfn_t pfn)
+ */
+ if (pfn_valid(pfn))
+ return PageReserved(pfn_to_page(pfn)) &&
++ !is_zero_pfn(pfn) &&
+ !kvm_is_zone_device_pfn(pfn);
+
+ return true;