From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.2 commit in: /
Date: Fri, 16 Aug 2019 12:16:13
Message-Id: 1565957748.cb6b2cec3829ccc37b3cad4e77fe06905bd73489.mpagano@gentoo
1 commit: cb6b2cec3829ccc37b3cad4e77fe06905bd73489
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Fri Aug 16 12:15:48 2019 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Fri Aug 16 12:15:48 2019 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=cb6b2cec3829ccc37b3cad4e77fe06905bd73489
7
8 Linux patch 5.2.9
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1008_linux-5.2.9.patch | 4835 ++++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 4839 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index 6e8d29d..4179af7 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -75,6 +75,10 @@ Patch: 1007_linux-5.2.8.patch
21 From: https://www.kernel.org
22 Desc: Linux 5.2.8
23
24 +Patch: 1008_linux-5.2.9.patch
25 +From: https://www.kernel.org
26 +Desc: Linux 5.2.9
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1008_linux-5.2.9.patch b/1008_linux-5.2.9.patch
33 new file mode 100644
34 index 0000000..682b8db
35 --- /dev/null
36 +++ b/1008_linux-5.2.9.patch
37 @@ -0,0 +1,4835 @@
38 +diff --git a/Makefile b/Makefile
39 +index bad87c4c8117..cfc667fe9959 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,7 +1,7 @@
43 + # SPDX-License-Identifier: GPL-2.0
44 + VERSION = 5
45 + PATCHLEVEL = 2
46 +-SUBLEVEL = 8
47 ++SUBLEVEL = 9
48 + EXTRAVERSION =
49 + NAME = Bobtail Squid
50 +
51 +diff --git a/arch/arm/boot/dts/bcm47094-linksys-panamera.dts b/arch/arm/boot/dts/bcm47094-linksys-panamera.dts
52 +index 5fd47eec4407..1679959a3654 100644
53 +--- a/arch/arm/boot/dts/bcm47094-linksys-panamera.dts
54 ++++ b/arch/arm/boot/dts/bcm47094-linksys-panamera.dts
55 +@@ -126,6 +126,9 @@
56 + };
57 +
58 + mdio-bus-mux {
59 ++ #address-cells = <1>;
60 ++ #size-cells = <0>;
61 ++
62 + /* BIT(9) = 1 => external mdio */
63 + mdio_ext: mdio@200 {
64 + reg = <0x200>;
65 +diff --git a/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi b/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi
66 +index 9207d5d071f1..d556f7c541ce 100644
67 +--- a/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi
68 ++++ b/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi
69 +@@ -112,7 +112,7 @@
70 + };
71 +
72 + &i2c2 {
73 +- clock_frequency = <100000>;
74 ++ clock-frequency = <100000>;
75 + pinctrl-names = "default";
76 + pinctrl-0 = <&pinctrl_i2c2>;
77 + status = "okay";
78 +diff --git a/arch/arm/boot/dts/imx6ul-geam.dts b/arch/arm/boot/dts/imx6ul-geam.dts
79 +index bc77f26a2f1d..6157a058feec 100644
80 +--- a/arch/arm/boot/dts/imx6ul-geam.dts
81 ++++ b/arch/arm/boot/dts/imx6ul-geam.dts
82 +@@ -156,7 +156,7 @@
83 + };
84 +
85 + &i2c2 {
86 +- clock_frequency = <100000>;
87 ++ clock-frequency = <100000>;
88 + pinctrl-names = "default";
89 + pinctrl-0 = <&pinctrl_i2c2>;
90 + status = "okay";
91 +diff --git a/arch/arm/boot/dts/imx6ul-isiot.dtsi b/arch/arm/boot/dts/imx6ul-isiot.dtsi
92 +index 213e802bf35c..23e6e2e7ace9 100644
93 +--- a/arch/arm/boot/dts/imx6ul-isiot.dtsi
94 ++++ b/arch/arm/boot/dts/imx6ul-isiot.dtsi
95 +@@ -148,7 +148,7 @@
96 + };
97 +
98 + &i2c2 {
99 +- clock_frequency = <100000>;
100 ++ clock-frequency = <100000>;
101 + pinctrl-names = "default";
102 + pinctrl-0 = <&pinctrl_i2c2>;
103 + status = "okay";
104 +diff --git a/arch/arm/boot/dts/imx6ul-pico-hobbit.dts b/arch/arm/boot/dts/imx6ul-pico-hobbit.dts
105 +index 39eeeddac39e..09f7ffa9ad8c 100644
106 +--- a/arch/arm/boot/dts/imx6ul-pico-hobbit.dts
107 ++++ b/arch/arm/boot/dts/imx6ul-pico-hobbit.dts
108 +@@ -43,7 +43,7 @@
109 + };
110 +
111 + &i2c2 {
112 +- clock_frequency = <100000>;
113 ++ clock-frequency = <100000>;
114 + pinctrl-names = "default";
115 + pinctrl-0 = <&pinctrl_i2c2>;
116 + status = "okay";
117 +diff --git a/arch/arm/boot/dts/imx6ul-pico-pi.dts b/arch/arm/boot/dts/imx6ul-pico-pi.dts
118 +index de07357b27fc..6cd7d5877d20 100644
119 +--- a/arch/arm/boot/dts/imx6ul-pico-pi.dts
120 ++++ b/arch/arm/boot/dts/imx6ul-pico-pi.dts
121 +@@ -43,7 +43,7 @@
122 + };
123 +
124 + &i2c2 {
125 +- clock_frequency = <100000>;
126 ++ clock-frequency = <100000>;
127 + pinctrl-names = "default";
128 + pinctrl-0 = <&pinctrl_i2c2>;
129 + status = "okay";
130 +@@ -58,7 +58,7 @@
131 + };
132 +
133 + &i2c3 {
134 +- clock_frequency = <100000>;
135 ++ clock-frequency = <100000>;
136 + pinctrl-names = "default";
137 + pinctrl-0 = <&pinctrl_i2c3>;
138 + status = "okay";
139 +diff --git a/arch/arm/mach-davinci/sleep.S b/arch/arm/mach-davinci/sleep.S
140 +index 05d03f09ff54..71262dcdbca3 100644
141 +--- a/arch/arm/mach-davinci/sleep.S
142 ++++ b/arch/arm/mach-davinci/sleep.S
143 +@@ -24,6 +24,7 @@
144 + #define DEEPSLEEP_SLEEPENABLE_BIT BIT(31)
145 +
146 + .text
147 ++ .arch armv5te
148 + /*
149 + * Move DaVinci into deep sleep state
150 + *
151 +diff --git a/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h b/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h
152 +index e25f7fcd7997..cffa8991880d 100644
153 +--- a/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h
154 ++++ b/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h
155 +@@ -462,7 +462,7 @@
156 + #define MX8MM_IOMUXC_SAI3_RXFS_GPIO4_IO28 0x1CC 0x434 0x000 0x5 0x0
157 + #define MX8MM_IOMUXC_SAI3_RXFS_TPSMP_HTRANS0 0x1CC 0x434 0x000 0x7 0x0
158 + #define MX8MM_IOMUXC_SAI3_RXC_SAI3_RX_BCLK 0x1D0 0x438 0x000 0x0 0x0
159 +-#define MX8MM_IOMUXC_SAI3_RXC_GPT1_CAPTURE2 0x1D0 0x438 0x000 0x1 0x0
160 ++#define MX8MM_IOMUXC_SAI3_RXC_GPT1_CLK 0x1D0 0x438 0x000 0x1 0x0
161 + #define MX8MM_IOMUXC_SAI3_RXC_SAI5_RX_BCLK 0x1D0 0x438 0x4D0 0x2 0x2
162 + #define MX8MM_IOMUXC_SAI3_RXC_GPIO4_IO29 0x1D0 0x438 0x000 0x5 0x0
163 + #define MX8MM_IOMUXC_SAI3_RXC_TPSMP_HTRANS1 0x1D0 0x438 0x000 0x7 0x0
164 +@@ -472,7 +472,7 @@
165 + #define MX8MM_IOMUXC_SAI3_RXD_GPIO4_IO30 0x1D4 0x43C 0x000 0x5 0x0
166 + #define MX8MM_IOMUXC_SAI3_RXD_TPSMP_HDATA0 0x1D4 0x43C 0x000 0x7 0x0
167 + #define MX8MM_IOMUXC_SAI3_TXFS_SAI3_TX_SYNC 0x1D8 0x440 0x000 0x0 0x0
168 +-#define MX8MM_IOMUXC_SAI3_TXFS_GPT1_CLK 0x1D8 0x440 0x000 0x1 0x0
169 ++#define MX8MM_IOMUXC_SAI3_TXFS_GPT1_CAPTURE2 0x1D8 0x440 0x000 0x1 0x0
170 + #define MX8MM_IOMUXC_SAI3_TXFS_SAI5_RX_DATA1 0x1D8 0x440 0x4D8 0x2 0x2
171 + #define MX8MM_IOMUXC_SAI3_TXFS_GPIO4_IO31 0x1D8 0x440 0x000 0x5 0x0
172 + #define MX8MM_IOMUXC_SAI3_TXFS_TPSMP_HDATA1 0x1D8 0x440 0x000 0x7 0x0
173 +diff --git a/arch/arm64/boot/dts/freescale/imx8mq.dtsi b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
174 +index 6d635ba0904c..6632cbd88bed 100644
175 +--- a/arch/arm64/boot/dts/freescale/imx8mq.dtsi
176 ++++ b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
177 +@@ -675,8 +675,7 @@
178 +
179 + sai2: sai@308b0000 {
180 + #sound-dai-cells = <0>;
181 +- compatible = "fsl,imx8mq-sai",
182 +- "fsl,imx6sx-sai";
183 ++ compatible = "fsl,imx8mq-sai";
184 + reg = <0x308b0000 0x10000>;
185 + interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
186 + clocks = <&clk IMX8MQ_CLK_SAI2_IPG>,
187 +diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
188 +index fd5b1a4efc70..844e2964b0f5 100644
189 +--- a/arch/arm64/include/asm/processor.h
190 ++++ b/arch/arm64/include/asm/processor.h
191 +@@ -193,6 +193,16 @@ static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
192 + regs->pmr_save = GIC_PRIO_IRQON;
193 + }
194 +
195 ++static inline void set_ssbs_bit(struct pt_regs *regs)
196 ++{
197 ++ regs->pstate |= PSR_SSBS_BIT;
198 ++}
199 ++
200 ++static inline void set_compat_ssbs_bit(struct pt_regs *regs)
201 ++{
202 ++ regs->pstate |= PSR_AA32_SSBS_BIT;
203 ++}
204 ++
205 + static inline void start_thread(struct pt_regs *regs, unsigned long pc,
206 + unsigned long sp)
207 + {
208 +@@ -200,7 +210,7 @@ static inline void start_thread(struct pt_regs *regs, unsigned long pc,
209 + regs->pstate = PSR_MODE_EL0t;
210 +
211 + if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
212 +- regs->pstate |= PSR_SSBS_BIT;
213 ++ set_ssbs_bit(regs);
214 +
215 + regs->sp = sp;
216 + }
217 +@@ -219,7 +229,7 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
218 + #endif
219 +
220 + if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
221 +- regs->pstate |= PSR_AA32_SSBS_BIT;
222 ++ set_compat_ssbs_bit(regs);
223 +
224 + regs->compat_sp = sp;
225 + }
226 +diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
227 +index 9cdc4592da3e..320a30dbe35e 100644
228 +--- a/arch/arm64/kernel/entry.S
229 ++++ b/arch/arm64/kernel/entry.S
230 +@@ -586,10 +586,8 @@ el1_sync:
231 + b.eq el1_ia
232 + cmp x24, #ESR_ELx_EC_SYS64 // configurable trap
233 + b.eq el1_undef
234 +- cmp x24, #ESR_ELx_EC_SP_ALIGN // stack alignment exception
235 +- b.eq el1_sp_pc
236 + cmp x24, #ESR_ELx_EC_PC_ALIGN // pc alignment exception
237 +- b.eq el1_sp_pc
238 ++ b.eq el1_pc
239 + cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL1
240 + b.eq el1_undef
241 + cmp x24, #ESR_ELx_EC_BREAKPT_CUR // debug exception in EL1
242 +@@ -611,9 +609,11 @@ el1_da:
243 + bl do_mem_abort
244 +
245 + kernel_exit 1
246 +-el1_sp_pc:
247 ++el1_pc:
248 + /*
249 +- * Stack or PC alignment exception handling
250 ++ * PC alignment exception handling. We don't handle SP alignment faults,
251 ++ * since we will have hit a recursive exception when trying to push the
252 ++ * initial pt_regs.
253 + */
254 + mrs x0, far_el1
255 + inherit_daif pstate=x23, tmp=x2
256 +@@ -732,9 +732,9 @@ el0_sync:
257 + ccmp x24, #ESR_ELx_EC_WFx, #4, ne
258 + b.eq el0_sys
259 + cmp x24, #ESR_ELx_EC_SP_ALIGN // stack alignment exception
260 +- b.eq el0_sp_pc
261 ++ b.eq el0_sp
262 + cmp x24, #ESR_ELx_EC_PC_ALIGN // pc alignment exception
263 +- b.eq el0_sp_pc
264 ++ b.eq el0_pc
265 + cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL0
266 + b.eq el0_undef
267 + cmp x24, #ESR_ELx_EC_BREAKPT_LOW // debug exception in EL0
268 +@@ -758,7 +758,7 @@ el0_sync_compat:
269 + cmp x24, #ESR_ELx_EC_FP_EXC32 // FP/ASIMD exception
270 + b.eq el0_fpsimd_exc
271 + cmp x24, #ESR_ELx_EC_PC_ALIGN // pc alignment exception
272 +- b.eq el0_sp_pc
273 ++ b.eq el0_pc
274 + cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL0
275 + b.eq el0_undef
276 + cmp x24, #ESR_ELx_EC_CP15_32 // CP15 MRC/MCR trap
277 +@@ -858,11 +858,15 @@ el0_fpsimd_exc:
278 + mov x1, sp
279 + bl do_fpsimd_exc
280 + b ret_to_user
281 ++el0_sp:
282 ++ ldr x26, [sp, #S_SP]
283 ++ b el0_sp_pc
284 ++el0_pc:
285 ++ mrs x26, far_el1
286 + el0_sp_pc:
287 + /*
288 + * Stack or PC alignment exception handling
289 + */
290 +- mrs x26, far_el1
291 + gic_prio_kentry_setup tmp=x0
292 + enable_da_f
293 + #ifdef CONFIG_TRACE_IRQFLAGS
294 +diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
295 +index 6a869d9f304f..b0c859ca6320 100644
296 +--- a/arch/arm64/kernel/process.c
297 ++++ b/arch/arm64/kernel/process.c
298 +@@ -398,7 +398,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
299 + childregs->pstate |= PSR_UAO_BIT;
300 +
301 + if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
302 +- childregs->pstate |= PSR_SSBS_BIT;
303 ++ set_ssbs_bit(childregs);
304 +
305 + if (system_uses_irq_prio_masking())
306 + childregs->pmr_save = GIC_PRIO_IRQON;
307 +@@ -442,6 +442,32 @@ void uao_thread_switch(struct task_struct *next)
308 + }
309 + }
310 +
311 ++/*
312 ++ * Force SSBS state on context-switch, since it may be lost after migrating
313 ++ * from a CPU which treats the bit as RES0 in a heterogeneous system.
314 ++ */
315 ++static void ssbs_thread_switch(struct task_struct *next)
316 ++{
317 ++ struct pt_regs *regs = task_pt_regs(next);
318 ++
319 ++ /*
320 ++ * Nothing to do for kernel threads, but 'regs' may be junk
321 ++ * (e.g. idle task) so check the flags and bail early.
322 ++ */
323 ++ if (unlikely(next->flags & PF_KTHREAD))
324 ++ return;
325 ++
326 ++ /* If the mitigation is enabled, then we leave SSBS clear. */
327 ++ if ((arm64_get_ssbd_state() == ARM64_SSBD_FORCE_ENABLE) ||
328 ++ test_tsk_thread_flag(next, TIF_SSBD))
329 ++ return;
330 ++
331 ++ if (compat_user_mode(regs))
332 ++ set_compat_ssbs_bit(regs);
333 ++ else if (user_mode(regs))
334 ++ set_ssbs_bit(regs);
335 ++}
336 ++
337 + /*
338 + * We store our current task in sp_el0, which is clobbered by userspace. Keep a
339 + * shadow copy so that we can restore this upon entry from userspace.
340 +@@ -471,6 +497,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
341 + entry_task_switch(next);
342 + uao_thread_switch(next);
343 + ptrauth_thread_switch(next);
344 ++ ssbs_thread_switch(next);
345 +
346 + /*
347 + * Complete any pending TLB or cache maintenance on this CPU in case
348 +diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
349 +index 6d704ad2472b..993017dd83ca 100644
350 +--- a/arch/powerpc/kvm/powerpc.c
351 ++++ b/arch/powerpc/kvm/powerpc.c
352 +@@ -50,6 +50,11 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
353 + return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
354 + }
355 +
356 ++bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
357 ++{
358 ++ return kvm_arch_vcpu_runnable(vcpu);
359 ++}
360 ++
361 + bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
362 + {
363 + return false;
364 +diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
365 +index 2540d3b2588c..2eda1ec36f55 100644
366 +--- a/arch/powerpc/mm/mem.c
367 ++++ b/arch/powerpc/mm/mem.c
368 +@@ -249,7 +249,7 @@ void __init paging_init(void)
369 +
370 + #ifdef CONFIG_ZONE_DMA
371 + max_zone_pfns[ZONE_DMA] = min(max_low_pfn,
372 +- ((1UL << ARCH_ZONE_DMA_BITS) - 1) >> PAGE_SHIFT);
373 ++ 1UL << (ARCH_ZONE_DMA_BITS - PAGE_SHIFT));
374 + #endif
375 + max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
376 + #ifdef CONFIG_HIGHMEM
377 +diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c
378 +index 96c53b23e58f..dad9825e4087 100644
379 +--- a/arch/powerpc/platforms/pseries/papr_scm.c
380 ++++ b/arch/powerpc/platforms/pseries/papr_scm.c
381 +@@ -42,8 +42,9 @@ struct papr_scm_priv {
382 + static int drc_pmem_bind(struct papr_scm_priv *p)
383 + {
384 + unsigned long ret[PLPAR_HCALL_BUFSIZE];
385 +- uint64_t rc, token;
386 + uint64_t saved = 0;
387 ++ uint64_t token;
388 ++ int64_t rc;
389 +
390 + /*
391 + * When the hypervisor cannot map all the requested memory in a single
392 +@@ -63,6 +64,10 @@ static int drc_pmem_bind(struct papr_scm_priv *p)
393 + } while (rc == H_BUSY);
394 +
395 + if (rc) {
396 ++ /* H_OVERLAP needs a separate error path */
397 ++ if (rc == H_OVERLAP)
398 ++ return -EBUSY;
399 ++
400 + dev_err(&p->pdev->dev, "bind err: %lld\n", rc);
401 + return -ENXIO;
402 + }
403 +@@ -316,6 +321,14 @@ static int papr_scm_probe(struct platform_device *pdev)
404 +
405 + /* request the hypervisor to bind this region to somewhere in memory */
406 + rc = drc_pmem_bind(p);
407 ++
408 ++ /* If phyp says drc memory still bound then force unbound and retry */
409 ++ if (rc == -EBUSY) {
410 ++ dev_warn(&pdev->dev, "Retrying bind after unbinding\n");
411 ++ drc_pmem_unbind(p);
412 ++ rc = drc_pmem_bind(p);
413 ++ }
414 ++
415 + if (rc)
416 + goto err;
417 +
418 +diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
419 +index a4d38092530a..823578c6b9e2 100644
420 +--- a/arch/s390/include/asm/page.h
421 ++++ b/arch/s390/include/asm/page.h
422 +@@ -177,6 +177,8 @@ static inline int devmem_is_allowed(unsigned long pfn)
423 + #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \
424 + VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
425 +
426 ++#define ARCH_ZONE_DMA_BITS 31
427 ++
428 + #include <asm-generic/memory_model.h>
429 + #include <asm-generic/getorder.h>
430 +
431 +diff --git a/arch/x86/boot/string.c b/arch/x86/boot/string.c
432 +index 401e30ca0a75..8272a4492844 100644
433 +--- a/arch/x86/boot/string.c
434 ++++ b/arch/x86/boot/string.c
435 +@@ -37,6 +37,14 @@ int memcmp(const void *s1, const void *s2, size_t len)
436 + return diff;
437 + }
438 +
439 ++/*
440 ++ * Clang may lower `memcmp == 0` to `bcmp == 0`.
441 ++ */
442 ++int bcmp(const void *s1, const void *s2, size_t len)
443 ++{
444 ++ return memcmp(s1, s2, len);
445 ++}
446 ++
447 + int strcmp(const char *str1, const char *str2)
448 + {
449 + const unsigned char *s1 = (const unsigned char *)str1;
450 +diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
451 +index 2889dd023566..6179be624f35 100644
452 +--- a/arch/x86/events/intel/core.c
453 ++++ b/arch/x86/events/intel/core.c
454 +@@ -20,7 +20,6 @@
455 + #include <asm/intel-family.h>
456 + #include <asm/apic.h>
457 + #include <asm/cpu_device_id.h>
458 +-#include <asm/hypervisor.h>
459 +
460 + #include "../perf_event.h"
461 +
462 +@@ -263,8 +262,8 @@ static struct event_constraint intel_icl_event_constraints[] = {
463 + };
464 +
465 + static struct extra_reg intel_icl_extra_regs[] __read_mostly = {
466 +- INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff9fffull, RSP_0),
467 +- INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff9fffull, RSP_1),
468 ++ INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffffbfffull, RSP_0),
469 ++ INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffffbfffull, RSP_1),
470 + INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
471 + INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
472 + EVENT_EXTRA_END
473 +@@ -4057,7 +4056,7 @@ static bool check_msr(unsigned long msr, u64 mask)
474 + * Disable the check for real HW, so we don't
475 + * mess with potentionaly enabled registers:
476 + */
477 +- if (hypervisor_is_type(X86_HYPER_NATIVE))
478 ++ if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
479 + return true;
480 +
481 + /*
482 +diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
483 +index 505c73dc6a73..6601b8759c92 100644
484 +--- a/arch/x86/events/intel/ds.c
485 ++++ b/arch/x86/events/intel/ds.c
486 +@@ -851,7 +851,7 @@ struct event_constraint intel_skl_pebs_event_constraints[] = {
487 +
488 + struct event_constraint intel_icl_pebs_event_constraints[] = {
489 + INTEL_FLAGS_UEVENT_CONSTRAINT(0x1c0, 0x100000000ULL), /* INST_RETIRED.PREC_DIST */
490 +- INTEL_FLAGS_UEVENT_CONSTRAINT(0x0400, 0x400000000ULL), /* SLOTS */
491 ++ INTEL_FLAGS_UEVENT_CONSTRAINT(0x0400, 0x800000000ULL), /* SLOTS */
492 +
493 + INTEL_PLD_CONSTRAINT(0x1cd, 0xff), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
494 + INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x1d0, 0xf), /* MEM_INST_RETIRED.LOAD */
495 +diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
496 +index 8253925c5e8c..921c609c2af7 100644
497 +--- a/arch/x86/include/asm/kvm_host.h
498 ++++ b/arch/x86/include/asm/kvm_host.h
499 +@@ -1169,6 +1169,7 @@ struct kvm_x86_ops {
500 + int (*update_pi_irte)(struct kvm *kvm, unsigned int host_irq,
501 + uint32_t guest_irq, bool set);
502 + void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu);
503 ++ bool (*dy_apicv_has_pending_interrupt)(struct kvm_vcpu *vcpu);
504 +
505 + int (*set_hv_timer)(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc,
506 + bool *expired);
507 +diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
508 +index 48c865a4e5dd..14384a1ec53f 100644
509 +--- a/arch/x86/kvm/svm.c
510 ++++ b/arch/x86/kvm/svm.c
511 +@@ -3290,7 +3290,7 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
512 + vmcb->control.exit_int_info_err,
513 + KVM_ISA_SVM);
514 +
515 +- rc = kvm_vcpu_map(&svm->vcpu, gfn_to_gpa(svm->nested.vmcb), &map);
516 ++ rc = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->nested.vmcb), &map);
517 + if (rc) {
518 + if (rc == -EINVAL)
519 + kvm_inject_gp(&svm->vcpu, 0);
520 +@@ -3580,7 +3580,7 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
521 +
522 + vmcb_gpa = svm->vmcb->save.rax;
523 +
524 +- rc = kvm_vcpu_map(&svm->vcpu, gfn_to_gpa(vmcb_gpa), &map);
525 ++ rc = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb_gpa), &map);
526 + if (rc) {
527 + if (rc == -EINVAL)
528 + kvm_inject_gp(&svm->vcpu, 0);
529 +@@ -5167,6 +5167,11 @@ static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
530 + kvm_vcpu_wake_up(vcpu);
531 + }
532 +
533 ++static bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu)
534 ++{
535 ++ return false;
536 ++}
537 ++
538 + static void svm_ir_list_del(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
539 + {
540 + unsigned long flags;
541 +@@ -7264,6 +7269,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
542 +
543 + .pmu_ops = &amd_pmu_ops,
544 + .deliver_posted_interrupt = svm_deliver_avic_intr,
545 ++ .dy_apicv_has_pending_interrupt = svm_dy_apicv_has_pending_interrupt,
546 + .update_pi_irte = svm_update_pi_irte,
547 + .setup_mce = svm_setup_mce,
548 +
549 +diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
550 +index 924c2a79e4a9..4b830c0adcf8 100644
551 +--- a/arch/x86/kvm/vmx/vmx.c
552 ++++ b/arch/x86/kvm/vmx/vmx.c
553 +@@ -6096,6 +6096,11 @@ static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
554 + return max_irr;
555 + }
556 +
557 ++static bool vmx_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu)
558 ++{
559 ++ return pi_test_on(vcpu_to_pi_desc(vcpu));
560 ++}
561 ++
562 + static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
563 + {
564 + if (!kvm_vcpu_apicv_active(vcpu))
565 +@@ -7662,6 +7667,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
566 + .guest_apic_has_interrupt = vmx_guest_apic_has_interrupt,
567 + .sync_pir_to_irr = vmx_sync_pir_to_irr,
568 + .deliver_posted_interrupt = vmx_deliver_posted_interrupt,
569 ++ .dy_apicv_has_pending_interrupt = vmx_dy_apicv_has_pending_interrupt,
570 +
571 + .set_tss_addr = vmx_set_tss_addr,
572 + .set_identity_map_addr = vmx_set_identity_map_addr,
573 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
574 +index a8ad3a4d86b1..cbced8ff29d4 100644
575 +--- a/arch/x86/kvm/x86.c
576 ++++ b/arch/x86/kvm/x86.c
577 +@@ -9641,6 +9641,22 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
578 + return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu);
579 + }
580 +
581 ++bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
582 ++{
583 ++ if (READ_ONCE(vcpu->arch.pv.pv_unhalted))
584 ++ return true;
585 ++
586 ++ if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
587 ++ kvm_test_request(KVM_REQ_SMI, vcpu) ||
588 ++ kvm_test_request(KVM_REQ_EVENT, vcpu))
589 ++ return true;
590 ++
591 ++ if (vcpu->arch.apicv_active && kvm_x86_ops->dy_apicv_has_pending_interrupt(vcpu))
592 ++ return true;
593 ++
594 ++ return false;
595 ++}
596 ++
597 + bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
598 + {
599 + return vcpu->arch.preempted_in_kernel;
600 +diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
601 +index 46df4c6aae46..26a8b4b1b9ed 100644
602 +--- a/arch/x86/mm/fault.c
603 ++++ b/arch/x86/mm/fault.c
604 +@@ -194,13 +194,14 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
605 +
606 + pmd = pmd_offset(pud, address);
607 + pmd_k = pmd_offset(pud_k, address);
608 +- if (!pmd_present(*pmd_k))
609 +- return NULL;
610 +
611 +- if (!pmd_present(*pmd))
612 ++ if (pmd_present(*pmd) != pmd_present(*pmd_k))
613 + set_pmd(pmd, *pmd_k);
614 ++
615 ++ if (!pmd_present(*pmd_k))
616 ++ return NULL;
617 + else
618 +- BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
619 ++ BUG_ON(pmd_pfn(*pmd) != pmd_pfn(*pmd_k));
620 +
621 + return pmd_k;
622 + }
623 +@@ -220,17 +221,13 @@ void vmalloc_sync_all(void)
624 + spin_lock(&pgd_lock);
625 + list_for_each_entry(page, &pgd_list, lru) {
626 + spinlock_t *pgt_lock;
627 +- pmd_t *ret;
628 +
629 + /* the pgt_lock only for Xen */
630 + pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
631 +
632 + spin_lock(pgt_lock);
633 +- ret = vmalloc_sync_one(page_address(page), address);
634 ++ vmalloc_sync_one(page_address(page), address);
635 + spin_unlock(pgt_lock);
636 +-
637 +- if (!ret)
638 +- break;
639 + }
640 + spin_unlock(&pgd_lock);
641 + }
642 +diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile
643 +index 3cf302b26332..8901a1f89cf5 100644
644 +--- a/arch/x86/purgatory/Makefile
645 ++++ b/arch/x86/purgatory/Makefile
646 +@@ -6,6 +6,9 @@ purgatory-y := purgatory.o stack.o setup-x86_$(BITS).o sha256.o entry64.o string
647 + targets += $(purgatory-y)
648 + PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y))
649 +
650 ++$(obj)/string.o: $(srctree)/arch/x86/boot/compressed/string.c FORCE
651 ++ $(call if_changed_rule,cc_o_c)
652 ++
653 + $(obj)/sha256.o: $(srctree)/lib/sha256.c FORCE
654 + $(call if_changed_rule,cc_o_c)
655 +
656 +@@ -17,11 +20,34 @@ KCOV_INSTRUMENT := n
657 +
658 + # Default KBUILD_CFLAGS can have -pg option set when FTRACE is enabled. That
659 + # in turn leaves some undefined symbols like __fentry__ in purgatory and not
660 +-# sure how to relocate those. Like kexec-tools, use custom flags.
661 +-
662 +-KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes -fno-zero-initialized-in-bss -fno-builtin -ffreestanding -c -Os -mcmodel=large
663 +-KBUILD_CFLAGS += -m$(BITS)
664 +-KBUILD_CFLAGS += $(call cc-option,-fno-PIE)
665 ++# sure how to relocate those.
666 ++ifdef CONFIG_FUNCTION_TRACER
667 ++CFLAGS_REMOVE_sha256.o += $(CC_FLAGS_FTRACE)
668 ++CFLAGS_REMOVE_purgatory.o += $(CC_FLAGS_FTRACE)
669 ++CFLAGS_REMOVE_string.o += $(CC_FLAGS_FTRACE)
670 ++CFLAGS_REMOVE_kexec-purgatory.o += $(CC_FLAGS_FTRACE)
671 ++endif
672 ++
673 ++ifdef CONFIG_STACKPROTECTOR
674 ++CFLAGS_REMOVE_sha256.o += -fstack-protector
675 ++CFLAGS_REMOVE_purgatory.o += -fstack-protector
676 ++CFLAGS_REMOVE_string.o += -fstack-protector
677 ++CFLAGS_REMOVE_kexec-purgatory.o += -fstack-protector
678 ++endif
679 ++
680 ++ifdef CONFIG_STACKPROTECTOR_STRONG
681 ++CFLAGS_REMOVE_sha256.o += -fstack-protector-strong
682 ++CFLAGS_REMOVE_purgatory.o += -fstack-protector-strong
683 ++CFLAGS_REMOVE_string.o += -fstack-protector-strong
684 ++CFLAGS_REMOVE_kexec-purgatory.o += -fstack-protector-strong
685 ++endif
686 ++
687 ++ifdef CONFIG_RETPOLINE
688 ++CFLAGS_REMOVE_sha256.o += $(RETPOLINE_CFLAGS)
689 ++CFLAGS_REMOVE_purgatory.o += $(RETPOLINE_CFLAGS)
690 ++CFLAGS_REMOVE_string.o += $(RETPOLINE_CFLAGS)
691 ++CFLAGS_REMOVE_kexec-purgatory.o += $(RETPOLINE_CFLAGS)
692 ++endif
693 +
694 + $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
695 + $(call if_changed,ld)
696 +diff --git a/arch/x86/purgatory/purgatory.c b/arch/x86/purgatory/purgatory.c
697 +index 6d8d5a34c377..b607bda786f6 100644
698 +--- a/arch/x86/purgatory/purgatory.c
699 ++++ b/arch/x86/purgatory/purgatory.c
700 +@@ -68,3 +68,9 @@ void purgatory(void)
701 + }
702 + copy_backup_region();
703 + }
704 ++
705 ++/*
706 ++ * Defined in order to reuse memcpy() and memset() from
707 ++ * arch/x86/boot/compressed/string.c
708 ++ */
709 ++void warn(const char *msg) {}
710 +diff --git a/arch/x86/purgatory/string.c b/arch/x86/purgatory/string.c
711 +deleted file mode 100644
712 +index 01ad43873ad9..000000000000
713 +--- a/arch/x86/purgatory/string.c
714 ++++ /dev/null
715 +@@ -1,23 +0,0 @@
716 +-// SPDX-License-Identifier: GPL-2.0-only
717 +-/*
718 +- * Simple string functions.
719 +- *
720 +- * Copyright (C) 2014 Red Hat Inc.
721 +- *
722 +- * Author:
723 +- * Vivek Goyal <vgoyal@××××××.com>
724 +- */
725 +-
726 +-#include <linux/types.h>
727 +-
728 +-#include "../boot/string.c"
729 +-
730 +-void *memcpy(void *dst, const void *src, size_t len)
731 +-{
732 +- return __builtin_memcpy(dst, src, len);
733 +-}
734 +-
735 +-void *memset(void *dst, int c, size_t len)
736 +-{
737 +- return __builtin_memset(dst, c, len);
738 +-}
739 +diff --git a/block/blk-rq-qos.c b/block/blk-rq-qos.c
740 +index 659ccb8b693f..06d024204f50 100644
741 +--- a/block/blk-rq-qos.c
742 ++++ b/block/blk-rq-qos.c
743 +@@ -202,6 +202,7 @@ static int rq_qos_wake_function(struct wait_queue_entry *curr,
744 + return -1;
745 +
746 + data->got_token = true;
747 ++ smp_wmb();
748 + list_del_init(&curr->entry);
749 + wake_up_process(data->task);
750 + return 1;
751 +@@ -245,6 +246,7 @@ void rq_qos_wait(struct rq_wait *rqw, void *private_data,
752 +
753 + prepare_to_wait_exclusive(&rqw->wait, &data.wq, TASK_UNINTERRUPTIBLE);
754 + do {
755 ++ /* The memory barrier in set_task_state saves us here. */
756 + if (data.got_token)
757 + break;
758 + if (!has_sleeper && acquire_inflight_cb(rqw, private_data)) {
759 +@@ -255,12 +257,14 @@ void rq_qos_wait(struct rq_wait *rqw, void *private_data,
760 + * which means we now have two. Put our local token
761 + * and wake anyone else potentially waiting for one.
762 + */
763 ++ smp_rmb();
764 + if (data.got_token)
765 + cleanup_cb(rqw, private_data);
766 + break;
767 + }
768 + io_schedule();
769 +- has_sleeper = false;
770 ++ has_sleeper = true;
771 ++ set_current_state(TASK_UNINTERRUPTIBLE);
772 + } while (1);
773 + finish_wait(&rqw->wait, &data.wq);
774 + }
775 +diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
776 +index d4551e33fa71..8569b79e8b58 100644
777 +--- a/drivers/acpi/arm64/iort.c
778 ++++ b/drivers/acpi/arm64/iort.c
779 +@@ -611,8 +611,8 @@ static int iort_dev_find_its_id(struct device *dev, u32 req_id,
780 +
781 + /* Move to ITS specific data */
782 + its = (struct acpi_iort_its_group *)node->node_data;
783 +- if (idx > its->its_count) {
784 +- dev_err(dev, "requested ITS ID index [%d] is greater than available [%d]\n",
785 ++ if (idx >= its->its_count) {
786 ++ dev_err(dev, "requested ITS ID index [%d] overruns ITS entries [%d]\n",
787 + idx, its->its_count);
788 + return -ENXIO;
789 + }
790 +diff --git a/drivers/base/platform.c b/drivers/base/platform.c
791 +index 4d1729853d1a..8b25c7b12179 100644
792 +--- a/drivers/base/platform.c
793 ++++ b/drivers/base/platform.c
794 +@@ -157,8 +157,13 @@ int platform_get_irq(struct platform_device *dev, unsigned int num)
795 + * the device will only expose one IRQ, and this fallback
796 + * allows a common code path across either kind of resource.
797 + */
798 +- if (num == 0 && has_acpi_companion(&dev->dev))
799 +- return acpi_dev_gpio_irq_get(ACPI_COMPANION(&dev->dev), num);
800 ++ if (num == 0 && has_acpi_companion(&dev->dev)) {
801 ++ int ret = acpi_dev_gpio_irq_get(ACPI_COMPANION(&dev->dev), num);
802 ++
803 ++ /* Our callers expect -ENXIO for missing IRQs. */
804 ++ if (ret >= 0 || ret == -EPROBE_DEFER)
805 ++ return ret;
806 ++ }
807 +
808 + return -ENXIO;
809 + #endif
810 +diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
811 +index 90ebfcae0ce6..2b3103c30857 100644
812 +--- a/drivers/block/drbd/drbd_receiver.c
813 ++++ b/drivers/block/drbd/drbd_receiver.c
814 +@@ -5417,7 +5417,7 @@ static int drbd_do_auth(struct drbd_connection *connection)
815 + unsigned int key_len;
816 + char secret[SHARED_SECRET_MAX]; /* 64 byte */
817 + unsigned int resp_size;
818 +- SHASH_DESC_ON_STACK(desc, connection->cram_hmac_tfm);
819 ++ struct shash_desc *desc;
820 + struct packet_info pi;
821 + struct net_conf *nc;
822 + int err, rv;
823 +@@ -5430,6 +5430,13 @@ static int drbd_do_auth(struct drbd_connection *connection)
824 + memcpy(secret, nc->shared_secret, key_len);
825 + rcu_read_unlock();
826 +
827 ++ desc = kmalloc(sizeof(struct shash_desc) +
828 ++ crypto_shash_descsize(connection->cram_hmac_tfm),
829 ++ GFP_KERNEL);
830 ++ if (!desc) {
831 ++ rv = -1;
832 ++ goto fail;
833 ++ }
834 + desc->tfm = connection->cram_hmac_tfm;
835 +
836 + rv = crypto_shash_setkey(connection->cram_hmac_tfm, (u8 *)secret, key_len);
837 +@@ -5571,7 +5578,10 @@ static int drbd_do_auth(struct drbd_connection *connection)
838 + kfree(peers_ch);
839 + kfree(response);
840 + kfree(right_response);
841 +- shash_desc_zero(desc);
842 ++ if (desc) {
843 ++ shash_desc_zero(desc);
844 ++ kfree(desc);
845 ++ }
846 +
847 + return rv;
848 + }
849 +diff --git a/drivers/block/loop.c b/drivers/block/loop.c
850 +index 430d31499ce9..e1739efca37e 100644
851 +--- a/drivers/block/loop.c
852 ++++ b/drivers/block/loop.c
853 +@@ -893,7 +893,7 @@ static void loop_unprepare_queue(struct loop_device *lo)
854 +
855 + static int loop_kthread_worker_fn(void *worker_ptr)
856 + {
857 +- current->flags |= PF_LESS_THROTTLE;
858 ++ current->flags |= PF_LESS_THROTTLE | PF_MEMALLOC_NOIO;
859 + return kthread_worker_fn(worker_ptr);
860 + }
861 +
862 +diff --git a/drivers/cpufreq/pasemi-cpufreq.c b/drivers/cpufreq/pasemi-cpufreq.c
863 +index 6b1e4abe3248..d2f061015323 100644
864 +--- a/drivers/cpufreq/pasemi-cpufreq.c
865 ++++ b/drivers/cpufreq/pasemi-cpufreq.c
866 +@@ -131,10 +131,18 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
867 + int err = -ENODEV;
868 +
869 + cpu = of_get_cpu_node(policy->cpu, NULL);
870 ++ if (!cpu)
871 ++ goto out;
872 +
873 ++ max_freqp = of_get_property(cpu, "clock-frequency", NULL);
874 + of_node_put(cpu);
875 +- if (!cpu)
876 ++ if (!max_freqp) {
877 ++ err = -EINVAL;
878 + goto out;
879 ++ }
880 ++
881 ++ /* we need the freq in kHz */
882 ++ max_freq = *max_freqp / 1000;
883 +
884 + dn = of_find_compatible_node(NULL, NULL, "1682m-sdc");
885 + if (!dn)
886 +@@ -171,16 +179,6 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
887 + }
888 +
889 + pr_debug("init cpufreq on CPU %d\n", policy->cpu);
890 +-
891 +- max_freqp = of_get_property(cpu, "clock-frequency", NULL);
892 +- if (!max_freqp) {
893 +- err = -EINVAL;
894 +- goto out_unmap_sdcpwr;
895 +- }
896 +-
897 +- /* we need the freq in kHz */
898 +- max_freq = *max_freqp / 1000;
899 +-
900 + pr_debug("max clock-frequency is at %u kHz\n", max_freq);
901 + pr_debug("initializing frequency table\n");
902 +
903 +@@ -198,9 +196,6 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
904 +
905 + return cpufreq_generic_init(policy, pas_freqs, get_gizmo_latency());
906 +
907 +-out_unmap_sdcpwr:
908 +- iounmap(sdcpwr_mapbase);
909 +-
910 + out_unmap_sdcasr:
911 + iounmap(sdcasr_mapbase);
912 + out:
913 +diff --git a/drivers/crypto/ccp/ccp-crypto-aes-galois.c b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
914 +index f9fec2ddf56a..94c1ad7eeddf 100644
915 +--- a/drivers/crypto/ccp/ccp-crypto-aes-galois.c
916 ++++ b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
917 +@@ -58,6 +58,19 @@ static int ccp_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
918 + static int ccp_aes_gcm_setauthsize(struct crypto_aead *tfm,
919 + unsigned int authsize)
920 + {
921 ++ switch (authsize) {
922 ++ case 16:
923 ++ case 15:
924 ++ case 14:
925 ++ case 13:
926 ++ case 12:
927 ++ case 8:
928 ++ case 4:
929 ++ break;
930 ++ default:
931 ++ return -EINVAL;
932 ++ }
933 ++
934 + return 0;
935 + }
936 +
937 +@@ -104,6 +117,7 @@ static int ccp_aes_gcm_crypt(struct aead_request *req, bool encrypt)
938 + memset(&rctx->cmd, 0, sizeof(rctx->cmd));
939 + INIT_LIST_HEAD(&rctx->cmd.entry);
940 + rctx->cmd.engine = CCP_ENGINE_AES;
941 ++ rctx->cmd.u.aes.authsize = crypto_aead_authsize(tfm);
942 + rctx->cmd.u.aes.type = ctx->u.aes.type;
943 + rctx->cmd.u.aes.mode = ctx->u.aes.mode;
944 + rctx->cmd.u.aes.action = encrypt;
945 +diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
946 +index 1cbdfc08ca00..a13e8a362316 100644
947 +--- a/drivers/crypto/ccp/ccp-ops.c
948 ++++ b/drivers/crypto/ccp/ccp-ops.c
949 +@@ -622,6 +622,7 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
950 +
951 + unsigned long long *final;
952 + unsigned int dm_offset;
953 ++ unsigned int authsize;
954 + unsigned int jobid;
955 + unsigned int ilen;
956 + bool in_place = true; /* Default value */
957 +@@ -643,6 +644,21 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
958 + if (!aes->key) /* Gotta have a key SGL */
959 + return -EINVAL;
960 +
961 ++ /* Zero defaults to 16 bytes, the maximum size */
962 ++ authsize = aes->authsize ? aes->authsize : AES_BLOCK_SIZE;
963 ++ switch (authsize) {
964 ++ case 16:
965 ++ case 15:
966 ++ case 14:
967 ++ case 13:
968 ++ case 12:
969 ++ case 8:
970 ++ case 4:
971 ++ break;
972 ++ default:
973 ++ return -EINVAL;
974 ++ }
975 ++
976 + /* First, decompose the source buffer into AAD & PT,
977 + * and the destination buffer into AAD, CT & tag, or
978 + * the input into CT & tag.
979 +@@ -657,7 +673,7 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
980 + p_tag = scatterwalk_ffwd(sg_tag, p_outp, ilen);
981 + } else {
982 + /* Input length for decryption includes tag */
983 +- ilen = aes->src_len - AES_BLOCK_SIZE;
984 ++ ilen = aes->src_len - authsize;
985 + p_tag = scatterwalk_ffwd(sg_tag, p_inp, ilen);
986 + }
987 +
988 +@@ -766,8 +782,7 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
989 + while (src.sg_wa.bytes_left) {
990 + ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true);
991 + if (!src.sg_wa.bytes_left) {
992 +- unsigned int nbytes = aes->src_len
993 +- % AES_BLOCK_SIZE;
994 ++ unsigned int nbytes = ilen % AES_BLOCK_SIZE;
995 +
996 + if (nbytes) {
997 + op.eom = 1;
998 +@@ -839,19 +854,19 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
999 +
1000 + if (aes->action == CCP_AES_ACTION_ENCRYPT) {
1001 + /* Put the ciphered tag after the ciphertext. */
1002 +- ccp_get_dm_area(&final_wa, 0, p_tag, 0, AES_BLOCK_SIZE);
1003 ++ ccp_get_dm_area(&final_wa, 0, p_tag, 0, authsize);
1004 + } else {
1005 + /* Does this ciphered tag match the input? */
1006 +- ret = ccp_init_dm_workarea(&tag, cmd_q, AES_BLOCK_SIZE,
1007 ++ ret = ccp_init_dm_workarea(&tag, cmd_q, authsize,
1008 + DMA_BIDIRECTIONAL);
1009 + if (ret)
1010 + goto e_tag;
1011 +- ret = ccp_set_dm_area(&tag, 0, p_tag, 0, AES_BLOCK_SIZE);
1012 ++ ret = ccp_set_dm_area(&tag, 0, p_tag, 0, authsize);
1013 + if (ret)
1014 + goto e_tag;
1015 +
1016 + ret = crypto_memneq(tag.address, final_wa.address,
1017 +- AES_BLOCK_SIZE) ? -EBADMSG : 0;
1018 ++ authsize) ? -EBADMSG : 0;
1019 + ccp_dm_free(&tag);
1020 + }
1021 +
1022 +@@ -859,11 +874,11 @@ e_tag:
1023 + ccp_dm_free(&final_wa);
1024 +
1025 + e_dst:
1026 +- if (aes->src_len && !in_place)
1027 ++ if (ilen > 0 && !in_place)
1028 + ccp_free_data(&dst, cmd_q);
1029 +
1030 + e_src:
1031 +- if (aes->src_len)
1032 ++ if (ilen > 0)
1033 + ccp_free_data(&src, cmd_q);
1034 +
1035 + e_aad:
1036 +diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
1037 +index d40ccc3af9e2..fa7ed01415b7 100644
1038 +--- a/drivers/firmware/Kconfig
1039 ++++ b/drivers/firmware/Kconfig
1040 +@@ -157,7 +157,7 @@ config DMI_SCAN_MACHINE_NON_EFI_FALLBACK
1041 +
1042 + config ISCSI_IBFT_FIND
1043 + bool "iSCSI Boot Firmware Table Attributes"
1044 +- depends on X86 && ACPI
1045 ++ depends on X86 && ISCSI_IBFT
1046 + default n
1047 + help
1048 + This option enables the kernel to find the region of memory
1049 +@@ -168,7 +168,8 @@ config ISCSI_IBFT_FIND
1050 + config ISCSI_IBFT
1051 + tristate "iSCSI Boot Firmware Table Attributes module"
1052 + select ISCSI_BOOT_SYSFS
1053 +- depends on ISCSI_IBFT_FIND && SCSI && SCSI_LOWLEVEL
1054 ++ select ISCSI_IBFT_FIND if X86
1055 ++ depends on ACPI && SCSI && SCSI_LOWLEVEL
1056 + default n
1057 + help
1058 + This option enables support for detection and exposing of iSCSI
1059 +diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
1060 +index ab3aa3983833..7e12cbdf957c 100644
1061 +--- a/drivers/firmware/iscsi_ibft.c
1062 ++++ b/drivers/firmware/iscsi_ibft.c
1063 +@@ -84,6 +84,10 @@ MODULE_DESCRIPTION("sysfs interface to BIOS iBFT information");
1064 + MODULE_LICENSE("GPL");
1065 + MODULE_VERSION(IBFT_ISCSI_VERSION);
1066 +
1067 ++#ifndef CONFIG_ISCSI_IBFT_FIND
1068 ++struct acpi_table_ibft *ibft_addr;
1069 ++#endif
1070 ++
1071 + struct ibft_hdr {
1072 + u8 id;
1073 + u8 version;
1074 +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
1075 +index ee6b646180b6..0a7adc2925e3 100644
1076 +--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
1077 ++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
1078 +@@ -608,8 +608,10 @@ const struct dc_link_settings *dc_link_get_link_cap(
1079 +
1080 + static void destruct(struct dc *dc)
1081 + {
1082 +- dc_release_state(dc->current_state);
1083 +- dc->current_state = NULL;
1084 ++ if (dc->current_state) {
1085 ++ dc_release_state(dc->current_state);
1086 ++ dc->current_state = NULL;
1087 ++ }
1088 +
1089 + destroy_links(dc);
1090 +
1091 +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
1092 +index a3ff33ff6da1..adf39e3b8d29 100644
1093 +--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
1094 ++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
1095 +@@ -2284,7 +2284,7 @@ bool dc_link_set_backlight_level(const struct dc_link *link,
1096 + if (core_dc->current_state->res_ctx.pipe_ctx[i].stream) {
1097 + if (core_dc->current_state->res_ctx.
1098 + pipe_ctx[i].stream->link
1099 +- == link)
1100 ++ == link) {
1101 + /* DMCU -1 for all controller id values,
1102 + * therefore +1 here
1103 + */
1104 +@@ -2292,6 +2292,13 @@ bool dc_link_set_backlight_level(const struct dc_link *link,
1105 + core_dc->current_state->
1106 + res_ctx.pipe_ctx[i].stream_res.tg->inst +
1107 + 1;
1108 ++
1109 ++ /* Disable brightness ramping when the display is blanked
1110 ++ * as it can hang the DMCU
1111 ++ */
1112 ++ if (core_dc->current_state->res_ctx.pipe_ctx[i].plane_state == NULL)
1113 ++ frame_ramp = 0;
1114 ++ }
1115 + }
1116 + }
1117 + abm->funcs->set_backlight_level_pwm(
1118 +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
1119 +index 253311864cdd..966aa3b754c5 100644
1120 +--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
1121 ++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
1122 +@@ -2218,11 +2218,18 @@ static void get_active_converter_info(
1123 + link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE;
1124 + ddc_service_set_dongle_type(link->ddc,
1125 + link->dpcd_caps.dongle_type);
1126 ++ link->dpcd_caps.is_branch_dev = false;
1127 + return;
1128 + }
1129 +
1130 + /* DPCD 0x5 bit 0 = 1, it indicate it's branch device */
1131 +- link->dpcd_caps.is_branch_dev = ds_port.fields.PORT_PRESENT;
1132 ++ if (ds_port.fields.PORT_TYPE == DOWNSTREAM_DP) {
1133 ++ link->dpcd_caps.is_branch_dev = false;
1134 ++ }
1135 ++
1136 ++ else {
1137 ++ link->dpcd_caps.is_branch_dev = ds_port.fields.PORT_PRESENT;
1138 ++ }
1139 +
1140 + switch (ds_port.fields.PORT_TYPE) {
1141 + case DOWNSTREAM_VGA:
1142 +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
1143 +index 12142d13f22f..b459ce056b60 100644
1144 +--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
1145 ++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
1146 +@@ -254,7 +254,7 @@ bool resource_construct(
1147 + * PORT_CONNECTIVITY == 1 (as instructed by HW team).
1148 + */
1149 + update_num_audio(&straps, &num_audio, &pool->audio_support);
1150 +- for (i = 0; i < pool->pipe_count && i < num_audio; i++) {
1151 ++ for (i = 0; i < caps->num_audio; i++) {
1152 + struct audio *aud = create_funcs->create_audio(ctx, i);
1153 +
1154 + if (aud == NULL) {
1155 +@@ -1702,6 +1702,12 @@ static struct audio *find_first_free_audio(
1156 + return pool->audios[i];
1157 + }
1158 + }
1159 ++
1160 ++ /* use engine id to find free audio */
1161 ++ if ((id < pool->audio_count) && (res_ctx->is_audio_acquired[id] == false)) {
1162 ++ return pool->audios[id];
1163 ++ }
1164 ++
1165 + /*not found the matching one, first come first serve*/
1166 + for (i = 0; i < pool->audio_count; i++) {
1167 + if (res_ctx->is_audio_acquired[i] == false) {
1168 +@@ -1866,6 +1872,7 @@ static int get_norm_pix_clk(const struct dc_crtc_timing *timing)
1169 + pix_clk /= 2;
1170 + if (timing->pixel_encoding != PIXEL_ENCODING_YCBCR422) {
1171 + switch (timing->display_color_depth) {
1172 ++ case COLOR_DEPTH_666:
1173 + case COLOR_DEPTH_888:
1174 + normalized_pix_clk = pix_clk;
1175 + break;
1176 +@@ -2012,7 +2019,7 @@ enum dc_status resource_map_pool_resources(
1177 + /* TODO: Add check if ASIC support and EDID audio */
1178 + if (!stream->converter_disable_audio &&
1179 + dc_is_audio_capable_signal(pipe_ctx->stream->signal) &&
1180 +- stream->audio_info.mode_count) {
1181 ++ stream->audio_info.mode_count && stream->audio_info.flags.all) {
1182 + pipe_ctx->stream_res.audio = find_first_free_audio(
1183 + &context->res_ctx, pool, pipe_ctx->stream_res.stream_enc->id);
1184 +
1185 +diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
1186 +index 2959c3c9390b..da30ae04e82b 100644
1187 +--- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
1188 ++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
1189 +@@ -234,6 +234,10 @@ static void dmcu_set_backlight_level(
1190 + s2 |= (backlight_8_bit << ATOM_S2_CURRENT_BL_LEVEL_SHIFT);
1191 +
1192 + REG_WRITE(BIOS_SCRATCH_2, s2);
1193 ++
1194 ++ /* waitDMCUReadyForCmd */
1195 ++ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT,
1196 ++ 0, 1, 80000);
1197 + }
1198 +
1199 + static void dce_abm_init(struct abm *abm)
1200 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
1201 +index 9e4d70a0055e..5cc5dabf4d65 100644
1202 +--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
1203 ++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
1204 +@@ -1120,16 +1120,7 @@ static void dcn10_init_hw(struct dc *dc)
1205 + * everything down.
1206 + */
1207 + if (dcb->funcs->is_accelerated_mode(dcb) || dc->config.power_down_display_on_boot) {
1208 +- for (i = 0; i < dc->res_pool->pipe_count; i++) {
1209 +- struct hubp *hubp = dc->res_pool->hubps[i];
1210 +- struct dpp *dpp = dc->res_pool->dpps[i];
1211 +-
1212 +- hubp->funcs->hubp_init(hubp);
1213 +- dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
1214 +- plane_atomic_power_down(dc, dpp, hubp);
1215 +- }
1216 +-
1217 +- apply_DEGVIDCN10_253_wa(dc);
1218 ++ dc->hwss.init_pipes(dc, dc->current_state);
1219 + }
1220 +
1221 + for (i = 0; i < dc->res_pool->audio_count; i++) {
1222 +@@ -1298,10 +1289,6 @@ static bool dcn10_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
1223 + return result;
1224 + }
1225 +
1226 +-
1227 +-
1228 +-
1229 +-
1230 + static bool
1231 + dcn10_set_output_transfer_func(struct pipe_ctx *pipe_ctx,
1232 + const struct dc_stream_state *stream)
1233 +@@ -2416,6 +2403,12 @@ static void dcn10_apply_ctx_for_surface(
1234 + if (removed_pipe[i])
1235 + dcn10_disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
1236 +
1237 ++ for (i = 0; i < dc->res_pool->pipe_count; i++)
1238 ++ if (removed_pipe[i]) {
1239 ++ dc->hwss.optimize_bandwidth(dc, context);
1240 ++ break;
1241 ++ }
1242 ++
1243 + if (dc->hwseq->wa.DEGVIDCN10_254)
1244 + hubbub1_wm_change_req_wa(dc->res_pool->hubbub);
1245 + }
1246 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
1247 +index 7eccb54c421d..aac52eed6b2a 100644
1248 +--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
1249 ++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
1250 +@@ -512,7 +512,7 @@ static const struct resource_caps rv2_res_cap = {
1251 + .num_audio = 3,
1252 + .num_stream_encoder = 3,
1253 + .num_pll = 3,
1254 +- .num_ddc = 3,
1255 ++ .num_ddc = 4,
1256 + };
1257 + #endif
1258 +
1259 +diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
1260 +index 6f5ab05d6467..6f0cc718fbd7 100644
1261 +--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
1262 ++++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
1263 +@@ -169,7 +169,7 @@ struct resource_pool {
1264 + struct clock_source *clock_sources[MAX_CLOCK_SOURCES];
1265 + unsigned int clk_src_count;
1266 +
1267 +- struct audio *audios[MAX_PIPES];
1268 ++ struct audio *audios[MAX_AUDIOS];
1269 + unsigned int audio_count;
1270 + struct audio_support audio_support;
1271 +
1272 +diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
1273 +index 4c8e2c6fb6db..72266efd826c 100644
1274 +--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
1275 ++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
1276 +@@ -34,6 +34,7 @@
1277 + * Data types shared between different Virtual HW blocks
1278 + ******************************************************************************/
1279 +
1280 ++#define MAX_AUDIOS 7
1281 + #define MAX_PIPES 6
1282 +
1283 + struct gamma_curve {
1284 +diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
1285 +index d8d75e25f6fb..45f6f11a88a7 100644
1286 +--- a/drivers/gpu/drm/drm_framebuffer.c
1287 ++++ b/drivers/gpu/drm/drm_framebuffer.c
1288 +@@ -830,7 +830,7 @@ static int atomic_remove_fb(struct drm_framebuffer *fb)
1289 + struct drm_device *dev = fb->dev;
1290 + struct drm_atomic_state *state;
1291 + struct drm_plane *plane;
1292 +- struct drm_connector *conn;
1293 ++ struct drm_connector *conn __maybe_unused;
1294 + struct drm_connector_state *conn_state;
1295 + int i, ret;
1296 + unsigned plane_mask;
1297 +diff --git a/drivers/gpu/drm/i915/vlv_dsi_pll.c b/drivers/gpu/drm/i915/vlv_dsi_pll.c
1298 +index 5e7b1fb2db5d..8ea1c927dbad 100644
1299 +--- a/drivers/gpu/drm/i915/vlv_dsi_pll.c
1300 ++++ b/drivers/gpu/drm/i915/vlv_dsi_pll.c
1301 +@@ -394,8 +394,8 @@ static void glk_dsi_program_esc_clock(struct drm_device *dev,
1302 + else
1303 + txesc2_div = 10;
1304 +
1305 +- I915_WRITE(MIPIO_TXESC_CLK_DIV1, txesc1_div & GLK_TX_ESC_CLK_DIV1_MASK);
1306 +- I915_WRITE(MIPIO_TXESC_CLK_DIV2, txesc2_div & GLK_TX_ESC_CLK_DIV2_MASK);
1307 ++ I915_WRITE(MIPIO_TXESC_CLK_DIV1, (1 << (txesc1_div - 1)) & GLK_TX_ESC_CLK_DIV1_MASK);
1308 ++ I915_WRITE(MIPIO_TXESC_CLK_DIV2, (1 << (txesc2_div - 1)) & GLK_TX_ESC_CLK_DIV2_MASK);
1309 + }
1310 +
1311 + /* Program BXT Mipi clocks and dividers */
1312 +diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
1313 +index 0ea150196659..c62f7abcf509 100644
1314 +--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
1315 ++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
1316 +@@ -2226,8 +2226,6 @@ int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
1317 + if (ret)
1318 + goto fail;
1319 +
1320 +- spin_lock_init(&dpu_enc->enc_spinlock);
1321 +-
1322 + atomic_set(&dpu_enc->frame_done_timeout_ms, 0);
1323 + timer_setup(&dpu_enc->frame_done_timer,
1324 + dpu_encoder_frame_done_timeout, 0);
1325 +@@ -2281,6 +2279,7 @@ struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
1326 +
1327 + drm_encoder_helper_add(&dpu_enc->base, &dpu_encoder_helper_funcs);
1328 +
1329 ++ spin_lock_init(&dpu_enc->enc_spinlock);
1330 + dpu_enc->enabled = false;
1331 +
1332 + return &dpu_enc->base;
1333 +diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
1334 +index 93942063b51b..49dd2d905c7f 100644
1335 +--- a/drivers/hid/hid-sony.c
1336 ++++ b/drivers/hid/hid-sony.c
1337 +@@ -585,10 +585,14 @@ static void sony_set_leds(struct sony_sc *sc);
1338 + static inline void sony_schedule_work(struct sony_sc *sc,
1339 + enum sony_worker which)
1340 + {
1341 ++ unsigned long flags;
1342 ++
1343 + switch (which) {
1344 + case SONY_WORKER_STATE:
1345 +- if (!sc->defer_initialization)
1346 ++ spin_lock_irqsave(&sc->lock, flags);
1347 ++ if (!sc->defer_initialization && sc->state_worker_initialized)
1348 + schedule_work(&sc->state_worker);
1349 ++ spin_unlock_irqrestore(&sc->lock, flags);
1350 + break;
1351 + case SONY_WORKER_HOTPLUG:
1352 + if (sc->hotplug_worker_initialized)
1353 +@@ -2558,13 +2562,18 @@ static inline void sony_init_output_report(struct sony_sc *sc,
1354 +
1355 + static inline void sony_cancel_work_sync(struct sony_sc *sc)
1356 + {
1357 ++ unsigned long flags;
1358 ++
1359 + if (sc->hotplug_worker_initialized)
1360 + cancel_work_sync(&sc->hotplug_worker);
1361 +- if (sc->state_worker_initialized)
1362 ++ if (sc->state_worker_initialized) {
1363 ++ spin_lock_irqsave(&sc->lock, flags);
1364 ++ sc->state_worker_initialized = 0;
1365 ++ spin_unlock_irqrestore(&sc->lock, flags);
1366 + cancel_work_sync(&sc->state_worker);
1367 ++ }
1368 + }
1369 +
1370 +-
1371 + static int sony_input_configured(struct hid_device *hdev,
1372 + struct hid_input *hidinput)
1373 + {
1374 +diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
1375 +index 3fb9c0a2d6d0..ce5ec403ec73 100644
1376 +--- a/drivers/hwmon/lm75.c
1377 ++++ b/drivers/hwmon/lm75.c
1378 +@@ -343,7 +343,7 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
1379 + data->sample_time = MSEC_PER_SEC / 2;
1380 + break;
1381 + case tmp75b: /* not one-shot mode, Conversion rate 37Hz */
1382 +- clr_mask |= 1 << 15 | 0x3 << 13;
1383 ++ clr_mask |= 1 << 7 | 0x3 << 5;
1384 + data->resolution = 12;
1385 + data->sample_time = MSEC_PER_SEC / 37;
1386 + break;
1387 +diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
1388 +index e7dff5febe16..d42bc0883a32 100644
1389 +--- a/drivers/hwmon/nct6775.c
1390 ++++ b/drivers/hwmon/nct6775.c
1391 +@@ -852,7 +852,7 @@ static const u16 NCT6106_REG_TARGET[] = { 0x111, 0x121, 0x131 };
1392 + static const u16 NCT6106_REG_WEIGHT_TEMP_SEL[] = { 0x168, 0x178, 0x188 };
1393 + static const u16 NCT6106_REG_WEIGHT_TEMP_STEP[] = { 0x169, 0x179, 0x189 };
1394 + static const u16 NCT6106_REG_WEIGHT_TEMP_STEP_TOL[] = { 0x16a, 0x17a, 0x18a };
1395 +-static const u16 NCT6106_REG_WEIGHT_DUTY_STEP[] = { 0x16b, 0x17b, 0x17c };
1396 ++static const u16 NCT6106_REG_WEIGHT_DUTY_STEP[] = { 0x16b, 0x17b, 0x18b };
1397 + static const u16 NCT6106_REG_WEIGHT_TEMP_BASE[] = { 0x16c, 0x17c, 0x18c };
1398 + static const u16 NCT6106_REG_WEIGHT_DUTY_BASE[] = { 0x16d, 0x17d, 0x18d };
1399 +
1400 +@@ -3764,6 +3764,7 @@ static int nct6775_probe(struct platform_device *pdev)
1401 + data->REG_FAN_TIME[0] = NCT6106_REG_FAN_STOP_TIME;
1402 + data->REG_FAN_TIME[1] = NCT6106_REG_FAN_STEP_UP_TIME;
1403 + data->REG_FAN_TIME[2] = NCT6106_REG_FAN_STEP_DOWN_TIME;
1404 ++ data->REG_TOLERANCE_H = NCT6106_REG_TOLERANCE_H;
1405 + data->REG_PWM[0] = NCT6106_REG_PWM;
1406 + data->REG_PWM[1] = NCT6106_REG_FAN_START_OUTPUT;
1407 + data->REG_PWM[2] = NCT6106_REG_FAN_STOP_OUTPUT;
1408 +diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c
1409 +index ec7bcf8d7cd6..f3dd2a17bd42 100644
1410 +--- a/drivers/hwmon/nct7802.c
1411 ++++ b/drivers/hwmon/nct7802.c
1412 +@@ -704,7 +704,7 @@ static struct attribute *nct7802_in_attrs[] = {
1413 + &sensor_dev_attr_in3_alarm.dev_attr.attr,
1414 + &sensor_dev_attr_in3_beep.dev_attr.attr,
1415 +
1416 +- &sensor_dev_attr_in4_input.dev_attr.attr, /* 17 */
1417 ++ &sensor_dev_attr_in4_input.dev_attr.attr, /* 16 */
1418 + &sensor_dev_attr_in4_min.dev_attr.attr,
1419 + &sensor_dev_attr_in4_max.dev_attr.attr,
1420 + &sensor_dev_attr_in4_alarm.dev_attr.attr,
1421 +@@ -730,9 +730,9 @@ static umode_t nct7802_in_is_visible(struct kobject *kobj,
1422 +
1423 + if (index >= 6 && index < 11 && (reg & 0x03) != 0x03) /* VSEN1 */
1424 + return 0;
1425 +- if (index >= 11 && index < 17 && (reg & 0x0c) != 0x0c) /* VSEN2 */
1426 ++ if (index >= 11 && index < 16 && (reg & 0x0c) != 0x0c) /* VSEN2 */
1427 + return 0;
1428 +- if (index >= 17 && (reg & 0x30) != 0x30) /* VSEN3 */
1429 ++ if (index >= 16 && (reg & 0x30) != 0x30) /* VSEN3 */
1430 + return 0;
1431 +
1432 + return attr->mode;
1433 +diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c
1434 +index 13a6290c8d25..f02aa403332c 100644
1435 +--- a/drivers/hwmon/occ/common.c
1436 ++++ b/drivers/hwmon/occ/common.c
1437 +@@ -402,8 +402,10 @@ static ssize_t occ_show_power_1(struct device *dev,
1438 +
1439 + static u64 occ_get_powr_avg(u64 *accum, u32 *samples)
1440 + {
1441 +- return div64_u64(get_unaligned_be64(accum) * 1000000ULL,
1442 +- get_unaligned_be32(samples));
1443 ++ u64 divisor = get_unaligned_be32(samples);
1444 ++
1445 ++ return (divisor == 0) ? 0 :
1446 ++ div64_u64(get_unaligned_be64(accum) * 1000000ULL, divisor);
1447 + }
1448 +
1449 + static ssize_t occ_show_power_2(struct device *dev,
1450 +diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
1451 +index 3c6294432748..1ef098ff27c3 100644
1452 +--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
1453 ++++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
1454 +@@ -544,6 +544,7 @@ int etm_perf_add_symlink_sink(struct coresight_device *csdev)
1455 + /* See function coresight_get_sink_by_id() to know where this is used */
1456 + hash = hashlen_hash(hashlen_string(NULL, name));
1457 +
1458 ++ sysfs_attr_init(&ea->attr.attr);
1459 + ea->attr.attr.name = devm_kstrdup(pdev, name, GFP_KERNEL);
1460 + if (!ea->attr.attr.name)
1461 + return -ENOMEM;
1462 +diff --git a/drivers/iio/accel/cros_ec_accel_legacy.c b/drivers/iio/accel/cros_ec_accel_legacy.c
1463 +index 46bb2e421bb9..ad19d9c716f4 100644
1464 +--- a/drivers/iio/accel/cros_ec_accel_legacy.c
1465 ++++ b/drivers/iio/accel/cros_ec_accel_legacy.c
1466 +@@ -319,7 +319,6 @@ static const struct iio_chan_spec_ext_info cros_ec_accel_legacy_ext_info[] = {
1467 + .modified = 1, \
1468 + .info_mask_separate = \
1469 + BIT(IIO_CHAN_INFO_RAW) | \
1470 +- BIT(IIO_CHAN_INFO_SCALE) | \
1471 + BIT(IIO_CHAN_INFO_CALIBBIAS), \
1472 + .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SCALE), \
1473 + .ext_info = cros_ec_accel_legacy_ext_info, \
1474 +diff --git a/drivers/iio/adc/ingenic-adc.c b/drivers/iio/adc/ingenic-adc.c
1475 +index 92b1d5037ac9..e234970b7150 100644
1476 +--- a/drivers/iio/adc/ingenic-adc.c
1477 ++++ b/drivers/iio/adc/ingenic-adc.c
1478 +@@ -11,6 +11,7 @@
1479 + #include <linux/iio/iio.h>
1480 + #include <linux/io.h>
1481 + #include <linux/iopoll.h>
1482 ++#include <linux/kernel.h>
1483 + #include <linux/module.h>
1484 + #include <linux/mutex.h>
1485 + #include <linux/platform_device.h>
1486 +@@ -22,8 +23,11 @@
1487 + #define JZ_ADC_REG_ADTCH 0x18
1488 + #define JZ_ADC_REG_ADBDAT 0x1c
1489 + #define JZ_ADC_REG_ADSDAT 0x20
1490 ++#define JZ_ADC_REG_ADCLK 0x28
1491 +
1492 + #define JZ_ADC_REG_CFG_BAT_MD BIT(4)
1493 ++#define JZ_ADC_REG_ADCLK_CLKDIV_LSB 0
1494 ++#define JZ_ADC_REG_ADCLK_CLKDIV10US_LSB 16
1495 +
1496 + #define JZ_ADC_AUX_VREF 3300
1497 + #define JZ_ADC_AUX_VREF_BITS 12
1498 +@@ -34,6 +38,8 @@
1499 + #define JZ4740_ADC_BATTERY_HIGH_VREF (7500 * 0.986)
1500 + #define JZ4740_ADC_BATTERY_HIGH_VREF_BITS 12
1501 +
1502 ++struct ingenic_adc;
1503 ++
1504 + struct ingenic_adc_soc_data {
1505 + unsigned int battery_high_vref;
1506 + unsigned int battery_high_vref_bits;
1507 +@@ -41,6 +47,7 @@ struct ingenic_adc_soc_data {
1508 + size_t battery_raw_avail_size;
1509 + const int *battery_scale_avail;
1510 + size_t battery_scale_avail_size;
1511 ++ int (*init_clk_div)(struct device *dev, struct ingenic_adc *adc);
1512 + };
1513 +
1514 + struct ingenic_adc {
1515 +@@ -151,6 +158,42 @@ static const int jz4740_adc_battery_scale_avail[] = {
1516 + JZ_ADC_BATTERY_LOW_VREF, JZ_ADC_BATTERY_LOW_VREF_BITS,
1517 + };
1518 +
1519 ++static int jz4725b_adc_init_clk_div(struct device *dev, struct ingenic_adc *adc)
1520 ++{
1521 ++ struct clk *parent_clk;
1522 ++ unsigned long parent_rate, rate;
1523 ++ unsigned int div_main, div_10us;
1524 ++
1525 ++ parent_clk = clk_get_parent(adc->clk);
1526 ++ if (!parent_clk) {
1527 ++ dev_err(dev, "ADC clock has no parent\n");
1528 ++ return -ENODEV;
1529 ++ }
1530 ++ parent_rate = clk_get_rate(parent_clk);
1531 ++
1532 ++ /*
1533 ++ * The JZ4725B ADC works at 500 kHz to 8 MHz.
1534 ++ * We pick the highest rate possible.
1535 ++ * In practice we typically get 6 MHz, half of the 12 MHz EXT clock.
1536 ++ */
1537 ++ div_main = DIV_ROUND_UP(parent_rate, 8000000);
1538 ++ div_main = clamp(div_main, 1u, 64u);
1539 ++ rate = parent_rate / div_main;
1540 ++ if (rate < 500000 || rate > 8000000) {
1541 ++ dev_err(dev, "No valid divider for ADC main clock\n");
1542 ++ return -EINVAL;
1543 ++ }
1544 ++
1545 ++ /* We also need a divider that produces a 10us clock. */
1546 ++ div_10us = DIV_ROUND_UP(rate, 100000);
1547 ++
1548 ++ writel(((div_10us - 1) << JZ_ADC_REG_ADCLK_CLKDIV10US_LSB) |
1549 ++ (div_main - 1) << JZ_ADC_REG_ADCLK_CLKDIV_LSB,
1550 ++ adc->base + JZ_ADC_REG_ADCLK);
1551 ++
1552 ++ return 0;
1553 ++}
1554 ++
1555 + static const struct ingenic_adc_soc_data jz4725b_adc_soc_data = {
1556 + .battery_high_vref = JZ4725B_ADC_BATTERY_HIGH_VREF,
1557 + .battery_high_vref_bits = JZ4725B_ADC_BATTERY_HIGH_VREF_BITS,
1558 +@@ -158,6 +201,7 @@ static const struct ingenic_adc_soc_data jz4725b_adc_soc_data = {
1559 + .battery_raw_avail_size = ARRAY_SIZE(jz4725b_adc_battery_raw_avail),
1560 + .battery_scale_avail = jz4725b_adc_battery_scale_avail,
1561 + .battery_scale_avail_size = ARRAY_SIZE(jz4725b_adc_battery_scale_avail),
1562 ++ .init_clk_div = jz4725b_adc_init_clk_div,
1563 + };
1564 +
1565 + static const struct ingenic_adc_soc_data jz4740_adc_soc_data = {
1566 +@@ -167,6 +211,7 @@ static const struct ingenic_adc_soc_data jz4740_adc_soc_data = {
1567 + .battery_raw_avail_size = ARRAY_SIZE(jz4740_adc_battery_raw_avail),
1568 + .battery_scale_avail = jz4740_adc_battery_scale_avail,
1569 + .battery_scale_avail_size = ARRAY_SIZE(jz4740_adc_battery_scale_avail),
1570 ++ .init_clk_div = NULL, /* no ADCLK register on JZ4740 */
1571 + };
1572 +
1573 + static int ingenic_adc_read_avail(struct iio_dev *iio_dev,
1574 +@@ -317,6 +362,15 @@ static int ingenic_adc_probe(struct platform_device *pdev)
1575 + return ret;
1576 + }
1577 +
1578 ++ /* Set clock dividers. */
1579 ++ if (soc_data->init_clk_div) {
1580 ++ ret = soc_data->init_clk_div(dev, adc);
1581 ++ if (ret) {
1582 ++ clk_disable_unprepare(adc->clk);
1583 ++ return ret;
1584 ++ }
1585 ++ }
1586 ++
1587 + /* Put hardware in a known passive state. */
1588 + writeb(0x00, adc->base + JZ_ADC_REG_ENABLE);
1589 + writeb(0xff, adc->base + JZ_ADC_REG_CTRL);
1590 +diff --git a/drivers/iio/adc/max9611.c b/drivers/iio/adc/max9611.c
1591 +index 917223d5ff5b..0e3c6529fc4c 100644
1592 +--- a/drivers/iio/adc/max9611.c
1593 ++++ b/drivers/iio/adc/max9611.c
1594 +@@ -83,7 +83,7 @@
1595 + #define MAX9611_TEMP_MAX_POS 0x7f80
1596 + #define MAX9611_TEMP_MAX_NEG 0xff80
1597 + #define MAX9611_TEMP_MIN_NEG 0xd980
1598 +-#define MAX9611_TEMP_MASK GENMASK(7, 15)
1599 ++#define MAX9611_TEMP_MASK GENMASK(15, 7)
1600 + #define MAX9611_TEMP_SHIFT 0x07
1601 + #define MAX9611_TEMP_RAW(_r) ((_r) >> MAX9611_TEMP_SHIFT)
1602 + #define MAX9611_TEMP_SCALE_NUM 1000000
1603 +diff --git a/drivers/iio/adc/rcar-gyroadc.c b/drivers/iio/adc/rcar-gyroadc.c
1604 +index 2c0d0316d149..b373acce5927 100644
1605 +--- a/drivers/iio/adc/rcar-gyroadc.c
1606 ++++ b/drivers/iio/adc/rcar-gyroadc.c
1607 +@@ -382,7 +382,7 @@ static int rcar_gyroadc_parse_subdevs(struct iio_dev *indio_dev)
1608 + dev_err(dev,
1609 + "Only %i channels supported with %pOFn, but reg = <%i>.\n",
1610 + num_channels, child, reg);
1611 +- return ret;
1612 ++ return -EINVAL;
1613 + }
1614 + }
1615 +
1616 +@@ -391,7 +391,7 @@ static int rcar_gyroadc_parse_subdevs(struct iio_dev *indio_dev)
1617 + dev_err(dev,
1618 + "Channel %i uses different ADC mode than the rest.\n",
1619 + reg);
1620 +- return ret;
1621 ++ return -EINVAL;
1622 + }
1623 +
1624 + /* Channel is valid, grab the regulator. */
1625 +diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
1626 +index 53a59957cc54..8a704cd5bddb 100644
1627 +--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
1628 ++++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
1629 +@@ -845,6 +845,25 @@ static const struct iio_chan_spec inv_mpu_channels[] = {
1630 + INV_MPU6050_CHAN(IIO_ACCEL, IIO_MOD_Z, INV_MPU6050_SCAN_ACCL_Z),
1631 + };
1632 +
1633 ++static const unsigned long inv_mpu_scan_masks[] = {
1634 ++ /* 3-axis accel */
1635 ++ BIT(INV_MPU6050_SCAN_ACCL_X)
1636 ++ | BIT(INV_MPU6050_SCAN_ACCL_Y)
1637 ++ | BIT(INV_MPU6050_SCAN_ACCL_Z),
1638 ++ /* 3-axis gyro */
1639 ++ BIT(INV_MPU6050_SCAN_GYRO_X)
1640 ++ | BIT(INV_MPU6050_SCAN_GYRO_Y)
1641 ++ | BIT(INV_MPU6050_SCAN_GYRO_Z),
1642 ++ /* 6-axis accel + gyro */
1643 ++ BIT(INV_MPU6050_SCAN_ACCL_X)
1644 ++ | BIT(INV_MPU6050_SCAN_ACCL_Y)
1645 ++ | BIT(INV_MPU6050_SCAN_ACCL_Z)
1646 ++ | BIT(INV_MPU6050_SCAN_GYRO_X)
1647 ++ | BIT(INV_MPU6050_SCAN_GYRO_Y)
1648 ++ | BIT(INV_MPU6050_SCAN_GYRO_Z),
1649 ++ 0,
1650 ++};
1651 ++
1652 + static const struct iio_chan_spec inv_icm20602_channels[] = {
1653 + IIO_CHAN_SOFT_TIMESTAMP(INV_ICM20602_SCAN_TIMESTAMP),
1654 + {
1655 +@@ -871,6 +890,28 @@ static const struct iio_chan_spec inv_icm20602_channels[] = {
1656 + INV_MPU6050_CHAN(IIO_ACCEL, IIO_MOD_Z, INV_ICM20602_SCAN_ACCL_Z),
1657 + };
1658 +
1659 ++static const unsigned long inv_icm20602_scan_masks[] = {
1660 ++ /* 3-axis accel + temp (mandatory) */
1661 ++ BIT(INV_ICM20602_SCAN_ACCL_X)
1662 ++ | BIT(INV_ICM20602_SCAN_ACCL_Y)
1663 ++ | BIT(INV_ICM20602_SCAN_ACCL_Z)
1664 ++ | BIT(INV_ICM20602_SCAN_TEMP),
1665 ++ /* 3-axis gyro + temp (mandatory) */
1666 ++ BIT(INV_ICM20602_SCAN_GYRO_X)
1667 ++ | BIT(INV_ICM20602_SCAN_GYRO_Y)
1668 ++ | BIT(INV_ICM20602_SCAN_GYRO_Z)
1669 ++ | BIT(INV_ICM20602_SCAN_TEMP),
1670 ++ /* 6-axis accel + gyro + temp (mandatory) */
1671 ++ BIT(INV_ICM20602_SCAN_ACCL_X)
1672 ++ | BIT(INV_ICM20602_SCAN_ACCL_Y)
1673 ++ | BIT(INV_ICM20602_SCAN_ACCL_Z)
1674 ++ | BIT(INV_ICM20602_SCAN_GYRO_X)
1675 ++ | BIT(INV_ICM20602_SCAN_GYRO_Y)
1676 ++ | BIT(INV_ICM20602_SCAN_GYRO_Z)
1677 ++ | BIT(INV_ICM20602_SCAN_TEMP),
1678 ++ 0,
1679 ++};
1680 ++
1681 + /*
1682 + * The user can choose any frequency between INV_MPU6050_MIN_FIFO_RATE and
1683 + * INV_MPU6050_MAX_FIFO_RATE, but only these frequencies are matched by the
1684 +@@ -1130,9 +1171,11 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
1685 + if (chip_type == INV_ICM20602) {
1686 + indio_dev->channels = inv_icm20602_channels;
1687 + indio_dev->num_channels = ARRAY_SIZE(inv_icm20602_channels);
1688 ++ indio_dev->available_scan_masks = inv_icm20602_scan_masks;
1689 + } else {
1690 + indio_dev->channels = inv_mpu_channels;
1691 + indio_dev->num_channels = ARRAY_SIZE(inv_mpu_channels);
1692 ++ indio_dev->available_scan_masks = inv_mpu_scan_masks;
1693 + }
1694 +
1695 + indio_dev->info = &mpu_info;
1696 +diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
1697 +index a4345052abd2..a47c7add4e0e 100644
1698 +--- a/drivers/input/mouse/elantech.c
1699 ++++ b/drivers/input/mouse/elantech.c
1700 +@@ -1807,6 +1807,30 @@ static int elantech_create_smbus(struct psmouse *psmouse,
1701 + leave_breadcrumbs);
1702 + }
1703 +
1704 ++static bool elantech_use_host_notify(struct psmouse *psmouse,
1705 ++ struct elantech_device_info *info)
1706 ++{
1707 ++ if (ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version))
1708 ++ return true;
1709 ++
1710 ++ switch (info->bus) {
1711 ++ case ETP_BUS_PS2_ONLY:
1712 ++ /* expected case */
1713 ++ break;
1714 ++ case ETP_BUS_SMB_HST_NTFY_ONLY:
1715 ++ case ETP_BUS_PS2_SMB_HST_NTFY:
1716 ++ /* SMbus implementation is stable since 2018 */
1717 ++ if (dmi_get_bios_year() >= 2018)
1718 ++ return true;
1719 ++ default:
1720 ++ psmouse_dbg(psmouse,
1721 ++ "Ignoring SMBus bus provider %d\n", info->bus);
1722 ++ break;
1723 ++ }
1724 ++
1725 ++ return false;
1726 ++}
1727 ++
1728 + /**
1729 + * elantech_setup_smbus - called once the PS/2 devices are enumerated
1730 + * and decides to instantiate a SMBus InterTouch device.
1731 +@@ -1826,7 +1850,7 @@ static int elantech_setup_smbus(struct psmouse *psmouse,
1732 + * i2c_blacklist_pnp_ids.
1733 + * Old ICs are up to the user to decide.
1734 + */
1735 +- if (!ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version) ||
1736 ++ if (!elantech_use_host_notify(psmouse, info) ||
1737 + psmouse_matches_pnp_id(psmouse, i2c_blacklist_pnp_ids))
1738 + return -ENXIO;
1739 + }
1740 +@@ -1846,34 +1870,6 @@ static int elantech_setup_smbus(struct psmouse *psmouse,
1741 + return 0;
1742 + }
1743 +
1744 +-static bool elantech_use_host_notify(struct psmouse *psmouse,
1745 +- struct elantech_device_info *info)
1746 +-{
1747 +- if (ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version))
1748 +- return true;
1749 +-
1750 +- switch (info->bus) {
1751 +- case ETP_BUS_PS2_ONLY:
1752 +- /* expected case */
1753 +- break;
1754 +- case ETP_BUS_SMB_ALERT_ONLY:
1755 +- /* fall-through */
1756 +- case ETP_BUS_PS2_SMB_ALERT:
1757 +- psmouse_dbg(psmouse, "Ignoring SMBus provider through alert protocol.\n");
1758 +- break;
1759 +- case ETP_BUS_SMB_HST_NTFY_ONLY:
1760 +- /* fall-through */
1761 +- case ETP_BUS_PS2_SMB_HST_NTFY:
1762 +- return true;
1763 +- default:
1764 +- psmouse_dbg(psmouse,
1765 +- "Ignoring SMBus bus provider %d.\n",
1766 +- info->bus);
1767 +- }
1768 +-
1769 +- return false;
1770 +-}
1771 +-
1772 + int elantech_init_smbus(struct psmouse *psmouse)
1773 + {
1774 + struct elantech_device_info info;
1775 +diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
1776 +index 7f8f4780b511..c0e188cd3811 100644
1777 +--- a/drivers/input/mouse/synaptics.c
1778 ++++ b/drivers/input/mouse/synaptics.c
1779 +@@ -182,6 +182,7 @@ static const char * const smbus_pnp_ids[] = {
1780 + "LEN2055", /* E580 */
1781 + "SYN3052", /* HP EliteBook 840 G4 */
1782 + "SYN3221", /* HP 15-ay000 */
1783 ++ "SYN323d", /* HP Spectre X360 13-w013dx */
1784 + NULL
1785 + };
1786 +
1787 +diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
1788 +index a2cec6cacf57..16d70201de4a 100644
1789 +--- a/drivers/input/touchscreen/usbtouchscreen.c
1790 ++++ b/drivers/input/touchscreen/usbtouchscreen.c
1791 +@@ -1659,6 +1659,8 @@ static int usbtouch_probe(struct usb_interface *intf,
1792 + if (!usbtouch || !input_dev)
1793 + goto out_free;
1794 +
1795 ++ mutex_init(&usbtouch->pm_mutex);
1796 ++
1797 + type = &usbtouch_dev_info[id->driver_info];
1798 + usbtouch->type = type;
1799 + if (!type->process_pkt)
1800 +diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
1801 +index 2101601adf57..1ad24367373f 100644
1802 +--- a/drivers/iommu/intel-iommu.c
1803 ++++ b/drivers/iommu/intel-iommu.c
1804 +@@ -1900,7 +1900,6 @@ static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
1805 +
1806 + static void domain_exit(struct dmar_domain *domain)
1807 + {
1808 +- struct page *freelist;
1809 +
1810 + /* Remove associated devices and clear attached or cached domains */
1811 + rcu_read_lock();
1812 +@@ -1910,9 +1909,12 @@ static void domain_exit(struct dmar_domain *domain)
1813 + /* destroy iovas */
1814 + put_iova_domain(&domain->iovad);
1815 +
1816 +- freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
1817 ++ if (domain->pgd) {
1818 ++ struct page *freelist;
1819 +
1820 +- dma_free_pagelist(freelist);
1821 ++ freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
1822 ++ dma_free_pagelist(freelist);
1823 ++ }
1824 +
1825 + free_domain_mem(domain);
1826 + }
1827 +diff --git a/drivers/mmc/host/cavium.c b/drivers/mmc/host/cavium.c
1828 +index ed5cefb83768..89deb451e0ac 100644
1829 +--- a/drivers/mmc/host/cavium.c
1830 ++++ b/drivers/mmc/host/cavium.c
1831 +@@ -374,6 +374,7 @@ static int finish_dma_single(struct cvm_mmc_host *host, struct mmc_data *data)
1832 + {
1833 + data->bytes_xfered = data->blocks * data->blksz;
1834 + data->error = 0;
1835 ++ dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
1836 + return 1;
1837 + }
1838 +
1839 +@@ -1046,7 +1047,8 @@ int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host)
1840 + mmc->max_segs = 1;
1841 +
1842 + /* DMA size field can address up to 8 MB */
1843 +- mmc->max_seg_size = 8 * 1024 * 1024;
1844 ++ mmc->max_seg_size = min_t(unsigned int, 8 * 1024 * 1024,
1845 ++ dma_get_max_seg_size(host->dev));
1846 + mmc->max_req_size = mmc->max_seg_size;
1847 + /* External DMA is in 512 byte blocks */
1848 + mmc->max_blk_size = 512;
1849 +diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
1850 +index f2fe344593d5..fcec8bcb53d6 100644
1851 +--- a/drivers/net/can/flexcan.c
1852 ++++ b/drivers/net/can/flexcan.c
1853 +@@ -400,9 +400,10 @@ static void flexcan_enable_wakeup_irq(struct flexcan_priv *priv, bool enable)
1854 + priv->write(reg_mcr, &regs->mcr);
1855 + }
1856 +
1857 +-static inline void flexcan_enter_stop_mode(struct flexcan_priv *priv)
1858 ++static inline int flexcan_enter_stop_mode(struct flexcan_priv *priv)
1859 + {
1860 + struct flexcan_regs __iomem *regs = priv->regs;
1861 ++ unsigned int ackval;
1862 + u32 reg_mcr;
1863 +
1864 + reg_mcr = priv->read(&regs->mcr);
1865 +@@ -412,20 +413,37 @@ static inline void flexcan_enter_stop_mode(struct flexcan_priv *priv)
1866 + /* enable stop request */
1867 + regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr,
1868 + 1 << priv->stm.req_bit, 1 << priv->stm.req_bit);
1869 ++
1870 ++ /* get stop acknowledgment */
1871 ++ if (regmap_read_poll_timeout(priv->stm.gpr, priv->stm.ack_gpr,
1872 ++ ackval, ackval & (1 << priv->stm.ack_bit),
1873 ++ 0, FLEXCAN_TIMEOUT_US))
1874 ++ return -ETIMEDOUT;
1875 ++
1876 ++ return 0;
1877 + }
1878 +
1879 +-static inline void flexcan_exit_stop_mode(struct flexcan_priv *priv)
1880 ++static inline int flexcan_exit_stop_mode(struct flexcan_priv *priv)
1881 + {
1882 + struct flexcan_regs __iomem *regs = priv->regs;
1883 ++ unsigned int ackval;
1884 + u32 reg_mcr;
1885 +
1886 + /* remove stop request */
1887 + regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr,
1888 + 1 << priv->stm.req_bit, 0);
1889 +
1890 ++ /* get stop acknowledgment */
1891 ++ if (regmap_read_poll_timeout(priv->stm.gpr, priv->stm.ack_gpr,
1892 ++ ackval, !(ackval & (1 << priv->stm.ack_bit)),
1893 ++ 0, FLEXCAN_TIMEOUT_US))
1894 ++ return -ETIMEDOUT;
1895 ++
1896 + reg_mcr = priv->read(&regs->mcr);
1897 + reg_mcr &= ~FLEXCAN_MCR_SLF_WAK;
1898 + priv->write(reg_mcr, &regs->mcr);
1899 ++
1900 ++ return 0;
1901 + }
1902 +
1903 + static inline void flexcan_error_irq_enable(const struct flexcan_priv *priv)
1904 +@@ -1437,10 +1455,10 @@ static int flexcan_setup_stop_mode(struct platform_device *pdev)
1905 +
1906 + priv = netdev_priv(dev);
1907 + priv->stm.gpr = syscon_node_to_regmap(gpr_np);
1908 +- of_node_put(gpr_np);
1909 + if (IS_ERR(priv->stm.gpr)) {
1910 + dev_dbg(&pdev->dev, "could not find gpr regmap\n");
1911 +- return PTR_ERR(priv->stm.gpr);
1912 ++ ret = PTR_ERR(priv->stm.gpr);
1913 ++ goto out_put_node;
1914 + }
1915 +
1916 + priv->stm.req_gpr = out_val[1];
1917 +@@ -1455,7 +1473,9 @@ static int flexcan_setup_stop_mode(struct platform_device *pdev)
1918 +
1919 + device_set_wakeup_capable(&pdev->dev, true);
1920 +
1921 +- return 0;
1922 ++out_put_node:
1923 ++ of_node_put(gpr_np);
1924 ++ return ret;
1925 + }
1926 +
1927 + static const struct of_device_id flexcan_of_match[] = {
1928 +@@ -1612,7 +1632,9 @@ static int __maybe_unused flexcan_suspend(struct device *device)
1929 + */
1930 + if (device_may_wakeup(device)) {
1931 + enable_irq_wake(dev->irq);
1932 +- flexcan_enter_stop_mode(priv);
1933 ++ err = flexcan_enter_stop_mode(priv);
1934 ++ if (err)
1935 ++ return err;
1936 + } else {
1937 + err = flexcan_chip_disable(priv);
1938 + if (err)
1939 +@@ -1662,10 +1684,13 @@ static int __maybe_unused flexcan_noirq_resume(struct device *device)
1940 + {
1941 + struct net_device *dev = dev_get_drvdata(device);
1942 + struct flexcan_priv *priv = netdev_priv(dev);
1943 ++ int err;
1944 +
1945 + if (netif_running(dev) && device_may_wakeup(device)) {
1946 + flexcan_enable_wakeup_irq(priv, false);
1947 +- flexcan_exit_stop_mode(priv);
1948 ++ err = flexcan_exit_stop_mode(priv);
1949 ++ if (err)
1950 ++ return err;
1951 + }
1952 +
1953 + return 0;
1954 +diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
1955 +index 05410008aa6b..de34a4b82d4a 100644
1956 +--- a/drivers/net/can/rcar/rcar_canfd.c
1957 ++++ b/drivers/net/can/rcar/rcar_canfd.c
1958 +@@ -1508,10 +1508,11 @@ static int rcar_canfd_rx_poll(struct napi_struct *napi, int quota)
1959 +
1960 + /* All packets processed */
1961 + if (num_pkts < quota) {
1962 +- napi_complete_done(napi, num_pkts);
1963 +- /* Enable Rx FIFO interrupts */
1964 +- rcar_canfd_set_bit(priv->base, RCANFD_RFCC(ridx),
1965 +- RCANFD_RFCC_RFIE);
1966 ++ if (napi_complete_done(napi, num_pkts)) {
1967 ++ /* Enable Rx FIFO interrupts */
1968 ++ rcar_canfd_set_bit(priv->base, RCANFD_RFCC(ridx),
1969 ++ RCANFD_RFCC_RFIE);
1970 ++ }
1971 + }
1972 + return num_pkts;
1973 + }
1974 +diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
1975 +index 458154c9b482..22b9c8e6d040 100644
1976 +--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
1977 ++++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
1978 +@@ -568,16 +568,16 @@ static int peak_usb_ndo_stop(struct net_device *netdev)
1979 + dev->state &= ~PCAN_USB_STATE_STARTED;
1980 + netif_stop_queue(netdev);
1981 +
1982 ++ close_candev(netdev);
1983 ++
1984 ++ dev->can.state = CAN_STATE_STOPPED;
1985 ++
1986 + /* unlink all pending urbs and free used memory */
1987 + peak_usb_unlink_all_urbs(dev);
1988 +
1989 + if (dev->adapter->dev_stop)
1990 + dev->adapter->dev_stop(dev);
1991 +
1992 +- close_candev(netdev);
1993 +-
1994 +- dev->can.state = CAN_STATE_STOPPED;
1995 +-
1996 + /* can set bus off now */
1997 + if (dev->adapter->dev_set_bus) {
1998 + int err = dev->adapter->dev_set_bus(dev, 0);
1999 +diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
2000 +index 34761c3a6286..47cc1ff5b88e 100644
2001 +--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
2002 ++++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
2003 +@@ -841,7 +841,7 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev)
2004 + goto err_out;
2005 +
2006 + /* allocate command buffer once for all for the interface */
2007 +- pdev->cmd_buffer_addr = kmalloc(PCAN_UFD_CMD_BUFFER_SIZE,
2008 ++ pdev->cmd_buffer_addr = kzalloc(PCAN_UFD_CMD_BUFFER_SIZE,
2009 + GFP_KERNEL);
2010 + if (!pdev->cmd_buffer_addr)
2011 + goto err_out_1;
2012 +diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
2013 +index 178bb7cff0c1..53cb2f72bdd0 100644
2014 +--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
2015 ++++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
2016 +@@ -494,7 +494,7 @@ static int pcan_usb_pro_drv_loaded(struct peak_usb_device *dev, int loaded)
2017 + u8 *buffer;
2018 + int err;
2019 +
2020 +- buffer = kmalloc(PCAN_USBPRO_FCT_DRVLD_REQ_LEN, GFP_KERNEL);
2021 ++ buffer = kzalloc(PCAN_USBPRO_FCT_DRVLD_REQ_LEN, GFP_KERNEL);
2022 + if (!buffer)
2023 + return -ENOMEM;
2024 +
2025 +diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
2026 +index cfaf8f618d1f..56742fa0c1af 100644
2027 +--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
2028 ++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
2029 +@@ -67,7 +67,8 @@ static struct ch_tc_pedit_fields pedits[] = {
2030 + static struct ch_tc_flower_entry *allocate_flower_entry(void)
2031 + {
2032 + struct ch_tc_flower_entry *new = kzalloc(sizeof(*new), GFP_KERNEL);
2033 +- spin_lock_init(&new->lock);
2034 ++ if (new)
2035 ++ spin_lock_init(&new->lock);
2036 + return new;
2037 + }
2038 +
2039 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
2040 +index 559f6df1a74d..5af9959d05e5 100644
2041 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
2042 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
2043 +@@ -753,7 +753,7 @@ static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
2044 +
2045 + for (i = 0; i < n_profiles; i++) {
2046 + /* the tables start at element 3 */
2047 +- static int pos = 3;
2048 ++ int pos = 3;
2049 +
2050 + /* The EWRD profiles officially go from 2 to 4, but we
2051 + * save them in sar_profiles[1-3] (because we don't
2052 +@@ -874,6 +874,22 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
2053 + return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
2054 + }
2055 +
2056 ++static bool iwl_mvm_sar_geo_support(struct iwl_mvm *mvm)
2057 ++{
2058 ++ /*
2059 ++ * The GEO_TX_POWER_LIMIT command is not supported on earlier
2060 ++ * firmware versions. Unfortunately, we don't have a TLV API
2061 ++ * flag to rely on, so rely on the major version which is in
2062 ++ * the first byte of ucode_ver. This was implemented
2063 ++ * initially on version 38 and then backported to 36, 29 and
2064 ++ * 17.
2065 ++ */
2066 ++ return IWL_UCODE_SERIAL(mvm->fw->ucode_ver) >= 38 ||
2067 ++ IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 36 ||
2068 ++ IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 29 ||
2069 ++ IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 17;
2070 ++}
2071 ++
2072 + int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
2073 + {
2074 + struct iwl_geo_tx_power_profiles_resp *resp;
2075 +@@ -889,6 +905,9 @@ int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
2076 + .data = { &geo_cmd },
2077 + };
2078 +
2079 ++ if (!iwl_mvm_sar_geo_support(mvm))
2080 ++ return -EOPNOTSUPP;
2081 ++
2082 + ret = iwl_mvm_send_cmd(mvm, &cmd);
2083 + if (ret) {
2084 + IWL_ERR(mvm, "Failed to get geographic profile info %d\n", ret);
2085 +@@ -914,13 +933,7 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
2086 + int ret, i, j;
2087 + u16 cmd_wide_id = WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT);
2088 +
2089 +- /*
2090 +- * This command is not supported on earlier firmware versions.
2091 +- * Unfortunately, we don't have a TLV API flag to rely on, so
2092 +- * rely on the major version which is in the first byte of
2093 +- * ucode_ver.
2094 +- */
2095 +- if (IWL_UCODE_SERIAL(mvm->fw->ucode_ver) < 41)
2096 ++ if (!iwl_mvm_sar_geo_support(mvm))
2097 + return 0;
2098 +
2099 + ret = iwl_mvm_sar_get_wgds_table(mvm);
2100 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
2101 +index 96f8d38ea321..a12ee20fb9ab 100644
2102 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
2103 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
2104 +@@ -831,6 +831,7 @@ iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes,
2105 + unsigned int tcp_payload_len;
2106 + unsigned int mss = skb_shinfo(skb)->gso_size;
2107 + bool ipv4 = (skb->protocol == htons(ETH_P_IP));
2108 ++ bool qos = ieee80211_is_data_qos(hdr->frame_control);
2109 + u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0;
2110 +
2111 + skb_shinfo(skb)->gso_size = num_subframes * mss;
2112 +@@ -864,7 +865,7 @@ iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes,
2113 + if (tcp_payload_len > mss) {
2114 + skb_shinfo(tmp)->gso_size = mss;
2115 + } else {
2116 +- if (ieee80211_is_data_qos(hdr->frame_control)) {
2117 ++ if (qos) {
2118 + u8 *qc;
2119 +
2120 + if (ipv4)
2121 +diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
2122 +index fa4245d0d4a8..2f0ba7ef53b8 100644
2123 +--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
2124 ++++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
2125 +@@ -435,6 +435,8 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
2126 + DMA_TO_DEVICE);
2127 + }
2128 +
2129 ++ meta->tbs = 0;
2130 ++
2131 + if (trans->cfg->use_tfh) {
2132 + struct iwl_tfh_tfd *tfd_fh = (void *)tfd;
2133 +
2134 +diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
2135 +index b025ba164412..e39bb5c42c9a 100644
2136 +--- a/drivers/net/wireless/marvell/mwifiex/main.h
2137 ++++ b/drivers/net/wireless/marvell/mwifiex/main.h
2138 +@@ -124,6 +124,7 @@ enum {
2139 +
2140 + #define MWIFIEX_MAX_TOTAL_SCAN_TIME (MWIFIEX_TIMER_10S - MWIFIEX_TIMER_1S)
2141 +
2142 ++#define WPA_GTK_OUI_OFFSET 2
2143 + #define RSN_GTK_OUI_OFFSET 2
2144 +
2145 + #define MWIFIEX_OUI_NOT_PRESENT 0
2146 +diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
2147 +index e2786ab612ca..dd02bbd9544e 100644
2148 +--- a/drivers/net/wireless/marvell/mwifiex/scan.c
2149 ++++ b/drivers/net/wireless/marvell/mwifiex/scan.c
2150 +@@ -181,7 +181,8 @@ mwifiex_is_wpa_oui_present(struct mwifiex_bssdescriptor *bss_desc, u32 cipher)
2151 + u8 ret = MWIFIEX_OUI_NOT_PRESENT;
2152 +
2153 + if (has_vendor_hdr(bss_desc->bcn_wpa_ie, WLAN_EID_VENDOR_SPECIFIC)) {
2154 +- iebody = (struct ie_body *) bss_desc->bcn_wpa_ie->data;
2155 ++ iebody = (struct ie_body *)((u8 *)bss_desc->bcn_wpa_ie->data +
2156 ++ WPA_GTK_OUI_OFFSET);
2157 + oui = &mwifiex_wpa_oui[cipher][0];
2158 + ret = mwifiex_search_oui_in_ie(iebody, oui);
2159 + if (ret)
2160 +diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
2161 +index 4a1d2ab4d161..5deb4deb3820 100644
2162 +--- a/drivers/nvme/host/core.c
2163 ++++ b/drivers/nvme/host/core.c
2164 +@@ -2264,17 +2264,15 @@ static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ct
2165 + memset(subsys->subnqn + off, 0, sizeof(subsys->subnqn) - off);
2166 + }
2167 +
2168 +-static void __nvme_release_subsystem(struct nvme_subsystem *subsys)
2169 ++static void nvme_release_subsystem(struct device *dev)
2170 + {
2171 ++ struct nvme_subsystem *subsys =
2172 ++ container_of(dev, struct nvme_subsystem, dev);
2173 ++
2174 + ida_simple_remove(&nvme_subsystems_ida, subsys->instance);
2175 + kfree(subsys);
2176 + }
2177 +
2178 +-static void nvme_release_subsystem(struct device *dev)
2179 +-{
2180 +- __nvme_release_subsystem(container_of(dev, struct nvme_subsystem, dev));
2181 +-}
2182 +-
2183 + static void nvme_destroy_subsystem(struct kref *ref)
2184 + {
2185 + struct nvme_subsystem *subsys =
2186 +@@ -2429,7 +2427,7 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
2187 + mutex_lock(&nvme_subsystems_lock);
2188 + found = __nvme_find_get_subsystem(subsys->subnqn);
2189 + if (found) {
2190 +- __nvme_release_subsystem(subsys);
2191 ++ put_device(&subsys->dev);
2192 + subsys = found;
2193 +
2194 + if (!nvme_validate_cntlid(subsys, ctrl, id)) {
2195 +diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
2196 +index 7fbcd72c438f..f9959eaaa185 100644
2197 +--- a/drivers/nvme/host/pci.c
2198 ++++ b/drivers/nvme/host/pci.c
2199 +@@ -2959,6 +2959,8 @@ static const struct pci_device_id nvme_id_table[] = {
2200 + .driver_data = NVME_QUIRK_LIGHTNVM, },
2201 + { PCI_DEVICE(0x1d1d, 0x2601), /* CNEX Granby */
2202 + .driver_data = NVME_QUIRK_LIGHTNVM, },
2203 ++ { PCI_DEVICE(0x10ec, 0x5762), /* ADATA SX6000LNP */
2204 ++ .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
2205 + { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
2206 + { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) },
2207 + { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
2208 +diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
2209 +index 720da09d4d73..088fcdc8d2b4 100644
2210 +--- a/drivers/pci/pci.c
2211 ++++ b/drivers/pci/pci.c
2212 +@@ -1004,10 +1004,15 @@ static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
2213 + if (state == PCI_D0) {
2214 + pci_platform_power_transition(dev, PCI_D0);
2215 + /*
2216 +- * Mandatory power management transition delays are
2217 +- * handled in the PCIe portdrv resume hooks.
2218 ++ * Mandatory power management transition delays, see
2219 ++ * PCI Express Base Specification Revision 2.0 Section
2220 ++ * 6.6.1: Conventional Reset. Do not delay for
2221 ++ * devices powered on/off by corresponding bridge,
2222 ++ * because have already delayed for the bridge.
2223 + */
2224 + if (dev->runtime_d3cold) {
2225 ++ if (dev->d3cold_delay && !dev->imm_ready)
2226 ++ msleep(dev->d3cold_delay);
2227 + /*
2228 + * When powering on a bridge from D3cold, the
2229 + * whole hierarchy may be powered on into
2230 +@@ -4570,16 +4575,14 @@ static int pci_pm_reset(struct pci_dev *dev, int probe)
2231 +
2232 + return pci_dev_wait(dev, "PM D3->D0", PCIE_RESET_READY_POLL_MS);
2233 + }
2234 +-
2235 + /**
2236 +- * pcie_wait_for_link_delay - Wait until link is active or inactive
2237 ++ * pcie_wait_for_link - Wait until link is active or inactive
2238 + * @pdev: Bridge device
2239 + * @active: waiting for active or inactive?
2240 +- * @delay: Delay to wait after link has become active (in ms)
2241 + *
2242 + * Use this to wait till link becomes active or inactive.
2243 + */
2244 +-bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active, int delay)
2245 ++bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
2246 + {
2247 + int timeout = 1000;
2248 + bool ret;
2249 +@@ -4616,25 +4619,13 @@ bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active, int delay)
2250 + timeout -= 10;
2251 + }
2252 + if (active && ret)
2253 +- msleep(delay);
2254 ++ msleep(100);
2255 + else if (ret != active)
2256 + pci_info(pdev, "Data Link Layer Link Active not %s in 1000 msec\n",
2257 + active ? "set" : "cleared");
2258 + return ret == active;
2259 + }
2260 +
2261 +-/**
2262 +- * pcie_wait_for_link - Wait until link is active or inactive
2263 +- * @pdev: Bridge device
2264 +- * @active: waiting for active or inactive?
2265 +- *
2266 +- * Use this to wait till link becomes active or inactive.
2267 +- */
2268 +-bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
2269 +-{
2270 +- return pcie_wait_for_link_delay(pdev, active, 100);
2271 +-}
2272 +-
2273 + void pci_reset_secondary_bus(struct pci_dev *dev)
2274 + {
2275 + u16 ctrl;
2276 +diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
2277 +index 59802b3def4b..9cb99380c61e 100644
2278 +--- a/drivers/pci/pci.h
2279 ++++ b/drivers/pci/pci.h
2280 +@@ -493,7 +493,6 @@ static inline int pci_dev_specific_disable_acs_redir(struct pci_dev *dev)
2281 + void pcie_do_recovery(struct pci_dev *dev, enum pci_channel_state state,
2282 + u32 service);
2283 +
2284 +-bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active, int delay);
2285 + bool pcie_wait_for_link(struct pci_dev *pdev, bool active);
2286 + #ifdef CONFIG_PCIEASPM
2287 + void pcie_aspm_init_link_state(struct pci_dev *pdev);
2288 +diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
2289 +index 308c3e0c4a34..1b330129089f 100644
2290 +--- a/drivers/pci/pcie/portdrv_core.c
2291 ++++ b/drivers/pci/pcie/portdrv_core.c
2292 +@@ -9,7 +9,6 @@
2293 + #include <linux/module.h>
2294 + #include <linux/pci.h>
2295 + #include <linux/kernel.h>
2296 +-#include <linux/delay.h>
2297 + #include <linux/errno.h>
2298 + #include <linux/pm.h>
2299 + #include <linux/pm_runtime.h>
2300 +@@ -379,67 +378,6 @@ static int pm_iter(struct device *dev, void *data)
2301 + return 0;
2302 + }
2303 +
2304 +-static int get_downstream_delay(struct pci_bus *bus)
2305 +-{
2306 +- struct pci_dev *pdev;
2307 +- int min_delay = 100;
2308 +- int max_delay = 0;
2309 +-
2310 +- list_for_each_entry(pdev, &bus->devices, bus_list) {
2311 +- if (!pdev->imm_ready)
2312 +- min_delay = 0;
2313 +- else if (pdev->d3cold_delay < min_delay)
2314 +- min_delay = pdev->d3cold_delay;
2315 +- if (pdev->d3cold_delay > max_delay)
2316 +- max_delay = pdev->d3cold_delay;
2317 +- }
2318 +-
2319 +- return max(min_delay, max_delay);
2320 +-}
2321 +-
2322 +-/*
2323 +- * wait_for_downstream_link - Wait for downstream link to establish
2324 +- * @pdev: PCIe port whose downstream link is waited
2325 +- *
2326 +- * Handle delays according to PCIe 4.0 section 6.6.1 before configuration
2327 +- * access to the downstream component is permitted.
2328 +- *
2329 +- * This blocks PCI core resume of the hierarchy below this port until the
2330 +- * link is trained. Should be called before resuming port services to
2331 +- * prevent pciehp from starting to tear-down the hierarchy too soon.
2332 +- */
2333 +-static void wait_for_downstream_link(struct pci_dev *pdev)
2334 +-{
2335 +- int delay;
2336 +-
2337 +- if (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT &&
2338 +- pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM)
2339 +- return;
2340 +-
2341 +- if (pci_dev_is_disconnected(pdev))
2342 +- return;
2343 +-
2344 +- if (!pdev->subordinate || list_empty(&pdev->subordinate->devices) ||
2345 +- !pdev->bridge_d3)
2346 +- return;
2347 +-
2348 +- delay = get_downstream_delay(pdev->subordinate);
2349 +- if (!delay)
2350 +- return;
2351 +-
2352 +- dev_dbg(&pdev->dev, "waiting downstream link for %d ms\n", delay);
2353 +-
2354 +- /*
2355 +- * If downstream port does not support speeds greater than 5 GT/s
2356 +- * need to wait 100ms. For higher speeds (gen3) we need to wait
2357 +- * first for the data link layer to become active.
2358 +- */
2359 +- if (pcie_get_speed_cap(pdev) <= PCIE_SPEED_5_0GT)
2360 +- msleep(delay);
2361 +- else
2362 +- pcie_wait_for_link_delay(pdev, true, delay);
2363 +-}
2364 +-
2365 + /**
2366 + * pcie_port_device_suspend - suspend port services associated with a PCIe port
2367 + * @dev: PCI Express port to handle
2368 +@@ -453,8 +391,6 @@ int pcie_port_device_suspend(struct device *dev)
2369 + int pcie_port_device_resume_noirq(struct device *dev)
2370 + {
2371 + size_t off = offsetof(struct pcie_port_service_driver, resume_noirq);
2372 +-
2373 +- wait_for_downstream_link(to_pci_dev(dev));
2374 + return device_for_each_child(dev, &off, pm_iter);
2375 + }
2376 +
2377 +@@ -485,8 +421,6 @@ int pcie_port_device_runtime_suspend(struct device *dev)
2378 + int pcie_port_device_runtime_resume(struct device *dev)
2379 + {
2380 + size_t off = offsetof(struct pcie_port_service_driver, runtime_resume);
2381 +-
2382 +- wait_for_downstream_link(to_pci_dev(dev));
2383 + return device_for_each_child(dev, &off, pm_iter);
2384 + }
2385 + #endif /* PM */
2386 +diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
2387 +index 730c4e68094b..7f5adf02f095 100644
2388 +--- a/drivers/s390/cio/qdio_main.c
2389 ++++ b/drivers/s390/cio/qdio_main.c
2390 +@@ -1558,13 +1558,13 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
2391 + rc = qdio_kick_outbound_q(q, phys_aob);
2392 + } else if (need_siga_sync(q)) {
2393 + rc = qdio_siga_sync_q(q);
2394 ++ } else if (count < QDIO_MAX_BUFFERS_PER_Q &&
2395 ++ get_buf_state(q, prev_buf(bufnr), &state, 0) > 0 &&
2396 ++ state == SLSB_CU_OUTPUT_PRIMED) {
2397 ++ /* The previous buffer is not processed yet, tack on. */
2398 ++ qperf_inc(q, fast_requeue);
2399 + } else {
2400 +- /* try to fast requeue buffers */
2401 +- get_buf_state(q, prev_buf(bufnr), &state, 0);
2402 +- if (state != SLSB_CU_OUTPUT_PRIMED)
2403 +- rc = qdio_kick_outbound_q(q, 0);
2404 +- else
2405 +- qperf_inc(q, fast_requeue);
2406 ++ rc = qdio_kick_outbound_q(q, 0);
2407 + }
2408 +
2409 + /* in case of SIGA errors we must process the error immediately */
2410 +diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
2411 +index 0e79799e9a71..79eb40bdaf9f 100644
2412 +--- a/drivers/s390/cio/vfio_ccw_cp.c
2413 ++++ b/drivers/s390/cio/vfio_ccw_cp.c
2414 +@@ -89,8 +89,10 @@ static int pfn_array_alloc_pin(struct pfn_array *pa, struct device *mdev,
2415 + sizeof(*pa->pa_iova_pfn) +
2416 + sizeof(*pa->pa_pfn),
2417 + GFP_KERNEL);
2418 +- if (unlikely(!pa->pa_iova_pfn))
2419 ++ if (unlikely(!pa->pa_iova_pfn)) {
2420 ++ pa->pa_nr = 0;
2421 + return -ENOMEM;
2422 ++ }
2423 + pa->pa_pfn = pa->pa_iova_pfn + pa->pa_nr;
2424 +
2425 + pa->pa_iova_pfn[0] = pa->pa_iova >> PAGE_SHIFT;
2426 +diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
2427 +index 9125f7f4e64c..8a8fbde7e186 100644
2428 +--- a/drivers/s390/cio/vfio_ccw_drv.c
2429 ++++ b/drivers/s390/cio/vfio_ccw_drv.c
2430 +@@ -88,7 +88,7 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work)
2431 + (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT));
2432 + if (scsw_is_solicited(&irb->scsw)) {
2433 + cp_update_scsw(&private->cp, &irb->scsw);
2434 +- if (is_final)
2435 ++ if (is_final && private->state == VFIO_CCW_STATE_CP_PENDING)
2436 + cp_free(&private->cp);
2437 + }
2438 + mutex_lock(&private->io_mutex);
2439 +diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
2440 +index f0066f8a1786..4971104b1817 100644
2441 +--- a/drivers/scsi/device_handler/scsi_dh_alua.c
2442 ++++ b/drivers/scsi/device_handler/scsi_dh_alua.c
2443 +@@ -40,6 +40,7 @@
2444 + #define ALUA_FAILOVER_TIMEOUT 60
2445 + #define ALUA_FAILOVER_RETRIES 5
2446 + #define ALUA_RTPG_DELAY_MSECS 5
2447 ++#define ALUA_RTPG_RETRY_DELAY 2
2448 +
2449 + /* device handler flags */
2450 + #define ALUA_OPTIMIZE_STPG 0x01
2451 +@@ -682,7 +683,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
2452 + case SCSI_ACCESS_STATE_TRANSITIONING:
2453 + if (time_before(jiffies, pg->expiry)) {
2454 + /* State transition, retry */
2455 +- pg->interval = 2;
2456 ++ pg->interval = ALUA_RTPG_RETRY_DELAY;
2457 + err = SCSI_DH_RETRY;
2458 + } else {
2459 + struct alua_dh_data *h;
2460 +@@ -807,6 +808,8 @@ static void alua_rtpg_work(struct work_struct *work)
2461 + spin_lock_irqsave(&pg->lock, flags);
2462 + pg->flags &= ~ALUA_PG_RUNNING;
2463 + pg->flags |= ALUA_PG_RUN_RTPG;
2464 ++ if (!pg->interval)
2465 ++ pg->interval = ALUA_RTPG_RETRY_DELAY;
2466 + spin_unlock_irqrestore(&pg->lock, flags);
2467 + queue_delayed_work(kaluad_wq, &pg->rtpg_work,
2468 + pg->interval * HZ);
2469 +@@ -818,6 +821,8 @@ static void alua_rtpg_work(struct work_struct *work)
2470 + spin_lock_irqsave(&pg->lock, flags);
2471 + if (err == SCSI_DH_RETRY || pg->flags & ALUA_PG_RUN_RTPG) {
2472 + pg->flags &= ~ALUA_PG_RUNNING;
2473 ++ if (!pg->interval && !(pg->flags & ALUA_PG_RUN_RTPG))
2474 ++ pg->interval = ALUA_RTPG_RETRY_DELAY;
2475 + pg->flags |= ALUA_PG_RUN_RTPG;
2476 + spin_unlock_irqrestore(&pg->lock, flags);
2477 + queue_delayed_work(kaluad_wq, &pg->rtpg_work,
2478 +diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
2479 +index acd16e0d52cf..8cdbac076a1b 100644
2480 +--- a/drivers/scsi/ibmvscsi/ibmvfc.c
2481 ++++ b/drivers/scsi/ibmvscsi/ibmvfc.c
2482 +@@ -4864,8 +4864,8 @@ static int ibmvfc_remove(struct vio_dev *vdev)
2483 +
2484 + spin_lock_irqsave(vhost->host->host_lock, flags);
2485 + ibmvfc_purge_requests(vhost, DID_ERROR);
2486 +- ibmvfc_free_event_pool(vhost);
2487 + spin_unlock_irqrestore(vhost->host->host_lock, flags);
2488 ++ ibmvfc_free_event_pool(vhost);
2489 +
2490 + ibmvfc_free_mem(vhost);
2491 + spin_lock(&ibmvfc_driver_lock);
2492 +diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
2493 +index 7237114a1d53..5f30016e9b64 100644
2494 +--- a/drivers/scsi/megaraid/megaraid_sas_base.c
2495 ++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
2496 +@@ -3045,6 +3045,7 @@ megasas_fw_crash_buffer_show(struct device *cdev,
2497 + u32 size;
2498 + unsigned long buff_addr;
2499 + unsigned long dmachunk = CRASH_DMA_BUF_SIZE;
2500 ++ unsigned long chunk_left_bytes;
2501 + unsigned long src_addr;
2502 + unsigned long flags;
2503 + u32 buff_offset;
2504 +@@ -3070,6 +3071,8 @@ megasas_fw_crash_buffer_show(struct device *cdev,
2505 + }
2506 +
2507 + size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset;
2508 ++ chunk_left_bytes = dmachunk - (buff_offset % dmachunk);
2509 ++ size = (size > chunk_left_bytes) ? chunk_left_bytes : size;
2510 + size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size;
2511 +
2512 + src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] +
2513 +diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c
2514 +index fd4995fb676e..f85ec5b16b65 100644
2515 +--- a/drivers/staging/android/ion/ion_page_pool.c
2516 ++++ b/drivers/staging/android/ion/ion_page_pool.c
2517 +@@ -8,11 +8,14 @@
2518 + #include <linux/list.h>
2519 + #include <linux/slab.h>
2520 + #include <linux/swap.h>
2521 ++#include <linux/sched/signal.h>
2522 +
2523 + #include "ion.h"
2524 +
2525 + static inline struct page *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
2526 + {
2527 ++ if (fatal_signal_pending(current))
2528 ++ return NULL;
2529 + return alloc_pages(pool->gfp_mask, pool->order);
2530 + }
2531 +
2532 +diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
2533 +index 9b07badf4c6c..bc750250ccd6 100644
2534 +--- a/drivers/staging/fbtft/fbtft-core.c
2535 ++++ b/drivers/staging/fbtft/fbtft-core.c
2536 +@@ -76,21 +76,18 @@ static int fbtft_request_one_gpio(struct fbtft_par *par,
2537 + struct gpio_desc **gpiop)
2538 + {
2539 + struct device *dev = par->info->device;
2540 +- struct device_node *node = dev->of_node;
2541 + int ret = 0;
2542 +
2543 +- if (of_find_property(node, name, NULL)) {
2544 +- *gpiop = devm_gpiod_get_index(dev, dev->driver->name, index,
2545 +- GPIOD_OUT_HIGH);
2546 +- if (IS_ERR(*gpiop)) {
2547 +- ret = PTR_ERR(*gpiop);
2548 +- dev_err(dev,
2549 +- "Failed to request %s GPIO:%d\n", name, ret);
2550 +- return ret;
2551 +- }
2552 +- fbtft_par_dbg(DEBUG_REQUEST_GPIOS, par, "%s: '%s' GPIO\n",
2553 +- __func__, name);
2554 ++ *gpiop = devm_gpiod_get_index_optional(dev, name, index,
2555 ++ GPIOD_OUT_HIGH);
2556 ++ if (IS_ERR(*gpiop)) {
2557 ++ ret = PTR_ERR(*gpiop);
2558 ++ dev_err(dev,
2559 ++ "Failed to request %s GPIO: %d\n", name, ret);
2560 ++ return ret;
2561 + }
2562 ++ fbtft_par_dbg(DEBUG_REQUEST_GPIOS, par, "%s: '%s' GPIO\n",
2563 ++ __func__, name);
2564 +
2565 + return ret;
2566 + }
2567 +@@ -103,34 +100,34 @@ static int fbtft_request_gpios_dt(struct fbtft_par *par)
2568 + if (!par->info->device->of_node)
2569 + return -EINVAL;
2570 +
2571 +- ret = fbtft_request_one_gpio(par, "reset-gpios", 0, &par->gpio.reset);
2572 ++ ret = fbtft_request_one_gpio(par, "reset", 0, &par->gpio.reset);
2573 + if (ret)
2574 + return ret;
2575 +- ret = fbtft_request_one_gpio(par, "dc-gpios", 0, &par->gpio.dc);
2576 ++ ret = fbtft_request_one_gpio(par, "dc", 0, &par->gpio.dc);
2577 + if (ret)
2578 + return ret;
2579 +- ret = fbtft_request_one_gpio(par, "rd-gpios", 0, &par->gpio.rd);
2580 ++ ret = fbtft_request_one_gpio(par, "rd", 0, &par->gpio.rd);
2581 + if (ret)
2582 + return ret;
2583 +- ret = fbtft_request_one_gpio(par, "wr-gpios", 0, &par->gpio.wr);
2584 ++ ret = fbtft_request_one_gpio(par, "wr", 0, &par->gpio.wr);
2585 + if (ret)
2586 + return ret;
2587 +- ret = fbtft_request_one_gpio(par, "cs-gpios", 0, &par->gpio.cs);
2588 ++ ret = fbtft_request_one_gpio(par, "cs", 0, &par->gpio.cs);
2589 + if (ret)
2590 + return ret;
2591 +- ret = fbtft_request_one_gpio(par, "latch-gpios", 0, &par->gpio.latch);
2592 ++ ret = fbtft_request_one_gpio(par, "latch", 0, &par->gpio.latch);
2593 + if (ret)
2594 + return ret;
2595 + for (i = 0; i < 16; i++) {
2596 +- ret = fbtft_request_one_gpio(par, "db-gpios", i,
2597 ++ ret = fbtft_request_one_gpio(par, "db", i,
2598 + &par->gpio.db[i]);
2599 + if (ret)
2600 + return ret;
2601 +- ret = fbtft_request_one_gpio(par, "led-gpios", i,
2602 ++ ret = fbtft_request_one_gpio(par, "led", i,
2603 + &par->gpio.led[i]);
2604 + if (ret)
2605 + return ret;
2606 +- ret = fbtft_request_one_gpio(par, "aux-gpios", i,
2607 ++ ret = fbtft_request_one_gpio(par, "aux", i,
2608 + &par->gpio.aux[i]);
2609 + if (ret)
2610 + return ret;
2611 +@@ -234,9 +231,9 @@ static void fbtft_reset(struct fbtft_par *par)
2612 + if (!par->gpio.reset)
2613 + return;
2614 + fbtft_par_dbg(DEBUG_RESET, par, "%s()\n", __func__);
2615 +- gpiod_set_value_cansleep(par->gpio.reset, 0);
2616 +- usleep_range(20, 40);
2617 + gpiod_set_value_cansleep(par->gpio.reset, 1);
2618 ++ usleep_range(20, 40);
2619 ++ gpiod_set_value_cansleep(par->gpio.reset, 0);
2620 + msleep(120);
2621 + }
2622 +
2623 +diff --git a/drivers/staging/gasket/apex_driver.c b/drivers/staging/gasket/apex_driver.c
2624 +index 2be45ee9d061..464648ee2036 100644
2625 +--- a/drivers/staging/gasket/apex_driver.c
2626 ++++ b/drivers/staging/gasket/apex_driver.c
2627 +@@ -532,7 +532,7 @@ static ssize_t sysfs_show(struct device *device, struct device_attribute *attr,
2628 + break;
2629 + case ATTR_KERNEL_HIB_SIMPLE_PAGE_TABLE_SIZE:
2630 + ret = scnprintf(buf, PAGE_SIZE, "%u\n",
2631 +- gasket_page_table_num_entries(
2632 ++ gasket_page_table_num_simple_entries(
2633 + gasket_dev->page_table[0]));
2634 + break;
2635 + case ATTR_KERNEL_HIB_NUM_ACTIVE_PAGES:
2636 +diff --git a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
2637 +index f6825727bf77..3592b545246c 100644
2638 +--- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
2639 ++++ b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
2640 +@@ -1789,6 +1789,7 @@ void wilc_deinit_host_int(struct net_device *net)
2641 +
2642 + priv->p2p_listen_state = false;
2643 +
2644 ++ flush_workqueue(vif->wilc->hif_workqueue);
2645 + mutex_destroy(&priv->scan_req_lock);
2646 + ret = wilc_deinit(vif);
2647 +
2648 +diff --git a/drivers/tty/tty_ldsem.c b/drivers/tty/tty_ldsem.c
2649 +index 717292c1c0df..60ff236a3d63 100644
2650 +--- a/drivers/tty/tty_ldsem.c
2651 ++++ b/drivers/tty/tty_ldsem.c
2652 +@@ -93,8 +93,7 @@ static void __ldsem_wake_readers(struct ld_semaphore *sem)
2653 +
2654 + list_for_each_entry_safe(waiter, next, &sem->read_wait, list) {
2655 + tsk = waiter->task;
2656 +- smp_mb();
2657 +- waiter->task = NULL;
2658 ++ smp_store_release(&waiter->task, NULL);
2659 + wake_up_process(tsk);
2660 + put_task_struct(tsk);
2661 + }
2662 +@@ -194,7 +193,7 @@ down_read_failed(struct ld_semaphore *sem, long count, long timeout)
2663 + for (;;) {
2664 + set_current_state(TASK_UNINTERRUPTIBLE);
2665 +
2666 +- if (!waiter.task)
2667 ++ if (!smp_load_acquire(&waiter.task))
2668 + break;
2669 + if (!timeout)
2670 + break;
2671 +diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
2672 +index a02448105527..86130e8d35f9 100644
2673 +--- a/drivers/usb/core/devio.c
2674 ++++ b/drivers/usb/core/devio.c
2675 +@@ -1788,8 +1788,6 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
2676 + return 0;
2677 +
2678 + error:
2679 +- if (as && as->usbm)
2680 +- dec_usb_memory_use_count(as->usbm, &as->usbm->urb_use_count);
2681 + kfree(isopkt);
2682 + kfree(dr);
2683 + if (as)
2684 +diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c
2685 +index 671bce18782c..8616c52849c6 100644
2686 +--- a/drivers/usb/host/xhci-rcar.c
2687 ++++ b/drivers/usb/host/xhci-rcar.c
2688 +@@ -238,10 +238,15 @@ int xhci_rcar_init_quirk(struct usb_hcd *hcd)
2689 + * pointers. So, this driver clears the AC64 bit of xhci->hcc_params
2690 + * to call dma_set_coherent_mask(dev, DMA_BIT_MASK(32)) in
2691 + * xhci_gen_setup().
2692 ++ *
2693 ++ * And, since the firmware/internal CPU control the USBSTS.STS_HALT
2694 ++ * and the process speed is down when the roothub port enters U3,
2695 ++ * long delay for the handshake of STS_HALT is neeed in xhci_suspend().
2696 + */
2697 + if (xhci_rcar_is_gen2(hcd->self.controller) ||
2698 +- xhci_rcar_is_gen3(hcd->self.controller))
2699 +- xhci->quirks |= XHCI_NO_64BIT_SUPPORT;
2700 ++ xhci_rcar_is_gen3(hcd->self.controller)) {
2701 ++ xhci->quirks |= XHCI_NO_64BIT_SUPPORT | XHCI_SLOW_SUSPEND;
2702 ++ }
2703 +
2704 + if (!xhci_rcar_wait_for_pll_active(hcd))
2705 + return -ETIMEDOUT;
2706 +diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
2707 +index ba05dd80a020..f5bed9f29e56 100644
2708 +--- a/drivers/usb/misc/iowarrior.c
2709 ++++ b/drivers/usb/misc/iowarrior.c
2710 +@@ -866,19 +866,20 @@ static void iowarrior_disconnect(struct usb_interface *interface)
2711 + dev = usb_get_intfdata(interface);
2712 + mutex_lock(&iowarrior_open_disc_lock);
2713 + usb_set_intfdata(interface, NULL);
2714 ++ /* prevent device read, write and ioctl */
2715 ++ dev->present = 0;
2716 +
2717 + minor = dev->minor;
2718 ++ mutex_unlock(&iowarrior_open_disc_lock);
2719 ++ /* give back our minor - this will call close() locks need to be dropped at this point*/
2720 +
2721 +- /* give back our minor */
2722 + usb_deregister_dev(interface, &iowarrior_class);
2723 +
2724 + mutex_lock(&dev->mutex);
2725 +
2726 + /* prevent device read, write and ioctl */
2727 +- dev->present = 0;
2728 +
2729 + mutex_unlock(&dev->mutex);
2730 +- mutex_unlock(&iowarrior_open_disc_lock);
2731 +
2732 + if (dev->opened) {
2733 + /* There is a process that holds a filedescriptor to the device ,
2734 +diff --git a/drivers/usb/misc/rio500.c b/drivers/usb/misc/rio500.c
2735 +index 27e9c78a791e..a32d61a79ab8 100644
2736 +--- a/drivers/usb/misc/rio500.c
2737 ++++ b/drivers/usb/misc/rio500.c
2738 +@@ -51,6 +51,7 @@ struct rio_usb_data {
2739 + char *obuf, *ibuf; /* transfer buffers */
2740 + char bulk_in_ep, bulk_out_ep; /* Endpoint assignments */
2741 + wait_queue_head_t wait_q; /* for timeouts */
2742 ++ struct mutex lock; /* general race avoidance */
2743 + };
2744 +
2745 + static DEFINE_MUTEX(rio500_mutex);
2746 +@@ -62,8 +63,10 @@ static int open_rio(struct inode *inode, struct file *file)
2747 +
2748 + /* against disconnect() */
2749 + mutex_lock(&rio500_mutex);
2750 ++ mutex_lock(&(rio->lock));
2751 +
2752 + if (rio->isopen || !rio->present) {
2753 ++ mutex_unlock(&(rio->lock));
2754 + mutex_unlock(&rio500_mutex);
2755 + return -EBUSY;
2756 + }
2757 +@@ -71,6 +74,7 @@ static int open_rio(struct inode *inode, struct file *file)
2758 +
2759 + init_waitqueue_head(&rio->wait_q);
2760 +
2761 ++ mutex_unlock(&(rio->lock));
2762 +
2763 + dev_info(&rio->rio_dev->dev, "Rio opened.\n");
2764 + mutex_unlock(&rio500_mutex);
2765 +@@ -84,6 +88,7 @@ static int close_rio(struct inode *inode, struct file *file)
2766 +
2767 + /* against disconnect() */
2768 + mutex_lock(&rio500_mutex);
2769 ++ mutex_lock(&(rio->lock));
2770 +
2771 + rio->isopen = 0;
2772 + if (!rio->present) {
2773 +@@ -95,6 +100,7 @@ static int close_rio(struct inode *inode, struct file *file)
2774 + } else {
2775 + dev_info(&rio->rio_dev->dev, "Rio closed.\n");
2776 + }
2777 ++ mutex_unlock(&(rio->lock));
2778 + mutex_unlock(&rio500_mutex);
2779 + return 0;
2780 + }
2781 +@@ -109,7 +115,7 @@ static long ioctl_rio(struct file *file, unsigned int cmd, unsigned long arg)
2782 + int retries;
2783 + int retval=0;
2784 +
2785 +- mutex_lock(&rio500_mutex);
2786 ++ mutex_lock(&(rio->lock));
2787 + /* Sanity check to make sure rio is connected, powered, etc */
2788 + if (rio->present == 0 || rio->rio_dev == NULL) {
2789 + retval = -ENODEV;
2790 +@@ -253,7 +259,7 @@ static long ioctl_rio(struct file *file, unsigned int cmd, unsigned long arg)
2791 +
2792 +
2793 + err_out:
2794 +- mutex_unlock(&rio500_mutex);
2795 ++ mutex_unlock(&(rio->lock));
2796 + return retval;
2797 + }
2798 +
2799 +@@ -273,12 +279,12 @@ write_rio(struct file *file, const char __user *buffer,
2800 + int errn = 0;
2801 + int intr;
2802 +
2803 +- intr = mutex_lock_interruptible(&rio500_mutex);
2804 ++ intr = mutex_lock_interruptible(&(rio->lock));
2805 + if (intr)
2806 + return -EINTR;
2807 + /* Sanity check to make sure rio is connected, powered, etc */
2808 + if (rio->present == 0 || rio->rio_dev == NULL) {
2809 +- mutex_unlock(&rio500_mutex);
2810 ++ mutex_unlock(&(rio->lock));
2811 + return -ENODEV;
2812 + }
2813 +
2814 +@@ -301,7 +307,7 @@ write_rio(struct file *file, const char __user *buffer,
2815 + goto error;
2816 + }
2817 + if (signal_pending(current)) {
2818 +- mutex_unlock(&rio500_mutex);
2819 ++ mutex_unlock(&(rio->lock));
2820 + return bytes_written ? bytes_written : -EINTR;
2821 + }
2822 +
2823 +@@ -339,12 +345,12 @@ write_rio(struct file *file, const char __user *buffer,
2824 + buffer += copy_size;
2825 + } while (count > 0);
2826 +
2827 +- mutex_unlock(&rio500_mutex);
2828 ++ mutex_unlock(&(rio->lock));
2829 +
2830 + return bytes_written ? bytes_written : -EIO;
2831 +
2832 + error:
2833 +- mutex_unlock(&rio500_mutex);
2834 ++ mutex_unlock(&(rio->lock));
2835 + return errn;
2836 + }
2837 +
2838 +@@ -361,12 +367,12 @@ read_rio(struct file *file, char __user *buffer, size_t count, loff_t * ppos)
2839 + char *ibuf;
2840 + int intr;
2841 +
2842 +- intr = mutex_lock_interruptible(&rio500_mutex);
2843 ++ intr = mutex_lock_interruptible(&(rio->lock));
2844 + if (intr)
2845 + return -EINTR;
2846 + /* Sanity check to make sure rio is connected, powered, etc */
2847 + if (rio->present == 0 || rio->rio_dev == NULL) {
2848 +- mutex_unlock(&rio500_mutex);
2849 ++ mutex_unlock(&(rio->lock));
2850 + return -ENODEV;
2851 + }
2852 +
2853 +@@ -377,11 +383,11 @@ read_rio(struct file *file, char __user *buffer, size_t count, loff_t * ppos)
2854 +
2855 + while (count > 0) {
2856 + if (signal_pending(current)) {
2857 +- mutex_unlock(&rio500_mutex);
2858 ++ mutex_unlock(&(rio->lock));
2859 + return read_count ? read_count : -EINTR;
2860 + }
2861 + if (!rio->rio_dev) {
2862 +- mutex_unlock(&rio500_mutex);
2863 ++ mutex_unlock(&(rio->lock));
2864 + return -ENODEV;
2865 + }
2866 + this_read = (count >= IBUF_SIZE) ? IBUF_SIZE : count;
2867 +@@ -399,7 +405,7 @@ read_rio(struct file *file, char __user *buffer, size_t count, loff_t * ppos)
2868 + count = this_read = partial;
2869 + } else if (result == -ETIMEDOUT || result == 15) { /* FIXME: 15 ??? */
2870 + if (!maxretry--) {
2871 +- mutex_unlock(&rio500_mutex);
2872 ++ mutex_unlock(&(rio->lock));
2873 + dev_err(&rio->rio_dev->dev,
2874 + "read_rio: maxretry timeout\n");
2875 + return -ETIME;
2876 +@@ -409,19 +415,19 @@ read_rio(struct file *file, char __user *buffer, size_t count, loff_t * ppos)
2877 + finish_wait(&rio->wait_q, &wait);
2878 + continue;
2879 + } else if (result != -EREMOTEIO) {
2880 +- mutex_unlock(&rio500_mutex);
2881 ++ mutex_unlock(&(rio->lock));
2882 + dev_err(&rio->rio_dev->dev,
2883 + "Read Whoops - result:%d partial:%u this_read:%u\n",
2884 + result, partial, this_read);
2885 + return -EIO;
2886 + } else {
2887 +- mutex_unlock(&rio500_mutex);
2888 ++ mutex_unlock(&(rio->lock));
2889 + return (0);
2890 + }
2891 +
2892 + if (this_read) {
2893 + if (copy_to_user(buffer, ibuf, this_read)) {
2894 +- mutex_unlock(&rio500_mutex);
2895 ++ mutex_unlock(&(rio->lock));
2896 + return -EFAULT;
2897 + }
2898 + count -= this_read;
2899 +@@ -429,7 +435,7 @@ read_rio(struct file *file, char __user *buffer, size_t count, loff_t * ppos)
2900 + buffer += this_read;
2901 + }
2902 + }
2903 +- mutex_unlock(&rio500_mutex);
2904 ++ mutex_unlock(&(rio->lock));
2905 + return read_count;
2906 + }
2907 +
2908 +@@ -494,6 +500,8 @@ static int probe_rio(struct usb_interface *intf,
2909 + }
2910 + dev_dbg(&intf->dev, "ibuf address:%p\n", rio->ibuf);
2911 +
2912 ++ mutex_init(&(rio->lock));
2913 ++
2914 + usb_set_intfdata (intf, rio);
2915 + rio->present = 1;
2916 + bail_out:
2917 +@@ -511,10 +519,12 @@ static void disconnect_rio(struct usb_interface *intf)
2918 + if (rio) {
2919 + usb_deregister_dev(intf, &usb_rio_class);
2920 +
2921 ++ mutex_lock(&(rio->lock));
2922 + if (rio->isopen) {
2923 + rio->isopen = 0;
2924 + /* better let it finish - the release will do whats needed */
2925 + rio->rio_dev = NULL;
2926 ++ mutex_unlock(&(rio->lock));
2927 + mutex_unlock(&rio500_mutex);
2928 + return;
2929 + }
2930 +@@ -524,6 +534,7 @@ static void disconnect_rio(struct usb_interface *intf)
2931 + dev_info(&intf->dev, "USB Rio disconnected.\n");
2932 +
2933 + rio->present = 0;
2934 ++ mutex_unlock(&(rio->lock));
2935 + }
2936 + mutex_unlock(&rio500_mutex);
2937 + }
2938 +diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
2939 +index 7b306aa22d25..6715a128e6c8 100644
2940 +--- a/drivers/usb/misc/yurex.c
2941 ++++ b/drivers/usb/misc/yurex.c
2942 +@@ -92,7 +92,6 @@ static void yurex_delete(struct kref *kref)
2943 +
2944 + dev_dbg(&dev->interface->dev, "%s\n", __func__);
2945 +
2946 +- usb_put_dev(dev->udev);
2947 + if (dev->cntl_urb) {
2948 + usb_kill_urb(dev->cntl_urb);
2949 + kfree(dev->cntl_req);
2950 +@@ -108,6 +107,7 @@ static void yurex_delete(struct kref *kref)
2951 + dev->int_buffer, dev->urb->transfer_dma);
2952 + usb_free_urb(dev->urb);
2953 + }
2954 ++ usb_put_dev(dev->udev);
2955 + kfree(dev);
2956 + }
2957 +
2958 +diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
2959 +index fba32d84e578..15abe1d9958f 100644
2960 +--- a/drivers/usb/typec/tcpm/tcpm.c
2961 ++++ b/drivers/usb/typec/tcpm/tcpm.c
2962 +@@ -379,7 +379,8 @@ static enum tcpm_state tcpm_default_state(struct tcpm_port *port)
2963 + return SNK_UNATTACHED;
2964 + else if (port->try_role == TYPEC_SOURCE)
2965 + return SRC_UNATTACHED;
2966 +- else if (port->tcpc->config->default_role == TYPEC_SINK)
2967 ++ else if (port->tcpc->config &&
2968 ++ port->tcpc->config->default_role == TYPEC_SINK)
2969 + return SNK_UNATTACHED;
2970 + /* Fall through to return SRC_UNATTACHED */
2971 + } else if (port->port_type == TYPEC_PORT_SNK) {
2972 +@@ -586,7 +587,20 @@ static void tcpm_debugfs_init(struct tcpm_port *port)
2973 +
2974 + static void tcpm_debugfs_exit(struct tcpm_port *port)
2975 + {
2976 ++ int i;
2977 ++
2978 ++ mutex_lock(&port->logbuffer_lock);
2979 ++ for (i = 0; i < LOG_BUFFER_ENTRIES; i++) {
2980 ++ kfree(port->logbuffer[i]);
2981 ++ port->logbuffer[i] = NULL;
2982 ++ }
2983 ++ mutex_unlock(&port->logbuffer_lock);
2984 ++
2985 + debugfs_remove(port->dentry);
2986 ++ if (list_empty(&rootdir->d_subdirs)) {
2987 ++ debugfs_remove(rootdir);
2988 ++ rootdir = NULL;
2989 ++ }
2990 + }
2991 +
2992 + #else
2993 +@@ -1095,7 +1109,8 @@ static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt,
2994 + break;
2995 + case CMD_ATTENTION:
2996 + /* Attention command does not have response */
2997 +- typec_altmode_attention(adev, p[1]);
2998 ++ if (adev)
2999 ++ typec_altmode_attention(adev, p[1]);
3000 + return 0;
3001 + default:
3002 + break;
3003 +@@ -1147,20 +1162,26 @@ static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt,
3004 + }
3005 + break;
3006 + case CMD_ENTER_MODE:
3007 +- typec_altmode_update_active(pdev, true);
3008 +-
3009 +- if (typec_altmode_vdm(adev, p[0], &p[1], cnt)) {
3010 +- response[0] = VDO(adev->svid, 1, CMD_EXIT_MODE);
3011 +- response[0] |= VDO_OPOS(adev->mode);
3012 +- return 1;
3013 ++ if (adev && pdev) {
3014 ++ typec_altmode_update_active(pdev, true);
3015 ++
3016 ++ if (typec_altmode_vdm(adev, p[0], &p[1], cnt)) {
3017 ++ response[0] = VDO(adev->svid, 1,
3018 ++ CMD_EXIT_MODE);
3019 ++ response[0] |= VDO_OPOS(adev->mode);
3020 ++ return 1;
3021 ++ }
3022 + }
3023 + return 0;
3024 + case CMD_EXIT_MODE:
3025 +- typec_altmode_update_active(pdev, false);
3026 ++ if (adev && pdev) {
3027 ++ typec_altmode_update_active(pdev, false);
3028 +
3029 +- /* Back to USB Operation */
3030 +- WARN_ON(typec_altmode_notify(adev, TYPEC_STATE_USB,
3031 +- NULL));
3032 ++ /* Back to USB Operation */
3033 ++ WARN_ON(typec_altmode_notify(adev,
3034 ++ TYPEC_STATE_USB,
3035 ++ NULL));
3036 ++ }
3037 + break;
3038 + default:
3039 + break;
3040 +@@ -1170,8 +1191,10 @@ static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt,
3041 + switch (cmd) {
3042 + case CMD_ENTER_MODE:
3043 + /* Back to USB Operation */
3044 +- WARN_ON(typec_altmode_notify(adev, TYPEC_STATE_USB,
3045 +- NULL));
3046 ++ if (adev)
3047 ++ WARN_ON(typec_altmode_notify(adev,
3048 ++ TYPEC_STATE_USB,
3049 ++ NULL));
3050 + break;
3051 + default:
3052 + break;
3053 +@@ -1182,7 +1205,8 @@ static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt,
3054 + }
3055 +
3056 + /* Informing the alternate mode drivers about everything */
3057 +- typec_altmode_vdm(adev, p[0], &p[1], cnt);
3058 ++ if (adev)
3059 ++ typec_altmode_vdm(adev, p[0], &p[1], cnt);
3060 +
3061 + return rlen;
3062 + }
3063 +@@ -4114,7 +4138,7 @@ static int tcpm_try_role(const struct typec_capability *cap, int role)
3064 + mutex_lock(&port->lock);
3065 + if (tcpc->try_role)
3066 + ret = tcpc->try_role(tcpc, role);
3067 +- if (!ret && !tcpc->config->try_role_hw)
3068 ++ if (!ret && (!tcpc->config || !tcpc->config->try_role_hw))
3069 + port->try_role = role;
3070 + port->try_src_count = 0;
3071 + port->try_snk_count = 0;
3072 +@@ -4701,7 +4725,7 @@ static int tcpm_copy_caps(struct tcpm_port *port,
3073 + port->typec_caps.prefer_role = tcfg->default_role;
3074 + port->typec_caps.type = tcfg->type;
3075 + port->typec_caps.data = tcfg->data;
3076 +- port->self_powered = port->tcpc->config->self_powered;
3077 ++ port->self_powered = tcfg->self_powered;
3078 +
3079 + return 0;
3080 + }
3081 +diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c
3082 +index bf63074675fc..c274a36c2fe4 100644
3083 +--- a/drivers/usb/typec/ucsi/ucsi_ccg.c
3084 ++++ b/drivers/usb/typec/ucsi/ucsi_ccg.c
3085 +@@ -963,7 +963,7 @@ release_fw:
3086 + ******************************************************************************/
3087 + static int ccg_fw_update(struct ucsi_ccg *uc, enum enum_flash_mode flash_mode)
3088 + {
3089 +- int err;
3090 ++ int err = 0;
3091 +
3092 + while (flash_mode != FLASH_NOT_NEEDED) {
3093 + err = do_flash(uc, flash_mode);
3094 +diff --git a/fs/block_dev.c b/fs/block_dev.c
3095 +index 09c9d6726f07..a9e7c1b69437 100644
3096 +--- a/fs/block_dev.c
3097 ++++ b/fs/block_dev.c
3098 +@@ -1723,7 +1723,10 @@ int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder)
3099 +
3100 + /* finish claiming */
3101 + mutex_lock(&bdev->bd_mutex);
3102 +- bd_finish_claiming(bdev, whole, holder);
3103 ++ if (!res)
3104 ++ bd_finish_claiming(bdev, whole, holder);
3105 ++ else
3106 ++ bd_abort_claiming(bdev, whole, holder);
3107 + /*
3108 + * Block event polling for write claims if requested. Any
3109 + * write holder makes the write_holder state stick until
3110 +diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
3111 +index 75311a8a68bf..c3c8de5513db 100644
3112 +--- a/fs/cifs/smb2pdu.c
3113 ++++ b/fs/cifs/smb2pdu.c
3114 +@@ -252,7 +252,7 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
3115 + if (tcon == NULL)
3116 + return 0;
3117 +
3118 +- if (smb2_command == SMB2_TREE_CONNECT)
3119 ++ if (smb2_command == SMB2_TREE_CONNECT || smb2_command == SMB2_IOCTL)
3120 + return 0;
3121 +
3122 + if (tcon->tidStatus == CifsExiting) {
3123 +@@ -1173,7 +1173,12 @@ SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data)
3124 + else
3125 + req->SecurityMode = 0;
3126 +
3127 ++#ifdef CONFIG_CIFS_DFS_UPCALL
3128 ++ req->Capabilities = cpu_to_le32(SMB2_GLOBAL_CAP_DFS);
3129 ++#else
3130 + req->Capabilities = 0;
3131 ++#endif /* DFS_UPCALL */
3132 ++
3133 + req->Channel = 0; /* MBZ */
3134 +
3135 + sess_data->iov[0].iov_base = (char *)req;
3136 +diff --git a/fs/dax.c b/fs/dax.c
3137 +index 7d0e99982d48..4f42b852375b 100644
3138 +--- a/fs/dax.c
3139 ++++ b/fs/dax.c
3140 +@@ -601,7 +601,7 @@ struct page *dax_layout_busy_page(struct address_space *mapping)
3141 + * guaranteed to either see new references or prevent new
3142 + * references from being established.
3143 + */
3144 +- unmap_mapping_range(mapping, 0, 0, 1);
3145 ++ unmap_mapping_range(mapping, 0, 0, 0);
3146 +
3147 + xas_lock_irq(&xas);
3148 + xas_for_each(&xas, entry, ULONG_MAX) {
3149 +diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
3150 +index 93ea1d529aa3..253e2f939d5f 100644
3151 +--- a/fs/gfs2/bmap.c
3152 ++++ b/fs/gfs2/bmap.c
3153 +@@ -390,6 +390,19 @@ static int fillup_metapath(struct gfs2_inode *ip, struct metapath *mp, int h)
3154 + return mp->mp_aheight - x - 1;
3155 + }
3156 +
3157 ++static sector_t metapath_to_block(struct gfs2_sbd *sdp, struct metapath *mp)
3158 ++{
3159 ++ sector_t factor = 1, block = 0;
3160 ++ int hgt;
3161 ++
3162 ++ for (hgt = mp->mp_fheight - 1; hgt >= 0; hgt--) {
3163 ++ if (hgt < mp->mp_aheight)
3164 ++ block += mp->mp_list[hgt] * factor;
3165 ++ factor *= sdp->sd_inptrs;
3166 ++ }
3167 ++ return block;
3168 ++}
3169 ++
3170 + static void release_metapath(struct metapath *mp)
3171 + {
3172 + int i;
3173 +@@ -430,60 +443,84 @@ static inline unsigned int gfs2_extent_length(struct buffer_head *bh, __be64 *pt
3174 + return ptr - first;
3175 + }
3176 +
3177 +-typedef const __be64 *(*gfs2_metadata_walker)(
3178 +- struct metapath *mp,
3179 +- const __be64 *start, const __be64 *end,
3180 +- u64 factor, void *data);
3181 ++enum walker_status { WALK_STOP, WALK_FOLLOW, WALK_CONTINUE };
3182 +
3183 +-#define WALK_STOP ((__be64 *)0)
3184 +-#define WALK_NEXT ((__be64 *)1)
3185 ++/*
3186 ++ * gfs2_metadata_walker - walk an indirect block
3187 ++ * @mp: Metapath to indirect block
3188 ++ * @ptrs: Number of pointers to look at
3189 ++ *
3190 ++ * When returning WALK_FOLLOW, the walker must update @mp to point at the right
3191 ++ * indirect block to follow.
3192 ++ */
3193 ++typedef enum walker_status (*gfs2_metadata_walker)(struct metapath *mp,
3194 ++ unsigned int ptrs);
3195 +
3196 +-static int gfs2_walk_metadata(struct inode *inode, sector_t lblock,
3197 +- u64 len, struct metapath *mp, gfs2_metadata_walker walker,
3198 +- void *data)
3199 ++/*
3200 ++ * gfs2_walk_metadata - walk a tree of indirect blocks
3201 ++ * @inode: The inode
3202 ++ * @mp: Starting point of walk
3203 ++ * @max_len: Maximum number of blocks to walk
3204 ++ * @walker: Called during the walk
3205 ++ *
3206 ++ * Returns 1 if the walk was stopped by @walker, 0 if we went past @max_len or
3207 ++ * past the end of metadata, and a negative error code otherwise.
3208 ++ */
3209 ++
3210 ++static int gfs2_walk_metadata(struct inode *inode, struct metapath *mp,
3211 ++ u64 max_len, gfs2_metadata_walker walker)
3212 + {
3213 +- struct metapath clone;
3214 + struct gfs2_inode *ip = GFS2_I(inode);
3215 + struct gfs2_sbd *sdp = GFS2_SB(inode);
3216 +- const __be64 *start, *end, *ptr;
3217 + u64 factor = 1;
3218 + unsigned int hgt;
3219 +- int ret = 0;
3220 ++ int ret;
3221 +
3222 +- for (hgt = ip->i_height - 1; hgt >= mp->mp_aheight; hgt--)
3223 ++ /*
3224 ++ * The walk starts in the lowest allocated indirect block, which may be
3225 ++ * before the position indicated by @mp. Adjust @max_len accordingly
3226 ++ * to avoid a short walk.
3227 ++ */
3228 ++ for (hgt = mp->mp_fheight - 1; hgt >= mp->mp_aheight; hgt--) {
3229 ++ max_len += mp->mp_list[hgt] * factor;
3230 ++ mp->mp_list[hgt] = 0;
3231 + factor *= sdp->sd_inptrs;
3232 ++ }
3233 +
3234 + for (;;) {
3235 +- u64 step;
3236 ++ u16 start = mp->mp_list[hgt];
3237 ++ enum walker_status status;
3238 ++ unsigned int ptrs;
3239 ++ u64 len;
3240 +
3241 + /* Walk indirect block. */
3242 +- start = metapointer(hgt, mp);
3243 +- end = metaend(hgt, mp);
3244 +-
3245 +- step = (end - start) * factor;
3246 +- if (step > len)
3247 +- end = start + DIV_ROUND_UP_ULL(len, factor);
3248 +-
3249 +- ptr = walker(mp, start, end, factor, data);
3250 +- if (ptr == WALK_STOP)
3251 ++ ptrs = (hgt >= 1 ? sdp->sd_inptrs : sdp->sd_diptrs) - start;
3252 ++ len = ptrs * factor;
3253 ++ if (len > max_len)
3254 ++ ptrs = DIV_ROUND_UP_ULL(max_len, factor);
3255 ++ status = walker(mp, ptrs);
3256 ++ switch (status) {
3257 ++ case WALK_STOP:
3258 ++ return 1;
3259 ++ case WALK_FOLLOW:
3260 ++ BUG_ON(mp->mp_aheight == mp->mp_fheight);
3261 ++ ptrs = mp->mp_list[hgt] - start;
3262 ++ len = ptrs * factor;
3263 + break;
3264 +- if (step >= len)
3265 ++ case WALK_CONTINUE:
3266 + break;
3267 +- len -= step;
3268 +- if (ptr != WALK_NEXT) {
3269 +- BUG_ON(!*ptr);
3270 +- mp->mp_list[hgt] += ptr - start;
3271 +- goto fill_up_metapath;
3272 + }
3273 ++ if (len >= max_len)
3274 ++ break;
3275 ++ max_len -= len;
3276 ++ if (status == WALK_FOLLOW)
3277 ++ goto fill_up_metapath;
3278 +
3279 + lower_metapath:
3280 + /* Decrease height of metapath. */
3281 +- if (mp != &clone) {
3282 +- clone_metapath(&clone, mp);
3283 +- mp = &clone;
3284 +- }
3285 + brelse(mp->mp_bh[hgt]);
3286 + mp->mp_bh[hgt] = NULL;
3287 ++ mp->mp_list[hgt] = 0;
3288 + if (!hgt)
3289 + break;
3290 + hgt--;
3291 +@@ -491,10 +528,7 @@ lower_metapath:
3292 +
3293 + /* Advance in metadata tree. */
3294 + (mp->mp_list[hgt])++;
3295 +- start = metapointer(hgt, mp);
3296 +- end = metaend(hgt, mp);
3297 +- if (start >= end) {
3298 +- mp->mp_list[hgt] = 0;
3299 ++ if (mp->mp_list[hgt] >= sdp->sd_inptrs) {
3300 + if (!hgt)
3301 + break;
3302 + goto lower_metapath;
3303 +@@ -502,44 +536,36 @@ lower_metapath:
3304 +
3305 + fill_up_metapath:
3306 + /* Increase height of metapath. */
3307 +- if (mp != &clone) {
3308 +- clone_metapath(&clone, mp);
3309 +- mp = &clone;
3310 +- }
3311 + ret = fillup_metapath(ip, mp, ip->i_height - 1);
3312 + if (ret < 0)
3313 +- break;
3314 ++ return ret;
3315 + hgt += ret;
3316 + for (; ret; ret--)
3317 + do_div(factor, sdp->sd_inptrs);
3318 + mp->mp_aheight = hgt + 1;
3319 + }
3320 +- if (mp == &clone)
3321 +- release_metapath(mp);
3322 +- return ret;
3323 ++ return 0;
3324 + }
3325 +
3326 +-struct gfs2_hole_walker_args {
3327 +- u64 blocks;
3328 +-};
3329 +-
3330 +-static const __be64 *gfs2_hole_walker(struct metapath *mp,
3331 +- const __be64 *start, const __be64 *end,
3332 +- u64 factor, void *data)
3333 ++static enum walker_status gfs2_hole_walker(struct metapath *mp,
3334 ++ unsigned int ptrs)
3335 + {
3336 +- struct gfs2_hole_walker_args *args = data;
3337 +- const __be64 *ptr;
3338 ++ const __be64 *start, *ptr, *end;
3339 ++ unsigned int hgt;
3340 ++
3341 ++ hgt = mp->mp_aheight - 1;
3342 ++ start = metapointer(hgt, mp);
3343 ++ end = start + ptrs;
3344 +
3345 + for (ptr = start; ptr < end; ptr++) {
3346 + if (*ptr) {
3347 +- args->blocks += (ptr - start) * factor;
3348 ++ mp->mp_list[hgt] += ptr - start;
3349 + if (mp->mp_aheight == mp->mp_fheight)
3350 + return WALK_STOP;
3351 +- return ptr; /* increase height */
3352 ++ return WALK_FOLLOW;
3353 + }
3354 + }
3355 +- args->blocks += (end - start) * factor;
3356 +- return WALK_NEXT;
3357 ++ return WALK_CONTINUE;
3358 + }
3359 +
3360 + /**
3361 +@@ -557,12 +583,24 @@ static const __be64 *gfs2_hole_walker(struct metapath *mp,
3362 + static int gfs2_hole_size(struct inode *inode, sector_t lblock, u64 len,
3363 + struct metapath *mp, struct iomap *iomap)
3364 + {
3365 +- struct gfs2_hole_walker_args args = { };
3366 +- int ret = 0;
3367 ++ struct metapath clone;
3368 ++ u64 hole_size;
3369 ++ int ret;
3370 +
3371 +- ret = gfs2_walk_metadata(inode, lblock, len, mp, gfs2_hole_walker, &args);
3372 +- if (!ret)
3373 +- iomap->length = args.blocks << inode->i_blkbits;
3374 ++ clone_metapath(&clone, mp);
3375 ++ ret = gfs2_walk_metadata(inode, &clone, len, gfs2_hole_walker);
3376 ++ if (ret < 0)
3377 ++ goto out;
3378 ++
3379 ++ if (ret == 1)
3380 ++ hole_size = metapath_to_block(GFS2_SB(inode), &clone) - lblock;
3381 ++ else
3382 ++ hole_size = len;
3383 ++ iomap->length = hole_size << inode->i_blkbits;
3384 ++ ret = 0;
3385 ++
3386 ++out:
3387 ++ release_metapath(&clone);
3388 + return ret;
3389 + }
3390 +
3391 +diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
3392 +index 0ff3facf81da..0af854cce8ff 100644
3393 +--- a/fs/nfs/delegation.c
3394 ++++ b/fs/nfs/delegation.c
3395 +@@ -153,7 +153,7 @@ again:
3396 + /* Block nfs4_proc_unlck */
3397 + mutex_lock(&sp->so_delegreturn_mutex);
3398 + seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
3399 +- err = nfs4_open_delegation_recall(ctx, state, stateid, type);
3400 ++ err = nfs4_open_delegation_recall(ctx, state, stateid);
3401 + if (!err)
3402 + err = nfs_delegation_claim_locks(state, stateid);
3403 + if (!err && read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
3404 +diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h
3405 +index 5799777df5ec..9eb87ae4c982 100644
3406 +--- a/fs/nfs/delegation.h
3407 ++++ b/fs/nfs/delegation.h
3408 +@@ -63,7 +63,7 @@ void nfs_reap_expired_delegations(struct nfs_client *clp);
3409 +
3410 + /* NFSv4 delegation-related procedures */
3411 + int nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred, const nfs4_stateid *stateid, int issync);
3412 +-int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid, fmode_t type);
3413 ++int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid);
3414 + int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid);
3415 + bool nfs4_copy_delegation_stateid(struct inode *inode, fmode_t flags, nfs4_stateid *dst, const struct cred **cred);
3416 + bool nfs4_refresh_delegation_stateid(nfs4_stateid *dst, struct inode *inode);
3417 +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
3418 +index 6418cb6c079b..63edda145d1b 100644
3419 +--- a/fs/nfs/nfs4proc.c
3420 ++++ b/fs/nfs/nfs4proc.c
3421 +@@ -1878,8 +1878,9 @@ _nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data)
3422 + if (data->o_res.delegation_type != 0)
3423 + nfs4_opendata_check_deleg(data, state);
3424 + update:
3425 +- update_open_stateid(state, &data->o_res.stateid, NULL,
3426 +- data->o_arg.fmode);
3427 ++ if (!update_open_stateid(state, &data->o_res.stateid,
3428 ++ NULL, data->o_arg.fmode))
3429 ++ return ERR_PTR(-EAGAIN);
3430 + refcount_inc(&state->count);
3431 +
3432 + return state;
3433 +@@ -1944,8 +1945,11 @@ _nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
3434 +
3435 + if (data->o_res.delegation_type != 0)
3436 + nfs4_opendata_check_deleg(data, state);
3437 +- update_open_stateid(state, &data->o_res.stateid, NULL,
3438 +- data->o_arg.fmode);
3439 ++ if (!update_open_stateid(state, &data->o_res.stateid,
3440 ++ NULL, data->o_arg.fmode)) {
3441 ++ nfs4_put_open_state(state);
3442 ++ state = ERR_PTR(-EAGAIN);
3443 ++ }
3444 + out:
3445 + nfs_release_seqid(data->o_arg.seqid);
3446 + return state;
3447 +@@ -2148,12 +2152,10 @@ static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct
3448 + case -NFS4ERR_BAD_HIGH_SLOT:
3449 + case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
3450 + case -NFS4ERR_DEADSESSION:
3451 +- set_bit(NFS_DELEGATED_STATE, &state->flags);
3452 + nfs4_schedule_session_recovery(server->nfs_client->cl_session, err);
3453 + return -EAGAIN;
3454 + case -NFS4ERR_STALE_CLIENTID:
3455 + case -NFS4ERR_STALE_STATEID:
3456 +- set_bit(NFS_DELEGATED_STATE, &state->flags);
3457 + /* Don't recall a delegation if it was lost */
3458 + nfs4_schedule_lease_recovery(server->nfs_client);
3459 + return -EAGAIN;
3460 +@@ -2174,7 +2176,6 @@ static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct
3461 + return -EAGAIN;
3462 + case -NFS4ERR_DELAY:
3463 + case -NFS4ERR_GRACE:
3464 +- set_bit(NFS_DELEGATED_STATE, &state->flags);
3465 + ssleep(1);
3466 + return -EAGAIN;
3467 + case -ENOMEM:
3468 +@@ -2190,8 +2191,7 @@ static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct
3469 + }
3470 +
3471 + int nfs4_open_delegation_recall(struct nfs_open_context *ctx,
3472 +- struct nfs4_state *state, const nfs4_stateid *stateid,
3473 +- fmode_t type)
3474 ++ struct nfs4_state *state, const nfs4_stateid *stateid)
3475 + {
3476 + struct nfs_server *server = NFS_SERVER(state->inode);
3477 + struct nfs4_opendata *opendata;
3478 +@@ -2202,20 +2202,23 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx,
3479 + if (IS_ERR(opendata))
3480 + return PTR_ERR(opendata);
3481 + nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
3482 +- nfs_state_clear_delegation(state);
3483 +- switch (type & (FMODE_READ|FMODE_WRITE)) {
3484 +- case FMODE_READ|FMODE_WRITE:
3485 +- case FMODE_WRITE:
3486 ++ if (!test_bit(NFS_O_RDWR_STATE, &state->flags)) {
3487 + err = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
3488 + if (err)
3489 +- break;
3490 ++ goto out;
3491 ++ }
3492 ++ if (!test_bit(NFS_O_WRONLY_STATE, &state->flags)) {
3493 + err = nfs4_open_recover_helper(opendata, FMODE_WRITE);
3494 + if (err)
3495 +- break;
3496 +- /* Fall through */
3497 +- case FMODE_READ:
3498 ++ goto out;
3499 ++ }
3500 ++ if (!test_bit(NFS_O_RDONLY_STATE, &state->flags)) {
3501 + err = nfs4_open_recover_helper(opendata, FMODE_READ);
3502 ++ if (err)
3503 ++ goto out;
3504 + }
3505 ++ nfs_state_clear_delegation(state);
3506 ++out:
3507 + nfs4_opendata_put(opendata);
3508 + return nfs4_handle_delegation_recall_error(server, state, stateid, NULL, err);
3509 + }
3510 +@@ -3172,7 +3175,7 @@ static int _nfs4_do_setattr(struct inode *inode,
3511 +
3512 + if (nfs4_copy_delegation_stateid(inode, FMODE_WRITE, &arg->stateid, &delegation_cred)) {
3513 + /* Use that stateid */
3514 +- } else if (ctx != NULL) {
3515 ++ } else if (ctx != NULL && ctx->state) {
3516 + struct nfs_lock_context *l_ctx;
3517 + if (!nfs4_valid_open_stateid(ctx->state))
3518 + return -EBADF;
3519 +diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
3520 +index 46bbc949c20a..7a30524a80ee 100644
3521 +--- a/include/kvm/arm_vgic.h
3522 ++++ b/include/kvm/arm_vgic.h
3523 +@@ -350,6 +350,7 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
3524 +
3525 + void kvm_vgic_load(struct kvm_vcpu *vcpu);
3526 + void kvm_vgic_put(struct kvm_vcpu *vcpu);
3527 ++void kvm_vgic_vmcr_sync(struct kvm_vcpu *vcpu);
3528 +
3529 + #define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel))
3530 + #define vgic_initialized(k) ((k)->arch.vgic.initialized)
3531 +diff --git a/include/linux/ccp.h b/include/linux/ccp.h
3532 +index 55cb455cfcb0..a5dfbaf2470d 100644
3533 +--- a/include/linux/ccp.h
3534 ++++ b/include/linux/ccp.h
3535 +@@ -170,6 +170,8 @@ struct ccp_aes_engine {
3536 + enum ccp_aes_mode mode;
3537 + enum ccp_aes_action action;
3538 +
3539 ++ u32 authsize;
3540 ++
3541 + struct scatterlist *key;
3542 + u32 key_len; /* In bytes */
3543 +
3544 +diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
3545 +index d1ad38a3f048..7546cbf3dfb0 100644
3546 +--- a/include/linux/kvm_host.h
3547 ++++ b/include/linux/kvm_host.h
3548 +@@ -871,6 +871,7 @@ void kvm_arch_check_processor_compat(void *rtn);
3549 + int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
3550 + bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
3551 + int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
3552 ++bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu);
3553 +
3554 + #ifndef __KVM_HAVE_ARCH_VM_ALLOC
3555 + /*
3556 +diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
3557 +index c5188ff724d1..bc88d6f964da 100644
3558 +--- a/include/sound/compress_driver.h
3559 ++++ b/include/sound/compress_driver.h
3560 +@@ -173,10 +173,7 @@ static inline void snd_compr_drain_notify(struct snd_compr_stream *stream)
3561 + if (snd_BUG_ON(!stream))
3562 + return;
3563 +
3564 +- if (stream->direction == SND_COMPRESS_PLAYBACK)
3565 +- stream->runtime->state = SNDRV_PCM_STATE_SETUP;
3566 +- else
3567 +- stream->runtime->state = SNDRV_PCM_STATE_PREPARED;
3568 ++ stream->runtime->state = SNDRV_PCM_STATE_SETUP;
3569 +
3570 + wake_up(&stream->runtime->sleep);
3571 + }
3572 +diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
3573 +index 6f09d1500960..70da1c6cdd07 100644
3574 +--- a/include/uapi/linux/nl80211.h
3575 ++++ b/include/uapi/linux/nl80211.h
3576 +@@ -2844,7 +2844,7 @@ enum nl80211_attrs {
3577 + #define NL80211_HT_CAPABILITY_LEN 26
3578 + #define NL80211_VHT_CAPABILITY_LEN 12
3579 + #define NL80211_HE_MIN_CAPABILITY_LEN 16
3580 +-#define NL80211_HE_MAX_CAPABILITY_LEN 51
3581 ++#define NL80211_HE_MAX_CAPABILITY_LEN 54
3582 + #define NL80211_MAX_NR_CIPHER_SUITES 5
3583 + #define NL80211_MAX_NR_AKM_SUITES 2
3584 +
3585 +diff --git a/kernel/events/core.c b/kernel/events/core.c
3586 +index f851934d55d4..4bc15cff1026 100644
3587 +--- a/kernel/events/core.c
3588 ++++ b/kernel/events/core.c
3589 +@@ -11266,7 +11266,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
3590 + goto err_unlock;
3591 + }
3592 +
3593 +- perf_install_in_context(ctx, event, cpu);
3594 ++ perf_install_in_context(ctx, event, event->cpu);
3595 + perf_unpin_context(ctx);
3596 + mutex_unlock(&ctx->mutex);
3597 +
3598 +diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c
3599 +index f18cd5aa33e8..656c14333613 100644
3600 +--- a/kernel/irq/affinity.c
3601 ++++ b/kernel/irq/affinity.c
3602 +@@ -253,11 +253,9 @@ irq_create_affinity_masks(unsigned int nvecs, struct irq_affinity *affd)
3603 + * Determine the number of vectors which need interrupt affinities
3604 + * assigned. If the pre/post request exhausts the available vectors
3605 + * then nothing to do here except for invoking the calc_sets()
3606 +- * callback so the device driver can adjust to the situation. If there
3607 +- * is only a single vector, then managing the queue is pointless as
3608 +- * well.
3609 ++ * callback so the device driver can adjust to the situation.
3610 + */
3611 +- if (nvecs > 1 && nvecs > affd->pre_vectors + affd->post_vectors)
3612 ++ if (nvecs > affd->pre_vectors + affd->post_vectors)
3613 + affvecs = nvecs - affd->pre_vectors - affd->post_vectors;
3614 + else
3615 + affvecs = 0;
3616 +diff --git a/lib/test_firmware.c b/lib/test_firmware.c
3617 +index 83ea6c4e623c..6ca97a63b3d6 100644
3618 +--- a/lib/test_firmware.c
3619 ++++ b/lib/test_firmware.c
3620 +@@ -886,8 +886,11 @@ static int __init test_firmware_init(void)
3621 + return -ENOMEM;
3622 +
3623 + rc = __test_firmware_config_init();
3624 +- if (rc)
3625 ++ if (rc) {
3626 ++ kfree(test_fw_config);
3627 ++ pr_err("could not init firmware test config: %d\n", rc);
3628 + return rc;
3629 ++ }
3630 +
3631 + rc = misc_register(&test_fw_misc_device);
3632 + if (rc) {
3633 +diff --git a/mm/vmalloc.c b/mm/vmalloc.c
3634 +index 0f76cca32a1c..080d30408ce3 100644
3635 +--- a/mm/vmalloc.c
3636 ++++ b/mm/vmalloc.c
3637 +@@ -1213,6 +1213,12 @@ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
3638 + if (unlikely(valist == NULL))
3639 + return false;
3640 +
3641 ++ /*
3642 ++ * First make sure the mappings are removed from all page-tables
3643 ++ * before they are freed.
3644 ++ */
3645 ++ vmalloc_sync_all();
3646 ++
3647 + /*
3648 + * TODO: to calculate a flush range without looping.
3649 + * The list can be up to lazy_max_pages() elements.
3650 +@@ -3001,6 +3007,9 @@ EXPORT_SYMBOL(remap_vmalloc_range);
3651 + /*
3652 + * Implement a stub for vmalloc_sync_all() if the architecture chose not to
3653 + * have one.
3654 ++ *
3655 ++ * The purpose of this function is to make sure the vmalloc area
3656 ++ * mappings are identical in all page-tables in the system.
3657 + */
3658 + void __weak vmalloc_sync_all(void)
3659 + {
3660 +diff --git a/net/ipv4/netfilter/ipt_rpfilter.c b/net/ipv4/netfilter/ipt_rpfilter.c
3661 +index 59031670b16a..cc23f1ce239c 100644
3662 +--- a/net/ipv4/netfilter/ipt_rpfilter.c
3663 ++++ b/net/ipv4/netfilter/ipt_rpfilter.c
3664 +@@ -78,6 +78,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
3665 + flow.flowi4_mark = info->flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0;
3666 + flow.flowi4_tos = RT_TOS(iph->tos);
3667 + flow.flowi4_scope = RT_SCOPE_UNIVERSE;
3668 ++ flow.flowi4_oif = l3mdev_master_ifindex_rcu(xt_in(par));
3669 +
3670 + return rpfilter_lookup_reverse(xt_net(par), &flow, xt_in(par), info->flags) ^ invert;
3671 + }
3672 +diff --git a/net/ipv6/netfilter/ip6t_rpfilter.c b/net/ipv6/netfilter/ip6t_rpfilter.c
3673 +index 6bcaf7357183..d800801a5dd2 100644
3674 +--- a/net/ipv6/netfilter/ip6t_rpfilter.c
3675 ++++ b/net/ipv6/netfilter/ip6t_rpfilter.c
3676 +@@ -55,7 +55,9 @@ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb,
3677 + if (rpfilter_addr_linklocal(&iph->saddr)) {
3678 + lookup_flags |= RT6_LOOKUP_F_IFACE;
3679 + fl6.flowi6_oif = dev->ifindex;
3680 +- } else if ((flags & XT_RPFILTER_LOOSE) == 0)
3681 ++ /* Set flowi6_oif for vrf devices to lookup route in l3mdev domain. */
3682 ++ } else if (netif_is_l3_master(dev) || netif_is_l3_slave(dev) ||
3683 ++ (flags & XT_RPFILTER_LOOSE) == 0)
3684 + fl6.flowi6_oif = dev->ifindex;
3685 +
3686 + rt = (void *)ip6_route_lookup(net, &fl6, skb, lookup_flags);
3687 +@@ -70,7 +72,9 @@ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb,
3688 + goto out;
3689 + }
3690 +
3691 +- if (rt->rt6i_idev->dev == dev || (flags & XT_RPFILTER_LOOSE))
3692 ++ if (rt->rt6i_idev->dev == dev ||
3693 ++ l3mdev_master_ifindex_rcu(rt->rt6i_idev->dev) == dev->ifindex ||
3694 ++ (flags & XT_RPFILTER_LOOSE))
3695 + ret = true;
3696 + out:
3697 + ip6_rt_put(rt);
3698 +diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
3699 +index a1973a26c7fc..b8288125e05d 100644
3700 +--- a/net/mac80211/cfg.c
3701 ++++ b/net/mac80211/cfg.c
3702 +@@ -935,8 +935,10 @@ static int ieee80211_assign_beacon(struct ieee80211_sub_if_data *sdata,
3703 +
3704 + err = ieee80211_set_probe_resp(sdata, params->probe_resp,
3705 + params->probe_resp_len, csa);
3706 +- if (err < 0)
3707 ++ if (err < 0) {
3708 ++ kfree(new);
3709 + return err;
3710 ++ }
3711 + if (err == 0)
3712 + changed |= BSS_CHANGED_AP_PROBE_RESP;
3713 +
3714 +@@ -948,8 +950,10 @@ static int ieee80211_assign_beacon(struct ieee80211_sub_if_data *sdata,
3715 + params->civicloc,
3716 + params->civicloc_len);
3717 +
3718 +- if (err < 0)
3719 ++ if (err < 0) {
3720 ++ kfree(new);
3721 + return err;
3722 ++ }
3723 +
3724 + changed |= BSS_CHANGED_FTM_RESPONDER;
3725 + }
3726 +diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c
3727 +index acd4afb4944b..c9a8a2433e8a 100644
3728 +--- a/net/mac80211/driver-ops.c
3729 ++++ b/net/mac80211/driver-ops.c
3730 +@@ -187,11 +187,16 @@ int drv_conf_tx(struct ieee80211_local *local,
3731 + if (!check_sdata_in_driver(sdata))
3732 + return -EIO;
3733 +
3734 +- if (WARN_ONCE(params->cw_min == 0 ||
3735 +- params->cw_min > params->cw_max,
3736 +- "%s: invalid CW_min/CW_max: %d/%d\n",
3737 +- sdata->name, params->cw_min, params->cw_max))
3738 ++ if (params->cw_min == 0 || params->cw_min > params->cw_max) {
3739 ++ /*
3740 ++ * If we can't configure hardware anyway, don't warn. We may
3741 ++ * never have initialized the CW parameters.
3742 ++ */
3743 ++ WARN_ONCE(local->ops->conf_tx,
3744 ++ "%s: invalid CW_min/CW_max: %d/%d\n",
3745 ++ sdata->name, params->cw_min, params->cw_max);
3746 + return -EINVAL;
3747 ++ }
3748 +
3749 + trace_drv_conf_tx(local, sdata, ac, params);
3750 + if (local->ops->conf_tx)
3751 +diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
3752 +index 379d2ab6d327..36b60f39930b 100644
3753 +--- a/net/mac80211/mlme.c
3754 ++++ b/net/mac80211/mlme.c
3755 +@@ -2041,6 +2041,16 @@ ieee80211_sta_wmm_params(struct ieee80211_local *local,
3756 + ieee80211_regulatory_limit_wmm_params(sdata, &params[ac], ac);
3757 + }
3758 +
3759 ++ /* WMM specification requires all 4 ACIs. */
3760 ++ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
3761 ++ if (params[ac].cw_min == 0) {
3762 ++ sdata_info(sdata,
3763 ++ "AP has invalid WMM params (missing AC %d), using defaults\n",
3764 ++ ac);
3765 ++ return false;
3766 ++ }
3767 ++ }
3768 ++
3769 + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
3770 + mlme_dbg(sdata,
3771 + "WMM AC=%d acm=%d aifs=%d cWmin=%d cWmax=%d txop=%d uapsd=%d, downgraded=%d\n",
3772 +diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
3773 +index 1e2cc83ff5da..ae1f8c6b3a97 100644
3774 +--- a/net/netfilter/nf_conntrack_proto_tcp.c
3775 ++++ b/net/netfilter/nf_conntrack_proto_tcp.c
3776 +@@ -472,6 +472,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
3777 + struct ip_ct_tcp_state *receiver = &state->seen[!dir];
3778 + const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
3779 + __u32 seq, ack, sack, end, win, swin;
3780 ++ u16 win_raw;
3781 + s32 receiver_offset;
3782 + bool res, in_recv_win;
3783 +
3784 +@@ -480,7 +481,8 @@ static bool tcp_in_window(const struct nf_conn *ct,
3785 + */
3786 + seq = ntohl(tcph->seq);
3787 + ack = sack = ntohl(tcph->ack_seq);
3788 +- win = ntohs(tcph->window);
3789 ++ win_raw = ntohs(tcph->window);
3790 ++ win = win_raw;
3791 + end = segment_seq_plus_len(seq, skb->len, dataoff, tcph);
3792 +
3793 + if (receiver->flags & IP_CT_TCP_FLAG_SACK_PERM)
3794 +@@ -655,14 +657,14 @@ static bool tcp_in_window(const struct nf_conn *ct,
3795 + && state->last_seq == seq
3796 + && state->last_ack == ack
3797 + && state->last_end == end
3798 +- && state->last_win == win)
3799 ++ && state->last_win == win_raw)
3800 + state->retrans++;
3801 + else {
3802 + state->last_dir = dir;
3803 + state->last_seq = seq;
3804 + state->last_ack = ack;
3805 + state->last_end = end;
3806 +- state->last_win = win;
3807 ++ state->last_win = win_raw;
3808 + state->retrans = 0;
3809 + }
3810 + }
3811 +diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
3812 +index 92077d459109..4abbb452cf6c 100644
3813 +--- a/net/netfilter/nfnetlink.c
3814 ++++ b/net/netfilter/nfnetlink.c
3815 +@@ -578,7 +578,7 @@ static int nfnetlink_bind(struct net *net, int group)
3816 + ss = nfnetlink_get_subsys(type << 8);
3817 + rcu_read_unlock();
3818 + if (!ss)
3819 +- request_module("nfnetlink-subsys-%d", type);
3820 ++ request_module_nowait("nfnetlink-subsys-%d", type);
3821 + return 0;
3822 + }
3823 + #endif
3824 +diff --git a/net/netfilter/nft_chain_nat.c b/net/netfilter/nft_chain_nat.c
3825 +index 2f89bde3c61c..ff9ac8ae0031 100644
3826 +--- a/net/netfilter/nft_chain_nat.c
3827 ++++ b/net/netfilter/nft_chain_nat.c
3828 +@@ -142,3 +142,6 @@ MODULE_ALIAS_NFT_CHAIN(AF_INET, "nat");
3829 + #ifdef CONFIG_NF_TABLES_IPV6
3830 + MODULE_ALIAS_NFT_CHAIN(AF_INET6, "nat");
3831 + #endif
3832 ++#ifdef CONFIG_NF_TABLES_INET
3833 ++MODULE_ALIAS_NFT_CHAIN(1, "nat"); /* NFPROTO_INET */
3834 ++#endif
3835 +diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c
3836 +index fe93e731dc7f..b836d550b919 100644
3837 +--- a/net/netfilter/nft_hash.c
3838 ++++ b/net/netfilter/nft_hash.c
3839 +@@ -129,7 +129,7 @@ static int nft_symhash_init(const struct nft_ctx *ctx,
3840 + priv->dreg = nft_parse_register(tb[NFTA_HASH_DREG]);
3841 +
3842 + priv->modulus = ntohl(nla_get_be32(tb[NFTA_HASH_MODULUS]));
3843 +- if (priv->modulus <= 1)
3844 ++ if (priv->modulus < 1)
3845 + return -ERANGE;
3846 +
3847 + if (priv->offset + priv->modulus - 1 < priv->offset)
3848 +diff --git a/net/netfilter/nft_redir.c b/net/netfilter/nft_redir.c
3849 +index 8487eeff5c0e..43eeb1f609f1 100644
3850 +--- a/net/netfilter/nft_redir.c
3851 ++++ b/net/netfilter/nft_redir.c
3852 +@@ -291,4 +291,4 @@ module_exit(nft_redir_module_exit);
3853 +
3854 + MODULE_LICENSE("GPL");
3855 + MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo@××××××.org>");
3856 +-MODULE_ALIAS_NFT_EXPR("nat");
3857 ++MODULE_ALIAS_NFT_EXPR("redir");
3858 +diff --git a/scripts/gen_compile_commands.py b/scripts/gen_compile_commands.py
3859 +index 7915823b92a5..c458696ef3a7 100755
3860 +--- a/scripts/gen_compile_commands.py
3861 ++++ b/scripts/gen_compile_commands.py
3862 +@@ -21,9 +21,9 @@ _LINE_PATTERN = r'^cmd_[^ ]*\.o := (.* )([^ ]*\.c)$'
3863 + _VALID_LOG_LEVELS = ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']
3864 +
3865 + # A kernel build generally has over 2000 entries in its compile_commands.json
3866 +-# database. If this code finds 500 or fewer, then warn the user that they might
3867 ++# database. If this code finds 300 or fewer, then warn the user that they might
3868 + # not have all the .cmd files, and they might need to compile the kernel.
3869 +-_LOW_COUNT_THRESHOLD = 500
3870 ++_LOW_COUNT_THRESHOLD = 300
3871 +
3872 +
3873 + def parse_arguments():
3874 +diff --git a/scripts/sphinx-pre-install b/scripts/sphinx-pre-install
3875 +index 9be208db88d3..1f9f0a334c24 100755
3876 +--- a/scripts/sphinx-pre-install
3877 ++++ b/scripts/sphinx-pre-install
3878 +@@ -77,6 +77,17 @@ sub check_missing(%)
3879 + foreach my $prog (sort keys %missing) {
3880 + my $is_optional = $missing{$prog};
3881 +
3882 ++ # At least on some LTS distros like CentOS 7, texlive doesn't
3883 ++ # provide all packages we need. When such distros are
3884 ++ # detected, we have to disable PDF output.
3885 ++ #
3886 ++ # So, we need to ignore the packages that distros would
3887 ++ # need for LaTeX to work
3888 ++ if ($is_optional == 2 && !$pdf) {
3889 ++ $optional--;
3890 ++ next;
3891 ++ }
3892 ++
3893 + if ($is_optional) {
3894 + print "Warning: better to also install \"$prog\".\n";
3895 + } else {
3896 +@@ -326,10 +337,10 @@ sub give_debian_hints()
3897 +
3898 + if ($pdf) {
3899 + check_missing_file("/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf",
3900 +- "fonts-dejavu", 1);
3901 ++ "fonts-dejavu", 2);
3902 + }
3903 +
3904 +- check_program("dvipng", 1) if ($pdf);
3905 ++ check_program("dvipng", 2) if ($pdf);
3906 + check_missing(\%map);
3907 +
3908 + return if (!$need && !$optional);
3909 +@@ -364,22 +375,40 @@ sub give_redhat_hints()
3910 + #
3911 + # Checks valid for RHEL/CentOS version 7.x.
3912 + #
3913 +- if (! $system_release =~ /Fedora/) {
3914 ++ my $old = 0;
3915 ++ my $rel;
3916 ++ $rel = $1 if ($system_release =~ /release\s+(\d+)/);
3917 ++
3918 ++ if (!($system_release =~ /Fedora/)) {
3919 + $map{"virtualenv"} = "python-virtualenv";
3920 +- }
3921 +
3922 +- my $release;
3923 ++ if ($rel && $rel < 8) {
3924 ++ $old = 1;
3925 ++ $pdf = 0;
3926 +
3927 +- $release = $1 if ($system_release =~ /Fedora\s+release\s+(\d+)/);
3928 ++ printf("Note: texlive packages on RHEL/CENTOS <= 7 are incomplete. Can't support PDF output\n");
3929 ++ printf("If you want to build PDF, please read:\n");
3930 ++ printf("\thttps://www.systutorials.com/241660/how-to-install-tex-live-on-centos-7-linux/\n");
3931 ++ }
3932 ++ } else {
3933 ++ if ($rel && $rel < 26) {
3934 ++ $old = 1;
3935 ++ }
3936 ++ }
3937 ++ if (!$rel) {
3938 ++ printf("Couldn't identify release number\n");
3939 ++ $old = 1;
3940 ++ $pdf = 0;
3941 ++ }
3942 +
3943 +- check_rpm_missing(\@fedora26_opt_pkgs, 1) if ($pdf && $release >= 26);
3944 +- check_rpm_missing(\@fedora_tex_pkgs, 1) if ($pdf);
3945 +- check_missing_tex(1) if ($pdf);
3946 ++ check_rpm_missing(\@fedora26_opt_pkgs, 2) if ($pdf && !$old);
3947 ++ check_rpm_missing(\@fedora_tex_pkgs, 2) if ($pdf);
3948 ++ check_missing_tex(2) if ($pdf);
3949 + check_missing(\%map);
3950 +
3951 + return if (!$need && !$optional);
3952 +
3953 +- if ($release >= 18) {
3954 ++ if (!$old) {
3955 + # dnf, for Fedora 18+
3956 + printf("You should run:\n\n\tsudo dnf install -y $install\n");
3957 + } else {
3958 +@@ -418,8 +447,10 @@ sub give_opensuse_hints()
3959 + "texlive-zapfding",
3960 + );
3961 +
3962 +- check_rpm_missing(\@suse_tex_pkgs, 1) if ($pdf);
3963 +- check_missing_tex(1) if ($pdf);
3964 ++ $map{"latexmk"} = "texlive-latexmk-bin";
3965 ++
3966 ++ check_rpm_missing(\@suse_tex_pkgs, 2) if ($pdf);
3967 ++ check_missing_tex(2) if ($pdf);
3968 + check_missing(\%map);
3969 +
3970 + return if (!$need && !$optional);
3971 +@@ -443,7 +474,9 @@ sub give_mageia_hints()
3972 + "texlive-fontsextra",
3973 + );
3974 +
3975 +- check_rpm_missing(\@tex_pkgs, 1) if ($pdf);
3976 ++ $map{"latexmk"} = "texlive-collection-basic";
3977 ++
3978 ++ check_rpm_missing(\@tex_pkgs, 2) if ($pdf);
3979 + check_missing(\%map);
3980 +
3981 + return if (!$need && !$optional);
3982 +@@ -466,7 +499,8 @@ sub give_arch_linux_hints()
3983 + "texlive-latexextra",
3984 + "ttf-dejavu",
3985 + );
3986 +- check_pacman_missing(\@archlinux_tex_pkgs, 1) if ($pdf);
3987 ++ check_pacman_missing(\@archlinux_tex_pkgs, 2) if ($pdf);
3988 ++
3989 + check_missing(\%map);
3990 +
3991 + return if (!$need && !$optional);
3992 +@@ -485,7 +519,7 @@ sub give_gentoo_hints()
3993 + );
3994 +
3995 + check_missing_file("/usr/share/fonts/dejavu/DejaVuSans.ttf",
3996 +- "media-fonts/dejavu", 1) if ($pdf);
3997 ++ "media-fonts/dejavu", 2) if ($pdf);
3998 +
3999 + check_missing(\%map);
4000 +
4001 +@@ -553,7 +587,7 @@ sub check_distros()
4002 + my %map = (
4003 + "sphinx-build" => "sphinx"
4004 + );
4005 +- check_missing_tex(1) if ($pdf);
4006 ++ check_missing_tex(2) if ($pdf);
4007 + check_missing(\%map);
4008 + print "I don't know distro $system_release.\n";
4009 + print "So, I can't provide you a hint with the install procedure.\n";
4010 +@@ -591,11 +625,13 @@ sub check_needs()
4011 + check_program("make", 0);
4012 + check_program("gcc", 0);
4013 + check_python_module("sphinx_rtd_theme", 1) if (!$virtualenv);
4014 +- check_program("xelatex", 1) if ($pdf);
4015 + check_program("dot", 1);
4016 + check_program("convert", 1);
4017 +- check_program("rsvg-convert", 1) if ($pdf);
4018 +- check_program("latexmk", 1) if ($pdf);
4019 ++
4020 ++ # Extra PDF files - should use 2 for is_optional
4021 ++ check_program("xelatex", 2) if ($pdf);
4022 ++ check_program("rsvg-convert", 2) if ($pdf);
4023 ++ check_program("latexmk", 2) if ($pdf);
4024 +
4025 + check_distros();
4026 +
4027 +diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
4028 +index 99b882158705..41905afada63 100644
4029 +--- a/sound/core/compress_offload.c
4030 ++++ b/sound/core/compress_offload.c
4031 +@@ -574,10 +574,7 @@ snd_compr_set_params(struct snd_compr_stream *stream, unsigned long arg)
4032 + stream->metadata_set = false;
4033 + stream->next_track = false;
4034 +
4035 +- if (stream->direction == SND_COMPRESS_PLAYBACK)
4036 +- stream->runtime->state = SNDRV_PCM_STATE_SETUP;
4037 +- else
4038 +- stream->runtime->state = SNDRV_PCM_STATE_PREPARED;
4039 ++ stream->runtime->state = SNDRV_PCM_STATE_SETUP;
4040 + } else {
4041 + return -EPERM;
4042 + }
4043 +@@ -693,8 +690,17 @@ static int snd_compr_start(struct snd_compr_stream *stream)
4044 + {
4045 + int retval;
4046 +
4047 +- if (stream->runtime->state != SNDRV_PCM_STATE_PREPARED)
4048 ++ switch (stream->runtime->state) {
4049 ++ case SNDRV_PCM_STATE_SETUP:
4050 ++ if (stream->direction != SND_COMPRESS_CAPTURE)
4051 ++ return -EPERM;
4052 ++ break;
4053 ++ case SNDRV_PCM_STATE_PREPARED:
4054 ++ break;
4055 ++ default:
4056 + return -EPERM;
4057 ++ }
4058 ++
4059 + retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_START);
4060 + if (!retval)
4061 + stream->runtime->state = SNDRV_PCM_STATE_RUNNING;
4062 +@@ -705,9 +711,15 @@ static int snd_compr_stop(struct snd_compr_stream *stream)
4063 + {
4064 + int retval;
4065 +
4066 +- if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED ||
4067 +- stream->runtime->state == SNDRV_PCM_STATE_SETUP)
4068 ++ switch (stream->runtime->state) {
4069 ++ case SNDRV_PCM_STATE_OPEN:
4070 ++ case SNDRV_PCM_STATE_SETUP:
4071 ++ case SNDRV_PCM_STATE_PREPARED:
4072 + return -EPERM;
4073 ++ default:
4074 ++ break;
4075 ++ }
4076 ++
4077 + retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP);
4078 + if (!retval) {
4079 + snd_compr_drain_notify(stream);
4080 +@@ -795,9 +807,17 @@ static int snd_compr_drain(struct snd_compr_stream *stream)
4081 + {
4082 + int retval;
4083 +
4084 +- if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED ||
4085 +- stream->runtime->state == SNDRV_PCM_STATE_SETUP)
4086 ++ switch (stream->runtime->state) {
4087 ++ case SNDRV_PCM_STATE_OPEN:
4088 ++ case SNDRV_PCM_STATE_SETUP:
4089 ++ case SNDRV_PCM_STATE_PREPARED:
4090 ++ case SNDRV_PCM_STATE_PAUSED:
4091 + return -EPERM;
4092 ++ case SNDRV_PCM_STATE_XRUN:
4093 ++ return -EPIPE;
4094 ++ default:
4095 ++ break;
4096 ++ }
4097 +
4098 + retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_DRAIN);
4099 + if (retval) {
4100 +@@ -817,6 +837,10 @@ static int snd_compr_next_track(struct snd_compr_stream *stream)
4101 + if (stream->runtime->state != SNDRV_PCM_STATE_RUNNING)
4102 + return -EPERM;
4103 +
4104 ++ /* next track doesn't have any meaning for capture streams */
4105 ++ if (stream->direction == SND_COMPRESS_CAPTURE)
4106 ++ return -EPERM;
4107 ++
4108 + /* you can signal next track if this is intended to be a gapless stream
4109 + * and current track metadata is set
4110 + */
4111 +@@ -834,9 +858,23 @@ static int snd_compr_next_track(struct snd_compr_stream *stream)
4112 + static int snd_compr_partial_drain(struct snd_compr_stream *stream)
4113 + {
4114 + int retval;
4115 +- if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED ||
4116 +- stream->runtime->state == SNDRV_PCM_STATE_SETUP)
4117 ++
4118 ++ switch (stream->runtime->state) {
4119 ++ case SNDRV_PCM_STATE_OPEN:
4120 ++ case SNDRV_PCM_STATE_SETUP:
4121 ++ case SNDRV_PCM_STATE_PREPARED:
4122 ++ case SNDRV_PCM_STATE_PAUSED:
4123 ++ return -EPERM;
4124 ++ case SNDRV_PCM_STATE_XRUN:
4125 ++ return -EPIPE;
4126 ++ default:
4127 ++ break;
4128 ++ }
4129 ++
4130 ++ /* partial drain doesn't have any meaning for capture streams */
4131 ++ if (stream->direction == SND_COMPRESS_CAPTURE)
4132 + return -EPERM;
4133 ++
4134 + /* stream can be drained only when next track has been signalled */
4135 + if (stream->next_track == false)
4136 + return -EPERM;
4137 +diff --git a/sound/firewire/packets-buffer.c b/sound/firewire/packets-buffer.c
4138 +index 0d35359d25cd..0ecafd0c6722 100644
4139 +--- a/sound/firewire/packets-buffer.c
4140 ++++ b/sound/firewire/packets-buffer.c
4141 +@@ -37,7 +37,7 @@ int iso_packets_buffer_init(struct iso_packets_buffer *b, struct fw_unit *unit,
4142 + packets_per_page = PAGE_SIZE / packet_size;
4143 + if (WARN_ON(!packets_per_page)) {
4144 + err = -EINVAL;
4145 +- goto error;
4146 ++ goto err_packets;
4147 + }
4148 + pages = DIV_ROUND_UP(count, packets_per_page);
4149 +
4150 +diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
4151 +index 232a1926758a..dd96def48a3a 100644
4152 +--- a/sound/pci/hda/hda_controller.c
4153 ++++ b/sound/pci/hda/hda_controller.c
4154 +@@ -598,11 +598,9 @@ static int azx_pcm_open(struct snd_pcm_substream *substream)
4155 + }
4156 + runtime->private_data = azx_dev;
4157 +
4158 +- if (chip->gts_present)
4159 +- azx_pcm_hw.info = azx_pcm_hw.info |
4160 +- SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME;
4161 +-
4162 + runtime->hw = azx_pcm_hw;
4163 ++ if (chip->gts_present)
4164 ++ runtime->hw.info |= SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME;
4165 + runtime->hw.channels_min = hinfo->channels_min;
4166 + runtime->hw.channels_max = hinfo->channels_max;
4167 + runtime->hw.formats = hinfo->formats;
4168 +@@ -615,6 +613,13 @@ static int azx_pcm_open(struct snd_pcm_substream *substream)
4169 + 20,
4170 + 178000000);
4171 +
4172 ++ /* by some reason, the playback stream stalls on PulseAudio with
4173 ++ * tsched=1 when a capture stream triggers. Until we figure out the
4174 ++ * real cause, disable tsched mode by telling the PCM info flag.
4175 ++ */
4176 ++ if (chip->driver_caps & AZX_DCAPS_AMD_WORKAROUND)
4177 ++ runtime->hw.info |= SNDRV_PCM_INFO_BATCH;
4178 ++
4179 + if (chip->align_buffer_size)
4180 + /* constrain buffer sizes to be multiple of 128
4181 + bytes. This is more efficient in terms of memory
4182 +diff --git a/sound/pci/hda/hda_controller.h b/sound/pci/hda/hda_controller.h
4183 +index 3aa5c957ffbf..863695d025af 100644
4184 +--- a/sound/pci/hda/hda_controller.h
4185 ++++ b/sound/pci/hda/hda_controller.h
4186 +@@ -31,7 +31,7 @@
4187 + /* 14 unused */
4188 + #define AZX_DCAPS_CTX_WORKAROUND (1 << 15) /* X-Fi workaround */
4189 + #define AZX_DCAPS_POSFIX_LPIB (1 << 16) /* Use LPIB as default */
4190 +-/* 17 unused */
4191 ++#define AZX_DCAPS_AMD_WORKAROUND (1 << 17) /* AMD-specific workaround */
4192 + #define AZX_DCAPS_NO_64BIT (1 << 18) /* No 64bit address */
4193 + #define AZX_DCAPS_SYNC_WRITE (1 << 19) /* sync each cmd write */
4194 + #define AZX_DCAPS_OLD_SSYNC (1 << 20) /* Old SSYNC reg for ICH */
4195 +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
4196 +index d438c450f04d..fb8f452a1c78 100644
4197 +--- a/sound/pci/hda/hda_intel.c
4198 ++++ b/sound/pci/hda/hda_intel.c
4199 +@@ -64,6 +64,7 @@ enum {
4200 + POS_FIX_VIACOMBO,
4201 + POS_FIX_COMBO,
4202 + POS_FIX_SKL,
4203 ++ POS_FIX_FIFO,
4204 + };
4205 +
4206 + /* Defines for ATI HD Audio support in SB450 south bridge */
4207 +@@ -135,7 +136,7 @@ module_param_array(model, charp, NULL, 0444);
4208 + MODULE_PARM_DESC(model, "Use the given board model.");
4209 + module_param_array(position_fix, int, NULL, 0444);
4210 + MODULE_PARM_DESC(position_fix, "DMA pointer read method."
4211 +- "(-1 = system default, 0 = auto, 1 = LPIB, 2 = POSBUF, 3 = VIACOMBO, 4 = COMBO, 5 = SKL+).");
4212 ++ "(-1 = system default, 0 = auto, 1 = LPIB, 2 = POSBUF, 3 = VIACOMBO, 4 = COMBO, 5 = SKL+, 6 = FIFO).");
4213 + module_param_array(bdl_pos_adj, int, NULL, 0644);
4214 + MODULE_PARM_DESC(bdl_pos_adj, "BDL position adjustment offset.");
4215 + module_param_array(probe_mask, int, NULL, 0444);
4216 +@@ -332,6 +333,11 @@ enum {
4217 + #define AZX_DCAPS_PRESET_ATI_HDMI_NS \
4218 + (AZX_DCAPS_PRESET_ATI_HDMI | AZX_DCAPS_SNOOP_OFF)
4219 +
4220 ++/* quirks for AMD SB */
4221 ++#define AZX_DCAPS_PRESET_AMD_SB \
4222 ++ (AZX_DCAPS_NO_TCSEL | AZX_DCAPS_SYNC_WRITE | AZX_DCAPS_AMD_WORKAROUND |\
4223 ++ AZX_DCAPS_SNOOP_TYPE(ATI) | AZX_DCAPS_PM_RUNTIME)
4224 ++
4225 + /* quirks for Nvidia */
4226 + #define AZX_DCAPS_PRESET_NVIDIA \
4227 + (AZX_DCAPS_NO_MSI | AZX_DCAPS_CORBRP_SELF_CLEAR |\
4228 +@@ -841,6 +847,49 @@ static unsigned int azx_via_get_position(struct azx *chip,
4229 + return bound_pos + mod_dma_pos;
4230 + }
4231 +
4232 ++#define AMD_FIFO_SIZE 32
4233 ++
4234 ++/* get the current DMA position with FIFO size correction */
4235 ++static unsigned int azx_get_pos_fifo(struct azx *chip, struct azx_dev *azx_dev)
4236 ++{
4237 ++ struct snd_pcm_substream *substream = azx_dev->core.substream;
4238 ++ struct snd_pcm_runtime *runtime = substream->runtime;
4239 ++ unsigned int pos, delay;
4240 ++
4241 ++ pos = snd_hdac_stream_get_pos_lpib(azx_stream(azx_dev));
4242 ++ if (!runtime)
4243 ++ return pos;
4244 ++
4245 ++ runtime->delay = AMD_FIFO_SIZE;
4246 ++ delay = frames_to_bytes(runtime, AMD_FIFO_SIZE);
4247 ++ if (azx_dev->insufficient) {
4248 ++ if (pos < delay) {
4249 ++ delay = pos;
4250 ++ runtime->delay = bytes_to_frames(runtime, pos);
4251 ++ } else {
4252 ++ azx_dev->insufficient = 0;
4253 ++ }
4254 ++ }
4255 ++
4256 ++ /* correct the DMA position for capture stream */
4257 ++ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
4258 ++ if (pos < delay)
4259 ++ pos += azx_dev->core.bufsize;
4260 ++ pos -= delay;
4261 ++ }
4262 ++
4263 ++ return pos;
4264 ++}
4265 ++
4266 ++static int azx_get_delay_from_fifo(struct azx *chip, struct azx_dev *azx_dev,
4267 ++ unsigned int pos)
4268 ++{
4269 ++ struct snd_pcm_substream *substream = azx_dev->core.substream;
4270 ++
4271 ++ /* just read back the calculated value in the above */
4272 ++ return substream->runtime->delay;
4273 ++}
4274 ++
4275 + static unsigned int azx_skl_get_dpib_pos(struct azx *chip,
4276 + struct azx_dev *azx_dev)
4277 + {
4278 +@@ -1417,6 +1466,7 @@ static int check_position_fix(struct azx *chip, int fix)
4279 + case POS_FIX_VIACOMBO:
4280 + case POS_FIX_COMBO:
4281 + case POS_FIX_SKL:
4282 ++ case POS_FIX_FIFO:
4283 + return fix;
4284 + }
4285 +
4286 +@@ -1433,6 +1483,10 @@ static int check_position_fix(struct azx *chip, int fix)
4287 + dev_dbg(chip->card->dev, "Using VIACOMBO position fix\n");
4288 + return POS_FIX_VIACOMBO;
4289 + }
4290 ++ if (chip->driver_caps & AZX_DCAPS_AMD_WORKAROUND) {
4291 ++ dev_dbg(chip->card->dev, "Using FIFO position fix\n");
4292 ++ return POS_FIX_FIFO;
4293 ++ }
4294 + if (chip->driver_caps & AZX_DCAPS_POSFIX_LPIB) {
4295 + dev_dbg(chip->card->dev, "Using LPIB position fix\n");
4296 + return POS_FIX_LPIB;
4297 +@@ -1453,6 +1507,7 @@ static void assign_position_fix(struct azx *chip, int fix)
4298 + [POS_FIX_VIACOMBO] = azx_via_get_position,
4299 + [POS_FIX_COMBO] = azx_get_pos_lpib,
4300 + [POS_FIX_SKL] = azx_get_pos_skl,
4301 ++ [POS_FIX_FIFO] = azx_get_pos_fifo,
4302 + };
4303 +
4304 + chip->get_position[0] = chip->get_position[1] = callbacks[fix];
4305 +@@ -1467,6 +1522,9 @@ static void assign_position_fix(struct azx *chip, int fix)
4306 + azx_get_delay_from_lpib;
4307 + }
4308 +
4309 ++ if (fix == POS_FIX_FIFO)
4310 ++ chip->get_delay[0] = chip->get_delay[1] =
4311 ++ azx_get_delay_from_fifo;
4312 + }
4313 +
4314 + /*
4315 +@@ -2444,6 +2502,9 @@ static const struct pci_device_id azx_ids[] = {
4316 + /* AMD Hudson */
4317 + { PCI_DEVICE(0x1022, 0x780d),
4318 + .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB },
4319 ++ /* AMD, X370 & co */
4320 ++ { PCI_DEVICE(0x1022, 0x1457),
4321 ++ .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_AMD_SB },
4322 + /* AMD Stoney */
4323 + { PCI_DEVICE(0x1022, 0x157a),
4324 + .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB |
4325 +diff --git a/sound/sound_core.c b/sound/sound_core.c
4326 +index b730d97c4de6..90d118cd9164 100644
4327 +--- a/sound/sound_core.c
4328 ++++ b/sound/sound_core.c
4329 +@@ -275,7 +275,8 @@ retry:
4330 + goto retry;
4331 + }
4332 + spin_unlock(&sound_loader_lock);
4333 +- return -EBUSY;
4334 ++ r = -EBUSY;
4335 ++ goto fail;
4336 + }
4337 + }
4338 +
4339 +diff --git a/sound/usb/hiface/pcm.c b/sound/usb/hiface/pcm.c
4340 +index 14fc1e1d5d13..c406497c5919 100644
4341 +--- a/sound/usb/hiface/pcm.c
4342 ++++ b/sound/usb/hiface/pcm.c
4343 +@@ -600,14 +600,13 @@ int hiface_pcm_init(struct hiface_chip *chip, u8 extra_freq)
4344 + ret = hiface_pcm_init_urb(&rt->out_urbs[i], chip, OUT_EP,
4345 + hiface_pcm_out_urb_handler);
4346 + if (ret < 0)
4347 +- return ret;
4348 ++ goto error;
4349 + }
4350 +
4351 + ret = snd_pcm_new(chip->card, "USB-SPDIF Audio", 0, 1, 0, &pcm);
4352 + if (ret < 0) {
4353 +- kfree(rt);
4354 + dev_err(&chip->dev->dev, "Cannot create pcm instance\n");
4355 +- return ret;
4356 ++ goto error;
4357 + }
4358 +
4359 + pcm->private_data = rt;
4360 +@@ -620,4 +619,10 @@ int hiface_pcm_init(struct hiface_chip *chip, u8 extra_freq)
4361 +
4362 + chip->pcm = rt;
4363 + return 0;
4364 ++
4365 ++error:
4366 ++ for (i = 0; i < PCM_N_URBS; i++)
4367 ++ kfree(rt->out_urbs[i].buffer);
4368 ++ kfree(rt);
4369 ++ return ret;
4370 + }
4371 +diff --git a/sound/usb/stream.c b/sound/usb/stream.c
4372 +index 7ee9d17d0143..e852c7fd6109 100644
4373 +--- a/sound/usb/stream.c
4374 ++++ b/sound/usb/stream.c
4375 +@@ -1043,6 +1043,7 @@ found_clock:
4376 +
4377 + pd = kzalloc(sizeof(*pd), GFP_KERNEL);
4378 + if (!pd) {
4379 ++ kfree(fp->chmap);
4380 + kfree(fp->rate_table);
4381 + kfree(fp);
4382 + return NULL;
4383 +diff --git a/tools/perf/arch/s390/util/machine.c b/tools/perf/arch/s390/util/machine.c
4384 +index a19690a17291..c8c86a0c9b79 100644
4385 +--- a/tools/perf/arch/s390/util/machine.c
4386 ++++ b/tools/perf/arch/s390/util/machine.c
4387 +@@ -6,8 +6,9 @@
4388 + #include "machine.h"
4389 + #include "api/fs/fs.h"
4390 + #include "debug.h"
4391 ++#include "symbol.h"
4392 +
4393 +-int arch__fix_module_text_start(u64 *start, const char *name)
4394 ++int arch__fix_module_text_start(u64 *start, u64 *size, const char *name)
4395 + {
4396 + u64 m_start = *start;
4397 + char path[PATH_MAX];
4398 +@@ -17,7 +18,35 @@ int arch__fix_module_text_start(u64 *start, const char *name)
4399 + if (sysfs__read_ull(path, (unsigned long long *)start) < 0) {
4400 + pr_debug2("Using module %s start:%#lx\n", path, m_start);
4401 + *start = m_start;
4402 ++ } else {
4403 ++ /* Successful read of the modules segment text start address.
4404 ++ * Calculate difference between module start address
4405 ++ * in memory and module text segment start address.
4406 ++ * For example module load address is 0x3ff8011b000
4407 ++ * (from /proc/modules) and module text segment start
4408 ++ * address is 0x3ff8011b870 (from file above).
4409 ++ *
4410 ++ * Adjust the module size and subtract the GOT table
4411 ++ * size located at the beginning of the module.
4412 ++ */
4413 ++ *size -= (*start - m_start);
4414 + }
4415 +
4416 + return 0;
4417 + }
4418 ++
4419 ++/* On s390 kernel text segment start is located at very low memory addresses,
4420 ++ * for example 0x10000. Modules are located at very high memory addresses,
4421 ++ * for example 0x3ff xxxx xxxx. The gap between end of kernel text segment
4422 ++ * and beginning of first module's text segment is very big.
4423 ++ * Therefore do not fill this gap and do not assign it to the kernel dso map.
4424 ++ */
4425 ++void arch__symbols__fixup_end(struct symbol *p, struct symbol *c)
4426 ++{
4427 ++ if (strchr(p->name, '[') == NULL && strchr(c->name, '['))
4428 ++ /* Last kernel symbol mapped to end of page */
4429 ++ p->end = roundup(p->end, page_size);
4430 ++ else
4431 ++ p->end = c->start;
4432 ++ pr_debug4("%s sym:%s end:%#lx\n", __func__, p->name, p->end);
4433 ++}
4434 +diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c
4435 +index 8bb124e55c6d..2c376f5b2120 100644
4436 +--- a/tools/perf/builtin-probe.c
4437 ++++ b/tools/perf/builtin-probe.c
4438 +@@ -698,6 +698,16 @@ __cmd_probe(int argc, const char **argv)
4439 +
4440 + ret = perf_add_probe_events(params.events, params.nevents);
4441 + if (ret < 0) {
4442 ++
4443 ++ /*
4444 ++ * When perf_add_probe_events() fails it calls
4445 ++ * cleanup_perf_probe_events(pevs, npevs), i.e.
4446 ++ * cleanup_perf_probe_events(params.events, params.nevents), which
4447 ++ * will call clear_perf_probe_event(), so set nevents to zero
4448 ++ * to avoid cleanup_params() calling clear_perf_probe_event() again
4449 ++ * on the same pevs.
4450 ++ */
4451 ++ params.nevents = 0;
4452 + pr_err_with_code(" Error: Failed to add events.", ret);
4453 + return ret;
4454 + }
4455 +diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
4456 +index d089eb706d18..4380474c8c35 100644
4457 +--- a/tools/perf/builtin-script.c
4458 ++++ b/tools/perf/builtin-script.c
4459 +@@ -1057,7 +1057,7 @@ static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample,
4460 +
4461 + printed += ip__fprintf_sym(ip, thread, x.cpumode, x.cpu, &lastsym, attr, fp);
4462 + if (ip == end) {
4463 +- printed += ip__fprintf_jump(ip, &br->entries[i], &x, buffer + off, len - off, insn, fp,
4464 ++ printed += ip__fprintf_jump(ip, &br->entries[i], &x, buffer + off, len - off, ++insn, fp,
4465 + &total_cycles);
4466 + if (PRINT_FIELD(SRCCODE))
4467 + printed += print_srccode(thread, x.cpumode, ip);
4468 +diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
4469 +index e28002d90573..c6c550dbb947 100644
4470 +--- a/tools/perf/builtin-stat.c
4471 ++++ b/tools/perf/builtin-stat.c
4472 +@@ -607,7 +607,13 @@ try_again:
4473 + * group leaders.
4474 + */
4475 + read_counters(&(struct timespec) { .tv_nsec = t1-t0 });
4476 +- perf_evlist__close(evsel_list);
4477 ++
4478 ++ /*
4479 ++ * We need to keep evsel_list alive, because it's processed
4480 ++ * later; the evsel_list will be closed afterwards.
4481 ++ */
4482 ++ if (!STAT_RECORD)
4483 ++ perf_evlist__close(evsel_list);
4484 +
4485 + return WEXITSTATUS(status);
4486 + }
4487 +@@ -1922,6 +1928,7 @@ int cmd_stat(int argc, const char **argv)
4488 + perf_session__write_header(perf_stat.session, evsel_list, fd, true);
4489 + }
4490 +
4491 ++ perf_evlist__close(evsel_list);
4492 + perf_session__delete(perf_stat.session);
4493 + }
4494 +
4495 +diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
4496 +index 2c46f9aa416c..b854541604df 100644
4497 +--- a/tools/perf/util/evsel.c
4498 ++++ b/tools/perf/util/evsel.c
4499 +@@ -1282,6 +1282,7 @@ static void perf_evsel__free_id(struct perf_evsel *evsel)
4500 + xyarray__delete(evsel->sample_id);
4501 + evsel->sample_id = NULL;
4502 + zfree(&evsel->id);
4503 ++ evsel->ids = 0;
4504 + }
4505 +
4506 + static void perf_evsel__free_config_terms(struct perf_evsel *evsel)
4507 +@@ -2074,6 +2075,7 @@ void perf_evsel__close(struct perf_evsel *evsel)
4508 +
4509 + perf_evsel__close_fd(evsel);
4510 + perf_evsel__free_fd(evsel);
4511 ++ perf_evsel__free_id(evsel);
4512 + }
4513 +
4514 + int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
4515 +diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
4516 +index b82d4577d969..e84b70be3fc1 100644
4517 +--- a/tools/perf/util/header.c
4518 ++++ b/tools/perf/util/header.c
4519 +@@ -3666,7 +3666,7 @@ int perf_event__process_feature(struct perf_session *session,
4520 + return 0;
4521 +
4522 + ff.buf = (void *)fe->data;
4523 +- ff.size = event->header.size - sizeof(event->header);
4524 ++ ff.size = event->header.size - sizeof(*fe);
4525 + ff.ph = &session->header;
4526 +
4527 + if (feat_ops[feat].process(&ff, NULL))
4528 +diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
4529 +index dc7aafe45a2b..081fe4bdebaa 100644
4530 +--- a/tools/perf/util/machine.c
4531 ++++ b/tools/perf/util/machine.c
4532 +@@ -1365,6 +1365,7 @@ static int machine__set_modules_path(struct machine *machine)
4533 + return map_groups__set_modules_path_dir(&machine->kmaps, modules_path, 0);
4534 + }
4535 + int __weak arch__fix_module_text_start(u64 *start __maybe_unused,
4536 ++ u64 *size __maybe_unused,
4537 + const char *name __maybe_unused)
4538 + {
4539 + return 0;
4540 +@@ -1376,7 +1377,7 @@ static int machine__create_module(void *arg, const char *name, u64 start,
4541 + struct machine *machine = arg;
4542 + struct map *map;
4543 +
4544 +- if (arch__fix_module_text_start(&start, name) < 0)
4545 ++ if (arch__fix_module_text_start(&start, &size, name) < 0)
4546 + return -1;
4547 +
4548 + map = machine__findnew_module_map(machine, start, name);
4549 +diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
4550 +index f70ab98a7bde..7aa38da26427 100644
4551 +--- a/tools/perf/util/machine.h
4552 ++++ b/tools/perf/util/machine.h
4553 +@@ -222,7 +222,7 @@ struct symbol *machine__find_kernel_symbol_by_name(struct machine *machine,
4554 +
4555 + struct map *machine__findnew_module_map(struct machine *machine, u64 start,
4556 + const char *filename);
4557 +-int arch__fix_module_text_start(u64 *start, const char *name);
4558 ++int arch__fix_module_text_start(u64 *start, u64 *size, const char *name);
4559 +
4560 + int machine__load_kallsyms(struct machine *machine, const char *filename);
4561 +
4562 +diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
4563 +index 2e61dd6a3574..d78984096044 100644
4564 +--- a/tools/perf/util/session.c
4565 ++++ b/tools/perf/util/session.c
4566 +@@ -36,10 +36,16 @@ static int perf_session__process_compressed_event(struct perf_session *session,
4567 + void *src;
4568 + size_t decomp_size, src_size;
4569 + u64 decomp_last_rem = 0;
4570 +- size_t decomp_len = session->header.env.comp_mmap_len;
4571 ++ size_t mmap_len, decomp_len = session->header.env.comp_mmap_len;
4572 + struct decomp *decomp, *decomp_last = session->decomp_last;
4573 +
4574 +- decomp = mmap(NULL, sizeof(struct decomp) + decomp_len, PROT_READ|PROT_WRITE,
4575 ++ if (decomp_last) {
4576 ++ decomp_last_rem = decomp_last->size - decomp_last->head;
4577 ++ decomp_len += decomp_last_rem;
4578 ++ }
4579 ++
4580 ++ mmap_len = sizeof(struct decomp) + decomp_len;
4581 ++ decomp = mmap(NULL, mmap_len, PROT_READ|PROT_WRITE,
4582 + MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
4583 + if (decomp == MAP_FAILED) {
4584 + pr_err("Couldn't allocate memory for decompression\n");
4585 +@@ -47,10 +53,10 @@ static int perf_session__process_compressed_event(struct perf_session *session,
4586 + }
4587 +
4588 + decomp->file_pos = file_offset;
4589 ++ decomp->mmap_len = mmap_len;
4590 + decomp->head = 0;
4591 +
4592 +- if (decomp_last) {
4593 +- decomp_last_rem = decomp_last->size - decomp_last->head;
4594 ++ if (decomp_last_rem) {
4595 + memcpy(decomp->data, &(decomp_last->data[decomp_last->head]), decomp_last_rem);
4596 + decomp->size = decomp_last_rem;
4597 + }
4598 +@@ -61,7 +67,7 @@ static int perf_session__process_compressed_event(struct perf_session *session,
4599 + decomp_size = zstd_decompress_stream(&(session->zstd_data), src, src_size,
4600 + &(decomp->data[decomp_last_rem]), decomp_len - decomp_last_rem);
4601 + if (!decomp_size) {
4602 +- munmap(decomp, sizeof(struct decomp) + decomp_len);
4603 ++ munmap(decomp, mmap_len);
4604 + pr_err("Couldn't decompress data\n");
4605 + return -1;
4606 + }
4607 +@@ -255,15 +261,15 @@ static void perf_session__delete_threads(struct perf_session *session)
4608 + static void perf_session__release_decomp_events(struct perf_session *session)
4609 + {
4610 + struct decomp *next, *decomp;
4611 +- size_t decomp_len;
4612 ++ size_t mmap_len;
4613 + next = session->decomp;
4614 +- decomp_len = session->header.env.comp_mmap_len;
4615 + do {
4616 + decomp = next;
4617 + if (decomp == NULL)
4618 + break;
4619 + next = decomp->next;
4620 +- munmap(decomp, decomp_len + sizeof(struct decomp));
4621 ++ mmap_len = decomp->mmap_len;
4622 ++ munmap(decomp, mmap_len);
4623 + } while (1);
4624 + }
4625 +
4626 +diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
4627 +index dd8920b745bc..863dbad87849 100644
4628 +--- a/tools/perf/util/session.h
4629 ++++ b/tools/perf/util/session.h
4630 +@@ -46,6 +46,7 @@ struct perf_session {
4631 + struct decomp {
4632 + struct decomp *next;
4633 + u64 file_pos;
4634 ++ size_t mmap_len;
4635 + u64 head;
4636 + size_t size;
4637 + char data[];
4638 +diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
4639 +index 5cbad55cd99d..3b49eb4e3ed9 100644
4640 +--- a/tools/perf/util/symbol.c
4641 ++++ b/tools/perf/util/symbol.c
4642 +@@ -91,6 +91,11 @@ static int prefix_underscores_count(const char *str)
4643 + return tail - str;
4644 + }
4645 +
4646 ++void __weak arch__symbols__fixup_end(struct symbol *p, struct symbol *c)
4647 ++{
4648 ++ p->end = c->start;
4649 ++}
4650 ++
4651 + const char * __weak arch__normalize_symbol_name(const char *name)
4652 + {
4653 + return name;
4654 +@@ -217,7 +222,7 @@ void symbols__fixup_end(struct rb_root_cached *symbols)
4655 + curr = rb_entry(nd, struct symbol, rb_node);
4656 +
4657 + if (prev->end == prev->start && prev->end != curr->start)
4658 +- prev->end = curr->start;
4659 ++ arch__symbols__fixup_end(prev, curr);
4660 + }
4661 +
4662 + /* Last entry */
4663 +diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
4664 +index 9a8fe012910a..f30ab608ea54 100644
4665 +--- a/tools/perf/util/symbol.h
4666 ++++ b/tools/perf/util/symbol.h
4667 +@@ -277,6 +277,7 @@ const char *arch__normalize_symbol_name(const char *name);
4668 + #define SYMBOL_A 0
4669 + #define SYMBOL_B 1
4670 +
4671 ++void arch__symbols__fixup_end(struct symbol *p, struct symbol *c);
4672 + int arch__compare_symbol_names(const char *namea, const char *nameb);
4673 + int arch__compare_symbol_names_n(const char *namea, const char *nameb,
4674 + unsigned int n);
4675 +diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
4676 +index b413ba5b9835..4a9f88d9b7ab 100644
4677 +--- a/tools/perf/util/thread.c
4678 ++++ b/tools/perf/util/thread.c
4679 +@@ -197,14 +197,24 @@ struct comm *thread__comm(const struct thread *thread)
4680 +
4681 + struct comm *thread__exec_comm(const struct thread *thread)
4682 + {
4683 +- struct comm *comm, *last = NULL;
4684 ++ struct comm *comm, *last = NULL, *second_last = NULL;
4685 +
4686 + list_for_each_entry(comm, &thread->comm_list, list) {
4687 + if (comm->exec)
4688 + return comm;
4689 ++ second_last = last;
4690 + last = comm;
4691 + }
4692 +
4693 ++ /*
4694 ++ * 'last' with no start time might be the parent's comm of a synthesized
4695 ++ * thread (created by processing a synthesized fork event). For a main
4696 ++ * thread, that is very probably wrong. Prefer a later comm to avoid
4697 ++ * that case.
4698 ++ */
4699 ++ if (second_last && !last->start && thread->pid_ == thread->tid)
4700 ++ return second_last;
4701 ++
4702 + return last;
4703 + }
4704 +
4705 +diff --git a/tools/perf/util/zstd.c b/tools/perf/util/zstd.c
4706 +index 23bdb9884576..d2202392ffdb 100644
4707 +--- a/tools/perf/util/zstd.c
4708 ++++ b/tools/perf/util/zstd.c
4709 +@@ -99,8 +99,8 @@ size_t zstd_decompress_stream(struct zstd_data *data, void *src, size_t src_size
4710 + while (input.pos < input.size) {
4711 + ret = ZSTD_decompressStream(data->dstream, &output, &input);
4712 + if (ZSTD_isError(ret)) {
4713 +- pr_err("failed to decompress (B): %ld -> %ld : %s\n",
4714 +- src_size, output.size, ZSTD_getErrorName(ret));
4715 ++ pr_err("failed to decompress (B): %ld -> %ld, dst_size %ld : %s\n",
4716 ++ src_size, output.size, dst_size, ZSTD_getErrorName(ret));
4717 + break;
4718 + }
4719 + output.dst = dst + output.pos;
4720 +diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
4721 +index bd5c55916d0d..9b778c51af1b 100644
4722 +--- a/virt/kvm/arm/arm.c
4723 ++++ b/virt/kvm/arm/arm.c
4724 +@@ -323,6 +323,17 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
4725 +
4726 + void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
4727 + {
4728 ++ /*
4729 ++ * If we're about to block (most likely because we've just hit a
4730 ++ * WFI), we need to sync back the state of the GIC CPU interface
4731 ++ * so that we have the latest PMR and group enables. This ensures
4732 ++ * that kvm_arch_vcpu_runnable has up-to-date data to decide
4733 ++ * whether we have pending interrupts.
4734 ++ */
4735 ++ preempt_disable();
4736 ++ kvm_vgic_vmcr_sync(vcpu);
4737 ++ preempt_enable();
4738 ++
4739 + kvm_vgic_v4_enable_doorbell(vcpu);
4740 + }
4741 +
4742 +diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
4743 +index 6dd5ad706c92..96aab77d0471 100644
4744 +--- a/virt/kvm/arm/vgic/vgic-v2.c
4745 ++++ b/virt/kvm/arm/vgic/vgic-v2.c
4746 +@@ -484,10 +484,17 @@ void vgic_v2_load(struct kvm_vcpu *vcpu)
4747 + kvm_vgic_global_state.vctrl_base + GICH_APR);
4748 + }
4749 +
4750 +-void vgic_v2_put(struct kvm_vcpu *vcpu)
4751 ++void vgic_v2_vmcr_sync(struct kvm_vcpu *vcpu)
4752 + {
4753 + struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
4754 +
4755 + cpu_if->vgic_vmcr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_VMCR);
4756 ++}
4757 ++
4758 ++void vgic_v2_put(struct kvm_vcpu *vcpu)
4759 ++{
4760 ++ struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
4761 ++
4762 ++ vgic_v2_vmcr_sync(vcpu);
4763 + cpu_if->vgic_apr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_APR);
4764 + }
4765 +diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
4766 +index c2c9ce009f63..0c653a1e5215 100644
4767 +--- a/virt/kvm/arm/vgic/vgic-v3.c
4768 ++++ b/virt/kvm/arm/vgic/vgic-v3.c
4769 +@@ -662,12 +662,17 @@ void vgic_v3_load(struct kvm_vcpu *vcpu)
4770 + __vgic_v3_activate_traps(vcpu);
4771 + }
4772 +
4773 +-void vgic_v3_put(struct kvm_vcpu *vcpu)
4774 ++void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu)
4775 + {
4776 + struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
4777 +
4778 + if (likely(cpu_if->vgic_sre))
4779 + cpu_if->vgic_vmcr = kvm_call_hyp_ret(__vgic_v3_read_vmcr);
4780 ++}
4781 ++
4782 ++void vgic_v3_put(struct kvm_vcpu *vcpu)
4783 ++{
4784 ++ vgic_v3_vmcr_sync(vcpu);
4785 +
4786 + kvm_call_hyp(__vgic_v3_save_aprs, vcpu);
4787 +
4788 +diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
4789 +index 04786c8ec77e..13d4b38a94ec 100644
4790 +--- a/virt/kvm/arm/vgic/vgic.c
4791 ++++ b/virt/kvm/arm/vgic/vgic.c
4792 +@@ -919,6 +919,17 @@ void kvm_vgic_put(struct kvm_vcpu *vcpu)
4793 + vgic_v3_put(vcpu);
4794 + }
4795 +
4796 ++void kvm_vgic_vmcr_sync(struct kvm_vcpu *vcpu)
4797 ++{
4798 ++ if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
4799 ++ return;
4800 ++
4801 ++ if (kvm_vgic_global_state.type == VGIC_V2)
4802 ++ vgic_v2_vmcr_sync(vcpu);
4803 ++ else
4804 ++ vgic_v3_vmcr_sync(vcpu);
4805 ++}
4806 ++
4807 + int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
4808 + {
4809 + struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
4810 +diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h
4811 +index 57205beaa981..11adbdac1d56 100644
4812 +--- a/virt/kvm/arm/vgic/vgic.h
4813 ++++ b/virt/kvm/arm/vgic/vgic.h
4814 +@@ -193,6 +193,7 @@ int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
4815 + void vgic_v2_init_lrs(void);
4816 + void vgic_v2_load(struct kvm_vcpu *vcpu);
4817 + void vgic_v2_put(struct kvm_vcpu *vcpu);
4818 ++void vgic_v2_vmcr_sync(struct kvm_vcpu *vcpu);
4819 +
4820 + void vgic_v2_save_state(struct kvm_vcpu *vcpu);
4821 + void vgic_v2_restore_state(struct kvm_vcpu *vcpu);
4822 +@@ -223,6 +224,7 @@ bool vgic_v3_check_base(struct kvm *kvm);
4823 +
4824 + void vgic_v3_load(struct kvm_vcpu *vcpu);
4825 + void vgic_v3_put(struct kvm_vcpu *vcpu);
4826 ++void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu);
4827 +
4828 + bool vgic_has_its(struct kvm *kvm);
4829 + int kvm_vgic_register_its_device(void);
4830 +diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
4831 +index e629766f0ec8..7e0d18ac62bf 100644
4832 +--- a/virt/kvm/kvm_main.c
4833 ++++ b/virt/kvm/kvm_main.c
4834 +@@ -2475,6 +2475,29 @@ static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
4835 + #endif
4836 + }
4837 +
4838 ++/*
4839 ++ * Unlike kvm_arch_vcpu_runnable, this function is called outside
4840 ++ * a vcpu_load/vcpu_put pair. However, for most architectures
4841 ++ * kvm_arch_vcpu_runnable does not require vcpu_load.
4842 ++ */
4843 ++bool __weak kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
4844 ++{
4845 ++ return kvm_arch_vcpu_runnable(vcpu);
4846 ++}
4847 ++
4848 ++static bool vcpu_dy_runnable(struct kvm_vcpu *vcpu)
4849 ++{
4850 ++ if (kvm_arch_dy_runnable(vcpu))
4851 ++ return true;
4852 ++
4853 ++#ifdef CONFIG_KVM_ASYNC_PF
4854 ++ if (!list_empty_careful(&vcpu->async_pf.done))
4855 ++ return true;
4856 ++#endif
4857 ++
4858 ++ return false;
4859 ++}
4860 ++
4861 + void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
4862 + {
4863 + struct kvm *kvm = me->kvm;
4864 +@@ -2504,7 +2527,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
4865 + continue;
4866 + if (vcpu == me)
4867 + continue;
4868 +- if (swait_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu))
4869 ++ if (swait_active(&vcpu->wq) && !vcpu_dy_runnable(vcpu))
4870 + continue;
4871 + if (yield_to_kernel_mode && !kvm_arch_vcpu_in_kernel(vcpu))
4872 + continue;