Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.1 commit in: /
Date: Tue, 25 Jun 2019 10:54:23
Message-Id: 1561460043.43bec38f38323b1bb6703d492e753525586dd530.mpagano@gentoo
commit: 43bec38f38323b1bb6703d492e753525586dd530
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Jun 25 10:54:03 2019 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Jun 25 10:54:03 2019 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=43bec38f

Linux patch 5.1.15

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

0000_README | 4 +
1014_linux-5.1.15.patch | 4242 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 4246 insertions(+)

diff --git a/0000_README b/0000_README
index 3443ce1..db80d60 100644
--- a/0000_README
+++ b/0000_README
@@ -99,6 +99,10 @@ Patch: 1013_linux-5.1.14.patch
From: https://www.kernel.org
Desc: Linux 5.1.14

+Patch: 1014_linux-5.1.15.patch
+From: https://www.kernel.org
+Desc: Linux 5.1.15
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1014_linux-5.1.15.patch b/1014_linux-5.1.15.patch
new file mode 100644
index 0000000..3ba035b
--- /dev/null
+++ b/1014_linux-5.1.15.patch
@@ -0,0 +1,4242 @@
+diff --git a/Makefile b/Makefile
+index c4b1a345d3f0..d7b3c8e3ff3e 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 1
+-SUBLEVEL = 14
++SUBLEVEL = 15
+ EXTRAVERSION =
+ NAME = Shy Crocodile
+
+diff --git a/arch/arc/boot/dts/hsdk.dts b/arch/arc/boot/dts/hsdk.dts
+index 7425bb0f2d1b..6219b372e9c1 100644
+--- a/arch/arc/boot/dts/hsdk.dts
++++ b/arch/arc/boot/dts/hsdk.dts
+@@ -187,6 +187,7 @@
+ interrupt-names = "macirq";
+ phy-mode = "rgmii";
+ snps,pbl = <32>;
++ snps,multicast-filter-bins = <256>;
+ clocks = <&gmacclk>;
+ clock-names = "stmmaceth";
+ phy-handle = <&phy0>;
+@@ -195,6 +196,9 @@
+ mac-address = [00 00 00 00 00 00]; /* Filled in by U-Boot */
+ dma-coherent;
+
++ tx-fifo-depth = <4096>;
++ rx-fifo-depth = <4096>;
++
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+diff --git a/arch/arc/include/asm/cmpxchg.h b/arch/arc/include/asm/cmpxchg.h
+index d819de1c5d10..3ea4112c8302 100644
+--- a/arch/arc/include/asm/cmpxchg.h
++++ b/arch/arc/include/asm/cmpxchg.h
+@@ -92,8 +92,11 @@ __cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
+
+ #endif /* CONFIG_ARC_HAS_LLSC */
+
+-#define cmpxchg(ptr, o, n) ((typeof(*(ptr)))__cmpxchg((ptr), \
+- (unsigned long)(o), (unsigned long)(n)))
++#define cmpxchg(ptr, o, n) ({ \
++ (typeof(*(ptr)))__cmpxchg((ptr), \
++ (unsigned long)(o), \
++ (unsigned long)(n)); \
++})
+
+ /*
+ * atomic_cmpxchg is same as cmpxchg
+@@ -198,8 +201,11 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
+ return __xchg_bad_pointer();
+ }
+
+-#define xchg(ptr, with) ((typeof(*(ptr)))__xchg((unsigned long)(with), (ptr), \
+- sizeof(*(ptr))))
++#define xchg(ptr, with) ({ \
++ (typeof(*(ptr)))__xchg((unsigned long)(with), \
++ (ptr), \
++ sizeof(*(ptr))); \
++})
+
+ #endif /* CONFIG_ARC_PLAT_EZNPS */
+
+diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
+index 4097764fea23..fa18c00b0cfd 100644
+--- a/arch/arc/mm/tlb.c
++++ b/arch/arc/mm/tlb.c
+@@ -911,9 +911,11 @@ void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
+ struct pt_regs *regs)
+ {
+ struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
+- unsigned int pd0[mmu->ways];
+ unsigned long flags;
+- int set;
++ int set, n_ways = mmu->ways;
++
++ n_ways = min(n_ways, 4);
++ BUG_ON(mmu->ways > 4);
+
+ local_irq_save(flags);
+
+@@ -921,9 +923,10 @@ void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
+ for (set = 0; set < mmu->sets; set++) {
+
+ int is_valid, way;
++ unsigned int pd0[4];
+
+ /* read out all the ways of current set */
+- for (way = 0, is_valid = 0; way < mmu->ways; way++) {
++ for (way = 0, is_valid = 0; way < n_ways; way++) {
+ write_aux_reg(ARC_REG_TLBINDEX,
+ SET_WAY_TO_IDX(mmu, set, way));
+ write_aux_reg(ARC_REG_TLBCOMMAND, TLBRead);
+@@ -937,14 +940,14 @@ void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
+ continue;
+
+ /* Scan the set for duplicate ways: needs a nested loop */
+- for (way = 0; way < mmu->ways - 1; way++) {
++ for (way = 0; way < n_ways - 1; way++) {
+
+ int n;
+
+ if (!pd0[way])
+ continue;
+
+- for (n = way + 1; n < mmu->ways; n++) {
++ for (n = way + 1; n < n_ways; n++) {
+ if (pd0[way] != pd0[n])
+ continue;
+
+diff --git a/arch/arm/boot/dts/am57xx-idk-common.dtsi b/arch/arm/boot/dts/am57xx-idk-common.dtsi
+index f7bd26458915..42e433da79ec 100644
+--- a/arch/arm/boot/dts/am57xx-idk-common.dtsi
++++ b/arch/arm/boot/dts/am57xx-idk-common.dtsi
+@@ -420,6 +420,7 @@
+ vqmmc-supply = <&ldo1_reg>;
+ bus-width = <4>;
+ cd-gpios = <&gpio6 27 GPIO_ACTIVE_LOW>; /* gpio 219 */
++ no-1-8-v;
+ };
+
+ &mmc2 {
+diff --git a/arch/arm/boot/dts/dra76x-mmc-iodelay.dtsi b/arch/arm/boot/dts/dra76x-mmc-iodelay.dtsi
+index baba7b00eca7..fdca48186916 100644
+--- a/arch/arm/boot/dts/dra76x-mmc-iodelay.dtsi
++++ b/arch/arm/boot/dts/dra76x-mmc-iodelay.dtsi
+@@ -22,7 +22,7 @@
+ *
+ * Datamanual Revisions:
+ *
+- * DRA76x Silicon Revision 1.0: SPRS993A, Revised July 2017
++ * DRA76x Silicon Revision 1.0: SPRS993E, Revised December 2018
+ *
+ */
+
+@@ -169,25 +169,25 @@
+ /* Corresponds to MMC2_HS200_MANUAL1 in datamanual */
+ mmc2_iodelay_hs200_conf: mmc2_iodelay_hs200_conf {
+ pinctrl-pin-array = <
+- 0x190 A_DELAY_PS(384) G_DELAY_PS(0) /* CFG_GPMC_A19_OEN */
+- 0x194 A_DELAY_PS(0) G_DELAY_PS(174) /* CFG_GPMC_A19_OUT */
+- 0x1a8 A_DELAY_PS(410) G_DELAY_PS(0) /* CFG_GPMC_A20_OEN */
+- 0x1ac A_DELAY_PS(85) G_DELAY_PS(0) /* CFG_GPMC_A20_OUT */
+- 0x1b4 A_DELAY_PS(468) G_DELAY_PS(0) /* CFG_GPMC_A21_OEN */
+- 0x1b8 A_DELAY_PS(139) G_DELAY_PS(0) /* CFG_GPMC_A21_OUT */
+- 0x1c0 A_DELAY_PS(676) G_DELAY_PS(0) /* CFG_GPMC_A22_OEN */
+- 0x1c4 A_DELAY_PS(69) G_DELAY_PS(0) /* CFG_GPMC_A22_OUT */
+- 0x1d0 A_DELAY_PS(1062) G_DELAY_PS(154) /* CFG_GPMC_A23_OUT */
+- 0x1d8 A_DELAY_PS(640) G_DELAY_PS(0) /* CFG_GPMC_A24_OEN */
+- 0x1dc A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A24_OUT */
+- 0x1e4 A_DELAY_PS(356) G_DELAY_PS(0) /* CFG_GPMC_A25_OEN */
+- 0x1e8 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A25_OUT */
+- 0x1f0 A_DELAY_PS(579) G_DELAY_PS(0) /* CFG_GPMC_A26_OEN */
+- 0x1f4 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A26_OUT */
+- 0x1fc A_DELAY_PS(435) G_DELAY_PS(0) /* CFG_GPMC_A27_OEN */
+- 0x200 A_DELAY_PS(36) G_DELAY_PS(0) /* CFG_GPMC_A27_OUT */
+- 0x364 A_DELAY_PS(759) G_DELAY_PS(0) /* CFG_GPMC_CS1_OEN */
+- 0x368 A_DELAY_PS(72) G_DELAY_PS(0) /* CFG_GPMC_CS1_OUT */
++ 0x190 A_DELAY_PS(384) G_DELAY_PS(0) /* CFG_GPMC_A19_OEN */
++ 0x194 A_DELAY_PS(350) G_DELAY_PS(174) /* CFG_GPMC_A19_OUT */
++ 0x1a8 A_DELAY_PS(410) G_DELAY_PS(0) /* CFG_GPMC_A20_OEN */
++ 0x1ac A_DELAY_PS(335) G_DELAY_PS(0) /* CFG_GPMC_A20_OUT */
++ 0x1b4 A_DELAY_PS(468) G_DELAY_PS(0) /* CFG_GPMC_A21_OEN */
++ 0x1b8 A_DELAY_PS(339) G_DELAY_PS(0) /* CFG_GPMC_A21_OUT */
++ 0x1c0 A_DELAY_PS(676) G_DELAY_PS(0) /* CFG_GPMC_A22_OEN */
++ 0x1c4 A_DELAY_PS(219) G_DELAY_PS(0) /* CFG_GPMC_A22_OUT */
++ 0x1d0 A_DELAY_PS(1062) G_DELAY_PS(154) /* CFG_GPMC_A23_OUT */
++ 0x1d8 A_DELAY_PS(640) G_DELAY_PS(0) /* CFG_GPMC_A24_OEN */
++ 0x1dc A_DELAY_PS(150) G_DELAY_PS(0) /* CFG_GPMC_A24_OUT */
++ 0x1e4 A_DELAY_PS(356) G_DELAY_PS(0) /* CFG_GPMC_A25_OEN */
++ 0x1e8 A_DELAY_PS(150) G_DELAY_PS(0) /* CFG_GPMC_A25_OUT */
++ 0x1f0 A_DELAY_PS(579) G_DELAY_PS(0) /* CFG_GPMC_A26_OEN */
++ 0x1f4 A_DELAY_PS(200) G_DELAY_PS(0) /* CFG_GPMC_A26_OUT */
++ 0x1fc A_DELAY_PS(435) G_DELAY_PS(0) /* CFG_GPMC_A27_OEN */
++ 0x200 A_DELAY_PS(236) G_DELAY_PS(0) /* CFG_GPMC_A27_OUT */
++ 0x364 A_DELAY_PS(759) G_DELAY_PS(0) /* CFG_GPMC_CS1_OEN */
++ 0x368 A_DELAY_PS(372) G_DELAY_PS(0) /* CFG_GPMC_CS1_OUT */
+ >;
+ };
+
+diff --git a/arch/arm/configs/mvebu_v7_defconfig b/arch/arm/configs/mvebu_v7_defconfig
+index 55140219ab11..001460ee519e 100644
+--- a/arch/arm/configs/mvebu_v7_defconfig
++++ b/arch/arm/configs/mvebu_v7_defconfig
+@@ -131,6 +131,7 @@ CONFIG_MV_XOR=y
+ # CONFIG_IOMMU_SUPPORT is not set
+ CONFIG_MEMORY=y
+ CONFIG_PWM=y
++CONFIG_PHY_MVEBU_A38X_COMPHY=y
+ CONFIG_EXT4_FS=y
+ CONFIG_ISO9660_FS=y
+ CONFIG_JOLIET=y
+diff --git a/arch/arm/mach-imx/cpuidle-imx6sx.c b/arch/arm/mach-imx/cpuidle-imx6sx.c
+index fd0053e47a15..3708a71f30e6 100644
+--- a/arch/arm/mach-imx/cpuidle-imx6sx.c
++++ b/arch/arm/mach-imx/cpuidle-imx6sx.c
+@@ -15,6 +15,7 @@
+
+ #include "common.h"
+ #include "cpuidle.h"
++#include "hardware.h"
+
+ static int imx6sx_idle_finish(unsigned long val)
+ {
+@@ -110,7 +111,7 @@ int __init imx6sx_cpuidle_init(void)
+ * except for power up sw2iso which need to be
+ * larger than LDO ramp up time.
+ */
+- imx_gpc_set_arm_power_up_timing(0xf, 1);
++ imx_gpc_set_arm_power_up_timing(cpu_is_imx6sx() ? 0xf : 0x2, 1);
+ imx_gpc_set_arm_power_down_timing(1, 1);
+
+ return cpuidle_register(&imx6sx_cpuidle_driver, NULL);
+diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
+index b025304bde46..8fbd583b18e1 100644
+--- a/arch/arm64/Makefile
++++ b/arch/arm64/Makefile
+@@ -51,6 +51,7 @@ endif
+
+ KBUILD_CFLAGS += -mgeneral-regs-only $(lseinstr) $(brokengasinst)
+ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
++KBUILD_CFLAGS += -Wno-psabi
+ KBUILD_AFLAGS += $(lseinstr) $(brokengasinst)
+
+ KBUILD_CFLAGS += $(call cc-option,-mabi=lp64)
+diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h
+index d78623acb649..438759e7e8a7 100644
+--- a/arch/arm64/include/uapi/asm/ptrace.h
++++ b/arch/arm64/include/uapi/asm/ptrace.h
+@@ -65,8 +65,6 @@
+
+ #ifndef __ASSEMBLY__
+
+-#include <linux/prctl.h>
+-
+ /*
+ * User structures for general purpose, floating point and debug registers.
+ */
+@@ -113,10 +111,10 @@ struct user_sve_header {
+
+ /*
+ * Common SVE_PT_* flags:
+- * These must be kept in sync with prctl interface in <linux/ptrace.h>
++ * These must be kept in sync with prctl interface in <linux/prctl.h>
+ */
+-#define SVE_PT_VL_INHERIT (PR_SVE_VL_INHERIT >> 16)
+-#define SVE_PT_VL_ONEXEC (PR_SVE_SET_VL_ONEXEC >> 16)
++#define SVE_PT_VL_INHERIT ((1 << 17) /* PR_SVE_VL_INHERIT */ >> 16)
++#define SVE_PT_VL_ONEXEC ((1 << 18) /* PR_SVE_SET_VL_ONEXEC */ >> 16)
+
+
+ /*
+diff --git a/arch/arm64/kernel/ssbd.c b/arch/arm64/kernel/ssbd.c
+index 885f13e58708..52cfc6148355 100644
+--- a/arch/arm64/kernel/ssbd.c
++++ b/arch/arm64/kernel/ssbd.c
+@@ -5,6 +5,7 @@
+
+ #include <linux/compat.h>
+ #include <linux/errno.h>
++#include <linux/prctl.h>
+ #include <linux/sched.h>
+ #include <linux/sched/task_stack.h>
+ #include <linux/thread_info.h>
+diff --git a/arch/mips/include/asm/ginvt.h b/arch/mips/include/asm/ginvt.h
+index 49c6dbe37338..6eb7c2b94dc7 100644
+--- a/arch/mips/include/asm/ginvt.h
++++ b/arch/mips/include/asm/ginvt.h
+@@ -19,7 +19,7 @@ _ASM_MACRO_1R1I(ginvt, rs, type,
+ # define _ASM_SET_GINV
+ #endif
+
+-static inline void ginvt(unsigned long addr, enum ginvt_type type)
++static __always_inline void ginvt(unsigned long addr, enum ginvt_type type)
+ {
+ asm volatile(
+ ".set push\n"
+diff --git a/arch/mips/kernel/uprobes.c b/arch/mips/kernel/uprobes.c
+index 4aaff3b3175c..6dbe4eab0a0e 100644
+--- a/arch/mips/kernel/uprobes.c
++++ b/arch/mips/kernel/uprobes.c
+@@ -112,9 +112,6 @@ int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs)
+ */
+ aup->resume_epc = regs->cp0_epc + 4;
+ if (insn_has_delay_slot((union mips_instruction) aup->insn[0])) {
+- unsigned long epc;
+-
+- epc = regs->cp0_epc;
+ __compute_return_epc_for_insn(regs,
+ (union mips_instruction) aup->insn[0]);
+ aup->resume_epc = regs->cp0_epc;
+diff --git a/arch/parisc/math-emu/cnv_float.h b/arch/parisc/math-emu/cnv_float.h
+index 933423fa5144..b0db61188a61 100644
+--- a/arch/parisc/math-emu/cnv_float.h
++++ b/arch/parisc/math-emu/cnv_float.h
+@@ -60,19 +60,19 @@
+ ((exponent < (SGL_P - 1)) ? \
+ (Sall(sgl_value) << (SGL_EXP_LENGTH + 1 + exponent)) : FALSE)
+
+-#define Int_isinexact_to_sgl(int_value) (int_value << 33 - SGL_EXP_LENGTH)
++#define Int_isinexact_to_sgl(int_value) ((int_value << 33 - SGL_EXP_LENGTH) != 0)
+
+ #define Sgl_roundnearest_from_int(int_value,sgl_value) \
+ if (int_value & 1<<(SGL_EXP_LENGTH - 2)) /* round bit */ \
+- if ((int_value << 34 - SGL_EXP_LENGTH) || Slow(sgl_value)) \
++ if (((int_value << 34 - SGL_EXP_LENGTH) != 0) || Slow(sgl_value)) \
+ Sall(sgl_value)++
+
+ #define Dint_isinexact_to_sgl(dint_valueA,dint_valueB) \
+- ((Dintp1(dint_valueA) << 33 - SGL_EXP_LENGTH) || Dintp2(dint_valueB))
++ (((Dintp1(dint_valueA) << 33 - SGL_EXP_LENGTH) != 0) || Dintp2(dint_valueB))
+
+ #define Sgl_roundnearest_from_dint(dint_valueA,dint_valueB,sgl_value) \
+ if (Dintp1(dint_valueA) & 1<<(SGL_EXP_LENGTH - 2)) \
+- if ((Dintp1(dint_valueA) << 34 - SGL_EXP_LENGTH) || \
++ if (((Dintp1(dint_valueA) << 34 - SGL_EXP_LENGTH) != 0) || \
+ Dintp2(dint_valueB) || Slow(sgl_value)) Sall(sgl_value)++
+
+ #define Dint_isinexact_to_dbl(dint_value) \
+diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
+index 23f7ed796f38..49d65cd08ee0 100644
+--- a/arch/powerpc/include/asm/ppc-opcode.h
++++ b/arch/powerpc/include/asm/ppc-opcode.h
+@@ -342,6 +342,7 @@
+ #define PPC_INST_MADDLD 0x10000033
+ #define PPC_INST_DIVWU 0x7c000396
+ #define PPC_INST_DIVD 0x7c0003d2
++#define PPC_INST_DIVDU 0x7c000392
+ #define PPC_INST_RLWINM 0x54000000
+ #define PPC_INST_RLWINM_DOT 0x54000001
+ #define PPC_INST_RLWIMI 0x50000000
+diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c
+index f720c5cc0b5e..8751ae2e2d04 100644
+--- a/arch/powerpc/mm/mmu_context_book3s64.c
++++ b/arch/powerpc/mm/mmu_context_book3s64.c
+@@ -55,14 +55,48 @@ EXPORT_SYMBOL_GPL(hash__alloc_context_id);
+
+ void slb_setup_new_exec(void);
+
++static int realloc_context_ids(mm_context_t *ctx)
++{
++ int i, id;
++
++ /*
++ * id 0 (aka. ctx->id) is special, we always allocate a new one, even if
++ * there wasn't one allocated previously (which happens in the exec
++ * case where ctx is newly allocated).
++ *
++ * We have to be a bit careful here. We must keep the existing ids in
++ * the array, so that we can test if they're non-zero to decide if we
++ * need to allocate a new one. However in case of error we must free the
++ * ids we've allocated but *not* any of the existing ones (or risk a
++ * UAF). That's why we decrement i at the start of the error handling
++ * loop, to skip the id that we just tested but couldn't reallocate.
++ */
++ for (i = 0; i < ARRAY_SIZE(ctx->extended_id); i++) {
++ if (i == 0 || ctx->extended_id[i]) {
++ id = hash__alloc_context_id();
++ if (id < 0)
++ goto error;
++
++ ctx->extended_id[i] = id;
++ }
++ }
++
++ /* The caller expects us to return id */
++ return ctx->id;
++
++error:
++ for (i--; i >= 0; i--) {
++ if (ctx->extended_id[i])
++ ida_free(&mmu_context_ida, ctx->extended_id[i]);
++ }
++
++ return id;
++}
++
+ static int hash__init_new_context(struct mm_struct *mm)
+ {
+ int index;
+
+- index = hash__alloc_context_id();
+- if (index < 0)
+- return index;
+-
+ /*
+ * The old code would re-promote on fork, we don't do that when using
+ * slices as it could cause problem promoting slices that have been
+@@ -80,6 +114,10 @@ static int hash__init_new_context(struct mm_struct *mm)
+ if (mm->context.id == 0)
+ slice_init_new_context_exec(mm);
+
++ index = realloc_context_ids(&mm->context);
++ if (index < 0)
++ return index;
++
+ subpage_prot_init_new_context(mm);
+
+ pkey_mm_init(mm);
+diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
+index dcac37745b05..1e932898d430 100644
+--- a/arch/powerpc/net/bpf_jit.h
++++ b/arch/powerpc/net/bpf_jit.h
+@@ -116,7 +116,7 @@
+ ___PPC_RA(a) | IMM_L(i))
+ #define PPC_DIVWU(d, a, b) EMIT(PPC_INST_DIVWU | ___PPC_RT(d) | \
+ ___PPC_RA(a) | ___PPC_RB(b))
+-#define PPC_DIVD(d, a, b) EMIT(PPC_INST_DIVD | ___PPC_RT(d) | \
++#define PPC_DIVDU(d, a, b) EMIT(PPC_INST_DIVDU | ___PPC_RT(d) | \
+ ___PPC_RA(a) | ___PPC_RB(b))
+ #define PPC_AND(d, a, b) EMIT(PPC_INST_AND | ___PPC_RA(d) | \
+ ___PPC_RS(a) | ___PPC_RB(b))
+diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
+index 21a1dcd4b156..e3fedeffe40f 100644
+--- a/arch/powerpc/net/bpf_jit_comp64.c
++++ b/arch/powerpc/net/bpf_jit_comp64.c
+@@ -399,12 +399,12 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
+ case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
+ case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
+ if (BPF_OP(code) == BPF_MOD) {
+- PPC_DIVD(b2p[TMP_REG_1], dst_reg, src_reg);
++ PPC_DIVDU(b2p[TMP_REG_1], dst_reg, src_reg);
+ PPC_MULD(b2p[TMP_REG_1], src_reg,
+ b2p[TMP_REG_1]);
+ PPC_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]);
+ } else
+- PPC_DIVD(dst_reg, dst_reg, src_reg);
++ PPC_DIVDU(dst_reg, dst_reg, src_reg);
+ break;
+ case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
+ case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */
+@@ -432,7 +432,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
+ break;
+ case BPF_ALU64:
+ if (BPF_OP(code) == BPF_MOD) {
+- PPC_DIVD(b2p[TMP_REG_2], dst_reg,
++ PPC_DIVDU(b2p[TMP_REG_2], dst_reg,
+ b2p[TMP_REG_1]);
+ PPC_MULD(b2p[TMP_REG_1],
+ b2p[TMP_REG_1],
+@@ -440,7 +440,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
+ PPC_SUB(dst_reg, dst_reg,
+ b2p[TMP_REG_1]);
+ } else
+- PPC_DIVD(dst_reg, dst_reg,
++ PPC_DIVDU(dst_reg, dst_reg,
+ b2p[TMP_REG_1]);
+ break;
+ }
+diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
+index 88401d5125bc..523dbfbac03d 100644
+--- a/arch/riscv/mm/fault.c
++++ b/arch/riscv/mm/fault.c
+@@ -29,6 +29,7 @@
+
+ #include <asm/pgalloc.h>
+ #include <asm/ptrace.h>
++#include <asm/tlbflush.h>
+
+ /*
+ * This routine handles page faults. It determines the address and the
+@@ -281,6 +282,18 @@ vmalloc_fault:
+ pte_k = pte_offset_kernel(pmd_k, addr);
+ if (!pte_present(*pte_k))
+ goto no_context;
++
++ /*
++ * The kernel assumes that TLBs don't cache invalid
++ * entries, but in RISC-V, SFENCE.VMA specifies an
++ * ordering constraint, not a cache flush; it is
++ * necessary even after writing invalid entries.
++ * Relying on flush_tlb_fix_spurious_fault would
++ * suffice, but the extra traps reduce
++ * performance. So, eagerly SFENCE.VMA.
++ */
++ local_flush_tlb_page(addr);
++
+ return;
+ }
+ }
+diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c
+index 9a26b442f820..8e645ddac58e 100644
+--- a/arch/sparc/kernel/mdesc.c
++++ b/arch/sparc/kernel/mdesc.c
+@@ -356,6 +356,8 @@ static int get_vdev_port_node_info(struct mdesc_handle *md, u64 node,
+
+ node_info->vdev_port.id = *idp;
+ node_info->vdev_port.name = kstrdup_const(name, GFP_KERNEL);
++ if (!node_info->vdev_port.name)
++ return -1;
+ node_info->vdev_port.parent_cfg_hdl = *parent_cfg_hdlp;
+
+ return 0;
+diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
+index 6de7c684c29f..a58ae9c42803 100644
+--- a/arch/sparc/kernel/perf_event.c
++++ b/arch/sparc/kernel/perf_event.c
+@@ -891,6 +891,10 @@ static int sparc_perf_event_set_period(struct perf_event *event,
+ s64 period = hwc->sample_period;
+ int ret = 0;
+
++ /* The period may have been changed by PERF_EVENT_IOC_PERIOD */
++ if (unlikely(period != hwc->last_period))
++ left = period - (hwc->last_period - left);
++
+ if (unlikely(left <= -period)) {
+ left = period;
+ local64_set(&hwc->period_left, left);
+diff --git a/arch/x86/entry/vdso/vclock_gettime.c b/arch/x86/entry/vdso/vclock_gettime.c
+index 98c7d12b945c..ed7ddee1ae69 100644
+--- a/arch/x86/entry/vdso/vclock_gettime.c
++++ b/arch/x86/entry/vdso/vclock_gettime.c
+@@ -128,13 +128,24 @@ notrace static inline u64 vgetcyc(int mode)
+ {
+ if (mode == VCLOCK_TSC)
+ return (u64)rdtsc_ordered();
++
++ /*
++ * For any memory-mapped vclock type, we need to make sure that gcc
++ * doesn't cleverly hoist a load before the mode check. Otherwise we
++ * might end up touching the memory-mapped page even if the vclock in
++ * question isn't enabled, which will segfault. Hence the barriers.
++ */
+ #ifdef CONFIG_PARAVIRT_CLOCK
+- else if (mode == VCLOCK_PVCLOCK)
++ if (mode == VCLOCK_PVCLOCK) {
++ barrier();
+ return vread_pvclock();
++ }
+ #endif
+ #ifdef CONFIG_HYPERV_TSCPAGE
+- else if (mode == VCLOCK_HVCLOCK)
++ if (mode == VCLOCK_HVCLOCK) {
++ barrier();
+ return vread_hvclock();
++ }
+ #endif
+ return U64_MAX;
+ }
+diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+index 85212a32b54d..c51b56e29948 100644
+--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
++++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+@@ -2556,7 +2556,7 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
+ if (closid_allocated(i) && i != closid) {
+ mode = rdtgroup_mode_by_closid(i);
+ if (mode == RDT_MODE_PSEUDO_LOCKSETUP)
+- break;
++ continue;
+ /*
+ * If CDP is active include peer
+ * domain's usage to ensure there
+diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
+index d9c7b45d231f..85438a624930 100644
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -5591,14 +5591,18 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
+ struct page *page;
+ int i;
+
+- if (tdp_enabled)
+- return 0;
+-
+ /*
+- * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
+- * Therefore we need to allocate shadow page tables in the first
+- * 4GB of memory, which happens to fit the DMA32 zone.
++ * When using PAE paging, the four PDPTEs are treated as 'root' pages,
++ * while the PDP table is a per-vCPU construct that's allocated at MMU
++ * creation. When emulating 32-bit mode, cr3 is only 32 bits even on
++ * x86_64. Therefore we need to allocate the PDP table in the first
++ * 4GB of memory, which happens to fit the DMA32 zone. Except for
++ * SVM's 32-bit NPT support, TDP paging doesn't use PAE paging and can
++ * skip allocating the PDP table.
+ */
++ if (tdp_enabled && kvm_x86_ops->get_tdp_level(vcpu) > PT32E_ROOT_LEVEL)
++ return 0;
++
+ page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_DMA32);
+ if (!page)
+ return -ENOMEM;
+diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
+index 4ec6fbb696bf..a5139f1d9220 100644
+--- a/arch/xtensa/kernel/setup.c
++++ b/arch/xtensa/kernel/setup.c
+@@ -310,7 +310,8 @@ extern char _SecondaryResetVector_text_start;
+ extern char _SecondaryResetVector_text_end;
+ #endif
+
+-static inline int mem_reserve(unsigned long start, unsigned long end)
++static inline int __init_memblock mem_reserve(unsigned long start,
++ unsigned long end)
+ {
+ return memblock_reserve(start, end - start);
+ }
+diff --git a/crypto/hmac.c b/crypto/hmac.c
+index 4b8c8ee8f15c..c623778b36ba 100644
+--- a/crypto/hmac.c
++++ b/crypto/hmac.c
+@@ -168,8 +168,10 @@ static int hmac_init_tfm(struct crypto_tfm *tfm)
+
+ parent->descsize = sizeof(struct shash_desc) +
+ crypto_shash_descsize(hash);
+- if (WARN_ON(parent->descsize > HASH_MAX_DESCSIZE))
++ if (WARN_ON(parent->descsize > HASH_MAX_DESCSIZE)) {
++ crypto_free_shash(hash);
+ return -EINVAL;
++ }
+
+ ctx->hash = hash;
+ return 0;
+diff --git a/drivers/android/binder.c b/drivers/android/binder.c
+index 4b9c7ca492e6..cc19d91c1688 100644
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -1950,8 +1950,18 @@ static void binder_free_txn_fixups(struct binder_transaction *t)
+
+ static void binder_free_transaction(struct binder_transaction *t)
+ {
+- if (t->buffer)
+- t->buffer->transaction = NULL;
++ struct binder_proc *target_proc = t->to_proc;
++
++ if (target_proc) {
++ binder_inner_proc_lock(target_proc);
++ if (t->buffer)
++ t->buffer->transaction = NULL;
++ binder_inner_proc_unlock(target_proc);
++ }
++ /*
++ * If the transaction has no target_proc, then
++ * t->buffer->transaction has already been cleared.
++ */
+ binder_free_txn_fixups(t);
+ kfree(t);
+ binder_stats_deleted(BINDER_STAT_TRANSACTION);
+@@ -3550,10 +3560,12 @@ err_invalid_target_handle:
+ static void
+ binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer)
+ {
++ binder_inner_proc_lock(proc);
+ if (buffer->transaction) {
+ buffer->transaction->buffer = NULL;
+ buffer->transaction = NULL;
+ }
++ binder_inner_proc_unlock(proc);
+ if (buffer->async_transaction && buffer->target_node) {
+ struct binder_node *buf_node;
+ struct binder_work *w;
+diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
+index cd57747286f2..9635897458a0 100644
+--- a/drivers/dma-buf/udmabuf.c
++++ b/drivers/dma-buf/udmabuf.c
+@@ -77,6 +77,7 @@ static void unmap_udmabuf(struct dma_buf_attachment *at,
+ struct sg_table *sg,
+ enum dma_data_direction direction)
+ {
++ dma_unmap_sg(at->dev, sg->sgl, sg->nents, direction);
+ sg_free_table(sg);
+ kfree(sg);
+ }
+diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
+index 9ce0a386225b..f49534019d37 100644
+--- a/drivers/dma/dma-jz4780.c
++++ b/drivers/dma/dma-jz4780.c
+@@ -666,10 +666,11 @@ static enum dma_status jz4780_dma_tx_status(struct dma_chan *chan,
+ return status;
+ }
+
+-static void jz4780_dma_chan_irq(struct jz4780_dma_dev *jzdma,
+- struct jz4780_dma_chan *jzchan)
++static bool jz4780_dma_chan_irq(struct jz4780_dma_dev *jzdma,
++ struct jz4780_dma_chan *jzchan)
+ {
+ uint32_t dcs;
++ bool ack = true;
+
+ spin_lock(&jzchan->vchan.lock);
+
+@@ -692,12 +693,20 @@ static void jz4780_dma_chan_irq(struct jz4780_dma_dev *jzdma,
+ if ((dcs & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT)) == 0) {
+ if (jzchan->desc->type == DMA_CYCLIC) {
+ vchan_cyclic_callback(&jzchan->desc->vdesc);
+-
++
++ jz4780_dma_begin(jzchan);
++ } else if (dcs & JZ_DMA_DCS_TT) {
+ vchan_cookie_complete(&jzchan->desc->vdesc);
+ jzchan->desc = NULL;
+-
+
+- jz4780_dma_begin(jzchan);
++ jz4780_dma_begin(jzchan);
++ } else {
++ /* False positive - continue the transfer */
++ ack = false;
++ jz4780_dma_chn_writel(jzdma, jzchan->id,
++ JZ_DMA_REG_DCS,
++ JZ_DMA_DCS_CTE);
++ }
+ }
+ } else {
+ dev_err(&jzchan->vchan.chan.dev->device,
+@@ -705,21 +714,22 @@ static void jz4780_dma_chan_irq(struct jz4780_dma_dev *jzdma,
+ }
+
+ spin_unlock(&jzchan->vchan.lock);
+
+ return ack;
+ }
+
+ static irqreturn_t jz4780_dma_irq_handler(int irq, void *data)
+ {
+ struct jz4780_dma_dev *jzdma = data;
+ unsigned int nb_channels = jzdma->soc_data->nb_channels;
+ uint32_t pending, dmac;
+ int i;
+
+ pending = jz4780_dma_ctrl_readl(jzdma, JZ_DMA_REG_DIRQP);
+
+- for (i = 0; i < jzdma->soc_data->nb_channels; i++) {
+- if (!(pending & (1<<i)))
+- continue;
+-
+- jz4780_dma_chan_irq(jzdma, &jzdma->chan[i]);
++ for_each_set_bit(i, (unsigned long *)&pending, nb_channels) {
++ if (jz4780_dma_chan_irq(jzdma, &jzdma->chan[i]))
++ pending &= ~BIT(i);
+ }
+
+ /* Clear halt and address error status of all channels. */
+@@ -728,7 +738,7 @@ static irqreturn_t jz4780_dma_irq_handler(int irq, void *data)
+ jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DMAC, dmac);
+
+ /* Clear interrupt pending status. */
+- jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DIRQP, 0);
++ jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DIRQP, pending);
+
+ return IRQ_HANDLED;
+ }
+diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
+index b2ac1d2c5b86..a1ce307c502f 100644
+--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
++++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
+@@ -512,7 +512,8 @@ dma_chan_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst_adr,
+ return vchan_tx_prep(&chan->vc, &first->vd, flags);
+
+ err_desc_get:
+- axi_desc_put(first);
++ if (first)
++ axi_desc_put(first);
+ return NULL;
+ }
+
+diff --git a/drivers/dma/mediatek/mtk-cqdma.c b/drivers/dma/mediatek/mtk-cqdma.c
+index 814853842e29..723b11c190b3 100644
+--- a/drivers/dma/mediatek/mtk-cqdma.c
++++ b/drivers/dma/mediatek/mtk-cqdma.c
+@@ -225,7 +225,7 @@ static int mtk_cqdma_hard_reset(struct mtk_cqdma_pchan *pc)
+ mtk_dma_set(pc, MTK_CQDMA_RESET, MTK_CQDMA_HARD_RST_BIT);
+ mtk_dma_clr(pc, MTK_CQDMA_RESET, MTK_CQDMA_HARD_RST_BIT);
+
+- return mtk_cqdma_poll_engine_done(pc, false);
++ return mtk_cqdma_poll_engine_done(pc, true);
+ }
+
+ static void mtk_cqdma_start(struct mtk_cqdma_pchan *pc,
+@@ -671,7 +671,7 @@ static void mtk_cqdma_free_chan_resources(struct dma_chan *c)
+ mtk_dma_set(cvc->pc, MTK_CQDMA_FLUSH, MTK_CQDMA_FLUSH_BIT);
+
+ /* wait for the completion of flush operation */
+- if (mtk_cqdma_poll_engine_done(cvc->pc, false) < 0)
++ if (mtk_cqdma_poll_engine_done(cvc->pc, true) < 0)
+ dev_err(cqdma2dev(to_cqdma_dev(c)), "cqdma flush timeout\n");
+
+ /* clear the flush bit and interrupt flag */
+diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
+index 48431e2da987..01abed5cde49 100644
+--- a/drivers/dma/sprd-dma.c
++++ b/drivers/dma/sprd-dma.c
+@@ -510,7 +510,9 @@ static void sprd_dma_start(struct sprd_dma_chn *schan)
+ sprd_dma_set_uid(schan);
+ sprd_dma_enable_chn(schan);
+
+- if (schan->dev_id == SPRD_DMA_SOFTWARE_UID)
++ if (schan->dev_id == SPRD_DMA_SOFTWARE_UID &&
++ schan->chn_mode != SPRD_DMA_DST_CHN0 &&
++ schan->chn_mode != SPRD_DMA_DST_CHN1)
+ sprd_dma_soft_request(schan);
+ }
+
+@@ -552,12 +554,17 @@ static irqreturn_t dma_irq_handle(int irq, void *dev_id)
+ schan = &sdev->channels[i];
+
+ spin_lock(&schan->vc.lock);
+
+ sdesc = schan->cur_desc;
+ if (!sdesc) {
+ spin_unlock(&schan->vc.lock);
+ return IRQ_HANDLED;
+ }
+
+ int_type = sprd_dma_get_int_type(schan);
+ req_type = sprd_dma_get_req_type(schan);
+ sprd_dma_clear_int(schan);
+
+- sdesc = schan->cur_desc;
+-
+ /* cyclic mode schedule callback */
+ cyclic = schan->linklist.phy_addr ? true : false;
+ if (cyclic == true) {
+@@ -625,7 +632,7 @@ static enum dma_status sprd_dma_tx_status(struct dma_chan *chan,
+ else
+ pos = 0;
+ } else if (schan->cur_desc && schan->cur_desc->vd.tx.cookie == cookie) {
+- struct sprd_dma_desc *sdesc = to_sprd_dma_desc(vd);
++ struct sprd_dma_desc *sdesc = schan->cur_desc;
+
+ if (sdesc->dir == DMA_DEV_TO_MEM)
+ pos = sprd_dma_get_dst_addr(schan);
+@@ -771,7 +778,7 @@ static int sprd_dma_fill_desc(struct dma_chan *chan,
+ temp |= slave_cfg->src_maxburst & SPRD_DMA_FRG_LEN_MASK;
+ hw->frg_len = temp;
+
+- hw->blk_len = len & SPRD_DMA_BLK_LEN_MASK;
++ hw->blk_len = slave_cfg->src_maxburst & SPRD_DMA_BLK_LEN_MASK;
+ hw->trsc_len = len & SPRD_DMA_TRSC_LEN_MASK;
+
+ temp = (dst_step & SPRD_DMA_TRSF_STEP_MASK) << SPRD_DMA_DEST_TRSF_STEP_OFFSET;
+@@ -904,6 +911,12 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ schan->linklist.virt_addr = 0;
+ }
+
++ /* Set channel mode and trigger mode for 2-stage transfer */
++ schan->chn_mode =
++ (flags >> SPRD_DMA_CHN_MODE_SHIFT) & SPRD_DMA_CHN_MODE_MASK;
++ schan->trg_mode =
++ (flags >> SPRD_DMA_TRG_MODE_SHIFT) & SPRD_DMA_TRG_MODE_MASK;
++
+ sdesc = kzalloc(sizeof(*sdesc), GFP_NOWAIT);
+ if (!sdesc)
+ return NULL;
+@@ -937,12 +950,6 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ }
+ }
+
+- /* Set channel mode and trigger mode for 2-stage transfer */
+- schan->chn_mode =
+- (flags >> SPRD_DMA_CHN_MODE_SHIFT) & SPRD_DMA_CHN_MODE_MASK;
+- schan->trg_mode =
+- (flags >> SPRD_DMA_TRG_MODE_SHIFT) & SPRD_DMA_TRG_MODE_MASK;
+-
+ ret = sprd_dma_fill_desc(chan, &sdesc->chn_hw, 0, 0, src, dst, len,
+ dir, flags, slave_cfg);
+ if (ret) {
+diff --git a/drivers/fpga/dfl-afu-dma-region.c b/drivers/fpga/dfl-afu-dma-region.c
+index e18a786fc943..cd68002ac097 100644
+--- a/drivers/fpga/dfl-afu-dma-region.c
++++ b/drivers/fpga/dfl-afu-dma-region.c
+@@ -399,7 +399,7 @@ int afu_dma_map_region(struct dfl_feature_platform_data *pdata,
+ region->pages[0], 0,
+ region->length,
+ DMA_BIDIRECTIONAL);
+- if (dma_mapping_error(&pdata->dev->dev, region->iova)) {
++ if (dma_mapping_error(dfl_fpga_pdata_to_parent(pdata), region->iova)) {
+ dev_err(&pdata->dev->dev, "failed to map for dma\n");
+ ret = -EFAULT;
+ goto unpin_pages;
+diff --git a/drivers/fpga/dfl.c b/drivers/fpga/dfl.c
+index 2c09e502e721..c25217cde5ca 100644
+--- a/drivers/fpga/dfl.c
++++ b/drivers/fpga/dfl.c
+@@ -40,6 +40,13 @@ enum dfl_fpga_devt_type {
+ DFL_FPGA_DEVT_MAX,
+ };
+
++static struct lock_class_key dfl_pdata_keys[DFL_ID_MAX];
++
++static const char *dfl_pdata_key_strings[DFL_ID_MAX] = {
++ "dfl-fme-pdata",
++ "dfl-port-pdata",
++};
++
+ /**
+ * dfl_dev_info - dfl feature device information.
+ * @name: name string of the feature platform device.
+@@ -443,11 +450,16 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo)
+ struct platform_device *fdev = binfo->feature_dev;
+ struct dfl_feature_platform_data *pdata;
+ struct dfl_feature_info *finfo, *p;
++ enum dfl_id_type type;
+ int ret, index = 0;
+
+ if (!fdev)
+ return 0;
+
++ type = feature_dev_id_type(fdev);
++ if (WARN_ON_ONCE(type >= DFL_ID_MAX))
++ return -EINVAL;
++
+ /*
+ * we do not need to care for the memory which is associated with
+ * the platform device. After calling platform_device_unregister(),
+@@ -463,6 +475,8 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo)
+ pdata->num = binfo->feature_num;
+ pdata->dfl_cdev = binfo->cdev;
+ mutex_init(&pdata->lock);
++ lockdep_set_class_and_name(&pdata->lock, &dfl_pdata_keys[type],
++ dfl_pdata_key_strings[type]);
+
+ /*
+ * the count should be initialized to 0 to make sure
+@@ -497,7 +511,7 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo)
+
+ ret = platform_device_add(binfo->feature_dev);
+ if (!ret) {
+- if (feature_dev_id_type(binfo->feature_dev) == PORT_ID)
++ if (type == PORT_ID)
+ dfl_fpga_cdev_add_port_dev(binfo->cdev,
+ binfo->feature_dev);
+ else
+diff --git a/drivers/fpga/stratix10-soc.c b/drivers/fpga/stratix10-soc.c
+index 13851b3d1c56..215d33789c74 100644
+--- a/drivers/fpga/stratix10-soc.c
++++ b/drivers/fpga/stratix10-soc.c
+@@ -507,12 +507,16 @@ static int __init s10_init(void)
+ if (!fw_np)
+ return -ENODEV;
+
++ of_node_get(fw_np);
+ np = of_find_matching_node(fw_np, s10_of_match);
+- if (!np)
++ if (!np) {
++ of_node_put(fw_np);
+ return -ENODEV;
++ }
+
+ of_node_put(np);
+ ret = of_platform_populate(fw_np, s10_of_match, NULL, NULL);
++ of_node_put(fw_np);
+ if (ret)
+ return ret;
+
+diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c
+index 0b2b62f8fa3c..a3efa28436ea 100644
+--- a/drivers/gpu/drm/arm/hdlcd_crtc.c
++++ b/drivers/gpu/drm/arm/hdlcd_crtc.c
+@@ -186,20 +186,20 @@ static void hdlcd_crtc_atomic_disable(struct drm_crtc *crtc,
+ clk_disable_unprepare(hdlcd->clk);
+ }
+
+-static int hdlcd_crtc_atomic_check(struct drm_crtc *crtc,
+- struct drm_crtc_state *state)
++static enum drm_mode_status hdlcd_crtc_mode_valid(struct drm_crtc *crtc,
++ const struct drm_display_mode *mode)
+ {
+ struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
+- struct drm_display_mode *mode = &state->adjusted_mode;
+ long rate, clk_rate = mode->clock * 1000;
+
+ rate = clk_round_rate(hdlcd->clk, clk_rate);
+- if (rate != clk_rate) {
++ /* 0.1% seems a close enough tolerance for the TDA19988 on Juno */
++ if (abs(rate - clk_rate) * 1000 > clk_rate) {
+ /* clock required by mode not supported by hardware */
+- return -EINVAL;
++ return MODE_NOCLOCK;
+ }
+
+- return 0;
++ return MODE_OK;
+ }
+
+ static void hdlcd_crtc_atomic_begin(struct drm_crtc *crtc,
+@@ -220,7 +220,7 @@ static void hdlcd_crtc_atomic_begin(struct drm_crtc *crtc,
+ }
+
+ static const struct drm_crtc_helper_funcs hdlcd_crtc_helper_funcs = {
+- .atomic_check = hdlcd_crtc_atomic_check,
++ .mode_valid = hdlcd_crtc_mode_valid,
+ .atomic_begin = hdlcd_crtc_atomic_begin,
+ .atomic_enable = hdlcd_crtc_atomic_enable,
+ .atomic_disable = hdlcd_crtc_atomic_disable,
+diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
+index ab50ad06e271..64da56f4b0cf 100644
+--- a/drivers/gpu/drm/arm/malidp_drv.c
++++ b/drivers/gpu/drm/arm/malidp_drv.c
+@@ -192,6 +192,7 @@ static void malidp_atomic_commit_hw_done(struct drm_atomic_state *state)
+ {
+ struct drm_device *drm = state->dev;
+ struct malidp_drm *malidp = drm->dev_private;
++ int loop = 5;
+
+ malidp->event = malidp->crtc.state->event;
+ malidp->crtc.state->event = NULL;
+@@ -206,8 +207,18 @@ static void malidp_atomic_commit_hw_done(struct drm_atomic_state *state)
+ drm_crtc_vblank_get(&malidp->crtc);
+
+ /* only set config_valid if the CRTC is enabled */
+- if (malidp_set_and_wait_config_valid(drm) < 0)
++ if (malidp_set_and_wait_config_valid(drm) < 0) {
++ /*
++ * make a loop around the second CVAL setting and
++ * try 5 times before giving up.
++ */
++ while (loop--) {
++ if (!malidp_set_and_wait_config_valid(drm))
++ break;
++ }
+ DRM_DEBUG_DRIVER("timed out waiting for updated configuration\n");
++ }
++
+ } else if (malidp->event) {
+ /* CRTC inactive means vblank IRQ is disabled, send event directly */
+ spin_lock_irq(&drm->event_lock);
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index cd8a22d6370e..be4024f0e3a8 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -11820,9 +11820,6 @@ intel_compare_link_m_n(const struct intel_link_m_n *m_n,
+ m2_n2->gmch_m, m2_n2->gmch_n, !adjust) &&
+ intel_compare_m_n(m_n->link_m, m_n->link_n,
+ m2_n2->link_m, m2_n2->link_n, !adjust)) {
+- if (adjust)
+- *m2_n2 = *m_n;
+-
+ return true;
+ }
+
+@@ -12855,6 +12852,33 @@ static int calc_watermark_data(struct intel_atomic_state *state)
+ return 0;
+ }
+
++static void intel_crtc_check_fastset(struct intel_crtc_state *old_crtc_state,
++ struct intel_crtc_state *new_crtc_state)
++{
++ struct drm_i915_private *dev_priv =
++ to_i915(new_crtc_state->base.crtc->dev);
++
++ if (!intel_pipe_config_compare(dev_priv, old_crtc_state,
++ new_crtc_state, true))
++ return;
++
++ new_crtc_state->base.mode_changed = false;
++ new_crtc_state->update_pipe = true;
++
++ /*
++ * If we're not doing the full modeset we want to
++ * keep the current M/N values as they may be
++ * sufficiently different to the computed values
++ * to cause problems.
++ *
++ * FIXME: should really copy more fuzzy state here
++ */
++ new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n;
++ new_crtc_state->dp_m_n = old_crtc_state->dp_m_n;
++ new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2;
++ new_crtc_state->has_drrs = old_crtc_state->has_drrs;
++}
++
+ /**
+ * intel_atomic_check - validate state object
+ * @dev: drm device
+@@ -12903,12 +12927,8 @@ static int intel_atomic_check(struct drm_device *dev,
+ return ret;
+ }
+
+- if (intel_pipe_config_compare(dev_priv,
+- to_intel_crtc_state(old_crtc_state),
+- pipe_config, true)) {
+- crtc_state->mode_changed = false;
+- pipe_config->update_pipe = true;
+- }
++ intel_crtc_check_fastset(to_intel_crtc_state(old_crtc_state),
++ pipe_config);
+
+ if (needs_modeset(crtc_state))
+ any_ms = true;
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
+index 8b9270f31409..e4e09d47c5c0 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
+@@ -136,6 +136,114 @@ static int vmw_close_channel(struct rpc_channel *channel)
+ return 0;
+ }
+
++/**
++ * vmw_port_hb_out - Send the message payload either through the
++ * high-bandwidth port if available, or through the backdoor otherwise.
++ * @channel: The rpc channel.
++ * @msg: NULL-terminated message.
++ * @hb: Whether the high-bandwidth port is available.
++ *
++ * Return: The port status.
++ */
++static unsigned long vmw_port_hb_out(struct rpc_channel *channel,
++ const char *msg, bool hb)
++{
++ unsigned long si, di, eax, ebx, ecx, edx;
++ unsigned long msg_len = strlen(msg);
++
++ if (hb) {
++ unsigned long bp = channel->cookie_high;
++
++ si = (uintptr_t) msg;
++ di = channel->cookie_low;
++
++ VMW_PORT_HB_OUT(
++ (MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG,
++ msg_len, si, di,
++ VMW_HYPERVISOR_HB_PORT | (channel->channel_id << 16),
++ VMW_HYPERVISOR_MAGIC, bp,
++ eax, ebx, ecx, edx, si, di);
++
++ return ebx;
++ }
++
++ /* HB port not available. Send the message 4 bytes at a time. */
++ ecx = MESSAGE_STATUS_SUCCESS << 16;
++ while (msg_len && (HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS)) {
++ unsigned int bytes = min_t(size_t, msg_len, 4);
++ unsigned long word = 0;
++
++ memcpy(&word, msg, bytes);
++ msg_len -= bytes;
++ msg += bytes;
++ si = channel->cookie_high;
++ di = channel->cookie_low;
++
++ VMW_PORT(VMW_PORT_CMD_MSG | (MSG_TYPE_SENDPAYLOAD << 16),
++ word, si, di,
++ VMW_HYPERVISOR_PORT | (channel->channel_id << 16),
++ VMW_HYPERVISOR_MAGIC,
++ eax, ebx, ecx, edx, si, di);
++ }
++
++ return ecx;
++}
++
++/**
++ * vmw_port_hb_in - Receive the message payload either through the
++ * high-bandwidth port if available, or through the backdoor otherwise.
++ * @channel: The rpc channel.
++ * @reply: Pointer to buffer holding reply.
++ * @reply_len: Length of the reply.
++ * @hb: Whether the high-bandwidth port is available.
++ *
++ * Return: The port status.
++ */
++static unsigned long vmw_port_hb_in(struct rpc_channel *channel, char *reply,
++ unsigned long reply_len, bool hb)
++{
++ unsigned long si, di, eax, ebx, ecx, edx;
++
++ if (hb) {
++ unsigned long bp = channel->cookie_low;
++
++ si = channel->cookie_high;
++ di = (uintptr_t) reply;
++
++ VMW_PORT_HB_IN(
++ (MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG,
++ reply_len, si, di,
++ VMW_HYPERVISOR_HB_PORT | (channel->channel_id << 16),
++ VMW_HYPERVISOR_MAGIC, bp,
++ eax, ebx, ecx, edx, si, di);
++
++ return ebx;
++ }
++
++ /* HB port not available. Retrieve the message 4 bytes at a time. */
++ ecx = MESSAGE_STATUS_SUCCESS << 16;
++ while (reply_len) {
++ unsigned int bytes = min_t(unsigned long, reply_len, 4);
++
++ si = channel->cookie_high;
++ di = channel->cookie_low;
++
++ VMW_PORT(VMW_PORT_CMD_MSG | (MSG_TYPE_RECVPAYLOAD << 16),
++ MESSAGE_STATUS_SUCCESS, si, di,
++ VMW_HYPERVISOR_PORT | (channel->channel_id << 16),
++ VMW_HYPERVISOR_MAGIC,
++ eax, ebx, ecx, edx, si, di);
++
++ if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0)
++ break;
++
++ memcpy(reply, &ebx, bytes);
++ reply_len -= bytes;
++ reply += bytes;
++ }
++
++ return ecx;
++}
+
+
+ /**
+@@ -148,11 +256,10 @@ static int vmw_close_channel(struct rpc_channel *channel)
+ */
+ static int vmw_send_msg(struct rpc_channel *channel, const char *msg)
+ {
+- unsigned long eax, ebx, ecx, edx, si, di, bp;
++ unsigned long eax, ebx, ecx, edx, si, di;
+ size_t msg_len = strlen(msg);
+ int retries = 0;
+
+-
+ while (retries < RETRIES) {
+ retries++;
+
+@@ -166,23 +273,14 @@ static int vmw_send_msg(struct rpc_channel *channel, const char *msg)
+ VMW_HYPERVISOR_MAGIC,
+ eax, ebx, ecx, edx, si, di);
+
+- if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0 ||
+- (HIGH_WORD(ecx) & MESSAGE_STATUS_HB) == 0) {
+- /* Expected success + high-bandwidth. Give up. */
++ if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) {
++ /* Expected success. Give up. */
+ return -EINVAL;
+ }
+
+ /* Send msg */
+- si = (uintptr_t) msg;
+- di = channel->cookie_low;
+- bp = channel->cookie_high;
+-
+- VMW_PORT_HB_OUT(
+- (MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG,
+- msg_len, si, di,
+- VMW_HYPERVISOR_HB_PORT | (channel->channel_id << 16),
+- VMW_HYPERVISOR_MAGIC, bp,
+- eax, ebx, ecx, edx, si, di);
++ ebx = vmw_port_hb_out(channel, msg,
++ !!(HIGH_WORD(ecx) & MESSAGE_STATUS_HB));
+
+ if ((HIGH_WORD(ebx) & MESSAGE_STATUS_SUCCESS) != 0) {
+ return 0;
+@@ -211,7 +309,7 @@ STACK_FRAME_NON_STANDARD(vmw_send_msg);
+ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
+ size_t *msg_len)
+ {
+- unsigned long eax, ebx, ecx, edx, si, di, bp;
++ unsigned long eax, ebx, ecx, edx, si, di;
+ char *reply;
+ size_t reply_len;
+ int retries = 0;
+@@ -233,8 +331,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
+ VMW_HYPERVISOR_MAGIC,
+ eax, ebx, ecx, edx, si, di);
+
+- if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0 ||
+- (HIGH_WORD(ecx) & MESSAGE_STATUS_HB) == 0) {
++ if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) {
+ DRM_ERROR("Failed to get reply size for host message.\n");
+ return -EINVAL;
+ }
+@@ -252,17 +349,8 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
+
+
+ /* Receive buffer */
+- si = channel->cookie_high;
+- di = (uintptr_t) reply;
+- bp = channel->cookie_low;
+-
+- VMW_PORT_HB_IN(
+- (MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG,
+- reply_len, si, di,
+- VMW_HYPERVISOR_HB_PORT | (channel->channel_id << 16),
+- VMW_HYPERVISOR_MAGIC, bp,
+- eax, ebx, ecx, edx, si, di);
+-
++ ebx = vmw_port_hb_in(channel, reply, reply_len,
++ !!(HIGH_WORD(ecx) & MESSAGE_STATUS_HB));
+ if ((HIGH_WORD(ebx) & MESSAGE_STATUS_SUCCESS) == 0) {
+ kfree(reply);
+
+diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
+index c22dc1e07911..c38883f748a1 100644
+--- a/drivers/hwmon/hwmon.c
++++ b/drivers/hwmon/hwmon.c
+@@ -633,7 +633,7 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
+ if (err)
+ goto free_hwmon;
+
+- if (dev && chip && chip->ops->read &&
++ if (dev && dev->of_node && chip && chip->ops->read &&
+ chip->info[0]->type == hwmon_chip &&
+ (chip->info[0]->config[0] & HWMON_C_REGISTER_TZ)) {
+ const struct hwmon_channel_info **info = chip->info;
1333 +index 2e2b5851139c..cd24b375df1e 100644
1334 +--- a/drivers/hwmon/pmbus/pmbus_core.c
1335 ++++ b/drivers/hwmon/pmbus/pmbus_core.c
1336 +@@ -1230,7 +1230,8 @@ static int pmbus_add_sensor_attrs_one(struct i2c_client *client,
1337 + const struct pmbus_driver_info *info,
1338 + const char *name,
1339 + int index, int page,
1340 +- const struct pmbus_sensor_attr *attr)
1341 ++ const struct pmbus_sensor_attr *attr,
1342 ++ bool paged)
1343 + {
1344 + struct pmbus_sensor *base;
1345 + bool upper = !!(attr->gbit & 0xff00); /* need to check STATUS_WORD */
1346 +@@ -1238,7 +1239,7 @@ static int pmbus_add_sensor_attrs_one(struct i2c_client *client,
1347 +
1348 + if (attr->label) {
1349 + ret = pmbus_add_label(data, name, index, attr->label,
1350 +- attr->paged ? page + 1 : 0);
1351 ++ paged ? page + 1 : 0);
1352 + if (ret)
1353 + return ret;
1354 + }
1355 +@@ -1271,6 +1272,30 @@ static int pmbus_add_sensor_attrs_one(struct i2c_client *client,
1356 + return 0;
1357 + }
1358 +
1359 ++static bool pmbus_sensor_is_paged(const struct pmbus_driver_info *info,
1360 ++ const struct pmbus_sensor_attr *attr)
1361 ++{
1362 ++ int p;
1363 ++
1364 ++ if (attr->paged)
1365 ++ return true;
1366 ++
1367 ++ /*
1368 ++ * Some attributes may be present on more than one page despite
1369 ++ * not being marked with the paged attribute. If that is the case,
1370 ++ * then treat the sensor as being paged and add the page suffix to the
1371 ++ * attribute name.
1372 ++ * We don't just add the paged attribute to all such attributes, in
1373 ++ * order to maintain the un-suffixed labels in the case where the
1374 ++ * attribute is only on page 0.
1375 ++ */
1376 ++ for (p = 1; p < info->pages; p++) {
1377 ++ if (info->func[p] & attr->func)
1378 ++ return true;
1379 ++ }
1380 ++ return false;
1381 ++}
1382 ++
1383 + static int pmbus_add_sensor_attrs(struct i2c_client *client,
1384 + struct pmbus_data *data,
1385 + const char *name,
1386 +@@ -1284,14 +1309,15 @@ static int pmbus_add_sensor_attrs(struct i2c_client *client,
1387 + index = 1;
1388 + for (i = 0; i < nattrs; i++) {
1389 + int page, pages;
1390 ++ bool paged = pmbus_sensor_is_paged(info, attrs);
1391 +
1392 +- pages = attrs->paged ? info->pages : 1;
1393 ++ pages = paged ? info->pages : 1;
1394 + for (page = 0; page < pages; page++) {
1395 + if (!(info->func[page] & attrs->func))
1396 + continue;
1397 + ret = pmbus_add_sensor_attrs_one(client, data, info,
1398 + name, index, page,
1399 +- attrs);
1400 ++ attrs, paged);
1401 + if (ret)
1402 + return ret;
1403 + index++;
+diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
+index d1d8d07a0714..83425b7b580c 100644
+--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
++++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
+@@ -265,6 +265,7 @@ struct st_lsm6dsx_sensor {
+ * @conf_lock: Mutex to prevent concurrent FIFO configuration update.
+ * @page_lock: Mutex to prevent concurrent memory page configuration.
+ * @fifo_mode: FIFO operating mode supported by the device.
++ * @suspend_mask: Suspended sensor bitmask.
+ * @enable_mask: Enabled sensor bitmask.
+ * @ts_sip: Total number of timestamp samples in a given pattern.
+ * @sip: Total number of samples (acc/gyro/ts) in a given pattern.
+@@ -282,6 +283,7 @@ struct st_lsm6dsx_hw {
+ struct mutex page_lock;
+
+ enum st_lsm6dsx_fifo_mode fifo_mode;
++ u8 suspend_mask;
+ u8 enable_mask;
+ u8 ts_sip;
+ u8 sip;
+diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
+index 12e29dda9b98..96986d84e418 100644
+--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
++++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
+@@ -1023,8 +1023,6 @@ static int __maybe_unused st_lsm6dsx_suspend(struct device *dev)
+ {
+ struct st_lsm6dsx_hw *hw = dev_get_drvdata(dev);
+ struct st_lsm6dsx_sensor *sensor;
+- const struct st_lsm6dsx_reg *reg;
+- unsigned int data;
+ int i, err = 0;
+
+ for (i = 0; i < ST_LSM6DSX_ID_MAX; i++) {
+@@ -1035,12 +1033,16 @@ static int __maybe_unused st_lsm6dsx_suspend(struct device *dev)
+ if (!(hw->enable_mask & BIT(sensor->id)))
+ continue;
+
+- reg = &st_lsm6dsx_odr_table[sensor->id].reg;
+- data = ST_LSM6DSX_SHIFT_VAL(0, reg->mask);
+- err = st_lsm6dsx_update_bits_locked(hw, reg->addr, reg->mask,
+- data);
++ if (sensor->id == ST_LSM6DSX_ID_EXT0 ||
++ sensor->id == ST_LSM6DSX_ID_EXT1 ||
++ sensor->id == ST_LSM6DSX_ID_EXT2)
++ err = st_lsm6dsx_shub_set_enable(sensor, false);
++ else
++ err = st_lsm6dsx_sensor_set_enable(sensor, false);
+ if (err < 0)
+ return err;
++
++ hw->suspend_mask |= BIT(sensor->id);
+ }
+
+ if (hw->fifo_mode != ST_LSM6DSX_FIFO_BYPASS)
+@@ -1060,12 +1062,19 @@ static int __maybe_unused st_lsm6dsx_resume(struct device *dev)
+ continue;
+
+ sensor = iio_priv(hw->iio_devs[i]);
+- if (!(hw->enable_mask & BIT(sensor->id)))
++ if (!(hw->suspend_mask & BIT(sensor->id)))
+ continue;
+
+- err = st_lsm6dsx_set_odr(sensor, sensor->odr);
++ if (sensor->id == ST_LSM6DSX_ID_EXT0 ||
++ sensor->id == ST_LSM6DSX_ID_EXT1 ||
++ sensor->id == ST_LSM6DSX_ID_EXT2)
++ err = st_lsm6dsx_shub_set_enable(sensor, true);
++ else
++ err = st_lsm6dsx_sensor_set_enable(sensor, true);
+ if (err < 0)
+ return err;
++
++ hw->suspend_mask &= ~BIT(sensor->id);
+ }
+
+ if (hw->enable_mask)
1480 +diff --git a/drivers/iio/temperature/mlx90632.c b/drivers/iio/temperature/mlx90632.c
1481 +index be03be719efe..f71918430f95 100644
1482 +--- a/drivers/iio/temperature/mlx90632.c
1483 ++++ b/drivers/iio/temperature/mlx90632.c
1484 +@@ -81,6 +81,8 @@
1485 + /* Magic constants */
1486 + #define MLX90632_ID_MEDICAL 0x0105 /* EEPROM DSPv5 Medical device id */
1487 + #define MLX90632_ID_CONSUMER 0x0205 /* EEPROM DSPv5 Consumer device id */
1488 ++#define MLX90632_DSP_VERSION 5 /* DSP version */
1489 ++#define MLX90632_DSP_MASK GENMASK(7, 0) /* DSP version in EE_VERSION */
1490 + #define MLX90632_RESET_CMD 0x0006 /* Reset sensor (address or global) */
1491 + #define MLX90632_REF_12 12LL /**< ResCtrlRef value of Ch 1 or Ch 2 */
1492 + #define MLX90632_REF_3 12LL /**< ResCtrlRef value of Channel 3 */
1493 +@@ -667,10 +669,13 @@ static int mlx90632_probe(struct i2c_client *client,
1494 + } else if (read == MLX90632_ID_CONSUMER) {
1495 + dev_dbg(&client->dev,
1496 + "Detected Consumer EEPROM calibration %x\n", read);
1497 ++ } else if ((read & MLX90632_DSP_MASK) == MLX90632_DSP_VERSION) {
1498 ++ dev_dbg(&client->dev,
1499 ++ "Detected Unknown EEPROM calibration %x\n", read);
1500 + } else {
1501 + dev_err(&client->dev,
1502 +- "EEPROM version mismatch %x (expected %x or %x)\n",
1503 +- read, MLX90632_ID_CONSUMER, MLX90632_ID_MEDICAL);
1504 ++ "Wrong DSP version %x (expected %x)\n",
1505 ++ read, MLX90632_DSP_VERSION);
1506 + return -EPROTONOSUPPORT;
1507 + }
1508 +
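The new fallback accepts any EEPROM id whose low byte reports DSP version 5, rather than only the two known product ids. A worked example of the mask arithmetic (the 0x0305 id is hypothetical):

    /* EE_VERSION layout assumed here: DSP version lives in bits 7:0. */
    u16 read = 0x0305;            /* hypothetical future product id */
    /* read & GENMASK(7, 0) == 0x05 == MLX90632_DSP_VERSION, so the
     * device is accepted as an "Unknown EEPROM calibration" instead
     * of being rejected outright. */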
1509 +diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
1510 +index 9784c6c0d2ec..597f2f02f3a8 100644
1511 +--- a/drivers/infiniband/hw/hfi1/chip.c
1512 ++++ b/drivers/infiniband/hw/hfi1/chip.c
1513 +@@ -9848,6 +9848,7 @@ void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
1514 +
1515 + /* disable the port */
1516 + clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
1517 ++ cancel_work_sync(&ppd->freeze_work);
1518 + }
1519 +
1520 + static inline int init_cpu_counters(struct hfi1_devdata *dd)
1521 +@@ -14027,6 +14028,19 @@ static void init_kdeth_qp(struct hfi1_devdata *dd)
1522 + RCV_BTH_QP_KDETH_QP_SHIFT);
1523 + }
1524 +
1525 ++/**
1526 ++ * hfi1_get_qp_map - read one entry from the QP mapping table
1527 ++ * @dd: device data
1528 ++ * @idx: index of the entry to read
1529 ++ */
1530 ++u8 hfi1_get_qp_map(struct hfi1_devdata *dd, u8 idx)
1531 ++{
1532 ++ u64 reg = read_csr(dd, RCV_QP_MAP_TABLE + (idx / 8) * 8);
1533 ++
1534 ++ reg >>= (idx % 8) * 8;
1535 ++ return reg;
1536 ++}
1537 ++
1538 + /**
1539 + * init_qpmap_table
1540 + * @dd - device data
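The new hfi1_get_qp_map() helper treats RCV_QP_MAP_TABLE as an array of 8-bit entries packed eight per 64-bit CSR: idx / 8 picks the register and (idx % 8) * 8 the byte lane, with the u8 return type masking off the rest. A worked example:

    /* idx = 13: second CSR (byte offset 8), byte lane 5 (bits 47:40). */
    u8 idx = 13;
    u32 csr_off = (idx / 8) * 8;       /* 1 * 8 = 8 bytes in */
    u32 shift   = (idx % 8) * 8;       /* 5 * 8 = 40 */
    /* entry = (u8)(read_csr(dd, RCV_QP_MAP_TABLE + csr_off) >> shift); */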
1541 +diff --git a/drivers/infiniband/hw/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h
1542 +index 6c27c1c6a868..a5c61400b295 100644
1543 +--- a/drivers/infiniband/hw/hfi1/chip.h
1544 ++++ b/drivers/infiniband/hw/hfi1/chip.h
1545 +@@ -1442,6 +1442,7 @@ void clear_all_interrupts(struct hfi1_devdata *dd);
1546 + void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr);
1547 + void remap_sdma_interrupts(struct hfi1_devdata *dd, int engine, int msix_intr);
1548 + void reset_interrupts(struct hfi1_devdata *dd);
1549 ++u8 hfi1_get_qp_map(struct hfi1_devdata *dd, u8 idx);
1550 +
1551 + /*
1552 + * Interrupt source table.
1553 +diff --git a/drivers/infiniband/hw/hfi1/fault.c b/drivers/infiniband/hw/hfi1/fault.c
1554 +index 3fd3315d0fb0..93613e5def9b 100644
1555 +--- a/drivers/infiniband/hw/hfi1/fault.c
1556 ++++ b/drivers/infiniband/hw/hfi1/fault.c
1557 +@@ -153,6 +153,7 @@ static ssize_t fault_opcodes_write(struct file *file, const char __user *buf,
1558 + char *dash;
1559 + unsigned long range_start, range_end, i;
1560 + bool remove = false;
1561 ++ unsigned long bound = 1U << BITS_PER_BYTE;
1562 +
1563 + end = strchr(ptr, ',');
1564 + if (end)
1565 +@@ -178,6 +179,10 @@ static ssize_t fault_opcodes_write(struct file *file, const char __user *buf,
1566 + BITS_PER_BYTE);
1567 + break;
1568 + }
1569 ++ /* Reject ranges that would overrun the opcodes bitmap */
1570 ++ if (range_start >= bound || range_end >= bound)
1571 ++ break;
1572 ++
1573 + for (i = range_start; i <= range_end; i++) {
1574 + if (remove)
1575 + clear_bit(i, fault->opcodes);
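The added bound check keeps user-supplied opcode ranges inside the bitmap before any set_bit()/clear_bit() call. Sizing sketch, assuming the map is declared with one bit per 8-bit opcode value:

    DECLARE_BITMAP(opcodes, 1 << BITS_PER_BYTE);   /* 256 bits */

    unsigned long bound = 1U << BITS_PER_BYTE;     /* 256 */
    /* e.g. set_bit(300, opcodes) would write past the map, so any
     * range_start/range_end >= bound must be rejected first. */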
1576 +diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
1577 +index b0110728f541..70828de7436b 100644
1578 +--- a/drivers/infiniband/hw/hfi1/sdma.c
1579 ++++ b/drivers/infiniband/hw/hfi1/sdma.c
1580 +@@ -410,10 +410,7 @@ static void sdma_flush(struct sdma_engine *sde)
1581 + sdma_flush_descq(sde);
1582 + spin_lock_irqsave(&sde->flushlist_lock, flags);
1583 + /* copy flush list */
1584 +- list_for_each_entry_safe(txp, txp_next, &sde->flushlist, list) {
1585 +- list_del_init(&txp->list);
1586 +- list_add_tail(&txp->list, &flushlist);
1587 +- }
1588 ++ list_splice_init(&sde->flushlist, &flushlist);
1589 + spin_unlock_irqrestore(&sde->flushlist_lock, flags);
1590 + /* flush from flush list */
1591 + list_for_each_entry_safe(txp, txp_next, &flushlist, list)
1592 +@@ -2413,7 +2410,7 @@ unlock_noconn:
1593 + list_add_tail(&tx->list, &sde->flushlist);
1594 + spin_unlock(&sde->flushlist_lock);
1595 + iowait_inc_wait_count(wait, tx->num_desc);
1596 +- schedule_work(&sde->flush_worker);
1597 ++ queue_work_on(sde->cpu, system_highpri_wq, &sde->flush_worker);
1598 + ret = -ECOMM;
1599 + goto unlock;
1600 + nodesc:
1601 +@@ -2511,7 +2508,7 @@ unlock_noconn:
1602 + iowait_inc_wait_count(wait, tx->num_desc);
1603 + }
1604 + spin_unlock(&sde->flushlist_lock);
1605 +- schedule_work(&sde->flush_worker);
1606 ++ queue_work_on(sde->cpu, system_highpri_wq, &sde->flush_worker);
1607 + ret = -ECOMM;
1608 + goto update_tail;
1609 + nodesc:
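The flush-list change swaps the hand-rolled del/add loop for list_splice_init(), which detaches the whole list onto a private head in O(1) while the lock is held; entries are then processed without the lock. The general pattern, with sdma_flush_one() as a hypothetical per-entry handler:

    struct sdma_txreq *txp, *txp_next;
    unsigned long flags;
    LIST_HEAD(flushlist);              /* private, on-stack list head */

    spin_lock_irqsave(&sde->flushlist_lock, flags);
    list_splice_init(&sde->flushlist, &flushlist); /* O(1); source left empty */
    spin_unlock_irqrestore(&sde->flushlist_lock, flags);

    list_for_each_entry_safe(txp, txp_next, &flushlist, list)
        sdma_flush_one(txp);           /* hypothetical handler */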
1610 +diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c
1611 +index 43cbce7a19ea..e0851f01a804 100644
1612 +--- a/drivers/infiniband/hw/hfi1/tid_rdma.c
1613 ++++ b/drivers/infiniband/hw/hfi1/tid_rdma.c
1614 +@@ -305,9 +305,7 @@ static struct hfi1_ctxtdata *qp_to_rcd(struct rvt_dev_info *rdi,
1615 + if (qp->ibqp.qp_num == 0)
1616 + ctxt = 0;
1617 + else
1618 +- ctxt = ((qp->ibqp.qp_num >> dd->qos_shift) %
1619 +- (dd->n_krcv_queues - 1)) + 1;
1620 +-
1621 ++ ctxt = hfi1_get_qp_map(dd, qp->ibqp.qp_num >> dd->qos_shift);
1622 + return dd->rcd[ctxt];
1623 + }
1624 +
1625 +diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
1626 +index 0cd71ce7cc71..3592a9ec155e 100644
1627 +--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.c
1628 ++++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
1629 +@@ -324,6 +324,9 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
1630 + u32 *tidlist = NULL;
1631 + struct tid_user_buf *tidbuf;
1632 +
1633 ++ if (!PAGE_ALIGNED(tinfo->vaddr))
1634 ++ return -EINVAL;
1635 ++
1636 + tidbuf = kzalloc(sizeof(*tidbuf), GFP_KERNEL);
1637 + if (!tidbuf)
1638 + return -ENOMEM;
1639 +diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
1640 +index 8bfbc6d7ea34..fd754a16475a 100644
1641 +--- a/drivers/infiniband/hw/hfi1/user_sdma.c
1642 ++++ b/drivers/infiniband/hw/hfi1/user_sdma.c
1643 +@@ -130,20 +130,16 @@ static int defer_packet_queue(
1644 + {
1645 + struct hfi1_user_sdma_pkt_q *pq =
1646 + container_of(wait->iow, struct hfi1_user_sdma_pkt_q, busy);
1647 +- struct user_sdma_txreq *tx =
1648 +- container_of(txreq, struct user_sdma_txreq, txreq);
1649 +
1650 +- if (sdma_progress(sde, seq, txreq)) {
1651 +- if (tx->busycount++ < MAX_DEFER_RETRY_COUNT)
1652 +- goto eagain;
1653 +- }
1654 ++ write_seqlock(&sde->waitlock);
1655 ++ if (sdma_progress(sde, seq, txreq))
1656 ++ goto eagain;
1657 + /*
1658 + * We are assuming that if the list is enqueued somewhere, it
1659 + * is to the dmawait list since that is the only place where
1660 + * it is supposed to be enqueued.
1661 + */
1662 + xchg(&pq->state, SDMA_PKT_Q_DEFERRED);
1663 +- write_seqlock(&sde->waitlock);
1664 + if (list_empty(&pq->busy.list)) {
1665 + iowait_get_priority(&pq->busy);
1666 + iowait_queue(pkts_sent, &pq->busy, &sde->dmawait);
1667 +@@ -151,6 +147,7 @@ static int defer_packet_queue(
1668 + write_sequnlock(&sde->waitlock);
1669 + return -EBUSY;
1670 + eagain:
1671 ++ write_sequnlock(&sde->waitlock);
1672 + return -EAGAIN;
1673 + }
1674 +
1675 +@@ -804,7 +801,6 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, u16 maxpkts)
1676 +
1677 + tx->flags = 0;
1678 + tx->req = req;
1679 +- tx->busycount = 0;
1680 + INIT_LIST_HEAD(&tx->list);
1681 +
1682 + /*
1683 +diff --git a/drivers/infiniband/hw/hfi1/user_sdma.h b/drivers/infiniband/hw/hfi1/user_sdma.h
1684 +index 14dfd757dafd..4d8510b0fc38 100644
1685 +--- a/drivers/infiniband/hw/hfi1/user_sdma.h
1686 ++++ b/drivers/infiniband/hw/hfi1/user_sdma.h
1687 +@@ -245,7 +245,6 @@ struct user_sdma_txreq {
1688 + struct list_head list;
1689 + struct user_sdma_request *req;
1690 + u16 flags;
1691 +- unsigned int busycount;
1692 + u16 seqnum;
1693 + };
1694 +
1695 +diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
1696 +index 55a56b3d7f83..ea68eeba3f22 100644
1697 +--- a/drivers/infiniband/hw/hfi1/verbs.c
1698 ++++ b/drivers/infiniband/hw/hfi1/verbs.c
1699 +@@ -1355,8 +1355,6 @@ static void hfi1_fill_device_attr(struct hfi1_devdata *dd)
1700 + rdi->dparms.props.max_cq = hfi1_max_cqs;
1701 + rdi->dparms.props.max_ah = hfi1_max_ahs;
1702 + rdi->dparms.props.max_cqe = hfi1_max_cqes;
1703 +- rdi->dparms.props.max_mr = rdi->lkey_table.max;
1704 +- rdi->dparms.props.max_fmr = rdi->lkey_table.max;
1705 + rdi->dparms.props.max_map_per_fmr = 32767;
1706 + rdi->dparms.props.max_pd = hfi1_max_pds;
1707 + rdi->dparms.props.max_qp_rd_atom = HFI1_MAX_RDMA_ATOMIC;
1708 +diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.c b/drivers/infiniband/hw/hfi1/verbs_txreq.c
1709 +index c4ab2d5b4502..8f766dd3f61c 100644
1710 +--- a/drivers/infiniband/hw/hfi1/verbs_txreq.c
1711 ++++ b/drivers/infiniband/hw/hfi1/verbs_txreq.c
1712 +@@ -100,7 +100,7 @@ struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
1713 + if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
1714 + struct hfi1_qp_priv *priv;
1715 +
1716 +- tx = kmem_cache_alloc(dev->verbs_txreq_cache, GFP_ATOMIC);
1717 ++ tx = kmem_cache_alloc(dev->verbs_txreq_cache, VERBS_TXREQ_GFP);
1718 + if (tx)
1719 + goto out;
1720 + priv = qp->priv;
1721 +diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.h b/drivers/infiniband/hw/hfi1/verbs_txreq.h
1722 +index b002e96eb335..bfa6e081cb56 100644
1723 +--- a/drivers/infiniband/hw/hfi1/verbs_txreq.h
1724 ++++ b/drivers/infiniband/hw/hfi1/verbs_txreq.h
1725 +@@ -72,6 +72,7 @@ struct hfi1_ibdev;
1726 + struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
1727 + struct rvt_qp *qp);
1728 +
1729 ++#define VERBS_TXREQ_GFP (GFP_ATOMIC | __GFP_NOWARN)
1730 + static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev,
1731 + struct rvt_qp *qp)
1732 + __must_hold(&qp->slock)
1733 +@@ -79,7 +80,7 @@ static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev,
1734 + struct verbs_txreq *tx;
1735 + struct hfi1_qp_priv *priv = qp->priv;
1736 +
1737 +- tx = kmem_cache_alloc(dev->verbs_txreq_cache, GFP_ATOMIC);
1738 ++ tx = kmem_cache_alloc(dev->verbs_txreq_cache, VERBS_TXREQ_GFP);
1739 + if (unlikely(!tx)) {
1740 + /* call slow path to get the lock */
1741 + tx = __get_txreq(dev, qp);
1742 +diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
1743 +index 5ff32d32c61c..2c4e569ce438 100644
1744 +--- a/drivers/infiniband/hw/qib/qib_verbs.c
1745 ++++ b/drivers/infiniband/hw/qib/qib_verbs.c
1746 +@@ -1459,8 +1459,6 @@ static void qib_fill_device_attr(struct qib_devdata *dd)
1747 + rdi->dparms.props.max_cq = ib_qib_max_cqs;
1748 + rdi->dparms.props.max_cqe = ib_qib_max_cqes;
1749 + rdi->dparms.props.max_ah = ib_qib_max_ahs;
1750 +- rdi->dparms.props.max_mr = rdi->lkey_table.max;
1751 +- rdi->dparms.props.max_fmr = rdi->lkey_table.max;
1752 + rdi->dparms.props.max_map_per_fmr = 32767;
1753 + rdi->dparms.props.max_qp_rd_atom = QIB_MAX_RDMA_ATOMIC;
1754 + rdi->dparms.props.max_qp_init_rd_atom = 255;
1755 +diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c
1756 +index 0bb6e39dd03a..b04d2173e3f4 100644
1757 +--- a/drivers/infiniband/sw/rdmavt/mr.c
1758 ++++ b/drivers/infiniband/sw/rdmavt/mr.c
1759 +@@ -96,6 +96,8 @@ int rvt_driver_mr_init(struct rvt_dev_info *rdi)
1760 + for (i = 0; i < rdi->lkey_table.max; i++)
1761 + RCU_INIT_POINTER(rdi->lkey_table.table[i], NULL);
1762 +
1763 ++ rdi->dparms.props.max_mr = rdi->lkey_table.max;
1764 ++ rdi->dparms.props.max_fmr = rdi->lkey_table.max;
1765 + return 0;
1766 + }
1767 +
1768 +diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
1769 +index a34b9a2a32b6..a77436ee5ff7 100644
1770 +--- a/drivers/infiniband/sw/rdmavt/qp.c
1771 ++++ b/drivers/infiniband/sw/rdmavt/qp.c
1772 +@@ -594,7 +594,8 @@ static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
1773 + offset = qpt->incr | ((offset & 1) ^ 1);
1774 + }
1775 + /* there can be no set bits in low-order QoS bits */
1776 +- WARN_ON(offset & (BIT(rdi->dparms.qos_shift) - 1));
1777 ++ WARN_ON(rdi->dparms.qos_shift > 1 &&
1778 ++ offset & ((BIT(rdi->dparms.qos_shift - 1) - 1) << 1));
1779 + qpn = mk_qpn(qpt, map, offset);
1780 + }
1781 +
1782 +diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
1783 +index 26ec603fe220..83d1499fe021 100644
1784 +--- a/drivers/input/misc/uinput.c
1785 ++++ b/drivers/input/misc/uinput.c
1786 +@@ -1051,13 +1051,31 @@ static long uinput_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1787 +
1788 + #ifdef CONFIG_COMPAT
1789 +
1790 +-#define UI_SET_PHYS_COMPAT _IOW(UINPUT_IOCTL_BASE, 108, compat_uptr_t)
1791 ++/*
1792 ++ * These IOCTLs change their size and thus their numbers between
1793 ++ * 32 and 64 bits.
1794 ++ */
1795 ++#define UI_SET_PHYS_COMPAT \
1796 ++ _IOW(UINPUT_IOCTL_BASE, 108, compat_uptr_t)
1797 ++#define UI_BEGIN_FF_UPLOAD_COMPAT \
1798 ++ _IOWR(UINPUT_IOCTL_BASE, 200, struct uinput_ff_upload_compat)
1799 ++#define UI_END_FF_UPLOAD_COMPAT \
1800 ++ _IOW(UINPUT_IOCTL_BASE, 201, struct uinput_ff_upload_compat)
1801 +
1802 + static long uinput_compat_ioctl(struct file *file,
1803 + unsigned int cmd, unsigned long arg)
1804 + {
1805 +- if (cmd == UI_SET_PHYS_COMPAT)
1806 ++ switch (cmd) {
1807 ++ case UI_SET_PHYS_COMPAT:
1808 + cmd = UI_SET_PHYS;
1809 ++ break;
1810 ++ case UI_BEGIN_FF_UPLOAD_COMPAT:
1811 ++ cmd = UI_BEGIN_FF_UPLOAD;
1812 ++ break;
1813 ++ case UI_END_FF_UPLOAD_COMPAT:
1814 ++ cmd = UI_END_FF_UPLOAD;
1815 ++ break;
1816 ++ }
1817 +
1818 + return uinput_ioctl_handler(file, cmd, arg, compat_ptr(arg));
1819 + }
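The reason the FF upload ioctls need compat numbers at all is that _IOW()/_IOWR() fold sizeof(argument type) into the command value, and struct uinput_ff_upload contains pointers whose size differs between 32- and 64-bit userspace. An illustrative sketch (these structs are simplified, not the real uinput layouts):

    struct up_native { void *effect; int id; };          /* 16 bytes on 64-bit */
    struct up_compat { compat_uptr_t effect; int id; };  /* 8 bytes */

    #define CMD_NATIVE _IOW('U', 201, struct up_native)
    #define CMD_COMPAT _IOW('U', 201, struct up_compat)
    /* CMD_NATIVE != CMD_COMPAT purely because of the size field, so the
     * compat handler above maps each *_COMPAT number onto its native
     * counterpart before dispatching. */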
1820 +diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
1821 +index b6da0c1267e3..8e6077d8e434 100644
1822 +--- a/drivers/input/mouse/synaptics.c
1823 ++++ b/drivers/input/mouse/synaptics.c
1824 +@@ -179,6 +179,8 @@ static const char * const smbus_pnp_ids[] = {
1825 + "LEN0096", /* X280 */
1826 + "LEN0097", /* X280 -> ALPS trackpoint */
1827 + "LEN200f", /* T450s */
1828 ++ "LEN2054", /* E480 */
1829 ++ "LEN2055", /* E580 */
1830 + "SYN3052", /* HP EliteBook 840 G4 */
1831 + "SYN3221", /* HP 15-ay000 */
1832 + NULL
1833 +diff --git a/drivers/input/touchscreen/silead.c b/drivers/input/touchscreen/silead.c
1834 +index 09241d4cdebc..06f0eb04a8fd 100644
1835 +--- a/drivers/input/touchscreen/silead.c
1836 ++++ b/drivers/input/touchscreen/silead.c
1837 +@@ -617,6 +617,7 @@ static const struct acpi_device_id silead_ts_acpi_match[] = {
1838 + { "MSSL1680", 0 },
1839 + { "MSSL0001", 0 },
1840 + { "MSSL0002", 0 },
1841 ++ { "MSSL0017", 0 },
1842 + { }
1843 + };
1844 + MODULE_DEVICE_TABLE(acpi, silead_ts_acpi_match);
1845 +diff --git a/drivers/misc/habanalabs/memory.c b/drivers/misc/habanalabs/memory.c
1846 +index fadaf557603f..425442819d31 100644
1847 +--- a/drivers/misc/habanalabs/memory.c
1848 ++++ b/drivers/misc/habanalabs/memory.c
1849 +@@ -675,11 +675,6 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
1850 +
1851 + total_npages += npages;
1852 +
1853 +- if (first) {
1854 +- first = false;
1855 +- dma_addr &= PAGE_MASK_2MB;
1856 +- }
1857 +-
1858 + if ((npages % PGS_IN_2MB_PAGE) ||
1859 + (dma_addr & (PAGE_SIZE_2MB - 1)))
1860 + is_huge_page_opt = false;
1861 +@@ -704,7 +699,6 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
1862 + phys_pg_pack->total_size = total_npages * page_size;
1863 +
1864 + j = 0;
1865 +- first = true;
1866 + for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents, i) {
1867 + npages = get_sg_info(sg, &dma_addr);
1868 +
1869 +diff --git a/drivers/misc/lkdtm/usercopy.c b/drivers/misc/lkdtm/usercopy.c
1870 +index d5a0e7f1813b..e172719dd86d 100644
1871 +--- a/drivers/misc/lkdtm/usercopy.c
1872 ++++ b/drivers/misc/lkdtm/usercopy.c
1873 +@@ -324,14 +324,16 @@ free_user:
1874 +
1875 + void lkdtm_USERCOPY_KERNEL_DS(void)
1876 + {
1877 +- char __user *user_ptr = (char __user *)ERR_PTR(-EINVAL);
1878 ++ char __user *user_ptr =
1879 ++ (char __user *)(0xFUL << (sizeof(unsigned long) * 8 - 4));
1880 + mm_segment_t old_fs = get_fs();
1881 + char buf[10] = {0};
1882 +
1883 +- pr_info("attempting copy_to_user on unmapped kernel address\n");
1884 ++ pr_info("attempting copy_to_user() to noncanonical address: %px\n",
1885 ++ user_ptr);
1886 + set_fs(KERNEL_DS);
1887 +- if (copy_to_user(user_ptr, buf, sizeof(buf)))
1888 +- pr_info("copy_to_user un unmapped kernel address failed\n");
1889 ++ if (copy_to_user(user_ptr, buf, sizeof(buf)) == 0)
1890 ++ pr_err("copy_to_user() to noncanonical address succeeded!?\n");
1891 + set_fs(old_fs);
1892 + }
1893 +
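The replacement target address is computed rather than taken from ERR_PTR(), so the test now hits a deliberately noncanonical address. Worked out for a 64-bit build:

    /* sizeof(unsigned long) * 8 - 4 == 60 on 64-bit, so: */
    unsigned long addr = 0xFUL << 60;  /* 0xF000000000000000 */
    /* On x86_64, canonical addresses sign-extend bit 47 (bit 56 with
     * LA57); this value does neither, can never be mapped, and so
     * copy_to_user() must fail -- success would be the bug. */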
1894 +diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
1895 +index 6db36dc870b5..9020cb2490f7 100644
1896 +--- a/drivers/mmc/core/core.c
1897 ++++ b/drivers/mmc/core/core.c
1898 +@@ -144,8 +144,9 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
1899 + int err = cmd->error;
1900 +
1901 + /* Flag re-tuning needed on CRC errors */
1902 +- if ((cmd->opcode != MMC_SEND_TUNING_BLOCK &&
1903 +- cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200) &&
1904 ++ if (cmd->opcode != MMC_SEND_TUNING_BLOCK &&
1905 ++ cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200 &&
1906 ++ !host->retune_crc_disable &&
1907 + (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) ||
1908 + (mrq->data && mrq->data->error == -EILSEQ) ||
1909 + (mrq->stop && mrq->stop->error == -EILSEQ)))
1910 +diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
1911 +index 6718fc8bb40f..0f51e774183e 100644
1912 +--- a/drivers/mmc/core/sdio.c
1913 ++++ b/drivers/mmc/core/sdio.c
1914 +@@ -941,6 +941,10 @@ static int mmc_sdio_pre_suspend(struct mmc_host *host)
1915 + */
1916 + static int mmc_sdio_suspend(struct mmc_host *host)
1917 + {
1918 ++ /* Prevent processing of SDIO IRQs in suspended state. */
1919 ++ mmc_card_set_suspended(host->card);
1920 ++ cancel_delayed_work_sync(&host->sdio_irq_work);
1921 ++
1922 + mmc_claim_host(host);
1923 +
1924 + if (mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host))
1925 +@@ -989,13 +993,20 @@ static int mmc_sdio_resume(struct mmc_host *host)
1926 + err = sdio_enable_4bit_bus(host->card);
1927 + }
1928 +
1929 +- if (!err && host->sdio_irqs) {
1930 ++ if (err)
1931 ++ goto out;
1932 ++
1933 ++ /* Allow SDIO IRQs to be processed again. */
1934 ++ mmc_card_clr_suspended(host->card);
1935 ++
1936 ++ if (host->sdio_irqs) {
1937 + if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD))
1938 + wake_up_process(host->sdio_irq_thread);
1939 + else if (host->caps & MMC_CAP_SDIO_IRQ)
1940 + host->ops->enable_sdio_irq(host, 1);
1941 + }
1942 +
1943 ++out:
1944 + mmc_release_host(host);
1945 +
1946 + host->pm_flags &= ~MMC_PM_KEEP_POWER;
1947 +diff --git a/drivers/mmc/core/sdio_io.c b/drivers/mmc/core/sdio_io.c
1948 +index 3f67fbbe0d75..df887ced9666 100644
1949 +--- a/drivers/mmc/core/sdio_io.c
1950 ++++ b/drivers/mmc/core/sdio_io.c
1951 +@@ -19,6 +19,7 @@
1952 + #include "sdio_ops.h"
1953 + #include "core.h"
1954 + #include "card.h"
1955 ++#include "host.h"
1956 +
1957 + /**
1958 + * sdio_claim_host - exclusively claim a bus for a certain SDIO function
1959 +@@ -738,3 +739,79 @@ int sdio_set_host_pm_flags(struct sdio_func *func, mmc_pm_flag_t flags)
1960 + return 0;
1961 + }
1962 + EXPORT_SYMBOL_GPL(sdio_set_host_pm_flags);
1963 ++
1964 ++/**
1965 ++ * sdio_retune_crc_disable - temporarily disable retuning on CRC errors
1966 ++ * @func: SDIO function attached to host
1967 ++ *
1968 ++ * If the SDIO card is known to be in a state where it might produce
1969 ++ * CRC errors on the bus in response to commands (like if we know it is
1970 ++ * transitioning between power states), an SDIO function driver can
1971 ++ * call this function to temporarily disable the SD/MMC core behavior of
1972 ++ * triggering an automatic retuning.
1973 ++ *
1974 ++ * This function should be called while the host is claimed and the host
1975 ++ * should remain claimed until sdio_retune_crc_enable() is called.
1976 ++ * Specifically, the expected sequence of calls is:
1977 ++ * - sdio_claim_host()
1978 ++ * - sdio_retune_crc_disable()
1979 ++ * - some number of calls like sdio_writeb() and sdio_readb()
1980 ++ * - sdio_retune_crc_enable()
1981 ++ * - sdio_release_host()
1982 ++ */
1983 ++void sdio_retune_crc_disable(struct sdio_func *func)
1984 ++{
1985 ++ func->card->host->retune_crc_disable = true;
1986 ++}
1987 ++EXPORT_SYMBOL_GPL(sdio_retune_crc_disable);
1988 ++
1989 ++/**
1990 ++ * sdio_retune_crc_enable - re-enable retuning on CRC errors
1991 ++ * @func: SDIO function attached to host
1992 ++ *
1993 ++ * This is the complement to sdio_retune_crc_disable().
1994 ++ */
1995 ++void sdio_retune_crc_enable(struct sdio_func *func)
1996 ++{
1997 ++ func->card->host->retune_crc_disable = false;
1998 ++}
1999 ++EXPORT_SYMBOL_GPL(sdio_retune_crc_enable);
2000 ++
2001 ++/**
2002 ++ * sdio_retune_hold_now - start deferring retuning requests till release
2003 ++ * @func: SDIO function attached to host
2004 ++ *
2005 ++ * This function can be called if it's currently a bad time to do
2006 ++ * a retune of the SDIO card. Retune requests made during this time
2007 ++ * will be held and we'll actually do the retune sometime after the
2008 ++ * release.
2009 ++ *
2010 ++ * This function could be useful if an SDIO card is in a power state
2011 ++ * where it can respond to a small subset of commands that doesn't
2012 ++ * include the retuning command. Care should be taken when using
2013 ++ * this function since (presumably) the retuning request we might be
2014 ++ * deferring was made for a good reason.
2015 ++ *
2016 ++ * This function should be called while the host is claimed.
2017 ++ */
2018 ++void sdio_retune_hold_now(struct sdio_func *func)
2019 ++{
2020 ++ mmc_retune_hold_now(func->card->host);
2021 ++}
2022 ++EXPORT_SYMBOL_GPL(sdio_retune_hold_now);
2023 ++
2024 ++/**
2025 ++ * sdio_retune_release - signal that it's OK to retune now
2026 ++ * @func: SDIO function attached to host
2027 ++ *
2028 ++ * This is the complement to sdio_retune_hold_now(). Calling this
2029 ++ * function won't make a retune happen right away but will allow
2030 ++ * them to be scheduled normally.
2031 ++ *
2032 ++ * This function should be called while the host is claimed.
2033 ++ */
2034 ++void sdio_retune_release(struct sdio_func *func)
2035 ++{
2036 ++ mmc_retune_release(func->card->host);
2037 ++}
2038 ++EXPORT_SYMBOL_GPL(sdio_retune_release);
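Taken together, the four new helpers are meant to bracket I/O that is known to provoke CRC errors, exactly as the kernel-doc sequence describes. A usage sketch (SLEEP_CSR and the surrounding driver context are illustrative; error handling elided):

    sdio_claim_host(func);
    sdio_retune_crc_disable(func);     /* CRC errors won't trigger retune */
    sdio_retune_hold_now(func);        /* and retune requests are deferred */

    sdio_writeb(func, val, SLEEP_CSR, &err);
    rd = sdio_readb(func, SLEEP_CSR, &err);

    sdio_retune_release(func);         /* deferred retunes may run later */
    sdio_retune_crc_enable(func);
    sdio_release_host(func);

The brcmfmac hunk further below uses this same bracket around its KSO handshake.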
2039 +diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c
2040 +index 7ca7b99413f0..b299a24d33f9 100644
2041 +--- a/drivers/mmc/core/sdio_irq.c
2042 ++++ b/drivers/mmc/core/sdio_irq.c
2043 +@@ -38,6 +38,10 @@ static int process_sdio_pending_irqs(struct mmc_host *host)
2044 + unsigned char pending;
2045 + struct sdio_func *func;
2046 +
2047 ++ /* Don't process SDIO IRQs if the card is suspended. */
2048 ++ if (mmc_card_suspended(card))
2049 ++ return 0;
2050 ++
2051 + /*
2052 + * Optimization, if there is only 1 function interrupt registered
2053 + * and we know an IRQ was signaled then call irq handler directly.
2054 +diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
2055 +index 833ef0590af8..b33f2c90d8d8 100644
2056 +--- a/drivers/mmc/host/mtk-sd.c
2057 ++++ b/drivers/mmc/host/mtk-sd.c
2058 +@@ -1003,6 +1003,8 @@ static void msdc_request_done(struct msdc_host *host, struct mmc_request *mrq)
2059 + msdc_track_cmd_data(host, mrq->cmd, mrq->data);
2060 + if (mrq->data)
2061 + msdc_unprepare_data(host, mrq);
2062 ++ if (host->error)
2063 ++ msdc_reset_hw(host);
2064 + mmc_request_done(host->mmc, mrq);
2065 + }
2066 +
2067 +@@ -1355,24 +1357,25 @@ static void msdc_request_timeout(struct work_struct *work)
2068 + }
2069 + }
2070 +
2071 +-static void __msdc_enable_sdio_irq(struct mmc_host *mmc, int enb)
2072 ++static void __msdc_enable_sdio_irq(struct msdc_host *host, int enb)
2073 + {
2074 +- unsigned long flags;
2075 +- struct msdc_host *host = mmc_priv(mmc);
2076 +-
2077 +- spin_lock_irqsave(&host->lock, flags);
2078 +- if (enb)
2079 ++ if (enb) {
2080 + sdr_set_bits(host->base + MSDC_INTEN, MSDC_INTEN_SDIOIRQ);
2081 +- else
2082 ++ sdr_set_bits(host->base + SDC_CFG, SDC_CFG_SDIOIDE);
2083 ++ } else {
2084 + sdr_clr_bits(host->base + MSDC_INTEN, MSDC_INTEN_SDIOIRQ);
2085 +- spin_unlock_irqrestore(&host->lock, flags);
2086 ++ sdr_clr_bits(host->base + SDC_CFG, SDC_CFG_SDIOIDE);
2087 ++ }
2088 + }
2089 +
2090 + static void msdc_enable_sdio_irq(struct mmc_host *mmc, int enb)
2091 + {
2092 ++ unsigned long flags;
2093 + struct msdc_host *host = mmc_priv(mmc);
2094 +
2095 +- __msdc_enable_sdio_irq(mmc, enb);
2096 ++ spin_lock_irqsave(&host->lock, flags);
2097 ++ __msdc_enable_sdio_irq(host, enb);
2098 ++ spin_unlock_irqrestore(&host->lock, flags);
2099 +
2100 + if (enb)
2101 + pm_runtime_get_noresume(host->dev);
2102 +@@ -1394,6 +1397,8 @@ static irqreturn_t msdc_irq(int irq, void *dev_id)
2103 + spin_lock_irqsave(&host->lock, flags);
2104 + events = readl(host->base + MSDC_INT);
2105 + event_mask = readl(host->base + MSDC_INTEN);
2106 ++ if ((events & event_mask) & MSDC_INT_SDIOIRQ)
2107 ++ __msdc_enable_sdio_irq(host, 0);
2108 + /* clear interrupts */
2109 + writel(events & event_mask, host->base + MSDC_INT);
2110 +
2111 +@@ -1402,10 +1407,8 @@ static irqreturn_t msdc_irq(int irq, void *dev_id)
2112 + data = host->data;
2113 + spin_unlock_irqrestore(&host->lock, flags);
2114 +
2115 +- if ((events & event_mask) & MSDC_INT_SDIOIRQ) {
2116 +- __msdc_enable_sdio_irq(host->mmc, 0);
2117 ++ if ((events & event_mask) & MSDC_INT_SDIOIRQ)
2118 + sdio_signal_irq(host->mmc);
2119 +- }
2120 +
2121 + if (!(events & (event_mask & ~MSDC_INT_SDIOIRQ)))
2122 + break;
2123 +@@ -1528,10 +1531,7 @@ static void msdc_init_hw(struct msdc_host *host)
2124 + sdr_set_bits(host->base + SDC_CFG, SDC_CFG_SDIO);
2125 +
2126 + /* Config SDIO device detect interrupt function */
2127 +- if (host->mmc->caps & MMC_CAP_SDIO_IRQ)
2128 +- sdr_set_bits(host->base + SDC_CFG, SDC_CFG_SDIOIDE);
2129 +- else
2130 +- sdr_clr_bits(host->base + SDC_CFG, SDC_CFG_SDIOIDE);
2131 ++ sdr_clr_bits(host->base + SDC_CFG, SDC_CFG_SDIOIDE);
2132 +
2133 + /* Configure to default data timeout */
2134 + sdr_set_field(host->base + SDC_CFG, SDC_CFG_DTOC, 3);
2135 +@@ -2052,7 +2052,12 @@ static void msdc_hw_reset(struct mmc_host *mmc)
2136 +
2137 + static void msdc_ack_sdio_irq(struct mmc_host *mmc)
2138 + {
2139 +- __msdc_enable_sdio_irq(mmc, 1);
2140 ++ unsigned long flags;
2141 ++ struct msdc_host *host = mmc_priv(mmc);
2142 ++
2143 ++ spin_lock_irqsave(&host->lock, flags);
2144 ++ __msdc_enable_sdio_irq(host, 1);
2145 ++ spin_unlock_irqrestore(&host->lock, flags);
2146 + }
2147 +
2148 + static const struct mmc_host_ops mt_msdc_ops = {
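The SDIO-IRQ rework above also follows the usual __helper locking convention: __msdc_enable_sdio_irq() now assumes host->lock is held, and every caller (msdc_enable_sdio_irq(), msdc_irq(), msdc_ack_sdio_irq()) provides it. In miniature, with hypothetical names for the pattern rather than the driver's symbols:

    static void __toggle_irq(struct msdc_host *host, int enb)
    {
        /* caller holds host->lock; touch MSDC_INTEN / SDC_CFG here */
    }

    static void toggle_irq(struct msdc_host *host, int enb)
    {
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);
        __toggle_irq(host, enb);
        spin_unlock_irqrestore(&host->lock, flags);
    }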
2149 +diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
2150 +index 8742e27e4e8b..c42e442ba7ab 100644
2151 +--- a/drivers/mmc/host/renesas_sdhi_core.c
2152 ++++ b/drivers/mmc/host/renesas_sdhi_core.c
2153 +@@ -620,11 +620,16 @@ static const struct renesas_sdhi_quirks sdhi_quirks_h3_es2 = {
2154 + .hs400_4taps = true,
2155 + };
2156 +
2157 ++static const struct renesas_sdhi_quirks sdhi_quirks_nohs400 = {
2158 ++ .hs400_disabled = true,
2159 ++};
2160 ++
2161 + static const struct soc_device_attribute sdhi_quirks_match[] = {
2162 + { .soc_id = "r8a7795", .revision = "ES1.*", .data = &sdhi_quirks_h3_m3w_es1 },
2163 + { .soc_id = "r8a7795", .revision = "ES2.0", .data = &sdhi_quirks_h3_es2 },
2164 +- { .soc_id = "r8a7796", .revision = "ES1.0", .data = &sdhi_quirks_h3_m3w_es1 },
2165 +- { .soc_id = "r8a7796", .revision = "ES1.1", .data = &sdhi_quirks_h3_m3w_es1 },
2166 ++ { .soc_id = "r8a7796", .revision = "ES1.[012]", .data = &sdhi_quirks_h3_m3w_es1 },
2167 ++ { .soc_id = "r8a774a1", .revision = "ES1.[012]", .data = &sdhi_quirks_h3_m3w_es1 },
2168 ++ { .soc_id = "r8a77980", .data = &sdhi_quirks_nohs400 },
2169 + { /* Sentinel. */ },
2170 + };
2171 +
2172 +diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c
2173 +index 05a012a694b2..423c3339c03b 100644
2174 +--- a/drivers/mmc/host/sdhci-pci-o2micro.c
2175 ++++ b/drivers/mmc/host/sdhci-pci-o2micro.c
2176 +@@ -124,6 +124,7 @@ static int sdhci_o2_execute_tuning(struct mmc_host *mmc, u32 opcode)
2177 + */
2178 + if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
2179 + current_bus_width = mmc->ios.bus_width;
2180 ++ mmc->ios.bus_width = MMC_BUS_WIDTH_4;
2181 + sdhci_set_bus_width(host, MMC_BUS_WIDTH_4);
2182 + }
2183 +
2184 +@@ -135,8 +136,10 @@ static int sdhci_o2_execute_tuning(struct mmc_host *mmc, u32 opcode)
2185 +
2186 + sdhci_end_tuning(host);
2187 +
2188 +- if (current_bus_width == MMC_BUS_WIDTH_8)
2189 ++ if (current_bus_width == MMC_BUS_WIDTH_8) {
2190 ++ mmc->ios.bus_width = MMC_BUS_WIDTH_8;
2191 + sdhci_set_bus_width(host, current_bus_width);
2192 ++ }
2193 +
2194 + host->flags &= ~SDHCI_HS400_TUNING;
2195 + return 0;
2196 +diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
2197 +index 1c66fb2ad76b..f97c628eb2ad 100644
2198 +--- a/drivers/net/can/flexcan.c
2199 ++++ b/drivers/net/can/flexcan.c
2200 +@@ -166,7 +166,7 @@
2201 + #define FLEXCAN_MB_CNT_LENGTH(x) (((x) & 0xf) << 16)
2202 + #define FLEXCAN_MB_CNT_TIMESTAMP(x) ((x) & 0xffff)
2203 +
2204 +-#define FLEXCAN_TIMEOUT_US (50)
2205 ++#define FLEXCAN_TIMEOUT_US (250)
2206 +
2207 + /* FLEXCAN hardware feature flags
2208 + *
2209 +diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
2210 +index 97d0933d9bd9..e4a5038eba9a 100644
2211 +--- a/drivers/net/can/xilinx_can.c
2212 ++++ b/drivers/net/can/xilinx_can.c
2213 +@@ -1443,7 +1443,7 @@ static const struct xcan_devtype_data xcan_canfd_data = {
2214 + XCAN_FLAG_RXMNF |
2215 + XCAN_FLAG_TX_MAILBOXES |
2216 + XCAN_FLAG_RX_FIFO_MULTI,
2217 +- .bittiming_const = &xcan_bittiming_const,
2218 ++ .bittiming_const = &xcan_bittiming_const_canfd,
2219 + .btr_ts2_shift = XCAN_BTR_TS2_SHIFT_CANFD,
2220 + .btr_sjw_shift = XCAN_BTR_SJW_SHIFT_CANFD,
2221 + .bus_clk_name = "s_axi_aclk",
2222 +diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
2223 +index 720f1dde2c2d..ae750ab9a4d7 100644
2224 +--- a/drivers/net/dsa/mv88e6xxx/chip.c
2225 ++++ b/drivers/net/dsa/mv88e6xxx/chip.c
2226 +@@ -1517,7 +1517,7 @@ static int mv88e6xxx_vtu_get(struct mv88e6xxx_chip *chip, u16 vid,
2227 + int err;
2228 +
2229 + if (!vid)
2230 +- return -EINVAL;
2231 ++ return -EOPNOTSUPP;
2232 +
2233 + entry->vid = vid - 1;
2234 + entry->valid = false;
2235 +diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
2236 +index ce15d2350db9..188c3f6791b5 100644
2237 +--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
2238 ++++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
2239 +@@ -339,6 +339,7 @@ static int __lb_setup(struct net_device *ndev,
2240 + static int __lb_up(struct net_device *ndev,
2241 + enum hnae_loop loop_mode)
2242 + {
2243 ++#define NIC_LB_TEST_WAIT_PHY_LINK_TIME 300
2244 + struct hns_nic_priv *priv = netdev_priv(ndev);
2245 + struct hnae_handle *h = priv->ae_handle;
2246 + int speed, duplex;
2247 +@@ -365,6 +366,9 @@ static int __lb_up(struct net_device *ndev,
2248 +
2249 + h->dev->ops->adjust_link(h, speed, duplex);
2250 +
2251 ++ /* wait for the link adjustment to complete and the PHY to be ready */
2252 ++ msleep(NIC_LB_TEST_WAIT_PHY_LINK_TIME);
2253 ++
2254 + return 0;
2255 + }
2256 +
2257 +diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
2258 +index 549d36497b8c..f3f7551162a9 100644
2259 +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
2260 ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
2261 +@@ -1777,6 +1777,7 @@ static void mtk_poll_controller(struct net_device *dev)
2262 +
2263 + static int mtk_start_dma(struct mtk_eth *eth)
2264 + {
2265 ++ u32 rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
2266 + int err;
2267 +
2268 + err = mtk_dma_init(eth);
2269 +@@ -1793,7 +1794,7 @@ static int mtk_start_dma(struct mtk_eth *eth)
2270 + MTK_QDMA_GLO_CFG);
2271 +
2272 + mtk_w32(eth,
2273 +- MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
2274 ++ MTK_RX_DMA_EN | rx_2b_offset |
2275 + MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
2276 + MTK_PDMA_GLO_CFG);
2277 +
2278 +@@ -2297,13 +2298,13 @@ static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
2279 +
2280 + switch (cmd->cmd) {
2281 + case ETHTOOL_GRXRINGS:
2282 +- if (dev->features & NETIF_F_LRO) {
2283 ++ if (dev->hw_features & NETIF_F_LRO) {
2284 + cmd->data = MTK_MAX_RX_RING_NUM;
2285 + ret = 0;
2286 + }
2287 + break;
2288 + case ETHTOOL_GRXCLSRLCNT:
2289 +- if (dev->features & NETIF_F_LRO) {
2290 ++ if (dev->hw_features & NETIF_F_LRO) {
2291 + struct mtk_mac *mac = netdev_priv(dev);
2292 +
2293 + cmd->rule_cnt = mac->hwlro_ip_cnt;
2294 +@@ -2311,11 +2312,11 @@ static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
2295 + }
2296 + break;
2297 + case ETHTOOL_GRXCLSRULE:
2298 +- if (dev->features & NETIF_F_LRO)
2299 ++ if (dev->hw_features & NETIF_F_LRO)
2300 + ret = mtk_hwlro_get_fdir_entry(dev, cmd);
2301 + break;
2302 + case ETHTOOL_GRXCLSRLALL:
2303 +- if (dev->features & NETIF_F_LRO)
2304 ++ if (dev->hw_features & NETIF_F_LRO)
2305 + ret = mtk_hwlro_get_fdir_all(dev, cmd,
2306 + rule_locs);
2307 + break;
2308 +@@ -2332,11 +2333,11 @@ static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
2309 +
2310 + switch (cmd->cmd) {
2311 + case ETHTOOL_SRXCLSRLINS:
2312 +- if (dev->features & NETIF_F_LRO)
2313 ++ if (dev->hw_features & NETIF_F_LRO)
2314 + ret = mtk_hwlro_add_ipaddr(dev, cmd);
2315 + break;
2316 + case ETHTOOL_SRXCLSRLDEL:
2317 +- if (dev->features & NETIF_F_LRO)
2318 ++ if (dev->hw_features & NETIF_F_LRO)
2319 + ret = mtk_hwlro_del_ipaddr(dev, cmd);
2320 + break;
2321 + default:
2322 +diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
2323 +index bbeb1623e2d5..717fce6edeb7 100644
2324 +--- a/drivers/net/ipvlan/ipvlan_main.c
2325 ++++ b/drivers/net/ipvlan/ipvlan_main.c
2326 +@@ -112,7 +112,7 @@ static void ipvlan_port_destroy(struct net_device *dev)
2327 + }
2328 +
2329 + #define IPVLAN_FEATURES \
2330 +- (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
2331 ++ (NETIF_F_SG | NETIF_F_CSUM_MASK | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
2332 + NETIF_F_GSO | NETIF_F_TSO | NETIF_F_GSO_ROBUST | \
2333 + NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_RXCSUM | \
2334 + NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)
2335 +diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
2336 +index efa31fcda505..611dfc3d89a0 100644
2337 +--- a/drivers/net/phy/phylink.c
2338 ++++ b/drivers/net/phy/phylink.c
2339 +@@ -1080,6 +1080,7 @@ EXPORT_SYMBOL_GPL(phylink_ethtool_ksettings_get);
2340 + int phylink_ethtool_ksettings_set(struct phylink *pl,
2341 + const struct ethtool_link_ksettings *kset)
2342 + {
2343 ++ __ETHTOOL_DECLARE_LINK_MODE_MASK(support);
2344 + struct ethtool_link_ksettings our_kset;
2345 + struct phylink_link_state config;
2346 + int ret;
2347 +@@ -1090,11 +1091,12 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
2348 + kset->base.autoneg != AUTONEG_ENABLE)
2349 + return -EINVAL;
2350 +
2351 ++ linkmode_copy(support, pl->supported);
2352 + config = pl->link_config;
2353 +
2354 + /* Mask out unsupported advertisements */
2355 + linkmode_and(config.advertising, kset->link_modes.advertising,
2356 +- pl->supported);
2357 ++ support);
2358 +
2359 + /* FIXME: should we reject autoneg if phy/mac does not support it? */
2360 + if (kset->base.autoneg == AUTONEG_DISABLE) {
2361 +@@ -1104,7 +1106,7 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
2362 + * duplex.
2363 + */
2364 + s = phy_lookup_setting(kset->base.speed, kset->base.duplex,
2365 +- pl->supported, false);
2366 ++ support, false);
2367 + if (!s)
2368 + return -EINVAL;
2369 +
2370 +@@ -1133,7 +1135,7 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
2371 + __set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, config.advertising);
2372 + }
2373 +
2374 +- if (phylink_validate(pl, pl->supported, &config))
2375 ++ if (phylink_validate(pl, support, &config))
2376 + return -EINVAL;
2377 +
2378 + /* If autonegotiation is enabled, we must have an advertisement */
2379 +@@ -1583,6 +1585,7 @@ static int phylink_sfp_module_insert(void *upstream,
2380 + {
2381 + struct phylink *pl = upstream;
2382 + __ETHTOOL_DECLARE_LINK_MODE_MASK(support) = { 0, };
2383 ++ __ETHTOOL_DECLARE_LINK_MODE_MASK(support1);
2384 + struct phylink_link_state config;
2385 + phy_interface_t iface;
2386 + int ret = 0;
2387 +@@ -1610,6 +1613,8 @@ static int phylink_sfp_module_insert(void *upstream,
2388 + return ret;
2389 + }
2390 +
2391 ++ linkmode_copy(support1, support);
2392 ++
2393 + iface = sfp_select_interface(pl->sfp_bus, id, config.advertising);
2394 + if (iface == PHY_INTERFACE_MODE_NA) {
2395 + netdev_err(pl->netdev,
2396 +@@ -1619,7 +1624,7 @@ static int phylink_sfp_module_insert(void *upstream,
2397 + }
2398 +
2399 + config.interface = iface;
2400 +- ret = phylink_validate(pl, support, &config);
2401 ++ ret = phylink_validate(pl, support1, &config);
2402 + if (ret) {
2403 + netdev_err(pl->netdev, "validation of %s/%s with support %*pb failed: %d\n",
2404 + phylink_an_mode_str(MLO_AN_INBAND),
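Both phylink hunks exist because phylink_validate() may clear bits in the mask it is handed; validating pl->supported directly would permanently narrow the device's advertised capabilities. The defensive-copy pattern in isolation:

    __ETHTOOL_DECLARE_LINK_MODE_MASK(support);

    linkmode_copy(support, pl->supported);      /* scratch copy */
    if (phylink_validate(pl, support, &config)) /* may modify 'support' */
        return -EINVAL;
    /* pl->supported is untouched on both paths */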
2405 +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
2406 +index 4d104ab80fd8..f757c9e72e71 100644
2407 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
2408 ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
2409 +@@ -676,6 +676,12 @@ brcmf_sdio_kso_control(struct brcmf_sdio *bus, bool on)
2410 +
2411 + brcmf_dbg(TRACE, "Enter: on=%d\n", on);
2412 +
2413 ++ sdio_retune_crc_disable(bus->sdiodev->func1);
2414 ++
2415 ++ /* Cannot re-tune if device is asleep; defer till we're awake */
2416 ++ if (on)
2417 ++ sdio_retune_hold_now(bus->sdiodev->func1);
2418 ++
2419 + wr_val = (on << SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
2420 + /* 1st KSO write goes to AOS wake up core if device is asleep */
2421 + brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, wr_val, &err);
2422 +@@ -736,6 +742,11 @@ brcmf_sdio_kso_control(struct brcmf_sdio *bus, bool on)
2423 + if (try_cnt > MAX_KSO_ATTEMPTS)
2424 + brcmf_err("max tries: rd_val=0x%x err=%d\n", rd_val, err);
2425 +
2426 ++ if (on)
2427 ++ sdio_retune_release(bus->sdiodev->func1);
2428 ++
2429 ++ sdio_retune_crc_enable(bus->sdiodev->func1);
2430 ++
2431 + return err;
2432 + }
2433 +
2434 +@@ -3373,11 +3384,7 @@ err:
2435 +
2436 + static bool brcmf_sdio_aos_no_decode(struct brcmf_sdio *bus)
2437 + {
2438 +- if (bus->ci->chip == CY_CC_43012_CHIP_ID ||
2439 +- bus->ci->chip == CY_CC_4373_CHIP_ID ||
2440 +- bus->ci->chip == BRCM_CC_4339_CHIP_ID ||
2441 +- bus->ci->chip == BRCM_CC_4345_CHIP_ID ||
2442 +- bus->ci->chip == BRCM_CC_4354_CHIP_ID)
2443 ++ if (bus->ci->chip == CY_CC_43012_CHIP_ID)
2444 + return true;
2445 + else
2446 + return false;
2447 +diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
2448 +index 35d2202ee2fd..3a390b2c7540 100644
2449 +--- a/drivers/nvme/host/core.c
2450 ++++ b/drivers/nvme/host/core.c
2451 +@@ -3397,7 +3397,8 @@ static int nvme_scan_ns_list(struct nvme_ctrl *ctrl, unsigned nn)
2452 + {
2453 + struct nvme_ns *ns;
2454 + __le32 *ns_list;
2455 +- unsigned i, j, nsid, prev = 0, num_lists = DIV_ROUND_UP(nn, 1024);
2456 ++ unsigned i, j, nsid, prev = 0;
2457 ++ unsigned num_lists = DIV_ROUND_UP_ULL((u64)nn, 1024);
2458 + int ret = 0;
2459 +
2460 + ns_list = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
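DIV_ROUND_UP(nn, 1024) evaluates (nn + 1023) / 1024 in the type of nn, so an nn near UINT_MAX wraps before the division; DIV_ROUND_UP_ULL widens the sum to 64 bits first. Worked numbers:

    unsigned int nn = 0xFFFFFFFE;                /* near UINT_MAX */
    unsigned int bad  = (nn + 1023) / 1024;      /* wraps: 1021 / 1024 = 0 */
    u64          good = ((u64)nn + 1023) / 1024; /* 4194304, as intended */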
2461 +diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
2462 +index a065dbfc43b1..a77fd8674ecf 100644
2463 +--- a/drivers/nvme/target/io-cmd-bdev.c
2464 ++++ b/drivers/nvme/target/io-cmd-bdev.c
2465 +@@ -295,6 +295,7 @@ u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req)
2466 + return 0;
2467 + case nvme_cmd_write_zeroes:
2468 + req->execute = nvmet_bdev_execute_write_zeroes;
2469 ++ req->data_len = 0;
2470 + return 0;
2471 + default:
2472 + pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
2473 +diff --git a/drivers/parport/share.c b/drivers/parport/share.c
2474 +index 5dc53d420ca8..7b4ee33c1935 100644
2475 +--- a/drivers/parport/share.c
2476 ++++ b/drivers/parport/share.c
2477 +@@ -895,6 +895,7 @@ parport_register_dev_model(struct parport *port, const char *name,
2478 + par_dev->devmodel = true;
2479 + ret = device_register(&par_dev->dev);
2480 + if (ret) {
2481 ++ kfree(par_dev->state);
2482 + put_device(&par_dev->dev);
2483 + goto err_put_port;
2484 + }
2485 +@@ -912,6 +913,7 @@ parport_register_dev_model(struct parport *port, const char *name,
2486 + spin_unlock(&port->physport->pardevice_lock);
2487 + pr_debug("%s: cannot grant exclusive access for device %s\n",
2488 + port->name, name);
2489 ++ kfree(par_dev->state);
2490 + device_unregister(&par_dev->dev);
2491 + goto err_put_port;
2492 + }
2493 +diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
2494 +index c3067fd3bd9e..fece768efcb1 100644
2495 +--- a/drivers/s390/net/qeth_l2_main.c
2496 ++++ b/drivers/s390/net/qeth_l2_main.c
2497 +@@ -1679,7 +1679,7 @@ static void qeth_bridgeport_an_set_cb(void *priv,
2498 +
2499 + l2entry = (struct qdio_brinfo_entry_l2 *)entry;
2500 + code = IPA_ADDR_CHANGE_CODE_MACADDR;
2501 +- if (l2entry->addr_lnid.lnid)
2502 ++ if (l2entry->addr_lnid.lnid < VLAN_N_VID)
2503 + code |= IPA_ADDR_CHANGE_CODE_VLANID;
2504 + qeth_bridge_emit_host_event(card, anev_reg_unreg, code,
2505 + (struct net_if_token *)&l2entry->nit,
2506 +diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
2507 +index 53712cf26406..93a5748036de 100644
2508 +--- a/drivers/s390/net/qeth_l3_main.c
2509 ++++ b/drivers/s390/net/qeth_l3_main.c
2510 +@@ -1883,13 +1883,20 @@ static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2511 +
2512 + static int qeth_l3_get_cast_type(struct sk_buff *skb)
2513 + {
2514 ++ int ipv = qeth_get_ip_version(skb);
2515 + struct neighbour *n = NULL;
2516 + struct dst_entry *dst;
2517 +
2518 + rcu_read_lock();
2519 + dst = skb_dst(skb);
2520 +- if (dst)
2521 +- n = dst_neigh_lookup_skb(dst, skb);
2522 ++ if (dst) {
2523 ++ struct rt6_info *rt = (struct rt6_info *) dst;
2524 ++
2525 ++ dst = dst_check(dst, (ipv == 6) ? rt6_get_cookie(rt) : 0);
2526 ++ if (dst)
2527 ++ n = dst_neigh_lookup_skb(dst, skb);
2528 ++ }
2529 ++
2530 + if (n) {
2531 + int cast_type = n->type;
2532 +
2533 +@@ -1904,8 +1911,10 @@ static int qeth_l3_get_cast_type(struct sk_buff *skb)
2534 + rcu_read_unlock();
2535 +
2536 + /* no neighbour (eg AF_PACKET), fall back to target's IP address ... */
2537 +- switch (qeth_get_ip_version(skb)) {
2538 ++ switch (ipv) {
2539 + case 4:
2540 ++ if (ipv4_is_lbcast(ip_hdr(skb)->daddr))
2541 ++ return RTN_BROADCAST;
2542 + return ipv4_is_multicast(ip_hdr(skb)->daddr) ?
2543 + RTN_MULTICAST : RTN_UNICAST;
2544 + case 6:
2545 +@@ -1941,6 +1950,7 @@ static void qeth_l3_fill_header(struct qeth_qdio_out_q *queue,
2546 + struct qeth_hdr_layer3 *l3_hdr = &hdr->hdr.l3;
2547 + struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
2548 + struct qeth_card *card = queue->card;
2549 ++ struct dst_entry *dst;
2550 +
2551 + hdr->hdr.l3.length = data_len;
2552 +
2553 +@@ -1991,15 +2001,27 @@ static void qeth_l3_fill_header(struct qeth_qdio_out_q *queue,
2554 +
2555 + hdr->hdr.l3.flags = qeth_l3_cast_type_to_flag(cast_type);
2556 + rcu_read_lock();
2557 ++ dst = skb_dst(skb);
2558 ++
2559 + if (ipv == 4) {
2560 +- struct rtable *rt = skb_rtable(skb);
2561 ++ struct rtable *rt;
2562 ++
2563 ++ if (dst)
2564 ++ dst = dst_check(dst, 0);
2565 ++ rt = (struct rtable *) dst;
2566 +
2567 + *((__be32 *) &hdr->hdr.l3.next_hop.ipv4.addr) = (rt) ?
2568 + rt_nexthop(rt, ip_hdr(skb)->daddr) :
2569 + ip_hdr(skb)->daddr;
2570 + } else {
2571 + /* IPv6 */
2572 +- const struct rt6_info *rt = skb_rt6_info(skb);
2573 ++ struct rt6_info *rt;
2574 ++
2575 ++ if (dst) {
2576 ++ rt = (struct rt6_info *) dst;
2577 ++ dst = dst_check(dst, rt6_get_cookie(rt));
2578 ++ }
2579 ++ rt = (struct rt6_info *) dst;
2580 +
2581 + if (rt && !ipv6_addr_any(&rt->rt6i_gateway))
2582 + l3_hdr->next_hop.ipv6_addr = rt->rt6i_gateway;
2583 +diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
2584 +index 531824afba5f..392695b4691a 100644
2585 +--- a/drivers/scsi/smartpqi/smartpqi_init.c
2586 ++++ b/drivers/scsi/smartpqi/smartpqi_init.c
2587 +@@ -4044,8 +4044,10 @@ static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
2588 + return -ETIMEDOUT;
2589 + msecs_blocked =
2590 + jiffies_to_msecs(jiffies - start_jiffies);
2591 +- if (msecs_blocked >= timeout_msecs)
2592 +- return -ETIMEDOUT;
2593 ++ if (msecs_blocked >= timeout_msecs) {
2594 ++ rc = -ETIMEDOUT;
2595 ++ goto out;
2596 ++ }
2597 + timeout_msecs -= msecs_blocked;
2598 + }
2599 + }
2600 +diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
2601 +index 27213676329c..848c7478efd6 100644
2602 +--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
2603 ++++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
2604 +@@ -340,24 +340,21 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
2605 + goto dealloc_host;
2606 + }
2607 +
2608 +- pm_runtime_set_active(&pdev->dev);
2609 +- pm_runtime_enable(&pdev->dev);
2610 +-
2611 + ufshcd_init_lanes_per_dir(hba);
2612 +
2613 + err = ufshcd_init(hba, mmio_base, irq);
2614 + if (err) {
2615 + dev_err(dev, "Initialization failed\n");
2616 +- goto out_disable_rpm;
2617 ++ goto dealloc_host;
2618 + }
2619 +
2620 + platform_set_drvdata(pdev, hba);
2621 +
2622 ++ pm_runtime_set_active(&pdev->dev);
2623 ++ pm_runtime_enable(&pdev->dev);
2624 ++
2625 + return 0;
2626 +
2627 +-out_disable_rpm:
2628 +- pm_runtime_disable(&pdev->dev);
2629 +- pm_runtime_set_suspended(&pdev->dev);
2630 + dealloc_host:
2631 + ufshcd_dealloc_host(hba);
2632 + out:
2633 +diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
2634 +index 5ba49c8cd2a3..dbd1f8c253bf 100644
2635 +--- a/drivers/scsi/ufs/ufshcd.c
2636 ++++ b/drivers/scsi/ufs/ufshcd.c
2637 +@@ -1917,7 +1917,8 @@ int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2638 + memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
2639 +
2640 + /* Get the descriptor */
2641 +- if (lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
2642 ++ if (hba->dev_cmd.query.descriptor &&
2643 ++ lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
2644 + u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
2645 + GENERAL_UPIU_REQUEST_SIZE;
2646 + u16 resp_len;
2647 +diff --git a/drivers/staging/erofs/erofs_fs.h b/drivers/staging/erofs/erofs_fs.h
2648 +index fa52898df006..8ddb2b3e7d39 100644
2649 +--- a/drivers/staging/erofs/erofs_fs.h
2650 ++++ b/drivers/staging/erofs/erofs_fs.h
2651 +@@ -17,10 +17,16 @@
2652 + #define EROFS_SUPER_MAGIC_V1 0xE0F5E1E2
2653 + #define EROFS_SUPER_OFFSET 1024
2654 +
2655 ++/*
2656 ++ * Any bits not covered by EROFS_ALL_REQUIREMENTS are treated as
2657 ++ * incompatible with this kernel version.
2658 ++ */
2659 ++#define EROFS_ALL_REQUIREMENTS 0
2660 ++
2661 + struct erofs_super_block {
2662 + /* 0 */__le32 magic; /* in the little endian */
2663 + /* 4 */__le32 checksum; /* crc32c(super_block) */
2664 +-/* 8 */__le32 features;
2665 ++/* 8 */__le32 features; /* (aka. feature_compat) */
2666 + /* 12 */__u8 blkszbits; /* support block_size == PAGE_SIZE only */
2667 + /* 13 */__u8 reserved;
2668 +
2669 +@@ -34,9 +40,10 @@ struct erofs_super_block {
2670 + /* 44 */__le32 xattr_blkaddr;
2671 + /* 48 */__u8 uuid[16]; /* 128-bit uuid for volume */
2672 + /* 64 */__u8 volume_name[16]; /* volume name */
2673 ++/* 80 */__le32 requirements; /* (aka. feature_incompat) */
2674 +
2675 +-/* 80 */__u8 reserved2[48]; /* 128 bytes */
2676 +-} __packed;
2677 ++/* 84 */__u8 reserved2[44];
2678 ++} __packed; /* 128 bytes */
2679 +
2680 + /*
2681 + * erofs inode data mapping:
2682 +diff --git a/drivers/staging/erofs/internal.h b/drivers/staging/erofs/internal.h
2683 +index e3bfde00c7d2..20cf6e7e170f 100644
2684 +--- a/drivers/staging/erofs/internal.h
2685 ++++ b/drivers/staging/erofs/internal.h
2686 +@@ -114,6 +114,8 @@ struct erofs_sb_info {
2687 +
2688 + u8 uuid[16]; /* 128-bit uuid for volume */
2689 + u8 volume_name[16]; /* volume name */
2690 ++ u32 requirements;
2691 ++
2692 + char *dev_name;
2693 +
2694 + unsigned int mount_opt;
2695 +diff --git a/drivers/staging/erofs/super.c b/drivers/staging/erofs/super.c
2696 +index c8981662a49b..2ed53dd7f50c 100644
2697 +--- a/drivers/staging/erofs/super.c
2698 ++++ b/drivers/staging/erofs/super.c
2699 +@@ -76,6 +76,22 @@ static void destroy_inode(struct inode *inode)
2700 + call_rcu(&inode->i_rcu, i_callback);
2701 + }
2702 +
2703 ++static bool check_layout_compatibility(struct super_block *sb,
2704 ++ struct erofs_super_block *layout)
2705 ++{
2706 ++ const unsigned int requirements = le32_to_cpu(layout->requirements);
2707 ++
2708 ++ EROFS_SB(sb)->requirements = requirements;
2709 ++
2710 ++ /* check if current kernel meets all mandatory requirements */
2711 ++ if (requirements & (~EROFS_ALL_REQUIREMENTS)) {
2712 ++ errln("unidentified requirements %x, please upgrade kernel version",
2713 ++ requirements & ~EROFS_ALL_REQUIREMENTS);
2714 ++ return false;
2715 ++ }
2716 ++ return true;
2717 ++}
2718 ++
2719 + static int superblock_read(struct super_block *sb)
2720 + {
2721 + struct erofs_sb_info *sbi;
2722 +@@ -109,6 +125,9 @@ static int superblock_read(struct super_block *sb)
2723 + goto out;
2724 + }
2725 +
2726 ++ if (!check_layout_compatibility(sb, layout))
2727 ++ goto out;
2728 ++
2729 + sbi->blocks = le32_to_cpu(layout->blocks);
2730 + sbi->meta_blkaddr = le32_to_cpu(layout->meta_blkaddr);
2731 + #ifdef CONFIG_EROFS_FS_XATTR
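The requirements check is the standard incompat-feature pattern (cf. ext4's feature_incompat): the kernel keeps a mask of the requirement bits it implements and refuses to mount an image that sets anything more. A hypothetical future extension would look like:

    /* Illustrative only -- no such bit exists in this version: */
    #define EROFS_REQUIREMENT_BIGFOO   0x00000001
    #define EROFS_ALL_REQUIREMENTS     EROFS_REQUIREMENT_BIGFOO

    /* Kernels where EROFS_ALL_REQUIREMENTS is still 0 would see the
     * unknown bit survive the ~EROFS_ALL_REQUIREMENTS mask and refuse
     * the mount -- the fail-safe wanted for on-disk format changes. */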
2732 +diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
2733 +index 829e947cabf5..6a5ee8e6da10 100644
2734 +--- a/drivers/usb/chipidea/udc.c
2735 ++++ b/drivers/usb/chipidea/udc.c
2736 +@@ -1622,6 +1622,25 @@ static int ci_udc_pullup(struct usb_gadget *_gadget, int is_on)
2737 + static int ci_udc_start(struct usb_gadget *gadget,
2738 + struct usb_gadget_driver *driver);
2739 + static int ci_udc_stop(struct usb_gadget *gadget);
2740 ++
2741 ++/* Match ISOC IN from the highest endpoint */
2742 ++static struct usb_ep *ci_udc_match_ep(struct usb_gadget *gadget,
2743 ++ struct usb_endpoint_descriptor *desc,
2744 ++ struct usb_ss_ep_comp_descriptor *comp_desc)
2745 ++{
2746 ++ struct ci_hdrc *ci = container_of(gadget, struct ci_hdrc, gadget);
2747 ++ struct usb_ep *ep;
2748 ++
2749 ++ if (usb_endpoint_xfer_isoc(desc) && usb_endpoint_dir_in(desc)) {
2750 ++ list_for_each_entry_reverse(ep, &ci->gadget.ep_list, ep_list) {
2751 ++ if (ep->caps.dir_in && !ep->claimed)
2752 ++ return ep;
2753 ++ }
2754 ++ }
2755 ++
2756 ++ return NULL;
2757 ++}
2758 ++
2759 + /**
2760 + * Device operations part of the API to the USB controller hardware,
2761 + * which don't involve endpoints (or i/o)
2762 +@@ -1635,6 +1654,7 @@ static const struct usb_gadget_ops usb_gadget_ops = {
2763 + .vbus_draw = ci_udc_vbus_draw,
2764 + .udc_start = ci_udc_start,
2765 + .udc_stop = ci_udc_stop,
2766 ++ .match_ep = ci_udc_match_ep,
2767 + };
2768 +
2769 + static int init_eps(struct ci_hdrc *ci)
2770 +diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
2771 +index 765ef5f1ffb8..3c8e65900dcb 100644
2772 +--- a/drivers/usb/host/xhci-ring.c
2773 ++++ b/drivers/usb/host/xhci-ring.c
2774 +@@ -1608,8 +1608,13 @@ static void handle_port_status(struct xhci_hcd *xhci,
2775 + usb_hcd_resume_root_hub(hcd);
2776 + }
2777 +
2778 +- if (hcd->speed >= HCD_USB3 && (portsc & PORT_PLS_MASK) == XDEV_INACTIVE)
2779 ++ if (hcd->speed >= HCD_USB3 &&
2780 ++ (portsc & PORT_PLS_MASK) == XDEV_INACTIVE) {
2781 ++ slot_id = xhci_find_slot_id_by_port(hcd, xhci, hcd_portnum + 1);
2782 ++ if (slot_id && xhci->devs[slot_id])
2783 ++ xhci->devs[slot_id]->flags |= VDEV_PORT_ERROR;
2784 + bus_state->port_remote_wakeup &= ~(1 << hcd_portnum);
2785 ++ }
2786 +
2787 + if ((portsc & PORT_PLC) && (portsc & PORT_PLS_MASK) == XDEV_RESUME) {
2788 + xhci_dbg(xhci, "port resume event for port %d\n", port_id);
2789 +@@ -1797,6 +1802,14 @@ static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
2790 + {
2791 + struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
2792 + struct xhci_command *command;
2793 ++
2794 ++ /*
2795 ++ * Avoid resetting endpoint if link is inactive. Can cause host hang.
2797 ++ * Device will be reset soon to recover the link, so don't do anything.
2797 ++ */
2798 ++ if (xhci->devs[slot_id]->flags & VDEV_PORT_ERROR)
2799 ++ return;
2800 ++
2801 + command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
2802 + if (!command)
2803 + return;
2804 +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
2805 +index 448e3f812833..f39ca3980e48 100644
2806 +--- a/drivers/usb/host/xhci.c
2807 ++++ b/drivers/usb/host/xhci.c
2808 +@@ -1442,6 +1442,10 @@ static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag
2809 + xhci_dbg(xhci, "urb submitted during PCI suspend\n");
2810 + return -ESHUTDOWN;
2811 + }
2812 ++ if (xhci->devs[slot_id]->flags & VDEV_PORT_ERROR) {
2813 ++ xhci_dbg(xhci, "Can't queue urb, port error, link inactive\n");
2814 ++ return -ENODEV;
2815 ++ }
2816 +
2817 + if (usb_endpoint_xfer_isoc(&urb->ep->desc))
2818 + num_tds = urb->number_of_packets;
2819 +@@ -3724,6 +3728,7 @@ static int xhci_discover_or_reset_device(struct usb_hcd *hcd,
2820 + }
2821 + /* If necessary, update the number of active TTs on this root port */
2822 + xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
2823 ++ virt_dev->flags = 0;
2824 + ret = 0;
2825 +
2826 + command_cleanup:
2827 +@@ -5029,16 +5034,26 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
2828 + } else {
2829 + /*
2830 + * Some 3.1 hosts return sbrn 0x30, use xhci supported protocol
2831 +- * minor revision instead of sbrn
2832 ++ * minor revision instead of sbrn. Minor revision is a two digit
2833 ++ * BCD containing minor and sub-minor numbers, only show minor.
2834 + */
2835 +- minor_rev = xhci->usb3_rhub.min_rev;
2836 +- if (minor_rev) {
2837 ++ minor_rev = xhci->usb3_rhub.min_rev / 0x10;
2838 ++
2839 ++ switch (minor_rev) {
2840 ++ case 2:
2841 ++ hcd->speed = HCD_USB32;
2842 ++ hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
2843 ++ hcd->self.root_hub->rx_lanes = 2;
2844 ++ hcd->self.root_hub->tx_lanes = 2;
2845 ++ break;
2846 ++ case 1:
2847 + hcd->speed = HCD_USB31;
2848 + hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
2849 ++ break;
2850 + }
2851 +- xhci_info(xhci, "Host supports USB 3.%x %s SuperSpeed\n",
2852 ++ xhci_info(xhci, "Host supports USB 3.%x %sSuperSpeed\n",
2853 + minor_rev,
2854 +- minor_rev ? "Enhanced" : "");
2855 ++ minor_rev ? "Enhanced " : "");
2856 +
2857 + xhci->usb3_rhub.hcd = hcd;
2858 + /* xHCI private pointer was set in xhci_pci_probe for the second
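The minor revision handling above relies on min_rev being two-digit BCD: the minor number lives in the high nibble and the sub-minor in the low one, so integer division by 0x10 keeps only the minor digit. Worked example:

    u8 min_rev = 0x20;                    /* e.g. a USB 3.2 capable host */
    unsigned int minor = min_rev / 0x10;  /* high nibble -> 2 */
    /* 2 -> HCD_USB32 with dual-lane SuperSpeedPlus,
     * 1 -> HCD_USB31, 0 -> plain USB 3.0. */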
2859 +diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
2860 +index 9334cdee382a..a0035e7b62d8 100644
2861 +--- a/drivers/usb/host/xhci.h
2862 ++++ b/drivers/usb/host/xhci.h
2863 +@@ -1010,6 +1010,15 @@ struct xhci_virt_device {
2864 + u8 real_port;
2865 + struct xhci_interval_bw_table *bw_table;
2866 + struct xhci_tt_bw_info *tt_info;
2867 ++ /*
2868 ++ * flags for state tracking based on events and issued commands.
2870 ++ * Software cannot rely on states from output contexts because of
2870 ++ * latency between events and xHC updating output context values.
2871 ++ * See xhci 1.1 section 4.8.3 for more details
2872 ++ */
2873 ++ unsigned long flags;
2874 ++#define VDEV_PORT_ERROR BIT(0) /* Port error, link inactive */
2875 ++
2876 + /* The current max exit latency for the enabled USB3 link states. */
2877 + u16 current_mel;
2878 + /* Used for the debugfs interfaces. */
2879 +diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c
2880 +index 10d9589001a9..bb5bd49573b4 100644
2881 +--- a/fs/btrfs/reada.c
2882 ++++ b/fs/btrfs/reada.c
2883 +@@ -747,6 +747,7 @@ static void __reada_start_machine(struct btrfs_fs_info *fs_info)
2884 + u64 total = 0;
2885 + int i;
2886 +
2887 ++again:
2888 + do {
2889 + enqueued = 0;
2890 + mutex_lock(&fs_devices->device_list_mutex);
2891 +@@ -758,6 +759,10 @@ static void __reada_start_machine(struct btrfs_fs_info *fs_info)
2892 + mutex_unlock(&fs_devices->device_list_mutex);
2893 + total += enqueued;
2894 + } while (enqueued && total < 10000);
2895 ++ if (fs_devices->seed) {
2896 ++ fs_devices = fs_devices->seed;
2897 ++ goto again;
2898 ++ }
2899 +
2900 + if (enqueued == 0)
2901 + return;
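
The btrfs change above wraps the readahead loop in a `goto again` so that, after the main devices are processed, the same loop runs once for each fs_devices in the seed chain. A toy sketch of the equivalent traversal without the goto (the struct is a stand-in, not the real btrfs_fs_devices):

    #include <stdio.h>
    #include <stddef.h>

    /* Toy stand-in for btrfs_fs_devices: each device list may point at a
     * seed list that must be processed with the same loop. */
    struct fs_devices {
        const char *name;
        struct fs_devices *seed;
    };

    static void start_readahead(struct fs_devices *fs_devices)
    {
        /* Same control flow as the "goto again" in the hunk above. */
        for (; fs_devices; fs_devices = fs_devices->seed)
            printf("enqueue readahead for %s\n", fs_devices->name);
    }

    int main(void)
    {
        struct fs_devices seed = { "seed", NULL };
        struct fs_devices main_dev = { "main", &seed };

        start_readahead(&main_dev);
        return 0;
    }
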
2902 +diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
2903 +index a05bf1d6e1d0..2ab3de440927 100644
2904 +--- a/fs/cifs/cifsfs.c
2905 ++++ b/fs/cifs/cifsfs.c
2906 +@@ -303,6 +303,7 @@ cifs_alloc_inode(struct super_block *sb)
2907 + cifs_inode->uniqueid = 0;
2908 + cifs_inode->createtime = 0;
2909 + cifs_inode->epoch = 0;
2910 ++ spin_lock_init(&cifs_inode->open_file_lock);
2911 + generate_random_uuid(cifs_inode->lease_key);
2912 +
2913 + /*
2914 +diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
2915 +index 607468948f72..a588fbc54968 100644
2916 +--- a/fs/cifs/cifsglob.h
2917 ++++ b/fs/cifs/cifsglob.h
2918 +@@ -1357,6 +1357,7 @@ struct cifsInodeInfo {
2919 + struct rw_semaphore lock_sem; /* protect the fields above */
2920 + /* BB add in lists for dirty pages i.e. write caching info for oplock */
2921 + struct list_head openFileList;
2922 ++ spinlock_t open_file_lock; /* protects openFileList */
2923 + __u32 cifsAttrs; /* e.g. DOS archive bit, sparse, compressed, system */
2924 + unsigned int oplock; /* oplock/lease level we have */
2925 + unsigned int epoch; /* used to track lease state changes */
2926 +@@ -1760,10 +1761,14 @@ require use of the stronger protocol */
2927 + * tcp_ses_lock protects:
2928 + * list operations on tcp and SMB session lists
2929 + * tcon->open_file_lock protects the list of open files hanging off the tcon
2930 ++ * inode->open_file_lock protects the openFileList hanging off the inode
2931 + * cfile->file_info_lock protects counters and fields in cifs file struct
2932 + * f_owner.lock protects certain per file struct operations
2933 + * mapping->page_lock protects certain per page operations
2934 + *
2935 ++ * Note that the cifs_tcon.open_file_lock should be taken before,
2936 ++ * not after, the cifsInodeInfo.open_file_lock.
2937 ++ *
2938 + * Semaphores
2939 + * ----------
2940 + * sesSem operations on smb session
2941 +diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
2942 +index 4c0e44489f21..e9507fba0b36 100644
2943 +--- a/fs/cifs/connect.c
2944 ++++ b/fs/cifs/connect.c
2945 +@@ -478,6 +478,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
2946 + spin_lock(&GlobalMid_Lock);
2947 + server->nr_targets = 1;
2948 + #ifdef CONFIG_CIFS_DFS_UPCALL
2949 ++ spin_unlock(&GlobalMid_Lock);
2950 + cifs_sb = find_super_by_tcp(server);
2951 + if (IS_ERR(cifs_sb)) {
2952 + rc = PTR_ERR(cifs_sb);
2953 +@@ -495,6 +496,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
2954 + }
2955 + cifs_dbg(FYI, "%s: will retry %d target(s)\n", __func__,
2956 + server->nr_targets);
2957 ++ spin_lock(&GlobalMid_Lock);
2958 + #endif
2959 + if (server->tcpStatus == CifsExiting) {
2960 + /* the demux thread will exit normally
2961 +diff --git a/fs/cifs/file.c b/fs/cifs/file.c
2962 +index 9a1db37b303a..736a61843e73 100644
2963 +--- a/fs/cifs/file.c
2964 ++++ b/fs/cifs/file.c
2965 +@@ -338,10 +338,12 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
2966 + atomic_inc(&tcon->num_local_opens);
2967 +
2968 + /* if readable file instance put first in list */
2969 ++ spin_lock(&cinode->open_file_lock);
2970 + if (file->f_mode & FMODE_READ)
2971 + list_add(&cfile->flist, &cinode->openFileList);
2972 + else
2973 + list_add_tail(&cfile->flist, &cinode->openFileList);
2974 ++ spin_unlock(&cinode->open_file_lock);
2975 + spin_unlock(&tcon->open_file_lock);
2976 +
2977 + if (fid->purge_cache)
2978 +@@ -413,7 +415,9 @@ void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler)
2979 + cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);
2980 +
2981 + /* remove it from the lists */
2982 ++ spin_lock(&cifsi->open_file_lock);
2983 + list_del(&cifs_file->flist);
2984 ++ spin_unlock(&cifsi->open_file_lock);
2985 + list_del(&cifs_file->tlist);
2986 + atomic_dec(&tcon->num_local_opens);
2987 +
2988 +@@ -1950,9 +1954,9 @@ refind_writable:
2989 + return 0;
2990 + }
2991 +
2992 +- spin_lock(&tcon->open_file_lock);
2993 ++ spin_lock(&cifs_inode->open_file_lock);
2994 + list_move_tail(&inv_file->flist, &cifs_inode->openFileList);
2995 +- spin_unlock(&tcon->open_file_lock);
2996 ++ spin_unlock(&cifs_inode->open_file_lock);
2997 + cifsFileInfo_put(inv_file);
2998 + ++refind;
2999 + inv_file = NULL;
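
The cifs fix introduces a per-inode open_file_lock and consistently nests it inside tcon->open_file_lock, per the ordering note added to cifsglob.h above. A minimal pthread sketch of that fixed lock order; the names are illustrative, not the cifs API:

    #include <pthread.h>
    #include <stdio.h>

    /* Coarse "tcon" lock is always taken before the finer "inode" lock,
     * mirroring the ordering rule documented in cifsglob.h above. */
    static pthread_mutex_t tcon_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t inode_lock = PTHREAD_MUTEX_INITIALIZER;

    static void add_open_file(const char *name)
    {
        pthread_mutex_lock(&tcon_lock);   /* outer lock first */
        pthread_mutex_lock(&inode_lock);  /* then the inner lock */
        printf("linked %s on tcon and inode open-file lists\n", name);
        pthread_mutex_unlock(&inode_lock);
        pthread_mutex_unlock(&tcon_lock);
    }

    int main(void)
    {
        add_open_file("file.txt");
        return 0;
    }
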
3000 +diff --git a/fs/cifs/smb2maperror.c b/fs/cifs/smb2maperror.c
3001 +index e32c264e3adb..82ade16c9501 100644
3002 +--- a/fs/cifs/smb2maperror.c
3003 ++++ b/fs/cifs/smb2maperror.c
3004 +@@ -457,7 +457,7 @@ static const struct status_to_posix_error smb2_error_map_table[] = {
3005 + {STATUS_FILE_INVALID, -EIO, "STATUS_FILE_INVALID"},
3006 + {STATUS_ALLOTTED_SPACE_EXCEEDED, -EIO,
3007 + "STATUS_ALLOTTED_SPACE_EXCEEDED"},
3008 +- {STATUS_INSUFFICIENT_RESOURCES, -EREMOTEIO,
3009 ++ {STATUS_INSUFFICIENT_RESOURCES, -EAGAIN,
3010 + "STATUS_INSUFFICIENT_RESOURCES"},
3011 + {STATUS_DFS_EXIT_PATH_FOUND, -EIO, "STATUS_DFS_EXIT_PATH_FOUND"},
3012 + {STATUS_DEVICE_DATA_ERROR, -EIO, "STATUS_DEVICE_DATA_ERROR"},
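
The one-line smb2maperror change remaps STATUS_INSUFFICIENT_RESOURCES from -EREMOTEIO to -EAGAIN, turning a hard error into a retryable one. A standalone sketch of the same table-lookup pattern; only the one entry comes from the hunk, and the -EIO default is an assumption:

    #include <errno.h>
    #include <stddef.h>
    #include <stdio.h>

    struct status_to_posix_error {
        unsigned int status;
        int posix_error;
        const char *name;
    };

    /* Tiny subset of the mapping table; 0xC000009A is the NT status
     * code for STATUS_INSUFFICIENT_RESOURCES. */
    static const struct status_to_posix_error map[] = {
        { 0xC000009A, -EAGAIN, "STATUS_INSUFFICIENT_RESOURCES" },
    };

    static int map_status(unsigned int status)
    {
        for (size_t i = 0; i < sizeof(map) / sizeof(map[0]); i++)
            if (map[i].status == status)
                return map[i].posix_error;
        return -EIO; /* assumed default for unknown status codes */
    }

    int main(void)
    {
        printf("0xC000009A -> %d (-EAGAIN = %d)\n",
               map_status(0xC000009A), -EAGAIN);
        return 0;
    }
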
3013 +diff --git a/fs/namespace.c b/fs/namespace.c
3014 +index c9cab307fa77..061f247a3cdb 100644
3015 +--- a/fs/namespace.c
3016 ++++ b/fs/namespace.c
3017 +@@ -2079,6 +2079,7 @@ static int attach_recursive_mnt(struct mount *source_mnt,
3018 + /* Notice when we are propagating across user namespaces */
3019 + if (child->mnt_parent->mnt_ns->user_ns != user_ns)
3020 + lock_mnt_tree(child);
3021 ++ child->mnt.mnt_flags &= ~MNT_LOCKED;
3022 + commit_tree(child);
3023 + }
3024 + put_mountpoint(smp);
3025 +diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
3026 +index b48273e846ad..f0389849fd80 100644
3027 +--- a/fs/overlayfs/inode.c
3028 ++++ b/fs/overlayfs/inode.c
3029 +@@ -553,15 +553,15 @@ static void ovl_fill_inode(struct inode *inode, umode_t mode, dev_t rdev,
3030 + int xinobits = ovl_xino_bits(inode->i_sb);
3031 +
3032 + /*
3033 +- * When NFS export is enabled and d_ino is consistent with st_ino
3034 +- * (samefs or i_ino has enough bits to encode layer), set the same
3035 +- * value used for d_ino to i_ino, because nfsd readdirplus compares
3036 +- * d_ino values to i_ino values of child entries. When called from
3037 ++ * When d_ino is consistent with st_ino (samefs or i_ino has enough
3038 ++ * bits to encode layer), set the same value used for st_ino to i_ino,
3039 ++ * so inode numbers exposed via /proc/locks and the like will be
3040 ++ * consistent with d_ino and st_ino values. An i_ino value inconsistent
3041 ++ * with d_ino also causes nfsd readdirplus to fail. When called from
3042 + * ovl_new_inode(), ino arg is 0, so i_ino will be updated to real
3043 + * upper inode i_ino on ovl_inode_init() or ovl_inode_update().
3044 + */
3045 +- if (inode->i_sb->s_export_op &&
3046 +- (ovl_same_sb(inode->i_sb) || xinobits)) {
3047 ++ if (ovl_same_sb(inode->i_sb) || xinobits) {
3048 + inode->i_ino = ino;
3049 + if (xinobits && fsid && !(ino >> (64 - xinobits)))
3050 + inode->i_ino |= (unsigned long)fsid << (64 - xinobits);
3051 +@@ -777,6 +777,54 @@ struct inode *ovl_lookup_inode(struct super_block *sb, struct dentry *real,
3052 + return inode;
3053 + }
3054 +
3055 ++bool ovl_lookup_trap_inode(struct super_block *sb, struct dentry *dir)
3056 ++{
3057 ++ struct inode *key = d_inode(dir);
3058 ++ struct inode *trap;
3059 ++ bool res;
3060 ++
3061 ++ trap = ilookup5(sb, (unsigned long) key, ovl_inode_test, key);
3062 ++ if (!trap)
3063 ++ return false;
3064 ++
3065 ++ res = IS_DEADDIR(trap) && !ovl_inode_upper(trap) &&
3066 ++ !ovl_inode_lower(trap);
3067 ++
3068 ++ iput(trap);
3069 ++ return res;
3070 ++}
3071 ++
3072 ++/*
3073 ++ * Create an inode cache entry for the layer root dir that will intentionally
3074 ++ * fail ovl_verify_inode(), so any lookup that finds a layer root
3075 ++ * will fail.
3076 ++ */
3077 ++struct inode *ovl_get_trap_inode(struct super_block *sb, struct dentry *dir)
3078 ++{
3079 ++ struct inode *key = d_inode(dir);
3080 ++ struct inode *trap;
3081 ++
3082 ++ if (!d_is_dir(dir))
3083 ++ return ERR_PTR(-ENOTDIR);
3084 ++
3085 ++ trap = iget5_locked(sb, (unsigned long) key, ovl_inode_test,
3086 ++ ovl_inode_set, key);
3087 ++ if (!trap)
3088 ++ return ERR_PTR(-ENOMEM);
3089 ++
3090 ++ if (!(trap->i_state & I_NEW)) {
3091 ++ /* Conflicting layer roots? */
3092 ++ iput(trap);
3093 ++ return ERR_PTR(-ELOOP);
3094 ++ }
3095 ++
3096 ++ trap->i_mode = S_IFDIR;
3097 ++ trap->i_flags = S_DEAD;
3098 ++ unlock_new_inode(trap);
3099 ++
3100 ++ return trap;
3101 ++}
3102 ++
3103 + /*
3104 + * Does overlay inode need to be hashed by lower inode?
3105 + */
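
Besides adding the trap inodes, the ovl_fill_inode() change above drops the NFS-export condition, so whenever samefs or xino applies, the fsid is packed into the top xinobits of the 64-bit inode number after verifying the real ino fits in the low bits. A self-contained sketch of that packing; the 8-bit/fsid-2 values are hypothetical:

    #include <stdint.h>
    #include <stdio.h>

    /* Pack an fsid into the top 'xinobits' of a 64-bit inode number,
     * as ovl_fill_inode() does above, after checking the real ino
     * does not spill into the fsid field. */
    static uint64_t xino_pack(uint64_t ino, unsigned int fsid,
                              unsigned int xinobits)
    {
        if (ino >> (64 - xinobits))   /* real ino too big to share */
            return ino;               /* leave it unpacked */
        return ino | ((uint64_t)fsid << (64 - xinobits));
    }

    int main(void)
    {
        /* Hypothetical: 8 xino bits, lower-layer fsid 2, real ino 42. */
        uint64_t packed = xino_pack(42, 2, 8);

        printf("packed ino = 0x%016llx\n", (unsigned long long)packed);
        printf("fsid = %llu, real ino = %llu\n",
               (unsigned long long)(packed >> 56),
               (unsigned long long)(packed & ((1ULL << 56) - 1)));
        return 0;
    }
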
3106 +diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c
3107 +index efd372312ef1..badf039267a2 100644
3108 +--- a/fs/overlayfs/namei.c
3109 ++++ b/fs/overlayfs/namei.c
3110 +@@ -18,6 +18,7 @@
3111 + #include "overlayfs.h"
3112 +
3113 + struct ovl_lookup_data {
3114 ++ struct super_block *sb;
3115 + struct qstr name;
3116 + bool is_dir;
3117 + bool opaque;
3118 +@@ -244,6 +245,12 @@ static int ovl_lookup_single(struct dentry *base, struct ovl_lookup_data *d,
3119 + if (!d->metacopy || d->last)
3120 + goto out;
3121 + } else {
3122 ++ if (ovl_lookup_trap_inode(d->sb, this)) {
3123 ++ /* Caught in a trap of overlapping layers */
3124 ++ err = -ELOOP;
3125 ++ goto out_err;
3126 ++ }
3127 ++
3128 + if (last_element)
3129 + d->is_dir = true;
3130 + if (d->last)
3131 +@@ -819,6 +826,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
3132 + int err;
3133 + bool metacopy = false;
3134 + struct ovl_lookup_data d = {
3135 ++ .sb = dentry->d_sb,
3136 + .name = dentry->d_name,
3137 + .is_dir = false,
3138 + .opaque = false,
3139 +diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
3140 +index d26efed9f80a..cec40077b522 100644
3141 +--- a/fs/overlayfs/overlayfs.h
3142 ++++ b/fs/overlayfs/overlayfs.h
3143 +@@ -270,6 +270,7 @@ void ovl_clear_flag(unsigned long flag, struct inode *inode);
3144 + bool ovl_test_flag(unsigned long flag, struct inode *inode);
3145 + bool ovl_inuse_trylock(struct dentry *dentry);
3146 + void ovl_inuse_unlock(struct dentry *dentry);
3147 ++bool ovl_is_inuse(struct dentry *dentry);
3148 + bool ovl_need_index(struct dentry *dentry);
3149 + int ovl_nlink_start(struct dentry *dentry);
3150 + void ovl_nlink_end(struct dentry *dentry);
3151 +@@ -376,6 +377,8 @@ struct ovl_inode_params {
3152 + struct inode *ovl_new_inode(struct super_block *sb, umode_t mode, dev_t rdev);
3153 + struct inode *ovl_lookup_inode(struct super_block *sb, struct dentry *real,
3154 + bool is_upper);
3155 ++bool ovl_lookup_trap_inode(struct super_block *sb, struct dentry *dir);
3156 ++struct inode *ovl_get_trap_inode(struct super_block *sb, struct dentry *dir);
3157 + struct inode *ovl_get_inode(struct super_block *sb,
3158 + struct ovl_inode_params *oip);
3159 + static inline void ovl_copyattr(struct inode *from, struct inode *to)
3160 +diff --git a/fs/overlayfs/ovl_entry.h b/fs/overlayfs/ovl_entry.h
3161 +index ec237035333a..6ed1ace8f8b3 100644
3162 +--- a/fs/overlayfs/ovl_entry.h
3163 ++++ b/fs/overlayfs/ovl_entry.h
3164 +@@ -29,6 +29,8 @@ struct ovl_sb {
3165 +
3166 + struct ovl_layer {
3167 + struct vfsmount *mnt;
3168 ++ /* Trap in ovl inode cache */
3169 ++ struct inode *trap;
3170 + struct ovl_sb *fs;
3171 + /* Index of this layer in fs root (upper idx == 0) */
3172 + int idx;
3173 +@@ -65,6 +67,10 @@ struct ovl_fs {
3174 + /* Did we take the inuse lock? */
3175 + bool upperdir_locked;
3176 + bool workdir_locked;
3177 ++ /* Traps in ovl inode cache */
3178 ++ struct inode *upperdir_trap;
3179 ++ struct inode *workdir_trap;
3180 ++ struct inode *indexdir_trap;
3181 + /* Inode numbers in all layers do not use the high xino_bits */
3182 + unsigned int xino_bits;
3183 + };
3184 +diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
3185 +index 0116735cc321..9780617c69ee 100644
3186 +--- a/fs/overlayfs/super.c
3187 ++++ b/fs/overlayfs/super.c
3188 +@@ -217,6 +217,9 @@ static void ovl_free_fs(struct ovl_fs *ofs)
3189 + {
3190 + unsigned i;
3191 +
3192 ++ iput(ofs->indexdir_trap);
3193 ++ iput(ofs->workdir_trap);
3194 ++ iput(ofs->upperdir_trap);
3195 + dput(ofs->indexdir);
3196 + dput(ofs->workdir);
3197 + if (ofs->workdir_locked)
3198 +@@ -225,8 +228,10 @@ static void ovl_free_fs(struct ovl_fs *ofs)
3199 + if (ofs->upperdir_locked)
3200 + ovl_inuse_unlock(ofs->upper_mnt->mnt_root);
3201 + mntput(ofs->upper_mnt);
3202 +- for (i = 0; i < ofs->numlower; i++)
3203 ++ for (i = 0; i < ofs->numlower; i++) {
3204 ++ iput(ofs->lower_layers[i].trap);
3205 + mntput(ofs->lower_layers[i].mnt);
3206 ++ }
3207 + for (i = 0; i < ofs->numlowerfs; i++)
3208 + free_anon_bdev(ofs->lower_fs[i].pseudo_dev);
3209 + kfree(ofs->lower_layers);
3210 +@@ -984,7 +989,26 @@ static const struct xattr_handler *ovl_xattr_handlers[] = {
3211 + NULL
3212 + };
3213 +
3214 +-static int ovl_get_upper(struct ovl_fs *ofs, struct path *upperpath)
3215 ++static int ovl_setup_trap(struct super_block *sb, struct dentry *dir,
3216 ++ struct inode **ptrap, const char *name)
3217 ++{
3218 ++ struct inode *trap;
3219 ++ int err;
3220 ++
3221 ++ trap = ovl_get_trap_inode(sb, dir);
3222 ++ err = PTR_ERR_OR_ZERO(trap);
3223 ++ if (err) {
3224 ++ if (err == -ELOOP)
3225 ++ pr_err("overlayfs: conflicting %s path\n", name);
3226 ++ return err;
3227 ++ }
3228 ++
3229 ++ *ptrap = trap;
3230 ++ return 0;
3231 ++}
3232 ++
3233 ++static int ovl_get_upper(struct super_block *sb, struct ovl_fs *ofs,
3234 ++ struct path *upperpath)
3235 + {
3236 + struct vfsmount *upper_mnt;
3237 + int err;
3238 +@@ -1004,6 +1028,11 @@ static int ovl_get_upper(struct ovl_fs *ofs, struct path *upperpath)
3239 + if (err)
3240 + goto out;
3241 +
3242 ++ err = ovl_setup_trap(sb, upperpath->dentry, &ofs->upperdir_trap,
3243 ++ "upperdir");
3244 ++ if (err)
3245 ++ goto out;
3246 ++
3247 + upper_mnt = clone_private_mount(upperpath);
3248 + err = PTR_ERR(upper_mnt);
3249 + if (IS_ERR(upper_mnt)) {
3250 +@@ -1030,7 +1059,8 @@ out:
3251 + return err;
3252 + }
3253 +
3254 +-static int ovl_make_workdir(struct ovl_fs *ofs, struct path *workpath)
3255 ++static int ovl_make_workdir(struct super_block *sb, struct ovl_fs *ofs,
3256 ++ struct path *workpath)
3257 + {
3258 + struct vfsmount *mnt = ofs->upper_mnt;
3259 + struct dentry *temp;
3260 +@@ -1045,6 +1075,10 @@ static int ovl_make_workdir(struct ovl_fs *ofs, struct path *workpath)
3261 + if (!ofs->workdir)
3262 + goto out;
3263 +
3264 ++ err = ovl_setup_trap(sb, ofs->workdir, &ofs->workdir_trap, "workdir");
3265 ++ if (err)
3266 ++ goto out;
3267 ++
3268 + /*
3269 + * Upper should support d_type, else whiteouts are visible. Given
3270 + * workdir and upper are on same fs, we can do iterate_dir() on
3271 +@@ -1105,7 +1139,8 @@ out:
3272 + return err;
3273 + }
3274 +
3275 +-static int ovl_get_workdir(struct ovl_fs *ofs, struct path *upperpath)
3276 ++static int ovl_get_workdir(struct super_block *sb, struct ovl_fs *ofs,
3277 ++ struct path *upperpath)
3278 + {
3279 + int err;
3280 + struct path workpath = { };
3281 +@@ -1136,19 +1171,16 @@ static int ovl_get_workdir(struct ovl_fs *ofs, struct path *upperpath)
3282 + pr_warn("overlayfs: workdir is in-use by another mount, accessing files from both mounts will result in undefined behavior.\n");
3283 + }
3284 +
3285 +- err = ovl_make_workdir(ofs, &workpath);
3286 +- if (err)
3287 +- goto out;
3288 ++ err = ovl_make_workdir(sb, ofs, &workpath);
3289 +
3290 +- err = 0;
3291 + out:
3292 + path_put(&workpath);
3293 +
3294 + return err;
3295 + }
3296 +
3297 +-static int ovl_get_indexdir(struct ovl_fs *ofs, struct ovl_entry *oe,
3298 +- struct path *upperpath)
3299 ++static int ovl_get_indexdir(struct super_block *sb, struct ovl_fs *ofs,
3300 ++ struct ovl_entry *oe, struct path *upperpath)
3301 + {
3302 + struct vfsmount *mnt = ofs->upper_mnt;
3303 + int err;
3304 +@@ -1167,6 +1199,11 @@ static int ovl_get_indexdir(struct ovl_fs *ofs, struct ovl_entry *oe,
3305 +
3306 + ofs->indexdir = ovl_workdir_create(ofs, OVL_INDEXDIR_NAME, true);
3307 + if (ofs->indexdir) {
3308 ++ err = ovl_setup_trap(sb, ofs->indexdir, &ofs->indexdir_trap,
3309 ++ "indexdir");
3310 ++ if (err)
3311 ++ goto out;
3312 ++
3313 + /*
3314 + * Verify upper root is exclusively associated with index dir.
3315 + * Older kernels stored upper fh in "trusted.overlay.origin"
3316 +@@ -1254,8 +1291,8 @@ static int ovl_get_fsid(struct ovl_fs *ofs, const struct path *path)
3317 + return ofs->numlowerfs;
3318 + }
3319 +
3320 +-static int ovl_get_lower_layers(struct ovl_fs *ofs, struct path *stack,
3321 +- unsigned int numlower)
3322 ++static int ovl_get_lower_layers(struct super_block *sb, struct ovl_fs *ofs,
3323 ++ struct path *stack, unsigned int numlower)
3324 + {
3325 + int err;
3326 + unsigned int i;
3327 +@@ -1273,16 +1310,28 @@ static int ovl_get_lower_layers(struct ovl_fs *ofs, struct path *stack,
3328 +
3329 + for (i = 0; i < numlower; i++) {
3330 + struct vfsmount *mnt;
3331 ++ struct inode *trap;
3332 + int fsid;
3333 +
3334 + err = fsid = ovl_get_fsid(ofs, &stack[i]);
3335 + if (err < 0)
3336 + goto out;
3337 +
3338 ++ err = -EBUSY;
3339 ++ if (ovl_is_inuse(stack[i].dentry)) {
3340 ++ pr_err("overlayfs: lowerdir is in-use as upperdir/workdir\n");
3341 ++ goto out;
3342 ++ }
3343 ++
3344 ++ err = ovl_setup_trap(sb, stack[i].dentry, &trap, "lowerdir");
3345 ++ if (err)
3346 ++ goto out;
3347 ++
3348 + mnt = clone_private_mount(&stack[i]);
3349 + err = PTR_ERR(mnt);
3350 + if (IS_ERR(mnt)) {
3351 + pr_err("overlayfs: failed to clone lowerpath\n");
3352 ++ iput(trap);
3353 + goto out;
3354 + }
3355 +
3356 +@@ -1292,6 +1341,7 @@ static int ovl_get_lower_layers(struct ovl_fs *ofs, struct path *stack,
3357 + */
3358 + mnt->mnt_flags |= MNT_READONLY | MNT_NOATIME;
3359 +
3360 ++ ofs->lower_layers[ofs->numlower].trap = trap;
3361 + ofs->lower_layers[ofs->numlower].mnt = mnt;
3362 + ofs->lower_layers[ofs->numlower].idx = i + 1;
3363 + ofs->lower_layers[ofs->numlower].fsid = fsid;
3364 +@@ -1386,7 +1436,7 @@ static struct ovl_entry *ovl_get_lowerstack(struct super_block *sb,
3365 + goto out_err;
3366 + }
3367 +
3368 +- err = ovl_get_lower_layers(ofs, stack, numlower);
3369 ++ err = ovl_get_lower_layers(sb, ofs, stack, numlower);
3370 + if (err)
3371 + goto out_err;
3372 +
3373 +@@ -1418,6 +1468,77 @@ out_err:
3374 + goto out;
3375 + }
3376 +
3377 ++/*
3378 ++ * Check if this layer root is a descendant of:
3379 ++ * - another layer of this overlayfs instance
3380 ++ * - upper/work dir of any overlayfs instance
3381 ++ */
3382 ++static int ovl_check_layer(struct super_block *sb, struct dentry *dentry,
3383 ++ const char *name)
3384 ++{
3385 ++ struct dentry *next = dentry, *parent;
3386 ++ int err = 0;
3387 ++
3388 ++ if (!dentry)
3389 ++ return 0;
3390 ++
3391 ++ parent = dget_parent(next);
3392 ++
3393 ++ /* Walk back ancestors to root (inclusive) looking for traps */
3394 ++ while (!err && parent != next) {
3395 ++ if (ovl_is_inuse(parent)) {
3396 ++ err = -EBUSY;
3397 ++ pr_err("overlayfs: %s path overlapping in-use upperdir/workdir\n",
3398 ++ name);
3399 ++ } else if (ovl_lookup_trap_inode(sb, parent)) {
3400 ++ err = -ELOOP;
3401 ++ pr_err("overlayfs: overlapping %s path\n", name);
3402 ++ }
3403 ++ next = parent;
3404 ++ parent = dget_parent(next);
3405 ++ dput(next);
3406 ++ }
3407 ++
3408 ++ dput(parent);
3409 ++
3410 ++ return err;
3411 ++}
3412 ++
3413 ++/*
3414 ++ * Check if any of the layers or work dirs overlap.
3415 ++ */
3416 ++static int ovl_check_overlapping_layers(struct super_block *sb,
3417 ++ struct ovl_fs *ofs)
3418 ++{
3419 ++ int i, err;
3420 ++
3421 ++ if (ofs->upper_mnt) {
3422 ++ err = ovl_check_layer(sb, ofs->upper_mnt->mnt_root, "upperdir");
3423 ++ if (err)
3424 ++ return err;
3425 ++
3426 ++ /*
3427 ++ * Checking workbasedir avoids hitting ovl_is_inuse(parent) of
3428 ++ * this instance and covers overlapping work and index dirs,
3429 ++ * unless the work or index dir has been moved since it was created inside
3430 ++ * workbasedir. In that case, we already have their traps in the
3431 ++ * inode cache and will catch that case on lookup.
3432 ++ */
3433 ++ err = ovl_check_layer(sb, ofs->workbasedir, "workdir");
3434 ++ if (err)
3435 ++ return err;
3436 ++ }
3437 ++
3438 ++ for (i = 0; i < ofs->numlower; i++) {
3439 ++ err = ovl_check_layer(sb, ofs->lower_layers[i].mnt->mnt_root,
3440 ++ "lowerdir");
3441 ++ if (err)
3442 ++ return err;
3443 ++ }
3444 ++
3445 ++ return 0;
3446 ++}
3447 ++
3448 + static int ovl_fill_super(struct super_block *sb, void *data, int silent)
3449 + {
3450 + struct path upperpath = { };
3451 +@@ -1457,17 +1578,20 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
3452 + if (ofs->config.xino != OVL_XINO_OFF)
3453 + ofs->xino_bits = BITS_PER_LONG - 32;
3454 +
3455 ++ /* alloc/destroy_inode needed for setting up traps in inode cache */
3456 ++ sb->s_op = &ovl_super_operations;
3457 ++
3458 + if (ofs->config.upperdir) {
3459 + if (!ofs->config.workdir) {
3460 + pr_err("overlayfs: missing 'workdir'\n");
3461 + goto out_err;
3462 + }
3463 +
3464 +- err = ovl_get_upper(ofs, &upperpath);
3465 ++ err = ovl_get_upper(sb, ofs, &upperpath);
3466 + if (err)
3467 + goto out_err;
3468 +
3469 +- err = ovl_get_workdir(ofs, &upperpath);
3470 ++ err = ovl_get_workdir(sb, ofs, &upperpath);
3471 + if (err)
3472 + goto out_err;
3473 +
3474 +@@ -1488,7 +1612,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
3475 + sb->s_flags |= SB_RDONLY;
3476 +
3477 + if (!(ovl_force_readonly(ofs)) && ofs->config.index) {
3478 +- err = ovl_get_indexdir(ofs, oe, &upperpath);
3479 ++ err = ovl_get_indexdir(sb, ofs, oe, &upperpath);
3480 + if (err)
3481 + goto out_free_oe;
3482 +
3483 +@@ -1501,6 +1625,10 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
3484 +
3485 + }
3486 +
3487 ++ err = ovl_check_overlapping_layers(sb, ofs);
3488 ++ if (err)
3489 ++ goto out_free_oe;
3490 ++
3491 + /* Show index=off in /proc/mounts for forced r/o mount */
3492 + if (!ofs->indexdir) {
3493 + ofs->config.index = false;
3494 +@@ -1522,7 +1650,6 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
3495 + cap_lower(cred->cap_effective, CAP_SYS_RESOURCE);
3496 +
3497 + sb->s_magic = OVERLAYFS_SUPER_MAGIC;
3498 +- sb->s_op = &ovl_super_operations;
3499 + sb->s_xattr = ovl_xattr_handlers;
3500 + sb->s_fs_info = ofs;
3501 + sb->s_flags |= SB_POSIXACL;
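
ovl_check_layer() above walks from each layer root toward the filesystem root with dget_parent(), which returns the dentry itself at the root, so the loop terminates when parent == next. A toy-tree version of the same walk and termination condition; the structures are stand-ins, not VFS types:

    #include <stdio.h>

    /* Toy dentry: the root points at itself, like dget_parent() at "/". */
    struct dentry {
        const char *name;
        struct dentry *parent;
        int trapped; /* stands in for ovl_lookup_trap_inode() hitting */
    };

    /* Walk ancestors to the root (inclusive), as ovl_check_layer() does. */
    static int check_layer(struct dentry *dentry)
    {
        struct dentry *next = dentry, *parent = next->parent;

        while (parent != next) {
            if (parent->trapped) {
                printf("overlapping layer at %s\n", parent->name);
                return -1;
            }
            next = parent;
            parent = next->parent;
        }
        return 0;
    }

    int main(void)
    {
        struct dentry root = { "/", &root, 0 };
        struct dentry upper = { "/upper", &root, 1 };
        struct dentry lower = { "/upper/lower", &upper, 0 };

        return check_layer(&lower) ? 1 : 0;
    }
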
3502 +diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c
3503 +index 4035e640f402..e135064e87ad 100644
3504 +--- a/fs/overlayfs/util.c
3505 ++++ b/fs/overlayfs/util.c
3506 +@@ -652,6 +652,18 @@ void ovl_inuse_unlock(struct dentry *dentry)
3507 + }
3508 + }
3509 +
3510 ++bool ovl_is_inuse(struct dentry *dentry)
3511 ++{
3512 ++ struct inode *inode = d_inode(dentry);
3513 ++ bool inuse;
3514 ++
3515 ++ spin_lock(&inode->i_lock);
3516 ++ inuse = (inode->i_state & I_OVL_INUSE);
3517 ++ spin_unlock(&inode->i_lock);
3518 ++
3519 ++ return inuse;
3520 ++}
3521 ++
3522 + /*
3523 + * Does this overlay dentry need to be indexed on copy up?
3524 + */
3525 +diff --git a/fs/pnode.c b/fs/pnode.c
3526 +index 7ea6cfb65077..012be405fec0 100644
3527 +--- a/fs/pnode.c
3528 ++++ b/fs/pnode.c
3529 +@@ -262,7 +262,6 @@ static int propagate_one(struct mount *m)
3530 + child = copy_tree(last_source, last_source->mnt.mnt_root, type);
3531 + if (IS_ERR(child))
3532 + return PTR_ERR(child);
3533 +- child->mnt.mnt_flags &= ~MNT_LOCKED;
3534 + mnt_set_mountpoint(m, mp, child);
3535 + last_dest = m;
3536 + last_source = child;
3537 +diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
3538 +index 43d0f0c496f6..ecb7972e2423 100644
3539 +--- a/include/linux/mmc/host.h
3540 ++++ b/include/linux/mmc/host.h
3541 +@@ -398,6 +398,7 @@ struct mmc_host {
3542 + unsigned int retune_now:1; /* do re-tuning at next req */
3543 + unsigned int retune_paused:1; /* re-tuning is temporarily disabled */
3544 + unsigned int use_blk_mq:1; /* use blk-mq */
3545 ++ unsigned int retune_crc_disable:1; /* don't trigger retune upon CRC errors */
3546 +
3547 + int rescan_disable; /* disable card detection */
3548 + int rescan_entered; /* used with nonremovable devices */
3549 +diff --git a/include/linux/mmc/sdio_func.h b/include/linux/mmc/sdio_func.h
3550 +index 97ca105347a6..6905f3f641cc 100644
3551 +--- a/include/linux/mmc/sdio_func.h
3552 ++++ b/include/linux/mmc/sdio_func.h
3553 +@@ -159,4 +159,10 @@ extern void sdio_f0_writeb(struct sdio_func *func, unsigned char b,
3554 + extern mmc_pm_flag_t sdio_get_host_pm_caps(struct sdio_func *func);
3555 + extern int sdio_set_host_pm_flags(struct sdio_func *func, mmc_pm_flag_t flags);
3556 +
3557 ++extern void sdio_retune_crc_disable(struct sdio_func *func);
3558 ++extern void sdio_retune_crc_enable(struct sdio_func *func);
3559 ++
3560 ++extern void sdio_retune_hold_now(struct sdio_func *func);
3561 ++extern void sdio_retune_release(struct sdio_func *func);
3562 ++
3563 + #endif /* LINUX_MMC_SDIO_FUNC_H */
3564 +diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
3565 +index 094e61e07030..05b1b96f4d9e 100644
3566 +--- a/include/net/bluetooth/hci_core.h
3567 ++++ b/include/net/bluetooth/hci_core.h
3568 +@@ -190,6 +190,9 @@ struct adv_info {
3569 +
3570 + #define HCI_MAX_SHORT_NAME_LENGTH 10
3571 +
3572 ++/* Min encryption key size to match with SMP */
3573 ++#define HCI_MIN_ENC_KEY_SIZE 7
3574 ++
3575 + /* Default LE RPA expiry time, 15 minutes */
3576 + #define HCI_DEFAULT_RPA_TIMEOUT (15 * 60)
3577 +
3578 +diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
3579 +index 13bfeb712d36..ab00733087ac 100644
3580 +--- a/include/net/cfg80211.h
3581 ++++ b/include/net/cfg80211.h
3582 +@@ -3767,7 +3767,8 @@ struct cfg80211_ops {
3583 + * on wiphy_new(), but can be changed by the driver if it has a good
3584 + * reason to override the default
3585 + * @WIPHY_FLAG_4ADDR_AP: supports 4addr mode even on AP (with a single station
3586 +- * on a VLAN interface)
3587 ++ * on a VLAN interface). This flag also serves the extra purpose of
3588 ++ * supporting 4ADDR AP mode on devices that do not support the AP/VLAN iftype.
3589 + * @WIPHY_FLAG_4ADDR_STATION: supports 4addr mode even as a station
3590 + * @WIPHY_FLAG_CONTROL_PORT_PROTOCOL: This device supports setting the
3591 + * control port protocol ethertype. The device also honours the
3592 +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
3593 +index ca1ee656d6d8..5880c993002b 100644
3594 +--- a/kernel/trace/trace.c
3595 ++++ b/kernel/trace/trace.c
3596 +@@ -8627,12 +8627,8 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
3597 +
3598 + cnt++;
3599 +
3600 +- /* reset all but tr, trace, and overruns */
3601 +- memset(&iter.seq, 0,
3602 +- sizeof(struct trace_iterator) -
3603 +- offsetof(struct trace_iterator, seq));
3604 ++ trace_iterator_reset(&iter);
3605 + iter.iter_flags |= TRACE_FILE_LAT_FMT;
3606 +- iter.pos = -1;
3607 +
3608 + if (trace_find_next_entry_inc(&iter) != NULL) {
3609 + int ret;
3610 +diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
3611 +index d80cee49e0eb..8ddf36e5eb42 100644
3612 +--- a/kernel/trace/trace.h
3613 ++++ b/kernel/trace/trace.h
3614 +@@ -1964,4 +1964,22 @@ static inline void tracer_hardirqs_off(unsigned long a0, unsigned long a1) { }
3615 +
3616 + extern struct trace_iterator *tracepoint_print_iter;
3617 +
3618 ++/*
3619 ++ * Reset the state of the trace_iterator so that it can read consumed data.
3620 ++ * Normally, the trace_iterator is used for reading the data when it is not
3621 ++ * consumed, and must retain state.
3622 ++ */
3623 ++static __always_inline void trace_iterator_reset(struct trace_iterator *iter)
3624 ++{
3625 ++ const size_t offset = offsetof(struct trace_iterator, seq);
3626 ++
3627 ++ /*
3628 ++ * Keep gcc from complaining about overwriting more than just one
3629 ++ * member in the structure.
3630 ++ */
3631 ++ memset((char *)iter + offset, 0, sizeof(struct trace_iterator) - offset);
3632 ++
3633 ++ iter->pos = -1;
3634 ++}
3635 ++
3636 + #endif /* _LINUX_KERNEL_TRACE_H */
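
trace_iterator_reset() replaces the two open-coded memset sites (ftrace_dump() above and ftrace_dump_buf() below) with one helper that zeroes everything from the seq member to the end of the struct, leaves the earlier members intact, and re-initializes pos. A userspace sketch of the same offsetof idiom on a toy struct:

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    /* Toy iterator: 'tr' must survive a reset; everything from 'seq'
     * onward must be cleared, mirroring trace_iterator_reset() above. */
    struct iter {
        int tr;        /* preserved across reset */
        char seq[16];  /* first member to be cleared */
        long pos;
    };

    static void iter_reset(struct iter *it)
    {
        const size_t offset = offsetof(struct iter, seq);

        /* Clear from 'seq' to the end of the struct in one memset. */
        memset((char *)it + offset, 0, sizeof(*it) - offset);
        it->pos = -1;
    }

    int main(void)
    {
        struct iter it = { .tr = 7, .seq = "stale", .pos = 123 };

        iter_reset(&it);
        printf("tr=%d seq=\"%s\" pos=%ld\n", it.tr, it.seq, it.pos);
        return 0;
    }
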
3637 +diff --git a/kernel/trace/trace_kdb.c b/kernel/trace/trace_kdb.c
3638 +index 810d78a8d14c..2905a3dd94c1 100644
3639 +--- a/kernel/trace/trace_kdb.c
3640 ++++ b/kernel/trace/trace_kdb.c
3641 +@@ -41,12 +41,8 @@ static void ftrace_dump_buf(int skip_lines, long cpu_file)
3642 +
3643 + kdb_printf("Dumping ftrace buffer:\n");
3644 +
3645 +- /* reset all but tr, trace, and overruns */
3646 +- memset(&iter.seq, 0,
3647 +- sizeof(struct trace_iterator) -
3648 +- offsetof(struct trace_iterator, seq));
3649 ++ trace_iterator_reset(&iter);
3650 + iter.iter_flags |= TRACE_FILE_LAT_FMT;
3651 +- iter.pos = -1;
3652 +
3653 + if (cpu_file == RING_BUFFER_ALL_CPUS) {
3654 + for_each_tracing_cpu(cpu) {
3655 +diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
3656 +index bd4978ce8c45..15d1cb5aee18 100644
3657 +--- a/net/bluetooth/hci_conn.c
3658 ++++ b/net/bluetooth/hci_conn.c
3659 +@@ -1392,8 +1392,16 @@ auth:
3660 + return 0;
3661 +
3662 + encrypt:
3663 +- if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
3664 ++ if (test_bit(HCI_CONN_ENCRYPT, &conn->flags)) {
3665 ++ /* Ensure that the encryption key size has been read,
3666 ++ * otherwise stall the upper layer responses.
3667 ++ */
3668 ++ if (!conn->enc_key_size)
3669 ++ return 0;
3670 ++
3671 ++ /* Nothing else needed, all requirements are met */
3672 + return 1;
3673 ++ }
3674 +
3675 + hci_conn_encrypt(conn);
3676 + return 0;
3677 +diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
3678 +index b53acd6c9a3d..9f77432dbe38 100644
3679 +--- a/net/bluetooth/l2cap_core.c
3680 ++++ b/net/bluetooth/l2cap_core.c
3681 +@@ -1341,6 +1341,21 @@ static void l2cap_request_info(struct l2cap_conn *conn)
3682 + sizeof(req), &req);
3683 + }
3684 +
3685 ++static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
3686 ++{
3687 ++ /* The minimum encryption key size needs to be enforced by the
3688 ++ * host stack before establishing any L2CAP connections. The
3689 ++ * specification in theory allows a minimum of 1, but to align
3690 ++ * BR/EDR and LE transports, a minimum of 7 is chosen.
3691 ++ *
3692 ++ * This check might also be called for unencrypted connections
3693 ++ * that have no key size requirements. Ensure that the link is
3694 ++ * actually encrypted before enforcing a key size.
3695 ++ */
3696 ++ return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
3697 ++ hcon->enc_key_size > HCI_MIN_ENC_KEY_SIZE);
3698 ++}
3699 ++
3700 + static void l2cap_do_start(struct l2cap_chan *chan)
3701 + {
3702 + struct l2cap_conn *conn = chan->conn;
3703 +@@ -1358,9 +1373,14 @@ static void l2cap_do_start(struct l2cap_chan *chan)
3704 + if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
3705 + return;
3706 +
3707 +- if (l2cap_chan_check_security(chan, true) &&
3708 +- __l2cap_no_conn_pending(chan))
3709 ++ if (!l2cap_chan_check_security(chan, true) ||
3710 ++ !__l2cap_no_conn_pending(chan))
3711 ++ return;
3712 ++
3713 ++ if (l2cap_check_enc_key_size(conn->hcon))
3714 + l2cap_start_connection(chan);
3715 ++ else
3716 ++ __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
3717 + }
3718 +
3719 + static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
3720 +@@ -1439,7 +1459,10 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
3721 + continue;
3722 + }
3723 +
3724 +- l2cap_start_connection(chan);
3725 ++ if (l2cap_check_enc_key_size(conn->hcon))
3726 ++ l2cap_start_connection(chan);
3727 ++ else
3728 ++ l2cap_chan_close(chan, ECONNREFUSED);
3729 +
3730 + } else if (chan->state == BT_CONNECT2) {
3731 + struct l2cap_conn_rsp rsp;
3732 +@@ -7490,7 +7513,7 @@ static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
3733 + }
3734 +
3735 + if (chan->state == BT_CONNECT) {
3736 +- if (!status)
3737 ++ if (!status && l2cap_check_enc_key_size(hcon))
3738 + l2cap_start_connection(chan);
3739 + else
3740 + __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
3741 +@@ -7499,7 +7522,7 @@ static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
3742 + struct l2cap_conn_rsp rsp;
3743 + __u16 res, stat;
3744 +
3745 +- if (!status) {
3746 ++ if (!status && l2cap_check_enc_key_size(hcon)) {
3747 + if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
3748 + res = L2CAP_CR_PEND;
3749 + stat = L2CAP_CS_AUTHOR_PEND;
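
l2cap_check_enc_key_size() gates every l2cap_start_connection() call site above: unencrypted links pass through, and encrypted links must report a key larger than HCI_MIN_ENC_KEY_SIZE (7). A standalone restatement of that predicate, using the same strict comparison the hunk uses:

    #include <stdbool.h>
    #include <stdio.h>

    #define MIN_ENC_KEY_SIZE 7

    /* Same shape as l2cap_check_enc_key_size() above: unencrypted links
     * pass, encrypted links need a key longer than the minimum. */
    static bool check_enc_key_size(bool encrypted, unsigned int key_size)
    {
        return !encrypted || key_size > MIN_ENC_KEY_SIZE;
    }

    int main(void)
    {
        printf("plaintext              -> %d\n", check_enc_key_size(false, 0));
        printf("encrypted, 7-byte key  -> %d\n", check_enc_key_size(true, 7));
        printf("encrypted, 16-byte key -> %d\n", check_enc_key_size(true, 16));
        return 0;
    }
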
3750 +diff --git a/net/can/af_can.c b/net/can/af_can.c
3751 +index 1684ba5b51eb..e386d654116d 100644
3752 +--- a/net/can/af_can.c
3753 ++++ b/net/can/af_can.c
3754 +@@ -105,6 +105,7 @@ EXPORT_SYMBOL(can_ioctl);
3755 + static void can_sock_destruct(struct sock *sk)
3756 + {
3757 + skb_queue_purge(&sk->sk_receive_queue);
3758 ++ skb_queue_purge(&sk->sk_error_queue);
3759 + }
3760 +
3761 + static const struct can_proto *can_get_proto(int protocol)
3762 +diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
3763 +index e170f986d226..c875d45f1e1d 100644
3764 +--- a/net/mac80211/ieee80211_i.h
3765 ++++ b/net/mac80211/ieee80211_i.h
3766 +@@ -2222,6 +2222,9 @@ void ieee80211_tdls_cancel_channel_switch(struct wiphy *wiphy,
3767 + const u8 *addr);
3768 + void ieee80211_teardown_tdls_peers(struct ieee80211_sub_if_data *sdata);
3769 + void ieee80211_tdls_chsw_work(struct work_struct *wk);
3770 ++void ieee80211_tdls_handle_disconnect(struct ieee80211_sub_if_data *sdata,
3771 ++ const u8 *peer, u16 reason);
3772 ++const char *ieee80211_get_reason_code_string(u16 reason_code);
3773 +
3774 + extern const struct ethtool_ops ieee80211_ethtool_ops;
3775 +
3776 +diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
3777 +index b7a9fe3d5fcb..383b0df100e4 100644
3778 +--- a/net/mac80211/mlme.c
3779 ++++ b/net/mac80211/mlme.c
3780 +@@ -2963,7 +2963,7 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
3781 + #define case_WLAN(type) \
3782 + case WLAN_REASON_##type: return #type
3783 +
3784 +-static const char *ieee80211_get_reason_code_string(u16 reason_code)
3785 ++const char *ieee80211_get_reason_code_string(u16 reason_code)
3786 + {
3787 + switch (reason_code) {
3788 + case_WLAN(UNSPECIFIED);
3789 +@@ -3028,6 +3028,11 @@ static void ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
3790 + if (len < 24 + 2)
3791 + return;
3792 +
3793 ++ if (!ether_addr_equal(mgmt->bssid, mgmt->sa)) {
3794 ++ ieee80211_tdls_handle_disconnect(sdata, mgmt->sa, reason_code);
3795 ++ return;
3796 ++ }
3797 ++
3798 + if (ifmgd->associated &&
3799 + ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid)) {
3800 + const u8 *bssid = ifmgd->associated->bssid;
3801 +@@ -3077,6 +3082,11 @@ static void ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata,
3802 +
3803 + reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code);
3804 +
3805 ++ if (!ether_addr_equal(mgmt->bssid, mgmt->sa)) {
3806 ++ ieee80211_tdls_handle_disconnect(sdata, mgmt->sa, reason_code);
3807 ++ return;
3808 ++ }
3809 ++
3810 + sdata_info(sdata, "disassociated from %pM (Reason: %u=%s)\n",
3811 + mgmt->sa, reason_code,
3812 + ieee80211_get_reason_code_string(reason_code));
3813 +diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
3814 +index bf0b187f994e..1a1f850b76fd 100644
3815 +--- a/net/mac80211/rx.c
3816 ++++ b/net/mac80211/rx.c
3817 +@@ -3823,6 +3823,8 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
3818 + case NL80211_IFTYPE_STATION:
3819 + if (!bssid && !sdata->u.mgd.use_4addr)
3820 + return false;
3821 ++ if (ieee80211_is_robust_mgmt_frame(skb) && !rx->sta)
3822 ++ return false;
3823 + if (multicast)
3824 + return true;
3825 + return ether_addr_equal(sdata->vif.addr, hdr->addr1);
3826 +diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c
3827 +index d30690d79a58..fcc5cd49c3ac 100644
3828 +--- a/net/mac80211/tdls.c
3829 ++++ b/net/mac80211/tdls.c
3830 +@@ -1994,3 +1994,26 @@ void ieee80211_tdls_chsw_work(struct work_struct *wk)
3831 + }
3832 + rtnl_unlock();
3833 + }
3834 ++
3835 ++void ieee80211_tdls_handle_disconnect(struct ieee80211_sub_if_data *sdata,
3836 ++ const u8 *peer, u16 reason)
3837 ++{
3838 ++ struct ieee80211_sta *sta;
3839 ++
3840 ++ rcu_read_lock();
3841 ++ sta = ieee80211_find_sta(&sdata->vif, peer);
3842 ++ if (!sta || !sta->tdls) {
3843 ++ rcu_read_unlock();
3844 ++ return;
3845 ++ }
3846 ++ rcu_read_unlock();
3847 ++
3848 ++ tdls_dbg(sdata, "disconnected from TDLS peer %pM (Reason: %u=%s)\n",
3849 ++ peer, reason,
3850 ++ ieee80211_get_reason_code_string(reason));
3851 ++
3852 ++ ieee80211_tdls_oper_request(&sdata->vif, peer,
3853 ++ NL80211_TDLS_TEARDOWN,
3854 ++ WLAN_REASON_TDLS_TEARDOWN_UNREACHABLE,
3855 ++ GFP_ATOMIC);
3856 ++}
3857 +diff --git a/net/mac80211/util.c b/net/mac80211/util.c
3858 +index 4c1655972565..447a55ae9df1 100644
3859 +--- a/net/mac80211/util.c
3860 ++++ b/net/mac80211/util.c
3861 +@@ -3757,7 +3757,9 @@ int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata,
3862 + }
3863 +
3864 + /* Always allow software iftypes */
3865 +- if (local->hw.wiphy->software_iftypes & BIT(iftype)) {
3866 ++ if (local->hw.wiphy->software_iftypes & BIT(iftype) ||
3867 ++ (iftype == NL80211_IFTYPE_AP_VLAN &&
3868 ++ local->hw.wiphy->flags & WIPHY_FLAG_4ADDR_AP)) {
3869 + if (radar_detect)
3870 + return -EINVAL;
3871 + return 0;
3872 +diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
3873 +index 58d0b258b684..5dd48f0a4b1b 100644
3874 +--- a/net/mac80211/wpa.c
3875 ++++ b/net/mac80211/wpa.c
3876 +@@ -1175,7 +1175,7 @@ ieee80211_crypto_aes_gmac_decrypt(struct ieee80211_rx_data *rx)
3877 + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
3878 + struct ieee80211_key *key = rx->key;
3879 + struct ieee80211_mmie_16 *mmie;
3880 +- u8 aad[GMAC_AAD_LEN], mic[GMAC_MIC_LEN], ipn[6], nonce[GMAC_NONCE_LEN];
3881 ++ u8 aad[GMAC_AAD_LEN], *mic, ipn[6], nonce[GMAC_NONCE_LEN];
3882 + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
3883 +
3884 + if (!ieee80211_is_mgmt(hdr->frame_control))
3885 +@@ -1206,13 +1206,18 @@ ieee80211_crypto_aes_gmac_decrypt(struct ieee80211_rx_data *rx)
3886 + memcpy(nonce, hdr->addr2, ETH_ALEN);
3887 + memcpy(nonce + ETH_ALEN, ipn, 6);
3888 +
3889 ++ mic = kmalloc(GMAC_MIC_LEN, GFP_ATOMIC);
3890 ++ if (!mic)
3891 ++ return RX_DROP_UNUSABLE;
3892 + if (ieee80211_aes_gmac(key->u.aes_gmac.tfm, aad, nonce,
3893 + skb->data + 24, skb->len - 24,
3894 + mic) < 0 ||
3895 + crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) {
3896 + key->u.aes_gmac.icverrors++;
3897 ++ kfree(mic);
3898 + return RX_DROP_UNUSABLE;
3899 + }
3900 ++ kfree(mic);
3901 + }
3902 +
3903 + memcpy(key->u.aes_gmac.rx_pn, ipn, 6);
3904 +diff --git a/net/wireless/core.c b/net/wireless/core.c
3905 +index b36ad8efb5e5..c58acca09301 100644
3906 +--- a/net/wireless/core.c
3907 ++++ b/net/wireless/core.c
3908 +@@ -513,7 +513,7 @@ use_default_name:
3909 + &rdev->rfkill_ops, rdev);
3910 +
3911 + if (!rdev->rfkill) {
3912 +- kfree(rdev);
3913 ++ wiphy_free(&rdev->wiphy);
3914 + return NULL;
3915 + }
3916 +
3917 +@@ -1396,8 +1396,12 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
3918 + }
3919 + break;
3920 + case NETDEV_PRE_UP:
3921 +- if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype)))
3922 ++ if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype)) &&
3923 ++ !(wdev->iftype == NL80211_IFTYPE_AP_VLAN &&
3924 ++ rdev->wiphy.flags & WIPHY_FLAG_4ADDR_AP &&
3925 ++ wdev->use_4addr))
3926 + return notifier_from_errno(-EOPNOTSUPP);
3927 ++
3928 + if (rfkill_blocked(rdev->rfkill))
3929 + return notifier_from_errno(-ERFKILL);
3930 + break;
3931 +diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
3932 +index d2a7459a5da4..d1553a661336 100644
3933 +--- a/net/wireless/nl80211.c
3934 ++++ b/net/wireless/nl80211.c
3935 +@@ -3385,8 +3385,7 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
3936 + if (info->attrs[NL80211_ATTR_IFTYPE])
3937 + type = nla_get_u32(info->attrs[NL80211_ATTR_IFTYPE]);
3938 +
3939 +- if (!rdev->ops->add_virtual_intf ||
3940 +- !(rdev->wiphy.interface_modes & (1 << type)))
3941 ++ if (!rdev->ops->add_virtual_intf)
3942 + return -EOPNOTSUPP;
3943 +
3944 + if ((type == NL80211_IFTYPE_P2P_DEVICE || type == NL80211_IFTYPE_NAN ||
3945 +@@ -3405,6 +3404,11 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
3946 + return err;
3947 + }
3948 +
3949 ++ if (!(rdev->wiphy.interface_modes & (1 << type)) &&
3950 ++ !(type == NL80211_IFTYPE_AP_VLAN && params.use_4addr &&
3951 ++ rdev->wiphy.flags & WIPHY_FLAG_4ADDR_AP))
3952 ++ return -EOPNOTSUPP;
3953 ++
3954 + err = nl80211_parse_mon_options(rdev, type, info, &params);
3955 + if (err < 0)
3956 + return err;
3957 +@@ -4800,8 +4804,10 @@ static int nl80211_send_station(struct sk_buff *msg, u32 cmd, u32 portid,
3958 + struct nlattr *sinfoattr, *bss_param;
3959 +
3960 + hdr = nl80211hdr_put(msg, portid, seq, flags, cmd);
3961 +- if (!hdr)
3962 ++ if (!hdr) {
3963 ++ cfg80211_sinfo_release_content(sinfo);
3964 + return -1;
3965 ++ }
3966 +
3967 + if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
3968 + nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr) ||
3969 +diff --git a/scripts/checkstack.pl b/scripts/checkstack.pl
3970 +index 122aef5e4e14..371bd17a4983 100755
3971 +--- a/scripts/checkstack.pl
3972 ++++ b/scripts/checkstack.pl
3973 +@@ -46,7 +46,7 @@ my (@stack, $re, $dre, $x, $xs, $funcre);
3974 + $x = "[0-9a-f]"; # hex character
3975 + $xs = "[0-9a-f ]"; # hex character or space
3976 + $funcre = qr/^$x* <(.*)>:$/;
3977 +- if ($arch eq 'aarch64') {
3978 ++ if ($arch =~ '^(aarch|arm)64$') {
3979 + #ffffffc0006325cc: a9bb7bfd stp x29, x30, [sp, #-80]!
3980 + #a110: d11643ff sub sp, sp, #0x590
3981 + $re = qr/^.*stp.*sp, \#-([0-9]{1,8})\]\!/o;
3982 +diff --git a/scripts/package/Makefile b/scripts/package/Makefile
3983 +index 2c6de21e5152..fd854439de0f 100644
3984 +--- a/scripts/package/Makefile
3985 ++++ b/scripts/package/Makefile
3986 +@@ -103,7 +103,7 @@ clean-dirs += $(objtree)/snap/
3987 + # ---------------------------------------------------------------------------
3988 + tar%pkg: FORCE
3989 + $(MAKE) -f $(srctree)/Makefile
3990 +- $(CONFIG_SHELL) $(srctree)/scripts/package/buildtar $@
3991 ++ +$(CONFIG_SHELL) $(srctree)/scripts/package/buildtar $@
3992 +
3993 + clean-dirs += $(objtree)/tar-install/
3994 +
3995 +diff --git a/security/apparmor/include/policy.h b/security/apparmor/include/policy.h
3996 +index 8e6707c837be..06ed62f00b4b 100644
3997 +--- a/security/apparmor/include/policy.h
3998 ++++ b/security/apparmor/include/policy.h
3999 +@@ -217,7 +217,16 @@ static inline struct aa_profile *aa_get_newest_profile(struct aa_profile *p)
4000 + return labels_profile(aa_get_newest_label(&p->label));
4001 + }
4002 +
4003 +-#define PROFILE_MEDIATES(P, T) ((P)->policy.start[(unsigned char) (T)])
4004 ++static inline unsigned int PROFILE_MEDIATES(struct aa_profile *profile,
4005 ++ unsigned char class)
4006 ++{
4007 ++ if (class <= AA_CLASS_LAST)
4008 ++ return profile->policy.start[class];
4009 ++ else
4010 ++ return aa_dfa_match_len(profile->policy.dfa,
4011 ++ profile->policy.start[0], &class, 1);
4012 ++}
4013 ++
4014 + static inline unsigned int PROFILE_MEDIATES_AF(struct aa_profile *profile,
4015 + u16 AF) {
4016 + unsigned int state = PROFILE_MEDIATES(profile, AA_CLASS_NET);
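
The AppArmor change turns PROFILE_MEDIATES() from an unchecked table-index macro into a static inline that bounds-checks the class before subscripting, falling back to a DFA walk for out-of-range classes. A sketch of the same bounds-check-with-fallback shape; the table contents and fallback body are invented for illustration:

    #include <stdio.h>

    #define CLASS_LAST 4

    static const unsigned int start_states[CLASS_LAST + 1] = { 1, 2, 3, 4, 5 };

    /* Hypothetical fallback for classes beyond the table, standing in
     * for the aa_dfa_match_len() walk the real code performs. */
    static unsigned int fallback_lookup(unsigned char class)
    {
        printf("class %u out of table range, using fallback\n", class);
        return 0;
    }

    /* Bounds-checked replacement for an unchecked table[class] macro,
     * following the shape of the PROFILE_MEDIATES() change above. */
    static unsigned int mediates(unsigned char class)
    {
        if (class <= CLASS_LAST)
            return start_states[class];
        return fallback_lookup(class);
    }

    int main(void)
    {
        printf("class 2   -> state %u\n", mediates(2));
        printf("class 200 -> state %u\n", mediates(200));
        return 0;
    }
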
4017 +diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
4018 +index f6c2bcb2ab14..f1b2202f725e 100644
4019 +--- a/security/apparmor/policy_unpack.c
4020 ++++ b/security/apparmor/policy_unpack.c
4021 +@@ -223,16 +223,21 @@ static void *kvmemdup(const void *src, size_t len)
4022 + static size_t unpack_u16_chunk(struct aa_ext *e, char **chunk)
4023 + {
4024 + size_t size = 0;
4025 ++ void *pos = e->pos;
4026 +
4027 + if (!inbounds(e, sizeof(u16)))
4028 +- return 0;
4029 ++ goto fail;
4030 + size = le16_to_cpu(get_unaligned((__le16 *) e->pos));
4031 + e->pos += sizeof(__le16);
4032 + if (!inbounds(e, size))
4033 +- return 0;
4034 ++ goto fail;
4035 + *chunk = e->pos;
4036 + e->pos += size;
4037 + return size;
4038 ++
4039 ++fail:
4040 ++ e->pos = pos;
4041 ++ return 0;
4042 + }
4043 +
4044 + /* unpack control byte */
4045 +@@ -276,7 +281,7 @@ static bool unpack_nameX(struct aa_ext *e, enum aa_code code, const char *name)
4046 + char *tag = NULL;
4047 + size_t size = unpack_u16_chunk(e, &tag);
4048 + /* if a name is specified it must match. otherwise skip tag */
4049 +- if (name && (!size || strcmp(name, tag)))
4050 ++ if (name && (!size || tag[size-1] != '\0' || strcmp(name, tag)))
4051 + goto fail;
4052 + } else if (name) {
4053 + /* if a name is specified and there is no name tag fail */
4054 +@@ -294,62 +299,84 @@ fail:
4055 +
4056 + static bool unpack_u8(struct aa_ext *e, u8 *data, const char *name)
4057 + {
4058 ++ void *pos = e->pos;
4059 ++
4060 + if (unpack_nameX(e, AA_U8, name)) {
4061 + if (!inbounds(e, sizeof(u8)))
4062 +- return 0;
4063 ++ goto fail;
4064 + if (data)
4065 + *data = get_unaligned((u8 *)e->pos);
4066 + e->pos += sizeof(u8);
4067 + return 1;
4068 + }
4069 ++
4070 ++fail:
4071 ++ e->pos = pos;
4072 + return 0;
4073 + }
4074 +
4075 + static bool unpack_u32(struct aa_ext *e, u32 *data, const char *name)
4076 + {
4077 ++ void *pos = e->pos;
4078 ++
4079 + if (unpack_nameX(e, AA_U32, name)) {
4080 + if (!inbounds(e, sizeof(u32)))
4081 +- return 0;
4082 ++ goto fail;
4083 + if (data)
4084 + *data = le32_to_cpu(get_unaligned((__le32 *) e->pos));
4085 + e->pos += sizeof(u32);
4086 + return 1;
4087 + }
4088 ++
4089 ++fail:
4090 ++ e->pos = pos;
4091 + return 0;
4092 + }
4093 +
4094 + static bool unpack_u64(struct aa_ext *e, u64 *data, const char *name)
4095 + {
4096 ++ void *pos = e->pos;
4097 ++
4098 + if (unpack_nameX(e, AA_U64, name)) {
4099 + if (!inbounds(e, sizeof(u64)))
4100 +- return 0;
4101 ++ goto fail;
4102 + if (data)
4103 + *data = le64_to_cpu(get_unaligned((__le64 *) e->pos));
4104 + e->pos += sizeof(u64);
4105 + return 1;
4106 + }
4107 ++
4108 ++fail:
4109 ++ e->pos = pos;
4110 + return 0;
4111 + }
4112 +
4113 + static size_t unpack_array(struct aa_ext *e, const char *name)
4114 + {
4115 ++ void *pos = e->pos;
4116 ++
4117 + if (unpack_nameX(e, AA_ARRAY, name)) {
4118 + int size;
4119 + if (!inbounds(e, sizeof(u16)))
4120 +- return 0;
4121 ++ goto fail;
4122 + size = (int)le16_to_cpu(get_unaligned((__le16 *) e->pos));
4123 + e->pos += sizeof(u16);
4124 + return size;
4125 + }
4126 ++
4127 ++fail:
4128 ++ e->pos = pos;
4129 + return 0;
4130 + }
4131 +
4132 + static size_t unpack_blob(struct aa_ext *e, char **blob, const char *name)
4133 + {
4134 ++ void *pos = e->pos;
4135 ++
4136 + if (unpack_nameX(e, AA_BLOB, name)) {
4137 + u32 size;
4138 + if (!inbounds(e, sizeof(u32)))
4139 +- return 0;
4140 ++ goto fail;
4141 + size = le32_to_cpu(get_unaligned((__le32 *) e->pos));
4142 + e->pos += sizeof(u32);
4143 + if (inbounds(e, (size_t) size)) {
4144 +@@ -358,6 +385,9 @@ static size_t unpack_blob(struct aa_ext *e, char **blob, const char *name)
4145 + return size;
4146 + }
4147 + }
4148 ++
4149 ++fail:
4150 ++ e->pos = pos;
4151 + return 0;
4152 + }
4153 +
4154 +@@ -374,9 +404,10 @@ static int unpack_str(struct aa_ext *e, const char **string, const char *name)
4155 + if (src_str[size - 1] != 0)
4156 + goto fail;
4157 + *string = src_str;
4158 ++
4159 ++ return size;
4160 + }
4161 + }
4162 +- return size;
4163 +
4164 + fail:
4165 + e->pos = pos;
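
Each unpack helper above now saves e->pos on entry and restores it on any failure, so a failed optional element leaves the parse cursor exactly where it was and the caller can probe for the next element. A self-contained sketch of that save/restore cursor idiom on a byte buffer:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct ext {
        const uint8_t *pos;
        const uint8_t *end;
    };

    static bool inbounds(struct ext *e, size_t n)
    {
        return (size_t)(e->end - e->pos) >= n;
    }

    /* Read a little-endian u16; on any failure the cursor is rolled
     * back, mirroring the 'pos = e->pos ... fail: e->pos = pos'
     * pattern introduced above. */
    static bool unpack_u16(struct ext *e, uint16_t *out)
    {
        const uint8_t *pos = e->pos;   /* save cursor */

        if (!inbounds(e, sizeof(uint16_t)))
            goto fail;
        *out = (uint16_t)(e->pos[0] | (e->pos[1] << 8));
        e->pos += sizeof(uint16_t);
        return true;

    fail:
        e->pos = pos;                  /* restore cursor untouched */
        return false;
    }

    int main(void)
    {
        uint8_t buf[] = { 0x34, 0x12, 0xff };
        struct ext e = { buf, buf + sizeof(buf) };
        uint16_t v;

        while (unpack_u16(&e, &v))
            printf("read 0x%04x\n", v);
        printf("stopped with %zu byte(s) left\n", (size_t)(e.end - e.pos));
        return 0;
    }
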
4166 +diff --git a/tools/testing/selftests/cgroup/test_core.c b/tools/testing/selftests/cgroup/test_core.c
4167 +index be59f9c34ea2..79053a4f4783 100644
4168 +--- a/tools/testing/selftests/cgroup/test_core.c
4169 ++++ b/tools/testing/selftests/cgroup/test_core.c
4170 +@@ -198,7 +198,7 @@ static int test_cgcore_no_internal_process_constraint_on_threads(const char *roo
4171 + char *parent = NULL, *child = NULL;
4172 +
4173 + if (cg_read_strstr(root, "cgroup.controllers", "cpu") ||
4174 +- cg_read_strstr(root, "cgroup.subtree_control", "cpu")) {
4175 ++ cg_write(root, "cgroup.subtree_control", "+cpu")) {
4176 + ret = KSFT_SKIP;
4177 + goto cleanup;
4178 + }
4179 +@@ -376,6 +376,11 @@ int main(int argc, char *argv[])
4180 +
4181 + if (cg_find_unified_root(root, sizeof(root)))
4182 + ksft_exit_skip("cgroup v2 isn't mounted\n");
4183 ++
4184 ++ if (cg_read_strstr(root, "cgroup.subtree_control", "memory"))
4185 ++ if (cg_write(root, "cgroup.subtree_control", "+memory"))
4186 ++ ksft_exit_skip("Failed to set memory controller\n");
4187 ++
4188 + for (i = 0; i < ARRAY_SIZE(tests); i++) {
4189 + switch (tests[i].fn(root)) {
4190 + case KSFT_PASS:
4191 +diff --git a/tools/testing/selftests/cgroup/test_memcontrol.c b/tools/testing/selftests/cgroup/test_memcontrol.c
4192 +index 6f339882a6ca..c19a97dd02d4 100644
4193 +--- a/tools/testing/selftests/cgroup/test_memcontrol.c
4194 ++++ b/tools/testing/selftests/cgroup/test_memcontrol.c
4195 +@@ -1205,6 +1205,10 @@ int main(int argc, char **argv)
4196 + if (cg_read_strstr(root, "cgroup.controllers", "memory"))
4197 + ksft_exit_skip("memory controller isn't available\n");
4198 +
4199 ++ if (cg_read_strstr(root, "cgroup.subtree_control", "memory"))
4200 ++ if (cg_write(root, "cgroup.subtree_control", "+memory"))
4201 ++ ksft_exit_skip("Failed to set memory controller\n");
4202 ++
4203 + for (i = 0; i < ARRAY_SIZE(tests); i++) {
4204 + switch (tests[i].fn(root)) {
4205 + case KSFT_PASS:
4206 +diff --git a/tools/testing/selftests/net/forwarding/router_broadcast.sh b/tools/testing/selftests/net/forwarding/router_broadcast.sh
4207 +index 9a678ece32b4..4eac0a06f451 100755
4208 +--- a/tools/testing/selftests/net/forwarding/router_broadcast.sh
4209 ++++ b/tools/testing/selftests/net/forwarding/router_broadcast.sh
4210 +@@ -145,16 +145,19 @@ bc_forwarding_disable()
4211 + {
4212 + sysctl_set net.ipv4.conf.all.bc_forwarding 0
4213 + sysctl_set net.ipv4.conf.$rp1.bc_forwarding 0
4214 ++ sysctl_set net.ipv4.conf.$rp2.bc_forwarding 0
4215 + }
4216 +
4217 + bc_forwarding_enable()
4218 + {
4219 + sysctl_set net.ipv4.conf.all.bc_forwarding 1
4220 + sysctl_set net.ipv4.conf.$rp1.bc_forwarding 1
4221 ++ sysctl_set net.ipv4.conf.$rp2.bc_forwarding 1
4222 + }
4223 +
4224 + bc_forwarding_restore()
4225 + {
4226 ++ sysctl_restore net.ipv4.conf.$rp2.bc_forwarding
4227 + sysctl_restore net.ipv4.conf.$rp1.bc_forwarding
4228 + sysctl_restore net.ipv4.conf.all.bc_forwarding
4229 + }
4230 +@@ -171,7 +174,7 @@ ping_test_from()
4231 + log_info "ping $dip, expected reply from $from"
4232 + ip vrf exec $(master_name_get $oif) \
4233 + $PING -I $oif $dip -c 10 -i 0.1 -w $PING_TIMEOUT -b 2>&1 \
4234 +- | grep $from &> /dev/null
4235 ++ | grep "bytes from $from" > /dev/null
4236 + check_err_fail $fail $?
4237 + }
4238 +
4239 +diff --git a/tools/testing/selftests/pidfd/pidfd_test.c b/tools/testing/selftests/pidfd/pidfd_test.c
4240 +index d59378a93782..20323f55613a 100644
4241 +--- a/tools/testing/selftests/pidfd/pidfd_test.c
4242 ++++ b/tools/testing/selftests/pidfd/pidfd_test.c
4243 +@@ -16,6 +16,10 @@
4244 +
4245 + #include "../kselftest.h"
4246 +
4247 ++#ifndef __NR_pidfd_send_signal
4248 ++#define __NR_pidfd_send_signal -1
4249 ++#endif
4250 ++
4251 + static inline int sys_pidfd_send_signal(int pidfd, int sig, siginfo_t *info,
4252 + unsigned int flags)
4253 + {
4254 +diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile
4255 +index e13eb6cc8901..05306c58ff9f 100644
4256 +--- a/tools/testing/selftests/vm/Makefile
4257 ++++ b/tools/testing/selftests/vm/Makefile
4258 +@@ -25,6 +25,8 @@ TEST_GEN_FILES += virtual_address_range
4259 +
4260 + TEST_PROGS := run_vmtests
4261 +
4262 ++TEST_FILES := test_vmalloc.sh
4263 ++
4264 + KSFT_KHDR_INSTALL := 1
4265 + include ../lib.mk
4266 +
4267 +diff --git a/tools/testing/selftests/vm/userfaultfd.c b/tools/testing/selftests/vm/userfaultfd.c
4268 +index 5d1db824f73a..b3e6497b080c 100644
4269 +--- a/tools/testing/selftests/vm/userfaultfd.c
4270 ++++ b/tools/testing/selftests/vm/userfaultfd.c
4271 +@@ -123,7 +123,7 @@ static void usage(void)
4272 + fprintf(stderr, "Supported <test type>: anon, hugetlb, "
4273 + "hugetlb_shared, shmem\n\n");
4274 + fprintf(stderr, "Examples:\n\n");
4275 +- fprintf(stderr, examples);
4276 ++ fprintf(stderr, "%s", examples);
4277 + exit(1);
4278 + }
4279 +
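
The userfaultfd fix passes the examples text through a "%s" format instead of using it as the format string itself, so a literal '%' in the message can no longer be parsed as a conversion specifier. A minimal illustration; only the safe form is executed:

    #include <stdio.h>

    int main(void)
    {
        const char *examples = "run with coverage at 100%\n";

        /* Unsafe: fprintf(stderr, examples); would parse the '%' here
         * as a conversion specifier (undefined behavior for "%\n"). */

        /* Safe: the data is an argument, never a format string. */
        fprintf(stderr, "%s", examples);
        return 0;
    }
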