Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.19 commit in: /
Date: Wed, 03 Apr 2019 10:59:11
Message-Id: 1554289130.ccb4086de04c26ae9cd4a06cbaf687b32407504c.mpagano@gentoo
1 commit: ccb4086de04c26ae9cd4a06cbaf687b32407504c
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Wed Apr 3 10:58:50 2019 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Wed Apr 3 10:58:50 2019 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=ccb4086d
7
8 Linux patch 4.19.33
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1032_linux-4.19.33.patch | 4531 ++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 4535 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index 1e50242..fe8b3d1 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -171,6 +171,10 @@ Patch: 1031_linux-4.19.32.patch
21 From: http://www.kernel.org
22 Desc: Linux 4.19.32
23
24 +Patch: 1032_linux-4.19.33.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 4.19.33
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1032_linux-4.19.33.patch b/1032_linux-4.19.33.patch
33 new file mode 100644
34 index 0000000..018985e
35 --- /dev/null
36 +++ b/1032_linux-4.19.33.patch
37 @@ -0,0 +1,4531 @@
38 +diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
39 +index 647f94128a85..8e16017ff397 100644
40 +--- a/Documentation/virtual/kvm/api.txt
41 ++++ b/Documentation/virtual/kvm/api.txt
42 +@@ -13,7 +13,7 @@ of a virtual machine. The ioctls belong to three classes
43 +
44 + - VM ioctls: These query and set attributes that affect an entire virtual
45 + machine, for example memory layout. In addition a VM ioctl is used to
46 +- create virtual cpus (vcpus).
47 ++ create virtual cpus (vcpus) and devices.
48 +
49 + Only run VM ioctls from the same process (address space) that was used
50 + to create the VM.
51 +@@ -24,6 +24,11 @@ of a virtual machine. The ioctls belong to three classes
52 + Only run vcpu ioctls from the same thread that was used to create the
53 + vcpu.
54 +
55 ++ - device ioctls: These query and set attributes that control the operation
56 ++ of a single device.
57 ++
58 ++ device ioctls must be issued from the same process (address space) that
59 ++ was used to create the VM.
60 +
61 + 2. File descriptors
62 + -------------------
63 +@@ -32,10 +37,11 @@ The kvm API is centered around file descriptors. An initial
64 + open("/dev/kvm") obtains a handle to the kvm subsystem; this handle
65 + can be used to issue system ioctls. A KVM_CREATE_VM ioctl on this
66 + handle will create a VM file descriptor which can be used to issue VM
67 +-ioctls. A KVM_CREATE_VCPU ioctl on a VM fd will create a virtual cpu
68 +-and return a file descriptor pointing to it. Finally, ioctls on a vcpu
69 +-fd can be used to control the vcpu, including the important task of
70 +-actually running guest code.
71 ++ioctls. A KVM_CREATE_VCPU or KVM_CREATE_DEVICE ioctl on a VM fd will
72 ++create a virtual cpu or device and return a file descriptor pointing to
73 ++the new resource. Finally, ioctls on a vcpu or device fd can be used
74 ++to control the vcpu or device. For vcpus, this includes the important
75 ++task of actually running guest code.
76 +
77 + In general file descriptors can be migrated among processes by means
78 + of fork() and the SCM_RIGHTS facility of unix domain socket. These
79 +diff --git a/Makefile b/Makefile
80 +index d66c433df5b1..8de5fab711d8 100644
81 +--- a/Makefile
82 ++++ b/Makefile
83 +@@ -1,7 +1,7 @@
84 + # SPDX-License-Identifier: GPL-2.0
85 + VERSION = 4
86 + PATCHLEVEL = 19
87 +-SUBLEVEL = 32
88 ++SUBLEVEL = 33
89 + EXTRAVERSION =
90 + NAME = "People's Front"
91 +
92 +@@ -948,9 +948,11 @@ mod_sign_cmd = true
93 + endif
94 + export mod_sign_cmd
95 +
96 ++HOST_LIBELF_LIBS = $(shell pkg-config libelf --libs 2>/dev/null || echo -lelf)
97 ++
98 + ifdef CONFIG_STACK_VALIDATION
99 + has_libelf := $(call try-run,\
100 +- echo "int main() {}" | $(HOSTCC) -xc -o /dev/null -lelf -,1,0)
101 ++ echo "int main() {}" | $(HOSTCC) -xc -o /dev/null $(HOST_LIBELF_LIBS) -,1,0)
102 + ifeq ($(has_libelf),1)
103 + objtool_target := tools/objtool FORCE
104 + else
105 +diff --git a/arch/arm/mach-imx/cpuidle-imx6q.c b/arch/arm/mach-imx/cpuidle-imx6q.c
106 +index bfeb25aaf9a2..326e870d7123 100644
107 +--- a/arch/arm/mach-imx/cpuidle-imx6q.c
108 ++++ b/arch/arm/mach-imx/cpuidle-imx6q.c
109 +@@ -16,30 +16,23 @@
110 + #include "cpuidle.h"
111 + #include "hardware.h"
112 +
113 +-static atomic_t master = ATOMIC_INIT(0);
114 +-static DEFINE_SPINLOCK(master_lock);
115 ++static int num_idle_cpus = 0;
116 ++static DEFINE_SPINLOCK(cpuidle_lock);
117 +
118 + static int imx6q_enter_wait(struct cpuidle_device *dev,
119 + struct cpuidle_driver *drv, int index)
120 + {
121 +- if (atomic_inc_return(&master) == num_online_cpus()) {
122 +- /*
123 +- * With this lock, we prevent other cpu to exit and enter
124 +- * this function again and become the master.
125 +- */
126 +- if (!spin_trylock(&master_lock))
127 +- goto idle;
128 ++ spin_lock(&cpuidle_lock);
129 ++ if (++num_idle_cpus == num_online_cpus())
130 + imx6_set_lpm(WAIT_UNCLOCKED);
131 +- cpu_do_idle();
132 +- imx6_set_lpm(WAIT_CLOCKED);
133 +- spin_unlock(&master_lock);
134 +- goto done;
135 +- }
136 ++ spin_unlock(&cpuidle_lock);
137 +
138 +-idle:
139 + cpu_do_idle();
140 +-done:
141 +- atomic_dec(&master);
142 ++
143 ++ spin_lock(&cpuidle_lock);
144 ++ if (num_idle_cpus-- == num_online_cpus())
145 ++ imx6_set_lpm(WAIT_CLOCKED);
146 ++ spin_unlock(&cpuidle_lock);
147 +
148 + return index;
149 + }
150 +diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h
151 +index 33b6f9c892c8..40a6c9261a6b 100644
152 +--- a/arch/powerpc/include/asm/feature-fixups.h
153 ++++ b/arch/powerpc/include/asm/feature-fixups.h
154 +@@ -221,6 +221,17 @@ label##3: \
155 + FTR_ENTRY_OFFSET 953b-954b; \
156 + .popsection;
157 +
158 ++#define START_BTB_FLUSH_SECTION \
159 ++955: \
160 ++
161 ++#define END_BTB_FLUSH_SECTION \
162 ++956: \
163 ++ .pushsection __btb_flush_fixup,"a"; \
164 ++ .align 2; \
165 ++957: \
166 ++ FTR_ENTRY_OFFSET 955b-957b; \
167 ++ FTR_ENTRY_OFFSET 956b-957b; \
168 ++ .popsection;
169 +
170 + #ifndef __ASSEMBLY__
171 + #include <linux/types.h>
172 +@@ -230,6 +241,7 @@ extern long __start___stf_entry_barrier_fixup, __stop___stf_entry_barrier_fixup;
173 + extern long __start___stf_exit_barrier_fixup, __stop___stf_exit_barrier_fixup;
174 + extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup;
175 + extern long __start___barrier_nospec_fixup, __stop___barrier_nospec_fixup;
176 ++extern long __start__btb_flush_fixup, __stop__btb_flush_fixup;
177 +
178 + void apply_feature_fixups(void);
179 + void setup_feature_keys(void);
180 +diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
181 +index 665af14850e4..2b7135391231 100644
182 +--- a/arch/powerpc/include/asm/ppc-opcode.h
183 ++++ b/arch/powerpc/include/asm/ppc-opcode.h
184 +@@ -300,6 +300,7 @@
185 + /* Misc instructions for BPF compiler */
186 + #define PPC_INST_LBZ 0x88000000
187 + #define PPC_INST_LD 0xe8000000
188 ++#define PPC_INST_LDX 0x7c00002a
189 + #define PPC_INST_LHZ 0xa0000000
190 + #define PPC_INST_LWZ 0x80000000
191 + #define PPC_INST_LHBRX 0x7c00062c
192 +@@ -307,6 +308,7 @@
193 + #define PPC_INST_STB 0x98000000
194 + #define PPC_INST_STH 0xb0000000
195 + #define PPC_INST_STD 0xf8000000
196 ++#define PPC_INST_STDX 0x7c00012a
197 + #define PPC_INST_STDU 0xf8000001
198 + #define PPC_INST_STW 0x90000000
199 + #define PPC_INST_STWU 0x94000000
200 +diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
201 +index b5d023680801..5c901bf4c505 100644
202 +--- a/arch/powerpc/include/asm/ppc_asm.h
203 ++++ b/arch/powerpc/include/asm/ppc_asm.h
204 +@@ -821,4 +821,14 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
205 + stringify_in_c(.long (_target) - . ;) \
206 + stringify_in_c(.previous)
207 +
208 ++#ifdef CONFIG_PPC_FSL_BOOK3E
209 ++#define BTB_FLUSH(reg) \
210 ++ lis reg,BUCSR_INIT@h; \
211 ++ ori reg,reg,BUCSR_INIT@l; \
212 ++ mtspr SPRN_BUCSR,reg; \
213 ++ isync;
214 ++#else
215 ++#define BTB_FLUSH(reg)
216 ++#endif /* CONFIG_PPC_FSL_BOOK3E */
217 ++
218 + #endif /* _ASM_POWERPC_PPC_ASM_H */
219 +diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
220 +index 1fffbba8d6a5..65676e2325b8 100644
221 +--- a/arch/powerpc/include/asm/setup.h
222 ++++ b/arch/powerpc/include/asm/setup.h
223 +@@ -67,6 +67,13 @@ void do_barrier_nospec_fixups_range(bool enable, void *start, void *end);
224 + static inline void do_barrier_nospec_fixups_range(bool enable, void *start, void *end) { };
225 + #endif
226 +
227 ++#ifdef CONFIG_PPC_FSL_BOOK3E
228 ++void setup_spectre_v2(void);
229 ++#else
230 ++static inline void setup_spectre_v2(void) {};
231 ++#endif
232 ++void do_btb_flush_fixups(void);
233 ++
234 + #endif /* !__ASSEMBLY__ */
235 +
236 + #endif /* _ASM_POWERPC_SETUP_H */
237 +diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
238 +index 2206912ea4f0..c806a3c12592 100644
239 +--- a/arch/powerpc/kernel/entry_64.S
240 ++++ b/arch/powerpc/kernel/entry_64.S
241 +@@ -80,6 +80,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM)
242 + std r0,GPR0(r1)
243 + std r10,GPR1(r1)
244 + beq 2f /* if from kernel mode */
245 ++#ifdef CONFIG_PPC_FSL_BOOK3E
246 ++START_BTB_FLUSH_SECTION
247 ++ BTB_FLUSH(r10)
248 ++END_BTB_FLUSH_SECTION
249 ++#endif
250 + ACCOUNT_CPU_USER_ENTRY(r13, r10, r11)
251 + 2: std r2,GPR2(r1)
252 + std r3,GPR3(r1)
253 +diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
254 +index 6d6e144a28ce..447defdd4503 100644
255 +--- a/arch/powerpc/kernel/exceptions-64e.S
256 ++++ b/arch/powerpc/kernel/exceptions-64e.S
257 +@@ -296,7 +296,8 @@ ret_from_mc_except:
258 + andi. r10,r11,MSR_PR; /* save stack pointer */ \
259 + beq 1f; /* branch around if supervisor */ \
260 + ld r1,PACAKSAVE(r13); /* get kernel stack coming from usr */\
261 +-1: cmpdi cr1,r1,0; /* check if SP makes sense */ \
262 ++1: type##_BTB_FLUSH \
263 ++ cmpdi cr1,r1,0; /* check if SP makes sense */ \
264 + bge- cr1,exc_##n##_bad_stack;/* bad stack (TODO: out of line) */ \
265 + mfspr r10,SPRN_##type##_SRR0; /* read SRR0 before touching stack */
266 +
267 +@@ -328,6 +329,30 @@ ret_from_mc_except:
268 + #define SPRN_MC_SRR0 SPRN_MCSRR0
269 + #define SPRN_MC_SRR1 SPRN_MCSRR1
270 +
271 ++#ifdef CONFIG_PPC_FSL_BOOK3E
272 ++#define GEN_BTB_FLUSH \
273 ++ START_BTB_FLUSH_SECTION \
274 ++ beq 1f; \
275 ++ BTB_FLUSH(r10) \
276 ++ 1: \
277 ++ END_BTB_FLUSH_SECTION
278 ++
279 ++#define CRIT_BTB_FLUSH \
280 ++ START_BTB_FLUSH_SECTION \
281 ++ BTB_FLUSH(r10) \
282 ++ END_BTB_FLUSH_SECTION
283 ++
284 ++#define DBG_BTB_FLUSH CRIT_BTB_FLUSH
285 ++#define MC_BTB_FLUSH CRIT_BTB_FLUSH
286 ++#define GDBELL_BTB_FLUSH GEN_BTB_FLUSH
287 ++#else
288 ++#define GEN_BTB_FLUSH
289 ++#define CRIT_BTB_FLUSH
290 ++#define DBG_BTB_FLUSH
291 ++#define MC_BTB_FLUSH
292 ++#define GDBELL_BTB_FLUSH
293 ++#endif
294 ++
295 + #define NORMAL_EXCEPTION_PROLOG(n, intnum, addition) \
296 + EXCEPTION_PROLOG(n, intnum, GEN, addition##_GEN(n))
297 +
298 +diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h
299 +index d0862a100d29..306e26c073a0 100644
300 +--- a/arch/powerpc/kernel/head_booke.h
301 ++++ b/arch/powerpc/kernel/head_booke.h
302 +@@ -32,6 +32,16 @@
303 + */
304 + #define THREAD_NORMSAVE(offset) (THREAD_NORMSAVES + (offset * 4))
305 +
306 ++#ifdef CONFIG_PPC_FSL_BOOK3E
307 ++#define BOOKE_CLEAR_BTB(reg) \
308 ++START_BTB_FLUSH_SECTION \
309 ++ BTB_FLUSH(reg) \
310 ++END_BTB_FLUSH_SECTION
311 ++#else
312 ++#define BOOKE_CLEAR_BTB(reg)
313 ++#endif
314 ++
315 ++
316 + #define NORMAL_EXCEPTION_PROLOG(intno) \
317 + mtspr SPRN_SPRG_WSCRATCH0, r10; /* save one register */ \
318 + mfspr r10, SPRN_SPRG_THREAD; \
319 +@@ -43,6 +53,7 @@
320 + andi. r11, r11, MSR_PR; /* check whether user or kernel */\
321 + mr r11, r1; \
322 + beq 1f; \
323 ++ BOOKE_CLEAR_BTB(r11) \
324 + /* if from user, start at top of this thread's kernel stack */ \
325 + lwz r11, THREAD_INFO-THREAD(r10); \
326 + ALLOC_STACK_FRAME(r11, THREAD_SIZE); \
327 +@@ -128,6 +139,7 @@
328 + stw r9,_CCR(r8); /* save CR on stack */\
329 + mfspr r11,exc_level_srr1; /* check whether user or kernel */\
330 + DO_KVM BOOKE_INTERRUPT_##intno exc_level_srr1; \
331 ++ BOOKE_CLEAR_BTB(r10) \
332 + andi. r11,r11,MSR_PR; \
333 + mfspr r11,SPRN_SPRG_THREAD; /* if from user, start at top of */\
334 + lwz r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\
335 +diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
336 +index e2750b856c8f..2386ce2a9c6e 100644
337 +--- a/arch/powerpc/kernel/head_fsl_booke.S
338 ++++ b/arch/powerpc/kernel/head_fsl_booke.S
339 +@@ -453,6 +453,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
340 + mfcr r13
341 + stw r13, THREAD_NORMSAVE(3)(r10)
342 + DO_KVM BOOKE_INTERRUPT_DTLB_MISS SPRN_SRR1
343 ++START_BTB_FLUSH_SECTION
344 ++ mfspr r11, SPRN_SRR1
345 ++ andi. r10,r11,MSR_PR
346 ++ beq 1f
347 ++ BTB_FLUSH(r10)
348 ++1:
349 ++END_BTB_FLUSH_SECTION
350 + mfspr r10, SPRN_DEAR /* Get faulting address */
351 +
352 + /* If we are faulting a kernel address, we have to use the
353 +@@ -547,6 +554,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
354 + mfcr r13
355 + stw r13, THREAD_NORMSAVE(3)(r10)
356 + DO_KVM BOOKE_INTERRUPT_ITLB_MISS SPRN_SRR1
357 ++START_BTB_FLUSH_SECTION
358 ++ mfspr r11, SPRN_SRR1
359 ++ andi. r10,r11,MSR_PR
360 ++ beq 1f
361 ++ BTB_FLUSH(r10)
362 ++1:
363 ++END_BTB_FLUSH_SECTION
364 ++
365 + mfspr r10, SPRN_SRR0 /* Get faulting address */
366 +
367 + /* If we are faulting a kernel address, we have to use the
368 +diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
369 +index 1b395b85132b..1341325599a7 100644
370 +--- a/arch/powerpc/kernel/security.c
371 ++++ b/arch/powerpc/kernel/security.c
372 +@@ -26,6 +26,10 @@ static enum count_cache_flush_type count_cache_flush_type = COUNT_CACHE_FLUSH_NO
373 +
374 + bool barrier_nospec_enabled;
375 + static bool no_nospec;
376 ++static bool btb_flush_enabled;
377 ++#ifdef CONFIG_PPC_FSL_BOOK3E
378 ++static bool no_spectrev2;
379 ++#endif
380 +
381 + static void enable_barrier_nospec(bool enable)
382 + {
383 +@@ -101,6 +105,23 @@ static __init int barrier_nospec_debugfs_init(void)
384 + device_initcall(barrier_nospec_debugfs_init);
385 + #endif /* CONFIG_DEBUG_FS */
386 +
387 ++#ifdef CONFIG_PPC_FSL_BOOK3E
388 ++static int __init handle_nospectre_v2(char *p)
389 ++{
390 ++ no_spectrev2 = true;
391 ++
392 ++ return 0;
393 ++}
394 ++early_param("nospectre_v2", handle_nospectre_v2);
395 ++void setup_spectre_v2(void)
396 ++{
397 ++ if (no_spectrev2)
398 ++ do_btb_flush_fixups();
399 ++ else
400 ++ btb_flush_enabled = true;
401 ++}
402 ++#endif /* CONFIG_PPC_FSL_BOOK3E */
403 ++
404 + #ifdef CONFIG_PPC_BOOK3S_64
405 + ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
406 + {
407 +@@ -168,31 +189,27 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, c
408 + bcs = security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED);
409 + ccd = security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED);
410 +
411 +- if (bcs || ccd || count_cache_flush_type != COUNT_CACHE_FLUSH_NONE) {
412 +- bool comma = false;
413 ++ if (bcs || ccd) {
414 + seq_buf_printf(&s, "Mitigation: ");
415 +
416 +- if (bcs) {
417 ++ if (bcs)
418 + seq_buf_printf(&s, "Indirect branch serialisation (kernel only)");
419 +- comma = true;
420 +- }
421 +-
422 +- if (ccd) {
423 +- if (comma)
424 +- seq_buf_printf(&s, ", ");
425 +- seq_buf_printf(&s, "Indirect branch cache disabled");
426 +- comma = true;
427 +- }
428 +
429 +- if (comma)
430 ++ if (bcs && ccd)
431 + seq_buf_printf(&s, ", ");
432 +
433 +- seq_buf_printf(&s, "Software count cache flush");
434 ++ if (ccd)
435 ++ seq_buf_printf(&s, "Indirect branch cache disabled");
436 ++ } else if (count_cache_flush_type != COUNT_CACHE_FLUSH_NONE) {
437 ++ seq_buf_printf(&s, "Mitigation: Software count cache flush");
438 +
439 + if (count_cache_flush_type == COUNT_CACHE_FLUSH_HW)
440 +- seq_buf_printf(&s, "(hardware accelerated)");
441 +- } else
442 ++ seq_buf_printf(&s, " (hardware accelerated)");
443 ++ } else if (btb_flush_enabled) {
444 ++ seq_buf_printf(&s, "Mitigation: Branch predictor state flush");
445 ++ } else {
446 + seq_buf_printf(&s, "Vulnerable");
447 ++ }
448 +
449 + seq_buf_printf(&s, "\n");
450 +
451 +diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
452 +index 93fa0c99681e..508244bcf19c 100644
453 +--- a/arch/powerpc/kernel/setup-common.c
454 ++++ b/arch/powerpc/kernel/setup-common.c
455 +@@ -973,6 +973,7 @@ void __init setup_arch(char **cmdline_p)
456 + ppc_md.setup_arch();
457 +
458 + setup_barrier_nospec();
459 ++ setup_spectre_v2();
460 +
461 + paging_init();
462 +
463 +diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
464 +index 53016c753f3c..fd35eddf3266 100644
465 +--- a/arch/powerpc/kernel/vmlinux.lds.S
466 ++++ b/arch/powerpc/kernel/vmlinux.lds.S
467 +@@ -164,6 +164,14 @@ SECTIONS
468 + }
469 + #endif /* CONFIG_PPC_BARRIER_NOSPEC */
470 +
471 ++#ifdef CONFIG_PPC_FSL_BOOK3E
472 ++ . = ALIGN(8);
473 ++ __spec_btb_flush_fixup : AT(ADDR(__spec_btb_flush_fixup) - LOAD_OFFSET) {
474 ++ __start__btb_flush_fixup = .;
475 ++ *(__btb_flush_fixup)
476 ++ __stop__btb_flush_fixup = .;
477 ++ }
478 ++#endif
479 + EXCEPTION_TABLE(0)
480 +
481 + NOTES :kernel :notes
482 +diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S
483 +index 81bd8a07aa51..612b7f6a887f 100644
484 +--- a/arch/powerpc/kvm/bookehv_interrupts.S
485 ++++ b/arch/powerpc/kvm/bookehv_interrupts.S
486 +@@ -75,6 +75,10 @@
487 + PPC_LL r1, VCPU_HOST_STACK(r4)
488 + PPC_LL r2, HOST_R2(r1)
489 +
490 ++START_BTB_FLUSH_SECTION
491 ++ BTB_FLUSH(r10)
492 ++END_BTB_FLUSH_SECTION
493 ++
494 + mfspr r10, SPRN_PID
495 + lwz r8, VCPU_HOST_PID(r4)
496 + PPC_LL r11, VCPU_SHARED(r4)
497 +diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c
498 +index 3f8189eb56ed..fde1de08b4d7 100644
499 +--- a/arch/powerpc/kvm/e500_emulate.c
500 ++++ b/arch/powerpc/kvm/e500_emulate.c
501 +@@ -277,6 +277,13 @@ int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong spr_va
502 + vcpu->arch.pwrmgtcr0 = spr_val;
503 + break;
504 +
505 ++ case SPRN_BUCSR:
506 ++ /*
507 ++ * If we are here, it means that we have already flushed the
508 ++ * branch predictor, so just return to guest.
509 ++ */
510 ++ break;
511 ++
512 + /* extra exceptions */
513 + #ifdef CONFIG_SPE_POSSIBLE
514 + case SPRN_IVOR32:
515 +diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
516 +index e613b02bb2f0..dbe478e7b8e0 100644
517 +--- a/arch/powerpc/lib/feature-fixups.c
518 ++++ b/arch/powerpc/lib/feature-fixups.c
519 +@@ -347,6 +347,29 @@ void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_
520 +
521 + printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i);
522 + }
523 ++
524 ++static void patch_btb_flush_section(long *curr)
525 ++{
526 ++ unsigned int *start, *end;
527 ++
528 ++ start = (void *)curr + *curr;
529 ++ end = (void *)curr + *(curr + 1);
530 ++ for (; start < end; start++) {
531 ++ pr_devel("patching dest %lx\n", (unsigned long)start);
532 ++ patch_instruction(start, PPC_INST_NOP);
533 ++ }
534 ++}
535 ++
536 ++void do_btb_flush_fixups(void)
537 ++{
538 ++ long *start, *end;
539 ++
540 ++ start = PTRRELOC(&__start__btb_flush_fixup);
541 ++ end = PTRRELOC(&__stop__btb_flush_fixup);
542 ++
543 ++ for (; start < end; start += 2)
544 ++ patch_btb_flush_section(start);
545 ++}
546 + #endif /* CONFIG_PPC_FSL_BOOK3E */
547 +
548 + void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
549 +diff --git a/arch/powerpc/lib/memcmp_64.S b/arch/powerpc/lib/memcmp_64.S
550 +index 844d8e774492..b7f6f6e0b6e8 100644
551 +--- a/arch/powerpc/lib/memcmp_64.S
552 ++++ b/arch/powerpc/lib/memcmp_64.S
553 +@@ -215,11 +215,20 @@ _GLOBAL_TOC(memcmp)
554 + beq .Lzero
555 +
556 + .Lcmp_rest_lt8bytes:
557 +- /* Here we have only less than 8 bytes to compare with. at least s1
558 +- * Address is aligned with 8 bytes.
559 +- * The next double words are load and shift right with appropriate
560 +- * bits.
561 ++ /*
562 ++ * Here we have less than 8 bytes to compare. At least s1 is aligned to
563 ++ * 8 bytes, but s2 may not be. We must make sure s2 + 7 doesn't cross a
564 ++ * page boundary, otherwise we might read past the end of the buffer and
565 ++ * trigger a page fault. We use 4K as the conservative minimum page
566 ++ * size. If we detect that case we go to the byte-by-byte loop.
567 ++ *
568 ++ * Otherwise the next double word is loaded from s1 and s2, and shifted
569 ++ * right to compare the appropriate bits.
570 + */
571 ++ clrldi r6,r4,(64-12) // r6 = r4 & 0xfff
572 ++ cmpdi r6,0xff8
573 ++ bgt .Lshort
574 ++
575 + subfic r6,r5,8
576 + slwi r6,r6,3
577 + LD rA,0,r3
578 +diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S
579 +index 7fd20c52a8ec..9ed90064f542 100644
580 +--- a/arch/powerpc/mm/tlb_low_64e.S
581 ++++ b/arch/powerpc/mm/tlb_low_64e.S
582 +@@ -70,6 +70,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
583 + std r15,EX_TLB_R15(r12)
584 + std r10,EX_TLB_CR(r12)
585 + #ifdef CONFIG_PPC_FSL_BOOK3E
586 ++START_BTB_FLUSH_SECTION
587 ++ mfspr r11, SPRN_SRR1
588 ++ andi. r10,r11,MSR_PR
589 ++ beq 1f
590 ++ BTB_FLUSH(r10)
591 ++1:
592 ++END_BTB_FLUSH_SECTION
593 + std r7,EX_TLB_R7(r12)
594 + #endif
595 + TLB_MISS_PROLOG_STATS
596 +diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
597 +index 47fc6660845d..68dece206048 100644
598 +--- a/arch/powerpc/net/bpf_jit.h
599 ++++ b/arch/powerpc/net/bpf_jit.h
600 +@@ -51,6 +51,8 @@
601 + #define PPC_LIS(r, i) PPC_ADDIS(r, 0, i)
602 + #define PPC_STD(r, base, i) EMIT(PPC_INST_STD | ___PPC_RS(r) | \
603 + ___PPC_RA(base) | ((i) & 0xfffc))
604 ++#define PPC_STDX(r, base, b) EMIT(PPC_INST_STDX | ___PPC_RS(r) | \
605 ++ ___PPC_RA(base) | ___PPC_RB(b))
606 + #define PPC_STDU(r, base, i) EMIT(PPC_INST_STDU | ___PPC_RS(r) | \
607 + ___PPC_RA(base) | ((i) & 0xfffc))
608 + #define PPC_STW(r, base, i) EMIT(PPC_INST_STW | ___PPC_RS(r) | \
609 +@@ -65,7 +67,9 @@
610 + #define PPC_LBZ(r, base, i) EMIT(PPC_INST_LBZ | ___PPC_RT(r) | \
611 + ___PPC_RA(base) | IMM_L(i))
612 + #define PPC_LD(r, base, i) EMIT(PPC_INST_LD | ___PPC_RT(r) | \
613 +- ___PPC_RA(base) | IMM_L(i))
614 ++ ___PPC_RA(base) | ((i) & 0xfffc))
615 ++#define PPC_LDX(r, base, b) EMIT(PPC_INST_LDX | ___PPC_RT(r) | \
616 ++ ___PPC_RA(base) | ___PPC_RB(b))
617 + #define PPC_LWZ(r, base, i) EMIT(PPC_INST_LWZ | ___PPC_RT(r) | \
618 + ___PPC_RA(base) | IMM_L(i))
619 + #define PPC_LHZ(r, base, i) EMIT(PPC_INST_LHZ | ___PPC_RT(r) | \
620 +@@ -85,17 +89,6 @@
621 + ___PPC_RA(a) | ___PPC_RB(b))
622 + #define PPC_BPF_STDCX(s, a, b) EMIT(PPC_INST_STDCX | ___PPC_RS(s) | \
623 + ___PPC_RA(a) | ___PPC_RB(b))
624 +-
625 +-#ifdef CONFIG_PPC64
626 +-#define PPC_BPF_LL(r, base, i) do { PPC_LD(r, base, i); } while(0)
627 +-#define PPC_BPF_STL(r, base, i) do { PPC_STD(r, base, i); } while(0)
628 +-#define PPC_BPF_STLU(r, base, i) do { PPC_STDU(r, base, i); } while(0)
629 +-#else
630 +-#define PPC_BPF_LL(r, base, i) do { PPC_LWZ(r, base, i); } while(0)
631 +-#define PPC_BPF_STL(r, base, i) do { PPC_STW(r, base, i); } while(0)
632 +-#define PPC_BPF_STLU(r, base, i) do { PPC_STWU(r, base, i); } while(0)
633 +-#endif
634 +-
635 + #define PPC_CMPWI(a, i) EMIT(PPC_INST_CMPWI | ___PPC_RA(a) | IMM_L(i))
636 + #define PPC_CMPDI(a, i) EMIT(PPC_INST_CMPDI | ___PPC_RA(a) | IMM_L(i))
637 + #define PPC_CMPW(a, b) EMIT(PPC_INST_CMPW | ___PPC_RA(a) | \
638 +diff --git a/arch/powerpc/net/bpf_jit32.h b/arch/powerpc/net/bpf_jit32.h
639 +index 6f4daacad296..ade04547703f 100644
640 +--- a/arch/powerpc/net/bpf_jit32.h
641 ++++ b/arch/powerpc/net/bpf_jit32.h
642 +@@ -123,6 +123,10 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh);
643 + #define PPC_NTOHS_OFFS(r, base, i) PPC_LHZ_OFFS(r, base, i)
644 + #endif
645 +
646 ++#define PPC_BPF_LL(r, base, i) do { PPC_LWZ(r, base, i); } while(0)
647 ++#define PPC_BPF_STL(r, base, i) do { PPC_STW(r, base, i); } while(0)
648 ++#define PPC_BPF_STLU(r, base, i) do { PPC_STWU(r, base, i); } while(0)
649 ++
650 + #define SEEN_DATAREF 0x10000 /* might call external helpers */
651 + #define SEEN_XREG 0x20000 /* X reg is used */
652 + #define SEEN_MEM 0x40000 /* SEEN_MEM+(1<<n) = use mem[n] for temporary
653 +diff --git a/arch/powerpc/net/bpf_jit64.h b/arch/powerpc/net/bpf_jit64.h
654 +index 3609be4692b3..47f441f351a6 100644
655 +--- a/arch/powerpc/net/bpf_jit64.h
656 ++++ b/arch/powerpc/net/bpf_jit64.h
657 +@@ -68,6 +68,26 @@ static const int b2p[] = {
658 + /* PPC NVR range -- update this if we ever use NVRs below r27 */
659 + #define BPF_PPC_NVR_MIN 27
660 +
661 ++/*
662 ++ * WARNING: These can use TMP_REG_2 if the offset is not at word boundary,
663 ++ * so ensure that it isn't in use already.
664 ++ */
665 ++#define PPC_BPF_LL(r, base, i) do { \
666 ++ if ((i) % 4) { \
667 ++ PPC_LI(b2p[TMP_REG_2], (i)); \
668 ++ PPC_LDX(r, base, b2p[TMP_REG_2]); \
669 ++ } else \
670 ++ PPC_LD(r, base, i); \
671 ++ } while(0)
672 ++#define PPC_BPF_STL(r, base, i) do { \
673 ++ if ((i) % 4) { \
674 ++ PPC_LI(b2p[TMP_REG_2], (i)); \
675 ++ PPC_STDX(r, base, b2p[TMP_REG_2]); \
676 ++ } else \
677 ++ PPC_STD(r, base, i); \
678 ++ } while(0)
679 ++#define PPC_BPF_STLU(r, base, i) do { PPC_STDU(r, base, i); } while(0)
680 ++
681 + #define SEEN_FUNC 0x1000 /* might call external helpers */
682 + #define SEEN_STACK 0x2000 /* uses BPF stack */
683 + #define SEEN_TAILCALL 0x4000 /* uses tail calls */
684 +diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
685 +index 50b129785aee..226eec62d125 100644
686 +--- a/arch/powerpc/net/bpf_jit_comp64.c
687 ++++ b/arch/powerpc/net/bpf_jit_comp64.c
688 +@@ -226,7 +226,7 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
689 + * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
690 + * goto out;
691 + */
692 +- PPC_LD(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));
693 ++ PPC_BPF_LL(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));
694 + PPC_CMPLWI(b2p[TMP_REG_1], MAX_TAIL_CALL_CNT);
695 + PPC_BCC(COND_GT, out);
696 +
697 +@@ -239,7 +239,7 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
698 + /* prog = array->ptrs[index]; */
699 + PPC_MULI(b2p[TMP_REG_1], b2p_index, 8);
700 + PPC_ADD(b2p[TMP_REG_1], b2p[TMP_REG_1], b2p_bpf_array);
701 +- PPC_LD(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_array, ptrs));
702 ++ PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_array, ptrs));
703 +
704 + /*
705 + * if (prog == NULL)
706 +@@ -249,7 +249,7 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
707 + PPC_BCC(COND_EQ, out);
708 +
709 + /* goto *(prog->bpf_func + prologue_size); */
710 +- PPC_LD(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_prog, bpf_func));
711 ++ PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_prog, bpf_func));
712 + #ifdef PPC64_ELF_ABI_v1
713 + /* skip past the function descriptor */
714 + PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1],
715 +@@ -573,7 +573,7 @@ bpf_alu32_trunc:
716 + * the instructions generated will remain the
717 + * same across all passes
718 + */
719 +- PPC_STD(dst_reg, 1, bpf_jit_stack_local(ctx));
720 ++ PPC_BPF_STL(dst_reg, 1, bpf_jit_stack_local(ctx));
721 + PPC_ADDI(b2p[TMP_REG_1], 1, bpf_jit_stack_local(ctx));
722 + PPC_LDBRX(dst_reg, 0, b2p[TMP_REG_1]);
723 + break;
724 +@@ -629,7 +629,7 @@ emit_clear:
725 + PPC_LI32(b2p[TMP_REG_1], imm);
726 + src_reg = b2p[TMP_REG_1];
727 + }
728 +- PPC_STD(src_reg, dst_reg, off);
729 ++ PPC_BPF_STL(src_reg, dst_reg, off);
730 + break;
731 +
732 + /*
733 +@@ -676,7 +676,7 @@ emit_clear:
734 + break;
735 + /* dst = *(u64 *)(ul) (src + off) */
736 + case BPF_LDX | BPF_MEM | BPF_DW:
737 +- PPC_LD(dst_reg, src_reg, off);
738 ++ PPC_BPF_LL(dst_reg, src_reg, off);
739 + break;
740 +
741 + /*
742 +diff --git a/arch/powerpc/platforms/pseries/pseries_energy.c b/arch/powerpc/platforms/pseries/pseries_energy.c
743 +index 6ed22127391b..921f12182f3e 100644
744 +--- a/arch/powerpc/platforms/pseries/pseries_energy.c
745 ++++ b/arch/powerpc/platforms/pseries/pseries_energy.c
746 +@@ -77,18 +77,27 @@ static u32 cpu_to_drc_index(int cpu)
747 +
748 + ret = drc.drc_index_start + (thread_index * drc.sequential_inc);
749 + } else {
750 +- const __be32 *indexes;
751 +-
752 +- indexes = of_get_property(dn, "ibm,drc-indexes", NULL);
753 +- if (indexes == NULL)
754 +- goto err_of_node_put;
755 ++ u32 nr_drc_indexes, thread_drc_index;
756 +
757 + /*
758 +- * The first element indexes[0] is the number of drc_indexes
759 +- * returned in the list. Hence thread_index+1 will get the
760 +- * drc_index corresponding to core number thread_index.
761 ++ * The first element of ibm,drc-indexes array is the
762 ++ * number of drc_indexes returned in the list. Hence
763 ++ * thread_index+1 will get the drc_index corresponding
764 ++ * to core number thread_index.
765 + */
766 +- ret = indexes[thread_index + 1];
767 ++ rc = of_property_read_u32_index(dn, "ibm,drc-indexes",
768 ++ 0, &nr_drc_indexes);
769 ++ if (rc)
770 ++ goto err_of_node_put;
771 ++
772 ++ WARN_ON_ONCE(thread_index > nr_drc_indexes);
773 ++ rc = of_property_read_u32_index(dn, "ibm,drc-indexes",
774 ++ thread_index + 1,
775 ++ &thread_drc_index);
776 ++ if (rc)
777 ++ goto err_of_node_put;
778 ++
779 ++ ret = thread_drc_index;
780 + }
781 +
782 + rc = 0;
783 +diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
784 +index 44c6a82b7ce5..e76d16ac2776 100644
785 +--- a/arch/x86/Kconfig
786 ++++ b/arch/x86/Kconfig
787 +@@ -2199,14 +2199,8 @@ config RANDOMIZE_MEMORY_PHYSICAL_PADDING
788 + If unsure, leave at the default value.
789 +
790 + config HOTPLUG_CPU
791 +- bool "Support for hot-pluggable CPUs"
792 ++ def_bool y
793 + depends on SMP
794 +- ---help---
795 +- Say Y here to allow turning CPUs off and on. CPUs can be
796 +- controlled through /sys/devices/system/cpu.
797 +- ( Note: power management support will enable this option
798 +- automatically on SMP systems. )
799 +- Say N if you want to disable CPU hotplug.
800 +
801 + config BOOTPARAM_HOTPLUG_CPU0
802 + bool "Set default setting of cpu0_hotpluggable"
803 +diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
804 +index 46f0b621bd37..7014dba23d20 100644
805 +--- a/arch/x86/include/asm/kvm_host.h
806 ++++ b/arch/x86/include/asm/kvm_host.h
807 +@@ -315,6 +315,7 @@ struct kvm_mmu_page {
808 + };
809 +
810 + struct kvm_pio_request {
811 ++ unsigned long linear_rip;
812 + unsigned long count;
813 + int in;
814 + int port;
815 +@@ -527,6 +528,7 @@ struct kvm_vcpu_arch {
816 + bool tpr_access_reporting;
817 + u64 ia32_xss;
818 + u64 microcode_version;
819 ++ u64 arch_capabilities;
820 +
821 + /*
822 + * Paging state of the vcpu
823 +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
824 +index 4029d3783e18..f99f59625da5 100644
825 +--- a/arch/x86/kvm/vmx.c
826 ++++ b/arch/x86/kvm/vmx.c
827 +@@ -970,7 +970,6 @@ struct vcpu_vmx {
828 + u64 msr_guest_kernel_gs_base;
829 + #endif
830 +
831 +- u64 arch_capabilities;
832 + u64 spec_ctrl;
833 +
834 + u32 vm_entry_controls_shadow;
835 +@@ -4104,12 +4103,6 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
836 +
837 + msr_info->data = to_vmx(vcpu)->spec_ctrl;
838 + break;
839 +- case MSR_IA32_ARCH_CAPABILITIES:
840 +- if (!msr_info->host_initiated &&
841 +- !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES))
842 +- return 1;
843 +- msr_info->data = to_vmx(vcpu)->arch_capabilities;
844 +- break;
845 + case MSR_IA32_SYSENTER_CS:
846 + msr_info->data = vmcs_read32(GUEST_SYSENTER_CS);
847 + break;
848 +@@ -4271,11 +4264,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
849 + vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap, MSR_IA32_PRED_CMD,
850 + MSR_TYPE_W);
851 + break;
852 +- case MSR_IA32_ARCH_CAPABILITIES:
853 +- if (!msr_info->host_initiated)
854 +- return 1;
855 +- vmx->arch_capabilities = data;
856 +- break;
857 + case MSR_IA32_CR_PAT:
858 + if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
859 + if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
860 +@@ -6666,8 +6654,6 @@ static void vmx_vcpu_setup(struct vcpu_vmx *vmx)
861 + ++vmx->nmsrs;
862 + }
863 +
864 +- vmx->arch_capabilities = kvm_get_arch_capabilities();
865 +-
866 + vm_exit_controls_init(vmx, vmcs_config.vmexit_ctrl);
867 +
868 + /* 22.2.1, 20.8.1 */
869 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
870 +index 6181ec19bed2..4a61e1609c97 100644
871 +--- a/arch/x86/kvm/x86.c
872 ++++ b/arch/x86/kvm/x86.c
873 +@@ -2350,6 +2350,11 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
874 + if (msr_info->host_initiated)
875 + vcpu->arch.microcode_version = data;
876 + break;
877 ++ case MSR_IA32_ARCH_CAPABILITIES:
878 ++ if (!msr_info->host_initiated)
879 ++ return 1;
880 ++ vcpu->arch.arch_capabilities = data;
881 ++ break;
882 + case MSR_EFER:
883 + return set_efer(vcpu, data);
884 + case MSR_K7_HWCR:
885 +@@ -2654,6 +2659,12 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
886 + case MSR_IA32_UCODE_REV:
887 + msr_info->data = vcpu->arch.microcode_version;
888 + break;
889 ++ case MSR_IA32_ARCH_CAPABILITIES:
890 ++ if (!msr_info->host_initiated &&
891 ++ !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES))
892 ++ return 1;
893 ++ msr_info->data = vcpu->arch.arch_capabilities;
894 ++ break;
895 + case MSR_IA32_TSC:
896 + msr_info->data = kvm_scale_tsc(vcpu, rdtsc()) + vcpu->arch.tsc_offset;
897 + break;
898 +@@ -6317,14 +6328,27 @@ int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
899 + }
900 + EXPORT_SYMBOL_GPL(kvm_emulate_instruction_from_buffer);
901 +
902 ++static int complete_fast_pio_out(struct kvm_vcpu *vcpu)
903 ++{
904 ++ vcpu->arch.pio.count = 0;
905 ++
906 ++ if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip)))
907 ++ return 1;
908 ++
909 ++ return kvm_skip_emulated_instruction(vcpu);
910 ++}
911 ++
912 + static int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size,
913 + unsigned short port)
914 + {
915 + unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX);
916 + int ret = emulator_pio_out_emulated(&vcpu->arch.emulate_ctxt,
917 + size, port, &val, 1);
918 +- /* do not return to emulator after return from userspace */
919 +- vcpu->arch.pio.count = 0;
920 ++
921 ++ if (!ret) {
922 ++ vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu);
923 ++ vcpu->arch.complete_userspace_io = complete_fast_pio_out;
924 ++ }
925 + return ret;
926 + }
927 +
928 +@@ -6335,6 +6359,11 @@ static int complete_fast_pio_in(struct kvm_vcpu *vcpu)
929 + /* We should only ever be called with arch.pio.count equal to 1 */
930 + BUG_ON(vcpu->arch.pio.count != 1);
931 +
932 ++ if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip))) {
933 ++ vcpu->arch.pio.count = 0;
934 ++ return 1;
935 ++ }
936 ++
937 + /* For size less than 4 we merge, else we zero extend */
938 + val = (vcpu->arch.pio.size < 4) ? kvm_register_read(vcpu, VCPU_REGS_RAX)
939 + : 0;
940 +@@ -6347,7 +6376,7 @@ static int complete_fast_pio_in(struct kvm_vcpu *vcpu)
941 + vcpu->arch.pio.port, &val, 1);
942 + kvm_register_write(vcpu, VCPU_REGS_RAX, val);
943 +
944 +- return 1;
945 ++ return kvm_skip_emulated_instruction(vcpu);
946 + }
947 +
948 + static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size,
949 +@@ -6366,6 +6395,7 @@ static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size,
950 + return ret;
951 + }
952 +
953 ++ vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu);
954 + vcpu->arch.complete_userspace_io = complete_fast_pio_in;
955 +
956 + return 0;
957 +@@ -6373,16 +6403,13 @@ static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size,
958 +
959 + int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in)
960 + {
961 +- int ret = kvm_skip_emulated_instruction(vcpu);
962 ++ int ret;
963 +
964 +- /*
965 +- * TODO: we might be squashing a KVM_GUESTDBG_SINGLESTEP-triggered
966 +- * KVM_EXIT_DEBUG here.
967 +- */
968 + if (in)
969 +- return kvm_fast_pio_in(vcpu, size, port) && ret;
970 ++ ret = kvm_fast_pio_in(vcpu, size, port);
971 + else
972 +- return kvm_fast_pio_out(vcpu, size, port) && ret;
973 ++ ret = kvm_fast_pio_out(vcpu, size, port);
974 ++ return ret && kvm_skip_emulated_instruction(vcpu);
975 + }
976 + EXPORT_SYMBOL_GPL(kvm_fast_pio);
977 +
978 +@@ -8485,6 +8512,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
979 +
980 + int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
981 + {
982 ++ vcpu->arch.arch_capabilities = kvm_get_arch_capabilities();
983 + kvm_vcpu_mtrr_init(vcpu);
984 + vcpu_load(vcpu);
985 + kvm_vcpu_reset(vcpu, false);
986 +diff --git a/drivers/char/ipmi/ipmi_si.h b/drivers/char/ipmi/ipmi_si.h
987 +index 52f6152d1fcb..7ae52c17618e 100644
988 +--- a/drivers/char/ipmi/ipmi_si.h
989 ++++ b/drivers/char/ipmi/ipmi_si.h
990 +@@ -25,7 +25,9 @@ void ipmi_irq_finish_setup(struct si_sm_io *io);
991 + int ipmi_si_remove_by_dev(struct device *dev);
992 + void ipmi_si_remove_by_data(int addr_space, enum si_type si_type,
993 + unsigned long addr);
994 +-int ipmi_si_hardcode_find_bmc(void);
995 ++void ipmi_hardcode_init(void);
996 ++void ipmi_si_hardcode_exit(void);
997 ++int ipmi_si_hardcode_match(int addr_type, unsigned long addr);
998 + void ipmi_si_platform_init(void);
999 + void ipmi_si_platform_shutdown(void);
1000 +
1001 +diff --git a/drivers/char/ipmi/ipmi_si_hardcode.c b/drivers/char/ipmi/ipmi_si_hardcode.c
1002 +index 10219f24546b..9ae2405c28bb 100644
1003 +--- a/drivers/char/ipmi/ipmi_si_hardcode.c
1004 ++++ b/drivers/char/ipmi/ipmi_si_hardcode.c
1005 +@@ -1,6 +1,7 @@
1006 + // SPDX-License-Identifier: GPL-2.0+
1007 +
1008 + #include <linux/moduleparam.h>
1009 ++#include <linux/platform_device.h>
1010 + #include "ipmi_si.h"
1011 +
1012 + #define PFX "ipmi_hardcode: "
1013 +@@ -11,23 +12,22 @@
1014 +
1015 + #define SI_MAX_PARMS 4
1016 +
1017 +-static char *si_type[SI_MAX_PARMS];
1018 + #define MAX_SI_TYPE_STR 30
1019 +-static char si_type_str[MAX_SI_TYPE_STR];
1020 ++static char si_type_str[MAX_SI_TYPE_STR] __initdata;
1021 + static unsigned long addrs[SI_MAX_PARMS];
1022 + static unsigned int num_addrs;
1023 + static unsigned int ports[SI_MAX_PARMS];
1024 + static unsigned int num_ports;
1025 +-static int irqs[SI_MAX_PARMS];
1026 +-static unsigned int num_irqs;
1027 +-static int regspacings[SI_MAX_PARMS];
1028 +-static unsigned int num_regspacings;
1029 +-static int regsizes[SI_MAX_PARMS];
1030 +-static unsigned int num_regsizes;
1031 +-static int regshifts[SI_MAX_PARMS];
1032 +-static unsigned int num_regshifts;
1033 +-static int slave_addrs[SI_MAX_PARMS]; /* Leaving 0 chooses the default value */
1034 +-static unsigned int num_slave_addrs;
1035 ++static int irqs[SI_MAX_PARMS] __initdata;
1036 ++static unsigned int num_irqs __initdata;
1037 ++static int regspacings[SI_MAX_PARMS] __initdata;
1038 ++static unsigned int num_regspacings __initdata;
1039 ++static int regsizes[SI_MAX_PARMS] __initdata;
1040 ++static unsigned int num_regsizes __initdata;
1041 ++static int regshifts[SI_MAX_PARMS] __initdata;
1042 ++static unsigned int num_regshifts __initdata;
1043 ++static int slave_addrs[SI_MAX_PARMS] __initdata;
1044 ++static unsigned int num_slave_addrs __initdata;
1045 +
1046 + module_param_string(type, si_type_str, MAX_SI_TYPE_STR, 0);
1047 + MODULE_PARM_DESC(type, "Defines the type of each interface, each"
1048 +@@ -72,12 +72,133 @@ MODULE_PARM_DESC(slave_addrs, "Set the default IPMB slave address for"
1049 + " overridden by this parm. This is an array indexed"
1050 + " by interface number.");
1051 +
1052 +-int ipmi_si_hardcode_find_bmc(void)
1053 ++static struct platform_device *ipmi_hc_pdevs[SI_MAX_PARMS];
1054 ++
1055 ++static void __init ipmi_hardcode_init_one(const char *si_type_str,
1056 ++ unsigned int i,
1057 ++ unsigned long addr,
1058 ++ unsigned int flags)
1059 + {
1060 +- int ret = -ENODEV;
1061 +- int i;
1062 +- struct si_sm_io io;
1063 ++ struct platform_device *pdev;
1064 ++ unsigned int num_r = 1, size;
1065 ++ struct resource r[4];
1066 ++ struct property_entry p[6];
1067 ++ enum si_type si_type;
1068 ++ unsigned int regspacing, regsize;
1069 ++ int rv;
1070 ++
1071 ++ memset(p, 0, sizeof(p));
1072 ++ memset(r, 0, sizeof(r));
1073 ++
1074 ++ if (!si_type_str || !*si_type_str || strcmp(si_type_str, "kcs") == 0) {
1075 ++ size = 2;
1076 ++ si_type = SI_KCS;
1077 ++ } else if (strcmp(si_type_str, "smic") == 0) {
1078 ++ size = 2;
1079 ++ si_type = SI_SMIC;
1080 ++ } else if (strcmp(si_type_str, "bt") == 0) {
1081 ++ size = 3;
1082 ++ si_type = SI_BT;
1083 ++ } else if (strcmp(si_type_str, "invalid") == 0) {
1084 ++ /*
1085 ++ * Allow a firmware-specified interface to be
1086 ++ * disabled.
1087 ++ */
1088 ++ size = 1;
1089 ++ si_type = SI_TYPE_INVALID;
1090 ++ } else {
1091 ++ pr_warn("Interface type specified for interface %d, was invalid: %s\n",
1092 ++ i, si_type_str);
1093 ++ return;
1094 ++ }
1095 ++
1096 ++ regsize = regsizes[i];
1097 ++ if (regsize == 0)
1098 ++ regsize = DEFAULT_REGSIZE;
1099 ++
1100 ++ p[0] = PROPERTY_ENTRY_U8("ipmi-type", si_type);
1101 ++ p[1] = PROPERTY_ENTRY_U8("slave-addr", slave_addrs[i]);
1102 ++ p[2] = PROPERTY_ENTRY_U8("addr-source", SI_HARDCODED);
1103 ++ p[3] = PROPERTY_ENTRY_U8("reg-shift", regshifts[i]);
1104 ++ p[4] = PROPERTY_ENTRY_U8("reg-size", regsize);
1105 ++ /* Last entry must be left NULL to terminate it. */
1106 ++
1107 ++ /*
1108 ++ * Register spacing is derived from the resources in
1109 ++ * the IPMI platform code.
1110 ++ */
1111 ++ regspacing = regspacings[i];
1112 ++ if (regspacing == 0)
1113 ++ regspacing = regsize;
1114 ++
1115 ++ r[0].start = addr;
1116 ++ r[0].end = r[0].start + regsize - 1;
1117 ++ r[0].name = "IPMI Address 1";
1118 ++ r[0].flags = flags;
1119 ++
1120 ++ if (size > 1) {
1121 ++ r[1].start = r[0].start + regspacing;
1122 ++ r[1].end = r[1].start + regsize - 1;
1123 ++ r[1].name = "IPMI Address 2";
1124 ++ r[1].flags = flags;
1125 ++ num_r++;
1126 ++ }
1127 ++
1128 ++ if (size > 2) {
1129 ++ r[2].start = r[1].start + regspacing;
1130 ++ r[2].end = r[2].start + regsize - 1;
1131 ++ r[2].name = "IPMI Address 3";
1132 ++ r[2].flags = flags;
1133 ++ num_r++;
1134 ++ }
1135 ++
1136 ++ if (irqs[i]) {
1137 ++ r[num_r].start = irqs[i];
1138 ++ r[num_r].end = irqs[i];
1139 ++ r[num_r].name = "IPMI IRQ";
1140 ++ r[num_r].flags = IORESOURCE_IRQ;
1141 ++ num_r++;
1142 ++ }
1143 ++
1144 ++ pdev = platform_device_alloc("hardcode-ipmi-si", i);
1145 ++ if (!pdev) {
1146 ++ pr_err("Error allocating IPMI platform device %d\n", i);
1147 ++ return;
1148 ++ }
1149 ++
1150 ++ rv = platform_device_add_resources(pdev, r, num_r);
1151 ++ if (rv) {
1152 ++ dev_err(&pdev->dev,
1153 ++ "Unable to add hard-code resources: %d\n", rv);
1154 ++ goto err;
1155 ++ }
1156 ++
1157 ++ rv = platform_device_add_properties(pdev, p);
1158 ++ if (rv) {
1159 ++ dev_err(&pdev->dev,
1160 ++ "Unable to add hard-code properties: %d\n", rv);
1161 ++ goto err;
1162 ++ }
1163 ++
1164 ++ rv = platform_device_add(pdev);
1165 ++ if (rv) {
1166 ++ dev_err(&pdev->dev,
1167 ++ "Unable to add hard-code device: %d\n", rv);
1168 ++ goto err;
1169 ++ }
1170 ++
1171 ++ ipmi_hc_pdevs[i] = pdev;
1172 ++ return;
1173 ++
1174 ++err:
1175 ++ platform_device_put(pdev);
1176 ++}
1177 ++
1178 ++void __init ipmi_hardcode_init(void)
1179 ++{
1180 ++ unsigned int i;
1181 + char *str;
1182 ++ char *si_type[SI_MAX_PARMS];
1183 +
1184 + /* Parse out the si_type string into its components. */
1185 + str = si_type_str;
1186 +@@ -94,54 +215,45 @@ int ipmi_si_hardcode_find_bmc(void)
1187 + }
1188 + }
1189 +
1190 +- memset(&io, 0, sizeof(io));
1191 + for (i = 0; i < SI_MAX_PARMS; i++) {
1192 +- if (!ports[i] && !addrs[i])
1193 +- continue;
1194 +-
1195 +- io.addr_source = SI_HARDCODED;
1196 +- pr_info(PFX "probing via hardcoded address\n");
1197 +-
1198 +- if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) {
1199 +- io.si_type = SI_KCS;
1200 +- } else if (strcmp(si_type[i], "smic") == 0) {
1201 +- io.si_type = SI_SMIC;
1202 +- } else if (strcmp(si_type[i], "bt") == 0) {
1203 +- io.si_type = SI_BT;
1204 +- } else {
1205 +- pr_warn(PFX "Interface type specified for interface %d, was invalid: %s\n",
1206 +- i, si_type[i]);
1207 +- continue;
1208 +- }
1209 ++ if (i < num_ports && ports[i])
1210 ++ ipmi_hardcode_init_one(si_type[i], i, ports[i],
1211 ++ IORESOURCE_IO);
1212 ++ if (i < num_addrs && addrs[i])
1213 ++ ipmi_hardcode_init_one(si_type[i], i, addrs[i],
1214 ++ IORESOURCE_MEM);
1215 ++ }
1216 ++}
1217 +
1218 +- if (ports[i]) {
1219 +- /* An I/O port */
1220 +- io.addr_data = ports[i];
1221 +- io.addr_type = IPMI_IO_ADDR_SPACE;
1222 +- } else if (addrs[i]) {
1223 +- /* A memory port */
1224 +- io.addr_data = addrs[i];
1225 +- io.addr_type = IPMI_MEM_ADDR_SPACE;
1226 +- } else {
1227 +- pr_warn(PFX "Interface type specified for interface %d, but port and address were not set or set to zero.\n",
1228 +- i);
1229 +- continue;
1230 +- }
1231 ++void ipmi_si_hardcode_exit(void)
1232 ++{
1233 ++ unsigned int i;
1234 +
1235 +- io.addr = NULL;
1236 +- io.regspacing = regspacings[i];
1237 +- if (!io.regspacing)
1238 +- io.regspacing = DEFAULT_REGSPACING;
1239 +- io.regsize = regsizes[i];
1240 +- if (!io.regsize)
1241 +- io.regsize = DEFAULT_REGSIZE;
1242 +- io.regshift = regshifts[i];
1243 +- io.irq = irqs[i];
1244 +- if (io.irq)
1245 +- io.irq_setup = ipmi_std_irq_setup;
1246 +- io.slave_addr = slave_addrs[i];
1247 +-
1248 +- ret = ipmi_si_add_smi(&io);
1249 ++ for (i = 0; i < SI_MAX_PARMS; i++) {
1250 ++ if (ipmi_hc_pdevs[i])
1251 ++ platform_device_unregister(ipmi_hc_pdevs[i]);
1252 + }
1253 +- return ret;
1254 ++}
1255 ++
1256 ++/*
1257 ++ * Returns true of the given address exists as a hardcoded address,
1258 ++ * false if not.
1259 ++ */
1260 ++int ipmi_si_hardcode_match(int addr_type, unsigned long addr)
1261 ++{
1262 ++ unsigned int i;
1263 ++
1264 ++ if (addr_type == IPMI_IO_ADDR_SPACE) {
1265 ++ for (i = 0; i < num_ports; i++) {
1266 ++ if (ports[i] == addr)
1267 ++ return 1;
1268 ++ }
1269 ++ } else {
1270 ++ for (i = 0; i < num_addrs; i++) {
1271 ++ if (addrs[i] == addr)
1272 ++ return 1;
1273 ++ }
1274 ++ }
1275 ++
1276 ++ return 0;
1277 + }
1278 +diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
1279 +index 82d831b103f9..75e5006f395a 100644
1280 +--- a/drivers/char/ipmi/ipmi_si_intf.c
1281 ++++ b/drivers/char/ipmi/ipmi_si_intf.c
1282 +@@ -1862,6 +1862,18 @@ int ipmi_si_add_smi(struct si_sm_io *io)
1283 + int rv = 0;
1284 + struct smi_info *new_smi, *dup;
1285 +
1286 ++ /*
1287 ++ * If the user gave us a hard-coded device at the same
1288 ++ * address, they presumably want us to use it and not what is
1289 ++ * in the firmware.
1290 ++ */
1291 ++ if (io->addr_source != SI_HARDCODED &&
1292 ++ ipmi_si_hardcode_match(io->addr_type, io->addr_data)) {
1293 ++ dev_info(io->dev,
1294 ++ "Hard-coded device at this address already exists");
1295 ++ return -ENODEV;
1296 ++ }
1297 ++
1298 + if (!io->io_setup) {
1299 + if (io->addr_type == IPMI_IO_ADDR_SPACE) {
1300 + io->io_setup = ipmi_si_port_setup;
1301 +@@ -2094,7 +2106,7 @@ static int try_smi_init(struct smi_info *new_smi)
1302 + return rv;
1303 + }
1304 +
1305 +-static int init_ipmi_si(void)
1306 ++static int __init init_ipmi_si(void)
1307 + {
1308 + struct smi_info *e;
1309 + enum ipmi_addr_src type = SI_INVALID;
1310 +@@ -2102,12 +2114,9 @@ static int init_ipmi_si(void)
1311 + if (initialized)
1312 + return 0;
1313 +
1314 ++ ipmi_hardcode_init();
1315 + pr_info("IPMI System Interface driver.\n");
1316 +
1317 +- /* If the user gave us a device, they presumably want us to use it */
1318 +- if (!ipmi_si_hardcode_find_bmc())
1319 +- goto do_scan;
1320 +-
1321 + ipmi_si_platform_init();
1322 +
1323 + ipmi_si_pci_init();
1324 +@@ -2118,7 +2127,6 @@ static int init_ipmi_si(void)
1325 + with multiple BMCs we assume that there will be several instances
1326 + of a given type so if we succeed in registering a type then also
1327 + try to register everything else of the same type */
1328 +-do_scan:
1329 + mutex_lock(&smi_infos_lock);
1330 + list_for_each_entry(e, &smi_infos, link) {
1331 + /* Try to register a device if it has an IRQ and we either
1332 +@@ -2304,6 +2312,8 @@ static void cleanup_ipmi_si(void)
1333 + list_for_each_entry_safe(e, tmp_e, &smi_infos, link)
1334 + cleanup_one_si(e);
1335 + mutex_unlock(&smi_infos_lock);
1336 ++
1337 ++ ipmi_si_hardcode_exit();
1338 + }
1339 + module_exit(cleanup_ipmi_si);
1340 +
1341 +diff --git a/drivers/char/ipmi/ipmi_si_platform.c b/drivers/char/ipmi/ipmi_si_platform.c
1342 +index bf69927502bd..d32b0dd377c5 100644
1343 +--- a/drivers/char/ipmi/ipmi_si_platform.c
1344 ++++ b/drivers/char/ipmi/ipmi_si_platform.c
1345 +@@ -126,8 +126,6 @@ ipmi_get_info_from_resources(struct platform_device *pdev,
1346 + if (res_second->start > io->addr_data)
1347 + io->regspacing = res_second->start - io->addr_data;
1348 + }
1349 +- io->regsize = DEFAULT_REGSIZE;
1350 +- io->regshift = 0;
1351 +
1352 + return res;
1353 + }
1354 +@@ -135,7 +133,7 @@ ipmi_get_info_from_resources(struct platform_device *pdev,
1355 + static int platform_ipmi_probe(struct platform_device *pdev)
1356 + {
1357 + struct si_sm_io io;
1358 +- u8 type, slave_addr, addr_source;
1359 ++ u8 type, slave_addr, addr_source, regsize, regshift;
1360 + int rv;
1361 +
1362 + rv = device_property_read_u8(&pdev->dev, "addr-source", &addr_source);
1363 +@@ -147,7 +145,7 @@ static int platform_ipmi_probe(struct platform_device *pdev)
1364 + if (addr_source == SI_SMBIOS) {
1365 + if (!si_trydmi)
1366 + return -ENODEV;
1367 +- } else {
1368 ++ } else if (addr_source != SI_HARDCODED) {
1369 + if (!si_tryplatform)
1370 + return -ENODEV;
1371 + }
1372 +@@ -167,11 +165,23 @@ static int platform_ipmi_probe(struct platform_device *pdev)
1373 + case SI_BT:
1374 + io.si_type = type;
1375 + break;
1376 ++ case SI_TYPE_INVALID: /* User disabled this in hardcode. */
1377 ++ return -ENODEV;
1378 + default:
1379 + dev_err(&pdev->dev, "ipmi-type property is invalid\n");
1380 + return -EINVAL;
1381 + }
1382 +
1383 ++ io.regsize = DEFAULT_REGSIZE;
1384 ++ rv = device_property_read_u8(&pdev->dev, "reg-size", &regsize);
1385 ++ if (!rv)
1386 ++ io.regsize = regsize;
1387 ++
1388 ++ io.regshift = 0;
1389 ++ rv = device_property_read_u8(&pdev->dev, "reg-shift", &regshift);
1390 ++ if (!rv)
1391 ++ io.regshift = regshift;
1392 ++
1393 + if (!ipmi_get_info_from_resources(pdev, &io))
1394 + return -EINVAL;
1395 +
1396 +@@ -191,7 +201,8 @@ static int platform_ipmi_probe(struct platform_device *pdev)
1397 +
1398 + io.dev = &pdev->dev;
1399 +
1400 +- pr_info("ipmi_si: SMBIOS: %s %#lx regsize %d spacing %d irq %d\n",
1401 ++ pr_info("ipmi_si: %s: %s %#lx regsize %d spacing %d irq %d\n",
1402 ++ ipmi_addr_src_to_str(addr_source),
1403 + (io.addr_type == IPMI_IO_ADDR_SPACE) ? "io" : "mem",
1404 + io.addr_data, io.regsize, io.regspacing, io.irq);
1405 +
1406 +@@ -356,6 +367,9 @@ static int acpi_ipmi_probe(struct platform_device *pdev)
1407 + goto err_free;
1408 + }
1409 +
1410 ++ io.regsize = DEFAULT_REGSIZE;
1411 ++ io.regshift = 0;
1412 ++
1413 + res = ipmi_get_info_from_resources(pdev, &io);
1414 + if (!res) {
1415 + rv = -EINVAL;
1416 +@@ -417,6 +431,11 @@ static int ipmi_remove(struct platform_device *pdev)
1417 + return ipmi_si_remove_by_dev(&pdev->dev);
1418 + }
1419 +
1420 ++static const struct platform_device_id si_plat_ids[] = {
1421 ++ { "hardcode-ipmi-si", 0 },
1422 ++ { }
1423 ++};
1424 ++
1425 + struct platform_driver ipmi_platform_driver = {
1426 + .driver = {
1427 + .name = DEVICE_NAME,
1428 +@@ -425,6 +444,7 @@ struct platform_driver ipmi_platform_driver = {
1429 + },
1430 + .probe = ipmi_probe,
1431 + .remove = ipmi_remove,
1432 ++ .id_table = si_plat_ids
1433 + };
1434 +
1435 + void ipmi_si_platform_init(void)
1436 +diff --git a/drivers/gpio/gpio-adnp.c b/drivers/gpio/gpio-adnp.c
1437 +index 91b90c0cea73..12acdac85820 100644
1438 +--- a/drivers/gpio/gpio-adnp.c
1439 ++++ b/drivers/gpio/gpio-adnp.c
1440 +@@ -132,8 +132,10 @@ static int adnp_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
1441 + if (err < 0)
1442 + goto out;
1443 +
1444 +- if (err & BIT(pos))
1445 +- err = -EACCES;
1446 ++ if (value & BIT(pos)) {
1447 ++ err = -EPERM;
1448 ++ goto out;
1449 ++ }
1450 +
1451 + err = 0;
1452 +
1453 +diff --git a/drivers/gpio/gpio-exar.c b/drivers/gpio/gpio-exar.c
1454 +index 0ecd2369c2ca..a09d2f9ebacc 100644
1455 +--- a/drivers/gpio/gpio-exar.c
1456 ++++ b/drivers/gpio/gpio-exar.c
1457 +@@ -148,6 +148,8 @@ static int gpio_exar_probe(struct platform_device *pdev)
1458 + mutex_init(&exar_gpio->lock);
1459 +
1460 + index = ida_simple_get(&ida_index, 0, 0, GFP_KERNEL);
1461 ++ if (index < 0)
1462 ++ goto err_destroy;
1463 +
1464 + sprintf(exar_gpio->name, "exar_gpio%d", index);
1465 + exar_gpio->gpio_chip.label = exar_gpio->name;
1466 +diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
1467 +index a614db310ea2..be15289bff9c 100644
1468 +--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
1469 ++++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
1470 +@@ -1446,7 +1446,7 @@ static inline int cmd_address_audit(struct parser_exec_state *s,
1471 + }
1472 +
1473 + if (index_mode) {
1474 +- if (guest_gma >= I915_GTT_PAGE_SIZE / sizeof(u64)) {
1475 ++ if (guest_gma >= I915_GTT_PAGE_SIZE) {
1476 + ret = -EFAULT;
1477 + goto err;
1478 + }
1479 +diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
1480 +index 1359e5c773e4..f8f9ae6622eb 100644
1481 +--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
1482 ++++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
1483 +@@ -505,6 +505,18 @@ static void vop_core_clks_disable(struct vop *vop)
1484 + clk_disable(vop->hclk);
1485 + }
1486 +
1487 ++static void vop_win_disable(struct vop *vop, const struct vop_win_data *win)
1488 ++{
1489 ++ if (win->phy->scl && win->phy->scl->ext) {
1490 ++ VOP_SCL_SET_EXT(vop, win, yrgb_hor_scl_mode, SCALE_NONE);
1491 ++ VOP_SCL_SET_EXT(vop, win, yrgb_ver_scl_mode, SCALE_NONE);
1492 ++ VOP_SCL_SET_EXT(vop, win, cbcr_hor_scl_mode, SCALE_NONE);
1493 ++ VOP_SCL_SET_EXT(vop, win, cbcr_ver_scl_mode, SCALE_NONE);
1494 ++ }
1495 ++
1496 ++ VOP_WIN_SET(vop, win, enable, 0);
1497 ++}
1498 ++
1499 + static int vop_enable(struct drm_crtc *crtc)
1500 + {
1501 + struct vop *vop = to_vop(crtc);
1502 +@@ -550,7 +562,7 @@ static int vop_enable(struct drm_crtc *crtc)
1503 + struct vop_win *vop_win = &vop->win[i];
1504 + const struct vop_win_data *win = vop_win->data;
1505 +
1506 +- VOP_WIN_SET(vop, win, enable, 0);
1507 ++ vop_win_disable(vop, win);
1508 + }
1509 + spin_unlock(&vop->reg_lock);
1510 +
1511 +@@ -694,7 +706,7 @@ static void vop_plane_atomic_disable(struct drm_plane *plane,
1512 +
1513 + spin_lock(&vop->reg_lock);
1514 +
1515 +- VOP_WIN_SET(vop, win, enable, 0);
1516 ++ vop_win_disable(vop, win);
1517 +
1518 + spin_unlock(&vop->reg_lock);
1519 + }
1520 +@@ -1449,7 +1461,7 @@ static int vop_initial(struct vop *vop)
1521 + int channel = i * 2 + 1;
1522 +
1523 + VOP_WIN_SET(vop, win, channel, (channel + 1) << 4 | channel);
1524 +- VOP_WIN_SET(vop, win, enable, 0);
1525 ++ vop_win_disable(vop, win);
1526 + VOP_WIN_SET(vop, win, gate, 1);
1527 + }
1528 +
1529 +diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
1530 +index 6887db878b38..4709f08f39e4 100644
1531 +--- a/drivers/gpu/drm/vgem/vgem_drv.c
1532 ++++ b/drivers/gpu/drm/vgem/vgem_drv.c
1533 +@@ -191,13 +191,9 @@ static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
1534 + ret = drm_gem_handle_create(file, &obj->base, handle);
1535 + drm_gem_object_put_unlocked(&obj->base);
1536 + if (ret)
1537 +- goto err;
1538 ++ return ERR_PTR(ret);
1539 +
1540 + return &obj->base;
1541 +-
1542 +-err:
1543 +- __vgem_gem_destroy(obj);
1544 +- return ERR_PTR(ret);
1545 + }
1546 +
1547 + static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
1548 +diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c
1549 +index ca4a74e04977..ce394009a36c 100644
1550 +--- a/drivers/gpu/drm/vkms/vkms_gem.c
1551 ++++ b/drivers/gpu/drm/vkms/vkms_gem.c
1552 +@@ -110,11 +110,8 @@ struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
1553 +
1554 + ret = drm_gem_handle_create(file, &obj->gem, handle);
1555 + drm_gem_object_put_unlocked(&obj->gem);
1556 +- if (ret) {
1557 +- drm_gem_object_release(&obj->gem);
1558 +- kfree(obj);
1559 ++ if (ret)
1560 + return ERR_PTR(ret);
1561 +- }
1562 +
1563 + return &obj->gem;
1564 + }
1565 +diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
1566 +index b5948ba6b3b3..fde728ea2900 100644
1567 +--- a/drivers/iommu/io-pgtable-arm-v7s.c
1568 ++++ b/drivers/iommu/io-pgtable-arm-v7s.c
1569 +@@ -161,6 +161,14 @@
1570 +
1571 + #define ARM_V7S_TCR_PD1 BIT(5)
1572 +
1573 ++#ifdef CONFIG_ZONE_DMA32
1574 ++#define ARM_V7S_TABLE_GFP_DMA GFP_DMA32
1575 ++#define ARM_V7S_TABLE_SLAB_FLAGS SLAB_CACHE_DMA32
1576 ++#else
1577 ++#define ARM_V7S_TABLE_GFP_DMA GFP_DMA
1578 ++#define ARM_V7S_TABLE_SLAB_FLAGS SLAB_CACHE_DMA
1579 ++#endif
1580 ++
1581 + typedef u32 arm_v7s_iopte;
1582 +
1583 + static bool selftest_running;
1584 +@@ -198,13 +206,16 @@ static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp,
1585 + void *table = NULL;
1586 +
1587 + if (lvl == 1)
1588 +- table = (void *)__get_dma_pages(__GFP_ZERO, get_order(size));
1589 ++ table = (void *)__get_free_pages(
1590 ++ __GFP_ZERO | ARM_V7S_TABLE_GFP_DMA, get_order(size));
1591 + else if (lvl == 2)
1592 +- table = kmem_cache_zalloc(data->l2_tables, gfp | GFP_DMA);
1593 ++ table = kmem_cache_zalloc(data->l2_tables, gfp);
1594 + phys = virt_to_phys(table);
1595 +- if (phys != (arm_v7s_iopte)phys)
1596 ++ if (phys != (arm_v7s_iopte)phys) {
1597 + /* Doesn't fit in PTE */
1598 ++ dev_err(dev, "Page table does not fit in PTE: %pa", &phys);
1599 + goto out_free;
1600 ++ }
1601 + if (table && !(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) {
1602 + dma = dma_map_single(dev, table, size, DMA_TO_DEVICE);
1603 + if (dma_mapping_error(dev, dma))
1604 +@@ -728,7 +739,7 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
1605 + data->l2_tables = kmem_cache_create("io-pgtable_armv7s_l2",
1606 + ARM_V7S_TABLE_SIZE(2),
1607 + ARM_V7S_TABLE_SIZE(2),
1608 +- SLAB_CACHE_DMA, NULL);
1609 ++ ARM_V7S_TABLE_SLAB_FLAGS, NULL);
1610 + if (!data->l2_tables)
1611 + goto out_free_data;
1612 +
1613 +diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c
1614 +index 4d85645c87f7..0928fd1f0e0c 100644
1615 +--- a/drivers/isdn/hardware/mISDN/hfcmulti.c
1616 ++++ b/drivers/isdn/hardware/mISDN/hfcmulti.c
1617 +@@ -4365,7 +4365,8 @@ setup_pci(struct hfc_multi *hc, struct pci_dev *pdev,
1618 + if (m->clock2)
1619 + test_and_set_bit(HFC_CHIP_CLOCK2, &hc->chip);
1620 +
1621 +- if (ent->device == 0xB410) {
1622 ++ if (ent->vendor == PCI_VENDOR_ID_DIGIUM &&
1623 ++ ent->device == PCI_DEVICE_ID_DIGIUM_HFC4S) {
1624 + test_and_set_bit(HFC_CHIP_B410P, &hc->chip);
1625 + test_and_set_bit(HFC_CHIP_PCM_MASTER, &hc->chip);
1626 + test_and_clear_bit(HFC_CHIP_PCM_SLAVE, &hc->chip);
1627 +diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
1628 +index d03775100f7d..619bf1498a66 100644
1629 +--- a/drivers/net/Kconfig
1630 ++++ b/drivers/net/Kconfig
1631 +@@ -213,8 +213,8 @@ config GENEVE
1632 +
1633 + config GTP
1634 + tristate "GPRS Tunneling Protocol datapath (GTP-U)"
1635 +- depends on INET && NET_UDP_TUNNEL
1636 +- select NET_IP_TUNNEL
1637 ++ depends on INET
1638 ++ select NET_UDP_TUNNEL
1639 + ---help---
1640 + This allows one to create gtp virtual interfaces that provide
1641 + the GPRS Tunneling Protocol datapath (GTP-U). This tunneling protocol
1642 +diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
1643 +index cdcde7f8e0b2..bdd8f2df6630 100644
1644 +--- a/drivers/net/dsa/qca8k.c
1645 ++++ b/drivers/net/dsa/qca8k.c
1646 +@@ -620,22 +620,6 @@ qca8k_adjust_link(struct dsa_switch *ds, int port, struct phy_device *phy)
1647 + qca8k_port_set_status(priv, port, 1);
1648 + }
1649 +
1650 +-static int
1651 +-qca8k_phy_read(struct dsa_switch *ds, int phy, int regnum)
1652 +-{
1653 +- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
1654 +-
1655 +- return mdiobus_read(priv->bus, phy, regnum);
1656 +-}
1657 +-
1658 +-static int
1659 +-qca8k_phy_write(struct dsa_switch *ds, int phy, int regnum, u16 val)
1660 +-{
1661 +- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
1662 +-
1663 +- return mdiobus_write(priv->bus, phy, regnum, val);
1664 +-}
1665 +-
1666 + static void
1667 + qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data)
1668 + {
1669 +@@ -876,8 +860,6 @@ static const struct dsa_switch_ops qca8k_switch_ops = {
1670 + .setup = qca8k_setup,
1671 + .adjust_link = qca8k_adjust_link,
1672 + .get_strings = qca8k_get_strings,
1673 +- .phy_read = qca8k_phy_read,
1674 +- .phy_write = qca8k_phy_write,
1675 + .get_ethtool_stats = qca8k_get_ethtool_stats,
1676 + .get_sset_count = qca8k_get_sset_count,
1677 + .get_mac_eee = qca8k_get_mac_eee,
1678 +diff --git a/drivers/net/ethernet/8390/mac8390.c b/drivers/net/ethernet/8390/mac8390.c
1679 +index 342ae08ec3c2..d60a86aa8aa8 100644
1680 +--- a/drivers/net/ethernet/8390/mac8390.c
1681 ++++ b/drivers/net/ethernet/8390/mac8390.c
1682 +@@ -153,8 +153,6 @@ static void dayna_block_input(struct net_device *dev, int count,
1683 + static void dayna_block_output(struct net_device *dev, int count,
1684 + const unsigned char *buf, int start_page);
1685 +
1686 +-#define memcmp_withio(a, b, c) memcmp((a), (void *)(b), (c))
1687 +-
1688 + /* Slow Sane (16-bit chunk memory read/write) Cabletron uses this */
1689 + static void slow_sane_get_8390_hdr(struct net_device *dev,
1690 + struct e8390_pkt_hdr *hdr, int ring_page);
1691 +@@ -233,19 +231,26 @@ static enum mac8390_type mac8390_ident(struct nubus_rsrc *fres)
1692 +
1693 + static enum mac8390_access mac8390_testio(unsigned long membase)
1694 + {
1695 +- unsigned long outdata = 0xA5A0B5B0;
1696 +- unsigned long indata = 0x00000000;
1697 ++ u32 outdata = 0xA5A0B5B0;
1698 ++ u32 indata = 0;
1699 ++
1700 + /* Try writing 32 bits */
1701 +- memcpy_toio((void __iomem *)membase, &outdata, 4);
1702 +- /* Now compare them */
1703 +- if (memcmp_withio(&outdata, membase, 4) == 0)
1704 ++ nubus_writel(outdata, membase);
1705 ++ /* Now read it back */
1706 ++ indata = nubus_readl(membase);
1707 ++ if (outdata == indata)
1708 + return ACCESS_32;
1709 ++
1710 ++ outdata = 0xC5C0D5D0;
1711 ++ indata = 0;
1712 ++
1713 + /* Write 16 bit output */
1714 + word_memcpy_tocard(membase, &outdata, 4);
1715 + /* Now read it back */
1716 + word_memcpy_fromcard(&indata, membase, 4);
1717 + if (outdata == indata)
1718 + return ACCESS_16;
1719 ++
1720 + return ACCESS_UNKNOWN;
1721 + }
1722 +
1723 +diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
1724 +index 7134d0d4cdf7..6f3312350cac 100644
1725 +--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
1726 ++++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
1727 +@@ -186,11 +186,12 @@ static void aq_rx_checksum(struct aq_ring_s *self,
1728 + }
1729 + if (buff->is_ip_cso) {
1730 + __skb_incr_checksum_unnecessary(skb);
1731 +- if (buff->is_udp_cso || buff->is_tcp_cso)
1732 +- __skb_incr_checksum_unnecessary(skb);
1733 + } else {
1734 + skb->ip_summed = CHECKSUM_NONE;
1735 + }
1736 ++
1737 ++ if (buff->is_udp_cso || buff->is_tcp_cso)
1738 ++ __skb_incr_checksum_unnecessary(skb);
1739 + }
1740 +
1741 + #define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
1742 +diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
1743 +index fcaf18fa3904..9a4cfa61ed93 100644
1744 +--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
1745 ++++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
1746 +@@ -105,20 +105,19 @@ static inline struct pgcache *nicvf_alloc_page(struct nicvf *nic,
1747 + /* Check if page can be recycled */
1748 + if (page) {
1749 + ref_count = page_ref_count(page);
1750 +- /* Check if this page has been used once i.e 'put_page'
1751 +- * called after packet transmission i.e internal ref_count
1752 +- * and page's ref_count are equal i.e page can be recycled.
1753 ++ /* This page can be recycled if internal ref_count and page's
1754 ++ * ref_count are equal, indicating that the page has been used
1755 ++ * once for packet transmission. For non-XDP mode, internal
1756 ++ * ref_count is always '1'.
1757 + */
1758 +- if (rbdr->is_xdp && (ref_count == pgcache->ref_count))
1759 +- pgcache->ref_count--;
1760 +- else
1761 +- page = NULL;
1762 +-
1763 +- /* In non-XDP mode, page's ref_count needs to be '1' for it
1764 +- * to be recycled.
1765 +- */
1766 +- if (!rbdr->is_xdp && (ref_count != 1))
1767 ++ if (rbdr->is_xdp) {
1768 ++ if (ref_count == pgcache->ref_count)
1769 ++ pgcache->ref_count--;
1770 ++ else
1771 ++ page = NULL;
1772 ++ } else if (ref_count != 1) {
1773 + page = NULL;
1774 ++ }
1775 + }
1776 +
1777 + if (!page) {
1778 +@@ -365,11 +364,10 @@ static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
1779 + while (head < rbdr->pgcnt) {
1780 + pgcache = &rbdr->pgcache[head];
1781 + if (pgcache->page && page_ref_count(pgcache->page) != 0) {
1782 +- if (!rbdr->is_xdp) {
1783 +- put_page(pgcache->page);
1784 +- continue;
1785 ++ if (rbdr->is_xdp) {
1786 ++ page_ref_sub(pgcache->page,
1787 ++ pgcache->ref_count - 1);
1788 + }
1789 +- page_ref_sub(pgcache->page, pgcache->ref_count - 1);
1790 + put_page(pgcache->page);
1791 + }
1792 + head++;
1793 +diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
1794 +index bc83ced94e1b..afed0f0f4027 100644
1795 +--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
1796 ++++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
1797 +@@ -111,10 +111,11 @@ static unsigned int is_jumbo_frm(int len, int enh_desc)
1798 +
1799 + static void refill_desc3(void *priv_ptr, struct dma_desc *p)
1800 + {
1801 +- struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr;
1802 ++ struct stmmac_rx_queue *rx_q = priv_ptr;
1803 ++ struct stmmac_priv *priv = rx_q->priv_data;
1804 +
1805 + /* Fill DES3 in case of RING mode */
1806 +- if (priv->dma_buf_sz >= BUF_SIZE_8KiB)
1807 ++ if (priv->dma_buf_sz == BUF_SIZE_16KiB)
1808 + p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB);
1809 + }
1810 +
1811 +diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c
1812 +index ddc2c5ea3787..7ceebbc4bcc2 100644
1813 +--- a/drivers/net/phy/meson-gxl.c
1814 ++++ b/drivers/net/phy/meson-gxl.c
1815 +@@ -211,6 +211,7 @@ static int meson_gxl_ack_interrupt(struct phy_device *phydev)
1816 + static int meson_gxl_config_intr(struct phy_device *phydev)
1817 + {
1818 + u16 val;
1819 ++ int ret;
1820 +
1821 + if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
1822 + val = INTSRC_ANEG_PR
1823 +@@ -223,6 +224,11 @@ static int meson_gxl_config_intr(struct phy_device *phydev)
1824 + val = 0;
1825 + }
1826 +
1827 ++ /* Ack any pending IRQ */
1828 ++ ret = meson_gxl_ack_interrupt(phydev);
1829 ++ if (ret)
1830 ++ return ret;
1831 ++
1832 + return phy_write(phydev, INTSRC_MASK, val);
1833 + }
1834 +
1835 +diff --git a/drivers/net/tun.c b/drivers/net/tun.c
1836 +index f3293355c784..044d5c3a4d04 100644
1837 +--- a/drivers/net/tun.c
1838 ++++ b/drivers/net/tun.c
1839 +@@ -1718,9 +1718,6 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1840 + int skb_xdp = 1;
1841 + bool frags = tun_napi_frags_enabled(tfile);
1842 +
1843 +- if (!(tun->dev->flags & IFF_UP))
1844 +- return -EIO;
1845 +-
1846 + if (!(tun->flags & IFF_NO_PI)) {
1847 + if (len < sizeof(pi))
1848 + return -EINVAL;
1849 +@@ -1822,6 +1819,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1850 + err = skb_copy_datagram_from_iter(skb, 0, from, len);
1851 +
1852 + if (err) {
1853 ++ err = -EFAULT;
1854 ++drop:
1855 + this_cpu_inc(tun->pcpu_stats->rx_dropped);
1856 + kfree_skb(skb);
1857 + if (frags) {
1858 +@@ -1829,7 +1828,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1859 + mutex_unlock(&tfile->napi_mutex);
1860 + }
1861 +
1862 +- return -EFAULT;
1863 ++ return err;
1864 + }
1865 + }
1866 +
1867 +@@ -1913,6 +1912,13 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1868 + !tfile->detached)
1869 + rxhash = __skb_get_hash_symmetric(skb);
1870 +
1871 ++ rcu_read_lock();
1872 ++ if (unlikely(!(tun->dev->flags & IFF_UP))) {
1873 ++ err = -EIO;
1874 ++ rcu_read_unlock();
1875 ++ goto drop;
1876 ++ }
1877 ++
1878 + if (frags) {
1879 + /* Exercise flow dissector code path. */
1880 + u32 headlen = eth_get_headlen(skb->data, skb_headlen(skb));
1881 +@@ -1920,6 +1926,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1882 + if (unlikely(headlen > skb_headlen(skb))) {
1883 + this_cpu_inc(tun->pcpu_stats->rx_dropped);
1884 + napi_free_frags(&tfile->napi);
1885 ++ rcu_read_unlock();
1886 + mutex_unlock(&tfile->napi_mutex);
1887 + WARN_ON(1);
1888 + return -ENOMEM;
1889 +@@ -1947,6 +1954,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1890 + } else {
1891 + netif_rx_ni(skb);
1892 + }
1893 ++ rcu_read_unlock();
1894 +
1895 + stats = get_cpu_ptr(tun->pcpu_stats);
1896 + u64_stats_update_begin(&stats->syncp);
1897 +diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
1898 +index f93547f257fb..449fc52f9a89 100644
1899 +--- a/drivers/net/vrf.c
1900 ++++ b/drivers/net/vrf.c
1901 +@@ -1262,6 +1262,7 @@ static void vrf_setup(struct net_device *dev)
1902 +
1903 + /* default to no qdisc; user can add if desired */
1904 + dev->priv_flags |= IFF_NO_QUEUE;
1905 ++ dev->priv_flags |= IFF_NO_RX_HANDLER;
1906 + }
1907 +
1908 + static int vrf_validate(struct nlattr *tb[], struct nlattr *data[],
1909 +diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
1910 +index 52387f7f12ed..0b1ec44acbf9 100644
1911 +--- a/drivers/net/vxlan.c
1912 ++++ b/drivers/net/vxlan.c
1913 +@@ -3798,10 +3798,8 @@ static void vxlan_destroy_tunnels(struct net *net, struct list_head *head)
1914 + /* If vxlan->dev is in the same netns, it has already been added
1915 + * to the list by the previous loop.
1916 + */
1917 +- if (!net_eq(dev_net(vxlan->dev), net)) {
1918 +- gro_cells_destroy(&vxlan->gro_cells);
1919 ++ if (!net_eq(dev_net(vxlan->dev), net))
1920 + unregister_netdevice_queue(vxlan->dev, head);
1921 +- }
1922 + }
1923 +
1924 + for (h = 0; h < PORT_HASH_SIZE; ++h)
1925 +diff --git a/drivers/phy/allwinner/phy-sun4i-usb.c b/drivers/phy/allwinner/phy-sun4i-usb.c
1926 +index 881078ff73f6..15c8fc2abf01 100644
1927 +--- a/drivers/phy/allwinner/phy-sun4i-usb.c
1928 ++++ b/drivers/phy/allwinner/phy-sun4i-usb.c
1929 +@@ -481,8 +481,11 @@ static int sun4i_usb_phy_set_mode(struct phy *_phy, enum phy_mode mode)
1930 + struct sun4i_usb_phy_data *data = to_sun4i_usb_phy_data(phy);
1931 + int new_mode;
1932 +
1933 +- if (phy->index != 0)
1934 ++ if (phy->index != 0) {
1935 ++ if (mode == PHY_MODE_USB_HOST)
1936 ++ return 0;
1937 + return -EINVAL;
1938 ++ }
1939 +
1940 + switch (mode) {
1941 + case PHY_MODE_USB_HOST:
1942 +diff --git a/drivers/platform/x86/intel_cht_int33fe.c b/drivers/platform/x86/intel_cht_int33fe.c
1943 +index 39d4100c60a2..a26f410800c2 100644
1944 +--- a/drivers/platform/x86/intel_cht_int33fe.c
1945 ++++ b/drivers/platform/x86/intel_cht_int33fe.c
1946 +@@ -34,7 +34,7 @@ struct cht_int33fe_data {
1947 + struct i2c_client *fusb302;
1948 + struct i2c_client *pi3usb30532;
1949 + /* Contain a list-head must be per device */
1950 +- struct device_connection connections[3];
1951 ++ struct device_connection connections[5];
1952 + };
1953 +
1954 + /*
1955 +@@ -174,19 +174,20 @@ static int cht_int33fe_probe(struct i2c_client *client)
1956 + return -EPROBE_DEFER; /* Wait for i2c-adapter to load */
1957 + }
1958 +
1959 +- data->connections[0].endpoint[0] = "i2c-fusb302";
1960 ++ data->connections[0].endpoint[0] = "port0";
1961 + data->connections[0].endpoint[1] = "i2c-pi3usb30532";
1962 + data->connections[0].id = "typec-switch";
1963 +- data->connections[1].endpoint[0] = "i2c-fusb302";
1964 ++ data->connections[1].endpoint[0] = "port0";
1965 + data->connections[1].endpoint[1] = "i2c-pi3usb30532";
1966 + data->connections[1].id = "typec-mux";
1967 +- data->connections[2].endpoint[0] = "i2c-fusb302";
1968 +- data->connections[2].endpoint[1] = "intel_xhci_usb_sw-role-switch";
1969 +- data->connections[2].id = "usb-role-switch";
1970 ++ data->connections[2].endpoint[0] = "port0";
1971 ++ data->connections[2].endpoint[1] = "i2c-pi3usb30532";
1972 ++ data->connections[2].id = "idff01m01";
1973 ++ data->connections[3].endpoint[0] = "i2c-fusb302";
1974 ++ data->connections[3].endpoint[1] = "intel_xhci_usb_sw-role-switch";
1975 ++ data->connections[3].id = "usb-role-switch";
1976 +
1977 +- device_connection_add(&data->connections[0]);
1978 +- device_connection_add(&data->connections[1]);
1979 +- device_connection_add(&data->connections[2]);
1980 ++ device_connections_add(data->connections);
1981 +
1982 + memset(&board_info, 0, sizeof(board_info));
1983 + strlcpy(board_info.type, "typec_fusb302", I2C_NAME_SIZE);
1984 +@@ -217,9 +218,7 @@ out_unregister_max17047:
1985 + if (data->max17047)
1986 + i2c_unregister_device(data->max17047);
1987 +
1988 +- device_connection_remove(&data->connections[2]);
1989 +- device_connection_remove(&data->connections[1]);
1990 +- device_connection_remove(&data->connections[0]);
1991 ++ device_connections_remove(data->connections);
1992 +
1993 + return -EPROBE_DEFER; /* Wait for the i2c-adapter to load */
1994 + }
1995 +@@ -233,9 +232,7 @@ static int cht_int33fe_remove(struct i2c_client *i2c)
1996 + if (data->max17047)
1997 + i2c_unregister_device(data->max17047);
1998 +
1999 +- device_connection_remove(&data->connections[2]);
2000 +- device_connection_remove(&data->connections[1]);
2001 +- device_connection_remove(&data->connections[0]);
2002 ++ device_connections_remove(data->connections);
2003 +
2004 + return 0;
2005 + }
2006 +diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
2007 +index f47d16b5810b..fabd9798e4c4 100644
2008 +--- a/drivers/s390/cio/vfio_ccw_drv.c
2009 ++++ b/drivers/s390/cio/vfio_ccw_drv.c
2010 +@@ -72,20 +72,24 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work)
2011 + {
2012 + struct vfio_ccw_private *private;
2013 + struct irb *irb;
2014 ++ bool is_final;
2015 +
2016 + private = container_of(work, struct vfio_ccw_private, io_work);
2017 + irb = &private->irb;
2018 +
2019 ++ is_final = !(scsw_actl(&irb->scsw) &
2020 ++ (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT));
2021 + if (scsw_is_solicited(&irb->scsw)) {
2022 + cp_update_scsw(&private->cp, &irb->scsw);
2023 +- cp_free(&private->cp);
2024 ++ if (is_final)
2025 ++ cp_free(&private->cp);
2026 + }
2027 + memcpy(private->io_region->irb_area, irb, sizeof(*irb));
2028 +
2029 + if (private->io_trigger)
2030 + eventfd_signal(private->io_trigger, 1);
2031 +
2032 +- if (private->mdev)
2033 ++ if (private->mdev && is_final)
2034 + private->state = VFIO_CCW_STATE_IDLE;
2035 + }
2036 +
2037 +diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
2038 +index e7e6b63905e2..ebdbc457003f 100644
2039 +--- a/drivers/s390/scsi/zfcp_erp.c
2040 ++++ b/drivers/s390/scsi/zfcp_erp.c
2041 +@@ -643,6 +643,20 @@ static void zfcp_erp_strategy_memwait(struct zfcp_erp_action *erp_action)
2042 + add_timer(&erp_action->timer);
2043 + }
2044 +
2045 ++void zfcp_erp_port_forced_reopen_all(struct zfcp_adapter *adapter,
2046 ++ int clear, char *dbftag)
2047 ++{
2048 ++ unsigned long flags;
2049 ++ struct zfcp_port *port;
2050 ++
2051 ++ write_lock_irqsave(&adapter->erp_lock, flags);
2052 ++ read_lock(&adapter->port_list_lock);
2053 ++ list_for_each_entry(port, &adapter->port_list, list)
2054 ++ _zfcp_erp_port_forced_reopen(port, clear, dbftag);
2055 ++ read_unlock(&adapter->port_list_lock);
2056 ++ write_unlock_irqrestore(&adapter->erp_lock, flags);
2057 ++}
2058 ++
2059 + static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter,
2060 + int clear, char *id)
2061 + {
2062 +@@ -1297,6 +1311,9 @@ static void zfcp_erp_try_rport_unblock(struct zfcp_port *port)
2063 + struct zfcp_scsi_dev *zsdev = sdev_to_zfcp(sdev);
2064 + int lun_status;
2065 +
2066 ++ if (sdev->sdev_state == SDEV_DEL ||
2067 ++ sdev->sdev_state == SDEV_CANCEL)
2068 ++ continue;
2069 + if (zsdev->port != port)
2070 + continue;
2071 + /* LUN under port of interest */
2072 +diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
2073 +index bd0c5a9f04cb..1b4d6a3afb8f 100644
2074 +--- a/drivers/s390/scsi/zfcp_ext.h
2075 ++++ b/drivers/s390/scsi/zfcp_ext.h
2076 +@@ -69,6 +69,8 @@ extern void zfcp_erp_clear_port_status(struct zfcp_port *, u32);
2077 + extern void zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id);
2078 + extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *);
2079 + extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *);
2080 ++extern void zfcp_erp_port_forced_reopen_all(struct zfcp_adapter *adapter,
2081 ++ int clear, char *dbftag);
2082 + extern void zfcp_erp_set_lun_status(struct scsi_device *, u32);
2083 + extern void zfcp_erp_clear_lun_status(struct scsi_device *, u32);
2084 + extern void zfcp_erp_lun_reopen(struct scsi_device *, int, char *);
2085 +diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
2086 +index a8efcb330bc1..a4bbfa4ef653 100644
2087 +--- a/drivers/s390/scsi/zfcp_scsi.c
2088 ++++ b/drivers/s390/scsi/zfcp_scsi.c
2089 +@@ -362,6 +362,10 @@ static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
2090 + struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
2091 + int ret = SUCCESS, fc_ret;
2092 +
2093 ++ if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE)) {
2094 ++ zfcp_erp_port_forced_reopen_all(adapter, 0, "schrh_p");
2095 ++ zfcp_erp_wait(adapter);
2096 ++ }
2097 + zfcp_erp_adapter_reopen(adapter, 0, "schrh_1");
2098 + zfcp_erp_wait(adapter);
2099 + fc_ret = fc_block_scsi_eh(scpnt);
2100 +diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
2101 +index a3a5162fa60e..e925eda93191 100644
2102 +--- a/drivers/scsi/sd.c
2103 ++++ b/drivers/scsi/sd.c
2104 +@@ -1408,11 +1408,6 @@ static void sd_release(struct gendisk *disk, fmode_t mode)
2105 + scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW);
2106 + }
2107 +
2108 +- /*
2109 +- * XXX and what if there are packets in flight and this close()
2110 +- * XXX is followed by a "rmmod sd_mod"?
2111 +- */
2112 +-
2113 + scsi_disk_put(sdkp);
2114 + }
2115 +
2116 +@@ -3078,6 +3073,9 @@ static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp,
2117 + unsigned int opt_xfer_bytes =
2118 + logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
2119 +
2120 ++ if (sdkp->opt_xfer_blocks == 0)
2121 ++ return false;
2122 ++
2123 + if (sdkp->opt_xfer_blocks > dev_max) {
2124 + sd_first_printk(KERN_WARNING, sdkp,
2125 + "Optimal transfer size %u logical blocks " \
2126 +@@ -3509,9 +3507,21 @@ static void scsi_disk_release(struct device *dev)
2127 + {
2128 + struct scsi_disk *sdkp = to_scsi_disk(dev);
2129 + struct gendisk *disk = sdkp->disk;
2130 +-
2131 ++ struct request_queue *q = disk->queue;
2132 ++
2133 + ida_free(&sd_index_ida, sdkp->index);
2134 +
2135 ++ /*
2136 ++ * Wait until all requests that are in progress have completed.
2137 ++ * This is necessary to avoid that e.g. scsi_end_request() crashes
2138 ++ * due to clearing the disk->private_data pointer. Wait from inside
2139 ++ * scsi_disk_release() instead of from sd_release() to avoid that
2140 ++ * freezing and unfreezing the request queue affects user space I/O
2141 ++ * in case multiple processes open a /dev/sd... node concurrently.
2142 ++ */
2143 ++ blk_mq_freeze_queue(q);
2144 ++ blk_mq_unfreeze_queue(q);
2145 ++
2146 + disk->private_data = NULL;
2147 + put_disk(disk);
2148 + put_device(&sdkp->device->sdev_gendev);
2149 +diff --git a/drivers/staging/comedi/comedidev.h b/drivers/staging/comedi/comedidev.h
2150 +index 5775a93917f4..fbbdf4b0f6c5 100644
2151 +--- a/drivers/staging/comedi/comedidev.h
2152 ++++ b/drivers/staging/comedi/comedidev.h
2153 +@@ -987,6 +987,8 @@ int comedi_dio_insn_config(struct comedi_device *dev,
2154 + unsigned int mask);
2155 + unsigned int comedi_dio_update_state(struct comedi_subdevice *s,
2156 + unsigned int *data);
2157 ++unsigned int comedi_bytes_per_scan_cmd(struct comedi_subdevice *s,
2158 ++ struct comedi_cmd *cmd);
2159 + unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s);
2160 + unsigned int comedi_nscans_left(struct comedi_subdevice *s,
2161 + unsigned int nscans);
2162 +diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
2163 +index 57dd63d548b7..5329a3955214 100644
2164 +--- a/drivers/staging/comedi/drivers.c
2165 ++++ b/drivers/staging/comedi/drivers.c
2166 +@@ -381,11 +381,13 @@ unsigned int comedi_dio_update_state(struct comedi_subdevice *s,
2167 + EXPORT_SYMBOL_GPL(comedi_dio_update_state);
2168 +
2169 + /**
2170 +- * comedi_bytes_per_scan() - Get length of asynchronous command "scan" in bytes
2171 ++ * comedi_bytes_per_scan_cmd() - Get length of asynchronous command "scan" in
2172 ++ * bytes
2173 + * @s: COMEDI subdevice.
2174 ++ * @cmd: COMEDI command.
2175 + *
2176 + * Determines the overall scan length according to the subdevice type and the
2177 +- * number of channels in the scan.
2178 ++ * number of channels in the scan for the specified command.
2179 + *
2180 + * For digital input, output or input/output subdevices, samples for
2181 + * multiple channels are assumed to be packed into one or more unsigned
2182 +@@ -395,9 +397,9 @@ EXPORT_SYMBOL_GPL(comedi_dio_update_state);
2183 + *
2184 + * Returns the overall scan length in bytes.
2185 + */
2186 +-unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s)
2187 ++unsigned int comedi_bytes_per_scan_cmd(struct comedi_subdevice *s,
2188 ++ struct comedi_cmd *cmd)
2189 + {
2190 +- struct comedi_cmd *cmd = &s->async->cmd;
2191 + unsigned int num_samples;
2192 + unsigned int bits_per_sample;
2193 +
2194 +@@ -414,6 +416,29 @@ unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s)
2195 + }
2196 + return comedi_samples_to_bytes(s, num_samples);
2197 + }
2198 ++EXPORT_SYMBOL_GPL(comedi_bytes_per_scan_cmd);
2199 ++
2200 ++/**
2201 ++ * comedi_bytes_per_scan() - Get length of asynchronous command "scan" in bytes
2202 ++ * @s: COMEDI subdevice.
2203 ++ *
2204 ++ * Determines the overall scan length according to the subdevice type and the
2205 ++ * number of channels in the scan for the current command.
2206 ++ *
2207 ++ * For digital input, output or input/output subdevices, samples for
2208 ++ * multiple channels are assumed to be packed into one or more unsigned
2209 ++ * short or unsigned int values according to the subdevice's %SDF_LSAMPL
2210 ++ * flag. For other types of subdevice, samples are assumed to occupy a
2211 ++ * whole unsigned short or unsigned int according to the %SDF_LSAMPL flag.
2212 ++ *
2213 ++ * Returns the overall scan length in bytes.
2214 ++ */
2215 ++unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s)
2216 ++{
2217 ++ struct comedi_cmd *cmd = &s->async->cmd;
2218 ++
2219 ++ return comedi_bytes_per_scan_cmd(s, cmd);
2220 ++}
2221 + EXPORT_SYMBOL_GPL(comedi_bytes_per_scan);
2222 +
2223 + static unsigned int __comedi_nscans_left(struct comedi_subdevice *s,
2224 +diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
2225 +index 4dee2fc37aed..d799b1b55de3 100644
2226 +--- a/drivers/staging/comedi/drivers/ni_mio_common.c
2227 ++++ b/drivers/staging/comedi/drivers/ni_mio_common.c
2228 +@@ -3516,6 +3516,7 @@ static int ni_cdio_check_chanlist(struct comedi_device *dev,
2229 + static int ni_cdio_cmdtest(struct comedi_device *dev,
2230 + struct comedi_subdevice *s, struct comedi_cmd *cmd)
2231 + {
2232 ++ unsigned int bytes_per_scan;
2233 + int err = 0;
2234 + int tmp;
2235 +
2236 +@@ -3545,9 +3546,12 @@ static int ni_cdio_cmdtest(struct comedi_device *dev,
2237 + err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0);
2238 + err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
2239 + cmd->chanlist_len);
2240 +- err |= comedi_check_trigger_arg_max(&cmd->stop_arg,
2241 +- s->async->prealloc_bufsz /
2242 +- comedi_bytes_per_scan(s));
2243 ++ bytes_per_scan = comedi_bytes_per_scan_cmd(s, cmd);
2244 ++ if (bytes_per_scan) {
2245 ++ err |= comedi_check_trigger_arg_max(&cmd->stop_arg,
2246 ++ s->async->prealloc_bufsz /
2247 ++ bytes_per_scan);
2248 ++ }
2249 +
2250 + if (err)
2251 + return 3;
2252 +diff --git a/drivers/staging/erofs/dir.c b/drivers/staging/erofs/dir.c
2253 +index 04b84ff31d03..0a089cf5c78f 100644
2254 +--- a/drivers/staging/erofs/dir.c
2255 ++++ b/drivers/staging/erofs/dir.c
2256 +@@ -23,6 +23,21 @@ static const unsigned char erofs_filetype_table[EROFS_FT_MAX] = {
2257 + [EROFS_FT_SYMLINK] = DT_LNK,
2258 + };
2259 +
2260 ++static void debug_one_dentry(unsigned char d_type, const char *de_name,
2261 ++ unsigned int de_namelen)
2262 ++{
2263 ++#ifdef CONFIG_EROFS_FS_DEBUG
2264 ++ /* since the on-disk name could not have the trailing '\0' */
2265 ++ unsigned char dbg_namebuf[EROFS_NAME_LEN + 1];
2266 ++
2267 ++ memcpy(dbg_namebuf, de_name, de_namelen);
2268 ++ dbg_namebuf[de_namelen] = '\0';
2269 ++
2270 ++ debugln("found dirent %s de_len %u d_type %d", dbg_namebuf,
2271 ++ de_namelen, d_type);
2272 ++#endif
2273 ++}
2274 ++
2275 + static int erofs_fill_dentries(struct dir_context *ctx,
2276 + void *dentry_blk, unsigned *ofs,
2277 + unsigned nameoff, unsigned maxsize)
2278 +@@ -33,14 +48,10 @@ static int erofs_fill_dentries(struct dir_context *ctx,
2279 + de = dentry_blk + *ofs;
2280 + while (de < end) {
2281 + const char *de_name;
2282 +- int de_namelen;
2283 ++ unsigned int de_namelen;
2284 + unsigned char d_type;
2285 +-#ifdef CONFIG_EROFS_FS_DEBUG
2286 +- unsigned dbg_namelen;
2287 +- unsigned char dbg_namebuf[EROFS_NAME_LEN];
2288 +-#endif
2289 +
2290 +- if (unlikely(de->file_type < EROFS_FT_MAX))
2291 ++ if (de->file_type < EROFS_FT_MAX)
2292 + d_type = erofs_filetype_table[de->file_type];
2293 + else
2294 + d_type = DT_UNKNOWN;
2295 +@@ -48,26 +59,20 @@ static int erofs_fill_dentries(struct dir_context *ctx,
2296 + nameoff = le16_to_cpu(de->nameoff);
2297 + de_name = (char *)dentry_blk + nameoff;
2298 +
2299 +- de_namelen = unlikely(de + 1 >= end) ?
2300 +- /* last directory entry */
2301 +- strnlen(de_name, maxsize - nameoff) :
2302 +- le16_to_cpu(de[1].nameoff) - nameoff;
2303 ++ /* the last dirent in the block? */
2304 ++ if (de + 1 >= end)
2305 ++ de_namelen = strnlen(de_name, maxsize - nameoff);
2306 ++ else
2307 ++ de_namelen = le16_to_cpu(de[1].nameoff) - nameoff;
2308 +
2309 + /* a corrupted entry is found */
2310 +- if (unlikely(de_namelen < 0)) {
2311 ++ if (unlikely(nameoff + de_namelen > maxsize ||
2312 ++ de_namelen > EROFS_NAME_LEN)) {
2313 + DBG_BUGON(1);
2314 + return -EIO;
2315 + }
2316 +
2317 +-#ifdef CONFIG_EROFS_FS_DEBUG
2318 +- dbg_namelen = min(EROFS_NAME_LEN - 1, de_namelen);
2319 +- memcpy(dbg_namebuf, de_name, dbg_namelen);
2320 +- dbg_namebuf[dbg_namelen] = '\0';
2321 +-
2322 +- debugln("%s, found de_name %s de_len %d d_type %d", __func__,
2323 +- dbg_namebuf, de_namelen, d_type);
2324 +-#endif
2325 +-
2326 ++ debug_one_dentry(d_type, de_name, de_namelen);
2327 + if (!dir_emit(ctx, de_name, de_namelen,
2328 + le64_to_cpu(de->nid), d_type))
2329 + /* stoped by some reason */
2330 +diff --git a/drivers/staging/erofs/unzip_vle.c b/drivers/staging/erofs/unzip_vle.c
2331 +index f44662dd795c..ad6fe6d9d00a 100644
2332 +--- a/drivers/staging/erofs/unzip_vle.c
2333 ++++ b/drivers/staging/erofs/unzip_vle.c
2334 +@@ -885,6 +885,7 @@ repeat:
2335 + overlapped = false;
2336 + compressed_pages = grp->compressed_pages;
2337 +
2338 ++ err = 0;
2339 + for (i = 0; i < clusterpages; ++i) {
2340 + unsigned pagenr;
2341 +
2342 +@@ -894,26 +895,39 @@ repeat:
2343 + DBG_BUGON(page == NULL);
2344 + DBG_BUGON(page->mapping == NULL);
2345 +
2346 +- if (z_erofs_is_stagingpage(page))
2347 +- continue;
2348 ++ if (!z_erofs_is_stagingpage(page)) {
2349 + #ifdef EROFS_FS_HAS_MANAGED_CACHE
2350 +- if (page->mapping == mngda) {
2351 +- DBG_BUGON(!PageUptodate(page));
2352 +- continue;
2353 +- }
2354 ++ if (page->mapping == mngda) {
2355 ++ if (unlikely(!PageUptodate(page)))
2356 ++ err = -EIO;
2357 ++ continue;
2358 ++ }
2359 + #endif
2360 +
2361 +- /* only non-head page could be reused as a compressed page */
2362 +- pagenr = z_erofs_onlinepage_index(page);
2363 ++ /*
2364 ++ * only if non-head page can be selected
2365 ++ * for inplace decompression
2366 ++ */
2367 ++ pagenr = z_erofs_onlinepage_index(page);
2368 +
2369 +- DBG_BUGON(pagenr >= nr_pages);
2370 +- DBG_BUGON(pages[pagenr]);
2371 +- ++sparsemem_pages;
2372 +- pages[pagenr] = page;
2373 ++ DBG_BUGON(pagenr >= nr_pages);
2374 ++ DBG_BUGON(pages[pagenr]);
2375 ++ ++sparsemem_pages;
2376 ++ pages[pagenr] = page;
2377 +
2378 +- overlapped = true;
2379 ++ overlapped = true;
2380 ++ }
2381 ++
2382 ++ /* PG_error needs checking for inplaced and staging pages */
2383 ++ if (unlikely(PageError(page))) {
2384 ++ DBG_BUGON(PageUptodate(page));
2385 ++ err = -EIO;
2386 ++ }
2387 + }
2388 +
2389 ++ if (unlikely(err))
2390 ++ goto out;
2391 ++
2392 + llen = (nr_pages << PAGE_SHIFT) - work->pageofs;
2393 +
2394 + if (z_erofs_vle_workgrp_fmt(grp) == Z_EROFS_VLE_WORKGRP_FMT_PLAIN) {
2395 +@@ -942,6 +956,10 @@ repeat:
2396 +
2397 + skip_allocpage:
2398 + vout = erofs_vmap(pages, nr_pages);
2399 ++ if (!vout) {
2400 ++ err = -ENOMEM;
2401 ++ goto out;
2402 ++ }
2403 +
2404 + err = z_erofs_vle_unzip_vmap(compressed_pages,
2405 + clusterpages, vout, llen, work->pageofs, overlapped);
2406 +@@ -1078,6 +1096,8 @@ static inline bool recover_managed_page(struct z_erofs_vle_workgroup *grp,
2407 + return true;
2408 +
2409 + lock_page(page);
2410 ++ ClearPageError(page);
2411 ++
2412 + if (unlikely(!PagePrivate(page))) {
2413 + set_page_private(page, (unsigned long)grp);
2414 + SetPagePrivate(page);
2415 +diff --git a/drivers/staging/erofs/unzip_vle_lz4.c b/drivers/staging/erofs/unzip_vle_lz4.c
2416 +index 055420e8af2c..3a7428317f0a 100644
2417 +--- a/drivers/staging/erofs/unzip_vle_lz4.c
2418 ++++ b/drivers/staging/erofs/unzip_vle_lz4.c
2419 +@@ -116,10 +116,13 @@ int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages,
2420 +
2421 + nr_pages = DIV_ROUND_UP(outlen + pageofs, PAGE_SIZE);
2422 +
2423 +- if (clusterpages == 1)
2424 ++ if (clusterpages == 1) {
2425 + vin = kmap_atomic(compressed_pages[0]);
2426 +- else
2427 ++ } else {
2428 + vin = erofs_vmap(compressed_pages, clusterpages);
2429 ++ if (!vin)
2430 ++ return -ENOMEM;
2431 ++ }
2432 +
2433 + preempt_disable();
2434 + vout = erofs_pcpubuf[smp_processor_id()].data;
2435 +diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c
2436 +index 947c79532e10..d5383974d40e 100644
2437 +--- a/drivers/staging/speakup/speakup_soft.c
2438 ++++ b/drivers/staging/speakup/speakup_soft.c
2439 +@@ -208,12 +208,15 @@ static ssize_t softsynthx_read(struct file *fp, char __user *buf, size_t count,
2440 + return -EINVAL;
2441 +
2442 + spin_lock_irqsave(&speakup_info.spinlock, flags);
2443 ++ synth_soft.alive = 1;
2444 + while (1) {
2445 + prepare_to_wait(&speakup_event, &wait, TASK_INTERRUPTIBLE);
2446 +- if (!unicode)
2447 +- synth_buffer_skip_nonlatin1();
2448 +- if (!synth_buffer_empty() || speakup_info.flushing)
2449 +- break;
2450 ++ if (synth_current() == &synth_soft) {
2451 ++ if (!unicode)
2452 ++ synth_buffer_skip_nonlatin1();
2453 ++ if (!synth_buffer_empty() || speakup_info.flushing)
2454 ++ break;
2455 ++ }
2456 + spin_unlock_irqrestore(&speakup_info.spinlock, flags);
2457 + if (fp->f_flags & O_NONBLOCK) {
2458 + finish_wait(&speakup_event, &wait);
2459 +@@ -233,6 +236,8 @@ static ssize_t softsynthx_read(struct file *fp, char __user *buf, size_t count,
2460 +
2461 + /* Keep 3 bytes available for a 16bit UTF-8-encoded character */
2462 + while (chars_sent <= count - bytes_per_ch) {
2463 ++ if (synth_current() != &synth_soft)
2464 ++ break;
2465 + if (speakup_info.flushing) {
2466 + speakup_info.flushing = 0;
2467 + ch = '\x18';
2468 +@@ -329,7 +334,8 @@ static __poll_t softsynth_poll(struct file *fp, struct poll_table_struct *wait)
2469 + poll_wait(fp, &speakup_event, wait);
2470 +
2471 + spin_lock_irqsave(&speakup_info.spinlock, flags);
2472 +- if (!synth_buffer_empty() || speakup_info.flushing)
2473 ++ if (synth_current() == &synth_soft &&
2474 ++ (!synth_buffer_empty() || speakup_info.flushing))
2475 + ret = EPOLLIN | EPOLLRDNORM;
2476 + spin_unlock_irqrestore(&speakup_info.spinlock, flags);
2477 + return ret;
2478 +diff --git a/drivers/staging/speakup/spk_priv.h b/drivers/staging/speakup/spk_priv.h
2479 +index 7b3a16e1fa23..796ffcca43c1 100644
2480 +--- a/drivers/staging/speakup/spk_priv.h
2481 ++++ b/drivers/staging/speakup/spk_priv.h
2482 +@@ -72,6 +72,7 @@ int synth_request_region(unsigned long start, unsigned long n);
2483 + int synth_release_region(unsigned long start, unsigned long n);
2484 + int synth_add(struct spk_synth *in_synth);
2485 + void synth_remove(struct spk_synth *in_synth);
2486 ++struct spk_synth *synth_current(void);
2487 +
2488 + extern struct speakup_info_t speakup_info;
2489 +
2490 +diff --git a/drivers/staging/speakup/synth.c b/drivers/staging/speakup/synth.c
2491 +index 25f259ee4ffc..3568bfb89912 100644
2492 +--- a/drivers/staging/speakup/synth.c
2493 ++++ b/drivers/staging/speakup/synth.c
2494 +@@ -481,4 +481,10 @@ void synth_remove(struct spk_synth *in_synth)
2495 + }
2496 + EXPORT_SYMBOL_GPL(synth_remove);
2497 +
2498 ++struct spk_synth *synth_current(void)
2499 ++{
2500 ++ return synth;
2501 ++}
2502 ++EXPORT_SYMBOL_GPL(synth_current);
2503 ++
2504 + short spk_punc_masks[] = { 0, SOME, MOST, PUNC, PUNC | B_SYM };
2505 +diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
2506 +index 1ab0e8562d40..607804aa560d 100644
2507 +--- a/drivers/staging/vt6655/device_main.c
2508 ++++ b/drivers/staging/vt6655/device_main.c
2509 +@@ -1040,8 +1040,6 @@ static void vnt_interrupt_process(struct vnt_private *priv)
2510 + return;
2511 + }
2512 +
2513 +- MACvIntDisable(priv->PortOffset);
2514 +-
2515 + spin_lock_irqsave(&priv->lock, flags);
2516 +
2517 + /* Read low level stats */
2518 +@@ -1129,8 +1127,6 @@ static void vnt_interrupt_process(struct vnt_private *priv)
2519 + }
2520 +
2521 + spin_unlock_irqrestore(&priv->lock, flags);
2522 +-
2523 +- MACvIntEnable(priv->PortOffset, IMR_MASK_VALUE);
2524 + }
2525 +
2526 + static void vnt_interrupt_work(struct work_struct *work)
2527 +@@ -1140,14 +1136,17 @@ static void vnt_interrupt_work(struct work_struct *work)
2528 +
2529 + if (priv->vif)
2530 + vnt_interrupt_process(priv);
2531 ++
2532 ++ MACvIntEnable(priv->PortOffset, IMR_MASK_VALUE);
2533 + }
2534 +
2535 + static irqreturn_t vnt_interrupt(int irq, void *arg)
2536 + {
2537 + struct vnt_private *priv = arg;
2538 +
2539 +- if (priv->vif)
2540 +- schedule_work(&priv->interrupt_work);
2541 ++ schedule_work(&priv->interrupt_work);
2542 ++
2543 ++ MACvIntDisable(priv->PortOffset);
2544 +
2545 + return IRQ_HANDLED;
2546 + }
2547 +diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
2548 +index 8e4428725848..bfdd5ad4116f 100644
2549 +--- a/drivers/tty/serial/atmel_serial.c
2550 ++++ b/drivers/tty/serial/atmel_serial.c
2551 +@@ -1156,6 +1156,10 @@ static int atmel_prepare_rx_dma(struct uart_port *port)
2552 + sg_dma_len(&atmel_port->sg_rx)/2,
2553 + DMA_DEV_TO_MEM,
2554 + DMA_PREP_INTERRUPT);
2555 ++ if (!desc) {
2556 ++ dev_err(port->dev, "Preparing DMA cyclic failed\n");
2557 ++ goto chan_err;
2558 ++ }
2559 + desc->callback = atmel_complete_rx_dma;
2560 + desc->callback_param = port;
2561 + atmel_port->desc_rx = desc;
2562 +diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
2563 +index 93d3a0ec5e11..b0aa864f84a9 100644
2564 +--- a/drivers/tty/serial/kgdboc.c
2565 ++++ b/drivers/tty/serial/kgdboc.c
2566 +@@ -145,8 +145,10 @@ static int configure_kgdboc(void)
2567 + char *cptr = config;
2568 + struct console *cons;
2569 +
2570 +- if (!strlen(config) || isspace(config[0]))
2571 ++ if (!strlen(config) || isspace(config[0])) {
2572 ++ err = 0;
2573 + goto noconfig;
2574 ++ }
2575 +
2576 + kgdboc_io_ops.is_console = 0;
2577 + kgdb_tty_driver = NULL;
2578 +diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
2579 +index 3db48fcd6068..4c4070a202fb 100644
2580 +--- a/drivers/tty/serial/max310x.c
2581 ++++ b/drivers/tty/serial/max310x.c
2582 +@@ -1419,6 +1419,8 @@ static int max310x_spi_probe(struct spi_device *spi)
2583 + if (spi->dev.of_node) {
2584 + const struct of_device_id *of_id =
2585 + of_match_device(max310x_dt_ids, &spi->dev);
2586 ++ if (!of_id)
2587 ++ return -ENODEV;
2588 +
2589 + devtype = (struct max310x_devtype *)of_id->data;
2590 + } else {
2591 +diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
2592 +index 170e446a2f62..7d26c9b57d8e 100644
2593 +--- a/drivers/tty/serial/mvebu-uart.c
2594 ++++ b/drivers/tty/serial/mvebu-uart.c
2595 +@@ -799,6 +799,9 @@ static int mvebu_uart_probe(struct platform_device *pdev)
2596 + return -EINVAL;
2597 + }
2598 +
2599 ++ if (!match)
2600 ++ return -ENODEV;
2601 ++
2602 + /* Assume that all UART ports have a DT alias or none has */
2603 + id = of_alias_get_id(pdev->dev.of_node, "serial");
2604 + if (!pdev->dev.of_node || id < 0)
2605 +diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
2606 +index 76aa289652f7..34acdf29713d 100644
2607 +--- a/drivers/tty/serial/mxs-auart.c
2608 ++++ b/drivers/tty/serial/mxs-auart.c
2609 +@@ -1685,6 +1685,10 @@ static int mxs_auart_probe(struct platform_device *pdev)
2610 +
2611 + s->port.mapbase = r->start;
2612 + s->port.membase = ioremap(r->start, resource_size(r));
2613 ++ if (!s->port.membase) {
2614 ++ ret = -ENOMEM;
2615 ++ goto out_disable_clks;
2616 ++ }
2617 + s->port.ops = &mxs_auart_ops;
2618 + s->port.iotype = UPIO_MEM;
2619 + s->port.fifosize = MXS_AUART_FIFO_SIZE;
2620 +diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
2621 +index 35d1f6fa0e3c..5b96df4ad5b3 100644
2622 +--- a/drivers/tty/serial/qcom_geni_serial.c
2623 ++++ b/drivers/tty/serial/qcom_geni_serial.c
2624 +@@ -1052,7 +1052,7 @@ static int __init qcom_geni_console_setup(struct console *co, char *options)
2625 + {
2626 + struct uart_port *uport;
2627 + struct qcom_geni_serial_port *port;
2628 +- int baud;
2629 ++ int baud = 9600;
2630 + int bits = 8;
2631 + int parity = 'n';
2632 + int flow = 'n';
2633 +diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
2634 +index 859b173e3b82..cbbf239aea0f 100644
2635 +--- a/drivers/tty/serial/sh-sci.c
2636 ++++ b/drivers/tty/serial/sh-sci.c
2637 +@@ -838,19 +838,9 @@ static void sci_transmit_chars(struct uart_port *port)
2638 +
2639 + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
2640 + uart_write_wakeup(port);
2641 +- if (uart_circ_empty(xmit)) {
2642 ++ if (uart_circ_empty(xmit))
2643 + sci_stop_tx(port);
2644 +- } else {
2645 +- ctrl = serial_port_in(port, SCSCR);
2646 +-
2647 +- if (port->type != PORT_SCI) {
2648 +- serial_port_in(port, SCxSR); /* Dummy read */
2649 +- sci_clear_SCxSR(port, SCxSR_TDxE_CLEAR(port));
2650 +- }
2651 +
2652 +- ctrl |= SCSCR_TIE;
2653 +- serial_port_out(port, SCSCR, ctrl);
2654 +- }
2655 + }
2656 +
2657 + /* On SH3, SCIF may read end-of-break as a space->mark char */
2658 +diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
2659 +index 08b8aa5299b5..32da5a4182ac 100644
2660 +--- a/drivers/usb/class/cdc-acm.c
2661 ++++ b/drivers/usb/class/cdc-acm.c
2662 +@@ -558,10 +558,8 @@ static void acm_softint(struct work_struct *work)
2663 + clear_bit(EVENT_RX_STALL, &acm->flags);
2664 + }
2665 +
2666 +- if (test_bit(EVENT_TTY_WAKEUP, &acm->flags)) {
2667 ++ if (test_and_clear_bit(EVENT_TTY_WAKEUP, &acm->flags))
2668 + tty_port_tty_wakeup(&acm->port);
2669 +- clear_bit(EVENT_TTY_WAKEUP, &acm->flags);
2670 +- }
2671 + }
2672 +
2673 + /*
2674 +diff --git a/drivers/usb/common/common.c b/drivers/usb/common/common.c
2675 +index 48277bbc15e4..73c8e6591746 100644
2676 +--- a/drivers/usb/common/common.c
2677 ++++ b/drivers/usb/common/common.c
2678 +@@ -145,6 +145,8 @@ enum usb_dr_mode of_usb_get_dr_mode_by_phy(struct device_node *np, int arg0)
2679 +
2680 + do {
2681 + controller = of_find_node_with_property(controller, "phys");
2682 ++ if (!of_device_is_available(controller))
2683 ++ continue;
2684 + index = 0;
2685 + do {
2686 + if (arg0 == -1) {
2687 +diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
2688 +index 54e859dcb25c..492bb44153b3 100644
2689 +--- a/drivers/usb/gadget/function/f_hid.c
2690 ++++ b/drivers/usb/gadget/function/f_hid.c
2691 +@@ -391,20 +391,20 @@ try_again:
2692 + req->complete = f_hidg_req_complete;
2693 + req->context = hidg;
2694 +
2695 ++ spin_unlock_irqrestore(&hidg->write_spinlock, flags);
2696 ++
2697 + status = usb_ep_queue(hidg->in_ep, req, GFP_ATOMIC);
2698 + if (status < 0) {
2699 + ERROR(hidg->func.config->cdev,
2700 + "usb_ep_queue error on int endpoint %zd\n", status);
2701 +- goto release_write_pending_unlocked;
2702 ++ goto release_write_pending;
2703 + } else {
2704 + status = count;
2705 + }
2706 +- spin_unlock_irqrestore(&hidg->write_spinlock, flags);
2707 +
2708 + return status;
2709 + release_write_pending:
2710 + spin_lock_irqsave(&hidg->write_spinlock, flags);
2711 +-release_write_pending_unlocked:
2712 + hidg->write_pending = 0;
2713 + spin_unlock_irqrestore(&hidg->write_spinlock, flags);
2714 +
2715 +diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c
2716 +index 86cff5c28eff..ba841c569c48 100644
2717 +--- a/drivers/usb/host/xhci-dbgcap.c
2718 ++++ b/drivers/usb/host/xhci-dbgcap.c
2719 +@@ -516,7 +516,6 @@ static int xhci_do_dbc_stop(struct xhci_hcd *xhci)
2720 + return -1;
2721 +
2722 + writel(0, &dbc->regs->control);
2723 +- xhci_dbc_mem_cleanup(xhci);
2724 + dbc->state = DS_DISABLED;
2725 +
2726 + return 0;
2727 +@@ -562,8 +561,10 @@ static void xhci_dbc_stop(struct xhci_hcd *xhci)
2728 + ret = xhci_do_dbc_stop(xhci);
2729 + spin_unlock_irqrestore(&dbc->lock, flags);
2730 +
2731 +- if (!ret)
2732 ++ if (!ret) {
2733 ++ xhci_dbc_mem_cleanup(xhci);
2734 + pm_runtime_put_sync(xhci_to_hcd(xhci)->self.controller);
2735 ++ }
2736 + }
2737 +
2738 + static void
2739 +diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
2740 +index 01b5818a4be5..333f9202ec8b 100644
2741 +--- a/drivers/usb/host/xhci-hub.c
2742 ++++ b/drivers/usb/host/xhci-hub.c
2743 +@@ -1501,20 +1501,25 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
2744 + port_index = max_ports;
2745 + while (port_index--) {
2746 + u32 t1, t2;
2747 +-
2748 ++ int retries = 10;
2749 ++retry:
2750 + t1 = readl(ports[port_index]->addr);
2751 + t2 = xhci_port_state_to_neutral(t1);
2752 + portsc_buf[port_index] = 0;
2753 +
2754 +- /* Bail out if a USB3 port has a new device in link training */
2755 +- if ((hcd->speed >= HCD_USB3) &&
2756 ++ /*
2757 ++ * Give a USB3 port in link training time to finish, but don't
2758 ++ * prevent suspend as port might be stuck
2759 ++ */
2760 ++ if ((hcd->speed >= HCD_USB3) && retries-- &&
2761 + (t1 & PORT_PLS_MASK) == XDEV_POLLING) {
2762 +- bus_state->bus_suspended = 0;
2763 + spin_unlock_irqrestore(&xhci->lock, flags);
2764 +- xhci_dbg(xhci, "Bus suspend bailout, port in polling\n");
2765 +- return -EBUSY;
2766 ++ msleep(XHCI_PORT_POLLING_LFPS_TIME);
2767 ++ spin_lock_irqsave(&xhci->lock, flags);
2768 ++ xhci_dbg(xhci, "port %d polling in bus suspend, waiting\n",
2769 ++ port_index);
2770 ++ goto retry;
2771 + }
2772 +-
2773 + /* suspend ports in U0, or bail out for new connect changes */
2774 + if ((t1 & PORT_PE) && (t1 & PORT_PLS_MASK) == XDEV_U0) {
2775 + if ((t1 & PORT_CSC) && wake_enabled) {
2776 +diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c
2777 +index a6e463715779..671bce18782c 100644
2778 +--- a/drivers/usb/host/xhci-rcar.c
2779 ++++ b/drivers/usb/host/xhci-rcar.c
2780 +@@ -246,6 +246,7 @@ int xhci_rcar_init_quirk(struct usb_hcd *hcd)
2781 + if (!xhci_rcar_wait_for_pll_active(hcd))
2782 + return -ETIMEDOUT;
2783 +
2784 ++ xhci->quirks |= XHCI_TRUST_TX_LENGTH;
2785 + return xhci_rcar_download_firmware(hcd);
2786 + }
2787 +
2788 +diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
2789 +index 9ae17a666bdb..f054464347c9 100644
2790 +--- a/drivers/usb/host/xhci-ring.c
2791 ++++ b/drivers/usb/host/xhci-ring.c
2792 +@@ -1643,10 +1643,13 @@ static void handle_port_status(struct xhci_hcd *xhci,
2793 + }
2794 + }
2795 +
2796 +- if ((portsc & PORT_PLC) && (portsc & PORT_PLS_MASK) == XDEV_U0 &&
2797 +- DEV_SUPERSPEED_ANY(portsc)) {
2798 ++ if ((portsc & PORT_PLC) &&
2799 ++ DEV_SUPERSPEED_ANY(portsc) &&
2800 ++ ((portsc & PORT_PLS_MASK) == XDEV_U0 ||
2801 ++ (portsc & PORT_PLS_MASK) == XDEV_U1 ||
2802 ++ (portsc & PORT_PLS_MASK) == XDEV_U2)) {
2803 + xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
2804 +- /* We've just brought the device into U0 through either the
2805 ++ /* We've just brought the device into U0/1/2 through either the
2806 + * Resume state after a device remote wakeup, or through the
2807 + * U3Exit state after a host-initiated resume. If it's a device
2808 + * initiated remote wake, don't pass up the link state change,
2809 +diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
2810 +index e88060ea1e33..dc00f59c8e69 100644
2811 +--- a/drivers/usb/host/xhci.h
2812 ++++ b/drivers/usb/host/xhci.h
2813 +@@ -452,6 +452,14 @@ struct xhci_op_regs {
2814 + */
2815 + #define XHCI_DEFAULT_BESL 4
2816 +
2817 ++/*
2818 ++ * USB3 specification define a 360ms tPollingLFPSTiemout for USB3 ports
2819 ++ * to complete link training. usually link trainig completes much faster
2820 ++ * so check status 10 times with 36ms sleep in places we need to wait for
2821 ++ * polling to complete.
2822 ++ */
2823 ++#define XHCI_PORT_POLLING_LFPS_TIME 36
2824 ++
2825 + /**
2826 + * struct xhci_intr_reg - Interrupt Register Set
2827 + * @irq_pending: IMAN - Interrupt Management Register. Used to enable
2828 +diff --git a/drivers/usb/mtu3/Kconfig b/drivers/usb/mtu3/Kconfig
2829 +index 40bbf1f53337..fe58904f350b 100644
2830 +--- a/drivers/usb/mtu3/Kconfig
2831 ++++ b/drivers/usb/mtu3/Kconfig
2832 +@@ -4,6 +4,7 @@ config USB_MTU3
2833 + tristate "MediaTek USB3 Dual Role controller"
2834 + depends on USB || USB_GADGET
2835 + depends on ARCH_MEDIATEK || COMPILE_TEST
2836 ++ depends on EXTCON || !EXTCON
2837 + select USB_XHCI_MTK if USB_SUPPORT && USB_XHCI_HCD
2838 + help
2839 + Say Y or M here if your system runs on MediaTek SoCs with
2840 +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
2841 +index 4c66edf533fe..e732949f6567 100644
2842 +--- a/drivers/usb/serial/cp210x.c
2843 ++++ b/drivers/usb/serial/cp210x.c
2844 +@@ -80,6 +80,7 @@ static const struct usb_device_id id_table[] = {
2845 + { USB_DEVICE(0x10C4, 0x804E) }, /* Software Bisque Paramount ME build-in converter */
2846 + { USB_DEVICE(0x10C4, 0x8053) }, /* Enfora EDG1228 */
2847 + { USB_DEVICE(0x10C4, 0x8054) }, /* Enfora GSM2228 */
2848 ++ { USB_DEVICE(0x10C4, 0x8056) }, /* Lorenz Messtechnik devices */
2849 + { USB_DEVICE(0x10C4, 0x8066) }, /* Argussoft In-System Programmer */
2850 + { USB_DEVICE(0x10C4, 0x806F) }, /* IMS USB to RS422 Converter Cable */
2851 + { USB_DEVICE(0x10C4, 0x807A) }, /* Crumb128 board */
2852 +diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
2853 +index 1d8077e880a0..c0dc4bc776db 100644
2854 +--- a/drivers/usb/serial/ftdi_sio.c
2855 ++++ b/drivers/usb/serial/ftdi_sio.c
2856 +@@ -599,6 +599,8 @@ static const struct usb_device_id id_table_combined[] = {
2857 + .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
2858 + { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLXM_PID),
2859 + .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
2860 ++ { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLX_PLUS_PID) },
2861 ++ { USB_DEVICE(FTDI_VID, FTDI_NT_ORION_IO_PID) },
2862 + { USB_DEVICE(FTDI_VID, FTDI_SYNAPSE_SS200_PID) },
2863 + { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX_PID) },
2864 + { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX2_PID) },
2865 +diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
2866 +index b863bedb55a1..5755f0df0025 100644
2867 +--- a/drivers/usb/serial/ftdi_sio_ids.h
2868 ++++ b/drivers/usb/serial/ftdi_sio_ids.h
2869 +@@ -567,7 +567,9 @@
2870 + /*
2871 + * NovaTech product ids (FTDI_VID)
2872 + */
2873 +-#define FTDI_NT_ORIONLXM_PID 0x7c90 /* OrionLXm Substation Automation Platform */
2874 ++#define FTDI_NT_ORIONLXM_PID 0x7c90 /* OrionLXm Substation Automation Platform */
2875 ++#define FTDI_NT_ORIONLX_PLUS_PID 0x7c91 /* OrionLX+ Substation Automation Platform */
2876 ++#define FTDI_NT_ORION_IO_PID 0x7c92 /* Orion I/O */
2877 +
2878 + /*
2879 + * Synapse Wireless product ids (FTDI_VID)
2880 +diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
2881 +index 27109522fd8b..e8f275a0326d 100644
2882 +--- a/drivers/usb/serial/mos7720.c
2883 ++++ b/drivers/usb/serial/mos7720.c
2884 +@@ -366,8 +366,6 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
2885 + if (!urbtrack)
2886 + return -ENOMEM;
2887 +
2888 +- kref_get(&mos_parport->ref_count);
2889 +- urbtrack->mos_parport = mos_parport;
2890 + urbtrack->urb = usb_alloc_urb(0, GFP_ATOMIC);
2891 + if (!urbtrack->urb) {
2892 + kfree(urbtrack);
2893 +@@ -388,6 +386,8 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
2894 + usb_sndctrlpipe(usbdev, 0),
2895 + (unsigned char *)urbtrack->setup,
2896 + NULL, 0, async_complete, urbtrack);
2897 ++ kref_get(&mos_parport->ref_count);
2898 ++ urbtrack->mos_parport = mos_parport;
2899 + kref_init(&urbtrack->ref_count);
2900 + INIT_LIST_HEAD(&urbtrack->urblist_entry);
2901 +
2902 +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
2903 +index faf833e8f557..d8c474b386a8 100644
2904 +--- a/drivers/usb/serial/option.c
2905 ++++ b/drivers/usb/serial/option.c
2906 +@@ -246,6 +246,7 @@ static void option_instat_callback(struct urb *urb);
2907 + #define QUECTEL_PRODUCT_EC25 0x0125
2908 + #define QUECTEL_PRODUCT_BG96 0x0296
2909 + #define QUECTEL_PRODUCT_EP06 0x0306
2910 ++#define QUECTEL_PRODUCT_EM12 0x0512
2911 +
2912 + #define CMOTECH_VENDOR_ID 0x16d8
2913 + #define CMOTECH_PRODUCT_6001 0x6001
2914 +@@ -1066,7 +1067,8 @@ static const struct usb_device_id option_ids[] = {
2915 + .driver_info = RSVD(3) },
2916 + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
2917 + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
2918 +- { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
2919 ++ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000), /* SIMCom SIM5218 */
2920 ++ .driver_info = NCTRL(0) | NCTRL(1) | NCTRL(2) | NCTRL(3) | RSVD(4) },
2921 + /* Quectel products using Qualcomm vendor ID */
2922 + { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)},
2923 + { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20),
2924 +@@ -1087,6 +1089,9 @@ static const struct usb_device_id option_ids[] = {
2925 + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff),
2926 + .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
2927 + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) },
2928 ++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0xff, 0xff),
2929 ++ .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
2930 ++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) },
2931 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
2932 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
2933 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
2934 +@@ -1940,10 +1945,12 @@ static const struct usb_device_id option_ids[] = {
2935 + .driver_info = RSVD(4) },
2936 + { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff), /* D-Link DWM-222 */
2937 + .driver_info = RSVD(4) },
2938 +- { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
2939 +- { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
2940 +- { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
2941 +- { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */
2942 ++ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
2943 ++ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
2944 ++ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
2945 ++ { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2031, 0xff), /* Olicard 600 */
2946 ++ .driver_info = RSVD(4) },
2947 ++ { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */
2948 + { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
2949 + { USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) },
2950 + { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) },
2951 +diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
2952 +index e61dffb27a0c..00141e05bc72 100644
2953 +--- a/drivers/usb/typec/class.c
2954 ++++ b/drivers/usb/typec/class.c
2955 +@@ -1500,7 +1500,7 @@ typec_port_register_altmode(struct typec_port *port,
2956 +
2957 + sprintf(id, "id%04xm%02x", desc->svid, desc->mode);
2958 +
2959 +- mux = typec_mux_get(port->dev.parent, id);
2960 ++ mux = typec_mux_get(&port->dev, id);
2961 + if (IS_ERR(mux))
2962 + return ERR_CAST(mux);
2963 +
2964 +@@ -1540,18 +1540,6 @@ struct typec_port *typec_register_port(struct device *parent,
2965 + return ERR_PTR(id);
2966 + }
2967 +
2968 +- port->sw = typec_switch_get(cap->fwnode ? &port->dev : parent);
2969 +- if (IS_ERR(port->sw)) {
2970 +- ret = PTR_ERR(port->sw);
2971 +- goto err_switch;
2972 +- }
2973 +-
2974 +- port->mux = typec_mux_get(parent, "typec-mux");
2975 +- if (IS_ERR(port->mux)) {
2976 +- ret = PTR_ERR(port->mux);
2977 +- goto err_mux;
2978 +- }
2979 +-
2980 + switch (cap->type) {
2981 + case TYPEC_PORT_SRC:
2982 + port->pwr_role = TYPEC_SOURCE;
2983 +@@ -1592,13 +1580,26 @@ struct typec_port *typec_register_port(struct device *parent,
2984 + port->port_type = cap->type;
2985 + port->prefer_role = cap->prefer_role;
2986 +
2987 ++ device_initialize(&port->dev);
2988 + port->dev.class = typec_class;
2989 + port->dev.parent = parent;
2990 + port->dev.fwnode = cap->fwnode;
2991 + port->dev.type = &typec_port_dev_type;
2992 + dev_set_name(&port->dev, "port%d", id);
2993 +
2994 +- ret = device_register(&port->dev);
2995 ++ port->sw = typec_switch_get(&port->dev);
2996 ++ if (IS_ERR(port->sw)) {
2997 ++ put_device(&port->dev);
2998 ++ return ERR_CAST(port->sw);
2999 ++ }
3000 ++
3001 ++ port->mux = typec_mux_get(&port->dev, "typec-mux");
3002 ++ if (IS_ERR(port->mux)) {
3003 ++ put_device(&port->dev);
3004 ++ return ERR_CAST(port->mux);
3005 ++ }
3006 ++
3007 ++ ret = device_add(&port->dev);
3008 + if (ret) {
3009 + dev_err(parent, "failed to register port (%d)\n", ret);
3010 + put_device(&port->dev);
3011 +@@ -1606,15 +1607,6 @@ struct typec_port *typec_register_port(struct device *parent,
3012 + }
3013 +
3014 + return port;
3015 +-
3016 +-err_mux:
3017 +- typec_switch_put(port->sw);
3018 +-
3019 +-err_switch:
3020 +- ida_simple_remove(&typec_index_ida, port->id);
3021 +- kfree(port);
3022 +-
3023 +- return ERR_PTR(ret);
3024 + }
3025 + EXPORT_SYMBOL_GPL(typec_register_port);
3026 +
3027 +diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
3028 +index a16760b410b1..c0db7785cede 100644
3029 +--- a/fs/btrfs/extent-tree.c
3030 ++++ b/fs/btrfs/extent-tree.c
3031 +@@ -5872,7 +5872,7 @@ static void btrfs_calculate_inode_block_rsv_size(struct btrfs_fs_info *fs_info,
3032 + *
3033 + * This is overestimating in most cases.
3034 + */
3035 +- qgroup_rsv_size = outstanding_extents * fs_info->nodesize;
3036 ++ qgroup_rsv_size = (u64)outstanding_extents * fs_info->nodesize;
3037 +
3038 + spin_lock(&block_rsv->lock);
3039 + block_rsv->size = reserve_size;
3040 +diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
3041 +index df41d7049936..927f9f3daddb 100644
3042 +--- a/fs/btrfs/raid56.c
3043 ++++ b/fs/btrfs/raid56.c
3044 +@@ -2429,8 +2429,9 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
3045 + bitmap_clear(rbio->dbitmap, pagenr, 1);
3046 + kunmap(p);
3047 +
3048 +- for (stripe = 0; stripe < rbio->real_stripes; stripe++)
3049 ++ for (stripe = 0; stripe < nr_data; stripe++)
3050 + kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
3051 ++ kunmap(p_page);
3052 + }
3053 +
3054 + __free_page(p_page);
3055 +diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
3056 +index 0805f8c5e72d..2f4f0958e5f2 100644
3057 +--- a/fs/btrfs/tree-log.c
3058 ++++ b/fs/btrfs/tree-log.c
3059 +@@ -3532,9 +3532,16 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
3060 + }
3061 + btrfs_release_path(path);
3062 +
3063 +- /* find the first key from this transaction again */
3064 ++ /*
3065 ++ * Find the first key from this transaction again. See the note for
3066 ++ * log_new_dir_dentries, if we're logging a directory recursively we
3067 ++ * won't be holding its i_mutex, which means we can modify the directory
3068 ++ * while we're logging it. If we remove an entry between our first
3069 ++ * search and this search we'll not find the key again and can just
3070 ++ * bail.
3071 ++ */
3072 + ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
3073 +- if (WARN_ON(ret != 0))
3074 ++ if (ret != 0)
3075 + goto done;
3076 +
3077 + /*
3078 +@@ -4504,6 +4511,19 @@ static int logged_inode_size(struct btrfs_root *log, struct btrfs_inode *inode,
3079 + item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3080 + struct btrfs_inode_item);
3081 + *size_ret = btrfs_inode_size(path->nodes[0], item);
3082 ++ /*
3083 ++ * If the in-memory inode's i_size is smaller then the inode
3084 ++ * size stored in the btree, return the inode's i_size, so
3085 ++ * that we get a correct inode size after replaying the log
3086 ++ * when before a power failure we had a shrinking truncate
3087 ++ * followed by addition of a new name (rename / new hard link).
3088 ++ * Otherwise return the inode size from the btree, to avoid
3089 ++ * data loss when replaying a log due to previously doing a
3090 ++ * write that expands the inode's size and logging a new name
3091 ++ * immediately after.
3092 ++ */
3093 ++ if (*size_ret > inode->vfs_inode.i_size)
3094 ++ *size_ret = inode->vfs_inode.i_size;
3095 + }
3096 +
3097 + btrfs_release_path(path);
3098 +@@ -4665,15 +4685,8 @@ static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans,
3099 + struct btrfs_file_extent_item);
3100 +
3101 + if (btrfs_file_extent_type(leaf, extent) ==
3102 +- BTRFS_FILE_EXTENT_INLINE) {
3103 +- len = btrfs_file_extent_ram_bytes(leaf, extent);
3104 +- ASSERT(len == i_size ||
3105 +- (len == fs_info->sectorsize &&
3106 +- btrfs_file_extent_compression(leaf, extent) !=
3107 +- BTRFS_COMPRESS_NONE) ||
3108 +- (len < i_size && i_size < fs_info->sectorsize));
3109 ++ BTRFS_FILE_EXTENT_INLINE)
3110 + return 0;
3111 +- }
3112 +
3113 + len = btrfs_file_extent_num_bytes(leaf, extent);
3114 + /* Last extent goes beyond i_size, no need to log a hole. */
3115 +diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
3116 +index c13f62182513..207f4e87445d 100644
3117 +--- a/fs/btrfs/volumes.c
3118 ++++ b/fs/btrfs/volumes.c
3119 +@@ -6051,7 +6051,7 @@ static void btrfs_end_bio(struct bio *bio)
3120 + if (bio_op(bio) == REQ_OP_WRITE)
3121 + btrfs_dev_stat_inc_and_print(dev,
3122 + BTRFS_DEV_STAT_WRITE_ERRS);
3123 +- else
3124 ++ else if (!(bio->bi_opf & REQ_RAHEAD))
3125 + btrfs_dev_stat_inc_and_print(dev,
3126 + BTRFS_DEV_STAT_READ_ERRS);
3127 + if (bio->bi_opf & REQ_PREFLUSH)
3128 +diff --git a/fs/lockd/host.c b/fs/lockd/host.c
3129 +index 93fb7cf0b92b..f0b5c987d6ae 100644
3130 +--- a/fs/lockd/host.c
3131 ++++ b/fs/lockd/host.c
3132 +@@ -290,12 +290,11 @@ void nlmclnt_release_host(struct nlm_host *host)
3133 +
3134 + WARN_ON_ONCE(host->h_server);
3135 +
3136 +- if (refcount_dec_and_test(&host->h_count)) {
3137 ++ if (refcount_dec_and_mutex_lock(&host->h_count, &nlm_host_mutex)) {
3138 + WARN_ON_ONCE(!list_empty(&host->h_lockowners));
3139 + WARN_ON_ONCE(!list_empty(&host->h_granted));
3140 + WARN_ON_ONCE(!list_empty(&host->h_reclaim));
3141 +
3142 +- mutex_lock(&nlm_host_mutex);
3143 + nlm_destroy_host_locked(host);
3144 + mutex_unlock(&nlm_host_mutex);
3145 + }
3146 +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
3147 +index e7abcf7629b3..580e37bc3fe2 100644
3148 +--- a/fs/nfs/nfs4proc.c
3149 ++++ b/fs/nfs/nfs4proc.c
3150 +@@ -2909,7 +2909,8 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
3151 + }
3152 +
3153 + out:
3154 +- nfs4_sequence_free_slot(&opendata->o_res.seq_res);
3155 ++ if (!opendata->cancelled)
3156 ++ nfs4_sequence_free_slot(&opendata->o_res.seq_res);
3157 + return ret;
3158 + }
3159 +
3160 +diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
3161 +index 7a5ee145c733..fc197e599e8c 100644
3162 +--- a/fs/ocfs2/refcounttree.c
3163 ++++ b/fs/ocfs2/refcounttree.c
3164 +@@ -4716,22 +4716,23 @@ out:
3165 +
3166 + /* Lock an inode and grab a bh pointing to the inode. */
3167 + static int ocfs2_reflink_inodes_lock(struct inode *s_inode,
3168 +- struct buffer_head **bh1,
3169 ++ struct buffer_head **bh_s,
3170 + struct inode *t_inode,
3171 +- struct buffer_head **bh2)
3172 ++ struct buffer_head **bh_t)
3173 + {
3174 +- struct inode *inode1;
3175 +- struct inode *inode2;
3176 ++ struct inode *inode1 = s_inode;
3177 ++ struct inode *inode2 = t_inode;
3178 + struct ocfs2_inode_info *oi1;
3179 + struct ocfs2_inode_info *oi2;
3180 ++ struct buffer_head *bh1 = NULL;
3181 ++ struct buffer_head *bh2 = NULL;
3182 + bool same_inode = (s_inode == t_inode);
3183 ++ bool need_swap = (inode1->i_ino > inode2->i_ino);
3184 + int status;
3185 +
3186 + /* First grab the VFS and rw locks. */
3187 + lock_two_nondirectories(s_inode, t_inode);
3188 +- inode1 = s_inode;
3189 +- inode2 = t_inode;
3190 +- if (inode1->i_ino > inode2->i_ino)
3191 ++ if (need_swap)
3192 + swap(inode1, inode2);
3193 +
3194 + status = ocfs2_rw_lock(inode1, 1);
3195 +@@ -4754,17 +4755,13 @@ static int ocfs2_reflink_inodes_lock(struct inode *s_inode,
3196 + trace_ocfs2_double_lock((unsigned long long)oi1->ip_blkno,
3197 + (unsigned long long)oi2->ip_blkno);
3198 +
3199 +- if (*bh1)
3200 +- *bh1 = NULL;
3201 +- if (*bh2)
3202 +- *bh2 = NULL;
3203 +-
3204 + /* We always want to lock the one with the lower lockid first. */
3205 + if (oi1->ip_blkno > oi2->ip_blkno)
3206 + mlog_errno(-ENOLCK);
3207 +
3208 + /* lock id1 */
3209 +- status = ocfs2_inode_lock_nested(inode1, bh1, 1, OI_LS_REFLINK_TARGET);
3210 ++ status = ocfs2_inode_lock_nested(inode1, &bh1, 1,
3211 ++ OI_LS_REFLINK_TARGET);
3212 + if (status < 0) {
3213 + if (status != -ENOENT)
3214 + mlog_errno(status);
3215 +@@ -4773,15 +4770,25 @@ static int ocfs2_reflink_inodes_lock(struct inode *s_inode,
3216 +
3217 + /* lock id2 */
3218 + if (!same_inode) {
3219 +- status = ocfs2_inode_lock_nested(inode2, bh2, 1,
3220 ++ status = ocfs2_inode_lock_nested(inode2, &bh2, 1,
3221 + OI_LS_REFLINK_TARGET);
3222 + if (status < 0) {
3223 + if (status != -ENOENT)
3224 + mlog_errno(status);
3225 + goto out_cl1;
3226 + }
3227 +- } else
3228 +- *bh2 = *bh1;
3229 ++ } else {
3230 ++ bh2 = bh1;
3231 ++ }
3232 ++
3233 ++ /*
3234 ++ * If we swapped inode order above, we have to swap the buffer heads
3235 ++ * before passing them back to the caller.
3236 ++ */
3237 ++ if (need_swap)
3238 ++ swap(bh1, bh2);
3239 ++ *bh_s = bh1;
3240 ++ *bh_t = bh2;
3241 +
3242 + trace_ocfs2_double_lock_end(
3243 + (unsigned long long)oi1->ip_blkno,
3244 +@@ -4791,8 +4798,7 @@ static int ocfs2_reflink_inodes_lock(struct inode *s_inode,
3245 +
3246 + out_cl1:
3247 + ocfs2_inode_unlock(inode1, 1);
3248 +- brelse(*bh1);
3249 +- *bh1 = NULL;
3250 ++ brelse(bh1);
3251 + out_rw2:
3252 + ocfs2_rw_unlock(inode2, 1);
3253 + out_i2:
3254 +diff --git a/fs/open.c b/fs/open.c
3255 +index 0285ce7dbd51..f1c2f855fd43 100644
3256 +--- a/fs/open.c
3257 ++++ b/fs/open.c
3258 +@@ -733,6 +733,12 @@ static int do_dentry_open(struct file *f,
3259 + return 0;
3260 + }
3261 +
3262 ++ /* Any file opened for execve()/uselib() has to be a regular file. */
3263 ++ if (unlikely(f->f_flags & FMODE_EXEC && !S_ISREG(inode->i_mode))) {
3264 ++ error = -EACCES;
3265 ++ goto cleanup_file;
3266 ++ }
3267 ++
3268 + if (f->f_mode & FMODE_WRITE && !special_file(inode->i_mode)) {
3269 + error = get_write_access(inode);
3270 + if (unlikely(error))
3271 +diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
3272 +index 4d598a399bbf..d65390727541 100644
3273 +--- a/fs/proc/proc_sysctl.c
3274 ++++ b/fs/proc/proc_sysctl.c
3275 +@@ -1626,7 +1626,8 @@ static void drop_sysctl_table(struct ctl_table_header *header)
3276 + if (--header->nreg)
3277 + return;
3278 +
3279 +- put_links(header);
3280 ++ if (parent)
3281 ++ put_links(header);
3282 + start_unregistering(header);
3283 + if (!--header->count)
3284 + kfree_rcu(header, rcu);
3285 +diff --git a/include/linux/device.h b/include/linux/device.h
3286 +index 8f882549edee..3f1066a9e1c3 100644
3287 +--- a/include/linux/device.h
3288 ++++ b/include/linux/device.h
3289 +@@ -773,6 +773,30 @@ struct device *device_connection_find(struct device *dev, const char *con_id);
3290 + void device_connection_add(struct device_connection *con);
3291 + void device_connection_remove(struct device_connection *con);
3292 +
3293 ++/**
3294 ++ * device_connections_add - Add multiple device connections at once
3295 ++ * @cons: Zero terminated array of device connection descriptors
3296 ++ */
3297 ++static inline void device_connections_add(struct device_connection *cons)
3298 ++{
3299 ++ struct device_connection *c;
3300 ++
3301 ++ for (c = cons; c->endpoint[0]; c++)
3302 ++ device_connection_add(c);
3303 ++}
3304 ++
3305 ++/**
3306 ++ * device_connections_remove - Remove multiple device connections at once
3307 ++ * @cons: Zero terminated array of device connection descriptors
3308 ++ */
3309 ++static inline void device_connections_remove(struct device_connection *cons)
3310 ++{
3311 ++ struct device_connection *c;
3312 ++
3313 ++ for (c = cons; c->endpoint[0]; c++)
3314 ++ device_connection_remove(c);
3315 ++}
3316 ++
3317 + /**
3318 + * enum device_link_state - Device link states.
3319 + * @DL_STATE_NONE: The presence of the drivers is not being tracked.
3320 +diff --git a/include/linux/slab.h b/include/linux/slab.h
3321 +index ed9cbddeb4a6..d6393413ef09 100644
3322 +--- a/include/linux/slab.h
3323 ++++ b/include/linux/slab.h
3324 +@@ -32,6 +32,8 @@
3325 + #define SLAB_HWCACHE_ALIGN ((slab_flags_t __force)0x00002000U)
3326 + /* Use GFP_DMA memory */
3327 + #define SLAB_CACHE_DMA ((slab_flags_t __force)0x00004000U)
3328 ++/* Use GFP_DMA32 memory */
3329 ++#define SLAB_CACHE_DMA32 ((slab_flags_t __force)0x00008000U)
3330 + /* DEBUG: Store the last owner for bug hunting */
3331 + #define SLAB_STORE_USER ((slab_flags_t __force)0x00010000U)
3332 + /* Panic if kmem_cache_create() fails */
3333 +diff --git a/include/net/sctp/checksum.h b/include/net/sctp/checksum.h
3334 +index 32ee65a30aff..1c6e6c0766ca 100644
3335 +--- a/include/net/sctp/checksum.h
3336 ++++ b/include/net/sctp/checksum.h
3337 +@@ -61,7 +61,7 @@ static inline __wsum sctp_csum_combine(__wsum csum, __wsum csum2,
3338 + static inline __le32 sctp_compute_cksum(const struct sk_buff *skb,
3339 + unsigned int offset)
3340 + {
3341 +- struct sctphdr *sh = sctp_hdr(skb);
3342 ++ struct sctphdr *sh = (struct sctphdr *)(skb->data + offset);
3343 + const struct skb_checksum_ops ops = {
3344 + .update = sctp_csum_update,
3345 + .combine = sctp_csum_combine,
3346 +diff --git a/include/net/sock.h b/include/net/sock.h
3347 +index 6cb5a545df7d..1ece7736c49c 100644
3348 +--- a/include/net/sock.h
3349 ++++ b/include/net/sock.h
3350 +@@ -710,6 +710,12 @@ static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
3351 + hlist_add_head_rcu(&sk->sk_node, list);
3352 + }
3353 +
3354 ++static inline void sk_add_node_tail_rcu(struct sock *sk, struct hlist_head *list)
3355 ++{
3356 ++ sock_hold(sk);
3357 ++ hlist_add_tail_rcu(&sk->sk_node, list);
3358 ++}
3359 ++
3360 + static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
3361 + {
3362 + hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
3363 +diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
3364 +index bcb42aaf1b3a..acc2305ad895 100644
3365 +--- a/kernel/bpf/verifier.c
3366 ++++ b/kernel/bpf/verifier.c
3367 +@@ -2815,7 +2815,7 @@ do_sim:
3368 + *dst_reg = *ptr_reg;
3369 + }
3370 + ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true);
3371 +- if (!ptr_is_dst_reg)
3372 ++ if (!ptr_is_dst_reg && ret)
3373 + *dst_reg = tmp;
3374 + return !ret ? -EFAULT : 0;
3375 + }
3376 +diff --git a/kernel/cpu.c b/kernel/cpu.c
3377 +index 56f657adcf03..9d0ecc4a0e79 100644
3378 +--- a/kernel/cpu.c
3379 ++++ b/kernel/cpu.c
3380 +@@ -533,6 +533,20 @@ static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
3381 + cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
3382 + }
3383 +
3384 ++static inline bool can_rollback_cpu(struct cpuhp_cpu_state *st)
3385 ++{
3386 ++ if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
3387 ++ return true;
3388 ++ /*
3389 ++ * When CPU hotplug is disabled, then taking the CPU down is not
3390 ++ * possible because takedown_cpu() and the architecture and
3391 ++ * subsystem specific mechanisms are not available. So the CPU
3392 ++ * which would be completely unplugged again needs to stay around
3393 ++ * in the current state.
3394 ++ */
3395 ++ return st->state <= CPUHP_BRINGUP_CPU;
3396 ++}
3397 ++
3398 + static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
3399 + enum cpuhp_state target)
3400 + {
3401 +@@ -543,8 +557,10 @@ static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
3402 + st->state++;
3403 + ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
3404 + if (ret) {
3405 +- st->target = prev_state;
3406 +- undo_cpu_up(cpu, st);
3407 ++ if (can_rollback_cpu(st)) {
3408 ++ st->target = prev_state;
3409 ++ undo_cpu_up(cpu, st);
3410 ++ }
3411 + break;
3412 + }
3413 + }
3414 +diff --git a/kernel/watchdog.c b/kernel/watchdog.c
3415 +index 977918d5d350..bbc4940f21af 100644
3416 +--- a/kernel/watchdog.c
3417 ++++ b/kernel/watchdog.c
3418 +@@ -547,13 +547,15 @@ static void softlockup_start_all(void)
3419 +
3420 + int lockup_detector_online_cpu(unsigned int cpu)
3421 + {
3422 +- watchdog_enable(cpu);
3423 ++ if (cpumask_test_cpu(cpu, &watchdog_allowed_mask))
3424 ++ watchdog_enable(cpu);
3425 + return 0;
3426 + }
3427 +
3428 + int lockup_detector_offline_cpu(unsigned int cpu)
3429 + {
3430 +- watchdog_disable(cpu);
3431 ++ if (cpumask_test_cpu(cpu, &watchdog_allowed_mask))
3432 ++ watchdog_disable(cpu);
3433 + return 0;
3434 + }
3435 +
3436 +diff --git a/lib/rhashtable.c b/lib/rhashtable.c
3437 +index 30526afa8343..6410c857b048 100644
3438 +--- a/lib/rhashtable.c
3439 ++++ b/lib/rhashtable.c
3440 +@@ -416,8 +416,12 @@ static void rht_deferred_worker(struct work_struct *work)
3441 + else if (tbl->nest)
3442 + err = rhashtable_rehash_alloc(ht, tbl, tbl->size);
3443 +
3444 +- if (!err)
3445 +- err = rhashtable_rehash_table(ht);
3446 ++ if (!err || err == -EEXIST) {
3447 ++ int nerr;
3448 ++
3449 ++ nerr = rhashtable_rehash_table(ht);
3450 ++ err = err ?: nerr;
3451 ++ }
3452 +
3453 + mutex_unlock(&ht->mutex);
3454 +
3455 +diff --git a/mm/mempolicy.c b/mm/mempolicy.c
3456 +index 89d4439516f6..f32d0a5be4fb 100644
3457 +--- a/mm/mempolicy.c
3458 ++++ b/mm/mempolicy.c
3459 +@@ -428,6 +428,13 @@ static inline bool queue_pages_required(struct page *page,
3460 + return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
3461 + }
3462 +
3463 ++/*
3464 ++ * queue_pages_pmd() has three possible return values:
3465 ++ * 1 - pages are placed on the right node or queued successfully.
3466 ++ * 0 - THP was split.
3467 ++ * -EIO - is migration entry or MPOL_MF_STRICT was specified and an existing
3468 ++ * page was already on a node that does not follow the policy.
3469 ++ */
3470 + static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
3471 + unsigned long end, struct mm_walk *walk)
3472 + {
3473 +@@ -437,7 +444,7 @@ static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
3474 + unsigned long flags;
3475 +
3476 + if (unlikely(is_pmd_migration_entry(*pmd))) {
3477 +- ret = 1;
3478 ++ ret = -EIO;
3479 + goto unlock;
3480 + }
3481 + page = pmd_page(*pmd);
3482 +@@ -454,8 +461,15 @@ static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
3483 + ret = 1;
3484 + flags = qp->flags;
3485 + /* go to thp migration */
3486 +- if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
3487 ++ if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
3488 ++ if (!vma_migratable(walk->vma)) {
3489 ++ ret = -EIO;
3490 ++ goto unlock;
3491 ++ }
3492 ++
3493 + migrate_page_add(page, qp->pagelist, flags);
3494 ++ } else
3495 ++ ret = -EIO;
3496 + unlock:
3497 + spin_unlock(ptl);
3498 + out:
3499 +@@ -480,8 +494,10 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
3500 + ptl = pmd_trans_huge_lock(pmd, vma);
3501 + if (ptl) {
3502 + ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
3503 +- if (ret)
3504 ++ if (ret > 0)
3505 + return 0;
3506 ++ else if (ret < 0)
3507 ++ return ret;
3508 + }
3509 +
3510 + if (pmd_trans_unstable(pmd))
3511 +@@ -502,11 +518,16 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
3512 + continue;
3513 + if (!queue_pages_required(page, qp))
3514 + continue;
3515 +- migrate_page_add(page, qp->pagelist, flags);
3516 ++ if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
3517 ++ if (!vma_migratable(vma))
3518 ++ break;
3519 ++ migrate_page_add(page, qp->pagelist, flags);
3520 ++ } else
3521 ++ break;
3522 + }
3523 + pte_unmap_unlock(pte - 1, ptl);
3524 + cond_resched();
3525 +- return 0;
3526 ++ return addr != end ? -EIO : 0;
3527 + }
3528 +
3529 + static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
3530 +@@ -576,7 +597,12 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end,
3531 + unsigned long endvma = vma->vm_end;
3532 + unsigned long flags = qp->flags;
3533 +
3534 +- if (!vma_migratable(vma))
3535 ++ /*
3536 ++ * Need check MPOL_MF_STRICT to return -EIO if possible
3537 ++ * regardless of vma_migratable
3538 ++ */
3539 ++ if (!vma_migratable(vma) &&
3540 ++ !(flags & MPOL_MF_STRICT))
3541 + return 1;
3542 +
3543 + if (endvma > end)
3544 +@@ -603,7 +629,7 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end,
3545 + }
3546 +
3547 + /* queue pages from current vma */
3548 +- if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
3549 ++ if (flags & MPOL_MF_VALID)
3550 + return 0;
3551 + return 1;
3552 + }
3553 +diff --git a/mm/migrate.c b/mm/migrate.c
3554 +index 14779c4f9a60..b2ea7d1e6f24 100644
3555 +--- a/mm/migrate.c
3556 ++++ b/mm/migrate.c
3557 +@@ -248,10 +248,8 @@ static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
3558 + pte = swp_entry_to_pte(entry);
3559 + } else if (is_device_public_page(new)) {
3560 + pte = pte_mkdevmap(pte);
3561 +- flush_dcache_page(new);
3562 + }
3563 +- } else
3564 +- flush_dcache_page(new);
3565 ++ }
3566 +
3567 + #ifdef CONFIG_HUGETLB_PAGE
3568 + if (PageHuge(new)) {
3569 +@@ -983,6 +981,13 @@ static int move_to_new_page(struct page *newpage, struct page *page,
3570 + */
3571 + if (!PageMappingFlags(page))
3572 + page->mapping = NULL;
3573 ++
3574 ++ if (unlikely(is_zone_device_page(newpage))) {
3575 ++ if (is_device_public_page(newpage))
3576 ++ flush_dcache_page(newpage);
3577 ++ } else
3578 ++ flush_dcache_page(newpage);
3579 ++
3580 + }
3581 + out:
3582 + return rc;
3583 +diff --git a/mm/slab.c b/mm/slab.c
3584 +index fad6839e8eab..364e42d5a399 100644
3585 +--- a/mm/slab.c
3586 ++++ b/mm/slab.c
3587 +@@ -2124,6 +2124,8 @@ done:
3588 + cachep->allocflags = __GFP_COMP;
3589 + if (flags & SLAB_CACHE_DMA)
3590 + cachep->allocflags |= GFP_DMA;
3591 ++ if (flags & SLAB_CACHE_DMA32)
3592 ++ cachep->allocflags |= GFP_DMA32;
3593 + if (flags & SLAB_RECLAIM_ACCOUNT)
3594 + cachep->allocflags |= __GFP_RECLAIMABLE;
3595 + cachep->size = size;
3596 +diff --git a/mm/slab.h b/mm/slab.h
3597 +index 58c6c1c2a78e..9632772e14be 100644
3598 +--- a/mm/slab.h
3599 ++++ b/mm/slab.h
3600 +@@ -127,7 +127,8 @@ static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
3601 +
3602 +
3603 + /* Legal flag mask for kmem_cache_create(), for various configurations */
3604 +-#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
3605 ++#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
3606 ++ SLAB_CACHE_DMA32 | SLAB_PANIC | \
3607 + SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS )
3608 +
3609 + #if defined(CONFIG_DEBUG_SLAB)
3610 +diff --git a/mm/slab_common.c b/mm/slab_common.c
3611 +index 3a7ac4f15194..4d3c2e76d1ba 100644
3612 +--- a/mm/slab_common.c
3613 ++++ b/mm/slab_common.c
3614 +@@ -53,7 +53,7 @@ static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
3615 + SLAB_FAILSLAB | SLAB_KASAN)
3616 +
3617 + #define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
3618 +- SLAB_ACCOUNT)
3619 ++ SLAB_CACHE_DMA32 | SLAB_ACCOUNT)
3620 +
3621 + /*
3622 + * Merge control. If this is set then no merging of slab caches will occur.
3623 +diff --git a/mm/slub.c b/mm/slub.c
3624 +index 8da34a8af53d..09c0e24a06d8 100644
3625 +--- a/mm/slub.c
3626 ++++ b/mm/slub.c
3627 +@@ -3539,6 +3539,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
3628 + if (s->flags & SLAB_CACHE_DMA)
3629 + s->allocflags |= GFP_DMA;
3630 +
3631 ++ if (s->flags & SLAB_CACHE_DMA32)
3632 ++ s->allocflags |= GFP_DMA32;
3633 ++
3634 + if (s->flags & SLAB_RECLAIM_ACCOUNT)
3635 + s->allocflags |= __GFP_RECLAIMABLE;
3636 +
3637 +@@ -5633,6 +5636,8 @@ static char *create_unique_id(struct kmem_cache *s)
3638 + */
3639 + if (s->flags & SLAB_CACHE_DMA)
3640 + *p++ = 'd';
3641 ++ if (s->flags & SLAB_CACHE_DMA32)
3642 ++ *p++ = 'D';
3643 + if (s->flags & SLAB_RECLAIM_ACCOUNT)
3644 + *p++ = 'a';
3645 + if (s->flags & SLAB_CONSISTENCY_CHECKS)
3646 +diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
3647 +index d17a4736e47c..2c6eabf294b3 100644
3648 +--- a/net/bluetooth/l2cap_core.c
3649 ++++ b/net/bluetooth/l2cap_core.c
3650 +@@ -3336,16 +3336,22 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data
3651 +
3652 + while (len >= L2CAP_CONF_OPT_SIZE) {
3653 + len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3654 ++ if (len < 0)
3655 ++ break;
3656 +
3657 + hint = type & L2CAP_CONF_HINT;
3658 + type &= L2CAP_CONF_MASK;
3659 +
3660 + switch (type) {
3661 + case L2CAP_CONF_MTU:
3662 ++ if (olen != 2)
3663 ++ break;
3664 + mtu = val;
3665 + break;
3666 +
3667 + case L2CAP_CONF_FLUSH_TO:
3668 ++ if (olen != 2)
3669 ++ break;
3670 + chan->flush_to = val;
3671 + break;
3672 +
3673 +@@ -3353,26 +3359,30 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data
3674 + break;
3675 +
3676 + case L2CAP_CONF_RFC:
3677 +- if (olen == sizeof(rfc))
3678 +- memcpy(&rfc, (void *) val, olen);
3679 ++ if (olen != sizeof(rfc))
3680 ++ break;
3681 ++ memcpy(&rfc, (void *) val, olen);
3682 + break;
3683 +
3684 + case L2CAP_CONF_FCS:
3685 ++ if (olen != 1)
3686 ++ break;
3687 + if (val == L2CAP_FCS_NONE)
3688 + set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3689 + break;
3690 +
3691 + case L2CAP_CONF_EFS:
3692 +- if (olen == sizeof(efs)) {
3693 +- remote_efs = 1;
3694 +- memcpy(&efs, (void *) val, olen);
3695 +- }
3696 ++ if (olen != sizeof(efs))
3697 ++ break;
3698 ++ remote_efs = 1;
3699 ++ memcpy(&efs, (void *) val, olen);
3700 + break;
3701 +
3702 + case L2CAP_CONF_EWS:
3703 ++ if (olen != 2)
3704 ++ break;
3705 + if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
3706 + return -ECONNREFUSED;
3707 +-
3708 + set_bit(FLAG_EXT_CTRL, &chan->flags);
3709 + set_bit(CONF_EWS_RECV, &chan->conf_state);
3710 + chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3711 +@@ -3382,7 +3392,6 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data
3712 + default:
3713 + if (hint)
3714 + break;
3715 +-
3716 + result = L2CAP_CONF_UNKNOWN;
3717 + *((u8 *) ptr++) = type;
3718 + break;
3719 +@@ -3547,58 +3556,65 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3720 +
3721 + while (len >= L2CAP_CONF_OPT_SIZE) {
3722 + len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3723 ++ if (len < 0)
3724 ++ break;
3725 +
3726 + switch (type) {
3727 + case L2CAP_CONF_MTU:
3728 ++ if (olen != 2)
3729 ++ break;
3730 + if (val < L2CAP_DEFAULT_MIN_MTU) {
3731 + *result = L2CAP_CONF_UNACCEPT;
3732 + chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3733 + } else
3734 + chan->imtu = val;
3735 +- l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
3736 ++ l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
3737 ++ endptr - ptr);
3738 + break;
3739 +
3740 + case L2CAP_CONF_FLUSH_TO:
3741 ++ if (olen != 2)
3742 ++ break;
3743 + chan->flush_to = val;
3744 +- l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3745 +- 2, chan->flush_to, endptr - ptr);
3746 ++ l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
3747 ++ chan->flush_to, endptr - ptr);
3748 + break;
3749 +
3750 + case L2CAP_CONF_RFC:
3751 +- if (olen == sizeof(rfc))
3752 +- memcpy(&rfc, (void *)val, olen);
3753 +-
3754 ++ if (olen != sizeof(rfc))
3755 ++ break;
3756 ++ memcpy(&rfc, (void *)val, olen);
3757 + if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3758 + rfc.mode != chan->mode)
3759 + return -ECONNREFUSED;
3760 +-
3761 + chan->fcs = 0;
3762 +-
3763 +- l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3764 +- sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
3765 ++ l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3766 ++ (unsigned long) &rfc, endptr - ptr);
3767 + break;
3768 +
3769 + case L2CAP_CONF_EWS:
3770 ++ if (olen != 2)
3771 ++ break;
3772 + chan->ack_win = min_t(u16, val, chan->ack_win);
3773 + l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3774 + chan->tx_win, endptr - ptr);
3775 + break;
3776 +
3777 + case L2CAP_CONF_EFS:
3778 +- if (olen == sizeof(efs)) {
3779 +- memcpy(&efs, (void *)val, olen);
3780 +-
3781 +- if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3782 +- efs.stype != L2CAP_SERV_NOTRAFIC &&
3783 +- efs.stype != chan->local_stype)
3784 +- return -ECONNREFUSED;
3785 +-
3786 +- l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3787 +- (unsigned long) &efs, endptr - ptr);
3788 +- }
3789 ++ if (olen != sizeof(efs))
3790 ++ break;
3791 ++ memcpy(&efs, (void *)val, olen);
3792 ++ if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3793 ++ efs.stype != L2CAP_SERV_NOTRAFIC &&
3794 ++ efs.stype != chan->local_stype)
3795 ++ return -ECONNREFUSED;
3796 ++ l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3797 ++ (unsigned long) &efs, endptr - ptr);
3798 + break;
3799 +
3800 + case L2CAP_CONF_FCS:
3801 ++ if (olen != 1)
3802 ++ break;
3803 + if (*result == L2CAP_CONF_PENDING)
3804 + if (val == L2CAP_FCS_NONE)
3805 + set_bit(CONF_RECV_NO_FCS,
3806 +@@ -3727,13 +3743,18 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3807 +
3808 + while (len >= L2CAP_CONF_OPT_SIZE) {
3809 + len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3810 ++ if (len < 0)
3811 ++ break;
3812 +
3813 + switch (type) {
3814 + case L2CAP_CONF_RFC:
3815 +- if (olen == sizeof(rfc))
3816 +- memcpy(&rfc, (void *)val, olen);
3817 ++ if (olen != sizeof(rfc))
3818 ++ break;
3819 ++ memcpy(&rfc, (void *)val, olen);
3820 + break;
3821 + case L2CAP_CONF_EWS:
3822 ++ if (olen != 2)
3823 ++ break;
3824 + txwin_ext = val;
3825 + break;
3826 + }
3827 +diff --git a/net/core/datagram.c b/net/core/datagram.c
3828 +index 57f3a6fcfc1e..a487df53a453 100644
3829 +--- a/net/core/datagram.c
3830 ++++ b/net/core/datagram.c
3831 +@@ -279,7 +279,7 @@ struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned int flags,
3832 + break;
3833 +
3834 + sk_busy_loop(sk, flags & MSG_DONTWAIT);
3835 +- } while (!skb_queue_empty(&sk->sk_receive_queue));
3836 ++ } while (sk->sk_receive_queue.prev != *last);
3837 +
3838 + error = -EAGAIN;
3839 +
3840 +diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
3841 +index 2aabb7eb0854..bf9a3b6ac885 100644
3842 +--- a/net/core/net-sysfs.c
3843 ++++ b/net/core/net-sysfs.c
3844 +@@ -934,6 +934,8 @@ static int rx_queue_add_kobject(struct net_device *dev, int index)
3845 + if (error)
3846 + return error;
3847 +
3848 ++ dev_hold(queue->dev);
3849 ++
3850 + if (dev->sysfs_rx_queue_group) {
3851 + error = sysfs_create_group(kobj, dev->sysfs_rx_queue_group);
3852 + if (error) {
3853 +@@ -943,7 +945,6 @@ static int rx_queue_add_kobject(struct net_device *dev, int index)
3854 + }
3855 +
3856 + kobject_uevent(kobj, KOBJ_ADD);
3857 +- dev_hold(queue->dev);
3858 +
3859 + return error;
3860 + }
3861 +@@ -1472,6 +1473,8 @@ static int netdev_queue_add_kobject(struct net_device *dev, int index)
3862 + if (error)
3863 + return error;
3864 +
3865 ++ dev_hold(queue->dev);
3866 ++
3867 + #ifdef CONFIG_BQL
3868 + error = sysfs_create_group(kobj, &dql_group);
3869 + if (error) {
3870 +@@ -1481,7 +1484,6 @@ static int netdev_queue_add_kobject(struct net_device *dev, int index)
3871 + #endif
3872 +
3873 + kobject_uevent(kobj, KOBJ_ADD);
3874 +- dev_hold(queue->dev);
3875 +
3876 + return 0;
3877 + }
3878 +diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
3879 +index 6344f1b18a6a..58a401e9cf09 100644
3880 +--- a/net/dccp/ipv6.c
3881 ++++ b/net/dccp/ipv6.c
3882 +@@ -433,8 +433,8 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
3883 + newnp->ipv6_mc_list = NULL;
3884 + newnp->ipv6_ac_list = NULL;
3885 + newnp->ipv6_fl_list = NULL;
3886 +- newnp->mcast_oif = inet6_iif(skb);
3887 +- newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
3888 ++ newnp->mcast_oif = inet_iif(skb);
3889 ++ newnp->mcast_hops = ip_hdr(skb)->ttl;
3890 +
3891 + /*
3892 + * No need to charge this sock to the relevant IPv6 refcnt debug socks count
3893 +diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c
3894 +index 17c455ff69ff..7858fa9ea103 100644
3895 +--- a/net/ipv6/ila/ila_xlat.c
3896 ++++ b/net/ipv6/ila/ila_xlat.c
3897 +@@ -420,6 +420,7 @@ int ila_xlat_nl_cmd_flush(struct sk_buff *skb, struct genl_info *info)
3898 +
3899 + done:
3900 + rhashtable_walk_stop(&iter);
3901 ++ rhashtable_walk_exit(&iter);
3902 + return ret;
3903 + }
3904 +
3905 +diff --git a/net/ipv6/route.c b/net/ipv6/route.c
3906 +index 66cc94427437..9006bb3c9e72 100644
3907 +--- a/net/ipv6/route.c
3908 ++++ b/net/ipv6/route.c
3909 +@@ -1048,14 +1048,20 @@ static struct rt6_info *ip6_create_rt_rcu(struct fib6_info *rt)
3910 + struct rt6_info *nrt;
3911 +
3912 + if (!fib6_info_hold_safe(rt))
3913 +- return NULL;
3914 ++ goto fallback;
3915 +
3916 + nrt = ip6_dst_alloc(dev_net(dev), dev, flags);
3917 +- if (nrt)
3918 +- ip6_rt_copy_init(nrt, rt);
3919 +- else
3920 ++ if (!nrt) {
3921 + fib6_info_release(rt);
3922 ++ goto fallback;
3923 ++ }
3924 +
3925 ++ ip6_rt_copy_init(nrt, rt);
3926 ++ return nrt;
3927 ++
3928 ++fallback:
3929 ++ nrt = dev_net(dev)->ipv6.ip6_null_entry;
3930 ++ dst_hold(&nrt->dst);
3931 + return nrt;
3932 + }
3933 +
3934 +@@ -1104,10 +1110,6 @@ restart:
3935 + dst_hold(&rt->dst);
3936 + } else {
3937 + rt = ip6_create_rt_rcu(f6i);
3938 +- if (!rt) {
3939 +- rt = net->ipv6.ip6_null_entry;
3940 +- dst_hold(&rt->dst);
3941 +- }
3942 + }
3943 +
3944 + rcu_read_unlock();
3945 +diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
3946 +index 03e6b7a2bc53..e7cdfa92c382 100644
3947 +--- a/net/ipv6/tcp_ipv6.c
3948 ++++ b/net/ipv6/tcp_ipv6.c
3949 +@@ -1108,11 +1108,11 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
3950 + newnp->ipv6_fl_list = NULL;
3951 + newnp->pktoptions = NULL;
3952 + newnp->opt = NULL;
3953 +- newnp->mcast_oif = tcp_v6_iif(skb);
3954 +- newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
3955 +- newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
3956 ++ newnp->mcast_oif = inet_iif(skb);
3957 ++ newnp->mcast_hops = ip_hdr(skb)->ttl;
3958 ++ newnp->rcv_flowinfo = 0;
3959 + if (np->repflow)
3960 +- newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
3961 ++ newnp->flow_label = 0;
3962 +
3963 + /*
3964 + * No need to charge this sock to the relevant IPv6 refcnt debug socks count
3965 +diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
3966 +index 25eeb6d2a75a..f0ec068e1d02 100644
3967 +--- a/net/netlink/genetlink.c
3968 ++++ b/net/netlink/genetlink.c
3969 +@@ -366,7 +366,7 @@ int genl_register_family(struct genl_family *family)
3970 + start, end + 1, GFP_KERNEL);
3971 + if (family->id < 0) {
3972 + err = family->id;
3973 +- goto errout_locked;
3974 ++ goto errout_free;
3975 + }
3976 +
3977 + err = genl_validate_assign_mc_groups(family);
3978 +@@ -385,6 +385,7 @@ int genl_register_family(struct genl_family *family)
3979 +
3980 + errout_remove:
3981 + idr_remove(&genl_fam_idr, family->id);
3982 ++errout_free:
3983 + kfree(family->attrbuf);
3984 + errout_locked:
3985 + genl_unlock_all();
3986 +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
3987 +index fd16fb836df2..a0d295478e69 100644
3988 +--- a/net/packet/af_packet.c
3989 ++++ b/net/packet/af_packet.c
3990 +@@ -3245,7 +3245,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
3991 + }
3992 +
3993 + mutex_lock(&net->packet.sklist_lock);
3994 +- sk_add_node_rcu(sk, &net->packet.sklist);
3995 ++ sk_add_node_tail_rcu(sk, &net->packet.sklist);
3996 + mutex_unlock(&net->packet.sklist_lock);
3997 +
3998 + preempt_disable();
3999 +@@ -4194,7 +4194,7 @@ static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
4000 + struct pgv *pg_vec;
4001 + int i;
4002 +
4003 +- pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL);
4004 ++ pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL | __GFP_NOWARN);
4005 + if (unlikely(!pg_vec))
4006 + goto out;
4007 +
4008 +diff --git a/net/rose/rose_subr.c b/net/rose/rose_subr.c
4009 +index 7ca57741b2fb..7849f286bb93 100644
4010 +--- a/net/rose/rose_subr.c
4011 ++++ b/net/rose/rose_subr.c
4012 +@@ -105,16 +105,17 @@ void rose_write_internal(struct sock *sk, int frametype)
4013 + struct sk_buff *skb;
4014 + unsigned char *dptr;
4015 + unsigned char lci1, lci2;
4016 +- char buffer[100];
4017 +- int len, faclen = 0;
4018 ++ int maxfaclen = 0;
4019 ++ int len, faclen;
4020 ++ int reserve;
4021 +
4022 +- len = AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN + 1;
4023 ++ reserve = AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + 1;
4024 ++ len = ROSE_MIN_LEN;
4025 +
4026 + switch (frametype) {
4027 + case ROSE_CALL_REQUEST:
4028 + len += 1 + ROSE_ADDR_LEN + ROSE_ADDR_LEN;
4029 +- faclen = rose_create_facilities(buffer, rose);
4030 +- len += faclen;
4031 ++ maxfaclen = 256;
4032 + break;
4033 + case ROSE_CALL_ACCEPTED:
4034 + case ROSE_CLEAR_REQUEST:
4035 +@@ -123,15 +124,16 @@ void rose_write_internal(struct sock *sk, int frametype)
4036 + break;
4037 + }
4038 +
4039 +- if ((skb = alloc_skb(len, GFP_ATOMIC)) == NULL)
4040 ++ skb = alloc_skb(reserve + len + maxfaclen, GFP_ATOMIC);
4041 ++ if (!skb)
4042 + return;
4043 +
4044 + /*
4045 + * Space for AX.25 header and PID.
4046 + */
4047 +- skb_reserve(skb, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + 1);
4048 ++ skb_reserve(skb, reserve);
4049 +
4050 +- dptr = skb_put(skb, skb_tailroom(skb));
4051 ++ dptr = skb_put(skb, len);
4052 +
4053 + lci1 = (rose->lci >> 8) & 0x0F;
4054 + lci2 = (rose->lci >> 0) & 0xFF;
4055 +@@ -146,7 +148,8 @@ void rose_write_internal(struct sock *sk, int frametype)
4056 + dptr += ROSE_ADDR_LEN;
4057 + memcpy(dptr, &rose->source_addr, ROSE_ADDR_LEN);
4058 + dptr += ROSE_ADDR_LEN;
4059 +- memcpy(dptr, buffer, faclen);
4060 ++ faclen = rose_create_facilities(dptr, rose);
4061 ++ skb_put(skb, faclen);
4062 + dptr += faclen;
4063 + break;
4064 +
4065 +diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
4066 +index 8bf66d0a6800..f767e78e38c9 100644
4067 +--- a/net/sched/act_mirred.c
4068 ++++ b/net/sched/act_mirred.c
4069 +@@ -159,6 +159,9 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
4070 + }
4071 + m = to_mirred(*a);
4072 +
4073 ++ if (ret == ACT_P_CREATED)
4074 ++ INIT_LIST_HEAD(&m->tcfm_list);
4075 ++
4076 + spin_lock_bh(&m->tcf_lock);
4077 + m->tcf_action = parm->action;
4078 + m->tcfm_eaction = parm->eaction;
4079 +diff --git a/net/sctp/socket.c b/net/sctp/socket.c
4080 +index 1b16250c5718..8c00a7ef1bcd 100644
4081 +--- a/net/sctp/socket.c
4082 ++++ b/net/sctp/socket.c
4083 +@@ -1017,7 +1017,7 @@ static int sctp_setsockopt_bindx(struct sock *sk,
4084 + if (unlikely(addrs_size <= 0))
4085 + return -EINVAL;
4086 +
4087 +- kaddrs = vmemdup_user(addrs, addrs_size);
4088 ++ kaddrs = memdup_user(addrs, addrs_size);
4089 + if (unlikely(IS_ERR(kaddrs)))
4090 + return PTR_ERR(kaddrs);
4091 +
4092 +@@ -1025,7 +1025,7 @@ static int sctp_setsockopt_bindx(struct sock *sk,
4093 + addr_buf = kaddrs;
4094 + while (walk_size < addrs_size) {
4095 + if (walk_size + sizeof(sa_family_t) > addrs_size) {
4096 +- kvfree(kaddrs);
4097 ++ kfree(kaddrs);
4098 + return -EINVAL;
4099 + }
4100 +
4101 +@@ -1036,7 +1036,7 @@ static int sctp_setsockopt_bindx(struct sock *sk,
4102 + * causes the address buffer to overflow return EINVAL.
4103 + */
4104 + if (!af || (walk_size + af->sockaddr_len) > addrs_size) {
4105 +- kvfree(kaddrs);
4106 ++ kfree(kaddrs);
4107 + return -EINVAL;
4108 + }
4109 + addrcnt++;
4110 +@@ -1072,7 +1072,7 @@ static int sctp_setsockopt_bindx(struct sock *sk,
4111 + }
4112 +
4113 + out:
4114 +- kvfree(kaddrs);
4115 ++ kfree(kaddrs);
4116 +
4117 + return err;
4118 + }
4119 +@@ -1347,7 +1347,7 @@ static int __sctp_setsockopt_connectx(struct sock *sk,
4120 + if (unlikely(addrs_size <= 0))
4121 + return -EINVAL;
4122 +
4123 +- kaddrs = vmemdup_user(addrs, addrs_size);
4124 ++ kaddrs = memdup_user(addrs, addrs_size);
4125 + if (unlikely(IS_ERR(kaddrs)))
4126 + return PTR_ERR(kaddrs);
4127 +
4128 +@@ -1367,7 +1367,7 @@ static int __sctp_setsockopt_connectx(struct sock *sk,
4129 + err = __sctp_connect(sk, kaddrs, addrs_size, flags, assoc_id);
4130 +
4131 + out_free:
4132 +- kvfree(kaddrs);
4133 ++ kfree(kaddrs);
4134 +
4135 + return err;
4136 + }
4137 +diff --git a/net/tipc/net.c b/net/tipc/net.c
4138 +index f076edb74338..7ce1e86b024f 100644
4139 +--- a/net/tipc/net.c
4140 ++++ b/net/tipc/net.c
4141 +@@ -163,12 +163,9 @@ void tipc_sched_net_finalize(struct net *net, u32 addr)
4142 +
4143 + void tipc_net_stop(struct net *net)
4144 + {
4145 +- u32 self = tipc_own_addr(net);
4146 +-
4147 +- if (!self)
4148 ++ if (!tipc_own_id(net))
4149 + return;
4150 +
4151 +- tipc_nametbl_withdraw(net, TIPC_CFG_SRV, self, self, self);
4152 + rtnl_lock();
4153 + tipc_bearer_stop(net);
4154 + tipc_node_stop(net);
4155 +diff --git a/net/tipc/socket.c b/net/tipc/socket.c
4156 +index 88c307ef1318..67a7b312a499 100644
4157 +--- a/net/tipc/socket.c
4158 ++++ b/net/tipc/socket.c
4159 +@@ -2310,6 +2310,16 @@ static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
4160 + return 0;
4161 + }
4162 +
4163 ++static bool tipc_sockaddr_is_sane(struct sockaddr_tipc *addr)
4164 ++{
4165 ++ if (addr->family != AF_TIPC)
4166 ++ return false;
4167 ++ if (addr->addrtype == TIPC_SERVICE_RANGE)
4168 ++ return (addr->addr.nameseq.lower <= addr->addr.nameseq.upper);
4169 ++ return (addr->addrtype == TIPC_SERVICE_ADDR ||
4170 ++ addr->addrtype == TIPC_SOCKET_ADDR);
4171 ++}
4172 ++
4173 + /**
4174 + * tipc_connect - establish a connection to another TIPC port
4175 + * @sock: socket structure
4176 +@@ -2345,18 +2355,18 @@ static int tipc_connect(struct socket *sock, struct sockaddr *dest,
4177 + if (!tipc_sk_type_connectionless(sk))
4178 + res = -EINVAL;
4179 + goto exit;
4180 +- } else if (dst->family != AF_TIPC) {
4181 +- res = -EINVAL;
4182 + }
4183 +- if (dst->addrtype != TIPC_ADDR_ID && dst->addrtype != TIPC_ADDR_NAME)
4184 ++ if (!tipc_sockaddr_is_sane(dst)) {
4185 + res = -EINVAL;
4186 +- if (res)
4187 + goto exit;
4188 +-
4189 ++ }
4190 + /* DGRAM/RDM connect(), just save the destaddr */
4191 + if (tipc_sk_type_connectionless(sk)) {
4192 + memcpy(&tsk->peer, dest, destlen);
4193 + goto exit;
4194 ++ } else if (dst->addrtype == TIPC_SERVICE_RANGE) {
4195 ++ res = -EINVAL;
4196 ++ goto exit;
4197 + }
4198 +
4199 + previous = sk->sk_state;
4200 +diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c
4201 +index d65eed88c495..2301b09df234 100644
4202 +--- a/net/tipc/topsrv.c
4203 ++++ b/net/tipc/topsrv.c
4204 +@@ -371,6 +371,7 @@ static int tipc_conn_rcv_sub(struct tipc_topsrv *srv,
4205 + struct tipc_subscription *sub;
4206 +
4207 + if (tipc_sub_read(s, filter) & TIPC_SUB_CANCEL) {
4208 ++ s->filter &= __constant_ntohl(~TIPC_SUB_CANCEL);
4209 + tipc_conn_delete_sub(con, s);
4210 + return 0;
4211 + }
4212 +diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
4213 +index 5a77efd39b3f..858cbe56b100 100644
4214 +--- a/scripts/mod/modpost.c
4215 ++++ b/scripts/mod/modpost.c
4216 +@@ -640,7 +640,7 @@ static void handle_modversions(struct module *mod, struct elf_info *info,
4217 + info->sechdrs[sym->st_shndx].sh_offset -
4218 + (info->hdr->e_type != ET_REL ?
4219 + info->sechdrs[sym->st_shndx].sh_addr : 0);
4220 +- crc = *crcp;
4221 ++ crc = TO_NATIVE(*crcp);
4222 + }
4223 + sym_update_crc(symname + strlen("__crc_"), mod, crc,
4224 + export);
4225 +diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
4226 +index 467039b342b5..41abb8bd466a 100644
4227 +--- a/sound/core/oss/pcm_oss.c
4228 ++++ b/sound/core/oss/pcm_oss.c
4229 +@@ -940,6 +940,28 @@ static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream)
4230 + oss_frame_size = snd_pcm_format_physical_width(params_format(params)) *
4231 + params_channels(params) / 8;
4232 +
4233 ++ err = snd_pcm_oss_period_size(substream, params, sparams);
4234 ++ if (err < 0)
4235 ++ goto failure;
4236 ++
4237 ++ n = snd_pcm_plug_slave_size(substream, runtime->oss.period_bytes / oss_frame_size);
4238 ++ err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, n, NULL);
4239 ++ if (err < 0)
4240 ++ goto failure;
4241 ++
4242 ++ err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIODS,
4243 ++ runtime->oss.periods, NULL);
4244 ++ if (err < 0)
4245 ++ goto failure;
4246 ++
4247 ++ snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
4248 ++
4249 ++ err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_HW_PARAMS, sparams);
4250 ++ if (err < 0) {
4251 ++ pcm_dbg(substream->pcm, "HW_PARAMS failed: %i\n", err);
4252 ++ goto failure;
4253 ++ }
4254 ++
4255 + #ifdef CONFIG_SND_PCM_OSS_PLUGINS
4256 + snd_pcm_oss_plugin_clear(substream);
4257 + if (!direct) {
4258 +@@ -974,27 +996,6 @@ static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream)
4259 + }
4260 + #endif
4261 +
4262 +- err = snd_pcm_oss_period_size(substream, params, sparams);
4263 +- if (err < 0)
4264 +- goto failure;
4265 +-
4266 +- n = snd_pcm_plug_slave_size(substream, runtime->oss.period_bytes / oss_frame_size);
4267 +- err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, n, NULL);
4268 +- if (err < 0)
4269 +- goto failure;
4270 +-
4271 +- err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIODS,
4272 +- runtime->oss.periods, NULL);
4273 +- if (err < 0)
4274 +- goto failure;
4275 +-
4276 +- snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
4277 +-
4278 +- if ((err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_HW_PARAMS, sparams)) < 0) {
4279 +- pcm_dbg(substream->pcm, "HW_PARAMS failed: %i\n", err);
4280 +- goto failure;
4281 +- }
4282 +-
4283 + if (runtime->oss.trigger) {
4284 + sw_params->start_threshold = 1;
4285 + } else {
4286 +diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
4287 +index 818dff1de545..b67f6fe08a1b 100644
4288 +--- a/sound/core/pcm_native.c
4289 ++++ b/sound/core/pcm_native.c
4290 +@@ -1426,8 +1426,15 @@ static int snd_pcm_pause(struct snd_pcm_substream *substream, int push)
4291 + static int snd_pcm_pre_suspend(struct snd_pcm_substream *substream, int state)
4292 + {
4293 + struct snd_pcm_runtime *runtime = substream->runtime;
4294 +- if (runtime->status->state == SNDRV_PCM_STATE_SUSPENDED)
4295 ++ switch (runtime->status->state) {
4296 ++ case SNDRV_PCM_STATE_SUSPENDED:
4297 + return -EBUSY;
4298 ++ /* unresumable PCM state; return -EBUSY for skipping suspend */
4299 ++ case SNDRV_PCM_STATE_OPEN:
4300 ++ case SNDRV_PCM_STATE_SETUP:
4301 ++ case SNDRV_PCM_STATE_DISCONNECTED:
4302 ++ return -EBUSY;
4303 ++ }
4304 + runtime->trigger_master = substream;
4305 + return 0;
4306 + }
4307 +diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
4308 +index 08d5662039e3..a52d6d16efc4 100644
4309 +--- a/sound/core/rawmidi.c
4310 ++++ b/sound/core/rawmidi.c
4311 +@@ -30,6 +30,7 @@
4312 + #include <linux/module.h>
4313 + #include <linux/delay.h>
4314 + #include <linux/mm.h>
4315 ++#include <linux/nospec.h>
4316 + #include <sound/rawmidi.h>
4317 + #include <sound/info.h>
4318 + #include <sound/control.h>
4319 +@@ -601,6 +602,7 @@ static int __snd_rawmidi_info_select(struct snd_card *card,
4320 + return -ENXIO;
4321 + if (info->stream < 0 || info->stream > 1)
4322 + return -EINVAL;
4323 ++ info->stream = array_index_nospec(info->stream, 2);
4324 + pstr = &rmidi->streams[info->stream];
4325 + if (pstr->substream_count == 0)
4326 + return -ENOENT;
4327 +diff --git a/sound/core/seq/oss/seq_oss_synth.c b/sound/core/seq/oss/seq_oss_synth.c
4328 +index 278ebb993122..c93945917235 100644
4329 +--- a/sound/core/seq/oss/seq_oss_synth.c
4330 ++++ b/sound/core/seq/oss/seq_oss_synth.c
4331 +@@ -617,13 +617,14 @@ int
4332 + snd_seq_oss_synth_make_info(struct seq_oss_devinfo *dp, int dev, struct synth_info *inf)
4333 + {
4334 + struct seq_oss_synth *rec;
4335 ++ struct seq_oss_synthinfo *info = get_synthinfo_nospec(dp, dev);
4336 +
4337 +- if (dev < 0 || dev >= dp->max_synthdev)
4338 ++ if (!info)
4339 + return -ENXIO;
4340 +
4341 +- if (dp->synths[dev].is_midi) {
4342 ++ if (info->is_midi) {
4343 + struct midi_info minf;
4344 +- snd_seq_oss_midi_make_info(dp, dp->synths[dev].midi_mapped, &minf);
4345 ++ snd_seq_oss_midi_make_info(dp, info->midi_mapped, &minf);
4346 + inf->synth_type = SYNTH_TYPE_MIDI;
4347 + inf->synth_subtype = 0;
4348 + inf->nr_voices = 16;
4349 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
4350 +index 877293149e3a..4c6321ec844d 100644
4351 +--- a/sound/pci/hda/patch_realtek.c
4352 ++++ b/sound/pci/hda/patch_realtek.c
4353 +@@ -5613,6 +5613,12 @@ enum {
4354 + ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE,
4355 + ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE,
4356 + ALC255_FIXUP_ACER_HEADSET_MIC,
4357 ++ ALC225_FIXUP_DELL_WYSE_AIO_MIC_NO_PRESENCE,
4358 ++ ALC225_FIXUP_WYSE_AUTO_MUTE,
4359 ++ ALC225_FIXUP_WYSE_DISABLE_MIC_VREF,
4360 ++ ALC286_FIXUP_ACER_AIO_HEADSET_MIC,
4361 ++ ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
4362 ++ ALC299_FIXUP_PREDATOR_SPK,
4363 + };
4364 +
4365 + static const struct hda_fixup alc269_fixups[] = {
4366 +@@ -6567,6 +6573,54 @@ static const struct hda_fixup alc269_fixups[] = {
4367 + .chained = true,
4368 + .chain_id = ALC255_FIXUP_HEADSET_MODE_NO_HP_MIC
4369 + },
4370 ++ [ALC225_FIXUP_DELL_WYSE_AIO_MIC_NO_PRESENCE] = {
4371 ++ .type = HDA_FIXUP_PINS,
4372 ++ .v.pins = (const struct hda_pintbl[]) {
4373 ++ { 0x16, 0x01011020 }, /* Rear Line out */
4374 ++ { 0x19, 0x01a1913c }, /* use as Front headset mic, without its own jack detect */
4375 ++ { }
4376 ++ },
4377 ++ .chained = true,
4378 ++ .chain_id = ALC225_FIXUP_WYSE_AUTO_MUTE
4379 ++ },
4380 ++ [ALC225_FIXUP_WYSE_AUTO_MUTE] = {
4381 ++ .type = HDA_FIXUP_FUNC,
4382 ++ .v.func = alc_fixup_auto_mute_via_amp,
4383 ++ .chained = true,
4384 ++ .chain_id = ALC225_FIXUP_WYSE_DISABLE_MIC_VREF
4385 ++ },
4386 ++ [ALC225_FIXUP_WYSE_DISABLE_MIC_VREF] = {
4387 ++ .type = HDA_FIXUP_FUNC,
4388 ++ .v.func = alc_fixup_disable_mic_vref,
4389 ++ .chained = true,
4390 ++ .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
4391 ++ },
4392 ++ [ALC286_FIXUP_ACER_AIO_HEADSET_MIC] = {
4393 ++ .type = HDA_FIXUP_VERBS,
4394 ++ .v.verbs = (const struct hda_verb[]) {
4395 ++ { 0x20, AC_VERB_SET_COEF_INDEX, 0x4f },
4396 ++ { 0x20, AC_VERB_SET_PROC_COEF, 0x5029 },
4397 ++ { }
4398 ++ },
4399 ++ .chained = true,
4400 ++ .chain_id = ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE
4401 ++ },
4402 ++ [ALC256_FIXUP_ASUS_MIC_NO_PRESENCE] = {
4403 ++ .type = HDA_FIXUP_PINS,
4404 ++ .v.pins = (const struct hda_pintbl[]) {
4405 ++ { 0x19, 0x04a11120 }, /* use as headset mic, without its own jack detect */
4406 ++ { }
4407 ++ },
4408 ++ .chained = true,
4409 ++ .chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE
4410 ++ },
4411 ++ [ALC299_FIXUP_PREDATOR_SPK] = {
4412 ++ .type = HDA_FIXUP_PINS,
4413 ++ .v.pins = (const struct hda_pintbl[]) {
4414 ++ { 0x21, 0x90170150 }, /* use as headset mic, without its own jack detect */
4415 ++ { }
4416 ++ }
4417 ++ },
4418 + };
4419 +
4420 + static const struct snd_pci_quirk alc269_fixup_tbl[] = {
4421 +@@ -6583,9 +6637,13 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
4422 + SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS),
4423 + SND_PCI_QUIRK(0x1025, 0x102b, "Acer Aspire C24-860", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
4424 + SND_PCI_QUIRK(0x1025, 0x106d, "Acer Cloudbook 14", ALC283_FIXUP_CHROME_BOOK),
4425 +- SND_PCI_QUIRK(0x1025, 0x128f, "Acer Veriton Z6860G", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
4426 +- SND_PCI_QUIRK(0x1025, 0x1290, "Acer Veriton Z4860G", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
4427 +- SND_PCI_QUIRK(0x1025, 0x1291, "Acer Veriton Z4660G", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
4428 ++ SND_PCI_QUIRK(0x1025, 0x1099, "Acer Aspire E5-523G", ALC255_FIXUP_ACER_MIC_NO_PRESENCE),
4429 ++ SND_PCI_QUIRK(0x1025, 0x110e, "Acer Aspire ES1-432", ALC255_FIXUP_ACER_MIC_NO_PRESENCE),
4430 ++ SND_PCI_QUIRK(0x1025, 0x1246, "Acer Predator Helios 500", ALC299_FIXUP_PREDATOR_SPK),
4431 ++ SND_PCI_QUIRK(0x1025, 0x128f, "Acer Veriton Z6860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
4432 ++ SND_PCI_QUIRK(0x1025, 0x1290, "Acer Veriton Z4860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
4433 ++ SND_PCI_QUIRK(0x1025, 0x1291, "Acer Veriton Z4660G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
4434 ++ SND_PCI_QUIRK(0x1025, 0x1308, "Acer Aspire Z24-890", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
4435 + SND_PCI_QUIRK(0x1025, 0x1330, "Acer TravelMate X514-51T", ALC255_FIXUP_ACER_HEADSET_MIC),
4436 + SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
4437 + SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS),
4438 +@@ -6631,6 +6689,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
4439 + SND_PCI_QUIRK(0x1028, 0x0871, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC),
4440 + SND_PCI_QUIRK(0x1028, 0x0872, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC),
4441 + SND_PCI_QUIRK(0x1028, 0x0873, "Dell Precision 3930", ALC255_FIXUP_DUMMY_LINEOUT_VERB),
4442 ++ SND_PCI_QUIRK(0x1028, 0x08ad, "Dell WYSE AIO", ALC225_FIXUP_DELL_WYSE_AIO_MIC_NO_PRESENCE),
4443 ++ SND_PCI_QUIRK(0x1028, 0x08ae, "Dell WYSE NB", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE),
4444 + SND_PCI_QUIRK(0x1028, 0x0935, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
4445 + SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
4446 + SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
4447 +@@ -6976,6 +7036,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
4448 + {.id = ALC255_FIXUP_DUMMY_LINEOUT_VERB, .name = "alc255-dummy-lineout"},
4449 + {.id = ALC255_FIXUP_DELL_HEADSET_MIC, .name = "alc255-dell-headset"},
4450 + {.id = ALC295_FIXUP_HP_X360, .name = "alc295-hp-x360"},
4451 ++ {.id = ALC299_FIXUP_PREDATOR_SPK, .name = "predator-spk"},
4452 + {}
4453 + };
4454 + #define ALC225_STANDARD_PINS \
4455 +@@ -7196,6 +7257,18 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
4456 + {0x14, 0x90170110},
4457 + {0x1b, 0x90a70130},
4458 + {0x21, 0x03211020}),
4459 ++ SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
4460 ++ {0x12, 0x90a60130},
4461 ++ {0x14, 0x90170110},
4462 ++ {0x21, 0x03211020}),
4463 ++ SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
4464 ++ {0x12, 0x90a60130},
4465 ++ {0x14, 0x90170110},
4466 ++ {0x21, 0x04211020}),
4467 ++ SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
4468 ++ {0x1a, 0x90a70130},
4469 ++ {0x1b, 0x90170110},
4470 ++ {0x21, 0x03211020}),
4471 + SND_HDA_PIN_QUIRK(0x10ec0274, 0x1028, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB,
4472 + {0x12, 0xb7a60130},
4473 + {0x13, 0xb8a61140},
4474 +diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile
4475 +index c9d038f91af6..53f8be0f4a1f 100644
4476 +--- a/tools/objtool/Makefile
4477 ++++ b/tools/objtool/Makefile
4478 +@@ -25,14 +25,17 @@ LIBSUBCMD = $(LIBSUBCMD_OUTPUT)libsubcmd.a
4479 + OBJTOOL := $(OUTPUT)objtool
4480 + OBJTOOL_IN := $(OBJTOOL)-in.o
4481 +
4482 ++LIBELF_FLAGS := $(shell pkg-config libelf --cflags 2>/dev/null)
4483 ++LIBELF_LIBS := $(shell pkg-config libelf --libs 2>/dev/null || echo -lelf)
4484 ++
4485 + all: $(OBJTOOL)
4486 +
4487 + INCLUDES := -I$(srctree)/tools/include \
4488 + -I$(srctree)/tools/arch/$(HOSTARCH)/include/uapi \
4489 + -I$(srctree)/tools/objtool/arch/$(ARCH)/include
4490 + WARNINGS := $(EXTRA_WARNINGS) -Wno-switch-default -Wno-switch-enum -Wno-packed
4491 +-CFLAGS += -Werror $(WARNINGS) $(KBUILD_HOSTCFLAGS) -g $(INCLUDES)
4492 +-LDFLAGS += -lelf $(LIBSUBCMD) $(KBUILD_HOSTLDFLAGS)
4493 ++CFLAGS += -Werror $(WARNINGS) $(KBUILD_HOSTCFLAGS) -g $(INCLUDES) $(LIBELF_FLAGS)
4494 ++LDFLAGS += $(LIBELF_LIBS) $(LIBSUBCMD) $(KBUILD_HOSTLDFLAGS)
4495 +
4496 + # Allow old libelf to be used:
4497 + elfshdr := $(shell echo '$(pound)include <libelf.h>' | $(CC) $(CFLAGS) -x c -E - | grep elf_getshdr)
4498 +diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
4499 +index f3db68abbd9a..0bc3e6e93c31 100644
4500 +--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
4501 ++++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
4502 +@@ -251,19 +251,15 @@ struct intel_pt_decoder *intel_pt_decoder_new(struct intel_pt_params *params)
4503 + if (!(decoder->tsc_ctc_ratio_n % decoder->tsc_ctc_ratio_d))
4504 + decoder->tsc_ctc_mult = decoder->tsc_ctc_ratio_n /
4505 + decoder->tsc_ctc_ratio_d;
4506 +-
4507 +- /*
4508 +- * Allow for timestamps appearing to backwards because a TSC
4509 +- * packet has slipped past a MTC packet, so allow 2 MTC ticks
4510 +- * or ...
4511 +- */
4512 +- decoder->tsc_slip = multdiv(2 << decoder->mtc_shift,
4513 +- decoder->tsc_ctc_ratio_n,
4514 +- decoder->tsc_ctc_ratio_d);
4515 + }
4516 +- /* ... or 0x100 paranoia */
4517 +- if (decoder->tsc_slip < 0x100)
4518 +- decoder->tsc_slip = 0x100;
4519 ++
4520 ++ /*
4521 ++ * A TSC packet can slip past MTC packets so that the timestamp appears
4522 ++ * to go backwards. One estimate is that can be up to about 40 CPU
4523 ++ * cycles, which is certainly less than 0x1000 TSC ticks, but accept
4524 ++ * slippage an order of magnitude more to be on the safe side.
4525 ++ */
4526 ++ decoder->tsc_slip = 0x10000;
4527 +
4528 + intel_pt_log("timestamp: mtc_shift %u\n", decoder->mtc_shift);
4529 + intel_pt_log("timestamp: tsc_ctc_ratio_n %u\n", decoder->tsc_ctc_ratio_n);
4530 +diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
4531 +index 7348eea0248f..36cfc64c3824 100644
4532 +--- a/tools/perf/util/pmu.c
4533 ++++ b/tools/perf/util/pmu.c
4534 +@@ -773,10 +773,20 @@ static void pmu_add_cpu_aliases(struct list_head *head, struct perf_pmu *pmu)
4535 +
4536 + if (!is_arm_pmu_core(name)) {
4537 + pname = pe->pmu ? pe->pmu : "cpu";
4538 ++
4539 ++ /*
4540 ++ * uncore alias may be from different PMU
4541 ++ * with common prefix
4542 ++ */
4543 ++ if (pmu_is_uncore(name) &&
4544 ++ !strncmp(pname, name, strlen(pname)))
4545 ++ goto new_alias;
4546 ++
4547 + if (strcmp(pname, name))
4548 + continue;
4549 + }
4550 +
4551 ++new_alias:
4552 + /* need type casts to override 'const' */
4553 + __perf_pmu__new_alias(head, NULL, (char *)pe->name,
4554 + (char *)pe->desc, (char *)pe->event,
4555 +diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
4556 +index c436d95fd7aa..6a79df88b546 100644
4557 +--- a/virt/kvm/kvm_main.c
4558 ++++ b/virt/kvm/kvm_main.c
4559 +@@ -2815,6 +2815,9 @@ static long kvm_device_ioctl(struct file *filp, unsigned int ioctl,
4560 + {
4561 + struct kvm_device *dev = filp->private_data;
4562 +
4563 ++ if (dev->kvm->mm != current->mm)
4564 ++ return -EIO;
4565 ++
4566 + switch (ioctl) {
4567 + case KVM_SET_DEVICE_ATTR:
4568 + return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg);