From: Alice Ferrazzi <alicef@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.14 commit in: /
Date: Thu, 14 Dec 2017 09:11:11
Message-Id: 1513242631.4272d0d23434036afb6b4601a7483c06cb556cf8.alicef@gentoo
1 commit: 4272d0d23434036afb6b4601a7483c06cb556cf8
2 Author: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
3 AuthorDate: Thu Dec 14 09:10:31 2017 +0000
4 Commit: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
5 CommitDate: Thu Dec 14 09:10:31 2017 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=4272d0d2
7
8 Added Linux patch 4.14.6
9
10 0000_README | 4 +
11 1005_linux-4.14.6.patch | 4512 +++++++++++++++++++++++++++++++++++++++++++++++
12 2 files changed, 4516 insertions(+)
13
14 diff --git a/0000_README b/0000_README
15 index 797ac0c..8e3a889 100644
16 --- a/0000_README
17 +++ b/0000_README
18 @@ -63,6 +63,10 @@ Patch: 1004_linux-4.14.5.patch
19 From: http://www.kernel.org
20 Desc: Linux 4.14.5
21
22 +Patch: 1005_linux-4.14.6.patch
23 +From: http://www.kernel.org
24 +Desc: Linux 4.14.6
25 +
26 Patch: 1500_XATTR_USER_PREFIX.patch
27 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
28 Desc: Support for namespace user.pax.* on tmpfs.
29
30 diff --git a/1005_linux-4.14.6.patch b/1005_linux-4.14.6.patch
31 new file mode 100644
32 index 0000000..157c09c
33 --- /dev/null
34 +++ b/1005_linux-4.14.6.patch
35 @@ -0,0 +1,4512 @@
36 +diff --git a/Documentation/devicetree/bindings/usb/usb-device.txt b/Documentation/devicetree/bindings/usb/usb-device.txt
37 +index ce02cebac26a..464ddf7b509a 100644
38 +--- a/Documentation/devicetree/bindings/usb/usb-device.txt
39 ++++ b/Documentation/devicetree/bindings/usb/usb-device.txt
40 +@@ -11,7 +11,7 @@ Required properties:
41 + be used, but a device adhering to this binding may leave out all except
42 + for usbVID,PID.
43 + - reg: the port number which this device is connecting to, the range
44 +- is 1-31.
45 ++ is 1-255.
46 +
47 + Example:
48 +
49 +diff --git a/Makefile b/Makefile
50 +index 43ac7bdb10ad..eabbd7748a24 100644
51 +--- a/Makefile
52 ++++ b/Makefile
53 +@@ -1,7 +1,7 @@
54 + # SPDX-License-Identifier: GPL-2.0
55 + VERSION = 4
56 + PATCHLEVEL = 14
57 +-SUBLEVEL = 5
58 ++SUBLEVEL = 6
59 + EXTRAVERSION =
60 + NAME = Petit Gorille
61 +
62 +diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi
63 +index 8bf0d89cdd35..2e516f4985e4 100644
64 +--- a/arch/arm/boot/dts/imx53.dtsi
65 ++++ b/arch/arm/boot/dts/imx53.dtsi
66 +@@ -433,15 +433,6 @@
67 + clock-names = "ipg", "per";
68 + };
69 +
70 +- srtc: srtc@53fa4000 {
71 +- compatible = "fsl,imx53-rtc", "fsl,imx25-rtc";
72 +- reg = <0x53fa4000 0x4000>;
73 +- interrupts = <24>;
74 +- interrupt-parent = <&tzic>;
75 +- clocks = <&clks IMX5_CLK_SRTC_GATE>;
76 +- clock-names = "ipg";
77 +- };
78 +-
79 + iomuxc: iomuxc@53fa8000 {
80 + compatible = "fsl,imx53-iomuxc";
81 + reg = <0x53fa8000 0x4000>;
82 +diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
83 +index ad301f107dd2..bc8d4bbd82e2 100644
84 +--- a/arch/arm/include/asm/assembler.h
85 ++++ b/arch/arm/include/asm/assembler.h
86 +@@ -518,4 +518,22 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
87 + #endif
88 + .endm
89 +
90 ++ .macro bug, msg, line
91 ++#ifdef CONFIG_THUMB2_KERNEL
92 ++1: .inst 0xde02
93 ++#else
94 ++1: .inst 0xe7f001f2
95 ++#endif
96 ++#ifdef CONFIG_DEBUG_BUGVERBOSE
97 ++ .pushsection .rodata.str, "aMS", %progbits, 1
98 ++2: .asciz "\msg"
99 ++ .popsection
100 ++ .pushsection __bug_table, "aw"
101 ++ .align 2
102 ++ .word 1b, 2b
103 ++ .hword \line
104 ++ .popsection
105 ++#endif
106 ++ .endm
107 ++
108 + #endif /* __ASM_ASSEMBLER_H__ */
109 +diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
110 +index c8781450905b..3ab8b3781bfe 100644
111 +--- a/arch/arm/include/asm/kvm_arm.h
112 ++++ b/arch/arm/include/asm/kvm_arm.h
113 +@@ -161,8 +161,7 @@
114 + #else
115 + #define VTTBR_X (5 - KVM_T0SZ)
116 + #endif
117 +-#define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
118 +-#define VTTBR_BADDR_MASK (((_AC(1, ULL) << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
119 ++#define VTTBR_BADDR_MASK (((_AC(1, ULL) << (40 - VTTBR_X)) - 1) << VTTBR_X)
120 + #define VTTBR_VMID_SHIFT _AC(48, ULL)
121 + #define VTTBR_VMID_MASK(size) (_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT)
122 +
123 +diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
124 +index d523cd8439a3..0f07579af472 100644
125 +--- a/arch/arm/kernel/entry-header.S
126 ++++ b/arch/arm/kernel/entry-header.S
127 +@@ -300,6 +300,8 @@
128 + mov r2, sp
129 + ldr r1, [r2, #\offset + S_PSR] @ get calling cpsr
130 + ldr lr, [r2, #\offset + S_PC]! @ get pc
131 ++ tst r1, #PSR_I_BIT | 0x0f
132 ++ bne 1f
133 + msr spsr_cxsf, r1 @ save in spsr_svc
134 + #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
135 + @ We must avoid clrex due to Cortex-A15 erratum #830321
136 +@@ -314,6 +316,7 @@
137 + @ after ldm {}^
138 + add sp, sp, #\offset + PT_REGS_SIZE
139 + movs pc, lr @ return & move spsr_svc into cpsr
140 ++1: bug "Returning to usermode but unexpected PSR bits set?", \@
141 + #elif defined(CONFIG_CPU_V7M)
142 + @ V7M restore.
143 + @ Note that we don't need to do clrex here as clearing the local
144 +@@ -329,6 +332,8 @@
145 + ldr r1, [sp, #\offset + S_PSR] @ get calling cpsr
146 + ldr lr, [sp, #\offset + S_PC] @ get pc
147 + add sp, sp, #\offset + S_SP
148 ++ tst r1, #PSR_I_BIT | 0x0f
149 ++ bne 1f
150 + msr spsr_cxsf, r1 @ save in spsr_svc
151 +
152 + @ We must avoid clrex due to Cortex-A15 erratum #830321
153 +@@ -341,6 +346,7 @@
154 + .endif
155 + add sp, sp, #PT_REGS_SIZE - S_SP
156 + movs pc, lr @ return & move spsr_svc into cpsr
157 ++1: bug "Returning to usermode but unexpected PSR bits set?", \@
158 + #endif /* !CONFIG_THUMB2_KERNEL */
159 + .endm
160 +
161 +diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
162 +index 650344d01124..c4cd5081d78b 100644
163 +--- a/arch/arm64/include/asm/efi.h
164 ++++ b/arch/arm64/include/asm/efi.h
165 +@@ -132,11 +132,9 @@ static inline void efi_set_pgd(struct mm_struct *mm)
166 + * Defer the switch to the current thread's TTBR0_EL1
167 + * until uaccess_enable(). Restore the current
168 + * thread's saved ttbr0 corresponding to its active_mm
169 +- * (if different from init_mm).
170 + */
171 + cpu_set_reserved_ttbr0();
172 +- if (current->active_mm != &init_mm)
173 +- update_saved_ttbr0(current, current->active_mm);
174 ++ update_saved_ttbr0(current, current->active_mm);
175 + }
176 + }
177 + }
178 +diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
179 +index 61d694c2eae5..555d463c0eaa 100644
180 +--- a/arch/arm64/include/asm/kvm_arm.h
181 ++++ b/arch/arm64/include/asm/kvm_arm.h
182 +@@ -170,8 +170,7 @@
183 + #define VTCR_EL2_FLAGS (VTCR_EL2_COMMON_BITS | VTCR_EL2_TGRAN_FLAGS)
184 + #define VTTBR_X (VTTBR_X_TGRAN_MAGIC - VTCR_EL2_T0SZ_IPA)
185 +
186 +-#define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
187 +-#define VTTBR_BADDR_MASK (((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
188 ++#define VTTBR_BADDR_MASK (((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_X)
189 + #define VTTBR_VMID_SHIFT (UL(48))
190 + #define VTTBR_VMID_MASK(size) (_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT)
191 +
192 +diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
193 +index 3257895a9b5e..9d155fa9a507 100644
194 +--- a/arch/arm64/include/asm/mmu_context.h
195 ++++ b/arch/arm64/include/asm/mmu_context.h
196 +@@ -156,29 +156,21 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu);
197 +
198 + #define init_new_context(tsk,mm) ({ atomic64_set(&(mm)->context.id, 0); 0; })
199 +
200 +-/*
201 +- * This is called when "tsk" is about to enter lazy TLB mode.
202 +- *
203 +- * mm: describes the currently active mm context
204 +- * tsk: task which is entering lazy tlb
205 +- * cpu: cpu number which is entering lazy tlb
206 +- *
207 +- * tsk->mm will be NULL
208 +- */
209 +-static inline void
210 +-enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
211 +-{
212 +-}
213 +-
214 + #ifdef CONFIG_ARM64_SW_TTBR0_PAN
215 + static inline void update_saved_ttbr0(struct task_struct *tsk,
216 + struct mm_struct *mm)
217 + {
218 +- if (system_uses_ttbr0_pan()) {
219 +- BUG_ON(mm->pgd == swapper_pg_dir);
220 +- task_thread_info(tsk)->ttbr0 =
221 +- virt_to_phys(mm->pgd) | ASID(mm) << 48;
222 +- }
223 ++ u64 ttbr;
224 ++
225 ++ if (!system_uses_ttbr0_pan())
226 ++ return;
227 ++
228 ++ if (mm == &init_mm)
229 ++ ttbr = __pa_symbol(empty_zero_page);
230 ++ else
231 ++ ttbr = virt_to_phys(mm->pgd) | ASID(mm) << 48;
232 ++
233 ++ task_thread_info(tsk)->ttbr0 = ttbr;
234 + }
235 + #else
236 + static inline void update_saved_ttbr0(struct task_struct *tsk,
237 +@@ -187,6 +179,16 @@ static inline void update_saved_ttbr0(struct task_struct *tsk,
238 + }
239 + #endif
240 +
241 ++static inline void
242 ++enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
243 ++{
244 ++ /*
245 ++ * We don't actually care about the ttbr0 mapping, so point it at the
246 ++ * zero page.
247 ++ */
248 ++ update_saved_ttbr0(tsk, &init_mm);
249 ++}
250 ++
251 + static inline void __switch_mm(struct mm_struct *next)
252 + {
253 + unsigned int cpu = smp_processor_id();
254 +@@ -214,11 +216,9 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
255 + * Update the saved TTBR0_EL1 of the scheduled-in task as the previous
256 + * value may have not been initialised yet (activate_mm caller) or the
257 + * ASID has changed since the last run (following the context switch
258 +- * of another thread of the same process). Avoid setting the reserved
259 +- * TTBR0_EL1 to swapper_pg_dir (init_mm; e.g. via idle_task_exit).
260 ++ * of another thread of the same process).
261 + */
262 +- if (next != &init_mm)
263 +- update_saved_ttbr0(tsk, next);
264 ++ update_saved_ttbr0(tsk, next);
265 + }
266 +
267 + #define deactivate_mm(tsk,mm) do { } while (0)
268 +diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
269 +index 2dc0f8482210..bcd22d7ee590 100644
270 +--- a/arch/arm64/kernel/process.c
271 ++++ b/arch/arm64/kernel/process.c
272 +@@ -258,6 +258,15 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
273 +
274 + memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));
275 +
276 ++ /*
277 ++ * In case p was allocated the same task_struct pointer as some
278 ++ * other recently-exited task, make sure p is disassociated from
279 ++ * any cpu that may have run that now-exited task recently.
280 ++ * Otherwise we could erroneously skip reloading the FPSIMD
281 ++ * registers for p.
282 ++ */
283 ++ fpsimd_flush_task_state(p);
284 ++
285 + if (likely(!(p->flags & PF_KTHREAD))) {
286 + *childregs = *current_pt_regs();
287 + childregs->regs[0] = 0;
288 +diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
289 +index 73b92017b6d7..cd2fc1cc1cc7 100644
290 +--- a/arch/powerpc/include/asm/machdep.h
291 ++++ b/arch/powerpc/include/asm/machdep.h
292 +@@ -76,6 +76,7 @@ struct machdep_calls {
293 +
294 + void __noreturn (*restart)(char *cmd);
295 + void __noreturn (*halt)(void);
296 ++ void (*panic)(char *str);
297 + void (*cpu_die)(void);
298 +
299 + long (*time_init)(void); /* Optional, may be NULL */
300 +diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
301 +index 257d23dbf55d..cf00ec26303a 100644
302 +--- a/arch/powerpc/include/asm/setup.h
303 ++++ b/arch/powerpc/include/asm/setup.h
304 +@@ -24,6 +24,7 @@ extern void reloc_got2(unsigned long);
305 +
306 + void check_for_initrd(void);
307 + void initmem_init(void);
308 ++void setup_panic(void);
309 + #define ARCH_PANIC_TIMEOUT 180
310 +
311 + #ifdef CONFIG_PPC_PSERIES
312 +diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S
313 +index 610955fe8b81..679bbe714e85 100644
314 +--- a/arch/powerpc/kernel/cpu_setup_power.S
315 ++++ b/arch/powerpc/kernel/cpu_setup_power.S
316 +@@ -102,6 +102,7 @@ _GLOBAL(__setup_cpu_power9)
317 + li r0,0
318 + mtspr SPRN_PSSCR,r0
319 + mtspr SPRN_LPID,r0
320 ++ mtspr SPRN_PID,r0
321 + mfspr r3,SPRN_LPCR
322 + LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE | LPCR_HEIC)
323 + or r3, r3, r4
324 +@@ -126,6 +127,7 @@ _GLOBAL(__restore_cpu_power9)
325 + li r0,0
326 + mtspr SPRN_PSSCR,r0
327 + mtspr SPRN_LPID,r0
328 ++ mtspr SPRN_PID,r0
329 + mfspr r3,SPRN_LPCR
330 + LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE | LPCR_HEIC)
331 + or r3, r3, r4
332 +diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
333 +index e1431800bfb9..29d2b6050140 100644
334 +--- a/arch/powerpc/kernel/fadump.c
335 ++++ b/arch/powerpc/kernel/fadump.c
336 +@@ -1453,25 +1453,6 @@ static void fadump_init_files(void)
337 + return;
338 + }
339 +
340 +-static int fadump_panic_event(struct notifier_block *this,
341 +- unsigned long event, void *ptr)
342 +-{
343 +- /*
344 +- * If firmware-assisted dump has been registered then trigger
345 +- * firmware-assisted dump and let firmware handle everything
346 +- * else. If this returns, then fadump was not registered, so
347 +- * go through the rest of the panic path.
348 +- */
349 +- crash_fadump(NULL, ptr);
350 +-
351 +- return NOTIFY_DONE;
352 +-}
353 +-
354 +-static struct notifier_block fadump_panic_block = {
355 +- .notifier_call = fadump_panic_event,
356 +- .priority = INT_MIN /* may not return; must be done last */
357 +-};
358 +-
359 + /*
360 + * Prepare for firmware-assisted dump.
361 + */
362 +@@ -1504,9 +1485,6 @@ int __init setup_fadump(void)
363 + init_fadump_mem_struct(&fdm, fw_dump.reserve_dump_area_start);
364 + fadump_init_files();
365 +
366 +- atomic_notifier_chain_register(&panic_notifier_list,
367 +- &fadump_panic_block);
368 +-
369 + return 1;
370 + }
371 + subsys_initcall(setup_fadump);
372 +diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
373 +index 2e3bc16d02b2..90bc20efb4c7 100644
374 +--- a/arch/powerpc/kernel/setup-common.c
375 ++++ b/arch/powerpc/kernel/setup-common.c
376 +@@ -704,6 +704,30 @@ int check_legacy_ioport(unsigned long base_port)
377 + }
378 + EXPORT_SYMBOL(check_legacy_ioport);
379 +
380 ++static int ppc_panic_event(struct notifier_block *this,
381 ++ unsigned long event, void *ptr)
382 ++{
383 ++ /*
384 ++ * If firmware-assisted dump has been registered then trigger
385 ++ * firmware-assisted dump and let firmware handle everything else.
386 ++ */
387 ++ crash_fadump(NULL, ptr);
388 ++ ppc_md.panic(ptr); /* May not return */
389 ++ return NOTIFY_DONE;
390 ++}
391 ++
392 ++static struct notifier_block ppc_panic_block = {
393 ++ .notifier_call = ppc_panic_event,
394 ++ .priority = INT_MIN /* may not return; must be done last */
395 ++};
396 ++
397 ++void __init setup_panic(void)
398 ++{
399 ++ if (!ppc_md.panic)
400 ++ return;
401 ++ atomic_notifier_chain_register(&panic_notifier_list, &ppc_panic_block);
402 ++}
403 ++
404 + #ifdef CONFIG_CHECK_CACHE_COHERENCY
405 + /*
406 + * For platforms that have configurable cache-coherency. This function
407 +@@ -848,6 +872,9 @@ void __init setup_arch(char **cmdline_p)
408 + /* Probe the machine type, establish ppc_md. */
409 + probe_machine();
410 +
411 ++ /* Setup panic notifier if requested by the platform. */
412 ++ setup_panic();
413 ++
414 + /*
415 + * Configure ppc_md.power_save (ppc32 only, 64-bit machines do
416 + * it from their respective probe() function.
417 +diff --git a/arch/powerpc/platforms/powernv/opal-imc.c b/arch/powerpc/platforms/powernv/opal-imc.c
418 +index 21f6531fae20..b150f4deaccf 100644
419 +--- a/arch/powerpc/platforms/powernv/opal-imc.c
420 ++++ b/arch/powerpc/platforms/powernv/opal-imc.c
421 +@@ -191,8 +191,10 @@ static int opal_imc_counters_probe(struct platform_device *pdev)
422 + break;
423 + }
424 +
425 +- if (!imc_pmu_create(imc_dev, pmu_count, domain))
426 +- pmu_count++;
427 ++ if (!imc_pmu_create(imc_dev, pmu_count, domain)) {
428 ++ if (domain == IMC_DOMAIN_NEST)
429 ++ pmu_count++;
430 ++ }
431 + }
432 +
433 + return 0;
434 +diff --git a/arch/powerpc/platforms/ps3/setup.c b/arch/powerpc/platforms/ps3/setup.c
435 +index 9dabea6e1443..6244bc849469 100644
436 +--- a/arch/powerpc/platforms/ps3/setup.c
437 ++++ b/arch/powerpc/platforms/ps3/setup.c
438 +@@ -104,6 +104,20 @@ static void __noreturn ps3_halt(void)
439 + ps3_sys_manager_halt(); /* never returns */
440 + }
441 +
442 ++static void ps3_panic(char *str)
443 ++{
444 ++ DBG("%s:%d %s\n", __func__, __LINE__, str);
445 ++
446 ++ smp_send_stop();
447 ++ printk("\n");
448 ++ printk(" System does not reboot automatically.\n");
449 ++ printk(" Please press POWER button.\n");
450 ++ printk("\n");
451 ++
452 ++ while(1)
453 ++ lv1_pause(1);
454 ++}
455 ++
456 + #if defined(CONFIG_FB_PS3) || defined(CONFIG_FB_PS3_MODULE) || \
457 + defined(CONFIG_PS3_FLASH) || defined(CONFIG_PS3_FLASH_MODULE)
458 + static void __init prealloc(struct ps3_prealloc *p)
459 +@@ -255,6 +269,7 @@ define_machine(ps3) {
460 + .probe = ps3_probe,
461 + .setup_arch = ps3_setup_arch,
462 + .init_IRQ = ps3_init_IRQ,
463 ++ .panic = ps3_panic,
464 + .get_boot_time = ps3_get_boot_time,
465 + .set_dabr = ps3_set_dabr,
466 + .calibrate_decr = ps3_calibrate_decr,
467 +diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
468 +index 5f1beb8367ac..a8531e012658 100644
469 +--- a/arch/powerpc/platforms/pseries/setup.c
470 ++++ b/arch/powerpc/platforms/pseries/setup.c
471 +@@ -726,6 +726,7 @@ define_machine(pseries) {
472 + .pcibios_fixup = pSeries_final_fixup,
473 + .restart = rtas_restart,
474 + .halt = rtas_halt,
475 ++ .panic = rtas_os_term,
476 + .get_boot_time = rtas_get_boot_time,
477 + .get_rtc_time = rtas_get_rtc_time,
478 + .set_rtc_time = rtas_set_rtc_time,
479 +diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h
480 +index ec7b476c1ac5..c61b2cc1a8a8 100644
481 +--- a/arch/s390/include/asm/switch_to.h
482 ++++ b/arch/s390/include/asm/switch_to.h
483 +@@ -30,21 +30,20 @@ static inline void restore_access_regs(unsigned int *acrs)
484 + asm volatile("lam 0,15,%0" : : "Q" (*(acrstype *)acrs));
485 + }
486 +
487 +-#define switch_to(prev,next,last) do { \
488 +- if (prev->mm) { \
489 +- save_fpu_regs(); \
490 +- save_access_regs(&prev->thread.acrs[0]); \
491 +- save_ri_cb(prev->thread.ri_cb); \
492 +- save_gs_cb(prev->thread.gs_cb); \
493 +- } \
494 ++#define switch_to(prev, next, last) do { \
495 ++ /* save_fpu_regs() sets the CIF_FPU flag, which enforces \
496 ++ * a restore of the floating point / vector registers as \
497 ++ * soon as the next task returns to user space \
498 ++ */ \
499 ++ save_fpu_regs(); \
500 ++ save_access_regs(&prev->thread.acrs[0]); \
501 ++ save_ri_cb(prev->thread.ri_cb); \
502 ++ save_gs_cb(prev->thread.gs_cb); \
503 + update_cr_regs(next); \
504 +- if (next->mm) { \
505 +- set_cpu_flag(CIF_FPU); \
506 +- restore_access_regs(&next->thread.acrs[0]); \
507 +- restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb); \
508 +- restore_gs_cb(next->thread.gs_cb); \
509 +- } \
510 +- prev = __switch_to(prev,next); \
511 ++ restore_access_regs(&next->thread.acrs[0]); \
512 ++ restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb); \
513 ++ restore_gs_cb(next->thread.gs_cb); \
514 ++ prev = __switch_to(prev, next); \
515 + } while (0)
516 +
517 + #endif /* __ASM_SWITCH_TO_H */
518 +diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
519 +index d39f121e67a9..bc905ae1d5c8 100644
520 +--- a/arch/s390/kernel/syscalls.S
521 ++++ b/arch/s390/kernel/syscalls.S
522 +@@ -370,10 +370,10 @@ SYSCALL(sys_recvmmsg,compat_sys_recvmmsg)
523 + SYSCALL(sys_sendmmsg,compat_sys_sendmmsg)
524 + SYSCALL(sys_socket,sys_socket)
525 + SYSCALL(sys_socketpair,compat_sys_socketpair) /* 360 */
526 +-SYSCALL(sys_bind,sys_bind)
527 +-SYSCALL(sys_connect,sys_connect)
528 ++SYSCALL(sys_bind,compat_sys_bind)
529 ++SYSCALL(sys_connect,compat_sys_connect)
530 + SYSCALL(sys_listen,sys_listen)
531 +-SYSCALL(sys_accept4,sys_accept4)
532 ++SYSCALL(sys_accept4,compat_sys_accept4)
533 + SYSCALL(sys_getsockopt,compat_sys_getsockopt) /* 365 */
534 + SYSCALL(sys_setsockopt,compat_sys_setsockopt)
535 + SYSCALL(sys_getsockname,compat_sys_getsockname)
536 +diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
537 +index c954ac49eee4..5b25287f449b 100644
538 +--- a/arch/s390/kvm/priv.c
539 ++++ b/arch/s390/kvm/priv.c
540 +@@ -235,8 +235,6 @@ static int try_handle_skey(struct kvm_vcpu *vcpu)
541 + VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
542 + return -EAGAIN;
543 + }
544 +- if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
545 +- return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
546 + return 0;
547 + }
548 +
549 +@@ -247,6 +245,9 @@ static int handle_iske(struct kvm_vcpu *vcpu)
550 + int reg1, reg2;
551 + int rc;
552 +
553 ++ if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
554 ++ return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
555 ++
556 + rc = try_handle_skey(vcpu);
557 + if (rc)
558 + return rc != -EAGAIN ? rc : 0;
559 +@@ -276,6 +277,9 @@ static int handle_rrbe(struct kvm_vcpu *vcpu)
560 + int reg1, reg2;
561 + int rc;
562 +
563 ++ if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
564 ++ return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
565 ++
566 + rc = try_handle_skey(vcpu);
567 + if (rc)
568 + return rc != -EAGAIN ? rc : 0;
569 +@@ -311,6 +315,9 @@ static int handle_sske(struct kvm_vcpu *vcpu)
570 + int reg1, reg2;
571 + int rc;
572 +
573 ++ if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
574 ++ return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
575 ++
576 + rc = try_handle_skey(vcpu);
577 + if (rc)
578 + return rc != -EAGAIN ? rc : 0;
579 +diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
580 +index cc2faffa7d6e..334b6d103cbd 100644
581 +--- a/arch/s390/mm/pgalloc.c
582 ++++ b/arch/s390/mm/pgalloc.c
583 +@@ -85,8 +85,6 @@ int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
584 +
585 + /* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */
586 + VM_BUG_ON(mm->context.asce_limit < _REGION2_SIZE);
587 +- if (end >= TASK_SIZE_MAX)
588 +- return -ENOMEM;
589 + rc = 0;
590 + notify = 0;
591 + while (mm->context.asce_limit < end) {
592 +diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
593 +index 61bdc1270d19..a0cc1be767c8 100644
594 +--- a/arch/sparc/mm/init_64.c
595 ++++ b/arch/sparc/mm/init_64.c
596 +@@ -2540,9 +2540,16 @@ void __init mem_init(void)
597 + {
598 + high_memory = __va(last_valid_pfn << PAGE_SHIFT);
599 +
600 +- register_page_bootmem_info();
601 + free_all_bootmem();
602 +
603 ++ /*
604 ++ * Must be done after boot memory is put on freelist, because here we
605 ++ * might set fields in deferred struct pages that have not yet been
606 ++ * initialized, and free_all_bootmem() initializes all the reserved
607 ++ * deferred pages for us.
608 ++ */
609 ++ register_page_bootmem_info();
610 ++
611 + /*
612 + * Set up the zero page, mark it reserved, so that page count
613 + * is not manipulated when freeing the page from user ptes.
614 +diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
615 +index c73e493adf07..eb38ac9d9a31 100644
616 +--- a/arch/x86/include/asm/kvm_host.h
617 ++++ b/arch/x86/include/asm/kvm_host.h
618 +@@ -1426,4 +1426,7 @@ static inline int kvm_cpu_get_apicid(int mps_cpu)
619 + #endif
620 + }
621 +
622 ++void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
623 ++ unsigned long start, unsigned long end);
624 ++
625 + #endif /* _ASM_X86_KVM_HOST_H */
626 +diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
627 +index 65a0ccdc3050..5e0453f18a57 100644
628 +--- a/arch/x86/kernel/smpboot.c
629 ++++ b/arch/x86/kernel/smpboot.c
630 +@@ -239,7 +239,7 @@ static void notrace start_secondary(void *unused)
631 + load_cr3(swapper_pg_dir);
632 + __flush_tlb_all();
633 + #endif
634 +-
635 ++ load_current_idt();
636 + cpu_init();
637 + x86_cpuinit.early_percpu_clock_init();
638 + preempt_disable();
639 +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
640 +index b21113bcf227..f366e6d3a5e1 100644
641 +--- a/arch/x86/kvm/vmx.c
642 ++++ b/arch/x86/kvm/vmx.c
643 +@@ -6750,12 +6750,7 @@ static __init int hardware_setup(void)
644 + memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
645 + memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);
646 +
647 +- /*
648 +- * Allow direct access to the PC debug port (it is often used for I/O
649 +- * delays, but the vmexits simply slow things down).
650 +- */
651 + memset(vmx_io_bitmap_a, 0xff, PAGE_SIZE);
652 +- clear_bit(0x80, vmx_io_bitmap_a);
653 +
654 + memset(vmx_io_bitmap_b, 0xff, PAGE_SIZE);
655 +
656 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
657 +index 4195cbcdb310..df62cdc7a258 100644
658 +--- a/arch/x86/kvm/x86.c
659 ++++ b/arch/x86/kvm/x86.c
660 +@@ -6745,6 +6745,20 @@ static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu)
661 + kvm_x86_ops->tlb_flush(vcpu);
662 + }
663 +
664 ++void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
665 ++ unsigned long start, unsigned long end)
666 ++{
667 ++ unsigned long apic_address;
668 ++
669 ++ /*
670 ++ * The physical address of apic access page is stored in the VMCS.
671 ++ * Update it when it becomes invalid.
672 ++ */
673 ++ apic_address = gfn_to_hva(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
674 ++ if (start <= apic_address && apic_address < end)
675 ++ kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
676 ++}
677 ++
678 + void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
679 + {
680 + struct page *page = NULL;
681 +diff --git a/arch/x86/pci/broadcom_bus.c b/arch/x86/pci/broadcom_bus.c
682 +index bb461cfd01ab..526536c81ddc 100644
683 +--- a/arch/x86/pci/broadcom_bus.c
684 ++++ b/arch/x86/pci/broadcom_bus.c
685 +@@ -97,7 +97,7 @@ static int __init broadcom_postcore_init(void)
686 + * We should get host bridge information from ACPI unless the BIOS
687 + * doesn't support it.
688 + */
689 +- if (acpi_os_get_root_pointer())
690 ++ if (!acpi_disabled && acpi_os_get_root_pointer())
691 + return 0;
692 + #endif
693 +
694 +diff --git a/block/blk-core.c b/block/blk-core.c
695 +index 33ee583cfe45..516ce3174683 100644
696 +--- a/block/blk-core.c
697 ++++ b/block/blk-core.c
698 +@@ -605,8 +605,8 @@ void blk_set_queue_dying(struct request_queue *q)
699 + spin_lock_irq(q->queue_lock);
700 + blk_queue_for_each_rl(rl, q) {
701 + if (rl->rq_pool) {
702 +- wake_up(&rl->wait[BLK_RW_SYNC]);
703 +- wake_up(&rl->wait[BLK_RW_ASYNC]);
704 ++ wake_up_all(&rl->wait[BLK_RW_SYNC]);
705 ++ wake_up_all(&rl->wait[BLK_RW_ASYNC]);
706 + }
707 + }
708 + spin_unlock_irq(q->queue_lock);
709 +diff --git a/crypto/asymmetric_keys/pkcs7_verify.c b/crypto/asymmetric_keys/pkcs7_verify.c
710 +index 2d93d9eccb4d..986033e64a83 100644
711 +--- a/crypto/asymmetric_keys/pkcs7_verify.c
712 ++++ b/crypto/asymmetric_keys/pkcs7_verify.c
713 +@@ -150,7 +150,7 @@ static int pkcs7_find_key(struct pkcs7_message *pkcs7,
714 + pr_devel("Sig %u: Found cert serial match X.509[%u]\n",
715 + sinfo->index, certix);
716 +
717 +- if (x509->pub->pkey_algo != sinfo->sig->pkey_algo) {
718 ++ if (strcmp(x509->pub->pkey_algo, sinfo->sig->pkey_algo) != 0) {
719 + pr_warn("Sig %u: X.509 algo and PKCS#7 sig algo don't match\n",
720 + sinfo->index);
721 + continue;
722 +diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c
723 +index dd03fead1ca3..ce2df8c9c583 100644
724 +--- a/crypto/asymmetric_keys/x509_cert_parser.c
725 ++++ b/crypto/asymmetric_keys/x509_cert_parser.c
726 +@@ -409,6 +409,8 @@ int x509_extract_key_data(void *context, size_t hdrlen,
727 + ctx->cert->pub->pkey_algo = "rsa";
728 +
729 + /* Discard the BIT STRING metadata */
730 ++ if (vlen < 1 || *(const u8 *)value != 0)
731 ++ return -EBADMSG;
732 + ctx->key = value + 1;
733 + ctx->key_size = vlen - 1;
734 + return 0;
735 +diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c
736 +index eea71dc9686c..1bd0cf71a22d 100644
737 +--- a/crypto/asymmetric_keys/x509_public_key.c
738 ++++ b/crypto/asymmetric_keys/x509_public_key.c
739 +@@ -135,7 +135,7 @@ int x509_check_for_self_signed(struct x509_certificate *cert)
740 + }
741 +
742 + ret = -EKEYREJECTED;
743 +- if (cert->pub->pkey_algo != cert->sig->pkey_algo)
744 ++ if (strcmp(cert->pub->pkey_algo, cert->sig->pkey_algo) != 0)
745 + goto out;
746 +
747 + ret = public_key_verify_signature(cert->pub, cert->sig);
748 +diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
749 +index 7e76b35f422c..e121b8485731 100644
750 +--- a/drivers/atm/horizon.c
751 ++++ b/drivers/atm/horizon.c
752 +@@ -2803,7 +2803,7 @@ static int hrz_probe(struct pci_dev *pci_dev,
753 + return err;
754 +
755 + out_free_irq:
756 +- free_irq(dev->irq, dev);
757 ++ free_irq(irq, dev);
758 + out_free:
759 + kfree(dev);
760 + out_release:
761 +diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
762 +index 2f6614c9a229..bdc87907d6a1 100644
763 +--- a/drivers/base/Kconfig
764 ++++ b/drivers/base/Kconfig
765 +@@ -91,22 +91,23 @@ config FIRMWARE_IN_KERNEL
766 + depends on FW_LOADER
767 + default y
768 + help
769 +- The kernel source tree includes a number of firmware 'blobs'
770 +- that are used by various drivers. The recommended way to
771 +- use these is to run "make firmware_install", which, after
772 +- converting ihex files to binary, copies all of the needed
773 +- binary files in firmware/ to /lib/firmware/ on your system so
774 +- that they can be loaded by userspace helpers on request.
775 ++ Various drivers in the kernel source tree may require firmware,
776 ++ which is generally available in your distribution's linux-firmware
777 ++ package.
778 ++
779 ++ The linux-firmware package should install firmware into
780 ++ /lib/firmware/ on your system, so they can be loaded by userspace
781 ++ helpers on request.
782 +
783 + Enabling this option will build each required firmware blob
784 +- into the kernel directly, where request_firmware() will find
785 +- them without having to call out to userspace. This may be
786 +- useful if your root file system requires a device that uses
787 +- such firmware and do not wish to use an initrd.
788 ++ specified by EXTRA_FIRMWARE into the kernel directly, where
789 ++ request_firmware() will find them without having to call out to
790 ++ userspace. This may be useful if your root file system requires a
791 ++ device that uses such firmware and you do not wish to use an
792 ++ initrd.
793 +
794 + This single option controls the inclusion of firmware for
795 +- every driver that uses request_firmware() and ships its
796 +- firmware in the kernel source tree, which avoids a
797 ++ every driver that uses request_firmware(), which avoids a
798 + proliferation of 'Include firmware for xxx device' options.
799 +
800 + Say 'N' and let firmware be loaded from userspace.
801 +diff --git a/drivers/base/isa.c b/drivers/base/isa.c
802 +index cd6ccdcf9df0..372d10af2600 100644
803 +--- a/drivers/base/isa.c
804 ++++ b/drivers/base/isa.c
805 +@@ -39,7 +39,7 @@ static int isa_bus_probe(struct device *dev)
806 + {
807 + struct isa_driver *isa_driver = dev->platform_data;
808 +
809 +- if (isa_driver->probe)
810 ++ if (isa_driver && isa_driver->probe)
811 + return isa_driver->probe(dev, to_isa_dev(dev)->id);
812 +
813 + return 0;
814 +@@ -49,7 +49,7 @@ static int isa_bus_remove(struct device *dev)
815 + {
816 + struct isa_driver *isa_driver = dev->platform_data;
817 +
818 +- if (isa_driver->remove)
819 ++ if (isa_driver && isa_driver->remove)
820 + return isa_driver->remove(dev, to_isa_dev(dev)->id);
821 +
822 + return 0;
823 +@@ -59,7 +59,7 @@ static void isa_bus_shutdown(struct device *dev)
824 + {
825 + struct isa_driver *isa_driver = dev->platform_data;
826 +
827 +- if (isa_driver->shutdown)
828 ++ if (isa_driver && isa_driver->shutdown)
829 + isa_driver->shutdown(dev, to_isa_dev(dev)->id);
830 + }
831 +
832 +@@ -67,7 +67,7 @@ static int isa_bus_suspend(struct device *dev, pm_message_t state)
833 + {
834 + struct isa_driver *isa_driver = dev->platform_data;
835 +
836 +- if (isa_driver->suspend)
837 ++ if (isa_driver && isa_driver->suspend)
838 + return isa_driver->suspend(dev, to_isa_dev(dev)->id, state);
839 +
840 + return 0;
841 +@@ -77,7 +77,7 @@ static int isa_bus_resume(struct device *dev)
842 + {
843 + struct isa_driver *isa_driver = dev->platform_data;
844 +
845 +- if (isa_driver->resume)
846 ++ if (isa_driver && isa_driver->resume)
847 + return isa_driver->resume(dev, to_isa_dev(dev)->id);
848 +
849 + return 0;
850 +diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c
851 +index 3c29d36702a8..5426c04fe24b 100644
852 +--- a/drivers/bus/arm-cci.c
853 ++++ b/drivers/bus/arm-cci.c
854 +@@ -1755,14 +1755,17 @@ static int cci_pmu_probe(struct platform_device *pdev)
855 + raw_spin_lock_init(&cci_pmu->hw_events.pmu_lock);
856 + mutex_init(&cci_pmu->reserve_mutex);
857 + atomic_set(&cci_pmu->active_events, 0);
858 +- cpumask_set_cpu(smp_processor_id(), &cci_pmu->cpus);
859 ++ cpumask_set_cpu(get_cpu(), &cci_pmu->cpus);
860 +
861 + ret = cci_pmu_init(cci_pmu, pdev);
862 +- if (ret)
863 ++ if (ret) {
864 ++ put_cpu();
865 + return ret;
866 ++ }
867 +
868 + cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_CCI_ONLINE,
869 + &cci_pmu->node);
870 ++ put_cpu();
871 + pr_info("ARM %s PMU driver probed", cci_pmu->model->name);
872 + return 0;
873 + }
874 +diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c
875 +index e8c6946fed9d..03d7faf51c2b 100644
876 +--- a/drivers/bus/arm-ccn.c
877 ++++ b/drivers/bus/arm-ccn.c
878 +@@ -1271,6 +1271,10 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
879 + int len = snprintf(NULL, 0, "ccn_%d", ccn->dt.id);
880 +
881 + name = devm_kzalloc(ccn->dev, len + 1, GFP_KERNEL);
882 ++ if (!name) {
883 ++ err = -ENOMEM;
884 ++ goto error_choose_name;
885 ++ }
886 + snprintf(name, len + 1, "ccn_%d", ccn->dt.id);
887 + }
888 +
889 +@@ -1297,7 +1301,7 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
890 + }
891 +
892 + /* Pick one CPU which we will use to collect data from CCN... */
893 +- cpumask_set_cpu(smp_processor_id(), &ccn->dt.cpu);
894 ++ cpumask_set_cpu(get_cpu(), &ccn->dt.cpu);
895 +
896 + /* Also make sure that the overflow interrupt is handled by this CPU */
897 + if (ccn->irq) {
898 +@@ -1314,10 +1318,13 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
899 +
900 + cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE,
901 + &ccn->dt.node);
902 ++ put_cpu();
903 + return 0;
904 +
905 + error_pmu_register:
906 + error_set_affinity:
907 ++ put_cpu();
908 ++error_choose_name:
909 + ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id);
910 + for (i = 0; i < ccn->num_xps; i++)
911 + writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL);
912 +@@ -1580,8 +1587,8 @@ static int __init arm_ccn_init(void)
913 +
914 + static void __exit arm_ccn_exit(void)
915 + {
916 +- cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CCN_ONLINE);
917 + platform_driver_unregister(&arm_ccn_driver);
918 ++ cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CCN_ONLINE);
919 + }
920 +
921 + module_init(arm_ccn_init);
922 +diff --git a/drivers/clk/clk-stm32h7.c b/drivers/clk/clk-stm32h7.c
923 +index a94c3f56c590..61c3e40507d3 100644
924 +--- a/drivers/clk/clk-stm32h7.c
925 ++++ b/drivers/clk/clk-stm32h7.c
926 +@@ -384,7 +384,7 @@ static void get_cfg_composite_div(const struct composite_clk_gcfg *gcfg,
927 + mux_ops = div_ops = gate_ops = NULL;
928 + mux_hw = div_hw = gate_hw = NULL;
929 +
930 +- if (gcfg->mux && gcfg->mux) {
931 ++ if (gcfg->mux && cfg->mux) {
932 + mux = _get_cmux(base + cfg->mux->offset,
933 + cfg->mux->shift,
934 + cfg->mux->width,
935 +@@ -410,7 +410,7 @@ static void get_cfg_composite_div(const struct composite_clk_gcfg *gcfg,
936 + }
937 + }
938 +
939 +- if (gcfg->gate && gcfg->gate) {
940 ++ if (gcfg->gate && cfg->gate) {
941 + gate = _get_cgate(base + cfg->gate->offset,
942 + cfg->gate->bit_idx,
943 + gcfg->gate->flags, lock);
944 +diff --git a/drivers/clk/hisilicon/clk-hi3660.c b/drivers/clk/hisilicon/clk-hi3660.c
945 +index a18258eb89cb..f40419959656 100644
946 +--- a/drivers/clk/hisilicon/clk-hi3660.c
947 ++++ b/drivers/clk/hisilicon/clk-hi3660.c
948 +@@ -34,7 +34,7 @@ static const struct hisi_fixed_rate_clock hi3660_fixed_rate_clks[] = {
949 +
950 + /* crgctrl */
951 + static const struct hisi_fixed_factor_clock hi3660_crg_fixed_factor_clks[] = {
952 +- { HI3660_FACTOR_UART3, "clk_factor_uart3", "iomcu_peri0", 1, 8, 0, },
953 ++ { HI3660_FACTOR_UART3, "clk_factor_uart3", "iomcu_peri0", 1, 16, 0, },
954 + { HI3660_CLK_FACTOR_MMC, "clk_factor_mmc", "clkin_sys", 1, 6, 0, },
955 + { HI3660_CLK_GATE_I2C0, "clk_gate_i2c0", "clk_i2c0_iomcu", 1, 4, 0, },
956 + { HI3660_CLK_GATE_I2C1, "clk_gate_i2c1", "clk_i2c1_iomcu", 1, 4, 0, },
957 +diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c
958 +index d523991c945f..28ceaf1e9937 100644
959 +--- a/drivers/clk/qcom/common.c
960 ++++ b/drivers/clk/qcom/common.c
961 +@@ -143,8 +143,10 @@ static int _qcom_cc_register_board_clk(struct device *dev, const char *path,
962 + int ret;
963 +
964 + clocks_node = of_find_node_by_path("/clocks");
965 +- if (clocks_node)
966 +- node = of_find_node_by_name(clocks_node, path);
967 ++ if (clocks_node) {
968 ++ node = of_get_child_by_name(clocks_node, path);
969 ++ of_node_put(clocks_node);
970 ++ }
971 +
972 + if (!node) {
973 + fixed = devm_kzalloc(dev, sizeof(*fixed), GFP_KERNEL);
974 +diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c b/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c
975 +index e43acebdfbcd..f8203115a6bc 100644
976 +--- a/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c
977 ++++ b/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c
978 +@@ -354,9 +354,9 @@ static SUNXI_CCU_GATE(bus_tdm_clk, "bus-tdm", "apb1",
979 + static SUNXI_CCU_GATE(bus_i2c0_clk, "bus-i2c0", "apb2",
980 + 0x06c, BIT(0), 0);
981 + static SUNXI_CCU_GATE(bus_i2c1_clk, "bus-i2c1", "apb2",
982 +- 0x06c, BIT(0), 0);
983 ++ 0x06c, BIT(1), 0);
984 + static SUNXI_CCU_GATE(bus_i2c2_clk, "bus-i2c2", "apb2",
985 +- 0x06c, BIT(0), 0);
986 ++ 0x06c, BIT(2), 0);
987 + static SUNXI_CCU_GATE(bus_uart0_clk, "bus-uart0", "apb2",
988 + 0x06c, BIT(16), 0);
989 + static SUNXI_CCU_GATE(bus_uart1_clk, "bus-uart1", "apb2",
990 +diff --git a/drivers/clk/uniphier/clk-uniphier-sys.c b/drivers/clk/uniphier/clk-uniphier-sys.c
991 +index 07f3b91a7daf..d244e724e198 100644
992 +--- a/drivers/clk/uniphier/clk-uniphier-sys.c
993 ++++ b/drivers/clk/uniphier/clk-uniphier-sys.c
994 +@@ -123,7 +123,7 @@ const struct uniphier_clk_data uniphier_sld8_sys_clk_data[] = {
995 + const struct uniphier_clk_data uniphier_pro5_sys_clk_data[] = {
996 + UNIPHIER_CLK_FACTOR("spll", -1, "ref", 120, 1), /* 2400 MHz */
997 + UNIPHIER_CLK_FACTOR("dapll1", -1, "ref", 128, 1), /* 2560 MHz */
998 +- UNIPHIER_CLK_FACTOR("dapll2", -1, "ref", 144, 125), /* 2949.12 MHz */
999 ++ UNIPHIER_CLK_FACTOR("dapll2", -1, "dapll1", 144, 125), /* 2949.12 MHz */
1000 + UNIPHIER_CLK_FACTOR("uart", 0, "dapll2", 1, 40),
1001 + UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 48),
1002 + UNIPHIER_PRO5_SYS_CLK_NAND(2),
1003 +diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
1004 +index dff88838dce7..a19b5d0300a9 100644
1005 +--- a/drivers/crypto/talitos.c
1006 ++++ b/drivers/crypto/talitos.c
1007 +@@ -1232,12 +1232,11 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1008 + sg_link_tbl_len += authsize;
1009 + }
1010 +
1011 +- sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc,
1012 +- &desc->ptr[4], sg_count, areq->assoclen,
1013 +- tbl_off);
1014 ++ ret = talitos_sg_map(dev, areq->src, sg_link_tbl_len, edesc,
1015 ++ &desc->ptr[4], sg_count, areq->assoclen, tbl_off);
1016 +
1017 +- if (sg_count > 1) {
1018 +- tbl_off += sg_count;
1019 ++ if (ret > 1) {
1020 ++ tbl_off += ret;
1021 + sync_needed = true;
1022 + }
1023 +
1024 +@@ -1248,14 +1247,15 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1025 + dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
1026 + }
1027 +
1028 +- sg_count = talitos_sg_map(dev, areq->dst, cryptlen, edesc,
1029 +- &desc->ptr[5], sg_count, areq->assoclen,
1030 +- tbl_off);
1031 ++ ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
1032 ++ sg_count, areq->assoclen, tbl_off);
1033 +
1034 + if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)
1035 + to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1);
1036 +
1037 +- if (sg_count > 1) {
1038 ++ /* ICV data */
1039 ++ if (ret > 1) {
1040 ++ tbl_off += ret;
1041 + edesc->icv_ool = true;
1042 + sync_needed = true;
1043 +
1044 +@@ -1265,9 +1265,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1045 + sizeof(struct talitos_ptr) + authsize;
1046 +
1047 + /* Add an entry to the link table for ICV data */
1048 +- tbl_ptr += sg_count - 1;
1049 +- to_talitos_ptr_ext_set(tbl_ptr, 0, is_sec1);
1050 +- tbl_ptr++;
1051 ++ to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1);
1052 + to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RETURN,
1053 + is_sec1);
1054 + to_talitos_ptr_len(tbl_ptr, authsize, is_sec1);
1055 +@@ -1275,18 +1273,33 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1056 + /* icv data follows link tables */
1057 + to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl + offset,
1058 + is_sec1);
1059 ++ } else {
1060 ++ dma_addr_t addr = edesc->dma_link_tbl;
1061 ++
1062 ++ if (is_sec1)
1063 ++ addr += areq->assoclen + cryptlen;
1064 ++ else
1065 ++ addr += sizeof(struct talitos_ptr) * tbl_off;
1066 ++
1067 ++ to_talitos_ptr(&desc->ptr[6], addr, is_sec1);
1068 ++ to_talitos_ptr_len(&desc->ptr[6], authsize, is_sec1);
1069 ++ }
1070 ++ } else if (!(desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)) {
1071 ++ ret = talitos_sg_map(dev, areq->dst, authsize, edesc,
1072 ++ &desc->ptr[6], sg_count, areq->assoclen +
1073 ++ cryptlen,
1074 ++ tbl_off);
1075 ++ if (ret > 1) {
1076 ++ tbl_off += ret;
1077 ++ edesc->icv_ool = true;
1078 ++ sync_needed = true;
1079 ++ } else {
1080 ++ edesc->icv_ool = false;
1081 + }
1082 + } else {
1083 + edesc->icv_ool = false;
1084 + }
1085 +
1086 +- /* ICV data */
1087 +- if (!(desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)) {
1088 +- to_talitos_ptr_len(&desc->ptr[6], authsize, is_sec1);
1089 +- to_talitos_ptr(&desc->ptr[6], edesc->dma_link_tbl +
1090 +- areq->assoclen + cryptlen, is_sec1);
1091 +- }
1092 +-
1093 + /* iv out */
1094 + if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)
1095 + map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
1096 +@@ -1494,12 +1507,20 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
1097 + const u8 *key, unsigned int keylen)
1098 + {
1099 + struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1100 ++ u32 tmp[DES_EXPKEY_WORDS];
1101 +
1102 + if (keylen > TALITOS_MAX_KEY_SIZE) {
1103 + crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
1104 + return -EINVAL;
1105 + }
1106 +
1107 ++ if (unlikely(crypto_ablkcipher_get_flags(cipher) &
1108 ++ CRYPTO_TFM_REQ_WEAK_KEY) &&
1109 ++ !des_ekey(tmp, key)) {
1110 ++ crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_WEAK_KEY);
1111 ++ return -EINVAL;
1112 ++ }
1113 ++
1114 + memcpy(&ctx->key, key, keylen);
1115 + ctx->keylen = keylen;
1116 +
1117 +@@ -2614,7 +2635,7 @@ static struct talitos_alg_template driver_algs[] = {
1118 + .ivsize = AES_BLOCK_SIZE,
1119 + }
1120 + },
1121 +- .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
1122 ++ .desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP |
1123 + DESC_HDR_SEL0_AESU |
1124 + DESC_HDR_MODE0_AESU_CTR,
1125 + },
1126 +@@ -3047,6 +3068,11 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
1127 + t_alg->algt.alg.aead.setkey = aead_setkey;
1128 + t_alg->algt.alg.aead.encrypt = aead_encrypt;
1129 + t_alg->algt.alg.aead.decrypt = aead_decrypt;
1130 ++ if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
1131 ++ !strncmp(alg->cra_name, "authenc(hmac(sha224)", 20)) {
1132 ++ kfree(t_alg);
1133 ++ return ERR_PTR(-ENOTSUPP);
1134 ++ }
1135 + break;
1136 + case CRYPTO_ALG_TYPE_AHASH:
1137 + alg = &t_alg->algt.alg.hash.halg.base;
1138 +diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
1139 +index f70febf680c3..c3eefa126e3b 100644
1140 +--- a/drivers/firmware/efi/efi.c
1141 ++++ b/drivers/firmware/efi/efi.c
1142 +@@ -143,8 +143,7 @@ static ssize_t systab_show(struct kobject *kobj,
1143 + return str - buf;
1144 + }
1145 +
1146 +-static struct kobj_attribute efi_attr_systab =
1147 +- __ATTR(systab, 0400, systab_show, NULL);
1148 ++static struct kobj_attribute efi_attr_systab = __ATTR_RO_MODE(systab, 0400);
1149 +
1150 + #define EFI_FIELD(var) efi.var
1151 +
1152 +diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c
1153 +index bd7ed3c1148a..c47e0c6ec00f 100644
1154 +--- a/drivers/firmware/efi/esrt.c
1155 ++++ b/drivers/firmware/efi/esrt.c
1156 +@@ -106,7 +106,7 @@ static const struct sysfs_ops esre_attr_ops = {
1157 + };
1158 +
1159 + /* Generic ESRT Entry ("ESRE") support. */
1160 +-static ssize_t esre_fw_class_show(struct esre_entry *entry, char *buf)
1161 ++static ssize_t fw_class_show(struct esre_entry *entry, char *buf)
1162 + {
1163 + char *str = buf;
1164 +
1165 +@@ -117,18 +117,16 @@ static ssize_t esre_fw_class_show(struct esre_entry *entry, char *buf)
1166 + return str - buf;
1167 + }
1168 +
1169 +-static struct esre_attribute esre_fw_class = __ATTR(fw_class, 0400,
1170 +- esre_fw_class_show, NULL);
1171 ++static struct esre_attribute esre_fw_class = __ATTR_RO_MODE(fw_class, 0400);
1172 +
1173 + #define esre_attr_decl(name, size, fmt) \
1174 +-static ssize_t esre_##name##_show(struct esre_entry *entry, char *buf) \
1175 ++static ssize_t name##_show(struct esre_entry *entry, char *buf) \
1176 + { \
1177 + return sprintf(buf, fmt "\n", \
1178 + le##size##_to_cpu(entry->esre.esre1->name)); \
1179 + } \
1180 + \
1181 +-static struct esre_attribute esre_##name = __ATTR(name, 0400, \
1182 +- esre_##name##_show, NULL)
1183 ++static struct esre_attribute esre_##name = __ATTR_RO_MODE(name, 0400)
1184 +
1185 + esre_attr_decl(fw_type, 32, "%u");
1186 + esre_attr_decl(fw_version, 32, "%u");
1187 +@@ -193,14 +191,13 @@ static int esre_create_sysfs_entry(void *esre, int entry_num)
1188 +
1189 + /* support for displaying ESRT fields at the top level */
1190 + #define esrt_attr_decl(name, size, fmt) \
1191 +-static ssize_t esrt_##name##_show(struct kobject *kobj, \
1192 ++static ssize_t name##_show(struct kobject *kobj, \
1193 + struct kobj_attribute *attr, char *buf)\
1194 + { \
1195 + return sprintf(buf, fmt "\n", le##size##_to_cpu(esrt->name)); \
1196 + } \
1197 + \
1198 +-static struct kobj_attribute esrt_##name = __ATTR(name, 0400, \
1199 +- esrt_##name##_show, NULL)
1200 ++static struct kobj_attribute esrt_##name = __ATTR_RO_MODE(name, 0400)
1201 +
1202 + esrt_attr_decl(fw_resource_count, 32, "%u");
1203 + esrt_attr_decl(fw_resource_count_max, 32, "%u");
1204 +@@ -431,7 +428,7 @@ static int __init esrt_sysfs_init(void)
1205 + err_remove_esrt:
1206 + kobject_put(esrt_kobj);
1207 + err:
1208 +- kfree(esrt);
1209 ++ memunmap(esrt);
1210 + esrt = NULL;
1211 + return error;
1212 + }
1213 +diff --git a/drivers/firmware/efi/runtime-map.c b/drivers/firmware/efi/runtime-map.c
1214 +index 8e64b77aeac9..f377609ff141 100644
1215 +--- a/drivers/firmware/efi/runtime-map.c
1216 ++++ b/drivers/firmware/efi/runtime-map.c
1217 +@@ -63,11 +63,11 @@ static ssize_t map_attr_show(struct kobject *kobj, struct attribute *attr,
1218 + return map_attr->show(entry, buf);
1219 + }
1220 +
1221 +-static struct map_attribute map_type_attr = __ATTR_RO(type);
1222 +-static struct map_attribute map_phys_addr_attr = __ATTR_RO(phys_addr);
1223 +-static struct map_attribute map_virt_addr_attr = __ATTR_RO(virt_addr);
1224 +-static struct map_attribute map_num_pages_attr = __ATTR_RO(num_pages);
1225 +-static struct map_attribute map_attribute_attr = __ATTR_RO(attribute);
1226 ++static struct map_attribute map_type_attr = __ATTR_RO_MODE(type, 0400);
1227 ++static struct map_attribute map_phys_addr_attr = __ATTR_RO_MODE(phys_addr, 0400);
1228 ++static struct map_attribute map_virt_addr_attr = __ATTR_RO_MODE(virt_addr, 0400);
1229 ++static struct map_attribute map_num_pages_attr = __ATTR_RO_MODE(num_pages, 0400);
1230 ++static struct map_attribute map_attribute_attr = __ATTR_RO_MODE(attribute, 0400);
1231 +
1232 + /*
1233 + * These are default attributes that are added for every memmap entry.
1234 +diff --git a/drivers/firmware/google/vpd.c b/drivers/firmware/google/vpd.c
1235 +index 35e553b3b190..e4b40f2b4627 100644
1236 +--- a/drivers/firmware/google/vpd.c
1237 ++++ b/drivers/firmware/google/vpd.c
1238 +@@ -295,38 +295,60 @@ static int vpd_probe(struct platform_device *pdev)
1239 + if (ret)
1240 + return ret;
1241 +
1242 +- return vpd_sections_init(entry.cbmem_addr);
1243 ++ vpd_kobj = kobject_create_and_add("vpd", firmware_kobj);
1244 ++ if (!vpd_kobj)
1245 ++ return -ENOMEM;
1246 ++
1247 ++ ret = vpd_sections_init(entry.cbmem_addr);
1248 ++ if (ret) {
1249 ++ kobject_put(vpd_kobj);
1250 ++ return ret;
1251 ++ }
1252 ++
1253 ++ return 0;
1254 ++}
1255 ++
1256 ++static int vpd_remove(struct platform_device *pdev)
1257 ++{
1258 ++ vpd_section_destroy(&ro_vpd);
1259 ++ vpd_section_destroy(&rw_vpd);
1260 ++
1261 ++ kobject_put(vpd_kobj);
1262 ++
1263 ++ return 0;
1264 + }
1265 +
1266 + static struct platform_driver vpd_driver = {
1267 + .probe = vpd_probe,
1268 ++ .remove = vpd_remove,
1269 + .driver = {
1270 + .name = "vpd",
1271 + },
1272 + };
1273 +
1274 ++static struct platform_device *vpd_pdev;
1275 ++
1276 + static int __init vpd_platform_init(void)
1277 + {
1278 +- struct platform_device *pdev;
1279 +-
1280 +- pdev = platform_device_register_simple("vpd", -1, NULL, 0);
1281 +- if (IS_ERR(pdev))
1282 +- return PTR_ERR(pdev);
1283 ++ int ret;
1284 +
1285 +- vpd_kobj = kobject_create_and_add("vpd", firmware_kobj);
1286 +- if (!vpd_kobj)
1287 +- return -ENOMEM;
1288 ++ ret = platform_driver_register(&vpd_driver);
1289 ++ if (ret)
1290 ++ return ret;
1291 +
1292 +- platform_driver_register(&vpd_driver);
1293 ++ vpd_pdev = platform_device_register_simple("vpd", -1, NULL, 0);
1294 ++ if (IS_ERR(vpd_pdev)) {
1295 ++ platform_driver_unregister(&vpd_driver);
1296 ++ return PTR_ERR(vpd_pdev);
1297 ++ }
1298 +
1299 + return 0;
1300 + }
1301 +
1302 + static void __exit vpd_platform_exit(void)
1303 + {
1304 +- vpd_section_destroy(&ro_vpd);
1305 +- vpd_section_destroy(&rw_vpd);
1306 +- kobject_put(vpd_kobj);
1307 ++ platform_device_unregister(vpd_pdev);
1308 ++ platform_driver_unregister(&vpd_driver);
1309 + }
1310 +
1311 + module_init(vpd_platform_init);
1312 +diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
1313 +index 5dd3f1cd074a..a8905049b9da 100644
1314 +--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
1315 ++++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
1316 +@@ -946,7 +946,9 @@ static int analogix_dp_get_modes(struct drm_connector *connector)
1317 + return 0;
1318 + }
1319 +
1320 ++ pm_runtime_get_sync(dp->dev);
1321 + edid = drm_get_edid(connector, &dp->aux.ddc);
1322 ++ pm_runtime_put(dp->dev);
1323 + if (edid) {
1324 + drm_mode_connector_update_edid_property(&dp->connector,
1325 + edid);
1326 +diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
1327 +index 077de014d610..4400efe3974a 100644
1328 +--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
1329 ++++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
1330 +@@ -247,6 +247,15 @@ struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
1331 + if (IS_ERR(exynos_gem))
1332 + return exynos_gem;
1333 +
1334 ++ if (!is_drm_iommu_supported(dev) && (flags & EXYNOS_BO_NONCONTIG)) {
1335 ++ /*
1336 ++ * when no IOMMU is available, all allocated buffers are
1337 ++ * contiguous anyway, so drop EXYNOS_BO_NONCONTIG flag
1338 ++ */
1339 ++ flags &= ~EXYNOS_BO_NONCONTIG;
1340 ++ DRM_WARN("Non-contiguous allocation is not supported without IOMMU, falling back to contiguous buffer\n");
1341 ++ }
1342 ++
1343 + /* set memory type and cache attribute from user side. */
1344 + exynos_gem->flags = flags;
1345 +
1346 +diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
1347 +index 5ebdb63330dd..1c73d5542681 100644
1348 +--- a/drivers/gpu/drm/i915/intel_display.c
1349 ++++ b/drivers/gpu/drm/i915/intel_display.c
1350 +@@ -1000,7 +1000,8 @@ enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
1351 + return crtc->config->cpu_transcoder;
1352 + }
1353 +
1354 +-static bool pipe_dsl_stopped(struct drm_i915_private *dev_priv, enum pipe pipe)
1355 ++static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
1356 ++ enum pipe pipe)
1357 + {
1358 + i915_reg_t reg = PIPEDSL(pipe);
1359 + u32 line1, line2;
1360 +@@ -1015,7 +1016,28 @@ static bool pipe_dsl_stopped(struct drm_i915_private *dev_priv, enum pipe pipe)
1361 + msleep(5);
1362 + line2 = I915_READ(reg) & line_mask;
1363 +
1364 +- return line1 == line2;
1365 ++ return line1 != line2;
1366 ++}
1367 ++
1368 ++static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
1369 ++{
1370 ++ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1371 ++ enum pipe pipe = crtc->pipe;
1372 ++
1373 ++ /* Wait for the display line to settle/start moving */
1374 ++ if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
1375 ++ DRM_ERROR("pipe %c scanline %s wait timed out\n",
1376 ++ pipe_name(pipe), onoff(state));
1377 ++}
1378 ++
1379 ++static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
1380 ++{
1381 ++ wait_for_pipe_scanline_moving(crtc, false);
1382 ++}
1383 ++
1384 ++static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
1385 ++{
1386 ++ wait_for_pipe_scanline_moving(crtc, true);
1387 + }
1388 +
1389 + /*
1390 +@@ -1038,7 +1060,6 @@ static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
1391 + {
1392 + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1393 + enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
1394 +- enum pipe pipe = crtc->pipe;
1395 +
1396 + if (INTEL_GEN(dev_priv) >= 4) {
1397 + i915_reg_t reg = PIPECONF(cpu_transcoder);
1398 +@@ -1049,9 +1070,7 @@ static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
1399 + 100))
1400 + WARN(1, "pipe_off wait timed out\n");
1401 + } else {
1402 +- /* Wait for the display line to settle */
1403 +- if (wait_for(pipe_dsl_stopped(dev_priv, pipe), 100))
1404 +- WARN(1, "pipe_off wait timed out\n");
1405 ++ intel_wait_for_pipe_scanline_stopped(crtc);
1406 + }
1407 + }
1408 +
1409 +@@ -1944,15 +1963,14 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
1410 + POSTING_READ(reg);
1411 +
1412 + /*
1413 +- * Until the pipe starts DSL will read as 0, which would cause
1414 +- * an apparent vblank timestamp jump, which messes up also the
1415 +- * frame count when it's derived from the timestamps. So let's
1416 +- * wait for the pipe to start properly before we call
1417 +- * drm_crtc_vblank_on()
1418 ++ * Until the pipe starts PIPEDSL reads will return a stale value,
1419 ++ * which causes an apparent vblank timestamp jump when PIPEDSL
1420 ++ * resets to its proper value. That also messes up the frame count
1421 ++ * when it's derived from the timestamps. So let's wait for the
1422 ++ * pipe to start properly before we call drm_crtc_vblank_on()
1423 + */
1424 +- if (dev->max_vblank_count == 0 &&
1425 +- wait_for(intel_get_crtc_scanline(crtc) != crtc->scanline_offset, 50))
1426 +- DRM_ERROR("pipe %c didn't start\n", pipe_name(pipe));
1427 ++ if (dev->max_vblank_count == 0)
1428 ++ intel_wait_for_pipe_scanline_moving(crtc);
1429 + }
1430 +
1431 + /**
1432 +@@ -14682,6 +14700,8 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
1433 +
1434 + void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
1435 + {
1436 ++ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
1437 ++
1438 + DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n",
1439 + pipe_name(pipe));
1440 +
1441 +@@ -14691,8 +14711,7 @@ void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
1442 + I915_WRITE(PIPECONF(pipe), 0);
1443 + POSTING_READ(PIPECONF(pipe));
1444 +
1445 +- if (wait_for(pipe_dsl_stopped(dev_priv, pipe), 100))
1446 +- DRM_ERROR("pipe %c off wait timed out\n", pipe_name(pipe));
1447 ++ intel_wait_for_pipe_scanline_stopped(crtc);
1448 +
1449 + I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
1450 + POSTING_READ(DPLL(pipe));
1451 +diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
1452 +index 894b67ac2cae..05964347008d 100644
1453 +--- a/drivers/hv/channel.c
1454 ++++ b/drivers/hv/channel.c
1455 +@@ -640,22 +640,28 @@ void vmbus_close(struct vmbus_channel *channel)
1456 + */
1457 + return;
1458 + }
1459 +- mutex_lock(&vmbus_connection.channel_mutex);
1460 + /*
1461 + * Close all the sub-channels first and then close the
1462 + * primary channel.
1463 + */
1464 + list_for_each_safe(cur, tmp, &channel->sc_list) {
1465 + cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
1466 +- vmbus_close_internal(cur_channel);
1467 + if (cur_channel->rescind) {
1468 ++ wait_for_completion(&cur_channel->rescind_event);
1469 ++ mutex_lock(&vmbus_connection.channel_mutex);
1470 ++ vmbus_close_internal(cur_channel);
1471 + hv_process_channel_removal(
1472 + cur_channel->offermsg.child_relid);
1473 ++ } else {
1474 ++ mutex_lock(&vmbus_connection.channel_mutex);
1475 ++ vmbus_close_internal(cur_channel);
1476 + }
1477 ++ mutex_unlock(&vmbus_connection.channel_mutex);
1478 + }
1479 + /*
1480 + * Now close the primary.
1481 + */
1482 ++ mutex_lock(&vmbus_connection.channel_mutex);
1483 + vmbus_close_internal(channel);
1484 + mutex_unlock(&vmbus_connection.channel_mutex);
1485 + }
1486 +diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
1487 +index 379b0df123be..65c6d6bdce4c 100644
1488 +--- a/drivers/hv/channel_mgmt.c
1489 ++++ b/drivers/hv/channel_mgmt.c
1490 +@@ -333,6 +333,7 @@ static struct vmbus_channel *alloc_channel(void)
1491 + return NULL;
1492 +
1493 + spin_lock_init(&channel->lock);
1494 ++ init_completion(&channel->rescind_event);
1495 +
1496 + INIT_LIST_HEAD(&channel->sc_list);
1497 + INIT_LIST_HEAD(&channel->percpu_list);
1498 +@@ -883,6 +884,7 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
1499 + /*
1500 + * Now wait for offer handling to complete.
1501 + */
1502 ++ vmbus_rescind_cleanup(channel);
1503 + while (READ_ONCE(channel->probe_done) == false) {
1504 + /*
1505 + * We wait here until any channel offer is currently
1506 +@@ -898,7 +900,6 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
1507 + if (channel->device_obj) {
1508 + if (channel->chn_rescind_callback) {
1509 + channel->chn_rescind_callback(channel);
1510 +- vmbus_rescind_cleanup(channel);
1511 + return;
1512 + }
1513 + /*
1514 +@@ -907,7 +908,6 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
1515 + */
1516 + dev = get_device(&channel->device_obj->device);
1517 + if (dev) {
1518 +- vmbus_rescind_cleanup(channel);
1519 + vmbus_device_unregister(channel->device_obj);
1520 + put_device(dev);
1521 + }
1522 +@@ -921,13 +921,14 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
1523 + * 2. Then close the primary channel.
1524 + */
1525 + mutex_lock(&vmbus_connection.channel_mutex);
1526 +- vmbus_rescind_cleanup(channel);
1527 + if (channel->state == CHANNEL_OPEN_STATE) {
1528 + /*
1529 + * The channel is currently not open;
1530 + * it is safe for us to cleanup the channel.
1531 + */
1532 + hv_process_channel_removal(rescind->child_relid);
1533 ++ } else {
1534 ++ complete(&channel->rescind_event);
1535 + }
1536 + mutex_unlock(&vmbus_connection.channel_mutex);
1537 + }
1538 +diff --git a/drivers/iio/adc/cpcap-adc.c b/drivers/iio/adc/cpcap-adc.c
1539 +index 6e419d5a7c14..f153e02686a0 100644
1540 +--- a/drivers/iio/adc/cpcap-adc.c
1541 ++++ b/drivers/iio/adc/cpcap-adc.c
1542 +@@ -1012,7 +1012,7 @@ static int cpcap_adc_probe(struct platform_device *pdev)
1543 + platform_set_drvdata(pdev, indio_dev);
1544 +
1545 + ddata->irq = platform_get_irq_byname(pdev, "adcdone");
1546 +- if (!ddata->irq)
1547 ++ if (ddata->irq < 0)
1548 + return -ENODEV;
1549 +
1550 + error = devm_request_threaded_irq(&pdev->dev, ddata->irq, NULL,
1551 +diff --git a/drivers/iio/adc/meson_saradc.c b/drivers/iio/adc/meson_saradc.c
1552 +index 2e8dbb89c8c9..7dc7d297a0fc 100644
1553 +--- a/drivers/iio/adc/meson_saradc.c
1554 ++++ b/drivers/iio/adc/meson_saradc.c
1555 +@@ -221,8 +221,10 @@ enum meson_sar_adc_chan7_mux_sel {
1556 +
1557 + struct meson_sar_adc_data {
1558 + bool has_bl30_integration;
1559 ++ u32 bandgap_reg;
1560 + unsigned int resolution;
1561 + const char *name;
1562 ++ const struct regmap_config *regmap_config;
1563 + };
1564 +
1565 + struct meson_sar_adc_priv {
1566 +@@ -242,13 +244,20 @@ struct meson_sar_adc_priv {
1567 + int calibscale;
1568 + };
1569 +
1570 +-static const struct regmap_config meson_sar_adc_regmap_config = {
1571 ++static const struct regmap_config meson_sar_adc_regmap_config_gxbb = {
1572 + .reg_bits = 8,
1573 + .val_bits = 32,
1574 + .reg_stride = 4,
1575 + .max_register = MESON_SAR_ADC_REG13,
1576 + };
1577 +
1578 ++static const struct regmap_config meson_sar_adc_regmap_config_meson8 = {
1579 ++ .reg_bits = 8,
1580 ++ .val_bits = 32,
1581 ++ .reg_stride = 4,
1582 ++ .max_register = MESON_SAR_ADC_DELTA_10,
1583 ++};
1584 ++
1585 + static unsigned int meson_sar_adc_get_fifo_count(struct iio_dev *indio_dev)
1586 + {
1587 + struct meson_sar_adc_priv *priv = iio_priv(indio_dev);
1588 +@@ -600,7 +609,7 @@ static int meson_sar_adc_clk_init(struct iio_dev *indio_dev,
1589 + init.num_parents = 1;
1590 +
1591 + priv->clk_gate.reg = base + MESON_SAR_ADC_REG3;
1592 +- priv->clk_gate.bit_idx = fls(MESON_SAR_ADC_REG3_CLK_EN);
1593 ++ priv->clk_gate.bit_idx = __ffs(MESON_SAR_ADC_REG3_CLK_EN);
1594 + priv->clk_gate.hw.init = &init;
1595 +
1596 + priv->adc_clk = devm_clk_register(&indio_dev->dev, &priv->clk_gate.hw);
1597 +@@ -685,6 +694,20 @@ static int meson_sar_adc_init(struct iio_dev *indio_dev)
1598 + return 0;
1599 + }
1600 +
1601 ++static void meson_sar_adc_set_bandgap(struct iio_dev *indio_dev, bool on_off)
1602 ++{
1603 ++ struct meson_sar_adc_priv *priv = iio_priv(indio_dev);
1604 ++ u32 enable_mask;
1605 ++
1606 ++ if (priv->data->bandgap_reg == MESON_SAR_ADC_REG11)
1607 ++ enable_mask = MESON_SAR_ADC_REG11_BANDGAP_EN;
1608 ++ else
1609 ++ enable_mask = MESON_SAR_ADC_DELTA_10_TS_VBG_EN;
1610 ++
1611 ++ regmap_update_bits(priv->regmap, priv->data->bandgap_reg, enable_mask,
1612 ++ on_off ? enable_mask : 0);
1613 ++}
1614 ++
1615 + static int meson_sar_adc_hw_enable(struct iio_dev *indio_dev)
1616 + {
1617 + struct meson_sar_adc_priv *priv = iio_priv(indio_dev);
1618 +@@ -717,9 +740,9 @@ static int meson_sar_adc_hw_enable(struct iio_dev *indio_dev)
1619 + regval = FIELD_PREP(MESON_SAR_ADC_REG0_FIFO_CNT_IRQ_MASK, 1);
1620 + regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG0,
1621 + MESON_SAR_ADC_REG0_FIFO_CNT_IRQ_MASK, regval);
1622 +- regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG11,
1623 +- MESON_SAR_ADC_REG11_BANDGAP_EN,
1624 +- MESON_SAR_ADC_REG11_BANDGAP_EN);
1625 ++
1626 ++ meson_sar_adc_set_bandgap(indio_dev, true);
1627 ++
1628 + regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG3,
1629 + MESON_SAR_ADC_REG3_ADC_EN,
1630 + MESON_SAR_ADC_REG3_ADC_EN);
1631 +@@ -739,8 +762,7 @@ static int meson_sar_adc_hw_enable(struct iio_dev *indio_dev)
1632 + err_adc_clk:
1633 + regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG3,
1634 + MESON_SAR_ADC_REG3_ADC_EN, 0);
1635 +- regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG11,
1636 +- MESON_SAR_ADC_REG11_BANDGAP_EN, 0);
1637 ++ meson_sar_adc_set_bandgap(indio_dev, false);
1638 + clk_disable_unprepare(priv->sana_clk);
1639 + err_sana_clk:
1640 + clk_disable_unprepare(priv->core_clk);
1641 +@@ -765,8 +787,8 @@ static int meson_sar_adc_hw_disable(struct iio_dev *indio_dev)
1642 +
1643 + regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG3,
1644 + MESON_SAR_ADC_REG3_ADC_EN, 0);
1645 +- regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG11,
1646 +- MESON_SAR_ADC_REG11_BANDGAP_EN, 0);
1647 ++
1648 ++ meson_sar_adc_set_bandgap(indio_dev, false);
1649 +
1650 + clk_disable_unprepare(priv->sana_clk);
1651 + clk_disable_unprepare(priv->core_clk);
1652 +@@ -845,30 +867,40 @@ static const struct iio_info meson_sar_adc_iio_info = {
1653 +
1654 + static const struct meson_sar_adc_data meson_sar_adc_meson8_data = {
1655 + .has_bl30_integration = false,
1656 ++ .bandgap_reg = MESON_SAR_ADC_DELTA_10,
1657 ++ .regmap_config = &meson_sar_adc_regmap_config_meson8,
1658 + .resolution = 10,
1659 + .name = "meson-meson8-saradc",
1660 + };
1661 +
1662 + static const struct meson_sar_adc_data meson_sar_adc_meson8b_data = {
1663 + .has_bl30_integration = false,
1664 ++ .bandgap_reg = MESON_SAR_ADC_DELTA_10,
1665 ++ .regmap_config = &meson_sar_adc_regmap_config_meson8,
1666 + .resolution = 10,
1667 + .name = "meson-meson8b-saradc",
1668 + };
1669 +
1670 + static const struct meson_sar_adc_data meson_sar_adc_gxbb_data = {
1671 + .has_bl30_integration = true,
1672 ++ .bandgap_reg = MESON_SAR_ADC_REG11,
1673 ++ .regmap_config = &meson_sar_adc_regmap_config_gxbb,
1674 + .resolution = 10,
1675 + .name = "meson-gxbb-saradc",
1676 + };
1677 +
1678 + static const struct meson_sar_adc_data meson_sar_adc_gxl_data = {
1679 + .has_bl30_integration = true,
1680 ++ .bandgap_reg = MESON_SAR_ADC_REG11,
1681 ++ .regmap_config = &meson_sar_adc_regmap_config_gxbb,
1682 + .resolution = 12,
1683 + .name = "meson-gxl-saradc",
1684 + };
1685 +
1686 + static const struct meson_sar_adc_data meson_sar_adc_gxm_data = {
1687 + .has_bl30_integration = true,
1688 ++ .bandgap_reg = MESON_SAR_ADC_REG11,
1689 ++ .regmap_config = &meson_sar_adc_regmap_config_gxbb,
1690 + .resolution = 12,
1691 + .name = "meson-gxm-saradc",
1692 + };
1693 +@@ -946,7 +978,7 @@ static int meson_sar_adc_probe(struct platform_device *pdev)
1694 + return ret;
1695 +
1696 + priv->regmap = devm_regmap_init_mmio(&pdev->dev, base,
1697 +- &meson_sar_adc_regmap_config);
1698 ++ priv->data->regmap_config);
1699 + if (IS_ERR(priv->regmap))
1700 + return PTR_ERR(priv->regmap);
1701 +
1702 +diff --git a/drivers/iio/health/max30102.c b/drivers/iio/health/max30102.c
1703 +index 839b875c29b9..9fb4bc73a6bc 100644
1704 +--- a/drivers/iio/health/max30102.c
1705 ++++ b/drivers/iio/health/max30102.c
1706 +@@ -371,7 +371,7 @@ static int max30102_read_raw(struct iio_dev *indio_dev,
1707 + mutex_unlock(&indio_dev->mlock);
1708 + break;
1709 + case IIO_CHAN_INFO_SCALE:
1710 +- *val = 1; /* 0.0625 */
1711 ++ *val = 1000; /* 62.5 */
1712 + *val2 = 16;
1713 + ret = IIO_VAL_FRACTIONAL;
1714 + break;
1715 +diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c
1716 +index 28607bb42d87..a337386652b0 100644
1717 +--- a/drivers/infiniband/core/security.c
1718 ++++ b/drivers/infiniband/core/security.c
1719 +@@ -417,8 +417,17 @@ void ib_close_shared_qp_security(struct ib_qp_security *sec)
1720 +
1721 + int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev)
1722 + {
1723 ++ u8 i = rdma_start_port(dev);
1724 ++ bool is_ib = false;
1725 + int ret;
1726 +
1727 ++ while (i <= rdma_end_port(dev) && !is_ib)
1728 ++ is_ib = rdma_protocol_ib(dev, i++);
1729 ++
1730 ++ /* If this isn't an IB device don't create the security context */
1731 ++ if (!is_ib)
1732 ++ return 0;
1733 ++
1734 + qp->qp_sec = kzalloc(sizeof(*qp->qp_sec), GFP_KERNEL);
1735 + if (!qp->qp_sec)
1736 + return -ENOMEM;
1737 +@@ -441,6 +450,10 @@ EXPORT_SYMBOL(ib_create_qp_security);
1738 +
1739 + void ib_destroy_qp_security_begin(struct ib_qp_security *sec)
1740 + {
1741 ++ /* Return if not IB */
1742 ++ if (!sec)
1743 ++ return;
1744 ++
1745 + mutex_lock(&sec->mutex);
1746 +
1747 + /* Remove the QP from the lists so it won't get added to
1748 +@@ -470,6 +483,10 @@ void ib_destroy_qp_security_abort(struct ib_qp_security *sec)
1749 + int ret;
1750 + int i;
1751 +
1752 ++ /* Return if not IB */
1753 ++ if (!sec)
1754 ++ return;
1755 ++
1756 + /* If a concurrent cache update is in progress this
1757 + * QP security could be marked for an error state
1758 + * transition. Wait for this to complete.
1759 +@@ -505,6 +522,10 @@ void ib_destroy_qp_security_end(struct ib_qp_security *sec)
1760 + {
1761 + int i;
1762 +
1763 ++ /* Return if not IB */
1764 ++ if (!sec)
1765 ++ return;
1766 ++
1767 + /* If a concurrent cache update is occurring we must
1768 + * wait until this QP security structure is processed
1769 + * in the QP to error flow before destroying it because
1770 +@@ -557,7 +578,7 @@ int ib_security_modify_qp(struct ib_qp *qp,
1771 + {
1772 + int ret = 0;
1773 + struct ib_ports_pkeys *tmp_pps;
1774 +- struct ib_ports_pkeys *new_pps;
1775 ++ struct ib_ports_pkeys *new_pps = NULL;
1776 + struct ib_qp *real_qp = qp->real_qp;
1777 + bool special_qp = (real_qp->qp_type == IB_QPT_SMI ||
1778 + real_qp->qp_type == IB_QPT_GSI ||
1779 +@@ -565,18 +586,27 @@ int ib_security_modify_qp(struct ib_qp *qp,
1780 + bool pps_change = ((qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) ||
1781 + (qp_attr_mask & IB_QP_ALT_PATH));
1782 +
1783 ++ WARN_ONCE((qp_attr_mask & IB_QP_PORT &&
1784 ++ rdma_protocol_ib(real_qp->device, qp_attr->port_num) &&
1785 ++ !real_qp->qp_sec),
1786 ++ "%s: QP security is not initialized for IB QP: %d\n",
1787 ++ __func__, real_qp->qp_num);
1788 ++
1789 + /* The port/pkey settings are maintained only for the real QP. Open
1790 + * handles on the real QP will be in the shared_qp_list. When
1791 + * enforcing security on the real QP all the shared QPs will be
1792 + * checked as well.
1793 + */
1794 +
1795 +- if (pps_change && !special_qp) {
1796 ++ if (pps_change && !special_qp && real_qp->qp_sec) {
1797 + mutex_lock(&real_qp->qp_sec->mutex);
1798 + new_pps = get_new_pps(real_qp,
1799 + qp_attr,
1800 + qp_attr_mask);
1801 +-
1802 ++ if (!new_pps) {
1803 ++ mutex_unlock(&real_qp->qp_sec->mutex);
1804 ++ return -ENOMEM;
1805 ++ }
1806 + /* Add this QP to the lists for the new port
1807 + * and pkey settings before checking for permission
1808 + * in case there is a concurrent cache update
1809 +@@ -600,7 +630,7 @@ int ib_security_modify_qp(struct ib_qp *qp,
1810 + qp_attr_mask,
1811 + udata);
1812 +
1813 +- if (pps_change && !special_qp) {
1814 ++ if (new_pps) {
1815 + /* Clean up the lists and free the appropriate
1816 + * ports_pkeys structure.
1817 + */
1818 +@@ -631,6 +661,9 @@ int ib_security_pkey_access(struct ib_device *dev,
1819 + u16 pkey;
1820 + int ret;
1821 +
1822 ++ if (!rdma_protocol_ib(dev, port_num))
1823 ++ return 0;
1824 ++
1825 + ret = ib_get_cached_pkey(dev, port_num, pkey_index, &pkey);
1826 + if (ret)
1827 + return ret;
1828 +@@ -665,6 +698,9 @@ int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
1829 + {
1830 + int ret;
1831 +
1832 ++ if (!rdma_protocol_ib(agent->device, agent->port_num))
1833 ++ return 0;
1834 ++
1835 + ret = security_ib_alloc_security(&agent->security);
1836 + if (ret)
1837 + return ret;
1838 +@@ -690,6 +726,9 @@ int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
1839 +
1840 + void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent)
1841 + {
1842 ++ if (!rdma_protocol_ib(agent->device, agent->port_num))
1843 ++ return;
1844 ++
1845 + security_ib_free_security(agent->security);
1846 + if (agent->lsm_nb_reg)
1847 + unregister_lsm_notifier(&agent->lsm_nb);
1848 +@@ -697,20 +736,16 @@ void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent)
1849 +
1850 + int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index)
1851 + {
1852 +- int ret;
1853 ++ if (!rdma_protocol_ib(map->agent.device, map->agent.port_num))
1854 ++ return 0;
1855 +
1856 + if (map->agent.qp->qp_type == IB_QPT_SMI && !map->agent.smp_allowed)
1857 + return -EACCES;
1858 +
1859 +- ret = ib_security_pkey_access(map->agent.device,
1860 +- map->agent.port_num,
1861 +- pkey_index,
1862 +- map->agent.security);
1863 +-
1864 +- if (ret)
1865 +- return ret;
1866 +-
1867 +- return 0;
1868 ++ return ib_security_pkey_access(map->agent.device,
1869 ++ map->agent.port_num,
1870 ++ pkey_index,
1871 ++ map->agent.security);
1872 + }
1873 +
1874 + #endif /* CONFIG_SECURITY_INFINIBAND */
1875 +diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
1876 +index 0d89621d9fe8..b210495ff33c 100644
1877 +--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
1878 ++++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
1879 +@@ -394,6 +394,7 @@ int bnxt_re_add_gid(struct ib_device *ibdev, u8 port_num,
1880 + ctx->idx = tbl_idx;
1881 + ctx->refcnt = 1;
1882 + ctx_tbl[tbl_idx] = ctx;
1883 ++ *context = ctx;
1884 +
1885 + return rc;
1886 + }
1887 +diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
1888 +index b6b33d99b0b4..17e44c86577a 100644
1889 +--- a/drivers/infiniband/hw/mlx4/qp.c
1890 ++++ b/drivers/infiniband/hw/mlx4/qp.c
1891 +@@ -2216,7 +2216,7 @@ static int __mlx4_ib_modify_qp(void *src, enum mlx4_ib_source_type src_type,
1892 + context->mtu_msgmax = (IB_MTU_4096 << 5) |
1893 + ilog2(dev->dev->caps.max_gso_sz);
1894 + else
1895 +- context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
1896 ++ context->mtu_msgmax = (IB_MTU_4096 << 5) | 13;
1897 + } else if (attr_mask & IB_QP_PATH_MTU) {
1898 + if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) {
1899 + pr_err("path MTU (%u) is invalid\n",
1900 +diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
1901 +index 552f7bd4ecc3..5aff1e33d984 100644
1902 +--- a/drivers/infiniband/hw/mlx5/main.c
1903 ++++ b/drivers/infiniband/hw/mlx5/main.c
1904 +@@ -3097,6 +3097,8 @@ static int create_umr_res(struct mlx5_ib_dev *dev)
1905 + qp->real_qp = qp;
1906 + qp->uobject = NULL;
1907 + qp->qp_type = MLX5_IB_QPT_REG_UMR;
1908 ++ qp->send_cq = init_attr->send_cq;
1909 ++ qp->recv_cq = init_attr->recv_cq;
1910 +
1911 + attr->qp_state = IB_QPS_INIT;
1912 + attr->port_num = 1;
1913 +diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
1914 +index 6784a05dd6b2..83f3d4831f94 100644
1915 +--- a/drivers/iommu/intel-iommu.c
1916 ++++ b/drivers/iommu/intel-iommu.c
1917 +@@ -2254,10 +2254,12 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1918 + uint64_t tmp;
1919 +
1920 + if (!sg_res) {
1921 ++ unsigned int pgoff = sg->offset & ~PAGE_MASK;
1922 ++
1923 + sg_res = aligned_nrpages(sg->offset, sg->length);
1924 +- sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
1925 ++ sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + pgoff;
1926 + sg->dma_length = sg->length;
1927 +- pteval = page_to_phys(sg_page(sg)) | prot;
1928 ++ pteval = (sg_phys(sg) - pgoff) | prot;
1929 + phys_pfn = pteval >> VTD_PAGE_SHIFT;
1930 + }
1931 +
1932 +@@ -3790,7 +3792,7 @@ static int intel_nontranslate_map_sg(struct device *hddev,
1933 +
1934 + for_each_sg(sglist, sg, nelems, i) {
1935 + BUG_ON(!sg_page(sg));
1936 +- sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
1937 ++ sg->dma_address = sg_phys(sg);
1938 + sg->dma_length = sg->length;
1939 + }
1940 + return nelems;
1941 +diff --git a/drivers/irqchip/qcom-irq-combiner.c b/drivers/irqchip/qcom-irq-combiner.c
1942 +index 6aa3ea479214..f31265937439 100644
1943 +--- a/drivers/irqchip/qcom-irq-combiner.c
1944 ++++ b/drivers/irqchip/qcom-irq-combiner.c
1945 +@@ -238,7 +238,7 @@ static int __init combiner_probe(struct platform_device *pdev)
1946 + {
1947 + struct combiner *combiner;
1948 + size_t alloc_sz;
1949 +- u32 nregs;
1950 ++ int nregs;
1951 + int err;
1952 +
1953 + nregs = count_registers(pdev);
1954 +diff --git a/drivers/mailbox/mailbox-test.c b/drivers/mailbox/mailbox-test.c
1955 +index 97fb956bb6e0..93f3d4d61fa7 100644
1956 +--- a/drivers/mailbox/mailbox-test.c
1957 ++++ b/drivers/mailbox/mailbox-test.c
1958 +@@ -30,6 +30,7 @@
1959 + #define MBOX_HEXDUMP_MAX_LEN (MBOX_HEXDUMP_LINE_LEN * \
1960 + (MBOX_MAX_MSG_LEN / MBOX_BYTES_PER_LINE))
1961 +
1962 ++static bool mbox_data_ready;
1963 + static struct dentry *root_debugfs_dir;
1964 +
1965 + struct mbox_test_device {
1966 +@@ -152,16 +153,14 @@ static ssize_t mbox_test_message_write(struct file *filp,
1967 +
1968 + static bool mbox_test_message_data_ready(struct mbox_test_device *tdev)
1969 + {
1970 +- unsigned char data;
1971 ++ bool data_ready;
1972 + unsigned long flags;
1973 +
1974 + spin_lock_irqsave(&tdev->lock, flags);
1975 +- data = tdev->rx_buffer[0];
1976 ++ data_ready = mbox_data_ready;
1977 + spin_unlock_irqrestore(&tdev->lock, flags);
1978 +
1979 +- if (data != '\0')
1980 +- return true;
1981 +- return false;
1982 ++ return data_ready;
1983 + }
1984 +
1985 + static ssize_t mbox_test_message_read(struct file *filp, char __user *userbuf,
1986 +@@ -223,6 +222,7 @@ static ssize_t mbox_test_message_read(struct file *filp, char __user *userbuf,
1987 + *(touser + l) = '\0';
1988 +
1989 + memset(tdev->rx_buffer, 0, MBOX_MAX_MSG_LEN);
1990 ++ mbox_data_ready = false;
1991 +
1992 + spin_unlock_irqrestore(&tdev->lock, flags);
1993 +
1994 +@@ -292,6 +292,7 @@ static void mbox_test_receive_message(struct mbox_client *client, void *message)
1995 + message, MBOX_MAX_MSG_LEN);
1996 + memcpy(tdev->rx_buffer, message, MBOX_MAX_MSG_LEN);
1997 + }
1998 ++ mbox_data_ready = true;
1999 + spin_unlock_irqrestore(&tdev->lock, flags);
2000 +
2001 + wake_up_interruptible(&tdev->waitq);
2002 +diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
2003 +index 0b7406ac8ce1..9a340728b846 100644
2004 +--- a/drivers/md/raid5-cache.c
2005 ++++ b/drivers/md/raid5-cache.c
2006 +@@ -2571,31 +2571,22 @@ static ssize_t r5c_journal_mode_show(struct mddev *mddev, char *page)
2007 + int r5c_journal_mode_set(struct mddev *mddev, int mode)
2008 + {
2009 + struct r5conf *conf;
2010 +- int err;
2011 +
2012 + if (mode < R5C_JOURNAL_MODE_WRITE_THROUGH ||
2013 + mode > R5C_JOURNAL_MODE_WRITE_BACK)
2014 + return -EINVAL;
2015 +
2016 +- err = mddev_lock(mddev);
2017 +- if (err)
2018 +- return err;
2019 + conf = mddev->private;
2020 +- if (!conf || !conf->log) {
2021 +- mddev_unlock(mddev);
2022 ++ if (!conf || !conf->log)
2023 + return -ENODEV;
2024 +- }
2025 +
2026 + if (raid5_calc_degraded(conf) > 0 &&
2027 +- mode == R5C_JOURNAL_MODE_WRITE_BACK) {
2028 +- mddev_unlock(mddev);
2029 ++ mode == R5C_JOURNAL_MODE_WRITE_BACK)
2030 + return -EINVAL;
2031 +- }
2032 +
2033 + mddev_suspend(mddev);
2034 + conf->log->r5c_journal_mode = mode;
2035 + mddev_resume(mddev);
2036 +- mddev_unlock(mddev);
2037 +
2038 + pr_debug("md/raid:%s: setting r5c cache mode to %d: %s\n",
2039 + mdname(mddev), mode, r5c_journal_mode_str[mode]);
2040 +@@ -2608,6 +2599,7 @@ static ssize_t r5c_journal_mode_store(struct mddev *mddev,
2041 + {
2042 + int mode = ARRAY_SIZE(r5c_journal_mode_str);
2043 + size_t len = length;
2044 ++ int ret;
2045 +
2046 + if (len < 2)
2047 + return -EINVAL;
2048 +@@ -2619,8 +2611,12 @@ static ssize_t r5c_journal_mode_store(struct mddev *mddev,
2049 + if (strlen(r5c_journal_mode_str[mode]) == len &&
2050 + !strncmp(page, r5c_journal_mode_str[mode], len))
2051 + break;
2052 +-
2053 +- return r5c_journal_mode_set(mddev, mode) ?: length;
2054 ++ ret = mddev_lock(mddev);
2055 ++ if (ret)
2056 ++ return ret;
2057 ++ ret = r5c_journal_mode_set(mddev, mode);
2058 ++ mddev_unlock(mddev);
2059 ++ return ret ?: length;
2060 + }
2061 +
2062 + struct md_sysfs_entry
2063 +diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
2064 +index 981cccd6b988..72f381522cb2 100644
2065 +--- a/drivers/media/rc/rc-main.c
2066 ++++ b/drivers/media/rc/rc-main.c
2067 +@@ -38,41 +38,41 @@ static const struct {
2068 + [RC_PROTO_UNKNOWN] = { .name = "unknown", .repeat_period = 250 },
2069 + [RC_PROTO_OTHER] = { .name = "other", .repeat_period = 250 },
2070 + [RC_PROTO_RC5] = { .name = "rc-5",
2071 +- .scancode_bits = 0x1f7f, .repeat_period = 164 },
2072 ++ .scancode_bits = 0x1f7f, .repeat_period = 250 },
2073 + [RC_PROTO_RC5X_20] = { .name = "rc-5x-20",
2074 +- .scancode_bits = 0x1f7f3f, .repeat_period = 164 },
2075 ++ .scancode_bits = 0x1f7f3f, .repeat_period = 250 },
2076 + [RC_PROTO_RC5_SZ] = { .name = "rc-5-sz",
2077 +- .scancode_bits = 0x2fff, .repeat_period = 164 },
2078 ++ .scancode_bits = 0x2fff, .repeat_period = 250 },
2079 + [RC_PROTO_JVC] = { .name = "jvc",
2080 + .scancode_bits = 0xffff, .repeat_period = 250 },
2081 + [RC_PROTO_SONY12] = { .name = "sony-12",
2082 +- .scancode_bits = 0x1f007f, .repeat_period = 100 },
2083 ++ .scancode_bits = 0x1f007f, .repeat_period = 250 },
2084 + [RC_PROTO_SONY15] = { .name = "sony-15",
2085 +- .scancode_bits = 0xff007f, .repeat_period = 100 },
2086 ++ .scancode_bits = 0xff007f, .repeat_period = 250 },
2087 + [RC_PROTO_SONY20] = { .name = "sony-20",
2088 +- .scancode_bits = 0x1fff7f, .repeat_period = 100 },
2089 ++ .scancode_bits = 0x1fff7f, .repeat_period = 250 },
2090 + [RC_PROTO_NEC] = { .name = "nec",
2091 +- .scancode_bits = 0xffff, .repeat_period = 160 },
2092 ++ .scancode_bits = 0xffff, .repeat_period = 250 },
2093 + [RC_PROTO_NECX] = { .name = "nec-x",
2094 +- .scancode_bits = 0xffffff, .repeat_period = 160 },
2095 ++ .scancode_bits = 0xffffff, .repeat_period = 250 },
2096 + [RC_PROTO_NEC32] = { .name = "nec-32",
2097 +- .scancode_bits = 0xffffffff, .repeat_period = 160 },
2098 ++ .scancode_bits = 0xffffffff, .repeat_period = 250 },
2099 + [RC_PROTO_SANYO] = { .name = "sanyo",
2100 + .scancode_bits = 0x1fffff, .repeat_period = 250 },
2101 + [RC_PROTO_MCIR2_KBD] = { .name = "mcir2-kbd",
2102 +- .scancode_bits = 0xffff, .repeat_period = 150 },
2103 ++ .scancode_bits = 0xffff, .repeat_period = 250 },
2104 + [RC_PROTO_MCIR2_MSE] = { .name = "mcir2-mse",
2105 +- .scancode_bits = 0x1fffff, .repeat_period = 150 },
2106 ++ .scancode_bits = 0x1fffff, .repeat_period = 250 },
2107 + [RC_PROTO_RC6_0] = { .name = "rc-6-0",
2108 +- .scancode_bits = 0xffff, .repeat_period = 164 },
2109 ++ .scancode_bits = 0xffff, .repeat_period = 250 },
2110 + [RC_PROTO_RC6_6A_20] = { .name = "rc-6-6a-20",
2111 +- .scancode_bits = 0xfffff, .repeat_period = 164 },
2112 ++ .scancode_bits = 0xfffff, .repeat_period = 250 },
2113 + [RC_PROTO_RC6_6A_24] = { .name = "rc-6-6a-24",
2114 +- .scancode_bits = 0xffffff, .repeat_period = 164 },
2115 ++ .scancode_bits = 0xffffff, .repeat_period = 250 },
2116 + [RC_PROTO_RC6_6A_32] = { .name = "rc-6-6a-32",
2117 +- .scancode_bits = 0xffffffff, .repeat_period = 164 },
2118 ++ .scancode_bits = 0xffffffff, .repeat_period = 250 },
2119 + [RC_PROTO_RC6_MCE] = { .name = "rc-6-mce",
2120 +- .scancode_bits = 0xffff7fff, .repeat_period = 164 },
2121 ++ .scancode_bits = 0xffff7fff, .repeat_period = 250 },
2122 + [RC_PROTO_SHARP] = { .name = "sharp",
2123 + .scancode_bits = 0x1fff, .repeat_period = 250 },
2124 + [RC_PROTO_XMP] = { .name = "xmp", .repeat_period = 250 },
2125 +diff --git a/drivers/media/rc/sir_ir.c b/drivers/media/rc/sir_ir.c
2126 +index bc906fb128d5..d59918878eb2 100644
2127 +--- a/drivers/media/rc/sir_ir.c
2128 ++++ b/drivers/media/rc/sir_ir.c
2129 +@@ -57,7 +57,7 @@ static void add_read_queue(int flag, unsigned long val);
2130 + static irqreturn_t sir_interrupt(int irq, void *dev_id);
2131 + static void send_space(unsigned long len);
2132 + static void send_pulse(unsigned long len);
2133 +-static void init_hardware(void);
2134 ++static int init_hardware(void);
2135 + static void drop_hardware(void);
2136 + /* Initialisation */
2137 +
2138 +@@ -263,11 +263,36 @@ static void send_pulse(unsigned long len)
2139 + }
2140 + }
2141 +
2142 +-static void init_hardware(void)
2143 ++static int init_hardware(void)
2144 + {
2145 ++ u8 scratch, scratch2, scratch3;
2146 + unsigned long flags;
2147 +
2148 + spin_lock_irqsave(&hardware_lock, flags);
2149 ++
2150 ++ /*
2151 ++ * This is a simple port existence test, borrowed from the autoconfig
2152 ++ * function in drivers/tty/serial/8250/8250_port.c
2153 ++ */
2154 ++ scratch = sinp(UART_IER);
2155 ++ soutp(UART_IER, 0);
2156 ++#ifdef __i386__
2157 ++ outb(0xff, 0x080);
2158 ++#endif
2159 ++ scratch2 = sinp(UART_IER) & 0x0f;
2160 ++ soutp(UART_IER, 0x0f);
2161 ++#ifdef __i386__
2162 ++ outb(0x00, 0x080);
2163 ++#endif
2164 ++ scratch3 = sinp(UART_IER) & 0x0f;
2165 ++ soutp(UART_IER, scratch);
2166 ++ if (scratch2 != 0 || scratch3 != 0x0f) {
2167 ++ /* we fail, there's nothing here */
2168 ++ spin_unlock_irqrestore(&hardware_lock, flags);
2169 ++ pr_err("port existence test failed, cannot continue\n");
2170 ++ return -ENODEV;
2171 ++ }
2172 ++
2173 + /* reset UART */
2174 + outb(0, io + UART_MCR);
2175 + outb(0, io + UART_IER);
2176 +@@ -285,6 +310,8 @@ static void init_hardware(void)
2177 + /* turn on UART */
2178 + outb(UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2, io + UART_MCR);
2179 + spin_unlock_irqrestore(&hardware_lock, flags);
2180 ++
2181 ++ return 0;
2182 + }
2183 +
2184 + static void drop_hardware(void)
2185 +@@ -334,14 +361,19 @@ static int sir_ir_probe(struct platform_device *dev)
2186 + pr_err("IRQ %d already in use.\n", irq);
2187 + return retval;
2188 + }
2189 ++
2190 ++ retval = init_hardware();
2191 ++ if (retval) {
2192 ++ del_timer_sync(&timerlist);
2193 ++ return retval;
2194 ++ }
2195 ++
2196 + pr_info("I/O port 0x%.4x, IRQ %d.\n", io, irq);
2197 +
2198 + retval = devm_rc_register_device(&sir_ir_dev->dev, rcdev);
2199 + if (retval < 0)
2200 + return retval;
2201 +
2202 +- init_hardware();
2203 +-
2204 + return 0;
2205 + }
2206 +
2207 +diff --git a/drivers/media/usb/dvb-usb/dibusb-common.c b/drivers/media/usb/dvb-usb/dibusb-common.c
2208 +index 8207e6900656..bcacb0f22028 100644
2209 +--- a/drivers/media/usb/dvb-usb/dibusb-common.c
2210 ++++ b/drivers/media/usb/dvb-usb/dibusb-common.c
2211 +@@ -223,8 +223,20 @@ EXPORT_SYMBOL(dibusb_i2c_algo);
2212 +
2213 + int dibusb_read_eeprom_byte(struct dvb_usb_device *d, u8 offs, u8 *val)
2214 + {
2215 +- u8 wbuf[1] = { offs };
2216 +- return dibusb_i2c_msg(d, 0x50, wbuf, 1, val, 1);
2217 ++ u8 *buf;
2218 ++ int rc;
2219 ++
2220 ++ buf = kmalloc(2, GFP_KERNEL);
2221 ++ if (!buf)
2222 ++ return -ENOMEM;
2223 ++
2224 ++ buf[0] = offs;
2225 ++
2226 ++ rc = dibusb_i2c_msg(d, 0x50, &buf[0], 1, &buf[1], 1);
2227 ++ *val = buf[1];
2228 ++ kfree(buf);
2229 ++
2230 ++ return rc;
2231 + }
2232 + EXPORT_SYMBOL(dibusb_read_eeprom_byte);
2233 +
2234 +diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
2235 +index a13a4896a8bd..c4d1140116ea 100644
2236 +--- a/drivers/net/can/flexcan.c
2237 ++++ b/drivers/net/can/flexcan.c
2238 +@@ -189,7 +189,7 @@
2239 + * MX35 FlexCAN2 03.00.00.00 no no ? no no
2240 + * MX53 FlexCAN2 03.00.00.00 yes no no no no
2241 + * MX6s FlexCAN3 10.00.12.00 yes yes no no yes
2242 +- * VF610 FlexCAN3 ? no yes ? yes yes?
2243 ++ * VF610 FlexCAN3 ? no yes no yes yes?
2244 + *
2245 + * Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected.
2246 + */
2247 +@@ -297,7 +297,8 @@ static const struct flexcan_devtype_data fsl_imx6q_devtype_data = {
2248 +
2249 + static const struct flexcan_devtype_data fsl_vf610_devtype_data = {
2250 + .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
2251 +- FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP,
2252 ++ FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP |
2253 ++ FLEXCAN_QUIRK_BROKEN_PERR_STATE,
2254 + };
2255 +
2256 + static const struct can_bittiming_const flexcan_bittiming_const = {
2257 +diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c
2258 +index 85268be0c913..55513411a82e 100644
2259 +--- a/drivers/net/can/peak_canfd/peak_canfd.c
2260 ++++ b/drivers/net/can/peak_canfd/peak_canfd.c
2261 +@@ -258,21 +258,18 @@ static int pucan_handle_can_rx(struct peak_canfd_priv *priv,
2262 + /* if this frame is an echo, */
2263 + if ((rx_msg_flags & PUCAN_MSG_LOOPED_BACK) &&
2264 + !(rx_msg_flags & PUCAN_MSG_SELF_RECEIVE)) {
2265 +- int n;
2266 + unsigned long flags;
2267 +
2268 + spin_lock_irqsave(&priv->echo_lock, flags);
2269 +- n = can_get_echo_skb(priv->ndev, msg->client);
2270 ++ can_get_echo_skb(priv->ndev, msg->client);
2271 + spin_unlock_irqrestore(&priv->echo_lock, flags);
2272 +
2273 + /* count bytes of the echo instead of skb */
2274 + stats->tx_bytes += cf_len;
2275 + stats->tx_packets++;
2276 +
2277 +- if (n) {
2278 +- /* restart tx queue only if a slot is free */
2279 +- netif_wake_queue(priv->ndev);
2280 +- }
2281 ++ /* restart tx queue (a slot is free) */
2282 ++ netif_wake_queue(priv->ndev);
2283 +
2284 + return 0;
2285 + }
2286 +diff --git a/drivers/net/can/peak_canfd/peak_pciefd_main.c b/drivers/net/can/peak_canfd/peak_pciefd_main.c
2287 +index b4efd711f824..788c3464a3b0 100644
2288 +--- a/drivers/net/can/peak_canfd/peak_pciefd_main.c
2289 ++++ b/drivers/net/can/peak_canfd/peak_pciefd_main.c
2290 +@@ -825,7 +825,10 @@ static int peak_pciefd_probe(struct pci_dev *pdev,
2291 + err_disable_pci:
2292 + pci_disable_device(pdev);
2293 +
2294 +- return err;
2295 ++ /* pci_xxx_config_word() return positive PCIBIOS_xxx error codes while
2296 ++ * the probe() function must return a negative errno in case of failure
2297 ++ * (err is unchanged if negative) */
2298 ++ return pcibios_err_to_errno(err);
2299 + }
2300 +
2301 + /* free the board structure object, as well as its resources: */
2302 +diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
2303 +index 131026fbc2d7..5adc95c922ee 100644
2304 +--- a/drivers/net/can/sja1000/peak_pci.c
2305 ++++ b/drivers/net/can/sja1000/peak_pci.c
2306 +@@ -717,7 +717,10 @@ static int peak_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2307 + failure_disable_pci:
2308 + pci_disable_device(pdev);
2309 +
2310 +- return err;
2311 ++ /* pci_xxx_config_word() return positive PCIBIOS_xxx error codes while
2312 ++ * the probe() function must return a negative errno in case of failure
2313 ++ * (err is unchanged if negative) */
2314 ++ return pcibios_err_to_errno(err);
2315 + }
2316 +
2317 + static void peak_pci_remove(struct pci_dev *pdev)
2318 +diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
2319 +index 4d4941469cfc..db6ea936dc3f 100644
2320 +--- a/drivers/net/can/ti_hecc.c
2321 ++++ b/drivers/net/can/ti_hecc.c
2322 +@@ -637,6 +637,9 @@ static int ti_hecc_rx_poll(struct napi_struct *napi, int quota)
2323 + mbx_mask = hecc_read(priv, HECC_CANMIM);
2324 + mbx_mask |= HECC_TX_MBOX_MASK;
2325 + hecc_write(priv, HECC_CANMIM, mbx_mask);
2326 ++ } else {
2327 ++ /* repoll is done only if whole budget is used */
2328 ++ num_pkts = quota;
2329 + }
2330 +
2331 + return num_pkts;
2332 +diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
2333 +index b3d02759c226..b00358297424 100644
2334 +--- a/drivers/net/can/usb/ems_usb.c
2335 ++++ b/drivers/net/can/usb/ems_usb.c
2336 +@@ -288,6 +288,8 @@ static void ems_usb_read_interrupt_callback(struct urb *urb)
2337 +
2338 + case -ECONNRESET: /* unlink */
2339 + case -ENOENT:
2340 ++ case -EPIPE:
2341 ++ case -EPROTO:
2342 + case -ESHUTDOWN:
2343 + return;
2344 +
2345 +diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
2346 +index 9fdb0f0bfa06..c6dcf93675c0 100644
2347 +--- a/drivers/net/can/usb/esd_usb2.c
2348 ++++ b/drivers/net/can/usb/esd_usb2.c
2349 +@@ -393,6 +393,8 @@ static void esd_usb2_read_bulk_callback(struct urb *urb)
2350 + break;
2351 +
2352 + case -ENOENT:
2353 ++ case -EPIPE:
2354 ++ case -EPROTO:
2355 + case -ESHUTDOWN:
2356 + return;
2357 +
2358 +diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
2359 +index 9b18d96ef526..63587b8e6825 100644
2360 +--- a/drivers/net/can/usb/kvaser_usb.c
2361 ++++ b/drivers/net/can/usb/kvaser_usb.c
2362 +@@ -609,8 +609,8 @@ static int kvaser_usb_wait_msg(const struct kvaser_usb *dev, u8 id,
2363 + }
2364 +
2365 + if (pos + tmp->len > actual_len) {
2366 +- dev_err(dev->udev->dev.parent,
2367 +- "Format error\n");
2368 ++ dev_err_ratelimited(dev->udev->dev.parent,
2369 ++ "Format error\n");
2370 + break;
2371 + }
2372 +
2373 +@@ -813,6 +813,7 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv,
2374 + if (err) {
2375 + netdev_err(netdev, "Error transmitting URB\n");
2376 + usb_unanchor_urb(urb);
2377 ++ kfree(buf);
2378 + usb_free_urb(urb);
2379 + return err;
2380 + }
2381 +@@ -1325,6 +1326,8 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb)
2382 + case 0:
2383 + break;
2384 + case -ENOENT:
2385 ++ case -EPIPE:
2386 ++ case -EPROTO:
2387 + case -ESHUTDOWN:
2388 + return;
2389 + default:
2390 +@@ -1333,7 +1336,7 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb)
2391 + goto resubmit_urb;
2392 + }
2393 +
2394 +- while (pos <= urb->actual_length - MSG_HEADER_LEN) {
2395 ++ while (pos <= (int)(urb->actual_length - MSG_HEADER_LEN)) {
2396 + msg = urb->transfer_buffer + pos;
2397 +
2398 + /* The Kvaser firmware can only read and write messages that
2399 +@@ -1352,7 +1355,8 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb)
2400 + }
2401 +
2402 + if (pos + msg->len > urb->actual_length) {
2403 +- dev_err(dev->udev->dev.parent, "Format error\n");
2404 ++ dev_err_ratelimited(dev->udev->dev.parent,
2405 ++ "Format error\n");
2406 + break;
2407 + }
2408 +
2409 +@@ -1768,6 +1772,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
2410 + spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);
2411 +
2412 + usb_unanchor_urb(urb);
2413 ++ kfree(buf);
2414 +
2415 + stats->tx_dropped++;
2416 +
2417 +diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c
2418 +index 7f0272558bef..e0c24abce16c 100644
2419 +--- a/drivers/net/can/usb/mcba_usb.c
2420 ++++ b/drivers/net/can/usb/mcba_usb.c
2421 +@@ -592,6 +592,8 @@ static void mcba_usb_read_bulk_callback(struct urb *urb)
2422 + break;
2423 +
2424 + case -ENOENT:
2425 ++ case -EPIPE:
2426 ++ case -EPROTO:
2427 + case -ESHUTDOWN:
2428 + return;
2429 +
2430 +diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
2431 +index d000cb62d6ae..27861c417c94 100644
2432 +--- a/drivers/net/can/usb/usb_8dev.c
2433 ++++ b/drivers/net/can/usb/usb_8dev.c
2434 +@@ -524,6 +524,8 @@ static void usb_8dev_read_bulk_callback(struct urb *urb)
2435 + break;
2436 +
2437 + case -ENOENT:
2438 ++ case -EPIPE:
2439 ++ case -EPROTO:
2440 + case -ESHUTDOWN:
2441 + return;
2442 +
2443 +diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
2444 +index c20dd00a1cae..899e7d53e669 100644
2445 +--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
2446 ++++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
2447 +@@ -52,8 +52,7 @@ struct nfp_app;
2448 + #define NFP_FLOWER_MASK_ELEMENT_RS 1
2449 + #define NFP_FLOWER_MASK_HASH_BITS 10
2450 +
2451 +-#define NFP_FL_META_FLAG_NEW_MASK 128
2452 +-#define NFP_FL_META_FLAG_LAST_MASK 1
2453 ++#define NFP_FL_META_FLAG_MANAGE_MASK BIT(7)
2454 +
2455 + #define NFP_FL_MASK_REUSE_TIME_NS 40000
2456 + #define NFP_FL_MASK_ID_LOCATION 1
2457 +diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
2458 +index 3226ddc55f99..d9582ccc0025 100644
2459 +--- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c
2460 ++++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
2461 +@@ -282,7 +282,7 @@ nfp_check_mask_add(struct nfp_app *app, char *mask_data, u32 mask_len,
2462 + id = nfp_add_mask_table(app, mask_data, mask_len);
2463 + if (id < 0)
2464 + return false;
2465 +- *meta_flags |= NFP_FL_META_FLAG_NEW_MASK;
2466 ++ *meta_flags |= NFP_FL_META_FLAG_MANAGE_MASK;
2467 + }
2468 + *mask_id = id;
2469 +
2470 +@@ -299,6 +299,9 @@ nfp_check_mask_remove(struct nfp_app *app, char *mask_data, u32 mask_len,
2471 + if (!mask_entry)
2472 + return false;
2473 +
2474 ++ if (meta_flags)
2475 ++ *meta_flags &= ~NFP_FL_META_FLAG_MANAGE_MASK;
2476 ++
2477 + *mask_id = mask_entry->mask_id;
2478 + mask_entry->ref_cnt--;
2479 + if (!mask_entry->ref_cnt) {
2480 +@@ -306,7 +309,7 @@ nfp_check_mask_remove(struct nfp_app *app, char *mask_data, u32 mask_len,
2481 + nfp_release_mask_id(app, *mask_id);
2482 + kfree(mask_entry);
2483 + if (meta_flags)
2484 +- *meta_flags |= NFP_FL_META_FLAG_LAST_MASK;
2485 ++ *meta_flags |= NFP_FL_META_FLAG_MANAGE_MASK;
2486 + }
2487 +
2488 + return true;
2489 +diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
2490 +index d540a9dc77b3..1c43aca8162d 100644
2491 +--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
2492 ++++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
2493 +@@ -297,6 +297,8 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
2494 + netdev->netdev_ops = &nfp_repr_netdev_ops;
2495 + netdev->ethtool_ops = &nfp_port_ethtool_ops;
2496 +
2497 ++ netdev->max_mtu = pf_netdev->max_mtu;
2498 ++
2499 + SWITCHDEV_SET_OPS(netdev, &nfp_port_switchdev_ops);
2500 +
2501 + if (nfp_app_has_tc(app)) {
2502 +diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
2503 +index ed51018a813e..b9d8d71a6ecc 100644
2504 +--- a/drivers/net/geneve.c
2505 ++++ b/drivers/net/geneve.c
2506 +@@ -1503,6 +1503,7 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
2507 + {
2508 + struct geneve_dev *geneve = netdev_priv(dev);
2509 + struct ip_tunnel_info *info = &geneve->info;
2510 ++ bool metadata = geneve->collect_md;
2511 + __u8 tmp_vni[3];
2512 + __u32 vni;
2513 +
2514 +@@ -1511,32 +1512,24 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
2515 + if (nla_put_u32(skb, IFLA_GENEVE_ID, vni))
2516 + goto nla_put_failure;
2517 +
2518 +- if (rtnl_dereference(geneve->sock4)) {
2519 ++ if (!metadata && ip_tunnel_info_af(info) == AF_INET) {
2520 + if (nla_put_in_addr(skb, IFLA_GENEVE_REMOTE,
2521 + info->key.u.ipv4.dst))
2522 + goto nla_put_failure;
2523 +-
2524 + if (nla_put_u8(skb, IFLA_GENEVE_UDP_CSUM,
2525 + !!(info->key.tun_flags & TUNNEL_CSUM)))
2526 + goto nla_put_failure;
2527 +
2528 +- }
2529 +-
2530 + #if IS_ENABLED(CONFIG_IPV6)
2531 +- if (rtnl_dereference(geneve->sock6)) {
2532 ++ } else if (!metadata) {
2533 + if (nla_put_in6_addr(skb, IFLA_GENEVE_REMOTE6,
2534 + &info->key.u.ipv6.dst))
2535 + goto nla_put_failure;
2536 +-
2537 + if (nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_TX,
2538 + !(info->key.tun_flags & TUNNEL_CSUM)))
2539 + goto nla_put_failure;
2540 +-
2541 +- if (nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_RX,
2542 +- !geneve->use_udp6_rx_checksums))
2543 +- goto nla_put_failure;
2544 +- }
2545 + #endif
2546 ++ }
2547 +
2548 + if (nla_put_u8(skb, IFLA_GENEVE_TTL, info->key.ttl) ||
2549 + nla_put_u8(skb, IFLA_GENEVE_TOS, info->key.tos) ||
2550 +@@ -1546,10 +1539,13 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
2551 + if (nla_put_be16(skb, IFLA_GENEVE_PORT, info->key.tp_dst))
2552 + goto nla_put_failure;
2553 +
2554 +- if (geneve->collect_md) {
2555 +- if (nla_put_flag(skb, IFLA_GENEVE_COLLECT_METADATA))
2556 ++ if (metadata && nla_put_flag(skb, IFLA_GENEVE_COLLECT_METADATA))
2557 + goto nla_put_failure;
2558 +- }
2559 ++
2560 ++ if (nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_RX,
2561 ++ !geneve->use_udp6_rx_checksums))
2562 ++ goto nla_put_failure;
2563 ++
2564 + return 0;
2565 +
2566 + nla_put_failure:
2567 +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
2568 +index 613caca7dc02..b3fa8ae80465 100644
2569 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
2570 ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
2571 +@@ -4096,8 +4096,8 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
2572 + sdio_release_host(sdiodev->func[1]);
2573 + fail:
2574 + brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), err);
2575 +- device_release_driver(dev);
2576 + device_release_driver(&sdiodev->func[2]->dev);
2577 ++ device_release_driver(dev);
2578 + }
2579 +
2580 + struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
2581 +diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h
2582 +index 87b4434224a1..dfa111bb411e 100644
2583 +--- a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h
2584 ++++ b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h
2585 +@@ -68,6 +68,9 @@
2586 + * @IWL_MVM_DQA_CMD_QUEUE: a queue reserved for sending HCMDs to the FW
2587 + * @IWL_MVM_DQA_AUX_QUEUE: a queue reserved for aux frames
2588 + * @IWL_MVM_DQA_P2P_DEVICE_QUEUE: a queue reserved for P2P device frames
2589 ++ * @IWL_MVM_DQA_INJECT_MONITOR_QUEUE: a queue reserved for injection using
2590 ++ * monitor mode. Note this queue is the same as the queue for P2P device
2591 ++ * but we can't have active monitor mode along with P2P device anyway.
2592 + * @IWL_MVM_DQA_GCAST_QUEUE: a queue reserved for P2P GO/SoftAP GCAST frames
2593 + * @IWL_MVM_DQA_BSS_CLIENT_QUEUE: a queue reserved for BSS activity, to ensure
2594 + * that we are never left without the possibility to connect to an AP.
2595 +@@ -87,6 +90,7 @@ enum iwl_mvm_dqa_txq {
2596 + IWL_MVM_DQA_CMD_QUEUE = 0,
2597 + IWL_MVM_DQA_AUX_QUEUE = 1,
2598 + IWL_MVM_DQA_P2P_DEVICE_QUEUE = 2,
2599 ++ IWL_MVM_DQA_INJECT_MONITOR_QUEUE = 2,
2600 + IWL_MVM_DQA_GCAST_QUEUE = 3,
2601 + IWL_MVM_DQA_BSS_CLIENT_QUEUE = 4,
2602 + IWL_MVM_DQA_MIN_MGMT_QUEUE = 5,
2603 +diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
2604 +index e90abbfba718..ecd5c1df811c 100644
2605 +--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
2606 ++++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
2607 +@@ -117,6 +117,7 @@
2608 + #define FH_RSCSR_FRAME_INVALID 0x55550000
2609 + #define FH_RSCSR_FRAME_ALIGN 0x40
2610 + #define FH_RSCSR_RPA_EN BIT(25)
2611 ++#define FH_RSCSR_RADA_EN BIT(26)
2612 + #define FH_RSCSR_RXQ_POS 16
2613 + #define FH_RSCSR_RXQ_MASK 0x3F0000
2614 +
2615 +@@ -128,7 +129,8 @@ struct iwl_rx_packet {
2616 + * 31: flag flush RB request
2617 + * 30: flag ignore TC (terminal counter) request
2618 + * 29: flag fast IRQ request
2619 +- * 28-26: Reserved
2620 ++ * 28-27: Reserved
2621 ++ * 26: RADA enabled
2622 + * 25: Offload enabled
2623 + * 24: RPF enabled
2624 + * 23: RSS enabled
2625 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
2626 +index a2bf530eeae4..2f22e14e00fe 100644
2627 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
2628 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
2629 +@@ -787,7 +787,7 @@ static int iwl_mvm_mac_ctxt_cmd_listener(struct iwl_mvm *mvm,
2630 + u32 action)
2631 + {
2632 + struct iwl_mac_ctx_cmd cmd = {};
2633 +- u32 tfd_queue_msk = 0;
2634 ++ u32 tfd_queue_msk = BIT(mvm->snif_queue);
2635 + int ret;
2636 +
2637 + WARN_ON(vif->type != NL80211_IFTYPE_MONITOR);
2638 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
2639 +index 8dcdb522b846..2ec27ceb8af9 100644
2640 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
2641 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
2642 +@@ -954,6 +954,7 @@ struct iwl_mvm {
2643 +
2644 + /* Tx queues */
2645 + u16 aux_queue;
2646 ++ u16 snif_queue;
2647 + u16 probe_queue;
2648 + u16 p2p_dev_queue;
2649 +
2650 +@@ -1042,6 +1043,7 @@ struct iwl_mvm {
2651 + * @IWL_MVM_STATUS_ROC_AUX_RUNNING: AUX remain-on-channel is running
2652 + * @IWL_MVM_STATUS_D3_RECONFIG: D3 reconfiguration is being done
2653 + * @IWL_MVM_STATUS_FIRMWARE_RUNNING: firmware is running
2654 ++ * @IWL_MVM_STATUS_NEED_FLUSH_P2P: need to flush P2P bcast STA
2655 + */
2656 + enum iwl_mvm_status {
2657 + IWL_MVM_STATUS_HW_RFKILL,
2658 +@@ -1053,6 +1055,7 @@ enum iwl_mvm_status {
2659 + IWL_MVM_STATUS_ROC_AUX_RUNNING,
2660 + IWL_MVM_STATUS_D3_RECONFIG,
2661 + IWL_MVM_STATUS_FIRMWARE_RUNNING,
2662 ++ IWL_MVM_STATUS_NEED_FLUSH_P2P,
2663 + };
2664 +
2665 + /* Keep track of completed init configuration */
2666 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
2667 +index 231878969332..9fb40955d5f4 100644
2668 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
2669 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
2670 +@@ -622,6 +622,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
2671 + mvm->fw_restart = iwlwifi_mod_params.fw_restart ? -1 : 0;
2672 +
2673 + mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE;
2674 ++ mvm->snif_queue = IWL_MVM_DQA_INJECT_MONITOR_QUEUE;
2675 + mvm->probe_queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
2676 + mvm->p2p_dev_queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
2677 +
2678 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
2679 +index 248699c2c4bf..819e6f66a5b5 100644
2680 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
2681 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
2682 +@@ -232,8 +232,8 @@ static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
2683 +
2684 + static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
2685 + struct ieee80211_rx_status *stats,
2686 +- struct iwl_rx_mpdu_desc *desc, int queue,
2687 +- u8 *crypt_len)
2688 ++ struct iwl_rx_mpdu_desc *desc, u32 pkt_flags,
2689 ++ int queue, u8 *crypt_len)
2690 + {
2691 + u16 status = le16_to_cpu(desc->status);
2692 +
2693 +@@ -253,6 +253,8 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
2694 + return -1;
2695 +
2696 + stats->flag |= RX_FLAG_DECRYPTED;
2697 ++ if (pkt_flags & FH_RSCSR_RADA_EN)
2698 ++ stats->flag |= RX_FLAG_MIC_STRIPPED;
2699 + *crypt_len = IEEE80211_CCMP_HDR_LEN;
2700 + return 0;
2701 + case IWL_RX_MPDU_STATUS_SEC_TKIP:
2702 +@@ -270,6 +272,10 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
2703 + if ((status & IWL_RX_MPDU_STATUS_SEC_MASK) ==
2704 + IWL_RX_MPDU_STATUS_SEC_WEP)
2705 + *crypt_len = IEEE80211_WEP_IV_LEN;
2706 ++
2707 ++ if (pkt_flags & FH_RSCSR_RADA_EN)
2708 ++ stats->flag |= RX_FLAG_ICV_STRIPPED;
2709 ++
2710 + return 0;
2711 + case IWL_RX_MPDU_STATUS_SEC_EXT_ENC:
2712 + if (!(status & IWL_RX_MPDU_STATUS_MIC_OK))
2713 +@@ -810,7 +816,9 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
2714 +
2715 + rx_status = IEEE80211_SKB_RXCB(skb);
2716 +
2717 +- if (iwl_mvm_rx_crypto(mvm, hdr, rx_status, desc, queue, &crypt_len)) {
2718 ++ if (iwl_mvm_rx_crypto(mvm, hdr, rx_status, desc,
2719 ++ le32_to_cpu(pkt->len_n_flags), queue,
2720 ++ &crypt_len)) {
2721 + kfree_skb(skb);
2722 + return;
2723 + }
2724 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
2725 +index c4a343534c5e..0d7929799942 100644
2726 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
2727 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
2728 +@@ -1700,29 +1700,29 @@ void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
2729 + sta->sta_id = IWL_MVM_INVALID_STA;
2730 + }
2731 +
2732 +-static void iwl_mvm_enable_aux_queue(struct iwl_mvm *mvm)
2733 ++static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 *queue,
2734 ++ u8 sta_id, u8 fifo)
2735 + {
2736 + unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
2737 + mvm->cfg->base_params->wd_timeout :
2738 + IWL_WATCHDOG_DISABLED;
2739 +
2740 + if (iwl_mvm_has_new_tx_api(mvm)) {
2741 +- int queue = iwl_mvm_tvqm_enable_txq(mvm, mvm->aux_queue,
2742 +- mvm->aux_sta.sta_id,
2743 +- IWL_MAX_TID_COUNT,
2744 +- wdg_timeout);
2745 +- mvm->aux_queue = queue;
2746 ++ int tvqm_queue =
2747 ++ iwl_mvm_tvqm_enable_txq(mvm, *queue, sta_id,
2748 ++ IWL_MAX_TID_COUNT,
2749 ++ wdg_timeout);
2750 ++ *queue = tvqm_queue;
2751 + } else {
2752 + struct iwl_trans_txq_scd_cfg cfg = {
2753 +- .fifo = IWL_MVM_TX_FIFO_MCAST,
2754 +- .sta_id = mvm->aux_sta.sta_id,
2755 ++ .fifo = fifo,
2756 ++ .sta_id = sta_id,
2757 + .tid = IWL_MAX_TID_COUNT,
2758 + .aggregate = false,
2759 + .frame_limit = IWL_FRAME_LIMIT,
2760 + };
2761 +
2762 +- iwl_mvm_enable_txq(mvm, mvm->aux_queue, mvm->aux_queue, 0, &cfg,
2763 +- wdg_timeout);
2764 ++ iwl_mvm_enable_txq(mvm, *queue, *queue, 0, &cfg, wdg_timeout);
2765 + }
2766 + }
2767 +
2768 +@@ -1741,7 +1741,9 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
2769 +
2770 + /* Map Aux queue to fifo - needs to happen before adding Aux station */
2771 + if (!iwl_mvm_has_new_tx_api(mvm))
2772 +- iwl_mvm_enable_aux_queue(mvm);
2773 ++ iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
2774 ++ mvm->aux_sta.sta_id,
2775 ++ IWL_MVM_TX_FIFO_MCAST);
2776 +
2777 + ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
2778 + MAC_INDEX_AUX, 0);
2779 +@@ -1755,7 +1757,9 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
2780 + * to firmware so enable queue here - after the station was added
2781 + */
2782 + if (iwl_mvm_has_new_tx_api(mvm))
2783 +- iwl_mvm_enable_aux_queue(mvm);
2784 ++ iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
2785 ++ mvm->aux_sta.sta_id,
2786 ++ IWL_MVM_TX_FIFO_MCAST);
2787 +
2788 + return 0;
2789 + }
2790 +@@ -1763,10 +1767,31 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
2791 + int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2792 + {
2793 + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2794 ++ int ret;
2795 +
2796 + lockdep_assert_held(&mvm->mutex);
2797 +- return iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr,
2798 ++
2799 ++ /* Map snif queue to fifo - must happen before adding snif station */
2800 ++ if (!iwl_mvm_has_new_tx_api(mvm))
2801 ++ iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
2802 ++ mvm->snif_sta.sta_id,
2803 ++ IWL_MVM_TX_FIFO_BE);
2804 ++
2805 ++ ret = iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr,
2806 + mvmvif->id, 0);
2807 ++ if (ret)
2808 ++ return ret;
2809 ++
2810 ++ /*
2811 ++ * For 22000 firmware and on we cannot add queue to a station unknown
2812 ++ * to firmware so enable queue here - after the station was added
2813 ++ */
2814 ++ if (iwl_mvm_has_new_tx_api(mvm))
2815 ++ iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
2816 ++ mvm->snif_sta.sta_id,
2817 ++ IWL_MVM_TX_FIFO_BE);
2818 ++
2819 ++ return 0;
2820 + }
2821 +
2822 + int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2823 +@@ -1775,6 +1800,8 @@ int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2824 +
2825 + lockdep_assert_held(&mvm->mutex);
2826 +
2827 ++ iwl_mvm_disable_txq(mvm, mvm->snif_queue, mvm->snif_queue,
2828 ++ IWL_MAX_TID_COUNT, 0);
2829 + ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
2830 + if (ret)
2831 + IWL_WARN(mvm, "Failed sending remove station\n");
2832 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
2833 +index 4d0314912e94..e25cda9fbf6c 100644
2834 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
2835 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
2836 +@@ -132,6 +132,24 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk)
2837 + * executed, and a new time event means a new command.
2838 + */
2839 + iwl_mvm_flush_sta(mvm, &mvm->aux_sta, true, CMD_ASYNC);
2840 ++
2841 ++ /* Do the same for the P2P device queue (STA) */
2842 ++ if (test_and_clear_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status)) {
2843 ++ struct iwl_mvm_vif *mvmvif;
2844 ++
2845 ++ /*
2846 ++ * NB: access to this pointer would be racy, but the flush bit
2847 ++ * can only be set when we had a P2P-Device VIF, and we have a
2848 ++ * flush of this work in iwl_mvm_prepare_mac_removal() so it's
2849 ++ * not really racy.
2850 ++ */
2851 ++
2852 ++ if (!WARN_ON(!mvm->p2p_device_vif)) {
2853 ++ mvmvif = iwl_mvm_vif_from_mac80211(mvm->p2p_device_vif);
2854 ++ iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true,
2855 ++ CMD_ASYNC);
2856 ++ }
2857 ++ }
2858 + }
2859 +
2860 + static void iwl_mvm_roc_finished(struct iwl_mvm *mvm)
2861 +@@ -855,10 +873,12 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm)
2862 +
2863 + mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
2864 +
2865 +- if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE)
2866 ++ if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
2867 + iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
2868 +- else
2869 ++ set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);
2870 ++ } else {
2871 + iwl_mvm_remove_aux_roc_te(mvm, mvmvif, te_data);
2872 ++ }
2873 +
2874 + iwl_mvm_roc_finished(mvm);
2875 + }
2876 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
2877 +index 6f2e2af23219..887a504ce64a 100644
2878 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
2879 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
2880 +@@ -657,7 +657,8 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
2881 + if (ap_sta_id != IWL_MVM_INVALID_STA)
2882 + sta_id = ap_sta_id;
2883 + } else if (info.control.vif->type == NL80211_IFTYPE_MONITOR) {
2884 +- queue = mvm->aux_queue;
2885 ++ queue = mvm->snif_queue;
2886 ++ sta_id = mvm->snif_sta.sta_id;
2887 + }
2888 + }
2889 +
2890 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
2891 +index 2ea74abad73d..53e269d54050 100644
2892 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
2893 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
2894 +@@ -1143,9 +1143,18 @@ unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
2895 + unsigned int default_timeout =
2896 + cmd_q ? IWL_DEF_WD_TIMEOUT : mvm->cfg->base_params->wd_timeout;
2897 +
2898 +- if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS))
2899 ++ if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS)) {
2900 ++ /*
2901 ++ * We can't know when the station is asleep or awake, so we
2902 ++ * must disable the queue hang detection.
2903 ++ */
2904 ++ if (fw_has_capa(&mvm->fw->ucode_capa,
2905 ++ IWL_UCODE_TLV_CAPA_STA_PM_NOTIF) &&
2906 ++ vif && vif->type == NL80211_IFTYPE_AP)
2907 ++ return IWL_WATCHDOG_DISABLED;
2908 + return iwlmvm_mod_params.tfd_q_hang_detect ?
2909 + default_timeout : IWL_WATCHDOG_DISABLED;
2910 ++ }
2911 +
2912 + trigger = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS);
2913 + txq_timer = (void *)trigger->data;
2914 +diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
2915 +index 548e1928430d..0f7bd37bf172 100644
2916 +--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
2917 ++++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
2918 +@@ -551,6 +551,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
2919 + {IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)},
2920 + {IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)},
2921 + {IWL_PCI_DEVICE(0x271B, 0x0214, iwl9260_2ac_cfg)},
2922 ++ {IWL_PCI_DEVICE(0x271C, 0x0214, iwl9260_2ac_cfg)},
2923 + {IWL_PCI_DEVICE(0x2720, 0x0034, iwl9560_2ac_cfg)},
2924 + {IWL_PCI_DEVICE(0x2720, 0x0038, iwl9560_2ac_cfg)},
2925 + {IWL_PCI_DEVICE(0x2720, 0x003C, iwl9560_2ac_cfg)},
2926 +@@ -662,6 +663,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
2927 + {IWL_PCI_DEVICE(0x2720, 0x0310, iwla000_2ac_cfg_hr_cdb)},
2928 + {IWL_PCI_DEVICE(0x40C0, 0x0000, iwla000_2ax_cfg_hr)},
2929 + {IWL_PCI_DEVICE(0x40C0, 0x0A10, iwla000_2ax_cfg_hr)},
2930 ++ {IWL_PCI_DEVICE(0xA0F0, 0x0000, iwla000_2ax_cfg_hr)},
2931 +
2932 + #endif /* CONFIG_IWLMVM */
2933 +
2934 +diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
2935 +index 6467ffac9811..a59b54328c07 100644
2936 +--- a/drivers/net/wireless/mac80211_hwsim.c
2937 ++++ b/drivers/net/wireless/mac80211_hwsim.c
2938 +@@ -3108,6 +3108,7 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
2939 + {
2940 + struct hwsim_new_radio_params param = { 0 };
2941 + const char *hwname = NULL;
2942 ++ int ret;
2943 +
2944 + param.reg_strict = info->attrs[HWSIM_ATTR_REG_STRICT_REG];
2945 + param.p2p_device = info->attrs[HWSIM_ATTR_SUPPORT_P2P_DEVICE];
2946 +@@ -3147,7 +3148,9 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
2947 + param.regd = hwsim_world_regdom_custom[idx];
2948 + }
2949 +
2950 +- return mac80211_hwsim_new_radio(info, &param);
2951 ++ ret = mac80211_hwsim_new_radio(info, &param);
2952 ++ kfree(hwname);
2953 ++ return ret;
2954 + }
2955 +
2956 + static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info)
2957 +diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c
2958 +index 81df09dd2636..f90c10b3c921 100644
2959 +--- a/drivers/net/wireless/rsi/rsi_91x_usb.c
2960 ++++ b/drivers/net/wireless/rsi/rsi_91x_usb.c
2961 +@@ -162,13 +162,13 @@ static int rsi_usb_reg_read(struct usb_device *usbdev,
2962 + u8 *buf;
2963 + int status = -ENOMEM;
2964 +
2965 ++ if (len > RSI_USB_CTRL_BUF_SIZE)
2966 ++ return -EINVAL;
2967 ++
2968 + buf = kmalloc(RSI_USB_CTRL_BUF_SIZE, GFP_KERNEL);
2969 + if (!buf)
2970 + return status;
2971 +
2972 +- if (len > RSI_USB_CTRL_BUF_SIZE)
2973 +- return -EINVAL;
2974 +-
2975 + status = usb_control_msg(usbdev,
2976 + usb_rcvctrlpipe(usbdev, 0),
2977 + USB_VENDOR_REGISTER_READ,
2978 +@@ -207,13 +207,13 @@ static int rsi_usb_reg_write(struct usb_device *usbdev,
2979 + u8 *usb_reg_buf;
2980 + int status = -ENOMEM;
2981 +
2982 ++ if (len > RSI_USB_CTRL_BUF_SIZE)
2983 ++ return -EINVAL;
2984 ++
2985 + usb_reg_buf = kmalloc(RSI_USB_CTRL_BUF_SIZE, GFP_KERNEL);
2986 + if (!usb_reg_buf)
2987 + return status;
2988 +
2989 +- if (len > RSI_USB_CTRL_BUF_SIZE)
2990 +- return -EINVAL;
2991 +-
2992 + usb_reg_buf[0] = (value & 0x00ff);
2993 + usb_reg_buf[1] = (value & 0xff00) >> 8;
2994 + usb_reg_buf[2] = 0x0;
2995 +diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
2996 +index 71b944748304..c5fe7d4a9065 100644
2997 +--- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
2998 ++++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
2999 +@@ -408,12 +408,21 @@ static int armada_37xx_gpio_direction_output(struct gpio_chip *chip,
3000 + {
3001 + struct armada_37xx_pinctrl *info = gpiochip_get_data(chip);
3002 + unsigned int reg = OUTPUT_EN;
3003 +- unsigned int mask;
3004 ++ unsigned int mask, val, ret;
3005 +
3006 + armada_37xx_update_reg(&reg, offset);
3007 + mask = BIT(offset);
3008 +
3009 +- return regmap_update_bits(info->regmap, reg, mask, mask);
3010 ++ ret = regmap_update_bits(info->regmap, reg, mask, mask);
3011 ++
3012 ++ if (ret)
3013 ++ return ret;
3014 ++
3015 ++ reg = OUTPUT_VAL;
3016 ++ val = value ? mask : 0;
3017 ++ regmap_update_bits(info->regmap, reg, mask, val);
3018 ++
3019 ++ return 0;
3020 + }
3021 +
3022 + static int armada_37xx_gpio_get(struct gpio_chip *chip, unsigned int offset)
3023 +diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
3024 +index 5beb0c361076..76afe1449cab 100644
3025 +--- a/drivers/rapidio/devices/rio_mport_cdev.c
3026 ++++ b/drivers/rapidio/devices/rio_mport_cdev.c
3027 +@@ -963,7 +963,8 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
3028 + req->sgt.sgl, req->sgt.nents, dir);
3029 + if (nents == -EFAULT) {
3030 + rmcd_error("Failed to map SG list");
3031 +- return -EFAULT;
3032 ++ ret = -EFAULT;
3033 ++ goto err_pg;
3034 + }
3035 +
3036 + ret = do_dma_request(req, xfer, sync, nents);
3037 +diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
3038 +index bcc1694cebcd..635cfa1f2ace 100644
3039 +--- a/drivers/scsi/scsi_lib.c
3040 ++++ b/drivers/scsi/scsi_lib.c
3041 +@@ -2126,11 +2126,13 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
3042 + q->limits.cluster = 0;
3043 +
3044 + /*
3045 +- * set a reasonable default alignment on word boundaries: the
3046 +- * host and device may alter it using
3047 +- * blk_queue_update_dma_alignment() later.
3048 ++ * Set a reasonable default alignment: The larger of 32-byte (dword),
3049 ++ * which is a common minimum for HBAs, and the minimum DMA alignment,
3050 ++ * which is set by the platform.
3051 ++ *
3052 ++ * Devices that require a bigger alignment can increase it later.
3053 + */
3054 +- blk_queue_dma_alignment(q, 0x03);
3055 ++ blk_queue_dma_alignment(q, max(4, dma_get_cache_alignment()) - 1);
3056 + }
3057 + EXPORT_SYMBOL_GPL(__scsi_init_queue);
3058 +
3059 +diff --git a/drivers/tty/serdev/serdev-ttyport.c b/drivers/tty/serdev/serdev-ttyport.c
3060 +index 302018d67efa..7f785d77ba7f 100644
3061 +--- a/drivers/tty/serdev/serdev-ttyport.c
3062 ++++ b/drivers/tty/serdev/serdev-ttyport.c
3063 +@@ -35,23 +35,41 @@ static int ttyport_receive_buf(struct tty_port *port, const unsigned char *cp,
3064 + {
3065 + struct serdev_controller *ctrl = port->client_data;
3066 + struct serport *serport = serdev_controller_get_drvdata(ctrl);
3067 ++ int ret;
3068 +
3069 + if (!test_bit(SERPORT_ACTIVE, &serport->flags))
3070 + return 0;
3071 +
3072 +- return serdev_controller_receive_buf(ctrl, cp, count);
3073 ++ ret = serdev_controller_receive_buf(ctrl, cp, count);
3074 ++
3075 ++ dev_WARN_ONCE(&ctrl->dev, ret < 0 || ret > count,
3076 ++ "receive_buf returns %d (count = %zu)\n",
3077 ++ ret, count);
3078 ++ if (ret < 0)
3079 ++ return 0;
3080 ++ else if (ret > count)
3081 ++ return count;
3082 ++
3083 ++ return ret;
3084 + }
3085 +
3086 + static void ttyport_write_wakeup(struct tty_port *port)
3087 + {
3088 + struct serdev_controller *ctrl = port->client_data;
3089 + struct serport *serport = serdev_controller_get_drvdata(ctrl);
3090 ++ struct tty_struct *tty;
3091 ++
3092 ++ tty = tty_port_tty_get(port);
3093 ++ if (!tty)
3094 ++ return;
3095 +
3096 +- if (test_and_clear_bit(TTY_DO_WRITE_WAKEUP, &port->tty->flags) &&
3097 ++ if (test_and_clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags) &&
3098 + test_bit(SERPORT_ACTIVE, &serport->flags))
3099 + serdev_controller_write_wakeup(ctrl);
3100 +
3101 +- wake_up_interruptible_poll(&port->tty->write_wait, POLLOUT);
3102 ++ wake_up_interruptible_poll(&tty->write_wait, POLLOUT);
3103 ++
3104 ++ tty_kref_put(tty);
3105 + }
3106 +
3107 + static const struct tty_port_client_operations client_ops = {
3108 +@@ -131,8 +149,10 @@ static void ttyport_close(struct serdev_controller *ctrl)
3109 +
3110 + clear_bit(SERPORT_ACTIVE, &serport->flags);
3111 +
3112 ++ tty_lock(tty);
3113 + if (tty->ops->close)
3114 + tty->ops->close(tty, NULL);
3115 ++ tty_unlock(tty);
3116 +
3117 + tty_release_struct(tty, serport->tty_idx);
3118 + }
3119 +diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
3120 +index ef8f7d63a8f0..0202e5132fa7 100644
3121 +--- a/drivers/usb/gadget/function/f_fs.c
3122 ++++ b/drivers/usb/gadget/function/f_fs.c
3123 +@@ -2286,9 +2286,18 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
3124 + int i;
3125 +
3126 + if (len < sizeof(*d) ||
3127 +- d->bFirstInterfaceNumber >= ffs->interfaces_count ||
3128 +- !d->Reserved1)
3129 ++ d->bFirstInterfaceNumber >= ffs->interfaces_count)
3130 + return -EINVAL;
3131 ++ if (d->Reserved1 != 1) {
3132 ++ /*
3133 ++ * According to the spec, Reserved1 must be set to 1
3134 ++ * but older kernels incorrectly rejected non-zero
3135 ++ * values. We fix it here to avoid returning EINVAL
3136 ++ * in response to values we used to accept.
3137 ++ */
3138 ++ pr_debug("usb_ext_compat_desc::Reserved1 forced to 1\n");
3139 ++ d->Reserved1 = 1;
3140 ++ }
3141 + for (i = 0; i < ARRAY_SIZE(d->Reserved2); ++i)
3142 + if (d->Reserved2[i])
3143 + return -EINVAL;
3144 +diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
3145 +index d41d07aae0ce..def1b05ffca0 100644
3146 +--- a/drivers/usb/gadget/udc/core.c
3147 ++++ b/drivers/usb/gadget/udc/core.c
3148 +@@ -1080,8 +1080,12 @@ static inline void usb_gadget_udc_stop(struct usb_udc *udc)
3149 + static inline void usb_gadget_udc_set_speed(struct usb_udc *udc,
3150 + enum usb_device_speed speed)
3151 + {
3152 +- if (udc->gadget->ops->udc_set_speed)
3153 +- udc->gadget->ops->udc_set_speed(udc->gadget, speed);
3154 ++ if (udc->gadget->ops->udc_set_speed) {
3155 ++ enum usb_device_speed s;
3156 ++
3157 ++ s = min(speed, udc->gadget->max_speed);
3158 ++ udc->gadget->ops->udc_set_speed(udc->gadget, s);
3159 ++ }
3160 + }
3161 +
3162 + /**
3163 +diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
3164 +index 63a206122058..6b3e8adb64e6 100644
3165 +--- a/drivers/usb/gadget/udc/renesas_usb3.c
3166 ++++ b/drivers/usb/gadget/udc/renesas_usb3.c
3167 +@@ -254,7 +254,7 @@
3168 + #define USB3_EP0_SS_MAX_PACKET_SIZE 512
3169 + #define USB3_EP0_HSFS_MAX_PACKET_SIZE 64
3170 + #define USB3_EP0_BUF_SIZE 8
3171 +-#define USB3_MAX_NUM_PIPES 30
3172 ++#define USB3_MAX_NUM_PIPES 6 /* This includes PIPE 0 */
3173 + #define USB3_WAIT_US 3
3174 + #define USB3_DMA_NUM_SETTING_AREA 4
3175 + /*
3176 +diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
3177 +index 48230a5e12f2..bf7ff3934d7f 100644
3178 +--- a/drivers/virtio/virtio.c
3179 ++++ b/drivers/virtio/virtio.c
3180 +@@ -333,6 +333,8 @@ int register_virtio_device(struct virtio_device *dev)
3181 + /* device_register() causes the bus infrastructure to look for a
3182 + * matching driver. */
3183 + err = device_register(&dev->dev);
3184 ++ if (err)
3185 ++ ida_simple_remove(&virtio_index_ida, dev->index);
3186 + out:
3187 + if (err)
3188 + virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED);
3189 +diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c
3190 +index 782d4d05a53b..c7475867a52b 100644
3191 +--- a/fs/afs/cmservice.c
3192 ++++ b/fs/afs/cmservice.c
3193 +@@ -127,6 +127,9 @@ bool afs_cm_incoming_call(struct afs_call *call)
3194 + case CBProbe:
3195 + call->type = &afs_SRXCBProbe;
3196 + return true;
3197 ++ case CBProbeUuid:
3198 ++ call->type = &afs_SRXCBProbeUuid;
3199 ++ return true;
3200 + case CBTellMeAboutYourself:
3201 + call->type = &afs_SRXCBTellMeAboutYourself;
3202 + return true;
3203 +diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
3204 +index 0bf191f0dbaf..9f715c3edcf9 100644
3205 +--- a/fs/afs/rxrpc.c
3206 ++++ b/fs/afs/rxrpc.c
3207 +@@ -377,8 +377,17 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
3208 + */
3209 + tx_total_len = call->request_size;
3210 + if (call->send_pages) {
3211 +- tx_total_len += call->last_to - call->first_offset;
3212 +- tx_total_len += (call->last - call->first) * PAGE_SIZE;
3213 ++ if (call->last == call->first) {
3214 ++ tx_total_len += call->last_to - call->first_offset;
3215 ++ } else {
3216 ++ /* It looks mathematically like you should be able to
3217 ++ * combine the following lines with the ones above, but
3218 ++ * unsigned arithmetic is fun when it wraps...
3219 ++ */
3220 ++ tx_total_len += PAGE_SIZE - call->first_offset;
3221 ++ tx_total_len += call->last_to;
3222 ++ tx_total_len += (call->last - call->first - 1) * PAGE_SIZE;
3223 ++ }
3224 + }
3225 +
3226 + /* create a call */
3227 +diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
3228 +index 6d49db7d86be..e2bb2a065741 100644
3229 +--- a/fs/btrfs/ctree.c
3230 ++++ b/fs/btrfs/ctree.c
3231 +@@ -1032,14 +1032,17 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
3232 + root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
3233 + !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
3234 + ret = btrfs_inc_ref(trans, root, buf, 1);
3235 +- BUG_ON(ret); /* -ENOMEM */
3236 ++ if (ret)
3237 ++ return ret;
3238 +
3239 + if (root->root_key.objectid ==
3240 + BTRFS_TREE_RELOC_OBJECTID) {
3241 + ret = btrfs_dec_ref(trans, root, buf, 0);
3242 +- BUG_ON(ret); /* -ENOMEM */
3243 ++ if (ret)
3244 ++ return ret;
3245 + ret = btrfs_inc_ref(trans, root, cow, 1);
3246 +- BUG_ON(ret); /* -ENOMEM */
3247 ++ if (ret)
3248 ++ return ret;
3249 + }
3250 + new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
3251 + } else {
3252 +@@ -1049,7 +1052,8 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
3253 + ret = btrfs_inc_ref(trans, root, cow, 1);
3254 + else
3255 + ret = btrfs_inc_ref(trans, root, cow, 0);
3256 +- BUG_ON(ret); /* -ENOMEM */
3257 ++ if (ret)
3258 ++ return ret;
3259 + }
3260 + if (new_flags != 0) {
3261 + int level = btrfs_header_level(buf);
3262 +@@ -1068,9 +1072,11 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
3263 + ret = btrfs_inc_ref(trans, root, cow, 1);
3264 + else
3265 + ret = btrfs_inc_ref(trans, root, cow, 0);
3266 +- BUG_ON(ret); /* -ENOMEM */
3267 ++ if (ret)
3268 ++ return ret;
3269 + ret = btrfs_dec_ref(trans, root, buf, 1);
3270 +- BUG_ON(ret); /* -ENOMEM */
3271 ++ if (ret)
3272 ++ return ret;
3273 + }
3274 + clean_tree_block(fs_info, buf);
3275 + *last_ref = 1;
3276 +diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
3277 +index e4774c02d922..d227d8514b25 100644
3278 +--- a/fs/btrfs/extent-tree.c
3279 ++++ b/fs/btrfs/extent-tree.c
3280 +@@ -9283,6 +9283,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
3281 + ret = btrfs_del_root(trans, fs_info, &root->root_key);
3282 + if (ret) {
3283 + btrfs_abort_transaction(trans, ret);
3284 ++ err = ret;
3285 + goto out_end_trans;
3286 + }
3287 +
3288 +diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
3289 +index 6ce467872376..b8372095ba0a 100644
3290 +--- a/fs/f2fs/file.c
3291 ++++ b/fs/f2fs/file.c
3292 +@@ -2697,6 +2697,7 @@ static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
3293 +
3294 + err = f2fs_preallocate_blocks(iocb, from);
3295 + if (err) {
3296 ++ clear_inode_flag(inode, FI_NO_PREALLOC);
3297 + inode_unlock(inode);
3298 + return err;
3299 + }
3300 +diff --git a/fs/fcntl.c b/fs/fcntl.c
3301 +index 8d78ffd7b399..6fd311367efc 100644
3302 +--- a/fs/fcntl.c
3303 ++++ b/fs/fcntl.c
3304 +@@ -632,9 +632,8 @@ COMPAT_SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd,
3305 + if (err)
3306 + break;
3307 + err = fixup_compat_flock(&flock);
3308 +- if (err)
3309 +- return err;
3310 +- err = put_compat_flock(&flock, compat_ptr(arg));
3311 ++ if (!err)
3312 ++ err = put_compat_flock(&flock, compat_ptr(arg));
3313 + break;
3314 + case F_GETLK64:
3315 + case F_OFD_GETLK:
3316 +diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
3317 +index b03b3bc05f96..bf2c43635062 100644
3318 +--- a/fs/nfs/dir.c
3319 ++++ b/fs/nfs/dir.c
3320 +@@ -2064,7 +2064,7 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
3321 + * should mark the directories for revalidation.
3322 + */
3323 + d_move(old_dentry, new_dentry);
3324 +- nfs_set_verifier(new_dentry,
3325 ++ nfs_set_verifier(old_dentry,
3326 + nfs_save_change_attribute(new_dir));
3327 + } else if (error == -ENOENT)
3328 + nfs_dentry_handle_enoent(old_dentry);
3329 +diff --git a/fs/pipe.c b/fs/pipe.c
3330 +index 349c9d56d4b3..3909c55ed389 100644
3331 +--- a/fs/pipe.c
3332 ++++ b/fs/pipe.c
3333 +@@ -1125,7 +1125,7 @@ int pipe_proc_fn(struct ctl_table *table, int write, void __user *buf,
3334 + {
3335 + int ret;
3336 +
3337 +- ret = proc_dointvec_minmax(table, write, buf, lenp, ppos);
3338 ++ ret = proc_douintvec_minmax(table, write, buf, lenp, ppos);
3339 + if (ret < 0 || !write)
3340 + return ret;
3341 +
3342 +diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
3343 +index 4ec5b7f45401..63350906961a 100644
3344 +--- a/fs/xfs/xfs_inode.c
3345 ++++ b/fs/xfs/xfs_inode.c
3346 +@@ -2378,6 +2378,7 @@ xfs_ifree_cluster(
3347 + */
3348 + if (ip->i_ino != inum + i) {
3349 + xfs_iunlock(ip, XFS_ILOCK_EXCL);
3350 ++ rcu_read_unlock();
3351 + continue;
3352 + }
3353 + }
3354 +diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
3355 +index 7653ea66874d..46930f82a988 100644
3356 +--- a/include/linux/dma-mapping.h
3357 ++++ b/include/linux/dma-mapping.h
3358 +@@ -697,7 +697,6 @@ static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
3359 + return ret;
3360 + }
3361 +
3362 +-#ifdef CONFIG_HAS_DMA
3363 + static inline int dma_get_cache_alignment(void)
3364 + {
3365 + #ifdef ARCH_DMA_MINALIGN
3366 +@@ -705,7 +704,6 @@ static inline int dma_get_cache_alignment(void)
3367 + #endif
3368 + return 1;
3369 + }
3370 +-#endif
3371 +
3372 + /* flags for the coherent memory api */
3373 + #define DMA_MEMORY_EXCLUSIVE 0x01
3374 +diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h
3375 +index 6dfec4d638df..872f930f1b06 100644
3376 +--- a/include/linux/genalloc.h
3377 ++++ b/include/linux/genalloc.h
3378 +@@ -32,6 +32,7 @@
3379 +
3380 + #include <linux/types.h>
3381 + #include <linux/spinlock_types.h>
3382 ++#include <linux/atomic.h>
3383 +
3384 + struct device;
3385 + struct device_node;
3386 +@@ -71,7 +72,7 @@ struct gen_pool {
3387 + */
3388 + struct gen_pool_chunk {
3389 + struct list_head next_chunk; /* next chunk in pool */
3390 +- atomic_t avail;
3391 ++ atomic_long_t avail;
3392 + phys_addr_t phys_addr; /* physical starting address of memory chunk */
3393 + unsigned long start_addr; /* start address of memory chunk */
3394 + unsigned long end_addr; /* end address of memory chunk (inclusive) */
3395 +diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
3396 +index 6431087816ba..ba74eaa8eadf 100644
3397 +--- a/include/linux/hyperv.h
3398 ++++ b/include/linux/hyperv.h
3399 +@@ -708,6 +708,7 @@ struct vmbus_channel {
3400 + u8 monitor_bit;
3401 +
3402 + bool rescind; /* got rescind msg */
3403 ++ struct completion rescind_event;
3404 +
3405 + u32 ringbuffer_gpadlhandle;
3406 +
3407 +diff --git a/include/linux/iio/timer/stm32-lptim-trigger.h b/include/linux/iio/timer/stm32-lptim-trigger.h
3408 +index 34d59bfdce2d..464458d20b16 100644
3409 +--- a/include/linux/iio/timer/stm32-lptim-trigger.h
3410 ++++ b/include/linux/iio/timer/stm32-lptim-trigger.h
3411 +@@ -16,11 +16,14 @@
3412 + #define LPTIM2_OUT "lptim2_out"
3413 + #define LPTIM3_OUT "lptim3_out"
3414 +
3415 +-#if IS_ENABLED(CONFIG_IIO_STM32_LPTIMER_TRIGGER)
3416 ++#if IS_REACHABLE(CONFIG_IIO_STM32_LPTIMER_TRIGGER)
3417 + bool is_stm32_lptim_trigger(struct iio_trigger *trig);
3418 + #else
3419 + static inline bool is_stm32_lptim_trigger(struct iio_trigger *trig)
3420 + {
3421 ++#if IS_ENABLED(CONFIG_IIO_STM32_LPTIMER_TRIGGER)
3422 ++ pr_warn_once("stm32 lptim_trigger not linked in\n");
3423 ++#endif
3424 + return false;
3425 + }
3426 + #endif
3427 +diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
3428 +index e32dfe098e82..40839c02d28c 100644
3429 +--- a/include/linux/sysfs.h
3430 ++++ b/include/linux/sysfs.h
3431 +@@ -117,6 +117,12 @@ struct attribute_group {
3432 + .show = _name##_show, \
3433 + }
3434 +
3435 ++#define __ATTR_RO_MODE(_name, _mode) { \
3436 ++ .attr = { .name = __stringify(_name), \
3437 ++ .mode = VERIFY_OCTAL_PERMISSIONS(_mode) }, \
3438 ++ .show = _name##_show, \
3439 ++}
3440 ++
3441 + #define __ATTR_WO(_name) { \
3442 + .attr = { .name = __stringify(_name), .mode = S_IWUSR }, \
3443 + .store = _name##_store, \
3444 +diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
3445 +index 6c0dc6155ee7..a966d281dedc 100644
3446 +--- a/include/scsi/libsas.h
3447 ++++ b/include/scsi/libsas.h
3448 +@@ -165,11 +165,11 @@ struct expander_device {
3449 +
3450 + struct sata_device {
3451 + unsigned int class;
3452 +- struct smp_resp rps_resp; /* report_phy_sata_resp */
3453 + u8 port_no; /* port number, if this is a PM (Port) */
3454 +
3455 + struct ata_port *ap;
3456 + struct ata_host ata_host;
3457 ++ struct smp_resp rps_resp ____cacheline_aligned; /* report_phy_sata_resp */
3458 + u8 fis[ATA_RESP_FIS_SIZE];
3459 + };
3460 +
3461 +diff --git a/kernel/bpf/percpu_freelist.c b/kernel/bpf/percpu_freelist.c
3462 +index 5c51d1985b51..673fa6fe2d73 100644
3463 +--- a/kernel/bpf/percpu_freelist.c
3464 ++++ b/kernel/bpf/percpu_freelist.c
3465 +@@ -78,8 +78,10 @@ struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
3466 + {
3467 + struct pcpu_freelist_head *head;
3468 + struct pcpu_freelist_node *node;
3469 ++ unsigned long flags;
3470 + int orig_cpu, cpu;
3471 +
3472 ++ local_irq_save(flags);
3473 + orig_cpu = cpu = raw_smp_processor_id();
3474 + while (1) {
3475 + head = per_cpu_ptr(s->freelist, cpu);
3476 +@@ -87,14 +89,16 @@ struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
3477 + node = head->first;
3478 + if (node) {
3479 + head->first = node->next;
3480 +- raw_spin_unlock(&head->lock);
3481 ++ raw_spin_unlock_irqrestore(&head->lock, flags);
3482 + return node;
3483 + }
3484 + raw_spin_unlock(&head->lock);
3485 + cpu = cpumask_next(cpu, cpu_possible_mask);
3486 + if (cpu >= nr_cpu_ids)
3487 + cpu = 0;
3488 +- if (cpu == orig_cpu)
3489 ++ if (cpu == orig_cpu) {
3490 ++ local_irq_restore(flags);
3491 + return NULL;
3492 ++ }
3493 + }
3494 + }
3495 +diff --git a/kernel/cpu.c b/kernel/cpu.c
3496 +index 04892a82f6ac..7891aecc6aec 100644
3497 +--- a/kernel/cpu.c
3498 ++++ b/kernel/cpu.c
3499 +@@ -1289,11 +1289,6 @@ static struct cpuhp_step cpuhp_bp_states[] = {
3500 + .teardown.single = NULL,
3501 + .cant_stop = true,
3502 + },
3503 +- [CPUHP_AP_SMPCFD_DYING] = {
3504 +- .name = "smpcfd:dying",
3505 +- .startup.single = NULL,
3506 +- .teardown.single = smpcfd_dying_cpu,
3507 +- },
3508 + /*
3509 + * Handled on controll processor until the plugged processor manages
3510 + * this itself.
3511 +@@ -1335,6 +1330,11 @@ static struct cpuhp_step cpuhp_ap_states[] = {
3512 + .startup.single = NULL,
3513 + .teardown.single = rcutree_dying_cpu,
3514 + },
3515 ++ [CPUHP_AP_SMPCFD_DYING] = {
3516 ++ .name = "smpcfd:dying",
3517 ++ .startup.single = NULL,
3518 ++ .teardown.single = smpcfd_dying_cpu,
3519 ++ },
3520 + /* Entry state on starting. Interrupts enabled from here on. Transient
3521 + * state for synchronsization */
3522 + [CPUHP_AP_ONLINE] = {
3523 +diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
3524 +index e74be38245ad..ed5d34925ad0 100644
3525 +--- a/kernel/debug/kdb/kdb_io.c
3526 ++++ b/kernel/debug/kdb/kdb_io.c
3527 +@@ -350,7 +350,7 @@ static char *kdb_read(char *buffer, size_t bufsize)
3528 + }
3529 + kdb_printf("\n");
3530 + for (i = 0; i < count; i++) {
3531 +- if (kallsyms_symbol_next(p_tmp, i) < 0)
3532 ++ if (WARN_ON(!kallsyms_symbol_next(p_tmp, i)))
3533 + break;
3534 + kdb_printf("%s ", p_tmp);
3535 + *(p_tmp + len) = '\0';
3536 +diff --git a/kernel/jump_label.c b/kernel/jump_label.c
3537 +index 0bf2e8f5244a..7c3774ac1d51 100644
3538 +--- a/kernel/jump_label.c
3539 ++++ b/kernel/jump_label.c
3540 +@@ -769,7 +769,7 @@ static __init int jump_label_test(void)
3541 +
3542 + return 0;
3543 + }
3544 +-late_initcall(jump_label_test);
3545 ++early_initcall(jump_label_test);
3546 + #endif /* STATIC_KEYS_SELFTEST */
3547 +
3548 + #endif /* HAVE_JUMP_LABEL */
3549 +diff --git a/kernel/sysctl.c b/kernel/sysctl.c
3550 +index d9c31bc2eaea..56aca862c4f5 100644
3551 +--- a/kernel/sysctl.c
3552 ++++ b/kernel/sysctl.c
3553 +@@ -1822,7 +1822,7 @@ static struct ctl_table fs_table[] = {
3554 + {
3555 + .procname = "pipe-max-size",
3556 + .data = &pipe_max_size,
3557 +- .maxlen = sizeof(int),
3558 ++ .maxlen = sizeof(pipe_max_size),
3559 + .mode = 0644,
3560 + .proc_handler = &pipe_proc_fn,
3561 + .extra1 = &pipe_min_size,
3562 +diff --git a/lib/asn1_decoder.c b/lib/asn1_decoder.c
3563 +index 1ef0cec38d78..dc14beae2c9a 100644
3564 +--- a/lib/asn1_decoder.c
3565 ++++ b/lib/asn1_decoder.c
3566 +@@ -313,42 +313,47 @@ int asn1_ber_decoder(const struct asn1_decoder *decoder,
3567 +
3568 + /* Decide how to handle the operation */
3569 + switch (op) {
3570 +- case ASN1_OP_MATCH_ANY_ACT:
3571 +- case ASN1_OP_MATCH_ANY_ACT_OR_SKIP:
3572 +- case ASN1_OP_COND_MATCH_ANY_ACT:
3573 +- case ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP:
3574 +- ret = actions[machine[pc + 1]](context, hdr, tag, data + dp, len);
3575 +- if (ret < 0)
3576 +- return ret;
3577 +- goto skip_data;
3578 +-
3579 +- case ASN1_OP_MATCH_ACT:
3580 +- case ASN1_OP_MATCH_ACT_OR_SKIP:
3581 +- case ASN1_OP_COND_MATCH_ACT_OR_SKIP:
3582 +- ret = actions[machine[pc + 2]](context, hdr, tag, data + dp, len);
3583 +- if (ret < 0)
3584 +- return ret;
3585 +- goto skip_data;
3586 +-
3587 + case ASN1_OP_MATCH:
3588 + case ASN1_OP_MATCH_OR_SKIP:
3589 ++ case ASN1_OP_MATCH_ACT:
3590 ++ case ASN1_OP_MATCH_ACT_OR_SKIP:
3591 + case ASN1_OP_MATCH_ANY:
3592 + case ASN1_OP_MATCH_ANY_OR_SKIP:
3593 ++ case ASN1_OP_MATCH_ANY_ACT:
3594 ++ case ASN1_OP_MATCH_ANY_ACT_OR_SKIP:
3595 + case ASN1_OP_COND_MATCH_OR_SKIP:
3596 ++ case ASN1_OP_COND_MATCH_ACT_OR_SKIP:
3597 + case ASN1_OP_COND_MATCH_ANY:
3598 + case ASN1_OP_COND_MATCH_ANY_OR_SKIP:
3599 +- skip_data:
3600 ++ case ASN1_OP_COND_MATCH_ANY_ACT:
3601 ++ case ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP:
3602 ++
3603 + if (!(flags & FLAG_CONS)) {
3604 + if (flags & FLAG_INDEFINITE_LENGTH) {
3605 ++ size_t tmp = dp;
3606 ++
3607 + ret = asn1_find_indefinite_length(
3608 +- data, datalen, &dp, &len, &errmsg);
3609 ++ data, datalen, &tmp, &len, &errmsg);
3610 + if (ret < 0)
3611 + goto error;
3612 +- } else {
3613 +- dp += len;
3614 + }
3615 + pr_debug("- LEAF: %zu\n", len);
3616 + }
3617 ++
3618 ++ if (op & ASN1_OP_MATCH__ACT) {
3619 ++ unsigned char act;
3620 ++
3621 ++ if (op & ASN1_OP_MATCH__ANY)
3622 ++ act = machine[pc + 1];
3623 ++ else
3624 ++ act = machine[pc + 2];
3625 ++ ret = actions[act](context, hdr, tag, data + dp, len);
3626 ++ if (ret < 0)
3627 ++ return ret;
3628 ++ }
3629 ++
3630 ++ if (!(flags & FLAG_CONS))
3631 ++ dp += len;
3632 + pc += asn1_op_lengths[op];
3633 + goto next_op;
3634 +
3635 +@@ -434,6 +439,8 @@ int asn1_ber_decoder(const struct asn1_decoder *decoder,
3636 + else
3637 + act = machine[pc + 1];
3638 + ret = actions[act](context, hdr, 0, data + tdp, len);
3639 ++ if (ret < 0)
3640 ++ return ret;
3641 + }
3642 + pc += asn1_op_lengths[op];
3643 + goto next_op;
3644 +diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
3645 +index da796e2dc4f5..c7c96bc7654a 100644
3646 +--- a/lib/dynamic_debug.c
3647 ++++ b/lib/dynamic_debug.c
3648 +@@ -360,6 +360,10 @@ static int ddebug_parse_query(char *words[], int nwords,
3649 + if (parse_lineno(last, &query->last_lineno) < 0)
3650 + return -EINVAL;
3651 +
3652 ++ /* special case for last lineno not specified */
3653 ++ if (query->last_lineno == 0)
3654 ++ query->last_lineno = UINT_MAX;
3655 ++
3656 + if (query->last_lineno < query->first_lineno) {
3657 + pr_err("last-line:%d < 1st-line:%d\n",
3658 + query->last_lineno,
3659 +diff --git a/lib/genalloc.c b/lib/genalloc.c
3660 +index 144fe6b1a03e..ca06adc4f445 100644
3661 +--- a/lib/genalloc.c
3662 ++++ b/lib/genalloc.c
3663 +@@ -194,7 +194,7 @@ int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phy
3664 + chunk->phys_addr = phys;
3665 + chunk->start_addr = virt;
3666 + chunk->end_addr = virt + size - 1;
3667 +- atomic_set(&chunk->avail, size);
3668 ++ atomic_long_set(&chunk->avail, size);
3669 +
3670 + spin_lock(&pool->lock);
3671 + list_add_rcu(&chunk->next_chunk, &pool->chunks);
3672 +@@ -304,7 +304,7 @@ unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size,
3673 + nbits = (size + (1UL << order) - 1) >> order;
3674 + rcu_read_lock();
3675 + list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
3676 +- if (size > atomic_read(&chunk->avail))
3677 ++ if (size > atomic_long_read(&chunk->avail))
3678 + continue;
3679 +
3680 + start_bit = 0;
3681 +@@ -324,7 +324,7 @@ unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size,
3682 +
3683 + addr = chunk->start_addr + ((unsigned long)start_bit << order);
3684 + size = nbits << order;
3685 +- atomic_sub(size, &chunk->avail);
3686 ++ atomic_long_sub(size, &chunk->avail);
3687 + break;
3688 + }
3689 + rcu_read_unlock();
3690 +@@ -390,7 +390,7 @@ void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
3691 + remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
3692 + BUG_ON(remain);
3693 + size = nbits << order;
3694 +- atomic_add(size, &chunk->avail);
3695 ++ atomic_long_add(size, &chunk->avail);
3696 + rcu_read_unlock();
3697 + return;
3698 + }
3699 +@@ -464,7 +464,7 @@ size_t gen_pool_avail(struct gen_pool *pool)
3700 +
3701 + rcu_read_lock();
3702 + list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
3703 +- avail += atomic_read(&chunk->avail);
3704 ++ avail += atomic_long_read(&chunk->avail);
3705 + rcu_read_unlock();
3706 + return avail;
3707 + }
3708 +diff --git a/mm/slub.c b/mm/slub.c
3709 +index 1efbb8123037..8e1c027a30f4 100644
3710 +--- a/mm/slub.c
3711 ++++ b/mm/slub.c
3712 +@@ -5704,6 +5704,10 @@ static int sysfs_slab_add(struct kmem_cache *s)
3713 + return 0;
3714 + }
3715 +
3716 ++ if (!unmergeable && disable_higher_order_debug &&
3717 ++ (slub_debug & DEBUG_METADATA_FLAGS))
3718 ++ unmergeable = 1;
3719 ++
3720 + if (unmergeable) {
3721 + /*
3722 + * Slabcache can never be merged so we can use the name proper.
3723 +diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
3724 +index 7c38e850a8fc..685049a9048d 100644
3725 +--- a/mm/zsmalloc.c
3726 ++++ b/mm/zsmalloc.c
3727 +@@ -1349,7 +1349,7 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
3728 + * pools/users, we can't allow mapping in interrupt context
3729 + * because it can corrupt another users mappings.
3730 + */
3731 +- WARN_ON_ONCE(in_interrupt());
3732 ++ BUG_ON(in_interrupt());
3733 +
3734 + /* From now on, migration cannot move the object */
3735 + pin_tag(handle);
3736 +diff --git a/net/ipv4/route.c b/net/ipv4/route.c
3737 +index 3d9f1c2f81c5..647cfc972bde 100644
3738 +--- a/net/ipv4/route.c
3739 ++++ b/net/ipv4/route.c
3740 +@@ -651,9 +651,12 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
3741 + struct fnhe_hash_bucket *hash;
3742 + struct fib_nh_exception *fnhe;
3743 + struct rtable *rt;
3744 ++ u32 genid, hval;
3745 + unsigned int i;
3746 + int depth;
3747 +- u32 hval = fnhe_hashfun(daddr);
3748 ++
3749 ++ genid = fnhe_genid(dev_net(nh->nh_dev));
3750 ++ hval = fnhe_hashfun(daddr);
3751 +
3752 + spin_lock_bh(&fnhe_lock);
3753 +
3754 +@@ -676,12 +679,13 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
3755 + }
3756 +
3757 + if (fnhe) {
3758 ++ if (fnhe->fnhe_genid != genid)
3759 ++ fnhe->fnhe_genid = genid;
3760 + if (gw)
3761 + fnhe->fnhe_gw = gw;
3762 +- if (pmtu) {
3763 ++ if (pmtu)
3764 + fnhe->fnhe_pmtu = pmtu;
3765 +- fnhe->fnhe_expires = max(1UL, expires);
3766 +- }
3767 ++ fnhe->fnhe_expires = max(1UL, expires);
3768 + /* Update all cached dsts too */
3769 + rt = rcu_dereference(fnhe->fnhe_rth_input);
3770 + if (rt)
3771 +@@ -700,7 +704,7 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
3772 + fnhe->fnhe_next = hash->chain;
3773 + rcu_assign_pointer(hash->chain, fnhe);
3774 + }
3775 +- fnhe->fnhe_genid = fnhe_genid(dev_net(nh->nh_dev));
3776 ++ fnhe->fnhe_genid = genid;
3777 + fnhe->fnhe_daddr = daddr;
3778 + fnhe->fnhe_gw = gw;
3779 + fnhe->fnhe_pmtu = pmtu;
3780 +diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
3781 +index 59c121b932ac..5d6bee070871 100644
3782 +--- a/net/ipv6/ip6_gre.c
3783 ++++ b/net/ipv6/ip6_gre.c
3784 +@@ -461,7 +461,7 @@ static int ip6gre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
3785 + &ipv6h->saddr, &ipv6h->daddr, tpi->key,
3786 + tpi->proto);
3787 + if (tunnel) {
3788 +- ip6_tnl_rcv(tunnel, skb, tpi, NULL, false);
3789 ++ ip6_tnl_rcv(tunnel, skb, tpi, NULL, log_ecn_error);
3790 +
3791 + return PACKET_RCVD;
3792 + }
3793 +diff --git a/net/sctp/socket.c b/net/sctp/socket.c
3794 +index 6f45d1713452..14c28fbfe6b8 100644
3795 +--- a/net/sctp/socket.c
3796 ++++ b/net/sctp/socket.c
3797 +@@ -83,8 +83,8 @@
3798 + /* Forward declarations for internal helper functions. */
3799 + static int sctp_writeable(struct sock *sk);
3800 + static void sctp_wfree(struct sk_buff *skb);
3801 +-static int sctp_wait_for_sndbuf(struct sctp_association *, long *timeo_p,
3802 +- size_t msg_len);
3803 ++static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
3804 ++ size_t msg_len, struct sock **orig_sk);
3805 + static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p);
3806 + static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p);
3807 + static int sctp_wait_for_accept(struct sock *sk, long timeo);
3808 +@@ -1962,9 +1962,16 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
3809 +
3810 + timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
3811 + if (!sctp_wspace(asoc)) {
3812 +- err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len);
3813 +- if (err)
3814 ++ /* sk can be changed by peel off when waiting for buf. */
3815 ++ err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len, &sk);
3816 ++ if (err) {
3817 ++ if (err == -ESRCH) {
3818 ++ /* asoc is already dead. */
3819 ++ new_asoc = NULL;
3820 ++ err = -EPIPE;
3821 ++ }
3822 + goto out_free;
3823 ++ }
3824 + }
3825 +
3826 + /* If an address is passed with the sendto/sendmsg call, it is used
3827 +@@ -4943,12 +4950,6 @@ int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp)
3828 + if (!asoc)
3829 + return -EINVAL;
3830 +
3831 +- /* If there is a thread waiting on more sndbuf space for
3832 +- * sending on this asoc, it cannot be peeled.
3833 +- */
3834 +- if (waitqueue_active(&asoc->wait))
3835 +- return -EBUSY;
3836 +-
3837 + /* An association cannot be branched off from an already peeled-off
3838 + * socket, nor is this supported for tcp style sockets.
3839 + */
3840 +@@ -7822,7 +7823,7 @@ void sctp_sock_rfree(struct sk_buff *skb)
3841 +
3842 + /* Helper function to wait for space in the sndbuf. */
3843 + static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
3844 +- size_t msg_len)
3845 ++ size_t msg_len, struct sock **orig_sk)
3846 + {
3847 + struct sock *sk = asoc->base.sk;
3848 + int err = 0;
3849 +@@ -7839,10 +7840,11 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
3850 + for (;;) {
3851 + prepare_to_wait_exclusive(&asoc->wait, &wait,
3852 + TASK_INTERRUPTIBLE);
3853 ++ if (asoc->base.dead)
3854 ++ goto do_dead;
3855 + if (!*timeo_p)
3856 + goto do_nonblock;
3857 +- if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING ||
3858 +- asoc->base.dead)
3859 ++ if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING)
3860 + goto do_error;
3861 + if (signal_pending(current))
3862 + goto do_interrupted;
3863 +@@ -7855,11 +7857,17 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
3864 + release_sock(sk);
3865 + current_timeo = schedule_timeout(current_timeo);
3866 + lock_sock(sk);
3867 ++ if (sk != asoc->base.sk) {
3868 ++ release_sock(sk);
3869 ++ sk = asoc->base.sk;
3870 ++ lock_sock(sk);
3871 ++ }
3872 +
3873 + *timeo_p = current_timeo;
3874 + }
3875 +
3876 + out:
3877 ++ *orig_sk = sk;
3878 + finish_wait(&asoc->wait, &wait);
3879 +
3880 + /* Release the association's refcnt. */
3881 +@@ -7867,6 +7875,10 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
3882 +
3883 + return err;
3884 +
3885 ++do_dead:
3886 ++ err = -ESRCH;
3887 ++ goto out;
3888 ++
3889 + do_error:
3890 + err = -EPIPE;
3891 + goto out;
3892 +diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
3893 +index 413e3868fbf3..7166e7ecbe86 100644
3894 +--- a/net/smc/smc_core.c
3895 ++++ b/net/smc/smc_core.c
3896 +@@ -571,7 +571,7 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_rmb)
3897 + /* use socket send buffer size (w/o overhead) as start value */
3898 + sk_buf_size = smc->sk.sk_sndbuf / 2;
3899 +
3900 +- for (bufsize_short = smc_compress_bufsize(smc->sk.sk_sndbuf / 2);
3901 ++ for (bufsize_short = smc_compress_bufsize(sk_buf_size);
3902 + bufsize_short >= 0; bufsize_short--) {
3903 +
3904 + if (is_rmb) {
3905 +diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
3906 +index 0cc83839c13c..f9db5fe52d36 100644
3907 +--- a/net/sunrpc/sched.c
3908 ++++ b/net/sunrpc/sched.c
3909 +@@ -274,10 +274,9 @@ static inline void rpc_task_set_debuginfo(struct rpc_task *task)
3910 +
3911 + static void rpc_set_active(struct rpc_task *task)
3912 + {
3913 +- trace_rpc_task_begin(task->tk_client, task, NULL);
3914 +-
3915 + rpc_task_set_debuginfo(task);
3916 + set_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
3917 ++ trace_rpc_task_begin(task->tk_client, task, NULL);
3918 + }
3919 +
3920 + /*
3921 +diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
3922 +index 7d80040a37b6..f00383a37622 100644
3923 +--- a/net/tls/tls_sw.c
3924 ++++ b/net/tls/tls_sw.c
3925 +@@ -219,7 +219,7 @@ static int tls_do_encryption(struct tls_context *tls_ctx,
3926 + struct aead_request *aead_req;
3927 + int rc;
3928 +
3929 +- aead_req = kmalloc(req_size, flags);
3930 ++ aead_req = kzalloc(req_size, flags);
3931 + if (!aead_req)
3932 + return -ENOMEM;
3933 +
3934 +diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
3935 +index 6eb228a70131..2a6093840e7e 100644
3936 +--- a/net/xfrm/xfrm_policy.c
3937 ++++ b/net/xfrm/xfrm_policy.c
3938 +@@ -1306,6 +1306,7 @@ static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
3939 + newp->xfrm_nr = old->xfrm_nr;
3940 + newp->index = old->index;
3941 + newp->type = old->type;
3942 ++ newp->family = old->family;
3943 + memcpy(newp->xfrm_vec, old->xfrm_vec,
3944 + newp->xfrm_nr*sizeof(struct xfrm_tmpl));
3945 + spin_lock_bh(&net->xfrm.xfrm_policy_lock);
3946 +diff --git a/scripts/coccicheck b/scripts/coccicheck
3947 +index 28ad1feff9e1..dda283aba96b 100755
3948 +--- a/scripts/coccicheck
3949 ++++ b/scripts/coccicheck
3950 +@@ -30,12 +30,6 @@ else
3951 + VERBOSE=0
3952 + fi
3953 +
3954 +-if [ -z "$J" ]; then
3955 +- NPROC=$(getconf _NPROCESSORS_ONLN)
3956 +-else
3957 +- NPROC="$J"
3958 +-fi
3959 +-
3960 + FLAGS="--very-quiet"
3961 +
3962 + # You can use SPFLAGS to append extra arguments to coccicheck or override any
3963 +@@ -70,6 +64,9 @@ if [ "$C" = "1" -o "$C" = "2" ]; then
3964 + # Take only the last argument, which is the C file to test
3965 + shift $(( $# - 1 ))
3966 + OPTIONS="$COCCIINCLUDE $1"
3967 ++
3968 ++ # No need to parallelize Coccinelle since this mode takes one input file.
3969 ++ NPROC=1
3970 + else
3971 + ONLINE=0
3972 + if [ "$KBUILD_EXTMOD" = "" ] ; then
3973 +@@ -77,6 +74,12 @@ else
3974 + else
3975 + OPTIONS="--dir $KBUILD_EXTMOD $COCCIINCLUDE"
3976 + fi
3977 ++
3978 ++ if [ -z "$J" ]; then
3979 ++ NPROC=$(getconf _NPROCESSORS_ONLN)
3980 ++ else
3981 ++ NPROC="$J"
3982 ++ fi
3983 + fi
3984 +
3985 + if [ "$KBUILD_EXTMOD" != "" ] ; then
3986 +diff --git a/scripts/package/Makefile b/scripts/package/Makefile
3987 +index 73f9f3192b9f..34de8b953ecf 100644
3988 +--- a/scripts/package/Makefile
3989 ++++ b/scripts/package/Makefile
3990 +@@ -39,10 +39,9 @@ if test "$(objtree)" != "$(srctree)"; then \
3991 + false; \
3992 + fi ; \
3993 + $(srctree)/scripts/setlocalversion --save-scmversion; \
3994 +-ln -sf $(srctree) $(2); \
3995 + tar -cz $(RCS_TAR_IGNORE) -f $(2).tar.gz \
3996 +- $(addprefix $(2)/,$(TAR_CONTENT) $(3)); \
3997 +-rm -f $(2) $(objtree)/.scmversion
3998 ++ --transform 's:^:$(2)/:S' $(TAR_CONTENT) $(3); \
3999 ++rm -f $(objtree)/.scmversion
4000 +
4001 + # rpm-pkg
4002 + # ---------------------------------------------------------------------------
4003 +@@ -50,7 +49,7 @@ rpm-pkg rpm: FORCE
4004 + $(MAKE) clean
4005 + $(CONFIG_SHELL) $(MKSPEC) >$(objtree)/kernel.spec
4006 + $(call cmd,src_tar,$(KERNELPATH),kernel.spec)
4007 +- rpmbuild $(RPMOPTS) --target $(UTS_MACHINE) -ta $(KERNELPATH).tar.gz
4008 ++ +rpmbuild $(RPMOPTS) --target $(UTS_MACHINE) -ta $(KERNELPATH).tar.gz
4009 + rm $(KERNELPATH).tar.gz kernel.spec
4010 +
4011 + # binrpm-pkg
4012 +@@ -58,7 +57,7 @@ rpm-pkg rpm: FORCE
4013 + binrpm-pkg: FORCE
4014 + $(MAKE) KBUILD_SRC=
4015 + $(CONFIG_SHELL) $(MKSPEC) prebuilt > $(objtree)/binkernel.spec
4016 +- rpmbuild $(RPMOPTS) --define "_builddir $(objtree)" --target \
4017 ++ +rpmbuild $(RPMOPTS) --define "_builddir $(objtree)" --target \
4018 + $(UTS_MACHINE) -bb $(objtree)/binkernel.spec
4019 + rm binkernel.spec
4020 +
4021 +diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c
4022 +index 4243b0c3f0e4..586b249d3b46 100644
4023 +--- a/security/apparmor/policy.c
4024 ++++ b/security/apparmor/policy.c
4025 +@@ -502,7 +502,7 @@ struct aa_profile *aa_new_null_profile(struct aa_profile *parent, bool hat,
4026 + {
4027 + struct aa_profile *p, *profile;
4028 + const char *bname;
4029 +- char *name;
4030 ++ char *name = NULL;
4031 +
4032 + AA_BUG(!parent);
4033 +
4034 +@@ -562,6 +562,7 @@ struct aa_profile *aa_new_null_profile(struct aa_profile *parent, bool hat,
4035 + return profile;
4036 +
4037 + fail:
4038 ++ kfree(name);
4039 + aa_free_profile(profile);
4040 + return NULL;
4041 + }
4042 +diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
4043 +index 76d22f726ae4..1ffe60bb2845 100644
4044 +--- a/security/keys/keyctl.c
4045 ++++ b/security/keys/keyctl.c
4046 +@@ -1588,9 +1588,8 @@ long keyctl_session_to_parent(void)
4047 + * The caller must have Setattr permission to change keyring restrictions.
4048 + *
4049 + * The requested type name may be a NULL pointer to reject all attempts
4050 +- * to link to the keyring. If _type is non-NULL, _restriction can be
4051 +- * NULL or a pointer to a string describing the restriction. If _type is
4052 +- * NULL, _restriction must also be NULL.
4053 ++ * to link to the keyring. In this case, _restriction must also be NULL.
4054 ++ * Otherwise, both _type and _restriction must be non-NULL.
4055 + *
4056 + * Returns 0 if successful.
4057 + */
4058 +@@ -1598,7 +1597,6 @@ long keyctl_restrict_keyring(key_serial_t id, const char __user *_type,
4059 + const char __user *_restriction)
4060 + {
4061 + key_ref_t key_ref;
4062 +- bool link_reject = !_type;
4063 + char type[32];
4064 + char *restriction = NULL;
4065 + long ret;
4066 +@@ -1607,31 +1605,29 @@ long keyctl_restrict_keyring(key_serial_t id, const char __user *_type,
4067 + if (IS_ERR(key_ref))
4068 + return PTR_ERR(key_ref);
4069 +
4070 ++ ret = -EINVAL;
4071 + if (_type) {
4072 +- ret = key_get_type_from_user(type, _type, sizeof(type));
4073 +- if (ret < 0)
4074 ++ if (!_restriction)
4075 + goto error;
4076 +- }
4077 +
4078 +- if (_restriction) {
4079 +- if (!_type) {
4080 +- ret = -EINVAL;
4081 ++ ret = key_get_type_from_user(type, _type, sizeof(type));
4082 ++ if (ret < 0)
4083 + goto error;
4084 +- }
4085 +
4086 + restriction = strndup_user(_restriction, PAGE_SIZE);
4087 + if (IS_ERR(restriction)) {
4088 + ret = PTR_ERR(restriction);
4089 + goto error;
4090 + }
4091 ++ } else {
4092 ++ if (_restriction)
4093 ++ goto error;
4094 + }
4095 +
4096 +- ret = keyring_restrict(key_ref, link_reject ? NULL : type, restriction);
4097 ++ ret = keyring_restrict(key_ref, _type ? type : NULL, restriction);
4098 + kfree(restriction);
4099 +-
4100 + error:
4101 + key_ref_put(key_ref);
4102 +-
4103 + return ret;
4104 + }
4105 +
4106 +diff --git a/security/keys/request_key.c b/security/keys/request_key.c
4107 +index e8036cd0ad54..7dc741382154 100644
4108 +--- a/security/keys/request_key.c
4109 ++++ b/security/keys/request_key.c
4110 +@@ -251,11 +251,12 @@ static int construct_key(struct key *key, const void *callout_info,
4111 + * The keyring selected is returned with an extra reference upon it which the
4112 + * caller must release.
4113 + */
4114 +-static void construct_get_dest_keyring(struct key **_dest_keyring)
4115 ++static int construct_get_dest_keyring(struct key **_dest_keyring)
4116 + {
4117 + struct request_key_auth *rka;
4118 + const struct cred *cred = current_cred();
4119 + struct key *dest_keyring = *_dest_keyring, *authkey;
4120 ++ int ret;
4121 +
4122 + kenter("%p", dest_keyring);
4123 +
4124 +@@ -264,6 +265,8 @@ static void construct_get_dest_keyring(struct key **_dest_keyring)
4125 + /* the caller supplied one */
4126 + key_get(dest_keyring);
4127 + } else {
4128 ++ bool do_perm_check = true;
4129 ++
4130 + /* use a default keyring; falling through the cases until we
4131 + * find one that we actually have */
4132 + switch (cred->jit_keyring) {
4133 +@@ -278,8 +281,10 @@ static void construct_get_dest_keyring(struct key **_dest_keyring)
4134 + dest_keyring =
4135 + key_get(rka->dest_keyring);
4136 + up_read(&authkey->sem);
4137 +- if (dest_keyring)
4138 ++ if (dest_keyring) {
4139 ++ do_perm_check = false;
4140 + break;
4141 ++ }
4142 + }
4143 +
4144 + case KEY_REQKEY_DEFL_THREAD_KEYRING:
4145 +@@ -314,11 +319,29 @@ static void construct_get_dest_keyring(struct key **_dest_keyring)
4146 + default:
4147 + BUG();
4148 + }
4149 ++
4150 ++ /*
4151 ++ * Require Write permission on the keyring. This is essential
4152 ++ * because the default keyring may be the session keyring, and
4153 ++ * joining a keyring only requires Search permission.
4154 ++ *
4155 ++ * However, this check is skipped for the "requestor keyring" so
4156 ++ * that /sbin/request-key can itself use request_key() to add
4157 ++ * keys to the original requestor's destination keyring.
4158 ++ */
4159 ++ if (dest_keyring && do_perm_check) {
4160 ++ ret = key_permission(make_key_ref(dest_keyring, 1),
4161 ++ KEY_NEED_WRITE);
4162 ++ if (ret) {
4163 ++ key_put(dest_keyring);
4164 ++ return ret;
4165 ++ }
4166 ++ }
4167 + }
4168 +
4169 + *_dest_keyring = dest_keyring;
4170 + kleave(" [dk %d]", key_serial(dest_keyring));
4171 +- return;
4172 ++ return 0;
4173 + }
4174 +
4175 + /*
4176 +@@ -444,11 +467,15 @@ static struct key *construct_key_and_link(struct keyring_search_context *ctx,
4177 + if (ctx->index_key.type == &key_type_keyring)
4178 + return ERR_PTR(-EPERM);
4179 +
4180 +- user = key_user_lookup(current_fsuid());
4181 +- if (!user)
4182 +- return ERR_PTR(-ENOMEM);
4183 ++ ret = construct_get_dest_keyring(&dest_keyring);
4184 ++ if (ret)
4185 ++ goto error;
4186 +
4187 +- construct_get_dest_keyring(&dest_keyring);
4188 ++ user = key_user_lookup(current_fsuid());
4189 ++ if (!user) {
4190 ++ ret = -ENOMEM;
4191 ++ goto error_put_dest_keyring;
4192 ++ }
4193 +
4194 + ret = construct_alloc_key(ctx, dest_keyring, flags, user, &key);
4195 + key_user_put(user);
4196 +@@ -463,7 +490,7 @@ static struct key *construct_key_and_link(struct keyring_search_context *ctx,
4197 + } else if (ret == -EINPROGRESS) {
4198 + ret = 0;
4199 + } else {
4200 +- goto couldnt_alloc_key;
4201 ++ goto error_put_dest_keyring;
4202 + }
4203 +
4204 + key_put(dest_keyring);
4205 +@@ -473,8 +500,9 @@ static struct key *construct_key_and_link(struct keyring_search_context *ctx,
4206 + construction_failed:
4207 + key_negate_and_link(key, key_negative_timeout, NULL, NULL);
4208 + key_put(key);
4209 +-couldnt_alloc_key:
4210 ++error_put_dest_keyring:
4211 + key_put(dest_keyring);
4212 ++error:
4213 + kleave(" = %d", ret);
4214 + return ERR_PTR(ret);
4215 + }
4216 +diff --git a/sound/core/pcm.c b/sound/core/pcm.c
4217 +index 7eadb7fd8074..7fea724d093a 100644
4218 +--- a/sound/core/pcm.c
4219 ++++ b/sound/core/pcm.c
4220 +@@ -153,7 +153,9 @@ static int snd_pcm_control_ioctl(struct snd_card *card,
4221 + err = -ENXIO;
4222 + goto _error;
4223 + }
4224 ++ mutex_lock(&pcm->open_mutex);
4225 + err = snd_pcm_info_user(substream, info);
4226 ++ mutex_unlock(&pcm->open_mutex);
4227 + _error:
4228 + mutex_unlock(&register_mutex);
4229 + return err;
4230 +diff --git a/sound/core/seq/seq_timer.c b/sound/core/seq/seq_timer.c
4231 +index 37d9cfbc29f9..b80985fbc334 100644
4232 +--- a/sound/core/seq/seq_timer.c
4233 ++++ b/sound/core/seq/seq_timer.c
4234 +@@ -355,7 +355,7 @@ static int initialize_timer(struct snd_seq_timer *tmr)
4235 + unsigned long freq;
4236 +
4237 + t = tmr->timeri->timer;
4238 +- if (snd_BUG_ON(!t))
4239 ++ if (!t)
4240 + return -EINVAL;
4241 +
4242 + freq = tmr->preferred_resolution;
4243 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
4244 +index 7c39114d124f..b076386c8952 100644
4245 +--- a/sound/pci/hda/patch_realtek.c
4246 ++++ b/sound/pci/hda/patch_realtek.c
4247 +@@ -330,6 +330,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
4248 + case 0x10ec0236:
4249 + case 0x10ec0255:
4250 + case 0x10ec0256:
4251 ++ case 0x10ec0257:
4252 + case 0x10ec0282:
4253 + case 0x10ec0283:
4254 + case 0x10ec0286:
4255 +@@ -2749,6 +2750,7 @@ enum {
4256 + ALC269_TYPE_ALC298,
4257 + ALC269_TYPE_ALC255,
4258 + ALC269_TYPE_ALC256,
4259 ++ ALC269_TYPE_ALC257,
4260 + ALC269_TYPE_ALC215,
4261 + ALC269_TYPE_ALC225,
4262 + ALC269_TYPE_ALC294,
4263 +@@ -2782,6 +2784,7 @@ static int alc269_parse_auto_config(struct hda_codec *codec)
4264 + case ALC269_TYPE_ALC298:
4265 + case ALC269_TYPE_ALC255:
4266 + case ALC269_TYPE_ALC256:
4267 ++ case ALC269_TYPE_ALC257:
4268 + case ALC269_TYPE_ALC215:
4269 + case ALC269_TYPE_ALC225:
4270 + case ALC269_TYPE_ALC294:
4271 +@@ -6839,6 +6842,10 @@ static int patch_alc269(struct hda_codec *codec)
4272 + spec->gen.mixer_nid = 0; /* ALC256 does not have any loopback mixer path */
4273 + alc_update_coef_idx(codec, 0x36, 1 << 13, 1 << 5); /* Switch pcbeep path to Line in path*/
4274 + break;
4275 ++ case 0x10ec0257:
4276 ++ spec->codec_variant = ALC269_TYPE_ALC257;
4277 ++ spec->gen.mixer_nid = 0;
4278 ++ break;
4279 + case 0x10ec0215:
4280 + case 0x10ec0285:
4281 + case 0x10ec0289:
4282 +@@ -7886,6 +7893,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
4283 + HDA_CODEC_ENTRY(0x10ec0236, "ALC236", patch_alc269),
4284 + HDA_CODEC_ENTRY(0x10ec0255, "ALC255", patch_alc269),
4285 + HDA_CODEC_ENTRY(0x10ec0256, "ALC256", patch_alc269),
4286 ++ HDA_CODEC_ENTRY(0x10ec0257, "ALC257", patch_alc269),
4287 + HDA_CODEC_ENTRY(0x10ec0260, "ALC260", patch_alc260),
4288 + HDA_CODEC_ENTRY(0x10ec0262, "ALC262", patch_alc262),
4289 + HDA_CODEC_ENTRY(0x10ec0267, "ALC267", patch_alc268),
4290 +diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
4291 +index 2b835cca41b1..4fde4f8d4444 100644
4292 +--- a/sound/usb/mixer.c
4293 ++++ b/sound/usb/mixer.c
4294 +@@ -204,6 +204,10 @@ static int snd_usb_copy_string_desc(struct mixer_build *state,
4295 + int index, char *buf, int maxlen)
4296 + {
4297 + int len = usb_string(state->chip->dev, index, buf, maxlen - 1);
4298 ++
4299 ++ if (len < 0)
4300 ++ return 0;
4301 ++
4302 + buf[len] = 0;
4303 + return len;
4304 + }
4305 +@@ -2174,13 +2178,14 @@ static int parse_audio_selector_unit(struct mixer_build *state, int unitid,
4306 + if (len)
4307 + ;
4308 + else if (nameid)
4309 +- snd_usb_copy_string_desc(state, nameid, kctl->id.name,
4310 ++ len = snd_usb_copy_string_desc(state, nameid, kctl->id.name,
4311 + sizeof(kctl->id.name));
4312 +- else {
4313 ++ else
4314 + len = get_term_name(state, &state->oterm,
4315 + kctl->id.name, sizeof(kctl->id.name), 0);
4316 +- if (!len)
4317 +- strlcpy(kctl->id.name, "USB", sizeof(kctl->id.name));
4318 ++
4319 ++ if (!len) {
4320 ++ strlcpy(kctl->id.name, "USB", sizeof(kctl->id.name));
4321 +
4322 + if (desc->bDescriptorSubtype == UAC2_CLOCK_SELECTOR)
4323 + append_ctl_name(kctl, " Clock Source");
4324 +diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
4325 +index eaa3bec273c8..4c99c57736ce 100644
4326 +--- a/tools/hv/hv_kvp_daemon.c
4327 ++++ b/tools/hv/hv_kvp_daemon.c
4328 +@@ -193,11 +193,14 @@ static void kvp_update_mem_state(int pool)
4329 + for (;;) {
4330 + readp = &record[records_read];
4331 + records_read += fread(readp, sizeof(struct kvp_record),
4332 +- ENTRIES_PER_BLOCK * num_blocks,
4333 +- filep);
4334 ++ ENTRIES_PER_BLOCK * num_blocks - records_read,
4335 ++ filep);
4336 +
4337 + if (ferror(filep)) {
4338 +- syslog(LOG_ERR, "Failed to read file, pool: %d", pool);
4339 ++ syslog(LOG_ERR,
4340 ++ "Failed to read file, pool: %d; error: %d %s",
4341 ++ pool, errno, strerror(errno));
4342 ++ kvp_release_lock(pool);
4343 + exit(EXIT_FAILURE);
4344 + }
4345 +
4346 +@@ -210,6 +213,7 @@ static void kvp_update_mem_state(int pool)
4347 +
4348 + if (record == NULL) {
4349 + syslog(LOG_ERR, "malloc failed");
4350 ++ kvp_release_lock(pool);
4351 + exit(EXIT_FAILURE);
4352 + }
4353 + continue;
4354 +@@ -224,15 +228,11 @@ static void kvp_update_mem_state(int pool)
4355 + fclose(filep);
4356 + kvp_release_lock(pool);
4357 + }
4358 ++
4359 + static int kvp_file_init(void)
4360 + {
4361 + int fd;
4362 +- FILE *filep;
4363 +- size_t records_read;
4364 + char *fname;
4365 +- struct kvp_record *record;
4366 +- struct kvp_record *readp;
4367 +- int num_blocks;
4368 + int i;
4369 + int alloc_unit = sizeof(struct kvp_record) * ENTRIES_PER_BLOCK;
4370 +
4371 +@@ -246,61 +246,19 @@ static int kvp_file_init(void)
4372 +
4373 + for (i = 0; i < KVP_POOL_COUNT; i++) {
4374 + fname = kvp_file_info[i].fname;
4375 +- records_read = 0;
4376 +- num_blocks = 1;
4377 + sprintf(fname, "%s/.kvp_pool_%d", KVP_CONFIG_LOC, i);
4378 + fd = open(fname, O_RDWR | O_CREAT | O_CLOEXEC, 0644 /* rw-r--r-- */);
4379 +
4380 + if (fd == -1)
4381 + return 1;
4382 +
4383 +-
4384 +- filep = fopen(fname, "re");
4385 +- if (!filep) {
4386 +- close(fd);
4387 +- return 1;
4388 +- }
4389 +-
4390 +- record = malloc(alloc_unit * num_blocks);
4391 +- if (record == NULL) {
4392 +- fclose(filep);
4393 +- close(fd);
4394 +- return 1;
4395 +- }
4396 +- for (;;) {
4397 +- readp = &record[records_read];
4398 +- records_read += fread(readp, sizeof(struct kvp_record),
4399 +- ENTRIES_PER_BLOCK,
4400 +- filep);
4401 +-
4402 +- if (ferror(filep)) {
4403 +- syslog(LOG_ERR, "Failed to read file, pool: %d",
4404 +- i);
4405 +- exit(EXIT_FAILURE);
4406 +- }
4407 +-
4408 +- if (!feof(filep)) {
4409 +- /*
4410 +- * We have more data to read.
4411 +- */
4412 +- num_blocks++;
4413 +- record = realloc(record, alloc_unit *
4414 +- num_blocks);
4415 +- if (record == NULL) {
4416 +- fclose(filep);
4417 +- close(fd);
4418 +- return 1;
4419 +- }
4420 +- continue;
4421 +- }
4422 +- break;
4423 +- }
4424 + kvp_file_info[i].fd = fd;
4425 +- kvp_file_info[i].num_blocks = num_blocks;
4426 +- kvp_file_info[i].records = record;
4427 +- kvp_file_info[i].num_records = records_read;
4428 +- fclose(filep);
4429 +-
4430 ++ kvp_file_info[i].num_blocks = 1;
4431 ++ kvp_file_info[i].records = malloc(alloc_unit);
4432 ++ if (kvp_file_info[i].records == NULL)
4433 ++ return 1;
4434 ++ kvp_file_info[i].num_records = 0;
4435 ++ kvp_update_mem_state(i);
4436 + }
4437 +
4438 + return 0;
4439 +diff --git a/tools/testing/selftests/x86/mpx-hw.h b/tools/testing/selftests/x86/mpx-hw.h
4440 +index 3f0093911f03..d1b61ab870f8 100644
4441 +--- a/tools/testing/selftests/x86/mpx-hw.h
4442 ++++ b/tools/testing/selftests/x86/mpx-hw.h
4443 +@@ -52,14 +52,14 @@
4444 + struct mpx_bd_entry {
4445 + union {
4446 + char x[MPX_BOUNDS_DIR_ENTRY_SIZE_BYTES];
4447 +- void *contents[1];
4448 ++ void *contents[0];
4449 + };
4450 + } __attribute__((packed));
4451 +
4452 + struct mpx_bt_entry {
4453 + union {
4454 + char x[MPX_BOUNDS_TABLE_ENTRY_SIZE_BYTES];
4455 +- unsigned long contents[1];
4456 ++ unsigned long contents[0];
4457 + };
4458 + } __attribute__((packed));
4459 +
4460 +diff --git a/virt/kvm/arm/hyp/vgic-v2-sr.c b/virt/kvm/arm/hyp/vgic-v2-sr.c
4461 +index a3f18d362366..d7fd46fe9efb 100644
4462 +--- a/virt/kvm/arm/hyp/vgic-v2-sr.c
4463 ++++ b/virt/kvm/arm/hyp/vgic-v2-sr.c
4464 +@@ -34,11 +34,7 @@ static void __hyp_text save_elrsr(struct kvm_vcpu *vcpu, void __iomem *base)
4465 + else
4466 + elrsr1 = 0;
4467 +
4468 +-#ifdef CONFIG_CPU_BIG_ENDIAN
4469 +- cpu_if->vgic_elrsr = ((u64)elrsr0 << 32) | elrsr1;
4470 +-#else
4471 + cpu_if->vgic_elrsr = ((u64)elrsr1 << 32) | elrsr0;
4472 +-#endif
4473 + }
4474 +
4475 + static void __hyp_text save_lrs(struct kvm_vcpu *vcpu, void __iomem *base)
4476 +diff --git a/virt/kvm/arm/vgic/vgic-irqfd.c b/virt/kvm/arm/vgic/vgic-irqfd.c
4477 +index b7baf581611a..99e026d2dade 100644
4478 +--- a/virt/kvm/arm/vgic/vgic-irqfd.c
4479 ++++ b/virt/kvm/arm/vgic/vgic-irqfd.c
4480 +@@ -112,8 +112,7 @@ int kvm_vgic_setup_default_irq_routing(struct kvm *kvm)
4481 + u32 nr = dist->nr_spis;
4482 + int i, ret;
4483 +
4484 +- entries = kcalloc(nr, sizeof(struct kvm_kernel_irq_routing_entry),
4485 +- GFP_KERNEL);
4486 ++ entries = kcalloc(nr, sizeof(*entries), GFP_KERNEL);
4487 + if (!entries)
4488 + return -ENOMEM;
4489 +
4490 +diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
4491 +index 547f12dc4d54..3108e07526af 100644
4492 +--- a/virt/kvm/arm/vgic/vgic-its.c
4493 ++++ b/virt/kvm/arm/vgic/vgic-its.c
4494 +@@ -775,6 +775,8 @@ static int vgic_its_alloc_collection(struct vgic_its *its,
4495 + return E_ITS_MAPC_COLLECTION_OOR;
4496 +
4497 + collection = kzalloc(sizeof(*collection), GFP_KERNEL);
4498 ++ if (!collection)
4499 ++ return -ENOMEM;
4500 +
4501 + collection->collection_id = coll_id;
4502 + collection->target_addr = COLLECTION_NOT_MAPPED;
4503 +diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
4504 +index 96ea597db0e7..502f2100e7bf 100644
4505 +--- a/virt/kvm/arm/vgic/vgic-v3.c
4506 ++++ b/virt/kvm/arm/vgic/vgic-v3.c
4507 +@@ -324,13 +324,13 @@ int vgic_v3_save_pending_tables(struct kvm *kvm)
4508 + int last_byte_offset = -1;
4509 + struct vgic_irq *irq;
4510 + int ret;
4511 ++ u8 val;
4512 +
4513 + list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
4514 + int byte_offset, bit_nr;
4515 + struct kvm_vcpu *vcpu;
4516 + gpa_t pendbase, ptr;
4517 + bool stored;
4518 +- u8 val;
4519 +
4520 + vcpu = irq->target_vcpu;
4521 + if (!vcpu)
4522 +diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
4523 +index 9deb5a245b83..484e8820c382 100644
4524 +--- a/virt/kvm/kvm_main.c
4525 ++++ b/virt/kvm/kvm_main.c
4526 +@@ -136,6 +136,11 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
4527 + static unsigned long long kvm_createvm_count;
4528 + static unsigned long long kvm_active_vms;
4529 +
4530 ++__weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
4531 ++ unsigned long start, unsigned long end)
4532 ++{
4533 ++}
4534 ++
4535 + bool kvm_is_reserved_pfn(kvm_pfn_t pfn)
4536 + {
4537 + if (pfn_valid(pfn))
4538 +@@ -361,6 +366,9 @@ static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
4539 + kvm_flush_remote_tlbs(kvm);
4540 +
4541 + spin_unlock(&kvm->mmu_lock);
4542 ++
4543 ++ kvm_arch_mmu_notifier_invalidate_range(kvm, start, end);
4544 ++
4545 + srcu_read_unlock(&kvm->srcu, idx);
4546 + }
4547 +