Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.20 commit in: /
Date: Wed, 09 Jan 2019 17:56:58
Message-Id: 1547056578.5f91fe4492f9fdf817b7cff0b16433615057a289.mpagano@gentoo
1 commit: 5f91fe4492f9fdf817b7cff0b16433615057a289
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Wed Jan 9 17:56:18 2019 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Wed Jan 9 17:56:18 2019 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=5f91fe44
7
8 proj/linux-patches: Linux patch 4.20.1
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1001_linux-4.20.1.patch | 6477 +++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 6481 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index f19e624..543d775 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -43,6 +43,10 @@ EXPERIMENTAL
21 Individual Patch Descriptions:
22 --------------------------------------------------------------------------
23
24 +Patch: 1001_linux-4.20.1.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 4.20.1
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1001_linux-4.20.1.patch b/1001_linux-4.20.1.patch
33 new file mode 100644
34 index 0000000..03f7422
35 --- /dev/null
36 +++ b/1001_linux-4.20.1.patch
37 @@ -0,0 +1,6477 @@
38 +diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
39 +index aefd358a5ca3..bb5c9dc4d270 100644
40 +--- a/Documentation/admin-guide/kernel-parameters.txt
41 ++++ b/Documentation/admin-guide/kernel-parameters.txt
42 +@@ -2096,6 +2096,9 @@
43 + off
44 + Disables hypervisor mitigations and doesn't
45 + emit any warnings.
46 ++ It also drops the swap size and available
47 ++ RAM limit restriction on both hypervisor and
48 ++ bare metal.
49 +
50 + Default is 'flush'.
51 +
52 +diff --git a/Documentation/admin-guide/l1tf.rst b/Documentation/admin-guide/l1tf.rst
53 +index b85dd80510b0..9af977384168 100644
54 +--- a/Documentation/admin-guide/l1tf.rst
55 ++++ b/Documentation/admin-guide/l1tf.rst
56 +@@ -405,6 +405,9 @@ time with the option "l1tf=". The valid arguments for this option are:
57 +
58 + off Disables hypervisor mitigations and doesn't emit any
59 + warnings.
60 ++ It also drops the swap size and available RAM limit restrictions
61 ++ on both hypervisor and bare metal.
62 ++
63 + ============ =============================================================
64 +
65 + The default is 'flush'. For details about L1D flushing see :ref:`l1d_flush`.
66 +@@ -576,7 +579,8 @@ Default mitigations
67 + The kernel default mitigations for vulnerable processors are:
68 +
69 + - PTE inversion to protect against malicious user space. This is done
70 +- unconditionally and cannot be controlled.
71 ++ unconditionally and cannot be controlled. The swap storage is limited
72 ++ to ~16TB.
73 +
74 + - L1D conditional flushing on VMENTER when EPT is enabled for
75 + a guest.
76 +diff --git a/Makefile b/Makefile
77 +index 7a2a9a175756..84d2f8deea30 100644
78 +--- a/Makefile
79 ++++ b/Makefile
80 +@@ -1,7 +1,7 @@
81 + # SPDX-License-Identifier: GPL-2.0
82 + VERSION = 4
83 + PATCHLEVEL = 20
84 +-SUBLEVEL = 0
85 ++SUBLEVEL = 1
86 + EXTRAVERSION =
87 + NAME = Shy Crocodile
88 +
89 +diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
90 +index 6dd783557330..dadb494d83fd 100644
91 +--- a/arch/arc/Kconfig
92 ++++ b/arch/arc/Kconfig
93 +@@ -26,6 +26,7 @@ config ARC
94 + select GENERIC_IRQ_SHOW
95 + select GENERIC_PCI_IOMAP
96 + select GENERIC_PENDING_IRQ if SMP
97 ++ select GENERIC_SCHED_CLOCK
98 + select GENERIC_SMP_IDLE_THREAD
99 + select HAVE_ARCH_KGDB
100 + select HAVE_ARCH_TRACEHOOK
101 +diff --git a/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi b/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi
102 +index 03611d50c5a9..e84544b220b9 100644
103 +--- a/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi
104 ++++ b/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi
105 +@@ -26,8 +26,7 @@
106 + "Speakers", "SPKL",
107 + "Speakers", "SPKR";
108 +
109 +- assigned-clocks = <&i2s0 CLK_I2S_RCLK_SRC>,
110 +- <&clock CLK_MOUT_EPLL>,
111 ++ assigned-clocks = <&clock CLK_MOUT_EPLL>,
112 + <&clock CLK_MOUT_MAU_EPLL>,
113 + <&clock CLK_MOUT_USER_MAU_EPLL>,
114 + <&clock_audss EXYNOS_MOUT_AUDSS>,
115 +@@ -36,15 +35,13 @@
116 + <&clock_audss EXYNOS_DOUT_AUD_BUS>,
117 + <&clock_audss EXYNOS_DOUT_I2S>;
118 +
119 +- assigned-clock-parents = <&clock_audss EXYNOS_SCLK_I2S>,
120 +- <&clock CLK_FOUT_EPLL>,
121 ++ assigned-clock-parents = <&clock CLK_FOUT_EPLL>,
122 + <&clock CLK_MOUT_EPLL>,
123 + <&clock CLK_MOUT_MAU_EPLL>,
124 + <&clock CLK_MAU_EPLL>,
125 + <&clock_audss EXYNOS_MOUT_AUDSS>;
126 +
127 + assigned-clock-rates = <0>,
128 +- <0>,
129 + <0>,
130 + <0>,
131 + <0>,
132 +@@ -84,4 +81,6 @@
133 +
134 + &i2s0 {
135 + status = "okay";
136 ++ assigned-clocks = <&i2s0 CLK_I2S_RCLK_SRC>;
137 ++ assigned-clock-parents = <&clock_audss EXYNOS_SCLK_I2S>;
138 + };
139 +diff --git a/arch/arm/boot/dts/exynos5422-odroidxu4.dts b/arch/arm/boot/dts/exynos5422-odroidxu4.dts
140 +index 4a30cc849b00..122174ea9e0a 100644
141 +--- a/arch/arm/boot/dts/exynos5422-odroidxu4.dts
142 ++++ b/arch/arm/boot/dts/exynos5422-odroidxu4.dts
143 +@@ -33,8 +33,7 @@
144 + compatible = "samsung,odroid-xu3-audio";
145 + model = "Odroid-XU4";
146 +
147 +- assigned-clocks = <&i2s0 CLK_I2S_RCLK_SRC>,
148 +- <&clock CLK_MOUT_EPLL>,
149 ++ assigned-clocks = <&clock CLK_MOUT_EPLL>,
150 + <&clock CLK_MOUT_MAU_EPLL>,
151 + <&clock CLK_MOUT_USER_MAU_EPLL>,
152 + <&clock_audss EXYNOS_MOUT_AUDSS>,
153 +@@ -43,15 +42,13 @@
154 + <&clock_audss EXYNOS_DOUT_AUD_BUS>,
155 + <&clock_audss EXYNOS_DOUT_I2S>;
156 +
157 +- assigned-clock-parents = <&clock_audss EXYNOS_SCLK_I2S>,
158 +- <&clock CLK_FOUT_EPLL>,
159 ++ assigned-clock-parents = <&clock CLK_FOUT_EPLL>,
160 + <&clock CLK_MOUT_EPLL>,
161 + <&clock CLK_MOUT_MAU_EPLL>,
162 + <&clock CLK_MAU_EPLL>,
163 + <&clock_audss EXYNOS_MOUT_AUDSS>;
164 +
165 + assigned-clock-rates = <0>,
166 +- <0>,
167 + <0>,
168 + <0>,
169 + <0>,
170 +@@ -79,6 +76,8 @@
171 +
172 + &i2s0 {
173 + status = "okay";
174 ++ assigned-clocks = <&i2s0 CLK_I2S_RCLK_SRC>;
175 ++ assigned-clock-parents = <&clock_audss EXYNOS_SCLK_I2S>;
176 + };
177 +
178 + &pwm {
179 +diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
180 +index 6f602af5263c..2dafd936d84d 100644
181 +--- a/arch/arm64/include/asm/kvm_arm.h
182 ++++ b/arch/arm64/include/asm/kvm_arm.h
183 +@@ -104,7 +104,7 @@
184 + TCR_EL2_ORGN0_MASK | TCR_EL2_IRGN0_MASK | TCR_EL2_T0SZ_MASK)
185 +
186 + /* VTCR_EL2 Registers bits */
187 +-#define VTCR_EL2_RES1 (1 << 31)
188 ++#define VTCR_EL2_RES1 (1U << 31)
189 + #define VTCR_EL2_HD (1 << 22)
190 + #define VTCR_EL2_HA (1 << 21)
191 + #define VTCR_EL2_PS_SHIFT TCR_EL2_PS_SHIFT
192 +diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
193 +index b13ca091f833..85d5c1026204 100644
194 +--- a/arch/arm64/include/asm/unistd.h
195 ++++ b/arch/arm64/include/asm/unistd.h
196 +@@ -40,8 +40,9 @@
197 + * The following SVCs are ARM private.
198 + */
199 + #define __ARM_NR_COMPAT_BASE 0x0f0000
200 +-#define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE+2)
201 +-#define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE+5)
202 ++#define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE + 2)
203 ++#define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE + 5)
204 ++#define __ARM_NR_COMPAT_END (__ARM_NR_COMPAT_BASE + 0x800)
205 +
206 + #define __NR_compat_syscalls 399
207 + #endif
208 +diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c
209 +index 32653d156747..bc348ab3dd6b 100644
210 +--- a/arch/arm64/kernel/sys_compat.c
211 ++++ b/arch/arm64/kernel/sys_compat.c
212 +@@ -66,12 +66,11 @@ do_compat_cache_op(unsigned long start, unsigned long end, int flags)
213 + /*
214 + * Handle all unrecognised system calls.
215 + */
216 +-long compat_arm_syscall(struct pt_regs *regs)
217 ++long compat_arm_syscall(struct pt_regs *regs, int scno)
218 + {
219 +- unsigned int no = regs->regs[7];
220 + void __user *addr;
221 +
222 +- switch (no) {
223 ++ switch (scno) {
224 + /*
225 + * Flush a region from virtual address 'r0' to virtual address 'r1'
226 + * _exclusive_. There is no alignment requirement on either address;
227 +@@ -102,12 +101,12 @@ long compat_arm_syscall(struct pt_regs *regs)
228 +
229 + default:
230 + /*
231 +- * Calls 9f00xx..9f07ff are defined to return -ENOSYS
232 ++ * Calls 0xf0xxx..0xf07ff are defined to return -ENOSYS
233 + * if not implemented, rather than raising SIGILL. This
234 + * way the calling program can gracefully determine whether
235 + * a feature is supported.
236 + */
237 +- if ((no & 0xffff) <= 0x7ff)
238 ++ if (scno < __ARM_NR_COMPAT_END)
239 + return -ENOSYS;
240 + break;
241 + }
242 +@@ -116,6 +115,6 @@ long compat_arm_syscall(struct pt_regs *regs)
243 + (compat_thumb_mode(regs) ? 2 : 4);
244 +
245 + arm64_notify_die("Oops - bad compat syscall(2)", regs,
246 +- SIGILL, ILL_ILLTRP, addr, no);
247 ++ SIGILL, ILL_ILLTRP, addr, scno);
248 + return 0;
249 + }
250 +diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
251 +index 032d22312881..5610ac01c1ec 100644
252 +--- a/arch/arm64/kernel/syscall.c
253 ++++ b/arch/arm64/kernel/syscall.c
254 +@@ -13,16 +13,15 @@
255 + #include <asm/thread_info.h>
256 + #include <asm/unistd.h>
257 +
258 +-long compat_arm_syscall(struct pt_regs *regs);
259 +-
260 ++long compat_arm_syscall(struct pt_regs *regs, int scno);
261 + long sys_ni_syscall(void);
262 +
263 +-asmlinkage long do_ni_syscall(struct pt_regs *regs)
264 ++static long do_ni_syscall(struct pt_regs *regs, int scno)
265 + {
266 + #ifdef CONFIG_COMPAT
267 + long ret;
268 + if (is_compat_task()) {
269 +- ret = compat_arm_syscall(regs);
270 ++ ret = compat_arm_syscall(regs, scno);
271 + if (ret != -ENOSYS)
272 + return ret;
273 + }
274 +@@ -47,7 +46,7 @@ static void invoke_syscall(struct pt_regs *regs, unsigned int scno,
275 + syscall_fn = syscall_table[array_index_nospec(scno, sc_nr)];
276 + ret = __invoke_syscall(regs, syscall_fn);
277 + } else {
278 +- ret = do_ni_syscall(regs);
279 ++ ret = do_ni_syscall(regs, scno);
280 + }
281 +
282 + regs->regs[0] = ret;
283 +diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c
284 +index 4dbd9c69a96d..7fcc9c1a5f45 100644
285 +--- a/arch/arm64/kvm/hyp/tlb.c
286 ++++ b/arch/arm64/kvm/hyp/tlb.c
287 +@@ -15,14 +15,19 @@
288 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
289 + */
290 +
291 ++#include <linux/irqflags.h>
292 ++
293 + #include <asm/kvm_hyp.h>
294 + #include <asm/kvm_mmu.h>
295 + #include <asm/tlbflush.h>
296 +
297 +-static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm)
298 ++static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm,
299 ++ unsigned long *flags)
300 + {
301 + u64 val;
302 +
303 ++ local_irq_save(*flags);
304 ++
305 + /*
306 + * With VHE enabled, we have HCR_EL2.{E2H,TGE} = {1,1}, and
307 + * most TLB operations target EL2/EL0. In order to affect the
308 +@@ -37,7 +42,8 @@ static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm)
309 + isb();
310 + }
311 +
312 +-static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm)
313 ++static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm,
314 ++ unsigned long *flags)
315 + {
316 + __load_guest_stage2(kvm);
317 + isb();
318 +@@ -48,7 +54,8 @@ static hyp_alternate_select(__tlb_switch_to_guest,
319 + __tlb_switch_to_guest_vhe,
320 + ARM64_HAS_VIRT_HOST_EXTN);
321 +
322 +-static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm)
323 ++static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm,
324 ++ unsigned long flags)
325 + {
326 + /*
327 + * We're done with the TLB operation, let's restore the host's
328 +@@ -56,9 +63,12 @@ static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm)
329 + */
330 + write_sysreg(0, vttbr_el2);
331 + write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
332 ++ isb();
333 ++ local_irq_restore(flags);
334 + }
335 +
336 +-static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm)
337 ++static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm,
338 ++ unsigned long flags)
339 + {
340 + write_sysreg(0, vttbr_el2);
341 + }
342 +@@ -70,11 +80,13 @@ static hyp_alternate_select(__tlb_switch_to_host,
343 +
344 + void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
345 + {
346 ++ unsigned long flags;
347 ++
348 + dsb(ishst);
349 +
350 + /* Switch to requested VMID */
351 + kvm = kern_hyp_va(kvm);
352 +- __tlb_switch_to_guest()(kvm);
353 ++ __tlb_switch_to_guest()(kvm, &flags);
354 +
355 + /*
356 + * We could do so much better if we had the VA as well.
357 +@@ -117,36 +129,39 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
358 + if (!has_vhe() && icache_is_vpipt())
359 + __flush_icache_all();
360 +
361 +- __tlb_switch_to_host()(kvm);
362 ++ __tlb_switch_to_host()(kvm, flags);
363 + }
364 +
365 + void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
366 + {
367 ++ unsigned long flags;
368 ++
369 + dsb(ishst);
370 +
371 + /* Switch to requested VMID */
372 + kvm = kern_hyp_va(kvm);
373 +- __tlb_switch_to_guest()(kvm);
374 ++ __tlb_switch_to_guest()(kvm, &flags);
375 +
376 + __tlbi(vmalls12e1is);
377 + dsb(ish);
378 + isb();
379 +
380 +- __tlb_switch_to_host()(kvm);
381 ++ __tlb_switch_to_host()(kvm, flags);
382 + }
383 +
384 + void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
385 + {
386 + struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
387 ++ unsigned long flags;
388 +
389 + /* Switch to requested VMID */
390 +- __tlb_switch_to_guest()(kvm);
391 ++ __tlb_switch_to_guest()(kvm, &flags);
392 +
393 + __tlbi(vmalle1);
394 + dsb(nsh);
395 + isb();
396 +
397 +- __tlb_switch_to_host()(kvm);
398 ++ __tlb_switch_to_host()(kvm, flags);
399 + }
400 +
401 + void __hyp_text __kvm_flush_vm_context(void)
402 +diff --git a/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c b/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c
403 +index 37fe58c19a90..542c3ede9722 100644
404 +--- a/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c
405 ++++ b/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c
406 +@@ -13,6 +13,7 @@
407 + #include <stdint.h>
408 + #include <stdio.h>
409 + #include <stdlib.h>
410 ++#include "../../../../include/linux/sizes.h"
411 +
412 + int main(int argc, char *argv[])
413 + {
414 +@@ -45,11 +46,11 @@ int main(int argc, char *argv[])
415 + vmlinuz_load_addr = vmlinux_load_addr + vmlinux_size;
416 +
417 + /*
418 +- * Align with 16 bytes: "greater than that used for any standard data
419 +- * types by a MIPS compiler." -- See MIPS Run Linux (Second Edition).
420 ++ * Align with 64KB: KEXEC needs load sections to be aligned to PAGE_SIZE,
421 ++ * which may be as large as 64KB depending on the kernel configuration.
422 + */
423 +
424 +- vmlinuz_load_addr += (16 - vmlinux_size % 16);
425 ++ vmlinuz_load_addr += (SZ_64K - vmlinux_size % SZ_64K);
426 +
427 + printf("0x%llx\n", vmlinuz_load_addr);
428 +
429 +diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper.c b/arch/mips/cavium-octeon/executive/cvmx-helper.c
430 +index 6c79e8a16a26..3ddbb98dff84 100644
431 +--- a/arch/mips/cavium-octeon/executive/cvmx-helper.c
432 ++++ b/arch/mips/cavium-octeon/executive/cvmx-helper.c
433 +@@ -286,7 +286,8 @@ static cvmx_helper_interface_mode_t __cvmx_get_mode_cn7xxx(int interface)
434 + case 3:
435 + return CVMX_HELPER_INTERFACE_MODE_LOOP;
436 + case 4:
437 +- return CVMX_HELPER_INTERFACE_MODE_RGMII;
438 ++ /* TODO: Implement support for AGL (RGMII). */
439 ++ return CVMX_HELPER_INTERFACE_MODE_DISABLED;
440 + default:
441 + return CVMX_HELPER_INTERFACE_MODE_DISABLED;
442 + }
443 +diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
444 +index d4ea7a5b60cf..9e805317847d 100644
445 +--- a/arch/mips/include/asm/atomic.h
446 ++++ b/arch/mips/include/asm/atomic.h
447 +@@ -306,7 +306,7 @@ static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v) \
448 + { \
449 + long result; \
450 + \
451 +- if (kernel_uses_llsc && R10000_LLSC_WAR) { \
452 ++ if (kernel_uses_llsc) { \
453 + long temp; \
454 + \
455 + __asm__ __volatile__( \
456 +diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h
457 +index a41059d47d31..ed7ffe4e63a3 100644
458 +--- a/arch/mips/include/asm/cpu-info.h
459 ++++ b/arch/mips/include/asm/cpu-info.h
460 +@@ -50,7 +50,7 @@ struct guest_info {
461 + #define MIPS_CACHE_PINDEX 0x00000020 /* Physically indexed cache */
462 +
463 + struct cpuinfo_mips {
464 +- unsigned long asid_cache;
465 ++ u64 asid_cache;
466 + #ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
467 + unsigned long asid_mask;
468 + #endif
469 +diff --git a/arch/mips/include/asm/mach-loongson64/mmzone.h b/arch/mips/include/asm/mach-loongson64/mmzone.h
470 +index c9f7e231e66b..59c8b11c090e 100644
471 +--- a/arch/mips/include/asm/mach-loongson64/mmzone.h
472 ++++ b/arch/mips/include/asm/mach-loongson64/mmzone.h
473 +@@ -21,6 +21,7 @@
474 + #define NODE3_ADDRSPACE_OFFSET 0x300000000000UL
475 +
476 + #define pa_to_nid(addr) (((addr) & 0xf00000000000) >> NODE_ADDRSPACE_SHIFT)
477 ++#define nid_to_addrbase(nid) ((nid) << NODE_ADDRSPACE_SHIFT)
478 +
479 + #define LEVELS_PER_SLICE 128
480 +
481 +diff --git a/arch/mips/include/asm/mmu.h b/arch/mips/include/asm/mmu.h
482 +index 0740be7d5d4a..24d6b42345fb 100644
483 +--- a/arch/mips/include/asm/mmu.h
484 ++++ b/arch/mips/include/asm/mmu.h
485 +@@ -7,7 +7,7 @@
486 + #include <linux/wait.h>
487 +
488 + typedef struct {
489 +- unsigned long asid[NR_CPUS];
490 ++ u64 asid[NR_CPUS];
491 + void *vdso;
492 + atomic_t fp_mode_switching;
493 +
494 +diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
495 +index 94414561de0e..a589585be21b 100644
496 +--- a/arch/mips/include/asm/mmu_context.h
497 ++++ b/arch/mips/include/asm/mmu_context.h
498 +@@ -76,14 +76,14 @@ extern unsigned long pgd_current[];
499 + * All unused by hardware upper bits will be considered
500 + * as a software asid extension.
501 + */
502 +-static unsigned long asid_version_mask(unsigned int cpu)
503 ++static inline u64 asid_version_mask(unsigned int cpu)
504 + {
505 + unsigned long asid_mask = cpu_asid_mask(&cpu_data[cpu]);
506 +
507 +- return ~(asid_mask | (asid_mask - 1));
508 ++ return ~(u64)(asid_mask | (asid_mask - 1));
509 + }
510 +
511 +-static unsigned long asid_first_version(unsigned int cpu)
512 ++static inline u64 asid_first_version(unsigned int cpu)
513 + {
514 + return ~asid_version_mask(cpu) + 1;
515 + }
516 +@@ -102,14 +102,12 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
517 + static inline void
518 + get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
519 + {
520 +- unsigned long asid = asid_cache(cpu);
521 ++ u64 asid = asid_cache(cpu);
522 +
523 + if (!((asid += cpu_asid_inc()) & cpu_asid_mask(&cpu_data[cpu]))) {
524 + if (cpu_has_vtag_icache)
525 + flush_icache_all();
526 + local_flush_tlb_all(); /* start new asid cycle */
527 +- if (!asid) /* fix version if needed */
528 +- asid = asid_first_version(cpu);
529 + }
530 +
531 + cpu_context(cpu, mm) = asid_cache(cpu) = asid;
532 +diff --git a/arch/mips/include/asm/mmzone.h b/arch/mips/include/asm/mmzone.h
533 +index f085fba41da5..b826b8473e95 100644
534 +--- a/arch/mips/include/asm/mmzone.h
535 ++++ b/arch/mips/include/asm/mmzone.h
536 +@@ -7,7 +7,18 @@
537 + #define _ASM_MMZONE_H_
538 +
539 + #include <asm/page.h>
540 +-#include <mmzone.h>
541 ++
542 ++#ifdef CONFIG_NEED_MULTIPLE_NODES
543 ++# include <mmzone.h>
544 ++#endif
545 ++
546 ++#ifndef pa_to_nid
547 ++#define pa_to_nid(addr) 0
548 ++#endif
549 ++
550 ++#ifndef nid_to_addrbase
551 ++#define nid_to_addrbase(nid) 0
552 ++#endif
553 +
554 + #ifdef CONFIG_DISCONTIGMEM
555 +
556 +diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h
557 +index 0036ea0c7173..93a9dce31f25 100644
558 +--- a/arch/mips/include/asm/pgtable-64.h
559 ++++ b/arch/mips/include/asm/pgtable-64.h
560 +@@ -265,6 +265,11 @@ static inline int pmd_bad(pmd_t pmd)
561 +
562 + static inline int pmd_present(pmd_t pmd)
563 + {
564 ++#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
565 ++ if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
566 ++ return pmd_val(pmd) & _PAGE_PRESENT;
567 ++#endif
568 ++
569 + return pmd_val(pmd) != (unsigned long) invalid_pte_table;
570 + }
571 +
572 +diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h
573 +index d19b2d65336b..7f4a32d3345a 100644
574 +--- a/arch/mips/include/asm/r4kcache.h
575 ++++ b/arch/mips/include/asm/r4kcache.h
576 +@@ -20,6 +20,7 @@
577 + #include <asm/cpu-features.h>
578 + #include <asm/cpu-type.h>
579 + #include <asm/mipsmtregs.h>
580 ++#include <asm/mmzone.h>
581 + #include <linux/uaccess.h> /* for uaccess_kernel() */
582 +
583 + extern void (*r4k_blast_dcache)(void);
584 +@@ -674,4 +675,25 @@ __BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , )
585 + __BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , )
586 + __BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, , )
587 +
588 ++/* Currently, this is very specific to Loongson-3 */
589 ++#define __BUILD_BLAST_CACHE_NODE(pfx, desc, indexop, hitop, lsize) \
590 ++static inline void blast_##pfx##cache##lsize##_node(long node) \
591 ++{ \
592 ++ unsigned long start = CAC_BASE | nid_to_addrbase(node); \
593 ++ unsigned long end = start + current_cpu_data.desc.waysize; \
594 ++ unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit; \
595 ++ unsigned long ws_end = current_cpu_data.desc.ways << \
596 ++ current_cpu_data.desc.waybit; \
597 ++ unsigned long ws, addr; \
598 ++ \
599 ++ for (ws = 0; ws < ws_end; ws += ws_inc) \
600 ++ for (addr = start; addr < end; addr += lsize * 32) \
601 ++ cache##lsize##_unroll32(addr|ws, indexop); \
602 ++}
603 ++
604 ++__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16)
605 ++__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32)
606 ++__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64)
607 ++__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128)
608 ++
609 + #endif /* _ASM_R4KCACHE_H */
610 +diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c
611 +index 48a9c6b90e07..9df3ebdc7b0f 100644
612 +--- a/arch/mips/kernel/vdso.c
613 ++++ b/arch/mips/kernel/vdso.c
614 +@@ -126,8 +126,8 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
615 +
616 + /* Map delay slot emulation page */
617 + base = mmap_region(NULL, STACK_TOP, PAGE_SIZE,
618 +- VM_READ|VM_WRITE|VM_EXEC|
619 +- VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
620 ++ VM_READ | VM_EXEC |
621 ++ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
622 + 0, NULL);
623 + if (IS_ERR_VALUE(base)) {
624 + ret = base;
625 +diff --git a/arch/mips/math-emu/dsemul.c b/arch/mips/math-emu/dsemul.c
626 +index 5450f4d1c920..e2d46cb93ca9 100644
627 +--- a/arch/mips/math-emu/dsemul.c
628 ++++ b/arch/mips/math-emu/dsemul.c
629 +@@ -214,8 +214,9 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir,
630 + {
631 + int isa16 = get_isa16_mode(regs->cp0_epc);
632 + mips_instruction break_math;
633 +- struct emuframe __user *fr;
634 +- int err, fr_idx;
635 ++ unsigned long fr_uaddr;
636 ++ struct emuframe fr;
637 ++ int fr_idx, ret;
638 +
639 + /* NOP is easy */
640 + if (ir == 0)
641 +@@ -250,27 +251,31 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir,
642 + fr_idx = alloc_emuframe();
643 + if (fr_idx == BD_EMUFRAME_NONE)
644 + return SIGBUS;
645 +- fr = &dsemul_page()[fr_idx];
646 +
647 + /* Retrieve the appropriately encoded break instruction */
648 + break_math = BREAK_MATH(isa16);
649 +
650 + /* Write the instructions to the frame */
651 + if (isa16) {
652 +- err = __put_user(ir >> 16,
653 +- (u16 __user *)(&fr->emul));
654 +- err |= __put_user(ir & 0xffff,
655 +- (u16 __user *)((long)(&fr->emul) + 2));
656 +- err |= __put_user(break_math >> 16,
657 +- (u16 __user *)(&fr->badinst));
658 +- err |= __put_user(break_math & 0xffff,
659 +- (u16 __user *)((long)(&fr->badinst) + 2));
660 ++ union mips_instruction _emul = {
661 ++ .halfword = { ir >> 16, ir }
662 ++ };
663 ++ union mips_instruction _badinst = {
664 ++ .halfword = { break_math >> 16, break_math }
665 ++ };
666 ++
667 ++ fr.emul = _emul.word;
668 ++ fr.badinst = _badinst.word;
669 + } else {
670 +- err = __put_user(ir, &fr->emul);
671 +- err |= __put_user(break_math, &fr->badinst);
672 ++ fr.emul = ir;
673 ++ fr.badinst = break_math;
674 + }
675 +
676 +- if (unlikely(err)) {
677 ++ /* Write the frame to user memory */
678 ++ fr_uaddr = (unsigned long)&dsemul_page()[fr_idx];
679 ++ ret = access_process_vm(current, fr_uaddr, &fr, sizeof(fr),
680 ++ FOLL_FORCE | FOLL_WRITE);
681 ++ if (unlikely(ret != sizeof(fr))) {
682 + MIPS_FPU_EMU_INC_STATS(errors);
683 + free_emuframe(fr_idx, current->mm);
684 + return SIGBUS;
685 +@@ -282,10 +287,7 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir,
686 + atomic_set(&current->thread.bd_emu_frame, fr_idx);
687 +
688 + /* Change user register context to execute the frame */
689 +- regs->cp0_epc = (unsigned long)&fr->emul | isa16;
690 +-
691 +- /* Ensure the icache observes our newly written frame */
692 +- flush_cache_sigtramp((unsigned long)&fr->emul);
693 ++ regs->cp0_epc = fr_uaddr | isa16;
694 +
695 + return 0;
696 + }
697 +diff --git a/arch/mips/mm/c-r3k.c b/arch/mips/mm/c-r3k.c
698 +index 3466fcdae0ca..01848cdf2074 100644
699 +--- a/arch/mips/mm/c-r3k.c
700 ++++ b/arch/mips/mm/c-r3k.c
701 +@@ -245,7 +245,7 @@ static void r3k_flush_cache_page(struct vm_area_struct *vma,
702 + pmd_t *pmdp;
703 + pte_t *ptep;
704 +
705 +- pr_debug("cpage[%08lx,%08lx]\n",
706 ++ pr_debug("cpage[%08llx,%08lx]\n",
707 + cpu_context(smp_processor_id(), mm), addr);
708 +
709 + /* No ASID => no such page in the cache. */
710 +diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
711 +index 05bd77727fb9..2a6ad461286f 100644
712 +--- a/arch/mips/mm/c-r4k.c
713 ++++ b/arch/mips/mm/c-r4k.c
714 +@@ -459,11 +459,28 @@ static void r4k_blast_scache_setup(void)
715 + r4k_blast_scache = blast_scache128;
716 + }
717 +
718 ++static void (*r4k_blast_scache_node)(long node);
719 ++
720 ++static void r4k_blast_scache_node_setup(void)
721 ++{
722 ++ unsigned long sc_lsize = cpu_scache_line_size();
723 ++
724 ++ if (current_cpu_type() != CPU_LOONGSON3)
725 ++ r4k_blast_scache_node = (void *)cache_noop;
726 ++ else if (sc_lsize == 16)
727 ++ r4k_blast_scache_node = blast_scache16_node;
728 ++ else if (sc_lsize == 32)
729 ++ r4k_blast_scache_node = blast_scache32_node;
730 ++ else if (sc_lsize == 64)
731 ++ r4k_blast_scache_node = blast_scache64_node;
732 ++ else if (sc_lsize == 128)
733 ++ r4k_blast_scache_node = blast_scache128_node;
734 ++}
735 ++
736 + static inline void local_r4k___flush_cache_all(void * args)
737 + {
738 + switch (current_cpu_type()) {
739 + case CPU_LOONGSON2:
740 +- case CPU_LOONGSON3:
741 + case CPU_R4000SC:
742 + case CPU_R4000MC:
743 + case CPU_R4400SC:
744 +@@ -480,6 +497,11 @@ static inline void local_r4k___flush_cache_all(void * args)
745 + r4k_blast_scache();
746 + break;
747 +
748 ++ case CPU_LOONGSON3:
749 ++ /* Use get_ebase_cpunum() for both NUMA=y/n */
750 ++ r4k_blast_scache_node(get_ebase_cpunum() >> 2);
751 ++ break;
752 ++
753 + case CPU_BMIPS5000:
754 + r4k_blast_scache();
755 + __sync();
756 +@@ -840,10 +862,14 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
757 +
758 + preempt_disable();
759 + if (cpu_has_inclusive_pcaches) {
760 +- if (size >= scache_size)
761 +- r4k_blast_scache();
762 +- else
763 ++ if (size >= scache_size) {
764 ++ if (current_cpu_type() != CPU_LOONGSON3)
765 ++ r4k_blast_scache();
766 ++ else
767 ++ r4k_blast_scache_node(pa_to_nid(addr));
768 ++ } else {
769 + blast_scache_range(addr, addr + size);
770 ++ }
771 + preempt_enable();
772 + __sync();
773 + return;
774 +@@ -877,9 +903,12 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
775 +
776 + preempt_disable();
777 + if (cpu_has_inclusive_pcaches) {
778 +- if (size >= scache_size)
779 +- r4k_blast_scache();
780 +- else {
781 ++ if (size >= scache_size) {
782 ++ if (current_cpu_type() != CPU_LOONGSON3)
783 ++ r4k_blast_scache();
784 ++ else
785 ++ r4k_blast_scache_node(pa_to_nid(addr));
786 ++ } else {
787 + /*
788 + * There is no clearly documented alignment requirement
789 + * for the cache instruction on MIPS processors and
790 +@@ -1918,6 +1947,7 @@ void r4k_cache_init(void)
791 + r4k_blast_scache_page_setup();
792 + r4k_blast_scache_page_indexed_setup();
793 + r4k_blast_scache_setup();
794 ++ r4k_blast_scache_node_setup();
795 + #ifdef CONFIG_EVA
796 + r4k_blast_dcache_user_page_setup();
797 + r4k_blast_icache_user_page_setup();
798 +diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
799 +index 2d7cffcaa476..059187a3ded7 100644
800 +--- a/arch/parisc/mm/init.c
801 ++++ b/arch/parisc/mm/init.c
802 +@@ -512,8 +512,8 @@ static void __init map_pages(unsigned long start_vaddr,
803 +
804 + void __init set_kernel_text_rw(int enable_read_write)
805 + {
806 +- unsigned long start = (unsigned long)__init_begin;
807 +- unsigned long end = (unsigned long)_etext;
808 ++ unsigned long start = (unsigned long) _text;
809 ++ unsigned long end = (unsigned long) &data_start;
810 +
811 + map_pages(start, __pa(start), end-start,
812 + PAGE_KERNEL_RWX, enable_read_write ? 1:0);
813 +diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
814 +index f6f469fc4073..1b395b85132b 100644
815 +--- a/arch/powerpc/kernel/security.c
816 ++++ b/arch/powerpc/kernel/security.c
817 +@@ -22,7 +22,7 @@ enum count_cache_flush_type {
818 + COUNT_CACHE_FLUSH_SW = 0x2,
819 + COUNT_CACHE_FLUSH_HW = 0x4,
820 + };
821 +-static enum count_cache_flush_type count_cache_flush_type;
822 ++static enum count_cache_flush_type count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;
823 +
824 + bool barrier_nospec_enabled;
825 + static bool no_nospec;
826 +diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
827 +index e6474a45cef5..6327fd79b0fb 100644
828 +--- a/arch/powerpc/kernel/signal_32.c
829 ++++ b/arch/powerpc/kernel/signal_32.c
830 +@@ -1140,11 +1140,11 @@ SYSCALL_DEFINE0(rt_sigreturn)
831 + {
832 + struct rt_sigframe __user *rt_sf;
833 + struct pt_regs *regs = current_pt_regs();
834 ++ int tm_restore = 0;
835 + #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
836 + struct ucontext __user *uc_transact;
837 + unsigned long msr_hi;
838 + unsigned long tmp;
839 +- int tm_restore = 0;
840 + #endif
841 + /* Always make any pending restarted system calls return -EINTR */
842 + current->restart_block.fn = do_no_restart_syscall;
843 +@@ -1192,11 +1192,19 @@ SYSCALL_DEFINE0(rt_sigreturn)
844 + goto bad;
845 + }
846 + }
847 +- if (!tm_restore)
848 +- /* Fall through, for non-TM restore */
849 ++ if (!tm_restore) {
850 ++ /*
851 ++ * Unset regs->msr because ucontext MSR TS is not
852 ++ * set, and recheckpoint was not called. This avoid
853 ++ * hitting a TM Bad thing at RFID
854 ++ */
855 ++ regs->msr &= ~MSR_TS_MASK;
856 ++ }
857 ++ /* Fall through, for non-TM restore */
858 + #endif
859 +- if (do_setcontext(&rt_sf->uc, regs, 1))
860 +- goto bad;
861 ++ if (!tm_restore)
862 ++ if (do_setcontext(&rt_sf->uc, regs, 1))
863 ++ goto bad;
864 +
865 + /*
866 + * It's not clear whether or why it is desirable to save the
867 +diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
868 +index 83d51bf586c7..daa28cb72272 100644
869 +--- a/arch/powerpc/kernel/signal_64.c
870 ++++ b/arch/powerpc/kernel/signal_64.c
871 +@@ -740,11 +740,23 @@ SYSCALL_DEFINE0(rt_sigreturn)
872 + &uc_transact->uc_mcontext))
873 + goto badframe;
874 + }
875 +- else
876 +- /* Fall through, for non-TM restore */
877 + #endif
878 +- if (restore_sigcontext(current, NULL, 1, &uc->uc_mcontext))
879 +- goto badframe;
880 ++ /* Fall through, for non-TM restore */
881 ++ if (!MSR_TM_ACTIVE(msr)) {
882 ++ /*
883 ++ * Unset MSR[TS] on the thread regs since MSR from user
884 ++ * context does not have MSR active, and recheckpoint was
885 ++ * not called since restore_tm_sigcontexts() was not called
886 ++ * also.
887 ++ *
888 ++ * If not unsetting it, the code can RFID to userspace with
889 ++ * MSR[TS] set, but without CPU in the proper state,
890 ++ * causing a TM bad thing.
891 ++ */
892 ++ current->thread.regs->msr &= ~MSR_TS_MASK;
893 ++ if (restore_sigcontext(current, NULL, 1, &uc->uc_mcontext))
894 ++ goto badframe;
895 ++ }
896 +
897 + if (restore_altstack(&uc->uc_stack))
898 + goto badframe;
899 +diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
900 +index c615617e78ac..a18afda3d0f0 100644
901 +--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
902 ++++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
903 +@@ -743,12 +743,15 @@ void kvmppc_rmap_reset(struct kvm *kvm)
904 + srcu_idx = srcu_read_lock(&kvm->srcu);
905 + slots = kvm_memslots(kvm);
906 + kvm_for_each_memslot(memslot, slots) {
907 ++ /* Mutual exclusion with kvm_unmap_hva_range etc. */
908 ++ spin_lock(&kvm->mmu_lock);
909 + /*
910 + * This assumes it is acceptable to lose reference and
911 + * change bits across a reset.
912 + */
913 + memset(memslot->arch.rmap, 0,
914 + memslot->npages * sizeof(*memslot->arch.rmap));
915 ++ spin_unlock(&kvm->mmu_lock);
916 + }
917 + srcu_read_unlock(&kvm->srcu, srcu_idx);
918 + }
919 +diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
920 +index a56f8413758a..ab43306c4ea1 100644
921 +--- a/arch/powerpc/kvm/book3s_hv.c
922 ++++ b/arch/powerpc/kvm/book3s_hv.c
923 +@@ -4532,12 +4532,15 @@ int kvmppc_switch_mmu_to_hpt(struct kvm *kvm)
924 + {
925 + if (nesting_enabled(kvm))
926 + kvmhv_release_all_nested(kvm);
927 ++ kvmppc_rmap_reset(kvm);
928 ++ kvm->arch.process_table = 0;
929 ++ /* Mutual exclusion with kvm_unmap_hva_range etc. */
930 ++ spin_lock(&kvm->mmu_lock);
931 ++ kvm->arch.radix = 0;
932 ++ spin_unlock(&kvm->mmu_lock);
933 + kvmppc_free_radix(kvm);
934 + kvmppc_update_lpcr(kvm, LPCR_VPM1,
935 + LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR);
936 +- kvmppc_rmap_reset(kvm);
937 +- kvm->arch.radix = 0;
938 +- kvm->arch.process_table = 0;
939 + return 0;
940 + }
941 +
942 +@@ -4549,12 +4552,14 @@ int kvmppc_switch_mmu_to_radix(struct kvm *kvm)
943 + err = kvmppc_init_vm_radix(kvm);
944 + if (err)
945 + return err;
946 +-
947 ++ kvmppc_rmap_reset(kvm);
948 ++ /* Mutual exclusion with kvm_unmap_hva_range etc. */
949 ++ spin_lock(&kvm->mmu_lock);
950 ++ kvm->arch.radix = 1;
951 ++ spin_unlock(&kvm->mmu_lock);
952 + kvmppc_free_hpt(&kvm->arch.hpt);
953 + kvmppc_update_lpcr(kvm, LPCR_UPRT | LPCR_GTSE | LPCR_HR,
954 + LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR);
955 +- kvmppc_rmap_reset(kvm);
956 +- kvm->arch.radix = 1;
957 + return 0;
958 + }
959 +
960 +diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
961 +index 19b2d2a9b43d..eeb7450db18c 100644
962 +--- a/arch/s390/pci/pci_clp.c
963 ++++ b/arch/s390/pci/pci_clp.c
964 +@@ -436,7 +436,7 @@ int clp_get_state(u32 fid, enum zpci_state *state)
965 + struct clp_state_data sd = {fid, ZPCI_FN_STATE_RESERVED};
966 + int rc;
967 +
968 +- rrb = clp_alloc_block(GFP_KERNEL);
969 ++ rrb = clp_alloc_block(GFP_ATOMIC);
970 + if (!rrb)
971 + return -ENOMEM;
972 +
973 +diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
974 +index fbda5a917c5b..e5c0174e330e 100644
975 +--- a/arch/x86/include/asm/kvm_host.h
976 ++++ b/arch/x86/include/asm/kvm_host.h
977 +@@ -1492,7 +1492,7 @@ asmlinkage void kvm_spurious_fault(void);
978 + "cmpb $0, kvm_rebooting \n\t" \
979 + "jne 668b \n\t" \
980 + __ASM_SIZE(push) " $666b \n\t" \
981 +- "call kvm_spurious_fault \n\t" \
982 ++ "jmp kvm_spurious_fault \n\t" \
983 + ".popsection \n\t" \
984 + _ASM_EXTABLE(666b, 667b)
985 +
986 +diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
987 +index 500278f5308e..362f3cde6a31 100644
988 +--- a/arch/x86/kernel/cpu/bugs.c
989 ++++ b/arch/x86/kernel/cpu/bugs.c
990 +@@ -1002,7 +1002,8 @@ static void __init l1tf_select_mitigation(void)
991 + #endif
992 +
993 + half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
994 +- if (e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
995 ++ if (l1tf_mitigation != L1TF_MITIGATION_OFF &&
996 ++ e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
997 + pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
998 + pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
999 + half_pa);
1000 +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
1001 +index 8d5d984541be..95784bc4a53c 100644
1002 +--- a/arch/x86/kvm/vmx.c
1003 ++++ b/arch/x86/kvm/vmx.c
1004 +@@ -8031,13 +8031,16 @@ static __init int hardware_setup(void)
1005 +
1006 + kvm_mce_cap_supported |= MCG_LMCE_P;
1007 +
1008 +- return alloc_kvm_area();
1009 ++ r = alloc_kvm_area();
1010 ++ if (r)
1011 ++ goto out;
1012 ++ return 0;
1013 +
1014 + out:
1015 + for (i = 0; i < VMX_BITMAP_NR; i++)
1016 + free_page((unsigned long)vmx_bitmap[i]);
1017 +
1018 +- return r;
1019 ++ return r;
1020 + }
1021 +
1022 + static __exit void hardware_unsetup(void)
1023 +diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
1024 +index ef99f3892e1f..427a955a2cf2 100644
1025 +--- a/arch/x86/mm/init.c
1026 ++++ b/arch/x86/mm/init.c
1027 +@@ -931,7 +931,7 @@ unsigned long max_swapfile_size(void)
1028 +
1029 + pages = generic_max_swapfile_size();
1030 +
1031 +- if (boot_cpu_has_bug(X86_BUG_L1TF)) {
1032 ++ if (boot_cpu_has_bug(X86_BUG_L1TF) && l1tf_mitigation != L1TF_MITIGATION_OFF) {
1033 + /* Limit the swap file size to MAX_PA/2 for L1TF workaround */
1034 + unsigned long long l1tf_limit = l1tf_pfn_limit();
1035 + /*
1036 +diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
1037 +index 5fab264948c2..de95db8ac52f 100644
1038 +--- a/arch/x86/mm/init_64.c
1039 ++++ b/arch/x86/mm/init_64.c
1040 +@@ -584,7 +584,6 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
1041 + paddr_end,
1042 + page_size_mask,
1043 + prot);
1044 +- __flush_tlb_all();
1045 + continue;
1046 + }
1047 + /*
1048 +@@ -627,7 +626,6 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
1049 + pud_populate(&init_mm, pud, pmd);
1050 + spin_unlock(&init_mm.page_table_lock);
1051 + }
1052 +- __flush_tlb_all();
1053 +
1054 + update_page_count(PG_LEVEL_1G, pages);
1055 +
1056 +@@ -668,7 +666,6 @@ phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end,
1057 + paddr_last = phys_pud_init(pud, paddr,
1058 + paddr_end,
1059 + page_size_mask);
1060 +- __flush_tlb_all();
1061 + continue;
1062 + }
1063 +
1064 +@@ -680,7 +677,6 @@ phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end,
1065 + p4d_populate(&init_mm, p4d, pud);
1066 + spin_unlock(&init_mm.page_table_lock);
1067 + }
1068 +- __flush_tlb_all();
1069 +
1070 + return paddr_last;
1071 + }
1072 +@@ -733,8 +729,6 @@ kernel_physical_mapping_init(unsigned long paddr_start,
1073 + if (pgd_changed)
1074 + sync_global_pgds(vaddr_start, vaddr_end - 1);
1075 +
1076 +- __flush_tlb_all();
1077 +-
1078 + return paddr_last;
1079 + }
1080 +
1081 +diff --git a/crypto/cfb.c b/crypto/cfb.c
1082 +index 20987d0e09d8..e81e45673498 100644
1083 +--- a/crypto/cfb.c
1084 ++++ b/crypto/cfb.c
1085 +@@ -144,7 +144,7 @@ static int crypto_cfb_decrypt_segment(struct skcipher_walk *walk,
1086 +
1087 + do {
1088 + crypto_cfb_encrypt_one(tfm, iv, dst);
1089 +- crypto_xor(dst, iv, bsize);
1090 ++ crypto_xor(dst, src, bsize);
1091 + iv = src;
1092 +
1093 + src += bsize;
1094 +diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
1095 +index c20c9f5c18f2..1026173d721a 100644
1096 +--- a/crypto/tcrypt.c
1097 ++++ b/crypto/tcrypt.c
1098 +@@ -1736,6 +1736,7 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
1099 + ret += tcrypt_test("ctr(aes)");
1100 + ret += tcrypt_test("rfc3686(ctr(aes))");
1101 + ret += tcrypt_test("ofb(aes)");
1102 ++ ret += tcrypt_test("cfb(aes)");
1103 + break;
1104 +
1105 + case 11:
1106 +@@ -2060,6 +2061,10 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
1107 + speed_template_16_24_32);
1108 + test_cipher_speed("ctr(aes)", DECRYPT, sec, NULL, 0,
1109 + speed_template_16_24_32);
1110 ++ test_cipher_speed("cfb(aes)", ENCRYPT, sec, NULL, 0,
1111 ++ speed_template_16_24_32);
1112 ++ test_cipher_speed("cfb(aes)", DECRYPT, sec, NULL, 0,
1113 ++ speed_template_16_24_32);
1114 + break;
1115 +
1116 + case 201:
1117 +diff --git a/crypto/testmgr.c b/crypto/testmgr.c
1118 +index b1f79c6bf409..84937ceb4bd8 100644
1119 +--- a/crypto/testmgr.c
1120 ++++ b/crypto/testmgr.c
1121 +@@ -2690,6 +2690,13 @@ static const struct alg_test_desc alg_test_descs[] = {
1122 + .dec = __VECS(aes_ccm_dec_tv_template)
1123 + }
1124 + }
1125 ++ }, {
1126 ++ .alg = "cfb(aes)",
1127 ++ .test = alg_test_skcipher,
1128 ++ .fips_allowed = 1,
1129 ++ .suite = {
1130 ++ .cipher = __VECS(aes_cfb_tv_template)
1131 ++ },
1132 + }, {
1133 + .alg = "chacha20",
1134 + .test = alg_test_skcipher,
1135 +diff --git a/crypto/testmgr.h b/crypto/testmgr.h
1136 +index 1fe7b97ba03f..b5b0d29761ce 100644
1137 +--- a/crypto/testmgr.h
1138 ++++ b/crypto/testmgr.h
1139 +@@ -11449,6 +11449,82 @@ static const struct cipher_testvec aes_cbc_tv_template[] = {
1140 + },
1141 + };
1142 +
1143 ++static const struct cipher_testvec aes_cfb_tv_template[] = {
1144 ++ { /* From NIST SP800-38A */
1145 ++ .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
1146 ++ "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
1147 ++ .klen = 16,
1148 ++ .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
1149 ++ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
1150 ++ .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
1151 ++ "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
1152 ++ "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
1153 ++ "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
1154 ++ "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
1155 ++ "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
1156 ++ "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
1157 ++ "\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
1158 ++ .ctext = "\x3b\x3f\xd9\x2e\xb7\x2d\xad\x20"
1159 ++ "\x33\x34\x49\xf8\xe8\x3c\xfb\x4a"
1160 ++ "\xc8\xa6\x45\x37\xa0\xb3\xa9\x3f"
1161 ++ "\xcd\xe3\xcd\xad\x9f\x1c\xe5\x8b"
1162 ++ "\x26\x75\x1f\x67\xa3\xcb\xb1\x40"
1163 ++ "\xb1\x80\x8c\xf1\x87\xa4\xf4\xdf"
1164 ++ "\xc0\x4b\x05\x35\x7c\x5d\x1c\x0e"
1165 ++ "\xea\xc4\xc6\x6f\x9f\xf7\xf2\xe6",
1166 ++ .len = 64,
1167 ++ }, {
1168 ++ .key = "\x8e\x73\xb0\xf7\xda\x0e\x64\x52"
1169 ++ "\xc8\x10\xf3\x2b\x80\x90\x79\xe5"
1170 ++ "\x62\xf8\xea\xd2\x52\x2c\x6b\x7b",
1171 ++ .klen = 24,
1172 ++ .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
1173 ++ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
1174 ++ .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
1175 ++ "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
1176 ++ "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
1177 ++ "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
1178 ++ "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
1179 ++ "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
1180 ++ "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
1181 ++ "\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
1182 ++ .ctext = "\xcd\xc8\x0d\x6f\xdd\xf1\x8c\xab"
1183 ++ "\x34\xc2\x59\x09\xc9\x9a\x41\x74"
1184 ++ "\x67\xce\x7f\x7f\x81\x17\x36\x21"
1185 ++ "\x96\x1a\x2b\x70\x17\x1d\x3d\x7a"
1186 ++ "\x2e\x1e\x8a\x1d\xd5\x9b\x88\xb1"
1187 ++ "\xc8\xe6\x0f\xed\x1e\xfa\xc4\xc9"
1188 ++ "\xc0\x5f\x9f\x9c\xa9\x83\x4f\xa0"
1189 ++ "\x42\xae\x8f\xba\x58\x4b\x09\xff",
1190 ++ .len = 64,
1191 ++ }, {
1192 ++ .key = "\x60\x3d\xeb\x10\x15\xca\x71\xbe"
1193 ++ "\x2b\x73\xae\xf0\x85\x7d\x77\x81"
1194 ++ "\x1f\x35\x2c\x07\x3b\x61\x08\xd7"
1195 ++ "\x2d\x98\x10\xa3\x09\x14\xdf\xf4",
1196 ++ .klen = 32,
1197 ++ .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
1198 ++ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
1199 ++ .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
1200 ++ "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
1201 ++ "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
1202 ++ "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
1203 ++ "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
1204 ++ "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
1205 ++ "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
1206 ++ "\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
1207 ++ .ctext = "\xdc\x7e\x84\xbf\xda\x79\x16\x4b"
1208 ++ "\x7e\xcd\x84\x86\x98\x5d\x38\x60"
1209 ++ "\x39\xff\xed\x14\x3b\x28\xb1\xc8"
1210 ++ "\x32\x11\x3c\x63\x31\xe5\x40\x7b"
1211 ++ "\xdf\x10\x13\x24\x15\xe5\x4b\x92"
1212 ++ "\xa1\x3e\xd0\xa8\x26\x7a\xe2\xf9"
1213 ++ "\x75\xa3\x85\x74\x1a\xb9\xce\xf8"
1214 ++ "\x20\x31\x62\x3d\x55\xb1\xe4\x71",
1215 ++ .len = 64,
1216 ++ },
1217 ++};
1218 ++
1219 + static const struct aead_testvec hmac_md5_ecb_cipher_null_enc_tv_template[] = {
1220 + { /* Input data from RFC 2410 Case 1 */
1221 + #ifdef __LITTLE_ENDIAN
1222 +diff --git a/drivers/android/binder.c b/drivers/android/binder.c
1223 +index 9f1000d2a40c..b834ee335d9a 100644
1224 +--- a/drivers/android/binder.c
1225 ++++ b/drivers/android/binder.c
1226 +@@ -72,6 +72,7 @@
1227 + #include <linux/spinlock.h>
1228 + #include <linux/ratelimit.h>
1229 + #include <linux/syscalls.h>
1230 ++#include <linux/task_work.h>
1231 +
1232 + #include <uapi/linux/android/binder.h>
1233 +
1234 +@@ -2160,6 +2161,64 @@ static bool binder_validate_fixup(struct binder_buffer *b,
1235 + return (fixup_offset >= last_min_offset);
1236 + }
1237 +
1238 ++/**
1239 ++ * struct binder_task_work_cb - for deferred close
1240 ++ *
1241 ++ * @twork: callback_head for task work
1242 ++ * @fd: fd to close
1243 ++ *
1244 ++ * Structure to pass task work to be handled after
1245 ++ * returning from binder_ioctl() via task_work_add().
1246 ++ */
1247 ++struct binder_task_work_cb {
1248 ++ struct callback_head twork;
1249 ++ struct file *file;
1250 ++};
1251 ++
1252 ++/**
1253 ++ * binder_do_fd_close() - close list of file descriptors
1254 ++ * @twork: callback head for task work
1255 ++ *
1256 ++ * It is not safe to call ksys_close() during the binder_ioctl()
1257 ++ * function if there is a chance that binder's own file descriptor
1258 ++ * might be closed. This is to meet the requirements for using
1259 ++ * fdget() (see comments for __fget_light()). Therefore use
1260 ++ * task_work_add() to schedule the close operation once we have
1261 ++ * returned from binder_ioctl(). This function is a callback
1262 ++ * for that mechanism and does the actual ksys_close() on the
1263 ++ * given file descriptor.
1264 ++ */
1265 ++static void binder_do_fd_close(struct callback_head *twork)
1266 ++{
1267 ++ struct binder_task_work_cb *twcb = container_of(twork,
1268 ++ struct binder_task_work_cb, twork);
1269 ++
1270 ++ fput(twcb->file);
1271 ++ kfree(twcb);
1272 ++}
1273 ++
1274 ++/**
1275 ++ * binder_deferred_fd_close() - schedule a close for the given file-descriptor
1276 ++ * @fd: file-descriptor to close
1277 ++ *
1278 ++ * See comments in binder_do_fd_close(). This function is used to schedule
1279 ++ * a file-descriptor to be closed after returning from binder_ioctl().
1280 ++ */
1281 ++static void binder_deferred_fd_close(int fd)
1282 ++{
1283 ++ struct binder_task_work_cb *twcb;
1284 ++
1285 ++ twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
1286 ++ if (!twcb)
1287 ++ return;
1288 ++ init_task_work(&twcb->twork, binder_do_fd_close);
1289 ++ __close_fd_get_file(fd, &twcb->file);
1290 ++ if (twcb->file)
1291 ++ task_work_add(current, &twcb->twork, true);
1292 ++ else
1293 ++ kfree(twcb);
1294 ++}
1295 ++
1296 + static void binder_transaction_buffer_release(struct binder_proc *proc,
1297 + struct binder_buffer *buffer,
1298 + binder_size_t *failed_at)
1299 +@@ -2299,7 +2358,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
1300 + }
1301 + fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
1302 + for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
1303 +- ksys_close(fd_array[fd_index]);
1304 ++ binder_deferred_fd_close(fd_array[fd_index]);
1305 + } break;
1306 + default:
1307 + pr_err("transaction release %d bad object type %x\n",
1308 +@@ -3912,7 +3971,7 @@ static int binder_apply_fd_fixups(struct binder_transaction *t)
1309 + } else if (ret) {
1310 + u32 *fdp = (u32 *)(t->buffer->data + fixup->offset);
1311 +
1312 +- ksys_close(*fdp);
1313 ++ binder_deferred_fd_close(*fdp);
1314 + }
1315 + list_del(&fixup->fixup_entry);
1316 + kfree(fixup);
1317 +diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c
1318 +index f39a920496fb..8da314b81eab 100644
1319 +--- a/drivers/base/platform-msi.c
1320 ++++ b/drivers/base/platform-msi.c
1321 +@@ -368,14 +368,16 @@ void platform_msi_domain_free(struct irq_domain *domain, unsigned int virq,
1322 + unsigned int nvec)
1323 + {
1324 + struct platform_msi_priv_data *data = domain->host_data;
1325 +- struct msi_desc *desc;
1326 +- for_each_msi_entry(desc, data->dev) {
1327 ++ struct msi_desc *desc, *tmp;
1328 ++ for_each_msi_entry_safe(desc, tmp, data->dev) {
1329 + if (WARN_ON(!desc->irq || desc->nvec_used != 1))
1330 + return;
1331 + if (!(desc->irq >= virq && desc->irq < (virq + nvec)))
1332 + continue;
1333 +
1334 + irq_domain_free_irqs_common(domain, desc->irq, 1);
1335 ++ list_del(&desc->list);
1336 ++ free_msi_entry(desc);
1337 + }
1338 + }
1339 +
1340 +diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
1341 +index 129f640424b7..95db630dd722 100644
1342 +--- a/drivers/char/tpm/tpm-interface.c
1343 ++++ b/drivers/char/tpm/tpm-interface.c
1344 +@@ -477,13 +477,15 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip,
1345 +
1346 + if (need_locality) {
1347 + rc = tpm_request_locality(chip, flags);
1348 +- if (rc < 0)
1349 +- goto out_no_locality;
1350 ++ if (rc < 0) {
1351 ++ need_locality = false;
1352 ++ goto out_locality;
1353 ++ }
1354 + }
1355 +
1356 + rc = tpm_cmd_ready(chip, flags);
1357 + if (rc)
1358 +- goto out;
1359 ++ goto out_locality;
1360 +
1361 + rc = tpm2_prepare_space(chip, space, ordinal, buf);
1362 + if (rc)
1363 +@@ -547,14 +549,13 @@ out_recv:
1364 + dev_err(&chip->dev, "tpm2_commit_space: error %d\n", rc);
1365 +
1366 + out:
1367 +- rc = tpm_go_idle(chip, flags);
1368 +- if (rc)
1369 +- goto out;
1370 ++ /* may fail but do not override previous error value in rc */
1371 ++ tpm_go_idle(chip, flags);
1372 +
1373 ++out_locality:
1374 + if (need_locality)
1375 + tpm_relinquish_locality(chip, flags);
1376 +
1377 +-out_no_locality:
1378 + if (chip->ops->clk_enable != NULL)
1379 + chip->ops->clk_enable(chip, false);
1380 +
1381 +diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c
1382 +index caa86b19c76d..f74f451baf6a 100644
1383 +--- a/drivers/char/tpm/tpm_i2c_nuvoton.c
1384 ++++ b/drivers/char/tpm/tpm_i2c_nuvoton.c
1385 +@@ -369,6 +369,7 @@ static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t len)
1386 + struct device *dev = chip->dev.parent;
1387 + struct i2c_client *client = to_i2c_client(dev);
1388 + u32 ordinal;
1389 ++ unsigned long duration;
1390 + size_t count = 0;
1391 + int burst_count, bytes2write, retries, rc = -EIO;
1392 +
1393 +@@ -455,10 +456,12 @@ static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t len)
1394 + return rc;
1395 + }
1396 + ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));
1397 +- rc = i2c_nuvoton_wait_for_data_avail(chip,
1398 +- tpm_calc_ordinal_duration(chip,
1399 +- ordinal),
1400 +- &priv->read_queue);
1401 ++ if (chip->flags & TPM_CHIP_FLAG_TPM2)
1402 ++ duration = tpm2_calc_ordinal_duration(chip, ordinal);
1403 ++ else
1404 ++ duration = tpm_calc_ordinal_duration(chip, ordinal);
1405 ++
1406 ++ rc = i2c_nuvoton_wait_for_data_avail(chip, duration, &priv->read_queue);
1407 + if (rc) {
1408 + dev_err(dev, "%s() timeout command duration\n", __func__);
1409 + i2c_nuvoton_ready(chip);
1410 +diff --git a/drivers/clk/rockchip/clk-rk3188.c b/drivers/clk/rockchip/clk-rk3188.c
1411 +index fa25e35ce7d5..08b42b053fce 100644
1412 +--- a/drivers/clk/rockchip/clk-rk3188.c
1413 ++++ b/drivers/clk/rockchip/clk-rk3188.c
1414 +@@ -382,7 +382,7 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
1415 + COMPOSITE_NOMUX(0, "spdif_pre", "i2s_src", 0,
1416 + RK2928_CLKSEL_CON(5), 0, 7, DFLAGS,
1417 + RK2928_CLKGATE_CON(0), 13, GFLAGS),
1418 +- COMPOSITE_FRACMUX(0, "spdif_frac", "spdif_pll", CLK_SET_RATE_PARENT,
1419 ++ COMPOSITE_FRACMUX(0, "spdif_frac", "spdif_pre", CLK_SET_RATE_PARENT,
1420 + RK2928_CLKSEL_CON(9), 0,
1421 + RK2928_CLKGATE_CON(0), 14, GFLAGS,
1422 + &common_spdif_fracmux),
1423 +diff --git a/drivers/clk/sunxi-ng/ccu_nm.c b/drivers/clk/sunxi-ng/ccu_nm.c
1424 +index 6fe3c14f7b2d..424d8635b053 100644
1425 +--- a/drivers/clk/sunxi-ng/ccu_nm.c
1426 ++++ b/drivers/clk/sunxi-ng/ccu_nm.c
1427 +@@ -19,6 +19,17 @@ struct _ccu_nm {
1428 + unsigned long m, min_m, max_m;
1429 + };
1430 +
1431 ++static unsigned long ccu_nm_calc_rate(unsigned long parent,
1432 ++ unsigned long n, unsigned long m)
1433 ++{
1434 ++ u64 rate = parent;
1435 ++
1436 ++ rate *= n;
1437 ++ do_div(rate, m);
1438 ++
1439 ++ return rate;
1440 ++}
1441 ++
1442 + static void ccu_nm_find_best(unsigned long parent, unsigned long rate,
1443 + struct _ccu_nm *nm)
1444 + {
1445 +@@ -28,7 +39,8 @@ static void ccu_nm_find_best(unsigned long parent, unsigned long rate,
1446 +
1447 + for (_n = nm->min_n; _n <= nm->max_n; _n++) {
1448 + for (_m = nm->min_m; _m <= nm->max_m; _m++) {
1449 +- unsigned long tmp_rate = parent * _n / _m;
1450 ++ unsigned long tmp_rate = ccu_nm_calc_rate(parent,
1451 ++ _n, _m);
1452 +
1453 + if (tmp_rate > rate)
1454 + continue;
1455 +@@ -100,7 +112,7 @@ static unsigned long ccu_nm_recalc_rate(struct clk_hw *hw,
1456 + if (ccu_sdm_helper_is_enabled(&nm->common, &nm->sdm))
1457 + rate = ccu_sdm_helper_read_rate(&nm->common, &nm->sdm, m, n);
1458 + else
1459 +- rate = parent_rate * n / m;
1460 ++ rate = ccu_nm_calc_rate(parent_rate, n, m);
1461 +
1462 + if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
1463 + rate /= nm->fixed_post_div;
1464 +@@ -149,7 +161,7 @@ static long ccu_nm_round_rate(struct clk_hw *hw, unsigned long rate,
1465 + _nm.max_m = nm->m.max ?: 1 << nm->m.width;
1466 +
1467 + ccu_nm_find_best(*parent_rate, rate, &_nm);
1468 +- rate = *parent_rate * _nm.n / _nm.m;
1469 ++ rate = ccu_nm_calc_rate(*parent_rate, _nm.n, _nm.m);
1470 +
1471 + if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
1472 + rate /= nm->fixed_post_div;
1473 +diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
1474 +index 55c77e44bb2d..d9c8a779dd7d 100644
1475 +--- a/drivers/clocksource/Kconfig
1476 ++++ b/drivers/clocksource/Kconfig
1477 +@@ -290,6 +290,7 @@ config CLKSRC_MPS2
1478 +
1479 + config ARC_TIMERS
1480 + bool "Support for 32-bit TIMERn counters in ARC Cores" if COMPILE_TEST
1481 ++ depends on GENERIC_SCHED_CLOCK
1482 + select TIMER_OF
1483 + help
1484 + These are legacy 32-bit TIMER0 and TIMER1 counters found on all ARC cores
1485 +diff --git a/drivers/clocksource/arc_timer.c b/drivers/clocksource/arc_timer.c
1486 +index 20da9b1d7f7d..b28970ca4a7a 100644
1487 +--- a/drivers/clocksource/arc_timer.c
1488 ++++ b/drivers/clocksource/arc_timer.c
1489 +@@ -23,6 +23,7 @@
1490 + #include <linux/cpu.h>
1491 + #include <linux/of.h>
1492 + #include <linux/of_irq.h>
1493 ++#include <linux/sched_clock.h>
1494 +
1495 + #include <soc/arc/timers.h>
1496 + #include <soc/arc/mcip.h>
1497 +@@ -88,6 +89,11 @@ static u64 arc_read_gfrc(struct clocksource *cs)
1498 + return (((u64)h) << 32) | l;
1499 + }
1500 +
1501 ++static notrace u64 arc_gfrc_clock_read(void)
1502 ++{
1503 ++ return arc_read_gfrc(NULL);
1504 ++}
1505 ++
1506 + static struct clocksource arc_counter_gfrc = {
1507 + .name = "ARConnect GFRC",
1508 + .rating = 400,
1509 +@@ -111,6 +117,8 @@ static int __init arc_cs_setup_gfrc(struct device_node *node)
1510 + if (ret)
1511 + return ret;
1512 +
1513 ++ sched_clock_register(arc_gfrc_clock_read, 64, arc_timer_freq);
1514 ++
1515 + return clocksource_register_hz(&arc_counter_gfrc, arc_timer_freq);
1516 + }
1517 + TIMER_OF_DECLARE(arc_gfrc, "snps,archs-timer-gfrc", arc_cs_setup_gfrc);
1518 +@@ -139,6 +147,11 @@ static u64 arc_read_rtc(struct clocksource *cs)
1519 + return (((u64)h) << 32) | l;
1520 + }
1521 +
1522 ++static notrace u64 arc_rtc_clock_read(void)
1523 ++{
1524 ++ return arc_read_rtc(NULL);
1525 ++}
1526 ++
1527 + static struct clocksource arc_counter_rtc = {
1528 + .name = "ARCv2 RTC",
1529 + .rating = 350,
1530 +@@ -170,6 +183,8 @@ static int __init arc_cs_setup_rtc(struct device_node *node)
1531 +
1532 + write_aux_reg(AUX_RTC_CTRL, 1);
1533 +
1534 ++ sched_clock_register(arc_rtc_clock_read, 64, arc_timer_freq);
1535 ++
1536 + return clocksource_register_hz(&arc_counter_rtc, arc_timer_freq);
1537 + }
1538 + TIMER_OF_DECLARE(arc_rtc, "snps,archs-timer-rtc", arc_cs_setup_rtc);
1539 +@@ -185,6 +200,11 @@ static u64 arc_read_timer1(struct clocksource *cs)
1540 + return (u64) read_aux_reg(ARC_REG_TIMER1_CNT);
1541 + }
1542 +
1543 ++static notrace u64 arc_timer1_clock_read(void)
1544 ++{
1545 ++ return arc_read_timer1(NULL);
1546 ++}
1547 ++
1548 + static struct clocksource arc_counter_timer1 = {
1549 + .name = "ARC Timer1",
1550 + .rating = 300,
1551 +@@ -209,6 +229,8 @@ static int __init arc_cs_setup_timer1(struct device_node *node)
1552 + write_aux_reg(ARC_REG_TIMER1_CNT, 0);
1553 + write_aux_reg(ARC_REG_TIMER1_CTRL, TIMER_CTRL_NH);
1554 +
1555 ++ sched_clock_register(arc_timer1_clock_read, 32, arc_timer_freq);
1556 ++
1557 + return clocksource_register_hz(&arc_counter_timer1, arc_timer_freq);
1558 + }
1559 +
1560 +diff --git a/drivers/crypto/cavium/nitrox/nitrox_algs.c b/drivers/crypto/cavium/nitrox/nitrox_algs.c
1561 +index 2ae6124e5da6..5d54ebc20cb3 100644
1562 +--- a/drivers/crypto/cavium/nitrox/nitrox_algs.c
1563 ++++ b/drivers/crypto/cavium/nitrox/nitrox_algs.c
1564 +@@ -73,7 +73,7 @@ static int flexi_aes_keylen(int keylen)
1565 + static int nitrox_skcipher_init(struct crypto_skcipher *tfm)
1566 + {
1567 + struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(tfm);
1568 +- void *fctx;
1569 ++ struct crypto_ctx_hdr *chdr;
1570 +
1571 + /* get the first device */
1572 + nctx->ndev = nitrox_get_first_device();
1573 +@@ -81,12 +81,14 @@ static int nitrox_skcipher_init(struct crypto_skcipher *tfm)
1574 + return -ENODEV;
1575 +
1576 + /* allocate nitrox crypto context */
1577 +- fctx = crypto_alloc_context(nctx->ndev);
1578 +- if (!fctx) {
1579 ++ chdr = crypto_alloc_context(nctx->ndev);
1580 ++ if (!chdr) {
1581 + nitrox_put_device(nctx->ndev);
1582 + return -ENOMEM;
1583 + }
1584 +- nctx->u.ctx_handle = (uintptr_t)fctx;
1585 ++ nctx->chdr = chdr;
1586 ++ nctx->u.ctx_handle = (uintptr_t)((u8 *)chdr->vaddr +
1587 ++ sizeof(struct ctx_hdr));
1588 + crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(tfm) +
1589 + sizeof(struct nitrox_kcrypt_request));
1590 + return 0;
1591 +@@ -102,7 +104,7 @@ static void nitrox_skcipher_exit(struct crypto_skcipher *tfm)
1592 +
1593 + memset(&fctx->crypto, 0, sizeof(struct crypto_keys));
1594 + memset(&fctx->auth, 0, sizeof(struct auth_keys));
1595 +- crypto_free_context((void *)fctx);
1596 ++ crypto_free_context((void *)nctx->chdr);
1597 + }
1598 + nitrox_put_device(nctx->ndev);
1599 +
1600 +diff --git a/drivers/crypto/cavium/nitrox/nitrox_lib.c b/drivers/crypto/cavium/nitrox/nitrox_lib.c
1601 +index 2260efa42308..9138bae12521 100644
1602 +--- a/drivers/crypto/cavium/nitrox/nitrox_lib.c
1603 ++++ b/drivers/crypto/cavium/nitrox/nitrox_lib.c
1604 +@@ -158,12 +158,19 @@ static void destroy_crypto_dma_pool(struct nitrox_device *ndev)
1605 + void *crypto_alloc_context(struct nitrox_device *ndev)
1606 + {
1607 + struct ctx_hdr *ctx;
1608 ++ struct crypto_ctx_hdr *chdr;
1609 + void *vaddr;
1610 + dma_addr_t dma;
1611 +
1612 ++ chdr = kmalloc(sizeof(*chdr), GFP_KERNEL);
1613 ++ if (!chdr)
1614 ++ return NULL;
1615 ++
1616 + vaddr = dma_pool_zalloc(ndev->ctx_pool, GFP_KERNEL, &dma);
1617 +- if (!vaddr)
1618 ++ if (!vaddr) {
1619 ++ kfree(chdr);
1620 + return NULL;
1621 ++ }
1622 +
1623 + /* fill meta data */
1624 + ctx = vaddr;
1625 +@@ -171,7 +178,11 @@ void *crypto_alloc_context(struct nitrox_device *ndev)
1626 + ctx->dma = dma;
1627 + ctx->ctx_dma = dma + sizeof(struct ctx_hdr);
1628 +
1629 +- return ((u8 *)vaddr + sizeof(struct ctx_hdr));
1630 ++ chdr->pool = ndev->ctx_pool;
1631 ++ chdr->dma = dma;
1632 ++ chdr->vaddr = vaddr;
1633 ++
1634 ++ return chdr;
1635 + }
1636 +
1637 + /**
1638 +@@ -180,13 +191,14 @@ void *crypto_alloc_context(struct nitrox_device *ndev)
1639 + */
1640 + void crypto_free_context(void *ctx)
1641 + {
1642 +- struct ctx_hdr *ctxp;
1643 ++ struct crypto_ctx_hdr *ctxp;
1644 +
1645 + if (!ctx)
1646 + return;
1647 +
1648 +- ctxp = (struct ctx_hdr *)((u8 *)ctx - sizeof(struct ctx_hdr));
1649 +- dma_pool_free(ctxp->pool, ctxp, ctxp->dma);
1650 ++ ctxp = ctx;
1651 ++ dma_pool_free(ctxp->pool, ctxp->vaddr, ctxp->dma);
1652 ++ kfree(ctxp);
1653 + }
1654 +
1655 + /**
1656 +diff --git a/drivers/crypto/cavium/nitrox/nitrox_req.h b/drivers/crypto/cavium/nitrox/nitrox_req.h
1657 +index d091b6f5f5dd..19f0a20e3bb3 100644
1658 +--- a/drivers/crypto/cavium/nitrox/nitrox_req.h
1659 ++++ b/drivers/crypto/cavium/nitrox/nitrox_req.h
1660 +@@ -181,12 +181,19 @@ struct flexi_crypto_context {
1661 + struct auth_keys auth;
1662 + };
1663 +
1664 ++struct crypto_ctx_hdr {
1665 ++ struct dma_pool *pool;
1666 ++ dma_addr_t dma;
1667 ++ void *vaddr;
1668 ++};
1669 ++
1670 + struct nitrox_crypto_ctx {
1671 + struct nitrox_device *ndev;
1672 + union {
1673 + u64 ctx_handle;
1674 + struct flexi_crypto_context *fctx;
1675 + } u;
1676 ++ struct crypto_ctx_hdr *chdr;
1677 + };
1678 +
1679 + struct nitrox_kcrypt_request {
1680 +diff --git a/drivers/crypto/chelsio/chcr_ipsec.c b/drivers/crypto/chelsio/chcr_ipsec.c
1681 +index 461b97e2f1fd..1ff8738631a3 100644
1682 +--- a/drivers/crypto/chelsio/chcr_ipsec.c
1683 ++++ b/drivers/crypto/chelsio/chcr_ipsec.c
1684 +@@ -303,7 +303,10 @@ static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
1685 +
1686 + static inline int is_eth_imm(const struct sk_buff *skb, unsigned int kctx_len)
1687 + {
1688 +- int hdrlen = sizeof(struct chcr_ipsec_req) + kctx_len;
1689 ++ int hdrlen;
1690 ++
1691 ++ hdrlen = sizeof(struct fw_ulptx_wr) +
1692 ++ sizeof(struct chcr_ipsec_req) + kctx_len;
1693 +
1694 + hdrlen += sizeof(struct cpl_tx_pkt);
1695 + if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
1696 +diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
1697 +index 1aaccbe7e1de..c45711fd78e9 100644
1698 +--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
1699 ++++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
1700 +@@ -1605,6 +1605,7 @@ static int eb_copy_relocations(const struct i915_execbuffer *eb)
1701 + (char __user *)urelocs + copied,
1702 + len)) {
1703 + end_user:
1704 ++ user_access_end();
1705 + kvfree(relocs);
1706 + err = -EFAULT;
1707 + goto err;
1708 +diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
1709 +index f455f095a146..1b014d92855b 100644
1710 +--- a/drivers/gpu/drm/udl/udl_main.c
1711 ++++ b/drivers/gpu/drm/udl/udl_main.c
1712 +@@ -350,15 +350,10 @@ int udl_driver_load(struct drm_device *dev, unsigned long flags)
1713 + if (ret)
1714 + goto err;
1715 +
1716 +- ret = drm_vblank_init(dev, 1);
1717 +- if (ret)
1718 +- goto err_fb;
1719 +-
1720 + drm_kms_helper_poll_init(dev);
1721 +
1722 + return 0;
1723 +-err_fb:
1724 +- udl_fbdev_cleanup(dev);
1725 ++
1726 + err:
1727 + if (udl->urbs.count)
1728 + udl_free_urb_list(dev);
1729 +diff --git a/drivers/gpu/drm/v3d/v3d_debugfs.c b/drivers/gpu/drm/v3d/v3d_debugfs.c
1730 +index 4db62c545748..26470c77eb6e 100644
1731 +--- a/drivers/gpu/drm/v3d/v3d_debugfs.c
1732 ++++ b/drivers/gpu/drm/v3d/v3d_debugfs.c
1733 +@@ -71,10 +71,13 @@ static int v3d_v3d_debugfs_regs(struct seq_file *m, void *unused)
1734 + V3D_READ(v3d_hub_reg_defs[i].reg));
1735 + }
1736 +
1737 +- for (i = 0; i < ARRAY_SIZE(v3d_gca_reg_defs); i++) {
1738 +- seq_printf(m, "%s (0x%04x): 0x%08x\n",
1739 +- v3d_gca_reg_defs[i].name, v3d_gca_reg_defs[i].reg,
1740 +- V3D_GCA_READ(v3d_gca_reg_defs[i].reg));
1741 ++ if (v3d->ver < 41) {
1742 ++ for (i = 0; i < ARRAY_SIZE(v3d_gca_reg_defs); i++) {
1743 ++ seq_printf(m, "%s (0x%04x): 0x%08x\n",
1744 ++ v3d_gca_reg_defs[i].name,
1745 ++ v3d_gca_reg_defs[i].reg,
1746 ++ V3D_GCA_READ(v3d_gca_reg_defs[i].reg));
1747 ++ }
1748 + }
1749 +
1750 + for (core = 0; core < v3d->cores; core++) {
1751 +diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
1752 +index a365089a9305..b7257d7dd925 100644
1753 +--- a/drivers/infiniband/hw/hfi1/verbs.c
1754 ++++ b/drivers/infiniband/hw/hfi1/verbs.c
1755 +@@ -919,6 +919,8 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
1756 +
1757 + if (slen > len)
1758 + slen = len;
1759 ++ if (slen > ss->sge.sge_length)
1760 ++ slen = ss->sge.sge_length;
1761 + rvt_update_sge(ss, slen, false);
1762 + seg_pio_copy_mid(pbuf, addr, slen);
1763 + len -= slen;
1764 +diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
1765 +index a94b6494e71a..f322a1768fbb 100644
1766 +--- a/drivers/input/mouse/elan_i2c_core.c
1767 ++++ b/drivers/input/mouse/elan_i2c_core.c
1768 +@@ -1336,6 +1336,7 @@ MODULE_DEVICE_TABLE(i2c, elan_id);
1769 + static const struct acpi_device_id elan_acpi_id[] = {
1770 + { "ELAN0000", 0 },
1771 + { "ELAN0100", 0 },
1772 ++ { "ELAN0501", 0 },
1773 + { "ELAN0600", 0 },
1774 + { "ELAN0602", 0 },
1775 + { "ELAN0605", 0 },
1776 +diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
1777 +index d3aacd534e9c..5c63d25ce84e 100644
1778 +--- a/drivers/input/touchscreen/atmel_mxt_ts.c
1779 ++++ b/drivers/input/touchscreen/atmel_mxt_ts.c
1780 +@@ -1585,10 +1585,10 @@ static int mxt_update_cfg(struct mxt_data *data, const struct firmware *fw)
1781 + /* T7 config may have changed */
1782 + mxt_init_t7_power_cfg(data);
1783 +
1784 +-release_raw:
1785 +- kfree(cfg.raw);
1786 + release_mem:
1787 + kfree(cfg.mem);
1788 ++release_raw:
1789 ++ kfree(cfg.raw);
1790 + return ret;
1791 + }
1792 +
1793 +diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
1794 +index 6947ccf26512..71eda422c926 100644
1795 +--- a/drivers/iommu/arm-smmu-v3.c
1796 ++++ b/drivers/iommu/arm-smmu-v3.c
1797 +@@ -828,7 +828,13 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
1798 + cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_CS, CMDQ_SYNC_0_CS_SEV);
1799 + cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSH, ARM_SMMU_SH_ISH);
1800 + cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSIATTR, ARM_SMMU_MEMATTR_OIWB);
1801 +- cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSIDATA, ent->sync.msidata);
1802 ++ /*
1803 ++ * Commands are written little-endian, but we want the SMMU to
1804 ++ * receive MSIData, and thus write it back to memory, in CPU
1805 ++ * byte order, so big-endian needs an extra byteswap here.
1806 ++ */
1807 ++ cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSIDATA,
1808 ++ cpu_to_le32(ent->sync.msidata));
1809 + cmd[1] |= ent->sync.msiaddr & CMDQ_SYNC_1_MSIADDR_MASK;
1810 + break;
1811 + default:
1812 +diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
1813 +index 0ff517d3c98f..a4ceb61c5b60 100644
1814 +--- a/drivers/isdn/capi/kcapi.c
1815 ++++ b/drivers/isdn/capi/kcapi.c
1816 +@@ -852,7 +852,7 @@ u16 capi20_get_manufacturer(u32 contr, u8 *buf)
1817 + u16 ret;
1818 +
1819 + if (contr == 0) {
1820 +- strlcpy(buf, capi_manufakturer, CAPI_MANUFACTURER_LEN);
1821 ++ strncpy(buf, capi_manufakturer, CAPI_MANUFACTURER_LEN);
1822 + return CAPI_NOERROR;
1823 + }
1824 +
1825 +@@ -860,7 +860,7 @@ u16 capi20_get_manufacturer(u32 contr, u8 *buf)
1826 +
1827 + ctr = get_capi_ctr_by_nr(contr);
1828 + if (ctr && ctr->state == CAPI_CTR_RUNNING) {
1829 +- strlcpy(buf, ctr->manu, CAPI_MANUFACTURER_LEN);
1830 ++ strncpy(buf, ctr->manu, CAPI_MANUFACTURER_LEN);
1831 + ret = CAPI_NOERROR;
1832 + } else
1833 + ret = CAPI_REGNOTINSTALLED;
1834 +diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c
1835 +index 65a933a21e68..9a5334b726d6 100644
1836 +--- a/drivers/media/cec/cec-adap.c
1837 ++++ b/drivers/media/cec/cec-adap.c
1838 +@@ -455,7 +455,7 @@ int cec_thread_func(void *_adap)
1839 + (adap->needs_hpd &&
1840 + (!adap->is_configured && !adap->is_configuring)) ||
1841 + kthread_should_stop() ||
1842 +- (!adap->transmitting &&
1843 ++ (!adap->transmit_in_progress &&
1844 + !list_empty(&adap->transmit_queue)),
1845 + msecs_to_jiffies(CEC_XFER_TIMEOUT_MS));
1846 + timeout = err == 0;
1847 +@@ -463,7 +463,7 @@ int cec_thread_func(void *_adap)
1848 + /* Otherwise we just wait for something to happen. */
1849 + wait_event_interruptible(adap->kthread_waitq,
1850 + kthread_should_stop() ||
1851 +- (!adap->transmitting &&
1852 ++ (!adap->transmit_in_progress &&
1853 + !list_empty(&adap->transmit_queue)));
1854 + }
1855 +
1856 +@@ -488,6 +488,7 @@ int cec_thread_func(void *_adap)
1857 + pr_warn("cec-%s: message %*ph timed out\n", adap->name,
1858 + adap->transmitting->msg.len,
1859 + adap->transmitting->msg.msg);
1860 ++ adap->transmit_in_progress = false;
1861 + adap->tx_timeouts++;
1862 + /* Just give up on this. */
1863 + cec_data_cancel(adap->transmitting,
1864 +@@ -499,7 +500,7 @@ int cec_thread_func(void *_adap)
1865 + * If we are still transmitting, or there is nothing new to
1866 + * transmit, then just continue waiting.
1867 + */
1868 +- if (adap->transmitting || list_empty(&adap->transmit_queue))
1869 ++ if (adap->transmit_in_progress || list_empty(&adap->transmit_queue))
1870 + goto unlock;
1871 +
1872 + /* Get a new message to transmit */
1873 +@@ -545,6 +546,8 @@ int cec_thread_func(void *_adap)
1874 + if (adap->ops->adap_transmit(adap, data->attempts,
1875 + signal_free_time, &data->msg))
1876 + cec_data_cancel(data, CEC_TX_STATUS_ABORTED);
1877 ++ else
1878 ++ adap->transmit_in_progress = true;
1879 +
1880 + unlock:
1881 + mutex_unlock(&adap->lock);
1882 +@@ -575,14 +578,17 @@ void cec_transmit_done_ts(struct cec_adapter *adap, u8 status,
1883 + data = adap->transmitting;
1884 + if (!data) {
1885 + /*
1886 +- * This can happen if a transmit was issued and the cable is
1887 ++ * This might happen if a transmit was issued and the cable is
1888 + * unplugged while the transmit is ongoing. Ignore this
1889 + * transmit in that case.
1890 + */
1891 +- dprintk(1, "%s was called without an ongoing transmit!\n",
1892 +- __func__);
1893 +- goto unlock;
1894 ++ if (!adap->transmit_in_progress)
1895 ++ dprintk(1, "%s was called without an ongoing transmit!\n",
1896 ++ __func__);
1897 ++ adap->transmit_in_progress = false;
1898 ++ goto wake_thread;
1899 + }
1900 ++ adap->transmit_in_progress = false;
1901 +
1902 + msg = &data->msg;
1903 +
1904 +@@ -648,7 +654,6 @@ wake_thread:
1905 + * for transmitting or to retry the current message.
1906 + */
1907 + wake_up_interruptible(&adap->kthread_waitq);
1908 +-unlock:
1909 + mutex_unlock(&adap->lock);
1910 + }
1911 + EXPORT_SYMBOL_GPL(cec_transmit_done_ts);
1912 +@@ -1496,8 +1501,11 @@ void __cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr, bool block)
1913 + if (adap->monitor_all_cnt)
1914 + WARN_ON(call_op(adap, adap_monitor_all_enable, false));
1915 + mutex_lock(&adap->devnode.lock);
1916 +- if (adap->needs_hpd || list_empty(&adap->devnode.fhs))
1917 ++ if (adap->needs_hpd || list_empty(&adap->devnode.fhs)) {
1918 + WARN_ON(adap->ops->adap_enable(adap, false));
1919 ++ adap->transmit_in_progress = false;
1920 ++ wake_up_interruptible(&adap->kthread_waitq);
1921 ++ }
1922 + mutex_unlock(&adap->devnode.lock);
1923 + if (phys_addr == CEC_PHYS_ADDR_INVALID)
1924 + return;
1925 +@@ -1505,6 +1513,7 @@ void __cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr, bool block)
1926 +
1927 + mutex_lock(&adap->devnode.lock);
1928 + adap->last_initiator = 0xff;
1929 ++ adap->transmit_in_progress = false;
1930 +
1931 + if ((adap->needs_hpd || list_empty(&adap->devnode.fhs)) &&
1932 + adap->ops->adap_enable(adap, true)) {
1933 +diff --git a/drivers/media/cec/cec-pin.c b/drivers/media/cec/cec-pin.c
1934 +index 635db8e70ead..8f987bc0dd88 100644
1935 +--- a/drivers/media/cec/cec-pin.c
1936 ++++ b/drivers/media/cec/cec-pin.c
1937 +@@ -601,8 +601,9 @@ static void cec_pin_tx_states(struct cec_pin *pin, ktime_t ts)
1938 + break;
1939 + /* Was the message ACKed? */
1940 + ack = cec_msg_is_broadcast(&pin->tx_msg) ? v : !v;
1941 +- if (!ack && !pin->tx_ignore_nack_until_eom &&
1942 +- pin->tx_bit / 10 < pin->tx_msg.len && !pin->tx_post_eom) {
1943 ++ if (!ack && (!pin->tx_ignore_nack_until_eom ||
1944 ++ pin->tx_bit / 10 == pin->tx_msg.len - 1) &&
1945 ++ !pin->tx_post_eom) {
1946 + /*
1947 + * Note: the CEC spec is ambiguous regarding
1948 + * what action to take when a NACK appears
1949 +diff --git a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
1950 +index fa483b95bc5a..d9a590ae7545 100644
1951 +--- a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
1952 ++++ b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
1953 +@@ -1769,7 +1769,7 @@ typedef struct { u16 __; u8 _; } __packed x24;
1954 + unsigned s; \
1955 + \
1956 + for (s = 0; s < len; s++) { \
1957 +- u8 chr = font8x16[text[s] * 16 + line]; \
1958 ++ u8 chr = font8x16[(u8)text[s] * 16 + line]; \
1959 + \
1960 + if (hdiv == 2 && tpg->hflip) { \
1961 + pos[3] = (chr & (0x01 << 6) ? fg : bg); \
1962 +diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c
1963 +index 8ff8722cb6b1..99f736c81286 100644
1964 +--- a/drivers/media/common/videobuf2/videobuf2-core.c
1965 ++++ b/drivers/media/common/videobuf2/videobuf2-core.c
1966 +@@ -812,6 +812,9 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
1967 + memset(q->alloc_devs, 0, sizeof(q->alloc_devs));
1968 + q->memory = memory;
1969 + q->waiting_for_buffers = !q->is_output;
1970 ++ } else if (q->memory != memory) {
1971 ++ dprintk(1, "memory model mismatch\n");
1972 ++ return -EINVAL;
1973 + }
1974 +
1975 + num_buffers = min(*count, VB2_MAX_FRAME - q->num_buffers);
1976 +diff --git a/drivers/media/i2c/imx274.c b/drivers/media/i2c/imx274.c
1977 +index 11c69281692e..95a0e7d9851a 100644
1978 +--- a/drivers/media/i2c/imx274.c
1979 ++++ b/drivers/media/i2c/imx274.c
1980 +@@ -619,16 +619,19 @@ static int imx274_write_table(struct stimx274 *priv, const struct reg_8 table[])
1981 +
1982 + static inline int imx274_read_reg(struct stimx274 *priv, u16 addr, u8 *val)
1983 + {
1984 ++ unsigned int uint_val;
1985 + int err;
1986 +
1987 +- err = regmap_read(priv->regmap, addr, (unsigned int *)val);
1988 ++ err = regmap_read(priv->regmap, addr, &uint_val);
1989 + if (err)
1990 + dev_err(&priv->client->dev,
1991 + "%s : i2c read failed, addr = %x\n", __func__, addr);
1992 + else
1993 + dev_dbg(&priv->client->dev,
1994 + "%s : addr 0x%x, val=0x%x\n", __func__,
1995 +- addr, *val);
1996 ++ addr, uint_val);
1997 ++
1998 ++ *val = uint_val;
1999 + return err;
2000 + }
2001 +
2002 +diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
2003 +index eaefdb58653b..703d29abb363 100644
2004 +--- a/drivers/media/i2c/ov5640.c
2005 ++++ b/drivers/media/i2c/ov5640.c
2006 +@@ -2020,6 +2020,7 @@ static int ov5640_set_fmt(struct v4l2_subdev *sd,
2007 + struct ov5640_dev *sensor = to_ov5640_dev(sd);
2008 + const struct ov5640_mode_info *new_mode;
2009 + struct v4l2_mbus_framefmt *mbus_fmt = &format->format;
2010 ++ struct v4l2_mbus_framefmt *fmt;
2011 + int ret;
2012 +
2013 + if (format->pad != 0)
2014 +@@ -2037,22 +2038,20 @@ static int ov5640_set_fmt(struct v4l2_subdev *sd,
2015 + if (ret)
2016 + goto out;
2017 +
2018 +- if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
2019 +- struct v4l2_mbus_framefmt *fmt =
2020 +- v4l2_subdev_get_try_format(sd, cfg, 0);
2021 ++ if (format->which == V4L2_SUBDEV_FORMAT_TRY)
2022 ++ fmt = v4l2_subdev_get_try_format(sd, cfg, 0);
2023 ++ else
2024 ++ fmt = &sensor->fmt;
2025 +
2026 +- *fmt = *mbus_fmt;
2027 +- goto out;
2028 +- }
2029 ++ *fmt = *mbus_fmt;
2030 +
2031 + if (new_mode != sensor->current_mode) {
2032 + sensor->current_mode = new_mode;
2033 + sensor->pending_mode_change = true;
2034 + }
2035 +- if (mbus_fmt->code != sensor->fmt.code) {
2036 +- sensor->fmt = *mbus_fmt;
2037 ++ if (mbus_fmt->code != sensor->fmt.code)
2038 + sensor->pending_fmt_change = true;
2039 +- }
2040 ++
2041 + out:
2042 + mutex_unlock(&sensor->lock);
2043 + return ret;
2044 +diff --git a/drivers/media/platform/vim2m.c b/drivers/media/platform/vim2m.c
2045 +index d82db738f174..f938a2c54314 100644
2046 +--- a/drivers/media/platform/vim2m.c
2047 ++++ b/drivers/media/platform/vim2m.c
2048 +@@ -805,10 +805,11 @@ static int vim2m_start_streaming(struct vb2_queue *q, unsigned count)
2049 + static void vim2m_stop_streaming(struct vb2_queue *q)
2050 + {
2051 + struct vim2m_ctx *ctx = vb2_get_drv_priv(q);
2052 ++ struct vim2m_dev *dev = ctx->dev;
2053 + struct vb2_v4l2_buffer *vbuf;
2054 + unsigned long flags;
2055 +
2056 +- flush_scheduled_work();
2057 ++ cancel_delayed_work_sync(&dev->work_run);
2058 + for (;;) {
2059 + if (V4L2_TYPE_IS_OUTPUT(q->type))
2060 + vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
2061 +diff --git a/drivers/media/platform/vivid/vivid-vid-cap.c b/drivers/media/platform/vivid/vivid-vid-cap.c
2062 +index 673772cd17d6..a88637a42f44 100644
2063 +--- a/drivers/media/platform/vivid/vivid-vid-cap.c
2064 ++++ b/drivers/media/platform/vivid/vivid-vid-cap.c
2065 +@@ -449,6 +449,8 @@ void vivid_update_format_cap(struct vivid_dev *dev, bool keep_controls)
2066 + tpg_s_rgb_range(&dev->tpg, v4l2_ctrl_g_ctrl(dev->rgb_range_cap));
2067 + break;
2068 + }
2069 ++ vfree(dev->bitmap_cap);
2070 ++ dev->bitmap_cap = NULL;
2071 + vivid_update_quality(dev);
2072 + tpg_reset_source(&dev->tpg, dev->src_rect.width, dev->src_rect.height, dev->field_cap);
2073 + dev->crop_cap = dev->src_rect;
2074 +diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
2075 +index 552bbe82a160..877978dbd409 100644
2076 +--- a/drivers/media/rc/rc-main.c
2077 ++++ b/drivers/media/rc/rc-main.c
2078 +@@ -695,7 +695,8 @@ void rc_repeat(struct rc_dev *dev)
2079 + (dev->last_toggle ? LIRC_SCANCODE_FLAG_TOGGLE : 0)
2080 + };
2081 +
2082 +- ir_lirc_scancode_event(dev, &sc);
2083 ++ if (dev->allowed_protocols != RC_PROTO_BIT_CEC)
2084 ++ ir_lirc_scancode_event(dev, &sc);
2085 +
2086 + spin_lock_irqsave(&dev->keylock, flags);
2087 +
2088 +@@ -735,7 +736,8 @@ static void ir_do_keydown(struct rc_dev *dev, enum rc_proto protocol,
2089 + .keycode = keycode
2090 + };
2091 +
2092 +- ir_lirc_scancode_event(dev, &sc);
2093 ++ if (dev->allowed_protocols != RC_PROTO_BIT_CEC)
2094 ++ ir_lirc_scancode_event(dev, &sc);
2095 +
2096 + if (new_event && dev->keypressed)
2097 + ir_do_keyup(dev, false);
2098 +diff --git a/drivers/media/usb/dvb-usb-v2/usb_urb.c b/drivers/media/usb/dvb-usb-v2/usb_urb.c
2099 +index 024c751eb165..2ad2ddeaff51 100644
2100 +--- a/drivers/media/usb/dvb-usb-v2/usb_urb.c
2101 ++++ b/drivers/media/usb/dvb-usb-v2/usb_urb.c
2102 +@@ -155,7 +155,6 @@ static int usb_urb_alloc_bulk_urbs(struct usb_data_stream *stream)
2103 + stream->props.u.bulk.buffersize,
2104 + usb_urb_complete, stream);
2105 +
2106 +- stream->urb_list[i]->transfer_flags = URB_FREE_BUFFER;
2107 + stream->urbs_initialized++;
2108 + }
2109 + return 0;
2110 +@@ -186,7 +185,7 @@ static int usb_urb_alloc_isoc_urbs(struct usb_data_stream *stream)
2111 + urb->complete = usb_urb_complete;
2112 + urb->pipe = usb_rcvisocpipe(stream->udev,
2113 + stream->props.endpoint);
2114 +- urb->transfer_flags = URB_ISO_ASAP | URB_FREE_BUFFER;
2115 ++ urb->transfer_flags = URB_ISO_ASAP;
2116 + urb->interval = stream->props.u.isoc.interval;
2117 + urb->number_of_packets = stream->props.u.isoc.framesperurb;
2118 + urb->transfer_buffer_length = stream->props.u.isoc.framesize *
2119 +@@ -210,7 +209,7 @@ static int usb_free_stream_buffers(struct usb_data_stream *stream)
2120 + if (stream->state & USB_STATE_URB_BUF) {
2121 + while (stream->buf_num) {
2122 + stream->buf_num--;
2123 +- stream->buf_list[stream->buf_num] = NULL;
2124 ++ kfree(stream->buf_list[stream->buf_num]);
2125 + }
2126 + }
2127 +
2128 +diff --git a/drivers/media/v4l2-core/v4l2-fwnode.c b/drivers/media/v4l2-core/v4l2-fwnode.c
2129 +index 218f0da0ce76..edd34cf09cf8 100644
2130 +--- a/drivers/media/v4l2-core/v4l2-fwnode.c
2131 ++++ b/drivers/media/v4l2-core/v4l2-fwnode.c
2132 +@@ -310,8 +310,8 @@ v4l2_fwnode_endpoint_parse_parallel_bus(struct fwnode_handle *fwnode,
2133 + }
2134 +
2135 + if (!fwnode_property_read_u32(fwnode, "data-active", &v)) {
2136 +- flags &= ~(V4L2_MBUS_PCLK_SAMPLE_RISING |
2137 +- V4L2_MBUS_PCLK_SAMPLE_FALLING);
2138 ++ flags &= ~(V4L2_MBUS_DATA_ACTIVE_HIGH |
2139 ++ V4L2_MBUS_DATA_ACTIVE_LOW);
2140 + flags |= v ? V4L2_MBUS_DATA_ACTIVE_HIGH :
2141 + V4L2_MBUS_DATA_ACTIVE_LOW;
2142 + pr_debug("data-active %s\n", v ? "high" : "low");
2143 +diff --git a/drivers/misc/ocxl/config.c b/drivers/misc/ocxl/config.c
2144 +index 57a6bb1fd3c9..8f2c5d8bd2ee 100644
2145 +--- a/drivers/misc/ocxl/config.c
2146 ++++ b/drivers/misc/ocxl/config.c
2147 +@@ -318,7 +318,7 @@ static int read_afu_name(struct pci_dev *dev, struct ocxl_fn_config *fn,
2148 + if (rc)
2149 + return rc;
2150 + ptr = (u32 *) &afu->name[i];
2151 +- *ptr = val;
2152 ++ *ptr = le32_to_cpu((__force __le32) val);
2153 + }
2154 + afu->name[OCXL_AFU_NAME_SZ - 1] = '\0'; /* play safe */
2155 + return 0;
2156 +diff --git a/drivers/misc/ocxl/link.c b/drivers/misc/ocxl/link.c
2157 +index 31695a078485..646d16450066 100644
2158 +--- a/drivers/misc/ocxl/link.c
2159 ++++ b/drivers/misc/ocxl/link.c
2160 +@@ -566,7 +566,7 @@ int ocxl_link_update_pe(void *link_handle, int pasid, __u16 tid)
2161 +
2162 + mutex_lock(&spa->spa_lock);
2163 +
2164 +- pe->tid = tid;
2165 ++ pe->tid = cpu_to_be32(tid);
2166 +
2167 + /*
2168 + * The barrier makes sure the PE is updated
2169 +diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
2170 +index 650f2b490a05..9dc16a23429a 100644
2171 +--- a/drivers/mtd/nand/raw/marvell_nand.c
2172 ++++ b/drivers/mtd/nand/raw/marvell_nand.c
2173 +@@ -514,9 +514,14 @@ static void marvell_nfc_enable_int(struct marvell_nfc *nfc, u32 int_mask)
2174 + writel_relaxed(reg & ~int_mask, nfc->regs + NDCR);
2175 + }
2176 +
2177 +-static void marvell_nfc_clear_int(struct marvell_nfc *nfc, u32 int_mask)
2178 ++static u32 marvell_nfc_clear_int(struct marvell_nfc *nfc, u32 int_mask)
2179 + {
2180 ++ u32 reg;
2181 ++
2182 ++ reg = readl_relaxed(nfc->regs + NDSR);
2183 + writel_relaxed(int_mask, nfc->regs + NDSR);
2184 ++
2185 ++ return reg & int_mask;
2186 + }
2187 +
2188 + static void marvell_nfc_force_byte_access(struct nand_chip *chip,
2189 +@@ -683,6 +688,7 @@ static int marvell_nfc_wait_cmdd(struct nand_chip *chip)
2190 + static int marvell_nfc_wait_op(struct nand_chip *chip, unsigned int timeout_ms)
2191 + {
2192 + struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
2193 ++ u32 pending;
2194 + int ret;
2195 +
2196 + /* Timeout is expressed in ms */
2197 +@@ -695,8 +701,13 @@ static int marvell_nfc_wait_op(struct nand_chip *chip, unsigned int timeout_ms)
2198 + ret = wait_for_completion_timeout(&nfc->complete,
2199 + msecs_to_jiffies(timeout_ms));
2200 + marvell_nfc_disable_int(nfc, NDCR_RDYM);
2201 +- marvell_nfc_clear_int(nfc, NDSR_RDY(0) | NDSR_RDY(1));
2202 +- if (!ret) {
2203 ++ pending = marvell_nfc_clear_int(nfc, NDSR_RDY(0) | NDSR_RDY(1));
2204 ++
2205 ++ /*
2206 ++ * In case the interrupt was not served in the required time frame,
2207 ++ * check if the ISR was not served or if something went actually wrong.
2208 ++ */
2209 ++ if (ret && !pending) {
2210 + dev_err(nfc->dev, "Timeout waiting for RB signal\n");
2211 + return -ETIMEDOUT;
2212 + }
2213 +diff --git a/drivers/mtd/nand/raw/nand_jedec.c b/drivers/mtd/nand/raw/nand_jedec.c
2214 +index 5c26492c841d..38b5dc22cb30 100644
2215 +--- a/drivers/mtd/nand/raw/nand_jedec.c
2216 ++++ b/drivers/mtd/nand/raw/nand_jedec.c
2217 +@@ -107,6 +107,8 @@ int nand_jedec_detect(struct nand_chip *chip)
2218 + pr_warn("Invalid codeword size\n");
2219 + }
2220 +
2221 ++ ret = 1;
2222 ++
2223 + free_jedec_param_page:
2224 + kfree(p);
2225 + return ret;
2226 +diff --git a/drivers/mtd/nand/raw/omap2.c b/drivers/mtd/nand/raw/omap2.c
2227 +index 886d05c391ef..68e8b9f7f372 100644
2228 +--- a/drivers/mtd/nand/raw/omap2.c
2229 ++++ b/drivers/mtd/nand/raw/omap2.c
2230 +@@ -1944,7 +1944,7 @@ static int omap_nand_attach_chip(struct nand_chip *chip)
2231 + case NAND_OMAP_PREFETCH_DMA:
2232 + dma_cap_zero(mask);
2233 + dma_cap_set(DMA_SLAVE, mask);
2234 +- info->dma = dma_request_chan(dev, "rxtx");
2235 ++ info->dma = dma_request_chan(dev->parent, "rxtx");
2236 +
2237 + if (IS_ERR(info->dma)) {
2238 + dev_err(dev, "DMA engine request failed\n");
2239 +diff --git a/drivers/mtd/spi-nor/Kconfig b/drivers/mtd/spi-nor/Kconfig
2240 +index 6cc9c929ff57..37775fc09e09 100644
2241 +--- a/drivers/mtd/spi-nor/Kconfig
2242 ++++ b/drivers/mtd/spi-nor/Kconfig
2243 +@@ -41,7 +41,7 @@ config SPI_ASPEED_SMC
2244 +
2245 + config SPI_ATMEL_QUADSPI
2246 + tristate "Atmel Quad SPI Controller"
2247 +- depends on ARCH_AT91 || (ARM && COMPILE_TEST)
2248 ++ depends on ARCH_AT91 || (ARM && COMPILE_TEST && !ARCH_EBSA110)
2249 + depends on OF && HAS_IOMEM
2250 + help
2251 + This enables support for the Quad SPI controller in master mode.
2252 +diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
2253 +index b164f705709d..3b5b47e98c73 100644
2254 +--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
2255 ++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
2256 +@@ -9360,10 +9360,16 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
2257 + BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n",
2258 + rc);
2259 +
2260 +- /* Remove all currently configured VLANs */
2261 +- rc = bnx2x_del_all_vlans(bp);
2262 +- if (rc < 0)
2263 +- BNX2X_ERR("Failed to delete all VLANs\n");
2264 ++ /* The whole *vlan_obj structure may be not initialized if VLAN
2265 ++ * filtering offload is not supported by hardware. Currently this is
2266 ++ * true for all hardware covered by CHIP_IS_E1x().
2267 ++ */
2268 ++ if (!CHIP_IS_E1x(bp)) {
2269 ++ /* Remove all currently configured VLANs */
2270 ++ rc = bnx2x_del_all_vlans(bp);
2271 ++ if (rc < 0)
2272 ++ BNX2X_ERR("Failed to delete all VLANs\n");
2273 ++ }
2274 +
2275 + /* Disable LLH */
2276 + if (!CHIP_IS_E1(bp))
2277 +diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
2278 +index a4681780a55d..098d8764c0ea 100644
2279 +--- a/drivers/net/ethernet/ibm/ibmveth.c
2280 ++++ b/drivers/net/ethernet/ibm/ibmveth.c
2281 +@@ -1171,11 +1171,15 @@ out:
2282 +
2283 + map_failed_frags:
2284 + last = i+1;
2285 +- for (i = 0; i < last; i++)
2286 ++ for (i = 1; i < last; i++)
2287 + dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
2288 + descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
2289 + DMA_TO_DEVICE);
2290 +
2291 ++ dma_unmap_single(&adapter->vdev->dev,
2292 ++ descs[0].fields.address,
2293 ++ descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
2294 ++ DMA_TO_DEVICE);
2295 + map_failed:
2296 + if (!firmware_has_feature(FW_FEATURE_CMO))
2297 + netdev_err(netdev, "tx: unable to map xmit buffer\n");
2298 +diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
2299 +index 17e6dcd2eb42..c99dd3f1e6a8 100644
2300 +--- a/drivers/net/hamradio/6pack.c
2301 ++++ b/drivers/net/hamradio/6pack.c
2302 +@@ -523,10 +523,7 @@ static void resync_tnc(struct timer_list *t)
2303 +
2304 +
2305 + /* Start resync timer again -- the TNC might be still absent */
2306 +-
2307 +- del_timer(&sp->resync_t);
2308 +- sp->resync_t.expires = jiffies + SIXP_RESYNC_TIMEOUT;
2309 +- add_timer(&sp->resync_t);
2310 ++ mod_timer(&sp->resync_t, jiffies + SIXP_RESYNC_TIMEOUT);
2311 + }
2312 +
2313 + static inline int tnc_init(struct sixpack *sp)
2314 +@@ -537,9 +534,7 @@ static inline int tnc_init(struct sixpack *sp)
2315 +
2316 + sp->tty->ops->write(sp->tty, &inbyte, 1);
2317 +
2318 +- del_timer(&sp->resync_t);
2319 +- sp->resync_t.expires = jiffies + SIXP_RESYNC_TIMEOUT;
2320 +- add_timer(&sp->resync_t);
2321 ++ mod_timer(&sp->resync_t, jiffies + SIXP_RESYNC_TIMEOUT);
2322 +
2323 + return 0;
2324 + }
2325 +@@ -897,11 +892,8 @@ static void decode_prio_command(struct sixpack *sp, unsigned char cmd)
2326 + /* if the state byte has been received, the TNC is present,
2327 + so the resync timer can be reset. */
2328 +
2329 +- if (sp->tnc_state == TNC_IN_SYNC) {
2330 +- del_timer(&sp->resync_t);
2331 +- sp->resync_t.expires = jiffies + SIXP_INIT_RESYNC_TIMEOUT;
2332 +- add_timer(&sp->resync_t);
2333 +- }
2334 ++ if (sp->tnc_state == TNC_IN_SYNC)
2335 ++ mod_timer(&sp->resync_t, jiffies + SIXP_INIT_RESYNC_TIMEOUT);
2336 +
2337 + sp->status1 = cmd & SIXP_PRIO_DATA_MASK;
2338 + }
2339 +diff --git a/drivers/net/tap.c b/drivers/net/tap.c
2340 +index f03004f37eca..276f800ed57f 100644
2341 +--- a/drivers/net/tap.c
2342 ++++ b/drivers/net/tap.c
2343 +@@ -1177,8 +1177,6 @@ static int tap_get_user_xdp(struct tap_queue *q, struct xdp_buff *xdp)
2344 + goto err_kfree;
2345 + }
2346 +
2347 +- skb_probe_transport_header(skb, ETH_HLEN);
2348 +-
2349 + /* Move network header to the right position for VLAN tagged packets */
2350 + if ((skb->protocol == htons(ETH_P_8021Q) ||
2351 + skb->protocol == htons(ETH_P_8021AD)) &&
2352 +@@ -1189,6 +1187,7 @@ static int tap_get_user_xdp(struct tap_queue *q, struct xdp_buff *xdp)
2353 + tap = rcu_dereference(q->tap);
2354 + if (tap) {
2355 + skb->dev = tap->dev;
2356 ++ skb_probe_transport_header(skb, ETH_HLEN);
2357 + dev_queue_xmit(skb);
2358 + } else {
2359 + kfree_skb(skb);
2360 +diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
2361 +index 1098263ab862..46c3d983b7b7 100644
2362 +--- a/drivers/net/wan/x25_asy.c
2363 ++++ b/drivers/net/wan/x25_asy.c
2364 +@@ -485,8 +485,10 @@ static int x25_asy_open(struct net_device *dev)
2365 +
2366 + /* Cleanup */
2367 + kfree(sl->xbuff);
2368 ++ sl->xbuff = NULL;
2369 + noxbuff:
2370 + kfree(sl->rbuff);
2371 ++ sl->rbuff = NULL;
2372 + norbuff:
2373 + return -ENOMEM;
2374 + }
2375 +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
2376 +index 7f0a5bade70a..c0e3ae7bf2ae 100644
2377 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
2378 ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
2379 +@@ -5196,10 +5196,17 @@ static struct cfg80211_ops brcmf_cfg80211_ops = {
2380 + .del_pmk = brcmf_cfg80211_del_pmk,
2381 + };
2382 +
2383 +-struct cfg80211_ops *brcmf_cfg80211_get_ops(void)
2384 ++struct cfg80211_ops *brcmf_cfg80211_get_ops(struct brcmf_mp_device *settings)
2385 + {
2386 +- return kmemdup(&brcmf_cfg80211_ops, sizeof(brcmf_cfg80211_ops),
2387 ++ struct cfg80211_ops *ops;
2388 ++
2389 ++ ops = kmemdup(&brcmf_cfg80211_ops, sizeof(brcmf_cfg80211_ops),
2390 + GFP_KERNEL);
2391 ++
2392 ++ if (ops && settings->roamoff)
2393 ++ ops->update_connect_params = NULL;
2394 ++
2395 ++ return ops;
2396 + }
2397 +
2398 + struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
2399 +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
2400 +index a4aec0004e4f..9a6287f084a9 100644
2401 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
2402 ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
2403 +@@ -404,7 +404,7 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
2404 + void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg);
2405 + s32 brcmf_cfg80211_up(struct net_device *ndev);
2406 + s32 brcmf_cfg80211_down(struct net_device *ndev);
2407 +-struct cfg80211_ops *brcmf_cfg80211_get_ops(void);
2408 ++struct cfg80211_ops *brcmf_cfg80211_get_ops(struct brcmf_mp_device *settings);
2409 + enum nl80211_iftype brcmf_cfg80211_get_iftype(struct brcmf_if *ifp);
2410 +
2411 + struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
2412 +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
2413 +index b1f702faff4f..860a4372cb56 100644
2414 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
2415 ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
2416 +@@ -1130,7 +1130,7 @@ int brcmf_attach(struct device *dev, struct brcmf_mp_device *settings)
2417 +
2418 + brcmf_dbg(TRACE, "Enter\n");
2419 +
2420 +- ops = brcmf_cfg80211_get_ops();
2421 ++ ops = brcmf_cfg80211_get_ops(settings);
2422 + if (!ops)
2423 + return -ENOMEM;
2424 +
2425 +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
2426 +index 9095b830ae4d..9927079a9ace 100644
2427 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
2428 ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
2429 +@@ -641,8 +641,9 @@ brcmf_fw_alloc_request(u32 chip, u32 chiprev,
2430 + struct brcmf_fw_request *fwreq;
2431 + char chipname[12];
2432 + const char *mp_path;
2433 ++ size_t mp_path_len;
2434 + u32 i, j;
2435 +- char end;
2436 ++ char end = '\0';
2437 + size_t reqsz;
2438 +
2439 + for (i = 0; i < table_size; i++) {
2440 +@@ -667,7 +668,10 @@ brcmf_fw_alloc_request(u32 chip, u32 chiprev,
2441 + mapping_table[i].fw_base, chipname);
2442 +
2443 + mp_path = brcmf_mp_global.firmware_path;
2444 +- end = mp_path[strlen(mp_path) - 1];
2445 ++ mp_path_len = strnlen(mp_path, BRCMF_FW_ALTPATH_LEN);
2446 ++ if (mp_path_len)
2447 ++ end = mp_path[mp_path_len - 1];
2448 ++
2449 + fwreq->n_items = n_fwnames;
2450 +
2451 + for (j = 0; j < n_fwnames; j++) {
2452 +diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
2453 +index 9e015212c2c0..8d4711590dfc 100644
2454 +--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
2455 ++++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
2456 +@@ -513,6 +513,56 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
2457 + {IWL_PCI_DEVICE(0x24FD, 0x9074, iwl8265_2ac_cfg)},
2458 +
2459 + /* 9000 Series */
2460 ++ {IWL_PCI_DEVICE(0x02F0, 0x0030, iwl9560_2ac_cfg_soc)},
2461 ++ {IWL_PCI_DEVICE(0x02F0, 0x0034, iwl9560_2ac_cfg_soc)},
2462 ++ {IWL_PCI_DEVICE(0x02F0, 0x0038, iwl9560_2ac_cfg_soc)},
2463 ++ {IWL_PCI_DEVICE(0x02F0, 0x003C, iwl9560_2ac_cfg_soc)},
2464 ++ {IWL_PCI_DEVICE(0x02F0, 0x0060, iwl9461_2ac_cfg_soc)},
2465 ++ {IWL_PCI_DEVICE(0x02F0, 0x0064, iwl9461_2ac_cfg_soc)},
2466 ++ {IWL_PCI_DEVICE(0x02F0, 0x00A0, iwl9462_2ac_cfg_soc)},
2467 ++ {IWL_PCI_DEVICE(0x02F0, 0x00A4, iwl9462_2ac_cfg_soc)},
2468 ++ {IWL_PCI_DEVICE(0x02F0, 0x0230, iwl9560_2ac_cfg_soc)},
2469 ++ {IWL_PCI_DEVICE(0x02F0, 0x0234, iwl9560_2ac_cfg_soc)},
2470 ++ {IWL_PCI_DEVICE(0x02F0, 0x0238, iwl9560_2ac_cfg_soc)},
2471 ++ {IWL_PCI_DEVICE(0x02F0, 0x023C, iwl9560_2ac_cfg_soc)},
2472 ++ {IWL_PCI_DEVICE(0x02F0, 0x0260, iwl9461_2ac_cfg_soc)},
2473 ++ {IWL_PCI_DEVICE(0x02F0, 0x0264, iwl9461_2ac_cfg_soc)},
2474 ++ {IWL_PCI_DEVICE(0x02F0, 0x02A0, iwl9462_2ac_cfg_soc)},
2475 ++ {IWL_PCI_DEVICE(0x02F0, 0x02A4, iwl9462_2ac_cfg_soc)},
2476 ++ {IWL_PCI_DEVICE(0x02F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
2477 ++ {IWL_PCI_DEVICE(0x02F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
2478 ++ {IWL_PCI_DEVICE(0x02F0, 0x2030, iwl9560_2ac_cfg_soc)},
2479 ++ {IWL_PCI_DEVICE(0x02F0, 0x2034, iwl9560_2ac_cfg_soc)},
2480 ++ {IWL_PCI_DEVICE(0x02F0, 0x4030, iwl9560_2ac_cfg_soc)},
2481 ++ {IWL_PCI_DEVICE(0x02F0, 0x4034, iwl9560_2ac_cfg_soc)},
2482 ++ {IWL_PCI_DEVICE(0x02F0, 0x40A4, iwl9462_2ac_cfg_soc)},
2483 ++ {IWL_PCI_DEVICE(0x02F0, 0x4234, iwl9560_2ac_cfg_soc)},
2484 ++ {IWL_PCI_DEVICE(0x02F0, 0x42A4, iwl9462_2ac_cfg_soc)},
2485 ++ {IWL_PCI_DEVICE(0x06F0, 0x0030, iwl9560_2ac_cfg_soc)},
2486 ++ {IWL_PCI_DEVICE(0x06F0, 0x0034, iwl9560_2ac_cfg_soc)},
2487 ++ {IWL_PCI_DEVICE(0x06F0, 0x0038, iwl9560_2ac_cfg_soc)},
2488 ++ {IWL_PCI_DEVICE(0x06F0, 0x003C, iwl9560_2ac_cfg_soc)},
2489 ++ {IWL_PCI_DEVICE(0x06F0, 0x0060, iwl9461_2ac_cfg_soc)},
2490 ++ {IWL_PCI_DEVICE(0x06F0, 0x0064, iwl9461_2ac_cfg_soc)},
2491 ++ {IWL_PCI_DEVICE(0x06F0, 0x00A0, iwl9462_2ac_cfg_soc)},
2492 ++ {IWL_PCI_DEVICE(0x06F0, 0x00A4, iwl9462_2ac_cfg_soc)},
2493 ++ {IWL_PCI_DEVICE(0x06F0, 0x0230, iwl9560_2ac_cfg_soc)},
2494 ++ {IWL_PCI_DEVICE(0x06F0, 0x0234, iwl9560_2ac_cfg_soc)},
2495 ++ {IWL_PCI_DEVICE(0x06F0, 0x0238, iwl9560_2ac_cfg_soc)},
2496 ++ {IWL_PCI_DEVICE(0x06F0, 0x023C, iwl9560_2ac_cfg_soc)},
2497 ++ {IWL_PCI_DEVICE(0x06F0, 0x0260, iwl9461_2ac_cfg_soc)},
2498 ++ {IWL_PCI_DEVICE(0x06F0, 0x0264, iwl9461_2ac_cfg_soc)},
2499 ++ {IWL_PCI_DEVICE(0x06F0, 0x02A0, iwl9462_2ac_cfg_soc)},
2500 ++ {IWL_PCI_DEVICE(0x06F0, 0x02A4, iwl9462_2ac_cfg_soc)},
2501 ++ {IWL_PCI_DEVICE(0x06F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
2502 ++ {IWL_PCI_DEVICE(0x06F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
2503 ++ {IWL_PCI_DEVICE(0x06F0, 0x2030, iwl9560_2ac_cfg_soc)},
2504 ++ {IWL_PCI_DEVICE(0x06F0, 0x2034, iwl9560_2ac_cfg_soc)},
2505 ++ {IWL_PCI_DEVICE(0x06F0, 0x4030, iwl9560_2ac_cfg_soc)},
2506 ++ {IWL_PCI_DEVICE(0x06F0, 0x4034, iwl9560_2ac_cfg_soc)},
2507 ++ {IWL_PCI_DEVICE(0x06F0, 0x40A4, iwl9462_2ac_cfg_soc)},
2508 ++ {IWL_PCI_DEVICE(0x06F0, 0x4234, iwl9560_2ac_cfg_soc)},
2509 ++ {IWL_PCI_DEVICE(0x06F0, 0x42A4, iwl9462_2ac_cfg_soc)},
2510 + {IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_cfg)},
2511 + {IWL_PCI_DEVICE(0x2526, 0x0014, iwl9260_2ac_cfg)},
2512 + {IWL_PCI_DEVICE(0x2526, 0x0018, iwl9260_2ac_cfg)},
2513 +diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
2514 +index a3fb235fea0d..7431a795a624 100644
2515 +--- a/drivers/rtc/rtc-m41t80.c
2516 ++++ b/drivers/rtc/rtc-m41t80.c
2517 +@@ -393,7 +393,7 @@ static int m41t80_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
2518 + alrm->time.tm_min = bcd2bin(alarmvals[3] & 0x7f);
2519 + alrm->time.tm_hour = bcd2bin(alarmvals[2] & 0x3f);
2520 + alrm->time.tm_mday = bcd2bin(alarmvals[1] & 0x3f);
2521 +- alrm->time.tm_mon = bcd2bin(alarmvals[0] & 0x3f);
2522 ++ alrm->time.tm_mon = bcd2bin(alarmvals[0] & 0x3f) - 1;
2523 +
2524 + alrm->enabled = !!(alarmvals[0] & M41T80_ALMON_AFE);
2525 + alrm->pending = (flags & M41T80_FLAGS_AF) && alrm->enabled;
2526 +diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
2527 +index f35cc10772f6..25abf2d1732a 100644
2528 +--- a/drivers/spi/spi-bcm2835.c
2529 ++++ b/drivers/spi/spi-bcm2835.c
2530 +@@ -88,7 +88,7 @@ struct bcm2835_spi {
2531 + u8 *rx_buf;
2532 + int tx_len;
2533 + int rx_len;
2534 +- bool dma_pending;
2535 ++ unsigned int dma_pending;
2536 + };
2537 +
2538 + static inline u32 bcm2835_rd(struct bcm2835_spi *bs, unsigned reg)
2539 +@@ -155,8 +155,7 @@ static irqreturn_t bcm2835_spi_interrupt(int irq, void *dev_id)
2540 + /* Write as many bytes as possible to FIFO */
2541 + bcm2835_wr_fifo(bs);
2542 +
2543 +- /* based on flags decide if we can finish the transfer */
2544 +- if (bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_DONE) {
2545 ++ if (!bs->rx_len) {
2546 + /* Transfer complete - reset SPI HW */
2547 + bcm2835_spi_reset_hw(master);
2548 + /* wake up the framework */
2549 +@@ -233,10 +232,9 @@ static void bcm2835_spi_dma_done(void *data)
2550 + * is called the tx-dma must have finished - can't get to this
2551 + * situation otherwise...
2552 + */
2553 +- dmaengine_terminate_all(master->dma_tx);
2554 +-
2555 +- /* mark as no longer pending */
2556 +- bs->dma_pending = 0;
2557 ++ if (cmpxchg(&bs->dma_pending, true, false)) {
2558 ++ dmaengine_terminate_all(master->dma_tx);
2559 ++ }
2560 +
2561 + /* and mark as completed */;
2562 + complete(&master->xfer_completion);
2563 +@@ -342,6 +340,7 @@ static int bcm2835_spi_transfer_one_dma(struct spi_master *master,
2564 + if (ret) {
2565 + /* need to reset on errors */
2566 + dmaengine_terminate_all(master->dma_tx);
2567 ++ bs->dma_pending = false;
2568 + bcm2835_spi_reset_hw(master);
2569 + return ret;
2570 + }
2571 +@@ -617,10 +616,9 @@ static void bcm2835_spi_handle_err(struct spi_master *master,
2572 + struct bcm2835_spi *bs = spi_master_get_devdata(master);
2573 +
2574 + /* if an error occurred and we have an active dma, then terminate */
2575 +- if (bs->dma_pending) {
2576 ++ if (cmpxchg(&bs->dma_pending, true, false)) {
2577 + dmaengine_terminate_all(master->dma_tx);
2578 + dmaengine_terminate_all(master->dma_rx);
2579 +- bs->dma_pending = 0;
2580 + }
2581 + /* and reset */
2582 + bcm2835_spi_reset_hw(master);
2583 +diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
2584 +index 781754f36da7..8da66e996d23 100644
2585 +--- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
2586 ++++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
2587 +@@ -143,7 +143,6 @@ vc_vchi_audio_init(VCHI_INSTANCE_T vchi_instance,
2588 + dev_err(instance->dev,
2589 + "failed to open VCHI service connection (status=%d)\n",
2590 + status);
2591 +- kfree(instance);
2592 + return -EPERM;
2593 + }
2594 +
2595 +diff --git a/drivers/staging/wilc1000/wilc_sdio.c b/drivers/staging/wilc1000/wilc_sdio.c
2596 +index ca351c950344..5c3e4df804eb 100644
2597 +--- a/drivers/staging/wilc1000/wilc_sdio.c
2598 ++++ b/drivers/staging/wilc1000/wilc_sdio.c
2599 +@@ -841,6 +841,7 @@ static int sdio_read_int(struct wilc *wilc, u32 *int_status)
2600 + if (!sdio_priv->irq_gpio) {
2601 + int i;
2602 +
2603 ++ cmd.read_write = 0;
2604 + cmd.function = 1;
2605 + cmd.address = 0x04;
2606 + cmd.data = 0;
2607 +diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
2608 +index 57c66d2c3471..5413a04023f9 100644
2609 +--- a/drivers/tty/serial/xilinx_uartps.c
2610 ++++ b/drivers/tty/serial/xilinx_uartps.c
2611 +@@ -123,7 +123,7 @@ MODULE_PARM_DESC(rx_timeout, "Rx timeout, 1-255");
2612 + #define CDNS_UART_IXR_RXTRIG 0x00000001 /* RX FIFO trigger interrupt */
2613 + #define CDNS_UART_IXR_RXFULL 0x00000004 /* RX FIFO full interrupt. */
2614 + #define CDNS_UART_IXR_RXEMPTY 0x00000002 /* RX FIFO empty interrupt. */
2615 +-#define CDNS_UART_IXR_MASK 0x00001FFF /* Valid bit mask */
2616 ++#define CDNS_UART_IXR_RXMASK 0x000021e7 /* Valid RX bit mask */
2617 +
2618 + /*
2619 + * Do not enable parity error interrupt for the following
2620 +@@ -364,7 +364,7 @@ static irqreturn_t cdns_uart_isr(int irq, void *dev_id)
2621 + cdns_uart_handle_tx(dev_id);
2622 + isrstatus &= ~CDNS_UART_IXR_TXEMPTY;
2623 + }
2624 +- if (isrstatus & CDNS_UART_IXR_MASK)
2625 ++ if (isrstatus & CDNS_UART_IXR_RXMASK)
2626 + cdns_uart_handle_rx(dev_id, isrstatus);
2627 +
2628 + spin_unlock(&port->lock);
2629 +diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
2630 +index 987fc5ba6321..70e6c956c23c 100644
2631 +--- a/drivers/usb/Kconfig
2632 ++++ b/drivers/usb/Kconfig
2633 +@@ -205,8 +205,4 @@ config USB_ULPI_BUS
2634 + To compile this driver as a module, choose M here: the module will
2635 + be called ulpi.
2636 +
2637 +-config USB_ROLE_SWITCH
2638 +- tristate
2639 +- select USB_COMMON
2640 +-
2641 + endif # USB_SUPPORT
2642 +diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
2643 +index 1b68fed464cb..ed8c62b2d9d1 100644
2644 +--- a/drivers/usb/class/cdc-acm.c
2645 ++++ b/drivers/usb/class/cdc-acm.c
2646 +@@ -581,6 +581,13 @@ static int acm_tty_install(struct tty_driver *driver, struct tty_struct *tty)
2647 + if (retval)
2648 + goto error_init_termios;
2649 +
2650 ++ /*
2651 ++ * Suppress initial echoing for some devices which might send data
2652 ++ * immediately after acm driver has been installed.
2653 ++ */
2654 ++ if (acm->quirks & DISABLE_ECHO)
2655 ++ tty->termios.c_lflag &= ~ECHO;
2656 ++
2657 + tty->driver_data = acm;
2658 +
2659 + return 0;
2660 +@@ -1657,6 +1664,9 @@ static const struct usb_device_id acm_ids[] = {
2661 + { USB_DEVICE(0x0e8d, 0x0003), /* FIREFLY, MediaTek Inc; andrey.arapov@×××××.com */
2662 + .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
2663 + },
2664 ++ { USB_DEVICE(0x0e8d, 0x2000), /* MediaTek Inc Preloader */
2665 ++ .driver_info = DISABLE_ECHO, /* DISABLE ECHO in termios flag */
2666 ++ },
2667 + { USB_DEVICE(0x0e8d, 0x3329), /* MediaTek Inc GPS */
2668 + .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
2669 + },
2670 +diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
2671 +index ca06b20d7af9..515aad0847ee 100644
2672 +--- a/drivers/usb/class/cdc-acm.h
2673 ++++ b/drivers/usb/class/cdc-acm.h
2674 +@@ -140,3 +140,4 @@ struct acm {
2675 + #define QUIRK_CONTROL_LINE_STATE BIT(6)
2676 + #define CLEAR_HALT_CONDITIONS BIT(7)
2677 + #define SEND_ZERO_PACKET BIT(8)
2678 ++#define DISABLE_ECHO BIT(9)
2679 +diff --git a/drivers/usb/common/Makefile b/drivers/usb/common/Makefile
2680 +index fb4d5ef4165c..0a7c45e85481 100644
2681 +--- a/drivers/usb/common/Makefile
2682 ++++ b/drivers/usb/common/Makefile
2683 +@@ -9,4 +9,3 @@ usb-common-$(CONFIG_USB_LED_TRIG) += led.o
2684 +
2685 + obj-$(CONFIG_USB_OTG_FSM) += usb-otg-fsm.o
2686 + obj-$(CONFIG_USB_ULPI_BUS) += ulpi.o
2687 +-obj-$(CONFIG_USB_ROLE_SWITCH) += roles.o
2688 +diff --git a/drivers/usb/common/roles.c b/drivers/usb/common/roles.c
2689 +deleted file mode 100644
2690 +index 99116af07f1d..000000000000
2691 +--- a/drivers/usb/common/roles.c
2692 ++++ /dev/null
2693 +@@ -1,314 +0,0 @@
2694 +-// SPDX-License-Identifier: GPL-2.0
2695 +-/*
2696 +- * USB Role Switch Support
2697 +- *
2698 +- * Copyright (C) 2018 Intel Corporation
2699 +- * Author: Heikki Krogerus <heikki.krogerus@×××××××××××.com>
2700 +- * Hans de Goede <hdegoede@××××××.com>
2701 +- */
2702 +-
2703 +-#include <linux/usb/role.h>
2704 +-#include <linux/device.h>
2705 +-#include <linux/module.h>
2706 +-#include <linux/mutex.h>
2707 +-#include <linux/slab.h>
2708 +-
2709 +-static struct class *role_class;
2710 +-
2711 +-struct usb_role_switch {
2712 +- struct device dev;
2713 +- struct mutex lock; /* device lock*/
2714 +- enum usb_role role;
2715 +-
2716 +- /* From descriptor */
2717 +- struct device *usb2_port;
2718 +- struct device *usb3_port;
2719 +- struct device *udc;
2720 +- usb_role_switch_set_t set;
2721 +- usb_role_switch_get_t get;
2722 +- bool allow_userspace_control;
2723 +-};
2724 +-
2725 +-#define to_role_switch(d) container_of(d, struct usb_role_switch, dev)
2726 +-
2727 +-/**
2728 +- * usb_role_switch_set_role - Set USB role for a switch
2729 +- * @sw: USB role switch
2730 +- * @role: USB role to be switched to
2731 +- *
2732 +- * Set USB role @role for @sw.
2733 +- */
2734 +-int usb_role_switch_set_role(struct usb_role_switch *sw, enum usb_role role)
2735 +-{
2736 +- int ret;
2737 +-
2738 +- if (IS_ERR_OR_NULL(sw))
2739 +- return 0;
2740 +-
2741 +- mutex_lock(&sw->lock);
2742 +-
2743 +- ret = sw->set(sw->dev.parent, role);
2744 +- if (!ret)
2745 +- sw->role = role;
2746 +-
2747 +- mutex_unlock(&sw->lock);
2748 +-
2749 +- return ret;
2750 +-}
2751 +-EXPORT_SYMBOL_GPL(usb_role_switch_set_role);
2752 +-
2753 +-/**
2754 +- * usb_role_switch_get_role - Get the USB role for a switch
2755 +- * @sw: USB role switch
2756 +- *
2757 +- * Depending on the role-switch-driver this function returns either a cached
2758 +- * value of the last set role, or reads back the actual value from the hardware.
2759 +- */
2760 +-enum usb_role usb_role_switch_get_role(struct usb_role_switch *sw)
2761 +-{
2762 +- enum usb_role role;
2763 +-
2764 +- if (IS_ERR_OR_NULL(sw))
2765 +- return USB_ROLE_NONE;
2766 +-
2767 +- mutex_lock(&sw->lock);
2768 +-
2769 +- if (sw->get)
2770 +- role = sw->get(sw->dev.parent);
2771 +- else
2772 +- role = sw->role;
2773 +-
2774 +- mutex_unlock(&sw->lock);
2775 +-
2776 +- return role;
2777 +-}
2778 +-EXPORT_SYMBOL_GPL(usb_role_switch_get_role);
2779 +-
2780 +-static int __switch_match(struct device *dev, const void *name)
2781 +-{
2782 +- return !strcmp((const char *)name, dev_name(dev));
2783 +-}
2784 +-
2785 +-static void *usb_role_switch_match(struct device_connection *con, int ep,
2786 +- void *data)
2787 +-{
2788 +- struct device *dev;
2789 +-
2790 +- dev = class_find_device(role_class, NULL, con->endpoint[ep],
2791 +- __switch_match);
2792 +-
2793 +- return dev ? to_role_switch(dev) : ERR_PTR(-EPROBE_DEFER);
2794 +-}
2795 +-
2796 +-/**
2797 +- * usb_role_switch_get - Find USB role switch linked with the caller
2798 +- * @dev: The caller device
2799 +- *
2800 +- * Finds and returns role switch linked with @dev. The reference count for the
2801 +- * found switch is incremented.
2802 +- */
2803 +-struct usb_role_switch *usb_role_switch_get(struct device *dev)
2804 +-{
2805 +- struct usb_role_switch *sw;
2806 +-
2807 +- sw = device_connection_find_match(dev, "usb-role-switch", NULL,
2808 +- usb_role_switch_match);
2809 +-
2810 +- if (!IS_ERR_OR_NULL(sw))
2811 +- WARN_ON(!try_module_get(sw->dev.parent->driver->owner));
2812 +-
2813 +- return sw;
2814 +-}
2815 +-EXPORT_SYMBOL_GPL(usb_role_switch_get);
2816 +-
2817 +-/**
2818 +- * usb_role_switch_put - Release handle to a switch
2819 +- * @sw: USB Role Switch
2820 +- *
2821 +- * Decrement reference count for @sw.
2822 +- */
2823 +-void usb_role_switch_put(struct usb_role_switch *sw)
2824 +-{
2825 +- if (!IS_ERR_OR_NULL(sw)) {
2826 +- put_device(&sw->dev);
2827 +- module_put(sw->dev.parent->driver->owner);
2828 +- }
2829 +-}
2830 +-EXPORT_SYMBOL_GPL(usb_role_switch_put);
2831 +-
2832 +-static umode_t
2833 +-usb_role_switch_is_visible(struct kobject *kobj, struct attribute *attr, int n)
2834 +-{
2835 +- struct device *dev = container_of(kobj, typeof(*dev), kobj);
2836 +- struct usb_role_switch *sw = to_role_switch(dev);
2837 +-
2838 +- if (sw->allow_userspace_control)
2839 +- return attr->mode;
2840 +-
2841 +- return 0;
2842 +-}
2843 +-
2844 +-static const char * const usb_roles[] = {
2845 +- [USB_ROLE_NONE] = "none",
2846 +- [USB_ROLE_HOST] = "host",
2847 +- [USB_ROLE_DEVICE] = "device",
2848 +-};
2849 +-
2850 +-static ssize_t
2851 +-role_show(struct device *dev, struct device_attribute *attr, char *buf)
2852 +-{
2853 +- struct usb_role_switch *sw = to_role_switch(dev);
2854 +- enum usb_role role = usb_role_switch_get_role(sw);
2855 +-
2856 +- return sprintf(buf, "%s\n", usb_roles[role]);
2857 +-}
2858 +-
2859 +-static ssize_t role_store(struct device *dev, struct device_attribute *attr,
2860 +- const char *buf, size_t size)
2861 +-{
2862 +- struct usb_role_switch *sw = to_role_switch(dev);
2863 +- int ret;
2864 +-
2865 +- ret = sysfs_match_string(usb_roles, buf);
2866 +- if (ret < 0) {
2867 +- bool res;
2868 +-
2869 +- /* Extra check if the user wants to disable the switch */
2870 +- ret = kstrtobool(buf, &res);
2871 +- if (ret || res)
2872 +- return -EINVAL;
2873 +- }
2874 +-
2875 +- ret = usb_role_switch_set_role(sw, ret);
2876 +- if (ret)
2877 +- return ret;
2878 +-
2879 +- return size;
2880 +-}
2881 +-static DEVICE_ATTR_RW(role);
2882 +-
2883 +-static struct attribute *usb_role_switch_attrs[] = {
2884 +- &dev_attr_role.attr,
2885 +- NULL,
2886 +-};
2887 +-
2888 +-static const struct attribute_group usb_role_switch_group = {
2889 +- .is_visible = usb_role_switch_is_visible,
2890 +- .attrs = usb_role_switch_attrs,
2891 +-};
2892 +-
2893 +-static const struct attribute_group *usb_role_switch_groups[] = {
2894 +- &usb_role_switch_group,
2895 +- NULL,
2896 +-};
2897 +-
2898 +-static int
2899 +-usb_role_switch_uevent(struct device *dev, struct kobj_uevent_env *env)
2900 +-{
2901 +- int ret;
2902 +-
2903 +- ret = add_uevent_var(env, "USB_ROLE_SWITCH=%s", dev_name(dev));
2904 +- if (ret)
2905 +- dev_err(dev, "failed to add uevent USB_ROLE_SWITCH\n");
2906 +-
2907 +- return ret;
2908 +-}
2909 +-
2910 +-static void usb_role_switch_release(struct device *dev)
2911 +-{
2912 +- struct usb_role_switch *sw = to_role_switch(dev);
2913 +-
2914 +- kfree(sw);
2915 +-}
2916 +-
2917 +-static const struct device_type usb_role_dev_type = {
2918 +- .name = "usb_role_switch",
2919 +- .groups = usb_role_switch_groups,
2920 +- .uevent = usb_role_switch_uevent,
2921 +- .release = usb_role_switch_release,
2922 +-};
2923 +-
2924 +-/**
2925 +- * usb_role_switch_register - Register USB Role Switch
2926 +- * @parent: Parent device for the switch
2927 +- * @desc: Description of the switch
2928 +- *
2929 +- * USB Role Switch is a device capable or choosing the role for USB connector.
2930 +- * On platforms where the USB controller is dual-role capable, the controller
2931 +- * driver will need to register the switch. On platforms where the USB host and
2932 +- * USB device controllers behind the connector are separate, there will be a
2933 +- * mux, and the driver for that mux will need to register the switch.
2934 +- *
2935 +- * Returns handle to a new role switch or ERR_PTR. The content of @desc is
2936 +- * copied.
2937 +- */
2938 +-struct usb_role_switch *
2939 +-usb_role_switch_register(struct device *parent,
2940 +- const struct usb_role_switch_desc *desc)
2941 +-{
2942 +- struct usb_role_switch *sw;
2943 +- int ret;
2944 +-
2945 +- if (!desc || !desc->set)
2946 +- return ERR_PTR(-EINVAL);
2947 +-
2948 +- sw = kzalloc(sizeof(*sw), GFP_KERNEL);
2949 +- if (!sw)
2950 +- return ERR_PTR(-ENOMEM);
2951 +-
2952 +- mutex_init(&sw->lock);
2953 +-
2954 +- sw->allow_userspace_control = desc->allow_userspace_control;
2955 +- sw->usb2_port = desc->usb2_port;
2956 +- sw->usb3_port = desc->usb3_port;
2957 +- sw->udc = desc->udc;
2958 +- sw->set = desc->set;
2959 +- sw->get = desc->get;
2960 +-
2961 +- sw->dev.parent = parent;
2962 +- sw->dev.class = role_class;
2963 +- sw->dev.type = &usb_role_dev_type;
2964 +- dev_set_name(&sw->dev, "%s-role-switch", dev_name(parent));
2965 +-
2966 +- ret = device_register(&sw->dev);
2967 +- if (ret) {
2968 +- put_device(&sw->dev);
2969 +- return ERR_PTR(ret);
2970 +- }
2971 +-
2972 +- /* TODO: Symlinks for the host port and the device controller. */
2973 +-
2974 +- return sw;
2975 +-}
2976 +-EXPORT_SYMBOL_GPL(usb_role_switch_register);
2977 +-
2978 +-/**
2979 +- * usb_role_switch_unregister - Unregsiter USB Role Switch
2980 +- * @sw: USB Role Switch
2981 +- *
2982 +- * Unregister switch that was registered with usb_role_switch_register().
2983 +- */
2984 +-void usb_role_switch_unregister(struct usb_role_switch *sw)
2985 +-{
2986 +- if (!IS_ERR_OR_NULL(sw))
2987 +- device_unregister(&sw->dev);
2988 +-}
2989 +-EXPORT_SYMBOL_GPL(usb_role_switch_unregister);
2990 +-
2991 +-static int __init usb_roles_init(void)
2992 +-{
2993 +- role_class = class_create(THIS_MODULE, "usb_role");
2994 +- return PTR_ERR_OR_ZERO(role_class);
2995 +-}
2996 +-subsys_initcall(usb_roles_init);
2997 +-
2998 +-static void __exit usb_roles_exit(void)
2999 +-{
3000 +- class_destroy(role_class);
3001 +-}
3002 +-module_exit(usb_roles_exit);
3003 +-
3004 +-MODULE_AUTHOR("Heikki Krogerus <heikki.krogerus@×××××××××××.com>");
3005 +-MODULE_AUTHOR("Hans de Goede <hdegoede@××××××.com>");
3006 +-MODULE_LICENSE("GPL v2");
3007 +-MODULE_DESCRIPTION("USB Role Class");
3008 +diff --git a/drivers/usb/dwc2/hcd.h b/drivers/usb/dwc2/hcd.h
3009 +index 3f9bccc95add..c089ffa1f0a8 100644
3010 +--- a/drivers/usb/dwc2/hcd.h
3011 ++++ b/drivers/usb/dwc2/hcd.h
3012 +@@ -366,7 +366,7 @@ struct dwc2_qh {
3013 + u32 desc_list_sz;
3014 + u32 *n_bytes;
3015 + struct timer_list unreserve_timer;
3016 +- struct timer_list wait_timer;
3017 ++ struct hrtimer wait_timer;
3018 + struct dwc2_tt *dwc_tt;
3019 + int ttport;
3020 + unsigned tt_buffer_dirty:1;
3021 +diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c
3022 +index 40839591d2ec..ea3aa640c15c 100644
3023 +--- a/drivers/usb/dwc2/hcd_queue.c
3024 ++++ b/drivers/usb/dwc2/hcd_queue.c
3025 +@@ -59,7 +59,7 @@
3026 + #define DWC2_UNRESERVE_DELAY (msecs_to_jiffies(5))
3027 +
3028 + /* If we get a NAK, wait this long before retrying */
3029 +-#define DWC2_RETRY_WAIT_DELAY (msecs_to_jiffies(1))
3030 ++#define DWC2_RETRY_WAIT_DELAY 1*1E6L
3031 +
3032 + /**
3033 + * dwc2_periodic_channel_available() - Checks that a channel is available for a
3034 +@@ -1464,10 +1464,12 @@ static void dwc2_deschedule_periodic(struct dwc2_hsotg *hsotg,
3035 + * qh back to the "inactive" list, then queues transactions.
3036 + *
3037 + * @t: Pointer to wait_timer in a qh.
3038 ++ *
3039 ++ * Return: HRTIMER_NORESTART to not automatically restart this timer.
3040 + */
3041 +-static void dwc2_wait_timer_fn(struct timer_list *t)
3042 ++static enum hrtimer_restart dwc2_wait_timer_fn(struct hrtimer *t)
3043 + {
3044 +- struct dwc2_qh *qh = from_timer(qh, t, wait_timer);
3045 ++ struct dwc2_qh *qh = container_of(t, struct dwc2_qh, wait_timer);
3046 + struct dwc2_hsotg *hsotg = qh->hsotg;
3047 + unsigned long flags;
3048 +
3049 +@@ -1491,6 +1493,7 @@ static void dwc2_wait_timer_fn(struct timer_list *t)
3050 + }
3051 +
3052 + spin_unlock_irqrestore(&hsotg->lock, flags);
3053 ++ return HRTIMER_NORESTART;
3054 + }
3055 +
3056 + /**
3057 +@@ -1521,7 +1524,8 @@ static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
3058 + /* Initialize QH */
3059 + qh->hsotg = hsotg;
3060 + timer_setup(&qh->unreserve_timer, dwc2_unreserve_timer_fn, 0);
3061 +- timer_setup(&qh->wait_timer, dwc2_wait_timer_fn, 0);
3062 ++ hrtimer_init(&qh->wait_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3063 ++ qh->wait_timer.function = &dwc2_wait_timer_fn;
3064 + qh->ep_type = ep_type;
3065 + qh->ep_is_in = ep_is_in;
3066 +
3067 +@@ -1690,7 +1694,7 @@ void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
3068 + * won't do anything anyway, but we want it to finish before we free
3069 + * memory.
3070 + */
3071 +- del_timer_sync(&qh->wait_timer);
3072 ++ hrtimer_cancel(&qh->wait_timer);
3073 +
3074 + dwc2_host_put_tt_info(hsotg, qh->dwc_tt);
3075 +
3076 +@@ -1716,6 +1720,7 @@ int dwc2_hcd_qh_add(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
3077 + {
3078 + int status;
3079 + u32 intr_mask;
3080 ++ ktime_t delay;
3081 +
3082 + if (dbg_qh(qh))
3083 + dev_vdbg(hsotg->dev, "%s()\n", __func__);
3084 +@@ -1734,8 +1739,8 @@ int dwc2_hcd_qh_add(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
3085 + list_add_tail(&qh->qh_list_entry,
3086 + &hsotg->non_periodic_sched_waiting);
3087 + qh->wait_timer_cancel = false;
3088 +- mod_timer(&qh->wait_timer,
3089 +- jiffies + DWC2_RETRY_WAIT_DELAY + 1);
3090 ++ delay = ktime_set(0, DWC2_RETRY_WAIT_DELAY);
3091 ++ hrtimer_start(&qh->wait_timer, delay, HRTIMER_MODE_REL);
3092 + } else {
3093 + list_add_tail(&qh->qh_list_entry,
3094 + &hsotg->non_periodic_sched_inactive);
3095 +diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
3096 +index 7c1b6938f212..38c813b1d203 100644
3097 +--- a/drivers/usb/dwc2/params.c
3098 ++++ b/drivers/usb/dwc2/params.c
3099 +@@ -111,6 +111,7 @@ static void dwc2_set_amlogic_params(struct dwc2_hsotg *hsotg)
3100 + p->phy_type = DWC2_PHY_TYPE_PARAM_UTMI;
3101 + p->ahbcfg = GAHBCFG_HBSTLEN_INCR8 <<
3102 + GAHBCFG_HBSTLEN_SHIFT;
3103 ++ p->power_down = DWC2_POWER_DOWN_PARAM_NONE;
3104 + }
3105 +
3106 + static void dwc2_set_amcc_params(struct dwc2_hsotg *hsotg)
3107 +diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
3108 +index 842795856bf4..fdc6e4e403e8 100644
3109 +--- a/drivers/usb/dwc3/dwc3-pci.c
3110 ++++ b/drivers/usb/dwc3/dwc3-pci.c
3111 +@@ -170,20 +170,20 @@ static int dwc3_pci_quirks(struct dwc3_pci *dwc)
3112 + * put the gpio descriptors again here because the phy driver
3113 + * might want to grab them, too.
3114 + */
3115 +- gpio = devm_gpiod_get_optional(&pdev->dev, "cs",
3116 +- GPIOD_OUT_LOW);
3117 ++ gpio = gpiod_get_optional(&pdev->dev, "cs", GPIOD_OUT_LOW);
3118 + if (IS_ERR(gpio))
3119 + return PTR_ERR(gpio);
3120 +
3121 + gpiod_set_value_cansleep(gpio, 1);
3122 ++ gpiod_put(gpio);
3123 +
3124 +- gpio = devm_gpiod_get_optional(&pdev->dev, "reset",
3125 +- GPIOD_OUT_LOW);
3126 ++ gpio = gpiod_get_optional(&pdev->dev, "reset", GPIOD_OUT_LOW);
3127 + if (IS_ERR(gpio))
3128 + return PTR_ERR(gpio);
3129 +
3130 + if (gpio) {
3131 + gpiod_set_value_cansleep(gpio, 1);
3132 ++ gpiod_put(gpio);
3133 + usleep_range(10000, 11000);
3134 + }
3135 + }
3136 +diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
3137 +index 984892dd72f5..42668aeca57c 100644
3138 +--- a/drivers/usb/host/r8a66597-hcd.c
3139 ++++ b/drivers/usb/host/r8a66597-hcd.c
3140 +@@ -1979,6 +1979,8 @@ static int r8a66597_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
3141 +
3142 + static void r8a66597_endpoint_disable(struct usb_hcd *hcd,
3143 + struct usb_host_endpoint *hep)
3144 ++__acquires(r8a66597->lock)
3145 ++__releases(r8a66597->lock)
3146 + {
3147 + struct r8a66597 *r8a66597 = hcd_to_r8a66597(hcd);
3148 + struct r8a66597_pipe *pipe = (struct r8a66597_pipe *)hep->hcpriv;
3149 +@@ -1991,13 +1993,14 @@ static void r8a66597_endpoint_disable(struct usb_hcd *hcd,
3150 + return;
3151 + pipenum = pipe->info.pipenum;
3152 +
3153 ++ spin_lock_irqsave(&r8a66597->lock, flags);
3154 + if (pipenum == 0) {
3155 + kfree(hep->hcpriv);
3156 + hep->hcpriv = NULL;
3157 ++ spin_unlock_irqrestore(&r8a66597->lock, flags);
3158 + return;
3159 + }
3160 +
3161 +- spin_lock_irqsave(&r8a66597->lock, flags);
3162 + pipe_stop(r8a66597, pipe);
3163 + pipe_irq_disable(r8a66597, pipenum);
3164 + disable_irq_empty(r8a66597, pipenum);
3165 +diff --git a/drivers/usb/roles/Kconfig b/drivers/usb/roles/Kconfig
3166 +index f5a5e6f79f1b..e4194ac94510 100644
3167 +--- a/drivers/usb/roles/Kconfig
3168 ++++ b/drivers/usb/roles/Kconfig
3169 +@@ -1,3 +1,16 @@
3170 ++config USB_ROLE_SWITCH
3171 ++ tristate "USB Role Switch Support"
3172 ++ help
3173 ++ USB Role Switch is a device that can select the USB role - host or
3174 ++ device - for a USB port (connector). In most cases dual-role capable
3175 ++ USB controller will also represent the switch, but on some platforms
3176 ++ multiplexer/demultiplexer switch is used to route the data lines on
3177 ++ the USB connector between separate USB host and device controllers.
3178 ++
3179 ++ Say Y here if your USB connectors support both device and host roles.
3180 ++ To compile the driver as module, choose M here: the module will be
3181 ++ called roles.ko.
3182 ++
3183 + if USB_ROLE_SWITCH
3184 +
3185 + config USB_ROLES_INTEL_XHCI
3186 +diff --git a/drivers/usb/roles/Makefile b/drivers/usb/roles/Makefile
3187 +index e44b179ba275..c02873206fc1 100644
3188 +--- a/drivers/usb/roles/Makefile
3189 ++++ b/drivers/usb/roles/Makefile
3190 +@@ -1 +1,3 @@
3191 +-obj-$(CONFIG_USB_ROLES_INTEL_XHCI) += intel-xhci-usb-role-switch.o
3192 ++obj-$(CONFIG_USB_ROLE_SWITCH) += roles.o
3193 ++roles-y := class.o
3194 ++obj-$(CONFIG_USB_ROLES_INTEL_XHCI) += intel-xhci-usb-role-switch.o
3195 +diff --git a/drivers/usb/roles/class.c b/drivers/usb/roles/class.c
3196 +new file mode 100644
3197 +index 000000000000..99116af07f1d
3198 +--- /dev/null
3199 ++++ b/drivers/usb/roles/class.c
3200 +@@ -0,0 +1,314 @@
3201 ++// SPDX-License-Identifier: GPL-2.0
3202 ++/*
3203 ++ * USB Role Switch Support
3204 ++ *
3205 ++ * Copyright (C) 2018 Intel Corporation
3206 ++ * Author: Heikki Krogerus <heikki.krogerus@×××××××××××.com>
3207 ++ * Hans de Goede <hdegoede@××××××.com>
3208 ++ */
3209 ++
3210 ++#include <linux/usb/role.h>
3211 ++#include <linux/device.h>
3212 ++#include <linux/module.h>
3213 ++#include <linux/mutex.h>
3214 ++#include <linux/slab.h>
3215 ++
3216 ++static struct class *role_class;
3217 ++
3218 ++struct usb_role_switch {
3219 ++ struct device dev;
3220 ++ struct mutex lock; /* device lock*/
3221 ++ enum usb_role role;
3222 ++
3223 ++ /* From descriptor */
3224 ++ struct device *usb2_port;
3225 ++ struct device *usb3_port;
3226 ++ struct device *udc;
3227 ++ usb_role_switch_set_t set;
3228 ++ usb_role_switch_get_t get;
3229 ++ bool allow_userspace_control;
3230 ++};
3231 ++
3232 ++#define to_role_switch(d) container_of(d, struct usb_role_switch, dev)
3233 ++
3234 ++/**
3235 ++ * usb_role_switch_set_role - Set USB role for a switch
3236 ++ * @sw: USB role switch
3237 ++ * @role: USB role to be switched to
3238 ++ *
3239 ++ * Set USB role @role for @sw.
3240 ++ */
3241 ++int usb_role_switch_set_role(struct usb_role_switch *sw, enum usb_role role)
3242 ++{
3243 ++ int ret;
3244 ++
3245 ++ if (IS_ERR_OR_NULL(sw))
3246 ++ return 0;
3247 ++
3248 ++ mutex_lock(&sw->lock);
3249 ++
3250 ++ ret = sw->set(sw->dev.parent, role);
3251 ++ if (!ret)
3252 ++ sw->role = role;
3253 ++
3254 ++ mutex_unlock(&sw->lock);
3255 ++
3256 ++ return ret;
3257 ++}
3258 ++EXPORT_SYMBOL_GPL(usb_role_switch_set_role);
3259 ++
3260 ++/**
3261 ++ * usb_role_switch_get_role - Get the USB role for a switch
3262 ++ * @sw: USB role switch
3263 ++ *
3264 ++ * Depending on the role-switch-driver this function returns either a cached
3265 ++ * value of the last set role, or reads back the actual value from the hardware.
3266 ++ */
3267 ++enum usb_role usb_role_switch_get_role(struct usb_role_switch *sw)
3268 ++{
3269 ++ enum usb_role role;
3270 ++
3271 ++ if (IS_ERR_OR_NULL(sw))
3272 ++ return USB_ROLE_NONE;
3273 ++
3274 ++ mutex_lock(&sw->lock);
3275 ++
3276 ++ if (sw->get)
3277 ++ role = sw->get(sw->dev.parent);
3278 ++ else
3279 ++ role = sw->role;
3280 ++
3281 ++ mutex_unlock(&sw->lock);
3282 ++
3283 ++ return role;
3284 ++}
3285 ++EXPORT_SYMBOL_GPL(usb_role_switch_get_role);
3286 ++
3287 ++static int __switch_match(struct device *dev, const void *name)
3288 ++{
3289 ++ return !strcmp((const char *)name, dev_name(dev));
3290 ++}
3291 ++
3292 ++static void *usb_role_switch_match(struct device_connection *con, int ep,
3293 ++ void *data)
3294 ++{
3295 ++ struct device *dev;
3296 ++
3297 ++ dev = class_find_device(role_class, NULL, con->endpoint[ep],
3298 ++ __switch_match);
3299 ++
3300 ++ return dev ? to_role_switch(dev) : ERR_PTR(-EPROBE_DEFER);
3301 ++}
3302 ++
3303 ++/**
3304 ++ * usb_role_switch_get - Find USB role switch linked with the caller
3305 ++ * @dev: The caller device
3306 ++ *
3307 ++ * Finds and returns role switch linked with @dev. The reference count for the
3308 ++ * found switch is incremented.
3309 ++ */
3310 ++struct usb_role_switch *usb_role_switch_get(struct device *dev)
3311 ++{
3312 ++ struct usb_role_switch *sw;
3313 ++
3314 ++ sw = device_connection_find_match(dev, "usb-role-switch", NULL,
3315 ++ usb_role_switch_match);
3316 ++
3317 ++ if (!IS_ERR_OR_NULL(sw))
3318 ++ WARN_ON(!try_module_get(sw->dev.parent->driver->owner));
3319 ++
3320 ++ return sw;
3321 ++}
3322 ++EXPORT_SYMBOL_GPL(usb_role_switch_get);
3323 ++
3324 ++/**
3325 ++ * usb_role_switch_put - Release handle to a switch
3326 ++ * @sw: USB Role Switch
3327 ++ *
3328 ++ * Decrement reference count for @sw.
3329 ++ */
3330 ++void usb_role_switch_put(struct usb_role_switch *sw)
3331 ++{
3332 ++ if (!IS_ERR_OR_NULL(sw)) {
3333 ++ put_device(&sw->dev);
3334 ++ module_put(sw->dev.parent->driver->owner);
3335 ++ }
3336 ++}
3337 ++EXPORT_SYMBOL_GPL(usb_role_switch_put);
3338 ++
3339 ++static umode_t
3340 ++usb_role_switch_is_visible(struct kobject *kobj, struct attribute *attr, int n)
3341 ++{
3342 ++ struct device *dev = container_of(kobj, typeof(*dev), kobj);
3343 ++ struct usb_role_switch *sw = to_role_switch(dev);
3344 ++
3345 ++ if (sw->allow_userspace_control)
3346 ++ return attr->mode;
3347 ++
3348 ++ return 0;
3349 ++}
3350 ++
3351 ++static const char * const usb_roles[] = {
3352 ++ [USB_ROLE_NONE] = "none",
3353 ++ [USB_ROLE_HOST] = "host",
3354 ++ [USB_ROLE_DEVICE] = "device",
3355 ++};
3356 ++
3357 ++static ssize_t
3358 ++role_show(struct device *dev, struct device_attribute *attr, char *buf)
3359 ++{
3360 ++ struct usb_role_switch *sw = to_role_switch(dev);
3361 ++ enum usb_role role = usb_role_switch_get_role(sw);
3362 ++
3363 ++ return sprintf(buf, "%s\n", usb_roles[role]);
3364 ++}
3365 ++
3366 ++static ssize_t role_store(struct device *dev, struct device_attribute *attr,
3367 ++ const char *buf, size_t size)
3368 ++{
3369 ++ struct usb_role_switch *sw = to_role_switch(dev);
3370 ++ int ret;
3371 ++
3372 ++ ret = sysfs_match_string(usb_roles, buf);
3373 ++ if (ret < 0) {
3374 ++ bool res;
3375 ++
3376 ++ /* Extra check if the user wants to disable the switch */
3377 ++ ret = kstrtobool(buf, &res);
3378 ++ if (ret || res)
3379 ++ return -EINVAL;
3380 ++ }
3381 ++
3382 ++ ret = usb_role_switch_set_role(sw, ret);
3383 ++ if (ret)
3384 ++ return ret;
3385 ++
3386 ++ return size;
3387 ++}
3388 ++static DEVICE_ATTR_RW(role);
3389 ++
3390 ++static struct attribute *usb_role_switch_attrs[] = {
3391 ++ &dev_attr_role.attr,
3392 ++ NULL,
3393 ++};
3394 ++
3395 ++static const struct attribute_group usb_role_switch_group = {
3396 ++ .is_visible = usb_role_switch_is_visible,
3397 ++ .attrs = usb_role_switch_attrs,
3398 ++};
3399 ++
3400 ++static const struct attribute_group *usb_role_switch_groups[] = {
3401 ++ &usb_role_switch_group,
3402 ++ NULL,
3403 ++};
3404 ++
3405 ++static int
3406 ++usb_role_switch_uevent(struct device *dev, struct kobj_uevent_env *env)
3407 ++{
3408 ++ int ret;
3409 ++
3410 ++ ret = add_uevent_var(env, "USB_ROLE_SWITCH=%s", dev_name(dev));
3411 ++ if (ret)
3412 ++ dev_err(dev, "failed to add uevent USB_ROLE_SWITCH\n");
3413 ++
3414 ++ return ret;
3415 ++}
3416 ++
3417 ++static void usb_role_switch_release(struct device *dev)
3418 ++{
3419 ++ struct usb_role_switch *sw = to_role_switch(dev);
3420 ++
3421 ++ kfree(sw);
3422 ++}
3423 ++
3424 ++static const struct device_type usb_role_dev_type = {
3425 ++ .name = "usb_role_switch",
3426 ++ .groups = usb_role_switch_groups,
3427 ++ .uevent = usb_role_switch_uevent,
3428 ++ .release = usb_role_switch_release,
3429 ++};
3430 ++
3431 ++/**
3432 ++ * usb_role_switch_register - Register USB Role Switch
3433 ++ * @parent: Parent device for the switch
3434 ++ * @desc: Description of the switch
3435 ++ *
3436 ++ * USB Role Switch is a device capable or choosing the role for USB connector.
3437 ++ * On platforms where the USB controller is dual-role capable, the controller
3438 ++ * driver will need to register the switch. On platforms where the USB host and
3439 ++ * USB device controllers behind the connector are separate, there will be a
3440 ++ * mux, and the driver for that mux will need to register the switch.
3441 ++ *
3442 ++ * Returns handle to a new role switch or ERR_PTR. The content of @desc is
3443 ++ * copied.
3444 ++ */
3445 ++struct usb_role_switch *
3446 ++usb_role_switch_register(struct device *parent,
3447 ++ const struct usb_role_switch_desc *desc)
3448 ++{
3449 ++ struct usb_role_switch *sw;
3450 ++ int ret;
3451 ++
3452 ++ if (!desc || !desc->set)
3453 ++ return ERR_PTR(-EINVAL);
3454 ++
3455 ++ sw = kzalloc(sizeof(*sw), GFP_KERNEL);
3456 ++ if (!sw)
3457 ++ return ERR_PTR(-ENOMEM);
3458 ++
3459 ++ mutex_init(&sw->lock);
3460 ++
3461 ++ sw->allow_userspace_control = desc->allow_userspace_control;
3462 ++ sw->usb2_port = desc->usb2_port;
3463 ++ sw->usb3_port = desc->usb3_port;
3464 ++ sw->udc = desc->udc;
3465 ++ sw->set = desc->set;
3466 ++ sw->get = desc->get;
3467 ++
3468 ++ sw->dev.parent = parent;
3469 ++ sw->dev.class = role_class;
3470 ++ sw->dev.type = &usb_role_dev_type;
3471 ++ dev_set_name(&sw->dev, "%s-role-switch", dev_name(parent));
3472 ++
3473 ++ ret = device_register(&sw->dev);
3474 ++ if (ret) {
3475 ++ put_device(&sw->dev);
3476 ++ return ERR_PTR(ret);
3477 ++ }
3478 ++
3479 ++ /* TODO: Symlinks for the host port and the device controller. */
3480 ++
3481 ++ return sw;
3482 ++}
3483 ++EXPORT_SYMBOL_GPL(usb_role_switch_register);
3484 ++
3485 ++/**
3486 ++ * usb_role_switch_unregister - Unregsiter USB Role Switch
3487 ++ * @sw: USB Role Switch
3488 ++ *
3489 ++ * Unregister switch that was registered with usb_role_switch_register().
3490 ++ */
3491 ++void usb_role_switch_unregister(struct usb_role_switch *sw)
3492 ++{
3493 ++ if (!IS_ERR_OR_NULL(sw))
3494 ++ device_unregister(&sw->dev);
3495 ++}
3496 ++EXPORT_SYMBOL_GPL(usb_role_switch_unregister);
3497 ++
3498 ++static int __init usb_roles_init(void)
3499 ++{
3500 ++ role_class = class_create(THIS_MODULE, "usb_role");
3501 ++ return PTR_ERR_OR_ZERO(role_class);
3502 ++}
3503 ++subsys_initcall(usb_roles_init);
3504 ++
3505 ++static void __exit usb_roles_exit(void)
3506 ++{
3507 ++ class_destroy(role_class);
3508 ++}
3509 ++module_exit(usb_roles_exit);
3510 ++
3511 ++MODULE_AUTHOR("Heikki Krogerus <heikki.krogerus@×××××××××××.com>");
3512 ++MODULE_AUTHOR("Hans de Goede <hdegoede@××××××.com>");
3513 ++MODULE_LICENSE("GPL v2");
3514 ++MODULE_DESCRIPTION("USB Role Class");
3515 +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
3516 +index 1ce27f3ff7a7..aef15497ff31 100644
3517 +--- a/drivers/usb/serial/option.c
3518 ++++ b/drivers/usb/serial/option.c
3519 +@@ -1955,6 +1955,10 @@ static const struct usb_device_id option_ids[] = {
3520 + { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x1b) },
3521 + { USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 */
3522 + .driver_info = RSVD(4) | RSVD(5) | RSVD(6) },
3523 ++ { USB_DEVICE(0x2cb7, 0x0104), /* Fibocom NL678 series */
3524 ++ .driver_info = RSVD(4) | RSVD(5) },
3525 ++ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff), /* Fibocom NL678 series */
3526 ++ .driver_info = RSVD(6) },
3527 + { } /* Terminating entry */
3528 + };
3529 + MODULE_DEVICE_TABLE(usb, option_ids);
3530 +diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
3531 +index a4e0d13fc121..98e7a5df0f6d 100644
3532 +--- a/drivers/usb/serial/pl2303.c
3533 ++++ b/drivers/usb/serial/pl2303.c
3534 +@@ -91,9 +91,14 @@ static const struct usb_device_id id_table[] = {
3535 + { USB_DEVICE(YCCABLE_VENDOR_ID, YCCABLE_PRODUCT_ID) },
3536 + { USB_DEVICE(SUPERIAL_VENDOR_ID, SUPERIAL_PRODUCT_ID) },
3537 + { USB_DEVICE(HP_VENDOR_ID, HP_LD220_PRODUCT_ID) },
3538 ++ { USB_DEVICE(HP_VENDOR_ID, HP_LD220TA_PRODUCT_ID) },
3539 + { USB_DEVICE(HP_VENDOR_ID, HP_LD960_PRODUCT_ID) },
3540 ++ { USB_DEVICE(HP_VENDOR_ID, HP_LD960TA_PRODUCT_ID) },
3541 + { USB_DEVICE(HP_VENDOR_ID, HP_LCM220_PRODUCT_ID) },
3542 + { USB_DEVICE(HP_VENDOR_ID, HP_LCM960_PRODUCT_ID) },
3543 ++ { USB_DEVICE(HP_VENDOR_ID, HP_LM920_PRODUCT_ID) },
3544 ++ { USB_DEVICE(HP_VENDOR_ID, HP_LM940_PRODUCT_ID) },
3545 ++ { USB_DEVICE(HP_VENDOR_ID, HP_TD620_PRODUCT_ID) },
3546 + { USB_DEVICE(CRESSI_VENDOR_ID, CRESSI_EDY_PRODUCT_ID) },
3547 + { USB_DEVICE(ZEAGLE_VENDOR_ID, ZEAGLE_N2ITION3_PRODUCT_ID) },
3548 + { USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) },
3549 +diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
3550 +index 26965cc23c17..4e2554d55362 100644
3551 +--- a/drivers/usb/serial/pl2303.h
3552 ++++ b/drivers/usb/serial/pl2303.h
3553 +@@ -119,10 +119,15 @@
3554 +
3555 + /* Hewlett-Packard POS Pole Displays */
3556 + #define HP_VENDOR_ID 0x03f0
3557 ++#define HP_LM920_PRODUCT_ID 0x026b
3558 ++#define HP_TD620_PRODUCT_ID 0x0956
3559 + #define HP_LD960_PRODUCT_ID 0x0b39
3560 + #define HP_LCM220_PRODUCT_ID 0x3139
3561 + #define HP_LCM960_PRODUCT_ID 0x3239
3562 + #define HP_LD220_PRODUCT_ID 0x3524
3563 ++#define HP_LD220TA_PRODUCT_ID 0x4349
3564 ++#define HP_LD960TA_PRODUCT_ID 0x4439
3565 ++#define HP_LM940_PRODUCT_ID 0x5039
3566 +
3567 + /* Cressi Edy (diving computer) PC interface */
3568 + #define CRESSI_VENDOR_ID 0x04b8
3569 +diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
3570 +index 97d91e55b70a..a0e230b31a88 100644
3571 +--- a/fs/btrfs/btrfs_inode.h
3572 ++++ b/fs/btrfs/btrfs_inode.h
3573 +@@ -146,6 +146,12 @@ struct btrfs_inode {
3574 + */
3575 + u64 last_unlink_trans;
3576 +
3577 ++ /*
3578 ++ * Track the transaction id of the last transaction used to create a
3579 ++ * hard link for the inode. This is used by the log tree (fsync).
3580 ++ */
3581 ++ u64 last_link_trans;
3582 ++
3583 + /*
3584 + * Number of bytes outstanding that are going to need csums. This is
3585 + * used in ENOSPC accounting.
3586 +diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
3587 +index 539901fb5165..99e7645ad94e 100644
3588 +--- a/fs/btrfs/ctree.c
3589 ++++ b/fs/btrfs/ctree.c
3590 +@@ -2584,14 +2584,27 @@ static struct extent_buffer *btrfs_search_slot_get_root(struct btrfs_root *root,
3591 + root_lock = BTRFS_READ_LOCK;
3592 +
3593 + if (p->search_commit_root) {
3594 +- /* The commit roots are read only so we always do read locks */
3595 +- if (p->need_commit_sem)
3596 ++ /*
3597 ++ * The commit roots are read only so we always do read locks,
3598 ++ * and we always must hold the commit_root_sem when doing
3599 ++ * searches on them, the only exception is send where we don't
3600 ++ * want to block transaction commits for a long time, so
3601 ++ * we need to clone the commit root in order to avoid races
3602 ++ * with transaction commits that create a snapshot of one of
3603 ++ * the roots used by a send operation.
3604 ++ */
3605 ++ if (p->need_commit_sem) {
3606 + down_read(&fs_info->commit_root_sem);
3607 +- b = root->commit_root;
3608 +- extent_buffer_get(b);
3609 +- level = btrfs_header_level(b);
3610 +- if (p->need_commit_sem)
3611 ++ b = btrfs_clone_extent_buffer(root->commit_root);
3612 + up_read(&fs_info->commit_root_sem);
3613 ++ if (!b)
3614 ++ return ERR_PTR(-ENOMEM);
3615 ++
3616 ++ } else {
3617 ++ b = root->commit_root;
3618 ++ extent_buffer_get(b);
3619 ++ }
3620 ++ level = btrfs_header_level(b);
3621 + /*
3622 + * Ensure that all callers have set skip_locking when
3623 + * p->search_commit_root = 1.
3624 +@@ -2717,6 +2730,10 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3625 + again:
3626 + prev_cmp = -1;
3627 + b = btrfs_search_slot_get_root(root, p, write_lock_level);
3628 ++ if (IS_ERR(b)) {
3629 ++ ret = PTR_ERR(b);
3630 ++ goto done;
3631 ++ }
3632 +
3633 + while (b) {
3634 + level = btrfs_header_level(b);
3635 +diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
3636 +index 2aa48aecc52b..329d3afcf304 100644
3637 +--- a/fs/btrfs/dev-replace.c
3638 ++++ b/fs/btrfs/dev-replace.c
3639 +@@ -884,6 +884,8 @@ int btrfs_resume_dev_replace_async(struct btrfs_fs_info *fs_info)
3640 + "cannot continue dev_replace, tgtdev is missing");
3641 + btrfs_info(fs_info,
3642 + "you may cancel the operation after 'mount -o degraded'");
3643 ++ dev_replace->replace_state =
3644 ++ BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED;
3645 + btrfs_dev_replace_write_unlock(dev_replace);
3646 + return 0;
3647 + }
3648 +@@ -895,6 +897,10 @@ int btrfs_resume_dev_replace_async(struct btrfs_fs_info *fs_info)
3649 + * dev-replace to start anyway.
3650 + */
3651 + if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) {
3652 ++ btrfs_dev_replace_write_lock(dev_replace);
3653 ++ dev_replace->replace_state =
3654 ++ BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED;
3655 ++ btrfs_dev_replace_write_unlock(dev_replace);
3656 + btrfs_info(fs_info,
3657 + "cannot resume dev-replace, other exclusive operation running");
3658 + return 0;
3659 +diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
3660 +index a1febf155747..fe1fef3d7eed 100644
3661 +--- a/fs/btrfs/extent-tree.c
3662 ++++ b/fs/btrfs/extent-tree.c
3663 +@@ -8944,6 +8944,10 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
3664 + goto out_free;
3665 + }
3666 +
3667 ++ err = btrfs_run_delayed_items(trans);
3668 ++ if (err)
3669 ++ goto out_end_trans;
3670 ++
3671 + if (block_rsv)
3672 + trans->block_rsv = block_rsv;
3673 +
3674 +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
3675 +index 9ea4c6f0352f..423281c19fad 100644
3676 +--- a/fs/btrfs/inode.c
3677 ++++ b/fs/btrfs/inode.c
3678 +@@ -1372,7 +1372,8 @@ next_slot:
3679 + * Do the same check as in btrfs_cross_ref_exist but
3680 + * without the unnecessary search.
3681 + */
3682 +- if (btrfs_file_extent_generation(leaf, fi) <=
3683 ++ if (!nolock &&
3684 ++ btrfs_file_extent_generation(leaf, fi) <=
3685 + btrfs_root_last_snapshot(&root->root_item))
3686 + goto out_check;
3687 + if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
3688 +@@ -3686,6 +3687,21 @@ cache_index:
3689 + * inode is not a directory, logging its parent unnecessarily.
3690 + */
3691 + BTRFS_I(inode)->last_unlink_trans = BTRFS_I(inode)->last_trans;
3692 ++ /*
3693 ++ * Similar reasoning for last_link_trans, needs to be set otherwise
3694 ++ * for a case like the following:
3695 ++ *
3696 ++ * mkdir A
3697 ++ * touch foo
3698 ++ * ln foo A/bar
3699 ++ * echo 2 > /proc/sys/vm/drop_caches
3700 ++ * fsync foo
3701 ++ * <power failure>
3702 ++ *
3703 ++ * Would result in link bar and directory A not existing after the power
3704 ++ * failure.
3705 ++ */
3706 ++ BTRFS_I(inode)->last_link_trans = BTRFS_I(inode)->last_trans;
3707 +
3708 + path->slots[0]++;
3709 + if (inode->i_nlink != 1 ||
3710 +@@ -6625,6 +6641,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
3711 + if (err)
3712 + goto fail;
3713 + }
3714 ++ BTRFS_I(inode)->last_link_trans = trans->transid;
3715 + d_instantiate(dentry, inode);
3716 + ret = btrfs_log_new_name(trans, BTRFS_I(inode), NULL, parent,
3717 + true, NULL);
3718 +@@ -9157,6 +9174,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
3719 + ei->index_cnt = (u64)-1;
3720 + ei->dir_index = 0;
3721 + ei->last_unlink_trans = 0;
3722 ++ ei->last_link_trans = 0;
3723 + ei->last_log_commit = 0;
3724 +
3725 + spin_lock_init(&ei->lock);
3726 +diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
3727 +index 902819d3cf41..bbd1b36f4918 100644
3728 +--- a/fs/btrfs/scrub.c
3729 ++++ b/fs/btrfs/scrub.c
3730 +@@ -322,6 +322,7 @@ static struct full_stripe_lock *insert_full_stripe_lock(
3731 + struct rb_node *parent = NULL;
3732 + struct full_stripe_lock *entry;
3733 + struct full_stripe_lock *ret;
3734 ++ unsigned int nofs_flag;
3735 +
3736 + lockdep_assert_held(&locks_root->lock);
3737 +
3738 +@@ -339,8 +340,17 @@ static struct full_stripe_lock *insert_full_stripe_lock(
3739 + }
3740 + }
3741 +
3742 +- /* Insert new lock */
3743 ++ /*
3744 ++ * Insert new lock.
3745 ++ *
3746 ++ * We must use GFP_NOFS because the scrub task might be waiting for a
3747 ++ * worker task executing this function and in turn a transaction commit
3748 ++ * might be waiting the scrub task to pause (which needs to wait for all
3749 ++ * the worker tasks to complete before pausing).
3750 ++ */
3751 ++ nofs_flag = memalloc_nofs_save();
3752 + ret = kmalloc(sizeof(*ret), GFP_KERNEL);
3753 ++ memalloc_nofs_restore(nofs_flag);
3754 + if (!ret)
3755 + return ERR_PTR(-ENOMEM);
3756 + ret->logical = fstripe_logical;
3757 +@@ -1620,8 +1630,19 @@ static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
3758 + mutex_lock(&sctx->wr_lock);
3759 + again:
3760 + if (!sctx->wr_curr_bio) {
3761 ++ unsigned int nofs_flag;
3762 ++
3763 ++ /*
3764 ++ * We must use GFP_NOFS because the scrub task might be waiting
3765 ++ * for a worker task executing this function and in turn a
3766 ++ * transaction commit might be waiting the scrub task to pause
3767 ++ * (which needs to wait for all the worker tasks to complete
3768 ++ * before pausing).
3769 ++ */
3770 ++ nofs_flag = memalloc_nofs_save();
3771 + sctx->wr_curr_bio = kzalloc(sizeof(*sctx->wr_curr_bio),
3772 + GFP_KERNEL);
3773 ++ memalloc_nofs_restore(nofs_flag);
3774 + if (!sctx->wr_curr_bio) {
3775 + mutex_unlock(&sctx->wr_lock);
3776 + return -ENOMEM;
3777 +@@ -3772,6 +3793,7 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
3778 + struct scrub_ctx *sctx;
3779 + int ret;
3780 + struct btrfs_device *dev;
3781 ++ unsigned int nofs_flag;
3782 +
3783 + if (btrfs_fs_closing(fs_info))
3784 + return -EINVAL;
3785 +@@ -3875,6 +3897,16 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
3786 + atomic_inc(&fs_info->scrubs_running);
3787 + mutex_unlock(&fs_info->scrub_lock);
3788 +
3789 ++ /*
3790 ++ * In order to avoid deadlock with reclaim when there is a transaction
3791 ++ * trying to pause scrub, make sure we use GFP_NOFS for all the
3792 ++ * allocations done at btrfs_scrub_pages() and scrub_pages_for_parity()
3793 ++ * invoked by our callees. The pausing request is done when the
3794 ++ * transaction commit starts, and it blocks the transaction until scrub
3795 ++ * is paused (done at specific points at scrub_stripe() or right above
3796 ++ * before incrementing fs_info->scrubs_running).
3797 ++ */
3798 ++ nofs_flag = memalloc_nofs_save();
3799 + if (!is_dev_replace) {
3800 + /*
3801 + * by holding device list mutex, we can
3802 +@@ -3887,6 +3919,7 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
3803 +
3804 + if (!ret)
3805 + ret = scrub_enumerate_chunks(sctx, dev, start, end);
3806 ++ memalloc_nofs_restore(nofs_flag);
3807 +
3808 + wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
3809 + atomic_dec(&fs_info->scrubs_running);
3810 +diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
3811 +index a5ce99a6c936..15d2914f0a67 100644
3812 +--- a/fs/btrfs/tree-log.c
3813 ++++ b/fs/btrfs/tree-log.c
3814 +@@ -5778,6 +5778,22 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
3815 + goto end_trans;
3816 + }
3817 +
3818 ++ /*
3819 ++ * If a new hard link was added to the inode in the current transaction
3820 ++ * and its link count is now greater than 1, we need to fallback to a
3821 ++ * transaction commit, otherwise we can end up not logging all its new
3822 ++ * parents for all the hard links. Here just from the dentry used to
3823 ++ * fsync, we can not visit the ancestor inodes for all the other hard
3824 ++ * links to figure out if any is new, so we fallback to a transaction
3825 ++ * commit (instead of adding a lot of complexity of scanning a btree,
3826 ++ * since this scenario is not a common use case).
3827 ++ */
3828 ++ if (inode->vfs_inode.i_nlink > 1 &&
3829 ++ inode->last_link_trans > last_committed) {
3830 ++ ret = -EMLINK;
3831 ++ goto end_trans;
3832 ++ }
3833 ++
3834 + while (1) {
3835 + if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
3836 + break;
3837 +diff --git a/fs/cifs/file.c b/fs/cifs/file.c
3838 +index c9bc56b1baac..c23bf9da93d2 100644
3839 +--- a/fs/cifs/file.c
3840 ++++ b/fs/cifs/file.c
3841 +@@ -2617,11 +2617,13 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
3842 + if (rc)
3843 + break;
3844 +
3845 ++ cur_len = min_t(const size_t, len, wsize);
3846 ++
3847 + if (ctx->direct_io) {
3848 + ssize_t result;
3849 +
3850 + result = iov_iter_get_pages_alloc(
3851 +- from, &pagevec, wsize, &start);
3852 ++ from, &pagevec, cur_len, &start);
3853 + if (result < 0) {
3854 + cifs_dbg(VFS,
3855 + "direct_writev couldn't get user pages "
3856 +@@ -2630,6 +2632,9 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
3857 + result, from->type,
3858 + from->iov_offset, from->count);
3859 + dump_stack();
3860 ++
3861 ++ rc = result;
3862 ++ add_credits_and_wake_if(server, credits, 0);
3863 + break;
3864 + }
3865 + cur_len = (size_t)result;
3866 +@@ -3313,13 +3318,16 @@ cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
3867 + cur_len, &start);
3868 + if (result < 0) {
3869 + cifs_dbg(VFS,
3870 +- "couldn't get user pages (cur_len=%zd)"
3871 ++ "couldn't get user pages (rc=%zd)"
3872 + " iter type %d"
3873 + " iov_offset %zd count %zd\n",
3874 + result, direct_iov.type,
3875 + direct_iov.iov_offset,
3876 + direct_iov.count);
3877 + dump_stack();
3878 ++
3879 ++ rc = result;
3880 ++ add_credits_and_wake_if(server, credits, 0);
3881 + break;
3882 + }
3883 + cur_len = (size_t)result;
3884 +diff --git a/fs/cifs/smb2maperror.c b/fs/cifs/smb2maperror.c
3885 +index d47b7f5dfa6c..924269cec135 100644
3886 +--- a/fs/cifs/smb2maperror.c
3887 ++++ b/fs/cifs/smb2maperror.c
3888 +@@ -379,8 +379,8 @@ static const struct status_to_posix_error smb2_error_map_table[] = {
3889 + {STATUS_NONEXISTENT_EA_ENTRY, -EIO, "STATUS_NONEXISTENT_EA_ENTRY"},
3890 + {STATUS_NO_EAS_ON_FILE, -ENODATA, "STATUS_NO_EAS_ON_FILE"},
3891 + {STATUS_EA_CORRUPT_ERROR, -EIO, "STATUS_EA_CORRUPT_ERROR"},
3892 +- {STATUS_FILE_LOCK_CONFLICT, -EIO, "STATUS_FILE_LOCK_CONFLICT"},
3893 +- {STATUS_LOCK_NOT_GRANTED, -EIO, "STATUS_LOCK_NOT_GRANTED"},
3894 ++ {STATUS_FILE_LOCK_CONFLICT, -EACCES, "STATUS_FILE_LOCK_CONFLICT"},
3895 ++ {STATUS_LOCK_NOT_GRANTED, -EACCES, "STATUS_LOCK_NOT_GRANTED"},
3896 + {STATUS_DELETE_PENDING, -ENOENT, "STATUS_DELETE_PENDING"},
3897 + {STATUS_CTL_FILE_NOT_SUPPORTED, -ENOSYS,
3898 + "STATUS_CTL_FILE_NOT_SUPPORTED"},
3899 +diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
3900 +index e25c7aade98a..391b40e91910 100644
3901 +--- a/fs/cifs/smb2ops.c
3902 ++++ b/fs/cifs/smb2ops.c
3903 +@@ -3384,8 +3384,10 @@ smb3_receive_transform(struct TCP_Server_Info *server,
3904 + }
3905 +
3906 + /* TODO: add support for compounds containing READ. */
3907 +- if (pdu_length > CIFSMaxBufSize + MAX_HEADER_SIZE(server))
3908 ++ if (pdu_length > CIFSMaxBufSize + MAX_HEADER_SIZE(server)) {
3909 ++ *num_mids = 1;
3910 + return receive_encrypted_read(server, &mids[0]);
3911 ++ }
3912 +
3913 + return receive_encrypted_standard(server, mids, bufs, num_mids);
3914 + }
3915 +diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
3916 +index 3f89d0ab08fc..185a05d3257e 100644
3917 +--- a/fs/ext4/ext4.h
3918 ++++ b/fs/ext4/ext4.h
3919 +@@ -2454,8 +2454,19 @@ int do_journal_get_write_access(handle_t *handle,
3920 + #define FALL_BACK_TO_NONDELALLOC 1
3921 + #define CONVERT_INLINE_DATA 2
3922 +
3923 +-extern struct inode *ext4_iget(struct super_block *, unsigned long);
3924 +-extern struct inode *ext4_iget_normal(struct super_block *, unsigned long);
3925 ++typedef enum {
3926 ++ EXT4_IGET_NORMAL = 0,
3927 ++ EXT4_IGET_SPECIAL = 0x0001, /* OK to iget a system inode */
3928 ++ EXT4_IGET_HANDLE = 0x0002 /* Inode # is from a handle */
3929 ++} ext4_iget_flags;
3930 ++
3931 ++extern struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
3932 ++ ext4_iget_flags flags, const char *function,
3933 ++ unsigned int line);
3934 ++
3935 ++#define ext4_iget(sb, ino, flags) \
3936 ++ __ext4_iget((sb), (ino), (flags), __func__, __LINE__)
3937 ++
3938 + extern int ext4_write_inode(struct inode *, struct writeback_control *);
3939 + extern int ext4_setattr(struct dentry *, struct iattr *);
3940 + extern int ext4_getattr(const struct path *, struct kstat *, u32, unsigned int);
3941 +@@ -2538,6 +2549,8 @@ extern int ext4_group_extend(struct super_block *sb,
3942 + extern int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count);
3943 +
3944 + /* super.c */
3945 ++extern struct buffer_head *ext4_sb_bread(struct super_block *sb,
3946 ++ sector_t block, int op_flags);
3947 + extern int ext4_seq_options_show(struct seq_file *seq, void *offset);
3948 + extern int ext4_calculate_overhead(struct super_block *sb);
3949 + extern void ext4_superblock_csum_set(struct super_block *sb);
3950 +diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
3951 +index 014f6a698cb7..7ff14a1adba3 100644
3952 +--- a/fs/ext4/ialloc.c
3953 ++++ b/fs/ext4/ialloc.c
3954 +@@ -1225,7 +1225,7 @@ struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
3955 + if (!ext4_test_bit(bit, bitmap_bh->b_data))
3956 + goto bad_orphan;
3957 +
3958 +- inode = ext4_iget(sb, ino);
3959 ++ inode = ext4_iget(sb, ino, EXT4_IGET_NORMAL);
3960 + if (IS_ERR(inode)) {
3961 + err = PTR_ERR(inode);
3962 + ext4_error(sb, "couldn't read orphan inode %lu (err %d)",
3963 +diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
3964 +index 9c4bac18cc6c..27373d88b5f0 100644
3965 +--- a/fs/ext4/inline.c
3966 ++++ b/fs/ext4/inline.c
3967 +@@ -705,8 +705,11 @@ int ext4_try_to_write_inline_data(struct address_space *mapping,
3968 +
3969 + if (!PageUptodate(page)) {
3970 + ret = ext4_read_inline_page(inode, page);
3971 +- if (ret < 0)
3972 ++ if (ret < 0) {
3973 ++ unlock_page(page);
3974 ++ put_page(page);
3975 + goto out_up_read;
3976 ++ }
3977 + }
3978 +
3979 + ret = 1;
3980 +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
3981 +index 22a9d8159720..9affabd07682 100644
3982 +--- a/fs/ext4/inode.c
3983 ++++ b/fs/ext4/inode.c
3984 +@@ -4817,7 +4817,9 @@ static inline u64 ext4_inode_peek_iversion(const struct inode *inode)
3985 + return inode_peek_iversion(inode);
3986 + }
3987 +
3988 +-struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
3989 ++struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
3990 ++ ext4_iget_flags flags, const char *function,
3991 ++ unsigned int line)
3992 + {
3993 + struct ext4_iloc iloc;
3994 + struct ext4_inode *raw_inode;
3995 +@@ -4831,6 +4833,18 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
3996 + gid_t i_gid;
3997 + projid_t i_projid;
3998 +
3999 ++ if (((flags & EXT4_IGET_NORMAL) &&
4000 ++ (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)) ||
4001 ++ (ino < EXT4_ROOT_INO) ||
4002 ++ (ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))) {
4003 ++ if (flags & EXT4_IGET_HANDLE)
4004 ++ return ERR_PTR(-ESTALE);
4005 ++ __ext4_error(sb, function, line,
4006 ++ "inode #%lu: comm %s: iget: illegal inode #",
4007 ++ ino, current->comm);
4008 ++ return ERR_PTR(-EFSCORRUPTED);
4009 ++ }
4010 ++
4011 + inode = iget_locked(sb, ino);
4012 + if (!inode)
4013 + return ERR_PTR(-ENOMEM);
4014 +@@ -4846,18 +4860,26 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
4015 + raw_inode = ext4_raw_inode(&iloc);
4016 +
4017 + if ((ino == EXT4_ROOT_INO) && (raw_inode->i_links_count == 0)) {
4018 +- EXT4_ERROR_INODE(inode, "root inode unallocated");
4019 ++ ext4_error_inode(inode, function, line, 0,
4020 ++ "iget: root inode unallocated");
4021 + ret = -EFSCORRUPTED;
4022 + goto bad_inode;
4023 + }
4024 +
4025 ++ if ((flags & EXT4_IGET_HANDLE) &&
4026 ++ (raw_inode->i_links_count == 0) && (raw_inode->i_mode == 0)) {
4027 ++ ret = -ESTALE;
4028 ++ goto bad_inode;
4029 ++ }
4030 ++
4031 + if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
4032 + ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
4033 + if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
4034 + EXT4_INODE_SIZE(inode->i_sb) ||
4035 + (ei->i_extra_isize & 3)) {
4036 +- EXT4_ERROR_INODE(inode,
4037 +- "bad extra_isize %u (inode size %u)",
4038 ++ ext4_error_inode(inode, function, line, 0,
4039 ++ "iget: bad extra_isize %u "
4040 ++ "(inode size %u)",
4041 + ei->i_extra_isize,
4042 + EXT4_INODE_SIZE(inode->i_sb));
4043 + ret = -EFSCORRUPTED;
4044 +@@ -4879,7 +4901,8 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
4045 + }
4046 +
4047 + if (!ext4_inode_csum_verify(inode, raw_inode, ei)) {
4048 +- EXT4_ERROR_INODE(inode, "checksum invalid");
4049 ++ ext4_error_inode(inode, function, line, 0,
4050 ++ "iget: checksum invalid");
4051 + ret = -EFSBADCRC;
4052 + goto bad_inode;
4053 + }
4054 +@@ -4936,7 +4959,8 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
4055 + ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
4056 + inode->i_size = ext4_isize(sb, raw_inode);
4057 + if ((size = i_size_read(inode)) < 0) {
4058 +- EXT4_ERROR_INODE(inode, "bad i_size value: %lld", size);
4059 ++ ext4_error_inode(inode, function, line, 0,
4060 ++ "iget: bad i_size value: %lld", size);
4061 + ret = -EFSCORRUPTED;
4062 + goto bad_inode;
4063 + }
4064 +@@ -5012,7 +5036,8 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
4065 + ret = 0;
4066 + if (ei->i_file_acl &&
4067 + !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) {
4068 +- EXT4_ERROR_INODE(inode, "bad extended attribute block %llu",
4069 ++ ext4_error_inode(inode, function, line, 0,
4070 ++ "iget: bad extended attribute block %llu",
4071 + ei->i_file_acl);
4072 + ret = -EFSCORRUPTED;
4073 + goto bad_inode;
4074 +@@ -5040,8 +5065,9 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
4075 + } else if (S_ISLNK(inode->i_mode)) {
4076 + /* VFS does not allow setting these so must be corruption */
4077 + if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) {
4078 +- EXT4_ERROR_INODE(inode,
4079 +- "immutable or append flags not allowed on symlinks");
4080 ++ ext4_error_inode(inode, function, line, 0,
4081 ++ "iget: immutable or append flags "
4082 ++ "not allowed on symlinks");
4083 + ret = -EFSCORRUPTED;
4084 + goto bad_inode;
4085 + }
4086 +@@ -5071,7 +5097,8 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
4087 + make_bad_inode(inode);
4088 + } else {
4089 + ret = -EFSCORRUPTED;
4090 +- EXT4_ERROR_INODE(inode, "bogus i_mode (%o)", inode->i_mode);
4091 ++ ext4_error_inode(inode, function, line, 0,
4092 ++ "iget: bogus i_mode (%o)", inode->i_mode);
4093 + goto bad_inode;
4094 + }
4095 + brelse(iloc.bh);
4096 +@@ -5085,13 +5112,6 @@ bad_inode:
4097 + return ERR_PTR(ret);
4098 + }
4099 +
4100 +-struct inode *ext4_iget_normal(struct super_block *sb, unsigned long ino)
4101 +-{
4102 +- if (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)
4103 +- return ERR_PTR(-EFSCORRUPTED);
4104 +- return ext4_iget(sb, ino);
4105 +-}
4106 +-
4107 + static int ext4_inode_blocks_set(handle_t *handle,
4108 + struct ext4_inode *raw_inode,
4109 + struct ext4_inode_info *ei)
4110 +@@ -5380,9 +5400,13 @@ int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
4111 + {
4112 + int err;
4113 +
4114 +- if (WARN_ON_ONCE(current->flags & PF_MEMALLOC))
4115 ++ if (WARN_ON_ONCE(current->flags & PF_MEMALLOC) ||
4116 ++ sb_rdonly(inode->i_sb))
4117 + return 0;
4118 +
4119 ++ if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
4120 ++ return -EIO;
4121 ++
4122 + if (EXT4_SB(inode->i_sb)->s_journal) {
4123 + if (ext4_journal_current_handle()) {
4124 + jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
4125 +@@ -5398,7 +5422,8 @@ int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
4126 + if (wbc->sync_mode != WB_SYNC_ALL || wbc->for_sync)
4127 + return 0;
4128 +
4129 +- err = ext4_force_commit(inode->i_sb);
4130 ++ err = jbd2_complete_transaction(EXT4_SB(inode->i_sb)->s_journal,
4131 ++ EXT4_I(inode)->i_sync_tid);
4132 + } else {
4133 + struct ext4_iloc iloc;
4134 +
4135 +diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
4136 +index 0edee31913d1..d37dafa1d133 100644
4137 +--- a/fs/ext4/ioctl.c
4138 ++++ b/fs/ext4/ioctl.c
4139 +@@ -125,7 +125,7 @@ static long swap_inode_boot_loader(struct super_block *sb,
4140 + !inode_owner_or_capable(inode) || !capable(CAP_SYS_ADMIN))
4141 + return -EPERM;
4142 +
4143 +- inode_bl = ext4_iget(sb, EXT4_BOOT_LOADER_INO);
4144 ++ inode_bl = ext4_iget(sb, EXT4_BOOT_LOADER_INO, EXT4_IGET_SPECIAL);
4145 + if (IS_ERR(inode_bl))
4146 + return PTR_ERR(inode_bl);
4147 + ei_bl = EXT4_I(inode_bl);
4148 +diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
4149 +index 61a9d1927817..a98bfca9c463 100644
4150 +--- a/fs/ext4/migrate.c
4151 ++++ b/fs/ext4/migrate.c
4152 +@@ -116,9 +116,9 @@ static int update_ind_extent_range(handle_t *handle, struct inode *inode,
4153 + int i, retval = 0;
4154 + unsigned long max_entries = inode->i_sb->s_blocksize >> 2;
4155 +
4156 +- bh = sb_bread(inode->i_sb, pblock);
4157 +- if (!bh)
4158 +- return -EIO;
4159 ++ bh = ext4_sb_bread(inode->i_sb, pblock, 0);
4160 ++ if (IS_ERR(bh))
4161 ++ return PTR_ERR(bh);
4162 +
4163 + i_data = (__le32 *)bh->b_data;
4164 + for (i = 0; i < max_entries; i++) {
4165 +@@ -145,9 +145,9 @@ static int update_dind_extent_range(handle_t *handle, struct inode *inode,
4166 + int i, retval = 0;
4167 + unsigned long max_entries = inode->i_sb->s_blocksize >> 2;
4168 +
4169 +- bh = sb_bread(inode->i_sb, pblock);
4170 +- if (!bh)
4171 +- return -EIO;
4172 ++ bh = ext4_sb_bread(inode->i_sb, pblock, 0);
4173 ++ if (IS_ERR(bh))
4174 ++ return PTR_ERR(bh);
4175 +
4176 + i_data = (__le32 *)bh->b_data;
4177 + for (i = 0; i < max_entries; i++) {
4178 +@@ -175,9 +175,9 @@ static int update_tind_extent_range(handle_t *handle, struct inode *inode,
4179 + int i, retval = 0;
4180 + unsigned long max_entries = inode->i_sb->s_blocksize >> 2;
4181 +
4182 +- bh = sb_bread(inode->i_sb, pblock);
4183 +- if (!bh)
4184 +- return -EIO;
4185 ++ bh = ext4_sb_bread(inode->i_sb, pblock, 0);
4186 ++ if (IS_ERR(bh))
4187 ++ return PTR_ERR(bh);
4188 +
4189 + i_data = (__le32 *)bh->b_data;
4190 + for (i = 0; i < max_entries; i++) {
4191 +@@ -224,9 +224,9 @@ static int free_dind_blocks(handle_t *handle,
4192 + struct buffer_head *bh;
4193 + unsigned long max_entries = inode->i_sb->s_blocksize >> 2;
4194 +
4195 +- bh = sb_bread(inode->i_sb, le32_to_cpu(i_data));
4196 +- if (!bh)
4197 +- return -EIO;
4198 ++ bh = ext4_sb_bread(inode->i_sb, le32_to_cpu(i_data), 0);
4199 ++ if (IS_ERR(bh))
4200 ++ return PTR_ERR(bh);
4201 +
4202 + tmp_idata = (__le32 *)bh->b_data;
4203 + for (i = 0; i < max_entries; i++) {
4204 +@@ -254,9 +254,9 @@ static int free_tind_blocks(handle_t *handle,
4205 + struct buffer_head *bh;
4206 + unsigned long max_entries = inode->i_sb->s_blocksize >> 2;
4207 +
4208 +- bh = sb_bread(inode->i_sb, le32_to_cpu(i_data));
4209 +- if (!bh)
4210 +- return -EIO;
4211 ++ bh = ext4_sb_bread(inode->i_sb, le32_to_cpu(i_data), 0);
4212 ++ if (IS_ERR(bh))
4213 ++ return PTR_ERR(bh);
4214 +
4215 + tmp_idata = (__le32 *)bh->b_data;
4216 + for (i = 0; i < max_entries; i++) {
4217 +@@ -382,9 +382,9 @@ static int free_ext_idx(handle_t *handle, struct inode *inode,
4218 + struct ext4_extent_header *eh;
4219 +
4220 + block = ext4_idx_pblock(ix);
4221 +- bh = sb_bread(inode->i_sb, block);
4222 +- if (!bh)
4223 +- return -EIO;
4224 ++ bh = ext4_sb_bread(inode->i_sb, block, 0);
4225 ++ if (IS_ERR(bh))
4226 ++ return PTR_ERR(bh);
4227 +
4228 + eh = (struct ext4_extent_header *)bh->b_data;
4229 + if (eh->eh_depth != 0) {
4230 +diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
4231 +index 437f71fe83ae..2b928eb07fa2 100644
4232 +--- a/fs/ext4/namei.c
4233 ++++ b/fs/ext4/namei.c
4234 +@@ -1571,7 +1571,7 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsi
4235 + dentry);
4236 + return ERR_PTR(-EFSCORRUPTED);
4237 + }
4238 +- inode = ext4_iget_normal(dir->i_sb, ino);
4239 ++ inode = ext4_iget(dir->i_sb, ino, EXT4_IGET_NORMAL);
4240 + if (inode == ERR_PTR(-ESTALE)) {
4241 + EXT4_ERROR_INODE(dir,
4242 + "deleted inode referenced: %u",
4243 +@@ -1613,7 +1613,7 @@ struct dentry *ext4_get_parent(struct dentry *child)
4244 + return ERR_PTR(-EFSCORRUPTED);
4245 + }
4246 +
4247 +- return d_obtain_alias(ext4_iget_normal(child->d_sb, ino));
4248 ++ return d_obtain_alias(ext4_iget(child->d_sb, ino, EXT4_IGET_NORMAL));
4249 + }
4250 +
4251 + /*
4252 +diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
4253 +index a5efee34415f..48421de803b7 100644
4254 +--- a/fs/ext4/resize.c
4255 ++++ b/fs/ext4/resize.c
4256 +@@ -127,10 +127,12 @@ static int verify_group_input(struct super_block *sb,
4257 + else if (free_blocks_count < 0)
4258 + ext4_warning(sb, "Bad blocks count %u",
4259 + input->blocks_count);
4260 +- else if (!(bh = sb_bread(sb, end - 1)))
4261 ++ else if (IS_ERR(bh = ext4_sb_bread(sb, end - 1, 0))) {
4262 ++ err = PTR_ERR(bh);
4263 ++ bh = NULL;
4264 + ext4_warning(sb, "Cannot read last block (%llu)",
4265 + end - 1);
4266 +- else if (outside(input->block_bitmap, start, end))
4267 ++ } else if (outside(input->block_bitmap, start, end))
4268 + ext4_warning(sb, "Block bitmap not in group (block %llu)",
4269 + (unsigned long long)input->block_bitmap);
4270 + else if (outside(input->inode_bitmap, start, end))
4271 +@@ -781,11 +783,11 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
4272 + struct ext4_super_block *es = EXT4_SB(sb)->s_es;
4273 + unsigned long gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
4274 + ext4_fsblk_t gdblock = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + gdb_num;
4275 +- struct buffer_head **o_group_desc, **n_group_desc;
4276 +- struct buffer_head *dind;
4277 +- struct buffer_head *gdb_bh;
4278 ++ struct buffer_head **o_group_desc, **n_group_desc = NULL;
4279 ++ struct buffer_head *dind = NULL;
4280 ++ struct buffer_head *gdb_bh = NULL;
4281 + int gdbackups;
4282 +- struct ext4_iloc iloc;
4283 ++ struct ext4_iloc iloc = { .bh = NULL };
4284 + __le32 *data;
4285 + int err;
4286 +
4287 +@@ -794,21 +796,22 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
4288 + "EXT4-fs: ext4_add_new_gdb: adding group block %lu\n",
4289 + gdb_num);
4290 +
4291 +- gdb_bh = sb_bread(sb, gdblock);
4292 +- if (!gdb_bh)
4293 +- return -EIO;
4294 ++ gdb_bh = ext4_sb_bread(sb, gdblock, 0);
4295 ++ if (IS_ERR(gdb_bh))
4296 ++ return PTR_ERR(gdb_bh);
4297 +
4298 + gdbackups = verify_reserved_gdb(sb, group, gdb_bh);
4299 + if (gdbackups < 0) {
4300 + err = gdbackups;
4301 +- goto exit_bh;
4302 ++ goto errout;
4303 + }
4304 +
4305 + data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK;
4306 +- dind = sb_bread(sb, le32_to_cpu(*data));
4307 +- if (!dind) {
4308 +- err = -EIO;
4309 +- goto exit_bh;
4310 ++ dind = ext4_sb_bread(sb, le32_to_cpu(*data), 0);
4311 ++ if (IS_ERR(dind)) {
4312 ++ err = PTR_ERR(dind);
4313 ++ dind = NULL;
4314 ++ goto errout;
4315 + }
4316 +
4317 + data = (__le32 *)dind->b_data;
4318 +@@ -816,18 +819,18 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
4319 + ext4_warning(sb, "new group %u GDT block %llu not reserved",
4320 + group, gdblock);
4321 + err = -EINVAL;
4322 +- goto exit_dind;
4323 ++ goto errout;
4324 + }
4325 +
4326 + BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
4327 + err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh);
4328 + if (unlikely(err))
4329 +- goto exit_dind;
4330 ++ goto errout;
4331 +
4332 + BUFFER_TRACE(gdb_bh, "get_write_access");
4333 + err = ext4_journal_get_write_access(handle, gdb_bh);
4334 + if (unlikely(err))
4335 +- goto exit_dind;
4336 ++ goto errout;
4337 +
4338 + BUFFER_TRACE(dind, "get_write_access");
4339 + err = ext4_journal_get_write_access(handle, dind);
4340 +@@ -837,7 +840,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
4341 + /* ext4_reserve_inode_write() gets a reference on the iloc */
4342 + err = ext4_reserve_inode_write(handle, inode, &iloc);
4343 + if (unlikely(err))
4344 +- goto exit_dind;
4345 ++ goto errout;
4346 +
4347 + n_group_desc = ext4_kvmalloc((gdb_num + 1) *
4348 + sizeof(struct buffer_head *),
4349 +@@ -846,7 +849,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
4350 + err = -ENOMEM;
4351 + ext4_warning(sb, "not enough memory for %lu groups",
4352 + gdb_num + 1);
4353 +- goto exit_inode;
4354 ++ goto errout;
4355 + }
4356 +
4357 + /*
4358 +@@ -862,7 +865,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
4359 + err = ext4_handle_dirty_metadata(handle, NULL, dind);
4360 + if (unlikely(err)) {
4361 + ext4_std_error(sb, err);
4362 +- goto exit_inode;
4363 ++ goto errout;
4364 + }
4365 + inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >>
4366 + (9 - EXT4_SB(sb)->s_cluster_bits);
4367 +@@ -871,8 +874,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
4368 + err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
4369 + if (unlikely(err)) {
4370 + ext4_std_error(sb, err);
4371 +- iloc.bh = NULL;
4372 +- goto exit_inode;
4373 ++ goto errout;
4374 + }
4375 + brelse(dind);
4376 +
4377 +@@ -888,15 +890,11 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
4378 + err = ext4_handle_dirty_super(handle, sb);
4379 + if (err)
4380 + ext4_std_error(sb, err);
4381 +-
4382 + return err;
4383 +-
4384 +-exit_inode:
4385 ++errout:
4386 + kvfree(n_group_desc);
4387 + brelse(iloc.bh);
4388 +-exit_dind:
4389 + brelse(dind);
4390 +-exit_bh:
4391 + brelse(gdb_bh);
4392 +
4393 + ext4_debug("leaving with error %d\n", err);
4394 +@@ -916,9 +914,9 @@ static int add_new_gdb_meta_bg(struct super_block *sb,
4395 +
4396 + gdblock = ext4_meta_bg_first_block_no(sb, group) +
4397 + ext4_bg_has_super(sb, group);
4398 +- gdb_bh = sb_bread(sb, gdblock);
4399 +- if (!gdb_bh)
4400 +- return -EIO;
4401 ++ gdb_bh = ext4_sb_bread(sb, gdblock, 0);
4402 ++ if (IS_ERR(gdb_bh))
4403 ++ return PTR_ERR(gdb_bh);
4404 + n_group_desc = ext4_kvmalloc((gdb_num + 1) *
4405 + sizeof(struct buffer_head *),
4406 + GFP_NOFS);
4407 +@@ -975,9 +973,10 @@ static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
4408 + return -ENOMEM;
4409 +
4410 + data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK;
4411 +- dind = sb_bread(sb, le32_to_cpu(*data));
4412 +- if (!dind) {
4413 +- err = -EIO;
4414 ++ dind = ext4_sb_bread(sb, le32_to_cpu(*data), 0);
4415 ++ if (IS_ERR(dind)) {
4416 ++ err = PTR_ERR(dind);
4417 ++ dind = NULL;
4418 + goto exit_free;
4419 + }
4420 +
4421 +@@ -996,9 +995,10 @@ static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
4422 + err = -EINVAL;
4423 + goto exit_bh;
4424 + }
4425 +- primary[res] = sb_bread(sb, blk);
4426 +- if (!primary[res]) {
4427 +- err = -EIO;
4428 ++ primary[res] = ext4_sb_bread(sb, blk, 0);
4429 ++ if (IS_ERR(primary[res])) {
4430 ++ err = PTR_ERR(primary[res]);
4431 ++ primary[res] = NULL;
4432 + goto exit_bh;
4433 + }
4434 + gdbackups = verify_reserved_gdb(sb, group, primary[res]);
4435 +@@ -1631,13 +1631,13 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
4436 + }
4437 +
4438 + if (reserved_gdb || gdb_off == 0) {
4439 +- if (ext4_has_feature_resize_inode(sb) ||
4440 ++ if (!ext4_has_feature_resize_inode(sb) ||
4441 + !le16_to_cpu(es->s_reserved_gdt_blocks)) {
4442 + ext4_warning(sb,
4443 + "No reserved GDT blocks, can't resize");
4444 + return -EPERM;
4445 + }
4446 +- inode = ext4_iget(sb, EXT4_RESIZE_INO);
4447 ++ inode = ext4_iget(sb, EXT4_RESIZE_INO, EXT4_IGET_SPECIAL);
4448 + if (IS_ERR(inode)) {
4449 + ext4_warning(sb, "Error opening resize inode");
4450 + return PTR_ERR(inode);
4451 +@@ -1965,7 +1965,8 @@ retry:
4452 + }
4453 +
4454 + if (!resize_inode)
4455 +- resize_inode = ext4_iget(sb, EXT4_RESIZE_INO);
4456 ++ resize_inode = ext4_iget(sb, EXT4_RESIZE_INO,
4457 ++ EXT4_IGET_SPECIAL);
4458 + if (IS_ERR(resize_inode)) {
4459 + ext4_warning(sb, "Error opening resize inode");
4460 + return PTR_ERR(resize_inode);
4461 +diff --git a/fs/ext4/super.c b/fs/ext4/super.c
4462 +index 53ff6c2a26ed..6641a1b8a6a5 100644
4463 +--- a/fs/ext4/super.c
4464 ++++ b/fs/ext4/super.c
4465 +@@ -140,6 +140,29 @@ MODULE_ALIAS_FS("ext3");
4466 + MODULE_ALIAS("ext3");
4467 + #define IS_EXT3_SB(sb) ((sb)->s_bdev->bd_holder == &ext3_fs_type)
4468 +
4469 ++/*
4470 ++ * This works like sb_bread() except it uses ERR_PTR for error
4471 ++ * returns. Currently with sb_bread it's impossible to distinguish
4472 ++ * between ENOMEM and EIO situations (since both result in a NULL
4473 ++ * return.
4474 ++ */
4475 ++struct buffer_head *
4476 ++ext4_sb_bread(struct super_block *sb, sector_t block, int op_flags)
4477 ++{
4478 ++ struct buffer_head *bh = sb_getblk(sb, block);
4479 ++
4480 ++ if (bh == NULL)
4481 ++ return ERR_PTR(-ENOMEM);
4482 ++ if (buffer_uptodate(bh))
4483 ++ return bh;
4484 ++ ll_rw_block(REQ_OP_READ, REQ_META | op_flags, 1, &bh);
4485 ++ wait_on_buffer(bh);
4486 ++ if (buffer_uptodate(bh))
4487 ++ return bh;
4488 ++ put_bh(bh);
4489 ++ return ERR_PTR(-EIO);
4490 ++}
4491 ++
4492 + static int ext4_verify_csum_type(struct super_block *sb,
4493 + struct ext4_super_block *es)
4494 + {
4495 +@@ -1151,20 +1174,11 @@ static struct inode *ext4_nfs_get_inode(struct super_block *sb,
4496 + {
4497 + struct inode *inode;
4498 +
4499 +- if (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)
4500 +- return ERR_PTR(-ESTALE);
4501 +- if (ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))
4502 +- return ERR_PTR(-ESTALE);
4503 +-
4504 +- /* iget isn't really right if the inode is currently unallocated!!
4505 +- *
4506 +- * ext4_read_inode will return a bad_inode if the inode had been
4507 +- * deleted, so we should be safe.
4508 +- *
4509 ++ /*
4510 + * Currently we don't know the generation for parent directory, so
4511 + * a generation of 0 means "accept any"
4512 + */
4513 +- inode = ext4_iget_normal(sb, ino);
4514 ++ inode = ext4_iget(sb, ino, EXT4_IGET_HANDLE);
4515 + if (IS_ERR(inode))
4516 + return ERR_CAST(inode);
4517 + if (generation && inode->i_generation != generation) {
4518 +@@ -1189,6 +1203,16 @@ static struct dentry *ext4_fh_to_parent(struct super_block *sb, struct fid *fid,
4519 + ext4_nfs_get_inode);
4520 + }
4521 +
4522 ++static int ext4_nfs_commit_metadata(struct inode *inode)
4523 ++{
4524 ++ struct writeback_control wbc = {
4525 ++ .sync_mode = WB_SYNC_ALL
4526 ++ };
4527 ++
4528 ++ trace_ext4_nfs_commit_metadata(inode);
4529 ++ return ext4_write_inode(inode, &wbc);
4530 ++}
4531 ++
4532 + /*
4533 + * Try to release metadata pages (indirect blocks, directories) which are
4534 + * mapped via the block device. Since these pages could have journal heads
4535 +@@ -1393,6 +1417,7 @@ static const struct export_operations ext4_export_ops = {
4536 + .fh_to_dentry = ext4_fh_to_dentry,
4537 + .fh_to_parent = ext4_fh_to_parent,
4538 + .get_parent = ext4_get_parent,
4539 ++ .commit_metadata = ext4_nfs_commit_metadata,
4540 + };
4541 +
4542 + enum {
4543 +@@ -4328,7 +4353,7 @@ no_journal:
4544 + * so we can safely mount the rest of the filesystem now.
4545 + */
4546 +
4547 +- root = ext4_iget(sb, EXT4_ROOT_INO);
4548 ++ root = ext4_iget(sb, EXT4_ROOT_INO, EXT4_IGET_SPECIAL);
4549 + if (IS_ERR(root)) {
4550 + ext4_msg(sb, KERN_ERR, "get root inode failed");
4551 + ret = PTR_ERR(root);
4552 +@@ -4598,7 +4623,7 @@ static struct inode *ext4_get_journal_inode(struct super_block *sb,
4553 + * happen if we iget() an unused inode, as the subsequent iput()
4554 + * will try to delete it.
4555 + */
4556 +- journal_inode = ext4_iget(sb, journal_inum);
4557 ++ journal_inode = ext4_iget(sb, journal_inum, EXT4_IGET_SPECIAL);
4558 + if (IS_ERR(journal_inode)) {
4559 + ext4_msg(sb, KERN_ERR, "no journal found");
4560 + return NULL;
4561 +@@ -5680,7 +5705,7 @@ static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
4562 + if (!qf_inums[type])
4563 + return -EPERM;
4564 +
4565 +- qf_inode = ext4_iget(sb, qf_inums[type]);
4566 ++ qf_inode = ext4_iget(sb, qf_inums[type], EXT4_IGET_SPECIAL);
4567 + if (IS_ERR(qf_inode)) {
4568 + ext4_error(sb, "Bad quota inode # %lu", qf_inums[type]);
4569 + return PTR_ERR(qf_inode);
4570 +@@ -5690,9 +5715,9 @@ static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
4571 + qf_inode->i_flags |= S_NOQUOTA;
4572 + lockdep_set_quota_inode(qf_inode, I_DATA_SEM_QUOTA);
4573 + err = dquot_enable(qf_inode, type, format_id, flags);
4574 +- iput(qf_inode);
4575 + if (err)
4576 + lockdep_set_quota_inode(qf_inode, I_DATA_SEM_NORMAL);
4577 ++ iput(qf_inode);
4578 +
4579 + return err;
4580 + }
4581 +diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
4582 +index 7643d52c776c..86ed9c686249 100644
4583 +--- a/fs/ext4/xattr.c
4584 ++++ b/fs/ext4/xattr.c
4585 +@@ -384,7 +384,7 @@ static int ext4_xattr_inode_iget(struct inode *parent, unsigned long ea_ino,
4586 + struct inode *inode;
4587 + int err;
4588 +
4589 +- inode = ext4_iget(parent->i_sb, ea_ino);
4590 ++ inode = ext4_iget(parent->i_sb, ea_ino, EXT4_IGET_NORMAL);
4591 + if (IS_ERR(inode)) {
4592 + err = PTR_ERR(inode);
4593 + ext4_error(parent->i_sb,
4594 +@@ -522,14 +522,13 @@ ext4_xattr_block_get(struct inode *inode, int name_index, const char *name,
4595 + ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld",
4596 + name_index, name, buffer, (long)buffer_size);
4597 +
4598 +- error = -ENODATA;
4599 + if (!EXT4_I(inode)->i_file_acl)
4600 +- goto cleanup;
4601 ++ return -ENODATA;
4602 + ea_idebug(inode, "reading block %llu",
4603 + (unsigned long long)EXT4_I(inode)->i_file_acl);
4604 +- bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
4605 +- if (!bh)
4606 +- goto cleanup;
4607 ++ bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
4608 ++ if (IS_ERR(bh))
4609 ++ return PTR_ERR(bh);
4610 + ea_bdebug(bh, "b_count=%d, refcount=%d",
4611 + atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
4612 + error = ext4_xattr_check_block(inode, bh);
4613 +@@ -696,26 +695,23 @@ ext4_xattr_block_list(struct dentry *dentry, char *buffer, size_t buffer_size)
4614 + ea_idebug(inode, "buffer=%p, buffer_size=%ld",
4615 + buffer, (long)buffer_size);
4616 +
4617 +- error = 0;
4618 + if (!EXT4_I(inode)->i_file_acl)
4619 +- goto cleanup;
4620 ++ return 0;
4621 + ea_idebug(inode, "reading block %llu",
4622 + (unsigned long long)EXT4_I(inode)->i_file_acl);
4623 +- bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
4624 +- error = -EIO;
4625 +- if (!bh)
4626 +- goto cleanup;
4627 ++ bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
4628 ++ if (IS_ERR(bh))
4629 ++ return PTR_ERR(bh);
4630 + ea_bdebug(bh, "b_count=%d, refcount=%d",
4631 + atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
4632 + error = ext4_xattr_check_block(inode, bh);
4633 + if (error)
4634 + goto cleanup;
4635 + ext4_xattr_block_cache_insert(EA_BLOCK_CACHE(inode), bh);
4636 +- error = ext4_xattr_list_entries(dentry, BFIRST(bh), buffer, buffer_size);
4637 +-
4638 ++ error = ext4_xattr_list_entries(dentry, BFIRST(bh), buffer,
4639 ++ buffer_size);
4640 + cleanup:
4641 + brelse(bh);
4642 +-
4643 + return error;
4644 + }
4645 +
4646 +@@ -830,9 +826,9 @@ int ext4_get_inode_usage(struct inode *inode, qsize_t *usage)
4647 + }
4648 +
4649 + if (EXT4_I(inode)->i_file_acl) {
4650 +- bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
4651 +- if (!bh) {
4652 +- ret = -EIO;
4653 ++ bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
4654 ++ if (IS_ERR(bh)) {
4655 ++ ret = PTR_ERR(bh);
4656 + goto out;
4657 + }
4658 +
4659 +@@ -1486,7 +1482,8 @@ ext4_xattr_inode_cache_find(struct inode *inode, const void *value,
4660 + }
4661 +
4662 + while (ce) {
4663 +- ea_inode = ext4_iget(inode->i_sb, ce->e_value);
4664 ++ ea_inode = ext4_iget(inode->i_sb, ce->e_value,
4665 ++ EXT4_IGET_NORMAL);
4666 + if (!IS_ERR(ea_inode) &&
4667 + !is_bad_inode(ea_inode) &&
4668 + (EXT4_I(ea_inode)->i_flags & EXT4_EA_INODE_FL) &&
4669 +@@ -1821,16 +1818,15 @@ ext4_xattr_block_find(struct inode *inode, struct ext4_xattr_info *i,
4670 +
4671 + if (EXT4_I(inode)->i_file_acl) {
4672 + /* The inode already has an extended attribute block. */
4673 +- bs->bh = sb_bread(sb, EXT4_I(inode)->i_file_acl);
4674 +- error = -EIO;
4675 +- if (!bs->bh)
4676 +- goto cleanup;
4677 ++ bs->bh = ext4_sb_bread(sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
4678 ++ if (IS_ERR(bs->bh))
4679 ++ return PTR_ERR(bs->bh);
4680 + ea_bdebug(bs->bh, "b_count=%d, refcount=%d",
4681 + atomic_read(&(bs->bh->b_count)),
4682 + le32_to_cpu(BHDR(bs->bh)->h_refcount));
4683 + error = ext4_xattr_check_block(inode, bs->bh);
4684 + if (error)
4685 +- goto cleanup;
4686 ++ return error;
4687 + /* Find the named attribute. */
4688 + bs->s.base = BHDR(bs->bh);
4689 + bs->s.first = BFIRST(bs->bh);
4690 +@@ -1839,13 +1835,10 @@ ext4_xattr_block_find(struct inode *inode, struct ext4_xattr_info *i,
4691 + error = xattr_find_entry(inode, &bs->s.here, bs->s.end,
4692 + i->name_index, i->name, 1);
4693 + if (error && error != -ENODATA)
4694 +- goto cleanup;
4695 ++ return error;
4696 + bs->s.not_found = error;
4697 + }
4698 +- error = 0;
4699 +-
4700 +-cleanup:
4701 +- return error;
4702 ++ return 0;
4703 + }
4704 +
4705 + static int
4706 +@@ -2274,9 +2267,9 @@ static struct buffer_head *ext4_xattr_get_block(struct inode *inode)
4707 +
4708 + if (!EXT4_I(inode)->i_file_acl)
4709 + return NULL;
4710 +- bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
4711 +- if (!bh)
4712 +- return ERR_PTR(-EIO);
4713 ++ bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
4714 ++ if (IS_ERR(bh))
4715 ++ return bh;
4716 + error = ext4_xattr_check_block(inode, bh);
4717 + if (error) {
4718 + brelse(bh);
4719 +@@ -2729,7 +2722,7 @@ retry:
4720 + base = IFIRST(header);
4721 + end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
4722 + min_offs = end - base;
4723 +- total_ino = sizeof(struct ext4_xattr_ibody_header);
4724 ++ total_ino = sizeof(struct ext4_xattr_ibody_header) + sizeof(u32);
4725 +
4726 + error = xattr_check_inode(inode, header, end);
4727 + if (error)
4728 +@@ -2746,10 +2739,11 @@ retry:
4729 + if (EXT4_I(inode)->i_file_acl) {
4730 + struct buffer_head *bh;
4731 +
4732 +- bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
4733 +- error = -EIO;
4734 +- if (!bh)
4735 ++ bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
4736 ++ if (IS_ERR(bh)) {
4737 ++ error = PTR_ERR(bh);
4738 + goto cleanup;
4739 ++ }
4740 + error = ext4_xattr_check_block(inode, bh);
4741 + if (error) {
4742 + brelse(bh);
4743 +@@ -2903,11 +2897,12 @@ int ext4_xattr_delete_inode(handle_t *handle, struct inode *inode,
4744 + }
4745 +
4746 + if (EXT4_I(inode)->i_file_acl) {
4747 +- bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
4748 +- if (!bh) {
4749 +- EXT4_ERROR_INODE(inode, "block %llu read error",
4750 +- EXT4_I(inode)->i_file_acl);
4751 +- error = -EIO;
4752 ++ bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
4753 ++ if (IS_ERR(bh)) {
4754 ++ error = PTR_ERR(bh);
4755 ++ if (error == -EIO)
4756 ++ EXT4_ERROR_INODE(inode, "block %llu read error",
4757 ++ EXT4_I(inode)->i_file_acl);
4758 + goto cleanup;
4759 + }
4760 + error = ext4_xattr_check_block(inode, bh);
4761 +@@ -3060,8 +3055,10 @@ ext4_xattr_block_cache_find(struct inode *inode,
4762 + while (ce) {
4763 + struct buffer_head *bh;
4764 +
4765 +- bh = sb_bread(inode->i_sb, ce->e_value);
4766 +- if (!bh) {
4767 ++ bh = ext4_sb_bread(inode->i_sb, ce->e_value, REQ_PRIO);
4768 ++ if (IS_ERR(bh)) {
4769 ++ if (PTR_ERR(bh) == -ENOMEM)
4770 ++ return NULL;
4771 + EXT4_ERROR_INODE(inode, "block %lu read error",
4772 + (unsigned long)ce->e_value);
4773 + } else if (ext4_xattr_cmp(header, BHDR(bh)) == 0) {
4774 +diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
4775 +index b293cb3e27a2..17049b030b6c 100644
4776 +--- a/fs/f2fs/data.c
4777 ++++ b/fs/f2fs/data.c
4778 +@@ -1102,8 +1102,10 @@ next_block:
4779 + if (test_opt(sbi, LFS) && create &&
4780 + flag == F2FS_GET_BLOCK_DIO) {
4781 + err = __allocate_data_block(&dn, map->m_seg_type);
4782 +- if (!err)
4783 ++ if (!err) {
4784 ++ blkaddr = dn.data_blkaddr;
4785 + set_inode_flag(inode, FI_APPEND_WRITE);
4786 ++ }
4787 + }
4788 + } else {
4789 + if (create) {
4790 +diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
4791 +index d338740d0fda..88be946dedd4 100644
4792 +--- a/fs/f2fs/node.c
4793 ++++ b/fs/f2fs/node.c
4794 +@@ -826,6 +826,7 @@ static int truncate_node(struct dnode_of_data *dn)
4795 + struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
4796 + struct node_info ni;
4797 + int err;
4798 ++ pgoff_t index;
4799 +
4800 + err = f2fs_get_node_info(sbi, dn->nid, &ni);
4801 + if (err)
4802 +@@ -845,10 +846,11 @@ static int truncate_node(struct dnode_of_data *dn)
4803 + clear_node_page_dirty(dn->node_page);
4804 + set_sbi_flag(sbi, SBI_IS_DIRTY);
4805 +
4806 ++ index = dn->node_page->index;
4807 + f2fs_put_page(dn->node_page, 1);
4808 +
4809 + invalidate_mapping_pages(NODE_MAPPING(sbi),
4810 +- dn->node_page->index, dn->node_page->index);
4811 ++ index, index);
4812 +
4813 + dn->node_page = NULL;
4814 + trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
4815 +diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
4816 +index af58b2cc21b8..855a622fb052 100644
4817 +--- a/fs/f2fs/super.c
4818 ++++ b/fs/f2fs/super.c
4819 +@@ -1457,19 +1457,16 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
4820 +
4821 + sbi->sb->s_flags |= SB_ACTIVE;
4822 +
4823 +- mutex_lock(&sbi->gc_mutex);
4824 + f2fs_update_time(sbi, DISABLE_TIME);
4825 +
4826 + while (!f2fs_time_over(sbi, DISABLE_TIME)) {
4827 ++ mutex_lock(&sbi->gc_mutex);
4828 + err = f2fs_gc(sbi, true, false, NULL_SEGNO);
4829 + if (err == -ENODATA)
4830 + break;
4831 +- if (err && err != -EAGAIN) {
4832 +- mutex_unlock(&sbi->gc_mutex);
4833 ++ if (err && err != -EAGAIN)
4834 + return err;
4835 +- }
4836 + }
4837 +- mutex_unlock(&sbi->gc_mutex);
4838 +
4839 + err = sync_filesystem(sbi->sb);
4840 + if (err)
4841 +@@ -2496,10 +2493,10 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
4842 + return 1;
4843 + }
4844 +
4845 +- if (segment_count > (le32_to_cpu(raw_super->block_count) >> 9)) {
4846 ++ if (segment_count > (le64_to_cpu(raw_super->block_count) >> 9)) {
4847 + f2fs_msg(sb, KERN_INFO,
4848 +- "Wrong segment_count / block_count (%u > %u)",
4849 +- segment_count, le32_to_cpu(raw_super->block_count));
4850 ++ "Wrong segment_count / block_count (%u > %llu)",
4851 ++ segment_count, le64_to_cpu(raw_super->block_count));
4852 + return 1;
4853 + }
4854 +
4855 +diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
4856 +index 7261245c208d..ecd2cf2fc584 100644
4857 +--- a/fs/f2fs/xattr.c
4858 ++++ b/fs/f2fs/xattr.c
4859 +@@ -288,7 +288,7 @@ static int read_xattr_block(struct inode *inode, void *txattr_addr)
4860 + static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
4861 + unsigned int index, unsigned int len,
4862 + const char *name, struct f2fs_xattr_entry **xe,
4863 +- void **base_addr)
4864 ++ void **base_addr, int *base_size)
4865 + {
4866 + void *cur_addr, *txattr_addr, *last_addr = NULL;
4867 + nid_t xnid = F2FS_I(inode)->i_xattr_nid;
4868 +@@ -299,8 +299,8 @@ static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
4869 + if (!size && !inline_size)
4870 + return -ENODATA;
4871 +
4872 +- txattr_addr = f2fs_kzalloc(F2FS_I_SB(inode),
4873 +- inline_size + size + XATTR_PADDING_SIZE, GFP_NOFS);
4874 ++ *base_size = inline_size + size + XATTR_PADDING_SIZE;
4875 ++ txattr_addr = f2fs_kzalloc(F2FS_I_SB(inode), *base_size, GFP_NOFS);
4876 + if (!txattr_addr)
4877 + return -ENOMEM;
4878 +
4879 +@@ -312,8 +312,10 @@ static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
4880 +
4881 + *xe = __find_inline_xattr(inode, txattr_addr, &last_addr,
4882 + index, len, name);
4883 +- if (*xe)
4884 ++ if (*xe) {
4885 ++ *base_size = inline_size;
4886 + goto check;
4887 ++ }
4888 + }
4889 +
4890 + /* read from xattr node block */
4891 +@@ -474,6 +476,7 @@ int f2fs_getxattr(struct inode *inode, int index, const char *name,
4892 + int error = 0;
4893 + unsigned int size, len;
4894 + void *base_addr = NULL;
4895 ++ int base_size;
4896 +
4897 + if (name == NULL)
4898 + return -EINVAL;
4899 +@@ -484,7 +487,7 @@ int f2fs_getxattr(struct inode *inode, int index, const char *name,
4900 +
4901 + down_read(&F2FS_I(inode)->i_xattr_sem);
4902 + error = lookup_all_xattrs(inode, ipage, index, len, name,
4903 +- &entry, &base_addr);
4904 ++ &entry, &base_addr, &base_size);
4905 + up_read(&F2FS_I(inode)->i_xattr_sem);
4906 + if (error)
4907 + return error;
4908 +@@ -498,6 +501,11 @@ int f2fs_getxattr(struct inode *inode, int index, const char *name,
4909 +
4910 + if (buffer) {
4911 + char *pval = entry->e_name + entry->e_name_len;
4912 ++
4913 ++ if (base_size - (pval - (char *)base_addr) < size) {
4914 ++ error = -ERANGE;
4915 ++ goto out;
4916 ++ }
4917 + memcpy(buffer, pval, size);
4918 + }
4919 + error = size;
4920 +diff --git a/fs/file.c b/fs/file.c
4921 +index 7ffd6e9d103d..8d059d8973e9 100644
4922 +--- a/fs/file.c
4923 ++++ b/fs/file.c
4924 +@@ -640,6 +640,35 @@ out_unlock:
4925 + }
4926 + EXPORT_SYMBOL(__close_fd); /* for ksys_close() */
4927 +
4928 ++/*
4929 ++ * variant of __close_fd that gets a ref on the file for later fput
4930 ++ */
4931 ++int __close_fd_get_file(unsigned int fd, struct file **res)
4932 ++{
4933 ++ struct files_struct *files = current->files;
4934 ++ struct file *file;
4935 ++ struct fdtable *fdt;
4936 ++
4937 ++ spin_lock(&files->file_lock);
4938 ++ fdt = files_fdtable(files);
4939 ++ if (fd >= fdt->max_fds)
4940 ++ goto out_unlock;
4941 ++ file = fdt->fd[fd];
4942 ++ if (!file)
4943 ++ goto out_unlock;
4944 ++ rcu_assign_pointer(fdt->fd[fd], NULL);
4945 ++ __put_unused_fd(files, fd);
4946 ++ spin_unlock(&files->file_lock);
4947 ++ get_file(file);
4948 ++ *res = file;
4949 ++ return filp_close(file, files);
4950 ++
4951 ++out_unlock:
4952 ++ spin_unlock(&files->file_lock);
4953 ++ *res = NULL;
4954 ++ return -ENOENT;
4955 ++}
4956 ++
4957 + void do_close_on_exec(struct files_struct *files)
4958 + {
4959 + unsigned i;
4960 +diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
4961 +index 41615f38bcff..f07c55ea0c22 100644
4962 +--- a/include/linux/fdtable.h
4963 ++++ b/include/linux/fdtable.h
4964 +@@ -121,6 +121,7 @@ extern void __fd_install(struct files_struct *files,
4965 + unsigned int fd, struct file *file);
4966 + extern int __close_fd(struct files_struct *files,
4967 + unsigned int fd);
4968 ++extern int __close_fd_get_file(unsigned int fd, struct file **res);
4969 +
4970 + extern struct kmem_cache *files_cachep;
4971 +
4972 +diff --git a/include/linux/msi.h b/include/linux/msi.h
4973 +index 0e9c50052ff3..eb213b87617c 100644
4974 +--- a/include/linux/msi.h
4975 ++++ b/include/linux/msi.h
4976 +@@ -116,6 +116,8 @@ struct msi_desc {
4977 + list_first_entry(dev_to_msi_list((dev)), struct msi_desc, list)
4978 + #define for_each_msi_entry(desc, dev) \
4979 + list_for_each_entry((desc), dev_to_msi_list((dev)), list)
4980 ++#define for_each_msi_entry_safe(desc, tmp, dev) \
4981 ++ list_for_each_entry_safe((desc), (tmp), dev_to_msi_list((dev)), list)
4982 +
4983 + #ifdef CONFIG_PCI_MSI
4984 + #define first_pci_msi_entry(pdev) first_msi_entry(&(pdev)->dev)
4985 +diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
4986 +index 6894976b54e3..186cd8e970c7 100644
4987 +--- a/include/linux/ptr_ring.h
4988 ++++ b/include/linux/ptr_ring.h
4989 +@@ -573,6 +573,8 @@ static inline void **__ptr_ring_swap_queue(struct ptr_ring *r, void **queue,
4990 + else if (destroy)
4991 + destroy(ptr);
4992 +
4993 ++ if (producer >= size)
4994 ++ producer = 0;
4995 + __ptr_ring_set_size(r, size);
4996 + r->producer = producer;
4997 + r->consumer_head = 0;
4998 +diff --git a/include/media/cec.h b/include/media/cec.h
4999 +index 3fe5e5d2bb7e..707411ef8ba2 100644
5000 +--- a/include/media/cec.h
5001 ++++ b/include/media/cec.h
5002 +@@ -155,6 +155,7 @@ struct cec_adapter {
5003 + unsigned int transmit_queue_sz;
5004 + struct list_head wait_queue;
5005 + struct cec_data *transmitting;
5006 ++ bool transmit_in_progress;
5007 +
5008 + struct task_struct *kthread_config;
5009 + struct completion config_completion;
5010 +diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
5011 +index 5ce926701bd0..5f67efbb77e8 100644
5012 +--- a/include/net/ip_tunnels.h
5013 ++++ b/include/net/ip_tunnels.h
5014 +@@ -307,6 +307,26 @@ int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *op,
5015 + int ip_tunnel_encap_setup(struct ip_tunnel *t,
5016 + struct ip_tunnel_encap *ipencap);
5017 +
5018 ++static inline bool pskb_inet_may_pull(struct sk_buff *skb)
5019 ++{
5020 ++ int nhlen;
5021 ++
5022 ++ switch (skb->protocol) {
5023 ++#if IS_ENABLED(CONFIG_IPV6)
5024 ++ case htons(ETH_P_IPV6):
5025 ++ nhlen = sizeof(struct ipv6hdr);
5026 ++ break;
5027 ++#endif
5028 ++ case htons(ETH_P_IP):
5029 ++ nhlen = sizeof(struct iphdr);
5030 ++ break;
5031 ++ default:
5032 ++ nhlen = 0;
5033 ++ }
5034 ++
5035 ++ return pskb_network_may_pull(skb, nhlen);
5036 ++}
5037 ++
5038 + static inline int ip_encap_hlen(struct ip_tunnel_encap *e)
5039 + {
5040 + const struct ip_tunnel_encap_ops *ops;
5041 +diff --git a/include/net/sock.h b/include/net/sock.h
5042 +index 0e3a09380655..13f11e905a00 100644
5043 +--- a/include/net/sock.h
5044 ++++ b/include/net/sock.h
5045 +@@ -298,6 +298,7 @@ struct sock_common {
5046 + * @sk_filter: socket filtering instructions
5047 + * @sk_timer: sock cleanup timer
5048 + * @sk_stamp: time stamp of last packet received
5049 ++ * @sk_stamp_seq: lock for accessing sk_stamp on 32 bit architectures only
5050 + * @sk_tsflags: SO_TIMESTAMPING socket options
5051 + * @sk_tskey: counter to disambiguate concurrent tstamp requests
5052 + * @sk_zckey: counter to order MSG_ZEROCOPY notifications
5053 +@@ -474,6 +475,9 @@ struct sock {
5054 + const struct cred *sk_peer_cred;
5055 + long sk_rcvtimeo;
5056 + ktime_t sk_stamp;
5057 ++#if BITS_PER_LONG==32
5058 ++ seqlock_t sk_stamp_seq;
5059 ++#endif
5060 + u16 sk_tsflags;
5061 + u8 sk_shutdown;
5062 + u32 sk_tskey;
5063 +@@ -2287,6 +2291,34 @@ static inline void sk_drops_add(struct sock *sk, const struct sk_buff *skb)
5064 + atomic_add(segs, &sk->sk_drops);
5065 + }
5066 +
5067 ++static inline ktime_t sock_read_timestamp(struct sock *sk)
5068 ++{
5069 ++#if BITS_PER_LONG==32
5070 ++ unsigned int seq;
5071 ++ ktime_t kt;
5072 ++
5073 ++ do {
5074 ++ seq = read_seqbegin(&sk->sk_stamp_seq);
5075 ++ kt = sk->sk_stamp;
5076 ++ } while (read_seqretry(&sk->sk_stamp_seq, seq));
5077 ++
5078 ++ return kt;
5079 ++#else
5080 ++ return sk->sk_stamp;
5081 ++#endif
5082 ++}
5083 ++
5084 ++static inline void sock_write_timestamp(struct sock *sk, ktime_t kt)
5085 ++{
5086 ++#if BITS_PER_LONG==32
5087 ++ write_seqlock(&sk->sk_stamp_seq);
5088 ++ sk->sk_stamp = kt;
5089 ++ write_sequnlock(&sk->sk_stamp_seq);
5090 ++#else
5091 ++ sk->sk_stamp = kt;
5092 ++#endif
5093 ++}
5094 ++
5095 + void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
5096 + struct sk_buff *skb);
5097 + void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
5098 +@@ -2311,7 +2343,7 @@ sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
5099 + (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE)))
5100 + __sock_recv_timestamp(msg, sk, skb);
5101 + else
5102 +- sk->sk_stamp = kt;
5103 ++ sock_write_timestamp(sk, kt);
5104 +
5105 + if (sock_flag(sk, SOCK_WIFI_STATUS) && skb->wifi_acked_valid)
5106 + __sock_recv_wifi_status(msg, sk, skb);
5107 +@@ -2332,9 +2364,9 @@ static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
5108 + if (sk->sk_flags & FLAGS_TS_OR_DROPS || sk->sk_tsflags & TSFLAGS_ANY)
5109 + __sock_recv_ts_and_drops(msg, sk, skb);
5110 + else if (unlikely(sock_flag(sk, SOCK_TIMESTAMP)))
5111 +- sk->sk_stamp = skb->tstamp;
5112 ++ sock_write_timestamp(sk, skb->tstamp);
5113 + else if (unlikely(sk->sk_stamp == SK_DEFAULT_STAMP))
5114 +- sk->sk_stamp = 0;
5115 ++ sock_write_timestamp(sk, 0);
5116 + }
5117 +
5118 + void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags);
5119 +diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
5120 +index 698e0d8a5ca4..d68e9e536814 100644
5121 +--- a/include/trace/events/ext4.h
5122 ++++ b/include/trace/events/ext4.h
5123 +@@ -226,6 +226,26 @@ TRACE_EVENT(ext4_drop_inode,
5124 + (unsigned long) __entry->ino, __entry->drop)
5125 + );
5126 +
5127 ++TRACE_EVENT(ext4_nfs_commit_metadata,
5128 ++ TP_PROTO(struct inode *inode),
5129 ++
5130 ++ TP_ARGS(inode),
5131 ++
5132 ++ TP_STRUCT__entry(
5133 ++ __field( dev_t, dev )
5134 ++ __field( ino_t, ino )
5135 ++ ),
5136 ++
5137 ++ TP_fast_assign(
5138 ++ __entry->dev = inode->i_sb->s_dev;
5139 ++ __entry->ino = inode->i_ino;
5140 ++ ),
5141 ++
5142 ++ TP_printk("dev %d,%d ino %lu",
5143 ++ MAJOR(__entry->dev), MINOR(__entry->dev),
5144 ++ (unsigned long) __entry->ino)
5145 ++);
5146 ++
5147 + TRACE_EVENT(ext4_mark_inode_dirty,
5148 + TP_PROTO(struct inode *inode, unsigned long IP),
5149 +
5150 +diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
5151 +index 6aaf5dd5383b..1f84977fab47 100644
5152 +--- a/kernel/cgroup/cgroup.c
5153 ++++ b/kernel/cgroup/cgroup.c
5154 +@@ -4202,20 +4202,25 @@ static void css_task_iter_advance(struct css_task_iter *it)
5155 +
5156 + lockdep_assert_held(&css_set_lock);
5157 + repeat:
5158 +- /*
5159 +- * Advance iterator to find next entry. cset->tasks is consumed
5160 +- * first and then ->mg_tasks. After ->mg_tasks, we move onto the
5161 +- * next cset.
5162 +- */
5163 +- next = it->task_pos->next;
5164 ++ if (it->task_pos) {
5165 ++ /*
5166 ++ * Advance iterator to find next entry. cset->tasks is
5167 ++ * consumed first and then ->mg_tasks. After ->mg_tasks,
5168 ++ * we move onto the next cset.
5169 ++ */
5170 ++ next = it->task_pos->next;
5171 +
5172 +- if (next == it->tasks_head)
5173 +- next = it->mg_tasks_head->next;
5174 ++ if (next == it->tasks_head)
5175 ++ next = it->mg_tasks_head->next;
5176 +
5177 +- if (next == it->mg_tasks_head)
5178 ++ if (next == it->mg_tasks_head)
5179 ++ css_task_iter_advance_css_set(it);
5180 ++ else
5181 ++ it->task_pos = next;
5182 ++ } else {
5183 ++ /* called from start, proceed to the first cset */
5184 + css_task_iter_advance_css_set(it);
5185 +- else
5186 +- it->task_pos = next;
5187 ++ }
5188 +
5189 + /* if PROCS, skip over tasks which aren't group leaders */
5190 + if ((it->flags & CSS_TASK_ITER_PROCS) && it->task_pos &&
5191 +@@ -4255,7 +4260,7 @@ void css_task_iter_start(struct cgroup_subsys_state *css, unsigned int flags,
5192 +
5193 + it->cset_head = it->cset_pos;
5194 +
5195 +- css_task_iter_advance_css_set(it);
5196 ++ css_task_iter_advance(it);
5197 +
5198 + spin_unlock_irq(&css_set_lock);
5199 + }
5200 +diff --git a/kernel/panic.c b/kernel/panic.c
5201 +index f6d549a29a5c..d10c340c43b0 100644
5202 +--- a/kernel/panic.c
5203 ++++ b/kernel/panic.c
5204 +@@ -14,6 +14,7 @@
5205 + #include <linux/kmsg_dump.h>
5206 + #include <linux/kallsyms.h>
5207 + #include <linux/notifier.h>
5208 ++#include <linux/vt_kern.h>
5209 + #include <linux/module.h>
5210 + #include <linux/random.h>
5211 + #include <linux/ftrace.h>
5212 +@@ -237,7 +238,10 @@ void panic(const char *fmt, ...)
5213 + if (_crash_kexec_post_notifiers)
5214 + __crash_kexec(NULL);
5215 +
5216 +- bust_spinlocks(0);
5217 ++#ifdef CONFIG_VT
5218 ++ unblank_screen();
5219 ++#endif
5220 ++ console_unblank();
5221 +
5222 + /*
5223 + * We may have ended up stopping the CPU holding the lock (in
5224 +diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
5225 +index c603d33d5410..5d01edf8d819 100644
5226 +--- a/net/ax25/af_ax25.c
5227 ++++ b/net/ax25/af_ax25.c
5228 +@@ -653,15 +653,22 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname,
5229 + break;
5230 + }
5231 +
5232 +- dev = dev_get_by_name(&init_net, devname);
5233 ++ rtnl_lock();
5234 ++ dev = __dev_get_by_name(&init_net, devname);
5235 + if (!dev) {
5236 ++ rtnl_unlock();
5237 + res = -ENODEV;
5238 + break;
5239 + }
5240 +
5241 + ax25->ax25_dev = ax25_dev_ax25dev(dev);
5242 ++ if (!ax25->ax25_dev) {
5243 ++ rtnl_unlock();
5244 ++ res = -ENODEV;
5245 ++ break;
5246 ++ }
5247 + ax25_fillin_cb(ax25, ax25->ax25_dev);
5248 +- dev_put(dev);
5249 ++ rtnl_unlock();
5250 + break;
5251 +
5252 + default:
5253 +diff --git a/net/ax25/ax25_dev.c b/net/ax25/ax25_dev.c
5254 +index 9a3a301e1e2f..d92195cd7834 100644
5255 +--- a/net/ax25/ax25_dev.c
5256 ++++ b/net/ax25/ax25_dev.c
5257 +@@ -116,6 +116,7 @@ void ax25_dev_device_down(struct net_device *dev)
5258 + if ((s = ax25_dev_list) == ax25_dev) {
5259 + ax25_dev_list = s->next;
5260 + spin_unlock_bh(&ax25_dev_lock);
5261 ++ dev->ax25_ptr = NULL;
5262 + dev_put(dev);
5263 + kfree(ax25_dev);
5264 + return;
5265 +@@ -125,6 +126,7 @@ void ax25_dev_device_down(struct net_device *dev)
5266 + if (s->next == ax25_dev) {
5267 + s->next = ax25_dev->next;
5268 + spin_unlock_bh(&ax25_dev_lock);
5269 ++ dev->ax25_ptr = NULL;
5270 + dev_put(dev);
5271 + kfree(ax25_dev);
5272 + return;
5273 +diff --git a/net/compat.c b/net/compat.c
5274 +index 47a614b370cd..d1f3a8a0b3ef 100644
5275 +--- a/net/compat.c
5276 ++++ b/net/compat.c
5277 +@@ -467,12 +467,14 @@ int compat_sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
5278 + ctv = (struct compat_timeval __user *) userstamp;
5279 + err = -ENOENT;
5280 + sock_enable_timestamp(sk, SOCK_TIMESTAMP);
5281 +- tv = ktime_to_timeval(sk->sk_stamp);
5282 ++ tv = ktime_to_timeval(sock_read_timestamp(sk));
5283 ++
5284 + if (tv.tv_sec == -1)
5285 + return err;
5286 + if (tv.tv_sec == 0) {
5287 +- sk->sk_stamp = ktime_get_real();
5288 +- tv = ktime_to_timeval(sk->sk_stamp);
5289 ++ ktime_t kt = ktime_get_real();
5290 ++ sock_write_timestamp(sk, kt);
5291 ++ tv = ktime_to_timeval(kt);
5292 + }
5293 + err = 0;
5294 + if (put_user(tv.tv_sec, &ctv->tv_sec) ||
5295 +@@ -494,12 +496,13 @@ int compat_sock_get_timestampns(struct sock *sk, struct timespec __user *usersta
5296 + ctv = (struct compat_timespec __user *) userstamp;
5297 + err = -ENOENT;
5298 + sock_enable_timestamp(sk, SOCK_TIMESTAMP);
5299 +- ts = ktime_to_timespec(sk->sk_stamp);
5300 ++ ts = ktime_to_timespec(sock_read_timestamp(sk));
5301 + if (ts.tv_sec == -1)
5302 + return err;
5303 + if (ts.tv_sec == 0) {
5304 +- sk->sk_stamp = ktime_get_real();
5305 +- ts = ktime_to_timespec(sk->sk_stamp);
5306 ++ ktime_t kt = ktime_get_real();
5307 ++ sock_write_timestamp(sk, kt);
5308 ++ ts = ktime_to_timespec(kt);
5309 + }
5310 + err = 0;
5311 + if (put_user(ts.tv_sec, &ctv->tv_sec) ||
5312 +diff --git a/net/core/sock.c b/net/core/sock.c
5313 +index 080a880a1761..98659fb6e9fb 100644
5314 +--- a/net/core/sock.c
5315 ++++ b/net/core/sock.c
5316 +@@ -2743,6 +2743,9 @@ void sock_init_data(struct socket *sock, struct sock *sk)
5317 + sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
5318 +
5319 + sk->sk_stamp = SK_DEFAULT_STAMP;
5320 ++#if BITS_PER_LONG==32
5321 ++ seqlock_init(&sk->sk_stamp_seq);
5322 ++#endif
5323 + atomic_set(&sk->sk_zckey, 0);
5324 +
5325 + #ifdef CONFIG_NET_RX_BUSY_POLL
5326 +@@ -2842,12 +2845,13 @@ int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
5327 + struct timeval tv;
5328 +
5329 + sock_enable_timestamp(sk, SOCK_TIMESTAMP);
5330 +- tv = ktime_to_timeval(sk->sk_stamp);
5331 ++ tv = ktime_to_timeval(sock_read_timestamp(sk));
5332 + if (tv.tv_sec == -1)
5333 + return -ENOENT;
5334 + if (tv.tv_sec == 0) {
5335 +- sk->sk_stamp = ktime_get_real();
5336 +- tv = ktime_to_timeval(sk->sk_stamp);
5337 ++ ktime_t kt = ktime_get_real();
5338 ++ sock_write_timestamp(sk, kt);
5339 ++ tv = ktime_to_timeval(kt);
5340 + }
5341 + return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
5342 + }
5343 +@@ -2858,11 +2862,12 @@ int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
5344 + struct timespec ts;
5345 +
5346 + sock_enable_timestamp(sk, SOCK_TIMESTAMP);
5347 +- ts = ktime_to_timespec(sk->sk_stamp);
5348 ++ ts = ktime_to_timespec(sock_read_timestamp(sk));
5349 + if (ts.tv_sec == -1)
5350 + return -ENOENT;
5351 + if (ts.tv_sec == 0) {
5352 +- sk->sk_stamp = ktime_get_real();
5353 ++ ktime_t kt = ktime_get_real();
5354 ++ sock_write_timestamp(sk, kt);
5355 + ts = ktime_to_timespec(sk->sk_stamp);
5356 + }
5357 + return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
5358 +diff --git a/net/ieee802154/6lowpan/tx.c b/net/ieee802154/6lowpan/tx.c
5359 +index ca53efa17be1..8bec827081cd 100644
5360 +--- a/net/ieee802154/6lowpan/tx.c
5361 ++++ b/net/ieee802154/6lowpan/tx.c
5362 +@@ -48,6 +48,9 @@ int lowpan_header_create(struct sk_buff *skb, struct net_device *ldev,
5363 + const struct ipv6hdr *hdr = ipv6_hdr(skb);
5364 + struct neighbour *n;
5365 +
5366 ++ if (!daddr)
5367 ++ return -EINVAL;
5368 ++
5369 + /* TODO:
5370 + * if this package isn't ipv6 one, where should it be routed?
5371 + */
5372 +diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
5373 +index 38befe829caf..0fe9419bd12b 100644
5374 +--- a/net/ipv4/ip_gre.c
5375 ++++ b/net/ipv4/ip_gre.c
5376 +@@ -674,6 +674,9 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
5377 + struct ip_tunnel *tunnel = netdev_priv(dev);
5378 + const struct iphdr *tnl_params;
5379 +
5380 ++ if (!pskb_inet_may_pull(skb))
5381 ++ goto free_skb;
5382 ++
5383 + if (tunnel->collect_md) {
5384 + gre_fb_xmit(skb, dev, skb->protocol);
5385 + return NETDEV_TX_OK;
5386 +@@ -717,6 +720,9 @@ static netdev_tx_t erspan_xmit(struct sk_buff *skb,
5387 + struct ip_tunnel *tunnel = netdev_priv(dev);
5388 + bool truncate = false;
5389 +
5390 ++ if (!pskb_inet_may_pull(skb))
5391 ++ goto free_skb;
5392 ++
5393 + if (tunnel->collect_md) {
5394 + erspan_fb_xmit(skb, dev, skb->protocol);
5395 + return NETDEV_TX_OK;
5396 +@@ -760,6 +766,9 @@ static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
5397 + {
5398 + struct ip_tunnel *tunnel = netdev_priv(dev);
5399 +
5400 ++ if (!pskb_inet_may_pull(skb))
5401 ++ goto free_skb;
5402 ++
5403 + if (tunnel->collect_md) {
5404 + gre_fb_xmit(skb, dev, htons(ETH_P_TEB));
5405 + return NETDEV_TX_OK;
5406 +diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
5407 +index 284a22154b4e..c4f5602308ed 100644
5408 +--- a/net/ipv4/ip_tunnel.c
5409 ++++ b/net/ipv4/ip_tunnel.c
5410 +@@ -627,7 +627,6 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
5411 + const struct iphdr *tnl_params, u8 protocol)
5412 + {
5413 + struct ip_tunnel *tunnel = netdev_priv(dev);
5414 +- unsigned int inner_nhdr_len = 0;
5415 + const struct iphdr *inner_iph;
5416 + struct flowi4 fl4;
5417 + u8 tos, ttl;
5418 +@@ -637,14 +636,6 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
5419 + __be32 dst;
5420 + bool connected;
5421 +
5422 +- /* ensure we can access the inner net header, for several users below */
5423 +- if (skb->protocol == htons(ETH_P_IP))
5424 +- inner_nhdr_len = sizeof(struct iphdr);
5425 +- else if (skb->protocol == htons(ETH_P_IPV6))
5426 +- inner_nhdr_len = sizeof(struct ipv6hdr);
5427 +- if (unlikely(!pskb_may_pull(skb, inner_nhdr_len)))
5428 +- goto tx_error;
5429 +-
5430 + inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
5431 + connected = (tunnel->parms.iph.daddr != 0);
5432 +
5433 +diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
5434 +index de31b302d69c..d7b43e700023 100644
5435 +--- a/net/ipv4/ip_vti.c
5436 ++++ b/net/ipv4/ip_vti.c
5437 +@@ -241,6 +241,9 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
5438 + struct ip_tunnel *tunnel = netdev_priv(dev);
5439 + struct flowi fl;
5440 +
5441 ++ if (!pskb_inet_may_pull(skb))
5442 ++ goto tx_err;
5443 ++
5444 + memset(&fl, 0, sizeof(fl));
5445 +
5446 + switch (skb->protocol) {
5447 +@@ -253,15 +256,18 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
5448 + memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
5449 + break;
5450 + default:
5451 +- dev->stats.tx_errors++;
5452 +- dev_kfree_skb(skb);
5453 +- return NETDEV_TX_OK;
5454 ++ goto tx_err;
5455 + }
5456 +
5457 + /* override mark with tunnel output key */
5458 + fl.flowi_mark = be32_to_cpu(tunnel->parms.o_key);
5459 +
5460 + return vti_xmit(skb, dev, &fl);
5461 ++
5462 ++tx_err:
5463 ++ dev->stats.tx_errors++;
5464 ++ kfree_skb(skb);
5465 ++ return NETDEV_TX_OK;
5466 + }
5467 +
5468 + static int vti4_err(struct sk_buff *skb, u32 info)
5469 +diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
5470 +index 515adbdba1d2..0f7d434c1eed 100644
5471 +--- a/net/ipv6/ip6_gre.c
5472 ++++ b/net/ipv6/ip6_gre.c
5473 +@@ -879,6 +879,9 @@ static netdev_tx_t ip6gre_tunnel_xmit(struct sk_buff *skb,
5474 + struct net_device_stats *stats = &t->dev->stats;
5475 + int ret;
5476 +
5477 ++ if (!pskb_inet_may_pull(skb))
5478 ++ goto tx_err;
5479 ++
5480 + if (!ip6_tnl_xmit_ctl(t, &t->parms.laddr, &t->parms.raddr))
5481 + goto tx_err;
5482 +
5483 +@@ -921,6 +924,9 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
5484 + int nhoff;
5485 + int thoff;
5486 +
5487 ++ if (!pskb_inet_may_pull(skb))
5488 ++ goto tx_err;
5489 ++
5490 + if (!ip6_tnl_xmit_ctl(t, &t->parms.laddr, &t->parms.raddr))
5491 + goto tx_err;
5492 +
5493 +@@ -993,8 +999,6 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
5494 + goto tx_err;
5495 + }
5496 + } else {
5497 +- struct ipv6hdr *ipv6h = ipv6_hdr(skb);
5498 +-
5499 + switch (skb->protocol) {
5500 + case htons(ETH_P_IP):
5501 + memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
5502 +@@ -1002,7 +1006,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
5503 + &dsfield, &encap_limit);
5504 + break;
5505 + case htons(ETH_P_IPV6):
5506 +- if (ipv6_addr_equal(&t->parms.raddr, &ipv6h->saddr))
5507 ++ if (ipv6_addr_equal(&t->parms.raddr, &ipv6_hdr(skb)->saddr))
5508 + goto tx_err;
5509 + if (prepare_ip6gre_xmit_ipv6(skb, dev, &fl6,
5510 + &dsfield, &encap_limit))
5511 +diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
5512 +index 99179b9c8384..0c6403cf8b52 100644
5513 +--- a/net/ipv6/ip6_tunnel.c
5514 ++++ b/net/ipv6/ip6_tunnel.c
5515 +@@ -1243,10 +1243,6 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
5516 + u8 tproto;
5517 + int err;
5518 +
5519 +- /* ensure we can access the full inner ip header */
5520 +- if (!pskb_may_pull(skb, sizeof(struct iphdr)))
5521 +- return -1;
5522 +-
5523 + iph = ip_hdr(skb);
5524 + memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
5525 +
5526 +@@ -1321,9 +1317,6 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
5527 + u8 tproto;
5528 + int err;
5529 +
5530 +- if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
5531 +- return -1;
5532 +-
5533 + ipv6h = ipv6_hdr(skb);
5534 + tproto = READ_ONCE(t->parms.proto);
5535 + if ((tproto != IPPROTO_IPV6 && tproto != 0) ||
5536 +@@ -1405,6 +1398,9 @@ ip6_tnl_start_xmit(struct sk_buff *skb, struct net_device *dev)
5537 + struct net_device_stats *stats = &t->dev->stats;
5538 + int ret;
5539 +
5540 ++ if (!pskb_inet_may_pull(skb))
5541 ++ goto tx_err;
5542 ++
5543 + switch (skb->protocol) {
5544 + case htons(ETH_P_IP):
5545 + ret = ip4ip6_tnl_xmit(skb, dev);
5546 +diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
5547 +index 706fe42e4928..8b6eefff2f7e 100644
5548 +--- a/net/ipv6/ip6_vti.c
5549 ++++ b/net/ipv6/ip6_vti.c
5550 +@@ -522,18 +522,18 @@ vti6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
5551 + {
5552 + struct ip6_tnl *t = netdev_priv(dev);
5553 + struct net_device_stats *stats = &t->dev->stats;
5554 +- struct ipv6hdr *ipv6h;
5555 + struct flowi fl;
5556 + int ret;
5557 +
5558 ++ if (!pskb_inet_may_pull(skb))
5559 ++ goto tx_err;
5560 ++
5561 + memset(&fl, 0, sizeof(fl));
5562 +
5563 + switch (skb->protocol) {
5564 + case htons(ETH_P_IPV6):
5565 +- ipv6h = ipv6_hdr(skb);
5566 +-
5567 + if ((t->parms.proto != IPPROTO_IPV6 && t->parms.proto != 0) ||
5568 +- vti6_addr_conflict(t, ipv6h))
5569 ++ vti6_addr_conflict(t, ipv6_hdr(skb)))
5570 + goto tx_err;
5571 +
5572 + xfrm_decode_session(skb, &fl, AF_INET6);
5573 +diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
5574 +index 377a2ee5d9ad..eb3220812b56 100644
5575 +--- a/net/ipv6/ip6mr.c
5576 ++++ b/net/ipv6/ip6mr.c
5577 +@@ -51,6 +51,7 @@
5578 + #include <linux/export.h>
5579 + #include <net/ip6_checksum.h>
5580 + #include <linux/netconf.h>
5581 ++#include <net/ip_tunnels.h>
5582 +
5583 + #include <linux/nospec.h>
5584 +
5585 +@@ -599,13 +600,12 @@ static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
5586 + .flowi6_iif = skb->skb_iif ? : LOOPBACK_IFINDEX,
5587 + .flowi6_mark = skb->mark,
5588 + };
5589 +- int err;
5590 +
5591 +- err = ip6mr_fib_lookup(net, &fl6, &mrt);
5592 +- if (err < 0) {
5593 +- kfree_skb(skb);
5594 +- return err;
5595 +- }
5596 ++ if (!pskb_inet_may_pull(skb))
5597 ++ goto tx_err;
5598 ++
5599 ++ if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
5600 ++ goto tx_err;
5601 +
5602 + read_lock(&mrt_lock);
5603 + dev->stats.tx_bytes += skb->len;
5604 +@@ -614,6 +614,11 @@ static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
5605 + read_unlock(&mrt_lock);
5606 + kfree_skb(skb);
5607 + return NETDEV_TX_OK;
5608 ++
5609 ++tx_err:
5610 ++ dev->stats.tx_errors++;
5611 ++ kfree_skb(skb);
5612 ++ return NETDEV_TX_OK;
5613 + }
5614 +
5615 + static int reg_vif_get_iflink(const struct net_device *dev)
5616 +diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
5617 +index 51c9f75f34b9..1e03305c0549 100644
5618 +--- a/net/ipv6/sit.c
5619 ++++ b/net/ipv6/sit.c
5620 +@@ -1021,6 +1021,9 @@ tx_error:
5621 + static netdev_tx_t sit_tunnel_xmit(struct sk_buff *skb,
5622 + struct net_device *dev)
5623 + {
5624 ++ if (!pskb_inet_may_pull(skb))
5625 ++ goto tx_err;
5626 ++
5627 + switch (skb->protocol) {
5628 + case htons(ETH_P_IP):
5629 + sit_tunnel_xmit__(skb, dev, IPPROTO_IPIP);
5630 +diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
5631 +index 03f37c4e64fe..1d3144d19903 100644
5632 +--- a/net/netrom/af_netrom.c
5633 ++++ b/net/netrom/af_netrom.c
5634 +@@ -153,7 +153,7 @@ static struct sock *nr_find_listener(ax25_address *addr)
5635 + sk_for_each(s, &nr_list)
5636 + if (!ax25cmp(&nr_sk(s)->source_addr, addr) &&
5637 + s->sk_state == TCP_LISTEN) {
5638 +- bh_lock_sock(s);
5639 ++ sock_hold(s);
5640 + goto found;
5641 + }
5642 + s = NULL;
5643 +@@ -174,7 +174,7 @@ static struct sock *nr_find_socket(unsigned char index, unsigned char id)
5644 + struct nr_sock *nr = nr_sk(s);
5645 +
5646 + if (nr->my_index == index && nr->my_id == id) {
5647 +- bh_lock_sock(s);
5648 ++ sock_hold(s);
5649 + goto found;
5650 + }
5651 + }
5652 +@@ -198,7 +198,7 @@ static struct sock *nr_find_peer(unsigned char index, unsigned char id,
5653 +
5654 + if (nr->your_index == index && nr->your_id == id &&
5655 + !ax25cmp(&nr->dest_addr, dest)) {
5656 +- bh_lock_sock(s);
5657 ++ sock_hold(s);
5658 + goto found;
5659 + }
5660 + }
5661 +@@ -224,7 +224,7 @@ static unsigned short nr_find_next_circuit(void)
5662 + if (i != 0 && j != 0) {
5663 + if ((sk=nr_find_socket(i, j)) == NULL)
5664 + break;
5665 +- bh_unlock_sock(sk);
5666 ++ sock_put(sk);
5667 + }
5668 +
5669 + id++;
5670 +@@ -920,6 +920,7 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
5671 + }
5672 +
5673 + if (sk != NULL) {
5674 ++ bh_lock_sock(sk);
5675 + skb_reset_transport_header(skb);
5676 +
5677 + if (frametype == NR_CONNACK && skb->len == 22)
5678 +@@ -929,6 +930,7 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
5679 +
5680 + ret = nr_process_rx_frame(sk, skb);
5681 + bh_unlock_sock(sk);
5682 ++ sock_put(sk);
5683 + return ret;
5684 + }
5685 +
5686 +@@ -960,10 +962,12 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
5687 + (make = nr_make_new(sk)) == NULL) {
5688 + nr_transmit_refusal(skb, 0);
5689 + if (sk)
5690 +- bh_unlock_sock(sk);
5691 ++ sock_put(sk);
5692 + return 0;
5693 + }
5694 +
5695 ++ bh_lock_sock(sk);
5696 ++
5697 + window = skb->data[20];
5698 +
5699 + skb->sk = make;
5700 +@@ -1016,6 +1020,7 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
5701 + sk->sk_data_ready(sk);
5702 +
5703 + bh_unlock_sock(sk);
5704 ++ sock_put(sk);
5705 +
5706 + nr_insert_socket(make);
5707 +
5708 +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
5709 +index 5dda263b4a0a..eedacdebcd4c 100644
5710 +--- a/net/packet/af_packet.c
5711 ++++ b/net/packet/af_packet.c
5712 +@@ -2625,7 +2625,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
5713 + sll_addr)))
5714 + goto out;
5715 + proto = saddr->sll_protocol;
5716 +- addr = saddr->sll_addr;
5717 ++ addr = saddr->sll_halen ? saddr->sll_addr : NULL;
5718 + dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
5719 + if (addr && dev && saddr->sll_halen < dev->addr_len)
5720 + goto out;
5721 +@@ -2825,7 +2825,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
5722 + if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
5723 + goto out;
5724 + proto = saddr->sll_protocol;
5725 +- addr = saddr->sll_addr;
5726 ++ addr = saddr->sll_halen ? saddr->sll_addr : NULL;
5727 + dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
5728 + if (addr && dev && saddr->sll_halen < dev->addr_len)
5729 + goto out;
5730 +diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
5731 +index 986f3ed7d1a2..b7e67310ec37 100644
5732 +--- a/net/sunrpc/svcsock.c
5733 ++++ b/net/sunrpc/svcsock.c
5734 +@@ -549,7 +549,7 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
5735 + /* Don't enable netstamp, sunrpc doesn't
5736 + need that much accuracy */
5737 + }
5738 +- svsk->sk_sk->sk_stamp = skb->tstamp;
5739 ++ sock_write_timestamp(svsk->sk_sk, skb->tstamp);
5740 + set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); /* there may be more data... */
5741 +
5742 + len = skb->len;
5743 +diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
5744 +index e65c3a8551e4..040153ffc357 100644
5745 +--- a/net/tipc/bearer.c
5746 ++++ b/net/tipc/bearer.c
5747 +@@ -317,7 +317,6 @@ static int tipc_enable_bearer(struct net *net, const char *name,
5748 + res = tipc_disc_create(net, b, &b->bcast_addr, &skb);
5749 + if (res) {
5750 + bearer_disable(net, b);
5751 +- kfree(b);
5752 + errstr = "failed to create discoverer";
5753 + goto rejected;
5754 + }
5755 +diff --git a/security/keys/keyctl_pkey.c b/security/keys/keyctl_pkey.c
5756 +index 70e65a2ff207..8bdea5abad11 100644
5757 +--- a/security/keys/keyctl_pkey.c
5758 ++++ b/security/keys/keyctl_pkey.c
5759 +@@ -50,6 +50,8 @@ static int keyctl_pkey_params_parse(struct kernel_pkey_params *params)
5760 + if (*p == '\0' || *p == ' ' || *p == '\t')
5761 + continue;
5762 + token = match_token(p, param_keys, args);
5763 ++ if (token == Opt_err)
5764 ++ return -EINVAL;
5765 + if (__test_and_set_bit(token, &token_mask))
5766 + return -EINVAL;
5767 + q = args[0].from;
5768 +diff --git a/sound/core/pcm.c b/sound/core/pcm.c
5769 +index fdb9b92fc8d6..01b9d62eef14 100644
5770 +--- a/sound/core/pcm.c
5771 ++++ b/sound/core/pcm.c
5772 +@@ -25,6 +25,7 @@
5773 + #include <linux/time.h>
5774 + #include <linux/mutex.h>
5775 + #include <linux/device.h>
5776 ++#include <linux/nospec.h>
5777 + #include <sound/core.h>
5778 + #include <sound/minors.h>
5779 + #include <sound/pcm.h>
5780 +@@ -129,6 +130,7 @@ static int snd_pcm_control_ioctl(struct snd_card *card,
5781 + return -EFAULT;
5782 + if (stream < 0 || stream > 1)
5783 + return -EINVAL;
5784 ++ stream = array_index_nospec(stream, 2);
5785 + if (get_user(subdevice, &info->subdevice))
5786 + return -EFAULT;
5787 + mutex_lock(&register_mutex);
5788 +diff --git a/sound/firewire/amdtp-stream-trace.h b/sound/firewire/amdtp-stream-trace.h
5789 +index 54cdd4ffa9ce..ac20acf48fc6 100644
5790 +--- a/sound/firewire/amdtp-stream-trace.h
5791 ++++ b/sound/firewire/amdtp-stream-trace.h
5792 +@@ -131,7 +131,7 @@ TRACE_EVENT(in_packet_without_header,
5793 + __entry->index = index;
5794 + ),
5795 + TP_printk(
5796 +- "%02u %04u %04x %04x %02d %03u %3u %3u %02u %01u %02u",
5797 ++ "%02u %04u %04x %04x %02d %03u %02u %03u %02u %01u %02u",
5798 + __entry->second,
5799 + __entry->cycle,
5800 + __entry->src,
5801 +@@ -169,7 +169,7 @@ TRACE_EVENT(out_packet_without_header,
5802 + __entry->dest = fw_parent_device(s->unit)->node_id;
5803 + __entry->payload_quadlets = payload_length / 4;
5804 + __entry->data_blocks = data_blocks,
5805 +- __entry->data_blocks = s->data_block_counter,
5806 ++ __entry->data_block_counter = s->data_block_counter,
5807 + __entry->packet_index = s->packet_index;
5808 + __entry->irq = !!in_interrupt();
5809 + __entry->index = index;
5810 +diff --git a/sound/firewire/amdtp-stream.c b/sound/firewire/amdtp-stream.c
5811 +index 9be76c808fcc..3ada55ed5381 100644
5812 +--- a/sound/firewire/amdtp-stream.c
5813 ++++ b/sound/firewire/amdtp-stream.c
5814 +@@ -654,15 +654,17 @@ end:
5815 + }
5816 +
5817 + static int handle_in_packet_without_header(struct amdtp_stream *s,
5818 +- unsigned int payload_quadlets, unsigned int cycle,
5819 ++ unsigned int payload_length, unsigned int cycle,
5820 + unsigned int index)
5821 + {
5822 + __be32 *buffer;
5823 ++ unsigned int payload_quadlets;
5824 + unsigned int data_blocks;
5825 + struct snd_pcm_substream *pcm;
5826 + unsigned int pcm_frames;
5827 +
5828 + buffer = s->buffer.packets[s->packet_index].buffer;
5829 ++ payload_quadlets = payload_length / 4;
5830 + data_blocks = payload_quadlets / s->data_block_quadlets;
5831 +
5832 + trace_in_packet_without_header(s, cycle, payload_quadlets, data_blocks,
5833 +diff --git a/sound/firewire/fireface/ff-protocol-ff400.c b/sound/firewire/fireface/ff-protocol-ff400.c
5834 +index 654a50319198..4d191172fe3f 100644
5835 +--- a/sound/firewire/fireface/ff-protocol-ff400.c
5836 ++++ b/sound/firewire/fireface/ff-protocol-ff400.c
5837 +@@ -152,7 +152,7 @@ static int ff400_switch_fetching_mode(struct snd_ff *ff, bool enable)
5838 + if (reg == NULL)
5839 + return -ENOMEM;
5840 +
5841 +- if (enable) {
5842 ++ if (!enable) {
5843 + /*
5844 + * Each quadlet is corresponding to data channels in a data
5845 + * blocks in reverse order. Precisely, quadlets for available
5846 +diff --git a/sound/pci/emu10k1/emufx.c b/sound/pci/emu10k1/emufx.c
5847 +index 6ebe817801ea..1f25e6d029d8 100644
5848 +--- a/sound/pci/emu10k1/emufx.c
5849 ++++ b/sound/pci/emu10k1/emufx.c
5850 +@@ -36,6 +36,7 @@
5851 + #include <linux/init.h>
5852 + #include <linux/mutex.h>
5853 + #include <linux/moduleparam.h>
5854 ++#include <linux/nospec.h>
5855 +
5856 + #include <sound/core.h>
5857 + #include <sound/tlv.h>
5858 +@@ -1026,6 +1027,8 @@ static int snd_emu10k1_ipcm_poke(struct snd_emu10k1 *emu,
5859 +
5860 + if (ipcm->substream >= EMU10K1_FX8010_PCM_COUNT)
5861 + return -EINVAL;
5862 ++ ipcm->substream = array_index_nospec(ipcm->substream,
5863 ++ EMU10K1_FX8010_PCM_COUNT);
5864 + if (ipcm->channels > 32)
5865 + return -EINVAL;
5866 + pcm = &emu->fx8010.pcm[ipcm->substream];
5867 +@@ -1072,6 +1075,8 @@ static int snd_emu10k1_ipcm_peek(struct snd_emu10k1 *emu,
5868 +
5869 + if (ipcm->substream >= EMU10K1_FX8010_PCM_COUNT)
5870 + return -EINVAL;
5871 ++ ipcm->substream = array_index_nospec(ipcm->substream,
5872 ++ EMU10K1_FX8010_PCM_COUNT);
5873 + pcm = &emu->fx8010.pcm[ipcm->substream];
5874 + mutex_lock(&emu->fx8010.lock);
5875 + spin_lock_irq(&emu->reg_lock);
5876 +diff --git a/sound/pci/hda/hda_tegra.c b/sound/pci/hda/hda_tegra.c
5877 +index dd7d4242d6d2..86841d46a8fc 100644
5878 +--- a/sound/pci/hda/hda_tegra.c
5879 ++++ b/sound/pci/hda/hda_tegra.c
5880 +@@ -233,10 +233,12 @@ static int hda_tegra_suspend(struct device *dev)
5881 + struct snd_card *card = dev_get_drvdata(dev);
5882 + struct azx *chip = card->private_data;
5883 + struct hda_tegra *hda = container_of(chip, struct hda_tegra, chip);
5884 ++ struct hdac_bus *bus = azx_bus(chip);
5885 +
5886 + snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
5887 +
5888 + azx_stop_chip(chip);
5889 ++ synchronize_irq(bus->irq);
5890 + azx_enter_link_reset(chip);
5891 + hda_tegra_disable_clocks(hda);
5892 +
5893 +diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
5894 +index 950e02e71766..51cc6589443f 100644
5895 +--- a/sound/pci/hda/patch_conexant.c
5896 ++++ b/sound/pci/hda/patch_conexant.c
5897 +@@ -923,6 +923,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
5898 + SND_PCI_QUIRK(0x103c, 0x8079, "HP EliteBook 840 G3", CXT_FIXUP_HP_DOCK),
5899 + SND_PCI_QUIRK(0x103c, 0x807C, "HP EliteBook 820 G3", CXT_FIXUP_HP_DOCK),
5900 + SND_PCI_QUIRK(0x103c, 0x80FD, "HP ProBook 640 G2", CXT_FIXUP_HP_DOCK),
5901 ++ SND_PCI_QUIRK(0x103c, 0x828c, "HP EliteBook 840 G4", CXT_FIXUP_HP_DOCK),
5902 + SND_PCI_QUIRK(0x103c, 0x83b3, "HP EliteBook 830 G5", CXT_FIXUP_HP_DOCK),
5903 + SND_PCI_QUIRK(0x103c, 0x83d3, "HP ProBook 640 G4", CXT_FIXUP_HP_DOCK),
5904 + SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
5905 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
5906 +index 15021c839372..54fc9c0f07de 100644
5907 +--- a/sound/pci/hda/patch_realtek.c
5908 ++++ b/sound/pci/hda/patch_realtek.c
5909 +@@ -6424,7 +6424,7 @@ static const struct hda_fixup alc269_fixups[] = {
5910 + [ALC294_FIXUP_ASUS_HEADSET_MIC] = {
5911 + .type = HDA_FIXUP_PINS,
5912 + .v.pins = (const struct hda_pintbl[]) {
5913 +- { 0x19, 0x01a1113c }, /* use as headset mic, without its own jack detect */
5914 ++ { 0x19, 0x01a1103c }, /* use as headset mic */
5915 + { }
5916 + },
5917 + .chained = true,
5918 +@@ -6573,6 +6573,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5919 + SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
5920 + SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
5921 + SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
5922 ++ SND_PCI_QUIRK(0x1043, 0x10a1, "ASUS UX391UA", ALC294_FIXUP_ASUS_SPK),
5923 + SND_PCI_QUIRK(0x1043, 0x10c0, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
5924 + SND_PCI_QUIRK(0x1043, 0x10d0, "ASUS X540LA/X540LJ", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
5925 + SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
5926 +diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
5927 +index 1bff4b1b39cd..ba99ff0e93e0 100644
5928 +--- a/sound/pci/rme9652/hdsp.c
5929 ++++ b/sound/pci/rme9652/hdsp.c
5930 +@@ -30,6 +30,7 @@
5931 + #include <linux/math64.h>
5932 + #include <linux/vmalloc.h>
5933 + #include <linux/io.h>
5934 ++#include <linux/nospec.h>
5935 +
5936 + #include <sound/core.h>
5937 + #include <sound/control.h>
5938 +@@ -4092,15 +4093,16 @@ static int snd_hdsp_channel_info(struct snd_pcm_substream *substream,
5939 + struct snd_pcm_channel_info *info)
5940 + {
5941 + struct hdsp *hdsp = snd_pcm_substream_chip(substream);
5942 +- int mapped_channel;
5943 ++ unsigned int channel = info->channel;
5944 +
5945 +- if (snd_BUG_ON(info->channel >= hdsp->max_channels))
5946 ++ if (snd_BUG_ON(channel >= hdsp->max_channels))
5947 + return -EINVAL;
5948 ++ channel = array_index_nospec(channel, hdsp->max_channels);
5949 +
5950 +- if ((mapped_channel = hdsp->channel_map[info->channel]) < 0)
5951 ++ if (hdsp->channel_map[channel] < 0)
5952 + return -EINVAL;
5953 +
5954 +- info->offset = mapped_channel * HDSP_CHANNEL_BUFFER_BYTES;
5955 ++ info->offset = hdsp->channel_map[channel] * HDSP_CHANNEL_BUFFER_BYTES;
5956 + info->first = 0;
5957 + info->step = 32;
5958 + return 0;
5959 +diff --git a/sound/soc/intel/boards/cht_bsw_max98090_ti.c b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
5960 +index 9d9f6e41d81c..08a5152e635a 100644
5961 +--- a/sound/soc/intel/boards/cht_bsw_max98090_ti.c
5962 ++++ b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
5963 +@@ -389,6 +389,20 @@ static struct snd_soc_card snd_soc_card_cht = {
5964 + };
5965 +
5966 + static const struct dmi_system_id cht_max98090_quirk_table[] = {
5967 ++ {
5968 ++ /* Clapper model Chromebook */
5969 ++ .matches = {
5970 ++ DMI_MATCH(DMI_PRODUCT_NAME, "Clapper"),
5971 ++ },
5972 ++ .driver_data = (void *)QUIRK_PMC_PLT_CLK_0,
5973 ++ },
5974 ++ {
5975 ++ /* Gnawty model Chromebook (Acer Chromebook CB3-111) */
5976 ++ .matches = {
5977 ++ DMI_MATCH(DMI_PRODUCT_NAME, "Gnawty"),
5978 ++ },
5979 ++ .driver_data = (void *)QUIRK_PMC_PLT_CLK_0,
5980 ++ },
5981 + {
5982 + /* Swanky model Chromebook (Toshiba Chromebook 2) */
5983 + .matches = {
5984 +diff --git a/sound/synth/emux/emux_hwdep.c b/sound/synth/emux/emux_hwdep.c
5985 +index e557946718a9..d9fcae071b47 100644
5986 +--- a/sound/synth/emux/emux_hwdep.c
5987 ++++ b/sound/synth/emux/emux_hwdep.c
5988 +@@ -22,9 +22,9 @@
5989 + #include <sound/core.h>
5990 + #include <sound/hwdep.h>
5991 + #include <linux/uaccess.h>
5992 ++#include <linux/nospec.h>
5993 + #include "emux_voice.h"
5994 +
5995 +-
5996 + #define TMP_CLIENT_ID 0x1001
5997 +
5998 + /*
5999 +@@ -66,13 +66,16 @@ snd_emux_hwdep_misc_mode(struct snd_emux *emu, void __user *arg)
6000 + return -EFAULT;
6001 + if (info.mode < 0 || info.mode >= EMUX_MD_END)
6002 + return -EINVAL;
6003 ++ info.mode = array_index_nospec(info.mode, EMUX_MD_END);
6004 +
6005 + if (info.port < 0) {
6006 + for (i = 0; i < emu->num_ports; i++)
6007 + emu->portptrs[i]->ctrls[info.mode] = info.value;
6008 + } else {
6009 +- if (info.port < emu->num_ports)
6010 ++ if (info.port < emu->num_ports) {
6011 ++ info.port = array_index_nospec(info.port, emu->num_ports);
6012 + emu->portptrs[info.port]->ctrls[info.mode] = info.value;
6013 ++ }
6014 + }
6015 + return 0;
6016 + }
6017 +diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
6018 +index 3692f29fee46..70144b98141c 100644
6019 +--- a/tools/lib/traceevent/event-parse.c
6020 ++++ b/tools/lib/traceevent/event-parse.c
6021 +@@ -4970,6 +4970,7 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct tep_e
6022 +
6023 + if (arg->type == TEP_PRINT_BSTRING) {
6024 + trace_seq_puts(s, arg->string.string);
6025 ++ arg = arg->next;
6026 + break;
6027 + }
6028 +
6029 +diff --git a/tools/perf/arch/common.c b/tools/perf/arch/common.c
6030 +index 82657c01a3b8..5f69fd0b745a 100644
6031 +--- a/tools/perf/arch/common.c
6032 ++++ b/tools/perf/arch/common.c
6033 +@@ -200,3 +200,13 @@ int perf_env__lookup_objdump(struct perf_env *env, const char **path)
6034 +
6035 + return perf_env__lookup_binutils_path(env, "objdump", path);
6036 + }
6037 ++
6038 ++/*
6039 ++ * Some architectures have a single address space for kernel and user addresses,
6040 ++ * which makes it possible to determine if an address is in kernel space or user
6041 ++ * space.
6042 ++ */
6043 ++bool perf_env__single_address_space(struct perf_env *env)
6044 ++{
6045 ++ return strcmp(perf_env__arch(env), "sparc");
6046 ++}
6047 +diff --git a/tools/perf/arch/common.h b/tools/perf/arch/common.h
6048 +index 2167001b18c5..c298a446d1f6 100644
6049 +--- a/tools/perf/arch/common.h
6050 ++++ b/tools/perf/arch/common.h
6051 +@@ -5,5 +5,6 @@
6052 + #include "../util/env.h"
6053 +
6054 + int perf_env__lookup_objdump(struct perf_env *env, const char **path);
6055 ++bool perf_env__single_address_space(struct perf_env *env);
6056 +
6057 + #endif /* ARCH_PERF_COMMON_H */
6058 +diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
6059 +index b5bc85bd0bbe..a7b4d3f611c5 100644
6060 +--- a/tools/perf/builtin-script.c
6061 ++++ b/tools/perf/builtin-script.c
6062 +@@ -728,8 +728,8 @@ static int perf_sample__fprintf_brstack(struct perf_sample *sample,
6063 + if (PRINT_FIELD(DSO)) {
6064 + memset(&alf, 0, sizeof(alf));
6065 + memset(&alt, 0, sizeof(alt));
6066 +- thread__find_map(thread, sample->cpumode, from, &alf);
6067 +- thread__find_map(thread, sample->cpumode, to, &alt);
6068 ++ thread__find_map_fb(thread, sample->cpumode, from, &alf);
6069 ++ thread__find_map_fb(thread, sample->cpumode, to, &alt);
6070 + }
6071 +
6072 + printed += fprintf(fp, " 0x%"PRIx64, from);
6073 +@@ -775,8 +775,8 @@ static int perf_sample__fprintf_brstacksym(struct perf_sample *sample,
6074 + from = br->entries[i].from;
6075 + to = br->entries[i].to;
6076 +
6077 +- thread__find_symbol(thread, sample->cpumode, from, &alf);
6078 +- thread__find_symbol(thread, sample->cpumode, to, &alt);
6079 ++ thread__find_symbol_fb(thread, sample->cpumode, from, &alf);
6080 ++ thread__find_symbol_fb(thread, sample->cpumode, to, &alt);
6081 +
6082 + printed += symbol__fprintf_symname_offs(alf.sym, &alf, fp);
6083 + if (PRINT_FIELD(DSO)) {
6084 +@@ -820,11 +820,11 @@ static int perf_sample__fprintf_brstackoff(struct perf_sample *sample,
6085 + from = br->entries[i].from;
6086 + to = br->entries[i].to;
6087 +
6088 +- if (thread__find_map(thread, sample->cpumode, from, &alf) &&
6089 ++ if (thread__find_map_fb(thread, sample->cpumode, from, &alf) &&
6090 + !alf.map->dso->adjust_symbols)
6091 + from = map__map_ip(alf.map, from);
6092 +
6093 +- if (thread__find_map(thread, sample->cpumode, to, &alt) &&
6094 ++ if (thread__find_map_fb(thread, sample->cpumode, to, &alt) &&
6095 + !alt.map->dso->adjust_symbols)
6096 + to = map__map_ip(alt.map, to);
6097 +
6098 +diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
6099 +index 59f38c7693f8..4c23779e271a 100644
6100 +--- a/tools/perf/util/env.c
6101 ++++ b/tools/perf/util/env.c
6102 +@@ -166,7 +166,7 @@ const char *perf_env__arch(struct perf_env *env)
6103 + struct utsname uts;
6104 + char *arch_name;
6105 +
6106 +- if (!env) { /* Assume local operation */
6107 ++ if (!env || !env->arch) { /* Assume local operation */
6108 + if (uname(&uts) < 0)
6109 + return NULL;
6110 + arch_name = uts.machine;
6111 +diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
6112 +index e9c108a6b1c3..24493200cf80 100644
6113 +--- a/tools/perf/util/event.c
6114 ++++ b/tools/perf/util/event.c
6115 +@@ -1577,6 +1577,24 @@ struct map *thread__find_map(struct thread *thread, u8 cpumode, u64 addr,
6116 + return al->map;
6117 + }
6118 +
6119 ++/*
6120 ++ * For branch stacks or branch samples, the sample cpumode might not be correct
6121 ++ * because it applies only to the sample 'ip' and not necessary to 'addr' or
6122 ++ * branch stack addresses. If possible, use a fallback to deal with those cases.
6123 ++ */
6124 ++struct map *thread__find_map_fb(struct thread *thread, u8 cpumode, u64 addr,
6125 ++ struct addr_location *al)
6126 ++{
6127 ++ struct map *map = thread__find_map(thread, cpumode, addr, al);
6128 ++ struct machine *machine = thread->mg->machine;
6129 ++ u8 addr_cpumode = machine__addr_cpumode(machine, cpumode, addr);
6130 ++
6131 ++ if (map || addr_cpumode == cpumode)
6132 ++ return map;
6133 ++
6134 ++ return thread__find_map(thread, addr_cpumode, addr, al);
6135 ++}
6136 ++
6137 + struct symbol *thread__find_symbol(struct thread *thread, u8 cpumode,
6138 + u64 addr, struct addr_location *al)
6139 + {
6140 +@@ -1586,6 +1604,15 @@ struct symbol *thread__find_symbol(struct thread *thread, u8 cpumode,
6141 + return al->sym;
6142 + }
6143 +
6144 ++struct symbol *thread__find_symbol_fb(struct thread *thread, u8 cpumode,
6145 ++ u64 addr, struct addr_location *al)
6146 ++{
6147 ++ al->sym = NULL;
6148 ++ if (thread__find_map_fb(thread, cpumode, addr, al))
6149 ++ al->sym = map__find_symbol(al->map, al->addr);
6150 ++ return al->sym;
6151 ++}
6152 ++
6153 + /*
6154 + * Callers need to drop the reference to al->thread, obtained in
6155 + * machine__findnew_thread()
6156 +@@ -1679,7 +1706,7 @@ bool sample_addr_correlates_sym(struct perf_event_attr *attr)
6157 + void thread__resolve(struct thread *thread, struct addr_location *al,
6158 + struct perf_sample *sample)
6159 + {
6160 +- thread__find_map(thread, sample->cpumode, sample->addr, al);
6161 ++ thread__find_map_fb(thread, sample->cpumode, sample->addr, al);
6162 +
6163 + al->cpu = sample->cpu;
6164 + al->sym = NULL;
6165 +diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
6166 +index 8f36ce813bc5..9397e3f2444d 100644
6167 +--- a/tools/perf/util/machine.c
6168 ++++ b/tools/perf/util/machine.c
6169 +@@ -2592,6 +2592,33 @@ int machine__get_kernel_start(struct machine *machine)
6170 + return err;
6171 + }
6172 +
6173 ++u8 machine__addr_cpumode(struct machine *machine, u8 cpumode, u64 addr)
6174 ++{
6175 ++ u8 addr_cpumode = cpumode;
6176 ++ bool kernel_ip;
6177 ++
6178 ++ if (!machine->single_address_space)
6179 ++ goto out;
6180 ++
6181 ++ kernel_ip = machine__kernel_ip(machine, addr);
6182 ++ switch (cpumode) {
6183 ++ case PERF_RECORD_MISC_KERNEL:
6184 ++ case PERF_RECORD_MISC_USER:
6185 ++ addr_cpumode = kernel_ip ? PERF_RECORD_MISC_KERNEL :
6186 ++ PERF_RECORD_MISC_USER;
6187 ++ break;
6188 ++ case PERF_RECORD_MISC_GUEST_KERNEL:
6189 ++ case PERF_RECORD_MISC_GUEST_USER:
6190 ++ addr_cpumode = kernel_ip ? PERF_RECORD_MISC_GUEST_KERNEL :
6191 ++ PERF_RECORD_MISC_GUEST_USER;
6192 ++ break;
6193 ++ default:
6194 ++ break;
6195 ++ }
6196 ++out:
6197 ++ return addr_cpumode;
6198 ++}
6199 ++
6200 + struct dso *machine__findnew_dso(struct machine *machine, const char *filename)
6201 + {
6202 + return dsos__findnew(&machine->dsos, filename);
6203 +diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
6204 +index d856b85862e2..ebde3ea70225 100644
6205 +--- a/tools/perf/util/machine.h
6206 ++++ b/tools/perf/util/machine.h
6207 +@@ -42,6 +42,7 @@ struct machine {
6208 + u16 id_hdr_size;
6209 + bool comm_exec;
6210 + bool kptr_restrict_warned;
6211 ++ bool single_address_space;
6212 + char *root_dir;
6213 + char *mmap_name;
6214 + struct threads threads[THREADS__TABLE_SIZE];
6215 +@@ -99,6 +100,8 @@ static inline bool machine__kernel_ip(struct machine *machine, u64 ip)
6216 + return ip >= kernel_start;
6217 + }
6218 +
6219 ++u8 machine__addr_cpumode(struct machine *machine, u8 cpumode, u64 addr);
6220 ++
6221 + struct thread *machine__find_thread(struct machine *machine, pid_t pid,
6222 + pid_t tid);
6223 + struct comm *machine__thread_exec_comm(struct machine *machine,
6224 +diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
6225 +index 7e49baad304d..7348eea0248f 100644
6226 +--- a/tools/perf/util/pmu.c
6227 ++++ b/tools/perf/util/pmu.c
6228 +@@ -145,7 +145,7 @@ static int perf_pmu__parse_scale(struct perf_pmu_alias *alias, char *dir, char *
6229 + int fd, ret = -1;
6230 + char path[PATH_MAX];
6231 +
6232 +- snprintf(path, PATH_MAX, "%s/%s.scale", dir, name);
6233 ++ scnprintf(path, PATH_MAX, "%s/%s.scale", dir, name);
6234 +
6235 + fd = open(path, O_RDONLY);
6236 + if (fd == -1)
6237 +@@ -175,7 +175,7 @@ static int perf_pmu__parse_unit(struct perf_pmu_alias *alias, char *dir, char *n
6238 + ssize_t sret;
6239 + int fd;
6240 +
6241 +- snprintf(path, PATH_MAX, "%s/%s.unit", dir, name);
6242 ++ scnprintf(path, PATH_MAX, "%s/%s.unit", dir, name);
6243 +
6244 + fd = open(path, O_RDONLY);
6245 + if (fd == -1)
6246 +@@ -205,7 +205,7 @@ perf_pmu__parse_per_pkg(struct perf_pmu_alias *alias, char *dir, char *name)
6247 + char path[PATH_MAX];
6248 + int fd;
6249 +
6250 +- snprintf(path, PATH_MAX, "%s/%s.per-pkg", dir, name);
6251 ++ scnprintf(path, PATH_MAX, "%s/%s.per-pkg", dir, name);
6252 +
6253 + fd = open(path, O_RDONLY);
6254 + if (fd == -1)
6255 +@@ -223,7 +223,7 @@ static int perf_pmu__parse_snapshot(struct perf_pmu_alias *alias,
6256 + char path[PATH_MAX];
6257 + int fd;
6258 +
6259 +- snprintf(path, PATH_MAX, "%s/%s.snapshot", dir, name);
6260 ++ scnprintf(path, PATH_MAX, "%s/%s.snapshot", dir, name);
6261 +
6262 + fd = open(path, O_RDONLY);
6263 + if (fd == -1)
6264 +diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
6265 +index 69aa93d4ee99..0c4b050f6fc2 100644
6266 +--- a/tools/perf/util/scripting-engines/trace-event-python.c
6267 ++++ b/tools/perf/util/scripting-engines/trace-event-python.c
6268 +@@ -494,14 +494,14 @@ static PyObject *python_process_brstack(struct perf_sample *sample,
6269 + pydict_set_item_string_decref(pyelem, "cycles",
6270 + PyLong_FromUnsignedLongLong(br->entries[i].flags.cycles));
6271 +
6272 +- thread__find_map(thread, sample->cpumode,
6273 +- br->entries[i].from, &al);
6274 ++ thread__find_map_fb(thread, sample->cpumode,
6275 ++ br->entries[i].from, &al);
6276 + dsoname = get_dsoname(al.map);
6277 + pydict_set_item_string_decref(pyelem, "from_dsoname",
6278 + _PyUnicode_FromString(dsoname));
6279 +
6280 +- thread__find_map(thread, sample->cpumode,
6281 +- br->entries[i].to, &al);
6282 ++ thread__find_map_fb(thread, sample->cpumode,
6283 ++ br->entries[i].to, &al);
6284 + dsoname = get_dsoname(al.map);
6285 + pydict_set_item_string_decref(pyelem, "to_dsoname",
6286 + _PyUnicode_FromString(dsoname));
6287 +@@ -576,14 +576,14 @@ static PyObject *python_process_brstacksym(struct perf_sample *sample,
6288 + if (!pyelem)
6289 + Py_FatalError("couldn't create Python dictionary");
6290 +
6291 +- thread__find_symbol(thread, sample->cpumode,
6292 +- br->entries[i].from, &al);
6293 ++ thread__find_symbol_fb(thread, sample->cpumode,
6294 ++ br->entries[i].from, &al);
6295 + get_symoff(al.sym, &al, true, bf, sizeof(bf));
6296 + pydict_set_item_string_decref(pyelem, "from",
6297 + _PyUnicode_FromString(bf));
6298 +
6299 +- thread__find_symbol(thread, sample->cpumode,
6300 +- br->entries[i].to, &al);
6301 ++ thread__find_symbol_fb(thread, sample->cpumode,
6302 ++ br->entries[i].to, &al);
6303 + get_symoff(al.sym, &al, true, bf, sizeof(bf));
6304 + pydict_set_item_string_decref(pyelem, "to",
6305 + _PyUnicode_FromString(bf));
6306 +diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
6307 +index 7d2c8ce6cfad..f8eab197f35c 100644
6308 +--- a/tools/perf/util/session.c
6309 ++++ b/tools/perf/util/session.c
6310 +@@ -24,6 +24,7 @@
6311 + #include "thread.h"
6312 + #include "thread-stack.h"
6313 + #include "stat.h"
6314 ++#include "arch/common.h"
6315 +
6316 + static int perf_session__deliver_event(struct perf_session *session,
6317 + union perf_event *event,
6318 +@@ -150,6 +151,9 @@ struct perf_session *perf_session__new(struct perf_data *data,
6319 + session->machines.host.env = &perf_env;
6320 + }
6321 +
6322 ++ session->machines.host.single_address_space =
6323 ++ perf_env__single_address_space(session->machines.host.env);
6324 ++
6325 + if (!data || perf_data__is_write(data)) {
6326 + /*
6327 + * In O_RDONLY mode this will be performed when reading the
6328 +diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
6329 +index 30e2b4c165fe..5920c3bb8ffe 100644
6330 +--- a/tools/perf/util/thread.h
6331 ++++ b/tools/perf/util/thread.h
6332 +@@ -96,9 +96,13 @@ struct thread *thread__main_thread(struct machine *machine, struct thread *threa
6333 +
6334 + struct map *thread__find_map(struct thread *thread, u8 cpumode, u64 addr,
6335 + struct addr_location *al);
6336 ++struct map *thread__find_map_fb(struct thread *thread, u8 cpumode, u64 addr,
6337 ++ struct addr_location *al);
6338 +
6339 + struct symbol *thread__find_symbol(struct thread *thread, u8 cpumode,
6340 + u64 addr, struct addr_location *al);
6341 ++struct symbol *thread__find_symbol_fb(struct thread *thread, u8 cpumode,
6342 ++ u64 addr, struct addr_location *al);
6343 +
6344 + void thread__find_cpumode_addr_location(struct thread *thread, u64 addr,
6345 + struct addr_location *al);
6346 +diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
6347 +index 23774970c9df..abcd29db2d7a 100644
6348 +--- a/virt/kvm/arm/arm.c
6349 ++++ b/virt/kvm/arm/arm.c
6350 +@@ -66,7 +66,7 @@ static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
6351 + static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
6352 + static u32 kvm_next_vmid;
6353 + static unsigned int kvm_vmid_bits __read_mostly;
6354 +-static DEFINE_RWLOCK(kvm_vmid_lock);
6355 ++static DEFINE_SPINLOCK(kvm_vmid_lock);
6356 +
6357 + static bool vgic_present;
6358 +
6359 +@@ -484,7 +484,9 @@ void force_vm_exit(const cpumask_t *mask)
6360 + */
6361 + static bool need_new_vmid_gen(struct kvm *kvm)
6362 + {
6363 +- return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
6364 ++ u64 current_vmid_gen = atomic64_read(&kvm_vmid_gen);
6365 ++ smp_rmb(); /* Orders read of kvm_vmid_gen and kvm->arch.vmid */
6366 ++ return unlikely(READ_ONCE(kvm->arch.vmid_gen) != current_vmid_gen);
6367 + }
6368 +
6369 + /**
6370 +@@ -499,16 +501,11 @@ static void update_vttbr(struct kvm *kvm)
6371 + {
6372 + phys_addr_t pgd_phys;
6373 + u64 vmid, cnp = kvm_cpu_has_cnp() ? VTTBR_CNP_BIT : 0;
6374 +- bool new_gen;
6375 +
6376 +- read_lock(&kvm_vmid_lock);
6377 +- new_gen = need_new_vmid_gen(kvm);
6378 +- read_unlock(&kvm_vmid_lock);
6379 +-
6380 +- if (!new_gen)
6381 ++ if (!need_new_vmid_gen(kvm))
6382 + return;
6383 +
6384 +- write_lock(&kvm_vmid_lock);
6385 ++ spin_lock(&kvm_vmid_lock);
6386 +
6387 + /*
6388 + * We need to re-check the vmid_gen here to ensure that if another vcpu
6389 +@@ -516,7 +513,7 @@ static void update_vttbr(struct kvm *kvm)
6390 + * use the same vmid.
6391 + */
6392 + if (!need_new_vmid_gen(kvm)) {
6393 +- write_unlock(&kvm_vmid_lock);
6394 ++ spin_unlock(&kvm_vmid_lock);
6395 + return;
6396 + }
6397 +
6398 +@@ -539,7 +536,6 @@ static void update_vttbr(struct kvm *kvm)
6399 + kvm_call_hyp(__kvm_flush_vm_context);
6400 + }
6401 +
6402 +- kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
6403 + kvm->arch.vmid = kvm_next_vmid;
6404 + kvm_next_vmid++;
6405 + kvm_next_vmid &= (1 << kvm_vmid_bits) - 1;
6406 +@@ -550,7 +546,10 @@ static void update_vttbr(struct kvm *kvm)
6407 + vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits);
6408 + kvm->arch.vttbr = kvm_phys_to_vttbr(pgd_phys) | vmid | cnp;
6409 +
6410 +- write_unlock(&kvm_vmid_lock);
6411 ++ smp_wmb();
6412 ++ WRITE_ONCE(kvm->arch.vmid_gen, atomic64_read(&kvm_vmid_gen));
6413 ++
6414 ++ spin_unlock(&kvm_vmid_lock);
6415 + }
6416 +
6417 + static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
6418 +diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
6419 +index f56ff1cf52ec..ceeda7e04a4d 100644
6420 +--- a/virt/kvm/arm/vgic/vgic-mmio.c
6421 ++++ b/virt/kvm/arm/vgic/vgic-mmio.c
6422 +@@ -313,36 +313,30 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
6423 +
6424 + spin_lock_irqsave(&irq->irq_lock, flags);
6425 +
6426 +- /*
6427 +- * If this virtual IRQ was written into a list register, we
6428 +- * have to make sure the CPU that runs the VCPU thread has
6429 +- * synced back the LR state to the struct vgic_irq.
6430 +- *
6431 +- * As long as the conditions below are true, we know the VCPU thread
6432 +- * may be on its way back from the guest (we kicked the VCPU thread in
6433 +- * vgic_change_active_prepare) and still has to sync back this IRQ,
6434 +- * so we release and re-acquire the spin_lock to let the other thread
6435 +- * sync back the IRQ.
6436 +- *
6437 +- * When accessing VGIC state from user space, requester_vcpu is
6438 +- * NULL, which is fine, because we guarantee that no VCPUs are running
6439 +- * when accessing VGIC state from user space so irq->vcpu->cpu is
6440 +- * always -1.
6441 +- */
6442 +- while (irq->vcpu && /* IRQ may have state in an LR somewhere */
6443 +- irq->vcpu != requester_vcpu && /* Current thread is not the VCPU thread */
6444 +- irq->vcpu->cpu != -1) /* VCPU thread is running */
6445 +- cond_resched_lock(&irq->irq_lock);
6446 +-
6447 + if (irq->hw) {
6448 + vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu);
6449 + } else {
6450 + u32 model = vcpu->kvm->arch.vgic.vgic_model;
6451 ++ u8 active_source;
6452 +
6453 + irq->active = active;
6454 ++
6455 ++ /*
6456 ++ * The GICv2 architecture indicates that the source CPUID for
6457 ++ * an SGI should be provided during an EOI which implies that
6458 ++ * the active state is stored somewhere, but at the same time
6459 ++ * this state is not architecturally exposed anywhere and we
6460 ++ * have no way of knowing the right source.
6461 ++ *
6462 ++ * This may lead to a VCPU not being able to receive
6463 ++ * additional instances of a particular SGI after migration
6464 ++ * for a GICv2 VM on some GIC implementations. Oh well.
6465 ++ */
6466 ++ active_source = (requester_vcpu) ? requester_vcpu->vcpu_id : 0;
6467 ++
6468 + if (model == KVM_DEV_TYPE_ARM_VGIC_V2 &&
6469 + active && vgic_irq_is_sgi(irq->intid))
6470 +- irq->active_source = requester_vcpu->vcpu_id;
6471 ++ irq->active_source = active_source;
6472 + }
6473 +
6474 + if (irq->active)
6475 +@@ -368,14 +362,16 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
6476 + */
6477 + static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
6478 + {
6479 +- if (intid > VGIC_NR_PRIVATE_IRQS)
6480 ++ if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
6481 ++ intid > VGIC_NR_PRIVATE_IRQS)
6482 + kvm_arm_halt_guest(vcpu->kvm);
6483 + }
6484 +
6485 + /* See vgic_change_active_prepare */
6486 + static void vgic_change_active_finish(struct kvm_vcpu *vcpu, u32 intid)
6487 + {
6488 +- if (intid > VGIC_NR_PRIVATE_IRQS)
6489 ++ if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
6490 ++ intid > VGIC_NR_PRIVATE_IRQS)
6491 + kvm_arm_resume_guest(vcpu->kvm);
6492 + }
6493 +
6494 +diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
6495 +index 7cfdfbc910e0..f884a54b2601 100644
6496 +--- a/virt/kvm/arm/vgic/vgic.c
6497 ++++ b/virt/kvm/arm/vgic/vgic.c
6498 +@@ -103,13 +103,13 @@ struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
6499 + {
6500 + /* SGIs and PPIs */
6501 + if (intid <= VGIC_MAX_PRIVATE) {
6502 +- intid = array_index_nospec(intid, VGIC_MAX_PRIVATE);
6503 ++ intid = array_index_nospec(intid, VGIC_MAX_PRIVATE + 1);
6504 + return &vcpu->arch.vgic_cpu.private_irqs[intid];
6505 + }
6506 +
6507 + /* SPIs */
6508 +- if (intid <= VGIC_MAX_SPI) {
6509 +- intid = array_index_nospec(intid, VGIC_MAX_SPI);
6510 ++ if (intid < (kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS)) {
6511 ++ intid = array_index_nospec(intid, kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS);
6512 + return &kvm->arch.vgic.spis[intid - VGIC_NR_PRIVATE_IRQS];
6513 + }
6514 +