From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.4 commit in: /
Date: Thu, 09 Jan 2020 11:17:38 +0000
Message-Id: 1578568635.8cfb80cdfb8bad49484b322c40f95304bbf996a7.mpagano@gentoo
1 commit: 8cfb80cdfb8bad49484b322c40f95304bbf996a7
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Thu Jan 9 11:17:15 2020 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Thu Jan 9 11:17:15 2020 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=8cfb80cd
7
8 Linux patches 5.4.9 and 5.4.10
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 8 +
13 1008_linux-5.4.9.patch | 7489 +++++++++++++++++++++++++++++++++++++++++++++++
14 1009_linux-5.4.10.patch | 26 +
15 3 files changed, 7523 insertions(+)
16
17 diff --git a/0000_README b/0000_README
18 index 3519610..f39e13c 100644
19 --- a/0000_README
20 +++ b/0000_README
21 @@ -75,6 +75,14 @@ Patch: 1007_linux-5.4.8.patch
22 From: http://www.kernel.org
23 Desc: Linux 5.4.8
24
25 +Patch: 1008_linux-5.4.9.patch
26 +From: http://www.kernel.org
27 +Desc: Linux 5.4.9
28 +
29 +Patch: 1009_linux-5.4.10.patch
30 +From: http://www.kernel.org
31 +Desc: Linux 5.4.10
32 +
33 Patch: 1500_XATTR_USER_PREFIX.patch
34 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
35 Desc: Support for namespace user.pax.* on tmpfs.
36
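The 0000_README hunk above shows the convention that drives patch application: each Patch:/From:/Desc: stanza registers one file, and the numeric prefix orders the series (incremental point releases such as 1008_linux-5.4.9.patch and 1009_linux-5.4.10.patch in the 1000s, feature patches such as 1500_XATTR_USER_PREFIX.patch above them). The sketch below only illustrates consuming that format and is not Gentoo's actual tooling; the directory layout, the vanilla 5.4 base tree, and the plain patch -p1 invocation are all assumptions.

#!/usr/bin/env python3
# Sketch: parse a 0000_README-style patch list and apply the entries in
# numeric-prefix order. Paths and the `patch -p1` invocation are assumed
# for illustration only.
import re
import subprocess
from pathlib import Path

PATCH_DIR = Path(".")             # assumed: holds 0000_README and the patches
KERNEL_TREE = Path("linux-5.4")   # assumed: unpacked vanilla 5.4 source tree

def read_patch_list(readme: Path) -> list[str]:
    """Collect the file name from every 'Patch:' stanza in the README."""
    names = []
    for line in readme.read_text().splitlines():
        m = re.match(r"Patch:\s+(\S+)", line)
        if m:
            names.append(m.group(1))
    # All prefixes are equal-width numbers (1008_, 1009_, 1500_, ...),
    # so a lexicographic sort reproduces the intended application order.
    return sorted(names)

def apply_patches(names: list[str]) -> None:
    for name in names:
        patch_file = (PATCH_DIR / name).resolve()  # absolute path: cwd changes below
        print(f"applying {name}")
        subprocess.run(["patch", "-p1", "-i", str(patch_file)],
                       cwd=KERNEL_TREE, check=True)

if __name__ == "__main__":
    apply_patches(read_patch_list(PATCH_DIR / "0000_README"))

Here check=True aborts the series at the first failed hunk, which is what an ordered set like this needs: 1009_linux-5.4.10.patch (the 5.4.9 to 5.4.10 increment) only applies on top of a tree that has already taken 1008_linux-5.4.9.patch.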
37 diff --git a/1008_linux-5.4.9.patch b/1008_linux-5.4.9.patch
38 new file mode 100644
39 index 0000000..fcc9128
40 --- /dev/null
41 +++ b/1008_linux-5.4.9.patch
42 @@ -0,0 +1,7489 @@
43 +diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
44 +index f5a551e4332d..5594c8bf1dcd 100644
45 +--- a/Documentation/admin-guide/kernel-parameters.txt
46 ++++ b/Documentation/admin-guide/kernel-parameters.txt
47 +@@ -113,7 +113,7 @@
48 + the GPE dispatcher.
49 + This facility can be used to prevent such uncontrolled
50 + GPE floodings.
51 +- Format: <int>
52 ++ Format: <byte>
53 +
54 + acpi_no_auto_serialize [HW,ACPI]
55 + Disable auto-serialization of AML methods
56 +diff --git a/Documentation/devicetree/bindings/clock/renesas,rcar-usb2-clock-sel.txt b/Documentation/devicetree/bindings/clock/renesas,rcar-usb2-clock-sel.txt
57 +index e96e085271c1..83f6c6a7c41c 100644
58 +--- a/Documentation/devicetree/bindings/clock/renesas,rcar-usb2-clock-sel.txt
59 ++++ b/Documentation/devicetree/bindings/clock/renesas,rcar-usb2-clock-sel.txt
60 +@@ -46,7 +46,7 @@ Required properties:
61 + Example (R-Car H3):
62 +
63 + usb2_clksel: clock-controller@e6590630 {
64 +- compatible = "renesas,r8a77950-rcar-usb2-clock-sel",
65 ++ compatible = "renesas,r8a7795-rcar-usb2-clock-sel",
66 + "renesas,rcar-gen3-usb2-clock-sel";
67 + reg = <0 0xe6590630 0 0x02>;
68 + clocks = <&cpg CPG_MOD 703>, <&usb_extal>, <&usb_xtal>;
69 +diff --git a/Makefile b/Makefile
70 +index 1adee1b06f3d..3ba15c3528c8 100644
71 +--- a/Makefile
72 ++++ b/Makefile
73 +@@ -1,7 +1,7 @@
74 + # SPDX-License-Identifier: GPL-2.0
75 + VERSION = 5
76 + PATCHLEVEL = 4
77 +-SUBLEVEL = 8
78 ++SUBLEVEL = 9
79 + EXTRAVERSION =
80 + NAME = Kleptomaniac Octopus
81 +
82 +diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
83 +index 6039adda12ee..b0b12e389835 100644
84 +--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
85 ++++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
86 +@@ -296,7 +296,7 @@
87 + };
88 +
89 + &usb0_phy {
90 +- status = "okay";
91 ++ status = "disabled";
92 + phy-supply = <&usb_otg_pwr>;
93 + };
94 +
95 +@@ -306,7 +306,7 @@
96 + };
97 +
98 + &usb0 {
99 +- status = "okay";
100 ++ status = "disabled";
101 + };
102 +
103 + &usb1 {
104 +diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-khadas-vim.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-khadas-vim.dts
105 +index 2a5cd303123d..8d6f316a5c7b 100644
106 +--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-khadas-vim.dts
107 ++++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-khadas-vim.dts
108 +@@ -192,6 +192,9 @@
109 + bluetooth {
110 + compatible = "brcm,bcm43438-bt";
111 + shutdown-gpios = <&gpio GPIOX_17 GPIO_ACTIVE_HIGH>;
112 ++ max-speed = <2000000>;
113 ++ clocks = <&wifi32k>;
114 ++ clock-names = "lpo";
115 + };
116 + };
117 +
118 +diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts
119 +index f25ddd18a607..4d67eb715b91 100644
120 +--- a/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts
121 ++++ b/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts
122 +@@ -409,6 +409,9 @@
123 + bluetooth {
124 + compatible = "brcm,bcm43438-bt";
125 + shutdown-gpios = <&gpio GPIOX_17 GPIO_ACTIVE_HIGH>;
126 ++ max-speed = <2000000>;
127 ++ clocks = <&wifi32k>;
128 ++ clock-names = "lpo";
129 + };
130 + };
131 +
132 +diff --git a/arch/arm64/boot/dts/qcom/msm8998-clamshell.dtsi b/arch/arm64/boot/dts/qcom/msm8998-clamshell.dtsi
133 +index 9682d4dd7496..1bae90705746 100644
134 +--- a/arch/arm64/boot/dts/qcom/msm8998-clamshell.dtsi
135 ++++ b/arch/arm64/boot/dts/qcom/msm8998-clamshell.dtsi
136 +@@ -23,6 +23,43 @@
137 + };
138 + };
139 +
140 ++/*
141 ++ * The laptop FW does not appear to support the retention state as it is
142 ++ * not advertised as enabled in ACPI, and enabling it in DT can cause boot
143 ++ * hangs.
144 ++ */
145 ++&CPU0 {
146 ++ cpu-idle-states = <&LITTLE_CPU_SLEEP_1>;
147 ++};
148 ++
149 ++&CPU1 {
150 ++ cpu-idle-states = <&LITTLE_CPU_SLEEP_1>;
151 ++};
152 ++
153 ++&CPU2 {
154 ++ cpu-idle-states = <&LITTLE_CPU_SLEEP_1>;
155 ++};
156 ++
157 ++&CPU3 {
158 ++ cpu-idle-states = <&LITTLE_CPU_SLEEP_1>;
159 ++};
160 ++
161 ++&CPU4 {
162 ++ cpu-idle-states = <&BIG_CPU_SLEEP_1>;
163 ++};
164 ++
165 ++&CPU5 {
166 ++ cpu-idle-states = <&BIG_CPU_SLEEP_1>;
167 ++};
168 ++
169 ++&CPU6 {
170 ++ cpu-idle-states = <&BIG_CPU_SLEEP_1>;
171 ++};
172 ++
173 ++&CPU7 {
174 ++ cpu-idle-states = <&BIG_CPU_SLEEP_1>;
175 ++};
176 ++
177 + &qusb2phy {
178 + status = "okay";
179 +
180 +diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
181 +index 8dc6c5cdabe6..baf52baaa2a5 100644
182 +--- a/arch/arm64/include/asm/pgtable-prot.h
183 ++++ b/arch/arm64/include/asm/pgtable-prot.h
184 +@@ -85,13 +85,12 @@
185 + #define PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_WRITE)
186 + #define PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
187 + #define PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN)
188 +-#define PAGE_EXECONLY __pgprot(_PAGE_DEFAULT | PTE_RDONLY | PTE_NG | PTE_PXN)
189 +
190 + #define __P000 PAGE_NONE
191 + #define __P001 PAGE_READONLY
192 + #define __P010 PAGE_READONLY
193 + #define __P011 PAGE_READONLY
194 +-#define __P100 PAGE_EXECONLY
195 ++#define __P100 PAGE_READONLY_EXEC
196 + #define __P101 PAGE_READONLY_EXEC
197 + #define __P110 PAGE_READONLY_EXEC
198 + #define __P111 PAGE_READONLY_EXEC
199 +@@ -100,7 +99,7 @@
200 + #define __S001 PAGE_READONLY
201 + #define __S010 PAGE_SHARED
202 + #define __S011 PAGE_SHARED
203 +-#define __S100 PAGE_EXECONLY
204 ++#define __S100 PAGE_READONLY_EXEC
205 + #define __S101 PAGE_READONLY_EXEC
206 + #define __S110 PAGE_SHARED_EXEC
207 + #define __S111 PAGE_SHARED_EXEC
208 +diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
209 +index 565aa45ef134..13ebe2bad79f 100644
210 +--- a/arch/arm64/include/asm/pgtable.h
211 ++++ b/arch/arm64/include/asm/pgtable.h
212 +@@ -96,12 +96,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
213 + #define pte_dirty(pte) (pte_sw_dirty(pte) || pte_hw_dirty(pte))
214 +
215 + #define pte_valid(pte) (!!(pte_val(pte) & PTE_VALID))
216 +-/*
217 +- * Execute-only user mappings do not have the PTE_USER bit set. All valid
218 +- * kernel mappings have the PTE_UXN bit set.
219 +- */
220 + #define pte_valid_not_user(pte) \
221 +- ((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN))
222 ++ ((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
223 + #define pte_valid_young(pte) \
224 + ((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))
225 + #define pte_valid_user(pte) \
226 +@@ -117,8 +113,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
227 +
228 + /*
229 + * p??_access_permitted() is true for valid user mappings (subject to the
230 +- * write permission check) other than user execute-only which do not have the
231 +- * PTE_USER bit set. PROT_NONE mappings do not have the PTE_VALID bit set.
232 ++ * write permission check). PROT_NONE mappings do not have the PTE_VALID bit
233 ++ * set.
234 + */
235 + #define pte_access_permitted(pte, write) \
236 + (pte_valid_user(pte) && (!(write) || pte_write(pte)))
237 +diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
238 +index 9fc6db0bcbad..d26e6cd28953 100644
239 +--- a/arch/arm64/mm/fault.c
240 ++++ b/arch/arm64/mm/fault.c
241 +@@ -454,7 +454,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
242 + const struct fault_info *inf;
243 + struct mm_struct *mm = current->mm;
244 + vm_fault_t fault, major = 0;
245 +- unsigned long vm_flags = VM_READ | VM_WRITE;
246 ++ unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC;
247 + unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
248 +
249 + if (kprobe_page_fault(regs, esr))
250 +diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
251 +index 60c929f3683b..d10247fab0fd 100644
252 +--- a/arch/arm64/mm/mmu.c
253 ++++ b/arch/arm64/mm/mmu.c
254 +@@ -1069,7 +1069,6 @@ void arch_remove_memory(int nid, u64 start, u64 size,
255 + {
256 + unsigned long start_pfn = start >> PAGE_SHIFT;
257 + unsigned long nr_pages = size >> PAGE_SHIFT;
258 +- struct zone *zone;
259 +
260 + /*
261 + * FIXME: Cleanup page tables (also in arch_add_memory() in case
262 +@@ -1078,7 +1077,6 @@ void arch_remove_memory(int nid, u64 start, u64 size,
263 + * unplug. ARCH_ENABLE_MEMORY_HOTREMOVE must not be
264 + * unlocked yet.
265 + */
266 +- zone = page_zone(pfn_to_page(start_pfn));
267 +- __remove_pages(zone, start_pfn, nr_pages, altmap);
268 ++ __remove_pages(start_pfn, nr_pages, altmap);
269 + }
270 + #endif
271 +diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
272 +index bf9df2625bc8..a6dd80a2c939 100644
273 +--- a/arch/ia64/mm/init.c
274 ++++ b/arch/ia64/mm/init.c
275 +@@ -689,9 +689,7 @@ void arch_remove_memory(int nid, u64 start, u64 size,
276 + {
277 + unsigned long start_pfn = start >> PAGE_SHIFT;
278 + unsigned long nr_pages = size >> PAGE_SHIFT;
279 +- struct zone *zone;
280 +
281 +- zone = page_zone(pfn_to_page(start_pfn));
282 +- __remove_pages(zone, start_pfn, nr_pages, altmap);
283 ++ __remove_pages(start_pfn, nr_pages, altmap);
284 + }
285 + #endif
286 +diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
287 +index a0bd9bdb5f83..e5c2d47608fe 100644
288 +--- a/arch/mips/Kconfig
289 ++++ b/arch/mips/Kconfig
290 +@@ -46,7 +46,7 @@ config MIPS
291 + select HAVE_ARCH_TRACEHOOK
292 + select HAVE_ARCH_TRANSPARENT_HUGEPAGE if CPU_SUPPORTS_HUGEPAGES
293 + select HAVE_ASM_MODVERSIONS
294 +- select HAVE_EBPF_JIT if (!CPU_MICROMIPS)
295 ++ select HAVE_EBPF_JIT if 64BIT && !CPU_MICROMIPS && TARGET_ISA_REV >= 2
296 + select HAVE_CONTEXT_TRACKING
297 + select HAVE_COPY_THREAD_TLS
298 + select HAVE_C_RECORDMCOUNT
299 +diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
300 +index 4993db40482c..ee26f9a4575d 100644
301 +--- a/arch/mips/include/asm/thread_info.h
302 ++++ b/arch/mips/include/asm/thread_info.h
303 +@@ -49,8 +49,26 @@ struct thread_info {
304 + .addr_limit = KERNEL_DS, \
305 + }
306 +
307 +-/* How to get the thread information struct from C. */
308 ++/*
309 ++ * A pointer to the struct thread_info for the currently executing thread is
310 ++ * held in register $28/$gp.
311 ++ *
312 ++ * We declare __current_thread_info as a global register variable rather than a
313 ++ * local register variable within current_thread_info() because clang doesn't
314 ++ * support explicit local register variables.
315 ++ *
316 ++ * When building the VDSO we take care not to declare the global register
317 ++ * variable because this causes GCC to not preserve the value of $28/$gp in
318 ++ * functions that change its value (which is common in the PIC VDSO when
319 ++ * accessing the GOT). Since the VDSO shouldn't be accessing
320 ++ * __current_thread_info anyway we declare it extern in order to cause a link
321 ++ * failure if it's referenced.
322 ++ */
323 ++#ifdef __VDSO__
324 ++extern struct thread_info *__current_thread_info;
325 ++#else
326 + register struct thread_info *__current_thread_info __asm__("$28");
327 ++#endif
328 +
329 + static inline struct thread_info *current_thread_info(void)
330 + {
331 +diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c
332 +index 46b76751f3a5..a2405d5f7d1e 100644
333 +--- a/arch/mips/net/ebpf_jit.c
334 ++++ b/arch/mips/net/ebpf_jit.c
335 +@@ -1803,7 +1803,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
336 + unsigned int image_size;
337 + u8 *image_ptr;
338 +
339 +- if (!prog->jit_requested || MIPS_ISA_REV < 2)
340 ++ if (!prog->jit_requested)
341 + return prog;
342 +
343 + tmp = bpf_jit_blind_constants(prog);
344 +diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
345 +index be941d382c8d..460afa415434 100644
346 +--- a/arch/powerpc/mm/mem.c
347 ++++ b/arch/powerpc/mm/mem.c
348 +@@ -104,6 +104,27 @@ int __weak remove_section_mapping(unsigned long start, unsigned long end)
349 + return -ENODEV;
350 + }
351 +
352 ++#define FLUSH_CHUNK_SIZE SZ_1G
353 ++/**
354 ++ * flush_dcache_range_chunked(): Write any modified data cache blocks out to
355 ++ * memory and invalidate them, in chunks of up to FLUSH_CHUNK_SIZE
356 ++ * Does not invalidate the corresponding instruction cache blocks.
357 ++ *
358 ++ * @start: the start address
359 ++ * @stop: the stop address (exclusive)
360 ++ * @chunk: the max size of the chunks
361 ++ */
362 ++static void flush_dcache_range_chunked(unsigned long start, unsigned long stop,
363 ++ unsigned long chunk)
364 ++{
365 ++ unsigned long i;
366 ++
367 ++ for (i = start; i < stop; i += chunk) {
368 ++ flush_dcache_range(i, min(stop, start + chunk));
369 ++ cond_resched();
370 ++ }
371 ++}
372 ++
373 + int __ref arch_add_memory(int nid, u64 start, u64 size,
374 + struct mhp_restrictions *restrictions)
375 + {
376 +@@ -120,7 +141,8 @@ int __ref arch_add_memory(int nid, u64 start, u64 size,
377 + start, start + size, rc);
378 + return -EFAULT;
379 + }
380 +- flush_dcache_range(start, start + size);
381 ++
382 ++ flush_dcache_range_chunked(start, start + size, FLUSH_CHUNK_SIZE);
383 +
384 + return __add_pages(nid, start_pfn, nr_pages, restrictions);
385 + }
386 +@@ -130,14 +152,14 @@ void __ref arch_remove_memory(int nid, u64 start, u64 size,
387 + {
388 + unsigned long start_pfn = start >> PAGE_SHIFT;
389 + unsigned long nr_pages = size >> PAGE_SHIFT;
390 +- struct page *page = pfn_to_page(start_pfn) + vmem_altmap_offset(altmap);
391 + int ret;
392 +
393 +- __remove_pages(page_zone(page), start_pfn, nr_pages, altmap);
394 ++ __remove_pages(start_pfn, nr_pages, altmap);
395 +
396 + /* Remove htab bolted mappings for this section of memory */
397 + start = (unsigned long)__va(start);
398 +- flush_dcache_range(start, start + size);
399 ++ flush_dcache_range_chunked(start, start + size, FLUSH_CHUNK_SIZE);
400 ++
401 + ret = remove_section_mapping(start, start + size);
402 + WARN_ON_ONCE(ret);
403 +
404 +diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
405 +index 42bbcd47cc85..dffe1a45b6ed 100644
406 +--- a/arch/powerpc/mm/slice.c
407 ++++ b/arch/powerpc/mm/slice.c
408 +@@ -50,7 +50,7 @@ static void slice_print_mask(const char *label, const struct slice_mask *mask) {
409 +
410 + #endif
411 +
412 +-static inline bool slice_addr_is_low(unsigned long addr)
413 ++static inline notrace bool slice_addr_is_low(unsigned long addr)
414 + {
415 + u64 tmp = (u64)addr;
416 +
417 +@@ -659,7 +659,7 @@ unsigned long arch_get_unmapped_area_topdown(struct file *filp,
418 + mm_ctx_user_psize(&current->mm->context), 1);
419 + }
420 +
421 +-unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr)
422 ++unsigned int notrace get_slice_psize(struct mm_struct *mm, unsigned long addr)
423 + {
424 + unsigned char *psizes;
425 + int index, mask_index;
426 +diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c
427 +index b94d8db5ddcc..c40fdcdeb950 100644
428 +--- a/arch/riscv/kernel/ftrace.c
429 ++++ b/arch/riscv/kernel/ftrace.c
430 +@@ -142,7 +142,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
431 + */
432 + old = *parent;
433 +
434 +- if (function_graph_enter(old, self_addr, frame_pointer, parent))
435 ++ if (!function_graph_enter(old, self_addr, frame_pointer, parent))
436 + *parent = return_hooker;
437 + }
438 +
439 +diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
440 +index 7511b71d2931..fdb8083e7870 100644
441 +--- a/arch/s390/kernel/perf_cpum_sf.c
442 ++++ b/arch/s390/kernel/perf_cpum_sf.c
443 +@@ -1313,18 +1313,28 @@ static void hw_perf_event_update(struct perf_event *event, int flush_all)
444 + */
445 + if (flush_all && done)
446 + break;
447 +-
448 +- /* If an event overflow happened, discard samples by
449 +- * processing any remaining sample-data-blocks.
450 +- */
451 +- if (event_overflow)
452 +- flush_all = 1;
453 + }
454 +
455 + /* Account sample overflows in the event hardware structure */
456 + if (sampl_overflow)
457 + OVERFLOW_REG(hwc) = DIV_ROUND_UP(OVERFLOW_REG(hwc) +
458 + sampl_overflow, 1 + num_sdb);
459 ++
460 ++ /* Perf_event_overflow() and perf_event_account_interrupt() limit
461 ++ * the interrupt rate to an upper limit. Roughly 1000 samples per
462 ++ * task tick.
463 ++ * Hitting this limit results in a large number
464 ++ * of throttled REF_REPORT_THROTTLE entries and the samples
465 ++ * are dropped.
466 ++ * Slightly increase the interval to avoid hitting this limit.
467 ++ */
468 ++ if (event_overflow) {
469 ++ SAMPL_RATE(hwc) += DIV_ROUND_UP(SAMPL_RATE(hwc), 10);
470 ++ debug_sprintf_event(sfdbg, 1, "%s: rate adjustment %ld\n",
471 ++ __func__,
472 ++ DIV_ROUND_UP(SAMPL_RATE(hwc), 10));
473 ++ }
474 ++
475 + if (sampl_overflow || event_overflow)
476 + debug_sprintf_event(sfdbg, 4, "hw_perf_event_update: "
477 + "overflow stats: sample=%llu event=%llu\n",
478 +diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
479 +index d95c85780e07..06dddd7c4290 100644
480 +--- a/arch/s390/kernel/smp.c
481 ++++ b/arch/s390/kernel/smp.c
482 +@@ -727,39 +727,67 @@ static void __ref smp_get_core_info(struct sclp_core_info *info, int early)
483 +
484 + static int smp_add_present_cpu(int cpu);
485 +
486 +-static int __smp_rescan_cpus(struct sclp_core_info *info, int sysfs_add)
487 ++static int smp_add_core(struct sclp_core_entry *core, cpumask_t *avail,
488 ++ bool configured, bool early)
489 + {
490 + struct pcpu *pcpu;
491 +- cpumask_t avail;
492 +- int cpu, nr, i, j;
493 ++ int cpu, nr, i;
494 + u16 address;
495 +
496 + nr = 0;
497 +- cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
498 +- cpu = cpumask_first(&avail);
499 +- for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) {
500 +- if (sclp.has_core_type && info->core[i].type != boot_core_type)
501 ++ if (sclp.has_core_type && core->type != boot_core_type)
502 ++ return nr;
503 ++ cpu = cpumask_first(avail);
504 ++ address = core->core_id << smp_cpu_mt_shift;
505 ++ for (i = 0; (i <= smp_cpu_mtid) && (cpu < nr_cpu_ids); i++) {
506 ++ if (pcpu_find_address(cpu_present_mask, address + i))
507 + continue;
508 +- address = info->core[i].core_id << smp_cpu_mt_shift;
509 +- for (j = 0; j <= smp_cpu_mtid; j++) {
510 +- if (pcpu_find_address(cpu_present_mask, address + j))
511 +- continue;
512 +- pcpu = pcpu_devices + cpu;
513 +- pcpu->address = address + j;
514 +- pcpu->state =
515 +- (cpu >= info->configured*(smp_cpu_mtid + 1)) ?
516 +- CPU_STATE_STANDBY : CPU_STATE_CONFIGURED;
517 +- smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
518 +- set_cpu_present(cpu, true);
519 +- if (sysfs_add && smp_add_present_cpu(cpu) != 0)
520 +- set_cpu_present(cpu, false);
521 +- else
522 +- nr++;
523 +- cpu = cpumask_next(cpu, &avail);
524 +- if (cpu >= nr_cpu_ids)
525 ++ pcpu = pcpu_devices + cpu;
526 ++ pcpu->address = address + i;
527 ++ if (configured)
528 ++ pcpu->state = CPU_STATE_CONFIGURED;
529 ++ else
530 ++ pcpu->state = CPU_STATE_STANDBY;
531 ++ smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
532 ++ set_cpu_present(cpu, true);
533 ++ if (!early && smp_add_present_cpu(cpu) != 0)
534 ++ set_cpu_present(cpu, false);
535 ++ else
536 ++ nr++;
537 ++ cpumask_clear_cpu(cpu, avail);
538 ++ cpu = cpumask_next(cpu, avail);
539 ++ }
540 ++ return nr;
541 ++}
542 ++
543 ++static int __smp_rescan_cpus(struct sclp_core_info *info, bool early)
544 ++{
545 ++ struct sclp_core_entry *core;
546 ++ cpumask_t avail;
547 ++ bool configured;
548 ++ u16 core_id;
549 ++ int nr, i;
550 ++
551 ++ nr = 0;
552 ++ cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
553 ++ /*
554 ++ * Add IPL core first (which got logical CPU number 0) to make sure
555 ++ * that all SMT threads get subsequent logical CPU numbers.
556 ++ */
557 ++ if (early) {
558 ++ core_id = pcpu_devices[0].address >> smp_cpu_mt_shift;
559 ++ for (i = 0; i < info->configured; i++) {
560 ++ core = &info->core[i];
561 ++ if (core->core_id == core_id) {
562 ++ nr += smp_add_core(core, &avail, true, early);
563 + break;
564 ++ }
565 + }
566 + }
567 ++ for (i = 0; i < info->combined; i++) {
568 ++ configured = i < info->configured;
569 ++ nr += smp_add_core(&info->core[i], &avail, configured, early);
570 ++ }
571 + return nr;
572 + }
573 +
574 +@@ -808,7 +836,7 @@ void __init smp_detect_cpus(void)
575 +
576 + /* Add CPUs present at boot */
577 + get_online_cpus();
578 +- __smp_rescan_cpus(info, 0);
579 ++ __smp_rescan_cpus(info, true);
580 + put_online_cpus();
581 + memblock_free_early((unsigned long)info, sizeof(*info));
582 + }
583 +@@ -1153,7 +1181,7 @@ int __ref smp_rescan_cpus(void)
584 + smp_get_core_info(info, 0);
585 + get_online_cpus();
586 + mutex_lock(&smp_cpu_state_mutex);
587 +- nr = __smp_rescan_cpus(info, 1);
588 ++ nr = __smp_rescan_cpus(info, false);
589 + mutex_unlock(&smp_cpu_state_mutex);
590 + put_online_cpus();
591 + kfree(info);
592 +diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
593 +index a124f19f7b3c..c1d96e588152 100644
594 +--- a/arch/s390/mm/init.c
595 ++++ b/arch/s390/mm/init.c
596 +@@ -291,10 +291,8 @@ void arch_remove_memory(int nid, u64 start, u64 size,
597 + {
598 + unsigned long start_pfn = start >> PAGE_SHIFT;
599 + unsigned long nr_pages = size >> PAGE_SHIFT;
600 +- struct zone *zone;
601 +
602 +- zone = page_zone(pfn_to_page(start_pfn));
603 +- __remove_pages(zone, start_pfn, nr_pages, altmap);
604 ++ __remove_pages(start_pfn, nr_pages, altmap);
605 + vmem_remove_mapping(start, size);
606 + }
607 + #endif /* CONFIG_MEMORY_HOTPLUG */
608 +diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
609 +index dfdbaa50946e..d1b1ff2be17a 100644
610 +--- a/arch/sh/mm/init.c
611 ++++ b/arch/sh/mm/init.c
612 +@@ -434,9 +434,7 @@ void arch_remove_memory(int nid, u64 start, u64 size,
613 + {
614 + unsigned long start_pfn = PFN_DOWN(start);
615 + unsigned long nr_pages = size >> PAGE_SHIFT;
616 +- struct zone *zone;
617 +
618 +- zone = page_zone(pfn_to_page(start_pfn));
619 +- __remove_pages(zone, start_pfn, nr_pages, altmap);
620 ++ __remove_pages(start_pfn, nr_pages, altmap);
621 + }
622 + #endif /* CONFIG_MEMORY_HOTPLUG */
623 +diff --git a/arch/x86/events/intel/bts.c b/arch/x86/events/intel/bts.c
624 +index 5ee3fed881d3..741540d849f3 100644
625 +--- a/arch/x86/events/intel/bts.c
626 ++++ b/arch/x86/events/intel/bts.c
627 +@@ -63,9 +63,17 @@ struct bts_buffer {
628 +
629 + static struct pmu bts_pmu;
630 +
631 ++static int buf_nr_pages(struct page *page)
632 ++{
633 ++ if (!PagePrivate(page))
634 ++ return 1;
635 ++
636 ++ return 1 << page_private(page);
637 ++}
638 ++
639 + static size_t buf_size(struct page *page)
640 + {
641 +- return 1 << (PAGE_SHIFT + page_private(page));
642 ++ return buf_nr_pages(page) * PAGE_SIZE;
643 + }
644 +
645 + static void *
646 +@@ -83,9 +91,7 @@ bts_buffer_setup_aux(struct perf_event *event, void **pages,
647 + /* count all the high order buffers */
648 + for (pg = 0, nbuf = 0; pg < nr_pages;) {
649 + page = virt_to_page(pages[pg]);
650 +- if (WARN_ON_ONCE(!PagePrivate(page) && nr_pages > 1))
651 +- return NULL;
652 +- pg += 1 << page_private(page);
653 ++ pg += buf_nr_pages(page);
654 + nbuf++;
655 + }
656 +
657 +@@ -109,7 +115,7 @@ bts_buffer_setup_aux(struct perf_event *event, void **pages,
658 + unsigned int __nr_pages;
659 +
660 + page = virt_to_page(pages[pg]);
661 +- __nr_pages = PagePrivate(page) ? 1 << page_private(page) : 1;
662 ++ __nr_pages = buf_nr_pages(page);
663 + buf->buf[nbuf].page = page;
664 + buf->buf[nbuf].offset = offset;
665 + buf->buf[nbuf].displacement = (pad ? BTS_RECORD_SIZE - pad : 0);
666 +diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
667 +index 930edeb41ec3..0a74407ef92e 100644
668 +--- a/arch/x86/mm/init_32.c
669 ++++ b/arch/x86/mm/init_32.c
670 +@@ -865,10 +865,8 @@ void arch_remove_memory(int nid, u64 start, u64 size,
671 + {
672 + unsigned long start_pfn = start >> PAGE_SHIFT;
673 + unsigned long nr_pages = size >> PAGE_SHIFT;
674 +- struct zone *zone;
675 +
676 +- zone = page_zone(pfn_to_page(start_pfn));
677 +- __remove_pages(zone, start_pfn, nr_pages, altmap);
678 ++ __remove_pages(start_pfn, nr_pages, altmap);
679 + }
680 + #endif
681 +
682 +diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
683 +index a6b5c653727b..b8541d77452c 100644
684 +--- a/arch/x86/mm/init_64.c
685 ++++ b/arch/x86/mm/init_64.c
686 +@@ -1212,10 +1212,8 @@ void __ref arch_remove_memory(int nid, u64 start, u64 size,
687 + {
688 + unsigned long start_pfn = start >> PAGE_SHIFT;
689 + unsigned long nr_pages = size >> PAGE_SHIFT;
690 +- struct page *page = pfn_to_page(start_pfn) + vmem_altmap_offset(altmap);
691 +- struct zone *zone = page_zone(page);
692 +
693 +- __remove_pages(zone, start_pfn, nr_pages, altmap);
694 ++ __remove_pages(start_pfn, nr_pages, altmap);
695 + kernel_physical_mapping_remove(start, start + size);
696 + }
697 + #endif /* CONFIG_MEMORY_HOTPLUG */
698 +diff --git a/block/bio.c b/block/bio.c
699 +index 43df756b68c4..c822ceb7c4de 100644
700 +--- a/block/bio.c
701 ++++ b/block/bio.c
702 +@@ -535,6 +535,45 @@ void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start)
703 + }
704 + EXPORT_SYMBOL(zero_fill_bio_iter);
705 +
706 ++void bio_truncate(struct bio *bio, unsigned new_size)
707 ++{
708 ++ struct bio_vec bv;
709 ++ struct bvec_iter iter;
710 ++ unsigned int done = 0;
711 ++ bool truncated = false;
712 ++
713 ++ if (new_size >= bio->bi_iter.bi_size)
714 ++ return;
715 ++
716 ++ if (bio_data_dir(bio) != READ)
717 ++ goto exit;
718 ++
719 ++ bio_for_each_segment(bv, bio, iter) {
720 ++ if (done + bv.bv_len > new_size) {
721 ++ unsigned offset;
722 ++
723 ++ if (!truncated)
724 ++ offset = new_size - done;
725 ++ else
726 ++ offset = 0;
727 ++ zero_user(bv.bv_page, offset, bv.bv_len - offset);
728 ++ truncated = true;
729 ++ }
730 ++ done += bv.bv_len;
731 ++ }
732 ++
733 ++ exit:
734 ++ /*
735 ++ * Don't touch bvec table here and make it really immutable, since
736 ++ * fs bio user has to retrieve all pages via bio_for_each_segment_all
737 ++ * in its .end_bio() callback.
738 ++ *
739 ++ * It is enough to truncate bio by updating .bi_size since we can make
740 ++ * correct bvec with the updated .bi_size for drivers.
741 ++ */
742 ++ bio->bi_iter.bi_size = new_size;
743 ++}
744 ++
745 + /**
746 + * bio_put - release a reference to a bio
747 + * @bio: bio to release reference to
748 +diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
749 +index 6ca015f92766..7f053468b50d 100644
750 +--- a/block/compat_ioctl.c
751 ++++ b/block/compat_ioctl.c
752 +@@ -6,6 +6,7 @@
753 + #include <linux/compat.h>
754 + #include <linux/elevator.h>
755 + #include <linux/hdreg.h>
756 ++#include <linux/pr.h>
757 + #include <linux/slab.h>
758 + #include <linux/syscalls.h>
759 + #include <linux/types.h>
760 +@@ -354,6 +355,10 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
761 + * but we call blkdev_ioctl, which gets the lock for us
762 + */
763 + case BLKRRPART:
764 ++ case BLKREPORTZONE:
765 ++ case BLKRESETZONE:
766 ++ case BLKGETZONESZ:
767 ++ case BLKGETNRZONES:
768 + return blkdev_ioctl(bdev, mode, cmd,
769 + (unsigned long)compat_ptr(arg));
770 + case BLKBSZSET_32:
771 +@@ -401,6 +406,14 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
772 + case BLKTRACETEARDOWN: /* compatible */
773 + ret = blk_trace_ioctl(bdev, cmd, compat_ptr(arg));
774 + return ret;
775 ++ case IOC_PR_REGISTER:
776 ++ case IOC_PR_RESERVE:
777 ++ case IOC_PR_RELEASE:
778 ++ case IOC_PR_PREEMPT:
779 ++ case IOC_PR_PREEMPT_ABORT:
780 ++ case IOC_PR_CLEAR:
781 ++ return blkdev_ioctl(bdev, mode, cmd,
782 ++ (unsigned long)compat_ptr(arg));
783 + default:
784 + if (disk->fops->compat_ioctl)
785 + ret = disk->fops->compat_ioctl(bdev, mode, cmd, arg);
786 +diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
787 +index 75948a3f1a20..c60d2c6d31d6 100644
788 +--- a/drivers/acpi/sysfs.c
789 ++++ b/drivers/acpi/sysfs.c
790 +@@ -819,14 +819,14 @@ end:
791 + * interface:
792 + * echo unmask > /sys/firmware/acpi/interrupts/gpe00
793 + */
794 +-#define ACPI_MASKABLE_GPE_MAX 0xFF
795 ++#define ACPI_MASKABLE_GPE_MAX 0x100
796 + static DECLARE_BITMAP(acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX) __initdata;
797 +
798 + static int __init acpi_gpe_set_masked_gpes(char *val)
799 + {
800 + u8 gpe;
801 +
802 +- if (kstrtou8(val, 0, &gpe) || gpe > ACPI_MASKABLE_GPE_MAX)
803 ++ if (kstrtou8(val, 0, &gpe))
804 + return -EINVAL;
805 + set_bit(gpe, acpi_masked_gpes_map);
806 +
807 +@@ -838,7 +838,7 @@ void __init acpi_gpe_apply_masked_gpes(void)
808 + {
809 + acpi_handle handle;
810 + acpi_status status;
811 +- u8 gpe;
812 ++ u16 gpe;
813 +
814 + for_each_set_bit(gpe, acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX) {
815 + status = acpi_get_gpe_device(gpe, &handle);
816 +diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c
817 +index f41744b9b38a..66a570d0da83 100644
818 +--- a/drivers/ata/ahci_brcm.c
819 ++++ b/drivers/ata/ahci_brcm.c
820 +@@ -76,8 +76,7 @@ enum brcm_ahci_version {
821 + };
822 +
823 + enum brcm_ahci_quirks {
824 +- BRCM_AHCI_QUIRK_NO_NCQ = BIT(0),
825 +- BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE = BIT(1),
826 ++ BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE = BIT(0),
827 + };
828 +
829 + struct brcm_ahci_priv {
830 +@@ -213,19 +212,12 @@ static void brcm_sata_phys_disable(struct brcm_ahci_priv *priv)
831 + brcm_sata_phy_disable(priv, i);
832 + }
833 +
834 +-static u32 brcm_ahci_get_portmask(struct platform_device *pdev,
835 ++static u32 brcm_ahci_get_portmask(struct ahci_host_priv *hpriv,
836 + struct brcm_ahci_priv *priv)
837 + {
838 +- void __iomem *ahci;
839 +- struct resource *res;
840 + u32 impl;
841 +
842 +- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ahci");
843 +- ahci = devm_ioremap_resource(&pdev->dev, res);
844 +- if (IS_ERR(ahci))
845 +- return 0;
846 +-
847 +- impl = readl(ahci + HOST_PORTS_IMPL);
848 ++ impl = readl(hpriv->mmio + HOST_PORTS_IMPL);
849 +
850 + if (fls(impl) > SATA_TOP_MAX_PHYS)
851 + dev_warn(priv->dev, "warning: more ports than PHYs (%#x)\n",
852 +@@ -233,9 +225,6 @@ static u32 brcm_ahci_get_portmask(struct platform_device *pdev,
853 + else if (!impl)
854 + dev_info(priv->dev, "no ports found\n");
855 +
856 +- devm_iounmap(&pdev->dev, ahci);
857 +- devm_release_mem_region(&pdev->dev, res->start, resource_size(res));
858 +-
859 + return impl;
860 + }
861 +
862 +@@ -285,6 +274,13 @@ static unsigned int brcm_ahci_read_id(struct ata_device *dev,
863 + /* Perform the SATA PHY reset sequence */
864 + brcm_sata_phy_disable(priv, ap->port_no);
865 +
866 ++ /* Reset the SATA clock */
867 ++ ahci_platform_disable_clks(hpriv);
868 ++ msleep(10);
869 ++
870 ++ ahci_platform_enable_clks(hpriv);
871 ++ msleep(10);
872 ++
873 + /* Bring the PHY back on */
874 + brcm_sata_phy_enable(priv, ap->port_no);
875 +
876 +@@ -347,11 +343,10 @@ static int brcm_ahci_suspend(struct device *dev)
877 + struct ata_host *host = dev_get_drvdata(dev);
878 + struct ahci_host_priv *hpriv = host->private_data;
879 + struct brcm_ahci_priv *priv = hpriv->plat_data;
880 +- int ret;
881 +
882 +- ret = ahci_platform_suspend(dev);
883 + brcm_sata_phys_disable(priv);
884 +- return ret;
885 ++
886 ++ return ahci_platform_suspend(dev);
887 + }
888 +
889 + static int brcm_ahci_resume(struct device *dev)
890 +@@ -359,11 +354,44 @@ static int brcm_ahci_resume(struct device *dev)
891 + struct ata_host *host = dev_get_drvdata(dev);
892 + struct ahci_host_priv *hpriv = host->private_data;
893 + struct brcm_ahci_priv *priv = hpriv->plat_data;
894 ++ int ret;
895 ++
896 ++ /* Make sure clocks are turned on before re-configuration */
897 ++ ret = ahci_platform_enable_clks(hpriv);
898 ++ if (ret)
899 ++ return ret;
900 +
901 + brcm_sata_init(priv);
902 + brcm_sata_phys_enable(priv);
903 + brcm_sata_alpm_init(hpriv);
904 +- return ahci_platform_resume(dev);
905 ++
906 ++ /* Since we had to enable clocks earlier on, we cannot use
907 ++ * ahci_platform_resume() as-is since a second call to
908 ++ * ahci_platform_enable_resources() would bump up the resources
909 ++ * (regulators, clocks, PHYs) count artificially so we copy the part
910 ++ * after ahci_platform_enable_resources().
911 ++ */
912 ++ ret = ahci_platform_enable_phys(hpriv);
913 ++ if (ret)
914 ++ goto out_disable_phys;
915 ++
916 ++ ret = ahci_platform_resume_host(dev);
917 ++ if (ret)
918 ++ goto out_disable_platform_phys;
919 ++
920 ++ /* We resumed so update PM runtime state */
921 ++ pm_runtime_disable(dev);
922 ++ pm_runtime_set_active(dev);
923 ++ pm_runtime_enable(dev);
924 ++
925 ++ return 0;
926 ++
927 ++out_disable_platform_phys:
928 ++ ahci_platform_disable_phys(hpriv);
929 ++out_disable_phys:
930 ++ brcm_sata_phys_disable(priv);
931 ++ ahci_platform_disable_clks(hpriv);
932 ++ return ret;
933 + }
934 + #endif
935 +
936 +@@ -410,44 +438,71 @@ static int brcm_ahci_probe(struct platform_device *pdev)
937 + if (!IS_ERR_OR_NULL(priv->rcdev))
938 + reset_control_deassert(priv->rcdev);
939 +
940 +- if ((priv->version == BRCM_SATA_BCM7425) ||
941 +- (priv->version == BRCM_SATA_NSP)) {
942 +- priv->quirks |= BRCM_AHCI_QUIRK_NO_NCQ;
943 ++ hpriv = ahci_platform_get_resources(pdev, 0);
944 ++ if (IS_ERR(hpriv)) {
945 ++ ret = PTR_ERR(hpriv);
946 ++ goto out_reset;
947 ++ }
948 ++
949 ++ hpriv->plat_data = priv;
950 ++ hpriv->flags = AHCI_HFLAG_WAKE_BEFORE_STOP | AHCI_HFLAG_NO_WRITE_TO_RO;
951 ++
952 ++ switch (priv->version) {
953 ++ case BRCM_SATA_BCM7425:
954 ++ hpriv->flags |= AHCI_HFLAG_DELAY_ENGINE;
955 ++ /* fall through */
956 ++ case BRCM_SATA_NSP:
957 ++ hpriv->flags |= AHCI_HFLAG_NO_NCQ;
958 + priv->quirks |= BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE;
959 ++ break;
960 ++ default:
961 ++ break;
962 + }
963 +
964 ++ ret = ahci_platform_enable_clks(hpriv);
965 ++ if (ret)
966 ++ goto out_reset;
967 ++
968 ++ /* Must be first so as to configure endianness including that
969 ++ * of the standard AHCI register space.
970 ++ */
971 + brcm_sata_init(priv);
972 +
973 +- priv->port_mask = brcm_ahci_get_portmask(pdev, priv);
974 +- if (!priv->port_mask)
975 +- return -ENODEV;
976 ++ /* Initializes priv->port_mask which is used below */
977 ++ priv->port_mask = brcm_ahci_get_portmask(hpriv, priv);
978 ++ if (!priv->port_mask) {
979 ++ ret = -ENODEV;
980 ++ goto out_disable_clks;
981 ++ }
982 +
983 ++ /* Must be done before ahci_platform_enable_phys() */
984 + brcm_sata_phys_enable(priv);
985 +
986 +- hpriv = ahci_platform_get_resources(pdev, 0);
987 +- if (IS_ERR(hpriv))
988 +- return PTR_ERR(hpriv);
989 +- hpriv->plat_data = priv;
990 +- hpriv->flags = AHCI_HFLAG_WAKE_BEFORE_STOP;
991 +-
992 + brcm_sata_alpm_init(hpriv);
993 +
994 +- ret = ahci_platform_enable_resources(hpriv);
995 ++ ret = ahci_platform_enable_phys(hpriv);
996 + if (ret)
997 +- return ret;
998 +-
999 +- if (priv->quirks & BRCM_AHCI_QUIRK_NO_NCQ)
1000 +- hpriv->flags |= AHCI_HFLAG_NO_NCQ;
1001 +- hpriv->flags |= AHCI_HFLAG_NO_WRITE_TO_RO;
1002 ++ goto out_disable_phys;
1003 +
1004 + ret = ahci_platform_init_host(pdev, hpriv, &ahci_brcm_port_info,
1005 + &ahci_platform_sht);
1006 + if (ret)
1007 +- return ret;
1008 ++ goto out_disable_platform_phys;
1009 +
1010 + dev_info(dev, "Broadcom AHCI SATA3 registered\n");
1011 +
1012 + return 0;
1013 ++
1014 ++out_disable_platform_phys:
1015 ++ ahci_platform_disable_phys(hpriv);
1016 ++out_disable_phys:
1017 ++ brcm_sata_phys_disable(priv);
1018 ++out_disable_clks:
1019 ++ ahci_platform_disable_clks(hpriv);
1020 ++out_reset:
1021 ++ if (!IS_ERR_OR_NULL(priv->rcdev))
1022 ++ reset_control_assert(priv->rcdev);
1023 ++ return ret;
1024 + }
1025 +
1026 + static int brcm_ahci_remove(struct platform_device *pdev)
1027 +@@ -457,12 +512,12 @@ static int brcm_ahci_remove(struct platform_device *pdev)
1028 + struct brcm_ahci_priv *priv = hpriv->plat_data;
1029 + int ret;
1030 +
1031 ++ brcm_sata_phys_disable(priv);
1032 ++
1033 + ret = ata_platform_remove_one(pdev);
1034 + if (ret)
1035 + return ret;
1036 +
1037 +- brcm_sata_phys_disable(priv);
1038 +-
1039 + return 0;
1040 + }
1041 +
1042 +diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
1043 +index 8befce036af8..129556fcf6be 100644
1044 +--- a/drivers/ata/libahci_platform.c
1045 ++++ b/drivers/ata/libahci_platform.c
1046 +@@ -43,7 +43,7 @@ EXPORT_SYMBOL_GPL(ahci_platform_ops);
1047 + * RETURNS:
1048 + * 0 on success otherwise a negative error code
1049 + */
1050 +-static int ahci_platform_enable_phys(struct ahci_host_priv *hpriv)
1051 ++int ahci_platform_enable_phys(struct ahci_host_priv *hpriv)
1052 + {
1053 + int rc, i;
1054 +
1055 +@@ -74,6 +74,7 @@ disable_phys:
1056 + }
1057 + return rc;
1058 + }
1059 ++EXPORT_SYMBOL_GPL(ahci_platform_enable_phys);
1060 +
1061 + /**
1062 + * ahci_platform_disable_phys - Disable PHYs
1063 +@@ -81,7 +82,7 @@ disable_phys:
1064 + *
1065 + * This function disables all PHYs found in hpriv->phys.
1066 + */
1067 +-static void ahci_platform_disable_phys(struct ahci_host_priv *hpriv)
1068 ++void ahci_platform_disable_phys(struct ahci_host_priv *hpriv)
1069 + {
1070 + int i;
1071 +
1072 +@@ -90,6 +91,7 @@ static void ahci_platform_disable_phys(struct ahci_host_priv *hpriv)
1073 + phy_exit(hpriv->phys[i]);
1074 + }
1075 + }
1076 ++EXPORT_SYMBOL_GPL(ahci_platform_disable_phys);
1077 +
1078 + /**
1079 + * ahci_platform_enable_clks - Enable platform clocks
1080 +diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
1081 +index 74c9b3032d46..84b183a6424e 100644
1082 +--- a/drivers/ata/libata-core.c
1083 ++++ b/drivers/ata/libata-core.c
1084 +@@ -5325,6 +5325,30 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
1085 + }
1086 + }
1087 +
1088 ++/**
1089 ++ * ata_qc_get_active - get bitmask of active qcs
1090 ++ * @ap: port in question
1091 ++ *
1092 ++ * LOCKING:
1093 ++ * spin_lock_irqsave(host lock)
1094 ++ *
1095 ++ * RETURNS:
1096 ++ * Bitmask of active qcs
1097 ++ */
1098 ++u64 ata_qc_get_active(struct ata_port *ap)
1099 ++{
1100 ++ u64 qc_active = ap->qc_active;
1101 ++
1102 ++ /* ATA_TAG_INTERNAL is sent to hw as tag 0 */
1103 ++ if (qc_active & (1ULL << ATA_TAG_INTERNAL)) {
1104 ++ qc_active |= (1 << 0);
1105 ++ qc_active &= ~(1ULL << ATA_TAG_INTERNAL);
1106 ++ }
1107 ++
1108 ++ return qc_active;
1109 ++}
1110 ++EXPORT_SYMBOL_GPL(ata_qc_get_active);
1111 ++
1112 + /**
1113 + * ata_qc_complete_multiple - Complete multiple qcs successfully
1114 + * @ap: port in question
1115 +diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
1116 +index 8e9cb198fcd1..ca6c706e9c25 100644
1117 +--- a/drivers/ata/sata_fsl.c
1118 ++++ b/drivers/ata/sata_fsl.c
1119 +@@ -1278,7 +1278,7 @@ static void sata_fsl_host_intr(struct ata_port *ap)
1120 + i, ioread32(hcr_base + CC),
1121 + ioread32(hcr_base + CA));
1122 + }
1123 +- ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
1124 ++ ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
1125 + return;
1126 +
1127 + } else if ((ap->qc_active & (1ULL << ATA_TAG_INTERNAL))) {
1128 +diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
1129 +index ad385a113391..bde695a32097 100644
1130 +--- a/drivers/ata/sata_mv.c
1131 ++++ b/drivers/ata/sata_mv.c
1132 +@@ -2827,7 +2827,7 @@ static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp
1133 + }
1134 +
1135 + if (work_done) {
1136 +- ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
1137 ++ ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
1138 +
1139 + /* Update the software queue position index in hardware */
1140 + writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
1141 +diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
1142 +index 56946012d113..7510303111fa 100644
1143 +--- a/drivers/ata/sata_nv.c
1144 ++++ b/drivers/ata/sata_nv.c
1145 +@@ -984,7 +984,7 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
1146 + check_commands = 0;
1147 + check_commands &= ~(1 << pos);
1148 + }
1149 +- ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
1150 ++ ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
1151 + }
1152 + }
1153 +
1154 +diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
1155 +index fd1e19f1a49f..3666afa639d1 100644
1156 +--- a/drivers/block/xen-blkback/blkback.c
1157 ++++ b/drivers/block/xen-blkback/blkback.c
1158 +@@ -936,6 +936,8 @@ next:
1159 + out_of_memory:
1160 + pr_alert("%s: out of memory\n", __func__);
1161 + put_free_pages(ring, pages_to_gnt, segs_to_map);
1162 ++ for (i = last_map; i < num; i++)
1163 ++ pages[i]->handle = BLKBACK_INVALID_HANDLE;
1164 + return -ENOMEM;
1165 + }
1166 +
1167 +diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
1168 +index b90dbcd99c03..c4cd68116e7f 100644
1169 +--- a/drivers/block/xen-blkback/xenbus.c
1170 ++++ b/drivers/block/xen-blkback/xenbus.c
1171 +@@ -171,6 +171,15 @@ static struct xen_blkif *xen_blkif_alloc(domid_t domid)
1172 + blkif->domid = domid;
1173 + atomic_set(&blkif->refcnt, 1);
1174 + init_completion(&blkif->drain_complete);
1175 ++
1176 ++ /*
1177 ++ * Because freeing back to the cache may be deferred, it is not
1178 ++ * safe to unload the module (and hence destroy the cache) until
1179 ++ * this has completed. To prevent premature unloading, take an
1180 ++ * extra module reference here and release only when the object
1181 ++ * has been freed back to the cache.
1182 ++ */
1183 ++ __module_get(THIS_MODULE);
1184 + INIT_WORK(&blkif->free_work, xen_blkif_deferred_free);
1185 +
1186 + return blkif;
1187 +@@ -320,6 +329,7 @@ static void xen_blkif_free(struct xen_blkif *blkif)
1188 +
1189 + /* Make sure everything is drained before shutting down */
1190 + kmem_cache_free(xen_blkif_cachep, blkif);
1191 ++ module_put(THIS_MODULE);
1192 + }
1193 +
1194 + int __init xen_blkif_interface_init(void)
1195 +diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
1196 +index 23e606aaaea4..04cf767d0708 100644
1197 +--- a/drivers/bluetooth/btusb.c
1198 ++++ b/drivers/bluetooth/btusb.c
1199 +@@ -1200,7 +1200,7 @@ static int btusb_open(struct hci_dev *hdev)
1200 + if (data->setup_on_usb) {
1201 + err = data->setup_on_usb(hdev);
1202 + if (err < 0)
1203 +- return err;
1204 ++ goto setup_fail;
1205 + }
1206 +
1207 + data->intf->needs_remote_wakeup = 1;
1208 +@@ -1239,6 +1239,7 @@ done:
1209 +
1210 + failed:
1211 + clear_bit(BTUSB_INTR_RUNNING, &data->flags);
1212 ++setup_fail:
1213 + usb_autopm_put_interface(data->intf);
1214 + return err;
1215 + }
1216 +diff --git a/drivers/clocksource/timer-riscv.c b/drivers/clocksource/timer-riscv.c
1217 +index 470c7ef02ea4..4b04ffbe5e7e 100644
1218 +--- a/drivers/clocksource/timer-riscv.c
1219 ++++ b/drivers/clocksource/timer-riscv.c
1220 +@@ -41,7 +41,7 @@ static unsigned long long riscv_clocksource_rdtime(struct clocksource *cs)
1221 + return get_cycles64();
1222 + }
1223 +
1224 +-static u64 riscv_sched_clock(void)
1225 ++static u64 notrace riscv_sched_clock(void)
1226 + {
1227 + return get_cycles64();
1228 + }
1229 +diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
1230 +index 3a1484e7a3ae..c64d20fdc187 100644
1231 +--- a/drivers/devfreq/devfreq.c
1232 ++++ b/drivers/devfreq/devfreq.c
1233 +@@ -551,26 +551,30 @@ static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type,
1234 + void *devp)
1235 + {
1236 + struct devfreq *devfreq = container_of(nb, struct devfreq, nb);
1237 +- int ret;
1238 ++ int err = -EINVAL;
1239 +
1240 + mutex_lock(&devfreq->lock);
1241 +
1242 + devfreq->scaling_min_freq = find_available_min_freq(devfreq);
1243 +- if (!devfreq->scaling_min_freq) {
1244 +- mutex_unlock(&devfreq->lock);
1245 +- return -EINVAL;
1246 +- }
1247 ++ if (!devfreq->scaling_min_freq)
1248 ++ goto out;
1249 +
1250 + devfreq->scaling_max_freq = find_available_max_freq(devfreq);
1251 + if (!devfreq->scaling_max_freq) {
1252 +- mutex_unlock(&devfreq->lock);
1253 +- return -EINVAL;
1254 ++ devfreq->scaling_max_freq = ULONG_MAX;
1255 ++ goto out;
1256 + }
1257 +
1258 +- ret = update_devfreq(devfreq);
1259 ++ err = update_devfreq(devfreq);
1260 ++
1261 ++out:
1262 + mutex_unlock(&devfreq->lock);
1263 ++ if (err)
1264 ++ dev_err(devfreq->dev.parent,
1265 ++ "failed to update frequency from OPP notifier (%d)\n",
1266 ++ err);
1267 +
1268 +- return ret;
1269 ++ return NOTIFY_OK;
1270 + }
1271 +
1272 + /**
1273 +@@ -584,11 +588,6 @@ static void devfreq_dev_release(struct device *dev)
1274 + struct devfreq *devfreq = to_devfreq(dev);
1275 +
1276 + mutex_lock(&devfreq_list_lock);
1277 +- if (IS_ERR(find_device_devfreq(devfreq->dev.parent))) {
1278 +- mutex_unlock(&devfreq_list_lock);
1279 +- dev_warn(&devfreq->dev, "releasing devfreq which doesn't exist\n");
1280 +- return;
1281 +- }
1282 + list_del(&devfreq->node);
1283 + mutex_unlock(&devfreq_list_lock);
1284 +
1285 +@@ -643,6 +642,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
1286 + devfreq->dev.parent = dev;
1287 + devfreq->dev.class = devfreq_class;
1288 + devfreq->dev.release = devfreq_dev_release;
1289 ++ INIT_LIST_HEAD(&devfreq->node);
1290 + devfreq->profile = profile;
1291 + strncpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN);
1292 + devfreq->previous_freq = profile->initial_freq;
1293 +@@ -1196,7 +1196,7 @@ static ssize_t available_governors_show(struct device *d,
1294 + * The devfreq with immutable governor (e.g., passive) shows
1295 + * only own governor.
1296 + */
1297 +- if (df->governor->immutable) {
1298 ++ if (df->governor && df->governor->immutable) {
1299 + count = scnprintf(&buf[count], DEVFREQ_NAME_LEN,
1300 + "%s ", df->governor_name);
1301 + /*
1302 +diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
1303 +index cafb1cc065bb..bf95f1d551c5 100644
1304 +--- a/drivers/dma/dma-jz4780.c
1305 ++++ b/drivers/dma/dma-jz4780.c
1306 +@@ -1004,7 +1004,8 @@ static const struct jz4780_dma_soc_data jz4740_dma_soc_data = {
1307 + static const struct jz4780_dma_soc_data jz4725b_dma_soc_data = {
1308 + .nb_channels = 6,
1309 + .transfer_ord_max = 5,
1310 +- .flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC,
1311 ++ .flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC |
1312 ++ JZ_SOC_DATA_BREAK_LINKS,
1313 + };
1314 +
1315 + static const struct jz4780_dma_soc_data jz4770_dma_soc_data = {
1316 +diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c
1317 +index ec4adf4260a0..256fc662c500 100644
1318 +--- a/drivers/dma/virt-dma.c
1319 ++++ b/drivers/dma/virt-dma.c
1320 +@@ -104,9 +104,8 @@ static void vchan_complete(unsigned long arg)
1321 + dmaengine_desc_get_callback(&vd->tx, &cb);
1322 +
1323 + list_del(&vd->node);
1324 +- vchan_vdesc_fini(vd);
1325 +-
1326 + dmaengine_desc_callback_invoke(&cb, &vd->tx_result);
1327 ++ vchan_vdesc_fini(vd);
1328 + }
1329 + }
1330 +
1331 +diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
1332 +index b132ab9ad607..715e491dfbc3 100644
1333 +--- a/drivers/firewire/net.c
1334 ++++ b/drivers/firewire/net.c
1335 +@@ -250,7 +250,11 @@ static int fwnet_header_cache(const struct neighbour *neigh,
1336 + h = (struct fwnet_header *)((u8 *)hh->hh_data + HH_DATA_OFF(sizeof(*h)));
1337 + h->h_proto = type;
1338 + memcpy(h->h_dest, neigh->ha, net->addr_len);
1339 +- hh->hh_len = FWNET_HLEN;
1340 ++
1341 ++ /* Pairs with the READ_ONCE() in neigh_resolve_output(),
1342 ++ * neigh_hh_output() and neigh_update_hhs().
1343 ++ */
1344 ++ smp_store_release(&hh->hh_len, FWNET_HLEN);
1345 +
1346 + return 0;
1347 + }
1348 +diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c
1349 +index 92f843eaf1e0..7a30952b463d 100644
1350 +--- a/drivers/firmware/arm_scmi/bus.c
1351 ++++ b/drivers/firmware/arm_scmi/bus.c
1352 +@@ -135,8 +135,10 @@ scmi_device_create(struct device_node *np, struct device *parent, int protocol)
1353 + return NULL;
1354 +
1355 + id = ida_simple_get(&scmi_bus_id, 1, 0, GFP_KERNEL);
1356 +- if (id < 0)
1357 +- goto free_mem;
1358 ++ if (id < 0) {
1359 ++ kfree(scmi_dev);
1360 ++ return NULL;
1361 ++ }
1362 +
1363 + scmi_dev->id = id;
1364 + scmi_dev->protocol_id = protocol;
1365 +@@ -154,8 +156,6 @@ scmi_device_create(struct device_node *np, struct device *parent, int protocol)
1366 + put_dev:
1367 + put_device(&scmi_dev->dev);
1368 + ida_simple_remove(&scmi_bus_id, id);
1369 +-free_mem:
1370 +- kfree(scmi_dev);
1371 + return NULL;
1372 + }
1373 +
1374 +diff --git a/drivers/firmware/efi/rci2-table.c b/drivers/firmware/efi/rci2-table.c
1375 +index 76b0c354a027..de1a9a1f9f14 100644
1376 +--- a/drivers/firmware/efi/rci2-table.c
1377 ++++ b/drivers/firmware/efi/rci2-table.c
1378 +@@ -81,6 +81,9 @@ static int __init efi_rci2_sysfs_init(void)
1379 + struct kobject *tables_kobj;
1380 + int ret = -ENOMEM;
1381 +
1382 ++ if (rci2_table_phys == EFI_INVALID_TABLE_ADDR)
1383 ++ return 0;
1384 ++
1385 + rci2_base = memremap(rci2_table_phys,
1386 + sizeof(struct rci2_table_global_hdr),
1387 + MEMREMAP_WB);
1388 +diff --git a/drivers/gpio/gpio-xtensa.c b/drivers/gpio/gpio-xtensa.c
1389 +index 43d3fa5f511a..0fb2211f9573 100644
1390 +--- a/drivers/gpio/gpio-xtensa.c
1391 ++++ b/drivers/gpio/gpio-xtensa.c
1392 +@@ -44,15 +44,14 @@ static inline unsigned long enable_cp(unsigned long *cpenable)
1393 + unsigned long flags;
1394 +
1395 + local_irq_save(flags);
1396 +- RSR_CPENABLE(*cpenable);
1397 +- WSR_CPENABLE(*cpenable | BIT(XCHAL_CP_ID_XTIOP));
1398 +-
1399 ++ *cpenable = xtensa_get_sr(cpenable);
1400 ++ xtensa_set_sr(*cpenable | BIT(XCHAL_CP_ID_XTIOP), cpenable);
1401 + return flags;
1402 + }
1403 +
1404 + static inline void disable_cp(unsigned long flags, unsigned long cpenable)
1405 + {
1406 +- WSR_CPENABLE(cpenable);
1407 ++ xtensa_set_sr(cpenable, cpenable);
1408 + local_irq_restore(flags);
1409 + }
1410 +
1411 +diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
1412 +index 104ed299d5ea..99d19f80440e 100644
1413 +--- a/drivers/gpio/gpiolib.c
1414 ++++ b/drivers/gpio/gpiolib.c
1415 +@@ -220,6 +220,14 @@ int gpiod_get_direction(struct gpio_desc *desc)
1416 + chip = gpiod_to_chip(desc);
1417 + offset = gpio_chip_hwgpio(desc);
1418 +
1419 ++ /*
1420 ++ * Open drain emulation using input mode may incorrectly report
1421 ++ * input here, fix that up.
1422 ++ */
1423 ++ if (test_bit(FLAG_OPEN_DRAIN, &desc->flags) &&
1424 ++ test_bit(FLAG_IS_OUT, &desc->flags))
1425 ++ return 0;
1426 ++
1427 + if (!chip->get_direction)
1428 + return -ENOTSUPP;
1429 +
1430 +diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
1431 +index 5850c8e34caa..97d11d792351 100644
1432 +--- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
1433 ++++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
1434 +@@ -261,23 +261,29 @@ static void df_v3_6_update_medium_grain_clock_gating(struct amdgpu_device *adev,
1435 + {
1436 + u32 tmp;
1437 +
1438 +- /* Put DF on broadcast mode */
1439 +- adev->df_funcs->enable_broadcast_mode(adev, true);
1440 +-
1441 +- if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG)) {
1442 +- tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater);
1443 +- tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
1444 +- tmp |= DF_V3_6_MGCG_ENABLE_15_CYCLE_DELAY;
1445 +- WREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater, tmp);
1446 +- } else {
1447 +- tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater);
1448 +- tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
1449 +- tmp |= DF_V3_6_MGCG_DISABLE;
1450 +- WREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater, tmp);
1451 +- }
1452 ++ if (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG) {
1453 ++ /* Put DF on broadcast mode */
1454 ++ adev->df_funcs->enable_broadcast_mode(adev, true);
1455 ++
1456 ++ if (enable) {
1457 ++ tmp = RREG32_SOC15(DF, 0,
1458 ++ mmDF_PIE_AON0_DfGlobalClkGater);
1459 ++ tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
1460 ++ tmp |= DF_V3_6_MGCG_ENABLE_15_CYCLE_DELAY;
1461 ++ WREG32_SOC15(DF, 0,
1462 ++ mmDF_PIE_AON0_DfGlobalClkGater, tmp);
1463 ++ } else {
1464 ++ tmp = RREG32_SOC15(DF, 0,
1465 ++ mmDF_PIE_AON0_DfGlobalClkGater);
1466 ++ tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
1467 ++ tmp |= DF_V3_6_MGCG_DISABLE;
1468 ++ WREG32_SOC15(DF, 0,
1469 ++ mmDF_PIE_AON0_DfGlobalClkGater, tmp);
1470 ++ }
1471 +
1472 +- /* Exit broadcast mode */
1473 +- adev->df_funcs->enable_broadcast_mode(adev, false);
1474 ++ /* Exit broadcast mode */
1475 ++ adev->df_funcs->enable_broadcast_mode(adev, false);
1476 ++ }
1477 + }
1478 +
1479 + static void df_v3_6_get_clockgating_state(struct amdgpu_device *adev,
1480 +diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
1481 +index 87dd55e9d72b..cc88ba76a8d4 100644
1482 +--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
1483 ++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
1484 +@@ -6184,7 +6184,23 @@ static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
1485 + bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
1486 + bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
1487 +
1488 +- /* EVENT_WRITE_EOP - flush caches, send int */
1489 ++ /* Workaround for cache flush problems. First send a dummy EOP
1490 ++ * event down the pipe with seq one below.
1491 ++ */
1492 ++ amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
1493 ++ amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
1494 ++ EOP_TC_ACTION_EN |
1495 ++ EOP_TC_WB_ACTION_EN |
1496 ++ EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
1497 ++ EVENT_INDEX(5)));
1498 ++ amdgpu_ring_write(ring, addr & 0xfffffffc);
1499 ++ amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
1500 ++ DATA_SEL(1) | INT_SEL(0));
1501 ++ amdgpu_ring_write(ring, lower_32_bits(seq - 1));
1502 ++ amdgpu_ring_write(ring, upper_32_bits(seq - 1));
1503 ++
1504 ++ /* Then send the real EOP event down the pipe:
1505 ++ * EVENT_WRITE_EOP - flush caches, send int */
1506 + amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
1507 + amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
1508 + EOP_TC_ACTION_EN |
1509 +@@ -6926,7 +6942,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
1510 + 5 + /* COND_EXEC */
1511 + 7 + /* PIPELINE_SYNC */
1512 + VI_FLUSH_GPU_TLB_NUM_WREG * 5 + 9 + /* VM_FLUSH */
1513 +- 8 + /* FENCE for VM_FLUSH */
1514 ++ 12 + /* FENCE for VM_FLUSH */
1515 + 20 + /* GDS switch */
1516 + 4 + /* double SWITCH_BUFFER,
1517 + the first COND_EXEC jump to the place just
1518 +@@ -6938,7 +6954,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
1519 + 31 + /* DE_META */
1520 + 3 + /* CNTX_CTRL */
1521 + 5 + /* HDP_INVL */
1522 +- 8 + 8 + /* FENCE x2 */
1523 ++ 12 + 12 + /* FENCE x2 */
1524 + 2, /* SWITCH_BUFFER */
1525 + .emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_gfx */
1526 + .emit_ib = gfx_v8_0_ring_emit_ib_gfx,
1527 +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
1528 +index 067f5579f452..793aa8e8ec9a 100644
1529 +--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
1530 ++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
1531 +@@ -373,7 +373,7 @@ bool dc_link_is_dp_sink_present(struct dc_link *link)
1532 +
1533 + if (GPIO_RESULT_OK != dal_ddc_open(
1534 + ddc, GPIO_MODE_INPUT, GPIO_DDC_CONFIG_TYPE_MODE_I2C)) {
1535 +- dal_gpio_destroy_ddc(&ddc);
1536 ++ dal_ddc_close(ddc);
1537 +
1538 + return present;
1539 + }
1540 +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
1541 +index 5a583707d198..0ab890c927ec 100644
1542 +--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
1543 ++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
1544 +@@ -3492,7 +3492,14 @@ void dp_set_fec_enable(struct dc_link *link, bool enable)
1545 + if (link_enc->funcs->fec_set_enable &&
1546 + link->dpcd_caps.fec_cap.bits.FEC_CAPABLE) {
1547 + if (link->fec_state == dc_link_fec_ready && enable) {
1548 +- msleep(1);
1549 ++ /* According to the DP spec, the FEC enable sequence can first
1550 ++ * be transmitted anytime after 1000 LL codes have been
1551 ++ * transmitted on the link following link training completion.
1552 ++ * A single RBR lane gives the maximum time for transmitting
1553 ++ * 1000 LL codes, which is 6.173 us, so use a 7 microsecond
1554 ++ * delay instead.
1555 ++ */
1556 ++ udelay(7);
1557 + link_enc->funcs->fec_set_enable(link_enc, true);
1558 + link->fec_state = dc_link_fec_enabled;
1559 + } else if (link->fec_state == dc_link_fec_enabled && !enable) {
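
The 6.173 us figure quoted above follows directly from the link parameters: an RBR lane runs at 1.62 Gbit/s and each 8b/10b link-layer code is 10 bits wide. A minimal standalone sketch of that arithmetic (not part of the patch):

#include <stdio.h>

int main(void)
{
	const double rbr_bits_per_s = 1.62e9; /* RBR lane rate */
	const double ll_code_bits = 10.0;     /* one 8b/10b symbol */
	double us = 1000.0 * ll_code_bits / rbr_bits_per_s * 1e6;

	printf("1000 LL codes on one RBR lane: %.3f us\n", us); /* ~6.173 */
	return 0;
}
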
1560 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
1561 +index 78b2cc2e122f..3b7769a3e67e 100644
1562 +--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
1563 ++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
1564 +@@ -1419,13 +1419,20 @@ enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state
1565 +
1566 + static void acquire_dsc(struct resource_context *res_ctx,
1567 + const struct resource_pool *pool,
1568 +- struct display_stream_compressor **dsc)
1569 ++ struct display_stream_compressor **dsc,
1570 ++ int pipe_idx)
1571 + {
1572 + int i;
1573 +
1574 + ASSERT(*dsc == NULL);
1575 + *dsc = NULL;
1576 +
1577 ++ if (pool->res_cap->num_dsc == pool->res_cap->num_opp) {
1578 ++ *dsc = pool->dscs[pipe_idx];
1579 ++ res_ctx->is_dsc_acquired[pipe_idx] = true;
1580 ++ return;
1581 ++ }
1582 ++
1583 + /* Find first free DSC */
1584 + for (i = 0; i < pool->res_cap->num_dsc; i++)
1585 + if (!res_ctx->is_dsc_acquired[i]) {
1586 +@@ -1468,7 +1475,7 @@ static enum dc_status add_dsc_to_stream_resource(struct dc *dc,
1587 + if (pipe_ctx->stream != dc_stream)
1588 + continue;
1589 +
1590 +- acquire_dsc(&dc_ctx->res_ctx, pool, &pipe_ctx->stream_res.dsc);
1591 ++ acquire_dsc(&dc_ctx->res_ctx, pool, &pipe_ctx->stream_res.dsc, i);
1592 +
1593 + /* The number of DSCs can be less than the number of pipes */
1594 + if (!pipe_ctx->stream_res.dsc) {
1595 +@@ -1669,7 +1676,7 @@ static bool dcn20_split_stream_for_odm(
1596 + next_odm_pipe->stream_res.opp = pool->opps[next_odm_pipe->pipe_idx];
1597 + #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
1598 + if (next_odm_pipe->stream->timing.flags.DSC == 1) {
1599 +- acquire_dsc(res_ctx, pool, &next_odm_pipe->stream_res.dsc);
1600 ++ acquire_dsc(res_ctx, pool, &next_odm_pipe->stream_res.dsc, next_odm_pipe->pipe_idx);
1601 + ASSERT(next_odm_pipe->stream_res.dsc);
1602 + if (next_odm_pipe->stream_res.dsc == NULL)
1603 + return false;
1604 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
1605 +index 5ab9d6240498..e95025b1d14d 100644
1606 +--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
1607 ++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
1608 +@@ -492,15 +492,23 @@ void enc2_stream_encoder_dp_unblank(
1609 + DP_VID_N_MUL, n_multiply);
1610 + }
1611 +
1612 +- /* set DIG_START to 0x1 to reset FIFO */
1613 ++ /* make sure stream is disabled before resetting steer fifo */
1614 ++ REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, false);
1615 ++ REG_WAIT(DP_VID_STREAM_CNTL, DP_VID_STREAM_STATUS, 0, 10, 5000);
1616 +
1617 ++ /* set DIG_START to 0x1 to reset FIFO */
1618 + REG_UPDATE(DIG_FE_CNTL, DIG_START, 1);
1619 ++ udelay(1);
1620 +
1621 + /* write 0 to take the FIFO out of reset */
1622 +
1623 + REG_UPDATE(DIG_FE_CNTL, DIG_START, 0);
1624 +
1625 +- /* switch DP encoder to CRTC data */
1626 ++ /* switch DP encoder to CRTC data, but reset the fifo first. It may
1627 ++ * overflow during the mode transition and sometimes doesn't recover.
1628 ++ */
1629 ++ REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 1);
1630 ++ udelay(10);
1631 +
1632 + REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 0);
1633 +
1634 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
1635 +index de182185fe1f..b0e5e64df212 100644
1636 +--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
1637 ++++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
1638 +@@ -258,7 +258,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
1639 + .vmm_page_size_bytes = 4096,
1640 + .dram_clock_change_latency_us = 23.84,
1641 + .return_bus_width_bytes = 64,
1642 +- .dispclk_dppclk_vco_speed_mhz = 3550,
1643 ++ .dispclk_dppclk_vco_speed_mhz = 3600,
1644 + .xfc_bus_transport_time_us = 4,
1645 + .xfc_xbuf_latency_tolerance_us = 4,
1646 + .use_urgent_burst_bw = 1,
1647 +diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
1648 +index 58c091ab67b2..a066e9297777 100644
1649 +--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
1650 ++++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
1651 +@@ -844,6 +844,7 @@ static int smu_sw_init(void *handle)
1652 + smu->smu_baco.platform_support = false;
1653 +
1654 + mutex_init(&smu->sensor_lock);
1655 ++ mutex_init(&smu->metrics_lock);
1656 +
1657 + smu->watermarks_bitmap = 0;
1658 + smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
1659 +diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
1660 +index d493a3f8c07a..08a717a34bd6 100644
1661 +--- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
1662 ++++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
1663 +@@ -910,18 +910,21 @@ static int arcturus_get_metrics_table(struct smu_context *smu,
1664 + struct smu_table_context *smu_table= &smu->smu_table;
1665 + int ret = 0;
1666 +
1667 ++ mutex_lock(&smu->metrics_lock);
1668 + if (!smu_table->metrics_time ||
1669 + time_after(jiffies, smu_table->metrics_time + HZ / 1000)) {
1670 + ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
1671 + (void *)smu_table->metrics_table, false);
1672 + if (ret) {
1673 + pr_info("Failed to export SMU metrics table!\n");
1674 ++ mutex_unlock(&smu->metrics_lock);
1675 + return ret;
1676 + }
1677 + smu_table->metrics_time = jiffies;
1678 + }
1679 +
1680 + memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t));
1681 ++ mutex_unlock(&smu->metrics_lock);
1682 +
1683 + return ret;
1684 + }
1685 +@@ -1388,12 +1391,17 @@ static int arcturus_get_power_profile_mode(struct smu_context *smu,
1686 + "VR",
1687 + "COMPUTE",
1688 + "CUSTOM"};
1689 ++ static const char *title[] = {
1690 ++ "PROFILE_INDEX(NAME)"};
1691 + uint32_t i, size = 0;
1692 + int16_t workload_type = 0;
1693 +
1694 + if (!smu->pm_enabled || !buf)
1695 + return -EINVAL;
1696 +
1697 ++ size += sprintf(buf + size, "%16s\n",
1698 ++ title[0]);
1699 ++
1700 + for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
1701 + /*
1702 + * Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT
1703 +diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
1704 +index 23171a4d9a31..5ad9a7878f6b 100644
1705 +--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
1706 ++++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
1707 +@@ -345,6 +345,7 @@ struct smu_context
1708 + const struct pptable_funcs *ppt_funcs;
1709 + struct mutex mutex;
1710 + struct mutex sensor_lock;
1711 ++ struct mutex metrics_lock;
1712 + uint64_t pool_size;
1713 +
1714 + struct smu_table_context smu_table;
1715 +diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
1716 +index 328e258a6895..7d913a06ebac 100644
1717 +--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
1718 ++++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
1719 +@@ -547,17 +547,20 @@ static int navi10_get_metrics_table(struct smu_context *smu,
1720 + struct smu_table_context *smu_table= &smu->smu_table;
1721 + int ret = 0;
1722 +
1723 ++ mutex_lock(&smu->metrics_lock);
1724 + if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(100))) {
1725 + ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
1726 + (void *)smu_table->metrics_table, false);
1727 + if (ret) {
1728 + pr_info("Failed to export SMU metrics table!\n");
1729 ++ mutex_unlock(&smu->metrics_lock);
1730 + return ret;
1731 + }
1732 + smu_table->metrics_time = jiffies;
1733 + }
1734 +
1735 + memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t));
1736 ++ mutex_unlock(&smu->metrics_lock);
1737 +
1738 + return ret;
1739 + }
1740 +diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
1741 +index 92c393f613d3..3c3f719971f7 100644
1742 +--- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
1743 ++++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
1744 +@@ -1691,17 +1691,20 @@ static int vega20_get_metrics_table(struct smu_context *smu,
1745 + struct smu_table_context *smu_table= &smu->smu_table;
1746 + int ret = 0;
1747 +
1748 ++ mutex_lock(&smu->metrics_lock);
1749 + if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + HZ / 1000)) {
1750 + ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
1751 + (void *)smu_table->metrics_table, false);
1752 + if (ret) {
1753 + pr_info("Failed to export SMU metrics table!\n");
1754 ++ mutex_unlock(&smu->metrics_lock);
1755 + return ret;
1756 + }
1757 + smu_table->metrics_time = jiffies;
1758 + }
1759 +
1760 + memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t));
1761 ++ mutex_unlock(&smu->metrics_lock);
1762 +
1763 + return ret;
1764 + }
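
The three get_metrics_table() hunks above (arcturus, navi10, vega20) apply the same fix: the staleness check, the hardware refresh, and the copy-out of the cached table now all run under one mutex, and the error path unlocks before returning. Below is a minimal userspace sketch of the pattern, assuming pthreads; the names and the 256-byte table are illustrative, not the kernel's:

#include <pthread.h>
#include <string.h>
#include <time.h>

struct metrics_cache {
	pthread_mutex_t lock;
	struct timespec stamp;
	int valid;
	char table[256];
};

/* stand-in for smu_update_table(): fetch fresh data from hardware */
static int refresh_from_hw(char *buf)
{
	memset(buf, 0, 256);
	return 0;
}

static double age_ms(const struct timespec *t)
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);
	return (now.tv_sec - t->tv_sec) * 1e3 +
	       (now.tv_nsec - t->tv_nsec) / 1e6;
}

int get_metrics(struct metrics_cache *c, char *out)
{
	int ret = 0;

	pthread_mutex_lock(&c->lock);
	if (!c->valid || age_ms(&c->stamp) > 1.0) { /* ~HZ/1000 in the patch */
		ret = refresh_from_hw(c->table);
		if (ret) {
			/* the fix: unlock on the error path too */
			pthread_mutex_unlock(&c->lock);
			return ret;
		}
		clock_gettime(CLOCK_MONOTONIC, &c->stamp);
		c->valid = 1;
	}
	memcpy(out, c->table, sizeof(c->table));
	pthread_mutex_unlock(&c->lock);
	return ret;
}

int main(void)
{
	static struct metrics_cache cache = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
	};
	char out[256];

	return get_metrics(&cache, out);
}
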
1765 +diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
1766 +index 06a506c29463..d564bfcab6a3 100644
1767 +--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
1768 ++++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
1769 +@@ -525,7 +525,8 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
1770 + */
1771 + if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
1772 + &rq->fence.flags)) {
1773 +- spin_lock(&rq->lock);
1774 ++ spin_lock_nested(&rq->lock,
1775 ++ SINGLE_DEPTH_NESTING);
1776 + i915_request_cancel_breadcrumb(rq);
1777 + spin_unlock(&rq->lock);
1778 + }
1779 +diff --git a/drivers/gpu/drm/mcde/mcde_dsi.c b/drivers/gpu/drm/mcde/mcde_dsi.c
1780 +index f9c9e32b299c..35bb825d1918 100644
1781 +--- a/drivers/gpu/drm/mcde/mcde_dsi.c
1782 ++++ b/drivers/gpu/drm/mcde/mcde_dsi.c
1783 +@@ -935,11 +935,13 @@ static int mcde_dsi_bind(struct device *dev, struct device *master,
1784 + for_each_available_child_of_node(dev->of_node, child) {
1785 + panel = of_drm_find_panel(child);
1786 + if (IS_ERR(panel)) {
1787 +- dev_err(dev, "failed to find panel try bridge (%lu)\n",
1788 ++ dev_err(dev, "failed to find panel try bridge (%ld)\n",
1789 + PTR_ERR(panel));
1790 ++ panel = NULL;
1791 ++
1792 + bridge = of_drm_find_bridge(child);
1793 + if (IS_ERR(bridge)) {
1794 +- dev_err(dev, "failed to find bridge (%lu)\n",
1795 ++ dev_err(dev, "failed to find bridge (%ld)\n",
1796 + PTR_ERR(bridge));
1797 + return PTR_ERR(bridge);
1798 + }
1799 +diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
1800 +index a052364a5d74..edd45f434ccd 100644
1801 +--- a/drivers/gpu/drm/msm/msm_gpu.c
1802 ++++ b/drivers/gpu/drm/msm/msm_gpu.c
1803 +@@ -16,6 +16,7 @@
1804 + #include <linux/pm_opp.h>
1805 + #include <linux/devfreq.h>
1806 + #include <linux/devcoredump.h>
1807 ++#include <linux/sched/task.h>
1808 +
1809 + /*
1810 + * Power Management:
1811 +diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
1812 +index b5b1a34f896f..d735ea7e2d88 100644
1813 +--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
1814 ++++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
1815 +@@ -326,9 +326,9 @@ nv50_outp_atomic_check_view(struct drm_encoder *encoder,
1816 + * same size as the native one (e.g. different
1817 + * refresh rate)
1818 + */
1819 +- if (adjusted_mode->hdisplay == native_mode->hdisplay &&
1820 +- adjusted_mode->vdisplay == native_mode->vdisplay &&
1821 +- adjusted_mode->type & DRM_MODE_TYPE_DRIVER)
1822 ++ if (mode->hdisplay == native_mode->hdisplay &&
1823 ++ mode->vdisplay == native_mode->vdisplay &&
1824 ++ mode->type & DRM_MODE_TYPE_DRIVER)
1825 + break;
1826 + mode = native_mode;
1827 + asyc->scaler.full = true;
1828 +diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
1829 +index a442a955f98c..eb31c5b6c8e9 100644
1830 +--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
1831 ++++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
1832 +@@ -245,14 +245,22 @@ nouveau_conn_atomic_duplicate_state(struct drm_connector *connector)
1833 + void
1834 + nouveau_conn_reset(struct drm_connector *connector)
1835 + {
1836 ++ struct nouveau_connector *nv_connector = nouveau_connector(connector);
1837 + struct nouveau_conn_atom *asyc;
1838 +
1839 +- if (WARN_ON(!(asyc = kzalloc(sizeof(*asyc), GFP_KERNEL))))
1840 +- return;
1841 ++ if (drm_drv_uses_atomic_modeset(connector->dev)) {
1842 ++ if (WARN_ON(!(asyc = kzalloc(sizeof(*asyc), GFP_KERNEL))))
1843 ++ return;
1844 ++
1845 ++ if (connector->state)
1846 ++ nouveau_conn_atomic_destroy_state(connector,
1847 ++ connector->state);
1848 ++
1849 ++ __drm_atomic_helper_connector_reset(connector, &asyc->state);
1850 ++ } else {
1851 ++ asyc = &nv_connector->properties_state;
1852 ++ }
1853 +
1854 +- if (connector->state)
1855 +- nouveau_conn_atomic_destroy_state(connector, connector->state);
1856 +- __drm_atomic_helper_connector_reset(connector, &asyc->state);
1857 + asyc->dither.mode = DITHERING_MODE_AUTO;
1858 + asyc->dither.depth = DITHERING_DEPTH_AUTO;
1859 + asyc->scaler.mode = DRM_MODE_SCALE_NONE;
1860 +@@ -276,8 +284,14 @@ void
1861 + nouveau_conn_attach_properties(struct drm_connector *connector)
1862 + {
1863 + struct drm_device *dev = connector->dev;
1864 +- struct nouveau_conn_atom *armc = nouveau_conn_atom(connector->state);
1865 + struct nouveau_display *disp = nouveau_display(dev);
1866 ++ struct nouveau_connector *nv_connector = nouveau_connector(connector);
1867 ++ struct nouveau_conn_atom *armc;
1868 ++
1869 ++ if (drm_drv_uses_atomic_modeset(connector->dev))
1870 ++ armc = nouveau_conn_atom(connector->state);
1871 ++ else
1872 ++ armc = &nv_connector->properties_state;
1873 +
1874 + /* Init DVI-I specific properties. */
1875 + if (connector->connector_type == DRM_MODE_CONNECTOR_DVII)
1876 +@@ -749,9 +763,9 @@ static int
1877 + nouveau_connector_set_property(struct drm_connector *connector,
1878 + struct drm_property *property, uint64_t value)
1879 + {
1880 +- struct nouveau_conn_atom *asyc = nouveau_conn_atom(connector->state);
1881 + struct nouveau_connector *nv_connector = nouveau_connector(connector);
1882 + struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
1883 ++ struct nouveau_conn_atom *asyc = &nv_connector->properties_state;
1884 + struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
1885 + int ret;
1886 +
1887 +diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
1888 +index f43a8d63aef8..de84fb4708c7 100644
1889 +--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
1890 ++++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
1891 +@@ -29,6 +29,7 @@
1892 +
1893 + #include <nvif/notify.h>
1894 +
1895 ++#include <drm/drm_crtc.h>
1896 + #include <drm/drm_edid.h>
1897 + #include <drm/drm_encoder.h>
1898 + #include <drm/drm_dp_helper.h>
1899 +@@ -44,6 +45,60 @@ struct dcb_output;
1900 + struct nouveau_backlight;
1901 + #endif
1902 +
1903 ++#define nouveau_conn_atom(p) \
1904 ++ container_of((p), struct nouveau_conn_atom, state)
1905 ++
1906 ++struct nouveau_conn_atom {
1907 ++ struct drm_connector_state state;
1908 ++
1909 ++ struct {
1910 ++ /* The enum values specifically defined here match nv50/gf119
1911 ++ * hw values, and the code relies on this.
1912 ++ */
1913 ++ enum {
1914 ++ DITHERING_MODE_OFF = 0x00,
1915 ++ DITHERING_MODE_ON = 0x01,
1916 ++ DITHERING_MODE_DYNAMIC2X2 = 0x10 | DITHERING_MODE_ON,
1917 ++ DITHERING_MODE_STATIC2X2 = 0x18 | DITHERING_MODE_ON,
1918 ++ DITHERING_MODE_TEMPORAL = 0x20 | DITHERING_MODE_ON,
1919 ++ DITHERING_MODE_AUTO
1920 ++ } mode;
1921 ++ enum {
1922 ++ DITHERING_DEPTH_6BPC = 0x00,
1923 ++ DITHERING_DEPTH_8BPC = 0x02,
1924 ++ DITHERING_DEPTH_AUTO
1925 ++ } depth;
1926 ++ } dither;
1927 ++
1928 ++ struct {
1929 ++ int mode; /* DRM_MODE_SCALE_* */
1930 ++ struct {
1931 ++ enum {
1932 ++ UNDERSCAN_OFF,
1933 ++ UNDERSCAN_ON,
1934 ++ UNDERSCAN_AUTO,
1935 ++ } mode;
1936 ++ u32 hborder;
1937 ++ u32 vborder;
1938 ++ } underscan;
1939 ++ bool full;
1940 ++ } scaler;
1941 ++
1942 ++ struct {
1943 ++ int color_vibrance;
1944 ++ int vibrant_hue;
1945 ++ } procamp;
1946 ++
1947 ++ union {
1948 ++ struct {
1949 ++ bool dither:1;
1950 ++ bool scaler:1;
1951 ++ bool procamp:1;
1952 ++ };
1953 ++ u8 mask;
1954 ++ } set;
1955 ++};
1956 ++
1957 + struct nouveau_connector {
1958 + struct drm_connector base;
1959 + enum dcb_connector_type type;
1960 +@@ -63,6 +118,12 @@ struct nouveau_connector {
1961 + #ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
1962 + struct nouveau_backlight *backlight;
1963 + #endif
1964 ++ /*
1965 ++ * Our connector property code expects a nouveau_conn_atom struct
1966 ++ * even on pre-nv50 where we do not support atomic. This embedded
1967 ++ * version gets used in the non-atomic modeset case.
1968 ++ */
1969 ++ struct nouveau_conn_atom properties_state;
1970 + };
1971 +
1972 + static inline struct nouveau_connector *nouveau_connector(
1973 +@@ -121,61 +182,6 @@ extern int nouveau_ignorelid;
1974 + extern int nouveau_duallink;
1975 + extern int nouveau_hdmimhz;
1976 +
1977 +-#include <drm/drm_crtc.h>
1978 +-#define nouveau_conn_atom(p) \
1979 +- container_of((p), struct nouveau_conn_atom, state)
1980 +-
1981 +-struct nouveau_conn_atom {
1982 +- struct drm_connector_state state;
1983 +-
1984 +- struct {
1985 +- /* The enum values specifically defined here match nv50/gf119
1986 +- * hw values, and the code relies on this.
1987 +- */
1988 +- enum {
1989 +- DITHERING_MODE_OFF = 0x00,
1990 +- DITHERING_MODE_ON = 0x01,
1991 +- DITHERING_MODE_DYNAMIC2X2 = 0x10 | DITHERING_MODE_ON,
1992 +- DITHERING_MODE_STATIC2X2 = 0x18 | DITHERING_MODE_ON,
1993 +- DITHERING_MODE_TEMPORAL = 0x20 | DITHERING_MODE_ON,
1994 +- DITHERING_MODE_AUTO
1995 +- } mode;
1996 +- enum {
1997 +- DITHERING_DEPTH_6BPC = 0x00,
1998 +- DITHERING_DEPTH_8BPC = 0x02,
1999 +- DITHERING_DEPTH_AUTO
2000 +- } depth;
2001 +- } dither;
2002 +-
2003 +- struct {
2004 +- int mode; /* DRM_MODE_SCALE_* */
2005 +- struct {
2006 +- enum {
2007 +- UNDERSCAN_OFF,
2008 +- UNDERSCAN_ON,
2009 +- UNDERSCAN_AUTO,
2010 +- } mode;
2011 +- u32 hborder;
2012 +- u32 vborder;
2013 +- } underscan;
2014 +- bool full;
2015 +- } scaler;
2016 +-
2017 +- struct {
2018 +- int color_vibrance;
2019 +- int vibrant_hue;
2020 +- } procamp;
2021 +-
2022 +- union {
2023 +- struct {
2024 +- bool dither:1;
2025 +- bool scaler:1;
2026 +- bool procamp:1;
2027 +- };
2028 +- u8 mask;
2029 +- } set;
2030 +-};
2031 +-
2032 + void nouveau_conn_attach_properties(struct drm_connector *);
2033 + void nouveau_conn_reset(struct drm_connector *);
2034 + struct drm_connector_state *
2035 +diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
2036 +index eb8071a4d6d0..9c3bdfd20337 100644
2037 +--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
2038 ++++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
2039 +@@ -683,8 +683,6 @@ static void sun4i_hdmi_unbind(struct device *dev, struct device *master,
2040 + struct sun4i_hdmi *hdmi = dev_get_drvdata(dev);
2041 +
2042 + cec_unregister_adapter(hdmi->cec_adap);
2043 +- drm_connector_cleanup(&hdmi->connector);
2044 +- drm_encoder_cleanup(&hdmi->encoder);
2045 + i2c_del_adapter(hdmi->i2c);
2046 + i2c_put_adapter(hdmi->ddc_i2c);
2047 + clk_disable_unprepare(hdmi->mod_clk);
2048 +diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
2049 +index 7608ee053114..ac44bf752ff1 100644
2050 +--- a/drivers/hid/i2c-hid/i2c-hid-core.c
2051 ++++ b/drivers/hid/i2c-hid/i2c-hid-core.c
2052 +@@ -48,6 +48,7 @@
2053 + #define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV BIT(0)
2054 + #define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET BIT(1)
2055 + #define I2C_HID_QUIRK_BOGUS_IRQ BIT(4)
2056 ++#define I2C_HID_QUIRK_RESET_ON_RESUME BIT(5)
2057 +
2058 + /* flags */
2059 + #define I2C_HID_STARTED 0
2060 +@@ -174,6 +175,8 @@ static const struct i2c_hid_quirks {
2061 + I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
2062 + { USB_VENDOR_ID_ELAN, HID_ANY_ID,
2063 + I2C_HID_QUIRK_BOGUS_IRQ },
2064 ++ { USB_VENDOR_ID_ALPS_JP, HID_ANY_ID,
2065 ++ I2C_HID_QUIRK_RESET_ON_RESUME },
2066 + { 0, 0 }
2067 + };
2068 +
2069 +@@ -1214,8 +1217,15 @@ static int i2c_hid_resume(struct device *dev)
2070 + * solves "incomplete reports" on Raydium devices 2386:3118 and
2071 + * 2386:4B33 and fixes various SIS touchscreens no longer sending
2072 + * data after a suspend/resume.
2073 ++ *
2074 ++ * However, some ALPS touchpads generate an IRQ storm without a
2075 ++ * reset, so still reset them here.
2076 + */
2077 +- ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
2078 ++ if (ihid->quirks & I2C_HID_QUIRK_RESET_ON_RESUME)
2079 ++ ret = i2c_hid_hwreset(client);
2080 ++ else
2081 ++ ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
2082 ++
2083 + if (ret)
2084 + return ret;
2085 +
2086 +diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
2087 +index 2e37f8a6d8cf..be661396095c 100644
2088 +--- a/drivers/iio/accel/st_accel_core.c
2089 ++++ b/drivers/iio/accel/st_accel_core.c
2090 +@@ -993,6 +993,7 @@ static const struct iio_trigger_ops st_accel_trigger_ops = {
2091 + #define ST_ACCEL_TRIGGER_OPS NULL
2092 + #endif
2093 +
2094 ++#ifdef CONFIG_ACPI
2095 + static const struct iio_mount_matrix *
2096 + get_mount_matrix(const struct iio_dev *indio_dev,
2097 + const struct iio_chan_spec *chan)
2098 +@@ -1013,7 +1014,6 @@ static const struct iio_chan_spec_ext_info mount_matrix_ext_info[] = {
2099 + static int apply_acpi_orientation(struct iio_dev *indio_dev,
2100 + struct iio_chan_spec *channels)
2101 + {
2102 +-#ifdef CONFIG_ACPI
2103 + struct st_sensor_data *adata = iio_priv(indio_dev);
2104 + struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
2105 + struct acpi_device *adev;
2106 +@@ -1141,10 +1141,14 @@ static int apply_acpi_orientation(struct iio_dev *indio_dev,
2107 + out:
2108 + kfree(buffer.pointer);
2109 + return ret;
2110 ++}
2111 + #else /* !CONFIG_ACPI */
2112 ++static int apply_acpi_orientation(struct iio_dev *indio_dev,
2113 ++ struct iio_chan_spec *channels)
2114 ++{
2115 + return 0;
2116 +-#endif
2117 + }
2118 ++#endif
2119 +
2120 + /*
2121 + * st_accel_get_settings() - get sensor settings from device name
2122 +diff --git a/drivers/iio/adc/max9611.c b/drivers/iio/adc/max9611.c
2123 +index da073d72f649..e480529b3f04 100644
2124 +--- a/drivers/iio/adc/max9611.c
2125 ++++ b/drivers/iio/adc/max9611.c
2126 +@@ -89,6 +89,12 @@
2127 + #define MAX9611_TEMP_SCALE_NUM 1000000
2128 + #define MAX9611_TEMP_SCALE_DIV 2083
2129 +
2130 ++/*
2131 ++ * Conversion time is typically 2 ms at Ta = 25 degrees C.
2132 ++ * No maximum value is specified, so play it safe.
2133 ++ */
2134 ++#define MAX9611_CONV_TIME_US_RANGE 3000, 3300
2135 ++
2136 + struct max9611_dev {
2137 + struct device *dev;
2138 + struct i2c_client *i2c_client;
2139 +@@ -236,11 +242,9 @@ static int max9611_read_single(struct max9611_dev *max9611,
2140 + return ret;
2141 + }
2142 +
2143 +- /*
2144 +- * need a delay here to make register configuration
2145 +- * stabilize. 1 msec at least, from empirical testing.
2146 +- */
2147 +- usleep_range(1000, 2000);
2148 ++ /* need a delay here to make register configuration stabilize. */
2149 ++
2150 ++ usleep_range(MAX9611_CONV_TIME_US_RANGE);
2151 +
2152 + ret = i2c_smbus_read_word_swapped(max9611->i2c_client, reg_addr);
2153 + if (ret < 0) {
2154 +@@ -507,7 +511,7 @@ static int max9611_init(struct max9611_dev *max9611)
2155 + MAX9611_REG_CTRL2, 0);
2156 + return ret;
2157 + }
2158 +- usleep_range(1000, 2000);
2159 ++ usleep_range(MAX9611_CONV_TIME_US_RANGE);
2160 +
2161 + return 0;
2162 + }
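
MAX9611_CONV_TIME_US_RANGE expands to two comma-separated values, so one macro supplies both arguments of usleep_range() and the min/max pair cannot drift apart between the two call sites. A small sketch of the expansion trick, with usleep_range() stubbed so it compiles outside the kernel:

#include <stdio.h>

#define CONV_TIME_US_RANGE 3000, 3300 /* min, max in microseconds */

static void usleep_range(unsigned long min_us, unsigned long max_us)
{
	printf("sleep between %lu and %lu us\n", min_us, max_us);
}

int main(void)
{
	usleep_range(CONV_TIME_US_RANGE); /* expands to usleep_range(3000, 3300) */
	return 0;
}
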
2163 +diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
2164 +index d78f67623f24..50052e9a1731 100644
2165 +--- a/drivers/infiniband/core/cma.c
2166 ++++ b/drivers/infiniband/core/cma.c
2167 +@@ -4736,6 +4736,7 @@ err_ib:
2168 + err:
2169 + unregister_netdevice_notifier(&cma_nb);
2170 + ib_sa_unregister_client(&sa_client);
2171 ++ unregister_pernet_subsys(&cma_pernet_operations);
2172 + err_wq:
2173 + destroy_workqueue(cma_wq);
2174 + return ret;
2175 +diff --git a/drivers/infiniband/core/counters.c b/drivers/infiniband/core/counters.c
2176 +index 680ad27f497d..023478107f0e 100644
2177 +--- a/drivers/infiniband/core/counters.c
2178 ++++ b/drivers/infiniband/core/counters.c
2179 +@@ -282,6 +282,9 @@ int rdma_counter_bind_qp_auto(struct ib_qp *qp, u8 port)
2180 + struct rdma_counter *counter;
2181 + int ret;
2182 +
2183 ++ if (!qp->res.valid)
2184 ++ return 0;
2185 ++
2186 + if (!rdma_is_port_valid(dev, port))
2187 + return -EINVAL;
2188 +
2189 +diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
2190 +index 8d2f1e38b891..907d99822bf0 100644
2191 +--- a/drivers/infiniband/hw/mlx4/main.c
2192 ++++ b/drivers/infiniband/hw/mlx4/main.c
2193 +@@ -3008,16 +3008,17 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
2194 + ibdev->ib_active = false;
2195 + flush_workqueue(wq);
2196 +
2197 +- mlx4_ib_close_sriov(ibdev);
2198 +- mlx4_ib_mad_cleanup(ibdev);
2199 +- ib_unregister_device(&ibdev->ib_dev);
2200 +- mlx4_ib_diag_cleanup(ibdev);
2201 + if (ibdev->iboe.nb.notifier_call) {
2202 + if (unregister_netdevice_notifier(&ibdev->iboe.nb))
2203 + pr_warn("failure unregistering notifier\n");
2204 + ibdev->iboe.nb.notifier_call = NULL;
2205 + }
2206 +
2207 ++ mlx4_ib_close_sriov(ibdev);
2208 ++ mlx4_ib_mad_cleanup(ibdev);
2209 ++ ib_unregister_device(&ibdev->ib_dev);
2210 ++ mlx4_ib_diag_cleanup(ibdev);
2211 ++
2212 + mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
2213 + ibdev->steer_qpn_count);
2214 + kfree(ibdev->ib_uc_qpns_bitmap);
2215 +diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
2216 +index 831539419c30..e1cfbedefcbc 100644
2217 +--- a/drivers/infiniband/hw/mlx5/main.c
2218 ++++ b/drivers/infiniband/hw/mlx5/main.c
2219 +@@ -3548,10 +3548,6 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
2220 + }
2221 +
2222 + INIT_LIST_HEAD(&handler->list);
2223 +- if (dst) {
2224 +- memcpy(&dest_arr[0], dst, sizeof(*dst));
2225 +- dest_num++;
2226 +- }
2227 +
2228 + for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
2229 + err = parse_flow_attr(dev->mdev, spec,
2230 +@@ -3564,6 +3560,11 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
2231 + ib_flow += ((union ib_flow_spec *)ib_flow)->size;
2232 + }
2233 +
2234 ++ if (dst && !(flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DROP)) {
2235 ++ memcpy(&dest_arr[0], dst, sizeof(*dst));
2236 ++ dest_num++;
2237 ++ }
2238 ++
2239 + if (!flow_is_multicast_only(flow_attr))
2240 + set_underlay_qp(dev, spec, underlay_qpn);
2241 +
2242 +@@ -3604,10 +3605,8 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
2243 + }
2244 +
2245 + if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DROP) {
2246 +- if (!(flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT)) {
2247 ++ if (!dest_num)
2248 + rule_dst = NULL;
2249 +- dest_num = 0;
2250 +- }
2251 + } else {
2252 + if (is_egress)
2253 + flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW;
2254 +diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c
2255 +index f9a492ed900b..831ad578a7b2 100644
2256 +--- a/drivers/infiniband/sw/rxe/rxe_recv.c
2257 ++++ b/drivers/infiniband/sw/rxe/rxe_recv.c
2258 +@@ -389,7 +389,7 @@ void rxe_rcv(struct sk_buff *skb)
2259 +
2260 + calc_icrc = rxe_icrc_hdr(pkt, skb);
2261 + calc_icrc = rxe_crc32(rxe, calc_icrc, (u8 *)payload_addr(pkt),
2262 +- payload_size(pkt));
2263 ++ payload_size(pkt) + bth_pad(pkt));
2264 + calc_icrc = (__force u32)cpu_to_be32(~calc_icrc);
2265 + if (unlikely(calc_icrc != pack_icrc)) {
2266 + if (skb->protocol == htons(ETH_P_IPV6))
2267 +diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
2268 +index c5d9b558fa90..e5031172c019 100644
2269 +--- a/drivers/infiniband/sw/rxe/rxe_req.c
2270 ++++ b/drivers/infiniband/sw/rxe/rxe_req.c
2271 +@@ -500,6 +500,12 @@ static int fill_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
2272 + if (err)
2273 + return err;
2274 + }
2275 ++ if (bth_pad(pkt)) {
2276 ++ u8 *pad = payload_addr(pkt) + paylen;
2277 ++
2278 ++ memset(pad, 0, bth_pad(pkt));
2279 ++ crc = rxe_crc32(rxe, crc, pad, bth_pad(pkt));
2280 ++ }
2281 + }
2282 + p = payload_addr(pkt) + paylen + bth_pad(pkt);
2283 +
2284 +diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
2285 +index 1cbfbd98eb22..c4a8195bf670 100644
2286 +--- a/drivers/infiniband/sw/rxe/rxe_resp.c
2287 ++++ b/drivers/infiniband/sw/rxe/rxe_resp.c
2288 +@@ -732,6 +732,13 @@ static enum resp_states read_reply(struct rxe_qp *qp,
2289 + if (err)
2290 + pr_err("Failed copying memory\n");
2291 +
2292 ++ if (bth_pad(&ack_pkt)) {
2293 ++ struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
2294 ++ u8 *pad = payload_addr(&ack_pkt) + payload;
2295 ++
2296 ++ memset(pad, 0, bth_pad(&ack_pkt));
2297 ++ icrc = rxe_crc32(rxe, icrc, pad, bth_pad(&ack_pkt));
2298 ++ }
2299 + p = payload_addr(&ack_pkt) + payload + bth_pad(&ack_pkt);
2300 + *p = ~icrc;
2301 +
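
The three rxe hunks above fix the same omission on the receive, request, and response paths: the ICRC must also cover the zeroed pad bytes that round the payload up to a 4-byte boundary. A userspace sketch of the idea, using zlib's crc32() as a stand-in for rxe_crc32() (the polynomial differs from the real ICRC; only the payload-plus-pad coverage matters here):

#include <string.h>
#include <stdio.h>
#include <zlib.h>

int main(void)
{
	unsigned char payload[7] = "abcdefg";          /* 7 bytes of payload */
	unsigned pad = (4 - sizeof(payload) % 4) % 4;  /* bth_pad() analogue: 1 */
	unsigned char buf[8];

	memcpy(buf, payload, sizeof(payload));
	memset(buf + sizeof(payload), 0, pad);         /* pad bytes must be zeroed */

	unsigned long crc = crc32(0L, Z_NULL, 0);
	crc = crc32(crc, buf, sizeof(payload) + pad);  /* cover payload + pad */
	printf("crc=%08lx over %u bytes\n", crc,
	       (unsigned)sizeof(payload) + pad);
	return 0;
}
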
2302 +diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
2303 +index 9b159132405d..dca88f9fdf29 100644
2304 +--- a/drivers/iommu/intel-svm.c
2305 ++++ b/drivers/iommu/intel-svm.c
2306 +@@ -104,11 +104,7 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d
2307 + {
2308 + struct qi_desc desc;
2309 +
2310 +- /*
2311 +- * Do PASID granu IOTLB invalidation if page selective capability is
2312 +- * not available.
2313 +- */
2314 +- if (pages == -1 || !cap_pgsel_inv(svm->iommu->cap)) {
2315 ++ if (pages == -1) {
2316 + desc.qw0 = QI_EIOTLB_PASID(svm->pasid) |
2317 + QI_EIOTLB_DID(sdev->did) |
2318 + QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
2319 +diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
2320 +index bb29aeefcbd0..c7137f50bd1d 100644
2321 +--- a/drivers/md/raid1.c
2322 ++++ b/drivers/md/raid1.c
2323 +@@ -2781,7 +2781,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
2324 + write_targets++;
2325 + }
2326 + }
2327 +- if (bio->bi_end_io) {
2328 ++ if (rdev && bio->bi_end_io) {
2329 + atomic_inc(&rdev->nr_pending);
2330 + bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
2331 + bio_set_dev(bio, rdev->bdev);
2332 +diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
2333 +index 12a8ce83786e..36cd7c2fbf40 100644
2334 +--- a/drivers/md/raid5.c
2335 ++++ b/drivers/md/raid5.c
2336 +@@ -5726,7 +5726,7 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
2337 + do_flush = false;
2338 + }
2339 +
2340 +- if (!sh->batch_head)
2341 ++ if (!sh->batch_head || sh == sh->batch_head)
2342 + set_bit(STRIPE_HANDLE, &sh->state);
2343 + clear_bit(STRIPE_DELAYED, &sh->state);
2344 + if ((!sh->batch_head || sh == sh->batch_head) &&
2345 +diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c
2346 +index 5ef7daeb8cbd..b14c09cd9593 100644
2347 +--- a/drivers/media/cec/cec-adap.c
2348 ++++ b/drivers/media/cec/cec-adap.c
2349 +@@ -378,7 +378,8 @@ static void cec_data_cancel(struct cec_data *data, u8 tx_status)
2350 + } else {
2351 + list_del_init(&data->list);
2352 + if (!(data->msg.tx_status & CEC_TX_STATUS_OK))
2353 +- data->adap->transmit_queue_sz--;
2354 ++ if (!WARN_ON(!data->adap->transmit_queue_sz))
2355 ++ data->adap->transmit_queue_sz--;
2356 + }
2357 +
2358 + if (data->msg.tx_status & CEC_TX_STATUS_OK) {
2359 +@@ -430,6 +431,14 @@ static void cec_flush(struct cec_adapter *adap)
2360 + * need to do anything special in that case.
2361 + */
2362 + }
2363 ++ /*
2364 ++ * If something went wrong and this counter isn't what it should
2365 ++ * be, then this will reset it back to 0. Warn if it is not 0,
2366 ++ * since it indicates a bug, either in this framework or in a
2367 ++ * CEC driver.
2368 ++ */
2369 ++ if (WARN_ON(adap->transmit_queue_sz))
2370 ++ adap->transmit_queue_sz = 0;
2371 + }
2372 +
2373 + /*
2374 +@@ -454,7 +463,7 @@ int cec_thread_func(void *_adap)
2375 + bool timeout = false;
2376 + u8 attempts;
2377 +
2378 +- if (adap->transmitting) {
2379 ++ if (adap->transmit_in_progress) {
2380 + int err;
2381 +
2382 + /*
2383 +@@ -489,7 +498,7 @@ int cec_thread_func(void *_adap)
2384 + goto unlock;
2385 + }
2386 +
2387 +- if (adap->transmitting && timeout) {
2388 ++ if (adap->transmit_in_progress && timeout) {
2389 + /*
2390 + * If we timeout, then log that. Normally this does
2391 + * not happen and it is an indication of a faulty CEC
2392 +@@ -498,14 +507,18 @@ int cec_thread_func(void *_adap)
2393 + * so much traffic on the bus that the adapter was
2394 + * unable to transmit for CEC_XFER_TIMEOUT_MS (2.1s).
2395 + */
2396 +- pr_warn("cec-%s: message %*ph timed out\n", adap->name,
2397 +- adap->transmitting->msg.len,
2398 +- adap->transmitting->msg.msg);
2399 ++ if (adap->transmitting) {
2400 ++ pr_warn("cec-%s: message %*ph timed out\n", adap->name,
2401 ++ adap->transmitting->msg.len,
2402 ++ adap->transmitting->msg.msg);
2403 ++ /* Just give up on this. */
2404 ++ cec_data_cancel(adap->transmitting,
2405 ++ CEC_TX_STATUS_TIMEOUT);
2406 ++ } else {
2407 ++ pr_warn("cec-%s: transmit timed out\n", adap->name);
2408 ++ }
2409 + adap->transmit_in_progress = false;
2410 + adap->tx_timeouts++;
2411 +- /* Just give up on this. */
2412 +- cec_data_cancel(adap->transmitting,
2413 +- CEC_TX_STATUS_TIMEOUT);
2414 + goto unlock;
2415 + }
2416 +
2417 +@@ -520,7 +533,8 @@ int cec_thread_func(void *_adap)
2418 + data = list_first_entry(&adap->transmit_queue,
2419 + struct cec_data, list);
2420 + list_del_init(&data->list);
2421 +- adap->transmit_queue_sz--;
2422 ++ if (!WARN_ON(!data->adap->transmit_queue_sz))
2423 ++ adap->transmit_queue_sz--;
2424 +
2425 + /* Make this the current transmitting message */
2426 + adap->transmitting = data;
2427 +@@ -1083,11 +1097,11 @@ void cec_received_msg_ts(struct cec_adapter *adap,
2428 + valid_la = false;
2429 + else if (!cec_msg_is_broadcast(msg) && !(dir_fl & DIRECTED))
2430 + valid_la = false;
2431 +- else if (cec_msg_is_broadcast(msg) && !(dir_fl & BCAST1_4))
2432 ++ else if (cec_msg_is_broadcast(msg) && !(dir_fl & BCAST))
2433 + valid_la = false;
2434 + else if (cec_msg_is_broadcast(msg) &&
2435 +- adap->log_addrs.cec_version >= CEC_OP_CEC_VERSION_2_0 &&
2436 +- !(dir_fl & BCAST2_0))
2437 ++ adap->log_addrs.cec_version < CEC_OP_CEC_VERSION_2_0 &&
2438 ++ !(dir_fl & BCAST1_4))
2439 + valid_la = false;
2440 + }
2441 + if (valid_la && min_len) {
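
Several of the cec-adap.c hunks above guard the same invariant: transmit_queue_sz must never be decremented past zero, and an attempt to do so is a framework or driver bug worth a warning rather than a silent wraparound. A minimal sketch of that defensive decrement, with WARN_ON() modeled in userspace:

#include <stdio.h>

#define WARN_ON(cond) \
	((cond) ? (fprintf(stderr, "WARN: %s\n", #cond), 1) : 0)

static unsigned int transmit_queue_sz;

static void dequeue_one(void)
{
	if (!WARN_ON(!transmit_queue_sz))
		transmit_queue_sz--;
}

int main(void)
{
	transmit_queue_sz = 1;
	dequeue_one(); /* ok: 1 -> 0 */
	dequeue_one(); /* would underflow: warns, counter stays 0 */
	printf("queue size: %u\n", transmit_queue_sz);
	return 0;
}
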
2442 +diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c
2443 +index d1331f828108..039963a7765b 100644
2444 +--- a/drivers/media/usb/b2c2/flexcop-usb.c
2445 ++++ b/drivers/media/usb/b2c2/flexcop-usb.c
2446 +@@ -295,7 +295,7 @@ static int flexcop_usb_i2c_req(struct flexcop_i2c_adapter *i2c,
2447 +
2448 + mutex_unlock(&fc_usb->data_mutex);
2449 +
2450 +- return 0;
2451 ++ return ret;
2452 + }
2453 +
2454 + /* actual bus specific access functions,
2455 +diff --git a/drivers/media/usb/dvb-usb/af9005.c b/drivers/media/usb/dvb-usb/af9005.c
2456 +index 02697d86e8c1..ac93e88d7038 100644
2457 +--- a/drivers/media/usb/dvb-usb/af9005.c
2458 ++++ b/drivers/media/usb/dvb-usb/af9005.c
2459 +@@ -976,8 +976,9 @@ static int af9005_identify_state(struct usb_device *udev,
2460 + else if (reply == 0x02)
2461 + *cold = 0;
2462 + else
2463 +- return -EIO;
2464 +- deb_info("Identify state cold = %d\n", *cold);
2465 ++ ret = -EIO;
2466 ++ if (!ret)
2467 ++ deb_info("Identify state cold = %d\n", *cold);
2468 +
2469 + err:
2470 + kfree(buf);
2471 +diff --git a/drivers/media/usb/pulse8-cec/pulse8-cec.c b/drivers/media/usb/pulse8-cec/pulse8-cec.c
2472 +index ac88ade94cda..59609556d969 100644
2473 +--- a/drivers/media/usb/pulse8-cec/pulse8-cec.c
2474 ++++ b/drivers/media/usb/pulse8-cec/pulse8-cec.c
2475 +@@ -116,6 +116,7 @@ struct pulse8 {
2476 + unsigned int vers;
2477 + struct completion cmd_done;
2478 + struct work_struct work;
2479 ++ u8 work_result;
2480 + struct delayed_work ping_eeprom_work;
2481 + struct cec_msg rx_msg;
2482 + u8 data[DATA_SIZE];
2483 +@@ -137,8 +138,10 @@ static void pulse8_irq_work_handler(struct work_struct *work)
2484 + {
2485 + struct pulse8 *pulse8 =
2486 + container_of(work, struct pulse8, work);
2487 ++ u8 result = pulse8->work_result;
2488 +
2489 +- switch (pulse8->data[0] & 0x3f) {
2490 ++ pulse8->work_result = 0;
2491 ++ switch (result & 0x3f) {
2492 + case MSGCODE_FRAME_DATA:
2493 + cec_received_msg(pulse8->adap, &pulse8->rx_msg);
2494 + break;
2495 +@@ -172,12 +175,12 @@ static irqreturn_t pulse8_interrupt(struct serio *serio, unsigned char data,
2496 + pulse8->escape = false;
2497 + } else if (data == MSGEND) {
2498 + struct cec_msg *msg = &pulse8->rx_msg;
2499 ++ u8 msgcode = pulse8->buf[0];
2500 +
2501 + if (debug)
2502 + dev_info(pulse8->dev, "received: %*ph\n",
2503 + pulse8->idx, pulse8->buf);
2504 +- pulse8->data[0] = pulse8->buf[0];
2505 +- switch (pulse8->buf[0] & 0x3f) {
2506 ++ switch (msgcode & 0x3f) {
2507 + case MSGCODE_FRAME_START:
2508 + msg->len = 1;
2509 + msg->msg[0] = pulse8->buf[1];
2510 +@@ -186,14 +189,20 @@ static irqreturn_t pulse8_interrupt(struct serio *serio, unsigned char data,
2511 + if (msg->len == CEC_MAX_MSG_SIZE)
2512 + break;
2513 + msg->msg[msg->len++] = pulse8->buf[1];
2514 +- if (pulse8->buf[0] & MSGCODE_FRAME_EOM)
2515 ++ if (msgcode & MSGCODE_FRAME_EOM) {
2516 ++ WARN_ON(pulse8->work_result);
2517 ++ pulse8->work_result = msgcode;
2518 + schedule_work(&pulse8->work);
2519 ++ break;
2520 ++ }
2521 + break;
2522 + case MSGCODE_TRANSMIT_SUCCEEDED:
2523 + case MSGCODE_TRANSMIT_FAILED_LINE:
2524 + case MSGCODE_TRANSMIT_FAILED_ACK:
2525 + case MSGCODE_TRANSMIT_FAILED_TIMEOUT_DATA:
2526 + case MSGCODE_TRANSMIT_FAILED_TIMEOUT_LINE:
2527 ++ WARN_ON(pulse8->work_result);
2528 ++ pulse8->work_result = msgcode;
2529 + schedule_work(&pulse8->work);
2530 + break;
2531 + case MSGCODE_HIGH_ERROR:
2532 +diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
2533 +index 4e8e80ac8341..9cec5c216e1f 100644
2534 +--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
2535 ++++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
2536 +@@ -973,6 +973,8 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
2537 + struct ath_htc_rx_status *rxstatus;
2538 + struct ath_rx_status rx_stats;
2539 + bool decrypt_error = false;
2540 ++ __be16 rs_datalen;
2541 ++ bool is_phyerr;
2542 +
2543 + if (skb->len < HTC_RX_FRAME_HEADER_SIZE) {
2544 + ath_err(common, "Corrupted RX frame, dropping (len: %d)\n",
2545 +@@ -982,11 +984,24 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
2546 +
2547 + rxstatus = (struct ath_htc_rx_status *)skb->data;
2548 +
2549 +- if (be16_to_cpu(rxstatus->rs_datalen) -
2550 +- (skb->len - HTC_RX_FRAME_HEADER_SIZE) != 0) {
2551 ++ rs_datalen = be16_to_cpu(rxstatus->rs_datalen);
2552 ++ if (unlikely(rs_datalen -
2553 ++ (skb->len - HTC_RX_FRAME_HEADER_SIZE) != 0)) {
2554 + ath_err(common,
2555 + "Corrupted RX data len, dropping (dlen: %d, skblen: %d)\n",
2556 +- rxstatus->rs_datalen, skb->len);
2557 ++ rs_datalen, skb->len);
2558 ++ goto rx_next;
2559 ++ }
2560 ++
2561 ++ is_phyerr = rxstatus->rs_status & ATH9K_RXERR_PHY;
2562 ++ /*
2563 ++ * Discard zero-length packets, and packets smaller than an ACK
2564 ++ * that are not PHY_ERROR frames (short radar pulses have a length of 3).
2565 ++ */
2566 ++ if (unlikely(!rs_datalen || (rs_datalen < 10 && !is_phyerr))) {
2567 ++ ath_warn(common,
2568 ++ "Short RX data len, dropping (dlen: %d)\n",
2569 ++ rs_datalen);
2570 + goto rx_next;
2571 + }
2572 +
2573 +@@ -1011,7 +1026,7 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
2574 + * Process PHY errors and return so that the packet
2575 + * can be dropped.
2576 + */
2577 +- if (rx_stats.rs_status & ATH9K_RXERR_PHY) {
2578 ++ if (unlikely(is_phyerr)) {
2579 + /* TODO: Not using DFS processing now. */
2580 + if (ath_cmn_process_fft(&priv->spec_priv, hdr,
2581 + &rx_stats, rx_status->mactime)) {
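
The new ath9k checks validate the length field before the frame is processed: the declared rs_datalen has to match what the skb actually carries, and frames shorter than an ACK are dropped unless they are PHY error reports (short radar pulses legitimately carry a length of 3). A sketch of that validation order, with stand-in constants for the header size and status bit:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RX_HDR_LEN 27   /* stand-in for HTC_RX_FRAME_HEADER_SIZE */
#define ERR_PHY    0x02 /* stand-in for ATH9K_RXERR_PHY */

static bool rx_ok(uint16_t rs_datalen, uint8_t rs_status, size_t skb_len)
{
	if (skb_len < RX_HDR_LEN)
		return false;                    /* truncated header */
	if (rs_datalen != skb_len - RX_HDR_LEN)
		return false;                    /* corrupted length field */
	bool is_phyerr = rs_status & ERR_PHY;
	if (!rs_datalen || (rs_datalen < 10 && !is_phyerr))
		return false;                    /* shorter than an ACK */
	return true;
}

int main(void)
{
	printf("%d\n", rx_ok(3, ERR_PHY, RX_HDR_LEN + 3)); /* radar pulse: kept */
	printf("%d\n", rx_ok(3, 0, RX_HDR_LEN + 3));       /* too short: dropped */
	return 0;
}
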
2582 +diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
2583 +index 265f89e11d8b..59474bd0c728 100644
2584 +--- a/drivers/nvme/host/fc.c
2585 ++++ b/drivers/nvme/host/fc.c
2586 +@@ -342,7 +342,8 @@ nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
2587 + !template->ls_req || !template->fcp_io ||
2588 + !template->ls_abort || !template->fcp_abort ||
2589 + !template->max_hw_queues || !template->max_sgl_segments ||
2590 +- !template->max_dif_sgl_segments || !template->dma_boundary) {
2591 ++ !template->max_dif_sgl_segments || !template->dma_boundary ||
2592 ++ !template->module) {
2593 + ret = -EINVAL;
2594 + goto out_reghost_failed;
2595 + }
2596 +@@ -2015,6 +2016,7 @@ nvme_fc_ctrl_free(struct kref *ref)
2597 + {
2598 + struct nvme_fc_ctrl *ctrl =
2599 + container_of(ref, struct nvme_fc_ctrl, ref);
2600 ++ struct nvme_fc_lport *lport = ctrl->lport;
2601 + unsigned long flags;
2602 +
2603 + if (ctrl->ctrl.tagset) {
2604 +@@ -2041,6 +2043,7 @@ nvme_fc_ctrl_free(struct kref *ref)
2605 + if (ctrl->ctrl.opts)
2606 + nvmf_free_options(ctrl->ctrl.opts);
2607 + kfree(ctrl);
2608 ++ module_put(lport->ops->module);
2609 + }
2610 +
2611 + static void
2612 +@@ -2907,10 +2910,22 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
2613 + static void
2614 + __nvme_fc_terminate_io(struct nvme_fc_ctrl *ctrl)
2615 + {
2616 +- nvme_stop_keep_alive(&ctrl->ctrl);
2617 ++ /*
2618 ++ * if state is connecting - the error occurred as part of a
2619 ++ * reconnect attempt. The create_association error paths will
2620 ++ * clean up any outstanding io.
2621 ++ *
2622 ++ * if it's a different state - ensure all pending io is
2623 ++ * terminated. Given this can delay while waiting for the
2624 ++ * aborted io to return, we recheck adapter state below
2625 ++ * before changing state.
2626 ++ */
2627 ++ if (ctrl->ctrl.state != NVME_CTRL_CONNECTING) {
2628 ++ nvme_stop_keep_alive(&ctrl->ctrl);
2629 +
2630 +- /* will block will waiting for io to terminate */
2631 +- nvme_fc_delete_association(ctrl);
2632 ++ /* will block while waiting for io to terminate */
2633 ++ nvme_fc_delete_association(ctrl);
2634 ++ }
2635 +
2636 + if (ctrl->ctrl.state != NVME_CTRL_CONNECTING &&
2637 + !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
2638 +@@ -3056,10 +3071,15 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
2639 + goto out_fail;
2640 + }
2641 +
2642 ++ if (!try_module_get(lport->ops->module)) {
2643 ++ ret = -EUNATCH;
2644 ++ goto out_free_ctrl;
2645 ++ }
2646 ++
2647 + idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL);
2648 + if (idx < 0) {
2649 + ret = -ENOSPC;
2650 +- goto out_free_ctrl;
2651 ++ goto out_mod_put;
2652 + }
2653 +
2654 + ctrl->ctrl.opts = opts;
2655 +@@ -3212,6 +3232,8 @@ out_free_queues:
2656 + out_free_ida:
2657 + put_device(ctrl->dev);
2658 + ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
2659 ++out_mod_put:
2660 ++ module_put(lport->ops->module);
2661 + out_free_ctrl:
2662 + kfree(ctrl);
2663 + out_fail:
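
The nvme-fc hunks above pin the transport provider's module for the lifetime of each controller: try_module_get() at create time (failing with -EUNATCH if the module is unloading) is balanced by module_put() in the final kref release. A userspace sketch of that lifetime rule, with the module refcount modeled as a plain counter; all names are illustrative:

#include <stdbool.h>
#include <stdio.h>

struct provider { int refs; bool unloading; };

static bool provider_get(struct provider *p)
{
	if (p->unloading)
		return false;   /* like try_module_get() failing */
	p->refs++;
	return true;
}

static void provider_put(struct provider *p) { p->refs--; }

struct ctrl { struct provider *prov; };

static int ctrl_create(struct ctrl *c, struct provider *p)
{
	if (!provider_get(p))
		return -1;      /* -EUNATCH in the patch */
	c->prov = p;
	return 0;
}

static void ctrl_free(struct ctrl *c) { provider_put(c->prov); }

int main(void)
{
	struct provider lport_ops = { 0, false };
	struct ctrl c;

	if (!ctrl_create(&c, &lport_ops))
		ctrl_free(&c);
	printf("refs at exit: %d\n", lport_ops.refs); /* 0: balanced */
	return 0;
}
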
2664 +diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
2665 +index 869f462e6b6e..14d513087a14 100644
2666 +--- a/drivers/nvme/host/pci.c
2667 ++++ b/drivers/nvme/host/pci.c
2668 +@@ -68,14 +68,14 @@ static int io_queue_depth = 1024;
2669 + module_param_cb(io_queue_depth, &io_queue_depth_ops, &io_queue_depth, 0644);
2670 + MODULE_PARM_DESC(io_queue_depth, "set io queue depth, should >= 2");
2671 +
2672 +-static int write_queues;
2673 +-module_param(write_queues, int, 0644);
2674 ++static unsigned int write_queues;
2675 ++module_param(write_queues, uint, 0644);
2676 + MODULE_PARM_DESC(write_queues,
2677 + "Number of queues to use for writes. If not set, reads and writes "
2678 + "will share a queue set.");
2679 +
2680 +-static int poll_queues;
2681 +-module_param(poll_queues, int, 0644);
2682 ++static unsigned int poll_queues;
2683 ++module_param(poll_queues, uint, 0644);
2684 + MODULE_PARM_DESC(poll_queues, "Number of queues to use for polled IO.");
2685 +
2686 + struct nvme_dev;
2687 +@@ -2060,7 +2060,6 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
2688 + .priv = dev,
2689 + };
2690 + unsigned int irq_queues, this_p_queues;
2691 +- unsigned int nr_cpus = num_possible_cpus();
2692 +
2693 + /*
2694 + * Poll queues don't need interrupts, but we need at least one IO
2695 +@@ -2071,10 +2070,7 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
2696 + this_p_queues = nr_io_queues - 1;
2697 + irq_queues = 1;
2698 + } else {
2699 +- if (nr_cpus < nr_io_queues - this_p_queues)
2700 +- irq_queues = nr_cpus + 1;
2701 +- else
2702 +- irq_queues = nr_io_queues - this_p_queues + 1;
2703 ++ irq_queues = nr_io_queues - this_p_queues + 1;
2704 + }
2705 + dev->io_queues[HCTX_TYPE_POLL] = this_p_queues;
2706 +
2707 +diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
2708 +index b50b53db3746..1c50af6219f3 100644
2709 +--- a/drivers/nvme/target/fcloop.c
2710 ++++ b/drivers/nvme/target/fcloop.c
2711 +@@ -850,6 +850,7 @@ fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
2712 + #define FCLOOP_DMABOUND_4G 0xFFFFFFFF
2713 +
2714 + static struct nvme_fc_port_template fctemplate = {
2715 ++ .module = THIS_MODULE,
2716 + .localport_delete = fcloop_localport_delete,
2717 + .remoteport_delete = fcloop_remoteport_delete,
2718 + .create_queue = fcloop_create_queue,
2719 +diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
2720 +index c423e94baf0f..9617b7df7c4d 100644
2721 +--- a/drivers/of/overlay.c
2722 ++++ b/drivers/of/overlay.c
2723 +@@ -305,7 +305,6 @@ static int add_changeset_property(struct overlay_changeset *ovcs,
2724 + {
2725 + struct property *new_prop = NULL, *prop;
2726 + int ret = 0;
2727 +- bool check_for_non_overlay_node = false;
2728 +
2729 + if (target->in_livetree)
2730 + if (!of_prop_cmp(overlay_prop->name, "name") ||
2731 +@@ -318,6 +317,25 @@ static int add_changeset_property(struct overlay_changeset *ovcs,
2732 + else
2733 + prop = NULL;
2734 +
2735 ++ if (prop) {
2736 ++ if (!of_prop_cmp(prop->name, "#address-cells")) {
2737 ++ if (!of_prop_val_eq(prop, overlay_prop)) {
2738 ++ pr_err("ERROR: changing value of #address-cells is not allowed in %pOF\n",
2739 ++ target->np);
2740 ++ ret = -EINVAL;
2741 ++ }
2742 ++ return ret;
2743 ++
2744 ++ } else if (!of_prop_cmp(prop->name, "#size-cells")) {
2745 ++ if (!of_prop_val_eq(prop, overlay_prop)) {
2746 ++ pr_err("ERROR: changing value of #size-cells is not allowed in %pOF\n",
2747 ++ target->np);
2748 ++ ret = -EINVAL;
2749 ++ }
2750 ++ return ret;
2751 ++ }
2752 ++ }
2753 ++
2754 + if (is_symbols_prop) {
2755 + if (prop)
2756 + return -EINVAL;
2757 +@@ -330,33 +348,18 @@ static int add_changeset_property(struct overlay_changeset *ovcs,
2758 + return -ENOMEM;
2759 +
2760 + if (!prop) {
2761 +- check_for_non_overlay_node = true;
2762 + if (!target->in_livetree) {
2763 + new_prop->next = target->np->deadprops;
2764 + target->np->deadprops = new_prop;
2765 + }
2766 + ret = of_changeset_add_property(&ovcs->cset, target->np,
2767 + new_prop);
2768 +- } else if (!of_prop_cmp(prop->name, "#address-cells")) {
2769 +- if (!of_prop_val_eq(prop, new_prop)) {
2770 +- pr_err("ERROR: changing value of #address-cells is not allowed in %pOF\n",
2771 +- target->np);
2772 +- ret = -EINVAL;
2773 +- }
2774 +- } else if (!of_prop_cmp(prop->name, "#size-cells")) {
2775 +- if (!of_prop_val_eq(prop, new_prop)) {
2776 +- pr_err("ERROR: changing value of #size-cells is not allowed in %pOF\n",
2777 +- target->np);
2778 +- ret = -EINVAL;
2779 +- }
2780 + } else {
2781 +- check_for_non_overlay_node = true;
2782 + ret = of_changeset_update_property(&ovcs->cset, target->np,
2783 + new_prop);
2784 + }
2785 +
2786 +- if (check_for_non_overlay_node &&
2787 +- !of_node_check_flag(target->np, OF_OVERLAY))
2788 ++ if (!of_node_check_flag(target->np, OF_OVERLAY))
2789 + pr_err("WARNING: memory leak will occur if overlay removed, property: %pOF/%s\n",
2790 + target->np, new_prop->name);
2791 +
2792 +diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
2793 +index a97e2571a527..fcfaadc774ee 100644
2794 +--- a/drivers/pci/pci.c
2795 ++++ b/drivers/pci/pci.c
2796 +@@ -5854,6 +5854,24 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode,
2797 + return 0;
2798 + }
2799 +
2800 ++#ifdef CONFIG_ACPI
2801 ++bool pci_pr3_present(struct pci_dev *pdev)
2802 ++{
2803 ++ struct acpi_device *adev;
2804 ++
2805 ++ if (acpi_disabled)
2806 ++ return false;
2807 ++
2808 ++ adev = ACPI_COMPANION(&pdev->dev);
2809 ++ if (!adev)
2810 ++ return false;
2811 ++
2812 ++ return adev->power.flags.power_resources &&
2813 ++ acpi_has_method(adev->handle, "_PR3");
2814 ++}
2815 ++EXPORT_SYMBOL_GPL(pci_pr3_present);
2816 ++#endif
2817 ++
2818 + /**
2819 + * pci_add_dma_alias - Add a DMA devfn alias for a device
2820 + * @dev: the PCI device for which alias is added
2821 +diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
2822 +index 6fd1390fd06e..bfb22f868857 100644
2823 +--- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c
2824 ++++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
2825 +@@ -615,7 +615,7 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
2826 + return PTR_ERR(channel->base);
2827 +
2828 + /* call request_irq for OTG */
2829 +- irq = platform_get_irq(pdev, 0);
2830 ++ irq = platform_get_irq_optional(pdev, 0);
2831 + if (irq >= 0) {
2832 + INIT_WORK(&channel->work, rcar_gen3_phy_usb2_work);
2833 + irq = devm_request_irq(dev, irq, rcar_gen3_phy_usb2_irq,
2834 +diff --git a/drivers/platform/x86/pmc_atom.c b/drivers/platform/x86/pmc_atom.c
2835 +index 07d1b911e72f..52ef1419b671 100644
2836 +--- a/drivers/platform/x86/pmc_atom.c
2837 ++++ b/drivers/platform/x86/pmc_atom.c
2838 +@@ -429,6 +429,14 @@ static const struct dmi_system_id critclk_systems[] = {
2839 + DMI_MATCH(DMI_PRODUCT_VERSION, "6AV7882-0"),
2840 + },
2841 + },
2842 ++ {
2843 ++ .ident = "CONNECT X300",
2844 ++ .matches = {
2845 ++ DMI_MATCH(DMI_SYS_VENDOR, "SIEMENS AG"),
2846 ++ DMI_MATCH(DMI_PRODUCT_VERSION, "A5E45074588"),
2847 ++ },
2848 ++ },
2849 ++
2850 + { /*sentinel*/ }
2851 + };
2852 +
2853 +diff --git a/drivers/regulator/ab8500.c b/drivers/regulator/ab8500.c
2854 +index efb2f01a9101..f60e1b26c2d2 100644
2855 +--- a/drivers/regulator/ab8500.c
2856 ++++ b/drivers/regulator/ab8500.c
2857 +@@ -953,23 +953,6 @@ static struct ab8500_regulator_info
2858 + .update_val_idle = 0x82,
2859 + .update_val_normal = 0x02,
2860 + },
2861 +- [AB8505_LDO_USB] = {
2862 +- .desc = {
2863 +- .name = "LDO-USB",
2864 +- .ops = &ab8500_regulator_mode_ops,
2865 +- .type = REGULATOR_VOLTAGE,
2866 +- .id = AB8505_LDO_USB,
2867 +- .owner = THIS_MODULE,
2868 +- .n_voltages = 1,
2869 +- .volt_table = fixed_3300000_voltage,
2870 +- },
2871 +- .update_bank = 0x03,
2872 +- .update_reg = 0x82,
2873 +- .update_mask = 0x03,
2874 +- .update_val = 0x01,
2875 +- .update_val_idle = 0x03,
2876 +- .update_val_normal = 0x01,
2877 +- },
2878 + [AB8505_LDO_AUDIO] = {
2879 + .desc = {
2880 + .name = "LDO-AUDIO",
2881 +diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
2882 +index 989506bd90b1..16f0c8570036 100644
2883 +--- a/drivers/regulator/axp20x-regulator.c
2884 ++++ b/drivers/regulator/axp20x-regulator.c
2885 +@@ -413,10 +413,13 @@ static int axp20x_set_ramp_delay(struct regulator_dev *rdev, int ramp)
2886 + int i;
2887 +
2888 + for (i = 0; i < rate_count; i++) {
2889 +- if (ramp <= slew_rates[i])
2890 +- cfg = AXP20X_DCDC2_LDO3_V_RAMP_LDO3_RATE(i);
2891 +- else
2892 ++ if (ramp > slew_rates[i])
2893 + break;
2894 ++
2895 ++ if (id == AXP20X_DCDC2)
2896 ++ cfg = AXP20X_DCDC2_LDO3_V_RAMP_DCDC2_RATE(i);
2897 ++ else
2898 ++ cfg = AXP20X_DCDC2_LDO3_V_RAMP_LDO3_RATE(i);
2899 + }
2900 +
2901 + if (cfg == 0xff) {
2902 +@@ -605,7 +608,7 @@ static const struct regulator_desc axp22x_regulators[] = {
2903 + AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO1_MASK),
2904 + AXP_DESC(AXP22X, ELDO2, "eldo2", "eldoin", 700, 3300, 100,
2905 + AXP22X_ELDO2_V_OUT, AXP22X_ELDO2_V_OUT_MASK,
2906 +- AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO1_MASK),
2907 ++ AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO2_MASK),
2908 + AXP_DESC(AXP22X, ELDO3, "eldo3", "eldoin", 700, 3300, 100,
2909 + AXP22X_ELDO3_V_OUT, AXP22X_ELDO3_V_OUT_MASK,
2910 + AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO3_MASK),
2911 +diff --git a/drivers/regulator/bd70528-regulator.c b/drivers/regulator/bd70528-regulator.c
2912 +index 0248a61f1006..6041839ec38c 100644
2913 +--- a/drivers/regulator/bd70528-regulator.c
2914 ++++ b/drivers/regulator/bd70528-regulator.c
2915 +@@ -101,7 +101,6 @@ static const struct regulator_ops bd70528_ldo_ops = {
2916 + .set_voltage_sel = regulator_set_voltage_sel_regmap,
2917 + .get_voltage_sel = regulator_get_voltage_sel_regmap,
2918 + .set_voltage_time_sel = regulator_set_voltage_time_sel,
2919 +- .set_ramp_delay = bd70528_set_ramp_delay,
2920 + };
2921 +
2922 + static const struct regulator_ops bd70528_led_ops = {
2923 +diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
2924 +index f47b4b281b14..d7302c2052f9 100644
2925 +--- a/drivers/scsi/libsas/sas_discover.c
2926 ++++ b/drivers/scsi/libsas/sas_discover.c
2927 +@@ -81,12 +81,21 @@ static int sas_get_port_device(struct asd_sas_port *port)
2928 + else
2929 + dev->dev_type = SAS_SATA_DEV;
2930 + dev->tproto = SAS_PROTOCOL_SATA;
2931 +- } else {
2932 ++ } else if (port->oob_mode == SAS_OOB_MODE) {
2933 + struct sas_identify_frame *id =
2934 + (struct sas_identify_frame *) dev->frame_rcvd;
2935 + dev->dev_type = id->dev_type;
2936 + dev->iproto = id->initiator_bits;
2937 + dev->tproto = id->target_bits;
2938 ++ } else {
2939 ++ /* If the oob mode is OOB_NOT_CONNECTED, the port is
2940 ++ * disconnected due to a race with PHY down. We cannot
2941 ++ * continue to discover this port.
2942 ++ */
2943 ++ sas_put_device(dev);
2944 ++ pr_warn("Port %016llx is disconnected when discovering\n",
2945 ++ SAS_ADDR(port->attached_sas_addr));
2946 ++ return -ENODEV;
2947 + }
2948 +
2949 + sas_init_dev(dev);
2950 +diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
2951 +index 39a736b887b1..6c2b03415a2c 100644
2952 +--- a/drivers/scsi/lpfc/lpfc_bsg.c
2953 ++++ b/drivers/scsi/lpfc/lpfc_bsg.c
2954 +@@ -4489,12 +4489,6 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job,
2955 + phba->mbox_ext_buf_ctx.seqNum++;
2956 + nemb_tp = phba->mbox_ext_buf_ctx.nembType;
2957 +
2958 +- dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
2959 +- if (!dd_data) {
2960 +- rc = -ENOMEM;
2961 +- goto job_error;
2962 +- }
2963 +-
2964 + pbuf = (uint8_t *)dmabuf->virt;
2965 + size = job->request_payload.payload_len;
2966 + sg_copy_to_buffer(job->request_payload.sg_list,
2967 +@@ -4531,6 +4525,13 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job,
2968 + "2968 SLI_CONFIG ext-buffer wr all %d "
2969 + "ebuffers received\n",
2970 + phba->mbox_ext_buf_ctx.numBuf);
2971 ++
2972 ++ dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
2973 ++ if (!dd_data) {
2974 ++ rc = -ENOMEM;
2975 ++ goto job_error;
2976 ++ }
2977 ++
2978 + /* mailbox command structure for base driver */
2979 + pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2980 + if (!pmboxq) {
2981 +@@ -4579,6 +4580,8 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job,
2982 + return SLI_CONFIG_HANDLED;
2983 +
2984 + job_error:
2985 ++ if (pmboxq)
2986 ++ mempool_free(pmboxq, phba->mbox_mem_pool);
2987 + lpfc_bsg_dma_page_free(phba, dmabuf);
2988 + kfree(dd_data);
2989 +
2990 +diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
2991 +index 1286c658ba34..ee70d14e7a9d 100644
2992 +--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
2993 ++++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
2994 +@@ -4843,6 +4843,44 @@ lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2995 + }
2996 + }
2997 +
2998 ++/*
2999 ++ * Sets the mailbox completion handler to be used for the
3000 ++ * unreg_rpi command. The handler varies based on the state of
3001 ++ * the port and what will be happening to the rpi next.
3002 ++ */
3003 ++static void
3004 ++lpfc_set_unreg_login_mbx_cmpl(struct lpfc_hba *phba, struct lpfc_vport *vport,
3005 ++ struct lpfc_nodelist *ndlp, LPFC_MBOXQ_t *mbox)
3006 ++{
3007 ++ unsigned long iflags;
3008 ++
3009 ++ if (ndlp->nlp_flag & NLP_ISSUE_LOGO) {
3010 ++ mbox->ctx_ndlp = ndlp;
3011 ++ mbox->mbox_cmpl = lpfc_nlp_logo_unreg;
3012 ++
3013 ++ } else if (phba->sli_rev == LPFC_SLI_REV4 &&
3014 ++ (!(vport->load_flag & FC_UNLOADING)) &&
3015 ++ (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
3016 ++ LPFC_SLI_INTF_IF_TYPE_2) &&
3017 ++ (kref_read(&ndlp->kref) > 0)) {
3018 ++ mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
3019 ++ mbox->mbox_cmpl = lpfc_sli4_unreg_rpi_cmpl_clr;
3020 ++ } else {
3021 ++ if (vport->load_flag & FC_UNLOADING) {
3022 ++ if (phba->sli_rev == LPFC_SLI_REV4) {
3023 ++ spin_lock_irqsave(&vport->phba->ndlp_lock,
3024 ++ iflags);
3025 ++ ndlp->nlp_flag |= NLP_RELEASE_RPI;
3026 ++ spin_unlock_irqrestore(&vport->phba->ndlp_lock,
3027 ++ iflags);
3028 ++ }
3029 ++ lpfc_nlp_get(ndlp);
3030 ++ }
3031 ++ mbox->ctx_ndlp = ndlp;
3032 ++ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
3033 ++ }
3034 ++}
3035 ++
3036 + /*
3037 + * Free rpi associated with LPFC_NODELIST entry.
3038 + * This routine is called from lpfc_freenode(), when we are removing
3039 +@@ -4893,33 +4931,12 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
3040 +
3041 + lpfc_unreg_login(phba, vport->vpi, rpi, mbox);
3042 + mbox->vport = vport;
3043 +- if (ndlp->nlp_flag & NLP_ISSUE_LOGO) {
3044 +- mbox->ctx_ndlp = ndlp;
3045 +- mbox->mbox_cmpl = lpfc_nlp_logo_unreg;
3046 +- } else {
3047 +- if (phba->sli_rev == LPFC_SLI_REV4 &&
3048 +- (!(vport->load_flag & FC_UNLOADING)) &&
3049 +- (bf_get(lpfc_sli_intf_if_type,
3050 +- &phba->sli4_hba.sli_intf) >=
3051 +- LPFC_SLI_INTF_IF_TYPE_2) &&
3052 +- (kref_read(&ndlp->kref) > 0)) {
3053 +- mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
3054 +- mbox->mbox_cmpl =
3055 +- lpfc_sli4_unreg_rpi_cmpl_clr;
3056 +- /*
3057 +- * accept PLOGIs after unreg_rpi_cmpl
3058 +- */
3059 +- acc_plogi = 0;
3060 +- } else if (vport->load_flag & FC_UNLOADING) {
3061 +- mbox->ctx_ndlp = NULL;
3062 +- mbox->mbox_cmpl =
3063 +- lpfc_sli_def_mbox_cmpl;
3064 +- } else {
3065 +- mbox->ctx_ndlp = ndlp;
3066 +- mbox->mbox_cmpl =
3067 +- lpfc_sli_def_mbox_cmpl;
3068 +- }
3069 +- }
3070 ++ lpfc_set_unreg_login_mbx_cmpl(phba, vport, ndlp, mbox);
3071 ++ if (mbox->mbox_cmpl == lpfc_sli4_unreg_rpi_cmpl_clr)
3072 ++ /*
3073 ++ * accept PLOGIs after unreg_rpi_cmpl
3074 ++ */
3075 ++ acc_plogi = 0;
3076 + if (((ndlp->nlp_DID & Fabric_DID_MASK) !=
3077 + Fabric_DID_MASK) &&
3078 + (!(vport->fc_flag & FC_OFFLINE_MODE)))
3079 +@@ -5060,6 +5077,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
3080 + struct lpfc_hba *phba = vport->phba;
3081 + LPFC_MBOXQ_t *mb, *nextmb;
3082 + struct lpfc_dmabuf *mp;
3083 ++ unsigned long iflags;
3084 +
3085 + /* Cleanup node for NPort <nlp_DID> */
3086 + lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
3087 +@@ -5141,8 +5159,20 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
3088 + lpfc_cleanup_vports_rrqs(vport, ndlp);
3089 + if (phba->sli_rev == LPFC_SLI_REV4)
3090 + ndlp->nlp_flag |= NLP_RELEASE_RPI;
3091 +- lpfc_unreg_rpi(vport, ndlp);
3092 +-
3093 ++ if (!lpfc_unreg_rpi(vport, ndlp)) {
3094 ++ /* Clean up unregistered and non-freed rpis */
3095 ++ if ((ndlp->nlp_flag & NLP_RELEASE_RPI) &&
3096 ++ !(ndlp->nlp_rpi == LPFC_RPI_ALLOC_ERROR)) {
3097 ++ lpfc_sli4_free_rpi(vport->phba,
3098 ++ ndlp->nlp_rpi);
3099 ++ spin_lock_irqsave(&vport->phba->ndlp_lock,
3100 ++ iflags);
3101 ++ ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
3102 ++ ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
3103 ++ spin_unlock_irqrestore(&vport->phba->ndlp_lock,
3104 ++ iflags);
3105 ++ }
3106 ++ }
3107 + return 0;
3108 + }
3109 +
3110 +diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
3111 +index a227e36cbdc2..8e0f03ef346b 100644
3112 +--- a/drivers/scsi/lpfc/lpfc_nvme.c
3113 ++++ b/drivers/scsi/lpfc/lpfc_nvme.c
3114 +@@ -1976,6 +1976,8 @@ out_unlock:
3115 +
3116 + /* Declare and initialization an instance of the FC NVME template. */
3117 + static struct nvme_fc_port_template lpfc_nvme_template = {
3118 ++ .module = THIS_MODULE,
3119 ++
3120 + /* initiator-based functions */
3121 + .localport_delete = lpfc_nvme_localport_delete,
3122 + .remoteport_delete = lpfc_nvme_remoteport_delete,
3123 +diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
3124 +index 2b0e7b32c2df..8860f41af3ff 100644
3125 +--- a/drivers/scsi/lpfc/lpfc_sli.c
3126 ++++ b/drivers/scsi/lpfc/lpfc_sli.c
3127 +@@ -2526,6 +2526,8 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3128 + } else {
3129 + __lpfc_sli_rpi_release(vport, ndlp);
3130 + }
3131 ++ if (vport->load_flag & FC_UNLOADING)
3132 ++ lpfc_nlp_put(ndlp);
3133 + pmb->ctx_ndlp = NULL;
3134 + }
3135 + }
3136 +diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
3137 +index d5386edddaf6..1eb3fe281cc3 100644
3138 +--- a/drivers/scsi/qla2xxx/qla_def.h
3139 ++++ b/drivers/scsi/qla2xxx/qla_def.h
3140 +@@ -2401,6 +2401,7 @@ typedef struct fc_port {
3141 + unsigned int id_changed:1;
3142 + unsigned int scan_needed:1;
3143 + unsigned int n2n_flag:1;
3144 ++ unsigned int explicit_logout:1;
3145 +
3146 + struct completion nvme_del_done;
3147 + uint32_t nvme_prli_service_param;
3148 +diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
3149 +index 5d31e3d52b6b..80f276d67c14 100644
3150 +--- a/drivers/scsi/qla2xxx/qla_init.c
3151 ++++ b/drivers/scsi/qla2xxx/qla_init.c
3152 +@@ -4927,14 +4927,8 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
3153 + set_bit(RSCN_UPDATE, &flags);
3154 + clear_bit(LOCAL_LOOP_UPDATE, &flags);
3155 +
3156 +- } else if (ha->current_topology == ISP_CFG_N) {
3157 +- clear_bit(RSCN_UPDATE, &flags);
3158 +- if (qla_tgt_mode_enabled(vha)) {
3159 +- /* allow the other side to start the login */
3160 +- clear_bit(LOCAL_LOOP_UPDATE, &flags);
3161 +- set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
3162 +- }
3163 +- } else if (ha->current_topology == ISP_CFG_NL) {
3164 ++ } else if (ha->current_topology == ISP_CFG_NL ||
3165 ++ ha->current_topology == ISP_CFG_N) {
3166 + clear_bit(RSCN_UPDATE, &flags);
3167 + set_bit(LOCAL_LOOP_UPDATE, &flags);
3168 + } else if (!vha->flags.online ||
3169 +@@ -5051,7 +5045,6 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
3170 + memcpy(&ha->plogi_els_payld.data,
3171 + (void *)ha->init_cb,
3172 + sizeof(ha->plogi_els_payld.data));
3173 +- set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
3174 + } else {
3175 + ql_dbg(ql_dbg_init, vha, 0x00d1,
3176 + "PLOGI ELS param read fail.\n");
3177 +diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
3178 +index 518eb954cf42..bdf1994251b9 100644
3179 +--- a/drivers/scsi/qla2xxx/qla_iocb.c
3180 ++++ b/drivers/scsi/qla2xxx/qla_iocb.c
3181 +@@ -2405,11 +2405,19 @@ qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
3182 + static void
3183 + qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
3184 + {
3185 ++ u16 control_flags = LCF_COMMAND_LOGO;
3186 + logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
3187 +- logio->control_flags =
3188 +- cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
3189 +- if (!sp->fcport->keep_nport_handle)
3190 +- logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT);
3191 ++
3192 ++ if (sp->fcport->explicit_logout) {
3193 ++ control_flags |= LCF_EXPL_LOGO|LCF_FREE_NPORT;
3194 ++ } else {
3195 ++ control_flags |= LCF_IMPL_LOGO;
3196 ++
3197 ++ if (!sp->fcport->keep_nport_handle)
3198 ++ control_flags |= LCF_FREE_NPORT;
3199 ++ }
3200 ++
3201 ++ logio->control_flags = cpu_to_le16(control_flags);
3202 + logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3203 + logio->port_id[0] = sp->fcport->d_id.b.al_pa;
3204 + logio->port_id[1] = sp->fcport->d_id.b.area;
3205 +@@ -2676,7 +2684,8 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
3206 + ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073,
3207 + "PLOGI ELS IOCB:\n");
3208 + ql_dump_buffer(ql_log_info, vha, 0x0109,
3209 +- (uint8_t *)els_iocb, 0x70);
3210 ++ (uint8_t *)els_iocb,
3211 ++ sizeof(*els_iocb));
3212 + } else {
3213 + els_iocb->control_flags = 1 << 13;
3214 + els_iocb->tx_byte_count =
3215 +@@ -2842,7 +2851,8 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
3216 +
3217 + ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n");
3218 + ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x0109,
3219 +- (uint8_t *)elsio->u.els_plogi.els_plogi_pyld, 0x70);
3220 ++ (uint8_t *)elsio->u.els_plogi.els_plogi_pyld,
3221 ++ sizeof(*elsio->u.els_plogi.els_plogi_pyld));
3222 +
3223 + rval = qla2x00_start_sp(sp);
3224 + if (rval != QLA_SUCCESS) {
3225 +diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
3226 +index 9204e8467a4e..b3766b1879e3 100644
3227 +--- a/drivers/scsi/qla2xxx/qla_isr.c
3228 ++++ b/drivers/scsi/qla2xxx/qla_isr.c
3229 +@@ -1061,8 +1061,6 @@ global_port_update:
3230 + ql_dbg(ql_dbg_async, vha, 0x5011,
3231 + "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
3232 + mb[1], mb[2], mb[3]);
3233 +-
3234 +- qlt_async_event(mb[0], vha, mb);
3235 + break;
3236 + }
3237 +
3238 +@@ -1079,8 +1077,6 @@ global_port_update:
3239 + set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3240 + set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3241 + set_bit(VP_CONFIG_OK, &vha->vp_flags);
3242 +-
3243 +- qlt_async_event(mb[0], vha, mb);
3244 + break;
3245 +
3246 + case MBA_RSCN_UPDATE: /* State Change Registration */
3247 +diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
3248 +index 4d90cf101f5f..eac76e934cbe 100644
3249 +--- a/drivers/scsi/qla2xxx/qla_mbx.c
3250 ++++ b/drivers/scsi/qla2xxx/qla_mbx.c
3251 +@@ -3920,6 +3920,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
3252 + vha->d_id.b24 = 0;
3253 + vha->d_id.b.al_pa = 1;
3254 + ha->flags.n2n_bigger = 1;
3255 ++ ha->flags.n2n_ae = 0;
3256 +
3257 + id.b.al_pa = 2;
3258 + ql_dbg(ql_dbg_async, vha, 0x5075,
3259 +@@ -3930,6 +3931,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
3260 + "Format 1: Remote login - Waiting for WWPN %8phC.\n",
3261 + rptid_entry->u.f1.port_name);
3262 + ha->flags.n2n_bigger = 0;
3263 ++ ha->flags.n2n_ae = 1;
3264 + }
3265 + qla24xx_post_newsess_work(vha, &id,
3266 + rptid_entry->u.f1.port_name,
3267 +@@ -3941,7 +3943,6 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
3268 + /* if our portname is higher then initiate N2N login */
3269 +
3270 + set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags);
3271 +- ha->flags.n2n_ae = 1;
3272 + return;
3273 + break;
3274 + case TOPO_FL:
3275 +diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
3276 +index 941aa53363f5..bfcd02fdf2b8 100644
3277 +--- a/drivers/scsi/qla2xxx/qla_nvme.c
3278 ++++ b/drivers/scsi/qla2xxx/qla_nvme.c
3279 +@@ -610,6 +610,7 @@ static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
3280 + }
3281 +
3282 + static struct nvme_fc_port_template qla_nvme_fc_transport = {
3283 ++ .module = THIS_MODULE,
3284 + .localport_delete = qla_nvme_localport_delete,
3285 + .remoteport_delete = qla_nvme_remoteport_delete,
3286 + .create_queue = qla_nvme_alloc_queue,
3287 +diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
3288 +index a9bd0f513316..74a378a91b71 100644
3289 +--- a/drivers/scsi/qla2xxx/qla_target.c
3290 ++++ b/drivers/scsi/qla2xxx/qla_target.c
3291 +@@ -1104,6 +1104,7 @@ void qlt_free_session_done(struct work_struct *work)
3292 + }
3293 + }
3294 +
3295 ++ sess->explicit_logout = 0;
3296 + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
3297 + sess->free_pending = 0;
3298 +
3299 +@@ -1264,7 +1265,6 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess)
3300 + "Scheduling sess %p for deletion %8phC\n",
3301 + sess, sess->port_name);
3302 +
3303 +- INIT_WORK(&sess->del_work, qla24xx_delete_sess_fn);
3304 + WARN_ON(!queue_work(sess->vha->hw->wq, &sess->del_work));
3305 + }
3306 +
3307 +@@ -4803,6 +4803,7 @@ static int qlt_handle_login(struct scsi_qla_host *vha,
3308 +
3309 + switch (sess->disc_state) {
3310 + case DSC_DELETED:
3311 ++ case DSC_LOGIN_PEND:
3312 + qlt_plogi_ack_unref(vha, pla);
3313 + break;
3314 +
3315 +diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
3316 +index bab2073c1f72..abe7f79bb789 100644
3317 +--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
3318 ++++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
3319 +@@ -350,6 +350,7 @@ static void tcm_qla2xxx_close_session(struct se_session *se_sess)
3320 + target_sess_cmd_list_set_waiting(se_sess);
3321 + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
3322 +
3323 ++ sess->explicit_logout = 1;
3324 + tcm_qla2xxx_put_sess(sess);
3325 + }
3326 +
3327 +diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
3328 +index 8c674eca09f1..2323432a0edb 100644
3329 +--- a/drivers/scsi/qla4xxx/ql4_os.c
3330 ++++ b/drivers/scsi/qla4xxx/ql4_os.c
3331 +@@ -4275,7 +4275,6 @@ static int qla4xxx_mem_alloc(struct scsi_qla_host *ha)
3332 + return QLA_SUCCESS;
3333 +
3334 + mem_alloc_error_exit:
3335 +- qla4xxx_mem_free(ha);
3336 + return QLA_ERROR;
3337 + }
3338 +
3339 +diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
3340 +index 417b868d8735..ed8d9709b9b9 100644
3341 +--- a/drivers/scsi/scsi_transport_iscsi.c
3342 ++++ b/drivers/scsi/scsi_transport_iscsi.c
3343 +@@ -24,6 +24,8 @@
3344 +
3345 + #define ISCSI_TRANSPORT_VERSION "2.0-870"
3346 +
3347 ++#define ISCSI_SEND_MAX_ALLOWED 10
3348 ++
3349 + #define CREATE_TRACE_POINTS
3350 + #include <trace/events/iscsi.h>
3351 +
3352 +@@ -3682,6 +3684,7 @@ iscsi_if_rx(struct sk_buff *skb)
3353 + struct nlmsghdr *nlh;
3354 + struct iscsi_uevent *ev;
3355 + uint32_t group;
3356 ++ int retries = ISCSI_SEND_MAX_ALLOWED;
3357 +
3358 + nlh = nlmsg_hdr(skb);
3359 + if (nlh->nlmsg_len < sizeof(*nlh) + sizeof(*ev) ||
3360 +@@ -3712,6 +3715,10 @@ iscsi_if_rx(struct sk_buff *skb)
3361 + break;
3362 + err = iscsi_if_send_reply(portid, nlh->nlmsg_type,
3363 + ev, sizeof(*ev));
3364 ++ if (err == -EAGAIN && --retries < 0) {
3365 ++ printk(KERN_WARNING "Send reply failed, error %d\n", err);
3366 ++ break;
3367 ++ }
3368 + } while (err < 0 && err != -ECONNREFUSED && err != -ESRCH);
3369 + skb_pull(skb, rlen);
3370 + }
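
The hunk above puts a ceiling on the reply-send loop: a netlink peer that keeps returning -EAGAIN from iscsi_if_send_reply() can no longer spin the kernel forever, and after ISCSI_SEND_MAX_ALLOWED failed attempts the event is dropped with a warning. A minimal userspace sketch of the same bounded-retry pattern (send_reply() and the constant are illustrative stand-ins, not kernel API):

    #include <errno.h>
    #include <stdio.h>

    #define SEND_MAX_ALLOWED 10

    /* Stand-in for a send primitive that fails transiently a few times. */
    static int send_reply(void)
    {
            static int calls;
            return (++calls < 3) ? -EAGAIN : 0;
    }

    static int send_with_retry(void)
    {
            int retries = SEND_MAX_ALLOWED;
            int err;

            do {
                    err = send_reply();
                    /* Give up once the transient-error budget is spent. */
                    if (err == -EAGAIN && --retries < 0) {
                            fprintf(stderr, "send failed, error %d\n", err);
                            break;
                    }
            } while (err < 0 && err != -ECONNREFUSED);

            return err;
    }

    int main(void)
    {
            return send_with_retry() ? 1 : 0;
    }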
3371 +diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
3372 +index bec758e978fb..d47bd26577b3 100644
3373 +--- a/drivers/spi/spi-fsl-dspi.c
3374 ++++ b/drivers/spi/spi-fsl-dspi.c
3375 +@@ -583,21 +583,14 @@ static void dspi_tcfq_write(struct fsl_dspi *dspi)
3376 + dspi->tx_cmd |= SPI_PUSHR_CMD_CTCNT;
3377 +
3378 + if (dspi->devtype_data->xspi_mode && dspi->bits_per_word > 16) {
3379 +- /* Write two TX FIFO entries first, and then the corresponding
3380 +- * CMD FIFO entry.
3381 ++ /* Write the CMD FIFO entry first, and then the two
3382 ++ * corresponding TX FIFO entries.
3383 + */
3384 + u32 data = dspi_pop_tx(dspi);
3385 +
3386 +- if (dspi->cur_chip->ctar_val & SPI_CTAR_LSBFE) {
3387 +- /* LSB */
3388 +- tx_fifo_write(dspi, data & 0xFFFF);
3389 +- tx_fifo_write(dspi, data >> 16);
3390 +- } else {
3391 +- /* MSB */
3392 +- tx_fifo_write(dspi, data >> 16);
3393 +- tx_fifo_write(dspi, data & 0xFFFF);
3394 +- }
3395 + cmd_fifo_write(dspi);
3396 ++ tx_fifo_write(dspi, data & 0xFFFF);
3397 ++ tx_fifo_write(dspi, data >> 16);
3398 + } else {
3399 + /* Write one entry to both TX FIFO and CMD FIFO
3400 + * simultaneously.
3401 +diff --git a/drivers/spi/spi-uniphier.c b/drivers/spi/spi-uniphier.c
3402 +index 47cde1864630..ce9b30112e26 100644
3403 +--- a/drivers/spi/spi-uniphier.c
3404 ++++ b/drivers/spi/spi-uniphier.c
3405 +@@ -290,25 +290,32 @@ static void uniphier_spi_recv(struct uniphier_spi_priv *priv)
3406 + }
3407 + }
3408 +
3409 +-static void uniphier_spi_fill_tx_fifo(struct uniphier_spi_priv *priv)
3410 ++static void uniphier_spi_set_fifo_threshold(struct uniphier_spi_priv *priv,
3411 ++ unsigned int threshold)
3412 + {
3413 +- unsigned int fifo_threshold, fill_bytes;
3414 + u32 val;
3415 +
3416 +- fifo_threshold = DIV_ROUND_UP(priv->rx_bytes,
3417 +- bytes_per_word(priv->bits_per_word));
3418 +- fifo_threshold = min(fifo_threshold, SSI_FIFO_DEPTH);
3419 +-
3420 +- fill_bytes = fifo_threshold - (priv->rx_bytes - priv->tx_bytes);
3421 +-
3422 +- /* set fifo threshold */
3423 + val = readl(priv->base + SSI_FC);
3424 + val &= ~(SSI_FC_TXFTH_MASK | SSI_FC_RXFTH_MASK);
3425 +- val |= FIELD_PREP(SSI_FC_TXFTH_MASK, fifo_threshold);
3426 +- val |= FIELD_PREP(SSI_FC_RXFTH_MASK, fifo_threshold);
3427 ++ val |= FIELD_PREP(SSI_FC_TXFTH_MASK, SSI_FIFO_DEPTH - threshold);
3428 ++ val |= FIELD_PREP(SSI_FC_RXFTH_MASK, threshold);
3429 + writel(val, priv->base + SSI_FC);
3430 ++}
3431 ++
3432 ++static void uniphier_spi_fill_tx_fifo(struct uniphier_spi_priv *priv)
3433 ++{
3434 ++ unsigned int fifo_threshold, fill_words;
3435 ++ unsigned int bpw = bytes_per_word(priv->bits_per_word);
3436 ++
3437 ++ fifo_threshold = DIV_ROUND_UP(priv->rx_bytes, bpw);
3438 ++ fifo_threshold = min(fifo_threshold, SSI_FIFO_DEPTH);
3439 ++
3440 ++ uniphier_spi_set_fifo_threshold(priv, fifo_threshold);
3441 ++
3442 ++ fill_words = fifo_threshold -
3443 ++ DIV_ROUND_UP(priv->rx_bytes - priv->tx_bytes, bpw);
3444 +
3445 +- while (fill_bytes--)
3446 ++ while (fill_words--)
3447 + uniphier_spi_send(priv);
3448 + }
3449 +
3450 +diff --git a/drivers/staging/wlan-ng/Kconfig b/drivers/staging/wlan-ng/Kconfig
3451 +index ac136663fa8e..082c16a31616 100644
3452 +--- a/drivers/staging/wlan-ng/Kconfig
3453 ++++ b/drivers/staging/wlan-ng/Kconfig
3454 +@@ -4,6 +4,7 @@ config PRISM2_USB
3455 + depends on WLAN && USB && CFG80211
3456 + select WIRELESS_EXT
3457 + select WEXT_PRIV
3458 ++ select CRC32
3459 + help
3460 + This is the wlan-ng prism 2.5/3 USB driver for a wide range of
3461 + old USB wireless devices.
3462 +diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
3463 +index 00964b6e4ac1..e0718ee5d42a 100644
3464 +--- a/drivers/tty/serial/msm_serial.c
3465 ++++ b/drivers/tty/serial/msm_serial.c
3466 +@@ -1580,6 +1580,7 @@ static void __msm_console_write(struct uart_port *port, const char *s,
3467 + int num_newlines = 0;
3468 + bool replaced = false;
3469 + void __iomem *tf;
3470 ++ int locked = 1;
3471 +
3472 + if (is_uartdm)
3473 + tf = port->membase + UARTDM_TF;
3474 +@@ -1592,7 +1593,13 @@ static void __msm_console_write(struct uart_port *port, const char *s,
3475 + num_newlines++;
3476 + count += num_newlines;
3477 +
3478 +- spin_lock(&port->lock);
3479 ++ if (port->sysrq)
3480 ++ locked = 0;
3481 ++ else if (oops_in_progress)
3482 ++ locked = spin_trylock(&port->lock);
3483 ++ else
3484 ++ spin_lock(&port->lock);
3485 ++
3486 + if (is_uartdm)
3487 + msm_reset_dm_count(port, count);
3488 +
3489 +@@ -1628,7 +1635,9 @@ static void __msm_console_write(struct uart_port *port, const char *s,
3490 + iowrite32_rep(tf, buf, 1);
3491 + i += num_chars;
3492 + }
3493 +- spin_unlock(&port->lock);
3494 ++
3495 ++ if (locked)
3496 ++ spin_unlock(&port->lock);
3497 + }
3498 +
3499 + static void msm_console_write(struct console *co, const char *s,
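
The msm_serial hunk above adopts the usual oops-safe console locking idiom: skip the port lock entirely on the sysrq path (where it is already held), only try-lock while an oops is in progress so a CPU that died holding the lock cannot deadlock the panic output, and lock normally otherwise. A rough userspace model of that three-way decision using pthreads (flags and names are illustrative only; note kernel spin_trylock() returns nonzero on success, unlike pthread_mutex_trylock()):

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t port_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Mirrors the three cases added to __msm_console_write(). */
    static void console_write(bool sysrq, bool oops_in_progress)
    {
            int locked = 1;

            if (sysrq)
                    locked = 0;     /* lock already held on the sysrq path */
            else if (oops_in_progress)
                    locked = !pthread_mutex_trylock(&port_lock); /* best effort */
            else
                    pthread_mutex_lock(&port_lock);

            /* ... emit characters to the UART FIFO here ... */

            if (locked)
                    pthread_mutex_unlock(&port_lock);
    }

    int main(void)
    {
            console_write(false, false);
            console_write(false, true);
            return 0;
    }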
3500 +diff --git a/drivers/usb/gadget/function/f_ecm.c b/drivers/usb/gadget/function/f_ecm.c
3501 +index 6ce044008cf6..460d5d7c984f 100644
3502 +--- a/drivers/usb/gadget/function/f_ecm.c
3503 ++++ b/drivers/usb/gadget/function/f_ecm.c
3504 +@@ -621,8 +621,12 @@ static void ecm_disable(struct usb_function *f)
3505 +
3506 + DBG(cdev, "ecm deactivated\n");
3507 +
3508 +- if (ecm->port.in_ep->enabled)
3509 ++ if (ecm->port.in_ep->enabled) {
3510 + gether_disconnect(&ecm->port);
3511 ++ } else {
3512 ++ ecm->port.in_ep->desc = NULL;
3513 ++ ecm->port.out_ep->desc = NULL;
3514 ++ }
3515 +
3516 + usb_ep_disable(ecm->notify);
3517 + ecm->notify->desc = NULL;
3518 +diff --git a/drivers/usb/gadget/function/f_rndis.c b/drivers/usb/gadget/function/f_rndis.c
3519 +index d48df36622b7..0d8e4a364ca6 100644
3520 +--- a/drivers/usb/gadget/function/f_rndis.c
3521 ++++ b/drivers/usb/gadget/function/f_rndis.c
3522 +@@ -618,6 +618,7 @@ static void rndis_disable(struct usb_function *f)
3523 + gether_disconnect(&rndis->port);
3524 +
3525 + usb_ep_disable(rndis->notify);
3526 ++ rndis->notify->desc = NULL;
3527 + }
3528 +
3529 + /*-------------------------------------------------------------------------*/
3530 +diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
3531 +index 58e7c100b6ad..4c761abc5688 100644
3532 +--- a/drivers/watchdog/Kconfig
3533 ++++ b/drivers/watchdog/Kconfig
3534 +@@ -1444,6 +1444,7 @@ config SMSC37B787_WDT
3535 + config TQMX86_WDT
3536 + tristate "TQ-Systems TQMX86 Watchdog Timer"
3537 + depends on X86
3538 ++ select WATCHDOG_CORE
3539 + help
3540 + This is the driver for the hardware watchdog timer in the TQMX86 IO
3541 + controller found on some of their ComExpress Modules.
3542 +diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
3543 +index 5bae515c8e25..bed90d612e48 100644
3544 +--- a/drivers/xen/balloon.c
3545 ++++ b/drivers/xen/balloon.c
3546 +@@ -395,7 +395,8 @@ static struct notifier_block xen_memory_nb = {
3547 + #else
3548 + static enum bp_state reserve_additional_memory(void)
3549 + {
3550 +- balloon_stats.target_pages = balloon_stats.current_pages;
3551 ++ balloon_stats.target_pages = balloon_stats.current_pages +
3552 ++ balloon_stats.target_unpopulated;
3553 + return BP_ECANCELED;
3554 + }
3555 + #endif /* CONFIG_XEN_BALLOON_MEMORY_HOTPLUG */
3556 +diff --git a/fs/afs/dynroot.c b/fs/afs/dynroot.c
3557 +index 4150280509ff..7503899c0a1b 100644
3558 +--- a/fs/afs/dynroot.c
3559 ++++ b/fs/afs/dynroot.c
3560 +@@ -136,6 +136,9 @@ static struct dentry *afs_dynroot_lookup(struct inode *dir, struct dentry *dentr
3561 +
3562 + ASSERTCMP(d_inode(dentry), ==, NULL);
3563 +
3564 ++ if (flags & LOOKUP_CREATE)
3565 ++ return ERR_PTR(-EOPNOTSUPP);
3566 ++
3567 + if (dentry->d_name.len >= AFSNAMEMAX) {
3568 + _leave(" = -ENAMETOOLONG");
3569 + return ERR_PTR(-ENAMETOOLONG);
3570 +diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
3571 +index f532d6d3bd28..79bc5f1338ed 100644
3572 +--- a/fs/afs/mntpt.c
3573 ++++ b/fs/afs/mntpt.c
3574 +@@ -126,7 +126,7 @@ static int afs_mntpt_set_params(struct fs_context *fc, struct dentry *mntpt)
3575 + if (src_as->cell)
3576 + ctx->cell = afs_get_cell(src_as->cell);
3577 +
3578 +- if (size > PAGE_SIZE - 1)
3579 ++ if (size < 2 || size > PAGE_SIZE - 1)
3580 + return -EINVAL;
3581 +
3582 + page = read_mapping_page(d_inode(mntpt)->i_mapping, 0, NULL);
3583 +@@ -140,7 +140,9 @@ static int afs_mntpt_set_params(struct fs_context *fc, struct dentry *mntpt)
3584 + }
3585 +
3586 + buf = kmap(page);
3587 +- ret = vfs_parse_fs_string(fc, "source", buf, size);
3588 ++ ret = -EINVAL;
3589 ++ if (buf[size - 1] == '.')
3590 ++ ret = vfs_parse_fs_string(fc, "source", buf, size - 1);
3591 + kunmap(page);
3592 + put_page(page);
3593 + if (ret < 0)
3594 +diff --git a/fs/afs/server.c b/fs/afs/server.c
3595 +index 64d440aaabc0..ca8115ba1724 100644
3596 +--- a/fs/afs/server.c
3597 ++++ b/fs/afs/server.c
3598 +@@ -32,18 +32,11 @@ static void afs_dec_servers_outstanding(struct afs_net *net)
3599 + struct afs_server *afs_find_server(struct afs_net *net,
3600 + const struct sockaddr_rxrpc *srx)
3601 + {
3602 +- const struct sockaddr_in6 *a = &srx->transport.sin6, *b;
3603 + const struct afs_addr_list *alist;
3604 + struct afs_server *server = NULL;
3605 + unsigned int i;
3606 +- bool ipv6 = true;
3607 + int seq = 0, diff;
3608 +
3609 +- if (srx->transport.sin6.sin6_addr.s6_addr32[0] == 0 ||
3610 +- srx->transport.sin6.sin6_addr.s6_addr32[1] == 0 ||
3611 +- srx->transport.sin6.sin6_addr.s6_addr32[2] == htonl(0xffff))
3612 +- ipv6 = false;
3613 +-
3614 + rcu_read_lock();
3615 +
3616 + do {
3617 +@@ -52,7 +45,8 @@ struct afs_server *afs_find_server(struct afs_net *net,
3618 + server = NULL;
3619 + read_seqbegin_or_lock(&net->fs_addr_lock, &seq);
3620 +
3621 +- if (ipv6) {
3622 ++ if (srx->transport.family == AF_INET6) {
3623 ++ const struct sockaddr_in6 *a = &srx->transport.sin6, *b;
3624 + hlist_for_each_entry_rcu(server, &net->fs_addresses6, addr6_link) {
3625 + alist = rcu_dereference(server->addresses);
3626 + for (i = alist->nr_ipv4; i < alist->nr_addrs; i++) {
3627 +@@ -68,15 +62,16 @@ struct afs_server *afs_find_server(struct afs_net *net,
3628 + }
3629 + }
3630 + } else {
3631 ++ const struct sockaddr_in *a = &srx->transport.sin, *b;
3632 + hlist_for_each_entry_rcu(server, &net->fs_addresses4, addr4_link) {
3633 + alist = rcu_dereference(server->addresses);
3634 + for (i = 0; i < alist->nr_ipv4; i++) {
3635 +- b = &alist->addrs[i].transport.sin6;
3636 +- diff = ((u16 __force)a->sin6_port -
3637 +- (u16 __force)b->sin6_port);
3638 ++ b = &alist->addrs[i].transport.sin;
3639 ++ diff = ((u16 __force)a->sin_port -
3640 ++ (u16 __force)b->sin_port);
3641 + if (diff == 0)
3642 +- diff = ((u32 __force)a->sin6_addr.s6_addr32[3] -
3643 +- (u32 __force)b->sin6_addr.s6_addr32[3]);
3644 ++ diff = ((u32 __force)a->sin_addr.s_addr -
3645 ++ (u32 __force)b->sin_addr.s_addr);
3646 + if (diff == 0)
3647 + goto found;
3648 + }
3649 +diff --git a/fs/afs/super.c b/fs/afs/super.c
3650 +index 488641b1a418..d9a6036b70b9 100644
3651 +--- a/fs/afs/super.c
3652 ++++ b/fs/afs/super.c
3653 +@@ -448,7 +448,6 @@ static int afs_fill_super(struct super_block *sb, struct afs_fs_context *ctx)
3654 + /* allocate the root inode and dentry */
3655 + if (as->dyn_root) {
3656 + inode = afs_iget_pseudo_dir(sb, true);
3657 +- sb->s_flags |= SB_RDONLY;
3658 + } else {
3659 + sprintf(sb->s_id, "%llu", as->volume->vid);
3660 + afs_activate_volume(as->volume);
3661 +diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
3662 +index 10a04b99798a..3f3110975f88 100644
3663 +--- a/fs/btrfs/async-thread.c
3664 ++++ b/fs/btrfs/async-thread.c
3665 +@@ -53,16 +53,6 @@ struct btrfs_workqueue {
3666 + struct __btrfs_workqueue *high;
3667 + };
3668 +
3669 +-static void normal_work_helper(struct btrfs_work *work);
3670 +-
3671 +-#define BTRFS_WORK_HELPER(name) \
3672 +-noinline_for_stack void btrfs_##name(struct work_struct *arg) \
3673 +-{ \
3674 +- struct btrfs_work *work = container_of(arg, struct btrfs_work, \
3675 +- normal_work); \
3676 +- normal_work_helper(work); \
3677 +-}
3678 +-
3679 + struct btrfs_fs_info *
3680 + btrfs_workqueue_owner(const struct __btrfs_workqueue *wq)
3681 + {
3682 +@@ -89,29 +79,6 @@ bool btrfs_workqueue_normal_congested(const struct btrfs_workqueue *wq)
3683 + return atomic_read(&wq->normal->pending) > wq->normal->thresh * 2;
3684 + }
3685 +
3686 +-BTRFS_WORK_HELPER(worker_helper);
3687 +-BTRFS_WORK_HELPER(delalloc_helper);
3688 +-BTRFS_WORK_HELPER(flush_delalloc_helper);
3689 +-BTRFS_WORK_HELPER(cache_helper);
3690 +-BTRFS_WORK_HELPER(submit_helper);
3691 +-BTRFS_WORK_HELPER(fixup_helper);
3692 +-BTRFS_WORK_HELPER(endio_helper);
3693 +-BTRFS_WORK_HELPER(endio_meta_helper);
3694 +-BTRFS_WORK_HELPER(endio_meta_write_helper);
3695 +-BTRFS_WORK_HELPER(endio_raid56_helper);
3696 +-BTRFS_WORK_HELPER(endio_repair_helper);
3697 +-BTRFS_WORK_HELPER(rmw_helper);
3698 +-BTRFS_WORK_HELPER(endio_write_helper);
3699 +-BTRFS_WORK_HELPER(freespace_write_helper);
3700 +-BTRFS_WORK_HELPER(delayed_meta_helper);
3701 +-BTRFS_WORK_HELPER(readahead_helper);
3702 +-BTRFS_WORK_HELPER(qgroup_rescan_helper);
3703 +-BTRFS_WORK_HELPER(extent_refs_helper);
3704 +-BTRFS_WORK_HELPER(scrub_helper);
3705 +-BTRFS_WORK_HELPER(scrubwrc_helper);
3706 +-BTRFS_WORK_HELPER(scrubnc_helper);
3707 +-BTRFS_WORK_HELPER(scrubparity_helper);
3708 +-
3709 + static struct __btrfs_workqueue *
3710 + __btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info, const char *name,
3711 + unsigned int flags, int limit_active, int thresh)
3712 +@@ -302,12 +269,13 @@ static void run_ordered_work(struct __btrfs_workqueue *wq,
3713 + * original work item cannot depend on the recycled work
3714 + * item in that case (see find_worker_executing_work()).
3715 + *
3716 +- * Note that the work of one Btrfs filesystem may depend
3717 +- * on the work of another Btrfs filesystem via, e.g., a
3718 +- * loop device. Therefore, we must not allow the current
3719 +- * work item to be recycled until we are really done,
3720 +- * otherwise we break the above assumption and can
3721 +- * deadlock.
3722 ++ * Note that different types of Btrfs work can depend on
3723 ++ * each other, and one type of work on one Btrfs
3724 ++ * filesystem may even depend on the same type of work
3725 ++ * on another Btrfs filesystem via, e.g., a loop device.
3726 ++ * Therefore, we must not allow the current work item to
3727 ++ * be recycled until we are really done, otherwise we
3728 ++ * break the above assumption and can deadlock.
3729 + */
3730 + free_self = true;
3731 + } else {
3732 +@@ -331,8 +299,10 @@ static void run_ordered_work(struct __btrfs_workqueue *wq,
3733 + }
3734 + }
3735 +
3736 +-static void normal_work_helper(struct btrfs_work *work)
3737 ++static void btrfs_work_helper(struct work_struct *normal_work)
3738 + {
3739 ++ struct btrfs_work *work = container_of(normal_work, struct btrfs_work,
3740 ++ normal_work);
3741 + struct __btrfs_workqueue *wq;
3742 + void *wtag;
3743 + int need_order = 0;
3744 +@@ -362,15 +332,13 @@ static void normal_work_helper(struct btrfs_work *work)
3745 + trace_btrfs_all_work_done(wq->fs_info, wtag);
3746 + }
3747 +
3748 +-void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t uniq_func,
3749 +- btrfs_func_t func,
3750 +- btrfs_func_t ordered_func,
3751 +- btrfs_func_t ordered_free)
3752 ++void btrfs_init_work(struct btrfs_work *work, btrfs_func_t func,
3753 ++ btrfs_func_t ordered_func, btrfs_func_t ordered_free)
3754 + {
3755 + work->func = func;
3756 + work->ordered_func = ordered_func;
3757 + work->ordered_free = ordered_free;
3758 +- INIT_WORK(&work->normal_work, uniq_func);
3759 ++ INIT_WORK(&work->normal_work, btrfs_work_helper);
3760 + INIT_LIST_HEAD(&work->ordered_list);
3761 + work->flags = 0;
3762 + }
3763 +diff --git a/fs/btrfs/async-thread.h b/fs/btrfs/async-thread.h
3764 +index 7861c9feba5f..c5bf2b117c05 100644
3765 +--- a/fs/btrfs/async-thread.h
3766 ++++ b/fs/btrfs/async-thread.h
3767 +@@ -29,42 +29,13 @@ struct btrfs_work {
3768 + unsigned long flags;
3769 + };
3770 +
3771 +-#define BTRFS_WORK_HELPER_PROTO(name) \
3772 +-void btrfs_##name(struct work_struct *arg)
3773 +-
3774 +-BTRFS_WORK_HELPER_PROTO(worker_helper);
3775 +-BTRFS_WORK_HELPER_PROTO(delalloc_helper);
3776 +-BTRFS_WORK_HELPER_PROTO(flush_delalloc_helper);
3777 +-BTRFS_WORK_HELPER_PROTO(cache_helper);
3778 +-BTRFS_WORK_HELPER_PROTO(submit_helper);
3779 +-BTRFS_WORK_HELPER_PROTO(fixup_helper);
3780 +-BTRFS_WORK_HELPER_PROTO(endio_helper);
3781 +-BTRFS_WORK_HELPER_PROTO(endio_meta_helper);
3782 +-BTRFS_WORK_HELPER_PROTO(endio_meta_write_helper);
3783 +-BTRFS_WORK_HELPER_PROTO(endio_raid56_helper);
3784 +-BTRFS_WORK_HELPER_PROTO(endio_repair_helper);
3785 +-BTRFS_WORK_HELPER_PROTO(rmw_helper);
3786 +-BTRFS_WORK_HELPER_PROTO(endio_write_helper);
3787 +-BTRFS_WORK_HELPER_PROTO(freespace_write_helper);
3788 +-BTRFS_WORK_HELPER_PROTO(delayed_meta_helper);
3789 +-BTRFS_WORK_HELPER_PROTO(readahead_helper);
3790 +-BTRFS_WORK_HELPER_PROTO(qgroup_rescan_helper);
3791 +-BTRFS_WORK_HELPER_PROTO(extent_refs_helper);
3792 +-BTRFS_WORK_HELPER_PROTO(scrub_helper);
3793 +-BTRFS_WORK_HELPER_PROTO(scrubwrc_helper);
3794 +-BTRFS_WORK_HELPER_PROTO(scrubnc_helper);
3795 +-BTRFS_WORK_HELPER_PROTO(scrubparity_helper);
3796 +-
3797 +-
3798 + struct btrfs_workqueue *btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info,
3799 + const char *name,
3800 + unsigned int flags,
3801 + int limit_active,
3802 + int thresh);
3803 +-void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t helper,
3804 +- btrfs_func_t func,
3805 +- btrfs_func_t ordered_func,
3806 +- btrfs_func_t ordered_free);
3807 ++void btrfs_init_work(struct btrfs_work *work, btrfs_func_t func,
3808 ++ btrfs_func_t ordered_func, btrfs_func_t ordered_free);
3809 + void btrfs_queue_work(struct btrfs_workqueue *wq,
3810 + struct btrfs_work *work);
3811 + void btrfs_destroy_workqueue(struct btrfs_workqueue *wq);
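
The two async-thread hunks above delete the per-type BTRFS_WORK_HELPER() stubs: every btrfs_work now enters through one shared helper that recovers the wrapper struct with container_of() and dispatches via the function pointers stored in it. A small self-contained sketch of that embed-and-recover idiom (generic names in place of the btrfs ones):

    #include <stddef.h>
    #include <stdio.h>

    /* Minimal stand-in for the workqueue type. */
    struct work_struct { void (*fn)(struct work_struct *); };

    struct my_work {
            struct work_struct normal_work; /* embedded, as in struct btrfs_work */
            void (*func)(struct my_work *);
    };

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    /* One shared helper replaces the per-type stubs. */
    static void work_helper(struct work_struct *normal_work)
    {
            struct my_work *work =
                    container_of(normal_work, struct my_work, normal_work);
            work->func(work);
    }

    static void say_hello(struct my_work *w) { (void)w; puts("hello"); }

    int main(void)
    {
            struct my_work w = { .normal_work.fn = work_helper,
                                 .func = say_hello };
            w.normal_work.fn(&w.normal_work); /* what the queue would do */
            return 0;
    }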
3812 +diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
3813 +index 0d2da2366869..7dcfa7d7632a 100644
3814 +--- a/fs/btrfs/block-group.c
3815 ++++ b/fs/btrfs/block-group.c
3816 +@@ -695,8 +695,7 @@ int btrfs_cache_block_group(struct btrfs_block_group_cache *cache,
3817 + caching_ctl->block_group = cache;
3818 + caching_ctl->progress = cache->key.objectid;
3819 + refcount_set(&caching_ctl->count, 1);
3820 +- btrfs_init_work(&caching_ctl->work, btrfs_cache_helper,
3821 +- caching_thread, NULL, NULL);
3822 ++ btrfs_init_work(&caching_ctl->work, caching_thread, NULL, NULL);
3823 +
3824 + spin_lock(&cache->lock);
3825 + /*
3826 +diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
3827 +index 57a9ad3e8c29..c7a53e79c66d 100644
3828 +--- a/fs/btrfs/delayed-inode.c
3829 ++++ b/fs/btrfs/delayed-inode.c
3830 +@@ -1367,8 +1367,8 @@ static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
3831 + return -ENOMEM;
3832 +
3833 + async_work->delayed_root = delayed_root;
3834 +- btrfs_init_work(&async_work->work, btrfs_delayed_meta_helper,
3835 +- btrfs_async_run_delayed_root, NULL, NULL);
3836 ++ btrfs_init_work(&async_work->work, btrfs_async_run_delayed_root, NULL,
3837 ++ NULL);
3838 + async_work->nr = nr;
3839 +
3840 + btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
3841 +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
3842 +index 3895c21853cc..bae334212ee2 100644
3843 +--- a/fs/btrfs/disk-io.c
3844 ++++ b/fs/btrfs/disk-io.c
3845 +@@ -706,43 +706,31 @@ static void end_workqueue_bio(struct bio *bio)
3846 + struct btrfs_end_io_wq *end_io_wq = bio->bi_private;
3847 + struct btrfs_fs_info *fs_info;
3848 + struct btrfs_workqueue *wq;
3849 +- btrfs_work_func_t func;
3850 +
3851 + fs_info = end_io_wq->info;
3852 + end_io_wq->status = bio->bi_status;
3853 +
3854 + if (bio_op(bio) == REQ_OP_WRITE) {
3855 +- if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) {
3856 ++ if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA)
3857 + wq = fs_info->endio_meta_write_workers;
3858 +- func = btrfs_endio_meta_write_helper;
3859 +- } else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE) {
3860 ++ else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE)
3861 + wq = fs_info->endio_freespace_worker;
3862 +- func = btrfs_freespace_write_helper;
3863 +- } else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
3864 ++ else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
3865 + wq = fs_info->endio_raid56_workers;
3866 +- func = btrfs_endio_raid56_helper;
3867 +- } else {
3868 ++ else
3869 + wq = fs_info->endio_write_workers;
3870 +- func = btrfs_endio_write_helper;
3871 +- }
3872 + } else {
3873 +- if (unlikely(end_io_wq->metadata ==
3874 +- BTRFS_WQ_ENDIO_DIO_REPAIR)) {
3875 ++ if (unlikely(end_io_wq->metadata == BTRFS_WQ_ENDIO_DIO_REPAIR))
3876 + wq = fs_info->endio_repair_workers;
3877 +- func = btrfs_endio_repair_helper;
3878 +- } else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
3879 ++ else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
3880 + wq = fs_info->endio_raid56_workers;
3881 +- func = btrfs_endio_raid56_helper;
3882 +- } else if (end_io_wq->metadata) {
3883 ++ else if (end_io_wq->metadata)
3884 + wq = fs_info->endio_meta_workers;
3885 +- func = btrfs_endio_meta_helper;
3886 +- } else {
3887 ++ else
3888 + wq = fs_info->endio_workers;
3889 +- func = btrfs_endio_helper;
3890 +- }
3891 + }
3892 +
3893 +- btrfs_init_work(&end_io_wq->work, func, end_workqueue_fn, NULL, NULL);
3894 ++ btrfs_init_work(&end_io_wq->work, end_workqueue_fn, NULL, NULL);
3895 + btrfs_queue_work(wq, &end_io_wq->work);
3896 + }
3897 +
3898 +@@ -835,8 +823,8 @@ blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
3899 + async->mirror_num = mirror_num;
3900 + async->submit_bio_start = submit_bio_start;
3901 +
3902 +- btrfs_init_work(&async->work, btrfs_worker_helper, run_one_async_start,
3903 +- run_one_async_done, run_one_async_free);
3904 ++ btrfs_init_work(&async->work, run_one_async_start, run_one_async_done,
3905 ++ run_one_async_free);
3906 +
3907 + async->bio_offset = bio_offset;
3908 +
3909 +diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
3910 +index be9dc78aa727..33c6b191ca59 100644
3911 +--- a/fs/btrfs/extent_io.c
3912 ++++ b/fs/btrfs/extent_io.c
3913 +@@ -1899,7 +1899,7 @@ static int __process_pages_contig(struct address_space *mapping,
3914 + if (page_ops & PAGE_SET_PRIVATE2)
3915 + SetPagePrivate2(pages[i]);
3916 +
3917 +- if (pages[i] == locked_page) {
3918 ++ if (locked_page && pages[i] == locked_page) {
3919 + put_page(pages[i]);
3920 + pages_locked++;
3921 + continue;
3922 +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
3923 +index e5758f62e8d8..0b2758961b1c 100644
3924 +--- a/fs/btrfs/inode.c
3925 ++++ b/fs/btrfs/inode.c
3926 +@@ -712,10 +712,12 @@ cleanup_and_bail_uncompressed:
3927 + * to our extent and set things up for the async work queue to run
3928 + * cow_file_range to do the normal delalloc dance.
3929 + */
3930 +- if (page_offset(async_chunk->locked_page) >= start &&
3931 +- page_offset(async_chunk->locked_page) <= end)
3932 ++ if (async_chunk->locked_page &&
3933 ++ (page_offset(async_chunk->locked_page) >= start &&
2934 ++ page_offset(async_chunk->locked_page) <= end)) {
3935 + __set_page_dirty_nobuffers(async_chunk->locked_page);
3936 + /* unlocked later on in the async handlers */
3937 ++ }
3938 +
3939 + if (redirty)
3940 + extent_range_redirty_for_io(inode, start, end);
3941 +@@ -795,7 +797,7 @@ retry:
3942 + async_extent->start +
3943 + async_extent->ram_size - 1,
3944 + WB_SYNC_ALL);
3945 +- else if (ret)
3946 ++ else if (ret && async_chunk->locked_page)
3947 + unlock_page(async_chunk->locked_page);
3948 + kfree(async_extent);
3949 + cond_resched();
3950 +@@ -1264,14 +1266,27 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
3951 + async_chunk[i].inode = inode;
3952 + async_chunk[i].start = start;
3953 + async_chunk[i].end = cur_end;
3954 +- async_chunk[i].locked_page = locked_page;
3955 + async_chunk[i].write_flags = write_flags;
3956 + INIT_LIST_HEAD(&async_chunk[i].extents);
3957 +
3958 +- btrfs_init_work(&async_chunk[i].work,
3959 +- btrfs_delalloc_helper,
3960 +- async_cow_start, async_cow_submit,
3961 +- async_cow_free);
3962 ++ /*
3963 ++ * The locked_page comes all the way from writepage and it's
3964 ++ * the original page we were actually given. As we spread
3965 ++ * this large delalloc region across multiple async_chunk
3966 ++ * structs, only the first struct needs a pointer to locked_page.
3967 ++ *
3968 ++ * This way we don't need racy decisions about who is supposed
3969 ++ * to unlock it.
3970 ++ */
3971 ++ if (locked_page) {
3972 ++ async_chunk[i].locked_page = locked_page;
3973 ++ locked_page = NULL;
3974 ++ } else {
3975 ++ async_chunk[i].locked_page = NULL;
3976 ++ }
3977 ++
3978 ++ btrfs_init_work(&async_chunk[i].work, async_cow_start,
3979 ++ async_cow_submit, async_cow_free);
3980 +
3981 + nr_pages = DIV_ROUND_UP(cur_end - start, PAGE_SIZE);
3982 + atomic_add(nr_pages, &fs_info->async_delalloc_pages);
3983 +@@ -1439,10 +1454,10 @@ next_slot:
3984 + disk_num_bytes =
3985 + btrfs_file_extent_disk_num_bytes(leaf, fi);
3986 + /*
3987 +- * If extent we got ends before our range starts, skip
3988 +- * to next extent
3989 ++ * If the extent we got ends before our current offset,
3990 ++ * skip to the next extent.
3991 + */
3992 +- if (extent_end <= start) {
3993 ++ if (extent_end <= cur_offset) {
3994 + path->slots[0]++;
3995 + goto next_slot;
3996 + }
3997 +@@ -2264,8 +2279,7 @@ int btrfs_writepage_cow_fixup(struct page *page, u64 start, u64 end)
3998 +
3999 + SetPageChecked(page);
4000 + get_page(page);
4001 +- btrfs_init_work(&fixup->work, btrfs_fixup_helper,
4002 +- btrfs_writepage_fixup_worker, NULL, NULL);
4003 ++ btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL, NULL);
4004 + fixup->page = page;
4005 + btrfs_queue_work(fs_info->fixup_workers, &fixup->work);
4006 + return -EBUSY;
4007 +@@ -3258,7 +3272,6 @@ void btrfs_writepage_endio_finish_ordered(struct page *page, u64 start,
4008 + struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4009 + struct btrfs_ordered_extent *ordered_extent = NULL;
4010 + struct btrfs_workqueue *wq;
4011 +- btrfs_work_func_t func;
4012 +
4013 + trace_btrfs_writepage_end_io_hook(page, start, end, uptodate);
4014 +
4015 +@@ -3267,16 +3280,12 @@ void btrfs_writepage_endio_finish_ordered(struct page *page, u64 start,
4016 + end - start + 1, uptodate))
4017 + return;
4018 +
4019 +- if (btrfs_is_free_space_inode(BTRFS_I(inode))) {
4020 ++ if (btrfs_is_free_space_inode(BTRFS_I(inode)))
4021 + wq = fs_info->endio_freespace_worker;
4022 +- func = btrfs_freespace_write_helper;
4023 +- } else {
4024 ++ else
4025 + wq = fs_info->endio_write_workers;
4026 +- func = btrfs_endio_write_helper;
4027 +- }
4028 +
4029 +- btrfs_init_work(&ordered_extent->work, func, finish_ordered_fn, NULL,
4030 +- NULL);
4031 ++ btrfs_init_work(&ordered_extent->work, finish_ordered_fn, NULL, NULL);
4032 + btrfs_queue_work(wq, &ordered_extent->work);
4033 + }
4034 +
4035 +@@ -8213,18 +8222,14 @@ static void __endio_write_update_ordered(struct inode *inode,
4036 + struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4037 + struct btrfs_ordered_extent *ordered = NULL;
4038 + struct btrfs_workqueue *wq;
4039 +- btrfs_work_func_t func;
4040 + u64 ordered_offset = offset;
4041 + u64 ordered_bytes = bytes;
4042 + u64 last_offset;
4043 +
4044 +- if (btrfs_is_free_space_inode(BTRFS_I(inode))) {
4045 ++ if (btrfs_is_free_space_inode(BTRFS_I(inode)))
4046 + wq = fs_info->endio_freespace_worker;
4047 +- func = btrfs_freespace_write_helper;
4048 +- } else {
4049 ++ else
4050 + wq = fs_info->endio_write_workers;
4051 +- func = btrfs_endio_write_helper;
4052 +- }
4053 +
4054 + while (ordered_offset < offset + bytes) {
4055 + last_offset = ordered_offset;
4056 +@@ -8232,9 +8237,8 @@ static void __endio_write_update_ordered(struct inode *inode,
4057 + &ordered_offset,
4058 + ordered_bytes,
4059 + uptodate)) {
4060 +- btrfs_init_work(&ordered->work, func,
4061 +- finish_ordered_fn,
4062 +- NULL, NULL);
4063 ++ btrfs_init_work(&ordered->work, finish_ordered_fn, NULL,
4064 ++ NULL);
4065 + btrfs_queue_work(wq, &ordered->work);
4066 + }
4067 + /*
4068 +@@ -10119,8 +10123,7 @@ static struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode
4069 + init_completion(&work->completion);
4070 + INIT_LIST_HEAD(&work->list);
4071 + work->inode = inode;
4072 +- btrfs_init_work(&work->work, btrfs_flush_delalloc_helper,
4073 +- btrfs_run_delalloc_work, NULL, NULL);
4074 ++ btrfs_init_work(&work->work, btrfs_run_delalloc_work, NULL, NULL);
4075 +
4076 + return work;
4077 + }
4078 +diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
4079 +index 24b6c72b9a59..6240a5a1f2c0 100644
4080 +--- a/fs/btrfs/ordered-data.c
4081 ++++ b/fs/btrfs/ordered-data.c
4082 +@@ -547,7 +547,6 @@ u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
4083 + spin_unlock(&root->ordered_extent_lock);
4084 +
4085 + btrfs_init_work(&ordered->flush_work,
4086 +- btrfs_flush_delalloc_helper,
4087 + btrfs_run_ordered_extent_work, NULL, NULL);
4088 + list_add_tail(&ordered->work_list, &works);
4089 + btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work);
4090 +diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
4091 +index 3ad151655eb8..27a903aaf43b 100644
4092 +--- a/fs/btrfs/qgroup.c
4093 ++++ b/fs/btrfs/qgroup.c
4094 +@@ -3280,7 +3280,6 @@ qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
4095 + memset(&fs_info->qgroup_rescan_work, 0,
4096 + sizeof(fs_info->qgroup_rescan_work));
4097 + btrfs_init_work(&fs_info->qgroup_rescan_work,
4098 +- btrfs_qgroup_rescan_helper,
4099 + btrfs_qgroup_rescan_worker, NULL, NULL);
4100 + return 0;
4101 + }
4102 +diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
4103 +index 57a2ac721985..8f47a85944eb 100644
4104 +--- a/fs/btrfs/raid56.c
4105 ++++ b/fs/btrfs/raid56.c
4106 +@@ -190,7 +190,7 @@ static void scrub_parity_work(struct btrfs_work *work);
4107 +
4108 + static void start_async_work(struct btrfs_raid_bio *rbio, btrfs_func_t work_func)
4109 + {
4110 +- btrfs_init_work(&rbio->work, btrfs_rmw_helper, work_func, NULL, NULL);
4111 ++ btrfs_init_work(&rbio->work, work_func, NULL, NULL);
4112 + btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
4113 + }
4114 +
4115 +@@ -1743,8 +1743,7 @@ static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
4116 + plug = container_of(cb, struct btrfs_plug_cb, cb);
4117 +
4118 + if (from_schedule) {
4119 +- btrfs_init_work(&plug->work, btrfs_rmw_helper,
4120 +- unplug_work, NULL, NULL);
4121 ++ btrfs_init_work(&plug->work, unplug_work, NULL, NULL);
4122 + btrfs_queue_work(plug->info->rmw_workers,
4123 + &plug->work);
4124 + return;
4125 +diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c
4126 +index dd4f9c2b7107..1feaeadc8cf5 100644
4127 +--- a/fs/btrfs/reada.c
4128 ++++ b/fs/btrfs/reada.c
4129 +@@ -819,8 +819,7 @@ static void reada_start_machine(struct btrfs_fs_info *fs_info)
4130 + /* FIXME we cannot handle this properly right now */
4131 + BUG();
4132 + }
4133 +- btrfs_init_work(&rmw->work, btrfs_readahead_helper,
4134 +- reada_start_machine_worker, NULL, NULL);
4135 ++ btrfs_init_work(&rmw->work, reada_start_machine_worker, NULL, NULL);
4136 + rmw->fs_info = fs_info;
4137 +
4138 + btrfs_queue_work(fs_info->readahead_workers, &rmw->work);
4139 +diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
4140 +index a0770a6aee00..a7b043fd7a57 100644
4141 +--- a/fs/btrfs/scrub.c
4142 ++++ b/fs/btrfs/scrub.c
4143 +@@ -598,8 +598,8 @@ static noinline_for_stack struct scrub_ctx *scrub_setup_ctx(
4144 + sbio->index = i;
4145 + sbio->sctx = sctx;
4146 + sbio->page_count = 0;
4147 +- btrfs_init_work(&sbio->work, btrfs_scrub_helper,
4148 +- scrub_bio_end_io_worker, NULL, NULL);
4149 ++ btrfs_init_work(&sbio->work, scrub_bio_end_io_worker, NULL,
4150 ++ NULL);
4151 +
4152 + if (i != SCRUB_BIOS_PER_SCTX - 1)
4153 + sctx->bios[i]->next_free = i + 1;
4154 +@@ -1720,8 +1720,7 @@ static void scrub_wr_bio_end_io(struct bio *bio)
4155 + sbio->status = bio->bi_status;
4156 + sbio->bio = bio;
4157 +
4158 +- btrfs_init_work(&sbio->work, btrfs_scrubwrc_helper,
4159 +- scrub_wr_bio_end_io_worker, NULL, NULL);
4160 ++ btrfs_init_work(&sbio->work, scrub_wr_bio_end_io_worker, NULL, NULL);
4161 + btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work);
4162 + }
4163 +
4164 +@@ -2203,8 +2202,7 @@ static void scrub_missing_raid56_pages(struct scrub_block *sblock)
4165 + raid56_add_scrub_pages(rbio, spage->page, spage->logical);
4166 + }
4167 +
4168 +- btrfs_init_work(&sblock->work, btrfs_scrub_helper,
4169 +- scrub_missing_raid56_worker, NULL, NULL);
4170 ++ btrfs_init_work(&sblock->work, scrub_missing_raid56_worker, NULL, NULL);
4171 + scrub_block_get(sblock);
4172 + scrub_pending_bio_inc(sctx);
4173 + raid56_submit_missing_rbio(rbio);
4174 +@@ -2742,8 +2740,8 @@ static void scrub_parity_bio_endio(struct bio *bio)
4175 +
4176 + bio_put(bio);
4177 +
4178 +- btrfs_init_work(&sparity->work, btrfs_scrubparity_helper,
4179 +- scrub_parity_bio_endio_worker, NULL, NULL);
4180 ++ btrfs_init_work(&sparity->work, scrub_parity_bio_endio_worker, NULL,
4181 ++ NULL);
4182 + btrfs_queue_work(fs_info->scrub_parity_workers, &sparity->work);
4183 + }
4184 +
4185 +diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
4186 +index e04409f85063..d8d7b1ee83ca 100644
4187 +--- a/fs/btrfs/volumes.c
4188 ++++ b/fs/btrfs/volumes.c
4189 +@@ -6676,8 +6676,7 @@ struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
4190 + else
4191 + generate_random_uuid(dev->uuid);
4192 +
4193 +- btrfs_init_work(&dev->work, btrfs_submit_helper,
4194 +- pending_bios_fn, NULL, NULL);
4195 ++ btrfs_init_work(&dev->work, pending_bios_fn, NULL, NULL);
4196 +
4197 + return dev;
4198 + }
4199 +diff --git a/fs/buffer.c b/fs/buffer.c
4200 +index 86a38b979323..7744488f7bde 100644
4201 +--- a/fs/buffer.c
4202 ++++ b/fs/buffer.c
4203 +@@ -2994,8 +2994,6 @@ static void end_bio_bh_io_sync(struct bio *bio)
4204 + void guard_bio_eod(int op, struct bio *bio)
4205 + {
4206 + sector_t maxsector;
4207 +- struct bio_vec *bvec = bio_last_bvec_all(bio);
4208 +- unsigned truncated_bytes;
4209 + struct hd_struct *part;
4210 +
4211 + rcu_read_lock();
4212 +@@ -3021,28 +3019,7 @@ void guard_bio_eod(int op, struct bio *bio)
4213 + if (likely((bio->bi_iter.bi_size >> 9) <= maxsector))
4214 + return;
4215 +
4216 +- /* Uhhuh. We've got a bio that straddles the device size! */
4217 +- truncated_bytes = bio->bi_iter.bi_size - (maxsector << 9);
4218 +-
4219 +- /*
4220 +- * The bio contains more than one segment which spans EOD, just return
4221 +- * and let IO layer turn it into an EIO
4222 +- */
4223 +- if (truncated_bytes > bvec->bv_len)
4224 +- return;
4225 +-
4226 +- /* Truncate the bio.. */
4227 +- bio->bi_iter.bi_size -= truncated_bytes;
4228 +- bvec->bv_len -= truncated_bytes;
4229 +-
4230 +- /* ..and clear the end of the buffer for reads */
4231 +- if (op == REQ_OP_READ) {
4232 +- struct bio_vec bv;
4233 +-
4234 +- mp_bvec_last_segment(bvec, &bv);
4235 +- zero_user(bv.bv_page, bv.bv_offset + bv.bv_len,
4236 +- truncated_bytes);
4237 +- }
4238 ++ bio_truncate(bio, maxsector << 9);
4239 + }
4240 +
4241 + static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
4242 +diff --git a/fs/cifs/dfs_cache.c b/fs/cifs/dfs_cache.c
4243 +index 1692c0c6c23a..2faa05860a48 100644
4244 +--- a/fs/cifs/dfs_cache.c
4245 ++++ b/fs/cifs/dfs_cache.c
4246 +@@ -1317,7 +1317,6 @@ static struct cifs_ses *find_root_ses(struct dfs_cache_vol_info *vi,
4247 + int rc;
4248 + struct dfs_info3_param ref = {0};
4249 + char *mdata = NULL, *devname = NULL;
4250 +- bool is_smb3 = tcon->ses->server->vals->header_preamble_size == 0;
4251 + struct TCP_Server_Info *server;
4252 + struct cifs_ses *ses;
4253 + struct smb_vol vol;
4254 +@@ -1344,7 +1343,7 @@ static struct cifs_ses *find_root_ses(struct dfs_cache_vol_info *vi,
4255 + goto out;
4256 + }
4257 +
4258 +- rc = cifs_setup_volume_info(&vol, mdata, devname, is_smb3);
4259 ++ rc = cifs_setup_volume_info(&vol, mdata, devname, false);
4260 + kfree(devname);
4261 +
4262 + if (rc) {
4263 +diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
4264 +index df9377828e2f..ed59e4a8db59 100644
4265 +--- a/fs/cifs/inode.c
4266 ++++ b/fs/cifs/inode.c
4267 +@@ -163,7 +163,7 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
4268 +
4269 + spin_lock(&inode->i_lock);
4270 + /* we do not want atime to be less than mtime, it broke some apps */
4271 +- if (timespec64_compare(&fattr->cf_atime, &fattr->cf_mtime))
4272 ++ if (timespec64_compare(&fattr->cf_atime, &fattr->cf_mtime) < 0)
4273 + inode->i_atime = fattr->cf_mtime;
4274 + else
4275 + inode->i_atime = fattr->cf_atime;
4276 +diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
4277 +index c985caa2d955..e1d8cec6ba2e 100644
4278 +--- a/fs/cifs/smb2pdu.c
4279 ++++ b/fs/cifs/smb2pdu.c
4280 +@@ -252,7 +252,7 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
4281 + if (tcon == NULL)
4282 + return 0;
4283 +
4284 +- if (smb2_command == SMB2_TREE_CONNECT || smb2_command == SMB2_IOCTL)
4285 ++ if (smb2_command == SMB2_TREE_CONNECT)
4286 + return 0;
4287 +
4288 + if (tcon->tidStatus == CifsExiting) {
4289 +@@ -426,16 +426,9 @@ fill_small_buf(__le16 smb2_command, struct cifs_tcon *tcon, void *buf,
4290 + * SMB information in the SMB header. If the return code is zero, this
4291 + * function must have filled in request_buf pointer.
4292 + */
4293 +-static int
4294 +-smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
4295 +- void **request_buf, unsigned int *total_len)
4296 ++static int __smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
4297 ++ void **request_buf, unsigned int *total_len)
4298 + {
4299 +- int rc;
4300 +-
4301 +- rc = smb2_reconnect(smb2_command, tcon);
4302 +- if (rc)
4303 +- return rc;
4304 +-
4305 + /* BB eventually switch this to SMB2 specific small buf size */
4306 + if (smb2_command == SMB2_SET_INFO)
4307 + *request_buf = cifs_buf_get();
4308 +@@ -456,7 +449,31 @@ smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
4309 + cifs_stats_inc(&tcon->num_smbs_sent);
4310 + }
4311 +
4312 +- return rc;
4313 ++ return 0;
4314 ++}
4315 ++
4316 ++static int smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
4317 ++ void **request_buf, unsigned int *total_len)
4318 ++{
4319 ++ int rc;
4320 ++
4321 ++ rc = smb2_reconnect(smb2_command, tcon);
4322 ++ if (rc)
4323 ++ return rc;
4324 ++
4325 ++ return __smb2_plain_req_init(smb2_command, tcon, request_buf,
4326 ++ total_len);
4327 ++}
4328 ++
4329 ++static int smb2_ioctl_req_init(u32 opcode, struct cifs_tcon *tcon,
4330 ++ void **request_buf, unsigned int *total_len)
4331 ++{
4332 ++ /* Skip reconnect only for FSCTL_VALIDATE_NEGOTIATE_INFO IOCTLs */
4333 ++ if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO) {
4334 ++ return __smb2_plain_req_init(SMB2_IOCTL, tcon, request_buf,
4335 ++ total_len);
4336 ++ }
4337 ++ return smb2_plain_req_init(SMB2_IOCTL, tcon, request_buf, total_len);
4338 + }
4339 +
4340 + /* For explanation of negotiate contexts see MS-SMB2 section 2.2.3.1 */
4341 +@@ -2661,7 +2678,7 @@ SMB2_ioctl_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
4342 + int rc;
4343 + char *in_data_buf;
4344 +
4345 +- rc = smb2_plain_req_init(SMB2_IOCTL, tcon, (void **) &req, &total_len);
4346 ++ rc = smb2_ioctl_req_init(opcode, tcon, (void **) &req, &total_len);
4347 + if (rc)
4348 + return rc;
4349 +
4350 +diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
4351 +index a7ec2d3dff92..e0226b2138d6 100644
4352 +--- a/fs/compat_ioctl.c
4353 ++++ b/fs/compat_ioctl.c
4354 +@@ -1032,10 +1032,11 @@ COMPAT_SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd,
4355 + #endif
4356 +
4357 + case FICLONE:
4358 ++ goto do_ioctl;
4359 + case FICLONERANGE:
4360 + case FIDEDUPERANGE:
4361 + case FS_IOC_FIEMAP:
4362 +- goto do_ioctl;
4363 ++ goto found_handler;
4364 +
4365 + case FIBMAP:
4366 + case FIGETBSZ:
4367 +diff --git a/fs/io_uring.c b/fs/io_uring.c
4368 +index 74e786578c77..a60c6315a348 100644
4369 +--- a/fs/io_uring.c
4370 ++++ b/fs/io_uring.c
4371 +@@ -239,7 +239,7 @@ struct io_ring_ctx {
4372 +
4373 + struct user_struct *user;
4374 +
4375 +- struct cred *creds;
4376 ++ const struct cred *creds;
4377 +
4378 + struct completion ctx_done;
4379 +
4380 +@@ -3876,7 +3876,7 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p)
4381 + ctx->account_mem = account_mem;
4382 + ctx->user = user;
4383 +
4384 +- ctx->creds = prepare_creds();
4385 ++ ctx->creds = get_current_cred();
4386 + if (!ctx->creds) {
4387 + ret = -ENOMEM;
4388 + goto err;
4389 +diff --git a/fs/locks.c b/fs/locks.c
4390 +index 6970f55daf54..44b6da032842 100644
4391 +--- a/fs/locks.c
4392 ++++ b/fs/locks.c
4393 +@@ -2853,7 +2853,7 @@ static void lock_get_status(struct seq_file *f, struct file_lock *fl,
4394 + }
4395 + if (inode) {
4396 + /* userspace relies on this representation of dev_t */
4397 +- seq_printf(f, "%d %02x:%02x:%ld ", fl_pid,
4398 ++ seq_printf(f, "%d %02x:%02x:%lu ", fl_pid,
4399 + MAJOR(inode->i_sb->s_dev),
4400 + MINOR(inode->i_sb->s_dev), inode->i_ino);
4401 + } else {
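
The one-character locks.c fix above matters because inode->i_ino is an unsigned long: printing it with %ld renders large inode numbers as negative in /proc/locks, which userspace tools parse. A two-line illustration of the mismatch (plain printf rather than the kernel's seq_printf):

    #include <stdio.h>
    #include <limits.h>

    int main(void)
    {
            unsigned long ino = ULONG_MAX - 1;      /* a large inode number */

            printf("%ld\n", ino);   /* the old bug: -2 on typical 64-bit systems */
            printf("%lu\n", ino);   /* correct: 18446744073709551614 */
            return 0;
    }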
4402 +diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
4403 +index c65aeaa812d4..08f6eb2b73f8 100644
4404 +--- a/fs/nfsd/nfs4state.c
4405 ++++ b/fs/nfsd/nfs4state.c
4406 +@@ -3548,12 +3548,17 @@ static bool replay_matches_cache(struct svc_rqst *rqstp,
4407 + (bool)seq->cachethis)
4408 + return false;
4409 + /*
4410 +- * If there's an error than the reply can have fewer ops than
4411 +- * the call. But if we cached a reply with *more* ops than the
4412 +- * call you're sending us now, then this new call is clearly not
4413 +- * really a replay of the old one:
4414 ++ * If there's an error then the reply can have fewer ops than
4415 ++ * the call.
4416 + */
4417 +- if (slot->sl_opcnt < argp->opcnt)
4418 ++ if (slot->sl_opcnt < argp->opcnt && !slot->sl_status)
4419 ++ return false;
4420 ++ /*
4421 ++ * But if we cached a reply with *more* ops than the call you're
4422 ++ * sending us now, then this new call is clearly not really a
4423 ++ * replay of the old one:
4424 ++ */
4425 ++ if (slot->sl_opcnt > argp->opcnt)
4426 + return false;
4427 + /* This is the only check explicitly called by spec: */
4428 + if (!same_creds(&rqstp->rq_cred, &slot->sl_cred))
4429 +diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
4430 +index 6e774c5ea13b..8a2e284ccfcd 100644
4431 +--- a/fs/ocfs2/dlmglue.c
4432 ++++ b/fs/ocfs2/dlmglue.c
4433 +@@ -3282,6 +3282,7 @@ static void ocfs2_dlm_init_debug(struct ocfs2_super *osb)
4434 +
4435 + debugfs_create_u32("locking_filter", 0600, osb->osb_debug_root,
4436 + &dlm_debug->d_filter_secs);
4437 ++ ocfs2_get_dlm_debug(dlm_debug);
4438 + }
4439 +
4440 + static void ocfs2_dlm_shutdown_debug(struct ocfs2_super *osb)
4441 +diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
4442 +index 8caff834f002..487ee39b438a 100644
4443 +--- a/fs/pstore/ram.c
4444 ++++ b/fs/pstore/ram.c
4445 +@@ -407,6 +407,17 @@ static int notrace ramoops_pstore_write(struct pstore_record *record)
4446 +
4447 + prz = cxt->dprzs[cxt->dump_write_cnt];
4448 +
4449 ++ /*
4450 ++ * Since this is a new crash dump, we need to reset the buffer in
4451 ++ * case it still has an old dump present. Without this, the new dump
4452 ++ * will get appended, which would seriously confuse anything trying
4453 ++ * to check dump file contents. Specifically, ramoops_read_kmsg_hdr()
4454 ++ * expects to find a dump header in the beginning of buffer data, so
4455 ++ * we must reset the buffer values, in order to ensure that the
4456 ++ * header will be written to the beginning of the buffer.
4457 ++ */
4458 ++ persistent_ram_zap(prz);
4459 ++
4460 + /* Build header and append record contents. */
4461 + hlen = ramoops_write_kmsg_hdr(prz, record);
4462 + if (!hlen)
4463 +@@ -577,6 +588,7 @@ static int ramoops_init_przs(const char *name,
4464 + dev_err(dev, "failed to request %s mem region (0x%zx@0x%llx): %d\n",
4465 + name, record_size,
4466 + (unsigned long long)*paddr, err);
4467 ++ kfree(label);
4468 +
4469 + while (i > 0) {
4470 + i--;
4471 +@@ -622,6 +634,7 @@ static int ramoops_init_prz(const char *name,
4472 +
4473 + dev_err(dev, "failed to request %s mem region (0x%zx@0x%llx): %d\n",
4474 + name, sz, (unsigned long long)*paddr, err);
4475 ++ kfree(label);
4476 + return err;
4477 + }
4478 +
4479 +diff --git a/fs/ubifs/tnc_commit.c b/fs/ubifs/tnc_commit.c
4480 +index a384a0f9ff32..234be1c4dc87 100644
4481 +--- a/fs/ubifs/tnc_commit.c
4482 ++++ b/fs/ubifs/tnc_commit.c
4483 +@@ -212,7 +212,7 @@ static int is_idx_node_in_use(struct ubifs_info *c, union ubifs_key *key,
4484 + /**
4485 + * layout_leb_in_gaps - layout index nodes using in-the-gaps method.
4486 + * @c: UBIFS file-system description object
4487 +- * @p: return LEB number here
4488 ++ * @p: return LEB number in @c->gap_lebs[p]
4489 + *
4490 + * This function lays out new index nodes for dirty znodes using in-the-gaps
4491 + * method of TNC commit.
4492 +@@ -221,7 +221,7 @@ static int is_idx_node_in_use(struct ubifs_info *c, union ubifs_key *key,
4493 + * This function returns the number of index nodes written into the gaps, or a
4494 + * negative error code on failure.
4495 + */
4496 +-static int layout_leb_in_gaps(struct ubifs_info *c, int *p)
4497 ++static int layout_leb_in_gaps(struct ubifs_info *c, int p)
4498 + {
4499 + struct ubifs_scan_leb *sleb;
4500 + struct ubifs_scan_node *snod;
4501 +@@ -236,7 +236,7 @@ static int layout_leb_in_gaps(struct ubifs_info *c, int *p)
4502 + * filled, however we do not check there at present.
4503 + */
4504 + return lnum; /* Error code */
4505 +- *p = lnum;
4506 ++ c->gap_lebs[p] = lnum;
4507 + dbg_gc("LEB %d", lnum);
4508 + /*
4509 + * Scan the index LEB. We use the generic scan for this even though
4510 +@@ -355,7 +355,7 @@ static int get_leb_cnt(struct ubifs_info *c, int cnt)
4511 + */
4512 + static int layout_in_gaps(struct ubifs_info *c, int cnt)
4513 + {
4514 +- int err, leb_needed_cnt, written, *p;
4515 ++ int err, leb_needed_cnt, written, p = 0, old_idx_lebs, *gap_lebs;
4516 +
4517 + dbg_gc("%d znodes to write", cnt);
4518 +
4519 +@@ -364,9 +364,9 @@ static int layout_in_gaps(struct ubifs_info *c, int cnt)
4520 + if (!c->gap_lebs)
4521 + return -ENOMEM;
4522 +
4523 +- p = c->gap_lebs;
4524 ++ old_idx_lebs = c->lst.idx_lebs;
4525 + do {
4526 +- ubifs_assert(c, p < c->gap_lebs + c->lst.idx_lebs);
4527 ++ ubifs_assert(c, p < c->lst.idx_lebs);
4528 + written = layout_leb_in_gaps(c, p);
4529 + if (written < 0) {
4530 + err = written;
4531 +@@ -392,9 +392,29 @@ static int layout_in_gaps(struct ubifs_info *c, int cnt)
4532 + leb_needed_cnt = get_leb_cnt(c, cnt);
4533 + dbg_gc("%d znodes remaining, need %d LEBs, have %d", cnt,
4534 + leb_needed_cnt, c->ileb_cnt);
4535 ++ /*
4536 ++ * Dynamically change the size of @c->gap_lebs to prevent
4537 ++ * oob, because @c->lst.idx_lebs could be increased by
4538 ++ * function @get_idx_gc_leb (called by layout_leb_in_gaps->
4539 ++ * ubifs_find_dirty_idx_leb) during the loop. Only enlarge
4540 ++ * @c->gap_lebs when needed.
4541 ++ *
4542 ++ */
4543 ++ if (leb_needed_cnt > c->ileb_cnt && p >= old_idx_lebs &&
4544 ++ old_idx_lebs < c->lst.idx_lebs) {
4545 ++ old_idx_lebs = c->lst.idx_lebs;
4546 ++ gap_lebs = krealloc(c->gap_lebs, sizeof(int) *
4547 ++ (old_idx_lebs + 1), GFP_NOFS);
4548 ++ if (!gap_lebs) {
4549 ++ kfree(c->gap_lebs);
4550 ++ c->gap_lebs = NULL;
4551 ++ return -ENOMEM;
4552 ++ }
4553 ++ c->gap_lebs = gap_lebs;
4554 ++ }
4555 + } while (leb_needed_cnt > c->ileb_cnt);
4556 +
4557 +- *p = -1;
4558 ++ c->gap_lebs[p] = -1;
4559 + return 0;
4560 + }
4561 +
4562 +diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
4563 +index 02469d59c787..3f76da11197c 100644
4564 +--- a/fs/xfs/libxfs/xfs_bmap.c
4565 ++++ b/fs/xfs/libxfs/xfs_bmap.c
4566 +@@ -5300,7 +5300,7 @@ __xfs_bunmapi(
4567 + * Make sure we don't touch multiple AGF headers out of order
4568 + * in a single transaction, as that could cause AB-BA deadlocks.
4569 + */
4570 +- if (!wasdel) {
4571 ++ if (!wasdel && !isrt) {
4572 + agno = XFS_FSB_TO_AGNO(mp, del.br_startblock);
4573 + if (prev_agno != NULLAGNUMBER && prev_agno > agno)
4574 + break;
4575 +diff --git a/fs/xfs/scrub/common.h b/fs/xfs/scrub/common.h
4576 +index 003a772cd26c..2e50d146105d 100644
4577 +--- a/fs/xfs/scrub/common.h
4578 ++++ b/fs/xfs/scrub/common.h
4579 +@@ -14,8 +14,15 @@
4580 + static inline bool
4581 + xchk_should_terminate(
4582 + struct xfs_scrub *sc,
4583 +- int *error)
4584 ++ int *error)
4585 + {
4586 ++ /*
4587 ++ * If preemption is disabled, we need to yield to the scheduler every
4588 ++ * few seconds so that we don't run afoul of the soft lockup watchdog
4589 ++ * or RCU stall detector.
4590 ++ */
4591 ++ cond_resched();
4592 ++
4593 + if (fatal_signal_pending(current)) {
4594 + if (*error == 0)
4595 + *error = -EAGAIN;
4596 +diff --git a/include/linux/ahci_platform.h b/include/linux/ahci_platform.h
4597 +index 6782f0d45ebe..49e5383d4222 100644
4598 +--- a/include/linux/ahci_platform.h
4599 ++++ b/include/linux/ahci_platform.h
4600 +@@ -19,6 +19,8 @@ struct ahci_host_priv;
4601 + struct platform_device;
4602 + struct scsi_host_template;
4603 +
4604 ++int ahci_platform_enable_phys(struct ahci_host_priv *hpriv);
4605 ++void ahci_platform_disable_phys(struct ahci_host_priv *hpriv);
4606 + int ahci_platform_enable_clks(struct ahci_host_priv *hpriv);
4607 + void ahci_platform_disable_clks(struct ahci_host_priv *hpriv);
4608 + int ahci_platform_enable_regulators(struct ahci_host_priv *hpriv);
4609 +diff --git a/include/linux/bio.h b/include/linux/bio.h
4610 +index 3cdb84cdc488..853d92ceee64 100644
4611 +--- a/include/linux/bio.h
4612 ++++ b/include/linux/bio.h
4613 +@@ -470,6 +470,7 @@ extern struct bio *bio_copy_user_iov(struct request_queue *,
4614 + gfp_t);
4615 + extern int bio_uncopy_user(struct bio *);
4616 + void zero_fill_bio_iter(struct bio *bio, struct bvec_iter iter);
4617 ++void bio_truncate(struct bio *bio, unsigned new_size);
4618 +
4619 + static inline void zero_fill_bio(struct bio *bio)
4620 + {
4621 +diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
4622 +index 8fcdee1c0cf9..dad4a68fa009 100644
4623 +--- a/include/linux/dmaengine.h
4624 ++++ b/include/linux/dmaengine.h
4625 +@@ -1364,8 +1364,11 @@ static inline int dma_get_slave_caps(struct dma_chan *chan,
4626 + static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx)
4627 + {
4628 + struct dma_slave_caps caps;
4629 ++ int ret;
4630 +
4631 +- dma_get_slave_caps(tx->chan, &caps);
4632 ++ ret = dma_get_slave_caps(tx->chan, &caps);
4633 ++ if (ret)
4634 ++ return ret;
4635 +
4636 + if (caps.descriptor_reuse) {
4637 + tx->flags |= DMA_CTRL_REUSE;
4638 +diff --git a/include/linux/libata.h b/include/linux/libata.h
4639 +index 207e7ee764ce..fa0c3dae2094 100644
4640 +--- a/include/linux/libata.h
4641 ++++ b/include/linux/libata.h
4642 +@@ -1174,6 +1174,7 @@ extern unsigned int ata_do_dev_read_id(struct ata_device *dev,
4643 + struct ata_taskfile *tf, u16 *id);
4644 + extern void ata_qc_complete(struct ata_queued_cmd *qc);
4645 + extern int ata_qc_complete_multiple(struct ata_port *ap, u64 qc_active);
4646 ++extern u64 ata_qc_get_active(struct ata_port *ap);
4647 + extern void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd);
4648 + extern int ata_std_bios_param(struct scsi_device *sdev,
4649 + struct block_device *bdev,
4650 +diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
4651 +index f46ea71b4ffd..451efd4499cc 100644
4652 +--- a/include/linux/memory_hotplug.h
4653 ++++ b/include/linux/memory_hotplug.h
4654 +@@ -125,8 +125,8 @@ static inline bool movable_node_is_enabled(void)
4655 +
4656 + extern void arch_remove_memory(int nid, u64 start, u64 size,
4657 + struct vmem_altmap *altmap);
4658 +-extern void __remove_pages(struct zone *zone, unsigned long start_pfn,
4659 +- unsigned long nr_pages, struct vmem_altmap *altmap);
4660 ++extern void __remove_pages(unsigned long start_pfn, unsigned long nr_pages,
4661 ++ struct vmem_altmap *altmap);
4662 +
4663 + /* reasonably generic interface to expand the physical pages */
4664 + extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
4665 +@@ -345,6 +345,9 @@ extern int add_memory(int nid, u64 start, u64 size);
4666 + extern int add_memory_resource(int nid, struct resource *resource);
4667 + extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
4668 + unsigned long nr_pages, struct vmem_altmap *altmap);
4669 ++extern void remove_pfn_range_from_zone(struct zone *zone,
4670 ++ unsigned long start_pfn,
4671 ++ unsigned long nr_pages);
4672 + extern bool is_memblock_offlined(struct memory_block *mem);
4673 + extern int sparse_add_section(int nid, unsigned long pfn,
4674 + unsigned long nr_pages, struct vmem_altmap *altmap);
4675 +diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h
4676 +index 10f81629b9ce..6d0d70f3219c 100644
4677 +--- a/include/linux/nvme-fc-driver.h
4678 ++++ b/include/linux/nvme-fc-driver.h
4679 +@@ -270,6 +270,8 @@ struct nvme_fc_remote_port {
4680 + *
4681 + * Host/Initiator Transport Entrypoints/Parameters:
4682 + *
4683 ++ * @module: The LLDD module using the interface
4684 ++ *
4685 + * @localport_delete: The LLDD initiates deletion of a localport via
4686 + * nvme_fc_deregister_localport(). However, the teardown is
4687 + * asynchronous. This routine is called upon the completion of the
4688 +@@ -383,6 +385,8 @@ struct nvme_fc_remote_port {
4689 + * Value is Mandatory. Allowed to be zero.
4690 + */
4691 + struct nvme_fc_port_template {
4692 ++ struct module *module;
4693 ++
4694 + /* initiator-based functions */
4695 + void (*localport_delete)(struct nvme_fc_local_port *);
4696 + void (*remoteport_delete)(struct nvme_fc_remote_port *);
4697 +diff --git a/include/linux/pci.h b/include/linux/pci.h
4698 +index f9088c89a534..be529d311122 100644
4699 +--- a/include/linux/pci.h
4700 ++++ b/include/linux/pci.h
4701 +@@ -2310,9 +2310,11 @@ struct irq_domain *pci_host_bridge_acpi_msi_domain(struct pci_bus *bus);
4702 +
4703 + void
4704 + pci_msi_register_fwnode_provider(struct fwnode_handle *(*fn)(struct device *));
4705 ++bool pci_pr3_present(struct pci_dev *pdev);
4706 + #else
4707 + static inline struct irq_domain *
4708 + pci_host_bridge_acpi_msi_domain(struct pci_bus *bus) { return NULL; }
4709 ++static inline bool pci_pr3_present(struct pci_dev *pdev) { return false; }
4710 + #endif
4711 +
4712 + #ifdef CONFIG_EEH
4713 +diff --git a/include/linux/regulator/ab8500.h b/include/linux/regulator/ab8500.h
4714 +index 7cf8f797e13a..505e94a6e3e8 100644
4715 +--- a/include/linux/regulator/ab8500.h
4716 ++++ b/include/linux/regulator/ab8500.h
4717 +@@ -37,7 +37,6 @@ enum ab8505_regulator_id {
4718 + AB8505_LDO_AUX6,
4719 + AB8505_LDO_INTCORE,
4720 + AB8505_LDO_ADC,
4721 +- AB8505_LDO_USB,
4722 + AB8505_LDO_AUDIO,
4723 + AB8505_LDO_ANAMIC1,
4724 + AB8505_LDO_ANAMIC2,
4725 +diff --git a/include/net/neighbour.h b/include/net/neighbour.h
4726 +index 5e679c8dae0b..8ec77bfdc1a4 100644
4727 +--- a/include/net/neighbour.h
4728 ++++ b/include/net/neighbour.h
4729 +@@ -467,7 +467,7 @@ static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb
4730 +
4731 + do {
4732 + seq = read_seqbegin(&hh->hh_lock);
4733 +- hh_len = hh->hh_len;
4734 ++ hh_len = READ_ONCE(hh->hh_len);
4735 + if (likely(hh_len <= HH_DATA_MOD)) {
4736 + hh_alen = HH_DATA_MOD;
4737 +
4738 +diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
4739 +index 47e61956168d..32e418dba133 100644
4740 +--- a/include/net/sch_generic.h
4741 ++++ b/include/net/sch_generic.h
4742 +@@ -149,8 +149,8 @@ static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
4743 + static inline bool qdisc_is_empty(const struct Qdisc *qdisc)
4744 + {
4745 + if (qdisc_is_percpu_stats(qdisc))
4746 +- return qdisc->empty;
4747 +- return !qdisc->q.qlen;
4748 ++ return READ_ONCE(qdisc->empty);
4749 ++ return !READ_ONCE(qdisc->q.qlen);
4750 + }
4751 +
4752 + static inline bool qdisc_run_begin(struct Qdisc *qdisc)
4753 +@@ -158,7 +158,7 @@ static inline bool qdisc_run_begin(struct Qdisc *qdisc)
4754 + if (qdisc->flags & TCQ_F_NOLOCK) {
4755 + if (!spin_trylock(&qdisc->seqlock))
4756 + return false;
4757 +- qdisc->empty = false;
4758 ++ WRITE_ONCE(qdisc->empty, false);
4759 + } else if (qdisc_is_running(qdisc)) {
4760 + return false;
4761 + }
4762 +diff --git a/include/net/sock.h b/include/net/sock.h
4763 +index e09e2886a836..6c5a3809483e 100644
4764 +--- a/include/net/sock.h
4765 ++++ b/include/net/sock.h
4766 +@@ -2589,9 +2589,9 @@ static inline int sk_get_rmem0(const struct sock *sk, const struct proto *proto)
4767 + */
4768 + static inline void sk_pacing_shift_update(struct sock *sk, int val)
4769 + {
4770 +- if (!sk || !sk_fullsock(sk) || sk->sk_pacing_shift == val)
4771 ++ if (!sk || !sk_fullsock(sk) || READ_ONCE(sk->sk_pacing_shift) == val)
4772 + return;
4773 +- sk->sk_pacing_shift = val;
4774 ++ WRITE_ONCE(sk->sk_pacing_shift, val);
4775 + }
4776 +
4777 + /* if a socket is bound to a device, check that the given device
4778 +diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
4779 +index 9e7cee5307e0..5c51021775af 100644
4780 +--- a/kernel/bpf/verifier.c
4781 ++++ b/kernel/bpf/verifier.c
4782 +@@ -852,7 +852,8 @@ static const int caller_saved[CALLER_SAVED_REGS] = {
4783 + BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
4784 + };
4785 +
4786 +-static void __mark_reg_not_init(struct bpf_reg_state *reg);
4787 ++static void __mark_reg_not_init(const struct bpf_verifier_env *env,
4788 ++ struct bpf_reg_state *reg);
4789 +
4790 + /* Mark the unknown part of a register (variable offset or scalar value) as
4791 + * known to have the value @imm.
4792 +@@ -890,7 +891,7 @@ static void mark_reg_known_zero(struct bpf_verifier_env *env,
4793 + verbose(env, "mark_reg_known_zero(regs, %u)\n", regno);
4794 + /* Something bad happened, let's kill all regs */
4795 + for (regno = 0; regno < MAX_BPF_REG; regno++)
4796 +- __mark_reg_not_init(regs + regno);
4797 ++ __mark_reg_not_init(env, regs + regno);
4798 + return;
4799 + }
4800 + __mark_reg_known_zero(regs + regno);
4801 +@@ -999,7 +1000,8 @@ static void __mark_reg_unbounded(struct bpf_reg_state *reg)
4802 + }
4803 +
4804 + /* Mark a register as having a completely unknown (scalar) value. */
4805 +-static void __mark_reg_unknown(struct bpf_reg_state *reg)
4806 ++static void __mark_reg_unknown(const struct bpf_verifier_env *env,
4807 ++ struct bpf_reg_state *reg)
4808 + {
4809 + /*
4810 + * Clear type, id, off, and union(map_ptr, range) and
4811 +@@ -1009,6 +1011,8 @@ static void __mark_reg_unknown(struct bpf_reg_state *reg)
4812 + reg->type = SCALAR_VALUE;
4813 + reg->var_off = tnum_unknown;
4814 + reg->frameno = 0;
4815 ++ reg->precise = env->subprog_cnt > 1 || !env->allow_ptr_leaks ?
4816 ++ true : false;
4817 + __mark_reg_unbounded(reg);
4818 + }
4819 +
4820 +@@ -1019,19 +1023,16 @@ static void mark_reg_unknown(struct bpf_verifier_env *env,
4821 + verbose(env, "mark_reg_unknown(regs, %u)\n", regno);
4822 + /* Something bad happened, let's kill all regs except FP */
4823 + for (regno = 0; regno < BPF_REG_FP; regno++)
4824 +- __mark_reg_not_init(regs + regno);
4825 ++ __mark_reg_not_init(env, regs + regno);
4826 + return;
4827 + }
4828 +- regs += regno;
4829 +- __mark_reg_unknown(regs);
4830 +- /* constant backtracking is enabled for root without bpf2bpf calls */
4831 +- regs->precise = env->subprog_cnt > 1 || !env->allow_ptr_leaks ?
4832 +- true : false;
4833 ++ __mark_reg_unknown(env, regs + regno);
4834 + }
4835 +
4836 +-static void __mark_reg_not_init(struct bpf_reg_state *reg)
4837 ++static void __mark_reg_not_init(const struct bpf_verifier_env *env,
4838 ++ struct bpf_reg_state *reg)
4839 + {
4840 +- __mark_reg_unknown(reg);
4841 ++ __mark_reg_unknown(env, reg);
4842 + reg->type = NOT_INIT;
4843 + }
4844 +
4845 +@@ -1042,10 +1043,10 @@ static void mark_reg_not_init(struct bpf_verifier_env *env,
4846 + verbose(env, "mark_reg_not_init(regs, %u)\n", regno);
4847 + /* Something bad happened, let's kill all regs except FP */
4848 + for (regno = 0; regno < BPF_REG_FP; regno++)
4849 +- __mark_reg_not_init(regs + regno);
4850 ++ __mark_reg_not_init(env, regs + regno);
4851 + return;
4852 + }
4853 +- __mark_reg_not_init(regs + regno);
4854 ++ __mark_reg_not_init(env, regs + regno);
4855 + }
4856 +
4857 + #define DEF_NOT_SUBREG (0)
4858 +@@ -3066,7 +3067,7 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
4859 + }
4860 + if (state->stack[spi].slot_type[0] == STACK_SPILL &&
4861 + state->stack[spi].spilled_ptr.type == SCALAR_VALUE) {
4862 +- __mark_reg_unknown(&state->stack[spi].spilled_ptr);
4863 ++ __mark_reg_unknown(env, &state->stack[spi].spilled_ptr);
4864 + for (j = 0; j < BPF_REG_SIZE; j++)
4865 + state->stack[spi].slot_type[j] = STACK_MISC;
4866 + goto mark;
4867 +@@ -3706,7 +3707,7 @@ static void __clear_all_pkt_pointers(struct bpf_verifier_env *env,
4868 + if (!reg)
4869 + continue;
4870 + if (reg_is_pkt_pointer_any(reg))
4871 +- __mark_reg_unknown(reg);
4872 ++ __mark_reg_unknown(env, reg);
4873 + }
4874 + }
4875 +
4876 +@@ -3734,7 +3735,7 @@ static void release_reg_references(struct bpf_verifier_env *env,
4877 + if (!reg)
4878 + continue;
4879 + if (reg->ref_obj_id == ref_obj_id)
4880 +- __mark_reg_unknown(reg);
4881 ++ __mark_reg_unknown(env, reg);
4882 + }
4883 + }
4884 +
4885 +@@ -4357,7 +4358,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
4886 + /* Taint dst register if offset had invalid bounds derived from
4887 + * e.g. dead branches.
4888 + */
4889 +- __mark_reg_unknown(dst_reg);
4890 ++ __mark_reg_unknown(env, dst_reg);
4891 + return 0;
4892 + }
4893 +
4894 +@@ -4609,13 +4610,13 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
4895 + /* Taint dst register if offset had invalid bounds derived from
4896 + * e.g. dead branches.
4897 + */
4898 +- __mark_reg_unknown(dst_reg);
4899 ++ __mark_reg_unknown(env, dst_reg);
4900 + return 0;
4901 + }
4902 +
4903 + if (!src_known &&
4904 + opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
4905 +- __mark_reg_unknown(dst_reg);
4906 ++ __mark_reg_unknown(env, dst_reg);
4907 + return 0;
4908 + }
4909 +
4910 +@@ -6746,7 +6747,7 @@ static void clean_func_state(struct bpf_verifier_env *env,
4911 + /* since the register is unused, clear its state
4912 + * to make further comparison simpler
4913 + */
4914 +- __mark_reg_not_init(&st->regs[i]);
4915 ++ __mark_reg_not_init(env, &st->regs[i]);
4916 + }
4917 +
4918 + for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) {
4919 +@@ -6754,7 +6755,7 @@ static void clean_func_state(struct bpf_verifier_env *env,
4920 + /* liveness must not touch this stack slot anymore */
4921 + st->stack[i].spilled_ptr.live |= REG_LIVE_DONE;
4922 + if (!(live & REG_LIVE_READ)) {
4923 +- __mark_reg_not_init(&st->stack[i].spilled_ptr);
4924 ++ __mark_reg_not_init(env, &st->stack[i].spilled_ptr);
4925 + for (j = 0; j < BPF_REG_SIZE; j++)
4926 + st->stack[i].slot_type[j] = STACK_INVALID;
4927 + }
4928 +diff --git a/kernel/cred.c b/kernel/cred.c
4929 +index c0a4c12d38b2..9ed51b70ed80 100644
4930 +--- a/kernel/cred.c
4931 ++++ b/kernel/cred.c
4932 +@@ -223,7 +223,7 @@ struct cred *cred_alloc_blank(void)
4933 + new->magic = CRED_MAGIC;
4934 + #endif
4935 +
4936 +- if (security_cred_alloc_blank(new, GFP_KERNEL) < 0)
4937 ++ if (security_cred_alloc_blank(new, GFP_KERNEL_ACCOUNT) < 0)
4938 + goto error;
4939 +
4940 + return new;
4941 +@@ -282,7 +282,7 @@ struct cred *prepare_creds(void)
4942 + new->security = NULL;
4943 + #endif
4944 +
4945 +- if (security_prepare_creds(new, old, GFP_KERNEL) < 0)
4946 ++ if (security_prepare_creds(new, old, GFP_KERNEL_ACCOUNT) < 0)
4947 + goto error;
4948 + validate_creds(new);
4949 + return new;
4950 +@@ -715,7 +715,7 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
4951 + #ifdef CONFIG_SECURITY
4952 + new->security = NULL;
4953 + #endif
4954 +- if (security_prepare_creds(new, old, GFP_KERNEL) < 0)
4955 ++ if (security_prepare_creds(new, old, GFP_KERNEL_ACCOUNT) < 0)
4956 + goto error;
4957 +
4958 + put_cred(old);
4959 +diff --git a/kernel/exit.c b/kernel/exit.c
4960 +index d351fd09e739..22dfaac9e48c 100644
4961 +--- a/kernel/exit.c
4962 ++++ b/kernel/exit.c
4963 +@@ -517,10 +517,6 @@ static struct task_struct *find_child_reaper(struct task_struct *father,
4964 + }
4965 +
4966 + write_unlock_irq(&tasklist_lock);
4967 +- if (unlikely(pid_ns == &init_pid_ns)) {
4968 +- panic("Attempted to kill init! exitcode=0x%08x\n",
4969 +- father->signal->group_exit_code ?: father->exit_code);
4970 +- }
4971 +
4972 + list_for_each_entry_safe(p, n, dead, ptrace_entry) {
4973 + list_del_init(&p->ptrace_entry);
4974 +@@ -766,6 +762,14 @@ void __noreturn do_exit(long code)
4975 + acct_update_integrals(tsk);
4976 + group_dead = atomic_dec_and_test(&tsk->signal->live);
4977 + if (group_dead) {
4978 ++ /*
4979 ++ * If the last thread of global init has exited, panic
4980 ++ * immediately to get a useable coredump.
4981 ++ */
4982 ++ if (unlikely(is_global_init(tsk)))
4983 ++ panic("Attempted to kill init! exitcode=0x%08x\n",
4984 ++ tsk->signal->group_exit_code ?: (int)code);
4985 ++
4986 + #ifdef CONFIG_POSIX_TIMERS
4987 + hrtimer_cancel(&tsk->signal->real_timer);
4988 + exit_itimers(tsk->signal);
4989 +diff --git a/kernel/module.c b/kernel/module.c
4990 +index ff2d7359a418..cb09a5f37a5f 100644
4991 +--- a/kernel/module.c
4992 ++++ b/kernel/module.c
4993 +@@ -1033,6 +1033,8 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
4994 + strlcpy(last_unloaded_module, mod->name, sizeof(last_unloaded_module));
4995 +
4996 + free_module(mod);
4997 ++ /* someone could wait for the module in add_unformed_module() */
4998 ++ wake_up_all(&module_wq);
4999 + return 0;
5000 + out:
5001 + mutex_unlock(&module_mutex);
5002 +diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
5003 +index 83105874f255..26b9168321e7 100644
5004 +--- a/kernel/power/snapshot.c
5005 ++++ b/kernel/power/snapshot.c
5006 +@@ -734,8 +734,15 @@ zone_found:
5007 + * We have found the zone. Now walk the radix tree to find the leaf node
5008 + * for our PFN.
5009 + */
5010 ++
5011 ++ /*
5012 ++ * If the zone we wish to scan is the current zone and the
5013 ++ * pfn falls into the current node then we do not need to walk
5014 ++ * the tree.
5015 ++ */
5016 + node = bm->cur.node;
5017 +- if (((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
5018 ++ if (zone == bm->cur.zone &&
5019 ++ ((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
5020 + goto node_found;
5021 +
5022 + node = zone->rtree;
5023 +diff --git a/kernel/seccomp.c b/kernel/seccomp.c
5024 +index dba52a7db5e8..614a557a0814 100644
5025 +--- a/kernel/seccomp.c
5026 ++++ b/kernel/seccomp.c
5027 +@@ -1015,6 +1015,13 @@ static long seccomp_notify_recv(struct seccomp_filter *filter,
5028 + struct seccomp_notif unotif;
5029 + ssize_t ret;
5030 +
5031 ++ /* Verify that we're not given garbage to keep struct extensible. */
5032 ++ ret = check_zeroed_user(buf, sizeof(unotif));
5033 ++ if (ret < 0)
5034 ++ return ret;
5035 ++ if (!ret)
5036 ++ return -EINVAL;
5037 ++
5038 + memset(&unotif, 0, sizeof(unotif));
5039 +
5040 + ret = down_interruptible(&filter->notif->request);
5041 +diff --git a/kernel/taskstats.c b/kernel/taskstats.c
5042 +index 13a0f2e6ebc2..e2ac0e37c4ae 100644
5043 +--- a/kernel/taskstats.c
5044 ++++ b/kernel/taskstats.c
5045 +@@ -554,25 +554,33 @@ static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
5046 + static struct taskstats *taskstats_tgid_alloc(struct task_struct *tsk)
5047 + {
5048 + struct signal_struct *sig = tsk->signal;
5049 +- struct taskstats *stats;
5050 ++ struct taskstats *stats_new, *stats;
5051 +
5052 +- if (sig->stats || thread_group_empty(tsk))
5053 +- goto ret;
5054 ++ /* Pairs with smp_store_release() below. */
5055 ++ stats = smp_load_acquire(&sig->stats);
5056 ++ if (stats || thread_group_empty(tsk))
5057 ++ return stats;
5058 +
5059 + /* No problem if kmem_cache_zalloc() fails */
5060 +- stats = kmem_cache_zalloc(taskstats_cache, GFP_KERNEL);
5061 ++ stats_new = kmem_cache_zalloc(taskstats_cache, GFP_KERNEL);
5062 +
5063 + spin_lock_irq(&tsk->sighand->siglock);
5064 +- if (!sig->stats) {
5065 +- sig->stats = stats;
5066 +- stats = NULL;
5067 ++ stats = sig->stats;
5068 ++ if (!stats) {
5069 ++ /*
5070 ++ * Pairs with smp_store_release() above and orders the
5071 ++ * kmem_cache_zalloc().
5072 ++ */
5073 ++ smp_store_release(&sig->stats, stats_new);
5074 ++ stats = stats_new;
5075 ++ stats_new = NULL;
5076 + }
5077 + spin_unlock_irq(&tsk->sighand->siglock);
5078 +
5079 +- if (stats)
5080 +- kmem_cache_free(taskstats_cache, stats);
5081 +-ret:
5082 +- return sig->stats;
5083 ++ if (stats_new)
5084 ++ kmem_cache_free(taskstats_cache, stats_new);
5085 ++
5086 ++ return stats;
5087 + }
5088 +
5089 + /* Send pid data out on exit */
5090 +diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
5091 +index f296d89be757..0708a41cfe2d 100644
5092 +--- a/kernel/trace/ftrace.c
5093 ++++ b/kernel/trace/ftrace.c
5094 +@@ -524,8 +524,7 @@ static int function_stat_show(struct seq_file *m, void *v)
5095 + }
5096 +
5097 + #ifdef CONFIG_FUNCTION_GRAPH_TRACER
5098 +- avg = rec->time;
5099 +- do_div(avg, rec->counter);
5100 ++ avg = div64_ul(rec->time, rec->counter);
5101 + if (tracing_thresh && (avg < tracing_thresh))
5102 + goto out;
5103 + #endif
5104 +@@ -551,7 +550,8 @@ static int function_stat_show(struct seq_file *m, void *v)
5105 + * Divide only 1000 for ns^2 -> us^2 conversion.
5106 + * trace_print_graph_duration will divide 1000 again.
5107 + */
5108 +- do_div(stddev, rec->counter * (rec->counter - 1) * 1000);
5109 ++ stddev = div64_ul(stddev,
5110 ++ rec->counter * (rec->counter - 1) * 1000);
5111 + }
5112 +
5113 + trace_seq_init(&s);
5114 +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
5115 +index 2fa72419bbd7..d8bd9b1d8bce 100644
5116 +--- a/kernel/trace/trace.c
5117 ++++ b/kernel/trace/trace.c
5118 +@@ -4590,6 +4590,10 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
5119 +
5120 + int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
5121 + {
5122 ++ if ((mask == TRACE_ITER_RECORD_TGID) ||
5123 ++ (mask == TRACE_ITER_RECORD_CMD))
5124 ++ lockdep_assert_held(&event_mutex);
5125 ++
5126 + /* do nothing if flag is already set */
5127 + if (!!(tr->trace_flags & mask) == !!enabled)
5128 + return 0;
5129 +@@ -4657,6 +4661,7 @@ static int trace_set_options(struct trace_array *tr, char *option)
5130 +
5131 + cmp += len;
5132 +
5133 ++ mutex_lock(&event_mutex);
5134 + mutex_lock(&trace_types_lock);
5135 +
5136 + ret = match_string(trace_options, -1, cmp);
5137 +@@ -4667,6 +4672,7 @@ static int trace_set_options(struct trace_array *tr, char *option)
5138 + ret = set_tracer_flag(tr, 1 << ret, !neg);
5139 +
5140 + mutex_unlock(&trace_types_lock);
5141 ++ mutex_unlock(&event_mutex);
5142 +
5143 + /*
5144 + * If the first trailing whitespace is replaced with '\0' by strstrip,
5145 +@@ -7972,9 +7978,11 @@ trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
5146 + if (val != 0 && val != 1)
5147 + return -EINVAL;
5148 +
5149 ++ mutex_lock(&event_mutex);
5150 + mutex_lock(&trace_types_lock);
5151 + ret = set_tracer_flag(tr, 1 << index, val);
5152 + mutex_unlock(&trace_types_lock);
5153 ++ mutex_unlock(&event_mutex);
5154 +
5155 + if (ret < 0)
5156 + return ret;
5157 +diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
5158 +index fba87d10f0c1..995061bb2dec 100644
5159 +--- a/kernel/trace/trace_events.c
5160 ++++ b/kernel/trace/trace_events.c
5161 +@@ -320,7 +320,8 @@ void trace_event_enable_cmd_record(bool enable)
5162 + struct trace_event_file *file;
5163 + struct trace_array *tr;
5164 +
5165 +- mutex_lock(&event_mutex);
5166 ++ lockdep_assert_held(&event_mutex);
5167 ++
5168 + do_for_each_event_file(tr, file) {
5169 +
5170 + if (!(file->flags & EVENT_FILE_FL_ENABLED))
5171 +@@ -334,7 +335,6 @@ void trace_event_enable_cmd_record(bool enable)
5172 + clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
5173 + }
5174 + } while_for_each_event_file();
5175 +- mutex_unlock(&event_mutex);
5176 + }
5177 +
5178 + void trace_event_enable_tgid_record(bool enable)
5179 +@@ -342,7 +342,8 @@ void trace_event_enable_tgid_record(bool enable)
5180 + struct trace_event_file *file;
5181 + struct trace_array *tr;
5182 +
5183 +- mutex_lock(&event_mutex);
5184 ++ lockdep_assert_held(&event_mutex);
5185 ++
5186 + do_for_each_event_file(tr, file) {
5187 + if (!(file->flags & EVENT_FILE_FL_ENABLED))
5188 + continue;
5189 +@@ -356,7 +357,6 @@ void trace_event_enable_tgid_record(bool enable)
5190 + &file->flags);
5191 + }
5192 + } while_for_each_event_file();
5193 +- mutex_unlock(&event_mutex);
5194 + }
5195 +
5196 + static int __ftrace_event_enable_disable(struct trace_event_file *file,
5197 +diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
5198 +index c9a74f82b14a..bf44f6bbd0c3 100644
5199 +--- a/kernel/trace/trace_events_filter.c
5200 ++++ b/kernel/trace/trace_events_filter.c
5201 +@@ -1662,7 +1662,7 @@ static int process_system_preds(struct trace_subsystem_dir *dir,
5202 + parse_error(pe, FILT_ERR_BAD_SUBSYS_FILTER, 0);
5203 + return -EINVAL;
5204 + fail_mem:
5205 +- kfree(filter);
5206 ++ __free_filter(filter);
5207 + /* If any call succeeded, we still need to sync */
5208 + if (!fail)
5209 + tracepoint_synchronize_unregister();
5210 +diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
5211 +index 7482a1466ebf..c2783915600c 100644
5212 +--- a/kernel/trace/trace_events_hist.c
5213 ++++ b/kernel/trace/trace_events_hist.c
5214 +@@ -911,7 +911,26 @@ static notrace void trace_event_raw_event_synth(void *__data,
5215 + strscpy(str_field, str_val, STR_VAR_LEN_MAX);
5216 + n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
5217 + } else {
5218 +- entry->fields[n_u64] = var_ref_vals[var_ref_idx + i];
5219 ++ struct synth_field *field = event->fields[i];
5220 ++ u64 val = var_ref_vals[var_ref_idx + i];
5221 ++
5222 ++ switch (field->size) {
5223 ++ case 1:
5224 ++ *(u8 *)&entry->fields[n_u64] = (u8)val;
5225 ++ break;
5226 ++
5227 ++ case 2:
5228 ++ *(u16 *)&entry->fields[n_u64] = (u16)val;
5229 ++ break;
5230 ++
5231 ++ case 4:
5232 ++ *(u32 *)&entry->fields[n_u64] = (u32)val;
5233 ++ break;
5234 ++
5235 ++ default:
5236 ++ entry->fields[n_u64] = val;
5237 ++ break;
5238 ++ }
5239 + n_u64++;
5240 + }
5241 + }
5242 +diff --git a/kernel/trace/tracing_map.c b/kernel/trace/tracing_map.c
5243 +index 9a1c22310323..9e31bfc818ff 100644
5244 +--- a/kernel/trace/tracing_map.c
5245 ++++ b/kernel/trace/tracing_map.c
5246 +@@ -148,8 +148,8 @@ static int tracing_map_cmp_atomic64(void *val_a, void *val_b)
5247 + #define DEFINE_TRACING_MAP_CMP_FN(type) \
5248 + static int tracing_map_cmp_##type(void *val_a, void *val_b) \
5249 + { \
5250 +- type a = *(type *)val_a; \
5251 +- type b = *(type *)val_b; \
5252 ++ type a = (type)(*(u64 *)val_a); \
5253 ++ type b = (type)(*(u64 *)val_b); \
5254 + \
5255 + return (a > b) ? 1 : ((a < b) ? -1 : 0); \
5256 + }
5257 +diff --git a/lib/ubsan.c b/lib/ubsan.c
5258 +index 0c4681118fcd..f007a406f89c 100644
5259 +--- a/lib/ubsan.c
5260 ++++ b/lib/ubsan.c
5261 +@@ -140,25 +140,21 @@ static void val_to_string(char *str, size_t size, struct type_descriptor *type,
5262 + }
5263 + }
5264 +
5265 +-static DEFINE_SPINLOCK(report_lock);
5266 +-
5267 +-static void ubsan_prologue(struct source_location *location,
5268 +- unsigned long *flags)
5269 ++static void ubsan_prologue(struct source_location *location)
5270 + {
5271 + current->in_ubsan++;
5272 +- spin_lock_irqsave(&report_lock, *flags);
5273 +
5274 + pr_err("========================================"
5275 + "========================================\n");
5276 + print_source_location("UBSAN: Undefined behaviour in", location);
5277 + }
5278 +
5279 +-static void ubsan_epilogue(unsigned long *flags)
5280 ++static void ubsan_epilogue(void)
5281 + {
5282 + dump_stack();
5283 + pr_err("========================================"
5284 + "========================================\n");
5285 +- spin_unlock_irqrestore(&report_lock, *flags);
5286 ++
5287 + current->in_ubsan--;
5288 + }
5289 +
5290 +@@ -167,14 +163,13 @@ static void handle_overflow(struct overflow_data *data, void *lhs,
5291 + {
5292 +
5293 + struct type_descriptor *type = data->type;
5294 +- unsigned long flags;
5295 + char lhs_val_str[VALUE_LENGTH];
5296 + char rhs_val_str[VALUE_LENGTH];
5297 +
5298 + if (suppress_report(&data->location))
5299 + return;
5300 +
5301 +- ubsan_prologue(&data->location, &flags);
5302 ++ ubsan_prologue(&data->location);
5303 +
5304 + val_to_string(lhs_val_str, sizeof(lhs_val_str), type, lhs);
5305 + val_to_string(rhs_val_str, sizeof(rhs_val_str), type, rhs);
5306 +@@ -186,7 +181,7 @@ static void handle_overflow(struct overflow_data *data, void *lhs,
5307 + rhs_val_str,
5308 + type->type_name);
5309 +
5310 +- ubsan_epilogue(&flags);
5311 ++ ubsan_epilogue();
5312 + }
5313 +
5314 + void __ubsan_handle_add_overflow(struct overflow_data *data,
5315 +@@ -214,20 +209,19 @@ EXPORT_SYMBOL(__ubsan_handle_mul_overflow);
5316 + void __ubsan_handle_negate_overflow(struct overflow_data *data,
5317 + void *old_val)
5318 + {
5319 +- unsigned long flags;
5320 + char old_val_str[VALUE_LENGTH];
5321 +
5322 + if (suppress_report(&data->location))
5323 + return;
5324 +
5325 +- ubsan_prologue(&data->location, &flags);
5326 ++ ubsan_prologue(&data->location);
5327 +
5328 + val_to_string(old_val_str, sizeof(old_val_str), data->type, old_val);
5329 +
5330 + pr_err("negation of %s cannot be represented in type %s:\n",
5331 + old_val_str, data->type->type_name);
5332 +
5333 +- ubsan_epilogue(&flags);
5334 ++ ubsan_epilogue();
5335 + }
5336 + EXPORT_SYMBOL(__ubsan_handle_negate_overflow);
5337 +
5338 +@@ -235,13 +229,12 @@ EXPORT_SYMBOL(__ubsan_handle_negate_overflow);
5339 + void __ubsan_handle_divrem_overflow(struct overflow_data *data,
5340 + void *lhs, void *rhs)
5341 + {
5342 +- unsigned long flags;
5343 + char rhs_val_str[VALUE_LENGTH];
5344 +
5345 + if (suppress_report(&data->location))
5346 + return;
5347 +
5348 +- ubsan_prologue(&data->location, &flags);
5349 ++ ubsan_prologue(&data->location);
5350 +
5351 + val_to_string(rhs_val_str, sizeof(rhs_val_str), data->type, rhs);
5352 +
5353 +@@ -251,58 +244,52 @@ void __ubsan_handle_divrem_overflow(struct overflow_data *data,
5354 + else
5355 + pr_err("division by zero\n");
5356 +
5357 +- ubsan_epilogue(&flags);
5358 ++ ubsan_epilogue();
5359 + }
5360 + EXPORT_SYMBOL(__ubsan_handle_divrem_overflow);
5361 +
5362 + static void handle_null_ptr_deref(struct type_mismatch_data_common *data)
5363 + {
5364 +- unsigned long flags;
5365 +-
5366 + if (suppress_report(data->location))
5367 + return;
5368 +
5369 +- ubsan_prologue(data->location, &flags);
5370 ++ ubsan_prologue(data->location);
5371 +
5372 + pr_err("%s null pointer of type %s\n",
5373 + type_check_kinds[data->type_check_kind],
5374 + data->type->type_name);
5375 +
5376 +- ubsan_epilogue(&flags);
5377 ++ ubsan_epilogue();
5378 + }
5379 +
5380 + static void handle_misaligned_access(struct type_mismatch_data_common *data,
5381 + unsigned long ptr)
5382 + {
5383 +- unsigned long flags;
5384 +-
5385 + if (suppress_report(data->location))
5386 + return;
5387 +
5388 +- ubsan_prologue(data->location, &flags);
5389 ++ ubsan_prologue(data->location);
5390 +
5391 + pr_err("%s misaligned address %p for type %s\n",
5392 + type_check_kinds[data->type_check_kind],
5393 + (void *)ptr, data->type->type_name);
5394 + pr_err("which requires %ld byte alignment\n", data->alignment);
5395 +
5396 +- ubsan_epilogue(&flags);
5397 ++ ubsan_epilogue();
5398 + }
5399 +
5400 + static void handle_object_size_mismatch(struct type_mismatch_data_common *data,
5401 + unsigned long ptr)
5402 + {
5403 +- unsigned long flags;
5404 +-
5405 + if (suppress_report(data->location))
5406 + return;
5407 +
5408 +- ubsan_prologue(data->location, &flags);
5409 ++ ubsan_prologue(data->location);
5410 + pr_err("%s address %p with insufficient space\n",
5411 + type_check_kinds[data->type_check_kind],
5412 + (void *) ptr);
5413 + pr_err("for an object of type %s\n", data->type->type_name);
5414 +- ubsan_epilogue(&flags);
5415 ++ ubsan_epilogue();
5416 + }
5417 +
5418 + static void ubsan_type_mismatch_common(struct type_mismatch_data_common *data,
5419 +@@ -351,25 +338,23 @@ EXPORT_SYMBOL(__ubsan_handle_type_mismatch_v1);
5420 +
5421 + void __ubsan_handle_out_of_bounds(struct out_of_bounds_data *data, void *index)
5422 + {
5423 +- unsigned long flags;
5424 + char index_str[VALUE_LENGTH];
5425 +
5426 + if (suppress_report(&data->location))
5427 + return;
5428 +
5429 +- ubsan_prologue(&data->location, &flags);
5430 ++ ubsan_prologue(&data->location);
5431 +
5432 + val_to_string(index_str, sizeof(index_str), data->index_type, index);
5433 + pr_err("index %s is out of range for type %s\n", index_str,
5434 + data->array_type->type_name);
5435 +- ubsan_epilogue(&flags);
5436 ++ ubsan_epilogue();
5437 + }
5438 + EXPORT_SYMBOL(__ubsan_handle_out_of_bounds);
5439 +
5440 + void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data,
5441 + void *lhs, void *rhs)
5442 + {
5443 +- unsigned long flags;
5444 + struct type_descriptor *rhs_type = data->rhs_type;
5445 + struct type_descriptor *lhs_type = data->lhs_type;
5446 + char rhs_str[VALUE_LENGTH];
5447 +@@ -379,7 +364,7 @@ void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data,
5448 + if (suppress_report(&data->location))
5449 + goto out;
5450 +
5451 +- ubsan_prologue(&data->location, &flags);
5452 ++ ubsan_prologue(&data->location);
5453 +
5454 + val_to_string(rhs_str, sizeof(rhs_str), rhs_type, rhs);
5455 + val_to_string(lhs_str, sizeof(lhs_str), lhs_type, lhs);
5456 +@@ -402,7 +387,7 @@ void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data,
5457 + lhs_str, rhs_str,
5458 + lhs_type->type_name);
5459 +
5460 +- ubsan_epilogue(&flags);
5461 ++ ubsan_epilogue();
5462 + out:
5463 + user_access_restore(ua_flags);
5464 + }
5465 +@@ -411,11 +396,9 @@ EXPORT_SYMBOL(__ubsan_handle_shift_out_of_bounds);
5466 +
5467 + void __ubsan_handle_builtin_unreachable(struct unreachable_data *data)
5468 + {
5469 +- unsigned long flags;
5470 +-
5471 +- ubsan_prologue(&data->location, &flags);
5472 ++ ubsan_prologue(&data->location);
5473 + pr_err("calling __builtin_unreachable()\n");
5474 +- ubsan_epilogue(&flags);
5475 ++ ubsan_epilogue();
5476 + panic("can't return from __builtin_unreachable()");
5477 + }
5478 + EXPORT_SYMBOL(__ubsan_handle_builtin_unreachable);
5479 +@@ -423,19 +406,18 @@ EXPORT_SYMBOL(__ubsan_handle_builtin_unreachable);
5480 + void __ubsan_handle_load_invalid_value(struct invalid_value_data *data,
5481 + void *val)
5482 + {
5483 +- unsigned long flags;
5484 + char val_str[VALUE_LENGTH];
5485 +
5486 + if (suppress_report(&data->location))
5487 + return;
5488 +
5489 +- ubsan_prologue(&data->location, &flags);
5490 ++ ubsan_prologue(&data->location);
5491 +
5492 + val_to_string(val_str, sizeof(val_str), data->type, val);
5493 +
5494 + pr_err("load of value %s is not a valid value for type %s\n",
5495 + val_str, data->type->type_name);
5496 +
5497 +- ubsan_epilogue(&flags);
5498 ++ ubsan_epilogue();
5499 + }
5500 + EXPORT_SYMBOL(__ubsan_handle_load_invalid_value);
5501 +diff --git a/mm/filemap.c b/mm/filemap.c
5502 +index 85b7d087eb45..1f5731768222 100644
5503 +--- a/mm/filemap.c
5504 ++++ b/mm/filemap.c
5505 +@@ -2329,27 +2329,6 @@ EXPORT_SYMBOL(generic_file_read_iter);
5506 +
5507 + #ifdef CONFIG_MMU
5508 + #define MMAP_LOTSAMISS (100)
5509 +-static struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
5510 +- struct file *fpin)
5511 +-{
5512 +- int flags = vmf->flags;
5513 +-
5514 +- if (fpin)
5515 +- return fpin;
5516 +-
5517 +- /*
5518 +- * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or
5519 +- * anything, so we only pin the file and drop the mmap_sem if only
5520 +- * FAULT_FLAG_ALLOW_RETRY is set.
5521 +- */
5522 +- if ((flags & (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT)) ==
5523 +- FAULT_FLAG_ALLOW_RETRY) {
5524 +- fpin = get_file(vmf->vma->vm_file);
5525 +- up_read(&vmf->vma->vm_mm->mmap_sem);
5526 +- }
5527 +- return fpin;
5528 +-}
5529 +-
5530 + /*
5531 + * lock_page_maybe_drop_mmap - lock the page, possibly dropping the mmap_sem
5532 + * @vmf - the vm_fault for this fault.
5533 +diff --git a/mm/gup_benchmark.c b/mm/gup_benchmark.c
5534 +index 7dd602d7f8db..ad9d5b1c4473 100644
5535 +--- a/mm/gup_benchmark.c
5536 ++++ b/mm/gup_benchmark.c
5537 +@@ -26,6 +26,7 @@ static int __gup_benchmark_ioctl(unsigned int cmd,
5538 + unsigned long i, nr_pages, addr, next;
5539 + int nr;
5540 + struct page **pages;
5541 ++ int ret = 0;
5542 +
5543 + if (gup->size > ULONG_MAX)
5544 + return -EINVAL;
5545 +@@ -63,7 +64,9 @@ static int __gup_benchmark_ioctl(unsigned int cmd,
5546 + NULL);
5547 + break;
5548 + default:
5549 +- return -1;
5550 ++ kvfree(pages);
5551 ++ ret = -EINVAL;
5552 ++ goto out;
5553 + }
5554 +
5555 + if (nr <= 0)
5556 +@@ -85,7 +88,8 @@ static int __gup_benchmark_ioctl(unsigned int cmd,
5557 + gup->put_delta_usec = ktime_us_delta(end_time, start_time);
5558 +
5559 + kvfree(pages);
5560 +- return 0;
5561 ++out:
5562 ++ return ret;
5563 + }
5564 +
5565 + static long gup_benchmark_ioctl(struct file *filep, unsigned int cmd,
5566 +diff --git a/mm/hugetlb.c b/mm/hugetlb.c
5567 +index b45a95363a84..e0afd582ca01 100644
5568 +--- a/mm/hugetlb.c
5569 ++++ b/mm/hugetlb.c
5570 +@@ -27,6 +27,7 @@
5571 + #include <linux/swapops.h>
5572 + #include <linux/jhash.h>
5573 + #include <linux/numa.h>
5574 ++#include <linux/llist.h>
5575 +
5576 + #include <asm/page.h>
5577 + #include <asm/pgtable.h>
5578 +@@ -1255,7 +1256,7 @@ static inline void ClearPageHugeTemporary(struct page *page)
5579 + page[2].mapping = NULL;
5580 + }
5581 +
5582 +-void free_huge_page(struct page *page)
5583 ++static void __free_huge_page(struct page *page)
5584 + {
5585 + /*
5586 + * Can't pass hstate in here because it is called from the
5587 +@@ -1318,6 +1319,54 @@ void free_huge_page(struct page *page)
5588 + spin_unlock(&hugetlb_lock);
5589 + }
5590 +
5591 ++/*
5592 ++ * As free_huge_page() can be called from a non-task context, we have
5593 ++ * to defer the actual freeing in a workqueue to prevent potential
5594 ++ * hugetlb_lock deadlock.
5595 ++ *
5596 ++ * free_hpage_workfn() locklessly retrieves the linked list of pages to
5597 ++ * be freed and frees them one-by-one. As the page->mapping pointer is
5598 ++ * going to be cleared in __free_huge_page() anyway, it is reused as the
5599 ++ * llist_node structure of a lockless linked list of huge pages to be freed.
5600 ++ */
5601 ++static LLIST_HEAD(hpage_freelist);
5602 ++
5603 ++static void free_hpage_workfn(struct work_struct *work)
5604 ++{
5605 ++ struct llist_node *node;
5606 ++ struct page *page;
5607 ++
5608 ++ node = llist_del_all(&hpage_freelist);
5609 ++
5610 ++ while (node) {
5611 ++ page = container_of((struct address_space **)node,
5612 ++ struct page, mapping);
5613 ++ node = node->next;
5614 ++ __free_huge_page(page);
5615 ++ }
5616 ++}
5617 ++static DECLARE_WORK(free_hpage_work, free_hpage_workfn);
5618 ++
5619 ++void free_huge_page(struct page *page)
5620 ++{
5621 ++ /*
5622 ++ * Defer freeing if in non-task context to avoid hugetlb_lock deadlock.
5623 ++ */
5624 ++ if (!in_task()) {
5625 ++ /*
5626 ++ * Only call schedule_work() if hpage_freelist was previously
5627 ++ * empty. Otherwise, schedule_work() has already been called but the
5628 ++ * workfn hasn't retrieved the list yet.
5629 ++ */
5630 ++ if (llist_add((struct llist_node *)&page->mapping,
5631 ++ &hpage_freelist))
5632 ++ schedule_work(&free_hpage_work);
5633 ++ return;
5634 ++ }
5635 ++
5636 ++ __free_huge_page(page);
5637 ++}
5638 ++
5639 + static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
5640 + {
5641 + INIT_LIST_HEAD(&page->lru);
5642 +diff --git a/mm/internal.h b/mm/internal.h
5643 +index 0d5f720c75ab..7dd7fbb577a9 100644
5644 +--- a/mm/internal.h
5645 ++++ b/mm/internal.h
5646 +@@ -362,6 +362,27 @@ vma_address(struct page *page, struct vm_area_struct *vma)
5647 + return max(start, vma->vm_start);
5648 + }
5649 +
5650 ++static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
5651 ++ struct file *fpin)
5652 ++{
5653 ++ int flags = vmf->flags;
5654 ++
5655 ++ if (fpin)
5656 ++ return fpin;
5657 ++
5658 ++ /*
5659 ++ * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or
5660 ++ * anything, so we only pin the file and drop the mmap_sem if only
5661 ++ * FAULT_FLAG_ALLOW_RETRY is set.
5662 ++ */
5663 ++ if ((flags & (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT)) ==
5664 ++ FAULT_FLAG_ALLOW_RETRY) {
5665 ++ fpin = get_file(vmf->vma->vm_file);
5666 ++ up_read(&vmf->vma->vm_mm->mmap_sem);
5667 ++ }
5668 ++ return fpin;
5669 ++}
5670 ++
5671 + #else /* !CONFIG_MMU */
5672 + static inline void clear_page_mlock(struct page *page) { }
5673 + static inline void mlock_vma_page(struct page *page) { }
5674 +diff --git a/mm/memory.c b/mm/memory.c
5675 +index b1ca51a079f2..cb7c940cf800 100644
5676 +--- a/mm/memory.c
5677 ++++ b/mm/memory.c
5678 +@@ -2227,10 +2227,11 @@ static vm_fault_t do_page_mkwrite(struct vm_fault *vmf)
5679 + *
5680 + * The function expects the page to be locked and unlocks it.
5681 + */
5682 +-static void fault_dirty_shared_page(struct vm_area_struct *vma,
5683 +- struct page *page)
5684 ++static vm_fault_t fault_dirty_shared_page(struct vm_fault *vmf)
5685 + {
5686 ++ struct vm_area_struct *vma = vmf->vma;
5687 + struct address_space *mapping;
5688 ++ struct page *page = vmf->page;
5689 + bool dirtied;
5690 + bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite;
5691 +
5692 +@@ -2245,16 +2246,30 @@ static void fault_dirty_shared_page(struct vm_area_struct *vma,
5693 + mapping = page_rmapping(page);
5694 + unlock_page(page);
5695 +
5696 ++ if (!page_mkwrite)
5697 ++ file_update_time(vma->vm_file);
5698 ++
5699 ++ /*
5700 ++ * Throttle page dirtying rate down to writeback speed.
5701 ++ *
5702 ++ * mapping may be NULL here because some device drivers do not
5703 ++ * set page.mapping but still dirty their pages
5704 ++ *
5705 ++ * Drop the mmap_sem before waiting on IO, if we can. The file
5706 ++ * is pinning the mapping, as per above.
5707 ++ */
5708 + if ((dirtied || page_mkwrite) && mapping) {
5709 +- /*
5710 +- * Some device drivers do not set page.mapping
5711 +- * but still dirty their pages
5712 +- */
5713 ++ struct file *fpin;
5714 ++
5715 ++ fpin = maybe_unlock_mmap_for_io(vmf, NULL);
5716 + balance_dirty_pages_ratelimited(mapping);
5717 ++ if (fpin) {
5718 ++ fput(fpin);
5719 ++ return VM_FAULT_RETRY;
5720 ++ }
5721 + }
5722 +
5723 +- if (!page_mkwrite)
5724 +- file_update_time(vma->vm_file);
5725 ++ return 0;
5726 + }
5727 +
5728 + /*
5729 +@@ -2497,6 +2512,7 @@ static vm_fault_t wp_page_shared(struct vm_fault *vmf)
5730 + __releases(vmf->ptl)
5731 + {
5732 + struct vm_area_struct *vma = vmf->vma;
5733 ++ vm_fault_t ret = VM_FAULT_WRITE;
5734 +
5735 + get_page(vmf->page);
5736 +
5737 +@@ -2520,10 +2536,10 @@ static vm_fault_t wp_page_shared(struct vm_fault *vmf)
5738 + wp_page_reuse(vmf);
5739 + lock_page(vmf->page);
5740 + }
5741 +- fault_dirty_shared_page(vma, vmf->page);
5742 ++ ret |= fault_dirty_shared_page(vmf);
5743 + put_page(vmf->page);
5744 +
5745 +- return VM_FAULT_WRITE;
5746 ++ return ret;
5747 + }
5748 +
5749 + /*
5750 +@@ -3567,7 +3583,7 @@ static vm_fault_t do_shared_fault(struct vm_fault *vmf)
5751 + return ret;
5752 + }
5753 +
5754 +- fault_dirty_shared_page(vma, vmf->page);
5755 ++ ret |= fault_dirty_shared_page(vmf);
5756 + return ret;
5757 + }
5758 +
5759 +diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
5760 +index f307bd82d750..fab540685279 100644
5761 +--- a/mm/memory_hotplug.c
5762 ++++ b/mm/memory_hotplug.c
5763 +@@ -465,8 +465,9 @@ static void update_pgdat_span(struct pglist_data *pgdat)
5764 + pgdat->node_spanned_pages = node_end_pfn - node_start_pfn;
5765 + }
5766 +
5767 +-static void __remove_zone(struct zone *zone, unsigned long start_pfn,
5768 +- unsigned long nr_pages)
5769 ++void __ref remove_pfn_range_from_zone(struct zone *zone,
5770 ++ unsigned long start_pfn,
5771 ++ unsigned long nr_pages)
5772 + {
5773 + struct pglist_data *pgdat = zone->zone_pgdat;
5774 + unsigned long flags;
5775 +@@ -481,28 +482,30 @@ static void __remove_zone(struct zone *zone, unsigned long start_pfn,
5776 + return;
5777 + #endif
5778 +
5779 ++ clear_zone_contiguous(zone);
5780 ++
5781 + pgdat_resize_lock(zone->zone_pgdat, &flags);
5782 + shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
5783 + update_pgdat_span(pgdat);
5784 + pgdat_resize_unlock(zone->zone_pgdat, &flags);
5785 ++
5786 ++ set_zone_contiguous(zone);
5787 + }
5788 +
5789 +-static void __remove_section(struct zone *zone, unsigned long pfn,
5790 +- unsigned long nr_pages, unsigned long map_offset,
5791 +- struct vmem_altmap *altmap)
5792 ++static void __remove_section(unsigned long pfn, unsigned long nr_pages,
5793 ++ unsigned long map_offset,
5794 ++ struct vmem_altmap *altmap)
5795 + {
5796 + struct mem_section *ms = __nr_to_section(pfn_to_section_nr(pfn));
5797 +
5798 + if (WARN_ON_ONCE(!valid_section(ms)))
5799 + return;
5800 +
5801 +- __remove_zone(zone, pfn, nr_pages);
5802 + sparse_remove_section(ms, pfn, nr_pages, map_offset, altmap);
5803 + }
5804 +
5805 + /**
5806 +- * __remove_pages() - remove sections of pages from a zone
5807 +- * @zone: zone from which pages need to be removed
5808 ++ * __remove_pages() - remove sections of pages
5809 + * @pfn: starting pageframe (must be aligned to start of a section)
5810 + * @nr_pages: number of pages to remove (must be multiple of section size)
5811 + * @altmap: alternative device page map or %NULL if default memmap is used
5812 +@@ -512,16 +515,14 @@ static void __remove_section(struct zone *zone, unsigned long pfn,
5813 + * sure that pages are marked reserved and zones are adjust properly by
5814 + * calling offline_pages().
5815 + */
5816 +-void __remove_pages(struct zone *zone, unsigned long pfn,
5817 +- unsigned long nr_pages, struct vmem_altmap *altmap)
5818 ++void __remove_pages(unsigned long pfn, unsigned long nr_pages,
5819 ++ struct vmem_altmap *altmap)
5820 + {
5821 + unsigned long map_offset = 0;
5822 + unsigned long nr, start_sec, end_sec;
5823 +
5824 + map_offset = vmem_altmap_offset(altmap);
5825 +
5826 +- clear_zone_contiguous(zone);
5827 +-
5828 + if (check_pfn_span(pfn, nr_pages, "remove"))
5829 + return;
5830 +
5831 +@@ -533,13 +534,11 @@ void __remove_pages(struct zone *zone, unsigned long pfn,
5832 + cond_resched();
5833 + pfns = min(nr_pages, PAGES_PER_SECTION
5834 + - (pfn & ~PAGE_SECTION_MASK));
5835 +- __remove_section(zone, pfn, pfns, map_offset, altmap);
5836 ++ __remove_section(pfn, pfns, map_offset, altmap);
5837 + pfn += pfns;
5838 + nr_pages -= pfns;
5839 + map_offset = 0;
5840 + }
5841 +-
5842 +- set_zone_contiguous(zone);
5843 + }
5844 +
5845 + int set_online_page_callback(online_page_callback_t callback)
5846 +@@ -867,6 +866,7 @@ failed_addition:
5847 + (unsigned long long) pfn << PAGE_SHIFT,
5848 + (((unsigned long long) pfn + nr_pages) << PAGE_SHIFT) - 1);
5849 + memory_notify(MEM_CANCEL_ONLINE, &arg);
5850 ++ remove_pfn_range_from_zone(zone, pfn, nr_pages);
5851 + mem_hotplug_done();
5852 + return ret;
5853 + }
5854 +@@ -1602,6 +1602,7 @@ static int __ref __offline_pages(unsigned long start_pfn,
5855 + writeback_set_ratelimit();
5856 +
5857 + memory_notify(MEM_OFFLINE, &arg);
5858 ++ remove_pfn_range_from_zone(zone, start_pfn, nr_pages);
5859 + mem_hotplug_done();
5860 + return 0;
5861 +
5862 +diff --git a/mm/memremap.c b/mm/memremap.c
5863 +index 03ccbdfeb697..c51c6bd2fe34 100644
5864 +--- a/mm/memremap.c
5865 ++++ b/mm/memremap.c
5866 +@@ -120,7 +120,7 @@ void memunmap_pages(struct dev_pagemap *pgmap)
5867 +
5868 + mem_hotplug_begin();
5869 + if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
5870 +- __remove_pages(page_zone(first_page), PHYS_PFN(res->start),
5871 ++ __remove_pages(PHYS_PFN(res->start),
5872 + PHYS_PFN(resource_size(res)), NULL);
5873 + } else {
5874 + arch_remove_memory(nid, res->start, resource_size(res),
5875 +diff --git a/mm/migrate.c b/mm/migrate.c
5876 +index 4fe45d1428c8..45d3303e0022 100644
5877 +--- a/mm/migrate.c
5878 ++++ b/mm/migrate.c
5879 +@@ -1516,9 +1516,11 @@ static int do_move_pages_to_node(struct mm_struct *mm,
5880 + /*
5881 + * Resolves the given address to a struct page, isolates it from the LRU and
5882 + * puts it to the given pagelist.
5883 +- * Returns -errno if the page cannot be found/isolated or 0 when it has been
5884 +- * queued or the page doesn't need to be migrated because it is already on
5885 +- * the target node
5886 ++ * Returns:
5887 ++ * errno - if the page cannot be found/isolated
5888 ++ * 0 - when it doesn't have to be migrated because it is already on the
5889 ++ * target node
5890 ++ * 1 - when it has been queued
5891 + */
5892 + static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
5893 + int node, struct list_head *pagelist, bool migrate_all)
5894 +@@ -1557,7 +1559,7 @@ static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
5895 + if (PageHuge(page)) {
5896 + if (PageHead(page)) {
5897 + isolate_huge_page(page, pagelist);
5898 +- err = 0;
5899 ++ err = 1;
5900 + }
5901 + } else {
5902 + struct page *head;
5903 +@@ -1567,7 +1569,7 @@ static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
5904 + if (err)
5905 + goto out_putpage;
5906 +
5907 +- err = 0;
5908 ++ err = 1;
5909 + list_add_tail(&head->lru, pagelist);
5910 + mod_node_page_state(page_pgdat(head),
5911 + NR_ISOLATED_ANON + page_is_file_cache(head),
5912 +@@ -1644,8 +1646,17 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
5913 + */
5914 + err = add_page_for_migration(mm, addr, current_node,
5915 + &pagelist, flags & MPOL_MF_MOVE_ALL);
5916 +- if (!err)
5917 ++
5918 ++ if (!err) {
5919 ++ /* The page is already on the target node */
5920 ++ err = store_status(status, i, current_node, 1);
5921 ++ if (err)
5922 ++ goto out_flush;
5923 + continue;
5924 ++ } else if (err > 0) {
5925 ++ /* The page is successfully queued for migration */
5926 ++ continue;
5927 ++ }
5928 +
5929 + err = store_status(status, i, err, 1);
5930 + if (err)
5931 +diff --git a/mm/mmap.c b/mm/mmap.c
5932 +index a7d8c84d19b7..4390dbea4aa5 100644
5933 +--- a/mm/mmap.c
5934 ++++ b/mm/mmap.c
5935 +@@ -90,12 +90,6 @@ static void unmap_region(struct mm_struct *mm,
5936 + * MAP_PRIVATE r: (no) no r: (yes) yes r: (no) yes r: (no) yes
5937 + * w: (no) no w: (no) no w: (copy) copy w: (no) no
5938 + * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
5939 +- *
5940 +- * On arm64, PROT_EXEC has the following behaviour for both MAP_SHARED and
5941 +- * MAP_PRIVATE:
5942 +- * r: (no) no
5943 +- * w: (no) no
5944 +- * x: (yes) yes
5945 + */
5946 + pgprot_t protection_map[16] __ro_after_init = {
5947 + __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
5948 +diff --git a/mm/oom_kill.c b/mm/oom_kill.c
5949 +index 71e3acea7817..d58c481b3df8 100644
5950 +--- a/mm/oom_kill.c
5951 ++++ b/mm/oom_kill.c
5952 +@@ -890,7 +890,7 @@ static void __oom_kill_process(struct task_struct *victim, const char *message)
5953 + K(get_mm_counter(mm, MM_FILEPAGES)),
5954 + K(get_mm_counter(mm, MM_SHMEMPAGES)),
5955 + from_kuid(&init_user_ns, task_uid(victim)),
5956 +- mm_pgtables_bytes(mm), victim->signal->oom_score_adj);
5957 ++ mm_pgtables_bytes(mm) >> 10, victim->signal->oom_score_adj);
5958 + task_unlock(victim);
5959 +
5960 + /*
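
The oom_kill.c change divides mm_pgtables_bytes() by 1024 (a right shift by 10) so the value printed next to the "kB" label really is kibibytes. A quick standalone check of that arithmetic:

#include <stdio.h>

int main(void)
{
        unsigned long pgtables_bytes = 540672;  /* example: 528 * 1024 */

        /* bytes >> 10 == bytes / 1024 for non-negative values */
        printf("pgtables %lukB\n", pgtables_bytes >> 10);  /* prints 528kB */
        return 0;
}
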
5961 +diff --git a/mm/shmem.c b/mm/shmem.c
5962 +index 7a22e3e03d11..6074714fdbd4 100644
5963 +--- a/mm/shmem.c
5964 ++++ b/mm/shmem.c
5965 +@@ -2022,16 +2022,14 @@ static vm_fault_t shmem_fault(struct vm_fault *vmf)
5966 + shmem_falloc->waitq &&
5967 + vmf->pgoff >= shmem_falloc->start &&
5968 + vmf->pgoff < shmem_falloc->next) {
5969 ++ struct file *fpin;
5970 + wait_queue_head_t *shmem_falloc_waitq;
5971 + DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function);
5972 +
5973 + ret = VM_FAULT_NOPAGE;
5974 +- if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
5975 +- !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
5976 +- /* It's polite to up mmap_sem if we can */
5977 +- up_read(&vma->vm_mm->mmap_sem);
5978 ++ fpin = maybe_unlock_mmap_for_io(vmf, NULL);
5979 ++ if (fpin)
5980 + ret = VM_FAULT_RETRY;
5981 +- }
5982 +
5983 + shmem_falloc_waitq = shmem_falloc->waitq;
5984 + prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
5985 +@@ -2049,6 +2047,9 @@ static vm_fault_t shmem_fault(struct vm_fault *vmf)
5986 + spin_lock(&inode->i_lock);
5987 + finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
5988 + spin_unlock(&inode->i_lock);
5989 ++
5990 ++ if (fpin)
5991 ++ fput(fpin);
5992 + return ret;
5993 + }
5994 + spin_unlock(&inode->i_lock);
5995 +diff --git a/mm/sparse.c b/mm/sparse.c
5996 +index f6891c1992b1..c2c01b6330af 100644
5997 +--- a/mm/sparse.c
5998 ++++ b/mm/sparse.c
5999 +@@ -647,7 +647,7 @@ void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
6000 + #endif
6001 +
6002 + #ifdef CONFIG_SPARSEMEM_VMEMMAP
6003 +-static struct page *populate_section_memmap(unsigned long pfn,
6004 ++static struct page * __meminit populate_section_memmap(unsigned long pfn,
6005 + unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
6006 + {
6007 + return __populate_section_memmap(pfn, nr_pages, nid, altmap);
6008 +@@ -669,7 +669,7 @@ static void free_map_bootmem(struct page *memmap)
6009 + vmemmap_free(start, end, NULL);
6010 + }
6011 + #else
6012 +-struct page *populate_section_memmap(unsigned long pfn,
6013 ++struct page * __meminit populate_section_memmap(unsigned long pfn,
6014 + unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
6015 + {
6016 + struct page *page, *ret;
6017 +diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
6018 +index 2b2b9aae8a3c..22d17ecfe7df 100644
6019 +--- a/mm/zsmalloc.c
6020 ++++ b/mm/zsmalloc.c
6021 +@@ -2069,6 +2069,11 @@ static int zs_page_migrate(struct address_space *mapping, struct page *newpage,
6022 + zs_pool_dec_isolated(pool);
6023 + }
6024 +
6025 ++ if (page_zone(newpage) != page_zone(page)) {
6026 ++ dec_zone_page_state(page, NR_ZSPAGES);
6027 ++ inc_zone_page_state(newpage, NR_ZSPAGES);
6028 ++ }
6029 ++
6030 + reset_page(page);
6031 + put_page(page);
6032 + page = newpage;
6033 +diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
6034 +index 7ff92dd4c53c..87691404d0c6 100644
6035 +--- a/net/bluetooth/hci_conn.c
6036 ++++ b/net/bluetooth/hci_conn.c
6037 +@@ -1176,8 +1176,10 @@ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
6038 + if (!conn)
6039 + return ERR_PTR(-ENOMEM);
6040 +
6041 +- if (hci_explicit_conn_params_set(hdev, dst, dst_type) < 0)
6042 ++ if (hci_explicit_conn_params_set(hdev, dst, dst_type) < 0) {
6043 ++ hci_conn_del(conn);
6044 + return ERR_PTR(-EBUSY);
6045 ++ }
6046 +
6047 + conn->state = BT_CONNECT;
6048 + set_bit(HCI_CONN_SCANNING, &conn->flags);
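
The hci_conn.c fix frees the freshly allocated connection before bailing out with -EBUSY instead of leaking it. A generic sketch of that allocate/validate/free-on-error shape (all names hypothetical):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct conn {
        int state;
};

static int params_ok(int param)
{
        return param >= 0;
}

static struct conn *connect_one(int param, int *err)
{
        struct conn *c = malloc(sizeof(*c));

        if (!c) {
                *err = -ENOMEM;
                return NULL;
        }
        if (!params_ok(param)) {
                free(c);        /* the fix: release the half-built object */
                *err = -EBUSY;
                return NULL;
        }
        c->state = 1;
        *err = 0;
        return c;
}

int main(void)
{
        int err;
        struct conn *c = connect_one(-1, &err);

        printf("conn=%p err=%d\n", (void *)c, err);
        free(c);                /* free(NULL) is a no-op */
        return 0;
}
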
6049 +diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
6050 +index da7fdbdf9c41..a845786258a0 100644
6051 +--- a/net/bluetooth/l2cap_core.c
6052 ++++ b/net/bluetooth/l2cap_core.c
6053 +@@ -4936,10 +4936,8 @@ void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
6054 + BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
6055 + chan, result, local_amp_id, remote_amp_id);
6056 +
6057 +- if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
6058 +- l2cap_chan_unlock(chan);
6059 ++ if (chan->state == BT_DISCONN || chan->state == BT_CLOSED)
6060 + return;
6061 +- }
6062 +
6063 + if (chan->state != BT_CONNECTED) {
6064 + l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
6065 +diff --git a/net/core/dev.c b/net/core/dev.c
6066 +index 046307445ece..3e11c6bb4dd6 100644
6067 +--- a/net/core/dev.c
6068 ++++ b/net/core/dev.c
6069 +@@ -3386,7 +3386,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
6070 + qdisc_calculate_pkt_len(skb, q);
6071 +
6072 + if (q->flags & TCQ_F_NOLOCK) {
6073 +- if ((q->flags & TCQ_F_CAN_BYPASS) && q->empty &&
6074 ++ if ((q->flags & TCQ_F_CAN_BYPASS) && READ_ONCE(q->empty) &&
6075 + qdisc_run_begin(q)) {
6076 + if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED,
6077 + &q->state))) {
6078 +diff --git a/net/core/neighbour.c b/net/core/neighbour.c
6079 +index 08ebc3ac5343..f2452496ad9f 100644
6080 +--- a/net/core/neighbour.c
6081 ++++ b/net/core/neighbour.c
6082 +@@ -1194,7 +1194,7 @@ static void neigh_update_hhs(struct neighbour *neigh)
6083 +
6084 + if (update) {
6085 + hh = &neigh->hh;
6086 +- if (hh->hh_len) {
6087 ++ if (READ_ONCE(hh->hh_len)) {
6088 + write_seqlock_bh(&hh->hh_lock);
6089 + update(hh, neigh->dev, neigh->ha);
6090 + write_sequnlock_bh(&hh->hh_lock);
6091 +@@ -1473,7 +1473,7 @@ int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
6092 + struct net_device *dev = neigh->dev;
6093 + unsigned int seq;
6094 +
6095 +- if (dev->header_ops->cache && !neigh->hh.hh_len)
6096 ++ if (dev->header_ops->cache && !READ_ONCE(neigh->hh.hh_len))
6097 + neigh_hh_init(neigh);
6098 +
6099 + do {
6100 +diff --git a/net/core/sock.c b/net/core/sock.c
6101 +index ac78a570e43a..b4d1112174c1 100644
6102 +--- a/net/core/sock.c
6103 ++++ b/net/core/sock.c
6104 +@@ -2918,7 +2918,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
6105 +
6106 + sk->sk_max_pacing_rate = ~0UL;
6107 + sk->sk_pacing_rate = ~0UL;
6108 +- sk->sk_pacing_shift = 10;
6109 ++ WRITE_ONCE(sk->sk_pacing_shift, 10);
6110 + sk->sk_incoming_cpu = -1;
6111 +
6112 + sk_rx_queue_clear(sk);
6113 +diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
6114 +index eb29e5adc84d..9f9e00ba3ad7 100644
6115 +--- a/net/core/sysctl_net_core.c
6116 ++++ b/net/core/sysctl_net_core.c
6117 +@@ -288,6 +288,7 @@ static int proc_dointvec_minmax_bpf_enable(struct ctl_table *table, int write,
6118 + return ret;
6119 + }
6120 +
6121 ++# ifdef CONFIG_HAVE_EBPF_JIT
6122 + static int
6123 + proc_dointvec_minmax_bpf_restricted(struct ctl_table *table, int write,
6124 + void __user *buffer, size_t *lenp,
6125 +@@ -298,6 +299,7 @@ proc_dointvec_minmax_bpf_restricted(struct ctl_table *table, int write,
6126 +
6127 + return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
6128 + }
6129 ++# endif /* CONFIG_HAVE_EBPF_JIT */
6130 +
6131 + static int
6132 + proc_dolongvec_minmax_bpf_restricted(struct ctl_table *table, int write,
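
The sysctl_net_core.c hunk fences proc_dointvec_minmax_bpf_restricted() with the same CONFIG_HAVE_EBPF_JIT condition as its only caller, so configurations without the JIT don't trip -Wunused-function. The same idea in a tiny standalone program (compile with -DFEATURE to enable the helper):

#include <stdio.h>

#ifdef FEATURE
static int helper(int x)
{
        return x * 2;
}
#endif

int main(void)
{
#ifdef FEATURE
        printf("%d\n", helper(21));
#else
        printf("feature disabled\n");
#endif
        return 0;
}
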
6133 +diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
6134 +index 17374afee28f..9040fe55e0f5 100644
6135 +--- a/net/ethernet/eth.c
6136 ++++ b/net/ethernet/eth.c
6137 +@@ -244,7 +244,12 @@ int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh, __be16
6138 + eth->h_proto = type;
6139 + memcpy(eth->h_source, dev->dev_addr, ETH_ALEN);
6140 + memcpy(eth->h_dest, neigh->ha, ETH_ALEN);
6141 +- hh->hh_len = ETH_HLEN;
6142 ++
6143 ++ /* Pairs with READ_ONCE() in neigh_resolve_output(),
6144 ++ * neigh_hh_output() and neigh_update_hhs().
6145 ++ */
6146 ++ smp_store_release(&hh->hh_len, ETH_HLEN);
6147 ++
6148 + return 0;
6149 + }
6150 + EXPORT_SYMBOL(eth_header_cache);
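
The eth.c hunk publishes hh_len with smp_store_release() so a reader that observes a non-zero length also observes the fully written header; the READ_ONCE() sites added in neighbour.c are the other half of the pairing. A loose userspace analogue using C11 atomics (the kernel primitives are not identical to these; this only illustrates the publish-then-observe idea, compile with -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <string.h>

#define HDR_MAX 14

static char header[HDR_MAX];     /* payload the writer publishes */
static _Atomic int header_len;   /* 0 means "not valid yet" */

static void *writer(void *arg)
{
        (void)arg;
        memset(header, 0xab, HDR_MAX);
        /* Release: header[] contents become visible before the length. */
        atomic_store_explicit(&header_len, HDR_MAX, memory_order_release);
        return NULL;
}

static void *reader(void *arg)
{
        int len;

        (void)arg;
        /* Acquire: a non-zero length guarantees header[] is complete. */
        while (!(len = atomic_load_explicit(&header_len,
                                            memory_order_acquire)))
                ;
        printf("saw %d bytes, first byte 0x%02x\n",
               len, (unsigned char)header[0]);
        return NULL;
}

int main(void)
{
        pthread_t w, r;

        pthread_create(&r, NULL, reader, NULL);
        pthread_create(&w, NULL, writer, NULL);
        pthread_join(w, NULL);
        pthread_join(r, NULL);
        return 0;
}
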
6151 +diff --git a/net/hsr/hsr_debugfs.c b/net/hsr/hsr_debugfs.c
6152 +index 94447974a3c0..6618a9d8e58e 100644
6153 +--- a/net/hsr/hsr_debugfs.c
6154 ++++ b/net/hsr/hsr_debugfs.c
6155 +@@ -64,7 +64,6 @@ hsr_node_table_open(struct inode *inode, struct file *filp)
6156 + }
6157 +
6158 + static const struct file_operations hsr_fops = {
6159 +- .owner = THIS_MODULE,
6160 + .open = hsr_node_table_open,
6161 + .read = seq_read,
6162 + .llseek = seq_lseek,
6163 +@@ -78,15 +77,14 @@ static const struct file_operations hsr_fops = {
6164 + * When debugfs is configured this routine sets up the node_table file per
6165 + * hsr device for dumping the node_table entries
6166 + */
6167 +-int hsr_debugfs_init(struct hsr_priv *priv, struct net_device *hsr_dev)
6168 ++void hsr_debugfs_init(struct hsr_priv *priv, struct net_device *hsr_dev)
6169 + {
6170 +- int rc = -1;
6171 + struct dentry *de = NULL;
6172 +
6173 + de = debugfs_create_dir(hsr_dev->name, NULL);
6174 +- if (!de) {
6175 ++ if (IS_ERR(de)) {
6176 + pr_err("Cannot create hsr debugfs root\n");
6177 +- return rc;
6178 ++ return;
6179 + }
6180 +
6181 + priv->node_tbl_root = de;
6182 +@@ -94,13 +92,13 @@ int hsr_debugfs_init(struct hsr_priv *priv, struct net_device *hsr_dev)
6183 + de = debugfs_create_file("node_table", S_IFREG | 0444,
6184 + priv->node_tbl_root, priv,
6185 + &hsr_fops);
6186 +- if (!de) {
6187 ++ if (IS_ERR(de)) {
6188 + pr_err("Cannot create hsr node_table directory\n");
6189 +- return rc;
6190 ++ debugfs_remove(priv->node_tbl_root);
6191 ++ priv->node_tbl_root = NULL;
6192 ++ return;
6193 + }
6194 + priv->node_tbl_file = de;
6195 +-
6196 +- return 0;
6197 + }
6198 +
6199 + /* hsr_debugfs_term - Tear down debugfs intrastructure
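
The hsr_debugfs hunk above replaces NULL checks with IS_ERR() because the debugfs creation helpers report failure through an error code encoded in the pointer itself, not through NULL. A minimal userspace re-implementation of that encoding, just to show the mechanics (the fake create_dir() is hypothetical):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long err)     { return (void *)err; }
static long PTR_ERR(const void *p) { return (long)p; }
static int IS_ERR(const void *p)
{
        return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

static void *create_dir(int fail)
{
        static int dummy;

        return fail ? ERR_PTR(-ENODEV) : (void *)&dummy; /* fake dentry */
}

int main(void)
{
        void *de = create_dir(1);

        if (IS_ERR(de))         /* a plain !de check would miss this */
                printf("create failed: %ld\n", PTR_ERR(de));
        return 0;
}
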
6200 +diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
6201 +index b01e1bae4ddc..62c03f0d0079 100644
6202 +--- a/net/hsr/hsr_device.c
6203 ++++ b/net/hsr/hsr_device.c
6204 +@@ -368,7 +368,7 @@ static void hsr_dev_destroy(struct net_device *hsr_dev)
6205 + del_timer_sync(&hsr->prune_timer);
6206 + del_timer_sync(&hsr->announce_timer);
6207 +
6208 +- hsr_del_self_node(&hsr->self_node_db);
6209 ++ hsr_del_self_node(hsr);
6210 + hsr_del_nodes(&hsr->node_db);
6211 + }
6212 +
6213 +@@ -440,11 +440,12 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
6214 + INIT_LIST_HEAD(&hsr->ports);
6215 + INIT_LIST_HEAD(&hsr->node_db);
6216 + INIT_LIST_HEAD(&hsr->self_node_db);
6217 ++ spin_lock_init(&hsr->list_lock);
6218 +
6219 + ether_addr_copy(hsr_dev->dev_addr, slave[0]->dev_addr);
6220 +
6221 + /* Make sure we recognize frames from ourselves in hsr_rcv() */
6222 +- res = hsr_create_self_node(&hsr->self_node_db, hsr_dev->dev_addr,
6223 ++ res = hsr_create_self_node(hsr, hsr_dev->dev_addr,
6224 + slave[1]->dev_addr);
6225 + if (res < 0)
6226 + return res;
6227 +@@ -477,31 +478,32 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
6228 +
6229 + res = hsr_add_port(hsr, hsr_dev, HSR_PT_MASTER);
6230 + if (res)
6231 +- goto err_add_port;
6232 ++ goto err_add_master;
6233 +
6234 + res = register_netdevice(hsr_dev);
6235 + if (res)
6236 +- goto fail;
6237 ++ goto err_unregister;
6238 +
6239 + res = hsr_add_port(hsr, slave[0], HSR_PT_SLAVE_A);
6240 + if (res)
6241 +- goto fail;
6242 ++ goto err_add_slaves;
6243 ++
6244 + res = hsr_add_port(hsr, slave[1], HSR_PT_SLAVE_B);
6245 + if (res)
6246 +- goto fail;
6247 ++ goto err_add_slaves;
6248 +
6249 ++ hsr_debugfs_init(hsr, hsr_dev);
6250 + mod_timer(&hsr->prune_timer, jiffies + msecs_to_jiffies(PRUNE_PERIOD));
6251 +- res = hsr_debugfs_init(hsr, hsr_dev);
6252 +- if (res)
6253 +- goto fail;
6254 +
6255 + return 0;
6256 +
6257 +-fail:
6258 ++err_add_slaves:
6259 ++ unregister_netdevice(hsr_dev);
6260 ++err_unregister:
6261 + list_for_each_entry_safe(port, tmp, &hsr->ports, port_list)
6262 + hsr_del_port(port);
6263 +-err_add_port:
6264 +- hsr_del_self_node(&hsr->self_node_db);
6265 ++err_add_master:
6266 ++ hsr_del_self_node(hsr);
6267 +
6268 + return res;
6269 + }
6270 +diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c
6271 +index 292be446007b..27dc65d7de67 100644
6272 +--- a/net/hsr/hsr_framereg.c
6273 ++++ b/net/hsr/hsr_framereg.c
6274 +@@ -75,10 +75,11 @@ static struct hsr_node *find_node_by_addr_A(struct list_head *node_db,
6275 + /* Helper for device init; the self_node_db is used in hsr_rcv() to recognize
6276 + * frames from self that's been looped over the HSR ring.
6277 + */
6278 +-int hsr_create_self_node(struct list_head *self_node_db,
6279 ++int hsr_create_self_node(struct hsr_priv *hsr,
6280 + unsigned char addr_a[ETH_ALEN],
6281 + unsigned char addr_b[ETH_ALEN])
6282 + {
6283 ++ struct list_head *self_node_db = &hsr->self_node_db;
6284 + struct hsr_node *node, *oldnode;
6285 +
6286 + node = kmalloc(sizeof(*node), GFP_KERNEL);
6287 +@@ -88,33 +89,33 @@ int hsr_create_self_node(struct list_head *self_node_db,
6288 + ether_addr_copy(node->macaddress_A, addr_a);
6289 + ether_addr_copy(node->macaddress_B, addr_b);
6290 +
6291 +- rcu_read_lock();
6292 ++ spin_lock_bh(&hsr->list_lock);
6293 + oldnode = list_first_or_null_rcu(self_node_db,
6294 + struct hsr_node, mac_list);
6295 + if (oldnode) {
6296 + list_replace_rcu(&oldnode->mac_list, &node->mac_list);
6297 +- rcu_read_unlock();
6298 +- synchronize_rcu();
6299 +- kfree(oldnode);
6300 ++ spin_unlock_bh(&hsr->list_lock);
6301 ++ kfree_rcu(oldnode, rcu_head);
6302 + } else {
6303 +- rcu_read_unlock();
6304 + list_add_tail_rcu(&node->mac_list, self_node_db);
6305 ++ spin_unlock_bh(&hsr->list_lock);
6306 + }
6307 +
6308 + return 0;
6309 + }
6310 +
6311 +-void hsr_del_self_node(struct list_head *self_node_db)
6312 ++void hsr_del_self_node(struct hsr_priv *hsr)
6313 + {
6314 ++ struct list_head *self_node_db = &hsr->self_node_db;
6315 + struct hsr_node *node;
6316 +
6317 +- rcu_read_lock();
6318 ++ spin_lock_bh(&hsr->list_lock);
6319 + node = list_first_or_null_rcu(self_node_db, struct hsr_node, mac_list);
6320 +- rcu_read_unlock();
6321 + if (node) {
6322 + list_del_rcu(&node->mac_list);
6323 +- kfree(node);
6324 ++ kfree_rcu(node, rcu_head);
6325 + }
6326 ++ spin_unlock_bh(&hsr->list_lock);
6327 + }
6328 +
6329 + void hsr_del_nodes(struct list_head *node_db)
6330 +@@ -130,30 +131,43 @@ void hsr_del_nodes(struct list_head *node_db)
6331 + * seq_out is used to initialize filtering of outgoing duplicate frames
6332 + * originating from the newly added node.
6333 + */
6334 +-struct hsr_node *hsr_add_node(struct list_head *node_db, unsigned char addr[],
6335 +- u16 seq_out)
6336 ++static struct hsr_node *hsr_add_node(struct hsr_priv *hsr,
6337 ++ struct list_head *node_db,
6338 ++ unsigned char addr[],
6339 ++ u16 seq_out)
6340 + {
6341 +- struct hsr_node *node;
6342 ++ struct hsr_node *new_node, *node;
6343 + unsigned long now;
6344 + int i;
6345 +
6346 +- node = kzalloc(sizeof(*node), GFP_ATOMIC);
6347 +- if (!node)
6348 ++ new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
6349 ++ if (!new_node)
6350 + return NULL;
6351 +
6352 +- ether_addr_copy(node->macaddress_A, addr);
6353 ++ ether_addr_copy(new_node->macaddress_A, addr);
6354 +
6355 + /* We are only interested in time diffs here, so use current jiffies
6356 + * as initialization. (0 could trigger an spurious ring error warning).
6357 + */
6358 + now = jiffies;
6359 + for (i = 0; i < HSR_PT_PORTS; i++)
6360 +- node->time_in[i] = now;
6361 ++ new_node->time_in[i] = now;
6362 + for (i = 0; i < HSR_PT_PORTS; i++)
6363 +- node->seq_out[i] = seq_out;
6364 +-
6365 +- list_add_tail_rcu(&node->mac_list, node_db);
6366 ++ new_node->seq_out[i] = seq_out;
6367 +
6368 ++ spin_lock_bh(&hsr->list_lock);
6369 ++ list_for_each_entry_rcu(node, node_db, mac_list) {
6370 ++ if (ether_addr_equal(node->macaddress_A, addr))
6371 ++ goto out;
6372 ++ if (ether_addr_equal(node->macaddress_B, addr))
6373 ++ goto out;
6374 ++ }
6375 ++ list_add_tail_rcu(&new_node->mac_list, node_db);
6376 ++ spin_unlock_bh(&hsr->list_lock);
6377 ++ return new_node;
6378 ++out:
6379 ++ spin_unlock_bh(&hsr->list_lock);
6380 ++ kfree(new_node);
6381 + return node;
6382 + }
6383 +
6384 +@@ -163,6 +177,7 @@ struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb,
6385 + bool is_sup)
6386 + {
6387 + struct list_head *node_db = &port->hsr->node_db;
6388 ++ struct hsr_priv *hsr = port->hsr;
6389 + struct hsr_node *node;
6390 + struct ethhdr *ethhdr;
6391 + u16 seq_out;
6392 +@@ -196,7 +211,7 @@ struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb,
6393 + seq_out = HSR_SEQNR_START;
6394 + }
6395 +
6396 +- return hsr_add_node(node_db, ethhdr->h_source, seq_out);
6397 ++ return hsr_add_node(hsr, node_db, ethhdr->h_source, seq_out);
6398 + }
6399 +
6400 + /* Use the Supervision frame's info about an eventual macaddress_B for merging
6401 +@@ -206,10 +221,11 @@ struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb,
6402 + void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr,
6403 + struct hsr_port *port_rcv)
6404 + {
6405 +- struct ethhdr *ethhdr;
6406 +- struct hsr_node *node_real;
6407 ++ struct hsr_priv *hsr = port_rcv->hsr;
6408 + struct hsr_sup_payload *hsr_sp;
6409 ++ struct hsr_node *node_real;
6410 + struct list_head *node_db;
6411 ++ struct ethhdr *ethhdr;
6412 + int i;
6413 +
6414 + ethhdr = (struct ethhdr *)skb_mac_header(skb);
6415 +@@ -231,7 +247,7 @@ void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr,
6416 + node_real = find_node_by_addr_A(node_db, hsr_sp->macaddress_A);
6417 + if (!node_real)
6418 + /* No frame received from AddrA of this node yet */
6419 +- node_real = hsr_add_node(node_db, hsr_sp->macaddress_A,
6420 ++ node_real = hsr_add_node(hsr, node_db, hsr_sp->macaddress_A,
6421 + HSR_SEQNR_START - 1);
6422 + if (!node_real)
6423 + goto done; /* No mem */
6424 +@@ -252,7 +268,9 @@ void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr,
6425 + }
6426 + node_real->addr_B_port = port_rcv->type;
6427 +
6428 ++ spin_lock_bh(&hsr->list_lock);
6429 + list_del_rcu(&node_curr->mac_list);
6430 ++ spin_unlock_bh(&hsr->list_lock);
6431 + kfree_rcu(node_curr, rcu_head);
6432 +
6433 + done:
6434 +@@ -368,12 +386,13 @@ void hsr_prune_nodes(struct timer_list *t)
6435 + {
6436 + struct hsr_priv *hsr = from_timer(hsr, t, prune_timer);
6437 + struct hsr_node *node;
6438 ++ struct hsr_node *tmp;
6439 + struct hsr_port *port;
6440 + unsigned long timestamp;
6441 + unsigned long time_a, time_b;
6442 +
6443 +- rcu_read_lock();
6444 +- list_for_each_entry_rcu(node, &hsr->node_db, mac_list) {
6445 ++ spin_lock_bh(&hsr->list_lock);
6446 ++ list_for_each_entry_safe(node, tmp, &hsr->node_db, mac_list) {
6447 + /* Don't prune own node. Neither time_in[HSR_PT_SLAVE_A]
6448 + * nor time_in[HSR_PT_SLAVE_B], will ever be updated for
6449 + * the master port. Thus the master node will be repeatedly
6450 +@@ -421,7 +440,7 @@ void hsr_prune_nodes(struct timer_list *t)
6451 + kfree_rcu(node, rcu_head);
6452 + }
6453 + }
6454 +- rcu_read_unlock();
6455 ++ spin_unlock_bh(&hsr->list_lock);
6456 +
6457 + /* Restart timer */
6458 + mod_timer(&hsr->prune_timer,
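
hsr_add_node() now allocates outside the lock, then re-checks for an existing entry while holding hsr->list_lock, discarding the new node if a concurrent writer already inserted that address. The same shape in portable userspace C, with a plain mutex standing in for spin_lock_bh() and RCU (names hypothetical):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
        int key;
        struct node *next;
};

static struct node *head;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static struct node *add_node(int key)
{
        struct node *new_node = malloc(sizeof(*new_node));
        struct node *n;

        if (!new_node)
                return NULL;
        new_node->key = key;

        pthread_mutex_lock(&list_lock);
        for (n = head; n; n = n->next) {
                if (n->key == key) {    /* lost the race: reuse n */
                        pthread_mutex_unlock(&list_lock);
                        free(new_node);
                        return n;
                }
        }
        new_node->next = head;          /* won the race: publish */
        head = new_node;
        pthread_mutex_unlock(&list_lock);
        return new_node;
}

int main(void)
{
        struct node *a = add_node(42);
        struct node *b = add_node(42);

        printf("second insert reused first node: %s\n",
               a == b ? "yes" : "no");
        return 0;
}
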
6459 +diff --git a/net/hsr/hsr_framereg.h b/net/hsr/hsr_framereg.h
6460 +index 89a3ce38151d..0f0fa12b4329 100644
6461 +--- a/net/hsr/hsr_framereg.h
6462 ++++ b/net/hsr/hsr_framereg.h
6463 +@@ -12,10 +12,8 @@
6464 +
6465 + struct hsr_node;
6466 +
6467 +-void hsr_del_self_node(struct list_head *self_node_db);
6468 ++void hsr_del_self_node(struct hsr_priv *hsr);
6469 + void hsr_del_nodes(struct list_head *node_db);
6470 +-struct hsr_node *hsr_add_node(struct list_head *node_db, unsigned char addr[],
6471 +- u16 seq_out);
6472 + struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb,
6473 + bool is_sup);
6474 + void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr,
6475 +@@ -33,7 +31,7 @@ int hsr_register_frame_out(struct hsr_port *port, struct hsr_node *node,
6476 +
6477 + void hsr_prune_nodes(struct timer_list *t);
6478 +
6479 +-int hsr_create_self_node(struct list_head *self_node_db,
6480 ++int hsr_create_self_node(struct hsr_priv *hsr,
6481 + unsigned char addr_a[ETH_ALEN],
6482 + unsigned char addr_b[ETH_ALEN]);
6483 +
6484 +diff --git a/net/hsr/hsr_main.c b/net/hsr/hsr_main.c
6485 +index b9988a662ee1..6deb8fa8d5c8 100644
6486 +--- a/net/hsr/hsr_main.c
6487 ++++ b/net/hsr/hsr_main.c
6488 +@@ -64,7 +64,7 @@ static int hsr_netdev_notify(struct notifier_block *nb, unsigned long event,
6489 +
6490 + /* Make sure we recognize frames from ourselves in hsr_rcv() */
6491 + port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
6492 +- res = hsr_create_self_node(&hsr->self_node_db,
6493 ++ res = hsr_create_self_node(hsr,
6494 + master->dev->dev_addr,
6495 + port ?
6496 + port->dev->dev_addr :
6497 +diff --git a/net/hsr/hsr_main.h b/net/hsr/hsr_main.h
6498 +index 96fac696a1e1..9ec38e33b8b1 100644
6499 +--- a/net/hsr/hsr_main.h
6500 ++++ b/net/hsr/hsr_main.h
6501 +@@ -160,8 +160,9 @@ struct hsr_priv {
6502 + int announce_count;
6503 + u16 sequence_nr;
6504 + u16 sup_sequence_nr; /* For HSRv1 separate seq_nr for supervision */
6505 +- u8 prot_version; /* Indicate if HSRv0 or HSRv1. */
6506 +- spinlock_t seqnr_lock; /* locking for sequence_nr */
6507 ++ u8 prot_version; /* Indicate if HSRv0 or HSRv1. */
6508 ++ spinlock_t seqnr_lock; /* locking for sequence_nr */
6509 ++ spinlock_t list_lock; /* locking for node list */
6510 + unsigned char sup_multicast_addr[ETH_ALEN];
6511 + #ifdef CONFIG_DEBUG_FS
6512 + struct dentry *node_tbl_root;
6513 +@@ -184,15 +185,12 @@ static inline u16 hsr_get_skb_sequence_nr(struct sk_buff *skb)
6514 + }
6515 +
6516 + #if IS_ENABLED(CONFIG_DEBUG_FS)
6517 +-int hsr_debugfs_init(struct hsr_priv *priv, struct net_device *hsr_dev);
6518 ++void hsr_debugfs_init(struct hsr_priv *priv, struct net_device *hsr_dev);
6519 + void hsr_debugfs_term(struct hsr_priv *priv);
6520 + #else
6521 +-static inline int hsr_debugfs_init(struct hsr_priv *priv,
6522 +- struct net_device *hsr_dev)
6523 +-{
6524 +- return 0;
6525 +-}
6526 +-
6527 ++static inline void hsr_debugfs_init(struct hsr_priv *priv,
6528 ++ struct net_device *hsr_dev)
6529 ++{}
6530 + static inline void hsr_debugfs_term(struct hsr_priv *priv)
6531 + {}
6532 + #endif
6533 +diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
6534 +index d8876f0e9672..e537a4b6531b 100644
6535 +--- a/net/ipv4/tcp.c
6536 ++++ b/net/ipv4/tcp.c
6537 +@@ -1958,8 +1958,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
6538 + struct sk_buff *skb, *last;
6539 + u32 urg_hole = 0;
6540 + struct scm_timestamping_internal tss;
6541 +- bool has_tss = false;
6542 +- bool has_cmsg;
6543 ++ int cmsg_flags;
6544 +
6545 + if (unlikely(flags & MSG_ERRQUEUE))
6546 + return inet_recv_error(sk, msg, len, addr_len);
6547 +@@ -1974,7 +1973,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
6548 + if (sk->sk_state == TCP_LISTEN)
6549 + goto out;
6550 +
6551 +- has_cmsg = tp->recvmsg_inq;
6552 ++ cmsg_flags = tp->recvmsg_inq ? 1 : 0;
6553 + timeo = sock_rcvtimeo(sk, nonblock);
6554 +
6555 + /* Urgent data needs to be handled specially. */
6556 +@@ -2157,8 +2156,7 @@ skip_copy:
6557 +
6558 + if (TCP_SKB_CB(skb)->has_rxtstamp) {
6559 + tcp_update_recv_tstamps(skb, &tss);
6560 +- has_tss = true;
6561 +- has_cmsg = true;
6562 ++ cmsg_flags |= 2;
6563 + }
6564 + if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
6565 + goto found_fin_ok;
6566 +@@ -2183,10 +2181,10 @@ found_fin_ok:
6567 +
6568 + release_sock(sk);
6569 +
6570 +- if (has_cmsg) {
6571 +- if (has_tss)
6572 ++ if (cmsg_flags) {
6573 ++ if (cmsg_flags & 2)
6574 + tcp_recv_timestamp(msg, sk, &tss);
6575 +- if (tp->recvmsg_inq) {
6576 ++ if (cmsg_flags & 1) {
6577 + inq = tcp_inq_hint(sk);
6578 + put_cmsg(msg, SOL_TCP, TCP_CM_INQ, sizeof(inq), &inq);
6579 + }
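
The tcp.c change folds two independent booleans into one int used as a bitmask, so "any cmsg work pending?" becomes a single truth test at the end of tcp_recvmsg(). A userspace sketch with the same bit values as the patch (1 = inq, 2 = timestamp):

#include <stdio.h>

#define CMSG_INQ        1
#define CMSG_TS         2

static void finish(int cmsg_flags)
{
        if (cmsg_flags) {               /* at least one kind of cmsg */
                if (cmsg_flags & CMSG_TS)
                        printf("emit timestamp cmsg\n");
                if (cmsg_flags & CMSG_INQ)
                        printf("emit inq cmsg\n");
        }
}

int main(void)
{
        int cmsg_flags = 0;

        cmsg_flags |= CMSG_INQ;         /* recvmsg_inq was enabled */
        cmsg_flags |= CMSG_TS;          /* a packet carried an rx timestamp */
        finish(cmsg_flags);
        return 0;
}
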
6580 +diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
6581 +index 32772d6ded4e..a6545ef0d27b 100644
6582 +--- a/net/ipv4/tcp_bbr.c
6583 ++++ b/net/ipv4/tcp_bbr.c
6584 +@@ -306,7 +306,8 @@ static u32 bbr_tso_segs_goal(struct sock *sk)
6585 + /* Sort of tcp_tso_autosize() but ignoring
6586 + * driver provided sk_gso_max_size.
6587 + */
6588 +- bytes = min_t(unsigned long, sk->sk_pacing_rate >> sk->sk_pacing_shift,
6589 ++ bytes = min_t(unsigned long,
6590 ++ sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift),
6591 + GSO_MAX_SIZE - 1 - MAX_TCP_HEADER);
6592 + segs = max_t(u32, bytes / tp->mss_cache, bbr_min_tso_segs(sk));
6593 +
6594 +diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
6595 +index 0269584e9cf7..e4ba915c4bb5 100644
6596 +--- a/net/ipv4/tcp_output.c
6597 ++++ b/net/ipv4/tcp_output.c
6598 +@@ -1728,7 +1728,7 @@ static u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
6599 + u32 bytes, segs;
6600 +
6601 + bytes = min_t(unsigned long,
6602 +- sk->sk_pacing_rate >> sk->sk_pacing_shift,
6603 ++ sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift),
6604 + sk->sk_gso_max_size - 1 - MAX_TCP_HEADER);
6605 +
6606 + /* Goal is to send at least one packet per ms,
6607 +@@ -2263,7 +2263,7 @@ static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
6608 +
6609 + limit = max_t(unsigned long,
6610 + 2 * skb->truesize,
6611 +- sk->sk_pacing_rate >> sk->sk_pacing_shift);
6612 ++ sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift));
6613 + if (sk->sk_pacing_status == SK_PACING_NONE)
6614 + limit = min_t(unsigned long, limit,
6615 + sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes);
6616 +diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
6617 +index a2b58de82600..f8f52ff99cfb 100644
6618 +--- a/net/netfilter/nf_queue.c
6619 ++++ b/net/netfilter/nf_queue.c
6620 +@@ -189,7 +189,7 @@ static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
6621 + goto err;
6622 + }
6623 +
6624 +- if (!skb_dst_force(skb) && state->hook != NF_INET_PRE_ROUTING) {
6625 ++ if (skb_dst(skb) && !skb_dst_force(skb)) {
6626 + status = -ENETDOWN;
6627 + goto err;
6628 + }
6629 +diff --git a/net/netfilter/nft_tproxy.c b/net/netfilter/nft_tproxy.c
6630 +index f92a82c73880..95980154ef02 100644
6631 +--- a/net/netfilter/nft_tproxy.c
6632 ++++ b/net/netfilter/nft_tproxy.c
6633 +@@ -50,7 +50,7 @@ static void nft_tproxy_eval_v4(const struct nft_expr *expr,
6634 + taddr = nf_tproxy_laddr4(skb, taddr, iph->daddr);
6635 +
6636 + if (priv->sreg_port)
6637 +- tport = regs->data[priv->sreg_port];
6638 ++ tport = nft_reg_load16(&regs->data[priv->sreg_port]);
6639 + if (!tport)
6640 + tport = hp->dest;
6641 +
6642 +@@ -117,7 +117,7 @@ static void nft_tproxy_eval_v6(const struct nft_expr *expr,
6643 + taddr = *nf_tproxy_laddr6(skb, &taddr, &iph->daddr);
6644 +
6645 + if (priv->sreg_port)
6646 +- tport = regs->data[priv->sreg_port];
6647 ++ tport = nft_reg_load16(&regs->data[priv->sreg_port]);
6648 + if (!tport)
6649 + tport = hp->dest;
6650 +
6651 +diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
6652 +index 8769b4b8807d..7c3c5fdb82a9 100644
6653 +--- a/net/sched/sch_generic.c
6654 ++++ b/net/sched/sch_generic.c
6655 +@@ -657,7 +657,7 @@ static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
6656 + if (likely(skb)) {
6657 + qdisc_update_stats_at_dequeue(qdisc, skb);
6658 + } else {
6659 +- qdisc->empty = true;
6660 ++ WRITE_ONCE(qdisc->empty, true);
6661 + }
6662 +
6663 + return skb;
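
Several hunks in this patch (q->empty here, hh_len, sk_pacing_shift, ns->revision) wrap lockless accesses in READ_ONCE()/WRITE_ONCE() so the compiler cannot tear, fuse, or re-load them. The nearest portable analogue is a relaxed C11 atomic; an illustrative busy-wait, compile with -pthread:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static _Atomic int empty = 1;

static void *producer(void *arg)
{
        (void)arg;
        /* analogue of WRITE_ONCE(qdisc->empty, false) */
        atomic_store_explicit(&empty, 0, memory_order_relaxed);
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, producer, NULL);
        /* analogue of the lockless READ_ONCE(q->empty) fast-path test */
        while (atomic_load_explicit(&empty, memory_order_relaxed))
                ;
        pthread_join(t, NULL);
        printf("queue observed non-empty\n");
        return 0;
}
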
6664 +diff --git a/net/socket.c b/net/socket.c
6665 +index d7a106028f0e..ca8de9e1582d 100644
6666 +--- a/net/socket.c
6667 ++++ b/net/socket.c
6668 +@@ -955,7 +955,7 @@ static ssize_t sock_read_iter(struct kiocb *iocb, struct iov_iter *to)
6669 + .msg_iocb = iocb};
6670 + ssize_t res;
6671 +
6672 +- if (file->f_flags & O_NONBLOCK)
6673 ++ if (file->f_flags & O_NONBLOCK || (iocb->ki_flags & IOCB_NOWAIT))
6674 + msg.msg_flags = MSG_DONTWAIT;
6675 +
6676 + if (iocb->ki_pos != 0)
6677 +@@ -980,7 +980,7 @@ static ssize_t sock_write_iter(struct kiocb *iocb, struct iov_iter *from)
6678 + if (iocb->ki_pos != 0)
6679 + return -ESPIPE;
6680 +
6681 +- if (file->f_flags & O_NONBLOCK)
6682 ++ if (file->f_flags & O_NONBLOCK || (iocb->ki_flags & IOCB_NOWAIT))
6683 + msg.msg_flags = MSG_DONTWAIT;
6684 +
6685 + if (sock->type == SOCK_SEQPACKET)
6686 +diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
6687 +index a349094f6fb7..f740cb51802a 100644
6688 +--- a/net/sunrpc/cache.c
6689 ++++ b/net/sunrpc/cache.c
6690 +@@ -53,9 +53,6 @@ static void cache_init(struct cache_head *h, struct cache_detail *detail)
6691 + h->last_refresh = now;
6692 + }
6693 +
6694 +-static inline int cache_is_valid(struct cache_head *h);
6695 +-static void cache_fresh_locked(struct cache_head *head, time_t expiry,
6696 +- struct cache_detail *detail);
6697 + static void cache_fresh_unlocked(struct cache_head *head,
6698 + struct cache_detail *detail);
6699 +
6700 +@@ -105,9 +102,6 @@ static struct cache_head *sunrpc_cache_add_entry(struct cache_detail *detail,
6701 + if (cache_is_expired(detail, tmp)) {
6702 + hlist_del_init_rcu(&tmp->cache_list);
6703 + detail->entries --;
6704 +- if (cache_is_valid(tmp) == -EAGAIN)
6705 +- set_bit(CACHE_NEGATIVE, &tmp->flags);
6706 +- cache_fresh_locked(tmp, 0, detail);
6707 + freeme = tmp;
6708 + break;
6709 + }
6710 +diff --git a/samples/seccomp/user-trap.c b/samples/seccomp/user-trap.c
6711 +index 6d0125ca8af7..20291ec6489f 100644
6712 +--- a/samples/seccomp/user-trap.c
6713 ++++ b/samples/seccomp/user-trap.c
6714 +@@ -298,14 +298,14 @@ int main(void)
6715 + req = malloc(sizes.seccomp_notif);
6716 + if (!req)
6717 + goto out_close;
6718 +- memset(req, 0, sizeof(*req));
6719 +
6720 + resp = malloc(sizes.seccomp_notif_resp);
6721 + if (!resp)
6722 + goto out_req;
6723 +- memset(resp, 0, sizeof(*resp));
6724 ++ memset(resp, 0, sizes.seccomp_notif_resp);
6725 +
6726 + while (1) {
6727 ++ memset(req, 0, sizes.seccomp_notif);
6728 + if (ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, req)) {
6729 + perror("ioctl recv");
6730 + goto out_resp;
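
The user-trap.c fix clears the notification buffers with the sizes the kernel reported at run time rather than sizeof(*ptr), and re-zeroes the request before every SECCOMP_IOCTL_NOTIF_RECV. The sizeof pitfall in isolation, using a hypothetical struct:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct notif_v1 {
        int id;                 /* layout this program was built against */
};

int main(void)
{
        /* Pretend the kernel reported a newer, larger layout at run time. */
        size_t runtime_size = sizeof(struct notif_v1) + 8;
        struct notif_v1 *req = malloc(runtime_size);

        if (!req)
                return 1;
        memset(req, 0xff, runtime_size);   /* stale data from a prior use */

        memset(req, 0, sizeof(*req));      /* BUG: clears only the old size */
        memset(req, 0, runtime_size);      /* fix: clear the whole buffer */

        printf("compile-time size %zu, runtime size %zu\n",
               sizeof(*req), runtime_size);
        free(req);
        return 0;
}
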
6731 +diff --git a/samples/trace_printk/trace-printk.c b/samples/trace_printk/trace-printk.c
6732 +index 7affc3b50b61..cfc159580263 100644
6733 +--- a/samples/trace_printk/trace-printk.c
6734 ++++ b/samples/trace_printk/trace-printk.c
6735 +@@ -36,6 +36,7 @@ static int __init trace_printk_init(void)
6736 +
6737 + /* Kick off printing in irq context */
6738 + irq_work_queue(&irqwork);
6739 ++ irq_work_sync(&irqwork);
6740 +
6741 + trace_printk("This is a %s that will use trace_bprintk()\n",
6742 + "static string");
6743 +diff --git a/scripts/gcc-plugins/Kconfig b/scripts/gcc-plugins/Kconfig
6744 +index d33de0b9f4f5..e3569543bdac 100644
6745 +--- a/scripts/gcc-plugins/Kconfig
6746 ++++ b/scripts/gcc-plugins/Kconfig
6747 +@@ -14,8 +14,8 @@ config HAVE_GCC_PLUGINS
6748 + An arch should select this symbol if it supports building with
6749 + GCC plugins.
6750 +
6751 +-config GCC_PLUGINS
6752 +- bool
6753 ++menuconfig GCC_PLUGINS
6754 ++ bool "GCC plugins"
6755 + depends on HAVE_GCC_PLUGINS
6756 + depends on PLUGIN_HOSTCC != ""
6757 + default y
6758 +@@ -25,8 +25,7 @@ config GCC_PLUGINS
6759 +
6760 + See Documentation/core-api/gcc-plugins.rst for details.
6761 +
6762 +-menu "GCC plugins"
6763 +- depends on GCC_PLUGINS
6764 ++if GCC_PLUGINS
6765 +
6766 + config GCC_PLUGIN_CYC_COMPLEXITY
6767 + bool "Compute the cyclomatic complexity of a function" if EXPERT
6768 +@@ -113,4 +112,4 @@ config GCC_PLUGIN_ARM_SSP_PER_TASK
6769 + bool
6770 + depends on GCC_PLUGINS && ARM
6771 +
6772 +-endmenu
6773 ++endif
6774 +diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
6775 +index 45d13b6462aa..90d21675c3ad 100644
6776 +--- a/security/apparmor/apparmorfs.c
6777 ++++ b/security/apparmor/apparmorfs.c
6778 +@@ -593,7 +593,7 @@ static __poll_t ns_revision_poll(struct file *file, poll_table *pt)
6779 +
6780 + void __aa_bump_ns_revision(struct aa_ns *ns)
6781 + {
6782 +- ns->revision++;
6783 ++ WRITE_ONCE(ns->revision, ns->revision + 1);
6784 + wake_up_interruptible(&ns->wait);
6785 + }
6786 +
6787 +diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c
6788 +index 9e0492795267..039ca71872ce 100644
6789 +--- a/security/apparmor/domain.c
6790 ++++ b/security/apparmor/domain.c
6791 +@@ -317,6 +317,7 @@ static int aa_xattrs_match(const struct linux_binprm *bprm,
6792 +
6793 + if (!bprm || !profile->xattr_count)
6794 + return 0;
6795 ++ might_sleep();
6796 +
6797 + /* transition from exec match to xattr set */
6798 + state = aa_dfa_null_transition(profile->xmatch, state);
6799 +@@ -361,10 +362,11 @@ out:
6800 + }
6801 +
6802 + /**
6803 +- * __attach_match_ - find an attachment match
6804 ++ * find_attach - do attachment search for unconfined processes
6805 + * @bprm - binprm structure of transitioning task
6806 +- * @name - to match against (NOT NULL)
6807 ++ * @ns: the current namespace (NOT NULL)
6808 + * @head - profile list to walk (NOT NULL)
6809 ++ * @name - to match against (NOT NULL)
6810 + * @info - info message if there was an error (NOT NULL)
6811 + *
6812 + * Do a linear search on the profiles in the list. There is a matching
6813 +@@ -374,12 +376,11 @@ out:
6814 + *
6815 + * Requires: @head not be shared or have appropriate locks held
6816 + *
6817 +- * Returns: profile or NULL if no match found
6818 ++ * Returns: label or NULL if no match found
6819 + */
6820 +-static struct aa_profile *__attach_match(const struct linux_binprm *bprm,
6821 +- const char *name,
6822 +- struct list_head *head,
6823 +- const char **info)
6824 ++static struct aa_label *find_attach(const struct linux_binprm *bprm,
6825 ++ struct aa_ns *ns, struct list_head *head,
6826 ++ const char *name, const char **info)
6827 + {
6828 + int candidate_len = 0, candidate_xattrs = 0;
6829 + bool conflict = false;
6830 +@@ -388,6 +389,8 @@ static struct aa_profile *__attach_match(const struct linux_binprm *bprm,
6831 + AA_BUG(!name);
6832 + AA_BUG(!head);
6833 +
6834 ++ rcu_read_lock();
6835 ++restart:
6836 + list_for_each_entry_rcu(profile, head, base.list) {
6837 + if (profile->label.flags & FLAG_NULL &&
6838 + &profile->label == ns_unconfined(profile->ns))
6839 +@@ -413,16 +416,32 @@ static struct aa_profile *__attach_match(const struct linux_binprm *bprm,
6840 + perm = dfa_user_allow(profile->xmatch, state);
6841 + /* any accepting state means a valid match. */
6842 + if (perm & MAY_EXEC) {
6843 +- int ret;
6844 ++ int ret = 0;
6845 +
6846 + if (count < candidate_len)
6847 + continue;
6848 +
6849 +- ret = aa_xattrs_match(bprm, profile, state);
6850 +- /* Fail matching if the xattrs don't match */
6851 +- if (ret < 0)
6852 +- continue;
6853 +-
6854 ++ if (bprm && profile->xattr_count) {
6855 ++ long rev = READ_ONCE(ns->revision);
6856 ++
6857 ++ if (!aa_get_profile_not0(profile))
6858 ++ goto restart;
6859 ++ rcu_read_unlock();
6860 ++ ret = aa_xattrs_match(bprm, profile,
6861 ++ state);
6862 ++ rcu_read_lock();
6863 ++ aa_put_profile(profile);
6864 ++ if (rev !=
6865 ++ READ_ONCE(ns->revision))
6866 ++ /* policy changed */
6867 ++ goto restart;
6868 ++ /*
6869 ++ * Fail matching if the xattrs don't
6870 ++ * match
6871 ++ */
6872 ++ if (ret < 0)
6873 ++ continue;
6874 ++ }
6875 + /*
6876 + * TODO: allow for more flexible best match
6877 + *
6878 +@@ -445,43 +464,28 @@ static struct aa_profile *__attach_match(const struct linux_binprm *bprm,
6879 + candidate_xattrs = ret;
6880 + conflict = false;
6881 + }
6882 +- } else if (!strcmp(profile->base.name, name))
6883 ++ } else if (!strcmp(profile->base.name, name)) {
6884 + /*
6885 + * old exact non-re match, without conditionals such
6886 + * as xattrs. no more searching required
6887 + */
6888 +- return profile;
6889 ++ candidate = profile;
6890 ++ goto out;
6891 ++ }
6892 + }
6893 +
6894 +- if (conflict) {
6895 +- *info = "conflicting profile attachments";
6896 ++ if (!candidate || conflict) {
6897 ++ if (conflict)
6898 ++ *info = "conflicting profile attachments";
6899 ++ rcu_read_unlock();
6900 + return NULL;
6901 + }
6902 +
6903 +- return candidate;
6904 +-}
6905 +-
6906 +-/**
6907 +- * find_attach - do attachment search for unconfined processes
6908 +- * @bprm - binprm structure of transitioning task
6909 +- * @ns: the current namespace (NOT NULL)
6910 +- * @list: list to search (NOT NULL)
6911 +- * @name: the executable name to match against (NOT NULL)
6912 +- * @info: info message if there was an error
6913 +- *
6914 +- * Returns: label or NULL if no match found
6915 +- */
6916 +-static struct aa_label *find_attach(const struct linux_binprm *bprm,
6917 +- struct aa_ns *ns, struct list_head *list,
6918 +- const char *name, const char **info)
6919 +-{
6920 +- struct aa_profile *profile;
6921 +-
6922 +- rcu_read_lock();
6923 +- profile = aa_get_profile(__attach_match(bprm, name, list, info));
6924 ++out:
6925 ++ candidate = aa_get_newest_profile(candidate);
6926 + rcu_read_unlock();
6927 +
6928 +- return profile ? &profile->label : NULL;
6929 ++ return &candidate->label;
6930 + }
6931 +
6932 + static const char *next_name(int xtype, const char *name)
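
The find_attach() rework above cannot call the sleeping aa_xattrs_match() under rcu_read_lock(), so it pins the profile with a reference, drops the lock for the xattr check, and restarts the whole walk if ns->revision moved in the meantime. A userspace sketch of that pin, unlock, re-validate, restart shape (a mutex stands in for RCU; names hypothetical):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static long revision;                   /* bumped whenever the set changes */

static int slow_match(int candidate)    /* may sleep: never call locked */
{
        return candidate == 2;
}

static int find_match(void)
{
        int i;

restart:
        pthread_mutex_lock(&lock);
        for (i = 0; i < 4; i++) {
                long rev = revision;    /* snapshot before dropping lock */
                int hit;

                pthread_mutex_unlock(&lock);
                hit = slow_match(i);    /* sleeping work, done unlocked */
                pthread_mutex_lock(&lock);

                if (rev != revision)    /* set changed under us: redo */
                        goto restart;
                if (hit)
                        break;
        }
        pthread_mutex_unlock(&lock);
        return i < 4 ? i : -1;
}

int main(void)
{
        printf("matched candidate %d\n", find_match());
        return 0;
}
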
6933 +diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c
6934 +index ade333074c8e..06355717ee84 100644
6935 +--- a/security/apparmor/policy.c
6936 ++++ b/security/apparmor/policy.c
6937 +@@ -1124,8 +1124,8 @@ ssize_t aa_remove_profiles(struct aa_ns *policy_ns, struct aa_label *subj,
6938 + if (!name) {
6939 + /* remove namespace - can only happen if fqname[0] == ':' */
6940 + mutex_lock_nested(&ns->parent->lock, ns->level);
6941 +- __aa_remove_ns(ns);
6942 + __aa_bump_ns_revision(ns);
6943 ++ __aa_remove_ns(ns);
6944 + mutex_unlock(&ns->parent->lock);
6945 + } else {
6946 + /* remove profile */
6947 +@@ -1137,9 +1137,9 @@ ssize_t aa_remove_profiles(struct aa_ns *policy_ns, struct aa_label *subj,
6948 + goto fail_ns_lock;
6949 + }
6950 + name = profile->base.hname;
6951 ++ __aa_bump_ns_revision(ns);
6952 + __remove_profile(profile);
6953 + __aa_labelset_update_subtree(ns);
6954 +- __aa_bump_ns_revision(ns);
6955 + mutex_unlock(&ns->lock);
6956 + }
6957 +
6958 +diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
6959 +index d4280568a41e..5c74ea2bb44b 100644
6960 +--- a/sound/core/pcm_native.c
6961 ++++ b/sound/core/pcm_native.c
6962 +@@ -3408,7 +3408,8 @@ int snd_pcm_lib_default_mmap(struct snd_pcm_substream *substream,
6963 + #endif /* CONFIG_GENERIC_ALLOCATOR */
6964 + #ifndef CONFIG_X86 /* for avoiding warnings arch/x86/mm/pat.c */
6965 + if (IS_ENABLED(CONFIG_HAS_DMA) && !substream->ops->page &&
6966 +- substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV)
6967 ++ (substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV ||
6968 ++ substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV_UC))
6969 + return dma_mmap_coherent(substream->dma_buffer.dev.dev,
6970 + area,
6971 + substream->runtime->dma_area,
6972 +diff --git a/sound/firewire/motu/motu-proc.c b/sound/firewire/motu/motu-proc.c
6973 +index ea46fb4c1b5a..126a7bd187bb 100644
6974 +--- a/sound/firewire/motu/motu-proc.c
6975 ++++ b/sound/firewire/motu/motu-proc.c
6976 +@@ -16,7 +16,7 @@ static const char *const clock_names[] = {
6977 + [SND_MOTU_CLOCK_SOURCE_SPDIF_ON_OPT] = "S/PDIF on optical interface",
6978 + [SND_MOTU_CLOCK_SOURCE_SPDIF_ON_OPT_A] = "S/PDIF on optical interface A",
6979 + [SND_MOTU_CLOCK_SOURCE_SPDIF_ON_OPT_B] = "S/PDIF on optical interface B",
6980 +- [SND_MOTU_CLOCK_SOURCE_SPDIF_ON_COAX] = "S/PCIF on coaxial interface",
6981 ++ [SND_MOTU_CLOCK_SOURCE_SPDIF_ON_COAX] = "S/PDIF on coaxial interface",
6982 + [SND_MOTU_CLOCK_SOURCE_AESEBU_ON_XLR] = "AESEBU on XLR interface",
6983 + [SND_MOTU_CLOCK_SOURCE_WORD_ON_BNC] = "Word clock on BNC interface",
6984 + };
6985 +diff --git a/sound/isa/cs423x/cs4236.c b/sound/isa/cs423x/cs4236.c
6986 +index 78dd213589b4..fa3c39cff5f8 100644
6987 +--- a/sound/isa/cs423x/cs4236.c
6988 ++++ b/sound/isa/cs423x/cs4236.c
6989 +@@ -278,7 +278,8 @@ static int snd_cs423x_pnp_init_mpu(int dev, struct pnp_dev *pdev)
6990 + } else {
6991 + mpu_port[dev] = pnp_port_start(pdev, 0);
6992 + if (mpu_irq[dev] >= 0 &&
6993 +- pnp_irq_valid(pdev, 0) && pnp_irq(pdev, 0) >= 0) {
6994 ++ pnp_irq_valid(pdev, 0) &&
6995 ++ pnp_irq(pdev, 0) != (resource_size_t)-1) {
6996 + mpu_irq[dev] = pnp_irq(pdev, 0);
6997 + } else {
6998 + mpu_irq[dev] = -1; /* disable interrupt */
6999 +diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
7000 +index 6387c7e90918..76b507058cb4 100644
7001 +--- a/sound/pci/hda/hda_controller.c
7002 ++++ b/sound/pci/hda/hda_controller.c
7003 +@@ -884,7 +884,7 @@ static int azx_rirb_get_response(struct hdac_bus *bus, unsigned int addr,
7004 + return -EAGAIN; /* give a chance to retry */
7005 + }
7006 +
7007 +- dev_WARN(chip->card->dev,
7008 ++ dev_err(chip->card->dev,
7009 + "azx_get_response timeout, switching to single_cmd mode: last cmd=0x%08x\n",
7010 + bus->last_cmd[addr]);
7011 + chip->single_cmd = 1;
7012 +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
7013 +index 86a416cdeb29..f6cbb831b86a 100644
7014 +--- a/sound/pci/hda/hda_intel.c
7015 ++++ b/sound/pci/hda/hda_intel.c
7016 +@@ -280,12 +280,13 @@ enum {
7017 +
7018 + /* quirks for old Intel chipsets */
7019 + #define AZX_DCAPS_INTEL_ICH \
7020 +- (AZX_DCAPS_OLD_SSYNC | AZX_DCAPS_NO_ALIGN_BUFSIZE)
7021 ++ (AZX_DCAPS_OLD_SSYNC | AZX_DCAPS_NO_ALIGN_BUFSIZE |\
7022 ++ AZX_DCAPS_SYNC_WRITE)
7023 +
7024 + /* quirks for Intel PCH */
7025 + #define AZX_DCAPS_INTEL_PCH_BASE \
7026 + (AZX_DCAPS_NO_ALIGN_BUFSIZE | AZX_DCAPS_COUNT_LPIB_DELAY |\
7027 +- AZX_DCAPS_SNOOP_TYPE(SCH))
7028 ++ AZX_DCAPS_SNOOP_TYPE(SCH) | AZX_DCAPS_SYNC_WRITE)
7029 +
7030 + /* PCH up to IVB; no runtime PM; bind with i915 gfx */
7031 + #define AZX_DCAPS_INTEL_PCH_NOPM \
7032 +@@ -300,13 +301,13 @@ enum {
7033 + #define AZX_DCAPS_INTEL_HASWELL \
7034 + (/*AZX_DCAPS_ALIGN_BUFSIZE |*/ AZX_DCAPS_COUNT_LPIB_DELAY |\
7035 + AZX_DCAPS_PM_RUNTIME | AZX_DCAPS_I915_COMPONENT |\
7036 +- AZX_DCAPS_SNOOP_TYPE(SCH))
7037 ++ AZX_DCAPS_SNOOP_TYPE(SCH) | AZX_DCAPS_SYNC_WRITE)
7038 +
7039 + /* Broadwell HDMI can't use position buffer reliably, force to use LPIB */
7040 + #define AZX_DCAPS_INTEL_BROADWELL \
7041 + (/*AZX_DCAPS_ALIGN_BUFSIZE |*/ AZX_DCAPS_POSFIX_LPIB |\
7042 + AZX_DCAPS_PM_RUNTIME | AZX_DCAPS_I915_COMPONENT |\
7043 +- AZX_DCAPS_SNOOP_TYPE(SCH))
7044 ++ AZX_DCAPS_SNOOP_TYPE(SCH) | AZX_DCAPS_SYNC_WRITE)
7045 +
7046 + #define AZX_DCAPS_INTEL_BAYTRAIL \
7047 + (AZX_DCAPS_INTEL_PCH_BASE | AZX_DCAPS_I915_COMPONENT)
7048 +@@ -1280,11 +1281,17 @@ static void init_vga_switcheroo(struct azx *chip)
7049 + {
7050 + struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
7051 + struct pci_dev *p = get_bound_vga(chip->pci);
7052 ++ struct pci_dev *parent;
7053 + if (p) {
7054 + dev_info(chip->card->dev,
7055 + "Handle vga_switcheroo audio client\n");
7056 + hda->use_vga_switcheroo = 1;
7057 +- chip->bus.keep_power = 1; /* cleared in either gpu_bound op or codec probe */
7058 ++
7059 ++ /* cleared in either gpu_bound op or codec probe, or when its
7060 ++ * upstream port has _PR3 (i.e. dGPU).
7061 ++ */
7062 ++ parent = pci_upstream_bridge(p);
7063 ++ chip->bus.keep_power = parent ? !pci_pr3_present(parent) : 1;
7064 + chip->driver_caps |= AZX_DCAPS_PM_RUNTIME;
7065 + pci_dev_put(p);
7066 + }
7067 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
7068 +index e1229dbad6b2..252888f426de 100644
7069 +--- a/sound/pci/hda/patch_realtek.c
7070 ++++ b/sound/pci/hda/patch_realtek.c
7071 +@@ -501,6 +501,7 @@ static void alc_shutup_pins(struct hda_codec *codec)
7072 + struct alc_spec *spec = codec->spec;
7073 +
7074 + switch (codec->core.vendor_id) {
7075 ++ case 0x10ec0283:
7076 + case 0x10ec0286:
7077 + case 0x10ec0288:
7078 + case 0x10ec0298:
7079 +@@ -5547,6 +5548,16 @@ static void alc295_fixup_disable_dac3(struct hda_codec *codec,
7080 + }
7081 + }
7082 +
7083 ++/* force NID 0x17 (Bass Speaker) to DAC1 to share it with the main speaker */
7084 ++static void alc285_fixup_speaker2_to_dac1(struct hda_codec *codec,
7085 ++ const struct hda_fixup *fix, int action)
7086 ++{
7087 ++ if (action == HDA_FIXUP_ACT_PRE_PROBE) {
7088 ++ hda_nid_t conn[1] = { 0x02 };
7089 ++ snd_hda_override_conn_list(codec, 0x17, 1, conn);
7090 ++ }
7091 ++}
7092 ++
7093 + /* Hook to update amp GPIO4 for automute */
7094 + static void alc280_hp_gpio4_automute_hook(struct hda_codec *codec,
7095 + struct hda_jack_callback *jack)
7096 +@@ -5849,6 +5860,7 @@ enum {
7097 + ALC225_FIXUP_DISABLE_MIC_VREF,
7098 + ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
7099 + ALC295_FIXUP_DISABLE_DAC3,
7100 ++ ALC285_FIXUP_SPEAKER2_TO_DAC1,
7101 + ALC280_FIXUP_HP_HEADSET_MIC,
7102 + ALC221_FIXUP_HP_FRONT_MIC,
7103 + ALC292_FIXUP_TPT460,
7104 +@@ -5893,9 +5905,12 @@ enum {
7105 + ALC256_FIXUP_ASUS_HEADSET_MIC,
7106 + ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
7107 + ALC299_FIXUP_PREDATOR_SPK,
7108 +- ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC,
7109 + ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE,
7110 +- ALC294_FIXUP_ASUS_INTSPK_GPIO,
7111 ++ ALC289_FIXUP_DELL_SPK2,
7112 ++ ALC289_FIXUP_DUAL_SPK,
7113 ++ ALC294_FIXUP_SPK2_TO_DAC1,
7114 ++ ALC294_FIXUP_ASUS_DUAL_SPK,
7115 ++
7116 + };
7117 +
7118 + static const struct hda_fixup alc269_fixups[] = {
7119 +@@ -6649,6 +6664,10 @@ static const struct hda_fixup alc269_fixups[] = {
7120 + .type = HDA_FIXUP_FUNC,
7121 + .v.func = alc295_fixup_disable_dac3,
7122 + },
7123 ++ [ALC285_FIXUP_SPEAKER2_TO_DAC1] = {
7124 ++ .type = HDA_FIXUP_FUNC,
7125 ++ .v.func = alc285_fixup_speaker2_to_dac1,
7126 ++ },
7127 + [ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER] = {
7128 + .type = HDA_FIXUP_PINS,
7129 + .v.pins = (const struct hda_pintbl[]) {
7130 +@@ -6966,33 +6985,45 @@ static const struct hda_fixup alc269_fixups[] = {
7131 + { }
7132 + }
7133 + },
7134 +- [ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC] = {
7135 ++ [ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE] = {
7136 + .type = HDA_FIXUP_PINS,
7137 + .v.pins = (const struct hda_pintbl[]) {
7138 +- { 0x14, 0x411111f0 }, /* disable confusing internal speaker */
7139 +- { 0x19, 0x04a11150 }, /* use as headset mic, without its own jack detect */
7140 ++ { 0x19, 0x04a11040 },
7141 ++ { 0x21, 0x04211020 },
7142 + { }
7143 + },
7144 + .chained = true,
7145 +- .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
7146 ++ .chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE
7147 + },
7148 +- [ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE] = {
7149 ++ [ALC289_FIXUP_DELL_SPK2] = {
7150 + .type = HDA_FIXUP_PINS,
7151 + .v.pins = (const struct hda_pintbl[]) {
7152 +- { 0x19, 0x04a11040 },
7153 +- { 0x21, 0x04211020 },
7154 ++ { 0x17, 0x90170130 }, /* bass spk */
7155 + { }
7156 + },
7157 + .chained = true,
7158 +- .chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE
7159 ++ .chain_id = ALC269_FIXUP_DELL4_MIC_NO_PRESENCE
7160 + },
7161 +- [ALC294_FIXUP_ASUS_INTSPK_GPIO] = {
7162 ++ [ALC289_FIXUP_DUAL_SPK] = {
7163 ++ .type = HDA_FIXUP_FUNC,
7164 ++ .v.func = alc285_fixup_speaker2_to_dac1,
7165 ++ .chained = true,
7166 ++ .chain_id = ALC289_FIXUP_DELL_SPK2
7167 ++ },
7168 ++ [ALC294_FIXUP_SPK2_TO_DAC1] = {
7169 ++ .type = HDA_FIXUP_FUNC,
7170 ++ .v.func = alc285_fixup_speaker2_to_dac1,
7171 ++ .chained = true,
7172 ++ .chain_id = ALC294_FIXUP_ASUS_HEADSET_MIC
7173 ++ },
7174 ++ [ALC294_FIXUP_ASUS_DUAL_SPK] = {
7175 + .type = HDA_FIXUP_FUNC,
7176 + /* The GPIO must be pulled to initialize the AMP */
7177 + .v.func = alc_fixup_gpio4,
7178 + .chained = true,
7179 +- .chain_id = ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC
7180 ++ .chain_id = ALC294_FIXUP_SPK2_TO_DAC1
7181 + },
7182 ++
7183 + };
7184 +
7185 + static const struct snd_pci_quirk alc269_fixup_tbl[] = {
7186 +@@ -7065,6 +7096,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
7187 + SND_PCI_QUIRK(0x1028, 0x08ad, "Dell WYSE AIO", ALC225_FIXUP_DELL_WYSE_AIO_MIC_NO_PRESENCE),
7188 + SND_PCI_QUIRK(0x1028, 0x08ae, "Dell WYSE NB", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE),
7189 + SND_PCI_QUIRK(0x1028, 0x0935, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
7190 ++ SND_PCI_QUIRK(0x1028, 0x097e, "Dell Precision", ALC289_FIXUP_DUAL_SPK),
7191 ++ SND_PCI_QUIRK(0x1028, 0x097d, "Dell Precision", ALC289_FIXUP_DUAL_SPK),
7192 + SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
7193 + SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
7194 + SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
7195 +@@ -7152,7 +7185,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
7196 + SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
7197 + SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
7198 + SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
7199 +- SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_INTSPK_GPIO),
7200 ++ SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_DUAL_SPK),
7201 + SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC),
7202 + SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
7203 + SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC),
7204 +@@ -7224,6 +7257,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
7205 + SND_PCI_QUIRK(0x17aa, 0x224c, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
7206 + SND_PCI_QUIRK(0x17aa, 0x224d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
7207 + SND_PCI_QUIRK(0x17aa, 0x225d, "Thinkpad T480", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
7208 ++ SND_PCI_QUIRK(0x17aa, 0x2293, "Thinkpad X1 Carbon 7th", ALC285_FIXUP_SPEAKER2_TO_DAC1),
7209 + SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
7210 + SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
7211 + SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
7212 +@@ -7408,6 +7442,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
7213 + {.id = ALC255_FIXUP_DELL_SPK_NOISE, .name = "dell-spk-noise"},
7214 + {.id = ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "alc225-dell1"},
7215 + {.id = ALC295_FIXUP_DISABLE_DAC3, .name = "alc295-disable-dac3"},
7216 ++ {.id = ALC285_FIXUP_SPEAKER2_TO_DAC1, .name = "alc285-speaker2-to-dac1"},
7217 + {.id = ALC280_FIXUP_HP_HEADSET_MIC, .name = "alc280-hp-headset"},
7218 + {.id = ALC221_FIXUP_HP_FRONT_MIC, .name = "alc221-hp-mic"},
7219 + {.id = ALC298_FIXUP_SPK_VOLUME, .name = "alc298-spk-volume"},
7220 +diff --git a/sound/pci/ice1712/ice1724.c b/sound/pci/ice1712/ice1724.c
7221 +index e62c11816683..f360b33a1042 100644
7222 +--- a/sound/pci/ice1712/ice1724.c
7223 ++++ b/sound/pci/ice1712/ice1724.c
7224 +@@ -647,6 +647,7 @@ static int snd_vt1724_set_pro_rate(struct snd_ice1712 *ice, unsigned int rate,
7225 + unsigned long flags;
7226 + unsigned char mclk_change;
7227 + unsigned int i, old_rate;
7228 ++ bool call_set_rate = false;
7229 +
7230 + if (rate > ice->hw_rates->list[ice->hw_rates->count - 1])
7231 + return -EINVAL;
7232 +@@ -670,7 +671,7 @@ static int snd_vt1724_set_pro_rate(struct snd_ice1712 *ice, unsigned int rate,
7233 + * setting clock rate for internal clock mode */
7234 + old_rate = ice->get_rate(ice);
7235 + if (force || (old_rate != rate))
7236 +- ice->set_rate(ice, rate);
7237 ++ call_set_rate = true;
7238 + else if (rate == ice->cur_rate) {
7239 + spin_unlock_irqrestore(&ice->reg_lock, flags);
7240 + return 0;
7241 +@@ -678,12 +679,14 @@ static int snd_vt1724_set_pro_rate(struct snd_ice1712 *ice, unsigned int rate,
7242 + }
7243 +
7244 + ice->cur_rate = rate;
7245 ++ spin_unlock_irqrestore(&ice->reg_lock, flags);
7246 ++
7247 ++ if (call_set_rate)
7248 ++ ice->set_rate(ice, rate);
7249 +
7250 + /* setting master clock */
7251 + mclk_change = ice->set_mclk(ice, rate);
7252 +
7253 +- spin_unlock_irqrestore(&ice->reg_lock, flags);
7254 +-
7255 + if (mclk_change && ice->gpio.i2s_mclk_changed)
7256 + ice->gpio.i2s_mclk_changed(ice);
7257 + if (ice->gpio.set_pro_rate)
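
The ice1724 hunk defers the ice->set_rate() callback until after the register spinlock is dropped, since the callback may block; only the decision is recorded under the lock. A userspace sketch of that record-under-lock, call-after-unlock shape (names hypothetical):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t reg_lock = PTHREAD_MUTEX_INITIALIZER;
static int cur_rate;

static void set_rate_hw(int rate)       /* may block: never call locked */
{
        printf("programming hardware to %d Hz\n", rate);
}

static void set_pro_rate(int rate, int force)
{
        int call_set_rate = 0;

        pthread_mutex_lock(&reg_lock);
        if (force || cur_rate != rate)
                call_set_rate = 1;      /* only record the decision */
        cur_rate = rate;
        pthread_mutex_unlock(&reg_lock);

        if (call_set_rate)              /* slow work happens unlocked */
                set_rate_hw(rate);
}

int main(void)
{
        set_pro_rate(48000, 0);
        set_pro_rate(48000, 0);         /* second call: rate unchanged, no-op */
        return 0;
}
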
7258 +diff --git a/sound/usb/card.h b/sound/usb/card.h
7259 +index 2991b9986f66..395403a2d33f 100644
7260 +--- a/sound/usb/card.h
7261 ++++ b/sound/usb/card.h
7262 +@@ -145,6 +145,7 @@ struct snd_usb_substream {
7263 + struct snd_usb_endpoint *sync_endpoint;
7264 + unsigned long flags;
7265 + bool need_setup_ep; /* (re)configure EP at prepare? */
7266 ++ bool need_setup_fmt; /* (re)configure fmt after resume? */
7267 + unsigned int speed; /* USB_SPEED_XXX */
7268 +
7269 + u64 formats; /* format bitmasks (all or'ed) */
7270 +diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
7271 +index ff5ab24f3bd1..a04c727dcd19 100644
7272 +--- a/sound/usb/pcm.c
7273 ++++ b/sound/usb/pcm.c
7274 +@@ -506,15 +506,15 @@ static int set_format(struct snd_usb_substream *subs, struct audioformat *fmt)
7275 + if (WARN_ON(!iface))
7276 + return -EINVAL;
7277 + alts = usb_altnum_to_altsetting(iface, fmt->altsetting);
7278 +- altsd = get_iface_desc(alts);
7279 +- if (WARN_ON(altsd->bAlternateSetting != fmt->altsetting))
7280 ++ if (WARN_ON(!alts))
7281 + return -EINVAL;
7282 ++ altsd = get_iface_desc(alts);
7283 +
7284 +- if (fmt == subs->cur_audiofmt)
7285 ++ if (fmt == subs->cur_audiofmt && !subs->need_setup_fmt)
7286 + return 0;
7287 +
7288 + /* close the old interface */
7289 +- if (subs->interface >= 0 && subs->interface != fmt->iface) {
7290 ++ if (subs->interface >= 0 && (subs->interface != fmt->iface || subs->need_setup_fmt)) {
7291 + if (!subs->stream->chip->keep_iface) {
7292 + err = usb_set_interface(subs->dev, subs->interface, 0);
7293 + if (err < 0) {
7294 +@@ -528,6 +528,9 @@ static int set_format(struct snd_usb_substream *subs, struct audioformat *fmt)
7295 + subs->altset_idx = 0;
7296 + }
7297 +
7298 ++ if (subs->need_setup_fmt)
7299 ++ subs->need_setup_fmt = false;
7300 ++
7301 + /* set interface */
7302 + if (iface->cur_altsetting != alts) {
7303 + err = snd_usb_select_mode_quirk(subs, fmt);
7304 +@@ -1735,6 +1738,13 @@ static int snd_usb_substream_playback_trigger(struct snd_pcm_substream *substrea
7305 + subs->data_endpoint->retire_data_urb = retire_playback_urb;
7306 + subs->running = 0;
7307 + return 0;
7308 ++ case SNDRV_PCM_TRIGGER_SUSPEND:
7309 ++ if (subs->stream->chip->setup_fmt_after_resume_quirk) {
7310 ++ stop_endpoints(subs, true);
7311 ++ subs->need_setup_fmt = true;
7312 ++ return 0;
7313 ++ }
7314 ++ break;
7315 + }
7316 +
7317 + return -EINVAL;
7318 +@@ -1767,6 +1777,13 @@ static int snd_usb_substream_capture_trigger(struct snd_pcm_substream *substream
7319 + subs->data_endpoint->retire_data_urb = retire_capture_urb;
7320 + subs->running = 1;
7321 + return 0;
7322 ++ case SNDRV_PCM_TRIGGER_SUSPEND:
7323 ++ if (subs->stream->chip->setup_fmt_after_resume_quirk) {
7324 ++ stop_endpoints(subs, true);
7325 ++ subs->need_setup_fmt = true;
7326 ++ return 0;
7327 ++ }
7328 ++ break;
7329 + }
7330 +
7331 + return -EINVAL;
7332 +diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
7333 +index 70c338f3ae24..d187aa6d50db 100644
7334 +--- a/sound/usb/quirks-table.h
7335 ++++ b/sound/usb/quirks-table.h
7336 +@@ -3466,7 +3466,8 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
7337 + .vendor_name = "Dell",
7338 + .product_name = "WD19 Dock",
7339 + .profile_name = "Dell-WD15-Dock",
7340 +- .ifnum = QUIRK_NO_INTERFACE
7341 ++ .ifnum = QUIRK_ANY_INTERFACE,
7342 ++ .type = QUIRK_SETUP_FMT_AFTER_RESUME
7343 + }
7344 + },
7345 + /* MOTU Microbook II */
7346 +diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
7347 +index 349e1e52996d..a81c2066499f 100644
7348 +--- a/sound/usb/quirks.c
7349 ++++ b/sound/usb/quirks.c
7350 +@@ -508,6 +508,16 @@ static int create_standard_mixer_quirk(struct snd_usb_audio *chip,
7351 + return snd_usb_create_mixer(chip, quirk->ifnum, 0);
7352 + }
7353 +
7354 ++
7355 ++static int setup_fmt_after_resume_quirk(struct snd_usb_audio *chip,
7356 ++ struct usb_interface *iface,
7357 ++ struct usb_driver *driver,
7358 ++ const struct snd_usb_audio_quirk *quirk)
7359 ++{
7360 ++ chip->setup_fmt_after_resume_quirk = 1;
7361 ++ return 1; /* Continue with creating streams and mixer */
7362 ++}
7363 ++
7364 + /*
7365 + * audio-interface quirks
7366 + *
7367 +@@ -546,6 +556,7 @@ int snd_usb_create_quirk(struct snd_usb_audio *chip,
7368 + [QUIRK_AUDIO_EDIROL_UAXX] = create_uaxx_quirk,
7369 + [QUIRK_AUDIO_ALIGN_TRANSFER] = create_align_transfer_quirk,
7370 + [QUIRK_AUDIO_STANDARD_MIXER] = create_standard_mixer_quirk,
7371 ++ [QUIRK_SETUP_FMT_AFTER_RESUME] = setup_fmt_after_resume_quirk,
7372 + };
7373 +
7374 + if (quirk->type < QUIRK_TYPE_COUNT) {
7375 +diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
7376 +index feb30f9c1716..e360680f45f3 100644
7377 +--- a/sound/usb/usbaudio.h
7378 ++++ b/sound/usb/usbaudio.h
7379 +@@ -33,7 +33,7 @@ struct snd_usb_audio {
7380 + wait_queue_head_t shutdown_wait;
7381 + unsigned int txfr_quirk:1; /* Subframe boundaries on transfers */
7382 + unsigned int tx_length_quirk:1; /* Put length specifier in transfers */
7383 +-
7384 ++ unsigned int setup_fmt_after_resume_quirk:1; /* setup the format to interface after resume */
7385 + int num_interfaces;
7386 + int num_suspended_intf;
7387 + int sample_rate_read_error;
7388 +@@ -98,6 +98,7 @@ enum quirk_type {
7389 + QUIRK_AUDIO_EDIROL_UAXX,
7390 + QUIRK_AUDIO_ALIGN_TRANSFER,
7391 + QUIRK_AUDIO_STANDARD_MIXER,
7392 ++ QUIRK_SETUP_FMT_AFTER_RESUME,
7393 +
7394 + QUIRK_TYPE_COUNT
7395 + };
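
Taken together with the pcm.c change, the quirks-table.h, quirks.c and usbaudio.h hunks wire the new quirk through the usual dispatch: the Dell WD19 table entry sets .type = QUIRK_SETUP_FMT_AFTER_RESUME, snd_usb_create_quirk() uses that enum value to index its handler array, and the handler merely sets a chip-wide bitfield and returns 1 so stream and mixer creation proceed normally. A compact sketch of that enum-indexed handler table (identifiers invented for illustration):

    #include <stdio.h>

    /* enum-indexed dispatch, mirroring how quirk->type selects a
     * handler above; all names here are illustrative */
    enum quirk_type { QUIRK_A, QUIRK_SETUP_FMT, QUIRK_TYPE_COUNT };

    struct chip { unsigned int setup_fmt_after_resume:1; };

    typedef int (*quirk_fn)(struct chip *);

    static int handle_a(struct chip *c) { (void)c; return 0; }

    static int handle_setup_fmt(struct chip *c)
    {
        c->setup_fmt_after_resume = 1;
        return 1;   /* continue with creating streams and mixer */
    }

    static int create_quirk(struct chip *c, enum quirk_type t)
    {
        static const quirk_fn table[QUIRK_TYPE_COUNT] = {
            [QUIRK_A]         = handle_a,
            [QUIRK_SETUP_FMT] = handle_setup_fmt,
        };

        if (t < QUIRK_TYPE_COUNT && table[t])
            return table[t](c);
        return -1;  /* unknown quirk type */
    }

    int main(void)
    {
        struct chip c = { 0 };

        printf("ret=%d flag=%d\n", create_quirk(&c, QUIRK_SETUP_FMT),
               (int) c.setup_fmt_after_resume);
        return 0;
    }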
7396 +diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
7397 +index 70a9f8716a4b..888814df758d 100644
7398 +--- a/tools/perf/util/machine.c
7399 ++++ b/tools/perf/util/machine.c
7400 +@@ -2403,7 +2403,7 @@ static int thread__resolve_callchain_sample(struct thread *thread,
7401 + }
7402 +
7403 + check_calls:
7404 +- if (callchain_param.order != ORDER_CALLEE) {
7405 ++ if (chain && callchain_param.order != ORDER_CALLEE) {
7406 + err = find_prev_cpumode(chain, thread, cursor, parent, root_al,
7407 + &cpumode, chain->nr - first_call);
7408 + if (err)
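
The tools/perf/util/machine.c fix guards the caller-order path: a sample can evidently arrive with no callchain attached, so chain must be checked before chain->nr is read. A reduced sketch of the guard:

    #include <stdio.h>
    #include <stddef.h>

    /* reduced model: check the pointer before dereferencing nr */
    struct ip_callchain { unsigned long long nr; };

    static unsigned long long entries(const struct ip_callchain *chain)
    {
        if (chain)          /* the added NULL guard */
            return chain->nr;
        return 0;
    }

    int main(void)
    {
        struct ip_callchain c = { .nr = 3 };

        printf("%llu %llu\n", entries(&c), entries(NULL));
        return 0;
    }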
7409 +diff --git a/tools/testing/selftests/rseq/param_test.c b/tools/testing/selftests/rseq/param_test.c
7410 +index eec2663261f2..e8a657a5f48a 100644
7411 +--- a/tools/testing/selftests/rseq/param_test.c
7412 ++++ b/tools/testing/selftests/rseq/param_test.c
7413 +@@ -15,7 +15,7 @@
7414 + #include <errno.h>
7415 + #include <stddef.h>
7416 +
7417 +-static inline pid_t gettid(void)
7418 ++static inline pid_t rseq_gettid(void)
7419 + {
7420 + return syscall(__NR_gettid);
7421 + }
7422 +@@ -373,11 +373,12 @@ void *test_percpu_spinlock_thread(void *arg)
7423 + rseq_percpu_unlock(&data->lock, cpu);
7424 + #ifndef BENCHMARK
7425 + if (i != 0 && !(i % (reps / 10)))
7426 +- printf_verbose("tid %d: count %lld\n", (int) gettid(), i);
7427 ++ printf_verbose("tid %d: count %lld\n",
7428 ++ (int) rseq_gettid(), i);
7429 + #endif
7430 + }
7431 + printf_verbose("tid %d: number of rseq abort: %d, signals delivered: %u\n",
7432 +- (int) gettid(), nr_abort, signals_delivered);
7433 ++ (int) rseq_gettid(), nr_abort, signals_delivered);
7434 + if (!opt_disable_rseq && thread_data->reg &&
7435 + rseq_unregister_current_thread())
7436 + abort();
7437 +@@ -454,11 +455,12 @@ void *test_percpu_inc_thread(void *arg)
7438 + } while (rseq_unlikely(ret));
7439 + #ifndef BENCHMARK
7440 + if (i != 0 && !(i % (reps / 10)))
7441 +- printf_verbose("tid %d: count %lld\n", (int) gettid(), i);
7442 ++ printf_verbose("tid %d: count %lld\n",
7443 ++ (int) rseq_gettid(), i);
7444 + #endif
7445 + }
7446 + printf_verbose("tid %d: number of rseq abort: %d, signals delivered: %u\n",
7447 +- (int) gettid(), nr_abort, signals_delivered);
7448 ++ (int) rseq_gettid(), nr_abort, signals_delivered);
7449 + if (!opt_disable_rseq && thread_data->reg &&
7450 + rseq_unregister_current_thread())
7451 + abort();
7452 +@@ -605,7 +607,7 @@ void *test_percpu_list_thread(void *arg)
7453 + }
7454 +
7455 + printf_verbose("tid %d: number of rseq abort: %d, signals delivered: %u\n",
7456 +- (int) gettid(), nr_abort, signals_delivered);
7457 ++ (int) rseq_gettid(), nr_abort, signals_delivered);
7458 + if (!opt_disable_rseq && rseq_unregister_current_thread())
7459 + abort();
7460 +
7461 +@@ -796,7 +798,7 @@ void *test_percpu_buffer_thread(void *arg)
7462 + }
7463 +
7464 + printf_verbose("tid %d: number of rseq abort: %d, signals delivered: %u\n",
7465 +- (int) gettid(), nr_abort, signals_delivered);
7466 ++ (int) rseq_gettid(), nr_abort, signals_delivered);
7467 + if (!opt_disable_rseq && rseq_unregister_current_thread())
7468 + abort();
7469 +
7470 +@@ -1011,7 +1013,7 @@ void *test_percpu_memcpy_buffer_thread(void *arg)
7471 + }
7472 +
7473 + printf_verbose("tid %d: number of rseq abort: %d, signals delivered: %u\n",
7474 +- (int) gettid(), nr_abort, signals_delivered);
7475 ++ (int) rseq_gettid(), nr_abort, signals_delivered);
7476 + if (!opt_disable_rseq && rseq_unregister_current_thread())
7477 + abort();
7478 +
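
The rseq selftest rename is a build fix rather than a behavior change: glibc 2.30 started exporting a gettid() wrapper from <unistd.h>, so the test's own static inline pid_t gettid(void) now collides with the libc declaration. Prefixing it as rseq_gettid() avoids the clash while keeping the raw syscall. A standalone version of the wrapper:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    /* prefixed wrapper: does not clash with glibc >= 2.30 gettid() */
    static inline pid_t rseq_gettid(void)
    {
        return syscall(__NR_gettid);
    }

    int main(void)
    {
        printf("tid %d\n", (int) rseq_gettid());
        return 0;
    }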
7479 +diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
7480 +index b505bb062d07..96bbda4f10fc 100644
7481 +--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
7482 ++++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
7483 +@@ -3147,7 +3147,18 @@ TEST(user_notification_basic)
7484 + EXPECT_GT(poll(&pollfd, 1, -1), 0);
7485 + EXPECT_EQ(pollfd.revents, POLLIN);
7486 +
7487 +- EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
7488 ++ /* Test that we can't pass garbage to the kernel. */
7489 ++ memset(&req, 0, sizeof(req));
7490 ++ req.pid = -1;
7491 ++ errno = 0;
7492 ++ ret = ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req);
7493 ++ EXPECT_EQ(-1, ret);
7494 ++ EXPECT_EQ(EINVAL, errno);
7495 ++
7496 ++ if (ret) {
7497 ++ req.pid = 0;
7498 ++ EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
7499 ++ }
7500 +
7501 + pollfd.fd = listener;
7502 + pollfd.events = POLLIN | POLLOUT;
7503 +@@ -3267,6 +3278,7 @@ TEST(user_notification_signal)
7504 +
7505 + close(sk_pair[1]);
7506 +
7507 ++ memset(&req, 0, sizeof(req));
7508 + EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
7509 +
7510 + EXPECT_EQ(kill(pid, SIGUSR1), 0);
7511 +@@ -3285,6 +3297,7 @@ TEST(user_notification_signal)
7512 + EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), -1);
7513 + EXPECT_EQ(errno, ENOENT);
7514 +
7515 ++ memset(&req, 0, sizeof(req));
7516 + EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
7517 +
7518 + resp.id = req.id;
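
The seccomp_bpf.c hunks track a kernel-side hardening of SECCOMP_IOCTL_NOTIF_RECV: the kernel now insists that the seccomp_notif buffer handed in is zeroed, rejecting stray non-zero fields with EINVAL. The first hunk asserts the new failure mode explicitly (a request with req.pid = -1 must fail), and the later hunks add the memset() calls that the stricter ABI requires before every receive.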
7519 +diff --git a/usr/gen_initramfs_list.sh b/usr/gen_initramfs_list.sh
7520 +index 0aad760fcd8c..2bbac73e6477 100755
7521 +--- a/usr/gen_initramfs_list.sh
7522 ++++ b/usr/gen_initramfs_list.sh
7523 +@@ -128,7 +128,7 @@ parse() {
7524 + str="${ftype} ${name} ${location} ${str}"
7525 + ;;
7526 + "nod")
7527 +- local dev=`LC_ALL=C ls -l "${location}"`
7528 ++ local dev="`LC_ALL=C ls -l "${location}"`"
7529 + local maj=`field 5 ${dev}`
7530 + local min=`field 6 ${dev}`
7531 + maj=${maj%,}
7532
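The gen_initramfs_list.sh one-liner is a quoting fix: without the outer quotes, the multi-field output of `ls -l` is word-split before local sees it, and the shell then tries to interpret the extra words as further variable names (a "bad variable name" error under POSIX shells such as dash). Quoting the command substitution keeps the whole line in dev for the following `field` extractions.
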
7533 diff --git a/1009_linux-5.4.10.patch b/1009_linux-5.4.10.patch
7534 new file mode 100644
7535 index 0000000..ef48304
7536 --- /dev/null
7537 +++ b/1009_linux-5.4.10.patch
7538 @@ -0,0 +1,26 @@
7539 +diff --git a/Makefile b/Makefile
7540 +index 3ba15c3528c8..726bb3dacd5b 100644
7541 +--- a/Makefile
7542 ++++ b/Makefile
7543 +@@ -1,7 +1,7 @@
7544 + # SPDX-License-Identifier: GPL-2.0
7545 + VERSION = 5
7546 + PATCHLEVEL = 4
7547 +-SUBLEVEL = 9
7548 ++SUBLEVEL = 10
7549 + EXTRAVERSION =
7550 + NAME = Kleptomaniac Octopus
7551 +
7552 +diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
7553 +index 460afa415434..d30a2e6e68b4 100644
7554 +--- a/arch/powerpc/mm/mem.c
7555 ++++ b/arch/powerpc/mm/mem.c
7556 +@@ -120,7 +120,7 @@ static void flush_dcache_range_chunked(unsigned long start, unsigned long stop,
7557 + unsigned long i;
7558 +
7559 + for (i = start; i < stop; i += chunk) {
7560 +- flush_dcache_range(i, min(stop, start + chunk));
7561 ++ flush_dcache_range(i, min(stop, i + chunk));
7562 + cond_resched();
7563 + }
7564 + }
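
The powerpc fix in 5.4.10 corrects a non-advancing flush window: the upper bound of each flush used start + chunk, which is constant across iterations, so every pass flushed the same first chunk while the rest of the range went untouched; i + chunk moves the window along with the loop. A small demo of the two bounds (MIN stands in for the kernel's min()):

    #include <stdio.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
        unsigned long start = 0, stop = 10, chunk = 4, i;

        /* buggy end is stuck at start + chunk = 4; fixed end advances */
        for (i = start; i < stop; i += chunk)
            printf("i=%lu  buggy end=%lu  fixed end=%lu\n",
                   i, MIN(stop, start + chunk), MIN(stop, i + chunk));
        return 0;
    }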