From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.8 commit in: /
Date: Wed, 09 Sep 2020 18:02:28
Message-Id: 1599674532.9823217538d25d8a5408a78372823d3649ae9eed.mpagano@gentoo
commit: 9823217538d25d8a5408a78372823d3649ae9eed
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Sep 9 18:02:12 2020 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Sep 9 18:02:12 2020 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=98232175

Linux patch 5.8.8

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

0000_README | 4 +
1007_linux-5.8.8.patch | 7178 ++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 7182 insertions(+)

diff --git a/0000_README b/0000_README
index 62e43d7..93860e0 100644
--- a/0000_README
+++ b/0000_README
@@ -71,6 +71,10 @@ Patch: 1006_linux-5.8.7.patch
From: http://www.kernel.org
Desc: Linux 5.8.7

+Patch: 1007_linux-5.8.8.patch
+From: http://www.kernel.org
+Desc: Linux 5.8.8
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1007_linux-5.8.8.patch b/1007_linux-5.8.8.patch
new file mode 100644
index 0000000..1dd4fdb
--- /dev/null
+++ b/1007_linux-5.8.8.patch
@@ -0,0 +1,7178 @@
+diff --git a/Documentation/devicetree/bindings/mmc/mtk-sd.txt b/Documentation/devicetree/bindings/mmc/mtk-sd.txt
+index 8a532f4453f26..09aecec47003a 100644
+--- a/Documentation/devicetree/bindings/mmc/mtk-sd.txt
++++ b/Documentation/devicetree/bindings/mmc/mtk-sd.txt
+@@ -49,6 +49,8 @@ Optional properties:
+ error caused by stop clock(fifo full)
+ Valid range = [0:0x7]. if not present, default value is 0.
+ applied to compatible "mediatek,mt2701-mmc".
++- resets: Phandle and reset specifier pair to softreset line of MSDC IP.
++- reset-names: Should be "hrst".
+
+ Examples:
+ mmc0: mmc@11230000 {
+diff --git a/Documentation/filesystems/affs.rst b/Documentation/filesystems/affs.rst
+index 7f1a40dce6d3d..5776cbd5fa532 100644
+--- a/Documentation/filesystems/affs.rst
++++ b/Documentation/filesystems/affs.rst
+@@ -110,13 +110,15 @@ The Amiga protection flags RWEDRWEDHSPARWED are handled as follows:
+
+ - R maps to r for user, group and others. On directories, R implies x.
+
+- - If both W and D are allowed, w will be set.
++ - W maps to w.
+
+ - E maps to x.
+
+- - H and P are always retained and ignored under Linux.
++ - D is ignored.
+
+- - A is always reset when a file is written to.
++ - H, S and P are always retained and ignored under Linux.
++
++ - A is cleared when a file is written to.
+
+ User id and group id will be used unless set[gu]id are given as mount
+ options. Since most of the Amiga file systems are single user systems
+@@ -128,11 +130,13 @@ Linux -> Amiga:
+
+ The Linux rwxrwxrwx file mode is handled as follows:
+
+- - r permission will set R for user, group and others.
++ - r permission will allow R for user, group and others.
++
++ - w permission will allow W for user, group and others.
+
+- - w permission will set W and D for user, group and others.
++ - x permission of the user will allow E for plain files.
+
+- - x permission of the user will set E for plain files.
++ - D will be allowed for user, group and others.
+
+ - All other flags (suid, sgid, ...) are ignored and will
+ not be retained.
+diff --git a/Makefile b/Makefile
+index 5081bd85af29f..dba4d8f2f7862 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 8
+-SUBLEVEL = 7
++SUBLEVEL = 8
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+
+diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c
+index 661fd842ea97d..79849f37e782c 100644
+--- a/arch/arc/kernel/perf_event.c
++++ b/arch/arc/kernel/perf_event.c
+@@ -562,7 +562,7 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
+ {
+ struct arc_reg_pct_build pct_bcr;
+ struct arc_reg_cc_build cc_bcr;
+- int i, has_interrupts;
++ int i, has_interrupts, irq;
+ int counter_size; /* in bits */
+
+ union cc_name {
+@@ -637,13 +637,7 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
+ .attr_groups = arc_pmu->attr_groups,
+ };
+
+- if (has_interrupts) {
+- int irq = platform_get_irq(pdev, 0);
+-
+- if (irq < 0) {
+- pr_err("Cannot get IRQ number for the platform\n");
+- return -ENODEV;
+- }
++ if (has_interrupts && (irq = platform_get_irq(pdev, 0) >= 0)) {
+
+ arc_pmu->irq = irq;
+
+@@ -652,9 +646,9 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
+ this_cpu_ptr(&arc_pmu_cpu));
+
+ on_each_cpu(arc_cpu_pmu_irq_init, &irq, 1);
+-
+- } else
++ } else {
+ arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
++ }
+
+ /*
+ * perf parser doesn't really like '-' symbol in events name, so let's
+diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
+index e7bdc2ac1c87c..8fcb9e25aa648 100644
+--- a/arch/arc/mm/init.c
++++ b/arch/arc/mm/init.c
+@@ -27,8 +27,8 @@ static unsigned long low_mem_sz;
+
+ #ifdef CONFIG_HIGHMEM
+ static unsigned long min_high_pfn, max_high_pfn;
+-static u64 high_mem_start;
+-static u64 high_mem_sz;
++static phys_addr_t high_mem_start;
++static phys_addr_t high_mem_sz;
+ #endif
+
+ #ifdef CONFIG_DISCONTIGMEM
+@@ -70,6 +70,7 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size)
+ high_mem_sz = size;
+ in_use = 1;
+ memblock_add_node(base, size, 1);
++ memblock_reserve(base, size);
+ #endif
+ }
+
+@@ -158,7 +159,7 @@ void __init setup_arch_memory(void)
+ min_high_pfn = PFN_DOWN(high_mem_start);
+ max_high_pfn = PFN_DOWN(high_mem_start + high_mem_sz);
+
+- max_zone_pfn[ZONE_HIGHMEM] = max_high_pfn;
++ max_zone_pfn[ZONE_HIGHMEM] = min_low_pfn;
+
+ high_memory = (void *)(min_high_pfn << PAGE_SHIFT);
+ kmap_init();
+@@ -167,22 +168,26 @@ void __init setup_arch_memory(void)
+ free_area_init(max_zone_pfn);
+ }
+
+-/*
+- * mem_init - initializes memory
+- *
+- * Frees up bootmem
+- * Calculates and displays memory available/used
+- */
+-void __init mem_init(void)
++static void __init highmem_init(void)
+ {
+ #ifdef CONFIG_HIGHMEM
+ unsigned long tmp;
+
+- reset_all_zones_managed_pages();
++ memblock_free(high_mem_start, high_mem_sz);
+ for (tmp = min_high_pfn; tmp < max_high_pfn; tmp++)
+ free_highmem_page(pfn_to_page(tmp));
+ #endif
++}
+
++/*
++ * mem_init - initializes memory
++ *
++ * Frees up bootmem
++ * Calculates and displays memory available/used
++ */
++void __init mem_init(void)
++{
+ memblock_free_all();
++ highmem_init();
+ mem_init_print_info(NULL);
+ }
+diff --git a/arch/arm64/boot/dts/mediatek/mt7622.dtsi b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
+index 1a39e0ef776bb..5b9ec032ce8d8 100644
+--- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
+@@ -686,6 +686,8 @@
+ clocks = <&pericfg CLK_PERI_MSDC30_0_PD>,
+ <&topckgen CLK_TOP_MSDC50_0_SEL>;
+ clock-names = "source", "hclk";
++ resets = <&pericfg MT7622_PERI_MSDC0_SW_RST>;
++ reset-names = "hrst";
+ status = "disabled";
+ };
+
+diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
+index efce5defcc5cf..011eb6bbf81a5 100644
+--- a/arch/mips/kernel/perf_event_mipsxx.c
++++ b/arch/mips/kernel/perf_event_mipsxx.c
+@@ -1898,8 +1898,8 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
+ (base_id >= 64 && base_id < 90) ||
+ (base_id >= 128 && base_id < 164) ||
+ (base_id >= 192 && base_id < 200) ||
+- (base_id >= 256 && base_id < 274) ||
+- (base_id >= 320 && base_id < 358) ||
++ (base_id >= 256 && base_id < 275) ||
++ (base_id >= 320 && base_id < 361) ||
+ (base_id >= 384 && base_id < 574))
+ break;
+
+diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
+index 2f513506a3d52..1dbfb5aadffd6 100644
+--- a/arch/mips/kernel/smp-bmips.c
++++ b/arch/mips/kernel/smp-bmips.c
+@@ -239,6 +239,8 @@ static int bmips_boot_secondary(int cpu, struct task_struct *idle)
+ */
+ static void bmips_init_secondary(void)
+ {
++ bmips_cpu_setup();
++
+ switch (current_cpu_type()) {
+ case CPU_BMIPS4350:
+ case CPU_BMIPS4380:
+diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
+index e664d8b43e72b..2e9d0637591c9 100644
+--- a/arch/mips/kernel/traps.c
++++ b/arch/mips/kernel/traps.c
+@@ -1286,6 +1286,18 @@ static int enable_restore_fp_context(int msa)
+ err = own_fpu_inatomic(1);
+ if (msa && !err) {
+ enable_msa();
++ /*
++ * with MSA enabled, userspace can see MSACSR
++ * and MSA regs, but the values in them are from
++ * other task before current task, restore them
++ * from saved fp/msa context
++ */
++ write_msa_csr(current->thread.fpu.msacsr);
++ /*
++ * own_fpu_inatomic(1) just restore low 64bit,
++ * fix the high 64bit
++ */
++ init_msa_upper();
+ set_thread_flag(TIF_USEDMSA);
+ set_thread_flag(TIF_MSA_CTX_LIVE);
+ }
+diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
+index 49569e5666d7a..cb32a00d286e1 100644
+--- a/arch/mips/mm/c-r4k.c
++++ b/arch/mips/mm/c-r4k.c
+@@ -1712,7 +1712,11 @@ static void setup_scache(void)
+ printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n",
+ scache_size >> 10,
+ way_string[c->scache.ways], c->scache.linesz);
++
++ if (current_cpu_type() == CPU_BMIPS5000)
++ c->options |= MIPS_CPU_INCLUSIVE_CACHES;
+ }
++
+ #else
+ if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
+ panic("Dunno how to handle MIPS32 / MIPS64 second level cache")
+diff --git a/arch/mips/oprofile/op_model_mipsxx.c b/arch/mips/oprofile/op_model_mipsxx.c
+index 1493c49ca47a1..55d7b7fd18b6f 100644
+--- a/arch/mips/oprofile/op_model_mipsxx.c
++++ b/arch/mips/oprofile/op_model_mipsxx.c
+@@ -245,7 +245,6 @@ static int mipsxx_perfcount_handler(void)
+
+ switch (counters) {
+ #define HANDLE_COUNTER(n) \
+- fallthrough; \
+ case n + 1: \
+ control = r_c0_perfctrl ## n(); \
+ counter = r_c0_perfcntr ## n(); \
+@@ -256,8 +255,11 @@ static int mipsxx_perfcount_handler(void)
+ handled = IRQ_HANDLED; \
+ }
+ HANDLE_COUNTER(3)
++ fallthrough;
+ HANDLE_COUNTER(2)
++ fallthrough;
+ HANDLE_COUNTER(1)
++ fallthrough;
+ HANDLE_COUNTER(0)
+ }
+
+diff --git a/arch/mips/sni/a20r.c b/arch/mips/sni/a20r.c
+index 0ecffb65fd6d1..b09dc844985a8 100644
+--- a/arch/mips/sni/a20r.c
++++ b/arch/mips/sni/a20r.c
+@@ -222,8 +222,8 @@ void __init sni_a20r_irq_init(void)
+ irq_set_chip_and_handler(i, &a20r_irq_type, handle_level_irq);
+ sni_hwint = a20r_hwint;
+ change_c0_status(ST0_IM, IE_IRQ0);
+- if (request_irq(SNI_A20R_IRQ_BASE + 3, sni_isa_irq_handler, 0, "ISA",
+- NULL))
++ if (request_irq(SNI_A20R_IRQ_BASE + 3, sni_isa_irq_handler,
++ IRQF_SHARED, "ISA", sni_isa_irq_handler))
+ pr_err("Failed to register ISA interrupt\n");
+ }
+
+diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
+index 4907a5149a8a3..79e074ffad139 100644
+--- a/arch/s390/Kconfig
++++ b/arch/s390/Kconfig
+@@ -30,7 +30,7 @@ config GENERIC_BUG_RELATIVE_POINTERS
+ def_bool y
+
+ config GENERIC_LOCKBREAK
+- def_bool y if PREEMPTTION
++ def_bool y if PREEMPTION
+
+ config PGSTE
+ def_bool y if KVM
+diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h
+index 50b4ce8cddfdc..918f0ba4f4d20 100644
+--- a/arch/s390/include/asm/percpu.h
++++ b/arch/s390/include/asm/percpu.h
+@@ -29,7 +29,7 @@
+ typedef typeof(pcp) pcp_op_T__; \
+ pcp_op_T__ old__, new__, prev__; \
+ pcp_op_T__ *ptr__; \
+- preempt_disable(); \
++ preempt_disable_notrace(); \
+ ptr__ = raw_cpu_ptr(&(pcp)); \
+ prev__ = *ptr__; \
+ do { \
+@@ -37,7 +37,7 @@
+ new__ = old__ op (val); \
+ prev__ = cmpxchg(ptr__, old__, new__); \
+ } while (prev__ != old__); \
+- preempt_enable(); \
++ preempt_enable_notrace(); \
+ new__; \
+ })
+
+@@ -68,7 +68,7 @@
+ typedef typeof(pcp) pcp_op_T__; \
+ pcp_op_T__ val__ = (val); \
+ pcp_op_T__ old__, *ptr__; \
+- preempt_disable(); \
++ preempt_disable_notrace(); \
+ ptr__ = raw_cpu_ptr(&(pcp)); \
+ if (__builtin_constant_p(val__) && \
+ ((szcast)val__ > -129) && ((szcast)val__ < 128)) { \
+@@ -84,7 +84,7 @@
+ : [val__] "d" (val__) \
+ : "cc"); \
+ } \
+- preempt_enable(); \
++ preempt_enable_notrace(); \
+ }
+
+ #define this_cpu_add_4(pcp, val) arch_this_cpu_add(pcp, val, "laa", "asi", int)
+@@ -95,14 +95,14 @@
+ typedef typeof(pcp) pcp_op_T__; \
+ pcp_op_T__ val__ = (val); \
+ pcp_op_T__ old__, *ptr__; \
+- preempt_disable(); \
++ preempt_disable_notrace(); \
+ ptr__ = raw_cpu_ptr(&(pcp)); \
+ asm volatile( \
+ op " %[old__],%[val__],%[ptr__]\n" \
+ : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \
+ : [val__] "d" (val__) \
+ : "cc"); \
+- preempt_enable(); \
++ preempt_enable_notrace(); \
+ old__ + val__; \
+ })
+
+@@ -114,14 +114,14 @@
+ typedef typeof(pcp) pcp_op_T__; \
+ pcp_op_T__ val__ = (val); \
+ pcp_op_T__ old__, *ptr__; \
+- preempt_disable(); \
++ preempt_disable_notrace(); \
+ ptr__ = raw_cpu_ptr(&(pcp)); \
+ asm volatile( \
+ op " %[old__],%[val__],%[ptr__]\n" \
+ : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \
+ : [val__] "d" (val__) \
+ : "cc"); \
+- preempt_enable(); \
++ preempt_enable_notrace(); \
+ }
+
+ #define this_cpu_and_4(pcp, val) arch_this_cpu_to_op(pcp, val, "lan")
+@@ -136,10 +136,10 @@
+ typedef typeof(pcp) pcp_op_T__; \
+ pcp_op_T__ ret__; \
+ pcp_op_T__ *ptr__; \
+- preempt_disable(); \
++ preempt_disable_notrace(); \
+ ptr__ = raw_cpu_ptr(&(pcp)); \
+ ret__ = cmpxchg(ptr__, oval, nval); \
+- preempt_enable(); \
++ preempt_enable_notrace(); \
+ ret__; \
+ })
+
+@@ -152,10 +152,10 @@
+ ({ \
+ typeof(pcp) *ptr__; \
+ typeof(pcp) ret__; \
+- preempt_disable(); \
++ preempt_disable_notrace(); \
+ ptr__ = raw_cpu_ptr(&(pcp)); \
+ ret__ = xchg(ptr__, nval); \
+- preempt_enable(); \
++ preempt_enable_notrace(); \
+ ret__; \
+ })
+
+@@ -171,11 +171,11 @@
+ typeof(pcp1) *p1__; \
+ typeof(pcp2) *p2__; \
+ int ret__; \
+- preempt_disable(); \
++ preempt_disable_notrace(); \
+ p1__ = raw_cpu_ptr(&(pcp1)); \
+ p2__ = raw_cpu_ptr(&(pcp2)); \
+ ret__ = __cmpxchg_double(p1__, p2__, o1__, o2__, n1__, n2__); \
+- preempt_enable(); \
++ preempt_enable_notrace(); \
+ ret__; \
+ })
+
+diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
+index f09288431f289..606c4e25ee934 100644
+--- a/arch/x86/entry/common.c
++++ b/arch/x86/entry/common.c
+@@ -55,8 +55,16 @@ static noinstr void check_user_regs(struct pt_regs *regs)
+ * state, not the interrupt state as imagined by Xen.
+ */
+ unsigned long flags = native_save_fl();
+- WARN_ON_ONCE(flags & (X86_EFLAGS_AC | X86_EFLAGS_DF |
+- X86_EFLAGS_NT));
++ unsigned long mask = X86_EFLAGS_DF | X86_EFLAGS_NT;
++
++ /*
++ * For !SMAP hardware we patch out CLAC on entry.
++ */
++ if (boot_cpu_has(X86_FEATURE_SMAP) ||
++ (IS_ENABLED(CONFIG_64_BIT) && boot_cpu_has(X86_FEATURE_XENPV)))
++ mask |= X86_EFLAGS_AC;
++
++ WARN_ON_ONCE(flags & mask);
+
+ /* We think we came from user mode. Make sure pt_regs agrees. */
+ WARN_ON_ONCE(!user_mode(regs));
+diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
+index 255b2dde2c1b7..9675d8b2c6666 100644
+--- a/arch/x86/include/asm/ptrace.h
++++ b/arch/x86/include/asm/ptrace.h
+@@ -322,8 +322,8 @@ static inline unsigned long regs_get_kernel_argument(struct pt_regs *regs,
+ static const unsigned int argument_offs[] = {
+ #ifdef __i386__
+ offsetof(struct pt_regs, ax),
+- offsetof(struct pt_regs, cx),
+ offsetof(struct pt_regs, dx),
++ offsetof(struct pt_regs, cx),
+ #define NR_REG_ARGUMENTS 3
+ #else
+ offsetof(struct pt_regs, di),
+diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
+index 9f69cc497f4b6..0e059b73437b4 100644
+--- a/arch/x86/include/asm/switch_to.h
++++ b/arch/x86/include/asm/switch_to.h
+@@ -12,6 +12,27 @@ struct task_struct *__switch_to_asm(struct task_struct *prev,
+ __visible struct task_struct *__switch_to(struct task_struct *prev,
+ struct task_struct *next);
+
++/* This runs runs on the previous thread's stack. */
++static inline void prepare_switch_to(struct task_struct *next)
++{
++#ifdef CONFIG_VMAP_STACK
++ /*
++ * If we switch to a stack that has a top-level paging entry
++ * that is not present in the current mm, the resulting #PF will
++ * will be promoted to a double-fault and we'll panic. Probe
++ * the new stack now so that vmalloc_fault can fix up the page
++ * tables if needed. This can only happen if we use a stack
++ * in vmap space.
++ *
++ * We assume that the stack is aligned so that it never spans
++ * more than one top-level paging entry.
++ *
++ * To minimize cache pollution, just follow the stack pointer.
++ */
++ READ_ONCE(*(unsigned char *)next->thread.sp);
++#endif
++}
++
+ asmlinkage void ret_from_fork(void);
+
+ /*
+@@ -46,6 +67,8 @@ struct fork_frame {
+
+ #define switch_to(prev, next, last) \
+ do { \
++ prepare_switch_to(next); \
++ \
+ ((last) = __switch_to_asm((prev), (next))); \
+ } while (0)
+
+diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
+index fd945ce78554e..e6d7894ad1279 100644
+--- a/arch/x86/kernel/setup_percpu.c
++++ b/arch/x86/kernel/setup_percpu.c
+@@ -287,9 +287,9 @@ void __init setup_per_cpu_areas(void)
+ /*
+ * Sync back kernel address range again. We already did this in
+ * setup_arch(), but percpu data also needs to be available in
+- * the smpboot asm and arch_sync_kernel_mappings() doesn't sync to
+- * swapper_pg_dir on 32-bit. The per-cpu mappings need to be available
+- * there too.
++ * the smpboot asm. We can't reliably pick up percpu mappings
++ * using vmalloc_fault(), because exception dispatch needs
++ * percpu data.
+ *
+ * FIXME: Can the later sync in setup_cpu_entry_areas() replace
+ * this call?
+diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
+index b7cb3e0716f7d..69cc823109740 100644
+--- a/arch/x86/kernel/traps.c
++++ b/arch/x86/kernel/traps.c
+@@ -733,20 +733,9 @@ static bool is_sysenter_singlestep(struct pt_regs *regs)
+ #endif
+ }
+
+-static __always_inline void debug_enter(unsigned long *dr6, unsigned long *dr7)
++static __always_inline unsigned long debug_read_clear_dr6(void)
+ {
+- /*
+- * Disable breakpoints during exception handling; recursive exceptions
+- * are exceedingly 'fun'.
+- *
+- * Since this function is NOKPROBE, and that also applies to
+- * HW_BREAKPOINT_X, we can't hit a breakpoint before this (XXX except a
+- * HW_BREAKPOINT_W on our stack)
+- *
+- * Entry text is excluded for HW_BP_X and cpu_entry_area, which
+- * includes the entry stack is excluded for everything.
+- */
+- *dr7 = local_db_save();
++ unsigned long dr6;
+
+ /*
+ * The Intel SDM says:
+@@ -759,15 +748,12 @@ static __always_inline void debug_enter(unsigned long *dr6, unsigned long *dr7)
+ *
+ * Keep it simple: clear DR6 immediately.
+ */
+- get_debugreg(*dr6, 6);
++ get_debugreg(dr6, 6);
+ set_debugreg(0, 6);
+ /* Filter out all the reserved bits which are preset to 1 */
+- *dr6 &= ~DR6_RESERVED;
+-}
++ dr6 &= ~DR6_RESERVED;
+
+-static __always_inline void debug_exit(unsigned long dr7)
+-{
+- local_db_restore(dr7);
++ return dr6;
+ }
+
+ /*
+@@ -867,6 +853,19 @@ out:
+ static __always_inline void exc_debug_kernel(struct pt_regs *regs,
+ unsigned long dr6)
+ {
++ /*
++ * Disable breakpoints during exception handling; recursive exceptions
++ * are exceedingly 'fun'.
++ *
++ * Since this function is NOKPROBE, and that also applies to
++ * HW_BREAKPOINT_X, we can't hit a breakpoint before this (XXX except a
++ * HW_BREAKPOINT_W on our stack)
++ *
++ * Entry text is excluded for HW_BP_X and cpu_entry_area, which
++ * includes the entry stack is excluded for everything.
++ */
++ unsigned long dr7 = local_db_save();
++
+ nmi_enter();
+ instrumentation_begin();
+ trace_hardirqs_off_finish();
+@@ -890,6 +889,8 @@ static __always_inline void exc_debug_kernel(struct pt_regs *regs,
+ trace_hardirqs_on_prepare();
+ instrumentation_end();
+ nmi_exit();
++
++ local_db_restore(dr7);
+ }
+
+ static __always_inline void exc_debug_user(struct pt_regs *regs,
+@@ -901,6 +902,15 @@ static __always_inline void exc_debug_user(struct pt_regs *regs,
+ */
+ WARN_ON_ONCE(!user_mode(regs));
+
++ /*
++ * NB: We can't easily clear DR7 here because
++ * idtentry_exit_to_usermode() can invoke ptrace, schedule, access
++ * user memory, etc. This means that a recursive #DB is possible. If
++ * this happens, that #DB will hit exc_debug_kernel() and clear DR7.
++ * Since we're not on the IST stack right now, everything will be
++ * fine.
++ */
++
+ idtentry_enter_user(regs);
+ instrumentation_begin();
+
+@@ -913,36 +923,24 @@ static __always_inline void exc_debug_user(struct pt_regs *regs,
+ /* IST stack entry */
+ DEFINE_IDTENTRY_DEBUG(exc_debug)
+ {
+- unsigned long dr6, dr7;
+-
+- debug_enter(&dr6, &dr7);
+- exc_debug_kernel(regs, dr6);
+- debug_exit(dr7);
++ exc_debug_kernel(regs, debug_read_clear_dr6());
+ }
+
+ /* User entry, runs on regular task stack */
+ DEFINE_IDTENTRY_DEBUG_USER(exc_debug)
+ {
+- unsigned long dr6, dr7;
+-
+- debug_enter(&dr6, &dr7);
+- exc_debug_user(regs, dr6);
+- debug_exit(dr7);
++ exc_debug_user(regs, debug_read_clear_dr6());
+ }
+ #else
+ /* 32 bit does not have separate entry points. */
+ DEFINE_IDTENTRY_RAW(exc_debug)
+ {
+- unsigned long dr6, dr7;
+-
+- debug_enter(&dr6, &dr7);
++ unsigned long dr6 = debug_read_clear_dr6();
+
+ if (user_mode(regs))
+ exc_debug_user(regs, dr6);
+ else
+ exc_debug_kernel(regs, dr6);
+-
+- debug_exit(dr7);
+ }
+ #endif
+
+diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
+index 1ead568c01012..370c314b8f44d 100644
+--- a/arch/x86/mm/fault.c
++++ b/arch/x86/mm/fault.c
+@@ -215,6 +215,44 @@ void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
+ }
+ }
+
++/*
++ * 32-bit:
++ *
++ * Handle a fault on the vmalloc or module mapping area
++ */
++static noinline int vmalloc_fault(unsigned long address)
++{
++ unsigned long pgd_paddr;
++ pmd_t *pmd_k;
++ pte_t *pte_k;
++
++ /* Make sure we are in vmalloc area: */
++ if (!(address >= VMALLOC_START && address < VMALLOC_END))
++ return -1;
++
++ /*
++ * Synchronize this task's top level page-table
++ * with the 'reference' page table.
++ *
++ * Do _not_ use "current" here. We might be inside
++ * an interrupt in the middle of a task switch..
++ */
++ pgd_paddr = read_cr3_pa();
++ pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
++ if (!pmd_k)
++ return -1;
++
++ if (pmd_large(*pmd_k))
++ return 0;
++
++ pte_k = pte_offset_kernel(pmd_k, address);
++ if (!pte_present(*pte_k))
++ return -1;
++
++ return 0;
++}
++NOKPROBE_SYMBOL(vmalloc_fault);
++
+ /*
+ * Did it hit the DOS screen memory VA from vm86 mode?
+ */
+@@ -279,6 +317,79 @@ out:
+
+ #else /* CONFIG_X86_64: */
+
++/*
++ * 64-bit:
++ *
++ * Handle a fault on the vmalloc area
++ */
++static noinline int vmalloc_fault(unsigned long address)
++{
++ pgd_t *pgd, *pgd_k;
++ p4d_t *p4d, *p4d_k;
++ pud_t *pud;
++ pmd_t *pmd;
++ pte_t *pte;
++
++ /* Make sure we are in vmalloc area: */
++ if (!(address >= VMALLOC_START && address < VMALLOC_END))
++ return -1;
++
++ /*
++ * Copy kernel mappings over when needed. This can also
++ * happen within a race in page table update. In the later
++ * case just flush:
++ */
++ pgd = (pgd_t *)__va(read_cr3_pa()) + pgd_index(address);
++ pgd_k = pgd_offset_k(address);
++ if (pgd_none(*pgd_k))
++ return -1;
++
++ if (pgtable_l5_enabled()) {
++ if (pgd_none(*pgd)) {
++ set_pgd(pgd, *pgd_k);
++ arch_flush_lazy_mmu_mode();
++ } else {
++ BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_k));
++ }
++ }
++
++ /* With 4-level paging, copying happens on the p4d level. */
++ p4d = p4d_offset(pgd, address);
++ p4d_k = p4d_offset(pgd_k, address);
++ if (p4d_none(*p4d_k))
++ return -1;
++
++ if (p4d_none(*p4d) && !pgtable_l5_enabled()) {
++ set_p4d(p4d, *p4d_k);
++ arch_flush_lazy_mmu_mode();
++ } else {
++ BUG_ON(p4d_pfn(*p4d) != p4d_pfn(*p4d_k));
++ }
++
++ BUILD_BUG_ON(CONFIG_PGTABLE_LEVELS < 4);
++
++ pud = pud_offset(p4d, address);
++ if (pud_none(*pud))
++ return -1;
++
++ if (pud_large(*pud))
++ return 0;
++
++ pmd = pmd_offset(pud, address);
++ if (pmd_none(*pmd))
++ return -1;
++
++ if (pmd_large(*pmd))
++ return 0;
++
++ pte = pte_offset_kernel(pmd, address);
++ if (!pte_present(*pte))
++ return -1;
++
++ return 0;
++}
++NOKPROBE_SYMBOL(vmalloc_fault);
++
+ #ifdef CONFIG_CPU_SUP_AMD
+ static const char errata93_warning[] =
+ KERN_ERR
+@@ -1111,6 +1222,29 @@ do_kern_addr_fault(struct pt_regs *regs, unsigned long hw_error_code,
+ */
+ WARN_ON_ONCE(hw_error_code & X86_PF_PK);
+
++ /*
++ * We can fault-in kernel-space virtual memory on-demand. The
++ * 'reference' page table is init_mm.pgd.
++ *
++ * NOTE! We MUST NOT take any locks for this case. We may
++ * be in an interrupt or a critical region, and should
++ * only copy the information from the master page table,
++ * nothing more.
++ *
++ * Before doing this on-demand faulting, ensure that the
++ * fault is not any of the following:
++ * 1. A fault on a PTE with a reserved bit set.
++ * 2. A fault caused by a user-mode access. (Do not demand-
++ * fault kernel memory due to user-mode accesses).
++ * 3. A fault caused by a page-level protection violation.
++ * (A demand fault would be on a non-present page which
++ * would have X86_PF_PROT==0).
++ */
++ if (!(hw_error_code & (X86_PF_RSVD | X86_PF_USER | X86_PF_PROT))) {
++ if (vmalloc_fault(address) >= 0)
++ return;
++ }
++
+ /* Was the fault spurious, caused by lazy TLB invalidation? */
+ if (spurious_kernel_fault(hw_error_code, address))
+ return;
+diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c
+index c5174b4e318b4..683cd12f47938 100644
+--- a/arch/x86/mm/numa_emulation.c
++++ b/arch/x86/mm/numa_emulation.c
+@@ -321,7 +321,7 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
+ u64 addr, u64 max_addr, u64 size)
+ {
+ return split_nodes_size_interleave_uniform(ei, pi, addr, max_addr, size,
+- 0, NULL, NUMA_NO_NODE);
++ 0, NULL, 0);
+ }
+
+ static int __init setup_emu2phys_nid(int *dfl_phys_nid)
+diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
+index a8a924b3c3358..0b0d1cdce2e73 100644
+--- a/arch/x86/mm/pti.c
++++ b/arch/x86/mm/pti.c
+@@ -447,7 +447,13 @@ static void __init pti_clone_user_shared(void)
+ * the sp1 and sp2 slots.
+ *
+ * This is done for all possible CPUs during boot to ensure
+- * that it's propagated to all mms.
++ * that it's propagated to all mms. If we were to add one of
++ * these mappings during CPU hotplug, we would need to take
++ * some measure to make sure that every mm that subsequently
++ * ran on that CPU would have the relevant PGD entry in its
++ * pagetables. The usual vmalloc_fault() mechanism would not
++ * work for page faults taken in entry_SYSCALL_64 before RSP
++ * is set up.
+ */
+
+ unsigned long va = (unsigned long)&per_cpu(cpu_tss_rw, cpu);
+diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
+index 1a3569b43aa5b..cf81902e6992f 100644
+--- a/arch/x86/mm/tlb.c
++++ b/arch/x86/mm/tlb.c
+@@ -317,6 +317,34 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ local_irq_restore(flags);
+ }
+
++static void sync_current_stack_to_mm(struct mm_struct *mm)
++{
++ unsigned long sp = current_stack_pointer;
++ pgd_t *pgd = pgd_offset(mm, sp);
++
++ if (pgtable_l5_enabled()) {
++ if (unlikely(pgd_none(*pgd))) {
++ pgd_t *pgd_ref = pgd_offset_k(sp);
++
++ set_pgd(pgd, *pgd_ref);
++ }
++ } else {
++ /*
++ * "pgd" is faked. The top level entries are "p4d"s, so sync
++ * the p4d. This compiles to approximately the same code as
++ * the 5-level case.
++ */
++ p4d_t *p4d = p4d_offset(pgd, sp);
++
++ if (unlikely(p4d_none(*p4d))) {
++ pgd_t *pgd_ref = pgd_offset_k(sp);
++ p4d_t *p4d_ref = p4d_offset(pgd_ref, sp);
++
++ set_p4d(p4d, *p4d_ref);
++ }
++ }
++}
++
+ static inline unsigned long mm_mangle_tif_spec_ib(struct task_struct *next)
+ {
+ unsigned long next_tif = task_thread_info(next)->flags;
+@@ -525,6 +553,15 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
+ */
+ cond_ibpb(tsk);
+
++ if (IS_ENABLED(CONFIG_VMAP_STACK)) {
++ /*
++ * If our current stack is in vmalloc space and isn't
++ * mapped in the new pgd, we'll double-fault. Forcibly
++ * map it.
++ */
++ sync_current_stack_to_mm(next);
++ }
++
+ /*
+ * Stop remote flushes for the previous mm.
+ * Skip kernel threads; we never send init_mm TLB flushing IPIs,
+diff --git a/block/blk-core.c b/block/blk-core.c
+index 03252af8c82c8..619a3dcd3f5e7 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -526,6 +526,7 @@ struct request_queue *__blk_alloc_queue(int node_id)
+ goto fail_stats;
+
+ q->backing_dev_info->ra_pages = VM_READAHEAD_PAGES;
++ q->backing_dev_info->io_pages = VM_READAHEAD_PAGES;
+ q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK;
+ q->node = node_id;
+
+diff --git a/block/blk-iocost.c b/block/blk-iocost.c
+index 86ba6fd254e1d..27c05e3caf75a 100644
+--- a/block/blk-iocost.c
++++ b/block/blk-iocost.c
+@@ -2094,14 +2094,15 @@ static void ioc_pd_free(struct blkg_policy_data *pd)
+ {
+ struct ioc_gq *iocg = pd_to_iocg(pd);
+ struct ioc *ioc = iocg->ioc;
++ unsigned long flags;
+
+ if (ioc) {
+- spin_lock(&ioc->lock);
++ spin_lock_irqsave(&ioc->lock, flags);
+ if (!list_empty(&iocg->active_list)) {
+ propagate_active_weight(iocg, 0, 0);
+ list_del_init(&iocg->active_list);
+ }
+- spin_unlock(&ioc->lock);
++ spin_unlock_irqrestore(&ioc->lock, flags);
+
+ hrtimer_cancel(&iocg->waitq_timer);
+ hrtimer_cancel(&iocg->delay_timer);
+diff --git a/block/blk-stat.c b/block/blk-stat.c
+index 7da302ff88d0d..ae3dd1fb8e61d 100644
+--- a/block/blk-stat.c
++++ b/block/blk-stat.c
+@@ -137,6 +137,7 @@ void blk_stat_add_callback(struct request_queue *q,
+ struct blk_stat_callback *cb)
+ {
+ unsigned int bucket;
++ unsigned long flags;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+@@ -147,20 +148,22 @@ void blk_stat_add_callback(struct request_queue *q,
+ blk_rq_stat_init(&cpu_stat[bucket]);
+ }
+
+- spin_lock(&q->stats->lock);
++ spin_lock_irqsave(&q->stats->lock, flags);
+ list_add_tail_rcu(&cb->list, &q->stats->callbacks);
+ blk_queue_flag_set(QUEUE_FLAG_STATS, q);
+- spin_unlock(&q->stats->lock);
++ spin_unlock_irqrestore(&q->stats->lock, flags);
+ }
+
+ void blk_stat_remove_callback(struct request_queue *q,
+ struct blk_stat_callback *cb)
+ {
+- spin_lock(&q->stats->lock);
++ unsigned long flags;
++
++ spin_lock_irqsave(&q->stats->lock, flags);
+ list_del_rcu(&cb->list);
+ if (list_empty(&q->stats->callbacks) && !q->stats->enable_accounting)
+ blk_queue_flag_clear(QUEUE_FLAG_STATS, q);
+- spin_unlock(&q->stats->lock);
++ spin_unlock_irqrestore(&q->stats->lock, flags);
+
+ del_timer_sync(&cb->timer);
+ }
+@@ -183,10 +186,12 @@ void blk_stat_free_callback(struct blk_stat_callback *cb)
+
+ void blk_stat_enable_accounting(struct request_queue *q)
+ {
+- spin_lock(&q->stats->lock);
++ unsigned long flags;
++
++ spin_lock_irqsave(&q->stats->lock, flags);
+ q->stats->enable_accounting = true;
+ blk_queue_flag_set(QUEUE_FLAG_STATS, q);
+- spin_unlock(&q->stats->lock);
++ spin_unlock_irqrestore(&q->stats->lock, flags);
+ }
+ EXPORT_SYMBOL_GPL(blk_stat_enable_accounting);
+
+diff --git a/block/partitions/core.c b/block/partitions/core.c
+index 78951e33b2d7c..534e11285a8d4 100644
+--- a/block/partitions/core.c
++++ b/block/partitions/core.c
+@@ -524,19 +524,20 @@ int bdev_add_partition(struct block_device *bdev, int partno,
+ int bdev_del_partition(struct block_device *bdev, int partno)
+ {
+ struct block_device *bdevp;
+- struct hd_struct *part;
+- int ret = 0;
+-
+- part = disk_get_part(bdev->bd_disk, partno);
+- if (!part)
+- return -ENXIO;
++ struct hd_struct *part = NULL;
++ int ret;
+
+- ret = -ENOMEM;
+- bdevp = bdget(part_devt(part));
++ bdevp = bdget_disk(bdev->bd_disk, partno);
+ if (!bdevp)
+- goto out_put_part;
++ return -ENOMEM;
+
+ mutex_lock(&bdevp->bd_mutex);
++ mutex_lock_nested(&bdev->bd_mutex, 1);
++
++ ret = -ENXIO;
++ part = disk_get_part(bdev->bd_disk, partno);
++ if (!part)
++ goto out_unlock;
+
+ ret = -EBUSY;
+ if (bdevp->bd_openers)
+@@ -545,16 +546,14 @@ int bdev_del_partition(struct block_device *bdev, int partno)
+ sync_blockdev(bdevp);
+ invalidate_bdev(bdevp);
+
+- mutex_lock_nested(&bdev->bd_mutex, 1);
+ delete_partition(bdev->bd_disk, part);
+- mutex_unlock(&bdev->bd_mutex);
+-
+ ret = 0;
+ out_unlock:
++ mutex_unlock(&bdev->bd_mutex);
+ mutex_unlock(&bdevp->bd_mutex);
+ bdput(bdevp);
+-out_put_part:
+- disk_put_part(part);
++ if (part)
++ disk_put_part(part);
+ return ret;
+ }
+
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index b1cd4d97bc2a7..1be73d29119ab 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -3868,9 +3868,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
+ { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, },
+
+- /* Some Sandisk SSDs lock up hard with NCQ enabled. Reported on
+- SD7SN6S256G and SD8SN8U256G */
+- { "SanDisk SD[78]SN*G", NULL, ATA_HORKAGE_NONCQ, },
++ /* Sandisk SD7/8/9s lock up hard on large trims */
++ { "SanDisk SD[789]*", NULL, ATA_HORKAGE_MAX_TRIM_128M, },
+
+ /* devices which puke on READ_NATIVE_MAX */
+ { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index 46336084b1a90..cc7bedafb3923 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -2080,6 +2080,7 @@ static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf)
+
+ static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf)
+ {
++ struct ata_device *dev = args->dev;
+ u16 min_io_sectors;
+
+ rbuf[1] = 0xb0;
+@@ -2105,7 +2106,12 @@ static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf)
+ * with the unmap bit set.
+ */
+ if (ata_id_has_trim(args->id)) {
+- put_unaligned_be64(65535 * ATA_MAX_TRIM_RNUM, &rbuf[36]);
++ u64 max_blocks = 65535 * ATA_MAX_TRIM_RNUM;
++
++ if (dev->horkage & ATA_HORKAGE_MAX_TRIM_128M)
++ max_blocks = 128 << (20 - SECTOR_SHIFT);
++
++ put_unaligned_be64(max_blocks, &rbuf[36]);
+ put_unaligned_be32(1, &rbuf[28]);
+ }
+
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index ce7e9f223b20b..bc9dc1f847e19 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -1360,6 +1360,8 @@ static void nbd_set_cmd_timeout(struct nbd_device *nbd, u64 timeout)
+ nbd->tag_set.timeout = timeout * HZ;
+ if (timeout)
+ blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ);
++ else
++ blk_queue_rq_timeout(nbd->disk->queue, 30 * HZ);
+ }
+
+ /* Must be called with config_lock held */
+diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
+index 87197319ab069..2fe4f3cdf54d7 100644
+--- a/drivers/cpuidle/cpuidle.c
++++ b/drivers/cpuidle/cpuidle.c
+@@ -153,7 +153,8 @@ static void enter_s2idle_proper(struct cpuidle_driver *drv,
+ */
+ stop_critical_timings();
+ drv->states[index].enter_s2idle(dev, drv, index);
+- WARN_ON(!irqs_disabled());
++ if (WARN_ON_ONCE(!irqs_disabled()))
++ local_irq_disable();
+ /*
+ * timekeeping_resume() that will be called by tick_unfreeze() for the
+ * first CPU executing it calls functions containing RCU read-side
+diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
+index 73a20780744bf..626819b33a325 100644
+--- a/drivers/dma/at_hdmac.c
++++ b/drivers/dma/at_hdmac.c
+@@ -1650,13 +1650,17 @@ static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
+ return NULL;
+
+ dmac_pdev = of_find_device_by_node(dma_spec->np);
++ if (!dmac_pdev)
++ return NULL;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ atslave = kmalloc(sizeof(*atslave), GFP_KERNEL);
+- if (!atslave)
++ if (!atslave) {
++ put_device(&dmac_pdev->dev);
+ return NULL;
++ }
+
+ atslave->cfg = ATC_DST_H2SEL_HW | ATC_SRC_H2SEL_HW;
+ /*
+@@ -1685,8 +1689,11 @@ static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
+ atslave->dma_dev = &dmac_pdev->dev;
+
+ chan = dma_request_channel(mask, at_dma_filter, atslave);
+- if (!chan)
++ if (!chan) {
++ put_device(&dmac_pdev->dev);
++ kfree(atslave);
+ return NULL;
++ }
+
+ atchan = to_at_dma_chan(chan);
+ atchan->per_if = dma_spec->args[0] & 0xff;
+diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c
+index ed430ad9b3dd8..b971505b87152 100644
+--- a/drivers/dma/dw-edma/dw-edma-core.c
++++ b/drivers/dma/dw-edma/dw-edma-core.c
+@@ -405,7 +405,7 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
+ if (xfer->cyclic) {
+ burst->dar = xfer->xfer.cyclic.paddr;
+ } else {
+- burst->dar = sg_dma_address(sg);
++ burst->dar = dst_addr;
+ /* Unlike the typical assumption by other
+ * drivers/IPs the peripheral memory isn't
+ * a FIFO memory, in this case, it's a
+@@ -413,14 +413,13 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
+ * and destination addresses are increased
+ * by the same portion (data length)
+ */
+- src_addr += sg_dma_len(sg);
+ }
+ } else {
+ burst->dar = dst_addr;
+ if (xfer->cyclic) {
+ burst->sar = xfer->xfer.cyclic.paddr;
+ } else {
+- burst->sar = sg_dma_address(sg);
++ burst->sar = src_addr;
+ /* Unlike the typical assumption by other
+ * drivers/IPs the peripheral memory isn't
+ * a FIFO memory, in this case, it's a
+@@ -428,12 +427,14 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
+ * and destination addresses are increased
+ * by the same portion (data length)
+ */
+- dst_addr += sg_dma_len(sg);
+ }
+ }
+
+- if (!xfer->cyclic)
++ if (!xfer->cyclic) {
++ src_addr += sg_dma_len(sg);
++ dst_addr += sg_dma_len(sg);
+ sg = sg_next(sg);
++ }
+ }
+
+ return vchan_tx_prep(&chan->vc, &desc->vd, xfer->flags);
+diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h
+index 56f18ae992332..308bed0a560ac 100644
+--- a/drivers/dma/fsldma.h
++++ b/drivers/dma/fsldma.h
+@@ -205,10 +205,10 @@ struct fsldma_chan {
+ #else
+ static u64 fsl_ioread64(const u64 __iomem *addr)
+ {
+- u32 fsl_addr = lower_32_bits(addr);
+- u64 fsl_addr_hi = (u64)in_le32((u32 *)(fsl_addr + 1)) << 32;
++ u32 val_lo = in_le32((u32 __iomem *)addr);
++ u32 val_hi = in_le32((u32 __iomem *)addr + 1);
+
+- return fsl_addr_hi | in_le32((u32 *)fsl_addr);
++ return ((u64)val_hi << 32) + val_lo;
+ }
+
+ static void fsl_iowrite64(u64 val, u64 __iomem *addr)
+@@ -219,10 +219,10 @@ static void fsl_iowrite64(u64 val, u64 __iomem *addr)
+
+ static u64 fsl_ioread64be(const u64 __iomem *addr)
+ {
+- u32 fsl_addr = lower_32_bits(addr);
+- u64 fsl_addr_hi = (u64)in_be32((u32 *)fsl_addr) << 32;
++ u32 val_hi = in_be32((u32 __iomem *)addr);
++ u32 val_lo = in_be32((u32 __iomem *)addr + 1);
+
+- return fsl_addr_hi | in_be32((u32 *)(fsl_addr + 1));
++ return ((u64)val_hi << 32) + val_lo;
+ }
+
+ static void fsl_iowrite64be(u64 val, u64 __iomem *addr)
+diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c
+index b2c2b5e8093cf..0db816eb8080d 100644
+--- a/drivers/dma/of-dma.c
++++ b/drivers/dma/of-dma.c
+@@ -71,12 +71,12 @@ static struct dma_chan *of_dma_router_xlate(struct of_phandle_args *dma_spec,
+ return NULL;
+
+ chan = ofdma_target->of_dma_xlate(&dma_spec_target, ofdma_target);
+- if (chan) {
+- chan->router = ofdma->dma_router;
+- chan->route_data = route_data;
+- } else {
++ if (IS_ERR_OR_NULL(chan)) {
+ ofdma->dma_router->route_free(ofdma->dma_router->dev,
+ route_data);
++ } else {
++ chan->router = ofdma->dma_router;
++ chan->route_data = route_data;
+ }
+
+ /*
+diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
+index 88b884cbb7c1b..9d8a235a5b884 100644
+--- a/drivers/dma/pl330.c
++++ b/drivers/dma/pl330.c
+@@ -2788,6 +2788,7 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
+ while (burst != (1 << desc->rqcfg.brst_size))
+ desc->rqcfg.brst_size++;
+
++ desc->rqcfg.brst_len = get_burst_len(desc, len);
+ /*
+ * If burst size is smaller than bus width then make sure we only
+ * transfer one at a time to avoid a burst stradling an MFIFO entry.
+@@ -2795,7 +2796,6 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
+ if (desc->rqcfg.brst_size * 8 < pl330->pcfg.data_bus_width)
+ desc->rqcfg.brst_len = 1;
+
+- desc->rqcfg.brst_len = get_burst_len(desc, len);
+ desc->bytes_requested = len;
+
+ desc->txd.flags = flags;
+diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
+index 6c879a7343604..3e488d963f246 100644
+--- a/drivers/dma/ti/k3-udma.c
++++ b/drivers/dma/ti/k3-udma.c
+@@ -2109,9 +2109,9 @@ udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl,
+ return NULL;
+ }
+
+- cppi5_tr_init(&tr_req[i].flags, CPPI5_TR_TYPE1, false, false,
+- CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
+- cppi5_tr_csf_set(&tr_req[i].flags, CPPI5_TR_CSF_SUPR_EVT);
++ cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false,
++ false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
++ cppi5_tr_csf_set(&tr_req[tr_idx].flags, CPPI5_TR_CSF_SUPR_EVT);
+
+ tr_req[tr_idx].addr = sg_addr;
+ tr_req[tr_idx].icnt0 = tr0_cnt0;
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 666ebe04837af..3f7eced92c0c8 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -2822,12 +2822,18 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
+ &dm_atomic_state_funcs);
+
+ r = amdgpu_display_modeset_create_props(adev);
+- if (r)
++ if (r) {
++ dc_release_state(state->context);
++ kfree(state);
+ return r;
++ }
+
+ r = amdgpu_dm_audio_init(adev);
+- if (r)
++ if (r) {
++ dc_release_state(state->context);
++ kfree(state);
+ return r;
++ }
+
+ return 0;
+ }
+@@ -2844,6 +2850,8 @@ static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
+ #if defined(CONFIG_ACPI)
+ struct amdgpu_dm_backlight_caps caps;
+
++ memset(&caps, 0, sizeof(caps));
++
+ if (dm->backlight_caps.caps_valid)
+ return;
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+index e5ecc5affa1eb..5098fc98cc255 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+@@ -67,7 +67,7 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
+ result = dc_link_aux_transfer_raw(TO_DM_AUX(aux)->ddc_service, &payload,
+ &operation_result);
+
+- if (payload.write)
++ if (payload.write && result >= 0)
+ result = msg->size;
+
+ if (result < 0)
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+index 31aa31c280ee6..885beb0bcc199 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+@@ -767,6 +767,7 @@ static bool detect_dp(struct dc_link *link,
+ sink_caps->signal = dp_passive_dongle_detection(link->ddc,
+ sink_caps,
+ audio_support);
++ link->dpcd_caps.dongle_type = sink_caps->dongle_type;
+ }
+
+ return true;
+@@ -3265,10 +3266,10 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx)
+ core_link_set_avmute(pipe_ctx, true);
+ }
+
+- dc->hwss.blank_stream(pipe_ctx);
+ #if defined(CONFIG_DRM_AMD_DC_HDCP)
+ update_psp_stream_config(pipe_ctx, true);
+ #endif
++ dc->hwss.blank_stream(pipe_ctx);
+
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
+ deallocate_mst_payload(pipe_ctx);
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+index 91cd884d6f257..7728fd71d1f3a 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+@@ -4346,9 +4346,9 @@ bool dc_link_get_backlight_level_nits(struct dc_link *link,
+ link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
+ return false;
+
+- if (!core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_CURRENT_PEAK,
++ if (core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_CURRENT_PEAK,
+ dpcd_backlight_get.raw,
+- sizeof(union dpcd_source_backlight_get)))
++ sizeof(union dpcd_source_backlight_get)) != DC_OK)
+ return false;
+
+ *backlight_millinits_avg =
+@@ -4387,9 +4387,9 @@ bool dc_link_read_default_bl_aux(struct dc_link *link, uint32_t *backlight_milli
+ link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
+ return false;
+
+- if (!core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL,
++ if (core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL,
+ (uint8_t *) backlight_millinits,
+- sizeof(uint32_t)))
++ sizeof(uint32_t)) != DC_OK)
+ return false;
+
+ return true;
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
+index 49aad691e687e..ccac2315a903a 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
+@@ -222,7 +222,7 @@ struct dc_stream_state {
+ union stream_update_flags update_flags;
+ };
+
+-#define ABM_LEVEL_IMMEDIATE_DISABLE 0xFFFFFFFF
++#define ABM_LEVEL_IMMEDIATE_DISABLE 255
+
+ struct dc_stream_update {
+ struct dc_stream_state *stream;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+index 17d5cb422025e..8939541ad7afc 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+@@ -1213,6 +1213,7 @@ static enum dc_status dcn10_validate_global(struct dc *dc, struct dc_state *cont
+ bool video_large = false;
+ bool desktop_large = false;
+ bool dcc_disabled = false;
++ bool mpo_enabled = false;
+
+ for (i = 0; i < context->stream_count; i++) {
+ if (context->stream_status[i].plane_count == 0)
+ continue;
+ if (context->stream_status[i].plane_count > 2)
+ return DC_FAIL_UNSUPPORTED_1;
+
++ if (context->stream_status[i].plane_count > 1)
++ mpo_enabled = true;
++
+ for (j = 0; j < context->stream_status[i].plane_count; j++) {
+ struct dc_plane_state *plane =
+ context->stream_status[i].plane_states[j];
+@@ -1244,6 +1248,10 @@ static enum dc_status dcn10_validate_global(struct dc *dc, struct dc_state *cont
+ }
+ }
+
++ /* Disable MPO in multi-display configurations. */
++ if (context->stream_count > 1 && mpo_enabled)
++ return DC_FAIL_UNSUPPORTED_1;
++
+ /*
+ * Workaround: On DCN10 there is UMC issue that causes underflow when
+ * playing 4k video on 4k desktop with video downscaled and single channel
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
+index eff87c8968380..0e7ae58180347 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
+@@ -374,8 +374,18 @@ static int vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
+ /* compare them in unit celsius degree */
+ if (low < range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)
+ low = range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+- if (high > tdp_table->usSoftwareShutdownTemp)
+- high = tdp_table->usSoftwareShutdownTemp;
++
++ /*
++ * As a common sense, usSoftwareShutdownTemp should be bigger
++ * than ThotspotLimit. For any invalid usSoftwareShutdownTemp,
++ * we will just use the max possible setting VEGA10_THERMAL_MAXIMUM_ALERT_TEMP
++ * to avoid false alarms.
++ */
++ if ((tdp_table->usSoftwareShutdownTemp >
++ range->hotspot_crit_max / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)) {
++ if (high > tdp_table->usSoftwareShutdownTemp)
++ high = tdp_table->usSoftwareShutdownTemp;
++ }
+
+ if (low > high)
+ return -EINVAL;
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
+index ad54f4500af1f..63016c14b9428 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
+@@ -37,6 +37,7 @@
+ #include "cgs_common.h"
+ #include "atombios.h"
+ #include "pppcielanes.h"
++#include "smu7_smumgr.h"
+
+ #include "smu/smu_7_0_1_d.h"
+ #include "smu/smu_7_0_1_sh_mask.h"
+@@ -2948,6 +2949,7 @@ const struct pp_smumgr_func ci_smu_funcs = {
+ .request_smu_load_specific_fw = NULL,
+ .send_msg_to_smc = ci_send_msg_to_smc,
+ .send_msg_to_smc_with_parameter = ci_send_msg_to_smc_with_parameter,
++ .get_argument = smu7_get_argument,
+ .download_pptable_settings = NULL,
+ .upload_pptable_settings = NULL,
+ .get_offsetof = ci_get_offsetof,
1502 +diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
1503 +index 2cbc4619b4ce6..525658fd201fd 100644
1504 +--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
1505 ++++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
1506 +@@ -336,8 +336,10 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
1507 +
1508 + /* Fill up the empty slots in sha_text and write it out */
1509 + sha_empty = sizeof(sha_text) - sha_leftovers;
1510 +- for (j = 0; j < sha_empty; j++)
1511 +- sha_text |= ksv[j] << ((sizeof(sha_text) - j - 1) * 8);
1512 ++ for (j = 0; j < sha_empty; j++) {
1513 ++ u8 off = ((sizeof(sha_text) - j - 1 - sha_leftovers) * 8);
1514 ++ sha_text |= ksv[j] << off;
1515 ++ }
1516 +
1517 + ret = intel_write_sha_text(dev_priv, sha_text);
1518 + if (ret < 0)
1519 +@@ -435,7 +437,7 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
1520 + /* Write 32 bits of text */
1521 + intel_de_write(dev_priv, HDCP_REP_CTL,
1522 + rep_ctl | HDCP_SHA1_TEXT_32);
1523 +- sha_text |= bstatus[0] << 24 | bstatus[1] << 16;
1524 ++ sha_text |= bstatus[0] << 8 | bstatus[1];
1525 + ret = intel_write_sha_text(dev_priv, sha_text);
1526 + if (ret < 0)
1527 + return ret;
1528 +@@ -450,17 +452,29 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
1529 + return ret;
1530 + sha_idx += sizeof(sha_text);
1531 + }
1532 ++
1533 ++ /*
1534 ++ * Terminate the SHA-1 stream by hand. For the other leftover
1535 ++ * cases this is appended by the hardware.
1536 ++ */
1537 ++ intel_de_write(dev_priv, HDCP_REP_CTL,
1538 ++ rep_ctl | HDCP_SHA1_TEXT_32);
1539 ++ sha_text = DRM_HDCP_SHA1_TERMINATOR << 24;
1540 ++ ret = intel_write_sha_text(dev_priv, sha_text);
1541 ++ if (ret < 0)
1542 ++ return ret;
1543 ++ sha_idx += sizeof(sha_text);
1544 + } else if (sha_leftovers == 3) {
1545 +- /* Write 32 bits of text */
1546 ++ /* Write 32 bits of text (filled from LSB) */
1547 + intel_de_write(dev_priv, HDCP_REP_CTL,
1548 + rep_ctl | HDCP_SHA1_TEXT_32);
1549 +- sha_text |= bstatus[0] << 24;
1550 ++ sha_text |= bstatus[0];
1551 + ret = intel_write_sha_text(dev_priv, sha_text);
1552 + if (ret < 0)
1553 + return ret;
1554 + sha_idx += sizeof(sha_text);
1555 +
1556 +- /* Write 8 bits of text, 24 bits of M0 */
1557 ++ /* Write 8 bits of text (filled from LSB), 24 bits of M0 */
1558 + intel_de_write(dev_priv, HDCP_REP_CTL,
1559 + rep_ctl | HDCP_SHA1_TEXT_8);
1560 + ret = intel_write_sha_text(dev_priv, bstatus[1]);
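The corrected shifts in the intel_hdcp.c hunk above pack incoming KSV bytes beneath whatever leftover bytes are already queued in sha_text instead of overwriting them. A minimal standalone sketch of the arithmetic (plain C with made-up values; not kernel code):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t sha_text = 0x12000000;	/* one leftover byte already queued */
	unsigned int sha_leftovers = 1;
	const uint8_t ksv[] = { 0xaa, 0xbb, 0xcc };
	unsigned int sha_empty = sizeof(sha_text) - sha_leftovers;

	for (unsigned int j = 0; j < sha_empty; j++) {
		/* subtracting sha_leftovers keeps the queued byte intact */
		unsigned int off = (sizeof(sha_text) - j - 1 - sha_leftovers) * 8;
		sha_text |= (uint32_t)ksv[j] << off;
	}
	printf("sha_text = 0x%08x\n", sha_text);	/* 0x12aabbcc */
	return 0;
}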
1561 +diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
1562 +index 1d330204c465c..2dd1cf1ffbe25 100644
1563 +--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
1564 ++++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
1565 +@@ -207,6 +207,16 @@ static int a6xx_gmu_start(struct a6xx_gmu *gmu)
1566 + {
1567 + int ret;
1568 + u32 val;
1569 ++ u32 mask, reset_val;
1570 ++
1571 ++ val = gmu_read(gmu, REG_A6XX_GMU_CM3_DTCM_START + 0xff8);
1572 ++ if (val <= 0x20010004) {
1573 ++ mask = 0xffffffff;
1574 ++ reset_val = 0xbabeface;
1575 ++ } else {
1576 ++ mask = 0x1ff;
1577 ++ reset_val = 0x100;
1578 ++ }
1579 +
1580 + gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);
1581 +
1582 +@@ -218,7 +228,7 @@ static int a6xx_gmu_start(struct a6xx_gmu *gmu)
1583 + gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 0);
1584 +
1585 + ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, val,
1586 +- val == 0xbabeface, 100, 10000);
1587 ++ (val & mask) == reset_val, 100, 10000);
1588 +
1589 + if (ret)
1590 + DRM_DEV_ERROR(gmu->dev, "GMU firmware initialization timed out\n");
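The a6xx_gmu.c change gates the firmware-init poll on the GMU firmware release: older firmware reports the full 0xbabeface magic, newer firmware only guarantees the low status bits. A hedged sketch of just that decision, with the register read replaced by a plain parameter (constants taken from the hunk):

#include <stdint.h>
#include <stdio.h>

struct gmu_check {
	uint32_t mask;
	uint32_t reset_val;
};

static struct gmu_check gmu_pick_check(uint32_t fw_release)
{
	struct gmu_check c;

	if (fw_release <= 0x20010004) {
		c.mask = 0xffffffff;	/* legacy: match the full magic */
		c.reset_val = 0xbabeface;
	} else {
		c.mask = 0x1ff;		/* newer: low status bits only */
		c.reset_val = 0x100;
	}
	return c;
}

int main(void)
{
	struct gmu_check c = gmu_pick_check(0x20010005);

	printf("mask=0x%x reset_val=0x%x\n", c.mask, c.reset_val);
	return 0;
}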
1591 +diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
1592 +index 969d95aa873c4..1026e1e5bec10 100644
1593 +--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
1594 ++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
1595 +@@ -827,7 +827,7 @@ static void dpu_crtc_enable(struct drm_crtc *crtc,
1596 + {
1597 + struct dpu_crtc *dpu_crtc;
1598 + struct drm_encoder *encoder;
1599 +- bool request_bandwidth;
1600 ++ bool request_bandwidth = false;
1601 +
1602 + if (!crtc) {
1603 + DPU_ERROR("invalid crtc\n");
1604 +diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
1605 +index 0946a86b37b28..c0cd936314e66 100644
1606 +--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
1607 ++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
1608 +@@ -586,7 +586,10 @@ static int dpu_encoder_virt_atomic_check(
1609 + dpu_kms = to_dpu_kms(priv->kms);
1610 + mode = &crtc_state->mode;
1611 + adj_mode = &crtc_state->adjusted_mode;
1612 +- global_state = dpu_kms_get_existing_global_state(dpu_kms);
1613 ++ global_state = dpu_kms_get_global_state(crtc_state->state);
1614 ++ if (IS_ERR(global_state))
1615 ++ return PTR_ERR(global_state);
1616 ++
1617 + trace_dpu_enc_atomic_check(DRMID(drm_enc));
1618 +
1619 + /*
1620 +@@ -621,12 +624,15 @@ static int dpu_encoder_virt_atomic_check(
1621 + /* Reserve dynamic resources now. */
1622 + if (!ret) {
1623 + /*
1624 +- * Avoid reserving resources when mode set is pending. Topology
1625 +- * info may not be available to complete reservation.
1626 ++ * Release and allocate resources on every modeset.
1627 ++ * Don't allocate when active is false.
1628 + */
1629 + if (drm_atomic_crtc_needs_modeset(crtc_state)) {
1630 +- ret = dpu_rm_reserve(&dpu_kms->rm, global_state,
1631 +- drm_enc, crtc_state, topology);
1632 ++ dpu_rm_release(global_state, drm_enc);
1633 ++
1634 ++ if (!crtc_state->active_changed || crtc_state->active)
1635 ++ ret = dpu_rm_reserve(&dpu_kms->rm, global_state,
1636 ++ drm_enc, crtc_state, topology);
1637 + }
1638 + }
1639 +
1640 +@@ -1175,7 +1181,6 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
1641 + struct dpu_encoder_virt *dpu_enc = NULL;
1642 + struct msm_drm_private *priv;
1643 + struct dpu_kms *dpu_kms;
1644 +- struct dpu_global_state *global_state;
1645 + int i = 0;
1646 +
1647 + if (!drm_enc) {
1648 +@@ -1194,7 +1199,6 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
1649 +
1650 + priv = drm_enc->dev->dev_private;
1651 + dpu_kms = to_dpu_kms(priv->kms);
1652 +- global_state = dpu_kms_get_existing_global_state(dpu_kms);
1653 +
1654 + trace_dpu_enc_disable(DRMID(drm_enc));
1655 +
1656 +@@ -1224,8 +1228,6 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
1657 +
1658 + DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n");
1659 +
1660 +- dpu_rm_release(global_state, drm_enc);
1661 +-
1662 + mutex_unlock(&dpu_enc->enc_lock);
1663 + }
1664 +
1665 +diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
1666 +index 3b9c33e694bf4..994d23bad3870 100644
1667 +--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
1668 ++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
1669 +@@ -866,9 +866,9 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
1670 + crtc_state = drm_atomic_get_new_crtc_state(state->state,
1671 + state->crtc);
1672 +
1673 +- min_scale = FRAC_16_16(1, pdpu->pipe_sblk->maxdwnscale);
1674 ++ min_scale = FRAC_16_16(1, pdpu->pipe_sblk->maxupscale);
1675 + ret = drm_atomic_helper_check_plane_state(state, crtc_state, min_scale,
1676 +- pdpu->pipe_sblk->maxupscale << 16,
1677 ++ pdpu->pipe_sblk->maxdwnscale << 16,
1678 + true, true);
1679 + if (ret) {
1680 + DPU_DEBUG_PLANE(pdpu, "Check plane state failed (%d)\n", ret);
1681 +diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
1682 +index 5ccfad794c6a5..561bfa48841c3 100644
1683 +--- a/drivers/gpu/drm/msm/msm_atomic.c
1684 ++++ b/drivers/gpu/drm/msm/msm_atomic.c
1685 +@@ -27,6 +27,34 @@ int msm_atomic_prepare_fb(struct drm_plane *plane,
1686 + return msm_framebuffer_prepare(new_state->fb, kms->aspace);
1687 + }
1688 +
1689 ++/*
1690 ++ * Helpers to control vblanks while we flush: basically just to ensure
1691 ++ * that vblank accounting is switched on, so we get valid seqn/timestamp
1692 ++ * on pageflip events (if requested)
1693 ++ */
1694 ++
1695 ++static void vblank_get(struct msm_kms *kms, unsigned crtc_mask)
1696 ++{
1697 ++ struct drm_crtc *crtc;
1698 ++
1699 ++ for_each_crtc_mask(kms->dev, crtc, crtc_mask) {
1700 ++ if (!crtc->state->active)
1701 ++ continue;
1702 ++ drm_crtc_vblank_get(crtc);
1703 ++ }
1704 ++}
1705 ++
1706 ++static void vblank_put(struct msm_kms *kms, unsigned crtc_mask)
1707 ++{
1708 ++ struct drm_crtc *crtc;
1709 ++
1710 ++ for_each_crtc_mask(kms->dev, crtc, crtc_mask) {
1711 ++ if (!crtc->state->active)
1712 ++ continue;
1713 ++ drm_crtc_vblank_put(crtc);
1714 ++ }
1715 ++}
1716 ++
1717 + static void msm_atomic_async_commit(struct msm_kms *kms, int crtc_idx)
1718 + {
1719 + unsigned crtc_mask = BIT(crtc_idx);
1720 +@@ -44,6 +72,8 @@ static void msm_atomic_async_commit(struct msm_kms *kms, int crtc_idx)
1721 +
1722 + kms->funcs->enable_commit(kms);
1723 +
1724 ++ vblank_get(kms, crtc_mask);
1725 ++
1726 + /*
1727 + * Flush hardware updates:
1728 + */
1729 +@@ -58,6 +88,8 @@ static void msm_atomic_async_commit(struct msm_kms *kms, int crtc_idx)
1730 + kms->funcs->wait_flush(kms, crtc_mask);
1731 + trace_msm_atomic_wait_flush_finish(crtc_mask);
1732 +
1733 ++ vblank_put(kms, crtc_mask);
1734 ++
1735 + mutex_lock(&kms->commit_lock);
1736 + kms->funcs->complete_commit(kms, crtc_mask);
1737 + mutex_unlock(&kms->commit_lock);
1738 +@@ -221,6 +253,8 @@ void msm_atomic_commit_tail(struct drm_atomic_state *state)
1739 + */
1740 + kms->pending_crtc_mask &= ~crtc_mask;
1741 +
1742 ++ vblank_get(kms, crtc_mask);
1743 ++
1744 + /*
1745 + * Flush hardware updates:
1746 + */
1747 +@@ -235,6 +269,8 @@ void msm_atomic_commit_tail(struct drm_atomic_state *state)
1748 + kms->funcs->wait_flush(kms, crtc_mask);
1749 + trace_msm_atomic_wait_flush_finish(crtc_mask);
1750 +
1751 ++ vblank_put(kms, crtc_mask);
1752 ++
1753 + mutex_lock(&kms->commit_lock);
1754 + kms->funcs->complete_commit(kms, crtc_mask);
1755 + mutex_unlock(&kms->commit_lock);
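Both commit paths now bracket the hardware flush with the vblank helpers added at the top of the file. A minimal standalone sketch of the bracketing pattern (the kms type and callbacks are stand-ins, not the msm API):

#include <stdio.h>

struct kms { int unused; };	/* stand-in for struct msm_kms */

static void vblank_get(struct kms *kms, unsigned int mask)
{
	(void)kms;
	printf("vblank accounting on for 0x%x\n", mask);
}

static void vblank_put(struct kms *kms, unsigned int mask)
{
	(void)kms;
	printf("vblank accounting off for 0x%x\n", mask);
}

static void flush_commit(struct kms *kms, unsigned int mask) { (void)kms; (void)mask; }
static void wait_flush(struct kms *kms, unsigned int mask) { (void)kms; (void)mask; }

static void commit_flush(struct kms *kms, unsigned int crtc_mask)
{
	vblank_get(kms, crtc_mask);	/* accounting live during the flush */
	flush_commit(kms, crtc_mask);
	wait_flush(kms, crtc_mask);
	vblank_put(kms, crtc_mask);	/* balanced once the flush is done */
}

int main(void)
{
	struct kms kms = { 0 };

	commit_flush(&kms, 0x1);
	return 0;
}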
1756 +diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
1757 +index f6ce40bf36998..b4d61af7a104e 100644
1758 +--- a/drivers/gpu/drm/msm/msm_drv.c
1759 ++++ b/drivers/gpu/drm/msm/msm_drv.c
1760 +@@ -1328,6 +1328,13 @@ static int msm_pdev_remove(struct platform_device *pdev)
1761 + return 0;
1762 + }
1763 +
1764 ++static void msm_pdev_shutdown(struct platform_device *pdev)
1765 ++{
1766 ++ struct drm_device *drm = platform_get_drvdata(pdev);
1767 ++
1768 ++ drm_atomic_helper_shutdown(drm);
1769 ++}
1770 ++
1771 + static const struct of_device_id dt_match[] = {
1772 + { .compatible = "qcom,mdp4", .data = (void *)KMS_MDP4 },
1773 + { .compatible = "qcom,mdss", .data = (void *)KMS_MDP5 },
1774 +@@ -1340,6 +1347,7 @@ MODULE_DEVICE_TABLE(of, dt_match);
1775 + static struct platform_driver msm_platform_driver = {
1776 + .probe = msm_pdev_probe,
1777 + .remove = msm_pdev_remove,
1778 ++ .shutdown = msm_pdev_shutdown,
1779 + .driver = {
1780 + .name = "msm",
1781 + .of_match_table = dt_match,
1782 +diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
1783 +index 6d40914675dad..328a4a74f534e 100644
1784 +--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
1785 ++++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
1786 +@@ -451,11 +451,12 @@ static void omap_crtc_atomic_enable(struct drm_crtc *crtc,
1787 + if (omap_state->manually_updated)
1788 + return;
1789 +
1790 +- spin_lock_irq(&crtc->dev->event_lock);
1791 + drm_crtc_vblank_on(crtc);
1792 ++
1793 + ret = drm_crtc_vblank_get(crtc);
1794 + WARN_ON(ret != 0);
1795 +
1796 ++ spin_lock_irq(&crtc->dev->event_lock);
1797 + omap_crtc_arm_event(crtc);
1798 + spin_unlock_irq(&crtc->dev->event_lock);
1799 + }
1800 +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
1801 +index 7cfa9785bfbb0..6ea3619842d8d 100644
1802 +--- a/drivers/hid/hid-ids.h
1803 ++++ b/drivers/hid/hid-ids.h
1804 +@@ -727,6 +727,9 @@
1805 + #define USB_DEVICE_ID_LENOVO_TPPRODOCK 0x6067
1806 + #define USB_DEVICE_ID_LENOVO_X1_COVER 0x6085
1807 + #define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D 0x608d
1808 ++#define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6019 0x6019
1809 ++#define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_602E 0x602e
1810 ++#define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6093 0x6093
1811 +
1812 + #define USB_VENDOR_ID_LG 0x1fd2
1813 + #define USB_DEVICE_ID_LG_MULTITOUCH 0x0064
1814 +diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
1815 +index c242150d35a3a..a65aef6a322fb 100644
1816 +--- a/drivers/hid/hid-quirks.c
1817 ++++ b/drivers/hid/hid-quirks.c
1818 +@@ -105,6 +105,9 @@ static const struct hid_device_id hid_quirks[] = {
1819 + { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M406XE), HID_QUIRK_MULTI_INPUT },
1820 + { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE_ID2), HID_QUIRK_ALWAYS_POLL },
1821 + { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D), HID_QUIRK_ALWAYS_POLL },
1822 ++ { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6019), HID_QUIRK_ALWAYS_POLL },
1823 ++ { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_602E), HID_QUIRK_ALWAYS_POLL },
1824 ++ { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6093), HID_QUIRK_ALWAYS_POLL },
1825 + { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C007), HID_QUIRK_ALWAYS_POLL },
1826 + { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C077), HID_QUIRK_ALWAYS_POLL },
1827 + { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_KEYBOARD_G710_PLUS), HID_QUIRK_NOGET },
1828 +diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
1829 +index 92ee0fe4c919e..a4e8d96513c22 100644
1830 +--- a/drivers/hv/hv_util.c
1831 ++++ b/drivers/hv/hv_util.c
1832 +@@ -282,26 +282,52 @@ static struct {
1833 + spinlock_t lock;
1834 + } host_ts;
1835 +
1836 +-static struct timespec64 hv_get_adj_host_time(void)
1837 ++static inline u64 reftime_to_ns(u64 reftime)
1838 + {
1839 +- struct timespec64 ts;
1840 +- u64 newtime, reftime;
1841 ++ return (reftime - WLTIMEDELTA) * 100;
1842 ++}
1843 ++
1844 ++/*
1845 ++ * Hard-coded threshold for host timesync delay: 600 seconds
1846 ++ */
1847 ++static const u64 HOST_TIMESYNC_DELAY_THRESH = 600 * (u64)NSEC_PER_SEC;
1848 ++
1849 ++static int hv_get_adj_host_time(struct timespec64 *ts)
1850 ++{
1851 ++ u64 newtime, reftime, timediff_adj;
1852 + unsigned long flags;
1853 ++ int ret = 0;
1854 +
1855 + spin_lock_irqsave(&host_ts.lock, flags);
1856 + reftime = hv_read_reference_counter();
1857 +- newtime = host_ts.host_time + (reftime - host_ts.ref_time);
1858 +- ts = ns_to_timespec64((newtime - WLTIMEDELTA) * 100);
1859 ++
1860 ++ /*
1861 ++ * We need to let the caller know that the last update from the host
1862 ++ * is older than the max allowable threshold. clock_gettime()
1863 ++ * and PTP ioctl do not have a documented error that we could
1864 ++ * return for this specific case. Use ESTALE to report this.
1865 ++ */
1866 ++ timediff_adj = reftime - host_ts.ref_time;
1867 ++ if (timediff_adj * 100 > HOST_TIMESYNC_DELAY_THRESH) {
1868 ++ pr_warn_once("TIMESYNC IC: Stale time stamp, %llu nsecs old\n",
1869 ++ (timediff_adj * 100));
1870 ++ ret = -ESTALE;
1871 ++ }
1872 ++
1873 ++ newtime = host_ts.host_time + timediff_adj;
1874 ++ *ts = ns_to_timespec64(reftime_to_ns(newtime));
1875 + spin_unlock_irqrestore(&host_ts.lock, flags);
1876 +
1877 +- return ts;
1878 ++ return ret;
1879 + }
1880 +
1881 + static void hv_set_host_time(struct work_struct *work)
1882 + {
1883 +- struct timespec64 ts = hv_get_adj_host_time();
1884 +
1885 +- do_settimeofday64(&ts);
1886 ++ struct timespec64 ts;
1887 ++
1888 ++ if (!hv_get_adj_host_time(&ts))
1889 ++ do_settimeofday64(&ts);
1890 + }
1891 +
1892 + /*
1893 +@@ -361,10 +387,23 @@ static void timesync_onchannelcallback(void *context)
1894 + struct ictimesync_ref_data *refdata;
1895 + u8 *time_txf_buf = util_timesynch.recv_buffer;
1896 +
1897 +- vmbus_recvpacket(channel, time_txf_buf,
1898 +- HV_HYP_PAGE_SIZE, &recvlen, &requestid);
1899 ++ /*
1900 ++ * Drain the ring buffer and use the last packet to update
1901 ++ * host_ts.
1902 ++ */
1903 ++ while (1) {
1904 ++ int ret = vmbus_recvpacket(channel, time_txf_buf,
1905 ++ HV_HYP_PAGE_SIZE, &recvlen,
1906 ++ &requestid);
1907 ++ if (ret) {
1908 ++ pr_warn_once("TimeSync IC pkt recv failed (Err: %d)\n",
1909 ++ ret);
1910 ++ break;
1911 ++ }
1912 ++
1913 ++ if (!recvlen)
1914 ++ break;
1915 +
1916 +- if (recvlen > 0) {
1917 + icmsghdrp = (struct icmsg_hdr *)&time_txf_buf[
1918 + sizeof(struct vmbuspipe_hdr)];
1919 +
1920 +@@ -622,9 +661,7 @@ static int hv_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
1921 +
1922 + static int hv_ptp_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
1923 + {
1924 +- *ts = hv_get_adj_host_time();
1925 +-
1926 +- return 0;
1927 ++ return hv_get_adj_host_time(ts);
1928 + }
1929 +
1930 + static struct ptp_clock_info ptp_hyperv_info = {
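Hyper-V reference time ticks in 100 ns units, so the new staleness check multiplies the tick delta by 100 before comparing it with the 600 s threshold. A standalone sketch of the arithmetic (local names, not the kernel's):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define NSEC_PER_SEC 1000000000ULL

static const uint64_t HOST_TIMESYNC_DELAY_THRESH = 600 * NSEC_PER_SEC;

static bool host_time_is_stale(uint64_t reftime, uint64_t host_ref_time)
{
	uint64_t timediff_adj = reftime - host_ref_time;	/* 100 ns ticks */

	return timediff_adj * 100 > HOST_TIMESYNC_DELAY_THRESH;
}

int main(void)
{
	/* 6,000,000,000 ticks of 100 ns each is exactly 600 s: not stale */
	printf("%d\n", host_time_is_stale(6000000000ULL, 0));
	/* one more tick crosses the threshold */
	printf("%d\n", host_time_is_stale(6000000001ULL, 0));
	return 0;
}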
1931 +diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
1932 +index 3166184093157..a18887990f4a2 100644
1933 +--- a/drivers/hwmon/applesmc.c
1934 ++++ b/drivers/hwmon/applesmc.c
1935 +@@ -753,15 +753,18 @@ static ssize_t applesmc_light_show(struct device *dev,
1936 + }
1937 +
1938 + ret = applesmc_read_key(LIGHT_SENSOR_LEFT_KEY, buffer, data_length);
1939 ++ if (ret)
1940 ++ goto out;
1941 + /* newer macbooks report a single 10-bit bigendian value */
1942 + if (data_length == 10) {
1943 + left = be16_to_cpu(*(__be16 *)(buffer + 6)) >> 2;
1944 + goto out;
1945 + }
1946 + left = buffer[2];
1947 ++
1948 ++ ret = applesmc_read_key(LIGHT_SENSOR_RIGHT_KEY, buffer, data_length);
1949 + if (ret)
1950 + goto out;
1951 +- ret = applesmc_read_key(LIGHT_SENSOR_RIGHT_KEY, buffer, data_length);
1952 + right = buffer[2];
1953 +
1954 + out:
1955 +@@ -810,12 +813,11 @@ static ssize_t applesmc_show_fan_speed(struct device *dev,
1956 + to_index(attr));
1957 +
1958 + ret = applesmc_read_key(newkey, buffer, 2);
1959 +- speed = ((buffer[0] << 8 | buffer[1]) >> 2);
1960 +-
1961 + if (ret)
1962 + return ret;
1963 +- else
1964 +- return snprintf(sysfsbuf, PAGE_SIZE, "%u\n", speed);
1965 ++
1966 ++ speed = ((buffer[0] << 8 | buffer[1]) >> 2);
1967 ++ return snprintf(sysfsbuf, PAGE_SIZE, "%u\n", speed);
1968 + }
1969 +
1970 + static ssize_t applesmc_store_fan_speed(struct device *dev,
1971 +@@ -851,12 +853,11 @@ static ssize_t applesmc_show_fan_manual(struct device *dev,
1972 + u8 buffer[2];
1973 +
1974 + ret = applesmc_read_key(FANS_MANUAL, buffer, 2);
1975 +- manual = ((buffer[0] << 8 | buffer[1]) >> to_index(attr)) & 0x01;
1976 +-
1977 + if (ret)
1978 + return ret;
1979 +- else
1980 +- return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", manual);
1981 ++
1982 ++ manual = ((buffer[0] << 8 | buffer[1]) >> to_index(attr)) & 0x01;
1983 ++ return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", manual);
1984 + }
1985 +
1986 + static ssize_t applesmc_store_fan_manual(struct device *dev,
1987 +@@ -872,10 +873,11 @@ static ssize_t applesmc_store_fan_manual(struct device *dev,
1988 + return -EINVAL;
1989 +
1990 + ret = applesmc_read_key(FANS_MANUAL, buffer, 2);
1991 +- val = (buffer[0] << 8 | buffer[1]);
1992 + if (ret)
1993 + goto out;
1994 +
1995 ++ val = (buffer[0] << 8 | buffer[1]);
1996 ++
1997 + if (input)
1998 + val = val | (0x01 << to_index(attr));
1999 + else
2000 +@@ -951,13 +953,12 @@ static ssize_t applesmc_key_count_show(struct device *dev,
2001 + u32 count;
2002 +
2003 + ret = applesmc_read_key(KEY_COUNT_KEY, buffer, 4);
2004 +- count = ((u32)buffer[0]<<24) + ((u32)buffer[1]<<16) +
2005 +- ((u32)buffer[2]<<8) + buffer[3];
2006 +-
2007 + if (ret)
2008 + return ret;
2009 +- else
2010 +- return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", count);
2011 ++
2012 ++ count = ((u32)buffer[0]<<24) + ((u32)buffer[1]<<16) +
2013 ++ ((u32)buffer[2]<<8) + buffer[3];
2014 ++ return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", count);
2015 + }
2016 +
2017 + static ssize_t applesmc_key_at_index_read_show(struct device *dev,
2018 +diff --git a/drivers/hwmon/pmbus/isl68137.c b/drivers/hwmon/pmbus/isl68137.c
2019 +index 0c622711ef7e0..58aa95a3c010c 100644
2020 +--- a/drivers/hwmon/pmbus/isl68137.c
2021 ++++ b/drivers/hwmon/pmbus/isl68137.c
2022 +@@ -67,6 +67,7 @@ enum variants {
2023 + raa_dmpvr1_2rail,
2024 + raa_dmpvr2_1rail,
2025 + raa_dmpvr2_2rail,
2026 ++ raa_dmpvr2_2rail_nontc,
2027 + raa_dmpvr2_3rail,
2028 + raa_dmpvr2_hv,
2029 + };
2030 +@@ -241,6 +242,10 @@ static int isl68137_probe(struct i2c_client *client,
2031 + info->pages = 1;
2032 + info->read_word_data = raa_dmpvr2_read_word_data;
2033 + break;
2034 ++ case raa_dmpvr2_2rail_nontc:
2035 ++ info->func[0] &= ~PMBUS_HAVE_TEMP;
2036 ++ info->func[1] &= ~PMBUS_HAVE_TEMP;
2037 ++ fallthrough;
2038 + case raa_dmpvr2_2rail:
2039 + info->pages = 2;
2040 + info->read_word_data = raa_dmpvr2_read_word_data;
2041 +@@ -304,7 +309,7 @@ static const struct i2c_device_id raa_dmpvr_id[] = {
2042 + {"raa228000", raa_dmpvr2_hv},
2043 + {"raa228004", raa_dmpvr2_hv},
2044 + {"raa228006", raa_dmpvr2_hv},
2045 +- {"raa228228", raa_dmpvr2_2rail},
2046 ++ {"raa228228", raa_dmpvr2_2rail_nontc},
2047 + {"raa229001", raa_dmpvr2_2rail},
2048 + {"raa229004", raa_dmpvr2_2rail},
2049 + {}
2050 +diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
2051 +index 688e928188214..d8295b1c379d1 100644
2052 +--- a/drivers/i2c/busses/i2c-bcm-iproc.c
2053 ++++ b/drivers/i2c/busses/i2c-bcm-iproc.c
2054 +@@ -720,7 +720,7 @@ static int bcm_iproc_i2c_xfer_internal(struct bcm_iproc_i2c_dev *iproc_i2c,
2055 +
2056 + /* mark the last byte */
2057 + if (!process_call && (i == msg->len - 1))
2058 +- val |= 1 << M_TX_WR_STATUS_SHIFT;
2059 ++ val |= BIT(M_TX_WR_STATUS_SHIFT);
2060 +
2061 + iproc_i2c_wr_reg(iproc_i2c, M_TX_OFFSET, val);
2062 + }
2063 +@@ -738,7 +738,7 @@ static int bcm_iproc_i2c_xfer_internal(struct bcm_iproc_i2c_dev *iproc_i2c,
2064 + */
2065 + addr = i2c_8bit_addr_from_msg(msg);
2066 + /* mark it the last byte out */
2067 +- val = addr | (1 << M_TX_WR_STATUS_SHIFT);
2068 ++ val = addr | BIT(M_TX_WR_STATUS_SHIFT);
2069 + iproc_i2c_wr_reg(iproc_i2c, M_TX_OFFSET, val);
2070 + }
2071 +
2072 +diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
2073 +index b0f308cb7f7c2..201b2718f0755 100644
2074 +--- a/drivers/iommu/Kconfig
2075 ++++ b/drivers/iommu/Kconfig
2076 +@@ -143,7 +143,7 @@ config AMD_IOMMU
2077 + select IOMMU_API
2078 + select IOMMU_IOVA
2079 + select IOMMU_DMA
2080 +- depends on X86_64 && PCI && ACPI
2081 ++ depends on X86_64 && PCI && ACPI && HAVE_CMPXCHG_DOUBLE
2082 + help
2083 + With this option you can enable support for AMD IOMMU hardware in
2084 + your system. An IOMMU is a hardware component which provides
2085 +diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
2086 +index 6ebd4825e3206..bf45f8e2c7edd 100644
2087 +--- a/drivers/iommu/amd/init.c
2088 ++++ b/drivers/iommu/amd/init.c
2089 +@@ -1518,7 +1518,14 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
2090 + iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
2091 + else
2092 + iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
2093 +- if (((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0))
2094 ++
2095 ++ /*
2096 ++ * Note: GA (128-bit IRTE) mode requires cmpxchg16b support.
2097 ++ * GAM also requires GA mode. Therefore, we need to
2098 ++ * check cmpxchg16b support before enabling it.
2099 ++ */
2100 ++ if (!boot_cpu_has(X86_FEATURE_CX16) ||
2101 ++ ((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0))
2102 + amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
2103 + break;
2104 + case 0x11:
2105 +@@ -1527,8 +1534,18 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
2106 + iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
2107 + else
2108 + iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
2109 +- if (((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0))
2110 ++
2111 ++ /*
2112 ++ * Note: GA (128-bit IRTE) mode requires cmpxchg16b support.
2113 ++ * XT and GAM also require GA mode. Therefore, we need to
2114 ++ * check cmpxchg16b support before enabling them.
2115 ++ */
2116 ++ if (!boot_cpu_has(X86_FEATURE_CX16) ||
2117 ++ ((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0)) {
2118 + amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
2119 ++ break;
2120 ++ }
2121 ++
2122 + /*
2123 + * Note: Since iommu_update_intcapxt() leverages
2124 + * the IOMMU MMIO access to MSI capability block registers
2125 +diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
2126 +index 2f22326ee4dfe..200ee948f6ec1 100644
2127 +--- a/drivers/iommu/amd/iommu.c
2128 ++++ b/drivers/iommu/amd/iommu.c
2129 +@@ -3283,6 +3283,7 @@ out:
2130 + static int modify_irte_ga(u16 devid, int index, struct irte_ga *irte,
2131 + struct amd_ir_data *data)
2132 + {
2133 ++ bool ret;
2134 + struct irq_remap_table *table;
2135 + struct amd_iommu *iommu;
2136 + unsigned long flags;
2137 +@@ -3300,10 +3301,18 @@ static int modify_irte_ga(u16 devid, int index, struct irte_ga *irte,
2138 +
2139 + entry = (struct irte_ga *)table->table;
2140 + entry = &entry[index];
2141 +- entry->lo.fields_remap.valid = 0;
2142 +- entry->hi.val = irte->hi.val;
2143 +- entry->lo.val = irte->lo.val;
2144 +- entry->lo.fields_remap.valid = 1;
2145 ++
2146 ++ ret = cmpxchg_double(&entry->lo.val, &entry->hi.val,
2147 ++ entry->lo.val, entry->hi.val,
2148 ++ irte->lo.val, irte->hi.val);
2149 ++ /*
2150 ++ * We use cmpxchg16b to atomically update the 128-bit IRTE,
2151 ++ * and it cannot be updated by the hardware or other processors
2152 ++ * behind us, so the exchange should always succeed (i.e. the
2153 ++ * compared value matches the old value).
2154 ++ */
2155 ++ WARN_ON(!ret);
2156 ++
2157 + if (data)
2158 + data->ref = entry;
2159 +
2160 +@@ -3841,6 +3850,7 @@ int amd_iommu_deactivate_guest_mode(void *data)
2161 + struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
2162 + struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
2163 + struct irq_cfg *cfg = ir_data->cfg;
2164 ++ u64 valid = entry->lo.fields_remap.valid;
2165 +
2166 + if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
2167 + !entry || !entry->lo.fields_vapic.guest_mode)
2168 +@@ -3849,6 +3859,7 @@ int amd_iommu_deactivate_guest_mode(void *data)
2169 + entry->lo.val = 0;
2170 + entry->hi.val = 0;
2171 +
2172 ++ entry->lo.fields_remap.valid = valid;
2173 + entry->lo.fields_remap.dm = apic->irq_dest_mode;
2174 + entry->lo.fields_remap.int_type = apic->irq_delivery_mode;
2175 + entry->hi.fields.vector = cfg->vector;
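The modify_irte_ga() change replaces valid-bit toggling with a single 16-byte compare-exchange, so the IRTE is never observed half-updated. A userspace sketch of the same idea using GCC's 128-bit atomics (build with -mcx16 or link libatomic; the struct is a stand-in and the pointer cast is sketch-level, not production-grade):

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

struct irte_ga {
	uint64_t lo;
	uint64_t hi;
} __attribute__((aligned(16)));

static bool irte_update(struct irte_ga *entry, struct irte_ga want)
{
	__int128 old, desired;

	memcpy(&old, entry, sizeof(old));
	memcpy(&desired, &want, sizeof(desired));

	/* Fails only if hardware or another CPU changed the entry first. */
	return __atomic_compare_exchange_n((__int128 *)entry, &old, desired,
					   false, __ATOMIC_SEQ_CST,
					   __ATOMIC_SEQ_CST);
}

int main(void)
{
	struct irte_ga entry = { .lo = 1, .hi = 2 };
	struct irte_ga want = { .lo = 3, .hi = 4 };

	return irte_update(&entry, want) ? 0 : 1;
}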
2176 +diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
2177 +index 04e82f1756010..fbe0b0cc56edf 100644
2178 +--- a/drivers/iommu/intel/iommu.c
2179 ++++ b/drivers/iommu/intel/iommu.c
2180 +@@ -123,29 +123,29 @@ static inline unsigned int level_to_offset_bits(int level)
2181 + return (level - 1) * LEVEL_STRIDE;
2182 + }
2183 +
2184 +-static inline int pfn_level_offset(unsigned long pfn, int level)
2185 ++static inline int pfn_level_offset(u64 pfn, int level)
2186 + {
2187 + return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
2188 + }
2189 +
2190 +-static inline unsigned long level_mask(int level)
2191 ++static inline u64 level_mask(int level)
2192 + {
2193 +- return -1UL << level_to_offset_bits(level);
2194 ++ return -1ULL << level_to_offset_bits(level);
2195 + }
2196 +
2197 +-static inline unsigned long level_size(int level)
2198 ++static inline u64 level_size(int level)
2199 + {
2200 +- return 1UL << level_to_offset_bits(level);
2201 ++ return 1ULL << level_to_offset_bits(level);
2202 + }
2203 +
2204 +-static inline unsigned long align_to_level(unsigned long pfn, int level)
2205 ++static inline u64 align_to_level(u64 pfn, int level)
2206 + {
2207 + return (pfn + level_size(level) - 1) & level_mask(level);
2208 + }
2209 +
2210 + static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
2211 + {
2212 +- return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
2213 ++ return 1UL << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
2214 + }
2215 +
2216 + /* VT-d pages must always be _smaller_ than MM pages. Otherwise things
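The widening to u64 matters because with LEVEL_STRIDE == 9 the upper page-table levels shift by more than 31 bits, which overflows unsigned long on 32-bit builds. A quick demonstration (the stride constant mirrors the driver; everything else is standalone):

#include <stdio.h>
#include <stdint.h>

#define LEVEL_STRIDE 9

static unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static uint64_t level_size(int level)
{
	/* 1UL << 36 would be undefined with a 32-bit long */
	return 1ULL << level_to_offset_bits(level);
}

int main(void)
{
	for (int level = 1; level <= 5; level++)
		printf("level %d: 0x%llx pages\n", level,
		       (unsigned long long)level_size(level));
	return 0;
}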
2217 +diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
2218 +index aa096b333a991..4828f4fe09ab5 100644
2219 +--- a/drivers/iommu/intel/irq_remapping.c
2220 ++++ b/drivers/iommu/intel/irq_remapping.c
2221 +@@ -507,12 +507,18 @@ static void iommu_enable_irq_remapping(struct intel_iommu *iommu)
2222 +
2223 + /* Enable interrupt-remapping */
2224 + iommu->gcmd |= DMA_GCMD_IRE;
2225 +- iommu->gcmd &= ~DMA_GCMD_CFI; /* Block compatibility-format MSIs */
2226 + writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
2227 +-
2228 + IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
2229 + readl, (sts & DMA_GSTS_IRES), sts);
2230 +
2231 ++ /* Block compatibility-format MSIs */
2232 ++ if (sts & DMA_GSTS_CFIS) {
2233 ++ iommu->gcmd &= ~DMA_GCMD_CFI;
2234 ++ writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
2235 ++ IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
2236 ++ readl, !(sts & DMA_GSTS_CFIS), sts);
2237 ++ }
2238 ++
2239 + /*
2240 + * With CFI clear in the Global Command register, we should be
2241 + * protected from dangerous (i.e. compatibility) interrupts
2242 +diff --git a/drivers/irqchip/irq-ingenic.c b/drivers/irqchip/irq-ingenic.c
2243 +index 9f3da4260ca65..b61a8901ef722 100644
2244 +--- a/drivers/irqchip/irq-ingenic.c
2245 ++++ b/drivers/irqchip/irq-ingenic.c
2246 +@@ -125,7 +125,7 @@ static int __init ingenic_intc_of_init(struct device_node *node,
2247 + irq_reg_writel(gc, IRQ_MSK(32), JZ_REG_INTC_SET_MASK);
2248 + }
2249 +
2250 +- if (request_irq(parent_irq, intc_cascade, 0,
2251 ++ if (request_irq(parent_irq, intc_cascade, IRQF_NO_SUSPEND,
2252 + "SoC intc cascade interrupt", NULL))
2253 + pr_err("Failed to register SoC intc cascade interrupt\n");
2254 + return 0;
2255 +diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
2256 +index 151aa95775be2..af6d4f898e4c1 100644
2257 +--- a/drivers/md/dm-cache-metadata.c
2258 ++++ b/drivers/md/dm-cache-metadata.c
2259 +@@ -537,12 +537,16 @@ static int __create_persistent_data_objects(struct dm_cache_metadata *cmd,
2260 + CACHE_MAX_CONCURRENT_LOCKS);
2261 + if (IS_ERR(cmd->bm)) {
2262 + DMERR("could not create block manager");
2263 +- return PTR_ERR(cmd->bm);
2264 ++ r = PTR_ERR(cmd->bm);
2265 ++ cmd->bm = NULL;
2266 ++ return r;
2267 + }
2268 +
2269 + r = __open_or_format_metadata(cmd, may_format_device);
2270 +- if (r)
2271 ++ if (r) {
2272 + dm_block_manager_destroy(cmd->bm);
2273 ++ cmd->bm = NULL;
2274 ++ }
2275 +
2276 + return r;
2277 + }
2278 +diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
2279 +index 000ddfab5ba05..195ff0974ece9 100644
2280 +--- a/drivers/md/dm-crypt.c
2281 ++++ b/drivers/md/dm-crypt.c
2282 +@@ -736,7 +736,7 @@ static int crypt_iv_eboiv_gen(struct crypt_config *cc, u8 *iv,
2283 + u8 buf[MAX_CIPHER_BLOCKSIZE] __aligned(__alignof__(__le64));
2284 + struct skcipher_request *req;
2285 + struct scatterlist src, dst;
2286 +- struct crypto_wait wait;
2287 ++ DECLARE_CRYPTO_WAIT(wait);
2288 + int err;
2289 +
2290 + req = skcipher_request_alloc(any_tfm(cc), GFP_NOIO);
2291 +@@ -933,7 +933,7 @@ static int crypt_iv_elephant(struct crypt_config *cc, struct dm_crypt_request *d
2292 + u8 *es, *ks, *data, *data2, *data_offset;
2293 + struct skcipher_request *req;
2294 + struct scatterlist *sg, *sg2, src, dst;
2295 +- struct crypto_wait wait;
2296 ++ DECLARE_CRYPTO_WAIT(wait);
2297 + int i, r;
2298 +
2299 + req = skcipher_request_alloc(elephant->tfm, GFP_NOIO);
2300 +diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
2301 +index a83a1de1e03fa..8b4289014c00d 100644
2302 +--- a/drivers/md/dm-integrity.c
2303 ++++ b/drivers/md/dm-integrity.c
2304 +@@ -2487,6 +2487,7 @@ next_chunk:
2305 + range.logical_sector = le64_to_cpu(ic->sb->recalc_sector);
2306 + if (unlikely(range.logical_sector >= ic->provided_data_sectors)) {
2307 + if (ic->mode == 'B') {
2308 ++ block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
2309 + DEBUG_print("queue_delayed_work: bitmap_flush_work\n");
2310 + queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
2311 + }
2312 +@@ -2564,6 +2565,17 @@ next_chunk:
2313 + goto err;
2314 + }
2315 +
2316 ++ if (ic->mode == 'B') {
2317 ++ sector_t start, end;
2318 ++ start = (range.logical_sector >>
2319 ++ (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)) <<
2320 ++ (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
2321 ++ end = ((range.logical_sector + range.n_sectors) >>
2322 ++ (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)) <<
2323 ++ (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
2324 ++ block_bitmap_op(ic, ic->recalc_bitmap, start, end - start, BITMAP_OP_CLEAR);
2325 ++ }
2326 ++
2327 + advance_and_next:
2328 + cond_resched();
2329 +
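The dm-integrity hunk rounds both ends of the recalculated range to whole bitmap bits before clearing: shifting right then left by the same amount rounds down to a multiple of 2^k sectors. A worked standalone example (k stands in for log2_sectors_per_block + log2_blocks_per_bitmap_bit):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int k = 3;	/* e.g. 8 sectors per bitmap bit */
	uint64_t logical_sector = 21, n_sectors = 40;

	uint64_t start = (logical_sector >> k) << k;			/* 16 */
	uint64_t end = ((logical_sector + n_sectors) >> k) << k;	/* 56 */

	printf("clear bits covering sectors [%llu, %llu)\n",
	       (unsigned long long)start, (unsigned long long)end);
	return 0;
}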
2330 +diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
2331 +index 78cff42d987ee..dc5846971d6cc 100644
2332 +--- a/drivers/md/dm-mpath.c
2333 ++++ b/drivers/md/dm-mpath.c
2334 +@@ -1247,17 +1247,25 @@ static void multipath_wait_for_pg_init_completion(struct multipath *m)
2335 + static void flush_multipath_work(struct multipath *m)
2336 + {
2337 + if (m->hw_handler_name) {
2338 +- set_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
2339 +- smp_mb__after_atomic();
2340 ++ unsigned long flags;
2341 ++
2342 ++ if (!atomic_read(&m->pg_init_in_progress))
2343 ++ goto skip;
2344 ++
2345 ++ spin_lock_irqsave(&m->lock, flags);
2346 ++ if (atomic_read(&m->pg_init_in_progress) &&
2347 ++ !test_and_set_bit(MPATHF_PG_INIT_DISABLED, &m->flags)) {
2348 ++ spin_unlock_irqrestore(&m->lock, flags);
2349 +
2350 +- if (atomic_read(&m->pg_init_in_progress))
2351 + flush_workqueue(kmpath_handlerd);
2352 +- multipath_wait_for_pg_init_completion(m);
2353 ++ multipath_wait_for_pg_init_completion(m);
2354 +
2355 +- clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
2356 +- smp_mb__after_atomic();
2357 ++ spin_lock_irqsave(&m->lock, flags);
2358 ++ clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
2359 ++ }
2360 ++ spin_unlock_irqrestore(&m->lock, flags);
2361 + }
2362 +-
2363 ++skip:
2364 + if (m->queue_mode == DM_TYPE_BIO_BASED)
2365 + flush_work(&m->process_queued_bios);
2366 + flush_work(&m->trigger_event);
2367 +diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
2368 +index 76b6b323bf4bd..b461836b6d263 100644
2369 +--- a/drivers/md/dm-thin-metadata.c
2370 ++++ b/drivers/md/dm-thin-metadata.c
2371 +@@ -739,12 +739,16 @@ static int __create_persistent_data_objects(struct dm_pool_metadata *pmd, bool f
2372 + THIN_MAX_CONCURRENT_LOCKS);
2373 + if (IS_ERR(pmd->bm)) {
2374 + DMERR("could not create block manager");
2375 +- return PTR_ERR(pmd->bm);
2376 ++ r = PTR_ERR(pmd->bm);
2377 ++ pmd->bm = NULL;
2378 ++ return r;
2379 + }
2380 +
2381 + r = __open_or_format_metadata(pmd, format_device);
2382 +- if (r)
2383 ++ if (r) {
2384 + dm_block_manager_destroy(pmd->bm);
2385 ++ pmd->bm = NULL;
2386 ++ }
2387 +
2388 + return r;
2389 + }
2390 +@@ -954,7 +958,7 @@ int dm_pool_metadata_close(struct dm_pool_metadata *pmd)
2391 + }
2392 +
2393 + pmd_write_lock_in_core(pmd);
2394 +- if (!dm_bm_is_read_only(pmd->bm) && !pmd->fail_io) {
2395 ++ if (!pmd->fail_io && !dm_bm_is_read_only(pmd->bm)) {
2396 + r = __commit_transaction(pmd);
2397 + if (r < 0)
2398 + DMWARN("%s: __commit_transaction() failed, error = %d",
2399 +diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
2400 +index 5358894bb9fdc..1533419f18758 100644
2401 +--- a/drivers/md/dm-writecache.c
2402 ++++ b/drivers/md/dm-writecache.c
2403 +@@ -231,6 +231,7 @@ static int persistent_memory_claim(struct dm_writecache *wc)
2404 + pfn_t pfn;
2405 + int id;
2406 + struct page **pages;
2407 ++ sector_t offset;
2408 +
2409 + wc->memory_vmapped = false;
2410 +
2411 +@@ -245,9 +246,16 @@ static int persistent_memory_claim(struct dm_writecache *wc)
2412 + goto err1;
2413 + }
2414 +
2415 ++ offset = get_start_sect(wc->ssd_dev->bdev);
2416 ++ if (offset & (PAGE_SIZE / 512 - 1)) {
2417 ++ r = -EINVAL;
2418 ++ goto err1;
2419 ++ }
2420 ++ offset >>= PAGE_SHIFT - 9;
2421 ++
2422 + id = dax_read_lock();
2423 +
2424 +- da = dax_direct_access(wc->ssd_dev->dax_dev, 0, p, &wc->memory_map, &pfn);
2425 ++ da = dax_direct_access(wc->ssd_dev->dax_dev, offset, p, &wc->memory_map, &pfn);
2426 + if (da < 0) {
2427 + wc->memory_map = NULL;
2428 + r = da;
2429 +@@ -269,7 +277,7 @@ static int persistent_memory_claim(struct dm_writecache *wc)
2430 + i = 0;
2431 + do {
2432 + long daa;
2433 +- daa = dax_direct_access(wc->ssd_dev->dax_dev, i, p - i,
2434 ++ daa = dax_direct_access(wc->ssd_dev->dax_dev, offset + i, p - i,
2435 + NULL, &pfn);
2436 + if (daa <= 0) {
2437 + r = daa ? daa : -EINVAL;
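The dm-writecache hunk makes DAX claiming work on partitions: the partition's start sector must be page aligned, and is then converted from 512-byte sectors to the page offset that dax_direct_access() expects. A worked sketch of the unit conversion (assumes 4 KiB pages):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE (1UL << PAGE_SHIFT)

int main(void)
{
	uint64_t offset = 2048;	/* partition start in 512 B sectors (1 MiB) */

	if (offset & (PAGE_SIZE / 512 - 1)) {
		fprintf(stderr, "start sector is not page aligned\n");
		return 1;
	}
	offset >>= PAGE_SHIFT - 9;	/* sectors -> pages: 2048 / 8 */
	printf("page offset: %llu\n", (unsigned long long)offset);	/* 256 */
	return 0;
}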
2438 +diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c
2439 +index 749ec268d957d..54c089a50b152 100644
2440 +--- a/drivers/md/persistent-data/dm-block-manager.c
2441 ++++ b/drivers/md/persistent-data/dm-block-manager.c
2442 +@@ -493,7 +493,7 @@ int dm_bm_write_lock(struct dm_block_manager *bm,
2443 + void *p;
2444 + int r;
2445 +
2446 +- if (bm->read_only)
2447 ++ if (dm_bm_is_read_only(bm))
2448 + return -EPERM;
2449 +
2450 + p = dm_bufio_read(bm->bufio, b, (struct dm_buffer **) result);
2451 +@@ -562,7 +562,7 @@ int dm_bm_write_lock_zero(struct dm_block_manager *bm,
2452 + struct buffer_aux *aux;
2453 + void *p;
2454 +
2455 +- if (bm->read_only)
2456 ++ if (dm_bm_is_read_only(bm))
2457 + return -EPERM;
2458 +
2459 + p = dm_bufio_new(bm->bufio, b, (struct dm_buffer **) result);
2460 +@@ -602,7 +602,7 @@ EXPORT_SYMBOL_GPL(dm_bm_unlock);
2461 +
2462 + int dm_bm_flush(struct dm_block_manager *bm)
2463 + {
2464 +- if (bm->read_only)
2465 ++ if (dm_bm_is_read_only(bm))
2466 + return -EPERM;
2467 +
2468 + return dm_bufio_write_dirty_buffers(bm->bufio);
2469 +@@ -616,19 +616,21 @@ void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b)
2470 +
2471 + bool dm_bm_is_read_only(struct dm_block_manager *bm)
2472 + {
2473 +- return bm->read_only;
2474 ++ return (bm ? bm->read_only : true);
2475 + }
2476 + EXPORT_SYMBOL_GPL(dm_bm_is_read_only);
2477 +
2478 + void dm_bm_set_read_only(struct dm_block_manager *bm)
2479 + {
2480 +- bm->read_only = true;
2481 ++ if (bm)
2482 ++ bm->read_only = true;
2483 + }
2484 + EXPORT_SYMBOL_GPL(dm_bm_set_read_only);
2485 +
2486 + void dm_bm_set_read_write(struct dm_block_manager *bm)
2487 + {
2488 +- bm->read_only = false;
2489 ++ if (bm)
2490 ++ bm->read_only = false;
2491 + }
2492 + EXPORT_SYMBOL_GPL(dm_bm_set_read_write);
2493 +
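With the metadata constructors above now NULLing cmd->bm / pmd->bm on failure, the block-manager accessors are made NULL-tolerant, and a missing manager reads as read-only so writes fail safely with -EPERM. A minimal sketch of that convention (types are stand-ins):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct dm_block_manager {
	bool read_only;
};

static bool bm_is_read_only(struct dm_block_manager *bm)
{
	return bm ? bm->read_only : true;	/* no manager: refuse writes */
}

int main(void)
{
	printf("%d\n", bm_is_read_only(NULL));	/* 1: fails safe */
	return 0;
}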
2494 +diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
2495 +index da11036ad804d..6b1a6851ccb0b 100644
2496 +--- a/drivers/media/i2c/Kconfig
2497 ++++ b/drivers/media/i2c/Kconfig
2498 +@@ -728,7 +728,7 @@ config VIDEO_HI556
2499 + config VIDEO_IMX214
2500 + tristate "Sony IMX214 sensor support"
2501 + depends on GPIOLIB && I2C && VIDEO_V4L2
2502 +- depends on V4L2_FWNODE
2503 ++ select V4L2_FWNODE
2504 + select MEDIA_CONTROLLER
2505 + select VIDEO_V4L2_SUBDEV_API
2506 + select REGMAP_I2C
2507 +diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
2508 +index d7064d664d528..38aa0c2de243f 100644
2509 +--- a/drivers/media/rc/rc-main.c
2510 ++++ b/drivers/media/rc/rc-main.c
2511 +@@ -1292,6 +1292,10 @@ static ssize_t store_protocols(struct device *device,
2512 + }
2513 +
2514 + mutex_lock(&dev->lock);
2515 ++ if (!dev->registered) {
2516 ++ mutex_unlock(&dev->lock);
2517 ++ return -ENODEV;
2518 ++ }
2519 +
2520 + old_protocols = *current_protocols;
2521 + new_protocols = old_protocols;
2522 +@@ -1430,6 +1434,10 @@ static ssize_t store_filter(struct device *device,
2523 + return -EINVAL;
2524 +
2525 + mutex_lock(&dev->lock);
2526 ++ if (!dev->registered) {
2527 ++ mutex_unlock(&dev->lock);
2528 ++ return -ENODEV;
2529 ++ }
2530 +
2531 + new_filter = *filter;
2532 + if (fattr->mask)
2533 +@@ -1544,6 +1552,10 @@ static ssize_t store_wakeup_protocols(struct device *device,
2534 + int i;
2535 +
2536 + mutex_lock(&dev->lock);
2537 ++ if (!dev->registered) {
2538 ++ mutex_unlock(&dev->lock);
2539 ++ return -ENODEV;
2540 ++ }
2541 +
2542 + allowed = dev->allowed_wakeup_protocols;
2543 +
2544 +@@ -1601,25 +1613,25 @@ static void rc_dev_release(struct device *device)
2545 + kfree(dev);
2546 + }
2547 +
2548 +-#define ADD_HOTPLUG_VAR(fmt, val...) \
2549 +- do { \
2550 +- int err = add_uevent_var(env, fmt, val); \
2551 +- if (err) \
2552 +- return err; \
2553 +- } while (0)
2554 +-
2555 + static int rc_dev_uevent(struct device *device, struct kobj_uevent_env *env)
2556 + {
2557 + struct rc_dev *dev = to_rc_dev(device);
2558 ++ int ret = 0;
2559 +
2560 +- if (dev->rc_map.name)
2561 +- ADD_HOTPLUG_VAR("NAME=%s", dev->rc_map.name);
2562 +- if (dev->driver_name)
2563 +- ADD_HOTPLUG_VAR("DRV_NAME=%s", dev->driver_name);
2564 +- if (dev->device_name)
2565 +- ADD_HOTPLUG_VAR("DEV_NAME=%s", dev->device_name);
2566 ++ mutex_lock(&dev->lock);
2567 +
2568 +- return 0;
2569 ++ if (!dev->registered)
2570 ++ ret = -ENODEV;
2571 ++ if (ret == 0 && dev->rc_map.name)
2572 ++ ret = add_uevent_var(env, "NAME=%s", dev->rc_map.name);
2573 ++ if (ret == 0 && dev->driver_name)
2574 ++ ret = add_uevent_var(env, "DRV_NAME=%s", dev->driver_name);
2575 ++ if (ret == 0 && dev->device_name)
2576 ++ ret = add_uevent_var(env, "DEV_NAME=%s", dev->device_name);
2577 ++
2578 ++ mutex_unlock(&dev->lock);
2579 ++
2580 ++ return ret;
2581 + }
2582 +
2583 + /*
2584 +@@ -2011,14 +2023,14 @@ void rc_unregister_device(struct rc_dev *dev)
2585 + del_timer_sync(&dev->timer_keyup);
2586 + del_timer_sync(&dev->timer_repeat);
2587 +
2588 +- rc_free_rx_device(dev);
2589 +-
2590 + mutex_lock(&dev->lock);
2591 + if (dev->users && dev->close)
2592 + dev->close(dev);
2593 + dev->registered = false;
2594 + mutex_unlock(&dev->lock);
2595 +
2596 ++ rc_free_rx_device(dev);
2597 ++
2598 + /*
2599 + * lirc device should be freed with dev->registered = false, so
2600 + * that userspace polling will get notified.
2601 +diff --git a/drivers/media/test-drivers/vicodec/vicodec-core.c b/drivers/media/test-drivers/vicodec/vicodec-core.c
2602 +index e879290727ef4..25c4ca6884dda 100644
2603 +--- a/drivers/media/test-drivers/vicodec/vicodec-core.c
2604 ++++ b/drivers/media/test-drivers/vicodec/vicodec-core.c
2605 +@@ -1994,6 +1994,7 @@ static int vicodec_request_validate(struct media_request *req)
2606 + }
2607 + ctrl = v4l2_ctrl_request_hdl_ctrl_find(hdl,
2608 + vicodec_ctrl_stateless_state.id);
2609 ++ v4l2_ctrl_request_hdl_put(hdl);
2610 + if (!ctrl) {
2611 + v4l2_info(&ctx->dev->v4l2_dev,
2612 + "Missing required codec control\n");
2613 +diff --git a/drivers/misc/habanalabs/device.c b/drivers/misc/habanalabs/device.c
2614 +index 59608d1bac880..baa4e66d4c457 100644
2615 +--- a/drivers/misc/habanalabs/device.c
2616 ++++ b/drivers/misc/habanalabs/device.c
2617 +@@ -1027,7 +1027,7 @@ again:
2618 + goto out_err;
2619 + }
2620 +
2621 +- hl_set_max_power(hdev, hdev->max_power);
2622 ++ hl_set_max_power(hdev);
2623 + } else {
2624 + rc = hdev->asic_funcs->soft_reset_late_init(hdev);
2625 + if (rc) {
2626 +@@ -1268,6 +1268,11 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
2627 + goto out_disabled;
2628 + }
2629 +
2630 ++ /* Need to call this again because the max power might change,
2631 ++ * depending on the card type for certain ASICs.
2632 ++ */
2633 ++ hl_set_max_power(hdev);
2634 ++
2635 + /*
2636 + * hl_hwmon_init() must be called after device_late_init(), because only
2637 + * there we get the information from the device about which
2638 +diff --git a/drivers/misc/habanalabs/firmware_if.c b/drivers/misc/habanalabs/firmware_if.c
2639 +index d27841cb5bcb3..345c228a7971e 100644
2640 +--- a/drivers/misc/habanalabs/firmware_if.c
2641 ++++ b/drivers/misc/habanalabs/firmware_if.c
2642 +@@ -13,6 +13,7 @@
2643 + #include <linux/io-64-nonatomic-lo-hi.h>
2644 + #include <linux/slab.h>
2645 +
2646 ++#define FW_FILE_MAX_SIZE 0x1400000 /* maximum size of 20MB */
2647 + /**
2648 + * hl_fw_load_fw_to_device() - Load F/W code to device's memory.
2649 + * @hdev: pointer to hl_device structure.
2650 +@@ -45,6 +46,14 @@ int hl_fw_load_fw_to_device(struct hl_device *hdev, const char *fw_name,
2651 +
2652 + dev_dbg(hdev->dev, "%s firmware size == %zu\n", fw_name, fw_size);
2653 +
2654 ++ if (fw_size > FW_FILE_MAX_SIZE) {
2655 ++ dev_err(hdev->dev,
2656 ++ "FW file size %zu exceeds maximum of %u bytes\n",
2657 ++ fw_size, FW_FILE_MAX_SIZE);
2658 ++ rc = -EINVAL;
2659 ++ goto out;
2660 ++ }
2661 ++
2662 + fw_data = (const u64 *) fw->data;
2663 +
2664 + memcpy_toio(dst, fw_data, fw_size);
2665 +diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c
2666 +index 637a9d608707f..ca183733847b6 100644
2667 +--- a/drivers/misc/habanalabs/gaudi/gaudi.c
2668 ++++ b/drivers/misc/habanalabs/gaudi/gaudi.c
2669 +@@ -154,6 +154,29 @@ static const u16 gaudi_packet_sizes[MAX_PACKET_ID] = {
2670 + [PACKET_LOAD_AND_EXE] = sizeof(struct packet_load_and_exe)
2671 + };
2672 +
2673 ++static inline bool validate_packet_id(enum packet_id id)
2674 ++{
2675 ++ switch (id) {
2676 ++ case PACKET_WREG_32:
2677 ++ case PACKET_WREG_BULK:
2678 ++ case PACKET_MSG_LONG:
2679 ++ case PACKET_MSG_SHORT:
2680 ++ case PACKET_CP_DMA:
2681 ++ case PACKET_REPEAT:
2682 ++ case PACKET_MSG_PROT:
2683 ++ case PACKET_FENCE:
2684 ++ case PACKET_LIN_DMA:
2685 ++ case PACKET_NOP:
2686 ++ case PACKET_STOP:
2687 ++ case PACKET_ARB_POINT:
2688 ++ case PACKET_WAIT:
2689 ++ case PACKET_LOAD_AND_EXE:
2690 ++ return true;
2691 ++ default:
2692 ++ return false;
2693 ++ }
2694 ++}
2695 ++
2696 + static const char * const
2697 + gaudi_tpc_interrupts_cause[GAUDI_NUM_OF_TPC_INTR_CAUSE] = {
2698 + "tpc_address_exceed_slm",
2699 +@@ -424,7 +447,7 @@ static int gaudi_get_fixed_properties(struct hl_device *hdev)
2700 + prop->num_of_events = GAUDI_EVENT_SIZE;
2701 + prop->tpc_enabled_mask = TPC_ENABLED_MASK;
2702 +
2703 +- prop->max_power_default = MAX_POWER_DEFAULT;
2704 ++ prop->max_power_default = MAX_POWER_DEFAULT_PCI;
2705 +
2706 + prop->cb_pool_cb_cnt = GAUDI_CB_POOL_CB_CNT;
2707 + prop->cb_pool_cb_size = GAUDI_CB_POOL_CB_SIZE;
2708 +@@ -2541,6 +2564,7 @@ static void gaudi_set_clock_gating(struct hl_device *hdev)
2709 + {
2710 + struct gaudi_device *gaudi = hdev->asic_specific;
2711 + u32 qman_offset;
2712 ++ bool enable;
2713 + int i;
2714 +
2715 + /* In case we are during debug session, don't enable the clock gate
2716 +@@ -2550,46 +2574,43 @@ static void gaudi_set_clock_gating(struct hl_device *hdev)
2717 + return;
2718 +
2719 + for (i = GAUDI_PCI_DMA_1, qman_offset = 0 ; i < GAUDI_HBM_DMA_1 ; i++) {
2720 +- if (!(hdev->clock_gating_mask &
2721 +- (BIT_ULL(gaudi_dma_assignment[i]))))
2722 +- continue;
2723 ++ enable = !!(hdev->clock_gating_mask &
2724 ++ (BIT_ULL(gaudi_dma_assignment[i])));
2725 +
2726 + qman_offset = gaudi_dma_assignment[i] * DMA_QMAN_OFFSET;
2727 +- WREG32(mmDMA0_QM_CGM_CFG1 + qman_offset, QMAN_CGM1_PWR_GATE_EN);
2728 ++ WREG32(mmDMA0_QM_CGM_CFG1 + qman_offset,
2729 ++ enable ? QMAN_CGM1_PWR_GATE_EN : 0);
2730 + WREG32(mmDMA0_QM_CGM_CFG + qman_offset,
2731 +- QMAN_UPPER_CP_CGM_PWR_GATE_EN);
2732 ++ enable ? QMAN_UPPER_CP_CGM_PWR_GATE_EN : 0);
2733 + }
2734 +
2735 + for (i = GAUDI_HBM_DMA_1 ; i < GAUDI_DMA_MAX ; i++) {
2736 +- if (!(hdev->clock_gating_mask &
2737 +- (BIT_ULL(gaudi_dma_assignment[i]))))
2738 +- continue;
2739 ++ enable = !!(hdev->clock_gating_mask &
2740 ++ (BIT_ULL(gaudi_dma_assignment[i])));
2741 +
2742 + qman_offset = gaudi_dma_assignment[i] * DMA_QMAN_OFFSET;
2743 +- WREG32(mmDMA0_QM_CGM_CFG1 + qman_offset, QMAN_CGM1_PWR_GATE_EN);
2744 ++ WREG32(mmDMA0_QM_CGM_CFG1 + qman_offset,
2745 ++ enable ? QMAN_CGM1_PWR_GATE_EN : 0);
2746 + WREG32(mmDMA0_QM_CGM_CFG + qman_offset,
2747 +- QMAN_COMMON_CP_CGM_PWR_GATE_EN);
2748 ++ enable ? QMAN_COMMON_CP_CGM_PWR_GATE_EN : 0);
2749 + }
2750 +
2751 +- if (hdev->clock_gating_mask & (BIT_ULL(GAUDI_ENGINE_ID_MME_0))) {
2752 +- WREG32(mmMME0_QM_CGM_CFG1, QMAN_CGM1_PWR_GATE_EN);
2753 +- WREG32(mmMME0_QM_CGM_CFG, QMAN_COMMON_CP_CGM_PWR_GATE_EN);
2754 +- }
2755 ++ enable = !!(hdev->clock_gating_mask & (BIT_ULL(GAUDI_ENGINE_ID_MME_0)));
2756 ++ WREG32(mmMME0_QM_CGM_CFG1, enable ? QMAN_CGM1_PWR_GATE_EN : 0);
2757 ++ WREG32(mmMME0_QM_CGM_CFG, enable ? QMAN_COMMON_CP_CGM_PWR_GATE_EN : 0);
2758 +
2759 +- if (hdev->clock_gating_mask & (BIT_ULL(GAUDI_ENGINE_ID_MME_2))) {
2760 +- WREG32(mmMME2_QM_CGM_CFG1, QMAN_CGM1_PWR_GATE_EN);
2761 +- WREG32(mmMME2_QM_CGM_CFG, QMAN_COMMON_CP_CGM_PWR_GATE_EN);
2762 +- }
2763 ++ enable = !!(hdev->clock_gating_mask & (BIT_ULL(GAUDI_ENGINE_ID_MME_2)));
2764 ++ WREG32(mmMME2_QM_CGM_CFG1, enable ? QMAN_CGM1_PWR_GATE_EN : 0);
2765 ++ WREG32(mmMME2_QM_CGM_CFG, enable ? QMAN_COMMON_CP_CGM_PWR_GATE_EN : 0);
2766 +
2767 + for (i = 0, qman_offset = 0 ; i < TPC_NUMBER_OF_ENGINES ; i++) {
2768 +- if (!(hdev->clock_gating_mask &
2769 +- (BIT_ULL(GAUDI_ENGINE_ID_TPC_0 + i))))
2770 +- continue;
2771 ++ enable = !!(hdev->clock_gating_mask &
2772 ++ (BIT_ULL(GAUDI_ENGINE_ID_TPC_0 + i)));
2773 +
2774 + WREG32(mmTPC0_QM_CGM_CFG1 + qman_offset,
2775 +- QMAN_CGM1_PWR_GATE_EN);
2776 ++ enable ? QMAN_CGM1_PWR_GATE_EN : 0);
2777 + WREG32(mmTPC0_QM_CGM_CFG + qman_offset,
2778 +- QMAN_COMMON_CP_CGM_PWR_GATE_EN);
2779 ++ enable ? QMAN_COMMON_CP_CGM_PWR_GATE_EN : 0);
2780 +
2781 + qman_offset += TPC_QMAN_OFFSET;
2782 + }
2783 +@@ -3859,6 +3880,12 @@ static int gaudi_validate_cb(struct hl_device *hdev,
2784 + PACKET_HEADER_PACKET_ID_MASK) >>
2785 + PACKET_HEADER_PACKET_ID_SHIFT);
2786 +
2787 ++ if (!validate_packet_id(pkt_id)) {
2788 ++ dev_err(hdev->dev, "Invalid packet id %u\n", pkt_id);
2789 ++ rc = -EINVAL;
2790 ++ break;
2791 ++ }
2792 ++
2793 + pkt_size = gaudi_packet_sizes[pkt_id];
2794 + cb_parsed_length += pkt_size;
2795 + if (cb_parsed_length > parser->user_cb_size) {
2796 +@@ -4082,6 +4109,12 @@ static int gaudi_patch_cb(struct hl_device *hdev,
2797 + PACKET_HEADER_PACKET_ID_MASK) >>
2798 + PACKET_HEADER_PACKET_ID_SHIFT);
2799 +
2800 ++ if (!validate_packet_id(pkt_id)) {
2801 ++ dev_err(hdev->dev, "Invalid packet id %u\n", pkt_id);
2802 ++ rc = -EINVAL;
2803 ++ break;
2804 ++ }
2805 ++
2806 + pkt_size = gaudi_packet_sizes[pkt_id];
2807 + cb_parsed_length += pkt_size;
2808 + if (cb_parsed_length > parser->user_cb_size) {
2809 +@@ -6208,6 +6241,15 @@ static int gaudi_armcp_info_get(struct hl_device *hdev)
2810 + strncpy(prop->armcp_info.card_name, GAUDI_DEFAULT_CARD_NAME,
2811 + CARD_NAME_MAX_LEN);
2812 +
2813 ++ hdev->card_type = le32_to_cpu(hdev->asic_prop.armcp_info.card_type);
2814 ++
2815 ++ if (hdev->card_type == armcp_card_type_pci)
2816 ++ prop->max_power_default = MAX_POWER_DEFAULT_PCI;
2817 ++ else if (hdev->card_type == armcp_card_type_pmc)
2818 ++ prop->max_power_default = MAX_POWER_DEFAULT_PMC;
2819 ++
2820 ++ hdev->max_power = prop->max_power_default;
2821 ++
2822 + return 0;
2823 + }
2824 +
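Both habanalabs ASICs now whitelist the packet id before using it to index the size table, so a corrupt command buffer cannot read past the array. A hedged standalone sketch of the parse-loop hardening (two example ids only; the header layout is a stand-in):

#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

enum packet_id { PACKET_NOP, PACKET_STOP, MAX_PACKET_ID };

static const uint16_t packet_sizes[MAX_PACKET_ID] = { 8, 8 };

static bool validate_packet_id(unsigned int id)
{
	switch (id) {
	case PACKET_NOP:
	case PACKET_STOP:
		return true;
	default:
		return false;
	}
}

static int parse_one(uint32_t header, size_t *parsed, size_t cb_size)
{
	unsigned int id = (header >> 24) & 0x1f;	/* stand-in id field */

	if (!validate_packet_id(id))
		return -EINVAL;	/* would otherwise index out of bounds */

	*parsed += packet_sizes[id];
	return *parsed > cb_size ? -EINVAL : 0;
}

int main(void)
{
	size_t parsed = 0;

	printf("%d\n", parse_one(0x1f000000, &parsed, 64));	/* -22 */
	return 0;
}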
2825 +diff --git a/drivers/misc/habanalabs/gaudi/gaudiP.h b/drivers/misc/habanalabs/gaudi/gaudiP.h
2826 +index 41a8d9bff6bf9..00f1efeaa8832 100644
2827 +--- a/drivers/misc/habanalabs/gaudi/gaudiP.h
2828 ++++ b/drivers/misc/habanalabs/gaudi/gaudiP.h
2829 +@@ -41,7 +41,8 @@
2830 +
2831 + #define GAUDI_MAX_CLK_FREQ 2200000000ull /* 2200 MHz */
2832 +
2833 +-#define MAX_POWER_DEFAULT 200000 /* 200W */
2834 ++#define MAX_POWER_DEFAULT_PCI 200000 /* 200W */
2835 ++#define MAX_POWER_DEFAULT_PMC 350000 /* 350W */
2836 +
2837 + #define GAUDI_CPU_TIMEOUT_USEC 15000000 /* 15s */
2838 +
2839 +diff --git a/drivers/misc/habanalabs/gaudi/gaudi_coresight.c b/drivers/misc/habanalabs/gaudi/gaudi_coresight.c
2840 +index bf0e062d7b874..cc3d03549a6e4 100644
2841 +--- a/drivers/misc/habanalabs/gaudi/gaudi_coresight.c
2842 ++++ b/drivers/misc/habanalabs/gaudi/gaudi_coresight.c
2843 +@@ -523,7 +523,7 @@ static int gaudi_config_etf(struct hl_device *hdev,
2844 + }
2845 +
2846 + static bool gaudi_etr_validate_address(struct hl_device *hdev, u64 addr,
2847 +- u32 size, bool *is_host)
2848 ++ u64 size, bool *is_host)
2849 + {
2850 + struct asic_fixed_properties *prop = &hdev->asic_prop;
2851 + struct gaudi_device *gaudi = hdev->asic_specific;
2852 +@@ -535,6 +535,12 @@ static bool gaudi_etr_validate_address(struct hl_device *hdev, u64 addr,
2853 + return false;
2854 + }
2855 +
2856 ++ if (addr > (addr + size)) {
2857 ++ dev_err(hdev->dev,
2858 ++ "ETR buffer size %llu overflow\n", size);
2859 ++ return false;
2860 ++ }
2861 ++
2862 + /* PMMU and HPMMU addresses are equal, check only one of them */
2863 + if ((gaudi->hw_cap_initialized & HW_CAP_MMU) &&
2864 + hl_mem_area_inside_range(addr, size,
2865 +diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
2866 +index 88460b2138d88..c179085ced7b8 100644
2867 +--- a/drivers/misc/habanalabs/goya/goya.c
2868 ++++ b/drivers/misc/habanalabs/goya/goya.c
2869 +@@ -139,6 +139,25 @@ static u16 goya_packet_sizes[MAX_PACKET_ID] = {
2870 + [PACKET_STOP] = sizeof(struct packet_stop)
2871 + };
2872 +
2873 ++static inline bool validate_packet_id(enum packet_id id)
2874 ++{
2875 ++ switch (id) {
2876 ++ case PACKET_WREG_32:
2877 ++ case PACKET_WREG_BULK:
2878 ++ case PACKET_MSG_LONG:
2879 ++ case PACKET_MSG_SHORT:
2880 ++ case PACKET_CP_DMA:
2881 ++ case PACKET_MSG_PROT:
2882 ++ case PACKET_FENCE:
2883 ++ case PACKET_LIN_DMA:
2884 ++ case PACKET_NOP:
2885 ++ case PACKET_STOP:
2886 ++ return true;
2887 ++ default:
2888 ++ return false;
2889 ++ }
2890 ++}
2891 ++
2892 + static u64 goya_mmu_regs[GOYA_MMU_REGS_NUM] = {
2893 + mmDMA_QM_0_GLBL_NON_SECURE_PROPS,
2894 + mmDMA_QM_1_GLBL_NON_SECURE_PROPS,
2895 +@@ -3381,6 +3400,12 @@ static int goya_validate_cb(struct hl_device *hdev,
2896 + PACKET_HEADER_PACKET_ID_MASK) >>
2897 + PACKET_HEADER_PACKET_ID_SHIFT);
2898 +
2899 ++ if (!validate_packet_id(pkt_id)) {
2900 ++ dev_err(hdev->dev, "Invalid packet id %u\n", pkt_id);
2901 ++ rc = -EINVAL;
2902 ++ break;
2903 ++ }
2904 ++
2905 + pkt_size = goya_packet_sizes[pkt_id];
2906 + cb_parsed_length += pkt_size;
2907 + if (cb_parsed_length > parser->user_cb_size) {
2908 +@@ -3616,6 +3641,12 @@ static int goya_patch_cb(struct hl_device *hdev,
2909 + PACKET_HEADER_PACKET_ID_MASK) >>
2910 + PACKET_HEADER_PACKET_ID_SHIFT);
2911 +
2912 ++ if (!validate_packet_id(pkt_id)) {
2913 ++ dev_err(hdev->dev, "Invalid packet id %u\n", pkt_id);
2914 ++ rc = -EINVAL;
2915 ++ break;
2916 ++ }
2917 ++
2918 + pkt_size = goya_packet_sizes[pkt_id];
2919 + cb_parsed_length += pkt_size;
2920 + if (cb_parsed_length > parser->user_cb_size) {
2921 +diff --git a/drivers/misc/habanalabs/goya/goya_coresight.c b/drivers/misc/habanalabs/goya/goya_coresight.c
2922 +index 1258724ea5106..c23a9fcb74b57 100644
2923 +--- a/drivers/misc/habanalabs/goya/goya_coresight.c
2924 ++++ b/drivers/misc/habanalabs/goya/goya_coresight.c
2925 +@@ -358,11 +358,17 @@ static int goya_config_etf(struct hl_device *hdev,
2926 + }
2927 +
2928 + static int goya_etr_validate_address(struct hl_device *hdev, u64 addr,
2929 +- u32 size)
2930 ++ u64 size)
2931 + {
2932 + struct asic_fixed_properties *prop = &hdev->asic_prop;
2933 + u64 range_start, range_end;
2934 +
2935 ++ if (addr > (addr + size)) {
2936 ++ dev_err(hdev->dev,
2937 ++ "ETR buffer size %llu overflow\n", size);
2938 ++ return false;
2939 ++ }
2940 ++
2941 + if (hdev->mmu_enable) {
2942 + range_start = prop->dmmu.start_addr;
2943 + range_end = prop->dmmu.end_addr;
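The addr > (addr + size) test added to both coresight validators is the usual unsigned-wraparound check: if addr + size overflows a u64, the sum wraps below addr. A small demonstration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t addr = UINT64_MAX - 10;
	uint64_t size = 100;

	if (addr > addr + size)	/* sum wrapped around 2^64 */
		printf("ETR buffer size overflows the address space\n");
	return 0;
}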
2944 +diff --git a/drivers/misc/habanalabs/habanalabs.h b/drivers/misc/habanalabs/habanalabs.h
2945 +index 194d833526964..1072f300252a4 100644
2946 +--- a/drivers/misc/habanalabs/habanalabs.h
2947 ++++ b/drivers/misc/habanalabs/habanalabs.h
2948 +@@ -1408,6 +1408,8 @@ struct hl_device_idle_busy_ts {
2949 + * details.
2950 + * @in_reset: is device in reset flow.
2951 + * @curr_pll_profile: current PLL profile.
2952 ++ * @card_type: Various ASICs have several card types. This indicates the card
2953 ++ * type of the current device.
2954 + * @cs_active_cnt: number of active command submissions on this device (active
2955 + * means already in H/W queues)
2956 + * @major: habanalabs kernel driver major.
2957 +@@ -1503,6 +1505,7 @@ struct hl_device {
2958 + u64 clock_gating_mask;
2959 + atomic_t in_reset;
2960 + enum hl_pll_frequency curr_pll_profile;
2961 ++ enum armcp_card_types card_type;
2962 + int cs_active_cnt;
2963 + u32 major;
2964 + u32 high_pll;
2965 +@@ -1587,7 +1590,7 @@ struct hl_ioctl_desc {
2966 + *
2967 + * Return: true if the area is inside the valid range, false otherwise.
2968 + */
2969 +-static inline bool hl_mem_area_inside_range(u64 address, u32 size,
2970 ++static inline bool hl_mem_area_inside_range(u64 address, u64 size,
2971 + u64 range_start_address, u64 range_end_address)
2972 + {
2973 + u64 end_address = address + size;
2974 +@@ -1792,7 +1795,7 @@ int hl_get_pwm_info(struct hl_device *hdev,
2975 + void hl_set_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr,
2976 + long value);
2977 + u64 hl_get_max_power(struct hl_device *hdev);
2978 +-void hl_set_max_power(struct hl_device *hdev, u64 value);
2979 ++void hl_set_max_power(struct hl_device *hdev);
2980 + int hl_set_voltage(struct hl_device *hdev,
2981 + int sensor_index, u32 attr, long value);
2982 + int hl_set_current(struct hl_device *hdev,
2983 +diff --git a/drivers/misc/habanalabs/memory.c b/drivers/misc/habanalabs/memory.c
2984 +index 47da84a177197..e30b1b1877efa 100644
2985 +--- a/drivers/misc/habanalabs/memory.c
2986 ++++ b/drivers/misc/habanalabs/memory.c
2987 +@@ -66,6 +66,11 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
2988 + num_pgs = (args->alloc.mem_size + (page_size - 1)) >> page_shift;
2989 + total_size = num_pgs << page_shift;
2990 +
2991 ++ if (!total_size) {
2992 ++ dev_err(hdev->dev, "Cannot allocate 0 bytes\n");
2993 ++ return -EINVAL;
2994 ++ }
2995 ++
2996 + contiguous = args->flags & HL_MEM_CONTIGUOUS;
2997 +
2998 + if (contiguous) {
2999 +@@ -93,7 +98,7 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
3000 + phys_pg_pack->contiguous = contiguous;
3001 +
3002 + phys_pg_pack->pages = kvmalloc_array(num_pgs, sizeof(u64), GFP_KERNEL);
3003 +- if (!phys_pg_pack->pages) {
3004 ++ if (ZERO_OR_NULL_PTR(phys_pg_pack->pages)) {
3005 + rc = -ENOMEM;
3006 + goto pages_arr_err;
3007 + }
3008 +@@ -683,7 +688,7 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
3009 +
3010 + phys_pg_pack->pages = kvmalloc_array(total_npages, sizeof(u64),
3011 + GFP_KERNEL);
3012 +- if (!phys_pg_pack->pages) {
3013 ++ if (ZERO_OR_NULL_PTR(phys_pg_pack->pages)) {
3014 + rc = -ENOMEM;
3015 + goto page_pack_arr_mem_err;
3016 + }
3017 +diff --git a/drivers/misc/habanalabs/mmu.c b/drivers/misc/habanalabs/mmu.c
3018 +index a290d6b49d788..eb582bd4937ba 100644
3019 +--- a/drivers/misc/habanalabs/mmu.c
3020 ++++ b/drivers/misc/habanalabs/mmu.c
3021 +@@ -450,7 +450,7 @@ int hl_mmu_init(struct hl_device *hdev)
3022 + hdev->mmu_shadow_hop0 = kvmalloc_array(prop->max_asid,
3023 + prop->mmu_hop_table_size,
3024 + GFP_KERNEL | __GFP_ZERO);
3025 +- if (!hdev->mmu_shadow_hop0) {
3026 ++ if (ZERO_OR_NULL_PTR(hdev->mmu_shadow_hop0)) {
3027 + rc = -ENOMEM;
3028 + goto err_pool_add;
3029 + }
3030 +diff --git a/drivers/misc/habanalabs/pci.c b/drivers/misc/habanalabs/pci.c
3031 +index 9f634ef6f5b37..77022c0b42027 100644
3032 +--- a/drivers/misc/habanalabs/pci.c
3033 ++++ b/drivers/misc/habanalabs/pci.c
3034 +@@ -378,15 +378,17 @@ int hl_pci_init(struct hl_device *hdev)
3035 + rc = hdev->asic_funcs->init_iatu(hdev);
3036 + if (rc) {
3037 + dev_err(hdev->dev, "Failed to initialize iATU\n");
3038 +- goto disable_device;
3039 ++ goto unmap_pci_bars;
3040 + }
3041 +
3042 + rc = hl_pci_set_dma_mask(hdev);
3043 + if (rc)
3044 +- goto disable_device;
3045 ++ goto unmap_pci_bars;
3046 +
3047 + return 0;
3048 +
3049 ++unmap_pci_bars:
3050 ++ hl_pci_bars_unmap(hdev);
3051 + disable_device:
3052 + pci_clear_master(pdev);
3053 + pci_disable_device(pdev);
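
The pci.c fix retargets two error gotos so that iATU and DMA-mask failures also unwind the BAR mapping done earlier in hl_pci_init(). The underlying idiom is a label stack in reverse order of acquisition; a generic sketch using only core PCI calls (the region name is a placeholder):

    #include <linux/pci.h>

    static int example_probe(struct pci_dev *pdev, void __iomem **regs)
    {
            int rc;

            rc = pci_enable_device(pdev);
            if (rc)
                    return rc;

            rc = pci_request_regions(pdev, "example");
            if (rc)
                    goto disable_device;

            *regs = pci_iomap(pdev, 0, 0);
            if (!*regs) {
                    rc = -ENOMEM;
                    /* Jump to the innermost label: undo the regions too. */
                    goto release_regions;
            }

            return 0;

    release_regions:
            pci_release_regions(pdev);
    disable_device:
            pci_disable_device(pdev);
            return rc;
    }
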
3054 +diff --git a/drivers/misc/habanalabs/sysfs.c b/drivers/misc/habanalabs/sysfs.c
3055 +index 70b6b1863c2ef..87dadb53ac59d 100644
3056 +--- a/drivers/misc/habanalabs/sysfs.c
3057 ++++ b/drivers/misc/habanalabs/sysfs.c
3058 +@@ -81,7 +81,7 @@ u64 hl_get_max_power(struct hl_device *hdev)
3059 + return result;
3060 + }
3061 +
3062 +-void hl_set_max_power(struct hl_device *hdev, u64 value)
3063 ++void hl_set_max_power(struct hl_device *hdev)
3064 + {
3065 + struct armcp_packet pkt;
3066 + int rc;
3067 +@@ -90,7 +90,7 @@ void hl_set_max_power(struct hl_device *hdev, u64 value)
3068 +
3069 + pkt.ctl = cpu_to_le32(ARMCP_PACKET_MAX_POWER_SET <<
3070 + ARMCP_PKT_CTL_OPCODE_SHIFT);
3071 +- pkt.value = cpu_to_le64(value);
3072 ++ pkt.value = cpu_to_le64(hdev->max_power);
3073 +
3074 + rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
3075 + 0, NULL);
3076 +@@ -316,7 +316,7 @@ static ssize_t max_power_store(struct device *dev,
3077 + }
3078 +
3079 + hdev->max_power = value;
3080 +- hl_set_max_power(hdev, value);
3081 ++ hl_set_max_power(hdev);
3082 +
3083 + out:
3084 + return count;
3085 +@@ -419,6 +419,7 @@ int hl_sysfs_init(struct hl_device *hdev)
3086 + hdev->pm_mng_profile = PM_AUTO;
3087 + else
3088 + hdev->pm_mng_profile = PM_MANUAL;
3089 ++
3090 + hdev->max_power = hdev->asic_prop.max_power_default;
3091 +
3092 + hdev->asic_funcs->add_device_attr(hdev, &hl_dev_clks_attr_group);
3093 +diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
3094 +index 39e7fc54c438f..0319eac3a05d7 100644
3095 +--- a/drivers/mmc/host/mtk-sd.c
3096 ++++ b/drivers/mmc/host/mtk-sd.c
3097 +@@ -22,6 +22,7 @@
3098 + #include <linux/slab.h>
3099 + #include <linux/spinlock.h>
3100 + #include <linux/interrupt.h>
3101 ++#include <linux/reset.h>
3102 +
3103 + #include <linux/mmc/card.h>
3104 + #include <linux/mmc/core.h>
3105 +@@ -414,6 +415,7 @@ struct msdc_host {
3106 + struct pinctrl_state *pins_uhs;
3107 + struct delayed_work req_timeout;
3108 + int irq; /* host interrupt */
3109 ++ struct reset_control *reset;
3110 +
3111 + struct clk *src_clk; /* msdc source clock */
3112 + struct clk *h_clk; /* msdc h_clk */
3113 +@@ -1516,6 +1518,12 @@ static void msdc_init_hw(struct msdc_host *host)
3114 + u32 val;
3115 + u32 tune_reg = host->dev_comp->pad_tune_reg;
3116 +
3117 ++ if (host->reset) {
3118 ++ reset_control_assert(host->reset);
3119 ++ usleep_range(10, 50);
3120 ++ reset_control_deassert(host->reset);
3121 ++ }
3122 ++
3123 + /* Configure to MMC/SD mode, clock free running */
3124 + sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_MODE | MSDC_CFG_CKPDN);
3125 +
3126 +@@ -2273,6 +2281,11 @@ static int msdc_drv_probe(struct platform_device *pdev)
3127 + if (IS_ERR(host->src_clk_cg))
3128 + host->src_clk_cg = NULL;
3129 +
3130 ++ host->reset = devm_reset_control_get_optional_exclusive(&pdev->dev,
3131 ++ "hrst");
3132 ++ if (IS_ERR(host->reset))
3133 ++ return PTR_ERR(host->reset);
3134 ++
3135 + host->irq = platform_get_irq(pdev, 0);
3136 + if (host->irq < 0) {
3137 + ret = -EINVAL;
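
Two things are worth noting in the mtk-sd change: devm_reset_control_get_optional_exclusive() returns NULL, not an error, when the optional "hrst" line is absent from the device tree, so probe only bails on real failures such as -EPROBE_DEFER; and msdc_init_hw() then pulses the line when present. A condensed sketch of the same pattern:

    #include <linux/delay.h>
    #include <linux/platform_device.h>
    #include <linux/reset.h>

    static int example_probe(struct platform_device *pdev)
    {
            struct reset_control *rst;

            /* NULL when the DT has no "hrst" entry; ERR_PTR on real errors. */
            rst = devm_reset_control_get_optional_exclusive(&pdev->dev, "hrst");
            if (IS_ERR(rst))
                    return PTR_ERR(rst);

            /*
             * Pulse the soft-reset line. The reset_control_* calls are
             * no-ops when rst is NULL, so no separate check is strictly
             * needed (the driver above tests host->reset explicitly).
             */
            reset_control_assert(rst);
            usleep_range(10, 50);
            reset_control_deassert(rst);

            return 0;
    }
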
3138 +diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
3139 +index d8b76cb8698aa..2d9f79b50a7fa 100644
3140 +--- a/drivers/mmc/host/sdhci-acpi.c
3141 ++++ b/drivers/mmc/host/sdhci-acpi.c
3142 +@@ -535,6 +535,11 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_qcom_sd = {
3143 + .caps = MMC_CAP_NONREMOVABLE,
3144 + };
3145 +
3146 ++struct amd_sdhci_host {
3147 ++ bool tuned_clock;
3148 ++ bool dll_enabled;
3149 ++};
3150 ++
3151 + /* AMD sdhci reset dll register. */
3152 + #define SDHCI_AMD_RESET_DLL_REGISTER 0x908
3153 +
3154 +@@ -554,26 +559,66 @@ static void sdhci_acpi_amd_hs400_dll(struct sdhci_host *host)
3155 + }
3156 +
3157 + /*
3158 +- * For AMD Platform it is required to disable the tuning
3159 +- * bit first controller to bring to HS Mode from HS200
3160 +- * mode, later enable to tune to HS400 mode.
3161 ++ * The initialization sequence for HS400 is:
3162 ++ * HS->HS200->Perform Tuning->HS->HS400
3163 ++ *
3164 ++ * The re-tuning sequence is:
3165 ++ * HS400->DDR52->HS->HS200->Perform Tuning->HS->HS400
3166 ++ *
3167 ++ * The AMD eMMC Controller can only use the tuned clock while in HS200 and HS400
3168 ++ * mode. If we switch to a different mode, we need to disable the tuned clock.
3169 ++ * If we have previously performed tuning and switch back to HS200 or
3170 ++ * HS400, we can re-enable the tuned clock.
3171 ++ *
3172 + */
3173 + static void amd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
3174 + {
3175 + struct sdhci_host *host = mmc_priv(mmc);
3176 ++ struct sdhci_acpi_host *acpi_host = sdhci_priv(host);
3177 ++ struct amd_sdhci_host *amd_host = sdhci_acpi_priv(acpi_host);
3178 + unsigned int old_timing = host->timing;
3179 ++ u16 val;
3180 +
3181 + sdhci_set_ios(mmc, ios);
3182 +- if (old_timing == MMC_TIMING_MMC_HS200 &&
3183 +- ios->timing == MMC_TIMING_MMC_HS)
3184 +- sdhci_writew(host, 0x9, SDHCI_HOST_CONTROL2);
3185 +- if (old_timing != MMC_TIMING_MMC_HS400 &&
3186 +- ios->timing == MMC_TIMING_MMC_HS400) {
3187 +- sdhci_writew(host, 0x80, SDHCI_HOST_CONTROL2);
3188 +- sdhci_acpi_amd_hs400_dll(host);
3189 ++
3190 ++ if (old_timing != host->timing && amd_host->tuned_clock) {
3191 ++ if (host->timing == MMC_TIMING_MMC_HS400 ||
3192 ++ host->timing == MMC_TIMING_MMC_HS200) {
3193 ++ val = sdhci_readw(host, SDHCI_HOST_CONTROL2);
3194 ++ val |= SDHCI_CTRL_TUNED_CLK;
3195 ++ sdhci_writew(host, val, SDHCI_HOST_CONTROL2);
3196 ++ } else {
3197 ++ val = sdhci_readw(host, SDHCI_HOST_CONTROL2);
3198 ++ val &= ~SDHCI_CTRL_TUNED_CLK;
3199 ++ sdhci_writew(host, val, SDHCI_HOST_CONTROL2);
3200 ++ }
3201 ++
3202 ++ /* DLL is only required for HS400 */
3203 ++ if (host->timing == MMC_TIMING_MMC_HS400 &&
3204 ++ !amd_host->dll_enabled) {
3205 ++ sdhci_acpi_amd_hs400_dll(host);
3206 ++ amd_host->dll_enabled = true;
3207 ++ }
3208 + }
3209 + }
3210 +
3211 ++static int amd_sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
3212 ++{
3213 ++ int err;
3214 ++ struct sdhci_host *host = mmc_priv(mmc);
3215 ++ struct sdhci_acpi_host *acpi_host = sdhci_priv(host);
3216 ++ struct amd_sdhci_host *amd_host = sdhci_acpi_priv(acpi_host);
3217 ++
3218 ++ amd_host->tuned_clock = false;
3219 ++
3220 ++ err = sdhci_execute_tuning(mmc, opcode);
3221 ++
3222 ++ if (!err && !host->tuning_err)
3223 ++ amd_host->tuned_clock = true;
3224 ++
3225 ++ return err;
3226 ++}
3227 ++
3228 + static const struct sdhci_ops sdhci_acpi_ops_amd = {
3229 + .set_clock = sdhci_set_clock,
3230 + .set_bus_width = sdhci_set_bus_width,
3231 +@@ -601,6 +646,7 @@ static int sdhci_acpi_emmc_amd_probe_slot(struct platform_device *pdev,
3232 +
3233 + host->mmc_host_ops.select_drive_strength = amd_select_drive_strength;
3234 + host->mmc_host_ops.set_ios = amd_set_ios;
3235 ++ host->mmc_host_ops.execute_tuning = amd_sdhci_execute_tuning;
3236 + return 0;
3237 + }
3238 +
3239 +@@ -612,6 +658,7 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_amd_emmc = {
3240 + SDHCI_QUIRK_32BIT_ADMA_SIZE,
3241 + .quirks2 = SDHCI_QUIRK2_BROKEN_64_BIT_DMA,
3242 + .probe_slot = sdhci_acpi_emmc_amd_probe_slot,
3243 ++ .priv_size = sizeof(struct amd_sdhci_host),
3244 + };
3245 +
3246 + struct sdhci_acpi_uid_slot {
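
The rewritten amd_set_ios() replaces the old magic writes (0x9, 0x80) with read-modify-write updates so that only the SDHCI_CTRL_TUNED_CLK bit changes, and it only re-enables that bit after amd_sdhci_execute_tuning() has recorded a successful tuning pass. The read-modify-write half, reduced to a helper (a sketch; the register and bit names are the standard sdhci ones used in the hunk):

    /* Assumes the sdhci driver's private header for the accessors. */
    #include "sdhci.h"

    static void example_set_tuned_clk(struct sdhci_host *host, bool enable)
    {
            u16 val = sdhci_readw(host, SDHCI_HOST_CONTROL2);

            if (enable)
                    val |= SDHCI_CTRL_TUNED_CLK;
            else
                    val &= ~SDHCI_CTRL_TUNED_CLK;

            sdhci_writew(host, val, SDHCI_HOST_CONTROL2);
    }
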
3247 +diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
3248 +index bb6802448b2f4..af413805bbf1a 100644
3249 +--- a/drivers/mmc/host/sdhci-pci-core.c
3250 ++++ b/drivers/mmc/host/sdhci-pci-core.c
3251 +@@ -232,6 +232,14 @@ static void sdhci_pci_dumpregs(struct mmc_host *mmc)
3252 + sdhci_dumpregs(mmc_priv(mmc));
3253 + }
3254 +
3255 ++static void sdhci_cqhci_reset(struct sdhci_host *host, u8 mask)
3256 ++{
3257 ++ if ((host->mmc->caps2 & MMC_CAP2_CQE) && (mask & SDHCI_RESET_ALL) &&
3258 ++ host->mmc->cqe_private)
3259 ++ cqhci_deactivate(host->mmc);
3260 ++ sdhci_reset(host, mask);
3261 ++}
3262 ++
3263 + /*****************************************************************************\
3264 + * *
3265 + * Hardware specific quirk handling *
3266 +@@ -718,7 +726,7 @@ static const struct sdhci_ops sdhci_intel_glk_ops = {
3267 + .set_power = sdhci_intel_set_power,
3268 + .enable_dma = sdhci_pci_enable_dma,
3269 + .set_bus_width = sdhci_set_bus_width,
3270 +- .reset = sdhci_reset,
3271 ++ .reset = sdhci_cqhci_reset,
3272 + .set_uhs_signaling = sdhci_set_uhs_signaling,
3273 + .hw_reset = sdhci_pci_hw_reset,
3274 + .irq = sdhci_cqhci_irq,
3275 +diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
3276 +index db1a8d1c96b36..0919ff11d8173 100644
3277 +--- a/drivers/mmc/host/sdhci-tegra.c
3278 ++++ b/drivers/mmc/host/sdhci-tegra.c
3279 +@@ -101,6 +101,12 @@
3280 + #define NVQUIRK_DIS_CARD_CLK_CONFIG_TAP BIT(8)
3281 + #define NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING BIT(9)
3282 +
3283 ++/*
3284 ++ * NVQUIRK_HAS_TMCLK is for SoCs that have a separate timeout clock for
3285 ++ * the Tegra SDMMC hardware data timeout.
3286 ++ */
3287 ++#define NVQUIRK_HAS_TMCLK BIT(10)
3288 ++
3289 + /* SDMMC CQE Base Address for Tegra Host Ver 4.1 and Higher */
3290 + #define SDHCI_TEGRA_CQE_BASE_ADDR 0xF000
3291 +
3292 +@@ -131,6 +137,7 @@ struct sdhci_tegra_autocal_offsets {
3293 + struct sdhci_tegra {
3294 + const struct sdhci_tegra_soc_data *soc_data;
3295 + struct gpio_desc *power_gpio;
3296 ++ struct clk *tmclk;
3297 + bool ddr_signaling;
3298 + bool pad_calib_required;
3299 + bool pad_control_available;
3300 +@@ -1424,7 +1431,8 @@ static const struct sdhci_tegra_soc_data soc_data_tegra210 = {
3301 + NVQUIRK_HAS_PADCALIB |
3302 + NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
3303 + NVQUIRK_ENABLE_SDR50 |
3304 +- NVQUIRK_ENABLE_SDR104,
3305 ++ NVQUIRK_ENABLE_SDR104 |
3306 ++ NVQUIRK_HAS_TMCLK,
3307 + .min_tap_delay = 106,
3308 + .max_tap_delay = 185,
3309 + };
3310 +@@ -1462,6 +1470,7 @@ static const struct sdhci_tegra_soc_data soc_data_tegra186 = {
3311 + NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
3312 + NVQUIRK_ENABLE_SDR50 |
3313 + NVQUIRK_ENABLE_SDR104 |
3314 ++ NVQUIRK_HAS_TMCLK |
3315 + NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING,
3316 + .min_tap_delay = 84,
3317 + .max_tap_delay = 136,
3318 +@@ -1474,7 +1483,8 @@ static const struct sdhci_tegra_soc_data soc_data_tegra194 = {
3319 + NVQUIRK_HAS_PADCALIB |
3320 + NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
3321 + NVQUIRK_ENABLE_SDR50 |
3322 +- NVQUIRK_ENABLE_SDR104,
3323 ++ NVQUIRK_ENABLE_SDR104 |
3324 ++ NVQUIRK_HAS_TMCLK,
3325 + .min_tap_delay = 96,
3326 + .max_tap_delay = 139,
3327 + };
3328 +@@ -1602,6 +1612,43 @@ static int sdhci_tegra_probe(struct platform_device *pdev)
3329 + goto err_power_req;
3330 + }
3331 +
3332 ++ /*
3333 ++	 * Tegra210 has a separate SDMMC_LEGACY_TM clock that can be used as the
3334 ++	 * host timeout clock; SW chooses between TMCLK and SDCLK for the hardware
3335 ++	 * data timeout via the USE_TMCLK_FOR_DATA_TIMEOUT bit of
3336 ++	 * the SDHCI_TEGRA_VENDOR_SYS_SW_CTRL register.
3337 ++	 *
3338 ++	 * The USE_TMCLK_FOR_DATA_TIMEOUT bit defaults to 1, and SDMMC uses the
3339 ++	 * 12 MHz TMCLK that is advertised in the host capability register.
3340 ++	 * A 12 MHz TMCLK gives a maximum achievable data timeout period of
3341 ++	 * about 11 s, which is better than using SDCLK for the data timeout.
3342 ++	 *
3343 ++	 * So TMCLK is set to 12 MHz and kept enabled all the time on SoCs that
3344 ++	 * support a separate TMCLK.
3345 ++ */
3346 ++
3347 ++ if (soc_data->nvquirks & NVQUIRK_HAS_TMCLK) {
3348 ++ clk = devm_clk_get(&pdev->dev, "tmclk");
3349 ++ if (IS_ERR(clk)) {
3350 ++ rc = PTR_ERR(clk);
3351 ++ if (rc == -EPROBE_DEFER)
3352 ++ goto err_power_req;
3353 ++
3354 ++ dev_warn(&pdev->dev, "failed to get tmclk: %d\n", rc);
3355 ++ clk = NULL;
3356 ++ }
3357 ++
3358 ++ clk_set_rate(clk, 12000000);
3359 ++ rc = clk_prepare_enable(clk);
3360 ++ if (rc) {
3361 ++ dev_err(&pdev->dev,
3362 ++ "failed to enable tmclk: %d\n", rc);
3363 ++ goto err_power_req;
3364 ++ }
3365 ++
3366 ++ tegra_host->tmclk = clk;
3367 ++ }
3368 ++
3369 + clk = devm_clk_get(mmc_dev(host->mmc), NULL);
3370 + if (IS_ERR(clk)) {
3371 + rc = PTR_ERR(clk);
3372 +@@ -1645,6 +1692,7 @@ err_add_host:
3373 + err_rst_get:
3374 + clk_disable_unprepare(pltfm_host->clk);
3375 + err_clk_get:
3376 ++ clk_disable_unprepare(tegra_host->tmclk);
3377 + err_power_req:
3378 + err_parse_dt:
3379 + sdhci_pltfm_free(pdev);
3380 +@@ -1662,6 +1710,7 @@ static int sdhci_tegra_remove(struct platform_device *pdev)
3381 + reset_control_assert(tegra_host->rst);
3382 + usleep_range(2000, 4000);
3383 + clk_disable_unprepare(pltfm_host->clk);
3384 ++ clk_disable_unprepare(tegra_host->tmclk);
3385 +
3386 + sdhci_pltfm_free(pdev);
3387 +
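
A quick sanity check on the "11 s" figure in the Tegra comment above (this arithmetic is ours, assuming the SDHCI-standard maximum data timeout divider of 2^27 timeout-clock cycles):

    2^27 cycles / 12 MHz = 134217728 / 12000000 ≈ 11.2 s

A faster SDCLK would divide the same cycle count by a higher frequency and so yield a shorter maximum timeout.
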
3388 +diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
3389 +index 8dcb8a49ab67f..238417db26f9b 100644
3390 +--- a/drivers/net/dsa/mt7530.c
3391 ++++ b/drivers/net/dsa/mt7530.c
3392 +@@ -1501,7 +1501,7 @@ unsupported:
3393 + phylink_set(mask, 100baseT_Full);
3394 +
3395 + if (state->interface != PHY_INTERFACE_MODE_MII) {
3396 +- phylink_set(mask, 1000baseT_Half);
3397 ++ /* This switch only supports 1G full-duplex. */
3398 + phylink_set(mask, 1000baseT_Full);
3399 + if (port == 5)
3400 + phylink_set(mask, 1000baseX_Full);
3401 +diff --git a/drivers/net/ethernet/arc/emac_mdio.c b/drivers/net/ethernet/arc/emac_mdio.c
3402 +index 0187dbf3b87df..54cdafdd067db 100644
3403 +--- a/drivers/net/ethernet/arc/emac_mdio.c
3404 ++++ b/drivers/net/ethernet/arc/emac_mdio.c
3405 +@@ -153,6 +153,7 @@ int arc_mdio_probe(struct arc_emac_priv *priv)
3406 + if (IS_ERR(data->reset_gpio)) {
3407 + error = PTR_ERR(data->reset_gpio);
3408 + dev_err(priv->dev, "Failed to request gpio: %d\n", error);
3409 ++ mdiobus_free(bus);
3410 + return error;
3411 + }
3412 +
3413 +diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
3414 +index b25356e21a1ea..e6ccc2122573d 100644
3415 +--- a/drivers/net/ethernet/broadcom/bcmsysport.c
3416 ++++ b/drivers/net/ethernet/broadcom/bcmsysport.c
3417 +@@ -2462,8 +2462,10 @@ static int bcm_sysport_probe(struct platform_device *pdev)
3418 + priv->tx_rings = devm_kcalloc(&pdev->dev, txq,
3419 + sizeof(struct bcm_sysport_tx_ring),
3420 + GFP_KERNEL);
3421 +- if (!priv->tx_rings)
3422 +- return -ENOMEM;
3423 ++ if (!priv->tx_rings) {
3424 ++ ret = -ENOMEM;
3425 ++ goto err_free_netdev;
3426 ++ }
3427 +
3428 + priv->is_lite = params->is_lite;
3429 + priv->num_rx_desc_words = params->num_rx_desc_words;
3430 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
3431 +index 7463a1847cebd..cd5c7a1412c6d 100644
3432 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
3433 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
3434 +@@ -1141,6 +1141,9 @@ static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
3435 +
3436 + static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
3437 + {
3438 ++ if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
3439 ++ return;
3440 ++
3441 + if (BNXT_PF(bp))
3442 + queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
3443 + else
3444 +@@ -1157,10 +1160,12 @@ static void bnxt_queue_sp_work(struct bnxt *bp)
3445 +
3446 + static void bnxt_cancel_sp_work(struct bnxt *bp)
3447 + {
3448 +- if (BNXT_PF(bp))
3449 ++ if (BNXT_PF(bp)) {
3450 + flush_workqueue(bnxt_pf_wq);
3451 +- else
3452 ++ } else {
3453 + cancel_work_sync(&bp->sp_task);
3454 ++ cancel_delayed_work_sync(&bp->fw_reset_task);
3455 ++ }
3456 + }
3457 +
3458 + static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
3459 +@@ -8987,16 +8992,19 @@ static ssize_t bnxt_show_temp(struct device *dev,
3460 + struct hwrm_temp_monitor_query_input req = {0};
3461 + struct hwrm_temp_monitor_query_output *resp;
3462 + struct bnxt *bp = dev_get_drvdata(dev);
3463 +- u32 temp = 0;
3464 ++ u32 len = 0;
3465 +
3466 + resp = bp->hwrm_cmd_resp_addr;
3467 + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
3468 + mutex_lock(&bp->hwrm_cmd_lock);
3469 +- if (!_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
3470 +- temp = resp->temp * 1000; /* display millidegree */
3471 ++ if (!_hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
3472 ++ len = sprintf(buf, "%u\n", resp->temp * 1000); /* display millidegree */
3473 + mutex_unlock(&bp->hwrm_cmd_lock);
3474 +
3475 +- return sprintf(buf, "%u\n", temp);
3476 ++ if (len)
3477 ++ return len;
3478 ++
3479 ++ return sprintf(buf, "unknown\n");
3480 + }
3481 + static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
3482 +
3483 +@@ -9178,15 +9186,15 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
3484 + }
3485 + }
3486 +
3487 +- bnxt_enable_napi(bp);
3488 +- bnxt_debug_dev_init(bp);
3489 +-
3490 + rc = bnxt_init_nic(bp, irq_re_init);
3491 + if (rc) {
3492 + netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
3493 +- goto open_err;
3494 ++ goto open_err_irq;
3495 + }
3496 +
3497 ++ bnxt_enable_napi(bp);
3498 ++ bnxt_debug_dev_init(bp);
3499 ++
3500 + if (link_re_init) {
3501 + mutex_lock(&bp->link_lock);
3502 + rc = bnxt_update_phy_setting(bp);
3503 +@@ -9217,10 +9225,6 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
3504 + bnxt_vf_reps_open(bp);
3505 + return 0;
3506 +
3507 +-open_err:
3508 +- bnxt_debug_dev_exit(bp);
3509 +- bnxt_disable_napi(bp);
3510 +-
3511 + open_err_irq:
3512 + bnxt_del_napi(bp);
3513 +
3514 +@@ -11501,6 +11505,7 @@ static void bnxt_remove_one(struct pci_dev *pdev)
3515 + unregister_netdev(dev);
3516 + bnxt_dl_unregister(bp);
3517 + bnxt_shutdown_tc(bp);
3518 ++ clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
3519 + bnxt_cancel_sp_work(bp);
3520 + bp->sp_event = 0;
3521 +
3522 +@@ -12065,6 +12070,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3523 + (long)pci_resource_start(pdev, 0), dev->dev_addr);
3524 + pcie_print_link_status(pdev);
3525 +
3526 ++ pci_save_state(pdev);
3527 + return 0;
3528 +
3529 + init_err_cleanup:
3530 +@@ -12260,6 +12266,8 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
3531 + "Cannot re-enable PCI device after reset.\n");
3532 + } else {
3533 + pci_set_master(pdev);
3534 ++ pci_restore_state(pdev);
3535 ++ pci_save_state(pdev);
3536 +
3537 + err = bnxt_hwrm_func_reset(bp);
3538 + if (!err) {
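
The bnxt AER changes pair a pci_save_state() at the end of probe with pci_restore_state() + pci_save_state() in the slot-reset handler; without the saved copy, config space (BARs, MSI-X state) is lost across the reset. The canonical shape of such a handler, sketched:

    #include <linux/pci.h>

    static pci_ers_result_t example_io_slot_reset(struct pci_dev *pdev)
    {
            if (pci_enable_device(pdev))
                    return PCI_ERS_RESULT_DISCONNECT;

            pci_set_master(pdev);
            /*
             * Restore the config space captured by pci_save_state() at
             * probe time, then save it again so a later reset can repeat
             * the restore.
             */
            pci_restore_state(pdev);
            pci_save_state(pdev);

            return PCI_ERS_RESULT_RECOVERED;
    }
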
3539 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
3540 +index b4aa56dc4f9fb..bc2c76fa54cad 100644
3541 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
3542 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
3543 +@@ -494,20 +494,13 @@ static int bnxt_get_num_tpa_ring_stats(struct bnxt *bp)
3544 + static int bnxt_get_num_ring_stats(struct bnxt *bp)
3545 + {
3546 + int rx, tx, cmn;
3547 +- bool sh = false;
3548 +-
3549 +- if (bp->flags & BNXT_FLAG_SHARED_RINGS)
3550 +- sh = true;
3551 +
3552 + rx = NUM_RING_RX_HW_STATS + NUM_RING_RX_SW_STATS +
3553 + bnxt_get_num_tpa_ring_stats(bp);
3554 + tx = NUM_RING_TX_HW_STATS;
3555 + cmn = NUM_RING_CMN_SW_STATS;
3556 +- if (sh)
3557 +- return (rx + tx + cmn) * bp->cp_nr_rings;
3558 +- else
3559 +- return rx * bp->rx_nr_rings + tx * bp->tx_nr_rings +
3560 +- cmn * bp->cp_nr_rings;
3561 ++ return rx * bp->rx_nr_rings + tx * bp->tx_nr_rings +
3562 ++ cmn * bp->cp_nr_rings;
3563 + }
3564 +
3565 + static int bnxt_get_num_stats(struct bnxt *bp)
3566 +@@ -847,7 +840,7 @@ static void bnxt_get_channels(struct net_device *dev,
3567 + int max_tx_sch_inputs;
3568 +
3569 + /* Get the most up-to-date max_tx_sch_inputs. */
3570 +- if (BNXT_NEW_RM(bp))
3571 ++ if (netif_running(dev) && BNXT_NEW_RM(bp))
3572 + bnxt_hwrm_func_resc_qcaps(bp, false);
3573 + max_tx_sch_inputs = hw_resc->max_tx_sch_inputs;
3574 +
3575 +@@ -2270,6 +2263,9 @@ static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data)
3576 + if (rc != 0)
3577 + return rc;
3578 +
3579 ++ if (!dir_entries || !entry_length)
3580 ++ return -EIO;
3581 ++
3582 + /* Insert 2 bytes of directory info (count and size of entries) */
3583 + if (len < 2)
3584 + return -EINVAL;
3585 +diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
3586 +index e471b14fc6e98..f0074c873da3b 100644
3587 +--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
3588 ++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
3589 +@@ -1364,7 +1364,7 @@ static int bcmgenet_validate_flow(struct net_device *dev,
3590 + case ETHER_FLOW:
3591 + eth_mask = &cmd->fs.m_u.ether_spec;
3592 + /* don't allow mask which isn't valid */
3593 +- if (VALIDATE_MASK(eth_mask->h_source) ||
3594 ++ if (VALIDATE_MASK(eth_mask->h_dest) ||
3595 + VALIDATE_MASK(eth_mask->h_source) ||
3596 + VALIDATE_MASK(eth_mask->h_proto)) {
3597 + netdev_err(dev, "rxnfc: Unsupported mask\n");
3598 +diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
3599 +index ebff1fc0d8cef..4515804d1ce4c 100644
3600 +--- a/drivers/net/ethernet/broadcom/tg3.c
3601 ++++ b/drivers/net/ethernet/broadcom/tg3.c
3602 +@@ -7221,8 +7221,8 @@ static inline void tg3_reset_task_schedule(struct tg3 *tp)
3603 +
3604 + static inline void tg3_reset_task_cancel(struct tg3 *tp)
3605 + {
3606 +- cancel_work_sync(&tp->reset_task);
3607 +- tg3_flag_clear(tp, RESET_TASK_PENDING);
3608 ++ if (test_and_clear_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
3609 ++ cancel_work_sync(&tp->reset_task);
3610 + tg3_flag_clear(tp, TX_RECOVERY_PENDING);
3611 + }
3612 +
3613 +@@ -11209,18 +11209,27 @@ static void tg3_reset_task(struct work_struct *work)
3614 +
3615 + tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
3616 + err = tg3_init_hw(tp, true);
3617 +- if (err)
3618 ++ if (err) {
3619 ++ tg3_full_unlock(tp);
3620 ++ tp->irq_sync = 0;
3621 ++ tg3_napi_enable(tp);
3622 ++ /* Clear this flag so that tg3_reset_task_cancel() will not
3623 ++ * call cancel_work_sync() and wait forever.
3624 ++ */
3625 ++ tg3_flag_clear(tp, RESET_TASK_PENDING);
3626 ++ dev_close(tp->dev);
3627 + goto out;
3628 ++ }
3629 +
3630 + tg3_netif_start(tp);
3631 +
3632 +-out:
3633 + tg3_full_unlock(tp);
3634 +
3635 + if (!err)
3636 + tg3_phy_start(tp);
3637 +
3638 + tg3_flag_clear(tp, RESET_TASK_PENDING);
3639 ++out:
3640 + rtnl_unlock();
3641 + }
3642 +
3643 +diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c
3644 +index 3de8a5e83b6c7..d7fefdbf3e575 100644
3645 +--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c
3646 ++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c
3647 +@@ -62,6 +62,7 @@ static struct thermal_zone_device_ops cxgb4_thermal_ops = {
3648 + int cxgb4_thermal_init(struct adapter *adap)
3649 + {
3650 + struct ch_thermal *ch_thermal = &adap->ch_thermal;
3651 ++ char ch_tz_name[THERMAL_NAME_LENGTH];
3652 + int num_trip = CXGB4_NUM_TRIPS;
3653 + u32 param, val;
3654 + int ret;
3655 +@@ -82,7 +83,8 @@ int cxgb4_thermal_init(struct adapter *adap)
3656 + ch_thermal->trip_type = THERMAL_TRIP_CRITICAL;
3657 + }
3658 +
3659 +- ch_thermal->tzdev = thermal_zone_device_register("cxgb4", num_trip,
3660 ++ snprintf(ch_tz_name, sizeof(ch_tz_name), "cxgb4_%s", adap->name);
3661 ++ ch_thermal->tzdev = thermal_zone_device_register(ch_tz_name, num_trip,
3662 + 0, adap,
3663 + &cxgb4_thermal_ops,
3664 + NULL, 0, 0);
3665 +@@ -97,7 +99,9 @@ int cxgb4_thermal_init(struct adapter *adap)
3666 +
3667 + int cxgb4_thermal_remove(struct adapter *adap)
3668 + {
3669 +- if (adap->ch_thermal.tzdev)
3670 ++ if (adap->ch_thermal.tzdev) {
3671 + thermal_zone_device_unregister(adap->ch_thermal.tzdev);
3672 ++ adap->ch_thermal.tzdev = NULL;
3673 ++ }
3674 + return 0;
3675 + }
3676 +diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
3677 +index 62e271aea4a50..ffec0f3dd9578 100644
3678 +--- a/drivers/net/ethernet/cortina/gemini.c
3679 ++++ b/drivers/net/ethernet/cortina/gemini.c
3680 +@@ -2446,8 +2446,8 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
3681 + port->reset = devm_reset_control_get_exclusive(dev, NULL);
3682 + if (IS_ERR(port->reset)) {
3683 + dev_err(dev, "no reset\n");
3684 +- clk_disable_unprepare(port->pclk);
3685 +- return PTR_ERR(port->reset);
3686 ++ ret = PTR_ERR(port->reset);
3687 ++ goto unprepare;
3688 + }
3689 + reset_control_reset(port->reset);
3690 + usleep_range(100, 500);
3691 +@@ -2502,25 +2502,25 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
3692 + IRQF_SHARED,
3693 + port_names[port->id],
3694 + port);
3695 +- if (ret) {
3696 +- clk_disable_unprepare(port->pclk);
3697 +- return ret;
3698 +- }
3699 ++ if (ret)
3700 ++ goto unprepare;
3701 +
3702 + ret = register_netdev(netdev);
3703 +- if (!ret) {
3704 ++ if (ret)
3705 ++ goto unprepare;
3706 ++
3707 ++ netdev_info(netdev,
3708 ++ "irq %d, DMA @ 0x%pap, GMAC @ 0x%pap\n",
3709 ++ port->irq, &dmares->start,
3710 ++ &gmacres->start);
3711 ++ ret = gmac_setup_phy(netdev);
3712 ++ if (ret)
3713 + netdev_info(netdev,
3714 +- "irq %d, DMA @ 0x%pap, GMAC @ 0x%pap\n",
3715 +- port->irq, &dmares->start,
3716 +- &gmacres->start);
3717 +- ret = gmac_setup_phy(netdev);
3718 +- if (ret)
3719 +- netdev_info(netdev,
3720 +- "PHY init failed, deferring to ifup time\n");
3721 +- return 0;
3722 +- }
3723 ++ "PHY init failed, deferring to ifup time\n");
3724 ++ return 0;
3725 +
3726 +- port->netdev = NULL;
3727 ++unprepare:
3728 ++ clk_disable_unprepare(port->pclk);
3729 + return ret;
3730 + }
3731 +
3732 +diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
3733 +index 23f278e46975b..22522f8a52999 100644
3734 +--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
3735 ++++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
3736 +@@ -2282,8 +2282,10 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
3737 + priv->enet_ver = AE_VERSION_1;
3738 + else if (acpi_dev_found(hns_enet_acpi_match[1].id))
3739 + priv->enet_ver = AE_VERSION_2;
3740 +- else
3741 +- return -ENXIO;
3742 ++ else {
3743 ++ ret = -ENXIO;
3744 ++ goto out_read_prop_fail;
3745 ++ }
3746 +
3747 + /* try to find port-idx-in-ae first */
3748 + ret = acpi_node_get_property_reference(dev->fwnode,
3749 +@@ -2299,7 +2301,8 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
3750 + priv->fwnode = args.fwnode;
3751 + } else {
3752 + dev_err(dev, "cannot read cfg data from OF or acpi\n");
3753 +- return -ENXIO;
3754 ++ ret = -ENXIO;
3755 ++ goto out_read_prop_fail;
3756 + }
3757 +
3758 + ret = device_property_read_u32(dev, "port-idx-in-ae", &port_id);
3759 +diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
3760 +index d2986f1f2db02..d7444782bfdd0 100644
3761 +--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
3762 ++++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
3763 +@@ -114,7 +114,7 @@ static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
3764 + goto err_out;
3765 +
3766 + for (i = 0; i <= buddy->max_order; ++i) {
3767 +- s = BITS_TO_LONGS(1 << (buddy->max_order - i));
3768 ++ s = BITS_TO_LONGS(1UL << (buddy->max_order - i));
3769 + buddy->bits[i] = kvmalloc_array(s, sizeof(long), GFP_KERNEL | __GFP_ZERO);
3770 + if (!buddy->bits[i])
3771 + goto err_out_free;
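
The mlx4 one-liner matters because `1 << (buddy->max_order - i)` is computed in 32-bit int: once the shift count reaches 31 the result is undefined, and large orders silently truncate before BITS_TO_LONGS() ever sees them. Promoting the constant keeps the whole expression in unsigned long; isolated, the fix looks like this (helper name is illustrative):

    #include <linux/bitops.h>

    static unsigned long example_buddy_words(unsigned int max_order,
                                             unsigned int i)
    {
            /*
             * 1UL forces a long-width shift; a plain 1 would overflow
             * int for max_order - i >= 31.
             */
            return BITS_TO_LONGS(1UL << (max_order - i));
    }
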
3772 +diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
3773 +index 2df3deedf9fd8..7248d248f6041 100644
3774 +--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
3775 ++++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
3776 +@@ -61,6 +61,7 @@ struct nfp_tun_active_tuns {
3777 + * @flags: options part of the request
3778 + * @tun_info.ipv6: dest IPv6 address of active route
3779 + * @tun_info.egress_port: port the encapsulated packet egressed
3780 ++ * @tun_info.extra: reserved for future use
3781 + * @tun_info: tunnels that have sent traffic in reported period
3782 + */
3783 + struct nfp_tun_active_tuns_v6 {
3784 +@@ -70,6 +71,7 @@ struct nfp_tun_active_tuns_v6 {
3785 + struct route_ip_info_v6 {
3786 + struct in6_addr ipv6;
3787 + __be32 egress_port;
3788 ++ __be32 extra[2];
3789 + } tun_info[];
3790 + };
3791 +
3792 +diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
3793 +index 99f7aae102ce1..df89d09b253e2 100644
3794 +--- a/drivers/net/ethernet/renesas/ravb_main.c
3795 ++++ b/drivers/net/ethernet/renesas/ravb_main.c
3796 +@@ -1342,6 +1342,51 @@ static inline int ravb_hook_irq(unsigned int irq, irq_handler_t handler,
3797 + return error;
3798 + }
3799 +
3800 ++/* MDIO bus init function */
3801 ++static int ravb_mdio_init(struct ravb_private *priv)
3802 ++{
3803 ++ struct platform_device *pdev = priv->pdev;
3804 ++ struct device *dev = &pdev->dev;
3805 ++ int error;
3806 ++
3807 ++ /* Bitbang init */
3808 ++ priv->mdiobb.ops = &bb_ops;
3809 ++
3810 ++ /* MII controller setting */
3811 ++ priv->mii_bus = alloc_mdio_bitbang(&priv->mdiobb);
3812 ++ if (!priv->mii_bus)
3813 ++ return -ENOMEM;
3814 ++
3815 ++ /* Hook up MII support for ethtool */
3816 ++ priv->mii_bus->name = "ravb_mii";
3817 ++ priv->mii_bus->parent = dev;
3818 ++ snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
3819 ++ pdev->name, pdev->id);
3820 ++
3821 ++ /* Register MDIO bus */
3822 ++ error = of_mdiobus_register(priv->mii_bus, dev->of_node);
3823 ++ if (error)
3824 ++ goto out_free_bus;
3825 ++
3826 ++ return 0;
3827 ++
3828 ++out_free_bus:
3829 ++ free_mdio_bitbang(priv->mii_bus);
3830 ++ return error;
3831 ++}
3832 ++
3833 ++/* MDIO bus release function */
3834 ++static int ravb_mdio_release(struct ravb_private *priv)
3835 ++{
3836 ++ /* Unregister mdio bus */
3837 ++ mdiobus_unregister(priv->mii_bus);
3838 ++
3839 ++ /* Free bitbang info */
3840 ++ free_mdio_bitbang(priv->mii_bus);
3841 ++
3842 ++ return 0;
3843 ++}
3844 ++
3845 + /* Network device open function for Ethernet AVB */
3846 + static int ravb_open(struct net_device *ndev)
3847 + {
3848 +@@ -1350,6 +1395,13 @@ static int ravb_open(struct net_device *ndev)
3849 + struct device *dev = &pdev->dev;
3850 + int error;
3851 +
3852 ++ /* MDIO bus init */
3853 ++ error = ravb_mdio_init(priv);
3854 ++ if (error) {
3855 ++ netdev_err(ndev, "failed to initialize MDIO\n");
3856 ++ return error;
3857 ++ }
3858 ++
3859 + napi_enable(&priv->napi[RAVB_BE]);
3860 + napi_enable(&priv->napi[RAVB_NC]);
3861 +
3862 +@@ -1427,6 +1479,7 @@ out_free_irq:
3863 + out_napi_off:
3864 + napi_disable(&priv->napi[RAVB_NC]);
3865 + napi_disable(&priv->napi[RAVB_BE]);
3866 ++ ravb_mdio_release(priv);
3867 + return error;
3868 + }
3869 +
3870 +@@ -1736,6 +1789,8 @@ static int ravb_close(struct net_device *ndev)
3871 + ravb_ring_free(ndev, RAVB_BE);
3872 + ravb_ring_free(ndev, RAVB_NC);
3873 +
3874 ++ ravb_mdio_release(priv);
3875 ++
3876 + return 0;
3877 + }
3878 +
3879 +@@ -1887,51 +1942,6 @@ static const struct net_device_ops ravb_netdev_ops = {
3880 + .ndo_set_features = ravb_set_features,
3881 + };
3882 +
3883 +-/* MDIO bus init function */
3884 +-static int ravb_mdio_init(struct ravb_private *priv)
3885 +-{
3886 +- struct platform_device *pdev = priv->pdev;
3887 +- struct device *dev = &pdev->dev;
3888 +- int error;
3889 +-
3890 +- /* Bitbang init */
3891 +- priv->mdiobb.ops = &bb_ops;
3892 +-
3893 +- /* MII controller setting */
3894 +- priv->mii_bus = alloc_mdio_bitbang(&priv->mdiobb);
3895 +- if (!priv->mii_bus)
3896 +- return -ENOMEM;
3897 +-
3898 +- /* Hook up MII support for ethtool */
3899 +- priv->mii_bus->name = "ravb_mii";
3900 +- priv->mii_bus->parent = dev;
3901 +- snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
3902 +- pdev->name, pdev->id);
3903 +-
3904 +- /* Register MDIO bus */
3905 +- error = of_mdiobus_register(priv->mii_bus, dev->of_node);
3906 +- if (error)
3907 +- goto out_free_bus;
3908 +-
3909 +- return 0;
3910 +-
3911 +-out_free_bus:
3912 +- free_mdio_bitbang(priv->mii_bus);
3913 +- return error;
3914 +-}
3915 +-
3916 +-/* MDIO bus release function */
3917 +-static int ravb_mdio_release(struct ravb_private *priv)
3918 +-{
3919 +- /* Unregister mdio bus */
3920 +- mdiobus_unregister(priv->mii_bus);
3921 +-
3922 +- /* Free bitbang info */
3923 +- free_mdio_bitbang(priv->mii_bus);
3924 +-
3925 +- return 0;
3926 +-}
3927 +-
3928 + static const struct of_device_id ravb_match_table[] = {
3929 + { .compatible = "renesas,etheravb-r8a7790", .data = (void *)RCAR_GEN2 },
3930 + { .compatible = "renesas,etheravb-r8a7794", .data = (void *)RCAR_GEN2 },
3931 +@@ -2174,13 +2184,6 @@ static int ravb_probe(struct platform_device *pdev)
3932 + eth_hw_addr_random(ndev);
3933 + }
3934 +
3935 +- /* MDIO bus init */
3936 +- error = ravb_mdio_init(priv);
3937 +- if (error) {
3938 +- dev_err(&pdev->dev, "failed to initialize MDIO\n");
3939 +- goto out_dma_free;
3940 +- }
3941 +-
3942 + netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll, 64);
3943 + netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll, 64);
3944 +
3945 +@@ -2202,8 +2205,6 @@ static int ravb_probe(struct platform_device *pdev)
3946 + out_napi_del:
3947 + netif_napi_del(&priv->napi[RAVB_NC]);
3948 + netif_napi_del(&priv->napi[RAVB_BE]);
3949 +- ravb_mdio_release(priv);
3950 +-out_dma_free:
3951 + dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
3952 + priv->desc_bat_dma);
3953 +
3954 +@@ -2235,7 +2236,6 @@ static int ravb_remove(struct platform_device *pdev)
3955 + unregister_netdev(ndev);
3956 + netif_napi_del(&priv->napi[RAVB_NC]);
3957 + netif_napi_del(&priv->napi[RAVB_BE]);
3958 +- ravb_mdio_release(priv);
3959 + pm_runtime_disable(&pdev->dev);
3960 + free_netdev(ndev);
3961 + platform_set_drvdata(pdev, NULL);
3962 +diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
3963 +index 88832277edd5a..c7c9980e02604 100644
3964 +--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
3965 ++++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
3966 +@@ -172,6 +172,8 @@ void am65_cpsw_nuss_adjust_link(struct net_device *ndev)
3967 + if (phy->speed == 10 && phy_interface_is_rgmii(phy))
3968 + /* Can be used with in band mode only */
3969 + mac_control |= CPSW_SL_CTL_EXT_EN;
3970 ++ if (phy->speed == 100 && phy->interface == PHY_INTERFACE_MODE_RMII)
3971 ++ mac_control |= CPSW_SL_CTL_IFCTL_A;
3972 + if (phy->duplex)
3973 + mac_control |= CPSW_SL_CTL_FULLDUPLEX;
3974 +
3975 +diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
3976 +index 9b17bbbe102fe..4a65edc5a3759 100644
3977 +--- a/drivers/net/ethernet/ti/cpsw.c
3978 ++++ b/drivers/net/ethernet/ti/cpsw.c
3979 +@@ -1116,7 +1116,7 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
3980 + HOST_PORT_NUM, ALE_VLAN, vid);
3981 + ret |= cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
3982 + 0, ALE_VLAN, vid);
3983 +- ret |= cpsw_ale_flush_multicast(cpsw->ale, 0, vid);
3984 ++ ret |= cpsw_ale_flush_multicast(cpsw->ale, ALE_PORT_HOST, vid);
3985 + err:
3986 + pm_runtime_put(cpsw->dev);
3987 + return ret;
3988 +diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
3989 +index 1247d35d42ef3..8ed78577cdedf 100644
3990 +--- a/drivers/net/ethernet/ti/cpsw_new.c
3991 ++++ b/drivers/net/ethernet/ti/cpsw_new.c
3992 +@@ -1032,19 +1032,34 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
3993 + return ret;
3994 + }
3995 +
3996 ++	/* Reset the return code, as pm_runtime_get_sync() can return
3997 ++	 * non-zero (positive) values on success as well.
3998 ++	 */
3999 ++ ret = 0;
4000 + for (i = 0; i < cpsw->data.slaves; i++) {
4001 + if (cpsw->slaves[i].ndev &&
4002 +- vid == cpsw->slaves[i].port_vlan)
4003 ++ vid == cpsw->slaves[i].port_vlan) {
4004 ++ ret = -EINVAL;
4005 + goto err;
4006 ++ }
4007 + }
4008 +
4009 + dev_dbg(priv->dev, "removing vlanid %d from vlan filter\n", vid);
4010 +- cpsw_ale_del_vlan(cpsw->ale, vid, 0);
4011 +- cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
4012 +- HOST_PORT_NUM, ALE_VLAN, vid);
4013 +- cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
4014 +- 0, ALE_VLAN, vid);
4015 +- cpsw_ale_flush_multicast(cpsw->ale, 0, vid);
4016 ++ ret = cpsw_ale_del_vlan(cpsw->ale, vid, 0);
4017 ++ if (ret)
4018 ++ dev_err(priv->dev, "cpsw_ale_del_vlan() failed: ret %d\n", ret);
4019 ++ ret = cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
4020 ++ HOST_PORT_NUM, ALE_VLAN, vid);
4021 ++ if (ret)
4022 ++ dev_err(priv->dev, "cpsw_ale_del_ucast() failed: ret %d\n",
4023 ++ ret);
4024 ++ ret = cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
4025 ++ 0, ALE_VLAN, vid);
4026 ++ if (ret)
4027 ++ dev_err(priv->dev, "cpsw_ale_del_mcast failed. ret %d\n",
4028 ++ ret);
4029 ++ cpsw_ale_flush_multicast(cpsw->ale, ALE_PORT_HOST, vid);
4030 ++ ret = 0;
4031 + err:
4032 + pm_runtime_put(cpsw->dev);
4033 + return ret;
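
The cpsw_new.c hunk resets `ret` to 0 because the earlier pm_runtime_get_sync() can legitimately return 1 (device already active), and that positive value must not leak out of the ndo as an error. The usual guard, sketched:

    #include <linux/pm_runtime.h>

    static int example_op(struct device *dev)
    {
            int ret;

            ret = pm_runtime_get_sync(dev);
            if (ret < 0) {
                    pm_runtime_put_noidle(dev);
                    return ret;
            }
            /* 1 just means "was already resumed"; don't return it. */
            ret = 0;

            /* ... do the actual work here ... */

            pm_runtime_put(dev);
            return ret;
    }
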
4034 +diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
4035 +index 21640a035d7df..8e47d0112e5dc 100644
4036 +--- a/drivers/net/gtp.c
4037 ++++ b/drivers/net/gtp.c
4038 +@@ -1179,6 +1179,7 @@ static int gtp_genl_fill_info(struct sk_buff *skb, u32 snd_portid, u32 snd_seq,
4039 + goto nlmsg_failure;
4040 +
4041 + if (nla_put_u32(skb, GTPA_VERSION, pctx->gtp_version) ||
4042 ++ nla_put_u32(skb, GTPA_LINK, pctx->dev->ifindex) ||
4043 + nla_put_be32(skb, GTPA_PEER_ADDRESS, pctx->peer_addr_ip4.s_addr) ||
4044 + nla_put_be32(skb, GTPA_MS_ADDRESS, pctx->ms_addr_ip4.s_addr))
4045 + goto nla_put_failure;
4046 +diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
4047 +index f3c04981b8da6..cd7032628a28c 100644
4048 +--- a/drivers/net/phy/dp83867.c
4049 ++++ b/drivers/net/phy/dp83867.c
4050 +@@ -215,9 +215,9 @@ static int dp83867_set_wol(struct phy_device *phydev,
4051 + if (wol->wolopts & WAKE_MAGICSECURE) {
4052 + phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RXFSOP1,
4053 + (wol->sopass[1] << 8) | wol->sopass[0]);
4054 +- phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RXFSOP1,
4055 ++ phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RXFSOP2,
4056 + (wol->sopass[3] << 8) | wol->sopass[2]);
4057 +- phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RXFSOP1,
4058 ++ phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RXFSOP3,
4059 + (wol->sopass[5] << 8) | wol->sopass[4]);
4060 +
4061 + val_rxcfg |= DP83867_WOL_SEC_EN;
4062 +diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
4063 +index e39f41efda3ec..7bc6e8f856fe0 100644
4064 +--- a/drivers/net/usb/asix_common.c
4065 ++++ b/drivers/net/usb/asix_common.c
4066 +@@ -296,7 +296,7 @@ int asix_read_phy_addr(struct usbnet *dev, int internal)
4067 +
4068 + netdev_dbg(dev->net, "asix_get_phy_addr()\n");
4069 +
4070 +- if (ret < 0) {
4071 ++ if (ret < 2) {
4072 + netdev_err(dev->net, "Error reading PHYID register: %02x\n", ret);
4073 + goto out;
4074 + }
4075 +diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
4076 +index f38548e6d55ec..fa0039dcacc66 100644
4077 +--- a/drivers/nvme/host/core.c
4078 ++++ b/drivers/nvme/host/core.c
4079 +@@ -4148,7 +4148,7 @@ static void nvme_free_ctrl(struct device *dev)
4080 + container_of(dev, struct nvme_ctrl, ctrl_device);
4081 + struct nvme_subsystem *subsys = ctrl->subsys;
4082 +
4083 +- if (subsys && ctrl->instance != subsys->instance)
4084 ++ if (!subsys || ctrl->instance != subsys->instance)
4085 + ida_simple_remove(&nvme_instance_ida, ctrl->instance);
4086 +
4087 + kfree(ctrl->effects);
4088 +diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
4089 +index 6e2f623e472e9..58b035cc67a01 100644
4090 +--- a/drivers/nvme/target/core.c
4091 ++++ b/drivers/nvme/target/core.c
4092 +@@ -396,6 +396,9 @@ static void nvmet_keep_alive_timer(struct work_struct *work)
4093 +
4094 + static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
4095 + {
4096 ++ if (unlikely(ctrl->kato == 0))
4097 ++ return;
4098 ++
4099 + pr_debug("ctrl %d start keep-alive timer for %d secs\n",
4100 + ctrl->cntlid, ctrl->kato);
4101 +
4102 +@@ -405,6 +408,9 @@ static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
4103 +
4104 + static void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
4105 + {
4106 ++ if (unlikely(ctrl->kato == 0))
4107 ++ return;
4108 ++
4109 + pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid);
4110 +
4111 + cancel_delayed_work_sync(&ctrl->ka_work);
4112 +diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
4113 +index 27fd3b5aa621c..f98a1ba4dc47c 100644
4114 +--- a/drivers/nvme/target/fc.c
4115 ++++ b/drivers/nvme/target/fc.c
4116 +@@ -2362,9 +2362,9 @@ nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
4117 + return;
4118 + if (fcpreq->fcp_error ||
4119 + fcpreq->transferred_length != fcpreq->transfer_length) {
4120 +- spin_lock(&fod->flock);
4121 ++ spin_lock_irqsave(&fod->flock, flags);
4122 + fod->abort = true;
4123 +- spin_unlock(&fod->flock);
4124 ++ spin_unlock_irqrestore(&fod->flock, flags);
4125 +
4126 + nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
4127 + return;
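
The nvmet-fc fix switches a completion-path lock to the irqsave variant: nvmet_fc_fod_op_done() can run from the LLDD's hard-IRQ completion handler, and taking a lock with plain spin_lock() there while another path holds it with interrupts enabled can deadlock. Pattern sketch (names are placeholders):

    #include <linux/spinlock.h>

    static void example_mark_abort(spinlock_t *lock, bool *abort_flag)
    {
            unsigned long flags;

            /*
             * The irqsave variant is safe in any context (process,
             * softirq, hardirq) and restores the previous interrupt
             * state on unlock, so an IRQ-context caller cannot deadlock
             * against a process-context holder of the same lock.
             */
            spin_lock_irqsave(lock, flags);
            *abort_flag = true;
            spin_unlock_irqrestore(lock, flags);
    }
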
4128 +diff --git a/drivers/opp/core.c b/drivers/opp/core.c
4129 +index 8c90f78717723..91dcad982d362 100644
4130 +--- a/drivers/opp/core.c
4131 ++++ b/drivers/opp/core.c
4132 +@@ -1265,13 +1265,19 @@ void dev_pm_opp_remove(struct device *dev, unsigned long freq)
4133 + }
4134 + EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
4135 +
4136 +-void _opp_remove_all_static(struct opp_table *opp_table)
4137 ++bool _opp_remove_all_static(struct opp_table *opp_table)
4138 + {
4139 + struct dev_pm_opp *opp, *tmp;
4140 ++ bool ret = true;
4141 +
4142 + mutex_lock(&opp_table->lock);
4143 +
4144 +- if (!opp_table->parsed_static_opps || --opp_table->parsed_static_opps)
4145 ++ if (!opp_table->parsed_static_opps) {
4146 ++ ret = false;
4147 ++ goto unlock;
4148 ++ }
4149 ++
4150 ++ if (--opp_table->parsed_static_opps)
4151 + goto unlock;
4152 +
4153 + list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) {
4154 +@@ -1281,6 +1287,8 @@ void _opp_remove_all_static(struct opp_table *opp_table)
4155 +
4156 + unlock:
4157 + mutex_unlock(&opp_table->lock);
4158 ++
4159 ++ return ret;
4160 + }
4161 +
4162 + /**
4163 +@@ -2382,13 +2390,15 @@ void _dev_pm_opp_find_and_remove_table(struct device *dev)
4164 + return;
4165 + }
4166 +
4167 +- _opp_remove_all_static(opp_table);
4168 ++ /*
4169 ++ * Drop the extra reference only if the OPP table was successfully added
4170 ++ * with dev_pm_opp_of_add_table() earlier.
4171 ++	 */
4172 ++ if (_opp_remove_all_static(opp_table))
4173 ++ dev_pm_opp_put_opp_table(opp_table);
4174 +
4175 + /* Drop reference taken by _find_opp_table() */
4176 + dev_pm_opp_put_opp_table(opp_table);
4177 +-
4178 +- /* Drop reference taken while the OPP table was added */
4179 +- dev_pm_opp_put_opp_table(opp_table);
4180 + }
4181 +
4182 + /**
4183 +diff --git a/drivers/opp/opp.h b/drivers/opp/opp.h
4184 +index e51646ff279eb..c3fcd571e446d 100644
4185 +--- a/drivers/opp/opp.h
4186 ++++ b/drivers/opp/opp.h
4187 +@@ -212,7 +212,7 @@ struct opp_table {
4188 +
4189 + /* Routines internal to opp core */
4190 + void dev_pm_opp_get(struct dev_pm_opp *opp);
4191 +-void _opp_remove_all_static(struct opp_table *opp_table);
4192 ++bool _opp_remove_all_static(struct opp_table *opp_table);
4193 + void _get_opp_table_kref(struct opp_table *opp_table);
4194 + int _get_opp_count(struct opp_table *opp_table);
4195 + struct opp_table *_find_opp_table(struct device *dev);
4196 +diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.c b/drivers/staging/media/sunxi/cedrus/cedrus.c
4197 +index bc27f9430eeb1..7c6b91f0e780a 100644
4198 +--- a/drivers/staging/media/sunxi/cedrus/cedrus.c
4199 ++++ b/drivers/staging/media/sunxi/cedrus/cedrus.c
4200 +@@ -199,6 +199,7 @@ static int cedrus_request_validate(struct media_request *req)
4201 + struct v4l2_ctrl *ctrl_test;
4202 + unsigned int count;
4203 + unsigned int i;
4204 ++ int ret = 0;
4205 +
4206 + list_for_each_entry(obj, &req->objects, list) {
4207 + struct vb2_buffer *vb;
4208 +@@ -243,12 +244,16 @@ static int cedrus_request_validate(struct media_request *req)
4209 + if (!ctrl_test) {
4210 + v4l2_info(&ctx->dev->v4l2_dev,
4211 + "Missing required codec control\n");
4212 +- return -ENOENT;
4213 ++ ret = -ENOENT;
4214 ++ break;
4215 + }
4216 + }
4217 +
4218 + v4l2_ctrl_request_hdl_put(hdl);
4219 +
4220 ++ if (ret)
4221 ++ return ret;
4222 ++
4223 + return vb2_request_validate(req);
4224 + }
4225 +
4226 +diff --git a/drivers/thermal/qcom/qcom-spmi-temp-alarm.c b/drivers/thermal/qcom/qcom-spmi-temp-alarm.c
4227 +index bf7bae42c141c..6dc879fea9c8a 100644
4228 +--- a/drivers/thermal/qcom/qcom-spmi-temp-alarm.c
4229 ++++ b/drivers/thermal/qcom/qcom-spmi-temp-alarm.c
4230 +@@ -1,6 +1,6 @@
4231 + // SPDX-License-Identifier: GPL-2.0-only
4232 + /*
4233 +- * Copyright (c) 2011-2015, 2017, The Linux Foundation. All rights reserved.
4234 ++ * Copyright (c) 2011-2015, 2017, 2020, The Linux Foundation. All rights reserved.
4235 + */
4236 +
4237 + #include <linux/bitops.h>
4238 +@@ -191,7 +191,7 @@ static int qpnp_tm_get_temp(void *data, int *temp)
4239 + chip->temp = mili_celsius;
4240 + }
4241 +
4242 +- *temp = chip->temp < 0 ? 0 : chip->temp;
4243 ++ *temp = chip->temp;
4244 +
4245 + return 0;
4246 + }
4247 +diff --git a/drivers/thermal/ti-soc-thermal/omap4-thermal-data.c b/drivers/thermal/ti-soc-thermal/omap4-thermal-data.c
4248 +index 63b02bfb2adf6..fdb8a495ab69a 100644
4249 +--- a/drivers/thermal/ti-soc-thermal/omap4-thermal-data.c
4250 ++++ b/drivers/thermal/ti-soc-thermal/omap4-thermal-data.c
4251 +@@ -37,20 +37,21 @@ static struct temp_sensor_data omap4430_mpu_temp_sensor_data = {
4252 +
4253 + /*
4254 + * Temperature values in milli degree celsius
4255 +- * ADC code values from 530 to 923
4256 ++ * ADC code values from 13 to 107, see TRM
4257 ++ * "18.4.10.2.3 ADC Codes Versus Temperature".
4258 + */
4259 + static const int
4260 + omap4430_adc_to_temp[OMAP4430_ADC_END_VALUE - OMAP4430_ADC_START_VALUE + 1] = {
4261 +- -38000, -35000, -34000, -32000, -30000, -28000, -26000, -24000, -22000,
4262 +- -20000, -18000, -17000, -15000, -13000, -12000, -10000, -8000, -6000,
4263 +- -5000, -3000, -1000, 0, 2000, 3000, 5000, 6000, 8000, 10000, 12000,
4264 +- 13000, 15000, 17000, 19000, 21000, 23000, 25000, 27000, 28000, 30000,
4265 +- 32000, 33000, 35000, 37000, 38000, 40000, 42000, 43000, 45000, 47000,
4266 +- 48000, 50000, 52000, 53000, 55000, 57000, 58000, 60000, 62000, 64000,
4267 +- 66000, 68000, 70000, 71000, 73000, 75000, 77000, 78000, 80000, 82000,
4268 +- 83000, 85000, 87000, 88000, 90000, 92000, 93000, 95000, 97000, 98000,
4269 +- 100000, 102000, 103000, 105000, 107000, 109000, 111000, 113000, 115000,
4270 +- 117000, 118000, 120000, 122000, 123000,
4271 ++ -40000, -38000, -35000, -34000, -32000, -30000, -28000, -26000, -24000,
4272 ++ -22000, -20000, -18500, -17000, -15000, -13500, -12000, -10000, -8000,
4273 ++ -6500, -5000, -3500, -1500, 0, 2000, 3500, 5000, 6500, 8500, 10000,
4274 ++ 12000, 13500, 15000, 17000, 19000, 21000, 23000, 25000, 27000, 28500,
4275 ++ 30000, 32000, 33500, 35000, 37000, 38500, 40000, 42000, 43500, 45000,
4276 ++ 47000, 48500, 50000, 52000, 53500, 55000, 57000, 58500, 60000, 62000,
4277 ++ 64000, 66000, 68000, 70000, 71500, 73500, 75000, 77000, 78500, 80000,
4278 ++ 82000, 83500, 85000, 87000, 88500, 90000, 92000, 93500, 95000, 97000,
4279 ++ 98500, 100000, 102000, 103500, 105000, 107000, 109000, 111000, 113000,
4280 ++ 115000, 117000, 118500, 120000, 122000, 123500, 125000,
4281 + };
4282 +
4283 + /* OMAP4430 data */
4284 +diff --git a/drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h b/drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h
4285 +index a453ff8eb313e..9a3955c3853ba 100644
4286 +--- a/drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h
4287 ++++ b/drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h
4288 +@@ -53,9 +53,13 @@
4289 + * and thresholds for OMAP4430.
4290 + */
4291 +
4292 +-/* ADC conversion table limits */
4293 +-#define OMAP4430_ADC_START_VALUE 0
4294 +-#define OMAP4430_ADC_END_VALUE 127
4295 ++/*
4296 ++ * ADC conversion table limits. Ignore values outside the TRM listed
4297 ++ * range to avoid bogus thermal shutdowns. See omap4430 TRM chapter
4298 ++ * "18.4.10.2.3 ADC Codes Versus Temperature".
4299 ++ */
4300 ++#define OMAP4430_ADC_START_VALUE 13
4301 ++#define OMAP4430_ADC_END_VALUE 107
4302 + /* bandgap clock limits (no control on 4430) */
4303 + #define OMAP4430_MAX_FREQ 32768
4304 + #define OMAP4430_MIN_FREQ 32768
4305 +diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
4306 +index 457c0bf8cbf83..ffdf6da016c21 100644
4307 +--- a/drivers/tty/serial/qcom_geni_serial.c
4308 ++++ b/drivers/tty/serial/qcom_geni_serial.c
4309 +@@ -1047,7 +1047,7 @@ static unsigned int qcom_geni_serial_tx_empty(struct uart_port *uport)
4310 + }
4311 +
4312 + #ifdef CONFIG_SERIAL_QCOM_GENI_CONSOLE
4313 +-static int __init qcom_geni_console_setup(struct console *co, char *options)
4314 ++static int qcom_geni_console_setup(struct console *co, char *options)
4315 + {
4316 + struct uart_port *uport;
4317 + struct qcom_geni_serial_port *port;
4318 +diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
4319 +index 786fbb7d8be06..907bcbb93afbf 100644
4320 +--- a/drivers/xen/xenbus/xenbus_client.c
4321 ++++ b/drivers/xen/xenbus/xenbus_client.c
4322 +@@ -379,8 +379,14 @@ int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr,
4323 + int i, j;
4324 +
4325 + for (i = 0; i < nr_pages; i++) {
4326 +- err = gnttab_grant_foreign_access(dev->otherend_id,
4327 +- virt_to_gfn(vaddr), 0);
4328 ++ unsigned long gfn;
4329 ++
4330 ++ if (is_vmalloc_addr(vaddr))
4331 ++ gfn = pfn_to_gfn(vmalloc_to_pfn(vaddr));
4332 ++ else
4333 ++ gfn = virt_to_gfn(vaddr);
4334 ++
4335 ++ err = gnttab_grant_foreign_access(dev->otherend_id, gfn, 0);
4336 + if (err < 0) {
4337 + xenbus_dev_fatal(dev, err,
4338 + "granting access to ring page");
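
The xenbus change exists because virt_to_gfn() is only valid for addresses in the kernel linear map; ring buffers allocated with vmalloc() need a page-table walk via vmalloc_to_pfn() first. Sketch of the dispatch (assumption: vaddr is page-aligned):

    #include <linux/mm.h>
    #include <linux/vmalloc.h>
    #include <xen/page.h>

    /* Translate any page-aligned kernel address to a guest frame number. */
    static unsigned long example_addr_to_gfn(void *vaddr)
    {
            /*
             * vmalloc addresses are not in the linear map, so
             * virt_to_gfn() would compute garbage for them; walk the
             * page tables instead.
             */
            if (is_vmalloc_addr(vaddr))
                    return pfn_to_gfn(vmalloc_to_pfn(vaddr));

            return virt_to_gfn(vaddr);
    }
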
4339 +diff --git a/fs/affs/amigaffs.c b/fs/affs/amigaffs.c
4340 +index f708c45d5f664..29f11e10a7c7d 100644
4341 +--- a/fs/affs/amigaffs.c
4342 ++++ b/fs/affs/amigaffs.c
4343 +@@ -420,24 +420,51 @@ affs_mode_to_prot(struct inode *inode)
4344 + u32 prot = AFFS_I(inode)->i_protect;
4345 + umode_t mode = inode->i_mode;
4346 +
4347 ++ /*
4348 ++ * First, clear all RWED bits for owner, group, other.
4349 ++ * Then, recalculate them afresh.
4350 ++ *
4351 ++ * We'll always clear the delete-inhibit bit for the owner, as that is
4352 ++ * the classic single-user mode AmigaOS protection bit and we need to
4353 ++ * stay compatible with all scenarios.
4354 ++ *
4355 ++ * Since multi-user AmigaOS is an extension, we'll only set the
4356 ++ * delete-allow bit if any of the other bits in the same user class
4357 ++ * (group/other) are used.
4358 ++ */
4359 ++ prot &= ~(FIBF_NOEXECUTE | FIBF_NOREAD
4360 ++ | FIBF_NOWRITE | FIBF_NODELETE
4361 ++ | FIBF_GRP_EXECUTE | FIBF_GRP_READ
4362 ++ | FIBF_GRP_WRITE | FIBF_GRP_DELETE
4363 ++ | FIBF_OTR_EXECUTE | FIBF_OTR_READ
4364 ++ | FIBF_OTR_WRITE | FIBF_OTR_DELETE);
4365 ++
4366 ++ /* Classic single-user AmigaOS flags. These are inverted. */
4367 + if (!(mode & 0100))
4368 + prot |= FIBF_NOEXECUTE;
4369 + if (!(mode & 0400))
4370 + prot |= FIBF_NOREAD;
4371 + if (!(mode & 0200))
4372 + prot |= FIBF_NOWRITE;
4373 ++
4374 ++ /* Multi-user extended flags. Not inverted. */
4375 + if (mode & 0010)
4376 + prot |= FIBF_GRP_EXECUTE;
4377 + if (mode & 0040)
4378 + prot |= FIBF_GRP_READ;
4379 + if (mode & 0020)
4380 + prot |= FIBF_GRP_WRITE;
4381 ++ if (mode & 0070)
4382 ++ prot |= FIBF_GRP_DELETE;
4383 ++
4384 + if (mode & 0001)
4385 + prot |= FIBF_OTR_EXECUTE;
4386 + if (mode & 0004)
4387 + prot |= FIBF_OTR_READ;
4388 + if (mode & 0002)
4389 + prot |= FIBF_OTR_WRITE;
4390 ++ if (mode & 0007)
4391 ++ prot |= FIBF_OTR_DELETE;
4392 +
4393 + AFFS_I(inode)->i_protect = prot;
4394 + }
4395 +diff --git a/fs/affs/file.c b/fs/affs/file.c
4396 +index a85817f54483f..ba084b0b214b9 100644
4397 +--- a/fs/affs/file.c
4398 ++++ b/fs/affs/file.c
4399 +@@ -428,6 +428,24 @@ static int affs_write_begin(struct file *file, struct address_space *mapping,
4400 + return ret;
4401 + }
4402 +
4403 ++static int affs_write_end(struct file *file, struct address_space *mapping,
4404 ++ loff_t pos, unsigned int len, unsigned int copied,
4405 ++ struct page *page, void *fsdata)
4406 ++{
4407 ++ struct inode *inode = mapping->host;
4408 ++ int ret;
4409 ++
4410 ++ ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
4411 ++
4412 ++ /* Clear Archived bit on file writes, as AmigaOS would do */
4413 ++ if (AFFS_I(inode)->i_protect & FIBF_ARCHIVED) {
4414 ++ AFFS_I(inode)->i_protect &= ~FIBF_ARCHIVED;
4415 ++ mark_inode_dirty(inode);
4416 ++ }
4417 ++
4418 ++ return ret;
4419 ++}
4420 ++
4421 + static sector_t _affs_bmap(struct address_space *mapping, sector_t block)
4422 + {
4423 + return generic_block_bmap(mapping,block,affs_get_block);
4424 +@@ -437,7 +455,7 @@ const struct address_space_operations affs_aops = {
4425 + .readpage = affs_readpage,
4426 + .writepage = affs_writepage,
4427 + .write_begin = affs_write_begin,
4428 +- .write_end = generic_write_end,
4429 ++ .write_end = affs_write_end,
4430 + .direct_IO = affs_direct_IO,
4431 + .bmap = _affs_bmap
4432 + };
4433 +@@ -794,6 +812,12 @@ done:
4434 + if (tmp > inode->i_size)
4435 + inode->i_size = AFFS_I(inode)->mmu_private = tmp;
4436 +
4437 ++ /* Clear Archived bit on file writes, as AmigaOS would do */
4438 ++ if (AFFS_I(inode)->i_protect & FIBF_ARCHIVED) {
4439 ++ AFFS_I(inode)->i_protect &= ~FIBF_ARCHIVED;
4440 ++ mark_inode_dirty(inode);
4441 ++ }
4442 ++
4443 + err_first_bh:
4444 + unlock_page(page);
4445 + put_page(page);
4446 +diff --git a/fs/afs/fs_probe.c b/fs/afs/fs_probe.c
4447 +index 5d9ef517cf816..e7e98ad63a91a 100644
4448 +--- a/fs/afs/fs_probe.c
4449 ++++ b/fs/afs/fs_probe.c
4450 +@@ -161,8 +161,8 @@ responded:
4451 + }
4452 + }
4453 +
4454 +- rtt_us = rxrpc_kernel_get_srtt(call->net->socket, call->rxcall);
4455 +- if (rtt_us < server->probe.rtt) {
4456 ++ if (rxrpc_kernel_get_srtt(call->net->socket, call->rxcall, &rtt_us) &&
4457 ++ rtt_us < server->probe.rtt) {
4458 + server->probe.rtt = rtt_us;
4459 + server->rtt = rtt_us;
4460 + alist->preferred = index;
4461 +diff --git a/fs/afs/vl_probe.c b/fs/afs/vl_probe.c
4462 +index e3aa013c21779..081b7e5b13f58 100644
4463 +--- a/fs/afs/vl_probe.c
4464 ++++ b/fs/afs/vl_probe.c
4465 +@@ -92,8 +92,8 @@ responded:
4466 + }
4467 + }
4468 +
4469 +- rtt_us = rxrpc_kernel_get_srtt(call->net->socket, call->rxcall);
4470 +- if (rtt_us < server->probe.rtt) {
4471 ++ if (rxrpc_kernel_get_srtt(call->net->socket, call->rxcall, &rtt_us) &&
4472 ++ rtt_us < server->probe.rtt) {
4473 + server->probe.rtt = rtt_us;
4474 + alist->preferred = index;
4475 + have_result = true;
4476 +diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
4477 +index c037ef514b64a..8702e8a4d20db 100644
4478 +--- a/fs/btrfs/block-group.c
4479 ++++ b/fs/btrfs/block-group.c
4480 +@@ -1814,7 +1814,6 @@ static struct btrfs_block_group *btrfs_create_block_group_cache(
4481 +
4482 + cache->fs_info = fs_info;
4483 + cache->full_stripe_len = btrfs_full_stripe_len(fs_info, start);
4484 +- set_free_space_tree_thresholds(cache);
4485 +
4486 + cache->discard_index = BTRFS_DISCARD_INDEX_UNUSED;
4487 +
4488 +@@ -1928,6 +1927,8 @@ static int read_one_block_group(struct btrfs_fs_info *info,
4489 + if (ret < 0)
4490 + goto error;
4491 +
4492 ++ set_free_space_tree_thresholds(cache);
4493 ++
4494 + if (need_clear) {
4495 + /*
4496 + * When we mount with old space cache, we need to
4497 +@@ -2148,6 +2149,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
4498 + return -ENOMEM;
4499 +
4500 + cache->length = size;
4501 ++ set_free_space_tree_thresholds(cache);
4502 + cache->used = bytes_used;
4503 + cache->flags = type;
4504 + cache->last_byte_to_unpin = (u64)-1;
4505 +diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
4506 +index 82ab6e5a386da..367e3044b620b 100644
4507 +--- a/fs/btrfs/ctree.c
4508 ++++ b/fs/btrfs/ctree.c
4509 +@@ -1297,6 +1297,8 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
4510 + btrfs_tree_read_unlock_blocking(eb);
4511 + free_extent_buffer(eb);
4512 +
4513 ++ btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb_rewin),
4514 ++ eb_rewin, btrfs_header_level(eb_rewin));
4515 + btrfs_tree_read_lock(eb_rewin);
4516 + __tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm);
4517 + WARN_ON(btrfs_header_nritems(eb_rewin) >
4518 +@@ -1370,7 +1372,6 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
4519 +
4520 + if (!eb)
4521 + return NULL;
4522 +- btrfs_tree_read_lock(eb);
4523 + if (old_root) {
4524 + btrfs_set_header_bytenr(eb, eb->start);
4525 + btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
4526 +@@ -1378,6 +1379,9 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
4527 + btrfs_set_header_level(eb, old_root->level);
4528 + btrfs_set_header_generation(eb, old_generation);
4529 + }
4530 ++ btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb), eb,
4531 ++ btrfs_header_level(eb));
4532 ++ btrfs_tree_read_lock(eb);
4533 + if (tm)
4534 + __tree_mod_log_rewind(fs_info, eb, time_seq, tm);
4535 + else
4536 +diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
4537 +index 5871ef78edbac..e9eedc053fc52 100644
4538 +--- a/fs/btrfs/extent-tree.c
4539 ++++ b/fs/btrfs/extent-tree.c
4540 +@@ -4527,7 +4527,7 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4541 + return ERR_PTR(-EUCLEAN);
4542 + }
4543 +
4544 +- btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
4545 ++ btrfs_set_buffer_lockdep_class(owner, buf, level);
4546 + btrfs_tree_lock(buf);
4547 + btrfs_clean_tree_block(buf);
4548 + clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
4549 +diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
4550 +index 8ba8788461ae5..df68736bdad1b 100644
4551 +--- a/fs/btrfs/extent_io.c
4552 ++++ b/fs/btrfs/extent_io.c
4553 +@@ -5640,9 +5640,9 @@ void read_extent_buffer(const struct extent_buffer *eb, void *dstv,
4554 + }
4555 + }
4556 +
4557 +-int read_extent_buffer_to_user(const struct extent_buffer *eb,
4558 +- void __user *dstv,
4559 +- unsigned long start, unsigned long len)
4560 ++int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb,
4561 ++ void __user *dstv,
4562 ++ unsigned long start, unsigned long len)
4563 + {
4564 + size_t cur;
4565 + size_t offset;
4566 +@@ -5662,7 +5662,7 @@ int read_extent_buffer_to_user(const struct extent_buffer *eb,
4567 +
4568 + cur = min(len, (PAGE_SIZE - offset));
4569 + kaddr = page_address(page);
4570 +- if (copy_to_user(dst, kaddr + offset, cur)) {
4571 ++ if (copy_to_user_nofault(dst, kaddr + offset, cur)) {
4572 + ret = -EFAULT;
4573 + break;
4574 + }
4575 +diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
4576 +index 87f60a48f7500..0ab8a20d282b8 100644
4577 +--- a/fs/btrfs/extent_io.h
4578 ++++ b/fs/btrfs/extent_io.h
4579 +@@ -241,9 +241,9 @@ int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
4580 + void read_extent_buffer(const struct extent_buffer *eb, void *dst,
4581 + unsigned long start,
4582 + unsigned long len);
4583 +-int read_extent_buffer_to_user(const struct extent_buffer *eb,
4584 +- void __user *dst, unsigned long start,
4585 +- unsigned long len);
4586 ++int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb,
4587 ++ void __user *dst, unsigned long start,
4588 ++ unsigned long len);
4589 + void write_extent_buffer_fsid(const struct extent_buffer *eb, const void *src);
4590 + void write_extent_buffer_chunk_tree_uuid(const struct extent_buffer *eb,
4591 + const void *src);
4592 +diff --git a/fs/btrfs/free-space-tree.c b/fs/btrfs/free-space-tree.c
4593 +index 8b1f5c8897b75..6b9faf3b0e967 100644
4594 +--- a/fs/btrfs/free-space-tree.c
4595 ++++ b/fs/btrfs/free-space-tree.c
4596 +@@ -22,6 +22,10 @@ void set_free_space_tree_thresholds(struct btrfs_block_group *cache)
4597 + size_t bitmap_size;
4598 + u64 num_bitmaps, total_bitmap_size;
4599 +
4600 ++ if (WARN_ON(cache->length == 0))
4601 ++ btrfs_warn(cache->fs_info, "block group %llu length is zero",
4602 ++ cache->start);
4603 ++
4604 + /*
4605 + * We convert to bitmaps when the disk space required for using extents
4606 + * exceeds that required for using bitmaps.
4607 +diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
4608 +index 1448bc43561c2..5cbebf32082ab 100644
4609 +--- a/fs/btrfs/ioctl.c
4610 ++++ b/fs/btrfs/ioctl.c
4611 +@@ -2086,9 +2086,14 @@ static noinline int copy_to_sk(struct btrfs_path *path,
4612 + sh.len = item_len;
4613 + sh.transid = found_transid;
4614 +
4615 +- /* copy search result header */
4616 +- if (copy_to_user(ubuf + *sk_offset, &sh, sizeof(sh))) {
4617 +- ret = -EFAULT;
4618 ++ /*
4619 ++ * Copy the search result header. If we fault, loop again so we
4620 ++ * can fault in the pages and return -EFAULT there if there's
4621 ++ * still a problem. Otherwise we'll fault and then copy the buffer
4622 ++ * in properly the next time through.
4623 ++ */
4624 ++ if (copy_to_user_nofault(ubuf + *sk_offset, &sh, sizeof(sh))) {
4625 ++ ret = 0;
4626 + goto out;
4627 + }
4628 +
4629 +@@ -2096,10 +2101,14 @@ static noinline int copy_to_sk(struct btrfs_path *path,
4630 +
4631 + if (item_len) {
4632 + char __user *up = ubuf + *sk_offset;
4633 +- /* copy the item */
4634 +- if (read_extent_buffer_to_user(leaf, up,
4635 +- item_off, item_len)) {
4636 +- ret = -EFAULT;
4637 ++ /*
4638 ++ * Copy the item, same behavior as above, but reset the
4639 ++ * *sk_offset so we copy the full thing again.
4640 ++ */
4641 ++ if (read_extent_buffer_to_user_nofault(leaf, up,
4642 ++ item_off, item_len)) {
4643 ++ ret = 0;
4644 ++ *sk_offset -= sizeof(sh);
4645 + goto out;
4646 + }
4647 +
4648 +@@ -2184,6 +2193,10 @@ static noinline int search_ioctl(struct inode *inode,
4649 + key.offset = sk->min_offset;
4650 +
4651 + while (1) {
4652 ++ ret = fault_in_pages_writeable(ubuf, *buf_size - sk_offset);
4653 ++ if (ret)
4654 ++ break;
4655 ++
4656 + ret = btrfs_search_forward(root, &key, path, sk->min_transid);
4657 + if (ret != 0) {
4658 + if (ret > 0)
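
The hunk above moves the ioctl to a "fault in first, copy nofault later" protocol. A minimal sketch of that retry loop, where fill_buffer() is a hypothetical stand-in for the locked tree search plus copy_to_user_nofault() (it returns bytes copied, -EAGAIN when the nofault copy hit an unmapped page, or a negative error):

	/*
	 * Sketch of the fault-in/retry protocol search_ioctl() uses above.
	 * fill_buffer() is hypothetical; see the lead-in for its contract.
	 */
	static long copy_results_to_user(char __user *ubuf, size_t buf_size)
	{
		size_t copied = 0;
		long ret = 0;

		while (copied < buf_size) {
			/* Fault the pages in while no locks are held. */
			if (fault_in_pages_writeable(ubuf + copied,
						     buf_size - copied))
				return -EFAULT;	/* range is genuinely bad */

			ret = fill_buffer(ubuf + copied, buf_size - copied);
			if (ret == -EAGAIN)
				continue;	/* pages were reclaimed; retry */
			if (ret <= 0)
				break;		/* done, or a real error */
			copied += ret;
		}
		return copied ? copied : ret;
	}
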
4659 +diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
4660 +index 5f5b21e389dbc..4e857e91c76e5 100644
4661 +--- a/fs/btrfs/scrub.c
4662 ++++ b/fs/btrfs/scrub.c
4663 +@@ -3783,50 +3783,84 @@ static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
4664 + return 0;
4665 + }
4666 +
4667 ++static void scrub_workers_put(struct btrfs_fs_info *fs_info)
4668 ++{
4669 ++ if (refcount_dec_and_mutex_lock(&fs_info->scrub_workers_refcnt,
4670 ++ &fs_info->scrub_lock)) {
4671 ++ struct btrfs_workqueue *scrub_workers = NULL;
4672 ++ struct btrfs_workqueue *scrub_wr_comp = NULL;
4673 ++ struct btrfs_workqueue *scrub_parity = NULL;
4674 ++
4675 ++ scrub_workers = fs_info->scrub_workers;
4676 ++ scrub_wr_comp = fs_info->scrub_wr_completion_workers;
4677 ++ scrub_parity = fs_info->scrub_parity_workers;
4678 ++
4679 ++ fs_info->scrub_workers = NULL;
4680 ++ fs_info->scrub_wr_completion_workers = NULL;
4681 ++ fs_info->scrub_parity_workers = NULL;
4682 ++ mutex_unlock(&fs_info->scrub_lock);
4683 ++
4684 ++ btrfs_destroy_workqueue(scrub_workers);
4685 ++ btrfs_destroy_workqueue(scrub_wr_comp);
4686 ++ btrfs_destroy_workqueue(scrub_parity);
4687 ++ }
4688 ++}
4689 ++
4690 + /*
4691 + * Get a reference count on fs_info->scrub_workers; start workers if necessary.
4692 + */
4693 + static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
4694 + int is_dev_replace)
4695 + {
4696 ++ struct btrfs_workqueue *scrub_workers = NULL;
4697 ++ struct btrfs_workqueue *scrub_wr_comp = NULL;
4698 ++ struct btrfs_workqueue *scrub_parity = NULL;
4699 + unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;
4700 + int max_active = fs_info->thread_pool_size;
4701 ++ int ret = -ENOMEM;
4702 +
4703 +- lockdep_assert_held(&fs_info->scrub_lock);
4704 ++ if (refcount_inc_not_zero(&fs_info->scrub_workers_refcnt))
4705 ++ return 0;
4706 +
4707 +- if (refcount_read(&fs_info->scrub_workers_refcnt) == 0) {
4708 +- ASSERT(fs_info->scrub_workers == NULL);
4709 +- fs_info->scrub_workers = btrfs_alloc_workqueue(fs_info, "scrub",
4710 +- flags, is_dev_replace ? 1 : max_active, 4);
4711 +- if (!fs_info->scrub_workers)
4712 +- goto fail_scrub_workers;
4713 +-
4714 +- ASSERT(fs_info->scrub_wr_completion_workers == NULL);
4715 +- fs_info->scrub_wr_completion_workers =
4716 +- btrfs_alloc_workqueue(fs_info, "scrubwrc", flags,
4717 +- max_active, 2);
4718 +- if (!fs_info->scrub_wr_completion_workers)
4719 +- goto fail_scrub_wr_completion_workers;
4720 ++ scrub_workers = btrfs_alloc_workqueue(fs_info, "scrub", flags,
4721 ++ is_dev_replace ? 1 : max_active, 4);
4722 ++ if (!scrub_workers)
4723 ++ goto fail_scrub_workers;
4724 +
4725 +- ASSERT(fs_info->scrub_parity_workers == NULL);
4726 +- fs_info->scrub_parity_workers =
4727 +- btrfs_alloc_workqueue(fs_info, "scrubparity", flags,
4728 ++ scrub_wr_comp = btrfs_alloc_workqueue(fs_info, "scrubwrc", flags,
4729 + max_active, 2);
4730 +- if (!fs_info->scrub_parity_workers)
4731 +- goto fail_scrub_parity_workers;
4732 ++ if (!scrub_wr_comp)
4733 ++ goto fail_scrub_wr_completion_workers;
4734 +
4735 ++ scrub_parity = btrfs_alloc_workqueue(fs_info, "scrubparity", flags,
4736 ++ max_active, 2);
4737 ++ if (!scrub_parity)
4738 ++ goto fail_scrub_parity_workers;
4739 ++
4740 ++ mutex_lock(&fs_info->scrub_lock);
4741 ++ if (refcount_read(&fs_info->scrub_workers_refcnt) == 0) {
4742 ++ ASSERT(fs_info->scrub_workers == NULL &&
4743 ++ fs_info->scrub_wr_completion_workers == NULL &&
4744 ++ fs_info->scrub_parity_workers == NULL);
4745 ++ fs_info->scrub_workers = scrub_workers;
4746 ++ fs_info->scrub_wr_completion_workers = scrub_wr_comp;
4747 ++ fs_info->scrub_parity_workers = scrub_parity;
4748 + refcount_set(&fs_info->scrub_workers_refcnt, 1);
4749 +- } else {
4750 +- refcount_inc(&fs_info->scrub_workers_refcnt);
4751 ++ mutex_unlock(&fs_info->scrub_lock);
4752 ++ return 0;
4753 + }
4754 +- return 0;
4755 ++ /* Other thread raced in and created the workers for us */
4756 ++ refcount_inc(&fs_info->scrub_workers_refcnt);
4757 ++ mutex_unlock(&fs_info->scrub_lock);
4758 +
4759 ++ ret = 0;
4760 ++ btrfs_destroy_workqueue(scrub_parity);
4761 + fail_scrub_parity_workers:
4762 +- btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
4763 ++ btrfs_destroy_workqueue(scrub_wr_comp);
4764 + fail_scrub_wr_completion_workers:
4765 +- btrfs_destroy_workqueue(fs_info->scrub_workers);
4766 ++ btrfs_destroy_workqueue(scrub_workers);
4767 + fail_scrub_workers:
4768 +- return -ENOMEM;
4769 ++ return ret;
4770 + }
4771 +
4772 + int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
4773 +@@ -3837,9 +3871,6 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
4774 + int ret;
4775 + struct btrfs_device *dev;
4776 + unsigned int nofs_flag;
4777 +- struct btrfs_workqueue *scrub_workers = NULL;
4778 +- struct btrfs_workqueue *scrub_wr_comp = NULL;
4779 +- struct btrfs_workqueue *scrub_parity = NULL;
4780 +
4781 + if (btrfs_fs_closing(fs_info))
4782 + return -EAGAIN;
4783 +@@ -3886,13 +3917,17 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
4784 + if (IS_ERR(sctx))
4785 + return PTR_ERR(sctx);
4786 +
4787 ++ ret = scrub_workers_get(fs_info, is_dev_replace);
4788 ++ if (ret)
4789 ++ goto out_free_ctx;
4790 ++
4791 + mutex_lock(&fs_info->fs_devices->device_list_mutex);
4792 + dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL, true);
4793 + if (!dev || (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) &&
4794 + !is_dev_replace)) {
4795 + mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4796 + ret = -ENODEV;
4797 +- goto out_free_ctx;
4798 ++ goto out;
4799 + }
4800 +
4801 + if (!is_dev_replace && !readonly &&
4802 +@@ -3901,7 +3936,7 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
4803 + btrfs_err_in_rcu(fs_info, "scrub: device %s is not writable",
4804 + rcu_str_deref(dev->name));
4805 + ret = -EROFS;
4806 +- goto out_free_ctx;
4807 ++ goto out;
4808 + }
4809 +
4810 + mutex_lock(&fs_info->scrub_lock);
4811 +@@ -3910,7 +3945,7 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
4812 + mutex_unlock(&fs_info->scrub_lock);
4813 + mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4814 + ret = -EIO;
4815 +- goto out_free_ctx;
4816 ++ goto out;
4817 + }
4818 +
4819 + down_read(&fs_info->dev_replace.rwsem);
4820 +@@ -3921,17 +3956,10 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
4821 + mutex_unlock(&fs_info->scrub_lock);
4822 + mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4823 + ret = -EINPROGRESS;
4824 +- goto out_free_ctx;
4825 ++ goto out;
4826 + }
4827 + up_read(&fs_info->dev_replace.rwsem);
4828 +
4829 +- ret = scrub_workers_get(fs_info, is_dev_replace);
4830 +- if (ret) {
4831 +- mutex_unlock(&fs_info->scrub_lock);
4832 +- mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4833 +- goto out_free_ctx;
4834 +- }
4835 +-
4836 + sctx->readonly = readonly;
4837 + dev->scrub_ctx = sctx;
4838 + mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4839 +@@ -3984,24 +4012,14 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
4840 +
4841 + mutex_lock(&fs_info->scrub_lock);
4842 + dev->scrub_ctx = NULL;
4843 +- if (refcount_dec_and_test(&fs_info->scrub_workers_refcnt)) {
4844 +- scrub_workers = fs_info->scrub_workers;
4845 +- scrub_wr_comp = fs_info->scrub_wr_completion_workers;
4846 +- scrub_parity = fs_info->scrub_parity_workers;
4847 +-
4848 +- fs_info->scrub_workers = NULL;
4849 +- fs_info->scrub_wr_completion_workers = NULL;
4850 +- fs_info->scrub_parity_workers = NULL;
4851 +- }
4852 + mutex_unlock(&fs_info->scrub_lock);
4853 +
4854 +- btrfs_destroy_workqueue(scrub_workers);
4855 +- btrfs_destroy_workqueue(scrub_wr_comp);
4856 +- btrfs_destroy_workqueue(scrub_parity);
4857 ++ scrub_workers_put(fs_info);
4858 + scrub_put_ctx(sctx);
4859 +
4860 + return ret;
4861 +-
4862 ++out:
4863 ++ scrub_workers_put(fs_info);
4864 + out_free_ctx:
4865 + scrub_free_ctx(sctx);
4866 +
4867 +diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
4868 +index 517b44300a05c..7b1fee630f978 100644
4869 +--- a/fs/btrfs/tree-checker.c
4870 ++++ b/fs/btrfs/tree-checker.c
4871 +@@ -984,7 +984,7 @@ static int check_inode_item(struct extent_buffer *leaf,
4872 + /* Note for ROOT_TREE_DIR_ITEM, mkfs could set its transid 0 */
4873 + if (btrfs_inode_transid(leaf, iitem) > super_gen + 1) {
4874 + inode_item_err(leaf, slot,
4875 +- "invalid inode generation: has %llu expect [0, %llu]",
4876 ++ "invalid inode transid: has %llu expect [0, %llu]",
4877 + btrfs_inode_transid(leaf, iitem), super_gen + 1);
4878 + return -EUCLEAN;
4879 + }
4880 +diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
4881 +index 0fecf1e4d8f66..0e50b885d3fd6 100644
4882 +--- a/fs/btrfs/volumes.c
4883 ++++ b/fs/btrfs/volumes.c
4884 +@@ -4462,6 +4462,7 @@ int btrfs_uuid_scan_kthread(void *data)
4885 + goto skip;
4886 + }
4887 + update_tree:
4888 ++ btrfs_release_path(path);
4889 + if (!btrfs_is_empty_uuid(root_item.uuid)) {
4890 + ret = btrfs_uuid_tree_add(trans, root_item.uuid,
4891 + BTRFS_UUID_KEY_SUBVOL,
4892 +@@ -4486,6 +4487,7 @@ update_tree:
4893 + }
4894 +
4895 + skip:
4896 ++ btrfs_release_path(path);
4897 + if (trans) {
4898 + ret = btrfs_end_transaction(trans);
4899 + trans = NULL;
4900 +@@ -4493,7 +4495,6 @@ skip:
4901 + break;
4902 + }
4903 +
4904 +- btrfs_release_path(path);
4905 + if (key.offset < (u64)-1) {
4906 + key.offset++;
4907 + } else if (key.type < BTRFS_ROOT_ITEM_KEY) {
4908 +diff --git a/fs/ceph/file.c b/fs/ceph/file.c
4909 +index d51c3f2fdca02..327649883ec7c 100644
4910 +--- a/fs/ceph/file.c
4911 ++++ b/fs/ceph/file.c
4912 +@@ -2507,6 +2507,7 @@ const struct file_operations ceph_file_fops = {
4913 + .mmap = ceph_mmap,
4914 + .fsync = ceph_fsync,
4915 + .lock = ceph_lock,
4916 ++ .setlease = simple_nosetlease,
4917 + .flock = ceph_flock,
4918 + .splice_read = generic_file_splice_read,
4919 + .splice_write = iter_file_splice_write,
4920 +diff --git a/fs/eventpoll.c b/fs/eventpoll.c
4921 +index e0decff22ae27..8107e06d7f6f5 100644
4922 +--- a/fs/eventpoll.c
4923 ++++ b/fs/eventpoll.c
4924 +@@ -1995,9 +1995,9 @@ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests)
4925 + * during ep_insert().
4926 + */
4927 + if (list_empty(&epi->ffd.file->f_tfile_llink)) {
4928 +- get_file(epi->ffd.file);
4929 +- list_add(&epi->ffd.file->f_tfile_llink,
4930 +- &tfile_check_list);
4931 ++ if (get_file_rcu(epi->ffd.file))
4932 ++ list_add(&epi->ffd.file->f_tfile_llink,
4933 ++ &tfile_check_list);
4934 + }
4935 + }
4936 + }
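
The switch from get_file() to get_file_rcu() matters because an unconditional increment can revive a file whose last reference was already dropped. A self-contained userspace model of the increment-if-nonzero semantics (the kernel's get_file_rcu() is an atomic inc-not-zero on f_count):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	static bool inc_not_zero(atomic_long *cnt)
	{
		long v = atomic_load(cnt);

		while (v != 0)
			if (atomic_compare_exchange_weak(cnt, &v, v + 1))
				return true;
		return false;	/* object already on its way to being freed */
	}

	int main(void)
	{
		atomic_long refs = 0;	/* final fput() already ran */

		printf("%s\n", inc_not_zero(&refs) ? "got a reference"
						   : "refused: count was zero");
		return 0;
	}
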
4937 +diff --git a/fs/ext2/file.c b/fs/ext2/file.c
4938 +index 60378ddf1424b..96044f5dbc0e0 100644
4939 +--- a/fs/ext2/file.c
4940 ++++ b/fs/ext2/file.c
4941 +@@ -93,8 +93,10 @@ static vm_fault_t ext2_dax_fault(struct vm_fault *vmf)
4942 + struct inode *inode = file_inode(vmf->vma->vm_file);
4943 + struct ext2_inode_info *ei = EXT2_I(inode);
4944 + vm_fault_t ret;
4945 ++ bool write = (vmf->flags & FAULT_FLAG_WRITE) &&
4946 ++ (vmf->vma->vm_flags & VM_SHARED);
4947 +
4948 +- if (vmf->flags & FAULT_FLAG_WRITE) {
4949 ++ if (write) {
4950 + sb_start_pagefault(inode->i_sb);
4951 + file_update_time(vmf->vma->vm_file);
4952 + }
4953 +@@ -103,7 +105,7 @@ static vm_fault_t ext2_dax_fault(struct vm_fault *vmf)
4954 + ret = dax_iomap_fault(vmf, PE_SIZE_PTE, NULL, NULL, &ext2_iomap_ops);
4955 +
4956 + up_read(&ei->dax_sem);
4957 +- if (vmf->flags & FAULT_FLAG_WRITE)
4958 ++ if (write)
4959 + sb_end_pagefault(inode->i_sb);
4960 + return ret;
4961 + }
4962 +diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
4963 +index a76e55bc28ebf..27f467a0f008e 100644
4964 +--- a/fs/gfs2/log.c
4965 ++++ b/fs/gfs2/log.c
4966 +@@ -901,6 +901,36 @@ static void empty_ail1_list(struct gfs2_sbd *sdp)
4967 + }
4968 + }
4969 +
4970 ++/**
4971 ++ * trans_drain - drain the buf and databuf queue for a failed transaction
4972 ++ * @tr: the transaction to drain
4973 ++ *
4974 ++ * When this is called, we're taking an error exit for a log write that failed
4975 ++ * but since we bypassed the after_commit functions, we need to remove the
4976 ++ * items from the buf and databuf queue.
4977 ++ */
4978 ++static void trans_drain(struct gfs2_trans *tr)
4979 ++{
4980 ++ struct gfs2_bufdata *bd;
4981 ++ struct list_head *head;
4982 ++
4983 ++ if (!tr)
4984 ++ return;
4985 ++
4986 ++ head = &tr->tr_buf;
4987 ++ while (!list_empty(head)) {
4988 ++ bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
4989 ++ list_del_init(&bd->bd_list);
4990 ++ kmem_cache_free(gfs2_bufdata_cachep, bd);
4991 ++ }
4992 ++ head = &tr->tr_databuf;
4993 ++ while (!list_empty(head)) {
4994 ++ bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
4995 ++ list_del_init(&bd->bd_list);
4996 ++ kmem_cache_free(gfs2_bufdata_cachep, bd);
4997 ++ }
4998 ++}
4999 ++
5000 + /**
5001 + * gfs2_log_flush - flush incore transaction(s)
5002 + * @sdp: the filesystem
5003 +@@ -1005,6 +1035,7 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
5004 +
5005 + out:
5006 + if (gfs2_withdrawn(sdp)) {
5007 ++ trans_drain(tr);
5008 + /**
5009 + * If the tr_list is empty, we're withdrawing during a log
5010 + * flush that targets a transaction, but the transaction was
5011 +diff --git a/fs/gfs2/trans.c b/fs/gfs2/trans.c
5012 +index a3dfa3aa87ad9..d897dd73c5999 100644
5013 +--- a/fs/gfs2/trans.c
5014 ++++ b/fs/gfs2/trans.c
5015 +@@ -52,6 +52,7 @@ int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks,
5016 + tr->tr_reserved += gfs2_struct2blk(sdp, revokes);
5017 + INIT_LIST_HEAD(&tr->tr_databuf);
5018 + INIT_LIST_HEAD(&tr->tr_buf);
5019 ++ INIT_LIST_HEAD(&tr->tr_list);
5020 + INIT_LIST_HEAD(&tr->tr_ail1_list);
5021 + INIT_LIST_HEAD(&tr->tr_ail2_list);
5022 +
5023 +diff --git a/fs/io_uring.c b/fs/io_uring.c
5024 +index 4115bfedf15dc..38f3ec15ba3b1 100644
5025 +--- a/fs/io_uring.c
5026 ++++ b/fs/io_uring.c
5027 +@@ -2697,8 +2697,15 @@ static int io_read(struct io_kiocb *req, bool force_nonblock)
5028 + else
5029 + ret2 = -EINVAL;
5030 +
5031 ++ /* no retry on NONBLOCK marked file */
5032 ++ if (ret2 == -EAGAIN && (req->file->f_flags & O_NONBLOCK)) {
5033 ++ ret = 0;
5034 ++ goto done;
5035 ++ }
5036 ++
5037 + /* Catch -EAGAIN return for forced non-blocking submission */
5038 + if (!force_nonblock || ret2 != -EAGAIN) {
5039 ++ done:
5040 + kiocb_done(kiocb, ret2);
5041 + } else {
5042 + copy_iov:
5043 +@@ -2823,7 +2830,13 @@ static int io_write(struct io_kiocb *req, bool force_nonblock)
5044 + */
5045 + if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
5046 + ret2 = -EAGAIN;
5047 ++ /* no retry on NONBLOCK marked file */
5048 ++ if (ret2 == -EAGAIN && (req->file->f_flags & O_NONBLOCK)) {
5049 ++ ret = 0;
5050 ++ goto done;
5051 ++ }
5052 + if (!force_nonblock || ret2 != -EAGAIN) {
5053 ++done:
5054 + kiocb_done(kiocb, ret2);
5055 + } else {
5056 + copy_iov:
5057 +@@ -6928,7 +6941,7 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
5058 + table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
5059 + index = i & IORING_FILE_TABLE_MASK;
5060 + if (table->files[index]) {
5061 +- file = io_file_from_index(ctx, index);
5062 ++ file = table->files[index];
5063 + err = io_queue_file_removal(data, file);
5064 + if (err)
5065 + break;
5066 +@@ -6957,6 +6970,7 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
5067 + table->files[index] = file;
5068 + err = io_sqe_file_register(ctx, file, i);
5069 + if (err) {
5070 ++ table->files[index] = NULL;
5071 + fput(file);
5072 + break;
5073 + }
5074 +diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c
5075 +index 2f7e89e4be3e3..4eb2ecd31b0d2 100644
5076 +--- a/fs/xfs/libxfs/xfs_attr_leaf.c
5077 ++++ b/fs/xfs/libxfs/xfs_attr_leaf.c
5078 +@@ -996,8 +996,10 @@ xfs_attr_shortform_verify(
5079 + * struct xfs_attr_sf_entry has a variable length.
5080 + * Check the fixed-offset parts of the structure are
5081 + * within the data buffer.
5082 ++ * xfs_attr_sf_entry is defined with a 1-byte variable
5083 ++ * array at the end, so we must subtract that off.
5084 + */
5085 +- if (((char *)sfep + sizeof(*sfep)) >= endp)
5086 ++ if (((char *)sfep + sizeof(*sfep) - 1) >= endp)
5087 + return __this_address;
5088 +
5089 + /* Don't allow names with known bad length. */
5090 +diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
5091 +index 667cdd0dfdf4a..aa784404964a0 100644
5092 +--- a/fs/xfs/libxfs/xfs_bmap.c
5093 ++++ b/fs/xfs/libxfs/xfs_bmap.c
5094 +@@ -6222,7 +6222,7 @@ xfs_bmap_validate_extent(
5095 +
5096 + isrt = XFS_IS_REALTIME_INODE(ip);
5097 + endfsb = irec->br_startblock + irec->br_blockcount - 1;
5098 +- if (isrt) {
5099 ++ if (isrt && whichfork == XFS_DATA_FORK) {
5100 + if (!xfs_verify_rtbno(mp, irec->br_startblock))
5101 + return __this_address;
5102 + if (!xfs_verify_rtbno(mp, endfsb))
5103 +diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
5104 +index afdc7f8e0e701..feb277874a1fb 100644
5105 +--- a/fs/xfs/xfs_bmap_util.c
5106 ++++ b/fs/xfs/xfs_bmap_util.c
5107 +@@ -1165,7 +1165,7 @@ xfs_insert_file_space(
5108 + goto out_trans_cancel;
5109 +
5110 + do {
5111 +- error = xfs_trans_roll_inode(&tp, ip);
5112 ++ error = xfs_defer_finish(&tp);
5113 + if (error)
5114 + goto out_trans_cancel;
5115 +
5116 +diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
5117 +index 00db81eac80d6..4d7385426149c 100644
5118 +--- a/fs/xfs/xfs_file.c
5119 ++++ b/fs/xfs/xfs_file.c
5120 +@@ -1220,6 +1220,14 @@ __xfs_filemap_fault(
5121 + return ret;
5122 + }
5123 +
5124 ++static inline bool
5125 ++xfs_is_write_fault(
5126 ++ struct vm_fault *vmf)
5127 ++{
5128 ++ return (vmf->flags & FAULT_FLAG_WRITE) &&
5129 ++ (vmf->vma->vm_flags & VM_SHARED);
5130 ++}
5131 ++
5132 + static vm_fault_t
5133 + xfs_filemap_fault(
5134 + struct vm_fault *vmf)
5135 +@@ -1227,7 +1235,7 @@ xfs_filemap_fault(
5136 + /* DAX can shortcut the normal fault path on write faults! */
5137 + return __xfs_filemap_fault(vmf, PE_SIZE_PTE,
5138 + IS_DAX(file_inode(vmf->vma->vm_file)) &&
5139 +- (vmf->flags & FAULT_FLAG_WRITE));
5140 ++ xfs_is_write_fault(vmf));
5141 + }
5142 +
5143 + static vm_fault_t
5144 +@@ -1240,7 +1248,7 @@ xfs_filemap_huge_fault(
5145 +
5146 + /* DAX can shortcut the normal fault path on write faults! */
5147 + return __xfs_filemap_fault(vmf, pe_size,
5148 +- (vmf->flags & FAULT_FLAG_WRITE));
5149 ++ xfs_is_write_fault(vmf));
5150 + }
5151 +
5152 + static vm_fault_t
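
The ext2 and xfs hunks above make the same correction: FAULT_FLAG_WRITE alone also matches write faults on MAP_PRIVATE mappings, which only copy-on-write into anonymous pages and never dirty the file. The predicate both now use, in isolation:

	/*
	 * A write fault only dirties the file when the mapping is shared;
	 * a write to a MAP_PRIVATE mapping COWs into anonymous memory and
	 * must not start a filesystem pagefault/freeze section.
	 */
	static inline bool is_file_write_fault(const struct vm_fault *vmf)
	{
		return (vmf->flags & FAULT_FLAG_WRITE) &&
		       (vmf->vma->vm_flags & VM_SHARED);
	}
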
5153 +diff --git a/include/drm/drm_hdcp.h b/include/drm/drm_hdcp.h
5154 +index c6bab4986a658..fe58dbb46962a 100644
5155 +--- a/include/drm/drm_hdcp.h
5156 ++++ b/include/drm/drm_hdcp.h
5157 +@@ -29,6 +29,9 @@
5158 + /* Slave address for the HDCP registers in the receiver */
5159 + #define DRM_HDCP_DDC_ADDR 0x3A
5160 +
5161 ++/* Value to use at the end of the SHA-1 bytestream used for repeaters */
5162 ++#define DRM_HDCP_SHA1_TERMINATOR 0x80
5163 ++
5164 + /* HDCP register offsets for HDMI/DVI devices */
5165 + #define DRM_HDCP_DDC_BKSV 0x00
5166 + #define DRM_HDCP_DDC_RI_PRIME 0x08
5167 +diff --git a/include/linux/bvec.h b/include/linux/bvec.h
5168 +index ac0c7299d5b8a..dd74503f7e5ea 100644
5169 +--- a/include/linux/bvec.h
5170 ++++ b/include/linux/bvec.h
5171 +@@ -117,11 +117,18 @@ static inline bool bvec_iter_advance(const struct bio_vec *bv,
5172 + return true;
5173 + }
5174 +
5175 ++static inline void bvec_iter_skip_zero_bvec(struct bvec_iter *iter)
5176 ++{
5177 ++ iter->bi_bvec_done = 0;
5178 ++ iter->bi_idx++;
5179 ++}
5180 ++
5181 + #define for_each_bvec(bvl, bio_vec, iter, start) \
5182 + for (iter = (start); \
5183 + (iter).bi_size && \
5184 + ((bvl = bvec_iter_bvec((bio_vec), (iter))), 1); \
5185 +- bvec_iter_advance((bio_vec), &(iter), (bvl).bv_len))
5186 ++ (bvl).bv_len ? (void)bvec_iter_advance((bio_vec), &(iter), \
5187 ++ (bvl).bv_len) : bvec_iter_skip_zero_bvec(&(iter)))
5188 +
5189 + /* for iterating one bio from start to end */
5190 + #define BVEC_ITER_ALL_INIT (struct bvec_iter) \
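
Without the bv_len check, bvec_iter_advance(..., 0) makes no progress and for_each_bvec() spins forever on a zero-length bvec. A self-contained userspace model of the fixed loop (not kernel code):

	#include <stdio.h>

	struct bvec { unsigned bv_len; };

	int main(void)
	{
		struct bvec vec[] = { { 3 }, { 0 }, { 5 } };	/* empty bvec mid-list */
		unsigned size = 8;	/* total bytes = 3 + 0 + 5 */
		unsigned idx = 0;

		while (size) {
			unsigned len = vec[idx].bv_len;

			if (!len) {	/* the patched macro's new branch */
				idx++;	/* skip; don't "advance by 0" */
				continue;
			}
			printf("bvec %u: %u bytes\n", idx, len);
			size -= len;
			idx++;
		}
		return 0;
	}
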
5191 +diff --git a/include/linux/libata.h b/include/linux/libata.h
5192 +index 77ccf040a128b..5f550eb27f811 100644
5193 +--- a/include/linux/libata.h
5194 ++++ b/include/linux/libata.h
5195 +@@ -421,6 +421,7 @@ enum {
5196 + ATA_HORKAGE_NO_DMA_LOG = (1 << 23), /* don't use DMA for log read */
5197 + ATA_HORKAGE_NOTRIM = (1 << 24), /* don't use TRIM */
5198 + ATA_HORKAGE_MAX_SEC_1024 = (1 << 25), /* Limit max sects to 1024 */
5199 ++ ATA_HORKAGE_MAX_TRIM_128M = (1 << 26), /* Limit max trim size to 128M */
5200 +
5201 + /* DMA mask for user DMA control: User visible values; DO NOT
5202 + renumber */
5203 +diff --git a/include/linux/log2.h b/include/linux/log2.h
5204 +index 83a4a3ca3e8a7..c619ec6eff4ae 100644
5205 +--- a/include/linux/log2.h
5206 ++++ b/include/linux/log2.h
5207 +@@ -173,7 +173,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
5208 + #define roundup_pow_of_two(n) \
5209 + ( \
5210 + __builtin_constant_p(n) ? ( \
5211 +- (n == 1) ? 1 : \
5212 ++ ((n) == 1) ? 1 : \
5213 + (1UL << (ilog2((n) - 1) + 1)) \
5214 + ) : \
5215 + __roundup_pow_of_two(n) \
5216 +diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
5217 +index 851425c3178f1..89016d08f6a27 100644
5218 +--- a/include/linux/netfilter/nfnetlink.h
5219 ++++ b/include/linux/netfilter/nfnetlink.h
5220 +@@ -43,8 +43,7 @@ int nfnetlink_has_listeners(struct net *net, unsigned int group);
5221 + int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 portid,
5222 + unsigned int group, int echo, gfp_t flags);
5223 + int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error);
5224 +-int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid,
5225 +- int flags);
5226 ++int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid);
5227 +
5228 + static inline u16 nfnl_msg_type(u8 subsys, u8 msg_type)
5229 + {
5230 +diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h
5231 +index 91eacbdcf33d2..f6abcc0bbd6e7 100644
5232 +--- a/include/net/af_rxrpc.h
5233 ++++ b/include/net/af_rxrpc.h
5234 +@@ -59,7 +59,7 @@ bool rxrpc_kernel_abort_call(struct socket *, struct rxrpc_call *,
5235 + void rxrpc_kernel_end_call(struct socket *, struct rxrpc_call *);
5236 + void rxrpc_kernel_get_peer(struct socket *, struct rxrpc_call *,
5237 + struct sockaddr_rxrpc *);
5238 +-u32 rxrpc_kernel_get_srtt(struct socket *, struct rxrpc_call *);
5239 ++bool rxrpc_kernel_get_srtt(struct socket *, struct rxrpc_call *, u32 *);
5240 + int rxrpc_kernel_charge_accept(struct socket *, rxrpc_notify_rx_t,
5241 + rxrpc_user_attach_call_t, unsigned long, gfp_t,
5242 + unsigned int);
5243 +diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
5244 +index 6f0f6fca9ac3e..ec2cbfab71f35 100644
5245 +--- a/include/net/netfilter/nf_tables.h
5246 ++++ b/include/net/netfilter/nf_tables.h
5247 +@@ -143,6 +143,8 @@ static inline u64 nft_reg_load64(const u32 *sreg)
5248 + static inline void nft_data_copy(u32 *dst, const struct nft_data *src,
5249 + unsigned int len)
5250 + {
5251 ++ if (len % NFT_REG32_SIZE)
5252 ++ dst[len / NFT_REG32_SIZE] = 0;
5253 + memcpy(dst, src, len);
5254 + }
5255 +
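
Registers here are 4-byte words, so when len is not a multiple of NFT_REG32_SIZE the memcpy() leaves the tail of the last word holding stale data; the hunk clears that word first. A standalone model of the same copy:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define REG32_SIZE 4	/* stands in for NFT_REG32_SIZE */

	/* Zero the partially-written final word before copying, mirroring
	 * the nft_data_copy() change above, so no stale bits leak. */
	static void reg_copy(uint32_t *dst, const void *src, unsigned len)
	{
		if (len % REG32_SIZE)
			dst[len / REG32_SIZE] = 0;
		memcpy(dst, src, len);
	}

	int main(void)
	{
		uint32_t regs[2] = { 0xdeadbeef, 0xdeadbeef };	/* stale contents */
		uint8_t payload[6] = { 1, 2, 3, 4, 5, 6 };	/* 6-byte copy */

		reg_copy(regs, payload, sizeof(payload));
		/* The two bytes memcpy() didn't write are now 0, not 0xde/0xad. */
		printf("reg1 = %08x\n", regs[1]);
		return 0;
	}
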
5256 +diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
5257 +index 059b6e45a0283..c33079b986e86 100644
5258 +--- a/include/trace/events/rxrpc.h
5259 ++++ b/include/trace/events/rxrpc.h
5260 +@@ -138,11 +138,16 @@ enum rxrpc_recvmsg_trace {
5261 + };
5262 +
5263 + enum rxrpc_rtt_tx_trace {
5264 ++ rxrpc_rtt_tx_cancel,
5265 + rxrpc_rtt_tx_data,
5266 ++ rxrpc_rtt_tx_no_slot,
5267 + rxrpc_rtt_tx_ping,
5268 + };
5269 +
5270 + enum rxrpc_rtt_rx_trace {
5271 ++ rxrpc_rtt_rx_cancel,
5272 ++ rxrpc_rtt_rx_lost,
5273 ++ rxrpc_rtt_rx_obsolete,
5274 + rxrpc_rtt_rx_ping_response,
5275 + rxrpc_rtt_rx_requested_ack,
5276 + };
5277 +@@ -339,10 +344,15 @@ enum rxrpc_tx_point {
5278 + E_(rxrpc_recvmsg_wait, "WAIT")
5279 +
5280 + #define rxrpc_rtt_tx_traces \
5281 ++ EM(rxrpc_rtt_tx_cancel, "CNCE") \
5282 + EM(rxrpc_rtt_tx_data, "DATA") \
5283 ++ EM(rxrpc_rtt_tx_no_slot, "FULL") \
5284 + E_(rxrpc_rtt_tx_ping, "PING")
5285 +
5286 + #define rxrpc_rtt_rx_traces \
5287 ++ EM(rxrpc_rtt_rx_cancel, "CNCL") \
5288 ++ EM(rxrpc_rtt_rx_obsolete, "OBSL") \
5289 ++ EM(rxrpc_rtt_rx_lost, "LOST") \
5290 + EM(rxrpc_rtt_rx_ping_response, "PONG") \
5291 + E_(rxrpc_rtt_rx_requested_ack, "RACK")
5292 +
5293 +@@ -1087,38 +1097,43 @@ TRACE_EVENT(rxrpc_recvmsg,
5294 +
5295 + TRACE_EVENT(rxrpc_rtt_tx,
5296 + TP_PROTO(struct rxrpc_call *call, enum rxrpc_rtt_tx_trace why,
5297 +- rxrpc_serial_t send_serial),
5298 ++ int slot, rxrpc_serial_t send_serial),
5299 +
5300 +- TP_ARGS(call, why, send_serial),
5301 ++ TP_ARGS(call, why, slot, send_serial),
5302 +
5303 + TP_STRUCT__entry(
5304 + __field(unsigned int, call )
5305 + __field(enum rxrpc_rtt_tx_trace, why )
5306 ++ __field(int, slot )
5307 + __field(rxrpc_serial_t, send_serial )
5308 + ),
5309 +
5310 + TP_fast_assign(
5311 + __entry->call = call->debug_id;
5312 + __entry->why = why;
5313 ++ __entry->slot = slot;
5314 + __entry->send_serial = send_serial;
5315 + ),
5316 +
5317 +- TP_printk("c=%08x %s sr=%08x",
5318 ++ TP_printk("c=%08x [%d] %s sr=%08x",
5319 + __entry->call,
5320 ++ __entry->slot,
5321 + __print_symbolic(__entry->why, rxrpc_rtt_tx_traces),
5322 + __entry->send_serial)
5323 + );
5324 +
5325 + TRACE_EVENT(rxrpc_rtt_rx,
5326 + TP_PROTO(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
5327 ++ int slot,
5328 + rxrpc_serial_t send_serial, rxrpc_serial_t resp_serial,
5329 + u32 rtt, u32 rto),
5330 +
5331 +- TP_ARGS(call, why, send_serial, resp_serial, rtt, rto),
5332 ++ TP_ARGS(call, why, slot, send_serial, resp_serial, rtt, rto),
5333 +
5334 + TP_STRUCT__entry(
5335 + __field(unsigned int, call )
5336 + __field(enum rxrpc_rtt_rx_trace, why )
5337 ++ __field(int, slot )
5338 + __field(rxrpc_serial_t, send_serial )
5339 + __field(rxrpc_serial_t, resp_serial )
5340 + __field(u32, rtt )
5341 +@@ -1128,14 +1143,16 @@ TRACE_EVENT(rxrpc_rtt_rx,
5342 + TP_fast_assign(
5343 + __entry->call = call->debug_id;
5344 + __entry->why = why;
5345 ++ __entry->slot = slot;
5346 + __entry->send_serial = send_serial;
5347 + __entry->resp_serial = resp_serial;
5348 + __entry->rtt = rtt;
5349 + __entry->rto = rto;
5350 + ),
5351 +
5352 +- TP_printk("c=%08x %s sr=%08x rr=%08x rtt=%u rto=%u",
5353 ++ TP_printk("c=%08x [%d] %s sr=%08x rr=%08x rtt=%u rto=%u",
5354 + __entry->call,
5355 ++ __entry->slot,
5356 + __print_symbolic(__entry->why, rxrpc_rtt_rx_traces),
5357 + __entry->send_serial,
5358 + __entry->resp_serial,
5359 +diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
5360 +index 4565456c0ef44..0b27da1d771ba 100644
5361 +--- a/include/uapi/linux/netfilter/nf_tables.h
5362 ++++ b/include/uapi/linux/netfilter/nf_tables.h
5363 +@@ -133,7 +133,7 @@ enum nf_tables_msg_types {
5364 + * @NFTA_LIST_ELEM: list element (NLA_NESTED)
5365 + */
5366 + enum nft_list_attributes {
5367 +- NFTA_LIST_UNPEC,
5368 ++ NFTA_LIST_UNSPEC,
5369 + NFTA_LIST_ELEM,
5370 + __NFTA_LIST_MAX
5371 + };
5372 +diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
5373 +index 0fd80ac81f705..72e943b3bd656 100644
5374 +--- a/kernel/bpf/syscall.c
5375 ++++ b/kernel/bpf/syscall.c
5376 +@@ -2629,7 +2629,7 @@ static int bpf_raw_tp_link_fill_link_info(const struct bpf_link *link,
5377 + u32 ulen = info->raw_tracepoint.tp_name_len;
5378 + size_t tp_len = strlen(tp_name);
5379 +
5380 +- if (ulen && !ubuf)
5381 ++ if (!ulen ^ !ubuf)
5382 + return -EINVAL;
5383 +
5384 + info->raw_tracepoint.tp_name_len = tp_len + 1;
5385 +diff --git a/mm/hugetlb.c b/mm/hugetlb.c
5386 +index 7952c6cb6f08c..e153509820958 100644
5387 +--- a/mm/hugetlb.c
5388 ++++ b/mm/hugetlb.c
5389 +@@ -1251,21 +1251,32 @@ static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
5390 + int nid, nodemask_t *nodemask)
5391 + {
5392 + unsigned long nr_pages = 1UL << huge_page_order(h);
5393 ++ if (nid == NUMA_NO_NODE)
5394 ++ nid = numa_mem_id();
5395 +
5396 + #ifdef CONFIG_CMA
5397 + {
5398 + struct page *page;
5399 + int node;
5400 +
5401 +- for_each_node_mask(node, *nodemask) {
5402 +- if (!hugetlb_cma[node])
5403 +- continue;
5404 +-
5405 +- page = cma_alloc(hugetlb_cma[node], nr_pages,
5406 +- huge_page_order(h), true);
5407 ++ if (hugetlb_cma[nid]) {
5408 ++ page = cma_alloc(hugetlb_cma[nid], nr_pages,
5409 ++ huge_page_order(h), true);
5410 + if (page)
5411 + return page;
5412 + }
5413 ++
5414 ++ if (!(gfp_mask & __GFP_THISNODE)) {
5415 ++ for_each_node_mask(node, *nodemask) {
5416 ++ if (node == nid || !hugetlb_cma[node])
5417 ++ continue;
5418 ++
5419 ++ page = cma_alloc(hugetlb_cma[node], nr_pages,
5420 ++ huge_page_order(h), true);
5421 ++ if (page)
5422 ++ return page;
5423 ++ }
5424 ++ }
5425 + }
5426 + #endif
5427 +
5428 +@@ -3469,6 +3480,22 @@ static unsigned int cpuset_mems_nr(unsigned int *array)
5429 + }
5430 +
5431 + #ifdef CONFIG_SYSCTL
5432 ++static int proc_hugetlb_doulongvec_minmax(struct ctl_table *table, int write,
5433 ++ void *buffer, size_t *length,
5434 ++ loff_t *ppos, unsigned long *out)
5435 ++{
5436 ++ struct ctl_table dup_table;
5437 ++
5438 ++ /*
5439 ++ * To avoid races with __do_proc_doulongvec_minmax(), duplicate
5440 ++ * @table on the stack and alter only the copy.
5441 ++ */
5442 ++ dup_table = *table;
5443 ++ dup_table.data = out;
5444 ++
5445 ++ return proc_doulongvec_minmax(&dup_table, write, buffer, length, ppos);
5446 ++}
5447 ++
5448 + static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
5449 + struct ctl_table *table, int write,
5450 + void *buffer, size_t *length, loff_t *ppos)
5451 +@@ -3480,9 +3507,8 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
5452 + if (!hugepages_supported())
5453 + return -EOPNOTSUPP;
5454 +
5455 +- table->data = &tmp;
5456 +- table->maxlen = sizeof(unsigned long);
5457 +- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
5458 ++ ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
5459 ++ &tmp);
5460 + if (ret)
5461 + goto out;
5462 +
5463 +@@ -3525,9 +3551,8 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
5464 + if (write && hstate_is_gigantic(h))
5465 + return -EINVAL;
5466 +
5467 +- table->data = &tmp;
5468 +- table->maxlen = sizeof(unsigned long);
5469 +- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
5470 ++ ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
5471 ++ &tmp);
5472 + if (ret)
5473 + goto out;
5474 +
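
The old code wrote table->data = &tmp into the one shared struct ctl_table, so two concurrent handler invocations could read or write through each other's stack variable. A condensed model of the fix (the maxlen assignment is this sketch's addition, not part of the patch's helper):

	static int hugetlb_style_handler(struct ctl_table *table, int write,
					 void *buffer, size_t *length, loff_t *ppos)
	{
		unsigned long tmp = 0;
		struct ctl_table dup_table = *table;	/* per-call stack copy */

		dup_table.data = &tmp;		/* private; no racing writers */
		dup_table.maxlen = sizeof(tmp);
		return proc_doulongvec_minmax(&dup_table, write, buffer,
					      length, ppos);
	}
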
5475 +diff --git a/mm/khugepaged.c b/mm/khugepaged.c
5476 +index dd592ea9a4a06..e6fc7c3e7dc98 100644
5477 +--- a/mm/khugepaged.c
5478 ++++ b/mm/khugepaged.c
5479 +@@ -1709,7 +1709,7 @@ static void collapse_file(struct mm_struct *mm,
5480 + xas_unlock_irq(&xas);
5481 + page_cache_sync_readahead(mapping, &file->f_ra,
5482 + file, index,
5483 +- PAGE_SIZE);
5484 ++ end - index);
5485 + /* drain pagevecs to help isolate_lru_page() */
5486 + lru_add_drain();
5487 + page = find_lock_page(mapping, index);
5488 +diff --git a/mm/madvise.c b/mm/madvise.c
5489 +index dd1d43cf026de..d4aa5f7765435 100644
5490 +--- a/mm/madvise.c
5491 ++++ b/mm/madvise.c
5492 +@@ -289,9 +289,9 @@ static long madvise_willneed(struct vm_area_struct *vma,
5493 + */
5494 + *prev = NULL; /* tell sys_madvise we drop mmap_lock */
5495 + get_file(file);
5496 +- mmap_read_unlock(current->mm);
5497 + offset = (loff_t)(start - vma->vm_start)
5498 + + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
5499 ++ mmap_read_unlock(current->mm);
5500 + vfs_fadvise(file, offset, end - start, POSIX_FADV_WILLNEED);
5501 + fput(file);
5502 + mmap_read_lock(current->mm);
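
The reorder above is about lifetime, not style: once mmap_read_unlock() runs, a concurrent unmap may free the vma, so everything derived from it must be computed first. The same lines, annotated:

	get_file(file);				/* pin the file before unlocking */
	offset = (loff_t)(start - vma->vm_start)
		 + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);	/* uses vma: do it now */
	mmap_read_unlock(current->mm);		/* vma may vanish from here on */
	vfs_fadvise(file, offset, end - start, POSIX_FADV_WILLNEED);
	fput(file);
	mmap_read_lock(current->mm);
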
5503 +diff --git a/mm/memory.c b/mm/memory.c
5504 +index a279c1a26af7e..03c693ea59bda 100644
5505 +--- a/mm/memory.c
5506 ++++ b/mm/memory.c
5507 +@@ -71,6 +71,7 @@
5508 + #include <linux/dax.h>
5509 + #include <linux/oom.h>
5510 + #include <linux/numa.h>
5511 ++#include <linux/vmalloc.h>
5512 +
5513 + #include <trace/events/kmem.h>
5514 +
5515 +@@ -2201,7 +2202,8 @@ EXPORT_SYMBOL(vm_iomap_memory);
5516 +
5517 + static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
5518 + unsigned long addr, unsigned long end,
5519 +- pte_fn_t fn, void *data, bool create)
5520 ++ pte_fn_t fn, void *data, bool create,
5521 ++ pgtbl_mod_mask *mask)
5522 + {
5523 + pte_t *pte;
5524 + int err = 0;
5525 +@@ -2209,7 +2211,7 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
5526 +
5527 + if (create) {
5528 + pte = (mm == &init_mm) ?
5529 +- pte_alloc_kernel(pmd, addr) :
5530 ++ pte_alloc_kernel_track(pmd, addr, mask) :
5531 + pte_alloc_map_lock(mm, pmd, addr, &ptl);
5532 + if (!pte)
5533 + return -ENOMEM;
5534 +@@ -2230,6 +2232,7 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
5535 + break;
5536 + }
5537 + } while (addr += PAGE_SIZE, addr != end);
5538 ++ *mask |= PGTBL_PTE_MODIFIED;
5539 +
5540 + arch_leave_lazy_mmu_mode();
5541 +
5542 +@@ -2240,7 +2243,8 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
5543 +
5544 + static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
5545 + unsigned long addr, unsigned long end,
5546 +- pte_fn_t fn, void *data, bool create)
5547 ++ pte_fn_t fn, void *data, bool create,
5548 ++ pgtbl_mod_mask *mask)
5549 + {
5550 + pmd_t *pmd;
5551 + unsigned long next;
5552 +@@ -2249,7 +2253,7 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
5553 + BUG_ON(pud_huge(*pud));
5554 +
5555 + if (create) {
5556 +- pmd = pmd_alloc(mm, pud, addr);
5557 ++ pmd = pmd_alloc_track(mm, pud, addr, mask);
5558 + if (!pmd)
5559 + return -ENOMEM;
5560 + } else {
5561 +@@ -2259,7 +2263,7 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
5562 + next = pmd_addr_end(addr, end);
5563 + if (create || !pmd_none_or_clear_bad(pmd)) {
5564 + err = apply_to_pte_range(mm, pmd, addr, next, fn, data,
5565 +- create);
5566 ++ create, mask);
5567 + if (err)
5568 + break;
5569 + }
5570 +@@ -2269,14 +2273,15 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
5571 +
5572 + static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d,
5573 + unsigned long addr, unsigned long end,
5574 +- pte_fn_t fn, void *data, bool create)
5575 ++ pte_fn_t fn, void *data, bool create,
5576 ++ pgtbl_mod_mask *mask)
5577 + {
5578 + pud_t *pud;
5579 + unsigned long next;
5580 + int err = 0;
5581 +
5582 + if (create) {
5583 +- pud = pud_alloc(mm, p4d, addr);
5584 ++ pud = pud_alloc_track(mm, p4d, addr, mask);
5585 + if (!pud)
5586 + return -ENOMEM;
5587 + } else {
5588 +@@ -2286,7 +2291,7 @@ static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d,
5589 + next = pud_addr_end(addr, end);
5590 + if (create || !pud_none_or_clear_bad(pud)) {
5591 + err = apply_to_pmd_range(mm, pud, addr, next, fn, data,
5592 +- create);
5593 ++ create, mask);
5594 + if (err)
5595 + break;
5596 + }
5597 +@@ -2296,14 +2301,15 @@ static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d,
5598 +
5599 + static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd,
5600 + unsigned long addr, unsigned long end,
5601 +- pte_fn_t fn, void *data, bool create)
5602 ++ pte_fn_t fn, void *data, bool create,
5603 ++ pgtbl_mod_mask *mask)
5604 + {
5605 + p4d_t *p4d;
5606 + unsigned long next;
5607 + int err = 0;
5608 +
5609 + if (create) {
5610 +- p4d = p4d_alloc(mm, pgd, addr);
5611 ++ p4d = p4d_alloc_track(mm, pgd, addr, mask);
5612 + if (!p4d)
5613 + return -ENOMEM;
5614 + } else {
5615 +@@ -2313,7 +2319,7 @@ static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd,
5616 + next = p4d_addr_end(addr, end);
5617 + if (create || !p4d_none_or_clear_bad(p4d)) {
5618 + err = apply_to_pud_range(mm, p4d, addr, next, fn, data,
5619 +- create);
5620 ++ create, mask);
5621 + if (err)
5622 + break;
5623 + }
5624 +@@ -2326,8 +2332,9 @@ static int __apply_to_page_range(struct mm_struct *mm, unsigned long addr,
5625 + void *data, bool create)
5626 + {
5627 + pgd_t *pgd;
5628 +- unsigned long next;
5629 ++ unsigned long start = addr, next;
5630 + unsigned long end = addr + size;
5631 ++ pgtbl_mod_mask mask = 0;
5632 + int err = 0;
5633 +
5634 + if (WARN_ON(addr >= end))
5635 +@@ -2338,11 +2345,14 @@ static int __apply_to_page_range(struct mm_struct *mm, unsigned long addr,
5636 + next = pgd_addr_end(addr, end);
5637 + if (!create && pgd_none_or_clear_bad(pgd))
5638 + continue;
5639 +- err = apply_to_p4d_range(mm, pgd, addr, next, fn, data, create);
5640 ++ err = apply_to_p4d_range(mm, pgd, addr, next, fn, data, create, &mask);
5641 + if (err)
5642 + break;
5643 + } while (pgd++, addr = next, addr != end);
5644 +
5645 ++ if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
5646 ++ arch_sync_kernel_mappings(start, start + size);
5647 ++
5648 + return err;
5649 + }
5650 +
5651 +diff --git a/mm/migrate.c b/mm/migrate.c
5652 +index 40cd7016ae6fc..3511f9529ea60 100644
5653 +--- a/mm/migrate.c
5654 ++++ b/mm/migrate.c
5655 +@@ -251,7 +251,7 @@ static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
5656 + entry = make_device_private_entry(new, pte_write(pte));
5657 + pte = swp_entry_to_pte(entry);
5658 + if (pte_swp_uffd_wp(*pvmw.pte))
5659 +- pte = pte_mkuffd_wp(pte);
5660 ++ pte = pte_swp_mkuffd_wp(pte);
5661 + }
5662 + }
5663 +
5664 +@@ -2330,10 +2330,17 @@ again:
5665 + entry = make_migration_entry(page, mpfn &
5666 + MIGRATE_PFN_WRITE);
5667 + swp_pte = swp_entry_to_pte(entry);
5668 +- if (pte_soft_dirty(pte))
5669 +- swp_pte = pte_swp_mksoft_dirty(swp_pte);
5670 +- if (pte_uffd_wp(pte))
5671 +- swp_pte = pte_swp_mkuffd_wp(swp_pte);
5672 ++ if (pte_present(pte)) {
5673 ++ if (pte_soft_dirty(pte))
5674 ++ swp_pte = pte_swp_mksoft_dirty(swp_pte);
5675 ++ if (pte_uffd_wp(pte))
5676 ++ swp_pte = pte_swp_mkuffd_wp(swp_pte);
5677 ++ } else {
5678 ++ if (pte_swp_soft_dirty(pte))
5679 ++ swp_pte = pte_swp_mksoft_dirty(swp_pte);
5680 ++ if (pte_swp_uffd_wp(pte))
5681 ++ swp_pte = pte_swp_mkuffd_wp(swp_pte);
5682 ++ }
5683 + set_pte_at(mm, addr, ptep, swp_pte);
5684 +
5685 + /*
5686 +diff --git a/mm/rmap.c b/mm/rmap.c
5687 +index 6cce9ef06753b..536f2706a6c8d 100644
5688 +--- a/mm/rmap.c
5689 ++++ b/mm/rmap.c
5690 +@@ -1511,9 +1511,14 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
5691 + */
5692 + entry = make_migration_entry(page, 0);
5693 + swp_pte = swp_entry_to_pte(entry);
5694 +- if (pte_soft_dirty(pteval))
5695 ++
5696 ++ /*
5697 ++ * pteval maps a zone device page and is therefore
5698 ++ * a swap pte.
5699 ++ */
5700 ++ if (pte_swp_soft_dirty(pteval))
5701 + swp_pte = pte_swp_mksoft_dirty(swp_pte);
5702 +- if (pte_uffd_wp(pteval))
5703 ++ if (pte_swp_uffd_wp(pteval))
5704 + swp_pte = pte_swp_mkuffd_wp(swp_pte);
5705 + set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
5706 + /*
5707 +diff --git a/mm/slub.c b/mm/slub.c
5708 +index ef303070d175a..76d005862c4d9 100644
5709 +--- a/mm/slub.c
5710 ++++ b/mm/slub.c
5711 +@@ -680,12 +680,12 @@ static void slab_fix(struct kmem_cache *s, char *fmt, ...)
5712 + }
5713 +
5714 + static bool freelist_corrupted(struct kmem_cache *s, struct page *page,
5715 +- void *freelist, void *nextfree)
5716 ++ void **freelist, void *nextfree)
5717 + {
5718 + if ((s->flags & SLAB_CONSISTENCY_CHECKS) &&
5719 +- !check_valid_pointer(s, page, nextfree)) {
5720 +- object_err(s, page, freelist, "Freechain corrupt");
5721 +- freelist = NULL;
5722 ++ !check_valid_pointer(s, page, nextfree) && freelist) {
5723 ++ object_err(s, page, *freelist, "Freechain corrupt");
5724 ++ *freelist = NULL;
5725 + slab_fix(s, "Isolate corrupted freechain");
5726 + return true;
5727 + }
5728 +@@ -1425,7 +1425,7 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node,
5729 + int objects) {}
5730 +
5731 + static bool freelist_corrupted(struct kmem_cache *s, struct page *page,
5732 +- void *freelist, void *nextfree)
5733 ++ void **freelist, void *nextfree)
5734 + {
5735 + return false;
5736 + }
5737 +@@ -2117,7 +2117,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
5738 + * 'freelist' is already corrupted. So isolate all objects
5739 + * starting at 'freelist'.
5740 + */
5741 +- if (freelist_corrupted(s, page, freelist, nextfree))
5742 ++ if (freelist_corrupted(s, page, &freelist, nextfree))
5743 + break;
5744 +
5745 + do {
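
The reason freelist_corrupted() now takes void **: a pointer parameter is passed by value, so the old "freelist = NULL" only changed the callee's copy and the caller kept walking the corrupt chain. A self-contained demonstration of the difference:

	#include <stdio.h>

	static void clear_by_value(int *p)  { p = NULL; }	/* callee's copy only */
	static void clear_by_ref(int **pp)  { *pp = NULL; }	/* caller's pointer */

	int main(void)
	{
		int x = 42;
		int *list = &x;

		clear_by_value(list);
		printf("by value: %s\n", list ? "still set" : "NULL");	/* still set */
		clear_by_ref(&list);
		printf("by ref:   %s\n", list ? "still set" : "NULL");	/* NULL */
		return 0;
	}
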
5746 +diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c
5747 +index 18028b9f95f01..65b1280cf2fc1 100644
5748 +--- a/net/batman-adv/bat_v_ogm.c
5749 ++++ b/net/batman-adv/bat_v_ogm.c
5750 +@@ -874,6 +874,12 @@ static void batadv_v_ogm_process(const struct sk_buff *skb, int ogm_offset,
5751 + ntohl(ogm_packet->seqno), ogm_throughput, ogm_packet->ttl,
5752 + ogm_packet->version, ntohs(ogm_packet->tvlv_len));
5753 +
5754 ++ if (batadv_is_my_mac(bat_priv, ogm_packet->orig)) {
5755 ++ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
5756 ++ "Drop packet: originator packet from ourself\n");
5757 ++ return;
5758 ++ }
5759 ++
5760 + /* If the throughput metric is 0, immediately drop the packet. No need
5761 + * to create orig_node / neigh_node for an unusable route.
5762 + */
5763 +@@ -1001,11 +1007,6 @@ int batadv_v_ogm_packet_recv(struct sk_buff *skb,
5764 + if (batadv_is_my_mac(bat_priv, ethhdr->h_source))
5765 + goto free_skb;
5766 +
5767 +- ogm_packet = (struct batadv_ogm2_packet *)skb->data;
5768 +-
5769 +- if (batadv_is_my_mac(bat_priv, ogm_packet->orig))
5770 +- goto free_skb;
5771 +-
5772 + batadv_inc_counter(bat_priv, BATADV_CNT_MGMT_RX);
5773 + batadv_add_counter(bat_priv, BATADV_CNT_MGMT_RX_BYTES,
5774 + skb->len + ETH_HLEN);
5775 +diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
5776 +index 41cc87f06b142..cfb9e16afe38a 100644
5777 +--- a/net/batman-adv/bridge_loop_avoidance.c
5778 ++++ b/net/batman-adv/bridge_loop_avoidance.c
5779 +@@ -437,7 +437,10 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, u8 *mac,
5780 + batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
5781 + skb->len + ETH_HLEN);
5782 +
5783 +- netif_rx(skb);
5784 ++ if (in_interrupt())
5785 ++ netif_rx(skb);
5786 ++ else
5787 ++ netif_rx_ni(skb);
5788 + out:
5789 + if (primary_if)
5790 + batadv_hardif_put(primary_if);
5791 +diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
5792 +index a18dcc686dc31..ef3f85b576c4c 100644
5793 +--- a/net/batman-adv/gateway_client.c
5794 ++++ b/net/batman-adv/gateway_client.c
5795 +@@ -703,8 +703,10 @@ batadv_gw_dhcp_recipient_get(struct sk_buff *skb, unsigned int *header_len,
5796 +
5797 + chaddr_offset = *header_len + BATADV_DHCP_CHADDR_OFFSET;
5798 + /* store the client address if the message is going to a client */
5799 +- if (ret == BATADV_DHCP_TO_CLIENT &&
5800 +- pskb_may_pull(skb, chaddr_offset + ETH_ALEN)) {
5801 ++ if (ret == BATADV_DHCP_TO_CLIENT) {
5802 ++ if (!pskb_may_pull(skb, chaddr_offset + ETH_ALEN))
5803 ++ return BATADV_DHCP_NO;
5804 ++
5805 + /* check if the DHCP packet carries an Ethernet DHCP */
5806 + p = skb->data + *header_len + BATADV_DHCP_HTYPE_OFFSET;
5807 + if (*p != BATADV_DHCP_HTYPE_ETHERNET)
5808 +diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
5809 +index 41fba93d857a6..fc28dc201b936 100644
5810 +--- a/net/bluetooth/hci_core.c
5811 ++++ b/net/bluetooth/hci_core.c
5812 +@@ -3370,7 +3370,7 @@ done:
5813 + bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
5814 + action, ret);
5815 +
5816 +- return NOTIFY_STOP;
5817 ++ return NOTIFY_DONE;
5818 + }
5819 +
5820 + /* Alloc HCI device */
5821 +diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c
5822 +index 760ca24228165..af402f458ee02 100644
5823 +--- a/net/netfilter/nf_conntrack_proto_udp.c
5824 ++++ b/net/netfilter/nf_conntrack_proto_udp.c
5825 +@@ -81,18 +81,6 @@ static bool udp_error(struct sk_buff *skb,
5826 + return false;
5827 + }
5828 +
5829 +-static void nf_conntrack_udp_refresh_unreplied(struct nf_conn *ct,
5830 +- struct sk_buff *skb,
5831 +- enum ip_conntrack_info ctinfo,
5832 +- u32 extra_jiffies)
5833 +-{
5834 +- if (unlikely(ctinfo == IP_CT_ESTABLISHED_REPLY &&
5835 +- ct->status & IPS_NAT_CLASH))
5836 +- nf_ct_kill(ct);
5837 +- else
5838 +- nf_ct_refresh_acct(ct, ctinfo, skb, extra_jiffies);
5839 +-}
5840 +-
5841 + /* Returns verdict for packet, and may modify conntracktype */
5842 + int nf_conntrack_udp_packet(struct nf_conn *ct,
5843 + struct sk_buff *skb,
5844 +@@ -124,12 +112,15 @@ int nf_conntrack_udp_packet(struct nf_conn *ct,
5845 +
5846 + nf_ct_refresh_acct(ct, ctinfo, skb, extra);
5847 +
5848 ++ /* never set ASSURED for IPS_NAT_CLASH, they time out soon */
5849 ++ if (unlikely((ct->status & IPS_NAT_CLASH)))
5850 ++ return NF_ACCEPT;
5851 ++
5852 + /* Also, more likely to be important, and not a probe */
5853 + if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status))
5854 + nf_conntrack_event_cache(IPCT_ASSURED, ct);
5855 + } else {
5856 +- nf_conntrack_udp_refresh_unreplied(ct, skb, ctinfo,
5857 +- timeouts[UDP_CT_UNREPLIED]);
5858 ++ nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[UDP_CT_UNREPLIED]);
5859 + }
5860 + return NF_ACCEPT;
5861 + }
5862 +@@ -206,12 +197,15 @@ int nf_conntrack_udplite_packet(struct nf_conn *ct,
5863 + if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
5864 + nf_ct_refresh_acct(ct, ctinfo, skb,
5865 + timeouts[UDP_CT_REPLIED]);
5866 ++
5867 ++ if (unlikely((ct->status & IPS_NAT_CLASH)))
5868 ++ return NF_ACCEPT;
5869 ++
5870 + /* Also, more likely to be important, and not a probe */
5871 + if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status))
5872 + nf_conntrack_event_cache(IPCT_ASSURED, ct);
5873 + } else {
5874 +- nf_conntrack_udp_refresh_unreplied(ct, skb, ctinfo,
5875 +- timeouts[UDP_CT_UNREPLIED]);
5876 ++ nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[UDP_CT_UNREPLIED]);
5877 + }
5878 + return NF_ACCEPT;
5879 + }
5880 +diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
5881 +index d31832d32e028..05059f620d41e 100644
5882 +--- a/net/netfilter/nf_tables_api.c
5883 ++++ b/net/netfilter/nf_tables_api.c
5884 +@@ -797,11 +797,11 @@ static int nf_tables_gettable(struct net *net, struct sock *nlsk,
5885 + nlh->nlmsg_seq, NFT_MSG_NEWTABLE, 0,
5886 + family, table);
5887 + if (err < 0)
5888 +- goto err;
5889 ++ goto err_fill_table_info;
5890 +
5891 +- return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
5892 ++ return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid);
5893 +
5894 +-err:
5895 ++err_fill_table_info:
5896 + kfree_skb(skb2);
5897 + return err;
5898 + }
5899 +@@ -1527,11 +1527,11 @@ static int nf_tables_getchain(struct net *net, struct sock *nlsk,
5900 + nlh->nlmsg_seq, NFT_MSG_NEWCHAIN, 0,
5901 + family, table, chain);
5902 + if (err < 0)
5903 +- goto err;
5904 ++ goto err_fill_chain_info;
5905 +
5906 +- return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
5907 ++ return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid);
5908 +
5909 +-err:
5910 ++err_fill_chain_info:
5911 + kfree_skb(skb2);
5912 + return err;
5913 + }
5914 +@@ -2898,11 +2898,11 @@ static int nf_tables_getrule(struct net *net, struct sock *nlsk,
5915 + nlh->nlmsg_seq, NFT_MSG_NEWRULE, 0,
5916 + family, table, chain, rule, NULL);
5917 + if (err < 0)
5918 +- goto err;
5919 ++ goto err_fill_rule_info;
5920 +
5921 +- return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
5922 ++ return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid);
5923 +
5924 +-err:
5925 ++err_fill_rule_info:
5926 + kfree_skb(skb2);
5927 + return err;
5928 + }
5929 +@@ -3643,7 +3643,8 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
5930 + goto nla_put_failure;
5931 + }
5932 +
5933 +- if (nla_put(skb, NFTA_SET_USERDATA, set->udlen, set->udata))
5934 ++ if (set->udata &&
5935 ++ nla_put(skb, NFTA_SET_USERDATA, set->udlen, set->udata))
5936 + goto nla_put_failure;
5937 +
5938 + nest = nla_nest_start_noflag(skb, NFTA_SET_DESC);
5939 +@@ -3828,11 +3829,11 @@ static int nf_tables_getset(struct net *net, struct sock *nlsk,
5940 +
5941 + err = nf_tables_fill_set(skb2, &ctx, set, NFT_MSG_NEWSET, 0);
5942 + if (err < 0)
5943 +- goto err;
5944 ++ goto err_fill_set_info;
5945 +
5946 +- return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
5947 ++ return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid);
5948 +
5949 +-err:
5950 ++err_fill_set_info:
5951 + kfree_skb(skb2);
5952 + return err;
5953 + }
5954 +@@ -4720,24 +4721,18 @@ static int nft_get_set_elem(struct nft_ctx *ctx, struct nft_set *set,
5955 + err = -ENOMEM;
5956 + skb = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
5957 + if (skb == NULL)
5958 +- goto err1;
5959 ++ return err;
5960 +
5961 + err = nf_tables_fill_setelem_info(skb, ctx, ctx->seq, ctx->portid,
5962 + NFT_MSG_NEWSETELEM, 0, set, &elem);
5963 + if (err < 0)
5964 +- goto err2;
5965 ++ goto err_fill_setelem;
5966 +
5967 +- err = nfnetlink_unicast(skb, ctx->net, ctx->portid, MSG_DONTWAIT);
5968 +- /* This avoids a loop in nfnetlink. */
5969 +- if (err < 0)
5970 +- goto err1;
5971 ++ return nfnetlink_unicast(skb, ctx->net, ctx->portid);
5972 +
5973 +- return 0;
5974 +-err2:
5975 ++err_fill_setelem:
5976 + kfree_skb(skb);
5977 +-err1:
5978 +- /* this avoids a loop in nfnetlink. */
5979 +- return err == -EAGAIN ? -ENOBUFS : err;
5980 ++ return err;
5981 + }
5982 +
5983 + /* called with rcu_read_lock held */
5984 +@@ -5991,10 +5986,11 @@ static int nf_tables_getobj(struct net *net, struct sock *nlsk,
5985 + nlh->nlmsg_seq, NFT_MSG_NEWOBJ, 0,
5986 + family, table, obj, reset);
5987 + if (err < 0)
5988 +- goto err;
5989 ++ goto err_fill_obj_info;
5990 +
5991 +- return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
5992 +-err:
5993 ++ return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid);
5994 ++
5995 ++err_fill_obj_info:
5996 + kfree_skb(skb2);
5997 + return err;
5998 + }
5999 +@@ -6843,10 +6839,11 @@ static int nf_tables_getflowtable(struct net *net, struct sock *nlsk,
6000 + NFT_MSG_NEWFLOWTABLE, 0, family,
6001 + flowtable, &flowtable->hook_list);
6002 + if (err < 0)
6003 +- goto err;
6004 ++ goto err_fill_flowtable_info;
6005 +
6006 +- return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
6007 +-err:
6008 ++ return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid);
6009 ++
6010 ++err_fill_flowtable_info:
6011 + kfree_skb(skb2);
6012 + return err;
6013 + }
6014 +@@ -7017,10 +7014,11 @@ static int nf_tables_getgen(struct net *net, struct sock *nlsk,
6015 + err = nf_tables_fill_gen_info(skb2, net, NETLINK_CB(skb).portid,
6016 + nlh->nlmsg_seq);
6017 + if (err < 0)
6018 +- goto err;
6019 ++ goto err_fill_gen_info;
6020 +
6021 +- return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
6022 +-err:
6023 ++ return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid);
6024 ++
6025 ++err_fill_gen_info:
6026 + kfree_skb(skb2);
6027 + return err;
6028 + }
6029 +diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
6030 +index 5f24edf958309..3a2e64e13b227 100644
6031 +--- a/net/netfilter/nfnetlink.c
6032 ++++ b/net/netfilter/nfnetlink.c
6033 +@@ -149,10 +149,15 @@ int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error)
6034 + }
6035 + EXPORT_SYMBOL_GPL(nfnetlink_set_err);
6036 +
6037 +-int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid,
6038 +- int flags)
6039 ++int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid)
6040 + {
6041 +- return netlink_unicast(net->nfnl, skb, portid, flags);
6042 ++ int err;
6043 ++
6044 ++ err = nlmsg_unicast(net->nfnl, skb, portid);
6045 ++ if (err == -EAGAIN)
6046 ++ err = -ENOBUFS;
6047 ++
6048 ++ return err;
6049 + }
6050 + EXPORT_SYMBOL_GPL(nfnetlink_unicast);
6051 +
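
For context, a minimal user-space sketch of the error mapping the reworked nfnetlink_unicast() centralizes (unicast_status() is an illustrative name, not a kernel API):

    #include <errno.h>

    /* netlink reports -EAGAIN when the receiver's socket buffer is
     * full; bouncing that back into nfnetlink's get/dump paths used
     * to require every caller to translate it to -ENOBUFS to avoid a
     * retry loop. The reworked helper now does the mapping once: */
    static int unicast_status(int err)
    {
            return err == -EAGAIN ? -ENOBUFS : err;
    }
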
6052 +diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
6053 +index 0ba020ca38e68..7ca2ca4bba055 100644
6054 +--- a/net/netfilter/nfnetlink_log.c
6055 ++++ b/net/netfilter/nfnetlink_log.c
6056 +@@ -356,8 +356,7 @@ __nfulnl_send(struct nfulnl_instance *inst)
6057 + goto out;
6058 + }
6059 + }
6060 +- nfnetlink_unicast(inst->skb, inst->net, inst->peer_portid,
6061 +- MSG_DONTWAIT);
6062 ++ nfnetlink_unicast(inst->skb, inst->net, inst->peer_portid);
6063 + out:
6064 + inst->qlen = 0;
6065 + inst->skb = NULL;
6066 +diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
6067 +index 3243a31f6e829..70d086944bcc7 100644
6068 +--- a/net/netfilter/nfnetlink_queue.c
6069 ++++ b/net/netfilter/nfnetlink_queue.c
6070 +@@ -681,7 +681,7 @@ __nfqnl_enqueue_packet(struct net *net, struct nfqnl_instance *queue,
6071 + *packet_id_ptr = htonl(entry->id);
6072 +
6073 + /* nfnetlink_unicast will either free the nskb or add it to a socket */
6074 +- err = nfnetlink_unicast(nskb, net, queue->peer_portid, MSG_DONTWAIT);
6075 ++ err = nfnetlink_unicast(nskb, net, queue->peer_portid);
6076 + if (err < 0) {
6077 + if (queue->flags & NFQA_CFG_F_FAIL_OPEN) {
6078 + failopen = 1;
6079 +diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
6080 +index 3b9b97aa4b32e..3a6c84fb2c90d 100644
6081 +--- a/net/netfilter/nft_flow_offload.c
6082 ++++ b/net/netfilter/nft_flow_offload.c
6083 +@@ -102,7 +102,7 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
6084 + }
6085 +
6086 + if (nf_ct_ext_exist(ct, NF_CT_EXT_HELPER) ||
6087 +- ct->status & IPS_SEQ_ADJUST)
6088 ++ ct->status & (IPS_SEQ_ADJUST | IPS_NAT_CLASH))
6089 + goto out;
6090 +
6091 + if (!nf_ct_is_confirmed(ct))
6092 +diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
6093 +index a7de3a58f553d..67ce866a446d9 100644
6094 +--- a/net/netfilter/nft_payload.c
6095 ++++ b/net/netfilter/nft_payload.c
6096 +@@ -87,7 +87,9 @@ void nft_payload_eval(const struct nft_expr *expr,
6097 + u32 *dest = &regs->data[priv->dreg];
6098 + int offset;
6099 +
6100 +- dest[priv->len / NFT_REG32_SIZE] = 0;
6101 ++ if (priv->len % NFT_REG32_SIZE)
6102 ++ dest[priv->len / NFT_REG32_SIZE] = 0;
6103 ++
6104 + switch (priv->base) {
6105 + case NFT_PAYLOAD_LL_HEADER:
6106 + if (!skb_mac_header_was_set(skb))
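
A minimal user-space sketch of the rule the nft_payload hunk above enforces (illustrative names, not the kernel's nft internals):

    #include <stdint.h>
    #include <string.h>

    #define NFT_REG32_SIZE 4

    /* When len payload bytes land in an array of 32-bit registers,
     * only a partially filled final register needs pre-zeroing so
     * stale bytes don't leak past the copy. If len is an exact
     * multiple of the register size there is no partial register,
     * and writing dest[len / 4] = 0 would instead clobber the
     * register that follows -- the bug the added guard fixes. */
    static void load_payload(uint32_t *dest, const uint8_t *src,
                             unsigned int len)
    {
            if (len % NFT_REG32_SIZE)
                    dest[len / NFT_REG32_SIZE] = 0;
            memcpy(dest, src, len);
    }
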
6107 +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
6108 +index 301f41d4929bd..82f7802983797 100644
6109 +--- a/net/packet/af_packet.c
6110 ++++ b/net/packet/af_packet.c
6111 +@@ -2170,7 +2170,8 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
6112 + int skb_len = skb->len;
6113 + unsigned int snaplen, res;
6114 + unsigned long status = TP_STATUS_USER;
6115 +- unsigned short macoff, netoff, hdrlen;
6116 ++ unsigned short macoff, hdrlen;
6117 ++ unsigned int netoff;
6118 + struct sk_buff *copy_skb = NULL;
6119 + struct timespec64 ts;
6120 + __u32 ts_status;
6121 +@@ -2239,6 +2240,10 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
6122 + }
6123 + macoff = netoff - maclen;
6124 + }
6125 ++ if (netoff > USHRT_MAX) {
6126 ++ atomic_inc(&po->tp_drops);
6127 ++ goto drop_n_restore;
6128 ++ }
6129 + if (po->tp_version <= TPACKET_V2) {
6130 + if (macoff + snaplen > po->rx_ring.frame_size) {
6131 + if (po->copy_thresh &&
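
A small stand-alone illustration of the overflow class the tpacket_rcv change above addresses (the offset value is hypothetical):

    #include <limits.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned int netoff = USHRT_MAX + 42u;  /* oversized header offset */
            unsigned short old_netoff = (unsigned short)netoff; /* silently wraps */

            printf("netoff=%u, truncated=%u\n", netoff, old_netoff);

            /* The fix widens the variable and drops the packet instead
             * of letting the wrapped value corrupt the ring layout: */
            if (netoff > USHRT_MAX)
                    puts("drop packet");
            return 0;
    }
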
6132 +diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
6133 +index 9a2139ebd67d7..ca1fea72c8d29 100644
6134 +--- a/net/rxrpc/ar-internal.h
6135 ++++ b/net/rxrpc/ar-internal.h
6136 +@@ -488,7 +488,6 @@ enum rxrpc_call_flag {
6137 + RXRPC_CALL_RX_LAST, /* Received the last packet (at rxtx_top) */
6138 + RXRPC_CALL_TX_LAST, /* Last packet in Tx buffer (at rxtx_top) */
6139 + RXRPC_CALL_SEND_PING, /* A ping will need to be sent */
6140 +- RXRPC_CALL_PINGING, /* Ping in process */
6141 + RXRPC_CALL_RETRANS_TIMEOUT, /* Retransmission due to timeout occurred */
6142 + RXRPC_CALL_BEGAN_RX_TIMER, /* We began the expect_rx_by timer */
6143 + RXRPC_CALL_RX_HEARD, /* The peer responded at least once to this call */
6144 +@@ -673,9 +672,13 @@ struct rxrpc_call {
6145 + rxrpc_seq_t ackr_consumed; /* Highest packet shown consumed */
6146 + rxrpc_seq_t ackr_seen; /* Highest packet shown seen */
6147 +
6148 +- /* ping management */
6149 +- rxrpc_serial_t ping_serial; /* Last ping sent */
6150 +- ktime_t ping_time; /* Time last ping sent */
6151 ++ /* RTT management */
6152 ++ rxrpc_serial_t rtt_serial[4]; /* Serial number of DATA or PING sent */
6153 ++ ktime_t rtt_sent_at[4]; /* Time packet sent */
6154 ++ unsigned long rtt_avail; /* Mask of available slots in bits 0-3,
6155 ++ * Mask of pending samples in bits 8-11 */
6156 ++#define RXRPC_CALL_RTT_AVAIL_MASK 0xf
6157 ++#define RXRPC_CALL_RTT_PEND_SHIFT 8
6158 +
6159 + /* transmission-phase ACK management */
6160 + ktime_t acks_latest_ts; /* Timestamp of latest ACK received */
6161 +@@ -1037,7 +1040,7 @@ static inline bool __rxrpc_abort_eproto(struct rxrpc_call *call,
6162 + /*
6163 + * rtt.c
6164 + */
6165 +-void rxrpc_peer_add_rtt(struct rxrpc_call *, enum rxrpc_rtt_rx_trace,
6166 ++void rxrpc_peer_add_rtt(struct rxrpc_call *, enum rxrpc_rtt_rx_trace, int,
6167 + rxrpc_serial_t, rxrpc_serial_t, ktime_t, ktime_t);
6168 + unsigned long rxrpc_get_rto_backoff(struct rxrpc_peer *, bool);
6169 + void rxrpc_peer_init_rtt(struct rxrpc_peer *);
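
A rough user-space model of the four-slot RTT probe protocol introduced above (plain bit operations stand in for the kernel's atomic bitops and memory barriers):

    #define RTT_SLOTS      4
    #define RTT_PEND_SHIFT 8

    /* Bits 0-3 of the mask are free probe slots; bits 8-11 mark
     * probes in flight. A transmitter claims a free slot, records
     * serial and timestamp, then publishes the pending bit; the ACK
     * path that matches the probe's serial clears the pending bit
     * and returns the slot to the free mask. */
    static int rtt_claim_slot(unsigned long *mask)
    {
            for (int i = 0; i < RTT_SLOTS; i++) {
                    if (*mask & (1UL << i)) {
                            *mask &= ~(1UL << i);                 /* slot busy */
                            *mask |= 1UL << (i + RTT_PEND_SHIFT); /* sample pending */
                            return i;
                    }
            }
            return -1;      /* all four slots busy: skip this probe */
    }

    static void rtt_complete_slot(unsigned long *mask, int i)
    {
            *mask &= ~(1UL << (i + RTT_PEND_SHIFT));
            *mask |= 1UL << i;      /* slot reusable */
    }
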
6170 +diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
6171 +index 38a46167523fa..a40fae0139423 100644
6172 +--- a/net/rxrpc/call_object.c
6173 ++++ b/net/rxrpc/call_object.c
6174 +@@ -153,6 +153,7 @@ struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp,
6175 + call->cong_ssthresh = RXRPC_RXTX_BUFF_SIZE - 1;
6176 +
6177 + call->rxnet = rxnet;
6178 ++ call->rtt_avail = RXRPC_CALL_RTT_AVAIL_MASK;
6179 + atomic_inc(&rxnet->nr_calls);
6180 + return call;
6181 +
6182 +diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
6183 +index 767579328a069..19ddfc9807e89 100644
6184 +--- a/net/rxrpc/input.c
6185 ++++ b/net/rxrpc/input.c
6186 +@@ -608,36 +608,57 @@ unlock:
6187 + }
6188 +
6189 + /*
6190 +- * Process a requested ACK.
6191 ++ * See if there's a cached RTT probe to complete.
6192 + */
6193 +-static void rxrpc_input_requested_ack(struct rxrpc_call *call,
6194 +- ktime_t resp_time,
6195 +- rxrpc_serial_t orig_serial,
6196 +- rxrpc_serial_t ack_serial)
6197 ++static void rxrpc_complete_rtt_probe(struct rxrpc_call *call,
6198 ++ ktime_t resp_time,
6199 ++ rxrpc_serial_t acked_serial,
6200 ++ rxrpc_serial_t ack_serial,
6201 ++ enum rxrpc_rtt_rx_trace type)
6202 + {
6203 +- struct rxrpc_skb_priv *sp;
6204 +- struct sk_buff *skb;
6205 ++ rxrpc_serial_t orig_serial;
6206 ++ unsigned long avail;
6207 + ktime_t sent_at;
6208 +- int ix;
6209 ++ bool matched = false;
6210 ++ int i;
6211 +
6212 +- for (ix = 0; ix < RXRPC_RXTX_BUFF_SIZE; ix++) {
6213 +- skb = call->rxtx_buffer[ix];
6214 +- if (!skb)
6215 +- continue;
6216 ++ avail = READ_ONCE(call->rtt_avail);
6217 ++ smp_rmb(); /* Read avail bits before accessing data. */
6218 +
6219 +- sent_at = skb->tstamp;
6220 +- smp_rmb(); /* Read timestamp before serial. */
6221 +- sp = rxrpc_skb(skb);
6222 +- if (sp->hdr.serial != orig_serial)
6223 ++ for (i = 0; i < ARRAY_SIZE(call->rtt_serial); i++) {
6224 ++ if (!test_bit(i + RXRPC_CALL_RTT_PEND_SHIFT, &avail))
6225 + continue;
6226 +- goto found;
6227 +- }
6228 +
6229 +- return;
6230 ++ sent_at = call->rtt_sent_at[i];
6231 ++ orig_serial = call->rtt_serial[i];
6232 ++
6233 ++ if (orig_serial == acked_serial) {
6234 ++ clear_bit(i + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail);
6235 ++ smp_mb(); /* Read data before setting avail bit */
6236 ++ set_bit(i, &call->rtt_avail);
6237 ++ if (type != rxrpc_rtt_rx_cancel)
6238 ++ rxrpc_peer_add_rtt(call, type, i, acked_serial, ack_serial,
6239 ++ sent_at, resp_time);
6240 ++ else
6241 ++ trace_rxrpc_rtt_rx(call, rxrpc_rtt_rx_cancel, i,
6242 ++ orig_serial, acked_serial, 0, 0);
6243 ++ matched = true;
6244 ++ }
6245 ++
6246 ++ /* If a later serial is being acked, then mark this slot as
6247 ++ * being available.
6248 ++ */
6249 ++ if (after(acked_serial, orig_serial)) {
6250 ++ trace_rxrpc_rtt_rx(call, rxrpc_rtt_rx_obsolete, i,
6251 ++ orig_serial, acked_serial, 0, 0);
6252 ++ clear_bit(i + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail);
6253 ++ smp_wmb();
6254 ++ set_bit(i, &call->rtt_avail);
6255 ++ }
6256 ++ }
6257 +
6258 +-found:
6259 +- rxrpc_peer_add_rtt(call, rxrpc_rtt_rx_requested_ack,
6260 +- orig_serial, ack_serial, sent_at, resp_time);
6261 ++ if (!matched)
6262 ++ trace_rxrpc_rtt_rx(call, rxrpc_rtt_rx_lost, 9, 0, acked_serial, 0, 0);
6263 + }
6264 +
6265 + /*
6266 +@@ -682,27 +703,11 @@ static void rxrpc_input_check_for_lost_ack(struct rxrpc_call *call)
6267 + */
6268 + static void rxrpc_input_ping_response(struct rxrpc_call *call,
6269 + ktime_t resp_time,
6270 +- rxrpc_serial_t orig_serial,
6271 ++ rxrpc_serial_t acked_serial,
6272 + rxrpc_serial_t ack_serial)
6273 + {
6274 +- rxrpc_serial_t ping_serial;
6275 +- ktime_t ping_time;
6276 +-
6277 +- ping_time = call->ping_time;
6278 +- smp_rmb();
6279 +- ping_serial = READ_ONCE(call->ping_serial);
6280 +-
6281 +- if (orig_serial == call->acks_lost_ping)
6282 ++ if (acked_serial == call->acks_lost_ping)
6283 + rxrpc_input_check_for_lost_ack(call);
6284 +-
6285 +- if (before(orig_serial, ping_serial) ||
6286 +- !test_and_clear_bit(RXRPC_CALL_PINGING, &call->flags))
6287 +- return;
6288 +- if (after(orig_serial, ping_serial))
6289 +- return;
6290 +-
6291 +- rxrpc_peer_add_rtt(call, rxrpc_rtt_rx_ping_response,
6292 +- orig_serial, ack_serial, ping_time, resp_time);
6293 + }
6294 +
6295 + /*
6296 +@@ -843,7 +848,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
6297 + struct rxrpc_ackinfo info;
6298 + u8 acks[RXRPC_MAXACKS];
6299 + } buf;
6300 +- rxrpc_serial_t acked_serial;
6301 ++ rxrpc_serial_t ack_serial, acked_serial;
6302 + rxrpc_seq_t first_soft_ack, hard_ack, prev_pkt;
6303 + int nr_acks, offset, ioffset;
6304 +
6305 +@@ -856,6 +861,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
6306 + }
6307 + offset += sizeof(buf.ack);
6308 +
6309 ++ ack_serial = sp->hdr.serial;
6310 + acked_serial = ntohl(buf.ack.serial);
6311 + first_soft_ack = ntohl(buf.ack.firstPacket);
6312 + prev_pkt = ntohl(buf.ack.previousPacket);
6313 +@@ -864,31 +870,42 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
6314 + summary.ack_reason = (buf.ack.reason < RXRPC_ACK__INVALID ?
6315 + buf.ack.reason : RXRPC_ACK__INVALID);
6316 +
6317 +- trace_rxrpc_rx_ack(call, sp->hdr.serial, acked_serial,
6318 ++ trace_rxrpc_rx_ack(call, ack_serial, acked_serial,
6319 + first_soft_ack, prev_pkt,
6320 + summary.ack_reason, nr_acks);
6321 +
6322 +- if (buf.ack.reason == RXRPC_ACK_PING_RESPONSE)
6323 ++ switch (buf.ack.reason) {
6324 ++ case RXRPC_ACK_PING_RESPONSE:
6325 + rxrpc_input_ping_response(call, skb->tstamp, acked_serial,
6326 +- sp->hdr.serial);
6327 +- if (buf.ack.reason == RXRPC_ACK_REQUESTED)
6328 +- rxrpc_input_requested_ack(call, skb->tstamp, acked_serial,
6329 +- sp->hdr.serial);
6330 ++ ack_serial);
6331 ++ rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial,
6332 ++ rxrpc_rtt_rx_ping_response);
6333 ++ break;
6334 ++ case RXRPC_ACK_REQUESTED:
6335 ++ rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial,
6336 ++ rxrpc_rtt_rx_requested_ack);
6337 ++ break;
6338 ++ default:
6339 ++ if (acked_serial != 0)
6340 ++ rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial,
6341 ++ rxrpc_rtt_rx_cancel);
6342 ++ break;
6343 ++ }
6344 +
6345 + if (buf.ack.reason == RXRPC_ACK_PING) {
6346 +- _proto("Rx ACK %%%u PING Request", sp->hdr.serial);
6347 ++ _proto("Rx ACK %%%u PING Request", ack_serial);
6348 + rxrpc_propose_ACK(call, RXRPC_ACK_PING_RESPONSE,
6349 +- sp->hdr.serial, true, true,
6350 ++ ack_serial, true, true,
6351 + rxrpc_propose_ack_respond_to_ping);
6352 + } else if (sp->hdr.flags & RXRPC_REQUEST_ACK) {
6353 + rxrpc_propose_ACK(call, RXRPC_ACK_REQUESTED,
6354 +- sp->hdr.serial, true, true,
6355 ++ ack_serial, true, true,
6356 + rxrpc_propose_ack_respond_to_ack);
6357 + }
6358 +
6359 + /* Discard any out-of-order or duplicate ACKs (outside lock). */
6360 + if (!rxrpc_is_ack_valid(call, first_soft_ack, prev_pkt)) {
6361 +- trace_rxrpc_rx_discard_ack(call->debug_id, sp->hdr.serial,
6362 ++ trace_rxrpc_rx_discard_ack(call->debug_id, ack_serial,
6363 + first_soft_ack, call->ackr_first_seq,
6364 + prev_pkt, call->ackr_prev_seq);
6365 + return;
6366 +@@ -904,7 +921,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
6367 +
6368 + /* Discard any out-of-order or duplicate ACKs (inside lock). */
6369 + if (!rxrpc_is_ack_valid(call, first_soft_ack, prev_pkt)) {
6370 +- trace_rxrpc_rx_discard_ack(call->debug_id, sp->hdr.serial,
6371 ++ trace_rxrpc_rx_discard_ack(call->debug_id, ack_serial,
6372 + first_soft_ack, call->ackr_first_seq,
6373 + prev_pkt, call->ackr_prev_seq);
6374 + goto out;
6375 +@@ -964,7 +981,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
6376 + RXRPC_TX_ANNO_LAST &&
6377 + summary.nr_acks == call->tx_top - hard_ack &&
6378 + rxrpc_is_client_call(call))
6379 +- rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial,
6380 ++ rxrpc_propose_ACK(call, RXRPC_ACK_PING, ack_serial,
6381 + false, true,
6382 + rxrpc_propose_ack_ping_for_lost_reply);
6383 +
6384 +diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
6385 +index 1ba43c3df4adb..3cfff7922ba82 100644
6386 +--- a/net/rxrpc/output.c
6387 ++++ b/net/rxrpc/output.c
6388 +@@ -123,6 +123,49 @@ static size_t rxrpc_fill_out_ack(struct rxrpc_connection *conn,
6389 + return top - hard_ack + 3;
6390 + }
6391 +
6392 ++/*
6393 ++ * Record the beginning of an RTT probe.
6394 ++ */
6395 ++static int rxrpc_begin_rtt_probe(struct rxrpc_call *call, rxrpc_serial_t serial,
6396 ++ enum rxrpc_rtt_tx_trace why)
6397 ++{
6398 ++ unsigned long avail = call->rtt_avail;
6399 ++ int rtt_slot = 9;
6400 ++
6401 ++ if (!(avail & RXRPC_CALL_RTT_AVAIL_MASK))
6402 ++ goto no_slot;
6403 ++
6404 ++ rtt_slot = __ffs(avail & RXRPC_CALL_RTT_AVAIL_MASK);
6405 ++ if (!test_and_clear_bit(rtt_slot, &call->rtt_avail))
6406 ++ goto no_slot;
6407 ++
6408 ++ call->rtt_serial[rtt_slot] = serial;
6409 ++ call->rtt_sent_at[rtt_slot] = ktime_get_real();
6410 ++ smp_wmb(); /* Write data before avail bit */
6411 ++ set_bit(rtt_slot + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail);
6412 ++
6413 ++ trace_rxrpc_rtt_tx(call, why, rtt_slot, serial);
6414 ++ return rtt_slot;
6415 ++
6416 ++no_slot:
6417 ++ trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_no_slot, rtt_slot, serial);
6418 ++ return -1;
6419 ++}
6420 ++
6421 ++/*
6422 ++ * Cancel an RTT probe.
6423 ++ */
6424 ++static void rxrpc_cancel_rtt_probe(struct rxrpc_call *call,
6425 ++ rxrpc_serial_t serial, int rtt_slot)
6426 ++{
6427 ++ if (rtt_slot != -1) {
6428 ++ clear_bit(rtt_slot + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail);
6429 ++ smp_wmb(); /* Clear pending bit before setting slot */
6430 ++ set_bit(rtt_slot, &call->rtt_avail);
6431 ++ trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_cancel, rtt_slot, serial);
6432 ++ }
6433 ++}
6434 ++
6435 + /*
6436 + * Send an ACK call packet.
6437 + */
6438 +@@ -136,7 +179,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
6439 + rxrpc_serial_t serial;
6440 + rxrpc_seq_t hard_ack, top;
6441 + size_t len, n;
6442 +- int ret;
6443 ++ int ret, rtt_slot = -1;
6444 + u8 reason;
6445 +
6446 + if (test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
6447 +@@ -196,18 +239,8 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
6448 + if (_serial)
6449 + *_serial = serial;
6450 +
6451 +- if (ping) {
6452 +- call->ping_serial = serial;
6453 +- smp_wmb();
6454 +- /* We need to stick a time in before we send the packet in case
6455 +- * the reply gets back before kernel_sendmsg() completes - but
6456 +- * asking UDP to send the packet can take a relatively long
6457 +- * time.
6458 +- */
6459 +- call->ping_time = ktime_get_real();
6460 +- set_bit(RXRPC_CALL_PINGING, &call->flags);
6461 +- trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_ping, serial);
6462 +- }
6463 ++ if (ping)
6464 ++ rtt_slot = rxrpc_begin_rtt_probe(call, serial, rxrpc_rtt_tx_ping);
6465 +
6466 + ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
6467 + conn->params.peer->last_tx_at = ktime_get_seconds();
6468 +@@ -221,8 +254,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
6469 +
6470 + if (call->state < RXRPC_CALL_COMPLETE) {
6471 + if (ret < 0) {
6472 +- if (ping)
6473 +- clear_bit(RXRPC_CALL_PINGING, &call->flags);
6474 ++ rxrpc_cancel_rtt_probe(call, serial, rtt_slot);
6475 + rxrpc_propose_ACK(call, pkt->ack.reason,
6476 + ntohl(pkt->ack.serial),
6477 + false, true,
6478 +@@ -321,7 +353,7 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
6479 + struct kvec iov[2];
6480 + rxrpc_serial_t serial;
6481 + size_t len;
6482 +- int ret;
6483 ++ int ret, rtt_slot = -1;
6484 +
6485 + _enter(",{%d}", skb->len);
6486 +
6487 +@@ -397,6 +429,8 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
6488 + sp->hdr.serial = serial;
6489 + smp_wmb(); /* Set serial before timestamp */
6490 + skb->tstamp = ktime_get_real();
6491 ++ if (whdr.flags & RXRPC_REQUEST_ACK)
6492 ++ rtt_slot = rxrpc_begin_rtt_probe(call, serial, rxrpc_rtt_tx_data);
6493 +
6494 + /* send the packet by UDP
6495 + * - returns -EMSGSIZE if UDP would have to fragment the packet
6496 +@@ -408,12 +442,15 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
6497 + conn->params.peer->last_tx_at = ktime_get_seconds();
6498 +
6499 + up_read(&conn->params.local->defrag_sem);
6500 +- if (ret < 0)
6501 ++ if (ret < 0) {
6502 ++ rxrpc_cancel_rtt_probe(call, serial, rtt_slot);
6503 + trace_rxrpc_tx_fail(call->debug_id, serial, ret,
6504 + rxrpc_tx_point_call_data_nofrag);
6505 +- else
6506 ++ } else {
6507 + trace_rxrpc_tx_packet(call->debug_id, &whdr,
6508 + rxrpc_tx_point_call_data_nofrag);
6509 ++ }
6510 ++
6511 + rxrpc_tx_backoff(call, ret);
6512 + if (ret == -EMSGSIZE)
6513 + goto send_fragmentable;
6514 +@@ -422,7 +459,6 @@ done:
6515 + if (ret >= 0) {
6516 + if (whdr.flags & RXRPC_REQUEST_ACK) {
6517 + call->peer->rtt_last_req = skb->tstamp;
6518 +- trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_data, serial);
6519 + if (call->peer->rtt_count > 1) {
6520 + unsigned long nowj = jiffies, ack_lost_at;
6521 +
6522 +@@ -469,6 +505,8 @@ send_fragmentable:
6523 + sp->hdr.serial = serial;
6524 + smp_wmb(); /* Set serial before timestamp */
6525 + skb->tstamp = ktime_get_real();
6526 ++ if (whdr.flags & RXRPC_REQUEST_ACK)
6527 ++ rtt_slot = rxrpc_begin_rtt_probe(call, serial, rxrpc_rtt_tx_data);
6528 +
6529 + switch (conn->params.local->srx.transport.family) {
6530 + case AF_INET6:
6531 +@@ -487,12 +525,14 @@ send_fragmentable:
6532 + BUG();
6533 + }
6534 +
6535 +- if (ret < 0)
6536 ++ if (ret < 0) {
6537 ++ rxrpc_cancel_rtt_probe(call, serial, rtt_slot);
6538 + trace_rxrpc_tx_fail(call->debug_id, serial, ret,
6539 + rxrpc_tx_point_call_data_frag);
6540 +- else
6541 ++ } else {
6542 + trace_rxrpc_tx_packet(call->debug_id, &whdr,
6543 + rxrpc_tx_point_call_data_frag);
6544 ++ }
6545 + rxrpc_tx_backoff(call, ret);
6546 +
6547 + up_write(&conn->params.local->defrag_sem);
6548 +diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c
6549 +index ca29976bb193e..68396d0520525 100644
6550 +--- a/net/rxrpc/peer_object.c
6551 ++++ b/net/rxrpc/peer_object.c
6552 +@@ -502,11 +502,21 @@ EXPORT_SYMBOL(rxrpc_kernel_get_peer);
6553 + * rxrpc_kernel_get_srtt - Get a call's peer smoothed RTT
6554 + * @sock: The socket on which the call is in progress.
6555 + * @call: The call to query
6556 ++ * @_srtt: Where to store the SRTT value.
6557 + *
6558 +- * Get the call's peer smoothed RTT.
6559 ++ * Get the call's peer smoothed RTT in microseconds.
6560 + */
6561 +-u32 rxrpc_kernel_get_srtt(struct socket *sock, struct rxrpc_call *call)
6562 ++bool rxrpc_kernel_get_srtt(struct socket *sock, struct rxrpc_call *call,
6563 ++ u32 *_srtt)
6564 + {
6565 +- return call->peer->srtt_us >> 3;
6566 ++ struct rxrpc_peer *peer = call->peer;
6567 ++
6568 ++ if (peer->rtt_count == 0) {
6569 ++ *_srtt = 1000000; /* 1S */
6570 ++ return false;
6571 ++ }
6572 ++
6573 ++ *_srtt = call->peer->srtt_us >> 3;
6574 ++ return true;
6575 + }
6576 + EXPORT_SYMBOL(rxrpc_kernel_get_srtt);
6577 +diff --git a/net/rxrpc/rtt.c b/net/rxrpc/rtt.c
6578 +index 928d8b34a3eee..1221b0637a7ec 100644
6579 +--- a/net/rxrpc/rtt.c
6580 ++++ b/net/rxrpc/rtt.c
6581 +@@ -146,6 +146,7 @@ static void rxrpc_ack_update_rtt(struct rxrpc_peer *peer, long rtt_us)
6582 + * exclusive access to the peer RTT data.
6583 + */
6584 + void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
6585 ++ int rtt_slot,
6586 + rxrpc_serial_t send_serial, rxrpc_serial_t resp_serial,
6587 + ktime_t send_time, ktime_t resp_time)
6588 + {
6589 +@@ -162,7 +163,7 @@ void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
6590 + peer->rtt_count++;
6591 + spin_unlock(&peer->rtt_input_lock);
6592 +
6593 +- trace_rxrpc_rtt_rx(call, why, send_serial, resp_serial,
6594 ++ trace_rxrpc_rtt_rx(call, why, rtt_slot, send_serial, resp_serial,
6595 + peer->srtt_us >> 3, peer->rto_j);
6596 + }
6597 +
6598 +diff --git a/net/wireless/reg.c b/net/wireless/reg.c
6599 +index 0d74a31ef0ab4..fc2af2c8b6d54 100644
6600 +--- a/net/wireless/reg.c
6601 ++++ b/net/wireless/reg.c
6602 +@@ -2944,6 +2944,9 @@ int regulatory_hint_user(const char *alpha2,
6603 + if (WARN_ON(!alpha2))
6604 + return -EINVAL;
6605 +
6606 ++ if (!is_world_regdom(alpha2) && !is_an_alpha2(alpha2))
6607 ++ return -EINVAL;
6608 ++
6609 + request = kzalloc(sizeof(struct regulatory_request), GFP_KERNEL);
6610 + if (!request)
6611 + return -ENOMEM;
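
A sketch of the validation the new check performs; hint_is_valid() is an illustrative stand-in for the kernel's is_world_regdom()/is_an_alpha2() pair:

    #include <ctype.h>
    #include <stdbool.h>

    static bool hint_is_valid(const char *alpha2)
    {
            /* "00" is the world regulatory domain; anything else must
             * be two ASCII letters (an ISO alpha2 country code). */
            if (alpha2[0] == '0' && alpha2[1] == '0')
                    return true;
            return isalpha((unsigned char)alpha2[0]) &&
                   isalpha((unsigned char)alpha2[1]);
    }
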
6612 +diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
6613 +index 4c820607540bf..e73e998d582a1 100755
6614 +--- a/scripts/checkpatch.pl
6615 ++++ b/scripts/checkpatch.pl
6616 +@@ -2636,8 +2636,8 @@ sub process {
6617 +
6618 + # Check if the commit log has what seems like a diff which can confuse patch
6619 + if ($in_commit_log && !$commit_log_has_diff &&
6620 +- (($line =~ m@^\s+diff\b.*a/[\w/]+@ &&
6621 +- $line =~ m@^\s+diff\b.*a/([\w/]+)\s+b/$1\b@) ||
6622 ++ (($line =~ m@^\s+diff\b.*a/([\w/]+)@ &&
6623 ++ $line =~ m@^\s+diff\b.*a/[\w/]+\s+b/$1\b@) ||
6624 + $line =~ m@^\s*(?:\-\-\-\s+a/|\+\+\+\s+b/)@ ||
6625 + $line =~ m/^\s*\@\@ \-\d+,\d+ \+\d+,\d+ \@\@/)) {
6626 + ERROR("DIFF_IN_COMMIT_MSG",
6627 +diff --git a/scripts/kconfig/streamline_config.pl b/scripts/kconfig/streamline_config.pl
6628 +index 19857d18d814d..1c78ba49ca992 100755
6629 +--- a/scripts/kconfig/streamline_config.pl
6630 ++++ b/scripts/kconfig/streamline_config.pl
6631 +@@ -593,7 +593,10 @@ while ($repeat) {
6632 + }
6633 +
6634 + my %setconfigs;
6635 +-my @preserved_kconfigs = split(/:/,$ENV{LMC_KEEP});
6636 ++my @preserved_kconfigs;
6637 ++if (defined($ENV{'LMC_KEEP'})) {
6638 ++ @preserved_kconfigs = split(/:/,$ENV{LMC_KEEP});
6639 ++}
6640 +
6641 + sub in_preserved_kconfigs {
6642 + my $kconfig = $config2kfile{$_[0]};
6643 +diff --git a/sound/core/oss/mulaw.c b/sound/core/oss/mulaw.c
6644 +index 3788906421a73..fe27034f28460 100644
6645 +--- a/sound/core/oss/mulaw.c
6646 ++++ b/sound/core/oss/mulaw.c
6647 +@@ -329,8 +329,8 @@ int snd_pcm_plugin_build_mulaw(struct snd_pcm_substream *plug,
6648 + snd_BUG();
6649 + return -EINVAL;
6650 + }
6651 +- if (snd_BUG_ON(!snd_pcm_format_linear(format->format)))
6652 +- return -ENXIO;
6653 ++ if (!snd_pcm_format_linear(format->format))
6654 ++ return -EINVAL;
6655 +
6656 + err = snd_pcm_plugin_build(plug, "Mu-Law<->linear conversion",
6657 + src_format, dst_format,
6658 +diff --git a/sound/firewire/digi00x/digi00x.c b/sound/firewire/digi00x/digi00x.c
6659 +index c84b913a9fe01..ab8408966ec33 100644
6660 +--- a/sound/firewire/digi00x/digi00x.c
6661 ++++ b/sound/firewire/digi00x/digi00x.c
6662 +@@ -14,6 +14,7 @@ MODULE_LICENSE("GPL v2");
6663 + #define VENDOR_DIGIDESIGN 0x00a07e
6664 + #define MODEL_CONSOLE 0x000001
6665 + #define MODEL_RACK 0x000002
6666 ++#define SPEC_VERSION 0x000001
6667 +
6668 + static int name_card(struct snd_dg00x *dg00x)
6669 + {
6670 +@@ -175,14 +176,18 @@ static const struct ieee1394_device_id snd_dg00x_id_table[] = {
6671 + /* Both of 002/003 use the same ID. */
6672 + {
6673 + .match_flags = IEEE1394_MATCH_VENDOR_ID |
6674 ++ IEEE1394_MATCH_VERSION |
6675 + IEEE1394_MATCH_MODEL_ID,
6676 + .vendor_id = VENDOR_DIGIDESIGN,
6677 ++ .version = SPEC_VERSION,
6678 + .model_id = MODEL_CONSOLE,
6679 + },
6680 + {
6681 + .match_flags = IEEE1394_MATCH_VENDOR_ID |
6682 ++ IEEE1394_MATCH_VERSION |
6683 + IEEE1394_MATCH_MODEL_ID,
6684 + .vendor_id = VENDOR_DIGIDESIGN,
6685 ++ .version = SPEC_VERSION,
6686 + .model_id = MODEL_RACK,
6687 + },
6688 + {}
6689 +diff --git a/sound/firewire/tascam/tascam.c b/sound/firewire/tascam/tascam.c
6690 +index 5dac0d9fc58e5..75f2edd8e78fb 100644
6691 +--- a/sound/firewire/tascam/tascam.c
6692 ++++ b/sound/firewire/tascam/tascam.c
6693 +@@ -39,9 +39,6 @@ static const struct snd_tscm_spec model_specs[] = {
6694 + .midi_capture_ports = 2,
6695 + .midi_playback_ports = 4,
6696 + },
6697 +- // This kernel module doesn't support FE-8 because the most of features
6698 +- // can be implemented in userspace without any specific support of this
6699 +- // module.
6700 + };
6701 +
6702 + static int identify_model(struct snd_tscm *tscm)
6703 +@@ -211,11 +208,39 @@ static void snd_tscm_remove(struct fw_unit *unit)
6704 + }
6705 +
6706 + static const struct ieee1394_device_id snd_tscm_id_table[] = {
6707 ++ // Tascam, FW-1884.
6708 ++ {
6709 ++ .match_flags = IEEE1394_MATCH_VENDOR_ID |
6710 ++ IEEE1394_MATCH_SPECIFIER_ID |
6711 ++ IEEE1394_MATCH_VERSION,
6712 ++ .vendor_id = 0x00022e,
6713 ++ .specifier_id = 0x00022e,
6714 ++ .version = 0x800000,
6715 ++ },
6716 ++ // Tascam, FE-8 (.version = 0x800001)
6717 ++ // This kernel module doesn't support FE-8 because most of its features
6718 ++ // can be implemented in userspace without any specific support from this
6719 ++ // module.
6720 ++ //
6721 ++ // .version = 0x800002 is unknown.
6722 ++ //
6723 ++ // Tascam, FW-1082.
6724 ++ {
6725 ++ .match_flags = IEEE1394_MATCH_VENDOR_ID |
6726 ++ IEEE1394_MATCH_SPECIFIER_ID |
6727 ++ IEEE1394_MATCH_VERSION,
6728 ++ .vendor_id = 0x00022e,
6729 ++ .specifier_id = 0x00022e,
6730 ++ .version = 0x800003,
6731 ++ },
6732 ++ // Tascam, FW-1804.
6733 + {
6734 + .match_flags = IEEE1394_MATCH_VENDOR_ID |
6735 +- IEEE1394_MATCH_SPECIFIER_ID,
6736 ++ IEEE1394_MATCH_SPECIFIER_ID |
6737 ++ IEEE1394_MATCH_VERSION,
6738 + .vendor_id = 0x00022e,
6739 + .specifier_id = 0x00022e,
6740 ++ .version = 0x800004,
6741 + },
6742 + {}
6743 + };
6744 +diff --git a/sound/pci/ca0106/ca0106_main.c b/sound/pci/ca0106/ca0106_main.c
6745 +index 70d775ff967eb..c189f70c82cb9 100644
6746 +--- a/sound/pci/ca0106/ca0106_main.c
6747 ++++ b/sound/pci/ca0106/ca0106_main.c
6748 +@@ -537,7 +537,8 @@ static int snd_ca0106_pcm_power_dac(struct snd_ca0106 *chip, int channel_id,
6749 + else
6750 + /* Power down */
6751 + chip->spi_dac_reg[reg] |= bit;
6752 +- return snd_ca0106_spi_write(chip, chip->spi_dac_reg[reg]);
6753 ++ if (snd_ca0106_spi_write(chip, chip->spi_dac_reg[reg]) != 0)
6754 ++ return -ENXIO;
6755 + }
6756 + return 0;
6757 + }
6758 +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
6759 +index 1a26940a3fd7c..4c23b169ac67e 100644
6760 +--- a/sound/pci/hda/hda_intel.c
6761 ++++ b/sound/pci/hda/hda_intel.c
6762 +@@ -2747,8 +2747,6 @@ static const struct pci_device_id azx_ids[] = {
6763 + .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_HDMI },
6764 + /* Zhaoxin */
6765 + { PCI_DEVICE(0x1d17, 0x3288), .driver_data = AZX_DRIVER_ZHAOXIN },
6766 +- /* Loongson */
6767 +- { PCI_DEVICE(0x0014, 0x7a07), .driver_data = AZX_DRIVER_GENERIC },
6768 + { 0, }
6769 + };
6770 + MODULE_DEVICE_TABLE(pci, azx_ids);
6771 +diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
6772 +index f0c6d2907e396..fc22bdc30da3e 100644
6773 +--- a/sound/pci/hda/patch_hdmi.c
6774 ++++ b/sound/pci/hda/patch_hdmi.c
6775 +@@ -2737,6 +2737,7 @@ static void i915_pin_cvt_fixup(struct hda_codec *codec,
6776 + hda_nid_t cvt_nid)
6777 + {
6778 + if (per_pin) {
6779 ++ haswell_verify_D0(codec, per_pin->cvt_nid, per_pin->pin_nid);
6780 + snd_hda_set_dev_select(codec, per_pin->pin_nid,
6781 + per_pin->dev_id);
6782 + intel_verify_pin_cvt_connect(codec, per_pin);
6783 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
6784 +index da23c2d4ca51e..0b9907c9cd84f 100644
6785 +--- a/sound/pci/hda/patch_realtek.c
6786 ++++ b/sound/pci/hda/patch_realtek.c
6787 +@@ -2467,6 +2467,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
6788 + SND_PCI_QUIRK(0x1462, 0x1276, "MSI-GL73", ALC1220_FIXUP_CLEVO_P950),
6789 + SND_PCI_QUIRK(0x1462, 0x1293, "MSI-GP65", ALC1220_FIXUP_CLEVO_P950),
6790 + SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD),
6791 ++ SND_PCI_QUIRK(0x1462, 0x9c37, "MSI X570-A PRO", ALC1220_FIXUP_CLEVO_P950),
6792 + SND_PCI_QUIRK(0x1462, 0xda57, "MSI Z270-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
6793 + SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
6794 + SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX),
6795 +@@ -5879,6 +5880,39 @@ static void alc275_fixup_gpio4_off(struct hda_codec *codec,
6796 + }
6797 + }
6798 +
6799 ++/* Quirk for Thinkpad X1 7th and 8th Gen
6800 ++ * The following fixed routing is needed:
6801 ++ * DAC1 (NID 0x02) -> Speaker (NID 0x14); some eq applied secretly
6802 ++ * DAC2 (NID 0x03) -> Bass (NID 0x17) & Headphone (NID 0x21); sharing a DAC
6803 ++ * DAC3 (NID 0x06) -> Unused, due to the lack of volume amp
6804 ++ */
6805 ++static void alc285_fixup_thinkpad_x1_gen7(struct hda_codec *codec,
6806 ++ const struct hda_fixup *fix, int action)
6807 ++{
6808 ++ static const hda_nid_t conn[] = { 0x02, 0x03 }; /* exclude 0x06 */
6809 ++ static const hda_nid_t preferred_pairs[] = {
6810 ++ 0x14, 0x02, 0x17, 0x03, 0x21, 0x03, 0
6811 ++ };
6812 ++ struct alc_spec *spec = codec->spec;
6813 ++
6814 ++ switch (action) {
6815 ++ case HDA_FIXUP_ACT_PRE_PROBE:
6816 ++ snd_hda_override_conn_list(codec, 0x17, ARRAY_SIZE(conn), conn);
6817 ++ spec->gen.preferred_dacs = preferred_pairs;
6818 ++ break;
6819 ++ case HDA_FIXUP_ACT_BUILD:
6820 ++ /* The generic parser creates somewhat unintuitive volume ctls
6821 ++ * with the fixed routing above, and the shared DAC2 may be
6822 ++ * confusing for PA.
6823 ++ * Rename those to unique names so that PA doesn't touch them
6824 ++ * and uses only the Master volume.
6825 ++ */
6826 ++ rename_ctl(codec, "Front Playback Volume", "DAC1 Playback Volume");
6827 ++ rename_ctl(codec, "Bass Speaker Playback Volume", "DAC2 Playback Volume");
6828 ++ break;
6829 ++ }
6830 ++}
6831 ++
6832 + static void alc233_alc662_fixup_lenovo_dual_codecs(struct hda_codec *codec,
6833 + const struct hda_fixup *fix,
6834 + int action)
6835 +@@ -6147,6 +6181,7 @@ enum {
6836 + ALC289_FIXUP_DUAL_SPK,
6837 + ALC294_FIXUP_SPK2_TO_DAC1,
6838 + ALC294_FIXUP_ASUS_DUAL_SPK,
6839 ++ ALC285_FIXUP_THINKPAD_X1_GEN7,
6840 + ALC285_FIXUP_THINKPAD_HEADSET_JACK,
6841 + ALC294_FIXUP_ASUS_HPE,
6842 + ALC294_FIXUP_ASUS_COEF_1B,
6843 +@@ -7292,11 +7327,17 @@ static const struct hda_fixup alc269_fixups[] = {
6844 + .chained = true,
6845 + .chain_id = ALC294_FIXUP_SPK2_TO_DAC1
6846 + },
6847 ++ [ALC285_FIXUP_THINKPAD_X1_GEN7] = {
6848 ++ .type = HDA_FIXUP_FUNC,
6849 ++ .v.func = alc285_fixup_thinkpad_x1_gen7,
6850 ++ .chained = true,
6851 ++ .chain_id = ALC269_FIXUP_THINKPAD_ACPI
6852 ++ },
6853 + [ALC285_FIXUP_THINKPAD_HEADSET_JACK] = {
6854 + .type = HDA_FIXUP_FUNC,
6855 + .v.func = alc_fixup_headset_jack,
6856 + .chained = true,
6857 +- .chain_id = ALC285_FIXUP_SPEAKER2_TO_DAC1
6858 ++ .chain_id = ALC285_FIXUP_THINKPAD_X1_GEN7
6859 + },
6860 + [ALC294_FIXUP_ASUS_HPE] = {
6861 + .type = HDA_FIXUP_VERBS,
6862 +@@ -7707,7 +7748,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
6863 + SND_PCI_QUIRK(0x144d, 0xc169, "Samsung Notebook 9 Pen (NP930SBE-K01US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
6864 + SND_PCI_QUIRK(0x144d, 0xc176, "Samsung Notebook 9 Pro (NP930MBE-K04US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
6865 + SND_PCI_QUIRK(0x144d, 0xc189, "Samsung Galaxy Flex Book (NT950QCG-X716)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
6866 +- SND_PCI_QUIRK(0x144d, 0xc18a, "Samsung Galaxy Book Ion (NT950XCJ-X716A)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
6867 ++ SND_PCI_QUIRK(0x144d, 0xc18a, "Samsung Galaxy Book Ion (NP930XCJ-K01US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
6868 ++ SND_PCI_QUIRK(0x144d, 0xc830, "Samsung Galaxy Book Ion (NT950XCJ-X716A)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
6869 + SND_PCI_QUIRK(0x144d, 0xc740, "Samsung Ativ book 8 (NP870Z5G)", ALC269_FIXUP_ATIV_BOOK_8),
6870 + SND_PCI_QUIRK(0x144d, 0xc812, "Samsung Notebook Pen S (NT950SBE-X58)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
6871 + SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC),
6872 +diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
6873 +index eb3cececda794..28506415c7ad5 100644
6874 +--- a/sound/usb/pcm.c
6875 ++++ b/sound/usb/pcm.c
6876 +@@ -369,11 +369,13 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
6877 + case USB_ID(0x07fd, 0x0008): /* MOTU M Series */
6878 + case USB_ID(0x31e9, 0x0001): /* Solid State Logic SSL2 */
6879 + case USB_ID(0x31e9, 0x0002): /* Solid State Logic SSL2+ */
6880 ++ case USB_ID(0x0499, 0x172f): /* Steinberg UR22C */
6881 + case USB_ID(0x0d9a, 0x00df): /* RTX6001 */
6882 + ep = 0x81;
6883 + ifnum = 2;
6884 + goto add_sync_ep_from_ifnum;
6885 + case USB_ID(0x2b73, 0x000a): /* Pioneer DJ DJM-900NXS2 */
6886 ++ case USB_ID(0x2b73, 0x0017): /* Pioneer DJ DJM-250MK2 */
6887 + ep = 0x82;
6888 + ifnum = 0;
6889 + goto add_sync_ep_from_ifnum;
6890 +diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
6891 +index 366faaa4ba82c..5410e5ac82f91 100644
6892 +--- a/sound/usb/quirks-table.h
6893 ++++ b/sound/usb/quirks-table.h
6894 +@@ -3532,14 +3532,40 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
6895 + {
6896 + /*
6897 + * Pioneer DJ DJM-250MK2
6898 +- * PCM is 8 channels out @ 48 fixed (endpoints 0x01).
6899 +- * The output from computer to the mixer is usable.
6900 ++ * PCM is 8 channels out @ 48 fixed (endpoint 0x01)
6901 ++ * and 8 channels in @ 48 fixed (endpoint 0x82).
6902 + *
6903 +- * The input (phono or line to computer) is not working.
6904 +- * It should be at endpoint 0x82 and probably also 8 channels,
6905 +- * but it seems that it works only with Pioneer proprietary software.
6906 +- * Even on officially supported OS, the Audacity was unable to record
6907 +- * and Mixxx to recognize the control vinyls.
6908 ++ * Both playback and recording work, even simultaneously.
6909 ++ *
6910 ++ * Playback channels could be mapped to:
6911 ++ * - CH1
6912 ++ * - CH2
6913 ++ * - AUX
6914 ++ *
6915 ++ * Recording channels could be mapped to:
6916 ++ * - Post CH1 Fader
6917 ++ * - Post CH2 Fader
6918 ++ * - Cross Fader A
6919 ++ * - Cross Fader B
6920 ++ * - MIC
6921 ++ * - AUX
6922 ++ * - REC OUT
6923 ++ *
6924 ++ * There is a remaining problem with recording directly from PHONO/LINE.
6925 ++ * If we map a channel to:
6926 ++ * - CH1 Control Tone PHONO
6927 ++ * - CH1 Control Tone LINE
6928 ++ * - CH2 Control Tone PHONO
6929 ++ * - CH2 Control Tone LINE
6930 ++ * it is silent.
6931 ++ * There is no signal even on other operating systems with official drivers.
6932 ++ * The signal appears only when a supported application is started.
6933 ++ * This still needs to be investigated...
6934 ++ * (there is quite a lot of communication on the USB in both directions)
6935 ++ *
6936 ++ * In the current version this mixer can be used for playback
6937 ++ * and for recording from vinyl (through the Post CH* Fader),
6938 ++ * but not for DVS (Digital Vinyl Systems) like Mixxx.
6939 + */
6940 + USB_DEVICE_VENDOR_SPEC(0x2b73, 0x0017),
6941 + .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
6942 +@@ -3563,6 +3589,26 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
6943 + .rate_max = 48000,
6944 + .nr_rates = 1,
6945 + .rate_table = (unsigned int[]) { 48000 }
6946 ++ }
6947 ++ },
6948 ++ {
6949 ++ .ifnum = 0,
6950 ++ .type = QUIRK_AUDIO_FIXED_ENDPOINT,
6951 ++ .data = &(const struct audioformat) {
6952 ++ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
6953 ++ .channels = 8, // inputs
6954 ++ .iface = 0,
6955 ++ .altsetting = 1,
6956 ++ .altset_idx = 1,
6957 ++ .endpoint = 0x82,
6958 ++ .ep_attr = USB_ENDPOINT_XFER_ISOC|
6959 ++ USB_ENDPOINT_SYNC_ASYNC|
6960 ++ USB_ENDPOINT_USAGE_IMPLICIT_FB,
6961 ++ .rates = SNDRV_PCM_RATE_48000,
6962 ++ .rate_min = 48000,
6963 ++ .rate_max = 48000,
6964 ++ .nr_rates = 1,
6965 ++ .rate_table = (unsigned int[]) { 48000 }
6966 + }
6967 + },
6968 + {
6969 +diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
6970 +index ef1c1cf040b45..bf2d521b6768c 100644
6971 +--- a/sound/usb/quirks.c
6972 ++++ b/sound/usb/quirks.c
6973 +@@ -1493,6 +1493,7 @@ void snd_usb_set_format_quirk(struct snd_usb_substream *subs,
6974 + set_format_emu_quirk(subs, fmt);
6975 + break;
6976 + case USB_ID(0x2b73, 0x000a): /* Pioneer DJ DJM-900NXS2 */
6977 ++ case USB_ID(0x2b73, 0x0017): /* Pioneer DJ DJM-250MK2 */
6978 + pioneer_djm_set_format_quirk(subs);
6979 + break;
6980 + case USB_ID(0x534d, 0x2109): /* MacroSilicon MS2109 */
6981 +diff --git a/tools/include/uapi/linux/perf_event.h b/tools/include/uapi/linux/perf_event.h
6982 +index 7b2d6fc9e6ed7..bc8c4816ba386 100644
6983 +--- a/tools/include/uapi/linux/perf_event.h
6984 ++++ b/tools/include/uapi/linux/perf_event.h
6985 +@@ -1155,7 +1155,7 @@ union perf_mem_data_src {
6986 +
6987 + #define PERF_MEM_SNOOPX_FWD 0x01 /* forward */
6988 + /* 1 free */
6989 +-#define PERF_MEM_SNOOPX_SHIFT 37
6990 ++#define PERF_MEM_SNOOPX_SHIFT 38
6991 +
6992 + /* locked instruction */
6993 + #define PERF_MEM_LOCK_NA 0x01 /* not available */
6994 +diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
6995 +index c8209467076b1..d8299b77f5c89 100644
6996 +--- a/tools/perf/Documentation/perf-stat.txt
6997 ++++ b/tools/perf/Documentation/perf-stat.txt
6998 +@@ -380,6 +380,9 @@ counts for all hardware threads in a core but show the sum counts per
6999 + hardware thread. This is essentially a replacement for the any bit and
7000 + convenient for post processing.
7001 +
7002 ++--summary::
7003 ++Print summary for interval mode (-I).
7004 ++
7005 + EXAMPLES
7006 + --------
7007 +
7008 +diff --git a/tools/perf/bench/synthesize.c b/tools/perf/bench/synthesize.c
7009 +index 8d624aea1c5e5..b2924e3181dc3 100644
7010 +--- a/tools/perf/bench/synthesize.c
7011 ++++ b/tools/perf/bench/synthesize.c
7012 +@@ -162,8 +162,8 @@ static int do_run_multi_threaded(struct target *target,
7013 + init_stats(&event_stats);
7014 + for (i = 0; i < multi_iterations; i++) {
7015 + session = perf_session__new(NULL, false, NULL);
7016 +- if (!session)
7017 +- return -ENOMEM;
7018 ++ if (IS_ERR(session))
7019 ++ return PTR_ERR(session);
7020 +
7021 + atomic_set(&event_count, 0);
7022 + gettimeofday(&start, NULL);
7023 +diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
7024 +index 23ea934f30b34..07313217db4cd 100644
7025 +--- a/tools/perf/builtin-record.c
7026 ++++ b/tools/perf/builtin-record.c
7027 +@@ -2417,7 +2417,7 @@ static struct option __record_options[] = {
7028 + OPT_BOOLEAN(0, "tail-synthesize", &record.opts.tail_synthesize,
7029 + "synthesize non-sample events at the end of output"),
7030 + OPT_BOOLEAN(0, "overwrite", &record.opts.overwrite, "use overwrite mode"),
7031 +- OPT_BOOLEAN(0, "no-bpf-event", &record.opts.no_bpf_event, "record bpf events"),
7032 ++ OPT_BOOLEAN(0, "no-bpf-event", &record.opts.no_bpf_event, "do not record bpf events"),
7033 + OPT_BOOLEAN(0, "strict-freq", &record.opts.strict_freq,
7034 + "Fail if the specified frequency can't be used"),
7035 + OPT_CALLBACK('F', "freq", &record.opts, "freq or 'max'",
7036 +diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
7037 +index 459e4229945e4..7b9511e59b434 100644
7038 +--- a/tools/perf/builtin-sched.c
7039 ++++ b/tools/perf/builtin-sched.c
7040 +@@ -2575,7 +2575,8 @@ static int timehist_sched_change_event(struct perf_tool *tool,
7041 + }
7042 +
7043 + if (!sched->idle_hist || thread->tid == 0) {
7044 +- timehist_update_runtime_stats(tr, t, tprev);
7045 ++ if (!cpu_list || test_bit(sample->cpu, cpu_bitmap))
7046 ++ timehist_update_runtime_stats(tr, t, tprev);
7047 +
7048 + if (sched->idle_hist) {
7049 + struct idle_thread_runtime *itr = (void *)tr;
7050 +@@ -2848,6 +2849,9 @@ static void timehist_print_summary(struct perf_sched *sched,
7051 +
7052 + printf("\nIdle stats:\n");
7053 + for (i = 0; i < idle_max_cpu; ++i) {
7054 ++ if (cpu_list && !test_bit(i, cpu_bitmap))
7055 ++ continue;
7056 ++
7057 + t = idle_threads[i];
7058 + if (!t)
7059 + continue;
7060 +diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
7061 +index 9be020e0098ad..6e2502de755a8 100644
7062 +--- a/tools/perf/builtin-stat.c
7063 ++++ b/tools/perf/builtin-stat.c
7064 +@@ -402,7 +402,7 @@ static void read_counters(struct timespec *rs)
7065 + {
7066 + struct evsel *counter;
7067 +
7068 +- if (!stat_config.summary && (read_affinity_counters(rs) < 0))
7069 ++ if (!stat_config.stop_read_counter && (read_affinity_counters(rs) < 0))
7070 + return;
7071 +
7072 + evlist__for_each_entry(evsel_list, counter) {
7073 +@@ -826,9 +826,9 @@ try_again_reset:
7074 + if (stat_config.walltime_run_table)
7075 + stat_config.walltime_run[run_idx] = t1 - t0;
7076 +
7077 +- if (interval) {
7078 ++ if (interval && stat_config.summary) {
7079 + stat_config.interval = 0;
7080 +- stat_config.summary = true;
7081 ++ stat_config.stop_read_counter = true;
7082 + init_stats(&walltime_nsecs_stats);
7083 + update_stats(&walltime_nsecs_stats, t1 - t0);
7084 +
7085 +@@ -1066,6 +1066,8 @@ static struct option stat_options[] = {
7086 + "Use with 'percore' event qualifier to show the event "
7087 + "counts of one hardware thread by sum up total hardware "
7088 + "threads of same physical core"),
7089 ++ OPT_BOOLEAN(0, "summary", &stat_config.summary,
7090 ++ "print summary for interval mode"),
7091 + #ifdef HAVE_LIBPFM
7092 + OPT_CALLBACK(0, "pfm-events", &evsel_list, "event",
7093 + "libpfm4 event selector. use 'perf list' to list available events",
7094 +diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
7095 +index 13889d73f8dd5..c665d69c0651d 100644
7096 +--- a/tools/perf/builtin-top.c
7097 ++++ b/tools/perf/builtin-top.c
7098 +@@ -1746,6 +1746,7 @@ int cmd_top(int argc, const char **argv)
7099 + goto out_delete_evlist;
7100 + }
7101 +
7102 ++#ifdef HAVE_LIBBPF_SUPPORT
7103 + if (!top.record_opts.no_bpf_event) {
7104 + top.sb_evlist = evlist__new();
7105 +
7106 +@@ -1759,6 +1760,7 @@ int cmd_top(int argc, const char **argv)
7107 + goto out_delete_evlist;
7108 + }
7109 + }
7110 ++#endif
7111 +
7112 + if (perf_evlist__start_sb_thread(top.sb_evlist, target)) {
7113 + pr_debug("Couldn't start the BPF side band thread:\nBPF programs starting from now on won't be annotatable\n");
7114 +diff --git a/tools/perf/pmu-events/jevents.c b/tools/perf/pmu-events/jevents.c
7115 +index fa86c5f997cc5..fc9c158bfa134 100644
7116 +--- a/tools/perf/pmu-events/jevents.c
7117 ++++ b/tools/perf/pmu-events/jevents.c
7118 +@@ -137,7 +137,7 @@ static char *fixregex(char *s)
7119 + return s;
7120 +
7121 + /* allocate space for a new string */
7122 +- fixed = (char *) malloc(len + 1);
7123 ++ fixed = (char *) malloc(len + esc_count + 1);
7124 + if (!fixed)
7125 + return NULL;
7126 +
7127 +diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
7128 +index be9c4c0549bc8..a07626f072087 100644
7129 +--- a/tools/perf/ui/browsers/hists.c
7130 ++++ b/tools/perf/ui/browsers/hists.c
7131 +@@ -3629,8 +3629,8 @@ int perf_evlist__tui_browse_hists(struct evlist *evlist, const char *help,
7132 + {
7133 + int nr_entries = evlist->core.nr_entries;
7134 +
7135 +-single_entry:
7136 + if (perf_evlist__single_entry(evlist)) {
7137 ++single_entry: {
7138 + struct evsel *first = evlist__first(evlist);
7139 +
7140 + return perf_evsel__hists_browse(first, nr_entries, help,
7141 +@@ -3638,6 +3638,7 @@ single_entry:
7142 + env, warn_lost_event,
7143 + annotation_opts);
7144 + }
7145 ++ }
7146 +
7147 + if (symbol_conf.event_group) {
7148 + struct evsel *pos;
7149 +diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c
7150 +index c283223fb31f2..a2a369e2fbb67 100644
7151 +--- a/tools/perf/util/cs-etm.c
7152 ++++ b/tools/perf/util/cs-etm.c
7153 +@@ -1344,8 +1344,15 @@ static int cs_etm__synth_events(struct cs_etm_auxtrace *etm,
7154 + attr.sample_type &= ~(u64)PERF_SAMPLE_ADDR;
7155 + }
7156 +
7157 +- if (etm->synth_opts.last_branch)
7158 ++ if (etm->synth_opts.last_branch) {
7159 + attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
7160 ++ /*
7161 ++ * We don't use the hardware index, but the sample generation
7162 ++ * code uses the new format branch_stack with this field,
7163 ++ * so the event attributes must indicate that it's present.
7164 ++ */
7165 ++ attr.branch_sample_type |= PERF_SAMPLE_BRANCH_HW_INDEX;
7166 ++ }
7167 +
7168 + if (etm->synth_opts.instructions) {
7169 + attr.config = PERF_COUNT_HW_INSTRUCTIONS;
7170 +diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
7171 +index cb3c1e569a2db..9357b5f62c273 100644
7172 +--- a/tools/perf/util/intel-pt.c
7173 ++++ b/tools/perf/util/intel-pt.c
7174 +@@ -2913,8 +2913,15 @@ static int intel_pt_synth_events(struct intel_pt *pt,
7175 +
7176 + if (pt->synth_opts.callchain)
7177 + attr.sample_type |= PERF_SAMPLE_CALLCHAIN;
7178 +- if (pt->synth_opts.last_branch)
7179 ++ if (pt->synth_opts.last_branch) {
7180 + attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
7181 ++ /*
7182 ++ * We don't use the hardware index, but the sample generation
7183 ++ * code uses the new format branch_stack with this field,
7184 ++ * so the event attributes must indicate that it's present.
7185 ++ */
7186 ++ attr.branch_sample_type |= PERF_SAMPLE_BRANCH_HW_INDEX;
7187 ++ }
7188 +
7189 + if (pt->synth_opts.instructions) {
7190 + attr.config = PERF_COUNT_HW_INSTRUCTIONS;
7191 +diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h
7192 +index f75ae679eb281..d8a9dd786bf43 100644
7193 +--- a/tools/perf/util/stat.h
7194 ++++ b/tools/perf/util/stat.h
7195 +@@ -113,6 +113,7 @@ struct perf_stat_config {
7196 + bool summary;
7197 + bool metric_no_group;
7198 + bool metric_no_merge;
7199 ++ bool stop_read_counter;
7200 + FILE *output;
7201 + unsigned int interval;
7202 + unsigned int timeout;
7203 +diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
7204 +index 754cf611723ee..0d92ebcb335d1 100644
7205 +--- a/tools/testing/selftests/bpf/test_maps.c
7206 ++++ b/tools/testing/selftests/bpf/test_maps.c
7207 +@@ -1274,6 +1274,8 @@ static void __run_parallel(unsigned int tasks,
7208 + pid_t pid[tasks];
7209 + int i;
7210 +
7211 ++ fflush(stdout);
7212 ++
7213 + for (i = 0; i < tasks; i++) {
7214 + pid[i] = fork();
7215 + if (pid[i] == 0) {