Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.10 commit in: /
Date: Wed, 14 Jul 2021 16:21:58
Message-Id: 1626279701.225807b2829dd6d5b341d875303fb27c82164a08.mpagano@gentoo
1 commit: 225807b2829dd6d5b341d875303fb27c82164a08
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Wed Jul 14 16:21:41 2021 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Wed Jul 14 16:21:41 2021 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=225807b2
7
8 Linux patch 5.10.50
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1049_linux-5.10.50.patch | 21067 +++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 21071 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index 2efc79e..e9246be 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -239,6 +239,10 @@ Patch: 1048_linux-5.10.49.patch
21 From: http://www.kernel.org
22 Desc: Linux 5.10.49
23
24 +Patch: 1049_linux-5.10.50.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 5.10.50
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1049_linux-5.10.50.patch b/1049_linux-5.10.50.patch
33 new file mode 100644
34 index 0000000..2eba41f
35 --- /dev/null
36 +++ b/1049_linux-5.10.50.patch
37 @@ -0,0 +1,21067 @@
38 +diff --git a/Documentation/ABI/testing/evm b/Documentation/ABI/testing/evm
39 +index 3c477ba48a312..2243b72e41107 100644
40 +--- a/Documentation/ABI/testing/evm
41 ++++ b/Documentation/ABI/testing/evm
42 +@@ -49,8 +49,30 @@ Description:
43 + modification of EVM-protected metadata and
44 + disable all further modification of policy
45 +
46 +- Note that once a key has been loaded, it will no longer be
47 +- possible to enable metadata modification.
48 ++ Echoing a value is additive, the new value is added to the
49 ++ existing initialization flags.
50 ++
51 ++ For example, after::
52 ++
53 ++ echo 2 ><securityfs>/evm
54 ++
55 ++ another echo can be performed::
56 ++
57 ++ echo 1 ><securityfs>/evm
58 ++
59 ++ and the resulting value will be 3.
60 ++
61 ++ Note that once an HMAC key has been loaded, it will no longer
62 ++ be possible to enable metadata modification. Signaling that an
63 ++ HMAC key has been loaded will clear the corresponding flag.
64 ++ For example, if the current value is 6 (2 and 4 set)::
65 ++
66 ++ echo 1 ><securityfs>/evm
67 ++
68 ++ will set the new value to 3 (4 cleared).
69 ++
70 ++ Loading an HMAC key is the only way to disable metadata
71 ++ modification.
72 +
73 + Until key loading has been signaled EVM can not create
74 + or validate the 'security.evm' xattr, but returns
75 +diff --git a/Documentation/ABI/testing/sysfs-bus-papr-pmem b/Documentation/ABI/testing/sysfs-bus-papr-pmem
76 +index 8316c33862a04..0aa02bf2bde5c 100644
77 +--- a/Documentation/ABI/testing/sysfs-bus-papr-pmem
78 ++++ b/Documentation/ABI/testing/sysfs-bus-papr-pmem
79 +@@ -39,9 +39,11 @@ KernelVersion: v5.9
80 + Contact: linuxppc-dev <linuxppc-dev@××××××××××××.org>, linux-nvdimm@××××××××.org,
81 + Description:
82 + (RO) Report various performance stats related to papr-scm NVDIMM
83 +- device. Each stat is reported on a new line with each line
84 +- composed of a stat-identifier followed by it value. Below are
85 +- currently known dimm performance stats which are reported:
86 ++ device. This attribute is only available for NVDIMM devices
87 ++ that support reporting NVDIMM performance stats. Each stat is
88 ++ reported on a new line with each line composed of a
89 ++ stat-identifier followed by it value. Below are currently known
90 ++ dimm performance stats which are reported:
91 +
92 + * "CtlResCt" : Controller Reset Count
93 + * "CtlResTm" : Controller Reset Elapsed Time
94 +diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
95 +index 26bfe7ae711b8..f103667d3727f 100644
96 +--- a/Documentation/admin-guide/kernel-parameters.txt
97 ++++ b/Documentation/admin-guide/kernel-parameters.txt
98 +@@ -577,6 +577,12 @@
99 + loops can be debugged more effectively on production
100 + systems.
101 +
102 ++ clocksource.max_cswd_read_retries= [KNL]
103 ++ Number of clocksource_watchdog() retries due to
104 ++ external delays before the clock will be marked
105 ++ unstable. Defaults to three retries, that is,
106 ++ four attempts to read the clock under test.
107 ++
108 + clearcpuid=BITNUM[,BITNUM...] [X86]
109 + Disable CPUID feature X for the kernel. See
110 + arch/x86/include/asm/cpufeatures.h for the valid bit
111 +diff --git a/Documentation/hwmon/max31790.rst b/Documentation/hwmon/max31790.rst
112 +index f301385d8cef3..7b097c3b9b908 100644
113 +--- a/Documentation/hwmon/max31790.rst
114 ++++ b/Documentation/hwmon/max31790.rst
115 +@@ -38,6 +38,7 @@ Sysfs entries
116 + fan[1-12]_input RO fan tachometer speed in RPM
117 + fan[1-12]_fault RO fan experienced fault
118 + fan[1-6]_target RW desired fan speed in RPM
119 +-pwm[1-6]_enable RW regulator mode, 0=disabled, 1=manual mode, 2=rpm mode
120 +-pwm[1-6] RW fan target duty cycle (0-255)
121 ++pwm[1-6]_enable RW regulator mode, 0=disabled (duty cycle=0%), 1=manual mode, 2=rpm mode
122 ++pwm[1-6] RW read: current pwm duty cycle,
123 ++ write: target pwm duty cycle (0-255)
124 + ================== === =======================================================
125 +diff --git a/Documentation/userspace-api/media/v4l/ext-ctrls-codec.rst b/Documentation/userspace-api/media/v4l/ext-ctrls-codec.rst
126 +index ce728c757eaf8..b864869b42bc8 100644
127 +--- a/Documentation/userspace-api/media/v4l/ext-ctrls-codec.rst
128 ++++ b/Documentation/userspace-api/media/v4l/ext-ctrls-codec.rst
129 +@@ -4030,7 +4030,7 @@ enum v4l2_mpeg_video_hevc_size_of_length_field -
130 + :stub-columns: 0
131 + :widths: 1 1 2
132 +
133 +- * - ``V4L2_HEVC_PPS_FLAG_DEPENDENT_SLICE_SEGMENT``
134 ++ * - ``V4L2_HEVC_PPS_FLAG_DEPENDENT_SLICE_SEGMENT_ENABLED``
135 + - 0x00000001
136 + -
137 + * - ``V4L2_HEVC_PPS_FLAG_OUTPUT_FLAG_PRESENT``
138 +@@ -4238,6 +4238,9 @@ enum v4l2_mpeg_video_hevc_size_of_length_field -
139 + * - ``V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_LOOP_FILTER_ACROSS_SLICES_ENABLED``
140 + - 0x00000100
141 + -
142 ++ * - ``V4L2_HEVC_SLICE_PARAMS_FLAG_DEPENDENT_SLICE_SEGMENT``
143 ++ - 0x00000200
144 ++ -
145 +
146 + .. c:type:: v4l2_hevc_dpb_entry
147 +
148 +diff --git a/Documentation/vm/arch_pgtable_helpers.rst b/Documentation/vm/arch_pgtable_helpers.rst
149 +index f3591ee3aaa89..552567d863b86 100644
150 +--- a/Documentation/vm/arch_pgtable_helpers.rst
151 ++++ b/Documentation/vm/arch_pgtable_helpers.rst
152 +@@ -50,7 +50,7 @@ PTE Page Table Helpers
153 + +---------------------------+--------------------------------------------------+
154 + | pte_mkwrite | Creates a writable PTE |
155 + +---------------------------+--------------------------------------------------+
156 +-| pte_mkwrprotect | Creates a write protected PTE |
157 ++| pte_wrprotect | Creates a write protected PTE |
158 + +---------------------------+--------------------------------------------------+
159 + | pte_mkspecial | Creates a special PTE |
160 + +---------------------------+--------------------------------------------------+
161 +@@ -120,7 +120,7 @@ PMD Page Table Helpers
162 + +---------------------------+--------------------------------------------------+
163 + | pmd_mkwrite | Creates a writable PMD |
164 + +---------------------------+--------------------------------------------------+
165 +-| pmd_mkwrprotect | Creates a write protected PMD |
166 ++| pmd_wrprotect | Creates a write protected PMD |
167 + +---------------------------+--------------------------------------------------+
168 + | pmd_mkspecial | Creates a special PMD |
169 + +---------------------------+--------------------------------------------------+
170 +@@ -186,7 +186,7 @@ PUD Page Table Helpers
171 + +---------------------------+--------------------------------------------------+
172 + | pud_mkwrite | Creates a writable PUD |
173 + +---------------------------+--------------------------------------------------+
174 +-| pud_mkwrprotect | Creates a write protected PUD |
175 ++| pud_wrprotect | Creates a write protected PUD |
176 + +---------------------------+--------------------------------------------------+
177 + | pud_mkdevmap | Creates a ZONE_DEVICE mapped PUD |
178 + +---------------------------+--------------------------------------------------+
179 +@@ -224,7 +224,7 @@ HugeTLB Page Table Helpers
180 + +---------------------------+--------------------------------------------------+
181 + | huge_pte_mkwrite | Creates a writable HugeTLB |
182 + +---------------------------+--------------------------------------------------+
183 +-| huge_pte_mkwrprotect | Creates a write protected HugeTLB |
184 ++| huge_pte_wrprotect | Creates a write protected HugeTLB |
185 + +---------------------------+--------------------------------------------------+
186 + | huge_ptep_get_and_clear | Clears a HugeTLB |
187 + +---------------------------+--------------------------------------------------+
188 +diff --git a/Makefile b/Makefile
189 +index c51b73455ea33..695f8e739a91b 100644
190 +--- a/Makefile
191 ++++ b/Makefile
192 +@@ -1,7 +1,7 @@
193 + # SPDX-License-Identifier: GPL-2.0
194 + VERSION = 5
195 + PATCHLEVEL = 10
196 +-SUBLEVEL = 49
197 ++SUBLEVEL = 50
198 + EXTRAVERSION =
199 + NAME = Dare mighty things
200 +
201 +@@ -978,7 +978,7 @@ LDFLAGS_vmlinux += $(call ld-option, -X,)
202 + endif
203 +
204 + ifeq ($(CONFIG_RELR),y)
205 +-LDFLAGS_vmlinux += --pack-dyn-relocs=relr
206 ++LDFLAGS_vmlinux += --pack-dyn-relocs=relr --use-android-relr-tags
207 + endif
208 +
209 + # We never want expected sections to be placed heuristically by the
210 +diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
211 +index f4dd9f3f30010..4b2575f936d46 100644
212 +--- a/arch/alpha/kernel/smp.c
213 ++++ b/arch/alpha/kernel/smp.c
214 +@@ -166,7 +166,6 @@ smp_callin(void)
215 + DBGS(("smp_callin: commencing CPU %d current %p active_mm %p\n",
216 + cpuid, current, current->active_mm));
217 +
218 +- preempt_disable();
219 + cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
220 + }
221 +
222 +diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
223 +index 52906d3145371..db0e104d68355 100644
224 +--- a/arch/arc/kernel/smp.c
225 ++++ b/arch/arc/kernel/smp.c
226 +@@ -189,7 +189,6 @@ void start_kernel_secondary(void)
227 + pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu);
228 +
229 + local_irq_enable();
230 +- preempt_disable();
231 + cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
232 + }
233 +
234 +diff --git a/arch/arm/boot/dts/sama5d4.dtsi b/arch/arm/boot/dts/sama5d4.dtsi
235 +index 04f24cf752d34..e5c2c52013e3e 100644
236 +--- a/arch/arm/boot/dts/sama5d4.dtsi
237 ++++ b/arch/arm/boot/dts/sama5d4.dtsi
238 +@@ -809,7 +809,7 @@
239 + 0xffffffff 0x3ffcfe7c 0x1c010101 /* pioA */
240 + 0x7fffffff 0xfffccc3a 0x3f00cc3a /* pioB */
241 + 0xffffffff 0x3ff83fff 0xff00ffff /* pioC */
242 +- 0x0003ff00 0x8002a800 0x00000000 /* pioD */
243 ++ 0xb003ff00 0x8002a800 0x00000000 /* pioD */
244 + 0xffffffff 0x7fffffff 0x76fff1bf /* pioE */
245 + >;
246 +
247 +diff --git a/arch/arm/boot/dts/ste-href.dtsi b/arch/arm/boot/dts/ste-href.dtsi
248 +index ff47cbf6ed3b7..359c1219b0bab 100644
249 +--- a/arch/arm/boot/dts/ste-href.dtsi
250 ++++ b/arch/arm/boot/dts/ste-href.dtsi
251 +@@ -4,6 +4,7 @@
252 + */
253 +
254 + #include <dt-bindings/interrupt-controller/irq.h>
255 ++#include <dt-bindings/leds/common.h>
256 + #include "ste-href-family-pinctrl.dtsi"
257 +
258 + / {
259 +@@ -64,17 +65,20 @@
260 + reg = <0>;
261 + led-cur = /bits/ 8 <0x2f>;
262 + max-cur = /bits/ 8 <0x5f>;
263 ++ color = <LED_COLOR_ID_BLUE>;
264 + linux,default-trigger = "heartbeat";
265 + };
266 + chan@1 {
267 + reg = <1>;
268 + led-cur = /bits/ 8 <0x2f>;
269 + max-cur = /bits/ 8 <0x5f>;
270 ++ color = <LED_COLOR_ID_BLUE>;
271 + };
272 + chan@2 {
273 + reg = <2>;
274 + led-cur = /bits/ 8 <0x2f>;
275 + max-cur = /bits/ 8 <0x5f>;
276 ++ color = <LED_COLOR_ID_BLUE>;
277 + };
278 + };
279 + lp5521@34 {
280 +@@ -88,16 +92,19 @@
281 + reg = <0>;
282 + led-cur = /bits/ 8 <0x2f>;
283 + max-cur = /bits/ 8 <0x5f>;
284 ++ color = <LED_COLOR_ID_BLUE>;
285 + };
286 + chan@1 {
287 + reg = <1>;
288 + led-cur = /bits/ 8 <0x2f>;
289 + max-cur = /bits/ 8 <0x5f>;
290 ++ color = <LED_COLOR_ID_BLUE>;
291 + };
292 + chan@2 {
293 + reg = <2>;
294 + led-cur = /bits/ 8 <0x2f>;
295 + max-cur = /bits/ 8 <0x5f>;
296 ++ color = <LED_COLOR_ID_BLUE>;
297 + };
298 + };
299 + bh1780@29 {
300 +diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
301 +index 2924d7910b106..eb2190477da10 100644
302 +--- a/arch/arm/kernel/perf_event_v7.c
303 ++++ b/arch/arm/kernel/perf_event_v7.c
304 +@@ -773,10 +773,10 @@ static inline void armv7pmu_write_counter(struct perf_event *event, u64 value)
305 + pr_err("CPU%u writing wrong counter %d\n",
306 + smp_processor_id(), idx);
307 + } else if (idx == ARMV7_IDX_CYCLE_COUNTER) {
308 +- asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
309 ++ asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" ((u32)value));
310 + } else {
311 + armv7_pmnc_select_counter(idx);
312 +- asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (value));
313 ++ asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" ((u32)value));
314 + }
315 + }
316 +
317 +diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
318 +index 48099c6e1e4a6..8aa7fa949c232 100644
319 +--- a/arch/arm/kernel/smp.c
320 ++++ b/arch/arm/kernel/smp.c
321 +@@ -432,7 +432,6 @@ asmlinkage void secondary_start_kernel(void)
322 + #endif
323 + pr_debug("CPU%u: Booted secondary processor\n", cpu);
324 +
325 +- preempt_disable();
326 + trace_hardirqs_off();
327 +
328 + /*
329 +diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
330 +index a89e47d95eef2..879115dfdf828 100644
331 +--- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
332 ++++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
333 +@@ -134,7 +134,7 @@
334 +
335 + uart0: serial@12000 {
336 + compatible = "marvell,armada-3700-uart";
337 +- reg = <0x12000 0x200>;
338 ++ reg = <0x12000 0x18>;
339 + clocks = <&xtalclk>;
340 + interrupts =
341 + <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
342 +diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h
343 +index f68a0e64482a1..5ef624fef44a2 100644
344 +--- a/arch/arm64/include/asm/asm-uaccess.h
345 ++++ b/arch/arm64/include/asm/asm-uaccess.h
346 +@@ -15,10 +15,10 @@
347 + .macro __uaccess_ttbr0_disable, tmp1
348 + mrs \tmp1, ttbr1_el1 // swapper_pg_dir
349 + bic \tmp1, \tmp1, #TTBR_ASID_MASK
350 +- sub \tmp1, \tmp1, #RESERVED_TTBR0_SIZE // reserved_ttbr0 just before swapper_pg_dir
351 ++ sub \tmp1, \tmp1, #PAGE_SIZE // reserved_pg_dir just before swapper_pg_dir
352 + msr ttbr0_el1, \tmp1 // set reserved TTBR0_EL1
353 + isb
354 +- add \tmp1, \tmp1, #RESERVED_TTBR0_SIZE
355 ++ add \tmp1, \tmp1, #PAGE_SIZE
356 + msr ttbr1_el1, \tmp1 // set reserved ASID
357 + isb
358 + .endm
359 +diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h
360 +index 19ca76ea60d98..587c504a4c8b2 100644
361 +--- a/arch/arm64/include/asm/kernel-pgtable.h
362 ++++ b/arch/arm64/include/asm/kernel-pgtable.h
363 +@@ -89,12 +89,6 @@
364 + #define INIT_DIR_SIZE (PAGE_SIZE * EARLY_PAGES(KIMAGE_VADDR, _end))
365 + #define IDMAP_DIR_SIZE (IDMAP_PGTABLE_LEVELS * PAGE_SIZE)
366 +
367 +-#ifdef CONFIG_ARM64_SW_TTBR0_PAN
368 +-#define RESERVED_TTBR0_SIZE (PAGE_SIZE)
369 +-#else
370 +-#define RESERVED_TTBR0_SIZE (0)
371 +-#endif
372 +-
373 + /* Initial memory map size */
374 + #if ARM64_SWAPPER_USES_SECTION_MAPS
375 + #define SWAPPER_BLOCK_SHIFT SECTION_SHIFT
376 +diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
377 +index 4e2ba94778450..5a54a5ab5f928 100644
378 +--- a/arch/arm64/include/asm/mmu_context.h
379 ++++ b/arch/arm64/include/asm/mmu_context.h
380 +@@ -36,11 +36,11 @@ static inline void contextidr_thread_switch(struct task_struct *next)
381 + }
382 +
383 + /*
384 +- * Set TTBR0 to empty_zero_page. No translations will be possible via TTBR0.
385 ++ * Set TTBR0 to reserved_pg_dir. No translations will be possible via TTBR0.
386 + */
387 + static inline void cpu_set_reserved_ttbr0(void)
388 + {
389 +- unsigned long ttbr = phys_to_ttbr(__pa_symbol(empty_zero_page));
390 ++ unsigned long ttbr = phys_to_ttbr(__pa_symbol(reserved_pg_dir));
391 +
392 + write_sysreg(ttbr, ttbr0_el1);
393 + isb();
394 +@@ -192,9 +192,9 @@ static inline void update_saved_ttbr0(struct task_struct *tsk,
395 + return;
396 +
397 + if (mm == &init_mm)
398 +- ttbr = __pa_symbol(empty_zero_page);
399 ++ ttbr = phys_to_ttbr(__pa_symbol(reserved_pg_dir));
400 + else
401 +- ttbr = virt_to_phys(mm->pgd) | ASID(mm) << 48;
402 ++ ttbr = phys_to_ttbr(virt_to_phys(mm->pgd)) | ASID(mm) << 48;
403 +
404 + WRITE_ONCE(task_thread_info(tsk)->ttbr0, ttbr);
405 + }
406 +diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
407 +index 717f13d52ecc5..10ffbc96ac31f 100644
408 +--- a/arch/arm64/include/asm/pgtable.h
409 ++++ b/arch/arm64/include/asm/pgtable.h
410 +@@ -530,6 +530,7 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
411 + extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
412 + extern pgd_t idmap_pg_end[];
413 + extern pgd_t tramp_pg_dir[PTRS_PER_PGD];
414 ++extern pgd_t reserved_pg_dir[PTRS_PER_PGD];
415 +
416 + extern void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd);
417 +
418 +diff --git a/arch/arm64/include/asm/preempt.h b/arch/arm64/include/asm/preempt.h
419 +index 80e946b2abee2..e83f0982b99c1 100644
420 +--- a/arch/arm64/include/asm/preempt.h
421 ++++ b/arch/arm64/include/asm/preempt.h
422 +@@ -23,7 +23,7 @@ static inline void preempt_count_set(u64 pc)
423 + } while (0)
424 +
425 + #define init_idle_preempt_count(p, cpu) do { \
426 +- task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \
427 ++ task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \
428 + } while (0)
429 +
430 + static inline void set_preempt_need_resched(void)
431 +diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
432 +index 991dd5f031e46..385a189f7d39e 100644
433 +--- a/arch/arm64/include/asm/uaccess.h
434 ++++ b/arch/arm64/include/asm/uaccess.h
435 +@@ -113,8 +113,8 @@ static inline void __uaccess_ttbr0_disable(void)
436 + local_irq_save(flags);
437 + ttbr = read_sysreg(ttbr1_el1);
438 + ttbr &= ~TTBR_ASID_MASK;
439 +- /* reserved_ttbr0 placed before swapper_pg_dir */
440 +- write_sysreg(ttbr - RESERVED_TTBR0_SIZE, ttbr0_el1);
441 ++ /* reserved_pg_dir placed before swapper_pg_dir */
442 ++ write_sysreg(ttbr - PAGE_SIZE, ttbr0_el1);
443 + isb();
444 + /* Set reserved ASID */
445 + write_sysreg(ttbr, ttbr1_el1);
446 +diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
447 +index 60d3991233600..fe83d6d67ec3d 100644
448 +--- a/arch/arm64/kernel/entry.S
449 ++++ b/arch/arm64/kernel/entry.S
450 +@@ -770,9 +770,10 @@ SYM_CODE_END(ret_to_user)
451 + */
452 + .pushsection ".entry.tramp.text", "ax"
453 +
454 ++ // Move from tramp_pg_dir to swapper_pg_dir
455 + .macro tramp_map_kernel, tmp
456 + mrs \tmp, ttbr1_el1
457 +- add \tmp, \tmp, #(PAGE_SIZE + RESERVED_TTBR0_SIZE)
458 ++ add \tmp, \tmp, #(2 * PAGE_SIZE)
459 + bic \tmp, \tmp, #USER_ASID_FLAG
460 + msr ttbr1_el1, \tmp
461 + #ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
462 +@@ -789,9 +790,10 @@ alternative_else_nop_endif
463 + #endif /* CONFIG_QCOM_FALKOR_ERRATUM_1003 */
464 + .endm
465 +
466 ++ // Move from swapper_pg_dir to tramp_pg_dir
467 + .macro tramp_unmap_kernel, tmp
468 + mrs \tmp, ttbr1_el1
469 +- sub \tmp, \tmp, #(PAGE_SIZE + RESERVED_TTBR0_SIZE)
470 ++ sub \tmp, \tmp, #(2 * PAGE_SIZE)
471 + orr \tmp, \tmp, #USER_ASID_FLAG
472 + msr ttbr1_el1, \tmp
473 + /*
474 +diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
475 +index 11852e05ee32a..cdb3d4549b3a9 100644
476 +--- a/arch/arm64/kernel/perf_event.c
477 ++++ b/arch/arm64/kernel/perf_event.c
478 +@@ -312,7 +312,7 @@ static ssize_t slots_show(struct device *dev, struct device_attribute *attr,
479 + struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
480 + u32 slots = cpu_pmu->reg_pmmir & ARMV8_PMU_SLOTS_MASK;
481 +
482 +- return snprintf(page, PAGE_SIZE, "0x%08x\n", slots);
483 ++ return sysfs_emit(page, "0x%08x\n", slots);
484 + }
485 +
486 + static DEVICE_ATTR_RO(slots);
487 +diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
488 +index 133257ffd8591..eb4b24652c105 100644
489 +--- a/arch/arm64/kernel/setup.c
490 ++++ b/arch/arm64/kernel/setup.c
491 +@@ -366,7 +366,7 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p)
492 + * faults in case uaccess_enable() is inadvertently called by the init
493 + * thread.
494 + */
495 +- init_task.thread_info.ttbr0 = __pa_symbol(empty_zero_page);
496 ++ init_task.thread_info.ttbr0 = phys_to_ttbr(__pa_symbol(reserved_pg_dir));
497 + #endif
498 +
499 + if (boot_args[1] || boot_args[2] || boot_args[3]) {
500 +diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
501 +index 18e9727d3f645..feee5a3cd1288 100644
502 +--- a/arch/arm64/kernel/smp.c
503 ++++ b/arch/arm64/kernel/smp.c
504 +@@ -223,7 +223,6 @@ asmlinkage notrace void secondary_start_kernel(void)
505 + init_gic_priority_masking();
506 +
507 + rcu_cpu_starting(cpu);
508 +- preempt_disable();
509 + trace_hardirqs_off();
510 +
511 + /*
512 +diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
513 +index 1bda604f4c704..30c1029789427 100644
514 +--- a/arch/arm64/kernel/vmlinux.lds.S
515 ++++ b/arch/arm64/kernel/vmlinux.lds.S
516 +@@ -164,13 +164,11 @@ SECTIONS
517 + . += PAGE_SIZE;
518 + #endif
519 +
520 +-#ifdef CONFIG_ARM64_SW_TTBR0_PAN
521 +- reserved_ttbr0 = .;
522 +- . += RESERVED_TTBR0_SIZE;
523 +-#endif
524 ++ reserved_pg_dir = .;
525 ++ . += PAGE_SIZE;
526 ++
527 + swapper_pg_dir = .;
528 + . += PAGE_SIZE;
529 +- swapper_pg_end = .;
530 +
531 + . = ALIGN(SEGMENT_ALIGN);
532 + __init_begin = .;
533 +diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
534 +index 2dd164bb1c5a9..4b30260e1abf4 100644
535 +--- a/arch/arm64/kvm/pmu-emul.c
536 ++++ b/arch/arm64/kvm/pmu-emul.c
537 +@@ -578,6 +578,7 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
538 + kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);
539 +
540 + if (val & ARMV8_PMU_PMCR_P) {
541 ++ mask &= ~BIT(ARMV8_PMU_CYCLE_IDX);
542 + for_each_set_bit(i, &mask, 32)
543 + kvm_pmu_set_counter_value(vcpu, i, 0);
544 + }
545 +diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
546 +index a14927360be26..aacc7eab9b2ff 100644
547 +--- a/arch/arm64/mm/proc.S
548 ++++ b/arch/arm64/mm/proc.S
549 +@@ -168,7 +168,7 @@ SYM_FUNC_END(cpu_do_resume)
550 + .pushsection ".idmap.text", "awx"
551 +
552 + .macro __idmap_cpu_set_reserved_ttbr1, tmp1, tmp2
553 +- adrp \tmp1, empty_zero_page
554 ++ adrp \tmp1, reserved_pg_dir
555 + phys_to_ttbr \tmp2, \tmp1
556 + offset_ttbr1 \tmp2, \tmp1
557 + msr ttbr1_el1, \tmp2
558 +diff --git a/arch/csky/kernel/smp.c b/arch/csky/kernel/smp.c
559 +index 041d0de6a1b67..1a8d7eaf1ff71 100644
560 +--- a/arch/csky/kernel/smp.c
561 ++++ b/arch/csky/kernel/smp.c
562 +@@ -282,7 +282,6 @@ void csky_start_secondary(void)
563 + pr_info("CPU%u Online: %s...\n", cpu, __func__);
564 +
565 + local_irq_enable();
566 +- preempt_disable();
567 + cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
568 + }
569 +
570 +diff --git a/arch/csky/mm/syscache.c b/arch/csky/mm/syscache.c
571 +index ffade2f9a4c87..cd847ad62c7ee 100644
572 +--- a/arch/csky/mm/syscache.c
573 ++++ b/arch/csky/mm/syscache.c
574 +@@ -12,14 +12,17 @@ SYSCALL_DEFINE3(cacheflush,
575 + int, cache)
576 + {
577 + switch (cache) {
578 +- case ICACHE:
579 + case BCACHE:
580 +- flush_icache_mm_range(current->mm,
581 +- (unsigned long)addr,
582 +- (unsigned long)addr + bytes);
583 + case DCACHE:
584 + dcache_wb_range((unsigned long)addr,
585 + (unsigned long)addr + bytes);
586 ++ if (cache != BCACHE)
587 ++ break;
588 ++ fallthrough;
589 ++ case ICACHE:
590 ++ flush_icache_mm_range(current->mm,
591 ++ (unsigned long)addr,
592 ++ (unsigned long)addr + bytes);
593 + break;
594 + default:
595 + return -EINVAL;
596 +diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c
597 +index 4d0ab323dee8c..2a40268c3d494 100644
598 +--- a/arch/ia64/kernel/mca_drv.c
599 ++++ b/arch/ia64/kernel/mca_drv.c
600 +@@ -343,7 +343,7 @@ init_record_index_pools(void)
601 +
602 + /* - 2 - */
603 + sect_min_size = sal_log_sect_min_sizes[0];
604 +- for (i = 1; i < sizeof sal_log_sect_min_sizes/sizeof(size_t); i++)
605 ++ for (i = 1; i < ARRAY_SIZE(sal_log_sect_min_sizes); i++)
606 + if (sect_min_size > sal_log_sect_min_sizes[i])
607 + sect_min_size = sal_log_sect_min_sizes[i];
608 +
609 +diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
610 +index 093040f7e626a..0cad990385c04 100644
611 +--- a/arch/ia64/kernel/smpboot.c
612 ++++ b/arch/ia64/kernel/smpboot.c
613 +@@ -440,7 +440,6 @@ start_secondary (void *unused)
614 + #endif
615 + efi_map_pal_code();
616 + cpu_init();
617 +- preempt_disable();
618 + smp_callin();
619 +
620 + cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
621 +diff --git a/arch/m68k/Kconfig.machine b/arch/m68k/Kconfig.machine
622 +index 17e8c3a292d77..e161a4e1493b4 100644
623 +--- a/arch/m68k/Kconfig.machine
624 ++++ b/arch/m68k/Kconfig.machine
625 +@@ -23,6 +23,9 @@ config ATARI
626 + this kernel on an Atari, say Y here and browse the material
627 + available in <file:Documentation/m68k>; otherwise say N.
628 +
629 ++config ATARI_KBD_CORE
630 ++ bool
631 ++
632 + config MAC
633 + bool "Macintosh support"
634 + depends on MMU
635 +diff --git a/arch/mips/include/asm/highmem.h b/arch/mips/include/asm/highmem.h
636 +index f1f788b571666..9f021cf51aa71 100644
637 +--- a/arch/mips/include/asm/highmem.h
638 ++++ b/arch/mips/include/asm/highmem.h
639 +@@ -36,7 +36,7 @@ extern pte_t *pkmap_page_table;
640 + * easily, subsequent pte tables have to be allocated in one physical
641 + * chunk of RAM.
642 + */
643 +-#ifdef CONFIG_PHYS_ADDR_T_64BIT
644 ++#if defined(CONFIG_PHYS_ADDR_T_64BIT) || defined(CONFIG_MIPS_HUGE_TLB_SUPPORT)
645 + #define LAST_PKMAP 512
646 + #else
647 + #define LAST_PKMAP 1024
648 +diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
649 +index 48d84d5fcc361..ff25926c5458c 100644
650 +--- a/arch/mips/kernel/smp.c
651 ++++ b/arch/mips/kernel/smp.c
652 +@@ -348,7 +348,6 @@ asmlinkage void start_secondary(void)
653 + */
654 +
655 + calibrate_delay();
656 +- preempt_disable();
657 + cpu = smp_processor_id();
658 + cpu_data[cpu].udelay_val = loops_per_jiffy;
659 +
660 +diff --git a/arch/openrisc/kernel/smp.c b/arch/openrisc/kernel/smp.c
661 +index 29c82ef2e207c..e4dad76066aed 100644
662 +--- a/arch/openrisc/kernel/smp.c
663 ++++ b/arch/openrisc/kernel/smp.c
664 +@@ -134,8 +134,6 @@ asmlinkage __init void secondary_start_kernel(void)
665 + set_cpu_online(cpu, true);
666 +
667 + local_irq_enable();
668 +-
669 +- preempt_disable();
670 + /*
671 + * OK, it's off to the idle thread for us
672 + */
673 +diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
674 +index 10227f667c8a6..1405b603b91b6 100644
675 +--- a/arch/parisc/kernel/smp.c
676 ++++ b/arch/parisc/kernel/smp.c
677 +@@ -302,7 +302,6 @@ void __init smp_callin(unsigned long pdce_proc)
678 + #endif
679 +
680 + smp_cpu_init(slave_id);
681 +- preempt_disable();
682 +
683 + flush_cache_all_local(); /* start with known state */
684 + flush_tlb_all_local(NULL);
685 +diff --git a/arch/powerpc/include/asm/cputhreads.h b/arch/powerpc/include/asm/cputhreads.h
686 +index 98c8bd155bf9d..b167186aaee4a 100644
687 +--- a/arch/powerpc/include/asm/cputhreads.h
688 ++++ b/arch/powerpc/include/asm/cputhreads.h
689 +@@ -98,6 +98,36 @@ static inline int cpu_last_thread_sibling(int cpu)
690 + return cpu | (threads_per_core - 1);
691 + }
692 +
693 ++/*
694 ++ * tlb_thread_siblings are siblings which share a TLB. This is not
695 ++ * architected, is not something a hypervisor could emulate and a future
696 ++ * CPU may change behaviour even in compat mode, so this should only be
697 ++ * used on PowerNV, and only with care.
698 ++ */
699 ++static inline int cpu_first_tlb_thread_sibling(int cpu)
700 ++{
701 ++ if (cpu_has_feature(CPU_FTR_ARCH_300) && (threads_per_core == 8))
702 ++ return cpu & ~0x6; /* Big Core */
703 ++ else
704 ++ return cpu_first_thread_sibling(cpu);
705 ++}
706 ++
707 ++static inline int cpu_last_tlb_thread_sibling(int cpu)
708 ++{
709 ++ if (cpu_has_feature(CPU_FTR_ARCH_300) && (threads_per_core == 8))
710 ++ return cpu | 0x6; /* Big Core */
711 ++ else
712 ++ return cpu_last_thread_sibling(cpu);
713 ++}
714 ++
715 ++static inline int cpu_tlb_thread_sibling_step(void)
716 ++{
717 ++ if (cpu_has_feature(CPU_FTR_ARCH_300) && (threads_per_core == 8))
718 ++ return 2; /* Big Core */
719 ++ else
720 ++ return 1;
721 ++}
722 ++
723 + static inline u32 get_tensr(void)
724 + {
725 + #ifdef CONFIG_BOOKE
726 +diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c
727 +index b7e173754a2e9..ea8b002820cec 100644
728 +--- a/arch/powerpc/kernel/mce_power.c
729 ++++ b/arch/powerpc/kernel/mce_power.c
730 +@@ -475,12 +475,11 @@ static int mce_find_instr_ea_and_phys(struct pt_regs *regs, uint64_t *addr,
731 + return -1;
732 + }
733 +
734 +-static int mce_handle_ierror(struct pt_regs *regs,
735 ++static int mce_handle_ierror(struct pt_regs *regs, unsigned long srr1,
736 + const struct mce_ierror_table table[],
737 + struct mce_error_info *mce_err, uint64_t *addr,
738 + uint64_t *phys_addr)
739 + {
740 +- uint64_t srr1 = regs->msr;
741 + int handled = 0;
742 + int i;
743 +
744 +@@ -683,19 +682,19 @@ static long mce_handle_ue_error(struct pt_regs *regs,
745 + }
746 +
747 + static long mce_handle_error(struct pt_regs *regs,
748 ++ unsigned long srr1,
749 + const struct mce_derror_table dtable[],
750 + const struct mce_ierror_table itable[])
751 + {
752 + struct mce_error_info mce_err = { 0 };
753 + uint64_t addr, phys_addr = ULONG_MAX;
754 +- uint64_t srr1 = regs->msr;
755 + long handled;
756 +
757 + if (SRR1_MC_LOADSTORE(srr1))
758 + handled = mce_handle_derror(regs, dtable, &mce_err, &addr,
759 + &phys_addr);
760 + else
761 +- handled = mce_handle_ierror(regs, itable, &mce_err, &addr,
762 ++ handled = mce_handle_ierror(regs, srr1, itable, &mce_err, &addr,
763 + &phys_addr);
764 +
765 + if (!handled && mce_err.error_type == MCE_ERROR_TYPE_UE)
766 +@@ -711,16 +710,20 @@ long __machine_check_early_realmode_p7(struct pt_regs *regs)
767 + /* P7 DD1 leaves top bits of DSISR undefined */
768 + regs->dsisr &= 0x0000ffff;
769 +
770 +- return mce_handle_error(regs, mce_p7_derror_table, mce_p7_ierror_table);
771 ++ return mce_handle_error(regs, regs->msr,
772 ++ mce_p7_derror_table, mce_p7_ierror_table);
773 + }
774 +
775 + long __machine_check_early_realmode_p8(struct pt_regs *regs)
776 + {
777 +- return mce_handle_error(regs, mce_p8_derror_table, mce_p8_ierror_table);
778 ++ return mce_handle_error(regs, regs->msr,
779 ++ mce_p8_derror_table, mce_p8_ierror_table);
780 + }
781 +
782 + long __machine_check_early_realmode_p9(struct pt_regs *regs)
783 + {
784 ++ unsigned long srr1 = regs->msr;
785 ++
786 + /*
787 + * On POWER9 DD2.1 and below, it's possible to get a machine check
788 + * caused by a paste instruction where only DSISR bit 25 is set. This
789 +@@ -734,10 +737,39 @@ long __machine_check_early_realmode_p9(struct pt_regs *regs)
790 + if (SRR1_MC_LOADSTORE(regs->msr) && regs->dsisr == 0x02000000)
791 + return 1;
792 +
793 +- return mce_handle_error(regs, mce_p9_derror_table, mce_p9_ierror_table);
794 ++ /*
795 ++ * Async machine check due to bad real address from store or foreign
796 ++ * link time out comes with the load/store bit (PPC bit 42) set in
797 ++ * SRR1, but the cause comes in SRR1 not DSISR. Clear bit 42 so we're
798 ++ * directed to the ierror table so it will find the cause (which
799 ++ * describes it correctly as a store error).
800 ++ */
801 ++ if (SRR1_MC_LOADSTORE(srr1) &&
802 ++ ((srr1 & 0x081c0000) == 0x08140000 ||
803 ++ (srr1 & 0x081c0000) == 0x08180000)) {
804 ++ srr1 &= ~PPC_BIT(42);
805 ++ }
806 ++
807 ++ return mce_handle_error(regs, srr1,
808 ++ mce_p9_derror_table, mce_p9_ierror_table);
809 + }
810 +
811 + long __machine_check_early_realmode_p10(struct pt_regs *regs)
812 + {
813 +- return mce_handle_error(regs, mce_p10_derror_table, mce_p10_ierror_table);
814 ++ unsigned long srr1 = regs->msr;
815 ++
816 ++ /*
817 ++ * Async machine check due to bad real address from store comes with
818 ++ * the load/store bit (PPC bit 42) set in SRR1, but the cause comes in
819 ++ * SRR1 not DSISR. Clear bit 42 so we're directed to the ierror table
820 ++ * so it will find the cause (which describes it correctly as a store
821 ++ * error).
822 ++ */
823 ++ if (SRR1_MC_LOADSTORE(srr1) &&
824 ++ (srr1 & 0x081c0000) == 0x08140000) {
825 ++ srr1 &= ~PPC_BIT(42);
826 ++ }
827 ++
828 ++ return mce_handle_error(regs, srr1,
829 ++ mce_p10_derror_table, mce_p10_ierror_table);
830 + }
831 +diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
832 +index 1a1d2657fe8dd..3064694afea17 100644
833 +--- a/arch/powerpc/kernel/process.c
834 ++++ b/arch/powerpc/kernel/process.c
835 +@@ -1227,6 +1227,19 @@ struct task_struct *__switch_to(struct task_struct *prev,
836 + __flush_tlb_pending(batch);
837 + batch->active = 0;
838 + }
839 ++
840 ++ /*
841 ++ * On POWER9 the copy-paste buffer can only paste into
842 ++ * foreign real addresses, so unprivileged processes can not
843 ++ * see the data or use it in any way unless they have
844 ++ * foreign real mappings. If the new process has the foreign
845 ++ * real address mappings, we must issue a cp_abort to clear
846 ++ * any state and prevent snooping, corruption or a covert
847 ++ * channel. ISA v3.1 supports paste into local memory.
848 ++ */
849 ++ if (new->mm && (cpu_has_feature(CPU_FTR_ARCH_31) ||
850 ++ atomic_read(&new->mm->context.vas_windows)))
851 ++ asm volatile(PPC_CP_ABORT);
852 + #endif /* CONFIG_PPC_BOOK3S_64 */
853 +
854 + #ifdef CONFIG_PPC_ADV_DEBUG_REGS
855 +@@ -1272,30 +1285,33 @@ struct task_struct *__switch_to(struct task_struct *prev,
856 +
857 + last = _switch(old_thread, new_thread);
858 +
859 ++ /*
860 ++ * Nothing after _switch will be run for newly created tasks,
861 ++ * because they switch directly to ret_from_fork/ret_from_kernel_thread
862 ++ * etc. Code added here should have a comment explaining why that is
863 ++ * okay.
864 ++ */
865 ++
866 + #ifdef CONFIG_PPC_BOOK3S_64
867 ++ /*
868 ++ * This applies to a process that was context switched while inside
869 ++ * arch_enter_lazy_mmu_mode(), to re-activate the batch that was
870 ++ * deactivated above, before _switch(). This will never be the case
871 ++ * for new tasks.
872 ++ */
873 + if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
874 + current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
875 + batch = this_cpu_ptr(&ppc64_tlb_batch);
876 + batch->active = 1;
877 + }
878 +
879 +- if (current->thread.regs) {
880 ++ /*
881 ++ * Math facilities are masked out of the child MSR in copy_thread.
882 ++ * A new task does not need to restore_math because it will
883 ++ * demand fault them.
884 ++ */
885 ++ if (current->thread.regs)
886 + restore_math(current->thread.regs);
887 +-
888 +- /*
889 +- * On POWER9 the copy-paste buffer can only paste into
890 +- * foreign real addresses, so unprivileged processes can not
891 +- * see the data or use it in any way unless they have
892 +- * foreign real mappings. If the new process has the foreign
893 +- * real address mappings, we must issue a cp_abort to clear
894 +- * any state and prevent snooping, corruption or a covert
895 +- * channel. ISA v3.1 supports paste into local memory.
896 +- */
897 +- if (current->mm &&
898 +- (cpu_has_feature(CPU_FTR_ARCH_31) ||
899 +- atomic_read(&current->mm->context.vas_windows)))
900 +- asm volatile(PPC_CP_ABORT);
901 +- }
902 + #endif /* CONFIG_PPC_BOOK3S_64 */
903 +
904 + return last;
905 +diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
906 +index db7ac77bea3a7..26a028a9233af 100644
907 +--- a/arch/powerpc/kernel/smp.c
908 ++++ b/arch/powerpc/kernel/smp.c
909 +@@ -600,6 +600,8 @@ static void nmi_stop_this_cpu(struct pt_regs *regs)
910 + /*
911 + * IRQs are already hard disabled by the smp_handle_nmi_ipi.
912 + */
913 ++ set_cpu_online(smp_processor_id(), false);
914 ++
915 + spin_begin();
916 + while (1)
917 + spin_cpu_relax();
918 +@@ -615,6 +617,15 @@ void smp_send_stop(void)
919 + static void stop_this_cpu(void *dummy)
920 + {
921 + hard_irq_disable();
922 ++
923 ++ /*
924 ++ * Offlining CPUs in stop_this_cpu can result in scheduler warnings,
925 ++ * (see commit de6e5d38417e), but printk_safe_flush_on_panic() wants
926 ++ * to know other CPUs are offline before it breaks locks to flush
927 ++ * printk buffers, in case we panic()ed while holding the lock.
928 ++ */
929 ++ set_cpu_online(smp_processor_id(), false);
930 ++
931 + spin_begin();
932 + while (1)
933 + spin_cpu_relax();
934 +@@ -1426,7 +1437,6 @@ void start_secondary(void *unused)
935 + smp_store_cpu_info(cpu);
936 + set_dec(tb_ticks_per_jiffy);
937 + rcu_cpu_starting(cpu);
938 +- preempt_disable();
939 + cpu_callin_map[cpu] = 1;
940 +
941 + if (smp_ops->setup_cpu)
942 +diff --git a/arch/powerpc/kernel/stacktrace.c b/arch/powerpc/kernel/stacktrace.c
943 +index b6440657ef92d..2f926ea9b7b94 100644
944 +--- a/arch/powerpc/kernel/stacktrace.c
945 ++++ b/arch/powerpc/kernel/stacktrace.c
946 +@@ -19,6 +19,7 @@
947 + #include <asm/ptrace.h>
948 + #include <asm/processor.h>
949 + #include <linux/ftrace.h>
950 ++#include <linux/delay.h>
951 + #include <asm/kprobes.h>
952 +
953 + #include <asm/paca.h>
954 +@@ -230,17 +231,31 @@ static void handle_backtrace_ipi(struct pt_regs *regs)
955 +
956 + static void raise_backtrace_ipi(cpumask_t *mask)
957 + {
958 ++ struct paca_struct *p;
959 + unsigned int cpu;
960 ++ u64 delay_us;
961 +
962 + for_each_cpu(cpu, mask) {
963 +- if (cpu == smp_processor_id())
964 ++ if (cpu == smp_processor_id()) {
965 + handle_backtrace_ipi(NULL);
966 +- else
967 +- smp_send_safe_nmi_ipi(cpu, handle_backtrace_ipi, 5 * USEC_PER_SEC);
968 +- }
969 ++ continue;
970 ++ }
971 +
972 +- for_each_cpu(cpu, mask) {
973 +- struct paca_struct *p = paca_ptrs[cpu];
974 ++ delay_us = 5 * USEC_PER_SEC;
975 ++
976 ++ if (smp_send_safe_nmi_ipi(cpu, handle_backtrace_ipi, delay_us)) {
977 ++ // Now wait up to 5s for the other CPU to do its backtrace
978 ++ while (cpumask_test_cpu(cpu, mask) && delay_us) {
979 ++ udelay(1);
980 ++ delay_us--;
981 ++ }
982 ++
983 ++ // Other CPU cleared itself from the mask
984 ++ if (delay_us)
985 ++ continue;
986 ++ }
987 ++
988 ++ p = paca_ptrs[cpu];
989 +
990 + cpumask_clear_cpu(cpu, mask);
991 +
992 +diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
993 +index 965b702208d85..2325b7a6e95f8 100644
994 +--- a/arch/powerpc/kvm/book3s_hv.c
995 ++++ b/arch/powerpc/kvm/book3s_hv.c
996 +@@ -2578,7 +2578,7 @@ static void radix_flush_cpu(struct kvm *kvm, int cpu, struct kvm_vcpu *vcpu)
997 + cpumask_t *cpu_in_guest;
998 + int i;
999 +
1000 +- cpu = cpu_first_thread_sibling(cpu);
1001 ++ cpu = cpu_first_tlb_thread_sibling(cpu);
1002 + if (nested) {
1003 + cpumask_set_cpu(cpu, &nested->need_tlb_flush);
1004 + cpu_in_guest = &nested->cpu_in_guest;
1005 +@@ -2592,9 +2592,10 @@ static void radix_flush_cpu(struct kvm *kvm, int cpu, struct kvm_vcpu *vcpu)
1006 + * the other side is the first smp_mb() in kvmppc_run_core().
1007 + */
1008 + smp_mb();
1009 +- for (i = 0; i < threads_per_core; ++i)
1010 +- if (cpumask_test_cpu(cpu + i, cpu_in_guest))
1011 +- smp_call_function_single(cpu + i, do_nothing, NULL, 1);
1012 ++ for (i = cpu; i <= cpu_last_tlb_thread_sibling(cpu);
1013 ++ i += cpu_tlb_thread_sibling_step())
1014 ++ if (cpumask_test_cpu(i, cpu_in_guest))
1015 ++ smp_call_function_single(i, do_nothing, NULL, 1);
1016 + }
1017 +
1018 + static void kvmppc_prepare_radix_vcpu(struct kvm_vcpu *vcpu, int pcpu)
1019 +@@ -2625,8 +2626,8 @@ static void kvmppc_prepare_radix_vcpu(struct kvm_vcpu *vcpu, int pcpu)
1020 + */
1021 + if (prev_cpu != pcpu) {
1022 + if (prev_cpu >= 0 &&
1023 +- cpu_first_thread_sibling(prev_cpu) !=
1024 +- cpu_first_thread_sibling(pcpu))
1025 ++ cpu_first_tlb_thread_sibling(prev_cpu) !=
1026 ++ cpu_first_tlb_thread_sibling(pcpu))
1027 + radix_flush_cpu(kvm, prev_cpu, vcpu);
1028 + if (nested)
1029 + nested->prev_cpu[vcpu->arch.nested_vcpu_id] = pcpu;
1030 +diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
1031 +index 8f58dd20b362a..4621905bdd9ea 100644
1032 +--- a/arch/powerpc/kvm/book3s_hv_builtin.c
1033 ++++ b/arch/powerpc/kvm/book3s_hv_builtin.c
1034 +@@ -893,7 +893,7 @@ void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu,
1035 + * Thus we make all 4 threads use the same bit.
1036 + */
1037 + if (cpu_has_feature(CPU_FTR_ARCH_300))
1038 +- pcpu = cpu_first_thread_sibling(pcpu);
1039 ++ pcpu = cpu_first_tlb_thread_sibling(pcpu);
1040 +
1041 + if (nested)
1042 + need_tlb_flush = &nested->need_tlb_flush;
1043 +diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c
1044 +index 33b58549a9aaf..065738819db9b 100644
1045 +--- a/arch/powerpc/kvm/book3s_hv_nested.c
1046 ++++ b/arch/powerpc/kvm/book3s_hv_nested.c
1047 +@@ -51,7 +51,8 @@ void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
1048 + hr->ppr = vcpu->arch.ppr;
1049 + }
1050 +
1051 +-static void byteswap_pt_regs(struct pt_regs *regs)
1052 ++/* Use noinline_for_stack due to https://bugs.llvm.org/show_bug.cgi?id=49610 */
1053 ++static noinline_for_stack void byteswap_pt_regs(struct pt_regs *regs)
1054 + {
1055 + unsigned long *addr = (unsigned long *) regs;
1056 +
1057 +diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
1058 +index 88da2764c1bb9..3ddc83d2e8493 100644
1059 +--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
1060 ++++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
1061 +@@ -67,7 +67,7 @@ static int global_invalidates(struct kvm *kvm)
1062 + * so use the bit for the first thread to represent the core.
1063 + */
1064 + if (cpu_has_feature(CPU_FTR_ARCH_300))
1065 +- cpu = cpu_first_thread_sibling(cpu);
1066 ++ cpu = cpu_first_tlb_thread_sibling(cpu);
1067 + cpumask_clear_cpu(cpu, &kvm->arch.need_tlb_flush);
1068 + }
1069 +
1070 +diff --git a/arch/powerpc/platforms/cell/smp.c b/arch/powerpc/platforms/cell/smp.c
1071 +index c855a0aeb49cc..d7ab868aab54a 100644
1072 +--- a/arch/powerpc/platforms/cell/smp.c
1073 ++++ b/arch/powerpc/platforms/cell/smp.c
1074 +@@ -78,9 +78,6 @@ static inline int smp_startup_cpu(unsigned int lcpu)
1075 +
1076 + pcpu = get_hard_smp_processor_id(lcpu);
1077 +
1078 +- /* Fixup atomic count: it exited inside IRQ handler. */
1079 +- task_thread_info(paca_ptrs[lcpu]->__current)->preempt_count = 0;
1080 +-
1081 + /*
1082 + * If the RTAS start-cpu token does not exist then presume the
1083 + * cpu is already spinning.
1084 +diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c
1085 +index 835163f54244a..057acbb9116dd 100644
1086 +--- a/arch/powerpc/platforms/pseries/papr_scm.c
1087 ++++ b/arch/powerpc/platforms/pseries/papr_scm.c
1088 +@@ -18,6 +18,7 @@
1089 + #include <asm/plpar_wrappers.h>
1090 + #include <asm/papr_pdsm.h>
1091 + #include <asm/mce.h>
1092 ++#include <asm/unaligned.h>
1093 +
1094 + #define BIND_ANY_ADDR (~0ul)
1095 +
1096 +@@ -867,6 +868,20 @@ static ssize_t flags_show(struct device *dev,
1097 + }
1098 + DEVICE_ATTR_RO(flags);
1099 +
1100 ++static umode_t papr_nd_attribute_visible(struct kobject *kobj,
1101 ++ struct attribute *attr, int n)
1102 ++{
1103 ++ struct device *dev = kobj_to_dev(kobj);
1104 ++ struct nvdimm *nvdimm = to_nvdimm(dev);
1105 ++ struct papr_scm_priv *p = nvdimm_provider_data(nvdimm);
1106 ++
1107 ++ /* For if perf-stats not available remove perf_stats sysfs */
1108 ++ if (attr == &dev_attr_perf_stats.attr && p->stat_buffer_len == 0)
1109 ++ return 0;
1110 ++
1111 ++ return attr->mode;
1112 ++}
1113 ++
1114 + /* papr_scm specific dimm attributes */
1115 + static struct attribute *papr_nd_attributes[] = {
1116 + &dev_attr_flags.attr,
1117 +@@ -876,6 +891,7 @@ static struct attribute *papr_nd_attributes[] = {
1118 +
1119 + static struct attribute_group papr_nd_attribute_group = {
1120 + .name = "papr",
1121 ++ .is_visible = papr_nd_attribute_visible,
1122 + .attrs = papr_nd_attributes,
1123 + };
1124 +
1125 +@@ -891,7 +907,6 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
1126 + struct nd_region_desc ndr_desc;
1127 + unsigned long dimm_flags;
1128 + int target_nid, online_nid;
1129 +- ssize_t stat_size;
1130 +
1131 + p->bus_desc.ndctl = papr_scm_ndctl;
1132 + p->bus_desc.module = THIS_MODULE;
1133 +@@ -962,16 +977,6 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
1134 + list_add_tail(&p->region_list, &papr_nd_regions);
1135 + mutex_unlock(&papr_ndr_lock);
1136 +
1137 +- /* Try retriving the stat buffer and see if its supported */
1138 +- stat_size = drc_pmem_query_stats(p, NULL, 0);
1139 +- if (stat_size > 0) {
1140 +- p->stat_buffer_len = stat_size;
1141 +- dev_dbg(&p->pdev->dev, "Max perf-stat size %lu-bytes\n",
1142 +- p->stat_buffer_len);
1143 +- } else {
1144 +- dev_info(&p->pdev->dev, "Dimm performance stats unavailable\n");
1145 +- }
1146 +-
1147 + return 0;
1148 +
1149 + err: nvdimm_bus_unregister(p->bus);
1150 +@@ -1047,8 +1052,10 @@ static int papr_scm_probe(struct platform_device *pdev)
1151 + u32 drc_index, metadata_size;
1152 + u64 blocks, block_size;
1153 + struct papr_scm_priv *p;
1154 ++ u8 uuid_raw[UUID_SIZE];
1155 + const char *uuid_str;
1156 +- u64 uuid[2];
1157 ++ ssize_t stat_size;
1158 ++ uuid_t uuid;
1159 + int rc;
1160 +
1161 + /* check we have all the required DT properties */
1162 +@@ -1090,16 +1097,23 @@ static int papr_scm_probe(struct platform_device *pdev)
1163 + p->is_volatile = !of_property_read_bool(dn, "ibm,cache-flush-required");
1164 +
1165 + /* We just need to ensure that set cookies are unique across */
1166 +- uuid_parse(uuid_str, (uuid_t *) uuid);
1167 ++ uuid_parse(uuid_str, &uuid);
1168 ++
1169 + /*
1170 +- * cookie1 and cookie2 are not really little endian
1171 +- * we store a little endian representation of the
1172 +- * uuid str so that we can compare this with the label
1173 +- * area cookie irrespective of the endian config with which
1174 +- * the kernel is built.
1175 ++ * The cookie1 and cookie2 are not really little endian.
1176 ++ * We store a raw buffer representation of the
1177 ++ * uuid string so that we can compare this with the label
1178 ++ * area cookie irrespective of the endian configuration
1179 ++ * with which the kernel is built.
1180 ++ *
1181 ++ * Historically we stored the cookie in the below format.
1182 ++ * for a uuid string 72511b67-0b3b-42fd-8d1d-5be3cae8bcaa
1183 ++ * cookie1 was 0xfd423b0b671b5172
1184 ++ * cookie2 was 0xaabce8cae35b1d8d
1185 + */
1186 +- p->nd_set.cookie1 = cpu_to_le64(uuid[0]);
1187 +- p->nd_set.cookie2 = cpu_to_le64(uuid[1]);
1188 ++ export_uuid(uuid_raw, &uuid);
1189 ++ p->nd_set.cookie1 = get_unaligned_le64(&uuid_raw[0]);
1190 ++ p->nd_set.cookie2 = get_unaligned_le64(&uuid_raw[8]);
1191 +
1192 + /* might be zero */
1193 + p->metadata_size = metadata_size;
1194 +@@ -1124,6 +1138,14 @@ static int papr_scm_probe(struct platform_device *pdev)
1195 + p->res.name = pdev->name;
1196 + p->res.flags = IORESOURCE_MEM;
1197 +
1198 ++ /* Try retrieving the stat buffer and see if its supported */
1199 ++ stat_size = drc_pmem_query_stats(p, NULL, 0);
1200 ++ if (stat_size > 0) {
1201 ++ p->stat_buffer_len = stat_size;
1202 ++ dev_dbg(&p->pdev->dev, "Max perf-stat size %lu-bytes\n",
1203 ++ p->stat_buffer_len);
1204 ++ }
1205 ++
1206 + rc = papr_scm_nvdimm_init(p);
1207 + if (rc)
1208 + goto err2;
1209 +diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
1210 +index 92922491a81c6..624e80b00eb18 100644
1211 +--- a/arch/powerpc/platforms/pseries/smp.c
1212 ++++ b/arch/powerpc/platforms/pseries/smp.c
1213 +@@ -104,9 +104,6 @@ static inline int smp_startup_cpu(unsigned int lcpu)
1214 + return 1;
1215 + }
1216 +
1217 +- /* Fixup atomic count: it exited inside IRQ handler. */
1218 +- task_thread_info(paca_ptrs[lcpu]->__current)->preempt_count = 0;
1219 +-
1220 + /*
1221 + * If the RTAS start-cpu token does not exist then presume the
1222 + * cpu is already spinning.
1223 +diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c
1224 +index 96167d55ed984..0b04e0eae3ab5 100644
1225 +--- a/arch/riscv/kernel/smpboot.c
1226 ++++ b/arch/riscv/kernel/smpboot.c
1227 +@@ -166,7 +166,6 @@ asmlinkage __visible void smp_callin(void)
1228 + * Disable preemption before enabling interrupts, so we don't try to
1229 + * schedule a CPU that hasn't actually started yet.
1230 + */
1231 +- preempt_disable();
1232 + local_irq_enable();
1233 + cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
1234 + }
1235 +diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
1236 +index 4a2a12be04c96..896b68e541b2e 100644
1237 +--- a/arch/s390/Kconfig
1238 ++++ b/arch/s390/Kconfig
1239 +@@ -154,6 +154,8 @@ config S390
1240 + select HAVE_FUTEX_CMPXCHG if FUTEX
1241 + select HAVE_GCC_PLUGINS
1242 + select HAVE_GENERIC_VDSO
1243 ++ select HAVE_IOREMAP_PROT if PCI
1244 ++ select HAVE_IRQ_EXIT_ON_IRQ_STACK
1245 + select HAVE_KERNEL_BZIP2
1246 + select HAVE_KERNEL_GZIP
1247 + select HAVE_KERNEL_LZ4
1248 +@@ -856,7 +858,7 @@ config CMM_IUCV
1249 + config APPLDATA_BASE
1250 + def_bool n
1251 + prompt "Linux - VM Monitor Stream, base infrastructure"
1252 +- depends on PROC_FS
1253 ++ depends on PROC_SYSCTL
1254 + help
1255 + This provides a kernel interface for creating and updating z/VM APPLDATA
1256 + monitor records. The monitor records are updated at certain time
1257 +diff --git a/arch/s390/boot/uv.c b/arch/s390/boot/uv.c
1258 +index 87641dd65ccf9..b3501ea5039e4 100644
1259 +--- a/arch/s390/boot/uv.c
1260 ++++ b/arch/s390/boot/uv.c
1261 +@@ -36,6 +36,7 @@ void uv_query_info(void)
1262 + uv_info.max_sec_stor_addr = ALIGN(uvcb.max_guest_stor_addr, PAGE_SIZE);
1263 + uv_info.max_num_sec_conf = uvcb.max_num_sec_conf;
1264 + uv_info.max_guest_cpu_id = uvcb.max_guest_cpu_id;
1265 ++ uv_info.uv_feature_indications = uvcb.uv_feature_indications;
1266 + }
1267 +
1268 + #ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
1269 +diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
1270 +index b5dbae78969b9..2338345912a31 100644
1271 +--- a/arch/s390/include/asm/pgtable.h
1272 ++++ b/arch/s390/include/asm/pgtable.h
1273 +@@ -864,6 +864,25 @@ static inline int pte_unused(pte_t pte)
1274 + return pte_val(pte) & _PAGE_UNUSED;
1275 + }
1276 +
1277 ++/*
1278 ++ * Extract the pgprot value from the given pte while at the same time making it
1279 ++ * usable for kernel address space mappings where fault driven dirty and
1280 ++ * young/old accounting is not supported, i.e _PAGE_PROTECT and _PAGE_INVALID
1281 ++ * must not be set.
1282 ++ */
1283 ++static inline pgprot_t pte_pgprot(pte_t pte)
1284 ++{
1285 ++ unsigned long pte_flags = pte_val(pte) & _PAGE_CHG_MASK;
1286 ++
1287 ++ if (pte_write(pte))
1288 ++ pte_flags |= pgprot_val(PAGE_KERNEL);
1289 ++ else
1290 ++ pte_flags |= pgprot_val(PAGE_KERNEL_RO);
1291 ++ pte_flags |= pte_val(pte) & mio_wb_bit_mask;
1292 ++
1293 ++ return __pgprot(pte_flags);
1294 ++}
1295 ++
1296 + /*
1297 + * pgd/pmd/pte modification functions
1298 + */
1299 +diff --git a/arch/s390/include/asm/preempt.h b/arch/s390/include/asm/preempt.h
1300 +index 6ede29907fbf7..b5f545db461a4 100644
1301 +--- a/arch/s390/include/asm/preempt.h
1302 ++++ b/arch/s390/include/asm/preempt.h
1303 +@@ -29,12 +29,6 @@ static inline void preempt_count_set(int pc)
1304 + old, new) != old);
1305 + }
1306 +
1307 +-#define init_task_preempt_count(p) do { } while (0)
1308 +-
1309 +-#define init_idle_preempt_count(p, cpu) do { \
1310 +- S390_lowcore.preempt_count = PREEMPT_ENABLED; \
1311 +-} while (0)
1312 +-
1313 + static inline void set_preempt_need_resched(void)
1314 + {
1315 + __atomic_and(~PREEMPT_NEED_RESCHED, &S390_lowcore.preempt_count);
1316 +@@ -88,12 +82,6 @@ static inline void preempt_count_set(int pc)
1317 + S390_lowcore.preempt_count = pc;
1318 + }
1319 +
1320 +-#define init_task_preempt_count(p) do { } while (0)
1321 +-
1322 +-#define init_idle_preempt_count(p, cpu) do { \
1323 +- S390_lowcore.preempt_count = PREEMPT_ENABLED; \
1324 +-} while (0)
1325 +-
1326 + static inline void set_preempt_need_resched(void)
1327 + {
1328 + }
1329 +@@ -130,6 +118,10 @@ static inline bool should_resched(int preempt_offset)
1330 +
1331 + #endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
1332 +
1333 ++#define init_task_preempt_count(p) do { } while (0)
1334 ++/* Deferred to CPU bringup time */
1335 ++#define init_idle_preempt_count(p, cpu) do { } while (0)
1336 ++
1337 + #ifdef CONFIG_PREEMPTION
1338 + extern asmlinkage void preempt_schedule(void);
1339 + #define __preempt_schedule() preempt_schedule()
1340 +diff --git a/arch/s390/include/asm/uv.h b/arch/s390/include/asm/uv.h
1341 +index 7b98d4caee779..12c5f006c1364 100644
1342 +--- a/arch/s390/include/asm/uv.h
1343 ++++ b/arch/s390/include/asm/uv.h
1344 +@@ -73,6 +73,10 @@ enum uv_cmds_inst {
1345 + BIT_UVC_CMD_UNPIN_PAGE_SHARED = 22,
1346 + };
1347 +
1348 ++enum uv_feat_ind {
1349 ++ BIT_UV_FEAT_MISC = 0,
1350 ++};
1351 ++
1352 + struct uv_cb_header {
1353 + u16 len;
1354 + u16 cmd; /* Command Code */
1355 +@@ -97,7 +101,8 @@ struct uv_cb_qui {
1356 + u64 max_guest_stor_addr;
1357 + u8 reserved88[158 - 136];
1358 + u16 max_guest_cpu_id;
1359 +- u8 reserveda0[200 - 160];
1360 ++ u64 uv_feature_indications;
1361 ++ u8 reserveda0[200 - 168];
1362 + } __packed __aligned(8);
1363 +
1364 + /* Initialize Ultravisor */
1365 +@@ -274,6 +279,7 @@ struct uv_info {
1366 + unsigned long max_sec_stor_addr;
1367 + unsigned int max_num_sec_conf;
1368 + unsigned short max_guest_cpu_id;
1369 ++ unsigned long uv_feature_indications;
1370 + };
1371 +
1372 + extern struct uv_info uv_info;
1373 +diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
1374 +index e83ce909686c5..83a3f346e5bd9 100644
1375 +--- a/arch/s390/kernel/setup.c
1376 ++++ b/arch/s390/kernel/setup.c
1377 +@@ -454,6 +454,7 @@ static void __init setup_lowcore_dat_off(void)
1378 + lc->br_r1_trampoline = 0x07f1; /* br %r1 */
1379 + lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
1380 + lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
1381 ++ lc->preempt_count = PREEMPT_DISABLED;
1382 +
1383 + set_prefix((u32)(unsigned long) lc);
1384 + lowcore_ptr[0] = lc;
1385 +diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
1386 +index 791bc373418bd..5674792726cd9 100644
1387 +--- a/arch/s390/kernel/smp.c
1388 ++++ b/arch/s390/kernel/smp.c
1389 +@@ -215,6 +215,7 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
1390 + lc->br_r1_trampoline = 0x07f1; /* br %r1 */
1391 + lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
1392 + lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
1393 ++ lc->preempt_count = PREEMPT_DISABLED;
1394 + if (nmi_alloc_per_cpu(lc))
1395 + goto out_async;
1396 + if (vdso_alloc_per_cpu(lc))
1397 +@@ -863,7 +864,6 @@ static void smp_init_secondary(void)
1398 + set_cpu_flag(CIF_ASCE_SECONDARY);
1399 + cpu_init();
1400 + rcu_cpu_starting(cpu);
1401 +- preempt_disable();
1402 + init_cpu_timer();
1403 + vtime_init();
1404 + pfault_init();
1405 +diff --git a/arch/s390/kernel/uv.c b/arch/s390/kernel/uv.c
1406 +index b2d2ad1530676..c811b2313100b 100644
1407 +--- a/arch/s390/kernel/uv.c
1408 ++++ b/arch/s390/kernel/uv.c
1409 +@@ -364,6 +364,15 @@ static ssize_t uv_query_facilities(struct kobject *kobj,
1410 + static struct kobj_attribute uv_query_facilities_attr =
1411 + __ATTR(facilities, 0444, uv_query_facilities, NULL);
1412 +
1413 ++static ssize_t uv_query_feature_indications(struct kobject *kobj,
1414 ++ struct kobj_attribute *attr, char *buf)
1415 ++{
1416 ++ return sysfs_emit(buf, "%lx\n", uv_info.uv_feature_indications);
1417 ++}
1418 ++
1419 ++static struct kobj_attribute uv_query_feature_indications_attr =
1420 ++ __ATTR(feature_indications, 0444, uv_query_feature_indications, NULL);
1421 ++
1422 + static ssize_t uv_query_max_guest_cpus(struct kobject *kobj,
1423 + struct kobj_attribute *attr, char *page)
1424 + {
1425 +@@ -396,6 +405,7 @@ static struct kobj_attribute uv_query_max_guest_addr_attr =
1426 +
1427 + static struct attribute *uv_query_attrs[] = {
1428 + &uv_query_facilities_attr.attr,
1429 ++ &uv_query_feature_indications_attr.attr,
1430 + &uv_query_max_guest_cpus_attr.attr,
1431 + &uv_query_max_guest_vms_attr.attr,
1432 + &uv_query_max_guest_addr_attr.attr,
1433 +diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
1434 +index 20afffd6b9820..f94b4f78d4dab 100644
1435 +--- a/arch/s390/kvm/kvm-s390.c
1436 ++++ b/arch/s390/kvm/kvm-s390.c
1437 +@@ -327,31 +327,31 @@ static void allow_cpu_feat(unsigned long nr)
1438 +
1439 + static inline int plo_test_bit(unsigned char nr)
1440 + {
1441 +- register unsigned long r0 asm("0") = (unsigned long) nr | 0x100;
1442 ++ unsigned long function = (unsigned long)nr | 0x100;
1443 + int cc;
1444 +
1445 + asm volatile(
1446 ++ " lgr 0,%[function]\n"
1447 + /* Parameter registers are ignored for "test bit" */
1448 + " plo 0,0,0,0(0)\n"
1449 + " ipm %0\n"
1450 + " srl %0,28\n"
1451 + : "=d" (cc)
1452 +- : "d" (r0)
1453 +- : "cc");
1454 ++ : [function] "d" (function)
1455 ++ : "cc", "0");
1456 + return cc == 0;
1457 + }
1458 +
1459 + static __always_inline void __insn32_query(unsigned int opcode, u8 *query)
1460 + {
1461 +- register unsigned long r0 asm("0") = 0; /* query function */
1462 +- register unsigned long r1 asm("1") = (unsigned long) query;
1463 +-
1464 + asm volatile(
1465 +- /* Parameter regs are ignored */
1466 ++ " lghi 0,0\n"
1467 ++ " lgr 1,%[query]\n"
1468 ++ /* Parameter registers are ignored */
1469 + " .insn rrf,%[opc] << 16,2,4,6,0\n"
1470 + :
1471 +- : "d" (r0), "a" (r1), [opc] "i" (opcode)
1472 +- : "cc", "memory");
1473 ++ : [query] "d" ((unsigned long)query), [opc] "i" (opcode)
1474 ++ : "cc", "memory", "0", "1");
1475 + }
1476 +
1477 + #define INSN_SORTL 0xb938
1478 +diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
1479 +index 996884dcc9fdb..ed517fad0d035 100644
1480 +--- a/arch/s390/mm/fault.c
1481 ++++ b/arch/s390/mm/fault.c
1482 +@@ -805,6 +805,32 @@ void do_secure_storage_access(struct pt_regs *regs)
1483 + struct page *page;
1484 + int rc;
1485 +
1486 ++ /*
1487 ++ * bit 61 tells us if the address is valid, if it's not we
1488 ++ * have a major problem and should stop the kernel or send a
1489 ++ * SIGSEGV to the process. Unfortunately bit 61 is not
1490 ++ * reliable without the misc UV feature so we need to check
1491 ++ * for that as well.
1492 ++ */
1493 ++ if (test_bit_inv(BIT_UV_FEAT_MISC, &uv_info.uv_feature_indications) &&
1494 ++ !test_bit_inv(61, &regs->int_parm_long)) {
1495 ++ /*
1496 ++ * When this happens, userspace did something that it
1497 ++ * was not supposed to do, e.g. branching into secure
1498 ++ * memory. Trigger a segmentation fault.
1499 ++ */
1500 ++ if (user_mode(regs)) {
1501 ++ send_sig(SIGSEGV, current, 0);
1502 ++ return;
1503 ++ }
1504 ++
1505 ++ /*
1506 ++ * The kernel should never run into this case and we
1507 ++ * have no way out of this situation.
1508 ++ */
1509 ++ panic("Unexpected PGM 0x3d with TEID bit 61=0");
1510 ++ }
1511 ++
1512 + switch (get_fault_type(regs)) {
1513 + case USER_FAULT:
1514 + mm = current->mm;
1515 +diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
1516 +index 372acdc9033eb..65924d9ec2459 100644
1517 +--- a/arch/sh/kernel/smp.c
1518 ++++ b/arch/sh/kernel/smp.c
1519 +@@ -186,8 +186,6 @@ asmlinkage void start_secondary(void)
1520 +
1521 + per_cpu_trap_init();
1522 +
1523 +- preempt_disable();
1524 +-
1525 + notify_cpu_starting(cpu);
1526 +
1527 + local_irq_enable();
1528 +diff --git a/arch/sparc/kernel/smp_32.c b/arch/sparc/kernel/smp_32.c
1529 +index 50c127ab46d5b..22b148e5a5f88 100644
1530 +--- a/arch/sparc/kernel/smp_32.c
1531 ++++ b/arch/sparc/kernel/smp_32.c
1532 +@@ -348,7 +348,6 @@ static void sparc_start_secondary(void *arg)
1533 + */
1534 + arch_cpu_pre_starting(arg);
1535 +
1536 +- preempt_disable();
1537 + cpu = smp_processor_id();
1538 +
1539 + notify_cpu_starting(cpu);
1540 +diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
1541 +index e38d8bf454e86..ae5faa1d989d2 100644
1542 +--- a/arch/sparc/kernel/smp_64.c
1543 ++++ b/arch/sparc/kernel/smp_64.c
1544 +@@ -138,9 +138,6 @@ void smp_callin(void)
1545 +
1546 + set_cpu_online(cpuid, true);
1547 +
1548 +- /* idle thread is expected to have preempt disabled */
1549 +- preempt_disable();
1550 +-
1551 + local_irq_enable();
1552 +
1553 + cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
1554 +diff --git a/arch/x86/crypto/curve25519-x86_64.c b/arch/x86/crypto/curve25519-x86_64.c
1555 +index 5af8021b98cea..11b4c83c715e3 100644
1556 +--- a/arch/x86/crypto/curve25519-x86_64.c
1557 ++++ b/arch/x86/crypto/curve25519-x86_64.c
1558 +@@ -1500,7 +1500,7 @@ static int __init curve25519_mod_init(void)
1559 + static void __exit curve25519_mod_exit(void)
1560 + {
1561 + if (IS_REACHABLE(CONFIG_CRYPTO_KPP) &&
1562 +- (boot_cpu_has(X86_FEATURE_BMI2) || boot_cpu_has(X86_FEATURE_ADX)))
1563 ++ static_branch_likely(&curve25519_use_bmi2_adx))
1564 + crypto_unregister_kpp(&curve25519_alg);
1565 + }
1566 +
1567 +diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
1568 +index cad08703c4ad7..f18f3932e971a 100644
1569 +--- a/arch/x86/entry/entry_64.S
1570 ++++ b/arch/x86/entry/entry_64.S
1571 +@@ -508,7 +508,7 @@ SYM_CODE_START(\asmsym)
1572 +
1573 + movq %rsp, %rdi /* pt_regs pointer */
1574 +
1575 +- call \cfunc
1576 ++ call kernel_\cfunc
1577 +
1578 + /*
1579 + * No need to switch back to the IST stack. The current stack is either
1580 +@@ -519,7 +519,7 @@ SYM_CODE_START(\asmsym)
1581 +
1582 + /* Switch to the regular task stack */
1583 + .Lfrom_usermode_switch_stack_\@:
1584 +- idtentry_body safe_stack_\cfunc, has_error_code=1
1585 ++ idtentry_body user_\cfunc, has_error_code=1
1586 +
1587 + _ASM_NOKPROBE(\asmsym)
1588 + SYM_CODE_END(\asmsym)
1589 +diff --git a/arch/x86/include/asm/idtentry.h b/arch/x86/include/asm/idtentry.h
1590 +index 0e3325790f3a9..dc2a8b1657f4a 100644
1591 +--- a/arch/x86/include/asm/idtentry.h
1592 ++++ b/arch/x86/include/asm/idtentry.h
1593 +@@ -315,8 +315,8 @@ static __always_inline void __##func(struct pt_regs *regs)
1594 + */
1595 + #define DECLARE_IDTENTRY_VC(vector, func) \
1596 + DECLARE_IDTENTRY_RAW_ERRORCODE(vector, func); \
1597 +- __visible noinstr void ist_##func(struct pt_regs *regs, unsigned long error_code); \
1598 +- __visible noinstr void safe_stack_##func(struct pt_regs *regs, unsigned long error_code)
1599 ++ __visible noinstr void kernel_##func(struct pt_regs *regs, unsigned long error_code); \
1600 ++ __visible noinstr void user_##func(struct pt_regs *regs, unsigned long error_code)
1601 +
1602 + /**
1603 + * DEFINE_IDTENTRY_IST - Emit code for IST entry points
1604 +@@ -358,33 +358,24 @@ static __always_inline void __##func(struct pt_regs *regs)
1605 + DEFINE_IDTENTRY_RAW_ERRORCODE(func)
1606 +
1607 + /**
1608 +- * DEFINE_IDTENTRY_VC_SAFE_STACK - Emit code for VMM communication handler
1609 +- which runs on a safe stack.
1610 ++ * DEFINE_IDTENTRY_VC_KERNEL - Emit code for VMM communication handler
1611 ++ when raised from kernel mode
1612 + * @func: Function name of the entry point
1613 + *
1614 + * Maps to DEFINE_IDTENTRY_RAW_ERRORCODE
1615 + */
1616 +-#define DEFINE_IDTENTRY_VC_SAFE_STACK(func) \
1617 +- DEFINE_IDTENTRY_RAW_ERRORCODE(safe_stack_##func)
1618 ++#define DEFINE_IDTENTRY_VC_KERNEL(func) \
1619 ++ DEFINE_IDTENTRY_RAW_ERRORCODE(kernel_##func)
1620 +
1621 + /**
1622 +- * DEFINE_IDTENTRY_VC_IST - Emit code for VMM communication handler
1623 +- which runs on the VC fall-back stack
1624 ++ * DEFINE_IDTENTRY_VC_USER - Emit code for VMM communication handler
1625 ++ when raised from user mode
1626 + * @func: Function name of the entry point
1627 + *
1628 + * Maps to DEFINE_IDTENTRY_RAW_ERRORCODE
1629 + */
1630 +-#define DEFINE_IDTENTRY_VC_IST(func) \
1631 +- DEFINE_IDTENTRY_RAW_ERRORCODE(ist_##func)
1632 +-
1633 +-/**
1634 +- * DEFINE_IDTENTRY_VC - Emit code for VMM communication handler
1635 +- * @func: Function name of the entry point
1636 +- *
1637 +- * Maps to DEFINE_IDTENTRY_RAW_ERRORCODE
1638 +- */
1639 +-#define DEFINE_IDTENTRY_VC(func) \
1640 +- DEFINE_IDTENTRY_RAW_ERRORCODE(func)
1641 ++#define DEFINE_IDTENTRY_VC_USER(func) \
1642 ++ DEFINE_IDTENTRY_RAW_ERRORCODE(user_##func)
1643 +
1644 + #else /* CONFIG_X86_64 */
1645 +
1646 +diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
1647 +index d1ac2de41ea8a..b1cd8334db11a 100644
1648 +--- a/arch/x86/include/asm/kvm_host.h
1649 ++++ b/arch/x86/include/asm/kvm_host.h
1650 +@@ -84,7 +84,7 @@
1651 + #define KVM_REQ_APICV_UPDATE \
1652 + KVM_ARCH_REQ_FLAGS(25, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
1653 + #define KVM_REQ_TLB_FLUSH_CURRENT KVM_ARCH_REQ(26)
1654 +-#define KVM_REQ_HV_TLB_FLUSH \
1655 ++#define KVM_REQ_TLB_FLUSH_GUEST \
1656 + KVM_ARCH_REQ_FLAGS(27, KVM_REQUEST_NO_WAKEUP)
1657 + #define KVM_REQ_APF_READY KVM_ARCH_REQ(28)
1658 + #define KVM_REQ_MSR_FILTER_CHANGED KVM_ARCH_REQ(29)
1659 +diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
1660 +index 69485ca13665f..a334dd0d7c42c 100644
1661 +--- a/arch/x86/include/asm/preempt.h
1662 ++++ b/arch/x86/include/asm/preempt.h
1663 +@@ -43,7 +43,7 @@ static __always_inline void preempt_count_set(int pc)
1664 + #define init_task_preempt_count(p) do { } while (0)
1665 +
1666 + #define init_idle_preempt_count(p, cpu) do { \
1667 +- per_cpu(__preempt_count, (cpu)) = PREEMPT_ENABLED; \
1668 ++ per_cpu(__preempt_count, (cpu)) = PREEMPT_DISABLED; \
1669 + } while (0)
1670 +
1671 + /*
1672 +diff --git a/arch/x86/include/uapi/asm/hwcap2.h b/arch/x86/include/uapi/asm/hwcap2.h
1673 +index 5fdfcb47000f9..054604aba9f00 100644
1674 +--- a/arch/x86/include/uapi/asm/hwcap2.h
1675 ++++ b/arch/x86/include/uapi/asm/hwcap2.h
1676 +@@ -2,10 +2,12 @@
1677 + #ifndef _ASM_X86_HWCAP2_H
1678 + #define _ASM_X86_HWCAP2_H
1679 +
1680 ++#include <linux/const.h>
1681 ++
1682 + /* MONITOR/MWAIT enabled in Ring 3 */
1683 +-#define HWCAP2_RING3MWAIT (1 << 0)
1684 ++#define HWCAP2_RING3MWAIT _BITUL(0)
1685 +
1686 + /* Kernel allows FSGSBASE instructions available in Ring 3 */
1687 +-#define HWCAP2_FSGSBASE BIT(1)
1688 ++#define HWCAP2_FSGSBASE _BITUL(1)
1689 +
1690 + #endif
1691 +diff --git a/arch/x86/kernel/sev-es.c b/arch/x86/kernel/sev-es.c
1692 +index e0cdab7cb632b..f3202b2e3c157 100644
1693 +--- a/arch/x86/kernel/sev-es.c
1694 ++++ b/arch/x86/kernel/sev-es.c
1695 +@@ -12,7 +12,6 @@
1696 + #include <linux/sched/debug.h> /* For show_regs() */
1697 + #include <linux/percpu-defs.h>
1698 + #include <linux/mem_encrypt.h>
1699 +-#include <linux/lockdep.h>
1700 + #include <linux/printk.h>
1701 + #include <linux/mm_types.h>
1702 + #include <linux/set_memory.h>
1703 +@@ -180,11 +179,19 @@ void noinstr __sev_es_ist_exit(void)
1704 + this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], *(unsigned long *)ist);
1705 + }
1706 +
1707 +-static __always_inline struct ghcb *sev_es_get_ghcb(struct ghcb_state *state)
1708 ++/*
1709 ++ * Nothing shall interrupt this code path while holding the per-CPU
1710 ++ * GHCB. The backup GHCB is only for NMIs interrupting this path.
1711 ++ *
1712 ++ * Callers must disable local interrupts around it.
1713 ++ */
1714 ++static noinstr struct ghcb *__sev_get_ghcb(struct ghcb_state *state)
1715 + {
1716 + struct sev_es_runtime_data *data;
1717 + struct ghcb *ghcb;
1718 +
1719 ++ WARN_ON(!irqs_disabled());
1720 ++
1721 + data = this_cpu_read(runtime_data);
1722 + ghcb = &data->ghcb_page;
1723 +
1724 +@@ -201,7 +208,9 @@ static __always_inline struct ghcb *sev_es_get_ghcb(struct ghcb_state *state)
1725 + data->ghcb_active = false;
1726 + data->backup_ghcb_active = false;
1727 +
1728 ++ instrumentation_begin();
1729 + panic("Unable to handle #VC exception! GHCB and Backup GHCB are already in use");
1730 ++ instrumentation_end();
1731 + }
1732 +
1733 + /* Mark backup_ghcb active before writing to it */
1734 +@@ -452,11 +461,13 @@ static enum es_result vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt
1735 + /* Include code shared with pre-decompression boot stage */
1736 + #include "sev-es-shared.c"
1737 +
1738 +-static __always_inline void sev_es_put_ghcb(struct ghcb_state *state)
1739 ++static noinstr void __sev_put_ghcb(struct ghcb_state *state)
1740 + {
1741 + struct sev_es_runtime_data *data;
1742 + struct ghcb *ghcb;
1743 +
1744 ++ WARN_ON(!irqs_disabled());
1745 ++
1746 + data = this_cpu_read(runtime_data);
1747 + ghcb = &data->ghcb_page;
1748 +
1749 +@@ -480,7 +491,7 @@ void noinstr __sev_es_nmi_complete(void)
1750 + struct ghcb_state state;
1751 + struct ghcb *ghcb;
1752 +
1753 +- ghcb = sev_es_get_ghcb(&state);
1754 ++ ghcb = __sev_get_ghcb(&state);
1755 +
1756 + vc_ghcb_invalidate(ghcb);
1757 + ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_NMI_COMPLETE);
1758 +@@ -490,7 +501,7 @@ void noinstr __sev_es_nmi_complete(void)
1759 + sev_es_wr_ghcb_msr(__pa_nodebug(ghcb));
1760 + VMGEXIT();
1761 +
1762 +- sev_es_put_ghcb(&state);
1763 ++ __sev_put_ghcb(&state);
1764 + }
1765 +
1766 + static u64 get_jump_table_addr(void)
1767 +@@ -502,7 +513,7 @@ static u64 get_jump_table_addr(void)
1768 +
1769 + local_irq_save(flags);
1770 +
1771 +- ghcb = sev_es_get_ghcb(&state);
1772 ++ ghcb = __sev_get_ghcb(&state);
1773 +
1774 + vc_ghcb_invalidate(ghcb);
1775 + ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_JUMP_TABLE);
1776 +@@ -516,7 +527,7 @@ static u64 get_jump_table_addr(void)
1777 + ghcb_sw_exit_info_2_is_valid(ghcb))
1778 + ret = ghcb->save.sw_exit_info_2;
1779 +
1780 +- sev_es_put_ghcb(&state);
1781 ++ __sev_put_ghcb(&state);
1782 +
1783 + local_irq_restore(flags);
1784 +
1785 +@@ -641,7 +652,7 @@ static void sev_es_ap_hlt_loop(void)
1786 + struct ghcb_state state;
1787 + struct ghcb *ghcb;
1788 +
1789 +- ghcb = sev_es_get_ghcb(&state);
1790 ++ ghcb = __sev_get_ghcb(&state);
1791 +
1792 + while (true) {
1793 + vc_ghcb_invalidate(ghcb);
1794 +@@ -658,7 +669,7 @@ static void sev_es_ap_hlt_loop(void)
1795 + break;
1796 + }
1797 +
1798 +- sev_es_put_ghcb(&state);
1799 ++ __sev_put_ghcb(&state);
1800 + }
1801 +
1802 + /*
1803 +@@ -748,7 +759,7 @@ void __init sev_es_init_vc_handling(void)
1804 + sev_es_setup_play_dead();
1805 +
1806 + /* Secondary CPUs use the runtime #VC handler */
1807 +- initial_vc_handler = (unsigned long)safe_stack_exc_vmm_communication;
1808 ++ initial_vc_handler = (unsigned long)kernel_exc_vmm_communication;
1809 + }
1810 +
1811 + static void __init vc_early_forward_exception(struct es_em_ctxt *ctxt)
1812 +@@ -1186,14 +1197,6 @@ static enum es_result vc_handle_trap_ac(struct ghcb *ghcb,
1813 + return ES_EXCEPTION;
1814 + }
1815 +
1816 +-static __always_inline void vc_handle_trap_db(struct pt_regs *regs)
1817 +-{
1818 +- if (user_mode(regs))
1819 +- noist_exc_debug(regs);
1820 +- else
1821 +- exc_debug(regs);
1822 +-}
1823 +-
1824 + static enum es_result vc_handle_exitcode(struct es_em_ctxt *ctxt,
1825 + struct ghcb *ghcb,
1826 + unsigned long exit_code)
1827 +@@ -1289,44 +1292,15 @@ static __always_inline bool on_vc_fallback_stack(struct pt_regs *regs)
1828 + return (sp >= __this_cpu_ist_bottom_va(VC2) && sp < __this_cpu_ist_top_va(VC2));
1829 + }
1830 +
1831 +-/*
1832 +- * Main #VC exception handler. It is called when the entry code was able to
1833 +- * switch off the IST to a safe kernel stack.
1834 +- *
1835 +- * With the current implementation it is always possible to switch to a safe
1836 +- * stack because #VC exceptions only happen at known places, like intercepted
1837 +- * instructions or accesses to MMIO areas/IO ports. They can also happen with
1838 +- * code instrumentation when the hypervisor intercepts #DB, but the critical
1839 +- * paths are forbidden to be instrumented, so #DB exceptions currently also
1840 +- * only happen in safe places.
1841 +- */
1842 +-DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
1843 ++static bool vc_raw_handle_exception(struct pt_regs *regs, unsigned long error_code)
1844 + {
1845 +- irqentry_state_t irq_state;
1846 + struct ghcb_state state;
1847 + struct es_em_ctxt ctxt;
1848 + enum es_result result;
1849 + struct ghcb *ghcb;
1850 ++ bool ret = true;
1851 +
1852 +- /*
1853 +- * Handle #DB before calling into !noinstr code to avoid recursive #DB.
1854 +- */
1855 +- if (error_code == SVM_EXIT_EXCP_BASE + X86_TRAP_DB) {
1856 +- vc_handle_trap_db(regs);
1857 +- return;
1858 +- }
1859 +-
1860 +- irq_state = irqentry_nmi_enter(regs);
1861 +- lockdep_assert_irqs_disabled();
1862 +- instrumentation_begin();
1863 +-
1864 +- /*
1865 +- * This is invoked through an interrupt gate, so IRQs are disabled. The
1866 +- * code below might walk page-tables for user or kernel addresses, so
1867 +- * keep the IRQs disabled to protect us against concurrent TLB flushes.
1868 +- */
1869 +-
1870 +- ghcb = sev_es_get_ghcb(&state);
1871 ++ ghcb = __sev_get_ghcb(&state);
1872 +
1873 + vc_ghcb_invalidate(ghcb);
1874 + result = vc_init_em_ctxt(&ctxt, regs, error_code);
1875 +@@ -1334,7 +1308,7 @@ DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
1876 + if (result == ES_OK)
1877 + result = vc_handle_exitcode(&ctxt, ghcb, error_code);
1878 +
1879 +- sev_es_put_ghcb(&state);
1880 ++ __sev_put_ghcb(&state);
1881 +
1882 + /* Done - now check the result */
1883 + switch (result) {
1884 +@@ -1344,15 +1318,18 @@ DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
1885 + case ES_UNSUPPORTED:
1886 + pr_err_ratelimited("Unsupported exit-code 0x%02lx in early #VC exception (IP: 0x%lx)\n",
1887 + error_code, regs->ip);
1888 +- goto fail;
1889 ++ ret = false;
1890 ++ break;
1891 + case ES_VMM_ERROR:
1892 + pr_err_ratelimited("Failure in communication with VMM (exit-code 0x%02lx IP: 0x%lx)\n",
1893 + error_code, regs->ip);
1894 +- goto fail;
1895 ++ ret = false;
1896 ++ break;
1897 + case ES_DECODE_FAILED:
1898 + pr_err_ratelimited("Failed to decode instruction (exit-code 0x%02lx IP: 0x%lx)\n",
1899 + error_code, regs->ip);
1900 +- goto fail;
1901 ++ ret = false;
1902 ++ break;
1903 + case ES_EXCEPTION:
1904 + vc_forward_exception(&ctxt);
1905 + break;
1906 +@@ -1368,24 +1345,52 @@ DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
1907 + BUG();
1908 + }
1909 +
1910 +-out:
1911 +- instrumentation_end();
1912 +- irqentry_nmi_exit(regs, irq_state);
1913 ++ return ret;
1914 ++}
1915 +
1916 +- return;
1917 ++static __always_inline bool vc_is_db(unsigned long error_code)
1918 ++{
1919 ++ return error_code == SVM_EXIT_EXCP_BASE + X86_TRAP_DB;
1920 ++}
1921 +
1922 +-fail:
1923 +- if (user_mode(regs)) {
1924 +- /*
1925 +- * Do not kill the machine if user-space triggered the
1926 +- * exception. Send SIGBUS instead and let user-space deal with
1927 +- * it.
1928 +- */
1929 +- force_sig_fault(SIGBUS, BUS_OBJERR, (void __user *)0);
1930 +- } else {
1931 +- pr_emerg("PANIC: Unhandled #VC exception in kernel space (result=%d)\n",
1932 +- result);
1933 ++/*
1934 ++ * Runtime #VC exception handler when raised from kernel mode. Runs in NMI mode
1935 ++ * and will panic when an error happens.
1936 ++ */
1937 ++DEFINE_IDTENTRY_VC_KERNEL(exc_vmm_communication)
1938 ++{
1939 ++ irqentry_state_t irq_state;
1940 +
1941 ++ /*
1942 ++ * With the current implementation it is always possible to switch to a
1943 ++ * safe stack because #VC exceptions only happen at known places, like
1944 ++ * intercepted instructions or accesses to MMIO areas/IO ports. They can
1945 ++ * also happen with code instrumentation when the hypervisor intercepts
1946 ++ * #DB, but the critical paths are forbidden to be instrumented, so #DB
1947 ++ * exceptions currently also only happen in safe places.
1948 ++ *
1949 ++ * But keep this here in case the noinstr annotations are violated due
1950 ++ * to bug elsewhere.
1951 ++ */
1952 ++ if (unlikely(on_vc_fallback_stack(regs))) {
1953 ++ instrumentation_begin();
1954 ++ panic("Can't handle #VC exception from unsupported context\n");
1955 ++ instrumentation_end();
1956 ++ }
1957 ++
1958 ++ /*
1959 ++ * Handle #DB before calling into !noinstr code to avoid recursive #DB.
1960 ++ */
1961 ++ if (vc_is_db(error_code)) {
1962 ++ exc_debug(regs);
1963 ++ return;
1964 ++ }
1965 ++
1966 ++ irq_state = irqentry_nmi_enter(regs);
1967 ++
1968 ++ instrumentation_begin();
1969 ++
1970 ++ if (!vc_raw_handle_exception(regs, error_code)) {
1971 + /* Show some debug info */
1972 + show_regs(regs);
1973 +
1974 +@@ -1396,23 +1401,38 @@ fail:
1975 + panic("Returned from Terminate-Request to Hypervisor\n");
1976 + }
1977 +
1978 +- goto out;
1979 ++ instrumentation_end();
1980 ++ irqentry_nmi_exit(regs, irq_state);
1981 + }
1982 +
1983 +-/* This handler runs on the #VC fall-back stack. It can cause further #VC exceptions */
1984 +-DEFINE_IDTENTRY_VC_IST(exc_vmm_communication)
1985 ++/*
1986 ++ * Runtime #VC exception handler when raised from user mode. Runs in IRQ mode
1987 ++ * and will kill the current task with SIGBUS when an error happens.
1988 ++ */
1989 ++DEFINE_IDTENTRY_VC_USER(exc_vmm_communication)
1990 + {
1991 ++ /*
1992 ++ * Handle #DB before calling into !noinstr code to avoid recursive #DB.
1993 ++ */
1994 ++ if (vc_is_db(error_code)) {
1995 ++ noist_exc_debug(regs);
1996 ++ return;
1997 ++ }
1998 ++
1999 ++ irqentry_enter_from_user_mode(regs);
2000 + instrumentation_begin();
2001 +- panic("Can't handle #VC exception from unsupported context\n");
2002 +- instrumentation_end();
2003 +-}
2004 +
2005 +-DEFINE_IDTENTRY_VC(exc_vmm_communication)
2006 +-{
2007 +- if (likely(!on_vc_fallback_stack(regs)))
2008 +- safe_stack_exc_vmm_communication(regs, error_code);
2009 +- else
2010 +- ist_exc_vmm_communication(regs, error_code);
2011 ++ if (!vc_raw_handle_exception(regs, error_code)) {
2012 ++ /*
2013 ++ * Do not kill the machine if user-space triggered the
2014 ++ * exception. Send SIGBUS instead and let user-space deal with
2015 ++ * it.
2016 ++ */
2017 ++ force_sig_fault(SIGBUS, BUS_OBJERR, (void __user *)0);
2018 ++ }
2019 ++
2020 ++ instrumentation_end();
2021 ++ irqentry_exit_to_user_mode(regs);
2022 + }
2023 +
2024 + bool __init handle_vc_boot_ghcb(struct pt_regs *regs)
2025 +diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
2026 +index 582387fc939f4..8baff500914ea 100644
2027 +--- a/arch/x86/kernel/smpboot.c
2028 ++++ b/arch/x86/kernel/smpboot.c
2029 +@@ -230,7 +230,6 @@ static void notrace start_secondary(void *unused)
2030 + cpu_init_exception_handling();
2031 + cpu_init();
2032 + x86_cpuinit.early_percpu_clock_init();
2033 +- preempt_disable();
2034 + smp_callin();
2035 +
2036 + enable_start_cpu0 = 0;
2037 +diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
2038 +index f70dffc2771f5..56289170753c5 100644
2039 +--- a/arch/x86/kernel/tsc.c
2040 ++++ b/arch/x86/kernel/tsc.c
2041 +@@ -1151,7 +1151,8 @@ static struct clocksource clocksource_tsc = {
2042 + .mask = CLOCKSOURCE_MASK(64),
2043 + .flags = CLOCK_SOURCE_IS_CONTINUOUS |
2044 + CLOCK_SOURCE_VALID_FOR_HRES |
2045 +- CLOCK_SOURCE_MUST_VERIFY,
2046 ++ CLOCK_SOURCE_MUST_VERIFY |
2047 ++ CLOCK_SOURCE_VERIFY_PERCPU,
2048 + .vdso_clock_mode = VDSO_CLOCKMODE_TSC,
2049 + .enable = tsc_cs_enable,
2050 + .resume = tsc_resume,
2051 +diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
2052 +index 5c7c4060b45cb..bb39f493447cf 100644
2053 +--- a/arch/x86/kvm/hyperv.c
2054 ++++ b/arch/x86/kvm/hyperv.c
2055 +@@ -1564,7 +1564,7 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa,
2056 + * vcpu->arch.cr3 may not be up-to-date for running vCPUs so we can't
2057 + * analyze it here, flush TLB regardless of the specified address space.
2058 + */
2059 +- kvm_make_vcpus_request_mask(kvm, KVM_REQ_HV_TLB_FLUSH,
2060 ++ kvm_make_vcpus_request_mask(kvm, KVM_REQ_TLB_FLUSH_GUEST,
2061 + NULL, vcpu_mask, &hv_vcpu->tlb_flush);
2062 +
2063 + ret_success:
2064 +diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
2065 +index f2eeaf197294d..7e6dc454ea28d 100644
2066 +--- a/arch/x86/kvm/mmu/mmu.c
2067 ++++ b/arch/x86/kvm/mmu/mmu.c
2068 +@@ -4133,7 +4133,15 @@ static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
2069 + void
2070 + reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
2071 + {
2072 +- bool uses_nx = context->nx ||
2073 ++ /*
2074 ++ * KVM uses NX when TDP is disabled to handle a variety of scenarios,
2075 ++ * notably for huge SPTEs if iTLB multi-hit mitigation is enabled and
2076 ++ * to generate correct permissions for CR0.WP=0/CR4.SMEP=1/EFER.NX=0.
2077 ++ * The iTLB multi-hit workaround can be toggled at any time, so assume
2078 ++ * NX can be used by any non-nested shadow MMU to avoid having to reset
2079 ++ * MMU contexts. Note, KVM forces EFER.NX=1 when TDP is disabled.
2080 ++ */
2081 ++ bool uses_nx = context->nx || !tdp_enabled ||
2082 + context->mmu_role.base.smep_andnot_wp;
2083 + struct rsvd_bits_validate *shadow_zero_check;
2084 + int i;
2085 +diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
2086 +index 00a0bfaed6e86..d6cd702e85b68 100644
2087 +--- a/arch/x86/kvm/mmu/paging_tmpl.h
2088 ++++ b/arch/x86/kvm/mmu/paging_tmpl.h
2089 +@@ -471,8 +471,7 @@ retry_walk:
2090 +
2091 + error:
2092 + errcode |= write_fault | user_fault;
2093 +- if (fetch_fault && (mmu->nx ||
2094 +- kvm_read_cr4_bits(vcpu, X86_CR4_SMEP)))
2095 ++ if (fetch_fault && (mmu->nx || mmu->mmu_role.ext.cr4_smep))
2096 + errcode |= PFERR_FETCH_MASK;
2097 +
2098 + walker->fault.vector = PF_VECTOR;
2099 +diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
2100 +index 61c00f8631f1a..f2ddf663e72e9 100644
2101 +--- a/arch/x86/kvm/mmu/tdp_mmu.c
2102 ++++ b/arch/x86/kvm/mmu/tdp_mmu.c
2103 +@@ -527,7 +527,7 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, int write,
2104 + kvm_pfn_t pfn, bool prefault)
2105 + {
2106 + u64 new_spte;
2107 +- int ret = 0;
2108 ++ int ret = RET_PF_FIXED;
2109 + int make_spte_ret = 0;
2110 +
2111 + if (unlikely(is_noslot_pfn(pfn))) {
2112 +diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
2113 +index 32e6f33c2c45b..67554bc7adb26 100644
2114 +--- a/arch/x86/kvm/vmx/nested.c
2115 ++++ b/arch/x86/kvm/vmx/nested.c
2116 +@@ -1142,12 +1142,19 @@ static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool ne
2117 +
2118 + /*
2119 + * Unconditionally skip the TLB flush on fast CR3 switch, all TLB
2120 +- * flushes are handled by nested_vmx_transition_tlb_flush(). See
2121 +- * nested_vmx_transition_mmu_sync for details on skipping the MMU sync.
2122 ++ * flushes are handled by nested_vmx_transition_tlb_flush().
2123 + */
2124 +- if (!nested_ept)
2125 +- kvm_mmu_new_pgd(vcpu, cr3, true,
2126 +- !nested_vmx_transition_mmu_sync(vcpu));
2127 ++ if (!nested_ept) {
2128 ++ kvm_mmu_new_pgd(vcpu, cr3, true, true);
2129 ++
2130 ++ /*
2131 ++ * A TLB flush on VM-Enter/VM-Exit flushes all linear mappings
2132 ++ * across all PCIDs, i.e. all PGDs need to be synchronized.
2133 ++ * See nested_vmx_transition_mmu_sync() for more details.
2134 ++ */
2135 ++ if (nested_vmx_transition_mmu_sync(vcpu))
2136 ++ kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
2137 ++ }
2138 +
2139 + vcpu->arch.cr3 = cr3;
2140 + kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);
2141 +@@ -5477,8 +5484,6 @@ static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu,
2142 + {
2143 + u32 index = kvm_rcx_read(vcpu);
2144 + u64 new_eptp;
2145 +- bool accessed_dirty;
2146 +- struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
2147 +
2148 + if (!nested_cpu_has_eptp_switching(vmcs12) ||
2149 + !nested_cpu_has_ept(vmcs12))
2150 +@@ -5487,13 +5492,10 @@ static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu,
2151 + if (index >= VMFUNC_EPTP_ENTRIES)
2152 + return 1;
2153 +
2154 +-
2155 + if (kvm_vcpu_read_guest_page(vcpu, vmcs12->eptp_list_address >> PAGE_SHIFT,
2156 + &new_eptp, index * 8, 8))
2157 + return 1;
2158 +
2159 +- accessed_dirty = !!(new_eptp & VMX_EPTP_AD_ENABLE_BIT);
2160 +-
2161 + /*
2162 + * If the (L2) guest does a vmfunc to the currently
2163 + * active ept pointer, we don't have to do anything else
2164 +@@ -5502,8 +5504,6 @@ static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu,
2165 + if (!nested_vmx_check_eptp(vcpu, new_eptp))
2166 + return 1;
2167 +
2168 +- mmu->ept_ad = accessed_dirty;
2169 +- mmu->mmu_role.base.ad_disabled = !accessed_dirty;
2170 + vmcs12->ept_pointer = new_eptp;
2171 +
2172 + kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
2173 +@@ -5529,7 +5529,7 @@ static int handle_vmfunc(struct kvm_vcpu *vcpu)
2174 + }
2175 +
2176 + vmcs12 = get_vmcs12(vcpu);
2177 +- if ((vmcs12->vm_function_control & (1 << function)) == 0)
2178 ++ if (!(vmcs12->vm_function_control & BIT_ULL(function)))
2179 + goto fail;
2180 +
2181 + switch (function) {
2182 +@@ -5787,6 +5787,9 @@ static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu,
2183 + else if (is_breakpoint(intr_info) &&
2184 + vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
2185 + return true;
2186 ++ else if (is_alignment_check(intr_info) &&
2187 ++ !vmx_guest_inject_ac(vcpu))
2188 ++ return true;
2189 + return false;
2190 + case EXIT_REASON_EXTERNAL_INTERRUPT:
2191 + return true;
2192 +diff --git a/arch/x86/kvm/vmx/vmcs.h b/arch/x86/kvm/vmx/vmcs.h
2193 +index 1472c6c376f74..571d9ad80a59e 100644
2194 +--- a/arch/x86/kvm/vmx/vmcs.h
2195 ++++ b/arch/x86/kvm/vmx/vmcs.h
2196 +@@ -117,6 +117,11 @@ static inline bool is_gp_fault(u32 intr_info)
2197 + return is_exception_n(intr_info, GP_VECTOR);
2198 + }
2199 +
2200 ++static inline bool is_alignment_check(u32 intr_info)
2201 ++{
2202 ++ return is_exception_n(intr_info, AC_VECTOR);
2203 ++}
2204 ++
2205 + static inline bool is_machine_check(u32 intr_info)
2206 + {
2207 + return is_exception_n(intr_info, MC_VECTOR);
2208 +diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
2209 +index 45877364e6829..de24d3826788a 100644
2210 +--- a/arch/x86/kvm/vmx/vmx.c
2211 ++++ b/arch/x86/kvm/vmx/vmx.c
2212 +@@ -4755,7 +4755,7 @@ static int handle_machine_check(struct kvm_vcpu *vcpu)
2213 + * - Guest has #AC detection enabled in CR0
2214 + * - Guest EFLAGS has AC bit set
2215 + */
2216 +-static inline bool guest_inject_ac(struct kvm_vcpu *vcpu)
2217 ++bool vmx_guest_inject_ac(struct kvm_vcpu *vcpu)
2218 + {
2219 + if (!boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT))
2220 + return true;
2221 +@@ -4864,7 +4864,7 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
2222 + kvm_run->debug.arch.exception = ex_no;
2223 + break;
2224 + case AC_VECTOR:
2225 +- if (guest_inject_ac(vcpu)) {
2226 ++ if (vmx_guest_inject_ac(vcpu)) {
2227 + kvm_queue_exception_e(vcpu, AC_VECTOR, error_code);
2228 + return 1;
2229 + }
2230 +diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
2231 +index ae3a89ac0600d..73d87d44b6578 100644
2232 +--- a/arch/x86/kvm/vmx/vmx.h
2233 ++++ b/arch/x86/kvm/vmx/vmx.h
2234 +@@ -352,6 +352,7 @@ void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
2235 + u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa,
2236 + int root_level);
2237 +
2238 ++bool vmx_guest_inject_ac(struct kvm_vcpu *vcpu);
2239 + void update_exception_bitmap(struct kvm_vcpu *vcpu);
2240 + void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu);
2241 + bool vmx_nmi_blocked(struct kvm_vcpu *vcpu);
2242 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
2243 +index d3372cb973079..7bf88e6cbd0e9 100644
2244 +--- a/arch/x86/kvm/x86.c
2245 ++++ b/arch/x86/kvm/x86.c
2246 +@@ -8852,7 +8852,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
2247 + }
2248 + if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
2249 + kvm_vcpu_flush_tlb_current(vcpu);
2250 +- if (kvm_check_request(KVM_REQ_HV_TLB_FLUSH, vcpu))
2251 ++ if (kvm_check_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu))
2252 + kvm_vcpu_flush_tlb_guest(vcpu);
2253 +
2254 + if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
2255 +diff --git a/arch/xtensa/kernel/smp.c b/arch/xtensa/kernel/smp.c
2256 +index cd85a7a2722ba..1254da07ead1f 100644
2257 +--- a/arch/xtensa/kernel/smp.c
2258 ++++ b/arch/xtensa/kernel/smp.c
2259 +@@ -145,7 +145,6 @@ void secondary_start_kernel(void)
2260 + cpumask_set_cpu(cpu, mm_cpumask(mm));
2261 + enter_lazy_tlb(mm, current);
2262 +
2263 +- preempt_disable();
2264 + trace_hardirqs_off();
2265 +
2266 + calibrate_delay();
2267 +diff --git a/block/blk-flush.c b/block/blk-flush.c
2268 +index fd5cee9f1a3be..7ee7e5e8905d5 100644
2269 +--- a/block/blk-flush.c
2270 ++++ b/block/blk-flush.c
2271 +@@ -220,8 +220,6 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
2272 + unsigned long flags = 0;
2273 + struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);
2274 +
2275 +- blk_account_io_flush(flush_rq);
2276 +-
2277 + /* release the tag's ownership to the req cloned from */
2278 + spin_lock_irqsave(&fq->mq_flush_lock, flags);
2279 +
2280 +@@ -231,6 +229,7 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
2281 + return;
2282 + }
2283 +
2284 ++ blk_account_io_flush(flush_rq);
2285 + /*
2286 + * Flush request has to be marked as IDLE when it is really ended
2287 + * because its .end_io() is called from timeout code path too for
2288 +diff --git a/block/blk-merge.c b/block/blk-merge.c
2289 +index 7cdd566966473..349cd7d3af815 100644
2290 +--- a/block/blk-merge.c
2291 ++++ b/block/blk-merge.c
2292 +@@ -552,10 +552,14 @@ static inline unsigned int blk_rq_get_max_segments(struct request *rq)
2293 + static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
2294 + unsigned int nr_phys_segs)
2295 + {
2296 +- if (req->nr_phys_segments + nr_phys_segs > blk_rq_get_max_segments(req))
2297 ++ if (blk_integrity_merge_bio(req->q, req, bio) == false)
2298 + goto no_merge;
2299 +
2300 +- if (blk_integrity_merge_bio(req->q, req, bio) == false)
2301 ++ /* discard request merge won't add new segment */
2302 ++ if (req_op(req) == REQ_OP_DISCARD)
2303 ++ return 1;
2304 ++
2305 ++ if (req->nr_phys_segments + nr_phys_segs > blk_rq_get_max_segments(req))
2306 + goto no_merge;
2307 +
2308 + /*
2309 +diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
2310 +index 9c92053e704dc..c4f2f6c123aed 100644
2311 +--- a/block/blk-mq-tag.c
2312 ++++ b/block/blk-mq-tag.c
2313 +@@ -199,6 +199,20 @@ struct bt_iter_data {
2314 + bool reserved;
2315 + };
2316 +
2317 ++static struct request *blk_mq_find_and_get_req(struct blk_mq_tags *tags,
2318 ++ unsigned int bitnr)
2319 ++{
2320 ++ struct request *rq;
2321 ++ unsigned long flags;
2322 ++
2323 ++ spin_lock_irqsave(&tags->lock, flags);
2324 ++ rq = tags->rqs[bitnr];
2325 ++ if (!rq || !refcount_inc_not_zero(&rq->ref))
2326 ++ rq = NULL;
2327 ++ spin_unlock_irqrestore(&tags->lock, flags);
2328 ++ return rq;
2329 ++}
2330 ++
2331 + static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
2332 + {
2333 + struct bt_iter_data *iter_data = data;
2334 +@@ -206,18 +220,22 @@ static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
2335 + struct blk_mq_tags *tags = hctx->tags;
2336 + bool reserved = iter_data->reserved;
2337 + struct request *rq;
2338 ++ bool ret = true;
2339 +
2340 + if (!reserved)
2341 + bitnr += tags->nr_reserved_tags;
2342 +- rq = tags->rqs[bitnr];
2343 +-
2344 + /*
2345 + * We can hit rq == NULL here, because the tagging functions
2346 + * test and set the bit before assigning ->rqs[].
2347 + */
2348 +- if (rq && rq->q == hctx->queue && rq->mq_hctx == hctx)
2349 +- return iter_data->fn(hctx, rq, iter_data->data, reserved);
2350 +- return true;
2351 ++ rq = blk_mq_find_and_get_req(tags, bitnr);
2352 ++ if (!rq)
2353 ++ return true;
2354 ++
2355 ++ if (rq->q == hctx->queue && rq->mq_hctx == hctx)
2356 ++ ret = iter_data->fn(hctx, rq, iter_data->data, reserved);
2357 ++ blk_mq_put_rq_ref(rq);
2358 ++ return ret;
2359 + }
2360 +
2361 + /**
2362 +@@ -264,6 +282,8 @@ static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
2363 + struct blk_mq_tags *tags = iter_data->tags;
2364 + bool reserved = iter_data->flags & BT_TAG_ITER_RESERVED;
2365 + struct request *rq;
2366 ++ bool ret = true;
2367 ++ bool iter_static_rqs = !!(iter_data->flags & BT_TAG_ITER_STATIC_RQS);
2368 +
2369 + if (!reserved)
2370 + bitnr += tags->nr_reserved_tags;
2371 +@@ -272,16 +292,19 @@ static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
2372 + * We can hit rq == NULL here, because the tagging functions
2373 + * test and set the bit before assigning ->rqs[].
2374 + */
2375 +- if (iter_data->flags & BT_TAG_ITER_STATIC_RQS)
2376 ++ if (iter_static_rqs)
2377 + rq = tags->static_rqs[bitnr];
2378 + else
2379 +- rq = tags->rqs[bitnr];
2380 ++ rq = blk_mq_find_and_get_req(tags, bitnr);
2381 + if (!rq)
2382 + return true;
2383 +- if ((iter_data->flags & BT_TAG_ITER_STARTED) &&
2384 +- !blk_mq_request_started(rq))
2385 +- return true;
2386 +- return iter_data->fn(rq, iter_data->data, reserved);
2387 ++
2388 ++ if (!(iter_data->flags & BT_TAG_ITER_STARTED) ||
2389 ++ blk_mq_request_started(rq))
2390 ++ ret = iter_data->fn(rq, iter_data->data, reserved);
2391 ++ if (!iter_static_rqs)
2392 ++ blk_mq_put_rq_ref(rq);
2393 ++ return ret;
2394 + }
2395 +
2396 + /**
2397 +@@ -348,6 +371,9 @@ void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
2398 + * indicates whether or not @rq is a reserved request. Return
2399 + * true to continue iterating tags, false to stop.
2400 + * @priv: Will be passed as second argument to @fn.
2401 ++ *
2402 ++ * We grab one request reference before calling @fn and release it after
2403 ++ * @fn returns.
2404 + */
2405 + void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
2406 + busy_tag_iter_fn *fn, void *priv)
2407 +@@ -516,6 +542,7 @@ struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
2408 +
2409 + tags->nr_tags = total_tags;
2410 + tags->nr_reserved_tags = reserved_tags;
2411 ++ spin_lock_init(&tags->lock);
2412 +
2413 + if (flags & BLK_MQ_F_TAG_HCTX_SHARED)
2414 + return tags;
2415 +diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h
2416 +index 7d3e6b333a4a9..f887988e5ef60 100644
2417 +--- a/block/blk-mq-tag.h
2418 ++++ b/block/blk-mq-tag.h
2419 +@@ -20,6 +20,12 @@ struct blk_mq_tags {
2420 + struct request **rqs;
2421 + struct request **static_rqs;
2422 + struct list_head page_list;
2423 ++
2424 ++ /*
2425 ++ * used to clear request reference in rqs[] before freeing one
2426 ++ * request pool
2427 ++ */
2428 ++ spinlock_t lock;
2429 + };
2430 +
2431 + extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags,
2432 +diff --git a/block/blk-mq.c b/block/blk-mq.c
2433 +index 4bf9449b45868..a368eb6dc6470 100644
2434 +--- a/block/blk-mq.c
2435 ++++ b/block/blk-mq.c
2436 +@@ -927,6 +927,14 @@ static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
2437 + return false;
2438 + }
2439 +
2440 ++void blk_mq_put_rq_ref(struct request *rq)
2441 ++{
2442 ++ if (is_flush_rq(rq, rq->mq_hctx))
2443 ++ rq->end_io(rq, 0);
2444 ++ else if (refcount_dec_and_test(&rq->ref))
2445 ++ __blk_mq_free_request(rq);
2446 ++}
2447 ++
2448 + static bool blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
2449 + struct request *rq, void *priv, bool reserved)
2450 + {
2451 +@@ -960,11 +968,7 @@ static bool blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
2452 + if (blk_mq_req_expired(rq, next))
2453 + blk_mq_rq_timed_out(rq, reserved);
2454 +
2455 +- if (is_flush_rq(rq, hctx))
2456 +- rq->end_io(rq, 0);
2457 +- else if (refcount_dec_and_test(&rq->ref))
2458 +- __blk_mq_free_request(rq);
2459 +-
2460 ++ blk_mq_put_rq_ref(rq);
2461 + return true;
2462 + }
2463 +
2464 +@@ -1238,9 +1242,6 @@ static void blk_mq_update_dispatch_busy(struct blk_mq_hw_ctx *hctx, bool busy)
2465 + {
2466 + unsigned int ewma;
2467 +
2468 +- if (hctx->queue->elevator)
2469 +- return;
2470 +-
2471 + ewma = hctx->dispatch_busy;
2472 +
2473 + if (!ewma && !busy)
2474 +@@ -2272,6 +2273,45 @@ queue_exit:
2475 + return BLK_QC_T_NONE;
2476 + }
2477 +
2478 ++static size_t order_to_size(unsigned int order)
2479 ++{
2480 ++ return (size_t)PAGE_SIZE << order;
2481 ++}
2482 ++
2483 ++/* called before freeing request pool in @tags */
2484 ++static void blk_mq_clear_rq_mapping(struct blk_mq_tag_set *set,
2485 ++ struct blk_mq_tags *tags, unsigned int hctx_idx)
2486 ++{
2487 ++ struct blk_mq_tags *drv_tags = set->tags[hctx_idx];
2488 ++ struct page *page;
2489 ++ unsigned long flags;
2490 ++
2491 ++ list_for_each_entry(page, &tags->page_list, lru) {
2492 ++ unsigned long start = (unsigned long)page_address(page);
2493 ++ unsigned long end = start + order_to_size(page->private);
2494 ++ int i;
2495 ++
2496 ++ for (i = 0; i < set->queue_depth; i++) {
2497 ++ struct request *rq = drv_tags->rqs[i];
2498 ++ unsigned long rq_addr = (unsigned long)rq;
2499 ++
2500 ++ if (rq_addr >= start && rq_addr < end) {
2501 ++ WARN_ON_ONCE(refcount_read(&rq->ref) != 0);
2502 ++ cmpxchg(&drv_tags->rqs[i], rq, NULL);
2503 ++ }
2504 ++ }
2505 ++ }
2506 ++
2507 ++ /*
2508 ++ * Wait until all pending iteration is done.
2509 ++ *
2510 ++ * Request reference is cleared and it is guaranteed to be observed
2511 ++ * after the ->lock is released.
2512 ++ */
2513 ++ spin_lock_irqsave(&drv_tags->lock, flags);
2514 ++ spin_unlock_irqrestore(&drv_tags->lock, flags);
2515 ++}
2516 ++
2517 + void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
2518 + unsigned int hctx_idx)
2519 + {
2520 +@@ -2290,6 +2330,8 @@ void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
2521 + }
2522 + }
2523 +
2524 ++ blk_mq_clear_rq_mapping(set, tags, hctx_idx);
2525 ++
2526 + while (!list_empty(&tags->page_list)) {
2527 + page = list_first_entry(&tags->page_list, struct page, lru);
2528 + list_del_init(&page->lru);
2529 +@@ -2349,11 +2391,6 @@ struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
2530 + return tags;
2531 + }
2532 +
2533 +-static size_t order_to_size(unsigned int order)
2534 +-{
2535 +- return (size_t)PAGE_SIZE << order;
2536 +-}
2537 +-
2538 + static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
2539 + unsigned int hctx_idx, int node)
2540 + {
2541 +diff --git a/block/blk-mq.h b/block/blk-mq.h
2542 +index d2359f7cfd5f2..f792a0920ebb1 100644
2543 +--- a/block/blk-mq.h
2544 ++++ b/block/blk-mq.h
2545 +@@ -47,6 +47,7 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
2546 + void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
2547 + struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
2548 + struct blk_mq_ctx *start);
2549 ++void blk_mq_put_rq_ref(struct request *rq);
2550 +
2551 + /*
2552 + * Internal helpers for allocating/freeing the request map
2553 +diff --git a/block/blk-rq-qos.h b/block/blk-rq-qos.h
2554 +index 2bc43e94f4c40..2bcb3495e376b 100644
2555 +--- a/block/blk-rq-qos.h
2556 ++++ b/block/blk-rq-qos.h
2557 +@@ -7,6 +7,7 @@
2558 + #include <linux/blk_types.h>
2559 + #include <linux/atomic.h>
2560 + #include <linux/wait.h>
2561 ++#include <linux/blk-mq.h>
2562 +
2563 + #include "blk-mq-debugfs.h"
2564 +
2565 +@@ -99,8 +100,21 @@ static inline void rq_wait_init(struct rq_wait *rq_wait)
2566 +
2567 + static inline void rq_qos_add(struct request_queue *q, struct rq_qos *rqos)
2568 + {
2569 ++ /*
2570 ++ * No IO can be in-flight when adding rqos, so freeze queue, which
2571 ++ * is fine since we only support rq_qos for blk-mq queue.
2572 ++ *
2573 ++ * Reuse ->queue_lock for protecting against other concurrent
2574 ++ * rq_qos adding/deleting
2575 ++ */
2576 ++ blk_mq_freeze_queue(q);
2577 ++
2578 ++ spin_lock_irq(&q->queue_lock);
2579 + rqos->next = q->rq_qos;
2580 + q->rq_qos = rqos;
2581 ++ spin_unlock_irq(&q->queue_lock);
2582 ++
2583 ++ blk_mq_unfreeze_queue(q);
2584 +
2585 + if (rqos->ops->debugfs_attrs)
2586 + blk_mq_debugfs_register_rqos(rqos);
2587 +@@ -110,12 +124,22 @@ static inline void rq_qos_del(struct request_queue *q, struct rq_qos *rqos)
2588 + {
2589 + struct rq_qos **cur;
2590 +
2591 ++ /*
2592 ++ * See comment in rq_qos_add() about freezing queue & using
2593 ++ * ->queue_lock.
2594 ++ */
2595 ++ blk_mq_freeze_queue(q);
2596 ++
2597 ++ spin_lock_irq(&q->queue_lock);
2598 + for (cur = &q->rq_qos; *cur; cur = &(*cur)->next) {
2599 + if (*cur == rqos) {
2600 + *cur = rqos->next;
2601 + break;
2602 + }
2603 + }
2604 ++ spin_unlock_irq(&q->queue_lock);
2605 ++
2606 ++ blk_mq_unfreeze_queue(q);
2607 +
2608 + blk_mq_debugfs_unregister_rqos(rqos);
2609 + }
2610 +diff --git a/block/blk-wbt.c b/block/blk-wbt.c
2611 +index fd410086fe1de..35d81b5deae1c 100644
2612 +--- a/block/blk-wbt.c
2613 ++++ b/block/blk-wbt.c
2614 +@@ -77,7 +77,8 @@ enum {
2615 +
2616 + static inline bool rwb_enabled(struct rq_wb *rwb)
2617 + {
2618 +- return rwb && rwb->wb_normal != 0;
2619 ++ return rwb && rwb->enable_state != WBT_STATE_OFF_DEFAULT &&
2620 ++ rwb->wb_normal != 0;
2621 + }
2622 +
2623 + static void wb_timestamp(struct rq_wb *rwb, unsigned long *var)
2624 +@@ -636,9 +637,13 @@ void wbt_set_write_cache(struct request_queue *q, bool write_cache_on)
2625 + void wbt_enable_default(struct request_queue *q)
2626 + {
2627 + struct rq_qos *rqos = wbt_rq_qos(q);
2628 ++
2629 + /* Throttling already enabled? */
2630 +- if (rqos)
2631 ++ if (rqos) {
2632 ++ if (RQWB(rqos)->enable_state == WBT_STATE_OFF_DEFAULT)
2633 ++ RQWB(rqos)->enable_state = WBT_STATE_ON_DEFAULT;
2634 + return;
2635 ++ }
2636 +
2637 + /* Queue not registered? Maybe shutting down... */
2638 + if (!blk_queue_registered(q))
2639 +@@ -702,7 +707,7 @@ void wbt_disable_default(struct request_queue *q)
2640 + rwb = RQWB(rqos);
2641 + if (rwb->enable_state == WBT_STATE_ON_DEFAULT) {
2642 + blk_stat_deactivate(rwb->cb);
2643 +- rwb->wb_normal = 0;
2644 ++ rwb->enable_state = WBT_STATE_OFF_DEFAULT;
2645 + }
2646 + }
2647 + EXPORT_SYMBOL_GPL(wbt_disable_default);
2648 +diff --git a/block/blk-wbt.h b/block/blk-wbt.h
2649 +index 16bdc85b8df92..2eb01becde8c4 100644
2650 +--- a/block/blk-wbt.h
2651 ++++ b/block/blk-wbt.h
2652 +@@ -34,6 +34,7 @@ enum {
2653 + enum {
2654 + WBT_STATE_ON_DEFAULT = 1,
2655 + WBT_STATE_ON_MANUAL = 2,
2656 ++ WBT_STATE_OFF_DEFAULT
2657 + };
2658 +
2659 + struct rq_wb {
2660 +diff --git a/crypto/shash.c b/crypto/shash.c
2661 +index 2e3433ad97629..0a0a50cb694f0 100644
2662 +--- a/crypto/shash.c
2663 ++++ b/crypto/shash.c
2664 +@@ -20,12 +20,24 @@
2665 +
2666 + static const struct crypto_type crypto_shash_type;
2667 +
2668 +-int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
2669 +- unsigned int keylen)
2670 ++static int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
2671 ++ unsigned int keylen)
2672 + {
2673 + return -ENOSYS;
2674 + }
2675 +-EXPORT_SYMBOL_GPL(shash_no_setkey);
2676 ++
2677 ++/*
2678 ++ * Check whether an shash algorithm has a setkey function.
2679 ++ *
2680 ++ * For CFI compatibility, this must not be an inline function. This is because
2681 ++ * when CFI is enabled, modules won't get the same address for shash_no_setkey
2682 ++ * (if it were exported, which inlining would require) as the core kernel will.
2683 ++ */
2684 ++bool crypto_shash_alg_has_setkey(struct shash_alg *alg)
2685 ++{
2686 ++ return alg->setkey != shash_no_setkey;
2687 ++}
2688 ++EXPORT_SYMBOL_GPL(crypto_shash_alg_has_setkey);
2689 +
2690 + static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key,
2691 + unsigned int keylen)
2692 +diff --git a/crypto/sm2.c b/crypto/sm2.c
2693 +index 767e160333f6e..db8a4a265669d 100644
2694 +--- a/crypto/sm2.c
2695 ++++ b/crypto/sm2.c
2696 +@@ -79,10 +79,17 @@ static int sm2_ec_ctx_init(struct mpi_ec_ctx *ec)
2697 + goto free;
2698 +
2699 + rc = -ENOMEM;
2700 ++
2701 ++ ec->Q = mpi_point_new(0);
2702 ++ if (!ec->Q)
2703 ++ goto free;
2704 ++
2705 + /* mpi_ec_setup_elliptic_curve */
2706 + ec->G = mpi_point_new(0);
2707 +- if (!ec->G)
2708 ++ if (!ec->G) {
2709 ++ mpi_point_release(ec->Q);
2710 + goto free;
2711 ++ }
2712 +
2713 + mpi_set(ec->G->x, x);
2714 + mpi_set(ec->G->y, y);
2715 +@@ -91,6 +98,7 @@ static int sm2_ec_ctx_init(struct mpi_ec_ctx *ec)
2716 + rc = -EINVAL;
2717 + ec->n = mpi_scanval(ecp->n);
2718 + if (!ec->n) {
2719 ++ mpi_point_release(ec->Q);
2720 + mpi_point_release(ec->G);
2721 + goto free;
2722 + }
2723 +@@ -119,12 +127,6 @@ static void sm2_ec_ctx_deinit(struct mpi_ec_ctx *ec)
2724 + memset(ec, 0, sizeof(*ec));
2725 + }
2726 +
2727 +-static int sm2_ec_ctx_reset(struct mpi_ec_ctx *ec)
2728 +-{
2729 +- sm2_ec_ctx_deinit(ec);
2730 +- return sm2_ec_ctx_init(ec);
2731 +-}
2732 +-
2733 + /* RESULT must have been initialized and is set on success to the
2734 + * point given by VALUE.
2735 + */
2736 +@@ -132,55 +134,48 @@ static int sm2_ecc_os2ec(MPI_POINT result, MPI value)
2737 + {
2738 + int rc;
2739 + size_t n;
2740 +- const unsigned char *buf;
2741 +- unsigned char *buf_memory;
2742 ++ unsigned char *buf;
2743 + MPI x, y;
2744 +
2745 +- n = (mpi_get_nbits(value)+7)/8;
2746 +- buf_memory = kmalloc(n, GFP_KERNEL);
2747 +- rc = mpi_print(GCRYMPI_FMT_USG, buf_memory, n, &n, value);
2748 +- if (rc) {
2749 +- kfree(buf_memory);
2750 +- return rc;
2751 +- }
2752 +- buf = buf_memory;
2753 ++ n = MPI_NBYTES(value);
2754 ++ buf = kmalloc(n, GFP_KERNEL);
2755 ++ if (!buf)
2756 ++ return -ENOMEM;
2757 +
2758 +- if (n < 1) {
2759 +- kfree(buf_memory);
2760 +- return -EINVAL;
2761 +- }
2762 +- if (*buf != 4) {
2763 +- kfree(buf_memory);
2764 +- return -EINVAL; /* No support for point compression. */
2765 +- }
2766 +- if (((n-1)%2)) {
2767 +- kfree(buf_memory);
2768 +- return -EINVAL;
2769 +- }
2770 +- n = (n-1)/2;
2771 ++ rc = mpi_print(GCRYMPI_FMT_USG, buf, n, &n, value);
2772 ++ if (rc)
2773 ++ goto err_freebuf;
2774 ++
2775 ++ rc = -EINVAL;
2776 ++ if (n < 1 || ((n - 1) % 2))
2777 ++ goto err_freebuf;
2778 ++ /* No support for point compression */
2779 ++ if (*buf != 0x4)
2780 ++ goto err_freebuf;
2781 ++
2782 ++ rc = -ENOMEM;
2783 ++ n = (n - 1) / 2;
2784 + x = mpi_read_raw_data(buf + 1, n);
2785 +- if (!x) {
2786 +- kfree(buf_memory);
2787 +- return -ENOMEM;
2788 +- }
2789 ++ if (!x)
2790 ++ goto err_freebuf;
2791 + y = mpi_read_raw_data(buf + 1 + n, n);
2792 +- kfree(buf_memory);
2793 +- if (!y) {
2794 +- mpi_free(x);
2795 +- return -ENOMEM;
2796 +- }
2797 ++ if (!y)
2798 ++ goto err_freex;
2799 +
2800 + mpi_normalize(x);
2801 + mpi_normalize(y);
2802 +-
2803 + mpi_set(result->x, x);
2804 + mpi_set(result->y, y);
2805 + mpi_set_ui(result->z, 1);
2806 +
2807 +- mpi_free(x);
2808 +- mpi_free(y);
2809 ++ rc = 0;
2810 +
2811 +- return 0;
2812 ++ mpi_free(y);
2813 ++err_freex:
2814 ++ mpi_free(x);
2815 ++err_freebuf:
2816 ++ kfree(buf);
2817 ++ return rc;
2818 + }
2819 +
2820 + struct sm2_signature_ctx {
2821 +@@ -399,31 +394,15 @@ static int sm2_set_pub_key(struct crypto_akcipher *tfm,
2822 + MPI a;
2823 + int rc;
2824 +
2825 +- rc = sm2_ec_ctx_reset(ec);
2826 +- if (rc)
2827 +- return rc;
2828 +-
2829 +- ec->Q = mpi_point_new(0);
2830 +- if (!ec->Q)
2831 +- return -ENOMEM;
2832 +-
2833 + /* include the uncompressed flag '0x04' */
2834 +- rc = -ENOMEM;
2835 + a = mpi_read_raw_data(key, keylen);
2836 + if (!a)
2837 +- goto error;
2838 ++ return -ENOMEM;
2839 +
2840 + mpi_normalize(a);
2841 + rc = sm2_ecc_os2ec(ec->Q, a);
2842 + mpi_free(a);
2843 +- if (rc)
2844 +- goto error;
2845 +
2846 +- return 0;
2847 +-
2848 +-error:
2849 +- mpi_point_release(ec->Q);
2850 +- ec->Q = NULL;
2851 + return rc;
2852 + }
2853 +
2854 +diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
2855 +index 44e4125063178..4466156474eef 100644
2856 +--- a/drivers/acpi/Makefile
2857 ++++ b/drivers/acpi/Makefile
2858 +@@ -8,6 +8,11 @@ ccflags-$(CONFIG_ACPI_DEBUG) += -DACPI_DEBUG_OUTPUT
2859 + #
2860 + # ACPI Boot-Time Table Parsing
2861 + #
2862 ++ifeq ($(CONFIG_ACPI_CUSTOM_DSDT),y)
2863 ++tables.o: $(src)/../../include/$(subst $\",,$(CONFIG_ACPI_CUSTOM_DSDT_FILE)) ;
2864 ++
2865 ++endif
2866 ++
2867 + obj-$(CONFIG_ACPI) += tables.o
2868 + obj-$(CONFIG_X86) += blacklist.o
2869 +
2870 +diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
2871 +index b8745ce48a47b..b84ab722feb44 100644
2872 +--- a/drivers/acpi/acpi_pad.c
2873 ++++ b/drivers/acpi/acpi_pad.c
2874 +@@ -261,7 +261,7 @@ static uint32_t acpi_pad_idle_cpus_num(void)
2875 + return ps_tsk_num;
2876 + }
2877 +
2878 +-static ssize_t acpi_pad_rrtime_store(struct device *dev,
2879 ++static ssize_t rrtime_store(struct device *dev,
2880 + struct device_attribute *attr, const char *buf, size_t count)
2881 + {
2882 + unsigned long num;
2883 +@@ -275,16 +275,14 @@ static ssize_t acpi_pad_rrtime_store(struct device *dev,
2884 + return count;
2885 + }
2886 +
2887 +-static ssize_t acpi_pad_rrtime_show(struct device *dev,
2888 ++static ssize_t rrtime_show(struct device *dev,
2889 + struct device_attribute *attr, char *buf)
2890 + {
2891 + return scnprintf(buf, PAGE_SIZE, "%d\n", round_robin_time);
2892 + }
2893 +-static DEVICE_ATTR(rrtime, S_IRUGO|S_IWUSR,
2894 +- acpi_pad_rrtime_show,
2895 +- acpi_pad_rrtime_store);
2896 ++static DEVICE_ATTR_RW(rrtime);
2897 +
2898 +-static ssize_t acpi_pad_idlepct_store(struct device *dev,
2899 ++static ssize_t idlepct_store(struct device *dev,
2900 + struct device_attribute *attr, const char *buf, size_t count)
2901 + {
2902 + unsigned long num;
2903 +@@ -298,16 +296,14 @@ static ssize_t acpi_pad_idlepct_store(struct device *dev,
2904 + return count;
2905 + }
2906 +
2907 +-static ssize_t acpi_pad_idlepct_show(struct device *dev,
2908 ++static ssize_t idlepct_show(struct device *dev,
2909 + struct device_attribute *attr, char *buf)
2910 + {
2911 + return scnprintf(buf, PAGE_SIZE, "%d\n", idle_pct);
2912 + }
2913 +-static DEVICE_ATTR(idlepct, S_IRUGO|S_IWUSR,
2914 +- acpi_pad_idlepct_show,
2915 +- acpi_pad_idlepct_store);
2916 ++static DEVICE_ATTR_RW(idlepct);
2917 +
2918 +-static ssize_t acpi_pad_idlecpus_store(struct device *dev,
2919 ++static ssize_t idlecpus_store(struct device *dev,
2920 + struct device_attribute *attr, const char *buf, size_t count)
2921 + {
2922 + unsigned long num;
2923 +@@ -319,16 +315,14 @@ static ssize_t acpi_pad_idlecpus_store(struct device *dev,
2924 + return count;
2925 + }
2926 +
2927 +-static ssize_t acpi_pad_idlecpus_show(struct device *dev,
2928 ++static ssize_t idlecpus_show(struct device *dev,
2929 + struct device_attribute *attr, char *buf)
2930 + {
2931 + return cpumap_print_to_pagebuf(false, buf,
2932 + to_cpumask(pad_busy_cpus_bits));
2933 + }
2934 +
2935 +-static DEVICE_ATTR(idlecpus, S_IRUGO|S_IWUSR,
2936 +- acpi_pad_idlecpus_show,
2937 +- acpi_pad_idlecpus_store);
2938 ++static DEVICE_ATTR_RW(idlecpus);
2939 +
2940 + static int acpi_pad_add_sysfs(struct acpi_device *device)
2941 + {
2942 +diff --git a/drivers/acpi/acpi_tad.c b/drivers/acpi/acpi_tad.c
2943 +index 7d45cce0c3c18..e9b8e8305e23e 100644
2944 +--- a/drivers/acpi/acpi_tad.c
2945 ++++ b/drivers/acpi/acpi_tad.c
2946 +@@ -237,7 +237,7 @@ static ssize_t time_show(struct device *dev, struct device_attribute *attr,
2947 + rt.tz, rt.daylight);
2948 + }
2949 +
2950 +-static DEVICE_ATTR(time, S_IRUSR | S_IWUSR, time_show, time_store);
2951 ++static DEVICE_ATTR_RW(time);
2952 +
2953 + static struct attribute *acpi_tad_time_attrs[] = {
2954 + &dev_attr_time.attr,
2955 +@@ -446,7 +446,7 @@ static ssize_t ac_alarm_show(struct device *dev, struct device_attribute *attr,
2956 + return acpi_tad_alarm_read(dev, buf, ACPI_TAD_AC_TIMER);
2957 + }
2958 +
2959 +-static DEVICE_ATTR(ac_alarm, S_IRUSR | S_IWUSR, ac_alarm_show, ac_alarm_store);
2960 ++static DEVICE_ATTR_RW(ac_alarm);
2961 +
2962 + static ssize_t ac_policy_store(struct device *dev, struct device_attribute *attr,
2963 + const char *buf, size_t count)
2964 +@@ -462,7 +462,7 @@ static ssize_t ac_policy_show(struct device *dev, struct device_attribute *attr,
2965 + return acpi_tad_policy_read(dev, buf, ACPI_TAD_AC_TIMER);
2966 + }
2967 +
2968 +-static DEVICE_ATTR(ac_policy, S_IRUSR | S_IWUSR, ac_policy_show, ac_policy_store);
2969 ++static DEVICE_ATTR_RW(ac_policy);
2970 +
2971 + static ssize_t ac_status_store(struct device *dev, struct device_attribute *attr,
2972 + const char *buf, size_t count)
2973 +@@ -478,7 +478,7 @@ static ssize_t ac_status_show(struct device *dev, struct device_attribute *attr,
2974 + return acpi_tad_status_read(dev, buf, ACPI_TAD_AC_TIMER);
2975 + }
2976 +
2977 +-static DEVICE_ATTR(ac_status, S_IRUSR | S_IWUSR, ac_status_show, ac_status_store);
2978 ++static DEVICE_ATTR_RW(ac_status);
2979 +
2980 + static struct attribute *acpi_tad_attrs[] = {
2981 + &dev_attr_caps.attr,
2982 +@@ -505,7 +505,7 @@ static ssize_t dc_alarm_show(struct device *dev, struct device_attribute *attr,
2983 + return acpi_tad_alarm_read(dev, buf, ACPI_TAD_DC_TIMER);
2984 + }
2985 +
2986 +-static DEVICE_ATTR(dc_alarm, S_IRUSR | S_IWUSR, dc_alarm_show, dc_alarm_store);
2987 ++static DEVICE_ATTR_RW(dc_alarm);
2988 +
2989 + static ssize_t dc_policy_store(struct device *dev, struct device_attribute *attr,
2990 + const char *buf, size_t count)
2991 +@@ -521,7 +521,7 @@ static ssize_t dc_policy_show(struct device *dev, struct device_attribute *attr,
2992 + return acpi_tad_policy_read(dev, buf, ACPI_TAD_DC_TIMER);
2993 + }
2994 +
2995 +-static DEVICE_ATTR(dc_policy, S_IRUSR | S_IWUSR, dc_policy_show, dc_policy_store);
2996 ++static DEVICE_ATTR_RW(dc_policy);
2997 +
2998 + static ssize_t dc_status_store(struct device *dev, struct device_attribute *attr,
2999 + const char *buf, size_t count)
3000 +@@ -537,7 +537,7 @@ static ssize_t dc_status_show(struct device *dev, struct device_attribute *attr,
3001 + return acpi_tad_status_read(dev, buf, ACPI_TAD_DC_TIMER);
3002 + }
3003 +
3004 +-static DEVICE_ATTR(dc_status, S_IRUSR | S_IWUSR, dc_status_show, dc_status_store);
3005 ++static DEVICE_ATTR_RW(dc_status);
3006 +
3007 + static struct attribute *acpi_tad_dc_attrs[] = {
3008 + &dev_attr_dc_alarm.attr,
3009 +diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c
3010 +index 125143c41bb81..8768594c79e58 100644
3011 +--- a/drivers/acpi/acpica/nsrepair2.c
3012 ++++ b/drivers/acpi/acpica/nsrepair2.c
3013 +@@ -375,6 +375,13 @@ acpi_ns_repair_CID(struct acpi_evaluate_info *info,
3014 +
3015 + (*element_ptr)->common.reference_count =
3016 + original_ref_count;
3017 ++
3018 ++ /*
3019 ++ * The original_element holds a reference from the package object
3020 ++ * that represents _HID. Since a new element was created by _HID,
3021 ++ * remove the reference from the _CID package.
3022 ++ */
3023 ++ acpi_ut_remove_reference(original_element);
3024 + }
3025 +
3026 + element_ptr++;
3027 +diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
3028 +index fce7ade2aba92..0c8330ed1ffd5 100644
3029 +--- a/drivers/acpi/apei/ghes.c
3030 ++++ b/drivers/acpi/apei/ghes.c
3031 +@@ -441,28 +441,35 @@ static void ghes_kick_task_work(struct callback_head *head)
3032 + gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node, node_len);
3033 + }
3034 +
3035 +-static bool ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata,
3036 +- int sev)
3037 ++static bool ghes_do_memory_failure(u64 physical_addr, int flags)
3038 + {
3039 + unsigned long pfn;
3040 +- int flags = -1;
3041 +- int sec_sev = ghes_severity(gdata->error_severity);
3042 +- struct cper_sec_mem_err *mem_err = acpi_hest_get_payload(gdata);
3043 +
3044 + if (!IS_ENABLED(CONFIG_ACPI_APEI_MEMORY_FAILURE))
3045 + return false;
3046 +
3047 +- if (!(mem_err->validation_bits & CPER_MEM_VALID_PA))
3048 +- return false;
3049 +-
3050 +- pfn = mem_err->physical_addr >> PAGE_SHIFT;
3051 ++ pfn = PHYS_PFN(physical_addr);
3052 + if (!pfn_valid(pfn)) {
3053 + pr_warn_ratelimited(FW_WARN GHES_PFX
3054 + "Invalid address in generic error data: %#llx\n",
3055 +- mem_err->physical_addr);
3056 ++ physical_addr);
3057 + return false;
3058 + }
3059 +
3060 ++ memory_failure_queue(pfn, flags);
3061 ++ return true;
3062 ++}
3063 ++
3064 ++static bool ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata,
3065 ++ int sev)
3066 ++{
3067 ++ int flags = -1;
3068 ++ int sec_sev = ghes_severity(gdata->error_severity);
3069 ++ struct cper_sec_mem_err *mem_err = acpi_hest_get_payload(gdata);
3070 ++
3071 ++ if (!(mem_err->validation_bits & CPER_MEM_VALID_PA))
3072 ++ return false;
3073 ++
3074 + /* iff following two events can be handled properly by now */
3075 + if (sec_sev == GHES_SEV_CORRECTED &&
3076 + (gdata->flags & CPER_SEC_ERROR_THRESHOLD_EXCEEDED))
3077 +@@ -470,14 +477,56 @@ static bool ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata,
3078 + if (sev == GHES_SEV_RECOVERABLE && sec_sev == GHES_SEV_RECOVERABLE)
3079 + flags = 0;
3080 +
3081 +- if (flags != -1) {
3082 +- memory_failure_queue(pfn, flags);
3083 +- return true;
3084 +- }
3085 ++ if (flags != -1)
3086 ++ return ghes_do_memory_failure(mem_err->physical_addr, flags);
3087 +
3088 + return false;
3089 + }
3090 +
3091 ++static bool ghes_handle_arm_hw_error(struct acpi_hest_generic_data *gdata, int sev)
3092 ++{
3093 ++ struct cper_sec_proc_arm *err = acpi_hest_get_payload(gdata);
3094 ++ bool queued = false;
3095 ++ int sec_sev, i;
3096 ++ char *p;
3097 ++
3098 ++ log_arm_hw_error(err);
3099 ++
3100 ++ sec_sev = ghes_severity(gdata->error_severity);
3101 ++ if (sev != GHES_SEV_RECOVERABLE || sec_sev != GHES_SEV_RECOVERABLE)
3102 ++ return false;
3103 ++
3104 ++ p = (char *)(err + 1);
3105 ++ for (i = 0; i < err->err_info_num; i++) {
3106 ++ struct cper_arm_err_info *err_info = (struct cper_arm_err_info *)p;
3107 ++ bool is_cache = (err_info->type == CPER_ARM_CACHE_ERROR);
3108 ++ bool has_pa = (err_info->validation_bits & CPER_ARM_INFO_VALID_PHYSICAL_ADDR);
3109 ++ const char *error_type = "unknown error";
3110 ++
3111 ++ /*
3112 ++ * The field (err_info->error_info & BIT(26)) is fixed to set to
3113 ++ * 1 in some old firmware of HiSilicon Kunpeng920. We assume that
3114 ++ * firmware won't mix corrected errors in an uncorrected section,
3115 ++ * and don't filter out 'corrected' error here.
3116 ++ */
3117 ++ if (is_cache && has_pa) {
3118 ++ queued = ghes_do_memory_failure(err_info->physical_fault_addr, 0);
3119 ++ p += err_info->length;
3120 ++ continue;
3121 ++ }
3122 ++
3123 ++ if (err_info->type < ARRAY_SIZE(cper_proc_error_type_strs))
3124 ++ error_type = cper_proc_error_type_strs[err_info->type];
3125 ++
3126 ++ pr_warn_ratelimited(FW_WARN GHES_PFX
3127 ++ "Unhandled processor error type: %s\n",
3128 ++ error_type);
3129 ++ p += err_info->length;
3130 ++ }
3131 ++
3132 ++ return queued;
3133 ++}
3134 ++
3135 + /*
3136 + * PCIe AER errors need to be sent to the AER driver for reporting and
3137 + * recovery. The GHES severities map to the following AER severities and
3138 +@@ -605,9 +654,7 @@ static bool ghes_do_proc(struct ghes *ghes,
3139 + ghes_handle_aer(gdata);
3140 + }
3141 + else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) {
3142 +- struct cper_sec_proc_arm *err = acpi_hest_get_payload(gdata);
3143 +-
3144 +- log_arm_hw_error(err);
3145 ++ queued = ghes_handle_arm_hw_error(gdata, sev);
3146 + } else {
3147 + void *err = acpi_hest_get_payload(gdata);
3148 +
3149 +diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
3150 +index 251f961c28cc4..e0d14017706ea 100644
3151 +--- a/drivers/acpi/bgrt.c
3152 ++++ b/drivers/acpi/bgrt.c
3153 +@@ -15,40 +15,19 @@
3154 + static void *bgrt_image;
3155 + static struct kobject *bgrt_kobj;
3156 +
3157 +-static ssize_t show_version(struct device *dev,
3158 +- struct device_attribute *attr, char *buf)
3159 +-{
3160 +- return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab.version);
3161 +-}
3162 +-static DEVICE_ATTR(version, S_IRUGO, show_version, NULL);
3163 +-
3164 +-static ssize_t show_status(struct device *dev,
3165 +- struct device_attribute *attr, char *buf)
3166 +-{
3167 +- return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab.status);
3168 +-}
3169 +-static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
3170 +-
3171 +-static ssize_t show_type(struct device *dev,
3172 +- struct device_attribute *attr, char *buf)
3173 +-{
3174 +- return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab.image_type);
3175 +-}
3176 +-static DEVICE_ATTR(type, S_IRUGO, show_type, NULL);
3177 +-
3178 +-static ssize_t show_xoffset(struct device *dev,
3179 +- struct device_attribute *attr, char *buf)
3180 +-{
3181 +- return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab.image_offset_x);
3182 +-}
3183 +-static DEVICE_ATTR(xoffset, S_IRUGO, show_xoffset, NULL);
3184 +-
3185 +-static ssize_t show_yoffset(struct device *dev,
3186 +- struct device_attribute *attr, char *buf)
3187 +-{
3188 +- return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab.image_offset_y);
3189 +-}
3190 +-static DEVICE_ATTR(yoffset, S_IRUGO, show_yoffset, NULL);
3191 ++#define BGRT_SHOW(_name, _member) \
3192 ++ static ssize_t _name##_show(struct kobject *kobj, \
3193 ++ struct kobj_attribute *attr, char *buf) \
3194 ++ { \
3195 ++ return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab._member); \
3196 ++ } \
3197 ++ struct kobj_attribute bgrt_attr_##_name = __ATTR_RO(_name)
3198 ++
3199 ++BGRT_SHOW(version, version);
3200 ++BGRT_SHOW(status, status);
3201 ++BGRT_SHOW(type, image_type);
3202 ++BGRT_SHOW(xoffset, image_offset_x);
3203 ++BGRT_SHOW(yoffset, image_offset_y);
3204 +
3205 + static ssize_t image_read(struct file *file, struct kobject *kobj,
3206 + struct bin_attribute *attr, char *buf, loff_t off, size_t count)
3207 +@@ -60,11 +39,11 @@ static ssize_t image_read(struct file *file, struct kobject *kobj,
3208 + static BIN_ATTR_RO(image, 0); /* size gets filled in later */
3209 +
3210 + static struct attribute *bgrt_attributes[] = {
3211 +- &dev_attr_version.attr,
3212 +- &dev_attr_status.attr,
3213 +- &dev_attr_type.attr,
3214 +- &dev_attr_xoffset.attr,
3215 +- &dev_attr_yoffset.attr,
3216 ++ &bgrt_attr_version.attr,
3217 ++ &bgrt_attr_status.attr,
3218 ++ &bgrt_attr_type.attr,
3219 ++ &bgrt_attr_xoffset.attr,
3220 ++ &bgrt_attr_yoffset.attr,
3221 + NULL,
3222 + };
3223 +
3224 +diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
3225 +index 1682f8b454a2e..e317214aabec5 100644
3226 +--- a/drivers/acpi/bus.c
3227 ++++ b/drivers/acpi/bus.c
3228 +@@ -1245,6 +1245,7 @@ static int __init acpi_init(void)
3229 +
3230 + result = acpi_bus_init();
3231 + if (result) {
3232 ++ kobject_put(acpi_kobj);
3233 + disable_acpi();
3234 + return result;
3235 + }
3236 +diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
3237 +index 48ff6821a83d4..ecd2ddc2215f5 100644
3238 +--- a/drivers/acpi/device_pm.c
3239 ++++ b/drivers/acpi/device_pm.c
3240 +@@ -18,6 +18,7 @@
3241 + #include <linux/pm_runtime.h>
3242 + #include <linux/suspend.h>
3243 +
3244 ++#include "fan.h"
3245 + #include "internal.h"
3246 +
3247 + #define _COMPONENT ACPI_POWER_COMPONENT
3248 +@@ -1298,10 +1299,7 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
3249 + * with the generic ACPI PM domain.
3250 + */
3251 + static const struct acpi_device_id special_pm_ids[] = {
3252 +- {"PNP0C0B", }, /* Generic ACPI fan */
3253 +- {"INT3404", }, /* Fan */
3254 +- {"INTC1044", }, /* Fan for Tiger Lake generation */
3255 +- {"INTC1048", }, /* Fan for Alder Lake generation */
3256 ++ ACPI_FAN_DEVICE_IDS,
3257 + {}
3258 + };
3259 + struct acpi_device *adev = ACPI_COMPANION(dev);
3260 +diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c
3261 +index bfca116482b8b..fe8c7e79f4726 100644
3262 +--- a/drivers/acpi/device_sysfs.c
3263 ++++ b/drivers/acpi/device_sysfs.c
3264 +@@ -325,11 +325,11 @@ int acpi_device_modalias(struct device *dev, char *buf, int size)
3265 + EXPORT_SYMBOL_GPL(acpi_device_modalias);
3266 +
3267 + static ssize_t
3268 +-acpi_device_modalias_show(struct device *dev, struct device_attribute *attr, char *buf)
3269 ++modalias_show(struct device *dev, struct device_attribute *attr, char *buf)
3270 + {
3271 + return __acpi_device_modalias(to_acpi_device(dev), buf, 1024);
3272 + }
3273 +-static DEVICE_ATTR(modalias, 0444, acpi_device_modalias_show, NULL);
3274 ++static DEVICE_ATTR_RO(modalias);
3275 +
3276 + static ssize_t real_power_state_show(struct device *dev,
3277 + struct device_attribute *attr, char *buf)
3278 +@@ -358,8 +358,8 @@ static ssize_t power_state_show(struct device *dev,
3279 + static DEVICE_ATTR_RO(power_state);
3280 +
3281 + static ssize_t
3282 +-acpi_eject_store(struct device *d, struct device_attribute *attr,
3283 +- const char *buf, size_t count)
3284 ++eject_store(struct device *d, struct device_attribute *attr,
3285 ++ const char *buf, size_t count)
3286 + {
3287 + struct acpi_device *acpi_device = to_acpi_device(d);
3288 + acpi_object_type not_used;
3289 +@@ -387,28 +387,28 @@ acpi_eject_store(struct device *d, struct device_attribute *attr,
3290 + return status == AE_NO_MEMORY ? -ENOMEM : -EAGAIN;
3291 + }
3292 +
3293 +-static DEVICE_ATTR(eject, 0200, NULL, acpi_eject_store);
3294 ++static DEVICE_ATTR_WO(eject);
3295 +
3296 + static ssize_t
3297 +-acpi_device_hid_show(struct device *dev, struct device_attribute *attr, char *buf)
3298 ++hid_show(struct device *dev, struct device_attribute *attr, char *buf)
3299 + {
3300 + struct acpi_device *acpi_dev = to_acpi_device(dev);
3301 +
3302 + return sprintf(buf, "%s\n", acpi_device_hid(acpi_dev));
3303 + }
3304 +-static DEVICE_ATTR(hid, 0444, acpi_device_hid_show, NULL);
3305 ++static DEVICE_ATTR_RO(hid);
3306 +
3307 +-static ssize_t acpi_device_uid_show(struct device *dev,
3308 +- struct device_attribute *attr, char *buf)
3309 ++static ssize_t uid_show(struct device *dev,
3310 ++ struct device_attribute *attr, char *buf)
3311 + {
3312 + struct acpi_device *acpi_dev = to_acpi_device(dev);
3313 +
3314 + return sprintf(buf, "%s\n", acpi_dev->pnp.unique_id);
3315 + }
3316 +-static DEVICE_ATTR(uid, 0444, acpi_device_uid_show, NULL);
3317 ++static DEVICE_ATTR_RO(uid);
3318 +
3319 +-static ssize_t acpi_device_adr_show(struct device *dev,
3320 +- struct device_attribute *attr, char *buf)
3321 ++static ssize_t adr_show(struct device *dev,
3322 ++ struct device_attribute *attr, char *buf)
3323 + {
3324 + struct acpi_device *acpi_dev = to_acpi_device(dev);
3325 +
3326 +@@ -417,16 +417,16 @@ static ssize_t acpi_device_adr_show(struct device *dev,
3327 + else
3328 + return sprintf(buf, "0x%08llx\n", acpi_dev->pnp.bus_address);
3329 + }
3330 +-static DEVICE_ATTR(adr, 0444, acpi_device_adr_show, NULL);
3331 ++static DEVICE_ATTR_RO(adr);
3332 +
3333 +-static ssize_t acpi_device_path_show(struct device *dev,
3334 +- struct device_attribute *attr, char *buf)
3335 ++static ssize_t path_show(struct device *dev,
3336 ++ struct device_attribute *attr, char *buf)
3337 + {
3338 + struct acpi_device *acpi_dev = to_acpi_device(dev);
3339 +
3340 + return acpi_object_path(acpi_dev->handle, buf);
3341 + }
3342 +-static DEVICE_ATTR(path, 0444, acpi_device_path_show, NULL);
3343 ++static DEVICE_ATTR_RO(path);
3344 +
3345 + /* sysfs file that shows description text from the ACPI _STR method */
3346 + static ssize_t description_show(struct device *dev,
3347 +@@ -446,7 +446,7 @@ static ssize_t description_show(struct device *dev,
3348 + (wchar_t *)acpi_dev->pnp.str_obj->buffer.pointer,
3349 + acpi_dev->pnp.str_obj->buffer.length,
3350 + UTF16_LITTLE_ENDIAN, buf,
3351 +- PAGE_SIZE);
3352 ++ PAGE_SIZE - 1);
3353 +
3354 + buf[result++] = '\n';
3355 +
3356 +@@ -455,8 +455,8 @@ static ssize_t description_show(struct device *dev,
3357 + static DEVICE_ATTR_RO(description);
3358 +
3359 + static ssize_t
3360 +-acpi_device_sun_show(struct device *dev, struct device_attribute *attr,
3361 +- char *buf) {
3362 ++sun_show(struct device *dev, struct device_attribute *attr,
3363 ++ char *buf) {
3364 + struct acpi_device *acpi_dev = to_acpi_device(dev);
3365 + acpi_status status;
3366 + unsigned long long sun;
3367 +@@ -467,11 +467,11 @@ acpi_device_sun_show(struct device *dev, struct device_attribute *attr,
3368 +
3369 + return sprintf(buf, "%llu\n", sun);
3370 + }
3371 +-static DEVICE_ATTR(sun, 0444, acpi_device_sun_show, NULL);
3372 ++static DEVICE_ATTR_RO(sun);
3373 +
3374 + static ssize_t
3375 +-acpi_device_hrv_show(struct device *dev, struct device_attribute *attr,
3376 +- char *buf) {
3377 ++hrv_show(struct device *dev, struct device_attribute *attr,
3378 ++ char *buf) {
3379 + struct acpi_device *acpi_dev = to_acpi_device(dev);
3380 + acpi_status status;
3381 + unsigned long long hrv;
3382 +@@ -482,7 +482,7 @@ acpi_device_hrv_show(struct device *dev, struct device_attribute *attr,
3383 +
3384 + return sprintf(buf, "%llu\n", hrv);
3385 + }
3386 +-static DEVICE_ATTR(hrv, 0444, acpi_device_hrv_show, NULL);
3387 ++static DEVICE_ATTR_RO(hrv);
3388 +
3389 + static ssize_t status_show(struct device *dev, struct device_attribute *attr,
3390 + char *buf) {
3391 +diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
3392 +index 24e076f44d238..0937ceab052e8 100644
3393 +--- a/drivers/acpi/dock.c
3394 ++++ b/drivers/acpi/dock.c
3395 +@@ -484,7 +484,7 @@ int dock_notify(struct acpi_device *adev, u32 event)
3396 + /*
3397 + * show_docked - read method for "docked" file in sysfs
3398 + */
3399 +-static ssize_t show_docked(struct device *dev,
3400 ++static ssize_t docked_show(struct device *dev,
3401 + struct device_attribute *attr, char *buf)
3402 + {
3403 + struct dock_station *dock_station = dev->platform_data;
3404 +@@ -493,25 +493,25 @@ static ssize_t show_docked(struct device *dev,
3405 + acpi_bus_get_device(dock_station->handle, &adev);
3406 + return snprintf(buf, PAGE_SIZE, "%u\n", acpi_device_enumerated(adev));
3407 + }
3408 +-static DEVICE_ATTR(docked, S_IRUGO, show_docked, NULL);
3409 ++static DEVICE_ATTR_RO(docked);
3410 +
3411 + /*
3412 + * show_flags - read method for flags file in sysfs
3413 + */
3414 +-static ssize_t show_flags(struct device *dev,
3415 ++static ssize_t flags_show(struct device *dev,
3416 + struct device_attribute *attr, char *buf)
3417 + {
3418 + struct dock_station *dock_station = dev->platform_data;
3419 + return snprintf(buf, PAGE_SIZE, "%d\n", dock_station->flags);
3420 +
3421 + }
3422 +-static DEVICE_ATTR(flags, S_IRUGO, show_flags, NULL);
3423 ++static DEVICE_ATTR_RO(flags);
3424 +
3425 + /*
3426 + * write_undock - write method for "undock" file in sysfs
3427 + */
3428 +-static ssize_t write_undock(struct device *dev, struct device_attribute *attr,
3429 +- const char *buf, size_t count)
3430 ++static ssize_t undock_store(struct device *dev, struct device_attribute *attr,
3431 ++ const char *buf, size_t count)
3432 + {
3433 + int ret;
3434 + struct dock_station *dock_station = dev->platform_data;
3435 +@@ -525,13 +525,13 @@ static ssize_t write_undock(struct device *dev, struct device_attribute *attr,
3436 + acpi_scan_lock_release();
3437 + return ret ? ret: count;
3438 + }
3439 +-static DEVICE_ATTR(undock, S_IWUSR, NULL, write_undock);
3440 ++static DEVICE_ATTR_WO(undock);
3441 +
3442 + /*
3443 + * show_dock_uid - read method for "uid" file in sysfs
3444 + */
3445 +-static ssize_t show_dock_uid(struct device *dev,
3446 +- struct device_attribute *attr, char *buf)
3447 ++static ssize_t uid_show(struct device *dev,
3448 ++ struct device_attribute *attr, char *buf)
3449 + {
3450 + unsigned long long lbuf;
3451 + struct dock_station *dock_station = dev->platform_data;
3452 +@@ -542,10 +542,10 @@ static ssize_t show_dock_uid(struct device *dev,
3453 +
3454 + return snprintf(buf, PAGE_SIZE, "%llx\n", lbuf);
3455 + }
3456 +-static DEVICE_ATTR(uid, S_IRUGO, show_dock_uid, NULL);
3457 ++static DEVICE_ATTR_RO(uid);
3458 +
3459 +-static ssize_t show_dock_type(struct device *dev,
3460 +- struct device_attribute *attr, char *buf)
3461 ++static ssize_t type_show(struct device *dev,
3462 ++ struct device_attribute *attr, char *buf)
3463 + {
3464 + struct dock_station *dock_station = dev->platform_data;
3465 + char *type;
3466 +@@ -561,7 +561,7 @@ static ssize_t show_dock_type(struct device *dev,
3467 +
3468 + return snprintf(buf, PAGE_SIZE, "%s\n", type);
3469 + }
3470 +-static DEVICE_ATTR(type, S_IRUGO, show_dock_type, NULL);
3471 ++static DEVICE_ATTR_RO(type);
3472 +
3473 + static struct attribute *dock_attributes[] = {
3474 + &dev_attr_docked.attr,
3475 +diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
3476 +index e0cb1bcfffb29..be3e0921a6c00 100644
3477 +--- a/drivers/acpi/ec.c
3478 ++++ b/drivers/acpi/ec.c
3479 +@@ -183,6 +183,7 @@ static struct workqueue_struct *ec_query_wq;
3480 +
3481 + static int EC_FLAGS_CORRECT_ECDT; /* Needs ECDT port address correction */
3482 + static int EC_FLAGS_IGNORE_DSDT_GPE; /* Needs ECDT GPE as correction setting */
3483 ++static int EC_FLAGS_TRUST_DSDT_GPE; /* Needs DSDT GPE as correction setting */
3484 + static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
3485 +
3486 + /* --------------------------------------------------------------------------
3487 +@@ -1606,7 +1607,8 @@ static int acpi_ec_add(struct acpi_device *device)
3488 + }
3489 +
3490 + if (boot_ec && ec->command_addr == boot_ec->command_addr &&
3491 +- ec->data_addr == boot_ec->data_addr) {
3492 ++ ec->data_addr == boot_ec->data_addr &&
3493 ++ !EC_FLAGS_TRUST_DSDT_GPE) {
3494 + /*
3495 + * Trust PNP0C09 namespace location rather than
3496 + * ECDT ID. But trust ECDT GPE rather than _GPE
3497 +@@ -1829,6 +1831,18 @@ static int ec_correct_ecdt(const struct dmi_system_id *id)
3498 + return 0;
3499 + }
3500 +
3501 ++/*
3502 ++ * Some ECDTs contain wrong GPE setting, but they share the same port addresses
3503 ++ * with DSDT EC, don't duplicate the DSDT EC with ECDT EC in this case.
3504 ++ * https://bugzilla.kernel.org/show_bug.cgi?id=209989
3505 ++ */
3506 ++static int ec_honor_dsdt_gpe(const struct dmi_system_id *id)
3507 ++{
3508 ++ pr_debug("Detected system needing DSDT GPE setting.\n");
3509 ++ EC_FLAGS_TRUST_DSDT_GPE = 1;
3510 ++ return 0;
3511 ++}
3512 ++
3513 + /*
3514 + * Some DSDTs contain wrong GPE setting.
3515 + * Asus FX502VD/VE, GL702VMK, X550VXK, X580VD
3516 +@@ -1859,6 +1873,22 @@ static const struct dmi_system_id ec_dmi_table[] __initconst = {
3517 + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
3518 + DMI_MATCH(DMI_PRODUCT_NAME, "GL702VMK"),}, NULL},
3519 + {
3520 ++ ec_honor_ecdt_gpe, "ASUSTeK COMPUTER INC. X505BA", {
3521 ++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
3522 ++ DMI_MATCH(DMI_PRODUCT_NAME, "X505BA"),}, NULL},
3523 ++ {
3524 ++ ec_honor_ecdt_gpe, "ASUSTeK COMPUTER INC. X505BP", {
3525 ++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
3526 ++ DMI_MATCH(DMI_PRODUCT_NAME, "X505BP"),}, NULL},
3527 ++ {
3528 ++ ec_honor_ecdt_gpe, "ASUSTeK COMPUTER INC. X542BA", {
3529 ++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
3530 ++ DMI_MATCH(DMI_PRODUCT_NAME, "X542BA"),}, NULL},
3531 ++ {
3532 ++ ec_honor_ecdt_gpe, "ASUSTeK COMPUTER INC. X542BP", {
3533 ++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
3534 ++ DMI_MATCH(DMI_PRODUCT_NAME, "X542BP"),}, NULL},
3535 ++ {
3536 + ec_honor_ecdt_gpe, "ASUS X550VXK", {
3537 + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
3538 + DMI_MATCH(DMI_PRODUCT_NAME, "X550VXK"),}, NULL},
3539 +@@ -1867,6 +1897,11 @@ static const struct dmi_system_id ec_dmi_table[] __initconst = {
3540 + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
3541 + DMI_MATCH(DMI_PRODUCT_NAME, "X580VD"),}, NULL},
3542 + {
3543 ++ /* https://bugzilla.kernel.org/show_bug.cgi?id=209989 */
3544 ++ ec_honor_dsdt_gpe, "HP Pavilion Gaming Laptop 15-cx0xxx", {
3545 ++ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
3546 ++ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Gaming Laptop 15-cx0xxx"),}, NULL},
3547 ++ {
3548 + ec_clear_on_resume, "Samsung hardware", {
3549 + DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL},
3550 + {},
3551 +diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
3552 +index 66c3983f0ccca..5cd0ceb50bc8a 100644
3553 +--- a/drivers/acpi/fan.c
3554 ++++ b/drivers/acpi/fan.c
3555 +@@ -16,6 +16,8 @@
3556 + #include <linux/platform_device.h>
3557 + #include <linux/sort.h>
3558 +
3559 ++#include "fan.h"
3560 ++
3561 + MODULE_AUTHOR("Paul Diefenbaugh");
3562 + MODULE_DESCRIPTION("ACPI Fan Driver");
3563 + MODULE_LICENSE("GPL");
3564 +@@ -24,10 +26,7 @@ static int acpi_fan_probe(struct platform_device *pdev);
3565 + static int acpi_fan_remove(struct platform_device *pdev);
3566 +
3567 + static const struct acpi_device_id fan_device_ids[] = {
3568 +- {"PNP0C0B", 0},
3569 +- {"INT3404", 0},
3570 +- {"INTC1044", 0},
3571 +- {"INTC1048", 0},
3572 ++ ACPI_FAN_DEVICE_IDS,
3573 + {"", 0},
3574 + };
3575 + MODULE_DEVICE_TABLE(acpi, fan_device_ids);
3576 +diff --git a/drivers/acpi/fan.h b/drivers/acpi/fan.h
3577 +new file mode 100644
3578 +index 0000000000000..dc9a6efa514b0
3579 +--- /dev/null
3580 ++++ b/drivers/acpi/fan.h
3581 +@@ -0,0 +1,13 @@
3582 ++/* SPDX-License-Identifier: GPL-2.0-only */
3583 ++
3584 ++/*
3585 ++ * ACPI fan device IDs are shared between the fan driver and the device power
3586 ++ * management code.
3587 ++ *
3588 ++ * Add new device IDs before the generic ACPI fan one.
3589 ++ */
3590 ++#define ACPI_FAN_DEVICE_IDS \
3591 ++ {"INT3404", }, /* Fan */ \
3592 ++ {"INTC1044", }, /* Fan for Tiger Lake generation */ \
3593 ++ {"INTC1048", }, /* Fan for Alder Lake generation */ \
3594 ++ {"PNP0C0B", } /* Generic ACPI fan */
3595 +diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
3596 +index 8048da85b7e07..61115ed8b93fb 100644
3597 +--- a/drivers/acpi/power.c
3598 ++++ b/drivers/acpi/power.c
3599 +@@ -886,15 +886,16 @@ static void acpi_release_power_resource(struct device *dev)
3600 + kfree(resource);
3601 + }
3602 +
3603 +-static ssize_t acpi_power_in_use_show(struct device *dev,
3604 +- struct device_attribute *attr,
3605 +- char *buf) {
3606 ++static ssize_t resource_in_use_show(struct device *dev,
3607 ++ struct device_attribute *attr,
3608 ++ char *buf)
3609 ++{
3610 + struct acpi_power_resource *resource;
3611 +
3612 + resource = to_power_resource(to_acpi_device(dev));
3613 + return sprintf(buf, "%u\n", !!resource->ref_count);
3614 + }
3615 +-static DEVICE_ATTR(resource_in_use, 0444, acpi_power_in_use_show, NULL);
3616 ++static DEVICE_ATTR_RO(resource_in_use);
3617 +
3618 + static void acpi_power_sysfs_remove(struct acpi_device *device)
3619 + {
3620 +diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
3621 +index fb161a21d0aec..8377c3ed10ffa 100644
3622 +--- a/drivers/acpi/processor_idle.c
3623 ++++ b/drivers/acpi/processor_idle.c
3624 +@@ -16,6 +16,7 @@
3625 + #include <linux/acpi.h>
3626 + #include <linux/dmi.h>
3627 + #include <linux/sched.h> /* need_resched() */
3628 ++#include <linux/sort.h>
3629 + #include <linux/tick.h>
3630 + #include <linux/cpuidle.h>
3631 + #include <linux/cpu.h>
3632 +@@ -389,10 +390,37 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
3633 + return;
3634 + }
3635 +
3636 ++static int acpi_cst_latency_cmp(const void *a, const void *b)
3637 ++{
3638 ++ const struct acpi_processor_cx *x = a, *y = b;
3639 ++
3640 ++ if (!(x->valid && y->valid))
3641 ++ return 0;
3642 ++ if (x->latency > y->latency)
3643 ++ return 1;
3644 ++ if (x->latency < y->latency)
3645 ++ return -1;
3646 ++ return 0;
3647 ++}
3648 ++static void acpi_cst_latency_swap(void *a, void *b, int n)
3649 ++{
3650 ++ struct acpi_processor_cx *x = a, *y = b;
3651 ++ u32 tmp;
3652 ++
3653 ++ if (!(x->valid && y->valid))
3654 ++ return;
3655 ++ tmp = x->latency;
3656 ++ x->latency = y->latency;
3657 ++ y->latency = tmp;
3658 ++}
3659 ++
3660 + static int acpi_processor_power_verify(struct acpi_processor *pr)
3661 + {
3662 + unsigned int i;
3663 + unsigned int working = 0;
3664 ++ unsigned int last_latency = 0;
3665 ++ unsigned int last_type = 0;
3666 ++ bool buggy_latency = false;
3667 +
3668 + pr->power.timer_broadcast_on_state = INT_MAX;
3669 +
3670 +@@ -416,12 +444,24 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
3671 + }
3672 + if (!cx->valid)
3673 + continue;
3674 ++ if (cx->type >= last_type && cx->latency < last_latency)
3675 ++ buggy_latency = true;
3676 ++ last_latency = cx->latency;
3677 ++ last_type = cx->type;
3678 +
3679 + lapic_timer_check_state(i, pr, cx);
3680 + tsc_check_state(cx->type);
3681 + working++;
3682 + }
3683 +
3684 ++ if (buggy_latency) {
3685 ++ pr_notice("FW issue: working around C-state latencies out of order\n");
3686 ++ sort(&pr->power.states[1], max_cstate,
3687 ++ sizeof(struct acpi_processor_cx),
3688 ++ acpi_cst_latency_cmp,
3689 ++ acpi_cst_latency_swap);
3690 ++ }
3691 ++
3692 + lapic_timer_propagate_broadcast(pr);
3693 +
3694 + return (working);
3695 +diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
3696 +index f2f5f1dc7c61d..9d82440a1d75b 100644
3697 +--- a/drivers/acpi/resource.c
3698 ++++ b/drivers/acpi/resource.c
3699 +@@ -430,6 +430,13 @@ static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
3700 + }
3701 + }
3702 +
3703 ++static bool irq_is_legacy(struct acpi_resource_irq *irq)
3704 ++{
3705 ++ return irq->triggering == ACPI_EDGE_SENSITIVE &&
3706 ++ irq->polarity == ACPI_ACTIVE_HIGH &&
3707 ++ irq->shareable == ACPI_EXCLUSIVE;
3708 ++}
3709 ++
3710 + /**
3711 + * acpi_dev_resource_interrupt - Extract ACPI interrupt resource information.
3712 + * @ares: Input ACPI resource object.
3713 +@@ -468,7 +475,7 @@ bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
3714 + }
3715 + acpi_dev_get_irqresource(res, irq->interrupts[index],
3716 + irq->triggering, irq->polarity,
3717 +- irq->shareable, true);
3718 ++ irq->shareable, irq_is_legacy(irq));
3719 + break;
3720 + case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
3721 + ext_irq = &ares->data.extended_irq;
3722 +diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
3723 +index 83cd4c95faf0d..33474fd969913 100644
3724 +--- a/drivers/acpi/video_detect.c
3725 ++++ b/drivers/acpi/video_detect.c
3726 +@@ -385,6 +385,30 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
3727 + DMI_MATCH(DMI_BOARD_NAME, "BA51_MV"),
3728 + },
3729 + },
3730 ++ {
3731 ++ .callback = video_detect_force_native,
3732 ++ .ident = "ASUSTeK COMPUTER INC. GA401",
3733 ++ .matches = {
3734 ++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
3735 ++ DMI_MATCH(DMI_PRODUCT_NAME, "GA401"),
3736 ++ },
3737 ++ },
3738 ++ {
3739 ++ .callback = video_detect_force_native,
3740 ++ .ident = "ASUSTeK COMPUTER INC. GA502",
3741 ++ .matches = {
3742 ++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
3743 ++ DMI_MATCH(DMI_PRODUCT_NAME, "GA502"),
3744 ++ },
3745 ++ },
3746 ++ {
3747 ++ .callback = video_detect_force_native,
3748 ++ .ident = "ASUSTeK COMPUTER INC. GA503",
3749 ++ .matches = {
3750 ++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
3751 ++ DMI_MATCH(DMI_PRODUCT_NAME, "GA503"),
3752 ++ },
3753 ++ },
3754 +
3755 + /*
3756 + * Desktops which falsely report a backlight and which our heuristics
3757 +diff --git a/drivers/ata/pata_ep93xx.c b/drivers/ata/pata_ep93xx.c
3758 +index badab67088935..46208ececbb6a 100644
3759 +--- a/drivers/ata/pata_ep93xx.c
3760 ++++ b/drivers/ata/pata_ep93xx.c
3761 +@@ -928,7 +928,7 @@ static int ep93xx_pata_probe(struct platform_device *pdev)
3762 + /* INT[3] (IRQ_EP93XX_EXT3) line connected as pull down */
3763 + irq = platform_get_irq(pdev, 0);
3764 + if (irq < 0) {
3765 +- err = -ENXIO;
3766 ++ err = irq;
3767 + goto err_rel_gpio;
3768 + }
3769 +
3770 +diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
3771 +index bd87476ab4813..b5a3f710d76de 100644
3772 +--- a/drivers/ata/pata_octeon_cf.c
3773 ++++ b/drivers/ata/pata_octeon_cf.c
3774 +@@ -898,10 +898,11 @@ static int octeon_cf_probe(struct platform_device *pdev)
3775 + return -EINVAL;
3776 + }
3777 +
3778 +- irq_handler = octeon_cf_interrupt;
3779 + i = platform_get_irq(dma_dev, 0);
3780 +- if (i > 0)
3781 ++ if (i > 0) {
3782 + irq = i;
3783 ++ irq_handler = octeon_cf_interrupt;
3784 ++ }
3785 + }
3786 + of_node_put(dma_node);
3787 + }
3788 +diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
3789 +index 479c4b29b8562..303f8c375b3af 100644
3790 +--- a/drivers/ata/pata_rb532_cf.c
3791 ++++ b/drivers/ata/pata_rb532_cf.c
3792 +@@ -115,10 +115,12 @@ static int rb532_pata_driver_probe(struct platform_device *pdev)
3793 + }
3794 +
3795 + irq = platform_get_irq(pdev, 0);
3796 +- if (irq <= 0) {
3797 ++ if (irq < 0) {
3798 + dev_err(&pdev->dev, "no IRQ resource found\n");
3799 +- return -ENOENT;
3800 ++ return irq;
3801 + }
3802 ++ if (!irq)
3803 ++ return -EINVAL;
3804 +
3805 + gpiod = devm_gpiod_get(&pdev->dev, NULL, GPIOD_IN);
3806 + if (IS_ERR(gpiod)) {
3807 +diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c
3808 +index 64b2ef15ec191..8440203e835ed 100644
3809 +--- a/drivers/ata/sata_highbank.c
3810 ++++ b/drivers/ata/sata_highbank.c
3811 +@@ -469,10 +469,12 @@ static int ahci_highbank_probe(struct platform_device *pdev)
3812 + }
3813 +
3814 + irq = platform_get_irq(pdev, 0);
3815 +- if (irq <= 0) {
3816 ++ if (irq < 0) {
3817 + dev_err(dev, "no irq\n");
3818 +- return -EINVAL;
3819 ++ return irq;
3820 + }
3821 ++ if (!irq)
3822 ++ return -EINVAL;
3823 +
3824 + hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
3825 + if (!hpriv) {
3826 +diff --git a/drivers/block/loop.c b/drivers/block/loop.c
3827 +index a58084c2ed7ce..06d44ae9701f1 100644
3828 +--- a/drivers/block/loop.c
3829 ++++ b/drivers/block/loop.c
3830 +@@ -1161,6 +1161,7 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
3831 + blk_queue_physical_block_size(lo->lo_queue, bsize);
3832 + blk_queue_io_min(lo->lo_queue, bsize);
3833 +
3834 ++ loop_config_discard(lo);
3835 + loop_update_rotational(lo);
3836 + loop_update_dio(lo);
3837 + loop_sysfs_init(lo);
3838 +diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
3839 +index ce9dcffdc5bfd..7551cac3fd7a9 100644
3840 +--- a/drivers/bluetooth/btqca.c
3841 ++++ b/drivers/bluetooth/btqca.c
3842 +@@ -143,7 +143,7 @@ int qca_send_pre_shutdown_cmd(struct hci_dev *hdev)
3843 + EXPORT_SYMBOL_GPL(qca_send_pre_shutdown_cmd);
3844 +
3845 + static void qca_tlv_check_data(struct qca_fw_config *config,
3846 +- const struct firmware *fw, enum qca_btsoc_type soc_type)
3847 ++ u8 *fw_data, enum qca_btsoc_type soc_type)
3848 + {
3849 + const u8 *data;
3850 + u32 type_len;
3851 +@@ -154,7 +154,7 @@ static void qca_tlv_check_data(struct qca_fw_config *config,
3852 + struct tlv_type_nvm *tlv_nvm;
3853 + uint8_t nvm_baud_rate = config->user_baud_rate;
3854 +
3855 +- tlv = (struct tlv_type_hdr *)fw->data;
3856 ++ tlv = (struct tlv_type_hdr *)fw_data;
3857 +
3858 + type_len = le32_to_cpu(tlv->type_len);
3859 + length = (type_len >> 8) & 0x00ffffff;
3860 +@@ -350,8 +350,9 @@ static int qca_download_firmware(struct hci_dev *hdev,
3861 + enum qca_btsoc_type soc_type)
3862 + {
3863 + const struct firmware *fw;
3864 ++ u8 *data;
3865 + const u8 *segment;
3866 +- int ret, remain, i = 0;
3867 ++ int ret, size, remain, i = 0;
3868 +
3869 + bt_dev_info(hdev, "QCA Downloading %s", config->fwname);
3870 +
3871 +@@ -362,10 +363,22 @@ static int qca_download_firmware(struct hci_dev *hdev,
3872 + return ret;
3873 + }
3874 +
3875 +- qca_tlv_check_data(config, fw, soc_type);
3876 ++ size = fw->size;
3877 ++ data = vmalloc(fw->size);
3878 ++ if (!data) {
3879 ++ bt_dev_err(hdev, "QCA Failed to allocate memory for file: %s",
3880 ++ config->fwname);
3881 ++ release_firmware(fw);
3882 ++ return -ENOMEM;
3883 ++ }
3884 ++
3885 ++ memcpy(data, fw->data, size);
3886 ++ release_firmware(fw);
3887 ++
3888 ++ qca_tlv_check_data(config, data, soc_type);
3889 +
3890 +- segment = fw->data;
3891 +- remain = fw->size;
3892 ++ segment = data;
3893 ++ remain = size;
3894 + while (remain > 0) {
3895 + int segsize = min(MAX_SIZE_PER_TLV_SEGMENT, remain);
3896 +
3897 +@@ -395,7 +408,7 @@ static int qca_download_firmware(struct hci_dev *hdev,
3898 + ret = qca_inject_cmd_complete_event(hdev);
3899 +
3900 + out:
3901 +- release_firmware(fw);
3902 ++ vfree(data);
3903 +
3904 + return ret;
3905 + }
3906 +diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
3907 +index ad47ff0d55c2e..4184faef9f169 100644
3908 +--- a/drivers/bluetooth/hci_qca.c
3909 ++++ b/drivers/bluetooth/hci_qca.c
3910 +@@ -1809,8 +1809,6 @@ static void qca_power_shutdown(struct hci_uart *hu)
3911 + unsigned long flags;
3912 + enum qca_btsoc_type soc_type = qca_soc_type(hu);
3913 +
3914 +- qcadev = serdev_device_get_drvdata(hu->serdev);
3915 +-
3916 + /* From this point we go into power off state. But serial port is
3917 + * still open, stop queueing the IBS data and flush all the buffered
3918 + * data in skb's.
3919 +@@ -1826,6 +1824,8 @@ static void qca_power_shutdown(struct hci_uart *hu)
3920 + if (!hu->serdev)
3921 + return;
3922 +
3923 ++ qcadev = serdev_device_get_drvdata(hu->serdev);
3924 ++
3925 + if (qca_is_wcn399x(soc_type)) {
3926 + host_set_baudrate(hu, 2400);
3927 + qca_send_power_pulse(hu, false);
3928 +diff --git a/drivers/bus/mhi/core/pm.c b/drivers/bus/mhi/core/pm.c
3929 +index aeb895c084607..044dcdd723a70 100644
3930 +--- a/drivers/bus/mhi/core/pm.c
3931 ++++ b/drivers/bus/mhi/core/pm.c
3932 +@@ -809,6 +809,7 @@ int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
3933 +
3934 + ret = wait_event_timeout(mhi_cntrl->state_event,
3935 + mhi_cntrl->dev_state == MHI_STATE_M0 ||
3936 ++ mhi_cntrl->dev_state == MHI_STATE_M2 ||
3937 + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
3938 + msecs_to_jiffies(mhi_cntrl->timeout_ms));
3939 +
3940 +diff --git a/drivers/char/hw_random/exynos-trng.c b/drivers/char/hw_random/exynos-trng.c
3941 +index 8e1fe3f8dd2df..c8db62bc5ff72 100644
3942 +--- a/drivers/char/hw_random/exynos-trng.c
3943 ++++ b/drivers/char/hw_random/exynos-trng.c
3944 +@@ -132,7 +132,7 @@ static int exynos_trng_probe(struct platform_device *pdev)
3945 + return PTR_ERR(trng->mem);
3946 +
3947 + pm_runtime_enable(&pdev->dev);
3948 +- ret = pm_runtime_get_sync(&pdev->dev);
3949 ++ ret = pm_runtime_resume_and_get(&pdev->dev);
3950 + if (ret < 0) {
3951 + dev_err(&pdev->dev, "Could not get runtime PM.\n");
3952 + goto err_pm_get;
3953 +@@ -165,7 +165,7 @@ err_register:
3954 + clk_disable_unprepare(trng->clk);
3955 +
3956 + err_clock:
3957 +- pm_runtime_put_sync(&pdev->dev);
3958 ++ pm_runtime_put_noidle(&pdev->dev);
3959 +
3960 + err_pm_get:
3961 + pm_runtime_disable(&pdev->dev);
3962 +diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
3963 +index 89681f07bc787..9468e9520cee0 100644
3964 +--- a/drivers/char/pcmcia/cm4000_cs.c
3965 ++++ b/drivers/char/pcmcia/cm4000_cs.c
3966 +@@ -544,6 +544,10 @@ static int set_protocol(struct cm4000_dev *dev, struct ptsreq *ptsreq)
3967 + io_read_num_rec_bytes(iobase, &num_bytes_read);
3968 + if (num_bytes_read >= 4) {
3969 + DEBUGP(2, dev, "NumRecBytes = %i\n", num_bytes_read);
3970 ++ if (num_bytes_read > 4) {
3971 ++ rc = -EIO;
3972 ++ goto exit_setprotocol;
3973 ++ }
3974 + break;
3975 + }
3976 + usleep_range(10000, 11000);
3977 +diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
3978 +index 55b9d3965ae1b..69579efb247b3 100644
3979 +--- a/drivers/char/tpm/tpm_tis_core.c
3980 ++++ b/drivers/char/tpm/tpm_tis_core.c
3981 +@@ -196,13 +196,24 @@ static u8 tpm_tis_status(struct tpm_chip *chip)
3982 + return 0;
3983 +
3984 + if (unlikely((status & TPM_STS_READ_ZERO) != 0)) {
3985 +- /*
3986 +- * If this trips, the chances are the read is
3987 +- * returning 0xff because the locality hasn't been
3988 +- * acquired. Usually because tpm_try_get_ops() hasn't
3989 +- * been called before doing a TPM operation.
3990 +- */
3991 +- WARN_ONCE(1, "TPM returned invalid status\n");
3992 ++ if (!test_and_set_bit(TPM_TIS_INVALID_STATUS, &priv->flags)) {
3993 ++ /*
3994 ++ * If this trips, the chances are the read is
3995 ++ * returning 0xff because the locality hasn't been
3996 ++ * acquired. Usually because tpm_try_get_ops() hasn't
3997 ++ * been called before doing a TPM operation.
3998 ++ */
3999 ++ dev_err(&chip->dev, "invalid TPM_STS.x 0x%02x, dumping stack for forensics\n",
4000 ++ status);
4001 ++
4002 ++ /*
4003 ++ * Dump stack for forensics, as invalid TPM_STS.x could be
4004 ++ * potentially triggered by impaired tpm_try_get_ops() or
4005 ++ * tpm_find_get_ops().
4006 ++ */
4007 ++ dump_stack();
4008 ++ }
4009 ++
4010 + return 0;
4011 + }
4012 +
4013 +diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h
4014 +index 9b2d32a59f670..b2a3c6c72882d 100644
4015 +--- a/drivers/char/tpm/tpm_tis_core.h
4016 ++++ b/drivers/char/tpm/tpm_tis_core.h
4017 +@@ -83,6 +83,7 @@ enum tis_defaults {
4018 +
4019 + enum tpm_tis_flags {
4020 + TPM_TIS_ITPM_WORKAROUND = BIT(0),
4021 ++ TPM_TIS_INVALID_STATUS = BIT(1),
4022 + };
4023 +
4024 + struct tpm_tis_data {
4025 +@@ -90,7 +91,7 @@ struct tpm_tis_data {
4026 + int locality;
4027 + int irq;
4028 + bool irq_tested;
4029 +- unsigned int flags;
4030 ++ unsigned long flags;
4031 + void __iomem *ilb_base_addr;
4032 + u16 clkrun_enabled;
4033 + wait_queue_head_t int_queue;
4034 +diff --git a/drivers/char/tpm/tpm_tis_spi_main.c b/drivers/char/tpm/tpm_tis_spi_main.c
4035 +index 3856f6ebcb34f..de4209003a448 100644
4036 +--- a/drivers/char/tpm/tpm_tis_spi_main.c
4037 ++++ b/drivers/char/tpm/tpm_tis_spi_main.c
4038 +@@ -260,6 +260,8 @@ static int tpm_tis_spi_remove(struct spi_device *dev)
4039 + }
4040 +
4041 + static const struct spi_device_id tpm_tis_spi_id[] = {
4042 ++ { "st33htpm-spi", (unsigned long)tpm_tis_spi_probe },
4043 ++ { "slb9670", (unsigned long)tpm_tis_spi_probe },
4044 + { "tpm_tis_spi", (unsigned long)tpm_tis_spi_probe },
4045 + { "cr50", (unsigned long)cr50_spi_probe },
4046 + {}
4047 +diff --git a/drivers/clk/actions/owl-s500.c b/drivers/clk/actions/owl-s500.c
4048 +index 61bb224f63309..cbeb51c804eb5 100644
4049 +--- a/drivers/clk/actions/owl-s500.c
4050 ++++ b/drivers/clk/actions/owl-s500.c
4051 +@@ -127,8 +127,7 @@ static struct clk_factor_table sd_factor_table[] = {
4052 + { 12, 1, 13 }, { 13, 1, 14 }, { 14, 1, 15 }, { 15, 1, 16 },
4053 + { 16, 1, 17 }, { 17, 1, 18 }, { 18, 1, 19 }, { 19, 1, 20 },
4054 + { 20, 1, 21 }, { 21, 1, 22 }, { 22, 1, 23 }, { 23, 1, 24 },
4055 +- { 24, 1, 25 }, { 25, 1, 26 }, { 26, 1, 27 }, { 27, 1, 28 },
4056 +- { 28, 1, 29 }, { 29, 1, 30 }, { 30, 1, 31 }, { 31, 1, 32 },
4057 ++ { 24, 1, 25 },
4058 +
4059 + /* bit8: /128 */
4060 + { 256, 1, 1 * 128 }, { 257, 1, 2 * 128 }, { 258, 1, 3 * 128 }, { 259, 1, 4 * 128 },
4061 +@@ -137,19 +136,20 @@ static struct clk_factor_table sd_factor_table[] = {
4062 + { 268, 1, 13 * 128 }, { 269, 1, 14 * 128 }, { 270, 1, 15 * 128 }, { 271, 1, 16 * 128 },
4063 + { 272, 1, 17 * 128 }, { 273, 1, 18 * 128 }, { 274, 1, 19 * 128 }, { 275, 1, 20 * 128 },
4064 + { 276, 1, 21 * 128 }, { 277, 1, 22 * 128 }, { 278, 1, 23 * 128 }, { 279, 1, 24 * 128 },
4065 +- { 280, 1, 25 * 128 }, { 281, 1, 26 * 128 }, { 282, 1, 27 * 128 }, { 283, 1, 28 * 128 },
4066 +- { 284, 1, 29 * 128 }, { 285, 1, 30 * 128 }, { 286, 1, 31 * 128 }, { 287, 1, 32 * 128 },
4067 ++ { 280, 1, 25 * 128 },
4068 + { 0, 0, 0 },
4069 + };
4070 +
4071 +-static struct clk_factor_table bisp_factor_table[] = {
4072 +- { 0, 1, 1 }, { 1, 1, 2 }, { 2, 1, 3 }, { 3, 1, 4 },
4073 +- { 4, 1, 5 }, { 5, 1, 6 }, { 6, 1, 7 }, { 7, 1, 8 },
4074 ++static struct clk_factor_table de_factor_table[] = {
4075 ++ { 0, 1, 1 }, { 1, 2, 3 }, { 2, 1, 2 }, { 3, 2, 5 },
4076 ++ { 4, 1, 3 }, { 5, 1, 4 }, { 6, 1, 6 }, { 7, 1, 8 },
4077 ++ { 8, 1, 12 },
4078 + { 0, 0, 0 },
4079 + };
4080 +
4081 +-static struct clk_factor_table ahb_factor_table[] = {
4082 +- { 1, 1, 2 }, { 2, 1, 3 },
4083 ++static struct clk_factor_table hde_factor_table[] = {
4084 ++ { 0, 1, 1 }, { 1, 2, 3 }, { 2, 1, 2 }, { 3, 2, 5 },
4085 ++ { 4, 1, 3 }, { 5, 1, 4 }, { 6, 1, 6 }, { 7, 1, 8 },
4086 + { 0, 0, 0 },
4087 + };
4088 +
4089 +@@ -158,6 +158,13 @@ static struct clk_div_table rmii_ref_div_table[] = {
4090 + { 0, 0 },
4091 + };
4092 +
4093 ++static struct clk_div_table std12rate_div_table[] = {
4094 ++ { 0, 1 }, { 1, 2 }, { 2, 3 }, { 3, 4 },
4095 ++ { 4, 5 }, { 5, 6 }, { 6, 7 }, { 7, 8 },
4096 ++ { 8, 9 }, { 9, 10 }, { 10, 11 }, { 11, 12 },
4097 ++ { 0, 0 },
4098 ++};
4099 ++
4100 + static struct clk_div_table i2s_div_table[] = {
4101 + { 0, 1 }, { 1, 2 }, { 2, 3 }, { 3, 4 },
4102 + { 4, 6 }, { 5, 8 }, { 6, 12 }, { 7, 16 },
4103 +@@ -174,7 +181,6 @@ static struct clk_div_table nand_div_table[] = {
4104 +
4105 + /* mux clock */
4106 + static OWL_MUX(dev_clk, "dev_clk", dev_clk_mux_p, CMU_DEVPLL, 12, 1, CLK_SET_RATE_PARENT);
4107 +-static OWL_MUX(ahbprediv_clk, "ahbprediv_clk", ahbprediv_clk_mux_p, CMU_BUSCLK1, 8, 3, CLK_SET_RATE_PARENT);
4108 +
4109 + /* gate clocks */
4110 + static OWL_GATE(gpio_clk, "gpio_clk", "apb_clk", CMU_DEVCLKEN0, 18, 0, 0);
4111 +@@ -187,45 +193,54 @@ static OWL_GATE(timer_clk, "timer_clk", "hosc", CMU_DEVCLKEN1, 27, 0, 0);
4112 + static OWL_GATE(hdmi_clk, "hdmi_clk", "hosc", CMU_DEVCLKEN1, 3, 0, 0);
4113 +
4114 + /* divider clocks */
4115 +-static OWL_DIVIDER(h_clk, "h_clk", "ahbprediv_clk", CMU_BUSCLK1, 12, 2, NULL, 0, 0);
4116 ++static OWL_DIVIDER(h_clk, "h_clk", "ahbprediv_clk", CMU_BUSCLK1, 2, 2, NULL, 0, 0);
4117 + static OWL_DIVIDER(apb_clk, "apb_clk", "ahb_clk", CMU_BUSCLK1, 14, 2, NULL, 0, 0);
4118 + static OWL_DIVIDER(rmii_ref_clk, "rmii_ref_clk", "ethernet_pll_clk", CMU_ETHERNETPLL, 1, 1, rmii_ref_div_table, 0, 0);
4119 +
4120 + /* factor clocks */
4121 +-static OWL_FACTOR(ahb_clk, "ahb_clk", "h_clk", CMU_BUSCLK1, 2, 2, ahb_factor_table, 0, 0);
4122 +-static OWL_FACTOR(de1_clk, "de_clk1", "de_clk", CMU_DECLK, 0, 3, bisp_factor_table, 0, 0);
4123 +-static OWL_FACTOR(de2_clk, "de_clk2", "de_clk", CMU_DECLK, 4, 3, bisp_factor_table, 0, 0);
4124 ++static OWL_FACTOR(de1_clk, "de_clk1", "de_clk", CMU_DECLK, 0, 4, de_factor_table, 0, 0);
4125 ++static OWL_FACTOR(de2_clk, "de_clk2", "de_clk", CMU_DECLK, 4, 4, de_factor_table, 0, 0);
4126 +
4127 + /* composite clocks */
4128 ++static OWL_COMP_DIV(ahbprediv_clk, "ahbprediv_clk", ahbprediv_clk_mux_p,
4129 ++ OWL_MUX_HW(CMU_BUSCLK1, 8, 3),
4130 ++ { 0 },
4131 ++ OWL_DIVIDER_HW(CMU_BUSCLK1, 12, 2, 0, NULL),
4132 ++ CLK_SET_RATE_PARENT);
4133 ++
4134 ++static OWL_COMP_FIXED_FACTOR(ahb_clk, "ahb_clk", "h_clk",
4135 ++ { 0 },
4136 ++ 1, 1, 0);
4137 ++
4138 + static OWL_COMP_FACTOR(vce_clk, "vce_clk", hde_clk_mux_p,
4139 + OWL_MUX_HW(CMU_VCECLK, 4, 2),
4140 + OWL_GATE_HW(CMU_DEVCLKEN0, 26, 0),
4141 +- OWL_FACTOR_HW(CMU_VCECLK, 0, 3, 0, bisp_factor_table),
4142 ++ OWL_FACTOR_HW(CMU_VCECLK, 0, 3, 0, hde_factor_table),
4143 + 0);
4144 +
4145 + static OWL_COMP_FACTOR(vde_clk, "vde_clk", hde_clk_mux_p,
4146 + OWL_MUX_HW(CMU_VDECLK, 4, 2),
4147 + OWL_GATE_HW(CMU_DEVCLKEN0, 25, 0),
4148 +- OWL_FACTOR_HW(CMU_VDECLK, 0, 3, 0, bisp_factor_table),
4149 ++ OWL_FACTOR_HW(CMU_VDECLK, 0, 3, 0, hde_factor_table),
4150 + 0);
4151 +
4152 +-static OWL_COMP_FACTOR(bisp_clk, "bisp_clk", bisp_clk_mux_p,
4153 ++static OWL_COMP_DIV(bisp_clk, "bisp_clk", bisp_clk_mux_p,
4154 + OWL_MUX_HW(CMU_BISPCLK, 4, 1),
4155 + OWL_GATE_HW(CMU_DEVCLKEN0, 14, 0),
4156 +- OWL_FACTOR_HW(CMU_BISPCLK, 0, 3, 0, bisp_factor_table),
4157 ++ OWL_DIVIDER_HW(CMU_BISPCLK, 0, 4, 0, std12rate_div_table),
4158 + 0);
4159 +
4160 +-static OWL_COMP_FACTOR(sensor0_clk, "sensor0_clk", sensor_clk_mux_p,
4161 ++static OWL_COMP_DIV(sensor0_clk, "sensor0_clk", sensor_clk_mux_p,
4162 + OWL_MUX_HW(CMU_SENSORCLK, 4, 1),
4163 + OWL_GATE_HW(CMU_DEVCLKEN0, 14, 0),
4164 +- OWL_FACTOR_HW(CMU_SENSORCLK, 0, 3, 0, bisp_factor_table),
4165 +- CLK_IGNORE_UNUSED);
4166 ++ OWL_DIVIDER_HW(CMU_SENSORCLK, 0, 4, 0, std12rate_div_table),
4167 ++ 0);
4168 +
4169 +-static OWL_COMP_FACTOR(sensor1_clk, "sensor1_clk", sensor_clk_mux_p,
4170 ++static OWL_COMP_DIV(sensor1_clk, "sensor1_clk", sensor_clk_mux_p,
4171 + OWL_MUX_HW(CMU_SENSORCLK, 4, 1),
4172 + OWL_GATE_HW(CMU_DEVCLKEN0, 14, 0),
4173 +- OWL_FACTOR_HW(CMU_SENSORCLK, 8, 3, 0, bisp_factor_table),
4174 +- CLK_IGNORE_UNUSED);
4175 ++ OWL_DIVIDER_HW(CMU_SENSORCLK, 8, 4, 0, std12rate_div_table),
4176 ++ 0);
4177 +
4178 + static OWL_COMP_FACTOR(sd0_clk, "sd0_clk", sd_clk_mux_p,
4179 + OWL_MUX_HW(CMU_SD0CLK, 9, 1),
4180 +@@ -305,7 +320,7 @@ static OWL_COMP_FIXED_FACTOR(i2c3_clk, "i2c3_clk", "ethernet_pll_clk",
4181 + static OWL_COMP_DIV(uart0_clk, "uart0_clk", uart_clk_mux_p,
4182 + OWL_MUX_HW(CMU_UART0CLK, 16, 1),
4183 + OWL_GATE_HW(CMU_DEVCLKEN1, 6, 0),
4184 +- OWL_DIVIDER_HW(CMU_UART1CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
4185 ++ OWL_DIVIDER_HW(CMU_UART0CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
4186 + CLK_IGNORE_UNUSED);
4187 +
4188 + static OWL_COMP_DIV(uart1_clk, "uart1_clk", uart_clk_mux_p,
4189 +@@ -317,31 +332,31 @@ static OWL_COMP_DIV(uart1_clk, "uart1_clk", uart_clk_mux_p,
4190 + static OWL_COMP_DIV(uart2_clk, "uart2_clk", uart_clk_mux_p,
4191 + OWL_MUX_HW(CMU_UART2CLK, 16, 1),
4192 + OWL_GATE_HW(CMU_DEVCLKEN1, 8, 0),
4193 +- OWL_DIVIDER_HW(CMU_UART1CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
4194 ++ OWL_DIVIDER_HW(CMU_UART2CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
4195 + CLK_IGNORE_UNUSED);
4196 +
4197 + static OWL_COMP_DIV(uart3_clk, "uart3_clk", uart_clk_mux_p,
4198 + OWL_MUX_HW(CMU_UART3CLK, 16, 1),
4199 + OWL_GATE_HW(CMU_DEVCLKEN1, 19, 0),
4200 +- OWL_DIVIDER_HW(CMU_UART1CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
4201 ++ OWL_DIVIDER_HW(CMU_UART3CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
4202 + CLK_IGNORE_UNUSED);
4203 +
4204 + static OWL_COMP_DIV(uart4_clk, "uart4_clk", uart_clk_mux_p,
4205 + OWL_MUX_HW(CMU_UART4CLK, 16, 1),
4206 + OWL_GATE_HW(CMU_DEVCLKEN1, 20, 0),
4207 +- OWL_DIVIDER_HW(CMU_UART1CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
4208 ++ OWL_DIVIDER_HW(CMU_UART4CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
4209 + CLK_IGNORE_UNUSED);
4210 +
4211 + static OWL_COMP_DIV(uart5_clk, "uart5_clk", uart_clk_mux_p,
4212 + OWL_MUX_HW(CMU_UART5CLK, 16, 1),
4213 + OWL_GATE_HW(CMU_DEVCLKEN1, 21, 0),
4214 +- OWL_DIVIDER_HW(CMU_UART1CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
4215 ++ OWL_DIVIDER_HW(CMU_UART5CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
4216 + CLK_IGNORE_UNUSED);
4217 +
4218 + static OWL_COMP_DIV(uart6_clk, "uart6_clk", uart_clk_mux_p,
4219 + OWL_MUX_HW(CMU_UART6CLK, 16, 1),
4220 + OWL_GATE_HW(CMU_DEVCLKEN1, 18, 0),
4221 +- OWL_DIVIDER_HW(CMU_UART1CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
4222 ++ OWL_DIVIDER_HW(CMU_UART6CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
4223 + CLK_IGNORE_UNUSED);
4224 +
4225 + static OWL_COMP_DIV(i2srx_clk, "i2srx_clk", i2s_clk_mux_p,
4226 +diff --git a/drivers/clk/clk-si5341.c b/drivers/clk/clk-si5341.c
4227 +index e0446e66fa645..eb22f4fdbc6b4 100644
4228 +--- a/drivers/clk/clk-si5341.c
4229 ++++ b/drivers/clk/clk-si5341.c
4230 +@@ -92,12 +92,22 @@ struct clk_si5341_output_config {
4231 + #define SI5341_PN_BASE 0x0002
4232 + #define SI5341_DEVICE_REV 0x0005
4233 + #define SI5341_STATUS 0x000C
4234 ++#define SI5341_LOS 0x000D
4235 ++#define SI5341_STATUS_STICKY 0x0011
4236 ++#define SI5341_LOS_STICKY 0x0012
4237 + #define SI5341_SOFT_RST 0x001C
4238 + #define SI5341_IN_SEL 0x0021
4239 ++#define SI5341_DEVICE_READY 0x00FE
4240 + #define SI5341_XAXB_CFG 0x090E
4241 + #define SI5341_IN_EN 0x0949
4242 + #define SI5341_INX_TO_PFD_EN 0x094A
4243 +
4244 ++/* Status bits */
4245 ++#define SI5341_STATUS_SYSINCAL BIT(0)
4246 ++#define SI5341_STATUS_LOSXAXB BIT(1)
4247 ++#define SI5341_STATUS_LOSREF BIT(2)
4248 ++#define SI5341_STATUS_LOL BIT(3)
4249 ++
4250 + /* Input selection */
4251 + #define SI5341_IN_SEL_MASK 0x06
4252 + #define SI5341_IN_SEL_SHIFT 1
4253 +@@ -340,6 +350,8 @@ static const struct si5341_reg_default si5341_reg_defaults[] = {
4254 + { 0x094A, 0x00 }, /* INx_TO_PFD_EN (disabled) */
4255 + { 0x0A02, 0x00 }, /* Not in datasheet */
4256 + { 0x0B44, 0x0F }, /* PDIV_ENB (datasheet does not mention what it is) */
4257 ++ { 0x0B57, 0x10 }, /* VCO_RESET_CALCODE (not described in datasheet) */
4258 ++ { 0x0B58, 0x05 }, /* VCO_RESET_CALCODE (not described in datasheet) */
4259 + };
4260 +
4261 + /* Read and interpret a 44-bit followed by a 32-bit value in the regmap */
4262 +@@ -623,6 +635,9 @@ static unsigned long si5341_synth_clk_recalc_rate(struct clk_hw *hw,
4263 + SI5341_SYNTH_N_NUM(synth->index), &n_num, &n_den);
4264 + if (err < 0)
4265 + return err;
4266 ++ /* Check for bogus/uninitialized settings */
4267 ++ if (!n_num || !n_den)
4268 ++ return 0;
4269 +
4270 + /*
4271 + * n_num and n_den are shifted left as much as possible, so to prevent
4272 +@@ -806,6 +821,9 @@ static long si5341_output_clk_round_rate(struct clk_hw *hw, unsigned long rate,
4273 + {
4274 + unsigned long r;
4275 +
4276 ++ if (!rate)
4277 ++ return 0;
4278 ++
4279 + r = *parent_rate >> 1;
4280 +
4281 + /* If rate is an even divisor, no changes to parent required */
4282 +@@ -834,11 +852,16 @@ static int si5341_output_clk_set_rate(struct clk_hw *hw, unsigned long rate,
4283 + unsigned long parent_rate)
4284 + {
4285 + struct clk_si5341_output *output = to_clk_si5341_output(hw);
4286 +- /* Frequency divider is (r_div + 1) * 2 */
4287 +- u32 r_div = (parent_rate / rate) >> 1;
4288 ++ u32 r_div;
4289 + int err;
4290 + u8 r[3];
4291 +
4292 ++ if (!rate)
4293 ++ return -EINVAL;
4294 ++
4295 ++ /* Frequency divider is (r_div + 1) * 2 */
4296 ++ r_div = (parent_rate / rate) >> 1;
4297 ++
4298 + if (r_div <= 1)
4299 + r_div = 0;
4300 + else if (r_div >= BIT(24))
4301 +@@ -1083,7 +1106,7 @@ static const struct si5341_reg_default si5341_preamble[] = {
4302 + { 0x0B25, 0x00 },
4303 + { 0x0502, 0x01 },
4304 + { 0x0505, 0x03 },
4305 +- { 0x0957, 0x1F },
4306 ++ { 0x0957, 0x17 },
4307 + { 0x0B4E, 0x1A },
4308 + };
4309 +
4310 +@@ -1189,6 +1212,32 @@ static const struct regmap_range_cfg si5341_regmap_ranges[] = {
4311 + },
4312 + };
4313 +
4314 ++static int si5341_wait_device_ready(struct i2c_client *client)
4315 ++{
4316 ++ int count;
4317 ++
4318 ++ /* Datasheet warns: Any attempt to read or write any register other
4319 ++ * than DEVICE_READY before DEVICE_READY reads as 0x0F may corrupt the
4320 ++ * NVM programming and may corrupt the register contents, as they are
4321 ++ * read from NVM. Note that this includes accesses to the PAGE register.
4322 ++ * Also: DEVICE_READY is available on every register page, so no page
4323 ++ * change is needed to read it.
4324 ++ * Do this outside regmap to avoid automatic PAGE register access.
4325 ++ * May take up to 300ms to complete.
4326 ++ */
4327 ++ for (count = 0; count < 15; ++count) {
4328 ++ s32 result = i2c_smbus_read_byte_data(client,
4329 ++ SI5341_DEVICE_READY);
4330 ++ if (result < 0)
4331 ++ return result;
4332 ++ if (result == 0x0F)
4333 ++ return 0;
4334 ++ msleep(20);
4335 ++ }
4336 ++ dev_err(&client->dev, "timeout waiting for DEVICE_READY\n");
4337 ++ return -EIO;
4338 ++}
4339 ++
4340 + static const struct regmap_config si5341_regmap_config = {
4341 + .reg_bits = 8,
4342 + .val_bits = 8,
4343 +@@ -1378,6 +1427,7 @@ static int si5341_probe(struct i2c_client *client,
4344 + unsigned int i;
4345 + struct clk_si5341_output_config config[SI5341_MAX_NUM_OUTPUTS];
4346 + bool initialization_required;
4347 ++ u32 status;
4348 +
4349 + data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
4350 + if (!data)
4351 +@@ -1385,6 +1435,11 @@ static int si5341_probe(struct i2c_client *client,
4352 +
4353 + data->i2c_client = client;
4354 +
4355 ++ /* Must be done before otherwise touching hardware */
4356 ++ err = si5341_wait_device_ready(client);
4357 ++ if (err)
4358 ++ return err;
4359 ++
4360 + for (i = 0; i < SI5341_NUM_INPUTS; ++i) {
4361 + input = devm_clk_get(&client->dev, si5341_input_clock_names[i]);
4362 + if (IS_ERR(input)) {
4363 +@@ -1540,6 +1595,22 @@ static int si5341_probe(struct i2c_client *client,
4364 + return err;
4365 + }
4366 +
4367 ++ /* wait for device to report input clock present and PLL lock */
4368 ++ err = regmap_read_poll_timeout(data->regmap, SI5341_STATUS, status,
4369 ++ !(status & (SI5341_STATUS_LOSREF | SI5341_STATUS_LOL)),
4370 ++ 10000, 250000);
4371 ++ if (err) {
4372 ++ dev_err(&client->dev, "Error waiting for input clock or PLL lock\n");
4373 ++ return err;
4374 ++ }
4375 ++
4376 ++ /* clear sticky alarm bits from initialization */
4377 ++ err = regmap_write(data->regmap, SI5341_STATUS_STICKY, 0);
4378 ++ if (err) {
4379 ++ dev_err(&client->dev, "unable to clear sticky status\n");
4380 ++ return err;
4381 ++ }
4382 ++
4383 + /* Free the names, clk framework makes copies */
4384 + for (i = 0; i < data->num_synth; ++i)
4385 + devm_kfree(&client->dev, (void *)synth_clock_names[i]);
4386 +diff --git a/drivers/clk/clk-versaclock5.c b/drivers/clk/clk-versaclock5.c
4387 +index 43db67337bc06..4e741f94baf02 100644
4388 +--- a/drivers/clk/clk-versaclock5.c
4389 ++++ b/drivers/clk/clk-versaclock5.c
4390 +@@ -69,7 +69,10 @@
4391 + #define VC5_FEEDBACK_FRAC_DIV(n) (0x19 + (n))
4392 + #define VC5_RC_CONTROL0 0x1e
4393 + #define VC5_RC_CONTROL1 0x1f
4394 +-/* Register 0x20 is factory reserved */
4395 ++
4396 ++/* These registers are named "Unused Factory Reserved Registers" */
4397 ++#define VC5_RESERVED_X0(idx) (0x20 + ((idx) * 0x10))
4398 ++#define VC5_RESERVED_X0_BYPASS_SYNC BIT(7) /* bypass_sync<idx> bit */
4399 +
4400 + /* Output divider control for divider 1,2,3,4 */
4401 + #define VC5_OUT_DIV_CONTROL(idx) (0x21 + ((idx) * 0x10))
4402 +@@ -87,7 +90,6 @@
4403 + #define VC5_OUT_DIV_SKEW_INT(idx, n) (0x2b + ((idx) * 0x10) + (n))
4404 + #define VC5_OUT_DIV_INT(idx, n) (0x2d + ((idx) * 0x10) + (n))
4405 + #define VC5_OUT_DIV_SKEW_FRAC(idx) (0x2f + ((idx) * 0x10))
4406 +-/* Registers 0x30, 0x40, 0x50 are factory reserved */
4407 +
4408 + /* Clock control register for clock 1,2 */
4409 + #define VC5_CLK_OUTPUT_CFG(idx, n) (0x60 + ((idx) * 0x2) + (n))
4410 +@@ -140,6 +142,8 @@
4411 + #define VC5_HAS_INTERNAL_XTAL BIT(0)
4412 + /* chip has PFD requency doubler */
4413 + #define VC5_HAS_PFD_FREQ_DBL BIT(1)
4414 ++/* chip has bits to disable FOD sync */
4415 ++#define VC5_HAS_BYPASS_SYNC_BIT BIT(2)
4416 +
4417 + /* Supported IDT VC5 models. */
4418 + enum vc5_model {
4419 +@@ -581,6 +585,23 @@ static int vc5_clk_out_prepare(struct clk_hw *hw)
4420 + unsigned int src;
4421 + int ret;
4422 +
4423 ++ /*
4424 ++ * When enabling a FOD, all currently enabled FODs are briefly
4425 ++ * stopped in order to synchronize all of them. This causes a clock
4426 ++ * disruption to any unrelated chips that might be already using
4427 ++ * other clock outputs. Bypass the sync feature to avoid the issue,
4428 ++ * which is possible on the VersaClock 6E family via reserved
4429 ++ * registers.
4430 ++ */
4431 ++ if (vc5->chip_info->flags & VC5_HAS_BYPASS_SYNC_BIT) {
4432 ++ ret = regmap_update_bits(vc5->regmap,
4433 ++ VC5_RESERVED_X0(hwdata->num),
4434 ++ VC5_RESERVED_X0_BYPASS_SYNC,
4435 ++ VC5_RESERVED_X0_BYPASS_SYNC);
4436 ++ if (ret)
4437 ++ return ret;
4438 ++ }
4439 ++
4440 + /*
4441 + * If the input mux is disabled, enable it first and
4442 + * select source from matching FOD.
4443 +@@ -1102,7 +1123,7 @@ static const struct vc5_chip_info idt_5p49v6965_info = {
4444 + .model = IDT_VC6_5P49V6965,
4445 + .clk_fod_cnt = 4,
4446 + .clk_out_cnt = 5,
4447 +- .flags = 0,
4448 ++ .flags = VC5_HAS_BYPASS_SYNC_BIT,
4449 + };
4450 +
4451 + static const struct i2c_device_id vc5_id[] = {
4452 +diff --git a/drivers/clk/imx/clk-imx8mq.c b/drivers/clk/imx/clk-imx8mq.c
4453 +index 4e6c81a702214..aac6bcc65c20c 100644
4454 +--- a/drivers/clk/imx/clk-imx8mq.c
4455 ++++ b/drivers/clk/imx/clk-imx8mq.c
4456 +@@ -350,46 +350,26 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
4457 + hws[IMX8MQ_VIDEO2_PLL_OUT] = imx_clk_hw_sscg_pll("video2_pll_out", video2_pll_out_sels, ARRAY_SIZE(video2_pll_out_sels), 0, 0, 0, base + 0x54, 0);
4458 +
4459 + /* SYS PLL1 fixed output */
4460 +- hws[IMX8MQ_SYS1_PLL_40M_CG] = imx_clk_hw_gate("sys1_pll_40m_cg", "sys1_pll_out", base + 0x30, 9);
4461 +- hws[IMX8MQ_SYS1_PLL_80M_CG] = imx_clk_hw_gate("sys1_pll_80m_cg", "sys1_pll_out", base + 0x30, 11);
4462 +- hws[IMX8MQ_SYS1_PLL_100M_CG] = imx_clk_hw_gate("sys1_pll_100m_cg", "sys1_pll_out", base + 0x30, 13);
4463 +- hws[IMX8MQ_SYS1_PLL_133M_CG] = imx_clk_hw_gate("sys1_pll_133m_cg", "sys1_pll_out", base + 0x30, 15);
4464 +- hws[IMX8MQ_SYS1_PLL_160M_CG] = imx_clk_hw_gate("sys1_pll_160m_cg", "sys1_pll_out", base + 0x30, 17);
4465 +- hws[IMX8MQ_SYS1_PLL_200M_CG] = imx_clk_hw_gate("sys1_pll_200m_cg", "sys1_pll_out", base + 0x30, 19);
4466 +- hws[IMX8MQ_SYS1_PLL_266M_CG] = imx_clk_hw_gate("sys1_pll_266m_cg", "sys1_pll_out", base + 0x30, 21);
4467 +- hws[IMX8MQ_SYS1_PLL_400M_CG] = imx_clk_hw_gate("sys1_pll_400m_cg", "sys1_pll_out", base + 0x30, 23);
4468 +- hws[IMX8MQ_SYS1_PLL_800M_CG] = imx_clk_hw_gate("sys1_pll_800m_cg", "sys1_pll_out", base + 0x30, 25);
4469 +-
4470 +- hws[IMX8MQ_SYS1_PLL_40M] = imx_clk_hw_fixed_factor("sys1_pll_40m", "sys1_pll_40m_cg", 1, 20);
4471 +- hws[IMX8MQ_SYS1_PLL_80M] = imx_clk_hw_fixed_factor("sys1_pll_80m", "sys1_pll_80m_cg", 1, 10);
4472 +- hws[IMX8MQ_SYS1_PLL_100M] = imx_clk_hw_fixed_factor("sys1_pll_100m", "sys1_pll_100m_cg", 1, 8);
4473 +- hws[IMX8MQ_SYS1_PLL_133M] = imx_clk_hw_fixed_factor("sys1_pll_133m", "sys1_pll_133m_cg", 1, 6);
4474 +- hws[IMX8MQ_SYS1_PLL_160M] = imx_clk_hw_fixed_factor("sys1_pll_160m", "sys1_pll_160m_cg", 1, 5);
4475 +- hws[IMX8MQ_SYS1_PLL_200M] = imx_clk_hw_fixed_factor("sys1_pll_200m", "sys1_pll_200m_cg", 1, 4);
4476 +- hws[IMX8MQ_SYS1_PLL_266M] = imx_clk_hw_fixed_factor("sys1_pll_266m", "sys1_pll_266m_cg", 1, 3);
4477 +- hws[IMX8MQ_SYS1_PLL_400M] = imx_clk_hw_fixed_factor("sys1_pll_400m", "sys1_pll_400m_cg", 1, 2);
4478 +- hws[IMX8MQ_SYS1_PLL_800M] = imx_clk_hw_fixed_factor("sys1_pll_800m", "sys1_pll_800m_cg", 1, 1);
4479 ++ hws[IMX8MQ_SYS1_PLL_40M] = imx_clk_hw_fixed_factor("sys1_pll_40m", "sys1_pll_out", 1, 20);
4480 ++ hws[IMX8MQ_SYS1_PLL_80M] = imx_clk_hw_fixed_factor("sys1_pll_80m", "sys1_pll_out", 1, 10);
4481 ++ hws[IMX8MQ_SYS1_PLL_100M] = imx_clk_hw_fixed_factor("sys1_pll_100m", "sys1_pll_out", 1, 8);
4482 ++ hws[IMX8MQ_SYS1_PLL_133M] = imx_clk_hw_fixed_factor("sys1_pll_133m", "sys1_pll_out", 1, 6);
4483 ++ hws[IMX8MQ_SYS1_PLL_160M] = imx_clk_hw_fixed_factor("sys1_pll_160m", "sys1_pll_out", 1, 5);
4484 ++ hws[IMX8MQ_SYS1_PLL_200M] = imx_clk_hw_fixed_factor("sys1_pll_200m", "sys1_pll_out", 1, 4);
4485 ++ hws[IMX8MQ_SYS1_PLL_266M] = imx_clk_hw_fixed_factor("sys1_pll_266m", "sys1_pll_out", 1, 3);
4486 ++ hws[IMX8MQ_SYS1_PLL_400M] = imx_clk_hw_fixed_factor("sys1_pll_400m", "sys1_pll_out", 1, 2);
4487 ++ hws[IMX8MQ_SYS1_PLL_800M] = imx_clk_hw_fixed_factor("sys1_pll_800m", "sys1_pll_out", 1, 1);
4488 +
4489 + /* SYS PLL2 fixed output */
4490 +- hws[IMX8MQ_SYS2_PLL_50M_CG] = imx_clk_hw_gate("sys2_pll_50m_cg", "sys2_pll_out", base + 0x3c, 9);
4491 +- hws[IMX8MQ_SYS2_PLL_100M_CG] = imx_clk_hw_gate("sys2_pll_100m_cg", "sys2_pll_out", base + 0x3c, 11);
4492 +- hws[IMX8MQ_SYS2_PLL_125M_CG] = imx_clk_hw_gate("sys2_pll_125m_cg", "sys2_pll_out", base + 0x3c, 13);
4493 +- hws[IMX8MQ_SYS2_PLL_166M_CG] = imx_clk_hw_gate("sys2_pll_166m_cg", "sys2_pll_out", base + 0x3c, 15);
4494 +- hws[IMX8MQ_SYS2_PLL_200M_CG] = imx_clk_hw_gate("sys2_pll_200m_cg", "sys2_pll_out", base + 0x3c, 17);
4495 +- hws[IMX8MQ_SYS2_PLL_250M_CG] = imx_clk_hw_gate("sys2_pll_250m_cg", "sys2_pll_out", base + 0x3c, 19);
4496 +- hws[IMX8MQ_SYS2_PLL_333M_CG] = imx_clk_hw_gate("sys2_pll_333m_cg", "sys2_pll_out", base + 0x3c, 21);
4497 +- hws[IMX8MQ_SYS2_PLL_500M_CG] = imx_clk_hw_gate("sys2_pll_500m_cg", "sys2_pll_out", base + 0x3c, 23);
4498 +- hws[IMX8MQ_SYS2_PLL_1000M_CG] = imx_clk_hw_gate("sys2_pll_1000m_cg", "sys2_pll_out", base + 0x3c, 25);
4499 +-
4500 +- hws[IMX8MQ_SYS2_PLL_50M] = imx_clk_hw_fixed_factor("sys2_pll_50m", "sys2_pll_50m_cg", 1, 20);
4501 +- hws[IMX8MQ_SYS2_PLL_100M] = imx_clk_hw_fixed_factor("sys2_pll_100m", "sys2_pll_100m_cg", 1, 10);
4502 +- hws[IMX8MQ_SYS2_PLL_125M] = imx_clk_hw_fixed_factor("sys2_pll_125m", "sys2_pll_125m_cg", 1, 8);
4503 +- hws[IMX8MQ_SYS2_PLL_166M] = imx_clk_hw_fixed_factor("sys2_pll_166m", "sys2_pll_166m_cg", 1, 6);
4504 +- hws[IMX8MQ_SYS2_PLL_200M] = imx_clk_hw_fixed_factor("sys2_pll_200m", "sys2_pll_200m_cg", 1, 5);
4505 +- hws[IMX8MQ_SYS2_PLL_250M] = imx_clk_hw_fixed_factor("sys2_pll_250m", "sys2_pll_250m_cg", 1, 4);
4506 +- hws[IMX8MQ_SYS2_PLL_333M] = imx_clk_hw_fixed_factor("sys2_pll_333m", "sys2_pll_333m_cg", 1, 3);
4507 +- hws[IMX8MQ_SYS2_PLL_500M] = imx_clk_hw_fixed_factor("sys2_pll_500m", "sys2_pll_500m_cg", 1, 2);
4508 +- hws[IMX8MQ_SYS2_PLL_1000M] = imx_clk_hw_fixed_factor("sys2_pll_1000m", "sys2_pll_1000m_cg", 1, 1);
4509 ++ hws[IMX8MQ_SYS2_PLL_50M] = imx_clk_hw_fixed_factor("sys2_pll_50m", "sys2_pll_out", 1, 20);
4510 ++ hws[IMX8MQ_SYS2_PLL_100M] = imx_clk_hw_fixed_factor("sys2_pll_100m", "sys2_pll_out", 1, 10);
4511 ++ hws[IMX8MQ_SYS2_PLL_125M] = imx_clk_hw_fixed_factor("sys2_pll_125m", "sys2_pll_out", 1, 8);
4512 ++ hws[IMX8MQ_SYS2_PLL_166M] = imx_clk_hw_fixed_factor("sys2_pll_166m", "sys2_pll_out", 1, 6);
4513 ++ hws[IMX8MQ_SYS2_PLL_200M] = imx_clk_hw_fixed_factor("sys2_pll_200m", "sys2_pll_out", 1, 5);
4514 ++ hws[IMX8MQ_SYS2_PLL_250M] = imx_clk_hw_fixed_factor("sys2_pll_250m", "sys2_pll_out", 1, 4);
4515 ++ hws[IMX8MQ_SYS2_PLL_333M] = imx_clk_hw_fixed_factor("sys2_pll_333m", "sys2_pll_out", 1, 3);
4516 ++ hws[IMX8MQ_SYS2_PLL_500M] = imx_clk_hw_fixed_factor("sys2_pll_500m", "sys2_pll_out", 1, 2);
4517 ++ hws[IMX8MQ_SYS2_PLL_1000M] = imx_clk_hw_fixed_factor("sys2_pll_1000m", "sys2_pll_out", 1, 1);
4518 +
4519 + np = dev->of_node;
4520 + base = devm_platform_ioremap_resource(pdev, 0);
4521 +diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c
4522 +index b814d44917a5d..2876bb83d9d0e 100644
4523 +--- a/drivers/clk/meson/g12a.c
4524 ++++ b/drivers/clk/meson/g12a.c
4525 +@@ -1602,7 +1602,7 @@ static struct clk_regmap g12b_cpub_clk_trace = {
4526 + };
4527 +
4528 + static const struct pll_mult_range g12a_gp0_pll_mult_range = {
4529 +- .min = 55,
4530 ++ .min = 125,
4531 + .max = 255,
4532 + };
4533 +
4534 +diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
4535 +index 564431130a760..1a571c04a76cb 100644
4536 +--- a/drivers/clk/qcom/clk-alpha-pll.c
4537 ++++ b/drivers/clk/qcom/clk-alpha-pll.c
4538 +@@ -1214,7 +1214,7 @@ static int alpha_pll_fabia_prepare(struct clk_hw *hw)
4539 + return -EINVAL;
4540 +
4541 + /* Setup PLL for calibration frequency */
4542 +- regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL(pll), cal_l);
4543 ++ regmap_write(pll->clkr.regmap, PLL_CAL_L_VAL(pll), cal_l);
4544 +
4545 + /* Bringup the PLL at calibration frequency */
4546 + ret = clk_alpha_pll_enable(hw);
4547 +diff --git a/drivers/clk/socfpga/clk-agilex.c b/drivers/clk/socfpga/clk-agilex.c
4548 +index bb3e80928ebe8..438075a50b9f2 100644
4549 +--- a/drivers/clk/socfpga/clk-agilex.c
4550 ++++ b/drivers/clk/socfpga/clk-agilex.c
4551 +@@ -186,6 +186,41 @@ static const struct clk_parent_data noc_mux[] = {
4552 + .name = "boot_clk", },
4553 + };
4554 +
4555 ++static const struct clk_parent_data sdmmc_mux[] = {
4556 ++ { .fw_name = "sdmmc_free_clk",
4557 ++ .name = "sdmmc_free_clk", },
4558 ++ { .fw_name = "boot_clk",
4559 ++ .name = "boot_clk", },
4560 ++};
4561 ++
4562 ++static const struct clk_parent_data s2f_user1_mux[] = {
4563 ++ { .fw_name = "s2f_user1_free_clk",
4564 ++ .name = "s2f_user1_free_clk", },
4565 ++ { .fw_name = "boot_clk",
4566 ++ .name = "boot_clk", },
4567 ++};
4568 ++
4569 ++static const struct clk_parent_data psi_mux[] = {
4570 ++ { .fw_name = "psi_ref_free_clk",
4571 ++ .name = "psi_ref_free_clk", },
4572 ++ { .fw_name = "boot_clk",
4573 ++ .name = "boot_clk", },
4574 ++};
4575 ++
4576 ++static const struct clk_parent_data gpio_db_mux[] = {
4577 ++ { .fw_name = "gpio_db_free_clk",
4578 ++ .name = "gpio_db_free_clk", },
4579 ++ { .fw_name = "boot_clk",
4580 ++ .name = "boot_clk", },
4581 ++};
4582 ++
4583 ++static const struct clk_parent_data emac_ptp_mux[] = {
4584 ++ { .fw_name = "emac_ptp_free_clk",
4585 ++ .name = "emac_ptp_free_clk", },
4586 ++ { .fw_name = "boot_clk",
4587 ++ .name = "boot_clk", },
4588 ++};
4589 ++
4590 + /* clocks in AO (always on) controller */
4591 + static const struct stratix10_pll_clock agilex_pll_clks[] = {
4592 + { AGILEX_BOOT_CLK, "boot_clk", boot_mux, ARRAY_SIZE(boot_mux), 0,
4593 +@@ -211,11 +246,9 @@ static const struct stratix10_perip_cnt_clock agilex_main_perip_cnt_clks[] = {
4594 + { AGILEX_MPU_FREE_CLK, "mpu_free_clk", NULL, mpu_free_mux, ARRAY_SIZE(mpu_free_mux),
4595 + 0, 0x3C, 0, 0, 0},
4596 + { AGILEX_NOC_FREE_CLK, "noc_free_clk", NULL, noc_free_mux, ARRAY_SIZE(noc_free_mux),
4597 +- 0, 0x40, 0, 0, 1},
4598 +- { AGILEX_L4_SYS_FREE_CLK, "l4_sys_free_clk", "noc_free_clk", NULL, 1, 0,
4599 +- 0, 4, 0, 0},
4600 +- { AGILEX_NOC_CLK, "noc_clk", NULL, noc_mux, ARRAY_SIZE(noc_mux),
4601 +- 0, 0, 0, 0x30, 1},
4602 ++ 0, 0x40, 0, 0, 0},
4603 ++ { AGILEX_L4_SYS_FREE_CLK, "l4_sys_free_clk", NULL, noc_mux, ARRAY_SIZE(noc_mux), 0,
4604 ++ 0, 4, 0x30, 1},
4605 + { AGILEX_EMAC_A_FREE_CLK, "emaca_free_clk", NULL, emaca_free_mux, ARRAY_SIZE(emaca_free_mux),
4606 + 0, 0xD4, 0, 0x88, 0},
4607 + { AGILEX_EMAC_B_FREE_CLK, "emacb_free_clk", NULL, emacb_free_mux, ARRAY_SIZE(emacb_free_mux),
4608 +@@ -225,7 +258,7 @@ static const struct stratix10_perip_cnt_clock agilex_main_perip_cnt_clks[] = {
4609 + { AGILEX_GPIO_DB_FREE_CLK, "gpio_db_free_clk", NULL, gpio_db_free_mux,
4610 + ARRAY_SIZE(gpio_db_free_mux), 0, 0xE0, 0, 0x88, 3},
4611 + { AGILEX_SDMMC_FREE_CLK, "sdmmc_free_clk", NULL, sdmmc_free_mux,
4612 +- ARRAY_SIZE(sdmmc_free_mux), 0, 0xE4, 0, 0x88, 4},
4613 ++ ARRAY_SIZE(sdmmc_free_mux), 0, 0xE4, 0, 0, 0},
4614 + { AGILEX_S2F_USER0_FREE_CLK, "s2f_user0_free_clk", NULL, s2f_usr0_free_mux,
4615 + ARRAY_SIZE(s2f_usr0_free_mux), 0, 0xE8, 0, 0, 0},
4616 + { AGILEX_S2F_USER1_FREE_CLK, "s2f_user1_free_clk", NULL, s2f_usr1_free_mux,
4617 +@@ -241,24 +274,24 @@ static const struct stratix10_gate_clock agilex_gate_clks[] = {
4618 + 0, 0, 0, 0, 0, 0, 4},
4619 + { AGILEX_MPU_CCU_CLK, "mpu_ccu_clk", "mpu_clk", NULL, 1, 0, 0x24,
4620 + 0, 0, 0, 0, 0, 0, 2},
4621 +- { AGILEX_L4_MAIN_CLK, "l4_main_clk", "noc_clk", NULL, 1, 0, 0x24,
4622 +- 1, 0x44, 0, 2, 0, 0, 0},
4623 +- { AGILEX_L4_MP_CLK, "l4_mp_clk", "noc_clk", NULL, 1, 0, 0x24,
4624 +- 2, 0x44, 8, 2, 0, 0, 0},
4625 ++ { AGILEX_L4_MAIN_CLK, "l4_main_clk", NULL, noc_mux, ARRAY_SIZE(noc_mux), 0, 0x24,
4626 ++ 1, 0x44, 0, 2, 0x30, 1, 0},
4627 ++ { AGILEX_L4_MP_CLK, "l4_mp_clk", NULL, noc_mux, ARRAY_SIZE(noc_mux), 0, 0x24,
4628 ++ 2, 0x44, 8, 2, 0x30, 1, 0},
4629 + /*
4630 + * The l4_sp_clk feeds a 100 MHz clock to various peripherals, one of them
4631 + * being the SP timers, thus cannot get gated.
4632 + */
4633 +- { AGILEX_L4_SP_CLK, "l4_sp_clk", "noc_clk", NULL, 1, CLK_IS_CRITICAL, 0x24,
4634 +- 3, 0x44, 16, 2, 0, 0, 0},
4635 +- { AGILEX_CS_AT_CLK, "cs_at_clk", "noc_clk", NULL, 1, 0, 0x24,
4636 +- 4, 0x44, 24, 2, 0, 0, 0},
4637 +- { AGILEX_CS_TRACE_CLK, "cs_trace_clk", "noc_clk", NULL, 1, 0, 0x24,
4638 +- 4, 0x44, 26, 2, 0, 0, 0},
4639 ++ { AGILEX_L4_SP_CLK, "l4_sp_clk", NULL, noc_mux, ARRAY_SIZE(noc_mux), CLK_IS_CRITICAL, 0x24,
4640 ++ 3, 0x44, 16, 2, 0x30, 1, 0},
4641 ++ { AGILEX_CS_AT_CLK, "cs_at_clk", NULL, noc_mux, ARRAY_SIZE(noc_mux), 0, 0x24,
4642 ++ 4, 0x44, 24, 2, 0x30, 1, 0},
4643 ++ { AGILEX_CS_TRACE_CLK, "cs_trace_clk", NULL, noc_mux, ARRAY_SIZE(noc_mux), 0, 0x24,
4644 ++ 4, 0x44, 26, 2, 0x30, 1, 0},
4645 + { AGILEX_CS_PDBG_CLK, "cs_pdbg_clk", "cs_at_clk", NULL, 1, 0, 0x24,
4646 + 4, 0x44, 28, 1, 0, 0, 0},
4647 +- { AGILEX_CS_TIMER_CLK, "cs_timer_clk", "noc_clk", NULL, 1, 0, 0x24,
4648 +- 5, 0, 0, 0, 0, 0, 0},
4649 ++ { AGILEX_CS_TIMER_CLK, "cs_timer_clk", NULL, noc_mux, ARRAY_SIZE(noc_mux), 0, 0x24,
4650 ++ 5, 0, 0, 0, 0x30, 1, 0},
4651 + { AGILEX_S2F_USER0_CLK, "s2f_user0_clk", NULL, s2f_usr0_mux, ARRAY_SIZE(s2f_usr0_mux), 0, 0x24,
4652 + 6, 0, 0, 0, 0, 0, 0},
4653 + { AGILEX_EMAC0_CLK, "emac0_clk", NULL, emac_mux, ARRAY_SIZE(emac_mux), 0, 0x7C,
4654 +@@ -267,16 +300,16 @@ static const struct stratix10_gate_clock agilex_gate_clks[] = {
4655 + 1, 0, 0, 0, 0x94, 27, 0},
4656 + { AGILEX_EMAC2_CLK, "emac2_clk", NULL, emac_mux, ARRAY_SIZE(emac_mux), 0, 0x7C,
4657 + 2, 0, 0, 0, 0x94, 28, 0},
4658 +- { AGILEX_EMAC_PTP_CLK, "emac_ptp_clk", "emac_ptp_free_clk", NULL, 1, 0, 0x7C,
4659 +- 3, 0, 0, 0, 0, 0, 0},
4660 +- { AGILEX_GPIO_DB_CLK, "gpio_db_clk", "gpio_db_free_clk", NULL, 1, 0, 0x7C,
4661 +- 4, 0x98, 0, 16, 0, 0, 0},
4662 +- { AGILEX_SDMMC_CLK, "sdmmc_clk", "sdmmc_free_clk", NULL, 1, 0, 0x7C,
4663 +- 5, 0, 0, 0, 0, 0, 4},
4664 +- { AGILEX_S2F_USER1_CLK, "s2f_user1_clk", "s2f_user1_free_clk", NULL, 1, 0, 0x7C,
4665 +- 6, 0, 0, 0, 0, 0, 0},
4666 +- { AGILEX_PSI_REF_CLK, "psi_ref_clk", "psi_ref_free_clk", NULL, 1, 0, 0x7C,
4667 +- 7, 0, 0, 0, 0, 0, 0},
4668 ++ { AGILEX_EMAC_PTP_CLK, "emac_ptp_clk", NULL, emac_ptp_mux, ARRAY_SIZE(emac_ptp_mux), 0, 0x7C,
4669 ++ 3, 0, 0, 0, 0x88, 2, 0},
4670 ++ { AGILEX_GPIO_DB_CLK, "gpio_db_clk", NULL, gpio_db_mux, ARRAY_SIZE(gpio_db_mux), 0, 0x7C,
4671 ++ 4, 0x98, 0, 16, 0x88, 3, 0},
4672 ++ { AGILEX_SDMMC_CLK, "sdmmc_clk", NULL, sdmmc_mux, ARRAY_SIZE(sdmmc_mux), 0, 0x7C,
4673 ++ 5, 0, 0, 0, 0x88, 4, 4},
4674 ++ { AGILEX_S2F_USER1_CLK, "s2f_user1_clk", NULL, s2f_user1_mux, ARRAY_SIZE(s2f_user1_mux), 0, 0x7C,
4675 ++ 6, 0, 0, 0, 0x88, 5, 0},
4676 ++ { AGILEX_PSI_REF_CLK, "psi_ref_clk", NULL, psi_mux, ARRAY_SIZE(psi_mux), 0, 0x7C,
4677 ++ 7, 0, 0, 0, 0x88, 6, 0},
4678 + { AGILEX_USB_CLK, "usb_clk", "l4_mp_clk", NULL, 1, 0, 0x7C,
4679 + 8, 0, 0, 0, 0, 0, 0},
4680 + { AGILEX_SPI_M_CLK, "spi_m_clk", "l4_mp_clk", NULL, 1, 0, 0x7C,
4681 +diff --git a/drivers/clk/socfpga/clk-periph-s10.c b/drivers/clk/socfpga/clk-periph-s10.c
4682 +index 397b77b89b166..bae595f17061f 100644
4683 +--- a/drivers/clk/socfpga/clk-periph-s10.c
4684 ++++ b/drivers/clk/socfpga/clk-periph-s10.c
4685 +@@ -49,16 +49,21 @@ static u8 clk_periclk_get_parent(struct clk_hw *hwclk)
4686 + {
4687 + struct socfpga_periph_clk *socfpgaclk = to_periph_clk(hwclk);
4688 + u32 clk_src, mask;
4689 +- u8 parent;
4690 ++ u8 parent = 0;
4691 +
4692 ++ /* handle the bypass first */
4693 + if (socfpgaclk->bypass_reg) {
4694 + mask = (0x1 << socfpgaclk->bypass_shift);
4695 + parent = ((readl(socfpgaclk->bypass_reg) & mask) >>
4696 + socfpgaclk->bypass_shift);
4697 +- } else {
4698 ++ if (parent)
4699 ++ return parent;
4700 ++ }
4701 ++
4702 ++ if (socfpgaclk->hw.reg) {
4703 + clk_src = readl(socfpgaclk->hw.reg);
4704 + parent = (clk_src >> CLK_MGR_FREE_SHIFT) &
4705 +- CLK_MGR_FREE_MASK;
4706 ++ CLK_MGR_FREE_MASK;
4707 + }
4708 + return parent;
4709 + }
4710 +diff --git a/drivers/clk/socfpga/clk-s10.c b/drivers/clk/socfpga/clk-s10.c
4711 +index 661a8e9bfb9bd..aaf69058b1dca 100644
4712 +--- a/drivers/clk/socfpga/clk-s10.c
4713 ++++ b/drivers/clk/socfpga/clk-s10.c
4714 +@@ -144,6 +144,41 @@ static const struct clk_parent_data mpu_free_mux[] = {
4715 + .name = "f2s-free-clk", },
4716 + };
4717 +
4718 ++static const struct clk_parent_data sdmmc_mux[] = {
4719 ++ { .fw_name = "sdmmc_free_clk",
4720 ++ .name = "sdmmc_free_clk", },
4721 ++ { .fw_name = "boot_clk",
4722 ++ .name = "boot_clk", },
4723 ++};
4724 ++
4725 ++static const struct clk_parent_data s2f_user1_mux[] = {
4726 ++ { .fw_name = "s2f_user1_free_clk",
4727 ++ .name = "s2f_user1_free_clk", },
4728 ++ { .fw_name = "boot_clk",
4729 ++ .name = "boot_clk", },
4730 ++};
4731 ++
4732 ++static const struct clk_parent_data psi_mux[] = {
4733 ++ { .fw_name = "psi_ref_free_clk",
4734 ++ .name = "psi_ref_free_clk", },
4735 ++ { .fw_name = "boot_clk",
4736 ++ .name = "boot_clk", },
4737 ++};
4738 ++
4739 ++static const struct clk_parent_data gpio_db_mux[] = {
4740 ++ { .fw_name = "gpio_db_free_clk",
4741 ++ .name = "gpio_db_free_clk", },
4742 ++ { .fw_name = "boot_clk",
4743 ++ .name = "boot_clk", },
4744 ++};
4745 ++
4746 ++static const struct clk_parent_data emac_ptp_mux[] = {
4747 ++ { .fw_name = "emac_ptp_free_clk",
4748 ++ .name = "emac_ptp_free_clk", },
4749 ++ { .fw_name = "boot_clk",
4750 ++ .name = "boot_clk", },
4751 ++};
4752 ++
4753 + /* clocks in AO (always on) controller */
4754 + static const struct stratix10_pll_clock s10_pll_clks[] = {
4755 + { STRATIX10_BOOT_CLK, "boot_clk", boot_mux, ARRAY_SIZE(boot_mux), 0,
4756 +@@ -167,7 +202,7 @@ static const struct stratix10_perip_cnt_clock s10_main_perip_cnt_clks[] = {
4757 + { STRATIX10_MPU_FREE_CLK, "mpu_free_clk", NULL, mpu_free_mux, ARRAY_SIZE(mpu_free_mux),
4758 + 0, 0x48, 0, 0, 0},
4759 + { STRATIX10_NOC_FREE_CLK, "noc_free_clk", NULL, noc_free_mux, ARRAY_SIZE(noc_free_mux),
4760 +- 0, 0x4C, 0, 0, 0},
4761 ++ 0, 0x4C, 0, 0x3C, 1},
4762 + { STRATIX10_MAIN_EMACA_CLK, "main_emaca_clk", "main_noc_base_clk", NULL, 1, 0,
4763 + 0x50, 0, 0, 0},
4764 + { STRATIX10_MAIN_EMACB_CLK, "main_emacb_clk", "main_noc_base_clk", NULL, 1, 0,
4765 +@@ -200,10 +235,8 @@ static const struct stratix10_perip_cnt_clock s10_main_perip_cnt_clks[] = {
4766 + 0, 0xD4, 0, 0, 0},
4767 + { STRATIX10_PERI_PSI_REF_CLK, "peri_psi_ref_clk", "peri_noc_base_clk", NULL, 1, 0,
4768 + 0xD8, 0, 0, 0},
4769 +- { STRATIX10_L4_SYS_FREE_CLK, "l4_sys_free_clk", "noc_free_clk", NULL, 1, 0,
4770 +- 0, 4, 0, 0},
4771 +- { STRATIX10_NOC_CLK, "noc_clk", NULL, noc_mux, ARRAY_SIZE(noc_mux),
4772 +- 0, 0, 0, 0x3C, 1},
4773 ++ { STRATIX10_L4_SYS_FREE_CLK, "l4_sys_free_clk", NULL, noc_mux, ARRAY_SIZE(noc_mux), 0,
4774 ++ 0, 4, 0x3C, 1},
4775 + { STRATIX10_EMAC_A_FREE_CLK, "emaca_free_clk", NULL, emaca_free_mux, ARRAY_SIZE(emaca_free_mux),
4776 + 0, 0, 2, 0xB0, 0},
4777 + { STRATIX10_EMAC_B_FREE_CLK, "emacb_free_clk", NULL, emacb_free_mux, ARRAY_SIZE(emacb_free_mux),
4778 +@@ -227,20 +260,20 @@ static const struct stratix10_gate_clock s10_gate_clks[] = {
4779 + 0, 0, 0, 0, 0, 0, 4},
4780 + { STRATIX10_MPU_L2RAM_CLK, "mpu_l2ram_clk", "mpu_clk", NULL, 1, 0, 0x30,
4781 + 0, 0, 0, 0, 0, 0, 2},
4782 +- { STRATIX10_L4_MAIN_CLK, "l4_main_clk", "noc_clk", NULL, 1, 0, 0x30,
4783 +- 1, 0x70, 0, 2, 0, 0, 0},
4784 +- { STRATIX10_L4_MP_CLK, "l4_mp_clk", "noc_clk", NULL, 1, 0, 0x30,
4785 +- 2, 0x70, 8, 2, 0, 0, 0},
4786 +- { STRATIX10_L4_SP_CLK, "l4_sp_clk", "noc_clk", NULL, 1, CLK_IS_CRITICAL, 0x30,
4787 +- 3, 0x70, 16, 2, 0, 0, 0},
4788 +- { STRATIX10_CS_AT_CLK, "cs_at_clk", "noc_clk", NULL, 1, 0, 0x30,
4789 +- 4, 0x70, 24, 2, 0, 0, 0},
4790 +- { STRATIX10_CS_TRACE_CLK, "cs_trace_clk", "noc_clk", NULL, 1, 0, 0x30,
4791 +- 4, 0x70, 26, 2, 0, 0, 0},
4792 ++ { STRATIX10_L4_MAIN_CLK, "l4_main_clk", NULL, noc_mux, ARRAY_SIZE(noc_mux), 0, 0x30,
4793 ++ 1, 0x70, 0, 2, 0x3C, 1, 0},
4794 ++ { STRATIX10_L4_MP_CLK, "l4_mp_clk", NULL, noc_mux, ARRAY_SIZE(noc_mux), 0, 0x30,
4795 ++ 2, 0x70, 8, 2, 0x3C, 1, 0},
4796 ++ { STRATIX10_L4_SP_CLK, "l4_sp_clk", NULL, noc_mux, ARRAY_SIZE(noc_mux), CLK_IS_CRITICAL, 0x30,
4797 ++ 3, 0x70, 16, 2, 0x3C, 1, 0},
4798 ++ { STRATIX10_CS_AT_CLK, "cs_at_clk", NULL, noc_mux, ARRAY_SIZE(noc_mux), 0, 0x30,
4799 ++ 4, 0x70, 24, 2, 0x3C, 1, 0},
4800 ++ { STRATIX10_CS_TRACE_CLK, "cs_trace_clk", NULL, noc_mux, ARRAY_SIZE(noc_mux), 0, 0x30,
4801 ++ 4, 0x70, 26, 2, 0x3C, 1, 0},
4802 + { STRATIX10_CS_PDBG_CLK, "cs_pdbg_clk", "cs_at_clk", NULL, 1, 0, 0x30,
4803 + 4, 0x70, 28, 1, 0, 0, 0},
4804 +- { STRATIX10_CS_TIMER_CLK, "cs_timer_clk", "noc_clk", NULL, 1, 0, 0x30,
4805 +- 5, 0, 0, 0, 0, 0, 0},
4806 ++ { STRATIX10_CS_TIMER_CLK, "cs_timer_clk", NULL, noc_mux, ARRAY_SIZE(noc_mux), 0, 0x30,
4807 ++ 5, 0, 0, 0, 0x3C, 1, 0},
4808 + { STRATIX10_S2F_USER0_CLK, "s2f_user0_clk", NULL, s2f_usr0_mux, ARRAY_SIZE(s2f_usr0_mux), 0, 0x30,
4809 + 6, 0, 0, 0, 0, 0, 0},
4810 + { STRATIX10_EMAC0_CLK, "emac0_clk", NULL, emac_mux, ARRAY_SIZE(emac_mux), 0, 0xA4,
4811 +@@ -249,16 +282,16 @@ static const struct stratix10_gate_clock s10_gate_clks[] = {
4812 + 1, 0, 0, 0, 0xDC, 27, 0},
4813 + { STRATIX10_EMAC2_CLK, "emac2_clk", NULL, emac_mux, ARRAY_SIZE(emac_mux), 0, 0xA4,
4814 + 2, 0, 0, 0, 0xDC, 28, 0},
4815 +- { STRATIX10_EMAC_PTP_CLK, "emac_ptp_clk", "emac_ptp_free_clk", NULL, 1, 0, 0xA4,
4816 +- 3, 0, 0, 0, 0, 0, 0},
4817 +- { STRATIX10_GPIO_DB_CLK, "gpio_db_clk", "gpio_db_free_clk", NULL, 1, 0, 0xA4,
4818 +- 4, 0xE0, 0, 16, 0, 0, 0},
4819 +- { STRATIX10_SDMMC_CLK, "sdmmc_clk", "sdmmc_free_clk", NULL, 1, 0, 0xA4,
4820 +- 5, 0, 0, 0, 0, 0, 4},
4821 +- { STRATIX10_S2F_USER1_CLK, "s2f_user1_clk", "s2f_user1_free_clk", NULL, 1, 0, 0xA4,
4822 +- 6, 0, 0, 0, 0, 0, 0},
4823 +- { STRATIX10_PSI_REF_CLK, "psi_ref_clk", "psi_ref_free_clk", NULL, 1, 0, 0xA4,
4824 +- 7, 0, 0, 0, 0, 0, 0},
4825 ++ { STRATIX10_EMAC_PTP_CLK, "emac_ptp_clk", NULL, emac_ptp_mux, ARRAY_SIZE(emac_ptp_mux), 0, 0xA4,
4826 ++ 3, 0, 0, 0, 0xB0, 2, 0},
4827 ++ { STRATIX10_GPIO_DB_CLK, "gpio_db_clk", NULL, gpio_db_mux, ARRAY_SIZE(gpio_db_mux), 0, 0xA4,
4828 ++ 4, 0xE0, 0, 16, 0xB0, 3, 0},
4829 ++ { STRATIX10_SDMMC_CLK, "sdmmc_clk", NULL, sdmmc_mux, ARRAY_SIZE(sdmmc_mux), 0, 0xA4,
4830 ++ 5, 0, 0, 0, 0xB0, 4, 4},
4831 ++ { STRATIX10_S2F_USER1_CLK, "s2f_user1_clk", NULL, s2f_user1_mux, ARRAY_SIZE(s2f_user1_mux), 0, 0xA4,
4832 ++ 6, 0, 0, 0, 0xB0, 5, 0},
4833 ++ { STRATIX10_PSI_REF_CLK, "psi_ref_clk", NULL, psi_mux, ARRAY_SIZE(psi_mux), 0, 0xA4,
4834 ++ 7, 0, 0, 0, 0xB0, 6, 0},
4835 + { STRATIX10_USB_CLK, "usb_clk", "l4_mp_clk", NULL, 1, 0, 0xA4,
4836 + 8, 0, 0, 0, 0, 0, 0},
4837 + { STRATIX10_SPI_M_CLK, "spi_m_clk", "l4_mp_clk", NULL, 1, 0, 0xA4,
4838 +diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c
4839 +index 9cf249c344d9e..31e752318a104 100644
4840 +--- a/drivers/clk/tegra/clk-tegra30.c
4841 ++++ b/drivers/clk/tegra/clk-tegra30.c
4842 +@@ -1248,7 +1248,7 @@ static struct tegra_clk_init_table init_table[] __initdata = {
4843 + { TEGRA30_CLK_GR3D, TEGRA30_CLK_PLL_C, 300000000, 0 },
4844 + { TEGRA30_CLK_GR3D2, TEGRA30_CLK_PLL_C, 300000000, 0 },
4845 + { TEGRA30_CLK_PLL_U, TEGRA30_CLK_CLK_MAX, 480000000, 0 },
4846 +- { TEGRA30_CLK_VDE, TEGRA30_CLK_PLL_C, 600000000, 0 },
4847 ++ { TEGRA30_CLK_VDE, TEGRA30_CLK_PLL_C, 300000000, 0 },
4848 + { TEGRA30_CLK_SPDIF_IN_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
4849 + { TEGRA30_CLK_I2S0_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
4850 + { TEGRA30_CLK_I2S1_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
4851 +diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c
4852 +index 33eeabf9c3d12..e5c631f1b5cbe 100644
4853 +--- a/drivers/clocksource/timer-ti-dm.c
4854 ++++ b/drivers/clocksource/timer-ti-dm.c
4855 +@@ -78,6 +78,9 @@ static void omap_dm_timer_write_reg(struct omap_dm_timer *timer, u32 reg,
4856 +
4857 + static void omap_timer_restore_context(struct omap_dm_timer *timer)
4858 + {
4859 ++ __omap_dm_timer_write(timer, OMAP_TIMER_OCP_CFG_OFFSET,
4860 ++ timer->context.ocp_cfg, 0);
4861 ++
4862 + omap_dm_timer_write_reg(timer, OMAP_TIMER_WAKEUP_EN_REG,
4863 + timer->context.twer);
4864 + omap_dm_timer_write_reg(timer, OMAP_TIMER_COUNTER_REG,
4865 +@@ -95,6 +98,9 @@ static void omap_timer_restore_context(struct omap_dm_timer *timer)
4866 +
4867 + static void omap_timer_save_context(struct omap_dm_timer *timer)
4868 + {
4869 ++ timer->context.ocp_cfg =
4870 ++ __omap_dm_timer_read(timer, OMAP_TIMER_OCP_CFG_OFFSET, 0);
4871 ++
4872 + timer->context.tclr =
4873 + omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
4874 + timer->context.twer =
4875 +diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
4876 +index 1e7e3f2ff09f0..ebee0ad559fad 100644
4877 +--- a/drivers/cpufreq/cpufreq.c
4878 ++++ b/drivers/cpufreq/cpufreq.c
4879 +@@ -1368,9 +1368,14 @@ static int cpufreq_online(unsigned int cpu)
4880 + goto out_free_policy;
4881 + }
4882 +
4883 ++ /*
4884 ++ * The initialization has succeeded and the policy is online.
4885 ++ * If there is a problem with its frequency table, take it
4886 ++ * offline and drop it.
4887 ++ */
4888 + ret = cpufreq_table_validate_and_sort(policy);
4889 + if (ret)
4890 +- goto out_exit_policy;
4891 ++ goto out_offline_policy;
4892 +
4893 + /* related_cpus should at least include policy->cpus. */
4894 + cpumask_copy(policy->related_cpus, policy->cpus);
4895 +@@ -1513,6 +1518,10 @@ out_destroy_policy:
4896 +
4897 + up_write(&policy->rwsem);
4898 +
4899 ++out_offline_policy:
4900 ++ if (cpufreq_driver->offline)
4901 ++ cpufreq_driver->offline(policy);
4902 ++
4903 + out_exit_policy:
4904 + if (cpufreq_driver->exit)
4905 + cpufreq_driver->exit(policy);
4906 +diff --git a/drivers/crypto/cavium/nitrox/nitrox_isr.c b/drivers/crypto/cavium/nitrox/nitrox_isr.c
4907 +index 3dec570a190ad..10e3408bf704c 100644
4908 +--- a/drivers/crypto/cavium/nitrox/nitrox_isr.c
4909 ++++ b/drivers/crypto/cavium/nitrox/nitrox_isr.c
4910 +@@ -306,6 +306,10 @@ int nitrox_register_interrupts(struct nitrox_device *ndev)
4911 + * Entry 192: NPS_CORE_INT_ACTIVE
4912 + */
4913 + nr_vecs = pci_msix_vec_count(pdev);
4914 ++ if (nr_vecs < 0) {
4915 ++ dev_err(DEV(ndev), "Error in getting vec count %d\n", nr_vecs);
4916 ++ return nr_vecs;
4917 ++ }
4918 +
4919 + /* Enable MSI-X */
4920 + ret = pci_alloc_irq_vectors(pdev, nr_vecs, nr_vecs, PCI_IRQ_MSIX);
4921 +diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
4922 +index 21caed429cc52..d0018794e92e8 100644
4923 +--- a/drivers/crypto/ccp/sev-dev.c
4924 ++++ b/drivers/crypto/ccp/sev-dev.c
4925 +@@ -42,6 +42,10 @@ static int psp_probe_timeout = 5;
4926 + module_param(psp_probe_timeout, int, 0644);
4927 + MODULE_PARM_DESC(psp_probe_timeout, " default timeout value, in seconds, during PSP device probe");
4928 +
4929 ++MODULE_FIRMWARE("amd/amd_sev_fam17h_model0xh.sbin"); /* 1st gen EPYC */
4930 ++MODULE_FIRMWARE("amd/amd_sev_fam17h_model3xh.sbin"); /* 2nd gen EPYC */
4931 ++MODULE_FIRMWARE("amd/amd_sev_fam19h_model0xh.sbin"); /* 3rd gen EPYC */
4932 ++
4933 + static bool psp_dead;
4934 + static int psp_timeout;
4935 +
4936 +diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
4937 +index f471dbaef1fbc..7d346d842a39e 100644
4938 +--- a/drivers/crypto/ccp/sp-pci.c
4939 ++++ b/drivers/crypto/ccp/sp-pci.c
4940 +@@ -222,7 +222,7 @@ static int sp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
4941 + if (ret) {
4942 + dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n",
4943 + ret);
4944 +- goto e_err;
4945 ++ goto free_irqs;
4946 + }
4947 + }
4948 +
4949 +@@ -230,10 +230,12 @@ static int sp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
4950 +
4951 + ret = sp_init(sp);
4952 + if (ret)
4953 +- goto e_err;
4954 ++ goto free_irqs;
4955 +
4956 + return 0;
4957 +
4958 ++free_irqs:
4959 ++ sp_free_irqs(sp);
4960 + e_err:
4961 + dev_notice(dev, "initialization failed\n");
4962 + return ret;
4963 +diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
4964 +index 41f1fcacb2809..630dcb59ad569 100644
4965 +--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
4966 ++++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
4967 +@@ -1515,11 +1515,11 @@ static struct skcipher_alg sec_skciphers[] = {
4968 + AES_BLOCK_SIZE, AES_BLOCK_SIZE)
4969 +
4970 + SEC_SKCIPHER_ALG("ecb(des3_ede)", sec_setkey_3des_ecb,
4971 +- SEC_DES3_2KEY_SIZE, SEC_DES3_3KEY_SIZE,
4972 ++ SEC_DES3_3KEY_SIZE, SEC_DES3_3KEY_SIZE,
4973 + DES3_EDE_BLOCK_SIZE, 0)
4974 +
4975 + SEC_SKCIPHER_ALG("cbc(des3_ede)", sec_setkey_3des_cbc,
4976 +- SEC_DES3_2KEY_SIZE, SEC_DES3_3KEY_SIZE,
4977 ++ SEC_DES3_3KEY_SIZE, SEC_DES3_3KEY_SIZE,
4978 + DES3_EDE_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE)
4979 +
4980 + SEC_SKCIPHER_ALG("xts(sm4)", sec_setkey_sm4_xts,
4981 +diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
4982 +index 276012e7c482f..5e474a7a1912c 100644
4983 +--- a/drivers/crypto/ixp4xx_crypto.c
4984 ++++ b/drivers/crypto/ixp4xx_crypto.c
4985 +@@ -149,6 +149,8 @@ struct crypt_ctl {
4986 + struct ablk_ctx {
4987 + struct buffer_desc *src;
4988 + struct buffer_desc *dst;
4989 ++ u8 iv[MAX_IVLEN];
4990 ++ bool encrypt;
4991 + };
4992 +
4993 + struct aead_ctx {
4994 +@@ -330,7 +332,7 @@ static void free_buf_chain(struct device *dev, struct buffer_desc *buf,
4995 +
4996 + buf1 = buf->next;
4997 + phys1 = buf->phys_next;
4998 +- dma_unmap_single(dev, buf->phys_next, buf->buf_len, buf->dir);
4999 ++ dma_unmap_single(dev, buf->phys_addr, buf->buf_len, buf->dir);
5000 + dma_pool_free(buffer_pool, buf, phys);
5001 + buf = buf1;
5002 + phys = phys1;
5003 +@@ -381,6 +383,20 @@ static void one_packet(dma_addr_t phys)
5004 + case CTL_FLAG_PERFORM_ABLK: {
5005 + struct skcipher_request *req = crypt->data.ablk_req;
5006 + struct ablk_ctx *req_ctx = skcipher_request_ctx(req);
5007 ++ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
5008 ++ unsigned int ivsize = crypto_skcipher_ivsize(tfm);
5009 ++ unsigned int offset;
5010 ++
5011 ++ if (ivsize > 0) {
5012 ++ offset = req->cryptlen - ivsize;
5013 ++ if (req_ctx->encrypt) {
5014 ++ scatterwalk_map_and_copy(req->iv, req->dst,
5015 ++ offset, ivsize, 0);
5016 ++ } else {
5017 ++ memcpy(req->iv, req_ctx->iv, ivsize);
5018 ++ memzero_explicit(req_ctx->iv, ivsize);
5019 ++ }
5020 ++ }
5021 +
5022 + if (req_ctx->dst) {
5023 + free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
5024 +@@ -876,6 +892,7 @@ static int ablk_perform(struct skcipher_request *req, int encrypt)
5025 + struct ablk_ctx *req_ctx = skcipher_request_ctx(req);
5026 + struct buffer_desc src_hook;
5027 + struct device *dev = &pdev->dev;
5028 ++ unsigned int offset;
5029 + gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
5030 + GFP_KERNEL : GFP_ATOMIC;
5031 +
5032 +@@ -885,6 +902,7 @@ static int ablk_perform(struct skcipher_request *req, int encrypt)
5033 + return -EAGAIN;
5034 +
5035 + dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
5036 ++ req_ctx->encrypt = encrypt;
5037 +
5038 + crypt = get_crypt_desc();
5039 + if (!crypt)
5040 +@@ -900,6 +918,10 @@ static int ablk_perform(struct skcipher_request *req, int encrypt)
5041 +
5042 + BUG_ON(ivsize && !req->iv);
5043 + memcpy(crypt->iv, req->iv, ivsize);
5044 ++ if (ivsize > 0 && !encrypt) {
5045 ++ offset = req->cryptlen - ivsize;
5046 ++ scatterwalk_map_and_copy(req_ctx->iv, req->src, offset, ivsize, 0);
5047 ++ }
5048 + if (req->src != req->dst) {
5049 + struct buffer_desc dst_hook;
5050 + crypt->mode |= NPE_OP_NOT_IN_PLACE;
5051 +diff --git a/drivers/crypto/nx/nx-842-pseries.c b/drivers/crypto/nx/nx-842-pseries.c
5052 +index 2de5e3672e423..c5ec50a28f30d 100644
5053 +--- a/drivers/crypto/nx/nx-842-pseries.c
5054 ++++ b/drivers/crypto/nx/nx-842-pseries.c
5055 +@@ -538,13 +538,15 @@ static int nx842_OF_set_defaults(struct nx842_devdata *devdata)
5056 + * The status field indicates if the device is enabled when the status
5057 + * is 'okay'. Otherwise the device driver will be disabled.
5058 + *
5059 +- * @prop - struct property point containing the maxsyncop for the update
5060 ++ * @devdata: struct nx842_devdata to use for dev_info
5061 ++ * @prop: struct property point containing the maxsyncop for the update
5062 + *
5063 + * Returns:
5064 + * 0 - Device is available
5065 + * -ENODEV - Device is not available
5066 + */
5067 +-static int nx842_OF_upd_status(struct property *prop)
5068 ++static int nx842_OF_upd_status(struct nx842_devdata *devdata,
5069 ++ struct property *prop)
5070 + {
5071 + const char *status = (const char *)prop->value;
5072 +
5073 +@@ -758,7 +760,7 @@ static int nx842_OF_upd(struct property *new_prop)
5074 + goto out;
5075 +
5076 + /* Perform property updates */
5077 +- ret = nx842_OF_upd_status(status);
5078 ++ ret = nx842_OF_upd_status(new_devdata, status);
5079 + if (ret)
5080 + goto error_out;
5081 +
5082 +@@ -1071,6 +1073,7 @@ static const struct vio_device_id nx842_vio_driver_ids[] = {
5083 + {"ibm,compression-v1", "ibm,compression"},
5084 + {"", ""},
5085 + };
5086 ++MODULE_DEVICE_TABLE(vio, nx842_vio_driver_ids);
5087 +
5088 + static struct vio_driver nx842_vio_driver = {
5089 + .name = KBUILD_MODNAME,
5090 +diff --git a/drivers/crypto/nx/nx-aes-ctr.c b/drivers/crypto/nx/nx-aes-ctr.c
5091 +index 6d5ce1a66f1ee..02ad26012c665 100644
5092 +--- a/drivers/crypto/nx/nx-aes-ctr.c
5093 ++++ b/drivers/crypto/nx/nx-aes-ctr.c
5094 +@@ -118,7 +118,7 @@ static int ctr3686_aes_nx_crypt(struct skcipher_request *req)
5095 + struct nx_crypto_ctx *nx_ctx = crypto_skcipher_ctx(tfm);
5096 + u8 iv[16];
5097 +
5098 +- memcpy(iv, nx_ctx->priv.ctr.nonce, CTR_RFC3686_IV_SIZE);
5099 ++ memcpy(iv, nx_ctx->priv.ctr.nonce, CTR_RFC3686_NONCE_SIZE);
5100 + memcpy(iv + CTR_RFC3686_NONCE_SIZE, req->iv, CTR_RFC3686_IV_SIZE);
5101 + iv[12] = iv[13] = iv[14] = 0;
5102 + iv[15] = 1;
5103 +diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
5104 +index a3b38d2c92e70..39d17ed1db2f2 100644
5105 +--- a/drivers/crypto/omap-sham.c
5106 ++++ b/drivers/crypto/omap-sham.c
5107 +@@ -371,7 +371,7 @@ static int omap_sham_hw_init(struct omap_sham_dev *dd)
5108 + {
5109 + int err;
5110 +
5111 +- err = pm_runtime_get_sync(dd->dev);
5112 ++ err = pm_runtime_resume_and_get(dd->dev);
5113 + if (err < 0) {
5114 + dev_err(dd->dev, "failed to get sync: %d\n", err);
5115 + return err;
5116 +@@ -2243,7 +2243,7 @@ static int omap_sham_suspend(struct device *dev)
5117 +
5118 + static int omap_sham_resume(struct device *dev)
5119 + {
5120 +- int err = pm_runtime_get_sync(dev);
5121 ++ int err = pm_runtime_resume_and_get(dev);
5122 + if (err < 0) {
5123 + dev_err(dev, "failed to get sync: %d\n", err);
5124 + return err;
5125 +diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c
5126 +index 52ef80efeddc6..b40e81e0088f0 100644
5127 +--- a/drivers/crypto/qat/qat_common/qat_hal.c
5128 ++++ b/drivers/crypto/qat/qat_common/qat_hal.c
5129 +@@ -1213,7 +1213,11 @@ static int qat_hal_put_rel_wr_xfer(struct icp_qat_fw_loader_handle *handle,
5130 + pr_err("QAT: bad xfrAddr=0x%x\n", xfr_addr);
5131 + return -EINVAL;
5132 + }
5133 +- qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, &gprval);
5134 ++ status = qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, &gprval);
5135 ++ if (status) {
5136 ++ pr_err("QAT: failed to read register");
5137 ++ return status;
5138 ++ }
5139 + gpr_addr = qat_hal_get_reg_addr(ICP_GPB_REL, gprnum);
5140 + data16low = 0xffff & data;
5141 + data16hi = 0xffff & (data >> 0x10);
5142 +diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c
5143 +index 5d1f28cd66809..6adc91fedb083 100644
5144 +--- a/drivers/crypto/qat/qat_common/qat_uclo.c
5145 ++++ b/drivers/crypto/qat/qat_common/qat_uclo.c
5146 +@@ -342,7 +342,6 @@ static int qat_uclo_init_umem_seg(struct icp_qat_fw_loader_handle *handle,
5147 + return 0;
5148 + }
5149 +
5150 +-#define ICP_DH895XCC_PESRAM_BAR_SIZE 0x80000
5151 + static int qat_uclo_init_ae_memory(struct icp_qat_fw_loader_handle *handle,
5152 + struct icp_qat_uof_initmem *init_mem)
5153 + {
5154 +diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c
5155 +index a2d3da0ad95f3..d8053789c8828 100644
5156 +--- a/drivers/crypto/qce/skcipher.c
5157 ++++ b/drivers/crypto/qce/skcipher.c
5158 +@@ -71,7 +71,7 @@ qce_skcipher_async_req_handle(struct crypto_async_request *async_req)
5159 + struct scatterlist *sg;
5160 + bool diff_dst;
5161 + gfp_t gfp;
5162 +- int ret;
5163 ++ int dst_nents, src_nents, ret;
5164 +
5165 + rctx->iv = req->iv;
5166 + rctx->ivsize = crypto_skcipher_ivsize(skcipher);
5167 +@@ -122,21 +122,26 @@ qce_skcipher_async_req_handle(struct crypto_async_request *async_req)
5168 + sg_mark_end(sg);
5169 + rctx->dst_sg = rctx->dst_tbl.sgl;
5170 +
5171 +- ret = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
5172 +- if (ret < 0)
5173 ++ dst_nents = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
5174 ++ if (dst_nents < 0) {
5175 ++ ret = dst_nents;
5176 + goto error_free;
5177 ++ }
5178 +
5179 + if (diff_dst) {
5180 +- ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, dir_src);
5181 +- if (ret < 0)
5182 ++ src_nents = dma_map_sg(qce->dev, req->src, rctx->src_nents, dir_src);
5183 ++ if (src_nents < 0) {
5184 ++ ret = src_nents;
5185 + goto error_unmap_dst;
5186 ++ }
5187 + rctx->src_sg = req->src;
5188 + } else {
5189 + rctx->src_sg = rctx->dst_sg;
5190 ++ src_nents = dst_nents - 1;
5191 + }
5192 +
5193 +- ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, rctx->src_nents,
5194 +- rctx->dst_sg, rctx->dst_nents,
5195 ++ ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, src_nents,
5196 ++ rctx->dst_sg, dst_nents,
5197 + qce_skcipher_done, async_req);
5198 + if (ret)
5199 + goto error_unmap_src;
5200 +diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c
5201 +index 4640fe0c1f221..f15fc1fb37079 100644
5202 +--- a/drivers/crypto/sa2ul.c
5203 ++++ b/drivers/crypto/sa2ul.c
5204 +@@ -2270,9 +2270,9 @@ static int sa_dma_init(struct sa_crypto_data *dd)
5205 +
5206 + dd->dma_rx2 = dma_request_chan(dd->dev, "rx2");
5207 + if (IS_ERR(dd->dma_rx2)) {
5208 +- dma_release_channel(dd->dma_rx1);
5209 +- return dev_err_probe(dd->dev, PTR_ERR(dd->dma_rx2),
5210 +- "Unable to request rx2 DMA channel\n");
5211 ++ ret = dev_err_probe(dd->dev, PTR_ERR(dd->dma_rx2),
5212 ++ "Unable to request rx2 DMA channel\n");
5213 ++ goto err_dma_rx2;
5214 + }
5215 +
5216 + dd->dma_tx = dma_request_chan(dd->dev, "tx");
5217 +@@ -2293,28 +2293,31 @@ static int sa_dma_init(struct sa_crypto_data *dd)
5218 + if (ret) {
5219 + dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
5220 + ret);
5221 +- return ret;
5222 ++ goto err_dma_config;
5223 + }
5224 +
5225 + ret = dmaengine_slave_config(dd->dma_rx2, &cfg);
5226 + if (ret) {
5227 + dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
5228 + ret);
5229 +- return ret;
5230 ++ goto err_dma_config;
5231 + }
5232 +
5233 + ret = dmaengine_slave_config(dd->dma_tx, &cfg);
5234 + if (ret) {
5235 + dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n",
5236 + ret);
5237 +- return ret;
5238 ++ goto err_dma_config;
5239 + }
5240 +
5241 + return 0;
5242 +
5243 ++err_dma_config:
5244 ++ dma_release_channel(dd->dma_tx);
5245 + err_dma_tx:
5246 +- dma_release_channel(dd->dma_rx1);
5247 + dma_release_channel(dd->dma_rx2);
5248 ++err_dma_rx2:
5249 ++ dma_release_channel(dd->dma_rx1);
5250 +
5251 + return ret;
5252 + }
5253 +@@ -2353,13 +2356,14 @@ static int sa_ul_probe(struct platform_device *pdev)
5254 + if (ret < 0) {
5255 + dev_err(&pdev->dev, "%s: failed to get sync: %d\n", __func__,
5256 + ret);
5257 ++ pm_runtime_disable(dev);
5258 + return ret;
5259 + }
5260 +
5261 + sa_init_mem(dev_data);
5262 + ret = sa_dma_init(dev_data);
5263 + if (ret)
5264 +- goto disable_pm_runtime;
5265 ++ goto destroy_dma_pool;
5266 +
5267 + spin_lock_init(&dev_data->scid_lock);
5268 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
5269 +@@ -2389,9 +2393,9 @@ release_dma:
5270 + dma_release_channel(dev_data->dma_rx1);
5271 + dma_release_channel(dev_data->dma_tx);
5272 +
5273 ++destroy_dma_pool:
5274 + dma_pool_destroy(dev_data->sc_pool);
5275 +
5276 +-disable_pm_runtime:
5277 + pm_runtime_put_sync(&pdev->dev);
5278 + pm_runtime_disable(&pdev->dev);
5279 +
5280 +diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
5281 +index 3d407eebb2bab..1e2daf4030327 100644
5282 +--- a/drivers/crypto/ux500/hash/hash_core.c
5283 ++++ b/drivers/crypto/ux500/hash/hash_core.c
5284 +@@ -1009,6 +1009,7 @@ static int hash_hw_final(struct ahash_request *req)
5285 + goto out;
5286 + }
5287 + } else if (req->nbytes == 0 && ctx->keylen > 0) {
5288 ++ ret = -EPERM;
5289 + dev_err(device_data->dev, "%s: Empty message with keylength > 0, NOT supported\n",
5290 + __func__);
5291 + goto out;
5292 +diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
5293 +index 98f03a02d1122..829128c0cc68c 100644
5294 +--- a/drivers/devfreq/devfreq.c
5295 ++++ b/drivers/devfreq/devfreq.c
5296 +@@ -789,6 +789,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
5297 + if (devfreq->profile->timer < 0
5298 + || devfreq->profile->timer >= DEVFREQ_TIMER_NUM) {
5299 + mutex_unlock(&devfreq->lock);
5300 ++ err = -EINVAL;
5301 + goto err_dev;
5302 + }
5303 +
5304 +diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c
5305 +index 7b52691c45d26..4912a7b883801 100644
5306 +--- a/drivers/edac/i10nm_base.c
5307 ++++ b/drivers/edac/i10nm_base.c
5308 +@@ -263,6 +263,9 @@ static int __init i10nm_init(void)
5309 + if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
5310 + return -EBUSY;
5311 +
5312 ++ if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
5313 ++ return -ENODEV;
5314 ++
5315 + id = x86_match_cpu(i10nm_cpuids);
5316 + if (!id)
5317 + return -ENODEV;
5318 +diff --git a/drivers/edac/pnd2_edac.c b/drivers/edac/pnd2_edac.c
5319 +index 928f63a374c78..c94ca1f790c43 100644
5320 +--- a/drivers/edac/pnd2_edac.c
5321 ++++ b/drivers/edac/pnd2_edac.c
5322 +@@ -1554,6 +1554,9 @@ static int __init pnd2_init(void)
5323 + if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
5324 + return -EBUSY;
5325 +
5326 ++ if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
5327 ++ return -ENODEV;
5328 ++
5329 + id = x86_match_cpu(pnd2_cpuids);
5330 + if (!id)
5331 + return -ENODEV;
5332 +diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
5333 +index 93daa4297f2e0..4c626fcd4dcbb 100644
5334 +--- a/drivers/edac/sb_edac.c
5335 ++++ b/drivers/edac/sb_edac.c
5336 +@@ -3510,6 +3510,9 @@ static int __init sbridge_init(void)
5337 + if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
5338 + return -EBUSY;
5339 +
5340 ++ if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
5341 ++ return -ENODEV;
5342 ++
5343 + id = x86_match_cpu(sbridge_cpuids);
5344 + if (!id)
5345 + return -ENODEV;
5346 +diff --git a/drivers/edac/skx_base.c b/drivers/edac/skx_base.c
5347 +index 2c7db95df3263..f887e31666510 100644
5348 +--- a/drivers/edac/skx_base.c
5349 ++++ b/drivers/edac/skx_base.c
5350 +@@ -656,6 +656,9 @@ static int __init skx_init(void)
5351 + if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
5352 + return -EBUSY;
5353 +
5354 ++ if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
5355 ++ return -ENODEV;
5356 ++
5357 + id = x86_match_cpu(skx_cpuids);
5358 + if (!id)
5359 + return -ENODEV;
5360 +diff --git a/drivers/edac/ti_edac.c b/drivers/edac/ti_edac.c
5361 +index e7eae20f83d1d..169f96e51c293 100644
5362 +--- a/drivers/edac/ti_edac.c
5363 ++++ b/drivers/edac/ti_edac.c
5364 +@@ -197,6 +197,7 @@ static const struct of_device_id ti_edac_of_match[] = {
5365 + { .compatible = "ti,emif-dra7xx", .data = (void *)EMIF_TYPE_DRA7 },
5366 + {},
5367 + };
5368 ++MODULE_DEVICE_TABLE(of, ti_edac_of_match);
5369 +
5370 + static int _emif_get_id(struct device_node *node)
5371 + {
5372 +diff --git a/drivers/extcon/extcon-max8997.c b/drivers/extcon/extcon-max8997.c
5373 +index 337b0eea4e629..64008808675ef 100644
5374 +--- a/drivers/extcon/extcon-max8997.c
5375 ++++ b/drivers/extcon/extcon-max8997.c
5376 +@@ -729,7 +729,7 @@ static int max8997_muic_probe(struct platform_device *pdev)
5377 + 2, info->status);
5378 + if (ret) {
5379 + dev_err(info->dev, "failed to read MUIC register\n");
5380 +- return ret;
5381 ++ goto err_irq;
5382 + }
5383 + cable_type = max8997_muic_get_cable_type(info,
5384 + MAX8997_CABLE_GROUP_ADC, &attached);
5385 +@@ -784,3 +784,4 @@ module_platform_driver(max8997_muic_driver);
5386 + MODULE_DESCRIPTION("Maxim MAX8997 Extcon driver");
5387 + MODULE_AUTHOR("Donggeun Kim <dg77.kim@×××××××.com>");
5388 + MODULE_LICENSE("GPL");
5389 ++MODULE_ALIAS("platform:max8997-muic");
5390 +diff --git a/drivers/extcon/extcon-sm5502.c b/drivers/extcon/extcon-sm5502.c
5391 +index 106d4da647bd9..5e0718dee03bc 100644
5392 +--- a/drivers/extcon/extcon-sm5502.c
5393 ++++ b/drivers/extcon/extcon-sm5502.c
5394 +@@ -88,7 +88,6 @@ static struct reg_data sm5502_reg_data[] = {
5395 + | SM5502_REG_INTM2_MHL_MASK,
5396 + .invert = true,
5397 + },
5398 +- { }
5399 + };
5400 +
5401 + /* List of detectable cables */
5402 +diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c
5403 +index 3aa489dba30a7..2a7687911c097 100644
5404 +--- a/drivers/firmware/stratix10-svc.c
5405 ++++ b/drivers/firmware/stratix10-svc.c
5406 +@@ -1034,24 +1034,32 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev)
5407 +
5408 + /* add svc client device(s) */
5409 + svc = devm_kzalloc(dev, sizeof(*svc), GFP_KERNEL);
5410 +- if (!svc)
5411 +- return -ENOMEM;
5412 ++ if (!svc) {
5413 ++ ret = -ENOMEM;
5414 ++ goto err_free_kfifo;
5415 ++ }
5416 +
5417 + svc->stratix10_svc_rsu = platform_device_alloc(STRATIX10_RSU, 0);
5418 + if (!svc->stratix10_svc_rsu) {
5419 + dev_err(dev, "failed to allocate %s device\n", STRATIX10_RSU);
5420 +- return -ENOMEM;
5421 ++ ret = -ENOMEM;
5422 ++ goto err_free_kfifo;
5423 + }
5424 +
5425 + ret = platform_device_add(svc->stratix10_svc_rsu);
5426 +- if (ret) {
5427 +- platform_device_put(svc->stratix10_svc_rsu);
5428 +- return ret;
5429 +- }
5430 ++ if (ret)
5431 ++ goto err_put_device;
5432 ++
5433 + dev_set_drvdata(dev, svc);
5434 +
5435 + pr_info("Intel Service Layer Driver Initialized\n");
5436 +
5437 ++ return 0;
5438 ++
5439 ++err_put_device:
5440 ++ platform_device_put(svc->stratix10_svc_rsu);
5441 ++err_free_kfifo:
5442 ++ kfifo_free(&controller->svc_fifo);
5443 + return ret;
5444 + }
5445 +
5446 +diff --git a/drivers/fsi/fsi-core.c b/drivers/fsi/fsi-core.c
5447 +index 4e60e84cd17a5..59ddc9fd5bca4 100644
5448 +--- a/drivers/fsi/fsi-core.c
5449 ++++ b/drivers/fsi/fsi-core.c
5450 +@@ -724,7 +724,7 @@ static ssize_t cfam_read(struct file *filep, char __user *buf, size_t count,
5451 + rc = count;
5452 + fail:
5453 + *offset = off;
5454 +- return count;
5455 ++ return rc;
5456 + }
5457 +
5458 + static ssize_t cfam_write(struct file *filep, const char __user *buf,
5459 +@@ -761,7 +761,7 @@ static ssize_t cfam_write(struct file *filep, const char __user *buf,
5460 + rc = count;
5461 + fail:
5462 + *offset = off;
5463 +- return count;
5464 ++ return rc;
5465 + }
5466 +
5467 + static loff_t cfam_llseek(struct file *file, loff_t offset, int whence)
5468 +diff --git a/drivers/fsi/fsi-occ.c b/drivers/fsi/fsi-occ.c
5469 +index 9eeb856c8905e..a691f9732a13b 100644
5470 +--- a/drivers/fsi/fsi-occ.c
5471 ++++ b/drivers/fsi/fsi-occ.c
5472 +@@ -445,6 +445,7 @@ int fsi_occ_submit(struct device *dev, const void *request, size_t req_len,
5473 + goto done;
5474 +
5475 + if (resp->return_status == OCC_RESP_CMD_IN_PRG ||
5476 ++ resp->return_status == OCC_RESP_CRIT_INIT ||
5477 + resp->seq_no != seq_no) {
5478 + rc = -ETIMEDOUT;
5479 +
5480 +diff --git a/drivers/fsi/fsi-sbefifo.c b/drivers/fsi/fsi-sbefifo.c
5481 +index bfd5e5da80209..84cb965bfed5c 100644
5482 +--- a/drivers/fsi/fsi-sbefifo.c
5483 ++++ b/drivers/fsi/fsi-sbefifo.c
5484 +@@ -325,7 +325,8 @@ static int sbefifo_up_write(struct sbefifo *sbefifo, __be32 word)
5485 + static int sbefifo_request_reset(struct sbefifo *sbefifo)
5486 + {
5487 + struct device *dev = &sbefifo->fsi_dev->dev;
5488 +- u32 status, timeout;
5489 ++ unsigned long end_time;
5490 ++ u32 status;
5491 + int rc;
5492 +
5493 + dev_dbg(dev, "Requesting FIFO reset\n");
5494 +@@ -341,7 +342,8 @@ static int sbefifo_request_reset(struct sbefifo *sbefifo)
5495 + }
5496 +
5497 + /* Wait for it to complete */
5498 +- for (timeout = 0; timeout < SBEFIFO_RESET_TIMEOUT; timeout++) {
5499 ++ end_time = jiffies + msecs_to_jiffies(SBEFIFO_RESET_TIMEOUT);
5500 ++ while (!time_after(jiffies, end_time)) {
5501 + rc = sbefifo_regr(sbefifo, SBEFIFO_UP | SBEFIFO_STS, &status);
5502 + if (rc) {
5503 + dev_err(dev, "Failed to read UP fifo status during reset"
5504 +@@ -355,7 +357,7 @@ static int sbefifo_request_reset(struct sbefifo *sbefifo)
5505 + return 0;
5506 + }
5507 +
5508 +- msleep(1);
5509 ++ cond_resched();
5510 + }
5511 + dev_err(dev, "FIFO reset timed out\n");
5512 +
5513 +@@ -400,7 +402,7 @@ static int sbefifo_cleanup_hw(struct sbefifo *sbefifo)
5514 + /* The FIFO already contains a reset request from the SBE ? */
5515 + if (down_status & SBEFIFO_STS_RESET_REQ) {
5516 + dev_info(dev, "Cleanup: FIFO reset request set, resetting\n");
5517 +- rc = sbefifo_regw(sbefifo, SBEFIFO_UP, SBEFIFO_PERFORM_RESET);
5518 ++ rc = sbefifo_regw(sbefifo, SBEFIFO_DOWN, SBEFIFO_PERFORM_RESET);
5519 + if (rc) {
5520 + sbefifo->broken = true;
5521 + dev_err(dev, "Cleanup: Reset reg write failed, rc=%d\n", rc);
5522 +diff --git a/drivers/fsi/fsi-scom.c b/drivers/fsi/fsi-scom.c
5523 +index b45bfab7b7f55..75d1389e2626d 100644
5524 +--- a/drivers/fsi/fsi-scom.c
5525 ++++ b/drivers/fsi/fsi-scom.c
5526 +@@ -38,9 +38,10 @@
5527 + #define SCOM_STATUS_PIB_RESP_MASK 0x00007000
5528 + #define SCOM_STATUS_PIB_RESP_SHIFT 12
5529 +
5530 +-#define SCOM_STATUS_ANY_ERR (SCOM_STATUS_PROTECTION | \
5531 +- SCOM_STATUS_PARITY | \
5532 +- SCOM_STATUS_PIB_ABORT | \
5533 ++#define SCOM_STATUS_FSI2PIB_ERROR (SCOM_STATUS_PROTECTION | \
5534 ++ SCOM_STATUS_PARITY | \
5535 ++ SCOM_STATUS_PIB_ABORT)
5536 ++#define SCOM_STATUS_ANY_ERR (SCOM_STATUS_FSI2PIB_ERROR | \
5537 + SCOM_STATUS_PIB_RESP_MASK)
5538 + /* SCOM address encodings */
5539 + #define XSCOM_ADDR_IND_FLAG BIT_ULL(63)
5540 +@@ -240,13 +241,14 @@ static int handle_fsi2pib_status(struct scom_device *scom, uint32_t status)
5541 + {
5542 + uint32_t dummy = -1;
5543 +
5544 +- if (status & SCOM_STATUS_PROTECTION)
5545 +- return -EPERM;
5546 +- if (status & SCOM_STATUS_PARITY) {
5547 ++ if (status & SCOM_STATUS_FSI2PIB_ERROR)
5548 + fsi_device_write(scom->fsi_dev, SCOM_FSI2PIB_RESET_REG, &dummy,
5549 + sizeof(uint32_t));
5550 ++
5551 ++ if (status & SCOM_STATUS_PROTECTION)
5552 ++ return -EPERM;
5553 ++ if (status & SCOM_STATUS_PARITY)
5554 + return -EIO;
5555 +- }
5556 + /* Return -EBUSY on PIB abort to force a retry */
5557 + if (status & SCOM_STATUS_PIB_ABORT)
5558 + return -EBUSY;
5559 +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
5560 +index 1e448f1b39a18..955a055bd9800 100644
5561 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
5562 ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
5563 +@@ -268,6 +268,9 @@ dm_dp_mst_detect(struct drm_connector *connector,
5564 + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5565 + struct amdgpu_dm_connector *master = aconnector->mst_port;
5566 +
5567 ++ if (drm_connector_is_unregistered(connector))
5568 ++ return connector_status_disconnected;
5569 ++
5570 + return drm_dp_mst_detect_port(connector, ctx, &master->mst_mgr,
5571 + aconnector->port);
5572 + }
5573 +diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
5574 +index 77066bca87939..ee82b2ddf9325 100644
5575 +--- a/drivers/gpu/drm/ast/ast_main.c
5576 ++++ b/drivers/gpu/drm/ast/ast_main.c
5577 +@@ -409,7 +409,7 @@ struct ast_private *ast_device_create(struct drm_driver *drv,
5578 + dev->pdev = pdev;
5579 + pci_set_drvdata(pdev, dev);
5580 +
5581 +- ast->regs = pci_iomap(dev->pdev, 1, 0);
5582 ++ ast->regs = pcim_iomap(pdev, 1, 0);
5583 + if (!ast->regs)
5584 + return ERR_PTR(-EIO);
5585 +
5586 +@@ -425,7 +425,7 @@ struct ast_private *ast_device_create(struct drm_driver *drv,
5587 +
5588 + /* "map" IO regs if the above hasn't done so already */
5589 + if (!ast->ioregs) {
5590 +- ast->ioregs = pci_iomap(dev->pdev, 2, 0);
5591 ++ ast->ioregs = pcim_iomap(pdev, 2, 0);
5592 + if (!ast->ioregs)
5593 + return ERR_PTR(-EIO);
5594 + }
5595 +diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
5596 +index e145cbb35baca..4e82647a621ef 100644
5597 +--- a/drivers/gpu/drm/bridge/Kconfig
5598 ++++ b/drivers/gpu/drm/bridge/Kconfig
5599 +@@ -130,7 +130,7 @@ config DRM_SIL_SII8620
5600 + tristate "Silicon Image SII8620 HDMI/MHL bridge"
5601 + depends on OF
5602 + select DRM_KMS_HELPER
5603 +- imply EXTCON
5604 ++ select EXTCON
5605 + depends on RC_CORE || !RC_CORE
5606 + help
5607 + Silicon Image SII8620 HDMI/MHL bridge chip driver.
5608 +diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
5609 +index 64f0effb52ac1..044acd07c1538 100644
5610 +--- a/drivers/gpu/drm/drm_bridge.c
5611 ++++ b/drivers/gpu/drm/drm_bridge.c
5612 +@@ -522,6 +522,9 @@ void drm_bridge_chain_pre_enable(struct drm_bridge *bridge)
5613 + list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
5614 + if (iter->funcs->pre_enable)
5615 + iter->funcs->pre_enable(iter);
5616 ++
5617 ++ if (iter == bridge)
5618 ++ break;
5619 + }
5620 + }
5621 + EXPORT_SYMBOL(drm_bridge_chain_pre_enable);
5622 +diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
5623 +index 3416e9617ee9a..96f3908e4c5b9 100644
5624 +--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
5625 ++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
5626 +@@ -222,7 +222,7 @@ int dpu_mdss_init(struct drm_device *dev)
5627 + struct msm_drm_private *priv = dev->dev_private;
5628 + struct dpu_mdss *dpu_mdss;
5629 + struct dss_module_power *mp;
5630 +- int ret = 0;
5631 ++ int ret;
5632 + int irq;
5633 +
5634 + dpu_mdss = devm_kzalloc(dev->dev, sizeof(*dpu_mdss), GFP_KERNEL);
5635 +@@ -250,8 +250,10 @@ int dpu_mdss_init(struct drm_device *dev)
5636 + goto irq_domain_error;
5637 +
5638 + irq = platform_get_irq(pdev, 0);
5639 +- if (irq < 0)
5640 ++ if (irq < 0) {
5641 ++ ret = irq;
5642 + goto irq_error;
5643 ++ }
5644 +
5645 + irq_set_chained_handler_and_data(irq, dpu_mdss_irq,
5646 + dpu_mdss);
5647 +@@ -260,7 +262,7 @@ int dpu_mdss_init(struct drm_device *dev)
5648 +
5649 + pm_runtime_enable(dev->dev);
5650 +
5651 +- return ret;
5652 ++ return 0;
5653 +
5654 + irq_error:
5655 + _dpu_mdss_irq_domain_fini(dpu_mdss);
5656 +diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
5657 +index 0aacc43faefa3..edee4c2a76ce4 100644
5658 +--- a/drivers/gpu/drm/msm/msm_drv.c
5659 ++++ b/drivers/gpu/drm/msm/msm_drv.c
5660 +@@ -505,6 +505,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
5661 + priv->event_thread[i].worker = kthread_create_worker(0,
5662 + "crtc_event:%d", priv->event_thread[i].crtc_id);
5663 + if (IS_ERR(priv->event_thread[i].worker)) {
5664 ++ ret = PTR_ERR(priv->event_thread[i].worker);
5665 + DRM_DEV_ERROR(dev, "failed to create crtc_event kthread\n");
5666 + goto err_msm_uninit;
5667 + }
5668 +diff --git a/drivers/gpu/drm/pl111/Kconfig b/drivers/gpu/drm/pl111/Kconfig
5669 +index 80f6748055e36..3aae387a96af2 100644
5670 +--- a/drivers/gpu/drm/pl111/Kconfig
5671 ++++ b/drivers/gpu/drm/pl111/Kconfig
5672 +@@ -3,6 +3,7 @@ config DRM_PL111
5673 + tristate "DRM Support for PL111 CLCD Controller"
5674 + depends on DRM
5675 + depends on ARM || ARM64 || COMPILE_TEST
5676 ++ depends on VEXPRESS_CONFIG || VEXPRESS_CONFIG=n
5677 + depends on COMMON_CLK
5678 + select DRM_KMS_HELPER
5679 + select DRM_KMS_CMA_HELPER
5680 +diff --git a/drivers/gpu/drm/qxl/qxl_dumb.c b/drivers/gpu/drm/qxl/qxl_dumb.c
5681 +index c04cd5a2553ce..e377bdbff90dd 100644
5682 +--- a/drivers/gpu/drm/qxl/qxl_dumb.c
5683 ++++ b/drivers/gpu/drm/qxl/qxl_dumb.c
5684 +@@ -58,6 +58,8 @@ int qxl_mode_dumb_create(struct drm_file *file_priv,
5685 + surf.height = args->height;
5686 + surf.stride = pitch;
5687 + surf.format = format;
5688 ++ surf.data = 0;
5689 ++
5690 + r = qxl_gem_object_create_with_handle(qdev, file_priv,
5691 + QXL_GEM_DOMAIN_SURFACE,
5692 + args->size, &surf, &qobj,
5693 +diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
5694 +index a4a45daf93f2b..6802d9b65f828 100644
5695 +--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
5696 ++++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
5697 +@@ -73,6 +73,7 @@ static int cdn_dp_grf_write(struct cdn_dp_device *dp,
5698 + ret = regmap_write(dp->grf, reg, val);
5699 + if (ret) {
5700 + DRM_DEV_ERROR(dp->dev, "Could not write to GRF: %d\n", ret);
5701 ++ clk_disable_unprepare(dp->grf_clk);
5702 + return ret;
5703 + }
5704 +
5705 +diff --git a/drivers/gpu/drm/rockchip/cdn-dp-reg.c b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
5706 +index 9d2163ef4d6e2..33fb4d05c5065 100644
5707 +--- a/drivers/gpu/drm/rockchip/cdn-dp-reg.c
5708 ++++ b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
5709 +@@ -658,7 +658,7 @@ int cdn_dp_config_video(struct cdn_dp_device *dp)
5710 + */
5711 + do {
5712 + tu_size_reg += 2;
5713 +- symbol = tu_size_reg * mode->clock * bit_per_pix;
5714 ++ symbol = (u64)tu_size_reg * mode->clock * bit_per_pix;
5715 + do_div(symbol, dp->max_lanes * link_rate * 8);
5716 + rem = do_div(symbol, 1000);
5717 + if (tu_size_reg > 64) {
5718 +diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
5719 +index 542dcf7eddd66..75a76408cb29e 100644
5720 +--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
5721 ++++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
5722 +@@ -692,13 +692,8 @@ static const struct dw_mipi_dsi_phy_ops dw_mipi_dsi_rockchip_phy_ops = {
5723 + .get_timing = dw_mipi_dsi_phy_get_timing,
5724 + };
5725 +
5726 +-static void dw_mipi_dsi_rockchip_config(struct dw_mipi_dsi_rockchip *dsi,
5727 +- int mux)
5728 ++static void dw_mipi_dsi_rockchip_config(struct dw_mipi_dsi_rockchip *dsi)
5729 + {
5730 +- if (dsi->cdata->lcdsel_grf_reg)
5731 +- regmap_write(dsi->grf_regmap, dsi->cdata->lcdsel_grf_reg,
5732 +- mux ? dsi->cdata->lcdsel_lit : dsi->cdata->lcdsel_big);
5733 +-
5734 + if (dsi->cdata->lanecfg1_grf_reg)
5735 + regmap_write(dsi->grf_regmap, dsi->cdata->lanecfg1_grf_reg,
5736 + dsi->cdata->lanecfg1);
5737 +@@ -712,6 +707,13 @@ static void dw_mipi_dsi_rockchip_config(struct dw_mipi_dsi_rockchip *dsi,
5738 + dsi->cdata->enable);
5739 + }
5740 +
5741 ++static void dw_mipi_dsi_rockchip_set_lcdsel(struct dw_mipi_dsi_rockchip *dsi,
5742 ++ int mux)
5743 ++{
5744 ++ regmap_write(dsi->grf_regmap, dsi->cdata->lcdsel_grf_reg,
5745 ++ mux ? dsi->cdata->lcdsel_lit : dsi->cdata->lcdsel_big);
5746 ++}
5747 ++
5748 + static int
5749 + dw_mipi_dsi_encoder_atomic_check(struct drm_encoder *encoder,
5750 + struct drm_crtc_state *crtc_state,
5751 +@@ -767,9 +769,9 @@ static void dw_mipi_dsi_encoder_enable(struct drm_encoder *encoder)
5752 + return;
5753 + }
5754 +
5755 +- dw_mipi_dsi_rockchip_config(dsi, mux);
5756 ++ dw_mipi_dsi_rockchip_set_lcdsel(dsi, mux);
5757 + if (dsi->slave)
5758 +- dw_mipi_dsi_rockchip_config(dsi->slave, mux);
5759 ++ dw_mipi_dsi_rockchip_set_lcdsel(dsi->slave, mux);
5760 +
5761 + clk_disable_unprepare(dsi->grf_clk);
5762 + }
5763 +@@ -923,6 +925,24 @@ static int dw_mipi_dsi_rockchip_bind(struct device *dev,
5764 + return ret;
5765 + }
5766 +
5767 ++ /*
5768 ++ * With the GRF clock running, write lane and dual-mode configurations
5769 ++ * that won't change immediately. If we waited until enable() to do
5770 ++ * this, things like panel preparation would not be able to send
5771 ++ * commands over DSI.
5772 ++ */
5773 ++ ret = clk_prepare_enable(dsi->grf_clk);
5774 ++ if (ret) {
5775 ++ DRM_DEV_ERROR(dsi->dev, "Failed to enable grf_clk: %d\n", ret);
5776 ++ return ret;
5777 ++ }
5778 ++
5779 ++ dw_mipi_dsi_rockchip_config(dsi);
5780 ++ if (dsi->slave)
5781 ++ dw_mipi_dsi_rockchip_config(dsi->slave);
5782 ++
5783 ++ clk_disable_unprepare(dsi->grf_clk);
5784 ++
5785 + ret = rockchip_dsi_drm_create_encoder(dsi, drm_dev);
5786 + if (ret) {
5787 + DRM_DEV_ERROR(dev, "Failed to create drm encoder\n");
5788 +diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
5789 +index c80f7d9fd13f8..0f23144491e40 100644
5790 +--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
5791 ++++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
5792 +@@ -1013,6 +1013,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
5793 + VOP_WIN_SET(vop, win, alpha_en, 1);
5794 + } else {
5795 + VOP_WIN_SET(vop, win, src_alpha_ctl, SRC_ALPHA_EN(0));
5796 ++ VOP_WIN_SET(vop, win, alpha_en, 0);
5797 + }
5798 +
5799 + VOP_WIN_SET(vop, win, enable, 1);
5800 +diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c
5801 +index 41edd0a421b25..7c20b4a24a7e2 100644
5802 +--- a/drivers/gpu/drm/rockchip/rockchip_lvds.c
5803 ++++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c
5804 +@@ -499,11 +499,11 @@ static int px30_lvds_probe(struct platform_device *pdev,
5805 + if (IS_ERR(lvds->dphy))
5806 + return PTR_ERR(lvds->dphy);
5807 +
5808 +- phy_init(lvds->dphy);
5809 ++ ret = phy_init(lvds->dphy);
5810 + if (ret)
5811 + return ret;
5812 +
5813 +- phy_set_mode(lvds->dphy, PHY_MODE_LVDS);
5814 ++ ret = phy_set_mode(lvds->dphy, PHY_MODE_LVDS);
5815 + if (ret)
5816 + return ret;
5817 +
5818 +diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
5819 +index 88a8cb840cd54..25a09aaf58838 100644
5820 +--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
5821 ++++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
5822 +@@ -1795,7 +1795,7 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
5823 + &hpd_gpio_flags);
5824 + if (vc4_hdmi->hpd_gpio < 0) {
5825 + ret = vc4_hdmi->hpd_gpio;
5826 +- goto err_unprepare_hsm;
5827 ++ goto err_put_ddc;
5828 + }
5829 +
5830 + vc4_hdmi->hpd_active_low = hpd_gpio_flags & OF_GPIO_ACTIVE_LOW;
5831 +@@ -1836,8 +1836,8 @@ err_destroy_conn:
5832 + vc4_hdmi_connector_destroy(&vc4_hdmi->connector);
5833 + err_destroy_encoder:
5834 + drm_encoder_cleanup(encoder);
5835 +-err_unprepare_hsm:
5836 + pm_runtime_disable(dev);
5837 ++err_put_ddc:
5838 + put_device(&vc4_hdmi->ddc->dev);
5839 +
5840 + return ret;
5841 +diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
5842 +index 4db25bd9fa22d..127eaf0a0a580 100644
5843 +--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
5844 ++++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
5845 +@@ -1467,6 +1467,7 @@ struct svga3dsurface_cache {
5846 +
5847 + /**
5848 + * struct svga3dsurface_loc - Surface location
5849 ++ * @sheet: The multisample sheet.
5850 + * @sub_resource: Surface subresource. Defined as layer * num_mip_levels +
5851 + * mip_level.
5852 + * @x: X coordinate.
5853 +@@ -1474,6 +1475,7 @@ struct svga3dsurface_cache {
5854 + * @z: Z coordinate.
5855 + */
5856 + struct svga3dsurface_loc {
5857 ++ u32 sheet;
5858 + u32 sub_resource;
5859 + u32 x, y, z;
5860 + };
5861 +@@ -1566,8 +1568,8 @@ svga3dsurface_get_loc(const struct svga3dsurface_cache *cache,
5862 + u32 layer;
5863 + int i;
5864 +
5865 +- if (offset >= cache->sheet_bytes)
5866 +- offset %= cache->sheet_bytes;
5867 ++ loc->sheet = offset / cache->sheet_bytes;
5868 ++ offset -= loc->sheet * cache->sheet_bytes;
5869 +
5870 + layer = offset / cache->mip_chain_bytes;
5871 + offset -= layer * cache->mip_chain_bytes;
5872 +@@ -1631,6 +1633,7 @@ svga3dsurface_min_loc(const struct svga3dsurface_cache *cache,
5873 + u32 sub_resource,
5874 + struct svga3dsurface_loc *loc)
5875 + {
5876 ++ loc->sheet = 0;
5877 + loc->sub_resource = sub_resource;
5878 + loc->x = loc->y = loc->z = 0;
5879 + }
5880 +@@ -1652,6 +1655,7 @@ svga3dsurface_max_loc(const struct svga3dsurface_cache *cache,
5881 + const struct drm_vmw_size *size;
5882 + u32 mip;
5883 +
5884 ++ loc->sheet = 0;
5885 + loc->sub_resource = sub_resource + 1;
5886 + mip = sub_resource % cache->num_mip_levels;
5887 + size = &cache->mip[mip].size;
5888 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
5889 +index e67e2e8f6e6fa..83e1b54eb8647 100644
5890 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
5891 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
5892 +@@ -2759,12 +2759,24 @@ static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
5893 + {
5894 + VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXGenMips) =
5895 + container_of(header, typeof(*cmd), header);
5896 +- struct vmw_resource *ret;
5897 ++ struct vmw_resource *view;
5898 ++ struct vmw_res_cache_entry *rcache;
5899 +
5900 +- ret = vmw_view_id_val_add(sw_context, vmw_view_sr,
5901 +- cmd->body.shaderResourceViewId);
5902 ++ view = vmw_view_id_val_add(sw_context, vmw_view_sr,
5903 ++ cmd->body.shaderResourceViewId);
5904 ++ if (IS_ERR(view))
5905 ++ return PTR_ERR(view);
5906 +
5907 +- return PTR_ERR_OR_ZERO(ret);
5908 ++ /*
5909 ++ * Normally the shader-resource view is not gpu-dirtying, but for
5910 ++ * this particular command it is...
5911 ++ * So mark the last looked-up surface, which is the surface
5912 ++ * the view points to, gpu-dirty.
5913 ++ */
5914 ++ rcache = &sw_context->res_cache[vmw_res_surface];
5915 ++ vmw_validation_res_set_dirty(sw_context->ctx, rcache->private,
5916 ++ VMW_RES_DIRTY_SET);
5917 ++ return 0;
5918 + }
5919 +
5920 + /**
5921 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
5922 +index 3914bfee0533b..f493b20c7a38c 100644
5923 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
5924 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
5925 +@@ -1802,6 +1802,19 @@ static void vmw_surface_tex_dirty_range_add(struct vmw_resource *res,
5926 + svga3dsurface_get_loc(cache, &loc2, end - 1);
5927 + svga3dsurface_inc_loc(cache, &loc2);
5928 +
5929 ++ if (loc1.sheet != loc2.sheet) {
5930 ++ u32 sub_res;
5931 ++
5932 ++ /*
5933 ++ * Multiple multisample sheets. To do this in an optimized
5934 ++ * fashion, compute the dirty region for each sheet and the
5935 ++ * resulting union. Since this is not a common case, just dirty
5936 ++ * the whole surface.
5937 ++ */
5938 ++ for (sub_res = 0; sub_res < dirty->num_subres; ++sub_res)
5939 ++ vmw_subres_dirty_full(dirty, sub_res);
5940 ++ return;
5941 ++ }
5942 + if (loc1.sub_resource + 1 == loc2.sub_resource) {
5943 + /* Dirty range covers a single sub-resource */
5944 + vmw_subres_dirty_add(dirty, &loc1, &loc2);
5945 +diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
5946 +index 0f69f35f2957e..5550c943f9855 100644
5947 +--- a/drivers/hid/hid-core.c
5948 ++++ b/drivers/hid/hid-core.c
5949 +@@ -2306,12 +2306,8 @@ static int hid_device_remove(struct device *dev)
5950 + {
5951 + struct hid_device *hdev = to_hid_device(dev);
5952 + struct hid_driver *hdrv;
5953 +- int ret = 0;
5954 +
5955 +- if (down_interruptible(&hdev->driver_input_lock)) {
5956 +- ret = -EINTR;
5957 +- goto end;
5958 +- }
5959 ++ down(&hdev->driver_input_lock);
5960 + hdev->io_started = false;
5961 +
5962 + hdrv = hdev->driver;
5963 +@@ -2326,8 +2322,8 @@ static int hid_device_remove(struct device *dev)
5964 +
5965 + if (!hdev->io_started)
5966 + up(&hdev->driver_input_lock);
5967 +-end:
5968 +- return ret;
5969 ++
5970 ++ return 0;
5971 + }
5972 +
5973 + static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
5974 +diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
5975 +index 195910dd2154e..e3835407e8d23 100644
5976 +--- a/drivers/hid/wacom_wac.h
5977 ++++ b/drivers/hid/wacom_wac.h
5978 +@@ -122,7 +122,7 @@
5979 + #define WACOM_HID_WD_TOUCHONOFF (WACOM_HID_UP_WACOMDIGITIZER | 0x0454)
5980 + #define WACOM_HID_WD_BATTERY_LEVEL (WACOM_HID_UP_WACOMDIGITIZER | 0x043b)
5981 + #define WACOM_HID_WD_EXPRESSKEY00 (WACOM_HID_UP_WACOMDIGITIZER | 0x0910)
5982 +-#define WACOM_HID_WD_EXPRESSKEYCAP00 (WACOM_HID_UP_WACOMDIGITIZER | 0x0950)
5983 ++#define WACOM_HID_WD_EXPRESSKEYCAP00 (WACOM_HID_UP_WACOMDIGITIZER | 0x0940)
5984 + #define WACOM_HID_WD_MODE_CHANGE (WACOM_HID_UP_WACOMDIGITIZER | 0x0980)
5985 + #define WACOM_HID_WD_MUTE_DEVICE (WACOM_HID_UP_WACOMDIGITIZER | 0x0981)
5986 + #define WACOM_HID_WD_CONTROLPANEL (WACOM_HID_UP_WACOMDIGITIZER | 0x0982)
5987 +diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
5988 +index 11170d9a2e1a5..bfd7f00a59ecf 100644
5989 +--- a/drivers/hv/connection.c
5990 ++++ b/drivers/hv/connection.c
5991 +@@ -229,8 +229,10 @@ int vmbus_connect(void)
5992 + */
5993 +
5994 + for (i = 0; ; i++) {
5995 +- if (i == ARRAY_SIZE(vmbus_versions))
5996 ++ if (i == ARRAY_SIZE(vmbus_versions)) {
5997 ++ ret = -EDOM;
5998 + goto cleanup;
5999 ++ }
6000 +
6001 + version = vmbus_versions[i];
6002 + if (version > max_version)
6003 +diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
6004 +index 05566ecdbe4b4..1b914e418e41e 100644
6005 +--- a/drivers/hv/hv_util.c
6006 ++++ b/drivers/hv/hv_util.c
6007 +@@ -696,8 +696,8 @@ static int hv_timesync_init(struct hv_util_service *srv)
6008 + */
6009 + hv_ptp_clock = ptp_clock_register(&ptp_hyperv_info, NULL);
6010 + if (IS_ERR_OR_NULL(hv_ptp_clock)) {
6011 +- pr_err("cannot register PTP clock: %ld\n",
6012 +- PTR_ERR(hv_ptp_clock));
6013 ++ pr_err("cannot register PTP clock: %d\n",
6014 ++ PTR_ERR_OR_ZERO(hv_ptp_clock));
6015 + hv_ptp_clock = NULL;
6016 + }
6017 +
6018 +diff --git a/drivers/hwmon/lm70.c b/drivers/hwmon/lm70.c
6019 +index ae2b84263a445..6b884ea009877 100644
6020 +--- a/drivers/hwmon/lm70.c
6021 ++++ b/drivers/hwmon/lm70.c
6022 +@@ -22,10 +22,10 @@
6023 + #include <linux/hwmon.h>
6024 + #include <linux/mutex.h>
6025 + #include <linux/mod_devicetable.h>
6026 ++#include <linux/of.h>
6027 ++#include <linux/property.h>
6028 + #include <linux/spi/spi.h>
6029 + #include <linux/slab.h>
6030 +-#include <linux/of_device.h>
6031 +-#include <linux/acpi.h>
6032 +
6033 + #define DRVNAME "lm70"
6034 +
6035 +@@ -148,50 +148,17 @@ static const struct of_device_id lm70_of_ids[] = {
6036 + MODULE_DEVICE_TABLE(of, lm70_of_ids);
6037 + #endif
6038 +
6039 +-#ifdef CONFIG_ACPI
6040 +-static const struct acpi_device_id lm70_acpi_ids[] = {
6041 +- {
6042 +- .id = "LM000070",
6043 +- .driver_data = LM70_CHIP_LM70,
6044 +- },
6045 +- {
6046 +- .id = "TMP00121",
6047 +- .driver_data = LM70_CHIP_TMP121,
6048 +- },
6049 +- {
6050 +- .id = "LM000071",
6051 +- .driver_data = LM70_CHIP_LM71,
6052 +- },
6053 +- {
6054 +- .id = "LM000074",
6055 +- .driver_data = LM70_CHIP_LM74,
6056 +- },
6057 +- {},
6058 +-};
6059 +-MODULE_DEVICE_TABLE(acpi, lm70_acpi_ids);
6060 +-#endif
6061 +-
6062 + static int lm70_probe(struct spi_device *spi)
6063 + {
6064 +- const struct of_device_id *of_match;
6065 + struct device *hwmon_dev;
6066 + struct lm70 *p_lm70;
6067 + int chip;
6068 +
6069 +- of_match = of_match_device(lm70_of_ids, &spi->dev);
6070 +- if (of_match)
6071 +- chip = (int)(uintptr_t)of_match->data;
6072 +- else {
6073 +-#ifdef CONFIG_ACPI
6074 +- const struct acpi_device_id *acpi_match;
6075 +-
6076 +- acpi_match = acpi_match_device(lm70_acpi_ids, &spi->dev);
6077 +- if (acpi_match)
6078 +- chip = (int)(uintptr_t)acpi_match->driver_data;
6079 +- else
6080 +-#endif
6081 +- chip = spi_get_device_id(spi)->driver_data;
6082 +- }
6083 ++ if (dev_fwnode(&spi->dev))
6084 ++ chip = (int)(uintptr_t)device_get_match_data(&spi->dev);
6085 ++ else
6086 ++ chip = spi_get_device_id(spi)->driver_data;
6087 ++
6088 +
6089 + /* signaling is SPI_MODE_0 */
6090 + if (spi->mode & (SPI_CPOL | SPI_CPHA))
6091 +@@ -227,7 +194,6 @@ static struct spi_driver lm70_driver = {
6092 + .driver = {
6093 + .name = "lm70",
6094 + .of_match_table = of_match_ptr(lm70_of_ids),
6095 +- .acpi_match_table = ACPI_PTR(lm70_acpi_ids),
6096 + },
6097 + .id_table = lm70_ids,
6098 + .probe = lm70_probe,
6099 +diff --git a/drivers/hwmon/max31722.c b/drivers/hwmon/max31722.c
6100 +index 062eceb7be0db..613338cbcb170 100644
6101 +--- a/drivers/hwmon/max31722.c
6102 ++++ b/drivers/hwmon/max31722.c
6103 +@@ -6,7 +6,6 @@
6104 + * Copyright (c) 2016, Intel Corporation.
6105 + */
6106 +
6107 +-#include <linux/acpi.h>
6108 + #include <linux/hwmon.h>
6109 + #include <linux/hwmon-sysfs.h>
6110 + #include <linux/kernel.h>
6111 +@@ -133,20 +132,12 @@ static const struct spi_device_id max31722_spi_id[] = {
6112 + {"max31723", 0},
6113 + {}
6114 + };
6115 +-
6116 +-static const struct acpi_device_id __maybe_unused max31722_acpi_id[] = {
6117 +- {"MAX31722", 0},
6118 +- {"MAX31723", 0},
6119 +- {}
6120 +-};
6121 +-
6122 + MODULE_DEVICE_TABLE(spi, max31722_spi_id);
6123 +
6124 + static struct spi_driver max31722_driver = {
6125 + .driver = {
6126 + .name = "max31722",
6127 + .pm = &max31722_pm_ops,
6128 +- .acpi_match_table = ACPI_PTR(max31722_acpi_id),
6129 + },
6130 + .probe = max31722_probe,
6131 + .remove = max31722_remove,
6132 +diff --git a/drivers/hwmon/max31790.c b/drivers/hwmon/max31790.c
6133 +index 86e6c71db685c..67677c4377687 100644
6134 +--- a/drivers/hwmon/max31790.c
6135 ++++ b/drivers/hwmon/max31790.c
6136 +@@ -27,6 +27,7 @@
6137 +
6138 + /* Fan Config register bits */
6139 + #define MAX31790_FAN_CFG_RPM_MODE 0x80
6140 ++#define MAX31790_FAN_CFG_CTRL_MON 0x10
6141 + #define MAX31790_FAN_CFG_TACH_INPUT_EN 0x08
6142 + #define MAX31790_FAN_CFG_TACH_INPUT 0x01
6143 +
6144 +@@ -104,7 +105,7 @@ static struct max31790_data *max31790_update_device(struct device *dev)
6145 + data->tach[NR_CHANNEL + i] = rv;
6146 + } else {
6147 + rv = i2c_smbus_read_word_swapped(client,
6148 +- MAX31790_REG_PWMOUT(i));
6149 ++ MAX31790_REG_PWM_DUTY_CYCLE(i));
6150 + if (rv < 0)
6151 + goto abort;
6152 + data->pwm[i] = rv;
6153 +@@ -170,7 +171,7 @@ static int max31790_read_fan(struct device *dev, u32 attr, int channel,
6154 +
6155 + switch (attr) {
6156 + case hwmon_fan_input:
6157 +- sr = get_tach_period(data->fan_dynamics[channel]);
6158 ++ sr = get_tach_period(data->fan_dynamics[channel % NR_CHANNEL]);
6159 + rpm = RPM_FROM_REG(data->tach[channel], sr);
6160 + *val = rpm;
6161 + return 0;
6162 +@@ -271,12 +272,12 @@ static int max31790_read_pwm(struct device *dev, u32 attr, int channel,
6163 + *val = data->pwm[channel] >> 8;
6164 + return 0;
6165 + case hwmon_pwm_enable:
6166 +- if (fan_config & MAX31790_FAN_CFG_RPM_MODE)
6167 ++ if (fan_config & MAX31790_FAN_CFG_CTRL_MON)
6168 ++ *val = 0;
6169 ++ else if (fan_config & MAX31790_FAN_CFG_RPM_MODE)
6170 + *val = 2;
6171 +- else if (fan_config & MAX31790_FAN_CFG_TACH_INPUT_EN)
6172 +- *val = 1;
6173 + else
6174 +- *val = 0;
6175 ++ *val = 1;
6176 + return 0;
6177 + default:
6178 + return -EOPNOTSUPP;
6179 +@@ -299,31 +300,41 @@ static int max31790_write_pwm(struct device *dev, u32 attr, int channel,
6180 + err = -EINVAL;
6181 + break;
6182 + }
6183 +- data->pwm[channel] = val << 8;
6184 ++ data->valid = false;
6185 + err = i2c_smbus_write_word_swapped(client,
6186 + MAX31790_REG_PWMOUT(channel),
6187 +- data->pwm[channel]);
6188 ++ val << 8);
6189 + break;
6190 + case hwmon_pwm_enable:
6191 + fan_config = data->fan_config[channel];
6192 + if (val == 0) {
6193 +- fan_config &= ~(MAX31790_FAN_CFG_TACH_INPUT_EN |
6194 +- MAX31790_FAN_CFG_RPM_MODE);
6195 ++ fan_config |= MAX31790_FAN_CFG_CTRL_MON;
6196 ++ /*
6197 ++ * Disable RPM mode; otherwise disabling fan speed
6198 ++ * monitoring is not possible.
6199 ++ */
6200 ++ fan_config &= ~MAX31790_FAN_CFG_RPM_MODE;
6201 + } else if (val == 1) {
6202 +- fan_config = (fan_config |
6203 +- MAX31790_FAN_CFG_TACH_INPUT_EN) &
6204 +- ~MAX31790_FAN_CFG_RPM_MODE;
6205 ++ fan_config &= ~(MAX31790_FAN_CFG_CTRL_MON | MAX31790_FAN_CFG_RPM_MODE);
6206 + } else if (val == 2) {
6207 +- fan_config |= MAX31790_FAN_CFG_TACH_INPUT_EN |
6208 +- MAX31790_FAN_CFG_RPM_MODE;
6209 ++ fan_config &= ~MAX31790_FAN_CFG_CTRL_MON;
6210 ++ /*
6211 ++ * The chip sets MAX31790_FAN_CFG_TACH_INPUT_EN on its
6212 ++ * own if MAX31790_FAN_CFG_RPM_MODE is set.
6213 ++ * Do it here as well to reflect the actual register
6214 ++ * value in the cache.
6215 ++ */
6216 ++ fan_config |= (MAX31790_FAN_CFG_RPM_MODE | MAX31790_FAN_CFG_TACH_INPUT_EN);
6217 + } else {
6218 + err = -EINVAL;
6219 + break;
6220 + }
6221 +- data->fan_config[channel] = fan_config;
6222 +- err = i2c_smbus_write_byte_data(client,
6223 +- MAX31790_REG_FAN_CONFIG(channel),
6224 +- fan_config);
6225 ++ if (fan_config != data->fan_config[channel]) {
6226 ++ err = i2c_smbus_write_byte_data(client, MAX31790_REG_FAN_CONFIG(channel),
6227 ++ fan_config);
6228 ++ if (!err)
6229 ++ data->fan_config[channel] = fan_config;
6230 ++ }
6231 + break;
6232 + default:
6233 + err = -EOPNOTSUPP;
6234 +diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c
6235 +index cc9e8025c533c..b2088d2d386a4 100644
6236 +--- a/drivers/hwtracing/coresight/coresight-core.c
6237 ++++ b/drivers/hwtracing/coresight/coresight-core.c
6238 +@@ -581,7 +581,7 @@ static struct coresight_device *
6239 + coresight_find_enabled_sink(struct coresight_device *csdev)
6240 + {
6241 + int i;
6242 +- struct coresight_device *sink;
6243 ++ struct coresight_device *sink = NULL;
6244 +
6245 + if ((csdev->type == CORESIGHT_DEV_TYPE_SINK ||
6246 + csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) &&
6247 +diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c
6248 +index 6b74c2b04c157..da56488182d07 100644
6249 +--- a/drivers/iio/accel/bma180.c
6250 ++++ b/drivers/iio/accel/bma180.c
6251 +@@ -55,7 +55,7 @@ struct bma180_part_info {
6252 +
6253 + u8 int_reset_reg, int_reset_mask;
6254 + u8 sleep_reg, sleep_mask;
6255 +- u8 bw_reg, bw_mask;
6256 ++ u8 bw_reg, bw_mask, bw_offset;
6257 + u8 scale_reg, scale_mask;
6258 + u8 power_reg, power_mask, lowpower_val;
6259 + u8 int_enable_reg, int_enable_mask;
6260 +@@ -127,6 +127,7 @@ struct bma180_part_info {
6261 +
6262 + #define BMA250_RANGE_MASK GENMASK(3, 0) /* Range of accel values */
6263 + #define BMA250_BW_MASK GENMASK(4, 0) /* Accel bandwidth */
6264 ++#define BMA250_BW_OFFSET 8
6265 + #define BMA250_SUSPEND_MASK BIT(7) /* chip will sleep */
6266 + #define BMA250_LOWPOWER_MASK BIT(6)
6267 + #define BMA250_DATA_INTEN_MASK BIT(4)
6268 +@@ -143,6 +144,7 @@ struct bma180_part_info {
6269 +
6270 + #define BMA254_RANGE_MASK GENMASK(3, 0) /* Range of accel values */
6271 + #define BMA254_BW_MASK GENMASK(4, 0) /* Accel bandwidth */
6272 ++#define BMA254_BW_OFFSET 8
6273 + #define BMA254_SUSPEND_MASK BIT(7) /* chip will sleep */
6274 + #define BMA254_LOWPOWER_MASK BIT(6)
6275 + #define BMA254_DATA_INTEN_MASK BIT(4)
6276 +@@ -162,7 +164,11 @@ struct bma180_data {
6277 + int scale;
6278 + int bw;
6279 + bool pmode;
6280 +- u8 buff[16]; /* 3x 16-bit + 8-bit + padding + timestamp */
6281 ++ /* Ensure timestamp is naturally aligned */
6282 ++ struct {
6283 ++ s16 chan[4];
6284 ++ s64 timestamp __aligned(8);
6285 ++ } scan;
6286 + };
6287 +
6288 + enum bma180_chan {
6289 +@@ -283,7 +289,8 @@ static int bma180_set_bw(struct bma180_data *data, int val)
6290 + for (i = 0; i < data->part_info->num_bw; ++i) {
6291 + if (data->part_info->bw_table[i] == val) {
6292 + ret = bma180_set_bits(data, data->part_info->bw_reg,
6293 +- data->part_info->bw_mask, i);
6294 ++ data->part_info->bw_mask,
6295 ++ i + data->part_info->bw_offset);
6296 + if (ret) {
6297 + dev_err(&data->client->dev,
6298 + "failed to set bandwidth\n");
6299 +@@ -876,6 +883,7 @@ static const struct bma180_part_info bma180_part_info[] = {
6300 + .sleep_mask = BMA250_SUSPEND_MASK,
6301 + .bw_reg = BMA250_BW_REG,
6302 + .bw_mask = BMA250_BW_MASK,
6303 ++ .bw_offset = BMA250_BW_OFFSET,
6304 + .scale_reg = BMA250_RANGE_REG,
6305 + .scale_mask = BMA250_RANGE_MASK,
6306 + .power_reg = BMA250_POWER_REG,
6307 +@@ -905,6 +913,7 @@ static const struct bma180_part_info bma180_part_info[] = {
6308 + .sleep_mask = BMA254_SUSPEND_MASK,
6309 + .bw_reg = BMA254_BW_REG,
6310 + .bw_mask = BMA254_BW_MASK,
6311 ++ .bw_offset = BMA254_BW_OFFSET,
6312 + .scale_reg = BMA254_RANGE_REG,
6313 + .scale_mask = BMA254_RANGE_MASK,
6314 + .power_reg = BMA254_POWER_REG,
6315 +@@ -938,12 +947,12 @@ static irqreturn_t bma180_trigger_handler(int irq, void *p)
6316 + mutex_unlock(&data->mutex);
6317 + goto err;
6318 + }
6319 +- ((s16 *)data->buff)[i++] = ret;
6320 ++ data->scan.chan[i++] = ret;
6321 + }
6322 +
6323 + mutex_unlock(&data->mutex);
6324 +
6325 +- iio_push_to_buffers_with_timestamp(indio_dev, data->buff, time_ns);
6326 ++ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, time_ns);
6327 + err:
6328 + iio_trigger_notify_done(indio_dev->trig);
6329 +
6330 +diff --git a/drivers/iio/accel/bma220_spi.c b/drivers/iio/accel/bma220_spi.c
6331 +index 3c9b0c6954e60..e8a9db1a82ad8 100644
6332 +--- a/drivers/iio/accel/bma220_spi.c
6333 ++++ b/drivers/iio/accel/bma220_spi.c
6334 +@@ -63,7 +63,11 @@ static const int bma220_scale_table[][2] = {
6335 + struct bma220_data {
6336 + struct spi_device *spi_device;
6337 + struct mutex lock;
6338 +- s8 buffer[16]; /* 3x8-bit channels + 5x8 padding + 8x8 timestamp */
6339 ++ struct {
6340 ++ s8 chans[3];
6341 ++ /* Ensure timestamp is naturally aligned. */
6342 ++ s64 timestamp __aligned(8);
6343 ++ } scan;
6344 + u8 tx_buf[2] ____cacheline_aligned;
6345 + };
6346 +
6347 +@@ -94,12 +98,12 @@ static irqreturn_t bma220_trigger_handler(int irq, void *p)
6348 +
6349 + mutex_lock(&data->lock);
6350 + data->tx_buf[0] = BMA220_REG_ACCEL_X | BMA220_READ_MASK;
6351 +- ret = spi_write_then_read(spi, data->tx_buf, 1, data->buffer,
6352 ++ ret = spi_write_then_read(spi, data->tx_buf, 1, &data->scan.chans,
6353 + ARRAY_SIZE(bma220_channels) - 1);
6354 + if (ret < 0)
6355 + goto err;
6356 +
6357 +- iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
6358 ++ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
6359 + pf->timestamp);
6360 + err:
6361 + mutex_unlock(&data->lock);
6362 +diff --git a/drivers/iio/accel/hid-sensor-accel-3d.c b/drivers/iio/accel/hid-sensor-accel-3d.c
6363 +index 4c5e594024f8c..f05840d17fb71 100644
6364 +--- a/drivers/iio/accel/hid-sensor-accel-3d.c
6365 ++++ b/drivers/iio/accel/hid-sensor-accel-3d.c
6366 +@@ -27,8 +27,11 @@ struct accel_3d_state {
6367 + struct hid_sensor_hub_callbacks callbacks;
6368 + struct hid_sensor_common common_attributes;
6369 + struct hid_sensor_hub_attribute_info accel[ACCEL_3D_CHANNEL_MAX];
6370 +- /* Reserve for 3 channels + padding + timestamp */
6371 +- u32 accel_val[ACCEL_3D_CHANNEL_MAX + 3];
6372 ++ /* Ensure timestamp is naturally aligned */
6373 ++ struct {
6374 ++ u32 accel_val[3];
6375 ++ s64 timestamp __aligned(8);
6376 ++ } scan;
6377 + int scale_pre_decml;
6378 + int scale_post_decml;
6379 + int scale_precision;
6380 +@@ -239,8 +242,8 @@ static int accel_3d_proc_event(struct hid_sensor_hub_device *hsdev,
6381 + accel_state->timestamp = iio_get_time_ns(indio_dev);
6382 +
6383 + hid_sensor_push_data(indio_dev,
6384 +- accel_state->accel_val,
6385 +- sizeof(accel_state->accel_val),
6386 ++ &accel_state->scan,
6387 ++ sizeof(accel_state->scan),
6388 + accel_state->timestamp);
6389 +
6390 + accel_state->timestamp = 0;
6391 +@@ -265,7 +268,7 @@ static int accel_3d_capture_sample(struct hid_sensor_hub_device *hsdev,
6392 + case HID_USAGE_SENSOR_ACCEL_Y_AXIS:
6393 + case HID_USAGE_SENSOR_ACCEL_Z_AXIS:
6394 + offset = usage_id - HID_USAGE_SENSOR_ACCEL_X_AXIS;
6395 +- accel_state->accel_val[CHANNEL_SCAN_INDEX_X + offset] =
6396 ++ accel_state->scan.accel_val[CHANNEL_SCAN_INDEX_X + offset] =
6397 + *(u32 *)raw_data;
6398 + ret = 0;
6399 + break;
6400 +diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c
6401 +index 560a3373ff20d..c99e90469a245 100644
6402 +--- a/drivers/iio/accel/kxcjk-1013.c
6403 ++++ b/drivers/iio/accel/kxcjk-1013.c
6404 +@@ -132,13 +132,24 @@ enum kx_acpi_type {
6405 + ACPI_KIOX010A,
6406 + };
6407 +
6408 ++enum kxcjk1013_axis {
6409 ++ AXIS_X,
6410 ++ AXIS_Y,
6411 ++ AXIS_Z,
6412 ++ AXIS_MAX
6413 ++};
6414 ++
6415 + struct kxcjk1013_data {
6416 + struct i2c_client *client;
6417 + struct iio_trigger *dready_trig;
6418 + struct iio_trigger *motion_trig;
6419 + struct iio_mount_matrix orientation;
6420 + struct mutex mutex;
6421 +- s16 buffer[8];
6422 ++ /* Ensure timestamp naturally aligned */
6423 ++ struct {
6424 ++ s16 chans[AXIS_MAX];
6425 ++ s64 timestamp __aligned(8);
6426 ++ } scan;
6427 + u8 odr_bits;
6428 + u8 range;
6429 + int wake_thres;
6430 +@@ -152,13 +163,6 @@ struct kxcjk1013_data {
6431 + enum kx_acpi_type acpi_type;
6432 + };
6433 +
6434 +-enum kxcjk1013_axis {
6435 +- AXIS_X,
6436 +- AXIS_Y,
6437 +- AXIS_Z,
6438 +- AXIS_MAX,
6439 +-};
6440 +-
6441 + enum kxcjk1013_mode {
6442 + STANDBY,
6443 + OPERATION,
6444 +@@ -1092,12 +1096,12 @@ static irqreturn_t kxcjk1013_trigger_handler(int irq, void *p)
6445 + ret = i2c_smbus_read_i2c_block_data_or_emulated(data->client,
6446 + KXCJK1013_REG_XOUT_L,
6447 + AXIS_MAX * 2,
6448 +- (u8 *)data->buffer);
6449 ++ (u8 *)data->scan.chans);
6450 + mutex_unlock(&data->mutex);
6451 + if (ret < 0)
6452 + goto err;
6453 +
6454 +- iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
6455 ++ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
6456 + data->timestamp);
6457 + err:
6458 + iio_trigger_notify_done(indio_dev->trig);
6459 +diff --git a/drivers/iio/accel/mxc4005.c b/drivers/iio/accel/mxc4005.c
6460 +index f877263dc6efb..5a2b0ffbb145d 100644
6461 +--- a/drivers/iio/accel/mxc4005.c
6462 ++++ b/drivers/iio/accel/mxc4005.c
6463 +@@ -56,7 +56,11 @@ struct mxc4005_data {
6464 + struct mutex mutex;
6465 + struct regmap *regmap;
6466 + struct iio_trigger *dready_trig;
6467 +- __be16 buffer[8];
6468 ++ /* Ensure timestamp is naturally aligned */
6469 ++ struct {
6470 ++ __be16 chans[3];
6471 ++ s64 timestamp __aligned(8);
6472 ++ } scan;
6473 + bool trigger_enabled;
6474 + };
6475 +
6476 +@@ -135,7 +139,7 @@ static int mxc4005_read_xyz(struct mxc4005_data *data)
6477 + int ret;
6478 +
6479 + ret = regmap_bulk_read(data->regmap, MXC4005_REG_XOUT_UPPER,
6480 +- data->buffer, sizeof(data->buffer));
6481 ++ data->scan.chans, sizeof(data->scan.chans));
6482 + if (ret < 0) {
6483 + dev_err(data->dev, "failed to read axes\n");
6484 + return ret;
6485 +@@ -301,7 +305,7 @@ static irqreturn_t mxc4005_trigger_handler(int irq, void *private)
6486 + if (ret < 0)
6487 + goto err;
6488 +
6489 +- iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
6490 ++ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
6491 + pf->timestamp);
6492 +
6493 + err:
6494 +diff --git a/drivers/iio/accel/stk8312.c b/drivers/iio/accel/stk8312.c
6495 +index 3b59887a8581b..7d24801e8aa7c 100644
6496 +--- a/drivers/iio/accel/stk8312.c
6497 ++++ b/drivers/iio/accel/stk8312.c
6498 +@@ -103,7 +103,11 @@ struct stk8312_data {
6499 + u8 mode;
6500 + struct iio_trigger *dready_trig;
6501 + bool dready_trigger_on;
6502 +- s8 buffer[16]; /* 3x8-bit channels + 5x8 padding + 64-bit timestamp */
6503 ++ /* Ensure timestamp is naturally aligned */
6504 ++ struct {
6505 ++ s8 chans[3];
6506 ++ s64 timestamp __aligned(8);
6507 ++ } scan;
6508 + };
6509 +
6510 + static IIO_CONST_ATTR(in_accel_scale_available, STK8312_SCALE_AVAIL);
6511 +@@ -438,7 +442,7 @@ static irqreturn_t stk8312_trigger_handler(int irq, void *p)
6512 + ret = i2c_smbus_read_i2c_block_data(data->client,
6513 + STK8312_REG_XOUT,
6514 + STK8312_ALL_CHANNEL_SIZE,
6515 +- data->buffer);
6516 ++ data->scan.chans);
6517 + if (ret < STK8312_ALL_CHANNEL_SIZE) {
6518 + dev_err(&data->client->dev, "register read failed\n");
6519 + mutex_unlock(&data->lock);
6520 +@@ -452,12 +456,12 @@ static irqreturn_t stk8312_trigger_handler(int irq, void *p)
6521 + mutex_unlock(&data->lock);
6522 + goto err;
6523 + }
6524 +- data->buffer[i++] = ret;
6525 ++ data->scan.chans[i++] = ret;
6526 + }
6527 + }
6528 + mutex_unlock(&data->lock);
6529 +
6530 +- iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
6531 ++ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
6532 + pf->timestamp);
6533 + err:
6534 + iio_trigger_notify_done(indio_dev->trig);
6535 +diff --git a/drivers/iio/accel/stk8ba50.c b/drivers/iio/accel/stk8ba50.c
6536 +index 3ead378b02c9b..e8087d7ee49f9 100644
6537 +--- a/drivers/iio/accel/stk8ba50.c
6538 ++++ b/drivers/iio/accel/stk8ba50.c
6539 +@@ -91,12 +91,11 @@ struct stk8ba50_data {
6540 + u8 sample_rate_idx;
6541 + struct iio_trigger *dready_trig;
6542 + bool dready_trigger_on;
6543 +- /*
6544 +- * 3 x 16-bit channels (10-bit data, 6-bit padding) +
6545 +- * 1 x 16 padding +
6546 +- * 4 x 16 64-bit timestamp
6547 +- */
6548 +- s16 buffer[8];
6549 ++ /* Ensure timestamp is naturally aligned */
6550 ++ struct {
6551 ++ s16 chans[3];
6552 ++ s64 timestamp __aligned(8);
6553 ++ } scan;
6554 + };
6555 +
6556 + #define STK8BA50_ACCEL_CHANNEL(index, reg, axis) { \
6557 +@@ -324,7 +323,7 @@ static irqreturn_t stk8ba50_trigger_handler(int irq, void *p)
6558 + ret = i2c_smbus_read_i2c_block_data(data->client,
6559 + STK8BA50_REG_XOUT,
6560 + STK8BA50_ALL_CHANNEL_SIZE,
6561 +- (u8 *)data->buffer);
6562 ++ (u8 *)data->scan.chans);
6563 + if (ret < STK8BA50_ALL_CHANNEL_SIZE) {
6564 + dev_err(&data->client->dev, "register read failed\n");
6565 + goto err;
6566 +@@ -337,10 +336,10 @@ static irqreturn_t stk8ba50_trigger_handler(int irq, void *p)
6567 + if (ret < 0)
6568 + goto err;
6569 +
6570 +- data->buffer[i++] = ret;
6571 ++ data->scan.chans[i++] = ret;
6572 + }
6573 + }
6574 +- iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
6575 ++ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
6576 + pf->timestamp);
6577 + err:
6578 + mutex_unlock(&data->lock);
6579 +diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c
6580 +index b917a4714a9c9..b8139c435a4b0 100644
6581 +--- a/drivers/iio/adc/at91-sama5d2_adc.c
6582 ++++ b/drivers/iio/adc/at91-sama5d2_adc.c
6583 +@@ -403,7 +403,8 @@ struct at91_adc_state {
6584 + struct at91_adc_dma dma_st;
6585 + struct at91_adc_touch touch_st;
6586 + struct iio_dev *indio_dev;
6587 +- u16 buffer[AT91_BUFFER_MAX_HWORDS];
6588 ++ /* Ensure naturally aligned timestamp */
6589 ++ u16 buffer[AT91_BUFFER_MAX_HWORDS] __aligned(8);
6590 + /*
6591 + * lock to prevent concurrent 'single conversion' requests through
6592 + * sysfs.
6593 +diff --git a/drivers/iio/adc/hx711.c b/drivers/iio/adc/hx711.c
6594 +index 6a173531d355b..f7ee856a6b8b6 100644
6595 +--- a/drivers/iio/adc/hx711.c
6596 ++++ b/drivers/iio/adc/hx711.c
6597 +@@ -86,9 +86,9 @@ struct hx711_data {
6598 + struct mutex lock;
6599 + /*
6600 + * triggered buffer
6601 +- * 2x32-bit channel + 64-bit timestamp
6602 ++ * 2x32-bit channel + 64-bit naturally aligned timestamp
6603 + */
6604 +- u32 buffer[4];
6605 ++ u32 buffer[4] __aligned(8);
6606 + /*
6607 + * delay after a rising edge on SCK until the data is ready DOUT
6608 + * this is dependent on the hx711 where the datasheet tells a
6609 +diff --git a/drivers/iio/adc/mxs-lradc-adc.c b/drivers/iio/adc/mxs-lradc-adc.c
6610 +index 30e29f44ebd2e..c480cb489c1a3 100644
6611 +--- a/drivers/iio/adc/mxs-lradc-adc.c
6612 ++++ b/drivers/iio/adc/mxs-lradc-adc.c
6613 +@@ -115,7 +115,8 @@ struct mxs_lradc_adc {
6614 + struct device *dev;
6615 +
6616 + void __iomem *base;
6617 +- u32 buffer[10];
6618 ++ /* Maximum of 8 channels + 8 byte ts */
6619 ++ u32 buffer[10] __aligned(8);
6620 + struct iio_trigger *trig;
6621 + struct completion completion;
6622 + spinlock_t lock;
6623 +diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c
6624 +index 9fef39bcf997b..5b828428be77c 100644
6625 +--- a/drivers/iio/adc/ti-ads1015.c
6626 ++++ b/drivers/iio/adc/ti-ads1015.c
6627 +@@ -395,10 +395,14 @@ static irqreturn_t ads1015_trigger_handler(int irq, void *p)
6628 + struct iio_poll_func *pf = p;
6629 + struct iio_dev *indio_dev = pf->indio_dev;
6630 + struct ads1015_data *data = iio_priv(indio_dev);
6631 +- s16 buf[8]; /* 1x s16 ADC val + 3x s16 padding + 4x s16 timestamp */
6632 ++ /* Ensure natural alignment of timestamp */
6633 ++ struct {
6634 ++ s16 chan;
6635 ++ s64 timestamp __aligned(8);
6636 ++ } scan;
6637 + int chan, ret, res;
6638 +
6639 +- memset(buf, 0, sizeof(buf));
6640 ++ memset(&scan, 0, sizeof(scan));
6641 +
6642 + mutex_lock(&data->lock);
6643 + chan = find_first_bit(indio_dev->active_scan_mask,
6644 +@@ -409,10 +413,10 @@ static irqreturn_t ads1015_trigger_handler(int irq, void *p)
6645 + goto err;
6646 + }
6647 +
6648 +- buf[0] = res;
6649 ++ scan.chan = res;
6650 + mutex_unlock(&data->lock);
6651 +
6652 +- iio_push_to_buffers_with_timestamp(indio_dev, buf,
6653 ++ iio_push_to_buffers_with_timestamp(indio_dev, &scan,
6654 + iio_get_time_ns(indio_dev));
6655 +
6656 + err:
6657 +diff --git a/drivers/iio/adc/ti-ads8688.c b/drivers/iio/adc/ti-ads8688.c
6658 +index 16bcb37eebb72..79c803537dc42 100644
6659 +--- a/drivers/iio/adc/ti-ads8688.c
6660 ++++ b/drivers/iio/adc/ti-ads8688.c
6661 +@@ -383,7 +383,8 @@ static irqreturn_t ads8688_trigger_handler(int irq, void *p)
6662 + {
6663 + struct iio_poll_func *pf = p;
6664 + struct iio_dev *indio_dev = pf->indio_dev;
6665 +- u16 buffer[ADS8688_MAX_CHANNELS + sizeof(s64)/sizeof(u16)];
6666 ++ /* Ensure naturally aligned timestamp */
6667 ++ u16 buffer[ADS8688_MAX_CHANNELS + sizeof(s64)/sizeof(u16)] __aligned(8);
6668 + int i, j = 0;
6669 +
6670 + for (i = 0; i < indio_dev->masklength; i++) {
6671 +diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c
6672 +index 1d794cf3e3f13..fd57fc43e8e5c 100644
6673 +--- a/drivers/iio/adc/vf610_adc.c
6674 ++++ b/drivers/iio/adc/vf610_adc.c
6675 +@@ -167,7 +167,11 @@ struct vf610_adc {
6676 + u32 sample_freq_avail[5];
6677 +
6678 + struct completion completion;
6679 +- u16 buffer[8];
6680 ++ /* Ensure the timestamp is naturally aligned */
6681 ++ struct {
6682 ++ u16 chan;
6683 ++ s64 timestamp __aligned(8);
6684 ++ } scan;
6685 + };
6686 +
6687 + static const u32 vf610_hw_avgs[] = { 1, 4, 8, 16, 32 };
6688 +@@ -579,9 +583,9 @@ static irqreturn_t vf610_adc_isr(int irq, void *dev_id)
6689 + if (coco & VF610_ADC_HS_COCO0) {
6690 + info->value = vf610_adc_read_data(info);
6691 + if (iio_buffer_enabled(indio_dev)) {
6692 +- info->buffer[0] = info->value;
6693 ++ info->scan.chan = info->value;
6694 + iio_push_to_buffers_with_timestamp(indio_dev,
6695 +- info->buffer,
6696 ++ &info->scan,
6697 + iio_get_time_ns(indio_dev));
6698 + iio_trigger_notify_done(indio_dev->trig);
6699 + } else
6700 +diff --git a/drivers/iio/chemical/atlas-sensor.c b/drivers/iio/chemical/atlas-sensor.c
6701 +index cdab9d04dedd0..0c8a50de89408 100644
6702 +--- a/drivers/iio/chemical/atlas-sensor.c
6703 ++++ b/drivers/iio/chemical/atlas-sensor.c
6704 +@@ -91,8 +91,8 @@ struct atlas_data {
6705 + struct regmap *regmap;
6706 + struct irq_work work;
6707 + unsigned int interrupt_enabled;
6708 +-
6709 +- __be32 buffer[6]; /* 96-bit data + 32-bit pad + 64-bit timestamp */
6710 ++ /* 96-bit data + 32-bit pad + 64-bit timestamp */
6711 ++ __be32 buffer[6] __aligned(8);
6712 + };
6713 +
6714 + static const struct regmap_config atlas_regmap_config = {
6715 +diff --git a/drivers/iio/frequency/adf4350.c b/drivers/iio/frequency/adf4350.c
6716 +index 82c050a3899d9..8f885b0af38e5 100644
6717 +--- a/drivers/iio/frequency/adf4350.c
6718 ++++ b/drivers/iio/frequency/adf4350.c
6719 +@@ -563,8 +563,10 @@ static int adf4350_probe(struct spi_device *spi)
6720 +
6721 + st->lock_detect_gpiod = devm_gpiod_get_optional(&spi->dev, NULL,
6722 + GPIOD_IN);
6723 +- if (IS_ERR(st->lock_detect_gpiod))
6724 +- return PTR_ERR(st->lock_detect_gpiod);
6725 ++ if (IS_ERR(st->lock_detect_gpiod)) {
6726 ++ ret = PTR_ERR(st->lock_detect_gpiod);
6727 ++ goto error_disable_reg;
6728 ++ }
6729 +
6730 + if (pdata->power_up_frequency) {
6731 + ret = adf4350_set_freq(st, pdata->power_up_frequency);
6732 +diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c
6733 +index 8ddda96455fcb..39fe0b1785920 100644
6734 +--- a/drivers/iio/gyro/bmg160_core.c
6735 ++++ b/drivers/iio/gyro/bmg160_core.c
6736 +@@ -96,7 +96,11 @@ struct bmg160_data {
6737 + struct iio_trigger *motion_trig;
6738 + struct iio_mount_matrix orientation;
6739 + struct mutex mutex;
6740 +- s16 buffer[8];
6741 ++ /* Ensure naturally aligned timestamp */
6742 ++ struct {
6743 ++ s16 chans[3];
6744 ++ s64 timestamp __aligned(8);
6745 ++ } scan;
6746 + u32 dps_range;
6747 + int ev_enable_state;
6748 + int slope_thres;
6749 +@@ -880,12 +884,12 @@ static irqreturn_t bmg160_trigger_handler(int irq, void *p)
6750 +
6751 + mutex_lock(&data->mutex);
6752 + ret = regmap_bulk_read(data->regmap, BMG160_REG_XOUT_L,
6753 +- data->buffer, AXIS_MAX * 2);
6754 ++ data->scan.chans, AXIS_MAX * 2);
6755 + mutex_unlock(&data->mutex);
6756 + if (ret < 0)
6757 + goto err;
6758 +
6759 +- iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
6760 ++ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
6761 + pf->timestamp);
6762 + err:
6763 + iio_trigger_notify_done(indio_dev->trig);
6764 +diff --git a/drivers/iio/humidity/am2315.c b/drivers/iio/humidity/am2315.c
6765 +index 02ad1767c845e..3398fa413ec5c 100644
6766 +--- a/drivers/iio/humidity/am2315.c
6767 ++++ b/drivers/iio/humidity/am2315.c
6768 +@@ -33,7 +33,11 @@
6769 + struct am2315_data {
6770 + struct i2c_client *client;
6771 + struct mutex lock;
6772 +- s16 buffer[8]; /* 2x16-bit channels + 2x16 padding + 4x16 timestamp */
6773 ++ /* Ensure timestamp is naturally aligned */
6774 ++ struct {
6775 ++ s16 chans[2];
6776 ++ s64 timestamp __aligned(8);
6777 ++ } scan;
6778 + };
6779 +
6780 + struct am2315_sensor_data {
6781 +@@ -167,20 +171,20 @@ static irqreturn_t am2315_trigger_handler(int irq, void *p)
6782 +
6783 + mutex_lock(&data->lock);
6784 + if (*(indio_dev->active_scan_mask) == AM2315_ALL_CHANNEL_MASK) {
6785 +- data->buffer[0] = sensor_data.hum_data;
6786 +- data->buffer[1] = sensor_data.temp_data;
6787 ++ data->scan.chans[0] = sensor_data.hum_data;
6788 ++ data->scan.chans[1] = sensor_data.temp_data;
6789 + } else {
6790 + i = 0;
6791 + for_each_set_bit(bit, indio_dev->active_scan_mask,
6792 + indio_dev->masklength) {
6793 +- data->buffer[i] = (bit ? sensor_data.temp_data :
6794 +- sensor_data.hum_data);
6795 ++ data->scan.chans[i] = (bit ? sensor_data.temp_data :
6796 ++ sensor_data.hum_data);
6797 + i++;
6798 + }
6799 + }
6800 + mutex_unlock(&data->lock);
6801 +
6802 +- iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
6803 ++ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
6804 + pf->timestamp);
6805 + err:
6806 + iio_trigger_notify_done(indio_dev->trig);
6807 +diff --git a/drivers/iio/imu/adis16400.c b/drivers/iio/imu/adis16400.c
6808 +index 785a4ce606d89..4aff16466da02 100644
6809 +--- a/drivers/iio/imu/adis16400.c
6810 ++++ b/drivers/iio/imu/adis16400.c
6811 +@@ -647,9 +647,6 @@ static irqreturn_t adis16400_trigger_handler(int irq, void *p)
6812 + void *buffer;
6813 + int ret;
6814 +
6815 +- if (!adis->buffer)
6816 +- return -ENOMEM;
6817 +-
6818 + if (!(st->variant->flags & ADIS16400_NO_BURST) &&
6819 + st->adis.spi->max_speed_hz > ADIS16400_SPI_BURST) {
6820 + st->adis.spi->max_speed_hz = ADIS16400_SPI_BURST;
6821 +diff --git a/drivers/iio/imu/adis16475.c b/drivers/iio/imu/adis16475.c
6822 +index 197d482409911..3c4e4deb87608 100644
6823 +--- a/drivers/iio/imu/adis16475.c
6824 ++++ b/drivers/iio/imu/adis16475.c
6825 +@@ -990,7 +990,7 @@ static irqreturn_t adis16475_trigger_handler(int irq, void *p)
6826 +
6827 + ret = spi_sync(adis->spi, &adis->msg);
6828 + if (ret)
6829 +- return ret;
6830 ++ goto check_burst32;
6831 +
6832 + adis->spi->max_speed_hz = cached_spi_speed_hz;
6833 + buffer = adis->buffer;
6834 +diff --git a/drivers/iio/imu/adis_buffer.c b/drivers/iio/imu/adis_buffer.c
6835 +index ac354321f63a3..175af154e4437 100644
6836 +--- a/drivers/iio/imu/adis_buffer.c
6837 ++++ b/drivers/iio/imu/adis_buffer.c
6838 +@@ -129,9 +129,6 @@ static irqreturn_t adis_trigger_handler(int irq, void *p)
6839 + struct adis *adis = iio_device_get_drvdata(indio_dev);
6840 + int ret;
6841 +
6842 +- if (!adis->buffer)
6843 +- return -ENOMEM;
6844 +-
6845 + if (adis->data->has_paging) {
6846 + mutex_lock(&adis->state_lock);
6847 + if (adis->current_page != 0) {
6848 +diff --git a/drivers/iio/light/isl29125.c b/drivers/iio/light/isl29125.c
6849 +index b93b85dbc3a6a..ba53b50d711a1 100644
6850 +--- a/drivers/iio/light/isl29125.c
6851 ++++ b/drivers/iio/light/isl29125.c
6852 +@@ -51,7 +51,11 @@
6853 + struct isl29125_data {
6854 + struct i2c_client *client;
6855 + u8 conf1;
6856 +- u16 buffer[8]; /* 3x 16-bit, padding, 8 bytes timestamp */
6857 ++ /* Ensure timestamp is naturally aligned */
6858 ++ struct {
6859 ++ u16 chans[3];
6860 ++ s64 timestamp __aligned(8);
6861 ++ } scan;
6862 + };
6863 +
6864 + #define ISL29125_CHANNEL(_color, _si) { \
6865 +@@ -184,10 +188,10 @@ static irqreturn_t isl29125_trigger_handler(int irq, void *p)
6866 + if (ret < 0)
6867 + goto done;
6868 +
6869 +- data->buffer[j++] = ret;
6870 ++ data->scan.chans[j++] = ret;
6871 + }
6872 +
6873 +- iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
6874 ++ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
6875 + iio_get_time_ns(indio_dev));
6876 +
6877 + done:
6878 +diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c
6879 +index b4323d2db0b19..74ed2d88a3ed3 100644
6880 +--- a/drivers/iio/light/ltr501.c
6881 ++++ b/drivers/iio/light/ltr501.c
6882 +@@ -32,9 +32,12 @@
6883 + #define LTR501_PART_ID 0x86
6884 + #define LTR501_MANUFAC_ID 0x87
6885 + #define LTR501_ALS_DATA1 0x88 /* 16-bit, little endian */
6886 ++#define LTR501_ALS_DATA1_UPPER 0x89 /* upper 8 bits of LTR501_ALS_DATA1 */
6887 + #define LTR501_ALS_DATA0 0x8a /* 16-bit, little endian */
6888 ++#define LTR501_ALS_DATA0_UPPER 0x8b /* upper 8 bits of LTR501_ALS_DATA0 */
6889 + #define LTR501_ALS_PS_STATUS 0x8c
6890 + #define LTR501_PS_DATA 0x8d /* 16-bit, little endian */
6891 ++#define LTR501_PS_DATA_UPPER 0x8e /* upper 8 bits of LTR501_PS_DATA */
6892 + #define LTR501_INTR 0x8f /* output mode, polarity, mode */
6893 + #define LTR501_PS_THRESH_UP 0x90 /* 11 bit, ps upper threshold */
6894 + #define LTR501_PS_THRESH_LOW 0x92 /* 11 bit, ps lower threshold */
6895 +@@ -406,18 +409,19 @@ static int ltr501_read_als(const struct ltr501_data *data, __le16 buf[2])
6896 +
6897 + static int ltr501_read_ps(const struct ltr501_data *data)
6898 + {
6899 +- int ret, status;
6900 ++ __le16 status;
6901 ++ int ret;
6902 +
6903 + ret = ltr501_drdy(data, LTR501_STATUS_PS_RDY);
6904 + if (ret < 0)
6905 + return ret;
6906 +
6907 + ret = regmap_bulk_read(data->regmap, LTR501_PS_DATA,
6908 +- &status, 2);
6909 ++ &status, sizeof(status));
6910 + if (ret < 0)
6911 + return ret;
6912 +
6913 +- return status;
6914 ++ return le16_to_cpu(status);
6915 + }
6916 +
6917 + static int ltr501_read_intr_prst(const struct ltr501_data *data,
6918 +@@ -1205,7 +1209,7 @@ static struct ltr501_chip_info ltr501_chip_info_tbl[] = {
6919 + .als_gain_tbl_size = ARRAY_SIZE(ltr559_als_gain_tbl),
6920 + .ps_gain = ltr559_ps_gain_tbl,
6921 + .ps_gain_tbl_size = ARRAY_SIZE(ltr559_ps_gain_tbl),
6922 +- .als_mode_active = BIT(1),
6923 ++ .als_mode_active = BIT(0),
6924 + .als_gain_mask = BIT(2) | BIT(3) | BIT(4),
6925 + .als_gain_shift = 2,
6926 + .info = &ltr501_info,
6927 +@@ -1354,9 +1358,12 @@ static bool ltr501_is_volatile_reg(struct device *dev, unsigned int reg)
6928 + {
6929 + switch (reg) {
6930 + case LTR501_ALS_DATA1:
6931 ++ case LTR501_ALS_DATA1_UPPER:
6932 + case LTR501_ALS_DATA0:
6933 ++ case LTR501_ALS_DATA0_UPPER:
6934 + case LTR501_ALS_PS_STATUS:
6935 + case LTR501_PS_DATA:
6936 ++ case LTR501_PS_DATA_UPPER:
6937 + return true;
6938 + default:
6939 + return false;
6940 +diff --git a/drivers/iio/light/tcs3414.c b/drivers/iio/light/tcs3414.c
6941 +index 6fe5d46f80d40..0593abd600ec2 100644
6942 +--- a/drivers/iio/light/tcs3414.c
6943 ++++ b/drivers/iio/light/tcs3414.c
6944 +@@ -53,7 +53,11 @@ struct tcs3414_data {
6945 + u8 control;
6946 + u8 gain;
6947 + u8 timing;
6948 +- u16 buffer[8]; /* 4x 16-bit + 8 bytes timestamp */
6949 ++ /* Ensure timestamp is naturally aligned */
6950 ++ struct {
6951 ++ u16 chans[4];
6952 ++ s64 timestamp __aligned(8);
6953 ++ } scan;
6954 + };
6955 +
6956 + #define TCS3414_CHANNEL(_color, _si, _addr) { \
6957 +@@ -209,10 +213,10 @@ static irqreturn_t tcs3414_trigger_handler(int irq, void *p)
6958 + if (ret < 0)
6959 + goto done;
6960 +
6961 +- data->buffer[j++] = ret;
6962 ++ data->scan.chans[j++] = ret;
6963 + }
6964 +
6965 +- iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
6966 ++ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
6967 + iio_get_time_ns(indio_dev));
6968 +
6969 + done:
6970 +diff --git a/drivers/iio/light/tcs3472.c b/drivers/iio/light/tcs3472.c
6971 +index a0dc447aeb68b..371c6a39a1654 100644
6972 +--- a/drivers/iio/light/tcs3472.c
6973 ++++ b/drivers/iio/light/tcs3472.c
6974 +@@ -64,7 +64,11 @@ struct tcs3472_data {
6975 + u8 control;
6976 + u8 atime;
6977 + u8 apers;
6978 +- u16 buffer[8]; /* 4 16-bit channels + 64-bit timestamp */
6979 ++ /* Ensure timestamp is naturally aligned */
6980 ++ struct {
6981 ++ u16 chans[4];
6982 ++ s64 timestamp __aligned(8);
6983 ++ } scan;
6984 + };
6985 +
6986 + static const struct iio_event_spec tcs3472_events[] = {
6987 +@@ -386,10 +390,10 @@ static irqreturn_t tcs3472_trigger_handler(int irq, void *p)
6988 + if (ret < 0)
6989 + goto done;
6990 +
6991 +- data->buffer[j++] = ret;
6992 ++ data->scan.chans[j++] = ret;
6993 + }
6994 +
6995 +- iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
6996 ++ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
6997 + iio_get_time_ns(indio_dev));
6998 +
6999 + done:
7000 +@@ -531,7 +535,8 @@ static int tcs3472_probe(struct i2c_client *client,
7001 + return 0;
7002 +
7003 + free_irq:
7004 +- free_irq(client->irq, indio_dev);
7005 ++ if (client->irq)
7006 ++ free_irq(client->irq, indio_dev);
7007 + buffer_cleanup:
7008 + iio_triggered_buffer_cleanup(indio_dev);
7009 + return ret;
7010 +@@ -559,7 +564,8 @@ static int tcs3472_remove(struct i2c_client *client)
7011 + struct iio_dev *indio_dev = i2c_get_clientdata(client);
7012 +
7013 + iio_device_unregister(indio_dev);
7014 +- free_irq(client->irq, indio_dev);
7015 ++ if (client->irq)
7016 ++ free_irq(client->irq, indio_dev);
7017 + iio_triggered_buffer_cleanup(indio_dev);
7018 + tcs3472_powerdown(iio_priv(indio_dev));
7019 +
7020 +diff --git a/drivers/iio/light/vcnl4000.c b/drivers/iio/light/vcnl4000.c
7021 +index fff4b36b8b58d..f4feb44903b3f 100644
7022 +--- a/drivers/iio/light/vcnl4000.c
7023 ++++ b/drivers/iio/light/vcnl4000.c
7024 +@@ -910,7 +910,7 @@ static irqreturn_t vcnl4010_trigger_handler(int irq, void *p)
7025 + struct iio_dev *indio_dev = pf->indio_dev;
7026 + struct vcnl4000_data *data = iio_priv(indio_dev);
7027 + const unsigned long *active_scan_mask = indio_dev->active_scan_mask;
7028 +- u16 buffer[8] = {0}; /* 1x16-bit + ts */
7029 ++ u16 buffer[8] __aligned(8) = {0}; /* 1x16-bit + naturally aligned ts */
7030 + bool data_read = false;
7031 + unsigned long isr;
7032 + int val = 0;
7033 +diff --git a/drivers/iio/light/vcnl4035.c b/drivers/iio/light/vcnl4035.c
7034 +index 765c44adac574..1bd85e21fd114 100644
7035 +--- a/drivers/iio/light/vcnl4035.c
7036 ++++ b/drivers/iio/light/vcnl4035.c
7037 +@@ -102,7 +102,8 @@ static irqreturn_t vcnl4035_trigger_consumer_handler(int irq, void *p)
7038 + struct iio_poll_func *pf = p;
7039 + struct iio_dev *indio_dev = pf->indio_dev;
7040 + struct vcnl4035_data *data = iio_priv(indio_dev);
7041 +- u8 buffer[ALIGN(sizeof(u16), sizeof(s64)) + sizeof(s64)];
7042 ++ /* Ensure naturally aligned timestamp */
7043 ++ u8 buffer[ALIGN(sizeof(u16), sizeof(s64)) + sizeof(s64)] __aligned(8);
7044 + int ret;
7045 +
7046 + ret = regmap_read(data->regmap, VCNL4035_ALS_DATA, (int *)buffer);
7047 +diff --git a/drivers/iio/magnetometer/bmc150_magn.c b/drivers/iio/magnetometer/bmc150_magn.c
7048 +index fc6840f9c1fa6..8042175275d09 100644
7049 +--- a/drivers/iio/magnetometer/bmc150_magn.c
7050 ++++ b/drivers/iio/magnetometer/bmc150_magn.c
7051 +@@ -136,8 +136,11 @@ struct bmc150_magn_data {
7052 + struct mutex mutex;
7053 + struct regmap *regmap;
7054 + struct iio_mount_matrix orientation;
7055 +- /* 4 x 32 bits for x, y z, 4 bytes align, 64 bits timestamp */
7056 +- s32 buffer[6];
7057 ++ /* Ensure timestamp is naturally aligned */
7058 ++ struct {
7059 ++ s32 chans[3];
7060 ++ s64 timestamp __aligned(8);
7061 ++ } scan;
7062 + struct iio_trigger *dready_trig;
7063 + bool dready_trigger_on;
7064 + int max_odr;
7065 +@@ -673,11 +676,11 @@ static irqreturn_t bmc150_magn_trigger_handler(int irq, void *p)
7066 + int ret;
7067 +
7068 + mutex_lock(&data->mutex);
7069 +- ret = bmc150_magn_read_xyz(data, data->buffer);
7070 ++ ret = bmc150_magn_read_xyz(data, data->scan.chans);
7071 + if (ret < 0)
7072 + goto err;
7073 +
7074 +- iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
7075 ++ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
7076 + pf->timestamp);
7077 +
7078 + err:
7079 +diff --git a/drivers/iio/magnetometer/hmc5843.h b/drivers/iio/magnetometer/hmc5843.h
7080 +index 3f6c0b6629415..242f742f2643a 100644
7081 +--- a/drivers/iio/magnetometer/hmc5843.h
7082 ++++ b/drivers/iio/magnetometer/hmc5843.h
7083 +@@ -33,7 +33,8 @@ enum hmc5843_ids {
7084 + * @lock: update and read regmap data
7085 + * @regmap: hardware access register maps
7086 + * @variant: describe chip variants
7087 +- * @buffer: 3x 16-bit channels + padding + 64-bit timestamp
7088 ++ * @scan: buffer to pack data for passing to
7089 ++ * iio_push_to_buffers_with_timestamp()
7090 + */
7091 + struct hmc5843_data {
7092 + struct device *dev;
7093 +@@ -41,7 +42,10 @@ struct hmc5843_data {
7094 + struct regmap *regmap;
7095 + const struct hmc5843_chip_info *variant;
7096 + struct iio_mount_matrix orientation;
7097 +- __be16 buffer[8];
7098 ++ struct {
7099 ++ __be16 chans[3];
7100 ++ s64 timestamp __aligned(8);
7101 ++ } scan;
7102 + };
7103 +
7104 + int hmc5843_common_probe(struct device *dev, struct regmap *regmap,
7105 +diff --git a/drivers/iio/magnetometer/hmc5843_core.c b/drivers/iio/magnetometer/hmc5843_core.c
7106 +index 780faea61d82e..221563e0c18fd 100644
7107 +--- a/drivers/iio/magnetometer/hmc5843_core.c
7108 ++++ b/drivers/iio/magnetometer/hmc5843_core.c
7109 +@@ -446,13 +446,13 @@ static irqreturn_t hmc5843_trigger_handler(int irq, void *p)
7110 + }
7111 +
7112 + ret = regmap_bulk_read(data->regmap, HMC5843_DATA_OUT_MSB_REGS,
7113 +- data->buffer, 3 * sizeof(__be16));
7114 ++ data->scan.chans, sizeof(data->scan.chans));
7115 +
7116 + mutex_unlock(&data->lock);
7117 + if (ret < 0)
7118 + goto done;
7119 +
7120 +- iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
7121 ++ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
7122 + iio_get_time_ns(indio_dev));
7123 +
7124 + done:
7125 +diff --git a/drivers/iio/magnetometer/rm3100-core.c b/drivers/iio/magnetometer/rm3100-core.c
7126 +index 7242897a05e95..720234a91db11 100644
7127 +--- a/drivers/iio/magnetometer/rm3100-core.c
7128 ++++ b/drivers/iio/magnetometer/rm3100-core.c
7129 +@@ -78,7 +78,8 @@ struct rm3100_data {
7130 + bool use_interrupt;
7131 + int conversion_time;
7132 + int scale;
7133 +- u8 buffer[RM3100_SCAN_BYTES];
7134 ++ /* Ensure naturally aligned timestamp */
7135 ++ u8 buffer[RM3100_SCAN_BYTES] __aligned(8);
7136 + struct iio_trigger *drdy_trig;
7137 +
7138 + /*
7139 +diff --git a/drivers/iio/potentiostat/lmp91000.c b/drivers/iio/potentiostat/lmp91000.c
7140 +index f34ca769dc20d..d7ff74a798ba3 100644
7141 +--- a/drivers/iio/potentiostat/lmp91000.c
7142 ++++ b/drivers/iio/potentiostat/lmp91000.c
7143 +@@ -71,8 +71,8 @@ struct lmp91000_data {
7144 +
7145 + struct completion completion;
7146 + u8 chan_select;
7147 +-
7148 +- u32 buffer[4]; /* 64-bit data + 64-bit timestamp */
7149 ++ /* 64-bit data + 64-bit naturally aligned timestamp */
7150 ++ u32 buffer[4] __aligned(8);
7151 + };
7152 +
7153 + static const struct iio_chan_spec lmp91000_channels[] = {
7154 +diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c
7155 +index b79ada839e012..98330e26ac3bd 100644
7156 +--- a/drivers/iio/proximity/as3935.c
7157 ++++ b/drivers/iio/proximity/as3935.c
7158 +@@ -59,7 +59,11 @@ struct as3935_state {
7159 + unsigned long noise_tripped;
7160 + u32 tune_cap;
7161 + u32 nflwdth_reg;
7162 +- u8 buffer[16]; /* 8-bit data + 56-bit padding + 64-bit timestamp */
7163 ++ /* Ensure timestamp is naturally aligned */
7164 ++ struct {
7165 ++ u8 chan;
7166 ++ s64 timestamp __aligned(8);
7167 ++ } scan;
7168 + u8 buf[2] ____cacheline_aligned;
7169 + };
7170 +
7171 +@@ -225,8 +229,8 @@ static irqreturn_t as3935_trigger_handler(int irq, void *private)
7172 + if (ret)
7173 + goto err_read;
7174 +
7175 +- st->buffer[0] = val & AS3935_DATA_MASK;
7176 +- iio_push_to_buffers_with_timestamp(indio_dev, &st->buffer,
7177 ++ st->scan.chan = val & AS3935_DATA_MASK;
7178 ++ iio_push_to_buffers_with_timestamp(indio_dev, &st->scan,
7179 + iio_get_time_ns(indio_dev));
7180 + err_read:
7181 + iio_trigger_notify_done(indio_dev->trig);
7182 +diff --git a/drivers/iio/proximity/isl29501.c b/drivers/iio/proximity/isl29501.c
7183 +index 90e76451c972a..5b6ea783795d9 100644
7184 +--- a/drivers/iio/proximity/isl29501.c
7185 ++++ b/drivers/iio/proximity/isl29501.c
7186 +@@ -938,7 +938,7 @@ static irqreturn_t isl29501_trigger_handler(int irq, void *p)
7187 + struct iio_dev *indio_dev = pf->indio_dev;
7188 + struct isl29501_private *isl29501 = iio_priv(indio_dev);
7189 + const unsigned long *active_mask = indio_dev->active_scan_mask;
7190 +- u32 buffer[4] = {}; /* 1x16-bit + ts */
7191 ++ u32 buffer[4] __aligned(8) = {}; /* 1x16-bit + naturally aligned ts */
7192 +
7193 + if (test_bit(ISL29501_DISTANCE_SCAN_INDEX, active_mask))
7194 + isl29501_register_read(isl29501, REG_DISTANCE, buffer);
7195 +diff --git a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
7196 +index cc206bfa09c78..d854b8d5fbbaf 100644
7197 +--- a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
7198 ++++ b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
7199 +@@ -44,7 +44,11 @@ struct lidar_data {
7200 + int (*xfer)(struct lidar_data *data, u8 reg, u8 *val, int len);
7201 + int i2c_enabled;
7202 +
7203 +- u16 buffer[8]; /* 2 byte distance + 8 byte timestamp */
7204 ++ /* Ensure timestamp is naturally aligned */
7205 ++ struct {
7206 ++ u16 chan;
7207 ++ s64 timestamp __aligned(8);
7208 ++ } scan;
7209 + };
7210 +
7211 + static const struct iio_chan_spec lidar_channels[] = {
7212 +@@ -230,9 +234,9 @@ static irqreturn_t lidar_trigger_handler(int irq, void *private)
7213 + struct lidar_data *data = iio_priv(indio_dev);
7214 + int ret;
7215 +
7216 +- ret = lidar_get_measurement(data, data->buffer);
7217 ++ ret = lidar_get_measurement(data, &data->scan.chan);
7218 + if (!ret) {
7219 +- iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
7220 ++ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
7221 + iio_get_time_ns(indio_dev));
7222 + } else if (ret != -EINVAL) {
7223 + dev_err(&data->client->dev, "cannot read LIDAR measurement");
7224 +diff --git a/drivers/iio/proximity/srf08.c b/drivers/iio/proximity/srf08.c
7225 +index 70beac5c9c1df..9b0886760f76d 100644
7226 +--- a/drivers/iio/proximity/srf08.c
7227 ++++ b/drivers/iio/proximity/srf08.c
7228 +@@ -63,11 +63,11 @@ struct srf08_data {
7229 + int range_mm;
7230 + struct mutex lock;
7231 +
7232 +- /*
7233 +- * triggered buffer
7234 +- * 1x16-bit channel + 3x16 padding + 4x16 timestamp
7235 +- */
7236 +- s16 buffer[8];
7237 ++ /* Ensure timestamp is naturally aligned */
7238 ++ struct {
7239 ++ s16 chan;
7240 ++ s64 timestamp __aligned(8);
7241 ++ } scan;
7242 +
7243 + /* Sensor-Type */
7244 + enum srf08_sensor_type sensor_type;
7245 +@@ -190,9 +190,9 @@ static irqreturn_t srf08_trigger_handler(int irq, void *p)
7246 +
7247 + mutex_lock(&data->lock);
7248 +
7249 +- data->buffer[0] = sensor_data;
7250 ++ data->scan.chan = sensor_data;
7251 + iio_push_to_buffers_with_timestamp(indio_dev,
7252 +- data->buffer, pf->timestamp);
7253 ++ &data->scan, pf->timestamp);
7254 +
7255 + mutex_unlock(&data->lock);
7256 + err:
7257 +diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
7258 +index d1e94147fb165..0c879e40bd18d 100644
7259 +--- a/drivers/infiniband/core/cma.c
7260 ++++ b/drivers/infiniband/core/cma.c
7261 +@@ -1856,6 +1856,7 @@ static void _destroy_id(struct rdma_id_private *id_priv,
7262 + {
7263 + cma_cancel_operation(id_priv, state);
7264 +
7265 ++ rdma_restrack_del(&id_priv->res);
7266 + if (id_priv->cma_dev) {
7267 + if (rdma_cap_ib_cm(id_priv->id.device, 1)) {
7268 + if (id_priv->cm_id.ib)
7269 +@@ -1865,7 +1866,6 @@ static void _destroy_id(struct rdma_id_private *id_priv,
7270 + iw_destroy_cm_id(id_priv->cm_id.iw);
7271 + }
7272 + cma_leave_mc_groups(id_priv);
7273 +- rdma_restrack_del(&id_priv->res);
7274 + cma_release_dev(id_priv);
7275 + }
7276 +
7277 +@@ -2476,8 +2476,10 @@ static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
7278 + if (IS_ERR(id))
7279 + return PTR_ERR(id);
7280 +
7281 ++ mutex_lock(&id_priv->qp_mutex);
7282 + id->tos = id_priv->tos;
7283 + id->tos_set = id_priv->tos_set;
7284 ++ mutex_unlock(&id_priv->qp_mutex);
7285 + id_priv->cm_id.iw = id;
7286 +
7287 + memcpy(&id_priv->cm_id.iw->local_addr, cma_src_addr(id_priv),
7288 +@@ -2537,8 +2539,10 @@ static int cma_listen_on_dev(struct rdma_id_private *id_priv,
7289 + cma_id_get(id_priv);
7290 + dev_id_priv->internal_id = 1;
7291 + dev_id_priv->afonly = id_priv->afonly;
7292 ++ mutex_lock(&id_priv->qp_mutex);
7293 + dev_id_priv->tos_set = id_priv->tos_set;
7294 + dev_id_priv->tos = id_priv->tos;
7295 ++ mutex_unlock(&id_priv->qp_mutex);
7296 +
7297 + ret = rdma_listen(&dev_id_priv->id, id_priv->backlog);
7298 + if (ret)
7299 +@@ -2585,8 +2589,10 @@ void rdma_set_service_type(struct rdma_cm_id *id, int tos)
7300 + struct rdma_id_private *id_priv;
7301 +
7302 + id_priv = container_of(id, struct rdma_id_private, id);
7303 ++ mutex_lock(&id_priv->qp_mutex);
7304 + id_priv->tos = (u8) tos;
7305 + id_priv->tos_set = true;
7306 ++ mutex_unlock(&id_priv->qp_mutex);
7307 + }
7308 + EXPORT_SYMBOL(rdma_set_service_type);
7309 +
7310 +@@ -2613,8 +2619,10 @@ int rdma_set_ack_timeout(struct rdma_cm_id *id, u8 timeout)
7311 + return -EINVAL;
7312 +
7313 + id_priv = container_of(id, struct rdma_id_private, id);
7314 ++ mutex_lock(&id_priv->qp_mutex);
7315 + id_priv->timeout = timeout;
7316 + id_priv->timeout_set = true;
7317 ++ mutex_unlock(&id_priv->qp_mutex);
7318 +
7319 + return 0;
7320 + }
7321 +@@ -3000,8 +3008,11 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
7322 +
7323 + u8 default_roce_tos = id_priv->cma_dev->default_roce_tos[id_priv->id.port_num -
7324 + rdma_start_port(id_priv->cma_dev->device)];
7325 +- u8 tos = id_priv->tos_set ? id_priv->tos : default_roce_tos;
7326 ++ u8 tos;
7327 +
7328 ++ mutex_lock(&id_priv->qp_mutex);
7329 ++ tos = id_priv->tos_set ? id_priv->tos : default_roce_tos;
7330 ++ mutex_unlock(&id_priv->qp_mutex);
7331 +
7332 + work = kzalloc(sizeof *work, GFP_KERNEL);
7333 + if (!work)
7334 +@@ -3048,8 +3059,12 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
7335 + * PacketLifeTime = local ACK timeout/2
7336 + * as a reasonable approximation for RoCE networks.
7337 + */
7338 +- route->path_rec->packet_life_time = id_priv->timeout_set ?
7339 +- id_priv->timeout - 1 : CMA_IBOE_PACKET_LIFETIME;
7340 ++ mutex_lock(&id_priv->qp_mutex);
7341 ++ if (id_priv->timeout_set && id_priv->timeout)
7342 ++ route->path_rec->packet_life_time = id_priv->timeout - 1;
7343 ++ else
7344 ++ route->path_rec->packet_life_time = CMA_IBOE_PACKET_LIFETIME;
7345 ++ mutex_unlock(&id_priv->qp_mutex);
7346 +
7347 + if (!route->path_rec->mtu) {
7348 + ret = -EINVAL;
7349 +@@ -4073,8 +4088,11 @@ static int cma_connect_iw(struct rdma_id_private *id_priv,
7350 + if (IS_ERR(cm_id))
7351 + return PTR_ERR(cm_id);
7352 +
7353 ++ mutex_lock(&id_priv->qp_mutex);
7354 + cm_id->tos = id_priv->tos;
7355 + cm_id->tos_set = id_priv->tos_set;
7356 ++ mutex_unlock(&id_priv->qp_mutex);
7357 ++
7358 + id_priv->cm_id.iw = cm_id;
7359 +
7360 + memcpy(&cm_id->local_addr, cma_src_addr(id_priv),
7361 +diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
7362 +index 418d133a8fb08..466026825dd75 100644
7363 +--- a/drivers/infiniband/core/uverbs_cmd.c
7364 ++++ b/drivers/infiniband/core/uverbs_cmd.c
7365 +@@ -3000,12 +3000,29 @@ static int ib_uverbs_ex_modify_wq(struct uverbs_attr_bundle *attrs)
7366 + if (!wq)
7367 + return -EINVAL;
7368 +
7369 +- wq_attr.curr_wq_state = cmd.curr_wq_state;
7370 +- wq_attr.wq_state = cmd.wq_state;
7371 + if (cmd.attr_mask & IB_WQ_FLAGS) {
7372 + wq_attr.flags = cmd.flags;
7373 + wq_attr.flags_mask = cmd.flags_mask;
7374 + }
7375 ++
7376 ++ if (cmd.attr_mask & IB_WQ_CUR_STATE) {
7377 ++ if (cmd.curr_wq_state > IB_WQS_ERR)
7378 ++ return -EINVAL;
7379 ++
7380 ++ wq_attr.curr_wq_state = cmd.curr_wq_state;
7381 ++ } else {
7382 ++ wq_attr.curr_wq_state = wq->state;
7383 ++ }
7384 ++
7385 ++ if (cmd.attr_mask & IB_WQ_STATE) {
7386 ++ if (cmd.wq_state > IB_WQS_ERR)
7387 ++ return -EINVAL;
7388 ++
7389 ++ wq_attr.wq_state = cmd.wq_state;
7390 ++ } else {
7391 ++ wq_attr.wq_state = wq_attr.curr_wq_state;
7392 ++ }
7393 ++
7394 + ret = wq->device->ops.modify_wq(wq, &wq_attr, cmd.attr_mask,
7395 + &attrs->driver_udata);
7396 + rdma_lookup_put_uobject(&wq->uobject->uevent.uobject,
7397 +diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
7398 +index 5cb8e602294ca..6bc0818f4b2c6 100644
7399 +--- a/drivers/infiniband/hw/mlx4/qp.c
7400 ++++ b/drivers/infiniband/hw/mlx4/qp.c
7401 +@@ -4244,13 +4244,8 @@ int mlx4_ib_modify_wq(struct ib_wq *ibwq, struct ib_wq_attr *wq_attr,
7402 + if (wq_attr_mask & IB_WQ_FLAGS)
7403 + return -EOPNOTSUPP;
7404 +
7405 +- cur_state = wq_attr_mask & IB_WQ_CUR_STATE ? wq_attr->curr_wq_state :
7406 +- ibwq->state;
7407 +- new_state = wq_attr_mask & IB_WQ_STATE ? wq_attr->wq_state : cur_state;
7408 +-
7409 +- if (cur_state < IB_WQS_RESET || cur_state > IB_WQS_ERR ||
7410 +- new_state < IB_WQS_RESET || new_state > IB_WQS_ERR)
7411 +- return -EINVAL;
7412 ++ cur_state = wq_attr->curr_wq_state;
7413 ++ new_state = wq_attr->wq_state;
7414 +
7415 + if ((new_state == IB_WQS_RDY) && (cur_state == IB_WQS_ERR))
7416 + return -EINVAL;
7417 +diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
7418 +index b19506707e45c..eb69bec77e5d4 100644
7419 +--- a/drivers/infiniband/hw/mlx5/main.c
7420 ++++ b/drivers/infiniband/hw/mlx5/main.c
7421 +@@ -3440,8 +3440,6 @@ static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
7422 +
7423 + port->mp.mpi = NULL;
7424 +
7425 +- list_add_tail(&mpi->list, &mlx5_ib_unaffiliated_port_list);
7426 +-
7427 + spin_unlock(&port->mp.mpi_lock);
7428 +
7429 + err = mlx5_nic_vport_unaffiliate_multiport(mpi->mdev);
7430 +@@ -3594,6 +3592,8 @@ static void mlx5_ib_cleanup_multiport_master(struct mlx5_ib_dev *dev)
7431 + dev->port[i].mp.mpi = NULL;
7432 + } else {
7433 + mlx5_ib_dbg(dev, "unbinding port_num: %d\n", i + 1);
7434 ++ list_add_tail(&dev->port[i].mp.mpi->list,
7435 ++ &mlx5_ib_unaffiliated_port_list);
7436 + mlx5_ib_unbind_slave_port(dev, dev->port[i].mp.mpi);
7437 + }
7438 + }
7439 +diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
7440 +index 6d2715f65d788..8beba002e5dd7 100644
7441 +--- a/drivers/infiniband/hw/mlx5/qp.c
7442 ++++ b/drivers/infiniband/hw/mlx5/qp.c
7443 +@@ -5236,10 +5236,8 @@ int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
7444 +
7445 + rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
7446 +
7447 +- curr_wq_state = (wq_attr_mask & IB_WQ_CUR_STATE) ?
7448 +- wq_attr->curr_wq_state : wq->state;
7449 +- wq_state = (wq_attr_mask & IB_WQ_STATE) ?
7450 +- wq_attr->wq_state : curr_wq_state;
7451 ++ curr_wq_state = wq_attr->curr_wq_state;
7452 ++ wq_state = wq_attr->wq_state;
7453 + if (curr_wq_state == IB_WQS_ERR)
7454 + curr_wq_state = MLX5_RQC_STATE_ERR;
7455 + if (wq_state == IB_WQS_ERR)
7456 +diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
7457 +index bce44502ab0ed..c071d5b1b85a7 100644
7458 +--- a/drivers/infiniband/sw/rxe/rxe_net.c
7459 ++++ b/drivers/infiniband/sw/rxe/rxe_net.c
7460 +@@ -212,10 +212,8 @@ static struct socket *rxe_setup_udp_tunnel(struct net *net, __be16 port,
7461 +
7462 + /* Create UDP socket */
7463 + err = udp_sock_create(net, &udp_cfg, &sock);
7464 +- if (err < 0) {
7465 +- pr_err("failed to create udp socket. err = %d\n", err);
7466 ++ if (err < 0)
7467 + return ERR_PTR(err);
7468 +- }
7469 +
7470 + tnl_cfg.encap_type = 1;
7471 + tnl_cfg.encap_rcv = rxe_udp_encap_recv;
7472 +@@ -616,6 +614,12 @@ static int rxe_net_ipv6_init(void)
7473 +
7474 + recv_sockets.sk6 = rxe_setup_udp_tunnel(&init_net,
7475 + htons(ROCE_V2_UDP_DPORT), true);
7476 ++ if (PTR_ERR(recv_sockets.sk6) == -EAFNOSUPPORT) {
7477 ++ recv_sockets.sk6 = NULL;
7478 ++ pr_warn("IPv6 is not supported, can not create a UDPv6 socket\n");
7479 ++ return 0;
7480 ++ }
7481 ++
7482 + if (IS_ERR(recv_sockets.sk6)) {
7483 + recv_sockets.sk6 = NULL;
7484 + pr_err("Failed to create IPv6 UDP tunnel\n");
7485 +diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
7486 +index 1e716fe7014cc..a1b79015e6f22 100644
7487 +--- a/drivers/infiniband/sw/rxe/rxe_qp.c
7488 ++++ b/drivers/infiniband/sw/rxe/rxe_qp.c
7489 +@@ -125,7 +125,6 @@ static void free_rd_atomic_resources(struct rxe_qp *qp)
7490 + void free_rd_atomic_resource(struct rxe_qp *qp, struct resp_res *res)
7491 + {
7492 + if (res->type == RXE_ATOMIC_MASK) {
7493 +- rxe_drop_ref(qp);
7494 + kfree_skb(res->atomic.skb);
7495 + } else if (res->type == RXE_READ_MASK) {
7496 + if (res->read.mr)
7497 +diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
7498 +index c7e3b6a4af38f..83c03212099a2 100644
7499 +--- a/drivers/infiniband/sw/rxe/rxe_resp.c
7500 ++++ b/drivers/infiniband/sw/rxe/rxe_resp.c
7501 +@@ -966,8 +966,6 @@ static int send_atomic_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
7502 + goto out;
7503 + }
7504 +
7505 +- rxe_add_ref(qp);
7506 +-
7507 + res = &qp->resp.resources[qp->resp.res_head];
7508 + free_rd_atomic_resource(qp, res);
7509 + rxe_advance_resp_resource(qp);
7510 +diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
7511 +index 7db550ba25d7f..46fad202a380e 100644
7512 +--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
7513 ++++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
7514 +@@ -811,6 +811,9 @@ static struct rtrs_clt_sess *get_next_path_min_inflight(struct path_it *it)
7515 + int inflight;
7516 +
7517 + list_for_each_entry_rcu(sess, &clt->paths_list, s.entry) {
7518 ++ if (unlikely(READ_ONCE(sess->state) != RTRS_CLT_CONNECTED))
7519 ++ continue;
7520 ++
7521 + if (unlikely(!list_empty(raw_cpu_ptr(sess->mp_skip_entry))))
7522 + continue;
7523 +
7524 +@@ -1724,7 +1727,19 @@ static int rtrs_rdma_conn_established(struct rtrs_clt_con *con,
7525 + queue_depth);
7526 + return -ECONNRESET;
7527 + }
7528 +- if (!sess->rbufs || sess->queue_depth < queue_depth) {
7529 ++ if (sess->queue_depth > 0 && queue_depth != sess->queue_depth) {
7530 ++ rtrs_err(clt, "Error: queue depth changed\n");
7531 ++
7532 ++ /*
7533 ++ * Stop any more reconnection attempts
7534 ++ */
7535 ++ sess->reconnect_attempts = -1;
7536 ++ rtrs_err(clt,
7537 ++ "Disabling auto-reconnect. Trigger a manual reconnect after issue is resolved\n");
7538 ++ return -ECONNRESET;
7539 ++ }
7540 ++
7541 ++ if (!sess->rbufs) {
7542 + kfree(sess->rbufs);
7543 + sess->rbufs = kcalloc(queue_depth, sizeof(*sess->rbufs),
7544 + GFP_KERNEL);
7545 +@@ -1738,7 +1753,7 @@ static int rtrs_rdma_conn_established(struct rtrs_clt_con *con,
7546 + sess->chunk_size = sess->max_io_size + sess->max_hdr_size;
7547 +
7548 + /*
7549 +- * Global queue depth and IO size is always a minimum.
7550 ++ * Global IO size is always a minimum.
7551 + * If while a reconnection server sends us a value a bit
7552 + * higher - client does not care and uses cached minimum.
7553 + *
7554 +@@ -1746,8 +1761,7 @@ static int rtrs_rdma_conn_established(struct rtrs_clt_con *con,
7555 + * connections in parallel, use lock.
7556 + */
7557 + mutex_lock(&clt->paths_mutex);
7558 +- clt->queue_depth = min_not_zero(sess->queue_depth,
7559 +- clt->queue_depth);
7560 ++ clt->queue_depth = sess->queue_depth;
7561 + clt->max_io_size = min_not_zero(sess->max_io_size,
7562 + clt->max_io_size);
7563 + mutex_unlock(&clt->paths_mutex);
7564 +@@ -2692,6 +2706,8 @@ struct rtrs_clt *rtrs_clt_open(struct rtrs_clt_ops *ops,
7565 + if (err) {
7566 + list_del_rcu(&sess->s.entry);
7567 + rtrs_clt_close_conns(sess, true);
7568 ++ free_percpu(sess->stats->pcpu_stats);
7569 ++ kfree(sess->stats);
7570 + free_sess(sess);
7571 + goto close_all_sess;
7572 + }
7573 +@@ -2700,6 +2716,8 @@ struct rtrs_clt *rtrs_clt_open(struct rtrs_clt_ops *ops,
7574 + if (err) {
7575 + list_del_rcu(&sess->s.entry);
7576 + rtrs_clt_close_conns(sess, true);
7577 ++ free_percpu(sess->stats->pcpu_stats);
7578 ++ kfree(sess->stats);
7579 + free_sess(sess);
7580 + goto close_all_sess;
7581 + }
7582 +@@ -2959,6 +2977,8 @@ int rtrs_clt_create_path_from_sysfs(struct rtrs_clt *clt,
7583 + close_sess:
7584 + rtrs_clt_remove_path_from_arr(sess);
7585 + rtrs_clt_close_conns(sess, true);
7586 ++ free_percpu(sess->stats->pcpu_stats);
7587 ++ kfree(sess->stats);
7588 + free_sess(sess);
7589 +
7590 + return err;
7591 +diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
7592 +index 39708ab4f26e5..7c75e14590173 100644
7593 +--- a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
7594 ++++ b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
7595 +@@ -214,6 +214,7 @@ rtrs_srv_destroy_once_sysfs_root_folders(struct rtrs_srv_sess *sess)
7596 + device_del(&srv->dev);
7597 + put_device(&srv->dev);
7598 + } else {
7599 ++ put_device(&srv->dev);
7600 + mutex_unlock(&srv->paths_mutex);
7601 + }
7602 + }
7603 +diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
7604 +index 43806180f85ec..b033bfa9f3839 100644
7605 +--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c
7606 ++++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
7607 +@@ -1490,6 +1490,7 @@ static void free_sess(struct rtrs_srv_sess *sess)
7608 + kobject_del(&sess->kobj);
7609 + kobject_put(&sess->kobj);
7610 + } else {
7611 ++ kfree(sess->stats);
7612 + kfree(sess);
7613 + }
7614 + }
7615 +@@ -1613,7 +1614,7 @@ static int create_con(struct rtrs_srv_sess *sess,
7616 + struct rtrs_sess *s = &sess->s;
7617 + struct rtrs_srv_con *con;
7618 +
7619 +- u32 cq_size, wr_queue_size;
7620 ++ u32 cq_size, max_send_wr, max_recv_wr, wr_limit;
7621 + int err, cq_vector;
7622 +
7623 + con = kzalloc(sizeof(*con), GFP_KERNEL);
7624 +@@ -1634,30 +1635,42 @@ static int create_con(struct rtrs_srv_sess *sess,
7625 + * All receive and all send (each requiring invalidate)
7626 + * + 2 for drain and heartbeat
7627 + */
7628 +- wr_queue_size = SERVICE_CON_QUEUE_DEPTH * 3 + 2;
7629 +- cq_size = wr_queue_size;
7630 ++ max_send_wr = SERVICE_CON_QUEUE_DEPTH * 2 + 2;
7631 ++ max_recv_wr = SERVICE_CON_QUEUE_DEPTH + 2;
7632 ++ cq_size = max_send_wr + max_recv_wr;
7633 + } else {
7634 +- /*
7635 +- * If we have all receive requests posted and
7636 +- * all write requests posted and each read request
7637 +- * requires an invalidate request + drain
7638 +- * and qp gets into error state.
7639 +- */
7640 +- cq_size = srv->queue_depth * 3 + 1;
7641 + /*
7642 + * In theory we might have queue_depth * 32
7643 + * outstanding requests if an unsafe global key is used
7644 + * and we have queue_depth read requests each consisting
7645 + * of 32 different addresses. div 3 for mlx5.
7646 + */
7647 +- wr_queue_size = sess->s.dev->ib_dev->attrs.max_qp_wr / 3;
7648 ++ wr_limit = sess->s.dev->ib_dev->attrs.max_qp_wr / 3;
7649 ++ /* when always_invlaidate enalbed, we need linv+rinv+mr+imm */
7650 ++ if (always_invalidate)
7651 ++ max_send_wr =
7652 ++ min_t(int, wr_limit,
7653 ++ srv->queue_depth * (1 + 4) + 1);
7654 ++ else
7655 ++ max_send_wr =
7656 ++ min_t(int, wr_limit,
7657 ++ srv->queue_depth * (1 + 2) + 1);
7658 ++
7659 ++ max_recv_wr = srv->queue_depth + 1;
7660 ++ /*
7661 ++ * If we have all receive requests posted and
7662 ++ * all write requests posted and each read request
7663 ++ * requires an invalidate request + drain
7664 ++ * and qp gets into error state.
7665 ++ */
7666 ++ cq_size = max_send_wr + max_recv_wr;
7667 + }
7668 +- atomic_set(&con->sq_wr_avail, wr_queue_size);
7669 ++ atomic_set(&con->sq_wr_avail, max_send_wr);
7670 + cq_vector = rtrs_srv_get_next_cq_vector(sess);
7671 +
7672 + /* TODO: SOFTIRQ can be faster, but be careful with softirq context */
7673 + err = rtrs_cq_qp_create(&sess->s, &con->c, 1, cq_vector, cq_size,
7674 +- wr_queue_size, wr_queue_size,
7675 ++ max_send_wr, max_recv_wr,
7676 + IB_POLL_WORKQUEUE);
7677 + if (err) {
7678 + rtrs_err(s, "rtrs_cq_qp_create(), err: %d\n", err);
7679 +diff --git a/drivers/infiniband/ulp/rtrs/rtrs.c b/drivers/infiniband/ulp/rtrs/rtrs.c
7680 +index d13aff0aa8165..4629bb758126a 100644
7681 +--- a/drivers/infiniband/ulp/rtrs/rtrs.c
7682 ++++ b/drivers/infiniband/ulp/rtrs/rtrs.c
7683 +@@ -373,7 +373,6 @@ void rtrs_stop_hb(struct rtrs_sess *sess)
7684 + {
7685 + cancel_delayed_work_sync(&sess->hb_dwork);
7686 + sess->hb_missed_cnt = 0;
7687 +- sess->hb_missed_max = 0;
7688 + }
7689 + EXPORT_SYMBOL_GPL(rtrs_stop_hb);
7690 +
7691 +diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
7692 +index a8f85993dab30..86d5c4c92b363 100644
7693 +--- a/drivers/infiniband/ulp/srp/ib_srp.c
7694 ++++ b/drivers/infiniband/ulp/srp/ib_srp.c
7695 +@@ -998,7 +998,6 @@ static int srp_alloc_req_data(struct srp_rdma_ch *ch)
7696 + struct srp_device *srp_dev = target->srp_host->srp_dev;
7697 + struct ib_device *ibdev = srp_dev->dev;
7698 + struct srp_request *req;
7699 +- void *mr_list;
7700 + dma_addr_t dma_addr;
7701 + int i, ret = -ENOMEM;
7702 +
7703 +@@ -1009,12 +1008,12 @@ static int srp_alloc_req_data(struct srp_rdma_ch *ch)
7704 +
7705 + for (i = 0; i < target->req_ring_size; ++i) {
7706 + req = &ch->req_ring[i];
7707 +- mr_list = kmalloc_array(target->mr_per_cmd, sizeof(void *),
7708 +- GFP_KERNEL);
7709 +- if (!mr_list)
7710 +- goto out;
7711 +- if (srp_dev->use_fast_reg)
7712 +- req->fr_list = mr_list;
7713 ++ if (srp_dev->use_fast_reg) {
7714 ++ req->fr_list = kmalloc_array(target->mr_per_cmd,
7715 ++ sizeof(void *), GFP_KERNEL);
7716 ++ if (!req->fr_list)
7717 ++ goto out;
7718 ++ }
7719 + req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
7720 + if (!req->indirect_desc)
7721 + goto out;
7722 +diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
7723 +index 430dc69750048..675fcd0952a2d 100644
7724 +--- a/drivers/input/joydev.c
7725 ++++ b/drivers/input/joydev.c
7726 +@@ -500,7 +500,7 @@ static int joydev_handle_JSIOCSBTNMAP(struct joydev *joydev,
7727 + memcpy(joydev->keypam, keypam, len);
7728 +
7729 + for (i = 0; i < joydev->nkey; i++)
7730 +- joydev->keymap[keypam[i] - BTN_MISC] = i;
7731 ++ joydev->keymap[joydev->keypam[i] - BTN_MISC] = i;
7732 +
7733 + out:
7734 + kfree(keypam);
7735 +diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
7736 +index 793ecbbda32ca..9f60f1559e499 100644
7737 +--- a/drivers/input/keyboard/Kconfig
7738 ++++ b/drivers/input/keyboard/Kconfig
7739 +@@ -67,9 +67,6 @@ config KEYBOARD_AMIGA
7740 + To compile this driver as a module, choose M here: the
7741 + module will be called amikbd.
7742 +
7743 +-config ATARI_KBD_CORE
7744 +- bool
7745 +-
7746 + config KEYBOARD_APPLESPI
7747 + tristate "Apple SPI keyboard and trackpad"
7748 + depends on ACPI && EFI
7749 +diff --git a/drivers/input/keyboard/hil_kbd.c b/drivers/input/keyboard/hil_kbd.c
7750 +index bb29a7c9a1c0c..54afb38601b9f 100644
7751 +--- a/drivers/input/keyboard/hil_kbd.c
7752 ++++ b/drivers/input/keyboard/hil_kbd.c
7753 +@@ -512,6 +512,7 @@ static int hil_dev_connect(struct serio *serio, struct serio_driver *drv)
7754 + HIL_IDD_NUM_AXES_PER_SET(*idd)) {
7755 + printk(KERN_INFO PREFIX
7756 + "combo devices are not supported.\n");
7757 ++ error = -EINVAL;
7758 + goto bail1;
7759 + }
7760 +
7761 +diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
7762 +index 45113767db964..a06385c55af2a 100644
7763 +--- a/drivers/input/touchscreen/goodix.c
7764 ++++ b/drivers/input/touchscreen/goodix.c
7765 +@@ -178,51 +178,6 @@ static const unsigned long goodix_irq_flags[] = {
7766 + IRQ_TYPE_LEVEL_HIGH,
7767 + };
7768 +
7769 +-/*
7770 +- * Those tablets have their coordinates origin at the bottom right
7771 +- * of the tablet, as if rotated 180 degrees
7772 +- */
7773 +-static const struct dmi_system_id rotated_screen[] = {
7774 +-#if defined(CONFIG_DMI) && defined(CONFIG_X86)
7775 +- {
7776 +- .ident = "Teclast X89",
7777 +- .matches = {
7778 +- /* tPAD is too generic, also match on bios date */
7779 +- DMI_MATCH(DMI_BOARD_VENDOR, "TECLAST"),
7780 +- DMI_MATCH(DMI_BOARD_NAME, "tPAD"),
7781 +- DMI_MATCH(DMI_BIOS_DATE, "12/19/2014"),
7782 +- },
7783 +- },
7784 +- {
7785 +- .ident = "Teclast X98 Pro",
7786 +- .matches = {
7787 +- /*
7788 +- * Only match BIOS date, because the manufacturers
7789 +- * BIOS does not report the board name at all
7790 +- * (sometimes)...
7791 +- */
7792 +- DMI_MATCH(DMI_BOARD_VENDOR, "TECLAST"),
7793 +- DMI_MATCH(DMI_BIOS_DATE, "10/28/2015"),
7794 +- },
7795 +- },
7796 +- {
7797 +- .ident = "WinBook TW100",
7798 +- .matches = {
7799 +- DMI_MATCH(DMI_SYS_VENDOR, "WinBook"),
7800 +- DMI_MATCH(DMI_PRODUCT_NAME, "TW100")
7801 +- }
7802 +- },
7803 +- {
7804 +- .ident = "WinBook TW700",
7805 +- .matches = {
7806 +- DMI_MATCH(DMI_SYS_VENDOR, "WinBook"),
7807 +- DMI_MATCH(DMI_PRODUCT_NAME, "TW700")
7808 +- },
7809 +- },
7810 +-#endif
7811 +- {}
7812 +-};
7813 +-
7814 + static const struct dmi_system_id nine_bytes_report[] = {
7815 + #if defined(CONFIG_DMI) && defined(CONFIG_X86)
7816 + {
7817 +@@ -1121,13 +1076,6 @@ static int goodix_configure_dev(struct goodix_ts_data *ts)
7818 + ABS_MT_POSITION_Y, ts->prop.max_y);
7819 + }
7820 +
7821 +- if (dmi_check_system(rotated_screen)) {
7822 +- ts->prop.invert_x = true;
7823 +- ts->prop.invert_y = true;
7824 +- dev_dbg(&ts->client->dev,
7825 +- "Applying '180 degrees rotated screen' quirk\n");
7826 +- }
7827 +-
7828 + if (dmi_check_system(nine_bytes_report)) {
7829 + ts->contact_size = 9;
7830 +
7831 +diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
7832 +index 397cb1d3f481b..544a8f40b81f1 100644
7833 +--- a/drivers/input/touchscreen/usbtouchscreen.c
7834 ++++ b/drivers/input/touchscreen/usbtouchscreen.c
7835 +@@ -251,7 +251,7 @@ static int e2i_init(struct usbtouch_usb *usbtouch)
7836 + int ret;
7837 + struct usb_device *udev = interface_to_usbdev(usbtouch->interface);
7838 +
7839 +- ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
7840 ++ ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
7841 + 0x01, 0x02, 0x0000, 0x0081,
7842 + NULL, 0, USB_CTRL_SET_TIMEOUT);
7843 +
7844 +@@ -531,7 +531,7 @@ static int mtouch_init(struct usbtouch_usb *usbtouch)
7845 + if (ret)
7846 + return ret;
7847 +
7848 +- ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
7849 ++ ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
7850 + MTOUCHUSB_RESET,
7851 + USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
7852 + 1, 0, NULL, 0, USB_CTRL_SET_TIMEOUT);
7853 +@@ -543,7 +543,7 @@ static int mtouch_init(struct usbtouch_usb *usbtouch)
7854 + msleep(150);
7855 +
7856 + for (i = 0; i < 3; i++) {
7857 +- ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
7858 ++ ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
7859 + MTOUCHUSB_ASYNC_REPORT,
7860 + USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
7861 + 1, 1, NULL, 0, USB_CTRL_SET_TIMEOUT);
7862 +@@ -722,7 +722,7 @@ static int dmc_tsc10_init(struct usbtouch_usb *usbtouch)
7863 + }
7864 +
7865 + /* start sending data */
7866 +- ret = usb_control_msg(dev, usb_rcvctrlpipe (dev, 0),
7867 ++ ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
7868 + TSC10_CMD_DATA1,
7869 + USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
7870 + 0, 0, NULL, 0, USB_CTRL_SET_TIMEOUT);
7871 +diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
7872 +index cc9869cc48e41..fa57986c2309c 100644
7873 +--- a/drivers/iommu/amd/init.c
7874 ++++ b/drivers/iommu/amd/init.c
7875 +@@ -1914,8 +1914,8 @@ static void print_iommu_info(void)
7876 + pci_info(pdev, "Found IOMMU cap 0x%hx\n", iommu->cap_ptr);
7877 +
7878 + if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
7879 +- pci_info(pdev, "Extended features (%#llx):",
7880 +- iommu->features);
7881 ++ pr_info("Extended features (%#llx):", iommu->features);
7882 ++
7883 + for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
7884 + if (iommu_feature(iommu, (1ULL << i)))
7885 + pr_cont(" %s", feat_str[i]);
7886 +diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
7887 +index 0cbcd3fc3e7e8..d1539b7399a96 100644
7888 +--- a/drivers/iommu/dma-iommu.c
7889 ++++ b/drivers/iommu/dma-iommu.c
7890 +@@ -216,9 +216,11 @@ resv_iova:
7891 + lo = iova_pfn(iovad, start);
7892 + hi = iova_pfn(iovad, end);
7893 + reserve_iova(iovad, lo, hi);
7894 +- } else {
7895 ++ } else if (end < start) {
7896 + /* dma_ranges list should be sorted */
7897 +- dev_err(&dev->dev, "Failed to reserve IOVA\n");
7898 ++ dev_err(&dev->dev,
7899 ++ "Failed to reserve IOVA [%pa-%pa]\n",
7900 ++ &start, &end);
7901 + return -EINVAL;
7902 + }
7903 +
7904 +diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
7905 +index 849d3c5f908e4..56e8198e13d10 100644
7906 +--- a/drivers/leds/Kconfig
7907 ++++ b/drivers/leds/Kconfig
7908 +@@ -199,6 +199,7 @@ config LEDS_LM3530
7909 +
7910 + config LEDS_LM3532
7911 + tristate "LCD Backlight driver for LM3532"
7912 ++ select REGMAP_I2C
7913 + depends on LEDS_CLASS
7914 + depends on I2C
7915 + help
7916 +diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
7917 +index 131ca83f5fb38..4365c1cc4505f 100644
7918 +--- a/drivers/leds/led-class.c
7919 ++++ b/drivers/leds/led-class.c
7920 +@@ -286,10 +286,6 @@ struct led_classdev *__must_check devm_of_led_get(struct device *dev,
7921 + if (!dev)
7922 + return ERR_PTR(-EINVAL);
7923 +
7924 +- /* Not using device tree? */
7925 +- if (!IS_ENABLED(CONFIG_OF) || !dev->of_node)
7926 +- return ERR_PTR(-ENOTSUPP);
7927 +-
7928 + led = of_led_get(dev->of_node, index);
7929 + if (IS_ERR(led))
7930 + return led;
7931 +diff --git a/drivers/leds/leds-as3645a.c b/drivers/leds/leds-as3645a.c
7932 +index e8922fa033796..80411d41e802d 100644
7933 +--- a/drivers/leds/leds-as3645a.c
7934 ++++ b/drivers/leds/leds-as3645a.c
7935 +@@ -545,6 +545,7 @@ static int as3645a_parse_node(struct as3645a *flash,
7936 + if (!flash->indicator_node) {
7937 + dev_warn(&flash->client->dev,
7938 + "can't find indicator node\n");
7939 ++ rval = -ENODEV;
7940 + goto out_err;
7941 + }
7942 +
7943 +diff --git a/drivers/leds/leds-ktd2692.c b/drivers/leds/leds-ktd2692.c
7944 +index 632f10db4b3ff..f341da1503a49 100644
7945 +--- a/drivers/leds/leds-ktd2692.c
7946 ++++ b/drivers/leds/leds-ktd2692.c
7947 +@@ -256,6 +256,17 @@ static void ktd2692_setup(struct ktd2692_context *led)
7948 + | KTD2692_REG_FLASH_CURRENT_BASE);
7949 + }
7950 +
7951 ++static void regulator_disable_action(void *_data)
7952 ++{
7953 ++ struct device *dev = _data;
7954 ++ struct ktd2692_context *led = dev_get_drvdata(dev);
7955 ++ int ret;
7956 ++
7957 ++ ret = regulator_disable(led->regulator);
7958 ++ if (ret)
7959 ++ dev_err(dev, "Failed to disable supply: %d\n", ret);
7960 ++}
7961 ++
7962 + static int ktd2692_parse_dt(struct ktd2692_context *led, struct device *dev,
7963 + struct ktd2692_led_config_data *cfg)
7964 + {
7965 +@@ -286,8 +297,14 @@ static int ktd2692_parse_dt(struct ktd2692_context *led, struct device *dev,
7966 +
7967 + if (led->regulator) {
7968 + ret = regulator_enable(led->regulator);
7969 +- if (ret)
7970 ++ if (ret) {
7971 + dev_err(dev, "Failed to enable supply: %d\n", ret);
7972 ++ } else {
7973 ++ ret = devm_add_action_or_reset(dev,
7974 ++ regulator_disable_action, dev);
7975 ++ if (ret)
7976 ++ return ret;
7977 ++ }
7978 + }
7979 +
7980 + child_node = of_get_next_available_child(np, NULL);
7981 +@@ -377,17 +394,9 @@ static int ktd2692_probe(struct platform_device *pdev)
7982 + static int ktd2692_remove(struct platform_device *pdev)
7983 + {
7984 + struct ktd2692_context *led = platform_get_drvdata(pdev);
7985 +- int ret;
7986 +
7987 + led_classdev_flash_unregister(&led->fled_cdev);
7988 +
7989 +- if (led->regulator) {
7990 +- ret = regulator_disable(led->regulator);
7991 +- if (ret)
7992 +- dev_err(&pdev->dev,
7993 +- "Failed to disable supply: %d\n", ret);
7994 +- }
7995 +-
7996 + mutex_destroy(&led->lock);
7997 +
7998 + return 0;
7999 +diff --git a/drivers/leds/leds-lm36274.c b/drivers/leds/leds-lm36274.c
8000 +index aadb03468a40a..a23a9424c2f38 100644
8001 +--- a/drivers/leds/leds-lm36274.c
8002 ++++ b/drivers/leds/leds-lm36274.c
8003 +@@ -127,6 +127,7 @@ static int lm36274_probe(struct platform_device *pdev)
8004 +
8005 + ret = lm36274_init(chip);
8006 + if (ret) {
8007 ++ fwnode_handle_put(init_data.fwnode);
8008 + dev_err(chip->dev, "Failed to init the device\n");
8009 + return ret;
8010 + }
8011 +diff --git a/drivers/leds/leds-lm3692x.c b/drivers/leds/leds-lm3692x.c
8012 +index e945de45388ca..55e6443997ec9 100644
8013 +--- a/drivers/leds/leds-lm3692x.c
8014 ++++ b/drivers/leds/leds-lm3692x.c
8015 +@@ -435,6 +435,7 @@ static int lm3692x_probe_dt(struct lm3692x_led *led)
8016 +
8017 + ret = fwnode_property_read_u32(child, "reg", &led->led_enable);
8018 + if (ret) {
8019 ++ fwnode_handle_put(child);
8020 + dev_err(&led->client->dev, "reg DT property missing\n");
8021 + return ret;
8022 + }
8023 +@@ -449,12 +450,11 @@ static int lm3692x_probe_dt(struct lm3692x_led *led)
8024 +
8025 + ret = devm_led_classdev_register_ext(&led->client->dev, &led->led_dev,
8026 + &init_data);
8027 +- if (ret) {
8028 ++ if (ret)
8029 + dev_err(&led->client->dev, "led register err: %d\n", ret);
8030 +- return ret;
8031 +- }
8032 +
8033 +- return 0;
8034 ++ fwnode_handle_put(init_data.fwnode);
8035 ++ return ret;
8036 + }
8037 +
8038 + static int lm3692x_probe(struct i2c_client *client,
8039 +diff --git a/drivers/leds/leds-lm3697.c b/drivers/leds/leds-lm3697.c
8040 +index 7d216cdb91a8a..912e8bb22a995 100644
8041 +--- a/drivers/leds/leds-lm3697.c
8042 ++++ b/drivers/leds/leds-lm3697.c
8043 +@@ -203,11 +203,9 @@ static int lm3697_probe_dt(struct lm3697 *priv)
8044 +
8045 + priv->enable_gpio = devm_gpiod_get_optional(dev, "enable",
8046 + GPIOD_OUT_LOW);
8047 +- if (IS_ERR(priv->enable_gpio)) {
8048 +- ret = PTR_ERR(priv->enable_gpio);
8049 +- dev_err(dev, "Failed to get enable gpio: %d\n", ret);
8050 +- return ret;
8051 +- }
8052 ++ if (IS_ERR(priv->enable_gpio))
8053 ++ return dev_err_probe(dev, PTR_ERR(priv->enable_gpio),
8054 ++ "Failed to get enable GPIO\n");
8055 +
8056 + priv->regulator = devm_regulator_get(dev, "vled");
8057 + if (IS_ERR(priv->regulator))
8058 +diff --git a/drivers/leds/leds-lp50xx.c b/drivers/leds/leds-lp50xx.c
8059 +index f13117eed976d..d4529082935b8 100644
8060 +--- a/drivers/leds/leds-lp50xx.c
8061 ++++ b/drivers/leds/leds-lp50xx.c
8062 +@@ -496,6 +496,7 @@ static int lp50xx_probe_dt(struct lp50xx *priv)
8063 + ret = fwnode_property_read_u32(led_node, "color",
8064 + &color_id);
8065 + if (ret) {
8066 ++ fwnode_handle_put(led_node);
8067 + dev_err(priv->dev, "Cannot read color\n");
8068 + goto child_out;
8069 + }
8070 +@@ -519,7 +520,6 @@ static int lp50xx_probe_dt(struct lp50xx *priv)
8071 + goto child_out;
8072 + }
8073 + i++;
8074 +- fwnode_handle_put(child);
8075 + }
8076 +
8077 + return 0;
8078 +diff --git a/drivers/mailbox/qcom-apcs-ipc-mailbox.c b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
8079 +index 077e5c6a9ef7d..3d100a004760f 100644
8080 +--- a/drivers/mailbox/qcom-apcs-ipc-mailbox.c
8081 ++++ b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
8082 +@@ -128,7 +128,7 @@ static int qcom_apcs_ipc_probe(struct platform_device *pdev)
8083 + if (apcs_data->clk_name) {
8084 + apcs->clk = platform_device_register_data(&pdev->dev,
8085 + apcs_data->clk_name,
8086 +- PLATFORM_DEVID_NONE,
8087 ++ PLATFORM_DEVID_AUTO,
8088 + NULL, 0);
8089 + if (IS_ERR(apcs->clk))
8090 + dev_err(&pdev->dev, "failed to register APCS clk\n");
8091 +diff --git a/drivers/mailbox/qcom-ipcc.c b/drivers/mailbox/qcom-ipcc.c
8092 +index 2d13c72944c6f..584700cd15855 100644
8093 +--- a/drivers/mailbox/qcom-ipcc.c
8094 ++++ b/drivers/mailbox/qcom-ipcc.c
8095 +@@ -155,6 +155,11 @@ static int qcom_ipcc_mbox_send_data(struct mbox_chan *chan, void *data)
8096 + return 0;
8097 + }
8098 +
8099 ++static void qcom_ipcc_mbox_shutdown(struct mbox_chan *chan)
8100 ++{
8101 ++ chan->con_priv = NULL;
8102 ++}
8103 ++
8104 + static struct mbox_chan *qcom_ipcc_mbox_xlate(struct mbox_controller *mbox,
8105 + const struct of_phandle_args *ph)
8106 + {
8107 +@@ -184,6 +189,7 @@ static struct mbox_chan *qcom_ipcc_mbox_xlate(struct mbox_controller *mbox,
8108 +
8109 + static const struct mbox_chan_ops ipcc_mbox_chan_ops = {
8110 + .send_data = qcom_ipcc_mbox_send_data,
8111 ++ .shutdown = qcom_ipcc_mbox_shutdown,
8112 + };
8113 +
8114 + static int qcom_ipcc_setup_mbox(struct qcom_ipcc *ipcc)
8115 +diff --git a/drivers/media/cec/platform/s5p/s5p_cec.c b/drivers/media/cec/platform/s5p/s5p_cec.c
8116 +index 2a3e7ffefe0a2..028a09a7531ef 100644
8117 +--- a/drivers/media/cec/platform/s5p/s5p_cec.c
8118 ++++ b/drivers/media/cec/platform/s5p/s5p_cec.c
8119 +@@ -35,10 +35,13 @@ MODULE_PARM_DESC(debug, "debug level (0-2)");
8120 +
8121 + static int s5p_cec_adap_enable(struct cec_adapter *adap, bool enable)
8122 + {
8123 ++ int ret;
8124 + struct s5p_cec_dev *cec = cec_get_drvdata(adap);
8125 +
8126 + if (enable) {
8127 +- pm_runtime_get_sync(cec->dev);
8128 ++ ret = pm_runtime_resume_and_get(cec->dev);
8129 ++ if (ret < 0)
8130 ++ return ret;
8131 +
8132 + s5p_cec_reset(cec);
8133 +
8134 +@@ -51,7 +54,7 @@ static int s5p_cec_adap_enable(struct cec_adapter *adap, bool enable)
8135 + } else {
8136 + s5p_cec_mask_tx_interrupts(cec);
8137 + s5p_cec_mask_rx_interrupts(cec);
8138 +- pm_runtime_disable(cec->dev);
8139 ++ pm_runtime_put(cec->dev);
8140 + }
8141 +
8142 + return 0;
8143 +diff --git a/drivers/media/common/siano/smscoreapi.c b/drivers/media/common/siano/smscoreapi.c
8144 +index c1511094fdc7b..b735e23701373 100644
8145 +--- a/drivers/media/common/siano/smscoreapi.c
8146 ++++ b/drivers/media/common/siano/smscoreapi.c
8147 +@@ -908,7 +908,7 @@ static int smscore_load_firmware_family2(struct smscore_device_t *coredev,
8148 + void *buffer, size_t size)
8149 + {
8150 + struct sms_firmware *firmware = (struct sms_firmware *) buffer;
8151 +- struct sms_msg_data4 *msg;
8152 ++ struct sms_msg_data5 *msg;
8153 + u32 mem_address, calc_checksum = 0;
8154 + u32 i, *ptr;
8155 + u8 *payload = firmware->payload;
8156 +@@ -989,24 +989,20 @@ static int smscore_load_firmware_family2(struct smscore_device_t *coredev,
8157 + goto exit_fw_download;
8158 +
8159 + if (coredev->mode == DEVICE_MODE_NONE) {
8160 +- struct sms_msg_data *trigger_msg =
8161 +- (struct sms_msg_data *) msg;
8162 +-
8163 + pr_debug("sending MSG_SMS_SWDOWNLOAD_TRIGGER_REQ\n");
8164 + SMS_INIT_MSG(&msg->x_msg_header,
8165 + MSG_SMS_SWDOWNLOAD_TRIGGER_REQ,
8166 +- sizeof(struct sms_msg_hdr) +
8167 +- sizeof(u32) * 5);
8168 ++ sizeof(*msg));
8169 +
8170 +- trigger_msg->msg_data[0] = firmware->start_address;
8171 ++ msg->msg_data[0] = firmware->start_address;
8172 + /* Entry point */
8173 +- trigger_msg->msg_data[1] = 6; /* Priority */
8174 +- trigger_msg->msg_data[2] = 0x200; /* Stack size */
8175 +- trigger_msg->msg_data[3] = 0; /* Parameter */
8176 +- trigger_msg->msg_data[4] = 4; /* Task ID */
8177 ++ msg->msg_data[1] = 6; /* Priority */
8178 ++ msg->msg_data[2] = 0x200; /* Stack size */
8179 ++ msg->msg_data[3] = 0; /* Parameter */
8180 ++ msg->msg_data[4] = 4; /* Task ID */
8181 +
8182 +- rc = smscore_sendrequest_and_wait(coredev, trigger_msg,
8183 +- trigger_msg->x_msg_header.msg_length,
8184 ++ rc = smscore_sendrequest_and_wait(coredev, msg,
8185 ++ msg->x_msg_header.msg_length,
8186 + &coredev->trigger_done);
8187 + } else {
8188 + SMS_INIT_MSG(&msg->x_msg_header, MSG_SW_RELOAD_EXEC_REQ,
8189 +diff --git a/drivers/media/common/siano/smscoreapi.h b/drivers/media/common/siano/smscoreapi.h
8190 +index b3b793b5caf35..16c45afabc530 100644
8191 +--- a/drivers/media/common/siano/smscoreapi.h
8192 ++++ b/drivers/media/common/siano/smscoreapi.h
8193 +@@ -629,9 +629,9 @@ struct sms_msg_data2 {
8194 + u32 msg_data[2];
8195 + };
8196 +
8197 +-struct sms_msg_data4 {
8198 ++struct sms_msg_data5 {
8199 + struct sms_msg_hdr x_msg_header;
8200 +- u32 msg_data[4];
8201 ++ u32 msg_data[5];
8202 + };
8203 +
8204 + struct sms_data_download {
8205 +diff --git a/drivers/media/common/siano/smsdvb-main.c b/drivers/media/common/siano/smsdvb-main.c
8206 +index ae17407e477a4..7cc654bc52d37 100644
8207 +--- a/drivers/media/common/siano/smsdvb-main.c
8208 ++++ b/drivers/media/common/siano/smsdvb-main.c
8209 +@@ -1176,6 +1176,10 @@ static int smsdvb_hotplug(struct smscore_device_t *coredev,
8210 + return 0;
8211 +
8212 + media_graph_error:
8213 ++ mutex_lock(&g_smsdvb_clientslock);
8214 ++ list_del(&client->entry);
8215 ++ mutex_unlock(&g_smsdvb_clientslock);
8216 ++
8217 + smsdvb_debugfs_release(client);
8218 +
8219 + client_error:
8220 +diff --git a/drivers/media/dvb-core/dvb_net.c b/drivers/media/dvb-core/dvb_net.c
8221 +index 89620da983bab..dddebea644bb8 100644
8222 +--- a/drivers/media/dvb-core/dvb_net.c
8223 ++++ b/drivers/media/dvb-core/dvb_net.c
8224 +@@ -45,6 +45,7 @@
8225 + #include <linux/module.h>
8226 + #include <linux/kernel.h>
8227 + #include <linux/netdevice.h>
8228 ++#include <linux/nospec.h>
8229 + #include <linux/etherdevice.h>
8230 + #include <linux/dvb/net.h>
8231 + #include <linux/uio.h>
8232 +@@ -1462,14 +1463,20 @@ static int dvb_net_do_ioctl(struct file *file,
8233 + struct net_device *netdev;
8234 + struct dvb_net_priv *priv_data;
8235 + struct dvb_net_if *dvbnetif = parg;
8236 ++ int if_num = dvbnetif->if_num;
8237 +
8238 +- if (dvbnetif->if_num >= DVB_NET_DEVICES_MAX ||
8239 +- !dvbnet->state[dvbnetif->if_num]) {
8240 ++ if (if_num >= DVB_NET_DEVICES_MAX) {
8241 + ret = -EINVAL;
8242 + goto ioctl_error;
8243 + }
8244 ++ if_num = array_index_nospec(if_num, DVB_NET_DEVICES_MAX);
8245 +
8246 +- netdev = dvbnet->device[dvbnetif->if_num];
8247 ++ if (!dvbnet->state[if_num]) {
8248 ++ ret = -EINVAL;
8249 ++ goto ioctl_error;
8250 ++ }
8251 ++
8252 ++ netdev = dvbnet->device[if_num];
8253 +
8254 + priv_data = netdev_priv(netdev);
8255 + dvbnetif->pid=priv_data->pid;
8256 +@@ -1522,14 +1529,20 @@ static int dvb_net_do_ioctl(struct file *file,
8257 + struct net_device *netdev;
8258 + struct dvb_net_priv *priv_data;
8259 + struct __dvb_net_if_old *dvbnetif = parg;
8260 ++ int if_num = dvbnetif->if_num;
8261 ++
8262 ++ if (if_num >= DVB_NET_DEVICES_MAX) {
8263 ++ ret = -EINVAL;
8264 ++ goto ioctl_error;
8265 ++ }
8266 ++ if_num = array_index_nospec(if_num, DVB_NET_DEVICES_MAX);
8267 +
8268 +- if (dvbnetif->if_num >= DVB_NET_DEVICES_MAX ||
8269 +- !dvbnet->state[dvbnetif->if_num]) {
8270 ++ if (!dvbnet->state[if_num]) {
8271 + ret = -EINVAL;
8272 + goto ioctl_error;
8273 + }
8274 +
8275 +- netdev = dvbnet->device[dvbnetif->if_num];
8276 ++ netdev = dvbnet->device[if_num];
8277 +
8278 + priv_data = netdev_priv(netdev);
8279 + dvbnetif->pid=priv_data->pid;
8280 +diff --git a/drivers/media/i2c/ir-kbd-i2c.c b/drivers/media/i2c/ir-kbd-i2c.c
8281 +index e8119ad0bc71d..92376592455ee 100644
8282 +--- a/drivers/media/i2c/ir-kbd-i2c.c
8283 ++++ b/drivers/media/i2c/ir-kbd-i2c.c
8284 +@@ -678,8 +678,8 @@ static int zilog_tx(struct rc_dev *rcdev, unsigned int *txbuf,
8285 + goto out_unlock;
8286 + }
8287 +
8288 +- i = i2c_master_recv(ir->tx_c, buf, 1);
8289 +- if (i != 1) {
8290 ++ ret = i2c_master_recv(ir->tx_c, buf, 1);
8291 ++ if (ret != 1) {
8292 + dev_err(&ir->rc->dev, "i2c_master_recv failed with %d\n", ret);
8293 + ret = -EIO;
8294 + goto out_unlock;
8295 +diff --git a/drivers/media/i2c/ov2659.c b/drivers/media/i2c/ov2659.c
8296 +index 42f64175a6dff..fb78a1cedc03b 100644
8297 +--- a/drivers/media/i2c/ov2659.c
8298 ++++ b/drivers/media/i2c/ov2659.c
8299 +@@ -204,6 +204,7 @@ struct ov2659 {
8300 + struct i2c_client *client;
8301 + struct v4l2_ctrl_handler ctrls;
8302 + struct v4l2_ctrl *link_frequency;
8303 ++ struct clk *clk;
8304 + const struct ov2659_framesize *frame_size;
8305 + struct sensor_register *format_ctrl_regs;
8306 + struct ov2659_pll_ctrl pll;
8307 +@@ -1270,6 +1271,8 @@ static int ov2659_power_off(struct device *dev)
8308 +
8309 + gpiod_set_value(ov2659->pwdn_gpio, 1);
8310 +
8311 ++ clk_disable_unprepare(ov2659->clk);
8312 ++
8313 + return 0;
8314 + }
8315 +
8316 +@@ -1278,9 +1281,17 @@ static int ov2659_power_on(struct device *dev)
8317 + struct i2c_client *client = to_i2c_client(dev);
8318 + struct v4l2_subdev *sd = i2c_get_clientdata(client);
8319 + struct ov2659 *ov2659 = to_ov2659(sd);
8320 ++ int ret;
8321 +
8322 + dev_dbg(&client->dev, "%s:\n", __func__);
8323 +
8324 ++ ret = clk_prepare_enable(ov2659->clk);
8325 ++ if (ret) {
8326 ++ dev_err(&client->dev, "%s: failed to enable clock\n",
8327 ++ __func__);
8328 ++ return ret;
8329 ++ }
8330 ++
8331 + gpiod_set_value(ov2659->pwdn_gpio, 0);
8332 +
8333 + if (ov2659->resetb_gpio) {
8334 +@@ -1425,7 +1436,6 @@ static int ov2659_probe(struct i2c_client *client)
8335 + const struct ov2659_platform_data *pdata = ov2659_get_pdata(client);
8336 + struct v4l2_subdev *sd;
8337 + struct ov2659 *ov2659;
8338 +- struct clk *clk;
8339 + int ret;
8340 +
8341 + if (!pdata) {
8342 +@@ -1440,11 +1450,11 @@ static int ov2659_probe(struct i2c_client *client)
8343 + ov2659->pdata = pdata;
8344 + ov2659->client = client;
8345 +
8346 +- clk = devm_clk_get(&client->dev, "xvclk");
8347 +- if (IS_ERR(clk))
8348 +- return PTR_ERR(clk);
8349 ++ ov2659->clk = devm_clk_get(&client->dev, "xvclk");
8350 ++ if (IS_ERR(ov2659->clk))
8351 ++ return PTR_ERR(ov2659->clk);
8352 +
8353 +- ov2659->xvclk_frequency = clk_get_rate(clk);
8354 ++ ov2659->xvclk_frequency = clk_get_rate(ov2659->clk);
8355 + if (ov2659->xvclk_frequency < 6000000 ||
8356 + ov2659->xvclk_frequency > 27000000)
8357 + return -EINVAL;
8358 +@@ -1506,7 +1516,9 @@ static int ov2659_probe(struct i2c_client *client)
8359 + ov2659->frame_size = &ov2659_framesizes[2];
8360 + ov2659->format_ctrl_regs = ov2659_formats[0].format_ctrl_regs;
8361 +
8362 +- ov2659_power_on(&client->dev);
8363 ++ ret = ov2659_power_on(&client->dev);
8364 ++ if (ret < 0)
8365 ++ goto error;
8366 +
8367 + ret = ov2659_detect(sd);
8368 + if (ret < 0)
8369 +diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-core.c b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
8370 +index 5b4c4a3547c93..71804a70bc6d7 100644
8371 +--- a/drivers/media/i2c/s5c73m3/s5c73m3-core.c
8372 ++++ b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
8373 +@@ -1386,7 +1386,7 @@ static int __s5c73m3_power_on(struct s5c73m3 *state)
8374 + s5c73m3_gpio_deassert(state, STBY);
8375 + usleep_range(100, 200);
8376 +
8377 +- s5c73m3_gpio_deassert(state, RST);
8378 ++ s5c73m3_gpio_deassert(state, RSET);
8379 + usleep_range(50, 100);
8380 +
8381 + return 0;
8382 +@@ -1401,7 +1401,7 @@ static int __s5c73m3_power_off(struct s5c73m3 *state)
8383 + {
8384 + int i, ret;
8385 +
8386 +- if (s5c73m3_gpio_assert(state, RST))
8387 ++ if (s5c73m3_gpio_assert(state, RSET))
8388 + usleep_range(10, 50);
8389 +
8390 + if (s5c73m3_gpio_assert(state, STBY))
8391 +@@ -1606,7 +1606,7 @@ static int s5c73m3_get_platform_data(struct s5c73m3 *state)
8392 +
8393 + state->mclk_frequency = pdata->mclk_frequency;
8394 + state->gpio[STBY] = pdata->gpio_stby;
8395 +- state->gpio[RST] = pdata->gpio_reset;
8396 ++ state->gpio[RSET] = pdata->gpio_reset;
8397 + return 0;
8398 + }
8399 +
8400 +diff --git a/drivers/media/i2c/s5c73m3/s5c73m3.h b/drivers/media/i2c/s5c73m3/s5c73m3.h
8401 +index ef7e85b34263b..c3fcfdd3ea66d 100644
8402 +--- a/drivers/media/i2c/s5c73m3/s5c73m3.h
8403 ++++ b/drivers/media/i2c/s5c73m3/s5c73m3.h
8404 +@@ -353,7 +353,7 @@ struct s5c73m3_ctrls {
8405 +
8406 + enum s5c73m3_gpio_id {
8407 + STBY,
8408 +- RST,
8409 ++ RSET,
8410 + GPIO_NUM,
8411 + };
8412 +
8413 +diff --git a/drivers/media/i2c/s5k4ecgx.c b/drivers/media/i2c/s5k4ecgx.c
8414 +index b2d53417badf6..4e97309a67f41 100644
8415 +--- a/drivers/media/i2c/s5k4ecgx.c
8416 ++++ b/drivers/media/i2c/s5k4ecgx.c
8417 +@@ -173,7 +173,7 @@ static const char * const s5k4ecgx_supply_names[] = {
8418 +
8419 + enum s5k4ecgx_gpio_id {
8420 + STBY,
8421 +- RST,
8422 ++ RSET,
8423 + GPIO_NUM,
8424 + };
8425 +
8426 +@@ -476,7 +476,7 @@ static int __s5k4ecgx_power_on(struct s5k4ecgx *priv)
8427 + if (s5k4ecgx_gpio_set_value(priv, STBY, priv->gpio[STBY].level))
8428 + usleep_range(30, 50);
8429 +
8430 +- if (s5k4ecgx_gpio_set_value(priv, RST, priv->gpio[RST].level))
8431 ++ if (s5k4ecgx_gpio_set_value(priv, RSET, priv->gpio[RSET].level))
8432 + usleep_range(30, 50);
8433 +
8434 + return 0;
8435 +@@ -484,7 +484,7 @@ static int __s5k4ecgx_power_on(struct s5k4ecgx *priv)
8436 +
8437 + static int __s5k4ecgx_power_off(struct s5k4ecgx *priv)
8438 + {
8439 +- if (s5k4ecgx_gpio_set_value(priv, RST, !priv->gpio[RST].level))
8440 ++ if (s5k4ecgx_gpio_set_value(priv, RSET, !priv->gpio[RSET].level))
8441 + usleep_range(30, 50);
8442 +
8443 + if (s5k4ecgx_gpio_set_value(priv, STBY, !priv->gpio[STBY].level))
8444 +@@ -872,7 +872,7 @@ static int s5k4ecgx_config_gpios(struct s5k4ecgx *priv,
8445 + int ret;
8446 +
8447 + priv->gpio[STBY].gpio = -EINVAL;
8448 +- priv->gpio[RST].gpio = -EINVAL;
8449 ++ priv->gpio[RSET].gpio = -EINVAL;
8450 +
8451 + ret = s5k4ecgx_config_gpio(gpio->gpio, gpio->level, "S5K4ECGX_STBY");
8452 +
8453 +@@ -891,7 +891,7 @@ static int s5k4ecgx_config_gpios(struct s5k4ecgx *priv,
8454 + s5k4ecgx_free_gpios(priv);
8455 + return ret;
8456 + }
8457 +- priv->gpio[RST] = *gpio;
8458 ++ priv->gpio[RSET] = *gpio;
8459 + if (gpio_is_valid(gpio->gpio))
8460 + gpio_set_value(gpio->gpio, 0);
8461 +
8462 +diff --git a/drivers/media/i2c/s5k5baf.c b/drivers/media/i2c/s5k5baf.c
8463 +index ec6f22efe19ad..ec65a8e084c6a 100644
8464 +--- a/drivers/media/i2c/s5k5baf.c
8465 ++++ b/drivers/media/i2c/s5k5baf.c
8466 +@@ -235,7 +235,7 @@ struct s5k5baf_gpio {
8467 +
8468 + enum s5k5baf_gpio_id {
8469 + STBY,
8470 +- RST,
8471 ++ RSET,
8472 + NUM_GPIOS,
8473 + };
8474 +
8475 +@@ -969,7 +969,7 @@ static int s5k5baf_power_on(struct s5k5baf *state)
8476 +
8477 + s5k5baf_gpio_deassert(state, STBY);
8478 + usleep_range(50, 100);
8479 +- s5k5baf_gpio_deassert(state, RST);
8480 ++ s5k5baf_gpio_deassert(state, RSET);
8481 + return 0;
8482 +
8483 + err_reg_dis:
8484 +@@ -987,7 +987,7 @@ static int s5k5baf_power_off(struct s5k5baf *state)
8485 + state->apply_cfg = 0;
8486 + state->apply_crop = 0;
8487 +
8488 +- s5k5baf_gpio_assert(state, RST);
8489 ++ s5k5baf_gpio_assert(state, RSET);
8490 + s5k5baf_gpio_assert(state, STBY);
8491 +
8492 + if (!IS_ERR(state->clock))
8493 +diff --git a/drivers/media/i2c/s5k6aa.c b/drivers/media/i2c/s5k6aa.c
8494 +index 72439fae7968b..6516e205e9a3d 100644
8495 +--- a/drivers/media/i2c/s5k6aa.c
8496 ++++ b/drivers/media/i2c/s5k6aa.c
8497 +@@ -177,7 +177,7 @@ static const char * const s5k6aa_supply_names[] = {
8498 +
8499 + enum s5k6aa_gpio_id {
8500 + STBY,
8501 +- RST,
8502 ++ RSET,
8503 + GPIO_NUM,
8504 + };
8505 +
8506 +@@ -841,7 +841,7 @@ static int __s5k6aa_power_on(struct s5k6aa *s5k6aa)
8507 + ret = s5k6aa->s_power(1);
8508 + usleep_range(4000, 5000);
8509 +
8510 +- if (s5k6aa_gpio_deassert(s5k6aa, RST))
8511 ++ if (s5k6aa_gpio_deassert(s5k6aa, RSET))
8512 + msleep(20);
8513 +
8514 + return ret;
8515 +@@ -851,7 +851,7 @@ static int __s5k6aa_power_off(struct s5k6aa *s5k6aa)
8516 + {
8517 + int ret;
8518 +
8519 +- if (s5k6aa_gpio_assert(s5k6aa, RST))
8520 ++ if (s5k6aa_gpio_assert(s5k6aa, RSET))
8521 + usleep_range(100, 150);
8522 +
8523 + if (s5k6aa->s_power) {
8524 +@@ -1510,7 +1510,7 @@ static int s5k6aa_configure_gpios(struct s5k6aa *s5k6aa,
8525 + int ret;
8526 +
8527 + s5k6aa->gpio[STBY].gpio = -EINVAL;
8528 +- s5k6aa->gpio[RST].gpio = -EINVAL;
8529 ++ s5k6aa->gpio[RSET].gpio = -EINVAL;
8530 +
8531 + gpio = &pdata->gpio_stby;
8532 + if (gpio_is_valid(gpio->gpio)) {
8533 +@@ -1533,7 +1533,7 @@ static int s5k6aa_configure_gpios(struct s5k6aa *s5k6aa,
8534 + if (ret < 0)
8535 + return ret;
8536 +
8537 +- s5k6aa->gpio[RST] = *gpio;
8538 ++ s5k6aa->gpio[RSET] = *gpio;
8539 + }
8540 +
8541 + return 0;
8542 +diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
8543 +index 1b309bb743c7b..f21da11caf224 100644
8544 +--- a/drivers/media/i2c/tc358743.c
8545 ++++ b/drivers/media/i2c/tc358743.c
8546 +@@ -1974,6 +1974,7 @@ static int tc358743_probe_of(struct tc358743_state *state)
8547 + bps_pr_lane = 2 * endpoint.link_frequencies[0];
8548 + if (bps_pr_lane < 62500000U || bps_pr_lane > 1000000000U) {
8549 + dev_err(dev, "unsupported bps per lane: %u bps\n", bps_pr_lane);
8550 ++ ret = -EINVAL;
8551 + goto disable_clk;
8552 + }
8553 +
8554 +diff --git a/drivers/media/mc/Makefile b/drivers/media/mc/Makefile
8555 +index 119037f0e686d..2b7af42ba59c1 100644
8556 +--- a/drivers/media/mc/Makefile
8557 ++++ b/drivers/media/mc/Makefile
8558 +@@ -3,7 +3,7 @@
8559 + mc-objs := mc-device.o mc-devnode.o mc-entity.o \
8560 + mc-request.o
8561 +
8562 +-ifeq ($(CONFIG_USB),y)
8563 ++ifneq ($(CONFIG_USB),)
8564 + mc-objs += mc-dev-allocator.o
8565 + endif
8566 +
8567 +diff --git a/drivers/media/pci/bt8xx/bt878.c b/drivers/media/pci/bt8xx/bt878.c
8568 +index 79ba15a9385a5..0705913972c66 100644
8569 +--- a/drivers/media/pci/bt8xx/bt878.c
8570 ++++ b/drivers/media/pci/bt8xx/bt878.c
8571 +@@ -300,7 +300,8 @@ static irqreturn_t bt878_irq(int irq, void *dev_id)
8572 + }
8573 + if (astat & BT878_ARISCI) {
8574 + bt->finished_block = (stat & BT878_ARISCS) >> 28;
8575 +- tasklet_schedule(&bt->tasklet);
8576 ++ if (bt->tasklet.callback)
8577 ++ tasklet_schedule(&bt->tasklet);
8578 + break;
8579 + }
8580 + count++;
8581 +@@ -477,6 +478,9 @@ static int bt878_probe(struct pci_dev *dev, const struct pci_device_id *pci_id)
8582 + btwrite(0, BT878_AINT_MASK);
8583 + bt878_num++;
8584 +
8585 ++ if (!bt->tasklet.func)
8586 ++ tasklet_disable(&bt->tasklet);
8587 ++
8588 + return 0;
8589 +
8590 + fail2:
8591 +diff --git a/drivers/media/pci/cobalt/cobalt-driver.c b/drivers/media/pci/cobalt/cobalt-driver.c
8592 +index 0695078ef8125..1bd8bbe57a30e 100644
8593 +--- a/drivers/media/pci/cobalt/cobalt-driver.c
8594 ++++ b/drivers/media/pci/cobalt/cobalt-driver.c
8595 +@@ -667,6 +667,7 @@ static int cobalt_probe(struct pci_dev *pci_dev,
8596 + return -ENOMEM;
8597 + cobalt->pci_dev = pci_dev;
8598 + cobalt->instance = i;
8599 ++ mutex_init(&cobalt->pci_lock);
8600 +
8601 + retval = v4l2_device_register(&pci_dev->dev, &cobalt->v4l2_dev);
8602 + if (retval) {
8603 +diff --git a/drivers/media/pci/cobalt/cobalt-driver.h b/drivers/media/pci/cobalt/cobalt-driver.h
8604 +index bca68572b3242..12c33e035904c 100644
8605 +--- a/drivers/media/pci/cobalt/cobalt-driver.h
8606 ++++ b/drivers/media/pci/cobalt/cobalt-driver.h
8607 +@@ -251,6 +251,8 @@ struct cobalt {
8608 + int instance;
8609 + struct pci_dev *pci_dev;
8610 + struct v4l2_device v4l2_dev;
8611 ++ /* serialize PCI access in cobalt_s_bit_sysctrl() */
8612 ++ struct mutex pci_lock;
8613 +
8614 + void __iomem *bar0, *bar1;
8615 +
8616 +@@ -320,10 +322,13 @@ static inline u32 cobalt_g_sysctrl(struct cobalt *cobalt)
8617 + static inline void cobalt_s_bit_sysctrl(struct cobalt *cobalt,
8618 + int bit, int val)
8619 + {
8620 +- u32 ctrl = cobalt_read_bar1(cobalt, COBALT_SYS_CTRL_BASE);
8621 ++ u32 ctrl;
8622 +
8623 ++ mutex_lock(&cobalt->pci_lock);
8624 ++ ctrl = cobalt_read_bar1(cobalt, COBALT_SYS_CTRL_BASE);
8625 + cobalt_write_bar1(cobalt, COBALT_SYS_CTRL_BASE,
8626 + (ctrl & ~(1UL << bit)) | (val << bit));
8627 ++ mutex_unlock(&cobalt->pci_lock);
8628 + }
8629 +
8630 + static inline u32 cobalt_g_sysstat(struct cobalt *cobalt)
8631 +diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.c b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
8632 +index dcbfe8c9abc72..2fe4a0bd02844 100644
8633 +--- a/drivers/media/pci/intel/ipu3/ipu3-cio2.c
8634 ++++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
8635 +@@ -1476,7 +1476,8 @@ static int cio2_parse_firmware(struct cio2_device *cio2)
8636 + struct v4l2_fwnode_endpoint vep = {
8637 + .bus_type = V4L2_MBUS_CSI2_DPHY
8638 + };
8639 +- struct sensor_async_subdev *s_asd = NULL;
8640 ++ struct sensor_async_subdev *s_asd;
8641 ++ struct v4l2_async_subdev *asd;
8642 + struct fwnode_handle *ep;
8643 +
8644 + ep = fwnode_graph_get_endpoint_by_id(
8645 +@@ -1490,27 +1491,23 @@ static int cio2_parse_firmware(struct cio2_device *cio2)
8646 + if (ret)
8647 + goto err_parse;
8648 +
8649 +- s_asd = kzalloc(sizeof(*s_asd), GFP_KERNEL);
8650 +- if (!s_asd) {
8651 +- ret = -ENOMEM;
8652 ++ asd = v4l2_async_notifier_add_fwnode_remote_subdev(
8653 ++ &cio2->notifier, ep, sizeof(*s_asd));
8654 ++ if (IS_ERR(asd)) {
8655 ++ ret = PTR_ERR(asd);
8656 + goto err_parse;
8657 + }
8658 +
8659 ++ s_asd = container_of(asd, struct sensor_async_subdev, asd);
8660 + s_asd->csi2.port = vep.base.port;
8661 + s_asd->csi2.lanes = vep.bus.mipi_csi2.num_data_lanes;
8662 +
8663 +- ret = v4l2_async_notifier_add_fwnode_remote_subdev(
8664 +- &cio2->notifier, ep, &s_asd->asd);
8665 +- if (ret)
8666 +- goto err_parse;
8667 +-
8668 + fwnode_handle_put(ep);
8669 +
8670 + continue;
8671 +
8672 + err_parse:
8673 + fwnode_handle_put(ep);
8674 +- kfree(s_asd);
8675 + return ret;
8676 + }
8677 +
8678 +diff --git a/drivers/media/platform/am437x/am437x-vpfe.c b/drivers/media/platform/am437x/am437x-vpfe.c
8679 +index 0fb9f9ba1219d..31cee69adbe1f 100644
8680 +--- a/drivers/media/platform/am437x/am437x-vpfe.c
8681 ++++ b/drivers/media/platform/am437x/am437x-vpfe.c
8682 +@@ -1021,7 +1021,9 @@ static int vpfe_initialize_device(struct vpfe_device *vpfe)
8683 + if (ret)
8684 + return ret;
8685 +
8686 +- pm_runtime_get_sync(vpfe->pdev);
8687 ++ ret = pm_runtime_resume_and_get(vpfe->pdev);
8688 ++ if (ret < 0)
8689 ++ return ret;
8690 +
8691 + vpfe_config_enable(&vpfe->ccdc, 1);
8692 +
8693 +@@ -2443,7 +2445,11 @@ static int vpfe_probe(struct platform_device *pdev)
8694 + pm_runtime_enable(&pdev->dev);
8695 +
8696 + /* for now just enable it here instead of waiting for the open */
8697 +- pm_runtime_get_sync(&pdev->dev);
8698 ++ ret = pm_runtime_resume_and_get(&pdev->dev);
8699 ++ if (ret < 0) {
8700 ++ vpfe_err(vpfe, "Unable to resume device.\n");
8701 ++ goto probe_out_v4l2_unregister;
8702 ++ }
8703 +
8704 + vpfe_ccdc_config_defaults(ccdc);
8705 +
8706 +@@ -2530,6 +2536,11 @@ static int vpfe_suspend(struct device *dev)
8707 +
8708 + /* only do full suspend if streaming has started */
8709 + if (vb2_start_streaming_called(&vpfe->buffer_queue)) {
8710 ++ /*
8711 ++ * ignore RPM resume errors here, as it is already too late.
8712 ++ * A check like that should happen earlier, either at
8713 ++ * open() or just before start streaming.
8714 ++ */
8715 + pm_runtime_get_sync(dev);
8716 + vpfe_config_enable(ccdc, 1);
8717 +
8718 +diff --git a/drivers/media/platform/exynos-gsc/gsc-m2m.c b/drivers/media/platform/exynos-gsc/gsc-m2m.c
8719 +index 27a3c92c73bce..f1cf847d1cc2d 100644
8720 +--- a/drivers/media/platform/exynos-gsc/gsc-m2m.c
8721 ++++ b/drivers/media/platform/exynos-gsc/gsc-m2m.c
8722 +@@ -56,10 +56,8 @@ static void __gsc_m2m_job_abort(struct gsc_ctx *ctx)
8723 + static int gsc_m2m_start_streaming(struct vb2_queue *q, unsigned int count)
8724 + {
8725 + struct gsc_ctx *ctx = q->drv_priv;
8726 +- int ret;
8727 +
8728 +- ret = pm_runtime_get_sync(&ctx->gsc_dev->pdev->dev);
8729 +- return ret > 0 ? 0 : ret;
8730 ++ return pm_runtime_resume_and_get(&ctx->gsc_dev->pdev->dev);
8731 + }
8732 +
8733 + static void __gsc_m2m_cleanup_queue(struct gsc_ctx *ctx)
8734 +diff --git a/drivers/media/platform/exynos4-is/fimc-capture.c b/drivers/media/platform/exynos4-is/fimc-capture.c
8735 +index 6000a4e789adb..808b490c1910f 100644
8736 +--- a/drivers/media/platform/exynos4-is/fimc-capture.c
8737 ++++ b/drivers/media/platform/exynos4-is/fimc-capture.c
8738 +@@ -478,11 +478,9 @@ static int fimc_capture_open(struct file *file)
8739 + goto unlock;
8740 +
8741 + set_bit(ST_CAPT_BUSY, &fimc->state);
8742 +- ret = pm_runtime_get_sync(&fimc->pdev->dev);
8743 +- if (ret < 0) {
8744 +- pm_runtime_put_sync(&fimc->pdev->dev);
8745 ++ ret = pm_runtime_resume_and_get(&fimc->pdev->dev);
8746 ++ if (ret < 0)
8747 + goto unlock;
8748 +- }
8749 +
8750 + ret = v4l2_fh_open(file);
8751 + if (ret) {
8752 +diff --git a/drivers/media/platform/exynos4-is/fimc-is.c b/drivers/media/platform/exynos4-is/fimc-is.c
8753 +index 32ab01e89196d..d26fa5967d821 100644
8754 +--- a/drivers/media/platform/exynos4-is/fimc-is.c
8755 ++++ b/drivers/media/platform/exynos4-is/fimc-is.c
8756 +@@ -828,9 +828,9 @@ static int fimc_is_probe(struct platform_device *pdev)
8757 + goto err_irq;
8758 + }
8759 +
8760 +- ret = pm_runtime_get_sync(dev);
8761 ++ ret = pm_runtime_resume_and_get(dev);
8762 + if (ret < 0)
8763 +- goto err_pm;
8764 ++ goto err_irq;
8765 +
8766 + vb2_dma_contig_set_max_seg_size(dev, DMA_BIT_MASK(32));
8767 +
8768 +diff --git a/drivers/media/platform/exynos4-is/fimc-isp-video.c b/drivers/media/platform/exynos4-is/fimc-isp-video.c
8769 +index 612b9872afc87..83688a7982f70 100644
8770 +--- a/drivers/media/platform/exynos4-is/fimc-isp-video.c
8771 ++++ b/drivers/media/platform/exynos4-is/fimc-isp-video.c
8772 +@@ -275,7 +275,7 @@ static int isp_video_open(struct file *file)
8773 + if (ret < 0)
8774 + goto unlock;
8775 +
8776 +- ret = pm_runtime_get_sync(&isp->pdev->dev);
8777 ++ ret = pm_runtime_resume_and_get(&isp->pdev->dev);
8778 + if (ret < 0)
8779 + goto rel_fh;
8780 +
8781 +@@ -293,7 +293,6 @@ static int isp_video_open(struct file *file)
8782 + if (!ret)
8783 + goto unlock;
8784 + rel_fh:
8785 +- pm_runtime_put_noidle(&isp->pdev->dev);
8786 + v4l2_fh_release(file);
8787 + unlock:
8788 + mutex_unlock(&isp->video_lock);
8789 +@@ -306,17 +305,20 @@ static int isp_video_release(struct file *file)
8790 + struct fimc_is_video *ivc = &isp->video_capture;
8791 + struct media_entity *entity = &ivc->ve.vdev.entity;
8792 + struct media_device *mdev = entity->graph_obj.mdev;
8793 ++ bool is_singular_file;
8794 +
8795 + mutex_lock(&isp->video_lock);
8796 +
8797 +- if (v4l2_fh_is_singular_file(file) && ivc->streaming) {
8798 ++ is_singular_file = v4l2_fh_is_singular_file(file);
8799 ++
8800 ++ if (is_singular_file && ivc->streaming) {
8801 + media_pipeline_stop(entity);
8802 + ivc->streaming = 0;
8803 + }
8804 +
8805 + _vb2_fop_release(file, NULL);
8806 +
8807 +- if (v4l2_fh_is_singular_file(file)) {
8808 ++ if (is_singular_file) {
8809 + fimc_pipeline_call(&ivc->ve, close);
8810 +
8811 + mutex_lock(&mdev->graph_mutex);
8812 +diff --git a/drivers/media/platform/exynos4-is/fimc-isp.c b/drivers/media/platform/exynos4-is/fimc-isp.c
8813 +index a77c49b185115..74b49d30901ed 100644
8814 +--- a/drivers/media/platform/exynos4-is/fimc-isp.c
8815 ++++ b/drivers/media/platform/exynos4-is/fimc-isp.c
8816 +@@ -304,11 +304,10 @@ static int fimc_isp_subdev_s_power(struct v4l2_subdev *sd, int on)
8817 + pr_debug("on: %d\n", on);
8818 +
8819 + if (on) {
8820 +- ret = pm_runtime_get_sync(&is->pdev->dev);
8821 +- if (ret < 0) {
8822 +- pm_runtime_put(&is->pdev->dev);
8823 ++ ret = pm_runtime_resume_and_get(&is->pdev->dev);
8824 ++ if (ret < 0)
8825 + return ret;
8826 +- }
8827 ++
8828 + set_bit(IS_ST_PWR_ON, &is->state);
8829 +
8830 + ret = fimc_is_start_firmware(is);
8831 +diff --git a/drivers/media/platform/exynos4-is/fimc-lite.c b/drivers/media/platform/exynos4-is/fimc-lite.c
8832 +index fdd0d369b1925..d279f282d5921 100644
8833 +--- a/drivers/media/platform/exynos4-is/fimc-lite.c
8834 ++++ b/drivers/media/platform/exynos4-is/fimc-lite.c
8835 +@@ -469,9 +469,9 @@ static int fimc_lite_open(struct file *file)
8836 + }
8837 +
8838 + set_bit(ST_FLITE_IN_USE, &fimc->state);
8839 +- ret = pm_runtime_get_sync(&fimc->pdev->dev);
8840 ++ ret = pm_runtime_resume_and_get(&fimc->pdev->dev);
8841 + if (ret < 0)
8842 +- goto err_pm;
8843 ++ goto err_in_use;
8844 +
8845 + ret = v4l2_fh_open(file);
8846 + if (ret < 0)
8847 +@@ -499,6 +499,7 @@ static int fimc_lite_open(struct file *file)
8848 + v4l2_fh_release(file);
8849 + err_pm:
8850 + pm_runtime_put_sync(&fimc->pdev->dev);
8851 ++err_in_use:
8852 + clear_bit(ST_FLITE_IN_USE, &fimc->state);
8853 + unlock:
8854 + mutex_unlock(&fimc->lock);
8855 +diff --git a/drivers/media/platform/exynos4-is/fimc-m2m.c b/drivers/media/platform/exynos4-is/fimc-m2m.c
8856 +index 4acb179556c41..24b1badd20807 100644
8857 +--- a/drivers/media/platform/exynos4-is/fimc-m2m.c
8858 ++++ b/drivers/media/platform/exynos4-is/fimc-m2m.c
8859 +@@ -73,17 +73,14 @@ static void fimc_m2m_shutdown(struct fimc_ctx *ctx)
8860 + static int start_streaming(struct vb2_queue *q, unsigned int count)
8861 + {
8862 + struct fimc_ctx *ctx = q->drv_priv;
8863 +- int ret;
8864 +
8865 +- ret = pm_runtime_get_sync(&ctx->fimc_dev->pdev->dev);
8866 +- return ret > 0 ? 0 : ret;
8867 ++ return pm_runtime_resume_and_get(&ctx->fimc_dev->pdev->dev);
8868 + }
8869 +
8870 + static void stop_streaming(struct vb2_queue *q)
8871 + {
8872 + struct fimc_ctx *ctx = q->drv_priv;
8873 +
8874 +-
8875 + fimc_m2m_shutdown(ctx);
8876 + fimc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR);
8877 + pm_runtime_put(&ctx->fimc_dev->pdev->dev);
8878 +diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
8879 +index e636c33e847bd..a9a8f0433fb2c 100644
8880 +--- a/drivers/media/platform/exynos4-is/media-dev.c
8881 ++++ b/drivers/media/platform/exynos4-is/media-dev.c
8882 +@@ -508,11 +508,9 @@ static int fimc_md_register_sensor_entities(struct fimc_md *fmd)
8883 + if (!fmd->pmf)
8884 + return -ENXIO;
8885 +
8886 +- ret = pm_runtime_get_sync(fmd->pmf);
8887 +- if (ret < 0) {
8888 +- pm_runtime_put(fmd->pmf);
8889 ++ ret = pm_runtime_resume_and_get(fmd->pmf);
8890 ++ if (ret < 0)
8891 + return ret;
8892 +- }
8893 +
8894 + fmd->num_sensors = 0;
8895 +
8896 +@@ -1282,13 +1280,11 @@ static DEVICE_ATTR(subdev_conf_mode, S_IWUSR | S_IRUGO,
8897 + static int cam_clk_prepare(struct clk_hw *hw)
8898 + {
8899 + struct cam_clk *camclk = to_cam_clk(hw);
8900 +- int ret;
8901 +
8902 + if (camclk->fmd->pmf == NULL)
8903 + return -ENODEV;
8904 +
8905 +- ret = pm_runtime_get_sync(camclk->fmd->pmf);
8906 +- return ret < 0 ? ret : 0;
8907 ++ return pm_runtime_resume_and_get(camclk->fmd->pmf);
8908 + }
8909 +
8910 + static void cam_clk_unprepare(struct clk_hw *hw)
8911 +diff --git a/drivers/media/platform/exynos4-is/mipi-csis.c b/drivers/media/platform/exynos4-is/mipi-csis.c
8912 +index 1aac167abb175..ebf39c8568943 100644
8913 +--- a/drivers/media/platform/exynos4-is/mipi-csis.c
8914 ++++ b/drivers/media/platform/exynos4-is/mipi-csis.c
8915 +@@ -494,7 +494,7 @@ static int s5pcsis_s_power(struct v4l2_subdev *sd, int on)
8916 + struct device *dev = &state->pdev->dev;
8917 +
8918 + if (on)
8919 +- return pm_runtime_get_sync(dev);
8920 ++ return pm_runtime_resume_and_get(dev);
8921 +
8922 + return pm_runtime_put_sync(dev);
8923 + }
8924 +@@ -509,11 +509,9 @@ static int s5pcsis_s_stream(struct v4l2_subdev *sd, int enable)
8925 +
8926 + if (enable) {
8927 + s5pcsis_clear_counters(state);
8928 +- ret = pm_runtime_get_sync(&state->pdev->dev);
8929 +- if (ret && ret != 1) {
8930 +- pm_runtime_put_noidle(&state->pdev->dev);
8931 ++ ret = pm_runtime_resume_and_get(&state->pdev->dev);
8932 ++ if (ret < 0)
8933 + return ret;
8934 +- }
8935 + }
8936 +
8937 + mutex_lock(&state->lock);
8938 +@@ -535,7 +533,7 @@ unlock:
8939 + if (!enable)
8940 + pm_runtime_put(&state->pdev->dev);
8941 +
8942 +- return ret == 1 ? 0 : ret;
8943 ++ return ret;
8944 + }
8945 +
8946 + static int s5pcsis_enum_mbus_code(struct v4l2_subdev *sd,
8947 +diff --git a/drivers/media/platform/marvell-ccic/mcam-core.c b/drivers/media/platform/marvell-ccic/mcam-core.c
8948 +index 34266fba824f2..e56c5e56e824a 100644
8949 +--- a/drivers/media/platform/marvell-ccic/mcam-core.c
8950 ++++ b/drivers/media/platform/marvell-ccic/mcam-core.c
8951 +@@ -918,6 +918,7 @@ static int mclk_enable(struct clk_hw *hw)
8952 + struct mcam_camera *cam = container_of(hw, struct mcam_camera, mclk_hw);
8953 + int mclk_src;
8954 + int mclk_div;
8955 ++ int ret;
8956 +
8957 + /*
8958 + * Clock the sensor appropriately. Controller clock should
8959 +@@ -931,7 +932,9 @@ static int mclk_enable(struct clk_hw *hw)
8960 + mclk_div = 2;
8961 + }
8962 +
8963 +- pm_runtime_get_sync(cam->dev);
8964 ++ ret = pm_runtime_resume_and_get(cam->dev);
8965 ++ if (ret < 0)
8966 ++ return ret;
8967 + clk_enable(cam->clk[0]);
8968 + mcam_reg_write(cam, REG_CLKCTRL, (mclk_src << 29) | mclk_div);
8969 + mcam_ctlr_power_up(cam);
8970 +@@ -1611,7 +1614,9 @@ static int mcam_v4l_open(struct file *filp)
8971 + ret = sensor_call(cam, core, s_power, 1);
8972 + if (ret)
8973 + goto out;
8974 +- pm_runtime_get_sync(cam->dev);
8975 ++ ret = pm_runtime_resume_and_get(cam->dev);
8976 ++ if (ret < 0)
8977 ++ goto out;
8978 + __mcam_cam_reset(cam);
8979 + mcam_set_config_needed(cam, 1);
8980 + }
8981 +diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c b/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c
8982 +index 724c7333b6e5a..45fc741c55411 100644
8983 +--- a/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c
8984 ++++ b/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c
8985 +@@ -394,12 +394,12 @@ static int mtk_mdp_m2m_start_streaming(struct vb2_queue *q, unsigned int count)
8986 + struct mtk_mdp_ctx *ctx = q->drv_priv;
8987 + int ret;
8988 +
8989 +- ret = pm_runtime_get_sync(&ctx->mdp_dev->pdev->dev);
8990 ++ ret = pm_runtime_resume_and_get(&ctx->mdp_dev->pdev->dev);
8991 + if (ret < 0)
8992 +- mtk_mdp_dbg(1, "[%d] pm_runtime_get_sync failed:%d",
8993 ++ mtk_mdp_dbg(1, "[%d] pm_runtime_resume_and_get failed:%d",
8994 + ctx->id, ret);
8995 +
8996 +- return 0;
8997 ++ return ret;
8998 + }
8999 +
9000 + static void *mtk_mdp_m2m_buf_remove(struct mtk_mdp_ctx *ctx,
9001 +diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c
9002 +index 145686d2c219c..f59ef8c8c9db4 100644
9003 +--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c
9004 ++++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c
9005 +@@ -126,7 +126,9 @@ static int fops_vcodec_open(struct file *file)
9006 + mtk_vcodec_dec_set_default_params(ctx);
9007 +
9008 + if (v4l2_fh_is_singular(&ctx->fh)) {
9009 +- mtk_vcodec_dec_pw_on(&dev->pm);
9010 ++ ret = mtk_vcodec_dec_pw_on(&dev->pm);
9011 ++ if (ret < 0)
9012 ++ goto err_load_fw;
9013 + /*
9014 + * Does nothing if firmware was already loaded.
9015 + */
9016 +diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c
9017 +index ddee7046ce422..6038db96f71c3 100644
9018 +--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c
9019 ++++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c
9020 +@@ -88,13 +88,15 @@ void mtk_vcodec_release_dec_pm(struct mtk_vcodec_dev *dev)
9021 + put_device(dev->pm.larbvdec);
9022 + }
9023 +
9024 +-void mtk_vcodec_dec_pw_on(struct mtk_vcodec_pm *pm)
9025 ++int mtk_vcodec_dec_pw_on(struct mtk_vcodec_pm *pm)
9026 + {
9027 + int ret;
9028 +
9029 +- ret = pm_runtime_get_sync(pm->dev);
9030 ++ ret = pm_runtime_resume_and_get(pm->dev);
9031 + if (ret)
9032 +- mtk_v4l2_err("pm_runtime_get_sync fail %d", ret);
9033 ++ mtk_v4l2_err("pm_runtime_resume_and_get fail %d", ret);
9034 ++
9035 ++ return ret;
9036 + }
9037 +
9038 + void mtk_vcodec_dec_pw_off(struct mtk_vcodec_pm *pm)
9039 +diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.h
9040 +index 872d8bf8cfaf3..280aeaefdb651 100644
9041 +--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.h
9042 ++++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.h
9043 +@@ -12,7 +12,7 @@
9044 + int mtk_vcodec_init_dec_pm(struct mtk_vcodec_dev *dev);
9045 + void mtk_vcodec_release_dec_pm(struct mtk_vcodec_dev *dev);
9046 +
9047 +-void mtk_vcodec_dec_pw_on(struct mtk_vcodec_pm *pm);
9048 ++int mtk_vcodec_dec_pw_on(struct mtk_vcodec_pm *pm);
9049 + void mtk_vcodec_dec_pw_off(struct mtk_vcodec_pm *pm);
9050 + void mtk_vcodec_dec_clock_on(struct mtk_vcodec_pm *pm);
9051 + void mtk_vcodec_dec_clock_off(struct mtk_vcodec_pm *pm);
9052 +diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
9053 +index b1fc4518e275d..1311b4996eceb 100644
9054 +--- a/drivers/media/platform/omap3isp/isp.c
9055 ++++ b/drivers/media/platform/omap3isp/isp.c
9056 +@@ -2126,21 +2126,6 @@ static void isp_parse_of_csi1_endpoint(struct device *dev,
9057 + buscfg->bus.ccp2.crc = 1;
9058 + }
9059 +
9060 +-static int isp_alloc_isd(struct isp_async_subdev **isd,
9061 +- struct isp_bus_cfg **buscfg)
9062 +-{
9063 +- struct isp_async_subdev *__isd;
9064 +-
9065 +- __isd = kzalloc(sizeof(*__isd), GFP_KERNEL);
9066 +- if (!__isd)
9067 +- return -ENOMEM;
9068 +-
9069 +- *isd = __isd;
9070 +- *buscfg = &__isd->bus;
9071 +-
9072 +- return 0;
9073 +-}
9074 +-
9075 + static struct {
9076 + u32 phy;
9077 + u32 csi2_if;
9078 +@@ -2156,7 +2141,7 @@ static int isp_parse_of_endpoints(struct isp_device *isp)
9079 + {
9080 + struct fwnode_handle *ep;
9081 + struct isp_async_subdev *isd = NULL;
9082 +- struct isp_bus_cfg *buscfg;
9083 ++ struct v4l2_async_subdev *asd;
9084 + unsigned int i;
9085 +
9086 + ep = fwnode_graph_get_endpoint_by_id(
9087 +@@ -2174,20 +2159,15 @@ static int isp_parse_of_endpoints(struct isp_device *isp)
9088 + ret = v4l2_fwnode_endpoint_parse(ep, &vep);
9089 +
9090 + if (!ret) {
9091 +- ret = isp_alloc_isd(&isd, &buscfg);
9092 +- if (ret)
9093 +- return ret;
9094 +- }
9095 +-
9096 +- if (!ret) {
9097 +- isp_parse_of_parallel_endpoint(isp->dev, &vep, buscfg);
9098 +- ret = v4l2_async_notifier_add_fwnode_remote_subdev(
9099 +- &isp->notifier, ep, &isd->asd);
9100 ++ asd = v4l2_async_notifier_add_fwnode_remote_subdev(
9101 ++ &isp->notifier, ep, sizeof(*isd));
9102 ++ if (!IS_ERR(asd)) {
9103 ++ isd = container_of(asd, struct isp_async_subdev, asd);
9104 ++ isp_parse_of_parallel_endpoint(isp->dev, &vep, &isd->bus);
9105 ++ }
9106 + }
9107 +
9108 + fwnode_handle_put(ep);
9109 +- if (ret)
9110 +- kfree(isd);
9111 + }
9112 +
9113 + for (i = 0; i < ARRAY_SIZE(isp_bus_interfaces); i++) {
9114 +@@ -2206,15 +2186,8 @@ static int isp_parse_of_endpoints(struct isp_device *isp)
9115 + dev_dbg(isp->dev, "parsing serial interface %u, node %pOF\n", i,
9116 + to_of_node(ep));
9117 +
9118 +- ret = isp_alloc_isd(&isd, &buscfg);
9119 +- if (ret)
9120 +- return ret;
9121 +-
9122 + ret = v4l2_fwnode_endpoint_parse(ep, &vep);
9123 +- if (!ret) {
9124 +- buscfg->interface = isp_bus_interfaces[i].csi2_if;
9125 +- isp_parse_of_csi2_endpoint(isp->dev, &vep, buscfg);
9126 +- } else if (ret == -ENXIO) {
9127 ++ if (ret == -ENXIO) {
9128 + vep = (struct v4l2_fwnode_endpoint)
9129 + { .bus_type = V4L2_MBUS_CSI1 };
9130 + ret = v4l2_fwnode_endpoint_parse(ep, &vep);
9131 +@@ -2224,21 +2197,35 @@ static int isp_parse_of_endpoints(struct isp_device *isp)
9132 + { .bus_type = V4L2_MBUS_CCP2 };
9133 + ret = v4l2_fwnode_endpoint_parse(ep, &vep);
9134 + }
9135 +- if (!ret) {
9136 +- buscfg->interface =
9137 +- isp_bus_interfaces[i].csi1_if;
9138 +- isp_parse_of_csi1_endpoint(isp->dev, &vep,
9139 +- buscfg);
9140 +- }
9141 + }
9142 +
9143 +- if (!ret)
9144 +- ret = v4l2_async_notifier_add_fwnode_remote_subdev(
9145 +- &isp->notifier, ep, &isd->asd);
9146 ++ if (!ret) {
9147 ++ asd = v4l2_async_notifier_add_fwnode_remote_subdev(
9148 ++ &isp->notifier, ep, sizeof(*isd));
9149 ++
9150 ++ if (!IS_ERR(asd)) {
9151 ++ isd = container_of(asd, struct isp_async_subdev, asd);
9152 ++
9153 ++ switch (vep.bus_type) {
9154 ++ case V4L2_MBUS_CSI2_DPHY:
9155 ++ isd->bus.interface =
9156 ++ isp_bus_interfaces[i].csi2_if;
9157 ++ isp_parse_of_csi2_endpoint(isp->dev, &vep, &isd->bus);
9158 ++ break;
9159 ++ case V4L2_MBUS_CSI1:
9160 ++ case V4L2_MBUS_CCP2:
9161 ++ isd->bus.interface =
9162 ++ isp_bus_interfaces[i].csi1_if;
9163 ++ isp_parse_of_csi1_endpoint(isp->dev, &vep,
9164 ++ &isd->bus);
9165 ++ break;
9166 ++ default:
9167 ++ break;
9168 ++ }
9169 ++ }
9170 ++ }
9171 +
9172 + fwnode_handle_put(ep);
9173 +- if (ret)
9174 +- kfree(isd);
9175 + }
9176 +
9177 + return 0;
9178 +diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
9179 +index fd5993b3e6743..58ddebbb84468 100644
9180 +--- a/drivers/media/platform/qcom/venus/core.c
9181 ++++ b/drivers/media/platform/qcom/venus/core.c
9182 +@@ -48,52 +48,86 @@ static const struct hfi_core_ops venus_core_ops = {
9183 + .event_notify = venus_event_notify,
9184 + };
9185 +
9186 ++#define RPM_WAIT_FOR_IDLE_MAX_ATTEMPTS 10
9187 ++
9188 + static void venus_sys_error_handler(struct work_struct *work)
9189 + {
9190 + struct venus_core *core =
9191 + container_of(work, struct venus_core, work.work);
9192 +- int ret = 0;
9193 +-
9194 +- pm_runtime_get_sync(core->dev);
9195 ++ int ret, i, max_attempts = RPM_WAIT_FOR_IDLE_MAX_ATTEMPTS;
9196 ++ const char *err_msg = "";
9197 ++ bool failed = false;
9198 ++
9199 ++ ret = pm_runtime_get_sync(core->dev);
9200 ++ if (ret < 0) {
9201 ++ err_msg = "resume runtime PM";
9202 ++ max_attempts = 0;
9203 ++ failed = true;
9204 ++ }
9205 +
9206 + hfi_core_deinit(core, true);
9207 +
9208 +- dev_warn(core->dev, "system error has occurred, starting recovery!\n");
9209 +-
9210 + mutex_lock(&core->lock);
9211 +
9212 +- while (pm_runtime_active(core->dev_dec) || pm_runtime_active(core->dev_enc))
9213 ++ for (i = 0; i < max_attempts; i++) {
9214 ++ if (!pm_runtime_active(core->dev_dec) && !pm_runtime_active(core->dev_enc))
9215 ++ break;
9216 + msleep(10);
9217 ++ }
9218 +
9219 + venus_shutdown(core);
9220 +
9221 + pm_runtime_put_sync(core->dev);
9222 +
9223 +- while (core->pmdomains[0] && pm_runtime_active(core->pmdomains[0]))
9224 ++ for (i = 0; i < max_attempts; i++) {
9225 ++ if (!core->pmdomains[0] || !pm_runtime_active(core->pmdomains[0]))
9226 ++ break;
9227 + usleep_range(1000, 1500);
9228 ++ }
9229 +
9230 + hfi_reinit(core);
9231 +
9232 +- pm_runtime_get_sync(core->dev);
9233 ++ ret = pm_runtime_get_sync(core->dev);
9234 ++ if (ret < 0) {
9235 ++ err_msg = "resume runtime PM";
9236 ++ failed = true;
9237 ++ }
9238 ++
9239 ++ ret = venus_boot(core);
9240 ++ if (ret && !failed) {
9241 ++ err_msg = "boot Venus";
9242 ++ failed = true;
9243 ++ }
9244 +
9245 +- ret |= venus_boot(core);
9246 +- ret |= hfi_core_resume(core, true);
9247 ++ ret = hfi_core_resume(core, true);
9248 ++ if (ret && !failed) {
9249 ++ err_msg = "resume HFI";
9250 ++ failed = true;
9251 ++ }
9252 +
9253 + enable_irq(core->irq);
9254 +
9255 + mutex_unlock(&core->lock);
9256 +
9257 +- ret |= hfi_core_init(core);
9258 ++ ret = hfi_core_init(core);
9259 ++ if (ret && !failed) {
9260 ++ err_msg = "init HFI";
9261 ++ failed = true;
9262 ++ }
9263 +
9264 + pm_runtime_put_sync(core->dev);
9265 +
9266 +- if (ret) {
9267 ++ if (failed) {
9268 + disable_irq_nosync(core->irq);
9269 +- dev_warn(core->dev, "recovery failed (%d)\n", ret);
9270 ++ dev_warn_ratelimited(core->dev,
9271 ++ "System error has occurred, recovery failed to %s\n",
9272 ++ err_msg);
9273 + schedule_delayed_work(&core->work, msecs_to_jiffies(10));
9274 + return;
9275 + }
9276 +
9277 ++ dev_warn(core->dev, "system error has occurred (recovered)\n");
9278 ++
9279 + mutex_lock(&core->lock);
9280 + core->sys_error = false;
9281 + mutex_unlock(&core->lock);
9282 +diff --git a/drivers/media/platform/s5p-g2d/g2d.c b/drivers/media/platform/s5p-g2d/g2d.c
9283 +index 15bcb7f6e113c..1cb5eaabf340b 100644
9284 +--- a/drivers/media/platform/s5p-g2d/g2d.c
9285 ++++ b/drivers/media/platform/s5p-g2d/g2d.c
9286 +@@ -276,6 +276,9 @@ static int g2d_release(struct file *file)
9287 + struct g2d_dev *dev = video_drvdata(file);
9288 + struct g2d_ctx *ctx = fh2ctx(file->private_data);
9289 +
9290 ++ mutex_lock(&dev->mutex);
9291 ++ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
9292 ++ mutex_unlock(&dev->mutex);
9293 + v4l2_ctrl_handler_free(&ctx->ctrl_handler);
9294 + v4l2_fh_del(&ctx->fh);
9295 + v4l2_fh_exit(&ctx->fh);
9296 +diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c
9297 +index 9b22dd8e34f44..d515eb08c3ee4 100644
9298 +--- a/drivers/media/platform/s5p-jpeg/jpeg-core.c
9299 ++++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c
9300 +@@ -2566,11 +2566,8 @@ static void s5p_jpeg_buf_queue(struct vb2_buffer *vb)
9301 + static int s5p_jpeg_start_streaming(struct vb2_queue *q, unsigned int count)
9302 + {
9303 + struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(q);
9304 +- int ret;
9305 +-
9306 +- ret = pm_runtime_get_sync(ctx->jpeg->dev);
9307 +
9308 +- return ret > 0 ? 0 : ret;
9309 ++ return pm_runtime_resume_and_get(ctx->jpeg->dev);
9310 + }
9311 +
9312 + static void s5p_jpeg_stop_streaming(struct vb2_queue *q)
9313 +diff --git a/drivers/media/platform/sh_vou.c b/drivers/media/platform/sh_vou.c
9314 +index b22dc1d725276..7d30e0c9447e8 100644
9315 +--- a/drivers/media/platform/sh_vou.c
9316 ++++ b/drivers/media/platform/sh_vou.c
9317 +@@ -1133,7 +1133,11 @@ static int sh_vou_open(struct file *file)
9318 + if (v4l2_fh_is_singular_file(file) &&
9319 + vou_dev->status == SH_VOU_INITIALISING) {
9320 + /* First open */
9321 +- pm_runtime_get_sync(vou_dev->v4l2_dev.dev);
9322 ++ err = pm_runtime_resume_and_get(vou_dev->v4l2_dev.dev);
9323 ++ if (err < 0) {
9324 ++ v4l2_fh_release(file);
9325 ++ goto done_open;
9326 ++ }
9327 + err = sh_vou_hw_init(vou_dev);
9328 + if (err < 0) {
9329 + pm_runtime_put(vou_dev->v4l2_dev.dev);
9330 +diff --git a/drivers/media/platform/sti/bdisp/Makefile b/drivers/media/platform/sti/bdisp/Makefile
9331 +index caf7ccd193eaa..39ade0a347236 100644
9332 +--- a/drivers/media/platform/sti/bdisp/Makefile
9333 ++++ b/drivers/media/platform/sti/bdisp/Makefile
9334 +@@ -1,4 +1,4 @@
9335 + # SPDX-License-Identifier: GPL-2.0-only
9336 +-obj-$(CONFIG_VIDEO_STI_BDISP) := bdisp.o
9337 ++obj-$(CONFIG_VIDEO_STI_BDISP) += bdisp.o
9338 +
9339 + bdisp-objs := bdisp-v4l2.o bdisp-hw.o bdisp-debug.o
9340 +diff --git a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
9341 +index 060ca85f64d5d..85288da9d2ae6 100644
9342 +--- a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
9343 ++++ b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
9344 +@@ -499,7 +499,7 @@ static int bdisp_start_streaming(struct vb2_queue *q, unsigned int count)
9345 + {
9346 + struct bdisp_ctx *ctx = q->drv_priv;
9347 + struct vb2_v4l2_buffer *buf;
9348 +- int ret = pm_runtime_get_sync(ctx->bdisp_dev->dev);
9349 ++ int ret = pm_runtime_resume_and_get(ctx->bdisp_dev->dev);
9350 +
9351 + if (ret < 0) {
9352 + dev_err(ctx->bdisp_dev->dev, "failed to set runtime PM\n");
9353 +@@ -1364,10 +1364,10 @@ static int bdisp_probe(struct platform_device *pdev)
9354 +
9355 + /* Power management */
9356 + pm_runtime_enable(dev);
9357 +- ret = pm_runtime_get_sync(dev);
9358 ++ ret = pm_runtime_resume_and_get(dev);
9359 + if (ret < 0) {
9360 + dev_err(dev, "failed to set PM\n");
9361 +- goto err_pm;
9362 ++ goto err_remove;
9363 + }
9364 +
9365 + /* Filters */
9366 +@@ -1395,6 +1395,7 @@ err_filter:
9367 + bdisp_hw_free_filters(bdisp->dev);
9368 + err_pm:
9369 + pm_runtime_put(dev);
9370 ++err_remove:
9371 + bdisp_debugfs_remove(bdisp);
9372 + v4l2_device_unregister(&bdisp->v4l2_dev);
9373 + err_clk:
9374 +diff --git a/drivers/media/platform/sti/delta/Makefile b/drivers/media/platform/sti/delta/Makefile
9375 +index 92b37e216f004..32412fa4c6328 100644
9376 +--- a/drivers/media/platform/sti/delta/Makefile
9377 ++++ b/drivers/media/platform/sti/delta/Makefile
9378 +@@ -1,5 +1,5 @@
9379 + # SPDX-License-Identifier: GPL-2.0-only
9380 +-obj-$(CONFIG_VIDEO_STI_DELTA_DRIVER) := st-delta.o
9381 ++obj-$(CONFIG_VIDEO_STI_DELTA_DRIVER) += st-delta.o
9382 + st-delta-y := delta-v4l2.o delta-mem.o delta-ipc.o delta-debug.o
9383 +
9384 + # MJPEG support
9385 +diff --git a/drivers/media/platform/sti/hva/Makefile b/drivers/media/platform/sti/hva/Makefile
9386 +index 74b41ec52f976..b5a5478bdd016 100644
9387 +--- a/drivers/media/platform/sti/hva/Makefile
9388 ++++ b/drivers/media/platform/sti/hva/Makefile
9389 +@@ -1,4 +1,4 @@
9390 + # SPDX-License-Identifier: GPL-2.0-only
9391 +-obj-$(CONFIG_VIDEO_STI_HVA) := st-hva.o
9392 ++obj-$(CONFIG_VIDEO_STI_HVA) += st-hva.o
9393 + st-hva-y := hva-v4l2.o hva-hw.o hva-mem.o hva-h264.o
9394 + st-hva-$(CONFIG_VIDEO_STI_HVA_DEBUGFS) += hva-debugfs.o
9395 +diff --git a/drivers/media/platform/sti/hva/hva-hw.c b/drivers/media/platform/sti/hva/hva-hw.c
9396 +index 43f279e2a6a38..cf4c891bf619a 100644
9397 +--- a/drivers/media/platform/sti/hva/hva-hw.c
9398 ++++ b/drivers/media/platform/sti/hva/hva-hw.c
9399 +@@ -130,8 +130,7 @@ static irqreturn_t hva_hw_its_irq_thread(int irq, void *arg)
9400 + ctx_id = (hva->sts_reg & 0xFF00) >> 8;
9401 + if (ctx_id >= HVA_MAX_INSTANCES) {
9402 + dev_err(dev, "%s %s: bad context identifier: %d\n",
9403 +- ctx->name, __func__, ctx_id);
9404 +- ctx->hw_err = true;
9405 ++ HVA_PREFIX, __func__, ctx_id);
9406 + goto out;
9407 + }
9408 +
9409 +diff --git a/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c b/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c
9410 +index eb15c8c725ca0..64f25921463e9 100644
9411 +--- a/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c
9412 ++++ b/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c
9413 +@@ -118,6 +118,7 @@ static int sun4i_csi_notifier_init(struct sun4i_csi *csi)
9414 + struct v4l2_fwnode_endpoint vep = {
9415 + .bus_type = V4L2_MBUS_PARALLEL,
9416 + };
9417 ++ struct v4l2_async_subdev *asd;
9418 + struct fwnode_handle *ep;
9419 + int ret;
9420 +
9421 +@@ -134,10 +135,12 @@ static int sun4i_csi_notifier_init(struct sun4i_csi *csi)
9422 +
9423 + csi->bus = vep.bus.parallel;
9424 +
9425 +- ret = v4l2_async_notifier_add_fwnode_remote_subdev(&csi->notifier,
9426 +- ep, &csi->asd);
9427 +- if (ret)
9428 ++ asd = v4l2_async_notifier_add_fwnode_remote_subdev(&csi->notifier,
9429 ++ ep, sizeof(*asd));
9430 ++ if (IS_ERR(asd)) {
9431 ++ ret = PTR_ERR(asd);
9432 + goto out;
9433 ++ }
9434 +
9435 + csi->notifier.ops = &sun4i_csi_notify_ops;
9436 +
9437 +diff --git a/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.h b/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.h
9438 +index 0f67ff652c2e1..a5f61ee0ec4df 100644
9439 +--- a/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.h
9440 ++++ b/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.h
9441 +@@ -139,7 +139,6 @@ struct sun4i_csi {
9442 + struct v4l2_mbus_framefmt subdev_fmt;
9443 +
9444 + /* V4L2 Async variables */
9445 +- struct v4l2_async_subdev asd;
9446 + struct v4l2_async_notifier notifier;
9447 + struct v4l2_subdev *src_subdev;
9448 + int src_pad;
9449 +diff --git a/drivers/media/platform/sunxi/sun8i-rotate/sun8i_rotate.c b/drivers/media/platform/sunxi/sun8i-rotate/sun8i_rotate.c
9450 +index 3f81dd17755cb..fbcca59a0517c 100644
9451 +--- a/drivers/media/platform/sunxi/sun8i-rotate/sun8i_rotate.c
9452 ++++ b/drivers/media/platform/sunxi/sun8i-rotate/sun8i_rotate.c
9453 +@@ -494,7 +494,7 @@ static int rotate_start_streaming(struct vb2_queue *vq, unsigned int count)
9454 + struct device *dev = ctx->dev->dev;
9455 + int ret;
9456 +
9457 +- ret = pm_runtime_get_sync(dev);
9458 ++ ret = pm_runtime_resume_and_get(dev);
9459 + if (ret < 0) {
9460 + dev_err(dev, "Failed to enable module\n");
9461 +
9462 +diff --git a/drivers/media/platform/video-mux.c b/drivers/media/platform/video-mux.c
9463 +index 53570250a25d5..640ce76fe0d92 100644
9464 +--- a/drivers/media/platform/video-mux.c
9465 ++++ b/drivers/media/platform/video-mux.c
9466 +@@ -362,7 +362,7 @@ static int video_mux_async_register(struct video_mux *vmux,
9467 +
9468 + for (i = 0; i < num_input_pads; i++) {
9469 + struct v4l2_async_subdev *asd;
9470 +- struct fwnode_handle *ep;
9471 ++ struct fwnode_handle *ep, *remote_ep;
9472 +
9473 + ep = fwnode_graph_get_endpoint_by_id(
9474 + dev_fwnode(vmux->subdev.dev), i, 0,
9475 +@@ -370,19 +370,21 @@ static int video_mux_async_register(struct video_mux *vmux,
9476 + if (!ep)
9477 + continue;
9478 +
9479 +- asd = kzalloc(sizeof(*asd), GFP_KERNEL);
9480 +- if (!asd) {
9481 ++ /* Skip dangling endpoints for backwards compatibility */
9482 ++ remote_ep = fwnode_graph_get_remote_endpoint(ep);
9483 ++ if (!remote_ep) {
9484 + fwnode_handle_put(ep);
9485 +- return -ENOMEM;
9486 ++ continue;
9487 + }
9488 ++ fwnode_handle_put(remote_ep);
9489 +
9490 +- ret = v4l2_async_notifier_add_fwnode_remote_subdev(
9491 +- &vmux->notifier, ep, asd);
9492 ++ asd = v4l2_async_notifier_add_fwnode_remote_subdev(
9493 ++ &vmux->notifier, ep, sizeof(*asd));
9494 +
9495 + fwnode_handle_put(ep);
9496 +
9497 +- if (ret) {
9498 +- kfree(asd);
9499 ++ if (IS_ERR(asd)) {
9500 ++ ret = PTR_ERR(asd);
9501 + /* OK if asd already exists */
9502 + if (ret != -EEXIST)
9503 + return ret;
9504 +diff --git a/drivers/media/usb/au0828/au0828-core.c b/drivers/media/usb/au0828/au0828-core.c
9505 +index a8a72d5fbd129..caefac07af927 100644
9506 +--- a/drivers/media/usb/au0828/au0828-core.c
9507 ++++ b/drivers/media/usb/au0828/au0828-core.c
9508 +@@ -199,8 +199,8 @@ static int au0828_media_device_init(struct au0828_dev *dev,
9509 + struct media_device *mdev;
9510 +
9511 + mdev = media_device_usb_allocate(udev, KBUILD_MODNAME, THIS_MODULE);
9512 +- if (!mdev)
9513 +- return -ENOMEM;
9514 ++ if (IS_ERR(mdev))
9515 ++ return PTR_ERR(mdev);
9516 +
9517 + dev->media_dev = mdev;
9518 + #endif
9519 +diff --git a/drivers/media/usb/cpia2/cpia2.h b/drivers/media/usb/cpia2/cpia2.h
9520 +index 50835f5f7512c..57b7f1ea68da5 100644
9521 +--- a/drivers/media/usb/cpia2/cpia2.h
9522 ++++ b/drivers/media/usb/cpia2/cpia2.h
9523 +@@ -429,6 +429,7 @@ int cpia2_send_command(struct camera_data *cam, struct cpia2_command *cmd);
9524 + int cpia2_do_command(struct camera_data *cam,
9525 + unsigned int command,
9526 + unsigned char direction, unsigned char param);
9527 ++void cpia2_deinit_camera_struct(struct camera_data *cam, struct usb_interface *intf);
9528 + struct camera_data *cpia2_init_camera_struct(struct usb_interface *intf);
9529 + int cpia2_init_camera(struct camera_data *cam);
9530 + int cpia2_allocate_buffers(struct camera_data *cam);
9531 +diff --git a/drivers/media/usb/cpia2/cpia2_core.c b/drivers/media/usb/cpia2/cpia2_core.c
9532 +index e747548ab2869..b5a2d06fb356b 100644
9533 +--- a/drivers/media/usb/cpia2/cpia2_core.c
9534 ++++ b/drivers/media/usb/cpia2/cpia2_core.c
9535 +@@ -2163,6 +2163,18 @@ static void reset_camera_struct(struct camera_data *cam)
9536 + cam->height = cam->params.roi.height;
9537 + }
9538 +
9539 ++/******************************************************************************
9540 ++ *
9541 ++ * cpia2_init_camera_struct
9542 ++ *
9543 ++ * Deinitialize camera struct
9544 ++ *****************************************************************************/
9545 ++void cpia2_deinit_camera_struct(struct camera_data *cam, struct usb_interface *intf)
9546 ++{
9547 ++ v4l2_device_unregister(&cam->v4l2_dev);
9548 ++ kfree(cam);
9549 ++}
9550 ++
9551 + /******************************************************************************
9552 + *
9553 + * cpia2_init_camera_struct
9554 +diff --git a/drivers/media/usb/cpia2/cpia2_usb.c b/drivers/media/usb/cpia2/cpia2_usb.c
9555 +index 3ab80a7b44985..76aac06f9fb8e 100644
9556 +--- a/drivers/media/usb/cpia2/cpia2_usb.c
9557 ++++ b/drivers/media/usb/cpia2/cpia2_usb.c
9558 +@@ -844,15 +844,13 @@ static int cpia2_usb_probe(struct usb_interface *intf,
9559 + ret = set_alternate(cam, USBIF_CMDONLY);
9560 + if (ret < 0) {
9561 + ERR("%s: usb_set_interface error (ret = %d)\n", __func__, ret);
9562 +- kfree(cam);
9563 +- return ret;
9564 ++ goto alt_err;
9565 + }
9566 +
9567 +
9568 + if((ret = cpia2_init_camera(cam)) < 0) {
9569 + ERR("%s: failed to initialize cpia2 camera (ret = %d)\n", __func__, ret);
9570 +- kfree(cam);
9571 +- return ret;
9572 ++ goto alt_err;
9573 + }
9574 + LOG(" CPiA Version: %d.%02d (%d.%d)\n",
9575 + cam->params.version.firmware_revision_hi,
9576 +@@ -872,11 +870,14 @@ static int cpia2_usb_probe(struct usb_interface *intf,
9577 + ret = cpia2_register_camera(cam);
9578 + if (ret < 0) {
9579 + ERR("%s: Failed to register cpia2 camera (ret = %d)\n", __func__, ret);
9580 +- kfree(cam);
9581 +- return ret;
9582 ++ goto alt_err;
9583 + }
9584 +
9585 + return 0;
9586 ++
9587 ++alt_err:
9588 ++ cpia2_deinit_camera_struct(cam, intf);
9589 ++ return ret;
9590 + }
9591 +
9592 + /******************************************************************************
9593 +diff --git a/drivers/media/usb/dvb-usb/cinergyT2-core.c b/drivers/media/usb/dvb-usb/cinergyT2-core.c
9594 +index 969a7ec71dff7..4116ba5c45fcb 100644
9595 +--- a/drivers/media/usb/dvb-usb/cinergyT2-core.c
9596 ++++ b/drivers/media/usb/dvb-usb/cinergyT2-core.c
9597 +@@ -78,6 +78,8 @@ static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
9598 +
9599 + ret = dvb_usb_generic_rw(d, st->data, 1, st->data, 3, 0);
9600 + if (ret < 0) {
9601 ++ if (adap->fe_adap[0].fe)
9602 ++ adap->fe_adap[0].fe->ops.release(adap->fe_adap[0].fe);
9603 + deb_rc("cinergyt2_power_ctrl() Failed to retrieve sleep state info\n");
9604 + }
9605 + mutex_unlock(&d->data_mutex);
9606 +diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
9607 +index 761992ad05e2a..7707de7bae7ca 100644
9608 +--- a/drivers/media/usb/dvb-usb/cxusb.c
9609 ++++ b/drivers/media/usb/dvb-usb/cxusb.c
9610 +@@ -1947,7 +1947,7 @@ static struct dvb_usb_device_properties cxusb_bluebird_lgz201_properties = {
9611 +
9612 + .size_of_priv = sizeof(struct cxusb_state),
9613 +
9614 +- .num_adapters = 2,
9615 ++ .num_adapters = 1,
9616 + .adapter = {
9617 + {
9618 + .num_frontends = 1,
9619 +diff --git a/drivers/media/usb/em28xx/em28xx-input.c b/drivers/media/usb/em28xx/em28xx-input.c
9620 +index 5aa15a7a49def..59529cbf9cd0b 100644
9621 +--- a/drivers/media/usb/em28xx/em28xx-input.c
9622 ++++ b/drivers/media/usb/em28xx/em28xx-input.c
9623 +@@ -720,7 +720,8 @@ static int em28xx_ir_init(struct em28xx *dev)
9624 + dev->board.has_ir_i2c = 0;
9625 + dev_warn(&dev->intf->dev,
9626 + "No i2c IR remote control device found.\n");
9627 +- return -ENODEV;
9628 ++ err = -ENODEV;
9629 ++ goto ref_put;
9630 + }
9631 + }
9632 +
9633 +@@ -735,7 +736,7 @@ static int em28xx_ir_init(struct em28xx *dev)
9634 +
9635 + ir = kzalloc(sizeof(*ir), GFP_KERNEL);
9636 + if (!ir)
9637 +- return -ENOMEM;
9638 ++ goto ref_put;
9639 + rc = rc_allocate_device(RC_DRIVER_SCANCODE);
9640 + if (!rc)
9641 + goto error;
9642 +@@ -839,6 +840,9 @@ error:
9643 + dev->ir = NULL;
9644 + rc_free_device(rc);
9645 + kfree(ir);
9646 ++ref_put:
9647 ++ em28xx_shutdown_buttons(dev);
9648 ++ kref_put(&dev->ref, em28xx_free_device);
9649 + return err;
9650 + }
9651 +
9652 +diff --git a/drivers/media/usb/gspca/gl860/gl860.c b/drivers/media/usb/gspca/gl860/gl860.c
9653 +index 2c05ea2598e76..ce4ee8bc75c85 100644
9654 +--- a/drivers/media/usb/gspca/gl860/gl860.c
9655 ++++ b/drivers/media/usb/gspca/gl860/gl860.c
9656 +@@ -561,8 +561,8 @@ int gl860_RTx(struct gspca_dev *gspca_dev,
9657 + len, 400 + 200 * (len > 1));
9658 + memcpy(pdata, gspca_dev->usb_buf, len);
9659 + } else {
9660 +- r = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
9661 +- req, pref, val, index, NULL, len, 400);
9662 ++ gspca_err(gspca_dev, "zero-length read request\n");
9663 ++ r = -EINVAL;
9664 + }
9665 + }
9666 +
9667 +diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
9668 +index f4a727918e352..d38dee1792e41 100644
9669 +--- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
9670 ++++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
9671 +@@ -2676,9 +2676,8 @@ void pvr2_hdw_destroy(struct pvr2_hdw *hdw)
9672 + pvr2_stream_destroy(hdw->vid_stream);
9673 + hdw->vid_stream = NULL;
9674 + }
9675 +- pvr2_i2c_core_done(hdw);
9676 + v4l2_device_unregister(&hdw->v4l2_dev);
9677 +- pvr2_hdw_remove_usb_stuff(hdw);
9678 ++ pvr2_hdw_disconnect(hdw);
9679 + mutex_lock(&pvr2_unit_mtx);
9680 + do {
9681 + if ((hdw->unit_number >= 0) &&
9682 +@@ -2705,6 +2704,7 @@ void pvr2_hdw_disconnect(struct pvr2_hdw *hdw)
9683 + {
9684 + pvr2_trace(PVR2_TRACE_INIT,"pvr2_hdw_disconnect(hdw=%p)",hdw);
9685 + LOCK_TAKE(hdw->big_lock);
9686 ++ pvr2_i2c_core_done(hdw);
9687 + LOCK_TAKE(hdw->ctl_lock);
9688 + pvr2_hdw_remove_usb_stuff(hdw);
9689 + LOCK_GIVE(hdw->ctl_lock);
9690 +diff --git a/drivers/media/v4l2-core/v4l2-async.c b/drivers/media/v4l2-core/v4l2-async.c
9691 +index e3ab003a6c851..33babe6e8b3a2 100644
9692 +--- a/drivers/media/v4l2-core/v4l2-async.c
9693 ++++ b/drivers/media/v4l2-core/v4l2-async.c
9694 +@@ -673,26 +673,26 @@ v4l2_async_notifier_add_fwnode_subdev(struct v4l2_async_notifier *notifier,
9695 + }
9696 + EXPORT_SYMBOL_GPL(v4l2_async_notifier_add_fwnode_subdev);
9697 +
9698 +-int
9699 ++struct v4l2_async_subdev *
9700 + v4l2_async_notifier_add_fwnode_remote_subdev(struct v4l2_async_notifier *notif,
9701 + struct fwnode_handle *endpoint,
9702 +- struct v4l2_async_subdev *asd)
9703 ++ unsigned int asd_struct_size)
9704 + {
9705 ++ struct v4l2_async_subdev *asd;
9706 + struct fwnode_handle *remote;
9707 +- int ret;
9708 +
9709 + remote = fwnode_graph_get_remote_port_parent(endpoint);
9710 + if (!remote)
9711 +- return -ENOTCONN;
9712 ++ return ERR_PTR(-ENOTCONN);
9713 +
9714 +- asd->match_type = V4L2_ASYNC_MATCH_FWNODE;
9715 +- asd->match.fwnode = remote;
9716 +-
9717 +- ret = v4l2_async_notifier_add_subdev(notif, asd);
9718 +- if (ret)
9719 +- fwnode_handle_put(remote);
9720 +-
9721 +- return ret;
9722 ++ asd = v4l2_async_notifier_add_fwnode_subdev(notif, remote,
9723 ++ asd_struct_size);
9724 ++ /*
9725 ++ * Calling v4l2_async_notifier_add_fwnode_subdev grabs a refcount,
9726 ++ * so drop the one we got in fwnode_graph_get_remote_port_parent.
9727 ++ */
9728 ++ fwnode_handle_put(remote);
9729 ++ return asd;
9730 + }
9731 + EXPORT_SYMBOL_GPL(v4l2_async_notifier_add_fwnode_remote_subdev);
9732 +
9733 +diff --git a/drivers/media/v4l2-core/v4l2-fh.c b/drivers/media/v4l2-core/v4l2-fh.c
9734 +index 684574f58e82d..90eec79ee995a 100644
9735 +--- a/drivers/media/v4l2-core/v4l2-fh.c
9736 ++++ b/drivers/media/v4l2-core/v4l2-fh.c
9737 +@@ -96,6 +96,7 @@ int v4l2_fh_release(struct file *filp)
9738 + v4l2_fh_del(fh);
9739 + v4l2_fh_exit(fh);
9740 + kfree(fh);
9741 ++ filp->private_data = NULL;
9742 + }
9743 + return 0;
9744 + }
9745 +diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
9746 +index a7d508e74d6b3..fbf0dcb313c82 100644
9747 +--- a/drivers/media/v4l2-core/v4l2-subdev.c
9748 ++++ b/drivers/media/v4l2-core/v4l2-subdev.c
9749 +@@ -428,30 +428,6 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
9750 +
9751 + return v4l2_event_dequeue(vfh, arg, file->f_flags & O_NONBLOCK);
9752 +
9753 +- case VIDIOC_DQEVENT_TIME32: {
9754 +- struct v4l2_event_time32 *ev32 = arg;
9755 +- struct v4l2_event ev = { };
9756 +-
9757 +- if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
9758 +- return -ENOIOCTLCMD;
9759 +-
9760 +- rval = v4l2_event_dequeue(vfh, &ev, file->f_flags & O_NONBLOCK);
9761 +-
9762 +- *ev32 = (struct v4l2_event_time32) {
9763 +- .type = ev.type,
9764 +- .pending = ev.pending,
9765 +- .sequence = ev.sequence,
9766 +- .timestamp.tv_sec = ev.timestamp.tv_sec,
9767 +- .timestamp.tv_nsec = ev.timestamp.tv_nsec,
9768 +- .id = ev.id,
9769 +- };
9770 +-
9771 +- memcpy(&ev32->u, &ev.u, sizeof(ev.u));
9772 +- memcpy(&ev32->reserved, &ev.reserved, sizeof(ev.reserved));
9773 +-
9774 +- return rval;
9775 +- }
9776 +-
9777 + case VIDIOC_SUBSCRIBE_EVENT:
9778 + return v4l2_subdev_call(sd, core, subscribe_event, vfh, arg);
9779 +
9780 +diff --git a/drivers/memstick/host/rtsx_usb_ms.c b/drivers/memstick/host/rtsx_usb_ms.c
9781 +index 102dbb8080da5..29271ad4728a2 100644
9782 +--- a/drivers/memstick/host/rtsx_usb_ms.c
9783 ++++ b/drivers/memstick/host/rtsx_usb_ms.c
9784 +@@ -799,9 +799,9 @@ static int rtsx_usb_ms_drv_probe(struct platform_device *pdev)
9785 +
9786 + return 0;
9787 + err_out:
9788 +- memstick_free_host(msh);
9789 + pm_runtime_disable(ms_dev(host));
9790 + pm_runtime_put_noidle(ms_dev(host));
9791 ++ memstick_free_host(msh);
9792 + return err;
9793 + }
9794 +
9795 +@@ -828,9 +828,6 @@ static int rtsx_usb_ms_drv_remove(struct platform_device *pdev)
9796 + }
9797 + mutex_unlock(&host->host_mutex);
9798 +
9799 +- memstick_remove_host(msh);
9800 +- memstick_free_host(msh);
9801 +-
9802 + /* Balance possible unbalanced usage count
9803 + * e.g. unconditional module removal
9804 + */
9805 +@@ -838,10 +835,11 @@ static int rtsx_usb_ms_drv_remove(struct platform_device *pdev)
9806 + pm_runtime_put(ms_dev(host));
9807 +
9808 + pm_runtime_disable(ms_dev(host));
9809 +- platform_set_drvdata(pdev, NULL);
9810 +-
9811 ++ memstick_remove_host(msh);
9812 + dev_dbg(ms_dev(host),
9813 + ": Realtek USB Memstick controller has been removed\n");
9814 ++ memstick_free_host(msh);
9815 ++ platform_set_drvdata(pdev, NULL);
9816 +
9817 + return 0;
9818 + }
9819 +diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
9820 +index 4789507f325b8..b8847ae04d938 100644
9821 +--- a/drivers/mfd/Kconfig
9822 ++++ b/drivers/mfd/Kconfig
9823 +@@ -465,6 +465,7 @@ config MFD_MP2629
9824 + tristate "Monolithic Power Systems MP2629 ADC and Battery charger"
9825 + depends on I2C
9826 + select REGMAP_I2C
9827 ++ select MFD_CORE
9828 + help
9829 + Select this option to enable support for Monolithic Power Systems
9830 + battery charger. This provides ADC, thermal and battery charger power
9831 +diff --git a/drivers/mfd/rn5t618.c b/drivers/mfd/rn5t618.c
9832 +index dc452df1f1bfe..652a5e60067f8 100644
9833 +--- a/drivers/mfd/rn5t618.c
9834 ++++ b/drivers/mfd/rn5t618.c
9835 +@@ -104,7 +104,7 @@ static int rn5t618_irq_init(struct rn5t618 *rn5t618)
9836 +
9837 + ret = devm_regmap_add_irq_chip(rn5t618->dev, rn5t618->regmap,
9838 + rn5t618->irq,
9839 +- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
9840 ++ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
9841 + 0, irq_chip, &rn5t618->irq_data);
9842 + if (ret)
9843 + dev_err(rn5t618->dev, "Failed to register IRQ chip\n");
9844 +diff --git a/drivers/misc/eeprom/idt_89hpesx.c b/drivers/misc/eeprom/idt_89hpesx.c
9845 +index 81c70e5bc168f..3e4a594c110b3 100644
9846 +--- a/drivers/misc/eeprom/idt_89hpesx.c
9847 ++++ b/drivers/misc/eeprom/idt_89hpesx.c
9848 +@@ -1126,11 +1126,10 @@ static void idt_get_fw_data(struct idt_89hpesx_dev *pdev)
9849 +
9850 + device_for_each_child_node(dev, fwnode) {
9851 + ee_id = idt_ee_match_id(fwnode);
9852 +- if (!ee_id) {
9853 +- dev_warn(dev, "Skip unsupported EEPROM device");
9854 +- continue;
9855 +- } else
9856 ++ if (ee_id)
9857 + break;
9858 ++
9859 ++ dev_warn(dev, "Skip unsupported EEPROM device %pfw\n", fwnode);
9860 + }
9861 +
9862 + /* If there is no fwnode EEPROM device, then set zero size */
9863 +@@ -1161,6 +1160,7 @@ static void idt_get_fw_data(struct idt_89hpesx_dev *pdev)
9864 + else /* if (!fwnode_property_read_bool(node, "read-only")) */
9865 + pdev->eero = false;
9866 +
9867 ++ fwnode_handle_put(fwnode);
9868 + dev_info(dev, "EEPROM of %d bytes found by 0x%x",
9869 + pdev->eesize, pdev->eeaddr);
9870 + }
9871 +diff --git a/drivers/misc/habanalabs/common/habanalabs_drv.c b/drivers/misc/habanalabs/common/habanalabs_drv.c
9872 +index 3bcef64a677ae..ded92b3cbdb27 100644
9873 +--- a/drivers/misc/habanalabs/common/habanalabs_drv.c
9874 ++++ b/drivers/misc/habanalabs/common/habanalabs_drv.c
9875 +@@ -421,6 +421,7 @@ static int hl_pci_probe(struct pci_dev *pdev,
9876 + return 0;
9877 +
9878 + disable_device:
9879 ++ pci_disable_pcie_error_reporting(pdev);
9880 + pci_set_drvdata(pdev, NULL);
9881 + destroy_hdev(hdev);
9882 +
9883 +diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
9884 +index 3246598e4d7e3..87bac99207023 100644
9885 +--- a/drivers/mmc/core/block.c
9886 ++++ b/drivers/mmc/core/block.c
9887 +@@ -1003,6 +1003,12 @@ static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req)
9888 +
9889 + switch (mq_rq->drv_op) {
9890 + case MMC_DRV_OP_IOCTL:
9891 ++ if (card->ext_csd.cmdq_en) {
9892 ++ ret = mmc_cmdq_disable(card);
9893 ++ if (ret)
9894 ++ break;
9895 ++ }
9896 ++ fallthrough;
9897 + case MMC_DRV_OP_IOCTL_RPMB:
9898 + idata = mq_rq->drv_op_data;
9899 + for (i = 0, ret = 0; i < mq_rq->ioc_count; i++) {
9900 +@@ -1013,6 +1019,8 @@ static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req)
9901 + /* Always switch back to main area after RPMB access */
9902 + if (rpmb_ioctl)
9903 + mmc_blk_part_switch(card, 0);
9904 ++ else if (card->reenable_cmdq && !card->ext_csd.cmdq_en)
9905 ++ mmc_cmdq_enable(card);
9906 + break;
9907 + case MMC_DRV_OP_BOOT_WP:
9908 + ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
9909 +diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c
9910 +index 19cbb6171b358..9cd8862e6cbd0 100644
9911 +--- a/drivers/mmc/host/sdhci-sprd.c
9912 ++++ b/drivers/mmc/host/sdhci-sprd.c
9913 +@@ -393,6 +393,7 @@ static void sdhci_sprd_request_done(struct sdhci_host *host,
9914 + static struct sdhci_ops sdhci_sprd_ops = {
9915 + .read_l = sdhci_sprd_readl,
9916 + .write_l = sdhci_sprd_writel,
9917 ++ .write_w = sdhci_sprd_writew,
9918 + .write_b = sdhci_sprd_writeb,
9919 + .set_clock = sdhci_sprd_set_clock,
9920 + .get_max_clock = sdhci_sprd_get_max_clock,
9921 +diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c
9922 +index 615f3d008af1e..b9b79b1089a00 100644
9923 +--- a/drivers/mmc/host/usdhi6rol0.c
9924 ++++ b/drivers/mmc/host/usdhi6rol0.c
9925 +@@ -1801,6 +1801,7 @@ static int usdhi6_probe(struct platform_device *pdev)
9926 +
9927 + version = usdhi6_read(host, USDHI6_VERSION);
9928 + if ((version & 0xfff) != 0xa0d) {
9929 ++ ret = -EPERM;
9930 + dev_err(dev, "Version not recognized %x\n", version);
9931 + goto e_clk_off;
9932 + }
9933 +diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c
9934 +index 9b755ea0fa03c..f07c71db3cafe 100644
9935 +--- a/drivers/mmc/host/via-sdmmc.c
9936 ++++ b/drivers/mmc/host/via-sdmmc.c
9937 +@@ -857,6 +857,9 @@ static void via_sdc_data_isr(struct via_crdr_mmc_host *host, u16 intmask)
9938 + {
9939 + BUG_ON(intmask == 0);
9940 +
9941 ++ if (!host->data)
9942 ++ return;
9943 ++
9944 + if (intmask & VIA_CRDR_SDSTS_DT)
9945 + host->data->error = -ETIMEDOUT;
9946 + else if (intmask & (VIA_CRDR_SDSTS_RC | VIA_CRDR_SDSTS_WC))
9947 +diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c
9948 +index 739cf63ef6e2f..4950d10d3a191 100644
9949 +--- a/drivers/mmc/host/vub300.c
9950 ++++ b/drivers/mmc/host/vub300.c
9951 +@@ -2279,7 +2279,7 @@ static int vub300_probe(struct usb_interface *interface,
9952 + if (retval < 0)
9953 + goto error5;
9954 + retval =
9955 +- usb_control_msg(vub300->udev, usb_rcvctrlpipe(vub300->udev, 0),
9956 ++ usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0),
9957 + SET_ROM_WAIT_STATES,
9958 + USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
9959 + firmware_rom_wait_states, 0x0000, NULL, 0, HZ);
9960 +diff --git a/drivers/mtd/nand/raw/arasan-nand-controller.c b/drivers/mtd/nand/raw/arasan-nand-controller.c
9961 +index fbb4ea751be8e..0ee3192916d97 100644
9962 +--- a/drivers/mtd/nand/raw/arasan-nand-controller.c
9963 ++++ b/drivers/mtd/nand/raw/arasan-nand-controller.c
9964 +@@ -272,6 +272,37 @@ static int anfc_pkt_len_config(unsigned int len, unsigned int *steps,
9965 + return 0;
9966 + }
9967 +
9968 ++static int anfc_select_target(struct nand_chip *chip, int target)
9969 ++{
9970 ++ struct anand *anand = to_anand(chip);
9971 ++ struct arasan_nfc *nfc = to_anfc(chip->controller);
9972 ++ int ret;
9973 ++
9974 ++ /* Update the controller timings and the potential ECC configuration */
9975 ++ writel_relaxed(anand->timings, nfc->base + DATA_INTERFACE_REG);
9976 ++
9977 ++ /* Update clock frequency */
9978 ++ if (nfc->cur_clk != anand->clk) {
9979 ++ clk_disable_unprepare(nfc->controller_clk);
9980 ++ ret = clk_set_rate(nfc->controller_clk, anand->clk);
9981 ++ if (ret) {
9982 ++ dev_err(nfc->dev, "Failed to change clock rate\n");
9983 ++ return ret;
9984 ++ }
9985 ++
9986 ++ ret = clk_prepare_enable(nfc->controller_clk);
9987 ++ if (ret) {
9988 ++ dev_err(nfc->dev,
9989 ++ "Failed to re-enable the controller clock\n");
9990 ++ return ret;
9991 ++ }
9992 ++
9993 ++ nfc->cur_clk = anand->clk;
9994 ++ }
9995 ++
9996 ++ return 0;
9997 ++}
9998 ++
9999 + /*
10000 + * When using the embedded hardware ECC engine, the controller is in charge of
10001 + * feeding the engine with, first, the ECC residue present in the data array.
10002 +@@ -400,6 +431,18 @@ static int anfc_read_page_hw_ecc(struct nand_chip *chip, u8 *buf,
10003 + return 0;
10004 + }
10005 +
10006 ++static int anfc_sel_read_page_hw_ecc(struct nand_chip *chip, u8 *buf,
10007 ++ int oob_required, int page)
10008 ++{
10009 ++ int ret;
10010 ++
10011 ++ ret = anfc_select_target(chip, chip->cur_cs);
10012 ++ if (ret)
10013 ++ return ret;
10014 ++
10015 ++ return anfc_read_page_hw_ecc(chip, buf, oob_required, page);
10016 ++};
10017 ++
10018 + static int anfc_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf,
10019 + int oob_required, int page)
10020 + {
10021 +@@ -460,6 +503,18 @@ static int anfc_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf,
10022 + return ret;
10023 + }
10024 +
10025 ++static int anfc_sel_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf,
10026 ++ int oob_required, int page)
10027 ++{
10028 ++ int ret;
10029 ++
10030 ++ ret = anfc_select_target(chip, chip->cur_cs);
10031 ++ if (ret)
10032 ++ return ret;
10033 ++
10034 ++ return anfc_write_page_hw_ecc(chip, buf, oob_required, page);
10035 ++};
10036 ++
10037 + /* NAND framework ->exec_op() hooks and related helpers */
10038 + static int anfc_parse_instructions(struct nand_chip *chip,
10039 + const struct nand_subop *subop,
10040 +@@ -752,37 +807,6 @@ static const struct nand_op_parser anfc_op_parser = NAND_OP_PARSER(
10041 + NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
10042 + );
10043 +
10044 +-static int anfc_select_target(struct nand_chip *chip, int target)
10045 +-{
10046 +- struct anand *anand = to_anand(chip);
10047 +- struct arasan_nfc *nfc = to_anfc(chip->controller);
10048 +- int ret;
10049 +-
10050 +- /* Update the controller timings and the potential ECC configuration */
10051 +- writel_relaxed(anand->timings, nfc->base + DATA_INTERFACE_REG);
10052 +-
10053 +- /* Update clock frequency */
10054 +- if (nfc->cur_clk != anand->clk) {
10055 +- clk_disable_unprepare(nfc->controller_clk);
10056 +- ret = clk_set_rate(nfc->controller_clk, anand->clk);
10057 +- if (ret) {
10058 +- dev_err(nfc->dev, "Failed to change clock rate\n");
10059 +- return ret;
10060 +- }
10061 +-
10062 +- ret = clk_prepare_enable(nfc->controller_clk);
10063 +- if (ret) {
10064 +- dev_err(nfc->dev,
10065 +- "Failed to re-enable the controller clock\n");
10066 +- return ret;
10067 +- }
10068 +-
10069 +- nfc->cur_clk = anand->clk;
10070 +- }
10071 +-
10072 +- return 0;
10073 +-}
10074 +-
10075 + static int anfc_check_op(struct nand_chip *chip,
10076 + const struct nand_operation *op)
10077 + {
10078 +@@ -1006,8 +1030,8 @@ static int anfc_init_hw_ecc_controller(struct arasan_nfc *nfc,
10079 + if (!anand->bch)
10080 + return -EINVAL;
10081 +
10082 +- ecc->read_page = anfc_read_page_hw_ecc;
10083 +- ecc->write_page = anfc_write_page_hw_ecc;
10084 ++ ecc->read_page = anfc_sel_read_page_hw_ecc;
10085 ++ ecc->write_page = anfc_sel_write_page_hw_ecc;
10086 +
10087 + return 0;
10088 + }
10089 +diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
10090 +index f5ca2002d08e8..d00c916f133bd 100644
10091 +--- a/drivers/mtd/nand/raw/marvell_nand.c
10092 ++++ b/drivers/mtd/nand/raw/marvell_nand.c
10093 +@@ -3036,8 +3036,10 @@ static int __maybe_unused marvell_nfc_resume(struct device *dev)
10094 + return ret;
10095 +
10096 + ret = clk_prepare_enable(nfc->reg_clk);
10097 +- if (ret < 0)
10098 ++ if (ret < 0) {
10099 ++ clk_disable_unprepare(nfc->core_clk);
10100 + return ret;
10101 ++ }
10102 +
10103 + /*
10104 + * Reset nfc->selected_chip so the next command will cause the timing
10105 +diff --git a/drivers/mtd/parsers/redboot.c b/drivers/mtd/parsers/redboot.c
10106 +index 91146bdc47132..3ccd6363ee8cb 100644
10107 +--- a/drivers/mtd/parsers/redboot.c
10108 ++++ b/drivers/mtd/parsers/redboot.c
10109 +@@ -45,6 +45,7 @@ static inline int redboot_checksum(struct fis_image_desc *img)
10110 + static void parse_redboot_of(struct mtd_info *master)
10111 + {
10112 + struct device_node *np;
10113 ++ struct device_node *npart;
10114 + u32 dirblock;
10115 + int ret;
10116 +
10117 +@@ -52,7 +53,11 @@ static void parse_redboot_of(struct mtd_info *master)
10118 + if (!np)
10119 + return;
10120 +
10121 +- ret = of_property_read_u32(np, "fis-index-block", &dirblock);
10122 ++ npart = of_get_child_by_name(np, "partitions");
10123 ++ if (!npart)
10124 ++ return;
10125 ++
10126 ++ ret = of_property_read_u32(npart, "fis-index-block", &dirblock);
10127 + if (ret)
10128 + return;
10129 +
10130 +diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c
10131 +index 40c33b8a5fda3..ac5801a98680d 100644
10132 +--- a/drivers/net/can/peak_canfd/peak_canfd.c
10133 ++++ b/drivers/net/can/peak_canfd/peak_canfd.c
10134 +@@ -351,8 +351,8 @@ static int pucan_handle_status(struct peak_canfd_priv *priv,
10135 + return err;
10136 + }
10137 +
10138 +- /* start network queue (echo_skb array is empty) */
10139 +- netif_start_queue(ndev);
10140 ++ /* wake network queue up (echo_skb array is empty) */
10141 ++ netif_wake_queue(ndev);
10142 +
10143 + return 0;
10144 + }
10145 +diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
10146 +index 4f52810bebf89..db9f15f17610b 100644
10147 +--- a/drivers/net/can/usb/ems_usb.c
10148 ++++ b/drivers/net/can/usb/ems_usb.c
10149 +@@ -1053,7 +1053,6 @@ static void ems_usb_disconnect(struct usb_interface *intf)
10150 +
10151 + if (dev) {
10152 + unregister_netdev(dev->netdev);
10153 +- free_candev(dev->netdev);
10154 +
10155 + unlink_all_urbs(dev);
10156 +
10157 +@@ -1061,6 +1060,8 @@ static void ems_usb_disconnect(struct usb_interface *intf)
10158 +
10159 + kfree(dev->intr_in_buffer);
10160 + kfree(dev->tx_msg_buffer);
10161 ++
10162 ++ free_candev(dev->netdev);
10163 + }
10164 + }
10165 +
10166 +diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
10167 +index e273b2bd82ba7..82852c57cc0e4 100644
10168 +--- a/drivers/net/dsa/sja1105/sja1105_main.c
10169 ++++ b/drivers/net/dsa/sja1105/sja1105_main.c
10170 +@@ -1711,6 +1711,12 @@ static int sja1105_reload_cbs(struct sja1105_private *priv)
10171 + {
10172 + int rc = 0, i;
10173 +
10174 ++ /* The credit based shapers are only allocated if
10175 ++ * CONFIG_NET_SCH_CBS is enabled.
10176 ++ */
10177 ++ if (!priv->cbs)
10178 ++ return 0;
10179 ++
10180 + for (i = 0; i < priv->info->num_cbs_shapers; i++) {
10181 + struct sja1105_cbs_entry *cbs = &priv->cbs[i];
10182 +
10183 +diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
10184 +index 9c5891bbfe61a..f4f50b3a472e1 100644
10185 +--- a/drivers/net/ethernet/aeroflex/greth.c
10186 ++++ b/drivers/net/ethernet/aeroflex/greth.c
10187 +@@ -1539,10 +1539,11 @@ static int greth_of_remove(struct platform_device *of_dev)
10188 + mdiobus_unregister(greth->mdio);
10189 +
10190 + unregister_netdev(ndev);
10191 +- free_netdev(ndev);
10192 +
10193 + of_iounmap(&of_dev->resource[0], greth->regs, resource_size(&of_dev->resource[0]));
10194 +
10195 ++ free_netdev(ndev);
10196 ++
10197 + return 0;
10198 + }
10199 +
10200 +diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_macsec.h b/drivers/net/ethernet/aquantia/atlantic/aq_macsec.h
10201 +index f5fba8b8cdea9..a47e2710487ec 100644
10202 +--- a/drivers/net/ethernet/aquantia/atlantic/aq_macsec.h
10203 ++++ b/drivers/net/ethernet/aquantia/atlantic/aq_macsec.h
10204 +@@ -91,7 +91,7 @@ struct aq_macsec_txsc {
10205 + u32 hw_sc_idx;
10206 + unsigned long tx_sa_idx_busy;
10207 + const struct macsec_secy *sw_secy;
10208 +- u8 tx_sa_key[MACSEC_NUM_AN][MACSEC_KEYID_LEN];
10209 ++ u8 tx_sa_key[MACSEC_NUM_AN][MACSEC_MAX_KEY_LEN];
10210 + struct aq_macsec_tx_sc_stats stats;
10211 + struct aq_macsec_tx_sa_stats tx_sa_stats[MACSEC_NUM_AN];
10212 + };
10213 +@@ -101,7 +101,7 @@ struct aq_macsec_rxsc {
10214 + unsigned long rx_sa_idx_busy;
10215 + const struct macsec_secy *sw_secy;
10216 + const struct macsec_rx_sc *sw_rxsc;
10217 +- u8 rx_sa_key[MACSEC_NUM_AN][MACSEC_KEYID_LEN];
10218 ++ u8 rx_sa_key[MACSEC_NUM_AN][MACSEC_MAX_KEY_LEN];
10219 + struct aq_macsec_rx_sa_stats rx_sa_stats[MACSEC_NUM_AN];
10220 + };
10221 +
10222 +diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
10223 +index fcca023f22e54..41f7f078cd27c 100644
10224 +--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
10225 ++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
10226 +@@ -4296,3 +4296,4 @@ MODULE_AUTHOR("Broadcom Corporation");
10227 + MODULE_DESCRIPTION("Broadcom GENET Ethernet controller driver");
10228 + MODULE_ALIAS("platform:bcmgenet");
10229 + MODULE_LICENSE("GPL");
10230 ++MODULE_SOFTDEP("pre: mdio-bcm-unimac");
10231 +diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
10232 +index 701c12c9e0337..649c5c429bd7c 100644
10233 +--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
10234 ++++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
10235 +@@ -550,7 +550,7 @@ int be_process_mcc(struct be_adapter *adapter)
10236 + int num = 0, status = 0;
10237 + struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
10238 +
10239 +- spin_lock_bh(&adapter->mcc_cq_lock);
10240 ++ spin_lock(&adapter->mcc_cq_lock);
10241 +
10242 + while ((compl = be_mcc_compl_get(adapter))) {
10243 + if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
10244 +@@ -566,7 +566,7 @@ int be_process_mcc(struct be_adapter *adapter)
10245 + if (num)
10246 + be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);
10247 +
10248 +- spin_unlock_bh(&adapter->mcc_cq_lock);
10249 ++ spin_unlock(&adapter->mcc_cq_lock);
10250 + return status;
10251 + }
10252 +
10253 +@@ -581,7 +581,9 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
10254 + if (be_check_error(adapter, BE_ERROR_ANY))
10255 + return -EIO;
10256 +
10257 ++ local_bh_disable();
10258 + status = be_process_mcc(adapter);
10259 ++ local_bh_enable();
10260 +
10261 + if (atomic_read(&mcc_obj->q.used) == 0)
10262 + break;
10263 +diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
10264 +index cb1e1ad652d09..89697cb09d1c0 100644
10265 +--- a/drivers/net/ethernet/emulex/benet/be_main.c
10266 ++++ b/drivers/net/ethernet/emulex/benet/be_main.c
10267 +@@ -5509,7 +5509,9 @@ static void be_worker(struct work_struct *work)
10268 + * mcc completions
10269 + */
10270 + if (!netif_running(adapter->netdev)) {
10271 ++ local_bh_disable();
10272 + be_process_mcc(adapter);
10273 ++ local_bh_enable();
10274 + goto reschedule;
10275 + }
10276 +
10277 +diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
10278 +index 815fb62c4b02e..3d74401b4f102 100644
10279 +--- a/drivers/net/ethernet/ezchip/nps_enet.c
10280 ++++ b/drivers/net/ethernet/ezchip/nps_enet.c
10281 +@@ -610,7 +610,7 @@ static s32 nps_enet_probe(struct platform_device *pdev)
10282 +
10283 + /* Get IRQ number */
10284 + priv->irq = platform_get_irq(pdev, 0);
10285 +- if (!priv->irq) {
10286 ++ if (priv->irq < 0) {
10287 + dev_err(dev, "failed to retrieve <irq Rx-Tx> value from device tree\n");
10288 + err = -ENODEV;
10289 + goto out_netdev;
10290 +@@ -645,8 +645,8 @@ static s32 nps_enet_remove(struct platform_device *pdev)
10291 + struct nps_enet_priv *priv = netdev_priv(ndev);
10292 +
10293 + unregister_netdev(ndev);
10294 +- free_netdev(ndev);
10295 + netif_napi_del(&priv->napi);
10296 ++ free_netdev(ndev);
10297 +
10298 + return 0;
10299 + }
10300 +diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
10301 +index c9c380c508791..5bc11d1bb9df8 100644
10302 +--- a/drivers/net/ethernet/faraday/ftgmac100.c
10303 ++++ b/drivers/net/ethernet/faraday/ftgmac100.c
10304 +@@ -1831,14 +1831,17 @@ static int ftgmac100_probe(struct platform_device *pdev)
10305 + if (np && of_get_property(np, "use-ncsi", NULL)) {
10306 + if (!IS_ENABLED(CONFIG_NET_NCSI)) {
10307 + dev_err(&pdev->dev, "NCSI stack not enabled\n");
10308 ++ err = -EINVAL;
10309 + goto err_ncsi_dev;
10310 + }
10311 +
10312 + dev_info(&pdev->dev, "Using NCSI interface\n");
10313 + priv->use_ncsi = true;
10314 + priv->ndev = ncsi_register_dev(netdev, ftgmac100_ncsi_handler);
10315 +- if (!priv->ndev)
10316 ++ if (!priv->ndev) {
10317 ++ err = -EINVAL;
10318 + goto err_ncsi_dev;
10319 ++ }
10320 + } else if (np && of_get_property(np, "phy-handle", NULL)) {
10321 + struct phy_device *phy;
10322 +
10323 +@@ -1846,6 +1849,7 @@ static int ftgmac100_probe(struct platform_device *pdev)
10324 + &ftgmac100_adjust_link);
10325 + if (!phy) {
10326 + dev_err(&pdev->dev, "Failed to connect to phy\n");
10327 ++ err = -EINVAL;
10328 + goto err_setup_mdio;
10329 + }
10330 +
10331 +diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
10332 +index d6e35421d8f7b..3a74e4645ce65 100644
10333 +--- a/drivers/net/ethernet/google/gve/gve_main.c
10334 ++++ b/drivers/net/ethernet/google/gve/gve_main.c
10335 +@@ -1286,8 +1286,8 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
10336 +
10337 + gve_write_version(&reg_bar->driver_version);
10338 + /* Get max queues to alloc etherdev */
10339 +- max_rx_queues = ioread32be(&reg_bar->max_tx_queues);
10340 +- max_tx_queues = ioread32be(&reg_bar->max_rx_queues);
10341 ++ max_tx_queues = ioread32be(&reg_bar->max_tx_queues);
10342 ++ max_rx_queues = ioread32be(&reg_bar->max_rx_queues);
10343 + /* Alloc and setup the netdev and priv */
10344 + dev = alloc_etherdev_mqs(sizeof(*priv), max_tx_queues, max_rx_queues);
10345 + if (!dev) {
10346 +diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
10347 +index c2e7404757869..f630667364253 100644
10348 +--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
10349 ++++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
10350 +@@ -2617,10 +2617,8 @@ static int ehea_restart_qps(struct net_device *dev)
10351 + u16 dummy16 = 0;
10352 +
10353 + cb0 = (void *)get_zeroed_page(GFP_KERNEL);
10354 +- if (!cb0) {
10355 +- ret = -ENOMEM;
10356 +- goto out;
10357 +- }
10358 ++ if (!cb0)
10359 ++ return -ENOMEM;
10360 +
10361 + for (i = 0; i < (port->num_def_qps); i++) {
10362 + struct ehea_port_res *pr = &port->port_res[i];
10363 +@@ -2640,6 +2638,7 @@ static int ehea_restart_qps(struct net_device *dev)
10364 + cb0);
10365 + if (hret != H_SUCCESS) {
10366 + netdev_err(dev, "query_ehea_qp failed (1)\n");
10367 ++ ret = -EFAULT;
10368 + goto out;
10369 + }
10370 +
10371 +@@ -2652,6 +2651,7 @@ static int ehea_restart_qps(struct net_device *dev)
10372 + &dummy64, &dummy16, &dummy16);
10373 + if (hret != H_SUCCESS) {
10374 + netdev_err(dev, "modify_ehea_qp failed (1)\n");
10375 ++ ret = -EFAULT;
10376 + goto out;
10377 + }
10378 +
10379 +@@ -2660,6 +2660,7 @@ static int ehea_restart_qps(struct net_device *dev)
10380 + cb0);
10381 + if (hret != H_SUCCESS) {
10382 + netdev_err(dev, "query_ehea_qp failed (2)\n");
10383 ++ ret = -EFAULT;
10384 + goto out;
10385 + }
10386 +
10387 +diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
10388 +index 8cc444684491a..3134c1988db36 100644
10389 +--- a/drivers/net/ethernet/ibm/ibmvnic.c
10390 ++++ b/drivers/net/ethernet/ibm/ibmvnic.c
10391 +@@ -212,12 +212,11 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
10392 + mutex_lock(&adapter->fw_lock);
10393 + adapter->fw_done_rc = 0;
10394 + reinit_completion(&adapter->fw_done);
10395 +- rc = send_request_map(adapter, ltb->addr,
10396 +- ltb->size, ltb->map_id);
10397 ++
10398 ++ rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
10399 + if (rc) {
10400 +- dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
10401 +- mutex_unlock(&adapter->fw_lock);
10402 +- return rc;
10403 ++ dev_err(dev, "send_request_map failed, rc = %d\n", rc);
10404 ++ goto out;
10405 + }
10406 +
10407 + rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
10408 +@@ -225,20 +224,23 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
10409 + dev_err(dev,
10410 + "Long term map request aborted or timed out,rc = %d\n",
10411 + rc);
10412 +- dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
10413 +- mutex_unlock(&adapter->fw_lock);
10414 +- return rc;
10415 ++ goto out;
10416 + }
10417 +
10418 + if (adapter->fw_done_rc) {
10419 + dev_err(dev, "Couldn't map long term buffer,rc = %d\n",
10420 + adapter->fw_done_rc);
10421 ++ rc = -1;
10422 ++ goto out;
10423 ++ }
10424 ++ rc = 0;
10425 ++out:
10426 ++ if (rc) {
10427 + dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
10428 +- mutex_unlock(&adapter->fw_lock);
10429 +- return -1;
10430 ++ ltb->buff = NULL;
10431 + }
10432 + mutex_unlock(&adapter->fw_lock);
10433 +- return 0;
10434 ++ return rc;
10435 + }
10436 +
10437 + static void free_long_term_buff(struct ibmvnic_adapter *adapter,
10438 +@@ -258,6 +260,8 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter,
10439 + adapter->reset_reason != VNIC_RESET_TIMEOUT)
10440 + send_request_unmap(adapter, ltb->map_id);
10441 + dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
10442 ++ ltb->buff = NULL;
10443 ++ ltb->map_id = 0;
10444 + }
10445 +
10446 + static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
10447 +@@ -747,8 +751,11 @@ static int init_tx_pools(struct net_device *netdev)
10448 +
10449 + adapter->tso_pool = kcalloc(tx_subcrqs,
10450 + sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
10451 +- if (!adapter->tso_pool)
10452 ++ if (!adapter->tso_pool) {
10453 ++ kfree(adapter->tx_pool);
10454 ++ adapter->tx_pool = NULL;
10455 + return -1;
10456 ++ }
10457 +
10458 + adapter->num_active_tx_pools = tx_subcrqs;
10459 +
10460 +@@ -1166,6 +1173,11 @@ static int __ibmvnic_open(struct net_device *netdev)
10461 +
10462 + netif_tx_start_all_queues(netdev);
10463 +
10464 ++ if (prev_state == VNIC_CLOSED) {
10465 ++ for (i = 0; i < adapter->req_rx_queues; i++)
10466 ++ napi_schedule(&adapter->napi[i]);
10467 ++ }
10468 ++
10469 + adapter->state = VNIC_OPEN;
10470 + return rc;
10471 + }
10472 +diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
10473 +index a0948002ddf85..b3ad95ac3d859 100644
10474 +--- a/drivers/net/ethernet/intel/e1000e/netdev.c
10475 ++++ b/drivers/net/ethernet/intel/e1000e/netdev.c
10476 +@@ -5222,18 +5222,20 @@ static void e1000_watchdog_task(struct work_struct *work)
10477 + pm_runtime_resume(netdev->dev.parent);
10478 +
10479 + /* Checking if MAC is in DMoff state*/
10480 +- pcim_state = er32(STATUS);
10481 +- while (pcim_state & E1000_STATUS_PCIM_STATE) {
10482 +- if (tries++ == dmoff_exit_timeout) {
10483 +- e_dbg("Error in exiting dmoff\n");
10484 +- break;
10485 +- }
10486 +- usleep_range(10000, 20000);
10487 ++ if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
10488 + pcim_state = er32(STATUS);
10489 +-
10490 +- /* Checking if MAC exited DMoff state */
10491 +- if (!(pcim_state & E1000_STATUS_PCIM_STATE))
10492 +- e1000_phy_hw_reset(&adapter->hw);
10493 ++ while (pcim_state & E1000_STATUS_PCIM_STATE) {
10494 ++ if (tries++ == dmoff_exit_timeout) {
10495 ++ e_dbg("Error in exiting dmoff\n");
10496 ++ break;
10497 ++ }
10498 ++ usleep_range(10000, 20000);
10499 ++ pcim_state = er32(STATUS);
10500 ++
10501 ++ /* Checking if MAC exited DMoff state */
10502 ++ if (!(pcim_state & E1000_STATUS_PCIM_STATE))
10503 ++ e1000_phy_hw_reset(&adapter->hw);
10504 ++ }
10505 + }
10506 +
10507 + /* update snapshot of PHY registers on LSC */
10508 +diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
10509 +index 5d48bc0c3f6c4..874073f7f0248 100644
10510 +--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
10511 ++++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
10512 +@@ -1262,8 +1262,7 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
10513 + if (ethtool_link_ksettings_test_link_mode(&safe_ks,
10514 + supported,
10515 + Autoneg) &&
10516 +- hw->phy.link_info.phy_type !=
10517 +- I40E_PHY_TYPE_10GBASE_T) {
10518 ++ hw->phy.media_type != I40E_MEDIA_TYPE_BASET) {
10519 + netdev_info(netdev, "Autoneg cannot be disabled on this phy\n");
10520 + err = -EINVAL;
10521 + goto done;
10522 +diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
10523 +index f0edea7cdbccc..52e31f712a545 100644
10524 +--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
10525 ++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
10526 +@@ -31,7 +31,7 @@ static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
10527 + static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired);
10528 + static int i40e_add_vsi(struct i40e_vsi *vsi);
10529 + static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
10530 +-static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
10531 ++static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acquired);
10532 + static int i40e_setup_misc_vector(struct i40e_pf *pf);
10533 + static void i40e_determine_queue_usage(struct i40e_pf *pf);
10534 + static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
10535 +@@ -8347,6 +8347,8 @@ int i40e_vsi_open(struct i40e_vsi *vsi)
10536 + dev_driver_string(&pf->pdev->dev),
10537 + dev_name(&pf->pdev->dev));
10538 + err = i40e_vsi_request_irq(vsi, int_name);
10539 ++ if (err)
10540 ++ goto err_setup_rx;
10541 +
10542 + } else {
10543 + err = -EINVAL;
10544 +@@ -10112,7 +10114,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
10545 + /* do basic switch setup */
10546 + if (!lock_acquired)
10547 + rtnl_lock();
10548 +- ret = i40e_setup_pf_switch(pf, reinit);
10549 ++ ret = i40e_setup_pf_switch(pf, reinit, true);
10550 + if (ret)
10551 + goto end_unlock;
10552 +
10553 +@@ -14167,10 +14169,11 @@ int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
10554 + * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
10555 + * @pf: board private structure
10556 + * @reinit: if the Main VSI needs to re-initialized.
10557 ++ * @lock_acquired: indicates whether or not the lock has been acquired
10558 + *
10559 + * Returns 0 on success, negative value on failure
10560 + **/
10561 +-static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
10562 ++static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acquired)
10563 + {
10564 + u16 flags = 0;
10565 + int ret;
10566 +@@ -14272,9 +14275,15 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
10567 +
10568 + i40e_ptp_init(pf);
10569 +
10570 ++ if (!lock_acquired)
10571 ++ rtnl_lock();
10572 ++
10573 + /* repopulate tunnel port filters */
10574 + udp_tunnel_nic_reset_ntf(pf->vsi[pf->lan_vsi]->netdev);
10575 +
10576 ++ if (!lock_acquired)
10577 ++ rtnl_unlock();
10578 ++
10579 + return ret;
10580 + }
10581 +
10582 +@@ -15046,7 +15055,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
10583 + pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
10584 + }
10585 + #endif
10586 +- err = i40e_setup_pf_switch(pf, false);
10587 ++ err = i40e_setup_pf_switch(pf, false, false);
10588 + if (err) {
10589 + dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
10590 + goto err_vsis;
10591 +diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
10592 +index 6aa13c9f9fc9c..a9f65d6677617 100644
10593 +--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
10594 ++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
10595 +@@ -7045,6 +7045,8 @@ static int mvpp2_probe(struct platform_device *pdev)
10596 + return 0;
10597 +
10598 + err_port_probe:
10599 ++ fwnode_handle_put(port_fwnode);
10600 ++
10601 + i = 0;
10602 + fwnode_for_each_available_child_node(fwnode, port_fwnode) {
10603 + if (priv->port_list[i])
10604 +diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
10605 +index ade8c44c01cd1..9a0870dc2f034 100644
10606 +--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
10607 ++++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
10608 +@@ -2536,9 +2536,13 @@ static int pch_gbe_probe(struct pci_dev *pdev,
10609 + adapter->pdev = pdev;
10610 + adapter->hw.back = adapter;
10611 + adapter->hw.reg = pcim_iomap_table(pdev)[PCH_GBE_PCI_BAR];
10612 ++
10613 + adapter->pdata = (struct pch_gbe_privdata *)pci_id->driver_data;
10614 +- if (adapter->pdata && adapter->pdata->platform_init)
10615 +- adapter->pdata->platform_init(pdev);
10616 ++ if (adapter->pdata && adapter->pdata->platform_init) {
10617 ++ ret = adapter->pdata->platform_init(pdev);
10618 ++ if (ret)
10619 ++ goto err_free_netdev;
10620 ++ }
10621 +
10622 + adapter->ptp_pdev =
10623 + pci_get_domain_bus_and_slot(pci_domain_nr(adapter->pdev->bus),
10624 +@@ -2633,7 +2637,7 @@ err_free_netdev:
10625 + */
10626 + static int pch_gbe_minnow_platform_init(struct pci_dev *pdev)
10627 + {
10628 +- unsigned long flags = GPIOF_DIR_OUT | GPIOF_INIT_HIGH | GPIOF_EXPORT;
10629 ++ unsigned long flags = GPIOF_OUT_INIT_HIGH;
10630 + unsigned gpio = MINNOW_PHY_RESET_GPIO;
10631 + int ret;
10632 +
10633 +diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
10634 +index 501d676fd88b9..0805edef56254 100644
10635 +--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
10636 ++++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
10637 +@@ -1433,12 +1433,12 @@ static void am65_cpsw_nuss_free_tx_chns(void *data)
10638 + for (i = 0; i < common->tx_ch_num; i++) {
10639 + struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
10640 +
10641 +- if (!IS_ERR_OR_NULL(tx_chn->tx_chn))
10642 +- k3_udma_glue_release_tx_chn(tx_chn->tx_chn);
10643 +-
10644 + if (!IS_ERR_OR_NULL(tx_chn->desc_pool))
10645 + k3_cppi_desc_pool_destroy(tx_chn->desc_pool);
10646 +
10647 ++ if (!IS_ERR_OR_NULL(tx_chn->tx_chn))
10648 ++ k3_udma_glue_release_tx_chn(tx_chn->tx_chn);
10649 ++
10650 + memset(tx_chn, 0, sizeof(*tx_chn));
10651 + }
10652 + }
10653 +@@ -1458,12 +1458,12 @@ void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common)
10654 +
10655 + netif_napi_del(&tx_chn->napi_tx);
10656 +
10657 +- if (!IS_ERR_OR_NULL(tx_chn->tx_chn))
10658 +- k3_udma_glue_release_tx_chn(tx_chn->tx_chn);
10659 +-
10660 + if (!IS_ERR_OR_NULL(tx_chn->desc_pool))
10661 + k3_cppi_desc_pool_destroy(tx_chn->desc_pool);
10662 +
10663 ++ if (!IS_ERR_OR_NULL(tx_chn->tx_chn))
10664 ++ k3_udma_glue_release_tx_chn(tx_chn->tx_chn);
10665 ++
10666 + memset(tx_chn, 0, sizeof(*tx_chn));
10667 + }
10668 + }
10669 +@@ -1550,11 +1550,11 @@ static void am65_cpsw_nuss_free_rx_chns(void *data)
10670 +
10671 + rx_chn = &common->rx_chns;
10672 +
10673 +- if (!IS_ERR_OR_NULL(rx_chn->rx_chn))
10674 +- k3_udma_glue_release_rx_chn(rx_chn->rx_chn);
10675 +-
10676 + if (!IS_ERR_OR_NULL(rx_chn->desc_pool))
10677 + k3_cppi_desc_pool_destroy(rx_chn->desc_pool);
10678 ++
10679 ++ if (!IS_ERR_OR_NULL(rx_chn->rx_chn))
10680 ++ k3_udma_glue_release_rx_chn(rx_chn->rx_chn);
10681 + }
10682 +
10683 + static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
10684 +diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c
10685 +index c0bf7d78276e4..626e1ce817fcf 100644
10686 +--- a/drivers/net/ieee802154/mac802154_hwsim.c
10687 ++++ b/drivers/net/ieee802154/mac802154_hwsim.c
10688 +@@ -480,7 +480,7 @@ static int hwsim_del_edge_nl(struct sk_buff *msg, struct genl_info *info)
10689 + struct hwsim_edge *e;
10690 + u32 v0, v1;
10691 +
10692 +- if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID] &&
10693 ++ if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID] ||
10694 + !info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE])
10695 + return -EINVAL;
10696 +
10697 +@@ -715,6 +715,8 @@ static int hwsim_subscribe_all_others(struct hwsim_phy *phy)
10698 +
10699 + return 0;
10700 +
10701 ++sub_fail:
10702 ++ hwsim_edge_unsubscribe_me(phy);
10703 + me_fail:
10704 + rcu_read_lock();
10705 + list_for_each_entry_rcu(e, &phy->edges, list) {
10706 +@@ -722,8 +724,6 @@ me_fail:
10707 + hwsim_free_edge(e);
10708 + }
10709 + rcu_read_unlock();
10710 +-sub_fail:
10711 +- hwsim_edge_unsubscribe_me(phy);
10712 + return -ENOMEM;
10713 + }
10714 +
10715 +@@ -824,12 +824,17 @@ err_pib:
10716 + static void hwsim_del(struct hwsim_phy *phy)
10717 + {
10718 + struct hwsim_pib *pib;
10719 ++ struct hwsim_edge *e;
10720 +
10721 + hwsim_edge_unsubscribe_me(phy);
10722 +
10723 + list_del(&phy->list);
10724 +
10725 + rcu_read_lock();
10726 ++ list_for_each_entry_rcu(e, &phy->edges, list) {
10727 ++ list_del_rcu(&e->list);
10728 ++ hwsim_free_edge(e);
10729 ++ }
10730 + pib = rcu_dereference(phy->pib);
10731 + rcu_read_unlock();
10732 +
10733 +diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
10734 +index 11ca5fa902a16..c601d3df27220 100644
10735 +--- a/drivers/net/macsec.c
10736 ++++ b/drivers/net/macsec.c
10737 +@@ -1818,7 +1818,7 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
10738 + ctx.sa.rx_sa = rx_sa;
10739 + ctx.secy = secy;
10740 + memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
10741 +- MACSEC_KEYID_LEN);
10742 ++ secy->key_len);
10743 +
10744 + err = macsec_offload(ops->mdo_add_rxsa, &ctx);
10745 + if (err)
10746 +@@ -2060,7 +2060,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
10747 + ctx.sa.tx_sa = tx_sa;
10748 + ctx.secy = secy;
10749 + memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
10750 +- MACSEC_KEYID_LEN);
10751 ++ secy->key_len);
10752 +
10753 + err = macsec_offload(ops->mdo_add_txsa, &ctx);
10754 + if (err)
10755 +diff --git a/drivers/net/phy/mscc/mscc_macsec.c b/drivers/net/phy/mscc/mscc_macsec.c
10756 +index 10be266e48e8b..b7b2521c73fb6 100644
10757 +--- a/drivers/net/phy/mscc/mscc_macsec.c
10758 ++++ b/drivers/net/phy/mscc/mscc_macsec.c
10759 +@@ -501,7 +501,7 @@ static u32 vsc8584_macsec_flow_context_id(struct macsec_flow *flow)
10760 + }
10761 +
10762 + /* Derive the AES key to get a key for the hash autentication */
10763 +-static int vsc8584_macsec_derive_key(const u8 key[MACSEC_KEYID_LEN],
10764 ++static int vsc8584_macsec_derive_key(const u8 key[MACSEC_MAX_KEY_LEN],
10765 + u16 key_len, u8 hkey[16])
10766 + {
10767 + const u8 input[AES_BLOCK_SIZE] = {0};
10768 +diff --git a/drivers/net/phy/mscc/mscc_macsec.h b/drivers/net/phy/mscc/mscc_macsec.h
10769 +index 9c6d25e36de2a..453304bae7784 100644
10770 +--- a/drivers/net/phy/mscc/mscc_macsec.h
10771 ++++ b/drivers/net/phy/mscc/mscc_macsec.h
10772 +@@ -81,7 +81,7 @@ struct macsec_flow {
10773 + /* Highest takes precedence [0..15] */
10774 + u8 priority;
10775 +
10776 +- u8 key[MACSEC_KEYID_LEN];
10777 ++ u8 key[MACSEC_MAX_KEY_LEN];
10778 +
10779 + union {
10780 + struct macsec_rx_sa *rx_sa;
10781 +diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
10782 +index bc96ac0c5769c..2746f77745e4d 100644
10783 +--- a/drivers/net/vrf.c
10784 ++++ b/drivers/net/vrf.c
10785 +@@ -1312,22 +1312,22 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
10786 + int orig_iif = skb->skb_iif;
10787 + bool need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
10788 + bool is_ndisc = ipv6_ndisc_frame(skb);
10789 +- bool is_ll_src;
10790 +
10791 + /* loopback, multicast & non-ND link-local traffic; do not push through
10792 + * packet taps again. Reset pkt_type for upper layers to process skb.
10793 +- * for packets with lladdr src, however, skip so that the dst can be
10794 +- * determine at input using original ifindex in the case that daddr
10795 +- * needs strict
10796 ++ * For strict packets with a source LLA, determine the dst using the
10797 ++ * original ifindex.
10798 + */
10799 +- is_ll_src = ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL;
10800 +- if (skb->pkt_type == PACKET_LOOPBACK ||
10801 +- (need_strict && !is_ndisc && !is_ll_src)) {
10802 ++ if (skb->pkt_type == PACKET_LOOPBACK || (need_strict && !is_ndisc)) {
10803 + skb->dev = vrf_dev;
10804 + skb->skb_iif = vrf_dev->ifindex;
10805 + IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
10806 ++
10807 + if (skb->pkt_type == PACKET_LOOPBACK)
10808 + skb->pkt_type = PACKET_HOST;
10809 ++ else if (ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL)
10810 ++ vrf_ip6_input_dst(skb, vrf_dev, orig_iif);
10811 ++
10812 + goto out;
10813 + }
10814 +
10815 +diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
10816 +index d3b698d9e2e6a..48fbdce6a70e7 100644
10817 +--- a/drivers/net/vxlan.c
10818 ++++ b/drivers/net/vxlan.c
10819 +@@ -2163,6 +2163,7 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
10820 + struct neighbour *n;
10821 + struct nd_msg *msg;
10822 +
10823 ++ rcu_read_lock();
10824 + in6_dev = __in6_dev_get(dev);
10825 + if (!in6_dev)
10826 + goto out;
10827 +@@ -2214,6 +2215,7 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
10828 + }
10829 +
10830 + out:
10831 ++ rcu_read_unlock();
10832 + consume_skb(skb);
10833 + return NETDEV_TX_OK;
10834 + }
10835 +diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
10836 +index f5c0f9bac8404..36183fdfb7f03 100644
10837 +--- a/drivers/net/wireless/ath/ath10k/mac.c
10838 ++++ b/drivers/net/wireless/ath/ath10k/mac.c
10839 +@@ -5482,6 +5482,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
10840 +
10841 + if (arvif->nohwcrypt &&
10842 + !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
10843 ++ ret = -EINVAL;
10844 + ath10k_warn(ar, "cryptmode module param needed for sw crypto\n");
10845 + goto err;
10846 + }
10847 +diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
10848 +index 36426efdb2ea0..86f52bcb3e4db 100644
10849 +--- a/drivers/net/wireless/ath/ath10k/pci.c
10850 ++++ b/drivers/net/wireless/ath/ath10k/pci.c
10851 +@@ -3684,8 +3684,10 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
10852 + ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
10853 + if (bus_params.chip_id != 0xffffffff) {
10854 + if (!ath10k_pci_chip_is_supported(pdev->device,
10855 +- bus_params.chip_id))
10856 ++ bus_params.chip_id)) {
10857 ++ ret = -ENODEV;
10858 + goto err_unsupported;
10859 ++ }
10860 + }
10861 + }
10862 +
10863 +@@ -3696,11 +3698,15 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
10864 + }
10865 +
10866 + bus_params.chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
10867 +- if (bus_params.chip_id == 0xffffffff)
10868 ++ if (bus_params.chip_id == 0xffffffff) {
10869 ++ ret = -ENODEV;
10870 + goto err_unsupported;
10871 ++ }
10872 +
10873 +- if (!ath10k_pci_chip_is_supported(pdev->device, bus_params.chip_id))
10874 +- goto err_free_irq;
10875 ++ if (!ath10k_pci_chip_is_supported(pdev->device, bus_params.chip_id)) {
10876 ++ ret = -ENODEV;
10877 ++ goto err_unsupported;
10878 ++ }
10879 +
10880 + ret = ath10k_core_register(ar, &bus_params);
10881 + if (ret) {
10882 +diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
10883 +index a68fe3a45a744..28de2c7ae8991 100644
10884 +--- a/drivers/net/wireless/ath/ath11k/core.c
10885 ++++ b/drivers/net/wireless/ath/ath11k/core.c
10886 +@@ -329,7 +329,8 @@ static int ath11k_core_fetch_board_data_api_n(struct ath11k_base *ab,
10887 + if (len < ALIGN(ie_len, 4)) {
10888 + ath11k_err(ab, "invalid length for board ie_id %d ie_len %zu len %zu\n",
10889 + ie_id, ie_len, len);
10890 +- return -EINVAL;
10891 ++ ret = -EINVAL;
10892 ++ goto err;
10893 + }
10894 +
10895 + switch (ie_id) {
10896 +diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
10897 +index 0738c784616f1..cc0c30ceaa0d4 100644
10898 +--- a/drivers/net/wireless/ath/ath11k/mac.c
10899 ++++ b/drivers/net/wireless/ath/ath11k/mac.c
10900 +@@ -5123,11 +5123,6 @@ ath11k_mac_update_vif_chan(struct ath11k *ar,
10901 + if (WARN_ON(!arvif->is_up))
10902 + continue;
10903 +
10904 +- ret = ath11k_mac_setup_bcn_tmpl(arvif);
10905 +- if (ret)
10906 +- ath11k_warn(ab, "failed to update bcn tmpl during csa: %d\n",
10907 +- ret);
10908 +-
10909 + ret = ath11k_mac_vdev_restart(arvif, &vifs[i].new_ctx->def);
10910 + if (ret) {
10911 + ath11k_warn(ab, "failed to restart vdev %d: %d\n",
10912 +@@ -5135,6 +5130,11 @@ ath11k_mac_update_vif_chan(struct ath11k *ar,
10913 + continue;
10914 + }
10915 +
10916 ++ ret = ath11k_mac_setup_bcn_tmpl(arvif);
10917 ++ if (ret)
10918 ++ ath11k_warn(ab, "failed to update bcn tmpl during csa: %d\n",
10919 ++ ret);
10920 ++
10921 + ret = ath11k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
10922 + arvif->bssid);
10923 + if (ret) {
10924 +diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
10925 +index 8dbf68b94228c..ac805f56627ab 100644
10926 +--- a/drivers/net/wireless/ath/ath9k/main.c
10927 ++++ b/drivers/net/wireless/ath/ath9k/main.c
10928 +@@ -307,6 +307,11 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan)
10929 + hchan = ah->curchan;
10930 + }
10931 +
10932 ++ if (!hchan) {
10933 ++ fastcc = false;
10934 ++ hchan = ath9k_cmn_get_channel(sc->hw, ah, &sc->cur_chan->chandef);
10935 ++ }
10936 ++
10937 + if (!ath_prepare_reset(sc))
10938 + fastcc = false;
10939 +
10940 +diff --git a/drivers/net/wireless/ath/carl9170/Kconfig b/drivers/net/wireless/ath/carl9170/Kconfig
10941 +index b2d760873992f..ba9bea79381c5 100644
10942 +--- a/drivers/net/wireless/ath/carl9170/Kconfig
10943 ++++ b/drivers/net/wireless/ath/carl9170/Kconfig
10944 +@@ -16,13 +16,11 @@ config CARL9170
10945 +
10946 + config CARL9170_LEDS
10947 + bool "SoftLED Support"
10948 +- depends on CARL9170
10949 +- select MAC80211_LEDS
10950 +- select LEDS_CLASS
10951 +- select NEW_LEDS
10952 + default y
10953 ++ depends on CARL9170
10954 ++ depends on MAC80211_LEDS
10955 + help
10956 +- This option is necessary, if you want your device' LEDs to blink
10957 ++ This option is necessary, if you want your device's LEDs to blink.
10958 +
10959 + Say Y, unless you need the LEDs for firmware debugging.
10960 +
10961 +diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
10962 +index 706728fba72d7..9f8e44210e89a 100644
10963 +--- a/drivers/net/wireless/ath/wcn36xx/main.c
10964 ++++ b/drivers/net/wireless/ath/wcn36xx/main.c
10965 +@@ -293,23 +293,16 @@ static int wcn36xx_start(struct ieee80211_hw *hw)
10966 + goto out_free_dxe_pool;
10967 + }
10968 +
10969 +- wcn->hal_buf = kmalloc(WCN36XX_HAL_BUF_SIZE, GFP_KERNEL);
10970 +- if (!wcn->hal_buf) {
10971 +- wcn36xx_err("Failed to allocate smd buf\n");
10972 +- ret = -ENOMEM;
10973 +- goto out_free_dxe_ctl;
10974 +- }
10975 +-
10976 + ret = wcn36xx_smd_load_nv(wcn);
10977 + if (ret) {
10978 + wcn36xx_err("Failed to push NV to chip\n");
10979 +- goto out_free_smd_buf;
10980 ++ goto out_free_dxe_ctl;
10981 + }
10982 +
10983 + ret = wcn36xx_smd_start(wcn);
10984 + if (ret) {
10985 + wcn36xx_err("Failed to start chip\n");
10986 +- goto out_free_smd_buf;
10987 ++ goto out_free_dxe_ctl;
10988 + }
10989 +
10990 + if (!wcn36xx_is_fw_version(wcn, 1, 2, 2, 24)) {
10991 +@@ -336,8 +329,6 @@ static int wcn36xx_start(struct ieee80211_hw *hw)
10992 +
10993 + out_smd_stop:
10994 + wcn36xx_smd_stop(wcn);
10995 +-out_free_smd_buf:
10996 +- kfree(wcn->hal_buf);
10997 + out_free_dxe_ctl:
10998 + wcn36xx_dxe_free_ctl_blks(wcn);
10999 + out_free_dxe_pool:
11000 +@@ -372,8 +363,6 @@ static void wcn36xx_stop(struct ieee80211_hw *hw)
11001 +
11002 + wcn36xx_dxe_free_mem_pools(wcn);
11003 + wcn36xx_dxe_free_ctl_blks(wcn);
11004 +-
11005 +- kfree(wcn->hal_buf);
11006 + }
11007 +
11008 + static void wcn36xx_change_ps(struct wcn36xx *wcn, bool enable)
11009 +@@ -1398,6 +1387,12 @@ static int wcn36xx_probe(struct platform_device *pdev)
11010 + mutex_init(&wcn->hal_mutex);
11011 + mutex_init(&wcn->scan_lock);
11012 +
11013 ++ wcn->hal_buf = devm_kmalloc(wcn->dev, WCN36XX_HAL_BUF_SIZE, GFP_KERNEL);
11014 ++ if (!wcn->hal_buf) {
11015 ++ ret = -ENOMEM;
11016 ++ goto out_wq;
11017 ++ }
11018 ++
11019 + ret = dma_set_mask_and_coherent(wcn->dev, DMA_BIT_MASK(32));
11020 + if (ret < 0) {
11021 + wcn36xx_err("failed to set DMA mask: %d\n", ret);
11022 +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
11023 +index 23e6422c2251b..c2b6e5c966d04 100644
11024 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
11025 ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
11026 +@@ -2767,8 +2767,9 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
11027 + struct brcmf_sta_info_le sta_info_le;
11028 + u32 sta_flags;
11029 + u32 is_tdls_peer;
11030 +- s32 total_rssi;
11031 +- s32 count_rssi;
11032 ++ s32 total_rssi_avg = 0;
11033 ++ s32 total_rssi = 0;
11034 ++ s32 count_rssi = 0;
11035 + int rssi;
11036 + u32 i;
11037 +
11038 +@@ -2834,25 +2835,27 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
11039 + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BYTES);
11040 + sinfo->rx_bytes = le64_to_cpu(sta_info_le.rx_tot_bytes);
11041 + }
11042 +- total_rssi = 0;
11043 +- count_rssi = 0;
11044 + for (i = 0; i < BRCMF_ANT_MAX; i++) {
11045 +- if (sta_info_le.rssi[i]) {
11046 +- sinfo->chain_signal_avg[count_rssi] =
11047 +- sta_info_le.rssi[i];
11048 +- sinfo->chain_signal[count_rssi] =
11049 +- sta_info_le.rssi[i];
11050 +- total_rssi += sta_info_le.rssi[i];
11051 +- count_rssi++;
11052 +- }
11053 ++ if (sta_info_le.rssi[i] == 0 ||
11054 ++ sta_info_le.rx_lastpkt_rssi[i] == 0)
11055 ++ continue;
11056 ++ sinfo->chains |= BIT(count_rssi);
11057 ++ sinfo->chain_signal[count_rssi] =
11058 ++ sta_info_le.rx_lastpkt_rssi[i];
11059 ++ sinfo->chain_signal_avg[count_rssi] =
11060 ++ sta_info_le.rssi[i];
11061 ++ total_rssi += sta_info_le.rx_lastpkt_rssi[i];
11062 ++ total_rssi_avg += sta_info_le.rssi[i];
11063 ++ count_rssi++;
11064 + }
11065 + if (count_rssi) {
11066 +- sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL);
11067 +- sinfo->chains = count_rssi;
11068 +-
11069 + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
11070 +- total_rssi /= count_rssi;
11071 +- sinfo->signal = total_rssi;
11072 ++ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);
11073 ++ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL);
11074 ++ sinfo->filled |=
11075 ++ BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL_AVG);
11076 ++ sinfo->signal = total_rssi / count_rssi;
11077 ++ sinfo->signal_avg = total_rssi_avg / count_rssi;
11078 + } else if (test_bit(BRCMF_VIF_STATUS_CONNECTED,
11079 + &ifp->vif->sme_state)) {
11080 + memset(&scb_val, 0, sizeof(scb_val));
11081 +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
11082 +index 59c2b2b6027da..6d5d5c39c6359 100644
11083 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
11084 ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
11085 +@@ -4157,7 +4157,6 @@ static int brcmf_sdio_bus_reset(struct device *dev)
11086 + if (ret) {
11087 + brcmf_err("Failed to probe after sdio device reset: ret %d\n",
11088 + ret);
11089 +- brcmf_sdiod_remove(sdiodev);
11090 + }
11091 +
11092 + return ret;
11093 +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
11094 +index 818e523f6025d..fb76b4a69a059 100644
11095 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
11096 ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
11097 +@@ -1221,6 +1221,7 @@ static int brcms_bcma_probe(struct bcma_device *pdev)
11098 + {
11099 + struct brcms_info *wl;
11100 + struct ieee80211_hw *hw;
11101 ++ int ret;
11102 +
11103 + dev_info(&pdev->dev, "mfg %x core %x rev %d class %d irq %d\n",
11104 + pdev->id.manuf, pdev->id.id, pdev->id.rev, pdev->id.class,
11105 +@@ -1245,11 +1246,16 @@ static int brcms_bcma_probe(struct bcma_device *pdev)
11106 + wl = brcms_attach(pdev);
11107 + if (!wl) {
11108 + pr_err("%s: brcms_attach failed!\n", __func__);
11109 +- return -ENODEV;
11110 ++ ret = -ENODEV;
11111 ++ goto err_free_ieee80211;
11112 + }
11113 + brcms_led_register(wl);
11114 +
11115 + return 0;
11116 ++
11117 ++err_free_ieee80211:
11118 ++ ieee80211_free_hw(hw);
11119 ++ return ret;
11120 + }
11121 +
11122 + static int brcms_suspend(struct bcma_device *pdev)
11123 +diff --git a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.h b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.h
11124 +index e4f91bce222d8..61d3d4e0b7d94 100644
11125 +--- a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.h
11126 ++++ b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.h
11127 +@@ -1,7 +1,7 @@
11128 + /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
11129 + /******************************************************************************
11130 + *
11131 +- * Copyright(c) 2020 Intel Corporation
11132 ++ * Copyright(c) 2020-2021 Intel Corporation
11133 + *
11134 + *****************************************************************************/
11135 +
11136 +@@ -10,7 +10,7 @@
11137 +
11138 + #include "fw/notif-wait.h"
11139 +
11140 +-#define MVM_UCODE_PNVM_TIMEOUT (HZ / 10)
11141 ++#define MVM_UCODE_PNVM_TIMEOUT (HZ / 4)
11142 +
11143 + int iwl_pnvm_load(struct iwl_trans *trans,
11144 + struct iwl_notif_wait_data *notif_wait);
11145 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
11146 +index 7626117c01fa3..7186e1dbbd6b5 100644
11147 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
11148 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
11149 +@@ -1085,6 +1085,9 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
11150 + if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_INVALID_STA))
11151 + return -1;
11152 +
11153 ++ if (unlikely(ieee80211_is_any_nullfunc(fc)) && sta->he_cap.has_he)
11154 ++ return -1;
11155 ++
11156 + if (unlikely(ieee80211_is_probe_resp(fc)))
11157 + iwl_mvm_probe_resp_set_noa(mvm, skb);
11158 +
11159 +diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
11160 +index 33cf952cc01d3..b2de8d03c5fac 100644
11161 +--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
11162 ++++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
11163 +@@ -1232,7 +1232,7 @@ static int mwifiex_pcie_delete_cmdrsp_buf(struct mwifiex_adapter *adapter)
11164 + static int mwifiex_pcie_alloc_sleep_cookie_buf(struct mwifiex_adapter *adapter)
11165 + {
11166 + struct pcie_service_card *card = adapter->card;
11167 +- u32 tmp;
11168 ++ u32 *cookie;
11169 +
11170 + card->sleep_cookie_vbase = dma_alloc_coherent(&card->dev->dev,
11171 + sizeof(u32),
11172 +@@ -1243,13 +1243,11 @@ static int mwifiex_pcie_alloc_sleep_cookie_buf(struct mwifiex_adapter *adapter)
11173 + "dma_alloc_coherent failed!\n");
11174 + return -ENOMEM;
11175 + }
11176 ++ cookie = (u32 *)card->sleep_cookie_vbase;
11177 + /* Init val of Sleep Cookie */
11178 +- tmp = FW_AWAKE_COOKIE;
11179 +- put_unaligned(tmp, card->sleep_cookie_vbase);
11180 ++ *cookie = FW_AWAKE_COOKIE;
11181 +
11182 +- mwifiex_dbg(adapter, INFO,
11183 +- "alloc_scook: sleep cookie=0x%x\n",
11184 +- get_unaligned(card->sleep_cookie_vbase));
11185 ++ mwifiex_dbg(adapter, INFO, "alloc_scook: sleep cookie=0x%x\n", *cookie);
11186 +
11187 + return 0;
11188 + }
11189 +diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
11190 +index 4cf7c5d343258..490d55651de39 100644
11191 +--- a/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
11192 ++++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
11193 +@@ -133,20 +133,21 @@ int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
11194 + struct mt76_tx_info *tx_info)
11195 + {
11196 + struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
11197 +- struct mt7615_sta *msta = container_of(wcid, struct mt7615_sta, wcid);
11198 + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
11199 + struct ieee80211_key_conf *key = info->control.hw_key;
11200 + int pid, id;
11201 + u8 *txwi = (u8 *)txwi_ptr;
11202 + struct mt76_txwi_cache *t;
11203 ++ struct mt7615_sta *msta;
11204 + void *txp;
11205 +
11206 ++ msta = wcid ? container_of(wcid, struct mt7615_sta, wcid) : NULL;
11207 + if (!wcid)
11208 + wcid = &dev->mt76.global_wcid;
11209 +
11210 + pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
11211 +
11212 +- if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
11213 ++ if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) && msta) {
11214 + struct mt7615_phy *phy = &dev->phy;
11215 +
11216 + if ((info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY) && mdev->phy2)
11217 +diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
11218 +index 3b29a6d3dc641..18082b4ce7d3d 100644
11219 +--- a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
11220 ++++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
11221 +@@ -243,14 +243,15 @@ int mt7663_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
11222 + struct ieee80211_sta *sta,
11223 + struct mt76_tx_info *tx_info)
11224 + {
11225 +- struct mt7615_sta *msta = container_of(wcid, struct mt7615_sta, wcid);
11226 + struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
11227 + struct sk_buff *skb = tx_info->skb;
11228 + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
11229 ++ struct mt7615_sta *msta;
11230 + int pad;
11231 +
11232 ++ msta = wcid ? container_of(wcid, struct mt7615_sta, wcid) : NULL;
11233 + if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) &&
11234 +- !msta->rate_probe) {
11235 ++ msta && !msta->rate_probe) {
11236 + /* request to configure sampling rate */
11237 + spin_lock_bh(&dev->mt76.lock);
11238 + mt7615_mac_set_rates(&dev->phy, msta, &info->control.rates[0],
11239 +diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
11240 +index 44ef4bc7a46e5..073c29eb2ed8f 100644
11241 +--- a/drivers/net/wireless/mediatek/mt76/tx.c
11242 ++++ b/drivers/net/wireless/mediatek/mt76/tx.c
11243 +@@ -278,7 +278,7 @@ mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
11244 + skb_set_queue_mapping(skb, qid);
11245 + }
11246 +
11247 +- if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
11248 ++ if (wcid && !(wcid->tx_info & MT_WCID_TX_INFO_SET))
11249 + ieee80211_get_tx_rates(info->control.vif, sta, skb,
11250 + info->control.rates, 1);
11251 +
11252 +diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
11253 +index b718f5d810be8..79ad6232dce83 100644
11254 +--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c
11255 ++++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
11256 +@@ -3510,26 +3510,28 @@ static void rtw8822c_pwrtrack_set(struct rtw_dev *rtwdev, u8 rf_path)
11257 + }
11258 + }
11259 +
11260 +-static void rtw8822c_pwr_track_path(struct rtw_dev *rtwdev,
11261 +- struct rtw_swing_table *swing_table,
11262 +- u8 path)
11263 ++static void rtw8822c_pwr_track_stats(struct rtw_dev *rtwdev, u8 path)
11264 + {
11265 +- struct rtw_dm_info *dm_info = &rtwdev->dm_info;
11266 +- u8 thermal_value, delta;
11267 ++ u8 thermal_value;
11268 +
11269 + if (rtwdev->efuse.thermal_meter[path] == 0xff)
11270 + return;
11271 +
11272 + thermal_value = rtw_read_rf(rtwdev, path, RF_T_METER, 0x7e);
11273 +-
11274 + rtw_phy_pwrtrack_avg(rtwdev, thermal_value, path);
11275 ++}
11276 +
11277 +- delta = rtw_phy_pwrtrack_get_delta(rtwdev, path);
11278 ++static void rtw8822c_pwr_track_path(struct rtw_dev *rtwdev,
11279 ++ struct rtw_swing_table *swing_table,
11280 ++ u8 path)
11281 ++{
11282 ++ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
11283 ++ u8 delta;
11284 +
11285 ++ delta = rtw_phy_pwrtrack_get_delta(rtwdev, path);
11286 + dm_info->delta_power_index[path] =
11287 + rtw_phy_pwrtrack_get_pwridx(rtwdev, swing_table, path, path,
11288 + delta);
11289 +-
11290 + rtw8822c_pwrtrack_set(rtwdev, path);
11291 + }
11292 +
11293 +@@ -3540,12 +3542,12 @@ static void __rtw8822c_pwr_track(struct rtw_dev *rtwdev)
11294 +
11295 + rtw_phy_config_swing_table(rtwdev, &swing_table);
11296 +
11297 ++ for (i = 0; i < rtwdev->hal.rf_path_num; i++)
11298 ++ rtw8822c_pwr_track_stats(rtwdev, i);
11299 + if (rtw_phy_pwrtrack_need_lck(rtwdev))
11300 + rtw8822c_do_lck(rtwdev);
11301 +-
11302 + for (i = 0; i < rtwdev->hal.rf_path_num; i++)
11303 + rtw8822c_pwr_track_path(rtwdev, &swing_table, i);
11304 +-
11305 + }
11306 +
11307 + static void rtw8822c_pwr_track(struct rtw_dev *rtwdev)
11308 +diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c
11309 +index ce9892152f4d4..99b21a2c83861 100644
11310 +--- a/drivers/net/wireless/rsi/rsi_91x_hal.c
11311 ++++ b/drivers/net/wireless/rsi/rsi_91x_hal.c
11312 +@@ -203,7 +203,7 @@ int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb)
11313 + wh->frame_control |= cpu_to_le16(RSI_SET_PS_ENABLE);
11314 +
11315 + if ((!(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT)) &&
11316 +- (common->secinfo.security_enable)) {
11317 ++ info->control.hw_key) {
11318 + if (rsi_is_cipher_wep(common))
11319 + ieee80211_size += 4;
11320 + else
11321 +@@ -470,9 +470,9 @@ int rsi_prepare_beacon(struct rsi_common *common, struct sk_buff *skb)
11322 + }
11323 +
11324 + if (common->band == NL80211_BAND_2GHZ)
11325 +- bcn_frm->bbp_info |= cpu_to_le16(RSI_RATE_1);
11326 ++ bcn_frm->rate_info |= cpu_to_le16(RSI_RATE_1);
11327 + else
11328 +- bcn_frm->bbp_info |= cpu_to_le16(RSI_RATE_6);
11329 ++ bcn_frm->rate_info |= cpu_to_le16(RSI_RATE_6);
11330 +
11331 + if (mac_bcn->data[tim_offset + 2] == 0)
11332 + bcn_frm->frame_info |= cpu_to_le16(RSI_DATA_DESC_DTIM_BEACON);
11333 +diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
11334 +index 16025300cddb3..57c9e3559dfd1 100644
11335 +--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
11336 ++++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
11337 +@@ -1028,7 +1028,6 @@ static int rsi_mac80211_set_key(struct ieee80211_hw *hw,
11338 + mutex_lock(&common->mutex);
11339 + switch (cmd) {
11340 + case SET_KEY:
11341 +- secinfo->security_enable = true;
11342 + status = rsi_hal_key_config(hw, vif, key, sta);
11343 + if (status) {
11344 + mutex_unlock(&common->mutex);
11345 +@@ -1047,8 +1046,6 @@ static int rsi_mac80211_set_key(struct ieee80211_hw *hw,
11346 + break;
11347 +
11348 + case DISABLE_KEY:
11349 +- if (vif->type == NL80211_IFTYPE_STATION)
11350 +- secinfo->security_enable = false;
11351 + rsi_dbg(ERR_ZONE, "%s: RSI del key\n", __func__);
11352 + memset(key, 0, sizeof(struct ieee80211_key_conf));
11353 + status = rsi_hal_key_config(hw, vif, key, sta);
11354 +diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
11355 +index 33c76d39a8e96..b6d050a2fbe7e 100644
11356 +--- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c
11357 ++++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
11358 +@@ -1803,8 +1803,7 @@ int rsi_send_wowlan_request(struct rsi_common *common, u16 flags,
11359 + RSI_WIFI_MGMT_Q);
11360 + cmd_frame->desc.desc_dword0.frame_type = WOWLAN_CONFIG_PARAMS;
11361 + cmd_frame->host_sleep_status = sleep_status;
11362 +- if (common->secinfo.security_enable &&
11363 +- common->secinfo.gtk_cipher)
11364 ++ if (common->secinfo.gtk_cipher)
11365 + flags |= RSI_WOW_GTK_REKEY;
11366 + if (sleep_status)
11367 + cmd_frame->wow_flags = flags;
11368 +diff --git a/drivers/net/wireless/rsi/rsi_main.h b/drivers/net/wireless/rsi/rsi_main.h
11369 +index 73a19e43106b1..b3e25bc28682c 100644
11370 +--- a/drivers/net/wireless/rsi/rsi_main.h
11371 ++++ b/drivers/net/wireless/rsi/rsi_main.h
11372 +@@ -151,7 +151,6 @@ enum edca_queue {
11373 + };
11374 +
11375 + struct security_info {
11376 +- bool security_enable;
11377 + u32 ptk_cipher;
11378 + u32 gtk_cipher;
11379 + };
11380 +diff --git a/drivers/net/wireless/st/cw1200/scan.c b/drivers/net/wireless/st/cw1200/scan.c
11381 +index 988581cc134b7..1f856fbbc0ea4 100644
11382 +--- a/drivers/net/wireless/st/cw1200/scan.c
11383 ++++ b/drivers/net/wireless/st/cw1200/scan.c
11384 +@@ -75,30 +75,27 @@ int cw1200_hw_scan(struct ieee80211_hw *hw,
11385 + if (req->n_ssids > WSM_SCAN_MAX_NUM_OF_SSIDS)
11386 + return -EINVAL;
11387 +
11388 +- /* will be unlocked in cw1200_scan_work() */
11389 +- down(&priv->scan.lock);
11390 +- mutex_lock(&priv->conf_mutex);
11391 +-
11392 + frame.skb = ieee80211_probereq_get(hw, priv->vif->addr, NULL, 0,
11393 + req->ie_len);
11394 +- if (!frame.skb) {
11395 +- mutex_unlock(&priv->conf_mutex);
11396 +- up(&priv->scan.lock);
11397 ++ if (!frame.skb)
11398 + return -ENOMEM;
11399 +- }
11400 +
11401 + if (req->ie_len)
11402 + skb_put_data(frame.skb, req->ie, req->ie_len);
11403 +
11404 ++ /* will be unlocked in cw1200_scan_work() */
11405 ++ down(&priv->scan.lock);
11406 ++ mutex_lock(&priv->conf_mutex);
11407 ++
11408 + ret = wsm_set_template_frame(priv, &frame);
11409 + if (!ret) {
11410 + /* Host want to be the probe responder. */
11411 + ret = wsm_set_probe_responder(priv, true);
11412 + }
11413 + if (ret) {
11414 +- dev_kfree_skb(frame.skb);
11415 + mutex_unlock(&priv->conf_mutex);
11416 + up(&priv->scan.lock);
11417 ++ dev_kfree_skb(frame.skb);
11418 + return ret;
11419 + }
11420 +
11421 +@@ -120,8 +117,8 @@ int cw1200_hw_scan(struct ieee80211_hw *hw,
11422 + ++priv->scan.n_ssids;
11423 + }
11424 +
11425 +- dev_kfree_skb(frame.skb);
11426 + mutex_unlock(&priv->conf_mutex);
11427 ++ dev_kfree_skb(frame.skb);
11428 + queue_work(priv->workqueue, &priv->scan.work);
11429 + return 0;
11430 + }
11431 +diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
11432 +index c1f3446216c5c..3f05df98697d3 100644
11433 +--- a/drivers/nvme/host/pci.c
11434 ++++ b/drivers/nvme/host/pci.c
11435 +@@ -1027,7 +1027,7 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
11436 +
11437 + static inline void nvme_update_cq_head(struct nvme_queue *nvmeq)
11438 + {
11439 +- u16 tmp = nvmeq->cq_head + 1;
11440 ++ u32 tmp = nvmeq->cq_head + 1;
11441 +
11442 + if (tmp == nvmeq->q_depth) {
11443 + nvmeq->cq_head = 0;
11444 +@@ -2836,10 +2836,7 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
11445 + #ifdef CONFIG_ACPI
11446 + static bool nvme_acpi_storage_d3(struct pci_dev *dev)
11447 + {
11448 +- struct acpi_device *adev;
11449 +- struct pci_dev *root;
11450 +- acpi_handle handle;
11451 +- acpi_status status;
11452 ++ struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
11453 + u8 val;
11454 +
11455 + /*
11456 +@@ -2847,28 +2844,9 @@ static bool nvme_acpi_storage_d3(struct pci_dev *dev)
11457 + * must use D3 to support deep platform power savings during
11458 + * suspend-to-idle.
11459 + */
11460 +- root = pcie_find_root_port(dev);
11461 +- if (!root)
11462 +- return false;
11463 +
11464 +- adev = ACPI_COMPANION(&root->dev);
11465 + if (!adev)
11466 + return false;
11467 +-
11468 +- /*
11469 +- * The property is defined in the PXSX device for South complex ports
11470 +- * and in the PEGP device for North complex ports.
11471 +- */
11472 +- status = acpi_get_handle(adev->handle, "PXSX", &handle);
11473 +- if (ACPI_FAILURE(status)) {
11474 +- status = acpi_get_handle(adev->handle, "PEGP", &handle);
11475 +- if (ACPI_FAILURE(status))
11476 +- return false;
11477 +- }
11478 +-
11479 +- if (acpi_bus_get_device(handle, &adev))
11480 +- return false;
11481 +-
11482 + if (fwnode_property_read_u8(acpi_fwnode_handle(adev), "StorageD3Enable",
11483 + &val))
11484 + return false;
11485 +diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
11486 +index cd4e73aa98074..640031cbda7cc 100644
11487 +--- a/drivers/nvme/target/fc.c
11488 ++++ b/drivers/nvme/target/fc.c
11489 +@@ -2499,13 +2499,6 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
11490 + u32 xfrlen = be32_to_cpu(cmdiu->data_len);
11491 + int ret;
11492 +
11493 +- /*
11494 +- * if there is no nvmet mapping to the targetport there
11495 +- * shouldn't be requests. just terminate them.
11496 +- */
11497 +- if (!tgtport->pe)
11498 +- goto transport_error;
11499 +-
11500 + /*
11501 + * Fused commands are currently not supported in the linux
11502 + * implementation.
11503 +@@ -2533,7 +2526,8 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
11504 +
11505 + fod->req.cmd = &fod->cmdiubuf.sqe;
11506 + fod->req.cqe = &fod->rspiubuf.cqe;
11507 +- fod->req.port = tgtport->pe->port;
11508 ++ if (tgtport->pe)
11509 ++ fod->req.port = tgtport->pe->port;
11510 +
11511 + /* clear any response payload */
11512 + memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));
11513 +diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
11514 +index f2e697000b96f..57ff31b6b1e47 100644
11515 +--- a/drivers/of/fdt.c
11516 ++++ b/drivers/of/fdt.c
11517 +@@ -501,11 +501,11 @@ static int __init __reserved_mem_reserve_reg(unsigned long node,
11518 +
11519 + if (size &&
11520 + early_init_dt_reserve_memory_arch(base, size, nomap) == 0)
11521 +- pr_debug("Reserved memory: reserved region for node '%s': base %pa, size %ld MiB\n",
11522 +- uname, &base, (unsigned long)size / SZ_1M);
11523 ++ pr_debug("Reserved memory: reserved region for node '%s': base %pa, size %lu MiB\n",
11524 ++ uname, &base, (unsigned long)(size / SZ_1M));
11525 + else
11526 +- pr_info("Reserved memory: failed to reserve memory for node '%s': base %pa, size %ld MiB\n",
11527 +- uname, &base, (unsigned long)size / SZ_1M);
11528 ++ pr_info("Reserved memory: failed to reserve memory for node '%s': base %pa, size %lu MiB\n",
11529 ++ uname, &base, (unsigned long)(size / SZ_1M));
11530 +
11531 + len -= t_len;
11532 + if (first) {
11533 +diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
11534 +index a7fbc5e37e19e..6c95bbdf9265a 100644
11535 +--- a/drivers/of/of_reserved_mem.c
11536 ++++ b/drivers/of/of_reserved_mem.c
11537 +@@ -134,9 +134,9 @@ static int __init __reserved_mem_alloc_size(unsigned long node,
11538 + ret = early_init_dt_alloc_reserved_memory_arch(size,
11539 + align, start, end, nomap, &base);
11540 + if (ret == 0) {
11541 +- pr_debug("allocated memory for '%s' node: base %pa, size %ld MiB\n",
11542 ++ pr_debug("allocated memory for '%s' node: base %pa, size %lu MiB\n",
11543 + uname, &base,
11544 +- (unsigned long)size / SZ_1M);
11545 ++ (unsigned long)(size / SZ_1M));
11546 + break;
11547 + }
11548 + len -= t_len;
11549 +@@ -146,8 +146,8 @@ static int __init __reserved_mem_alloc_size(unsigned long node,
11550 + ret = early_init_dt_alloc_reserved_memory_arch(size, align,
11551 + 0, 0, nomap, &base);
11552 + if (ret == 0)
11553 +- pr_debug("allocated memory for '%s' node: base %pa, size %ld MiB\n",
11554 +- uname, &base, (unsigned long)size / SZ_1M);
11555 ++ pr_debug("allocated memory for '%s' node: base %pa, size %lu MiB\n",
11556 ++ uname, &base, (unsigned long)(size / SZ_1M));
11557 + }
11558 +
11559 + if (base == 0) {
11560 +diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
11561 +index 03ed5cb1c4b25..d57c538bbb2db 100644
11562 +--- a/drivers/pci/controller/pci-hyperv.c
11563 ++++ b/drivers/pci/controller/pci-hyperv.c
11564 +@@ -3480,6 +3480,9 @@ static void __exit exit_hv_pci_drv(void)
11565 +
11566 + static int __init init_hv_pci_drv(void)
11567 + {
11568 ++ if (!hv_is_hyperv_initialized())
11569 ++ return -ENODEV;
11570 ++
11571 + /* Set the invalid domain number's bit, so it will not be used */
11572 + set_bit(HVPCI_DOM_INVALID, hvpci_dom_map);
11573 +
11574 +diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
11575 +index 46defb1dcf867..bb019e3839888 100644
11576 +--- a/drivers/perf/arm-cmn.c
11577 ++++ b/drivers/perf/arm-cmn.c
11578 +@@ -1212,7 +1212,7 @@ static int arm_cmn_init_irqs(struct arm_cmn *cmn)
11579 + irq = cmn->dtc[i].irq;
11580 + for (j = i; j--; ) {
11581 + if (cmn->dtc[j].irq == irq) {
11582 +- cmn->dtc[j].irq_friend = j - i;
11583 ++ cmn->dtc[j].irq_friend = i - j;
11584 + goto next;
11585 + }
11586 + }
11587 +diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c
11588 +index 5274f7fe359eb..afa8efbdad8fa 100644
11589 +--- a/drivers/perf/arm_smmuv3_pmu.c
11590 ++++ b/drivers/perf/arm_smmuv3_pmu.c
11591 +@@ -275,7 +275,7 @@ static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu,
11592 + struct perf_event *event, int idx)
11593 + {
11594 + u32 span, sid;
11595 +- unsigned int num_ctrs = smmu_pmu->num_counters;
11596 ++ unsigned int cur_idx, num_ctrs = smmu_pmu->num_counters;
11597 + bool filter_en = !!get_filter_enable(event);
11598 +
11599 + span = filter_en ? get_filter_span(event) :
11600 +@@ -283,17 +283,19 @@ static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu,
11601 + sid = filter_en ? get_filter_stream_id(event) :
11602 + SMMU_PMCG_DEFAULT_FILTER_SID;
11603 +
11604 +- /* Support individual filter settings */
11605 +- if (!smmu_pmu->global_filter) {
11606 ++ cur_idx = find_first_bit(smmu_pmu->used_counters, num_ctrs);
11607 ++ /*
11608 ++ * Per-counter filtering, or scheduling the first globally-filtered
11609 ++ * event into an empty PMU so idx == 0 and it works out equivalent.
11610 ++ */
11611 ++ if (!smmu_pmu->global_filter || cur_idx == num_ctrs) {
11612 + smmu_pmu_set_event_filter(event, idx, span, sid);
11613 + return 0;
11614 + }
11615 +
11616 +- /* Requested settings same as current global settings*/
11617 +- idx = find_first_bit(smmu_pmu->used_counters, num_ctrs);
11618 +- if (idx == num_ctrs ||
11619 +- smmu_pmu_check_global_filter(smmu_pmu->events[idx], event)) {
11620 +- smmu_pmu_set_event_filter(event, 0, span, sid);
11621 ++ /* Otherwise, must match whatever's currently scheduled */
11622 ++ if (smmu_pmu_check_global_filter(smmu_pmu->events[cur_idx], event)) {
11623 ++ smmu_pmu_set_evtyper(smmu_pmu, idx, get_event(event));
11624 + return 0;
11625 + }
11626 +
11627 +diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c
11628 +index 397540a4b799c..7f7bc0993670f 100644
11629 +--- a/drivers/perf/fsl_imx8_ddr_perf.c
11630 ++++ b/drivers/perf/fsl_imx8_ddr_perf.c
11631 +@@ -623,8 +623,10 @@ static int ddr_perf_probe(struct platform_device *pdev)
11632 +
11633 + name = devm_kasprintf(&pdev->dev, GFP_KERNEL, DDR_PERF_DEV_NAME "%d",
11634 + num);
11635 +- if (!name)
11636 +- return -ENOMEM;
11637 ++ if (!name) {
11638 ++ ret = -ENOMEM;
11639 ++ goto cpuhp_state_err;
11640 ++ }
11641 +
11642 + pmu->devtype_data = of_device_get_match_data(&pdev->dev);
11643 +
11644 +diff --git a/drivers/phy/socionext/phy-uniphier-pcie.c b/drivers/phy/socionext/phy-uniphier-pcie.c
11645 +index e4adab375c737..6bdbd1f214dd4 100644
11646 +--- a/drivers/phy/socionext/phy-uniphier-pcie.c
11647 ++++ b/drivers/phy/socionext/phy-uniphier-pcie.c
11648 +@@ -24,11 +24,13 @@
11649 + #define PORT_SEL_1 FIELD_PREP(PORT_SEL_MASK, 1)
11650 +
11651 + #define PCL_PHY_TEST_I 0x2000
11652 +-#define PCL_PHY_TEST_O 0x2004
11653 + #define TESTI_DAT_MASK GENMASK(13, 6)
11654 + #define TESTI_ADR_MASK GENMASK(5, 1)
11655 + #define TESTI_WR_EN BIT(0)
11656 +
11657 ++#define PCL_PHY_TEST_O 0x2004
11658 ++#define TESTO_DAT_MASK GENMASK(7, 0)
11659 ++
11660 + #define PCL_PHY_RESET 0x200c
11661 + #define PCL_PHY_RESET_N_MNMODE BIT(8) /* =1:manual */
11662 + #define PCL_PHY_RESET_N BIT(0) /* =1:deasssert */
11663 +@@ -77,11 +79,12 @@ static void uniphier_pciephy_set_param(struct uniphier_pciephy_priv *priv,
11664 + val = FIELD_PREP(TESTI_DAT_MASK, 1);
11665 + val |= FIELD_PREP(TESTI_ADR_MASK, reg);
11666 + uniphier_pciephy_testio_write(priv, val);
11667 +- val = readl(priv->base + PCL_PHY_TEST_O);
11668 ++ val = readl(priv->base + PCL_PHY_TEST_O) & TESTO_DAT_MASK;
11669 +
11670 + /* update value */
11671 +- val &= ~FIELD_PREP(TESTI_DAT_MASK, mask);
11672 +- val = FIELD_PREP(TESTI_DAT_MASK, mask & param);
11673 ++ val &= ~mask;
11674 ++ val |= mask & param;
11675 ++ val = FIELD_PREP(TESTI_DAT_MASK, val);
11676 + val |= FIELD_PREP(TESTI_ADR_MASK, reg);
11677 + uniphier_pciephy_testio_write(priv, val);
11678 + uniphier_pciephy_testio_write(priv, val | TESTI_WR_EN);
11679 +diff --git a/drivers/phy/ti/phy-dm816x-usb.c b/drivers/phy/ti/phy-dm816x-usb.c
11680 +index 57adc08a89b2d..9fe6ea6fdae55 100644
11681 +--- a/drivers/phy/ti/phy-dm816x-usb.c
11682 ++++ b/drivers/phy/ti/phy-dm816x-usb.c
11683 +@@ -242,19 +242,28 @@ static int dm816x_usb_phy_probe(struct platform_device *pdev)
11684 +
11685 + pm_runtime_enable(phy->dev);
11686 + generic_phy = devm_phy_create(phy->dev, NULL, &ops);
11687 +- if (IS_ERR(generic_phy))
11688 +- return PTR_ERR(generic_phy);
11689 ++ if (IS_ERR(generic_phy)) {
11690 ++ error = PTR_ERR(generic_phy);
11691 ++ goto clk_unprepare;
11692 ++ }
11693 +
11694 + phy_set_drvdata(generic_phy, phy);
11695 +
11696 + phy_provider = devm_of_phy_provider_register(phy->dev,
11697 + of_phy_simple_xlate);
11698 +- if (IS_ERR(phy_provider))
11699 +- return PTR_ERR(phy_provider);
11700 ++ if (IS_ERR(phy_provider)) {
11701 ++ error = PTR_ERR(phy_provider);
11702 ++ goto clk_unprepare;
11703 ++ }
11704 +
11705 + usb_add_phy_dev(&phy->phy);
11706 +
11707 + return 0;
11708 ++
11709 ++clk_unprepare:
11710 ++ pm_runtime_disable(phy->dev);
11711 ++ clk_unprepare(phy->refclk);
11712 ++ return error;
11713 + }
11714 +
11715 + static int dm816x_usb_phy_remove(struct platform_device *pdev)
11716 +diff --git a/drivers/pinctrl/renesas/pfc-r8a7796.c b/drivers/pinctrl/renesas/pfc-r8a7796.c
11717 +index 55f0344a3d3e9..3878d6b0db149 100644
11718 +--- a/drivers/pinctrl/renesas/pfc-r8a7796.c
11719 ++++ b/drivers/pinctrl/renesas/pfc-r8a7796.c
11720 +@@ -68,6 +68,7 @@
11721 + PIN_NOGP_CFG(QSPI1_MOSI_IO0, "QSPI1_MOSI_IO0", fn, CFG_FLAGS), \
11722 + PIN_NOGP_CFG(QSPI1_SPCLK, "QSPI1_SPCLK", fn, CFG_FLAGS), \
11723 + PIN_NOGP_CFG(QSPI1_SSL, "QSPI1_SSL", fn, CFG_FLAGS), \
11724 ++ PIN_NOGP_CFG(PRESET_N, "PRESET#", fn, SH_PFC_PIN_CFG_PULL_DOWN),\
11725 + PIN_NOGP_CFG(RPC_INT_N, "RPC_INT#", fn, CFG_FLAGS), \
11726 + PIN_NOGP_CFG(RPC_RESET_N, "RPC_RESET#", fn, CFG_FLAGS), \
11727 + PIN_NOGP_CFG(RPC_WP_N, "RPC_WP#", fn, CFG_FLAGS), \
11728 +@@ -6109,7 +6110,7 @@ static const struct pinmux_bias_reg pinmux_bias_regs[] = {
11729 + [ 4] = RCAR_GP_PIN(6, 29), /* USB30_OVC */
11730 + [ 5] = RCAR_GP_PIN(6, 30), /* GP6_30 */
11731 + [ 6] = RCAR_GP_PIN(6, 31), /* GP6_31 */
11732 +- [ 7] = SH_PFC_PIN_NONE,
11733 ++ [ 7] = PIN_PRESET_N, /* PRESET# */
11734 + [ 8] = SH_PFC_PIN_NONE,
11735 + [ 9] = SH_PFC_PIN_NONE,
11736 + [10] = SH_PFC_PIN_NONE,
11737 +diff --git a/drivers/pinctrl/renesas/pfc-r8a77990.c b/drivers/pinctrl/renesas/pfc-r8a77990.c
11738 +index aed04a4c61163..240aadc4611fb 100644
11739 +--- a/drivers/pinctrl/renesas/pfc-r8a77990.c
11740 ++++ b/drivers/pinctrl/renesas/pfc-r8a77990.c
11741 +@@ -54,10 +54,10 @@
11742 + PIN_NOGP_CFG(FSCLKST_N, "FSCLKST_N", fn, CFG_FLAGS), \
11743 + PIN_NOGP_CFG(MLB_REF, "MLB_REF", fn, CFG_FLAGS), \
11744 + PIN_NOGP_CFG(PRESETOUT_N, "PRESETOUT_N", fn, CFG_FLAGS), \
11745 +- PIN_NOGP_CFG(TCK, "TCK", fn, CFG_FLAGS), \
11746 +- PIN_NOGP_CFG(TDI, "TDI", fn, CFG_FLAGS), \
11747 +- PIN_NOGP_CFG(TMS, "TMS", fn, CFG_FLAGS), \
11748 +- PIN_NOGP_CFG(TRST_N, "TRST_N", fn, CFG_FLAGS)
11749 ++ PIN_NOGP_CFG(TCK, "TCK", fn, SH_PFC_PIN_CFG_PULL_UP), \
11750 ++ PIN_NOGP_CFG(TDI, "TDI", fn, SH_PFC_PIN_CFG_PULL_UP), \
11751 ++ PIN_NOGP_CFG(TMS, "TMS", fn, SH_PFC_PIN_CFG_PULL_UP), \
11752 ++ PIN_NOGP_CFG(TRST_N, "TRST_N", fn, SH_PFC_PIN_CFG_PULL_UP)
11753 +
11754 + /*
11755 + * F_() : just information
11756 +diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
11757 +index 1d9fbabd02fb7..949ddeb673bc5 100644
11758 +--- a/drivers/platform/x86/asus-nb-wmi.c
11759 ++++ b/drivers/platform/x86/asus-nb-wmi.c
11760 +@@ -110,11 +110,6 @@ static struct quirk_entry quirk_asus_forceals = {
11761 + .wmi_force_als_set = true,
11762 + };
11763 +
11764 +-static struct quirk_entry quirk_asus_vendor_backlight = {
11765 +- .wmi_backlight_power = true,
11766 +- .wmi_backlight_set_devstate = true,
11767 +-};
11768 +-
11769 + static struct quirk_entry quirk_asus_use_kbd_dock_devid = {
11770 + .use_kbd_dock_devid = true,
11771 + };
11772 +@@ -420,78 +415,6 @@ static const struct dmi_system_id asus_quirks[] = {
11773 + },
11774 + .driver_data = &quirk_asus_forceals,
11775 + },
11776 +- {
11777 +- .callback = dmi_matched,
11778 +- .ident = "ASUSTeK COMPUTER INC. GA401IH",
11779 +- .matches = {
11780 +- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
11781 +- DMI_MATCH(DMI_PRODUCT_NAME, "GA401IH"),
11782 +- },
11783 +- .driver_data = &quirk_asus_vendor_backlight,
11784 +- },
11785 +- {
11786 +- .callback = dmi_matched,
11787 +- .ident = "ASUSTeK COMPUTER INC. GA401II",
11788 +- .matches = {
11789 +- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
11790 +- DMI_MATCH(DMI_PRODUCT_NAME, "GA401II"),
11791 +- },
11792 +- .driver_data = &quirk_asus_vendor_backlight,
11793 +- },
11794 +- {
11795 +- .callback = dmi_matched,
11796 +- .ident = "ASUSTeK COMPUTER INC. GA401IU",
11797 +- .matches = {
11798 +- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
11799 +- DMI_MATCH(DMI_PRODUCT_NAME, "GA401IU"),
11800 +- },
11801 +- .driver_data = &quirk_asus_vendor_backlight,
11802 +- },
11803 +- {
11804 +- .callback = dmi_matched,
11805 +- .ident = "ASUSTeK COMPUTER INC. GA401IV",
11806 +- .matches = {
11807 +- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
11808 +- DMI_MATCH(DMI_PRODUCT_NAME, "GA401IV"),
11809 +- },
11810 +- .driver_data = &quirk_asus_vendor_backlight,
11811 +- },
11812 +- {
11813 +- .callback = dmi_matched,
11814 +- .ident = "ASUSTeK COMPUTER INC. GA401IVC",
11815 +- .matches = {
11816 +- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
11817 +- DMI_MATCH(DMI_PRODUCT_NAME, "GA401IVC"),
11818 +- },
11819 +- .driver_data = &quirk_asus_vendor_backlight,
11820 +- },
11821 +- {
11822 +- .callback = dmi_matched,
11823 +- .ident = "ASUSTeK COMPUTER INC. GA502II",
11824 +- .matches = {
11825 +- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
11826 +- DMI_MATCH(DMI_PRODUCT_NAME, "GA502II"),
11827 +- },
11828 +- .driver_data = &quirk_asus_vendor_backlight,
11829 +- },
11830 +- {
11831 +- .callback = dmi_matched,
11832 +- .ident = "ASUSTeK COMPUTER INC. GA502IU",
11833 +- .matches = {
11834 +- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
11835 +- DMI_MATCH(DMI_PRODUCT_NAME, "GA502IU"),
11836 +- },
11837 +- .driver_data = &quirk_asus_vendor_backlight,
11838 +- },
11839 +- {
11840 +- .callback = dmi_matched,
11841 +- .ident = "ASUSTeK COMPUTER INC. GA502IV",
11842 +- .matches = {
11843 +- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
11844 +- DMI_MATCH(DMI_PRODUCT_NAME, "GA502IV"),
11845 +- },
11846 +- .driver_data = &quirk_asus_vendor_backlight,
11847 +- },
11848 + {
11849 + .callback = dmi_matched,
11850 + .ident = "Asus Transformer T100TA / T100HA / T100CHI",
11851 +diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
11852 +index fa7232ad8c395..352508d304675 100644
11853 +--- a/drivers/platform/x86/toshiba_acpi.c
11854 ++++ b/drivers/platform/x86/toshiba_acpi.c
11855 +@@ -2831,6 +2831,7 @@ static int toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev)
11856 +
11857 + if (!dev->info_supported && !dev->system_event_supported) {
11858 + pr_warn("No hotkey query interface found\n");
11859 ++ error = -EINVAL;
11860 + goto err_remove_filter;
11861 + }
11862 +
11863 +diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
11864 +index 3743d895399e7..99260915122c0 100644
11865 +--- a/drivers/platform/x86/touchscreen_dmi.c
11866 ++++ b/drivers/platform/x86/touchscreen_dmi.c
11867 +@@ -299,6 +299,35 @@ static const struct ts_dmi_data estar_beauty_hd_data = {
11868 + .properties = estar_beauty_hd_props,
11869 + };
11870 +
11871 ++/* Generic props + data for upside-down mounted GDIX1001 touchscreens */
11872 ++static const struct property_entry gdix1001_upside_down_props[] = {
11873 ++ PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
11874 ++ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
11875 ++ { }
11876 ++};
11877 ++
11878 ++static const struct ts_dmi_data gdix1001_00_upside_down_data = {
11879 ++ .acpi_name = "GDIX1001:00",
11880 ++ .properties = gdix1001_upside_down_props,
11881 ++};
11882 ++
11883 ++static const struct ts_dmi_data gdix1001_01_upside_down_data = {
11884 ++ .acpi_name = "GDIX1001:01",
11885 ++ .properties = gdix1001_upside_down_props,
11886 ++};
11887 ++
11888 ++static const struct property_entry glavey_tm800a550l_props[] = {
11889 ++ PROPERTY_ENTRY_STRING("firmware-name", "gt912-glavey-tm800a550l.fw"),
11890 ++ PROPERTY_ENTRY_STRING("goodix,config-name", "gt912-glavey-tm800a550l.cfg"),
11891 ++ PROPERTY_ENTRY_U32("goodix,main-clk", 54),
11892 ++ { }
11893 ++};
11894 ++
11895 ++static const struct ts_dmi_data glavey_tm800a550l_data = {
11896 ++ .acpi_name = "GDIX1001:00",
11897 ++ .properties = glavey_tm800a550l_props,
11898 ++};
11899 ++
11900 + static const struct property_entry gp_electronic_t701_props[] = {
11901 + PROPERTY_ENTRY_U32("touchscreen-size-x", 960),
11902 + PROPERTY_ENTRY_U32("touchscreen-size-y", 640),
11903 +@@ -995,6 +1024,15 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
11904 + DMI_MATCH(DMI_PRODUCT_NAME, "eSTAR BEAUTY HD Intel Quad core"),
11905 + },
11906 + },
11907 ++ { /* Glavey TM800A550L */
11908 ++ .driver_data = (void *)&glavey_tm800a550l_data,
11909 ++ .matches = {
11910 ++ DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
11911 ++ DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
11912 ++ /* Above strings are too generic, also match on BIOS version */
11913 ++ DMI_MATCH(DMI_BIOS_VERSION, "ZY-8-BI-PX4S70VTR400-X423B-005-D"),
11914 ++ },
11915 ++ },
11916 + {
11917 + /* GP-electronic T701 */
11918 + .driver_data = (void *)&gp_electronic_t701_data,
11919 +@@ -1268,6 +1306,24 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
11920 + DMI_MATCH(DMI_BOARD_NAME, "X3 Plus"),
11921 + },
11922 + },
11923 ++ {
11924 ++ /* Teclast X89 (Android version / BIOS) */
11925 ++ .driver_data = (void *)&gdix1001_00_upside_down_data,
11926 ++ .matches = {
11927 ++ DMI_MATCH(DMI_BOARD_VENDOR, "WISKY"),
11928 ++ DMI_MATCH(DMI_BOARD_NAME, "3G062i"),
11929 ++ },
11930 ++ },
11931 ++ {
11932 ++ /* Teclast X89 (Windows version / BIOS) */
11933 ++ .driver_data = (void *)&gdix1001_01_upside_down_data,
11934 ++ .matches = {
11935 ++ /* tPAD is too generic, also match on bios date */
11936 ++ DMI_MATCH(DMI_BOARD_VENDOR, "TECLAST"),
11937 ++ DMI_MATCH(DMI_BOARD_NAME, "tPAD"),
11938 ++ DMI_MATCH(DMI_BIOS_DATE, "12/19/2014"),
11939 ++ },
11940 ++ },
11941 + {
11942 + /* Teclast X98 Plus II */
11943 + .driver_data = (void *)&teclast_x98plus2_data,
11944 +@@ -1276,6 +1332,19 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
11945 + DMI_MATCH(DMI_PRODUCT_NAME, "X98 Plus II"),
11946 + },
11947 + },
11948 ++ {
11949 ++ /* Teclast X98 Pro */
11950 ++ .driver_data = (void *)&gdix1001_00_upside_down_data,
11951 ++ .matches = {
11952 ++ /*
11953 ++ * Only match BIOS date, because the manufacturers
11954 ++ * BIOS does not report the board name at all
11955 ++ * (sometimes)...
11956 ++ */
11957 ++ DMI_MATCH(DMI_BOARD_VENDOR, "TECLAST"),
11958 ++ DMI_MATCH(DMI_BIOS_DATE, "10/28/2015"),
11959 ++ },
11960 ++ },
11961 + {
11962 + /* Trekstor Primebook C11 */
11963 + .driver_data = (void *)&trekstor_primebook_c11_data,
11964 +@@ -1351,6 +1420,22 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
11965 + DMI_MATCH(DMI_PRODUCT_NAME, "VINGA Twizzle J116"),
11966 + },
11967 + },
11968 ++ {
11969 ++ /* "WinBook TW100" */
11970 ++ .driver_data = (void *)&gdix1001_00_upside_down_data,
11971 ++ .matches = {
11972 ++ DMI_MATCH(DMI_SYS_VENDOR, "WinBook"),
11973 ++ DMI_MATCH(DMI_PRODUCT_NAME, "TW100")
11974 ++ }
11975 ++ },
11976 ++ {
11977 ++ /* WinBook TW700 */
11978 ++ .driver_data = (void *)&gdix1001_00_upside_down_data,
11979 ++ .matches = {
11980 ++ DMI_MATCH(DMI_SYS_VENDOR, "WinBook"),
11981 ++ DMI_MATCH(DMI_PRODUCT_NAME, "TW700")
11982 ++ },
11983 ++ },
11984 + {
11985 + /* Yours Y8W81, same case and touchscreen as Chuwi Vi8 */
11986 + .driver_data = (void *)&chuwi_vi8_data,
11987 +diff --git a/drivers/regulator/da9052-regulator.c b/drivers/regulator/da9052-regulator.c
11988 +index e18d291c7f21c..23fa429ebe760 100644
11989 +--- a/drivers/regulator/da9052-regulator.c
11990 ++++ b/drivers/regulator/da9052-regulator.c
11991 +@@ -250,7 +250,8 @@ static int da9052_regulator_set_voltage_time_sel(struct regulator_dev *rdev,
11992 + case DA9052_ID_BUCK3:
11993 + case DA9052_ID_LDO2:
11994 + case DA9052_ID_LDO3:
11995 +- ret = (new_sel - old_sel) * info->step_uV / 6250;
11996 ++ ret = DIV_ROUND_UP(abs(new_sel - old_sel) * info->step_uV,
11997 ++ 6250);
11998 + break;
11999 + }
12000 +
12001 +diff --git a/drivers/regulator/fan53880.c b/drivers/regulator/fan53880.c
12002 +index 1684faf82ed25..94f02f3099dd4 100644
12003 +--- a/drivers/regulator/fan53880.c
12004 ++++ b/drivers/regulator/fan53880.c
12005 +@@ -79,7 +79,7 @@ static const struct regulator_desc fan53880_regulators[] = {
12006 + .n_linear_ranges = 2,
12007 + .n_voltages = 0xf8,
12008 + .vsel_reg = FAN53880_BUCKVOUT,
12009 +- .vsel_mask = 0x7f,
12010 ++ .vsel_mask = 0xff,
12011 + .enable_reg = FAN53880_ENABLE,
12012 + .enable_mask = 0x10,
12013 + .enable_time = 480,
12014 +diff --git a/drivers/regulator/hi655x-regulator.c b/drivers/regulator/hi655x-regulator.c
12015 +index ac2ee2030211a..b44f492a2b832 100644
12016 +--- a/drivers/regulator/hi655x-regulator.c
12017 ++++ b/drivers/regulator/hi655x-regulator.c
12018 +@@ -72,7 +72,7 @@ enum hi655x_regulator_id {
12019 + static int hi655x_is_enabled(struct regulator_dev *rdev)
12020 + {
12021 + unsigned int value = 0;
12022 +- struct hi655x_regulator *regulator = rdev_get_drvdata(rdev);
12023 ++ const struct hi655x_regulator *regulator = rdev_get_drvdata(rdev);
12024 +
12025 + regmap_read(rdev->regmap, regulator->status_reg, &value);
12026 + return (value & rdev->desc->enable_mask);
12027 +@@ -80,7 +80,7 @@ static int hi655x_is_enabled(struct regulator_dev *rdev)
12028 +
12029 + static int hi655x_disable(struct regulator_dev *rdev)
12030 + {
12031 +- struct hi655x_regulator *regulator = rdev_get_drvdata(rdev);
12032 ++ const struct hi655x_regulator *regulator = rdev_get_drvdata(rdev);
12033 +
12034 + return regmap_write(rdev->regmap, regulator->disable_reg,
12035 + rdev->desc->enable_mask);
12036 +@@ -169,7 +169,6 @@ static const struct hi655x_regulator regulators[] = {
12037 + static int hi655x_regulator_probe(struct platform_device *pdev)
12038 + {
12039 + unsigned int i;
12040 +- struct hi655x_regulator *regulator;
12041 + struct hi655x_pmic *pmic;
12042 + struct regulator_config config = { };
12043 + struct regulator_dev *rdev;
12044 +@@ -180,22 +179,17 @@ static int hi655x_regulator_probe(struct platform_device *pdev)
12045 + return -ENODEV;
12046 + }
12047 +
12048 +- regulator = devm_kzalloc(&pdev->dev, sizeof(*regulator), GFP_KERNEL);
12049 +- if (!regulator)
12050 +- return -ENOMEM;
12051 +-
12052 +- platform_set_drvdata(pdev, regulator);
12053 +-
12054 + config.dev = pdev->dev.parent;
12055 + config.regmap = pmic->regmap;
12056 +- config.driver_data = regulator;
12057 + for (i = 0; i < ARRAY_SIZE(regulators); i++) {
12058 ++ config.driver_data = (void *) &regulators[i];
12059 ++
12060 + rdev = devm_regulator_register(&pdev->dev,
12061 + &regulators[i].rdesc,
12062 + &config);
12063 + if (IS_ERR(rdev)) {
12064 + dev_err(&pdev->dev, "failed to register regulator %s\n",
12065 +- regulator->rdesc.name);
12066 ++ regulators[i].rdesc.name);
12067 + return PTR_ERR(rdev);
12068 + }
12069 + }
12070 +diff --git a/drivers/regulator/mt6358-regulator.c b/drivers/regulator/mt6358-regulator.c
12071 +index 13cb6ac9a8929..1d4eb5dc4fac8 100644
12072 +--- a/drivers/regulator/mt6358-regulator.c
12073 ++++ b/drivers/regulator/mt6358-regulator.c
12074 +@@ -457,7 +457,7 @@ static struct mt6358_regulator_info mt6358_regulators[] = {
12075 + MT6358_REG_FIXED("ldo_vaud28", VAUD28,
12076 + MT6358_LDO_VAUD28_CON0, 0, 2800000),
12077 + MT6358_LDO("ldo_vdram2", VDRAM2, vdram2_voltages, vdram2_idx,
12078 +- MT6358_LDO_VDRAM2_CON0, 0, MT6358_LDO_VDRAM2_ELR0, 0x10, 0),
12079 ++ MT6358_LDO_VDRAM2_CON0, 0, MT6358_LDO_VDRAM2_ELR0, 0xf, 0),
12080 + MT6358_LDO("ldo_vsim1", VSIM1, vsim_voltages, vsim_idx,
12081 + MT6358_LDO_VSIM1_CON0, 0, MT6358_VSIM1_ANA_CON0, 0xf00, 8),
12082 + MT6358_LDO("ldo_vibr", VIBR, vibr_voltages, vibr_idx,
12083 +diff --git a/drivers/regulator/uniphier-regulator.c b/drivers/regulator/uniphier-regulator.c
12084 +index 2e02e26b516c4..e75b0973e3256 100644
12085 +--- a/drivers/regulator/uniphier-regulator.c
12086 ++++ b/drivers/regulator/uniphier-regulator.c
12087 +@@ -201,6 +201,7 @@ static const struct of_device_id uniphier_regulator_match[] = {
12088 + },
12089 + { /* Sentinel */ },
12090 + };
12091 ++MODULE_DEVICE_TABLE(of, uniphier_regulator_match);
12092 +
12093 + static struct platform_driver uniphier_regulator_driver = {
12094 + .probe = uniphier_regulator_probe,
12095 +diff --git a/drivers/rtc/rtc-stm32.c b/drivers/rtc/rtc-stm32.c
12096 +index d774aa18f57a5..d096b58cd06c1 100644
12097 +--- a/drivers/rtc/rtc-stm32.c
12098 ++++ b/drivers/rtc/rtc-stm32.c
12099 +@@ -754,7 +754,7 @@ static int stm32_rtc_probe(struct platform_device *pdev)
12100 +
12101 + ret = clk_prepare_enable(rtc->rtc_ck);
12102 + if (ret)
12103 +- goto err;
12104 ++ goto err_no_rtc_ck;
12105 +
12106 + if (rtc->data->need_dbp)
12107 + regmap_update_bits(rtc->dbp, rtc->dbp_reg,
12108 +@@ -830,10 +830,12 @@ static int stm32_rtc_probe(struct platform_device *pdev)
12109 + }
12110 +
12111 + return 0;
12112 ++
12113 + err:
12114 ++ clk_disable_unprepare(rtc->rtc_ck);
12115 ++err_no_rtc_ck:
12116 + if (rtc->data->has_pclk)
12117 + clk_disable_unprepare(rtc->pclk);
12118 +- clk_disable_unprepare(rtc->rtc_ck);
12119 +
12120 + if (rtc->data->need_dbp)
12121 + regmap_update_bits(rtc->dbp, rtc->dbp_reg, rtc->dbp_mask, 0);
12122 +diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
12123 +index dfcbe54591fbd..93e22785a0e09 100644
12124 +--- a/drivers/s390/cio/chp.c
12125 ++++ b/drivers/s390/cio/chp.c
12126 +@@ -255,6 +255,9 @@ static ssize_t chp_status_write(struct device *dev,
12127 + if (!num_args)
12128 + return count;
12129 +
12130 ++ /* Wait until previous actions have settled. */
12131 ++ css_wait_for_slow_path();
12132 ++
12133 + if (!strncasecmp(cmd, "on", 2) || !strcmp(cmd, "1")) {
12134 + mutex_lock(&cp->lock);
12135 + error = s390_vary_chpid(cp->chpid, 1);
12136 +diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
12137 +index fc06a40021688..93aa7eabe8b1f 100644
12138 +--- a/drivers/s390/cio/chsc.c
12139 ++++ b/drivers/s390/cio/chsc.c
12140 +@@ -757,8 +757,6 @@ int chsc_chp_vary(struct chp_id chpid, int on)
12141 + {
12142 + struct channel_path *chp = chpid_to_chp(chpid);
12143 +
12144 +- /* Wait until previous actions have settled. */
12145 +- css_wait_for_slow_path();
12146 + /*
12147 + * Redo PathVerification on the devices the chpid connects to
12148 + */
12149 +diff --git a/drivers/scsi/FlashPoint.c b/drivers/scsi/FlashPoint.c
12150 +index 24ace18240480..ec8a621d232d6 100644
12151 +--- a/drivers/scsi/FlashPoint.c
12152 ++++ b/drivers/scsi/FlashPoint.c
12153 +@@ -40,7 +40,7 @@ struct sccb_mgr_info {
12154 + u16 si_per_targ_ultra_nego;
12155 + u16 si_per_targ_no_disc;
12156 + u16 si_per_targ_wide_nego;
12157 +- u16 si_flags;
12158 ++ u16 si_mflags;
12159 + unsigned char si_card_family;
12160 + unsigned char si_bustype;
12161 + unsigned char si_card_model[3];
12162 +@@ -1073,22 +1073,22 @@ static int FlashPoint_ProbeHostAdapter(struct sccb_mgr_info *pCardInfo)
12163 + ScamFlg =
12164 + (unsigned char)FPT_utilEERead(ioport, SCAM_CONFIG / 2);
12165 +
12166 +- pCardInfo->si_flags = 0x0000;
12167 ++ pCardInfo->si_mflags = 0x0000;
12168 +
12169 + if (i & 0x01)
12170 +- pCardInfo->si_flags |= SCSI_PARITY_ENA;
12171 ++ pCardInfo->si_mflags |= SCSI_PARITY_ENA;
12172 +
12173 + if (!(i & 0x02))
12174 +- pCardInfo->si_flags |= SOFT_RESET;
12175 ++ pCardInfo->si_mflags |= SOFT_RESET;
12176 +
12177 + if (i & 0x10)
12178 +- pCardInfo->si_flags |= EXTENDED_TRANSLATION;
12179 ++ pCardInfo->si_mflags |= EXTENDED_TRANSLATION;
12180 +
12181 + if (ScamFlg & SCAM_ENABLED)
12182 +- pCardInfo->si_flags |= FLAG_SCAM_ENABLED;
12183 ++ pCardInfo->si_mflags |= FLAG_SCAM_ENABLED;
12184 +
12185 + if (ScamFlg & SCAM_LEVEL2)
12186 +- pCardInfo->si_flags |= FLAG_SCAM_LEVEL2;
12187 ++ pCardInfo->si_mflags |= FLAG_SCAM_LEVEL2;
12188 +
12189 + j = (RD_HARPOON(ioport + hp_bm_ctrl) & ~SCSI_TERM_ENA_L);
12190 + if (i & 0x04) {
12191 +@@ -1104,7 +1104,7 @@ static int FlashPoint_ProbeHostAdapter(struct sccb_mgr_info *pCardInfo)
12192 +
12193 + if (!(RD_HARPOON(ioport + hp_page_ctrl) & NARROW_SCSI_CARD))
12194 +
12195 +- pCardInfo->si_flags |= SUPPORT_16TAR_32LUN;
12196 ++ pCardInfo->si_mflags |= SUPPORT_16TAR_32LUN;
12197 +
12198 + pCardInfo->si_card_family = HARPOON_FAMILY;
12199 + pCardInfo->si_bustype = BUSTYPE_PCI;
12200 +@@ -1140,15 +1140,15 @@ static int FlashPoint_ProbeHostAdapter(struct sccb_mgr_info *pCardInfo)
12201 +
12202 + if (pCardInfo->si_card_model[1] == '3') {
12203 + if (RD_HARPOON(ioport + hp_ee_ctrl) & BIT(7))
12204 +- pCardInfo->si_flags |= LOW_BYTE_TERM;
12205 ++ pCardInfo->si_mflags |= LOW_BYTE_TERM;
12206 + } else if (pCardInfo->si_card_model[2] == '0') {
12207 + temp = RD_HARPOON(ioport + hp_xfer_pad);
12208 + WR_HARPOON(ioport + hp_xfer_pad, (temp & ~BIT(4)));
12209 + if (RD_HARPOON(ioport + hp_ee_ctrl) & BIT(7))
12210 +- pCardInfo->si_flags |= LOW_BYTE_TERM;
12211 ++ pCardInfo->si_mflags |= LOW_BYTE_TERM;
12212 + WR_HARPOON(ioport + hp_xfer_pad, (temp | BIT(4)));
12213 + if (RD_HARPOON(ioport + hp_ee_ctrl) & BIT(7))
12214 +- pCardInfo->si_flags |= HIGH_BYTE_TERM;
12215 ++ pCardInfo->si_mflags |= HIGH_BYTE_TERM;
12216 + WR_HARPOON(ioport + hp_xfer_pad, temp);
12217 + } else {
12218 + temp = RD_HARPOON(ioport + hp_ee_ctrl);
12219 +@@ -1166,9 +1166,9 @@ static int FlashPoint_ProbeHostAdapter(struct sccb_mgr_info *pCardInfo)
12220 + WR_HARPOON(ioport + hp_ee_ctrl, temp);
12221 + WR_HARPOON(ioport + hp_xfer_pad, temp2);
12222 + if (!(temp3 & BIT(7)))
12223 +- pCardInfo->si_flags |= LOW_BYTE_TERM;
12224 ++ pCardInfo->si_mflags |= LOW_BYTE_TERM;
12225 + if (!(temp3 & BIT(6)))
12226 +- pCardInfo->si_flags |= HIGH_BYTE_TERM;
12227 ++ pCardInfo->si_mflags |= HIGH_BYTE_TERM;
12228 + }
12229 +
12230 + ARAM_ACCESS(ioport);
12231 +@@ -1275,7 +1275,7 @@ static void *FlashPoint_HardwareResetHostAdapter(struct sccb_mgr_info
12232 + WR_HARPOON(ioport + hp_arb_id, pCardInfo->si_id);
12233 + CurrCard->ourId = pCardInfo->si_id;
12234 +
12235 +- i = (unsigned char)pCardInfo->si_flags;
12236 ++ i = (unsigned char)pCardInfo->si_mflags;
12237 + if (i & SCSI_PARITY_ENA)
12238 + WR_HARPOON(ioport + hp_portctrl_1, (HOST_MODE8 | CHK_SCSI_P));
12239 +
12240 +@@ -1289,14 +1289,14 @@ static void *FlashPoint_HardwareResetHostAdapter(struct sccb_mgr_info
12241 + j |= SCSI_TERM_ENA_H;
12242 + WR_HARPOON(ioport + hp_ee_ctrl, j);
12243 +
12244 +- if (!(pCardInfo->si_flags & SOFT_RESET)) {
12245 ++ if (!(pCardInfo->si_mflags & SOFT_RESET)) {
12246 +
12247 + FPT_sresb(ioport, thisCard);
12248 +
12249 + FPT_scini(thisCard, pCardInfo->si_id, 0);
12250 + }
12251 +
12252 +- if (pCardInfo->si_flags & POST_ALL_UNDERRRUNS)
12253 ++ if (pCardInfo->si_mflags & POST_ALL_UNDERRRUNS)
12254 + CurrCard->globalFlags |= F_NO_FILTER;
12255 +
12256 + if (pCurrNvRam) {
12257 +diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
12258 +index 5f845d7094fcc..008f734698f71 100644
12259 +--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
12260 ++++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
12261 +@@ -6007,8 +6007,10 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
12262 + handle, parent_handle,
12263 + (u64)sas_expander->sas_address, sas_expander->num_phys);
12264 +
12265 +- if (!sas_expander->num_phys)
12266 ++ if (!sas_expander->num_phys) {
12267 ++ rc = -1;
12268 + goto out_fail;
12269 ++ }
12270 + sas_expander->phy = kcalloc(sas_expander->num_phys,
12271 + sizeof(struct _sas_phy), GFP_KERNEL);
12272 + if (!sas_expander->phy) {
12273 +diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
12274 +index 31d7a6ddc9db7..a045d00509d5c 100644
12275 +--- a/drivers/scsi/scsi_lib.c
12276 ++++ b/drivers/scsi/scsi_lib.c
12277 +@@ -760,6 +760,7 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
12278 + case 0x07: /* operation in progress */
12279 + case 0x08: /* Long write in progress */
12280 + case 0x09: /* self test in progress */
12281 ++ case 0x11: /* notify (enable spinup) required */
12282 + case 0x14: /* space allocation in progress */
12283 + case 0x1a: /* start stop unit in progress */
12284 + case 0x1b: /* sanitize in progress */
12285 +diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
12286 +index c53c3f9fa526a..c520239082fc6 100644
12287 +--- a/drivers/scsi/scsi_transport_iscsi.c
12288 ++++ b/drivers/scsi/scsi_transport_iscsi.c
12289 +@@ -1979,6 +1979,8 @@ static void __iscsi_unblock_session(struct work_struct *work)
12290 + */
12291 + void iscsi_unblock_session(struct iscsi_cls_session *session)
12292 + {
12293 ++ flush_work(&session->block_work);
12294 ++
12295 + queue_work(iscsi_eh_timer_workq, &session->unblock_work);
12296 + /*
12297 + * Blocking the session can be done from any context so we only
12298 +diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c
12299 +index a418c3c7001c0..304ff2ee7d75a 100644
12300 +--- a/drivers/soundwire/stream.c
12301 ++++ b/drivers/soundwire/stream.c
12302 +@@ -422,7 +422,6 @@ static int sdw_prep_deprep_slave_ports(struct sdw_bus *bus,
12303 + struct completion *port_ready;
12304 + struct sdw_dpn_prop *dpn_prop;
12305 + struct sdw_prepare_ch prep_ch;
12306 +- unsigned int time_left;
12307 + bool intr = false;
12308 + int ret = 0, val;
12309 + u32 addr;
12310 +@@ -479,15 +478,15 @@ static int sdw_prep_deprep_slave_ports(struct sdw_bus *bus,
12311 +
12312 + /* Wait for completion on port ready */
12313 + port_ready = &s_rt->slave->port_ready[prep_ch.num];
12314 +- time_left = wait_for_completion_timeout(port_ready,
12315 +- msecs_to_jiffies(dpn_prop->ch_prep_timeout));
12316 ++ wait_for_completion_timeout(port_ready,
12317 ++ msecs_to_jiffies(dpn_prop->ch_prep_timeout));
12318 +
12319 + val = sdw_read(s_rt->slave, SDW_DPN_PREPARESTATUS(p_rt->num));
12320 +- val &= p_rt->ch_mask;
12321 +- if (!time_left || val) {
12322 ++ if ((val < 0) || (val & p_rt->ch_mask)) {
12323 ++ ret = (val < 0) ? val : -ETIMEDOUT;
12324 + dev_err(&s_rt->slave->dev,
12325 +- "Chn prep failed for port:%d\n", prep_ch.num);
12326 +- return -ETIMEDOUT;
12327 ++ "Chn prep failed for port %d: %d\n", prep_ch.num, ret);
12328 ++ return ret;
12329 + }
12330 + }
12331 +
12332 +diff --git a/drivers/spi/spi-loopback-test.c b/drivers/spi/spi-loopback-test.c
12333 +index df981e55c24c9..89b91cdfb2a54 100644
12334 +--- a/drivers/spi/spi-loopback-test.c
12335 ++++ b/drivers/spi/spi-loopback-test.c
12336 +@@ -874,7 +874,7 @@ static int spi_test_run_iter(struct spi_device *spi,
12337 + test.transfers[i].len = len;
12338 + if (test.transfers[i].tx_buf)
12339 + test.transfers[i].tx_buf += tx_off;
12340 +- if (test.transfers[i].tx_buf)
12341 ++ if (test.transfers[i].rx_buf)
12342 + test.transfers[i].rx_buf += rx_off;
12343 + }
12344 +
12345 +diff --git a/drivers/spi/spi-meson-spicc.c b/drivers/spi/spi-meson-spicc.c
12346 +index ecba6b4a5d85d..b2c4621db34d7 100644
12347 +--- a/drivers/spi/spi-meson-spicc.c
12348 ++++ b/drivers/spi/spi-meson-spicc.c
12349 +@@ -725,7 +725,7 @@ static int meson_spicc_probe(struct platform_device *pdev)
12350 + ret = clk_prepare_enable(spicc->pclk);
12351 + if (ret) {
12352 + dev_err(&pdev->dev, "pclk clock enable failed\n");
12353 +- goto out_master;
12354 ++ goto out_core_clk;
12355 + }
12356 +
12357 + device_reset_optional(&pdev->dev);
12358 +@@ -752,7 +752,7 @@ static int meson_spicc_probe(struct platform_device *pdev)
12359 + ret = meson_spicc_clk_init(spicc);
12360 + if (ret) {
12361 + dev_err(&pdev->dev, "clock registration failed\n");
12362 +- goto out_master;
12363 ++ goto out_clk;
12364 + }
12365 +
12366 + ret = devm_spi_register_master(&pdev->dev, master);
12367 +@@ -764,9 +764,11 @@ static int meson_spicc_probe(struct platform_device *pdev)
12368 + return 0;
12369 +
12370 + out_clk:
12371 +- clk_disable_unprepare(spicc->core);
12372 + clk_disable_unprepare(spicc->pclk);
12373 +
12374 ++out_core_clk:
12375 ++ clk_disable_unprepare(spicc->core);
12376 ++
12377 + out_master:
12378 + spi_master_put(master);
12379 +
12380 +diff --git a/drivers/spi/spi-omap-100k.c b/drivers/spi/spi-omap-100k.c
12381 +index ccd817ee4917b..0d0cd061d3563 100644
12382 +--- a/drivers/spi/spi-omap-100k.c
12383 ++++ b/drivers/spi/spi-omap-100k.c
12384 +@@ -241,7 +241,7 @@ static int omap1_spi100k_setup_transfer(struct spi_device *spi,
12385 + else
12386 + word_len = spi->bits_per_word;
12387 +
12388 +- if (spi->bits_per_word > 32)
12389 ++ if (word_len > 32)
12390 + return -EINVAL;
12391 + cs->word_len = word_len;
12392 +
12393 +diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c
12394 +index 19238e1b76b44..803d92f8d0316 100644
12395 +--- a/drivers/spi/spi-sun6i.c
12396 ++++ b/drivers/spi/spi-sun6i.c
12397 +@@ -290,6 +290,10 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
12398 + }
12399 +
12400 + sun6i_spi_write(sspi, SUN6I_CLK_CTL_REG, reg);
12401 ++ /* Finally enable the bus - doing so before might raise SCK to HIGH */
12402 ++ reg = sun6i_spi_read(sspi, SUN6I_GBL_CTL_REG);
12403 ++ reg |= SUN6I_GBL_CTL_BUS_ENABLE;
12404 ++ sun6i_spi_write(sspi, SUN6I_GBL_CTL_REG, reg);
12405 +
12406 + /* Setup the transfer now... */
12407 + if (sspi->tx_buf)
12408 +@@ -398,7 +402,7 @@ static int sun6i_spi_runtime_resume(struct device *dev)
12409 + }
12410 +
12411 + sun6i_spi_write(sspi, SUN6I_GBL_CTL_REG,
12412 +- SUN6I_GBL_CTL_BUS_ENABLE | SUN6I_GBL_CTL_MASTER | SUN6I_GBL_CTL_TP);
12413 ++ SUN6I_GBL_CTL_MASTER | SUN6I_GBL_CTL_TP);
12414 +
12415 + return 0;
12416 +
12417 +diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
12418 +index b459e369079f8..7fb020a1d66aa 100644
12419 +--- a/drivers/spi/spi-topcliff-pch.c
12420 ++++ b/drivers/spi/spi-topcliff-pch.c
12421 +@@ -580,8 +580,10 @@ static void pch_spi_set_tx(struct pch_spi_data *data, int *bpw)
12422 + data->pkt_tx_buff = kzalloc(size, GFP_KERNEL);
12423 + if (data->pkt_tx_buff != NULL) {
12424 + data->pkt_rx_buff = kzalloc(size, GFP_KERNEL);
12425 +- if (!data->pkt_rx_buff)
12426 ++ if (!data->pkt_rx_buff) {
12427 + kfree(data->pkt_tx_buff);
12428 ++ data->pkt_tx_buff = NULL;
12429 ++ }
12430 + }
12431 +
12432 + if (!data->pkt_rx_buff) {
12433 +diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
12434 +index 0cf67de741e78..8c261eac2cee5 100644
12435 +--- a/drivers/spi/spi.c
12436 ++++ b/drivers/spi/spi.c
12437 +@@ -2050,6 +2050,7 @@ of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc)
12438 + /* Store a pointer to the node in the device structure */
12439 + of_node_get(nc);
12440 + spi->dev.of_node = nc;
12441 ++ spi->dev.fwnode = of_fwnode_handle(nc);
12442 +
12443 + /* Register the new device */
12444 + rc = spi_add_device(spi);
12445 +@@ -2613,9 +2614,10 @@ static int spi_get_gpio_descs(struct spi_controller *ctlr)
12446 + native_cs_mask |= BIT(i);
12447 + }
12448 +
12449 +- ctlr->unused_native_cs = ffz(native_cs_mask);
12450 +- if (num_cs_gpios && ctlr->max_native_cs &&
12451 +- ctlr->unused_native_cs >= ctlr->max_native_cs) {
12452 ++ ctlr->unused_native_cs = ffs(~native_cs_mask) - 1;
12453 ++
12454 ++ if ((ctlr->flags & SPI_MASTER_GPIO_SS) && num_cs_gpios &&
12455 ++ ctlr->max_native_cs && ctlr->unused_native_cs >= ctlr->max_native_cs) {
12456 + dev_err(dev, "No unused native chip select available\n");
12457 + return -EINVAL;
12458 + }
12459 +diff --git a/drivers/ssb/scan.c b/drivers/ssb/scan.c
12460 +index f49ab1aa2149a..4161e5d1f276e 100644
12461 +--- a/drivers/ssb/scan.c
12462 ++++ b/drivers/ssb/scan.c
12463 +@@ -325,6 +325,7 @@ int ssb_bus_scan(struct ssb_bus *bus,
12464 + if (bus->nr_devices > ARRAY_SIZE(bus->devices)) {
12465 + pr_err("More than %d ssb cores found (%d)\n",
12466 + SSB_MAX_NR_CORES, bus->nr_devices);
12467 ++ err = -EINVAL;
12468 + goto err_unmap;
12469 + }
12470 + if (bus->bustype == SSB_BUSTYPE_SSB) {
12471 +diff --git a/drivers/ssb/sdio.c b/drivers/ssb/sdio.c
12472 +index 7fe0afb42234f..66c5c2169704b 100644
12473 +--- a/drivers/ssb/sdio.c
12474 ++++ b/drivers/ssb/sdio.c
12475 +@@ -411,7 +411,6 @@ static void ssb_sdio_block_write(struct ssb_device *dev, const void *buffer,
12476 + sdio_claim_host(bus->host_sdio);
12477 + if (unlikely(ssb_sdio_switch_core(bus, dev))) {
12478 + error = -EIO;
12479 +- memset((void *)buffer, 0xff, count);
12480 + goto err_out;
12481 + }
12482 + offset |= bus->sdio_sbaddr & 0xffff;
12483 +diff --git a/drivers/staging/fbtft/fb_agm1264k-fl.c b/drivers/staging/fbtft/fb_agm1264k-fl.c
12484 +index eeeeec97ad278..b545c2ca80a41 100644
12485 +--- a/drivers/staging/fbtft/fb_agm1264k-fl.c
12486 ++++ b/drivers/staging/fbtft/fb_agm1264k-fl.c
12487 +@@ -84,9 +84,9 @@ static void reset(struct fbtft_par *par)
12488 +
12489 + dev_dbg(par->info->device, "%s()\n", __func__);
12490 +
12491 +- gpiod_set_value(par->gpio.reset, 0);
12492 +- udelay(20);
12493 + gpiod_set_value(par->gpio.reset, 1);
12494 ++ udelay(20);
12495 ++ gpiod_set_value(par->gpio.reset, 0);
12496 + mdelay(120);
12497 + }
12498 +
12499 +@@ -194,12 +194,12 @@ static void write_reg8_bus8(struct fbtft_par *par, int len, ...)
12500 + /* select chip */
12501 + if (*buf) {
12502 + /* cs1 */
12503 +- gpiod_set_value(par->CS0, 1);
12504 +- gpiod_set_value(par->CS1, 0);
12505 +- } else {
12506 +- /* cs0 */
12507 + gpiod_set_value(par->CS0, 0);
12508 + gpiod_set_value(par->CS1, 1);
12509 ++ } else {
12510 ++ /* cs0 */
12511 ++ gpiod_set_value(par->CS0, 1);
12512 ++ gpiod_set_value(par->CS1, 0);
12513 + }
12514 +
12515 + gpiod_set_value(par->RS, 0); /* RS->0 (command mode) */
12516 +@@ -397,8 +397,8 @@ static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
12517 + }
12518 + kfree(convert_buf);
12519 +
12520 +- gpiod_set_value(par->CS0, 1);
12521 +- gpiod_set_value(par->CS1, 1);
12522 ++ gpiod_set_value(par->CS0, 0);
12523 ++ gpiod_set_value(par->CS1, 0);
12524 +
12525 + return ret;
12526 + }
12527 +@@ -419,10 +419,10 @@ static int write(struct fbtft_par *par, void *buf, size_t len)
12528 + for (i = 0; i < 8; ++i)
12529 + gpiod_set_value(par->gpio.db[i], data & (1 << i));
12530 + /* set E */
12531 +- gpiod_set_value(par->EPIN, 1);
12532 ++ gpiod_set_value(par->EPIN, 0);
12533 + udelay(5);
12534 + /* unset E - write */
12535 +- gpiod_set_value(par->EPIN, 0);
12536 ++ gpiod_set_value(par->EPIN, 1);
12537 + udelay(1);
12538 + }
12539 +
12540 +diff --git a/drivers/staging/fbtft/fb_bd663474.c b/drivers/staging/fbtft/fb_bd663474.c
12541 +index e2c7646588f8c..1629c2c440a97 100644
12542 +--- a/drivers/staging/fbtft/fb_bd663474.c
12543 ++++ b/drivers/staging/fbtft/fb_bd663474.c
12544 +@@ -12,7 +12,6 @@
12545 + #include <linux/module.h>
12546 + #include <linux/kernel.h>
12547 + #include <linux/init.h>
12548 +-#include <linux/gpio/consumer.h>
12549 + #include <linux/delay.h>
12550 +
12551 + #include "fbtft.h"
12552 +@@ -24,9 +23,6 @@
12553 +
12554 + static int init_display(struct fbtft_par *par)
12555 + {
12556 +- if (par->gpio.cs)
12557 +- gpiod_set_value(par->gpio.cs, 0); /* Activate chip */
12558 +-
12559 + par->fbtftops.reset(par);
12560 +
12561 + /* Initialization sequence from Lib_UTFT */
12562 +diff --git a/drivers/staging/fbtft/fb_ili9163.c b/drivers/staging/fbtft/fb_ili9163.c
12563 +index 05648c3ffe474..6582a2c90aafc 100644
12564 +--- a/drivers/staging/fbtft/fb_ili9163.c
12565 ++++ b/drivers/staging/fbtft/fb_ili9163.c
12566 +@@ -11,7 +11,6 @@
12567 + #include <linux/module.h>
12568 + #include <linux/kernel.h>
12569 + #include <linux/init.h>
12570 +-#include <linux/gpio/consumer.h>
12571 + #include <linux/delay.h>
12572 + #include <video/mipi_display.h>
12573 +
12574 +@@ -77,9 +76,6 @@ static int init_display(struct fbtft_par *par)
12575 + {
12576 + par->fbtftops.reset(par);
12577 +
12578 +- if (par->gpio.cs)
12579 +- gpiod_set_value(par->gpio.cs, 0); /* Activate chip */
12580 +-
12581 + write_reg(par, MIPI_DCS_SOFT_RESET); /* software reset */
12582 + mdelay(500);
12583 + write_reg(par, MIPI_DCS_EXIT_SLEEP_MODE); /* exit sleep */
12584 +diff --git a/drivers/staging/fbtft/fb_ili9320.c b/drivers/staging/fbtft/fb_ili9320.c
12585 +index f2e72d14431db..a8f4c618b754c 100644
12586 +--- a/drivers/staging/fbtft/fb_ili9320.c
12587 ++++ b/drivers/staging/fbtft/fb_ili9320.c
12588 +@@ -8,7 +8,6 @@
12589 + #include <linux/module.h>
12590 + #include <linux/kernel.h>
12591 + #include <linux/init.h>
12592 +-#include <linux/gpio/consumer.h>
12593 + #include <linux/spi/spi.h>
12594 + #include <linux/delay.h>
12595 +
12596 +diff --git a/drivers/staging/fbtft/fb_ili9325.c b/drivers/staging/fbtft/fb_ili9325.c
12597 +index c9aa4cb431236..16d3b17ca2798 100644
12598 +--- a/drivers/staging/fbtft/fb_ili9325.c
12599 ++++ b/drivers/staging/fbtft/fb_ili9325.c
12600 +@@ -10,7 +10,6 @@
12601 + #include <linux/module.h>
12602 + #include <linux/kernel.h>
12603 + #include <linux/init.h>
12604 +-#include <linux/gpio/consumer.h>
12605 + #include <linux/delay.h>
12606 +
12607 + #include "fbtft.h"
12608 +@@ -85,9 +84,6 @@ static int init_display(struct fbtft_par *par)
12609 + {
12610 + par->fbtftops.reset(par);
12611 +
12612 +- if (par->gpio.cs)
12613 +- gpiod_set_value(par->gpio.cs, 0); /* Activate chip */
12614 +-
12615 + bt &= 0x07;
12616 + vc &= 0x07;
12617 + vrh &= 0x0f;
12618 +diff --git a/drivers/staging/fbtft/fb_ili9340.c b/drivers/staging/fbtft/fb_ili9340.c
12619 +index 415183c7054a8..704236bcaf3ff 100644
12620 +--- a/drivers/staging/fbtft/fb_ili9340.c
12621 ++++ b/drivers/staging/fbtft/fb_ili9340.c
12622 +@@ -8,7 +8,6 @@
12623 + #include <linux/module.h>
12624 + #include <linux/kernel.h>
12625 + #include <linux/init.h>
12626 +-#include <linux/gpio/consumer.h>
12627 + #include <linux/delay.h>
12628 + #include <video/mipi_display.h>
12629 +
12630 +diff --git a/drivers/staging/fbtft/fb_s6d1121.c b/drivers/staging/fbtft/fb_s6d1121.c
12631 +index 8c7de32903434..62f27172f8449 100644
12632 +--- a/drivers/staging/fbtft/fb_s6d1121.c
12633 ++++ b/drivers/staging/fbtft/fb_s6d1121.c
12634 +@@ -12,7 +12,6 @@
12635 + #include <linux/module.h>
12636 + #include <linux/kernel.h>
12637 + #include <linux/init.h>
12638 +-#include <linux/gpio/consumer.h>
12639 + #include <linux/delay.h>
12640 +
12641 + #include "fbtft.h"
12642 +@@ -29,9 +28,6 @@ static int init_display(struct fbtft_par *par)
12643 + {
12644 + par->fbtftops.reset(par);
12645 +
12646 +- if (par->gpio.cs)
12647 +- gpiod_set_value(par->gpio.cs, 0); /* Activate chip */
12648 +-
12649 + /* Initialization sequence from Lib_UTFT */
12650 +
12651 + write_reg(par, 0x0011, 0x2004);
12652 +diff --git a/drivers/staging/fbtft/fb_sh1106.c b/drivers/staging/fbtft/fb_sh1106.c
12653 +index 6f7249493ea3b..7b9ab39e1c1a8 100644
12654 +--- a/drivers/staging/fbtft/fb_sh1106.c
12655 ++++ b/drivers/staging/fbtft/fb_sh1106.c
12656 +@@ -9,7 +9,6 @@
12657 + #include <linux/module.h>
12658 + #include <linux/kernel.h>
12659 + #include <linux/init.h>
12660 +-#include <linux/gpio/consumer.h>
12661 + #include <linux/delay.h>
12662 +
12663 + #include "fbtft.h"
12664 +diff --git a/drivers/staging/fbtft/fb_ssd1289.c b/drivers/staging/fbtft/fb_ssd1289.c
12665 +index 7a3fe022cc69d..f27bab38b3ec4 100644
12666 +--- a/drivers/staging/fbtft/fb_ssd1289.c
12667 ++++ b/drivers/staging/fbtft/fb_ssd1289.c
12668 +@@ -10,7 +10,6 @@
12669 + #include <linux/module.h>
12670 + #include <linux/kernel.h>
12671 + #include <linux/init.h>
12672 +-#include <linux/gpio/consumer.h>
12673 +
12674 + #include "fbtft.h"
12675 +
12676 +@@ -28,9 +27,6 @@ static int init_display(struct fbtft_par *par)
12677 + {
12678 + par->fbtftops.reset(par);
12679 +
12680 +- if (par->gpio.cs)
12681 +- gpiod_set_value(par->gpio.cs, 0); /* Activate chip */
12682 +-
12683 + write_reg(par, 0x00, 0x0001);
12684 + write_reg(par, 0x03, 0xA8A4);
12685 + write_reg(par, 0x0C, 0x0000);
12686 +diff --git a/drivers/staging/fbtft/fb_ssd1325.c b/drivers/staging/fbtft/fb_ssd1325.c
12687 +index 8a3140d41d8bb..796a2ac3e1948 100644
12688 +--- a/drivers/staging/fbtft/fb_ssd1325.c
12689 ++++ b/drivers/staging/fbtft/fb_ssd1325.c
12690 +@@ -35,8 +35,6 @@ static int init_display(struct fbtft_par *par)
12691 + {
12692 + par->fbtftops.reset(par);
12693 +
12694 +- gpiod_set_value(par->gpio.cs, 0);
12695 +-
12696 + write_reg(par, 0xb3);
12697 + write_reg(par, 0xf0);
12698 + write_reg(par, 0xae);
12699 +diff --git a/drivers/staging/fbtft/fb_ssd1331.c b/drivers/staging/fbtft/fb_ssd1331.c
12700 +index 37622c9462aa7..ec5eced7f8cbd 100644
12701 +--- a/drivers/staging/fbtft/fb_ssd1331.c
12702 ++++ b/drivers/staging/fbtft/fb_ssd1331.c
12703 +@@ -81,8 +81,7 @@ static void write_reg8_bus8(struct fbtft_par *par, int len, ...)
12704 + va_start(args, len);
12705 +
12706 + *buf = (u8)va_arg(args, unsigned int);
12707 +- if (par->gpio.dc)
12708 +- gpiod_set_value(par->gpio.dc, 0);
12709 ++ gpiod_set_value(par->gpio.dc, 0);
12710 + ret = par->fbtftops.write(par, par->buf, sizeof(u8));
12711 + if (ret < 0) {
12712 + va_end(args);
12713 +@@ -104,8 +103,7 @@ static void write_reg8_bus8(struct fbtft_par *par, int len, ...)
12714 + return;
12715 + }
12716 + }
12717 +- if (par->gpio.dc)
12718 +- gpiod_set_value(par->gpio.dc, 1);
12719 ++ gpiod_set_value(par->gpio.dc, 1);
12720 + va_end(args);
12721 + }
12722 +
12723 +diff --git a/drivers/staging/fbtft/fb_ssd1351.c b/drivers/staging/fbtft/fb_ssd1351.c
12724 +index 900b28d826b28..cf263a58a1489 100644
12725 +--- a/drivers/staging/fbtft/fb_ssd1351.c
12726 ++++ b/drivers/staging/fbtft/fb_ssd1351.c
12727 +@@ -2,7 +2,6 @@
12728 + #include <linux/module.h>
12729 + #include <linux/kernel.h>
12730 + #include <linux/init.h>
12731 +-#include <linux/gpio/consumer.h>
12732 + #include <linux/spi/spi.h>
12733 + #include <linux/delay.h>
12734 +
12735 +diff --git a/drivers/staging/fbtft/fb_upd161704.c b/drivers/staging/fbtft/fb_upd161704.c
12736 +index c77832ae5e5ba..c680160d63807 100644
12737 +--- a/drivers/staging/fbtft/fb_upd161704.c
12738 ++++ b/drivers/staging/fbtft/fb_upd161704.c
12739 +@@ -12,7 +12,6 @@
12740 + #include <linux/module.h>
12741 + #include <linux/kernel.h>
12742 + #include <linux/init.h>
12743 +-#include <linux/gpio/consumer.h>
12744 + #include <linux/delay.h>
12745 +
12746 + #include "fbtft.h"
12747 +@@ -26,9 +25,6 @@ static int init_display(struct fbtft_par *par)
12748 + {
12749 + par->fbtftops.reset(par);
12750 +
12751 +- if (par->gpio.cs)
12752 +- gpiod_set_value(par->gpio.cs, 0); /* Activate chip */
12753 +-
12754 + /* Initialization sequence from Lib_UTFT */
12755 +
12756 + /* register reset */
12757 +diff --git a/drivers/staging/fbtft/fb_watterott.c b/drivers/staging/fbtft/fb_watterott.c
12758 +index 76b25df376b8f..a57e1f4feef35 100644
12759 +--- a/drivers/staging/fbtft/fb_watterott.c
12760 ++++ b/drivers/staging/fbtft/fb_watterott.c
12761 +@@ -8,7 +8,6 @@
12762 + #include <linux/module.h>
12763 + #include <linux/kernel.h>
12764 + #include <linux/init.h>
12765 +-#include <linux/gpio/consumer.h>
12766 + #include <linux/delay.h>
12767 +
12768 + #include "fbtft.h"
12769 +diff --git a/drivers/staging/fbtft/fbtft-bus.c b/drivers/staging/fbtft/fbtft-bus.c
12770 +index 63c65dd67b175..3d422bc116411 100644
12771 +--- a/drivers/staging/fbtft/fbtft-bus.c
12772 ++++ b/drivers/staging/fbtft/fbtft-bus.c
12773 +@@ -135,8 +135,7 @@ int fbtft_write_vmem16_bus8(struct fbtft_par *par, size_t offset, size_t len)
12774 + remain = len / 2;
12775 + vmem16 = (u16 *)(par->info->screen_buffer + offset);
12776 +
12777 +- if (par->gpio.dc)
12778 +- gpiod_set_value(par->gpio.dc, 1);
12779 ++ gpiod_set_value(par->gpio.dc, 1);
12780 +
12781 + /* non buffered write */
12782 + if (!par->txbuf.buf)
12783 +diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
12784 +index 4f362dad4436a..3723269890d5f 100644
12785 +--- a/drivers/staging/fbtft/fbtft-core.c
12786 ++++ b/drivers/staging/fbtft/fbtft-core.c
12787 +@@ -38,8 +38,7 @@ int fbtft_write_buf_dc(struct fbtft_par *par, void *buf, size_t len, int dc)
12788 + {
12789 + int ret;
12790 +
12791 +- if (par->gpio.dc)
12792 +- gpiod_set_value(par->gpio.dc, dc);
12793 ++ gpiod_set_value(par->gpio.dc, dc);
12794 +
12795 + ret = par->fbtftops.write(par, buf, len);
12796 + if (ret < 0)
12797 +@@ -76,20 +75,16 @@ static int fbtft_request_one_gpio(struct fbtft_par *par,
12798 + struct gpio_desc **gpiop)
12799 + {
12800 + struct device *dev = par->info->device;
12801 +- int ret = 0;
12802 +
12803 + *gpiop = devm_gpiod_get_index_optional(dev, name, index,
12804 +- GPIOD_OUT_HIGH);
12805 +- if (IS_ERR(*gpiop)) {
12806 +- ret = PTR_ERR(*gpiop);
12807 +- dev_err(dev,
12808 +- "Failed to request %s GPIO: %d\n", name, ret);
12809 +- return ret;
12810 +- }
12811 ++ GPIOD_OUT_LOW);
12812 ++ if (IS_ERR(*gpiop))
12813 ++ return dev_err_probe(dev, PTR_ERR(*gpiop), "Failed to request %s GPIO\n", name);
12814 ++
12815 + fbtft_par_dbg(DEBUG_REQUEST_GPIOS, par, "%s: '%s' GPIO\n",
12816 + __func__, name);
12817 +
12818 +- return ret;
12819 ++ return 0;
12820 + }
12821 +
12822 + static int fbtft_request_gpios(struct fbtft_par *par)
12823 +@@ -226,11 +221,15 @@ static void fbtft_reset(struct fbtft_par *par)
12824 + {
12825 + if (!par->gpio.reset)
12826 + return;
12827 ++
12828 + fbtft_par_dbg(DEBUG_RESET, par, "%s()\n", __func__);
12829 ++
12830 + gpiod_set_value_cansleep(par->gpio.reset, 1);
12831 + usleep_range(20, 40);
12832 + gpiod_set_value_cansleep(par->gpio.reset, 0);
12833 + msleep(120);
12834 ++
12835 ++ gpiod_set_value_cansleep(par->gpio.cs, 1); /* Activate chip */
12836 + }
12837 +
12838 + static void fbtft_update_display(struct fbtft_par *par, unsigned int start_line,
12839 +@@ -922,8 +921,6 @@ static int fbtft_init_display_from_property(struct fbtft_par *par)
12840 + goto out_free;
12841 +
12842 + par->fbtftops.reset(par);
12843 +- if (par->gpio.cs)
12844 +- gpiod_set_value(par->gpio.cs, 0); /* Activate chip */
12845 +
12846 + index = -1;
12847 + val = values[++index];
12848 +@@ -1018,8 +1015,6 @@ int fbtft_init_display(struct fbtft_par *par)
12849 + }
12850 +
12851 + par->fbtftops.reset(par);
12852 +- if (par->gpio.cs)
12853 +- gpiod_set_value(par->gpio.cs, 0); /* Activate chip */
12854 +
12855 + i = 0;
12856 + while (i < FBTFT_MAX_INIT_SEQUENCE) {
12857 +diff --git a/drivers/staging/fbtft/fbtft-io.c b/drivers/staging/fbtft/fbtft-io.c
12858 +index 0863d257d7620..de1904a443c27 100644
12859 +--- a/drivers/staging/fbtft/fbtft-io.c
12860 ++++ b/drivers/staging/fbtft/fbtft-io.c
12861 +@@ -142,12 +142,12 @@ int fbtft_write_gpio8_wr(struct fbtft_par *par, void *buf, size_t len)
12862 + data = *(u8 *)buf;
12863 +
12864 + /* Start writing by pulling down /WR */
12865 +- gpiod_set_value(par->gpio.wr, 0);
12866 ++ gpiod_set_value(par->gpio.wr, 1);
12867 +
12868 + /* Set data */
12869 + #ifndef DO_NOT_OPTIMIZE_FBTFT_WRITE_GPIO
12870 + if (data == prev_data) {
12871 +- gpiod_set_value(par->gpio.wr, 0); /* used as delay */
12872 ++ gpiod_set_value(par->gpio.wr, 1); /* used as delay */
12873 + } else {
12874 + for (i = 0; i < 8; i++) {
12875 + if ((data & 1) != (prev_data & 1))
12876 +@@ -165,7 +165,7 @@ int fbtft_write_gpio8_wr(struct fbtft_par *par, void *buf, size_t len)
12877 + #endif
12878 +
12879 + /* Pullup /WR */
12880 +- gpiod_set_value(par->gpio.wr, 1);
12881 ++ gpiod_set_value(par->gpio.wr, 0);
12882 +
12883 + #ifndef DO_NOT_OPTIMIZE_FBTFT_WRITE_GPIO
12884 + prev_data = *(u8 *)buf;
12885 +@@ -192,12 +192,12 @@ int fbtft_write_gpio16_wr(struct fbtft_par *par, void *buf, size_t len)
12886 + data = *(u16 *)buf;
12887 +
12888 + /* Start writing by pulling down /WR */
12889 +- gpiod_set_value(par->gpio.wr, 0);
12890 ++ gpiod_set_value(par->gpio.wr, 1);
12891 +
12892 + /* Set data */
12893 + #ifndef DO_NOT_OPTIMIZE_FBTFT_WRITE_GPIO
12894 + if (data == prev_data) {
12895 +- gpiod_set_value(par->gpio.wr, 0); /* used as delay */
12896 ++ gpiod_set_value(par->gpio.wr, 1); /* used as delay */
12897 + } else {
12898 + for (i = 0; i < 16; i++) {
12899 + if ((data & 1) != (prev_data & 1))
12900 +@@ -215,7 +215,7 @@ int fbtft_write_gpio16_wr(struct fbtft_par *par, void *buf, size_t len)
12901 + #endif
12902 +
12903 + /* Pullup /WR */
12904 +- gpiod_set_value(par->gpio.wr, 1);
12905 ++ gpiod_set_value(par->gpio.wr, 0);
12906 +
12907 + #ifndef DO_NOT_OPTIMIZE_FBTFT_WRITE_GPIO
12908 + prev_data = *(u16 *)buf;
12909 +diff --git a/drivers/staging/gdm724x/gdm_lte.c b/drivers/staging/gdm724x/gdm_lte.c
12910 +index 571f47d394843..bd5f874334043 100644
12911 +--- a/drivers/staging/gdm724x/gdm_lte.c
12912 ++++ b/drivers/staging/gdm724x/gdm_lte.c
12913 +@@ -611,10 +611,12 @@ static void gdm_lte_netif_rx(struct net_device *dev, char *buf,
12914 + * bytes (99,130,83,99 dec)
12915 + */
12916 + } __packed;
12917 +- void *addr = buf + sizeof(struct iphdr) +
12918 +- sizeof(struct udphdr) +
12919 +- offsetof(struct dhcp_packet, chaddr);
12920 +- ether_addr_copy(nic->dest_mac_addr, addr);
12921 ++ int offset = sizeof(struct iphdr) +
12922 ++ sizeof(struct udphdr) +
12923 ++ offsetof(struct dhcp_packet, chaddr);
12924 ++ if (offset + ETH_ALEN > len)
12925 ++ return;
12926 ++ ether_addr_copy(nic->dest_mac_addr, buf + offset);
12927 + }
12928 + }
12929 +
12930 +@@ -677,6 +679,7 @@ static void gdm_lte_multi_sdu_pkt(struct phy_dev *phy_dev, char *buf, int len)
12931 + struct sdu *sdu = NULL;
12932 + u8 endian = phy_dev->get_endian(phy_dev->priv_dev);
12933 + u8 *data = (u8 *)multi_sdu->data;
12934 ++ int copied;
12935 + u16 i = 0;
12936 + u16 num_packet;
12937 + u16 hci_len;
12938 +@@ -688,6 +691,12 @@ static void gdm_lte_multi_sdu_pkt(struct phy_dev *phy_dev, char *buf, int len)
12939 + num_packet = gdm_dev16_to_cpu(endian, multi_sdu->num_packet);
12940 +
12941 + for (i = 0; i < num_packet; i++) {
12942 ++ copied = data - multi_sdu->data;
12943 ++ if (len < copied + sizeof(*sdu)) {
12944 ++ pr_err("rx prevent buffer overflow");
12945 ++ return;
12946 ++ }
12947 ++
12948 + sdu = (struct sdu *)data;
12949 +
12950 + cmd_evt = gdm_dev16_to_cpu(endian, sdu->cmd_evt);
12951 +@@ -698,7 +707,8 @@ static void gdm_lte_multi_sdu_pkt(struct phy_dev *phy_dev, char *buf, int len)
12952 + pr_err("rx sdu wrong hci %04x\n", cmd_evt);
12953 + return;
12954 + }
12955 +- if (hci_len < 12) {
12956 ++ if (hci_len < 12 ||
12957 ++ len < copied + sizeof(*sdu) + (hci_len - 12)) {
12958 + pr_err("rx sdu invalid len %d\n", hci_len);
12959 + return;
12960 + }
12961 +diff --git a/drivers/staging/media/hantro/hantro_drv.c b/drivers/staging/media/hantro/hantro_drv.c
12962 +index 3cd00cc0a3641..7749ca9a8ebbf 100644
12963 +--- a/drivers/staging/media/hantro/hantro_drv.c
12964 ++++ b/drivers/staging/media/hantro/hantro_drv.c
12965 +@@ -56,16 +56,12 @@ dma_addr_t hantro_get_ref(struct hantro_ctx *ctx, u64 ts)
12966 + return hantro_get_dec_buf_addr(ctx, buf);
12967 + }
12968 +
12969 +-static void hantro_job_finish(struct hantro_dev *vpu,
12970 +- struct hantro_ctx *ctx,
12971 +- enum vb2_buffer_state result)
12972 ++static void hantro_job_finish_no_pm(struct hantro_dev *vpu,
12973 ++ struct hantro_ctx *ctx,
12974 ++ enum vb2_buffer_state result)
12975 + {
12976 + struct vb2_v4l2_buffer *src, *dst;
12977 +
12978 +- pm_runtime_mark_last_busy(vpu->dev);
12979 +- pm_runtime_put_autosuspend(vpu->dev);
12980 +- clk_bulk_disable(vpu->variant->num_clocks, vpu->clocks);
12981 +-
12982 + src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
12983 + dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
12984 +
12985 +@@ -81,6 +77,18 @@ static void hantro_job_finish(struct hantro_dev *vpu,
12986 + result);
12987 + }
12988 +
12989 ++static void hantro_job_finish(struct hantro_dev *vpu,
12990 ++ struct hantro_ctx *ctx,
12991 ++ enum vb2_buffer_state result)
12992 ++{
12993 ++ pm_runtime_mark_last_busy(vpu->dev);
12994 ++ pm_runtime_put_autosuspend(vpu->dev);
12995 ++
12996 ++ clk_bulk_disable(vpu->variant->num_clocks, vpu->clocks);
12997 ++
12998 ++ hantro_job_finish_no_pm(vpu, ctx, result);
12999 ++}
13000 ++
13001 + void hantro_irq_done(struct hantro_dev *vpu,
13002 + enum vb2_buffer_state result)
13003 + {
13004 +@@ -152,12 +160,15 @@ static void device_run(void *priv)
13005 + src = hantro_get_src_buf(ctx);
13006 + dst = hantro_get_dst_buf(ctx);
13007 +
13008 ++ ret = pm_runtime_get_sync(ctx->dev->dev);
13009 ++ if (ret < 0) {
13010 ++ pm_runtime_put_noidle(ctx->dev->dev);
13011 ++ goto err_cancel_job;
13012 ++ }
13013 ++
13014 + ret = clk_bulk_enable(ctx->dev->variant->num_clocks, ctx->dev->clocks);
13015 + if (ret)
13016 + goto err_cancel_job;
13017 +- ret = pm_runtime_get_sync(ctx->dev->dev);
13018 +- if (ret < 0)
13019 +- goto err_cancel_job;
13020 +
13021 + v4l2_m2m_buf_copy_metadata(src, dst, true);
13022 +
13023 +@@ -165,7 +176,7 @@ static void device_run(void *priv)
13024 + return;
13025 +
13026 + err_cancel_job:
13027 +- hantro_job_finish(ctx->dev, ctx, VB2_BUF_STATE_ERROR);
13028 ++ hantro_job_finish_no_pm(ctx->dev, ctx, VB2_BUF_STATE_ERROR);
13029 + }
13030 +
13031 + static struct v4l2_m2m_ops vpu_m2m_ops = {
13032 +diff --git a/drivers/staging/media/hantro/hantro_v4l2.c b/drivers/staging/media/hantro/hantro_v4l2.c
13033 +index f5fbdbc4ffdb1..5c2ca61add8e8 100644
13034 +--- a/drivers/staging/media/hantro/hantro_v4l2.c
13035 ++++ b/drivers/staging/media/hantro/hantro_v4l2.c
13036 +@@ -639,7 +639,14 @@ static int hantro_buf_prepare(struct vb2_buffer *vb)
13037 + ret = hantro_buf_plane_check(vb, pix_fmt);
13038 + if (ret)
13039 + return ret;
13040 +- vb2_set_plane_payload(vb, 0, pix_fmt->plane_fmt[0].sizeimage);
13041 ++ /*
13042 ++ * Buffer's bytesused must be written by driver for CAPTURE buffers.
13043 ++ * (for OUTPUT buffers, if userspace passes 0 bytesused, v4l2-core sets
13044 ++ * it to buffer length).
13045 ++ */
13046 ++ if (V4L2_TYPE_IS_CAPTURE(vq->type))
13047 ++ vb2_set_plane_payload(vb, 0, pix_fmt->plane_fmt[0].sizeimage);
13048 ++
13049 + return 0;
13050 + }
13051 +
13052 +diff --git a/drivers/staging/media/imx/imx-media-csi.c b/drivers/staging/media/imx/imx-media-csi.c
13053 +index 21ebf77696964..d9a8667b4bedf 100644
13054 +--- a/drivers/staging/media/imx/imx-media-csi.c
13055 ++++ b/drivers/staging/media/imx/imx-media-csi.c
13056 +@@ -753,9 +753,10 @@ static int csi_setup(struct csi_priv *priv)
13057 +
13058 + static int csi_start(struct csi_priv *priv)
13059 + {
13060 +- struct v4l2_fract *output_fi;
13061 ++ struct v4l2_fract *input_fi, *output_fi;
13062 + int ret;
13063 +
13064 ++ input_fi = &priv->frame_interval[CSI_SINK_PAD];
13065 + output_fi = &priv->frame_interval[priv->active_output_pad];
13066 +
13067 + /* start upstream */
13068 +@@ -764,6 +765,17 @@ static int csi_start(struct csi_priv *priv)
13069 + if (ret)
13070 + return ret;
13071 +
13072 ++ /* Skip first few frames from a BT.656 source */
13073 ++ if (priv->upstream_ep.bus_type == V4L2_MBUS_BT656) {
13074 ++ u32 delay_usec, bad_frames = 20;
13075 ++
13076 ++ delay_usec = DIV_ROUND_UP_ULL((u64)USEC_PER_SEC *
13077 ++ input_fi->numerator * bad_frames,
13078 ++ input_fi->denominator);
13079 ++
13080 ++ usleep_range(delay_usec, delay_usec + 1000);
13081 ++ }
13082 ++
13083 + if (priv->dest == IPU_CSI_DEST_IDMAC) {
13084 + ret = csi_idmac_start(priv);
13085 + if (ret)
13086 +@@ -1930,19 +1942,13 @@ static int imx_csi_async_register(struct csi_priv *priv)
13087 + port, 0,
13088 + FWNODE_GRAPH_ENDPOINT_NEXT);
13089 + if (ep) {
13090 +- asd = kzalloc(sizeof(*asd), GFP_KERNEL);
13091 +- if (!asd) {
13092 +- fwnode_handle_put(ep);
13093 +- return -ENOMEM;
13094 +- }
13095 +-
13096 +- ret = v4l2_async_notifier_add_fwnode_remote_subdev(
13097 +- &priv->notifier, ep, asd);
13098 ++ asd = v4l2_async_notifier_add_fwnode_remote_subdev(
13099 ++ &priv->notifier, ep, sizeof(*asd));
13100 +
13101 + fwnode_handle_put(ep);
13102 +
13103 +- if (ret) {
13104 +- kfree(asd);
13105 ++ if (IS_ERR(asd)) {
13106 ++ ret = PTR_ERR(asd);
13107 + /* OK if asd already exists */
13108 + if (ret != -EEXIST)
13109 + return ret;
13110 +diff --git a/drivers/staging/media/imx/imx6-mipi-csi2.c b/drivers/staging/media/imx/imx6-mipi-csi2.c
13111 +index 94d87d27d3896..9457761b7c8ba 100644
13112 +--- a/drivers/staging/media/imx/imx6-mipi-csi2.c
13113 ++++ b/drivers/staging/media/imx/imx6-mipi-csi2.c
13114 +@@ -557,7 +557,7 @@ static int csi2_async_register(struct csi2_dev *csi2)
13115 + struct v4l2_fwnode_endpoint vep = {
13116 + .bus_type = V4L2_MBUS_CSI2_DPHY,
13117 + };
13118 +- struct v4l2_async_subdev *asd = NULL;
13119 ++ struct v4l2_async_subdev *asd;
13120 + struct fwnode_handle *ep;
13121 + int ret;
13122 +
13123 +@@ -577,19 +577,13 @@ static int csi2_async_register(struct csi2_dev *csi2)
13124 + dev_dbg(csi2->dev, "data lanes: %d\n", csi2->bus.num_data_lanes);
13125 + dev_dbg(csi2->dev, "flags: 0x%08x\n", csi2->bus.flags);
13126 +
13127 +- asd = kzalloc(sizeof(*asd), GFP_KERNEL);
13128 +- if (!asd) {
13129 +- ret = -ENOMEM;
13130 +- goto err_parse;
13131 +- }
13132 +-
13133 +- ret = v4l2_async_notifier_add_fwnode_remote_subdev(
13134 +- &csi2->notifier, ep, asd);
13135 +- if (ret)
13136 +- goto err_parse;
13137 +-
13138 ++ asd = v4l2_async_notifier_add_fwnode_remote_subdev(
13139 ++ &csi2->notifier, ep, sizeof(*asd));
13140 + fwnode_handle_put(ep);
13141 +
13142 ++ if (IS_ERR(asd))
13143 ++ return PTR_ERR(asd);
13144 ++
13145 + csi2->notifier.ops = &csi2_notify_ops;
13146 +
13147 + ret = v4l2_async_subdev_notifier_register(&csi2->sd,
13148 +@@ -601,7 +595,6 @@ static int csi2_async_register(struct csi2_dev *csi2)
13149 +
13150 + err_parse:
13151 + fwnode_handle_put(ep);
13152 +- kfree(asd);
13153 + return ret;
13154 + }
13155 +
13156 +diff --git a/drivers/staging/media/imx/imx7-media-csi.c b/drivers/staging/media/imx/imx7-media-csi.c
13157 +index ac52b1daf9914..6c59485291ca3 100644
13158 +--- a/drivers/staging/media/imx/imx7-media-csi.c
13159 ++++ b/drivers/staging/media/imx/imx7-media-csi.c
13160 +@@ -1191,7 +1191,7 @@ static const struct v4l2_async_notifier_operations imx7_csi_notify_ops = {
13161 +
13162 + static int imx7_csi_async_register(struct imx7_csi *csi)
13163 + {
13164 +- struct v4l2_async_subdev *asd = NULL;
13165 ++ struct v4l2_async_subdev *asd;
13166 + struct fwnode_handle *ep;
13167 + int ret;
13168 +
13169 +@@ -1200,19 +1200,13 @@ static int imx7_csi_async_register(struct imx7_csi *csi)
13170 + ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(csi->dev), 0, 0,
13171 + FWNODE_GRAPH_ENDPOINT_NEXT);
13172 + if (ep) {
13173 +- asd = kzalloc(sizeof(*asd), GFP_KERNEL);
13174 +- if (!asd) {
13175 +- fwnode_handle_put(ep);
13176 +- return -ENOMEM;
13177 +- }
13178 +-
13179 +- ret = v4l2_async_notifier_add_fwnode_remote_subdev(
13180 +- &csi->notifier, ep, asd);
13181 ++ asd = v4l2_async_notifier_add_fwnode_remote_subdev(
13182 ++ &csi->notifier, ep, sizeof(*asd));
13183 +
13184 + fwnode_handle_put(ep);
13185 +
13186 +- if (ret) {
13187 +- kfree(asd);
13188 ++ if (IS_ERR(asd)) {
13189 ++ ret = PTR_ERR(asd);
13190 + /* OK if asd already exists */
13191 + if (ret != -EEXIST)
13192 + return ret;
13193 +diff --git a/drivers/staging/media/imx/imx7-mipi-csis.c b/drivers/staging/media/imx/imx7-mipi-csis.c
13194 +index 7612993cc1d68..a392f9012626b 100644
13195 +--- a/drivers/staging/media/imx/imx7-mipi-csis.c
13196 ++++ b/drivers/staging/media/imx/imx7-mipi-csis.c
13197 +@@ -597,13 +597,15 @@ static void mipi_csis_clear_counters(struct csi_state *state)
13198 +
13199 + static void mipi_csis_log_counters(struct csi_state *state, bool non_errors)
13200 + {
13201 +- int i = non_errors ? MIPI_CSIS_NUM_EVENTS : MIPI_CSIS_NUM_EVENTS - 4;
13202 ++ unsigned int num_events = non_errors ? MIPI_CSIS_NUM_EVENTS
13203 ++ : MIPI_CSIS_NUM_EVENTS - 6;
13204 + struct device *dev = &state->pdev->dev;
13205 + unsigned long flags;
13206 ++ unsigned int i;
13207 +
13208 + spin_lock_irqsave(&state->slock, flags);
13209 +
13210 +- for (i--; i >= 0; i--) {
13211 ++ for (i = 0; i < num_events; ++i) {
13212 + if (state->events[i].counter > 0 || state->debug)
13213 + dev_info(dev, "%s events: %d\n", state->events[i].name,
13214 + state->events[i].counter);
13215 +@@ -1004,7 +1006,7 @@ static int mipi_csis_async_register(struct csi_state *state)
13216 + struct v4l2_fwnode_endpoint vep = {
13217 + .bus_type = V4L2_MBUS_CSI2_DPHY,
13218 + };
13219 +- struct v4l2_async_subdev *asd = NULL;
13220 ++ struct v4l2_async_subdev *asd;
13221 + struct fwnode_handle *ep;
13222 + int ret;
13223 +
13224 +@@ -1024,17 +1026,13 @@ static int mipi_csis_async_register(struct csi_state *state)
13225 + dev_dbg(state->dev, "data lanes: %d\n", state->bus.num_data_lanes);
13226 + dev_dbg(state->dev, "flags: 0x%08x\n", state->bus.flags);
13227 +
13228 +- asd = kzalloc(sizeof(*asd), GFP_KERNEL);
13229 +- if (!asd) {
13230 +- ret = -ENOMEM;
13231 ++ asd = v4l2_async_notifier_add_fwnode_remote_subdev(
13232 ++ &state->notifier, ep, sizeof(*asd));
13233 ++ if (IS_ERR(asd)) {
13234 ++ ret = PTR_ERR(asd);
13235 + goto err_parse;
13236 + }
13237 +
13238 +- ret = v4l2_async_notifier_add_fwnode_remote_subdev(
13239 +- &state->notifier, ep, asd);
13240 +- if (ret)
13241 +- goto err_parse;
13242 +-
13243 + fwnode_handle_put(ep);
13244 +
13245 + state->notifier.ops = &mipi_csis_notify_ops;
13246 +@@ -1048,7 +1046,6 @@ static int mipi_csis_async_register(struct csi_state *state)
13247 +
13248 + err_parse:
13249 + fwnode_handle_put(ep);
13250 +- kfree(asd);
13251 +
13252 + return ret;
13253 + }
13254 +diff --git a/drivers/staging/media/rkisp1/rkisp1-dev.c b/drivers/staging/media/rkisp1/rkisp1-dev.c
13255 +index 91584695804bb..06de5540c8af4 100644
13256 +--- a/drivers/staging/media/rkisp1/rkisp1-dev.c
13257 ++++ b/drivers/staging/media/rkisp1/rkisp1-dev.c
13258 +@@ -252,6 +252,7 @@ static int rkisp1_subdev_notifier(struct rkisp1_device *rkisp1)
13259 + .bus_type = V4L2_MBUS_CSI2_DPHY
13260 + };
13261 + struct rkisp1_sensor_async *rk_asd = NULL;
13262 ++ struct v4l2_async_subdev *asd;
13263 + struct fwnode_handle *ep;
13264 +
13265 + ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(rkisp1->dev),
13266 +@@ -263,21 +264,18 @@ static int rkisp1_subdev_notifier(struct rkisp1_device *rkisp1)
13267 + if (ret)
13268 + goto err_parse;
13269 +
13270 +- rk_asd = kzalloc(sizeof(*rk_asd), GFP_KERNEL);
13271 +- if (!rk_asd) {
13272 +- ret = -ENOMEM;
13273 ++ asd = v4l2_async_notifier_add_fwnode_remote_subdev(ntf, ep,
13274 ++ sizeof(*rk_asd));
13275 ++ if (IS_ERR(asd)) {
13276 ++ ret = PTR_ERR(asd);
13277 + goto err_parse;
13278 + }
13279 +
13280 ++ rk_asd = container_of(asd, struct rkisp1_sensor_async, asd);
13281 + rk_asd->mbus_type = vep.bus_type;
13282 + rk_asd->mbus_flags = vep.bus.mipi_csi2.flags;
13283 + rk_asd->lanes = vep.bus.mipi_csi2.num_data_lanes;
13284 +
13285 +- ret = v4l2_async_notifier_add_fwnode_remote_subdev(ntf, ep,
13286 +- &rk_asd->asd);
13287 +- if (ret)
13288 +- goto err_parse;
13289 +-
13290 + dev_dbg(rkisp1->dev, "registered ep id %d with %d lanes\n",
13291 + vep.base.id, rk_asd->lanes);
13292 +
13293 +@@ -288,7 +286,6 @@ static int rkisp1_subdev_notifier(struct rkisp1_device *rkisp1)
13294 + continue;
13295 + err_parse:
13296 + fwnode_handle_put(ep);
13297 +- kfree(rk_asd);
13298 + v4l2_async_notifier_cleanup(ntf);
13299 + return ret;
13300 + }
13301 +diff --git a/drivers/staging/media/rkvdec/rkvdec.c b/drivers/staging/media/rkvdec/rkvdec.c
13302 +index 1263991de76f9..e68303e2b3907 100644
13303 +--- a/drivers/staging/media/rkvdec/rkvdec.c
13304 ++++ b/drivers/staging/media/rkvdec/rkvdec.c
13305 +@@ -471,7 +471,15 @@ static int rkvdec_buf_prepare(struct vb2_buffer *vb)
13306 + if (vb2_plane_size(vb, i) < sizeimage)
13307 + return -EINVAL;
13308 + }
13309 +- vb2_set_plane_payload(vb, 0, f->fmt.pix_mp.plane_fmt[0].sizeimage);
13310 ++
13311 ++ /*
13312 ++ * Buffer's bytesused must be written by driver for CAPTURE buffers.
13313 ++ * (for OUTPUT buffers, if userspace passes 0 bytesused, v4l2-core sets
13314 ++ * it to buffer length).
13315 ++ */
13316 ++ if (V4L2_TYPE_IS_CAPTURE(vq->type))
13317 ++ vb2_set_plane_payload(vb, 0, f->fmt.pix_mp.plane_fmt[0].sizeimage);
13318 ++
13319 + return 0;
13320 + }
13321 +
13322 +@@ -691,7 +699,7 @@ static void rkvdec_device_run(void *priv)
13323 + if (WARN_ON(!desc))
13324 + return;
13325 +
13326 +- ret = pm_runtime_get_sync(rkvdec->dev);
13327 ++ ret = pm_runtime_resume_and_get(rkvdec->dev);
13328 + if (ret < 0) {
13329 + rkvdec_job_finish_no_pm(ctx, VB2_BUF_STATE_ERROR);
13330 + return;
13331 +diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_h265.c b/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
13332 +index ce497d0197dfc..10744fab7ceaa 100644
13333 +--- a/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
13334 ++++ b/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
13335 +@@ -477,8 +477,8 @@ static void cedrus_h265_setup(struct cedrus_ctx *ctx,
13336 + slice_params->flags);
13337 +
13338 + reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_DEPENDENT_SLICE_SEGMENT,
13339 +- V4L2_HEVC_PPS_FLAG_DEPENDENT_SLICE_SEGMENT,
13340 +- pps->flags);
13341 ++ V4L2_HEVC_SLICE_PARAMS_FLAG_DEPENDENT_SLICE_SEGMENT,
13342 ++ slice_params->flags);
13343 +
13344 + /* FIXME: For multi-slice support. */
13345 + reg |= VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_FIRST_SLICE_SEGMENT_IN_PIC;
13346 +diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_video.c b/drivers/staging/media/sunxi/cedrus/cedrus_video.c
13347 +index 911f607d9b092..16327be904d1a 100644
13348 +--- a/drivers/staging/media/sunxi/cedrus/cedrus_video.c
13349 ++++ b/drivers/staging/media/sunxi/cedrus/cedrus_video.c
13350 +@@ -449,7 +449,13 @@ static int cedrus_buf_prepare(struct vb2_buffer *vb)
13351 + if (vb2_plane_size(vb, 0) < pix_fmt->sizeimage)
13352 + return -EINVAL;
13353 +
13354 +- vb2_set_plane_payload(vb, 0, pix_fmt->sizeimage);
13355 ++ /*
13356 ++ * Buffer's bytesused must be written by driver for CAPTURE buffers.
13357 ++ * (for OUTPUT buffers, if userspace passes 0 bytesused, v4l2-core sets
13358 ++ * it to buffer length).
13359 ++ */
13360 ++ if (V4L2_TYPE_IS_CAPTURE(vq->type))
13361 ++ vb2_set_plane_payload(vb, 0, pix_fmt->sizeimage);
13362 +
13363 + return 0;
13364 + }
13365 +diff --git a/drivers/staging/mt7621-dts/mt7621.dtsi b/drivers/staging/mt7621-dts/mt7621.dtsi
13366 +index 82aa93634eda3..27222f7b246fd 100644
13367 +--- a/drivers/staging/mt7621-dts/mt7621.dtsi
13368 ++++ b/drivers/staging/mt7621-dts/mt7621.dtsi
13369 +@@ -519,7 +519,7 @@
13370 +
13371 + bus-range = <0 255>;
13372 + ranges = <
13373 +- 0x02000000 0 0x00000000 0x60000000 0 0x10000000 /* pci memory */
13374 ++ 0x02000000 0 0x60000000 0x60000000 0 0x10000000 /* pci memory */
13375 + 0x01000000 0 0x00000000 0x1e160000 0 0x00010000 /* io space */
13376 + >;
13377 +
13378 +diff --git a/drivers/staging/rtl8712/hal_init.c b/drivers/staging/rtl8712/hal_init.c
13379 +index 715f1fe8b4726..22974277afa08 100644
13380 +--- a/drivers/staging/rtl8712/hal_init.c
13381 ++++ b/drivers/staging/rtl8712/hal_init.c
13382 +@@ -40,7 +40,10 @@ static void rtl871x_load_fw_cb(const struct firmware *firmware, void *context)
13383 + dev_err(&udev->dev, "r8712u: Firmware request failed\n");
13384 + usb_put_dev(udev);
13385 + usb_set_intfdata(usb_intf, NULL);
13386 ++ r8712_free_drv_sw(adapter);
13387 ++ adapter->dvobj_deinit(adapter);
13388 + complete(&adapter->rtl8712_fw_ready);
13389 ++ free_netdev(adapter->pnetdev);
13390 + return;
13391 + }
13392 + adapter->fw = firmware;
13393 +diff --git a/drivers/staging/rtl8712/os_intfs.c b/drivers/staging/rtl8712/os_intfs.c
13394 +index 0c3ae8495afb7..2214aca097308 100644
13395 +--- a/drivers/staging/rtl8712/os_intfs.c
13396 ++++ b/drivers/staging/rtl8712/os_intfs.c
13397 +@@ -328,8 +328,6 @@ int r8712_init_drv_sw(struct _adapter *padapter)
13398 +
13399 + void r8712_free_drv_sw(struct _adapter *padapter)
13400 + {
13401 +- struct net_device *pnetdev = padapter->pnetdev;
13402 +-
13403 + r8712_free_cmd_priv(&padapter->cmdpriv);
13404 + r8712_free_evt_priv(&padapter->evtpriv);
13405 + r8712_DeInitSwLeds(padapter);
13406 +@@ -339,8 +337,6 @@ void r8712_free_drv_sw(struct _adapter *padapter)
13407 + _r8712_free_sta_priv(&padapter->stapriv);
13408 + _r8712_free_recv_priv(&padapter->recvpriv);
13409 + mp871xdeinit(padapter);
13410 +- if (pnetdev)
13411 +- free_netdev(pnetdev);
13412 + }
13413 +
13414 + static void enable_video_mode(struct _adapter *padapter, int cbw40_value)
13415 +diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
13416 +index dc21e7743349c..b760bc3559373 100644
13417 +--- a/drivers/staging/rtl8712/usb_intf.c
13418 ++++ b/drivers/staging/rtl8712/usb_intf.c
13419 +@@ -361,7 +361,7 @@ static int r871xu_drv_init(struct usb_interface *pusb_intf,
13420 + /* step 1. */
13421 + pnetdev = r8712_init_netdev();
13422 + if (!pnetdev)
13423 +- goto error;
13424 ++ goto put_dev;
13425 + padapter = netdev_priv(pnetdev);
13426 + disable_ht_for_spec_devid(pdid, padapter);
13427 + pdvobjpriv = &padapter->dvobjpriv;
13428 +@@ -381,16 +381,16 @@ static int r871xu_drv_init(struct usb_interface *pusb_intf,
13429 + * initialize the dvobj_priv
13430 + */
13431 + if (!padapter->dvobj_init) {
13432 +- goto error;
13433 ++ goto put_dev;
13434 + } else {
13435 + status = padapter->dvobj_init(padapter);
13436 + if (status != _SUCCESS)
13437 +- goto error;
13438 ++ goto free_netdev;
13439 + }
13440 + /* step 4. */
13441 + status = r8712_init_drv_sw(padapter);
13442 + if (status)
13443 +- goto error;
13444 ++ goto dvobj_deinit;
13445 + /* step 5. read efuse/eeprom data and get mac_addr */
13446 + {
13447 + int i, offset;
13448 +@@ -570,17 +570,20 @@ static int r871xu_drv_init(struct usb_interface *pusb_intf,
13449 + }
13450 + /* step 6. Load the firmware asynchronously */
13451 + if (rtl871x_load_fw(padapter))
13452 +- goto error;
13453 ++ goto deinit_drv_sw;
13454 + spin_lock_init(&padapter->lock_rx_ff0_filter);
13455 + mutex_init(&padapter->mutex_start);
13456 + return 0;
13457 +-error:
13458 ++
13459 ++deinit_drv_sw:
13460 ++ r8712_free_drv_sw(padapter);
13461 ++dvobj_deinit:
13462 ++ padapter->dvobj_deinit(padapter);
13463 ++free_netdev:
13464 ++ free_netdev(pnetdev);
13465 ++put_dev:
13466 + usb_put_dev(udev);
13467 + usb_set_intfdata(pusb_intf, NULL);
13468 +- if (padapter && padapter->dvobj_deinit)
13469 +- padapter->dvobj_deinit(padapter);
13470 +- if (pnetdev)
13471 +- free_netdev(pnetdev);
13472 + return -ENODEV;
13473 + }
13474 +
13475 +@@ -612,6 +615,7 @@ static void r871xu_dev_remove(struct usb_interface *pusb_intf)
13476 + r8712_stop_drv_timers(padapter);
13477 + r871x_dev_unload(padapter);
13478 + r8712_free_drv_sw(padapter);
13479 ++ free_netdev(pnetdev);
13480 +
13481 + /* decrease the reference count of the usb device structure
13482 + * when disconnect
13483 +diff --git a/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c b/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c
13484 +index 9097bcbd67d82..d697ea55a0da1 100644
13485 +--- a/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c
13486 ++++ b/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c
13487 +@@ -1862,7 +1862,7 @@ int vchiq_mmal_init(struct vchiq_mmal_instance **out_instance)
13488 + int status;
13489 + int err = -ENODEV;
13490 + struct vchiq_mmal_instance *instance;
13491 +- static struct vchiq_instance *vchiq_instance;
13492 ++ struct vchiq_instance *vchiq_instance;
13493 + struct vchiq_service_params_kernel params = {
13494 + .version = VC_MMAL_VER,
13495 + .version_min = VC_MMAL_MIN_VER,
13496 +diff --git a/drivers/target/iscsi/cxgbit/cxgbit_ddp.c b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
13497 +index af35251232eb3..b044999ad002b 100644
13498 +--- a/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
13499 ++++ b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
13500 +@@ -265,12 +265,13 @@ void cxgbit_unmap_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
13501 + struct cxgbit_cmd *ccmd = iscsit_priv_cmd(cmd);
13502 +
13503 + if (ccmd->release) {
13504 +- struct cxgbi_task_tag_info *ttinfo = &ccmd->ttinfo;
13505 +-
13506 +- if (ttinfo->sgl) {
13507 ++ if (cmd->se_cmd.se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) {
13508 ++ put_page(sg_page(&ccmd->sg));
13509 ++ } else {
13510 + struct cxgbit_sock *csk = conn->context;
13511 + struct cxgbit_device *cdev = csk->com.cdev;
13512 + struct cxgbi_ppm *ppm = cdev2ppm(cdev);
13513 ++ struct cxgbi_task_tag_info *ttinfo = &ccmd->ttinfo;
13514 +
13515 + /* Abort the TCP conn if DDP is not complete to
13516 + * avoid any possibility of DDP after freeing
13517 +@@ -280,14 +281,14 @@ void cxgbit_unmap_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
13518 + cmd->se_cmd.data_length))
13519 + cxgbit_abort_conn(csk);
13520 +
13521 ++ if (unlikely(ttinfo->sgl)) {
13522 ++ dma_unmap_sg(&ppm->pdev->dev, ttinfo->sgl,
13523 ++ ttinfo->nents, DMA_FROM_DEVICE);
13524 ++ ttinfo->nents = 0;
13525 ++ ttinfo->sgl = NULL;
13526 ++ }
13527 + cxgbi_ppm_ppod_release(ppm, ttinfo->idx);
13528 +-
13529 +- dma_unmap_sg(&ppm->pdev->dev, ttinfo->sgl,
13530 +- ttinfo->nents, DMA_FROM_DEVICE);
13531 +- } else {
13532 +- put_page(sg_page(&ccmd->sg));
13533 + }
13534 +-
13535 + ccmd->release = false;
13536 + }
13537 + }
13538 +diff --git a/drivers/target/iscsi/cxgbit/cxgbit_target.c b/drivers/target/iscsi/cxgbit/cxgbit_target.c
13539 +index b926e1d6c7b8e..282297ffc4044 100644
13540 +--- a/drivers/target/iscsi/cxgbit/cxgbit_target.c
13541 ++++ b/drivers/target/iscsi/cxgbit/cxgbit_target.c
13542 +@@ -997,17 +997,18 @@ static int cxgbit_handle_iscsi_dataout(struct cxgbit_sock *csk)
13543 + struct scatterlist *sg_start;
13544 + struct iscsi_conn *conn = csk->conn;
13545 + struct iscsi_cmd *cmd = NULL;
13546 ++ struct cxgbit_cmd *ccmd;
13547 ++ struct cxgbi_task_tag_info *ttinfo;
13548 + struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
13549 + struct iscsi_data *hdr = (struct iscsi_data *)pdu_cb->hdr;
13550 + u32 data_offset = be32_to_cpu(hdr->offset);
13551 +- u32 data_len = pdu_cb->dlen;
13552 ++ u32 data_len = ntoh24(hdr->dlength);
13553 + int rc, sg_nents, sg_off;
13554 + bool dcrc_err = false;
13555 +
13556 + if (pdu_cb->flags & PDUCBF_RX_DDP_CMP) {
13557 + u32 offset = be32_to_cpu(hdr->offset);
13558 + u32 ddp_data_len;
13559 +- u32 payload_length = ntoh24(hdr->dlength);
13560 + bool success = false;
13561 +
13562 + cmd = iscsit_find_cmd_from_itt_or_dump(conn, hdr->itt, 0);
13563 +@@ -1022,7 +1023,7 @@ static int cxgbit_handle_iscsi_dataout(struct cxgbit_sock *csk)
13564 + cmd->data_sn = be32_to_cpu(hdr->datasn);
13565 +
13566 + rc = __iscsit_check_dataout_hdr(conn, (unsigned char *)hdr,
13567 +- cmd, payload_length, &success);
13568 ++ cmd, data_len, &success);
13569 + if (rc < 0)
13570 + return rc;
13571 + else if (!success)
13572 +@@ -1060,6 +1061,20 @@ static int cxgbit_handle_iscsi_dataout(struct cxgbit_sock *csk)
13573 + cxgbit_skb_copy_to_sg(csk->skb, sg_start, sg_nents, skip);
13574 + }
13575 +
13576 ++ ccmd = iscsit_priv_cmd(cmd);
13577 ++ ttinfo = &ccmd->ttinfo;
13578 ++
13579 ++ if (ccmd->release && ttinfo->sgl &&
13580 ++ (cmd->se_cmd.data_length == (cmd->write_data_done + data_len))) {
13581 ++ struct cxgbit_device *cdev = csk->com.cdev;
13582 ++ struct cxgbi_ppm *ppm = cdev2ppm(cdev);
13583 ++
13584 ++ dma_unmap_sg(&ppm->pdev->dev, ttinfo->sgl, ttinfo->nents,
13585 ++ DMA_FROM_DEVICE);
13586 ++ ttinfo->nents = 0;
13587 ++ ttinfo->sgl = NULL;
13588 ++ }
13589 ++
13590 + check_payload:
13591 +
13592 + rc = iscsit_check_dataout_payload(cmd, hdr, dcrc_err);
13593 +diff --git a/drivers/thermal/cpufreq_cooling.c b/drivers/thermal/cpufreq_cooling.c
13594 +index 3f6a69ccc1737..6e1d6a31ee4fb 100644
13595 +--- a/drivers/thermal/cpufreq_cooling.c
13596 ++++ b/drivers/thermal/cpufreq_cooling.c
13597 +@@ -443,7 +443,7 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
13598 + ret = freq_qos_update_request(&cpufreq_cdev->qos_req, frequency);
13599 + if (ret >= 0) {
13600 + cpufreq_cdev->cpufreq_state = state;
13601 +- cpus = cpufreq_cdev->policy->cpus;
13602 ++ cpus = cpufreq_cdev->policy->related_cpus;
13603 + max_capacity = arch_scale_cpu_capacity(cpumask_first(cpus));
13604 + capacity = frequency * max_capacity;
13605 + capacity /= cpufreq_cdev->policy->cpuinfo.max_freq;
13606 +diff --git a/drivers/thunderbolt/test.c b/drivers/thunderbolt/test.c
13607 +index 464c2d37b992e..e254f8c37cb73 100644
13608 +--- a/drivers/thunderbolt/test.c
13609 ++++ b/drivers/thunderbolt/test.c
13610 +@@ -259,14 +259,14 @@ static struct tb_switch *alloc_dev_default(struct kunit *test,
13611 + if (port->dual_link_port && upstream_port->dual_link_port) {
13612 + port->dual_link_port->remote = upstream_port->dual_link_port;
13613 + upstream_port->dual_link_port->remote = port->dual_link_port;
13614 +- }
13615 +
13616 +- if (bonded) {
13617 +- /* Bonding is used */
13618 +- port->bonded = true;
13619 +- port->dual_link_port->bonded = true;
13620 +- upstream_port->bonded = true;
13621 +- upstream_port->dual_link_port->bonded = true;
13622 ++ if (bonded) {
13623 ++ /* Bonding is used */
13624 ++ port->bonded = true;
13625 ++ port->dual_link_port->bonded = true;
13626 ++ upstream_port->bonded = true;
13627 ++ upstream_port->dual_link_port->bonded = true;
13628 ++ }
13629 + }
13630 +
13631 + return sw;
13632 +diff --git a/drivers/tty/nozomi.c b/drivers/tty/nozomi.c
13633 +index d42b854cb7df2..6890418a29a40 100644
13634 +--- a/drivers/tty/nozomi.c
13635 ++++ b/drivers/tty/nozomi.c
13636 +@@ -1394,7 +1394,7 @@ static int nozomi_card_init(struct pci_dev *pdev,
13637 + NOZOMI_NAME, dc);
13638 + if (unlikely(ret)) {
13639 + dev_err(&pdev->dev, "can't request irq %d\n", pdev->irq);
13640 +- goto err_free_kfifo;
13641 ++ goto err_free_all_kfifo;
13642 + }
13643 +
13644 + DBG1("base_addr: %p", dc->base_addr);
13645 +@@ -1432,12 +1432,15 @@ static int nozomi_card_init(struct pci_dev *pdev,
13646 + return 0;
13647 +
13648 + err_free_tty:
13649 +- for (i = 0; i < MAX_PORT; ++i) {
13650 ++ for (i--; i >= 0; i--) {
13651 + tty_unregister_device(ntty_driver, dc->index_start + i);
13652 + tty_port_destroy(&dc->port[i].port);
13653 + }
13654 ++ free_irq(pdev->irq, dc);
13655 ++err_free_all_kfifo:
13656 ++ i = MAX_PORT;
13657 + err_free_kfifo:
13658 +- for (i = 0; i < MAX_PORT; i++)
13659 ++ for (i--; i >= PORT_MDM; i--)
13660 + kfifo_free(&dc->port[i].fifo_ul);
13661 + err_free_sbuf:
13662 + kfree(dc->send_buf);
13663 +diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
13664 +index 0cc6d35a08156..95e2d6de4f213 100644
13665 +--- a/drivers/tty/serial/8250/8250_omap.c
13666 ++++ b/drivers/tty/serial/8250/8250_omap.c
13667 +@@ -27,6 +27,7 @@
13668 + #include <linux/pm_qos.h>
13669 + #include <linux/pm_wakeirq.h>
13670 + #include <linux/dma-mapping.h>
13671 ++#include <linux/sys_soc.h>
13672 +
13673 + #include "8250.h"
13674 +
13675 +@@ -41,6 +42,8 @@
13676 + */
13677 + #define UART_ERRATA_CLOCK_DISABLE (1 << 3)
13678 + #define UART_HAS_EFR2 BIT(4)
13679 ++#define UART_HAS_RHR_IT_DIS BIT(5)
13680 ++#define UART_RX_TIMEOUT_QUIRK BIT(6)
13681 +
13682 + #define OMAP_UART_FCR_RX_TRIG 6
13683 + #define OMAP_UART_FCR_TX_TRIG 4
13684 +@@ -94,10 +97,17 @@
13685 + #define OMAP_UART_REV_52 0x0502
13686 + #define OMAP_UART_REV_63 0x0603
13687 +
13688 ++/* Interrupt Enable Register 2 */
13689 ++#define UART_OMAP_IER2 0x1B
13690 ++#define UART_OMAP_IER2_RHR_IT_DIS BIT(2)
13691 ++
13692 + /* Enhanced features register 2 */
13693 + #define UART_OMAP_EFR2 0x23
13694 + #define UART_OMAP_EFR2_TIMEOUT_BEHAVE BIT(6)
13695 +
13696 ++/* RX FIFO occupancy indicator */
13697 ++#define UART_OMAP_RX_LVL 0x64
13698 ++
13699 + struct omap8250_priv {
13700 + int line;
13701 + u8 habit;
13702 +@@ -592,6 +602,7 @@ static int omap_8250_dma_handle_irq(struct uart_port *port);
13703 + static irqreturn_t omap8250_irq(int irq, void *dev_id)
13704 + {
13705 + struct uart_port *port = dev_id;
13706 ++ struct omap8250_priv *priv = port->private_data;
13707 + struct uart_8250_port *up = up_to_u8250p(port);
13708 + unsigned int iir;
13709 + int ret;
13710 +@@ -606,6 +617,18 @@ static irqreturn_t omap8250_irq(int irq, void *dev_id)
13711 + serial8250_rpm_get(up);
13712 + iir = serial_port_in(port, UART_IIR);
13713 + ret = serial8250_handle_irq(port, iir);
13714 ++
13715 ++ /*
13716 ++ * On K3 SoCs, it is observed that RX TIMEOUT is signalled after
13717 ++ * FIFO has been drained, in which case a dummy read of RX FIFO
13718 ++ * is required to clear RX TIMEOUT condition.
13719 ++ */
13720 ++ if (priv->habit & UART_RX_TIMEOUT_QUIRK &&
13721 ++ (iir & UART_IIR_RX_TIMEOUT) == UART_IIR_RX_TIMEOUT &&
13722 ++ serial_port_in(port, UART_OMAP_RX_LVL) == 0) {
13723 ++ serial_port_in(port, UART_RX);
13724 ++ }
13725 ++
13726 + serial8250_rpm_put(up);
13727 +
13728 + return IRQ_RETVAL(ret);
13729 +@@ -756,17 +779,27 @@ static void __dma_rx_do_complete(struct uart_8250_port *p)
13730 + {
13731 + struct uart_8250_dma *dma = p->dma;
13732 + struct tty_port *tty_port = &p->port.state->port;
13733 ++ struct omap8250_priv *priv = p->port.private_data;
13734 + struct dma_chan *rxchan = dma->rxchan;
13735 + dma_cookie_t cookie;
13736 + struct dma_tx_state state;
13737 + int count;
13738 + int ret;
13739 ++ u32 reg;
13740 +
13741 + if (!dma->rx_running)
13742 + goto out;
13743 +
13744 + cookie = dma->rx_cookie;
13745 + dma->rx_running = 0;
13746 ++
13747 ++ /* Re-enable RX FIFO interrupt now that transfer is complete */
13748 ++ if (priv->habit & UART_HAS_RHR_IT_DIS) {
13749 ++ reg = serial_in(p, UART_OMAP_IER2);
13750 ++ reg &= ~UART_OMAP_IER2_RHR_IT_DIS;
13751 ++ serial_out(p, UART_OMAP_IER2, UART_OMAP_IER2_RHR_IT_DIS);
13752 ++ }
13753 ++
13754 + dmaengine_tx_status(rxchan, cookie, &state);
13755 +
13756 + count = dma->rx_size - state.residue + state.in_flight_bytes;
13757 +@@ -784,7 +817,7 @@ static void __dma_rx_do_complete(struct uart_8250_port *p)
13758 + poll_count--)
13759 + cpu_relax();
13760 +
13761 +- if (!poll_count)
13762 ++ if (poll_count == -1)
13763 + dev_err(p->port.dev, "teardown incomplete\n");
13764 + }
13765 + }
13766 +@@ -862,6 +895,7 @@ static int omap_8250_rx_dma(struct uart_8250_port *p)
13767 + int err = 0;
13768 + struct dma_async_tx_descriptor *desc;
13769 + unsigned long flags;
13770 ++ u32 reg;
13771 +
13772 + if (priv->rx_dma_broken)
13773 + return -EINVAL;
13774 +@@ -897,6 +931,17 @@ static int omap_8250_rx_dma(struct uart_8250_port *p)
13775 +
13776 + dma->rx_cookie = dmaengine_submit(desc);
13777 +
13778 ++ /*
13779 ++ * Disable RX FIFO interrupt while RX DMA is enabled, else
13780 ++ * spurious interrupt may be raised when data is in the RX FIFO
13781 ++ * but is yet to be drained by DMA.
13782 ++ */
13783 ++ if (priv->habit & UART_HAS_RHR_IT_DIS) {
13784 ++ reg = serial_in(p, UART_OMAP_IER2);
13785 ++ reg |= UART_OMAP_IER2_RHR_IT_DIS;
13786 ++ serial_out(p, UART_OMAP_IER2, UART_OMAP_IER2_RHR_IT_DIS);
13787 ++ }
13788 ++
13789 + dma_async_issue_pending(dma->rxchan);
13790 + out:
13791 + spin_unlock_irqrestore(&priv->rx_dma_lock, flags);
13792 +@@ -1163,6 +1208,11 @@ static int omap8250_no_handle_irq(struct uart_port *port)
13793 + return 0;
13794 + }
13795 +
13796 ++static const struct soc_device_attribute k3_soc_devices[] = {
13797 ++ { .family = "AM65X", },
13798 ++ { .family = "J721E", .revision = "SR1.0" },
13799 ++};
13800 ++
13801 + static struct omap8250_dma_params am654_dma = {
13802 + .rx_size = SZ_2K,
13803 + .rx_trigger = 1,
13804 +@@ -1177,7 +1227,8 @@ static struct omap8250_dma_params am33xx_dma = {
13805 +
13806 + static struct omap8250_platdata am654_platdata = {
13807 + .dma_params = &am654_dma,
13808 +- .habit = UART_HAS_EFR2,
13809 ++ .habit = UART_HAS_EFR2 | UART_HAS_RHR_IT_DIS |
13810 ++ UART_RX_TIMEOUT_QUIRK,
13811 + };
13812 +
13813 + static struct omap8250_platdata am33xx_platdata = {
13814 +@@ -1367,6 +1418,13 @@ static int omap8250_probe(struct platform_device *pdev)
13815 + up.dma->rxconf.src_maxburst = RX_TRIGGER;
13816 + up.dma->txconf.dst_maxburst = TX_TRIGGER;
13817 + }
13818 ++
13819 ++ /*
13820 ++ * AM65x SR1.0, AM65x SR2.0 and J721e SR1.0 don't
13821 ++ * don't have RHR_IT_DIS bit in IER2 register
13822 ++ */
13823 ++ if (soc_device_match(k3_soc_devices))
13824 ++ priv->habit &= ~UART_HAS_RHR_IT_DIS;
13825 + }
13826 + #endif
13827 + ret = serial8250_register_8250_port(&up);
13828 +diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
13829 +index 6e141429c9808..6d9c494bed7d2 100644
13830 +--- a/drivers/tty/serial/8250/8250_port.c
13831 ++++ b/drivers/tty/serial/8250/8250_port.c
13832 +@@ -2635,6 +2635,21 @@ static unsigned int serial8250_get_baud_rate(struct uart_port *port,
13833 + struct ktermios *old)
13834 + {
13835 + unsigned int tolerance = port->uartclk / 100;
13836 ++ unsigned int min;
13837 ++ unsigned int max;
13838 ++
13839 ++ /*
13840 ++ * Handle magic divisors for baud rates above baud_base on SMSC
13841 ++ * Super I/O chips. Enable custom rates of clk/4 and clk/8, but
13842 ++ * disable divisor values beyond 32767, which are unavailable.
13843 ++ */
13844 ++ if (port->flags & UPF_MAGIC_MULTIPLIER) {
13845 ++ min = port->uartclk / 16 / UART_DIV_MAX >> 1;
13846 ++ max = (port->uartclk + tolerance) / 4;
13847 ++ } else {
13848 ++ min = port->uartclk / 16 / UART_DIV_MAX;
13849 ++ max = (port->uartclk + tolerance) / 16;
13850 ++ }
13851 +
13852 + /*
13853 + * Ask the core to calculate the divisor for us.
13854 +@@ -2642,9 +2657,7 @@ static unsigned int serial8250_get_baud_rate(struct uart_port *port,
13855 + * slower than nominal still match standard baud rates without
13856 + * causing transmission errors.
13857 + */
13858 +- return uart_get_baud_rate(port, termios, old,
13859 +- port->uartclk / 16 / UART_DIV_MAX,
13860 +- (port->uartclk + tolerance) / 16);
13861 ++ return uart_get_baud_rate(port, termios, old, min, max);
13862 + }
13863 +
13864 + /*
13865 +diff --git a/drivers/tty/serial/8250/serial_cs.c b/drivers/tty/serial/8250/serial_cs.c
13866 +index e3d10794dbba3..1d3ec8503cef3 100644
13867 +--- a/drivers/tty/serial/8250/serial_cs.c
13868 ++++ b/drivers/tty/serial/8250/serial_cs.c
13869 +@@ -780,6 +780,7 @@ static const struct pcmcia_device_id serial_ids[] = {
13870 + PCMCIA_DEVICE_PROD_ID12("Multi-Tech", "MT2834LT", 0x5f73be51, 0x4cd7c09e),
13871 + PCMCIA_DEVICE_PROD_ID12("OEM ", "C288MX ", 0xb572d360, 0xd2385b7a),
13872 + PCMCIA_DEVICE_PROD_ID12("Option International", "V34bis GSM/PSTN Data/Fax Modem", 0x9d7cd6f5, 0x5cb8bf41),
13873 ++ PCMCIA_DEVICE_PROD_ID12("Option International", "GSM-Ready 56K/ISDN", 0x9d7cd6f5, 0xb23844aa),
13874 + PCMCIA_DEVICE_PROD_ID12("PCMCIA ", "C336MX ", 0x99bcafe9, 0xaa25bcab),
13875 + PCMCIA_DEVICE_PROD_ID12("Quatech Inc", "PCMCIA Dual RS-232 Serial Port Card", 0xc4420b35, 0x92abc92f),
13876 + PCMCIA_DEVICE_PROD_ID12("Quatech Inc", "Dual RS-232 Serial Port PC Card", 0xc4420b35, 0x031a380d),
13877 +@@ -807,7 +808,6 @@ static const struct pcmcia_device_id serial_ids[] = {
13878 + PCMCIA_DEVICE_CIS_PROD_ID12("ADVANTECH", "COMpad-32/85B-4", 0x96913a85, 0xcec8f102, "cis/COMpad4.cis"),
13879 + PCMCIA_DEVICE_CIS_PROD_ID123("ADVANTECH", "COMpad-32/85", "1.0", 0x96913a85, 0x8fbe92ae, 0x0877b627, "cis/COMpad2.cis"),
13880 + PCMCIA_DEVICE_CIS_PROD_ID2("RS-COM 2P", 0xad20b156, "cis/RS-COM-2P.cis"),
13881 +- PCMCIA_DEVICE_CIS_MANF_CARD(0x0013, 0x0000, "cis/GLOBETROTTER.cis"),
13882 + PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.", "SERIAL CARD: SL100 1.00.", 0x19ca78af, 0xf964f42b),
13883 + PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.", "SERIAL CARD: SL100", 0x19ca78af, 0x71d98e83),
13884 + PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.", "SERIAL CARD: SL232 1.00.", 0x19ca78af, 0x69fb7490),
13885 +diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
13886 +index bd047e1f9bea7..de5ee4aad9f34 100644
13887 +--- a/drivers/tty/serial/fsl_lpuart.c
13888 ++++ b/drivers/tty/serial/fsl_lpuart.c
13889 +@@ -1408,17 +1408,7 @@ static unsigned int lpuart_get_mctrl(struct uart_port *port)
13890 +
13891 + static unsigned int lpuart32_get_mctrl(struct uart_port *port)
13892 + {
13893 +- unsigned int temp = 0;
13894 +- unsigned long reg;
13895 +-
13896 +- reg = lpuart32_read(port, UARTMODIR);
13897 +- if (reg & UARTMODIR_TXCTSE)
13898 +- temp |= TIOCM_CTS;
13899 +-
13900 +- if (reg & UARTMODIR_RXRTSE)
13901 +- temp |= TIOCM_RTS;
13902 +-
13903 +- return temp;
13904 ++ return 0;
13905 + }
13906 +
13907 + static void lpuart_set_mctrl(struct uart_port *port, unsigned int mctrl)
13908 +@@ -1625,7 +1615,7 @@ static void lpuart_rx_dma_startup(struct lpuart_port *sport)
13909 + sport->lpuart_dma_rx_use = true;
13910 + rx_dma_timer_init(sport);
13911 +
13912 +- if (sport->port.has_sysrq) {
13913 ++ if (sport->port.has_sysrq && !lpuart_is_32(sport)) {
13914 + cr3 = readb(sport->port.membase + UARTCR3);
13915 + cr3 |= UARTCR3_FEIE;
13916 + writeb(cr3, sport->port.membase + UARTCR3);
13917 +diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
13918 +index 51b0ecabf2ec9..1e26220c78527 100644
13919 +--- a/drivers/tty/serial/mvebu-uart.c
13920 ++++ b/drivers/tty/serial/mvebu-uart.c
13921 +@@ -445,12 +445,11 @@ static void mvebu_uart_shutdown(struct uart_port *port)
13922 +
13923 + static int mvebu_uart_baud_rate_set(struct uart_port *port, unsigned int baud)
13924 + {
13925 +- struct mvebu_uart *mvuart = to_mvuart(port);
13926 + unsigned int d_divisor, m_divisor;
13927 + u32 brdv, osamp;
13928 +
13929 +- if (IS_ERR(mvuart->clk))
13930 +- return -PTR_ERR(mvuart->clk);
13931 ++ if (!port->uartclk)
13932 ++ return -EOPNOTSUPP;
13933 +
13934 + /*
13935 + * The baudrate is derived from the UART clock thanks to two divisors:
13936 +@@ -463,7 +462,7 @@ static int mvebu_uart_baud_rate_set(struct uart_port *port, unsigned int baud)
13937 + * makes use of D to configure the desired baudrate.
13938 + */
13939 + m_divisor = OSAMP_DEFAULT_DIVISOR;
13940 +- d_divisor = DIV_ROUND_UP(port->uartclk, baud * m_divisor);
13941 ++ d_divisor = DIV_ROUND_CLOSEST(port->uartclk, baud * m_divisor);
13942 +
13943 + brdv = readl(port->membase + UART_BRDV);
13944 + brdv &= ~BRDV_BAUD_MASK;
13945 +@@ -482,7 +481,7 @@ static void mvebu_uart_set_termios(struct uart_port *port,
13946 + struct ktermios *old)
13947 + {
13948 + unsigned long flags;
13949 +- unsigned int baud;
13950 ++ unsigned int baud, min_baud, max_baud;
13951 +
13952 + spin_lock_irqsave(&port->lock, flags);
13953 +
13954 +@@ -501,16 +500,21 @@ static void mvebu_uart_set_termios(struct uart_port *port,
13955 + port->ignore_status_mask |= STAT_RX_RDY(port) | STAT_BRK_ERR;
13956 +
13957 + /*
13958 ++ * Maximal divisor is 1023 * 16 when using default (x16) scheme.
13959 + * Maximum achievable frequency with simple baudrate divisor is 230400.
13960 + * Since the error per bit frame would be of more than 15%, achieving
13961 + * higher frequencies would require to implement the fractional divisor
13962 + * feature.
13963 + */
13964 +- baud = uart_get_baud_rate(port, termios, old, 0, 230400);
13965 ++ min_baud = DIV_ROUND_UP(port->uartclk, 1023 * 16);
13966 ++ max_baud = 230400;
13967 ++
13968 ++ baud = uart_get_baud_rate(port, termios, old, min_baud, max_baud);
13969 + if (mvebu_uart_baud_rate_set(port, baud)) {
13970 + /* No clock available, baudrate cannot be changed */
13971 + if (old)
13972 +- baud = uart_get_baud_rate(port, old, NULL, 0, 230400);
13973 ++ baud = uart_get_baud_rate(port, old, NULL,
13974 ++ min_baud, max_baud);
13975 + } else {
13976 + tty_termios_encode_baud_rate(termios, baud, baud);
13977 + uart_update_timeout(port, termios->c_cflag, baud);
13978 +diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
13979 +index 3b1aaa93d750e..70898a999a498 100644
13980 +--- a/drivers/tty/serial/sh-sci.c
13981 ++++ b/drivers/tty/serial/sh-sci.c
13982 +@@ -610,6 +610,14 @@ static void sci_stop_tx(struct uart_port *port)
13983 + ctrl &= ~SCSCR_TIE;
13984 +
13985 + serial_port_out(port, SCSCR, ctrl);
13986 ++
13987 ++#ifdef CONFIG_SERIAL_SH_SCI_DMA
13988 ++ if (to_sci_port(port)->chan_tx &&
13989 ++ !dma_submit_error(to_sci_port(port)->cookie_tx)) {
13990 ++ dmaengine_terminate_async(to_sci_port(port)->chan_tx);
13991 ++ to_sci_port(port)->cookie_tx = -EINVAL;
13992 ++ }
13993 ++#endif
13994 + }
13995 +
13996 + static void sci_start_rx(struct uart_port *port)
13997 +diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
13998 +index 6fbabf56dbb76..df5b2d1e214f1 100644
13999 +--- a/drivers/usb/class/cdc-acm.c
14000 ++++ b/drivers/usb/class/cdc-acm.c
14001 +@@ -1948,6 +1948,11 @@ static const struct usb_device_id acm_ids[] = {
14002 + .driver_info = IGNORE_DEVICE,
14003 + },
14004 +
14005 ++ /* Exclude Heimann Sensor GmbH USB appset demo */
14006 ++ { USB_DEVICE(0x32a7, 0x0000),
14007 ++ .driver_info = IGNORE_DEVICE,
14008 ++ },
14009 ++
14010 + /* control interfaces without any protocol set */
14011 + { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
14012 + USB_CDC_PROTO_NONE) },
14013 +diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c
14014 +index fec17a2d2447d..15911ac7582b4 100644
14015 +--- a/drivers/usb/dwc2/core.c
14016 ++++ b/drivers/usb/dwc2/core.c
14017 +@@ -1167,15 +1167,6 @@ static int dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
14018 + usbcfg &= ~(GUSBCFG_ULPI_UTMI_SEL | GUSBCFG_PHYIF16);
14019 + if (hsotg->params.phy_utmi_width == 16)
14020 + usbcfg |= GUSBCFG_PHYIF16;
14021 +-
14022 +- /* Set turnaround time */
14023 +- if (dwc2_is_device_mode(hsotg)) {
14024 +- usbcfg &= ~GUSBCFG_USBTRDTIM_MASK;
14025 +- if (hsotg->params.phy_utmi_width == 16)
14026 +- usbcfg |= 5 << GUSBCFG_USBTRDTIM_SHIFT;
14027 +- else
14028 +- usbcfg |= 9 << GUSBCFG_USBTRDTIM_SHIFT;
14029 +- }
14030 + break;
14031 + default:
14032 + dev_err(hsotg->dev, "FS PHY selected at HS!\n");
14033 +@@ -1197,6 +1188,24 @@ static int dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
14034 + return retval;
14035 + }
14036 +
14037 ++static void dwc2_set_turnaround_time(struct dwc2_hsotg *hsotg)
14038 ++{
14039 ++ u32 usbcfg;
14040 ++
14041 ++ if (hsotg->params.phy_type != DWC2_PHY_TYPE_PARAM_UTMI)
14042 ++ return;
14043 ++
14044 ++ usbcfg = dwc2_readl(hsotg, GUSBCFG);
14045 ++
14046 ++ usbcfg &= ~GUSBCFG_USBTRDTIM_MASK;
14047 ++ if (hsotg->params.phy_utmi_width == 16)
14048 ++ usbcfg |= 5 << GUSBCFG_USBTRDTIM_SHIFT;
14049 ++ else
14050 ++ usbcfg |= 9 << GUSBCFG_USBTRDTIM_SHIFT;
14051 ++
14052 ++ dwc2_writel(hsotg, usbcfg, GUSBCFG);
14053 ++}
14054 ++
14055 + int dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
14056 + {
14057 + u32 usbcfg;
14058 +@@ -1214,6 +1223,9 @@ int dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
14059 + retval = dwc2_hs_phy_init(hsotg, select_phy);
14060 + if (retval)
14061 + return retval;
14062 ++
14063 ++ if (dwc2_is_device_mode(hsotg))
14064 ++ dwc2_set_turnaround_time(hsotg);
14065 + }
14066 +
14067 + if (hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
14068 +diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
14069 +index 7537dd50ad533..bfb72902f3a68 100644
14070 +--- a/drivers/usb/dwc3/core.c
14071 ++++ b/drivers/usb/dwc3/core.c
14072 +@@ -1590,17 +1590,18 @@ static int dwc3_probe(struct platform_device *pdev)
14073 + }
14074 +
14075 + dwc3_check_params(dwc);
14076 ++ dwc3_debugfs_init(dwc);
14077 +
14078 + ret = dwc3_core_init_mode(dwc);
14079 + if (ret)
14080 + goto err5;
14081 +
14082 +- dwc3_debugfs_init(dwc);
14083 + pm_runtime_put(dev);
14084 +
14085 + return 0;
14086 +
14087 + err5:
14088 ++ dwc3_debugfs_exit(dwc);
14089 + dwc3_event_buffers_cleanup(dwc);
14090 +
14091 + usb_phy_shutdown(dwc->usb2_phy);
14092 +diff --git a/drivers/usb/gadget/function/f_eem.c b/drivers/usb/gadget/function/f_eem.c
14093 +index 2cd9942707b46..5d38f29bda720 100644
14094 +--- a/drivers/usb/gadget/function/f_eem.c
14095 ++++ b/drivers/usb/gadget/function/f_eem.c
14096 +@@ -30,6 +30,11 @@ struct f_eem {
14097 + u8 ctrl_id;
14098 + };
14099 +
14100 ++struct in_context {
14101 ++ struct sk_buff *skb;
14102 ++ struct usb_ep *ep;
14103 ++};
14104 ++
14105 + static inline struct f_eem *func_to_eem(struct usb_function *f)
14106 + {
14107 + return container_of(f, struct f_eem, port.func);
14108 +@@ -320,9 +325,12 @@ fail:
14109 +
14110 + static void eem_cmd_complete(struct usb_ep *ep, struct usb_request *req)
14111 + {
14112 +- struct sk_buff *skb = (struct sk_buff *)req->context;
14113 ++ struct in_context *ctx = req->context;
14114 +
14115 +- dev_kfree_skb_any(skb);
14116 ++ dev_kfree_skb_any(ctx->skb);
14117 ++ kfree(req->buf);
14118 ++ usb_ep_free_request(ctx->ep, req);
14119 ++ kfree(ctx);
14120 + }
14121 +
14122 + /*
14123 +@@ -410,7 +418,9 @@ static int eem_unwrap(struct gether *port,
14124 + * b15: bmType (0 == data, 1 == command)
14125 + */
14126 + if (header & BIT(15)) {
14127 +- struct usb_request *req = cdev->req;
14128 ++ struct usb_request *req;
14129 ++ struct in_context *ctx;
14130 ++ struct usb_ep *ep;
14131 + u16 bmEEMCmd;
14132 +
14133 + /* EEM command packet format:
14134 +@@ -439,11 +449,36 @@ static int eem_unwrap(struct gether *port,
14135 + skb_trim(skb2, len);
14136 + put_unaligned_le16(BIT(15) | BIT(11) | len,
14137 + skb_push(skb2, 2));
14138 ++
14139 ++ ep = port->in_ep;
14140 ++ req = usb_ep_alloc_request(ep, GFP_ATOMIC);
14141 ++ if (!req) {
14142 ++ dev_kfree_skb_any(skb2);
14143 ++ goto next;
14144 ++ }
14145 ++
14146 ++ req->buf = kmalloc(skb2->len, GFP_KERNEL);
14147 ++ if (!req->buf) {
14148 ++ usb_ep_free_request(ep, req);
14149 ++ dev_kfree_skb_any(skb2);
14150 ++ goto next;
14151 ++ }
14152 ++
14153 ++ ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
14154 ++ if (!ctx) {
14155 ++ kfree(req->buf);
14156 ++ usb_ep_free_request(ep, req);
14157 ++ dev_kfree_skb_any(skb2);
14158 ++ goto next;
14159 ++ }
14160 ++ ctx->skb = skb2;
14161 ++ ctx->ep = ep;
14162 ++
14163 + skb_copy_bits(skb2, 0, req->buf, skb2->len);
14164 + req->length = skb2->len;
14165 + req->complete = eem_cmd_complete;
14166 + req->zero = 1;
14167 +- req->context = skb2;
14168 ++ req->context = ctx;
14169 + if (usb_ep_queue(port->in_ep, req, GFP_ATOMIC))
14170 + DBG(cdev, "echo response queue fail\n");
14171 + break;
14172 +diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
14173 +index 7df180b110afc..725e35167837e 100644
14174 +--- a/drivers/usb/gadget/function/f_fs.c
14175 ++++ b/drivers/usb/gadget/function/f_fs.c
14176 +@@ -250,8 +250,8 @@ EXPORT_SYMBOL_GPL(ffs_lock);
14177 + static struct ffs_dev *_ffs_find_dev(const char *name);
14178 + static struct ffs_dev *_ffs_alloc_dev(void);
14179 + static void _ffs_free_dev(struct ffs_dev *dev);
14180 +-static void *ffs_acquire_dev(const char *dev_name);
14181 +-static void ffs_release_dev(struct ffs_data *ffs_data);
14182 ++static int ffs_acquire_dev(const char *dev_name, struct ffs_data *ffs_data);
14183 ++static void ffs_release_dev(struct ffs_dev *ffs_dev);
14184 + static int ffs_ready(struct ffs_data *ffs);
14185 + static void ffs_closed(struct ffs_data *ffs);
14186 +
14187 +@@ -1553,8 +1553,8 @@ unmapped_value:
14188 + static int ffs_fs_get_tree(struct fs_context *fc)
14189 + {
14190 + struct ffs_sb_fill_data *ctx = fc->fs_private;
14191 +- void *ffs_dev;
14192 + struct ffs_data *ffs;
14193 ++ int ret;
14194 +
14195 + ENTER();
14196 +
14197 +@@ -1573,13 +1573,12 @@ static int ffs_fs_get_tree(struct fs_context *fc)
14198 + return -ENOMEM;
14199 + }
14200 +
14201 +- ffs_dev = ffs_acquire_dev(ffs->dev_name);
14202 +- if (IS_ERR(ffs_dev)) {
14203 ++ ret = ffs_acquire_dev(ffs->dev_name, ffs);
14204 ++ if (ret) {
14205 + ffs_data_put(ffs);
14206 +- return PTR_ERR(ffs_dev);
14207 ++ return ret;
14208 + }
14209 +
14210 +- ffs->private_data = ffs_dev;
14211 + ctx->ffs_data = ffs;
14212 + return get_tree_nodev(fc, ffs_sb_fill);
14213 + }
14214 +@@ -1590,7 +1589,6 @@ static void ffs_fs_free_fc(struct fs_context *fc)
14215 +
14216 + if (ctx) {
14217 + if (ctx->ffs_data) {
14218 +- ffs_release_dev(ctx->ffs_data);
14219 + ffs_data_put(ctx->ffs_data);
14220 + }
14221 +
14222 +@@ -1629,10 +1627,8 @@ ffs_fs_kill_sb(struct super_block *sb)
14223 + ENTER();
14224 +
14225 + kill_litter_super(sb);
14226 +- if (sb->s_fs_info) {
14227 +- ffs_release_dev(sb->s_fs_info);
14228 ++ if (sb->s_fs_info)
14229 + ffs_data_closed(sb->s_fs_info);
14230 +- }
14231 + }
14232 +
14233 + static struct file_system_type ffs_fs_type = {
14234 +@@ -1702,6 +1698,7 @@ static void ffs_data_put(struct ffs_data *ffs)
14235 + if (unlikely(refcount_dec_and_test(&ffs->ref))) {
14236 + pr_info("%s(): freeing\n", __func__);
14237 + ffs_data_clear(ffs);
14238 ++ ffs_release_dev(ffs->private_data);
14239 + BUG_ON(waitqueue_active(&ffs->ev.waitq) ||
14240 + swait_active(&ffs->ep0req_completion.wait) ||
14241 + waitqueue_active(&ffs->wait));
14242 +@@ -3031,6 +3028,7 @@ static inline struct f_fs_opts *ffs_do_functionfs_bind(struct usb_function *f,
14243 + struct ffs_function *func = ffs_func_from_usb(f);
14244 + struct f_fs_opts *ffs_opts =
14245 + container_of(f->fi, struct f_fs_opts, func_inst);
14246 ++ struct ffs_data *ffs_data;
14247 + int ret;
14248 +
14249 + ENTER();
14250 +@@ -3045,12 +3043,13 @@ static inline struct f_fs_opts *ffs_do_functionfs_bind(struct usb_function *f,
14251 + if (!ffs_opts->no_configfs)
14252 + ffs_dev_lock();
14253 + ret = ffs_opts->dev->desc_ready ? 0 : -ENODEV;
14254 +- func->ffs = ffs_opts->dev->ffs_data;
14255 ++ ffs_data = ffs_opts->dev->ffs_data;
14256 + if (!ffs_opts->no_configfs)
14257 + ffs_dev_unlock();
14258 + if (ret)
14259 + return ERR_PTR(ret);
14260 +
14261 ++ func->ffs = ffs_data;
14262 + func->conf = c;
14263 + func->gadget = c->cdev->gadget;
14264 +
14265 +@@ -3505,6 +3504,7 @@ static void ffs_free_inst(struct usb_function_instance *f)
14266 + struct f_fs_opts *opts;
14267 +
14268 + opts = to_f_fs_opts(f);
14269 ++ ffs_release_dev(opts->dev);
14270 + ffs_dev_lock();
14271 + _ffs_free_dev(opts->dev);
14272 + ffs_dev_unlock();
14273 +@@ -3692,47 +3692,48 @@ static void _ffs_free_dev(struct ffs_dev *dev)
14274 + {
14275 + list_del(&dev->entry);
14276 +
14277 +- /* Clear the private_data pointer to stop incorrect dev access */
14278 +- if (dev->ffs_data)
14279 +- dev->ffs_data->private_data = NULL;
14280 +-
14281 + kfree(dev);
14282 + if (list_empty(&ffs_devices))
14283 + functionfs_cleanup();
14284 + }
14285 +
14286 +-static void *ffs_acquire_dev(const char *dev_name)
14287 ++static int ffs_acquire_dev(const char *dev_name, struct ffs_data *ffs_data)
14288 + {
14289 ++ int ret = 0;
14290 + struct ffs_dev *ffs_dev;
14291 +
14292 + ENTER();
14293 + ffs_dev_lock();
14294 +
14295 + ffs_dev = _ffs_find_dev(dev_name);
14296 +- if (!ffs_dev)
14297 +- ffs_dev = ERR_PTR(-ENOENT);
14298 +- else if (ffs_dev->mounted)
14299 +- ffs_dev = ERR_PTR(-EBUSY);
14300 +- else if (ffs_dev->ffs_acquire_dev_callback &&
14301 +- ffs_dev->ffs_acquire_dev_callback(ffs_dev))
14302 +- ffs_dev = ERR_PTR(-ENOENT);
14303 +- else
14304 ++ if (!ffs_dev) {
14305 ++ ret = -ENOENT;
14306 ++ } else if (ffs_dev->mounted) {
14307 ++ ret = -EBUSY;
14308 ++ } else if (ffs_dev->ffs_acquire_dev_callback &&
14309 ++ ffs_dev->ffs_acquire_dev_callback(ffs_dev)) {
14310 ++ ret = -ENOENT;
14311 ++ } else {
14312 + ffs_dev->mounted = true;
14313 ++ ffs_dev->ffs_data = ffs_data;
14314 ++ ffs_data->private_data = ffs_dev;
14315 ++ }
14316 +
14317 + ffs_dev_unlock();
14318 +- return ffs_dev;
14319 ++ return ret;
14320 + }
14321 +
14322 +-static void ffs_release_dev(struct ffs_data *ffs_data)
14323 ++static void ffs_release_dev(struct ffs_dev *ffs_dev)
14324 + {
14325 +- struct ffs_dev *ffs_dev;
14326 +-
14327 + ENTER();
14328 + ffs_dev_lock();
14329 +
14330 +- ffs_dev = ffs_data->private_data;
14331 +- if (ffs_dev) {
14332 ++ if (ffs_dev && ffs_dev->mounted) {
14333 + ffs_dev->mounted = false;
14334 ++ if (ffs_dev->ffs_data) {
14335 ++ ffs_dev->ffs_data->private_data = NULL;
14336 ++ ffs_dev->ffs_data = NULL;
14337 ++ }
14338 +
14339 + if (ffs_dev->ffs_release_dev_callback)
14340 + ffs_dev->ffs_release_dev_callback(ffs_dev);
14341 +@@ -3760,7 +3761,6 @@ static int ffs_ready(struct ffs_data *ffs)
14342 + }
14343 +
14344 + ffs_obj->desc_ready = true;
14345 +- ffs_obj->ffs_data = ffs;
14346 +
14347 + if (ffs_obj->ffs_ready_callback) {
14348 + ret = ffs_obj->ffs_ready_callback(ffs);
14349 +@@ -3788,7 +3788,6 @@ static void ffs_closed(struct ffs_data *ffs)
14350 + goto done;
14351 +
14352 + ffs_obj->desc_ready = false;
14353 +- ffs_obj->ffs_data = NULL;
14354 +
14355 + if (test_and_clear_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags) &&
14356 + ffs_obj->ffs_closed_callback)
14357 +diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
14358 +index 8ce043e6ed872..ed380ee58ab5d 100644
14359 +--- a/drivers/usb/host/xhci-mem.c
14360 ++++ b/drivers/usb/host/xhci-mem.c
14361 +@@ -1938,6 +1938,7 @@ no_bw:
14362 + xhci->hw_ports = NULL;
14363 + xhci->rh_bw = NULL;
14364 + xhci->ext_caps = NULL;
14365 ++ xhci->port_caps = NULL;
14366 +
14367 + xhci->page_size = 0;
14368 + xhci->page_shift = 0;
14369 +diff --git a/drivers/usb/host/xhci-pci-renesas.c b/drivers/usb/host/xhci-pci-renesas.c
14370 +index f97ac9f52bf4d..431213cdf9e0e 100644
14371 +--- a/drivers/usb/host/xhci-pci-renesas.c
14372 ++++ b/drivers/usb/host/xhci-pci-renesas.c
14373 +@@ -207,7 +207,8 @@ static int renesas_check_rom_state(struct pci_dev *pdev)
14374 + return 0;
14375 +
14376 + case RENESAS_ROM_STATUS_NO_RESULT: /* No result yet */
14377 +- return 0;
14378 ++ dev_dbg(&pdev->dev, "Unknown ROM status ...\n");
14379 ++ break;
14380 +
14381 + case RENESAS_ROM_STATUS_ERROR: /* Error State */
14382 + default: /* All other states are marked as "Reserved states" */
14383 +@@ -224,13 +225,12 @@ static int renesas_fw_check_running(struct pci_dev *pdev)
14384 + u8 fw_state;
14385 + int err;
14386 +
14387 +- /* Check if device has ROM and loaded, if so skip everything */
14388 +- err = renesas_check_rom(pdev);
14389 +- if (err) { /* we have rom */
14390 +- err = renesas_check_rom_state(pdev);
14391 +- if (!err)
14392 +- return err;
14393 +- }
14394 ++ /*
14395 ++ * Only if device has ROM and loaded FW we can skip loading and
14396 ++ * return success. Otherwise (even unknown state), attempt to load FW.
14397 ++ */
14398 ++ if (renesas_check_rom(pdev) && !renesas_check_rom_state(pdev))
14399 ++ return 0;
14400 +
14401 + /*
14402 + * Test if the device is actually needing the firmware. As most
14403 +diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
14404 +index 35eec707cb512..c7d44daa05c4a 100644
14405 +--- a/drivers/usb/typec/class.c
14406 ++++ b/drivers/usb/typec/class.c
14407 +@@ -446,8 +446,10 @@ typec_register_altmode(struct device *parent,
14408 + int ret;
14409 +
14410 + alt = kzalloc(sizeof(*alt), GFP_KERNEL);
14411 +- if (!alt)
14412 ++ if (!alt) {
14413 ++ altmode_id_remove(parent, id);
14414 + return ERR_PTR(-ENOMEM);
14415 ++ }
14416 +
14417 + alt->adev.svid = desc->svid;
14418 + alt->adev.mode = desc->mode;
14419 +diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
14420 +index 48b048edf1ee8..57ae8b46b8361 100644
14421 +--- a/drivers/vfio/pci/vfio_pci.c
14422 ++++ b/drivers/vfio/pci/vfio_pci.c
14423 +@@ -1614,6 +1614,7 @@ static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf)
14424 + {
14425 + struct vm_area_struct *vma = vmf->vma;
14426 + struct vfio_pci_device *vdev = vma->vm_private_data;
14427 ++ struct vfio_pci_mmap_vma *mmap_vma;
14428 + vm_fault_t ret = VM_FAULT_NOPAGE;
14429 +
14430 + mutex_lock(&vdev->vma_lock);
14431 +@@ -1621,24 +1622,36 @@ static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf)
14432 +
14433 + if (!__vfio_pci_memory_enabled(vdev)) {
14434 + ret = VM_FAULT_SIGBUS;
14435 +- mutex_unlock(&vdev->vma_lock);
14436 + goto up_out;
14437 + }
14438 +
14439 +- if (__vfio_pci_add_vma(vdev, vma)) {
14440 +- ret = VM_FAULT_OOM;
14441 +- mutex_unlock(&vdev->vma_lock);
14442 +- goto up_out;
14443 ++ /*
14444 ++ * We populate the whole vma on fault, so we need to test whether
14445 ++ * the vma has already been mapped, such as for concurrent faults
14446 ++ * to the same vma. io_remap_pfn_range() will trigger a BUG_ON if
14447 ++ * we ask it to fill the same range again.
14448 ++ */
14449 ++ list_for_each_entry(mmap_vma, &vdev->vma_list, vma_next) {
14450 ++ if (mmap_vma->vma == vma)
14451 ++ goto up_out;
14452 + }
14453 +
14454 +- mutex_unlock(&vdev->vma_lock);
14455 +-
14456 + if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
14457 +- vma->vm_end - vma->vm_start, vma->vm_page_prot))
14458 ++ vma->vm_end - vma->vm_start,
14459 ++ vma->vm_page_prot)) {
14460 + ret = VM_FAULT_SIGBUS;
14461 ++ zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
14462 ++ goto up_out;
14463 ++ }
14464 ++
14465 ++ if (__vfio_pci_add_vma(vdev, vma)) {
14466 ++ ret = VM_FAULT_OOM;
14467 ++ zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
14468 ++ }
14469 +
14470 + up_out:
14471 + up_read(&vdev->memory_lock);
14472 ++ mutex_unlock(&vdev->vma_lock);
14473 + return ret;
14474 + }
14475 +
14476 +diff --git a/drivers/video/backlight/lm3630a_bl.c b/drivers/video/backlight/lm3630a_bl.c
14477 +index e88a2b0e59046..662029d6a3dc9 100644
14478 +--- a/drivers/video/backlight/lm3630a_bl.c
14479 ++++ b/drivers/video/backlight/lm3630a_bl.c
14480 +@@ -482,8 +482,10 @@ static int lm3630a_parse_node(struct lm3630a_chip *pchip,
14481 +
14482 + device_for_each_child_node(pchip->dev, node) {
14483 + ret = lm3630a_parse_bank(pdata, node, &seen_led_sources);
14484 +- if (ret)
14485 ++ if (ret) {
14486 ++ fwnode_handle_put(node);
14487 + return ret;
14488 ++ }
14489 + }
14490 +
14491 + return ret;
14492 +diff --git a/drivers/video/fbdev/imxfb.c b/drivers/video/fbdev/imxfb.c
14493 +index 884b16efa7e8a..564bd0407ed81 100644
14494 +--- a/drivers/video/fbdev/imxfb.c
14495 ++++ b/drivers/video/fbdev/imxfb.c
14496 +@@ -992,7 +992,7 @@ static int imxfb_probe(struct platform_device *pdev)
14497 + info->screen_buffer = dma_alloc_wc(&pdev->dev, fbi->map_size,
14498 + &fbi->map_dma, GFP_KERNEL);
14499 + if (!info->screen_buffer) {
14500 +- dev_err(&pdev->dev, "Failed to allocate video RAM: %d\n", ret);
14501 ++ dev_err(&pdev->dev, "Failed to allocate video RAM\n");
14502 + ret = -ENOMEM;
14503 + goto failed_map;
14504 + }
14505 +diff --git a/drivers/visorbus/visorchipset.c b/drivers/visorbus/visorchipset.c
14506 +index cb1eb7e05f871..5668cad86e374 100644
14507 +--- a/drivers/visorbus/visorchipset.c
14508 ++++ b/drivers/visorbus/visorchipset.c
14509 +@@ -1561,7 +1561,7 @@ schedule_out:
14510 +
14511 + static int visorchipset_init(struct acpi_device *acpi_device)
14512 + {
14513 +- int err = -ENODEV;
14514 ++ int err = -ENOMEM;
14515 + struct visorchannel *controlvm_channel;
14516 +
14517 + chipset_dev = kzalloc(sizeof(*chipset_dev), GFP_KERNEL);
14518 +@@ -1584,8 +1584,10 @@ static int visorchipset_init(struct acpi_device *acpi_device)
14519 + "controlvm",
14520 + sizeof(struct visor_controlvm_channel),
14521 + VISOR_CONTROLVM_CHANNEL_VERSIONID,
14522 +- VISOR_CHANNEL_SIGNATURE))
14523 ++ VISOR_CHANNEL_SIGNATURE)) {
14524 ++ err = -ENODEV;
14525 + goto error_delete_groups;
14526 ++ }
14527 + /* if booting in a crash kernel */
14528 + if (is_kdump_kernel())
14529 + INIT_DELAYED_WORK(&chipset_dev->periodic_controlvm_work,
14530 +diff --git a/fs/btrfs/Kconfig b/fs/btrfs/Kconfig
14531 +index 68b95ad82126e..520a0f6a7d9e9 100644
14532 +--- a/fs/btrfs/Kconfig
14533 ++++ b/fs/btrfs/Kconfig
14534 +@@ -18,6 +18,8 @@ config BTRFS_FS
14535 + select RAID6_PQ
14536 + select XOR_BLOCKS
14537 + select SRCU
14538 ++ depends on !PPC_256K_PAGES # powerpc
14539 ++ depends on !PAGE_SIZE_256KB # hexagon
14540 +
14541 + help
14542 + Btrfs is a general purpose copy-on-write filesystem with extents,
14543 +diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
14544 +index 4e2cce5ca7f6a..04422d929c232 100644
14545 +--- a/fs/btrfs/delayed-inode.c
14546 ++++ b/fs/btrfs/delayed-inode.c
14547 +@@ -1032,12 +1032,10 @@ static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
14548 + nofs_flag = memalloc_nofs_save();
14549 + ret = btrfs_lookup_inode(trans, root, path, &key, mod);
14550 + memalloc_nofs_restore(nofs_flag);
14551 +- if (ret > 0) {
14552 +- btrfs_release_path(path);
14553 +- return -ENOENT;
14554 +- } else if (ret < 0) {
14555 +- return ret;
14556 +- }
14557 ++ if (ret > 0)
14558 ++ ret = -ENOENT;
14559 ++ if (ret < 0)
14560 ++ goto out;
14561 +
14562 + leaf = path->nodes[0];
14563 + inode_item = btrfs_item_ptr(leaf, path->slots[0],
14564 +@@ -1075,6 +1073,14 @@ err_out:
14565 + btrfs_delayed_inode_release_metadata(fs_info, node, (ret < 0));
14566 + btrfs_release_delayed_inode(node);
14567 +
14568 ++ /*
14569 ++ * If we fail to update the delayed inode we need to abort the
14570 ++ * transaction, because we could leave the inode with the improper
14571 ++ * counts behind.
14572 ++ */
14573 ++ if (ret && ret != -ENOENT)
14574 ++ btrfs_abort_transaction(trans, ret);
14575 ++
14576 + return ret;
14577 +
14578 + search:
14579 +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
14580 +index 4f26dae63b64a..4f21b8fbfd4bc 100644
14581 +--- a/fs/btrfs/inode.c
14582 ++++ b/fs/btrfs/inode.c
14583 +@@ -547,7 +547,7 @@ again:
14584 + * inode has not been flagged as nocompress. This flag can
14585 + * change at any time if we discover bad compression ratios.
14586 + */
14587 +- if (inode_need_compress(BTRFS_I(inode), start, end)) {
14588 ++ if (nr_pages > 1 && inode_need_compress(BTRFS_I(inode), start, end)) {
14589 + WARN_ON(pages);
14590 + pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
14591 + if (!pages) {
14592 +@@ -8213,7 +8213,19 @@ static void btrfs_invalidatepage(struct page *page, unsigned int offset,
14593 + */
14594 + wait_on_page_writeback(page);
14595 +
14596 +- if (offset) {
14597 ++ /*
14598 ++ * For subpage case, we have call sites like
14599 ++ * btrfs_punch_hole_lock_range() which passes range not aligned to
14600 ++ * sectorsize.
14601 ++ * If the range doesn't cover the full page, we don't need to and
14602 ++ * shouldn't clear page extent mapped, as page->private can still
14603 ++ * record subpage dirty bits for other part of the range.
14604 ++ *
14605 ++ * For cases that can invalidate the full even the range doesn't
14606 ++ * cover the full page, like invalidating the last page, we're
14607 ++ * still safe to wait for ordered extent to finish.
14608 ++ */
14609 ++ if (!(offset == 0 && length == PAGE_SIZE)) {
14610 + btrfs_releasepage(page, GFP_NOFS);
14611 + return;
14612 + }
14613 +diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
14614 +index 9e5809118c34d..10f020ab1186f 100644
14615 +--- a/fs/btrfs/send.c
14616 ++++ b/fs/btrfs/send.c
14617 +@@ -4080,6 +4080,17 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
14618 + if (ret < 0)
14619 + goto out;
14620 + } else {
14621 ++ /*
14622 ++ * If we previously orphanized a directory that
14623 ++ * collided with a new reference that we already
14624 ++ * processed, recompute the current path because
14625 ++ * that directory may be part of the path.
14626 ++ */
14627 ++ if (orphanized_dir) {
14628 ++ ret = refresh_ref_path(sctx, cur);
14629 ++ if (ret < 0)
14630 ++ goto out;
14631 ++ }
14632 + ret = send_unlink(sctx, cur->full_path);
14633 + if (ret < 0)
14634 + goto out;
14635 +diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
14636 +index 279d9262b676d..3bb6b688ece52 100644
14637 +--- a/fs/btrfs/sysfs.c
14638 ++++ b/fs/btrfs/sysfs.c
14639 +@@ -382,7 +382,7 @@ static ssize_t btrfs_discard_bitmap_bytes_show(struct kobject *kobj,
14640 + {
14641 + struct btrfs_fs_info *fs_info = discard_to_fs_info(kobj);
14642 +
14643 +- return scnprintf(buf, PAGE_SIZE, "%lld\n",
14644 ++ return scnprintf(buf, PAGE_SIZE, "%llu\n",
14645 + fs_info->discard_ctl.discard_bitmap_bytes);
14646 + }
14647 + BTRFS_ATTR(discard, discard_bitmap_bytes, btrfs_discard_bitmap_bytes_show);
14648 +@@ -404,7 +404,7 @@ static ssize_t btrfs_discard_extent_bytes_show(struct kobject *kobj,
14649 + {
14650 + struct btrfs_fs_info *fs_info = discard_to_fs_info(kobj);
14651 +
14652 +- return scnprintf(buf, PAGE_SIZE, "%lld\n",
14653 ++ return scnprintf(buf, PAGE_SIZE, "%llu\n",
14654 + fs_info->discard_ctl.discard_extent_bytes);
14655 + }
14656 + BTRFS_ATTR(discard, discard_extent_bytes, btrfs_discard_extent_bytes_show);
14657 +diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
14658 +index af2f2f8704d8b..8daa9e4eb1d2e 100644
14659 +--- a/fs/btrfs/transaction.c
14660 ++++ b/fs/btrfs/transaction.c
14661 +@@ -1382,8 +1382,10 @@ int btrfs_defrag_root(struct btrfs_root *root)
14662 +
14663 + while (1) {
14664 + trans = btrfs_start_transaction(root, 0);
14665 +- if (IS_ERR(trans))
14666 +- return PTR_ERR(trans);
14667 ++ if (IS_ERR(trans)) {
14668 ++ ret = PTR_ERR(trans);
14669 ++ break;
14670 ++ }
14671 +
14672 + ret = btrfs_defrag_leaves(trans, root);
14673 +
14674 +diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
14675 +index 300951088a11c..4b913de2f24fb 100644
14676 +--- a/fs/btrfs/tree-log.c
14677 ++++ b/fs/btrfs/tree-log.c
14678 +@@ -6348,6 +6348,7 @@ next:
14679 + error:
14680 + if (wc.trans)
14681 + btrfs_end_transaction(wc.trans);
14682 ++ clear_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
14683 + btrfs_free_path(path);
14684 + return ret;
14685 + }
14686 +diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
14687 +index 248ee81e01516..6599069be690e 100644
14688 +--- a/fs/cifs/cifsglob.h
14689 ++++ b/fs/cifs/cifsglob.h
14690 +@@ -979,7 +979,7 @@ struct cifs_ses {
14691 + struct mutex session_mutex;
14692 + struct TCP_Server_Info *server; /* pointer to server info */
14693 + int ses_count; /* reference counter */
14694 +- enum statusEnum status;
14695 ++ enum statusEnum status; /* updates protected by GlobalMid_Lock */
14696 + unsigned overrideSecFlg; /* if non-zero override global sec flags */
14697 + char *serverOS; /* name of operating system underlying server */
14698 + char *serverNOS; /* name of network operating system of server */
14699 +@@ -1863,6 +1863,7 @@ require use of the stronger protocol */
14700 + * list operations on pending_mid_q and oplockQ
14701 + * updates to XID counters, multiplex id and SMB sequence numbers
14702 + * list operations on global DnotifyReqList
14703 ++ * updates to ses->status
14704 + * tcp_ses_lock protects:
14705 + * list operations on tcp and SMB session lists
14706 + * tcon->open_file_lock protects the list of open files hanging off the tcon
14707 +diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
14708 +index aabaebd1535f0..fb7088d57e46f 100644
14709 +--- a/fs/cifs/connect.c
14710 ++++ b/fs/cifs/connect.c
14711 +@@ -2829,9 +2829,12 @@ void cifs_put_smb_ses(struct cifs_ses *ses)
14712 + spin_unlock(&cifs_tcp_ses_lock);
14713 + return;
14714 + }
14715 ++ spin_unlock(&cifs_tcp_ses_lock);
14716 ++
14717 ++ spin_lock(&GlobalMid_Lock);
14718 + if (ses->status == CifsGood)
14719 + ses->status = CifsExiting;
14720 +- spin_unlock(&cifs_tcp_ses_lock);
14721 ++ spin_unlock(&GlobalMid_Lock);
14722 +
14723 + cifs_free_ipc(ses);
14724 +
14725 +diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
14726 +index a9d1555301446..f6ceb79a995d0 100644
14727 +--- a/fs/cifs/smb2ops.c
14728 ++++ b/fs/cifs/smb2ops.c
14729 +@@ -3459,6 +3459,119 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
14730 + return rc;
14731 + }
14732 +
14733 ++static int smb3_simple_fallocate_write_range(unsigned int xid,
14734 ++ struct cifs_tcon *tcon,
14735 ++ struct cifsFileInfo *cfile,
14736 ++ loff_t off, loff_t len,
14737 ++ char *buf)
14738 ++{
14739 ++ struct cifs_io_parms io_parms = {0};
14740 ++ int nbytes;
14741 ++ struct kvec iov[2];
14742 ++
14743 ++ io_parms.netfid = cfile->fid.netfid;
14744 ++ io_parms.pid = current->tgid;
14745 ++ io_parms.tcon = tcon;
14746 ++ io_parms.persistent_fid = cfile->fid.persistent_fid;
14747 ++ io_parms.volatile_fid = cfile->fid.volatile_fid;
14748 ++ io_parms.offset = off;
14749 ++ io_parms.length = len;
14750 ++
14751 ++ /* iov[0] is reserved for smb header */
14752 ++ iov[1].iov_base = buf;
14753 ++ iov[1].iov_len = io_parms.length;
14754 ++ return SMB2_write(xid, &io_parms, &nbytes, iov, 1);
14755 ++}
14756 ++
14757 ++static int smb3_simple_fallocate_range(unsigned int xid,
14758 ++ struct cifs_tcon *tcon,
14759 ++ struct cifsFileInfo *cfile,
14760 ++ loff_t off, loff_t len)
14761 ++{
14762 ++ struct file_allocated_range_buffer in_data, *out_data = NULL, *tmp_data;
14763 ++ u32 out_data_len;
14764 ++ char *buf = NULL;
14765 ++ loff_t l;
14766 ++ int rc;
14767 ++
14768 ++ in_data.file_offset = cpu_to_le64(off);
14769 ++ in_data.length = cpu_to_le64(len);
14770 ++ rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
14771 ++ cfile->fid.volatile_fid,
14772 ++ FSCTL_QUERY_ALLOCATED_RANGES, true,
14773 ++ (char *)&in_data, sizeof(in_data),
14774 ++ 1024 * sizeof(struct file_allocated_range_buffer),
14775 ++ (char **)&out_data, &out_data_len);
14776 ++ if (rc)
14777 ++ goto out;
14778 ++ /*
14779 ++ * It is already all allocated
14780 ++ */
14781 ++ if (out_data_len == 0)
14782 ++ goto out;
14783 ++
14784 ++ buf = kzalloc(1024 * 1024, GFP_KERNEL);
14785 ++ if (buf == NULL) {
14786 ++ rc = -ENOMEM;
14787 ++ goto out;
14788 ++ }
14789 ++
14790 ++ tmp_data = out_data;
14791 ++ while (len) {
14792 ++ /*
14793 ++ * The rest of the region is unmapped so write it all.
14794 ++ */
14795 ++ if (out_data_len == 0) {
14796 ++ rc = smb3_simple_fallocate_write_range(xid, tcon,
14797 ++ cfile, off, len, buf);
14798 ++ goto out;
14799 ++ }
14800 ++
14801 ++ if (out_data_len < sizeof(struct file_allocated_range_buffer)) {
14802 ++ rc = -EINVAL;
14803 ++ goto out;
14804 ++ }
14805 ++
14806 ++ if (off < le64_to_cpu(tmp_data->file_offset)) {
14807 ++ /*
14808 ++ * We are at a hole. Write until the end of the region
14809 ++ * or until the next allocated data,
14810 ++ * whichever comes next.
14811 ++ */
14812 ++ l = le64_to_cpu(tmp_data->file_offset) - off;
14813 ++ if (len < l)
14814 ++ l = len;
14815 ++ rc = smb3_simple_fallocate_write_range(xid, tcon,
14816 ++ cfile, off, l, buf);
14817 ++ if (rc)
14818 ++ goto out;
14819 ++ off = off + l;
14820 ++ len = len - l;
14821 ++ if (len == 0)
14822 ++ goto out;
14823 ++ }
14824 ++ /*
14825 ++ * We are at a section of allocated data, just skip forward
14826 ++ * until the end of the data or the end of the region
14827 ++ * we are supposed to fallocate, whichever comes first.
14828 ++ */
14829 ++ l = le64_to_cpu(tmp_data->length);
14830 ++ if (len < l)
14831 ++ l = len;
14832 ++ off += l;
14833 ++ len -= l;
14834 ++
14835 ++ tmp_data = &tmp_data[1];
14836 ++ out_data_len -= sizeof(struct file_allocated_range_buffer);
14837 ++ }
14838 ++
14839 ++ out:
14840 ++ kfree(out_data);
14841 ++ kfree(buf);
14842 ++ return rc;
14843 ++}
14844 ++
14845 ++
14846 + static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon,
14847 + loff_t off, loff_t len, bool keep_size)
14848 + {
14849 +@@ -3519,6 +3632,26 @@ static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon,
14850 + }
14851 +
14852 + if ((keep_size == true) || (i_size_read(inode) >= off + len)) {
14853 ++ /*
14854 ++ * At this point, we are trying to fallocate an internal
14855 ++ * regions of a sparse file. Since smb2 does not have a
14856 ++ * fallocate command we have two otions on how to emulate this.
14857 ++ * We can either turn the entire file to become non-sparse
14858 ++ * which we only do if the fallocate is for virtually
14859 ++ * the whole file, or we can overwrite the region with zeroes
14860 ++ * using SMB2_write, which could be prohibitevly expensive
14861 ++ * if len is large.
14862 ++ */
14863 ++ /*
14864 ++ * We are only trying to fallocate a small region so
14865 ++ * just write it with zero.
14866 ++ */
14867 ++ if (len <= 1024 * 1024) {
14868 ++ rc = smb3_simple_fallocate_range(xid, tcon, cfile,
14869 ++ off, len);
14870 ++ goto out;
14871 ++ }
14872 ++
14873 + /*
14874 + * Check if falloc starts within first few pages of file
14875 + * and ends within a few pages of the end of file to
14876 +diff --git a/fs/configfs/file.c b/fs/configfs/file.c
14877 +index da8351d1e4552..4d0825213116a 100644
14878 +--- a/fs/configfs/file.c
14879 ++++ b/fs/configfs/file.c
14880 +@@ -482,13 +482,13 @@ static int configfs_release_bin_file(struct inode *inode, struct file *file)
14881 + buffer->bin_buffer_size);
14882 + }
14883 + up_read(&frag->frag_sem);
14884 +- /* vfree on NULL is safe */
14885 +- vfree(buffer->bin_buffer);
14886 +- buffer->bin_buffer = NULL;
14887 +- buffer->bin_buffer_size = 0;
14888 +- buffer->needs_read_fill = 1;
14889 + }
14890 +
14891 ++ vfree(buffer->bin_buffer);
14892 ++ buffer->bin_buffer = NULL;
14893 ++ buffer->bin_buffer_size = 0;
14894 ++ buffer->needs_read_fill = 1;
14895 ++
14896 + configfs_release(inode, file);
14897 + return 0;
14898 + }
14899 +diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c
14900 +index 1fbe6c24d7052..9fa871e287ba3 100644
14901 +--- a/fs/crypto/fname.c
14902 ++++ b/fs/crypto/fname.c
14903 +@@ -344,13 +344,9 @@ int fscrypt_fname_disk_to_usr(const struct inode *inode,
14904 + offsetof(struct fscrypt_nokey_name, sha256));
14905 + BUILD_BUG_ON(BASE64_CHARS(FSCRYPT_NOKEY_NAME_MAX) > NAME_MAX);
14906 +
14907 +- if (hash) {
14908 +- nokey_name.dirhash[0] = hash;
14909 +- nokey_name.dirhash[1] = minor_hash;
14910 +- } else {
14911 +- nokey_name.dirhash[0] = 0;
14912 +- nokey_name.dirhash[1] = 0;
14913 +- }
14914 ++ nokey_name.dirhash[0] = hash;
14915 ++ nokey_name.dirhash[1] = minor_hash;
14916 ++
14917 + if (iname->len <= sizeof(nokey_name.bytes)) {
14918 + memcpy(nokey_name.bytes, iname->name, iname->len);
14919 + size = offsetof(struct fscrypt_nokey_name, bytes[iname->len]);
14920 +diff --git a/fs/crypto/keysetup.c b/fs/crypto/keysetup.c
14921 +index 31fb08d94f874..9a6f9a188efb9 100644
14922 +--- a/fs/crypto/keysetup.c
14923 ++++ b/fs/crypto/keysetup.c
14924 +@@ -210,15 +210,40 @@ out_unlock:
14925 + return err;
14926 + }
14927 +
14928 ++/*
14929 ++ * Derive a SipHash key from the given fscrypt master key and the given
14930 ++ * application-specific information string.
14931 ++ *
14932 ++ * Note that the KDF produces a byte array, but the SipHash APIs expect the key
14933 ++ * as a pair of 64-bit words. Therefore, on big endian CPUs we have to do an
14934 ++ * endianness swap in order to get the same results as on little endian CPUs.
14935 ++ */
14936 ++static int fscrypt_derive_siphash_key(const struct fscrypt_master_key *mk,
14937 ++ u8 context, const u8 *info,
14938 ++ unsigned int infolen, siphash_key_t *key)
14939 ++{
14940 ++ int err;
14941 ++
14942 ++ err = fscrypt_hkdf_expand(&mk->mk_secret.hkdf, context, info, infolen,
14943 ++ (u8 *)key, sizeof(*key));
14944 ++ if (err)
14945 ++ return err;
14946 ++
14947 ++ BUILD_BUG_ON(sizeof(*key) != 16);
14948 ++ BUILD_BUG_ON(ARRAY_SIZE(key->key) != 2);
14949 ++ le64_to_cpus(&key->key[0]);
14950 ++ le64_to_cpus(&key->key[1]);
14951 ++ return 0;
14952 ++}
14953 ++
14954 + int fscrypt_derive_dirhash_key(struct fscrypt_info *ci,
14955 + const struct fscrypt_master_key *mk)
14956 + {
14957 + int err;
14958 +
14959 +- err = fscrypt_hkdf_expand(&mk->mk_secret.hkdf, HKDF_CONTEXT_DIRHASH_KEY,
14960 +- ci->ci_nonce, FSCRYPT_FILE_NONCE_SIZE,
14961 +- (u8 *)&ci->ci_dirhash_key,
14962 +- sizeof(ci->ci_dirhash_key));
14963 ++ err = fscrypt_derive_siphash_key(mk, HKDF_CONTEXT_DIRHASH_KEY,
14964 ++ ci->ci_nonce, FSCRYPT_FILE_NONCE_SIZE,
14965 ++ &ci->ci_dirhash_key);
14966 + if (err)
14967 + return err;
14968 + ci->ci_dirhash_key_initialized = true;
14969 +@@ -253,10 +278,9 @@ static int fscrypt_setup_iv_ino_lblk_32_key(struct fscrypt_info *ci,
14970 + if (mk->mk_ino_hash_key_initialized)
14971 + goto unlock;
14972 +
14973 +- err = fscrypt_hkdf_expand(&mk->mk_secret.hkdf,
14974 +- HKDF_CONTEXT_INODE_HASH_KEY, NULL, 0,
14975 +- (u8 *)&mk->mk_ino_hash_key,
14976 +- sizeof(mk->mk_ino_hash_key));
14977 ++ err = fscrypt_derive_siphash_key(mk,
14978 ++ HKDF_CONTEXT_INODE_HASH_KEY,
14979 ++ NULL, 0, &mk->mk_ino_hash_key);
14980 + if (err)
14981 + goto unlock;
14982 + /* pairs with smp_load_acquire() above */
14983 +diff --git a/fs/dax.c b/fs/dax.c
14984 +index df5485b4bddf1..d5d7b9393bcaa 100644
14985 +--- a/fs/dax.c
14986 ++++ b/fs/dax.c
14987 +@@ -488,10 +488,11 @@ static void *grab_mapping_entry(struct xa_state *xas,
14988 + struct address_space *mapping, unsigned int order)
14989 + {
14990 + unsigned long index = xas->xa_index;
14991 +- bool pmd_downgrade = false; /* splitting PMD entry into PTE entries? */
14992 ++ bool pmd_downgrade; /* splitting PMD entry into PTE entries? */
14993 + void *entry;
14994 +
14995 + retry:
14996 ++ pmd_downgrade = false;
14997 + xas_lock_irq(xas);
14998 + entry = get_unlocked_entry(xas, order);
14999 +
15000 +diff --git a/fs/dlm/config.c b/fs/dlm/config.c
15001 +index 73e6643903af5..18a8ffcea0aa4 100644
15002 +--- a/fs/dlm/config.c
15003 ++++ b/fs/dlm/config.c
15004 +@@ -79,6 +79,9 @@ struct dlm_cluster {
15005 + unsigned int cl_new_rsb_count;
15006 + unsigned int cl_recover_callbacks;
15007 + char cl_cluster_name[DLM_LOCKSPACE_LEN];
15008 ++
15009 ++ struct dlm_spaces *sps;
15010 ++ struct dlm_comms *cms;
15011 + };
15012 +
15013 + static struct dlm_cluster *config_item_to_cluster(struct config_item *i)
15014 +@@ -379,6 +382,9 @@ static struct config_group *make_cluster(struct config_group *g,
15015 + if (!cl || !sps || !cms)
15016 + goto fail;
15017 +
15018 ++ cl->sps = sps;
15019 ++ cl->cms = cms;
15020 ++
15021 + config_group_init_type_name(&cl->group, name, &cluster_type);
15022 + config_group_init_type_name(&sps->ss_group, "spaces", &spaces_type);
15023 + config_group_init_type_name(&cms->cs_group, "comms", &comms_type);
15024 +@@ -428,6 +434,9 @@ static void drop_cluster(struct config_group *g, struct config_item *i)
15025 + static void release_cluster(struct config_item *i)
15026 + {
15027 + struct dlm_cluster *cl = config_item_to_cluster(i);
15028 ++
15029 ++ kfree(cl->sps);
15030 ++ kfree(cl->cms);
15031 + kfree(cl);
15032 + }
15033 +
15034 +diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
15035 +index 44e2716ac1580..0c78fdfb1f6fa 100644
15036 +--- a/fs/dlm/lowcomms.c
15037 ++++ b/fs/dlm/lowcomms.c
15038 +@@ -599,7 +599,7 @@ static void close_connection(struct connection *con, bool and_other,
15039 + }
15040 + if (con->othercon && and_other) {
15041 + /* Will only re-enter once. */
15042 +- close_connection(con->othercon, false, true, true);
15043 ++ close_connection(con->othercon, false, tx, rx);
15044 + }
15045 +
15046 + con->rx_leftover = 0;
15047 +diff --git a/fs/erofs/super.c b/fs/erofs/super.c
15048 +index d5a6b9b888a56..f31a08d86be89 100644
15049 +--- a/fs/erofs/super.c
15050 ++++ b/fs/erofs/super.c
15051 +@@ -155,6 +155,7 @@ static int erofs_read_superblock(struct super_block *sb)
15052 + goto out;
15053 + }
15054 +
15055 ++ ret = -EINVAL;
15056 + blkszbits = dsb->blkszbits;
15057 + /* 9(512 bytes) + LOG_SECTORS_PER_BLOCK == LOG_BLOCK_SIZE */
15058 + if (blkszbits != LOG_BLOCK_SIZE) {
15059 +diff --git a/fs/exec.c b/fs/exec.c
15060 +index ca89e0e3ef10f..c7a4ef8df3058 100644
15061 +--- a/fs/exec.c
15062 ++++ b/fs/exec.c
15063 +@@ -1347,6 +1347,10 @@ int begin_new_exec(struct linux_binprm * bprm)
15064 + WRITE_ONCE(me->self_exec_id, me->self_exec_id + 1);
15065 + flush_signal_handlers(me, 0);
15066 +
15067 ++ retval = set_cred_ucounts(bprm->cred);
15068 ++ if (retval < 0)
15069 ++ goto out_unlock;
15070 ++
15071 + /*
15072 + * install the new credentials for this executable
15073 + */
15074 +diff --git a/fs/exfat/dir.c b/fs/exfat/dir.c
15075 +index 916797077aad4..dedbc55cd48f5 100644
15076 +--- a/fs/exfat/dir.c
15077 ++++ b/fs/exfat/dir.c
15078 +@@ -62,7 +62,7 @@ static void exfat_get_uniname_from_ext_entry(struct super_block *sb,
15079 + static int exfat_readdir(struct inode *inode, loff_t *cpos, struct exfat_dir_entry *dir_entry)
15080 + {
15081 + int i, dentries_per_clu, dentries_per_clu_bits = 0, num_ext;
15082 +- unsigned int type, clu_offset;
15083 ++ unsigned int type, clu_offset, max_dentries;
15084 + sector_t sector;
15085 + struct exfat_chain dir, clu;
15086 + struct exfat_uni_name uni_name;
15087 +@@ -85,6 +85,8 @@ static int exfat_readdir(struct inode *inode, loff_t *cpos, struct exfat_dir_ent
15088 +
15089 + dentries_per_clu = sbi->dentries_per_clu;
15090 + dentries_per_clu_bits = ilog2(dentries_per_clu);
15091 ++ max_dentries = (unsigned int)min_t(u64, MAX_EXFAT_DENTRIES,
15092 ++ (u64)sbi->num_clusters << dentries_per_clu_bits);
15093 +
15094 + clu_offset = dentry >> dentries_per_clu_bits;
15095 + exfat_chain_dup(&clu, &dir);
15096 +@@ -108,7 +110,7 @@ static int exfat_readdir(struct inode *inode, loff_t *cpos, struct exfat_dir_ent
15097 + }
15098 + }
15099 +
15100 +- while (clu.dir != EXFAT_EOF_CLUSTER) {
15101 ++ while (clu.dir != EXFAT_EOF_CLUSTER && dentry < max_dentries) {
15102 + i = dentry & (dentries_per_clu - 1);
15103 +
15104 + for ( ; i < dentries_per_clu; i++, dentry++) {
15105 +@@ -244,7 +246,7 @@ static int exfat_iterate(struct file *filp, struct dir_context *ctx)
15106 + if (err)
15107 + goto unlock;
15108 + get_new:
15109 +- if (cpos >= i_size_read(inode))
15110 ++ if (ei->flags == ALLOC_NO_FAT_CHAIN && cpos >= i_size_read(inode))
15111 + goto end_of_dir;
15112 +
15113 + err = exfat_readdir(inode, &cpos, &de);
15114 +diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
15115 +index e6542ba264330..e00a35530a4e0 100644
15116 +--- a/fs/ext4/extents.c
15117 ++++ b/fs/ext4/extents.c
15118 +@@ -825,6 +825,7 @@ void ext4_ext_tree_init(handle_t *handle, struct inode *inode)
15119 + eh->eh_entries = 0;
15120 + eh->eh_magic = EXT4_EXT_MAGIC;
15121 + eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
15122 ++ eh->eh_generation = 0;
15123 + ext4_mark_inode_dirty(handle, inode);
15124 + }
15125 +
15126 +@@ -1090,6 +1091,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
15127 + neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
15128 + neh->eh_magic = EXT4_EXT_MAGIC;
15129 + neh->eh_depth = 0;
15130 ++ neh->eh_generation = 0;
15131 +
15132 + /* move remainder of path[depth] to the new leaf */
15133 + if (unlikely(path[depth].p_hdr->eh_entries !=
15134 +@@ -1167,6 +1169,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
15135 + neh->eh_magic = EXT4_EXT_MAGIC;
15136 + neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
15137 + neh->eh_depth = cpu_to_le16(depth - i);
15138 ++ neh->eh_generation = 0;
15139 + fidx = EXT_FIRST_INDEX(neh);
15140 + fidx->ei_block = border;
15141 + ext4_idx_store_pblock(fidx, oldblock);
15142 +diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
15143 +index 0a729027322dd..9a3a8996aacf7 100644
15144 +--- a/fs/ext4/extents_status.c
15145 ++++ b/fs/ext4/extents_status.c
15146 +@@ -1574,11 +1574,9 @@ static unsigned long ext4_es_scan(struct shrinker *shrink,
15147 + ret = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt);
15148 + trace_ext4_es_shrink_scan_enter(sbi->s_sb, nr_to_scan, ret);
15149 +
15150 +- if (!nr_to_scan)
15151 +- return ret;
15152 +-
15153 + nr_shrunk = __es_shrink(sbi, nr_to_scan, NULL);
15154 +
15155 ++ ret = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt);
15156 + trace_ext4_es_shrink_scan_exit(sbi->s_sb, nr_shrunk, ret);
15157 + return nr_shrunk;
15158 + }
15159 +diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
15160 +index b294ebcb4db4b..875af329c43ec 100644
15161 +--- a/fs/ext4/ialloc.c
15162 ++++ b/fs/ext4/ialloc.c
15163 +@@ -402,7 +402,7 @@ static void get_orlov_stats(struct super_block *sb, ext4_group_t g,
15164 + *
15165 + * We always try to spread first-level directories.
15166 + *
15167 +- * If there are blockgroups with both free inodes and free blocks counts
15168 ++ * If there are blockgroups with both free inodes and free clusters counts
15169 + * not worse than average we return one with smallest directory count.
15170 + * Otherwise we simply return a random group.
15171 + *
15172 +@@ -411,7 +411,7 @@ static void get_orlov_stats(struct super_block *sb, ext4_group_t g,
15173 + * It's OK to put directory into a group unless
15174 + * it has too many directories already (max_dirs) or
15175 + * it has too few free inodes left (min_inodes) or
15176 +- * it has too few free blocks left (min_blocks) or
15177 ++ * it has too few free clusters left (min_clusters) or
15178 + * Parent's group is preferred, if it doesn't satisfy these
15179 + * conditions we search cyclically through the rest. If none
15180 + * of the groups look good we just look for a group with more
15181 +@@ -427,7 +427,7 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent,
15182 + ext4_group_t real_ngroups = ext4_get_groups_count(sb);
15183 + int inodes_per_group = EXT4_INODES_PER_GROUP(sb);
15184 + unsigned int freei, avefreei, grp_free;
15185 +- ext4_fsblk_t freeb, avefreec;
15186 ++ ext4_fsblk_t freec, avefreec;
15187 + unsigned int ndirs;
15188 + int max_dirs, min_inodes;
15189 + ext4_grpblk_t min_clusters;
15190 +@@ -446,9 +446,8 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent,
15191 +
15192 + freei = percpu_counter_read_positive(&sbi->s_freeinodes_counter);
15193 + avefreei = freei / ngroups;
15194 +- freeb = EXT4_C2B(sbi,
15195 +- percpu_counter_read_positive(&sbi->s_freeclusters_counter));
15196 +- avefreec = freeb;
15197 ++ freec = percpu_counter_read_positive(&sbi->s_freeclusters_counter);
15198 ++ avefreec = freec;
15199 + do_div(avefreec, ngroups);
15200 + ndirs = percpu_counter_read_positive(&sbi->s_dirs_counter);
15201 +
15202 +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
15203 +index 3f11c948feb02..18a5321b5ef37 100644
15204 +--- a/fs/ext4/inode.c
15205 ++++ b/fs/ext4/inode.c
15206 +@@ -3419,7 +3419,7 @@ retry:
15207 + * i_disksize out to i_size. This could be beyond where direct I/O is
15208 + * happening and thus expose allocated blocks to direct I/O reads.
15209 + */
15210 +- else if ((map->m_lblk * (1 << blkbits)) >= i_size_read(inode))
15211 ++ else if (((loff_t)map->m_lblk << blkbits) >= i_size_read(inode))
15212 + m_flags = EXT4_GET_BLOCKS_CREATE;
15213 + else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
15214 + m_flags = EXT4_GET_BLOCKS_IO_CREATE_EXT;
15215 +diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
15216 +index 9c390c3d7fb15..d7cb7d719ee58 100644
15217 +--- a/fs/ext4/mballoc.c
15218 ++++ b/fs/ext4/mballoc.c
15219 +@@ -1597,10 +1597,11 @@ static int mb_find_extent(struct ext4_buddy *e4b, int block,
15220 + if (ex->fe_start + ex->fe_len > EXT4_CLUSTERS_PER_GROUP(e4b->bd_sb)) {
15221 + /* Should never happen! (but apparently sometimes does?!?) */
15222 + WARN_ON(1);
15223 +- ext4_error(e4b->bd_sb, "corruption or bug in mb_find_extent "
15224 +- "block=%d, order=%d needed=%d ex=%u/%d/%d@%u",
15225 +- block, order, needed, ex->fe_group, ex->fe_start,
15226 +- ex->fe_len, ex->fe_logical);
15227 ++ ext4_grp_locked_error(e4b->bd_sb, e4b->bd_group, 0, 0,
15228 ++ "corruption or bug in mb_find_extent "
15229 ++ "block=%d, order=%d needed=%d ex=%u/%d/%d@%u",
15230 ++ block, order, needed, ex->fe_group, ex->fe_start,
15231 ++ ex->fe_len, ex->fe_logical);
15232 + ex->fe_len = 0;
15233 + ex->fe_start = 0;
15234 + ex->fe_group = 0;
15235 +diff --git a/fs/ext4/super.c b/fs/ext4/super.c
15236 +index 21c4ba2513ce5..4956917b7cc2b 100644
15237 +--- a/fs/ext4/super.c
15238 ++++ b/fs/ext4/super.c
15239 +@@ -3099,8 +3099,15 @@ static void ext4_orphan_cleanup(struct super_block *sb,
15240 + inode_lock(inode);
15241 + truncate_inode_pages(inode->i_mapping, inode->i_size);
15242 + ret = ext4_truncate(inode);
15243 +- if (ret)
15244 ++ if (ret) {
15245 ++ /*
15246 ++ * We need to clean up the in-core orphan list
15247 ++ * manually if ext4_truncate() failed to get a
15248 ++ * transaction handle.
15249 ++ */
15250 ++ ext4_orphan_del(NULL, inode);
15251 + ext4_std_error(inode->i_sb, ret);
15252 ++ }
15253 + inode_unlock(inode);
15254 + nr_truncates++;
15255 + } else {
15256 +@@ -5039,6 +5046,7 @@ no_journal:
15257 + ext4_msg(sb, KERN_ERR,
15258 + "unable to initialize "
15259 + "flex_bg meta info!");
15260 ++ ret = -ENOMEM;
15261 + goto failed_mount6;
15262 + }
15263 +
15264 +diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
15265 +index bdc0f3b2d7abf..cfae2dddb0bae 100644
15266 +--- a/fs/f2fs/data.c
15267 ++++ b/fs/f2fs/data.c
15268 +@@ -4112,6 +4112,12 @@ static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
15269 + if (f2fs_readonly(F2FS_I_SB(inode)->sb))
15270 + return -EROFS;
15271 +
15272 ++ if (f2fs_lfs_mode(F2FS_I_SB(inode))) {
15273 ++ f2fs_err(F2FS_I_SB(inode),
15274 ++ "Swapfile not supported in LFS mode");
15275 ++ return -EINVAL;
15276 ++ }
15277 ++
15278 + ret = f2fs_convert_inline_inode(inode);
15279 + if (ret)
15280 + return ret;
15281 +diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
15282 +index 90dddb507e4af..a0869194ab739 100644
15283 +--- a/fs/fs-writeback.c
15284 ++++ b/fs/fs-writeback.c
15285 +@@ -505,12 +505,19 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
15286 + if (!isw)
15287 + return;
15288 +
15289 ++ atomic_inc(&isw_nr_in_flight);
15290 ++
15291 + /* find and pin the new wb */
15292 + rcu_read_lock();
15293 + memcg_css = css_from_id(new_wb_id, &memory_cgrp_subsys);
15294 +- if (memcg_css)
15295 +- isw->new_wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
15296 ++ if (memcg_css && !css_tryget(memcg_css))
15297 ++ memcg_css = NULL;
15298 + rcu_read_unlock();
15299 ++ if (!memcg_css)
15300 ++ goto out_free;
15301 ++
15302 ++ isw->new_wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
15303 ++ css_put(memcg_css);
15304 + if (!isw->new_wb)
15305 + goto out_free;
15306 +
15307 +@@ -535,11 +542,10 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
15308 + * Let's continue after I_WB_SWITCH is guaranteed to be visible.
15309 + */
15310 + call_rcu(&isw->rcu_head, inode_switch_wbs_rcu_fn);
15311 +-
15312 +- atomic_inc(&isw_nr_in_flight);
15313 + return;
15314 +
15315 + out_free:
15316 ++ atomic_dec(&isw_nr_in_flight);
15317 + if (isw->new_wb)
15318 + wb_put(isw->new_wb);
15319 + kfree(isw);
15320 +@@ -2196,28 +2202,6 @@ int dirtytime_interval_handler(struct ctl_table *table, int write,
15321 + return ret;
15322 + }
15323 +
15324 +-static noinline void block_dump___mark_inode_dirty(struct inode *inode)
15325 +-{
15326 +- if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
15327 +- struct dentry *dentry;
15328 +- const char *name = "?";
15329 +-
15330 +- dentry = d_find_alias(inode);
15331 +- if (dentry) {
15332 +- spin_lock(&dentry->d_lock);
15333 +- name = (const char *) dentry->d_name.name;
15334 +- }
15335 +- printk(KERN_DEBUG
15336 +- "%s(%d): dirtied inode %lu (%s) on %s\n",
15337 +- current->comm, task_pid_nr(current), inode->i_ino,
15338 +- name, inode->i_sb->s_id);
15339 +- if (dentry) {
15340 +- spin_unlock(&dentry->d_lock);
15341 +- dput(dentry);
15342 +- }
15343 +- }
15344 +-}
15345 +-
15346 + /**
15347 + * __mark_inode_dirty - internal function
15348 + *
15349 +@@ -2277,9 +2261,6 @@ void __mark_inode_dirty(struct inode *inode, int flags)
15350 + (dirtytime && (inode->i_state & I_DIRTY_INODE)))
15351 + return;
15352 +
15353 +- if (unlikely(block_dump))
15354 +- block_dump___mark_inode_dirty(inode);
15355 +-
15356 + spin_lock(&inode->i_lock);
15357 + if (dirtytime && (inode->i_state & I_DIRTY_INODE))
15358 + goto out_unlock_inode;
15359 +diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
15360 +index 588f8d1240aab..4140d5c3ab5a5 100644
15361 +--- a/fs/fuse/dev.c
15362 ++++ b/fs/fuse/dev.c
15363 +@@ -783,6 +783,7 @@ static int fuse_check_page(struct page *page)
15364 + 1 << PG_uptodate |
15365 + 1 << PG_lru |
15366 + 1 << PG_active |
15367 ++ 1 << PG_workingset |
15368 + 1 << PG_reclaim |
15369 + 1 << PG_waiters))) {
15370 + dump_page(page, "fuse: trying to steal weird page");
15371 +@@ -1275,6 +1276,15 @@ static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
15372 + goto restart;
15373 + }
15374 + spin_lock(&fpq->lock);
15375 ++ /*
15376 ++ * Must not put request on fpq->io queue after having been shut down by
15377 ++ * fuse_abort_conn()
15378 ++ */
15379 ++ if (!fpq->connected) {
15380 ++ req->out.h.error = err = -ECONNABORTED;
15381 ++ goto out_end;
15382 ++
15383 ++ }
15384 + list_add(&req->list, &fpq->io);
15385 + spin_unlock(&fpq->lock);
15386 + cs->req = req;
15387 +@@ -1861,7 +1871,7 @@ static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
15388 + }
15389 +
15390 + err = -EINVAL;
15391 +- if (oh.error <= -1000 || oh.error > 0)
15392 ++ if (oh.error <= -512 || oh.error > 0)
15393 + goto copy_finish;
15394 +
15395 + spin_lock(&fpq->lock);
15396 +diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
15397 +index ffa031fe52933..756bbdd563e08 100644
15398 +--- a/fs/fuse/dir.c
15399 ++++ b/fs/fuse/dir.c
15400 +@@ -340,18 +340,33 @@ static struct vfsmount *fuse_dentry_automount(struct path *path)
15401 +
15402 + /* Initialize superblock, making @mp_fi its root */
15403 + err = fuse_fill_super_submount(sb, mp_fi);
15404 +- if (err)
15405 ++ if (err) {
15406 ++ fuse_conn_put(fc);
15407 ++ kfree(fm);
15408 ++ sb->s_fs_info = NULL;
15409 + goto out_put_sb;
15410 ++ }
15411 ++
15412 ++ down_write(&fc->killsb);
15413 ++ list_add_tail(&fm->fc_entry, &fc->mounts);
15414 ++ up_write(&fc->killsb);
15415 +
15416 + sb->s_flags |= SB_ACTIVE;
15417 + fsc->root = dget(sb->s_root);
15418 ++
15419 ++ /*
15420 ++ * FIXME: setting SB_BORN requires a write barrier for
15421 ++ * super_cache_count(). We should actually come
15422 ++ * up with a proper ->get_tree() implementation
15423 ++ * for submounts and call vfs_get_tree() to take
15424 ++ * care of the write barrier.
15425 ++ */
15426 ++ smp_wmb();
15427 ++ sb->s_flags |= SB_BORN;
15428 ++
15429 + /* We are done configuring the superblock, so unlock it */
15430 + up_write(&sb->s_umount);
15431 +
15432 +- down_write(&fc->killsb);
15433 +- list_add_tail(&fm->fc_entry, &fc->mounts);
15434 +- up_write(&fc->killsb);
15435 +-
15436 + /* Create the submount */
15437 + mnt = vfs_create_mount(fsc);
15438 + if (IS_ERR(mnt)) {
15439 +diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
15440 +index 16fb0184ce5e1..cfd9d03f604fe 100644
15441 +--- a/fs/gfs2/file.c
15442 ++++ b/fs/gfs2/file.c
15443 +@@ -474,8 +474,8 @@ static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
15444 + file_update_time(vmf->vma->vm_file);
15445 +
15446 + /* page is wholly or partially inside EOF */
15447 +- if (offset > size - PAGE_SIZE)
15448 +- length = offset_in_page(size);
15449 ++ if (size - offset < PAGE_SIZE)
15450 ++ length = size - offset;
15451 + else
15452 + length = PAGE_SIZE;
15453 +
15454 +diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
15455 +index f2c6bbe5cdb81..ae9c5c1bdc508 100644
15456 +--- a/fs/gfs2/ops_fstype.c
15457 ++++ b/fs/gfs2/ops_fstype.c
15458 +@@ -670,6 +670,7 @@ static int init_statfs(struct gfs2_sbd *sdp)
15459 + }
15460 +
15461 + iput(pn);
15462 ++ pn = NULL;
15463 + ip = GFS2_I(sdp->sd_sc_inode);
15464 + error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0,
15465 + &sdp->sd_sc_gh);
15466 +diff --git a/fs/io_uring.c b/fs/io_uring.c
15467 +index fdbaaf579cc60..0138aa7133172 100644
15468 +--- a/fs/io_uring.c
15469 ++++ b/fs/io_uring.c
15470 +@@ -2770,7 +2770,7 @@ static bool io_file_supports_async(struct file *file, int rw)
15471 + return true;
15472 + return false;
15473 + }
15474 +- if (S_ISCHR(mode) || S_ISSOCK(mode))
15475 ++ if (S_ISSOCK(mode))
15476 + return true;
15477 + if (S_ISREG(mode)) {
15478 + if (io_bdev_nowait(file->f_inode->i_sb->s_bdev) &&
15479 +diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
15480 +index e9d5c8e638b01..ea18e4a2a691d 100644
15481 +--- a/fs/ntfs/inode.c
15482 ++++ b/fs/ntfs/inode.c
15483 +@@ -477,7 +477,7 @@ err_corrupt_attr:
15484 + }
15485 + file_name_attr = (FILE_NAME_ATTR*)((u8*)attr +
15486 + le16_to_cpu(attr->data.resident.value_offset));
15487 +- p2 = (u8*)attr + le32_to_cpu(attr->data.resident.value_length);
15488 ++ p2 = (u8 *)file_name_attr + le32_to_cpu(attr->data.resident.value_length);
15489 + if (p2 < (u8*)attr || p2 > p)
15490 + goto err_corrupt_attr;
15491 + /* This attribute is ok, but is it in the $Extend directory? */
15492 +diff --git a/fs/ocfs2/filecheck.c b/fs/ocfs2/filecheck.c
15493 +index 50f11bfdc8c2d..82a3edc4aea4b 100644
15494 +--- a/fs/ocfs2/filecheck.c
15495 ++++ b/fs/ocfs2/filecheck.c
15496 +@@ -328,11 +328,7 @@ static ssize_t ocfs2_filecheck_attr_show(struct kobject *kobj,
15497 + ret = snprintf(buf + total, remain, "%lu\t\t%u\t%s\n",
15498 + p->fe_ino, p->fe_done,
15499 + ocfs2_filecheck_error(p->fe_status));
15500 +- if (ret < 0) {
15501 +- total = ret;
15502 +- break;
15503 +- }
15504 +- if (ret == remain) {
15505 ++ if (ret >= remain) {
15506 + /* snprintf() didn't fit */
15507 + total = -E2BIG;
15508 + break;
15509 +diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c
15510 +index a191094694c61..03eacb249f379 100644
15511 +--- a/fs/ocfs2/stackglue.c
15512 ++++ b/fs/ocfs2/stackglue.c
15513 +@@ -502,11 +502,7 @@ static ssize_t ocfs2_loaded_cluster_plugins_show(struct kobject *kobj,
15514 + list_for_each_entry(p, &ocfs2_stack_list, sp_list) {
15515 + ret = snprintf(buf, remain, "%s\n",
15516 + p->sp_name);
15517 +- if (ret < 0) {
15518 +- total = ret;
15519 +- break;
15520 +- }
15521 +- if (ret == remain) {
15522 ++ if (ret >= remain) {
15523 + /* snprintf() didn't fit */
15524 + total = -E2BIG;
15525 + break;
15526 +@@ -533,7 +529,7 @@ static ssize_t ocfs2_active_cluster_plugin_show(struct kobject *kobj,
15527 + if (active_stack) {
15528 + ret = snprintf(buf, PAGE_SIZE, "%s\n",
15529 + active_stack->sp_name);
15530 +- if (ret == PAGE_SIZE)
15531 ++ if (ret >= PAGE_SIZE)
15532 + ret = -E2BIG;
15533 + }
15534 + spin_unlock(&ocfs2_stack_lock);
15535 +diff --git a/fs/open.c b/fs/open.c
15536 +index 4d7537ae59df5..3aaaad47d9cac 100644
15537 +--- a/fs/open.c
15538 ++++ b/fs/open.c
15539 +@@ -993,12 +993,20 @@ inline struct open_how build_open_how(int flags, umode_t mode)
15540 +
15541 + inline int build_open_flags(const struct open_how *how, struct open_flags *op)
15542 + {
15543 +- int flags = how->flags;
15544 ++ u64 flags = how->flags;
15545 ++ u64 strip = FMODE_NONOTIFY | O_CLOEXEC;
15546 + int lookup_flags = 0;
15547 + int acc_mode = ACC_MODE(flags);
15548 +
15549 +- /* Must never be set by userspace */
15550 +- flags &= ~(FMODE_NONOTIFY | O_CLOEXEC);
15551 ++ BUILD_BUG_ON_MSG(upper_32_bits(VALID_OPEN_FLAGS),
15552 ++ "struct open_flags doesn't yet handle flags > 32 bits");
15553 ++
15554 ++ /*
15555 ++ * Strip flags that either shouldn't be set by userspace like
15556 ++ * FMODE_NONOTIFY or that aren't relevant in determining struct
15557 ++ * open_flags like O_CLOEXEC.
15558 ++ */
15559 ++ flags &= ~strip;
15560 +
15561 + /*
15562 + * Older syscalls implicitly clear all of the invalid flags or argument
15563 +diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
15564 +index 3cec6fbef725e..3931f60e421f7 100644
15565 +--- a/fs/proc/task_mmu.c
15566 ++++ b/fs/proc/task_mmu.c
15567 +@@ -829,7 +829,7 @@ static int show_smap(struct seq_file *m, void *v)
15568 + __show_smap(m, &mss, false);
15569 +
15570 + seq_printf(m, "THPeligible: %d\n",
15571 +- transparent_hugepage_enabled(vma));
15572 ++ transparent_hugepage_active(vma));
15573 +
15574 + if (arch_pkeys_enabled())
15575 + seq_printf(m, "ProtectionKey: %8u\n", vma_pkey(vma));
15576 +diff --git a/fs/pstore/Kconfig b/fs/pstore/Kconfig
15577 +index e16a49ebfe546..8efe60487b486 100644
15578 +--- a/fs/pstore/Kconfig
15579 ++++ b/fs/pstore/Kconfig
15580 +@@ -165,6 +165,7 @@ config PSTORE_BLK
15581 + tristate "Log panic/oops to a block device"
15582 + depends on PSTORE
15583 + depends on BLOCK
15584 ++ depends on BROKEN
15585 + select PSTORE_ZONE
15586 + default n
15587 + help
15588 +diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
15589 +index d683f5e6d7913..b4d43a4af5f79 100644
15590 +--- a/include/asm-generic/preempt.h
15591 ++++ b/include/asm-generic/preempt.h
15592 +@@ -29,7 +29,7 @@ static __always_inline void preempt_count_set(int pc)
15593 + } while (0)
15594 +
15595 + #define init_idle_preempt_count(p, cpu) do { \
15596 +- task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \
15597 ++ task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \
15598 + } while (0)
15599 +
15600 + static __always_inline void set_preempt_need_resched(void)
15601 +diff --git a/include/clocksource/timer-ti-dm.h b/include/clocksource/timer-ti-dm.h
15602 +index 4c61dade8835f..f6da8a1326398 100644
15603 +--- a/include/clocksource/timer-ti-dm.h
15604 ++++ b/include/clocksource/timer-ti-dm.h
15605 +@@ -74,6 +74,7 @@
15606 + #define OMAP_TIMER_ERRATA_I103_I767 0x80000000
15607 +
15608 + struct timer_regs {
15609 ++ u32 ocp_cfg;
15610 + u32 tidr;
15611 + u32 tier;
15612 + u32 twer;
15613 +diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
15614 +index 0a288dddcf5be..25806141db591 100644
15615 +--- a/include/crypto/internal/hash.h
15616 ++++ b/include/crypto/internal/hash.h
15617 +@@ -75,13 +75,7 @@ void crypto_unregister_ahashes(struct ahash_alg *algs, int count);
15618 + int ahash_register_instance(struct crypto_template *tmpl,
15619 + struct ahash_instance *inst);
15620 +
15621 +-int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
15622 +- unsigned int keylen);
15623 +-
15624 +-static inline bool crypto_shash_alg_has_setkey(struct shash_alg *alg)
15625 +-{
15626 +- return alg->setkey != shash_no_setkey;
15627 +-}
15628 ++bool crypto_shash_alg_has_setkey(struct shash_alg *alg);
15629 +
15630 + static inline bool crypto_shash_alg_needs_key(struct shash_alg *alg)
15631 + {
15632 +diff --git a/include/dt-bindings/clock/imx8mq-clock.h b/include/dt-bindings/clock/imx8mq-clock.h
15633 +index 9b8045d75b8b6..da62c9f61371b 100644
15634 +--- a/include/dt-bindings/clock/imx8mq-clock.h
15635 ++++ b/include/dt-bindings/clock/imx8mq-clock.h
15636 +@@ -405,25 +405,6 @@
15637 +
15638 + #define IMX8MQ_VIDEO2_PLL1_REF_SEL 266
15639 +
15640 +-#define IMX8MQ_SYS1_PLL_40M_CG 267
15641 +-#define IMX8MQ_SYS1_PLL_80M_CG 268
15642 +-#define IMX8MQ_SYS1_PLL_100M_CG 269
15643 +-#define IMX8MQ_SYS1_PLL_133M_CG 270
15644 +-#define IMX8MQ_SYS1_PLL_160M_CG 271
15645 +-#define IMX8MQ_SYS1_PLL_200M_CG 272
15646 +-#define IMX8MQ_SYS1_PLL_266M_CG 273
15647 +-#define IMX8MQ_SYS1_PLL_400M_CG 274
15648 +-#define IMX8MQ_SYS1_PLL_800M_CG 275
15649 +-#define IMX8MQ_SYS2_PLL_50M_CG 276
15650 +-#define IMX8MQ_SYS2_PLL_100M_CG 277
15651 +-#define IMX8MQ_SYS2_PLL_125M_CG 278
15652 +-#define IMX8MQ_SYS2_PLL_166M_CG 279
15653 +-#define IMX8MQ_SYS2_PLL_200M_CG 280
15654 +-#define IMX8MQ_SYS2_PLL_250M_CG 281
15655 +-#define IMX8MQ_SYS2_PLL_333M_CG 282
15656 +-#define IMX8MQ_SYS2_PLL_500M_CG 283
15657 +-#define IMX8MQ_SYS2_PLL_1000M_CG 284
15658 +-
15659 + #define IMX8MQ_CLK_GPU_CORE 285
15660 + #define IMX8MQ_CLK_GPU_SHADER 286
15661 + #define IMX8MQ_CLK_M4_CORE 287
15662 +diff --git a/include/linux/bio.h b/include/linux/bio.h
15663 +index c6d7653829264..23b7a73cd7575 100644
15664 +--- a/include/linux/bio.h
15665 ++++ b/include/linux/bio.h
15666 +@@ -38,9 +38,6 @@
15667 + #define bio_offset(bio) bio_iter_offset((bio), (bio)->bi_iter)
15668 + #define bio_iovec(bio) bio_iter_iovec((bio), (bio)->bi_iter)
15669 +
15670 +-#define bio_multiple_segments(bio) \
15671 +- ((bio)->bi_iter.bi_size != bio_iovec(bio).bv_len)
15672 +-
15673 + #define bvec_iter_sectors(iter) ((iter).bi_size >> 9)
15674 + #define bvec_iter_end_sector(iter) ((iter).bi_sector + bvec_iter_sectors((iter)))
15675 +
15676 +@@ -252,7 +249,7 @@ static inline void bio_clear_flag(struct bio *bio, unsigned int bit)
15677 +
15678 + static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
15679 + {
15680 +- *bv = bio_iovec(bio);
15681 ++ *bv = mp_bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
15682 + }
15683 +
15684 + static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
15685 +@@ -260,10 +257,9 @@ static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
15686 + struct bvec_iter iter = bio->bi_iter;
15687 + int idx;
15688 +
15689 +- if (unlikely(!bio_multiple_segments(bio))) {
15690 +- *bv = bio_iovec(bio);
15691 +- return;
15692 +- }
15693 ++ bio_get_first_bvec(bio, bv);
15694 ++ if (bv->bv_len == bio->bi_iter.bi_size)
15695 ++ return; /* this bio only has a single bvec */
15696 +
15697 + bio_advance_iter(bio, &iter, iter.bi_size);
15698 +
15699 +diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
15700 +index 86d143db65231..83a3ebff74560 100644
15701 +--- a/include/linux/clocksource.h
15702 ++++ b/include/linux/clocksource.h
15703 +@@ -131,7 +131,7 @@ struct clocksource {
15704 + #define CLOCK_SOURCE_UNSTABLE 0x40
15705 + #define CLOCK_SOURCE_SUSPEND_NONSTOP 0x80
15706 + #define CLOCK_SOURCE_RESELECT 0x100
15707 +-
15708 ++#define CLOCK_SOURCE_VERIFY_PERCPU 0x200
15709 + /* simplify initialization of mask field */
15710 + #define CLOCKSOURCE_MASK(bits) GENMASK_ULL((bits) - 1, 0)
15711 +
15712 +diff --git a/include/linux/cred.h b/include/linux/cred.h
15713 +index 18639c069263f..ad160e5fe5c64 100644
15714 +--- a/include/linux/cred.h
15715 ++++ b/include/linux/cred.h
15716 +@@ -144,6 +144,7 @@ struct cred {
15717 + #endif
15718 + struct user_struct *user; /* real user ID subscription */
15719 + struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */
15720 ++ struct ucounts *ucounts;
15721 + struct group_info *group_info; /* supplementary groups for euid/fsgid */
15722 + /* RCU deletion */
15723 + union {
15724 +@@ -170,6 +171,7 @@ extern int set_security_override_from_ctx(struct cred *, const char *);
15725 + extern int set_create_files_as(struct cred *, struct inode *);
15726 + extern int cred_fscmp(const struct cred *, const struct cred *);
15727 + extern void __init cred_init(void);
15728 ++extern int set_cred_ucounts(struct cred *);
15729 +
15730 + /*
15731 + * check for validity of credentials
15732 +diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
15733 +index ff55be0117397..e72787731a5b2 100644
15734 +--- a/include/linux/huge_mm.h
15735 ++++ b/include/linux/huge_mm.h
15736 +@@ -7,43 +7,37 @@
15737 +
15738 + #include <linux/fs.h> /* only for vma_is_dax() */
15739 +
15740 +-extern vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
15741 +-extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
15742 +- pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
15743 +- struct vm_area_struct *vma);
15744 +-extern void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd);
15745 +-extern int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
15746 +- pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
15747 +- struct vm_area_struct *vma);
15748 ++vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
15749 ++int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
15750 ++ pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
15751 ++ struct vm_area_struct *vma);
15752 ++void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd);
15753 ++int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
15754 ++ pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
15755 ++ struct vm_area_struct *vma);
15756 +
15757 + #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
15758 +-extern void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
15759 ++void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
15760 + #else
15761 + static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
15762 + {
15763 + }
15764 + #endif
15765 +
15766 +-extern vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd);
15767 +-extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
15768 +- unsigned long addr,
15769 +- pmd_t *pmd,
15770 +- unsigned int flags);
15771 +-extern bool madvise_free_huge_pmd(struct mmu_gather *tlb,
15772 +- struct vm_area_struct *vma,
15773 +- pmd_t *pmd, unsigned long addr, unsigned long next);
15774 +-extern int zap_huge_pmd(struct mmu_gather *tlb,
15775 +- struct vm_area_struct *vma,
15776 +- pmd_t *pmd, unsigned long addr);
15777 +-extern int zap_huge_pud(struct mmu_gather *tlb,
15778 +- struct vm_area_struct *vma,
15779 +- pud_t *pud, unsigned long addr);
15780 +-extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
15781 +- unsigned long new_addr,
15782 +- pmd_t *old_pmd, pmd_t *new_pmd);
15783 +-extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
15784 +- unsigned long addr, pgprot_t newprot,
15785 +- unsigned long cp_flags);
15786 ++vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd);
15787 ++struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
15788 ++ unsigned long addr, pmd_t *pmd,
15789 ++ unsigned int flags);
15790 ++bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
15791 ++ pmd_t *pmd, unsigned long addr, unsigned long next);
15792 ++int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd,
15793 ++ unsigned long addr);
15794 ++int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud,
15795 ++ unsigned long addr);
15796 ++bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
15797 ++ unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd);
15798 ++int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr,
15799 ++ pgprot_t newprot, unsigned long cp_flags);
15800 + vm_fault_t vmf_insert_pfn_pmd_prot(struct vm_fault *vmf, pfn_t pfn,
15801 + pgprot_t pgprot, bool write);
15802 +
15803 +@@ -84,6 +78,7 @@ static inline vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn,
15804 + }
15805 +
15806 + enum transparent_hugepage_flag {
15807 ++ TRANSPARENT_HUGEPAGE_NEVER_DAX,
15808 + TRANSPARENT_HUGEPAGE_FLAG,
15809 + TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
15810 + TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
15811 +@@ -100,13 +95,13 @@ enum transparent_hugepage_flag {
15812 + struct kobject;
15813 + struct kobj_attribute;
15814 +
15815 +-extern ssize_t single_hugepage_flag_store(struct kobject *kobj,
15816 +- struct kobj_attribute *attr,
15817 +- const char *buf, size_t count,
15818 +- enum transparent_hugepage_flag flag);
15819 +-extern ssize_t single_hugepage_flag_show(struct kobject *kobj,
15820 +- struct kobj_attribute *attr, char *buf,
15821 +- enum transparent_hugepage_flag flag);
15822 ++ssize_t single_hugepage_flag_store(struct kobject *kobj,
15823 ++ struct kobj_attribute *attr,
15824 ++ const char *buf, size_t count,
15825 ++ enum transparent_hugepage_flag flag);
15826 ++ssize_t single_hugepage_flag_show(struct kobject *kobj,
15827 ++ struct kobj_attribute *attr, char *buf,
15828 ++ enum transparent_hugepage_flag flag);
15829 + extern struct kobj_attribute shmem_enabled_attr;
15830 +
15831 + #define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
15832 +@@ -123,29 +118,53 @@ extern struct kobj_attribute shmem_enabled_attr;
15833 +
15834 + extern unsigned long transparent_hugepage_flags;
15835 +
15836 ++static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
15837 ++ unsigned long haddr)
15838 ++{
15839 ++ /* Don't have to check pgoff for anonymous vma */
15840 ++ if (!vma_is_anonymous(vma)) {
15841 ++ if (!IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
15842 ++ HPAGE_PMD_NR))
15843 ++ return false;
15844 ++ }
15845 ++
15846 ++ if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
15847 ++ return false;
15848 ++ return true;
15849 ++}
15850 ++
15851 ++static inline bool transhuge_vma_enabled(struct vm_area_struct *vma,
15852 ++ unsigned long vm_flags)
15853 ++{
15854 ++ /* Explicitly disabled through madvise. */
15855 ++ if ((vm_flags & VM_NOHUGEPAGE) ||
15856 ++ test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
15857 ++ return false;
15858 ++ return true;
15859 ++}
15860 ++
15861 + /*
15862 + * to be used on vmas which are known to support THP.
15863 +- * Use transparent_hugepage_enabled otherwise
15864 ++ * Use transparent_hugepage_active otherwise
15865 + */
15866 + static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
15867 + {
15868 +- if (vma->vm_flags & VM_NOHUGEPAGE)
15869 ++
15870 ++ /*
15871 ++ * If the hardware/firmware marked hugepage support disabled.
15872 ++ */
15873 ++ if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_NEVER_DAX))
15874 + return false;
15875 +
15876 +- if (vma_is_temporary_stack(vma))
15877 ++ if (!transhuge_vma_enabled(vma, vma->vm_flags))
15878 + return false;
15879 +
15880 +- if (test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
15881 ++ if (vma_is_temporary_stack(vma))
15882 + return false;
15883 +
15884 + if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_FLAG))
15885 + return true;
15886 +- /*
15887 +- * For dax vmas, try to always use hugepage mappings. If the kernel does
15888 +- * not support hugepages, fsdax mappings will fallback to PAGE_SIZE
15889 +- * mappings, and device-dax namespaces, that try to guarantee a given
15890 +- * mapping size, will fail to enable
15891 +- */
15892 ++
15893 + if (vma_is_dax(vma))
15894 + return true;
15895 +
15896 +@@ -156,35 +175,17 @@ static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
15897 + return false;
15898 + }
15899 +
15900 +-bool transparent_hugepage_enabled(struct vm_area_struct *vma);
15901 +-
15902 +-#define HPAGE_CACHE_INDEX_MASK (HPAGE_PMD_NR - 1)
15903 +-
15904 +-static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
15905 +- unsigned long haddr)
15906 +-{
15907 +- /* Don't have to check pgoff for anonymous vma */
15908 +- if (!vma_is_anonymous(vma)) {
15909 +- if (((vma->vm_start >> PAGE_SHIFT) & HPAGE_CACHE_INDEX_MASK) !=
15910 +- (vma->vm_pgoff & HPAGE_CACHE_INDEX_MASK))
15911 +- return false;
15912 +- }
15913 +-
15914 +- if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
15915 +- return false;
15916 +- return true;
15917 +-}
15918 ++bool transparent_hugepage_active(struct vm_area_struct *vma);
15919 +
15920 + #define transparent_hugepage_use_zero_page() \
15921 + (transparent_hugepage_flags & \
15922 + (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
15923 +
15924 +-extern unsigned long thp_get_unmapped_area(struct file *filp,
15925 +- unsigned long addr, unsigned long len, unsigned long pgoff,
15926 +- unsigned long flags);
15927 ++unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
15928 ++ unsigned long len, unsigned long pgoff, unsigned long flags);
15929 +
15930 +-extern void prep_transhuge_page(struct page *page);
15931 +-extern void free_transhuge_page(struct page *page);
15932 ++void prep_transhuge_page(struct page *page);
15933 ++void free_transhuge_page(struct page *page);
15934 + bool is_transparent_hugepage(struct page *page);
15935 +
15936 + bool can_split_huge_page(struct page *page, int *pextra_pins);
15937 +@@ -222,16 +223,12 @@ void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
15938 + __split_huge_pud(__vma, __pud, __address); \
15939 + } while (0)
15940 +
15941 +-extern int hugepage_madvise(struct vm_area_struct *vma,
15942 +- unsigned long *vm_flags, int advice);
15943 +-extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
15944 +- unsigned long start,
15945 +- unsigned long end,
15946 +- long adjust_next);
15947 +-extern spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd,
15948 +- struct vm_area_struct *vma);
15949 +-extern spinlock_t *__pud_trans_huge_lock(pud_t *pud,
15950 +- struct vm_area_struct *vma);
15951 ++int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags,
15952 ++ int advice);
15953 ++void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start,
15954 ++ unsigned long end, long adjust_next);
15955 ++spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma);
15956 ++spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma);
15957 +
15958 + static inline int is_swap_pmd(pmd_t pmd)
15959 + {
15960 +@@ -294,7 +291,7 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
15961 + struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
15962 + pud_t *pud, int flags, struct dev_pagemap **pgmap);
15963 +
15964 +-extern vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);
15965 ++vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);
15966 +
15967 + extern struct page *huge_zero_page;
15968 + extern unsigned long huge_zero_pfn;
15969 +@@ -365,7 +362,7 @@ static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
15970 + return false;
15971 + }
15972 +
15973 +-static inline bool transparent_hugepage_enabled(struct vm_area_struct *vma)
15974 ++static inline bool transparent_hugepage_active(struct vm_area_struct *vma)
15975 + {
15976 + return false;
15977 + }
15978 +@@ -376,6 +373,12 @@ static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
15979 + return false;
15980 + }
15981 +
15982 ++static inline bool transhuge_vma_enabled(struct vm_area_struct *vma,
15983 ++ unsigned long vm_flags)
15984 ++{
15985 ++ return false;
15986 ++}
15987 ++
15988 + static inline void prep_transhuge_page(struct page *page) {}
15989 +
15990 + static inline bool is_transparent_hugepage(struct page *page)
15991 +diff --git a/include/linux/iio/common/cros_ec_sensors_core.h b/include/linux/iio/common/cros_ec_sensors_core.h
15992 +index c9b80be82440f..f82857bd693fd 100644
15993 +--- a/include/linux/iio/common/cros_ec_sensors_core.h
15994 ++++ b/include/linux/iio/common/cros_ec_sensors_core.h
15995 +@@ -77,7 +77,7 @@ struct cros_ec_sensors_core_state {
15996 + u16 scale;
15997 + } calib[CROS_EC_SENSOR_MAX_AXIS];
15998 + s8 sign[CROS_EC_SENSOR_MAX_AXIS];
15999 +- u8 samples[CROS_EC_SAMPLE_SIZE];
16000 ++ u8 samples[CROS_EC_SAMPLE_SIZE] __aligned(8);
16001 +
16002 + int (*read_ec_sensors_data)(struct iio_dev *indio_dev,
16003 + unsigned long scan_mask, s16 *data);
16004 +diff --git a/include/linux/prandom.h b/include/linux/prandom.h
16005 +index bbf4b4ad61dfd..056d31317e499 100644
16006 +--- a/include/linux/prandom.h
16007 ++++ b/include/linux/prandom.h
16008 +@@ -111,7 +111,7 @@ static inline u32 __seed(u32 x, u32 m)
16009 + */
16010 + static inline void prandom_seed_state(struct rnd_state *state, u64 seed)
16011 + {
16012 +- u32 i = (seed >> 32) ^ (seed << 10) ^ seed;
16013 ++ u32 i = ((seed >> 32) ^ (seed << 10) ^ seed) & 0xffffffffUL;
16014 +
16015 + state->s1 = __seed(i, 2U);
16016 + state->s2 = __seed(i, 8U);
16017 +diff --git a/include/linux/swap.h b/include/linux/swap.h
16018 +index fbc6805358da0..dfabf4660a670 100644
16019 +--- a/include/linux/swap.h
16020 ++++ b/include/linux/swap.h
16021 +@@ -503,6 +503,15 @@ static inline struct swap_info_struct *swp_swap_info(swp_entry_t entry)
16022 + return NULL;
16023 + }
16024 +
16025 ++static inline struct swap_info_struct *get_swap_device(swp_entry_t entry)
16026 ++{
16027 ++ return NULL;
16028 ++}
16029 ++
16030 ++static inline void put_swap_device(struct swap_info_struct *si)
16031 ++{
16032 ++}
16033 ++
16034 + #define swap_address_space(entry) (NULL)
16035 + #define get_nr_swap_pages() 0L
16036 + #define total_swap_pages 0L
16037 +diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
16038 +index 966ed89803274..e4c5df71f0e74 100644
16039 +--- a/include/linux/tracepoint.h
16040 ++++ b/include/linux/tracepoint.h
16041 +@@ -41,7 +41,17 @@ extern int
16042 + tracepoint_probe_register_prio(struct tracepoint *tp, void *probe, void *data,
16043 + int prio);
16044 + extern int
16045 ++tracepoint_probe_register_prio_may_exist(struct tracepoint *tp, void *probe, void *data,
16046 ++ int prio);
16047 ++extern int
16048 + tracepoint_probe_unregister(struct tracepoint *tp, void *probe, void *data);
16049 ++static inline int
16050 ++tracepoint_probe_register_may_exist(struct tracepoint *tp, void *probe,
16051 ++ void *data)
16052 ++{
16053 ++ return tracepoint_probe_register_prio_may_exist(tp, probe, data,
16054 ++ TRACEPOINT_DEFAULT_PRIO);
16055 ++}
16056 + extern void
16057 + for_each_kernel_tracepoint(void (*fct)(struct tracepoint *tp, void *priv),
16058 + void *priv);
16059 +diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
16060 +index 7616c7bf4b241..e1bd560da1cd4 100644
16061 +--- a/include/linux/user_namespace.h
16062 ++++ b/include/linux/user_namespace.h
16063 +@@ -101,11 +101,15 @@ struct ucounts {
16064 + };
16065 +
16066 + extern struct user_namespace init_user_ns;
16067 ++extern struct ucounts init_ucounts;
16068 +
16069 + bool setup_userns_sysctls(struct user_namespace *ns);
16070 + void retire_userns_sysctls(struct user_namespace *ns);
16071 + struct ucounts *inc_ucount(struct user_namespace *ns, kuid_t uid, enum ucount_type type);
16072 + void dec_ucount(struct ucounts *ucounts, enum ucount_type type);
16073 ++struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid);
16074 ++struct ucounts *get_ucounts(struct ucounts *ucounts);
16075 ++void put_ucounts(struct ucounts *ucounts);
16076 +
16077 + #ifdef CONFIG_USER_NS
16078 +
16079 +diff --git a/include/media/hevc-ctrls.h b/include/media/hevc-ctrls.h
16080 +index 1009cf0891cc6..a3b650ab00f66 100644
16081 +--- a/include/media/hevc-ctrls.h
16082 ++++ b/include/media/hevc-ctrls.h
16083 +@@ -81,7 +81,7 @@ struct v4l2_ctrl_hevc_sps {
16084 + __u64 flags;
16085 + };
16086 +
16087 +-#define V4L2_HEVC_PPS_FLAG_DEPENDENT_SLICE_SEGMENT (1ULL << 0)
16088 ++#define V4L2_HEVC_PPS_FLAG_DEPENDENT_SLICE_SEGMENT_ENABLED (1ULL << 0)
16089 + #define V4L2_HEVC_PPS_FLAG_OUTPUT_FLAG_PRESENT (1ULL << 1)
16090 + #define V4L2_HEVC_PPS_FLAG_SIGN_DATA_HIDING_ENABLED (1ULL << 2)
16091 + #define V4L2_HEVC_PPS_FLAG_CABAC_INIT_PRESENT (1ULL << 3)
16092 +@@ -160,6 +160,7 @@ struct v4l2_hevc_pred_weight_table {
16093 + #define V4L2_HEVC_SLICE_PARAMS_FLAG_USE_INTEGER_MV (1ULL << 6)
16094 + #define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_DEBLOCKING_FILTER_DISABLED (1ULL << 7)
16095 + #define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_LOOP_FILTER_ACROSS_SLICES_ENABLED (1ULL << 8)
16096 ++#define V4L2_HEVC_SLICE_PARAMS_FLAG_DEPENDENT_SLICE_SEGMENT (1ULL << 9)
16097 +
16098 + struct v4l2_ctrl_hevc_slice_params {
16099 + __u32 bit_size;
16100 +diff --git a/include/media/media-dev-allocator.h b/include/media/media-dev-allocator.h
16101 +index b35ea6062596b..2ab54d426c644 100644
16102 +--- a/include/media/media-dev-allocator.h
16103 ++++ b/include/media/media-dev-allocator.h
16104 +@@ -19,7 +19,7 @@
16105 +
16106 + struct usb_device;
16107 +
16108 +-#if defined(CONFIG_MEDIA_CONTROLLER) && defined(CONFIG_USB)
16109 ++#if defined(CONFIG_MEDIA_CONTROLLER) && IS_ENABLED(CONFIG_USB)
16110 + /**
16111 + * media_device_usb_allocate() - Allocate and return struct &media device
16112 + *
16113 +diff --git a/include/media/v4l2-async.h b/include/media/v4l2-async.h
16114 +index d6e31234826f3..92cd9f038fed8 100644
16115 +--- a/include/media/v4l2-async.h
16116 ++++ b/include/media/v4l2-async.h
16117 +@@ -189,9 +189,11 @@ v4l2_async_notifier_add_fwnode_subdev(struct v4l2_async_notifier *notifier,
16118 + *
16119 + * @notif: pointer to &struct v4l2_async_notifier
16120 + * @endpoint: local endpoint pointing to the remote sub-device to be matched
16121 +- * @asd: Async sub-device struct allocated by the caller. The &struct
16122 +- * v4l2_async_subdev shall be the first member of the driver's async
16123 +- * sub-device struct, i.e. both begin at the same memory address.
16124 ++ * @asd_struct_size: size of the driver's async sub-device struct, including
16125 ++ * sizeof(struct v4l2_async_subdev). The &struct
16126 ++ * v4l2_async_subdev shall be the first member of
16127 ++ * the driver's async sub-device struct, i.e. both
16128 ++ * begin at the same memory address.
16129 + *
16130 + * Gets the remote endpoint of a given local endpoint, set it up for fwnode
16131 + * matching and adds the async sub-device to the notifier's @asd_list. The
16132 +@@ -199,13 +201,12 @@ v4l2_async_notifier_add_fwnode_subdev(struct v4l2_async_notifier *notifier,
16133 + * notifier cleanup time.
16134 + *
16135 + * This is just like @v4l2_async_notifier_add_fwnode_subdev, but with the
16136 +- * exception that the fwnode refers to a local endpoint, not the remote one, and
16137 +- * the function relies on the caller to allocate the async sub-device struct.
16138 ++ * exception that the fwnode refers to a local endpoint, not the remote one.
16139 + */
16140 +-int
16141 ++struct v4l2_async_subdev *
16142 + v4l2_async_notifier_add_fwnode_remote_subdev(struct v4l2_async_notifier *notif,
16143 + struct fwnode_handle *endpoint,
16144 +- struct v4l2_async_subdev *asd);
16145 ++ unsigned int asd_struct_size);
16146 +
16147 + /**
16148 + * v4l2_async_notifier_add_i2c_subdev - Allocate and add an i2c async
16149 +diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
16150 +index 6da4b3c5dd55d..243de74e118e7 100644
16151 +--- a/include/net/bluetooth/hci.h
16152 ++++ b/include/net/bluetooth/hci.h
16153 +@@ -1773,13 +1773,15 @@ struct hci_cp_ext_adv_set {
16154 + __u8 max_events;
16155 + } __packed;
16156 +
16157 ++#define HCI_MAX_EXT_AD_LENGTH 251
16158 ++
16159 + #define HCI_OP_LE_SET_EXT_ADV_DATA 0x2037
16160 + struct hci_cp_le_set_ext_adv_data {
16161 + __u8 handle;
16162 + __u8 operation;
16163 + __u8 frag_pref;
16164 + __u8 length;
16165 +- __u8 data[HCI_MAX_AD_LENGTH];
16166 ++ __u8 data[];
16167 + } __packed;
16168 +
16169 + #define HCI_OP_LE_SET_EXT_SCAN_RSP_DATA 0x2038
16170 +@@ -1788,7 +1790,7 @@ struct hci_cp_le_set_ext_scan_rsp_data {
16171 + __u8 operation;
16172 + __u8 frag_pref;
16173 + __u8 length;
16174 +- __u8 data[HCI_MAX_AD_LENGTH];
16175 ++ __u8 data[];
16176 + } __packed;
16177 +
16178 + #define LE_SET_ADV_DATA_OP_COMPLETE 0x03
16179 +diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
16180 +index df611c8b6b595..e534dff2874e1 100644
16181 +--- a/include/net/bluetooth/hci_core.h
16182 ++++ b/include/net/bluetooth/hci_core.h
16183 +@@ -226,9 +226,9 @@ struct adv_info {
16184 + __u16 remaining_time;
16185 + __u16 duration;
16186 + __u16 adv_data_len;
16187 +- __u8 adv_data[HCI_MAX_AD_LENGTH];
16188 ++ __u8 adv_data[HCI_MAX_EXT_AD_LENGTH];
16189 + __u16 scan_rsp_len;
16190 +- __u8 scan_rsp_data[HCI_MAX_AD_LENGTH];
16191 ++ __u8 scan_rsp_data[HCI_MAX_EXT_AD_LENGTH];
16192 + __s8 tx_power;
16193 + bdaddr_t random_addr;
16194 + bool rpa_expired;
16195 +@@ -523,9 +523,9 @@ struct hci_dev {
16196 + DECLARE_BITMAP(dev_flags, __HCI_NUM_FLAGS);
16197 +
16198 + __s8 adv_tx_power;
16199 +- __u8 adv_data[HCI_MAX_AD_LENGTH];
16200 ++ __u8 adv_data[HCI_MAX_EXT_AD_LENGTH];
16201 + __u8 adv_data_len;
16202 +- __u8 scan_rsp_data[HCI_MAX_AD_LENGTH];
16203 ++ __u8 scan_rsp_data[HCI_MAX_EXT_AD_LENGTH];
16204 + __u8 scan_rsp_data_len;
16205 +
16206 + struct list_head adv_instances;
16207 +diff --git a/include/net/ip.h b/include/net/ip.h
16208 +index 2d6b985d11cca..5538e54d4620c 100644
16209 +--- a/include/net/ip.h
16210 ++++ b/include/net/ip.h
16211 +@@ -31,6 +31,7 @@
16212 + #include <net/flow.h>
16213 + #include <net/flow_dissector.h>
16214 + #include <net/netns/hash.h>
16215 ++#include <net/lwtunnel.h>
16216 +
16217 + #define IPV4_MAX_PMTU 65535U /* RFC 2675, Section 5.1 */
16218 + #define IPV4_MIN_MTU 68 /* RFC 791 */
16219 +@@ -445,22 +446,25 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
16220 +
16221 + /* 'forwarding = true' case should always honour route mtu */
16222 + mtu = dst_metric_raw(dst, RTAX_MTU);
16223 +- if (mtu)
16224 +- return mtu;
16225 ++ if (!mtu)
16226 ++ mtu = min(READ_ONCE(dst->dev->mtu), IP_MAX_MTU);
16227 +
16228 +- return min(READ_ONCE(dst->dev->mtu), IP_MAX_MTU);
16229 ++ return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
16230 + }
16231 +
16232 + static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
16233 + const struct sk_buff *skb)
16234 + {
16235 ++ unsigned int mtu;
16236 ++
16237 + if (!sk || !sk_fullsock(sk) || ip_sk_use_pmtu(sk)) {
16238 + bool forwarding = IPCB(skb)->flags & IPSKB_FORWARDED;
16239 +
16240 + return ip_dst_mtu_maybe_forward(skb_dst(skb), forwarding);
16241 + }
16242 +
16243 +- return min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU);
16244 ++ mtu = min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU);
16245 ++ return mtu - lwtunnel_headroom(skb_dst(skb)->lwtstate, mtu);
16246 + }
16247 +
16248 + struct dst_metrics *ip_fib_metrics_init(struct net *net, struct nlattr *fc_mx,
16249 +diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
16250 +index 2a5277758379e..37a7fb1969d6c 100644
16251 +--- a/include/net/ip6_route.h
16252 ++++ b/include/net/ip6_route.h
16253 +@@ -264,11 +264,18 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
16254 +
16255 + static inline int ip6_skb_dst_mtu(struct sk_buff *skb)
16256 + {
16257 ++ int mtu;
16258 ++
16259 + struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
16260 + inet6_sk(skb->sk) : NULL;
16261 +
16262 +- return (np && np->pmtudisc >= IPV6_PMTUDISC_PROBE) ?
16263 +- skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
16264 ++ if (np && np->pmtudisc >= IPV6_PMTUDISC_PROBE) {
16265 ++ mtu = READ_ONCE(skb_dst(skb)->dev->mtu);
16266 ++ mtu -= lwtunnel_headroom(skb_dst(skb)->lwtstate, mtu);
16267 ++ } else
16268 ++ mtu = dst_mtu(skb_dst(skb));
16269 ++
16270 ++ return mtu;
16271 + }
16272 +
16273 + static inline bool ip6_sk_accept_pmtu(const struct sock *sk)
16274 +@@ -316,7 +323,7 @@ static inline unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
16275 + if (dst_metric_locked(dst, RTAX_MTU)) {
16276 + mtu = dst_metric_raw(dst, RTAX_MTU);
16277 + if (mtu)
16278 +- return mtu;
16279 ++ goto out;
16280 + }
16281 +
16282 + mtu = IPV6_MIN_MTU;
16283 +@@ -326,7 +333,8 @@ static inline unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
16284 + mtu = idev->cnf.mtu6;
16285 + rcu_read_unlock();
16286 +
16287 +- return mtu;
16288 ++out:
16289 ++ return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
16290 + }
16291 +
16292 + u32 ip6_mtu_from_fib6(const struct fib6_result *res,
16293 +diff --git a/include/net/macsec.h b/include/net/macsec.h
16294 +index 52874cdfe2260..d6fa6b97f6efa 100644
16295 +--- a/include/net/macsec.h
16296 ++++ b/include/net/macsec.h
16297 +@@ -241,7 +241,7 @@ struct macsec_context {
16298 + struct macsec_rx_sc *rx_sc;
16299 + struct {
16300 + unsigned char assoc_num;
16301 +- u8 key[MACSEC_KEYID_LEN];
16302 ++ u8 key[MACSEC_MAX_KEY_LEN];
16303 + union {
16304 + struct macsec_rx_sa *rx_sa;
16305 + struct macsec_tx_sa *tx_sa;
16306 +diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
16307 +index 4dd2c9e34976e..f8631ad3c8686 100644
16308 +--- a/include/net/sch_generic.h
16309 ++++ b/include/net/sch_generic.h
16310 +@@ -163,6 +163,12 @@ static inline bool qdisc_run_begin(struct Qdisc *qdisc)
16311 + if (spin_trylock(&qdisc->seqlock))
16312 + goto nolock_empty;
16313 +
16314 ++ /* Paired with smp_mb__after_atomic() to make sure
16315 ++ * STATE_MISSED checking is synchronized with clearing
16316 ++ * in pfifo_fast_dequeue().
16317 ++ */
16318 ++ smp_mb__before_atomic();
16319 ++
16320 + /* If the MISSED flag is set, it means other thread has
16321 + * set the MISSED flag before second spin_trylock(), so
16322 + * we can return false here to avoid multi cpus doing
16323 +@@ -180,6 +186,12 @@ static inline bool qdisc_run_begin(struct Qdisc *qdisc)
16324 + */
16325 + set_bit(__QDISC_STATE_MISSED, &qdisc->state);
16326 +
16327 ++ /* spin_trylock() only has load-acquire semantic, so use
16328 ++ * smp_mb__after_atomic() to ensure STATE_MISSED is set
16329 ++ * before doing the second spin_trylock().
16330 ++ */
16331 ++ smp_mb__after_atomic();
16332 ++
16333 + /* Retry again in case other CPU may not see the new flag
16334 + * after it releases the lock at the end of qdisc_run_end().
16335 + */
16336 +diff --git a/include/net/tc_act/tc_vlan.h b/include/net/tc_act/tc_vlan.h
16337 +index f051046ba0344..f94b8bc26f9ec 100644
16338 +--- a/include/net/tc_act/tc_vlan.h
16339 ++++ b/include/net/tc_act/tc_vlan.h
16340 +@@ -16,6 +16,7 @@ struct tcf_vlan_params {
16341 + u16 tcfv_push_vid;
16342 + __be16 tcfv_push_proto;
16343 + u8 tcfv_push_prio;
16344 ++ bool tcfv_push_prio_exists;
16345 + struct rcu_head rcu;
16346 + };
16347 +
16348 +diff --git a/include/net/xfrm.h b/include/net/xfrm.h
16349 +index c58a6d4eb6103..6232a5f048bde 100644
16350 +--- a/include/net/xfrm.h
16351 ++++ b/include/net/xfrm.h
16352 +@@ -1546,6 +1546,7 @@ void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si);
16353 + void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si);
16354 + u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq);
16355 + int xfrm_init_replay(struct xfrm_state *x);
16356 ++u32 __xfrm_state_mtu(struct xfrm_state *x, int mtu);
16357 + u32 xfrm_state_mtu(struct xfrm_state *x, int mtu);
16358 + int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload);
16359 + int xfrm_init_state(struct xfrm_state *x);
16360 +diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h
16361 +index eaa8386dbc630..7a9a23e7a604a 100644
16362 +--- a/include/net/xsk_buff_pool.h
16363 ++++ b/include/net/xsk_buff_pool.h
16364 +@@ -147,11 +147,16 @@ static inline bool xp_desc_crosses_non_contig_pg(struct xsk_buff_pool *pool,
16365 + {
16366 + bool cross_pg = (addr & (PAGE_SIZE - 1)) + len > PAGE_SIZE;
16367 +
16368 +- if (pool->dma_pages_cnt && cross_pg) {
16369 ++ if (likely(!cross_pg))
16370 ++ return false;
16371 ++
16372 ++ if (pool->dma_pages_cnt) {
16373 + return !(pool->dma_pages[addr >> PAGE_SHIFT] &
16374 + XSK_NEXT_PG_CONTIG_MASK);
16375 + }
16376 +- return false;
16377 ++
16378 ++ /* skb path */
16379 ++ return addr + len > pool->addrs_cnt;
16380 + }
16381 +
16382 + static inline u64 xp_aligned_extract_addr(struct xsk_buff_pool *pool, u64 addr)
16383 +diff --git a/include/scsi/fc/fc_ms.h b/include/scsi/fc/fc_ms.h
16384 +index 9e273fed0a85f..800d53dc94705 100644
16385 +--- a/include/scsi/fc/fc_ms.h
16386 ++++ b/include/scsi/fc/fc_ms.h
16387 +@@ -63,8 +63,8 @@ enum fc_fdmi_hba_attr_type {
16388 + * HBA Attribute Length
16389 + */
16390 + #define FC_FDMI_HBA_ATTR_NODENAME_LEN 8
16391 +-#define FC_FDMI_HBA_ATTR_MANUFACTURER_LEN 80
16392 +-#define FC_FDMI_HBA_ATTR_SERIALNUMBER_LEN 80
16393 ++#define FC_FDMI_HBA_ATTR_MANUFACTURER_LEN 64
16394 ++#define FC_FDMI_HBA_ATTR_SERIALNUMBER_LEN 64
16395 + #define FC_FDMI_HBA_ATTR_MODEL_LEN 256
16396 + #define FC_FDMI_HBA_ATTR_MODELDESCR_LEN 256
16397 + #define FC_FDMI_HBA_ATTR_HARDWAREVERSION_LEN 256
16398 +diff --git a/init/main.c b/init/main.c
16399 +index b4449544390ca..dd26a42e80a87 100644
16400 +--- a/init/main.c
16401 ++++ b/init/main.c
16402 +@@ -914,11 +914,7 @@ asmlinkage __visible void __init __no_sanitize_address start_kernel(void)
16403 + * time - but meanwhile we still have a functioning scheduler.
16404 + */
16405 + sched_init();
16406 +- /*
16407 +- * Disable preemption - early bootup scheduling is extremely
16408 +- * fragile until we cpu_idle() for the first time.
16409 +- */
16410 +- preempt_disable();
16411 ++
16412 + if (WARN(!irqs_disabled(),
16413 + "Interrupts were enabled *very* early, fixing it\n"))
16414 + local_irq_disable();
16415 +diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
16416 +index e97724e36dfb5..bf6798fb23319 100644
16417 +--- a/kernel/bpf/verifier.c
16418 ++++ b/kernel/bpf/verifier.c
16419 +@@ -10532,7 +10532,7 @@ static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len
16420 + }
16421 + }
16422 +
16423 +-static void adjust_poke_descs(struct bpf_prog *prog, u32 len)
16424 ++static void adjust_poke_descs(struct bpf_prog *prog, u32 off, u32 len)
16425 + {
16426 + struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab;
16427 + int i, sz = prog->aux->size_poke_tab;
16428 +@@ -10540,6 +10540,8 @@ static void adjust_poke_descs(struct bpf_prog *prog, u32 len)
16429 +
16430 + for (i = 0; i < sz; i++) {
16431 + desc = &tab[i];
16432 ++ if (desc->insn_idx <= off)
16433 ++ continue;
16434 + desc->insn_idx += len - 1;
16435 + }
16436 + }
16437 +@@ -10560,7 +10562,7 @@ static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 of
16438 + if (adjust_insn_aux_data(env, new_prog, off, len))
16439 + return NULL;
16440 + adjust_subprog_starts(env, off, len);
16441 +- adjust_poke_descs(new_prog, len);
16442 ++ adjust_poke_descs(new_prog, off, len);
16443 + return new_prog;
16444 + }
16445 +
16446 +diff --git a/kernel/cred.c b/kernel/cred.c
16447 +index 421b1149c6516..098213d4a39c3 100644
16448 +--- a/kernel/cred.c
16449 ++++ b/kernel/cred.c
16450 +@@ -60,6 +60,7 @@ struct cred init_cred = {
16451 + .user = INIT_USER,
16452 + .user_ns = &init_user_ns,
16453 + .group_info = &init_groups,
16454 ++ .ucounts = &init_ucounts,
16455 + };
16456 +
16457 + static inline void set_cred_subscribers(struct cred *cred, int n)
16458 +@@ -119,6 +120,8 @@ static void put_cred_rcu(struct rcu_head *rcu)
16459 + if (cred->group_info)
16460 + put_group_info(cred->group_info);
16461 + free_uid(cred->user);
16462 ++ if (cred->ucounts)
16463 ++ put_ucounts(cred->ucounts);
16464 + put_user_ns(cred->user_ns);
16465 + kmem_cache_free(cred_jar, cred);
16466 + }
16467 +@@ -222,6 +225,7 @@ struct cred *cred_alloc_blank(void)
16468 + #ifdef CONFIG_DEBUG_CREDENTIALS
16469 + new->magic = CRED_MAGIC;
16470 + #endif
16471 ++ new->ucounts = get_ucounts(&init_ucounts);
16472 +
16473 + if (security_cred_alloc_blank(new, GFP_KERNEL_ACCOUNT) < 0)
16474 + goto error;
16475 +@@ -284,6 +288,11 @@ struct cred *prepare_creds(void)
16476 +
16477 + if (security_prepare_creds(new, old, GFP_KERNEL_ACCOUNT) < 0)
16478 + goto error;
16479 ++
16480 ++ new->ucounts = get_ucounts(new->ucounts);
16481 ++ if (!new->ucounts)
16482 ++ goto error;
16483 ++
16484 + validate_creds(new);
16485 + return new;
16486 +
16487 +@@ -363,6 +372,9 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags)
16488 + ret = create_user_ns(new);
16489 + if (ret < 0)
16490 + goto error_put;
16491 ++ ret = set_cred_ucounts(new);
16492 ++ if (ret < 0)
16493 ++ goto error_put;
16494 + }
16495 +
16496 + #ifdef CONFIG_KEYS
16497 +@@ -653,6 +665,31 @@ int cred_fscmp(const struct cred *a, const struct cred *b)
16498 + }
16499 + EXPORT_SYMBOL(cred_fscmp);
16500 +
16501 ++int set_cred_ucounts(struct cred *new)
16502 ++{
16503 ++ struct task_struct *task = current;
16504 ++ const struct cred *old = task->real_cred;
16505 ++ struct ucounts *old_ucounts = new->ucounts;
16506 ++
16507 ++ if (new->user == old->user && new->user_ns == old->user_ns)
16508 ++ return 0;
16509 ++
16510 ++ /*
16511 ++ * This optimization is needed because alloc_ucounts() uses locks
16512 ++ * for table lookups.
16513 ++ */
16514 ++ if (old_ucounts && old_ucounts->ns == new->user_ns && uid_eq(old_ucounts->uid, new->euid))
16515 ++ return 0;
16516 ++
16517 ++ if (!(new->ucounts = alloc_ucounts(new->user_ns, new->euid)))
16518 ++ return -EAGAIN;
16519 ++
16520 ++ if (old_ucounts)
16521 ++ put_ucounts(old_ucounts);
16522 ++
16523 ++ return 0;
16524 ++}
16525 ++
16526 + /*
16527 + * initialise the credentials stuff
16528 + */
16529 +@@ -719,6 +756,10 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
16530 + if (security_prepare_creds(new, old, GFP_KERNEL_ACCOUNT) < 0)
16531 + goto error;
16532 +
16533 ++ new->ucounts = get_ucounts(new->ucounts);
16534 ++ if (!new->ucounts)
16535 ++ goto error;
16536 ++
16537 + put_cred(old);
16538 + validate_creds(new);
16539 + return new;
16540 +diff --git a/kernel/fork.c b/kernel/fork.c
16541 +index 7c044d377926c..096945ef49ad7 100644
16542 +--- a/kernel/fork.c
16543 ++++ b/kernel/fork.c
16544 +@@ -2392,7 +2392,7 @@ static inline void init_idle_pids(struct task_struct *idle)
16545 + }
16546 + }
16547 +
16548 +-struct task_struct *fork_idle(int cpu)
16549 ++struct task_struct * __init fork_idle(int cpu)
16550 + {
16551 + struct task_struct *task;
16552 + struct kernel_clone_args args = {
16553 +@@ -2960,6 +2960,12 @@ int ksys_unshare(unsigned long unshare_flags)
16554 + if (err)
16555 + goto bad_unshare_cleanup_cred;
16556 +
16557 ++ if (new_cred) {
16558 ++ err = set_cred_ucounts(new_cred);
16559 ++ if (err)
16560 ++ goto bad_unshare_cleanup_cred;
16561 ++ }
16562 ++
16563 + if (new_fs || new_fd || do_sysvsem || new_cred || new_nsproxy) {
16564 + if (do_sysvsem) {
16565 + /*
16566 +diff --git a/kernel/kthread.c b/kernel/kthread.c
16567 +index 36be4364b313a..9825cf89c614d 100644
16568 +--- a/kernel/kthread.c
16569 ++++ b/kernel/kthread.c
16570 +@@ -1107,14 +1107,14 @@ static bool __kthread_cancel_work(struct kthread_work *work)
16571 + * modify @dwork's timer so that it expires after @delay. If @delay is zero,
16572 + * @work is guaranteed to be queued immediately.
16573 + *
16574 +- * Return: %true if @dwork was pending and its timer was modified,
16575 +- * %false otherwise.
16576 ++ * Return: %false if @dwork was idle and queued, %true otherwise.
16577 + *
16578 + * A special case is when the work is being canceled in parallel.
16579 + * It might be caused either by the real kthread_cancel_delayed_work_sync()
16580 + * or yet another kthread_mod_delayed_work() call. We let the other command
16581 +- * win and return %false here. The caller is supposed to synchronize these
16582 +- * operations a reasonable way.
16583 ++ * win and return %true here. The return value can be used for reference
16584 ++ * counting and the number of queued works stays the same. Anyway, the caller
16585 ++ * is supposed to synchronize these operations a reasonable way.
16586 + *
16587 + * This function is safe to call from any context including IRQ handler.
16588 + * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
16589 +@@ -1126,13 +1126,15 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker,
16590 + {
16591 + struct kthread_work *work = &dwork->work;
16592 + unsigned long flags;
16593 +- int ret = false;
16594 ++ int ret;
16595 +
16596 + raw_spin_lock_irqsave(&worker->lock, flags);
16597 +
16598 + /* Do not bother with canceling when never queued. */
16599 +- if (!work->worker)
16600 ++ if (!work->worker) {
16601 ++ ret = false;
16602 + goto fast_queue;
16603 ++ }
16604 +
16605 + /* Work must not be used with >1 worker, see kthread_queue_work() */
16606 + WARN_ON_ONCE(work->worker != worker);
16607 +@@ -1150,8 +1152,11 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker,
16608 + * be used for reference counting.
16609 + */
16610 + kthread_cancel_delayed_work_timer(work, &flags);
16611 +- if (work->canceling)
16612 ++ if (work->canceling) {
16613 ++ /* The number of works in the queue does not change. */
16614 ++ ret = true;
16615 + goto out;
16616 ++ }
16617 + ret = __kthread_cancel_work(work);
16618 +
16619 + fast_queue:
16620 +diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
16621 +index cdca007551e71..8ae9d7abebc08 100644
16622 +--- a/kernel/locking/lockdep.c
16623 ++++ b/kernel/locking/lockdep.c
16624 +@@ -2297,7 +2297,56 @@ static void print_lock_class_header(struct lock_class *class, int depth)
16625 + }
16626 +
16627 + /*
16628 +- * printk the shortest lock dependencies from @start to @end in reverse order:
16629 ++ * Dependency path printing:
16630 ++ *
16631 ++ * After BFS we get a lock dependency path (linked via ->parent of lock_list),
16632 ++ * printing out each lock in the dependency path will help on understanding how
16633 ++ * the deadlock could happen. Here are some details about dependency path
16634 ++ * printing:
16635 ++ *
16636 ++ * 1) A lock_list can be either forwards or backwards for a lock dependency,
16637 ++ * for a lock dependency A -> B, there are two lock_lists:
16638 ++ *
16639 ++ * a) lock_list in the ->locks_after list of A, whose ->class is B and
16640 ++ * ->links_to is A. In this case, we can say the lock_list is
16641 ++ * "A -> B" (forwards case).
16642 ++ *
16643 ++ * b) lock_list in the ->locks_before list of B, whose ->class is A
16644 ++ * and ->links_to is B. In this case, we can say the lock_list is
16645 ++ * "B <- A" (bacwards case).
16646 ++ *
16647 ++ * The ->trace of both a) and b) point to the call trace where B was
16648 ++ * acquired with A held.
16649 ++ *
16650 ++ * 2) A "helper" lock_list is introduced during BFS, this lock_list doesn't
16651 ++ * represent a certain lock dependency, it only provides an initial entry
16652 ++ * for BFS. For example, BFS may introduce a "helper" lock_list whose
16653 ++ * ->class is A, as a result BFS will search all dependencies starting with
16654 ++ * A, e.g. A -> B or A -> C.
16655 ++ *
16656 ++ * The notation of a forwards helper lock_list is like "-> A", which means
16657 ++ * we should search the forwards dependencies starting with "A", e.g A -> B
16658 ++ * or A -> C.
16659 ++ *
16660 ++ * The notation of a bacwards helper lock_list is like "<- B", which means
16661 ++ * we should search the backwards dependencies ending with "B", e.g.
16662 ++ * B <- A or B <- C.
16663 ++ */
16664 ++
16665 ++/*
16666 ++ * printk the shortest lock dependencies from @root to @leaf in reverse order.
16667 ++ *
16668 ++ * We have a lock dependency path as follow:
16669 ++ *
16670 ++ * @root @leaf
16671 ++ * | |
16672 ++ * V V
16673 ++ * ->parent ->parent
16674 ++ * | lock_list | <--------- | lock_list | ... | lock_list | <--------- | lock_list |
16675 ++ * | -> L1 | | L1 -> L2 | ... |Ln-2 -> Ln-1| | Ln-1 -> Ln|
16676 ++ *
16677 ++ * , so it's natural that we start from @leaf and print every ->class and
16678 ++ * ->trace until we reach the @root.
16679 + */
16680 + static void __used
16681 + print_shortest_lock_dependencies(struct lock_list *leaf,
16682 +@@ -2325,6 +2374,61 @@ print_shortest_lock_dependencies(struct lock_list *leaf,
16683 + } while (entry && (depth >= 0));
16684 + }
16685 +
16686 ++/*
16687 ++ * printk the shortest lock dependencies from @leaf to @root.
16688 ++ *
16689 ++ * We have a lock dependency path (from a backwards search) as follow:
16690 ++ *
16691 ++ * @leaf @root
16692 ++ * | |
16693 ++ * V V
16694 ++ * ->parent ->parent
16695 ++ * | lock_list | ---------> | lock_list | ... | lock_list | ---------> | lock_list |
16696 ++ * | L2 <- L1 | | L3 <- L2 | ... | Ln <- Ln-1 | | <- Ln |
16697 ++ *
16698 ++ * , so when we iterate from @leaf to @root, we actually print the lock
16699 ++ * dependency path L1 -> L2 -> .. -> Ln in the non-reverse order.
16700 ++ *
16701 ++ * Another thing to notice here is that ->class of L2 <- L1 is L1, while the
16702 ++ * ->trace of L2 <- L1 is the call trace of L2, in fact we don't have the call
16703 ++ * trace of L1 in the dependency path, which is alright, because most of the
16704 ++ * time we can figure out where L1 is held from the call trace of L2.
16705 ++ */
16706 ++static void __used
16707 ++print_shortest_lock_dependencies_backwards(struct lock_list *leaf,
16708 ++ struct lock_list *root)
16709 ++{
16710 ++ struct lock_list *entry = leaf;
16711 ++ const struct lock_trace *trace = NULL;
16712 ++ int depth;
16713 ++
16714 ++ /*compute depth from generated tree by BFS*/
16715 ++ depth = get_lock_depth(leaf);
16716 ++
16717 ++ do {
16718 ++ print_lock_class_header(entry->class, depth);
16719 ++ if (trace) {
16720 ++ printk("%*s ... acquired at:\n", depth, "");
16721 ++ print_lock_trace(trace, 2);
16722 ++ printk("\n");
16723 ++ }
16724 ++
16725 ++ /*
16726 ++ * Record the pointer to the trace for the next lock_list
16727 ++ * entry, see the comments for the function.
16728 ++ */
16729 ++ trace = entry->trace;
16730 ++
16731 ++ if (depth == 0 && (entry != root)) {
16732 ++ printk("lockdep:%s bad path found in chain graph\n", __func__);
16733 ++ break;
16734 ++ }
16735 ++
16736 ++ entry = get_lock_parent(entry);
16737 ++ depth--;
16738 ++ } while (entry && (depth >= 0));
16739 ++}
16740 ++
16741 + static void
16742 + print_irq_lock_scenario(struct lock_list *safe_entry,
16743 + struct lock_list *unsafe_entry,
16744 +@@ -2442,7 +2546,7 @@ print_bad_irq_dependency(struct task_struct *curr,
16745 + prev_root->trace = save_trace();
16746 + if (!prev_root->trace)
16747 + return;
16748 +- print_shortest_lock_dependencies(backwards_entry, prev_root);
16749 ++ print_shortest_lock_dependencies_backwards(backwards_entry, prev_root);
16750 +
16751 + pr_warn("\nthe dependencies between the lock to be acquired");
16752 + pr_warn(" and %s-irq-unsafe lock:\n", irqclass);
16753 +@@ -2660,8 +2764,18 @@ static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
16754 + * Step 3: we found a bad match! Now retrieve a lock from the backward
16755 + * list whose usage mask matches the exclusive usage mask from the
16756 + * lock found on the forward list.
16757 ++ *
16758 ++ * Note, we should only keep the LOCKF_ENABLED_IRQ_ALL bits, considering
16759 ++ * the follow case:
16760 ++ *
16761 ++ * When trying to add A -> B to the graph, we find that there is a
16762 ++ * hardirq-safe L, that L -> ... -> A, and another hardirq-unsafe M,
16763 ++ * that B -> ... -> M. However M is **softirq-safe**, if we use exact
16764 ++ * invert bits of M's usage_mask, we will find another lock N that is
16765 ++ * **softirq-unsafe** and N -> ... -> A, however N -> .. -> M will not
16766 ++ * cause a inversion deadlock.
16767 + */
16768 +- backward_mask = original_mask(target_entry1->class->usage_mask);
16769 ++ backward_mask = original_mask(target_entry1->class->usage_mask & LOCKF_ENABLED_IRQ_ALL);
16770 +
16771 + ret = find_usage_backwards(&this, backward_mask, &target_entry);
16772 + if (bfs_error(ret)) {
16773 +@@ -4512,7 +4626,7 @@ static int check_wait_context(struct task_struct *curr, struct held_lock *next)
16774 + short curr_inner;
16775 + int depth;
16776 +
16777 +- if (!curr->lockdep_depth || !next_inner || next->trylock)
16778 ++ if (!next_inner || next->trylock)
16779 + return 0;
16780 +
16781 + if (!next_outer)
16782 +diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
16783 +index 61e250cdd7c9c..45b60e9974610 100644
16784 +--- a/kernel/rcu/tree.c
16785 ++++ b/kernel/rcu/tree.c
16786 +@@ -2837,7 +2837,6 @@ static int __init rcu_spawn_core_kthreads(void)
16787 + "%s: Could not start rcuc kthread, OOM is now expected behavior\n", __func__);
16788 + return 0;
16789 + }
16790 +-early_initcall(rcu_spawn_core_kthreads);
16791 +
16792 + /*
16793 + * Handle any core-RCU processing required by a call_rcu() invocation.
16794 +@@ -4273,6 +4272,7 @@ static int __init rcu_spawn_gp_kthread(void)
16795 + wake_up_process(t);
16796 + rcu_spawn_nocb_kthreads();
16797 + rcu_spawn_boost_kthreads();
16798 ++ rcu_spawn_core_kthreads();
16799 + return 0;
16800 + }
16801 + early_initcall(rcu_spawn_gp_kthread);
16802 +diff --git a/kernel/sched/core.c b/kernel/sched/core.c
16803 +index 57b2362518849..679562d2f55d1 100644
16804 +--- a/kernel/sched/core.c
16805 ++++ b/kernel/sched/core.c
16806 +@@ -1063,9 +1063,10 @@ static void uclamp_sync_util_min_rt_default(void)
16807 + static inline struct uclamp_se
16808 + uclamp_tg_restrict(struct task_struct *p, enum uclamp_id clamp_id)
16809 + {
16810 ++ /* Copy by value as we could modify it */
16811 + struct uclamp_se uc_req = p->uclamp_req[clamp_id];
16812 + #ifdef CONFIG_UCLAMP_TASK_GROUP
16813 +- struct uclamp_se uc_max;
16814 ++ unsigned int tg_min, tg_max, value;
16815 +
16816 + /*
16817 + * Tasks in autogroups or root task group will be
16818 +@@ -1076,9 +1077,11 @@ uclamp_tg_restrict(struct task_struct *p, enum uclamp_id clamp_id)
16819 + if (task_group(p) == &root_task_group)
16820 + return uc_req;
16821 +
16822 +- uc_max = task_group(p)->uclamp[clamp_id];
16823 +- if (uc_req.value > uc_max.value || !uc_req.user_defined)
16824 +- return uc_max;
16825 ++ tg_min = task_group(p)->uclamp[UCLAMP_MIN].value;
16826 ++ tg_max = task_group(p)->uclamp[UCLAMP_MAX].value;
16827 ++ value = uc_req.value;
16828 ++ value = clamp(value, tg_min, tg_max);
16829 ++ uclamp_se_set(&uc_req, value, false);
16830 + #endif
16831 +
16832 + return uc_req;
16833 +@@ -1277,8 +1280,9 @@ static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p)
16834 + }
16835 +
16836 + static inline void
16837 +-uclamp_update_active(struct task_struct *p, enum uclamp_id clamp_id)
16838 ++uclamp_update_active(struct task_struct *p)
16839 + {
16840 ++ enum uclamp_id clamp_id;
16841 + struct rq_flags rf;
16842 + struct rq *rq;
16843 +
16844 +@@ -1298,9 +1302,11 @@ uclamp_update_active(struct task_struct *p, enum uclamp_id clamp_id)
16845 + * affecting a valid clamp bucket, the next time it's enqueued,
16846 + * it will already see the updated clamp bucket value.
16847 + */
16848 +- if (p->uclamp[clamp_id].active) {
16849 +- uclamp_rq_dec_id(rq, p, clamp_id);
16850 +- uclamp_rq_inc_id(rq, p, clamp_id);
16851 ++ for_each_clamp_id(clamp_id) {
16852 ++ if (p->uclamp[clamp_id].active) {
16853 ++ uclamp_rq_dec_id(rq, p, clamp_id);
16854 ++ uclamp_rq_inc_id(rq, p, clamp_id);
16855 ++ }
16856 + }
16857 +
16858 + task_rq_unlock(rq, p, &rf);
16859 +@@ -1308,20 +1314,14 @@ uclamp_update_active(struct task_struct *p, enum uclamp_id clamp_id)
16860 +
16861 + #ifdef CONFIG_UCLAMP_TASK_GROUP
16862 + static inline void
16863 +-uclamp_update_active_tasks(struct cgroup_subsys_state *css,
16864 +- unsigned int clamps)
16865 ++uclamp_update_active_tasks(struct cgroup_subsys_state *css)
16866 + {
16867 +- enum uclamp_id clamp_id;
16868 + struct css_task_iter it;
16869 + struct task_struct *p;
16870 +
16871 + css_task_iter_start(css, 0, &it);
16872 +- while ((p = css_task_iter_next(&it))) {
16873 +- for_each_clamp_id(clamp_id) {
16874 +- if ((0x1 << clamp_id) & clamps)
16875 +- uclamp_update_active(p, clamp_id);
16876 +- }
16877 +- }
16878 ++ while ((p = css_task_iter_next(&it)))
16879 ++ uclamp_update_active(p);
16880 + css_task_iter_end(&it);
16881 + }
16882 +
16883 +@@ -6512,7 +6512,7 @@ void show_state_filter(unsigned long state_filter)
16884 + * NOTE: this function does not set the idle thread's NEED_RESCHED
16885 + * flag, to make booting more robust.
16886 + */
16887 +-void init_idle(struct task_struct *idle, int cpu)
16888 ++void __init init_idle(struct task_struct *idle, int cpu)
16889 + {
16890 + struct rq *rq = cpu_rq(cpu);
16891 + unsigned long flags;
16892 +@@ -7607,7 +7607,11 @@ static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
16893 +
16894 + #ifdef CONFIG_UCLAMP_TASK_GROUP
16895 + /* Propagate the effective uclamp value for the new group */
16896 ++ mutex_lock(&uclamp_mutex);
16897 ++ rcu_read_lock();
16898 + cpu_util_update_eff(css);
16899 ++ rcu_read_unlock();
16900 ++ mutex_unlock(&uclamp_mutex);
16901 + #endif
16902 +
16903 + return 0;
16904 +@@ -7697,6 +7701,9 @@ static void cpu_util_update_eff(struct cgroup_subsys_state *css)
16905 + enum uclamp_id clamp_id;
16906 + unsigned int clamps;
16907 +
16908 ++ lockdep_assert_held(&uclamp_mutex);
16909 ++ SCHED_WARN_ON(!rcu_read_lock_held());
16910 ++
16911 + css_for_each_descendant_pre(css, top_css) {
16912 + uc_parent = css_tg(css)->parent
16913 + ? css_tg(css)->parent->uclamp : NULL;
16914 +@@ -7729,7 +7736,7 @@ static void cpu_util_update_eff(struct cgroup_subsys_state *css)
16915 + }
16916 +
16917 + /* Immediately update descendants RUNNABLE tasks */
16918 +- uclamp_update_active_tasks(css, clamps);
16919 ++ uclamp_update_active_tasks(css);
16920 + }
16921 + }
16922 +
16923 +diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
16924 +index 8d06d1f4e2f7b..6b98c1fe6e7f8 100644
16925 +--- a/kernel/sched/deadline.c
16926 ++++ b/kernel/sched/deadline.c
16927 +@@ -2470,6 +2470,8 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p)
16928 + check_preempt_curr_dl(rq, p, 0);
16929 + else
16930 + resched_curr(rq);
16931 ++ } else {
16932 ++ update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
16933 + }
16934 + }
16935 +
16936 +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
16937 +index d6e1c90de570a..3d92de7909bf4 100644
16938 +--- a/kernel/sched/fair.c
16939 ++++ b/kernel/sched/fair.c
16940 +@@ -3141,7 +3141,7 @@ void reweight_task(struct task_struct *p, int prio)
16941 + *
16942 + * tg->weight * grq->load.weight
16943 + * ge->load.weight = ----------------------------- (1)
16944 +- * \Sum grq->load.weight
16945 ++ * \Sum grq->load.weight
16946 + *
16947 + * Now, because computing that sum is prohibitively expensive to compute (been
16948 + * there, done that) we approximate it with this average stuff. The average
16949 +@@ -3155,7 +3155,7 @@ void reweight_task(struct task_struct *p, int prio)
16950 + *
16951 + * tg->weight * grq->avg.load_avg
16952 + * ge->load.weight = ------------------------------ (3)
16953 +- * tg->load_avg
16954 ++ * tg->load_avg
16955 + *
16956 + * Where: tg->load_avg ~= \Sum grq->avg.load_avg
16957 + *
16958 +@@ -3171,7 +3171,7 @@ void reweight_task(struct task_struct *p, int prio)
16959 + *
16960 + * tg->weight * grq->load.weight
16961 + * ge->load.weight = ----------------------------- = tg->weight (4)
16962 +- * grp->load.weight
16963 ++ * grp->load.weight
16964 + *
16965 + * That is, the sum collapses because all other CPUs are idle; the UP scenario.
16966 + *
16967 +@@ -3190,7 +3190,7 @@ void reweight_task(struct task_struct *p, int prio)
16968 + *
16969 + * tg->weight * grq->load.weight
16970 + * ge->load.weight = ----------------------------- (6)
16971 +- * tg_load_avg'
16972 ++ * tg_load_avg'
16973 + *
16974 + * Where:
16975 + *
16976 +diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
16977 +index 651218ded9817..d50a31ecedeec 100644
16978 +--- a/kernel/sched/psi.c
16979 ++++ b/kernel/sched/psi.c
16980 +@@ -179,6 +179,8 @@ struct psi_group psi_system = {
16981 +
16982 + static void psi_avgs_work(struct work_struct *work);
16983 +
16984 ++static void poll_timer_fn(struct timer_list *t);
16985 ++
16986 + static void group_init(struct psi_group *group)
16987 + {
16988 + int cpu;
16989 +@@ -198,6 +200,8 @@ static void group_init(struct psi_group *group)
16990 + memset(group->polling_total, 0, sizeof(group->polling_total));
16991 + group->polling_next_update = ULLONG_MAX;
16992 + group->polling_until = 0;
16993 ++ init_waitqueue_head(&group->poll_wait);
16994 ++ timer_setup(&group->poll_timer, poll_timer_fn, 0);
16995 + rcu_assign_pointer(group->poll_task, NULL);
16996 + }
16997 +
16998 +@@ -1126,9 +1130,7 @@ struct psi_trigger *psi_trigger_create(struct psi_group *group,
16999 + return ERR_CAST(task);
17000 + }
17001 + atomic_set(&group->poll_wakeup, 0);
17002 +- init_waitqueue_head(&group->poll_wait);
17003 + wake_up_process(task);
17004 +- timer_setup(&group->poll_timer, poll_timer_fn, 0);
17005 + rcu_assign_pointer(group->poll_task, task);
17006 + }
17007 +
17008 +@@ -1180,6 +1182,7 @@ static void psi_trigger_destroy(struct kref *ref)
17009 + group->poll_task,
17010 + lockdep_is_held(&group->trigger_lock));
17011 + rcu_assign_pointer(group->poll_task, NULL);
17012 ++ del_timer(&group->poll_timer);
17013 + }
17014 + }
17015 +
17016 +@@ -1192,17 +1195,14 @@ static void psi_trigger_destroy(struct kref *ref)
17017 + */
17018 + synchronize_rcu();
17019 + /*
17020 +- * Destroy the kworker after releasing trigger_lock to prevent a
17021 ++ * Stop kthread 'psimon' after releasing trigger_lock to prevent a
17022 + * deadlock while waiting for psi_poll_work to acquire trigger_lock
17023 + */
17024 + if (task_to_destroy) {
17025 + /*
17026 + * After the RCU grace period has expired, the worker
17027 + * can no longer be found through group->poll_task.
17028 +- * But it might have been already scheduled before
17029 +- * that - deschedule it cleanly before destroying it.
17030 + */
17031 +- del_timer_sync(&group->poll_timer);
17032 + kthread_stop(task_to_destroy);
17033 + }
17034 + kfree(t);
17035 +diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
17036 +index 49ec096a8aa1f..b5cf418e2e3fe 100644
17037 +--- a/kernel/sched/rt.c
17038 ++++ b/kernel/sched/rt.c
17039 +@@ -2291,13 +2291,20 @@ void __init init_sched_rt_class(void)
17040 + static void switched_to_rt(struct rq *rq, struct task_struct *p)
17041 + {
17042 + /*
17043 +- * If we are already running, then there's nothing
17044 +- * that needs to be done. But if we are not running
17045 +- * we may need to preempt the current running task.
17046 +- * If that current running task is also an RT task
17047 ++ * If we are running, update the avg_rt tracking, as the running time
17048 ++ * will now on be accounted into the latter.
17049 ++ */
17050 ++ if (task_current(rq, p)) {
17051 ++ update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
17052 ++ return;
17053 ++ }
17054 ++
17055 ++ /*
17056 ++ * If we are not running we may need to preempt the current
17057 ++ * running task. If that current running task is also an RT task
17058 + * then see if we can move to another run queue.
17059 + */
17060 +- if (task_on_rq_queued(p) && rq->curr != p) {
17061 ++ if (task_on_rq_queued(p)) {
17062 + #ifdef CONFIG_SMP
17063 + if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
17064 + rt_queue_push_tasks(rq);
17065 +diff --git a/kernel/smpboot.c b/kernel/smpboot.c
17066 +index f25208e8df836..e4163042c4d66 100644
17067 +--- a/kernel/smpboot.c
17068 ++++ b/kernel/smpboot.c
17069 +@@ -33,7 +33,6 @@ struct task_struct *idle_thread_get(unsigned int cpu)
17070 +
17071 + if (!tsk)
17072 + return ERR_PTR(-ENOMEM);
17073 +- init_idle(tsk, cpu);
17074 + return tsk;
17075 + }
17076 +
17077 +diff --git a/kernel/sys.c b/kernel/sys.c
17078 +index a730c03ee607c..0670e824e0197 100644
17079 +--- a/kernel/sys.c
17080 ++++ b/kernel/sys.c
17081 +@@ -552,6 +552,10 @@ long __sys_setreuid(uid_t ruid, uid_t euid)
17082 + if (retval < 0)
17083 + goto error;
17084 +
17085 ++ retval = set_cred_ucounts(new);
17086 ++ if (retval < 0)
17087 ++ goto error;
17088 ++
17089 + return commit_creds(new);
17090 +
17091 + error:
17092 +@@ -610,6 +614,10 @@ long __sys_setuid(uid_t uid)
17093 + if (retval < 0)
17094 + goto error;
17095 +
17096 ++ retval = set_cred_ucounts(new);
17097 ++ if (retval < 0)
17098 ++ goto error;
17099 ++
17100 + return commit_creds(new);
17101 +
17102 + error:
17103 +@@ -685,6 +693,10 @@ long __sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
17104 + if (retval < 0)
17105 + goto error;
17106 +
17107 ++ retval = set_cred_ucounts(new);
17108 ++ if (retval < 0)
17109 ++ goto error;
17110 ++
17111 + return commit_creds(new);
17112 +
17113 + error:
17114 +diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
17115 +index 02441ead3c3bb..74492f08660c4 100644
17116 +--- a/kernel/time/clocksource.c
17117 ++++ b/kernel/time/clocksource.c
17118 +@@ -124,6 +124,13 @@ static void __clocksource_change_rating(struct clocksource *cs, int rating);
17119 + #define WATCHDOG_INTERVAL (HZ >> 1)
17120 + #define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4)
17121 +
17122 ++/*
17123 ++ * Maximum permissible delay between two readouts of the watchdog
17124 ++ * clocksource surrounding a read of the clocksource being validated.
17125 ++ * This delay could be due to SMIs, NMIs, or to VCPU preemptions.
17126 ++ */
17127 ++#define WATCHDOG_MAX_SKEW (100 * NSEC_PER_USEC)
17128 ++
17129 + static void clocksource_watchdog_work(struct work_struct *work)
17130 + {
17131 + /*
17132 +@@ -184,12 +191,99 @@ void clocksource_mark_unstable(struct clocksource *cs)
17133 + spin_unlock_irqrestore(&watchdog_lock, flags);
17134 + }
17135 +
17136 ++static ulong max_cswd_read_retries = 3;
17137 ++module_param(max_cswd_read_retries, ulong, 0644);
17138 ++
17139 ++static bool cs_watchdog_read(struct clocksource *cs, u64 *csnow, u64 *wdnow)
17140 ++{
17141 ++ unsigned int nretries;
17142 ++ u64 wd_end, wd_delta;
17143 ++ int64_t wd_delay;
17144 ++
17145 ++ for (nretries = 0; nretries <= max_cswd_read_retries; nretries++) {
17146 ++ local_irq_disable();
17147 ++ *wdnow = watchdog->read(watchdog);
17148 ++ *csnow = cs->read(cs);
17149 ++ wd_end = watchdog->read(watchdog);
17150 ++ local_irq_enable();
17151 ++
17152 ++ wd_delta = clocksource_delta(wd_end, *wdnow, watchdog->mask);
17153 ++ wd_delay = clocksource_cyc2ns(wd_delta, watchdog->mult,
17154 ++ watchdog->shift);
17155 ++ if (wd_delay <= WATCHDOG_MAX_SKEW) {
17156 ++ if (nretries > 1 || nretries >= max_cswd_read_retries) {
17157 ++ pr_warn("timekeeping watchdog on CPU%d: %s retried %d times before success\n",
17158 ++ smp_processor_id(), watchdog->name, nretries);
17159 ++ }
17160 ++ return true;
17161 ++ }
17162 ++ }
17163 ++
17164 ++ pr_warn("timekeeping watchdog on CPU%d: %s read-back delay of %lldns, attempt %d, marking unstable\n",
17165 ++ smp_processor_id(), watchdog->name, wd_delay, nretries);
17166 ++ return false;
17167 ++}
17168 ++
17169 ++static u64 csnow_mid;
17170 ++static cpumask_t cpus_ahead;
17171 ++static cpumask_t cpus_behind;
17172 ++
17173 ++static void clocksource_verify_one_cpu(void *csin)
17174 ++{
17175 ++ struct clocksource *cs = (struct clocksource *)csin;
17176 ++
17177 ++ csnow_mid = cs->read(cs);
17178 ++}
17179 ++
17180 ++static void clocksource_verify_percpu(struct clocksource *cs)
17181 ++{
17182 ++ int64_t cs_nsec, cs_nsec_max = 0, cs_nsec_min = LLONG_MAX;
17183 ++ u64 csnow_begin, csnow_end;
17184 ++ int cpu, testcpu;
17185 ++ s64 delta;
17186 ++
17187 ++ cpumask_clear(&cpus_ahead);
17188 ++ cpumask_clear(&cpus_behind);
17189 ++ preempt_disable();
17190 ++ testcpu = smp_processor_id();
17191 ++ pr_warn("Checking clocksource %s synchronization from CPU %d.\n", cs->name, testcpu);
17192 ++ for_each_online_cpu(cpu) {
17193 ++ if (cpu == testcpu)
17194 ++ continue;
17195 ++ csnow_begin = cs->read(cs);
17196 ++ smp_call_function_single(cpu, clocksource_verify_one_cpu, cs, 1);
17197 ++ csnow_end = cs->read(cs);
17198 ++ delta = (s64)((csnow_mid - csnow_begin) & cs->mask);
17199 ++ if (delta < 0)
17200 ++ cpumask_set_cpu(cpu, &cpus_behind);
17201 ++ delta = (csnow_end - csnow_mid) & cs->mask;
17202 ++ if (delta < 0)
17203 ++ cpumask_set_cpu(cpu, &cpus_ahead);
17204 ++ delta = clocksource_delta(csnow_end, csnow_begin, cs->mask);
17205 ++ cs_nsec = clocksource_cyc2ns(delta, cs->mult, cs->shift);
17206 ++ if (cs_nsec > cs_nsec_max)
17207 ++ cs_nsec_max = cs_nsec;
17208 ++ if (cs_nsec < cs_nsec_min)
17209 ++ cs_nsec_min = cs_nsec;
17210 ++ }
17211 ++ preempt_enable();
17212 ++ if (!cpumask_empty(&cpus_ahead))
17213 ++ pr_warn(" CPUs %*pbl ahead of CPU %d for clocksource %s.\n",
17214 ++ cpumask_pr_args(&cpus_ahead), testcpu, cs->name);
17215 ++ if (!cpumask_empty(&cpus_behind))
17216 ++ pr_warn(" CPUs %*pbl behind CPU %d for clocksource %s.\n",
17217 ++ cpumask_pr_args(&cpus_behind), testcpu, cs->name);
17218 ++ if (!cpumask_empty(&cpus_ahead) || !cpumask_empty(&cpus_behind))
17219 ++ pr_warn(" CPU %d check durations %lldns - %lldns for clocksource %s.\n",
17220 ++ testcpu, cs_nsec_min, cs_nsec_max, cs->name);
17221 ++}
17222 ++
17223 + static void clocksource_watchdog(struct timer_list *unused)
17224 + {
17225 +- struct clocksource *cs;
17226 + u64 csnow, wdnow, cslast, wdlast, delta;
17227 +- int64_t wd_nsec, cs_nsec;
17228 + int next_cpu, reset_pending;
17229 ++ int64_t wd_nsec, cs_nsec;
17230 ++ struct clocksource *cs;
17231 +
17232 + spin_lock(&watchdog_lock);
17233 + if (!watchdog_running)
17234 +@@ -206,10 +300,11 @@ static void clocksource_watchdog(struct timer_list *unused)
17235 + continue;
17236 + }
17237 +
17238 +- local_irq_disable();
17239 +- csnow = cs->read(cs);
17240 +- wdnow = watchdog->read(watchdog);
17241 +- local_irq_enable();
17242 ++ if (!cs_watchdog_read(cs, &csnow, &wdnow)) {
17243 ++ /* Clock readout unreliable, so give it up. */
17244 ++ __clocksource_unstable(cs);
17245 ++ continue;
17246 ++ }
17247 +
17248 + /* Clocksource initialized ? */
17249 + if (!(cs->flags & CLOCK_SOURCE_WATCHDOG) ||
17250 +@@ -407,6 +502,12 @@ static int __clocksource_watchdog_kthread(void)
17251 + unsigned long flags;
17252 + int select = 0;
17253 +
17254 ++ /* Do any required per-CPU skew verification. */
17255 ++ if (curr_clocksource &&
17256 ++ curr_clocksource->flags & CLOCK_SOURCE_UNSTABLE &&
17257 ++ curr_clocksource->flags & CLOCK_SOURCE_VERIFY_PERCPU)
17258 ++ clocksource_verify_percpu(curr_clocksource);
17259 ++
17260 + spin_lock_irqsave(&watchdog_lock, flags);
17261 + list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
17262 + if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
17263 +diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
17264 +index 01710831fd02f..216329c23f18a 100644
17265 +--- a/kernel/trace/bpf_trace.c
17266 ++++ b/kernel/trace/bpf_trace.c
17267 +@@ -2106,7 +2106,8 @@ static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *
17268 + if (prog->aux->max_tp_access > btp->writable_size)
17269 + return -EINVAL;
17270 +
17271 +- return tracepoint_probe_register(tp, (void *)btp->bpf_func, prog);
17272 ++ return tracepoint_probe_register_may_exist(tp, (void *)btp->bpf_func,
17273 ++ prog);
17274 + }
17275 +
17276 + int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
17277 +diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
17278 +index 96c3f86b81c5f..0b24938cbe92e 100644
17279 +--- a/kernel/trace/trace_events_hist.c
17280 ++++ b/kernel/trace/trace_events_hist.c
17281 +@@ -1539,6 +1539,13 @@ static int contains_operator(char *str)
17282 +
17283 + switch (*op) {
17284 + case '-':
17285 ++ /*
17286 ++ * Unfortunately, the modifier ".sym-offset"
17287 ++ * can confuse things.
17288 ++ */
17289 ++ if (op - str >= 4 && !strncmp(op - 4, ".sym-offset", 11))
17290 ++ return FIELD_OP_NONE;
17291 ++
17292 + if (*str == '-')
17293 + field_op = FIELD_OP_UNARY_MINUS;
17294 + else
17295 +diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
17296 +index 3e261482296cf..f8b161edca5ea 100644
17297 +--- a/kernel/tracepoint.c
17298 ++++ b/kernel/tracepoint.c
17299 +@@ -294,7 +294,8 @@ static void tracepoint_update_call(struct tracepoint *tp, struct tracepoint_func
17300 + * Add the probe function to a tracepoint.
17301 + */
17302 + static int tracepoint_add_func(struct tracepoint *tp,
17303 +- struct tracepoint_func *func, int prio)
17304 ++ struct tracepoint_func *func, int prio,
17305 ++ bool warn)
17306 + {
17307 + struct tracepoint_func *old, *tp_funcs;
17308 + int ret;
17309 +@@ -309,7 +310,7 @@ static int tracepoint_add_func(struct tracepoint *tp,
17310 + lockdep_is_held(&tracepoints_mutex));
17311 + old = func_add(&tp_funcs, func, prio);
17312 + if (IS_ERR(old)) {
17313 +- WARN_ON_ONCE(PTR_ERR(old) != -ENOMEM);
17314 ++ WARN_ON_ONCE(warn && PTR_ERR(old) != -ENOMEM);
17315 + return PTR_ERR(old);
17316 + }
17317 +
17318 +@@ -364,6 +365,32 @@ static int tracepoint_remove_func(struct tracepoint *tp,
17319 + return 0;
17320 + }
17321 +
17322 ++/**
17323 ++ * tracepoint_probe_register_prio_may_exist - Connect a probe to a tracepoint with priority
17324 ++ * @tp: tracepoint
17325 ++ * @probe: probe handler
17326 ++ * @data: tracepoint data
17327 ++ * @prio: priority of this function over other registered functions
17328 ++ *
17329 ++ * Same as tracepoint_probe_register_prio() except that it will not warn
17330 ++ * if the tracepoint is already registered.
17331 ++ */
17332 ++int tracepoint_probe_register_prio_may_exist(struct tracepoint *tp, void *probe,
17333 ++ void *data, int prio)
17334 ++{
17335 ++ struct tracepoint_func tp_func;
17336 ++ int ret;
17337 ++
17338 ++ mutex_lock(&tracepoints_mutex);
17339 ++ tp_func.func = probe;
17340 ++ tp_func.data = data;
17341 ++ tp_func.prio = prio;
17342 ++ ret = tracepoint_add_func(tp, &tp_func, prio, false);
17343 ++ mutex_unlock(&tracepoints_mutex);
17344 ++ return ret;
17345 ++}
17346 ++EXPORT_SYMBOL_GPL(tracepoint_probe_register_prio_may_exist);
17347 ++
17348 + /**
17349 + * tracepoint_probe_register_prio - Connect a probe to a tracepoint with priority
17350 + * @tp: tracepoint
17351 +@@ -387,7 +414,7 @@ int tracepoint_probe_register_prio(struct tracepoint *tp, void *probe,
17352 + tp_func.func = probe;
17353 + tp_func.data = data;
17354 + tp_func.prio = prio;
17355 +- ret = tracepoint_add_func(tp, &tp_func, prio);
17356 ++ ret = tracepoint_add_func(tp, &tp_func, prio, true);
17357 + mutex_unlock(&tracepoints_mutex);
17358 + return ret;
17359 + }
17360 +diff --git a/kernel/ucount.c b/kernel/ucount.c
17361 +index 11b1596e2542a..9894795043c42 100644
17362 +--- a/kernel/ucount.c
17363 ++++ b/kernel/ucount.c
17364 +@@ -8,6 +8,12 @@
17365 + #include <linux/kmemleak.h>
17366 + #include <linux/user_namespace.h>
17367 +
17368 ++struct ucounts init_ucounts = {
17369 ++ .ns = &init_user_ns,
17370 ++ .uid = GLOBAL_ROOT_UID,
17371 ++ .count = 1,
17372 ++};
17373 ++
17374 + #define UCOUNTS_HASHTABLE_BITS 10
17375 + static struct hlist_head ucounts_hashtable[(1 << UCOUNTS_HASHTABLE_BITS)];
17376 + static DEFINE_SPINLOCK(ucounts_lock);
17377 +@@ -125,7 +131,15 @@ static struct ucounts *find_ucounts(struct user_namespace *ns, kuid_t uid, struc
17378 + return NULL;
17379 + }
17380 +
17381 +-static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
17382 ++static void hlist_add_ucounts(struct ucounts *ucounts)
17383 ++{
17384 ++ struct hlist_head *hashent = ucounts_hashentry(ucounts->ns, ucounts->uid);
17385 ++ spin_lock_irq(&ucounts_lock);
17386 ++ hlist_add_head(&ucounts->node, hashent);
17387 ++ spin_unlock_irq(&ucounts_lock);
17388 ++}
17389 ++
17390 ++struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid)
17391 + {
17392 + struct hlist_head *hashent = ucounts_hashentry(ns, uid);
17393 + struct ucounts *ucounts, *new;
17394 +@@ -160,7 +174,26 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
17395 + return ucounts;
17396 + }
17397 +
17398 +-static void put_ucounts(struct ucounts *ucounts)
17399 ++struct ucounts *get_ucounts(struct ucounts *ucounts)
17400 ++{
17401 ++ unsigned long flags;
17402 ++
17403 ++ if (!ucounts)
17404 ++ return NULL;
17405 ++
17406 ++ spin_lock_irqsave(&ucounts_lock, flags);
17407 ++ if (ucounts->count == INT_MAX) {
17408 ++ WARN_ONCE(1, "ucounts: counter has reached its maximum value");
17409 ++ ucounts = NULL;
17410 ++ } else {
17411 ++ ucounts->count += 1;
17412 ++ }
17413 ++ spin_unlock_irqrestore(&ucounts_lock, flags);
17414 ++
17415 ++ return ucounts;
17416 ++}
17417 ++
17418 ++void put_ucounts(struct ucounts *ucounts)
17419 + {
17420 + unsigned long flags;
17421 +
17422 +@@ -194,7 +227,7 @@ struct ucounts *inc_ucount(struct user_namespace *ns, kuid_t uid,
17423 + {
17424 + struct ucounts *ucounts, *iter, *bad;
17425 + struct user_namespace *tns;
17426 +- ucounts = get_ucounts(ns, uid);
17427 ++ ucounts = alloc_ucounts(ns, uid);
17428 + for (iter = ucounts; iter; iter = tns->ucounts) {
17429 + int max;
17430 + tns = iter->ns;
17431 +@@ -237,6 +270,7 @@ static __init int user_namespace_sysctl_init(void)
17432 + BUG_ON(!user_header);
17433 + BUG_ON(!setup_userns_sysctls(&init_user_ns));
17434 + #endif
17435 ++ hlist_add_ucounts(&init_ucounts);
17436 + return 0;
17437 + }
17438 + subsys_initcall(user_namespace_sysctl_init);
17439 +diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
17440 +index ce396ea4de608..8206a13c81ebc 100644
17441 +--- a/kernel/user_namespace.c
17442 ++++ b/kernel/user_namespace.c
17443 +@@ -1340,6 +1340,9 @@ static int userns_install(struct nsset *nsset, struct ns_common *ns)
17444 + put_user_ns(cred->user_ns);
17445 + set_cred_user_ns(cred, get_user_ns(user_ns));
17446 +
17447 ++ if (set_cred_ucounts(cred) < 0)
17448 ++ return -EINVAL;
17449 ++
17450 + return 0;
17451 + }
17452 +
17453 +diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
17454 +index dcf4a9028e165..5b7f88a2876db 100644
17455 +--- a/lib/Kconfig.debug
17456 ++++ b/lib/Kconfig.debug
17457 +@@ -1302,7 +1302,6 @@ config LOCKDEP
17458 + bool
17459 + depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
17460 + select STACKTRACE
17461 +- depends on FRAME_POINTER || MIPS || PPC || S390 || MICROBLAZE || ARM || ARC || X86
17462 + select KALLSYMS
17463 + select KALLSYMS_ALL
17464 +
17465 +diff --git a/lib/iov_iter.c b/lib/iov_iter.c
17466 +index f0b2ccb1bb018..537bfdc8cd095 100644
17467 +--- a/lib/iov_iter.c
17468 ++++ b/lib/iov_iter.c
17469 +@@ -434,7 +434,7 @@ int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
17470 + int err;
17471 + struct iovec v;
17472 +
17473 +- if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
17474 ++ if (iter_is_iovec(i)) {
17475 + iterate_iovec(i, bytes, v, iov, skip, ({
17476 + err = fault_in_pages_readable(v.iov_base, v.iov_len);
17477 + if (unlikely(err))
17478 +@@ -922,9 +922,12 @@ size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
17479 + size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
17480 + kunmap_atomic(kaddr);
17481 + return wanted;
17482 +- } else if (unlikely(iov_iter_is_discard(i)))
17483 ++ } else if (unlikely(iov_iter_is_discard(i))) {
17484 ++ if (unlikely(i->count < bytes))
17485 ++ bytes = i->count;
17486 ++ i->count -= bytes;
17487 + return bytes;
17488 +- else if (likely(!iov_iter_is_pipe(i)))
17489 ++ } else if (likely(!iov_iter_is_pipe(i)))
17490 + return copy_page_to_iter_iovec(page, offset, bytes, i);
17491 + else
17492 + return copy_page_to_iter_pipe(page, offset, bytes, i);
17493 +diff --git a/lib/kstrtox.c b/lib/kstrtox.c
17494 +index a14ccf9050552..8504526541c13 100644
17495 +--- a/lib/kstrtox.c
17496 ++++ b/lib/kstrtox.c
17497 +@@ -39,20 +39,22 @@ const char *_parse_integer_fixup_radix(const char *s, unsigned int *base)
17498 +
17499 + /*
17500 + * Convert non-negative integer string representation in explicitly given radix
17501 +- * to an integer.
17502 ++ * to an integer. A maximum of max_chars characters will be converted.
17503 ++ *
17504 + * Return number of characters consumed maybe or-ed with overflow bit.
17505 + * If overflow occurs, result integer (incorrect) is still returned.
17506 + *
17507 + * Don't you dare use this function.
17508 + */
17509 +-unsigned int _parse_integer(const char *s, unsigned int base, unsigned long long *p)
17510 ++unsigned int _parse_integer_limit(const char *s, unsigned int base, unsigned long long *p,
17511 ++ size_t max_chars)
17512 + {
17513 + unsigned long long res;
17514 + unsigned int rv;
17515 +
17516 + res = 0;
17517 + rv = 0;
17518 +- while (1) {
17519 ++ while (max_chars--) {
17520 + unsigned int c = *s;
17521 + unsigned int lc = c | 0x20; /* don't tolower() this line */
17522 + unsigned int val;
17523 +@@ -82,6 +84,11 @@ unsigned int _parse_integer(const char *s, unsigned int base, unsigned long long
17524 + return rv;
17525 + }
17526 +
17527 ++unsigned int _parse_integer(const char *s, unsigned int base, unsigned long long *p)
17528 ++{
17529 ++ return _parse_integer_limit(s, base, p, INT_MAX);
17530 ++}
17531 ++
17532 + static int _kstrtoull(const char *s, unsigned int base, unsigned long long *res)
17533 + {
17534 + unsigned long long _res;
17535 +diff --git a/lib/kstrtox.h b/lib/kstrtox.h
17536 +index 3b4637bcd2540..158c400ca8658 100644
17537 +--- a/lib/kstrtox.h
17538 ++++ b/lib/kstrtox.h
17539 +@@ -4,6 +4,8 @@
17540 +
17541 + #define KSTRTOX_OVERFLOW (1U << 31)
17542 + const char *_parse_integer_fixup_radix(const char *s, unsigned int *base);
17543 ++unsigned int _parse_integer_limit(const char *s, unsigned int base, unsigned long long *res,
17544 ++ size_t max_chars);
17545 + unsigned int _parse_integer(const char *s, unsigned int base, unsigned long long *res);
17546 +
17547 + #endif
17548 +diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
17549 +index a899b3f0e2e53..76c52b0b76d38 100644
17550 +--- a/lib/locking-selftest.c
17551 ++++ b/lib/locking-selftest.c
17552 +@@ -186,6 +186,7 @@ static void init_shared_classes(void)
17553 + #define HARDIRQ_ENTER() \
17554 + local_irq_disable(); \
17555 + __irq_enter(); \
17556 ++ lockdep_hardirq_threaded(); \
17557 + WARN_ON(!in_irq());
17558 +
17559 + #define HARDIRQ_EXIT() \
17560 +diff --git a/lib/math/rational.c b/lib/math/rational.c
17561 +index 9781d521963d1..c0ab51d8fbb98 100644
17562 +--- a/lib/math/rational.c
17563 ++++ b/lib/math/rational.c
17564 +@@ -12,6 +12,7 @@
17565 + #include <linux/compiler.h>
17566 + #include <linux/export.h>
17567 + #include <linux/minmax.h>
17568 ++#include <linux/limits.h>
17569 +
17570 + /*
17571 + * calculate best rational approximation for a given fraction
17572 +@@ -78,13 +79,18 @@ void rational_best_approximation(
17573 + * found below as 't'.
17574 + */
17575 + if ((n2 > max_numerator) || (d2 > max_denominator)) {
17576 +- unsigned long t = min((max_numerator - n0) / n1,
17577 +- (max_denominator - d0) / d1);
17578 ++ unsigned long t = ULONG_MAX;
17579 +
17580 +- /* This tests if the semi-convergent is closer
17581 +- * than the previous convergent.
17582 ++ if (d1)
17583 ++ t = (max_denominator - d0) / d1;
17584 ++ if (n1)
17585 ++ t = min(t, (max_numerator - n0) / n1);
17586 ++
17587 ++ /* This tests if the semi-convergent is closer than the previous
17588 ++ * convergent. If d1 is zero there is no previous convergent as this
17589 ++ * is the 1st iteration, so always choose the semi-convergent.
17590 + */
17591 +- if (2u * t > a || (2u * t == a && d0 * dp > d1 * d)) {
17592 ++ if (!d1 || 2u * t > a || (2u * t == a && d0 * dp > d1 * d)) {
17593 + n1 = n0 + t * n1;
17594 + d1 = d0 + t * d1;
17595 + }
17596 +diff --git a/lib/seq_buf.c b/lib/seq_buf.c
17597 +index 707453f5d58ee..89c26c393bdba 100644
17598 +--- a/lib/seq_buf.c
17599 ++++ b/lib/seq_buf.c
17600 +@@ -243,12 +243,14 @@ int seq_buf_putmem_hex(struct seq_buf *s, const void *mem,
17601 + break;
17602 +
17603 + /* j increments twice per loop */
17604 +- len -= j / 2;
17605 + hex[j++] = ' ';
17606 +
17607 + seq_buf_putmem(s, hex, j);
17608 + if (seq_buf_has_overflowed(s))
17609 + return -1;
17610 ++
17611 ++ len -= start_len;
17612 ++ data += start_len;
17613 + }
17614 + return 0;
17615 + }
17616 +diff --git a/lib/vsprintf.c b/lib/vsprintf.c
17617 +index fd0fde639ec91..8ade1a86d8187 100644
17618 +--- a/lib/vsprintf.c
17619 ++++ b/lib/vsprintf.c
17620 +@@ -53,6 +53,31 @@
17621 + #include <linux/string_helpers.h>
17622 + #include "kstrtox.h"
17623 +
17624 ++static unsigned long long simple_strntoull(const char *startp, size_t max_chars,
17625 ++ char **endp, unsigned int base)
17626 ++{
17627 ++ const char *cp;
17628 ++ unsigned long long result = 0ULL;
17629 ++ size_t prefix_chars;
17630 ++ unsigned int rv;
17631 ++
17632 ++ cp = _parse_integer_fixup_radix(startp, &base);
17633 ++ prefix_chars = cp - startp;
17634 ++ if (prefix_chars < max_chars) {
17635 ++ rv = _parse_integer_limit(cp, base, &result, max_chars - prefix_chars);
17636 ++ /* FIXME */
17637 ++ cp += (rv & ~KSTRTOX_OVERFLOW);
17638 ++ } else {
17639 ++ /* Field too short for prefix + digit, skip over without converting */
17640 ++ cp = startp + max_chars;
17641 ++ }
17642 ++
17643 ++ if (endp)
17644 ++ *endp = (char *)cp;
17645 ++
17646 ++ return result;
17647 ++}
17648 ++
17649 + /**
17650 + * simple_strtoull - convert a string to an unsigned long long
17651 + * @cp: The start of the string
17652 +@@ -63,18 +88,7 @@
17653 + */
17654 + unsigned long long simple_strtoull(const char *cp, char **endp, unsigned int base)
17655 + {
17656 +- unsigned long long result;
17657 +- unsigned int rv;
17658 +-
17659 +- cp = _parse_integer_fixup_radix(cp, &base);
17660 +- rv = _parse_integer(cp, base, &result);
17661 +- /* FIXME */
17662 +- cp += (rv & ~KSTRTOX_OVERFLOW);
17663 +-
17664 +- if (endp)
17665 +- *endp = (char *)cp;
17666 +-
17667 +- return result;
17668 ++ return simple_strntoull(cp, INT_MAX, endp, base);
17669 + }
17670 + EXPORT_SYMBOL(simple_strtoull);
17671 +
17672 +@@ -109,6 +123,21 @@ long simple_strtol(const char *cp, char **endp, unsigned int base)
17673 + }
17674 + EXPORT_SYMBOL(simple_strtol);
17675 +
17676 ++static long long simple_strntoll(const char *cp, size_t max_chars, char **endp,
17677 ++ unsigned int base)
17678 ++{
17679 ++ /*
17680 ++ * simple_strntoull() safely handles receiving max_chars==0 in the
17681 ++ * case cp[0] == '-' && max_chars == 1.
17682 ++ * If max_chars == 0 we can drop through and pass it to simple_strntoull()
17683 ++ * and the content of *cp is irrelevant.
17684 ++ */
17685 ++ if (*cp == '-' && max_chars > 0)
17686 ++ return -simple_strntoull(cp + 1, max_chars - 1, endp, base);
17687 ++
17688 ++ return simple_strntoull(cp, max_chars, endp, base);
17689 ++}
17690 ++
17691 + /**
17692 + * simple_strtoll - convert a string to a signed long long
17693 + * @cp: The start of the string
17694 +@@ -119,10 +148,7 @@ EXPORT_SYMBOL(simple_strtol);
17695 + */
17696 + long long simple_strtoll(const char *cp, char **endp, unsigned int base)
17697 + {
17698 +- if (*cp == '-')
17699 +- return -simple_strtoull(cp + 1, endp, base);
17700 +-
17701 +- return simple_strtoull(cp, endp, base);
17702 ++ return simple_strntoll(cp, INT_MAX, endp, base);
17703 + }
17704 + EXPORT_SYMBOL(simple_strtoll);
17705 +
17706 +@@ -3442,25 +3468,13 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
17707 + break;
17708 +
17709 + if (is_sign)
17710 +- val.s = qualifier != 'L' ?
17711 +- simple_strtol(str, &next, base) :
17712 +- simple_strtoll(str, &next, base);
17713 ++ val.s = simple_strntoll(str,
17714 ++ field_width >= 0 ? field_width : INT_MAX,
17715 ++ &next, base);
17716 + else
17717 +- val.u = qualifier != 'L' ?
17718 +- simple_strtoul(str, &next, base) :
17719 +- simple_strtoull(str, &next, base);
17720 +-
17721 +- if (field_width > 0 && next - str > field_width) {
17722 +- if (base == 0)
17723 +- _parse_integer_fixup_radix(str, &base);
17724 +- while (next - str > field_width) {
17725 +- if (is_sign)
17726 +- val.s = div_s64(val.s, base);
17727 +- else
17728 +- val.u = div_u64(val.u, base);
17729 +- --next;
17730 +- }
17731 +- }
17732 ++ val.u = simple_strntoull(str,
17733 ++ field_width >= 0 ? field_width : INT_MAX,
17734 ++ &next, base);
17735 +
17736 + switch (qualifier) {
17737 + case 'H': /* that's 'hh' in format */
17738 +diff --git a/mm/debug_vm_pgtable.c b/mm/debug_vm_pgtable.c
17739 +index 750bfef26be37..12ebc97e8b435 100644
17740 +--- a/mm/debug_vm_pgtable.c
17741 ++++ b/mm/debug_vm_pgtable.c
17742 +@@ -58,11 +58,23 @@
17743 + #define RANDOM_ORVALUE (GENMASK(BITS_PER_LONG - 1, 0) & ~ARCH_SKIP_MASK)
17744 + #define RANDOM_NZVALUE GENMASK(7, 0)
17745 +
17746 +-static void __init pte_basic_tests(unsigned long pfn, pgprot_t prot)
17747 ++static void __init pte_basic_tests(unsigned long pfn, int idx)
17748 + {
17749 ++ pgprot_t prot = protection_map[idx];
17750 + pte_t pte = pfn_pte(pfn, prot);
17751 ++ unsigned long val = idx, *ptr = &val;
17752 ++
17753 ++ pr_debug("Validating PTE basic (%pGv)\n", ptr);
17754 ++
17755 ++ /*
17756 ++ * This test needs to be executed after the given page table entry
17757 ++ * is created with pfn_pte() to make sure that protection_map[idx]
17758 ++ * does not have the dirty bit enabled from the beginning. This is
17759 ++ * important for platforms like arm64 where (!PTE_RDONLY) indicate
17760 ++ * dirty bit being set.
17761 ++ */
17762 ++ WARN_ON(pte_dirty(pte_wrprotect(pte)));
17763 +
17764 +- pr_debug("Validating PTE basic\n");
17765 + WARN_ON(!pte_same(pte, pte));
17766 + WARN_ON(!pte_young(pte_mkyoung(pte_mkold(pte))));
17767 + WARN_ON(!pte_dirty(pte_mkdirty(pte_mkclean(pte))));
17768 +@@ -70,6 +82,8 @@ static void __init pte_basic_tests(unsigned long pfn, pgprot_t prot)
17769 + WARN_ON(pte_young(pte_mkold(pte_mkyoung(pte))));
17770 + WARN_ON(pte_dirty(pte_mkclean(pte_mkdirty(pte))));
17771 + WARN_ON(pte_write(pte_wrprotect(pte_mkwrite(pte))));
17772 ++ WARN_ON(pte_dirty(pte_wrprotect(pte_mkclean(pte))));
17773 ++ WARN_ON(!pte_dirty(pte_wrprotect(pte_mkdirty(pte))));
17774 + }
17775 +
17776 + static void __init pte_advanced_tests(struct mm_struct *mm,
17777 +@@ -129,14 +143,28 @@ static void __init pte_savedwrite_tests(unsigned long pfn, pgprot_t prot)
17778 + }
17779 +
17780 + #ifdef CONFIG_TRANSPARENT_HUGEPAGE
17781 +-static void __init pmd_basic_tests(unsigned long pfn, pgprot_t prot)
17782 ++static void __init pmd_basic_tests(unsigned long pfn, int idx)
17783 + {
17784 +- pmd_t pmd = pfn_pmd(pfn, prot);
17785 ++ pgprot_t prot = protection_map[idx];
17786 ++ unsigned long val = idx, *ptr = &val;
17787 ++ pmd_t pmd;
17788 +
17789 + if (!has_transparent_hugepage())
17790 + return;
17791 +
17792 +- pr_debug("Validating PMD basic\n");
17793 ++ pr_debug("Validating PMD basic (%pGv)\n", ptr);
17794 ++ pmd = pfn_pmd(pfn, prot);
17795 ++
17796 ++ /*
17797 ++ * This test needs to be executed after the given page table entry
17798 ++ * is created with pfn_pmd() to make sure that protection_map[idx]
17799 ++ * does not have the dirty bit enabled from the beginning. This is
17800 ++ * important for platforms like arm64 where (!PTE_RDONLY) indicate
17801 ++ * dirty bit being set.
17802 ++ */
17803 ++ WARN_ON(pmd_dirty(pmd_wrprotect(pmd)));
17804 ++
17805 ++
17806 + WARN_ON(!pmd_same(pmd, pmd));
17807 + WARN_ON(!pmd_young(pmd_mkyoung(pmd_mkold(pmd))));
17808 + WARN_ON(!pmd_dirty(pmd_mkdirty(pmd_mkclean(pmd))));
17809 +@@ -144,6 +172,8 @@ static void __init pmd_basic_tests(unsigned long pfn, pgprot_t prot)
17810 + WARN_ON(pmd_young(pmd_mkold(pmd_mkyoung(pmd))));
17811 + WARN_ON(pmd_dirty(pmd_mkclean(pmd_mkdirty(pmd))));
17812 + WARN_ON(pmd_write(pmd_wrprotect(pmd_mkwrite(pmd))));
17813 ++ WARN_ON(pmd_dirty(pmd_wrprotect(pmd_mkclean(pmd))));
17814 ++ WARN_ON(!pmd_dirty(pmd_wrprotect(pmd_mkdirty(pmd))));
17815 + /*
17816 + * A huge page does not point to next level page table
17817 + * entry. Hence this must qualify as pmd_bad().
17818 +@@ -156,7 +186,7 @@ static void __init pmd_advanced_tests(struct mm_struct *mm,
17819 + unsigned long pfn, unsigned long vaddr,
17820 + pgprot_t prot, pgtable_t pgtable)
17821 + {
17822 +- pmd_t pmd = pfn_pmd(pfn, prot);
17823 ++ pmd_t pmd;
17824 +
17825 + if (!has_transparent_hugepage())
17826 + return;
17827 +@@ -203,9 +233,14 @@ static void __init pmd_advanced_tests(struct mm_struct *mm,
17828 +
17829 + static void __init pmd_leaf_tests(unsigned long pfn, pgprot_t prot)
17830 + {
17831 +- pmd_t pmd = pfn_pmd(pfn, prot);
17832 ++ pmd_t pmd;
17833 ++
17834 ++ if (!has_transparent_hugepage())
17835 ++ return;
17836 +
17837 + pr_debug("Validating PMD leaf\n");
17838 ++ pmd = pfn_pmd(pfn, prot);
17839 ++
17840 + /*
17841 + * PMD based THP is a leaf entry.
17842 + */
17843 +@@ -238,30 +273,51 @@ static void __init pmd_huge_tests(pmd_t *pmdp, unsigned long pfn, pgprot_t prot)
17844 +
17845 + static void __init pmd_savedwrite_tests(unsigned long pfn, pgprot_t prot)
17846 + {
17847 +- pmd_t pmd = pfn_pmd(pfn, prot);
17848 ++ pmd_t pmd;
17849 +
17850 + if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
17851 + return;
17852 +
17853 ++ if (!has_transparent_hugepage())
17854 ++ return;
17855 ++
17856 + pr_debug("Validating PMD saved write\n");
17857 ++ pmd = pfn_pmd(pfn, prot);
17858 + WARN_ON(!pmd_savedwrite(pmd_mk_savedwrite(pmd_clear_savedwrite(pmd))));
17859 + WARN_ON(pmd_savedwrite(pmd_clear_savedwrite(pmd_mk_savedwrite(pmd))));
17860 + }
17861 +
17862 + #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
17863 +-static void __init pud_basic_tests(unsigned long pfn, pgprot_t prot)
17864 ++static void __init pud_basic_tests(struct mm_struct *mm, unsigned long pfn, int idx)
17865 + {
17866 +- pud_t pud = pfn_pud(pfn, prot);
17867 ++ pgprot_t prot = protection_map[idx];
17868 ++ unsigned long val = idx, *ptr = &val;
17869 ++ pud_t pud;
17870 +
17871 + if (!has_transparent_hugepage())
17872 + return;
17873 +
17874 +- pr_debug("Validating PUD basic\n");
17875 ++ pr_debug("Validating PUD basic (%pGv)\n", ptr);
17876 ++ pud = pfn_pud(pfn, prot);
17877 ++
17878 ++ /*
17879 ++ * This test needs to be executed after the given page table entry
17880 ++ * is created with pfn_pud() to make sure that protection_map[idx]
17881 ++ * does not have the dirty bit enabled from the beginning. This is
17882 ++ * important for platforms like arm64 where (!PTE_RDONLY) indicate
17883 ++ * dirty bit being set.
17884 ++ */
17885 ++ WARN_ON(pud_dirty(pud_wrprotect(pud)));
17886 ++
17887 + WARN_ON(!pud_same(pud, pud));
17888 + WARN_ON(!pud_young(pud_mkyoung(pud_mkold(pud))));
17889 ++ WARN_ON(!pud_dirty(pud_mkdirty(pud_mkclean(pud))));
17890 ++ WARN_ON(pud_dirty(pud_mkclean(pud_mkdirty(pud))));
17891 + WARN_ON(!pud_write(pud_mkwrite(pud_wrprotect(pud))));
17892 + WARN_ON(pud_write(pud_wrprotect(pud_mkwrite(pud))));
17893 + WARN_ON(pud_young(pud_mkold(pud_mkyoung(pud))));
17894 ++ WARN_ON(pud_dirty(pud_wrprotect(pud_mkclean(pud))));
17895 ++ WARN_ON(!pud_dirty(pud_wrprotect(pud_mkdirty(pud))));
17896 +
17897 + if (mm_pmd_folded(mm))
17898 + return;
17899 +@@ -278,7 +334,7 @@ static void __init pud_advanced_tests(struct mm_struct *mm,
17900 + unsigned long pfn, unsigned long vaddr,
17901 + pgprot_t prot)
17902 + {
17903 +- pud_t pud = pfn_pud(pfn, prot);
17904 ++ pud_t pud;
17905 +
17906 + if (!has_transparent_hugepage())
17907 + return;
17908 +@@ -287,6 +343,7 @@ static void __init pud_advanced_tests(struct mm_struct *mm,
17909 + /* Align the address wrt HPAGE_PUD_SIZE */
17910 + vaddr &= HPAGE_PUD_MASK;
17911 +
17912 ++ pud = pfn_pud(pfn, prot);
17913 + set_pud_at(mm, vaddr, pudp, pud);
17914 + pudp_set_wrprotect(mm, vaddr, pudp);
17915 + pud = READ_ONCE(*pudp);
17916 +@@ -325,9 +382,13 @@ static void __init pud_advanced_tests(struct mm_struct *mm,
17917 +
17918 + static void __init pud_leaf_tests(unsigned long pfn, pgprot_t prot)
17919 + {
17920 +- pud_t pud = pfn_pud(pfn, prot);
17921 ++ pud_t pud;
17922 ++
17923 ++ if (!has_transparent_hugepage())
17924 ++ return;
17925 +
17926 + pr_debug("Validating PUD leaf\n");
17927 ++ pud = pfn_pud(pfn, prot);
17928 + /*
17929 + * PUD based THP is a leaf entry.
17930 + */
17931 +@@ -359,7 +420,7 @@ static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot)
17932 + #endif /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
17933 +
17934 + #else /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
17935 +-static void __init pud_basic_tests(unsigned long pfn, pgprot_t prot) { }
17936 ++static void __init pud_basic_tests(struct mm_struct *mm, unsigned long pfn, int idx) { }
17937 + static void __init pud_advanced_tests(struct mm_struct *mm,
17938 + struct vm_area_struct *vma, pud_t *pudp,
17939 + unsigned long pfn, unsigned long vaddr,
17940 +@@ -372,8 +433,8 @@ static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot)
17941 + }
17942 + #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
17943 + #else /* !CONFIG_TRANSPARENT_HUGEPAGE */
17944 +-static void __init pmd_basic_tests(unsigned long pfn, pgprot_t prot) { }
17945 +-static void __init pud_basic_tests(unsigned long pfn, pgprot_t prot) { }
17946 ++static void __init pmd_basic_tests(unsigned long pfn, int idx) { }
17947 ++static void __init pud_basic_tests(struct mm_struct *mm, unsigned long pfn, int idx) { }
17948 + static void __init pmd_advanced_tests(struct mm_struct *mm,
17949 + struct vm_area_struct *vma, pmd_t *pmdp,
17950 + unsigned long pfn, unsigned long vaddr,
17951 +@@ -609,12 +670,16 @@ static void __init pte_protnone_tests(unsigned long pfn, pgprot_t prot)
17952 + #ifdef CONFIG_TRANSPARENT_HUGEPAGE
17953 + static void __init pmd_protnone_tests(unsigned long pfn, pgprot_t prot)
17954 + {
17955 +- pmd_t pmd = pmd_mkhuge(pfn_pmd(pfn, prot));
17956 ++ pmd_t pmd;
17957 +
17958 + if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
17959 + return;
17960 +
17961 ++ if (!has_transparent_hugepage())
17962 ++ return;
17963 ++
17964 + pr_debug("Validating PMD protnone\n");
17965 ++ pmd = pmd_mkhuge(pfn_pmd(pfn, prot));
17966 + WARN_ON(!pmd_protnone(pmd));
17967 + WARN_ON(!pmd_present(pmd));
17968 + }
17969 +@@ -634,18 +699,26 @@ static void __init pte_devmap_tests(unsigned long pfn, pgprot_t prot)
17970 + #ifdef CONFIG_TRANSPARENT_HUGEPAGE
17971 + static void __init pmd_devmap_tests(unsigned long pfn, pgprot_t prot)
17972 + {
17973 +- pmd_t pmd = pfn_pmd(pfn, prot);
17974 ++ pmd_t pmd;
17975 ++
17976 ++ if (!has_transparent_hugepage())
17977 ++ return;
17978 +
17979 + pr_debug("Validating PMD devmap\n");
17980 ++ pmd = pfn_pmd(pfn, prot);
17981 + WARN_ON(!pmd_devmap(pmd_mkdevmap(pmd)));
17982 + }
17983 +
17984 + #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
17985 + static void __init pud_devmap_tests(unsigned long pfn, pgprot_t prot)
17986 + {
17987 +- pud_t pud = pfn_pud(pfn, prot);
17988 ++ pud_t pud;
17989 ++
17990 ++ if (!has_transparent_hugepage())
17991 ++ return;
17992 +
17993 + pr_debug("Validating PUD devmap\n");
17994 ++ pud = pfn_pud(pfn, prot);
17995 + WARN_ON(!pud_devmap(pud_mkdevmap(pud)));
17996 + }
17997 + #else /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
17998 +@@ -688,25 +761,33 @@ static void __init pte_swap_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
17999 + #ifdef CONFIG_TRANSPARENT_HUGEPAGE
18000 + static void __init pmd_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
18001 + {
18002 +- pmd_t pmd = pfn_pmd(pfn, prot);
18003 ++ pmd_t pmd;
18004 +
18005 + if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
18006 + return;
18007 +
18008 ++ if (!has_transparent_hugepage())
18009 ++ return;
18010 ++
18011 + pr_debug("Validating PMD soft dirty\n");
18012 ++ pmd = pfn_pmd(pfn, prot);
18013 + WARN_ON(!pmd_soft_dirty(pmd_mksoft_dirty(pmd)));
18014 + WARN_ON(pmd_soft_dirty(pmd_clear_soft_dirty(pmd)));
18015 + }
18016 +
18017 + static void __init pmd_swap_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
18018 + {
18019 +- pmd_t pmd = pfn_pmd(pfn, prot);
18020 ++ pmd_t pmd;
18021 +
18022 + if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) ||
18023 + !IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION))
18024 + return;
18025 +
18026 ++ if (!has_transparent_hugepage())
18027 ++ return;
18028 ++
18029 + pr_debug("Validating PMD swap soft dirty\n");
18030 ++ pmd = pfn_pmd(pfn, prot);
18031 + WARN_ON(!pmd_swp_soft_dirty(pmd_swp_mksoft_dirty(pmd)));
18032 + WARN_ON(pmd_swp_soft_dirty(pmd_swp_clear_soft_dirty(pmd)));
18033 + }
18034 +@@ -735,6 +816,9 @@ static void __init pmd_swap_tests(unsigned long pfn, pgprot_t prot)
18035 + swp_entry_t swp;
18036 + pmd_t pmd;
18037 +
18038 ++ if (!has_transparent_hugepage())
18039 ++ return;
18040 ++
18041 + pr_debug("Validating PMD swap\n");
18042 + pmd = pfn_pmd(pfn, prot);
18043 + swp = __pmd_to_swp_entry(pmd);
18044 +@@ -899,6 +983,7 @@ static int __init debug_vm_pgtable(void)
18045 + unsigned long vaddr, pte_aligned, pmd_aligned;
18046 + unsigned long pud_aligned, p4d_aligned, pgd_aligned;
18047 + spinlock_t *ptl = NULL;
18048 ++ int idx;
18049 +
18050 + pr_info("Validating architecture page table helpers\n");
18051 + prot = vm_get_page_prot(VMFLAGS);
18052 +@@ -963,9 +1048,25 @@ static int __init debug_vm_pgtable(void)
18053 + saved_pmdp = pmd_offset(pudp, 0UL);
18054 + saved_ptep = pmd_pgtable(pmd);
18055 +
18056 +- pte_basic_tests(pte_aligned, prot);
18057 +- pmd_basic_tests(pmd_aligned, prot);
18058 +- pud_basic_tests(pud_aligned, prot);
18059 ++ /*
18060 ++ * Iterate over the protection_map[] to make sure that all
18061 ++ * the basic page table transformation validations just hold
18062 ++ * true irrespective of the starting protection value for a
18063 ++ * given page table entry.
18064 ++ */
18065 ++ for (idx = 0; idx < ARRAY_SIZE(protection_map); idx++) {
18066 ++ pte_basic_tests(pte_aligned, idx);
18067 ++ pmd_basic_tests(pmd_aligned, idx);
18068 ++ pud_basic_tests(mm, pud_aligned, idx);
18069 ++ }
18070 ++
18071 ++ /*
18072 ++ * Both P4D and PGD level tests are very basic which do not
18073 ++ * involve creating page table entries from the protection
18074 ++ * value and the given pfn. Hence just keep them out from
18075 ++ * the above iteration for now to save some test execution
18076 ++ * time.
18077 ++ */
18078 + p4d_basic_tests(p4d_aligned, prot);
18079 + pgd_basic_tests(pgd_aligned, prot);
18080 +
18081 +diff --git a/mm/gup.c b/mm/gup.c
18082 +index c2826f3afe722..6cb7d8ae56f66 100644
18083 +--- a/mm/gup.c
18084 ++++ b/mm/gup.c
18085 +@@ -44,6 +44,23 @@ static void hpage_pincount_sub(struct page *page, int refs)
18086 + atomic_sub(refs, compound_pincount_ptr(page));
18087 + }
18088 +
18089 ++/* Equivalent to calling put_page() @refs times. */
18090 ++static void put_page_refs(struct page *page, int refs)
18091 ++{
18092 ++#ifdef CONFIG_DEBUG_VM
18093 ++ if (VM_WARN_ON_ONCE_PAGE(page_ref_count(page) < refs, page))
18094 ++ return;
18095 ++#endif
18096 ++
18097 ++ /*
18098 ++ * Calling put_page() for each ref is unnecessarily slow. Only the last
18099 ++ * ref needs a put_page().
18100 ++ */
18101 ++ if (refs > 1)
18102 ++ page_ref_sub(page, refs - 1);
18103 ++ put_page(page);
18104 ++}
18105 ++
18106 + /*
18107 + * Return the compound head page with ref appropriately incremented,
18108 + * or NULL if that failed.
18109 +@@ -56,6 +73,21 @@ static inline struct page *try_get_compound_head(struct page *page, int refs)
18110 + return NULL;
18111 + if (unlikely(!page_cache_add_speculative(head, refs)))
18112 + return NULL;
18113 ++
18114 ++ /*
18115 ++ * At this point we have a stable reference to the head page; but it
18116 ++ * could be that between the compound_head() lookup and the refcount
18117 ++ * increment, the compound page was split, in which case we'd end up
18118 ++ * holding a reference on a page that has nothing to do with the page
18119 ++ * we were given anymore.
18120 ++ * So now that the head page is stable, recheck that the pages still
18121 ++ * belong together.
18122 ++ */
18123 ++ if (unlikely(compound_head(page) != head)) {
18124 ++ put_page_refs(head, refs);
18125 ++ return NULL;
18126 ++ }
18127 ++
18128 + return head;
18129 + }
18130 +
18131 +@@ -95,6 +127,14 @@ static __maybe_unused struct page *try_grab_compound_head(struct page *page,
18132 + is_migrate_cma_page(page))
18133 + return NULL;
18134 +
18135 ++ /*
18136 ++ * CAUTION: Don't use compound_head() on the page before this
18137 ++ * point, the result won't be stable.
18138 ++ */
18139 ++ page = try_get_compound_head(page, refs);
18140 ++ if (!page)
18141 ++ return NULL;
18142 ++
18143 + /*
18144 + * When pinning a compound page of order > 1 (which is what
18145 + * hpage_pincount_available() checks for), use an exact count to
18146 +@@ -103,15 +143,10 @@ static __maybe_unused struct page *try_grab_compound_head(struct page *page,
18147 + * However, be sure to *also* increment the normal page refcount
18148 + * field at least once, so that the page really is pinned.
18149 + */
18150 +- if (!hpage_pincount_available(page))
18151 +- refs *= GUP_PIN_COUNTING_BIAS;
18152 +-
18153 +- page = try_get_compound_head(page, refs);
18154 +- if (!page)
18155 +- return NULL;
18156 +-
18157 + if (hpage_pincount_available(page))
18158 + hpage_pincount_add(page, refs);
18159 ++ else
18160 ++ page_ref_add(page, refs * (GUP_PIN_COUNTING_BIAS - 1));
18161 +
18162 + mod_node_page_state(page_pgdat(page), NR_FOLL_PIN_ACQUIRED,
18163 + orig_refs);
18164 +@@ -135,14 +170,7 @@ static void put_compound_head(struct page *page, int refs, unsigned int flags)
18165 + refs *= GUP_PIN_COUNTING_BIAS;
18166 + }
18167 +
18168 +- VM_BUG_ON_PAGE(page_ref_count(page) < refs, page);
18169 +- /*
18170 +- * Calling put_page() for each ref is unnecessarily slow. Only the last
18171 +- * ref needs a put_page().
18172 +- */
18173 +- if (refs > 1)
18174 +- page_ref_sub(page, refs - 1);
18175 +- put_page(page);
18176 ++ put_page_refs(page, refs);
18177 + }
18178 +
18179 + /**
18180 +diff --git a/mm/huge_memory.c b/mm/huge_memory.c
18181 +index 6301ecc1f679a..9fe622ff2fc4a 100644
18182 +--- a/mm/huge_memory.c
18183 ++++ b/mm/huge_memory.c
18184 +@@ -63,7 +63,14 @@ static atomic_t huge_zero_refcount;
18185 + struct page *huge_zero_page __read_mostly;
18186 + unsigned long huge_zero_pfn __read_mostly = ~0UL;
18187 +
18188 +-bool transparent_hugepage_enabled(struct vm_area_struct *vma)
18189 ++static inline bool file_thp_enabled(struct vm_area_struct *vma)
18190 ++{
18191 ++ return transhuge_vma_enabled(vma, vma->vm_flags) && vma->vm_file &&
18192 ++ !inode_is_open_for_write(vma->vm_file->f_inode) &&
18193 ++ (vma->vm_flags & VM_EXEC);
18194 ++}
18195 ++
18196 ++bool transparent_hugepage_active(struct vm_area_struct *vma)
18197 + {
18198 + /* The addr is used to check if the vma size fits */
18199 + unsigned long addr = (vma->vm_end & HPAGE_PMD_MASK) - HPAGE_PMD_SIZE;
18200 +@@ -74,6 +81,8 @@ bool transparent_hugepage_enabled(struct vm_area_struct *vma)
18201 + return __transparent_hugepage_enabled(vma);
18202 + if (vma_is_shmem(vma))
18203 + return shmem_huge_enabled(vma);
18204 ++ if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS))
18205 ++ return file_thp_enabled(vma);
18206 +
18207 + return false;
18208 + }
18209 +@@ -375,7 +384,11 @@ static int __init hugepage_init(void)
18210 + struct kobject *hugepage_kobj;
18211 +
18212 + if (!has_transparent_hugepage()) {
18213 +- transparent_hugepage_flags = 0;
18214 ++ /*
18215 ++ * Hardware doesn't support hugepages, hence disable
18216 ++ * DAX PMD support.
18217 ++ */
18218 ++ transparent_hugepage_flags = 1 << TRANSPARENT_HUGEPAGE_NEVER_DAX;
18219 + return -EINVAL;
18220 + }
18221 +
18222 +@@ -1591,7 +1604,7 @@ bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
18223 + * If other processes are mapping this page, we couldn't discard
18224 + * the page unless they all do MADV_FREE so let's skip the page.
18225 + */
18226 +- if (page_mapcount(page) != 1)
18227 ++ if (total_mapcount(page) != 1)
18228 + goto out;
18229 +
18230 + if (!trylock_page(page))
18231 +diff --git a/mm/hugetlb.c b/mm/hugetlb.c
18232 +index d4f89c2f95446..fa6b0ac6c280d 100644
18233 +--- a/mm/hugetlb.c
18234 ++++ b/mm/hugetlb.c
18235 +@@ -1252,8 +1252,7 @@ static void destroy_compound_gigantic_page(struct page *page,
18236 + struct page *p = page + 1;
18237 +
18238 + atomic_set(compound_mapcount_ptr(page), 0);
18239 +- if (hpage_pincount_available(page))
18240 +- atomic_set(compound_pincount_ptr(page), 0);
18241 ++ atomic_set(compound_pincount_ptr(page), 0);
18242 +
18243 + for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
18244 + clear_compound_head(p);
18245 +@@ -1316,8 +1315,6 @@ static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
18246 + return alloc_contig_pages(nr_pages, gfp_mask, nid, nodemask);
18247 + }
18248 +
18249 +-static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
18250 +-static void prep_compound_gigantic_page(struct page *page, unsigned int order);
18251 + #else /* !CONFIG_CONTIG_ALLOC */
18252 + static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
18253 + int nid, nodemask_t *nodemask)
18254 +@@ -1583,9 +1580,7 @@ static void prep_compound_gigantic_page(struct page *page, unsigned int order)
18255 + set_compound_head(p, page);
18256 + }
18257 + atomic_set(compound_mapcount_ptr(page), -1);
18258 +-
18259 +- if (hpage_pincount_available(page))
18260 +- atomic_set(compound_pincount_ptr(page), 0);
18261 ++ atomic_set(compound_pincount_ptr(page), 0);
18262 + }
18263 +
18264 + /*
18265 +@@ -2481,16 +2476,10 @@ found:
18266 + return 1;
18267 + }
18268 +
18269 +-static void __init prep_compound_huge_page(struct page *page,
18270 +- unsigned int order)
18271 +-{
18272 +- if (unlikely(order > (MAX_ORDER - 1)))
18273 +- prep_compound_gigantic_page(page, order);
18274 +- else
18275 +- prep_compound_page(page, order);
18276 +-}
18277 +-
18278 +-/* Put bootmem huge pages into the standard lists after mem_map is up */
18279 ++/*
18280 ++ * Put bootmem huge pages into the standard lists after mem_map is up.
18281 ++ * Note: This only applies to gigantic (order > MAX_ORDER) pages.
18282 ++ */
18283 + static void __init gather_bootmem_prealloc(void)
18284 + {
18285 + struct huge_bootmem_page *m;
18286 +@@ -2499,20 +2488,19 @@ static void __init gather_bootmem_prealloc(void)
18287 + struct page *page = virt_to_page(m);
18288 + struct hstate *h = m->hstate;
18289 +
18290 ++ VM_BUG_ON(!hstate_is_gigantic(h));
18291 + WARN_ON(page_count(page) != 1);
18292 +- prep_compound_huge_page(page, h->order);
18293 ++ prep_compound_gigantic_page(page, huge_page_order(h));
18294 + WARN_ON(PageReserved(page));
18295 + prep_new_huge_page(h, page, page_to_nid(page));
18296 + put_page(page); /* free it into the hugepage allocator */
18297 +
18298 + /*
18299 +- * If we had gigantic hugepages allocated at boot time, we need
18300 +- * to restore the 'stolen' pages to totalram_pages in order to
18301 +- * fix confusing memory reports from free(1) and another
18302 +- * side-effects, like CommitLimit going negative.
18303 ++ * We need to restore the 'stolen' pages to totalram_pages
18304 ++ * in order to fix confusing memory reports from free(1) and
18305 ++ * other side-effects, like CommitLimit going negative.
18306 + */
18307 +- if (hstate_is_gigantic(h))
18308 +- adjust_managed_page_count(page, 1 << h->order);
18309 ++ adjust_managed_page_count(page, pages_per_huge_page(h));
18310 + cond_resched();
18311 + }
18312 + }
18313 +diff --git a/mm/khugepaged.c b/mm/khugepaged.c
18314 +index a6238118ac4c7..ee88125785638 100644
18315 +--- a/mm/khugepaged.c
18316 ++++ b/mm/khugepaged.c
18317 +@@ -440,9 +440,7 @@ static inline int khugepaged_test_exit(struct mm_struct *mm)
18318 + static bool hugepage_vma_check(struct vm_area_struct *vma,
18319 + unsigned long vm_flags)
18320 + {
18321 +- /* Explicitly disabled through madvise. */
18322 +- if ((vm_flags & VM_NOHUGEPAGE) ||
18323 +- test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
18324 ++ if (!transhuge_vma_enabled(vma, vm_flags))
18325 + return false;
18326 +
18327 + /* Enabled via shmem mount options or sysfs settings. */
18328 +diff --git a/mm/memcontrol.c b/mm/memcontrol.c
18329 +index 8d9f5fa4c6d39..92bf987d0a410 100644
18330 +--- a/mm/memcontrol.c
18331 ++++ b/mm/memcontrol.c
18332 +@@ -2898,12 +2898,20 @@ static void commit_charge(struct page *page, struct mem_cgroup *memcg)
18333 + }
18334 +
18335 + #ifdef CONFIG_MEMCG_KMEM
18336 ++/*
18337 ++ * The allocated objcg pointers array is not accounted directly.
18338 ++ * Moreover, it should not come from DMA buffer and is not readily
18339 ++ * reclaimable. So those GFP bits should be masked off.
18340 ++ */
18341 ++#define OBJCGS_CLEAR_MASK (__GFP_DMA | __GFP_RECLAIMABLE | __GFP_ACCOUNT)
18342 ++
18343 + int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s,
18344 + gfp_t gfp)
18345 + {
18346 + unsigned int objects = objs_per_slab_page(s, page);
18347 + void *vec;
18348 +
18349 ++ gfp &= ~OBJCGS_CLEAR_MASK;
18350 + vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp,
18351 + page_to_nid(page));
18352 + if (!vec)
18353 +diff --git a/mm/memory.c b/mm/memory.c
18354 +index eb31b3e4ef93b..0a905e0a7e672 100644
18355 +--- a/mm/memory.c
18356 ++++ b/mm/memory.c
18357 +@@ -3302,6 +3302,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
18358 + {
18359 + struct vm_area_struct *vma = vmf->vma;
18360 + struct page *page = NULL, *swapcache;
18361 ++ struct swap_info_struct *si = NULL;
18362 + swp_entry_t entry;
18363 + pte_t pte;
18364 + int locked;
18365 +@@ -3329,14 +3330,16 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
18366 + goto out;
18367 + }
18368 +
18369 ++ /* Prevent swapoff from happening to us. */
18370 ++ si = get_swap_device(entry);
18371 ++ if (unlikely(!si))
18372 ++ goto out;
18373 +
18374 + delayacct_set_flag(DELAYACCT_PF_SWAPIN);
18375 + page = lookup_swap_cache(entry, vma, vmf->address);
18376 + swapcache = page;
18377 +
18378 + if (!page) {
18379 +- struct swap_info_struct *si = swp_swap_info(entry);
18380 +-
18381 + if (data_race(si->flags & SWP_SYNCHRONOUS_IO) &&
18382 + __swap_count(entry) == 1) {
18383 + /* skip swapcache */
18384 +@@ -3507,6 +3510,8 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
18385 + unlock:
18386 + pte_unmap_unlock(vmf->pte, vmf->ptl);
18387 + out:
18388 ++ if (si)
18389 ++ put_swap_device(si);
18390 + return ret;
18391 + out_nomap:
18392 + pte_unmap_unlock(vmf->pte, vmf->ptl);
18393 +@@ -3518,6 +3523,8 @@ out_release:
18394 + unlock_page(swapcache);
18395 + put_page(swapcache);
18396 + }
18397 ++ if (si)
18398 ++ put_swap_device(si);
18399 + return ret;
18400 + }
18401 +
18402 +diff --git a/mm/page_alloc.c b/mm/page_alloc.c
18403 +index 81cc7fdc9c8fd..e30d88efd7fbb 100644
18404 +--- a/mm/page_alloc.c
18405 ++++ b/mm/page_alloc.c
18406 +@@ -7788,31 +7788,24 @@ static void calculate_totalreserve_pages(void)
18407 + static void setup_per_zone_lowmem_reserve(void)
18408 + {
18409 + struct pglist_data *pgdat;
18410 +- enum zone_type j, idx;
18411 ++ enum zone_type i, j;
18412 +
18413 + for_each_online_pgdat(pgdat) {
18414 +- for (j = 0; j < MAX_NR_ZONES; j++) {
18415 +- struct zone *zone = pgdat->node_zones + j;
18416 +- unsigned long managed_pages = zone_managed_pages(zone);
18417 +-
18418 +- zone->lowmem_reserve[j] = 0;
18419 ++ for (i = 0; i < MAX_NR_ZONES - 1; i++) {
18420 ++ struct zone *zone = &pgdat->node_zones[i];
18421 ++ int ratio = sysctl_lowmem_reserve_ratio[i];
18422 ++ bool clear = !ratio || !zone_managed_pages(zone);
18423 ++ unsigned long managed_pages = 0;
18424 +
18425 +- idx = j;
18426 +- while (idx) {
18427 +- struct zone *lower_zone;
18428 ++ for (j = i + 1; j < MAX_NR_ZONES; j++) {
18429 ++ struct zone *upper_zone = &pgdat->node_zones[j];
18430 +
18431 +- idx--;
18432 +- lower_zone = pgdat->node_zones + idx;
18433 ++ managed_pages += zone_managed_pages(upper_zone);
18434 +
18435 +- if (!sysctl_lowmem_reserve_ratio[idx] ||
18436 +- !zone_managed_pages(lower_zone)) {
18437 +- lower_zone->lowmem_reserve[j] = 0;
18438 +- continue;
18439 +- } else {
18440 +- lower_zone->lowmem_reserve[j] =
18441 +- managed_pages / sysctl_lowmem_reserve_ratio[idx];
18442 +- }
18443 +- managed_pages += zone_managed_pages(lower_zone);
18444 ++ if (clear)
18445 ++ zone->lowmem_reserve[j] = 0;
18446 ++ else
18447 ++ zone->lowmem_reserve[j] = managed_pages / ratio;
18448 + }
18449 + }
18450 + }
18451 +diff --git a/mm/shmem.c b/mm/shmem.c
18452 +index 6e487bf555f9e..96df61c8af653 100644
18453 +--- a/mm/shmem.c
18454 ++++ b/mm/shmem.c
18455 +@@ -1698,7 +1698,8 @@ static int shmem_swapin_page(struct inode *inode, pgoff_t index,
18456 + struct address_space *mapping = inode->i_mapping;
18457 + struct shmem_inode_info *info = SHMEM_I(inode);
18458 + struct mm_struct *charge_mm = vma ? vma->vm_mm : current->mm;
18459 +- struct page *page;
18460 ++ struct swap_info_struct *si;
18461 ++ struct page *page = NULL;
18462 + swp_entry_t swap;
18463 + int error;
18464 +
18465 +@@ -1706,6 +1707,12 @@ static int shmem_swapin_page(struct inode *inode, pgoff_t index,
18466 + swap = radix_to_swp_entry(*pagep);
18467 + *pagep = NULL;
18468 +
18469 ++ /* Prevent swapoff from happening to us. */
18470 ++ si = get_swap_device(swap);
18471 ++ if (!si) {
18472 ++ error = EINVAL;
18473 ++ goto failed;
18474 ++ }
18475 + /* Look it up and read it in.. */
18476 + page = lookup_swap_cache(swap, NULL, 0);
18477 + if (!page) {
18478 +@@ -1767,6 +1774,8 @@ static int shmem_swapin_page(struct inode *inode, pgoff_t index,
18479 + swap_free(swap);
18480 +
18481 + *pagep = page;
18482 ++ if (si)
18483 ++ put_swap_device(si);
18484 + return 0;
18485 + failed:
18486 + if (!shmem_confirm_swap(mapping, index, swap))
18487 +@@ -1777,6 +1786,9 @@ unlock:
18488 + put_page(page);
18489 + }
18490 +
18491 ++ if (si)
18492 ++ put_swap_device(si);
18493 ++
18494 + return error;
18495 + }
18496 +
18497 +@@ -4080,8 +4092,7 @@ bool shmem_huge_enabled(struct vm_area_struct *vma)
18498 + loff_t i_size;
18499 + pgoff_t off;
18500 +
18501 +- if ((vma->vm_flags & VM_NOHUGEPAGE) ||
18502 +- test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
18503 ++ if (!transhuge_vma_enabled(vma, vma->vm_flags))
18504 + return false;
18505 + if (shmem_huge == SHMEM_HUGE_FORCE)
18506 + return true;
18507 +diff --git a/mm/slab.h b/mm/slab.h
18508 +index e258ffcfb0ef2..944e8b2040ae2 100644
18509 +--- a/mm/slab.h
18510 ++++ b/mm/slab.h
18511 +@@ -326,7 +326,6 @@ static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
18512 + if (!memcg_kmem_enabled() || !objcg)
18513 + return;
18514 +
18515 +- flags &= ~__GFP_ACCOUNT;
18516 + for (i = 0; i < size; i++) {
18517 + if (likely(p[i])) {
18518 + page = virt_to_head_page(p[i]);
18519 +diff --git a/mm/z3fold.c b/mm/z3fold.c
18520 +index 8ae944eeb8e20..912ac9a64a155 100644
18521 +--- a/mm/z3fold.c
18522 ++++ b/mm/z3fold.c
18523 +@@ -1063,6 +1063,7 @@ static void z3fold_destroy_pool(struct z3fold_pool *pool)
18524 + destroy_workqueue(pool->compact_wq);
18525 + destroy_workqueue(pool->release_wq);
18526 + z3fold_unregister_migration(pool);
18527 ++ free_percpu(pool->unbuddied);
18528 + kfree(pool);
18529 + }
18530 +
18531 +@@ -1386,7 +1387,7 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
18532 + if (zhdr->foreign_handles ||
18533 + test_and_set_bit(PAGE_CLAIMED, &page->private)) {
18534 + if (kref_put(&zhdr->refcount,
18535 +- release_z3fold_page))
18536 ++ release_z3fold_page_locked))
18537 + atomic64_dec(&pool->pages_nr);
18538 + else
18539 + z3fold_page_unlock(zhdr);
18540 +diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
18541 +index 4676e4b0be2bf..d62ac4b737099 100644
18542 +--- a/net/bluetooth/hci_event.c
18543 ++++ b/net/bluetooth/hci_event.c
18544 +@@ -5256,8 +5256,19 @@ static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb)
18545 +
18546 + BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
18547 +
18548 +- if (ev->status)
18549 ++ if (ev->status) {
18550 ++ struct adv_info *adv;
18551 ++
18552 ++ adv = hci_find_adv_instance(hdev, ev->handle);
18553 ++ if (!adv)
18554 ++ return;
18555 ++
18556 ++ /* Remove advertising as it has been terminated */
18557 ++ hci_remove_adv_instance(hdev, ev->handle);
18558 ++ mgmt_advertising_removed(NULL, hdev, ev->handle);
18559 ++
18560 + return;
18561 ++ }
18562 +
18563 + conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
18564 + if (conn) {
18565 +@@ -5401,7 +5412,7 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
18566 + struct hci_conn *conn;
18567 + bool match;
18568 + u32 flags;
18569 +- u8 *ptr, real_len;
18570 ++ u8 *ptr;
18571 +
18572 + switch (type) {
18573 + case LE_ADV_IND:
18574 +@@ -5432,14 +5443,10 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
18575 + break;
18576 + }
18577 +
18578 +- real_len = ptr - data;
18579 +-
18580 +- /* Adjust for actual length */
18581 +- if (len != real_len) {
18582 +- bt_dev_err_ratelimited(hdev, "advertising data len corrected %u -> %u",
18583 +- len, real_len);
18584 +- len = real_len;
18585 +- }
18586 ++ /* Adjust for actual length. This handles the case when remote
18587 ++ * device is advertising with incorrect data length.
18588 ++ */
18589 ++ len = ptr - data;
18590 +
18591 + /* If the direct address is present, then this report is from
18592 + * a LE Direct Advertising Report event. In that case it is
18593 +diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
18594 +index 161ea93a53828..1a94ed2f8a4f8 100644
18595 +--- a/net/bluetooth/hci_request.c
18596 ++++ b/net/bluetooth/hci_request.c
18597 +@@ -1060,9 +1060,10 @@ static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
18598 + if (!adv_instance)
18599 + return 0;
18600 +
18601 +- /* TODO: Take into account the "appearance" and "local-name" flags here.
18602 +- * These are currently being ignored as they are not supported.
18603 +- */
18604 ++ if (adv_instance->flags & MGMT_ADV_FLAG_APPEARANCE ||
18605 ++ adv_instance->flags & MGMT_ADV_FLAG_LOCAL_NAME)
18606 ++ return 1;
18607 ++
18608 + return adv_instance->scan_rsp_len;
18609 + }
18610 +
18611 +@@ -1595,33 +1596,33 @@ void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
18612 + return;
18613 +
18614 + if (ext_adv_capable(hdev)) {
18615 +- struct hci_cp_le_set_ext_scan_rsp_data cp;
18616 ++ struct {
18617 ++ struct hci_cp_le_set_ext_scan_rsp_data cp;
18618 ++ u8 data[HCI_MAX_EXT_AD_LENGTH];
18619 ++ } pdu;
18620 +
18621 +- memset(&cp, 0, sizeof(cp));
18622 ++ memset(&pdu, 0, sizeof(pdu));
18623 +
18624 +- /* Extended scan response data doesn't allow a response to be
18625 +- * set if the instance isn't scannable.
18626 +- */
18627 +- if (get_adv_instance_scan_rsp_len(hdev, instance))
18628 ++ if (instance)
18629 + len = create_instance_scan_rsp_data(hdev, instance,
18630 +- cp.data);
18631 ++ pdu.data);
18632 + else
18633 +- len = 0;
18634 ++ len = create_default_scan_rsp_data(hdev, pdu.data);
18635 +
18636 + if (hdev->scan_rsp_data_len == len &&
18637 +- !memcmp(cp.data, hdev->scan_rsp_data, len))
18638 ++ !memcmp(pdu.data, hdev->scan_rsp_data, len))
18639 + return;
18640 +
18641 +- memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
18642 ++ memcpy(hdev->scan_rsp_data, pdu.data, len);
18643 + hdev->scan_rsp_data_len = len;
18644 +
18645 +- cp.handle = instance;
18646 +- cp.length = len;
18647 +- cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
18648 +- cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
18649 ++ pdu.cp.handle = instance;
18650 ++ pdu.cp.length = len;
18651 ++ pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
18652 ++ pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
18653 +
18654 +- hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, sizeof(cp),
18655 +- &cp);
18656 ++ hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA,
18657 ++ sizeof(pdu.cp) + len, &pdu.cp);
18658 + } else {
18659 + struct hci_cp_le_set_scan_rsp_data cp;
18660 +
18661 +@@ -1744,26 +1745,30 @@ void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
18662 + return;
18663 +
18664 + if (ext_adv_capable(hdev)) {
18665 +- struct hci_cp_le_set_ext_adv_data cp;
18666 ++ struct {
18667 ++ struct hci_cp_le_set_ext_adv_data cp;
18668 ++ u8 data[HCI_MAX_EXT_AD_LENGTH];
18669 ++ } pdu;
18670 +
18671 +- memset(&cp, 0, sizeof(cp));
18672 ++ memset(&pdu, 0, sizeof(pdu));
18673 +
18674 +- len = create_instance_adv_data(hdev, instance, cp.data);
18675 ++ len = create_instance_adv_data(hdev, instance, pdu.data);
18676 +
18677 + /* There's nothing to do if the data hasn't changed */
18678 + if (hdev->adv_data_len == len &&
18679 +- memcmp(cp.data, hdev->adv_data, len) == 0)
18680 ++ memcmp(pdu.data, hdev->adv_data, len) == 0)
18681 + return;
18682 +
18683 +- memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
18684 ++ memcpy(hdev->adv_data, pdu.data, len);
18685 + hdev->adv_data_len = len;
18686 +
18687 +- cp.length = len;
18688 +- cp.handle = instance;
18689 +- cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
18690 +- cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
18691 ++ pdu.cp.length = len;
18692 ++ pdu.cp.handle = instance;
18693 ++ pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
18694 ++ pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
18695 +
18696 +- hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA, sizeof(cp), &cp);
18697 ++ hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA,
18698 ++ sizeof(pdu.cp) + len, &pdu.cp);
18699 + } else {
18700 + struct hci_cp_le_set_adv_data cp;
18701 +
18702 +diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
18703 +index 12d7b368b428e..13520c7b4f2fb 100644
18704 +--- a/net/bluetooth/mgmt.c
18705 ++++ b/net/bluetooth/mgmt.c
18706 +@@ -7350,6 +7350,9 @@ static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
18707 + for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
18708 + cur_len = data[i];
18709 +
18710 ++ if (!cur_len)
18711 ++ continue;
18712 ++
18713 + if (data[i + 1] == EIR_FLAGS &&
18714 + (!is_adv_data || flags_managed(adv_flags)))
18715 + return false;
18716 +diff --git a/net/bpfilter/main.c b/net/bpfilter/main.c
18717 +index 05e1cfc1e5cd1..291a925462463 100644
18718 +--- a/net/bpfilter/main.c
18719 ++++ b/net/bpfilter/main.c
18720 +@@ -57,7 +57,7 @@ int main(void)
18721 + {
18722 + debug_f = fopen("/dev/kmsg", "w");
18723 + setvbuf(debug_f, 0, _IOLBF, 0);
18724 +- fprintf(debug_f, "Started bpfilter\n");
18725 ++ fprintf(debug_f, "<5>Started bpfilter\n");
18726 + loop();
18727 + fclose(debug_f);
18728 + return 0;
18729 +diff --git a/net/can/bcm.c b/net/can/bcm.c
18730 +index f3e4d9528fa38..0928a39c4423b 100644
18731 +--- a/net/can/bcm.c
18732 ++++ b/net/can/bcm.c
18733 +@@ -785,6 +785,7 @@ static int bcm_delete_rx_op(struct list_head *ops, struct bcm_msg_head *mh,
18734 + bcm_rx_handler, op);
18735 +
18736 + list_del(&op->list);
18737 ++ synchronize_rcu();
18738 + bcm_remove_op(op);
18739 + return 1; /* done */
18740 + }
18741 +@@ -1533,9 +1534,13 @@ static int bcm_release(struct socket *sock)
18742 + REGMASK(op->can_id),
18743 + bcm_rx_handler, op);
18744 +
18745 +- bcm_remove_op(op);
18746 + }
18747 +
18748 ++ synchronize_rcu();
18749 ++
18750 ++ list_for_each_entry_safe(op, next, &bo->rx_ops, list)
18751 ++ bcm_remove_op(op);
18752 ++
18753 + #if IS_ENABLED(CONFIG_PROC_FS)
18754 + /* remove procfs entry */
18755 + if (net->can.bcmproc_dir && bo->bcm_proc_read)
18756 +diff --git a/net/can/gw.c b/net/can/gw.c
18757 +index 6b790b6ff8d26..cbb46d3aa9634 100644
18758 +--- a/net/can/gw.c
18759 ++++ b/net/can/gw.c
18760 +@@ -534,6 +534,7 @@ static int cgw_notifier(struct notifier_block *nb,
18761 + if (gwj->src.dev == dev || gwj->dst.dev == dev) {
18762 + hlist_del(&gwj->list);
18763 + cgw_unregister_filter(net, gwj);
18764 ++ synchronize_rcu();
18765 + kmem_cache_free(cgw_cache, gwj);
18766 + }
18767 + }
18768 +@@ -1092,6 +1093,7 @@ static void cgw_remove_all_jobs(struct net *net)
18769 + hlist_for_each_entry_safe(gwj, nx, &net->can.cgw_list, list) {
18770 + hlist_del(&gwj->list);
18771 + cgw_unregister_filter(net, gwj);
18772 ++ synchronize_rcu();
18773 + kmem_cache_free(cgw_cache, gwj);
18774 + }
18775 + }
18776 +@@ -1160,6 +1162,7 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh,
18777 +
18778 + hlist_del(&gwj->list);
18779 + cgw_unregister_filter(net, gwj);
18780 ++ synchronize_rcu();
18781 + kmem_cache_free(cgw_cache, gwj);
18782 + err = 0;
18783 + break;
18784 +diff --git a/net/can/isotp.c b/net/can/isotp.c
18785 +index 1adefb14527d8..5fc28f190677b 100644
18786 +--- a/net/can/isotp.c
18787 ++++ b/net/can/isotp.c
18788 +@@ -1023,9 +1023,6 @@ static int isotp_release(struct socket *sock)
18789 +
18790 + lock_sock(sk);
18791 +
18792 +- hrtimer_cancel(&so->txtimer);
18793 +- hrtimer_cancel(&so->rxtimer);
18794 +-
18795 + /* remove current filters & unregister */
18796 + if (so->bound) {
18797 + if (so->ifindex) {
18798 +@@ -1037,10 +1034,14 @@ static int isotp_release(struct socket *sock)
18799 + SINGLE_MASK(so->rxid),
18800 + isotp_rcv, sk);
18801 + dev_put(dev);
18802 ++ synchronize_rcu();
18803 + }
18804 + }
18805 + }
18806 +
18807 ++ hrtimer_cancel(&so->txtimer);
18808 ++ hrtimer_cancel(&so->rxtimer);
18809 ++
18810 + so->ifindex = 0;
18811 + so->bound = 0;
18812 +
18813 +diff --git a/net/can/j1939/main.c b/net/can/j1939/main.c
18814 +index e52330f628c9f..6884d18f919c7 100644
18815 +--- a/net/can/j1939/main.c
18816 ++++ b/net/can/j1939/main.c
18817 +@@ -193,6 +193,10 @@ static void j1939_can_rx_unregister(struct j1939_priv *priv)
18818 + can_rx_unregister(dev_net(ndev), ndev, J1939_CAN_ID, J1939_CAN_MASK,
18819 + j1939_can_recv, priv);
18820 +
18821 ++ /* The last reference of priv is dropped by the RCU deferred
18822 ++ * j1939_sk_sock_destruct() of the last socket, so we can
18823 ++ * safely drop this reference here.
18824 ++ */
18825 + j1939_priv_put(priv);
18826 + }
18827 +
18828 +diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c
18829 +index 56aa66147d5ac..e1a399821238f 100644
18830 +--- a/net/can/j1939/socket.c
18831 ++++ b/net/can/j1939/socket.c
18832 +@@ -398,6 +398,9 @@ static int j1939_sk_init(struct sock *sk)
18833 + atomic_set(&jsk->skb_pending, 0);
18834 + spin_lock_init(&jsk->sk_session_queue_lock);
18835 + INIT_LIST_HEAD(&jsk->sk_session_queue);
18836 ++
18837 ++ /* j1939_sk_sock_destruct() depends on SOCK_RCU_FREE flag */
18838 ++ sock_set_flag(sk, SOCK_RCU_FREE);
18839 + sk->sk_destruct = j1939_sk_sock_destruct;
18840 + sk->sk_protocol = CAN_J1939;
18841 +
18842 +@@ -673,7 +676,7 @@ static int j1939_sk_setsockopt(struct socket *sock, int level, int optname,
18843 +
18844 + switch (optname) {
18845 + case SO_J1939_FILTER:
18846 +- if (!sockptr_is_null(optval)) {
18847 ++ if (!sockptr_is_null(optval) && optlen != 0) {
18848 + struct j1939_filter *f;
18849 + int c;
18850 +
18851 +diff --git a/net/core/filter.c b/net/core/filter.c
18852 +index ef6bdbb63ecbb..7ea752af7894d 100644
18853 +--- a/net/core/filter.c
18854 ++++ b/net/core/filter.c
18855 +@@ -3266,8 +3266,6 @@ static int bpf_skb_proto_4_to_6(struct sk_buff *skb)
18856 + shinfo->gso_type |= SKB_GSO_TCPV6;
18857 + }
18858 +
18859 +- /* Due to IPv6 header, MSS needs to be downgraded. */
18860 +- skb_decrease_gso_size(shinfo, len_diff);
18861 + /* Header must be checked, and gso_segs recomputed. */
18862 + shinfo->gso_type |= SKB_GSO_DODGY;
18863 + shinfo->gso_segs = 0;
18864 +@@ -3307,8 +3305,6 @@ static int bpf_skb_proto_6_to_4(struct sk_buff *skb)
18865 + shinfo->gso_type |= SKB_GSO_TCPV4;
18866 + }
18867 +
18868 +- /* Due to IPv4 header, MSS can be upgraded. */
18869 +- skb_increase_gso_size(shinfo, len_diff);
18870 + /* Header must be checked, and gso_segs recomputed. */
18871 + shinfo->gso_type |= SKB_GSO_DODGY;
18872 + shinfo->gso_segs = 0;
18873 +diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
18874 +index 4b834bbf95e07..ed9857b2875dc 100644
18875 +--- a/net/ipv4/esp4.c
18876 ++++ b/net/ipv4/esp4.c
18877 +@@ -673,7 +673,7 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
18878 + struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
18879 + u32 padto;
18880 +
18881 +- padto = min(x->tfcpad, xfrm_state_mtu(x, dst->child_mtu_cached));
18882 ++ padto = min(x->tfcpad, __xfrm_state_mtu(x, dst->child_mtu_cached));
18883 + if (skb->len < padto)
18884 + esp.tfclen = padto - skb->len;
18885 + }
18886 +diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
18887 +index 84bb707bd88d8..647bceab56c2d 100644
18888 +--- a/net/ipv4/fib_frontend.c
18889 ++++ b/net/ipv4/fib_frontend.c
18890 +@@ -371,6 +371,8 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
18891 + fl4.flowi4_proto = 0;
18892 + fl4.fl4_sport = 0;
18893 + fl4.fl4_dport = 0;
18894 ++ } else {
18895 ++ swap(fl4.fl4_sport, fl4.fl4_dport);
18896 + }
18897 +
18898 + if (fib_lookup(net, &fl4, &res, 0))
18899 +diff --git a/net/ipv4/route.c b/net/ipv4/route.c
18900 +index e968bb47d5bd8..e15c1d8b7c8de 100644
18901 +--- a/net/ipv4/route.c
18902 ++++ b/net/ipv4/route.c
18903 +@@ -1327,7 +1327,7 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst)
18904 + mtu = dst_metric_raw(dst, RTAX_MTU);
18905 +
18906 + if (mtu)
18907 +- return mtu;
18908 ++ goto out;
18909 +
18910 + mtu = READ_ONCE(dst->dev->mtu);
18911 +
18912 +@@ -1336,6 +1336,7 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst)
18913 + mtu = 576;
18914 + }
18915 +
18916 ++out:
18917 + mtu = min_t(unsigned int, mtu, IP_MAX_MTU);
18918 +
18919 + return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
18920 +diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
18921 +index 4071cb7c7a154..8d001f665fb15 100644
18922 +--- a/net/ipv6/esp6.c
18923 ++++ b/net/ipv6/esp6.c
18924 +@@ -708,7 +708,7 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
18925 + struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
18926 + u32 padto;
18927 +
18928 +- padto = min(x->tfcpad, xfrm_state_mtu(x, dst->child_mtu_cached));
18929 ++ padto = min(x->tfcpad, __xfrm_state_mtu(x, dst->child_mtu_cached));
18930 + if (skb->len < padto)
18931 + esp.tfclen = padto - skb->len;
18932 + }
18933 +diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
18934 +index 374105e4394f8..4932dea9820ba 100644
18935 +--- a/net/ipv6/exthdrs.c
18936 ++++ b/net/ipv6/exthdrs.c
18937 +@@ -135,18 +135,23 @@ static bool ip6_parse_tlv(const struct tlvtype_proc *procs,
18938 + len -= 2;
18939 +
18940 + while (len > 0) {
18941 +- int optlen = nh[off + 1] + 2;
18942 +- int i;
18943 ++ int optlen, i;
18944 +
18945 +- switch (nh[off]) {
18946 +- case IPV6_TLV_PAD1:
18947 +- optlen = 1;
18948 ++ if (nh[off] == IPV6_TLV_PAD1) {
18949 + padlen++;
18950 + if (padlen > 7)
18951 + goto bad;
18952 +- break;
18953 ++ off++;
18954 ++ len--;
18955 ++ continue;
18956 ++ }
18957 ++ if (len < 2)
18958 ++ goto bad;
18959 ++ optlen = nh[off + 1] + 2;
18960 ++ if (optlen > len)
18961 ++ goto bad;
18962 +
18963 +- case IPV6_TLV_PADN:
18964 ++ if (nh[off] == IPV6_TLV_PADN) {
18965 + /* RFC 2460 states that the purpose of PadN is
18966 + * to align the containing header to multiples
18967 + * of 8. 7 is therefore the highest valid value.
18968 +@@ -163,12 +168,7 @@ static bool ip6_parse_tlv(const struct tlvtype_proc *procs,
18969 + if (nh[off + i] != 0)
18970 + goto bad;
18971 + }
18972 +- break;
18973 +-
18974 +- default: /* Other TLV code so scan list */
18975 +- if (optlen > len)
18976 +- goto bad;
18977 +-
18978 ++ } else {
18979 + tlv_count++;
18980 + if (tlv_count > max_count)
18981 + goto bad;
18982 +@@ -188,7 +188,6 @@ static bool ip6_parse_tlv(const struct tlvtype_proc *procs,
18983 + return false;
18984 +
18985 + padlen = 0;
18986 +- break;
18987 + }
18988 + off += optlen;
18989 + len -= optlen;
18990 +@@ -306,7 +305,7 @@ fail_and_free:
18991 + #endif
18992 +
18993 + if (ip6_parse_tlv(tlvprocdestopt_lst, skb,
18994 +- init_net.ipv6.sysctl.max_dst_opts_cnt)) {
18995 ++ net->ipv6.sysctl.max_dst_opts_cnt)) {
18996 + skb->transport_header += extlen;
18997 + opt = IP6CB(skb);
18998 + #if IS_ENABLED(CONFIG_IPV6_MIP6)
18999 +@@ -1041,7 +1040,7 @@ fail_and_free:
19000 +
19001 + opt->flags |= IP6SKB_HOPBYHOP;
19002 + if (ip6_parse_tlv(tlvprochopopt_lst, skb,
19003 +- init_net.ipv6.sysctl.max_hbh_opts_cnt)) {
19004 ++ net->ipv6.sysctl.max_hbh_opts_cnt)) {
19005 + skb->transport_header += extlen;
19006 + opt = IP6CB(skb);
19007 + opt->nhoff = sizeof(struct ipv6hdr);
19008 +diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
19009 +index 42ca2d05c480d..08441f06afd48 100644
19010 +--- a/net/ipv6/ip6_tunnel.c
19011 ++++ b/net/ipv6/ip6_tunnel.c
19012 +@@ -1270,8 +1270,6 @@ route_lookup:
19013 + if (max_headroom > dev->needed_headroom)
19014 + dev->needed_headroom = max_headroom;
19015 +
19016 +- skb_set_inner_ipproto(skb, proto);
19017 +-
19018 + err = ip6_tnl_encap(skb, t, &proto, fl6);
19019 + if (err)
19020 + return err;
19021 +@@ -1408,6 +1406,8 @@ ipxip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev,
19022 + if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
19023 + return -1;
19024 +
19025 ++ skb_set_inner_ipproto(skb, protocol);
19026 ++
19027 + err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
19028 + protocol);
19029 + if (err != 0) {
19030 +diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
19031 +index fbe26e912300d..142bb28199c48 100644
19032 +--- a/net/mac80211/mlme.c
19033 ++++ b/net/mac80211/mlme.c
19034 +@@ -1094,11 +1094,6 @@ void ieee80211_send_nullfunc(struct ieee80211_local *local,
19035 + struct ieee80211_hdr_3addr *nullfunc;
19036 + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
19037 +
19038 +- /* Don't send NDPs when STA is connected HE */
19039 +- if (sdata->vif.type == NL80211_IFTYPE_STATION &&
19040 +- !(ifmgd->flags & IEEE80211_STA_DISABLE_HE))
19041 +- return;
19042 +-
19043 + skb = ieee80211_nullfunc_get(&local->hw, &sdata->vif,
19044 + !ieee80211_hw_check(&local->hw, DOESNT_SUPPORT_QOS_NDP));
19045 + if (!skb)
19046 +@@ -1130,10 +1125,6 @@ static void ieee80211_send_4addr_nullfunc(struct ieee80211_local *local,
19047 + if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION))
19048 + return;
19049 +
19050 +- /* Don't send NDPs when connected HE */
19051 +- if (!(sdata->u.mgd.flags & IEEE80211_STA_DISABLE_HE))
19052 +- return;
19053 +-
19054 + skb = dev_alloc_skb(local->hw.extra_tx_headroom + 30);
19055 + if (!skb)
19056 + return;
19057 +diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
19058 +index f2fb69da9b6e1..13250cadb4202 100644
19059 +--- a/net/mac80211/sta_info.c
19060 ++++ b/net/mac80211/sta_info.c
19061 +@@ -1398,11 +1398,6 @@ static void ieee80211_send_null_response(struct sta_info *sta, int tid,
19062 + struct ieee80211_tx_info *info;
19063 + struct ieee80211_chanctx_conf *chanctx_conf;
19064 +
19065 +- /* Don't send NDPs when STA is connected HE */
19066 +- if (sdata->vif.type == NL80211_IFTYPE_STATION &&
19067 +- !(sdata->u.mgd.flags & IEEE80211_STA_DISABLE_HE))
19068 +- return;
19069 +-
19070 + if (qos) {
19071 + fc = cpu_to_le16(IEEE80211_FTYPE_DATA |
19072 + IEEE80211_STYPE_QOS_NULLFUNC |
19073 +diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
19074 +index 851fb3d8c791d..bba5696fee36d 100644
19075 +--- a/net/mptcp/subflow.c
19076 ++++ b/net/mptcp/subflow.c
19077 +@@ -338,15 +338,15 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
19078 + goto do_reset;
19079 + }
19080 +
19081 ++ if (!mptcp_finish_join(sk))
19082 ++ goto do_reset;
19083 ++
19084 + subflow_generate_hmac(subflow->local_key, subflow->remote_key,
19085 + subflow->local_nonce,
19086 + subflow->remote_nonce,
19087 + hmac);
19088 + memcpy(subflow->hmac, hmac, MPTCPOPT_HMAC_LEN);
19089 +
19090 +- if (!mptcp_finish_join(sk))
19091 +- goto do_reset;
19092 +-
19093 + subflow->mp_join = 1;
19094 + MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKRX);
19095 + } else if (mptcp_check_fallback(sk)) {
19096 +diff --git a/net/mptcp/token.c b/net/mptcp/token.c
19097 +index feb4b9ffd4625..0691a4883f3ab 100644
19098 +--- a/net/mptcp/token.c
19099 ++++ b/net/mptcp/token.c
19100 +@@ -156,9 +156,6 @@ int mptcp_token_new_connect(struct sock *sk)
19101 + int retries = TOKEN_MAX_RETRIES;
19102 + struct token_bucket *bucket;
19103 +
19104 +- pr_debug("ssk=%p, local_key=%llu, token=%u, idsn=%llu\n",
19105 +- sk, subflow->local_key, subflow->token, subflow->idsn);
19106 +-
19107 + again:
19108 + mptcp_crypto_key_gen_sha(&subflow->local_key, &subflow->token,
19109 + &subflow->idsn);
19110 +@@ -172,6 +169,9 @@ again:
19111 + goto again;
19112 + }
19113 +
19114 ++ pr_debug("ssk=%p, local_key=%llu, token=%u, idsn=%llu\n",
19115 ++ sk, subflow->local_key, subflow->token, subflow->idsn);
19116 ++
19117 + WRITE_ONCE(msk->token, subflow->token);
19118 + __sk_nulls_add_node_rcu((struct sock *)msk, &bucket->msk_chain);
19119 + bucket->chain_len++;
19120 +diff --git a/net/netfilter/nf_tables_offload.c b/net/netfilter/nf_tables_offload.c
19121 +index 2b00f7f47693b..9ce776175214c 100644
19122 +--- a/net/netfilter/nf_tables_offload.c
19123 ++++ b/net/netfilter/nf_tables_offload.c
19124 +@@ -54,15 +54,10 @@ static void nft_flow_rule_transfer_vlan(struct nft_offload_ctx *ctx,
19125 + struct nft_flow_rule *flow)
19126 + {
19127 + struct nft_flow_match *match = &flow->match;
19128 +- struct nft_offload_ethertype ethertype;
19129 +-
19130 +- if (match->dissector.used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL) &&
19131 +- match->key.basic.n_proto != htons(ETH_P_8021Q) &&
19132 +- match->key.basic.n_proto != htons(ETH_P_8021AD))
19133 +- return;
19134 +-
19135 +- ethertype.value = match->key.basic.n_proto;
19136 +- ethertype.mask = match->mask.basic.n_proto;
19137 ++ struct nft_offload_ethertype ethertype = {
19138 ++ .value = match->key.basic.n_proto,
19139 ++ .mask = match->mask.basic.n_proto,
19140 ++ };
19141 +
19142 + if (match->dissector.used_keys & BIT(FLOW_DISSECTOR_KEY_VLAN) &&
19143 + (match->key.vlan.vlan_tpid == htons(ETH_P_8021Q) ||
19144 +@@ -76,7 +71,9 @@ static void nft_flow_rule_transfer_vlan(struct nft_offload_ctx *ctx,
19145 + match->dissector.offset[FLOW_DISSECTOR_KEY_CVLAN] =
19146 + offsetof(struct nft_flow_key, cvlan);
19147 + match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_CVLAN);
19148 +- } else {
19149 ++ } else if (match->dissector.used_keys & BIT(FLOW_DISSECTOR_KEY_BASIC) &&
19150 ++ (match->key.basic.n_proto == htons(ETH_P_8021Q) ||
19151 ++ match->key.basic.n_proto == htons(ETH_P_8021AD))) {
19152 + match->key.basic.n_proto = match->key.vlan.vlan_tpid;
19153 + match->mask.basic.n_proto = match->mask.vlan.vlan_tpid;
19154 + match->key.vlan.vlan_tpid = ethertype.value;
19155 +diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c
19156 +index 3c48cdc8935df..faa0844c01fb8 100644
19157 +--- a/net/netfilter/nft_exthdr.c
19158 ++++ b/net/netfilter/nft_exthdr.c
19159 +@@ -42,6 +42,9 @@ static void nft_exthdr_ipv6_eval(const struct nft_expr *expr,
19160 + unsigned int offset = 0;
19161 + int err;
19162 +
19163 ++ if (pkt->skb->protocol != htons(ETH_P_IPV6))
19164 ++ goto err;
19165 ++
19166 + err = ipv6_find_hdr(pkt->skb, &offset, priv->type, NULL, NULL);
19167 + if (priv->flags & NFT_EXTHDR_F_PRESENT) {
19168 + nft_reg_store8(dest, err >= 0);
19169 +diff --git a/net/netfilter/nft_osf.c b/net/netfilter/nft_osf.c
19170 +index c261d57a666ab..2c957629ea660 100644
19171 +--- a/net/netfilter/nft_osf.c
19172 ++++ b/net/netfilter/nft_osf.c
19173 +@@ -28,6 +28,11 @@ static void nft_osf_eval(const struct nft_expr *expr, struct nft_regs *regs,
19174 + struct nf_osf_data data;
19175 + struct tcphdr _tcph;
19176 +
19177 ++ if (pkt->tprot != IPPROTO_TCP) {
19178 ++ regs->verdict.code = NFT_BREAK;
19179 ++ return;
19180 ++ }
19181 ++
19182 + tcp = skb_header_pointer(skb, ip_hdrlen(skb),
19183 + sizeof(struct tcphdr), &_tcph);
19184 + if (!tcp) {
19185 +diff --git a/net/netfilter/nft_tproxy.c b/net/netfilter/nft_tproxy.c
19186 +index d67f83a0958d3..242222dc52c3c 100644
19187 +--- a/net/netfilter/nft_tproxy.c
19188 ++++ b/net/netfilter/nft_tproxy.c
19189 +@@ -30,6 +30,12 @@ static void nft_tproxy_eval_v4(const struct nft_expr *expr,
19190 + __be16 tport = 0;
19191 + struct sock *sk;
19192 +
19193 ++ if (pkt->tprot != IPPROTO_TCP &&
19194 ++ pkt->tprot != IPPROTO_UDP) {
19195 ++ regs->verdict.code = NFT_BREAK;
19196 ++ return;
19197 ++ }
19198 ++
19199 + hp = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_hdr), &_hdr);
19200 + if (!hp) {
19201 + regs->verdict.code = NFT_BREAK;
19202 +@@ -91,7 +97,8 @@ static void nft_tproxy_eval_v6(const struct nft_expr *expr,
19203 +
19204 + memset(&taddr, 0, sizeof(taddr));
19205 +
19206 +- if (!pkt->tprot_set) {
19207 ++ if (pkt->tprot != IPPROTO_TCP &&
19208 ++ pkt->tprot != IPPROTO_UDP) {
19209 + regs->verdict.code = NFT_BREAK;
19210 + return;
19211 + }
19212 +diff --git a/net/netlabel/netlabel_mgmt.c b/net/netlabel/netlabel_mgmt.c
19213 +index eb1d66d20afbb..02a97bca1a1a2 100644
19214 +--- a/net/netlabel/netlabel_mgmt.c
19215 ++++ b/net/netlabel/netlabel_mgmt.c
19216 +@@ -76,6 +76,7 @@ static const struct nla_policy netlbl_mgmt_genl_policy[NLBL_MGMT_A_MAX + 1] = {
19217 + static int netlbl_mgmt_add_common(struct genl_info *info,
19218 + struct netlbl_audit *audit_info)
19219 + {
19220 ++ void *pmap = NULL;
19221 + int ret_val = -EINVAL;
19222 + struct netlbl_domaddr_map *addrmap = NULL;
19223 + struct cipso_v4_doi *cipsov4 = NULL;
19224 +@@ -175,6 +176,7 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
19225 + ret_val = -ENOMEM;
19226 + goto add_free_addrmap;
19227 + }
19228 ++ pmap = map;
19229 + map->list.addr = addr->s_addr & mask->s_addr;
19230 + map->list.mask = mask->s_addr;
19231 + map->list.valid = 1;
19232 +@@ -183,10 +185,8 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
19233 + map->def.cipso = cipsov4;
19234 +
19235 + ret_val = netlbl_af4list_add(&map->list, &addrmap->list4);
19236 +- if (ret_val != 0) {
19237 +- kfree(map);
19238 +- goto add_free_addrmap;
19239 +- }
19240 ++ if (ret_val != 0)
19241 ++ goto add_free_map;
19242 +
19243 + entry->family = AF_INET;
19244 + entry->def.type = NETLBL_NLTYPE_ADDRSELECT;
19245 +@@ -223,6 +223,7 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
19246 + ret_val = -ENOMEM;
19247 + goto add_free_addrmap;
19248 + }
19249 ++ pmap = map;
19250 + map->list.addr = *addr;
19251 + map->list.addr.s6_addr32[0] &= mask->s6_addr32[0];
19252 + map->list.addr.s6_addr32[1] &= mask->s6_addr32[1];
19253 +@@ -235,10 +236,8 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
19254 + map->def.calipso = calipso;
19255 +
19256 + ret_val = netlbl_af6list_add(&map->list, &addrmap->list6);
19257 +- if (ret_val != 0) {
19258 +- kfree(map);
19259 +- goto add_free_addrmap;
19260 +- }
19261 ++ if (ret_val != 0)
19262 ++ goto add_free_map;
19263 +
19264 + entry->family = AF_INET6;
19265 + entry->def.type = NETLBL_NLTYPE_ADDRSELECT;
19266 +@@ -248,10 +247,12 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
19267 +
19268 + ret_val = netlbl_domhsh_add(entry, audit_info);
19269 + if (ret_val != 0)
19270 +- goto add_free_addrmap;
19271 ++ goto add_free_map;
19272 +
19273 + return 0;
19274 +
19275 ++add_free_map:
19276 ++ kfree(pmap);
19277 + add_free_addrmap:
19278 + kfree(addrmap);
19279 + add_doi_put_def:
19280 +diff --git a/net/qrtr/ns.c b/net/qrtr/ns.c
19281 +index b8559c8824318..e760d4a38fafd 100644
19282 +--- a/net/qrtr/ns.c
19283 ++++ b/net/qrtr/ns.c
19284 +@@ -783,8 +783,10 @@ void qrtr_ns_init(void)
19285 + }
19286 +
19287 + qrtr_ns.workqueue = alloc_workqueue("qrtr_ns_handler", WQ_UNBOUND, 1);
19288 +- if (!qrtr_ns.workqueue)
19289 ++ if (!qrtr_ns.workqueue) {
19290 ++ ret = -ENOMEM;
19291 + goto err_sock;
19292 ++ }
19293 +
19294 + qrtr_ns.sock->sk->sk_data_ready = qrtr_ns_data_ready;
19295 +
19296 +diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
19297 +index 1cac3c6fbb49c..a108469c664f7 100644
19298 +--- a/net/sched/act_vlan.c
19299 ++++ b/net/sched/act_vlan.c
19300 +@@ -70,7 +70,7 @@ static int tcf_vlan_act(struct sk_buff *skb, const struct tc_action *a,
19301 + /* replace the vid */
19302 + tci = (tci & ~VLAN_VID_MASK) | p->tcfv_push_vid;
19303 + /* replace prio bits, if tcfv_push_prio specified */
19304 +- if (p->tcfv_push_prio) {
19305 ++ if (p->tcfv_push_prio_exists) {
19306 + tci &= ~VLAN_PRIO_MASK;
19307 + tci |= p->tcfv_push_prio << VLAN_PRIO_SHIFT;
19308 + }
19309 +@@ -121,6 +121,7 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
19310 + struct tc_action_net *tn = net_generic(net, vlan_net_id);
19311 + struct nlattr *tb[TCA_VLAN_MAX + 1];
19312 + struct tcf_chain *goto_ch = NULL;
19313 ++ bool push_prio_exists = false;
19314 + struct tcf_vlan_params *p;
19315 + struct tc_vlan *parm;
19316 + struct tcf_vlan *v;
19317 +@@ -189,7 +190,8 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
19318 + push_proto = htons(ETH_P_8021Q);
19319 + }
19320 +
19321 +- if (tb[TCA_VLAN_PUSH_VLAN_PRIORITY])
19322 ++ push_prio_exists = !!tb[TCA_VLAN_PUSH_VLAN_PRIORITY];
19323 ++ if (push_prio_exists)
19324 + push_prio = nla_get_u8(tb[TCA_VLAN_PUSH_VLAN_PRIORITY]);
19325 + break;
19326 + case TCA_VLAN_ACT_POP_ETH:
19327 +@@ -241,6 +243,7 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
19328 + p->tcfv_action = action;
19329 + p->tcfv_push_vid = push_vid;
19330 + p->tcfv_push_prio = push_prio;
19331 ++ p->tcfv_push_prio_exists = push_prio_exists || action == TCA_VLAN_ACT_PUSH;
19332 + p->tcfv_push_proto = push_proto;
19333 +
19334 + if (action == TCA_VLAN_ACT_PUSH_ETH) {
19335 +diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
19336 +index c4007b9cd16d6..5b274534264c2 100644
19337 +--- a/net/sched/cls_tcindex.c
19338 ++++ b/net/sched/cls_tcindex.c
19339 +@@ -304,7 +304,7 @@ static int tcindex_alloc_perfect_hash(struct net *net, struct tcindex_data *cp)
19340 + int i, err = 0;
19341 +
19342 + cp->perfect = kcalloc(cp->hash, sizeof(struct tcindex_filter_result),
19343 +- GFP_KERNEL);
19344 ++ GFP_KERNEL | __GFP_NOWARN);
19345 + if (!cp->perfect)
19346 + return -ENOMEM;
19347 +
19348 +diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
19349 +index 6335230a971e2..ade2d6ddc9148 100644
19350 +--- a/net/sched/sch_qfq.c
19351 ++++ b/net/sched/sch_qfq.c
19352 +@@ -485,11 +485,6 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
19353 +
19354 + if (cl->qdisc != &noop_qdisc)
19355 + qdisc_hash_add(cl->qdisc, true);
19356 +- sch_tree_lock(sch);
19357 +- qdisc_class_hash_insert(&q->clhash, &cl->common);
19358 +- sch_tree_unlock(sch);
19359 +-
19360 +- qdisc_class_hash_grow(sch, &q->clhash);
19361 +
19362 + set_change_agg:
19363 + sch_tree_lock(sch);
19364 +@@ -507,8 +502,11 @@ set_change_agg:
19365 + }
19366 + if (existing)
19367 + qfq_deact_rm_from_agg(q, cl);
19368 ++ else
19369 ++ qdisc_class_hash_insert(&q->clhash, &cl->common);
19370 + qfq_add_to_agg(q, new_agg, cl);
19371 + sch_tree_unlock(sch);
19372 ++ qdisc_class_hash_grow(sch, &q->clhash);
19373 +
19374 + *arg = (unsigned long)cl;
19375 + return 0;
19376 +diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
19377 +index 39ed0e0afe6d9..c045f63d11fa6 100644
19378 +--- a/net/sunrpc/sched.c
19379 ++++ b/net/sunrpc/sched.c
19380 +@@ -591,11 +591,21 @@ static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *q
19381 + struct list_head *q;
19382 + struct rpc_task *task;
19383 +
19384 ++ /*
19385 ++ * Service the privileged queue.
19386 ++ */
19387 ++ q = &queue->tasks[RPC_NR_PRIORITY - 1];
19388 ++ if (queue->maxpriority > RPC_PRIORITY_PRIVILEGED && !list_empty(q)) {
19389 ++ task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
19390 ++ goto out;
19391 ++ }
19392 ++
19393 + /*
19394 + * Service a batch of tasks from a single owner.
19395 + */
19396 + q = &queue->tasks[queue->priority];
19397 +- if (!list_empty(q) && --queue->nr) {
19398 ++ if (!list_empty(q) && queue->nr) {
19399 ++ queue->nr--;
19400 + task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
19401 + goto out;
19402 + }
19403 +diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
19404 +index d4beca895992d..593846d252143 100644
19405 +--- a/net/tipc/bcast.c
19406 ++++ b/net/tipc/bcast.c
19407 +@@ -699,7 +699,7 @@ int tipc_bcast_init(struct net *net)
19408 + spin_lock_init(&tipc_net(net)->bclock);
19409 +
19410 + if (!tipc_link_bc_create(net, 0, 0, NULL,
19411 +- FB_MTU,
19412 ++ one_page_mtu,
19413 + BCLINK_WIN_DEFAULT,
19414 + BCLINK_WIN_DEFAULT,
19415 + 0,
19416 +diff --git a/net/tipc/msg.c b/net/tipc/msg.c
19417 +index 88a3ed80094cd..91dcf648d32bb 100644
19418 +--- a/net/tipc/msg.c
19419 ++++ b/net/tipc/msg.c
19420 +@@ -44,12 +44,15 @@
19421 + #define MAX_FORWARD_SIZE 1024
19422 + #ifdef CONFIG_TIPC_CRYPTO
19423 + #define BUF_HEADROOM ALIGN(((LL_MAX_HEADER + 48) + EHDR_MAX_SIZE), 16)
19424 +-#define BUF_TAILROOM (TIPC_AES_GCM_TAG_SIZE)
19425 ++#define BUF_OVERHEAD (BUF_HEADROOM + TIPC_AES_GCM_TAG_SIZE)
19426 + #else
19427 + #define BUF_HEADROOM (LL_MAX_HEADER + 48)
19428 +-#define BUF_TAILROOM 16
19429 ++#define BUF_OVERHEAD BUF_HEADROOM
19430 + #endif
19431 +
19432 ++const int one_page_mtu = PAGE_SIZE - SKB_DATA_ALIGN(BUF_OVERHEAD) -
19433 ++ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
19434 ++
19435 + static unsigned int align(unsigned int i)
19436 + {
19437 + return (i + 3) & ~3u;
19438 +@@ -67,13 +70,8 @@ static unsigned int align(unsigned int i)
19439 + struct sk_buff *tipc_buf_acquire(u32 size, gfp_t gfp)
19440 + {
19441 + struct sk_buff *skb;
19442 +-#ifdef CONFIG_TIPC_CRYPTO
19443 +- unsigned int buf_size = (BUF_HEADROOM + size + BUF_TAILROOM + 3) & ~3u;
19444 +-#else
19445 +- unsigned int buf_size = (BUF_HEADROOM + size + 3) & ~3u;
19446 +-#endif
19447 +
19448 +- skb = alloc_skb_fclone(buf_size, gfp);
19449 ++ skb = alloc_skb_fclone(BUF_OVERHEAD + size, gfp);
19450 + if (skb) {
19451 + skb_reserve(skb, BUF_HEADROOM);
19452 + skb_put(skb, size);
19453 +@@ -395,7 +393,8 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
19454 + if (unlikely(!skb)) {
19455 + if (pktmax != MAX_MSG_SIZE)
19456 + return -ENOMEM;
19457 +- rc = tipc_msg_build(mhdr, m, offset, dsz, FB_MTU, list);
19458 ++ rc = tipc_msg_build(mhdr, m, offset, dsz,
19459 ++ one_page_mtu, list);
19460 + if (rc != dsz)
19461 + return rc;
19462 + if (tipc_msg_assemble(list))
19463 +diff --git a/net/tipc/msg.h b/net/tipc/msg.h
19464 +index 5d64596ba9877..64ae4c4c44f8c 100644
19465 +--- a/net/tipc/msg.h
19466 ++++ b/net/tipc/msg.h
19467 +@@ -99,9 +99,10 @@ struct plist;
19468 + #define MAX_H_SIZE 60 /* Largest possible TIPC header size */
19469 +
19470 + #define MAX_MSG_SIZE (MAX_H_SIZE + TIPC_MAX_USER_MSG_SIZE)
19471 +-#define FB_MTU 3744
19472 + #define TIPC_MEDIA_INFO_OFFSET 5
19473 +
19474 ++extern const int one_page_mtu;
19475 ++
19476 + struct tipc_skb_cb {
19477 + union {
19478 + struct {
19479 +diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
19480 +index 3abe5257f7577..15395683b8e2a 100644
19481 +--- a/net/tls/tls_sw.c
19482 ++++ b/net/tls/tls_sw.c
19483 +@@ -1154,7 +1154,7 @@ static int tls_sw_do_sendpage(struct sock *sk, struct page *page,
19484 + int ret = 0;
19485 + bool eor;
19486 +
19487 +- eor = !(flags & (MSG_MORE | MSG_SENDPAGE_NOTLAST));
19488 ++ eor = !(flags & MSG_SENDPAGE_NOTLAST);
19489 + sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
19490 +
19491 + /* Call the sk_stream functions to manage the sndbuf mem. */
19492 +diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
19493 +index be9fd5a720117..3c7ce60fe9a5a 100644
19494 +--- a/net/xdp/xsk_queue.h
19495 ++++ b/net/xdp/xsk_queue.h
19496 +@@ -126,12 +126,15 @@ static inline bool xskq_cons_read_addr_unchecked(struct xsk_queue *q, u64 *addr)
19497 + static inline bool xp_aligned_validate_desc(struct xsk_buff_pool *pool,
19498 + struct xdp_desc *desc)
19499 + {
19500 +- u64 chunk;
19501 +-
19502 +- if (desc->len > pool->chunk_size)
19503 +- return false;
19504 ++ u64 chunk, chunk_end;
19505 +
19506 + chunk = xp_aligned_extract_addr(pool, desc->addr);
19507 ++ if (likely(desc->len)) {
19508 ++ chunk_end = xp_aligned_extract_addr(pool, desc->addr + desc->len - 1);
19509 ++ if (chunk != chunk_end)
19510 ++ return false;
19511 ++ }
19512 ++
19513 + if (chunk >= pool->addrs_cnt)
19514 + return false;
19515 +
19516 +diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c
19517 +index 6d6917b68856f..e843b0d9e2a61 100644
19518 +--- a/net/xfrm/xfrm_device.c
19519 ++++ b/net/xfrm/xfrm_device.c
19520 +@@ -268,6 +268,7 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
19521 + xso->num_exthdrs = 0;
19522 + xso->flags = 0;
19523 + xso->dev = NULL;
19524 ++ xso->real_dev = NULL;
19525 + dev_put(dev);
19526 +
19527 + if (err != -EOPNOTSUPP)
19528 +diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
19529 +index e4cb0ff4dcf41..ac907b9d32d1e 100644
19530 +--- a/net/xfrm/xfrm_output.c
19531 ++++ b/net/xfrm/xfrm_output.c
19532 +@@ -711,15 +711,8 @@ out:
19533 + static int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb)
19534 + {
19535 + #if IS_ENABLED(CONFIG_IPV6)
19536 +- unsigned int ptr = 0;
19537 + int err;
19538 +
19539 +- if (x->outer_mode.encap == XFRM_MODE_BEET &&
19540 +- ipv6_find_hdr(skb, &ptr, NEXTHDR_FRAGMENT, NULL, NULL) >= 0) {
19541 +- net_warn_ratelimited("BEET mode doesn't support inner IPv6 fragments\n");
19542 +- return -EAFNOSUPPORT;
19543 +- }
19544 +-
19545 + err = xfrm6_tunnel_check_size(skb);
19546 + if (err)
19547 + return err;
19548 +diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
19549 +index 77499abd9f992..c158e70e8ae10 100644
19550 +--- a/net/xfrm/xfrm_state.c
19551 ++++ b/net/xfrm/xfrm_state.c
19552 +@@ -2516,7 +2516,7 @@ void xfrm_state_delete_tunnel(struct xfrm_state *x)
19553 + }
19554 + EXPORT_SYMBOL(xfrm_state_delete_tunnel);
19555 +
19556 +-u32 xfrm_state_mtu(struct xfrm_state *x, int mtu)
19557 ++u32 __xfrm_state_mtu(struct xfrm_state *x, int mtu)
19558 + {
19559 + const struct xfrm_type *type = READ_ONCE(x->type);
19560 + struct crypto_aead *aead;
19561 +@@ -2547,7 +2547,17 @@ u32 xfrm_state_mtu(struct xfrm_state *x, int mtu)
19562 + return ((mtu - x->props.header_len - crypto_aead_authsize(aead) -
19563 + net_adj) & ~(blksize - 1)) + net_adj - 2;
19564 + }
19565 +-EXPORT_SYMBOL_GPL(xfrm_state_mtu);
19566 ++EXPORT_SYMBOL_GPL(__xfrm_state_mtu);
19567 ++
19568 ++u32 xfrm_state_mtu(struct xfrm_state *x, int mtu)
19569 ++{
19570 ++ mtu = __xfrm_state_mtu(x, mtu);
19571 ++
19572 ++ if (x->props.family == AF_INET6 && mtu < IPV6_MIN_MTU)
19573 ++ return IPV6_MIN_MTU;
19574 ++
19575 ++ return mtu;
19576 ++}
19577 +
19578 + int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload)
19579 + {
19580 +diff --git a/samples/bpf/xdp_redirect_user.c b/samples/bpf/xdp_redirect_user.c
19581 +index 9ca2bf457cdae..3c92adc2a7bd0 100644
19582 +--- a/samples/bpf/xdp_redirect_user.c
19583 ++++ b/samples/bpf/xdp_redirect_user.c
19584 +@@ -131,7 +131,7 @@ int main(int argc, char **argv)
19585 + if (!(xdp_flags & XDP_FLAGS_SKB_MODE))
19586 + xdp_flags |= XDP_FLAGS_DRV_MODE;
19587 +
19588 +- if (optind == argc) {
19589 ++ if (optind + 2 != argc) {
19590 + printf("usage: %s <IFNAME|IFINDEX>_IN <IFNAME|IFINDEX>_OUT\n", argv[0]);
19591 + return 1;
19592 + }
19593 +@@ -219,5 +219,5 @@ int main(int argc, char **argv)
19594 + poll_stats(2, ifindex_out);
19595 +
19596 + out:
19597 +- return 0;
19598 ++ return ret;
19599 + }
19600 +diff --git a/scripts/Makefile.build b/scripts/Makefile.build
19601 +index 4c058f12dd73c..8bd4e673383f3 100644
19602 +--- a/scripts/Makefile.build
19603 ++++ b/scripts/Makefile.build
19604 +@@ -275,7 +275,8 @@ define rule_as_o_S
19605 + endef
19606 +
19607 + # Built-in and composite module parts
19608 +-$(obj)/%.o: $(src)/%.c $(recordmcount_source) $(objtool_dep) FORCE
19609 ++.SECONDEXPANSION:
19610 ++$(obj)/%.o: $(src)/%.c $(recordmcount_source) $$(objtool_dep) FORCE
19611 + $(call if_changed_rule,cc_o_c)
19612 + $(call cmd,force_checksrc)
19613 +
19614 +@@ -356,7 +357,7 @@ cmd_modversions_S = \
19615 + fi
19616 + endif
19617 +
19618 +-$(obj)/%.o: $(src)/%.S $(objtool_dep) FORCE
19619 ++$(obj)/%.o: $(src)/%.S $$(objtool_dep) FORCE
19620 + $(call if_changed_rule,as_o_S)
19621 +
19622 + targets += $(filter-out $(subdir-builtin), $(real-obj-y))
19623 +diff --git a/scripts/tools-support-relr.sh b/scripts/tools-support-relr.sh
19624 +index 45e8aa360b457..cb55878bd5b81 100755
19625 +--- a/scripts/tools-support-relr.sh
19626 ++++ b/scripts/tools-support-relr.sh
19627 +@@ -7,7 +7,8 @@ trap "rm -f $tmp_file.o $tmp_file $tmp_file.bin" EXIT
19628 + cat << "END" | $CC -c -x c - -o $tmp_file.o >/dev/null 2>&1
19629 + void *p = &p;
19630 + END
19631 +-$LD $tmp_file.o -shared -Bsymbolic --pack-dyn-relocs=relr -o $tmp_file
19632 ++$LD $tmp_file.o -shared -Bsymbolic --pack-dyn-relocs=relr \
19633 ++ --use-android-relr-tags -o $tmp_file
19634 +
19635 + # Despite printing an error message, GNU nm still exits with exit code 0 if it
19636 + # sees a relr section. So we need to check that nothing is printed to stderr.
19637 +diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
19638 +index 76d19146d74bc..f1ca3cac9b861 100644
19639 +--- a/security/integrity/evm/evm_main.c
19640 ++++ b/security/integrity/evm/evm_main.c
19641 +@@ -521,7 +521,7 @@ void evm_inode_post_setattr(struct dentry *dentry, int ia_valid)
19642 + }
19643 +
19644 + /*
19645 +- * evm_inode_init_security - initializes security.evm
19646 ++ * evm_inode_init_security - initializes security.evm HMAC value
19647 + */
19648 + int evm_inode_init_security(struct inode *inode,
19649 + const struct xattr *lsm_xattr,
19650 +@@ -530,7 +530,8 @@ int evm_inode_init_security(struct inode *inode,
19651 + struct evm_xattr *xattr_data;
19652 + int rc;
19653 +
19654 +- if (!evm_key_loaded() || !evm_protected_xattr(lsm_xattr->name))
19655 ++ if (!(evm_initialized & EVM_INIT_HMAC) ||
19656 ++ !evm_protected_xattr(lsm_xattr->name))
19657 + return 0;
19658 +
19659 + xattr_data = kzalloc(sizeof(*xattr_data), GFP_NOFS);
19660 +diff --git a/security/integrity/evm/evm_secfs.c b/security/integrity/evm/evm_secfs.c
19661 +index cfc3075769bb0..bc10c945f3ed5 100644
19662 +--- a/security/integrity/evm/evm_secfs.c
19663 ++++ b/security/integrity/evm/evm_secfs.c
19664 +@@ -66,12 +66,13 @@ static ssize_t evm_read_key(struct file *filp, char __user *buf,
19665 + static ssize_t evm_write_key(struct file *file, const char __user *buf,
19666 + size_t count, loff_t *ppos)
19667 + {
19668 +- int i, ret;
19669 ++ unsigned int i;
19670 ++ int ret;
19671 +
19672 + if (!capable(CAP_SYS_ADMIN) || (evm_initialized & EVM_SETUP_COMPLETE))
19673 + return -EPERM;
19674 +
19675 +- ret = kstrtoint_from_user(buf, count, 0, &i);
19676 ++ ret = kstrtouint_from_user(buf, count, 0, &i);
19677 +
19678 + if (ret)
19679 + return ret;
19680 +@@ -80,12 +81,12 @@ static ssize_t evm_write_key(struct file *file, const char __user *buf,
19681 + if (!i || (i & ~EVM_INIT_MASK) != 0)
19682 + return -EINVAL;
19683 +
19684 +- /* Don't allow a request to freshly enable metadata writes if
19685 +- * keys are loaded.
19686 ++ /*
19687 ++ * Don't allow a request to enable metadata writes if
19688 ++ * an HMAC key is loaded.
19689 + */
19690 + if ((i & EVM_ALLOW_METADATA_WRITES) &&
19691 +- ((evm_initialized & EVM_KEY_MASK) != 0) &&
19692 +- !(evm_initialized & EVM_ALLOW_METADATA_WRITES))
19693 ++ (evm_initialized & EVM_INIT_HMAC) != 0)
19694 + return -EPERM;
19695 +
19696 + if (i & EVM_INIT_HMAC) {
19697 +diff --git a/sound/firewire/amdtp-stream.c b/sound/firewire/amdtp-stream.c
19698 +index 5805c5de39fbf..7a282d8e71485 100644
19699 +--- a/sound/firewire/amdtp-stream.c
19700 ++++ b/sound/firewire/amdtp-stream.c
19701 +@@ -1404,14 +1404,17 @@ int amdtp_domain_start(struct amdtp_domain *d, unsigned int ir_delay_cycle)
19702 + unsigned int queue_size;
19703 + struct amdtp_stream *s;
19704 + int cycle;
19705 ++ bool found = false;
19706 + int err;
19707 +
19708 + // Select an IT context as IRQ target.
19709 + list_for_each_entry(s, &d->streams, list) {
19710 +- if (s->direction == AMDTP_OUT_STREAM)
19711 ++ if (s->direction == AMDTP_OUT_STREAM) {
19712 ++ found = true;
19713 + break;
19714 ++ }
19715 + }
19716 +- if (!s)
19717 ++ if (!found)
19718 + return -ENXIO;
19719 + d->irq_target = s;
19720 +
19721 +diff --git a/sound/firewire/motu/motu-protocol-v2.c b/sound/firewire/motu/motu-protocol-v2.c
19722 +index e59e69ab1538b..784073aa10265 100644
19723 +--- a/sound/firewire/motu/motu-protocol-v2.c
19724 ++++ b/sound/firewire/motu/motu-protocol-v2.c
19725 +@@ -353,6 +353,7 @@ const struct snd_motu_spec snd_motu_spec_8pre = {
19726 + .protocol_version = SND_MOTU_PROTOCOL_V2,
19727 + .flags = SND_MOTU_SPEC_RX_MIDI_2ND_Q |
19728 + SND_MOTU_SPEC_TX_MIDI_2ND_Q,
19729 +- .tx_fixed_pcm_chunks = {10, 6, 0},
19730 +- .rx_fixed_pcm_chunks = {10, 6, 0},
19731 ++ // Two dummy chunks always in the end of data block.
19732 ++ .tx_fixed_pcm_chunks = {10, 10, 0},
19733 ++ .rx_fixed_pcm_chunks = {6, 6, 0},
19734 + };
19735 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
19736 +index e46e43dac6bfd..1cc83344c2ecf 100644
19737 +--- a/sound/pci/hda/patch_realtek.c
19738 ++++ b/sound/pci/hda/patch_realtek.c
19739 +@@ -385,6 +385,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
19740 + alc_update_coef_idx(codec, 0x67, 0xf000, 0x3000);
19741 + fallthrough;
19742 + case 0x10ec0215:
19743 ++ case 0x10ec0230:
19744 + case 0x10ec0233:
19745 + case 0x10ec0235:
19746 + case 0x10ec0236:
19747 +@@ -3153,6 +3154,7 @@ static void alc_disable_headset_jack_key(struct hda_codec *codec)
19748 + alc_update_coef_idx(codec, 0x49, 0x0045, 0x0);
19749 + alc_update_coef_idx(codec, 0x44, 0x0045 << 8, 0x0);
19750 + break;
19751 ++ case 0x10ec0230:
19752 + case 0x10ec0236:
19753 + case 0x10ec0256:
19754 + alc_write_coef_idx(codec, 0x48, 0x0);
19755 +@@ -3180,6 +3182,7 @@ static void alc_enable_headset_jack_key(struct hda_codec *codec)
19756 + alc_update_coef_idx(codec, 0x49, 0x007f, 0x0045);
19757 + alc_update_coef_idx(codec, 0x44, 0x007f << 8, 0x0045 << 8);
19758 + break;
19759 ++ case 0x10ec0230:
19760 + case 0x10ec0236:
19761 + case 0x10ec0256:
19762 + alc_write_coef_idx(codec, 0x48, 0xd011);
19763 +@@ -4737,6 +4740,7 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
19764 + case 0x10ec0255:
19765 + alc_process_coef_fw(codec, coef0255);
19766 + break;
19767 ++ case 0x10ec0230:
19768 + case 0x10ec0236:
19769 + case 0x10ec0256:
19770 + alc_process_coef_fw(codec, coef0256);
19771 +@@ -4851,6 +4855,7 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
19772 + alc_process_coef_fw(codec, coef0255);
19773 + snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50);
19774 + break;
19775 ++ case 0x10ec0230:
19776 + case 0x10ec0236:
19777 + case 0x10ec0256:
19778 + alc_write_coef_idx(codec, 0x45, 0xc489);
19779 +@@ -5000,6 +5005,7 @@ static void alc_headset_mode_default(struct hda_codec *codec)
19780 + case 0x10ec0255:
19781 + alc_process_coef_fw(codec, coef0255);
19782 + break;
19783 ++ case 0x10ec0230:
19784 + case 0x10ec0236:
19785 + case 0x10ec0256:
19786 + alc_write_coef_idx(codec, 0x1b, 0x0e4b);
19787 +@@ -5098,6 +5104,7 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
19788 + case 0x10ec0255:
19789 + alc_process_coef_fw(codec, coef0255);
19790 + break;
19791 ++ case 0x10ec0230:
19792 + case 0x10ec0236:
19793 + case 0x10ec0256:
19794 + alc_process_coef_fw(codec, coef0256);
19795 +@@ -5211,6 +5218,7 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
19796 + case 0x10ec0255:
19797 + alc_process_coef_fw(codec, coef0255);
19798 + break;
19799 ++ case 0x10ec0230:
19800 + case 0x10ec0236:
19801 + case 0x10ec0256:
19802 + alc_process_coef_fw(codec, coef0256);
19803 +@@ -5311,6 +5319,7 @@ static void alc_determine_headset_type(struct hda_codec *codec)
19804 + val = alc_read_coef_idx(codec, 0x46);
19805 + is_ctia = (val & 0x0070) == 0x0070;
19806 + break;
19807 ++ case 0x10ec0230:
19808 + case 0x10ec0236:
19809 + case 0x10ec0256:
19810 + alc_write_coef_idx(codec, 0x1b, 0x0e4b);
19811 +@@ -5604,6 +5613,7 @@ static void alc255_set_default_jack_type(struct hda_codec *codec)
19812 + case 0x10ec0255:
19813 + alc_process_coef_fw(codec, alc255fw);
19814 + break;
19815 ++ case 0x10ec0230:
19816 + case 0x10ec0236:
19817 + case 0x10ec0256:
19818 + alc_process_coef_fw(codec, alc256fw);
19819 +@@ -6204,6 +6214,7 @@ static void alc_combo_jack_hp_jd_restart(struct hda_codec *codec)
19820 + alc_update_coef_idx(codec, 0x4a, 0x8000, 1 << 15); /* Reset HP JD */
19821 + alc_update_coef_idx(codec, 0x4a, 0x8000, 0 << 15);
19822 + break;
19823 ++ case 0x10ec0230:
19824 + case 0x10ec0235:
19825 + case 0x10ec0236:
19826 + case 0x10ec0255:
19827 +@@ -6336,6 +6347,24 @@ static void alc_fixup_no_int_mic(struct hda_codec *codec,
19828 + }
19829 + }
19830 +
19831 ++static void alc285_fixup_hp_spectre_x360(struct hda_codec *codec,
19832 ++ const struct hda_fixup *fix, int action)
19833 ++{
19834 ++ static const hda_nid_t conn[] = { 0x02 };
19835 ++ static const struct hda_pintbl pincfgs[] = {
19836 ++ { 0x14, 0x90170110 }, /* rear speaker */
19837 ++ { }
19838 ++ };
19839 ++
19840 ++ switch (action) {
19841 ++ case HDA_FIXUP_ACT_PRE_PROBE:
19842 ++ snd_hda_apply_pincfgs(codec, pincfgs);
19843 ++ /* force front speaker to DAC1 */
19844 ++ snd_hda_override_conn_list(codec, 0x17, ARRAY_SIZE(conn), conn);
19845 ++ break;
19846 ++ }
19847 ++}
19848 ++
19849 + /* for hda_fixup_thinkpad_acpi() */
19850 + #include "thinkpad_helper.c"
19851 +
19852 +@@ -7802,6 +7831,8 @@ static const struct hda_fixup alc269_fixups[] = {
19853 + { 0x20, AC_VERB_SET_PROC_COEF, 0x4e4b },
19854 + { }
19855 + },
19856 ++ .chained = true,
19857 ++ .chain_id = ALC289_FIXUP_ASUS_GA401,
19858 + },
19859 + [ALC285_FIXUP_HP_GPIO_LED] = {
19860 + .type = HDA_FIXUP_FUNC,
19861 +@@ -8113,13 +8144,8 @@ static const struct hda_fixup alc269_fixups[] = {
19862 + .chain_id = ALC269_FIXUP_HP_LINE1_MIC1_LED,
19863 + },
19864 + [ALC285_FIXUP_HP_SPECTRE_X360] = {
19865 +- .type = HDA_FIXUP_PINS,
19866 +- .v.pins = (const struct hda_pintbl[]) {
19867 +- { 0x14, 0x90170110 }, /* enable top speaker */
19868 +- {}
19869 +- },
19870 +- .chained = true,
19871 +- .chain_id = ALC285_FIXUP_SPEAKER2_TO_DAC1,
19872 ++ .type = HDA_FIXUP_FUNC,
19873 ++ .v.func = alc285_fixup_hp_spectre_x360,
19874 + },
19875 + [ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP] = {
19876 + .type = HDA_FIXUP_FUNC,
19877 +@@ -8305,6 +8331,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
19878 + SND_PCI_QUIRK(0x103c, 0x84da, "HP OMEN dc0019-ur", ALC295_FIXUP_HP_OMEN),
19879 + SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
19880 + SND_PCI_QUIRK(0x103c, 0x8519, "HP Spectre x360 15-df0xxx", ALC285_FIXUP_HP_SPECTRE_X360),
19881 ++ SND_PCI_QUIRK(0x103c, 0x861f, "HP Elite Dragonfly G1", ALC285_FIXUP_HP_GPIO_AMP_INIT),
19882 + SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED),
19883 + SND_PCI_QUIRK(0x103c, 0x86c7, "HP Envy AiO 32", ALC274_FIXUP_HP_ENVY_GPIO),
19884 + SND_PCI_QUIRK(0x103c, 0x8716, "HP Elite Dragonfly G2 Notebook PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
19885 +@@ -8322,13 +8349,19 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
19886 + ALC285_FIXUP_HP_GPIO_AMP_INIT),
19887 + SND_PCI_QUIRK(0x103c, 0x87c8, "HP", ALC287_FIXUP_HP_GPIO_LED),
19888 + SND_PCI_QUIRK(0x103c, 0x87e5, "HP ProBook 440 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
19889 ++ SND_PCI_QUIRK(0x103c, 0x87e7, "HP ProBook 450 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
19890 ++ SND_PCI_QUIRK(0x103c, 0x87f1, "HP ProBook 630 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
19891 + SND_PCI_QUIRK(0x103c, 0x87f2, "HP ProBook 640 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
19892 + SND_PCI_QUIRK(0x103c, 0x87f4, "HP", ALC287_FIXUP_HP_GPIO_LED),
19893 + SND_PCI_QUIRK(0x103c, 0x87f5, "HP", ALC287_FIXUP_HP_GPIO_LED),
19894 + SND_PCI_QUIRK(0x103c, 0x87f7, "HP Spectre x360 14", ALC245_FIXUP_HP_X360_AMP),
19895 ++ SND_PCI_QUIRK(0x103c, 0x880d, "HP EliteBook 830 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
19896 + SND_PCI_QUIRK(0x103c, 0x8846, "HP EliteBook 850 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
19897 ++ SND_PCI_QUIRK(0x103c, 0x8847, "HP EliteBook x360 830 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
19898 + SND_PCI_QUIRK(0x103c, 0x884b, "HP EliteBook 840 Aero G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
19899 + SND_PCI_QUIRK(0x103c, 0x884c, "HP EliteBook 840 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
19900 ++ SND_PCI_QUIRK(0x103c, 0x8862, "HP ProBook 445 G8 Notebook PC", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
19901 ++ SND_PCI_QUIRK(0x103c, 0x8863, "HP ProBook 445 G8 Notebook PC", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
19902 + SND_PCI_QUIRK(0x103c, 0x886d, "HP ZBook Fury 17.3 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
19903 + SND_PCI_QUIRK(0x103c, 0x8870, "HP ZBook Fury 15.6 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
19904 + SND_PCI_QUIRK(0x103c, 0x8873, "HP ZBook Studio 15.6 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
19905 +@@ -9326,6 +9359,7 @@ static int patch_alc269(struct hda_codec *codec)
19906 + spec->shutup = alc256_shutup;
19907 + spec->init_hook = alc256_init;
19908 + break;
19909 ++ case 0x10ec0230:
19910 + case 0x10ec0236:
19911 + case 0x10ec0256:
19912 + spec->codec_variant = ALC269_TYPE_ALC256;
19913 +@@ -10617,6 +10651,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
19914 + HDA_CODEC_ENTRY(0x10ec0221, "ALC221", patch_alc269),
19915 + HDA_CODEC_ENTRY(0x10ec0222, "ALC222", patch_alc269),
19916 + HDA_CODEC_ENTRY(0x10ec0225, "ALC225", patch_alc269),
19917 ++ HDA_CODEC_ENTRY(0x10ec0230, "ALC236", patch_alc269),
19918 + HDA_CODEC_ENTRY(0x10ec0231, "ALC231", patch_alc269),
19919 + HDA_CODEC_ENTRY(0x10ec0233, "ALC233", patch_alc269),
19920 + HDA_CODEC_ENTRY(0x10ec0234, "ALC234", patch_alc269),
19921 +diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c
19922 +index 6fb6f36d0d377..3707dc27324d2 100644
19923 +--- a/sound/pci/intel8x0.c
19924 ++++ b/sound/pci/intel8x0.c
19925 +@@ -715,7 +715,7 @@ static inline void snd_intel8x0_update(struct intel8x0 *chip, struct ichdev *ich
19926 + int status, civ, i, step;
19927 + int ack = 0;
19928 +
19929 +- if (!ichdev->prepared || ichdev->suspended)
19930 ++ if (!(ichdev->prepared || chip->in_measurement) || ichdev->suspended)
19931 + return;
19932 +
19933 + spin_lock_irqsave(&chip->reg_lock, flags);
19934 +diff --git a/sound/soc/atmel/atmel-i2s.c b/sound/soc/atmel/atmel-i2s.c
19935 +index bbe2b638abb58..d870f56c44cfc 100644
19936 +--- a/sound/soc/atmel/atmel-i2s.c
19937 ++++ b/sound/soc/atmel/atmel-i2s.c
19938 +@@ -200,6 +200,7 @@ struct atmel_i2s_dev {
19939 + unsigned int fmt;
19940 + const struct atmel_i2s_gck_param *gck_param;
19941 + const struct atmel_i2s_caps *caps;
19942 ++ int clk_use_no;
19943 + };
19944 +
19945 + static irqreturn_t atmel_i2s_interrupt(int irq, void *dev_id)
19946 +@@ -321,9 +322,16 @@ static int atmel_i2s_hw_params(struct snd_pcm_substream *substream,
19947 + {
19948 + struct atmel_i2s_dev *dev = snd_soc_dai_get_drvdata(dai);
19949 + bool is_playback = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
19950 +- unsigned int mr = 0;
19951 ++ unsigned int mr = 0, mr_mask;
19952 + int ret;
19953 +
19954 ++ mr_mask = ATMEL_I2SC_MR_FORMAT_MASK | ATMEL_I2SC_MR_MODE_MASK |
19955 ++ ATMEL_I2SC_MR_DATALENGTH_MASK;
19956 ++ if (is_playback)
19957 ++ mr_mask |= ATMEL_I2SC_MR_TXMONO;
19958 ++ else
19959 ++ mr_mask |= ATMEL_I2SC_MR_RXMONO;
19960 ++
19961 + switch (dev->fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
19962 + case SND_SOC_DAIFMT_I2S:
19963 + mr |= ATMEL_I2SC_MR_FORMAT_I2S;
19964 +@@ -402,7 +410,7 @@ static int atmel_i2s_hw_params(struct snd_pcm_substream *substream,
19965 + return -EINVAL;
19966 + }
19967 +
19968 +- return regmap_write(dev->regmap, ATMEL_I2SC_MR, mr);
19969 ++ return regmap_update_bits(dev->regmap, ATMEL_I2SC_MR, mr_mask, mr);
19970 + }
19971 +
19972 + static int atmel_i2s_switch_mck_generator(struct atmel_i2s_dev *dev,
19973 +@@ -495,18 +503,28 @@ static int atmel_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
19974 + is_master = (mr & ATMEL_I2SC_MR_MODE_MASK) == ATMEL_I2SC_MR_MODE_MASTER;
19975 +
19976 + /* If master starts, enable the audio clock. */
19977 +- if (is_master && mck_enabled)
19978 +- err = atmel_i2s_switch_mck_generator(dev, true);
19979 +- if (err)
19980 +- return err;
19981 ++ if (is_master && mck_enabled) {
19982 ++ if (!dev->clk_use_no) {
19983 ++ err = atmel_i2s_switch_mck_generator(dev, true);
19984 ++ if (err)
19985 ++ return err;
19986 ++ }
19987 ++ dev->clk_use_no++;
19988 ++ }
19989 +
19990 + err = regmap_write(dev->regmap, ATMEL_I2SC_CR, cr);
19991 + if (err)
19992 + return err;
19993 +
19994 + /* If master stops, disable the audio clock. */
19995 +- if (is_master && !mck_enabled)
19996 +- err = atmel_i2s_switch_mck_generator(dev, false);
19997 ++ if (is_master && !mck_enabled) {
19998 ++ if (dev->clk_use_no == 1) {
19999 ++ err = atmel_i2s_switch_mck_generator(dev, false);
20000 ++ if (err)
20001 ++ return err;
20002 ++ }
20003 ++ dev->clk_use_no--;
20004 ++ }
20005 +
20006 + return err;
20007 + }
20008 +diff --git a/sound/soc/codecs/cs42l42.h b/sound/soc/codecs/cs42l42.h
20009 +index 866d7c873e3c9..ca2019732013e 100644
20010 +--- a/sound/soc/codecs/cs42l42.h
20011 ++++ b/sound/soc/codecs/cs42l42.h
20012 +@@ -77,7 +77,7 @@
20013 + #define CS42L42_HP_PDN_SHIFT 3
20014 + #define CS42L42_HP_PDN_MASK (1 << CS42L42_HP_PDN_SHIFT)
20015 + #define CS42L42_ADC_PDN_SHIFT 2
20016 +-#define CS42L42_ADC_PDN_MASK (1 << CS42L42_HP_PDN_SHIFT)
20017 ++#define CS42L42_ADC_PDN_MASK (1 << CS42L42_ADC_PDN_SHIFT)
20018 + #define CS42L42_PDN_ALL_SHIFT 0
20019 + #define CS42L42_PDN_ALL_MASK (1 << CS42L42_PDN_ALL_SHIFT)
20020 +
20021 +diff --git a/sound/soc/codecs/max98373-sdw.c b/sound/soc/codecs/max98373-sdw.c
20022 +index 14fd2f9a0bf3a..39afa011f0e27 100644
20023 +--- a/sound/soc/codecs/max98373-sdw.c
20024 ++++ b/sound/soc/codecs/max98373-sdw.c
20025 +@@ -258,7 +258,7 @@ static __maybe_unused int max98373_resume(struct device *dev)
20026 + struct max98373_priv *max98373 = dev_get_drvdata(dev);
20027 + unsigned long time;
20028 +
20029 +- if (!max98373->hw_init)
20030 ++ if (!max98373->first_hw_init)
20031 + return 0;
20032 +
20033 + if (!slave->unattach_request)
20034 +@@ -349,7 +349,7 @@ static int max98373_io_init(struct sdw_slave *slave)
20035 + struct device *dev = &slave->dev;
20036 + struct max98373_priv *max98373 = dev_get_drvdata(dev);
20037 +
20038 +- if (max98373->pm_init_once) {
20039 ++ if (max98373->first_hw_init) {
20040 + regcache_cache_only(max98373->regmap, false);
20041 + regcache_cache_bypass(max98373->regmap, true);
20042 + }
20043 +@@ -357,7 +357,7 @@ static int max98373_io_init(struct sdw_slave *slave)
20044 + /*
20045 + * PM runtime is only enabled when a Slave reports as Attached
20046 + */
20047 +- if (!max98373->pm_init_once) {
20048 ++ if (!max98373->first_hw_init) {
20049 + /* set autosuspend parameters */
20050 + pm_runtime_set_autosuspend_delay(dev, 3000);
20051 + pm_runtime_use_autosuspend(dev);
20052 +@@ -449,12 +449,12 @@ static int max98373_io_init(struct sdw_slave *slave)
20053 + regmap_write(max98373->regmap, MAX98373_R20B5_BDE_EN, 1);
20054 + regmap_write(max98373->regmap, MAX98373_R20E2_LIMITER_EN, 1);
20055 +
20056 +- if (max98373->pm_init_once) {
20057 ++ if (max98373->first_hw_init) {
20058 + regcache_cache_bypass(max98373->regmap, false);
20059 + regcache_mark_dirty(max98373->regmap);
20060 + }
20061 +
20062 +- max98373->pm_init_once = true;
20063 ++ max98373->first_hw_init = true;
20064 + max98373->hw_init = true;
20065 +
20066 + pm_runtime_mark_last_busy(dev);
20067 +@@ -773,7 +773,7 @@ static int max98373_init(struct sdw_slave *slave, struct regmap *regmap)
20068 + max98373_slot_config(dev, max98373);
20069 +
20070 + max98373->hw_init = false;
20071 +- max98373->pm_init_once = false;
20072 ++ max98373->first_hw_init = false;
20073 +
20074 + /* codec registration */
20075 + ret = devm_snd_soc_register_component(dev, &soc_codec_dev_max98373_sdw,
20076 +diff --git a/sound/soc/codecs/max98373.h b/sound/soc/codecs/max98373.h
20077 +index 4ab29b9d51c74..010f6bb21e9a1 100644
20078 +--- a/sound/soc/codecs/max98373.h
20079 ++++ b/sound/soc/codecs/max98373.h
20080 +@@ -215,7 +215,7 @@ struct max98373_priv {
20081 + /* variables to support soundwire */
20082 + struct sdw_slave *slave;
20083 + bool hw_init;
20084 +- bool pm_init_once;
20085 ++ bool first_hw_init;
20086 + int slot;
20087 + unsigned int rx_mask;
20088 + };
20089 +diff --git a/sound/soc/codecs/rk3328_codec.c b/sound/soc/codecs/rk3328_codec.c
20090 +index 940a2fa933edb..aed18cbb9f68e 100644
20091 +--- a/sound/soc/codecs/rk3328_codec.c
20092 ++++ b/sound/soc/codecs/rk3328_codec.c
20093 +@@ -474,7 +474,8 @@ static int rk3328_platform_probe(struct platform_device *pdev)
20094 + rk3328->pclk = devm_clk_get(&pdev->dev, "pclk");
20095 + if (IS_ERR(rk3328->pclk)) {
20096 + dev_err(&pdev->dev, "can't get acodec pclk\n");
20097 +- return PTR_ERR(rk3328->pclk);
20098 ++ ret = PTR_ERR(rk3328->pclk);
20099 ++ goto err_unprepare_mclk;
20100 + }
20101 +
20102 + ret = clk_prepare_enable(rk3328->pclk);
20103 +@@ -484,19 +485,34 @@ static int rk3328_platform_probe(struct platform_device *pdev)
20104 + }
20105 +
20106 + base = devm_platform_ioremap_resource(pdev, 0);
20107 +- if (IS_ERR(base))
20108 +- return PTR_ERR(base);
20109 ++ if (IS_ERR(base)) {
20110 ++ ret = PTR_ERR(base);
20111 ++ goto err_unprepare_pclk;
20112 ++ }
20113 +
20114 + rk3328->regmap = devm_regmap_init_mmio(&pdev->dev, base,
20115 + &rk3328_codec_regmap_config);
20116 +- if (IS_ERR(rk3328->regmap))
20117 +- return PTR_ERR(rk3328->regmap);
20118 ++ if (IS_ERR(rk3328->regmap)) {
20119 ++ ret = PTR_ERR(rk3328->regmap);
20120 ++ goto err_unprepare_pclk;
20121 ++ }
20122 +
20123 + platform_set_drvdata(pdev, rk3328);
20124 +
20125 +- return devm_snd_soc_register_component(&pdev->dev, &soc_codec_rk3328,
20126 ++ ret = devm_snd_soc_register_component(&pdev->dev, &soc_codec_rk3328,
20127 + rk3328_dai,
20128 + ARRAY_SIZE(rk3328_dai));
20129 ++ if (ret)
20130 ++ goto err_unprepare_pclk;
20131 ++
20132 ++ return 0;
20133 ++
20134 ++err_unprepare_pclk:
20135 ++ clk_disable_unprepare(rk3328->pclk);
20136 ++
20137 ++err_unprepare_mclk:
20138 ++ clk_disable_unprepare(rk3328->mclk);
20139 ++ return ret;
20140 + }
20141 +
20142 + static const struct of_device_id rk3328_codec_of_match[] = {
20143 +diff --git a/sound/soc/codecs/rt1308-sdw.c b/sound/soc/codecs/rt1308-sdw.c
20144 +index c2621b0afe6c1..31daa749c3db4 100644
20145 +--- a/sound/soc/codecs/rt1308-sdw.c
20146 ++++ b/sound/soc/codecs/rt1308-sdw.c
20147 +@@ -709,7 +709,7 @@ static int __maybe_unused rt1308_dev_resume(struct device *dev)
20148 + struct rt1308_sdw_priv *rt1308 = dev_get_drvdata(dev);
20149 + unsigned long time;
20150 +
20151 +- if (!rt1308->hw_init)
20152 ++ if (!rt1308->first_hw_init)
20153 + return 0;
20154 +
20155 + if (!slave->unattach_request)
20156 +diff --git a/sound/soc/codecs/rt5682-i2c.c b/sound/soc/codecs/rt5682-i2c.c
20157 +index 7e652843c57d9..547445d1e3c69 100644
20158 +--- a/sound/soc/codecs/rt5682-i2c.c
20159 ++++ b/sound/soc/codecs/rt5682-i2c.c
20160 +@@ -268,6 +268,7 @@ static void rt5682_i2c_shutdown(struct i2c_client *client)
20161 + {
20162 + struct rt5682_priv *rt5682 = i2c_get_clientdata(client);
20163 +
20164 ++ disable_irq(client->irq);
20165 + cancel_delayed_work_sync(&rt5682->jack_detect_work);
20166 + cancel_delayed_work_sync(&rt5682->jd_check_work);
20167 +
20168 +diff --git a/sound/soc/codecs/rt5682-sdw.c b/sound/soc/codecs/rt5682-sdw.c
20169 +index aa6c325faeab2..c9868dd096fcd 100644
20170 +--- a/sound/soc/codecs/rt5682-sdw.c
20171 ++++ b/sound/soc/codecs/rt5682-sdw.c
20172 +@@ -375,18 +375,12 @@ static int rt5682_sdw_init(struct device *dev, struct regmap *regmap,
20173 + static int rt5682_io_init(struct device *dev, struct sdw_slave *slave)
20174 + {
20175 + struct rt5682_priv *rt5682 = dev_get_drvdata(dev);
20176 +- int ret = 0;
20177 ++ int ret = 0, loop = 10;
20178 + unsigned int val;
20179 +
20180 + if (rt5682->hw_init)
20181 + return 0;
20182 +
20183 +- regmap_read(rt5682->regmap, RT5682_DEVICE_ID, &val);
20184 +- if (val != DEVICE_ID) {
20185 +- dev_err(dev, "Device with ID register %x is not rt5682\n", val);
20186 +- return -ENODEV;
20187 +- }
20188 +-
20189 + /*
20190 + * PM runtime is only enabled when a Slave reports as Attached
20191 + */
20192 +@@ -411,6 +405,19 @@ static int rt5682_io_init(struct device *dev, struct sdw_slave *slave)
20193 + regcache_cache_bypass(rt5682->regmap, true);
20194 + }
20195 +
20196 ++ while (loop > 0) {
20197 ++ regmap_read(rt5682->regmap, RT5682_DEVICE_ID, &val);
20198 ++ if (val == DEVICE_ID)
20199 ++ break;
20200 ++ dev_warn(dev, "Device with ID register %x is not rt5682\n", val);
20201 ++ usleep_range(30000, 30005);
20202 ++ loop--;
20203 ++ }
20204 ++ if (val != DEVICE_ID) {
20205 ++ dev_err(dev, "Device with ID register %x is not rt5682\n", val);
20206 ++ return -ENODEV;
20207 ++ }
20208 ++
20209 + rt5682_calibrate(rt5682);
20210 +
20211 + if (rt5682->first_hw_init) {
20212 +@@ -734,7 +741,7 @@ static int __maybe_unused rt5682_dev_resume(struct device *dev)
20213 + struct rt5682_priv *rt5682 = dev_get_drvdata(dev);
20214 + unsigned long time;
20215 +
20216 +- if (!rt5682->hw_init)
20217 ++ if (!rt5682->first_hw_init)
20218 + return 0;
20219 +
20220 + if (!slave->unattach_request)
20221 +diff --git a/sound/soc/codecs/rt700-sdw.c b/sound/soc/codecs/rt700-sdw.c
20222 +index fb77e77a4ebd5..3a1db79030d71 100644
20223 +--- a/sound/soc/codecs/rt700-sdw.c
20224 ++++ b/sound/soc/codecs/rt700-sdw.c
20225 +@@ -498,7 +498,7 @@ static int __maybe_unused rt700_dev_resume(struct device *dev)
20226 + struct rt700_priv *rt700 = dev_get_drvdata(dev);
20227 + unsigned long time;
20228 +
20229 +- if (!rt700->hw_init)
20230 ++ if (!rt700->first_hw_init)
20231 + return 0;
20232 +
20233 + if (!slave->unattach_request)
20234 +diff --git a/sound/soc/codecs/rt711-sdw.c b/sound/soc/codecs/rt711-sdw.c
20235 +index f0a0691bd31cc..eb54e90c1c604 100644
20236 +--- a/sound/soc/codecs/rt711-sdw.c
20237 ++++ b/sound/soc/codecs/rt711-sdw.c
20238 +@@ -500,7 +500,7 @@ static int __maybe_unused rt711_dev_resume(struct device *dev)
20239 + struct rt711_priv *rt711 = dev_get_drvdata(dev);
20240 + unsigned long time;
20241 +
20242 +- if (!rt711->hw_init)
20243 ++ if (!rt711->first_hw_init)
20244 + return 0;
20245 +
20246 + if (!slave->unattach_request)
20247 +diff --git a/sound/soc/codecs/rt715-sdw.c b/sound/soc/codecs/rt715-sdw.c
20248 +index 8f0aa1e8a2737..361a90ae594cd 100644
20249 +--- a/sound/soc/codecs/rt715-sdw.c
20250 ++++ b/sound/soc/codecs/rt715-sdw.c
20251 +@@ -541,7 +541,7 @@ static int __maybe_unused rt715_dev_resume(struct device *dev)
20252 + struct rt715_priv *rt715 = dev_get_drvdata(dev);
20253 + unsigned long time;
20254 +
20255 +- if (!rt715->hw_init)
20256 ++ if (!rt715->first_hw_init)
20257 + return 0;
20258 +
20259 + if (!slave->unattach_request)
20260 +diff --git a/sound/soc/fsl/fsl_spdif.c b/sound/soc/fsl/fsl_spdif.c
20261 +index b0f643fefe1e8..15bcb0f38ec9e 100644
20262 +--- a/sound/soc/fsl/fsl_spdif.c
20263 ++++ b/sound/soc/fsl/fsl_spdif.c
20264 +@@ -1358,14 +1358,27 @@ static int fsl_spdif_probe(struct platform_device *pdev)
20265 + &spdif_priv->cpu_dai_drv, 1);
20266 + if (ret) {
20267 + dev_err(&pdev->dev, "failed to register DAI: %d\n", ret);
20268 +- return ret;
20269 ++ goto err_pm_disable;
20270 + }
20271 +
20272 + ret = imx_pcm_dma_init(pdev, IMX_SPDIF_DMABUF_SIZE);
20273 +- if (ret && ret != -EPROBE_DEFER)
20274 +- dev_err(&pdev->dev, "imx_pcm_dma_init failed: %d\n", ret);
20275 ++ if (ret) {
20276 ++ dev_err_probe(&pdev->dev, ret, "imx_pcm_dma_init failed\n");
20277 ++ goto err_pm_disable;
20278 ++ }
20279 +
20280 + return ret;
20281 ++
20282 ++err_pm_disable:
20283 ++ pm_runtime_disable(&pdev->dev);
20284 ++ return ret;
20285 ++}
20286 ++
20287 ++static int fsl_spdif_remove(struct platform_device *pdev)
20288 ++{
20289 ++ pm_runtime_disable(&pdev->dev);
20290 ++
20291 ++ return 0;
20292 + }
20293 +
20294 + #ifdef CONFIG_PM
20295 +@@ -1374,6 +1387,9 @@ static int fsl_spdif_runtime_suspend(struct device *dev)
20296 + struct fsl_spdif_priv *spdif_priv = dev_get_drvdata(dev);
20297 + int i;
20298 +
20299 ++ /* Disable all the interrupts */
20300 ++ regmap_update_bits(spdif_priv->regmap, REG_SPDIF_SIE, 0xffffff, 0);
20301 ++
20302 + regmap_read(spdif_priv->regmap, REG_SPDIF_SRPC,
20303 + &spdif_priv->regcache_srpc);
20304 + regcache_cache_only(spdif_priv->regmap, true);
20305 +@@ -1469,6 +1485,7 @@ static struct platform_driver fsl_spdif_driver = {
20306 + .pm = &fsl_spdif_pm,
20307 + },
20308 + .probe = fsl_spdif_probe,
20309 ++ .remove = fsl_spdif_remove,
20310 + };
20311 +
20312 + module_platform_driver(fsl_spdif_driver);
20313 +diff --git a/sound/soc/hisilicon/hi6210-i2s.c b/sound/soc/hisilicon/hi6210-i2s.c
20314 +index 907f5f1f7b445..ff05b9779e4be 100644
20315 +--- a/sound/soc/hisilicon/hi6210-i2s.c
20316 ++++ b/sound/soc/hisilicon/hi6210-i2s.c
20317 +@@ -102,18 +102,15 @@ static int hi6210_i2s_startup(struct snd_pcm_substream *substream,
20318 +
20319 + for (n = 0; n < i2s->clocks; n++) {
20320 + ret = clk_prepare_enable(i2s->clk[n]);
20321 +- if (ret) {
20322 +- while (n--)
20323 +- clk_disable_unprepare(i2s->clk[n]);
20324 +- return ret;
20325 +- }
20326 ++ if (ret)
20327 ++ goto err_unprepare_clk;
20328 + }
20329 +
20330 + ret = clk_set_rate(i2s->clk[CLK_I2S_BASE], 49152000);
20331 + if (ret) {
20332 + dev_err(i2s->dev, "%s: setting 49.152MHz base rate failed %d\n",
20333 + __func__, ret);
20334 +- return ret;
20335 ++ goto err_unprepare_clk;
20336 + }
20337 +
20338 + /* enable clock before frequency division */
20339 +@@ -165,6 +162,11 @@ static int hi6210_i2s_startup(struct snd_pcm_substream *substream,
20340 + hi6210_write_reg(i2s, HII2S_SW_RST_N, val);
20341 +
20342 + return 0;
20343 ++
20344 ++err_unprepare_clk:
20345 ++ while (n--)
20346 ++ clk_disable_unprepare(i2s->clk[n]);
20347 ++ return ret;
20348 + }
20349 +
20350 + static void hi6210_i2s_shutdown(struct snd_pcm_substream *substream,
20351 +diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
20352 +index 9dc982c2c7760..75a0bfedb4493 100644
20353 +--- a/sound/soc/intel/boards/sof_sdw.c
20354 ++++ b/sound/soc/intel/boards/sof_sdw.c
20355 +@@ -196,6 +196,7 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
20356 + },
20357 + .driver_data = (void *)(SOF_RT711_JD_SRC_JD1 |
20358 + SOF_SDW_TGL_HDMI |
20359 ++ SOF_RT715_DAI_ID_FIX |
20360 + SOF_SDW_PCH_DMIC),
20361 + },
20362 + {}
20363 +diff --git a/sound/soc/mediatek/common/mtk-btcvsd.c b/sound/soc/mediatek/common/mtk-btcvsd.c
20364 +index 668fef3e319a0..86e982e3209ed 100644
20365 +--- a/sound/soc/mediatek/common/mtk-btcvsd.c
20366 ++++ b/sound/soc/mediatek/common/mtk-btcvsd.c
20367 +@@ -1281,7 +1281,7 @@ static const struct snd_soc_component_driver mtk_btcvsd_snd_platform = {
20368 +
20369 + static int mtk_btcvsd_snd_probe(struct platform_device *pdev)
20370 + {
20371 +- int ret = 0;
20372 ++ int ret;
20373 + int irq_id;
20374 + u32 offset[5] = {0, 0, 0, 0, 0};
20375 + struct mtk_btcvsd_snd *btcvsd;
20376 +@@ -1337,7 +1337,8 @@ static int mtk_btcvsd_snd_probe(struct platform_device *pdev)
20377 + btcvsd->bt_sram_bank2_base = of_iomap(dev->of_node, 1);
20378 + if (!btcvsd->bt_sram_bank2_base) {
20379 + dev_err(dev, "iomap bt_sram_bank2_base fail\n");
20380 +- return -EIO;
20381 ++ ret = -EIO;
20382 ++ goto unmap_pkv_err;
20383 + }
20384 +
20385 + btcvsd->infra = syscon_regmap_lookup_by_phandle(dev->of_node,
20386 +@@ -1345,7 +1346,8 @@ static int mtk_btcvsd_snd_probe(struct platform_device *pdev)
20387 + if (IS_ERR(btcvsd->infra)) {
20388 + dev_err(dev, "cannot find infra controller: %ld\n",
20389 + PTR_ERR(btcvsd->infra));
20390 +- return PTR_ERR(btcvsd->infra);
20391 ++ ret = PTR_ERR(btcvsd->infra);
20392 ++ goto unmap_bank2_err;
20393 + }
20394 +
20395 + /* get offset */
20396 +@@ -1354,7 +1356,7 @@ static int mtk_btcvsd_snd_probe(struct platform_device *pdev)
20397 + ARRAY_SIZE(offset));
20398 + if (ret) {
20399 + dev_warn(dev, "%s(), get offset fail, ret %d\n", __func__, ret);
20400 +- return ret;
20401 ++ goto unmap_bank2_err;
20402 + }
20403 + btcvsd->infra_misc_offset = offset[0];
20404 + btcvsd->conn_bt_cvsd_mask = offset[1];
20405 +@@ -1373,8 +1375,18 @@ static int mtk_btcvsd_snd_probe(struct platform_device *pdev)
20406 + mtk_btcvsd_snd_set_state(btcvsd, btcvsd->tx, BT_SCO_STATE_IDLE);
20407 + mtk_btcvsd_snd_set_state(btcvsd, btcvsd->rx, BT_SCO_STATE_IDLE);
20408 +
20409 +- return devm_snd_soc_register_component(dev, &mtk_btcvsd_snd_platform,
20410 +- NULL, 0);
20411 ++ ret = devm_snd_soc_register_component(dev, &mtk_btcvsd_snd_platform,
20412 ++ NULL, 0);
20413 ++ if (ret)
20414 ++ goto unmap_bank2_err;
20415 ++
20416 ++ return 0;
20417 ++
20418 ++unmap_bank2_err:
20419 ++ iounmap(btcvsd->bt_sram_bank2_base);
20420 ++unmap_pkv_err:
20421 ++ iounmap(btcvsd->bt_pkv_base);
20422 ++ return ret;
20423 + }
20424 +
20425 + static int mtk_btcvsd_snd_remove(struct platform_device *pdev)
20426 +diff --git a/sound/soc/sh/rcar/adg.c b/sound/soc/sh/rcar/adg.c
20427 +index b9aacf3d3b29c..7532ab27a48df 100644
20428 +--- a/sound/soc/sh/rcar/adg.c
20429 ++++ b/sound/soc/sh/rcar/adg.c
20430 +@@ -289,7 +289,6 @@ static void rsnd_adg_set_ssi_clk(struct rsnd_mod *ssi_mod, u32 val)
20431 + int rsnd_adg_clk_query(struct rsnd_priv *priv, unsigned int rate)
20432 + {
20433 + struct rsnd_adg *adg = rsnd_priv_to_adg(priv);
20434 +- struct clk *clk;
20435 + int i;
20436 + int sel_table[] = {
20437 + [CLKA] = 0x1,
20438 +@@ -302,10 +301,9 @@ int rsnd_adg_clk_query(struct rsnd_priv *priv, unsigned int rate)
20439 + * find suitable clock from
20440 + * AUDIO_CLKA/AUDIO_CLKB/AUDIO_CLKC/AUDIO_CLKI.
20441 + */
20442 +- for_each_rsnd_clk(clk, adg, i) {
20443 ++ for (i = 0; i < CLKMAX; i++)
20444 + if (rate == adg->clk_rate[i])
20445 + return sel_table[i];
20446 +- }
20447 +
20448 + /*
20449 + * find divided clock from BRGA/BRGB
20450 +diff --git a/sound/usb/format.c b/sound/usb/format.c
20451 +index 91f0ed4a2e7eb..5c5b76c611480 100644
20452 +--- a/sound/usb/format.c
20453 ++++ b/sound/usb/format.c
20454 +@@ -208,9 +208,11 @@ static int parse_audio_format_rates_v1(struct snd_usb_audio *chip, struct audiof
20455 + continue;
20456 + /* C-Media CM6501 mislabels its 96 kHz altsetting */
20457 + /* Terratec Aureon 7.1 USB C-Media 6206, too */
20458 ++ /* Ozone Z90 USB C-Media, too */
20459 + if (rate == 48000 && nr_rates == 1 &&
20460 + (chip->usb_id == USB_ID(0x0d8c, 0x0201) ||
20461 + chip->usb_id == USB_ID(0x0d8c, 0x0102) ||
20462 ++ chip->usb_id == USB_ID(0x0d8c, 0x0078) ||
20463 + chip->usb_id == USB_ID(0x0ccd, 0x00b1)) &&
20464 + fp->altsetting == 5 && fp->maxpacksize == 392)
20465 + rate = 96000;
20466 +diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
20467 +index 375cfb9c9ab7e..8e11582fbae98 100644
20468 +--- a/sound/usb/mixer.c
20469 ++++ b/sound/usb/mixer.c
20470 +@@ -3273,8 +3273,9 @@ static void snd_usb_mixer_dump_cval(struct snd_info_buffer *buffer,
20471 + struct usb_mixer_elem_list *list)
20472 + {
20473 + struct usb_mixer_elem_info *cval = mixer_elem_list_to_info(list);
20474 +- static const char * const val_types[] = {"BOOLEAN", "INV_BOOLEAN",
20475 +- "S8", "U8", "S16", "U16"};
20476 ++ static const char * const val_types[] = {
20477 ++ "BOOLEAN", "INV_BOOLEAN", "S8", "U8", "S16", "U16", "S32", "U32",
20478 ++ };
20479 + snd_iprintf(buffer, " Info: id=%i, control=%i, cmask=0x%x, "
20480 + "channels=%i, type=\"%s\"\n", cval->head.id,
20481 + cval->control, cval->cmask, cval->channels,
20482 +@@ -3630,6 +3631,9 @@ static int restore_mixer_value(struct usb_mixer_elem_list *list)
20483 + struct usb_mixer_elem_info *cval = mixer_elem_list_to_info(list);
20484 + int c, err, idx;
20485 +
20486 ++ if (cval->val_type == USB_MIXER_BESPOKEN)
20487 ++ return 0;
20488 ++
20489 + if (cval->cmask) {
20490 + idx = 0;
20491 + for (c = 0; c < MAX_CHANNELS; c++) {
20492 +diff --git a/sound/usb/mixer.h b/sound/usb/mixer.h
20493 +index c29e27ac43a7a..6d20ba7ee88fd 100644
20494 +--- a/sound/usb/mixer.h
20495 ++++ b/sound/usb/mixer.h
20496 +@@ -55,6 +55,7 @@ enum {
20497 + USB_MIXER_U16,
20498 + USB_MIXER_S32,
20499 + USB_MIXER_U32,
20500 ++ USB_MIXER_BESPOKEN, /* non-standard type */
20501 + };
20502 +
20503 + typedef void (*usb_mixer_elem_dump_func_t)(struct snd_info_buffer *buffer,
20504 +diff --git a/sound/usb/mixer_scarlett_gen2.c b/sound/usb/mixer_scarlett_gen2.c
20505 +index 9a98b0c048e33..97e72b3e06c26 100644
20506 +--- a/sound/usb/mixer_scarlett_gen2.c
20507 ++++ b/sound/usb/mixer_scarlett_gen2.c
20508 +@@ -949,10 +949,15 @@ static int scarlett2_add_new_ctl(struct usb_mixer_interface *mixer,
20509 + if (!elem)
20510 + return -ENOMEM;
20511 +
20512 ++ /* We set USB_MIXER_BESPOKEN type, so that the core USB mixer code
20513 ++ * ignores them for resume and other operations.
20514 ++ * Also, the head.id field is set to 0, as we don't use this field.
20515 ++ */
20516 + elem->head.mixer = mixer;
20517 + elem->control = index;
20518 +- elem->head.id = index;
20519 ++ elem->head.id = 0;
20520 + elem->channels = channels;
20521 ++ elem->val_type = USB_MIXER_BESPOKEN;
20522 +
20523 + kctl = snd_ctl_new1(ncontrol, elem);
20524 + if (!kctl) {
20525 +diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c
20526 +index 33068d6ed5d6c..c58a135dc355e 100644
20527 +--- a/tools/bpf/bpftool/main.c
20528 ++++ b/tools/bpf/bpftool/main.c
20529 +@@ -338,8 +338,10 @@ static int do_batch(int argc, char **argv)
20530 + n_argc = make_args(buf, n_argv, BATCH_ARG_NB_MAX, lines);
20531 + if (!n_argc)
20532 + continue;
20533 +- if (n_argc < 0)
20534 ++ if (n_argc < 0) {
20535 ++ err = n_argc;
20536 + goto err_close;
20537 ++ }
20538 +
20539 + if (json_output) {
20540 + jsonw_start_object(json_wtr);
20541 +diff --git a/tools/bpf/resolve_btfids/main.c b/tools/bpf/resolve_btfids/main.c
20542 +index d636643ddd358..f32c059fbfb4f 100644
20543 +--- a/tools/bpf/resolve_btfids/main.c
20544 ++++ b/tools/bpf/resolve_btfids/main.c
20545 +@@ -649,6 +649,9 @@ static int symbols_patch(struct object *obj)
20546 + if (sets_patch(obj))
20547 + return -1;
20548 +
20549 ++ /* Set type to ensure endian translation occurs. */
20550 ++ obj->efile.idlist->d_type = ELF_T_WORD;
20551 ++
20552 + elf_flagdata(obj->efile.idlist, ELF_C_SET, ELF_F_DIRTY);
20553 +
20554 + err = elf_update(obj->efile.elf, ELF_C_WRITE);
20555 +diff --git a/tools/perf/util/llvm-utils.c b/tools/perf/util/llvm-utils.c
20556 +index dbdffb6673feb..0bf6b4d4c90a7 100644
20557 +--- a/tools/perf/util/llvm-utils.c
20558 ++++ b/tools/perf/util/llvm-utils.c
20559 +@@ -504,6 +504,7 @@ int llvm__compile_bpf(const char *path, void **p_obj_buf,
20560 + goto errout;
20561 + }
20562 +
20563 ++ err = -ENOMEM;
20564 + if (asprintf(&pipe_template, "%s -emit-llvm | %s -march=bpf %s -filetype=obj -o -",
20565 + template, llc_path, opts) < 0) {
20566 + pr_err("ERROR:\tnot enough memory to setup command line\n");
20567 +@@ -524,6 +525,7 @@ int llvm__compile_bpf(const char *path, void **p_obj_buf,
20568 +
20569 + pr_debug("llvm compiling command template: %s\n", template);
20570 +
20571 ++ err = -ENOMEM;
20572 + if (asprintf(&command_echo, "echo -n \"%s\"", template) < 0)
20573 + goto errout;
20574 +
20575 +diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
20576 +index c83c2c6564e01..23dc5014e7119 100644
20577 +--- a/tools/perf/util/scripting-engines/trace-event-python.c
20578 ++++ b/tools/perf/util/scripting-engines/trace-event-python.c
20579 +@@ -934,7 +934,7 @@ static PyObject *tuple_new(unsigned int sz)
20580 + return t;
20581 + }
20582 +
20583 +-static int tuple_set_u64(PyObject *t, unsigned int pos, u64 val)
20584 ++static int tuple_set_s64(PyObject *t, unsigned int pos, s64 val)
20585 + {
20586 + #if BITS_PER_LONG == 64
20587 + return PyTuple_SetItem(t, pos, _PyLong_FromLong(val));
20588 +@@ -944,6 +944,22 @@ static int tuple_set_u64(PyObject *t, unsigned int pos, u64 val)
20589 + #endif
20590 + }
20591 +
20592 ++/*
20593 ++ * Databases support only signed 64-bit numbers, so even though we are
20594 ++ * exporting a u64, it must be as s64.
20595 ++ */
20596 ++#define tuple_set_d64 tuple_set_s64
20597 ++
20598 ++static int tuple_set_u64(PyObject *t, unsigned int pos, u64 val)
20599 ++{
20600 ++#if BITS_PER_LONG == 64
20601 ++ return PyTuple_SetItem(t, pos, PyLong_FromUnsignedLong(val));
20602 ++#endif
20603 ++#if BITS_PER_LONG == 32
20604 ++ return PyTuple_SetItem(t, pos, PyLong_FromUnsignedLongLong(val));
20605 ++#endif
20606 ++}
20607 ++
20608 + static int tuple_set_s32(PyObject *t, unsigned int pos, s32 val)
20609 + {
20610 + return PyTuple_SetItem(t, pos, _PyLong_FromLong(val));
20611 +@@ -967,7 +983,7 @@ static int python_export_evsel(struct db_export *dbe, struct evsel *evsel)
20612 +
20613 + t = tuple_new(2);
20614 +
20615 +- tuple_set_u64(t, 0, evsel->db_id);
20616 ++ tuple_set_d64(t, 0, evsel->db_id);
20617 + tuple_set_string(t, 1, evsel__name(evsel));
20618 +
20619 + call_object(tables->evsel_handler, t, "evsel_table");
20620 +@@ -985,7 +1001,7 @@ static int python_export_machine(struct db_export *dbe,
20621 +
20622 + t = tuple_new(3);
20623 +
20624 +- tuple_set_u64(t, 0, machine->db_id);
20625 ++ tuple_set_d64(t, 0, machine->db_id);
20626 + tuple_set_s32(t, 1, machine->pid);
20627 + tuple_set_string(t, 2, machine->root_dir ? machine->root_dir : "");
20628 +
20629 +@@ -1004,9 +1020,9 @@ static int python_export_thread(struct db_export *dbe, struct thread *thread,
20630 +
20631 + t = tuple_new(5);
20632 +
20633 +- tuple_set_u64(t, 0, thread->db_id);
20634 +- tuple_set_u64(t, 1, machine->db_id);
20635 +- tuple_set_u64(t, 2, main_thread_db_id);
20636 ++ tuple_set_d64(t, 0, thread->db_id);
20637 ++ tuple_set_d64(t, 1, machine->db_id);
20638 ++ tuple_set_d64(t, 2, main_thread_db_id);
20639 + tuple_set_s32(t, 3, thread->pid_);
20640 + tuple_set_s32(t, 4, thread->tid);
20641 +
20642 +@@ -1025,10 +1041,10 @@ static int python_export_comm(struct db_export *dbe, struct comm *comm,
20643 +
20644 + t = tuple_new(5);
20645 +
20646 +- tuple_set_u64(t, 0, comm->db_id);
20647 ++ tuple_set_d64(t, 0, comm->db_id);
20648 + tuple_set_string(t, 1, comm__str(comm));
20649 +- tuple_set_u64(t, 2, thread->db_id);
20650 +- tuple_set_u64(t, 3, comm->start);
20651 ++ tuple_set_d64(t, 2, thread->db_id);
20652 ++ tuple_set_d64(t, 3, comm->start);
20653 + tuple_set_s32(t, 4, comm->exec);
20654 +
20655 + call_object(tables->comm_handler, t, "comm_table");
20656 +@@ -1046,9 +1062,9 @@ static int python_export_comm_thread(struct db_export *dbe, u64 db_id,
20657 +
20658 + t = tuple_new(3);
20659 +
20660 +- tuple_set_u64(t, 0, db_id);
20661 +- tuple_set_u64(t, 1, comm->db_id);
20662 +- tuple_set_u64(t, 2, thread->db_id);
20663 ++ tuple_set_d64(t, 0, db_id);
20664 ++ tuple_set_d64(t, 1, comm->db_id);
20665 ++ tuple_set_d64(t, 2, thread->db_id);
20666 +
20667 + call_object(tables->comm_thread_handler, t, "comm_thread_table");
20668 +
20669 +@@ -1068,8 +1084,8 @@ static int python_export_dso(struct db_export *dbe, struct dso *dso,
20670 +
20671 + t = tuple_new(5);
20672 +
20673 +- tuple_set_u64(t, 0, dso->db_id);
20674 +- tuple_set_u64(t, 1, machine->db_id);
20675 ++ tuple_set_d64(t, 0, dso->db_id);
20676 ++ tuple_set_d64(t, 1, machine->db_id);
20677 + tuple_set_string(t, 2, dso->short_name);
20678 + tuple_set_string(t, 3, dso->long_name);
20679 + tuple_set_string(t, 4, sbuild_id);
20680 +@@ -1090,10 +1106,10 @@ static int python_export_symbol(struct db_export *dbe, struct symbol *sym,
20681 +
20682 + t = tuple_new(6);
20683 +
20684 +- tuple_set_u64(t, 0, *sym_db_id);
20685 +- tuple_set_u64(t, 1, dso->db_id);
20686 +- tuple_set_u64(t, 2, sym->start);
20687 +- tuple_set_u64(t, 3, sym->end);
20688 ++ tuple_set_d64(t, 0, *sym_db_id);
20689 ++ tuple_set_d64(t, 1, dso->db_id);
20690 ++ tuple_set_d64(t, 2, sym->start);
20691 ++ tuple_set_d64(t, 3, sym->end);
20692 + tuple_set_s32(t, 4, sym->binding);
20693 + tuple_set_string(t, 5, sym->name);
20694 +
20695 +@@ -1130,30 +1146,30 @@ static void python_export_sample_table(struct db_export *dbe,
20696 +
20697 + t = tuple_new(24);
20698 +
20699 +- tuple_set_u64(t, 0, es->db_id);
20700 +- tuple_set_u64(t, 1, es->evsel->db_id);
20701 +- tuple_set_u64(t, 2, es->al->maps->machine->db_id);
20702 +- tuple_set_u64(t, 3, es->al->thread->db_id);
20703 +- tuple_set_u64(t, 4, es->comm_db_id);
20704 +- tuple_set_u64(t, 5, es->dso_db_id);
20705 +- tuple_set_u64(t, 6, es->sym_db_id);
20706 +- tuple_set_u64(t, 7, es->offset);
20707 +- tuple_set_u64(t, 8, es->sample->ip);
20708 +- tuple_set_u64(t, 9, es->sample->time);
20709 ++ tuple_set_d64(t, 0, es->db_id);
20710 ++ tuple_set_d64(t, 1, es->evsel->db_id);
20711 ++ tuple_set_d64(t, 2, es->al->maps->machine->db_id);
20712 ++ tuple_set_d64(t, 3, es->al->thread->db_id);
20713 ++ tuple_set_d64(t, 4, es->comm_db_id);
20714 ++ tuple_set_d64(t, 5, es->dso_db_id);
20715 ++ tuple_set_d64(t, 6, es->sym_db_id);
20716 ++ tuple_set_d64(t, 7, es->offset);
20717 ++ tuple_set_d64(t, 8, es->sample->ip);
20718 ++ tuple_set_d64(t, 9, es->sample->time);
20719 + tuple_set_s32(t, 10, es->sample->cpu);
20720 +- tuple_set_u64(t, 11, es->addr_dso_db_id);
20721 +- tuple_set_u64(t, 12, es->addr_sym_db_id);
20722 +- tuple_set_u64(t, 13, es->addr_offset);
20723 +- tuple_set_u64(t, 14, es->sample->addr);
20724 +- tuple_set_u64(t, 15, es->sample->period);
20725 +- tuple_set_u64(t, 16, es->sample->weight);
20726 +- tuple_set_u64(t, 17, es->sample->transaction);
20727 +- tuple_set_u64(t, 18, es->sample->data_src);
20728 ++ tuple_set_d64(t, 11, es->addr_dso_db_id);
20729 ++ tuple_set_d64(t, 12, es->addr_sym_db_id);
20730 ++ tuple_set_d64(t, 13, es->addr_offset);
20731 ++ tuple_set_d64(t, 14, es->sample->addr);
20732 ++ tuple_set_d64(t, 15, es->sample->period);
20733 ++ tuple_set_d64(t, 16, es->sample->weight);
20734 ++ tuple_set_d64(t, 17, es->sample->transaction);
20735 ++ tuple_set_d64(t, 18, es->sample->data_src);
20736 + tuple_set_s32(t, 19, es->sample->flags & PERF_BRANCH_MASK);
20737 + tuple_set_s32(t, 20, !!(es->sample->flags & PERF_IP_FLAG_IN_TX));
20738 +- tuple_set_u64(t, 21, es->call_path_id);
20739 +- tuple_set_u64(t, 22, es->sample->insn_cnt);
20740 +- tuple_set_u64(t, 23, es->sample->cyc_cnt);
20741 ++ tuple_set_d64(t, 21, es->call_path_id);
20742 ++ tuple_set_d64(t, 22, es->sample->insn_cnt);
20743 ++ tuple_set_d64(t, 23, es->sample->cyc_cnt);
20744 +
20745 + call_object(tables->sample_handler, t, "sample_table");
20746 +
20747 +@@ -1167,8 +1183,8 @@ static void python_export_synth(struct db_export *dbe, struct export_sample *es)
20748 +
20749 + t = tuple_new(3);
20750 +
20751 +- tuple_set_u64(t, 0, es->db_id);
20752 +- tuple_set_u64(t, 1, es->evsel->core.attr.config);
20753 ++ tuple_set_d64(t, 0, es->db_id);
20754 ++ tuple_set_d64(t, 1, es->evsel->core.attr.config);
20755 + tuple_set_bytes(t, 2, es->sample->raw_data, es->sample->raw_size);
20756 +
20757 + call_object(tables->synth_handler, t, "synth_data");
20758 +@@ -1200,10 +1216,10 @@ static int python_export_call_path(struct db_export *dbe, struct call_path *cp)
20759 +
20760 + t = tuple_new(4);
20761 +
20762 +- tuple_set_u64(t, 0, cp->db_id);
20763 +- tuple_set_u64(t, 1, parent_db_id);
20764 +- tuple_set_u64(t, 2, sym_db_id);
20765 +- tuple_set_u64(t, 3, cp->ip);
20766 ++ tuple_set_d64(t, 0, cp->db_id);
20767 ++ tuple_set_d64(t, 1, parent_db_id);
20768 ++ tuple_set_d64(t, 2, sym_db_id);
20769 ++ tuple_set_d64(t, 3, cp->ip);
20770 +
20771 + call_object(tables->call_path_handler, t, "call_path_table");
20772 +
20773 +@@ -1221,20 +1237,20 @@ static int python_export_call_return(struct db_export *dbe,
20774 +
20775 + t = tuple_new(14);
20776 +
20777 +- tuple_set_u64(t, 0, cr->db_id);
20778 +- tuple_set_u64(t, 1, cr->thread->db_id);
20779 +- tuple_set_u64(t, 2, comm_db_id);
20780 +- tuple_set_u64(t, 3, cr->cp->db_id);
20781 +- tuple_set_u64(t, 4, cr->call_time);
20782 +- tuple_set_u64(t, 5, cr->return_time);
20783 +- tuple_set_u64(t, 6, cr->branch_count);
20784 +- tuple_set_u64(t, 7, cr->call_ref);
20785 +- tuple_set_u64(t, 8, cr->return_ref);
20786 +- tuple_set_u64(t, 9, cr->cp->parent->db_id);
20787 ++ tuple_set_d64(t, 0, cr->db_id);
20788 ++ tuple_set_d64(t, 1, cr->thread->db_id);
20789 ++ tuple_set_d64(t, 2, comm_db_id);
20790 ++ tuple_set_d64(t, 3, cr->cp->db_id);
20791 ++ tuple_set_d64(t, 4, cr->call_time);
20792 ++ tuple_set_d64(t, 5, cr->return_time);
20793 ++ tuple_set_d64(t, 6, cr->branch_count);
20794 ++ tuple_set_d64(t, 7, cr->call_ref);
20795 ++ tuple_set_d64(t, 8, cr->return_ref);
20796 ++ tuple_set_d64(t, 9, cr->cp->parent->db_id);
20797 + tuple_set_s32(t, 10, cr->flags);
20798 +- tuple_set_u64(t, 11, cr->parent_db_id);
20799 +- tuple_set_u64(t, 12, cr->insn_count);
20800 +- tuple_set_u64(t, 13, cr->cyc_count);
20801 ++ tuple_set_d64(t, 11, cr->parent_db_id);
20802 ++ tuple_set_d64(t, 12, cr->insn_count);
20803 ++ tuple_set_d64(t, 13, cr->cyc_count);
20804 +
20805 + call_object(tables->call_return_handler, t, "call_return_table");
20806 +
20807 +@@ -1254,14 +1270,14 @@ static int python_export_context_switch(struct db_export *dbe, u64 db_id,
20808 +
20809 + t = tuple_new(9);
20810 +
20811 +- tuple_set_u64(t, 0, db_id);
20812 +- tuple_set_u64(t, 1, machine->db_id);
20813 +- tuple_set_u64(t, 2, sample->time);
20814 ++ tuple_set_d64(t, 0, db_id);
20815 ++ tuple_set_d64(t, 1, machine->db_id);
20816 ++ tuple_set_d64(t, 2, sample->time);
20817 + tuple_set_s32(t, 3, sample->cpu);
20818 +- tuple_set_u64(t, 4, th_out_id);
20819 +- tuple_set_u64(t, 5, comm_out_id);
20820 +- tuple_set_u64(t, 6, th_in_id);
20821 +- tuple_set_u64(t, 7, comm_in_id);
20822 ++ tuple_set_d64(t, 4, th_out_id);
20823 ++ tuple_set_d64(t, 5, comm_out_id);
20824 ++ tuple_set_d64(t, 6, th_in_id);
20825 ++ tuple_set_d64(t, 7, comm_in_id);
20826 + tuple_set_s32(t, 8, flags);
20827 +
20828 + call_object(tables->context_switch_handler, t, "context_switch");
20829 +diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore
20830 +index 3ab1200e172fa..b1b37dcade9f2 100644
20831 +--- a/tools/testing/selftests/bpf/.gitignore
20832 ++++ b/tools/testing/selftests/bpf/.gitignore
20833 +@@ -9,6 +9,7 @@ fixdep
20834 + test_dev_cgroup
20835 + /test_progs*
20836 + test_tcpbpf_user
20837 ++!test_progs.h
20838 + test_verifier_log
20839 + feature
20840 + test_sock
20841 +diff --git a/tools/testing/selftests/ftrace/test.d/event/event-no-pid.tc b/tools/testing/selftests/ftrace/test.d/event/event-no-pid.tc
20842 +index e6eb78f0b9545..9933ed24f9012 100644
20843 +--- a/tools/testing/selftests/ftrace/test.d/event/event-no-pid.tc
20844 ++++ b/tools/testing/selftests/ftrace/test.d/event/event-no-pid.tc
20845 +@@ -57,6 +57,10 @@ enable_events() {
20846 + echo 1 > tracing_on
20847 + }
20848 +
20849 ++other_task() {
20850 ++ sleep .001 || usleep 1 || sleep 1
20851 ++}
20852 ++
20853 + echo 0 > options/event-fork
20854 +
20855 + do_reset
20856 +@@ -94,6 +98,9 @@ child=$!
20857 + echo "child = $child"
20858 + wait $child
20859 +
20860 ++# Be sure some other events will happen for small systems (e.g. 1 core)
20861 ++other_task
20862 ++
20863 + echo 0 > tracing_on
20864 +
20865 + cnt=`count_pid $mypid`
20866 +diff --git a/tools/testing/selftests/lkdtm/run.sh b/tools/testing/selftests/lkdtm/run.sh
20867 +index bb7a1775307b8..e95e79bd31268 100755
20868 +--- a/tools/testing/selftests/lkdtm/run.sh
20869 ++++ b/tools/testing/selftests/lkdtm/run.sh
20870 +@@ -76,10 +76,14 @@ fi
20871 + # Save existing dmesg so we can detect new content below
20872 + dmesg > "$DMESG"
20873 +
20874 +-# Most shells yell about signals and we're expecting the "cat" process
20875 +-# to usually be killed by the kernel. So we have to run it in a sub-shell
20876 +-# and silence errors.
20877 +-($SHELL -c 'cat <(echo '"$test"') >'"$TRIGGER" 2>/dev/null) || true
20878 ++# Since the kernel is likely killing the process writing to the trigger
20879 ++# file, it must not be the script's shell itself. i.e. we cannot do:
20880 ++# echo "$test" >"$TRIGGER"
20881 ++# Instead, use "cat" to take the signal. Since the shell will yell about
20882 ++# the signal that killed the subprocess, we must ignore the failure and
20883 ++# continue. However we don't silence stderr since there might be other
20884 ++# useful details reported there in the case of other unexpected conditions.
20885 ++echo "$test" | cat >"$TRIGGER" || true
20886 +
20887 + # Record and dump the results
20888 + dmesg | comm --nocheck-order -13 "$DMESG" - > "$LOG" || true
20889 +diff --git a/tools/testing/selftests/splice/short_splice_read.sh b/tools/testing/selftests/splice/short_splice_read.sh
20890 +index 7810d3589d9ab..22b6c8910b182 100755
20891 +--- a/tools/testing/selftests/splice/short_splice_read.sh
20892 ++++ b/tools/testing/selftests/splice/short_splice_read.sh
20893 +@@ -1,21 +1,87 @@
20894 + #!/bin/sh
20895 + # SPDX-License-Identifier: GPL-2.0
20896 ++#
20897 ++# Test for mishandling of splice() on pseudofilesystems, which should catch
20898 ++# bugs like 11990a5bd7e5 ("module: Correctly truncate sysfs sections output")
20899 ++#
20900 ++# Since splice fallback was removed as part of the set_fs() rework, many of these
20901 ++# tests expect to fail now. See https://lore.kernel.org/lkml/202009181443.C2179FB@keescook/
20902 + set -e
20903 +
20904 ++DIR=$(dirname "$0")
20905 ++
20906 + ret=0
20907 +
20908 ++expect_success()
20909 ++{
20910 ++ title="$1"
20911 ++ shift
20912 ++
20913 ++ echo "" >&2
20914 ++ echo "$title ..." >&2
20915 ++
20916 ++ set +e
20917 ++ "$@"
20918 ++ rc=$?
20919 ++ set -e
20920 ++
20921 ++ case "$rc" in
20922 ++ 0)
20923 ++ echo "ok: $title succeeded" >&2
20924 ++ ;;
20925 ++ 1)
20926 ++ echo "FAIL: $title should work" >&2
20927 ++ ret=$(( ret + 1 ))
20928 ++ ;;
20929 ++ *)
20930 ++ echo "FAIL: something else went wrong" >&2
20931 ++ ret=$(( ret + 1 ))
20932 ++ ;;
20933 ++ esac
20934 ++}
20935 ++
20936 ++expect_failure()
20937 ++{
20938 ++ title="$1"
20939 ++ shift
20940 ++
20941 ++ echo "" >&2
20942 ++ echo "$title ..." >&2
20943 ++
20944 ++ set +e
20945 ++ "$@"
20946 ++ rc=$?
20947 ++ set -e
20948 ++
20949 ++ case "$rc" in
20950 ++ 0)
20951 ++ echo "FAIL: $title unexpectedly worked" >&2
20952 ++ ret=$(( ret + 1 ))
20953 ++ ;;
20954 ++ 1)
20955 ++ echo "ok: $title correctly failed" >&2
20956 ++ ;;
20957 ++ *)
20958 ++ echo "FAIL: something else went wrong" >&2
20959 ++ ret=$(( ret + 1 ))
20960 ++ ;;
20961 ++ esac
20962 ++}
20963 ++
20964 + do_splice()
20965 + {
20966 + filename="$1"
20967 + bytes="$2"
20968 + expected="$3"
20969 ++ report="$4"
20970 +
20971 +- out=$(./splice_read "$filename" "$bytes" | cat)
20972 ++ out=$("$DIR"/splice_read "$filename" "$bytes" | cat)
20973 + if [ "$out" = "$expected" ] ; then
20974 +- echo "ok: $filename $bytes"
20975 ++ echo " matched $report" >&2
20976 ++ return 0
20977 + else
20978 +- echo "FAIL: $filename $bytes"
20979 +- ret=1
20980 ++ echo " no match: '$out' vs $report" >&2
20981 ++ return 1
20982 + fi
20983 + }
20984 +
20985 +@@ -23,34 +89,45 @@ test_splice()
20986 + {
20987 + filename="$1"
20988 +
20989 ++ echo " checking $filename ..." >&2
20990 ++
20991 + full=$(cat "$filename")
20992 ++ rc=$?
20993 ++ if [ $rc -ne 0 ] ; then
20994 ++ return 2
20995 ++ fi
20996 ++
20997 + two=$(echo "$full" | grep -m1 . | cut -c-2)
20998 +
20999 + # Make sure full splice has the same contents as a standard read.
21000 +- do_splice "$filename" 4096 "$full"
21001 ++ echo " splicing 4096 bytes ..." >&2
21002 ++ if ! do_splice "$filename" 4096 "$full" "full read" ; then
21003 ++ return 1
21004 ++ fi
21005 +
21006 + # Make sure a partial splice see the first two characters.
21007 +- do_splice "$filename" 2 "$two"
21008 ++ echo " splicing 2 bytes ..." >&2
21009 ++ if ! do_splice "$filename" 2 "$two" "'$two'" ; then
21010 ++ return 1
21011 ++ fi
21012 ++
21013 ++ return 0
21014 + }
21015 +
21016 +-# proc_single_open(), seq_read()
21017 +-test_splice /proc/$$/limits
21018 +-# special open, seq_read()
21019 +-test_splice /proc/$$/comm
21020 ++### /proc/$pid/ has no splice interface; these should all fail.
21021 ++expect_failure "proc_single_open(), seq_read() splice" test_splice /proc/$$/limits
21022 ++expect_failure "special open(), seq_read() splice" test_splice /proc/$$/comm
21023 +
21024 +-# proc_handler, proc_dointvec_minmax
21025 +-test_splice /proc/sys/fs/nr_open
21026 +-# proc_handler, proc_dostring
21027 +-test_splice /proc/sys/kernel/modprobe
21028 +-# proc_handler, special read
21029 +-test_splice /proc/sys/kernel/version
21030 ++### /proc/sys/ has a splice interface; these should all succeed.
21031 ++expect_success "proc_handler: proc_dointvec_minmax() splice" test_splice /proc/sys/fs/nr_open
21032 ++expect_success "proc_handler: proc_dostring() splice" test_splice /proc/sys/kernel/modprobe
21033 ++expect_success "proc_handler: special read splice" test_splice /proc/sys/kernel/version
21034 +
21035 ++### /sys/ has no splice interface; these should all fail.
21036 + if ! [ -d /sys/module/test_module/sections ] ; then
21037 +- modprobe test_module
21038 ++ expect_success "test_module kernel module load" modprobe test_module
21039 + fi
21040 +-# kernfs, attr
21041 +-test_splice /sys/module/test_module/coresize
21042 +-# kernfs, binattr
21043 +-test_splice /sys/module/test_module/sections/.init.text
21044 ++expect_failure "kernfs attr splice" test_splice /sys/module/test_module/coresize
21045 ++expect_failure "kernfs binattr splice" test_splice /sys/module/test_module/sections/.init.text
21046 +
21047 + exit $ret
21048 +diff --git a/tools/testing/selftests/tc-testing/plugin-lib/scapyPlugin.py b/tools/testing/selftests/tc-testing/plugin-lib/scapyPlugin.py
21049 +index 229ee185b27e1..a7b21658af9b4 100644
21050 +--- a/tools/testing/selftests/tc-testing/plugin-lib/scapyPlugin.py
21051 ++++ b/tools/testing/selftests/tc-testing/plugin-lib/scapyPlugin.py
21052 +@@ -36,7 +36,7 @@ class SubPlugin(TdcPlugin):
21053 + for k in scapy_keys:
21054 + if k not in scapyinfo:
21055 + keyfail = True
21056 +- missing_keys.add(k)
21057 ++ missing_keys.append(k)
21058 + if keyfail:
21059 + print('{}: Scapy block present in the test, but is missing info:'
21060 + .format(self.sub_class))
21061 +diff --git a/tools/testing/selftests/vm/protection_keys.c b/tools/testing/selftests/vm/protection_keys.c
21062 +index fdbb602ecf325..87eecd5ba577b 100644
21063 +--- a/tools/testing/selftests/vm/protection_keys.c
21064 ++++ b/tools/testing/selftests/vm/protection_keys.c
21065 +@@ -510,7 +510,7 @@ int alloc_pkey(void)
21066 + " shadow: 0x%016llx\n",
21067 + __func__, __LINE__, ret, __read_pkey_reg(),
21068 + shadow_pkey_reg);
21069 +- if (ret) {
21070 ++ if (ret > 0) {
21071 + /* clear both the bits: */
21072 + shadow_pkey_reg = set_pkey_bits(shadow_pkey_reg, ret,
21073 + ~PKEY_MASK);
21074 +@@ -561,7 +561,6 @@ int alloc_random_pkey(void)
21075 + int nr_alloced = 0;
21076 + int random_index;
21077 + memset(alloced_pkeys, 0, sizeof(alloced_pkeys));
21078 +- srand((unsigned int)time(NULL));
21079 +
21080 + /* allocate every possible key and make a note of which ones we got */
21081 + max_nr_pkey_allocs = NR_PKEYS;
21082 +@@ -1449,6 +1448,13 @@ void test_implicit_mprotect_exec_only_memory(int *ptr, u16 pkey)
21083 + ret = mprotect(p1, PAGE_SIZE, PROT_EXEC);
21084 + pkey_assert(!ret);
21085 +
21086 ++ /*
21087 ++ * Reset the shadow, assuming that the above mprotect()
21088 ++ * correctly changed PKRU, but to an unknown value since
21089 ++ * the actual alllocated pkey is unknown.
21090 ++ */
21091 ++ shadow_pkey_reg = __read_pkey_reg();
21092 ++
21093 + dprintf2("pkey_reg: %016llx\n", read_pkey_reg());
21094 +
21095 + /* Make sure this is an *instruction* fault */
21096 +@@ -1552,6 +1558,8 @@ int main(void)
21097 + int nr_iterations = 22;
21098 + int pkeys_supported = is_pkeys_supported();
21099 +
21100 ++ srand((unsigned int)time(NULL));
21101 ++
21102 + setup_handlers();
21103 +
21104 + printf("has pkeys: %d\n", pkeys_supported);